| repo | file_url | file_path | content | language | license | commit_sha | retrieved_at | truncated |
|---|---|---|---|---|---|---|---|---|
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/chunkedreader/sequential.go | fs/chunkedreader/sequential.go | package chunkedreader
import (
"context"
"io"
"sync"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
)
// sequential is a reader for an Object with the possibility
// of reading the source in chunks of given size
//
// An initialChunkSize of <= 0 will disable chunked reading.
type sequential struct {
ctx context.Context
mu sync.Mutex // protects following fields
o fs.Object // source to read from
rc io.ReadCloser // reader for the current open chunk
offset int64 // offset the next Read will start. -1 forces a reopen of o
chunkOffset int64 // beginning of the current or next chunk
chunkSize int64 // length of the current or next chunk. -1 will open o from chunkOffset to the end
initialChunkSize int64 // default chunkSize after the chunk specified by RangeSeek is complete
maxChunkSize int64 // consecutive read chunks will double in size until reached. -1 means no limit
customChunkSize bool // is the current chunkSize set by RangeSeek?
closed bool // has Close been called?
}
// Make a new sequential chunked reader
func newSequential(ctx context.Context, o fs.Object, initialChunkSize int64, maxChunkSize int64) ChunkedReader {
return &sequential{
ctx: ctx,
o: o,
offset: -1,
chunkSize: initialChunkSize,
initialChunkSize: initialChunkSize,
maxChunkSize: maxChunkSize,
}
}
// Read from the file - for details see io.Reader
func (cr *sequential) Read(p []byte) (n int, err error) {
cr.mu.Lock()
defer cr.mu.Unlock()
if cr.closed {
return 0, ErrorFileClosed
}
for reqSize := int64(len(p)); reqSize > 0; reqSize = int64(len(p)) {
// the current chunk boundary. valid only when chunkSize > 0
chunkEnd := cr.chunkOffset + cr.chunkSize
fs.Debugf(cr.o, "ChunkedReader.Read at %d length %d chunkOffset %d chunkSize %d", cr.offset, reqSize, cr.chunkOffset, cr.chunkSize)
switch {
case cr.chunkSize > 0 && cr.offset == chunkEnd: // last chunk read completely
cr.chunkOffset = cr.offset
if cr.customChunkSize { // last chunkSize was set by RangeSeek
cr.customChunkSize = false
cr.chunkSize = cr.initialChunkSize
} else {
cr.chunkSize *= 2
if cr.chunkSize > cr.maxChunkSize && cr.maxChunkSize != -1 {
cr.chunkSize = cr.maxChunkSize
}
}
// recalculate the chunk boundary. valid only when chunkSize > 0
chunkEnd = cr.chunkOffset + cr.chunkSize
fallthrough
case cr.offset == -1: // first Read or Read after RangeSeek
err = cr.openRange()
if err != nil {
return
}
}
var buf []byte
chunkRest := chunkEnd - cr.offset
// limit read to chunk boundaries if chunkSize > 0
if reqSize > chunkRest && cr.chunkSize > 0 {
buf, p = p[0:chunkRest], p[chunkRest:]
} else {
buf, p = p, nil
}
var rn int
rn, err = io.ReadFull(cr.rc, buf)
n += rn
cr.offset += int64(rn)
if err != nil {
if err == io.ErrUnexpectedEOF {
err = io.EOF
}
return
}
}
return n, nil
}
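// Worked example of the chunk growth implemented above (illustrative values,
// not defaults taken from this package): with initialChunkSize = 64 KiB and
// maxChunkSize = 1 MiB, successive chunks read from offset 0 cover 64 KiB,
// 128 KiB, 256 KiB, 512 KiB, 1 MiB, 1 MiB, ... until EOF. A RangeSeek with
// length > 0 applies that length to the next chunk only, after which the
// chunk size falls back to initialChunkSize.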
// Close the file - for details see io.Closer
//
// All methods on ChunkedReader will return ErrorFileClosed afterwards
func (cr *sequential) Close() error {
cr.mu.Lock()
defer cr.mu.Unlock()
if cr.closed {
return ErrorFileClosed
}
cr.closed = true
return cr.resetReader(nil, 0)
}
// Seek the file - for details see io.Seeker
func (cr *sequential) Seek(offset int64, whence int) (int64, error) {
return cr.RangeSeek(context.TODO(), offset, whence, -1)
}
// RangeSeek the file - for details see RangeSeeker
//
// The specified length will only apply to the next chunk opened.
// RangeSeek will not reopen the source until Read is called.
func (cr *sequential) RangeSeek(ctx context.Context, offset int64, whence int, length int64) (int64, error) {
cr.mu.Lock()
defer cr.mu.Unlock()
fs.Debugf(cr.o, "ChunkedReader.RangeSeek from %d to %d length %d", cr.offset, offset, length)
if cr.closed {
return 0, ErrorFileClosed
}
size := cr.o.Size()
switch whence {
case io.SeekStart:
cr.offset = 0
case io.SeekEnd:
if size < 0 {
return 0, ErrorInvalidSeek // Can't seek from end for unknown size
}
cr.offset = size
}
// set the new chunk start
cr.chunkOffset = cr.offset + offset
// force reopen on next Read
cr.offset = -1
if length > 0 {
cr.customChunkSize = true
cr.chunkSize = length
} else {
cr.chunkSize = cr.initialChunkSize
}
if cr.chunkOffset < 0 || cr.chunkOffset >= size {
cr.chunkOffset = 0
return 0, ErrorInvalidSeek
}
return cr.chunkOffset, nil
}
// Open forces the connection to be opened
func (cr *sequential) Open() (ChunkedReader, error) {
cr.mu.Lock()
defer cr.mu.Unlock()
if cr.rc != nil && cr.offset != -1 {
return cr, nil
}
return cr, cr.openRange()
}
// openRange will open the source Object with the current chunk range
//
// If the current open reader implements RangeSeeker, it is tried first.
// When RangeSeek fails, o.Open with a RangeOption is used.
//
// A length <= 0 will request till the end of the file
func (cr *sequential) openRange() error {
offset, length := cr.chunkOffset, cr.chunkSize
fs.Debugf(cr.o, "ChunkedReader.openRange at %d length %d", offset, length)
if cr.closed {
return ErrorFileClosed
}
if rs, ok := cr.rc.(fs.RangeSeeker); ok {
n, err := rs.RangeSeek(cr.ctx, offset, io.SeekStart, length)
if err == nil && n == offset {
cr.offset = offset
return nil
}
if err != nil {
fs.Debugf(cr.o, "ChunkedReader.openRange seek failed (%s). Trying Open", err)
} else {
fs.Debugf(cr.o, "ChunkedReader.openRange seeked to wrong offset. Wanted %d, got %d. Trying Open", offset, n)
}
}
var rc io.ReadCloser
var err error
if length <= 0 {
if offset == 0 {
rc, err = cr.o.Open(cr.ctx, &fs.HashesOption{Hashes: hash.Set(hash.None)})
} else {
rc, err = cr.o.Open(cr.ctx, &fs.HashesOption{Hashes: hash.Set(hash.None)}, &fs.RangeOption{Start: offset, End: -1})
}
} else {
rc, err = cr.o.Open(cr.ctx, &fs.HashesOption{Hashes: hash.Set(hash.None)}, &fs.RangeOption{Start: offset, End: offset + length - 1})
}
if err != nil {
return err
}
return cr.resetReader(rc, offset)
}
// resetReader switches the current reader to the given reader.
// The old reader will be Close'd before setting the new reader.
func (cr *sequential) resetReader(rc io.ReadCloser, offset int64) error {
if cr.rc != nil {
if err := cr.rc.Close(); err != nil {
return err
}
}
cr.rc = rc
cr.offset = offset
return nil
}
var (
_ ChunkedReader = (*sequential)(nil)
)
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/chunkedreader/parallel_test.go | fs/chunkedreader/parallel_test.go | package chunkedreader
import (
"context"
"io"
"math/rand"
"testing"
"github.com/rclone/rclone/fstest/mockobject"
"github.com/rclone/rclone/lib/multipart"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestParallel(t *testing.T) {
content := makeContent(t, 1024)
for _, mode := range mockobject.SeekModes {
t.Run(mode.String(), testRead(content, mode, 3))
}
}
func TestParallelErrorAfterClose(t *testing.T) {
testErrorAfterClose(t, 3)
}
func TestParallelLarge(t *testing.T) {
ctx := context.Background()
const streams = 3
const chunkSize = multipart.BufferSize
const size = (2*streams+1)*chunkSize + 255
content := makeContent(t, size)
o := mockobject.New("test.bin").WithContent(content, mockobject.SeekModeNone)
cr := New(ctx, o, chunkSize, 0, streams)
for _, test := range []struct {
name string
offset int64
seekMode int
}{
{name: "Straight", offset: 0, seekMode: -1},
{name: "Rewind", offset: 0, seekMode: io.SeekStart},
{name: "NearStart", offset: 1, seekMode: io.SeekStart},
{name: "NearEnd", offset: size - 2*chunkSize - 127, seekMode: io.SeekEnd},
} {
t.Run(test.name, func(t *testing.T) {
if test.seekMode >= 0 {
var n int64
var err error
if test.seekMode == io.SeekEnd {
n, err = cr.Seek(test.offset-size, test.seekMode)
} else {
n, err = cr.Seek(test.offset, test.seekMode)
}
require.NoError(t, err)
assert.Equal(t, test.offset, n)
}
got, err := io.ReadAll(cr)
require.NoError(t, err)
require.Equal(t, len(content[test.offset:]), len(got))
assert.Equal(t, content[test.offset:], got)
})
}
require.NoError(t, cr.Close())
t.Run("Seeky", func(t *testing.T) {
cr := New(ctx, o, chunkSize, 0, streams)
offset := 0
buf := make([]byte, 1024)
for {
// Read and check a random read
readSize := rand.Intn(1024)
readBuf := buf[:readSize]
n, err := cr.Read(readBuf)
require.Equal(t, content[offset:offset+n], readBuf[:n])
offset += n
if err == io.EOF {
assert.Equal(t, size, offset)
break
}
require.NoError(t, err)
// Now do a smaller random seek backwards
seekSize := rand.Intn(512)
if offset-seekSize < 0 {
seekSize = offset
}
nn, err := cr.Seek(-int64(seekSize), io.SeekCurrent)
offset -= seekSize
require.NoError(t, err)
assert.Equal(t, nn, int64(offset))
}
require.NoError(t, cr.Close())
})
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/chunkedreader/parallel.go | fs/chunkedreader/parallel.go | package chunkedreader
import (
"context"
"fmt"
"io"
"sync"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/multipart"
"github.com/rclone/rclone/lib/pool"
)
// parallel reads Object in chunks of a given size in parallel.
type parallel struct {
ctx context.Context
o fs.Object // source to read from
mu sync.Mutex // protects following fields
endStream int64 // offset we have started streams for
offset int64 // offset the read file pointer is at
chunkSize int64 // length of the chunks to read
nstreams int // number of streams to use
streams []*stream // the opened streams in offset order - the current one is first
closed bool // has Close been called?
}
// stream holds the info about a single download
type stream struct {
cr *parallel // parent reader
ctx context.Context // ctx to cancel if needed
cancel func() // cancel the stream
rc io.ReadCloser // reader that it is reading from, may be nil
offset int64 // where the stream is reading from
size int64 // and the size it is reading
readBytes int64 // bytes read from the stream
rw *pool.RW // buffer for read
err chan error // error returned from the read
name string // name of this stream for debugging
}
// Start a stream reading (offset, offset+size)
func (cr *parallel) newStream(ctx context.Context, offset, size int64) (s *stream, err error) {
ctx, cancel := context.WithCancel(ctx)
// Create the stream
rw := multipart.NewRW()
s = &stream{
cr: cr,
ctx: ctx,
cancel: cancel,
offset: offset,
size: size,
rw: rw,
err: make(chan error, 1),
}
s.name = fmt.Sprintf("stream(%d,%d,%p)", s.offset, s.size, s)
// Start the background read into the buffer
go s.readFrom(ctx)
// Return the stream to the caller
return s, nil
}
// read the file into the buffer
func (s *stream) readFrom(ctx context.Context) {
// Open the object at the correct range
fs.Debugf(s.cr.o, "%s: open", s.name)
rc, err := operations.Open(ctx, s.cr.o,
&fs.HashesOption{Hashes: hash.Set(hash.None)},
&fs.RangeOption{Start: s.offset, End: s.offset + s.size - 1})
if err != nil {
s.err <- fmt.Errorf("parallel chunked reader: failed to open stream at %d size %d: %w", s.offset, s.size, err)
return
}
s.rc = rc
fs.Debugf(s.cr.o, "%s: readfrom started", s.name)
_, err = s.rw.ReadFrom(s.rc)
fs.Debugf(s.cr.o, "%s: readfrom finished (%d bytes): %v", s.name, s.rw.Size(), err)
s.err <- err
}
// eof is true when we've read all the data we are expecting
func (s *stream) eof() bool {
return s.readBytes >= s.size
}
// read reads up to len(p) bytes into p. It returns the number of
// bytes read (0 <= n <= len(p)) and any error encountered. If some
// data is available but not len(p) bytes, read returns what is
// available instead of waiting for more.
func (s *stream) read(p []byte) (n int, err error) {
defer log.Trace(s.cr.o, "%s: Read len(p)=%d", s.name, len(p))("n=%d, err=%v", &n, &err)
if len(p) == 0 {
return n, nil
}
for {
var nn int
nn, err = s.rw.Read(p[n:])
fs.Debugf(s.cr.o, "%s: rw.Read nn=%d, err=%v", s.name, nn, err)
s.readBytes += int64(nn)
n += nn
if err != nil && err != io.EOF {
return n, err
}
if s.eof() {
return n, io.EOF
}
// Received a faux io.EOF because we haven't read all the data yet
if n >= len(p) {
break
}
// Wait for a write to happen to read more
s.rw.WaitWrite(s.ctx)
}
return n, nil
}
// Sets *perr to newErr if *perr is nil
func orErr(perr *error, newErr error) {
if *perr == nil {
*perr = newErr
}
}
// Close a stream
func (s *stream) close() (err error) {
defer log.Trace(s.cr.o, "%s: close", s.name)("err=%v", &err)
s.cancel()
err = <-s.err // wait for readFrom to stop and return error
orErr(&err, s.rw.Close())
if s.rc != nil {
orErr(&err, s.rc.Close())
}
if err != nil && err != io.EOF {
return fmt.Errorf("parallel chunked reader: failed to read stream at %d size %d: %w", s.offset, s.size, err)
}
return nil
}
// Make a new parallel chunked reader
//
// Must not be called for an object of unknown size
func newParallel(ctx context.Context, o fs.Object, chunkSize int64, streams int) ChunkedReader {
// Make sure chunkSize is a multiple of multipart.BufferSize
if chunkSize < 0 {
chunkSize = multipart.BufferSize
}
newChunkSize := multipart.BufferSize * (chunkSize / multipart.BufferSize)
if newChunkSize < chunkSize {
newChunkSize += multipart.BufferSize
}
fs.Debugf(o, "newParallel chunkSize=%d, streams=%d", chunkSize, streams)
return &parallel{
ctx: ctx,
o: o,
offset: 0,
chunkSize: newChunkSize,
nstreams: streams,
}
}
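// Worked example of the rounding above (hypothetical numbers, chosen only to
// illustrate the arithmetic): if multipart.BufferSize were 1 MiB, a requested
// chunkSize of 1.5 MiB would round up to 2 MiB, an exact multiple such as
// 3 MiB would be kept unchanged, and a negative chunkSize would become a
// single buffer of 1 MiB.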
// _open starts the file transferring at offset
//
// Call with the lock held
func (cr *parallel) _open() (err error) {
size := cr.o.Size()
if size < 0 {
return fmt.Errorf("parallel chunked reader: can't use multiple threads for unknown sized object %q", cr.o)
}
// Launched enough streams already
if cr.endStream >= size {
return nil
}
// Make sure cr.nstreams are running
for i := len(cr.streams); i < cr.nstreams; i++ {
// clip to length of file
chunkSize := cr.chunkSize
newEndStream := cr.endStream + chunkSize
if newEndStream > size {
chunkSize = size - cr.endStream
newEndStream = cr.endStream + chunkSize
}
s, err := cr.newStream(cr.ctx, cr.endStream, chunkSize)
if err != nil {
return err
}
cr.streams = append(cr.streams, s)
cr.endStream = newEndStream
if cr.endStream >= size {
break
}
}
return nil
}
// Finished reading the current stream so pop it off and destroy it
//
// Call with lock held
func (cr *parallel) _popStream() (err error) {
defer log.Trace(cr.o, "streams=%+v", cr.streams)("streams=%+v, err=%v", &cr.streams, &err)
if len(cr.streams) == 0 {
return nil
}
stream := cr.streams[0]
err = stream.close()
cr.streams[0] = nil
cr.streams = cr.streams[1:]
return err
}
// Get rid of all the streams
//
// Call with lock held
func (cr *parallel) _popStreams() (err error) {
defer log.Trace(cr.o, "streams=%+v", cr.streams)("streams=%+v, err=%v", &cr.streams, &err)
for len(cr.streams) > 0 {
orErr(&err, cr._popStream())
}
cr.streams = nil
return err
}
// Read from the file - for details see io.Reader
func (cr *parallel) Read(p []byte) (n int, err error) {
defer log.Trace(cr.o, "Read len(p)=%d", len(p))("n=%d, err=%v", &n, &err)
cr.mu.Lock()
defer cr.mu.Unlock()
if cr.closed {
return 0, ErrorFileClosed
}
for n < len(p) {
// Make sure we have the correct number of streams open
err = cr._open()
if err != nil {
return n, err
}
// No streams left means EOF
if len(cr.streams) == 0 {
return n, io.EOF
}
// Read from the stream
stream := cr.streams[0]
nn, err := stream.read(p[n:])
n += nn
cr.offset += int64(nn)
if err == io.EOF {
err = cr._popStream()
if err != nil {
break
}
} else if err != nil {
break
}
}
return n, err
}
// Close the file - for details see io.Closer
//
// All methods on ChunkedReader will return ErrorFileClosed afterwards
func (cr *parallel) Close() error {
cr.mu.Lock()
defer cr.mu.Unlock()
if cr.closed {
return ErrorFileClosed
}
cr.closed = true
// Close all the streams
return cr._popStreams()
}
// Seek the file - for details see io.Seeker
func (cr *parallel) Seek(offset int64, whence int) (int64, error) {
cr.mu.Lock()
defer cr.mu.Unlock()
fs.Debugf(cr.o, "parallel chunked reader: seek from %d to %d whence %d", cr.offset, offset, whence)
if cr.closed {
return 0, ErrorFileClosed
}
size := cr.o.Size()
currentOffset := cr.offset
switch whence {
case io.SeekStart:
currentOffset = 0
case io.SeekEnd:
currentOffset = size
}
// set the new chunk start
newOffset := currentOffset + offset
if newOffset < 0 || newOffset >= size {
return 0, ErrorInvalidSeek
}
// If seek pointer didn't move, return now
if newOffset == cr.offset {
fs.Debugf(cr.o, "parallel chunked reader: seek pointer didn't move")
return cr.offset, nil
}
cr.offset = newOffset
// Ditch out of range streams
for len(cr.streams) > 0 {
stream := cr.streams[0]
if newOffset >= stream.offset+stream.size {
_ = cr._popStream()
} else {
break
}
}
// If no streams remain we can just restart
if len(cr.streams) == 0 {
fs.Debugf(cr.o, "parallel chunked reader: no streams remain")
cr.endStream = cr.offset
return cr.offset, nil
}
// Current stream
stream := cr.streams[0]
// If new offset is before current stream then ditch all the streams
if newOffset < stream.offset {
_ = cr._popStreams()
fs.Debugf(cr.o, "parallel chunked reader: new offset is before current stream - ditch all")
cr.endStream = cr.offset
return cr.offset, nil
}
// Seek the current stream
streamOffset := newOffset - stream.offset
stream.readBytes = streamOffset // correct read value
fs.Debugf(cr.o, "parallel chunked reader: seek the current stream to %d", streamOffset)
// Wait for the read to the correct part of the data
for stream.rw.Size() < streamOffset {
stream.rw.WaitWrite(cr.ctx)
}
_, err := stream.rw.Seek(streamOffset, io.SeekStart)
if err != nil {
return cr.offset, fmt.Errorf("parallel chunked reader: failed to seek stream: %w", err)
}
return cr.offset, nil
}
// RangeSeek the file - for details see RangeSeeker
//
// In the parallel chunked reader this just acts like Seek
func (cr *parallel) RangeSeek(ctx context.Context, offset int64, whence int, length int64) (int64, error) {
return cr.Seek(offset, whence)
}
// Open forces the connection to be opened
func (cr *parallel) Open() (ChunkedReader, error) {
cr.mu.Lock()
defer cr.mu.Unlock()
return cr, cr._open()
}
var (
_ ChunkedReader = (*parallel)(nil)
)
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/chunkedreader/sequential_test.go | fs/chunkedreader/sequential_test.go | package chunkedreader
import (
"testing"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fstest/mockobject"
)
func TestSequential(t *testing.T) {
content := makeContent(t, 1024)
for _, mode := range mockobject.SeekModes {
t.Run(mode.String(), testRead(content, mode, 0))
}
}
func TestSequentialErrorAfterClose(t *testing.T) {
testErrorAfterClose(t, 0)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/chunkedreader/chunkedreader.go | fs/chunkedreader/chunkedreader.go | // Package chunkedreader provides functionality for reading a stream in chunks.
package chunkedreader
import (
"context"
"errors"
"io"
"github.com/rclone/rclone/fs"
)
// io related errors returned by ChunkedReader
var (
ErrorFileClosed = errors.New("file already closed")
ErrorInvalidSeek = errors.New("invalid seek position")
)
// ChunkedReader describes what a chunked reader can do.
type ChunkedReader interface {
io.Reader
io.Seeker
io.Closer
fs.RangeSeeker
Open() (ChunkedReader, error)
}
// New returns a ChunkedReader for the Object.
//
// An initialChunkSize of <= 0 will disable chunked reading.
// If maxChunkSize is greater than initialChunkSize, the chunk size will be
// doubled after each chunk read with a maximum of maxChunkSize.
// A Seek or RangeSeek will reset the chunk size to its initial value
func New(ctx context.Context, o fs.Object, initialChunkSize int64, maxChunkSize int64, streams int) ChunkedReader {
if initialChunkSize <= 0 {
initialChunkSize = -1
}
if maxChunkSize != -1 && maxChunkSize < initialChunkSize {
maxChunkSize = initialChunkSize
}
if streams < 0 {
streams = 0
}
if streams <= 1 || o.Size() < 0 {
return newSequential(ctx, o, initialChunkSize, maxChunkSize)
}
return newParallel(ctx, o, initialChunkSize, streams)
}
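// Illustrative sketch (not part of the upstream file): one plausible way to
// consume the ChunkedReader returned by New. exampleReadAll and the chunk
// size / stream count literals are assumptions made for this example only.
func exampleReadAll(ctx context.Context, o fs.Object) ([]byte, error) {
	// With streams > 1 and a known object size New returns the parallel
	// reader; otherwise it falls back to the sequential one.
	cr := New(ctx, o, 1024*1024, 8*1024*1024, 4)
	defer func() { _ = cr.Close() }()
	return io.ReadAll(cr)
}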
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/object/object_test.go | fs/object/object_test.go | package object_test
import (
"bytes"
"context"
"io"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/stretchr/testify/assert"
)
func TestStaticObject(t *testing.T) {
now := time.Now()
remote := "path/to/object"
size := int64(1024)
o := object.NewStaticObjectInfo(remote, now, size, true, nil, object.MemoryFs)
assert.Equal(t, object.MemoryFs, o.Fs())
assert.Equal(t, remote, o.Remote())
assert.Equal(t, remote, o.String())
assert.Equal(t, now, o.ModTime(context.Background()))
assert.Equal(t, size, o.Size())
assert.Equal(t, true, o.Storable())
Hash, err := o.Hash(context.Background(), hash.MD5)
assert.NoError(t, err)
assert.Equal(t, "", Hash)
o = object.NewStaticObjectInfo(remote, now, size, true, nil, nil)
_, err = o.Hash(context.Background(), hash.MD5)
assert.Equal(t, hash.ErrUnsupported, err)
assert.Equal(t, object.MemoryFs, o.Fs())
hs := map[hash.Type]string{
hash.MD5: "potato",
}
o = object.NewStaticObjectInfo(remote, now, size, true, hs, nil)
Hash, err = o.Hash(context.Background(), hash.MD5)
assert.NoError(t, err)
assert.Equal(t, "potato", Hash)
_, err = o.Hash(context.Background(), hash.SHA1)
assert.Equal(t, hash.ErrUnsupported, err)
}
func TestMemoryFs(t *testing.T) {
f := object.MemoryFs
assert.Equal(t, "memory", f.Name())
assert.Equal(t, "", f.Root())
assert.Equal(t, "memory", f.String())
assert.Equal(t, time.Nanosecond, f.Precision())
assert.Equal(t, hash.Supported(), f.Hashes())
assert.Equal(t, &fs.Features{}, f.Features())
entries, err := f.List(context.Background(), "")
assert.NoError(t, err)
assert.Nil(t, entries)
o, err := f.NewObject(context.Background(), "obj")
assert.Equal(t, fs.ErrorObjectNotFound, err)
assert.Nil(t, o)
buf := bytes.NewBufferString("potato")
now := time.Now()
src := object.NewStaticObjectInfo("remote", now, int64(buf.Len()), true, nil, nil)
o, err = f.Put(context.Background(), buf, src)
assert.NoError(t, err)
hash, err := o.Hash(context.Background(), hash.SHA1)
assert.NoError(t, err)
assert.Equal(t, "3e2e95f5ad970eadfa7e17eaf73da97024aa5359", hash)
err = f.Mkdir(context.Background(), "dir")
assert.Error(t, err)
err = f.Rmdir(context.Background(), "dir")
assert.Equal(t, fs.ErrorDirNotFound, err)
}
func TestMemoryObject(t *testing.T) {
remote := "path/to/object"
now := time.Now()
content := []byte("potatoXXXXXXXXXXXXX")
content = content[:6] // make some extra cap
o := object.NewMemoryObject(remote, now, content)
o.WithMimeType("text/plain; charset=utf-8")
assert.Equal(t, content, o.Content())
assert.Equal(t, object.MemoryFs, o.Fs())
assert.Equal(t, remote, o.Remote())
assert.Equal(t, remote, o.String())
assert.Equal(t, now, o.ModTime(context.Background()))
assert.Equal(t, int64(len(content)), o.Size())
assert.Equal(t, true, o.Storable())
assert.Equal(t, "text/plain; charset=utf-8", o.MimeType(context.Background()))
Hash, err := o.Hash(context.Background(), hash.MD5)
assert.NoError(t, err)
assert.Equal(t, "8ee2027983915ec78acc45027d874316", Hash)
Hash, err = o.Hash(context.Background(), hash.SHA1)
assert.NoError(t, err)
assert.Equal(t, "3e2e95f5ad970eadfa7e17eaf73da97024aa5359", Hash)
newNow := now.Add(time.Minute)
err = o.SetModTime(context.Background(), newNow)
assert.NoError(t, err)
assert.Equal(t, newNow, o.ModTime(context.Background()))
checkOpen := func(rc io.ReadCloser, expected string) {
t.Helper()
actual, err := io.ReadAll(rc)
assert.NoError(t, err)
err = rc.Close()
assert.NoError(t, err)
assert.Equal(t, expected, string(actual))
}
checkContent := func(o fs.Object, expected string) {
t.Helper()
rc, err := o.Open(context.Background())
assert.NoError(t, err)
checkOpen(rc, expected)
}
checkContent(o, string(content))
rc, err := o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 3})
assert.NoError(t, err)
checkOpen(rc, "ota")
rc, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: -1})
assert.NoError(t, err)
checkOpen(rc, "otato")
rc, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 4096})
assert.NoError(t, err)
checkOpen(rc, "otato")
rc, err = o.Open(context.Background(), &fs.RangeOption{Start: -1, End: 4})
assert.NoError(t, err)
checkOpen(rc, "tato")
rc, err = o.Open(context.Background(), &fs.SeekOption{Offset: 3})
assert.NoError(t, err)
checkOpen(rc, "ato")
rc, err = o.Open(context.Background(), &fs.SeekOption{Offset: -100})
assert.NoError(t, err)
checkOpen(rc, "potato")
// check it fits within the buffer
newNow = now.Add(2 * time.Minute)
newContent := bytes.NewBufferString("Rutabaga")
assert.True(t, newContent.Len() < cap(content)) // fits within cap(content)
src := object.NewStaticObjectInfo(remote, newNow, int64(newContent.Len()), true, nil, nil)
err = o.Update(context.Background(), newContent, src)
assert.NoError(t, err)
checkContent(o, "Rutabaga")
assert.Equal(t, newNow, o.ModTime(context.Background()))
assert.Equal(t, "Rutaba", string(content)) // check we reused the buffer
// not within the buffer
newStr := "0123456789"
newStr = newStr + newStr + newStr + newStr + newStr + newStr + newStr + newStr + newStr + newStr
newContent = bytes.NewBufferString(newStr)
assert.True(t, newContent.Len() > cap(content)) // does not fit within cap(content)
src = object.NewStaticObjectInfo(remote, newNow, int64(newContent.Len()), true, nil, nil)
err = o.Update(context.Background(), newContent, src)
assert.NoError(t, err)
checkContent(o, newStr)
assert.Equal(t, "Rutaba", string(content)) // check we didn't reuse the buffer
// now try streaming
newStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
newContent = bytes.NewBufferString(newStr)
src = object.NewStaticObjectInfo(remote, newNow, -1, true, nil, nil)
err = o.Update(context.Background(), newContent, src)
assert.NoError(t, err)
checkContent(o, newStr)
// and zero length
newStr = ""
newContent = bytes.NewBufferString(newStr)
src = object.NewStaticObjectInfo(remote, newNow, 0, true, nil, nil)
err = o.Update(context.Background(), newContent, src)
assert.NoError(t, err)
checkContent(o, newStr)
err = o.Remove(context.Background())
assert.Error(t, err)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/object/object.go | fs/object/object.go | // Package object defines some useful Objects
package object
import (
"bytes"
"context"
"errors"
"io"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
)
// StaticObjectInfo is an ObjectInfo which can be constructed from scratch
type StaticObjectInfo struct {
remote string
modTime time.Time
size int64
storable bool
hashes map[hash.Type]string
fs fs.Info
meta fs.Metadata
mimeType string
}
// NewStaticObjectInfo returns a static ObjectInfo
// If hashes is nil and fs is not nil, the hash map will be replaced with
// empty hashes of the types supported by the fs.
func NewStaticObjectInfo(remote string, modTime time.Time, size int64, storable bool, hashes map[hash.Type]string, f fs.Info) *StaticObjectInfo {
info := &StaticObjectInfo{
remote: remote,
modTime: modTime,
size: size,
storable: storable,
hashes: hashes,
fs: f,
}
if f != nil && hashes == nil {
set := f.Hashes().Array()
info.hashes = make(map[hash.Type]string)
for _, ht := range set {
info.hashes[ht] = ""
}
}
if f == nil {
info.fs = MemoryFs
}
return info
}
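// Illustrative sketch (not part of the upstream file): building a static
// ObjectInfo for an in-memory source, e.g. before a Put. The remote name and
// size are arbitrary example values.
//
//	src := NewStaticObjectInfo("dir/file.txt", time.Now(), 6, true, nil, nil)
//	// With both hashes and f nil, src.Fs() reports MemoryFs and
//	// src.Hash(ctx, hash.MD5) returns hash.ErrUnsupported.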
// WithMetadata adds meta to the ObjectInfo
func (i *StaticObjectInfo) WithMetadata(meta fs.Metadata) *StaticObjectInfo {
i.meta = meta
return i
}
// WithMimeType adds mimeType to the ObjectInfo
func (i *StaticObjectInfo) WithMimeType(mimeType string) *StaticObjectInfo {
i.mimeType = mimeType
return i
}
// Fs returns read only access to the Fs that this object is part of
func (i *StaticObjectInfo) Fs() fs.Info {
return i.fs
}
// Remote returns the remote path
func (i *StaticObjectInfo) Remote() string {
return i.remote
}
// String returns a description of the Object
func (i *StaticObjectInfo) String() string {
return i.remote
}
// ModTime returns the modification date of the file
func (i *StaticObjectInfo) ModTime(ctx context.Context) time.Time {
return i.modTime
}
// Size returns the size of the file
func (i *StaticObjectInfo) Size() int64 {
return i.size
}
// Storable says whether this object can be stored
func (i *StaticObjectInfo) Storable() bool {
return i.storable
}
// Hash returns the requested hash of the contents
func (i *StaticObjectInfo) Hash(ctx context.Context, h hash.Type) (string, error) {
if len(i.hashes) == 0 {
return "", hash.ErrUnsupported
}
if hash, ok := i.hashes[h]; ok {
return hash, nil
}
return "", hash.ErrUnsupported
}
// Metadata on the object
func (i *StaticObjectInfo) Metadata(ctx context.Context) (fs.Metadata, error) {
return i.meta, nil
}
// MimeType returns the content type of the Object if
// known, or "" if not
func (i *StaticObjectInfo) MimeType(ctx context.Context) string {
return i.mimeType
}
// Check interfaces
var (
_ fs.ObjectInfo = (*StaticObjectInfo)(nil)
_ fs.Metadataer = (*StaticObjectInfo)(nil)
_ fs.MimeTyper = (*StaticObjectInfo)(nil)
)
// MemoryFs is an in memory Fs, it only supports FsInfo and Put
var MemoryFs memoryFs
// memoryFs is an in memory fs
type memoryFs struct{}
// Name of the remote (as passed into NewFs)
func (memoryFs) Name() string { return "memory" }
// Root of the remote (as passed into NewFs)
func (memoryFs) Root() string { return "" }
// String returns a description of the FS
func (memoryFs) String() string { return "memory" }
// Precision of the ModTimes in this Fs
func (memoryFs) Precision() time.Duration { return time.Nanosecond }
// Returns the supported hash types of the filesystem
func (memoryFs) Hashes() hash.Set { return hash.Supported() }
// Features returns the optional features of this Fs
func (memoryFs) Features() *fs.Features { return &fs.Features{} }
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (memoryFs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return nil, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (memoryFs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return nil, fs.ErrorObjectNotFound
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (memoryFs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o := NewMemoryObject(src.Remote(), src.ModTime(ctx), nil)
return o, o.Update(ctx, in, src, options...)
}
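// Illustrative sketch (not part of the upstream file): Put on MemoryFs
// buffers the reader into a new MemoryObject. The remote name and content are
// arbitrary example values.
//
//	src := NewStaticObjectInfo("remote", time.Now(), 6, true, nil, nil)
//	obj, err := MemoryFs.Put(ctx, bytes.NewBufferString("potato"), src)
//	// obj now holds the six bytes "potato" in memory.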
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (memoryFs) Mkdir(ctx context.Context, dir string) error {
return errors.New("memoryFs: can't make directory")
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (memoryFs) Rmdir(ctx context.Context, dir string) error {
return fs.ErrorDirNotFound
}
var _ fs.Fs = MemoryFs
// MemoryObject is an in memory object
type MemoryObject struct {
remote string
modTime time.Time
content []byte
meta fs.Metadata
fs fs.Fs
mimeType string
}
// NewMemoryObject returns an in memory Object with the modTime and content passed in
func NewMemoryObject(remote string, modTime time.Time, content []byte) *MemoryObject {
return &MemoryObject{
remote: remote,
modTime: modTime,
content: content,
fs: MemoryFs,
}
}
// WithMetadata adds meta to the MemoryObject
func (o *MemoryObject) WithMetadata(meta fs.Metadata) *MemoryObject {
o.meta = meta
return o
}
// WithMimeType adds mimeType to the MemoryObject
func (o *MemoryObject) WithMimeType(mimeType string) *MemoryObject {
o.mimeType = mimeType
return o
}
// Content returns the underlying buffer
func (o *MemoryObject) Content() []byte {
return o.content
}
// Fs returns read only access to the Fs that this object is part of
func (o *MemoryObject) Fs() fs.Info {
return o.fs
}
// SetFs sets the Fs that this memory object thinks it is part of
// It will ignore nil f
func (o *MemoryObject) SetFs(f fs.Fs) *MemoryObject {
if f != nil {
o.fs = f
}
return o
}
// Remote returns the remote path
func (o *MemoryObject) Remote() string {
return o.remote
}
// String returns a description of the Object
func (o *MemoryObject) String() string {
return o.remote
}
// ModTime returns the modification date of the file
func (o *MemoryObject) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// Size returns the size of the file
func (o *MemoryObject) Size() int64 {
return int64(len(o.content))
}
// Storable says whether this object can be stored
func (o *MemoryObject) Storable() bool {
return true
}
// Hash returns the requested hash of the contents
func (o *MemoryObject) Hash(ctx context.Context, h hash.Type) (string, error) {
hash, err := hash.NewMultiHasherTypes(hash.Set(h))
if err != nil {
return "", err
}
_, err = hash.Write(o.content)
if err != nil {
return "", err
}
return hash.Sums()[h], nil
}
// SetModTime sets the metadata on the object to set the modification date
func (o *MemoryObject) SetModTime(ctx context.Context, modTime time.Time) error {
o.modTime = modTime
return nil
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *MemoryObject) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
case *fs.SeekOption:
offset = x.Offset
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
content := o.content
offset = max(offset, 0)
if limit < 0 {
content = content[offset:]
} else {
content = content[offset:min(offset+limit, int64(len(content)))]
}
return io.NopCloser(bytes.NewBuffer(content)), nil
}
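// Illustrative sketch (not part of the upstream file): reading a byte range
// back out of a MemoryObject through Open. exampleRange and its literal
// values are assumptions made for this example only.
func exampleRange(ctx context.Context) (string, error) {
	o := NewMemoryObject("example.txt", time.Now(), []byte("potato"))
	// fs.RangeOption uses an inclusive End, so Start: 1, End: 3 selects "ota".
	rc, err := o.Open(ctx, &fs.RangeOption{Start: 1, End: 3})
	if err != nil {
		return "", err
	}
	defer func() { _ = rc.Close() }()
	b, err := io.ReadAll(rc)
	return string(b), err
}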
// Update in to the object with the modTime given of the given size
//
// This reuses the internal buffer if at all possible.
func (o *MemoryObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
size := src.Size()
if size == 0 {
o.content = nil
} else if size < 0 || int64(cap(o.content)) < size {
o.content, err = io.ReadAll(in)
} else {
o.content = o.content[:size]
_, err = io.ReadFull(in, o.content)
}
o.modTime = src.ModTime(ctx)
return err
}
// Remove this object
func (o *MemoryObject) Remove(ctx context.Context) error {
return errors.New("memoryObject.Remove not supported")
}
// Metadata on the object
func (o *MemoryObject) Metadata(ctx context.Context) (fs.Metadata, error) {
return o.meta, nil
}
// MimeType on the object
func (o *MemoryObject) MimeType(ctx context.Context) string {
return o.mimeType
}
// Check interfaces
var (
_ fs.Object = (*MemoryObject)(nil)
_ fs.MimeTyper = (*MemoryObject)(nil)
_ fs.Metadataer = (*MemoryObject)(nil)
)
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/operations/rc.go | fs/operations/rc.go | package operations
import (
"context"
"errors"
"fmt"
"io"
"mime"
"mime/multipart"
"net/http"
"path"
"strings"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/lib/diskusage"
)
func init() {
rc.Add(rc.Call{
Path: "operations/list",
AuthRequired: true,
Fn: rcList,
Title: "List the given remote and path in JSON format",
Help: `This takes the following parameters:
- fs - a remote name string e.g. "drive:"
- remote - a path within that remote e.g. "dir"
- opt - a dictionary of options to control the listing (optional)
- recurse - If set recurse directories
- noModTime - If set don't return modification time
- showEncrypted - If set show decrypted names
- showOrigIDs - If set show the IDs for each item if known
- showHash - If set return a dictionary of hashes
- noMimeType - If set don't show mime types
- dirsOnly - If set only show directories
- filesOnly - If set only show files
- metadata - If set return metadata of objects also
- hashTypes - array of strings of hash types to show if showHash set
Returns:
- list
- This is an array of objects as described in the lsjson command
See the [lsjson](/commands/rclone_lsjson/) command for more information on the above and examples.
`,
})
}
// List the directory
func rcList(ctx context.Context, in rc.Params) (out rc.Params, err error) {
f, remote, err := rc.GetFsAndRemote(ctx, in)
if err != nil {
return nil, err
}
var opt ListJSONOpt
err = in.GetStruct("opt", &opt)
if rc.NotErrParamNotFound(err) {
return nil, err
}
list := []*ListJSONItem{}
err = ListJSON(ctx, f, remote, &opt, func(item *ListJSONItem) error {
list = append(list, item)
return nil
})
if err != nil {
return nil, err
}
out = make(rc.Params)
out["list"] = list
return out, nil
}
func init() {
rc.Add(rc.Call{
Path: "operations/stat",
AuthRequired: true,
Fn: rcStat,
Title: "Give information about the supplied file or directory",
Help: `This takes the following parameters
- fs - a remote name string eg "drive:"
- remote - a path within that remote eg "dir"
- opt - a dictionary of options to control the listing (optional)
- see operations/list for the options
The result is
- item - an object as described in the lsjson command. Will be null if not found.
Note that if you are only interested in files then it is much more
efficient to set the filesOnly flag in the options.
See the [lsjson](/commands/rclone_lsjson/) command for more information on the above and examples.
`,
})
}
// Stat the given file or directory
func rcStat(ctx context.Context, in rc.Params) (out rc.Params, err error) {
f, remote, err := rc.GetFsAndRemote(ctx, in)
if err != nil {
return nil, err
}
var opt ListJSONOpt
err = in.GetStruct("opt", &opt)
if rc.NotErrParamNotFound(err) {
return nil, err
}
item, err := StatJSON(ctx, f, remote, &opt)
if err != nil {
return nil, err
}
out = make(rc.Params)
out["item"] = item
return out, nil
}
func init() {
rc.Add(rc.Call{
Path: "operations/about",
AuthRequired: true,
Fn: rcAbout,
Title: "Return the space used on the remote",
Help: `This takes the following parameters:
- fs - a remote name string e.g. "drive:"
The result is as returned from rclone about --json
See the [about](/commands/rclone_about/) command for more information on the above.
`,
})
}
// About the remote
func rcAbout(ctx context.Context, in rc.Params) (out rc.Params, err error) {
f, err := rc.GetFs(ctx, in)
if err != nil {
return nil, err
}
doAbout := f.Features().About
if doAbout == nil {
return nil, fmt.Errorf("%v doesn't support about", f)
}
u, err := doAbout(ctx)
if err != nil {
return nil, fmt.Errorf("about call failed: %w", err)
}
err = rc.Reshape(&out, u)
if err != nil {
return nil, fmt.Errorf("about Reshape failed: %w", err)
}
return out, nil
}
func init() {
for _, copy := range []bool{false, true} {
name := "Move"
if copy {
name = "Copy"
}
rc.Add(rc.Call{
Path: "operations/" + strings.ToLower(name) + "file",
AuthRequired: true,
Fn: func(ctx context.Context, in rc.Params) (rc.Params, error) {
return rcMoveOrCopyFile(ctx, in, copy)
},
Title: name + " a file from source remote to destination remote",
Help: `This takes the following parameters:
- srcFs - a remote name string e.g. "drive:" for the source, "/" for local filesystem
- srcRemote - a path within that remote e.g. "file.txt" for the source
- dstFs - a remote name string e.g. "drive2:" for the destination, "/" for local filesystem
- dstRemote - a path within that remote e.g. "file2.txt" for the destination
`,
})
}
}
// Move or copy a file
func rcMoveOrCopyFile(ctx context.Context, in rc.Params, cp bool) (out rc.Params, err error) {
srcFs, srcRemote, err := rc.GetFsAndRemoteNamed(ctx, in, "srcFs", "srcRemote")
if err != nil {
return nil, err
}
dstFs, dstRemote, err := rc.GetFsAndRemoteNamed(ctx, in, "dstFs", "dstRemote")
if err != nil {
return nil, err
}
return nil, moveOrCopyFile(ctx, dstFs, srcFs, dstRemote, srcRemote, cp, false)
}
func init() {
for _, op := range []struct {
name string
title string
help string
noRemote bool
needsRequest bool
noCommand bool
}{
{name: "mkdir", title: "Make a destination directory or container"},
{name: "rmdir", title: "Remove an empty directory or container"},
{name: "purge", title: "Remove a directory or container and all of its contents"},
{name: "rmdirs", title: "Remove all the empty directories in the path", help: "- leaveRoot - boolean, set to true not to delete the root\n"},
{name: "delete", title: "Remove files in the path", noRemote: true},
{name: "deletefile", title: "Remove the single file pointed to"},
{name: "copyurl", title: "Copy the URL to the object", help: "- url - string, URL to read from\n - autoFilename - boolean, set to true to retrieve destination file name from url\n"},
{name: "uploadfile", title: "Upload file using multiform/form-data", help: "- each part in body represents a file to be uploaded\n", needsRequest: true, noCommand: true},
{name: "cleanup", title: "Remove trashed files in the remote or path", noRemote: true},
{name: "settier", title: "Changes storage tier or class on all files in the path", noRemote: true},
{name: "settierfile", title: "Changes storage tier or class on the single file pointed to", noCommand: true},
} {
var remote, command string
if !op.noRemote {
remote = "- remote - a path within that remote e.g. \"dir\"\n"
}
if !op.noCommand {
command = "See the [" + op.name + "](/commands/rclone_" + op.name + "/) command for more information on the above.\n"
}
rc.Add(rc.Call{
Path: "operations/" + op.name,
AuthRequired: true,
NeedsRequest: op.needsRequest,
Fn: func(ctx context.Context, in rc.Params) (rc.Params, error) {
return rcSingleCommand(ctx, in, op.name, op.noRemote)
},
Title: op.title,
Help: `This takes the following parameters:
- fs - a remote name string e.g. "drive:"
` + remote + op.help + "\n" + command,
})
}
}
// Run a single command, e.g. Mkdir
func rcSingleCommand(ctx context.Context, in rc.Params, name string, noRemote bool) (out rc.Params, err error) {
var (
f fs.Fs
remote string
)
if noRemote {
f, err = rc.GetFs(ctx, in)
} else {
f, remote, err = rc.GetFsAndRemote(ctx, in)
}
if err != nil {
return nil, err
}
switch name {
case "mkdir":
return nil, Mkdir(ctx, f, remote)
case "rmdir":
return nil, Rmdir(ctx, f, remote)
case "purge":
return nil, Purge(ctx, f, remote)
case "rmdirs":
leaveRoot, err := in.GetBool("leaveRoot")
if rc.NotErrParamNotFound(err) {
return nil, err
}
return nil, Rmdirs(ctx, f, remote, leaveRoot)
case "delete":
return nil, Delete(ctx, f)
case "deletefile":
o, err := f.NewObject(ctx, remote)
if err != nil {
return nil, err
}
return nil, DeleteFile(ctx, o)
case "copyurl":
url, err := in.GetString("url")
if err != nil {
return nil, err
}
autoFilename, _ := in.GetBool("autoFilename")
noClobber, _ := in.GetBool("noClobber")
headerFilename, _ := in.GetBool("headerFilename")
_, err = CopyURL(ctx, f, remote, url, autoFilename, headerFilename, noClobber)
return nil, err
case "uploadfile":
var request *http.Request
request, err := in.GetHTTPRequest()
if err != nil {
return nil, err
}
contentType := request.Header.Get("Content-Type")
mediaType, params, err := mime.ParseMediaType(contentType)
if err != nil {
return nil, err
}
if strings.HasPrefix(mediaType, "multipart/") {
mr := multipart.NewReader(request.Body, params["boundary"])
for {
p, err := mr.NextPart()
if err == io.EOF {
return nil, nil
}
if err != nil {
return nil, err
}
if p.FileName() != "" {
obj, err := Rcat(ctx, f, path.Join(remote, p.FileName()), p, time.Now(), nil)
if err != nil {
return nil, err
}
fs.Debugf(obj, "Upload Succeeded")
}
}
}
return nil, nil
case "cleanup":
return nil, CleanUp(ctx, f)
case "settier":
if !f.Features().SetTier {
return nil, fmt.Errorf("remote %s does not support settier", f.Name())
}
tier, err := in.GetString("tier")
if err != nil {
return nil, err
}
return nil, SetTier(ctx, f, tier)
case "settierfile":
if !f.Features().SetTier {
return nil, fmt.Errorf("remote %s does not support settier", f.Name())
}
tier, err := in.GetString("tier")
if err != nil {
return nil, err
}
o, err := f.NewObject(ctx, remote)
if err != nil {
return nil, err
}
return nil, SetTierFile(ctx, o, tier)
}
panic("unknown rcSingleCommand type")
}
func init() {
rc.Add(rc.Call{
Path: "operations/size",
AuthRequired: true,
Fn: rcSize,
Title: "Count the number of bytes and files in remote",
Help: `This takes the following parameters:
- fs - a remote name string e.g. "drive:path/to/dir"
Returns:
- count - number of files
- bytes - number of bytes in those files
See the [size](/commands/rclone_size/) command for more information on the above.
`,
})
}
// Size a directory
func rcSize(ctx context.Context, in rc.Params) (out rc.Params, err error) {
f, err := rc.GetFs(ctx, in)
if err != nil {
return nil, err
}
count, bytes, sizeless, err := Count(ctx, f)
if err != nil {
return nil, err
}
out = make(rc.Params)
out["count"] = count
out["bytes"] = bytes
out["sizeless"] = sizeless
return out, nil
}
func init() {
rc.Add(rc.Call{
Path: "operations/publiclink",
AuthRequired: true,
Fn: rcPublicLink,
Title: "Create or retrieve a public link to the given file or folder.",
Help: `This takes the following parameters:
- fs - a remote name string e.g. "drive:"
- remote - a path within that remote e.g. "dir"
- unlink - boolean - if set removes the link rather than adding it (optional)
- expire - string - the expiry time of the link e.g. "1d" (optional)
Returns:
- url - URL of the resource
See the [link](/commands/rclone_link/) command for more information on the above.
`,
})
}
// Make a public link
func rcPublicLink(ctx context.Context, in rc.Params) (out rc.Params, err error) {
f, remote, err := rc.GetFsAndRemote(ctx, in)
if err != nil {
return nil, err
}
unlink, _ := in.GetBool("unlink")
expire, err := in.GetDuration("expire")
if rc.IsErrParamNotFound(err) {
expire = time.Duration(fs.DurationOff)
} else if err != nil {
return nil, err
}
url, err := PublicLink(ctx, f, remote, fs.Duration(expire), unlink)
if err != nil {
return nil, err
}
out = make(rc.Params)
out["url"] = url
return out, nil
}
func init() {
rc.Add(rc.Call{
Path: "operations/fsinfo",
Fn: rcFsInfo,
Title: "Return information about the remote",
Help: `This takes the following parameters:
- fs - a remote name string e.g. "drive:"
This returns info about the remote passed in;
` + "```" + `
{
// optional features and whether they are available or not
"Features": {
"About": true,
"BucketBased": false,
"BucketBasedRootOK": false,
"CanHaveEmptyDirectories": true,
"CaseInsensitive": false,
"ChangeNotify": false,
"CleanUp": false,
"Command": true,
"Copy": false,
"DirCacheFlush": false,
"DirMove": true,
"Disconnect": false,
"DuplicateFiles": false,
"GetTier": false,
"IsLocal": true,
"ListR": false,
"MergeDirs": false,
"MetadataInfo": true,
"Move": true,
"OpenWriterAt": true,
"PublicLink": false,
"Purge": true,
"PutStream": true,
"PutUnchecked": false,
"ReadMetadata": true,
"ReadMimeType": false,
"ServerSideAcrossConfigs": false,
"SetTier": false,
"SetWrapper": false,
"Shutdown": false,
"SlowHash": true,
"SlowModTime": false,
"UnWrap": false,
"UserInfo": false,
"UserMetadata": true,
"WrapFs": false,
"WriteMetadata": true,
"WriteMimeType": false
},
// Names of hashes available
"Hashes": [
"md5",
"sha1",
"whirlpool",
"crc32",
"sha256",
"dropbox",
"mailru",
"quickxor"
],
"Name": "local", // Name as created
"Precision": 1, // Precision of timestamps in ns
"Root": "/", // Path as created
"String": "Local file system at /", // how the remote will appear in logs
// Information about the system metadata for this backend
"MetadataInfo": {
"System": {
"atime": {
"Help": "Time of last access",
"Type": "RFC 3339",
"Example": "2006-01-02T15:04:05.999999999Z07:00"
},
"btime": {
"Help": "Time of file birth (creation)",
"Type": "RFC 3339",
"Example": "2006-01-02T15:04:05.999999999Z07:00"
},
"gid": {
"Help": "Group ID of owner",
"Type": "decimal number",
"Example": "500"
},
"mode": {
"Help": "File type and mode",
"Type": "octal, unix style",
"Example": "0100664"
},
"mtime": {
"Help": "Time of last modification",
"Type": "RFC 3339",
"Example": "2006-01-02T15:04:05.999999999Z07:00"
},
"rdev": {
"Help": "Device ID (if special file)",
"Type": "hexadecimal",
"Example": "1abc"
},
"uid": {
"Help": "User ID of owner",
"Type": "decimal number",
"Example": "500"
}
},
"Help": "Textual help string\n"
}
}
` + "```" + `
This command does not have a command line equivalent so use this instead:
rclone rc --loopback operations/fsinfo fs=remote:
`,
})
}
// Fsinfo the remote
func rcFsInfo(ctx context.Context, in rc.Params) (out rc.Params, err error) {
f, err := rc.GetFs(ctx, in)
if err != nil {
return nil, err
}
info := GetFsInfo(f)
err = rc.Reshape(&out, info)
if err != nil {
return nil, fmt.Errorf("fsinfo Reshape failed: %w", err)
}
return out, nil
}
func init() {
rc.Add(rc.Call{
Path: "backend/command",
AuthRequired: true,
Fn: rcBackend,
Title: "Runs a backend command.",
Help: `This takes the following parameters:
- command - a string with the command name
- fs - a remote name string e.g. "drive:"
- arg - a list of arguments for the backend command
- opt - a map of string to string of options
Returns:
- result - result from the backend command
Example:
rclone rc backend/command command=noop fs=. -o echo=yes -o blue -a path1 -a path2
Returns
` + "```" + `
{
"result": {
"arg": [
"path1",
"path2"
],
"name": "noop",
"opt": {
"blue": "",
"echo": "yes"
}
}
}
` + "```" + `
Note that this is the direct equivalent of using this "backend"
command:
rclone backend noop . -o echo=yes -o blue path1 path2
Note that arguments must be preceded by the "-a" flag
See the [backend](/commands/rclone_backend/) command for more information.
`,
})
}
// Run a backend command
func rcBackend(ctx context.Context, in rc.Params) (out rc.Params, err error) {
f, err := rc.GetFs(ctx, in)
if err != nil {
return nil, err
}
doCommand := f.Features().Command
if doCommand == nil {
return nil, fmt.Errorf("%v: doesn't support backend commands", f)
}
command, err := in.GetString("command")
if err != nil {
return nil, err
}
opt := map[string]string{}
err = in.GetStructMissingOK("opt", &opt)
if err != nil {
return nil, err
}
arg := []string{}
err = in.GetStructMissingOK("arg", &arg)
if err != nil {
return nil, err
}
result, err := doCommand(ctx, command, arg, opt)
if err != nil {
return nil, fmt.Errorf("command %q failed: %w", command, err)
}
out = make(rc.Params)
out["result"] = result
return out, nil
}
// This should really be in fs/rc/internal.go but can't go there due
// to a circular dependency on config.
func init() {
rc.Add(rc.Call{
Path: "core/du",
Fn: rcDu,
Title: "Returns disk usage of a locally attached disk.",
Help: `
This returns the disk usage for the local directory passed in as dir.
If the directory is not passed in, it defaults to the directory
pointed to by --cache-dir.
- dir - string (optional)
Returns:
` + "```" + `
{
"dir": "/",
"info": {
"Available": 361769115648,
"Free": 361785892864,
"Total": 982141468672
}
}
` + "```" + `
`,
})
}
// Return the disk usage of a local directory
func rcDu(ctx context.Context, in rc.Params) (out rc.Params, err error) {
dir, err := in.GetString("dir")
if rc.IsErrParamNotFound(err) {
dir = config.GetCacheDir()
} else if err != nil {
return nil, err
}
info, err := diskusage.New(dir)
if err != nil {
return nil, err
}
out = rc.Params{
"dir": dir,
"info": info,
}
return out, nil
}
func init() {
rc.Add(rc.Call{
Path: "operations/check",
AuthRequired: true,
Fn: rcCheck,
Title: "check the source and destination are the same",
Help: `Checks the files in the source and destination match. It compares
sizes and hashes and logs a report of files that don't
match. It doesn't alter the source or destination.
This takes the following parameters:
- srcFs - a remote name string e.g. "drive:" for the source, "/" for local filesystem
- dstFs - a remote name string e.g. "drive2:" for the destination, "/" for local filesystem
- download - check by downloading rather than with hash
- checkFileHash - treat checkFileFs:checkFileRemote as a SUM file with hashes of given type
- checkFileFs - treat checkFileFs:checkFileRemote as a SUM file with hashes of given type
- checkFileRemote - treat checkFileFs:checkFileRemote as a SUM file with hashes of given type
- oneWay - check one way only, source files must exist on remote
- combined - make a combined report of changes (default false)
- missingOnSrc - report all files missing from the source (default true)
- missingOnDst - report all files missing from the destination (default true)
- match - report all matching files (default false)
- differ - report all non-matching files (default true)
- error - report all files with errors (hashing or reading) (default true)
If you supply the download flag, it will download the data from
both remotes and check them against each other on the fly. This can
be useful for remotes that don't support hashes or if you really want
to check all the data.
If you supply the size-only global flag, it will only compare the sizes not
the hashes as well. Use this for a quick check.
If you supply the checkFileHash option with a valid hash name, the
checkFileFs:checkFileRemote must point to a text file in the SUM
format. This treats the checksum file as the source and dstFs as the
destination. Note that srcFs is not used and should not be supplied in
this case.
Returns:
- success - true if no error, false otherwise
- status - textual summary of check, OK or text string
- hashType - hash used in check, may be missing
- combined - array of strings of combined report of changes
- missingOnSrc - array of strings of all files missing from the source
- missingOnDst - array of strings of all files missing from the destination
- match - array of strings of all matching files
- differ - array of strings of all non-matching files
- error - array of strings of all files with errors (hashing or reading)
`,
})
}
// Writer which writes into the slice provided
type stringWriter struct {
out *[]string
}
// Write writes len(p) bytes from p to the underlying data stream. It returns
// the number of bytes written from p (0 <= n <= len(p)) and any error
// encountered that caused the write to stop early. Write must return a non-nil
// error if it returns n < len(p). Write must not modify the slice data,
// even temporarily.
//
// Implementations must not retain p.
func (s stringWriter) Write(p []byte) (n int, err error) {
result := string(p)
result = strings.TrimSuffix(result, "\n")
*s.out = append(*s.out, result)
return len(p), nil
}
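// Illustrative sketch (not part of the upstream file): stringWriter turns
// each Write call into one entry of the slice it wraps, which is how the
// check and hashsum handlers below expose their report lines as JSON arrays.
// exampleStringWriter and its literals are assumptions for this example only.
func exampleStringWriter() []string {
	var lines []string
	w := stringWriter{out: &lines}
	fmt.Fprintln(w, "first")  // stored as "first" (trailing newline trimmed)
	fmt.Fprintln(w, "second") // stored as "second"
	return lines              // []string{"first", "second"}
}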
// Check two directories
func rcCheck(ctx context.Context, in rc.Params) (out rc.Params, err error) {
srcFs, err := rc.GetFsNamed(ctx, in, "srcFs")
if err != nil && !rc.IsErrParamNotFound(err) {
return nil, err
}
dstFs, err := rc.GetFsNamed(ctx, in, "dstFs")
if err != nil {
return nil, err
}
checkFileFs, checkFileRemote, err := rc.GetFsAndRemoteNamed(ctx, in, "checkFileFs", "checkFileRemote")
if err != nil && !rc.IsErrParamNotFound(err) {
return nil, err
}
checkFileHash, err := in.GetString("checkFileHash")
if err != nil && !rc.IsErrParamNotFound(err) {
return nil, err
}
checkFileSet := 0
if checkFileHash != "" {
checkFileSet++
}
if checkFileFs != nil {
checkFileSet++
}
if checkFileRemote != "" {
checkFileSet++
}
if checkFileSet > 0 && checkFileSet < 3 {
return nil, fmt.Errorf("need all of checkFileFs, checkFileRemote, checkFileHash to be set together")
}
var checkFileHashType hash.Type
if checkFileHash != "" {
if err := checkFileHashType.Set(checkFileHash); err != nil {
return nil, err
}
if srcFs != nil {
return nil, rc.NewErrParamInvalid(errors.New("only supply dstFs when using checkFileHash"))
}
} else if srcFs == nil {
return nil, rc.NewErrParamInvalid(errors.New("need srcFs parameter when not using checkFileHash"))
}
oneway, _ := in.GetBool("oneWay")
download, _ := in.GetBool("download")
opt := &CheckOpt{
Fsrc: srcFs,
Fdst: dstFs,
OneWay: oneway,
}
out = rc.Params{}
getOutput := func(name string, Default bool) io.Writer {
active, err := in.GetBool(name)
if err != nil {
active = Default
}
if !active {
return nil
}
result := []string{}
out[name] = &result
return stringWriter{&result}
}
opt.Combined = getOutput("combined", false)
opt.MissingOnSrc = getOutput("missingOnSrc", true)
opt.MissingOnDst = getOutput("missingOnDst", true)
opt.Match = getOutput("match", false)
opt.Differ = getOutput("differ", true)
opt.Error = getOutput("error", true)
if checkFileHash != "" {
out["hashType"] = checkFileHashType.String()
err = CheckSum(ctx, dstFs, checkFileFs, checkFileRemote, checkFileHashType, opt, download)
} else {
if download {
err = CheckDownload(ctx, opt)
} else {
out["hashType"] = srcFs.Hashes().Overlap(dstFs.Hashes()).GetOne().String()
err = Check(ctx, opt)
}
}
if err != nil {
out["status"] = err.Error()
out["success"] = false
} else {
out["status"] = "OK"
out["success"] = true
}
return out, nil
}
func init() {
rc.Add(rc.Call{
Path: "operations/hashsum",
AuthRequired: true,
Fn: rcHashsum,
Title: "Produces a hashsum file for all the objects in the path.",
Help: `Produces a hash file for all the objects in the path using the hash
named. The output is in the same format as the standard
md5sum/sha1sum tool.
This takes the following parameters:
- fs - a remote name string e.g. "drive:" for the source, "/" for local filesystem
- this can point to a file and just that file will be returned in the listing.
- hashType - type of hash to be used
- download - check by downloading rather than with hash (boolean)
- base64 - output the hashes in base64 rather than hex (boolean)
If you supply the download flag, it will download the data from the
remote and create the hash on the fly. This can be useful for remotes
that don't support the given hash or if you really want to check all
the data.
Note that if you wish to supply a checkfile to check hashes against
the current files then you should use operations/check instead of
operations/hashsum.
Returns:
- hashsum - array of strings of the hashes
- hashType - type of hash used
Example:
$ rclone rc --loopback operations/hashsum fs=bin hashType=MD5 download=true base64=true
{
"hashType": "md5",
"hashsum": [
"WTSVLpuiXyJO_kGzJerRLg== backend-versions.sh",
"v1b_OlWCJO9LtNq3EIKkNQ== bisect-go-rclone.sh",
"VHbmHzHh4taXzgag8BAIKQ== bisect-rclone.sh",
]
}
See the [hashsum](/commands/rclone_hashsum/) command for more information on the above.
`,
})
}
// Hashsum a directory
func rcHashsum(ctx context.Context, in rc.Params) (out rc.Params, err error) {
ctx, f, err := rc.GetFsNamedFileOK(ctx, in, "fs")
if err != nil {
return nil, err
}
download, _ := in.GetBool("download")
base64, _ := in.GetBool("base64")
hashType, err := in.GetString("hashType")
if err != nil {
return nil, fmt.Errorf("%s\n%w", hash.HelpString(0), err)
}
var ht hash.Type
err = ht.Set(hashType)
if err != nil {
return nil, fmt.Errorf("%s\n%w", hash.HelpString(0), err)
}
hashes := []string{}
err = HashLister(ctx, ht, base64, download, f, stringWriter{&hashes})
out = rc.Params{
"hashType": ht.String(),
"hashsum": hashes,
}
return out, err
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/operations/dedupe_test.go | fs/operations/dedupe_test.go | package operations_test
import (
"context"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
"github.com/spf13/pflag"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Check flag satisfies the interface
var _ pflag.Value = (*operations.DeduplicateMode)(nil)
func skipIfCantDedupe(t *testing.T, f fs.Fs) {
if !f.Features().DuplicateFiles {
t.Skip("Can't test deduplicate - no duplicate files possible")
}
if f.Features().PutUnchecked == nil {
t.Skip("Can't test deduplicate - no PutUnchecked")
}
if f.Features().MergeDirs == nil {
t.Skip("Can't test deduplicate - no MergeDirs")
}
}
func skipIfNoHash(t *testing.T, f fs.Fs) {
if f.Hashes().GetOne() == hash.None {
t.Skip("Can't run this test without a hash")
}
}
func skipIfNoModTime(t *testing.T, f fs.Fs) {
if f.Precision() >= fs.ModTimeNotSupported {
t.Skip("Can't run this test without modtimes")
}
}
func TestDeduplicateInteractive(t *testing.T) {
r := fstest.NewRun(t)
skipIfCantDedupe(t, r.Fremote)
skipIfNoHash(t, r.Fremote)
file1 := r.WriteUncheckedObject(context.Background(), "one", "This is one", t1)
file2 := r.WriteUncheckedObject(context.Background(), "one", "This is one", t1)
file3 := r.WriteUncheckedObject(context.Background(), "one", "This is one", t1)
r.CheckWithDuplicates(t, file1, file2, file3)
err := operations.Deduplicate(context.Background(), r.Fremote, operations.DeduplicateInteractive, false)
require.NoError(t, err)
r.CheckRemoteItems(t, file1)
}
func TestDeduplicateSkip(t *testing.T) {
r := fstest.NewRun(t)
skipIfCantDedupe(t, r.Fremote)
haveHash := r.Fremote.Hashes().GetOne() != hash.None
file1 := r.WriteUncheckedObject(context.Background(), "one", "This is one", t1)
files := []fstest.Item{file1}
if haveHash {
file2 := r.WriteUncheckedObject(context.Background(), "one", "This is one", t1)
files = append(files, file2)
}
file3 := r.WriteUncheckedObject(context.Background(), "one", "This is another one", t1)
files = append(files, file3)
r.CheckWithDuplicates(t, files...)
err := operations.Deduplicate(context.Background(), r.Fremote, operations.DeduplicateSkip, false)
require.NoError(t, err)
r.CheckWithDuplicates(t, file1, file3)
}
func TestDeduplicateSizeOnly(t *testing.T) {
r := fstest.NewRun(t)
skipIfCantDedupe(t, r.Fremote)
ctx := context.Background()
ci := fs.GetConfig(ctx)
file1 := r.WriteUncheckedObject(context.Background(), "one", "This is one", t1)
file2 := r.WriteUncheckedObject(context.Background(), "one", "THIS IS ONE", t1)
file3 := r.WriteUncheckedObject(context.Background(), "one", "This is another one", t1)
r.CheckWithDuplicates(t, file1, file2, file3)
ci.SizeOnly = true
defer func() {
ci.SizeOnly = false
}()
err := operations.Deduplicate(context.Background(), r.Fremote, operations.DeduplicateSkip, false)
require.NoError(t, err)
r.CheckWithDuplicates(t, file1, file3)
}
func TestDeduplicateFirst(t *testing.T) {
r := fstest.NewRun(t)
skipIfCantDedupe(t, r.Fremote)
file1 := r.WriteUncheckedObject(context.Background(), "one", "This is one", t1)
file2 := r.WriteUncheckedObject(context.Background(), "one", "This is one A", t1)
file3 := r.WriteUncheckedObject(context.Background(), "one", "This is one BB", t1)
r.CheckWithDuplicates(t, file1, file2, file3)
err := operations.Deduplicate(context.Background(), r.Fremote, operations.DeduplicateFirst, false)
require.NoError(t, err)
// list until we get one object
var objects, size int64
for try := 1; try <= *fstest.ListRetries; try++ {
objects, size, _, err = operations.Count(context.Background(), r.Fremote)
require.NoError(t, err)
if objects == 1 {
break
}
time.Sleep(time.Second)
}
assert.Equal(t, int64(1), objects)
if size != file1.Size && size != file2.Size && size != file3.Size {
t.Errorf("Size not one of the object sizes %d", size)
}
}
func TestDeduplicateNewest(t *testing.T) {
r := fstest.NewRun(t)
skipIfCantDedupe(t, r.Fremote)
skipIfNoModTime(t, r.Fremote)
file1 := r.WriteUncheckedObject(context.Background(), "one", "This is one", t1)
file2 := r.WriteUncheckedObject(context.Background(), "one", "This is one too", t2)
file3 := r.WriteUncheckedObject(context.Background(), "one", "This is another one", t3)
r.CheckWithDuplicates(t, file1, file2, file3)
err := operations.Deduplicate(context.Background(), r.Fremote, operations.DeduplicateNewest, false)
require.NoError(t, err)
r.CheckRemoteItems(t, file3)
}
func TestDeduplicateNewestByHash(t *testing.T) {
r := fstest.NewRun(t)
skipIfNoHash(t, r.Fremote)
skipIfNoModTime(t, r.Fremote)
contents := random.String(100)
file1 := r.WriteObject(context.Background(), "one", contents, t1)
file2 := r.WriteObject(context.Background(), "also/one", contents, t2)
file3 := r.WriteObject(context.Background(), "another", contents, t3)
file4 := r.WriteObject(context.Background(), "not-one", "stuff", t3)
r.CheckRemoteItems(t, file1, file2, file3, file4)
err := operations.Deduplicate(context.Background(), r.Fremote, operations.DeduplicateNewest, true)
require.NoError(t, err)
r.CheckRemoteItems(t, file3, file4)
}
func TestDeduplicateOldest(t *testing.T) {
r := fstest.NewRun(t)
skipIfCantDedupe(t, r.Fremote)
file1 := r.WriteUncheckedObject(context.Background(), "one", "This is one", t1)
file2 := r.WriteUncheckedObject(context.Background(), "one", "This is one too", t2)
file3 := r.WriteUncheckedObject(context.Background(), "one", "This is another one", t3)
r.CheckWithDuplicates(t, file1, file2, file3)
err := operations.Deduplicate(context.Background(), r.Fremote, operations.DeduplicateOldest, false)
require.NoError(t, err)
r.CheckRemoteItems(t, file1)
}
func TestDeduplicateLargest(t *testing.T) {
r := fstest.NewRun(t)
skipIfCantDedupe(t, r.Fremote)
file1 := r.WriteUncheckedObject(context.Background(), "one", "This is one", t1)
file2 := r.WriteUncheckedObject(context.Background(), "one", "This is one too", t2)
file3 := r.WriteUncheckedObject(context.Background(), "one", "This is another one", t3)
r.CheckWithDuplicates(t, file1, file2, file3)
err := operations.Deduplicate(context.Background(), r.Fremote, operations.DeduplicateLargest, false)
require.NoError(t, err)
r.CheckRemoteItems(t, file3)
}
func TestDeduplicateSmallest(t *testing.T) {
r := fstest.NewRun(t)
skipIfCantDedupe(t, r.Fremote)
file1 := r.WriteUncheckedObject(context.Background(), "one", "This is one", t1)
file2 := r.WriteUncheckedObject(context.Background(), "one", "This is one too", t2)
file3 := r.WriteUncheckedObject(context.Background(), "one", "This is another one", t3)
r.CheckWithDuplicates(t, file1, file2, file3)
err := operations.Deduplicate(context.Background(), r.Fremote, operations.DeduplicateSmallest, false)
require.NoError(t, err)
r.CheckRemoteItems(t, file1)
}
func TestDeduplicateRename(t *testing.T) {
r := fstest.NewRun(t)
skipIfCantDedupe(t, r.Fremote)
file1 := r.WriteUncheckedObject(context.Background(), "one.txt", "This is one", t1)
file2 := r.WriteUncheckedObject(context.Background(), "one.txt", "This is one too", t2)
file3 := r.WriteUncheckedObject(context.Background(), "one.txt", "This is another one", t3)
file4 := r.WriteUncheckedObject(context.Background(), "one-1.txt", "This is not a duplicate", t1)
r.CheckWithDuplicates(t, file1, file2, file3, file4)
err := operations.Deduplicate(context.Background(), r.Fremote, operations.DeduplicateRename, false)
require.NoError(t, err)
require.NoError(t, walk.ListR(context.Background(), r.Fremote, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
entries.ForObject(func(o fs.Object) {
remote := o.Remote()
if remote != "one-1.txt" &&
remote != "one-2.txt" &&
remote != "one-3.txt" &&
remote != "one-4.txt" {
t.Errorf("Bad file name after rename %q", remote)
}
size := o.Size()
if size != file1.Size &&
size != file2.Size &&
size != file3.Size &&
size != file4.Size {
t.Errorf("Size not one of the object sizes %d", size)
}
if remote == "one-1.txt" && size != file4.Size {
t.Errorf("Existing non-duplicate file modified %q", remote)
}
})
return nil
}))
}
// This should really be a unit test, but the test framework there
// doesn't have enough tools to make it easy
func TestMergeDirs(t *testing.T) {
r := fstest.NewRun(t)
mergeDirs := r.Fremote.Features().MergeDirs
if mergeDirs == nil {
t.Skip("Can't merge directories")
}
file1 := r.WriteObject(context.Background(), "dupe1/one.txt", "This is one", t1)
file2 := r.WriteObject(context.Background(), "dupe2/two.txt", "This is one too", t2)
file3 := r.WriteObject(context.Background(), "dupe3/three.txt", "This is another one", t3)
objs, dirs, err := walk.GetAll(context.Background(), r.Fremote, "", true, 1)
require.NoError(t, err)
assert.Equal(t, 3, len(dirs))
assert.Equal(t, 0, len(objs))
err = mergeDirs(context.Background(), dirs)
require.NoError(t, err)
file2.Path = "dupe1/two.txt"
file3.Path = "dupe1/three.txt"
r.CheckRemoteItems(t, file1, file2, file3)
objs, dirs, err = walk.GetAll(context.Background(), r.Fremote, "", true, 1)
require.NoError(t, err)
assert.Equal(t, 1, len(dirs))
assert.Equal(t, 0, len(objs))
assert.Equal(t, "dupe1", dirs[0].Remote())
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/operations/multithread.go | fs/operations/multithread.go | package operations
import (
"bufio"
"context"
"errors"
"fmt"
"io"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/multipart"
"github.com/rclone/rclone/lib/pool"
"golang.org/x/sync/errgroup"
)
const (
multithreadChunkSize = 64 << 10
)
// Return a boolean as to whether we should use multi thread copy for
// this transfer
func doMultiThreadCopy(ctx context.Context, f fs.Fs, src fs.Object) bool {
ci := fs.GetConfig(ctx)
// Disable multi thread if...
// ...it isn't configured
if ci.MultiThreadStreams <= 1 {
return false
}
// ...if the source doesn't support it
if src.Fs().Features().NoMultiThreading {
return false
}
// ...size of object is less than cutoff
if src.Size() < int64(ci.MultiThreadCutoff) {
return false
}
// ...destination doesn't support it
dstFeatures := f.Features()
if dstFeatures.OpenChunkWriter == nil && dstFeatures.OpenWriterAt == nil {
return false
}
// ...if --multi-thread-streams not in use and source and
// destination are both local
if !ci.MultiThreadSet && dstFeatures.IsLocal && src.Fs().Features().IsLocal {
return false
}
return true
}
// state for a multi-thread copy
type multiThreadCopyState struct {
ctx context.Context
partSize int64
size int64
src fs.Object
acc *accounting.Account
numChunks int
noBuffering bool // set to read the input without buffering
}
// Copy a single chunk into place
func (mc *multiThreadCopyState) copyChunk(ctx context.Context, chunk int, writer fs.ChunkWriter) (err error) {
defer func() {
if err != nil {
fs.Debugf(mc.src, "multi-thread copy: chunk %d/%d failed: %v", chunk+1, mc.numChunks, err)
}
}()
start := int64(chunk) * mc.partSize
if start >= mc.size {
return nil
}
end := min(start+mc.partSize, mc.size)
size := end - start
// Reserve the memory first so we don't open the source and wait for memory buffers for ages
var rw *pool.RW
if !mc.noBuffering {
rw = multipart.NewRW().Reserve(size)
defer fs.CheckClose(rw, &err)
}
fs.Debugf(mc.src, "multi-thread copy: chunk %d/%d (%d-%d) size %v starting", chunk+1, mc.numChunks, start, end, fs.SizeSuffix(size))
rc, err := Open(ctx, mc.src, &fs.RangeOption{Start: start, End: end - 1})
if err != nil {
return fmt.Errorf("multi-thread copy: failed to open source: %w", err)
}
defer fs.CheckClose(rc, &err)
var rs io.ReadSeeker
if mc.noBuffering {
// Read directly if we are sure we aren't going to seek
// and account with accounting
rc.SetAccounting(mc.acc.AccountRead)
rs = rc
} else {
// Read the chunk into buffered reader
_, err = io.CopyN(rw, rc, size)
if err != nil {
return fmt.Errorf("multi-thread copy: failed to read chunk: %w", err)
}
// Account as we go
rw.SetAccounting(mc.acc.AccountRead)
rs = rw
}
// Write the chunk
bytesWritten, err := writer.WriteChunk(ctx, chunk, rs)
if err != nil {
return fmt.Errorf("multi-thread copy: failed to write chunk: %w", err)
}
fs.Debugf(mc.src, "multi-thread copy: chunk %d/%d (%d-%d) size %v finished", chunk+1, mc.numChunks, start, end, fs.SizeSuffix(bytesWritten))
return nil
}
// Given a file size and a chunkSize
// it returns the number of chunks, so that chunkSize * numChunks >= size
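//
// For example (illustrative numbers): size=100 with chunkSize=64 gives
// 100/64 = 1 with a remainder, so 2 chunks are returned; size=128 with
// chunkSize=64 divides exactly and also returns 2.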
func calculateNumChunks(size int64, chunkSize int64) int {
numChunks := size / chunkSize
if size%chunkSize != 0 {
numChunks++
}
return int(numChunks)
}
// Copy src to (f, remote) using up to concurrency download threads. It tries to use the OpenChunkWriter feature
// and if that's not available it creates an adapter using OpenWriterAt
func multiThreadCopy(ctx context.Context, f fs.Fs, remote string, src fs.Object, concurrency int, tr *accounting.Transfer, options ...fs.OpenOption) (newDst fs.Object, err error) {
openChunkWriter := f.Features().OpenChunkWriter
ci := fs.GetConfig(ctx)
noBuffering := false
usingOpenWriterAt := false
if openChunkWriter == nil {
openWriterAt := f.Features().OpenWriterAt
if openWriterAt == nil {
return nil, errors.New("multi-thread copy: neither OpenChunkWriter nor OpenWriterAt supported")
}
openChunkWriter = openChunkWriterFromOpenWriterAt(openWriterAt, int64(ci.MultiThreadChunkSize), int64(ci.MultiThreadWriteBufferSize), f)
// If we are using OpenWriterAt we don't seek the chunks so don't need to buffer
fs.Debugf(src, "multi-thread copy: disabling buffering because destination uses OpenWriterAt")
noBuffering = true
usingOpenWriterAt = true
} else if src.Fs().Features().IsLocal {
// If the source fs is local we don't need to buffer
fs.Debugf(src, "multi-thread copy: disabling buffering because source is local disk")
noBuffering = true
} else if f.Features().ChunkWriterDoesntSeek {
// If the destination Fs promises not to seek its chunks
// (except for retries) then we don't need buffering.
fs.Debugf(src, "multi-thread copy: disabling buffering because destination has set ChunkWriterDoesntSeek")
noBuffering = true
}
if src.Size() < 0 {
return nil, fmt.Errorf("multi-thread copy: can't copy unknown sized file")
}
if src.Size() == 0 {
return nil, fmt.Errorf("multi-thread copy: can't copy zero sized file")
}
info, chunkWriter, err := openChunkWriter(ctx, remote, src, options...)
if err != nil {
return nil, fmt.Errorf("multi-thread copy: failed to open chunk writer: %w", err)
}
uploadCtx, cancel := context.WithCancel(ctx)
defer cancel()
uploadedOK := false
defer atexit.OnError(&err, func() {
cancel()
if info.LeavePartsOnError || uploadedOK {
return
}
fs.Debugf(src, "multi-thread copy: cancelling transfer on exit")
abortErr := chunkWriter.Abort(ctx)
if abortErr != nil {
fs.Debugf(src, "multi-thread copy: abort failed: %v", abortErr)
}
})()
if info.ChunkSize > src.Size() {
fs.Debugf(src, "multi-thread copy: chunk size %v was bigger than source file size %v", fs.SizeSuffix(info.ChunkSize), fs.SizeSuffix(src.Size()))
info.ChunkSize = src.Size()
}
// Use the backend concurrency if it is higher than --multi-thread-streams or if --multi-thread-streams wasn't set explicitly
if !ci.MultiThreadSet || info.Concurrency > concurrency {
fs.Debugf(src, "multi-thread copy: using backend concurrency of %d instead of --multi-thread-streams %d", info.Concurrency, concurrency)
concurrency = info.Concurrency
}
numChunks := calculateNumChunks(src.Size(), info.ChunkSize)
if concurrency > numChunks {
fs.Debugf(src, "multi-thread copy: number of streams %d was bigger than number of chunks %d", concurrency, numChunks)
concurrency = numChunks
}
if concurrency < 1 {
concurrency = 1
}
g, gCtx := errgroup.WithContext(uploadCtx)
g.SetLimit(concurrency)
mc := &multiThreadCopyState{
ctx: gCtx,
size: src.Size(),
src: src,
partSize: info.ChunkSize,
numChunks: numChunks,
noBuffering: noBuffering,
}
// Make accounting
mc.acc = tr.Account(gCtx, nil)
fs.Debugf(src, "Starting multi-thread copy with %d chunks of size %v with %v parallel streams", mc.numChunks, fs.SizeSuffix(mc.partSize), concurrency)
for chunk := range mc.numChunks {
// Fail fast, in case an errgroup managed function returns an error
if gCtx.Err() != nil {
break
}
chunk := chunk
g.Go(func() error {
return mc.copyChunk(gCtx, chunk, chunkWriter)
})
}
err = g.Wait()
if err != nil {
return nil, err
}
err = chunkWriter.Close(ctx)
if err != nil {
return nil, fmt.Errorf("multi-thread copy: failed to close object after copy: %w", err)
}
uploadedOK = true // file is definitely uploaded OK so no need to abort
obj, err := f.NewObject(ctx, remote)
if err != nil {
return nil, fmt.Errorf("multi-thread copy: failed to find object after copy: %w", err)
}
// OpenWriterAt doesn't set metadata so we need to set it on completion
if usingOpenWriterAt {
setModTime := true
if ci.Metadata {
do, ok := obj.(fs.SetMetadataer)
if ok {
meta, err := fs.GetMetadataOptions(ctx, f, src, options)
if err != nil {
return nil, fmt.Errorf("multi-thread copy: failed to read metadata from source object: %w", err)
}
if _, foundMeta := meta["mtime"]; !foundMeta {
meta.Set("mtime", src.ModTime(ctx).Format(time.RFC3339Nano))
}
err = do.SetMetadata(ctx, meta)
if err != nil {
return nil, fmt.Errorf("multi-thread copy: failed to set metadata: %w", err)
}
setModTime = false
} else {
fs.Errorf(obj, "multi-thread copy: can't set metadata as SetMetadata isn't implemented in: %v", f)
}
}
if setModTime {
err = obj.SetModTime(ctx, src.ModTime(ctx))
switch err {
case nil, fs.ErrorCantSetModTime, fs.ErrorCantSetModTimeWithoutDelete:
default:
return nil, fmt.Errorf("multi-thread copy: failed to set modification time: %w", err)
}
}
}
fs.Debugf(src, "Finished multi-thread copy with %d parts of size %v", mc.numChunks, fs.SizeSuffix(mc.partSize))
return obj, nil
}
// writerAtChunkWriter converts a WriterAtCloser into a ChunkWriter
type writerAtChunkWriter struct {
remote string
size int64
writerAt fs.WriterAtCloser
chunkSize int64
chunks int
writeBufferSize int64
f fs.Fs
closed bool
}
// WriteChunk writes chunkNumber from reader
func (w *writerAtChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (int64, error) {
fs.Debugf(w.remote, "writing chunk %v", chunkNumber)
bytesToWrite := w.chunkSize
if chunkNumber == (w.chunks-1) && w.size%w.chunkSize != 0 {
bytesToWrite = w.size % w.chunkSize
}
var writer io.Writer = io.NewOffsetWriter(w.writerAt, int64(chunkNumber)*w.chunkSize)
if w.writeBufferSize > 0 {
writer = bufio.NewWriterSize(writer, int(w.writeBufferSize))
}
n, err := io.Copy(writer, reader)
if err != nil {
return -1, err
}
if n != bytesToWrite {
return -1, fmt.Errorf("expected to write %v bytes for chunk %v, but wrote %v bytes", bytesToWrite, chunkNumber, n)
}
// if we were buffering, flush to disk
switch w := writer.(type) {
case *bufio.Writer:
err = w.Flush()
if err != nil {
return -1, fmt.Errorf("multi-thread copy: flush failed: %w", err)
}
}
return n, nil
}
// Close the chunk writing
func (w *writerAtChunkWriter) Close(ctx context.Context) error {
if w.closed {
return nil
}
w.closed = true
return w.writerAt.Close()
}
// Abort the chunk writing
func (w *writerAtChunkWriter) Abort(ctx context.Context) error {
err := w.Close(ctx)
if err != nil {
fs.Errorf(w.remote, "multi-thread copy: failed to close file before aborting: %v", err)
}
obj, err := w.f.NewObject(ctx, w.remote)
if err != nil {
return fmt.Errorf("multi-thread copy: failed to find temp file when aborting chunk writer: %w", err)
}
return obj.Remove(ctx)
}
// openChunkWriterFromOpenWriterAt adapts an OpenWriterAtFn into an OpenChunkWriterFn using chunkSize and writeBufferSize
func openChunkWriterFromOpenWriterAt(openWriterAt fs.OpenWriterAtFn, chunkSize int64, writeBufferSize int64, f fs.Fs) fs.OpenChunkWriterFn {
return func(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
ci := fs.GetConfig(ctx)
writerAt, err := openWriterAt(ctx, remote, src.Size())
if err != nil {
return info, nil, err
}
if writeBufferSize > 0 {
fs.Debugf(src.Remote(), "multi-thread copy: write buffer set to %v", writeBufferSize)
}
chunkWriter := &writerAtChunkWriter{
remote: remote,
size: src.Size(),
chunkSize: chunkSize,
chunks: calculateNumChunks(src.Size(), chunkSize),
writerAt: writerAt,
writeBufferSize: writeBufferSize,
f: f,
}
info = fs.ChunkWriterInfo{
ChunkSize: chunkSize,
Concurrency: ci.MultiThreadStreams,
}
return info, chunkWriter, nil
}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/operations/lsjson.go | fs/operations/lsjson.go | package operations
import (
"context"
"errors"
"fmt"
"path"
"strings"
"time"
"github.com/rclone/rclone/backend/crypt"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
)
// ListJSONItem is the struct which gets marshalled for each line
type ListJSONItem struct {
Path string
Name string
EncryptedPath string `json:",omitempty"`
Encrypted string `json:",omitempty"`
Size int64
MimeType string `json:",omitempty"`
ModTime Timestamp //`json:",omitempty"`
IsDir bool
Hashes map[string]string `json:",omitempty"`
ID string `json:",omitempty"`
OrigID string `json:",omitempty"`
Tier string `json:",omitempty"`
IsBucket bool `json:",omitempty"`
Metadata fs.Metadata `json:",omitempty"`
}
// Timestamp a time in the provided format
type Timestamp struct {
When time.Time
Format string
}
// MarshalJSON turns a Timestamp into JSON
func (t Timestamp) MarshalJSON() (out []byte, err error) {
if t.When.IsZero() {
return []byte(`""`), nil
}
return []byte(`"` + t.When.Format(t.Format) + `"`), nil
}
// Returns a time format for the given precision
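//
// For example, a backend reporting millisecond precision gets
// "2006-01-02T15:04:05.000Z07:00", while second (or coarser) precision
// falls through to time.RFC3339.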
func formatForPrecision(precision time.Duration) string {
switch {
case precision <= time.Nanosecond:
return "2006-01-02T15:04:05.000000000Z07:00"
case precision <= 10*time.Nanosecond:
return "2006-01-02T15:04:05.00000000Z07:00"
case precision <= 100*time.Nanosecond:
return "2006-01-02T15:04:05.0000000Z07:00"
case precision <= time.Microsecond:
return "2006-01-02T15:04:05.000000Z07:00"
case precision <= 10*time.Microsecond:
return "2006-01-02T15:04:05.00000Z07:00"
case precision <= 100*time.Microsecond:
return "2006-01-02T15:04:05.0000Z07:00"
case precision <= time.Millisecond:
return "2006-01-02T15:04:05.000Z07:00"
case precision <= 10*time.Millisecond:
return "2006-01-02T15:04:05.00Z07:00"
case precision <= 100*time.Millisecond:
return "2006-01-02T15:04:05.0Z07:00"
}
return time.RFC3339
}
// ListJSONOpt describes the options for ListJSON
type ListJSONOpt struct {
Recurse bool `json:"recurse"`
NoModTime bool `json:"noModTime"`
NoMimeType bool `json:"noMimeType"`
ShowEncrypted bool `json:"showEncrypted"`
ShowOrigIDs bool `json:"showOrigIDs"`
ShowHash bool `json:"showHash"`
DirsOnly bool `json:"dirsOnly"`
FilesOnly bool `json:"filesOnly"`
Metadata bool `json:"metadata"`
HashTypes []string `json:"hashTypes"` // hash types to show if ShowHash is set, e.g. "MD5", "SHA-1"
}
// state for ListJSON
type listJSON struct {
fsrc fs.Fs
remote string
format string
opt *ListJSONOpt
cipher *crypt.Cipher
hashTypes []hash.Type
dirs bool
files bool
canGetTier bool
isBucket bool
showHash bool
}
func newListJSON(ctx context.Context, fsrc fs.Fs, remote string, opt *ListJSONOpt) (*listJSON, error) {
lj := &listJSON{
fsrc: fsrc,
remote: remote,
opt: opt,
dirs: true,
files: true,
}
// Dirs Files
// !FilesOnly,!DirsOnly true true
// !FilesOnly,DirsOnly true false
// FilesOnly,!DirsOnly false true
// FilesOnly,DirsOnly true true
if !opt.FilesOnly && opt.DirsOnly {
lj.files = false
} else if opt.FilesOnly && !opt.DirsOnly {
lj.dirs = false
}
if opt.ShowEncrypted {
fsInfo, _, _, config, err := fs.ConfigFs(fs.ConfigStringFull(fsrc))
if err != nil {
return nil, fmt.Errorf("ListJSON failed to load config for crypt remote: %w", err)
}
if fsInfo.Name != "crypt" {
return nil, errors.New("the remote needs to be of type \"crypt\"")
}
lj.cipher, err = crypt.NewCipher(config)
if err != nil {
return nil, fmt.Errorf("ListJSON failed to make new crypt remote: %w", err)
}
}
features := fsrc.Features()
lj.canGetTier = features.GetTier
lj.format = formatForPrecision(fsrc.Precision())
lj.isBucket = features.BucketBased && remote == "" && fsrc.Root() == "" // if bucket-based remote listing the root mark directories as buckets
lj.showHash = opt.ShowHash
lj.hashTypes = fsrc.Hashes().Array()
if len(opt.HashTypes) != 0 {
lj.showHash = true
lj.hashTypes = []hash.Type{}
for _, hashType := range opt.HashTypes {
var ht hash.Type
err := ht.Set(hashType)
if err != nil {
return nil, err
}
lj.hashTypes = append(lj.hashTypes, ht)
}
}
return lj, nil
}
// Convert a single entry to JSON
//
// It may return nil if there is no entry to return
func (lj *listJSON) entry(ctx context.Context, entry fs.DirEntry) (*ListJSONItem, error) {
switch entry.(type) {
case fs.Directory:
if lj.opt.FilesOnly {
return nil, nil
}
case fs.Object:
if lj.opt.DirsOnly {
return nil, nil
}
default:
fs.Errorf(nil, "Unknown type %T in listing", entry)
}
item := &ListJSONItem{
Path: entry.Remote(),
Name: path.Base(entry.Remote()),
Size: entry.Size(),
}
if entry.Remote() == "" {
item.Name = ""
}
if !lj.opt.NoModTime {
item.ModTime = Timestamp{When: entry.ModTime(ctx), Format: lj.format}
}
if !lj.opt.NoMimeType {
item.MimeType = fs.MimeTypeDirEntry(ctx, entry)
}
if lj.cipher != nil {
switch entry.(type) {
case fs.Directory:
item.EncryptedPath = lj.cipher.EncryptDirName(entry.Remote())
case fs.Object:
item.EncryptedPath = lj.cipher.EncryptFileName(entry.Remote())
default:
fs.Errorf(nil, "Unknown type %T in listing", entry)
}
item.Encrypted = path.Base(item.EncryptedPath)
}
if lj.opt.Metadata {
metadata, err := fs.GetMetadata(ctx, entry)
if err != nil {
fs.Errorf(entry, "Failed to read metadata: %v", err)
} else if metadata != nil {
item.Metadata = metadata
}
}
if do, ok := entry.(fs.IDer); ok {
item.ID = do.ID()
}
if o, ok := entry.(fs.Object); lj.opt.ShowOrigIDs && ok {
if do, ok := fs.UnWrapObject(o).(fs.IDer); ok {
item.OrigID = do.ID()
}
}
switch x := entry.(type) {
case fs.Directory:
item.IsDir = true
item.IsBucket = lj.isBucket
case fs.Object:
item.IsDir = false
if lj.showHash {
item.Hashes = make(map[string]string)
for _, hashType := range lj.hashTypes {
hash, err := x.Hash(ctx, hashType)
if err != nil {
fs.Errorf(x, "Failed to read hash: %v", err)
} else if hash != "" {
item.Hashes[hashType.String()] = hash
}
}
}
if lj.canGetTier {
if do, ok := x.(fs.GetTierer); ok {
item.Tier = do.GetTier()
}
}
default:
fs.Errorf(nil, "Unknown type %T in listing in ListJSON", entry)
}
return item, nil
}
// ListJSON lists fsrc using the options in opt calling callback for each item
func ListJSON(ctx context.Context, fsrc fs.Fs, remote string, opt *ListJSONOpt, callback func(*ListJSONItem) error) error {
lj, err := newListJSON(ctx, fsrc, remote, opt)
if err != nil {
return err
}
err = walk.ListR(ctx, fsrc, remote, false, ConfigMaxDepth(ctx, lj.opt.Recurse), walk.ListAll, func(entries fs.DirEntries) (err error) {
for _, entry := range entries {
item, err := lj.entry(ctx, entry)
if err != nil {
return fmt.Errorf("creating entry failed in ListJSON: %w", err)
}
if item != nil {
err = callback(item)
if err != nil {
return fmt.Errorf("callback failed in ListJSON: %w", err)
}
}
}
return nil
})
if err != nil {
return fmt.Errorf("error in ListJSON: %w", err)
}
return nil
}
// StatJSON returns a single JSON stat entry for the fsrc, remote path
//
// The item returned may be nil if it is not found or excluded with DirsOnly/FilesOnly
func StatJSON(ctx context.Context, fsrc fs.Fs, remote string, opt *ListJSONOpt) (item *ListJSONItem, err error) {
// FIXME this could be more efficient if we had a new primitive
// NewDirEntry() which returned an Object or a Directory
lj, err := newListJSON(ctx, fsrc, remote, opt)
if err != nil {
return nil, err
}
// Root is always a directory. When we have a NewDirEntry
// primitive we need to call it, but for now this will do.
if remote == "" {
if !lj.dirs {
return nil, nil
}
// Check the root directory exists
entries, err := fsrc.List(ctx, "")
accounting.Stats(ctx).Listed(int64(len(entries)))
if err != nil {
return nil, err
}
return lj.entry(ctx, fs.NewDir("", time.Now()))
}
// Could be a file or a directory here
if lj.files && !strings.HasSuffix(remote, "/") {
// NewObject can return the sentinel errors ErrorObjectNotFound or ErrorIsDir
// ErrorObjectNotFound can mean the source is a directory or not found
obj, err := fsrc.NewObject(ctx, remote)
if err == fs.ErrorObjectNotFound {
if !lj.dirs {
return nil, nil
}
} else if err == fs.ErrorIsDir {
if !lj.dirs {
return nil, nil
}
// This could return a made up ListJSONItem here
// but that wouldn't have the IDs etc. in it
} else if err != nil {
if !lj.dirs {
return nil, err
}
} else {
return lj.entry(ctx, obj)
}
}
// Must be a directory here
//
// Remove trailing / as rclone listings won't have them
remote = strings.TrimRight(remote, "/")
parent := path.Dir(remote)
if parent == "." || parent == "/" {
parent = ""
}
entries, err := fsrc.List(ctx, parent)
accounting.Stats(ctx).Listed(int64(len(entries)))
if err == fs.ErrorDirNotFound {
return nil, nil
} else if err != nil {
return nil, err
}
equal := func(a, b string) bool { return a == b }
if fsrc.Features().CaseInsensitive {
equal = strings.EqualFold
}
var foundEntry fs.DirEntry
for _, entry := range entries {
if equal(entry.Remote(), remote) {
foundEntry = entry
break
}
}
if foundEntry == nil {
return nil, nil
}
return lj.entry(ctx, foundEntry)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/operations/dedupe.go | fs/operations/dedupe.go | // dedupe - gets rid of identical files on remotes which can have duplicate file names (drive, mega)
package operations
import (
"context"
"fmt"
"path"
"sort"
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
)
// dedupeRename renames the objs slice to different names
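//
// For example (illustrative): three duplicates of "one.txt" would be
// renamed to "one-1.txt", "one-2.txt" and "one-3.txt", skipping any of
// those names that already exist on the remote.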
func dedupeRename(ctx context.Context, f fs.Fs, remote string, objs []fs.Object) {
doMove := f.Features().Move
if doMove == nil {
fs.Fatalf(nil, "Fs %v doesn't support Move", f)
}
ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)]
outer:
for i, o := range objs {
suffix := 1
newName := fmt.Sprintf("%s-%d%s", base, i+suffix, ext)
_, err := f.NewObject(ctx, newName)
for ; err != fs.ErrorObjectNotFound; suffix++ {
if err != nil {
err = fs.CountError(ctx, err)
fs.Errorf(o, "Failed to check for existing object: %v", err)
continue outer
}
if suffix > 100 {
fs.Errorf(o, "Could not find an available new name")
continue outer
}
newName = fmt.Sprintf("%s-%d%s", base, i+suffix, ext)
_, err = f.NewObject(ctx, newName)
}
if !SkipDestructive(ctx, o, "rename") {
newObj, err := doMove(ctx, o, newName)
if err != nil {
err = fs.CountError(ctx, err)
fs.Errorf(o, "Failed to rename: %v", err)
continue
}
fs.Infof(newObj, "renamed from: %v", o)
}
}
}
// dedupeDeleteAllButOne deletes all but the one in keep
func dedupeDeleteAllButOne(ctx context.Context, keep int, remote string, objs []fs.Object) {
count := 0
for i, o := range objs {
if i == keep {
continue
}
err := DeleteFile(ctx, o)
if err == nil {
count++
}
}
if count > 0 {
fs.Logf(remote, "Deleted %d extra copies", count)
}
}
// dedupeDeleteIdentical deletes all but one of identical (by hash) copies
func dedupeDeleteIdentical(ctx context.Context, ht hash.Type, remote string, objs []fs.Object) (remainingObjs []fs.Object) {
ci := fs.GetConfig(ctx)
// Make map of IDs
IDs := make(map[string]int, len(objs))
for _, o := range objs {
if do, ok := o.(fs.IDer); ok {
if ID := do.ID(); ID != "" {
IDs[ID]++
}
}
}
// Remove duplicate IDs
newObjs := objs[:0]
for _, o := range objs {
if do, ok := o.(fs.IDer); ok {
if ID := do.ID(); ID != "" {
if IDs[ID] <= 1 {
newObjs = append(newObjs, o)
} else {
fs.Logf(o, "Ignoring as it appears %d times in the listing and deleting would lead to data loss", IDs[ID])
}
}
}
}
objs = newObjs
// See how many of these duplicates are identical
dupesByID := make(map[string][]fs.Object, len(objs))
for _, o := range objs {
ID := ""
if ci.SizeOnly && o.Size() >= 0 {
ID = fmt.Sprintf("size %d", o.Size())
} else if ht != hash.None {
hashValue, err := o.Hash(ctx, ht)
if err == nil && hashValue != "" {
ID = fmt.Sprintf("%v %s", ht, hashValue)
}
}
if ID == "" {
remainingObjs = append(remainingObjs, o)
} else {
dupesByID[ID] = append(dupesByID[ID], o)
}
}
// Delete identical duplicates, filling remainingObjs with the ones remaining
for ID, dupes := range dupesByID {
remainingObjs = append(remainingObjs, dupes[0])
if len(dupes) > 1 {
fs.Logf(remote, "Deleting %d/%d identical duplicates (%s)", len(dupes)-1, len(dupes), ID)
for _, o := range dupes[1:] {
err := DeleteFile(ctx, o)
if err != nil {
remainingObjs = append(remainingObjs, o)
}
}
}
}
return remainingObjs
}
// dedupeList lists the duplicates and does nothing
func dedupeList(ctx context.Context, f fs.Fs, ht hash.Type, remote string, objs []fs.Object, byHash bool) {
fmt.Printf("%s: %d duplicates\n", remote, len(objs))
for i, o := range objs {
hashValue := ""
if ht != hash.None {
var err error
hashValue, err = o.Hash(ctx, ht)
if err != nil {
hashValue = err.Error()
}
}
if byHash {
fmt.Printf(" %d: %12d bytes, %s, %s\n", i+1, o.Size(), o.ModTime(ctx).Local().Format("2006-01-02 15:04:05.000000000"), o.Remote())
} else {
fmt.Printf(" %d: %12d bytes, %s, %v %32s\n", i+1, o.Size(), o.ModTime(ctx).Local().Format("2006-01-02 15:04:05.000000000"), ht, hashValue)
}
}
}
// dedupeInteractive interactively dedupes the slice of objects
func dedupeInteractive(ctx context.Context, f fs.Fs, ht hash.Type, remote string, objs []fs.Object, byHash bool) bool {
dedupeList(ctx, f, ht, remote, objs, byHash)
commands := []string{"sSkip and do nothing", "kKeep just one (choose which in next step)"}
if !byHash {
commands = append(commands, "rRename all to be different (by changing file.jpg to file-1.jpg)")
}
commands = append(commands, "qQuit")
switch config.Command(commands) {
case 's':
case 'k':
keep := config.ChooseNumber("Enter the number of the file to keep", 1, len(objs))
dedupeDeleteAllButOne(ctx, keep-1, remote, objs)
case 'r':
dedupeRename(ctx, f, remote, objs)
case 'q':
return false
}
return true
}
// DeduplicateMode is how the dedupe command chooses what to do
type DeduplicateMode int
// Deduplicate modes
const (
DeduplicateInteractive DeduplicateMode = iota // interactively ask the user
DeduplicateSkip // skip all conflicts
DeduplicateFirst // choose the first object
DeduplicateNewest // choose the newest object
DeduplicateOldest // choose the oldest object
DeduplicateRename // rename the objects
DeduplicateLargest // choose the largest object
DeduplicateSmallest // choose the smallest object
DeduplicateList // list duplicates only
)
func (x DeduplicateMode) String() string {
switch x {
case DeduplicateInteractive:
return "interactive"
case DeduplicateSkip:
return "skip"
case DeduplicateFirst:
return "first"
case DeduplicateNewest:
return "newest"
case DeduplicateOldest:
return "oldest"
case DeduplicateRename:
return "rename"
case DeduplicateLargest:
return "largest"
case DeduplicateSmallest:
return "smallest"
case DeduplicateList:
return "list"
}
return "unknown"
}
// Set a DeduplicateMode from a string
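//
// For example (illustrative), a flag bound to this type with the value
// "newest" results in Set("newest"), selecting DeduplicateNewest.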
func (x *DeduplicateMode) Set(s string) error {
switch strings.ToLower(s) {
case "interactive":
*x = DeduplicateInteractive
case "skip":
*x = DeduplicateSkip
case "first":
*x = DeduplicateFirst
case "newest":
*x = DeduplicateNewest
case "oldest":
*x = DeduplicateOldest
case "rename":
*x = DeduplicateRename
case "largest":
*x = DeduplicateLargest
case "smallest":
*x = DeduplicateSmallest
case "list":
*x = DeduplicateList
default:
return fmt.Errorf("unknown mode for dedupe %q", s)
}
return nil
}
// Type of the value
func (x *DeduplicateMode) Type() string {
return "string"
}
// Directory with entry count and links to parents
type dedupeDir struct {
dir fs.Directory
parent string
count int
}
// Map of directories by ID with recursive counts
type dedupeDirsMap map[string]*dedupeDir
func (dm dedupeDirsMap) get(id string) *dedupeDir {
d := dm[id]
if d == nil {
d = &dedupeDir{}
dm[id] = d
}
return d
}
func (dm dedupeDirsMap) increment(parent string) {
if parent != "" {
d := dm.get(parent)
d.count++
dm.increment(d.parent)
}
}
// dedupeFindDuplicateDirs scans f for duplicate directories
func dedupeFindDuplicateDirs(ctx context.Context, f fs.Fs) (duplicateDirs [][]*dedupeDir, err error) {
dirsByID := dedupeDirsMap{}
dirs := map[string][]*dedupeDir{}
ci := fs.GetConfig(ctx)
err = walk.ListR(ctx, f, "", false, ci.MaxDepth, walk.ListAll, func(entries fs.DirEntries) error {
for _, entry := range entries {
tr := accounting.Stats(ctx).NewCheckingTransfer(entry, "merging")
remote := entry.Remote()
parentRemote := path.Dir(remote)
if parentRemote == "." {
parentRemote = ""
}
// Obtain ID of the object parent, if known.
// (This usually means that backend allows duplicate paths)
// Fall back to remote parent path, if unavailable.
var parent string
if entryParentIDer, ok := entry.(fs.ParentIDer); ok {
parent = entryParentIDer.ParentID()
}
if parent == "" {
parent = parentRemote
}
var ID string
if entryIDer, ok := entry.(fs.IDer); ok {
ID = entryIDer.ID()
}
if ID == "" {
ID = remote
}
if fsDir, ok := entry.(fs.Directory); ok {
d := dirsByID.get(ID)
d.dir = fsDir
d.parent = parent
dirs[remote] = append(dirs[remote], d)
}
dirsByID.increment(parent)
tr.Done(ctx, nil)
}
return nil
})
if err != nil {
return nil, fmt.Errorf("find duplicate dirs: %w", err)
}
// Make sure parents are before children
duplicateNames := []string{}
for name, ds := range dirs {
if len(ds) > 1 {
duplicateNames = append(duplicateNames, name)
}
}
sort.Strings(duplicateNames)
for _, name := range duplicateNames {
duplicateDirs = append(duplicateDirs, dirs[name])
}
return
}
// dedupeMergeDuplicateDirs merges all the duplicate directories found
func dedupeMergeDuplicateDirs(ctx context.Context, f fs.Fs, duplicateDirs [][]*dedupeDir) error {
mergeDirs := f.Features().MergeDirs
if mergeDirs == nil {
return fmt.Errorf("%v: can't merge directories", f)
}
dirCacheFlush := f.Features().DirCacheFlush
if dirCacheFlush == nil {
return fmt.Errorf("%v: can't flush dir cache", f)
}
for _, dedupeDirs := range duplicateDirs {
if SkipDestructive(ctx, dedupeDirs[0].dir, "merge duplicate directories") {
continue
}
// Put largest directory in front to minimize movements
fsDirs := []fs.Directory{}
largestCount := -1
largestIdx := 0
for i, d := range dedupeDirs {
fsDirs = append(fsDirs, d.dir)
if d.count > largestCount {
largestIdx = i
largestCount = d.count
}
}
fsDirs[largestIdx], fsDirs[0] = fsDirs[0], fsDirs[largestIdx]
fs.Infof(fsDirs[0], "Merging contents of duplicate directories")
err := mergeDirs(ctx, fsDirs)
if err != nil {
err = fs.CountError(ctx, err)
fs.Errorf(nil, "merge duplicate dirs: %v", err)
}
}
dirCacheFlush()
return nil
}
// sort oldest first
func sortOldestFirst(objs []fs.Object) {
sort.Slice(objs, func(i, j int) bool {
return objs[i].ModTime(context.TODO()).Before(objs[j].ModTime(context.TODO()))
})
}
// sort smallest first
func sortSmallestFirst(objs []fs.Object) {
sort.Slice(objs, func(i, j int) bool {
return objs[i].Size() < objs[j].Size()
})
}
// Deduplicate interactively finds duplicate files and offers to
// delete all but one or rename them to be different. Only useful with
// Google Drive which can have duplicate file names.
func Deduplicate(ctx context.Context, f fs.Fs, mode DeduplicateMode, byHash bool) error {
ci := fs.GetConfig(ctx)
// find a hash to use
ht := f.Hashes().GetOne()
what := "names"
if byHash {
if ht == hash.None {
return fmt.Errorf("%v has no hashes", f)
}
what = ht.String() + " hashes"
}
fs.Infof(f, "Looking for duplicate %s using %v mode.", what, mode)
// Find duplicate directories first and fix them
if !byHash {
duplicateDirs, err := dedupeFindDuplicateDirs(ctx, f)
if err != nil {
return err
}
if len(duplicateDirs) > 0 {
if mode != DeduplicateList {
err = dedupeMergeDuplicateDirs(ctx, f, duplicateDirs)
if err != nil {
return err
}
} else {
for _, dedupeDirs := range duplicateDirs {
remote := dedupeDirs[0].dir.Remote()
fmt.Printf("%s: %d duplicates of this directory\n", remote, len(dedupeDirs))
}
}
}
}
// Now find duplicate files
files := map[string][]fs.Object{}
err := walk.ListR(ctx, f, "", false, ci.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
entries.ForObject(func(o fs.Object) {
tr := accounting.Stats(ctx).NewCheckingTransfer(o, "checking")
defer tr.Done(ctx, nil)
var remote string
var err error
if byHash {
remote, err = o.Hash(ctx, ht)
if err != nil {
fs.Errorf(o, "Failed to hash: %v", err)
remote = ""
}
} else {
remote = o.Remote()
}
if remote != "" {
files[remote] = append(files[remote], o)
}
})
return nil
})
if err != nil {
return err
}
for remote, objs := range files {
if len(objs) <= 1 {
continue
}
fs.Logf(remote, "Found %d files with duplicate %s", len(objs), what)
if !byHash && mode != DeduplicateList {
objs = dedupeDeleteIdentical(ctx, ht, remote, objs)
if len(objs) <= 1 {
fs.Logf(remote, "All duplicates removed")
continue
}
}
switch mode {
case DeduplicateInteractive:
if !dedupeInteractive(ctx, f, ht, remote, objs, byHash) {
return nil
}
case DeduplicateFirst:
dedupeDeleteAllButOne(ctx, 0, remote, objs)
case DeduplicateNewest:
sortOldestFirst(objs)
dedupeDeleteAllButOne(ctx, len(objs)-1, remote, objs)
case DeduplicateOldest:
sortOldestFirst(objs)
dedupeDeleteAllButOne(ctx, 0, remote, objs)
case DeduplicateRename:
dedupeRename(ctx, f, remote, objs)
case DeduplicateLargest:
sortSmallestFirst(objs)
dedupeDeleteAllButOne(ctx, len(objs)-1, remote, objs)
case DeduplicateSmallest:
sortSmallestFirst(objs)
dedupeDeleteAllButOne(ctx, 0, remote, objs)
case DeduplicateSkip:
fs.Logf(remote, "Skipping %d files with duplicate %s", len(objs), what)
case DeduplicateList:
dedupeList(ctx, f, ht, remote, objs, byHash)
default:
//skip
}
}
return nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/operations/check.go | fs/operations/check.go | package operations
import (
"bufio"
"bytes"
"context"
"errors"
"fmt"
"io"
"os"
"regexp"
"strings"
"sync"
"sync/atomic"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/march"
"github.com/rclone/rclone/lib/readers"
"golang.org/x/text/unicode/norm"
)
// checkFn is the type of the checking function used in CheckFn()
//
// It should check the two objects (a, b) and return if they differ
// and whether the hash was used.
//
// If there are differences then this should Errorf the difference and
// the reason but return with err = nil. It should not CountError in
// this case.
type checkFn func(ctx context.Context, a, b fs.Object) (differ bool, noHash bool, err error)
// CheckOpt contains options for the Check functions
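//
// When Combined is set, each file name is written to it prefixed by a
// sigil and a space: '=' match, '-' missing on source, '+' missing on
// destination, '*' differ, '!' error.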
type CheckOpt struct {
Fdst, Fsrc fs.Fs // fses to check
Check checkFn // function to use for checking
OneWay bool // one way only?
Combined io.Writer // a file with file names with leading sigils
MissingOnSrc io.Writer // files only in the destination
MissingOnDst io.Writer // files only in the source
Match io.Writer // matching files
Differ io.Writer // differing files
Error io.Writer // files with errors of some kind
}
// checkMarch is used to march over two Fses in the same way as
// sync/copy
type checkMarch struct {
ctx context.Context
ioMu sync.Mutex
wg sync.WaitGroup
tokens chan struct{}
differences atomic.Int32
noHashes atomic.Int32
srcFilesMissing atomic.Int32
dstFilesMissing atomic.Int32
matches atomic.Int32
opt CheckOpt
}
// report outputs the fileName to out if required and to the combined log
func (c *checkMarch) report(o fs.DirEntry, out io.Writer, sigil rune) {
c.reportFilename(o.String(), out, sigil)
}
func (c *checkMarch) reportFilename(filename string, out io.Writer, sigil rune) {
if out != nil {
SyncFprintf(out, "%s\n", filename)
}
if c.opt.Combined != nil {
SyncFprintf(c.opt.Combined, "%c %s\n", sigil, filename)
}
}
// DstOnly have an object which is in the destination only
func (c *checkMarch) DstOnly(dst fs.DirEntry) (recurse bool) {
switch dst.(type) {
case fs.Object:
if c.opt.OneWay {
return false
}
err := fmt.Errorf("file not in %v", c.opt.Fsrc)
fs.Errorf(dst, "%v", err)
_ = fs.CountError(c.ctx, err)
c.differences.Add(1)
c.srcFilesMissing.Add(1)
c.report(dst, c.opt.MissingOnSrc, '-')
case fs.Directory:
// Do the same thing to the entire contents of the directory
if c.opt.OneWay {
return false
}
return true
default:
panic("Bad object in DirEntries")
}
return false
}
// SrcOnly have an object which is in the source only
func (c *checkMarch) SrcOnly(src fs.DirEntry) (recurse bool) {
switch src.(type) {
case fs.Object:
err := fmt.Errorf("file not in %v", c.opt.Fdst)
fs.Errorf(src, "%v", err)
_ = fs.CountError(c.ctx, err)
c.differences.Add(1)
c.dstFilesMissing.Add(1)
c.report(src, c.opt.MissingOnDst, '+')
case fs.Directory:
// Do the same thing to the entire contents of the directory
return true
default:
panic("Bad object in DirEntries")
}
return false
}
// check to see if two objects are identical using the check function
func (c *checkMarch) checkIdentical(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
ci := fs.GetConfig(ctx)
tr := accounting.Stats(ctx).NewCheckingTransfer(src, "checking")
defer func() {
tr.Done(ctx, err)
}()
if sizeDiffers(ctx, src, dst) {
err = fmt.Errorf("sizes differ")
fs.Errorf(src, "%v", err)
return true, false, nil
}
if ci.SizeOnly {
return false, false, nil
}
return c.opt.Check(ctx, dst, src)
}
// Match is called when src and dst are present, so sync src to dst
func (c *checkMarch) Match(ctx context.Context, dst, src fs.DirEntry) (recurse bool) {
switch srcX := src.(type) {
case fs.Object:
dstX, ok := dst.(fs.Object)
if ok {
if SkipDestructive(ctx, src, "check") {
return false
}
c.wg.Add(1)
c.tokens <- struct{}{} // put a token to limit concurrency
go func() {
defer func() {
<-c.tokens // get the token back to free up a slot
c.wg.Done()
}()
differ, noHash, err := c.checkIdentical(ctx, dstX, srcX)
if err != nil {
fs.Errorf(src, "%v", err)
_ = fs.CountError(ctx, err)
c.report(src, c.opt.Error, '!')
} else if differ {
c.differences.Add(1)
err := errors.New("files differ")
// the checkFn has already logged the reason
_ = fs.CountError(ctx, err)
c.report(src, c.opt.Differ, '*')
} else {
c.matches.Add(1)
c.report(src, c.opt.Match, '=')
if noHash {
c.noHashes.Add(1)
fs.Debugf(dstX, "OK - could not check hash")
} else {
fs.Debugf(dstX, "OK")
}
}
}()
} else {
err := fmt.Errorf("is file on %v but directory on %v", c.opt.Fsrc, c.opt.Fdst)
fs.Errorf(src, "%v", err)
_ = fs.CountError(ctx, err)
c.differences.Add(1)
c.dstFilesMissing.Add(1)
c.report(src, c.opt.MissingOnDst, '+')
}
case fs.Directory:
// Do the same thing to the entire contents of the directory
_, ok := dst.(fs.Directory)
if ok {
return true
}
err := fmt.Errorf("is file on %v but directory on %v", c.opt.Fdst, c.opt.Fsrc)
fs.Errorf(dst, "%v", err)
_ = fs.CountError(ctx, err)
c.differences.Add(1)
c.srcFilesMissing.Add(1)
c.report(dst, c.opt.MissingOnSrc, '-')
default:
panic("Bad object in DirEntries")
}
return false
}
// CheckFn checks the files in fsrc and fdst according to Size and
// hash using checkFunction on each file to check the hashes.
//
// checkFunction sees if dst and src are identical
//
// it returns true if differences were found
// it also returns whether it couldn't be hashed
func CheckFn(ctx context.Context, opt *CheckOpt) error {
ci := fs.GetConfig(ctx)
if opt.Check == nil {
return errors.New("internal error: nil check function")
}
c := &checkMarch{
ctx: ctx,
tokens: make(chan struct{}, ci.Checkers),
opt: *opt,
}
// set up a march over fdst and fsrc
m := &march.March{
Ctx: ctx,
Fdst: c.opt.Fdst,
Fsrc: c.opt.Fsrc,
Dir: "",
Callback: c,
NoTraverse: ci.NoTraverse,
NoUnicodeNormalization: ci.NoUnicodeNormalization,
}
fs.Debugf(c.opt.Fdst, "Waiting for checks to finish")
err := m.Run(ctx)
c.wg.Wait() // wait for background go-routines
return c.reportResults(ctx, err)
}
func (c *checkMarch) reportResults(ctx context.Context, err error) error {
if c.dstFilesMissing.Load() > 0 {
fs.Logf(c.opt.Fdst, "%d files missing", c.dstFilesMissing.Load())
}
if c.srcFilesMissing.Load() > 0 {
entity := "files"
if c.opt.Fsrc == nil {
entity = "hashes"
}
fs.Logf(c.opt.Fsrc, "%d %s missing", c.srcFilesMissing.Load(), entity)
}
fs.Logf(c.opt.Fdst, "%d differences found", c.differences.Load())
if errs := accounting.Stats(ctx).GetErrors(); errs > 0 {
fs.Logf(c.opt.Fdst, "%d errors while checking", errs)
}
if c.noHashes.Load() > 0 {
fs.Logf(c.opt.Fdst, "%d hashes could not be checked", c.noHashes.Load())
}
if c.matches.Load() > 0 {
fs.Logf(c.opt.Fdst, "%d matching files", c.matches.Load())
}
if err != nil {
return err
}
if c.differences.Load() > 0 {
// Return an already counted error so we don't double count this error too
err = fserrors.FsError(fmt.Errorf("%d differences found", c.differences.Load()))
fserrors.Count(err)
return err
}
return nil
}
// Check the files in fsrc and fdst according to Size and hash
func Check(ctx context.Context, opt *CheckOpt) error {
optCopy := *opt
optCopy.Check = func(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
same, ht, err := CheckHashes(ctx, src, dst)
if err != nil {
return true, false, err
}
if ht == hash.None {
return false, true, nil
}
if !same {
err = fmt.Errorf("%v differ", ht)
fs.Errorf(src, "%v", err)
return true, false, nil
}
return false, false, nil
}
return CheckFn(ctx, &optCopy)
}
// CheckEqualReaders checks to see if in1 and in2 have the same
// content when read.
//
// it returns true if no differences were found
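//
// For example (illustrative):
//	equal, err := CheckEqualReaders(bytes.NewReader(a), bytes.NewReader(b))
// returns equal == true only when a and b contain exactly the same bytes.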
func CheckEqualReaders(in1, in2 io.Reader) (equal bool, err error) {
const bufSize = 64 * 1024
buf1 := make([]byte, bufSize)
buf2 := make([]byte, bufSize)
for {
n1, err1 := readers.ReadFill(in1, buf1)
n2, err2 := readers.ReadFill(in2, buf2)
// check errors
if err1 != nil && err1 != io.EOF {
return false, err1
} else if err2 != nil && err2 != io.EOF {
return false, err2
}
// err1 && err2 are nil or io.EOF here
// process the data
if n1 != n2 || !bytes.Equal(buf1[:n1], buf2[:n2]) {
return false, nil
}
// if both streams finished then we have finished
if err1 == io.EOF && err2 == io.EOF {
break
}
}
return true, nil
}
// CheckIdenticalDownload checks to see if dst and src are identical
// by reading all their bytes if necessary.
//
// it returns true if no differences were found
func CheckIdenticalDownload(ctx context.Context, src, dst fs.Object) (equal bool, err error) {
ci := fs.GetConfig(ctx)
err = Retry(ctx, src, ci.LowLevelRetries, func() error {
equal, err = checkIdenticalDownload(ctx, src, dst)
return err
})
return equal, err
}
// Does the work for CheckIdenticalDownload
func checkIdenticalDownload(ctx context.Context, src, dst fs.Object) (equal bool, err error) {
var in1, in2 io.ReadCloser
in1, err = Open(ctx, dst)
if err != nil {
return false, fmt.Errorf("failed to open %q: %w", dst, err)
}
tr1 := accounting.Stats(ctx).NewTransfer(dst, nil)
defer func() {
tr1.Done(ctx, nil) // error handling is done by the caller
}()
in1 = tr1.Account(ctx, in1).WithBuffer() // account and buffer the transfer
in2, err = Open(ctx, src)
if err != nil {
return false, fmt.Errorf("failed to open %q: %w", src, err)
}
tr2 := accounting.Stats(ctx).NewTransfer(dst, nil)
defer func() {
tr2.Done(ctx, nil) // error handling is done by the caller
}()
in2 = tr2.Account(ctx, in2).WithBuffer() // account and buffer the transfer
// To assign err variable before defer.
equal, err = CheckEqualReaders(in1, in2)
return
}
// CheckDownload checks the files in fsrc and fdst according to Size
// and the actual contents of the files.
func CheckDownload(ctx context.Context, opt *CheckOpt) error {
optCopy := *opt
optCopy.Check = func(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
same, err := CheckIdenticalDownload(ctx, src, dst)
if err != nil {
return true, true, fmt.Errorf("failed to download: %w", err)
}
if !same {
err = errors.New("contents differ")
fs.Errorf(src, "%v", err)
return true, false, nil
}
return false, false, nil
}
return CheckFn(ctx, &optCopy)
}
// ApplyTransforms handles --no-unicode-normalization and --ignore-case-sync for CheckSum
// so that it matches behavior of Check (where it's handled by March)
func ApplyTransforms(ctx context.Context, s string) string {
ci := fs.GetConfig(ctx)
return ToNormal(s, !ci.NoUnicodeNormalization, ci.IgnoreCaseSync)
}
// ToNormal normalizes case and unicode form and returns the transformed string.
// It is similar to ApplyTransforms but does not use a context.
// If normUnicode == true, s will be transformed to NFC.
// If normCase == true, s will be transformed to lowercase.
// If both are true, both transformations will be performed.
func ToNormal(s string, normUnicode, normCase bool) string {
if normUnicode {
s = norm.NFC.String(s)
}
if normCase {
s = strings.ToLower(s)
}
return s
}
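// A minimal sketch of ToNormal in use. The decomposed input below ("e" followed
// by U+0301) is an illustrative value only.
func exampleToNormal() {
	s := "e\u0301cole"                          // decomposed spelling of "école"
	fmt.Println(ToNormal(s, true, false))       // NFC form: the accent is composed into a single "é"
	fmt.Println(ToNormal("MiXeD", false, true)) // "mixed": case folded only
}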
// CheckSum checks filesystem hashes against a SUM file
func CheckSum(ctx context.Context, fsrc, fsum fs.Fs, sumFile string, hashType hash.Type, opt *CheckOpt, download bool) error {
var options CheckOpt
if opt != nil {
options = *opt
} else {
// default options for hashsum -c
options.Combined = os.Stdout
}
// CheckSum treats Fsrc and Fdst specially:
options.Fsrc = nil // no file system here, corresponds to the sum list
options.Fdst = fsrc // denotes the file system to check
opt = &options // override supplied argument
if !download && (hashType == hash.None || !opt.Fdst.Hashes().Contains(hashType)) {
return fmt.Errorf("%s: hash type is not supported by file system: %s", hashType, opt.Fdst)
}
if sumFile == "" {
return fmt.Errorf("not a sum file: %s", fsum)
}
sumObj, err := fsum.NewObject(ctx, sumFile)
if err != nil {
return fmt.Errorf("cannot open sum file: %w", err)
}
hashes, err := ParseSumFile(ctx, sumObj)
if err != nil {
return fmt.Errorf("failed to parse sum file: %w", err)
}
ci := fs.GetConfig(ctx)
c := &checkMarch{
ctx: ctx,
tokens: make(chan struct{}, ci.Checkers),
opt: *opt,
}
lastErr := ListFn(ctx, opt.Fdst, func(obj fs.Object) {
c.checkSum(ctx, obj, download, hashes, hashType)
})
c.wg.Wait() // wait for background go-routines
// take a census of unhandled sums
fi := filter.GetConfig(ctx)
for filename, hash := range hashes {
if hash == "" { // the sum has been successfully consumed
continue
}
if !fi.IncludeRemote(filename) { // the file was filtered out
continue
}
// filesystem missed the file, sum wasn't consumed
err := fmt.Errorf("file not in %v", opt.Fdst)
fs.Errorf(filename, "%v", err)
_ = fs.CountError(ctx, err)
if lastErr == nil {
lastErr = err
}
c.dstFilesMissing.Add(1)
c.reportFilename(filename, opt.MissingOnDst, '+')
}
return c.reportResults(ctx, lastErr)
}
// checkSum checks a single object against the golden hashes
func (c *checkMarch) checkSum(ctx context.Context, obj fs.Object, download bool, hashes HashSums, hashType hash.Type) {
normalizedRemote := ApplyTransforms(ctx, obj.Remote())
c.ioMu.Lock()
sumHash, sumFound := hashes[normalizedRemote]
hashes[normalizedRemote] = "" // mark sum as consumed
c.ioMu.Unlock()
if !sumFound && c.opt.OneWay {
return
}
var err error
tr := accounting.Stats(ctx).NewCheckingTransfer(obj, "hashing")
defer tr.Done(ctx, err)
if !sumFound {
err = errors.New("sum not found")
_ = fs.CountError(ctx, err)
fs.Errorf(obj, "%v", err)
c.differences.Add(1)
c.srcFilesMissing.Add(1)
c.report(obj, c.opt.MissingOnSrc, '-')
return
}
if !download {
var objHash string
objHash, err = obj.Hash(ctx, hashType)
c.matchSum(ctx, sumHash, objHash, obj, err, hashType)
return
}
c.wg.Add(1)
c.tokens <- struct{}{} // put a token to limit concurrency
go func() {
var (
objHash string
err error
in io.ReadCloser
)
defer func() {
c.matchSum(ctx, sumHash, objHash, obj, err, hashType)
<-c.tokens // get the token back to free up a slot
c.wg.Done()
}()
if in, err = Open(ctx, obj); err != nil {
return
}
tr := accounting.Stats(ctx).NewTransfer(obj, nil)
in = tr.Account(ctx, in).WithBuffer() // account and buffer the transfer
defer func() {
tr.Done(ctx, nil) // will close the stream
}()
hashVals, err2 := hash.StreamTypes(in, hash.NewHashSet(hashType))
if err2 != nil {
err = err2 // pass to matchSum
return
}
objHash = hashVals[hashType]
}()
}
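// A minimal sketch of the streaming hash calculation used in the goroutine
// above, run against an in-memory reader instead of a downloaded object.
// hash.MD5 is used purely as an example hash type.
func exampleStreamHash() (string, error) {
	in := strings.NewReader("example file contents")
	sums, err := hash.StreamTypes(in, hash.NewHashSet(hash.MD5))
	if err != nil {
		return "", err
	}
	return sums[hash.MD5], nil // hex digest, as passed to matchSum via objHash
}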
// matchSum sums up the results of hashsum matching for an object
func (c *checkMarch) matchSum(ctx context.Context, sumHash, objHash string, obj fs.Object, err error, hashType hash.Type) {
switch {
case err != nil:
_ = fs.CountError(ctx, err)
fs.Errorf(obj, "Failed to calculate hash: %v", err)
c.report(obj, c.opt.Error, '!')
case sumHash == "":
err = errors.New("duplicate file")
_ = fs.CountError(ctx, err)
fs.Errorf(obj, "%v", err)
c.report(obj, c.opt.Error, '!')
case objHash == "":
fs.Debugf(nil, "%v = %s (sum)", hashType, sumHash)
fs.Debugf(obj, "%v - could not check hash (%v)", hashType, c.opt.Fdst)
c.noHashes.Add(1)
c.matches.Add(1)
c.report(obj, c.opt.Match, '=')
case objHash == sumHash:
fs.Debugf(obj, "%v = %s OK", hashType, sumHash)
c.matches.Add(1)
c.report(obj, c.opt.Match, '=')
default:
err = errors.New("files differ")
_ = fs.CountError(ctx, err)
fs.Debugf(nil, "%v = %s (sum)", hashType, sumHash)
fs.Debugf(obj, "%v = %s (%v)", hashType, objHash, c.opt.Fdst)
fs.Errorf(obj, "%v", err)
c.differences.Add(1)
c.report(obj, c.opt.Differ, '*')
}
}
// HashSums represents a parsed SUM file
type HashSums map[string]string
// ParseSumFile parses a hash SUM file and returns hashes as a map
func ParseSumFile(ctx context.Context, sumFile fs.Object) (HashSums, error) {
rd, err := Open(ctx, sumFile)
if err != nil {
return nil, err
}
parser := bufio.NewReader(rd)
const maxWarn = 3
numWarn := 0
re := regexp.MustCompile(`^([^ ]+) [ *](.+)$`)
hashes := HashSums{}
for lineNo := 0; true; lineNo++ {
lineBytes, _, err := parser.ReadLine()
if err == io.EOF {
break
}
if err != nil {
return nil, err
}
line := string(lineBytes)
if line == "" {
continue
}
fields := re.FindStringSubmatch(ApplyTransforms(ctx, line))
if fields == nil {
numWarn++
if numWarn <= maxWarn {
fs.Logf(sumFile, "improperly formatted checksum line %d", lineNo)
}
continue
}
sum, file := fields[1], fields[2]
if hashes[file] != "" {
numWarn++
if numWarn <= maxWarn {
fs.Logf(sumFile, "duplicate file on checksum line %d", lineNo)
}
continue
}
// We've standardised on lower case checksums in rclone internals.
hashes[file] = strings.ToLower(sum)
}
if numWarn > maxWarn {
fs.Logf(sumFile, "%d warning(s) suppressed...", numWarn-maxWarn)
}
if err = rd.Close(); err != nil {
return nil, err
}
return hashes, nil
}
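// A minimal sketch of the line format ParseSumFile accepts, using the same
// regexp as above. The digest (MD5 of the empty string) and file name are
// example values only.
func exampleSumFileLine() {
	re := regexp.MustCompile(`^([^ ]+) [ *](.+)$`)
	line := "d41d8cd98f00b204e9800998ecf8427e  subdir/file.txt" // md5sum-style: digest, space, space or '*', name
	if fields := re.FindStringSubmatch(line); fields != nil {
		fmt.Printf("sum=%s file=%s\n", fields[1], fields[2])
	}
}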
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/operations/operations_internal_test.go | fs/operations/operations_internal_test.go | // Internal tests for operations
package operations
import (
"context"
"fmt"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/object"
"github.com/stretchr/testify/assert"
)
func TestSizeDiffers(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
when := time.Now()
for _, test := range []struct {
ignoreSize bool
srcSize int64
dstSize int64
want bool
}{
{false, 0, 0, false},
{false, 1, 2, true},
{false, 1, -1, false},
{false, -1, 1, false},
{true, 0, 0, false},
{true, 1, 2, false},
{true, 1, -1, false},
{true, -1, 1, false},
} {
src := object.NewStaticObjectInfo("a", when, test.srcSize, true, nil, nil)
dst := object.NewStaticObjectInfo("a", when, test.dstSize, true, nil, nil)
oldIgnoreSize := ci.IgnoreSize
ci.IgnoreSize = test.ignoreSize
got := sizeDiffers(ctx, src, dst)
ci.IgnoreSize = oldIgnoreSize
assert.Equal(t, test.want, got, fmt.Sprintf("ignoreSize=%v, srcSize=%v, dstSize=%v", test.ignoreSize, test.srcSize, test.dstSize))
}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/operations/copy.go | fs/operations/copy.go | // This file implements operations.Copy
//
// This is probably the most important operation in rclone.
package operations
import (
"context"
"errors"
"fmt"
"hash/crc32"
"io"
"path"
"strings"
"time"
"unicode/utf8"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/transform"
)
// State of the copy
type copy struct {
f fs.Fs // destination fs.Fs
dstFeatures *fs.Features // Features() for fs.Fs
dst fs.Object // destination object to update, may be nil
remote string // destination path, used if dst is nil
src fs.Object // source object
ci *fs.ConfigInfo // current config
maxTries int // max number of tries to do the copy
doUpdate bool // whether we are updating an existing file or not
hashType hash.Type // common hash to use
hashOption *fs.HashesOption // open option for the common hash
tr *accounting.Transfer // accounting for the transfer
inplace bool // set if we are updating inplace and not using a partial name
remoteForCopy string // the name used for the transfer, either remote or remote+".partial"
}
// Used to remove a failed copy
func (c *copy) removeFailedCopy(ctx context.Context, o fs.Object) {
if o == nil {
return
}
fs.Infof(o, "Removing failed copy")
err := o.Remove(ctx)
if err != nil {
fs.Infof(o, "Failed to remove failed copy: %s", err)
}
}
// Used to remove a failed partial copy
func (c *copy) removeFailedPartialCopy(ctx context.Context, f fs.Fs, remote string) {
o, err := f.NewObject(ctx, remote)
if errors.Is(err, fs.ErrorObjectNotFound) {
// Assume object has been deleted
return
}
if err != nil {
fs.Infof(remote, "Failed to remove failed partial copy: %s", err)
return
}
c.removeFailedCopy(ctx, o)
}
// TruncateString truncates s to n bytes.
//
// If s is valid UTF-8 then this may truncate to fewer than n bytes to
// make the returned string also valid UTF-8.
func TruncateString(s string, n int) string {
truncated := s[:n]
if !utf8.ValidString(s) {
// If input string wasn't valid UTF-8 then just return the truncation
return truncated
}
for len(truncated) > 0 {
if utf8.ValidString(truncated) {
return truncated
}
// Remove 1 byte until valid
truncated = truncated[:len(truncated)-1]
}
return truncated
}
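// A minimal sketch of TruncateString behaviour. "né" is 3 bytes in UTF-8 ("n"
// plus a 2 byte "é"), so truncating to 2 bytes backs off to "n" to keep the
// result valid UTF-8.
func exampleTruncateString() {
	fmt.Println(TruncateString("né", 2))     // "n"
	fmt.Println(TruncateString("abcdef", 3)) // "abc"
}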
// Check to see if we should be using a partial name and return the name for the copy and the inplace flag
func (c *copy) checkPartial(ctx context.Context) (remoteForCopy string, inplace bool, err error) {
remoteForCopy = c.remote
if c.ci.Inplace || c.dstFeatures.Move == nil || !c.dstFeatures.PartialUploads || strings.HasSuffix(c.remote, ".rclonelink") {
return remoteForCopy, true, nil
}
if len(c.ci.PartialSuffix) > 16 {
return remoteForCopy, true, fmt.Errorf("expecting length of --partial-suffix to be not greater than %d but got %d", 16, len(c.ci.PartialSuffix))
}
// Avoid making the leaf name longer if it's already lengthy to avoid
// trouble with file name length limits.
// generate a stable random suffix by hashing the filename and fingerprint
hasher := crc32.New(crc32.IEEETable)
_, _ = hasher.Write([]byte(c.remote))
_, _ = hasher.Write([]byte(fs.Fingerprint(ctx, c.src, true)))
hash := hasher.Sum32()
suffix := fmt.Sprintf(".%08x%s", hash, c.ci.PartialSuffix)
base := path.Base(remoteForCopy)
if len(base) > 100 {
remoteForCopy = TruncateString(remoteForCopy, len(remoteForCopy)-len(suffix)) + suffix
} else {
remoteForCopy += suffix
}
return remoteForCopy, false, nil
}
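// A minimal sketch of how the stable partial-file suffix above is built: a
// CRC32 of the remote name and source fingerprint rendered as 8 hex digits,
// followed by the configured partial suffix. The inputs are illustrative
// stand-ins for c.remote, fs.Fingerprint(...) and c.ci.PartialSuffix.
func examplePartialSuffix() string {
	hasher := crc32.New(crc32.IEEETable)
	_, _ = hasher.Write([]byte("dir/file.bin"))        // stand-in for c.remote
	_, _ = hasher.Write([]byte("example-fingerprint")) // stand-in for the source fingerprint
	return fmt.Sprintf(".%08x%s", hasher.Sum32(), ".partial") // e.g. ".1a2b3c4d.partial"
}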
// Check to see if we have hit max transfer limits
func (c *copy) checkLimits(ctx context.Context) (err error) {
if c.ci.MaxTransfer < 0 {
return nil
}
var bytesSoFar int64
if c.ci.CutoffMode == fs.CutoffModeCautious {
bytesSoFar = accounting.Stats(ctx).GetBytesWithPending() + c.src.Size()
} else {
bytesSoFar = accounting.Stats(ctx).GetBytes()
}
if bytesSoFar >= int64(c.ci.MaxTransfer) {
if c.ci.CutoffMode == fs.CutoffModeHard {
return accounting.ErrorMaxTransferLimitReachedFatal
}
return accounting.ErrorMaxTransferLimitReachedGraceful
}
return nil
}
// Server side copy c.src to (c.f, c.remoteForCopy) if possible or return fs.ErrorCantCopy if not
func (c *copy) serverSideCopy(ctx context.Context) (actionTaken string, newDst fs.Object, err error) {
doCopy := c.dstFeatures.Copy
serverSideCopyOK := false
if doCopy == nil {
serverSideCopyOK = false
} else if SameConfig(c.src.Fs(), c.f) {
serverSideCopyOK = true
} else if SameRemoteType(c.src.Fs(), c.f) {
serverSideCopyOK = c.dstFeatures.ServerSideAcrossConfigs || c.ci.ServerSideAcrossConfigs
}
if !serverSideCopyOK {
return actionTaken, nil, fs.ErrorCantCopy
}
in := c.tr.Account(ctx, nil) // account the transfer
in.ServerSideTransferStart()
newDst, err = doCopy(ctx, c.src, c.remoteForCopy)
if err == nil {
in.ServerSideCopyEnd(newDst.Size()) // account the bytes for the server-side transfer
}
_ = in.Close()
if errors.Is(err, fs.ErrorCantCopy) {
c.tr.Reset(ctx) // skip incomplete accounting - will be overwritten by the manual copy
}
actionTaken = "Copied (server-side copy)"
return actionTaken, newDst, err
}
// Copy c.src to (c.f, c.remoteForCopy) using multiThreadCopy
func (c *copy) multiThreadCopy(ctx context.Context, uploadOptions []fs.OpenOption) (actionTaken string, newDst fs.Object, err error) {
newDst, err = multiThreadCopy(ctx, c.f, c.remoteForCopy, c.src, c.ci.MultiThreadStreams, c.tr, uploadOptions...)
if c.doUpdate {
actionTaken = "Multi-thread Copied (replaced existing)"
} else {
actionTaken = "Multi-thread Copied (new)"
}
return actionTaken, newDst, err
}
// Copy the stream from in to (c.f, c.remoteForCopy) and close it
//
// Use Rcat to handle both remotes supporting and not supporting PutStream.
func (c *copy) rcat(ctx context.Context, in io.ReadCloser) (actionTaken string, newDst fs.Object, err error) {
// Make any metadata to pass to rcat
var meta fs.Metadata
if c.ci.Metadata {
meta, err = fs.GetMetadata(ctx, c.src)
if err != nil {
fs.Errorf(c.src, "Failed to read metadata: %v", err)
}
}
// NB Rcat closes in
fsrc, ok := c.src.Fs().(fs.Fs)
if !ok {
fsrc = nil
}
newDst, err = rcatSrc(ctx, c.f, c.remoteForCopy, in, c.src.ModTime(ctx), meta, fsrc)
if c.doUpdate {
actionTaken = "Copied (Rcat, replaced existing)"
} else {
actionTaken = "Copied (Rcat, new)"
}
return actionTaken, newDst, err
}
// Copy the stream from in to (c.f, c.remoteForCopy) and close it
func (c *copy) updateOrPut(ctx context.Context, in io.ReadCloser, uploadOptions []fs.OpenOption) (actionTaken string, newDst fs.Object, err error) {
// account and buffer the transfer
inAcc := c.tr.Account(ctx, in).WithBuffer()
var wrappedSrc fs.ObjectInfo = c.src
// We try to pass the original object if possible
if c.src.Remote() != c.remoteForCopy {
wrappedSrc = fs.NewOverrideRemote(c.src, c.remoteForCopy)
}
if c.doUpdate && c.inplace {
err = c.dst.Update(ctx, inAcc, wrappedSrc, uploadOptions...)
// Make sure newDst is c.dst since we updated it
if err == nil {
newDst = c.dst
}
} else {
newDst, err = c.f.Put(ctx, inAcc, wrappedSrc, uploadOptions...)
}
closeErr := inAcc.Close()
if err == nil {
err = closeErr
}
if c.doUpdate {
actionTaken = "Copied (replaced existing)"
} else {
actionTaken = "Copied (new)"
}
return actionTaken, newDst, err
}
// Do a manual copy by reading the bytes and writing them
func (c *copy) manualCopy(ctx context.Context) (actionTaken string, newDst fs.Object, err error) {
// Remove partial files on premature exit
if !c.inplace {
defer atexit.Unregister(atexit.Register(func() {
ctx := context.Background()
c.removeFailedPartialCopy(ctx, c.f, c.remoteForCopy)
}))
}
// Options for the upload
uploadOptions := []fs.OpenOption{c.hashOption}
for _, option := range c.ci.UploadHeaders {
uploadOptions = append(uploadOptions, option)
}
if c.ci.MetadataSet != nil {
uploadOptions = append(uploadOptions, fs.MetadataOption(c.ci.MetadataSet))
}
// Options for the download
downloadOptions := []fs.OpenOption{c.hashOption}
for _, option := range c.ci.DownloadHeaders {
downloadOptions = append(downloadOptions, option)
}
if doMultiThreadCopy(ctx, c.f, c.src) {
return c.multiThreadCopy(ctx, uploadOptions)
}
var in io.ReadCloser
in, err = Open(ctx, c.src, downloadOptions...)
if err != nil {
return actionTaken, nil, fmt.Errorf("failed to open source object: %w", err)
}
// Note that c.rcat and c.updateOrPut close in
if c.src.Size() == -1 {
return c.rcat(ctx, in)
}
return c.updateOrPut(ctx, in, uploadOptions)
}
// Verify the copy
func (c *copy) verify(ctx context.Context, newDst fs.Object) (err error) {
// Verify sizes are the same after transfer
if sizeDiffers(ctx, c.src, newDst) {
return fmt.Errorf("corrupted on transfer: sizes differ src(%s) %d vs dst(%s) %d", c.src.Fs(), c.src.Size(), newDst.Fs(), newDst.Size())
}
// Verify hashes are the same after transfer - ignoring blank hashes
if c.hashType != hash.None {
// checkHashes has logs and counts errors
equal, _, srcSum, dstSum, _ := checkHashes(ctx, c.src, newDst, c.hashType)
if !equal {
return fmt.Errorf("corrupted on transfer: %v hashes differ src(%s) %q vs dst(%s) %q", c.hashType, c.src.Fs(), srcSum, newDst.Fs(), dstSum)
}
}
return nil
}
// copy src object to dst or f if nil. If dst is nil then it uses
// remote as the name of the new object.
//
// It returns the destination object if possible. Note that this may
// be nil.
func (c *copy) copy(ctx context.Context) (newDst fs.Object, err error) {
var actionTaken string
retry := true
for tries := 0; retry && tries < c.maxTries; tries++ {
// Check we haven't hit any accounting limits
err = c.checkLimits(ctx)
if err != nil {
return nil, err
}
// Try server side copy
actionTaken, newDst, err = c.serverSideCopy(ctx)
// If can't server-side copy, do it manually
if errors.Is(err, fs.ErrorCantCopy) {
actionTaken, newDst, err = c.manualCopy(ctx)
}
// End if ctx is in error
if fserrors.ContextError(ctx, &err) {
break
}
// Retry if err returned a retry error
retry = false
if fserrors.IsRetryError(err) || fserrors.ShouldRetry(err) {
retry = true
} else if t, ok := pacer.IsRetryAfter(err); ok {
fs.Debugf(c.src, "Sleeping for %v (as indicated by the server) to obey Retry-After error: %v", t, err)
time.Sleep(t)
retry = true
}
if retry {
fs.Debugf(c.src, "Received error: %v - low level retry %d/%d", err, tries, c.maxTries)
c.tr.Reset(ctx) // skip incomplete accounting - will be overwritten by retry
continue
}
}
if err != nil {
err = fs.CountError(ctx, err)
fs.Errorf(c.src, "Failed to copy: %v", err)
if !c.inplace {
c.removeFailedPartialCopy(ctx, c.f, c.remoteForCopy)
}
return newDst, err
}
// Verify the copy
err = c.verify(ctx, newDst)
if err != nil {
fs.Errorf(newDst, "%v", err)
err = fs.CountError(ctx, err)
c.removeFailedCopy(ctx, newDst)
return nil, err
}
// Move the copied file to its real destination.
if !c.inplace && c.remoteForCopy != c.remote {
movedNewDst, err := c.dstFeatures.Move(ctx, newDst, c.remote)
if err != nil {
fs.Errorf(newDst, "partial file rename failed: %v", err)
err = fs.CountError(ctx, err)
c.removeFailedCopy(ctx, newDst)
return nil, err
}
fs.Debugf(newDst, "renamed to: %s", c.remote)
newDst = movedNewDst
}
// Log what we have done
if newDst != nil && c.src.String() != newDst.String() {
actionTaken = fmt.Sprintf("%s to: %s", actionTaken, newDst.String())
}
fs.Infof(c.src, "%s%s", actionTaken, fs.LogValueHide("size", fs.SizeSuffix(c.src.Size())))
return newDst, nil
}
// Copy src object to dst or f if nil. If dst is nil then it uses
// remote as the name of the new object.
//
// It returns the destination object if possible. Note that this may
// be nil.
func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) {
ci := fs.GetConfig(ctx)
tr := accounting.Stats(ctx).NewTransfer(src, f)
defer func() {
tr.Done(ctx, err)
}()
if SkipDestructive(ctx, src, "copy") {
in := tr.Account(ctx, nil)
in.DryRun(src.Size())
return newDst, nil
}
c := ©{
f: f,
dstFeatures: f.Features(),
dst: dst,
remote: transform.Path(ctx, remote, false),
src: src,
ci: ci,
tr: tr,
maxTries: ci.LowLevelRetries,
doUpdate: dst != nil,
}
c.hashType, c.hashOption = CommonHash(ctx, f, src.Fs())
if c.dst != nil {
c.remote = transform.Path(ctx, c.dst.Remote(), false)
}
// Are we using partials?
//
// If so set the flag and update the name we use for the copy
c.remoteForCopy, c.inplace, err = c.checkPartial(ctx)
if err != nil {
return nil, err
}
// Do the copy now everything is set up
return c.copy(ctx)
}
// CopyFile copies a single file possibly to a new name
func CopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) {
return moveOrCopyFile(ctx, fdst, fsrc, dstFileName, srcFileName, true, false)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/operations/reopen_test.go | fs/operations/reopen_test.go | package operations
import (
"context"
"errors"
"io"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fstest/mockobject"
"github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/readers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// check interfaces
var (
_ io.ReadSeekCloser = (*ReOpen)(nil)
_ io.ReaderAt = (*ReOpen)(nil)
_ pool.DelayAccountinger = (*ReOpen)(nil)
)
var errorTestError = errors.New("test error")
// this is a wrapper for a mockobject with a custom Open function
//
// breaks indicate the number of bytes to read before returning an
// error
type reOpenTestObject struct {
fs.Object
t *testing.T
wantStart int64
breaks []int64
unknownSize bool
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
//
// This will break after reading the number of bytes in breaks
func (o *reOpenTestObject) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
// Lots of backends do this - make sure it works as it modifies options
fs.FixRangeOption(options, o.Size())
gotHash := false
gotRange := false
startPos := int64(0)
for _, option := range options {
switch x := option.(type) {
case *fs.HashesOption:
gotHash = true
case *fs.RangeOption:
gotRange = true
startPos = x.Start
if o.unknownSize {
assert.Equal(o.t, int64(-1), x.End)
}
case *fs.SeekOption:
startPos = x.Offset
}
}
assert.Equal(o.t, o.wantStart, startPos)
// Check if ranging, mustn't have hash if offset != 0
if gotHash && gotRange {
assert.Equal(o.t, int64(0), startPos)
}
rc, err := o.Object.Open(ctx, options...)
if err != nil {
return nil, err
}
if len(o.breaks) > 0 {
// Pop a breakpoint off
N := o.breaks[0]
o.breaks = o.breaks[1:]
o.wantStart += N
// If 0 then return an error immediately
if N == 0 {
return nil, errorTestError
}
// Read N bytes then an error
r := io.MultiReader(&io.LimitedReader{R: rc, N: N}, readers.ErrorReader{Err: errorTestError})
// Wrap with Close in a new readCloser
rc = readCloser{Reader: r, Closer: rc}
}
return rc, nil
}
func TestReOpen(t *testing.T) {
for _, testName := range []string{"Normal", "WithRangeOption", "WithSeekOption", "UnknownSize"} {
t.Run(testName, func(t *testing.T) {
// Contents for the mock object
var (
reOpenTestcontents = []byte("0123456789")
expectedRead = reOpenTestcontents
rangeOption *fs.RangeOption
seekOption *fs.SeekOption
unknownSize = false
)
switch testName {
case "Normal":
case "WithRangeOption":
rangeOption = &fs.RangeOption{Start: 1, End: 7} // range is inclusive
expectedRead = reOpenTestcontents[1:8]
case "WithSeekOption":
seekOption = &fs.SeekOption{Offset: 2}
expectedRead = reOpenTestcontents[2:]
case "UnknownSize":
rangeOption = &fs.RangeOption{Start: 1, End: -1}
expectedRead = reOpenTestcontents[1:]
unknownSize = true
default:
panic("bad test name")
}
// Start the test with the given breaks
testReOpen := func(breaks []int64, maxRetries int) (*ReOpen, *reOpenTestObject, error) {
srcOrig := mockobject.New("potato").WithContent(reOpenTestcontents, mockobject.SeekModeNone)
srcOrig.SetUnknownSize(unknownSize)
src := &reOpenTestObject{
Object: srcOrig,
t: t,
breaks: breaks,
unknownSize: unknownSize,
}
opts := []fs.OpenOption{}
if rangeOption == nil && seekOption == nil {
opts = append(opts, &fs.HashesOption{Hashes: hash.NewHashSet(hash.MD5)})
}
if rangeOption != nil {
opts = append(opts, rangeOption)
src.wantStart = rangeOption.Start
}
if seekOption != nil {
opts = append(opts, seekOption)
src.wantStart = seekOption.Offset
}
rc, err := NewReOpen(context.Background(), src, maxRetries, opts...)
return rc, src, err
}
// Reset the start after a seek, taking into account the offset
setWantStart := func(src *reOpenTestObject, x int64) {
src.wantStart = x
if rangeOption != nil {
src.wantStart += rangeOption.Start
} else if seekOption != nil {
src.wantStart += seekOption.Offset
}
}
t.Run("Basics", func(t *testing.T) {
// open
h, _, err := testReOpen(nil, 10)
assert.NoError(t, err)
// Check contents read correctly
got, err := io.ReadAll(h)
assert.NoError(t, err)
assert.Equal(t, expectedRead, got)
// Check read after end
var buf = make([]byte, 1)
n, err := h.Read(buf)
assert.Equal(t, 0, n)
assert.Equal(t, io.EOF, err)
// Rewind the stream
_, err = h.Seek(0, io.SeekStart)
require.NoError(t, err)
// Check contents read correctly
got, err = io.ReadAll(h)
assert.NoError(t, err)
assert.Equal(t, expectedRead, got)
// Check close
assert.NoError(t, h.Close())
// Check double close
assert.Equal(t, errFileClosed, h.Close())
// Check read after close
n, err = h.Read(buf)
assert.Equal(t, 0, n)
assert.Equal(t, errFileClosed, err)
})
t.Run("ErrorAtStart", func(t *testing.T) {
// open with immediate breaking
h, _, err := testReOpen([]int64{0}, 10)
assert.Equal(t, errorTestError, err)
assert.Nil(t, h)
})
t.Run("WithErrors", func(t *testing.T) {
// open with a few break points but less than the max
h, _, err := testReOpen([]int64{2, 1, 3}, 10)
assert.NoError(t, err)
// check contents
got, err := io.ReadAll(h)
assert.NoError(t, err)
assert.Equal(t, expectedRead, got)
// check close
assert.NoError(t, h.Close())
})
t.Run("TooManyErrors", func(t *testing.T) {
// open with a few break points but >= the max
h, _, err := testReOpen([]int64{2, 1, 3}, 3)
assert.NoError(t, err)
// check contents
got, err := io.ReadAll(h)
assert.Equal(t, errorTestError, err)
assert.Equal(t, expectedRead[:6], got)
// check old error is returned
var buf = make([]byte, 1)
n, err := h.Read(buf)
assert.Equal(t, 0, n)
assert.Equal(t, errTooManyTries, err)
// Check close
assert.Equal(t, errFileClosed, h.Close())
})
t.Run("ReadAt", func(t *testing.T) {
// open
h, src, err := testReOpen([]int64{2, 1, 3}, 10)
assert.NoError(t, err)
buf := make([]byte, 5)
// Read at 0
n, err := h.ReadAt(buf, 0)
require.NoError(t, err)
assert.Equal(t, 5, n)
assert.Equal(t, expectedRead[:n], buf[:n])
// Read at 1
setWantStart(src, 1)
n, err = h.ReadAt(buf[:3], 1)
require.NoError(t, err)
assert.Equal(t, 3, n)
assert.Equal(t, expectedRead[1:n+1], buf[:n])
// check position unchanged
pos, err := h.Seek(0, io.SeekCurrent)
require.NoError(t, err)
assert.Equal(t, int64(0), pos)
// check close
assert.NoError(t, h.Close())
_, err = h.Seek(0, io.SeekCurrent)
assert.Equal(t, errFileClosed, err)
})
t.Run("Seek", func(t *testing.T) {
// open
h, src, err := testReOpen([]int64{2, 1, 3}, 10)
assert.NoError(t, err)
// Seek to end
pos, err := h.Seek(int64(len(expectedRead)), io.SeekStart)
assert.NoError(t, err)
assert.Equal(t, int64(len(expectedRead)), pos)
// Seek to start
pos, err = h.Seek(0, io.SeekStart)
assert.NoError(t, err)
assert.Equal(t, int64(0), pos)
// Should not allow seek past end
pos, err = h.Seek(int64(len(expectedRead))+1, io.SeekCurrent)
if !unknownSize {
assert.Equal(t, errSeekPastEnd, err)
assert.Equal(t, len(expectedRead), int(pos))
} else {
assert.Equal(t, nil, err)
assert.Equal(t, len(expectedRead)+1, int(pos))
// Seek back to start to get tests in sync
pos, err = h.Seek(0, io.SeekStart)
assert.NoError(t, err)
assert.Equal(t, int64(0), pos)
}
// Should not allow seek to a negative position
pos, err = h.Seek(-1, io.SeekCurrent)
assert.Equal(t, errNegativeSeek, err)
assert.Equal(t, 0, int(pos))
// Should not allow seek with invalid whence
pos, err = h.Seek(0, 3)
assert.Equal(t, errInvalidWhence, err)
assert.Equal(t, 0, int(pos))
// check read
dst := make([]byte, 5)
n, err := h.Read(dst)
assert.Nil(t, err)
assert.Equal(t, 5, n)
assert.Equal(t, expectedRead[:5], dst)
// Test io.SeekCurrent
pos, err = h.Seek(-3, io.SeekCurrent)
assert.Nil(t, err)
assert.Equal(t, 2, int(pos))
// check read
setWantStart(src, 2)
n, err = h.Read(dst)
assert.Nil(t, err)
assert.Equal(t, 5, n)
assert.Equal(t, expectedRead[2:7], dst)
pos, err = h.Seek(-2, io.SeekCurrent)
assert.Nil(t, err)
assert.Equal(t, 5, int(pos))
// Test io.SeekEnd
pos, err = h.Seek(-3, io.SeekEnd)
if !unknownSize {
assert.Nil(t, err)
assert.Equal(t, len(expectedRead)-3, int(pos))
} else {
assert.Equal(t, errBadEndSeek, err)
assert.Equal(t, 0, int(pos))
// sync
pos, err = h.Seek(1, io.SeekCurrent)
assert.Nil(t, err)
assert.Equal(t, 6, int(pos))
}
// check read
dst = make([]byte, 3)
setWantStart(src, int64(len(expectedRead)-3))
n, err = h.Read(dst)
assert.Nil(t, err)
assert.Equal(t, 3, n)
assert.Equal(t, expectedRead[len(expectedRead)-3:], dst)
// check close
assert.NoError(t, h.Close())
_, err = h.Seek(0, io.SeekCurrent)
assert.Equal(t, errFileClosed, err)
})
t.Run("AccountRead", func(t *testing.T) {
h, _, err := testReOpen(nil, 10)
assert.NoError(t, err)
var total int
h.SetAccounting(func(n int) error {
total += n
return nil
})
dst := make([]byte, 3)
n, err := h.Read(dst)
assert.Equal(t, 3, n)
assert.NoError(t, err)
assert.Equal(t, 3, total)
})
t.Run("AccountReadDelay", func(t *testing.T) {
h, _, err := testReOpen(nil, 10)
assert.NoError(t, err)
var total int
h.SetAccounting(func(n int) error {
total += n
return nil
})
rewind := func() {
_, err := h.Seek(0, io.SeekStart)
require.NoError(t, err)
}
h.DelayAccounting(3)
dst := make([]byte, 16)
n, err := h.Read(dst)
assert.Equal(t, len(expectedRead), n)
assert.Equal(t, io.EOF, err)
assert.Equal(t, 0, total)
rewind()
n, err = h.Read(dst)
assert.Equal(t, len(expectedRead), n)
assert.Equal(t, io.EOF, err)
assert.Equal(t, 0, total)
rewind()
n, err = h.Read(dst)
assert.Equal(t, len(expectedRead), n)
assert.Equal(t, io.EOF, err)
assert.Equal(t, len(expectedRead), total)
rewind()
n, err = h.Read(dst)
assert.Equal(t, len(expectedRead), n)
assert.Equal(t, io.EOF, err)
assert.Equal(t, 2*len(expectedRead), total)
rewind()
})
t.Run("AccountReadError", func(t *testing.T) {
// Test accounting errors
h, _, err := testReOpen(nil, 10)
assert.NoError(t, err)
h.SetAccounting(func(n int) error {
return errorTestError
})
dst := make([]byte, 3)
n, err := h.Read(dst)
assert.Equal(t, 3, n)
assert.Equal(t, errorTestError, err)
})
})
}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/operations/operations.go | fs/operations/operations.go | // Package operations does generic operations on filesystems and objects
package operations
import (
"bytes"
"context"
"encoding/base64"
"encoding/csv"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"mime"
"net/http"
"os"
"path"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/errcount"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/transform"
"golang.org/x/sync/errgroup"
"golang.org/x/text/unicode/norm"
)
// CheckHashes checks the two files to see if they have common
// known hash types and compares them
//
// Returns.
//
// equal - which is equality of the hashes
//
// hash - the HashType. This is HashNone if either of the hashes were
// unset or a compatible hash couldn't be found.
//
// err - may return an error which will already have been logged
//
// If an error is returned it will return equal as false
func CheckHashes(ctx context.Context, src fs.ObjectInfo, dst fs.Object) (equal bool, ht hash.Type, err error) {
common := src.Fs().Hashes().Overlap(dst.Fs().Hashes())
// fs.Debugf(nil, "Shared hashes: %v", common)
if common.Count() == 0 {
return true, hash.None, nil
}
equal, ht, _, _, err = checkHashes(ctx, src, dst, common.GetOne())
return equal, ht, err
}
var errNoHash = errors.New("no hash available")
// checkHashes does the work of CheckHashes but takes a hash.Type and
// returns the effective hash type used.
func checkHashes(ctx context.Context, src fs.ObjectInfo, dst fs.Object, ht hash.Type) (equal bool, htOut hash.Type, srcHash, dstHash string, err error) {
// Calculate hashes in parallel
g, ctx := errgroup.WithContext(ctx)
var srcErr, dstErr error
g.Go(func() (err error) {
srcHash, srcErr = src.Hash(ctx, ht)
if srcErr != nil {
return srcErr
}
if srcHash == "" {
fs.Debugf(src, "Src hash empty - aborting Dst hash check")
return errNoHash
}
return nil
})
g.Go(func() (err error) {
dstHash, dstErr = dst.Hash(ctx, ht)
if dstErr != nil {
return dstErr
}
if dstHash == "" {
fs.Debugf(dst, "Dst hash empty - aborting Src hash check")
return errNoHash
}
return nil
})
err = g.Wait()
if err == errNoHash {
return true, hash.None, srcHash, dstHash, nil
}
if srcErr != nil {
err = fs.CountError(ctx, srcErr)
fs.Errorf(src, "Failed to calculate src hash: %v", err)
}
if dstErr != nil {
err = fs.CountError(ctx, dstErr)
fs.Errorf(dst, "Failed to calculate dst hash: %v", err)
}
if err != nil {
return false, ht, srcHash, dstHash, err
}
if srcHash != dstHash {
fs.Debugf(src, "%v = %s (%v)", ht, srcHash, src.Fs())
fs.Debugf(dst, "%v = %s (%v)", ht, dstHash, dst.Fs())
return false, ht, srcHash, dstHash, nil
}
fs.Debugf(src, "%v = %s OK", ht, srcHash)
return true, ht, srcHash, dstHash, nil
}
// Equal checks to see if the src and dst objects are equal by looking at
// size, mtime and hash
//
// If the src and dst size are different then it is considered to be
// not equal. If --size-only is in effect then this is the only check
// that is done. If --ignore-size is in effect then this check is
// skipped and the files are considered the same size.
//
// If the size is the same and the mtime is the same then it is
// considered to be equal. This check is skipped if using --checksum.
//
// If the size is the same and mtime is different, unreadable or
// --checksum is set and the hash is the same then the file is
// considered to be equal. In this case the mtime on the dst is
// updated if --checksum is not set.
//
// Otherwise the file is considered to be not equal including if there
// were errors reading info.
func Equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object) bool {
return equal(ctx, src, dst, defaultEqualOpt(ctx))
}
// DirsEqual is like Equal but for dirs instead of objects.
// It returns true if two dirs should be considered "equal" for the purposes of syncCopyMove
// (in other words, true == "skip updating modtime/metadata for this dir".)
// Unlike Equal, it does not consider size or checksum, as these do not apply to directories.
func DirsEqual(ctx context.Context, src, dst fs.Directory, opt DirsEqualOpt) (equal bool) {
if dst == nil {
return false
}
ci := fs.GetConfig(ctx)
if ci.SizeOnly || ci.Immutable || ci.IgnoreExisting || opt.ModifyWindow == fs.ModTimeNotSupported {
return true
}
if ci.IgnoreTimes {
return false
}
if !(opt.SetDirModtime || opt.SetDirMetadata) {
return true
}
srcModTime, dstModTime := src.ModTime(ctx), dst.ModTime(ctx)
if srcModTime.IsZero() || dstModTime.IsZero() {
return false
}
dt := dstModTime.Sub(srcModTime)
if dt < opt.ModifyWindow && dt > -opt.ModifyWindow {
fs.Debugf(dst, "Directory modification time the same (differ by %s, within tolerance %s)", dt, opt.ModifyWindow)
return true
}
if ci.UpdateOlder && dt >= opt.ModifyWindow {
fs.Debugf(dst, "Destination directory is newer than source, skipping")
return true
}
return false
}
// sizeDiffers compares the sizes of src and dst, taking into account the
// various ways of ignoring sizes
func sizeDiffers(ctx context.Context, src, dst fs.ObjectInfo) bool {
ci := fs.GetConfig(ctx)
if ci.IgnoreSize || src.Size() < 0 || dst.Size() < 0 {
return false
}
if src.Size() == dst.Size() {
fs.Debugf(dst, "size = %d OK", dst.Size())
return false
}
fs.Debugf(src, "size = %d (%v)", src.Size(), src.Fs())
fs.Debugf(dst, "size = %d (%v)", dst.Size(), dst.Fs())
return true
}
var checksumWarning sync.Once
// options for the equal() function
type equalOpt struct {
sizeOnly bool // if set only check size
checkSum bool // if set check checksum+size instead of modtime+size
updateModTime bool // if set update the modtime if hashes identical and checking with modtime+size
forceModTimeMatch bool // if set assume modtimes match
}
// default set of options for equal()
func defaultEqualOpt(ctx context.Context) equalOpt {
ci := fs.GetConfig(ctx)
return equalOpt{
sizeOnly: ci.SizeOnly,
checkSum: ci.CheckSum,
updateModTime: !ci.NoUpdateModTime,
forceModTimeMatch: false,
}
}
// DirsEqualOpt represents options for the DirsEqual() function
type DirsEqualOpt struct {
ModifyWindow time.Duration // Max time diff to be considered the same
SetDirModtime bool // whether to consider dir modtime
SetDirMetadata bool // whether to consider dir metadata
}
var modTimeUploadOnce sync.Once
// emit a log if we are about to upload a file to set its modification time
func logModTimeUpload(dst fs.Object) {
modTimeUploadOnce.Do(func() {
fs.Logf(dst.Fs(), "Forced to upload files to set modification times on this backend.")
})
}
// EqualFn allows replacing Equal() with a custom function during NeedTransfer()
type (
EqualFn func(ctx context.Context, src fs.ObjectInfo, dst fs.Object) bool
equalFnContextKey struct{}
)
var equalFnKey = equalFnContextKey{}
// WithEqualFn stores equalFn in ctx and returns a copy of ctx in which equalFnKey = equalFn
func WithEqualFn(ctx context.Context, equalFn EqualFn) context.Context {
return context.WithValue(ctx, equalFnKey, equalFn)
}
func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt) bool {
ci := fs.GetConfig(ctx)
logger, _ := GetLogger(ctx)
if sizeDiffers(ctx, src, dst) {
fs.Debug(src, "Sizes differ")
logger(ctx, Differ, src, dst, nil)
return false
}
if opt.sizeOnly {
fs.Debugf(src, "Sizes identical")
logger(ctx, Match, src, dst, nil)
return true
}
// Assert: Size is equal or being ignored
// If checking checksum and not modtime
if opt.checkSum {
// Check the hash
same, ht, _ := CheckHashes(ctx, src, dst)
if !same {
fs.Debugf(src, "%v differ", ht)
logger(ctx, Differ, src, dst, nil)
return false
}
if ht == hash.None {
common := src.Fs().Hashes().Overlap(dst.Fs().Hashes())
if common.Count() == 0 {
checksumWarning.Do(func() {
fs.Logf(dst.Fs(), "--checksum is in use but the source and destination have no hashes in common; falling back to --size-only")
})
}
fs.Debugf(src, "Size of src and dst objects identical")
} else {
fs.Debugf(src, "Size and %v of src and dst objects identical", ht)
}
logger(ctx, Match, src, dst, nil)
return true
}
srcModTime := src.ModTime(ctx)
if !opt.forceModTimeMatch {
// Sizes the same so check the mtime
modifyWindow := fs.GetModifyWindow(ctx, src.Fs(), dst.Fs())
if modifyWindow == fs.ModTimeNotSupported {
fs.Debugf(src, "Sizes identical")
logger(ctx, Match, src, dst, nil)
return true
}
dstModTime := dst.ModTime(ctx)
dt := dstModTime.Sub(srcModTime)
if dt < modifyWindow && dt > -modifyWindow {
fs.Debugf(src, "Size and modification time the same (differ by %s, within tolerance %s)", dt, modifyWindow)
logger(ctx, Match, src, dst, nil)
return true
}
fs.Debugf(src, "Modification times differ by %s: %v, %v", dt, srcModTime, dstModTime)
}
// Check if the hashes are the same
same, ht, _ := CheckHashes(ctx, src, dst)
if !same {
fs.Debugf(src, "%v differ", ht)
logger(ctx, Differ, src, dst, nil)
return false
}
if ht == hash.None && !ci.RefreshTimes {
// if couldn't check hash, return that they differ
logger(ctx, Differ, src, dst, nil)
return false
}
// mod time differs but hash is the same, so reset the mod time if required
if opt.updateModTime {
if !SkipDestructive(ctx, src, "update modification time") {
// Size and hash the same but mtime different
// Error if objects are treated as immutable
if ci.Immutable {
fs.Errorf(dst, "Timestamp mismatch between immutable objects")
logger(ctx, Differ, src, dst, nil)
return false
}
// Update the mtime of the dst object here
err := dst.SetModTime(ctx, srcModTime)
if errors.Is(err, fs.ErrorCantSetModTime) {
logModTimeUpload(dst)
fs.Infof(dst, "src and dst identical but can't set mod time without re-uploading")
logger(ctx, Differ, src, dst, nil)
return false
} else if errors.Is(err, fs.ErrorCantSetModTimeWithoutDelete) {
logModTimeUpload(dst)
fs.Infof(dst, "src and dst identical but can't set mod time without deleting and re-uploading")
// Remove the file if BackupDir isn't set. If BackupDir is set we would rather have the old file
// put in the BackupDir than deleted which is what will happen if we don't delete it.
if ci.BackupDir == "" {
err = dst.Remove(ctx)
if err != nil {
fs.Errorf(dst, "failed to delete before re-upload: %v", err)
}
}
logger(ctx, Differ, src, dst, nil)
return false
} else if err != nil {
err = fs.CountError(ctx, err)
fs.Errorf(dst, "Failed to set modification time: %v", err)
} else {
fs.Infof(src, "Updated modification time in destination")
}
}
}
logger(ctx, Match, src, dst, nil)
return true
}
// CommonHash returns a single hash.Type and a HashOption with that
// type which is in common between the two fs.Fs.
func CommonHash(ctx context.Context, fa, fb fs.Info) (hash.Type, *fs.HashesOption) {
ci := fs.GetConfig(ctx)
// work out which hash to use - limit to 1 hash in common
var common hash.Set
hashType := hash.None
if !ci.IgnoreChecksum {
common = fb.Hashes().Overlap(fa.Hashes())
if common.Count() > 0 {
hashType = common.GetOne()
common = hash.Set(hashType)
}
}
return hashType, &fs.HashesOption{Hashes: common}
}
// SameObject returns true if src and dst could be pointing to the
// same object.
func SameObject(src, dst fs.Object) bool {
srcFs, dstFs := src.Fs(), dst.Fs()
if !SameConfig(srcFs, dstFs) {
// If same remote type then check ID of objects if available
doSrcID, srcIDOK := src.(fs.IDer)
doDstID, dstIDOK := dst.(fs.IDer)
if srcIDOK && dstIDOK && SameRemoteType(srcFs, dstFs) {
srcID, dstID := doSrcID.ID(), doDstID.ID()
if srcID != "" && srcID == dstID {
return true
}
}
return false
}
srcPath := path.Join(srcFs.Root(), src.Remote())
dstPath := path.Join(dstFs.Root(), dst.Remote())
if srcFs.Features().IsLocal && dstFs.Features().IsLocal && runtime.GOOS == "darwin" {
if norm.NFC.String(srcPath) == norm.NFC.String(dstPath) {
return true
}
}
if dst.Fs().Features().CaseInsensitive {
srcPath = strings.ToLower(srcPath)
dstPath = strings.ToLower(dstPath)
}
return srcPath == dstPath
}
// Move src object to dst or fdst if nil. If dst is nil then it uses
// remote as the name of the new object.
//
// Note that you must check the destination does not exist before
// calling this and pass it as dst. If you pass dst=nil and the
// destination does exist then this may create duplicates or return
// errors.
//
// It returns the destination object if possible. Note that this may
// be nil.
//
// This is accounted as a check.
func Move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) {
return move(ctx, fdst, dst, remote, src, false)
}
// MoveTransfer moves src object to dst or fdst if nil. If dst is nil
// then it uses remote as the name of the new object.
//
// This is identical to Move but is accounted as a transfer.
func MoveTransfer(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) {
return move(ctx, fdst, dst, remote, src, true)
}
// move - see Move for help
func move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.Object, isTransfer bool) (newDst fs.Object, err error) {
origRemote := remote // avoid double-transform on fallback to copy
remote = transform.Path(ctx, remote, false)
ci := fs.GetConfig(ctx)
newDst = dst
if ci.DryRun && dst != nil && SameObject(src, dst) && src.Remote() == transform.Path(ctx, dst.Remote(), false) {
return // avoid SkipDestructive log for objects that won't really be moved
}
var tr *accounting.Transfer
if isTransfer {
tr = accounting.Stats(ctx).NewTransfer(src, fdst)
} else {
tr = accounting.Stats(ctx).NewCheckingTransfer(src, "moving")
}
defer func() {
if err == nil {
accounting.Stats(ctx).Renames(1)
}
tr.Done(ctx, err)
}()
action := "move"
if remote != src.Remote() {
action += " to " + remote
}
if SkipDestructive(ctx, src, action) {
in := tr.Account(ctx, nil)
in.DryRun(src.Size())
return newDst, nil
}
// See if we have Move available
if doMove := fdst.Features().Move; doMove != nil && (SameConfig(src.Fs(), fdst) || (SameRemoteType(src.Fs(), fdst) && (fdst.Features().ServerSideAcrossConfigs || ci.ServerSideAcrossConfigs))) {
// Delete destination if it exists and is not the same file as src (could be same file while seemingly different if the remote is case insensitive)
if dst != nil {
remote = transform.Path(ctx, dst.Remote(), false)
if !SameObject(src, dst) {
err = DeleteFile(ctx, dst)
if err != nil {
return newDst, err
}
} else if src.Remote() == remote {
return newDst, nil
} else if needsMoveCaseInsensitive(fdst, fdst, remote, src.Remote(), false) {
doMove = func(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
return MoveCaseInsensitive(ctx, fdst, fdst, remote, src.Remote(), false, src)
}
}
} else if needsMoveCaseInsensitive(fdst, fdst, remote, src.Remote(), false) {
doMove = func(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
return MoveCaseInsensitive(ctx, fdst, fdst, remote, src.Remote(), false, src)
}
}
// Move dst <- src
in := tr.Account(ctx, nil) // account the transfer
in.ServerSideTransferStart()
newDst, err = doMove(ctx, src, remote)
switch err {
case nil:
if newDst != nil && src.String() != newDst.String() {
fs.Infof(src, "Moved (server-side) to: %s", newDst.String())
} else {
fs.Infof(src, "Moved (server-side)")
}
in.ServerSideMoveEnd(newDst.Size()) // account the bytes for the server-side transfer
_ = in.Close()
return newDst, nil
case fs.ErrorCantMove:
fs.Debugf(src, "Can't move, switching to copy")
_ = in.Close()
default:
err = fs.CountError(ctx, err)
fs.Errorf(src, "Couldn't move: %v", err)
_ = in.Close()
return newDst, err
}
}
// Move not found or didn't work so copy dst <- src
if origRemote != remote {
dst = nil
}
newDst, err = Copy(ctx, fdst, dst, origRemote, src)
if err != nil {
fs.Errorf(src, "Not deleting source as copy failed: %v", err)
return newDst, err
}
// Delete src if no error on copy
return newDst, DeleteFile(ctx, src)
}
// CanServerSideMove returns true if fdst support server-side moves or
// server-side copies
//
// Some remotes simulate rename by server-side copy and delete, so include
// remotes that implement either Mover or Copier.
func CanServerSideMove(fdst fs.Fs) bool {
canMove := fdst.Features().Move != nil
canCopy := fdst.Features().Copy != nil
return canMove || canCopy
}
// SuffixName adds the current --suffix to the remote, obeying
// --suffix-keep-extension if set
func SuffixName(ctx context.Context, remote string) string {
ci := fs.GetConfig(ctx)
if ci.Suffix == "" {
return remote
}
if ci.SuffixKeepExtension {
return transform.SuffixKeepExtension(remote, ci.Suffix)
}
return remote + ci.Suffix
}
// DeleteFileWithBackupDir deletes a single file respecting --dry-run
// and accumulating stats and errors.
//
// If backupDir is set then it moves the file to there instead of
// deleting
func DeleteFileWithBackupDir(ctx context.Context, dst fs.Object, backupDir fs.Fs) (err error) {
tr := accounting.Stats(ctx).NewCheckingTransfer(dst, "deleting")
defer func() {
tr.Done(ctx, err)
}()
err = accounting.Stats(ctx).DeleteFile(ctx, dst.Size())
if err != nil {
return err
}
action, actioned := "delete", "Deleted"
if backupDir != nil {
action, actioned = "move into backup dir", "Moved into backup dir"
}
skip := SkipDestructive(ctx, dst, action)
if skip {
// do nothing
} else if backupDir != nil {
err = MoveBackupDir(ctx, backupDir, dst)
} else {
err = dst.Remove(ctx)
}
if err != nil {
fs.Errorf(dst, "Couldn't %s: %v", action, err)
err = fs.CountError(ctx, err)
} else if !skip {
fs.Infof(dst, "%s", actioned)
}
return err
}
// DeleteFile deletes a single file respecting --dry-run and accumulating stats and errors.
//
// To move the file to --backup-dir instead of deleting it, use
// DeleteFileWithBackupDir.
func DeleteFile(ctx context.Context, dst fs.Object) (err error) {
return DeleteFileWithBackupDir(ctx, dst, nil)
}
// DeleteFilesWithBackupDir removes all the files passed in the
// channel
//
// If backupDir is set the files will be placed into that directory
// instead of being deleted.
func DeleteFilesWithBackupDir(ctx context.Context, toBeDeleted fs.ObjectsChan, backupDir fs.Fs) error {
var wg sync.WaitGroup
ci := fs.GetConfig(ctx)
wg.Add(ci.Checkers)
var errorCount atomic.Int32
var fatalErrorCount atomic.Int32
for range ci.Checkers {
go func() {
defer wg.Done()
for dst := range toBeDeleted {
err := DeleteFileWithBackupDir(ctx, dst, backupDir)
if err != nil {
errorCount.Add(1)
logger, _ := GetLogger(ctx)
logger(ctx, TransferError, nil, dst, err)
if fserrors.IsFatalError(err) {
fs.Errorf(dst, "Got fatal error on delete: %s", err)
fatalErrorCount.Add(1)
return
}
}
}
}()
}
fs.Debugf(nil, "Waiting for deletions to finish")
wg.Wait()
if errorCount.Load() > 0 {
err := fmt.Errorf("failed to delete %d files", errorCount.Load())
if fatalErrorCount.Load() > 0 {
return fserrors.FatalError(err)
}
return err
}
return nil
}
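// A minimal sketch of the bounded worker pattern used by
// DeleteFilesWithBackupDir above: a fixed number of goroutines drain a shared
// channel and a WaitGroup waits for them all to finish. The worker count and
// payload type here are illustrative only.
func exampleWorkerPool(items <-chan string, process func(string)) {
	const workers = 4
	var wg sync.WaitGroup
	wg.Add(workers)
	for range workers {
		go func() {
			defer wg.Done()
			for item := range items {
				process(item)
			}
		}()
	}
	wg.Wait()
}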
// DeleteFiles removes all the files passed in the channel
func DeleteFiles(ctx context.Context, toBeDeleted fs.ObjectsChan) error {
return DeleteFilesWithBackupDir(ctx, toBeDeleted, nil)
}
// ReadFile reads the object into memory and accounts it
func ReadFile(ctx context.Context, o fs.Object) (b []byte, err error) {
tr := accounting.Stats(ctx).NewTransfer(o, nil)
defer func() {
tr.Done(ctx, err)
}()
in0, err := Open(ctx, o)
if err != nil {
return nil, fmt.Errorf("failed to open %v: %w", o, err)
}
in := tr.Account(ctx, in0).WithBuffer() // account and buffer the transfer
defer fs.CheckClose(in, &err) // closes in0 also
b, err = io.ReadAll(in)
if err != nil {
return nil, fmt.Errorf("failed to read %v: %w", o, err)
}
return b, nil
}
// SameRemoteType returns true if fdst and fsrc are the same type
func SameRemoteType(fdst, fsrc fs.Info) bool {
return fmt.Sprintf("%T", fdst) == fmt.Sprintf("%T", fsrc)
}
// SameConfig returns true if fdst and fsrc are using the same config
// file entry
func SameConfig(fdst, fsrc fs.Info) bool {
return fdst.Name() == fsrc.Name()
}
// SameConfigArr returns true if any of []fsrcs has same config file entry with fdst
func SameConfigArr(fdst fs.Info, fsrcs []fs.Fs) bool {
for _, fsrc := range fsrcs {
if fdst.Name() == fsrc.Name() {
return true
}
}
return false
}
// Same returns true if fdst and fsrc point to the same underlying Fs
func Same(fdst, fsrc fs.Info) bool {
return SameConfig(fdst, fsrc) && strings.Trim(fdst.Root(), "/") == strings.Trim(fsrc.Root(), "/")
}
// fixRoot returns the Root with a trailing / if not empty.
//
// It returns a case folded version for case insensitive file systems
func fixRoot(f fs.Info) (s string, folded string) {
s = strings.Trim(filepath.ToSlash(f.Root()), "/")
if s != "" {
s += "/"
}
folded = s
if f.Features().CaseInsensitive {
folded = strings.ToLower(s)
}
return s, folded
}
// OverlappingFilterCheck returns true if fdst and fsrc point to the same
// underlying Fs and they overlap without fdst being excluded by any filter rule.
func OverlappingFilterCheck(ctx context.Context, fdst fs.Fs, fsrc fs.Fs) bool {
if !SameConfig(fdst, fsrc) {
return false
}
fdstRoot, fdstRootFolded := fixRoot(fdst)
fsrcRoot, fsrcRootFolded := fixRoot(fsrc)
if fdstRootFolded == fsrcRootFolded {
return true
} else if strings.HasPrefix(fdstRootFolded, fsrcRootFolded) {
fdstRelative := fdstRoot[len(fsrcRoot):]
return filterCheck(ctx, fsrc, fdstRelative)
} else if strings.HasPrefix(fsrcRootFolded, fdstRootFolded) {
fsrcRelative := fsrcRoot[len(fdstRoot):]
return filterCheck(ctx, fdst, fsrcRelative)
}
return false
}
// filterCheck checks if dir is included in f
func filterCheck(ctx context.Context, f fs.Fs, dir string) bool {
fi := filter.GetConfig(ctx)
includeDirectory := fi.IncludeDirectory(ctx, f)
include, err := includeDirectory(dir)
if err != nil {
fs.Errorf(f, "Failed to discover whether directory is included: %v", err)
return true
}
return include
}
// SameDir returns true if fdst and fsrc point to the same
// underlying Fs and they are the same directory.
func SameDir(fdst, fsrc fs.Info) bool {
if !SameConfig(fdst, fsrc) {
return false
}
_, fdstRootFolded := fixRoot(fdst)
_, fsrcRootFolded := fixRoot(fsrc)
return fdstRootFolded == fsrcRootFolded
}
// Retry runs fn up to maxTries times if it returns a retriable error
func Retry(ctx context.Context, o any, maxTries int, fn func() error) (err error) {
for tries := 1; tries <= maxTries; tries++ {
// Call the function which might error
err = fn()
if err == nil {
break
}
// End if ctx is in error
if fserrors.ContextError(ctx, &err) {
break
}
// Retry if err returned a retry error
if fserrors.IsRetryError(err) || fserrors.ShouldRetry(err) {
fs.Debugf(o, "Received error: %v - low level retry %d/%d", err, tries, maxTries)
continue
} else if t, ok := pacer.IsRetryAfter(err); ok {
fs.Debugf(o, "Sleeping for %v (as indicated by the server) to obey Retry-After error: %v", t, err)
time.Sleep(t)
continue
}
break
}
return err
}
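// A minimal usage sketch of Retry: the closure is re-run up to maxTries times
// while it keeps returning retriable errors. The body below is a placeholder
// for a real operation that may fail transiently.
func exampleRetry(ctx context.Context, o fs.Object) error {
	return Retry(ctx, o, 3, func() error {
		// do the operation that may need a low level retry here
		return nil
	})
}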
// ListFn lists the Fs to the supplied function
//
// Lists in parallel which may get them out of order
func ListFn(ctx context.Context, f fs.Fs, fn func(fs.Object)) error {
ci := fs.GetConfig(ctx)
return walk.ListR(ctx, f, "", false, ci.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
entries.ForObject(fn)
return nil
})
}
// StdoutMutex mutex for synchronized output on stdout
var StdoutMutex sync.Mutex
// SyncPrintf is a global var holding the Printf function so that it
// can be overridden.
//
// This writes to stdout holding the StdoutMutex. If you are going to
// override it and write to os.Stdout then you should hold the
// StdoutMutex too.
var SyncPrintf = func(format string, a ...any) {
StdoutMutex.Lock()
defer StdoutMutex.Unlock()
fmt.Printf(format, a...)
}
// SyncFprintf - Synchronized fmt.Fprintf
//
// Ignores errors from Fprintf.
//
// Prints to stdout if w is nil
func SyncFprintf(w io.Writer, format string, a ...any) {
if w == nil || w == os.Stdout {
SyncPrintf(format, a...)
} else {
StdoutMutex.Lock()
defer StdoutMutex.Unlock()
_, _ = fmt.Fprintf(w, format, a...)
}
}
// SizeString makes a string representation of size for output
//
// Optional human-readable format including a binary suffix
func SizeString(size int64, humanReadable bool) string {
if humanReadable {
if size < 0 {
return "-" + fs.SizeSuffix(-size).String()
}
return fs.SizeSuffix(size).String()
}
return strconv.FormatInt(size, 10)
}
// SizeStringField makes a string representation of size for output in a fixed width field
//
// Optional human-readable format including a binary suffix
// Argument rawWidth is used to format the field width of the raw value. When the humanReadable
// option is set the width is hard coded to 9, since SizeSuffix strings have precision 3
// and the longest value will be "999.999Ei". This way the width can be optimized
// depending on the humanReadable option. To always use a longer width the return
// value can be fed into another format string with a specific field width.
func SizeStringField(size int64, humanReadable bool, rawWidth int) string {
str := SizeString(size, humanReadable)
if humanReadable {
return fmt.Sprintf("%9s", str)
}
return fmt.Sprintf("%[2]*[1]s", str, rawWidth)
}
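// A minimal sketch of the indexed format verb used above: in "%[2]*[1]s" the
// field width is taken from argument 2 and the value from argument 1.
func exampleFixedWidth() {
	fmt.Printf("%[2]*[1]s|\n", "42", 9)  // "       42|" - right aligned in a 9 character field
	fmt.Printf("%[2]*[1]s|\n", "1Mi", 9) // "      1Mi|" - the width used for human readable sizes
}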
// CountString makes a string representation of count for output
//
// Optional human-readable format including a decimal suffix
func CountString(count int64, humanReadable bool) string {
if humanReadable {
if count < 0 {
return "-" + fs.CountSuffix(-count).String()
}
return fs.CountSuffix(count).String()
}
return strconv.FormatInt(count, 10)
}
// CountStringField makes a string representation of count for output in a fixed width field
//
// Similar to SizeStringField, but human readable with decimal prefix and field width 8
// since there is no 'i' in the decimal prefix symbols (e.g. "999.999E")
func CountStringField(count int64, humanReadable bool, rawWidth int) string {
str := CountString(count, humanReadable)
if humanReadable {
return fmt.Sprintf("%8s", str)
}
return fmt.Sprintf("%[2]*[1]s", str, rawWidth)
}
// List the Fs to the supplied writer
//
// Shows size and path - obeys includes and excludes.
//
// Lists in parallel, so objects may be listed out of order
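//
// Each object produces one line of output, e.g. (illustrative, with the
// default 9 character size field):
//
//	       11 dir/file.txt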
func List(ctx context.Context, f fs.Fs, w io.Writer) error {
ci := fs.GetConfig(ctx)
return ListFn(ctx, f, func(o fs.Object) {
SyncFprintf(w, "%s %s\n", SizeStringField(o.Size(), ci.HumanReadable, 9), o.Remote())
})
}
// ListLong lists the Fs to the supplied writer
//
// Shows size, mod time and path - obeys includes and excludes.
//
// Lists in parallel, so objects may be listed out of order
func ListLong(ctx context.Context, f fs.Fs, w io.Writer) error {
ci := fs.GetConfig(ctx)
return ListFn(ctx, f, func(o fs.Object) {
tr := accounting.Stats(ctx).NewCheckingTransfer(o, "listing")
defer func() {
tr.Done(ctx, nil)
}()
modTime := o.ModTime(ctx)
SyncFprintf(w, "%s %s %s\n", SizeStringField(o.Size(), ci.HumanReadable, 9), modTime.Local().Format("2006-01-02 15:04:05.000000000"), o.Remote())
})
}
// HashSum returns the human-readable hash for ht passed in. This may
// be UNSUPPORTED or ERROR. If it isn't returning a valid hash it will
// return an error.
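//
// A minimal usage sketch (illustrative only):
//
//	// ask the backend for the hash
//	sum, err := HashSum(ctx, hash.MD5, false, false, o)
//	// or download the object and hash it locally
//	sum, err = HashSum(ctx, hash.MD5, false, true, o)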
func HashSum(ctx context.Context, ht hash.Type, base64Encoded bool, downloadFlag bool, o fs.Object) (string, error) {
var sum string
var err error
// If downloadFlag is true, download and hash the file.
// If downloadFlag is false, call o.Hash asking the remote for the hash
if downloadFlag {
// Setup: Define accounting, open the file with NewReOpen to provide restarts, account for the transfer, and set up a multi-hasher with the appropriate type
// Execution: io.Copy file to hasher, get hash and encode in hex
tr := accounting.Stats(ctx).NewTransfer(o, nil)
defer func() {
tr.Done(ctx, err)
}()
// Open with NewReOpen to provide restarts
var options []fs.OpenOption
for _, option := range fs.GetConfig(ctx).DownloadHeaders {
options = append(options, option)
}
var in io.ReadCloser
in, err = Open(ctx, o, options...)
if err != nil {
return "ERROR", fmt.Errorf("failed to open file %v: %w", o, err)
}
// Account and buffer the transfer
in = tr.Account(ctx, in).WithBuffer()
// Setup hasher
hasher, err := hash.NewMultiHasherTypes(hash.NewHashSet(ht))
if err != nil {
return "UNSUPPORTED", fmt.Errorf("hash unsupported: %w", err)
}
// Copy to hasher, downloading the file and passing directly to hash
_, err = io.Copy(hasher, in)
if err != nil {
return "ERROR", fmt.Errorf("failed to copy file to hasher: %w", err)
}
// Get hash as hex or base64 encoded string
sum, err = hasher.SumString(ht, base64Encoded)
if err != nil {
return "ERROR", fmt.Errorf("hasher returned an error: %w", err)
}
} else {
tr := accounting.Stats(ctx).NewCheckingTransfer(o, "hashing")
defer func() {
tr.Done(ctx, err)
}()
sum, err = o.Hash(ctx, ht)
if base64Encoded {
hexBytes, _ := hex.DecodeString(sum)
sum = base64.URLEncoding.EncodeToString(hexBytes)
}
if err == hash.ErrUnsupported {
return "", fmt.Errorf("hash unsupported: %w", err)
}
if err != nil {
return "", fmt.Errorf("failed to get hash %v from backend: %w", ht, err)
}
}
return sum, nil
}
// HashLister does an md5sum equivalent for the hash type passed in.
// It handles both standard hex encoding and base64 output, and
// performs multiple hashes concurrently.
func HashLister(ctx context.Context, ht hash.Type, outputBase64 bool, downloadFlag bool, f fs.Fs, w io.Writer) error {
width := hash.Width(ht, outputBase64)
// Use --checkers concurrency unless downloading in which case use --transfers
concurrency := fs.GetConfig(ctx).Checkers
if downloadFlag {
concurrency = fs.GetConfig(ctx).Transfers
}
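// concurrencyControl acts as a counting semaphore: each worker goroutine
// acquires a slot before hashing and releases it when done, bounding the
// number of concurrent HashSum calls to the concurrency chosen above.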
concurrencyControl := make(chan struct{}, concurrency)
var wg sync.WaitGroup
err := ListFn(ctx, f, func(o fs.Object) {
wg.Add(1)
concurrencyControl <- struct{}{}
go func() {
defer func() {
<-concurrencyControl
wg.Done()
}()
sum, err := HashSum(ctx, ht, outputBase64, downloadFlag, o)
if err != nil {
fs.Errorf(o, "%v", fs.CountError(ctx, err))
return
}
SyncFprintf(w, "%*s %s\n", width, sum, o.Remote())
}()
})
wg.Wait()
return err
}
// HashSumStream outputs a line compatible with md5sum to w based on the
// input stream in and the hash type ht passed in. If outputBase64 is
// set then the hash will be base64 instead of hexadecimal.
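//
// A minimal usage sketch (illustrative only): hash data piped in on stdin
// and write an md5sum style line to stdout.
//
//	err := HashSumStream(hash.MD5, false, os.Stdin, os.Stdout)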
func HashSumStream(ht hash.Type, outputBase64 bool, in io.ReadCloser, w io.Writer) error {
hasher, err := hash.NewMultiHasherTypes(hash.NewHashSet(ht))
if err != nil {
return fmt.Errorf("hash unsupported: %w", err)
}
written, err := io.Copy(hasher, in)
fs.Debugf(nil, "Creating %s hash of %d bytes read from input stream", ht, written)
if err != nil {
return fmt.Errorf("failed to copy input to hasher: %w", err)
}
sum, err := hasher.SumString(ht, outputBase64)
if err != nil {
return fmt.Errorf("hasher returned an error: %w", err)
}
width := hash.Width(ht, outputBase64)
SyncFprintf(w, "%*s -\n", width, sum)
return nil
}
// Count counts the objects and their sizes in the Fs
//
// Obeys includes and excludes
func Count(ctx context.Context, f fs.Fs) (objects int64, size int64, sizelessObjects int64, err error) {
err = ListFn(ctx, f, func(o fs.Object) {
atomic.AddInt64(&objects, 1)
objectSize := o.Size()
if objectSize < 0 {
atomic.AddInt64(&sizelessObjects, 1)
} else if objectSize > 0 {
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | true |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/operations/listdirsorted_test.go | fs/operations/listdirsorted_test.go | package operations_test
import (
"context"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// testListDirSorted is the integration test for code in fs/list/list.go
// which can't be tested there due to import loops.
func testListDirSorted(t *testing.T, listFn func(ctx context.Context, f fs.Fs, includeAll bool, dir string) (entries fs.DirEntries, err error)) {
r := fstest.NewRun(t)
ctx := context.Background()
fi := filter.GetConfig(ctx)
fi.Opt.MaxSize = 10
defer func() {
fi.Opt.MaxSize = -1
}()
files := []fstest.Item{
r.WriteObject(context.Background(), "a.txt", "hello world", t1),
r.WriteObject(context.Background(), "zend.txt", "hello", t1),
r.WriteObject(context.Background(), "sub dir/hello world", "hello world", t1),
r.WriteObject(context.Background(), "sub dir/hello world2", "hello world", t1),
r.WriteObject(context.Background(), "sub dir/ignore dir/.ignore", "-", t1),
r.WriteObject(context.Background(), "sub dir/ignore dir/should be ignored", "to ignore", t1),
r.WriteObject(context.Background(), "sub dir/sub sub dir/hello world3", "hello world", t1),
}
r.CheckRemoteItems(t, files...)
var items fs.DirEntries
var err error
// Turn the DirEntry into a name, ending with a / if it is a
// dir
str := func(i int) string {
item := items[i]
name := item.Remote()
switch item.(type) {
case fs.Object:
case fs.Directory:
name += "/"
default:
t.Fatalf("Unknown type %+v", item)
}
return name
}
items, err = listFn(context.Background(), r.Fremote, true, "")
require.NoError(t, err)
require.Len(t, items, 3)
assert.Equal(t, "a.txt", str(0))
assert.Equal(t, "sub dir/", str(1))
assert.Equal(t, "zend.txt", str(2))
items, err = listFn(context.Background(), r.Fremote, false, "")
require.NoError(t, err)
require.Len(t, items, 2)
assert.Equal(t, "sub dir/", str(0))
assert.Equal(t, "zend.txt", str(1))
items, err = listFn(context.Background(), r.Fremote, true, "sub dir")
require.NoError(t, err)
require.Len(t, items, 4)
assert.Equal(t, "sub dir/hello world", str(0))
assert.Equal(t, "sub dir/hello world2", str(1))
assert.Equal(t, "sub dir/ignore dir/", str(2))
assert.Equal(t, "sub dir/sub sub dir/", str(3))
items, err = listFn(context.Background(), r.Fremote, false, "sub dir")
require.NoError(t, err)
require.Len(t, items, 2)
assert.Equal(t, "sub dir/ignore dir/", str(0))
assert.Equal(t, "sub dir/sub sub dir/", str(1))
// testing ignore file
fi.Opt.ExcludeFile = []string{".ignore"}
items, err = listFn(context.Background(), r.Fremote, false, "sub dir")
require.NoError(t, err)
require.Len(t, items, 1)
assert.Equal(t, "sub dir/sub sub dir/", str(0))
items, err = listFn(context.Background(), r.Fremote, false, "sub dir/ignore dir")
require.NoError(t, err)
require.Len(t, items, 0)
items, err = listFn(context.Background(), r.Fremote, true, "sub dir/ignore dir")
require.NoError(t, err)
require.Len(t, items, 2)
assert.Equal(t, "sub dir/ignore dir/.ignore", str(0))
assert.Equal(t, "sub dir/ignore dir/should be ignored", str(1))
fi.Opt.ExcludeFile = nil
items, err = listFn(context.Background(), r.Fremote, false, "sub dir/ignore dir")
require.NoError(t, err)
require.Len(t, items, 2)
assert.Equal(t, "sub dir/ignore dir/.ignore", str(0))
assert.Equal(t, "sub dir/ignore dir/should be ignored", str(1))
}
// TestListDirSorted is the integration test for code in fs/list/list.go
// which can't be tested there due to import loops.
func TestListDirSorted(t *testing.T) {
testListDirSorted(t, list.DirSorted)
}
// TestListDirSortedFn is the integration test for code in fs/list/list.go
// which can't be tested there due to import loops.
func TestListDirSortedFn(t *testing.T) {
listFn := func(ctx context.Context, f fs.Fs, includeAll bool, dir string) (entries fs.DirEntries, err error) {
callback := func(newEntries fs.DirEntries) error {
entries = append(entries, newEntries...)
return nil
}
err = list.DirSortedFn(ctx, f, includeAll, dir, callback, nil)
return entries, err
}
testListDirSorted(t, listFn)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/operations/multithread_test.go | fs/operations/multithread_test.go | package operations
import (
"context"
"errors"
"fmt"
"io"
"sync"
"testing"
"time"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest/mockfs"
"github.com/rclone/rclone/fstest/mockobject"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestDoMultiThreadCopy(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
f, err := mockfs.NewFs(ctx, "potato", "", nil)
require.NoError(t, err)
src := mockobject.New("file.txt").WithContent([]byte(random.String(100)), mockobject.SeekModeNone)
srcFs, err := mockfs.NewFs(ctx, "sausage", "", nil)
require.NoError(t, err)
src.SetFs(srcFs)
oldStreams := ci.MultiThreadStreams
oldCutoff := ci.MultiThreadCutoff
oldIsSet := ci.MultiThreadSet
defer func() {
ci.MultiThreadStreams = oldStreams
ci.MultiThreadCutoff = oldCutoff
ci.MultiThreadSet = oldIsSet
}()
ci.MultiThreadStreams, ci.MultiThreadCutoff = 4, 50
ci.MultiThreadSet = false
nullWriterAt := func(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
panic("don't call me")
}
f.Features().OpenWriterAt = nullWriterAt
assert.True(t, doMultiThreadCopy(ctx, f, src))
ci.MultiThreadStreams = 0
assert.False(t, doMultiThreadCopy(ctx, f, src))
ci.MultiThreadStreams = 1
assert.False(t, doMultiThreadCopy(ctx, f, src))
ci.MultiThreadStreams = 2
assert.True(t, doMultiThreadCopy(ctx, f, src))
ci.MultiThreadCutoff = 200
assert.False(t, doMultiThreadCopy(ctx, f, src))
ci.MultiThreadCutoff = 101
assert.False(t, doMultiThreadCopy(ctx, f, src))
ci.MultiThreadCutoff = 100
assert.True(t, doMultiThreadCopy(ctx, f, src))
f.Features().OpenWriterAt = nil
assert.False(t, doMultiThreadCopy(ctx, f, src))
f.Features().OpenWriterAt = nullWriterAt
assert.True(t, doMultiThreadCopy(ctx, f, src))
f.Features().IsLocal = true
srcFs.Features().IsLocal = true
assert.False(t, doMultiThreadCopy(ctx, f, src))
ci.MultiThreadSet = true
assert.True(t, doMultiThreadCopy(ctx, f, src))
ci.MultiThreadSet = false
assert.False(t, doMultiThreadCopy(ctx, f, src))
srcFs.Features().IsLocal = false
assert.True(t, doMultiThreadCopy(ctx, f, src))
srcFs.Features().IsLocal = true
assert.False(t, doMultiThreadCopy(ctx, f, src))
f.Features().IsLocal = false
assert.True(t, doMultiThreadCopy(ctx, f, src))
srcFs.Features().IsLocal = false
assert.True(t, doMultiThreadCopy(ctx, f, src))
srcFs.Features().NoMultiThreading = true
assert.False(t, doMultiThreadCopy(ctx, f, src))
srcFs.Features().NoMultiThreading = false
assert.True(t, doMultiThreadCopy(ctx, f, src))
}
func TestMultithreadCalculateNumChunks(t *testing.T) {
for _, test := range []struct {
size int64
chunkSize int64
wantNumChunks int
}{
{size: 1, chunkSize: multithreadChunkSize, wantNumChunks: 1},
{size: 1 << 20, chunkSize: 1, wantNumChunks: 1 << 20},
{size: 1 << 20, chunkSize: 2, wantNumChunks: 1 << 19},
{size: (1 << 20) + 1, chunkSize: 2, wantNumChunks: (1 << 19) + 1},
{size: (1 << 20) - 1, chunkSize: 2, wantNumChunks: 1 << 19},
} {
t.Run(fmt.Sprintf("%+v", test), func(t *testing.T) {
mc := &multiThreadCopyState{}
mc.numChunks = calculateNumChunks(test.size, test.chunkSize)
assert.Equal(t, test.wantNumChunks, mc.numChunks)
})
}
}
// skipIfNotMultithread skips the test if multithread writing is not supported,
// returning the chunk size to use otherwise
func skipIfNotMultithread(ctx context.Context, t *testing.T, r *fstest.Run) int {
features := r.Fremote.Features()
if features.OpenChunkWriter == nil && features.OpenWriterAt == nil {
t.Skip("multithread writing not supported")
}
// Only support one hash for the local backend otherwise we end up spending a huge amount of CPU on hashing!
if r.Fremote.Features().IsLocal {
oldHashes := hash.SupportOnly([]hash.Type{r.Fremote.Hashes().GetOne()})
t.Cleanup(func() {
_ = hash.SupportOnly(oldHashes)
})
}
ci := fs.GetConfig(ctx)
chunkSize := int(ci.MultiThreadChunkSize)
if features.OpenChunkWriter != nil {
//OpenChunkWriter func(ctx context.Context, remote string, src ObjectInfo, options ...OpenOption) (info ChunkWriterInfo, writer ChunkWriter, err error)
const fileName = "chunksize-probe"
src := object.NewStaticObjectInfo(fileName, time.Now(), int64(100*fs.Mebi), true, nil, nil)
info, writer, err := features.OpenChunkWriter(ctx, fileName, src)
require.NoError(t, err)
chunkSize = int(info.ChunkSize)
err = writer.Abort(ctx)
require.NoError(t, err)
}
return chunkSize
}
func TestMultithreadCopy(t *testing.T) {
r := fstest.NewRun(t)
ctx := context.Background()
chunkSize := skipIfNotMultithread(ctx, t, r)
// Check every other transfer for metadata
checkMetadata := false
ctx, ci := fs.AddConfig(ctx)
for _, upload := range []bool{false, true} {
for _, test := range []struct {
size int
streams int
}{
{size: chunkSize*2 - 1, streams: 2},
{size: chunkSize * 2, streams: 2},
{size: chunkSize*2 + 1, streams: 2},
} {
checkMetadata = !checkMetadata
ci.Metadata = checkMetadata
fileName := fmt.Sprintf("test-multithread-copy-%v-%d-%d", upload, test.size, test.streams)
t.Run(fmt.Sprintf("upload=%v,size=%v,streams=%v", upload, test.size, test.streams), func(t *testing.T) {
if *fstest.SizeLimit > 0 && int64(test.size) > *fstest.SizeLimit {
t.Skipf("exceeded file size limit %d > %d", test.size, *fstest.SizeLimit)
}
var (
contents = random.String(test.size)
t1 = fstest.Time("2001-02-03T04:05:06.499999999Z")
file1 fstest.Item
src, dst fs.Object
err error
testMetadata = fs.Metadata{
// System metadata supported by all backends
"mtime": t1.Format(time.RFC3339Nano),
// User metadata
"potato": "jersey",
}
)
var fSrc, fDst fs.Fs
if upload {
file1 = r.WriteFile(fileName, contents, t1)
r.CheckRemoteItems(t)
r.CheckLocalItems(t, file1)
fDst, fSrc = r.Fremote, r.Flocal
} else {
file1 = r.WriteObject(ctx, fileName, contents, t1)
r.CheckRemoteItems(t, file1)
r.CheckLocalItems(t)
fDst, fSrc = r.Flocal, r.Fremote
}
src, err = fSrc.NewObject(ctx, fileName)
require.NoError(t, err)
do, canSetMetadata := src.(fs.SetMetadataer)
if checkMetadata && canSetMetadata {
// Set metadata on the source if required
err := do.SetMetadata(ctx, testMetadata)
if err == fs.ErrorNotImplemented {
canSetMetadata = false
} else {
require.NoError(t, err)
fstest.CheckEntryMetadata(ctx, t, r.Flocal, src, testMetadata)
}
}
accounting.GlobalStats().ResetCounters()
tr := accounting.GlobalStats().NewTransfer(src, nil)
defer func() {
tr.Done(ctx, err)
}()
dst, err = multiThreadCopy(ctx, fDst, fileName, src, test.streams, tr)
require.NoError(t, err)
assert.Equal(t, src.Size(), dst.Size())
assert.Equal(t, fileName, dst.Remote())
fstest.CheckListingWithPrecision(t, fSrc, []fstest.Item{file1}, nil, fs.GetModifyWindow(ctx, fDst, fSrc))
fstest.CheckListingWithPrecision(t, fDst, []fstest.Item{file1}, nil, fs.GetModifyWindow(ctx, fDst, fSrc))
if checkMetadata && canSetMetadata && fDst.Features().ReadMetadata {
fstest.CheckEntryMetadata(ctx, t, fDst, dst, testMetadata)
}
require.NoError(t, dst.Remove(ctx))
require.NoError(t, src.Remove(ctx))
})
}
}
}
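// errorObject wraps an fs.Object so that the Open call covering the final
// ranged segment returns a reader which always fails, after first waiting for
// the other chunk readers to finish. This exercises the abort path of
// multiThreadCopy.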
type errorObject struct {
fs.Object
size int64
wg *sync.WaitGroup
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
//
// Remember this is called multiple times whenever the backend seeks (e.g. after having read the checksum)
func (o errorObject) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
fs.Debugf(nil, "Open with options = %v", options)
rc, err := o.Object.Open(ctx, options...)
if err != nil {
return nil, err
}
// Return an error reader for the final segment
for _, option := range options {
if ropt, ok := option.(*fs.RangeOption); ok {
end := ropt.End + 1
if end >= o.size {
// Give the other chunks a chance to start
time.Sleep(time.Second)
// Wait for chunks to upload first
o.wg.Wait()
fs.Debugf(nil, "Returning error reader")
return errorReadCloser{rc}, nil
}
}
}
o.wg.Add(1)
return wgReadCloser{rc, o.wg}, nil
}
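// errorReadCloser wraps an io.ReadCloser so that every Read returns a
// simulated failure.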
type errorReadCloser struct {
io.ReadCloser
}
func (rc errorReadCloser) Read(p []byte) (n int, err error) {
fs.Debugf(nil, "BOOM: simulated read failure")
return 0, errors.New("BOOM: simulated read failure")
}
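// wgReadCloser marks the WaitGroup as done when the chunk reader is closed,
// letting the failing chunk wait until the other chunks have been read.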
type wgReadCloser struct {
io.ReadCloser
wg *sync.WaitGroup
}
func (rc wgReadCloser) Close() (err error) {
rc.wg.Done()
return rc.ReadCloser.Close()
}
// Make sure aborting the multi-thread copy doesn't overwrite an existing file.
func TestMultithreadCopyAbort(t *testing.T) {
r := fstest.NewRun(t)
ctx := context.Background()
chunkSize := skipIfNotMultithread(ctx, t, r)
size := 2*chunkSize + 1
if *fstest.SizeLimit > 0 && int64(size) > *fstest.SizeLimit {
t.Skipf("exceeded file size limit %d > %d", size, *fstest.SizeLimit)
}
// first write a canary file which we are trying not to overwrite
const fileName = "test-multithread-abort"
contents := random.String(100)
t1 := fstest.Time("2001-02-03T04:05:06.499999999Z")
canary := r.WriteObject(ctx, fileName, contents, t1)
r.CheckRemoteItems(t, canary)
// Now write a local file to upload
file1 := r.WriteFile(fileName, random.String(size), t1)
r.CheckLocalItems(t, file1)
src, err := r.Flocal.NewObject(ctx, fileName)
require.NoError(t, err)
accounting.GlobalStats().ResetCounters()
tr := accounting.GlobalStats().NewTransfer(src, nil)
defer func() {
tr.Done(ctx, err)
}()
wg := new(sync.WaitGroup)
dst, err := multiThreadCopy(ctx, r.Fremote, fileName, errorObject{src, int64(size), wg}, 1, tr)
assert.Error(t, err)
assert.Nil(t, dst)
if r.Fremote.Features().PartialUploads {
r.CheckRemoteItems(t)
} else {
r.CheckRemoteItems(t, canary)
o, err := r.Fremote.NewObject(ctx, fileName)
require.NoError(t, err)
require.NoError(t, o.Remove(ctx))
}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/operations/lsjson_test.go | fs/operations/lsjson_test.go | package operations_test
import (
"context"
"sort"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Compare a and b in a file system independent way
func compareListJSONItem(t *testing.T, a, b *operations.ListJSONItem, precision time.Duration) {
assert.Equal(t, a.Path, b.Path, "Path")
assert.Equal(t, a.Name, b.Name, "Name")
// assert.Equal(t, a.EncryptedPath, b.EncryptedPath, "EncryptedPath")
// assert.Equal(t, a.Encrypted, b.Encrypted, "Encrypted")
if !a.IsDir {
assert.Equal(t, a.Size, b.Size, "Size")
}
// assert.Equal(t, a.MimeType, b.MimeType, "MimeType")
if !a.IsDir {
fstest.AssertTimeEqualWithPrecision(t, "ListJSON", a.ModTime.When, b.ModTime.When, precision)
}
assert.Equal(t, a.IsDir, b.IsDir, "IsDir")
// assert.Equal(t, a.Hashes, b.Hashes, "Hashes")
// assert.Equal(t, a.ID, b.ID, "ID")
// assert.Equal(t, a.OrigID, b.OrigID, "OrigID")
// assert.Equal(t, a.Tier, b.Tier, "Tier")
// assert.Equal(t, a.IsBucket, b.IsBucket, "IsBucket")
}
func TestListJSON(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
file1 := r.WriteBoth(ctx, "file1", "file1", t1)
file2 := r.WriteBoth(ctx, "sub/file2", "sub/file2", t2)
r.CheckRemoteItems(t, file1, file2)
precision := fs.GetModifyWindow(ctx, r.Fremote)
for _, test := range []struct {
name string
remote string
opt operations.ListJSONOpt
want []*operations.ListJSONItem
}{
{
name: "Default",
opt: operations.ListJSONOpt{},
want: []*operations.ListJSONItem{{
Path: "file1",
Name: "file1",
Size: 5,
ModTime: operations.Timestamp{When: t1},
IsDir: false,
}, {
Path: "sub",
Name: "sub",
IsDir: true,
}},
}, {
name: "FilesOnly",
opt: operations.ListJSONOpt{
FilesOnly: true,
},
want: []*operations.ListJSONItem{{
Path: "file1",
Name: "file1",
Size: 5,
ModTime: operations.Timestamp{When: t1},
IsDir: false,
}},
}, {
name: "DirsOnly",
opt: operations.ListJSONOpt{
DirsOnly: true,
},
want: []*operations.ListJSONItem{{
Path: "sub",
Name: "sub",
IsDir: true,
}},
}, {
name: "Recurse",
opt: operations.ListJSONOpt{
Recurse: true,
},
want: []*operations.ListJSONItem{{
Path: "file1",
Name: "file1",
Size: 5,
ModTime: operations.Timestamp{When: t1},
IsDir: false,
}, {
Path: "sub",
Name: "sub",
IsDir: true,
}, {
Path: "sub/file2",
Name: "file2",
Size: 9,
ModTime: operations.Timestamp{When: t2},
IsDir: false,
}},
}, {
name: "SubDir",
remote: "sub",
opt: operations.ListJSONOpt{},
want: []*operations.ListJSONItem{{
Path: "sub/file2",
Name: "file2",
Size: 9,
ModTime: operations.Timestamp{When: t2},
IsDir: false,
}},
}, {
name: "NoModTime",
opt: operations.ListJSONOpt{
FilesOnly: true,
NoModTime: true,
},
want: []*operations.ListJSONItem{{
Path: "file1",
Name: "file1",
Size: 5,
ModTime: operations.Timestamp{When: time.Time{}},
IsDir: false,
}},
}, {
name: "NoMimeType",
opt: operations.ListJSONOpt{
FilesOnly: true,
NoMimeType: true,
},
want: []*operations.ListJSONItem{{
Path: "file1",
Name: "file1",
Size: 5,
ModTime: operations.Timestamp{When: t1},
IsDir: false,
}},
}, {
name: "ShowHash",
opt: operations.ListJSONOpt{
FilesOnly: true,
ShowHash: true,
},
want: []*operations.ListJSONItem{{
Path: "file1",
Name: "file1",
Size: 5,
ModTime: operations.Timestamp{When: t1},
IsDir: false,
}},
}, {
name: "HashTypes",
opt: operations.ListJSONOpt{
FilesOnly: true,
ShowHash: true,
HashTypes: []string{"MD5"},
},
want: []*operations.ListJSONItem{{
Path: "file1",
Name: "file1",
Size: 5,
ModTime: operations.Timestamp{When: t1},
IsDir: false,
}},
}, {
name: "Metadata",
opt: operations.ListJSONOpt{
FilesOnly: false,
Metadata: true,
},
want: []*operations.ListJSONItem{{
Path: "file1",
Name: "file1",
Size: 5,
ModTime: operations.Timestamp{When: t1},
IsDir: false,
}, {
Path: "sub",
Name: "sub",
IsDir: true,
}},
},
} {
t.Run(test.name, func(t *testing.T) {
var got []*operations.ListJSONItem
require.NoError(t, operations.ListJSON(ctx, r.Fremote, test.remote, &test.opt, func(item *operations.ListJSONItem) error {
got = append(got, item)
return nil
}))
sort.Slice(got, func(i, j int) bool {
return got[i].Path < got[j].Path
})
require.Equal(t, len(test.want), len(got), "Wrong number of results")
for i := range test.want {
compareListJSONItem(t, test.want[i], got[i], precision)
if test.opt.NoMimeType {
assert.Equal(t, "", got[i].MimeType)
} else {
assert.NotEqual(t, "", got[i].MimeType)
}
if test.opt.Metadata {
features := r.Fremote.Features()
if features.ReadMetadata && !got[i].IsDir {
assert.Greater(t, len(got[i].Metadata), 0, "Expecting metadata for file")
}
if features.ReadDirMetadata && got[i].IsDir {
assert.Greater(t, len(got[i].Metadata), 0, "Expecting metadata for dir")
}
}
if test.opt.ShowHash {
hashes := got[i].Hashes
assert.NotNil(t, hashes)
if len(test.opt.HashTypes) > 0 && len(hashes) > 0 {
assert.Equal(t, 1, len(hashes))
}
if hashes["crc32"] != "" {
assert.Equal(t, "9ee760e5", hashes["crc32"])
}
if hashes["dropbox"] != "" {
assert.Equal(t, "f4d62afeaee6f35d3efdd8c66623360395165473bcc958f835343eb3f542f983", hashes["dropbox"])
}
if hashes["mailru"] != "" {
assert.Equal(t, "66696c6531000000000000000000000000000000", hashes["mailru"])
}
if hashes["md5"] != "" {
assert.Equal(t, "826e8142e6baabe8af779f5f490cf5f5", hashes["md5"])
}
if hashes["quickxor"] != "" {
assert.Equal(t, "6648031bca100300000000000500000000000000", hashes["quickxor"])
}
if hashes["sha1"] != "" {
assert.Equal(t, "60b27f004e454aca81b0480209cce5081ec52390", hashes["sha1"])
}
if hashes["sha256"] != "" {
assert.Equal(t, "c147efcfc2d7ea666a9e4f5187b115c90903f0fc896a56df9a6ef5d8f3fc9f31", hashes["sha256"])
}
if hashes["whirlpool"] != "" {
assert.Equal(t, "02fa11755b6470bfc5aab6d94cde5cf2939474fb5b0ebbf8ddf3d32bf06aa438eb92eac097047c02017dc1c317ee83fa8a2717ca4d544b4ee75b3231d1c466b0", hashes["whirlpool"])
}
} else {
assert.Nil(t, got[i].Hashes)
}
}
})
}
}
func TestStatJSON(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
file1 := r.WriteBoth(ctx, "file1", "file1", t1)
file2 := r.WriteBoth(ctx, "sub/file2", "sub/file2", t2)
r.CheckRemoteItems(t, file1, file2)
precision := fs.GetModifyWindow(ctx, r.Fremote)
for _, test := range []struct {
name string
remote string
opt operations.ListJSONOpt
want *operations.ListJSONItem
}{
{
name: "Root",
remote: "",
opt: operations.ListJSONOpt{},
want: &operations.ListJSONItem{
Path: "",
Name: "",
IsDir: true,
},
}, {
name: "RootFilesOnly",
remote: "",
opt: operations.ListJSONOpt{
FilesOnly: true,
},
want: nil,
}, {
name: "RootDirsOnly",
remote: "",
opt: operations.ListJSONOpt{
DirsOnly: true,
},
want: &operations.ListJSONItem{
Path: "",
Name: "",
IsDir: true,
},
}, {
name: "Dir",
remote: "sub",
opt: operations.ListJSONOpt{},
want: &operations.ListJSONItem{
Path: "sub",
Name: "sub",
IsDir: true,
},
}, {
name: "DirWithTrailingSlash",
remote: "sub/",
opt: operations.ListJSONOpt{},
want: &operations.ListJSONItem{
Path: "sub",
Name: "sub",
IsDir: true,
},
}, {
name: "File",
remote: "file1",
opt: operations.ListJSONOpt{},
want: &operations.ListJSONItem{
Path: "file1",
Name: "file1",
Size: 5,
ModTime: operations.Timestamp{When: t1},
IsDir: false,
},
}, {
name: "NotFound",
remote: "notfound",
opt: operations.ListJSONOpt{},
want: nil,
}, {
name: "DirFilesOnly",
remote: "sub",
opt: operations.ListJSONOpt{
FilesOnly: true,
},
want: nil,
}, {
name: "FileFilesOnly",
remote: "file1",
opt: operations.ListJSONOpt{
FilesOnly: true,
},
want: &operations.ListJSONItem{
Path: "file1",
Name: "file1",
Size: 5,
ModTime: operations.Timestamp{When: t1},
IsDir: false,
},
}, {
name: "NotFoundFilesOnly",
remote: "notfound",
opt: operations.ListJSONOpt{
FilesOnly: true,
},
want: nil,
}, {
name: "DirDirsOnly",
remote: "sub",
opt: operations.ListJSONOpt{
DirsOnly: true,
},
want: &operations.ListJSONItem{
Path: "sub",
Name: "sub",
IsDir: true,
},
}, {
name: "FileDirsOnly",
remote: "file1",
opt: operations.ListJSONOpt{
DirsOnly: true,
},
want: nil,
}, {
name: "NotFoundDirsOnly",
remote: "notfound",
opt: operations.ListJSONOpt{
DirsOnly: true,
},
want: nil,
},
} {
t.Run(test.name, func(t *testing.T) {
got, err := operations.StatJSON(ctx, r.Fremote, test.remote, &test.opt)
require.NoError(t, err)
if test.want == nil {
assert.Nil(t, got)
return
}
require.NotNil(t, got)
compareListJSONItem(t, test.want, got, precision)
})
}
t.Run("RootNotFound", func(t *testing.T) {
f, err := fs.NewFs(ctx, r.FremoteName+"/notfound")
require.NoError(t, err)
_, err = operations.StatJSON(ctx, f, "", &operations.ListJSONOpt{})
// This should return an error except for bucket based remotes
assert.True(t, err != nil || f.Features().BucketBased, "Need an error for non bucket based backends")
})
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/operations/check_test.go | fs/operations/check_test.go | package operations_test
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"sort"
"strings"
"testing"
"github.com/rclone/rclone/cmd/bisync/bilib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/readers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/text/unicode/norm"
)
func testCheck(t *testing.T, checkFunction func(ctx context.Context, opt *operations.CheckOpt) error) {
r := fstest.NewRun(t)
ctx := context.Background()
ci := fs.GetConfig(ctx)
addBuffers := func(opt *operations.CheckOpt) {
opt.Combined = new(bytes.Buffer)
opt.MissingOnSrc = new(bytes.Buffer)
opt.MissingOnDst = new(bytes.Buffer)
opt.Match = new(bytes.Buffer)
opt.Differ = new(bytes.Buffer)
opt.Error = new(bytes.Buffer)
}
sortLines := func(in string) []string {
if in == "" {
return []string{}
}
lines := strings.Split(in, "\n")
sort.Strings(lines)
return lines
}
checkBuffer := func(name string, want map[string]string, out io.Writer) {
expected := want[name]
buf, ok := out.(*bytes.Buffer)
require.True(t, ok)
assert.Equal(t, sortLines(expected), sortLines(buf.String()), name)
}
checkBuffers := func(opt *operations.CheckOpt, want map[string]string) {
checkBuffer("combined", want, opt.Combined)
checkBuffer("missingonsrc", want, opt.MissingOnSrc)
checkBuffer("missingondst", want, opt.MissingOnDst)
checkBuffer("match", want, opt.Match)
checkBuffer("differ", want, opt.Differ)
checkBuffer("error", want, opt.Error)
}
check := func(i int, wantErrors int64, wantChecks int64, oneway bool, wantOutput map[string]string) {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
accounting.GlobalStats().ResetCounters()
opt := operations.CheckOpt{
Fdst: r.Fremote,
Fsrc: r.Flocal,
OneWay: oneway,
}
addBuffers(&opt)
var err error
buf := bilib.CaptureOutput(func() {
err = checkFunction(ctx, &opt)
})
gotErrors := accounting.GlobalStats().GetErrors()
gotChecks := accounting.GlobalStats().GetChecks()
if wantErrors == 0 && err != nil {
t.Errorf("%d: Got error when not expecting one: %v", i, err)
}
if wantErrors != 0 && err == nil {
t.Errorf("%d: No error when expecting one", i)
}
if wantErrors != gotErrors {
t.Errorf("%d: Expecting %d errors but got %d", i, wantErrors, gotErrors)
}
if gotChecks > 0 && !strings.Contains(string(buf), "matching files") {
t.Errorf("%d: Total files matching line missing", i)
}
if wantChecks != gotChecks {
t.Errorf("%d: Expecting %d total matching files but got %d", i, wantChecks, gotChecks)
}
checkBuffers(&opt, wantOutput)
})
}
file1 := r.WriteBoth(ctx, "rutabaga", "is tasty", t3)
r.CheckRemoteItems(t, file1)
r.CheckLocalItems(t, file1)
check(1, 0, 1, false, map[string]string{
"combined": "= rutabaga\n",
"missingonsrc": "",
"missingondst": "",
"match": "rutabaga\n",
"differ": "",
"error": "",
})
file2 := r.WriteFile("potato2", "------------------------------------------------------------", t1)
r.CheckLocalItems(t, file1, file2)
check(2, 1, 1, false, map[string]string{
"combined": "+ potato2\n= rutabaga\n",
"missingonsrc": "",
"missingondst": "potato2\n",
"match": "rutabaga\n",
"differ": "",
"error": "",
})
file3 := r.WriteObject(ctx, "empty space", "-", t2)
r.CheckRemoteItems(t, file1, file3)
check(3, 2, 1, false, map[string]string{
"combined": "- empty space\n+ potato2\n= rutabaga\n",
"missingonsrc": "empty space\n",
"missingondst": "potato2\n",
"match": "rutabaga\n",
"differ": "",
"error": "",
})
file2r := file2
if ci.SizeOnly {
file2r = r.WriteObject(ctx, "potato2", "--Some-Differences-But-Size-Only-Is-Enabled-----------------", t1)
} else {
r.WriteObject(ctx, "potato2", "------------------------------------------------------------", t1)
}
r.CheckRemoteItems(t, file1, file2r, file3)
check(4, 1, 2, false, map[string]string{
"combined": "- empty space\n= potato2\n= rutabaga\n",
"missingonsrc": "empty space\n",
"missingondst": "",
"match": "rutabaga\npotato2\n",
"differ": "",
"error": "",
})
file3r := file3
file3l := r.WriteFile("empty space", "DIFFER", t2)
r.CheckLocalItems(t, file1, file2, file3l)
check(5, 1, 3, false, map[string]string{
"combined": "* empty space\n= potato2\n= rutabaga\n",
"missingonsrc": "",
"missingondst": "",
"match": "potato2\nrutabaga\n",
"differ": "empty space\n",
"error": "",
})
file4 := r.WriteObject(ctx, "remotepotato", "------------------------------------------------------------", t1)
r.CheckRemoteItems(t, file1, file2r, file3r, file4)
check(6, 2, 3, false, map[string]string{
"combined": "* empty space\n= potato2\n= rutabaga\n- remotepotato\n",
"missingonsrc": "remotepotato\n",
"missingondst": "",
"match": "potato2\nrutabaga\n",
"differ": "empty space\n",
"error": "",
})
check(7, 1, 3, true, map[string]string{
"combined": "* empty space\n= potato2\n= rutabaga\n",
"missingonsrc": "",
"missingondst": "",
"match": "potato2\nrutabaga\n",
"differ": "empty space\n",
"error": "",
})
}
func TestCheck(t *testing.T) {
testCheck(t, operations.Check)
}
func TestCheckFsError(t *testing.T) {
ctx := context.Background()
dstFs, err := fs.NewFs(ctx, "nonexistent")
if err != nil {
t.Fatal(err)
}
srcFs, err := fs.NewFs(ctx, "nonexistent")
if err != nil {
t.Fatal(err)
}
opt := operations.CheckOpt{
Fdst: dstFs,
Fsrc: srcFs,
OneWay: false,
}
err = operations.Check(ctx, &opt)
require.Error(t, err)
}
func TestCheckDownload(t *testing.T) {
testCheck(t, operations.CheckDownload)
}
func TestCheckSizeOnly(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
ci.SizeOnly = true
defer func() { ci.SizeOnly = false }()
TestCheck(t)
}
func TestCheckEqualReaders(t *testing.T) {
b65a := make([]byte, 65*1024)
b65b := make([]byte, 65*1024)
b65b[len(b65b)-1] = 1
b66 := make([]byte, 66*1024)
equal, err := operations.CheckEqualReaders(bytes.NewBuffer(b65a), bytes.NewBuffer(b65a))
assert.NoError(t, err)
assert.Equal(t, equal, true)
equal, err = operations.CheckEqualReaders(bytes.NewBuffer(b65a), bytes.NewBuffer(b65b))
assert.NoError(t, err)
assert.Equal(t, equal, false)
equal, err = operations.CheckEqualReaders(bytes.NewBuffer(b65a), bytes.NewBuffer(b66))
assert.NoError(t, err)
assert.Equal(t, equal, false)
equal, err = operations.CheckEqualReaders(bytes.NewBuffer(b66), bytes.NewBuffer(b65a))
assert.NoError(t, err)
assert.Equal(t, equal, false)
myErr := errors.New("sentinel")
wrap := func(b []byte) io.Reader {
r := bytes.NewBuffer(b)
e := readers.ErrorReader{Err: myErr}
return io.MultiReader(r, e)
}
equal, err = operations.CheckEqualReaders(wrap(b65a), bytes.NewBuffer(b65a))
assert.Equal(t, myErr, err)
assert.Equal(t, equal, false)
equal, err = operations.CheckEqualReaders(wrap(b65a), bytes.NewBuffer(b65b))
assert.Equal(t, myErr, err)
assert.Equal(t, equal, false)
equal, err = operations.CheckEqualReaders(wrap(b65a), bytes.NewBuffer(b66))
assert.Equal(t, myErr, err)
assert.Equal(t, equal, false)
equal, err = operations.CheckEqualReaders(wrap(b66), bytes.NewBuffer(b65a))
assert.Equal(t, myErr, err)
assert.Equal(t, equal, false)
equal, err = operations.CheckEqualReaders(bytes.NewBuffer(b65a), wrap(b65a))
assert.Equal(t, myErr, err)
assert.Equal(t, equal, false)
equal, err = operations.CheckEqualReaders(bytes.NewBuffer(b65a), wrap(b65b))
assert.Equal(t, myErr, err)
assert.Equal(t, equal, false)
equal, err = operations.CheckEqualReaders(bytes.NewBuffer(b65a), wrap(b66))
assert.Equal(t, myErr, err)
assert.Equal(t, equal, false)
equal, err = operations.CheckEqualReaders(bytes.NewBuffer(b66), wrap(b65a))
assert.Equal(t, myErr, err)
assert.Equal(t, equal, false)
}
func TestParseSumFile(t *testing.T) {
r := fstest.NewRun(t)
ctx := context.Background()
const sumFile = "test.sum"
samples := []struct {
hash, sep, name string
ok bool
}{
{"1", " ", "file1", true},
{"2", " *", "file2", true},
{"3", " ", " file3 ", true},
{"4", " ", "\tfile3\t", true},
{"5", " ", "file5", false},
{"6", "\t", "file6", false},
{"7", " \t", " file7 ", false},
{"", " ", "file8", false},
{"", "", "file9", false},
}
for _, eol := range []string{"\n", "\r\n"} {
data := &bytes.Buffer{}
wantNum := 0
for _, s := range samples {
_, _ = data.WriteString(s.hash + s.sep + s.name + eol)
if s.ok {
wantNum++
}
}
_ = r.WriteObject(ctx, sumFile, data.String(), t1)
file := fstest.NewObject(ctx, t, r.Fremote, sumFile)
sums, err := operations.ParseSumFile(ctx, file)
assert.NoError(t, err)
assert.Equal(t, wantNum, len(sums))
for _, s := range samples {
if s.ok {
assert.Equal(t, s.hash, sums[s.name])
}
}
}
}
func testCheckSum(t *testing.T, download bool) {
const dataDir = "data"
const sumFile = "test.sum"
hashType := hash.MD5
const (
testString1 = "Hello, World!"
testDigest1 = "65a8e27d8879283831b664bd8b7f0ad4"
testDigest1Upper = "65A8E27D8879283831B664BD8B7F0AD4"
testString2 = "I am the walrus"
testDigest2 = "87396e030ef3f5b35bbf85c0a09a4fb3"
testDigest2Mixed = "87396e030EF3f5b35BBf85c0a09a4FB3"
)
type wantType map[string]string
ctx := context.Background()
r := fstest.NewRun(t)
subRemote := r.FremoteName
if !strings.HasSuffix(subRemote, ":") {
subRemote += "/"
}
subRemote += dataDir
dataFs, err := fs.NewFs(ctx, subRemote)
require.NoError(t, err)
if !download && !dataFs.Hashes().Contains(hashType) {
t.Skipf("%s lacks %s, skipping", dataFs, hashType)
}
makeFile := func(name, content string) fstest.Item {
remote := dataDir + "/" + name
return r.WriteObject(ctx, remote, content, t1)
}
makeSums := func(sums operations.HashSums) fstest.Item {
files := make([]string, 0, len(sums))
for name := range sums {
files = append(files, name)
}
sort.Strings(files)
buf := &bytes.Buffer{}
for _, name := range files {
_, _ = fmt.Fprintf(buf, "%s %s\n", sums[name], name)
}
return r.WriteObject(ctx, sumFile, buf.String(), t1)
}
sortLines := func(in string) []string {
if in == "" {
return []string{}
}
lines := strings.Split(in, "\n")
sort.Strings(lines)
return lines
}
checkResult := func(runNo int, want wantType, name string, out io.Writer) {
expected := want[name]
buf, ok := out.(*bytes.Buffer)
require.True(t, ok)
assert.Equal(t, sortLines(expected), sortLines(buf.String()), "wrong %s result in run %d", name, runNo)
}
checkRun := func(runNo, wantChecks, wantErrors int, want wantType) {
accounting.GlobalStats().ResetCounters()
opt := operations.CheckOpt{
Combined: new(bytes.Buffer),
Match: new(bytes.Buffer),
Differ: new(bytes.Buffer),
Error: new(bytes.Buffer),
MissingOnSrc: new(bytes.Buffer),
MissingOnDst: new(bytes.Buffer),
}
var err error
buf := bilib.CaptureOutput(func() {
err = operations.CheckSum(ctx, dataFs, r.Fremote, sumFile, hashType, &opt, download)
})
gotErrors := int(accounting.GlobalStats().GetErrors())
if wantErrors == 0 {
assert.NoError(t, err, "unexpected error in run %d", runNo)
}
if wantErrors > 0 {
assert.Error(t, err, "no expected error in run %d", runNo)
}
assert.Equal(t, wantErrors, gotErrors, "wrong error count in run %d", runNo)
gotChecks := int(accounting.GlobalStats().GetChecks())
if wantChecks > 0 || gotChecks > 0 {
assert.Contains(t, string(buf), "matching files", "missing matching files in run %d", runNo)
}
assert.Equal(t, wantChecks, gotChecks, "wrong number of checks in run %d", runNo)
checkResult(runNo, want, "combined", opt.Combined)
checkResult(runNo, want, "missingonsrc", opt.MissingOnSrc)
checkResult(runNo, want, "missingondst", opt.MissingOnDst)
checkResult(runNo, want, "match", opt.Match)
checkResult(runNo, want, "differ", opt.Differ)
checkResult(runNo, want, "error", opt.Error)
}
check := func(runNo, wantChecks, wantErrors int, wantResults wantType) {
runName := fmt.Sprintf("subtest%d", runNo)
t.Run(runName, func(t *testing.T) {
checkRun(runNo, wantChecks, wantErrors, wantResults)
})
}
file1 := makeFile("banana", testString1)
fcsums := makeSums(operations.HashSums{
"banana": testDigest1,
})
r.CheckRemoteItems(t, fcsums, file1)
check(1, 1, 0, wantType{
"combined": "= banana\n",
"missingonsrc": "",
"missingondst": "",
"match": "banana\n",
"differ": "",
"error": "",
})
file2 := makeFile("potato", testString2)
fcsums = makeSums(operations.HashSums{
"banana": testDigest1,
})
r.CheckRemoteItems(t, fcsums, file1, file2)
check(2, 2, 1, wantType{
"combined": "- potato\n= banana\n",
"missingonsrc": "potato\n",
"missingondst": "",
"match": "banana\n",
"differ": "",
"error": "",
})
fcsums = makeSums(operations.HashSums{
"banana": testDigest1,
"potato": testDigest2,
})
r.CheckRemoteItems(t, fcsums, file1, file2)
check(3, 2, 0, wantType{
"combined": "= potato\n= banana\n",
"missingonsrc": "",
"missingondst": "",
"match": "banana\npotato\n",
"differ": "",
"error": "",
})
fcsums = makeSums(operations.HashSums{
"banana": testDigest2,
"potato": testDigest2,
})
r.CheckRemoteItems(t, fcsums, file1, file2)
check(4, 2, 1, wantType{
"combined": "* banana\n= potato\n",
"missingonsrc": "",
"missingondst": "",
"match": "potato\n",
"differ": "banana\n",
"error": "",
})
fcsums = makeSums(operations.HashSums{
"banana": testDigest1,
"potato": testDigest2,
"orange": testDigest2,
})
r.CheckRemoteItems(t, fcsums, file1, file2)
check(5, 2, 1, wantType{
"combined": "+ orange\n= potato\n= banana\n",
"missingonsrc": "",
"missingondst": "orange\n",
"match": "banana\npotato\n",
"differ": "",
"error": "",
})
fcsums = makeSums(operations.HashSums{
"banana": testDigest1,
"potato": testDigest1,
"orange": testDigest2,
})
r.CheckRemoteItems(t, fcsums, file1, file2)
check(6, 2, 2, wantType{
"combined": "+ orange\n* potato\n= banana\n",
"missingonsrc": "",
"missingondst": "orange\n",
"match": "banana\n",
"differ": "potato\n",
"error": "",
})
// test mixed-case checksums
file1 = makeFile("banana", testString1)
file2 = makeFile("potato", testString2)
fcsums = makeSums(operations.HashSums{
"banana": testDigest1Upper,
"potato": testDigest2Mixed,
})
r.CheckRemoteItems(t, fcsums, file1, file2)
check(7, 2, 0, wantType{
"combined": "= banana\n= potato\n",
"missingonsrc": "",
"missingondst": "",
"match": "banana\npotato\n",
"differ": "",
"error": "",
})
}
func TestCheckSum(t *testing.T) {
testCheckSum(t, false)
}
func TestCheckSumDownload(t *testing.T) {
testCheckSum(t, true)
}
func TestApplyTransforms(t *testing.T) {
var (
hashType = hash.MD5
content = "Hello, World!"
hash = "65a8e27d8879283831b664bd8b7f0ad4"
nfc = norm.NFC.String(norm.NFD.String("測試_Русский___ě_áñ"))
nfd = norm.NFD.String(nfc)
nfcx2 = nfc + nfc
nfdx2 = nfd + nfd
both = nfc + nfd
upper = "HELLO, WORLD!"
lower = "hello, world!"
upperlowermixed = "HeLlO, wOrLd!"
)
testScenario := func(checkfileName, remotefileName, scenario string) {
r := fstest.NewRunIndividual(t)
ctx := context.Background()
ci := fs.GetConfig(ctx)
opt := operations.CheckOpt{}
remotefile := r.WriteObject(ctx, remotefileName, content, t2)
// test whether remote is capable of running test
entries, err := r.Fremote.List(ctx, "")
assert.NoError(t, err)
if entries.Len() == 1 && entries[0].Remote() != remotefileName {
t.Skipf("Fs is incapable of running test, skipping: %s (expected: %s (%s) actual: %s (%s))", scenario, remotefileName, detectEncoding(remotefileName), entries[0].Remote(), detectEncoding(entries[0].Remote()))
}
checkfile := r.WriteFile("test.sum", hash+" "+checkfileName, t2)
r.CheckLocalItems(t, checkfile)
assert.False(t, checkfileName == remotefile.Path, "Values match but should not: %s %s", checkfileName, remotefile.Path)
testname := scenario + " (without normalization)"
println(testname)
ci.NoUnicodeNormalization = true
ci.IgnoreCaseSync = false
accounting.GlobalStats().ResetCounters()
err = operations.CheckSum(ctx, r.Fremote, r.Flocal, "test.sum", hashType, &opt, true)
assert.Error(t, err, "no expected error for %s %v %v", testname, checkfileName, remotefileName)
testname = scenario + " (with normalization)"
println(testname)
ci.NoUnicodeNormalization = false
ci.IgnoreCaseSync = true
accounting.GlobalStats().ResetCounters()
err = operations.CheckSum(ctx, r.Fremote, r.Flocal, "test.sum", hashType, &opt, true)
assert.NoError(t, err, "unexpected error for %s %v %v", testname, checkfileName, remotefileName)
}
testScenario(upper, lower, "upper checkfile vs. lower remote")
testScenario(lower, upper, "lower checkfile vs. upper remote")
testScenario(lower, upperlowermixed, "lower checkfile vs. upperlowermixed remote")
testScenario(upperlowermixed, upper, "upperlowermixed checkfile vs. upper remote")
testScenario(nfd, nfc, "NFD checkfile vs. NFC remote")
testScenario(nfc, nfd, "NFC checkfile vs. NFD remote")
testScenario(nfdx2, both, "NFDx2 checkfile vs. both remote")
testScenario(nfcx2, both, "NFCx2 checkfile vs. both remote")
testScenario(both, nfdx2, "both checkfile vs. NFDx2 remote")
testScenario(both, nfcx2, "both checkfile vs. NFCx2 remote")
}
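// detectEncoding reports whether s is normalized as NFC, NFD, both or neither.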
func detectEncoding(s string) string {
if norm.NFC.IsNormalString(s) && norm.NFD.IsNormalString(s) {
return "BOTH"
}
if !norm.NFC.IsNormalString(s) && norm.NFD.IsNormalString(s) {
return "NFD"
}
if norm.NFC.IsNormalString(s) && !norm.NFD.IsNormalString(s) {
return "NFC"
}
return "OTHER"
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/operations/rc_test.go | fs/operations/rc_test.go | package operations_test
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"os"
"path"
"sort"
"strings"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/diskusage"
"github.com/rclone/rclone/lib/rest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
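// rcNewRun makes a test run for the given rc method name, skipping the test
// when a non local remote is in use, and registers the local and remote Fs in
// the cache so the rc calls can look them up by name.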
func rcNewRun(t *testing.T, method string) (*fstest.Run, *rc.Call) {
if *fstest.RemoteName != "" {
t.Skip("Skipping test on non local remote")
}
r := fstest.NewRun(t)
call := rc.Calls.Get(method)
assert.NotNil(t, call)
cache.Put(r.LocalName, r.Flocal)
cache.Put(r.FremoteName, r.Fremote)
return r, call
}
// operations/about: Return the space used on the remote
func TestRcAbout(t *testing.T) {
r, call := rcNewRun(t, "operations/about")
r.Mkdir(context.Background(), r.Fremote)
// Will get an error if remote doesn't support About
expectedErr := r.Fremote.Features().About == nil
in := rc.Params{
"fs": r.FremoteName,
}
out, err := call.Fn(context.Background(), in)
if expectedErr {
assert.Error(t, err)
return
}
require.NoError(t, err)
// Can't really check the output much!
assert.NotEqual(t, int64(0), out["Total"])
}
// operations/cleanup: Remove trashed files in the remote or path
func TestRcCleanup(t *testing.T) {
r, call := rcNewRun(t, "operations/cleanup")
in := rc.Params{
"fs": r.LocalName,
}
out, err := call.Fn(context.Background(), in)
require.Error(t, err)
assert.Equal(t, rc.Params(nil), out)
assert.Contains(t, err.Error(), "doesn't support cleanup")
}
// operations/copyfile: Copy a file from source remote to destination remote
func TestRcCopyfile(t *testing.T) {
r, call := rcNewRun(t, "operations/copyfile")
file1 := r.WriteFile("file1", "file1 contents", t1)
r.Mkdir(context.Background(), r.Fremote)
r.CheckLocalItems(t, file1)
r.CheckRemoteItems(t)
in := rc.Params{
"srcFs": r.LocalName,
"srcRemote": "file1",
"dstFs": r.FremoteName,
"dstRemote": "file1-renamed",
}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
assert.Equal(t, rc.Params(nil), out)
r.CheckLocalItems(t, file1)
file1.Path = "file1-renamed"
r.CheckRemoteItems(t, file1)
}
// operations/copyurl: Copy the URL to the object
func TestRcCopyurl(t *testing.T) {
r, call := rcNewRun(t, "operations/copyurl")
contents := "file1 contents\n"
file1 := r.WriteFile("file1", contents, t1)
r.Mkdir(context.Background(), r.Fremote)
r.CheckRemoteItems(t)
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
_, err := w.Write([]byte(contents))
assert.NoError(t, err)
}))
defer ts.Close()
in := rc.Params{
"fs": r.FremoteName,
"remote": "file1",
"url": ts.URL,
"autoFilename": false,
"noClobber": false,
}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
assert.Equal(t, rc.Params(nil), out)
in = rc.Params{
"fs": r.FremoteName,
"remote": "file1",
"url": ts.URL,
"autoFilename": false,
"noClobber": true,
}
out, err = call.Fn(context.Background(), in)
require.Error(t, err)
assert.Equal(t, rc.Params(nil), out)
urlFileName := "filename.txt"
in = rc.Params{
"fs": r.FremoteName,
"remote": "",
"url": ts.URL + "/" + urlFileName,
"autoFilename": true,
"noClobber": false,
}
out, err = call.Fn(context.Background(), in)
require.NoError(t, err)
assert.Equal(t, rc.Params(nil), out)
in = rc.Params{
"fs": r.FremoteName,
"remote": "",
"url": ts.URL,
"autoFilename": true,
"noClobber": false,
}
out, err = call.Fn(context.Background(), in)
require.Error(t, err)
assert.Equal(t, rc.Params(nil), out)
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1, fstest.NewItem(urlFileName, contents, t1)}, nil, fs.ModTimeNotSupported)
}
// operations/delete: Remove files in the path
func TestRcDelete(t *testing.T) {
r, call := rcNewRun(t, "operations/delete")
file1 := r.WriteObject(context.Background(), "small", "1234567890", t2) // 10 bytes
file2 := r.WriteObject(context.Background(), "medium", "------------------------------------------------------------", t1) // 60 bytes
file3 := r.WriteObject(context.Background(), "large", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes
r.CheckRemoteItems(t, file1, file2, file3)
in := rc.Params{
"fs": r.FremoteName,
}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
assert.Equal(t, rc.Params(nil), out)
r.CheckRemoteItems(t)
}
// operations/deletefile: Remove the single file pointed to
func TestRcDeletefile(t *testing.T) {
r, call := rcNewRun(t, "operations/deletefile")
file1 := r.WriteObject(context.Background(), "small", "1234567890", t2) // 10 bytes
file2 := r.WriteObject(context.Background(), "medium", "------------------------------------------------------------", t1) // 60 bytes
r.CheckRemoteItems(t, file1, file2)
in := rc.Params{
"fs": r.FremoteName,
"remote": "small",
}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
assert.Equal(t, rc.Params(nil), out)
r.CheckRemoteItems(t, file2)
}
// operations/list: List the given remote and path in JSON format.
func TestRcList(t *testing.T) {
r, call := rcNewRun(t, "operations/list")
file1 := r.WriteObject(context.Background(), "a", "a", t1)
file2 := r.WriteObject(context.Background(), "subdir/b", "bb", t2)
r.CheckRemoteItems(t, file1, file2)
in := rc.Params{
"fs": r.FremoteName,
"remote": "",
}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
list := out["list"].([]*operations.ListJSONItem)
assert.Equal(t, 2, len(list))
checkFile1 := func(got *operations.ListJSONItem) {
assert.WithinDuration(t, t1, got.ModTime.When, time.Second)
assert.Equal(t, "a", got.Path)
assert.Equal(t, "a", got.Name)
assert.Equal(t, int64(1), got.Size)
assert.Equal(t, "application/octet-stream", got.MimeType)
assert.Equal(t, false, got.IsDir)
}
checkFile1(list[0])
checkSubdir := func(got *operations.ListJSONItem) {
assert.Equal(t, "subdir", got.Path)
assert.Equal(t, "subdir", got.Name)
// assert.Equal(t, int64(-1), got.Size) // size can vary for directories
assert.Equal(t, "inode/directory", got.MimeType)
assert.Equal(t, true, got.IsDir)
}
checkSubdir(list[1])
in = rc.Params{
"fs": r.FremoteName,
"remote": "",
"opt": rc.Params{
"recurse": true,
},
}
out, err = call.Fn(context.Background(), in)
require.NoError(t, err)
list = out["list"].([]*operations.ListJSONItem)
assert.Equal(t, 3, len(list))
checkFile1(list[0])
checkSubdir(list[1])
checkFile2 := func(got *operations.ListJSONItem) {
assert.WithinDuration(t, t2, got.ModTime.When, time.Second)
assert.Equal(t, "subdir/b", got.Path)
assert.Equal(t, "b", got.Name)
assert.Equal(t, int64(2), got.Size)
assert.Equal(t, "application/octet-stream", got.MimeType)
assert.Equal(t, false, got.IsDir)
}
checkFile2(list[2])
}
// operations/stat: Stat the given remote and path in JSON format.
func TestRcStat(t *testing.T) {
r, call := rcNewRun(t, "operations/stat")
file1 := r.WriteObject(context.Background(), "subdir/a", "a", t1)
r.CheckRemoteItems(t, file1)
fetch := func(t *testing.T, remotePath string) *operations.ListJSONItem {
in := rc.Params{
"fs": r.FremoteName,
"remote": remotePath,
}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
return out["item"].(*operations.ListJSONItem)
}
t.Run("Root", func(t *testing.T) {
stat := fetch(t, "")
assert.Equal(t, "", stat.Path)
assert.Equal(t, "", stat.Name)
assert.Equal(t, int64(-1), stat.Size)
assert.Equal(t, "inode/directory", stat.MimeType)
assert.Equal(t, true, stat.IsDir)
})
t.Run("File", func(t *testing.T) {
stat := fetch(t, "subdir/a")
assert.WithinDuration(t, t1, stat.ModTime.When, time.Second)
assert.Equal(t, "subdir/a", stat.Path)
assert.Equal(t, "a", stat.Name)
assert.Equal(t, int64(1), stat.Size)
assert.Equal(t, "application/octet-stream", stat.MimeType)
assert.Equal(t, false, stat.IsDir)
})
t.Run("Subdir", func(t *testing.T) {
stat := fetch(t, "subdir")
assert.Equal(t, "subdir", stat.Path)
assert.Equal(t, "subdir", stat.Name)
// assert.Equal(t, int64(-1), stat.Size) // size can vary for directories
assert.Equal(t, "inode/directory", stat.MimeType)
assert.Equal(t, true, stat.IsDir)
})
t.Run("NotFound", func(t *testing.T) {
stat := fetch(t, "notfound")
assert.Nil(t, stat)
})
}
// operations/settier: Set the storage tier of a fs
func TestRcSetTier(t *testing.T) {
ctx := context.Background()
r, call := rcNewRun(t, "operations/settier")
if !r.Fremote.Features().SetTier {
t.Skip("settier not supported")
}
file1 := r.WriteObject(context.Background(), "file1", "file1 contents", t1)
r.CheckRemoteItems(t, file1)
// Because we don't know what the current tier options here are, let's
// just get the current tier, and reuse that
o, err := r.Fremote.NewObject(ctx, file1.Path)
require.NoError(t, err)
trr, ok := o.(fs.GetTierer)
require.True(t, ok)
ctier := trr.GetTier()
in := rc.Params{
"fs": r.FremoteName,
"tier": ctier,
}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
assert.Equal(t, rc.Params(nil), out)
}
// operations/settier: Set the storage tier of a file
func TestRcSetTierFile(t *testing.T) {
ctx := context.Background()
r, call := rcNewRun(t, "operations/settierfile")
if !r.Fremote.Features().SetTier {
t.Skip("settier not supported")
}
file1 := r.WriteObject(context.Background(), "file1", "file1 contents", t1)
r.CheckRemoteItems(t, file1)
// Because we don't know what the current tier options here are, let's
// just get the current tier, and reuse that
o, err := r.Fremote.NewObject(ctx, file1.Path)
require.NoError(t, err)
trr, ok := o.(fs.GetTierer)
require.True(t, ok)
ctier := trr.GetTier()
in := rc.Params{
"fs": r.FremoteName,
"remote": "file1",
"tier": ctier,
}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
assert.Equal(t, rc.Params(nil), out)
}
// operations/mkdir: Make a destination directory or container
func TestRcMkdir(t *testing.T) {
ctx := context.Background()
r, call := rcNewRun(t, "operations/mkdir")
r.Mkdir(context.Background(), r.Fremote)
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{}, []string{}, fs.GetModifyWindow(ctx, r.Fremote))
in := rc.Params{
"fs": r.FremoteName,
"remote": "subdir",
}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
assert.Equal(t, rc.Params(nil), out)
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{}, []string{"subdir"}, fs.GetModifyWindow(ctx, r.Fremote))
}
// operations/movefile: Move a file from source remote to destination remote
func TestRcMovefile(t *testing.T) {
r, call := rcNewRun(t, "operations/movefile")
file1 := r.WriteFile("file1", "file1 contents", t1)
r.Mkdir(context.Background(), r.Fremote)
r.CheckLocalItems(t, file1)
r.CheckRemoteItems(t)
in := rc.Params{
"srcFs": r.LocalName,
"srcRemote": "file1",
"dstFs": r.FremoteName,
"dstRemote": "file1-renamed",
}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
assert.Equal(t, rc.Params(nil), out)
r.CheckLocalItems(t)
file1.Path = "file1-renamed"
r.CheckRemoteItems(t, file1)
}
// operations/purge: Remove a directory or container and all of its contents
func TestRcPurge(t *testing.T) {
ctx := context.Background()
r, call := rcNewRun(t, "operations/purge")
file1 := r.WriteObject(context.Background(), "subdir/file1", "subdir/file1 contents", t1)
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1}, []string{"subdir"}, fs.GetModifyWindow(ctx, r.Fremote))
in := rc.Params{
"fs": r.FremoteName,
"remote": "subdir",
}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
assert.Equal(t, rc.Params(nil), out)
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{}, []string{}, fs.GetModifyWindow(ctx, r.Fremote))
}
// operations/rmdir: Remove an empty directory or container
func TestRcRmdir(t *testing.T) {
ctx := context.Background()
r, call := rcNewRun(t, "operations/rmdir")
r.Mkdir(context.Background(), r.Fremote)
assert.NoError(t, r.Fremote.Mkdir(context.Background(), "subdir"))
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{}, []string{"subdir"}, fs.GetModifyWindow(ctx, r.Fremote))
in := rc.Params{
"fs": r.FremoteName,
"remote": "subdir",
}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
assert.Equal(t, rc.Params(nil), out)
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{}, []string{}, fs.GetModifyWindow(ctx, r.Fremote))
}
// operations/rmdirs: Remove all the empty directories in the path
func TestRcRmdirs(t *testing.T) {
ctx := context.Background()
r, call := rcNewRun(t, "operations/rmdirs")
r.Mkdir(context.Background(), r.Fremote)
assert.NoError(t, r.Fremote.Mkdir(context.Background(), "subdir"))
assert.NoError(t, r.Fremote.Mkdir(context.Background(), "subdir/subsubdir"))
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{}, []string{"subdir", "subdir/subsubdir"}, fs.GetModifyWindow(ctx, r.Fremote))
in := rc.Params{
"fs": r.FremoteName,
"remote": "subdir",
}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
assert.Equal(t, rc.Params(nil), out)
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{}, []string{}, fs.GetModifyWindow(ctx, r.Fremote))
assert.NoError(t, r.Fremote.Mkdir(context.Background(), "subdir"))
assert.NoError(t, r.Fremote.Mkdir(context.Background(), "subdir/subsubdir"))
in = rc.Params{
"fs": r.FremoteName,
"remote": "subdir",
"leaveRoot": true,
}
out, err = call.Fn(context.Background(), in)
require.NoError(t, err)
assert.Equal(t, rc.Params(nil), out)
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{}, []string{"subdir"}, fs.GetModifyWindow(ctx, r.Fremote))
}
// operations/size: Count the number of bytes and files in remote
func TestRcSize(t *testing.T) {
r, call := rcNewRun(t, "operations/size")
file1 := r.WriteObject(context.Background(), "small", "1234567890", t2) // 10 bytes
file2 := r.WriteObject(context.Background(), "subdir/medium", "------------------------------------------------------------", t1) // 60 bytes
file3 := r.WriteObject(context.Background(), "subdir/subsubdir/large", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 50 bytes
r.CheckRemoteItems(t, file1, file2, file3)
in := rc.Params{
"fs": r.FremoteName,
}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
assert.Equal(t, rc.Params{
"count": int64(3),
"bytes": int64(120),
"sizeless": int64(0),
}, out)
}
// operations/publiclink: Create or retrieve a public link to the given file or folder.
func TestRcPublicLink(t *testing.T) {
r, call := rcNewRun(t, "operations/publiclink")
in := rc.Params{
"fs": r.FremoteName,
"remote": "",
"expire": "5m",
"unlink": false,
}
_, err := call.Fn(context.Background(), in)
require.Error(t, err)
assert.Contains(t, err.Error(), "doesn't support public links")
}
// operations/fsinfo: Return information about the remote
func TestRcFsInfo(t *testing.T) {
r, call := rcNewRun(t, "operations/fsinfo")
in := rc.Params{
"fs": r.FremoteName,
}
got, err := call.Fn(context.Background(), in)
require.NoError(t, err)
want := operations.GetFsInfo(r.Fremote)
assert.Equal(t, want.Name, got["Name"])
assert.Equal(t, want.Root, got["Root"])
assert.Equal(t, want.String, got["String"])
assert.Equal(t, float64(want.Precision), got["Precision"])
var hashes []any
for _, hash := range want.Hashes {
hashes = append(hashes, hash)
}
assert.Equal(t, hashes, got["Hashes"])
var features = map[string]any{}
for k, v := range want.Features {
features[k] = v
}
assert.Equal(t, features, got["Features"])
}
// operations/uploadfile: Tests that uploading a file succeeds
func TestUploadFile(t *testing.T) {
r, call := rcNewRun(t, "operations/uploadfile")
ctx := context.Background()
testFileName := "uploadfile-test.txt"
testFileContent := "Hello World"
r.WriteFile(testFileName, testFileContent, t1)
testItem1 := fstest.NewItem(testFileName, testFileContent, t1)
testItem2 := fstest.NewItem(path.Join("subdir", testFileName), testFileContent, t1)
currentFile, err := os.Open(path.Join(r.LocalName, testFileName))
require.NoError(t, err)
defer func() {
assert.NoError(t, currentFile.Close())
}()
formReader, contentType, _, err := rest.MultipartUpload(ctx, currentFile, url.Values{}, "file", testFileName)
require.NoError(t, err)
httpReq := httptest.NewRequest("POST", "/", formReader)
httpReq.Header.Add("Content-Type", contentType)
in := rc.Params{
"_request": httpReq,
"fs": r.FremoteName,
"remote": "",
}
_, err = call.Fn(context.Background(), in)
require.NoError(t, err)
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{testItem1}, nil, fs.ModTimeNotSupported)
assert.NoError(t, r.Fremote.Mkdir(context.Background(), "subdir"))
currentFile2, err := os.Open(path.Join(r.LocalName, testFileName))
require.NoError(t, err)
defer func() {
assert.NoError(t, currentFile2.Close())
}()
formReader, contentType, _, err = rest.MultipartUpload(ctx, currentFile2, url.Values{}, "file", testFileName)
require.NoError(t, err)
httpReq = httptest.NewRequest("POST", "/", formReader)
httpReq.Header.Add("Content-Type", contentType)
in = rc.Params{
"_request": httpReq,
"fs": r.FremoteName,
"remote": "subdir",
}
_, err = call.Fn(context.Background(), in)
require.NoError(t, err)
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{testItem1, testItem2}, nil, fs.ModTimeNotSupported)
}
// backend/command: Runs a backend command
func TestRcCommand(t *testing.T) {
r, call := rcNewRun(t, "backend/command")
in := rc.Params{
"fs": r.FremoteName,
"command": "noop",
"opt": map[string]string{
"echo": "true",
"blue": "",
},
"arg": []string{
"path1",
"path2",
},
}
got, err := call.Fn(context.Background(), in)
if err != nil {
assert.False(t, r.Fremote.Features().IsLocal, "mustn't fail on local remote")
assert.Contains(t, err.Error(), "command not found")
return
}
want := rc.Params{"result": map[string]any{
"arg": []string{
"path1",
"path2",
},
"name": "noop",
"opt": map[string]string{
"blue": "",
"echo": "true",
},
}}
assert.Equal(t, want, got)
errTxt := "explosion in the sausage factory"
in["opt"].(map[string]string)["error"] = errTxt
_, err = call.Fn(context.Background(), in)
assert.Error(t, err)
assert.Contains(t, err.Error(), errTxt)
}
// core/du: Returns disk usage of a locally attached disk
func TestRcDu(t *testing.T) {
ctx := context.Background()
_, call := rcNewRun(t, "core/du")
in := rc.Params{}
out, err := call.Fn(ctx, in)
if err == diskusage.ErrUnsupported {
t.Skip(err)
}
assert.NotEqual(t, "", out["dir"])
info := out["info"].(diskusage.Info)
assert.True(t, info.Total != 0)
assert.True(t, info.Total > info.Free)
assert.True(t, info.Total > info.Available)
assert.True(t, info.Free >= info.Available)
}
// operations/check: check the source and destination are the same
func TestRcCheck(t *testing.T) {
ctx := context.Background()
r, call := rcNewRun(t, "operations/check")
r.Mkdir(ctx, r.Fremote)
MD5SUMS := `
0ef726ce9b1a7692357ff70dd321d595 file1
deadbeefcafe00000000000000000000 subdir/file2
0386a8b8fcf672c326845c00ba41b9e2 subdir/subsubdir/file4
`
file1 := r.WriteBoth(ctx, "file1", "file1 contents", t1)
file2 := r.WriteFile("subdir/file2", MD5SUMS, t2)
file3 := r.WriteObject(ctx, "subdir/subsubdir/file3", "file3 contents", t3)
file4a := r.WriteFile("subdir/subsubdir/file4", "file4 contents", t3)
file4b := r.WriteObject(ctx, "subdir/subsubdir/file4", "file4 different contents", t3)
// operations.HashLister(ctx, hash.MD5, false, false, r.Fremote, os.Stdout)
r.CheckLocalItems(t, file1, file2, file4a)
r.CheckRemoteItems(t, file1, file3, file4b)
pstring := func(items ...fstest.Item) *[]string {
xs := make([]string, len(items))
for i, item := range items {
xs[i] = item.Path
}
return &xs
}
for _, testName := range []string{"Normal", "Download"} {
t.Run(testName, func(t *testing.T) {
in := rc.Params{
"srcFs": r.LocalName,
"dstFs": r.FremoteName,
"combined": true,
"missingOnSrc": true,
"missingOnDst": true,
"match": true,
"differ": true,
"error": true,
}
if testName == "Download" {
in["download"] = true
}
out, err := call.Fn(ctx, in)
require.NoError(t, err)
combined := []string{
"= " + file1.Path,
"+ " + file2.Path,
"- " + file3.Path,
"* " + file4a.Path,
}
sort.Strings(combined)
sort.Strings(*out["combined"].(*[]string))
want := rc.Params{
"missingOnSrc": pstring(file3),
"missingOnDst": pstring(file2),
"differ": pstring(file4a),
"error": pstring(),
"match": pstring(file1),
"combined": &combined,
"status": "3 differences found",
"success": false,
}
if testName == "Normal" {
want["hashType"] = "md5"
}
assert.Equal(t, want, out)
})
}
t.Run("CheckFile", func(t *testing.T) {
// The checksum file is treated as the source and srcFs is not used
in := rc.Params{
"dstFs": r.FremoteName,
"combined": true,
"missingOnSrc": true,
"missingOnDst": true,
"match": true,
"differ": true,
"error": true,
"checkFileFs": r.LocalName,
"checkFileRemote": file2.Path,
"checkFileHash": "md5",
}
out, err := call.Fn(ctx, in)
require.NoError(t, err)
combined := []string{
"= " + file1.Path,
"+ " + file2.Path,
"- " + file3.Path,
"* " + file4a.Path,
}
sort.Strings(combined)
sort.Strings(*out["combined"].(*[]string))
if strings.HasPrefix(out["status"].(string), "file not in") {
out["status"] = "file not in"
}
want := rc.Params{
"missingOnSrc": pstring(file3),
"missingOnDst": pstring(file2),
"differ": pstring(file4a),
"error": pstring(),
"match": pstring(file1),
"combined": &combined,
"hashType": "md5",
"status": "file not in",
"success": false,
}
assert.Equal(t, want, out)
})
}
// operations/hashsum: hashsum a directory
func TestRcHashsum(t *testing.T) {
ctx := context.Background()
r, call := rcNewRun(t, "operations/hashsum")
r.Mkdir(ctx, r.Fremote)
file1Contents := "file1 contents"
file1 := r.WriteBoth(ctx, "hashsum-file1", file1Contents, t1)
r.CheckLocalItems(t, file1)
r.CheckRemoteItems(t, file1)
hasher := hash.NewMultiHasher()
_, err := hasher.Write([]byte(file1Contents))
require.NoError(t, err)
for _, test := range []struct {
ht hash.Type
base64 bool
download bool
}{
{
ht: r.Fremote.Hashes().GetOne(),
}, {
ht: r.Fremote.Hashes().GetOne(),
base64: true,
}, {
ht: hash.Whirlpool,
base64: false,
download: true,
}, {
ht: hash.Whirlpool,
base64: true,
download: true,
},
} {
t.Run(fmt.Sprintf("hash=%v,base64=%v,download=%v", test.ht, test.base64, test.download), func(t *testing.T) {
file1Hash, err := hasher.SumString(test.ht, test.base64)
require.NoError(t, err)
in := rc.Params{
"fs": r.FremoteName,
"hashType": test.ht.String(),
"base64": test.base64,
"download": test.download,
}
out, err := call.Fn(ctx, in)
require.NoError(t, err)
assert.Equal(t, test.ht.String(), out["hashType"])
want := []string{
fmt.Sprintf("%s hashsum-file1", file1Hash),
}
assert.Equal(t, want, out["hashsum"])
})
}
}
// operations/hashsum: hashsum a single file
func TestRcHashsumFile(t *testing.T) {
ctx := context.Background()
r, call := rcNewRun(t, "operations/hashsum")
r.Mkdir(ctx, r.Fremote)
file1Contents := "file1 contents"
file1 := r.WriteBoth(ctx, "hashsum-file1", file1Contents, t1)
file2Contents := "file2 contents"
file2 := r.WriteBoth(ctx, "hashsum-file2", file2Contents, t1)
r.CheckLocalItems(t, file1, file2)
r.CheckRemoteItems(t, file1, file2)
// Make an fs pointing to just the file
fsString := path.Join(r.FremoteName, file1.Path)
in := rc.Params{
"fs": fsString,
"hashType": "MD5",
"download": true,
}
out, err := call.Fn(ctx, in)
require.NoError(t, err)
assert.Equal(t, "md5", out["hashType"])
assert.Equal(t, []string{"0ef726ce9b1a7692357ff70dd321d595 hashsum-file1"}, out["hashsum"])
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/operations/operations_test.go | fs/operations/operations_test.go | // Integration tests - test rclone by doing real transactions to a
// storage provider to and from the local disk.
//
// By default it will use a local fs, however you can provide a
// -remote option to use a different remote. The test_all.go script
// is a wrapper to call this for all the test remotes.
//
// FIXME not safe for concurrent running of tests until fs.Config is
// no longer a global
//
// NB When writing tests
//
// Make sure every series of writes to the remote has a
// fstest.CheckItems() before use. This makes sure the directory
// listing is consistent and stops cascading errors (an illustrative
// skeleton of this pattern follows TestMain below).
//
// Call accounting.GlobalStats().ResetCounters() before every fs.Sync() as it
// uses the error count internally.
package operations_test
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"net/http"
"net/http/httptest"
"os"
"regexp"
"strings"
"testing"
"time"
_ "github.com/rclone/rclone/backend/all" // import all backends
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/pacer"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/text/cases"
"golang.org/x/text/language"
)
// Some times used in the tests
var (
t1 = fstest.Time("2001-02-03T04:05:06.499999999Z")
t2 = fstest.Time("2011-12-25T12:59:59.123456789Z")
t3 = fstest.Time("2011-12-30T12:59:59.000000000Z")
)
// TestMain drives the tests
func TestMain(m *testing.M) {
fstest.TestMain(m)
}
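// An illustrative skeleton of the conventions described in the package comment
// above (the function name and paths are invented for the sketch; this is not
// one of the real tests): every series of writes is followed by a listing
// check, and the global stats are reset before an operation that relies on the
// error count.
func exampleTestSkeleton(ctx context.Context, t *testing.T, r *fstest.Run) {
	file1 := r.WriteObject(ctx, "dir/file", "contents", t1)
	r.CheckRemoteItems(t, file1) // make the remote listing consistent before going on
	accounting.GlobalStats().ResetCounters() // start the next operation with a clean error count
	require.NoError(t, operations.Mkdir(ctx, r.Fremote, "dir2"))
}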
func TestMkdir(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
err := operations.Mkdir(ctx, r.Fremote, "")
require.NoError(t, err)
fstest.CheckListing(t, r.Fremote, []fstest.Item{})
err = operations.Mkdir(ctx, r.Fremote, "")
require.NoError(t, err)
}
func TestLsd(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
file1 := r.WriteObject(ctx, "sub dir/hello world", "hello world", t1)
r.CheckRemoteItems(t, file1)
var buf bytes.Buffer
err := operations.ListDir(ctx, r.Fremote, &buf)
require.NoError(t, err)
res := buf.String()
assert.Contains(t, res, "sub dir\n")
}
func TestLs(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
file1 := r.WriteBoth(ctx, "potato2", "------------------------------------------------------------", t1)
file2 := r.WriteBoth(ctx, "empty space", "-", t2)
r.CheckRemoteItems(t, file1, file2)
var buf bytes.Buffer
err := operations.List(ctx, r.Fremote, &buf)
require.NoError(t, err)
res := buf.String()
assert.Contains(t, res, " 1 empty space\n")
assert.Contains(t, res, " 60 potato2\n")
}
func TestLsWithFilesFrom(t *testing.T) {
ctx := context.Background()
ctx, ci := fs.AddConfig(ctx)
r := fstest.NewRun(t)
file1 := r.WriteBoth(ctx, "potato2", "------------------------------------------------------------", t1)
file2 := r.WriteBoth(ctx, "empty space", "-", t2)
r.CheckRemoteItems(t, file1, file2)
// Set the --files-from equivalent
f, err := filter.NewFilter(nil)
require.NoError(t, err)
require.NoError(t, f.AddFile("potato2"))
require.NoError(t, f.AddFile("notfound"))
// Change the active filter
ctx = filter.ReplaceConfig(ctx, f)
var buf bytes.Buffer
err = operations.List(ctx, r.Fremote, &buf)
require.NoError(t, err)
assert.Equal(t, " 60 potato2\n", buf.String())
// Now try with --no-traverse
ci.NoTraverse = true
buf.Reset()
err = operations.List(ctx, r.Fremote, &buf)
require.NoError(t, err)
assert.Equal(t, " 60 potato2\n", buf.String())
}
func TestLsLong(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
file1 := r.WriteBoth(ctx, "potato2", "------------------------------------------------------------", t1)
file2 := r.WriteBoth(ctx, "empty space", "-", t2)
r.CheckRemoteItems(t, file1, file2)
var buf bytes.Buffer
err := operations.ListLong(ctx, r.Fremote, &buf)
require.NoError(t, err)
res := buf.String()
lines := strings.Split(strings.Trim(res, "\n"), "\n")
assert.Equal(t, 2, len(lines))
timeFormat := "2006-01-02 15:04:05.000000000"
precision := r.Fremote.Precision()
location := time.Now().Location()
checkTime := func(m, filename string, expected time.Time) {
modTime, err := time.ParseInLocation(timeFormat, m, location) // parse as localtime
if err != nil {
t.Errorf("Error parsing %q: %v", m, err)
} else {
fstest.AssertTimeEqualWithPrecision(t, filename, expected, modTime, precision)
}
}
m1 := regexp.MustCompile(`(?m)^ 1 (\d{4}-\d\d-\d\d \d\d:\d\d:\d\d\.\d{9}) empty space$`)
if ms := m1.FindStringSubmatch(res); ms == nil {
t.Errorf("empty space missing: %q", res)
} else {
checkTime(ms[1], "empty space", t2.Local())
}
m2 := regexp.MustCompile(`(?m)^ 60 (\d{4}-\d\d-\d\d \d\d:\d\d:\d\d\.\d{9}) potato2$`)
if ms := m2.FindStringSubmatch(res); ms == nil {
t.Errorf("potato2 missing: %q", res)
} else {
checkTime(ms[1], "potato2", t1.Local())
}
}
func TestHashSums(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
file1 := r.WriteBoth(ctx, "potato2", "------------------------------------------------------------", t1)
file2 := r.WriteBoth(ctx, "empty space", "-", t2)
r.CheckRemoteItems(t, file1, file2)
hashes := r.Fremote.Hashes()
var quickXorHash hash.Type
err := quickXorHash.Set("QuickXorHash")
require.NoError(t, err)
for _, test := range []struct {
name string
download bool
base64 bool
ht hash.Type
want []string
}{
{
ht: hash.MD5,
want: []string{
"336d5ebc5436534e61d16e63ddfca327 empty space\n",
"d6548b156ea68a4e003e786df99eee76 potato2\n",
},
},
{
ht: hash.MD5,
download: true,
want: []string{
"336d5ebc5436534e61d16e63ddfca327 empty space\n",
"d6548b156ea68a4e003e786df99eee76 potato2\n",
},
},
{
ht: hash.SHA1,
want: []string{
"3bc15c8aae3e4124dd409035f32ea2fd6835efc9 empty space\n",
"9dc7f7d3279715991a22853f5981df582b7f9f6d potato2\n",
},
},
{
ht: hash.SHA1,
download: true,
want: []string{
"3bc15c8aae3e4124dd409035f32ea2fd6835efc9 empty space\n",
"9dc7f7d3279715991a22853f5981df582b7f9f6d potato2\n",
},
},
{
ht: quickXorHash,
want: []string{
"2d00000000000000000000000100000000000000 empty space\n",
"4001dad296b6b4a52d6d694b67dad296b6b4a52d potato2\n",
},
},
{
ht: quickXorHash,
download: true,
want: []string{
"2d00000000000000000000000100000000000000 empty space\n",
"4001dad296b6b4a52d6d694b67dad296b6b4a52d potato2\n",
},
},
{
ht: quickXorHash,
base64: true,
want: []string{
"LQAAAAAAAAAAAAAAAQAAAAAAAAA= empty space\n",
"QAHa0pa2tKUtbWlLZ9rSlra0pS0= potato2\n",
},
},
{
ht: quickXorHash,
base64: true,
download: true,
want: []string{
"LQAAAAAAAAAAAAAAAQAAAAAAAAA= empty space\n",
"QAHa0pa2tKUtbWlLZ9rSlra0pS0= potato2\n",
},
},
} {
if !hashes.Contains(test.ht) {
continue
}
name := cases.Title(language.Und, cases.NoLower).String(test.ht.String())
if test.download {
name += "Download"
}
if test.base64 {
name += "Base64"
}
t.Run(name, func(t *testing.T) {
var buf bytes.Buffer
err := operations.HashLister(ctx, test.ht, test.base64, test.download, r.Fremote, &buf)
require.NoError(t, err)
res := buf.String()
for _, line := range test.want {
assert.Contains(t, res, line)
}
})
}
}
func TestHashSumsWithErrors(t *testing.T) {
ctx := context.Background()
memFs, err := fs.NewFs(ctx, ":memory:")
require.NoError(t, err)
// Make a test file
content := "-"
item1 := fstest.NewItem("file1", content, t1)
_ = fstests.PutTestContents(ctx, t, memFs, &item1, content, true)
// MemoryFS supports MD5
buf := &bytes.Buffer{}
err = operations.HashLister(ctx, hash.MD5, false, false, memFs, buf)
require.NoError(t, err)
assert.Contains(t, buf.String(), "336d5ebc5436534e61d16e63ddfca327 file1\n")
// MemoryFS can't do SHA1, but UNSUPPORTED must not appear in the output
buf.Reset()
err = operations.HashLister(ctx, hash.SHA1, false, false, memFs, buf)
require.NoError(t, err)
assert.NotContains(t, buf.String(), " UNSUPPORTED ")
// ERROR must not appear in the output either
assert.NotContains(t, buf.String(), " ERROR ")
// TODO mock an unreadable file
}
func TestHashStream(t *testing.T) {
reader := strings.NewReader("")
in := io.NopCloser(reader)
out := &bytes.Buffer{}
for _, test := range []struct {
input string
ht hash.Type
wantHex string
wantBase64 string
}{
{
input: "",
ht: hash.MD5,
wantHex: "d41d8cd98f00b204e9800998ecf8427e -\n",
wantBase64: "1B2M2Y8AsgTpgAmY7PhCfg== -\n",
},
{
input: "",
ht: hash.SHA1,
wantHex: "da39a3ee5e6b4b0d3255bfef95601890afd80709 -\n",
wantBase64: "2jmj7l5rSw0yVb_vlWAYkK_YBwk= -\n",
},
{
input: "Hello world!",
ht: hash.MD5,
wantHex: "86fb269d190d2c85f6e0468ceca42a20 -\n",
wantBase64: "hvsmnRkNLIX24EaM7KQqIA== -\n",
},
{
input: "Hello world!",
ht: hash.SHA1,
wantHex: "d3486ae9136e7856bc42212385ea797094475802 -\n",
wantBase64: "00hq6RNueFa8QiEjhep5cJRHWAI= -\n",
},
} {
reader.Reset(test.input)
require.NoError(t, operations.HashSumStream(test.ht, false, in, out))
assert.Equal(t, test.wantHex, out.String())
_, _ = reader.Seek(0, io.SeekStart)
out.Reset()
require.NoError(t, operations.HashSumStream(test.ht, true, in, out))
assert.Equal(t, test.wantBase64, out.String())
out.Reset()
}
}
func TestSuffixName(t *testing.T) {
ctx := context.Background()
ctx, ci := fs.AddConfig(ctx)
for _, test := range []struct {
remote string
suffix string
keepExt bool
want string
}{
{"test.txt", "", false, "test.txt"},
{"test.txt", "", true, "test.txt"},
{"test.txt", "-suffix", false, "test.txt-suffix"},
{"test.txt", "-suffix", true, "test-suffix.txt"},
{"test.txt.csv", "-suffix", false, "test.txt.csv-suffix"},
{"test.txt.csv", "-suffix", true, "test-suffix.txt.csv"},
{"test", "-suffix", false, "test-suffix"},
{"test", "-suffix", true, "test-suffix"},
{"test.html", "-suffix", true, "test-suffix.html"},
{"test.html.txt", "-suffix", true, "test-suffix.html.txt"},
{"test.csv.html.txt", "-suffix", true, "test-suffix.csv.html.txt"},
{"test.badext.csv.html.txt", "-suffix", true, "test.badext-suffix.csv.html.txt"},
{"test.badext", "-suffix", true, "test-suffix.badext"},
} {
ci.Suffix = test.suffix
ci.SuffixKeepExtension = test.keepExt
got := operations.SuffixName(ctx, test.remote)
assert.Equal(t, test.want, got, fmt.Sprintf("%+v", test))
}
}
func TestCount(t *testing.T) {
ctx := context.Background()
ctx, ci := fs.AddConfig(ctx)
r := fstest.NewRun(t)
file1 := r.WriteBoth(ctx, "potato2", "------------------------------------------------------------", t1)
file2 := r.WriteBoth(ctx, "empty space", "-", t2)
file3 := r.WriteBoth(ctx, "sub dir/potato3", "hello", t2)
r.CheckRemoteItems(t, file1, file2, file3)
// Check the MaxDepth too
ci.MaxDepth = 1
objects, size, sizeless, err := operations.Count(ctx, r.Fremote)
require.NoError(t, err)
assert.Equal(t, int64(2), objects)
assert.Equal(t, int64(61), size)
assert.Equal(t, int64(0), sizeless)
}
func TestDelete(t *testing.T) {
ctx := context.Background()
fi, err := filter.NewFilter(nil)
require.NoError(t, err)
fi.Opt.MaxSize = 60
ctx = filter.ReplaceConfig(ctx, fi)
r := fstest.NewRun(t)
file1 := r.WriteObject(ctx, "small", "1234567890", t2) // 10 bytes
file2 := r.WriteObject(ctx, "medium", "------------------------------------------------------------", t1) // 60 bytes
file3 := r.WriteObject(ctx, "large", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes
r.CheckRemoteItems(t, file1, file2, file3)
err = operations.Delete(ctx, r.Fremote)
require.NoError(t, err)
r.CheckRemoteItems(t, file3)
}
func isChunker(f fs.Fs) bool {
return strings.HasPrefix(f.Name(), "TestChunker")
}
func skipIfChunker(t *testing.T, f fs.Fs) {
if isChunker(f) {
t.Skip("Skipping test on chunker backend")
}
}
func TestMaxDelete(t *testing.T) {
ctx := context.Background()
ctx, ci := fs.AddConfig(ctx)
r := fstest.NewRun(t)
accounting.GlobalStats().ResetCounters()
ci.MaxDelete = 2
defer r.Finalise()
skipIfChunker(t, r.Fremote) // chunker does copy/delete on s3
file1 := r.WriteObject(ctx, "small", "1234567890", t2) // 10 bytes
file2 := r.WriteObject(ctx, "medium", "------------------------------------------------------------", t1) // 60 bytes
file3 := r.WriteObject(ctx, "large", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes
r.CheckRemoteItems(t, file1, file2, file3)
err := operations.Delete(ctx, r.Fremote)
require.Error(t, err)
objects, _, _, err := operations.Count(ctx, r.Fremote)
require.NoError(t, err)
assert.Equal(t, int64(1), objects)
}
// TestMaxDeleteSizeLargeFile checks the case where one of the files is larger than allowed
func TestMaxDeleteSizeLargeFile(t *testing.T) {
ctx := context.Background()
ctx, ci := fs.AddConfig(ctx)
r := fstest.NewRun(t)
accounting.GlobalStats().ResetCounters()
ci.MaxDeleteSize = 70
defer r.Finalise()
skipIfChunker(t, r.Fremote) // chunker does copy/delete on s3
file1 := r.WriteObject(ctx, "small", "1234567890", t2) // 10 bytes
file2 := r.WriteObject(ctx, "medium", "------------------------------------------------------------", t1) // 60 bytes
file3 := r.WriteObject(ctx, "large", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes
r.CheckRemoteItems(t, file1, file2, file3)
err := operations.Delete(ctx, r.Fremote)
require.Error(t, err)
r.CheckRemoteItems(t, file3)
}
func TestMaxDeleteSize(t *testing.T) {
ctx := context.Background()
ctx, ci := fs.AddConfig(ctx)
r := fstest.NewRun(t)
accounting.GlobalStats().ResetCounters()
ci.MaxDeleteSize = 160
defer r.Finalise()
skipIfChunker(t, r.Fremote) // chunker does copy/delete on s3
file1 := r.WriteObject(ctx, "small", "1234567890", t2) // 10 bytes
file2 := r.WriteObject(ctx, "medium", "------------------------------------------------------------", t1) // 60 bytes
file3 := r.WriteObject(ctx, "large", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes
r.CheckRemoteItems(t, file1, file2, file3)
err := operations.Delete(ctx, r.Fremote)
require.Error(t, err)
objects, _, _, err := operations.Count(ctx, r.Fremote)
require.NoError(t, err)
assert.Equal(t, int64(1), objects) // 10 or 100 bytes
}
func TestReadFile(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
contents := "A file to read the contents."
file := r.WriteObject(ctx, "ReadFile", contents, t1)
r.CheckRemoteItems(t, file)
o, err := r.Fremote.NewObject(ctx, file.Path)
require.NoError(t, err)
buf, err := operations.ReadFile(ctx, o)
require.NoError(t, err)
assert.Equal(t, contents, string(buf))
}
func TestRetry(t *testing.T) {
ctx := context.Background()
var i int
var err error
fn := func() error {
i--
if i <= 0 {
return nil
}
return err
}
i, err = 3, fmt.Errorf("Wrapped EOF is retriable: %w", io.EOF)
assert.Equal(t, nil, operations.Retry(ctx, nil, 5, fn))
assert.Equal(t, 0, i)
i, err = 10, pacer.RetryAfterError(errors.New("BANG"), 10*time.Millisecond)
assert.Equal(t, err, operations.Retry(ctx, nil, 5, fn))
assert.Equal(t, 5, i)
i, err = 10, fs.ErrorObjectNotFound
assert.Equal(t, fs.ErrorObjectNotFound, operations.Retry(ctx, nil, 5, fn))
assert.Equal(t, 9, i)
}
func TestCat(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
file1 := r.WriteBoth(ctx, "file1", "ABCDEFGHIJ", t1)
file2 := r.WriteBoth(ctx, "file2", "012345678", t2)
r.CheckRemoteItems(t, file1, file2)
for _, test := range []struct {
offset int64
count int64
separator string
a string
b string
}{
{0, -1, "", "ABCDEFGHIJ", "012345678"},
{0, 5, "", "ABCDE", "01234"},
{-3, -1, "", "HIJ", "678"},
{1, 3, "", "BCD", "123"},
{0, -1, "\n", "ABCDEFGHIJ", "012345678"},
} {
var buf bytes.Buffer
err := operations.Cat(ctx, r.Fremote, &buf, test.offset, test.count, []byte(test.separator))
require.NoError(t, err)
res := buf.String()
if res != test.a+test.separator+test.b+test.separator && res != test.b+test.separator+test.a+test.separator {
t.Errorf("Incorrect output from Cat(%d,%d,%s): %q", test.offset, test.count, test.separator, res)
}
}
}
func TestPurge(t *testing.T) {
ctx := context.Background()
r := fstest.NewRunIndividual(t) // make new container (azureblob has delayed mkdir after rmdir)
r.Mkdir(ctx, r.Fremote)
// Make some files and dirs
r.ForceMkdir(ctx, r.Fremote)
file1 := r.WriteObject(ctx, "A1/B1/C1/one", "aaa", t1)
//..and dirs we expect to delete
require.NoError(t, operations.Mkdir(ctx, r.Fremote, "A2"))
require.NoError(t, operations.Mkdir(ctx, r.Fremote, "A1/B2"))
require.NoError(t, operations.Mkdir(ctx, r.Fremote, "A1/B2/C2"))
require.NoError(t, operations.Mkdir(ctx, r.Fremote, "A1/B1/C3"))
require.NoError(t, operations.Mkdir(ctx, r.Fremote, "A3"))
require.NoError(t, operations.Mkdir(ctx, r.Fremote, "A3/B3"))
require.NoError(t, operations.Mkdir(ctx, r.Fremote, "A3/B3/C4"))
//..and one more file at the end
file2 := r.WriteObject(ctx, "A1/two", "bbb", t2)
fstest.CheckListingWithPrecision(
t,
r.Fremote,
[]fstest.Item{
file1, file2,
},
[]string{
"A1",
"A1/B1",
"A1/B1/C1",
"A2",
"A1/B2",
"A1/B2/C2",
"A1/B1/C3",
"A3",
"A3/B3",
"A3/B3/C4",
},
fs.GetModifyWindow(ctx, r.Fremote),
)
require.NoError(t, operations.Purge(ctx, r.Fremote, "A1/B1"))
fstest.CheckListingWithPrecision(
t,
r.Fremote,
[]fstest.Item{
file2,
},
[]string{
"A1",
"A2",
"A1/B2",
"A1/B2/C2",
"A3",
"A3/B3",
"A3/B3/C4",
},
fs.GetModifyWindow(ctx, r.Fremote),
)
require.NoError(t, operations.Purge(ctx, r.Fremote, ""))
fstest.CheckListingWithPrecision(
t,
r.Fremote,
[]fstest.Item{},
[]string{},
fs.GetModifyWindow(ctx, r.Fremote),
)
}
func TestRmdirsNoLeaveRoot(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
r.Mkdir(ctx, r.Fremote)
// Make some files and dirs we expect to keep
r.ForceMkdir(ctx, r.Fremote)
file1 := r.WriteObject(ctx, "A1/B1/C1/one", "aaa", t1)
//..and dirs we expect to delete
require.NoError(t, operations.Mkdir(ctx, r.Fremote, "A2"))
require.NoError(t, operations.Mkdir(ctx, r.Fremote, "A1/B2"))
require.NoError(t, operations.Mkdir(ctx, r.Fremote, "A1/B2/C2"))
require.NoError(t, operations.Mkdir(ctx, r.Fremote, "A1/B1/C3"))
require.NoError(t, operations.Mkdir(ctx, r.Fremote, "A3"))
require.NoError(t, operations.Mkdir(ctx, r.Fremote, "A3/B3"))
require.NoError(t, operations.Mkdir(ctx, r.Fremote, "A3/B3/C4"))
//..and one more file at the end
file2 := r.WriteObject(ctx, "A1/two", "bbb", t2)
fstest.CheckListingWithPrecision(
t,
r.Fremote,
[]fstest.Item{
file1, file2,
},
[]string{
"A1",
"A1/B1",
"A1/B1/C1",
"A2",
"A1/B2",
"A1/B2/C2",
"A1/B1/C3",
"A3",
"A3/B3",
"A3/B3/C4",
},
fs.GetModifyWindow(ctx, r.Fremote),
)
require.NoError(t, operations.Rmdirs(ctx, r.Fremote, "A3/B3/C4", false))
fstest.CheckListingWithPrecision(
t,
r.Fremote,
[]fstest.Item{
file1, file2,
},
[]string{
"A1",
"A1/B1",
"A1/B1/C1",
"A2",
"A1/B2",
"A1/B2/C2",
"A1/B1/C3",
"A3",
"A3/B3",
},
fs.GetModifyWindow(ctx, r.Fremote),
)
require.NoError(t, operations.Rmdirs(ctx, r.Fremote, "", false))
fstest.CheckListingWithPrecision(
t,
r.Fremote,
[]fstest.Item{
file1, file2,
},
[]string{
"A1",
"A1/B1",
"A1/B1/C1",
},
fs.GetModifyWindow(ctx, r.Fremote),
)
// Delete the files so we can remove everything including the root
for _, file := range []fstest.Item{file1, file2} {
o, err := r.Fremote.NewObject(ctx, file.Path)
require.NoError(t, err)
require.NoError(t, o.Remove(ctx))
}
require.NoError(t, operations.Rmdirs(ctx, r.Fremote, "", false))
fstest.CheckListingWithPrecision(
t,
r.Fremote,
[]fstest.Item{},
[]string{},
fs.GetModifyWindow(ctx, r.Fremote),
)
}
func TestRmdirsLeaveRoot(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
r.Mkdir(ctx, r.Fremote)
r.ForceMkdir(ctx, r.Fremote)
require.NoError(t, operations.Mkdir(ctx, r.Fremote, "A1"))
require.NoError(t, operations.Mkdir(ctx, r.Fremote, "A1/B1"))
require.NoError(t, operations.Mkdir(ctx, r.Fremote, "A1/B1/C1"))
fstest.CheckListingWithPrecision(
t,
r.Fremote,
[]fstest.Item{},
[]string{
"A1",
"A1/B1",
"A1/B1/C1",
},
fs.GetModifyWindow(ctx, r.Fremote),
)
require.NoError(t, operations.Rmdirs(ctx, r.Fremote, "A1", true))
fstest.CheckListingWithPrecision(
t,
r.Fremote,
[]fstest.Item{},
[]string{
"A1",
},
fs.GetModifyWindow(ctx, r.Fremote),
)
}
func TestRmdirsWithFilter(t *testing.T) {
ctx := context.Background()
ctx, fi := filter.AddConfig(ctx)
require.NoError(t, fi.AddRule("+ /A1/B1/**"))
require.NoError(t, fi.AddRule("- *"))
r := fstest.NewRun(t)
r.Mkdir(ctx, r.Fremote)
r.ForceMkdir(ctx, r.Fremote)
require.NoError(t, operations.Mkdir(ctx, r.Fremote, "A1"))
require.NoError(t, operations.Mkdir(ctx, r.Fremote, "A1/B1"))
require.NoError(t, operations.Mkdir(ctx, r.Fremote, "A1/B1/C1"))
fstest.CheckListingWithPrecision(
t,
r.Fremote,
[]fstest.Item{},
[]string{
"A1",
"A1/B1",
"A1/B1/C1",
},
fs.GetModifyWindow(ctx, r.Fremote),
)
require.NoError(t, operations.Rmdirs(ctx, r.Fremote, "", false))
fstest.CheckListingWithPrecision(
t,
r.Fremote,
[]fstest.Item{},
[]string{
"A1",
},
fs.GetModifyWindow(ctx, r.Fremote),
)
}
func TestCopyURL(t *testing.T) {
ctx := context.Background()
ctx, ci := fs.AddConfig(ctx)
r := fstest.NewRun(t)
contents := "file contents\n"
file1 := r.WriteFile("file1", contents, t1)
file2 := r.WriteFile("file2", contents, t1)
r.Mkdir(ctx, r.Fremote)
r.CheckRemoteItems(t)
// check when reading from regular HTTP server
status := 0
nameHeader := false
headerFilename := "headerfilename.txt"
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if status != 0 {
http.Error(w, "an error occurred", status)
}
if nameHeader {
w.Header().Set("Content-Disposition", `attachment; filename="folder\`+headerFilename+`"`)
}
_, err := w.Write([]byte(contents))
assert.NoError(t, err)
})
ts := httptest.NewServer(handler)
defer ts.Close()
o, err := operations.CopyURL(ctx, r.Fremote, "file1", ts.URL, false, false, false)
require.NoError(t, err)
assert.Equal(t, int64(len(contents)), o.Size())
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1}, nil, fs.ModTimeNotSupported)
// Check file clobbering
_, err = operations.CopyURL(ctx, r.Fremote, "file1", ts.URL, false, false, true)
require.Error(t, err)
// Check auto file naming
status = 0
urlFileName := "filename.txt"
o, err = operations.CopyURL(ctx, r.Fremote, "", ts.URL+"/"+urlFileName, true, false, false)
require.NoError(t, err)
assert.Equal(t, int64(len(contents)), o.Size())
assert.Equal(t, urlFileName, o.Remote())
// Check header file naming
nameHeader = true
o, err = operations.CopyURL(ctx, r.Fremote, "", ts.URL, true, true, false)
require.NoError(t, err)
assert.Equal(t, int64(len(contents)), o.Size())
assert.Equal(t, headerFilename, o.Remote())
// Check auto file naming when the URL has no file name
_, err = operations.CopyURL(ctx, r.Fremote, "file1", ts.URL, true, false, false)
require.Error(t, err)
// Check header file naming without header set
nameHeader = false
_, err = operations.CopyURL(ctx, r.Fremote, "file1", ts.URL, true, true, false)
require.Error(t, err)
// Check an error is returned for a 404
status = http.StatusNotFound
o, err = operations.CopyURL(ctx, r.Fremote, "file1", ts.URL, false, false, false)
require.Error(t, err)
assert.Contains(t, err.Error(), "Not Found")
assert.Nil(t, o)
status = 0
// check when reading from unverified HTTPS server
ci.InsecureSkipVerify = true
fshttp.ResetTransport()
defer fshttp.ResetTransport()
tss := httptest.NewTLSServer(handler)
defer tss.Close()
o, err = operations.CopyURL(ctx, r.Fremote, "file2", tss.URL, false, false, false)
require.NoError(t, err)
assert.Equal(t, int64(len(contents)), o.Size())
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1, file2, fstest.NewItem(urlFileName, contents, t1), fstest.NewItem(headerFilename, contents, t1)}, nil, fs.ModTimeNotSupported)
}
func TestCopyURLToWriter(t *testing.T) {
ctx := context.Background()
contents := "file contents\n"
// check when reading from regular HTTP server
status := 0
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if status != 0 {
http.Error(w, "an error occurred", status)
return
}
_, err := w.Write([]byte(contents))
assert.NoError(t, err)
})
ts := httptest.NewServer(handler)
defer ts.Close()
// test normal fetch
var buf bytes.Buffer
err := operations.CopyURLToWriter(ctx, ts.URL, &buf)
require.NoError(t, err)
assert.Equal(t, contents, buf.String())
// test fetch with error
status = http.StatusNotFound
buf.Reset()
err = operations.CopyURLToWriter(ctx, ts.URL, &buf)
require.Error(t, err)
assert.Contains(t, err.Error(), "Not Found")
assert.Equal(t, 0, len(buf.String()))
}
func TestMoveFile(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
file1 := r.WriteFile("file1", "file1 contents", t1)
r.CheckLocalItems(t, file1)
file2 := file1
file2.Path = "sub/file2"
err := operations.MoveFile(ctx, r.Fremote, r.Flocal, file2.Path, file1.Path)
require.NoError(t, err)
r.CheckLocalItems(t)
r.CheckRemoteItems(t, file2)
r.WriteFile("file1", "file1 contents", t1)
r.CheckLocalItems(t, file1)
err = operations.MoveFile(ctx, r.Fremote, r.Flocal, file2.Path, file1.Path)
require.NoError(t, err)
r.CheckLocalItems(t)
r.CheckRemoteItems(t, file2)
err = operations.MoveFile(ctx, r.Fremote, r.Fremote, file2.Path, file2.Path)
require.NoError(t, err)
r.CheckLocalItems(t)
r.CheckRemoteItems(t, file2)
}
func TestMoveFileWithIgnoreExisting(t *testing.T) {
ctx := context.Background()
ctx, ci := fs.AddConfig(ctx)
r := fstest.NewRun(t)
file1 := r.WriteFile("file1", "file1 contents", t1)
r.CheckLocalItems(t, file1)
ci.IgnoreExisting = true
err := operations.MoveFile(ctx, r.Fremote, r.Flocal, file1.Path, file1.Path)
require.NoError(t, err)
r.CheckLocalItems(t)
r.CheckRemoteItems(t, file1)
// Recreate file with updated content
file1b := r.WriteFile("file1", "file1 modified", t2)
r.CheckLocalItems(t, file1b)
// Ensure modified file did not transfer and was not deleted
err = operations.MoveFile(ctx, r.Fremote, r.Flocal, file1.Path, file1b.Path)
require.NoError(t, err)
r.CheckLocalItems(t, file1b)
r.CheckRemoteItems(t, file1)
}
func TestCaseInsensitiveMoveFile(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
if !r.Fremote.Features().CaseInsensitive {
return
}
file1 := r.WriteFile("file1", "file1 contents", t1)
r.CheckLocalItems(t, file1)
file2 := file1
file2.Path = "sub/file2"
err := operations.MoveFile(ctx, r.Fremote, r.Flocal, file2.Path, file1.Path)
require.NoError(t, err)
r.CheckLocalItems(t)
r.CheckRemoteItems(t, file2)
r.WriteFile("file1", "file1 contents", t1)
r.CheckLocalItems(t, file1)
err = operations.MoveFile(ctx, r.Fremote, r.Flocal, file2.Path, file1.Path)
require.NoError(t, err)
r.CheckLocalItems(t)
r.CheckRemoteItems(t, file2)
file2Capitalized := file2
file2Capitalized.Path = "sub/File2"
err = operations.MoveFile(ctx, r.Fremote, r.Fremote, file2Capitalized.Path, file2.Path)
require.NoError(t, err)
r.CheckLocalItems(t)
r.CheckRemoteItems(t, file2Capitalized)
}
func TestCaseInsensitiveMoveFileDryRun(t *testing.T) {
ctx := context.Background()
ctx, ci := fs.AddConfig(ctx)
r := fstest.NewRun(t)
if !r.Fremote.Features().CaseInsensitive {
return
}
file1 := r.WriteObject(ctx, "hello", "world", t1)
r.CheckRemoteItems(t, file1)
ci.DryRun = true
err := operations.MoveFile(ctx, r.Fremote, r.Fremote, "HELLO", file1.Path)
require.NoError(t, err)
r.CheckRemoteItems(t, file1)
}
func TestMoveFileBackupDir(t *testing.T) {
ctx := context.Background()
ctx, ci := fs.AddConfig(ctx)
r := fstest.NewRun(t)
if !operations.CanServerSideMove(r.Fremote) {
t.Skip("Skipping test as remote does not support server-side move or copy")
}
ci.BackupDir = r.FremoteName + "/backup"
file1 := r.WriteFile("dst/file1", "file1 contents", t1)
r.CheckLocalItems(t, file1)
file1old := r.WriteObject(ctx, "dst/file1", "file1 contents old", t1)
r.CheckRemoteItems(t, file1old)
err := operations.MoveFile(ctx, r.Fremote, r.Flocal, file1.Path, file1.Path)
require.NoError(t, err)
r.CheckLocalItems(t)
file1old.Path = "backup/dst/file1"
r.CheckRemoteItems(t, file1old, file1)
}
// testFsInfo is for unit testing fs.Info
type testFsInfo struct {
name string
root string
stringVal string
precision time.Duration
hashes hash.Set
features fs.Features
}
// Name of the remote (as passed into NewFs)
func (i *testFsInfo) Name() string { return i.name }
// Root of the remote (as passed into NewFs)
func (i *testFsInfo) Root() string { return i.root }
// String returns a description of the FS
func (i *testFsInfo) String() string { return i.stringVal }
// Precision of the ModTimes in this Fs
func (i *testFsInfo) Precision() time.Duration { return i.precision }
// Returns the supported hash types of the filesystem
func (i *testFsInfo) Hashes() hash.Set { return i.hashes }
// Returns the optional features of this Fs
func (i *testFsInfo) Features() *fs.Features { return &i.features }
func TestSameConfig(t *testing.T) {
a := &testFsInfo{name: "name", root: "root"}
for _, test := range []struct {
name string
root string
expected bool
}{
{"name", "root", true},
{"name", "rooty", true},
{"namey", "root", false},
{"namey", "roott", false},
} {
b := &testFsInfo{name: test.name, root: test.root}
actual := operations.SameConfig(a, b)
assert.Equal(t, test.expected, actual)
actual = operations.SameConfig(b, a)
assert.Equal(t, test.expected, actual)
}
}
func TestSame(t *testing.T) {
a := &testFsInfo{name: "name", root: "root"}
for _, test := range []struct {
name string
root string
expected bool
}{
{"name", "root", true},
{"name", "rooty", false},
{"namey", "root", false},
{"namey", "roott", false},
} {
b := &testFsInfo{name: test.name, root: test.root}
actual := operations.Same(a, b)
assert.Equal(t, test.expected, actual)
actual = operations.Same(b, a)
assert.Equal(t, test.expected, actual)
}
}
// testFs is for unit testing fs.Fs
type testFs struct {
testFsInfo
}
func (i *testFs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return nil, nil
}
func (i *testFs) NewObject(ctx context.Context, remote string) (fs.Object, error) { return nil, nil }
func (i *testFs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return nil, nil
}
func (i *testFs) Mkdir(ctx context.Context, dir string) error { return nil }
func (i *testFs) Rmdir(ctx context.Context, dir string) error { return nil }
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | true |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/operations/logger.go | fs/operations/logger.go | package operations
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"strings"
mutex "sync"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/spf13/pflag"
)
// Sigil represents the rune (-+=*!?) used by Logger to categorize files by their match/differ/missing status.
type Sigil rune
// String converts sigil to more human-readable string
func (sigil Sigil) String() string {
switch sigil {
case '-':
return "MissingOnSrc"
case '+':
return "MissingOnDst"
case '=':
return "Match"
case '*':
return "Differ"
case '!':
return "Error"
// case '.':
// return "Completed"
case '?':
return "Other"
}
return "unknown"
}
// Writer directs traffic from sigil -> LoggerOpt.Writer
func (sigil Sigil) Writer(opt LoggerOpt) io.Writer {
switch sigil {
case '-':
return opt.MissingOnSrc
case '+':
return opt.MissingOnDst
case '=':
return opt.Match
case '*':
return opt.Differ
case '!':
return opt.Error
}
return nil
}
// Sigil constants
const (
MissingOnSrc Sigil = '-'
MissingOnDst Sigil = '+'
Match Sigil = '='
Differ Sigil = '*'
TransferError Sigil = '!'
Other Sigil = '?' // reserved but not currently used
)
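// A minimal sketch of how the pieces above fit together (the function name is
// invented for illustration and only identifiers defined in this file are
// used): each Sigil has a human-readable name via String() and is routed to
// the matching LoggerOpt writer via Writer().
func exampleSigilRouting() {
	opt := NewLoggerOpt()
	for _, sigil := range []Sigil{MissingOnSrc, MissingOnDst, Match, Differ, TransferError, Other} {
		// Other has no dedicated writer, so Writer returns nil for it
		fmt.Printf("%c %-12s writer set: %v\n", sigil, sigil.String(), sigil.Writer(opt) != nil)
	}
}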
// LoggerFn uses fs.DirEntry instead of fs.Object so it can include Dirs
// For LoggerFn example, see bisync.WriteResults() or sync.SyncLoggerFn()
// Usage example: s.logger(ctx, operations.Differ, src, dst, nil)
type LoggerFn func(ctx context.Context, sigil Sigil, src, dst fs.DirEntry, err error)
type loggerContextKey struct{}
type loggerOptContextKey struct{}
var loggerKey = loggerContextKey{}
var loggerOptKey = loggerOptContextKey{}
// LoggerOpt contains options for the Sync Logger functions
// TODO: refactor Check in here too?
type LoggerOpt struct {
// Fdst, Fsrc fs.Fs // fses to check
// Check checkFn // function to use for checking
// OneWay bool // one way only?
LoggerFn LoggerFn // function to use for logging
Combined io.Writer // a file with file names with leading sigils
MissingOnSrc io.Writer // files only in the destination
MissingOnDst io.Writer // files only in the source
Match io.Writer // matching files
Differ io.Writer // differing files
Error io.Writer // files with errors of some kind
DestAfter io.Writer // files that exist on the destination post-sync
JSON *bytes.Buffer // used by bisync to read/write struct as JSON
DeleteModeOff bool // affects whether Logger expects MissingOnSrc to be deleted
// lsf options for destAfter
ListFormat ListFormat
JSONOpt ListJSONOpt
LJ *listJSON
Format string
TimeFormat string
Separator string
DirSlash bool
// Recurse bool
HashType hash.Type
FilesOnly bool
DirsOnly bool
Csv bool
Absolute bool
}
// NewDefaultLoggerFn creates a logger function that writes the sigil and path to configured files that match the sigil
func NewDefaultLoggerFn(opt *LoggerOpt) LoggerFn {
var lock mutex.Mutex
return func(ctx context.Context, sigil Sigil, src, dst fs.DirEntry, err error) {
lock.Lock()
defer lock.Unlock()
if err == fs.ErrorIsDir && !opt.FilesOnly && opt.DestAfter != nil {
opt.PrintDestAfter(ctx, sigil, src, dst, err)
return
}
_, srcOk := src.(fs.Object)
_, dstOk := dst.(fs.Object)
var filename string
if !srcOk && !dstOk {
return
} else if srcOk && !dstOk {
filename = src.String()
} else {
filename = dst.String()
}
if sigil.Writer(*opt) != nil {
SyncFprintf(sigil.Writer(*opt), "%s\n", filename)
}
if opt.Combined != nil {
SyncFprintf(opt.Combined, "%c %s\n", sigil, filename)
fs.Debugf(nil, "Sync Logger: %s: %c %s\n", sigil.String(), sigil, filename)
}
if opt.DestAfter != nil {
opt.PrintDestAfter(ctx, sigil, src, dst, err)
}
}
}
// WithLogger stores logger in ctx and returns a copy of ctx in which loggerKey = logger
func WithLogger(ctx context.Context, logger LoggerFn) context.Context {
return context.WithValue(ctx, loggerKey, logger)
}
// WithLoggerOpt stores loggerOpt in ctx and returns a copy of ctx in which loggerOptKey = loggerOpt
func WithLoggerOpt(ctx context.Context, loggerOpt LoggerOpt) context.Context {
return context.WithValue(ctx, loggerOptKey, loggerOpt)
}
// GetLogger attempts to retrieve LoggerFn from context, returns it if found, otherwise returns a no-op function
func GetLogger(ctx context.Context) (LoggerFn, bool) {
logger, ok := ctx.Value(loggerKey).(LoggerFn)
if !ok {
logger = func(ctx context.Context, sigil Sigil, src, dst fs.DirEntry, err error) {}
}
return logger, ok
}
// GetLoggerOpt attempts to retrieve LoggerOpt from context, returns it if found, otherwise returns NewLoggerOpt()
func GetLoggerOpt(ctx context.Context) LoggerOpt {
loggerOpt, ok := ctx.Value(loggerOptKey).(LoggerOpt)
if ok {
return loggerOpt
}
return NewLoggerOpt()
}
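// A small usage sketch (the function name is invented for illustration):
// GetLogger never returns a nil function. When nothing was stored in the
// context it hands back a no-op and ok == false, so callers may invoke the
// result unconditionally.
func exampleGetLoggerFallback(ctx context.Context) {
	logger, ok := GetLogger(ctx)
	logger(ctx, Other, nil, nil, nil) // safe even when ok is false
	fs.Debugf(nil, "logger found in context: %v", ok)
}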
// WithSyncLogger starts a new logger with the options passed in and saves it to ctx for retrieval later
func WithSyncLogger(ctx context.Context, opt LoggerOpt) context.Context {
ctx = WithLoggerOpt(ctx, opt)
return WithLogger(ctx, func(ctx context.Context, sigil Sigil, src, dst fs.DirEntry, err error) {
if opt.LoggerFn != nil {
opt.LoggerFn(ctx, sigil, src, dst, err)
} else {
SyncFprintf(opt.Combined, "%c %s\n", sigil, dst.Remote())
}
})
}
// NewLoggerOpt returns a new LoggerOpt struct with defaults
func NewLoggerOpt() LoggerOpt {
opt := LoggerOpt{
Combined: new(bytes.Buffer),
MissingOnSrc: new(bytes.Buffer),
MissingOnDst: new(bytes.Buffer),
Match: new(bytes.Buffer),
Differ: new(bytes.Buffer),
Error: new(bytes.Buffer),
DestAfter: new(bytes.Buffer),
JSON: new(bytes.Buffer),
}
return opt
}
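// An illustrative wiring sketch (names invented for the example; src and dst
// are assumed to be fs.Object entries supplied by the caller): build a
// LoggerOpt backed by in-memory buffers, attach the default logger function,
// store both in the context with WithSyncLogger, then read the combined
// report. DestAfter is disabled here because it would also need NewListJSON
// to be set up.
func exampleSyncLoggerWiring(ctx context.Context, src, dst fs.DirEntry) string {
	opt := NewLoggerOpt()
	opt.DestAfter = nil
	opt.LoggerFn = NewDefaultLoggerFn(&opt)
	ctx = WithSyncLogger(ctx, opt)
	logger, _ := GetLogger(ctx)
	logger(ctx, Differ, src, dst, nil) // writes "* <name>" to opt.Combined
	return opt.Combined.(*bytes.Buffer).String()
}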
// Winner predicts which side (src or dst) should end up winning out on the dst.
type Winner struct {
Obj fs.DirEntry // the object that should exist on dst post-sync, if any
Side string // whether the winning object was from the src or dst
Err error // whether there's an error preventing us from predicting winner correctly (not whether there was a sync error more generally)
}
// WinningSide can be called in a LoggerFn to predict what the dest will look like post-sync
//
// This attempts to account for every case in which dst (intentionally) does not match src after a sync.
//
// Known issues / cases we can't confidently predict yet:
//
// --max-duration / CutoffModeHard
// --compare-dest / --copy-dest (because equal() is called multiple times for the same file)
// server-side moves of an entire dir at once (because we never get the individual file objects in the dir)
// High-level retries, because there would be dupes (use --retries 1 to disable)
// Possibly some error scenarios
func WinningSide(ctx context.Context, sigil Sigil, src, dst fs.DirEntry, err error) Winner {
winner := Winner{nil, "none", nil}
opt := GetLoggerOpt(ctx)
ci := fs.GetConfig(ctx)
if err == fs.ErrorIsDir {
winner.Err = err
if sigil == MissingOnSrc {
if (opt.DeleteModeOff || ci.DryRun) && dst != nil {
winner.Obj = dst
winner.Side = "dst" // whatever's on dst will remain so after DryRun
return winner
}
return winner // none, because dst should just get deleted
}
if sigil == MissingOnDst && ci.DryRun {
return winner // none, because it does not currently exist on dst, and will still not exist after DryRun
} else if ci.DryRun && dst != nil {
winner.Obj = dst
winner.Side = "dst"
} else if src != nil {
winner.Obj = src
winner.Side = "src"
}
return winner
}
_, srcOk := src.(fs.Object)
_, dstOk := dst.(fs.Object)
if !srcOk && !dstOk {
return winner // none, because we don't have enough info to continue.
}
switch sigil {
case MissingOnSrc:
if opt.DeleteModeOff || ci.DryRun { // i.e. it's a copy, not sync (or it's a DryRun)
winner.Obj = dst
winner.Side = "dst" // whatever's on dst will remain so after DryRun
return winner
}
return winner // none, because dst should just get deleted
case Match, Differ, MissingOnDst:
if sigil == MissingOnDst && ci.DryRun {
return winner // none, because it does not currently exist on dst, and will still not exist after DryRun
}
winner.Obj = src
winner.Side = "src" // presume dst will end up matching src unless changed below
if sigil == Match && (ci.SizeOnly || ci.CheckSum || ci.IgnoreSize || ci.UpdateOlder || ci.NoUpdateModTime) {
winner.Obj = dst
winner.Side = "dst" // ignore any differences with src because of user flags
}
if ci.IgnoreTimes {
winner.Obj = src
winner.Side = "src" // copy src to dst unconditionally
}
if (sigil == Match || sigil == Differ) && (ci.IgnoreExisting || ci.Immutable) {
winner.Obj = dst
winner.Side = "dst" // dst should remain unchanged if it already exists (and we know it does because it's Match or Differ)
}
if ci.DryRun {
winner.Obj = dst
winner.Side = "dst" // dst should remain unchanged after DryRun (note that we handled MissingOnDst earlier)
}
return winner
case TransferError:
winner.Obj = dst
winner.Side = "dst" // usually, dst should not change if there's an error
if dst == nil {
winner.Obj = src
winner.Side = "src" // but if for some reason we have a src and not a dst, go with it
}
if winner.Obj != nil {
// NB the --max-duration failure is matched on its error message as no comparable sentinel error is available in this package
if errors.Is(err, context.DeadlineExceeded) || (err != nil && strings.Contains(err.Error(), "max transfer duration reached as set by --max-duration")) {
winner.Err = err // we can't confidently predict what survives if CutoffModeHard
}
return winner // we know at least one of the objects
}
}
// should only make it this far if it's TransferError and both src and dst are nil
winner.Side = "none"
winner.Err = fmt.Errorf("unknown case -- can't determine winner. %v", err)
fs.Debugf(winner.Obj, "%v", winner.Err)
return winner
}
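// A hedged sketch of the typical caller (the function name is invented for
// illustration): a LoggerFn that classifies each src/dst pair with WinningSide
// and logs which side is expected to survive on the destination after the sync.
func exampleWinningSideLogger() LoggerFn {
	return func(ctx context.Context, sigil Sigil, src, dst fs.DirEntry, err error) {
		winner := WinningSide(ctx, sigil, src, dst, err)
		if winner.Err != nil {
			fs.Debugf(winner.Obj, "winner uncertain for %s: %v", sigil.String(), winner.Err)
			return
		}
		fs.Debugf(winner.Obj, "%s: expect %q side on dst", sigil.String(), winner.Side)
	}
}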
// SetListFormat sets opt.ListFormat for destAfter
// TODO: possibly refactor duplicate code from cmd/lsf, where this is mostly copied from
func (opt *LoggerOpt) SetListFormat(ctx context.Context, cmdFlags *pflag.FlagSet) {
// Work out if the separatorFlag was supplied or not
separatorFlag := cmdFlags.Lookup("separator")
separatorFlagSupplied := separatorFlag != nil && separatorFlag.Changed
// Default the separator to , if using CSV
if opt.Csv && !separatorFlagSupplied {
opt.Separator = ","
}
var list ListFormat
list.SetSeparator(opt.Separator)
list.SetCSV(opt.Csv)
list.SetDirSlash(opt.DirSlash)
list.SetAbsolute(opt.Absolute)
var JSONOpt = ListJSONOpt{
NoModTime: true,
NoMimeType: true,
DirsOnly: opt.DirsOnly,
FilesOnly: opt.FilesOnly,
// Recurse: opt.Recurse,
}
for _, char := range opt.Format {
switch char {
case 'p':
list.AddPath()
case 't':
list.AddModTime(opt.TimeFormat)
JSONOpt.NoModTime = false
case 's':
list.AddSize()
case 'h':
list.AddHash(opt.HashType)
JSONOpt.ShowHash = true
JSONOpt.HashTypes = []string{opt.HashType.String()}
case 'i':
list.AddID()
case 'm':
list.AddMimeType()
JSONOpt.NoMimeType = false
case 'e':
list.AddEncrypted()
JSONOpt.ShowEncrypted = true
case 'o':
list.AddOrigID()
JSONOpt.ShowOrigIDs = true
case 'T':
list.AddTier()
case 'M':
list.AddMetadata()
JSONOpt.Metadata = true
default:
fs.Errorf(nil, "unknown format character %q", char)
}
}
opt.ListFormat = list
opt.JSONOpt = JSONOpt
}
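// A usage sketch for the mapping above (names invented for the example): a
// format string of "pst" selects path, size and modtime columns for the
// destAfter listing. An empty FlagSet stands in for the command flags, so the
// separator default logic treats --separator as not supplied.
func exampleSetListFormat(ctx context.Context) LoggerOpt {
	opt := NewLoggerOpt()
	opt.Format = "pst"
	opt.TimeFormat = "2006-01-02 15:04:05"
	opt.SetListFormat(ctx, pflag.NewFlagSet("example", pflag.ContinueOnError))
	return opt
}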
// NewListJSON makes a new *listJSON for destAfter
func (opt *LoggerOpt) NewListJSON(ctx context.Context, fdst fs.Fs, remote string) {
opt.LJ, _ = newListJSON(ctx, fdst, remote, &opt.JSONOpt)
//fs.Debugf(nil, "%v", opt.LJ)
}
// JSONEntry returns a *ListJSONItem for destAfter
func (opt *LoggerOpt) JSONEntry(ctx context.Context, entry fs.DirEntry) (*ListJSONItem, error) {
return opt.LJ.entry(ctx, entry)
}
// PrintDestAfter writes a *ListJSONItem to opt.DestAfter
func (opt *LoggerOpt) PrintDestAfter(ctx context.Context, sigil Sigil, src, dst fs.DirEntry, err error) {
entry := WinningSide(ctx, sigil, src, dst, err)
if entry.Obj != nil {
JSONEntry, _ := opt.JSONEntry(ctx, entry.Obj)
_, _ = fmt.Fprintln(opt.DestAfter, opt.ListFormat.Format(JSONEntry))
}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/operations/reopen.go | fs/operations/reopen.go | package operations
import (
"context"
"errors"
"io"
"sync"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
)
// AccountFn is a function which will be called after every read
// from the ReOpen.
//
// It may return an error which will be passed back to the user.
type AccountFn func(n int) error
// ReOpen is a wrapper for an object reader which reopens the stream on error
type ReOpen struct {
ctx context.Context
mu sync.Mutex // mutex to protect the below
readAtMu sync.Mutex // mutex to serialize the ReadAt calls
src fs.Object // object to open
baseOptions []fs.OpenOption // options to pass to initial open and where offset == 0
options []fs.OpenOption // option to pass on subsequent opens where offset != 0
rangeOption fs.RangeOption // adjust this range option on re-opens
rc io.ReadCloser // underlying stream
size int64 // total size of object - can be -ve
start int64 // absolute position to start reading from
end int64 // absolute position to end reading (exclusive)
offset int64 // offset in the file we are at, offset from start
newOffset int64 // if different to offset, reopen needed
maxTries int // maximum number of retries
tries int // number of retries we've had so far in this stream
err error // if this is set then Read/Close calls will return it
opened bool // if set then rc is valid and needs closing
account AccountFn // account for a read
reads int // count how many times the data has been read
accountOn int // only account on or after this read
}
var (
errFileClosed = errors.New("file already closed")
errTooManyTries = errors.New("failed to reopen: too many retries")
errInvalidWhence = errors.New("reopen Seek: invalid whence")
errNegativeSeek = errors.New("reopen Seek: negative position")
errSeekPastEnd = errors.New("reopen Seek: attempt to seek past end of data")
errBadEndSeek = errors.New("reopen Seek: can't seek from end with unknown sized object")
)
// NewReOpen makes a handle which will reopen itself and seek to where
// it was on errors up to maxTries times.
//
// If an fs.HashesOption is set this will be applied when reading from
// the start.
//
// If an fs.RangeOption is set then this will be applied when reading from
// the start, and updated on retries.
func NewReOpen(ctx context.Context, src fs.Object, maxTries int, options ...fs.OpenOption) (rc *ReOpen, err error) {
h := &ReOpen{
ctx: ctx,
src: src,
maxTries: maxTries,
baseOptions: options,
size: src.Size(),
start: 0,
offset: 0,
newOffset: -1, // -1 means no seek required
}
h.mu.Lock()
defer h.mu.Unlock()
// Filter the options for subsequent opens
h.options = make([]fs.OpenOption, 0, len(options)+1)
var limit int64 = -1
for _, option := range options {
switch x := option.(type) {
case *fs.HashesOption:
// leave hash option out when ranging
case *fs.RangeOption:
h.start, limit = x.Decode(h.size) // decode the range against the object size
case *fs.SeekOption:
h.start, limit = x.Offset, -1
default:
h.options = append(h.options, option)
}
}
// Put our RangeOption on the end
h.rangeOption.Start = h.start
h.options = append(h.options, &h.rangeOption)
// If a size range is set then set the end point of the file to that
if limit >= 0 && h.size >= 0 {
h.end = h.start + limit
h.rangeOption.End = h.end - 1 // remember range options are inclusive
} else {
h.end = h.size
h.rangeOption.End = -1
}
err = h.open()
if err != nil {
return nil, err
}
return h, nil
}
// Open makes a handle which will reopen itself and seek to where it
// was on errors.
//
// If an fs.HashesOption is set this will be applied when reading from
// the start.
//
// If an fs.RangeOption is set then this will be applied when reading from
// the start, and updated on retries.
//
// It will obey LowLevelRetries in the ctx as the maximum number of
// tries.
//
// Use this instead of calling the Open method on fs.Objects
func Open(ctx context.Context, src fs.Object, options ...fs.OpenOption) (rc *ReOpen, err error) {
maxTries := fs.GetConfig(ctx).LowLevelRetries
return NewReOpen(ctx, src, maxTries, options...)
}
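// Illustrative sketch (not part of the original file): reading a whole object
// through the self-reopening handle. The helper name readAllWithRetries is
// hypothetical; Open is the function defined above.
func readAllWithRetries(ctx context.Context, src fs.Object) ([]byte, error) {
in, err := Open(ctx, src) // retries up to --low-level-retries times on read errors
if err != nil {
return nil, err
}
defer func() { _ = in.Close() }()
return io.ReadAll(in)
}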
// open the underlying handle - call with lock held
//
// we don't retry here as the Open() call will itself have low level retries
func (h *ReOpen) open() error {
var opts []fs.OpenOption
if h.offset == 0 {
// if reading from the start using the initial options
opts = h.baseOptions
} else {
// otherwise use the filtered options
opts = h.options
// Adjust range start to where we have got to
h.rangeOption.Start = h.start + h.offset
}
// Make a copy of the options as fs.FixRangeOption modifies them :-(
opts = append(make([]fs.OpenOption, 0, len(opts)), opts...)
h.tries++
if h.tries > h.maxTries {
h.err = errTooManyTries
} else {
h.rc, h.err = h.src.Open(h.ctx, opts...)
}
if h.err != nil {
if h.tries > 1 {
fs.Debugf(h.src, "Reopen failed after offset %d bytes read: %v", h.offset, h.err)
}
return h.err
}
h.opened = true
return nil
}
// reopen the underlying handle by closing it and reopening it.
func (h *ReOpen) reopen() (err error) {
// close underlying stream if needed
if h.opened {
h.opened = false
_ = h.rc.Close()
}
return h.open()
}
// account for n bytes being read
func (h *ReOpen) accountRead(n int) error {
if h.account == nil {
return nil
}
// Don't start accounting until we've reached this many reads
//
// h.reads will be 1 the first time this is called
// h.accountOn 2 means start accounting on the 2nd read through
if h.reads >= h.accountOn {
return h.account(n)
}
return nil
}
// Read bytes retrying as necessary
func (h *ReOpen) Read(p []byte) (n int, err error) {
h.mu.Lock()
defer h.mu.Unlock()
if h.err != nil {
// return a previous error if there is one
return n, h.err
}
// re-open if seek needed
if h.newOffset >= 0 {
if h.offset != h.newOffset {
fs.Debugf(h.src, "Seek from %d to %d", h.offset, h.newOffset)
h.offset = h.newOffset
err = h.reopen()
if err != nil {
return 0, err
}
}
h.newOffset = -1
}
// Read a full buffer
startOffset := h.offset
var nn int
for n < len(p) && err == nil {
nn, err = h.rc.Read(p[n:])
n += nn
h.offset += int64(nn)
if err != nil && err != io.EOF {
h.err = err
if !fserrors.IsNoLowLevelRetryError(err) {
fs.Debugf(h.src, "Reopening on read failure after offset %d bytes: retry %d/%d: %v", h.offset, h.tries, h.maxTries, err)
if h.reopen() == nil {
err = nil
}
}
}
}
// Count a read of the data if we read from the start successfully
if startOffset == 0 && n != 0 {
h.reads++
}
// Account the read
accErr := h.accountRead(n)
if err == nil {
err = accErr
}
return n, err
}
// ReadAt reads len(p) bytes at absolute offset off without changing
// the read position.
//
// Note: operations are serialized; it won't behave like a truly
// concurrent ReaderAt.
func (h *ReOpen) ReadAt(p []byte, off int64) (n int, err error) {
h.readAtMu.Lock()
defer h.readAtMu.Unlock()
// Save current position
cur, err := h.Seek(0, io.SeekCurrent)
if err != nil {
return 0, err
}
// Seek to requested offset
if _, err = h.Seek(off, io.SeekStart); err != nil {
return 0, err
}
// Restore position on exit
defer func() {
if _, seekErr := h.Seek(cur, io.SeekStart); seekErr != nil && err == nil {
err = seekErr
}
}()
// Fill p fully unless EOF
return h.Read(p)
}
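// Illustrative sketch: using ReadAt to peek at a fixed-size header without
// disturbing the sequential read position. The 16-byte length is arbitrary.
func peekHeader(h *ReOpen) ([16]byte, error) {
var header [16]byte
_, err := h.ReadAt(header[:], 0)
return header, err
}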
// Seek sets the offset for the next Read or Write to offset, interpreted
// according to whence: SeekStart means relative to the start of the file,
// SeekCurrent means relative to the current offset, and SeekEnd means relative
// to the end (for example, offset = -2 specifies the penultimate byte of the
// file). Seek returns the new offset relative to the start of the file or an
// error, if any.
//
// Seeking to an offset before the start of the file is an error. Seeking
// to any positive offset may be allowed, but if the new offset exceeds the
// size of the underlying object the behavior of subsequent I/O operations is
// implementation-dependent.
func (h *ReOpen) Seek(offset int64, whence int) (int64, error) {
h.mu.Lock()
defer h.mu.Unlock()
if h.err != nil {
// return a previous error if there is one
return 0, h.err
}
var abs int64
var size = h.end - h.start
switch whence {
case io.SeekStart:
abs = offset
case io.SeekCurrent:
if h.newOffset >= 0 {
abs = h.newOffset + offset
} else {
abs = h.offset + offset
}
case io.SeekEnd:
if h.size < 0 {
return 0, errBadEndSeek
}
abs = size + offset
default:
return 0, errInvalidWhence
}
if abs < 0 {
return 0, errNegativeSeek
}
if h.size >= 0 && abs > size {
return size, errSeekPastEnd
}
h.tries = 0 // Reset open count on seek
h.newOffset = abs // New offset - applied in Read
return abs, nil
}
// Close the stream
func (h *ReOpen) Close() error {
h.mu.Lock()
defer h.mu.Unlock()
if !h.opened {
return errFileClosed
}
h.opened = false
h.err = errFileClosed
return h.rc.Close()
}
// SetAccounting should be provided with a function which will be
// called after every read from the ReOpen.
//
// It may return an error which will be passed back to the user.
func (h *ReOpen) SetAccounting(account AccountFn) *ReOpen {
h.account = account
return h
}
// DelayAccounting makes sure the accounting function only gets called
// on the i-th or later read of the data from this point (counting
// from 1).
//
// This is useful so that we don't account initial reads of the data
// e.g. when calculating hashes.
//
// Set this to 0 to account everything.
func (h *ReOpen) DelayAccounting(i int) {
h.accountOn = i
h.reads = 0
}
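// Illustrative sketch: wiring up accounting so that the first pass over the
// data (e.g. a hashing pass) is not counted and only the second read onwards
// is. The function and countFn names are hypothetical.
func accountFromSecondRead(h *ReOpen, countFn AccountFn) *ReOpen {
h.DelayAccounting(2) // reads are counted from 1, so accounting starts on read #2
return h.SetAccounting(countFn)
}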
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/operations/copy_test.go | fs/operations/copy_test.go | package operations_test
import (
"context"
"crypto/rand"
"errors"
"fmt"
"os"
"path"
"runtime"
"sort"
"strings"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/sync"
"github.com/rclone/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestTruncateString(t *testing.T) {
for _, test := range []struct {
in string
n int
want string
}{
{
in: "",
n: 0,
want: "",
}, {
in: "Hello World",
n: 5,
want: "Hello",
}, {
in: "ááááá",
n: 5,
want: "áá",
}, {
in: "ááááá\xFF\xFF",
n: 5,
want: "áá\xc3",
}, {
in: "世世世世世",
n: 7,
want: "世世",
}, {
in: "🙂🙂🙂🙂🙂",
n: 16,
want: "🙂🙂🙂🙂",
}, {
in: "🙂🙂🙂🙂🙂",
n: 15,
want: "🙂🙂🙂",
}, {
in: "🙂🙂🙂🙂🙂",
n: 14,
want: "🙂🙂🙂",
}, {
in: "🙂🙂🙂🙂🙂",
n: 13,
want: "🙂🙂🙂",
}, {
in: "🙂🙂🙂🙂🙂",
n: 12,
want: "🙂🙂🙂",
}, {
in: "🙂🙂🙂🙂🙂",
n: 11,
want: "🙂🙂",
}, {
in: "𝓝𝓸𝓫𝓸𝓭𝔂 𝓲𝓼 𝓱𝓸𝓶𝓮 ᴬ ⱽⁱˢⁱᵗ ᶠʳᵒᵐ ᵗʰᵉ ⱽⁱˢⁱᵒⁿᵃʳʸ",
n: 100,
want: "𝓝𝓸𝓫𝓸𝓭𝔂 𝓲𝓼 𝓱𝓸𝓶𝓮 ᴬ ⱽⁱˢⁱᵗ ᶠʳᵒᵐ ᵗʰᵉ ⱽⁱˢ",
}, {
in: "a𝓝𝓸𝓫𝓸𝓭𝔂 𝓲𝓼 𝓱𝓸𝓶𝓮 ᴬ ⱽⁱˢⁱᵗ ᶠʳᵒᵐ ᵗʰᵉ ⱽⁱˢⁱᵒⁿᵃʳʸ",
n: 100,
want: "a𝓝𝓸𝓫𝓸𝓭𝔂 𝓲𝓼 𝓱𝓸𝓶𝓮 ᴬ ⱽⁱˢⁱᵗ ᶠʳᵒᵐ ᵗʰᵉ ⱽⁱˢ",
}, {
in: "aa𝓝𝓸𝓫𝓸𝓭𝔂 𝓲𝓼 𝓱𝓸𝓶𝓮 ᴬ ⱽⁱˢⁱᵗ ᶠʳᵒᵐ ᵗʰᵉ ⱽⁱˢⁱᵒⁿᵃʳʸ",
n: 100,
want: "aa𝓝𝓸𝓫𝓸𝓭𝔂 𝓲𝓼 𝓱𝓸𝓶𝓮 ᴬ ⱽⁱˢⁱᵗ ᶠʳᵒᵐ ᵗʰᵉ ⱽⁱ",
}, {
in: "aaa𝓝𝓸𝓫𝓸𝓭𝔂 𝓲𝓼 𝓱𝓸𝓶𝓮 ᴬ ⱽⁱˢⁱᵗ ᶠʳᵒᵐ ᵗʰᵉ ⱽⁱˢⁱᵒⁿᵃʳʸ",
n: 100,
want: "aaa𝓝𝓸𝓫𝓸𝓭𝔂 𝓲𝓼 𝓱𝓸𝓶𝓮 ᴬ ⱽⁱˢⁱᵗ ᶠʳᵒᵐ ᵗʰᵉ ⱽⁱ",
}, {
in: "aaaa𝓝𝓸𝓫𝓸𝓭𝔂 𝓲𝓼 𝓱𝓸𝓶𝓮 ᴬ ⱽⁱˢⁱᵗ ᶠʳᵒᵐ ᵗʰᵉ ⱽⁱˢⁱᵒⁿᵃʳʸ",
n: 100,
want: "aaaa𝓝𝓸𝓫𝓸𝓭𝔂 𝓲𝓼 𝓱𝓸𝓶𝓮 ᴬ ⱽⁱˢⁱᵗ ᶠʳᵒᵐ ᵗʰᵉ ⱽ",
},
} {
got := operations.TruncateString(test.in, test.n)
assert.Equal(t, test.want, got, fmt.Sprintf("In %q", test.in))
assert.LessOrEqual(t, len(got), test.n)
assert.GreaterOrEqual(t, len(got), test.n-3)
}
}
func TestCopyFile(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
file1 := r.WriteFile("file1", "file1 contents", t1)
r.CheckLocalItems(t, file1)
file2 := file1
file2.Path = "sub/file2"
err := operations.CopyFile(ctx, r.Fremote, r.Flocal, file2.Path, file1.Path)
require.NoError(t, err)
r.CheckLocalItems(t, file1)
r.CheckRemoteItems(t, file2)
err = operations.CopyFile(ctx, r.Fremote, r.Flocal, file2.Path, file1.Path)
require.NoError(t, err)
r.CheckLocalItems(t, file1)
r.CheckRemoteItems(t, file2)
err = operations.CopyFile(ctx, r.Fremote, r.Fremote, file2.Path, file2.Path)
require.NoError(t, err)
r.CheckLocalItems(t, file1)
r.CheckRemoteItems(t, file2)
}
// Find the longest file name for writing to local
func maxLengthFileName(t *testing.T, r *fstest.Run) string {
require.NoError(t, r.Flocal.Mkdir(context.Background(), "")) // create the root
const maxLen = 16 * 1024
name := strings.Repeat("A", maxLen)
i := sort.Search(len(name), func(i int) (fail bool) {
filePath := path.Join(r.LocalName, name[:i])
err := os.WriteFile(filePath, []byte{0}, 0777)
if err != nil {
return true
}
err = os.Remove(filePath)
if err != nil {
t.Logf("Failed to remove test file: %v", err)
}
return false
})
return name[:i-1]
}
// Check we can copy a file of maximum name length
func TestCopyLongFile(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
if !r.Fremote.Features().IsLocal {
t.Skip("Test only runs on local")
}
// Find the maximum length of file we can write
name := maxLengthFileName(t, r)
t.Logf("Max length of file name is %d", len(name))
file1 := r.WriteFile(name, "file1 contents", t1)
r.CheckLocalItems(t, file1)
err := operations.CopyFile(ctx, r.Fremote, r.Flocal, file1.Path, file1.Path)
require.NoError(t, err)
r.CheckLocalItems(t, file1)
r.CheckRemoteItems(t, file1)
}
func TestCopyFileBackupDir(t *testing.T) {
ctx := context.Background()
ctx, ci := fs.AddConfig(ctx)
r := fstest.NewRun(t)
if !operations.CanServerSideMove(r.Fremote) {
t.Skip("Skipping test as remote does not support server-side move or copy")
}
ci.BackupDir = r.FremoteName + "/backup"
file1 := r.WriteFile("dst/file1", "file1 contents", t1)
r.CheckLocalItems(t, file1)
file1old := r.WriteObject(ctx, "dst/file1", "file1 contents old", t1)
r.CheckRemoteItems(t, file1old)
err := operations.CopyFile(ctx, r.Fremote, r.Flocal, file1.Path, file1.Path)
require.NoError(t, err)
r.CheckLocalItems(t, file1)
file1old.Path = "backup/dst/file1"
r.CheckRemoteItems(t, file1old, file1)
}
// Test with CompareDest set
func TestCopyFileCompareDest(t *testing.T) {
ctx := context.Background()
ctx, ci := fs.AddConfig(ctx)
r := fstest.NewRun(t)
ci.CompareDest = []string{r.FremoteName + "/CompareDest"}
fdst, err := fs.NewFs(ctx, r.FremoteName+"/dst")
require.NoError(t, err)
// check empty dest, empty compare
file1 := r.WriteFile("one", "one", t1)
r.CheckLocalItems(t, file1)
err = operations.CopyFile(ctx, fdst, r.Flocal, file1.Path, file1.Path)
require.NoError(t, err)
file1dst := file1
file1dst.Path = "dst/one"
r.CheckRemoteItems(t, file1dst)
// check old dest, empty compare
file1b := r.WriteFile("one", "onet2", t2)
r.CheckRemoteItems(t, file1dst)
r.CheckLocalItems(t, file1b)
err = operations.CopyFile(ctx, fdst, r.Flocal, file1b.Path, file1b.Path)
require.NoError(t, err)
file1bdst := file1b
file1bdst.Path = "dst/one"
r.CheckRemoteItems(t, file1bdst)
// check old dest, new compare
file3 := r.WriteObject(ctx, "dst/one", "one", t1)
file2 := r.WriteObject(ctx, "CompareDest/one", "onet2", t2)
file1c := r.WriteFile("one", "onet2", t2)
r.CheckRemoteItems(t, file2, file3)
r.CheckLocalItems(t, file1c)
err = operations.CopyFile(ctx, fdst, r.Flocal, file1c.Path, file1c.Path)
require.NoError(t, err)
r.CheckRemoteItems(t, file2, file3)
// check empty dest, new compare
file4 := r.WriteObject(ctx, "CompareDest/two", "two", t2)
file5 := r.WriteFile("two", "two", t2)
r.CheckRemoteItems(t, file2, file3, file4)
r.CheckLocalItems(t, file1c, file5)
err = operations.CopyFile(ctx, fdst, r.Flocal, file5.Path, file5.Path)
require.NoError(t, err)
r.CheckRemoteItems(t, file2, file3, file4)
// check new dest, new compare
err = operations.CopyFile(ctx, fdst, r.Flocal, file5.Path, file5.Path)
require.NoError(t, err)
r.CheckRemoteItems(t, file2, file3, file4)
// check empty dest, old compare
file5b := r.WriteFile("two", "twot3", t3)
r.CheckRemoteItems(t, file2, file3, file4)
r.CheckLocalItems(t, file1c, file5b)
err = operations.CopyFile(ctx, fdst, r.Flocal, file5b.Path, file5b.Path)
require.NoError(t, err)
file5bdst := file5b
file5bdst.Path = "dst/two"
r.CheckRemoteItems(t, file2, file3, file4, file5bdst)
}
// Test with CopyDest set
func TestCopyFileCopyDest(t *testing.T) {
ctx := context.Background()
ctx, ci := fs.AddConfig(ctx)
r := fstest.NewRun(t)
if r.Fremote.Features().Copy == nil {
t.Skip("Skipping test as remote does not support server-side copy")
}
ci.CopyDest = []string{r.FremoteName + "/CopyDest"}
fdst, err := fs.NewFs(ctx, r.FremoteName+"/dst")
require.NoError(t, err)
// check empty dest, empty copy
file1 := r.WriteFile("one", "one", t1)
r.CheckLocalItems(t, file1)
err = operations.CopyFile(ctx, fdst, r.Flocal, file1.Path, file1.Path)
require.NoError(t, err)
file1dst := file1
file1dst.Path = "dst/one"
r.CheckRemoteItems(t, file1dst)
// check old dest, empty copy
file1b := r.WriteFile("one", "onet2", t2)
r.CheckRemoteItems(t, file1dst)
r.CheckLocalItems(t, file1b)
err = operations.CopyFile(ctx, fdst, r.Flocal, file1b.Path, file1b.Path)
require.NoError(t, err)
file1bdst := file1b
file1bdst.Path = "dst/one"
r.CheckRemoteItems(t, file1bdst)
// check old dest, new copy, backup-dir
ci.BackupDir = r.FremoteName + "/BackupDir"
file3 := r.WriteObject(ctx, "dst/one", "one", t1)
file2 := r.WriteObject(ctx, "CopyDest/one", "onet2", t2)
file1c := r.WriteFile("one", "onet2", t2)
r.CheckRemoteItems(t, file2, file3)
r.CheckLocalItems(t, file1c)
err = operations.CopyFile(ctx, fdst, r.Flocal, file1c.Path, file1c.Path)
require.NoError(t, err)
file2dst := file2
file2dst.Path = "dst/one"
file3.Path = "BackupDir/one"
r.CheckRemoteItems(t, file2, file2dst, file3)
ci.BackupDir = ""
// check empty dest, new copy
file4 := r.WriteObject(ctx, "CopyDest/two", "two", t2)
file5 := r.WriteFile("two", "two", t2)
r.CheckRemoteItems(t, file2, file2dst, file3, file4)
r.CheckLocalItems(t, file1c, file5)
err = operations.CopyFile(ctx, fdst, r.Flocal, file5.Path, file5.Path)
require.NoError(t, err)
file4dst := file4
file4dst.Path = "dst/two"
r.CheckRemoteItems(t, file2, file2dst, file3, file4, file4dst)
// check new dest, new copy
err = operations.CopyFile(ctx, fdst, r.Flocal, file5.Path, file5.Path)
require.NoError(t, err)
r.CheckRemoteItems(t, file2, file2dst, file3, file4, file4dst)
// check empty dest, old copy
file6 := r.WriteObject(ctx, "CopyDest/three", "three", t2)
file7 := r.WriteFile("three", "threet3", t3)
r.CheckRemoteItems(t, file2, file2dst, file3, file4, file4dst, file6)
r.CheckLocalItems(t, file1c, file5, file7)
err = operations.CopyFile(ctx, fdst, r.Flocal, file7.Path, file7.Path)
require.NoError(t, err)
file7dst := file7
file7dst.Path = "dst/three"
r.CheckRemoteItems(t, file2, file2dst, file3, file4, file4dst, file6, file7dst)
}
func TestCopyInplace(t *testing.T) {
ctx := context.Background()
ctx, ci := fs.AddConfig(ctx)
r := fstest.NewRun(t)
if !r.Fremote.Features().PartialUploads {
t.Skip("Partial uploads not supported")
}
ci.Inplace = true
file1 := r.WriteFile("file1", "file1 contents", t1)
r.CheckLocalItems(t, file1)
file2 := file1
file2.Path = "sub/file2"
err := operations.CopyFile(ctx, r.Fremote, r.Flocal, file2.Path, file1.Path)
require.NoError(t, err)
r.CheckLocalItems(t, file1)
r.CheckRemoteItems(t, file2)
err = operations.CopyFile(ctx, r.Fremote, r.Flocal, file2.Path, file1.Path)
require.NoError(t, err)
r.CheckLocalItems(t, file1)
r.CheckRemoteItems(t, file2)
err = operations.CopyFile(ctx, r.Fremote, r.Fremote, file2.Path, file2.Path)
require.NoError(t, err)
r.CheckLocalItems(t, file1)
r.CheckRemoteItems(t, file2)
}
func TestCopyLongFileName(t *testing.T) {
ctx := context.Background()
ctx, ci := fs.AddConfig(ctx)
r := fstest.NewRun(t)
if !r.Fremote.Features().PartialUploads {
t.Skip("Partial uploads not supported")
}
ci.Inplace = false // the default
file1 := r.WriteFile("file1", "file1 contents", t1)
r.CheckLocalItems(t, file1)
file2 := file1
file2.Path = "sub/" + strings.Repeat("file2", 30)
err := operations.CopyFile(ctx, r.Fremote, r.Flocal, file2.Path, file1.Path)
require.NoError(t, err)
r.CheckLocalItems(t, file1)
r.CheckRemoteItems(t, file2)
err = operations.CopyFile(ctx, r.Fremote, r.Flocal, file2.Path, file1.Path)
require.NoError(t, err)
r.CheckLocalItems(t, file1)
r.CheckRemoteItems(t, file2)
err = operations.CopyFile(ctx, r.Fremote, r.Fremote, file2.Path, file2.Path)
require.NoError(t, err)
r.CheckLocalItems(t, file1)
r.CheckRemoteItems(t, file2)
}
func TestCopyLongFileNameCollision(t *testing.T) {
ctx := context.Background()
ctx, ci := fs.AddConfig(ctx)
r := fstest.NewRun(t)
if !r.Fremote.Features().PartialUploads {
t.Skip("Partial uploads not supported")
}
ci.Inplace = false
ci.Transfers = 4
// Write a lot of identical files with long names
files := make([]fstest.Item, 10)
namePrefix := strings.Repeat("file1", 30)
for i := range files {
files[i] = r.WriteFile(fmt.Sprintf("%s%02d", namePrefix, i), "file1 contents", t1)
}
r.CheckLocalItems(t, files...)
err := sync.CopyDir(ctx, r.Fremote, r.Flocal, false)
require.NoError(t, err)
r.CheckLocalItems(t, files...)
r.CheckRemoteItems(t, files...)
}
func TestCopyFileMaxTransfer(t *testing.T) {
ctx := context.Background()
ctx, ci := fs.AddConfig(ctx)
r := fstest.NewRun(t)
defer accounting.Stats(ctx).ResetCounters()
const sizeCutoff = 2048
// Make random incompressible data
randomData := make([]byte, sizeCutoff)
_, err := rand.Read(randomData)
require.NoError(t, err)
randomString := string(randomData)
file1 := r.WriteFile("TestCopyFileMaxTransfer/file1", "file1 contents", t1)
file2 := r.WriteFile("TestCopyFileMaxTransfer/file2", "file2 contents"+randomString, t2)
file3 := r.WriteFile("TestCopyFileMaxTransfer/file3", "file3 contents"+randomString, t2)
file4 := r.WriteFile("TestCopyFileMaxTransfer/file4", "file4 contents"+randomString, t2)
// Cutoff mode: Hard
ci.MaxTransfer = sizeCutoff
ci.CutoffMode = fs.CutoffModeHard
if runtime.GOOS == "darwin" {
// disable server-side copies as they don't count towards transfer size stats
r.Flocal.Features().Disable("Copy")
if r.Fremote.Features().IsLocal {
r.Fremote.Features().Disable("Copy")
}
}
// file1: Show a small file gets transferred OK
accounting.Stats(ctx).ResetCounters()
err = operations.CopyFile(ctx, r.Fremote, r.Flocal, file1.Path, file1.Path)
require.NoError(t, err)
r.CheckLocalItems(t, file1, file2, file3, file4)
r.CheckRemoteItems(t, file1)
// file2: show a large file does not get transferred
accounting.Stats(ctx).ResetCounters()
err = operations.CopyFile(ctx, r.Fremote, r.Flocal, file2.Path, file2.Path)
require.NotNil(t, err, "Did not get expected max transfer limit error")
if !errors.Is(err, accounting.ErrorMaxTransferLimitReachedFatal) {
t.Log("Expecting error to contain accounting.ErrorMaxTransferLimitReachedFatal")
// Sometimes the backends or their SDKs don't pass the
// error through properly, so check that it at least
// has the text we expect in.
assert.Contains(t, err.Error(), "max transfer limit reached")
}
r.CheckLocalItems(t, file1, file2, file3, file4)
r.CheckRemoteItems(t, file1)
// Cutoff mode: Cautious
ci.CutoffMode = fs.CutoffModeCautious
// file3: show a large file does not get transferred
accounting.Stats(ctx).ResetCounters()
err = operations.CopyFile(ctx, r.Fremote, r.Flocal, file3.Path, file3.Path)
require.NotNil(t, err)
assert.True(t, errors.Is(err, accounting.ErrorMaxTransferLimitReachedGraceful))
r.CheckLocalItems(t, file1, file2, file3, file4)
r.CheckRemoteItems(t, file1)
if isChunker(r.Fremote) {
t.Log("skipping remainder of test for chunker as it involves multiple transfers")
return
}
// Cutoff mode: Soft
ci.CutoffMode = fs.CutoffModeSoft
// file4: show a large file does get transferred this time
accounting.Stats(ctx).ResetCounters()
err = operations.CopyFile(ctx, r.Fremote, r.Flocal, file4.Path, file4.Path)
require.NoError(t, err)
r.CheckLocalItems(t, file1, file2, file3, file4)
r.CheckRemoteItems(t, file1, file4)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/operations/operationsflags/operationsflags.go | fs/operations/operationsflags/operationsflags.go | // Package operationsflags defines the flags used by rclone operations.
// It is decoupled into a separate package so it can be replaced.
package operationsflags
import (
"context"
_ "embed"
"io"
"os"
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
//go:embed operationsflags.md
var help string
// Help returns the help string cleaned up to simplify appending
func Help() string {
return strings.TrimSpace(help)
}
// AddLoggerFlagsOptions contains options for the Logger Flags
type AddLoggerFlagsOptions struct {
Combined string // a file with file names with leading sigils
MissingOnSrc string // files only in the destination
MissingOnDst string // files only in the source
Match string // matching files
Differ string // differing files
ErrFile string // files with errors of some kind
DestAfter string // files that exist on the destination post-sync
}
// AnySet checks if any of the logger flags have a non-blank value
func (o AddLoggerFlagsOptions) AnySet() bool {
return anyNotBlank(o.Combined, o.MissingOnSrc, o.MissingOnDst, o.Match, o.Differ, o.ErrFile, o.DestAfter)
}
func anyNotBlank(s ...string) bool {
for _, x := range s {
if x != "" {
return true
}
}
return false
}
// AddLoggerFlags adds the logger flags to the cmdFlags command
func AddLoggerFlags(cmdFlags *pflag.FlagSet, opt *operations.LoggerOpt, flagsOpt *AddLoggerFlagsOptions) {
flags.StringVarP(cmdFlags, &flagsOpt.Combined, "combined", "", flagsOpt.Combined, "Make a combined report of changes to this file", "Sync")
flags.StringVarP(cmdFlags, &flagsOpt.MissingOnSrc, "missing-on-src", "", flagsOpt.MissingOnSrc, "Report all files missing from the source to this file", "Sync")
flags.StringVarP(cmdFlags, &flagsOpt.MissingOnDst, "missing-on-dst", "", flagsOpt.MissingOnDst, "Report all files missing from the destination to this file", "Sync")
flags.StringVarP(cmdFlags, &flagsOpt.Match, "match", "", flagsOpt.Match, "Report all matching files to this file", "Sync")
flags.StringVarP(cmdFlags, &flagsOpt.Differ, "differ", "", flagsOpt.Differ, "Report all non-matching files to this file", "Sync")
flags.StringVarP(cmdFlags, &flagsOpt.ErrFile, "error", "", flagsOpt.ErrFile, "Report all files with errors (hashing or reading) to this file", "Sync")
flags.StringVarP(cmdFlags, &flagsOpt.DestAfter, "dest-after", "", flagsOpt.DestAfter, "Report all files that exist on the dest post-sync", "Sync")
// lsf flags for destAfter
flags.StringVarP(cmdFlags, &opt.Format, "format", "F", "p", "Output format - see lsf help for details", "Sync")
flags.StringVarP(cmdFlags, &opt.TimeFormat, "timeformat", "t", "", "Specify a custom time format - see docs for details (default: 2006-01-02 15:04:05)", "")
flags.StringVarP(cmdFlags, &opt.Separator, "separator", "s", ";", "Separator for the items in the format", "Sync")
flags.BoolVarP(cmdFlags, &opt.DirSlash, "dir-slash", "d", true, "Append a slash to directory names", "Sync")
opt.HashType = hash.MD5
flags.FVarP(cmdFlags, &opt.HashType, "hash", "", "Use this hash when `h` is used in the format MD5|SHA-1|DropboxHash", "Sync")
flags.BoolVarP(cmdFlags, &opt.FilesOnly, "files-only", "", true, "Only list files", "Sync")
flags.BoolVarP(cmdFlags, &opt.DirsOnly, "dirs-only", "", false, "Only list directories", "Sync")
flags.BoolVarP(cmdFlags, &opt.Csv, "csv", "", false, "Output in CSV format", "Sync")
flags.BoolVarP(cmdFlags, &opt.Absolute, "absolute", "", false, "Put a leading / in front of path names", "Sync")
// flags.BoolVarP(cmdFlags, &recurse, "recursive", "R", false, "Recurse into the listing", "")
}
// ConfigureLoggers verifies and sets up writers for log files requested via CLI flags
func ConfigureLoggers(ctx context.Context, fdst fs.Fs, command *cobra.Command, opt *operations.LoggerOpt, flagsOpt AddLoggerFlagsOptions) (func(), error) {
closers := []io.Closer{}
if opt.TimeFormat == "max" {
opt.TimeFormat = operations.FormatForLSFPrecision(fdst.Precision())
}
opt.SetListFormat(ctx, command.Flags())
opt.NewListJSON(ctx, fdst, "")
open := func(name string, pout *io.Writer) error {
if name == "" {
return nil
}
if name == "-" {
*pout = os.Stdout
return nil
}
out, err := os.Create(name)
if err != nil {
return err
}
*pout = out
closers = append(closers, out)
return nil
}
if err := open(flagsOpt.Combined, &opt.Combined); err != nil {
return nil, err
}
if err := open(flagsOpt.MissingOnSrc, &opt.MissingOnSrc); err != nil {
return nil, err
}
if err := open(flagsOpt.MissingOnDst, &opt.MissingOnDst); err != nil {
return nil, err
}
if err := open(flagsOpt.Match, &opt.Match); err != nil {
return nil, err
}
if err := open(flagsOpt.Differ, &opt.Differ); err != nil {
return nil, err
}
if err := open(flagsOpt.ErrFile, &opt.Error); err != nil {
return nil, err
}
if err := open(flagsOpt.DestAfter, &opt.DestAfter); err != nil {
return nil, err
}
close := func() {
for _, closer := range closers {
err := closer.Close()
if err != nil {
fs.Errorf(nil, "Failed to close report output: %v", err)
}
}
}
ci := fs.GetConfig(ctx)
if ci.NoTraverse && opt.Combined != nil {
fs.LogPrintf(fs.LogLevelWarning, nil, "--no-traverse does not list any deletes (-) in --combined output\n")
}
if ci.NoTraverse && opt.MissingOnSrc != nil {
fs.LogPrintf(fs.LogLevelWarning, nil, "--no-traverse makes --missing-on-src produce empty output\n")
}
if ci.NoTraverse && opt.DestAfter != nil {
fs.LogPrintf(fs.LogLevelWarning, nil, "--no-traverse makes --dest-after produce incomplete output\n")
}
return close, nil
}
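// Illustrative sketch (not part of the original file): a command would
// normally register the flags at init time with AddLoggerFlags and then, at
// run time, finish the setup and remember to call the returned closer.
// The function name is hypothetical.
func setupLoggersExample(ctx context.Context, fdst fs.Fs, cmd *cobra.Command, opt *operations.LoggerOpt, flagsOpt AddLoggerFlagsOptions) error {
if !flagsOpt.AnySet() {
return nil // no report files requested, skip the setup
}
closeLogs, err := ConfigureLoggers(ctx, fdst, cmd, opt, flagsOpt)
if err != nil {
return err
}
defer closeLogs()
// ... run the sync or check operation here ...
return nil
}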
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/chunksize/chunksize.go | fs/chunksize/chunksize.go | // Package chunksize calculates a suitable chunk size for large uploads
package chunksize
import (
"github.com/rclone/rclone/fs"
)
// Calculator calculates the minimum chunk size needed to fit within
// the maximum number of parts, rounded up to the nearest fs.Mebi.
//
// For most backends, (chunk_size) * (concurrent_upload_routines)
// memory will be required so we want to use the smallest possible
// chunk size that's going to allow the upload to proceed. We round up
// to the nearest fs.Mebi on the assumption that some backends may
// only allow integer type parameters when specifying the chunk size.
//
// Returns the default chunk size if it is large enough to support the
// given file size, otherwise returns the smallest chunk size necessary
// to allow the upload to proceed.
func Calculator(o any, size int64, maxParts int, defaultChunkSize fs.SizeSuffix) fs.SizeSuffix {
// If streaming then use default chunk size
if size < 0 {
fs.Debugf(o, "Streaming upload with chunk_size %s allows uploads of up to %s and will fail only when that limit is reached.", defaultChunkSize, fs.SizeSuffix(maxParts)*defaultChunkSize)
return defaultChunkSize
}
fileSize := fs.SizeSuffix(size)
requiredChunks := fileSize / defaultChunkSize
if requiredChunks < fs.SizeSuffix(maxParts) || (requiredChunks == fs.SizeSuffix(maxParts) && fileSize%defaultChunkSize == 0) {
return defaultChunkSize
}
minChunk := fileSize / fs.SizeSuffix(maxParts)
remainder := minChunk % fs.Mebi
if remainder != 0 {
minChunk += fs.Mebi - remainder
}
if fileSize/minChunk == fs.SizeSuffix(maxParts) && fileSize%fs.SizeSuffix(maxParts) != 0 { // when right on the boundary, we need to add a MiB
minChunk += fs.Mebi
}
fs.Debugf(o, "size: %v, parts: %v, default: %v, new: %v; default chunk size insufficient, returned new chunk size", fileSize, maxParts, defaultChunkSize, minChunk)
return minChunk
}
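// Illustrative sketch (not part of the original file): a worked example using
// the numbers from the forum case in the tests. A 120,864,818,840 byte object
// with at most 10000 parts and a 5 MiB default needs chunks of at least
// ~11.53 MiB, which rounds up to 12 MiB.
func exampleCalculation() fs.SizeSuffix {
return Calculator("example object", 120864818840, 10000, 5*1024*1024)
}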
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/chunksize/chunksize_test.go | fs/chunksize/chunksize_test.go | package chunksize
import (
"testing"
"github.com/rclone/rclone/fs"
)
func TestComputeChunkSize(t *testing.T) {
for _, test := range []struct {
name string
size fs.SizeSuffix
maxParts int
defaultChunkSize fs.SizeSuffix
want fs.SizeSuffix
}{
{
name: "streaming file",
size: -1,
maxParts: 10000,
defaultChunkSize: toSizeSuffixMiB(10),
want: toSizeSuffixMiB(10),
}, {
name: "default size returned when file size is small enough",
size: 1000,
maxParts: 10000,
defaultChunkSize: toSizeSuffixMiB(10),
want: toSizeSuffixMiB(10),
}, {
name: "default size returned when file size is just 1 byte small enough",
size: toSizeSuffixMiB(100000) - 1,
maxParts: 10000,
defaultChunkSize: toSizeSuffixMiB(10),
want: toSizeSuffixMiB(10),
}, {
name: "no rounding up when everything divides evenly",
size: toSizeSuffixMiB(1000000),
maxParts: 10000,
defaultChunkSize: toSizeSuffixMiB(100),
want: toSizeSuffixMiB(100),
}, {
name: "rounding up to nearest MiB when not quite enough parts",
size: toSizeSuffixMiB(1000000),
maxParts: 9999,
defaultChunkSize: toSizeSuffixMiB(100),
want: toSizeSuffixMiB(101),
}, {
name: "rounding up to nearest MiB when one extra byte",
size: toSizeSuffixMiB(1000000) + 1,
maxParts: 10000,
defaultChunkSize: toSizeSuffixMiB(100),
want: toSizeSuffixMiB(101),
}, {
name: "expected MiB value when rounding sets to absolute minimum",
size: toSizeSuffixMiB(1) - 1,
maxParts: 1,
defaultChunkSize: toSizeSuffixMiB(1),
want: toSizeSuffixMiB(1),
}, {
name: "expected MiB value when rounding to absolute min with extra",
size: toSizeSuffixMiB(1) + 1,
maxParts: 1,
defaultChunkSize: toSizeSuffixMiB(1),
want: toSizeSuffixMiB(2),
}, {
name: "issue from forum #1",
size: 120864818840,
maxParts: 10000,
defaultChunkSize: 5 * 1024 * 1024,
want: toSizeSuffixMiB(12),
},
} {
t.Run(test.name, func(t *testing.T) {
got := Calculator(test.name, int64(test.size), test.maxParts, test.defaultChunkSize)
if got != test.want {
t.Fatalf("expected: %v, got: %v", test.want, got)
}
if test.size < 0 {
return
}
parts := func(result fs.SizeSuffix) int {
n := test.size / result
r := test.size % result
if r != 0 {
n++
}
return int(n)
}
// Check this gives the parts in range
if parts(got) > test.maxParts {
t.Fatalf("too many parts %d", parts(got))
}
// Check that setting chunk size smaller gave too many parts
if got > test.defaultChunkSize {
if parts(got-toSizeSuffixMiB(1)) <= test.maxParts {
t.Fatalf("chunk size %v too big as %v only gives %d parts", got, got-toSizeSuffixMiB(1), parts(got-toSizeSuffixMiB(1)))
}
}
})
}
}
func toSizeSuffixMiB(size int64) fs.SizeSuffix {
return fs.SizeSuffix(size * int64(fs.Mebi))
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/accounting/transfermap.go | fs/accounting/transfermap.go | package accounting
import (
"context"
"fmt"
"maps"
"sort"
"strings"
"sync"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/rc"
)
// transferMap holds name to transfer map
type transferMap struct {
mu sync.RWMutex
items map[string]*Transfer
name string
}
// newTransferMap creates a new empty transfer map of capacity size
func newTransferMap(size int, name string) *transferMap {
return &transferMap{
items: make(map[string]*Transfer, size),
name: name,
}
}
// add adds a new transfer to the map
func (tm *transferMap) add(tr *Transfer) {
tm.mu.Lock()
tm.items[tr.remote] = tr
tm.mu.Unlock()
}
// del removes a transfer from the map by name
func (tm *transferMap) del(remote string) bool {
tm.mu.Lock()
_, exists := tm.items[remote]
delete(tm.items, remote)
tm.mu.Unlock()
return exists
}
// merge adds items from another map
func (tm *transferMap) merge(m *transferMap) {
tm.mu.Lock()
m.mu.Lock()
maps.Copy(tm.items, m.items)
m.mu.Unlock()
tm.mu.Unlock()
}
// empty returns whether the map has any items
func (tm *transferMap) empty() bool {
tm.mu.RLock()
defer tm.mu.RUnlock()
return len(tm.items) == 0
}
// count returns the number of items in the map
func (tm *transferMap) count() int {
tm.mu.RLock()
defer tm.mu.RUnlock()
return len(tm.items)
}
// _sortedSlice returns all transfers sorted by start time
//
// Call with mu.RLock held
func (tm *transferMap) _sortedSlice() []*Transfer {
s := make([]*Transfer, 0, len(tm.items))
for _, tr := range tm.items {
s = append(s, tr)
}
// sort by time first and if equal by name. Note that the relatively
// low time resolution on Windows can cause equal times.
sort.Slice(s, func(i, j int) bool {
a, b := s[i], s[j]
if a.startedAt.Before(b.startedAt) {
return true
} else if !a.startedAt.Equal(b.startedAt) {
return false
}
return a.remote < b.remote
})
return s
}
// String returns string representation of map items excluding any in
// exclude (if set).
func (tm *transferMap) String(ctx context.Context, progress *inProgress, exclude *transferMap) string {
tm.mu.RLock()
defer tm.mu.RUnlock()
ci := fs.GetConfig(ctx)
stringList := make([]string, 0, len(tm.items))
for _, tr := range tm._sortedSlice() {
var what = tr.what
if exclude != nil {
exclude.mu.RLock()
_, found := exclude.items[tr.remote]
exclude.mu.RUnlock()
if found {
continue
}
}
var out string
if acc := progress.get(tr.remote); acc != nil {
out = acc.String()
if what != "" {
out += ", " + what
}
} else {
if what == "" {
what = tm.name
}
out = fmt.Sprintf("%*s: %s",
ci.StatsFileNameLength,
shortenName(tr.remote, ci.StatsFileNameLength),
what,
)
}
stringList = append(stringList, " * "+out)
}
return strings.Join(stringList, "\n")
}
// progress returns total bytes read as well as the size.
func (tm *transferMap) progress(stats *StatsInfo) (totalBytes, totalSize int64) {
tm.mu.RLock()
defer tm.mu.RUnlock()
for name := range tm.items {
if acc := stats.inProgress.get(name); acc != nil {
bytes, size := acc.progress()
if size >= 0 && bytes >= 0 {
totalBytes += bytes
totalSize += size
}
}
}
return totalBytes, totalSize
}
// remotes returns a []string of the remote names for the transferMap
func (tm *transferMap) remotes() (c []string) {
tm.mu.RLock()
defer tm.mu.RUnlock()
for _, tr := range tm._sortedSlice() {
c = append(c, tr.remote)
}
return c
}
// rcStats returns a []rc.Params of the stats for the transferMap
func (tm *transferMap) rcStats(progress *inProgress) (t []rc.Params) {
tm.mu.RLock()
defer tm.mu.RUnlock()
for _, tr := range tm._sortedSlice() {
out := tr.rcStats() // basic stats
if acc := progress.get(tr.remote); acc != nil {
acc.rcStats(out) // add extended stats if have acc
}
t = append(t, out)
}
return t
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/accounting/stats.go | fs/accounting/stats.go | package accounting
import (
"bytes"
"context"
"errors"
"fmt"
"slices"
"sort"
"strings"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/lib/terminal"
)
const (
averagePeriodLength = time.Second
averagePeriod = 30 // number of ticks the speed is averaged over (value assumed; referenced by averageLoop)
averageStopAfter = time.Minute
)
// MaxCompletedTransfers specifies the default maximum number of
// completed transfers in startedTransfers list. This can be adjusted
// for a given StatsInfo by calling the SetMaxCompletedTransfers
// method.
var MaxCompletedTransfers = 100
// StatsInfo accounts all transfers
// N.B.: if this struct is modified, please remember to also update sum() function in stats_groups
// to correctly count the updated fields
type StatsInfo struct {
mu sync.RWMutex
ctx context.Context
ci *fs.ConfigInfo
bytes int64
errors int64
lastError error
fatalError bool
retryError bool
retryAfter time.Time
checks int64
checking *transferMap
checkQueue int
checkQueueSize int64
transfers int64
transferring *transferMap
transferQueue int
transferQueueSize int64
listed int64
renames int64
renameQueue int
renameQueueSize int64
deletes int64
deletesSize int64
deletedDirs int64
inProgress *inProgress
startedTransfers []*Transfer // currently active transfers
oldTimeRanges timeRanges // a merged list of time ranges for the transfers
oldDuration time.Duration // duration of transfers we have culled
group string
startTime time.Time // the moment these stats were initialized or reset
average averageValues
serverSideCopies int64
serverSideCopyBytes int64
serverSideMoves int64
serverSideMoveBytes int64
maxCompletedTransfers int
}
type averageValues struct {
mu sync.Mutex
period float64
lpBytes int64
lpTime time.Time
speed float64
cancel context.CancelFunc
stopped sync.WaitGroup
started bool
}
// NewStats creates an initialised StatsInfo
func NewStats(ctx context.Context) *StatsInfo {
ci := fs.GetConfig(ctx)
s := &StatsInfo{
ctx: ctx,
ci: ci,
checking: newTransferMap(ci.Checkers, "checking"),
transferring: newTransferMap(ci.Transfers, "transferring"),
inProgress: newInProgress(ctx),
startTime: time.Now(),
average: averageValues{},
maxCompletedTransfers: MaxCompletedTransfers,
}
return s
}
// SetMaxCompletedTransfers sets the maximum number of completed transfers to keep.
func (s *StatsInfo) SetMaxCompletedTransfers(n int) *StatsInfo {
s.mu.Lock()
s.maxCompletedTransfers = n
s.mu.Unlock()
return s
}
// RemoteStats returns stats for rc
//
// If short is true then the transfers and checkers won't be added.
func (s *StatsInfo) RemoteStats(short bool) (out rc.Params, err error) {
// NB if adding values here - make sure you update the docs in
// stats_groups.go
out = make(rc.Params)
ts := s.calculateTransferStats()
out["totalChecks"] = ts.totalChecks
out["totalTransfers"] = ts.totalTransfers
out["totalBytes"] = ts.totalBytes
out["transferTime"] = ts.transferTime
out["speed"] = ts.speed
s.mu.RLock()
out["bytes"] = s.bytes
out["errors"] = s.errors
out["fatalError"] = s.fatalError
out["retryError"] = s.retryError
out["checks"] = s.checks
out["transfers"] = s.transfers
out["deletes"] = s.deletes
out["deletedDirs"] = s.deletedDirs
out["renames"] = s.renames
out["listed"] = s.listed
out["elapsedTime"] = time.Since(s.startTime).Seconds()
out["serverSideCopies"] = s.serverSideCopies
out["serverSideCopyBytes"] = s.serverSideCopyBytes
out["serverSideMoves"] = s.serverSideMoves
out["serverSideMoveBytes"] = s.serverSideMoveBytes
eta, etaOK := eta(s.bytes, ts.totalBytes, ts.speed)
if etaOK {
out["eta"] = eta.Seconds()
} else {
out["eta"] = nil
}
s.mu.RUnlock()
if !short && !s.checking.empty() {
out["checking"] = s.checking.remotes()
}
if !short && !s.transferring.empty() {
out["transferring"] = s.transferring.rcStats(s.inProgress)
}
if s.errors > 0 {
out["lastError"] = s.lastError.Error()
}
return out, nil
}
// _speed returns the average speed of the transfer in bytes/second
//
// Call with lock held
func (s *StatsInfo) _speed() float64 {
return s.average.speed
}
// timeRange is a start and end time of a transfer
type timeRange struct {
start time.Time
end time.Time
}
// timeRanges is a list of non-overlapping start and end times for
// transfers
type timeRanges []timeRange
// merge all the overlapping time ranges
func (trs *timeRanges) merge() {
Trs := *trs
// Sort by the starting time.
sort.Slice(Trs, func(i, j int) bool {
return Trs[i].start.Before(Trs[j].start)
})
// Merge overlaps and add distinctive ranges together
var (
newTrs = Trs[:0]
i, j = 0, 1
)
for i < len(Trs) {
if j < len(Trs) {
if !Trs[i].end.Before(Trs[j].start) {
if Trs[i].end.Before(Trs[j].end) {
Trs[i].end = Trs[j].end
}
j++
continue
}
}
newTrs = append(newTrs, Trs[i])
i = j
j++
}
*trs = newTrs
}
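// Illustrative sketch (not part of the original file): three ranges where the
// first two overlap collapse into two after merge. The times are hypothetical
// and assumed to satisfy t0 <= t1 <= t2 < t3.
func exampleMergeRanges(t0, t1, t2, t3 time.Time) timeRanges {
trs := timeRanges{{t0, t2}, {t1, t2}, {t3, t3.Add(time.Second)}}
trs.merge()
return trs // {t0, t2} and {t3, t3 + 1s}
}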
// cull remove any ranges whose start and end are before cutoff
// returning their duration sum
func (trs *timeRanges) cull(cutoff time.Time) (d time.Duration) {
var newTrs = (*trs)[:0]
for _, tr := range *trs {
if cutoff.Before(tr.start) || cutoff.Before(tr.end) {
newTrs = append(newTrs, tr)
} else {
d += tr.end.Sub(tr.start)
}
}
*trs = newTrs
return d
}
// total the time out of the time ranges
func (trs timeRanges) total() (total time.Duration) {
for _, tr := range trs {
total += tr.end.Sub(tr.start)
}
return total
}
// Total duration is union of durations of all transfers belonging to this
// object.
//
// Needs to be protected by mutex.
func (s *StatsInfo) _totalDuration() time.Duration {
// copy of s.oldTimeRanges with extra room for the current transfers
timeRanges := make(timeRanges, len(s.oldTimeRanges), len(s.oldTimeRanges)+len(s.startedTransfers))
copy(timeRanges, s.oldTimeRanges)
// Extract time ranges of all transfers.
now := time.Now()
for i := range s.startedTransfers {
start, end := s.startedTransfers[i].TimeRange()
if end.IsZero() {
end = now
}
timeRanges = append(timeRanges, timeRange{start, end})
}
timeRanges.merge()
return s.oldDuration + timeRanges.total()
}
const (
etaMaxSeconds = (1<<63 - 1) / int64(time.Second) // Largest possible ETA as number of seconds
etaMax = time.Duration(etaMaxSeconds) * time.Second // Largest possible ETA, which is in second precision, representing "292y24w3d23h47m16s"
)
// eta returns the ETA of the current operation,
// rounded to full seconds.
// If the ETA cannot be determined 'ok' returns false.
func eta(size, total int64, rate float64) (eta time.Duration, ok bool) {
if total <= 0 || size < 0 || rate <= 0 {
return 0, false
}
remaining := total - size
if remaining < 0 {
return 0, false
}
seconds := int64(float64(remaining) / rate)
if seconds < 0 {
// Got Int64 overflow
eta = etaMax
} else if seconds >= etaMaxSeconds {
// Would get Int64 overflow if converting from seconds to Duration (nanoseconds)
eta = etaMax
} else {
eta = time.Duration(seconds) * time.Second
}
return eta, true
}
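// Illustrative sketch: 25 GiB done out of 100 GiB at 10 MiB/s leaves 75 GiB,
// which is exactly 7680 seconds (2h8m).
func exampleETA() (time.Duration, bool) {
const gi = int64(1) << 30
return eta(25*gi, 100*gi, 10*1024*1024)
}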
// etaString returns the ETA of the current operation,
// rounded to full seconds.
// If the ETA cannot be determined it returns "-"
func etaString(done, total int64, rate float64) string {
d, ok := eta(done, total, rate)
if !ok {
return "-"
}
if d == etaMax {
return "-"
}
return fs.Duration(d).ShortReadableString()
}
// percent returns a/b as a percentage rounded to the nearest integer
// as a string
//
// if the percentage is invalid it returns "-"
func percent(a int64, b int64) string {
if a < 0 || b <= 0 {
return "-"
}
return fmt.Sprintf("%d%%", int(float64(a)*100/float64(b)+0.5))
}
// returned from calculateTransferStats
type transferStats struct {
totalChecks int64
totalTransfers int64
totalBytes int64
transferTime float64
speed float64
}
// calculateTransferStats calculates some additional transfer stats not
// stored directly in StatsInfo
func (s *StatsInfo) calculateTransferStats() (ts transferStats) {
// checking and transferring have their own locking so read
// here before lock to prevent deadlock on GetBytes
transferring, checking := s.transferring.count(), s.checking.count()
transferringBytesDone, transferringBytesTotal := s.transferring.progress(s)
s.mu.RLock()
defer s.mu.RUnlock()
ts.totalChecks = int64(s.checkQueue) + s.checks + int64(checking)
ts.totalTransfers = int64(s.transferQueue) + s.transfers + int64(transferring)
// note that s.bytes already includes transferringBytesDone so
// we take it off here to avoid double counting
ts.totalBytes = s.transferQueueSize + s.bytes + transferringBytesTotal - transferringBytesDone
s.average.mu.Lock()
ts.speed = s.average.speed
s.average.mu.Unlock()
dt := s._totalDuration()
ts.transferTime = dt.Seconds()
return ts
}
func (s *StatsInfo) averageLoop(ctx context.Context) {
ticker := time.NewTicker(averagePeriodLength)
defer ticker.Stop()
a := &s.average
defer a.stopped.Done()
for {
select {
case now := <-ticker.C:
a.mu.Lock()
avg := 0.0
elapsed := now.Sub(a.lpTime).Seconds()
if elapsed > 0 {
avg = float64(a.lpBytes) / elapsed
}
if a.period < averagePeriod {
a.period++
}
a.speed = (avg + a.speed*(a.period-1)) / a.period
a.lpBytes = 0
a.lpTime = now
a.mu.Unlock()
case <-ctx.Done():
// Stop the loop
return
}
}
}
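// Illustrative sketch (not part of the original file): the smoothing used by
// averageLoop expressed on its own - each tick blends the last second's rate
// into the running speed with weight 1/period.
func smoothedSpeed(previous, sample, period float64) float64 {
return (sample + previous*(period-1)) / period
}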
// Start the average loop
//
// Call with the mutex held
func (s *StatsInfo) _startAverageLoop() {
if !s.average.started {
ctx, cancel := context.WithCancel(context.Background())
s.average.cancel = cancel
s.average.started = true
s.average.stopped.Add(1)
s.average.lpTime = time.Now()
go s.averageLoop(ctx)
}
}
// Start the average loop
func (s *StatsInfo) startAverageLoop() {
s.mu.Lock()
defer s.mu.Unlock()
s._startAverageLoop()
}
// Stop the average loop
//
// Call with the mutex held
func (s *StatsInfo) _stopAverageLoop() {
if s.average.started {
s.average.cancel()
s.average.stopped.Wait()
s.average.started = false
}
}
// String convert the StatsInfo to a string for printing
func (s *StatsInfo) String() string {
// NB if adding more stats in here, remember to add them into
// RemoteStats() too.
ts := s.calculateTransferStats()
s.mu.RLock()
var (
buf = &bytes.Buffer{}
xfrchkString = ""
dateString = ""
elapsedTime = time.Since(s.startTime)
elapsedTimeSecondsOnly = elapsedTime.Truncate(time.Second/10) % time.Minute
displaySpeedString string
)
if s.ci.DataRateUnit == "bits" {
displaySpeedString = fs.SizeSuffix(ts.speed * 8).BitRateUnit()
} else {
displaySpeedString = fs.SizeSuffix(ts.speed).ByteRateUnit()
}
if !s.ci.StatsOneLine {
_, _ = fmt.Fprintf(buf, "\nTransferred: ")
} else {
xfrchk := []string{}
if ts.totalTransfers > 0 && s.transferQueue > 0 {
xfrchk = append(xfrchk, fmt.Sprintf("xfr#%d/%d", s.transfers, ts.totalTransfers))
}
if ts.totalChecks > 0 && s.checkQueue > 0 {
xfrchk = append(xfrchk, fmt.Sprintf("chk#%d/%d", s.checks, ts.totalChecks))
}
if len(xfrchk) > 0 {
xfrchkString = fmt.Sprintf(" (%s)", strings.Join(xfrchk, ", "))
}
if s.ci.StatsOneLineDate {
t := time.Now()
dateString = t.Format(s.ci.StatsOneLineDateFormat) // Including the separator so people can customize it
}
}
_, _ = fmt.Fprintf(buf, "%s%13s / %s, %s, %s, ETA %s%s",
dateString,
fs.SizeSuffix(s.bytes).ByteUnit(),
fs.SizeSuffix(ts.totalBytes).ByteUnit(),
percent(s.bytes, ts.totalBytes),
displaySpeedString,
etaString(s.bytes, ts.totalBytes, ts.speed),
xfrchkString,
)
if s.ci.ProgressTerminalTitle {
// Writes ETA to the terminal title
terminal.WriteTerminalTitle("ETA: " + etaString(s.bytes, ts.totalBytes, ts.speed))
}
if !s.ci.StatsOneLine {
_, _ = buf.WriteRune('\n')
errorDetails := ""
switch {
case s.fatalError:
errorDetails = " (fatal error encountered)"
case s.retryError:
errorDetails = " (retrying may help)"
case s.errors != 0:
errorDetails = " (no need to retry)"
}
// Add only non zero stats
if s.errors != 0 {
_, _ = fmt.Fprintf(buf, "Errors: %10d%s\n",
s.errors, errorDetails)
}
if s.checks != 0 || ts.totalChecks != 0 || s.listed != 0 {
_, _ = fmt.Fprintf(buf, "Checks: %10d / %d, %s, Listed %d\n",
s.checks, ts.totalChecks, percent(s.checks, ts.totalChecks), s.listed)
}
if s.deletes != 0 || s.deletedDirs != 0 {
_, _ = fmt.Fprintf(buf, "Deleted: %10d (files), %d (dirs), %s (freed)\n", s.deletes, s.deletedDirs, fs.SizeSuffix(s.deletesSize).ByteUnit())
}
if s.renames != 0 {
_, _ = fmt.Fprintf(buf, "Renamed: %10d\n", s.renames)
}
if s.transfers != 0 || ts.totalTransfers != 0 {
_, _ = fmt.Fprintf(buf, "Transferred: %10d / %d, %s\n",
s.transfers, ts.totalTransfers, percent(s.transfers, ts.totalTransfers))
}
if s.serverSideCopies != 0 || s.serverSideCopyBytes != 0 {
_, _ = fmt.Fprintf(buf, "Server Side Copies:%6d @ %s\n",
s.serverSideCopies, fs.SizeSuffix(s.serverSideCopyBytes).ByteUnit(),
)
}
if s.serverSideMoves != 0 || s.serverSideMoveBytes != 0 {
_, _ = fmt.Fprintf(buf, "Server Side Moves:%7d @ %s\n",
s.serverSideMoves, fs.SizeSuffix(s.serverSideMoveBytes).ByteUnit(),
)
}
_, _ = fmt.Fprintf(buf, "Elapsed time: %10ss\n", strings.TrimRight(fs.Duration(elapsedTime.Truncate(time.Minute)).ReadableString(), "0s")+fmt.Sprintf("%.1f", elapsedTimeSecondsOnly.Seconds()))
}
// checking and transferring have their own locking so unlock
// here to prevent deadlock on GetBytes
s.mu.RUnlock()
// Add per transfer stats if required
if !s.ci.StatsOneLine {
if !s.checking.empty() {
_, _ = fmt.Fprintf(buf, "Checking:\n%s\n", s.checking.String(s.ctx, s.inProgress, s.transferring))
}
if !s.transferring.empty() {
_, _ = fmt.Fprintf(buf, "Transferring:\n%s\n", s.transferring.String(s.ctx, s.inProgress, nil))
}
}
return buf.String()
}
// Transferred returns list of all completed transfers including checked and
// failed ones.
func (s *StatsInfo) Transferred() []TransferSnapshot {
s.mu.RLock()
defer s.mu.RUnlock()
ts := make([]TransferSnapshot, 0, len(s.startedTransfers))
for _, tr := range s.startedTransfers {
if tr.IsDone() {
ts = append(ts, tr.Snapshot())
}
}
return ts
}
// Log outputs the StatsInfo to the log
func (s *StatsInfo) Log() {
if s.ci.UseJSONLog {
out, _ := s.RemoteStats(false)
fs.LogLevelPrintf(s.ci.StatsLogLevel, nil, "%v%v\n", s, fs.LogValueHide("stats", out))
} else {
fs.LogLevelPrintf(s.ci.StatsLogLevel, nil, "%v\n", s)
}
}
// Bytes updates the stats for bytes bytes
func (s *StatsInfo) Bytes(bytes int64) {
s.average.mu.Lock()
s.average.lpBytes += bytes
s.average.mu.Unlock()
s.mu.Lock()
defer s.mu.Unlock()
s.bytes += bytes
}
// BytesNoNetwork updates the stats for bytes bytes but doesn't include the transfer stats
func (s *StatsInfo) BytesNoNetwork(bytes int64) {
s.mu.Lock()
defer s.mu.Unlock()
s.bytes += bytes
}
// GetBytes returns the number of bytes transferred so far
func (s *StatsInfo) GetBytes() int64 {
s.mu.RLock()
defer s.mu.RUnlock()
return s.bytes
}
// GetBytesWithPending returns the number of bytes transferred and remaining transfers
func (s *StatsInfo) GetBytesWithPending() int64 {
s.mu.RLock()
defer s.mu.RUnlock()
pending := int64(0)
for _, tr := range s.startedTransfers {
if tr.acc != nil {
bytesRead, size := tr.acc.progress()
if bytesRead < size {
pending += size - bytesRead
}
}
}
return s.bytes + pending
}
// Errors updates the stats for errors
func (s *StatsInfo) Errors(errors int64) {
s.mu.Lock()
defer s.mu.Unlock()
s.errors += errors
}
// GetErrors reads the number of errors
func (s *StatsInfo) GetErrors() int64 {
s.mu.RLock()
defer s.mu.RUnlock()
return s.errors
}
// GetLastError returns the lastError
func (s *StatsInfo) GetLastError() error {
s.mu.RLock()
defer s.mu.RUnlock()
return s.lastError
}
// GetChecks returns the number of checks
func (s *StatsInfo) GetChecks() int64 {
s.mu.RLock()
defer s.mu.RUnlock()
return s.checks
}
// FatalError sets the fatalError flag
func (s *StatsInfo) FatalError() {
s.mu.Lock()
defer s.mu.Unlock()
s.fatalError = true
}
// HadFatalError returns whether there has been at least one FatalError
func (s *StatsInfo) HadFatalError() bool {
s.mu.RLock()
defer s.mu.RUnlock()
return s.fatalError
}
// RetryError sets the retryError flag
func (s *StatsInfo) RetryError() {
s.mu.Lock()
defer s.mu.Unlock()
s.retryError = true
}
// HadRetryError returns whether there has been at least one non-NoRetryError
func (s *StatsInfo) HadRetryError() bool {
s.mu.RLock()
defer s.mu.RUnlock()
return s.retryError
}
var (
errMaxDelete = fserrors.FatalError(errors.New("--max-delete threshold reached"))
errMaxDeleteSize = fserrors.FatalError(errors.New("--max-delete-size threshold reached"))
)
// DeleteFile updates the stats for deleting a file
//
// It may return fatal errors if the threshold for --max-delete or
// --max-delete-size have been reached.
func (s *StatsInfo) DeleteFile(ctx context.Context, size int64) error {
ci := fs.GetConfig(ctx)
s.mu.Lock()
defer s.mu.Unlock()
if size < 0 {
size = 0
}
if ci.MaxDelete >= 0 && s.deletes+1 > ci.MaxDelete {
return errMaxDelete
}
if ci.MaxDeleteSize >= 0 && s.deletesSize+size > int64(ci.MaxDeleteSize) {
return errMaxDeleteSize
}
s.deletes++
s.deletesSize += size
return nil
}
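// Illustrative sketch (not part of the original file): a caller only performs
// the deletion once the stats have accepted it, so --max-delete and
// --max-delete-size are enforced before any data is removed. The doDelete
// parameter is hypothetical.
func deleteIfAllowed(ctx context.Context, s *StatsInfo, size int64, doDelete func() error) error {
if err := s.DeleteFile(ctx, size); err != nil {
return err // threshold reached - a fatal error is returned
}
return doDelete()
}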
// GetDeletes returns the number of deletes
func (s *StatsInfo) GetDeletes() int64 {
s.mu.Lock()
defer s.mu.Unlock()
return s.deletes
}
// DeletedDirs updates the stats for deletedDirs
func (s *StatsInfo) DeletedDirs(deletedDirs int64) int64 {
s.mu.Lock()
defer s.mu.Unlock()
s.deletedDirs += deletedDirs
return s.deletedDirs
}
// Renames updates the stats for renames
func (s *StatsInfo) Renames(renames int64) int64 {
s.mu.Lock()
defer s.mu.Unlock()
s.renames += renames
return s.renames
}
// Listed updates the stats for listed objects
func (s *StatsInfo) Listed(listed int64) int64 {
s.mu.Lock()
defer s.mu.Unlock()
s.listed += listed
return s.listed
}
// ResetCounters sets the counters (bytes, checks, errors, transfers, deletes, renames, listed) to 0 and resets lastError, fatalError and retryError
func (s *StatsInfo) ResetCounters() {
s.mu.Lock()
defer s.mu.Unlock()
s.bytes = 0
s.errors = 0
s.lastError = nil
s.fatalError = false
s.retryError = false
s.retryAfter = time.Time{}
s.checks = 0
s.transfers = 0
s.deletes = 0
s.deletesSize = 0
s.deletedDirs = 0
s.renames = 0
s.listed = 0
s.startedTransfers = nil
s.oldDuration = 0
s._stopAverageLoop()
s.average = averageValues{}
s._startAverageLoop()
}
// ResetErrors sets the errors count to 0 and resets lastError, fatalError and retryError
func (s *StatsInfo) ResetErrors() {
s.mu.Lock()
defer s.mu.Unlock()
s.errors = 0
s.lastError = nil
s.fatalError = false
s.retryError = false
s.retryAfter = time.Time{}
}
// Errored returns whether there have been any errors
func (s *StatsInfo) Errored() bool {
s.mu.RLock()
defer s.mu.RUnlock()
return s.errors != 0
}
// Error adds a single error into the stats, assigns lastError and eventually sets fatalError or retryError
func (s *StatsInfo) Error(err error) error {
if err == nil || fserrors.IsCounted(err) {
return err
}
s.mu.Lock()
defer s.mu.Unlock()
s.errors++
s.lastError = err
err = fserrors.FsError(err)
fserrors.Count(err)
switch {
case fserrors.IsFatalError(err):
s.fatalError = true
case fserrors.IsRetryAfterError(err):
retryAfter := fserrors.RetryAfterErrorTime(err)
if s.retryAfter.IsZero() || retryAfter.Sub(s.retryAfter) > 0 {
s.retryAfter = retryAfter
}
s.retryError = true
case !fserrors.IsNoRetryError(err):
s.retryError = true
}
return err
}
// RetryAfter returns the time to retry after if it is set. It will
// be Zero if it isn't set.
func (s *StatsInfo) RetryAfter() time.Time {
s.mu.Lock()
defer s.mu.Unlock()
return s.retryAfter
}
// NewCheckingTransfer adds a checking transfer to the stats, from the object.
func (s *StatsInfo) NewCheckingTransfer(obj fs.DirEntry, what string) *Transfer {
tr := newCheckingTransfer(s, obj, what)
s.checking.add(tr)
return tr
}
// DoneChecking removes a check from the stats
func (s *StatsInfo) DoneChecking(remote string) {
s.checking.del(remote)
s.mu.Lock()
s.checks++
s.mu.Unlock()
}
// GetTransfers reads the number of transfers
func (s *StatsInfo) GetTransfers() int64 {
s.mu.RLock()
defer s.mu.RUnlock()
return s.transfers
}
// NewTransfer adds a transfer to the stats from the object.
//
// The obj is uses as the srcFs, the dstFs must be supplied
func (s *StatsInfo) NewTransfer(obj fs.DirEntry, dstFs fs.Fs) *Transfer {
var srcFs fs.Fs
if oi, ok := obj.(fs.ObjectInfo); ok {
if f, ok := oi.Fs().(fs.Fs); ok {
srcFs = f
}
}
tr := newTransfer(s, obj, srcFs, dstFs)
s.transferring.add(tr)
s.startAverageLoop()
return tr
}
// NewTransferRemoteSize adds a transfer to the stats based on remote and size.
func (s *StatsInfo) NewTransferRemoteSize(remote string, size int64, srcFs, dstFs fs.Fs) *Transfer {
tr := newTransferRemoteSize(s, remote, size, false, "", srcFs, dstFs)
s.transferring.add(tr)
s.startAverageLoop()
return tr
}
// DoneTransferring removes a transfer from the stats
//
// if ok is true and it was in the transfermap (to avoid incrementing in case of nested calls, #6213) then it increments the transfers count
func (s *StatsInfo) DoneTransferring(remote string, ok bool) {
existed := s.transferring.del(remote)
if ok && existed {
s.mu.Lock()
s.transfers++
s.mu.Unlock()
}
if s.transferring.empty() && s.checking.empty() {
s.mu.Lock()
s._stopAverageLoop()
s.mu.Unlock()
}
}
// SetCheckQueue sets the number of queued checks
func (s *StatsInfo) SetCheckQueue(n int, size int64) {
s.mu.Lock()
s.checkQueue = n
s.checkQueueSize = size
s.mu.Unlock()
}
// SetTransferQueue sets the number of queued transfers
func (s *StatsInfo) SetTransferQueue(n int, size int64) {
s.mu.Lock()
s.transferQueue = n
s.transferQueueSize = size
s.mu.Unlock()
}
// SetRenameQueue sets the number of queued renames
func (s *StatsInfo) SetRenameQueue(n int, size int64) {
s.mu.Lock()
s.renameQueue = n
s.renameQueueSize = size
s.mu.Unlock()
}
// AddTransfer adds a reference to the started transfer.
func (s *StatsInfo) AddTransfer(transfer *Transfer) {
s.mu.Lock()
s.startedTransfers = append(s.startedTransfers, transfer)
s.mu.Unlock()
}
// _removeTransfer removes a reference to the started transfer in
// position i.
//
// Must be called with the lock held
func (s *StatsInfo) _removeTransfer(transfer *Transfer, i int) {
now := time.Now()
// add finished transfer onto old time ranges
start, end := transfer.TimeRange()
if end.IsZero() {
end = now
}
s.oldTimeRanges = append(s.oldTimeRanges, timeRange{start, end})
s.oldTimeRanges.merge()
// remove the found entry
s.startedTransfers = slices.Delete(s.startedTransfers, i, i+1)
// Find the earliest start time among the remaining transfers
oldestStart := now
for i := range s.startedTransfers {
start, _ := s.startedTransfers[i].TimeRange()
if start.Before(oldestStart) {
oldestStart = start
}
}
// remove old entries older than that
s.oldDuration += s.oldTimeRanges.cull(oldestStart)
}
// RemoveTransfer removes a reference to the started transfer.
func (s *StatsInfo) RemoveTransfer(transfer *Transfer) {
s.mu.Lock()
for i, tr := range s.startedTransfers {
if tr == transfer {
s._removeTransfer(tr, i)
break
}
}
s.mu.Unlock()
}
// PruneTransfers makes sure there aren't too many old transfers by removing
// a single finished transfer. Returns true if it removed a transfer.
func (s *StatsInfo) PruneTransfers() bool {
s.mu.Lock()
defer s.mu.Unlock()
if s.maxCompletedTransfers < 0 {
return false
}
removed := false
// remove a transfer from the start if we are over quota
if len(s.startedTransfers) > s.maxCompletedTransfers+s.ci.Transfers {
for i, tr := range s.startedTransfers {
if tr.IsDone() {
s._removeTransfer(tr, i)
removed = true
break
}
}
}
return removed
}
// RemoveDoneTransfers removes Done transfers until PruneTransfers has nothing left to remove.
func (s *StatsInfo) RemoveDoneTransfers() {
for s.PruneTransfers() {
}
}
// AddServerSideMove counts a server side move
func (s *StatsInfo) AddServerSideMove(n int64) {
s.mu.Lock()
s.serverSideMoves++
s.serverSideMoveBytes += n
s.mu.Unlock()
}
// AddServerSideCopy counts a server side copy
func (s *StatsInfo) AddServerSideCopy(n int64) {
s.mu.Lock()
s.serverSideCopies++
s.serverSideCopyBytes += n
s.mu.Unlock()
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/accounting/prometheus.go | fs/accounting/prometheus.go | package accounting
import (
"context"
"github.com/prometheus/client_golang/prometheus"
)
var namespace = "rclone_"
// RcloneCollector is a Prometheus collector for Rclone
type RcloneCollector struct {
ctx context.Context
bytesTransferred *prometheus.Desc
transferSpeed *prometheus.Desc
numOfErrors *prometheus.Desc
numOfCheckFiles *prometheus.Desc
transferredFiles *prometheus.Desc
deletes *prometheus.Desc
deletedDirs *prometheus.Desc
renames *prometheus.Desc
listed *prometheus.Desc
fatalError *prometheus.Desc
retryError *prometheus.Desc
}
// NewRcloneCollector makes a new RcloneCollector
func NewRcloneCollector(ctx context.Context) *RcloneCollector {
return &RcloneCollector{
ctx: ctx,
bytesTransferred: prometheus.NewDesc(namespace+"bytes_transferred_total",
"Total transferred bytes since the start of the Rclone process",
nil, nil,
),
transferSpeed: prometheus.NewDesc(namespace+"speed",
"Average speed in bytes per second since the start of the Rclone process",
nil, nil,
),
numOfErrors: prometheus.NewDesc(namespace+"errors_total",
"Number of errors thrown",
nil, nil,
),
numOfCheckFiles: prometheus.NewDesc(namespace+"checked_files_total",
"Number of checked files",
nil, nil,
),
transferredFiles: prometheus.NewDesc(namespace+"files_transferred_total",
"Number of transferred files",
nil, nil,
),
deletes: prometheus.NewDesc(namespace+"files_deleted_total",
"Total number of files deleted",
nil, nil,
),
deletedDirs: prometheus.NewDesc(namespace+"dirs_deleted_total",
"Total number of directories deleted",
nil, nil,
),
renames: prometheus.NewDesc(namespace+"files_renamed_total",
"Total number of files renamed",
nil, nil,
),
listed: prometheus.NewDesc(namespace+"entries_listed_total",
"Total number of entries listed",
nil, nil,
),
fatalError: prometheus.NewDesc(namespace+"fatal_error",
"Whether a fatal error has occurred",
nil, nil,
),
retryError: prometheus.NewDesc(namespace+"retry_error",
"Whether there has been an error that will be retried",
nil, nil,
),
}
}
// Describe is part of the Collector interface: https://godoc.org/github.com/prometheus/client_golang/prometheus#Collector
func (c *RcloneCollector) Describe(ch chan<- *prometheus.Desc) {
ch <- c.bytesTransferred
ch <- c.transferSpeed
ch <- c.numOfErrors
ch <- c.numOfCheckFiles
ch <- c.transferredFiles
ch <- c.deletes
ch <- c.deletedDirs
ch <- c.renames
ch <- c.listed
ch <- c.fatalError
ch <- c.retryError
}
// Collect is part of the Collector interface: https://godoc.org/github.com/prometheus/client_golang/prometheus#Collector
func (c *RcloneCollector) Collect(ch chan<- prometheus.Metric) {
s := groups.sum(c.ctx)
s.mu.RLock()
ch <- prometheus.MustNewConstMetric(c.bytesTransferred, prometheus.CounterValue, float64(s.bytes))
ch <- prometheus.MustNewConstMetric(c.transferSpeed, prometheus.GaugeValue, s._speed())
ch <- prometheus.MustNewConstMetric(c.numOfErrors, prometheus.CounterValue, float64(s.errors))
ch <- prometheus.MustNewConstMetric(c.numOfCheckFiles, prometheus.CounterValue, float64(s.checks))
ch <- prometheus.MustNewConstMetric(c.transferredFiles, prometheus.CounterValue, float64(s.transfers))
ch <- prometheus.MustNewConstMetric(c.deletes, prometheus.CounterValue, float64(s.deletes))
ch <- prometheus.MustNewConstMetric(c.deletedDirs, prometheus.CounterValue, float64(s.deletedDirs))
ch <- prometheus.MustNewConstMetric(c.renames, prometheus.CounterValue, float64(s.renames))
ch <- prometheus.MustNewConstMetric(c.listed, prometheus.CounterValue, float64(s.listed))
ch <- prometheus.MustNewConstMetric(c.fatalError, prometheus.GaugeValue, bool2Float(s.fatalError))
ch <- prometheus.MustNewConstMetric(c.retryError, prometheus.GaugeValue, bool2Float(s.retryError))
s.mu.RUnlock()
}
// bool2Float is a small function to convert a boolean into a float64 value that can be used for Prometheus
func bool2Float(e bool) float64 {
if e {
return 1
}
return 0
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/accounting/inprogress.go | fs/accounting/inprogress.go | package accounting
import (
"context"
"maps"
"sync"
"github.com/rclone/rclone/fs"
)
// inProgress holds a synchronized map of in progress transfers
type inProgress struct {
mu sync.Mutex
m map[string]*Account
}
// newInProgress makes a new inProgress object
func newInProgress(ctx context.Context) *inProgress {
ci := fs.GetConfig(ctx)
return &inProgress{
m: make(map[string]*Account, ci.Transfers),
}
}
// set marks the name as in progress
func (ip *inProgress) set(name string, acc *Account) {
ip.mu.Lock()
defer ip.mu.Unlock()
ip.m[name] = acc
}
// clear marks the name as no longer in progress
func (ip *inProgress) clear(name string) {
ip.mu.Lock()
defer ip.mu.Unlock()
delete(ip.m, name)
}
// get gets the account for name, or nil if not found
func (ip *inProgress) get(name string) *Account {
ip.mu.Lock()
defer ip.mu.Unlock()
return ip.m[name]
}
// merge adds items from another inProgress
func (ip *inProgress) merge(m *inProgress) {
ip.mu.Lock()
defer ip.mu.Unlock()
m.mu.Lock()
defer m.mu.Unlock()
maps.Copy(ip.m, m.m)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/accounting/accounting_unix.go | fs/accounting/accounting_unix.go | // Accounting and limiting reader
// Unix specific functions.
//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
package accounting
import (
"os"
"os/signal"
"syscall"
"github.com/rclone/rclone/fs"
)
// startSignalHandler() sets a signal handler to catch SIGUSR2 and toggle throttling.
func (tb *tokenBucket) startSignalHandler() {
signals := make(chan os.Signal, 1)
signal.Notify(signals, syscall.SIGUSR2)
go func() {
// This runs forever, but blocks until the signal is received.
for {
<-signals
func() {
tb.mu.Lock()
defer tb.mu.Unlock()
// if there's no bandwidth limit configured now, do nothing
if !tb.currLimit.Bandwidth.IsSet() {
fs.Debugf(nil, "SIGUSR2 received but no bandwidth limit configured right now, ignoring")
return
}
tb.toggledOff = !tb.toggledOff
tb.curr, tb.prev = tb.prev, tb.curr
s, limit := "disabled", "off"
if !tb.curr._isOff() {
s = "enabled"
limit = tb.currLimit.Bandwidth.String()
}
fs.Logf(nil, "Bandwidth limit %s by user (now %s)", s, limit)
}()
}
}()
}
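// Example (sketch): the signal is normally sent to the rclone process from
// outside (for example by an operator), but the same toggle can be exercised
// from Go by signalling our own pid. Purely illustrative.
//
//	_ = syscall.Kill(os.Getpid(), syscall.SIGUSR2)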
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/accounting/accounting.go | fs/accounting/accounting.go | // Package accounting provides an accounting and limiting reader
package accounting
import (
"context"
"errors"
"fmt"
"io"
"sync"
"time"
"unicode/utf8"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/asyncreader"
"github.com/rclone/rclone/fs/fserrors"
)
// ErrorMaxTransferLimitReached defines error when transfer limit is reached.
// Used for checking on exit and matching to correct exit code.
var ErrorMaxTransferLimitReached = errors.New("max transfer limit reached as set by --max-transfer")
// ErrorMaxTransferLimitReachedFatal is returned from Read when the max
// transfer limit is reached.
var ErrorMaxTransferLimitReachedFatal = fserrors.FatalError(ErrorMaxTransferLimitReached)
// ErrorMaxTransferLimitReachedGraceful is returned from operations.Copy when the max
// transfer limit is reached and a graceful stop is required.
var ErrorMaxTransferLimitReachedGraceful = fserrors.NoRetryError(ErrorMaxTransferLimitReached)
// Start sets up the accounting, in particular the bandwidth limiting
func Start(ctx context.Context) {
// Start the token bucket limiter
TokenBucket.StartTokenBucket(ctx)
// Start the bandwidth update ticker
TokenBucket.StartTokenTicker(ctx)
// Start the transactions per second limiter
StartLimitTPS(ctx)
// Set the error count function pointer up in fs
//
// We can't do this in an init() method as it uses fs.Config
// and that isn't set up then.
fs.CountError = func(ctx context.Context, err error) error {
return Stats(ctx).Error(err)
}
}
// Account limits and accounts for one transfer
type Account struct {
stats *StatsInfo
// The mutex is to make sure Read() and Close() aren't called
// concurrently. Unfortunately the persistent connection loop
// in http transport calls Read() after Do() returns on
// CancelRequest so this race can happen when it apparently
// shouldn't.
mu sync.Mutex // mutex protects these values
in io.Reader
ctx context.Context // current context for transfer - may change
ci *fs.ConfigInfo
origIn io.ReadCloser
close io.Closer
size int64
name string
closed bool // set if the file is closed
exit chan struct{} // channel that will be closed when transfer is finished
withBuf bool // is using a buffered in
checking bool // set if attached transfer is checking
tokenBucket buckets // per file bandwidth limiter (may be nil)
values accountValues
}
// accountValues holds statistics for this Account
type accountValues struct {
mu sync.Mutex // Mutex for stat values.
bytes int64 // Total number of bytes read
max int64 // if >=0 the max number of bytes to transfer
start time.Time // Start time of first read
lpTime time.Time // Time of last average measurement
lpBytes int64 // Number of bytes read since last measurement
avg float64 // Moving average of last few measurements in Byte/s
}
const averagePeriod = 16 // period to do exponentially weighted averages over
// newAccountSizeName makes an Account reader for an io.ReadCloser of
// the given size and name
func newAccountSizeName(ctx context.Context, stats *StatsInfo, in io.ReadCloser, size int64, name string) *Account {
acc := &Account{
stats: stats,
in: in,
ctx: ctx,
ci: fs.GetConfig(ctx),
close: in,
origIn: in,
size: size,
name: name,
exit: make(chan struct{}),
values: accountValues{
avg: 0,
lpTime: time.Now(),
max: -1,
},
}
if acc.ci.CutoffMode == fs.CutoffModeHard {
acc.values.max = int64(acc.ci.MaxTransfer)
}
currLimit := acc.ci.BwLimitFile.LimitAt(time.Now())
if currLimit.Bandwidth.IsSet() {
fs.Debugf(acc.name, "Limiting file transfer to %v", currLimit.Bandwidth)
acc.tokenBucket = newTokenBucket(currLimit.Bandwidth)
}
go acc.averageLoop()
stats.inProgress.set(acc.name, acc)
return acc
}
// WithBuffer - If the file is above a certain size it adds an Async reader
func (acc *Account) WithBuffer() *Account {
// if already have a buffer then just return
if acc.withBuf {
return acc
}
acc.withBuf = true
var buffers int
if acc.size >= int64(acc.ci.BufferSize) || acc.size == -1 {
buffers = int(int64(acc.ci.BufferSize) / asyncreader.BufferSize)
} else {
buffers = int(acc.size / asyncreader.BufferSize)
}
// On big files add a buffer
if buffers > 0 {
rc, err := asyncreader.New(acc.ctx, acc.origIn, buffers)
if err != nil {
fs.Errorf(acc.name, "Failed to make buffer: %v", err)
} else {
acc.in = rc
acc.close = rc
}
}
return acc
}
// HasBuffer - returns true if this Account has an AsyncReader with a buffer
func (acc *Account) HasBuffer() bool {
acc.mu.Lock()
defer acc.mu.Unlock()
_, ok := acc.in.(*asyncreader.AsyncReader)
return ok
}
// GetReader returns the underlying io.ReadCloser under any Buffer
func (acc *Account) GetReader() io.ReadCloser {
acc.mu.Lock()
defer acc.mu.Unlock()
return acc.origIn
}
// GetAsyncReader returns the current AsyncReader or nil if Account is unbuffered
func (acc *Account) GetAsyncReader() *asyncreader.AsyncReader {
acc.mu.Lock()
defer acc.mu.Unlock()
if asyncIn, ok := acc.in.(*asyncreader.AsyncReader); ok {
return asyncIn
}
return nil
}
// StopBuffering stops the async buffer doing any more buffering
func (acc *Account) StopBuffering() {
if asyncIn, ok := acc.in.(*asyncreader.AsyncReader); ok {
asyncIn.StopBuffering()
}
}
// Abandon stops the async buffer doing any more buffering and discards its buffered data
func (acc *Account) Abandon() {
if asyncIn, ok := acc.in.(*asyncreader.AsyncReader); ok {
asyncIn.Abandon()
}
}
// UpdateReader updates the underlying io.ReadCloser stopping the
// async buffer (if any) and re-adding it
func (acc *Account) UpdateReader(ctx context.Context, in io.ReadCloser) {
acc.mu.Lock()
withBuf := acc.withBuf
if withBuf {
acc.Abandon()
acc.withBuf = false
}
acc.in = in
acc.ctx = ctx
acc.close = in
acc.origIn = in
acc.closed = false
if withBuf {
acc.WithBuffer()
}
acc.mu.Unlock()
// Reset counter to stop percentage going over 100%
acc.values.mu.Lock()
acc.values.lpBytes = 0
acc.values.bytes = 0
acc.values.mu.Unlock()
}
// averageLoop calculates averages for the stats in the background
func (acc *Account) averageLoop() {
tick := time.NewTicker(time.Second)
var period float64
defer tick.Stop()
for {
select {
case now := <-tick.C:
acc.values.mu.Lock()
// Add average of last second.
elapsed := now.Sub(acc.values.lpTime).Seconds()
avg := 0.0
if elapsed > 0 {
avg = float64(acc.values.lpBytes) / elapsed
}
// Soft start the moving average
if period < averagePeriod {
period++
}
acc.values.avg = (avg + (period-1)*acc.values.avg) / period
acc.values.lpBytes = 0
acc.values.lpTime = now
// Unlock stats
acc.values.mu.Unlock()
case <-acc.exit:
return
}
}
}
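// The update above is a soft-started exponentially weighted moving average:
// avgNew = (sample + (period-1)*avgOld) / period, with period growing from 1
// up to averagePeriod so the first few samples are not swamped by the zero
// initial average.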
// checkReadBefore checks that the read is valid before it has happened, returning
// the number of bytes which may still be read before the transfer limit is reached.
func (acc *Account) checkReadBefore() (bytesUntilLimit int64, err error) {
// Check to see if context is cancelled
if err = acc.ctx.Err(); err != nil {
return 0, err
}
acc.values.mu.Lock()
if acc.values.max >= 0 {
bytesUntilLimit = acc.values.max - acc.stats.GetBytes()
if bytesUntilLimit < 0 {
acc.values.mu.Unlock()
return bytesUntilLimit, ErrorMaxTransferLimitReachedFatal
}
} else {
bytesUntilLimit = 1 << 62
}
// Set start time.
if acc.values.start.IsZero() {
acc.values.start = time.Now()
}
acc.values.mu.Unlock()
return bytesUntilLimit, nil
}
// checkReadAfter checks the read after it has happened, trimming the byte count and setting the error if the transfer limit was exceeded
func (acc *Account) checkReadAfter(bytesUntilLimit int64, n int, err error) (outN int, outErr error) {
bytesUntilLimit -= int64(n)
if bytesUntilLimit < 0 {
// chop the overage off
n += int(bytesUntilLimit)
if n < 0 {
n = 0
}
err = ErrorMaxTransferLimitReachedFatal
}
return n, err
}
// ServerSideTransferStart should be called at the start of a server-side transfer
//
// This pretends a transfer has started
func (acc *Account) ServerSideTransferStart() {
acc.values.mu.Lock()
// Set start time.
if acc.values.start.IsZero() {
acc.values.start = time.Now()
}
acc.values.mu.Unlock()
}
// ServerSideTransferEnd accounts for a read of n bytes in a server-side
// transfer to be treated as a normal transfer.
func (acc *Account) ServerSideTransferEnd(n int64) {
// Update Stats
acc.values.mu.Lock()
acc.values.bytes += n
acc.values.mu.Unlock()
acc.stats.Bytes(n)
}
// serverSideEnd accounts for non-specific server-side data
func (acc *Account) serverSideEnd(n int64) {
// Account for bytes unless we are checking
if !acc.checking {
acc.stats.BytesNoNetwork(n)
}
}
// ServerSideCopyEnd accounts for a read of n bytes in a server-side copy
func (acc *Account) ServerSideCopyEnd(n int64) {
acc.stats.AddServerSideCopy(n)
acc.serverSideEnd(n)
}
// ServerSideMoveEnd accounts for a read of n bytes in a server-side move
func (acc *Account) ServerSideMoveEnd(n int64) {
acc.stats.AddServerSideMove(n)
acc.serverSideEnd(n)
}
// DryRun accounts for statistics without running the operation
func (acc *Account) DryRun(n int64) {
acc.ServerSideTransferStart()
acc.ServerSideTransferEnd(n)
}
// Account for n bytes from the current file bandwidth limit (if any)
func (acc *Account) limitPerFileBandwidth(n int) {
acc.values.mu.Lock()
tokenBucket := acc.tokenBucket[TokenBucketSlotAccounting]
acc.values.mu.Unlock()
if tokenBucket != nil {
err := tokenBucket.WaitN(context.Background(), n)
if err != nil {
fs.Errorf(nil, "Token bucket error: %v", err)
}
}
}
// Account the read
func (acc *Account) accountReadN(n int64) {
// Update Stats
acc.values.mu.Lock()
acc.values.lpBytes += n
acc.values.bytes += n
acc.values.mu.Unlock()
acc.stats.Bytes(n)
}
// Account the read and limit bandwidth
func (acc *Account) accountRead(n int) {
acc.accountReadN(int64(n))
TokenBucket.LimitBandwidth(TokenBucketSlotAccounting, n)
acc.limitPerFileBandwidth(n)
}
// read bytes from the io.Reader passed in and account them
func (acc *Account) read(in io.Reader, p []byte) (n int, err error) {
bytesUntilLimit, err := acc.checkReadBefore()
if err == nil {
n, err = in.Read(p)
acc.accountRead(n)
n, err = acc.checkReadAfter(bytesUntilLimit, n, err)
}
return n, err
}
// Read bytes from the object - see io.Reader
func (acc *Account) Read(p []byte) (n int, err error) {
acc.mu.Lock()
defer acc.mu.Unlock()
return acc.read(acc.in, p)
}
// Seek to position in the object - see io.Seeker
//
// May return an error if not implemented by the underlying reader.
func (acc *Account) Seek(offset int64, whence int) (int64, error) {
acc.mu.Lock()
defer acc.mu.Unlock()
do, ok := acc.in.(io.Seeker)
if !ok {
return 0, fmt.Errorf("internal error: Seek not implemented for %T", acc.in)
}
return do.Seek(offset, whence)
}
// ReadAt from off into p - see io.ReaderAt
//
// May return an error if not implemented by the underlying reader.
func (acc *Account) ReadAt(p []byte, off int64) (n int, err error) {
acc.mu.Lock()
defer acc.mu.Unlock()
do, ok := acc.in.(io.ReaderAt)
if !ok {
return 0, fmt.Errorf("internal error: ReadAt not implemented for %T", acc.in)
}
bytesUntilLimit, err := acc.checkReadBefore()
if err == nil {
n, err = do.ReadAt(p, off)
acc.accountRead(n)
n, err = acc.checkReadAfter(bytesUntilLimit, n, err)
}
return n, err
}
// Thin wrapper for w
type accountWriteTo struct {
w io.Writer
acc *Account
}
// Write writes len(p) bytes from p to the underlying data stream. It
// returns the number of bytes written from p (0 <= n <= len(p)) and
// any error encountered that caused the write to stop early. Write
// must return a non-nil error if it returns n < len(p). Write must
// not modify the slice data, even temporarily.
//
// Implementations must not retain p.
func (awt *accountWriteTo) Write(p []byte) (n int, err error) {
bytesUntilLimit, err := awt.acc.checkReadBefore()
if err == nil {
n, err = awt.w.Write(p)
n, err = awt.acc.checkReadAfter(bytesUntilLimit, n, err)
awt.acc.accountRead(n)
}
return n, err
}
// WriteTo writes data to w until there's no more data to write or
// when an error occurs. The return value n is the number of bytes
// written. Any error encountered during the write is also returned.
func (acc *Account) WriteTo(w io.Writer) (n int64, err error) {
acc.mu.Lock()
in := acc.in
acc.mu.Unlock()
wrappedWriter := accountWriteTo{w: w, acc: acc}
if do, ok := in.(io.WriterTo); ok {
n, err = do.WriteTo(&wrappedWriter)
} else {
n, err = io.Copy(&wrappedWriter, in)
}
return
}
// AccountRead accounts for having read n bytes
func (acc *Account) AccountRead(n int) (err error) {
acc.mu.Lock()
defer acc.mu.Unlock()
bytesUntilLimit, err := acc.checkReadBefore()
if err == nil {
n, err = acc.checkReadAfter(bytesUntilLimit, n, err)
acc.accountRead(n)
}
return err
}
// AccountReadN accounts for having read n bytes
//
// Does not obey any transfer limits, bandwidth limits, etc.
func (acc *Account) AccountReadN(n int64) {
acc.mu.Lock()
defer acc.mu.Unlock()
acc.accountReadN(n)
}
// Close the object
func (acc *Account) Close() error {
acc.mu.Lock()
defer acc.mu.Unlock()
if acc.closed {
return nil
}
acc.closed = true
if acc.close == nil {
return nil
}
return acc.close.Close()
}
// Done with accounting - must be called to free accounting goroutine
func (acc *Account) Done() {
acc.mu.Lock()
defer acc.mu.Unlock()
close(acc.exit)
acc.stats.inProgress.clear(acc.name)
}
// progress returns bytes read as well as the size.
// Size can be <= 0 if the size is unknown.
func (acc *Account) progress() (bytes, size int64) {
if acc == nil {
return 0, 0
}
acc.values.mu.Lock()
bytes, size = acc.values.bytes, acc.size
acc.values.mu.Unlock()
return bytes, size
}
// speed returns the speed of the current file transfer
// in bytes per second, as well as an exponentially weighted moving average.
// If no read has completed yet, 0 is returned for both values.
func (acc *Account) speed() (bps, current float64) {
if acc == nil {
return 0, 0
}
acc.values.mu.Lock()
defer acc.values.mu.Unlock()
if acc.values.bytes == 0 {
return 0, 0
}
// Calculate speed from first read.
total := float64(time.Since(acc.values.start)) / float64(time.Second)
if total > 0 {
bps = float64(acc.values.bytes) / total
} else {
bps = 0.0
}
current = acc.values.avg
return
}
// eta returns the ETA of the current operation,
// rounded to full seconds.
// If the ETA cannot be determined 'ok' returns false.
func (acc *Account) eta() (etaDuration time.Duration, ok bool) {
if acc == nil {
return 0, false
}
acc.values.mu.Lock()
defer acc.values.mu.Unlock()
return eta(acc.values.bytes, acc.size, acc.values.avg)
}
// shortenName shortens in to at most size runes
// If size <= 0 then in is left untouched
func shortenName(in string, size int) string {
if size <= 0 {
return in
}
if utf8.RuneCountInString(in) <= size {
return in
}
name := []rune(in)
size-- // don't count ellipsis rune
suffixLength := size / 2
prefixLength := size - suffixLength
suffixStart := len(name) - suffixLength
name = append(append(name[:prefixLength], '…'), name[suffixStart:]...)
return string(name)
}
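// For example, with the rune counting above a 21 rune name shortened to 10
// runes keeps 5 runes of prefix and 4 of suffix around the ellipsis:
//
//	shortenName("averylongfilename.txt", 10) // "avery….txt"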
// String produces stats for this file
func (acc *Account) String() string {
a, b := acc.progress()
_, cur := acc.speed()
eta, etaok := acc.eta()
etas := "-"
if etaok {
if eta > 0 {
etas = fmt.Sprintf("%v", eta)
} else {
etas = "0s"
}
}
if acc.ci.DataRateUnit == "bits" {
cur *= 8
}
percentageDone := 0
if b > 0 {
percentageDone = int(100 * float64(a) / float64(b))
}
return fmt.Sprintf("%*s:%3d%% /%s, %s/s, %s",
acc.ci.StatsFileNameLength,
shortenName(acc.name, acc.ci.StatsFileNameLength),
percentageDone,
fs.SizeSuffix(b),
fs.SizeSuffix(cur),
etas,
)
}
// rcStats adds remote control stats for this file
func (acc *Account) rcStats(out rc.Params) {
a, b := acc.progress()
out["bytes"] = a
out["size"] = b
spd, cur := acc.speed()
out["speed"] = spd
out["speedAvg"] = cur
eta, etaOK := acc.eta()
if etaOK {
out["eta"] = eta.Seconds()
} else {
out["eta"] = nil
}
out["name"] = acc.name
percentageDone := 0
if b > 0 {
percentageDone = int(100 * float64(a) / float64(b))
}
out["percentage"] = percentageDone
out["group"] = acc.stats.group
}
// OldStream returns the top io.Reader
func (acc *Account) OldStream() io.Reader {
acc.mu.Lock()
defer acc.mu.Unlock()
return acc.in
}
// SetStream updates the top io.Reader
func (acc *Account) SetStream(in io.Reader) {
acc.mu.Lock()
acc.in = in
acc.mu.Unlock()
}
// WrapStream wraps an io.Reader so it will be accounted in the same
// way as this Account
func (acc *Account) WrapStream(in io.Reader) io.Reader {
return &accountStream{
acc: acc,
in: in,
}
}
// accountStream accounts a single io.Reader into a parent *Account
type accountStream struct {
acc *Account
in io.Reader
}
// OldStream returns the underlying stream
func (a *accountStream) OldStream() io.Reader {
return a.in
}
// SetStream sets the underlying stream
func (a *accountStream) SetStream(in io.Reader) {
a.in = in
}
// WrapStream wraps the passed in reader in the parent accounter
func (a *accountStream) WrapStream(in io.Reader) io.Reader {
return a.acc.WrapStream(in)
}
// Read bytes from the object - see io.Reader
func (a *accountStream) Read(p []byte) (n int, err error) {
return a.acc.read(a.in, p)
}
// Accounter accounts a stream allowing the accounting to be removed and re-added
type Accounter interface {
io.Reader
OldStream() io.Reader
SetStream(io.Reader)
WrapStream(io.Reader) io.Reader
}
// WrapFn wraps an io.Reader (for accounting purposes usually)
type WrapFn func(io.Reader) io.Reader
// UnWrap unwraps a reader returning unwrapped and wrap, a function to
// wrap it back up again. If `in` is an Accounter then this function
// will return the reader with the accounting removed, and wrap will put
// the accounting back onto the new Reader passed in.
//
// This allows functions which wrap io.Readers to move the accounting
// to the end of the wrapped chain of readers. This is very important
// if buffering is being introduced and if the Reader might be wrapped
// again.
func UnWrap(in io.Reader) (unwrapped io.Reader, wrap WrapFn) {
acc, ok := in.(Accounter)
if !ok {
return in, func(r io.Reader) io.Reader { return r }
}
return acc.OldStream(), acc.WrapStream
}
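// Example (sketch): moving the accounting to the outside of a newly wrapped
// reader. The gzip wrapper is only illustrative; any reader-wrapping layer
// works the same way.
//
//	unwrapped, wrap := UnWrap(in) // strip the accounting, if any
//	zr, err := gzip.NewReader(unwrapped)
//	if err != nil {
//		return err
//	}
//	in = wrap(zr) // re-apply the accounting outside the new wrapper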
// UnWrapAccounting unwraps a reader returning unwrapped and acc a
// pointer to the accounting.
//
// The caller is expected to manage the accounting at this point.
func UnWrapAccounting(in io.Reader) (unwrapped io.Reader, acc *Account) {
a, ok := in.(*accountStream)
if !ok {
return in, nil
}
return a.in, a.acc
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/accounting/accounting_other.go | fs/accounting/accounting_other.go | // Accounting and limiting reader
// Non-unix specific functions.
//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris
package accounting
// startSignalHandler() is Unix specific and does nothing under non-Unix
// platforms.
func (tb *tokenBucket) startSignalHandler() {}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/accounting/stats_test.go | fs/accounting/stats_test.go | package accounting
import (
"context"
"fmt"
"io"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestETA(t *testing.T) {
for _, test := range []struct {
size, total int64
rate float64
wantETA time.Duration
wantOK bool
wantString string
}{
// Custom String Cases
{size: 0, total: 365 * 86400, rate: 1.0, wantETA: 365 * 86400 * time.Second, wantOK: true, wantString: "1y"},
{size: 0, total: 7 * 86400, rate: 1.0, wantETA: 7 * 86400 * time.Second, wantOK: true, wantString: "1w"},
{size: 0, total: 1 * 86400, rate: 1.0, wantETA: 1 * 86400 * time.Second, wantOK: true, wantString: "1d"},
{size: 0, total: 1110 * 86400, rate: 1.0, wantETA: 1110 * 86400 * time.Second, wantOK: true, wantString: "3y2w1d"},
{size: 0, total: 15 * 86400, rate: 1.0, wantETA: 15 * 86400 * time.Second, wantOK: true, wantString: "2w1d"},
// Composite Custom String Cases
{size: 0, total: 1.5 * 86400, rate: 1.0, wantETA: 1.5 * 86400 * time.Second, wantOK: true, wantString: "1d12h"},
{size: 0, total: 95000, rate: 1.0, wantETA: 95000 * time.Second, wantOK: true, wantString: "1d2h23m"}, // Short format, if full it would be "1d2h23m20s"
// Standard Duration String Cases
{size: 0, total: 1, rate: 2.0, wantETA: 0, wantOK: true, wantString: "0s"},
{size: 0, total: 1, rate: 1.0, wantETA: time.Second, wantOK: true, wantString: "1s"},
{size: 0, total: 1, rate: 0.5, wantETA: 2 * time.Second, wantOK: true, wantString: "2s"},
{size: 0, total: 100, rate: 1.0, wantETA: 100 * time.Second, wantOK: true, wantString: "1m40s"},
{size: 50, total: 100, rate: 1.0, wantETA: 50 * time.Second, wantOK: true, wantString: "50s"},
{size: 100, total: 100, rate: 1.0, wantETA: 0 * time.Second, wantOK: true, wantString: "0s"},
// No String Cases
{size: -1, total: 100, rate: 1.0, wantETA: 0, wantOK: false, wantString: "-"},
{size: 200, total: 100, rate: 1.0, wantETA: 0, wantOK: false, wantString: "-"},
{size: 10, total: -1, rate: 1.0, wantETA: 0, wantOK: false, wantString: "-"},
{size: 10, total: 20, rate: 0.0, wantETA: 0, wantOK: false, wantString: "-"},
{size: 10, total: 20, rate: -1.0, wantETA: 0, wantOK: false, wantString: "-"},
{size: 0, total: 0, rate: 1.0, wantETA: 0, wantOK: false, wantString: "-"},
// Extreme Cases
{size: 0, total: (1 << 63) - 1, rate: 1.0, wantETA: (time.Duration((1<<63)-1) / time.Second) * time.Second, wantOK: true, wantString: "-"},
{size: 0, total: ((1 << 63) - 1) / int64(time.Second), rate: 1.0, wantETA: (time.Duration((1<<63)-1) / time.Second) * time.Second, wantOK: true, wantString: "-"},
{size: 0, total: ((1<<63)-1)/int64(time.Second) - 1, rate: 1.0, wantETA: (time.Duration((1<<63)-1)/time.Second - 1) * time.Second, wantOK: true, wantString: "292y24w3d"}, // Short format, if full it would be "292y24w3d23h47m15s"
{size: 0, total: ((1<<63)-1)/int64(time.Second) - 1, rate: 0.1, wantETA: (time.Duration((1<<63)-1) / time.Second) * time.Second, wantOK: true, wantString: "-"},
} {
t.Run(fmt.Sprintf("size=%d/total=%d/rate=%f", test.size, test.total, test.rate), func(t *testing.T) {
gotETA, gotOK := eta(test.size, test.total, test.rate)
assert.Equal(t, int64(test.wantETA), int64(gotETA))
assert.Equal(t, test.wantOK, gotOK)
gotString := etaString(test.size, test.total, test.rate)
assert.Equal(t, test.wantString, gotString)
})
}
}
func TestPercentage(t *testing.T) {
assert.Equal(t, percent(0, 1000), "0%")
assert.Equal(t, percent(1, 1000), "0%")
assert.Equal(t, percent(9, 1000), "1%")
assert.Equal(t, percent(500, 1000), "50%")
assert.Equal(t, percent(1000, 1000), "100%")
assert.Equal(t, percent(1e8, 1e9), "10%")
assert.Equal(t, percent(1e8, 1e9), "10%")
assert.Equal(t, percent(0, 0), "-")
assert.Equal(t, percent(100, -100), "-")
assert.Equal(t, percent(-100, 100), "-")
assert.Equal(t, percent(-100, -100), "-")
}
func TestStatsError(t *testing.T) {
ctx := context.Background()
s := NewStats(ctx)
assert.Equal(t, int64(0), s.GetErrors())
assert.False(t, s.HadFatalError())
assert.False(t, s.HadRetryError())
assert.Equal(t, time.Time{}, s.RetryAfter())
assert.Equal(t, nil, s.GetLastError())
assert.False(t, s.Errored())
t0 := time.Now()
t1 := t0.Add(time.Second)
_ = s.Error(nil)
assert.Equal(t, int64(0), s.GetErrors())
assert.False(t, s.HadFatalError())
assert.False(t, s.HadRetryError())
assert.Equal(t, time.Time{}, s.RetryAfter())
assert.Equal(t, nil, s.GetLastError())
assert.False(t, s.Errored())
_ = s.Error(io.EOF)
assert.Equal(t, int64(1), s.GetErrors())
assert.False(t, s.HadFatalError())
assert.True(t, s.HadRetryError())
assert.Equal(t, time.Time{}, s.RetryAfter())
assert.Equal(t, io.EOF, s.GetLastError())
assert.True(t, s.Errored())
e := fserrors.ErrorRetryAfter(t0)
_ = s.Error(e)
assert.Equal(t, int64(2), s.GetErrors())
assert.False(t, s.HadFatalError())
assert.True(t, s.HadRetryError())
assert.Equal(t, t0, s.RetryAfter())
assert.Equal(t, e, s.GetLastError())
err := fmt.Errorf("potato: %w", fserrors.ErrorRetryAfter(t1))
err = s.Error(err)
assert.Equal(t, int64(3), s.GetErrors())
assert.False(t, s.HadFatalError())
assert.True(t, s.HadRetryError())
assert.Equal(t, t1, s.RetryAfter())
assert.Equal(t, t1, fserrors.RetryAfterErrorTime(err))
_ = s.Error(fserrors.FatalError(io.EOF))
assert.Equal(t, int64(4), s.GetErrors())
assert.True(t, s.HadFatalError())
assert.True(t, s.HadRetryError())
assert.Equal(t, t1, s.RetryAfter())
s.ResetErrors()
assert.Equal(t, int64(0), s.GetErrors())
assert.False(t, s.HadFatalError())
assert.False(t, s.HadRetryError())
assert.Equal(t, time.Time{}, s.RetryAfter())
assert.Equal(t, nil, s.GetLastError())
assert.False(t, s.Errored())
_ = s.Error(fserrors.NoRetryError(io.EOF))
assert.Equal(t, int64(1), s.GetErrors())
assert.False(t, s.HadFatalError())
assert.False(t, s.HadRetryError())
assert.Equal(t, time.Time{}, s.RetryAfter())
}
func TestStatsTotalDuration(t *testing.T) {
ctx := context.Background()
startTime := time.Now()
time1 := startTime.Add(-40 * time.Second)
time2 := time1.Add(10 * time.Second)
time3 := time2.Add(10 * time.Second)
time4 := time3.Add(10 * time.Second)
t.Run("Single completed transfer", func(t *testing.T) {
s := NewStats(ctx)
tr1 := &Transfer{
startedAt: time1,
completedAt: time2,
}
s.AddTransfer(tr1)
s.mu.Lock()
total := s._totalDuration()
s.mu.Unlock()
assert.Equal(t, 1, len(s.startedTransfers))
assert.Equal(t, 10*time.Second, total)
s.RemoveTransfer(tr1)
assert.Equal(t, 10*time.Second, total)
assert.Equal(t, 0, len(s.startedTransfers))
})
t.Run("Single uncompleted transfer", func(t *testing.T) {
s := NewStats(ctx)
tr1 := &Transfer{
startedAt: time1,
}
s.AddTransfer(tr1)
s.mu.Lock()
total := s._totalDuration()
s.mu.Unlock()
assert.Equal(t, time.Since(time1)/time.Second, total/time.Second)
s.RemoveTransfer(tr1)
assert.Equal(t, time.Since(time1)/time.Second, total/time.Second)
})
t.Run("Overlapping without ending", func(t *testing.T) {
s := NewStats(ctx)
tr1 := &Transfer{
startedAt: time2,
completedAt: time3,
}
s.AddTransfer(tr1)
tr2 := &Transfer{
startedAt: time2,
completedAt: time2.Add(time.Second),
}
s.AddTransfer(tr2)
tr3 := &Transfer{
startedAt: time1,
completedAt: time3,
}
s.AddTransfer(tr3)
tr4 := &Transfer{
startedAt: time3,
completedAt: time4,
}
s.AddTransfer(tr4)
tr5 := &Transfer{
startedAt: time.Now(),
}
s.AddTransfer(tr5)
time.Sleep(time.Millisecond)
s.mu.Lock()
total := s._totalDuration()
s.mu.Unlock()
assert.Equal(t, time.Duration(30), total/time.Second)
s.RemoveTransfer(tr1)
assert.Equal(t, time.Duration(30), total/time.Second)
s.RemoveTransfer(tr2)
assert.Equal(t, time.Duration(30), total/time.Second)
s.RemoveTransfer(tr3)
assert.Equal(t, time.Duration(30), total/time.Second)
s.RemoveTransfer(tr4)
assert.Equal(t, time.Duration(30), total/time.Second)
})
t.Run("Mixed completed and uncompleted transfers", func(t *testing.T) {
s := NewStats(ctx)
s.AddTransfer(&Transfer{
startedAt: time1,
completedAt: time2,
})
s.AddTransfer(&Transfer{
startedAt: time2,
})
s.AddTransfer(&Transfer{
startedAt: time3,
})
s.AddTransfer(&Transfer{
startedAt: time3,
})
s.mu.Lock()
total := s._totalDuration()
s.mu.Unlock()
assert.Equal(t, startTime.Sub(time1)/time.Second, total/time.Second)
})
}
func TestRemoteStats(t *testing.T) {
ctx := context.Background()
startTime := time.Now()
time1 := startTime.Add(-40 * time.Second)
time2 := time1.Add(10 * time.Second)
t.Run("Single completed transfer", func(t *testing.T) {
s := NewStats(ctx)
tr1 := &Transfer{
startedAt: time1,
completedAt: time2,
}
s.AddTransfer(tr1)
time.Sleep(time.Millisecond)
rs, err := s.RemoteStats(false)
require.NoError(t, err)
assert.Equal(t, float64(10), rs["transferTime"])
assert.Greater(t, rs["elapsedTime"], float64(0))
})
}
// make time ranges from string description for testing
func makeTimeRanges(t *testing.T, in []string) timeRanges {
trs := make(timeRanges, len(in))
for i, Range := range in {
var start, end int64
n, err := fmt.Sscanf(Range, "%d-%d", &start, &end)
require.NoError(t, err)
require.Equal(t, 2, n)
trs[i] = timeRange{time.Unix(start, 0), time.Unix(end, 0)}
}
return trs
}
func (trs timeRanges) toStrings() (out []string) {
out = []string{}
for _, tr := range trs {
out = append(out, fmt.Sprintf("%d-%d", tr.start.Unix(), tr.end.Unix()))
}
return out
}
func TestTimeRangeMerge(t *testing.T) {
for _, test := range []struct {
in []string
want []string
}{{
in: []string{},
want: []string{},
}, {
in: []string{"1-2"},
want: []string{"1-2"},
}, {
in: []string{"1-4", "2-3"},
want: []string{"1-4"},
}, {
in: []string{"2-3", "1-4"},
want: []string{"1-4"},
}, {
in: []string{"1-3", "2-4"},
want: []string{"1-4"},
}, {
in: []string{"2-4", "1-3"},
want: []string{"1-4"},
}, {
in: []string{"1-2", "2-3"},
want: []string{"1-3"},
}, {
in: []string{"2-3", "1-2"},
want: []string{"1-3"},
}, {
in: []string{"1-2", "3-4"},
want: []string{"1-2", "3-4"},
}, {
in: []string{"1-3", "7-8", "4-6", "2-5", "7-8", "7-8"},
want: []string{"1-6", "7-8"},
}} {
in := makeTimeRanges(t, test.in)
in.merge()
got := in.toStrings()
assert.Equal(t, test.want, got)
}
}
func TestTimeRangeCull(t *testing.T) {
for _, test := range []struct {
in []string
cutoff int64
want []string
wantDuration time.Duration
}{{
in: []string{},
cutoff: 1,
want: []string{},
wantDuration: 0 * time.Second,
}, {
in: []string{"1-2"},
cutoff: 1,
want: []string{"1-2"},
wantDuration: 0 * time.Second,
}, {
in: []string{"2-5", "7-9"},
cutoff: 1,
want: []string{"2-5", "7-9"},
wantDuration: 0 * time.Second,
}, {
in: []string{"2-5", "7-9"},
cutoff: 4,
want: []string{"2-5", "7-9"},
wantDuration: 0 * time.Second,
}, {
in: []string{"2-5", "7-9"},
cutoff: 5,
want: []string{"7-9"},
wantDuration: 3 * time.Second,
}, {
in: []string{"2-5", "7-9", "2-5", "2-5"},
cutoff: 6,
want: []string{"7-9"},
wantDuration: 9 * time.Second,
}, {
in: []string{"7-9", "3-3", "2-5"},
cutoff: 7,
want: []string{"7-9"},
wantDuration: 3 * time.Second,
}, {
in: []string{"2-5", "7-9"},
cutoff: 8,
want: []string{"7-9"},
wantDuration: 3 * time.Second,
}, {
in: []string{"2-5", "7-9"},
cutoff: 9,
want: []string{},
wantDuration: 5 * time.Second,
}, {
in: []string{"2-5", "7-9"},
cutoff: 10,
want: []string{},
wantDuration: 5 * time.Second,
}} {
in := makeTimeRanges(t, test.in)
cutoff := time.Unix(test.cutoff, 0)
gotDuration := in.cull(cutoff)
what := fmt.Sprintf("in=%q, cutoff=%d", test.in, test.cutoff)
got := in.toStrings()
assert.Equal(t, test.want, got, what)
assert.Equal(t, test.wantDuration, gotDuration, what)
}
}
func TestTimeRangeDuration(t *testing.T) {
assert.Equal(t, 0*time.Second, timeRanges{}.total())
assert.Equal(t, 1*time.Second, makeTimeRanges(t, []string{"1-2"}).total())
assert.Equal(t, 91*time.Second, makeTimeRanges(t, []string{"1-2", "10-100"}).total())
}
func TestPruneTransfers(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
for _, test := range []struct {
Name string
Transfers int
Limit int
ExpectedStartedTransfers int
}{
{
Name: "Limited number of StartedTransfers",
Limit: 100,
Transfers: 200,
ExpectedStartedTransfers: 100 + ci.Transfers,
},
{
Name: "Unlimited number of StartedTransfers",
Limit: -1,
Transfers: 200,
ExpectedStartedTransfers: 200,
},
} {
t.Run(test.Name, func(t *testing.T) {
prevLimit := MaxCompletedTransfers
MaxCompletedTransfers = test.Limit
defer func() { MaxCompletedTransfers = prevLimit }()
s := NewStats(ctx)
for i := int64(1); i <= int64(test.Transfers); i++ {
s.AddTransfer(&Transfer{
startedAt: time.Unix(i, 0),
completedAt: time.Unix(i+1, 0),
})
}
s.mu.Lock()
assert.Equal(t, time.Duration(test.Transfers)*time.Second, s._totalDuration())
assert.Equal(t, test.Transfers, len(s.startedTransfers))
s.mu.Unlock()
for range test.Transfers {
s.PruneTransfers()
}
s.mu.Lock()
assert.Equal(t, time.Duration(test.Transfers)*time.Second, s._totalDuration())
assert.Equal(t, test.ExpectedStartedTransfers, len(s.startedTransfers))
s.mu.Unlock()
})
}
}
func TestRemoveDoneTransfers(t *testing.T) {
ctx := context.Background()
s := NewStats(ctx)
const transfers = 10
for i := int64(1); i <= int64(transfers); i++ {
s.AddTransfer(&Transfer{
startedAt: time.Unix(i, 0),
completedAt: time.Unix(i+1, 0),
})
}
s.mu.Lock()
assert.Equal(t, time.Duration(transfers)*time.Second, s._totalDuration())
assert.Equal(t, transfers, len(s.startedTransfers))
s.mu.Unlock()
s.RemoveDoneTransfers()
s.mu.Lock()
assert.Equal(t, time.Duration(transfers)*time.Second, s._totalDuration())
assert.Equal(t, transfers, len(s.startedTransfers))
s.mu.Unlock()
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/accounting/token_bucket_test.go | fs/accounting/token_bucket_test.go | package accounting
import (
"context"
"testing"
"github.com/rclone/rclone/fs/rc"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/time/rate"
)
func TestRcBwLimit(t *testing.T) {
call := rc.Calls.Get("core/bwlimit")
assert.NotNil(t, call)
// Set
in := rc.Params{
"rate": "1M",
}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
assert.Equal(t, rc.Params{
"bytesPerSecond": int64(1048576),
"bytesPerSecondTx": int64(1048576),
"bytesPerSecondRx": int64(1048576),
"rate": "1Mi",
}, out)
assert.Equal(t, rate.Limit(1048576), TokenBucket.curr[0].Limit())
// Query
in = rc.Params{}
out, err = call.Fn(context.Background(), in)
require.NoError(t, err)
assert.Equal(t, rc.Params{
"bytesPerSecond": int64(1048576),
"bytesPerSecondTx": int64(1048576),
"bytesPerSecondRx": int64(1048576),
"rate": "1Mi",
}, out)
// Set
in = rc.Params{
"rate": "10M:1M",
}
out, err = call.Fn(context.Background(), in)
require.NoError(t, err)
assert.Equal(t, rc.Params{
"bytesPerSecond": int64(10485760),
"bytesPerSecondTx": int64(10485760),
"bytesPerSecondRx": int64(1048576),
"rate": "10Mi:1Mi",
}, out)
assert.Equal(t, rate.Limit(10485760), TokenBucket.curr[0].Limit())
// Query
in = rc.Params{}
out, err = call.Fn(context.Background(), in)
require.NoError(t, err)
assert.Equal(t, rc.Params{
"bytesPerSecond": int64(10485760),
"bytesPerSecondTx": int64(10485760),
"bytesPerSecondRx": int64(1048576),
"rate": "10Mi:1Mi",
}, out)
// Reset
in = rc.Params{
"rate": "off",
}
out, err = call.Fn(context.Background(), in)
require.NoError(t, err)
assert.Equal(t, rc.Params{
"bytesPerSecond": int64(-1),
"bytesPerSecondTx": int64(-1),
"bytesPerSecondRx": int64(-1),
"rate": "off",
}, out)
assert.Nil(t, TokenBucket.curr[0])
// Query
in = rc.Params{}
out, err = call.Fn(context.Background(), in)
require.NoError(t, err)
assert.Equal(t, rc.Params{
"bytesPerSecond": int64(-1),
"bytesPerSecondTx": int64(-1),
"bytesPerSecondRx": int64(-1),
"rate": "off",
}, out)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/accounting/transfer.go | fs/accounting/transfer.go | package accounting
import (
"context"
"encoding/json"
"io"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/rc"
)
// TransferSnapshot represents the state of an account at a point in time.
type TransferSnapshot struct {
Name string `json:"name"`
Size int64 `json:"size"`
Bytes int64 `json:"bytes"`
Checked bool `json:"checked"`
What string `json:"what"`
StartedAt time.Time `json:"started_at"`
CompletedAt time.Time `json:"completed_at,omitempty"`
Error error `json:"-"`
Group string `json:"group"`
SrcFs string `json:"srcFs,omitempty"`
DstFs string `json:"dstFs,omitempty"`
}
// MarshalJSON implements json.Marshaler interface.
func (as TransferSnapshot) MarshalJSON() ([]byte, error) {
err := ""
if as.Error != nil {
err = as.Error.Error()
}
type Alias TransferSnapshot
return json.Marshal(&struct {
Error string `json:"error"`
Alias
}{
Error: err,
Alias: (Alias)(as),
})
}
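// The Alias type above is the usual trick to stop MarshalJSON recursing into
// itself: Alias has the same fields but none of TransferSnapshot's methods,
// so the embedded value is marshalled with the default encoder while the
// error is flattened into a plain "error" string.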
// Transfer keeps track of initiated transfers and provides access to
// accounting functions.
// Transfer needs to be closed on completion.
type Transfer struct {
// these are initialised at creation and may be accessed without locking
stats *StatsInfo
remote string
size int64
startedAt time.Time
checking bool
what string // what kind of transfer this is
srcFs fs.Fs // source Fs - may be nil
dstFs fs.Fs // destination Fs - may be nil
// Protects all below
//
// NB to avoid deadlocks we must release this lock before
// calling any methods on Transfer.stats. This is because
// StatsInfo calls back into Transfer.
mu sync.RWMutex
acc *Account
err error
completedAt time.Time
}
// newCheckingTransfer instantiates new checking of the object.
func newCheckingTransfer(stats *StatsInfo, obj fs.DirEntry, what string) *Transfer {
return newTransferRemoteSize(stats, obj.Remote(), obj.Size(), true, what, nil, nil)
}
// newTransfer instantiates new transfer.
func newTransfer(stats *StatsInfo, obj fs.DirEntry, srcFs, dstFs fs.Fs) *Transfer {
return newTransferRemoteSize(stats, obj.Remote(), obj.Size(), false, "", srcFs, dstFs)
}
func newTransferRemoteSize(stats *StatsInfo, remote string, size int64, checking bool, what string, srcFs, dstFs fs.Fs) *Transfer {
tr := &Transfer{
stats: stats,
remote: remote,
size: size,
startedAt: time.Now(),
checking: checking,
what: what,
srcFs: srcFs,
dstFs: dstFs,
}
stats.AddTransfer(tr)
return tr
}
// Done ends the transfer.
// Must be called after transfer is finished to run proper cleanups.
func (tr *Transfer) Done(ctx context.Context, err error) {
if err != nil {
err = tr.stats.Error(err)
tr.mu.Lock()
tr.err = err
tr.mu.Unlock()
}
tr.mu.RLock()
acc := tr.acc
tr.mu.RUnlock()
ci := fs.GetConfig(ctx)
if acc != nil {
// Close the file if it is still open
if err := acc.Close(); err != nil {
fs.LogLevelPrintf(ci.StatsLogLevel, nil, "can't close account: %+v\n", err)
}
// Signal done with accounting
acc.Done()
// free the account since we may keep the transfer
acc = nil
}
tr.mu.Lock()
tr.completedAt = time.Now()
tr.mu.Unlock()
if tr.checking {
tr.stats.DoneChecking(tr.remote)
} else {
tr.stats.DoneTransferring(tr.remote, err == nil)
}
tr.stats.PruneTransfers()
}
// Reset allows the Account to be switched to another transfer method.
func (tr *Transfer) Reset(ctx context.Context) {
tr.mu.RLock()
acc := tr.acc
tr.acc = nil
tr.mu.RUnlock()
ci := fs.GetConfig(ctx)
if acc != nil {
acc.Done()
if err := acc.Close(); err != nil {
fs.LogLevelPrintf(ci.StatsLogLevel, nil, "can't close account: %+v\n", err)
}
}
}
// Account returns reader that knows how to keep track of transfer progress.
func (tr *Transfer) Account(ctx context.Context, in io.ReadCloser) *Account {
tr.mu.Lock()
if tr.acc == nil {
tr.acc = newAccountSizeName(ctx, tr.stats, in, tr.size, tr.remote)
} else {
tr.acc.UpdateReader(ctx, in)
}
tr.acc.checking = tr.checking
tr.mu.Unlock()
return tr.acc
}
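// Example (sketch): the usual lifecycle of a Transfer around a copy. The
// object open and the destination writer are placeholders for whatever the
// caller is doing.
//
//	tr := stats.NewTransfer(obj, dstFs)
//	var err error
//	if rc, openErr := obj.Open(ctx); openErr != nil {
//		err = openErr
//	} else {
//		acc := tr.Account(ctx, rc).WithBuffer()
//		_, err = io.Copy(dst, acc)
//	}
//	tr.Done(ctx, err) // records any error, closes the accounting and prunes old transfers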
// TimeRange returns the time transfer started and ended at. If not completed
// it will return zero time for end time.
func (tr *Transfer) TimeRange() (time.Time, time.Time) {
tr.mu.RLock()
defer tr.mu.RUnlock()
return tr.startedAt, tr.completedAt
}
// IsDone returns true if transfer is completed.
func (tr *Transfer) IsDone() bool {
tr.mu.RLock()
defer tr.mu.RUnlock()
return !tr.completedAt.IsZero()
}
// Snapshot produces stats for this account at a point in time.
func (tr *Transfer) Snapshot() TransferSnapshot {
tr.mu.RLock()
defer tr.mu.RUnlock()
var s, b int64 = tr.size, 0
if tr.acc != nil {
b, s = tr.acc.progress()
}
what := tr.what
if what == "" {
what = "transferring"
}
snapshot := TransferSnapshot{
Name: tr.remote,
Size: s,
Bytes: b,
Checked: tr.checking,
What: what,
StartedAt: tr.startedAt,
CompletedAt: tr.completedAt,
Error: tr.err,
Group: tr.stats.group,
}
if tr.srcFs != nil {
snapshot.SrcFs = fs.ConfigString(tr.srcFs)
}
if tr.dstFs != nil {
snapshot.DstFs = fs.ConfigString(tr.dstFs)
}
return snapshot
}
// rcStats returns stats for the transfer suitable for the rc
func (tr *Transfer) rcStats() rc.Params {
out := rc.Params{
"name": tr.remote, // no locking needed to access this
"size": tr.size,
}
if tr.srcFs != nil {
out["srcFs"] = fs.ConfigString(tr.srcFs)
}
if tr.dstFs != nil {
out["dstFs"] = fs.ConfigString(tr.dstFs)
}
return out
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/accounting/tpslimit_test.go | fs/accounting/tpslimit_test.go | package accounting
import (
"context"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/assert"
)
func TestLimitTPS(t *testing.T) {
timeTransactions := func(n int, minTime, maxTime time.Duration) {
start := time.Now()
for range n {
LimitTPS(context.Background())
}
dt := time.Since(start)
assert.True(t, dt >= minTime && dt <= maxTime, "Expecting time between %v and %v, got %v", minTime, maxTime, dt)
}
t.Run("Off", func(t *testing.T) {
assert.Nil(t, tpsBucket)
timeTransactions(100, 0*time.Millisecond, 100*time.Millisecond)
})
t.Run("On", func(t *testing.T) {
ctx, ci := fs.AddConfig(context.Background())
ci.TPSLimit = 100.0
ci.TPSLimitBurst = 0
StartLimitTPS(ctx)
assert.NotNil(t, tpsBucket)
defer func() {
tpsBucket = nil
}()
timeTransactions(100, 900*time.Millisecond, 5000*time.Millisecond)
})
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/accounting/stats_groups.go | fs/accounting/stats_groups.go | package accounting
import (
"context"
"fmt"
"sync"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fs"
)
const globalStats = "global_stats"
var groups *statsGroups
func init() {
// Init stats container
groups = newStatsGroups()
}
func rcListStats(ctx context.Context, in rc.Params) (rc.Params, error) {
out := make(rc.Params)
out["groups"] = groups.names()
return out, nil
}
func init() {
rc.Add(rc.Call{
Path: "core/group-list",
Fn: rcListStats,
Title: "Returns list of stats.",
Help: `
This returns a list of stats groups currently in memory.
Returns the following values:
` + "```" + `
{
"groups": an array of group names:
[
"group1",
"group2",
...
]
}
` + "```" + `
`,
})
}
func rcRemoteStats(ctx context.Context, in rc.Params) (rc.Params, error) {
// Check to see if we should filter by group.
group, err := in.GetString("group")
if rc.NotErrParamNotFound(err) {
return rc.Params{}, err
}
short, _ := in.GetBool("short")
if group != "" {
return StatsGroup(ctx, group).RemoteStats(short)
}
return groups.sum(ctx).RemoteStats(short)
}
func init() {
rc.Add(rc.Call{
Path: "core/stats",
Fn: rcRemoteStats,
Title: "Returns stats about current transfers.",
Help: `
This returns all available stats:
rclone rc core/stats
If group is not provided then summed up stats for all groups will be
returned.
Parameters
- group - name of the stats group (string, optional)
- short - if true will not return the transferring and checking arrays (boolean, optional)
Returns the following values:
` + "```" + `
{
"bytes": total transferred bytes since the start of the group,
"checks": number of files checked,
"deletes" : number of files deleted,
"elapsedTime": time in floating point seconds since rclone was started,
"errors": number of errors,
"eta": estimated time in seconds until the group completes,
"fatalError": boolean whether there has been at least one fatal error,
"lastError": last error string,
"renames" : number of files renamed,
"listed" : number of directory entries listed,
"retryError": boolean showing whether there has been at least one non-NoRetryError,
"serverSideCopies": number of server side copies done,
"serverSideCopyBytes": number bytes server side copied,
"serverSideMoves": number of server side moves done,
"serverSideMoveBytes": number bytes server side moved,
"speed": average speed in bytes per second since start of the group,
"totalBytes": total number of bytes in the group,
"totalChecks": total number of checks in the group,
"totalTransfers": total number of transfers in the group,
"transferTime" : total time spent on running jobs,
"transfers": number of transferred files,
"transferring": an array of currently active file transfers:
[
{
"bytes": total transferred bytes for this file,
"eta": estimated time in seconds until file transfer completion
"name": name of the file,
"percentage": progress of the file transfer in percent,
"speed": average speed over the whole transfer in bytes per second,
"speedAvg": current speed in bytes per second as an exponentially weighted moving average,
"size": size of the file in bytes
}
],
"checking": an array of names of currently active file checks
[]
}
` + "```" + `
Values for "transferring", "checking" and "lastError" are only assigned if data is available.
The value for "eta" is null if an eta cannot be determined.
`,
})
}
func rcTransferredStats(ctx context.Context, in rc.Params) (rc.Params, error) {
// Check to see if we should filter by group.
group, err := in.GetString("group")
if rc.NotErrParamNotFound(err) {
return rc.Params{}, err
}
out := make(rc.Params)
if group != "" {
out["transferred"] = StatsGroup(ctx, group).Transferred()
} else {
out["transferred"] = groups.sum(ctx).Transferred()
}
return out, nil
}
func init() {
rc.Add(rc.Call{
Path: "core/transferred",
Fn: rcTransferredStats,
Title: "Returns stats about completed transfers.",
Help: `
This returns stats about completed transfers:
rclone rc core/transferred
If group is not provided then completed transfers for all groups will be
returned.
Note only the last 100 completed transfers are returned.
Parameters
- group - name of the stats group (string)
Returns the following values:
` + "```" + `
{
"transferred": an array of completed transfers (including failed ones):
[
{
"name": name of the file,
"size": size of the file in bytes,
"bytes": total transferred bytes for this file,
"checked": if the transfer is only checked (skipped, deleted),
"what": the purpose of the transfer (transferring, deleting, checking, importing, hashing, merging, listing, moving, renaming),
"timestamp": integer representing millisecond unix epoch,
"error": string description of the error (empty if successful),
"jobid": id of the job that this transfer belongs to
}
]
}
` + "```" + `
`,
})
}
func rcResetStats(ctx context.Context, in rc.Params) (rc.Params, error) {
// Check to see if we should filter by group.
group, err := in.GetString("group")
if rc.NotErrParamNotFound(err) {
return rc.Params{}, err
}
if group != "" {
stats := groups.get(group)
if stats == nil {
return rc.Params{}, fmt.Errorf("group %q not found", group)
}
stats.ResetErrors()
stats.ResetCounters()
} else {
groups.reset()
}
return rc.Params{}, nil
}
func init() {
rc.Add(rc.Call{
Path: "core/stats-reset",
Fn: rcResetStats,
Title: "Reset stats.",
Help: `
This clears counters, errors and finished transfers for all stats or for a specific
stats group if group is provided.
Parameters
- group - name of the stats group (string)
`,
})
}
func rcDeleteStats(ctx context.Context, in rc.Params) (rc.Params, error) {
// Group name is required because we only delete a single group.
group, err := in.GetString("group")
if rc.NotErrParamNotFound(err) {
return rc.Params{}, err
}
if group != "" {
groups.delete(group)
}
return rc.Params{}, nil
}
func init() {
rc.Add(rc.Call{
Path: "core/stats-delete",
Fn: rcDeleteStats,
Title: "Delete stats group.",
Help: `
This deletes an entire stats group.
Parameters
- group - name of the stats group (string)
`,
})
}
type statsGroupCtx int64
const statsGroupKey statsGroupCtx = 1
// WithStatsGroup returns copy of the parent context with assigned group.
func WithStatsGroup(parent context.Context, group string) context.Context {
return context.WithValue(parent, statsGroupKey, group)
}
// StatsGroupFromContext returns group from the context if it's available.
// Returns false if group is empty.
func StatsGroupFromContext(ctx context.Context) (string, bool) {
statsGroup, ok := ctx.Value(statsGroupKey).(string)
if statsGroup == "" {
ok = false
}
return statsGroup, ok
}
// Stats gets stats by extracting group from context.
func Stats(ctx context.Context) *StatsInfo {
group, ok := StatsGroupFromContext(ctx)
if !ok {
return GlobalStats()
}
return StatsGroup(ctx, group)
}
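// Usage sketch (illustrative only, not called from this file): callers tag a
// context with a group name and later retrieve the same StatsInfo through it.
// The group name "job/1" is just an example value.
//
//	ctx := WithStatsGroup(context.Background(), "job/1")
//	stats := Stats(ctx)          // resolves to the "job/1" group, creating it if needed
//	_ = stats.DeleteFile(ctx, 0) // account a delete against that group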
// StatsGroup gets stats by group name.
func StatsGroup(ctx context.Context, group string) *StatsInfo {
stats := groups.get(group)
if stats == nil {
return NewStatsGroup(ctx, group)
}
return stats
}
// GlobalStats returns special stats used for global accounting.
func GlobalStats() *StatsInfo {
return StatsGroup(context.Background(), globalStats)
}
// NewStatsGroup creates new stats under the named group.
func NewStatsGroup(ctx context.Context, group string) *StatsInfo {
stats := NewStats(ctx)
stats.startAverageLoop()
stats.group = group
groups.set(ctx, group, stats)
return stats
}
// statsGroups holds a synchronized map of stats
type statsGroups struct {
mu sync.Mutex
m map[string]*StatsInfo
order []string
}
// newStatsGroups makes a new statsGroups object
func newStatsGroups() *statsGroups {
return &statsGroups{
m: make(map[string]*StatsInfo),
}
}
// set marks the stats as belonging to a group
func (sg *statsGroups) set(ctx context.Context, group string, stats *StatsInfo) {
sg.mu.Lock()
defer sg.mu.Unlock()
ci := fs.GetConfig(ctx)
// Limit number of groups kept in memory.
if len(sg.order) >= ci.MaxStatsGroups {
group := sg.order[0]
fs.Debugf(nil, "Max number of stats groups reached removing %s", group)
delete(sg.m, group)
r := (len(sg.order) - ci.MaxStatsGroups) + 1
sg.order = sg.order[r:]
}
// Exclude global stats from listing
if group != globalStats {
sg.order = append(sg.order, group)
}
sg.m[group] = stats
}
// get gets the stats for group, or nil if not found
func (sg *statsGroups) get(group string) *StatsInfo {
sg.mu.Lock()
defer sg.mu.Unlock()
stats, ok := sg.m[group]
if !ok {
return nil
}
return stats
}
func (sg *statsGroups) names() []string {
sg.mu.Lock()
defer sg.mu.Unlock()
return sg.order
}
// sum returns aggregate stats containing the summation of all groups.
func (sg *statsGroups) sum(ctx context.Context) *StatsInfo {
startTime := GlobalStats().startTime
sg.mu.Lock()
defer sg.mu.Unlock()
sum := NewStats(ctx)
for _, stats := range sg.m {
stats.mu.RLock()
{
sum.bytes += stats.bytes
sum.errors += stats.errors
if sum.lastError == nil && stats.lastError != nil {
sum.lastError = stats.lastError
}
sum.fatalError = sum.fatalError || stats.fatalError
sum.retryError = sum.retryError || stats.retryError
if stats.retryAfter.After(sum.retryAfter) {
// Update the retryAfter field only if it is a later date than the current one in the sum
sum.retryAfter = stats.retryAfter
}
sum.checks += stats.checks
sum.checking.merge(stats.checking)
sum.checkQueue += stats.checkQueue
sum.checkQueueSize += stats.checkQueueSize
sum.transfers += stats.transfers
sum.transferring.merge(stats.transferring)
sum.transferQueueSize += stats.transferQueueSize
sum.listed += stats.listed
sum.renames += stats.renames
sum.renameQueue += stats.renameQueue
sum.renameQueueSize += stats.renameQueueSize
sum.deletes += stats.deletes
sum.deletedDirs += stats.deletedDirs
sum.inProgress.merge(stats.inProgress)
sum.startedTransfers = append(sum.startedTransfers, stats.startedTransfers...)
sum.oldTimeRanges = append(sum.oldTimeRanges, stats.oldTimeRanges...)
sum.oldDuration += stats.oldDuration
stats.average.mu.Lock()
sum.average.speed += stats.average.speed
stats.average.mu.Unlock()
}
stats.mu.RUnlock()
}
sum.startTime = startTime
return sum
}
func (sg *statsGroups) reset() {
sg.mu.Lock()
defer sg.mu.Unlock()
for _, stats := range sg.m {
stats.ResetErrors()
stats.ResetCounters()
}
sg.m = make(map[string]*StatsInfo)
sg.order = nil
}
// delete removes all references to the group.
func (sg *statsGroups) delete(group string) {
sg.mu.Lock()
defer sg.mu.Unlock()
stats := sg.m[group]
if stats == nil {
return
}
stats.ResetErrors()
stats.ResetCounters()
delete(sg.m, group)
// Remove group reference from the ordering slice.
tmp := sg.order[:0]
for _, g := range sg.order {
if g != group {
tmp = append(tmp, g)
}
}
sg.order = tmp
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/accounting/accounting_test.go | fs/accounting/accounting_test.go | package accounting
import (
"bytes"
"context"
"fmt"
"io"
"strings"
"testing"
"unicode/utf8"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/asyncreader"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/readers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Check it satisfies the interfaces
var (
_ io.ReadCloser = &Account{}
_ io.WriterTo = &Account{}
_ io.Reader = &accountStream{}
_ Accounter = &Account{}
_ Accounter = &accountStream{}
)
func TestNewAccountSizeName(t *testing.T) {
ctx := context.Background()
in := io.NopCloser(bytes.NewBuffer([]byte{1}))
stats := NewStats(ctx)
acc := newAccountSizeName(context.Background(), stats, in, 1, "test")
assert.Equal(t, in, acc.in)
assert.Equal(t, acc, stats.inProgress.get("test"))
err := acc.Close()
assert.NoError(t, err)
assert.Equal(t, acc, stats.inProgress.get("test"))
acc.Done()
assert.Nil(t, stats.inProgress.get("test"))
assert.False(t, acc.HasBuffer())
}
func TestAccountWithBuffer(t *testing.T) {
ctx := context.Background()
in := io.NopCloser(bytes.NewBuffer([]byte{1}))
stats := NewStats(ctx)
acc := newAccountSizeName(ctx, stats, in, -1, "test")
assert.False(t, acc.HasBuffer())
acc.WithBuffer()
assert.True(t, acc.HasBuffer())
// should have a buffer for an unknown size
_, ok := acc.in.(*asyncreader.AsyncReader)
require.True(t, ok)
assert.NoError(t, acc.Close())
acc = newAccountSizeName(ctx, stats, in, 1, "test")
acc.WithBuffer()
// should not have a buffer for a small size
_, ok = acc.in.(*asyncreader.AsyncReader)
require.False(t, ok)
assert.NoError(t, acc.Close())
}
func TestAccountGetUpdateReader(t *testing.T) {
ctx := context.Background()
test := func(doClose bool) func(t *testing.T) {
return func(t *testing.T) {
in := io.NopCloser(bytes.NewBuffer([]byte{1}))
stats := NewStats(ctx)
acc := newAccountSizeName(ctx, stats, in, 1, "test")
assert.Equal(t, in, acc.GetReader())
assert.Equal(t, acc, stats.inProgress.get("test"))
if doClose {
// close the account before swapping it out
require.NoError(t, acc.Close())
}
in2 := io.NopCloser(bytes.NewBuffer([]byte{1}))
acc.UpdateReader(ctx, in2)
assert.Equal(t, in2, acc.GetReader())
assert.Equal(t, acc, stats.inProgress.get("test"))
assert.NoError(t, acc.Close())
}
}
t.Run("NoClose", test(false))
t.Run("Close", test(true))
}
func TestAccountRead(t *testing.T) {
ctx := context.Background()
in := io.NopCloser(bytes.NewBuffer([]byte{1, 2, 3}))
stats := NewStats(ctx)
acc := newAccountSizeName(ctx, stats, in, 1, "test")
assert.True(t, acc.values.start.IsZero())
acc.values.mu.Lock()
assert.Equal(t, int64(0), acc.values.lpBytes)
assert.Equal(t, int64(0), acc.values.bytes)
acc.values.mu.Unlock()
assert.Equal(t, int64(0), stats.bytes)
var buf = make([]byte, 2)
n, err := acc.Read(buf)
assert.NoError(t, err)
assert.Equal(t, 2, n)
assert.Equal(t, []byte{1, 2}, buf[:n])
assert.False(t, acc.values.start.IsZero())
acc.values.mu.Lock()
assert.Equal(t, int64(2), acc.values.lpBytes)
assert.Equal(t, int64(2), acc.values.bytes)
acc.values.mu.Unlock()
assert.Equal(t, int64(2), stats.bytes)
n, err = acc.Read(buf)
assert.NoError(t, err)
assert.Equal(t, 1, n)
assert.Equal(t, []byte{3}, buf[:n])
n, err = acc.Read(buf)
assert.Equal(t, io.EOF, err)
assert.Equal(t, 0, n)
assert.NoError(t, acc.Close())
}
func testAccountWriteTo(t *testing.T, withBuffer bool) {
ctx := context.Background()
buf := make([]byte, 2*asyncreader.BufferSize+1)
for i := range buf {
buf[i] = byte(i % 251)
}
in := io.NopCloser(bytes.NewBuffer(buf))
stats := NewStats(ctx)
acc := newAccountSizeName(ctx, stats, in, int64(len(buf)), "test")
if withBuffer {
acc = acc.WithBuffer()
}
assert.True(t, acc.values.start.IsZero())
acc.values.mu.Lock()
assert.Equal(t, int64(0), acc.values.lpBytes)
assert.Equal(t, int64(0), acc.values.bytes)
acc.values.mu.Unlock()
assert.Equal(t, int64(0), stats.bytes)
var out bytes.Buffer
n, err := acc.WriteTo(&out)
assert.NoError(t, err)
assert.Equal(t, int64(len(buf)), n)
assert.Equal(t, buf, out.Bytes())
assert.False(t, acc.values.start.IsZero())
acc.values.mu.Lock()
assert.Equal(t, int64(len(buf)), acc.values.lpBytes)
assert.Equal(t, int64(len(buf)), acc.values.bytes)
acc.values.mu.Unlock()
assert.Equal(t, int64(len(buf)), stats.bytes)
assert.NoError(t, acc.Close())
}
func TestAccountWriteTo(t *testing.T) {
testAccountWriteTo(t, false)
}
func TestAccountWriteToWithBuffer(t *testing.T) {
testAccountWriteTo(t, true)
}
func TestAccountString(t *testing.T) {
ctx := context.Background()
in := io.NopCloser(bytes.NewBuffer([]byte{1, 2, 3}))
stats := NewStats(ctx)
acc := newAccountSizeName(ctx, stats, in, 3, "test")
// FIXME not an exhaustive test!
assert.Equal(t, "test: 0% /3, 0/s, -", strings.TrimSpace(acc.String()))
var buf = make([]byte, 2)
n, err := acc.Read(buf)
assert.NoError(t, err)
assert.Equal(t, 2, n)
assert.Equal(t, "test: 66% /3, 0/s, -", strings.TrimSpace(acc.String()))
assert.NoError(t, acc.Close())
}
// Test the Accounter interface methods on Account and accountStream
func TestAccountAccounter(t *testing.T) {
ctx := context.Background()
in := io.NopCloser(bytes.NewBuffer([]byte{1, 2, 3}))
stats := NewStats(ctx)
acc := newAccountSizeName(ctx, stats, in, 3, "test")
assert.True(t, in == acc.OldStream())
in2 := io.NopCloser(bytes.NewBuffer([]byte{2, 3, 4}))
acc.SetStream(in2)
assert.True(t, in2 == acc.OldStream())
r := acc.WrapStream(in)
as, ok := r.(Accounter)
require.True(t, ok)
assert.True(t, in == as.OldStream())
assert.True(t, in2 == acc.OldStream())
accs, ok := r.(*accountStream)
require.True(t, ok)
assert.Equal(t, acc, accs.acc)
assert.True(t, in == accs.in)
// Check Read on the accountStream
var buf = make([]byte, 2)
n, err := r.Read(buf)
assert.NoError(t, err)
assert.Equal(t, 2, n)
assert.Equal(t, []byte{1, 2}, buf[:n])
// Test that we can get another accountStream out
in3 := io.NopCloser(bytes.NewBuffer([]byte{3, 1, 2}))
r2 := as.WrapStream(in3)
as2, ok := r2.(Accounter)
require.True(t, ok)
assert.True(t, in3 == as2.OldStream())
assert.True(t, in2 == acc.OldStream())
accs2, ok := r2.(*accountStream)
require.True(t, ok)
assert.Equal(t, acc, accs2.acc)
assert.True(t, in3 == accs2.in)
// Test we can set this new accountStream
as2.SetStream(in)
assert.True(t, in == as2.OldStream())
// Test UnWrap on accountStream
unwrapped, wrap := UnWrap(r2)
assert.True(t, unwrapped == in)
r3 := wrap(in2)
assert.True(t, in2 == r3.(Accounter).OldStream())
// Test UnWrap on a normal io.Reader
unwrapped, wrap = UnWrap(in2)
assert.True(t, unwrapped == in2)
assert.True(t, wrap(in3) == in3)
}
func TestAccountMaxTransfer(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
old := ci.MaxTransfer
oldMode := ci.CutoffMode
ci.MaxTransfer = 15
defer func() {
ci.MaxTransfer = old
ci.CutoffMode = oldMode
}()
in := io.NopCloser(bytes.NewBuffer(make([]byte, 100)))
stats := NewStats(ctx)
acc := newAccountSizeName(ctx, stats, in, 1, "test")
var b = make([]byte, 10)
n, err := acc.Read(b)
assert.Equal(t, 10, n)
assert.NoError(t, err)
n, err = acc.Read(b)
assert.Equal(t, 5, n)
assert.Equal(t, ErrorMaxTransferLimitReachedFatal, err)
n, err = acc.Read(b)
assert.Equal(t, 0, n)
assert.Equal(t, ErrorMaxTransferLimitReachedFatal, err)
assert.True(t, fserrors.IsFatalError(err))
ci.CutoffMode = fs.CutoffModeSoft
stats = NewStats(ctx)
acc = newAccountSizeName(ctx, stats, in, 1, "test")
n, err = acc.Read(b)
assert.Equal(t, 10, n)
assert.NoError(t, err)
n, err = acc.Read(b)
assert.Equal(t, 10, n)
assert.NoError(t, err)
n, err = acc.Read(b)
assert.Equal(t, 10, n)
assert.NoError(t, err)
}
func TestAccountMaxTransferWriteTo(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
old := ci.MaxTransfer
oldMode := ci.CutoffMode
ci.MaxTransfer = 15
defer func() {
ci.MaxTransfer = old
ci.CutoffMode = oldMode
}()
in := io.NopCloser(readers.NewPatternReader(1024))
stats := NewStats(ctx)
acc := newAccountSizeName(ctx, stats, in, 1, "test")
var b bytes.Buffer
n, err := acc.WriteTo(&b)
assert.Equal(t, int64(15), n)
assert.Equal(t, ErrorMaxTransferLimitReachedFatal, err)
}
func TestAccountReadCtx(t *testing.T) {
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
in := io.NopCloser(bytes.NewBuffer(make([]byte, 100)))
stats := NewStats(ctx)
acc := newAccountSizeName(ctx, stats, in, 1, "test")
var b = make([]byte, 10)
n, err := acc.Read(b)
assert.Equal(t, 10, n)
assert.NoError(t, err)
cancel()
n, err = acc.Read(b)
assert.Equal(t, 0, n)
assert.Equal(t, context.Canceled, err)
}
func TestShortenName(t *testing.T) {
for _, test := range []struct {
in string
size int
want string
}{
{"", 0, ""},
{"abcde", 10, "abcde"},
{"abcde", 0, "abcde"},
{"abcde", -1, "abcde"},
{"abcde", 5, "abcde"},
{"abcde", 4, "ab…e"},
{"abcde", 3, "a…e"},
{"abcde", 2, "a…"},
{"abcde", 1, "…"},
{"abcdef", 6, "abcdef"},
{"abcdef", 5, "ab…ef"},
{"abcdef", 4, "ab…f"},
{"abcdef", 3, "a…f"},
{"abcdef", 2, "a…"},
{"áßcdèf", 1, "…"},
{"áßcdè", 5, "áßcdè"},
{"áßcdè", 4, "áß…è"},
{"áßcdè", 3, "á…è"},
{"áßcdè", 2, "á…"},
{"áßcdè", 1, "…"},
{"áßcdèł", 6, "áßcdèł"},
{"áßcdèł", 5, "áß…èł"},
{"áßcdèł", 4, "áß…ł"},
{"áßcdèł", 3, "á…ł"},
{"áßcdèł", 2, "á…"},
{"áßcdèł", 1, "…"},
} {
t.Run(fmt.Sprintf("in=%q, size=%d", test.in, test.size), func(t *testing.T) {
got := shortenName(test.in, test.size)
assert.Equal(t, test.want, got)
if test.size > 0 {
assert.True(t, utf8.RuneCountInString(got) <= test.size, "too big")
}
})
}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/accounting/transfer_test.go | fs/accounting/transfer_test.go | package accounting
import (
"context"
"errors"
"io"
"testing"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fstest/mockfs"
"github.com/rclone/rclone/fstest/mockobject"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestTransfer(t *testing.T) {
ctx := context.Background()
s := NewStats(ctx)
o := mockobject.Object("obj")
srcFs, err := mockfs.NewFs(ctx, "srcFs", "srcFs", nil)
require.NoError(t, err)
dstFs, err := mockfs.NewFs(ctx, "dstFs", "dstFs", nil)
require.NoError(t, err)
tr := newTransfer(s, o, srcFs, dstFs)
t.Run("Snapshot", func(t *testing.T) {
snap := tr.Snapshot()
assert.Equal(t, "obj", snap.Name)
assert.Equal(t, int64(0), snap.Size)
assert.Equal(t, int64(0), snap.Bytes)
assert.Equal(t, false, snap.Checked)
assert.Equal(t, "transferring", snap.What)
assert.Equal(t, false, snap.StartedAt.IsZero())
assert.Equal(t, true, snap.CompletedAt.IsZero())
assert.Equal(t, nil, snap.Error)
assert.Equal(t, "", snap.Group)
assert.Equal(t, "srcFs:srcFs", snap.SrcFs)
assert.Equal(t, "dstFs:dstFs", snap.DstFs)
})
t.Run("Done", func(t *testing.T) {
tr.Done(ctx, io.EOF)
snap := tr.Snapshot()
assert.Equal(t, "obj", snap.Name)
assert.Equal(t, int64(0), snap.Size)
assert.Equal(t, int64(0), snap.Bytes)
assert.Equal(t, false, snap.Checked)
assert.Equal(t, "transferring", snap.What)
assert.Equal(t, false, snap.StartedAt.IsZero())
assert.Equal(t, false, snap.CompletedAt.IsZero())
assert.Equal(t, true, errors.Is(snap.Error, io.EOF))
assert.Equal(t, "", snap.Group)
assert.Equal(t, "srcFs:srcFs", snap.SrcFs)
assert.Equal(t, "dstFs:dstFs", snap.DstFs)
})
t.Run("rcStats", func(t *testing.T) {
out := tr.rcStats()
assert.Equal(t, rc.Params{
"name": "obj",
"size": int64(0),
"srcFs": "srcFs:srcFs",
"dstFs": "dstFs:dstFs",
}, out)
})
t.Run("Snapshot checking transfer", func(t *testing.T) {
ctr := newCheckingTransfer(s, o, "checking")
snap := ctr.Snapshot()
assert.Equal(t, "obj", snap.Name)
assert.Equal(t, int64(0), snap.Size)
assert.Equal(t, int64(0), snap.Bytes)
assert.Equal(t, true, snap.Checked)
assert.Equal(t, "checking", snap.What)
assert.Equal(t, false, snap.StartedAt.IsZero())
assert.Equal(t, true, snap.CompletedAt.IsZero())
assert.Equal(t, nil, snap.Error)
assert.Equal(t, "", snap.Group)
assert.Equal(t, "", snap.SrcFs)
assert.Equal(t, "", snap.DstFs)
})
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/accounting/token_bucket.go | fs/accounting/token_bucket.go | package accounting
import (
"context"
"errors"
"fmt"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/rc"
"golang.org/x/time/rate"
)
// TokenBucket holds the global token bucket limiter
var TokenBucket tokenBucket
// TokenBucketSlot is the type to select which token bucket to use
type TokenBucketSlot int
// Slots for the token bucket
const (
TokenBucketSlotAccounting TokenBucketSlot = iota
TokenBucketSlotTransportRx
TokenBucketSlotTransportTx
TokenBucketSlots
)
type buckets [TokenBucketSlots]*rate.Limiter
// tokenBucket holds info about the rate limiters in use
type tokenBucket struct {
mu sync.RWMutex // protects the token bucket variables
curr buckets
prev buckets
toggledOff bool
currLimit fs.BwTimeSlot
}
// Return true if limit is disabled
//
// Call with lock held
func (bs *buckets) _isOff() bool { //nolint:unused // Don't include unused when running golangci-lint in case it's on Windows where this is not called
for i := range bs {
if bs[i] != nil {
return false
}
}
return true
}
// Disable the limits
//
// Call with lock held
func (bs *buckets) _setOff() {
for i := range bs {
bs[i] = nil
}
}
const defaultMaxBurstSize = 4 * 1024 * 1024 // must be bigger than the biggest request
// make a new empty token bucket with the bandwidth given
func newEmptyTokenBucket(bandwidth fs.SizeSuffix) *rate.Limiter {
// Relate maxBurstSize to bandwidth limit
// 4M gives 2.5 Gb/s on Windows
// Use defaultMaxBurstSize up to 2GBit/s (256MiB/s) then scale
maxBurstSize := max((bandwidth*defaultMaxBurstSize)/(256*1024*1024), defaultMaxBurstSize)
// fs.Debugf(nil, "bandwidth=%v maxBurstSize=%v", bandwidth, maxBurstSize)
tb := rate.NewLimiter(rate.Limit(bandwidth), int(maxBurstSize))
if tb != nil {
// empty the bucket
err := tb.WaitN(context.Background(), int(maxBurstSize))
if err != nil {
fs.Errorf(nil, "Failed to empty token bucket: %v", err)
}
}
return tb
}
// make a new empty token bucket with the bandwidth(s) given
func newTokenBucket(bandwidth fs.BwPair) (tbs buckets) {
bandwidthAccounting := fs.SizeSuffix(-1)
if bandwidth.Tx > 0 {
tbs[TokenBucketSlotTransportTx] = newEmptyTokenBucket(bandwidth.Tx)
bandwidthAccounting = bandwidth.Tx
}
if bandwidth.Rx > 0 {
tbs[TokenBucketSlotTransportRx] = newEmptyTokenBucket(bandwidth.Rx)
if bandwidth.Rx > bandwidthAccounting {
bandwidthAccounting = bandwidth.Rx
}
}
// Limit core bandwidth to max of Rx and Tx if both are limited
if bandwidth.Tx > 0 && bandwidth.Rx > 0 {
tbs[TokenBucketSlotAccounting] = newEmptyTokenBucket(bandwidthAccounting)
}
return tbs
}
// StartTokenBucket starts the token bucket if necessary
func (tb *tokenBucket) StartTokenBucket(ctx context.Context) {
tb.mu.Lock()
defer tb.mu.Unlock()
ci := fs.GetConfig(ctx)
tb.currLimit = ci.BwLimit.LimitAt(time.Now())
if tb.currLimit.Bandwidth.IsSet() {
tb.curr = newTokenBucket(tb.currLimit.Bandwidth)
fs.Infof(nil, "Starting bandwidth limiter at %v Byte/s", &tb.currLimit.Bandwidth)
}
// Start the SIGUSR2 signal handler to toggle bandwidth.
// This function does nothing on Windows systems.
tb.startSignalHandler()
}
// StartTokenTicker creates a ticker to update the bandwidth limiter every minute.
func (tb *tokenBucket) StartTokenTicker(ctx context.Context) {
ci := fs.GetConfig(ctx)
// If the timetable has a single entry or was not specified, we don't need
// a ticker to update the bandwidth.
if len(ci.BwLimit) <= 1 {
return
}
ticker := time.NewTicker(time.Minute)
go func() {
for range ticker.C {
limitNow := ci.BwLimit.LimitAt(time.Now())
tb.mu.Lock()
if tb.currLimit.Bandwidth != limitNow.Bandwidth {
// If bwlimit is toggled off, the change should only
// become active on the next toggle, which causes
// an exchange of tb.curr <-> tb.prev
var targetBucket *buckets
if tb.toggledOff {
targetBucket = &tb.prev
} else {
targetBucket = &tb.curr
}
// Set new bandwidth. If unlimited, set tokenbucket to nil.
if limitNow.Bandwidth.IsSet() {
*targetBucket = newTokenBucket(limitNow.Bandwidth)
if tb.toggledOff {
fs.Logf(nil, "Scheduled bandwidth change. "+
"Limit will be set to %v Byte/s when toggled on again.", &limitNow.Bandwidth)
} else {
fs.Logf(nil, "Scheduled bandwidth change. Limit set to %v Byte/s", &limitNow.Bandwidth)
}
} else {
targetBucket._setOff()
fs.Logf(nil, "Scheduled bandwidth change. Bandwidth limits disabled")
}
tb.currLimit = limitNow
}
tb.mu.Unlock()
}
}()
}
// LimitBandwidth sleeps for the correct amount of time for the passage
// of n bytes according to the current bandwidth limit
func (tb *tokenBucket) LimitBandwidth(i TokenBucketSlot, n int) {
tb.mu.RLock()
// Limit the transfer speed if required
if tb.curr[i] != nil {
err := tb.curr[i].WaitN(context.Background(), n)
if err != nil {
fs.Errorf(nil, "Token bucket error: %v", err)
}
}
tb.mu.RUnlock()
}
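// Illustrative use (not part of this file): code that moves data in chunks and
// wants its rate throttled by the global limiter calls LimitBandwidth once per
// chunk with the number of bytes just moved, e.g.
//
//	n, _ := r.Read(buf)
//	TokenBucket.LimitBandwidth(TokenBucketSlotAccounting, n)
//
// where r and buf are whatever reader and buffer the caller already has.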
// SetBwLimit sets the current bandwidth limit
func (tb *tokenBucket) SetBwLimit(bandwidth fs.BwPair) {
tb.mu.Lock()
defer tb.mu.Unlock()
if bandwidth.IsSet() {
tb.curr = newTokenBucket(bandwidth)
fs.Logf(nil, "Bandwidth limit set to %v", bandwidth)
} else {
tb.curr._setOff()
fs.Logf(nil, "Bandwidth limit reset to unlimited")
}
}
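// A minimal sketch of setting the limit programmatically (the values are
// examples only): fs.BwPair carries separate transmit and receive limits, and
// passing an unset pair switches limiting off again.
//
//	TokenBucket.SetBwLimit(fs.BwPair{Tx: 1024 * 1024, Rx: 512 * 1024})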
// read and set the bandwidth limits
func (tb *tokenBucket) rcBwlimit(ctx context.Context, in rc.Params) (out rc.Params, err error) {
if in["rate"] != nil {
bwlimit, err := in.GetString("rate")
if err != nil {
return out, err
}
var bws fs.BwTimetable
err = bws.Set(bwlimit)
if err != nil {
return out, fmt.Errorf("bad bwlimit: %w", err)
}
if len(bws) != 1 {
return out, errors.New("need exactly 1 bandwidth setting")
}
bw := bws[0]
tb.SetBwLimit(bw.Bandwidth)
}
tb.mu.RLock()
bytesPerSecond := int64(-1)
if tb.curr[TokenBucketSlotAccounting] != nil {
bytesPerSecond = int64(tb.curr[TokenBucketSlotAccounting].Limit())
}
var bp = fs.BwPair{Tx: -1, Rx: -1}
if tb.curr[TokenBucketSlotTransportTx] != nil {
bp.Tx = fs.SizeSuffix(tb.curr[TokenBucketSlotTransportTx].Limit())
}
if tb.curr[TokenBucketSlotTransportRx] != nil {
bp.Rx = fs.SizeSuffix(tb.curr[TokenBucketSlotTransportRx].Limit())
}
tb.mu.RUnlock()
out = rc.Params{
"rate": bp.String(),
"bytesPerSecond": bytesPerSecond,
"bytesPerSecondTx": int64(bp.Tx),
"bytesPerSecondRx": int64(bp.Rx),
}
return out, nil
}
// Remote control for the token bucket
func init() {
rc.Add(rc.Call{
Path: "core/bwlimit",
Fn: TokenBucket.rcBwlimit,
Title: "Set the bandwidth limit.",
Help: `
This sets the bandwidth limit to the string passed in. This should be
a single bandwidth limit entry or a pair of upload:download bandwidth.
Eg
rclone rc core/bwlimit rate=off
{
"bytesPerSecond": -1,
"bytesPerSecondTx": -1,
"bytesPerSecondRx": -1,
"rate": "off"
}
rclone rc core/bwlimit rate=1M
{
"bytesPerSecond": 1048576,
"bytesPerSecondTx": 1048576,
"bytesPerSecondRx": 1048576,
"rate": "1M"
}
rclone rc core/bwlimit rate=1M:100k
{
"bytesPerSecond": 1048576,
"bytesPerSecondTx": 1048576,
"bytesPerSecondRx": 131072,
"rate": "1M"
}
If the rate parameter is not supplied then the bandwidth is queried
rclone rc core/bwlimit
{
"bytesPerSecond": 1048576,
"bytesPerSecondTx": 1048576,
"bytesPerSecondRx": 1048576,
"rate": "1M"
}
The format of the parameter is exactly the same as passed to --bwlimit
except only one bandwidth may be specified.
In either case "rate" is returned as a human-readable string, and
"bytesPerSecond" is returned as a number.
`,
})
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/accounting/stats_groups_test.go | fs/accounting/stats_groups_test.go | package accounting
import (
"context"
"encoding/json"
"fmt"
"runtime"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fstest/mockobject"
"github.com/rclone/rclone/fstest/testy"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestStatsGroupOperations(t *testing.T) {
ctx := context.Background()
t.Run("empty group returns nil", func(t *testing.T) {
t.Parallel()
sg := newStatsGroups()
sg.get("invalid-group")
})
t.Run("set assigns stats to group", func(t *testing.T) {
t.Parallel()
stats := NewStats(ctx)
sg := newStatsGroups()
sg.set(ctx, "test", stats)
sg.set(ctx, "test1", stats)
if len(sg.m) != len(sg.names()) || len(sg.m) != 2 {
t.Fatalf("Expected two stats got %d, %d", len(sg.m), len(sg.order))
}
})
t.Run("get returns correct group", func(t *testing.T) {
t.Parallel()
stats := NewStats(ctx)
sg := newStatsGroups()
sg.set(ctx, "test", stats)
sg.set(ctx, "test1", stats)
got := sg.get("test")
if got != stats {
t.Fatal("get returns incorrect stats")
}
})
t.Run("sum returns correct values", func(t *testing.T) {
t.Parallel()
stats1 := NewStats(ctx)
stats1.bytes = 5
stats1.transferQueueSize = 10
stats1.errors = 6
stats1.oldDuration = time.Second
stats1.oldTimeRanges = []timeRange{{time.Now(), time.Now().Add(time.Second)}}
stats2 := NewStats(ctx)
stats2.bytes = 10
stats2.errors = 12
stats2.transferQueueSize = 20
stats2.oldDuration = 2 * time.Second
stats2.oldTimeRanges = []timeRange{{time.Now(), time.Now().Add(2 * time.Second)}}
sg := newStatsGroups()
sg.set(ctx, "test1", stats1)
sg.set(ctx, "test2", stats2)
sum := sg.sum(ctx)
assert.Equal(t, stats1.bytes+stats2.bytes, sum.bytes)
assert.Equal(t, stats1.transferQueueSize+stats2.transferQueueSize, sum.transferQueueSize)
assert.Equal(t, stats1.errors+stats2.errors, sum.errors)
assert.Equal(t, stats1.oldDuration+stats2.oldDuration, sum.oldDuration)
assert.Equal(t, stats1.average.speed+stats2.average.speed, sum.average.speed)
// dict can iterate in either order
a := timeRanges{stats1.oldTimeRanges[0], stats2.oldTimeRanges[0]}
b := timeRanges{stats2.oldTimeRanges[0], stats1.oldTimeRanges[0]}
if !assert.ObjectsAreEqual(a, sum.oldTimeRanges) {
assert.Equal(t, b, sum.oldTimeRanges)
}
})
t.Run("delete removes stats", func(t *testing.T) {
t.Parallel()
stats := NewStats(ctx)
sg := newStatsGroups()
sg.set(ctx, "test", stats)
sg.set(ctx, "test1", stats)
sg.delete("test1")
if sg.get("test1") != nil {
t.Fatal("stats not deleted")
}
if len(sg.m) != len(sg.names()) || len(sg.m) != 1 {
t.Fatalf("Expected two stats got %d, %d", len(sg.m), len(sg.order))
}
})
t.Run("memory is reclaimed", func(t *testing.T) {
testy.SkipUnreliable(t)
var (
count = 1000
start, end runtime.MemStats
sg = newStatsGroups()
)
runtime.GC()
runtime.ReadMemStats(&start)
for i := range count {
sg.set(ctx, fmt.Sprintf("test-%d", i), NewStats(ctx))
}
for i := range count {
sg.delete(fmt.Sprintf("test-%d", i))
}
runtime.GC()
runtime.ReadMemStats(&end)
t.Logf("%+v\n%+v", start, end)
diff := percentDiff(start.HeapObjects, end.HeapObjects)
if diff > 1 {
t.Errorf("HeapObjects = %d, expected %d", end.HeapObjects, start.HeapObjects)
}
})
testGroupStatsInfo := NewStatsGroup(ctx, "test-group")
require.NoError(t, testGroupStatsInfo.DeleteFile(ctx, 0))
for range 41 {
require.NoError(t, GlobalStats().DeleteFile(ctx, 0))
}
t.Run("core/group-list", func(t *testing.T) {
call := rc.Calls.Get("core/group-list")
require.NotNil(t, call)
got, err := call.Fn(ctx, rc.Params{})
require.NoError(t, err)
require.Equal(t, rc.Params{
"groups": []string{
"test-group",
},
}, got)
})
t.Run("core/stats", func(t *testing.T) {
tr := Stats(ctx).NewCheckingTransfer(mockobject.New("core-check"), "deleting")
// defer tr.Done(ctx, nil)
_ = tr // don't finish the transfer so we don't mess up the other tests
tr2 := Stats(ctx).NewTransfer(mockobject.New("core-transfer"), nil)
//defer tr2.Done(ctx, nil)
_ = tr2 // don't finish the transfer so we don't mess up the other tests
call := rc.Calls.Get("core/stats")
require.NotNil(t, call)
got, err := call.Fn(ctx, rc.Params{})
require.NoError(t, err)
assert.Equal(t, int64(42), got["deletes"])
data, err := json.Marshal(got["transferring"])
require.NoError(t, err)
assert.Contains(t, string(data), "core-transfer")
data, err = json.Marshal(got["checking"])
require.NoError(t, err)
assert.Contains(t, string(data), "core-check")
got, err = call.Fn(ctx, rc.Params{"short": true})
require.NoError(t, err)
assert.Equal(t, int64(42), got["deletes"])
assert.Nil(t, got["transferring"])
assert.Nil(t, got["checking"])
got, err = call.Fn(ctx, rc.Params{"group": "test-group"})
require.NoError(t, err)
assert.Equal(t, int64(1), got["deletes"])
})
t.Run("core/transferred", func(t *testing.T) {
call := rc.Calls.Get("core/transferred")
require.NotNil(t, call)
gotNoGroup, err := call.Fn(ctx, rc.Params{})
require.NoError(t, err)
gotGroup, err := call.Fn(ctx, rc.Params{"group": "test-group"})
require.NoError(t, err)
assert.Equal(t, rc.Params{
"transferred": []TransferSnapshot{},
}, gotNoGroup)
assert.Equal(t, rc.Params{
"transferred": []TransferSnapshot{},
}, gotGroup)
})
t.Run("core/stats-reset", func(t *testing.T) {
call := rc.Calls.Get("core/stats-reset")
require.NotNil(t, call)
assert.Equal(t, int64(41), GlobalStats().deletes)
assert.Equal(t, int64(1), testGroupStatsInfo.deletes)
_, err := call.Fn(ctx, rc.Params{"group": "test-group"})
require.NoError(t, err)
assert.Equal(t, int64(41), GlobalStats().deletes)
assert.Equal(t, int64(0), testGroupStatsInfo.deletes)
_, err = call.Fn(ctx, rc.Params{})
require.NoError(t, err)
assert.Equal(t, int64(0), GlobalStats().deletes)
assert.Equal(t, int64(0), testGroupStatsInfo.deletes)
_, err = call.Fn(ctx, rc.Params{"group": "not-found"})
require.ErrorContains(t, err, `group "not-found" not found`)
})
testGroupStatsInfo = NewStatsGroup(ctx, "test-group")
t.Run("core/stats-delete", func(t *testing.T) {
call := rc.Calls.Get("core/stats-delete")
require.NotNil(t, call)
assert.Equal(t, []string{"test-group"}, groups.names())
_, err := call.Fn(ctx, rc.Params{"group": "test-group"})
require.NoError(t, err)
assert.Equal(t, []string{}, groups.names())
_, err = call.Fn(ctx, rc.Params{"group": "not-found"})
require.NoError(t, err)
})
}
func TestCountError(t *testing.T) {
ctx := context.Background()
Start(ctx)
defer func() {
groups = newStatsGroups()
}()
t.Run("global stats", func(t *testing.T) {
GlobalStats().ResetCounters()
err := fs.CountError(ctx, fmt.Errorf("global err"))
assert.Equal(t, int64(1), GlobalStats().errors)
assert.True(t, fserrors.IsCounted(err))
})
t.Run("group stats", func(t *testing.T) {
statGroupName := fmt.Sprintf("%s-error_group", t.Name())
GlobalStats().ResetCounters()
stCtx := WithStatsGroup(ctx, statGroupName)
st := StatsGroup(stCtx, statGroupName)
err := fs.CountError(stCtx, fmt.Errorf("group err"))
assert.Equal(t, int64(0), GlobalStats().errors)
assert.Equal(t, int64(1), st.errors)
assert.True(t, fserrors.IsCounted(err))
})
}
func percentDiff(start, end uint64) uint64 {
if start == 0 {
return 0 // Handle zero start value to avoid division by zero
}
var diff uint64
if end > start {
diff = end - start // Handle case where end is larger than start
} else {
diff = start - end
}
return (diff * 100) / start
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/accounting/tpslimit.go | fs/accounting/tpslimit.go | package accounting
import (
"context"
"github.com/rclone/rclone/fs"
"golang.org/x/time/rate"
)
var (
tpsBucket *rate.Limiter // for limiting number of http transactions per second
)
// StartLimitTPS starts the token bucket for transactions per second
// limiting if necessary
func StartLimitTPS(ctx context.Context) {
ci := fs.GetConfig(ctx)
if ci.TPSLimit > 0 {
tpsBurst := max(ci.TPSLimitBurst, 1)
tpsBucket = rate.NewLimiter(rate.Limit(ci.TPSLimit), tpsBurst)
fs.Infof(nil, "Starting transaction limiter: max %g transactions/s with burst %d", ci.TPSLimit, tpsBurst)
}
}
// LimitTPS limits the number of transactions per second if enabled.
// It should be called once per transaction.
func LimitTPS(ctx context.Context) {
if tpsBucket != nil {
tbErr := tpsBucket.Wait(ctx)
if tbErr != nil && tbErr != context.Canceled {
fs.Errorf(nil, "HTTP token bucket error: %v", tbErr)
}
}
}
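// Illustrative sketch (hypothetical type, not part of this package): an HTTP
// transport that honours the TPS limit calls LimitTPS once before each request
// it sends.
//
//	type throttledTransport struct{ base http.RoundTripper }
//
//	func (t *throttledTransport) RoundTrip(req *http.Request) (*http.Response, error) {
//		accounting.LimitTPS(req.Context())
//		return t.base.RoundTrip(req)
//	}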
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/fspath/fuzz.go | fs/fspath/fuzz.go | //go:build gofuzz
/*
Fuzz test the Parse function
Generate corpus
go test -v -make-corpus
Install go fuzz
go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build
Compile and fuzz
go-fuzz-build
go-fuzz
Tidy up
rm -rf corpus/ crashers/ suppressions/
git co ../../go.mod ../../go.sum
*/
package fspath
func Fuzz(data []byte) int {
path := string(data)
parsed, err := Parse(path)
if err != nil {
return 0
}
if parsed.Name == "" {
if parsed.ConfigString != "" {
panic("bad ConfigString")
}
if parsed.Path != path {
panic("local path not preserved")
}
} else {
if parsed.ConfigString+":"+parsed.Path != path {
panic("didn't split properly")
}
}
return 0
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/fspath/path.go | fs/fspath/path.go | // Package fspath contains routines for fspath manipulation
package fspath
import (
"errors"
"path"
"path/filepath"
"regexp"
"strings"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/driveletter"
)
const (
configNameRe = `[\w\p{L}\p{N}.+@]+(?:[ -]+[\w\p{L}\p{N}.+@-]+)*` // May contain Unicode numbers and letters, as well as `_` (covered by \w), `-`, `.`, `+`, `@` and space, but not start with `-` (it complicates usage, see #4261) or space, and not end with space
illegalPartOfConfigNameRe = `^[ -]+|[^\w\p{L}\p{N}.+@ -]+|[ ]+$`
)
var (
errInvalidCharacters = errors.New("config name contains invalid characters - may only contain numbers, letters, `_`, `-`, `.`, `+`, `@` and space, but may not start with `-` or space, nor end with space")
errCantBeEmpty = errors.New("can't use empty string as a path")
errBadConfigParam = errors.New("config parameters may only contain `0-9`, `A-Z`, `a-z`, `_` and `.`")
errEmptyConfigParam = errors.New("config parameters can't be empty")
errConfigNameEmpty = errors.New("config name can't be empty")
errConfigName = errors.New("config name needs a trailing `:`")
errParam = errors.New("config parameter must end with `,` or `:`")
errValue = errors.New("unquoted config value must end with `,` or `:`")
errQuotedValue = errors.New("unterminated quoted config value")
errAfterQuote = errors.New("expecting `:` or `,` or another quote after a quote")
errSyntax = errors.New("syntax error in config string")
// configNameMatcher is a pattern to match an rclone config name
configNameMatcher = regexp.MustCompile(`^` + configNameRe + `$`)
// illegalPartOfConfigNameMatcher is a pattern to match a sequence of characters not allowed in an rclone config name
illegalPartOfConfigNameMatcher = regexp.MustCompile(illegalPartOfConfigNameRe)
// remoteNameMatcher is a pattern to match an rclone remote name at the start of a config
remoteNameMatcher = regexp.MustCompile(`^:?` + configNameRe + `(?::$|,)`)
)
// CheckConfigName returns an error if configName is invalid
func CheckConfigName(configName string) error {
if !configNameMatcher.MatchString(configName) {
return errInvalidCharacters
}
return nil
}
// MakeConfigName makes an input into something legal to be used as a config name.
// Returns a string where any sequences of illegal characters are replaced with
// a single underscore. If the input is already valid as a config name, it is
// returned unchanged. If the input is an empty string, a single underscore is
// returned.
func MakeConfigName(name string) string {
if name == "" {
return "_"
}
if configNameMatcher.MatchString(name) {
return name
}
return illegalPartOfConfigNameMatcher.ReplaceAllString(name, "_")
}
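// For example (values taken from the unit tests):
//
//	MakeConfigName("rem:ote") // "rem_ote"
//	MakeConfigName("")        // "_"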
// checkRemoteName returns an error if remoteName is invalid
func checkRemoteName(remoteName string) error {
if remoteName == ":" || remoteName == "::" {
return errConfigNameEmpty
}
if !remoteNameMatcher.MatchString(remoteName) {
return errInvalidCharacters
}
return nil
}
// Return true if c is a valid character for a config parameter
func isConfigParam(c rune) bool {
return ((c >= 'a' && c <= 'z') ||
(c >= 'A' && c <= 'Z') ||
(c >= '0' && c <= '9') ||
c == '_' ||
c == '.')
}
// Parsed is returned from Parse with the results of the connection string decomposition
//
// If Name is "" then it is a local path in Path
//
// Note that ConfigString + ":" + Path is equal to the input of Parse except that Path may have had
// \ converted to /
type Parsed struct {
Name string // Just the name of the config: "remote" or ":backend"
ConfigString string // The whole config string: "remote:" or ":backend,value=6:"
Path string // The file system path, may be empty
Config configmap.Simple // key/value config parsed out of ConfigString may be nil
}
// Parse deconstructs a path into a Parsed structure
//
// If the path is a local path then parsed.Name will be returned as "".
//
// So "remote:path/to/dir" will return Parsed{Name:"remote", Path:"path/to/dir"},
// and "/path/to/local" will return Parsed{Name:"", Path:"/path/to/local"}
//
// Note that this will turn \ into / in the fsPath on Windows
//
// An error may be returned if the remote name has invalid characters or the
// parameters are invalid or the path is empty.
func Parse(path string) (parsed Parsed, err error) {
parsed.Path = filepath.ToSlash(path)
if path == "" {
return parsed, errCantBeEmpty
}
// If path has no `:` in, it must be a local path
if !strings.ContainsRune(path, ':') {
return parsed, nil
}
// States for parser
const (
stateConfigName = uint8(iota)
stateParam
stateValue
stateQuotedValue
stateAfterQuote
stateDone
)
var (
state = stateConfigName // current state of parser
i int // position in path
prev int // previous position in path
c rune // current rune under consideration
quote rune // kind of quote to end this quoted string
param string // current parameter value
doubled bool // set if had doubled quotes
)
loop:
for i, c = range path {
// Example Parse
// remote,param=value,param2="qvalue":/path/to/file
switch state {
// Parses "remote,"
case stateConfigName:
if i == 0 && c == ':' {
continue
} else if c == '/' || c == '\\' {
// `:` or `,` not before a path separator must be a local path,
// except if the path started with `:` in which case it was intended
// to be an on the fly remote so return an error.
if path[0] == ':' {
return parsed, errInvalidCharacters
}
return parsed, nil
} else if c == ':' || c == ',' {
parsed.Name = path[:i]
err := checkRemoteName(parsed.Name + ":")
if err != nil {
return parsed, err
}
prev = i + 1
if c == ':' {
// If we parsed a drive letter, must be a local path
if driveletter.IsDriveLetter(parsed.Name) {
parsed.Name = ""
return parsed, nil
}
state = stateDone
break loop
}
state = stateParam
parsed.Config = make(configmap.Simple)
}
// Parses param= and param2=
case stateParam:
if c == ':' || c == ',' || c == '=' {
param = path[prev:i]
if len(param) == 0 {
return parsed, errEmptyConfigParam
}
prev = i + 1
if c == '=' {
state = stateValue
break
}
parsed.Config[param] = "true"
if c == ':' {
state = stateDone
break loop
}
state = stateParam
} else if !isConfigParam(c) {
return parsed, errBadConfigParam
}
// Parses value
case stateValue:
if c == '\'' || c == '"' {
if i == prev {
quote = c
state = stateQuotedValue
prev = i + 1
doubled = false
break
}
} else if c == ':' || c == ',' {
value := path[prev:i]
prev = i + 1
parsed.Config[param] = value
if c == ':' {
state = stateDone
break loop
}
state = stateParam
}
// Parses "qvalue"
case stateQuotedValue:
if c == quote {
state = stateAfterQuote
}
// Parses : or , or quote after "qvalue"
case stateAfterQuote:
if c == ':' || c == ',' {
value := path[prev : i-1]
// replace any doubled quotes if there were any
if doubled {
value = strings.ReplaceAll(value, string(quote)+string(quote), string(quote))
}
prev = i + 1
parsed.Config[param] = value
if c == ':' {
state = stateDone
break loop
} else {
state = stateParam
}
} else if c == quote {
// Here is a doubled quote to indicate a literal quote
state = stateQuotedValue
doubled = true
} else {
return parsed, errAfterQuote
}
}
}
// Depending on which state we were in when we fell off the
// end of the state machine we can return a sensible error.
switch state {
default:
return parsed, errSyntax
case stateConfigName:
return parsed, errConfigName
case stateParam:
return parsed, errParam
case stateValue:
return parsed, errValue
case stateQuotedValue:
return parsed, errQuotedValue
case stateAfterQuote:
return parsed, errAfterQuote
case stateDone:
break
}
parsed.ConfigString = path[:i]
parsed.Path = path[i+1:]
// change native directory separators to / if there are any
parsed.Path = filepath.ToSlash(parsed.Path)
return parsed, nil
}
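// For example, an on the fly remote with a connection string parameter
// (mirroring a case exercised in the unit tests):
//
//	parsed, _ := Parse(":backend,param1=value:/path/to/file")
//	// parsed.Name         == ":backend"
//	// parsed.ConfigString == ":backend,param1=value"
//	// parsed.Path         == "/path/to/file"
//	// parsed.Config       == configmap.Simple{"param1": "value"}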
// SplitFs splits a remote into a remoteName and a remotePath.
//
// SplitFs("remote:path/to/file") -> ("remote:", "path/to/file")
// SplitFs("/to/file") -> ("", "/to/file")
//
// If it returns remoteName as "" then remotePath is a local path
//
// The returned values have the property that remoteName + remotePath ==
// remote (except under Windows where \ will be translated into /)
func SplitFs(remote string) (remoteName string, remotePath string, err error) {
parsed, err := Parse(remote)
if err != nil {
return "", "", err
}
remoteName, remotePath = parsed.ConfigString, parsed.Path
if remoteName != "" {
remoteName += ":"
}
return remoteName, remotePath, nil
}
// Split splits a remote into a parent and a leaf
//
// if it returns leaf as an empty string then remote is a directory
//
// if it returns parent as an empty string then that means the current directory
//
// The returned values have the property that parent + leaf == remote
// (except under Windows where \ will be translated into /)
func Split(remote string) (parent string, leaf string, err error) {
remoteName, remotePath, err := SplitFs(remote)
if err != nil {
return "", "", err
}
// Construct new remote name without last segment
parent, leaf = path.Split(remotePath)
return remoteName + parent, leaf, nil
}
// Make filePath absolute so it can't read above the root
func makeAbsolute(filePath string) string {
leadingSlash := strings.HasPrefix(filePath, "/")
filePath = path.Join("/", filePath)
if !leadingSlash && strings.HasPrefix(filePath, "/") {
filePath = filePath[1:]
}
return filePath
}
// JoinRootPath joins filePath onto remote
//
// If the remote has a leading "//" this is preserved to allow Windows
// network paths to be used as remotes.
//
// If filePath is empty then remote will be returned.
//
// If the path contains \ these will be converted to / on Windows.
func JoinRootPath(remote, filePath string) string {
remote = filepath.ToSlash(remote)
if filePath == "" {
return remote
}
filePath = filepath.ToSlash(filePath)
filePath = makeAbsolute(filePath)
if strings.HasPrefix(remote, "//") {
return "/" + path.Join(remote, filePath)
}
parsed, err := Parse(remote)
remoteName, remotePath := parsed.ConfigString, parsed.Path
if err != nil {
// Couldn't parse so assume it is a path
remoteName = ""
remotePath = remote
}
remotePath = path.Join(remotePath, filePath)
if remoteName != "" {
remoteName += ":"
// if have remote: then normalise the remotePath
if remotePath == "." {
remotePath = ""
}
}
return remoteName + remotePath
}
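// For example (values taken from the unit tests):
//
//	JoinRootPath("s3:dir", "/sub")   // "s3:dir/sub"
//	JoinRootPath("//server", "path") // "//server/path"
//	JoinRootPath("s3:dir", "/../")   // "s3:dir" - can't escape above the root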
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/fspath/path_test.go | fs/fspath/path_test.go | package fspath
import (
"flag"
"fmt"
"os"
"path/filepath"
"runtime"
"testing"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
makeCorpus = flag.Bool("make-corpus", false, "Set to make the fuzzing corpus")
)
func TestCheckConfigName(t *testing.T) {
for _, test := range []struct {
in string
problem error
fixed string
}{
{"remote", nil, "remote"},
{"REMOTE", nil, "REMOTE"},
{"", errInvalidCharacters, "_"},
{":remote:", errInvalidCharacters, "_remote_"},
{"remote:", errInvalidCharacters, "remote_"},
{"rem:ote", errInvalidCharacters, "rem_ote"},
{"rem/ote", errInvalidCharacters, "rem_ote"},
{"rem\\ote", errInvalidCharacters, "rem_ote"},
{"[remote", errInvalidCharacters, "_remote"},
{"*", errInvalidCharacters, "_"},
{"-remote", errInvalidCharacters, "_remote"},
{"r-emote-", nil, "r-emote-"},
{"---rem:::ote???", errInvalidCharacters, "_rem_ote_"},
{"_rem_ote_", nil, "_rem_ote_"},
{".", nil, "."},
{"..", nil, ".."},
{".r.e.m.o.t.e.", nil, ".r.e.m.o.t.e."},
{"rem ote", nil, "rem ote"},
{"user@example.com", nil, "user@example.com"},
{"user+junkmail@example.com", nil, "user+junkmail@example.com"},
{"blåbær", nil, "blåbær"},
{"chữ Quốc ngữ", nil, "chữ Quốc ngữ"},
{"remote ", errInvalidCharacters, "remote_"},
{" remote", errInvalidCharacters, "_remote"},
{" remote ", errInvalidCharacters, "_remote_"},
} {
problem := CheckConfigName(test.in)
assert.Equal(t, test.problem, problem, test.in)
fixed := MakeConfigName(test.in)
assert.Equal(t, test.fixed, fixed, test.in)
}
}
func TestCheckRemoteName(t *testing.T) {
for _, test := range []struct {
in string
want error
}{
{":remote:", nil},
{":REMOTE:", nil},
{":s3:", nil},
{"remote:", nil},
{".:", nil},
{"..:", nil},
{".r.e.m.o.t.e.:", nil},
{"-r-emote-:", errInvalidCharacters},
{"rem ote:", nil},
{"user@example.com:", nil},
{"user+junkmail@example.com:", nil},
{"blåbær:", nil},
{"chữ Quốc ngữ:", nil},
{"remote :", errInvalidCharacters},
{" remote:", errInvalidCharacters},
{" remote :", errInvalidCharacters},
{"", errInvalidCharacters},
{"rem:ote", errInvalidCharacters},
{"rem:ote:", errInvalidCharacters},
{"remote", errInvalidCharacters},
{"rem/ote:", errInvalidCharacters},
{"rem\\ote:", errInvalidCharacters},
{"[remote:", errInvalidCharacters},
{"*:", errInvalidCharacters},
} {
got := checkRemoteName(test.in)
assert.Equal(t, test.want, got, test.in)
}
}
func TestParse(t *testing.T) {
for testNumber, test := range []struct {
in string
wantParsed Parsed
wantErr error
win bool // only run these tests on Windows
noWin bool // only run these tests on !Windows
}{
{
in: "",
wantErr: errCantBeEmpty,
}, {
in: ":",
wantErr: errConfigName,
}, {
in: "::",
wantErr: errConfigNameEmpty,
}, {
in: ":/:",
wantErr: errInvalidCharacters,
}, {
in: "/:",
wantParsed: Parsed{
ConfigString: "",
Path: "/:",
},
}, {
in: "\\backslash:",
wantParsed: Parsed{
ConfigString: "",
Path: "/backslash:",
},
win: true,
}, {
in: "\\backslash:",
wantParsed: Parsed{
ConfigString: "",
Path: "\\backslash:",
},
noWin: true,
}, {
in: "/slash:",
wantParsed: Parsed{
ConfigString: "",
Path: "/slash:",
},
}, {
in: "with\\backslash:",
wantParsed: Parsed{
ConfigString: "",
Path: "with/backslash:",
},
win: true,
}, {
in: "with\\backslash:",
wantParsed: Parsed{
ConfigString: "",
Path: "with\\backslash:",
},
noWin: true,
}, {
in: "with/slash:",
wantParsed: Parsed{
ConfigString: "",
Path: "with/slash:",
},
}, {
in: "/path/to/file",
wantParsed: Parsed{
ConfigString: "",
Path: "/path/to/file",
},
}, {
in: "/path:/to/file",
wantParsed: Parsed{
ConfigString: "",
Path: "/path:/to/file",
},
}, {
in: "./path:/to/file",
wantParsed: Parsed{
ConfigString: "",
Path: "./path:/to/file",
},
}, {
in: "./:colon.txt",
wantParsed: Parsed{
ConfigString: "",
Path: "./:colon.txt",
},
}, {
in: "path/to/file",
wantParsed: Parsed{
ConfigString: "",
Path: "path/to/file",
},
}, {
in: ".:",
wantParsed: Parsed{
ConfigString: ".",
Name: ".",
Path: "",
},
}, {
in: "..:",
wantParsed: Parsed{
ConfigString: "..",
Name: "..",
Path: "",
},
}, {
in: ".:colon.txt",
wantParsed: Parsed{
ConfigString: ".",
Name: ".",
Path: "colon.txt",
},
}, {
in: "remote:path/to/file",
wantParsed: Parsed{
ConfigString: "remote",
Name: "remote",
Path: "path/to/file",
},
}, {
in: "rem*ote:path/to/file",
wantErr: errInvalidCharacters,
}, {
in: "remote:/path/to/file",
wantParsed: Parsed{
ConfigString: "remote",
Name: "remote",
Path: "/path/to/file",
},
}, {
in: "rem.ote:/path/to/file",
wantParsed: Parsed{
ConfigString: "rem.ote",
Name: "rem.ote",
Path: "/path/to/file",
},
}, {
in: "rem ote:/path/to/file",
wantParsed: Parsed{
ConfigString: "rem ote",
Name: "rem ote",
Path: "/path/to/file",
},
}, {
in: "remote :/path/to/file",
wantErr: errInvalidCharacters,
}, {
in: " remote:/path/to/file",
wantErr: errInvalidCharacters,
}, {
in: " remote :/path/to/file",
wantErr: errInvalidCharacters,
}, {
in: "rem#ote:/path/to/file",
wantErr: errInvalidCharacters,
}, {
in: ":backend:/path/to/file",
wantParsed: Parsed{
ConfigString: ":backend",
Name: ":backend",
Path: "/path/to/file",
},
}, {
in: ":back.end:/path/to/file",
wantParsed: Parsed{
ConfigString: ":back.end",
Name: ":back.end",
Path: "/path/to/file",
},
}, {
in: ":bac*kend:/path/to/file",
wantErr: errInvalidCharacters,
}, {
in: `C:\path\to\file`,
wantParsed: Parsed{
Name: "",
Path: `C:/path/to/file`,
},
win: true,
}, {
in: `C:\path\to\file`,
wantParsed: Parsed{
Name: "C",
ConfigString: "C",
Path: `\path\to\file`,
},
noWin: true,
}, {
in: `\path\to\file`,
wantParsed: Parsed{
Name: "",
Path: `/path/to/file`,
},
win: true,
}, {
in: `\path\to\file`,
wantParsed: Parsed{
Name: "",
Path: `\path\to\file`,
},
noWin: true,
}, {
in: `.`,
wantParsed: Parsed{
Name: "",
Path: `.`,
},
noWin: true,
}, {
in: `..`,
wantParsed: Parsed{
Name: "",
Path: `..`,
},
noWin: true,
}, {
in: `remote:\path\to\file`,
wantParsed: Parsed{
Name: "remote",
ConfigString: "remote",
Path: `/path/to/file`,
},
win: true,
}, {
in: `remote:\path\to\file`,
wantParsed: Parsed{
Name: "remote",
ConfigString: "remote",
Path: `\path\to\file`,
},
noWin: true,
}, {
in: `D:/path/to/file`,
wantParsed: Parsed{
Name: "",
Path: `D:/path/to/file`,
},
win: true,
}, {
in: `D:/path/to/file`,
wantParsed: Parsed{
Name: "D",
ConfigString: "D",
Path: `/path/to/file`,
},
noWin: true,
}, {
in: `:backend,param1:/path/to/file`,
wantParsed: Parsed{
ConfigString: `:backend,param1`,
Name: ":backend",
Path: "/path/to/file",
Config: configmap.Simple{
"param1": "true",
},
},
}, {
in: `:backend,param1=value:/path/to/file`,
wantParsed: Parsed{
ConfigString: `:backend,param1=value`,
Name: ":backend",
Path: "/path/to/file",
Config: configmap.Simple{
"param1": "value",
},
},
}, {
in: `:backend,param1=value1,param2,param3=value3:/path/to/file`,
wantParsed: Parsed{
ConfigString: `:backend,param1=value1,param2,param3=value3`,
Name: ":backend",
Path: "/path/to/file",
Config: configmap.Simple{
"param1": "value1",
"param2": "true",
"param3": "value3",
},
},
}, {
in: `:backend,param1=value1,param2="value2",param3='value3':/path/to/file`,
wantParsed: Parsed{
ConfigString: `:backend,param1=value1,param2="value2",param3='value3'`,
Name: ":backend",
Path: "/path/to/file",
Config: configmap.Simple{
"param1": "value1",
"param2": "value2",
"param3": "value3",
},
},
}, {
in: `:backend,param-1=value:/path/to/file`,
wantErr: errBadConfigParam,
}, {
in: `:backend,param1="value"x:/path/to/file`,
wantErr: errAfterQuote,
}, {
in: `:backend,`,
wantErr: errParam,
}, {
in: `:backend,param=value`,
wantErr: errValue,
}, {
in: `:backend,param="value'`,
wantErr: errQuotedValue,
}, {
in: `:backend,param1="value"`,
wantErr: errAfterQuote,
}, {
in: `:backend,=value:`,
wantErr: errEmptyConfigParam,
}, {
in: `:backend,:`,
wantErr: errEmptyConfigParam,
}, {
in: `:backend,,:`,
wantErr: errEmptyConfigParam,
}, {
in: `:backend,param=:path`,
wantParsed: Parsed{
ConfigString: `:backend,param=`,
Name: ":backend",
Path: "path",
Config: configmap.Simple{
"param": "",
},
},
}, {
in: `:backend,param="with""quote":path`,
wantParsed: Parsed{
ConfigString: `:backend,param="with""quote"`,
Name: ":backend",
Path: "path",
Config: configmap.Simple{
"param": `with"quote`,
},
},
}, {
in: `:backend,param='''''':`,
wantParsed: Parsed{
ConfigString: `:backend,param=''''''`,
Name: ":backend",
Path: "",
Config: configmap.Simple{
"param": `''`,
},
},
}, {
in: `:backend,param=''bad'':`,
wantErr: errAfterQuote,
},
} {
gotParsed, gotErr := Parse(test.in)
if runtime.GOOS == "windows" && test.noWin {
continue
}
if runtime.GOOS != "windows" && test.win {
continue
}
assert.Equal(t, test.wantErr, gotErr, test.in)
if test.wantErr == nil {
assert.Equal(t, test.wantParsed, gotParsed, test.in)
}
if *makeCorpus {
// write the test corpus for fuzzing
require.NoError(t, os.MkdirAll("corpus", 0777))
require.NoError(t, os.WriteFile(fmt.Sprintf("corpus/%02d", testNumber), []byte(test.in), 0666))
}
}
}
func TestSplitFs(t *testing.T) {
for _, test := range []struct {
remote, wantRemoteName, wantRemotePath string
wantErr error
}{
{"", "", "", errCantBeEmpty},
{"remote:", "remote:", "", nil},
{"remote:potato", "remote:", "potato", nil},
{"remote:/", "remote:", "/", nil},
{"remote:/potato", "remote:", "/potato", nil},
{"remote:/potato/potato", "remote:", "/potato/potato", nil},
{"remote:potato/sausage", "remote:", "potato/sausage", nil},
{"rem.ote:potato/sausage", "rem.ote:", "potato/sausage", nil},
{"rem ote:", "rem ote:", "", nil},
{"remote :", "", "", errInvalidCharacters},
{" remote:", "", "", errInvalidCharacters},
{" remote :", "", "", errInvalidCharacters},
{".:", ".:", "", nil},
{"..:", "..:", "", nil},
{".:potato/sausage", ".:", "potato/sausage", nil},
{"..:potato/sausage", "..:", "potato/sausage", nil},
{":remote:", ":remote:", "", nil},
{":remote:potato", ":remote:", "potato", nil},
{":remote:/", ":remote:", "/", nil},
{":remote:/potato", ":remote:", "/potato", nil},
{":remote:/potato/potato", ":remote:", "/potato/potato", nil},
{":remote:potato/sausage", ":remote:", "potato/sausage", nil},
{":rem.ote:potato/sausage", ":rem.ote:", "potato/sausage", nil},
{":rem[ote:potato/sausage", "", "", errInvalidCharacters},
{":.:", ":.:", "", nil},
{":..:", ":..:", "", nil},
{":.:potato/sausage", ":.:", "potato/sausage", nil},
{":..:potato/sausage", ":..:", "potato/sausage", nil},
{"/", "", "/", nil},
{"/root", "", "/root", nil},
{"/a/b", "", "/a/b", nil},
{"root", "", "root", nil},
{"a/b", "", "a/b", nil},
{"root/", "", "root/", nil},
{"a/b/", "", "a/b/", nil},
} {
gotRemoteName, gotRemotePath, gotErr := SplitFs(test.remote)
assert.Equal(t, test.wantErr, gotErr)
assert.Equal(t, test.wantRemoteName, gotRemoteName, test.remote)
assert.Equal(t, test.wantRemotePath, gotRemotePath, test.remote)
if gotErr == nil {
assert.Equal(t, test.remote, gotRemoteName+gotRemotePath, fmt.Sprintf("%s: %q + %q != %q", test.remote, gotRemoteName, gotRemotePath, test.remote))
}
}
}
func TestSplit(t *testing.T) {
for _, test := range []struct {
remote, wantParent, wantLeaf string
wantErr error
}{
{"", "", "", errCantBeEmpty},
{"remote:", "remote:", "", nil},
{"remote:potato", "remote:", "potato", nil},
{"remote:/", "remote:/", "", nil},
{"remote:/potato", "remote:/", "potato", nil},
{"remote:/potato/potato", "remote:/potato/", "potato", nil},
{"remote:potato/sausage", "remote:potato/", "sausage", nil},
{"rem.ote:potato/sausage", "rem.ote:potato/", "sausage", nil},
{"rem ote:", "rem ote:", "", nil},
{"remote :", "", "", errInvalidCharacters},
{" remote:", "", "", errInvalidCharacters},
{" remote :", "", "", errInvalidCharacters},
{".:", ".:", "", nil},
{"..:", "..:", "", nil},
{".:potato/sausage", ".:potato/", "sausage", nil},
{"..:potato/sausage", "..:potato/", "sausage", nil},
{":remote:", ":remote:", "", nil},
{":remote:potato", ":remote:", "potato", nil},
{":remote:/", ":remote:/", "", nil},
{":remote:/potato", ":remote:/", "potato", nil},
{":remote:/potato/potato", ":remote:/potato/", "potato", nil},
{":remote:potato/sausage", ":remote:potato/", "sausage", nil},
{":rem.ote:potato/sausage", ":rem.ote:potato/", "sausage", nil},
{":rem[ote:potato/sausage", "", "", errInvalidCharacters},
{":.:", ":.:", "", nil},
{":..:", ":..:", "", nil},
{":.:potato/sausage", ":.:potato/", "sausage", nil},
{":..:potato/sausage", ":..:potato/", "sausage", nil},
{"/", "/", "", nil},
{"/root", "/", "root", nil},
{"/a/b", "/a/", "b", nil},
{"root", "", "root", nil},
{"a/b", "a/", "b", nil},
{"root/", "root/", "", nil},
{"a/b/", "a/b/", "", nil},
} {
gotParent, gotLeaf, gotErr := Split(test.remote)
assert.Equal(t, test.wantErr, gotErr)
assert.Equal(t, test.wantParent, gotParent, test.remote)
assert.Equal(t, test.wantLeaf, gotLeaf, test.remote)
if gotErr == nil {
assert.Equal(t, test.remote, gotParent+gotLeaf, fmt.Sprintf("%s: %q + %q != %q", test.remote, gotParent, gotLeaf, test.remote))
}
}
}
func TestMakeAbsolute(t *testing.T) {
for _, test := range []struct {
in string
want string
}{
{"", ""},
{".", ""},
{"/.", "/"},
{"../potato", "potato"},
{"/../potato", "/potato"},
{"./../potato", "potato"},
{"//../potato", "/potato"},
{"././../potato", "potato"},
{"././potato/../../onion", "onion"},
} {
got := makeAbsolute(test.in)
assert.Equal(t, test.want, got, test)
}
}
func TestJoinRootPath(t *testing.T) {
for _, test := range []struct {
remote string
filePath string
want string
}{
{"", "", ""},
{"", "/", "/"},
{"/", "", "/"},
{"/", "/", "/"},
{"/", "//", "/"},
{"/root", "", "/root"},
{"/root", "/", "/root"},
{"/root", "//", "/root"},
{"/a/b", "", "/a/b"},
{"//", "/", "//"},
{"//server", "path", "//server/path"},
{"//server/sub", "path", "//server/sub/path"},
{"//server", "//path", "//server/path"},
{"//server/sub", "//path", "//server/sub/path"},
{"//", "/", "//"},
{"//server", "path", "//server/path"},
{"//server/sub", "path", "//server/sub/path"},
{"//server", "//path", "//server/path"},
{"//server/sub", "//path", "//server/sub/path"},
{filepath.FromSlash("//server/sub"), filepath.FromSlash("//path"), "//server/sub/path"},
{"s3:", "", "s3:"},
{"s3:", ".", "s3:"},
{"s3:.", ".", "s3:"},
{"s3:", "..", "s3:"},
{"s3:dir", "sub", "s3:dir/sub"},
{"s3:dir", "/sub", "s3:dir/sub"},
{"s3:dir", "./sub", "s3:dir/sub"},
{"s3:/dir", "/sub/", "s3:/dir/sub"},
{"s3:dir", "..", "s3:dir"},
{"s3:dir", "/..", "s3:dir"},
{"s3:dir", "/../", "s3:dir"},
} {
got := JoinRootPath(test.remote, test.filePath)
assert.Equal(t, test.want, got, test)
}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/rc/internal_job_test.go | fs/rc/internal_job_test.go | // These tests use the job framework so must be external to the module
package rc_test
import (
"context"
"testing"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fs/rc/jobs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestInternalPanic(t *testing.T) {
ctx := context.Background()
call := rc.Calls.Get("rc/panic")
assert.NotNil(t, call)
in := rc.Params{}
_, out, err := jobs.NewJob(ctx, call.Fn, in)
require.Error(t, err)
assert.ErrorContains(t, err, "arbitrary error on input map[]")
assert.ErrorContains(t, err, "panic received:")
assert.Equal(t, rc.Params{}, out)
}
func TestInternalFatal(t *testing.T) {
ctx := context.Background()
call := rc.Calls.Get("rc/fatal")
assert.NotNil(t, call)
in := rc.Params{}
_, out, err := jobs.NewJob(ctx, call.Fn, in)
require.Error(t, err)
assert.ErrorContains(t, err, "arbitrary error on input map[]")
assert.ErrorContains(t, err, "panic received:")
assert.ErrorContains(t, err, "fatal error:")
assert.Equal(t, rc.Params{}, out)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/rc/rc.go | fs/rc/rc.go | // Package rc implements a remote control server and registry for rclone
//
// To register your internal calls, call rc.Add(path, function). Your
// function should take and return a Param. It can also return an
// error. Use rc.NewError to wrap an existing error along with an
// http response type if a response other than 500 internal
// error is required on error.
package rc
import (
"encoding/json"
"io"
_ "net/http/pprof" // install the pprof http handlers
"time"
"github.com/rclone/rclone/fs"
libhttp "github.com/rclone/rclone/lib/http"
)
// OptionsInfo describes the Options in use
var OptionsInfo = fs.Options{{
Name: "rc",
Default: false,
Help: "Enable the remote control server",
Groups: "RC",
}, {
Name: "rc_files",
Default: "",
Help: "Path to local files to serve on the HTTP server",
Groups: "RC",
}, {
Name: "rc_serve",
Default: false,
Help: "Enable the serving of remote objects",
Groups: "RC",
}, {
Name: "rc_serve_no_modtime",
Default: false,
Help: "Don't read the modification time (can speed things up)",
Groups: "RC",
}, {
Name: "rc_no_auth",
Default: false,
Help: "Don't require auth for certain methods",
Groups: "RC",
}, {
Name: "rc_web_gui",
Default: false,
Help: "Launch WebGUI on localhost",
Groups: "RC",
}, {
Name: "rc_web_gui_update",
Default: false,
Help: "Check and update to latest version of web gui",
Groups: "RC",
}, {
Name: "rc_web_gui_force_update",
Default: false,
Help: "Force update to latest version of web gui",
Groups: "RC",
}, {
Name: "rc_web_gui_no_open_browser",
Default: false,
Help: "Don't open the browser automatically",
Groups: "RC",
}, {
Name: "rc_web_fetch_url",
Default: "https://api.github.com/repos/rclone/rclone-webui-react/releases/latest",
Help: "URL to fetch the releases for webgui",
Groups: "RC",
}, {
Name: "rc_enable_metrics",
Default: false,
Help: "Enable the Prometheus metrics path at the remote control server",
Groups: "RC,Metrics",
}, {
Name: "rc_job_expire_duration",
Default: fs.Duration(60 * time.Second),
Help: "Expire finished async jobs older than this value",
Groups: "RC",
}, {
Name: "rc_job_expire_interval",
Default: fs.Duration(10 * time.Second),
Help: "Interval to check for expired async jobs",
Groups: "RC",
}, {
Name: "metrics_addr",
Default: []string{},
Help: "IPaddress:Port or :Port to bind metrics server to",
Groups: "Metrics",
}}.
AddPrefix(libhttp.ConfigInfo, "rc", "RC").
AddPrefix(libhttp.AuthConfigInfo, "rc", "RC").
AddPrefix(libhttp.TemplateConfigInfo, "rc", "RC").
AddPrefix(libhttp.ConfigInfo, "metrics", "Metrics").
AddPrefix(libhttp.AuthConfigInfo, "metrics", "Metrics").
AddPrefix(libhttp.TemplateConfigInfo, "metrics", "Metrics").
SetDefault("rc_addr", []string{"localhost:5572"})
func init() {
fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "rc", Opt: &Opt, Options: OptionsInfo})
}
// Options contains options for the remote control server
type Options struct {
HTTP libhttp.Config `config:"rc"`
Auth libhttp.AuthConfig `config:"rc"`
Template libhttp.TemplateConfig `config:"rc"`
Enabled bool `config:"rc"` // set to enable the server
Files string `config:"rc_files"` // set to enable serving files locally
Serve bool `config:"rc_serve"` // set to serve files from remotes
ServeNoModTime bool `config:"rc_serve_no_modtime"` // don't read the modification time
NoAuth bool `config:"rc_no_auth"` // set to disable auth checks on AuthRequired methods
WebUI bool `config:"rc_web_gui"` // set to launch the web ui
WebGUIUpdate bool `config:"rc_web_gui_update"` // set to check new update
WebGUIForceUpdate bool `config:"rc_web_gui_force_update"` // set to force download new update
WebGUINoOpenBrowser bool `config:"rc_web_gui_no_open_browser"` // set to disable auto opening browser
WebGUIFetchURL string `config:"rc_web_fetch_url"` // set the default url for fetching webgui
	EnableMetrics bool `config:"rc_enable_metrics"` // set to enable prometheus metrics on /metrics
MetricsHTTP libhttp.Config `config:"metrics"`
MetricsAuth libhttp.AuthConfig `config:"metrics"`
MetricsTemplate libhttp.TemplateConfig `config:"metrics"`
JobExpireDuration fs.Duration `config:"rc_job_expire_duration"`
JobExpireInterval fs.Duration `config:"rc_job_expire_interval"`
}
// Opt is the default values used for Options
var Opt Options
// WriteJSON writes JSON in out to w
func WriteJSON(w io.Writer, out Params) error {
enc := json.NewEncoder(w)
enc.SetIndent("", "\t")
return enc.Encode(out)
}
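// Illustrative usage sketch (not part of the original file): writing a Params
// response to any io.Writer with WriteJSON. The "status" and "message" keys
// are placeholders chosen for the example.
func writeExampleJSON(w io.Writer) error {
	out := Params{
		"status":  "ok",
		"message": "hello from the rc server",
	}
	// WriteJSON indents with tabs and json.Encoder appends a trailing newline.
	return WriteJSON(w, out)
}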
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/rc/registry.go | fs/rc/registry.go | // Define the registry
package rc
import (
"context"
"sort"
"strings"
"sync"
)
// Func defines a type for a remote control function
type Func func(ctx context.Context, in Params) (out Params, err error)
// Call defines info about a remote control function and is used in
// the Add function to create new entry points.
type Call struct {
Path string // path to activate this RC
Fn Func `json:"-"` // function to call
Title string // help for the function
AuthRequired bool // if set then this call requires authorisation to be set
Help string // multi-line markdown formatted help
NeedsRequest bool // if set then this call will be passed the original request object as _request
NeedsResponse bool // if set then this call will be passed the original response object as _response
}
// Registry holds the list of all the registered remote control functions
type Registry struct {
mu sync.RWMutex
call map[string]*Call
}
// NewRegistry makes a new registry for remote control functions
func NewRegistry() *Registry {
return &Registry{
call: make(map[string]*Call),
}
}
// Add a call to the registry
func (r *Registry) Add(call Call) {
r.mu.Lock()
defer r.mu.Unlock()
call.Path = strings.Trim(call.Path, "/")
call.Help = strings.TrimSpace(call.Help)
// fs.Debugf(nil, "Adding path %q to remote control registry", call.Path) // disabled to make initialization less verbose
r.call[call.Path] = &call
}
// Get a Call from a path or nil
func (r *Registry) Get(path string) *Call {
r.mu.RLock()
defer r.mu.RUnlock()
return r.call[path]
}
// List of all calls in alphabetical order
func (r *Registry) List() (out []*Call) {
r.mu.RLock()
defer r.mu.RUnlock()
var keys []string
for key := range r.call {
keys = append(keys, key)
}
sort.Strings(keys)
for _, key := range keys {
out = append(out, r.call[key])
}
return out
}
// Calls is the global registry of Call objects
var Calls = NewRegistry()
// Add a function to the global registry
func Add(call Call) {
Calls.Add(call)
}
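// Illustrative sketch (not in the original file): registering a call with the
// global registry. The path "custom/echo" and its behaviour are made up for
// this example only; real calls are registered from init() functions.
func registerEchoExample() {
	Add(Call{
		Path:  "custom/echo",
		Title: "Echo the input parameters (example)",
		Help:  "Returns the input parameters unchanged.",
		Fn: func(ctx context.Context, in Params) (Params, error) {
			// Echo the input straight back to the caller.
			return in, nil
		},
	})
}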
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/rc/cache.go | fs/rc/cache.go | // Utilities for accessing the Fs cache
package rc
import (
"context"
"errors"
"fmt"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/fspath"
)
// getFsName gets an fs name from fsName either from the cache or direct
func getFsName(in Params, fsName string) (fsString string, err error) {
fsString, err = in.GetString(fsName)
if err != nil {
if !IsErrParamInvalid(err) {
return fsString, err
}
fsString, err = getConfigMap(in, fsName)
if err != nil {
return fsString, err
}
}
return fsString, err
}
// GetFsNamed gets an fs.Fs named fsName either from the cache or creates it afresh
func GetFsNamed(ctx context.Context, in Params, fsName string) (f fs.Fs, err error) {
fsString, err := getFsName(in, fsName)
if err != nil {
return nil, err
}
return cache.Get(ctx, fsString)
}
// GetFsNamedFileOK gets an fs.Fs named fsName either from the cache or creates it afresh
//
// If the fs.Fs points to a single file then it returns a new ctx with
// filters applied to make the listings return only that file.
func GetFsNamedFileOK(ctx context.Context, in Params, fsName string) (newCtx context.Context, f fs.Fs, err error) {
fsString, err := getFsName(in, fsName)
if err != nil {
return ctx, nil, err
}
f, err = cache.Get(ctx, fsString)
if err == nil {
return ctx, f, nil
} else if !errors.Is(err, fs.ErrorIsFile) {
return ctx, nil, err
}
	// f points to the directory above the file so find the file name
_, fileName, err := fspath.Split(fsString)
if err != nil {
return ctx, f, err
}
ctx, fi := filter.AddConfig(ctx)
if !fi.InActive() {
return ctx, f, fmt.Errorf("can't limit to single files when using filters: %q", fileName)
}
// Limit transfers to this file
err = fi.AddFile(fileName)
if err != nil {
return ctx, f, fmt.Errorf("failed to limit to single file: %w", err)
}
return ctx, f, nil
}
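// Usage sketch (illustrative, not part of the original file): resolving a
// backend parameter that may point at a single file rather than a directory.
// The parameter key "srcFs" is an assumption made for the example.
func resolveMaybeFileExample(ctx context.Context, in Params) (context.Context, fs.Fs, error) {
	// If in["srcFs"] names a single file, the returned context carries a filter
	// limiting listings to just that file; otherwise the context is unchanged.
	return GetFsNamedFileOK(ctx, in, "srcFs")
}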
// getConfigMap gets the config as a map from in and converts it to a
// config string
//
// It uses the special parameters _name to name the remote and _root
// to set the root of the remote.
func getConfigMap(in Params, fsName string) (fsString string, err error) {
var m configmap.Simple
err = in.GetStruct(fsName, &m)
if err != nil {
return fsString, err
}
pop := func(key string) string {
value := m[key]
delete(m, key)
return value
}
Type := pop("type")
name := pop("_name")
root := pop("_root")
if name != "" {
fsString = name
} else if Type != "" {
fsString = ":" + Type
} else {
return fsString, errors.New(`couldn't find "type" or "_name" in JSON config definition`)
}
config := m.String()
if config != "" {
fsString += ","
fsString += config
}
fsString += ":"
fsString += root
return fsString, nil
}
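// Illustrative sketch (not in the original file): how a JSON-style backend
// definition is turned into a connection string. The backend type "sftp", the
// "host" parameter and the "_root" value are made-up examples.
func exampleConfigMapString() (string, error) {
	in := Params{
		"fs": Params{
			"type":  "sftp",
			"host":  "example.com",
			"_root": "/data",
		},
	}
	// Expected to produce something like ":sftp,host='example.com':/data".
	return getConfigMap(in, "fs")
}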
// GetFs gets an fs.Fs named "fs" either from the cache or creates it afresh
func GetFs(ctx context.Context, in Params) (f fs.Fs, err error) {
return GetFsNamed(ctx, in, "fs")
}
// GetFsAndRemoteNamed gets the fsName parameter from in, makes a
// remote or fetches it from the cache then gets the remoteName
// parameter from in too.
func GetFsAndRemoteNamed(ctx context.Context, in Params, fsName, remoteName string) (f fs.Fs, remote string, err error) {
remote, err = in.GetString(remoteName)
if err != nil {
return
}
f, err = GetFsNamed(ctx, in, fsName)
return
}
// GetFsAndRemote gets the `fs` parameter from in, makes a remote or
// fetches it from the cache then gets the `remote` parameter from in
// too.
func GetFsAndRemote(ctx context.Context, in Params) (f fs.Fs, remote string, err error) {
return GetFsAndRemoteNamed(ctx, in, "fs", "remote")
}
func init() {
Add(Call{
Path: "fscache/clear",
Fn: rcCacheClear,
Title: "Clear the Fs cache.",
AuthRequired: true,
Help: `
This clears the fs cache. This is where remotes created from backends
are cached for a short while to make repeated rc calls more efficient.
If you change the parameters of a backend then you may want to call
this to clear an existing remote out of the cache before re-creating
it.
`,
})
}
// Clear the fs cache
func rcCacheClear(ctx context.Context, in Params) (out Params, err error) {
cache.Clear()
return nil, nil
}
func init() {
Add(Call{
Path: "fscache/entries",
Fn: rcCacheEntries,
Title: "Returns the number of entries in the fs cache.",
AuthRequired: true,
Help: `
This returns the number of entries in the fs cache.
Returns
- entries - number of items in the cache
`,
})
}
// Return the number of entries in the fs cache
func rcCacheEntries(ctx context.Context, in Params) (out Params, err error) {
return Params{
"entries": cache.Entries(),
}, nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/rc/cache_test.go | fs/rc/cache_test.go | package rc
import (
"context"
"fmt"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fstest/mockfs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func mockNewFs(t *testing.T) func() {
ctx := context.Background()
f, err := mockfs.NewFs(ctx, "/", "", nil)
require.NoError(t, err)
cache.Put("/", f)
f, err = mockfs.NewFs(ctx, "mock", "/", nil)
require.NoError(t, err)
cache.Put("mock:/", f)
cache.Put(":mock:/", f)
f, err = mockfs.NewFs(ctx, "mock", "dir/", nil)
require.NoError(t, err)
cache.PutErr("mock:dir/file.txt", f, fs.ErrorIsFile)
return func() {
cache.Clear()
}
}
func TestGetFsNamed(t *testing.T) {
defer mockNewFs(t)()
in := Params{
"potato": "/",
}
f, err := GetFsNamed(context.Background(), in, "potato")
require.NoError(t, err)
assert.NotNil(t, f)
in = Params{
"sausage": "/",
}
f, err = GetFsNamed(context.Background(), in, "potato")
require.Error(t, err)
assert.Nil(t, f)
}
func TestGetFsNamedStruct(t *testing.T) {
defer mockNewFs(t)()
in := Params{
"potato": Params{
"type": "mock",
"_root": "/",
},
}
f, err := GetFsNamed(context.Background(), in, "potato")
require.NoError(t, err)
assert.NotNil(t, f)
in = Params{
"potato": Params{
"_name": "mock",
"_root": "/",
},
}
f, err = GetFsNamed(context.Background(), in, "potato")
require.NoError(t, err)
assert.NotNil(t, f)
}
func TestGetFsNamedFileOK(t *testing.T) {
defer mockNewFs(t)()
ctx := context.Background()
in := Params{
"potato": "/",
}
newCtx, f, err := GetFsNamedFileOK(ctx, in, "potato")
require.NoError(t, err)
assert.NotNil(t, f)
assert.Equal(t, ctx, newCtx)
in = Params{
"sausage": "/",
}
newCtx, f, err = GetFsNamedFileOK(ctx, in, "potato")
require.Error(t, err)
assert.Nil(t, f)
assert.Equal(t, ctx, newCtx)
in = Params{
"potato": "mock:dir/file.txt",
}
newCtx, f, err = GetFsNamedFileOK(ctx, in, "potato")
assert.Nil(t, err)
assert.NotNil(t, f)
assert.NotEqual(t, ctx, newCtx)
fi := filter.GetConfig(newCtx)
assert.False(t, fi.InActive())
assert.True(t, fi.IncludeRemote("file.txt"))
assert.False(t, fi.IncludeRemote("other.txt"))
}
func TestGetConfigMap(t *testing.T) {
for _, test := range []struct {
in Params
fsName string
wantFsString string
wantErr string
}{
{
in: Params{
"Fs": Params{},
},
fsName: "Fs",
wantErr: `couldn't find "type" or "_name" in JSON config definition`,
},
{
in: Params{
"Fs": Params{
"notastring": true,
},
},
fsName: "Fs",
wantErr: `cannot unmarshal bool`,
},
{
in: Params{
"Fs": Params{
"_name": "potato",
},
},
fsName: "Fs",
wantFsString: "potato:",
},
{
in: Params{
"Fs": Params{
"type": "potato",
},
},
fsName: "Fs",
wantFsString: ":potato:",
},
{
in: Params{
"Fs": Params{
"type": "sftp",
"_name": "potato",
"parameter": "42",
"parameter2": "true",
"_root": "/path/to/somewhere",
},
},
fsName: "Fs",
wantFsString: "potato,parameter='42',parameter2='true':/path/to/somewhere",
},
} {
gotFsString, gotErr := getConfigMap(test.in, test.fsName)
what := fmt.Sprintf("%+v", test.in)
assert.Equal(t, test.wantFsString, gotFsString, what)
if test.wantErr == "" {
assert.NoError(t, gotErr)
} else {
require.Error(t, gotErr)
assert.Contains(t, gotErr.Error(), test.wantErr)
}
}
}
func TestGetFs(t *testing.T) {
defer mockNewFs(t)()
in := Params{
"fs": "/",
}
f, err := GetFs(context.Background(), in)
require.NoError(t, err)
assert.NotNil(t, f)
}
func TestGetFsAndRemoteNamed(t *testing.T) {
defer mockNewFs(t)()
in := Params{
"fs": "/",
"remote": "hello",
}
f, remote, err := GetFsAndRemoteNamed(context.Background(), in, "fs", "remote")
require.NoError(t, err)
assert.NotNil(t, f)
assert.Equal(t, "hello", remote)
f, _, err = GetFsAndRemoteNamed(context.Background(), in, "fsX", "remote")
require.Error(t, err)
assert.Nil(t, f)
f, _, err = GetFsAndRemoteNamed(context.Background(), in, "fs", "remoteX")
require.Error(t, err)
assert.Nil(t, f)
}
func TestGetFsAndRemote(t *testing.T) {
defer mockNewFs(t)()
in := Params{
"fs": "/",
"remote": "hello",
}
f, remote, err := GetFsAndRemote(context.Background(), in)
require.NoError(t, err)
assert.NotNil(t, f)
assert.Equal(t, "hello", remote)
t.Run("RcFscache", func(t *testing.T) {
getEntries := func() int {
call := Calls.Get("fscache/entries")
require.NotNil(t, call)
in := Params{}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
require.NotNil(t, out)
return out["entries"].(int)
}
t.Run("Entries", func(t *testing.T) {
assert.NotEqual(t, 0, getEntries())
})
t.Run("Clear", func(t *testing.T) {
call := Calls.Get("fscache/clear")
require.NotNil(t, call)
in := Params{}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
require.Nil(t, out)
})
t.Run("Entries2", func(t *testing.T) {
assert.Equal(t, 0, getEntries())
})
})
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/rc/config.go | fs/rc/config.go | // Implement config options reading and writing
//
// This is done here rather than in fs/fs.go so we don't cause a circular dependency
package rc
import (
"context"
"fmt"
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
)
func init() {
Add(Call{
Path: "options/blocks",
Fn: rcOptionsBlocks,
Title: "List all the option blocks",
Help: `Returns:
- options - a list of the options block names`,
})
}
// Show the list of all the option blocks
func rcOptionsBlocks(ctx context.Context, in Params) (out Params, err error) {
options := []string{}
for _, opt := range fs.OptionsRegistry {
options = append(options, opt.Name)
}
out = make(Params)
out["options"] = options
return out, nil
}
func init() {
Add(Call{
Path: "options/get",
Fn: rcOptionsGet,
Title: "Get all the global options",
Help: `Returns an object where keys are option block names and values are an
object with the current option values in.
Parameters:
- blocks: optional string of comma separated blocks to include
- all are included if this is missing or ""
Note that these are the global options which are unaffected by use of
the _config and _filter parameters. If you wish to read the parameters
set in _config or _filter use options/local.
This shows the internal names of the options within rclone, which should
map to the external options very easily with a few exceptions.
`,
})
}
// Filter the blocks according to name
func filterBlocks(in Params, f func(oi fs.OptionsInfo)) (err error) {
blocksStr, err := in.GetString("blocks")
if err != nil && !IsErrParamNotFound(err) {
return err
}
blocks := map[string]struct{}{}
for name := range strings.SplitSeq(blocksStr, ",") {
if name != "" {
blocks[name] = struct{}{}
}
}
for _, oi := range fs.OptionsRegistry {
if _, found := blocks[oi.Name]; found || len(blocks) == 0 {
f(oi)
}
}
return nil
}
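// Illustrative sketch (not part of the original file): collecting the names of
// the option blocks selected by a "blocks" parameter. The block names in the
// comment are examples only.
func selectedBlockNamesExample(in Params) ([]string, error) {
	var names []string
	// With in = Params{"blocks": "main,rc"} only those blocks are visited;
	// with no "blocks" key every registered block is visited.
	err := filterBlocks(in, func(oi fs.OptionsInfo) {
		names = append(names, oi.Name)
	})
	return names, err
}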
// Show the list of all the option blocks
func rcOptionsGet(ctx context.Context, in Params) (out Params, err error) {
out = make(Params)
err = filterBlocks(in, func(oi fs.OptionsInfo) {
out[oi.Name] = oi.Opt
})
return out, err
}
func init() {
Add(Call{
Path: "options/info",
Fn: rcOptionsInfo,
Title: "Get info about all the global options",
Help: `Returns an object where keys are option block names and values are an
array of objects with info about each option.
Parameters:
- blocks: optional string of comma separated blocks to include
- all are included if this is missing or ""
These objects are in the same format as returned by "config/providers". They are
described in the [option blocks](#option-blocks) section.
`,
})
}
// Show the info of all the option blocks
func rcOptionsInfo(ctx context.Context, in Params) (out Params, err error) {
out = make(Params)
err = filterBlocks(in, func(oi fs.OptionsInfo) {
out[oi.Name] = oi.Options
})
return out, err
}
func init() {
Add(Call{
Path: "options/local",
Fn: rcOptionsLocal,
Title: "Get the currently active config for this call",
Help: `Returns an object with the keys "config" and "filter".
The "config" key contains the local config and the "filter" key contains
the local filters.
Note that these are the local options specific to this rc call. If
_config was not supplied then they will be the global options.
Likewise with "_filter".
This call is mostly useful for seeing if _config and _filter passing
is working.
This shows the internal names of the options within rclone, which should
map to the external options very easily with a few exceptions.
`,
})
}
// Show the current config
func rcOptionsLocal(ctx context.Context, in Params) (out Params, err error) {
out = make(Params)
out["config"] = fs.GetConfig(ctx)
out["filter"] = filter.GetConfig(ctx).Opt
return out, nil
}
func init() {
Add(Call{
Path: "options/set",
Fn: rcOptionsSet,
Title: "Set an option",
Help: `Parameters:
- option block name containing an object with
- key: value
Repeated as often as required.
Only supply the options you wish to change. If an option is unknown
it will be silently ignored. Not all options will have an effect when
changed like this.
For example:
This sets DEBUG level logs (-vv) (these can be set by number or string)
rclone rc options/set --json '{"main": {"LogLevel": "DEBUG"}}'
rclone rc options/set --json '{"main": {"LogLevel": 8}}'
And this sets INFO level logs (-v)
rclone rc options/set --json '{"main": {"LogLevel": "INFO"}}'
And this sets NOTICE level logs (normal without -v)
rclone rc options/set --json '{"main": {"LogLevel": "NOTICE"}}'
`,
})
}
// Set an option in an option block
func rcOptionsSet(ctx context.Context, in Params) (out Params, err error) {
for name, options := range in {
opt, ok := fs.OptionsRegistry[name]
if !ok {
return nil, fmt.Errorf("unknown option block %q", name)
}
err := Reshape(opt.Opt, options)
if err != nil {
return nil, fmt.Errorf("failed to write options from block %q: %w", name, err)
}
if opt.Reload != nil {
err = opt.Reload(ctx)
if err != nil {
return nil, fmt.Errorf("failed to reload options from block %q: %w", name, err)
}
}
}
return out, nil
}
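// Illustrative sketch (not in the original file): driving the registered
// "options/set" call programmatically. The block name "main" and the LogLevel
// value mirror the help text above but are used here only as an example.
func setLogLevelExample(ctx context.Context) error {
	call := Calls.Get("options/set")
	if call == nil {
		return fmt.Errorf("options/set not registered")
	}
	_, err := call.Fn(ctx, Params{
		"main": Params{"LogLevel": "DEBUG"},
	})
	return err
}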
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/rc/params.go | fs/rc/params.go | // Parameter parsing
package rc
import (
"encoding/json"
"errors"
"fmt"
"maps"
"math"
"net/http"
"strconv"
"time"
"github.com/rclone/rclone/fs"
)
// Params is the input and output type for the Func
type Params map[string]any
// ErrParamNotFound - this is returned from the Get* functions if the
// parameter isn't found along with a zero value of the requested
// item.
//
// Returning an error of this type from an rc.Func will cause the http
// method to return http.StatusBadRequest
type ErrParamNotFound string
// Error turns this error into a string
func (e ErrParamNotFound) Error() string {
return fmt.Sprintf("Didn't find key %q in input", string(e))
}
// IsErrParamNotFound returns whether err is ErrParamNotFound
func IsErrParamNotFound(err error) bool {
_, isNotFound := err.(ErrParamNotFound)
return isNotFound
}
// NotErrParamNotFound returns true if err != nil and
// !IsErrParamNotFound(err)
//
// This is for checking error returns of the Get* functions to ignore
// error not found returns and take the default value.
func NotErrParamNotFound(err error) bool {
return err != nil && !IsErrParamNotFound(err)
}
// ErrParamInvalid - this is returned from the Get* functions if the
// parameter is invalid.
//
// Returning an error of this type from an rc.Func will cause the http
// method to return http.StatusBadRequest
type ErrParamInvalid struct {
error
}
// NewErrParamInvalid returns new ErrParamInvalid from given error
func NewErrParamInvalid(err error) ErrParamInvalid {
return ErrParamInvalid{err}
}
// IsErrParamInvalid returns whether err is ErrParamInvalid
func IsErrParamInvalid(err error) bool {
_, isInvalid := err.(ErrParamInvalid)
return isInvalid
}
// Reshape reshapes one blob of data into another via json serialization
//
// out should be a pointer type
//
// This isn't a very efficient way of dealing with this!
func Reshape(out any, in any) error {
b, err := json.Marshal(in)
if err != nil {
return fmt.Errorf("Reshape failed to Marshal: %w", err)
}
err = json.Unmarshal(b, out)
if err != nil {
return fmt.Errorf("Reshape failed to Unmarshal: %w", err)
}
return nil
}
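// Small usage sketch (not from the original source): converting a Params map
// into a typed struct with Reshape. The exampleOptions type and its fields are
// invented for this example.
type exampleOptions struct {
	Name  string
	Count int
}
func reshapeExample(in Params) (exampleOptions, error) {
	var out exampleOptions
	// Reshape round-trips through JSON, so the field names must match the map keys.
	err := Reshape(&out, in)
	return out, err
}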
// Copy shallow copies the Params
func (p Params) Copy() (out Params) {
out = make(Params, len(p))
maps.Copy(out, p)
return out
}
// Get gets a parameter from the input
//
// If the parameter isn't found then error will be of type
// ErrParamNotFound and the returned value will be nil.
func (p Params) Get(key string) (any, error) {
value, ok := p[key]
if !ok {
return nil, ErrParamNotFound(key)
}
return value, nil
}
// GetHTTPRequest gets a http.Request parameter associated with the request with the key "_request"
//
// If the parameter isn't found then error will be of type
// ErrParamNotFound and the returned value will be nil.
func (p Params) GetHTTPRequest() (*http.Request, error) {
key := "_request"
value, err := p.Get(key)
if err != nil {
return nil, err
}
request, ok := value.(*http.Request)
if !ok {
return nil, ErrParamInvalid{fmt.Errorf("expecting http.request value for key %q (was %T)", key, value)}
}
return request, nil
}
// GetHTTPResponseWriter gets a http.ResponseWriter parameter associated with the request with the key "_response"
//
// If the parameter isn't found then error will be of type
// ErrParamNotFound and the returned value will be nil.
func (p Params) GetHTTPResponseWriter() (http.ResponseWriter, error) {
key := "_response"
value, err := p.Get(key)
if err != nil {
return nil, err
}
request, ok := value.(http.ResponseWriter)
if !ok {
return nil, ErrParamInvalid{fmt.Errorf("expecting http.ResponseWriter value for key %q (was %T)", key, value)}
}
return request, nil
}
// GetString gets a string parameter from the input
//
// If the parameter isn't found then error will be of type
// ErrParamNotFound and the returned value will be "".
func (p Params) GetString(key string) (string, error) {
value, err := p.Get(key)
if err != nil {
return "", err
}
str, ok := value.(string)
if !ok {
return "", ErrParamInvalid{fmt.Errorf("expecting string value for key %q (was %T)", key, value)}
}
return str, nil
}
// GetInt64 gets an int64 parameter from the input
//
// If the parameter isn't found then error will be of type
// ErrParamNotFound and the returned value will be 0.
func (p Params) GetInt64(key string) (int64, error) {
value, err := p.Get(key)
if err != nil {
return 0, err
}
switch x := value.(type) {
case int:
return int64(x), nil
case int64:
return x, nil
case float64:
if x > math.MaxInt64 || x < math.MinInt64 {
			return 0, ErrParamInvalid{fmt.Errorf("key %q (%v) overflows int64", key, value)}
}
return int64(x), nil
case string:
		i, err := strconv.ParseInt(x, 10, 64)
if err != nil {
return 0, ErrParamInvalid{fmt.Errorf("couldn't parse key %q (%v) as int64: %w", key, value, err)}
}
return i, nil
}
return 0, ErrParamInvalid{fmt.Errorf("expecting int64 value for key %q (was %T)", key, value)}
}
// GetFloat64 gets a float64 parameter from the input
//
// If the parameter isn't found then error will be of type
// ErrParamNotFound and the returned value will be 0.
func (p Params) GetFloat64(key string) (float64, error) {
value, err := p.Get(key)
if err != nil {
return 0, err
}
switch x := value.(type) {
case float64:
return x, nil
case int:
return float64(x), nil
case int64:
return float64(x), nil
case string:
f, err := strconv.ParseFloat(x, 64)
if err != nil {
return 0, ErrParamInvalid{fmt.Errorf("couldn't parse key %q (%v) as float64: %w", key, value, err)}
}
return f, nil
}
return 0, ErrParamInvalid{fmt.Errorf("expecting float64 value for key %q (was %T)", key, value)}
}
// GetBool gets a boolean parameter from the input
//
// If the parameter isn't found then error will be of type
// ErrParamNotFound and the returned value will be false.
func (p Params) GetBool(key string) (bool, error) {
value, err := p.Get(key)
if err != nil {
return false, err
}
switch x := value.(type) {
case int:
return x != 0, nil
case int64:
return x != 0, nil
case float64:
return x != 0, nil
case bool:
return x, nil
case string:
b, err := strconv.ParseBool(x)
if err != nil {
return false, ErrParamInvalid{fmt.Errorf("couldn't parse key %q (%v) as bool: %w", key, value, err)}
}
return b, nil
}
return false, ErrParamInvalid{fmt.Errorf("expecting bool value for key %q (was %T)", key, value)}
}
// GetStruct gets a struct from key from the input into the struct
// pointed to by out. out must be a pointer type.
//
// If the parameter isn't found then error will be of type
// ErrParamNotFound and out will be unchanged.
func (p Params) GetStruct(key string, out any) error {
value, err := p.Get(key)
if err != nil {
return err
}
err = Reshape(out, value)
if err != nil {
if valueStr, ok := value.(string); ok {
// try to unmarshal as JSON if string
err = json.Unmarshal([]byte(valueStr), out)
if err == nil {
return nil
}
}
return ErrParamInvalid{fmt.Errorf("key %q: %w", key, err)}
}
return nil
}
// GetStructMissingOK works like GetStruct but doesn't return an error
// if the key is missing
func (p Params) GetStructMissingOK(key string, out any) error {
_, ok := p[key]
if !ok {
return nil
}
return p.GetStruct(key, out)
}
// GetDuration get the duration parameters from in
func (p Params) GetDuration(key string) (time.Duration, error) {
s, err := p.GetString(key)
if err != nil {
return 0, err
}
duration, err := fs.ParseDuration(s)
if err != nil {
return 0, ErrParamInvalid{fmt.Errorf("parse duration: %w", err)}
}
return duration, nil
}
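// Usage sketch (illustrative, not part of the original file): reading an
// optional duration from the input and falling back to a default when the key
// is absent. The key "timeout" and the 30s default are assumptions.
func getTimeoutExample(in Params) (time.Duration, error) {
	timeout, err := in.GetDuration("timeout")
	if IsErrParamNotFound(err) {
		// Key missing: use a default rather than failing the call.
		return 30 * time.Second, nil
	}
	return timeout, err
}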
// GetFsDuration get the duration parameters from in
func (p Params) GetFsDuration(key string) (fs.Duration, error) {
d, err := p.GetDuration(key)
return fs.Duration(d), err
}
// Error creates the standard response for an errored rc call using an
// rc.Param from a path, input Params, error and a suggested HTTP
// response code.
//
// It returns a Params and an updated status code
func Error(path string, in Params, err error, status int) (Params, int) {
// Adjust the status code for some well known errors
switch {
case errors.Is(err, fs.ErrorDirNotFound) || errors.Is(err, fs.ErrorObjectNotFound):
status = http.StatusNotFound
case IsErrParamInvalid(err) || IsErrParamNotFound(err):
status = http.StatusBadRequest
}
result := Params{
"status": status,
"error": err.Error(),
"input": in,
"path": path,
}
return result, status
}
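// Illustrative sketch (not in the original source): converting a handler error
// into the standard error response body and adjusted status code. The path and
// input values are placeholders.
func errorResponseExample(err error) (Params, int) {
	in := Params{"fs": "remote:"}
	// Parameter errors map to 400 and not-found errors to 404; anything else
	// keeps the suggested status, 500 here.
	return Error("operations/list", in, err, http.StatusInternalServerError)
}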
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/rc/config_test.go | fs/rc/config_test.go | package rc
import (
"context"
"encoding/json"
"errors"
"fmt"
"testing"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func clearOptionBlock() func() {
oldOptionBlock := fs.OptionsRegistry
fs.OptionsRegistry = map[string]fs.OptionsInfo{}
return func() {
fs.OptionsRegistry = oldOptionBlock
}
}
var testInfo = fs.Options{{
Name: "string",
Default: "str",
Help: "It is a string",
}, {
Name: "int",
Default: 17,
Help: "It is an int",
}}
var testOptions = struct {
String string
Int int
}{
String: "hello",
Int: 42,
}
func registerTestOptions() {
fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "potato", Opt: &testOptions, Options: testInfo})
}
func registerTestOptionsReload(reload func(context.Context) error) {
fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "potato", Opt: &testOptions, Options: testInfo, Reload: reload})
}
func TestAddOption(t *testing.T) {
defer clearOptionBlock()()
assert.Equal(t, len(fs.OptionsRegistry), 0)
registerTestOptions()
assert.Equal(t, len(fs.OptionsRegistry), 1)
assert.Equal(t, &testOptions, fs.OptionsRegistry["potato"].Opt)
}
func TestAddOptionReload(t *testing.T) {
defer clearOptionBlock()()
assert.Equal(t, len(fs.OptionsRegistry), 0)
reload := func(ctx context.Context) error { return nil }
registerTestOptionsReload(reload)
assert.Equal(t, len(fs.OptionsRegistry), 1)
assert.Equal(t, &testOptions, fs.OptionsRegistry["potato"].Opt)
assert.Equal(t, fmt.Sprintf("%p", reload), fmt.Sprintf("%p", fs.OptionsRegistry["potato"].Reload))
}
func TestOptionsBlocks(t *testing.T) {
defer clearOptionBlock()()
registerTestOptions()
call := Calls.Get("options/blocks")
require.NotNil(t, call)
in := Params{}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
require.NotNil(t, out)
assert.Equal(t, Params{"options": []string{"potato"}}, out)
}
func TestOptionsGet(t *testing.T) {
defer clearOptionBlock()()
registerTestOptions()
call := Calls.Get("options/get")
require.NotNil(t, call)
in := Params{}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
require.NotNil(t, out)
assert.Equal(t, Params{"potato": &testOptions}, out)
in = Params{"blocks": "sausage,potato,rhubarb"}
out, err = call.Fn(context.Background(), in)
require.NoError(t, err)
require.NotNil(t, out)
assert.Equal(t, Params{"potato": &testOptions}, out)
in = Params{"blocks": "sausage"}
out, err = call.Fn(context.Background(), in)
require.NoError(t, err)
require.NotNil(t, out)
assert.Equal(t, Params{}, out)
}
func TestOptionsGetMarshal(t *testing.T) {
defer clearOptionBlock()()
ctx := context.Background()
ci := fs.GetConfig(ctx)
// Add some real options
fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "main", Opt: ci, Options: nil})
fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "rc", Opt: &Opt, Options: nil})
// get them
call := Calls.Get("options/get")
require.NotNil(t, call)
in := Params{}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
require.NotNil(t, out)
// Check that they marshal
_, err = json.Marshal(out)
require.NoError(t, err)
}
func TestOptionsInfo(t *testing.T) {
defer clearOptionBlock()()
registerTestOptions()
call := Calls.Get("options/info")
require.NotNil(t, call)
in := Params{}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
require.NotNil(t, out)
assert.Equal(t, Params{"potato": testInfo}, out)
in = Params{"blocks": "sausage,potato,rhubarb"}
out, err = call.Fn(context.Background(), in)
require.NoError(t, err)
require.NotNil(t, out)
assert.Equal(t, Params{"potato": testInfo}, out)
in = Params{"blocks": "sausage"}
out, err = call.Fn(context.Background(), in)
require.NoError(t, err)
require.NotNil(t, out)
assert.Equal(t, Params{}, out)
}
func TestOptionsSet(t *testing.T) {
defer clearOptionBlock()()
var reloaded int
registerTestOptionsReload(func(ctx context.Context) error {
if reloaded > 1 {
return errors.New("error while reloading")
}
reloaded++
return nil
})
call := Calls.Get("options/set")
require.NotNil(t, call)
in := Params{
"potato": Params{
"Int": 50,
},
}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
require.Nil(t, out)
assert.Equal(t, 50, testOptions.Int)
assert.Equal(t, "str", testOptions.String)
assert.Equal(t, 2, reloaded)
// error from reload
_, err = call.Fn(context.Background(), in)
require.Error(t, err)
assert.Contains(t, err.Error(), "error while reloading")
// unknown option block
in = Params{
"sausage": Params{
"Int": 50,
},
}
_, err = call.Fn(context.Background(), in)
require.Error(t, err)
assert.Contains(t, err.Error(), "unknown option block")
// bad shape
in = Params{
"potato": []string{"a", "b"},
}
_, err = call.Fn(context.Background(), in)
require.Error(t, err)
assert.Contains(t, err.Error(), "failed to write options")
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/rc/params_test.go | fs/rc/params_test.go | package rc
import (
"errors"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/rclone/rclone/fs"
)
func TestErrParamNotFoundError(t *testing.T) {
e := ErrParamNotFound("key")
assert.Equal(t, "Didn't find key \"key\" in input", e.Error())
}
func TestIsErrParamNotFound(t *testing.T) {
assert.Equal(t, true, IsErrParamNotFound(ErrParamNotFound("key")))
assert.Equal(t, false, IsErrParamNotFound(nil))
assert.Equal(t, false, IsErrParamNotFound(errors.New("potato")))
}
func TestNotErrParamNotFound(t *testing.T) {
assert.Equal(t, false, NotErrParamNotFound(ErrParamNotFound("key")))
assert.Equal(t, false, NotErrParamNotFound(nil))
assert.Equal(t, true, NotErrParamNotFound(errors.New("potato")))
}
func TestIsErrParamInvalid(t *testing.T) {
e := ErrParamInvalid{errors.New("potato")}
assert.Equal(t, true, IsErrParamInvalid(e))
assert.Equal(t, false, IsErrParamInvalid(nil))
assert.Equal(t, false, IsErrParamInvalid(errors.New("potato")))
}
func TestReshape(t *testing.T) {
in := Params{
"String": "hello",
"Float": 4.2,
}
var out struct {
String string
Float float64
}
require.NoError(t, Reshape(&out, in))
assert.Equal(t, "hello", out.String)
assert.Equal(t, 4.2, out.Float)
var inCopy = Params{}
require.NoError(t, Reshape(&inCopy, out))
assert.Equal(t, in, inCopy)
// Now a failure to marshal
var in2 func()
require.Error(t, Reshape(&inCopy, in2))
// Now a failure to unmarshal
require.Error(t, Reshape(&out, "string"))
}
func TestParamsCopy(t *testing.T) {
in := Params{
"ok": 1,
"x": "seventeen",
"nil": nil,
}
out := in.Copy()
assert.Equal(t, in, out)
if &in == &out {
t.Error("didn't copy")
}
}
func TestParamsGet(t *testing.T) {
in := Params{
"ok": 1,
}
v1, e1 := in.Get("ok")
assert.NoError(t, e1)
assert.Equal(t, 1, v1)
v2, e2 := in.Get("notOK")
assert.Error(t, e2)
assert.Equal(t, nil, v2)
assert.Equal(t, ErrParamNotFound("notOK"), e2)
}
func TestParamsGetString(t *testing.T) {
in := Params{
"string": "one",
"notString": 17,
}
v1, e1 := in.GetString("string")
assert.NoError(t, e1)
assert.Equal(t, "one", v1)
v2, e2 := in.GetString("notOK")
assert.Error(t, e2)
assert.Equal(t, "", v2)
assert.Equal(t, ErrParamNotFound("notOK"), e2)
v3, e3 := in.GetString("notString")
assert.Error(t, e3)
assert.Equal(t, "", v3)
assert.Equal(t, true, IsErrParamInvalid(e3), e3.Error())
}
func TestParamsGetInt64(t *testing.T) {
for _, test := range []struct {
value any
result int64
errString string
}{
{"123", 123, ""},
{"123x", 0, "couldn't parse"},
{int(12), 12, ""},
{int64(13), 13, ""},
{float64(14), 14, ""},
{float64(9.3e18), 0, "overflows int64"},
{float64(-9.3e18), 0, "overflows int64"},
} {
t.Run(fmt.Sprintf("%T=%v", test.value, test.value), func(t *testing.T) {
in := Params{
"key": test.value,
}
v1, e1 := in.GetInt64("key")
if test.errString == "" {
require.NoError(t, e1)
assert.Equal(t, test.result, v1)
} else {
require.NotNil(t, e1)
require.Error(t, e1)
assert.Contains(t, e1.Error(), test.errString)
assert.Equal(t, int64(0), v1)
}
})
}
in := Params{
"notInt64": []string{"a", "b"},
}
v2, e2 := in.GetInt64("notOK")
assert.Error(t, e2)
assert.Equal(t, int64(0), v2)
assert.Equal(t, ErrParamNotFound("notOK"), e2)
v3, e3 := in.GetInt64("notInt64")
assert.Error(t, e3)
assert.Equal(t, int64(0), v3)
assert.Equal(t, true, IsErrParamInvalid(e3), e3.Error())
}
func TestParamsGetFloat64(t *testing.T) {
for _, test := range []struct {
value any
result float64
errString string
}{
{"123.1", 123.1, ""},
{"123x1", 0, "couldn't parse"},
{int(12), 12, ""},
{int64(13), 13, ""},
{float64(14), 14, ""},
} {
t.Run(fmt.Sprintf("%T=%v", test.value, test.value), func(t *testing.T) {
in := Params{
"key": test.value,
}
v1, e1 := in.GetFloat64("key")
if test.errString == "" {
require.NoError(t, e1)
assert.Equal(t, test.result, v1)
} else {
require.NotNil(t, e1)
require.Error(t, e1)
assert.Contains(t, e1.Error(), test.errString)
assert.Equal(t, float64(0), v1)
}
})
}
in := Params{
"notFloat64": []string{"a", "b"},
}
v2, e2 := in.GetFloat64("notOK")
assert.Error(t, e2)
assert.Equal(t, float64(0), v2)
assert.Equal(t, ErrParamNotFound("notOK"), e2)
v3, e3 := in.GetFloat64("notFloat64")
assert.Error(t, e3)
assert.Equal(t, float64(0), v3)
assert.Equal(t, true, IsErrParamInvalid(e3), e3.Error())
}
func TestParamsGetDuration(t *testing.T) {
for _, test := range []struct {
value any
result time.Duration
errString string
}{
{"86400", time.Hour * 24, ""},
{"1y", time.Hour * 24 * 365, ""},
{"60", time.Minute * 1, ""},
{"0", 0, ""},
{"-45", -time.Second * 45, ""},
{"2", time.Second * 2, ""},
{"2h4m7s", time.Hour*2 + 4*time.Minute + 7*time.Second, ""},
{"3d", time.Hour * 24 * 3, ""},
{"off", time.Duration(fs.DurationOff), ""},
{"", 0, "parse duration"},
{12, 0, "expecting string"},
{"34y", time.Hour * 24 * 365 * 34, ""},
{"30d", time.Hour * 24 * 30, ""},
{"2M", time.Hour * 24 * 60, ""},
{"wrong", 0, "parse duration"},
} {
t.Run(fmt.Sprintf("%T=%v", test.value, test.value), func(t *testing.T) {
in := Params{
"key": test.value,
}
v1, e1 := in.GetDuration("key")
if test.errString == "" {
require.NoError(t, e1)
assert.Equal(t, test.result, v1)
} else {
require.NotNil(t, e1)
require.Error(t, e1)
assert.Contains(t, e1.Error(), test.errString)
assert.Equal(t, time.Duration(0), v1)
}
})
}
in := Params{
"notDuration": []string{"a", "b"},
}
v2, e2 := in.GetDuration("notOK")
assert.Error(t, e2)
assert.Equal(t, time.Duration(0), v2)
assert.Equal(t, ErrParamNotFound("notOK"), e2)
v3, e3 := in.GetDuration("notDuration")
assert.Error(t, e3)
assert.Equal(t, time.Duration(0), v3)
assert.Equal(t, true, IsErrParamInvalid(e3), e3.Error())
}
func TestParamsGetBool(t *testing.T) {
for _, test := range []struct {
value any
result bool
errString string
}{
{true, true, ""},
{false, false, ""},
{"true", true, ""},
{"false", false, ""},
{"fasle", false, "couldn't parse"},
{int(12), true, ""},
{int(0), false, ""},
{int64(13), true, ""},
{int64(0), false, ""},
{float64(14), true, ""},
{float64(0), false, ""},
} {
t.Run(fmt.Sprintf("%T=%v", test.value, test.value), func(t *testing.T) {
in := Params{
"key": test.value,
}
v1, e1 := in.GetBool("key")
if test.errString == "" {
require.NoError(t, e1)
assert.Equal(t, test.result, v1)
} else {
require.NotNil(t, e1)
require.Error(t, e1)
assert.Contains(t, e1.Error(), test.errString)
assert.Equal(t, false, v1)
}
})
}
in := Params{
"notBool": []string{"a", "b"},
}
v2, e2 := Params{}.GetBool("notOK")
assert.Error(t, e2)
assert.Equal(t, false, v2)
assert.Equal(t, ErrParamNotFound("notOK"), e2)
v3, e3 := in.GetBool("notBool")
assert.Error(t, e3)
assert.Equal(t, false, v3)
assert.Equal(t, true, IsErrParamInvalid(e3), e3.Error())
}
func TestParamsGetStruct(t *testing.T) {
in := Params{
"struct": Params{
"String": "one",
"Float": 4.2,
},
}
var out struct {
String string
Float float64
}
e1 := in.GetStruct("struct", &out)
assert.NoError(t, e1)
assert.Equal(t, "one", out.String)
assert.Equal(t, 4.2, out.Float)
e2 := in.GetStruct("notOK", &out)
assert.Error(t, e2)
assert.Equal(t, "one", out.String)
assert.Equal(t, 4.2, out.Float)
assert.Equal(t, ErrParamNotFound("notOK"), e2)
in["struct"] = "string"
e3 := in.GetStruct("struct", &out)
assert.Error(t, e3)
assert.Equal(t, "one", out.String)
assert.Equal(t, 4.2, out.Float)
assert.Equal(t, true, IsErrParamInvalid(e3), e3.Error())
}
func TestParamsGetStructString(t *testing.T) {
in := Params{
"struct": `{"String": "one", "Float": 4.2}`,
}
var out struct {
String string
Float float64
}
e1 := in.GetStruct("struct", &out)
assert.NoError(t, e1)
assert.Equal(t, "one", out.String)
assert.Equal(t, 4.2, out.Float)
}
func TestParamsGetStructMissingOK(t *testing.T) {
in := Params{
"struct": Params{
"String": "one",
"Float": 4.2,
},
}
var out struct {
String string
Float float64
}
e1 := in.GetStructMissingOK("struct", &out)
assert.NoError(t, e1)
assert.Equal(t, "one", out.String)
assert.Equal(t, 4.2, out.Float)
e2 := in.GetStructMissingOK("notOK", &out)
assert.NoError(t, e2)
assert.Equal(t, "one", out.String)
assert.Equal(t, 4.2, out.Float)
in["struct"] = "string"
e3 := in.GetStructMissingOK("struct", &out)
assert.Error(t, e3)
assert.Equal(t, "one", out.String)
assert.Equal(t, 4.2, out.Float)
assert.Equal(t, true, IsErrParamInvalid(e3), e3.Error())
}
func TestParamsGetHTTPRequest(t *testing.T) {
in := Params{}
req, err := in.GetHTTPRequest()
assert.Nil(t, req)
assert.Error(t, err)
assert.Equal(t, true, IsErrParamNotFound(err), err.Error())
in = Params{
"_request": 42,
}
req, err = in.GetHTTPRequest()
assert.Nil(t, req)
assert.Error(t, err)
assert.Equal(t, true, IsErrParamInvalid(err), err.Error())
r := new(http.Request)
in = Params{
"_request": r,
}
req, err = in.GetHTTPRequest()
assert.NotNil(t, req)
assert.NoError(t, err)
assert.Equal(t, r, req)
}
func TestParamsGetHTTPResponseWriter(t *testing.T) {
in := Params{}
wr, err := in.GetHTTPResponseWriter()
assert.Nil(t, wr)
assert.Error(t, err)
assert.Equal(t, true, IsErrParamNotFound(err), err.Error())
in = Params{
"_response": 42,
}
wr, err = in.GetHTTPResponseWriter()
assert.Nil(t, wr)
assert.Error(t, err)
assert.Equal(t, true, IsErrParamInvalid(err), err.Error())
var w http.ResponseWriter = httptest.NewRecorder()
in = Params{
"_response": w,
}
wr, err = in.GetHTTPResponseWriter()
assert.NotNil(t, wr)
assert.NoError(t, err)
assert.Equal(t, w, wr)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/rc/internal_test.go | fs/rc/internal_test.go | package rc
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"os"
"runtime"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/obscure"
)
func TestMain(m *testing.M) {
// Pretend to be rclone version if we have a version string parameter
if os.Args[len(os.Args)-1] == "version" {
fmt.Printf("rclone %s\n", fs.Version)
os.Exit(0)
}
// Pretend to error if we have an unknown command
if os.Args[len(os.Args)-1] == "unknown_command" {
fmt.Printf("rclone %s\n", fs.Version)
fmt.Fprintf(os.Stderr, "Unknown command\n")
os.Exit(1)
}
os.Exit(m.Run())
}
func TestInternalNoop(t *testing.T) {
call := Calls.Get("rc/noop")
assert.NotNil(t, call)
in := Params{
"String": "hello",
"Int": 42,
}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
require.NotNil(t, out)
assert.Equal(t, in, out)
}
func TestInternalError(t *testing.T) {
call := Calls.Get("rc/error")
assert.NotNil(t, call)
in := Params{}
out, err := call.Fn(context.Background(), in)
require.Error(t, err)
require.Nil(t, out)
}
func TestInternalList(t *testing.T) {
call := Calls.Get("rc/list")
assert.NotNil(t, call)
in := Params{}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
require.NotNil(t, out)
assert.Equal(t, Params{"commands": Calls.List()}, out)
}
func TestCorePid(t *testing.T) {
call := Calls.Get("core/pid")
assert.NotNil(t, call)
in := Params{}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
require.NotNil(t, out)
pid := out["pid"]
assert.NotEqual(t, nil, pid)
_, ok := pid.(int)
assert.Equal(t, true, ok)
}
func TestCoreMemstats(t *testing.T) {
call := Calls.Get("core/memstats")
assert.NotNil(t, call)
in := Params{}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
require.NotNil(t, out)
sys := out["Sys"]
assert.NotEqual(t, nil, sys)
_, ok := sys.(uint64)
assert.Equal(t, true, ok)
}
func TestCoreGC(t *testing.T) {
call := Calls.Get("core/gc")
assert.NotNil(t, call)
in := Params{}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
require.Nil(t, out)
assert.Equal(t, Params(nil), out)
}
func TestCoreVersion(t *testing.T) {
call := Calls.Get("core/version")
assert.NotNil(t, call)
in := Params{}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
require.NotNil(t, out)
assert.Equal(t, fs.Version, out["version"])
assert.Equal(t, runtime.GOOS, out["os"])
assert.Equal(t, runtime.GOARCH, out["arch"])
assert.Equal(t, runtime.Version(), out["goVersion"])
assert.True(t, strings.HasPrefix(out["osArch"].(string), runtime.GOARCH))
assert.NotEqual(t, "", out["osVersion"].(string))
assert.NotEqual(t, "", out["osKernel"].(string))
_ = out["isGit"].(bool)
v := out["decomposed"].([]int64)
assert.True(t, len(v) >= 2)
}
func TestCoreObscure(t *testing.T) {
call := Calls.Get("core/obscure")
assert.NotNil(t, call)
in := Params{
"clear": "potato",
}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
require.NotNil(t, out)
assert.Equal(t, in["clear"], obscure.MustReveal(out["obscured"].(string)))
}
func TestCoreQuit(t *testing.T) {
	// The call should return an error if the exitCode param cannot be parsed as an int
call := Calls.Get("core/quit")
assert.NotNil(t, call)
in := Params{
"exitCode": "potato",
}
_, err := call.Fn(context.Background(), in)
require.Error(t, err)
}
// core/command: Runs a raw rclone command
func TestCoreCommand(t *testing.T) {
call := Calls.Get("core/command")
test := func(command string, returnType string, wantOutput string, fail bool) {
var rec = httptest.NewRecorder()
var w http.ResponseWriter = rec
in := Params{
"command": command,
"opt": map[string]string{},
"arg": []string{},
"_response": w,
}
if returnType != "" {
in["returnType"] = returnType
} else {
returnType = "COMBINED_OUTPUT"
}
stream := strings.HasPrefix(returnType, "STREAM")
got, err := call.Fn(context.Background(), in)
if stream && fail {
assert.Error(t, err)
} else {
assert.NoError(t, err)
}
if !stream {
assert.Equal(t, wantOutput, got["result"])
assert.Equal(t, fail, got["error"])
} else {
assert.Equal(t, wantOutput, rec.Body.String())
}
assert.Equal(t, http.StatusOK, rec.Result().StatusCode)
}
version := fmt.Sprintf("rclone %s\n", fs.Version)
errorString := "Unknown command\n"
t.Run("OK", func(t *testing.T) {
test("version", "", version, false)
})
t.Run("Fail", func(t *testing.T) {
test("unknown_command", "", version+errorString, true)
})
t.Run("Combined", func(t *testing.T) {
test("unknown_command", "COMBINED_OUTPUT", version+errorString, true)
})
t.Run("Stderr", func(t *testing.T) {
test("unknown_command", "STREAM_ONLY_STDERR", errorString, true)
})
t.Run("Stdout", func(t *testing.T) {
test("unknown_command", "STREAM_ONLY_STDOUT", version, true)
})
t.Run("Stream", func(t *testing.T) {
test("unknown_command", "STREAM", version+errorString, true)
})
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/rc/internal.go | fs/rc/internal.go | // Define the internal rc functions
package rc
import (
"context"
"fmt"
"net/http"
"os"
"os/exec"
"runtime"
"strings"
"time"
"github.com/coreos/go-semver/semver"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/buildinfo"
"github.com/rclone/rclone/lib/debug"
)
func init() {
Add(Call{
Path: "rc/noopauth",
AuthRequired: true,
Fn: rcNoop,
Title: "Echo the input to the output parameters requiring auth",
Help: `
This echoes the input parameters to the output parameters for testing
purposes. It can be used to check that rclone is still alive and to
check that parameter passing is working properly.`,
})
Add(Call{
Path: "rc/noop",
Fn: rcNoop,
Title: "Echo the input to the output parameters",
Help: `
This echoes the input parameters to the output parameters for testing
purposes. It can be used to check that rclone is still alive and to
check that parameter passing is working properly.`,
})
}
// Echo the input to the output parameters
func rcNoop(ctx context.Context, in Params) (out Params, err error) {
return in, nil
}
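// Illustrative sketch (not part of the original file): looking up a registered
// call by path and invoking it directly, much as the HTTP layer does. The
// "rc/noop" path is real; the input keys are placeholders.
func callNoopExample(ctx context.Context) (Params, error) {
	call := Calls.Get("rc/noop")
	if call == nil {
		return nil, fmt.Errorf("rc/noop not registered")
	}
	return call.Fn(ctx, Params{"ping": "pong"})
}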
func init() {
Add(Call{
Path: "rc/error",
Fn: rcError,
Title: "This returns an error",
Help: `
This returns an error with the input as part of its error string.
Useful for testing error handling.`,
})
}
// Return an error regardless
func rcError(ctx context.Context, in Params) (out Params, err error) {
return nil, fmt.Errorf("arbitrary error on input %+v", in)
}
func init() {
Add(Call{
Path: "rc/panic",
Fn: rcPanic,
Title: "This returns an error by panicking",
Help: `
This returns an error with the input as part of its error string.
Useful for testing error handling.`,
})
}
// Return an error regardless
func rcPanic(ctx context.Context, in Params) (out Params, err error) {
panic(fmt.Sprintf("arbitrary error on input %+v", in))
}
func init() {
Add(Call{
Path: "rc/fatal",
Fn: rcFatal,
		Title: "This returns a fatal error",
Help: `
This returns an error with the input as part of its error string.
Useful for testing error handling.`,
})
}
// Return an error regardless
func rcFatal(ctx context.Context, in Params) (out Params, err error) {
fs.Fatalf(nil, "arbitrary error on input %+v", in)
return nil, nil
}
func init() {
Add(Call{
Path: "rc/list",
Fn: rcList,
Title: "List all the registered remote control commands",
Help: `
This lists all the registered remote control commands as a JSON map in
the commands response.`,
})
}
// List the registered commands
func rcList(ctx context.Context, in Params) (out Params, err error) {
out = make(Params)
out["commands"] = Calls.List()
return out, nil
}
func init() {
Add(Call{
Path: "core/pid",
Fn: rcPid,
Title: "Return PID of current process",
Help: `
This returns PID of current process.
Useful for stopping the rclone process.
})
}
// Return PID of current process
func rcPid(ctx context.Context, in Params) (out Params, err error) {
out = make(Params)
out["pid"] = os.Getpid()
return out, nil
}
func init() {
Add(Call{
Path: "core/memstats",
Fn: rcMemStats,
Title: "Returns the memory statistics",
Help: `
This returns the memory statistics of the running program. What the values mean
is explained in the Go docs: https://golang.org/pkg/runtime/#MemStats
The most interesting values for most people are:
- HeapAlloc - this is the amount of memory rclone is actually using
- HeapSys - this is the amount of memory rclone has obtained from the OS
- Sys - this is the total amount of memory requested from the OS
- It is virtual memory so may include unused memory
`,
})
}
// Return the memory statistics
func rcMemStats(ctx context.Context, in Params) (out Params, err error) {
out = make(Params)
var m runtime.MemStats
runtime.ReadMemStats(&m)
out["Alloc"] = m.Alloc
out["TotalAlloc"] = m.TotalAlloc
out["Sys"] = m.Sys
out["Mallocs"] = m.Mallocs
out["Frees"] = m.Frees
out["HeapAlloc"] = m.HeapAlloc
out["HeapSys"] = m.HeapSys
out["HeapIdle"] = m.HeapIdle
out["HeapInuse"] = m.HeapInuse
out["HeapReleased"] = m.HeapReleased
out["HeapObjects"] = m.HeapObjects
out["StackInuse"] = m.StackInuse
out["StackSys"] = m.StackSys
out["MSpanInuse"] = m.MSpanInuse
out["MSpanSys"] = m.MSpanSys
out["MCacheInuse"] = m.MCacheInuse
out["MCacheSys"] = m.MCacheSys
out["BuckHashSys"] = m.BuckHashSys
out["GCSys"] = m.GCSys
out["OtherSys"] = m.OtherSys
return out, nil
}
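// Illustrative example (assumed invocation; the numbers depend on the process):
//   rclone rc core/memstats
// returns a JSON object with the fields assembled above, e.g. HeapAlloc,
// HeapSys and Sys.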
func init() {
Add(Call{
Path: "core/gc",
Fn: rcGc,
Title: "Runs a garbage collection.",
Help: `
This tells the Go runtime to do a garbage collection run. It isn't
necessary to call this normally, but it can be useful for debugging
memory problems.
`,
})
}
// Do a garbage collection run
func rcGc(ctx context.Context, in Params) (out Params, err error) {
runtime.GC()
return nil, nil
}
func init() {
Add(Call{
Path: "core/version",
Fn: rcVersion,
Title: "Shows the current version of rclone, Go and the OS.",
Help: `
This shows the current versions of rclone, Go and the OS:
- version - rclone version, e.g. "v1.71.2"
- decomposed - version number as [major, minor, patch]
- isGit - boolean - true if this was compiled from the git version
- isBeta - boolean - true if this is a beta version
- os - OS in use as according to Go GOOS (e.g. "linux")
- osKernel - OS Kernel version (e.g. "6.8.0-86-generic (x86_64)")
- osVersion - OS Version (e.g. "ubuntu 24.04 (64 bit)")
- osArch - cpu architecture in use (e.g. "arm64 (ARMv8 compatible)")
- arch - cpu architecture in use according to Go GOARCH (e.g. "arm64")
- goVersion - version of Go runtime in use (e.g. "go1.25.0")
- linking - type of rclone executable (static or dynamic)
- goTags - space separated build tags or "none"
`,
})
}
// Return version info
func rcVersion(ctx context.Context, in Params) (out Params, err error) {
version, err := semver.NewVersion(fs.Version[1:])
if err != nil {
return nil, err
}
linking, tagString := buildinfo.GetLinkingAndTags()
osVersion, osKernel := buildinfo.GetOSVersion()
if osVersion == "" {
osVersion = "unknown"
}
if osKernel == "" {
osKernel = "unknown"
}
out = Params{
"version": fs.Version,
"decomposed": version.Slice(),
"isGit": strings.HasSuffix(fs.Version, "-DEV"),
"isBeta": version.PreRelease != "",
"os": runtime.GOOS,
"osVersion": osVersion,
"osKernel": osKernel,
"osArch": buildinfo.GetArch(),
"arch": runtime.GOARCH,
"goVersion": runtime.Version(),
"linking": linking,
"goTags": tagString,
}
return out, nil
}
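// Illustrative example (assumed invocation; the values here are taken from the
// examples in the help text above, not from real output):
//   rclone rc core/version
// might return {"version": "v1.71.2", "os": "linux", "goVersion": "go1.25.0", ...}
// along with the other keys built above.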
func init() {
Add(Call{
Path: "core/obscure",
Fn: rcObscure,
Title: "Obscures a string passed in.",
Help: `
Pass a clear string and rclone will obscure it for the config file:
- clear - string
Returns:
- obscured - string
`,
})
}
// Return obscured string
func rcObscure(ctx context.Context, in Params) (out Params, err error) {
clear, err := in.GetString("clear")
if err != nil {
return nil, err
}
obscured, err := obscure.Obscure(clear)
if err != nil {
return nil, err
}
out = Params{
"obscured": obscured,
}
return out, nil
}
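// Illustrative example (assumed invocation; "mysecret" and the result are
// placeholders):
//   rclone rc core/obscure clear=mysecret
// returns {"obscured": "..."} with whatever obscure.Obscure produced.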
func init() {
Add(Call{
Path: "core/quit",
Fn: rcQuit,
Title: "Terminates the app.",
Help: `
(Optional) Pass an exit code to be used for terminating the app:
- exitCode - int
`,
})
}
// Terminates app
func rcQuit(ctx context.Context, in Params) (out Params, err error) {
code, err := in.GetInt64("exitCode")
if IsErrParamInvalid(err) {
return nil, err
}
if IsErrParamNotFound(err) {
code = 0
}
exitCode := int(code)
go func(exitCode int) {
time.Sleep(time.Millisecond * 1500)
atexit.Run()
os.Exit(exitCode)
}(exitCode)
return nil, nil
}
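// Illustrative example (assumed invocation):
//   rclone rc core/quit exitCode=1
// The rc call itself returns normally; the process exits about 1.5 seconds
// later, after the atexit handlers have run.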
func init() {
Add(Call{
Path: "debug/set-mutex-profile-fraction",
Fn: rcSetMutexProfileFraction,
Title: "Set runtime.SetMutexProfileFraction for mutex profiling.",
Help: `
SetMutexProfileFraction controls the fraction of mutex contention
events that are reported in the mutex profile. On average 1/rate
events are reported. The previous rate is returned.
To turn off profiling entirely, pass rate 0. To just read the current
rate, pass rate < 0. (For n>1 the details of sampling may change.)
Once this is set you can use this to profile the mutex contention:
go tool pprof http://localhost:5572/debug/pprof/mutex
Parameters:
- rate - int
Results:
- previousRate - int
`,
})
}
func rcSetMutexProfileFraction(ctx context.Context, in Params) (out Params, err error) {
rate, err := in.GetInt64("rate")
if err != nil {
return nil, err
}
previousRate := runtime.SetMutexProfileFraction(int(rate))
out = make(Params)
out["previousRate"] = previousRate
return out, nil
}
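// Illustrative example (assumed invocation; previousRate is typically 0 on a
// first call since mutex profiling is off by default):
//   rclone rc debug/set-mutex-profile-fraction rate=100
// returns {"previousRate": 0}, after which the pprof endpoint mentioned in the
// help text starts collecting mutex samples.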
func init() {
Add(Call{
Path: "debug/set-block-profile-rate",
Fn: rcSetBlockProfileRate,
Title: "Set runtime.SetBlockProfileRate for blocking profiling.",
Help: `
SetBlockProfileRate controls the fraction of goroutine blocking events
that are reported in the blocking profile. The profiler aims to sample
an average of one blocking event per rate nanoseconds spent blocked.
To include every blocking event in the profile, pass rate = 1. To turn
off profiling entirely, pass rate <= 0.
After calling this you can use the following to see the blocking profile:
go tool pprof http://localhost:5572/debug/pprof/block
Parameters:
- rate - int
`,
})
}
func rcSetBlockProfileRate(ctx context.Context, in Params) (out Params, err error) {
rate, err := in.GetInt64("rate")
if err != nil {
return nil, err
}
runtime.SetBlockProfileRate(int(rate))
return nil, nil
}
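// Illustrative example (assumed invocation):
//   rclone rc debug/set-block-profile-rate rate=1
// enables block profiling as described in the help text; the call returns no
// output parameters.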
func init() {
Add(Call{
Path: "debug/set-soft-memory-limit",
Fn: rcSetSoftMemoryLimit,
Title: "Call runtime/debug.SetMemoryLimit for setting a soft memory limit for the runtime.",
Help: `
SetMemoryLimit provides the runtime with a soft memory limit.
The runtime undertakes several processes to try to respect this memory limit, including
adjustments to the frequency of garbage collections and returning memory to the underlying
system more aggressively. This limit will be respected even if GOGC=off (or, if SetGCPercent(-1) is executed).
The input limit is provided as bytes, and includes all memory mapped, managed, and not
released by the Go runtime. Notably, it does not account for space used by the Go binary
and memory external to Go, such as memory managed by the underlying system on behalf of
the process, or memory managed by non-Go code inside the same process.
Examples of excluded memory sources include: OS kernel memory held on behalf of the process,
memory allocated by C code, and memory mapped by syscall.Mmap (because it is not managed by the Go runtime).
A zero limit or a limit that's lower than the amount of memory used by the Go runtime may cause
the garbage collector to run nearly continuously. However, the application may still make progress.
The memory limit is always respected by the Go runtime, so to effectively disable this behavior,
set the limit very high. math.MaxInt64 is the canonical value for disabling the limit, but values
much greater than the available memory on the underlying system work just as well.
See https://go.dev/doc/gc-guide for a detailed guide explaining the soft memory limit in more detail,
as well as a variety of common use-cases and scenarios.
SetMemoryLimit returns the previously set memory limit. A negative input does not adjust the limit,
and allows for retrieval of the currently set memory limit.
Parameters:
- mem-limit - int
`,
})
}
func rcSetSoftMemoryLimit(ctx context.Context, in Params) (out Params, err error) {
memLimit, err := in.GetInt64("mem-limit")
if err != nil {
return nil, err
}
oldMemLimit := debug.SetMemoryLimit(memLimit)
out = Params{
"existing-mem-limit": oldMemLimit,
}
return out, nil
}
func init() {
Add(Call{
Path: "debug/set-gc-percent",
Fn: rcSetGCPercent,
Title: "Call runtime/debug.SetGCPercent for setting the garbage collection target percentage.",
Help: `
SetGCPercent sets the garbage collection target percentage: a collection is triggered
when the ratio of freshly allocated data to live data remaining after the previous collection
reaches this percentage. SetGCPercent returns the previous setting. The initial setting is the
value of the GOGC environment variable at startup, or 100 if the variable is not set.
This setting may be effectively reduced in order to maintain a memory limit.
A negative percentage effectively disables garbage collection, unless the memory limit is reached.
See https://pkg.go.dev/runtime/debug#SetMemoryLimit for more details.
Parameters:
- gc-percent - int
`,
})
}
func rcSetGCPercent(ctx context.Context, in Params) (out Params, err error) {
gcPercent, err := in.GetInt64("gc-percent")
if err != nil {
return nil, err
}
oldGCPercent := debug.SetGCPercent(int(gcPercent))
out = Params{
"existing-gc-percent": oldGCPercent,
}
return out, nil
}
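// Illustrative example (assumed invocation):
//   rclone rc debug/set-gc-percent gc-percent=50
// triggers collections when the heap has grown by 50% instead of the default
// 100% and returns the previous setting as {"existing-gc-percent": ...}.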
func init() {
Add(Call{
Path: "core/command",
AuthRequired: true,
Fn: rcRunCommand,
NeedsRequest: true,
NeedsResponse: true,
Title: "Run a rclone terminal command over rc.",
Help: `This takes the following parameters:
- command - a string with the command name.
- arg - a list of arguments for the backend command.
- opt - a map of string to string of options.
- returnType - one of ("COMBINED_OUTPUT", "STREAM", "STREAM_ONLY_STDOUT", "STREAM_ONLY_STDERR").
- Defaults to "COMBINED_OUTPUT" if not set.
- The STREAM returnTypes will write the output to the body of the HTTP message.
- The COMBINED_OUTPUT will write the output to the "result" parameter.
Returns:
- result - result from the backend command.
- Only set when using returnType "COMBINED_OUTPUT".
- error - set if rclone exits with an error code.
- returnType - one of ("COMBINED_OUTPUT", "STREAM", "STREAM_ONLY_STDOUT", "STREAM_ONLY_STDERR").
Example:
rclone rc core/command command=ls -a mydrive:/ -o max-depth=1
rclone rc core/command -a ls -a mydrive:/ -o max-depth=1
Returns:
` + "```" + `
{
"error": false,
"result": "<Raw command line output>"
}
OR
{
"error": true,
"result": "<Raw command line output>"
}
` + "```" + `
`,
})
}
// rcRunCommand runs an rclone command with the given args and flags
func rcRunCommand(ctx context.Context, in Params) (out Params, err error) {
command, err := in.GetString("command")
if err != nil {
command = ""
}
var opt = map[string]string{}
err = in.GetStructMissingOK("opt", &opt)
if err != nil {
return nil, err
}
var arg = []string{}
err = in.GetStructMissingOK("arg", &arg)
if err != nil {
return nil, err
}
returnType, err := in.GetString("returnType")
if err != nil {
returnType = "COMBINED_OUTPUT"
}
var httpResponse http.ResponseWriter
httpResponse, err = in.GetHTTPResponseWriter()
if err != nil {
return nil, fmt.Errorf("response object is required\n%w", err)
}
var allArgs = []string{}
if command != "" {
// Add the command e.g.: ls to the args
allArgs = append(allArgs, command)
}
// Add all from arg
allArgs = append(allArgs, arg...)
// Add the flags to the args, e.g. --max-depth 1 comes in as { max-depth 1 }.
// Convert it to [ --max-depth, 1 ] and append to the args list
for key, value := range opt {
if len(key) == 1 {
allArgs = append(allArgs, "-"+key)
} else {
allArgs = append(allArgs, "--"+key)
}
allArgs = append(allArgs, value)
}
// Get the path for the current executable which was used to run rclone.
ex, err := os.Executable()
if err != nil {
return nil, err
}
cmd := exec.CommandContext(ctx, ex, allArgs...)
if returnType == "COMBINED_OUTPUT" {
// Run the command and get the output for error and stdout combined.
out, err := cmd.CombinedOutput()
if err != nil {
return Params{
"result": string(out),
"error": true,
}, nil
}
return Params{
"result": string(out),
"error": false,
}, nil
} else if returnType == "STREAM_ONLY_STDOUT" {
cmd.Stdout = httpResponse
} else if returnType == "STREAM_ONLY_STDERR" {
cmd.Stderr = httpResponse
} else if returnType == "STREAM" {
cmd.Stdout = httpResponse
cmd.Stderr = httpResponse
} else {
return nil, fmt.Errorf("unknown returnType %q", returnType)
}
err = cmd.Run()
return nil, err
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/rc/rc_test.go | fs/rc/rc_test.go | package rc
import (
"bytes"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestWriteJSON(t *testing.T) {
var buf bytes.Buffer
err := WriteJSON(&buf, Params{
"String": "hello",
"Int": 42,
})
require.NoError(t, err)
assert.Equal(t, `{
"Int": 42,
"String": "hello"
}
`, buf.String())
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/rc/jobs/job_test.go | fs/rc/jobs/job_test.go | package jobs
import (
"context"
"encoding/json"
"errors"
"runtime"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fstest/testy"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestNewJobs(t *testing.T) {
jobs := newJobs()
assert.Equal(t, 0, len(jobs.jobs))
}
func TestJobsKickExpire(t *testing.T) {
testy.SkipUnreliable(t)
jobs := newJobs()
jobs.opt.JobExpireInterval = fs.Duration(time.Millisecond)
assert.Equal(t, false, jobs.expireRunning)
jobs.kickExpire()
jobs.mu.Lock()
assert.Equal(t, true, jobs.expireRunning)
jobs.mu.Unlock()
time.Sleep(10 * time.Millisecond)
jobs.mu.Lock()
assert.Equal(t, false, jobs.expireRunning)
jobs.mu.Unlock()
}
func TestJobsExpire(t *testing.T) {
testy.SkipUnreliable(t)
ctx := context.Background()
wait := make(chan struct{})
jobs := newJobs()
jobs.opt.JobExpireInterval = fs.Duration(time.Millisecond)
assert.Equal(t, false, jobs.expireRunning)
var gotJobID int64
var gotJob *Job
job, out, err := jobs.NewJob(ctx, func(ctx context.Context, in rc.Params) (rc.Params, error) {
defer close(wait)
var ok bool
gotJobID, ok = GetJobID(ctx)
assert.True(t, ok)
gotJob, ok = GetJob(ctx)
assert.True(t, ok)
return in, nil
}, rc.Params{"_async": true})
require.NoError(t, err)
assert.Equal(t, 2, len(out), "check output has jobid and executeId")
<-wait
assert.Equal(t, job.ID, gotJobID, "check can get JobID from ctx")
assert.Equal(t, job, gotJob, "check can get Job from ctx")
assert.Equal(t, 1, len(jobs.jobs))
jobs.Expire()
assert.Equal(t, 1, len(jobs.jobs))
jobs.mu.Lock()
job.mu.Lock()
job.EndTime = time.Now().Add(-time.Duration(rc.Opt.JobExpireDuration) - 60*time.Second)
assert.Equal(t, true, jobs.expireRunning)
job.mu.Unlock()
jobs.mu.Unlock()
time.Sleep(250 * time.Millisecond)
jobs.mu.Lock()
assert.Equal(t, false, jobs.expireRunning)
assert.Equal(t, 0, len(jobs.jobs))
jobs.mu.Unlock()
}
var noopFn = func(ctx context.Context, in rc.Params) (rc.Params, error) {
return nil, nil
}
func TestJobsIDs(t *testing.T) {
ctx := context.Background()
jobs := newJobs()
job1, _, err := jobs.NewJob(ctx, noopFn, rc.Params{"_async": true})
require.NoError(t, err)
job2, _, err := jobs.NewJob(ctx, noopFn, rc.Params{"_async": true})
require.NoError(t, err)
wantIDs := []int64{job1.ID, job2.ID}
gotIDs := jobs.IDs()
require.Equal(t, 2, len(gotIDs))
if gotIDs[0] != wantIDs[0] {
gotIDs[0], gotIDs[1] = gotIDs[1], gotIDs[0]
}
assert.Equal(t, wantIDs, gotIDs)
}
func TestJobsExecuteIDs(t *testing.T) {
ctx := context.Background()
jobs := newJobs()
job1, _, err := jobs.NewJob(ctx, noopFn, rc.Params{"_async": true})
require.NoError(t, err)
job2, _, err := jobs.NewJob(ctx, noopFn, rc.Params{"_async": true})
require.NoError(t, err)
assert.Equal(t, executeID, job1.ExecuteID, "execute ID should match global executeID")
assert.Equal(t, executeID, job2.ExecuteID, "execute ID should match global executeID")
assert.True(t, job1.ExecuteID == job2.ExecuteID, "just to be sure, all the jobs share the same executeID")
}
func TestJobsGet(t *testing.T) {
ctx := context.Background()
jobs := newJobs()
job, _, err := jobs.NewJob(ctx, noopFn, rc.Params{"_async": true})
require.NoError(t, err)
assert.Equal(t, job, jobs.Get(job.ID))
assert.Nil(t, jobs.Get(123123123123))
}
var longFn = func(ctx context.Context, in rc.Params) (rc.Params, error) {
time.Sleep(1 * time.Hour)
return nil, nil
}
var shortFn = func(ctx context.Context, in rc.Params) (rc.Params, error) {
time.Sleep(time.Millisecond)
return nil, nil
}
var ctxFn = func(ctx context.Context, in rc.Params) (rc.Params, error) {
<-ctx.Done()
return nil, ctx.Err()
}
var ctxParmFn = func(paramCtx context.Context, returnError bool) func(ctx context.Context, in rc.Params) (rc.Params, error) {
return func(ctx context.Context, in rc.Params) (rc.Params, error) {
<-paramCtx.Done()
if returnError {
return nil, ctx.Err()
}
return rc.Params{}, nil
}
}
const (
sleepTime = 100 * time.Millisecond
floatSleepTime = float64(sleepTime) / 1e9 / 2
)
// sleep for some time so job.Duration is non-0
func sleepJob() {
time.Sleep(sleepTime)
}
func TestJobFinish(t *testing.T) {
ctx := context.Background()
jobs := newJobs()
job, _, err := jobs.NewJob(ctx, longFn, rc.Params{"_async": true})
require.NoError(t, err)
sleepJob()
assert.Equal(t, true, job.EndTime.IsZero())
assert.Equal(t, rc.Params(nil), job.Output)
assert.Equal(t, 0.0, job.Duration)
assert.Equal(t, "", job.Error)
assert.Equal(t, false, job.Success)
assert.Equal(t, false, job.Finished)
wantOut := rc.Params{"a": 1}
job.finish(wantOut, nil)
assert.Equal(t, false, job.EndTime.IsZero())
assert.Equal(t, wantOut, job.Output)
assert.True(t, job.Duration >= floatSleepTime)
assert.Equal(t, "", job.Error)
assert.Equal(t, true, job.Success)
assert.Equal(t, true, job.Finished)
job, _, err = jobs.NewJob(ctx, longFn, rc.Params{"_async": true})
require.NoError(t, err)
sleepJob()
job.finish(nil, nil)
assert.Equal(t, false, job.EndTime.IsZero())
assert.Equal(t, rc.Params{}, job.Output)
assert.True(t, job.Duration >= floatSleepTime)
assert.Equal(t, "", job.Error)
assert.Equal(t, true, job.Success)
assert.Equal(t, true, job.Finished)
job, _, err = jobs.NewJob(ctx, longFn, rc.Params{"_async": true})
require.NoError(t, err)
sleepJob()
job.finish(wantOut, errors.New("potato"))
assert.Equal(t, false, job.EndTime.IsZero())
assert.Equal(t, wantOut, job.Output)
assert.True(t, job.Duration >= floatSleepTime)
assert.Equal(t, "potato", job.Error)
assert.Equal(t, false, job.Success)
assert.Equal(t, true, job.Finished)
}
// We've tested the functionality of run() already as it is
// part of NewJob, so now we just test the panic catching
func TestJobRunPanic(t *testing.T) {
ctx := context.Background()
wait := make(chan struct{})
boom := func(ctx context.Context, in rc.Params) (rc.Params, error) {
sleepJob()
defer close(wait)
panic("boom")
}
jobs := newJobs()
job, _, err := jobs.NewJob(ctx, boom, rc.Params{"_async": true})
require.NoError(t, err)
<-wait
runtime.Gosched() // yield to make sure job is updated
// Wait a short time for the panic to propagate
for i := range uint(10) {
job.mu.Lock()
e := job.Error
job.mu.Unlock()
if e != "" {
break
}
time.Sleep(time.Millisecond << i)
}
job.mu.Lock()
assert.Equal(t, false, job.EndTime.IsZero())
assert.Equal(t, rc.Params{}, job.Output)
assert.True(t, job.Duration >= floatSleepTime)
assert.Contains(t, job.Error, "panic received: boom")
assert.Equal(t, false, job.Success)
assert.Equal(t, true, job.Finished)
job.mu.Unlock()
}
func TestJobsNewJob(t *testing.T) {
ctx := context.Background()
jobID.Store(0)
jobs := newJobs()
job, out, err := jobs.NewJob(ctx, noopFn, rc.Params{"_async": true})
require.NoError(t, err)
assert.Equal(t, int64(1), job.ID)
assert.Equal(t, executeID, job.ExecuteID)
assert.Equal(t, rc.Params{"jobid": int64(1), "executeId": executeID}, out)
assert.Equal(t, job, jobs.Get(1))
assert.NotEmpty(t, job.Stop)
}
func TestStartJob(t *testing.T) {
ctx := context.Background()
jobID.Store(0)
job, out, err := NewJob(ctx, longFn, rc.Params{"_async": true})
assert.NoError(t, err)
assert.Equal(t, rc.Params{"jobid": int64(1), "executeId": executeID}, out)
assert.Equal(t, int64(1), job.ID)
assert.Equal(t, executeID, job.ExecuteID)
}
func TestExecuteJob(t *testing.T) {
jobID.Store(0)
job, out, err := NewJob(context.Background(), shortFn, rc.Params{})
assert.NoError(t, err)
assert.Equal(t, int64(1), job.ID)
assert.Equal(t, rc.Params{}, out)
}
func TestExecuteJobWithConfig(t *testing.T) {
ctx := context.Background()
jobID.Store(0)
called := false
jobFn := func(ctx context.Context, in rc.Params) (rc.Params, error) {
ci := fs.GetConfig(ctx)
assert.Equal(t, 42*fs.Mebi, ci.BufferSize)
called = true
return nil, nil
}
_, _, err := NewJob(context.Background(), jobFn, rc.Params{
"_config": rc.Params{
"BufferSize": "42M",
},
})
require.NoError(t, err)
assert.Equal(t, true, called)
// Retest with string parameter
jobID.Store(0)
called = false
_, _, err = NewJob(ctx, jobFn, rc.Params{
"_config": `{"BufferSize": "42M"}`,
})
require.NoError(t, err)
assert.Equal(t, true, called)
// Check that wasn't the default
ci := fs.GetConfig(ctx)
assert.NotEqual(t, 42*fs.Mebi, ci.BufferSize)
}
func TestExecuteJobWithFilter(t *testing.T) {
ctx := context.Background()
called := false
jobID.Store(0)
jobFn := func(ctx context.Context, in rc.Params) (rc.Params, error) {
fi := filter.GetConfig(ctx)
assert.Equal(t, fs.SizeSuffix(1024), fi.Opt.MaxSize)
assert.Equal(t, []string{"a", "b", "c"}, fi.Opt.IncludeRule)
called = true
return nil, nil
}
_, _, err := NewJob(ctx, jobFn, rc.Params{
"_filter": rc.Params{
"IncludeRule": []string{"a", "b", "c"},
"MaxSize": "1k",
},
})
require.NoError(t, err)
assert.Equal(t, true, called)
}
func TestExecuteJobWithGroup(t *testing.T) {
ctx := context.Background()
jobID.Store(0)
called := false
jobFn := func(ctx context.Context, in rc.Params) (rc.Params, error) {
called = true
group, found := accounting.StatsGroupFromContext(ctx)
assert.Equal(t, true, found)
assert.Equal(t, "myparty", group)
return nil, nil
}
_, _, err := NewJob(ctx, jobFn, rc.Params{
"_group": "myparty",
})
require.NoError(t, err)
assert.Equal(t, true, called)
}
func TestExecuteJobErrorPropagation(t *testing.T) {
ctx := context.Background()
jobID.Store(0)
testErr := errors.New("test error")
errorFn := func(ctx context.Context, in rc.Params) (out rc.Params, err error) {
return nil, testErr
}
_, _, err := NewJob(ctx, errorFn, rc.Params{})
assert.Equal(t, testErr, err)
}
func TestRcJobStatus(t *testing.T) {
ctx := context.Background()
jobID.Store(0)
_, _, err := NewJob(ctx, longFn, rc.Params{"_async": true})
assert.NoError(t, err)
call := rc.Calls.Get("job/status")
assert.NotNil(t, call)
in := rc.Params{"jobid": 1}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
require.NotNil(t, out)
assert.Equal(t, float64(1), out["id"])
assert.Equal(t, executeID, out["executeId"])
assert.Equal(t, "", out["error"])
assert.Equal(t, false, out["finished"])
assert.Equal(t, false, out["success"])
in = rc.Params{"jobid": 123123123}
_, err = call.Fn(context.Background(), in)
require.Error(t, err)
assert.Contains(t, err.Error(), "job not found")
in = rc.Params{"jobidx": 123123123}
_, err = call.Fn(context.Background(), in)
require.Error(t, err)
assert.Contains(t, err.Error(), "Didn't find key")
}
func TestRcJobList(t *testing.T) {
ctx := context.Background()
jobID.Store(0)
_, _, err := NewJob(ctx, longFn, rc.Params{"_async": true})
assert.NoError(t, err)
call := rc.Calls.Get("job/list")
assert.NotNil(t, call)
in := rc.Params{}
out1, err := call.Fn(context.Background(), in)
require.NoError(t, err)
require.NotNil(t, out1)
assert.Equal(t, executeID, out1["executeId"], "should have executeId")
assert.Equal(t, []int64{1}, out1["jobids"], "should have job listed")
assert.Equal(t, []int64{1}, out1["runningIds"], "should have running job")
assert.Equal(t, []int64{}, out1["finishedIds"], "should not have finished job")
_, _, err = NewJob(ctx, longFn, rc.Params{"_async": true})
assert.NoError(t, err)
call = rc.Calls.Get("job/list")
assert.NotNil(t, call)
in = rc.Params{}
out2, err := call.Fn(context.Background(), in)
require.NoError(t, err)
require.NotNil(t, out2)
assert.Equal(t, 2, len(out2["jobids"].([]int64)), "should have all jobs listed")
assert.Equal(t, out1["executeId"], out2["executeId"], "executeId should be the same")
}
func TestRcAsyncJobStop(t *testing.T) {
ctx := context.Background()
jobID.Store(0)
_, _, err := NewJob(ctx, ctxFn, rc.Params{"_async": true})
assert.NoError(t, err)
call := rc.Calls.Get("job/stop")
assert.NotNil(t, call)
in := rc.Params{"jobid": 1}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
require.Empty(t, out)
in = rc.Params{"jobid": 123123123}
_, err = call.Fn(context.Background(), in)
require.Error(t, err)
assert.Contains(t, err.Error(), "job not found")
in = rc.Params{"jobidx": 123123123}
_, err = call.Fn(context.Background(), in)
require.Error(t, err)
assert.Contains(t, err.Error(), "Didn't find key")
time.Sleep(10 * time.Millisecond)
call = rc.Calls.Get("job/status")
assert.NotNil(t, call)
in = rc.Params{"jobid": 1}
out, err = call.Fn(context.Background(), in)
require.NoError(t, err)
require.NotNil(t, out)
assert.Equal(t, float64(1), out["id"])
assert.Equal(t, "context canceled", out["error"])
assert.Equal(t, true, out["finished"])
assert.Equal(t, false, out["success"])
}
func TestRcSyncJobStop(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
go func() {
jobID.Store(0)
job, out, err := NewJob(ctx, ctxFn, rc.Params{})
assert.Error(t, err)
assert.Equal(t, int64(1), job.ID)
assert.Equal(t, rc.Params{}, out)
}()
time.Sleep(10 * time.Millisecond)
call := rc.Calls.Get("job/stop")
assert.NotNil(t, call)
in := rc.Params{"jobid": 1}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
require.Empty(t, out)
in = rc.Params{"jobid": 123123123}
_, err = call.Fn(context.Background(), in)
require.Error(t, err)
assert.Contains(t, err.Error(), "job not found")
in = rc.Params{"jobidx": 123123123}
_, err = call.Fn(context.Background(), in)
require.Error(t, err)
assert.Contains(t, err.Error(), "Didn't find key")
cancel()
time.Sleep(10 * time.Millisecond)
call = rc.Calls.Get("job/status")
assert.NotNil(t, call)
in = rc.Params{"jobid": 1}
out, err = call.Fn(context.Background(), in)
require.NoError(t, err)
require.NotNil(t, out)
assert.Equal(t, float64(1), out["id"])
assert.Equal(t, "context canceled", out["error"])
assert.Equal(t, true, out["finished"])
assert.Equal(t, false, out["success"])
}
func TestRcJobStopGroup(t *testing.T) {
ctx := context.Background()
jobID.Store(0)
_, _, err := NewJob(ctx, ctxFn, rc.Params{
"_async": true,
"_group": "myparty",
})
require.NoError(t, err)
_, _, err = NewJob(ctx, ctxFn, rc.Params{
"_async": true,
"_group": "myparty",
})
require.NoError(t, err)
call := rc.Calls.Get("job/stopgroup")
assert.NotNil(t, call)
in := rc.Params{"group": "myparty"}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
require.Empty(t, out)
in = rc.Params{}
_, err = call.Fn(context.Background(), in)
require.Error(t, err)
assert.Contains(t, err.Error(), "Didn't find key")
time.Sleep(10 * time.Millisecond)
call = rc.Calls.Get("job/status")
assert.NotNil(t, call)
for i := 1; i <= 2; i++ {
in = rc.Params{"jobid": i}
out, err = call.Fn(context.Background(), in)
require.NoError(t, err)
require.NotNil(t, out)
assert.Equal(t, "myparty", out["group"])
assert.Equal(t, "context canceled", out["error"])
assert.Equal(t, true, out["finished"])
assert.Equal(t, false, out["success"])
}
}
func TestOnFinish(t *testing.T) {
jobID.Store(0)
done := make(chan struct{})
ctx, cancel := context.WithCancel(context.Background())
job, _, err := NewJob(ctx, ctxParmFn(ctx, false), rc.Params{"_async": true})
assert.NoError(t, err)
stop, err := OnFinish(job.ID, func() { close(done) })
defer stop()
assert.NoError(t, err)
cancel()
select {
case <-done:
case <-time.After(time.Second):
t.Fatal("Timeout waiting for OnFinish to fire")
}
}
func TestOnFinishAlreadyFinished(t *testing.T) {
jobID.Store(0)
done := make(chan struct{})
ctx := t.Context()
job, _, err := NewJob(ctx, shortFn, rc.Params{})
assert.NoError(t, err)
stop, err := OnFinish(job.ID, func() { close(done) })
defer stop()
assert.NoError(t, err)
select {
case <-done:
case <-time.After(time.Second):
t.Fatal("Timeout waiting for OnFinish to fire")
}
}
func TestOnFinishDataRace(t *testing.T) {
jobID.Store(0)
job, _, err := NewJob(context.Background(), ctxFn, rc.Params{"_async": true})
assert.NoError(t, err)
var expect, got uint64
finished := make(chan struct{})
stop, stopped := make(chan struct{}), make(chan struct{})
go func() {
Loop:
for {
select {
case <-stop:
break Loop
default:
_, err := OnFinish(job.ID, func() {
finished <- struct{}{}
})
assert.NoError(t, err)
expect += 1
}
}
close(stopped)
}()
time.Sleep(10 * time.Millisecond)
job.Stop()
// Wait for the first OnFinish to fire
<-finished
got += 1
// Stop the OnFinish producer
close(stop)
<-stopped
timeout := time.After(5 * time.Second)
for {
if got == expect {
break
}
select {
case <-finished:
got += 1
case <-timeout:
t.Fatal("Timeout waiting for all OnFinish calls to fire")
}
}
}
// Register some test rc calls
func init() {
rc.Add(rc.Call{
Path: "test/needs_request",
NeedsRequest: true,
})
rc.Add(rc.Call{
Path: "test/needs_response",
NeedsResponse: true,
})
}
func TestNewJobFromParams(t *testing.T) {
ctx := context.Background()
for _, test := range []struct {
in rc.Params
want rc.Params
}{{
in: rc.Params{
"_path": "rc/noop",
"a": "potato",
},
want: rc.Params{
"a": "potato",
},
}, {
in: rc.Params{
"_path": "rc/noop",
"b": "sausage",
},
want: rc.Params{
"b": "sausage",
},
}, {
in: rc.Params{
"_path": "rc/error",
"e": "sausage",
},
want: rc.Params{
"error": "arbitrary error on input map[e:sausage]",
"input": rc.Params{
"e": "sausage",
},
"path": "rc/error",
"status": 500,
},
}, {
in: rc.Params{
"_path": "bad/path",
"param": "sausage",
},
want: rc.Params{
"error": "couldn't find path \"bad/path\"",
"input": rc.Params{
"param": "sausage",
},
"path": "bad/path",
"status": 404,
},
}, {
in: rc.Params{
"_path": "test/needs_request",
},
want: rc.Params{
"error": "can't run path \"test/needs_request\" as it needs the request",
"input": rc.Params{},
"path": "test/needs_request",
"status": 400,
},
}, {
in: rc.Params{
"_path": "test/needs_response",
},
want: rc.Params{
"error": "can't run path \"test/needs_response\" as it needs the response",
"input": rc.Params{},
"path": "test/needs_response",
"status": 400,
},
}, {
in: rc.Params{
"nopath": "BOOM",
},
want: rc.Params{
"error": "Didn't find key \"_path\" in input",
"input": rc.Params{
"nopath": "BOOM",
},
"path": "",
"status": 400,
},
}} {
got := NewJobFromParams(ctx, test.in)
assert.Equal(t, test.want, got)
}
}
func TestNewJobFromBytes(t *testing.T) {
ctx := context.Background()
for _, test := range []struct {
in string
want string
}{{
in: `{
"_path": "rc/noop",
"a": "potato"
}`,
want: `{
"a": "potato"
}
`,
}, {
in: `{
"_path": "rc/error",
"e": "sausage"
}`,
want: `{
"error": "arbitrary error on input map[e:sausage]",
"input": {
"e": "sausage"
},
"path": "rc/error",
"status": 500
}
`,
}, {
in: `parse error`,
want: `{
"error": "invalid character 'p' looking for beginning of value",
"input": null,
"path": "unknown",
"status": 400
}
`,
}, {
in: `"just a string"`,
want: `{
"error": "json: cannot unmarshal string into Go value of type rc.Params",
"input": null,
"path": "unknown",
"status": 400
}
`,
}} {
got := NewJobFromBytes(ctx, []byte(test.in))
assert.Equal(t, test.want, string(got))
}
}
func TestJobsBatch(t *testing.T) {
ctx := context.Background()
call := rc.Calls.Get("job/batch")
assert.NotNil(t, call)
inJSON := `{
"inputs": [
{
"_path": "rc/noop",
"a": "potato"
},
"bad string",
{
"_path": "rc/noop",
"b": "sausage"
},
{
"_path": "rc/error",
"e": "sausage"
},
{
"_path": "bad/path",
"param": "sausage"
},
{
"_path": "test/needs_request"
},
{
"_path": "test/needs_response"
},
{
"nopath": "BOOM"
}
]
}
`
var in rc.Params
require.NoError(t, json.Unmarshal([]byte(inJSON), &in))
wantJSON := `{
"results": [
{
"a": "potato"
},
{
"error": "\"inputs\" items must be objects not string",
"input": null,
"path": "unknown",
"status": 400
},
{
"b": "sausage"
},
{
"error": "arbitrary error on input map[e:sausage]",
"input": {
"e": "sausage"
},
"path": "rc/error",
"status": 500
},
{
"error": "couldn't find path \"bad/path\"",
"input": {
"param": "sausage"
},
"path": "bad/path",
"status": 404
},
{
"error": "can't run path \"test/needs_request\" as it needs the request",
"input": {},
"path": "test/needs_request",
"status": 400
},
{
"error": "can't run path \"test/needs_response\" as it needs the response",
"input": {},
"path": "test/needs_response",
"status": 400
},
{
"error": "Didn't find key \"_path\" in input",
"input": {
"nopath": "BOOM"
},
"path": "",
"status": 400
}
]
}
`
var want rc.Params
require.NoError(t, json.Unmarshal([]byte(wantJSON), &want))
out, err := call.Fn(ctx, in)
require.NoError(t, err)
var got rc.Params
require.NoError(t, rc.Reshape(&got, out))
assert.Equal(t, want, got)
}
func TestJobsBatchConcurrent(t *testing.T) {
ctx := context.Background()
for concurrency := range 10 {
in := rc.Params{}
var inputs []any
var results []rc.Params
for i := range 100 {
in := map[string]any{
"_path": "rc/noop",
"i": i,
}
inputs = append(inputs, in)
results = append(results, rc.Params{
"i": i,
})
}
in["inputs"] = inputs
want := rc.Params{
"results": results,
}
if concurrency > 0 {
in["concurrency"] = concurrency
}
call := rc.Calls.Get("job/batch")
assert.NotNil(t, call)
got, err := call.Fn(ctx, in)
require.NoError(t, err)
assert.Equal(t, want, got)
}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/rc/jobs/job.go | fs/rc/jobs/job.go | // Package jobs manages background jobs that the rc is running.
package jobs
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"runtime/debug"
"slices"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/google/uuid"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/rc"
"golang.org/x/sync/errgroup"
)
// Fill in these to avoid circular dependencies
func init() {
cache.JobOnFinish = OnFinish
cache.JobGetJobID = GetJobID
}
// Job describes an asynchronous task started via the rc package
type Job struct {
mu sync.Mutex
ID int64 `json:"id"`
ExecuteID string `json:"executeId"`
Group string `json:"group"`
StartTime time.Time `json:"startTime"`
EndTime time.Time `json:"endTime"`
Error string `json:"error"`
Finished bool `json:"finished"`
Success bool `json:"success"`
Duration float64 `json:"duration"`
Output rc.Params `json:"output"`
Stop func() `json:"-"`
listeners []*func()
// realErr is the Error before printing it as a string, it's used to return
// the real error to the upper application layers while still printing the
// string error message.
realErr error
}
// mark the job as finished
func (job *Job) finish(out rc.Params, err error) {
job.mu.Lock()
job.EndTime = time.Now()
if out == nil {
out = make(rc.Params)
}
job.Output = out
job.Duration = job.EndTime.Sub(job.StartTime).Seconds()
if err != nil {
job.realErr = err
job.Error = err.Error()
job.Success = false
} else {
job.realErr = nil
job.Error = ""
job.Success = true
}
job.Finished = true
// Notify listeners that the job is finished
for i := range job.listeners {
go (*job.listeners[i])()
}
job.mu.Unlock()
running.kickExpire() // make sure this job gets expired
}
func (job *Job) removeListener(fn *func()) {
job.mu.Lock()
defer job.mu.Unlock()
for i, ln := range job.listeners {
if ln == fn {
job.listeners = slices.Delete(job.listeners, i, i+1)
return
}
}
}
// OnFinish adds listener to job that will be triggered when job is finished.
// It returns a function to cancel listening.
func (job *Job) OnFinish(fn func()) func() {
job.mu.Lock()
defer job.mu.Unlock()
if job.Finished {
go fn()
} else {
job.listeners = append(job.listeners, &fn)
}
return func() { job.removeListener(&fn) }
}
// run the job until completion writing the return status
func (job *Job) run(ctx context.Context, fn rc.Func, in rc.Params) {
defer func() {
if r := recover(); r != nil {
job.finish(nil, fmt.Errorf("panic received: %v \n%s", r, string(debug.Stack())))
}
}()
job.finish(fn(ctx, in))
}
// Jobs describes a collection of running tasks
type Jobs struct {
mu sync.RWMutex
jobs map[int64]*Job
opt *rc.Options
expireRunning bool
}
var (
running = newJobs()
jobID atomic.Int64
// executeID is a unique ID for this rclone execution
executeID = uuid.New().String()
)
// newJobs makes a new Jobs structure
func newJobs() *Jobs {
return &Jobs{
jobs: map[int64]*Job{},
opt: &rc.Opt,
}
}
// SetOpt sets the options when they are known
func SetOpt(opt *rc.Options) {
running.opt = opt
}
// SetInitialJobID allows for setting jobID before starting any jobs.
func SetInitialJobID(id int64) {
if !jobID.CompareAndSwap(0, id) {
panic("Setting jobID is only possible before starting any jobs")
}
}
// kickExpire makes sure Expire is running
func (jobs *Jobs) kickExpire() {
jobs.mu.Lock()
defer jobs.mu.Unlock()
if !jobs.expireRunning {
time.AfterFunc(time.Duration(jobs.opt.JobExpireInterval), jobs.Expire)
jobs.expireRunning = true
}
}
// Expire expires any jobs that haven't been collected
func (jobs *Jobs) Expire() {
jobs.mu.Lock()
defer jobs.mu.Unlock()
now := time.Now()
for ID, job := range jobs.jobs {
job.mu.Lock()
if job.Finished && now.Sub(job.EndTime) > time.Duration(jobs.opt.JobExpireDuration) {
delete(jobs.jobs, ID)
}
job.mu.Unlock()
}
if len(jobs.jobs) != 0 {
time.AfterFunc(time.Duration(jobs.opt.JobExpireInterval), jobs.Expire)
jobs.expireRunning = true
} else {
jobs.expireRunning = false
}
}
// IDs returns the IDs of the running jobs
func (jobs *Jobs) IDs() (IDs []int64) {
jobs.mu.RLock()
defer jobs.mu.RUnlock()
IDs = []int64{}
for ID := range jobs.jobs {
IDs = append(IDs, ID)
}
return IDs
}
// Stats returns the IDs of the running and finished jobs
func (jobs *Jobs) Stats() (running []int64, finished []int64) {
jobs.mu.RLock()
defer jobs.mu.RUnlock()
running = []int64{}
finished = []int64{}
for jobID := range jobs.jobs {
if jobs.jobs[jobID].Finished {
finished = append(finished, jobID)
} else {
running = append(running, jobID)
}
}
return running, finished
}
// Get a job with a given ID or nil if it doesn't exist
func (jobs *Jobs) Get(ID int64) *Job {
jobs.mu.RLock()
defer jobs.mu.RUnlock()
return jobs.jobs[ID]
}
// Check to see if the group is set
func getGroup(ctx context.Context, in rc.Params, id int64) (context.Context, string, error) {
group, err := in.GetString("_group")
if rc.NotErrParamNotFound(err) {
return ctx, "", err
}
delete(in, "_group")
if group == "" {
group = fmt.Sprintf("job/%d", id)
}
ctx = accounting.WithStatsGroup(ctx, group)
return ctx, group, nil
}
// See if _async is set returning a boolean and a possible new context
func getAsync(ctx context.Context, in rc.Params) (context.Context, bool, error) {
isAsync, err := in.GetBool("_async")
if rc.NotErrParamNotFound(err) {
return ctx, false, err
}
delete(in, "_async") // remove the async parameter after parsing
if isAsync {
// unlink this job from the current context
ctx = context.Background()
}
return ctx, isAsync, nil
}
// See if _config is set and if so adjust ctx to include it
func getConfig(ctx context.Context, in rc.Params) (context.Context, error) {
if _, ok := in["_config"]; !ok {
return ctx, nil
}
ctx, ci := fs.AddConfig(ctx)
err := in.GetStruct("_config", ci)
if err != nil {
return ctx, err
}
delete(in, "_config") // remove the parameter
return ctx, nil
}
// See if _filter is set and if so adjust ctx to include it
func getFilter(ctx context.Context, in rc.Params) (context.Context, error) {
if _, ok := in["_filter"]; !ok {
return ctx, nil
}
// Copy of the current filter options
opt := filter.GetConfig(ctx).Opt
// Update the options from the parameter
err := in.GetStruct("_filter", &opt)
if err != nil {
return ctx, err
}
fi, err := filter.NewFilter(&opt)
if err != nil {
return ctx, err
}
ctx = filter.ReplaceConfig(ctx, fi)
delete(in, "_filter") // remove the parameter
return ctx, nil
}
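// Illustrative input (assumed values, mirroring the tests in job_test.go): a
// single rc request can combine these per-call overrides, e.g.
//   {"_async": true, "_group": "myparty", "_config": {"BufferSize": "42M"}, "_filter": {"MaxSize": "1k"}}
// Each special key is removed from the input by the helpers above before the
// job function sees the remaining parameters.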
type jobKeyType struct{}
// Key for adding jobs to ctx
var jobKey = jobKeyType{}
// NewJob creates a Job and executes it, possibly in the background if _async is set
func (jobs *Jobs) NewJob(ctx context.Context, fn rc.Func, in rc.Params) (job *Job, out rc.Params, err error) {
id := jobID.Add(1)
in = in.Copy() // copy input so we can change it
ctx, isAsync, err := getAsync(ctx, in)
if err != nil {
return nil, nil, err
}
ctx, err = getConfig(ctx, in)
if err != nil {
return nil, nil, err
}
ctx, err = getFilter(ctx, in)
if err != nil {
return nil, nil, err
}
ctx, group, err := getGroup(ctx, in, id)
if err != nil {
return nil, nil, err
}
ctx, cancel := context.WithCancel(ctx)
stop := func() {
cancel()
// Wait for cancel to propagate before returning.
<-ctx.Done()
}
job = &Job{
ID: id,
ExecuteID: executeID,
Group: group,
StartTime: time.Now(),
Stop: stop,
}
jobs.mu.Lock()
jobs.jobs[job.ID] = job
jobs.mu.Unlock()
// Add the job to the context
ctx = context.WithValue(ctx, jobKey, job)
if isAsync {
go job.run(ctx, fn, in)
out = make(rc.Params)
out["jobid"] = job.ID
out["executeId"] = job.ExecuteID
err = nil
} else {
job.run(ctx, fn, in)
out = job.Output
err = job.realErr
}
return job, out, err
}
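// Illustrative behaviour (jobid and executeId values are examples): with
// {"_async": true} the call returns immediately with something like
//   {"jobid": 1, "executeId": "<uuid>"}
// while the job keeps running; without _async the job's own output and error
// are returned directly.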
// NewJob creates a Job and executes it on the global job queue,
// possibly in the background if _async is set
func NewJob(ctx context.Context, fn rc.Func, in rc.Params) (job *Job, out rc.Params, err error) {
return running.NewJob(ctx, fn, in)
}
// OnFinish adds listener to jobid that will be triggered when job is finished.
// It returns a function to cancel listening.
func OnFinish(jobID int64, fn func()) (func(), error) {
job := running.Get(jobID)
if job == nil {
return func() {}, errors.New("job not found")
}
return job.OnFinish(fn), nil
}
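// Illustrative usage (a sketch mirroring job_test.go, not part of the API docs):
//   stop, err := OnFinish(job.ID, func() { close(done) })
//   defer stop()
// The callback runs in its own goroutine when the job finishes, or immediately
// if it has already finished.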
// GetJob gets the Job from the context if possible
func GetJob(ctx context.Context) (job *Job, ok bool) {
job, ok = ctx.Value(jobKey).(*Job)
return job, ok
}
// GetJobID gets the job ID from the context if possible
func GetJobID(ctx context.Context) (jobID int64, ok bool) {
job, ok := GetJob(ctx)
if !ok {
return -1, ok
}
return job.ID, true
}
func init() {
rc.Add(rc.Call{
Path: "job/status",
Fn: rcJobStatus,
Title: "Reads the status of the job ID",
Help: `Parameters:
- jobid - id of the job (integer).
Results:
- finished - boolean
- duration - time in seconds that the job ran for
- endTime - time the job finished (e.g. "2018-10-26T18:50:20.528746884+01:00")
- error - error from the job or empty string for no error
- finished - boolean whether the job has finished or not
- id - as passed in above
- executeId - rclone instance ID (changes after restart); combined with id uniquely identifies a job
- startTime - time the job started (e.g. "2018-10-26T18:50:20.528336039+01:00")
- success - boolean - true for success false otherwise
- output - output of the job as would have been returned if called synchronously
- progress - output of the progress related to the underlying job
`,
})
}
// Returns the status of a job
func rcJobStatus(ctx context.Context, in rc.Params) (out rc.Params, err error) {
jobID, err := in.GetInt64("jobid")
if err != nil {
return nil, err
}
job := running.Get(jobID)
if job == nil {
return nil, errors.New("job not found")
}
job.mu.Lock()
defer job.mu.Unlock()
out = make(rc.Params)
err = rc.Reshape(&out, job)
if err != nil {
return nil, fmt.Errorf("reshape failed in job status: %w", err)
}
return out, nil
}
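// Illustrative example (assumed invocation):
//   rclone rc job/status jobid=1
// returns the reshaped Job fields listed in the help text (id, finished,
// success, error, startTime, endTime, duration, output, ...).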
func init() {
rc.Add(rc.Call{
Path: "job/list",
Fn: rcJobList,
Title: "Lists the IDs of the running jobs",
Help: `Parameters: None.
Results:
- executeId - rclone instance ID (changes after restart)
- jobids - array of integer job ids (starting at 1 on each restart)
- runningIds - array of integer job ids that are running
- finishedIds - array of integer job ids that are finished
`,
})
}
// Returns list of job ids.
func rcJobList(ctx context.Context, in rc.Params) (out rc.Params, err error) {
out = make(rc.Params)
out["jobids"] = running.IDs()
runningIDs, finishedIDs := running.Stats()
out["runningIds"] = runningIDs
out["finishedIds"] = finishedIDs
out["executeId"] = executeID
return out, nil
}
func init() {
rc.Add(rc.Call{
Path: "job/stop",
Fn: rcJobStop,
Title: "Stop the running job",
Help: `Parameters:
- jobid - id of the job (integer).
`,
})
}
// Stops the running job.
func rcJobStop(ctx context.Context, in rc.Params) (out rc.Params, err error) {
jobID, err := in.GetInt64("jobid")
if err != nil {
return nil, err
}
job := running.Get(jobID)
if job == nil {
return nil, errors.New("job not found")
}
job.mu.Lock()
defer job.mu.Unlock()
out = make(rc.Params)
job.Stop()
return out, nil
}
func init() {
rc.Add(rc.Call{
Path: "job/stopgroup",
Fn: rcGroupStop,
Title: "Stop all running jobs in a group",
Help: `Parameters:
- group - name of the group (string).
`,
})
}
// Stops all running jobs in a group
func rcGroupStop(ctx context.Context, in rc.Params) (out rc.Params, err error) {
group, err := in.GetString("group")
if err != nil {
return nil, err
}
running.mu.RLock()
defer running.mu.RUnlock()
for _, job := range running.jobs {
if job.Group == group {
job.mu.Lock()
job.Stop()
job.mu.Unlock()
}
}
out = make(rc.Params)
return out, nil
}
// NewJobFromParams creates an rc job from rc.Params.
//
// The input Params should contain a _path entry.
//
// It returns an rc.Params as output which may be an error.
func NewJobFromParams(ctx context.Context, in rc.Params) (out rc.Params) {
path := "unknown"
// Return an rc error blob
rcError := func(err error, status int) rc.Params {
fs.Errorf(nil, "rc: %q: error: %v", path, err)
out, _ = rc.Error(path, in, err, status)
return out
}
// Find the call
path, err := in.GetString("_path")
if err != nil {
return rcError(err, http.StatusNotFound)
}
delete(in, "_path")
call := rc.Calls.Get(path)
if call == nil {
return rcError(fmt.Errorf("couldn't find path %q", path), http.StatusNotFound)
}
if call.NeedsRequest {
return rcError(fmt.Errorf("can't run path %q as it needs the request", path), http.StatusBadRequest)
}
if call.NeedsResponse {
return rcError(fmt.Errorf("can't run path %q as it needs the response", path), http.StatusBadRequest)
}
// Pass on the group if one is set in the context and it isn't set in the input.
if _, found := in["_group"]; !found {
group, ok := accounting.StatsGroupFromContext(ctx)
if ok {
in["_group"] = group
}
}
fs.Debugf(nil, "rc: %q: with parameters %+v", path, in)
_, out, err = NewJob(ctx, call.Fn, in)
if err != nil {
return rcError(err, http.StatusInternalServerError)
}
if out == nil {
out = make(rc.Params)
}
fs.Debugf(nil, "rc: %q: reply %+v: %v", path, out, err)
return out
}
// NewJobFromBytes creates an rc job from a JSON blob as bytes.
//
// The JSON blob should contain a _path entry.
//
// It returns a JSON blob as output which may be an error.
func NewJobFromBytes(ctx context.Context, inBuf []byte) (outBuf []byte) {
var in rc.Params
var out rc.Params
// Parse a JSON blob from the input
err := json.Unmarshal(inBuf, &in)
if err != nil {
out, _ = rc.Error("unknown", in, err, http.StatusBadRequest)
} else {
out = NewJobFromParams(ctx, in)
}
var w bytes.Buffer
err = rc.WriteJSON(&w, out)
if err != nil {
fs.Errorf(nil, "rc: NewJobFromBytes: failed to write JSON output: %v", err)
return []byte(`{"error":"failed to write JSON output"}`)
}
return w.Bytes()
}
func init() {
rc.Add(rc.Call{
Path: "job/batch",
AuthRequired: true, // require auth always since sub commands may require it
Fn: rcBatch,
Title: "Run a batch of rclone rc commands concurrently.",
Help: strings.ReplaceAll(`
This takes the following parameters:
- concurrency - int - do this many commands concurrently. Defaults to |--transfers| if not set.
- inputs - a list of inputs to the commands with an extra |_path| parameter
|||json
{
"_path": "rc/path",
"param1": "parameter for the path as documented",
"param2": "parameter for the path as documented, etc",
}
|||
The inputs may use |_async|, |_group|, |_config| and |_filter| as normal when using the rc.
Returns:
- results - a list of results from the commands, with one entry for each entry in inputs.
For example:
|||sh
rclone rc job/batch --json '{
"inputs": [
{
"_path": "rc/noop",
"parameter": "OK"
},
{
"_path": "rc/error",
"parameter": "BAD"
}
]
}
'
|||
Gives the result:
|||json
{
"results": [
{
"parameter": "OK"
},
{
"error": "arbitrary error on input map[parameter:BAD]",
"input": {
"parameter": "BAD"
},
"path": "rc/error",
"status": 500
}
]
}
|||
`, "|", "`"),
})
}
/*
// Run a single batch job
func runBatchJob(ctx context.Context, inputAny any) (out rc.Params, err error) {
var in rc.Params
path := "unknown"
defer func() {
if err != nil {
out, _ = rc.Error(path, in, err, http.StatusInternalServerError)
}
}()
// get the inputs to the job
input, ok := inputAny.(map[string]any)
if !ok {
return nil, rc.NewErrParamInvalid(fmt.Errorf("\"inputs\" items must be objects not %T", inputAny))
}
in = rc.Params(input)
path, err = in.GetString("_path")
if err != nil {
return nil, err
}
delete(in, "_path")
call := rc.Calls.Get(path)
// Check call
if call == nil {
return nil, rc.NewErrParamInvalid(fmt.Errorf("path %q does not exist", path))
}
path = call.Path
if call.NeedsRequest {
return nil, rc.NewErrParamInvalid(fmt.Errorf("can't run path %q as it needs the request", path))
}
if call.NeedsResponse {
return nil, rc.NewErrParamInvalid(fmt.Errorf("can't run path %q as it needs the response", path))
}
// Run the job
_, out, err = NewJob(ctx, call.Fn, in)
if err != nil {
return nil, err
}
// Reshape (serialize then deserialize) the data so it is in the form expected
err = rc.Reshape(&out, out)
if err != nil {
return nil, err
}
return out, nil
}
*/
// Batch the registered commands
func rcBatch(ctx context.Context, in rc.Params) (out rc.Params, err error) {
out = make(rc.Params)
// Read inputs
inputsAny, err := in.Get("inputs")
if err != nil {
return nil, err
}
inputs, ok := inputsAny.([]any)
if !ok {
return nil, rc.NewErrParamInvalid(fmt.Errorf("expecting list key %q (was %T)", "inputs", inputsAny))
}
// Read concurrency
concurrency, err := in.GetInt64("concurrency")
if rc.IsErrParamNotFound(err) {
ci := fs.GetConfig(ctx)
concurrency = int64(ci.Transfers)
} else if err != nil {
return nil, err
}
// Prepare outputs
results := make([]rc.Params, len(inputs))
out["results"] = results
g, gCtx := errgroup.WithContext(ctx)
g.SetLimit(int(concurrency))
for i, inputAny := range inputs {
input, ok := inputAny.(map[string]any)
if !ok {
results[i], _ = rc.Error("unknown", nil, fmt.Errorf("\"inputs\" items must be objects not %T", inputAny), http.StatusBadRequest)
continue
}
in := rc.Params(input)
if concurrency <= 1 {
results[i] = NewJobFromParams(ctx, in)
} else {
g.Go(func() error {
results[i] = NewJobFromParams(gCtx, in)
return nil
})
}
}
_ = g.Wait()
return out, nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/rc/rcflags/rcflags.go | fs/rc/rcflags/rcflags.go | // Package rcflags implements command line flags to set up the remote control
package rcflags
import (
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/rc"
"github.com/spf13/pflag"
)
// FlagPrefix is the prefix used to uniquely identify command line flags.
const FlagPrefix = "rc-"
// AddFlags adds the remote control flags to the flagSet
func AddFlags(flagSet *pflag.FlagSet) {
flags.AddFlagsFromOptions(flagSet, "", rc.OptionsInfo)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/rc/rcserver/rcserver_test.go | fs/rc/rcserver/rcserver_test.go | package rcserver
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"regexp"
"strings"
"testing"
"time"
"github.com/go-chi/chi/v5"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/fs/rc"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
testBindAddress = "localhost:0"
defaultTestTemplate = "testdata/golden/testindex.html"
testFs = "testdata/files"
remoteURL = "[" + testFs + "]/" // initial URL path to fetch from that remote
)
func TestMain(m *testing.M) {
// Pretend to be rclone version if we have a version string parameter
if os.Args[len(os.Args)-1] == "version" {
fmt.Printf("rclone %s\n", fs.Version)
os.Exit(0)
}
// Pretend to error if we have an unknown command
if os.Args[len(os.Args)-1] == "unknown_command" {
fmt.Printf("rclone %s\n", fs.Version)
fmt.Fprintf(os.Stderr, "Unknown command\n")
os.Exit(1)
}
os.Exit(m.Run())
}
// Test the RC server runs and we can do HTTP fetches from it.
// We'll do the majority of the testing with the httptest framework
func TestRcServer(t *testing.T) {
opt := rc.Opt
opt.HTTP.ListenAddr = []string{testBindAddress}
opt.Template.Path = defaultTestTemplate
opt.Enabled = true
opt.Serve = true
opt.Files = testFs
mux := http.NewServeMux()
rcServer, err := newServer(context.Background(), &opt, mux)
require.NoError(t, err)
assert.NoError(t, rcServer.Serve())
defer func() {
assert.NoError(t, rcServer.Shutdown())
rcServer.Wait()
}()
testURL := rcServer.server.URLs()[0]
// Do the simplest possible test to check the server is alive
// Do it a few times to wait for the server to start
var resp *http.Response
for range 10 {
resp, err = http.Get(testURL + "file.txt")
if err == nil {
break
}
time.Sleep(10 * time.Millisecond)
}
require.NoError(t, err)
body, err := io.ReadAll(resp.Body)
_ = resp.Body.Close()
require.NoError(t, err)
require.NoError(t, resp.Body.Close())
assert.Equal(t, http.StatusOK, resp.StatusCode)
assert.Equal(t, "this is file1.txt\n", string(body))
}
type testRun struct {
Name string
URL string
User string
Pass string
Status int
Method string
Range string
Body string
ContentType string
Expected string
Contains *regexp.Regexp
Headers map[string]string
}
// Run a suite of tests
func testServer(t *testing.T, tests []testRun, opt *rc.Options) {
t.Helper()
ctx := context.Background()
configfile.Install()
if opt.Template.Path == "" {
opt.Template.Path = defaultTestTemplate
}
rcServer, err := newServer(ctx, opt, http.DefaultServeMux)
require.NoError(t, err)
testURL := rcServer.server.URLs()[0]
mux := rcServer.server.Router()
emulateCalls(t, tests, mux, testURL)
}
func emulateCalls(t *testing.T, tests []testRun, mux chi.Router, testURL string) {
for _, test := range tests {
t.Run(test.Name, func(t *testing.T) {
t.Helper()
method := test.Method
if method == "" {
method = "GET"
}
var inBody io.Reader
if test.Body != "" {
buf := bytes.NewBufferString(test.Body)
inBody = buf
}
req, err := http.NewRequest(method, "http://1.2.3.4/"+test.URL, inBody)
require.NoError(t, err)
if test.Range != "" {
req.Header.Add("Range", test.Range)
}
if test.ContentType != "" {
req.Header.Add("Content-Type", test.ContentType)
}
if test.User != "" && test.Pass != "" {
req.SetBasicAuth(test.User, test.Pass)
}
w := httptest.NewRecorder()
mux.ServeHTTP(w, req)
resp := w.Result()
assert.Equal(t, test.Status, resp.StatusCode)
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
if test.ContentType == "application/json" && test.Expected != "" {
expectedNormalized := normalizeJSON(t, test.Expected)
actualNormalized := normalizeJSON(t, string(body))
assert.Equal(t, expectedNormalized, actualNormalized, "Normalized JSON does not match")
} else if test.Contains == nil {
// go1.23 started putting an html wrapper
bodyNormalized := strings.TrimPrefix(string(body), "<!doctype html>\n<meta name=\"viewport\" content=\"width=device-width\">\n")
assert.Equal(t, test.Expected, bodyNormalized)
} else {
assert.True(t, test.Contains.Match(body), fmt.Sprintf("body didn't match: %v: %v", test.Contains, string(body)))
}
for k, v := range test.Headers {
if v == "testURL" {
v = testURL
}
assert.Equal(t, v, resp.Header.Get(k), k)
}
})
}
}
// return an enabled rc
func newTestOpt() rc.Options {
opt := rc.Opt
opt.Enabled = true
opt.HTTP.ListenAddr = []string{testBindAddress}
return opt
}
func TestFileServing(t *testing.T) {
tests := []testRun{{
Name: "index",
URL: "",
Status: http.StatusOK,
Expected: `<pre>
<a href="dir/">dir/</a>
<a href="file.txt">file.txt</a>
<a href="modtime/">modtime/</a>
</pre>
`,
}, {
Name: "notfound",
URL: "notfound",
Status: http.StatusNotFound,
Expected: "404 page not found\n",
}, {
Name: "dirnotfound",
URL: "dirnotfound/",
Status: http.StatusNotFound,
Expected: "404 page not found\n",
}, {
Name: "dir",
URL: "dir/",
Status: http.StatusOK,
Expected: `<pre>
<a href="file2.txt">file2.txt</a>
</pre>
`,
}, {
Name: "file",
URL: "file.txt",
Status: http.StatusOK,
Expected: "this is file1.txt\n",
Headers: map[string]string{
"Content-Length": "18",
},
}, {
Name: "file2",
URL: "dir/file2.txt",
Status: http.StatusOK,
Expected: "this is dir/file2.txt\n",
}, {
Name: "file-head",
URL: "file.txt",
Method: "HEAD",
Status: http.StatusOK,
Expected: ``,
Headers: map[string]string{
"Content-Length": "18",
},
}, {
Name: "file-range",
URL: "file.txt",
Status: http.StatusPartialContent,
Range: "bytes=8-12",
Expected: `file1`,
}}
opt := newTestOpt()
opt.Serve = true
opt.Files = testFs
testServer(t, tests, &opt)
}
func TestRemoteServing(t *testing.T) {
tests := []testRun{
// Test serving files from the test remote
{
Name: "index",
URL: remoteURL + "",
Status: http.StatusOK,
Expected: `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Directory listing of /</title>
</head>
<body>
<h1>Directory listing of /</h1>
<a href="dir/">dir/</a><br />
<a href="modtime/">modtime/</a><br />
<a href="file.txt">file.txt</a><br />
</body>
</html>
`,
}, {
Name: "notfound-index",
URL: "[notfound]/",
Status: http.StatusNotFound,
Expected: `{
"error": "failed to list directory: directory not found",
"input": null,
"path": "",
"status": 404
}
`,
}, {
Name: "notfound",
URL: remoteURL + "notfound",
Status: http.StatusNotFound,
Expected: `{
"error": "failed to find object: object not found",
"input": null,
"path": "notfound",
"status": 404
}
`,
}, {
Name: "dirnotfound",
URL: remoteURL + "dirnotfound/",
Status: http.StatusNotFound,
Expected: `{
"error": "failed to list directory: directory not found",
"input": null,
"path": "dirnotfound",
"status": 404
}
`,
}, {
Name: "dir",
URL: remoteURL + "dir/",
Status: http.StatusOK,
Expected: `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Directory listing of /dir</title>
</head>
<body>
<h1>Directory listing of /dir</h1>
<a href="file2.txt">file2.txt</a><br />
</body>
</html>
`,
}, {
Name: "file",
URL: remoteURL + "file.txt",
Status: http.StatusOK,
Expected: "this is file1.txt\n",
Headers: map[string]string{
"Content-Length": "18",
},
}, {
Name: "file with no slash after ]",
URL: strings.TrimRight(remoteURL, "/") + "file.txt",
Status: http.StatusOK,
Expected: "this is file1.txt\n",
Headers: map[string]string{
"Content-Length": "18",
},
}, {
Name: "file2",
URL: remoteURL + "dir/file2.txt",
Status: http.StatusOK,
Expected: "this is dir/file2.txt\n",
}, {
Name: "file-head",
URL: remoteURL + "file.txt",
Method: "HEAD",
Status: http.StatusOK,
Expected: ``,
Headers: map[string]string{
"Content-Length": "18",
},
}, {
Name: "file-range",
URL: remoteURL + "file.txt",
Status: http.StatusPartialContent,
Range: "bytes=8-12",
Expected: `file1`,
}, {
Name: "bad-remote",
URL: "[notfoundremote:]/",
Status: http.StatusInternalServerError,
Expected: `{
"error": "failed to make Fs: didn't find section in config file (\"notfoundremote\")",
"input": null,
"path": "/",
"status": 500
}
`,
}}
opt := newTestOpt()
opt.Serve = true
opt.Files = testFs
testServer(t, tests, &opt)
}
func TestRC(t *testing.T) {
tests := []testRun{{
Name: "rc-root",
URL: "",
Method: "POST",
Status: http.StatusNotFound,
Expected: `{
"error": "couldn't find method \"\"",
"input": {},
"path": "",
"status": 404
}
`,
}, {
Name: "rc-noop",
URL: "rc/noop",
Method: "POST",
Status: http.StatusOK,
Expected: "{}\n",
}, {
Name: "rc-error",
URL: "rc/error",
Method: "POST",
Status: http.StatusInternalServerError,
Expected: `{
"error": "arbitrary error on input map[]",
"input": {},
"path": "rc/error",
"status": 500
}
`,
}, {
Name: "core-gc",
URL: "core/gc", // returns nil, nil so check it is made into {}
Method: "POST",
Status: http.StatusOK,
Expected: "{}\n",
}, {
Name: "url-params",
URL: "rc/noop?param1=potato¶m2=sausage",
Method: "POST",
Status: http.StatusOK,
Expected: `{
"param1": "potato",
"param2": "sausage"
}
`,
}, {
Name: "json",
URL: "rc/noop",
Method: "POST",
Body: `{ "param1":"string", "param2":true }`,
ContentType: "application/json",
Status: http.StatusOK,
Expected: `{
"param1": "string",
"param2": true
}
`,
}, {
Name: "json-mixed-case-content-type",
URL: "rc/noop",
Method: "POST",
Body: `{ "param1":"string", "param2":true }`,
ContentType: "ApplicAtion/JsOn",
Status: http.StatusOK,
Expected: `{
"param1": "string",
"param2": true
}
`,
}, {
Name: "json-and-url-params",
URL: "rc/noop?param1=potato¶m2=sausage",
Method: "POST",
Body: `{ "param1":"string", "param3":true }`,
ContentType: "application/json",
Status: http.StatusOK,
Expected: `{
"param1": "string",
"param2": "sausage",
"param3": true
}
`,
}, {
Name: "json-bad",
URL: "rc/noop?param1=potato¶m2=sausage",
Method: "POST",
Body: `{ param1":"string", "param3":true }`,
ContentType: "application/json",
Status: http.StatusBadRequest,
Expected: `{
"error": "failed to read input JSON: invalid character 'p' looking for beginning of object key string",
"input": {
"param1": "potato",
"param2": "sausage"
},
"path": "rc/noop",
"status": 400
}
`,
}, {
Name: "json-charset",
URL: "rc/noop",
Method: "POST",
Body: `{ "param1":"string", "param2":true }`,
ContentType: "application/json; charset=utf-8",
Status: http.StatusOK,
Expected: `{
"param1": "string",
"param2": true
}
`,
}, {
Name: "json-mixed-case-charset",
URL: "rc/noop",
Method: "POST",
Body: `{ "param1":"string", "param2":true }`,
ContentType: "aPPlication/jSoN; charset=UtF-8",
Status: http.StatusOK,
Expected: `{
"param1": "string",
"param2": true
}
`,
}, {
Name: "json-bad-charset",
URL: "rc/noop",
Method: "POST",
Body: `{ "param1":"string", "param2":true }`,
ContentType: "application/json; charset=latin1",
Status: http.StatusBadRequest,
Expected: `{
"error": "unsupported charset \"latin1\" for JSON input",
"input": {},
"path": "rc/noop",
"status": 400
}
`,
}, {
Name: "form",
URL: "rc/noop",
Method: "POST",
Body: `param1=string&param2=true`,
ContentType: "application/x-www-form-urlencoded",
Status: http.StatusOK,
Expected: `{
"param1": "string",
"param2": "true"
}
`,
}, {
Name: "form-and-url-params",
URL: "rc/noop?param1=potato¶m2=sausage",
Method: "POST",
Body: `param1=string&param3=true`,
ContentType: "application/x-www-form-urlencoded",
Status: http.StatusOK,
Expected: `{
"param1": "potato",
"param2": "sausage",
"param3": "true"
}
`,
}, {
Name: "form-bad",
URL: "rc/noop?param1=potato¶m2=sausage",
Method: "POST",
Body: `%zz`,
ContentType: "application/x-www-form-urlencoded",
Status: http.StatusBadRequest,
Expected: `{
"error": "failed to parse form/URL parameters: invalid URL escape \"%zz\"",
"input": null,
"path": "rc/noop",
"status": 400
}
`,
}, {
Name: "malformed-content-type",
URL: "rc/noop",
Method: "POST",
ContentType: "malformed/",
Status: http.StatusBadRequest,
Expected: `{
"error": "failed to parse Content-Type: mime: expected token after slash",
"input": null,
"path": "rc/noop",
"status": 400
}
`,
}}
opt := newTestOpt()
opt.Serve = true
opt.Files = testFs
testServer(t, tests, &opt)
}
func TestRCWithAuth(t *testing.T) {
tests := []testRun{{
Name: "core-command",
URL: "core/command",
Method: "POST",
Body: `command=version`,
ContentType: "application/x-www-form-urlencoded",
Status: http.StatusOK,
Expected: fmt.Sprintf(`{
"error": false,
"result": "rclone %s\n"
}
`, fs.Version),
}, {
Name: "core-command-bad-returnType",
URL: "core/command",
Method: "POST",
Body: `command=version&returnType=POTATO`,
ContentType: "application/x-www-form-urlencoded",
Status: http.StatusInternalServerError,
Expected: `{
"error": "unknown returnType \"POTATO\"",
"input": {
"command": "version",
"returnType": "POTATO"
},
"path": "core/command",
"status": 500
}
`,
}, {
Name: "core-command-stream",
URL: "core/command",
Method: "POST",
Body: `command=version&returnType=STREAM`,
ContentType: "application/x-www-form-urlencoded",
Status: http.StatusOK,
Expected: fmt.Sprintf(`rclone %s
{}
`, fs.Version),
}, {
Name: "core-command-stream-error",
URL: "core/command",
Method: "POST",
Body: `command=unknown_command&returnType=STREAM`,
ContentType: "application/x-www-form-urlencoded",
Status: http.StatusOK,
Expected: fmt.Sprintf(`rclone %s
Unknown command
{
"error": "exit status 1",
"input": {
"command": "unknown_command",
"returnType": "STREAM"
},
"path": "core/command",
"status": 500
}
`, fs.Version),
}}
opt := newTestOpt()
opt.Serve = true
opt.Files = testFs
opt.NoAuth = true
testServer(t, tests, &opt)
}
var matchRemoteDirListing = regexp.MustCompile(`<title>Directory listing of /</title>`)
func TestServingRoot(t *testing.T) {
tests := []testRun{{
Name: "rootlist",
URL: "*",
Status: http.StatusOK,
Contains: matchRemoteDirListing,
}}
opt := newTestOpt()
opt.Serve = true
opt.Files = testFs
testServer(t, tests, &opt)
}
func TestServingRootNoFiles(t *testing.T) {
tests := []testRun{{
Name: "rootlist",
URL: "",
Status: http.StatusOK,
Contains: matchRemoteDirListing,
}}
opt := newTestOpt()
opt.Serve = true
opt.Files = ""
testServer(t, tests, &opt)
}
func TestNoFiles(t *testing.T) {
tests := []testRun{{
Name: "file",
URL: "file.txt",
Status: http.StatusNotFound,
Expected: "Not Found\n",
}, {
Name: "dir",
URL: "dir/",
Status: http.StatusNotFound,
Expected: "Not Found\n",
}}
opt := newTestOpt()
opt.Serve = true
opt.Files = ""
testServer(t, tests, &opt)
}
func TestNoServe(t *testing.T) {
tests := []testRun{{
Name: "file",
URL: remoteURL + "file.txt",
Status: http.StatusNotFound,
Expected: "404 page not found\n",
}, {
Name: "dir",
URL: remoteURL + "dir/",
Status: http.StatusNotFound,
Expected: "404 page not found\n",
}}
opt := newTestOpt()
opt.Serve = false
opt.Files = testFs
testServer(t, tests, &opt)
}
func TestAuthRequired(t *testing.T) {
tests := []testRun{{
Name: "auth",
URL: "rc/noopauth",
Method: "POST",
Body: `{}`,
ContentType: "application/javascript",
Status: http.StatusForbidden,
Expected: `{
"error": "authentication must be set up on the rc server to use \"rc/noopauth\" or the --rc-no-auth flag must be in use",
"input": {},
"path": "rc/noopauth",
"status": 403
}
`,
}}
opt := newTestOpt()
opt.Serve = false
opt.Files = ""
opt.NoAuth = false
testServer(t, tests, &opt)
}
func TestNoAuth(t *testing.T) {
tests := []testRun{{
Name: "auth",
URL: "rc/noopauth",
Method: "POST",
Body: `{}`,
ContentType: "application/javascript",
Status: http.StatusOK,
Expected: "{}\n",
}}
opt := newTestOpt()
opt.Serve = false
opt.Files = ""
opt.NoAuth = true
testServer(t, tests, &opt)
}
func TestWithUserPass(t *testing.T) {
tests := []testRun{{
Name: "authMissing",
URL: "rc/noopauth",
Method: "POST",
Body: `{}`,
ContentType: "application/javascript",
Status: http.StatusUnauthorized,
Expected: "401 Unauthorized\n",
}, {
Name: "authWrong",
URL: "rc/noopauth",
Method: "POST",
Body: `{}`,
ContentType: "application/javascript",
Status: http.StatusUnauthorized,
Expected: "401 Unauthorized\n",
User: "user1",
Pass: "pass2",
}, {
Name: "authOK",
URL: "rc/noopauth",
Method: "POST",
Body: `{}`,
ContentType: "application/javascript",
Status: http.StatusOK,
Expected: "{}\n",
User: "user",
Pass: "pass",
}}
opt := newTestOpt()
opt.Serve = false
opt.Files = ""
opt.NoAuth = false
opt.Auth.BasicUser = "user"
opt.Auth.BasicPass = "pass"
testServer(t, tests, &opt)
}
func TestRCAsync(t *testing.T) {
tests := []testRun{{
Name: "ok",
URL: "rc/noop",
Method: "POST",
ContentType: "application/json",
Body: `{ "_async":true }`,
Status: http.StatusOK,
Contains: regexp.MustCompile(`(?s)\{.*\"jobid\":.*\}`),
}, {
Name: "bad",
URL: "rc/noop",
Method: "POST",
ContentType: "application/json",
Body: `{ "_async":"truthy" }`,
Status: http.StatusBadRequest,
Expected: `{
"error": "couldn't parse key \"_async\" (truthy) as bool: strconv.ParseBool: parsing \"truthy\": invalid syntax",
"input": {
"_async": "truthy"
},
"path": "rc/noop",
"status": 400
}
`,
}}
opt := newTestOpt()
opt.Serve = true
opt.Files = ""
testServer(t, tests, &opt)
}
// Check the debug handlers are attached
func TestRCDebug(t *testing.T) {
tests := []testRun{{
Name: "index",
URL: "debug/pprof/",
Method: "GET",
ContentType: "text/html",
Status: http.StatusOK,
Contains: regexp.MustCompile(`Types of profiles available`),
}, {
Name: "goroutines",
URL: "debug/pprof/goroutine?debug=1",
Method: "GET",
ContentType: "text/html",
Status: http.StatusOK,
Contains: regexp.MustCompile(`goroutine profile`),
}}
opt := newTestOpt()
opt.Serve = true
opt.Files = ""
testServer(t, tests, &opt)
}
func TestServeModTime(t *testing.T) {
for file, mtime := range map[string]time.Time{
"dir": time.Date(2023, 4, 12, 21, 15, 17, 0, time.UTC),
"modtime.txt": time.Date(2021, 1, 18, 5, 2, 28, 0, time.UTC),
} {
path := filepath.Join(testFs, "modtime", file)
err := os.Chtimes(path, mtime, mtime)
require.NoError(t, err)
}
opt := newTestOpt()
opt.Serve = true
opt.Template.Path = "testdata/golden/testmodtime.html"
tests := []testRun{{
Name: "modtime",
Method: "GET",
URL: remoteURL + "modtime/",
Status: http.StatusOK,
Expected: "* dir/ - 2023-04-12T21:15:17Z\n* modtime.txt - 2021-01-18T05:02:28Z\n",
}}
testServer(t, tests, &opt)
opt.ServeNoModTime = true
tests = []testRun{{
Name: "no modtime",
Method: "GET",
URL: remoteURL + "modtime/",
Status: http.StatusOK,
Expected: "* dir/ - 0001-01-01T00:00:00Z\n* modtime.txt - 0001-01-01T00:00:00Z\n",
}}
testServer(t, tests, &opt)
}
func TestContentTypeJSON(t *testing.T) {
tests := []testRun{
{
Name: "Check Content-Type for JSON response",
URL: "rc/noop",
Method: "POST",
Body: `{}`,
ContentType: "application/json",
Status: http.StatusOK,
Expected: "{}\n",
Headers: map[string]string{
"Content-Type": "application/json",
},
},
{
Name: "Check Content-Type for JSON error response",
URL: "rc/error",
Method: "POST",
Body: `{}`,
ContentType: "application/json",
Status: http.StatusInternalServerError,
Expected: `{
"error": "arbitrary error on input map[]",
"input": {},
"path": "rc/error",
"status": 500
}
`,
Headers: map[string]string{
"Content-Type": "application/json",
},
},
}
opt := newTestOpt()
testServer(t, tests, &opt)
}
func normalizeJSON(t *testing.T, jsonStr string) string {
var jsonObj map[string]any
err := json.Unmarshal([]byte(jsonStr), &jsonObj)
require.NoError(t, err, "JSON unmarshalling failed")
normalizedJSON, err := json.Marshal(jsonObj)
require.NoError(t, err, "JSON marshalling failed")
return string(normalizedJSON)
}
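// Illustrative sketch, not part of the original tests: because normalizeJSON
// unmarshals into a map before re-marshalling, key order in the input does not
// matter. The two literals below are assumptions chosen only to demonstrate that.
func TestNormalizeJSONSketch(t *testing.T) {
	a := normalizeJSON(t, `{"b": 2, "a": 1}`)
	b := normalizeJSON(t, `{"a": 1, "b": 2}`)
	assert.Equal(t, a, b)
}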
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/rc/rcserver/metrics.go | fs/rc/rcserver/metrics.go | // Package rcserver implements the HTTP endpoint to serve the remote control
package rcserver
import (
"context"
"fmt"
"net/http"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fs/rc/jobs"
libhttp "github.com/rclone/rclone/lib/http"
)
const path = "/metrics"
var promHandlerFunc http.HandlerFunc
func init() {
rcloneCollector := accounting.NewRcloneCollector(context.Background())
prometheus.MustRegister(rcloneCollector)
m := fshttp.NewMetrics("rclone")
for _, c := range m.Collectors() {
prometheus.MustRegister(c)
}
fshttp.DefaultMetrics = m
promHandlerFunc = promhttp.Handler().ServeHTTP
}
// MetricsStart starts the metrics server if configured
//
// If the server wasn't configured the *MetricsServer returned may be nil
func MetricsStart(ctx context.Context, opt *rc.Options) (*MetricsServer, error) {
jobs.SetOpt(opt) // set the defaults for jobs
if len(opt.MetricsHTTP.ListenAddr) > 0 {
// Start the metrics server on the configured listen addresses
s, err := newMetricsServer(ctx, opt)
if err != nil {
return nil, err
}
return s, s.Serve()
}
return nil, nil
}
// MetricsServer contains everything to run the rc server
type MetricsServer struct {
ctx context.Context // for global config
server *libhttp.Server
promHandlerFunc http.Handler
opt *rc.Options
}
func newMetricsServer(ctx context.Context, opt *rc.Options) (*MetricsServer, error) {
s := &MetricsServer{
ctx: ctx,
opt: opt,
promHandlerFunc: promHandlerFunc,
}
var err error
s.server, err = libhttp.NewServer(ctx,
libhttp.WithConfig(opt.MetricsHTTP),
libhttp.WithAuth(opt.MetricsAuth),
libhttp.WithTemplate(opt.MetricsTemplate),
)
if err != nil {
return nil, fmt.Errorf("failed to init server: %w", err)
}
router := s.server.Router()
router.Get(path, promHandlerFunc)
return s, nil
}
// Serve runs the http server in the background.
//
// Use s.Close() and s.Wait() to shutdown server
func (s *MetricsServer) Serve() error {
s.server.Serve()
return nil
}
// Wait blocks while the server is serving requests
func (s *MetricsServer) Wait() {
s.server.Wait()
}
// Shutdown gracefully shuts down the server
func (s *MetricsServer) Shutdown() error {
return s.server.Shutdown()
}
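// Illustrative sketch, not part of the original package: one way a caller could
// bring up the metrics endpoint. The listen address is an assumption for the
// example; MetricsStart returns a nil *MetricsServer when no address is configured.
func exampleMetricsStart(ctx context.Context) error {
	opt := rc.Opt                                           // copy the defaults
	opt.MetricsHTTP.ListenAddr = []string{"localhost:9090"} // assumed address
	s, err := MetricsStart(ctx, &opt)
	if err != nil || s == nil {
		return err
	}
	// ... scrape the /metrics endpoint here ...
	return s.Shutdown()
}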
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/rc/rcserver/rcserver.go | fs/rc/rcserver/rcserver.go | // Package rcserver implements the HTTP endpoint to serve the remote control
package rcserver
import (
"context"
"encoding/base64"
"encoding/json"
"flag"
"fmt"
"mime"
"net/http"
"net/url"
"path/filepath"
"regexp"
"sort"
"strings"
"time"
"github.com/go-chi/chi/v5/middleware"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fs/rc/jobs"
"github.com/rclone/rclone/fs/rc/webgui"
libhttp "github.com/rclone/rclone/lib/http"
"github.com/rclone/rclone/lib/http/serve"
"github.com/rclone/rclone/lib/random"
"github.com/skratchdot/open-golang/open"
)
// Start the remote control server if configured
//
// If the server wasn't configured the *Server returned may be nil
func Start(ctx context.Context, opt *rc.Options) (*Server, error) {
jobs.SetOpt(opt) // set the defaults for jobs
if opt.Enabled {
// Serve on the DefaultServeMux so that global registrations appear
s, err := newServer(ctx, opt, http.DefaultServeMux)
if err != nil {
return nil, err
}
return s, s.Serve()
}
return nil, nil
}
// Server contains everything to run the rc server
type Server struct {
ctx context.Context // for global config
server *libhttp.Server
files http.Handler
pluginsHandler http.Handler
opt *rc.Options
}
func newServer(ctx context.Context, opt *rc.Options, mux *http.ServeMux) (*Server, error) {
fileHandler := http.Handler(nil)
pluginsHandler := http.Handler(nil)
// Add some more mime types which are often missing
_ = mime.AddExtensionType(".wasm", "application/wasm")
_ = mime.AddExtensionType(".js", "application/javascript")
cachePath := filepath.Join(config.GetCacheDir(), "webgui")
extractPath := filepath.Join(cachePath, "current/build")
// File handling
if opt.Files != "" {
if opt.WebUI {
fs.Logf(nil, "--rc-files overrides --rc-web-gui command\n")
}
fs.Logf(nil, "Serving files from %q", opt.Files)
fileHandler = http.FileServer(http.Dir(opt.Files))
} else if opt.WebUI {
if err := webgui.CheckAndDownloadWebGUIRelease(opt.WebGUIUpdate, opt.WebGUIForceUpdate, opt.WebGUIFetchURL, config.GetCacheDir()); err != nil {
fs.Errorf(nil, "Error while fetching the latest release of Web GUI: %v", err)
}
if opt.NoAuth {
fs.Logf(nil, "It is recommended to use web gui with auth.")
} else {
if opt.Auth.BasicUser == "" && opt.Auth.HtPasswd == "" {
opt.Auth.BasicUser = "gui"
fs.Infof(nil, "No username specified. Using default username: %s \n", rc.Opt.Auth.BasicUser)
}
if opt.Auth.BasicPass == "" && opt.Auth.HtPasswd == "" {
randomPass, err := random.Password(128)
if err != nil {
fs.Fatalf(nil, "Failed to make password: %v", err)
}
opt.Auth.BasicPass = randomPass
fs.Infof(nil, "No password specified. Using random password: %s \n", randomPass)
}
}
opt.Serve = true
fs.Logf(nil, "Serving Web GUI")
fileHandler = http.FileServer(http.Dir(extractPath))
pluginsHandler = http.FileServer(http.Dir(webgui.PluginsPath))
}
s := &Server{
ctx: ctx,
opt: opt,
files: fileHandler,
pluginsHandler: pluginsHandler,
}
var err error
s.server, err = libhttp.NewServer(ctx,
libhttp.WithConfig(opt.HTTP),
libhttp.WithAuth(opt.Auth),
libhttp.WithTemplate(opt.Template),
)
if err != nil {
return nil, fmt.Errorf("failed to init server: %w", err)
}
router := s.server.Router()
router.Use(
middleware.SetHeader("Accept-Ranges", "bytes"),
middleware.SetHeader("Server", "rclone/"+fs.Version),
)
// Add the debug handler which is installed in the default mux
router.Handle("/debug/pprof/*", mux)
// FIXME split these up into individual functions
router.Get("/*", s.handler)
router.Head("/*", s.handler)
router.Post("/*", s.handler)
router.Options("/*", s.handler)
return s, nil
}
// Serve runs the http server in the background.
//
// Use s.Close() and s.Wait() to shutdown server
func (s *Server) Serve() error {
s.server.Serve()
for _, URL := range s.server.URLs() {
fs.Logf(nil, "Serving remote control on %s", URL)
// Open the files in the browser if set
if s.files != nil {
openURL, err := url.Parse(URL)
if err != nil {
return fmt.Errorf("invalid serving URL: %w", err)
}
// Add username, password into the URL if they are set
user, pass := s.opt.Auth.BasicUser, s.opt.Auth.BasicPass
if user != "" && pass != "" {
openURL.User = url.UserPassword(user, pass)
// Base64 encode username and password to be sent through url
loginToken := user + ":" + pass
parameters := url.Values{}
encodedToken := base64.URLEncoding.EncodeToString([]byte(loginToken))
fs.Debugf(nil, "login_token %q", encodedToken)
parameters.Add("login_token", encodedToken)
openURL.RawQuery = parameters.Encode()
openURL.RawPath = "/#/login"
}
// Don't open browser if serving in testing environment or required not to do so.
if flag.Lookup("test.v") == nil && !s.opt.WebGUINoOpenBrowser {
if err := open.Start(openURL.String()); err != nil {
fs.Errorf(nil, "Failed to open Web GUI in browser: %v. Manually access it at: %s", err, openURL.String())
}
} else {
fs.Logf(nil, "Web GUI is not automatically opening browser. Navigate to %s to use.", openURL.String())
}
}
}
return nil
}
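// Illustrative sketch, not part of the original package: the basic lifecycle a
// caller goes through with Start. The listen address is an assumption for the
// example; Start returns a nil *Server when rc is not enabled.
func exampleStart(ctx context.Context) error {
	opt := rc.Opt // copy the defaults
	opt.Enabled = true
	opt.HTTP.ListenAddr = []string{"localhost:5572"} // assumed address
	s, err := Start(ctx, &opt)
	if err != nil || s == nil {
		return err
	}
	// ... issue rc calls against the server here ...
	return s.Shutdown()
}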
// writeError writes a formatted error to the output
func writeError(path string, in rc.Params, w http.ResponseWriter, err error, status int) {
fs.Errorf(nil, "rc: %q: error: %v", path, err)
params, status := rc.Error(path, in, err, status)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(status)
err = rc.WriteJSON(w, params)
if err != nil {
// can't return the error at this point
fs.Errorf(nil, "rc: writeError: failed to write JSON output from %#v: %v", in, err)
}
}
// handler reads incoming requests and dispatches them
func (s *Server) handler(w http.ResponseWriter, r *http.Request) {
path := strings.TrimLeft(r.URL.Path, "/")
switch r.Method {
case "POST":
s.handlePost(w, r, path)
case "OPTIONS":
s.handleOptions(w, r, path)
case "GET", "HEAD":
s.handleGet(w, r, path)
default:
writeError(path, nil, w, fmt.Errorf("method %q not allowed", r.Method), http.StatusMethodNotAllowed)
return
}
}
func (s *Server) handlePost(w http.ResponseWriter, r *http.Request, path string) {
ctx := r.Context()
contentType := r.Header.Get("Content-Type")
var (
contentTypeMediaType string
contentTypeParams map[string]string
)
if contentType != "" {
var err error
contentTypeMediaType, contentTypeParams, err = mime.ParseMediaType(contentType)
if err != nil {
writeError(path, nil, w, fmt.Errorf("failed to parse Content-Type: %w", err), http.StatusBadRequest)
return
}
}
values := r.URL.Query()
if contentTypeMediaType == "application/x-www-form-urlencoded" {
// Parse the POST and URL parameters into r.Form, for others r.Form will be empty value
err := r.ParseForm()
if err != nil {
writeError(path, nil, w, fmt.Errorf("failed to parse form/URL parameters: %w", err), http.StatusBadRequest)
return
}
values = r.Form
}
// Read the POST and URL parameters into in
in := make(rc.Params)
for k, vs := range values {
if len(vs) > 0 {
in[k] = vs[len(vs)-1]
}
}
// Parse a JSON blob from the input
if contentTypeMediaType == "application/json" {
// Check the charset is utf-8 or unset
if charset, ok := contentTypeParams["charset"]; ok && !strings.EqualFold(charset, "utf-8") {
writeError(path, in, w, fmt.Errorf("unsupported charset %q for JSON input", charset), http.StatusBadRequest)
return
}
err := json.NewDecoder(r.Body).Decode(&in)
if err != nil {
writeError(path, in, w, fmt.Errorf("failed to read input JSON: %w", err), http.StatusBadRequest)
return
}
}
// Find the call
call := rc.Calls.Get(path)
if call == nil {
writeError(path, in, w, fmt.Errorf("couldn't find method %q", path), http.StatusNotFound)
return
}
// Check to see if it requires authorisation
if !s.opt.NoAuth && call.AuthRequired && !s.server.UsingAuth() {
writeError(path, in, w, fmt.Errorf("authentication must be set up on the rc server to use %q or the --rc-no-auth flag must be in use", path), http.StatusForbidden)
return
}
inOrig := in.Copy()
if call.NeedsRequest {
// Add the request to RC
in["_request"] = r
}
if call.NeedsResponse {
in["_response"] = w
}
fs.Debugf(nil, "rc: %q: with parameters %+v", path, in)
job, out, err := jobs.NewJob(ctx, call.Fn, in)
if job != nil {
w.Header().Add("x-rclone-jobid", fmt.Sprintf("%d", job.ID))
}
if err != nil {
writeError(path, inOrig, w, err, http.StatusInternalServerError)
return
}
if out == nil {
out = make(rc.Params)
}
fs.Debugf(nil, "rc: %q: reply %+v: %v", path, out, err)
w.Header().Set("Content-Type", "application/json")
err = rc.WriteJSON(w, out)
if err != nil {
// can't return the error at this point - but have a go anyway
writeError(path, inOrig, w, err, http.StatusInternalServerError)
fs.Errorf(nil, "rc: handlePost: failed to write JSON output: %v", err)
}
}
func (s *Server) handleOptions(w http.ResponseWriter, r *http.Request, path string) {
w.WriteHeader(http.StatusOK)
}
func (s *Server) serveRoot(w http.ResponseWriter, r *http.Request) {
remoteNames := config.GetRemoteNames()
sort.Strings(remoteNames)
directory := serve.NewDirectory("", s.server.HTMLTemplate())
directory.Name = "List of all rclone remotes."
q := url.Values{}
for _, remoteName := range remoteNames {
q.Set("fs", remoteName)
directory.AddHTMLEntry("["+remoteName+":]", true, -1, time.Time{})
}
sortParm := r.URL.Query().Get("sort")
orderParm := r.URL.Query().Get("order")
directory.ProcessQueryParams(sortParm, orderParm)
directory.Serve(w, r)
}
func (s *Server) serveRemote(w http.ResponseWriter, r *http.Request, path string, fsName string) {
f, err := cache.Get(s.ctx, fsName)
if err != nil {
writeError(path, nil, w, fmt.Errorf("failed to make Fs: %w", err), http.StatusInternalServerError)
return
}
if path == "" || strings.HasSuffix(path, "/") {
path = strings.Trim(path, "/")
entries, err := list.DirSorted(r.Context(), f, false, path)
if err != nil {
writeError(path, nil, w, fmt.Errorf("failed to list directory: %w", err), http.StatusInternalServerError)
return
}
// Make the entries for display
directory := serve.NewDirectory(path, s.server.HTMLTemplate())
for _, entry := range entries {
_, isDir := entry.(fs.Directory)
var modTime time.Time
if !s.opt.ServeNoModTime {
modTime = entry.ModTime(r.Context())
}
directory.AddHTMLEntry(entry.Remote(), isDir, entry.Size(), modTime)
}
sortParm := r.URL.Query().Get("sort")
orderParm := r.URL.Query().Get("order")
directory.ProcessQueryParams(sortParm, orderParm)
directory.Serve(w, r)
} else {
path = strings.Trim(path, "/")
o, err := f.NewObject(r.Context(), path)
if err != nil {
writeError(path, nil, w, fmt.Errorf("failed to find object: %w", err), http.StatusInternalServerError)
return
}
serve.Object(w, r, o)
}
}
// fsMatch matches URLs of the form [fs]/remote
var fsMatch = regexp.MustCompile(`^\[(.*?)\](.*)$`)
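// Illustrative sketch (not in the original source): how the two fsMatch groups
// split an incoming path. For the assumed input "[mydrive:]/dir/file.txt" the
// first group is the Fs name "mydrive:" and the second the remote path
// "/dir/file.txt", matching how handleGet feeds serveRemote below.
func exampleFsMatch(path string) (fsName, remote string, ok bool) {
	m := fsMatch.FindStringSubmatch(path)
	if m == nil {
		return "", "", false
	}
	return m[1], m[2], true
}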
func (s *Server) handleGet(w http.ResponseWriter, r *http.Request, path string) {
// Look to see if this has an fs in the path
fsMatchResult := fsMatch.FindStringSubmatch(path)
switch {
case fsMatchResult != nil && s.opt.Serve:
// Serve /[fs]/remote files
s.serveRemote(w, r, fsMatchResult[2], fsMatchResult[1])
return
case path == "metrics" && s.opt.EnableMetrics:
promHandlerFunc(w, r)
return
case path == "*" && s.opt.Serve:
// Serve /* as the remote listing
s.serveRoot(w, r)
return
case s.files != nil:
if s.opt.WebUI {
pluginsMatchResult := webgui.PluginsMatch.FindStringSubmatch(path)
if len(pluginsMatchResult) > 2 {
ok := webgui.ServePluginOK(w, r, pluginsMatchResult)
if !ok {
r.URL.Path = fmt.Sprintf("/%s/%s/app/build/%s", pluginsMatchResult[1], pluginsMatchResult[2], pluginsMatchResult[3])
s.pluginsHandler.ServeHTTP(w, r)
return
}
return
} else if webgui.ServePluginWithReferrerOK(w, r, path) {
return
}
}
// Serve the files
r.URL.Path = "/" + path
s.files.ServeHTTP(w, r)
return
case path == "" && s.opt.Serve:
// Serve the root as a remote listing
s.serveRoot(w, r)
return
}
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
}
// Wait blocks while the server is serving requests
func (s *Server) Wait() {
s.server.Wait()
}
// Shutdown gracefully shuts down the server
func (s *Server) Shutdown() error {
return s.server.Shutdown()
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/rc/rcserver/metrics_test.go | fs/rc/rcserver/metrics_test.go | package rcserver
import (
"context"
"fmt"
"net/http"
"regexp"
"testing"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/fs/rc"
"github.com/stretchr/testify/require"
)
// Run a suite of tests
func testMetricsServer(t *testing.T, tests []testRun, opt *rc.Options) {
t.Helper()
ctx := context.Background()
configfile.Install()
rcServer, err := newMetricsServer(ctx, opt)
require.NoError(t, err)
testURL := rcServer.server.URLs()[0]
mux := rcServer.server.Router()
emulateCalls(t, tests, mux, testURL)
}
// newMetricsTestOpt returns rc Options with the metrics listener configured
func newMetricsTestOpt() rc.Options {
opt := rc.Opt
opt.MetricsHTTP.ListenAddr = []string{testBindAddress}
return opt
}
func TestMetrics(t *testing.T) {
stats := accounting.GlobalStats()
tests := makeMetricsTestCases(stats)
opt := newMetricsTestOpt()
testMetricsServer(t, tests, &opt)
// Test changing a couple options
stats.Bytes(500)
for range 30 {
require.NoError(t, stats.DeleteFile(context.Background(), 0))
}
stats.Errors(2)
stats.Bytes(324)
tests = makeMetricsTestCases(stats)
testMetricsServer(t, tests, &opt)
}
func makeMetricsTestCases(stats *accounting.StatsInfo) (tests []testRun) {
tests = []testRun{{
Name: "Bytes Transferred Metric",
URL: "metrics",
Method: "GET",
Status: http.StatusOK,
Contains: regexp.MustCompile(fmt.Sprintf("rclone_bytes_transferred_total %d", stats.GetBytes())),
}, {
Name: "Checked Files Metric",
URL: "metrics",
Method: "GET",
Status: http.StatusOK,
Contains: regexp.MustCompile(fmt.Sprintf("rclone_checked_files_total %d", stats.GetChecks())),
}, {
Name: "Errors Metric",
URL: "metrics",
Method: "GET",
Status: http.StatusOK,
Contains: regexp.MustCompile(fmt.Sprintf("rclone_errors_total %d", stats.GetErrors())),
}, {
Name: "Deleted Files Metric",
URL: "metrics",
Method: "GET",
Status: http.StatusOK,
Contains: regexp.MustCompile(fmt.Sprintf("rclone_files_deleted_total %d", stats.GetDeletes())),
}, {
Name: "Files Transferred Metric",
URL: "metrics",
Method: "GET",
Status: http.StatusOK,
Contains: regexp.MustCompile(fmt.Sprintf("rclone_files_transferred_total %d", stats.GetTransfers())),
},
}
return
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/rc/js/serve.go | fs/rc/js/serve.go | //go:build none
package main
import (
"fmt"
"log"
"mime"
"net/http"
)
func main() {
mime.AddExtensionType(".wasm", "application/wasm")
mime.AddExtensionType(".js", "application/javascript")
mux := http.NewServeMux()
mux.Handle("/", http.FileServer(http.Dir(".")))
fmt.Printf("Serving on http://localhost:3000/\n")
log.Fatal(http.ListenAndServe(":3000", mux))
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/rc/js/main.go | fs/rc/js/main.go | // Rclone as a wasm library
//
// This library exports the core rc functionality
//go:build js
package main
import (
"context"
"encoding/json"
"errors"
"fmt"
"log"
"net/http"
"runtime"
"syscall/js"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/rc"
// Core functionality we need
_ "github.com/rclone/rclone/fs/operations"
_ "github.com/rclone/rclone/fs/sync"
// _ "github.com/rclone/rclone/backend/all" // import all backends
// Backends
_ "github.com/rclone/rclone/backend/memory"
)
var (
document js.Value
jsJSON js.Value
)
func getElementById(name string) js.Value {
node := document.Call("getElementById", name)
if node.IsUndefined() {
log.Fatalf("Couldn't find element %q", name)
}
return node
}
func time() int {
return js.Global().Get("Date").New().Call("getTime").Int()
}
func paramToValue(in rc.Params) (out js.Value) {
return js.Value{}
}
// errorValue turns an error into a js.Value
func errorValue(method string, in js.Value, err error) js.Value {
fs.Errorf(nil, "rc: %q: error: %v", method, err)
// Adjust the error return for some well known errors
status := http.StatusInternalServerError
switch {
case errors.Is(err, fs.ErrorDirNotFound) || errors.Is(err, fs.ErrorObjectNotFound):
status = http.StatusNotFound
case rc.IsErrParamInvalid(err) || rc.IsErrParamNotFound(err):
status = http.StatusBadRequest
}
return js.ValueOf(map[string]interface{}{
"status": status,
"error": err.Error(),
"input": in,
"path": method,
})
}
// rcCallback is a callback for javascript to access the api
//
// FIXME should this return a promise so we can return errors properly?
func rcCallback(this js.Value, args []js.Value) interface{} {
ctx := context.Background() // FIXME
log.Printf("rcCallback: this=%v args=%v", this, args)
if len(args) != 2 {
return errorValue("", js.Undefined(), errors.New("need two parameters to rc call"))
}
method := args[0].String()
inRaw := args[1]
var in = rc.Params{}
switch inRaw.Type() {
case js.TypeNull:
case js.TypeObject:
inJSON := jsJSON.Call("stringify", inRaw).String()
err := json.Unmarshal([]byte(inJSON), &in)
if err != nil {
return errorValue(method, inRaw, fmt.Errorf("couldn't unmarshal input: %w", err))
}
default:
return errorValue(method, inRaw, errors.New("in parameter must be null or object"))
}
call := rc.Calls.Get(method)
if call == nil {
return errorValue(method, inRaw, fmt.Errorf("method %q not found", method))
}
out, err := call.Fn(ctx, in)
if err != nil {
return errorValue(method, inRaw, fmt.Errorf("method call failed: %w", err))
}
if out == nil {
return nil
}
var out2 map[string]interface{}
err = rc.Reshape(&out2, out)
if err != nil {
return errorValue(method, inRaw, fmt.Errorf("result reshape failed: %w", err))
}
return js.ValueOf(out2)
}
func main() {
log.Printf("Running on goos/goarch = %s/%s", runtime.GOOS, runtime.GOARCH)
if js.Global().IsUndefined() {
log.Fatalf("Didn't find Global - not running in browser")
}
document = js.Global().Get("document")
if document.IsUndefined() {
log.Fatalf("Didn't find document - not running in browser")
}
jsJSON = js.Global().Get("JSON")
if jsJSON.IsUndefined() {
log.Fatalf("can't find JSON")
}
// Set rc
js.Global().Set("rc", js.FuncOf(rcCallback))
// Signal that it is valid
rcValidResolve := js.Global().Get("rcValidResolve")
if rcValidResolve.IsUndefined() {
log.Fatalf("Didn't find rcValidResolve")
}
rcValidResolve.Invoke()
// Wait forever
select {}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/rc/webgui/rc.go | fs/rc/webgui/rc.go | package webgui
import (
"context"
"fmt"
"os"
"path/filepath"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/rc"
)
func init() {
rc.Add(rc.Call{
Path: "pluginsctl/listTestPlugins",
AuthRequired: true,
Fn: rcListTestPlugins,
Title: "Show currently loaded test plugins",
Help: `Allows listing of test plugins, i.e. plugins with rclone.test set to true in their package.json.
This takes no parameters and returns:
- loadedTestPlugins - list of currently available test plugins.
E.g.
rclone rc pluginsctl/listTestPlugins
`,
})
}
func rcListTestPlugins(_ context.Context, _ rc.Params) (out rc.Params, err error) {
err = initPluginsOrError()
if err != nil {
return nil, err
}
return rc.Params{
"loadedTestPlugins": filterPlugins(loadedPlugins, func(json *PackageJSON) bool { return json.isTesting() }),
}, nil
}
func init() {
rc.Add(rc.Call{
Path: "pluginsctl/removeTestPlugin",
AuthRequired: true,
Fn: rcRemoveTestPlugin,
Title: "Remove a test plugin",
Help: `This allows you to remove a plugin using its name.
This takes the following parameters:
- name - name of the plugin in the format ` + "`author`/`plugin_name`" + `.
Example:
rclone rc pluginsctl/removeTestPlugin name=rclone/rclone-webui-react
`,
})
}
func rcRemoveTestPlugin(_ context.Context, in rc.Params) (out rc.Params, err error) {
err = initPluginsOrError()
if err != nil {
return nil, err
}
name, err := in.GetString("name")
if err != nil {
return nil, err
}
err = loadedPlugins.removePlugin(name)
if err != nil {
return nil, err
}
return nil, nil
}
func init() {
rc.Add(rc.Call{
Path: "pluginsctl/addPlugin",
AuthRequired: true,
Fn: rcAddPlugin,
Title: "Add a plugin using url",
Help: `Used for adding a plugin to the webgui.
This takes the following parameters:
- url - http url of the github repo where the plugin is hosted (http://github.com/rclone/rclone-webui-react).
Example:
rclone rc pluginsctl/addPlugin
`,
})
}
func rcAddPlugin(_ context.Context, in rc.Params) (out rc.Params, err error) {
err = initPluginsOrError()
if err != nil {
return nil, err
}
pluginURL, err := in.GetString("url")
if err != nil {
return nil, err
}
author, repoName, repoBranch, err := getAuthorRepoBranchGitHub(pluginURL)
if err != nil {
return nil, err
}
branch, err := in.GetString("branch")
if err != nil || branch == "" {
branch = repoBranch
}
version, err := in.GetString("version")
if err != nil || version == "" {
version = "latest"
}
err = CreatePathIfNotExist(PluginsPath)
if err != nil {
return nil, err
}
// fetch the plugin's package.json
// https://raw.githubusercontent.com/rclone/rclone-webui-react/master/package.json
pluginID := fmt.Sprintf("%s/%s", author, repoName)
currentPluginPath := filepath.Join(PluginsPath, pluginID)
err = CreatePathIfNotExist(currentPluginPath)
if err != nil {
return nil, err
}
packageJSONUrl := fmt.Sprintf("https://raw.githubusercontent.com/%s/%s/%s/package.json", author, repoName, branch)
packageJSONFilePath := filepath.Join(currentPluginPath, "package.json")
err = DownloadFile(packageJSONFilePath, packageJSONUrl)
if err != nil {
return nil, err
}
// register in plugins
// download release and save in plugins/<author>/repo-name/app
// https://api.github.com/repos/rclone/rclone-webui-react/releases/latest
releaseURL, tag, _, err := GetLatestReleaseURL(fmt.Sprintf("https://api.github.com/repos/%s/%s/releases/%s", author, repoName, version))
if err != nil {
return nil, err
}
zipName := tag + ".zip"
zipPath := filepath.Join(currentPluginPath, zipName)
err = DownloadFile(zipPath, releaseURL)
if err != nil {
return nil, err
}
extractPath := filepath.Join(currentPluginPath, "app")
err = CreatePathIfNotExist(extractPath)
if err != nil {
return nil, err
}
err = os.RemoveAll(extractPath)
if err != nil {
fs.Logf(nil, "No previous downloads to remove")
}
fs.Logf(nil, "Unzipping plugin binary")
err = Unzip(zipPath, extractPath)
if err != nil {
return nil, err
}
err = loadedPlugins.addPlugin(pluginID, packageJSONFilePath)
if err != nil {
return nil, err
}
return nil, nil
}
func init() {
rc.Add(rc.Call{
Path: "pluginsctl/listPlugins",
AuthRequired: true,
Fn: rcGetPlugins,
Title: "Get the list of currently loaded plugins",
Help: `This allows you to get the currently enabled plugins and their details.
This takes no parameters and returns:
- loadedPlugins - list of current production plugins.
- testPlugins - list of temporarily loaded development plugins, usually running on a different server.
E.g.
rclone rc pluginsctl/listPlugins
`,
})
}
func rcGetPlugins(_ context.Context, _ rc.Params) (out rc.Params, err error) {
err = initPluginsOrError()
if err != nil {
return nil, err
}
err = loadedPlugins.readFromFile()
if err != nil {
return nil, err
}
return rc.Params{
"loadedPlugins": filterPlugins(loadedPlugins, func(packageJSON *PackageJSON) bool { return !packageJSON.isTesting() }),
"loadedTestPlugins": filterPlugins(loadedPlugins, func(packageJSON *PackageJSON) bool { return packageJSON.isTesting() }),
}, nil
}
func init() {
rc.Add(rc.Call{
Path: "pluginsctl/removePlugin",
AuthRequired: true,
Fn: rcRemovePlugin,
Title: "Remove a loaded plugin",
Help: `This allows you to remove a plugin using its name.
This takes parameters:
- name - name of the plugin in the format ` + "`author`/`plugin_name`" + `.
E.g.
rclone rc pluginsctl/removePlugin name=rclone/video-plugin
`,
})
}
func rcRemovePlugin(_ context.Context, in rc.Params) (out rc.Params, err error) {
err = initPluginsOrError()
if err != nil {
return nil, err
}
name, err := in.GetString("name")
if err != nil {
return nil, err
}
err = loadedPlugins.removePlugin(name)
if err != nil {
return nil, err
}
return nil, nil
}
func init() {
rc.Add(rc.Call{
Path: "pluginsctl/getPluginsForType",
AuthRequired: true,
Fn: rcGetPluginsForType,
Title: "Get plugins with type criteria",
Help: `This shows all possible plugins by a mime type.
This takes the following parameters:
- type - supported mime type by a loaded plugin e.g. (video/mp4, audio/mp3).
- pluginType - filter plugins based on their type e.g. (DASHBOARD, FILE_HANDLER, TERMINAL).
Returns:
- loadedPlugins - list of current production plugins.
- testPlugins - list of temporarily loaded development plugins, usually running on a different server.
Example:
rclone rc pluginsctl/getPluginsForType type=video/mp4
`,
})
}
func rcGetPluginsForType(_ context.Context, in rc.Params) (out rc.Params, err error) {
err = initPluginsOrError()
if err != nil {
return nil, err
}
handlesType, err := in.GetString("type")
if err != nil {
handlesType = ""
}
pluginType, err := in.GetString("pluginType")
if err != nil {
pluginType = ""
}
var loadedPluginsResult map[string]PackageJSON
var loadedTestPluginsResult map[string]PackageJSON
if pluginType == "" || pluginType == "FileHandler" {
loadedPluginsResult = filterPlugins(loadedPlugins, func(packageJSON *PackageJSON) bool {
for i := range packageJSON.Rclone.HandlesType {
if packageJSON.Rclone.HandlesType[i] == handlesType && !packageJSON.Rclone.Test {
return true
}
}
return false
})
loadedTestPluginsResult = filterPlugins(loadedPlugins, func(packageJSON *PackageJSON) bool {
for i := range packageJSON.Rclone.HandlesType {
if packageJSON.Rclone.HandlesType[i] == handlesType && packageJSON.Rclone.Test {
return true
}
}
return false
})
} else {
loadedPluginsResult = filterPlugins(loadedPlugins, func(packageJSON *PackageJSON) bool {
return packageJSON.Rclone.PluginType == pluginType && !packageJSON.isTesting()
})
loadedTestPluginsResult = filterPlugins(loadedPlugins, func(packageJSON *PackageJSON) bool {
return packageJSON.Rclone.PluginType == pluginType && packageJSON.isTesting()
})
}
return rc.Params{
"loadedPlugins": loadedPluginsResult,
"loadedTestPlugins": loadedTestPluginsResult,
}, nil
}
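// Illustrative sketch, not part of the original file: invoking one of the calls
// registered above directly from Go rather than over HTTP. The mime type is an
// assumption chosen only for the example.
func exampleGetPluginsForType(ctx context.Context) (rc.Params, error) {
	call := rc.Calls.Get("pluginsctl/getPluginsForType")
	if call == nil {
		return nil, fmt.Errorf("pluginsctl/getPluginsForType is not registered")
	}
	return call.Fn(ctx, rc.Params{"type": "video/mp4"})
}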
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/rc/webgui/plugins.go | fs/rc/webgui/plugins.go | // Package webgui provides plugin functionality to the Web GUI.
package webgui
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"net/http/httputil"
"net/url"
"os"
"path/filepath"
"regexp"
"strings"
"sync"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/rc"
)
// PackageJSON is the structure of package.json of a plugin
type PackageJSON struct {
Name string `json:"name"`
Version string `json:"version"`
Description string `json:"description"`
Author string `json:"author"`
Copyright string `json:"copyright"`
License string `json:"license"`
Private bool `json:"private"`
Homepage string `json:"homepage"`
TestURL string `json:"testUrl"`
Repository struct {
Type string `json:"type"`
URL string `json:"url"`
} `json:"repository"`
Bugs struct {
URL string `json:"url"`
} `json:"bugs"`
Rclone RcloneConfig `json:"rclone"`
}
// RcloneConfig represents the rclone specific config
type RcloneConfig struct {
HandlesType []string `json:"handlesType"`
PluginType string `json:"pluginType"`
RedirectReferrer bool `json:"redirectReferrer"`
Test bool `json:"-"`
}
func (r *PackageJSON) isTesting() bool {
return r.Rclone.Test
}
var (
//loadedTestPlugins *Plugins
cachePath string
loadedPlugins *Plugins
pluginsProxy = &httputil.ReverseProxy{}
// PluginsMatch is used for matching author and plugin name in the url path
PluginsMatch = regexp.MustCompile(`^plugins\/([^\/]*)\/([^\/\?]+)[\/]?(.*)$`)
// PluginsPath is the base path where webgui plugins are stored
PluginsPath string
pluginsConfigPath string
availablePluginsJSONPath = "availablePlugins.json"
initSuccess = false
initMutex = &sync.Mutex{}
)
// Plugins represents the structure how plugins are saved onto disk
type Plugins struct {
mutex sync.Mutex
LoadedPlugins map[string]PackageJSON `json:"loadedPlugins"`
fileName string
}
func newPlugins(fileName string) *Plugins {
p := Plugins{LoadedPlugins: map[string]PackageJSON{}}
p.fileName = fileName
p.mutex = sync.Mutex{}
return &p
}
func initPluginsOrError() error {
if !rc.Opt.WebUI {
return errors.New("WebUI needs to be enabled for plugins to work")
}
initMutex.Lock()
defer initMutex.Unlock()
if !initSuccess {
cachePath = filepath.Join(config.GetCacheDir(), "webgui")
PluginsPath = filepath.Join(cachePath, "plugins")
pluginsConfigPath = filepath.Join(PluginsPath, "config")
loadedPlugins = newPlugins(availablePluginsJSONPath)
err := loadedPlugins.readFromFile()
if err != nil {
fs.Errorf(nil, "error reading available plugins: %v", err)
}
initSuccess = true
}
return nil
}
func (p *Plugins) readFromFile() (err error) {
err = CreatePathIfNotExist(pluginsConfigPath)
if err != nil {
return err
}
availablePluginsJSON := filepath.Join(pluginsConfigPath, p.fileName)
_, err = os.Stat(availablePluginsJSON)
if err == nil {
data, err := os.ReadFile(availablePluginsJSON)
if err != nil {
return err
}
err = json.Unmarshal(data, &p)
if err != nil {
fs.Logf(nil, "%s", err)
}
return nil
} else if os.IsNotExist(err) {
// path does not exist
err = p.writeToFile()
if err != nil {
return err
}
}
return nil
}
func (p *Plugins) addPlugin(pluginName string, packageJSONPath string) (err error) {
p.mutex.Lock()
defer p.mutex.Unlock()
data, err := os.ReadFile(packageJSONPath)
if err != nil {
return err
}
var pkgJSON = PackageJSON{}
err = json.Unmarshal(data, &pkgJSON)
if err != nil {
return err
}
p.LoadedPlugins[pluginName] = pkgJSON
err = p.writeToFile()
if err != nil {
return err
}
return nil
}
func (p *Plugins) writeToFile() (err error) {
availablePluginsJSON := filepath.Join(pluginsConfigPath, p.fileName)
file, err := json.MarshalIndent(p, "", " ")
if err != nil {
fs.Logf(nil, "%s", err)
}
err = os.WriteFile(availablePluginsJSON, file, 0755)
if err != nil {
fs.Logf(nil, "%s", err)
}
return nil
}
func (p *Plugins) removePlugin(name string) (err error) {
p.mutex.Lock()
defer p.mutex.Unlock()
err = p.readFromFile()
if err != nil {
return err
}
_, ok := p.LoadedPlugins[name]
if !ok {
return fmt.Errorf("plugin %s not loaded", name)
}
delete(p.LoadedPlugins, name)
err = p.writeToFile()
if err != nil {
return err
}
return nil
}
// GetPluginByName returns the plugin object for the key (author/plugin-name)
func (p *Plugins) GetPluginByName(name string) (out *PackageJSON, err error) {
p.mutex.Lock()
defer p.mutex.Unlock()
po, ok := p.LoadedPlugins[name]
if !ok {
return nil, fmt.Errorf("plugin %s not loaded", name)
}
return &po, nil
}
// getAuthorRepoBranchGitHub gives author, repoName and branch from a github.com url
//
// url examples:
// https://github.com/rclone/rclone-webui-react/
// http://github.com/rclone/rclone-webui-react
// https://github.com/rclone/rclone-webui-react/tree/caman-js
// github.com/rclone/rclone-webui-react
func getAuthorRepoBranchGitHub(url string) (author string, repoName string, branch string, err error) {
repoURL := url
repoURL = strings.Replace(repoURL, "https://", "", 1)
repoURL = strings.Replace(repoURL, "http://", "", 1)
urlSplits := strings.Split(repoURL, "/")
if len(urlSplits) < 3 || len(urlSplits) > 5 || urlSplits[0] != "github.com" {
return "", "", "", fmt.Errorf("invalid github url: %s", url)
}
// get branch name
if len(urlSplits) == 5 && urlSplits[3] == "tree" {
return urlSplits[1], urlSplits[2], urlSplits[4], nil
}
return urlSplits[1], urlSplits[2], "master", nil
}
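// Illustrative sketch, not part of the original file: exercising
// getAuthorRepoBranchGitHub with the URL forms listed in the comment above. All
// of them resolve to author "rclone" and repo "rclone-webui-react"; only the
// tree form carries an explicit branch, the others default to "master".
func exampleGetAuthorRepoBranch() {
	for _, u := range []string{
		"https://github.com/rclone/rclone-webui-react/",
		"https://github.com/rclone/rclone-webui-react/tree/caman-js",
		"github.com/rclone/rclone-webui-react",
	} {
		author, repo, branch, err := getAuthorRepoBranchGitHub(u)
		if err != nil {
			fs.Errorf(nil, "unexpected parse error for %q: %v", u, err)
			continue
		}
		fs.Debugf(nil, "%q -> author=%s repo=%s branch=%s", u, author, repo, branch)
	}
}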
func filterPlugins(plugins *Plugins, compare func(packageJSON *PackageJSON) bool) map[string]PackageJSON {
output := map[string]PackageJSON{}
for key, val := range plugins.LoadedPlugins {
if compare(&val) {
output[key] = val
}
}
return output
}
// getDirectorForProxy is a helper function for reverse proxy of test plugins
func getDirectorForProxy(origin *url.URL) func(req *http.Request) {
return func(req *http.Request) {
req.Header.Add("X-Forwarded-Host", req.Host)
req.Header.Add("X-Origin-Host", origin.Host)
req.URL.Scheme = "http"
req.URL.Host = origin.Host
req.URL.Path = origin.Path
}
}
// ServePluginOK checks the plugin url and uses reverse proxy to allow redirection for content not being served by rclone
func ServePluginOK(w http.ResponseWriter, r *http.Request, pluginsMatchResult []string) (ok bool) {
testPlugin, err := loadedPlugins.GetPluginByName(fmt.Sprintf("%s/%s", pluginsMatchResult[1], pluginsMatchResult[2]))
if err != nil {
return false
}
if !testPlugin.Rclone.Test {
return false
}
origin, _ := url.Parse(fmt.Sprintf("%s/%s", testPlugin.TestURL, pluginsMatchResult[3]))
director := getDirectorForProxy(origin)
pluginsProxy.Director = director
pluginsProxy.ServeHTTP(w, r)
return true
}
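// Illustrative sketch, not part of the original file: how the PluginsMatch groups
// split a plugin URL path into author, plugin name and the remaining file path.
// The path used here is an assumption for demonstration only.
func examplePluginsMatch() (author, name, rest string, ok bool) {
	m := PluginsMatch.FindStringSubmatch("plugins/rclone/rclone-webui-react/index.html")
	if m == nil {
		return "", "", "", false
	}
	return m[1], m[2], m[3], true
}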
var referrerPathReg = regexp.MustCompile(`^(https?):\/\/(.+):([0-9]+)?\/(.*)\/?\?(.*)$`)
// ServePluginWithReferrerOK checks if redirectReferrer is set for the referring plugin and, if so,
// sends a redirect to the actual URL. This is useful for plugins that refer to absolute paths when
// the referrer in http.Request is set
func ServePluginWithReferrerOK(w http.ResponseWriter, r *http.Request, path string) (ok bool) {
err := initPluginsOrError()
if err != nil {
return false
}
referrer := r.Referer()
referrerPathMatch := referrerPathReg.FindStringSubmatch(referrer)
if len(referrerPathMatch) > 3 {
referrerPluginMatch := PluginsMatch.FindStringSubmatch(referrerPathMatch[4])
if len(referrerPluginMatch) > 2 {
pluginKey := fmt.Sprintf("%s/%s", referrerPluginMatch[1], referrerPluginMatch[2])
currentPlugin, err := loadedPlugins.GetPluginByName(pluginKey)
if err != nil {
return false
}
if currentPlugin.Rclone.RedirectReferrer {
path = fmt.Sprintf("/plugins/%s/%s/%s", referrerPluginMatch[1], referrerPluginMatch[2], path)
http.Redirect(w, r, path, http.StatusMovedPermanently)
return true
}
}
}
return false
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/rc/webgui/webgui.go | fs/rc/webgui/webgui.go | // Package webgui defines the Web GUI helpers.
package webgui
import (
"archive/zip"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/file"
)
// GetLatestReleaseURL returns the latest release details of the rclone-webui-react
func GetLatestReleaseURL(fetchURL string) (string, string, int, error) {
resp, err := http.Get(fetchURL)
if err != nil {
return "", "", 0, fmt.Errorf("failed getting latest release of rclone-webui: %w", err)
}
defer fs.CheckClose(resp.Body, &err)
if resp.StatusCode != http.StatusOK {
return "", "", 0, fmt.Errorf("bad HTTP status %d (%s) when fetching %s", resp.StatusCode, resp.Status, fetchURL)
}
results := gitHubRequest{}
if err := json.NewDecoder(resp.Body).Decode(&results); err != nil {
return "", "", 0, fmt.Errorf("could not decode results from http request: %w", err)
}
if len(results.Assets) < 1 {
return "", "", 0, errors.New("could not find an asset in the release. " +
"check if asset was successfully added in github release assets")
}
res := results.Assets[0].BrowserDownloadURL
tag := results.TagName
size := results.Assets[0].Size
return res, tag, size, nil
}
// CheckAndDownloadWebGUIRelease is a helper function to download and set up the latest release of rclone-webui-react
func CheckAndDownloadWebGUIRelease(checkUpdate bool, forceUpdate bool, fetchURL string, cacheDir string) (err error) {
cachePath := filepath.Join(cacheDir, "webgui")
tagPath := filepath.Join(cachePath, "tag")
extractPath := filepath.Join(cachePath, "current")
extractPathExist, extractPathStat, err := exists(extractPath)
if err != nil {
return err
}
if extractPathExist && !extractPathStat.IsDir() {
return errors.New("Web GUI path exists, but is a file instead of folder. Please check the path " + extractPath)
}
// Get the latest release details
WebUIURL, tag, size, err := GetLatestReleaseURL(fetchURL)
if err != nil {
return fmt.Errorf("error checking for web gui release update, skipping update: %w", err)
}
dat, err := os.ReadFile(tagPath)
tagsMatch := false
if err != nil {
fs.Errorf(nil, "Error reading tag file at %s ", tagPath)
checkUpdate = true
} else if string(dat) == tag {
tagsMatch = true
}
fs.Debugf(nil, "Current tag: %s, Release tag: %s", string(dat), tag)
if !tagsMatch {
fs.Infof(nil, "A release (%s) for gui is present at %s. Use --rc-web-gui-update to update. Your current version is (%s)", tag, WebUIURL, string(dat))
}
// if the extracted Web GUI does not exist, an update check is requested, or a force update is enforced.
// TODO: Add hashing to check integrity of the previous update.
if !extractPathExist || checkUpdate || forceUpdate {
if tagsMatch {
fs.Logf(nil, "No update to Web GUI available.")
if !forceUpdate {
return nil
}
fs.Logf(nil, "Force update the Web GUI binary.")
}
zipName := tag + ".zip"
zipPath := filepath.Join(cachePath, zipName)
cachePathExist, cachePathStat, _ := exists(cachePath)
if !cachePathExist {
if err := file.MkdirAll(cachePath, 0755); err != nil {
return errors.New("Error creating cache directory: " + cachePath)
}
}
if cachePathExist && !cachePathStat.IsDir() {
return errors.New("Web GUI path is a file instead of folder. Please check it " + extractPath)
}
fs.Logf(nil, "A new release for gui (%s) is present at %s", tag, WebUIURL)
fs.Logf(nil, "Downloading webgui binary. Please wait. [Size: %s, Path : %s]\n", strconv.Itoa(size), zipPath)
// download the zip from latest url
err = DownloadFile(zipPath, WebUIURL)
if err != nil {
return err
}
err = os.RemoveAll(extractPath)
if err != nil {
fs.Logf(nil, "No previous downloads to remove")
}
fs.Logf(nil, "Unzipping webgui binary")
err = Unzip(zipPath, extractPath)
if err != nil {
return err
}
err = os.RemoveAll(zipPath)
if err != nil {
fs.Logf(nil, "Downloaded ZIP cannot be deleted")
}
err = os.WriteFile(tagPath, []byte(tag), 0644)
if err != nil {
fs.Infof(nil, "Cannot write tag file. You may be required to redownload the binary next time.")
}
} else {
fs.Logf(nil, "Web GUI exists. Update skipped.")
}
return nil
}
// DownloadFile is a helper function to download a file from url to the filepath
func DownloadFile(filepath string, url string) (err error) {
// Get the data
resp, err := http.Get(url)
if err != nil {
return err
}
defer fs.CheckClose(resp.Body, &err)
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("bad HTTP status %d (%s) when fetching %s", resp.StatusCode, resp.Status, url)
}
// Create the file
out, err := os.Create(filepath)
if err != nil {
return err
}
defer fs.CheckClose(out, &err)
// Write the body to file
_, err = io.Copy(out, resp.Body)
return err
}
// Unzip is a helper function to Unzip a file specified in src to path dest
func Unzip(src, dest string) (err error) {
dest = filepath.Clean(dest) + string(os.PathSeparator)
r, err := zip.OpenReader(src)
if err != nil {
return err
}
defer fs.CheckClose(r, &err)
if err := file.MkdirAll(dest, 0755); err != nil {
return err
}
// Closure to address file descriptors issue with all the deferred .Close() methods
extractAndWriteFile := func(f *zip.File) error {
path := filepath.Join(dest, f.Name)
// Check for Zip Slip: https://github.com/rclone/rclone/issues/3529
if !strings.HasPrefix(path, dest) {
return fmt.Errorf("%s: illegal file path", path)
}
rc, err := f.Open()
if err != nil {
return err
}
defer fs.CheckClose(rc, &err)
if f.FileInfo().IsDir() {
if err := file.MkdirAll(path, 0755); err != nil {
return err
}
} else {
if err := file.MkdirAll(filepath.Dir(path), 0755); err != nil {
return err
}
f, err := file.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
return err
}
defer fs.CheckClose(f, &err)
_, err = io.Copy(f, rc)
if err != nil {
return err
}
}
return nil
}
for _, f := range r.File {
err := extractAndWriteFile(f)
if err != nil {
return err
}
}
return nil
}
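// Illustrative usage sketch (not part of the original file): extracting a
// downloaded archive with the Zip Slip protection above. The paths are
// placeholders.
//
//	if err := Unzip("/tmp/webgui.zip", "/tmp/webgui"); err != nil {
//		return err
//	}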
func exists(path string) (existence bool, stat os.FileInfo, err error) {
stat, err = os.Stat(path)
if err == nil {
return true, stat, nil
}
if os.IsNotExist(err) {
return false, nil, nil
}
return false, stat, err
}
// CreatePathIfNotExist creates the path to a folder if it does not exist
func CreatePathIfNotExist(path string) (err error) {
exists, stat, _ := exists(path)
if !exists {
if err := file.MkdirAll(path, 0755); err != nil {
return errors.New("Error creating : " + path)
}
}
if exists && !stat.IsDir() {
return errors.New("Path is a file instead of folder. Please check it " + path)
}
return nil
}
// gitHubRequest Maps the GitHub API request to structure
type gitHubRequest struct {
URL string `json:"url"`
Prerelease bool `json:"prerelease"`
CreatedAt time.Time `json:"created_at"`
PublishedAt time.Time `json:"published_at"`
TagName string `json:"tag_name"`
Assets []struct {
URL string `json:"url"`
ID int `json:"id"`
NodeID string `json:"node_id"`
Name string `json:"name"`
Label string `json:"label"`
ContentType string `json:"content_type"`
State string `json:"state"`
Size int `json:"size"`
DownloadCount int `json:"download_count"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
BrowserDownloadURL string `json:"browser_download_url"`
} `json:"assets"`
TarballURL string `json:"tarball_url"`
ZipballURL string `json:"zipball_url"`
Body string `json:"body"`
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/rc/webgui/rc_test.go | fs/rc/webgui/rc_test.go | package webgui
import (
"context"
"path/filepath"
"strings"
"testing"
"github.com/rclone/rclone/fs/rc"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const testPluginName = "rclone-test-plugin"
const testPluginAuthor = "rclone"
const testPluginKey = testPluginAuthor + "/" + testPluginName
const testPluginURL = "https://github.com/" + testPluginAuthor + "/" + testPluginName + "/"
func init() {
rc.Opt.WebUI = true
}
func setCacheDir(t *testing.T) {
cacheDir := t.TempDir()
PluginsPath = filepath.Join(cacheDir, "plugins")
pluginsConfigPath = filepath.Join(cacheDir, "config")
loadedPlugins = newPlugins(availablePluginsJSONPath)
err := loadedPlugins.readFromFile()
assert.Nil(t, err)
}
func addPlugin(t *testing.T) {
addPlugin := rc.Calls.Get("pluginsctl/addPlugin")
assert.NotNil(t, addPlugin)
in := rc.Params{
"url": testPluginURL,
}
out, err := addPlugin.Fn(context.Background(), in)
if err != nil && strings.Contains(err.Error(), "bad HTTP status") {
t.Skipf("skipping test as plugin download failed: %v", err)
}
require.Nil(t, err)
assert.Nil(t, out)
}
func removePlugin(t *testing.T) {
addPlugin := rc.Calls.Get("pluginsctl/removePlugin")
assert.NotNil(t, addPlugin)
in := rc.Params{
"name": testPluginKey,
}
out, err := addPlugin.Fn(context.Background(), in)
assert.NotNil(t, err)
assert.Nil(t, out)
}
//func TestListTestPlugins(t *testing.T) {
// addPlugin := rc.Calls.Get("pluginsctl/listTestPlugins")
// assert.NotNil(t, addPlugin)
// in := rc.Params{}
// out, err := addPlugin.Fn(context.Background(), in)
// assert.Nil(t, err)
// expected := rc.Params{
// "loadedTestPlugins": map[string]PackageJSON{},
// }
// assert.Equal(t, expected, out)
//}
//func TestRemoveTestPlugin(t *testing.T) {
// addPlugin := rc.Calls.Get("pluginsctl/removeTestPlugin")
// assert.NotNil(t, addPlugin)
// in := rc.Params{
// "name": "",
// }
// out, err := addPlugin.Fn(context.Background(), in)
// assert.NotNil(t, err)
// assert.Nil(t, out)
//}
func TestAddPlugin(t *testing.T) {
setCacheDir(t)
addPlugin(t)
_, ok := loadedPlugins.LoadedPlugins[testPluginKey]
assert.True(t, ok)
//removePlugin(t)
//_, ok = loadedPlugins.LoadedPlugins[testPluginKey]
//assert.False(t, ok)
}
func TestListPlugins(t *testing.T) {
setCacheDir(t)
addPlugin := rc.Calls.Get("pluginsctl/listPlugins")
assert.NotNil(t, addPlugin)
in := rc.Params{}
out, err := addPlugin.Fn(context.Background(), in)
assert.Nil(t, err)
expected := rc.Params{
"loadedPlugins": map[string]PackageJSON{},
"loadedTestPlugins": map[string]PackageJSON{},
}
assert.Equal(t, expected, out)
}
func TestRemovePlugin(t *testing.T) {
setCacheDir(t)
addPlugin(t)
removePluginCall := rc.Calls.Get("pluginsctl/removePlugin")
assert.NotNil(t, removePluginCall)
in := rc.Params{
"name": testPluginKey,
}
out, err := removePluginCall.Fn(context.Background(), in)
assert.Nil(t, err)
assert.Nil(t, out)
removePlugin(t)
assert.Equal(t, len(loadedPlugins.LoadedPlugins), 0)
}
func TestPluginsForType(t *testing.T) {
addPlugin := rc.Calls.Get("pluginsctl/getPluginsForType")
assert.NotNil(t, addPlugin)
in := rc.Params{
"type": "",
"pluginType": "FileHandler",
}
out, err := addPlugin.Fn(context.Background(), in)
assert.Nil(t, err)
assert.NotNil(t, out)
in = rc.Params{
"type": "video/mp4",
"pluginType": "",
}
out, err = addPlugin.Fn(context.Background(), in)
assert.Nil(t, err)
assert.NotNil(t, out)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/fshttp/dialer.go | fs/fshttp/dialer.go | package fshttp
import (
"context"
"net"
"runtime"
"strings"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"golang.org/x/net/ipv4"
"golang.org/x/net/ipv6"
)
// Dialer structure contains default dialer and timeout, tclass support
type Dialer struct {
net.Dialer
timeout time.Duration
tclass int
}
// NewDialer creates a Dialer structure with Timeout, Keepalive,
// LocalAddr and DSCP set from rclone flags.
func NewDialer(ctx context.Context) *Dialer {
ci := fs.GetConfig(ctx)
dialer := &Dialer{
Dialer: net.Dialer{
Timeout: time.Duration(ci.ConnectTimeout),
KeepAlive: 30 * time.Second,
},
timeout: time.Duration(ci.Timeout),
tclass: int(ci.TrafficClass),
}
if ci.BindAddr != nil {
dialer.Dialer.LocalAddr = &net.TCPAddr{IP: ci.BindAddr}
}
return dialer
}
// Dial connects to the network address.
func (d *Dialer) Dial(network, address string) (net.Conn, error) {
return d.DialContext(context.Background(), network, address)
}
var warnDSCPFail, warnDSCPWindows sync.Once
// DialContext connects to the network address using the provided context.
func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
// If local address is 0.0.0.0 or ::0 force IPv4 or IPv6
// This works around https://github.com/golang/go/issues/48723
// Which means 0.0.0.0 and ::0 both bind to both IPv4 and IPv6
if ip, ok := d.Dialer.LocalAddr.(*net.TCPAddr); ok && ip.IP.IsUnspecified() && (network == "tcp" || network == "udp") {
if ip.IP.To4() != nil {
network += "4" // IPv4 address
} else {
network += "6" // IPv6 address
}
}
c, err := d.Dialer.DialContext(ctx, network, address)
if err != nil {
return c, err
}
if d.tclass != 0 {
// IPv6 addresses must have two or more ":"
if strings.Count(c.RemoteAddr().String(), ":") > 1 {
err = ipv6.NewConn(c).SetTrafficClass(d.tclass)
} else {
err = ipv4.NewConn(c).SetTOS(d.tclass)
// Warn of silent failure on Windows (IPv4 only, IPv6 caught by error handler)
if runtime.GOOS == "windows" {
warnDSCPWindows.Do(func() {
fs.LogLevelPrintf(fs.LogLevelWarning, nil, "dialer: setting DSCP on Windows/IPv4 fails silently; see https://github.com/golang/go/issues/42728")
})
}
}
if err != nil {
warnDSCPFail.Do(func() {
fs.LogLevelPrintf(fs.LogLevelWarning, nil, "dialer: failed to set DSCP socket options: %v", err)
})
}
}
t := &timeoutConn{
Conn: c,
timeout: d.timeout,
}
return t, t.nudgeDeadline()
}
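// Illustrative usage sketch (not part of the original file): a caller could
// build a Dialer from the context config and dial through it so that the
// traffic class and idle timeout wrapping are applied. The address is a
// placeholder.
//
//	d := fshttp.NewDialer(ctx)
//	conn, err := d.DialContext(ctx, "tcp", "example.com:443")
//	if err != nil {
//		return err
//	}
//	defer fs.CheckClose(conn, &err)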
// A net.Conn that sets deadline for every Read/Write operation
type timeoutConn struct {
net.Conn
timeout time.Duration
}
// Nudge the deadline for an idle timeout on by c.timeout if non-zero
func (c *timeoutConn) nudgeDeadline() error {
if c.timeout > 0 {
return c.SetDeadline(time.Now().Add(c.timeout))
}
return nil
}
// Read bytes with rate limiting and idle timeouts
func (c *timeoutConn) Read(b []byte) (n int, err error) {
// Ideally we would LimitBandwidth(len(b)) here and replace tokens we didn't use
n, err = c.Conn.Read(b)
accounting.TokenBucket.LimitBandwidth(accounting.TokenBucketSlotTransportRx, n)
if err == nil && n > 0 && c.timeout > 0 {
err = c.nudgeDeadline()
}
return n, err
}
// Write bytes with rate limiting and idle timeouts
func (c *timeoutConn) Write(b []byte) (n int, err error) {
accounting.TokenBucket.LimitBandwidth(accounting.TokenBucketSlotTransportTx, len(b))
n, err = c.Conn.Write(b)
if err == nil && n > 0 && c.timeout > 0 {
err = c.nudgeDeadline()
}
return n, err
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/fshttp/prometheus.go | fs/fshttp/prometheus.go | package fshttp
import (
"fmt"
"net/http"
"github.com/prometheus/client_golang/prometheus"
)
// Metrics provide Transport HTTP level metrics.
type Metrics struct {
StatusCode *prometheus.CounterVec
}
// NewMetrics creates a new metrics instance, the instance shall be assigned to
// DefaultMetrics before any processing takes place.
func NewMetrics(namespace string) *Metrics {
return &Metrics{
StatusCode: prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: "http",
Name: "status_code",
}, []string{"host", "method", "code"}),
}
}
// DefaultMetrics specifies metrics used for new Transports.
var DefaultMetrics = (*Metrics)(nil)
// Collectors returns all prometheus metrics as collectors for registration.
func (m *Metrics) Collectors() []prometheus.Collector {
if m == nil {
return nil
}
return []prometheus.Collector{
m.StatusCode,
}
}
func (m *Metrics) onResponse(req *http.Request, resp *http.Response) {
if m == nil {
return
}
var statusCode = 0
if resp != nil {
statusCode = resp.StatusCode
}
m.StatusCode.WithLabelValues(req.Host, req.Method, fmt.Sprint(statusCode)).Inc()
}
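// Illustrative wiring sketch (not part of the original file): how a server
// could enable these metrics before any transports are created. The "rclone"
// namespace is a placeholder.
//
//	m := fshttp.NewMetrics("rclone")
//	fshttp.DefaultMetrics = m
//	prometheus.MustRegister(m.Collectors()...)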
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/fshttp/http_test.go | fs/fshttp/http_test.go | package fshttp
import (
"context"
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"math/big"
"net/http"
"net/http/httptest"
"os"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestCleanAuth(t *testing.T) {
for _, test := range []struct {
in string
want string
}{
{"", ""},
{"floo", "floo"},
{"Authorization: ", "Authorization: "},
{"Authorization: \n", "Authorization: \n"},
{"Authorization: A", "Authorization: X"},
{"Authorization: A\n", "Authorization: X\n"},
{"Authorization: AAAA", "Authorization: XXXX"},
{"Authorization: AAAA\n", "Authorization: XXXX\n"},
{"Authorization: AAAAA", "Authorization: XXXX"},
{"Authorization: AAAAA\n", "Authorization: XXXX\n"},
{"Authorization: AAAA\n", "Authorization: XXXX\n"},
{"Authorization: AAAAAAAAA\nPotato: Help\n", "Authorization: XXXX\nPotato: Help\n"},
{"Sausage: 1\nAuthorization: AAAAAAAAA\nPotato: Help\n", "Sausage: 1\nAuthorization: XXXX\nPotato: Help\n"},
} {
got := string(cleanAuth([]byte(test.in), authBufs[0]))
assert.Equal(t, test.want, got, test.in)
}
}
func TestCleanAuths(t *testing.T) {
for _, test := range []struct {
in string
want string
}{
{"", ""},
{"floo", "floo"},
{"Authorization: AAAAAAAAA\nPotato: Help\n", "Authorization: XXXX\nPotato: Help\n"},
{"X-Auth-Token: AAAAAAAAA\nPotato: Help\n", "X-Auth-Token: XXXX\nPotato: Help\n"},
{"X-Auth-Token: AAAAAAAAA\nAuthorization: AAAAAAAAA\nPotato: Help\n", "X-Auth-Token: XXXX\nAuthorization: XXXX\nPotato: Help\n"},
} {
got := string(cleanAuths([]byte(test.in)))
assert.Equal(t, test.want, got, test.in)
}
}
var certSerial = int64(0)
// Create a test certificate and key pair that is valid for a specific
// duration
func createTestCert(validity time.Duration) (keyPEM []byte, certPEM []byte, err error) {
key, err := rsa.GenerateKey(rand.Reader, 1024)
if err != nil {
return
}
keyBytes := x509.MarshalPKCS1PrivateKey(key)
// PEM encoding of private key
keyPEM = pem.EncodeToMemory(
&pem.Block{
Type: "RSA PRIVATE KEY",
Bytes: keyBytes,
},
)
// Now create the certificate
notBefore := time.Now()
notAfter := notBefore.Add(validity).Add(expireWindow)
certSerial += 1
template := x509.Certificate{
SerialNumber: big.NewInt(certSerial),
Subject: pkix.Name{CommonName: "localhost"},
SignatureAlgorithm: x509.SHA256WithRSA,
NotBefore: notBefore,
NotAfter: notAfter,
BasicConstraintsValid: true,
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyAgreement | x509.KeyUsageKeyEncipherment | x509.KeyUsageDataEncipherment,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
}
derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &key.PublicKey, key)
if err != nil {
return
}
certPEM = pem.EncodeToMemory(
&pem.Block{
Type: "CERTIFICATE",
Bytes: derBytes,
},
)
return
}
func writeTestCert(t *testing.T, ci *fs.ConfigInfo, validity time.Duration) {
keyPEM, certPEM, err := createTestCert(validity)
assert.NoError(t, err, "Cannot create test cert")
err = os.WriteFile(ci.ClientCert, certPEM, 0666)
assert.NoError(t, err, "Failed to write cert")
err = os.WriteFile(ci.ClientKey, keyPEM, 0666)
assert.NoError(t, err, "Failed to write key")
}
func TestCertificates(t *testing.T) {
startTime := time.Now()
// Starting a TLS server
expectedSerial := int64(0)
ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
cert := r.TLS.PeerCertificates
require.Greater(t, len(cert), 0, "No certificates received")
expectedSerial += 1
assert.Equal(t, expectedSerial, cert[0].SerialNumber.Int64(), "Did not get the correct serial number in certificate")
// Check that the certificate hasn't expired. We cannot use cert validation
// functions because those check for signature as well and our certificates
// are not properly signed
if time.Now().After(cert[0].NotAfter) {
assert.Fail(t, "Certificate expired", "Certificate expires at %s, current time is %s", cert[0].NotAfter.Sub(startTime), time.Since(startTime))
}
// Write some test data to fulfill the request
w.Header().Set("Content-Type", "text/plain")
_, _ = fmt.Fprintln(w, "test data")
}))
defer ts.Close()
// Modify servers config to request a client certificate
// we cannot validate the certificate since we are not properly signing it
ts.TLS.ClientAuth = tls.RequestClientCert
// Set --client-cert and --client-key in config to
// a pair of temp files
// create a test cert/key pair and write it to the files
ctx := context.TODO()
ci := fs.GetConfig(ctx)
// Create a test certificate and write it to a temp file
ci.ClientCert = t.TempDir() + "client.cert"
ci.ClientKey = t.TempDir() + "client.key"
validity := 1 * time.Second
writeTestCert(t, ci, validity)
// Now create the client with the above settings
// we need to disable TLS verification since we don't
// care about server certificate
client := NewClient(ctx)
tt := client.Transport.(*Transport)
tt.TLSClientConfig.InsecureSkipVerify = true
// Now make requests, the first request should be within
// the valid window
_, err := client.Get(ts.URL)
assert.NoError(t, err)
// Wait for twice the valid duration of the certificate so that it has definitely expired
time.Sleep(2 * validity)
// Create a new cert and write it to files
writeTestCert(t, ci, validity)
// The new cert should be auto-loaded before we make this request
_, err = client.Get(ts.URL)
assert.NoError(t, err)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/fshttp/http.go | fs/fshttp/http.go | // Package fshttp contains the common http parts of the config, Transport and Client
package fshttp
import (
"bytes"
"context"
"crypto/tls"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"net"
"net/http"
"net/http/cookiejar"
"net/http/httputil"
"net/url"
"os"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/lib/structs"
"github.com/youmark/pkcs8"
"golang.org/x/net/publicsuffix"
)
const (
separatorReq = ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
separatorResp = "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"
)
var (
transport *Transport
noTransport = new(sync.Once)
cookieJar, _ = cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
logMutex sync.Mutex
// UnixSocketConfig describes the option to configure the path to a unix domain socket to connect to
UnixSocketConfig = fs.Option{
Name: "unix_socket",
Help: "Path to a unix domain socket to dial to, instead of opening a TCP connection directly",
Advanced: true,
Default: "",
}
)
// ResetTransport resets the existing transport, allowing it to take new settings.
// Should only be used for testing.
func ResetTransport() {
noTransport = new(sync.Once)
}
// LoadKeyPair loads a TLS certificate and private key from PEM-encoded files,
// with extended support for encrypted private keys.
//
// This function is designed as a robust replacement for tls.X509KeyPair,
// providing the same core functionality but adding support for
// password-protected private keys.
//
// The certificate file (certFile) must contain one or more PEM-encoded
// certificates. The first certificate is treated as the leaf certificate, and
// any subsequent certificates are treated as its chain.
//
// The key file (keyFile) must contain a PEM-encoded private key. Supported
// formats are:
//
// - Unencrypted PKCS#1 ("BEGIN RSA PRIVATE KEY")
// - Unencrypted PKCS#8 ("BEGIN PRIVATE KEY")
// - Encrypted PKCS#8 ("BEGIN ENCRYPTED PRIVATE KEY")
// - Legacy PEM encryption (e.g., DEK-Info headers), which are automatically detected.
//
// The password parameter is used to decrypt the private key. If the
// key is not encrypted, this parameter is ignored and can be an empty
// string. The password should be an obscured string.
//
// On success, it returns a fully populated tls.Certificate struct, including the
// Leaf certificate field.
func LoadKeyPair(certFile, keyFile, password string) (cert tls.Certificate, err error) {
certPEM, err := os.ReadFile(certFile)
if err != nil {
return cert, fmt.Errorf("read cert: %w", err)
}
keyPEM, err := os.ReadFile(keyFile)
if err != nil {
return cert, fmt.Errorf("read key: %w", err)
}
if password != "" {
password, err = obscure.Reveal(password)
if err != nil {
return cert, fmt.Errorf("reveal key password: %w", err)
}
}
// Fast path: unencrypted PKCS#1/PKCS#8
cert, err = tls.X509KeyPair(certPEM, keyPEM)
if err == nil {
if len(cert.Certificate) == 0 {
return cert, errors.New("no certificates parsed")
}
leaf, err := x509.ParseCertificate(cert.Certificate[0])
if err != nil {
return cert, fmt.Errorf("parse leaf: %w", err)
}
cert.Leaf = leaf
return cert, nil
}
// Decrypt / parse key manually
block, rest := pem.Decode(keyPEM)
if block == nil {
return cert, errors.New("no PEM block in key")
}
if len(rest) != 0 {
fs.Debugf(nil, "Trailing data (%d bytes) in key PEM loaded from %q", len(rest), keyFile)
}
var privKey any
switch {
case block.Type == "ENCRYPTED PRIVATE KEY":
if password == "" {
return cert, errors.New("key is encrypted but no --client-pass provided")
}
privKey, err = pkcs8.ParsePKCS8PrivateKey(block.Bytes, []byte(password))
if err != nil {
return cert, fmt.Errorf("parse encrypted PKCS#8: %w", err)
}
case x509.IsEncryptedPEMBlock(block): //nolint:staticcheck // this is Legacy and insecure
if password == "" {
return cert, errors.New("key is encrypted but no --client-pass provided")
}
der, err := x509.DecryptPEMBlock(block, []byte(password)) //nolint:staticcheck // this is Legacy and insecure
if err != nil {
return cert, fmt.Errorf("decrypt PEM key: %w", err)
}
// Try PKCS#8, then RSA PKCS#1, then EC
if k, kerr1 := x509.ParsePKCS8PrivateKey(der); kerr1 == nil {
privKey = k
} else if k, kerr2 := x509.ParsePKCS1PrivateKey(der); kerr2 == nil {
privKey = k
} else if k, kerr3 := x509.ParseECPrivateKey(der); kerr3 == nil {
privKey = k
} else {
return cert, fmt.Errorf("parse decrypted key: pkcs8: %v, pkcs1: %v, ec: %v", kerr1, kerr2, kerr3)
}
default:
// Unencrypted specific types
switch block.Type {
case "PRIVATE KEY":
k, kerr := x509.ParsePKCS8PrivateKey(block.Bytes)
if kerr != nil {
return cert, fmt.Errorf("parse PKCS#8: %w", kerr)
}
privKey = k
case "RSA PRIVATE KEY":
k, kerr := x509.ParsePKCS1PrivateKey(block.Bytes)
if kerr != nil {
return cert, fmt.Errorf("parse PKCS#1 RSA: %w", kerr)
}
privKey = k
case "EC PRIVATE KEY":
k, kerr := x509.ParseECPrivateKey(block.Bytes)
if kerr != nil {
return cert, fmt.Errorf("parse EC: %w", kerr)
}
privKey = k
default:
return cert, fmt.Errorf("unsupported key type %q", block.Type)
}
}
// Build cert chain from PEM
var certDERs [][]byte
for rest := certPEM; ; {
var b *pem.Block
b, rest = pem.Decode(rest)
if b == nil {
break
}
if b.Type == "CERTIFICATE" {
certDERs = append(certDERs, b.Bytes)
}
}
if len(certDERs) == 0 {
return cert, fmt.Errorf("no CERTIFICATE blocks in %s", certFile)
}
cert = tls.Certificate{
Certificate: certDERs,
PrivateKey: privKey,
}
// Leaf is always the first certificate
cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
if err != nil {
return cert, fmt.Errorf("parse leaf: %w", err)
}
return cert, nil
}
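// Illustrative usage sketch (not part of the original file): loading a
// password-protected client key pair. The file names are placeholders and the
// password must be an obscured string, as documented above.
//
//	cert, err := fshttp.LoadKeyPair("client.crt", "client.key", obscure.MustObscure("secret"))
//	if err != nil {
//		return err
//	}
//	tlsConfig := &tls.Config{Certificates: []tls.Certificate{cert}}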
// NewTransportCustom returns an http.RoundTripper with the correct timeouts.
// The customize function is called if set to give the caller an opportunity to
// customize any defaults in the Transport.
func NewTransportCustom(ctx context.Context, customize func(*http.Transport)) *Transport {
ci := fs.GetConfig(ctx)
// Start with a sensible set of defaults then override.
// This also means we get new stuff when it gets added to go
t := new(http.Transport)
structs.SetDefaults(t, http.DefaultTransport.(*http.Transport))
if ci.HTTPProxy != "" {
proxyURL, err := url.Parse(ci.HTTPProxy)
if err != nil {
t.Proxy = func(*http.Request) (*url.URL, error) {
return nil, fmt.Errorf("failed to set --http-proxy from %q: %w", ci.HTTPProxy, err)
}
} else {
t.Proxy = http.ProxyURL(proxyURL)
}
} else {
t.Proxy = http.ProxyFromEnvironment
}
t.MaxIdleConnsPerHost = 2 * (ci.Checkers + ci.Transfers + 1)
t.MaxIdleConns = 2 * t.MaxIdleConnsPerHost
t.TLSHandshakeTimeout = time.Duration(ci.ConnectTimeout)
t.ResponseHeaderTimeout = time.Duration(ci.Timeout)
t.DisableKeepAlives = ci.DisableHTTPKeepAlives
// TLS Config
t.TLSClientConfig = &tls.Config{
InsecureSkipVerify: ci.InsecureSkipVerify,
}
// Load client certs
if ci.ClientCert != "" || ci.ClientKey != "" {
if ci.ClientCert == "" || ci.ClientKey == "" {
fs.Fatalf(nil, "Both --client-cert and --client-key must be set")
}
cert, err := LoadKeyPair(ci.ClientCert, ci.ClientKey, ci.ClientPass)
if err != nil {
fs.Fatalf(nil, "Failed to load --client-cert/--client-key pair: %v", err)
}
t.TLSClientConfig.Certificates = []tls.Certificate{cert}
}
// Load CA certs
if len(ci.CaCert) != 0 {
caCertPool := x509.NewCertPool()
for _, cert := range ci.CaCert {
caCert, err := os.ReadFile(cert)
if err != nil {
fs.Fatalf(nil, "Failed to read --ca-cert file %q : %v", cert, err)
}
ok := caCertPool.AppendCertsFromPEM(caCert)
if !ok {
fs.Fatalf(nil, "Failed to add certificates from --ca-cert file %q", cert)
}
}
t.TLSClientConfig.RootCAs = caCertPool
}
t.DisableCompression = ci.NoGzip
t.DialContext = func(reqCtx context.Context, network, addr string) (net.Conn, error) {
return NewDialer(ctx).DialContext(reqCtx, network, addr)
}
t.IdleConnTimeout = 60 * time.Second
t.ExpectContinueTimeout = time.Duration(ci.ExpectContinueTimeout)
if ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpAuth|fs.DumpRequests|fs.DumpResponses) != 0 {
fs.Debugf(nil, "You have specified to dump information. Please be noted that the "+
"Accept-Encoding as shown may not be correct in the request and the response may not show "+
"Content-Encoding if the go standard libraries auto gzip encoding was in effect. In this case"+
" the body of the request will be gunzipped before showing it.")
}
if ci.DisableHTTP2 {
t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
}
// customize the transport if required
if customize != nil {
customize(t)
}
// Wrap that http.Transport in our own transport
return newTransport(ci, t)
}
// NewTransport returns an http.RoundTripper with the correct timeouts
func NewTransport(ctx context.Context) *Transport {
(*noTransport).Do(func() {
transport = NewTransportCustom(ctx, nil)
})
return transport
}
// NewClient returns an http.Client with the correct timeouts
func NewClient(ctx context.Context) *http.Client {
return NewClientCustom(ctx, nil)
}
// NewClientCustom returns an http.Client with the correct timeouts.
// It allows customizing the transport, using NewTransportCustom.
func NewClientCustom(ctx context.Context, customize func(*http.Transport)) *http.Client {
ci := fs.GetConfig(ctx)
client := &http.Client{
Transport: NewTransportCustom(ctx, customize),
}
if ci.Cookie {
client.Jar = cookieJar
}
return client
}
// NewClientWithUnixSocket returns an http.Client with the correct timeout.
// It internally uses NewClientCustom with a custom dialer connecting to
// the specified unix domain socket.
func NewClientWithUnixSocket(ctx context.Context, path string) *http.Client {
return NewClientCustom(ctx, func(t *http.Transport) {
t.DialContext = func(reqCtx context.Context, network, addr string) (net.Conn, error) {
return NewDialer(ctx).DialContext(reqCtx, "unix", path)
}
})
}
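// Illustrative usage sketch (not part of the original file): customizing the
// transport via NewClientCustom, for example to cap the response header wait.
// The 30 second value is a placeholder.
//
//	client := fshttp.NewClientCustom(ctx, func(t *http.Transport) {
//		t.ResponseHeaderTimeout = 30 * time.Second
//	})
//	resp, err := client.Get("https://example.com/")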
// Transport is our http Transport which wraps an http.Transport
// * Sets the User Agent
// * Does logging
// * Updates metrics
type Transport struct {
*http.Transport
ci *fs.ConfigInfo
dump fs.DumpFlags
filterRequest func(req *http.Request)
userAgent string
headers []*fs.HTTPOption
metrics *Metrics
// Mutex for serializing attempts at reloading the certificates
reloadMutex sync.Mutex
}
// newTransport wraps the http.Transport passed in and logs all
// roundtrips including the body if logBody is set.
func newTransport(ci *fs.ConfigInfo, transport *http.Transport) *Transport {
return &Transport{
Transport: transport,
ci: ci,
dump: ci.Dump,
userAgent: ci.UserAgent,
headers: ci.Headers,
metrics: DefaultMetrics,
}
}
// SetRequestFilter sets a filter to be used on each request
func (t *Transport) SetRequestFilter(f func(req *http.Request)) {
t.filterRequest = f
}
// A mutex to protect this map
var checkedHostMu sync.RWMutex
// A map of servers we have checked for time
var checkedHost = make(map[string]struct{}, 1)
// Check the server time is the same as ours, once for each server
func checkServerTime(req *http.Request, resp *http.Response) {
host := req.URL.Host
if req.Host != "" {
host = req.Host
}
checkedHostMu.RLock()
_, ok := checkedHost[host]
checkedHostMu.RUnlock()
if ok {
return
}
dateString := resp.Header.Get("Date")
if dateString == "" {
return
}
date, err := http.ParseTime(dateString)
if err != nil {
fs.Debugf(nil, "Couldn't parse Date: from server %s: %q: %v", host, dateString, err)
return
}
dt := time.Since(date)
const window = 5 * 60 * time.Second
if dt > window || dt < -window {
fs.Logf(nil, "Time may be set wrong - time from %q is %v different from this computer", host, dt)
}
checkedHostMu.Lock()
checkedHost[host] = struct{}{}
checkedHostMu.Unlock()
}
// cleanAuth gets rid of one authBuf header within the first 4k
func cleanAuth(buf, authBuf []byte) []byte {
// Find how much buffer to check
n := min(len(buf), 4096)
// See if there is an Authorization: header
i := bytes.Index(buf[:n], authBuf)
if i < 0 {
return buf
}
i += len(authBuf)
// Overwrite the next 4 chars with 'X'
for j := 0; i < len(buf) && j < 4; j++ {
if buf[i] == '\n' {
break
}
buf[i] = 'X'
i++
}
// Snip out to the next '\n'
j := bytes.IndexByte(buf[i:], '\n')
if j < 0 {
return buf[:i]
}
n = copy(buf[i:], buf[i+j:])
return buf[:i+n]
}
var authBufs = [][]byte{
[]byte("Authorization: "),
[]byte("X-Auth-Token: "),
}
// cleanAuths gets rid of all the possible Auth headers
func cleanAuths(buf []byte) []byte {
for _, authBuf := range authBufs {
buf = cleanAuth(buf, authBuf)
}
return buf
}
var expireWindow = 30 * time.Second
func isCertificateExpired(cc *tls.Config) bool {
return len(cc.Certificates) > 0 && cc.Certificates[0].Leaf != nil && time.Until(cc.Certificates[0].Leaf.NotAfter) < expireWindow
}
func (t *Transport) reloadCertificates() {
t.reloadMutex.Lock()
defer t.reloadMutex.Unlock()
// Check that the certificate is expired before trying to reload it
// it might have been reloaded while we were waiting to lock the mutex
if !isCertificateExpired(t.TLSClientConfig) {
return
}
cert, err := LoadKeyPair(t.ci.ClientCert, t.ci.ClientKey, t.ci.ClientPass)
if err != nil {
fs.Fatalf(nil, "Failed to load --client-cert/--client-key pair: %v", err)
}
t.TLSClientConfig.Certificates = []tls.Certificate{cert}
}
// RoundTrip implements the RoundTripper interface.
func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
// Check if certificates are being used and the certificates are expired
if isCertificateExpired(t.TLSClientConfig) {
t.reloadCertificates()
}
// Limit transactions per second if required
accounting.LimitTPS(req.Context())
// Force user agent
req.Header.Set("User-Agent", t.userAgent)
// Set user defined headers
for _, option := range t.headers {
req.Header.Set(option.Key, option.Value)
}
// Filter the request if required
if t.filterRequest != nil {
t.filterRequest(req)
}
// Logf request
if t.dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpAuth|fs.DumpRequests|fs.DumpResponses) != 0 {
buf, _ := httputil.DumpRequestOut(req, t.dump&(fs.DumpBodies|fs.DumpRequests) != 0)
if t.dump&fs.DumpAuth == 0 {
buf = cleanAuths(buf)
}
logMutex.Lock()
fs.Debugf(nil, "%s", separatorReq)
fs.Debugf(nil, "%s (req %p)", "HTTP REQUEST", req)
fs.Debugf(nil, "%s", string(buf))
fs.Debugf(nil, "%s", separatorReq)
logMutex.Unlock()
}
// Do round trip
resp, err = t.Transport.RoundTrip(req)
// Logf response
if t.dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpAuth|fs.DumpRequests|fs.DumpResponses) != 0 {
logMutex.Lock()
fs.Debugf(nil, "%s", separatorResp)
fs.Debugf(nil, "%s (req %p)", "HTTP RESPONSE", req)
if err != nil {
fs.Debugf(nil, "Error: %v", err)
} else {
buf, _ := httputil.DumpResponse(resp, t.dump&(fs.DumpBodies|fs.DumpResponses) != 0)
fs.Debugf(nil, "%s", string(buf))
}
fs.Debugf(nil, "%s", separatorResp)
logMutex.Unlock()
}
// Update metrics
t.metrics.onResponse(req, resp)
if err == nil {
checkServerTime(req, resp)
}
return resp, err
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/fserrors/retriable_errors_windows.go | fs/fserrors/retriable_errors_windows.go | //go:build windows
package fserrors
import (
"syscall"
)
// Windows error code list
// https://docs.microsoft.com/en-us/windows/win32/winsock/windows-sockets-error-codes-2
const (
WSAENETDOWN syscall.Errno = 10050
WSAENETUNREACH syscall.Errno = 10051
WSAENETRESET syscall.Errno = 10052
WSAECONNABORTED syscall.Errno = 10053
WSAECONNRESET syscall.Errno = 10054
WSAENOBUFS syscall.Errno = 10055
WSAENOTCONN syscall.Errno = 10057
WSAESHUTDOWN syscall.Errno = 10058
WSAETIMEDOUT syscall.Errno = 10060
WSAECONNREFUSED syscall.Errno = 10061
WSAEHOSTDOWN syscall.Errno = 10064
WSAEHOSTUNREACH syscall.Errno = 10065
WSAEDISCON syscall.Errno = 10101
WSAEREFUSED syscall.Errno = 10112
WSAHOST_NOT_FOUND syscall.Errno = 11001 //nolint:revive // Don't include revive when running golangci-lint to avoid var-naming: don't use ALL_CAPS in Go names; use CamelCase (revive)
WSATRY_AGAIN syscall.Errno = 11002 //nolint:revive // Don't include revive when running golangci-lint to avoid var-naming: don't use ALL_CAPS in Go names; use CamelCase (revive)
)
func init() {
// append some lower level errors since the standardized ones
// don't seem to happen
retriableErrors = append(retriableErrors,
syscall.WSAECONNRESET,
WSAENETDOWN,
WSAENETUNREACH,
WSAENETRESET,
WSAECONNABORTED,
WSAECONNRESET,
WSAENOBUFS,
WSAENOTCONN,
WSAESHUTDOWN,
WSAETIMEDOUT,
WSAECONNREFUSED,
WSAEHOSTDOWN,
WSAEHOSTUNREACH,
WSAEDISCON,
WSAEREFUSED,
WSAHOST_NOT_FOUND,
WSATRY_AGAIN,
syscall.ERROR_HANDLE_EOF,
syscall.ERROR_NETNAME_DELETED,
syscall.ERROR_BROKEN_PIPE,
)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/fserrors/error.go | fs/fserrors/error.go | // Package fserrors provides errors and error handling
package fserrors
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"slices"
"strings"
"time"
liberrors "github.com/rclone/rclone/lib/errors"
)
// Must be satisfied for errors.Is/errors.As/Errors.Unwrap
type unwrapper interface {
Unwrap() error
}
// Retrier is an optional interface for error as to whether the
// operation should be retried at a high level.
//
// This should be returned from Update or Put methods as required
type Retrier interface {
error
Retry() bool
}
// retryError is a type of error
type retryError string
// Error interface
func (r retryError) Error() string {
return string(r)
}
// Retry interface
func (r retryError) Retry() bool {
return true
}
// Check interfaces
var _ Retrier = retryError("")
// RetryErrorf makes an error which indicates it would like to be retried
func RetryErrorf(format string, a ...any) error {
return retryError(fmt.Sprintf(format, a...))
}
// wrappedRetryError is an error wrapped so it will satisfy the
// Retrier interface and return true
type wrappedRetryError struct {
error
}
// Retry interface
func (err wrappedRetryError) Retry() bool {
return true
}
// Check interfaces
var _ Retrier = wrappedRetryError{error(nil)}
var _ unwrapper = wrappedRetryError{}
// RetryError makes an error which indicates it would like to be retried
func RetryError(err error) error {
if err == nil {
err = errors.New("needs retry")
}
return wrappedRetryError{err}
}
func (err wrappedRetryError) Unwrap() error {
return err.error
}
// IsRetryError returns true if err conforms to the Retry interface
// and calling the Retry method returns true.
func IsRetryError(err error) (isRetry bool) {
liberrors.Walk(err, func(err error) bool {
if r, ok := err.(Retrier); ok {
isRetry = r.Retry()
return true
}
return false
})
return
}
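// Illustrative usage sketch (not part of the original file): a backend could
// mark a transient failure for a high level retry and the sync machinery can
// then detect it. errTransient is a placeholder error.
//
//	err := fserrors.RetryError(errTransient)
//	if fserrors.IsRetryError(err) {
//		// schedule the operation for another attempt
//	}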
// Fataler is an optional interface for error as to whether the
// operation should cause the entire operation to finish immediately.
//
// This should be returned from Update or Put methods as required
type Fataler interface {
error
Fatal() bool
}
// wrappedFatalError is an error wrapped so it will satisfy the
// Retrier interface and return true
type wrappedFatalError struct {
error
}
// Fatal interface
func (err wrappedFatalError) Fatal() bool {
return true
}
// Check interfaces
var _ Fataler = wrappedFatalError{error(nil)}
var _ unwrapper = wrappedFatalError{}
// FatalError makes an error which indicates it is a fatal error and
// the sync should stop.
func FatalError(err error) error {
if err == nil {
err = errors.New("fatal error")
}
return wrappedFatalError{err}
}
func (err wrappedFatalError) Unwrap() error {
return err.error
}
// IsFatalError returns true if err conforms to the Fatal interface
// and calling the Fatal method returns true.
func IsFatalError(err error) (isFatal bool) {
liberrors.Walk(err, func(err error) bool {
if r, ok := err.(Fataler); ok {
isFatal = r.Fatal()
return true
}
return false
})
return
}
// NoRetrier is an optional interface for error as to whether the
// operation should not be retried at a high level.
//
// If only NoRetry errors are returned in a sync then the sync won't
// be retried.
//
// This should be returned from Update or Put methods as required
type NoRetrier interface {
error
NoRetry() bool
}
// wrappedNoRetryError is an error wrapped so it will satisfy the
// Retrier interface and return true
type wrappedNoRetryError struct {
error
}
// NoRetry interface
func (err wrappedNoRetryError) NoRetry() bool {
return true
}
// Check interfaces
var _ NoRetrier = wrappedNoRetryError{error(nil)}
var _ unwrapper = wrappedNoRetryError{}
// NoRetryError makes an error which indicates the sync shouldn't be
// retried.
func NoRetryError(err error) error {
return wrappedNoRetryError{err}
}
func (err wrappedNoRetryError) Unwrap() error {
return err.error
}
// IsNoRetryError returns true if err conforms to the NoRetry
// interface and calling the NoRetry method returns true.
func IsNoRetryError(err error) (isNoRetry bool) {
liberrors.Walk(err, func(err error) bool {
if r, ok := err.(NoRetrier); ok {
isNoRetry = r.NoRetry()
return true
}
return false
})
return
}
// NoLowLevelRetrier is an optional interface for error as to whether
// the operation should not be retried at a low level.
//
// NoLowLevelRetry errors won't be retried by low level retry loops.
type NoLowLevelRetrier interface {
error
NoLowLevelRetry() bool
}
// wrappedNoLowLevelRetryError is an error wrapped so it will satisfy the
// NoLowLevelRetrier interface and return true
type wrappedNoLowLevelRetryError struct {
error
}
// NoLowLevelRetry interface
func (err wrappedNoLowLevelRetryError) NoLowLevelRetry() bool {
return true
}
// Check interfaces
var _ NoLowLevelRetrier = wrappedNoLowLevelRetryError{error(nil)}
var _ unwrapper = wrappedNoLowLevelRetryError{}
// NoLowLevelRetryError makes an error which indicates the sync
// shouldn't be low level retried.
func NoLowLevelRetryError(err error) error {
return wrappedNoLowLevelRetryError{err}
}
// Unwrap returns the underlying error
func (err wrappedNoLowLevelRetryError) Unwrap() error {
return err.error
}
// IsNoLowLevelRetryError returns true if err conforms to the NoLowLevelRetry
// interface and calling the NoLowLevelRetry method returns true.
func IsNoLowLevelRetryError(err error) (isNoLowLevelRetry bool) {
liberrors.Walk(err, func(err error) bool {
if r, ok := err.(NoLowLevelRetrier); ok {
isNoLowLevelRetry = r.NoLowLevelRetry()
return true
}
return false
})
return
}
// RetryAfter is an optional interface for error as to whether the
// operation should be retried after a given delay
//
// This should be returned from Update or Put methods as required and
// will cause the entire sync to be retried after a delay.
type RetryAfter interface {
error
RetryAfter() time.Time
}
// ErrorRetryAfter is an error which expresses a time that should be
// waited for until trying again
type ErrorRetryAfter time.Time
// NewErrorRetryAfter returns an ErrorRetryAfter with the given
// duration as an endpoint
func NewErrorRetryAfter(d time.Duration) ErrorRetryAfter {
return ErrorRetryAfter(time.Now().Add(d))
}
// Error returns the textual version of the error
func (e ErrorRetryAfter) Error() string {
return fmt.Sprintf("try again after %v (%v)", time.Time(e).Format(time.RFC3339Nano), time.Until(time.Time(e)))
}
// RetryAfter returns the time the operation should be retried at or
// after
func (e ErrorRetryAfter) RetryAfter() time.Time {
return time.Time(e)
}
// Check interfaces
var _ RetryAfter = ErrorRetryAfter{}
// RetryAfterErrorTime returns the time that the RetryAfter error
// indicates or a Zero time.Time
func RetryAfterErrorTime(err error) (retryAfter time.Time) {
liberrors.Walk(err, func(err error) bool {
if r, ok := err.(RetryAfter); ok {
retryAfter = r.RetryAfter()
return true
}
return false
})
return
}
// IsRetryAfterError returns true if err is an ErrorRetryAfter
func IsRetryAfterError(err error) bool {
return !RetryAfterErrorTime(err).IsZero()
}
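// Illustrative usage sketch (not part of the original file): signalling that
// an operation should be retried after a delay, for example in response to a
// rate limit. The 30 second delay is a placeholder.
//
//	err := fserrors.NewErrorRetryAfter(30 * time.Second)
//	if fserrors.IsRetryAfterError(err) {
//		when := fserrors.RetryAfterErrorTime(err)
//		_ = when // wait until this time before retrying
//	}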
// CountableError is an optional interface for error. It stores a boolean
// which signifies if the error has already been counted or not
type CountableError interface {
error
Count()
IsCounted() bool
}
// wrappedCountableError is an error wrapped so it will satisfy the
// Retrier interface and return true
type wrappedCountableError struct {
error
isCounted bool
}
// CountableError interface
func (err *wrappedCountableError) Count() {
err.isCounted = true
}
// CountableError interface
func (err *wrappedCountableError) IsCounted() bool {
return err.isCounted
}
func (err wrappedCountableError) Unwrap() error {
return err.error
}
// IsCounted returns true if err conforms to the CountableError interface
// and has already been counted
func IsCounted(err error) bool {
if r, ok := err.(CountableError); ok {
return r.IsCounted()
}
return false
}
// Count sets the isCounted variable on the error if it conforms to the
// CountableError interface
func Count(err error) {
if r, ok := err.(CountableError); ok {
r.Count()
}
}
// Check interface
var _ CountableError = &wrappedCountableError{error: error(nil)}
var _ unwrapper = wrappedCountableError{}
// FsError makes an error which can keep a record that it is already counted
// or not
func FsError(err error) error {
if err == nil {
err = errors.New("countable error")
}
return &wrappedCountableError{error: err}
}
// Cause is a souped up errors.Cause which can unwrap some standard
// library errors too. It returns true if any of the intermediate
// errors had a Timeout() or Temporary() method which returned true.
func Cause(cause error) (retriable bool, err error) {
liberrors.Walk(cause, func(c error) bool {
// Check for net error Timeout()
if x, ok := c.(interface {
Timeout() bool
}); ok && x.Timeout() {
retriable = true
}
// Check for net error Temporary()
if x, ok := c.(interface {
Temporary() bool
}); ok && x.Temporary() {
retriable = true
}
err = c
return false
})
return
}
// retriableErrorStrings is a list of phrases which when we find it
// in an error, we know it is a networking error which should be
// retried.
//
// This is incredibly ugly - if only errors.Cause worked for all
// errors and all errors were exported from the stdlib.
var retriableErrorStrings = []string{
"use of closed network connection", // internal/poll/fd.go
"unexpected EOF reading trailer", // net/http/transfer.go
"transport connection broken", // net/http/transport.go
"http: ContentLength=", // net/http/transfer.go
"server closed idle connection", // net/http/transport.go
"bad record MAC", // crypto/tls/alert.go
"stream error:", // net/http/h2_bundle.go
"tls: use of closed connection", // crypto/tls/conn.go
}
// Errors which indicate networking errors which should be retried
//
// These are added to in retriable_errors*.go
var retriableErrors = []error{
io.EOF,
io.ErrUnexpectedEOF,
}
// ShouldRetry looks at an error and tries to work out if retrying the
// operation that caused it would be a good idea. It returns true if
// the error implements Timeout() or Temporary() or if the error
// indicates a premature closing of the connection.
func ShouldRetry(err error) bool {
if err == nil {
return false
}
// If error has been marked to NoLowLevelRetry then don't retry
if IsNoLowLevelRetryError(err) {
return false
}
// Find root cause if available
retriable, err := Cause(err)
if retriable {
return true
}
// Check if it is a retriable error
if slices.Contains(retriableErrors, err) {
return true
}
// Check error strings (yuch!) too
errString := err.Error()
for _, phrase := range retriableErrorStrings {
if strings.Contains(errString, phrase) {
return true
}
}
return false
}
// ShouldRetryHTTP returns a boolean as to whether this resp deserves to be retried.
// It checks to see if the HTTP response code is in the slice
// retryErrorCodes.
func ShouldRetryHTTP(resp *http.Response, retryErrorCodes []int) bool {
if resp == nil {
return false
}
return slices.Contains(retryErrorCodes, resp.StatusCode)
}
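// Illustrative usage sketch (not part of the original file): a backend could
// combine ShouldRetry and ShouldRetryHTTP when deciding whether to retry a
// request at a low level. err and resp come from a previous request and the
// status codes are placeholders.
//
//	var retryErrorCodes = []int{429, 500, 502, 503, 504}
//	retry := fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes)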
// ContextError checks to see if ctx is in error.
//
// If it is in error then it overwrites *perr with the context error
// if *perr was nil and returns true.
//
// Otherwise it returns false.
func ContextError(ctx context.Context, perr *error) bool {
if ctxErr := ctx.Err(); ctxErr != nil {
if *perr == nil {
*perr = ctxErr
}
return true
}
return false
}
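// Illustrative usage sketch (not part of the original file): propagating a
// context cancellation from a retry loop while preserving any earlier error.
//
//	if fserrors.ContextError(ctx, &err) {
//		return err
//	}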
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/fserrors/error_test.go | fs/fserrors/error_test.go | package fserrors
import (
"context"
"errors"
"fmt"
"io"
"net/url"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// withMessage wraps an error with a message
//
// This is for backwards compatibility with the now removed github.com/pkg/errors
type withMessage struct {
cause error
msg string
}
func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
func (w *withMessage) Cause() error { return w.cause }
// wrap returns an error annotating err with a stack trace
// at the point Wrap is called, and the supplied message.
// If err is nil, Wrap returns nil.
func wrap(err error, message string) error {
if err == nil {
return nil
}
return &withMessage{
cause: err,
msg: message,
}
}
var errUseOfClosedNetworkConnection = errors.New("use of closed network connection")
type myError1 struct {
Err error
}
func (e myError1) Error() string { return e.Err.Error() }
type myError2 struct {
Err error
}
func (e *myError2) Error() string {
if e == nil {
return "myError2(nil)"
}
if e.Err == nil {
return "myError2{Err: nil}"
}
return e.Err.Error()
}
type myError3 struct {
Err int
}
func (e *myError3) Error() string { return "hello" }
type myError4 struct {
e error
}
func (e *myError4) Error() string { return e.e.Error() }
type myError5 struct{}
func (e *myError5) Error() string { return "" }
func (e *myError5) Temporary() bool { return true }
type errorCause struct {
e error
}
func (e *errorCause) Error() string { return fmt.Sprintf("%#v", e) }
func (e *errorCause) Cause() error { return e.e }
func TestCause(t *testing.T) {
e3 := &myError3{3}
e4 := &myError4{io.EOF}
e5 := &myError5{}
eNil1 := &myError2{nil}
eNil2 := &myError2{Err: (*myError2)(nil)}
errPotato := errors.New("potato")
nilCause1 := &errorCause{nil}
nilCause2 := &errorCause{(*myError2)(nil)}
for i, test := range []struct {
err error
wantRetriable bool
wantErr error
}{
{nil, false, nil},
{errPotato, false, errPotato},
{fmt.Errorf("potato: %w", errPotato), false, errPotato},
{fmt.Errorf("potato2: %w", wrap(errPotato, "potato")), false, errPotato},
{errUseOfClosedNetworkConnection, false, errUseOfClosedNetworkConnection},
{eNil1, false, eNil1},
{eNil2, false, eNil2.Err},
{myError1{io.EOF}, false, io.EOF},
{&myError2{io.EOF}, false, io.EOF},
{e3, false, e3},
{e4, false, e4},
{e5, true, e5},
{&errorCause{errPotato}, false, errPotato},
{nilCause1, false, nilCause1},
{nilCause2, false, nilCause2.e},
} {
gotRetriable, gotErr := Cause(test.err)
what := fmt.Sprintf("test #%d: %v", i, test.err)
assert.Equal(t, test.wantErr, gotErr, what)
assert.Equal(t, test.wantRetriable, gotRetriable, what)
}
}
func TestShouldRetry(t *testing.T) {
for i, test := range []struct {
err error
want bool
}{
{nil, false},
{errors.New("potato"), false},
{fmt.Errorf("connection: %w", errUseOfClosedNetworkConnection), true},
{io.EOF, true},
{io.ErrUnexpectedEOF, true},
{&url.Error{Op: "post", URL: "/", Err: io.EOF}, true},
{&url.Error{Op: "post", URL: "/", Err: errUseOfClosedNetworkConnection}, true},
{&url.Error{Op: "post", URL: "/", Err: fmt.Errorf("net/http: HTTP/1.x transport connection broken: %v", fmt.Errorf("http: ContentLength=%d with Body length %d", 100663336, 99590598))}, true},
} {
got := ShouldRetry(test.err)
assert.Equal(t, test.want, got, fmt.Sprintf("test #%d: %v", i, test.err))
}
}
func TestRetryAfter(t *testing.T) {
e := NewErrorRetryAfter(time.Second)
after := e.RetryAfter()
dt := time.Until(after)
assert.True(t, dt >= 900*time.Millisecond && dt <= 1100*time.Millisecond)
assert.True(t, IsRetryAfterError(e))
assert.False(t, IsRetryAfterError(io.EOF))
assert.Equal(t, time.Time{}, RetryAfterErrorTime(io.EOF))
assert.False(t, IsRetryAfterError(nil))
assert.Contains(t, e.Error(), "try again after")
t0 := time.Now()
err := fmt.Errorf("potato: %w", ErrorRetryAfter(t0))
assert.Equal(t, t0, RetryAfterErrorTime(err))
assert.True(t, IsRetryAfterError(err))
assert.Contains(t, e.Error(), "try again after")
}
func TestContextError(t *testing.T) {
var err = io.EOF
ctx, cancel := context.WithCancel(context.Background())
assert.False(t, ContextError(ctx, &err))
assert.Equal(t, io.EOF, err)
cancel()
assert.True(t, ContextError(ctx, &err))
assert.Equal(t, io.EOF, err)
err = nil
assert.True(t, ContextError(ctx, &err))
assert.Equal(t, context.Canceled, err)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/fserrors/retriable_errors.go | fs/fserrors/retriable_errors.go | //go:build !plan9
package fserrors
import (
"syscall"
)
func init() {
retriableErrors = append(retriableErrors,
syscall.EPIPE,
syscall.ETIMEDOUT,
syscall.ECONNREFUSED,
syscall.EHOSTDOWN,
syscall.EHOSTUNREACH,
syscall.ECONNABORTED,
syscall.EAGAIN,
syscall.EWOULDBLOCK,
syscall.ECONNRESET,
)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/fserrors/enospc_error_notsupported.go | fs/fserrors/enospc_error_notsupported.go | //go:build plan9
package fserrors
// IsErrNoSpace on plan9 returns false because
// plan9 does not support syscall.ENOSPC error.
func IsErrNoSpace(cause error) (isNoSpc bool) {
isNoSpc = false
return
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/fserrors/enospc_error.go | fs/fserrors/enospc_error.go | //go:build !plan9
package fserrors
import (
"syscall"
liberrors "github.com/rclone/rclone/lib/errors"
)
// IsErrNoSpace checks a possibly wrapped error to
// see if it contains a ENOSPC error
func IsErrNoSpace(cause error) (isNoSpc bool) {
liberrors.Walk(cause, func(c error) bool {
if c == syscall.ENOSPC {
isNoSpc = true
return true
}
isNoSpc = false
return false
})
return
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/fserrors/error_syscall_test.go | fs/fserrors/error_syscall_test.go | //go:build !plan9
package fserrors
import (
"fmt"
"net"
"net/url"
"os"
"syscall"
"testing"
"github.com/stretchr/testify/assert"
)
// make a plausible network error with the underlying errno
func makeNetErr(errno syscall.Errno) error {
return &net.OpError{
Op: "write",
Net: "tcp",
Source: &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 123},
Addr: &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8080},
Err: &os.SyscallError{
Syscall: "write",
Err: errno,
},
}
}
func TestWithSyscallCause(t *testing.T) {
for i, test := range []struct {
err error
wantRetriable bool
wantErr error
}{
{makeNetErr(syscall.EAGAIN), true, syscall.EAGAIN},
{makeNetErr(syscall.Errno(123123123)), false, syscall.Errno(123123123)},
} {
gotRetriable, gotErr := Cause(test.err)
what := fmt.Sprintf("test #%d: %v", i, test.err)
assert.Equal(t, test.wantErr, gotErr, what)
assert.Equal(t, test.wantRetriable, gotRetriable, what)
}
}
func TestWithSyscallShouldRetry(t *testing.T) {
for i, test := range []struct {
err error
want bool
}{
{makeNetErr(syscall.EAGAIN), true},
{makeNetErr(syscall.Errno(123123123)), false},
{
wrap(&url.Error{
Op: "post",
URL: "http://localhost/",
Err: makeNetErr(syscall.EPIPE),
}, "potato error"),
true,
},
{
wrap(&url.Error{
Op: "post",
URL: "http://localhost/",
Err: makeNetErr(syscall.Errno(123123123)),
}, "listing error"),
false,
},
} {
got := ShouldRetry(test.err)
assert.Equal(t, test.want, got, fmt.Sprintf("test #%d: %v", i, test.err))
}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/config/crypt.go | fs/config/crypt.go | package config
import (
"bufio"
"bytes"
"context"
"crypto/rand"
"crypto/sha256"
"encoding/base64"
"errors"
"fmt"
"io"
"os"
"os/exec"
"strings"
"golang.org/x/crypto/nacl/secretbox"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/obscure"
)
var (
// Key to use for password en/decryption.
// When nil, no encryption will be used for saving.
configKey []byte
// PasswordPromptOutput is output of prompt for password
PasswordPromptOutput = os.Stderr
// PassConfigKeyForDaemonization if set to true, the configKey
// is obscured with obscure.Obscure and saved to a temp file
// when it is calculated from the password. The path of that
// temp file is then written to the environment variable
// `_RCLONE_CONFIG_KEY_FILE`. If `_RCLONE_CONFIG_KEY_FILE` is
// present, password prompt is skipped and
// `RCLONE_CONFIG_PASS` ignored. For security reasons, the
// temp file is deleted once the configKey is successfully
// loaded. This can be used to pass the configKey to a child
// process.
PassConfigKeyForDaemonization = false
)
// IsEncrypted returns true if the config file is encrypted
func IsEncrypted() bool {
return len(configKey) > 0
}
// Decrypt will automatically decrypt a reader
func Decrypt(b io.ReadSeeker) (io.Reader, error) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
var usingPasswordCommand bool
var usingEnvPassword bool
// Find first non-empty line
r := bufio.NewReader(b)
for {
line, _, err := r.ReadLine()
if err != nil {
if err == io.EOF {
if _, err := b.Seek(0, io.SeekStart); err != nil {
return nil, err
}
return b, nil
}
return nil, err
}
l := strings.TrimSpace(string(line))
if len(l) == 0 || strings.HasPrefix(l, ";") || strings.HasPrefix(l, "#") {
continue
}
// First non-empty or non-comment must be ENCRYPT_V0
if l == "RCLONE_ENCRYPT_V0:" {
break
}
if strings.HasPrefix(l, "RCLONE_ENCRYPT_V") {
return nil, errors.New("unsupported configuration encryption - update rclone for support")
}
// Restore non-seekable plain-text stream to its original state
if _, err := b.Seek(0, io.SeekStart); err != nil {
return io.MultiReader(strings.NewReader(l+"\n"), r), nil
}
return b, nil
}
if len(configKey) == 0 {
pass, err := GetPasswordCommand(ctx)
if err != nil {
return nil, err
}
if pass != "" {
usingPasswordCommand = true
err = SetConfigPassword(pass)
if err != nil {
return nil, fmt.Errorf("incorrect password: %w", err)
}
if len(configKey) == 0 {
return nil, errors.New("unable to decrypt configuration: incorrect password")
}
} else {
usingPasswordCommand = false
envPassword := os.Getenv("RCLONE_CONFIG_PASS")
if envPassword != "" {
usingEnvPassword = true
err := SetConfigPassword(envPassword)
if err != nil {
fs.Errorf(nil, "Using RCLONE_CONFIG_PASS returned: %v", err)
} else {
fs.Debugf(nil, "Using RCLONE_CONFIG_PASS password.")
}
} else {
usingEnvPassword = false
}
}
}
// Encrypted content is base64 encoded.
dec := base64.NewDecoder(base64.StdEncoding, r)
box, err := io.ReadAll(dec)
if err != nil {
return nil, fmt.Errorf("failed to load base64 encoded data: %w", err)
}
if len(box) < 24+secretbox.Overhead {
return nil, errors.New("configuration data too short")
}
var out []byte
for {
if envKeyFile := os.Getenv("_RCLONE_CONFIG_KEY_FILE"); len(envKeyFile) > 0 {
fs.Debugf(nil, "attempting to obtain configKey from temp file %s", envKeyFile)
obscuredKey, err := os.ReadFile(envKeyFile)
if err != nil {
errRemove := os.Remove(envKeyFile)
if errRemove != nil {
return nil, fmt.Errorf("unable to read obscured config key and unable to delete the temp file: %w", err)
}
return nil, fmt.Errorf("unable to read obscured config key: %w", err)
}
errRemove := os.Remove(envKeyFile)
if errRemove != nil {
return nil, fmt.Errorf("unable to delete temp file with configKey: %w", errRemove)
}
configKey = []byte(obscure.MustReveal(string(obscuredKey)))
fs.Debugf(nil, "using _RCLONE_CONFIG_KEY_FILE for configKey")
} else if len(configKey) == 0 {
if usingPasswordCommand {
return nil, errors.New("using --password-command derived password, unable to decrypt configuration")
}
if usingEnvPassword {
return nil, errors.New("using RCLONE_CONFIG_PASS env password, unable to decrypt configuration")
}
if !ci.AskPassword {
return nil, errors.New("unable to decrypt configuration and not allowed to ask for password - set RCLONE_CONFIG_PASS to your configuration password")
}
getConfigPassword("Enter configuration password:")
}
// Nonce is first 24 bytes of the ciphertext
var nonce [24]byte
copy(nonce[:], box[:24])
var key [32]byte
copy(key[:], configKey[:32])
// Attempt to decrypt
var ok bool
out, ok = secretbox.Open(nil, box[24:], &nonce, &key)
if ok {
break
}
// Retry
fs.Errorf(nil, "Couldn't decrypt configuration, most likely wrong password.")
configKey = nil
}
return bytes.NewReader(out), nil
}
// GetPasswordCommand gets the password using the --password-command setting
//
// If the --password-command flag was not in use it returns "", nil
func GetPasswordCommand(ctx context.Context) (pass string, err error) {
ci := fs.GetConfig(ctx)
if len(ci.PasswordCommand) == 0 {
return "", nil
}
var stdout bytes.Buffer
var stderr bytes.Buffer
cmd := exec.Command(ci.PasswordCommand[0], ci.PasswordCommand[1:]...)
cmd.Stdout = &stdout
cmd.Stderr = &stderr
cmd.Stdin = os.Stdin
err = cmd.Run()
if err != nil {
// One does not always get the stderr returned in the wrapped error.
fs.Errorf(nil, "Using --password-command returned: %v", err)
if ers := strings.TrimSpace(stderr.String()); ers != "" {
fs.Errorf(nil, "--password-command stderr: %s", ers)
}
return pass, fmt.Errorf("password command failed: %w", err)
}
pass = strings.Trim(stdout.String(), "\r\n")
if pass == "" {
return pass, errors.New("--password-command returned empty string")
}
return pass, nil
}
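// Illustrative sketch, not part of rclone: GetPasswordCommand above runs
// whatever command --password-command is configured with and returns its
// trimmed stdout. Here "echo" stands in for a real password manager command;
// the value is hypothetical.
func passwordCommandExample(ctx context.Context) (string, error) {
	ci := fs.GetConfig(ctx)
	ci.PasswordCommand = fs.SpaceSepList{"echo", "my-config-password"}
	return GetPasswordCommand(ctx)
}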
// Encrypt the config file
func Encrypt(src io.Reader, dst io.Writer) error {
if len(configKey) == 0 {
_, err := io.Copy(dst, src)
return err
}
_, _ = fmt.Fprintln(dst, "# Encrypted rclone configuration File")
_, _ = fmt.Fprintln(dst, "")
_, _ = fmt.Fprintln(dst, "RCLONE_ENCRYPT_V0:")
// Generate new nonce and write it to the start of the ciphertext
var nonce [24]byte
n, _ := rand.Read(nonce[:])
if n != 24 {
return fmt.Errorf("nonce short read: %d", n)
}
enc := base64.NewEncoder(base64.StdEncoding, dst)
_, err := enc.Write(nonce[:])
if err != nil {
return fmt.Errorf("failed to write config file: %w", err)
}
var key [32]byte
copy(key[:], configKey[:32])
data, err := io.ReadAll(src)
if err != nil {
return err
}
b := secretbox.Seal(nil, data, &nonce, &key)
_, err = enc.Write(b)
if err != nil {
return fmt.Errorf("failed to write config file: %w", err)
}
return enc.Close()
}
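// The function below is an illustrative sketch, not part of rclone: it shows
// how the format written by Encrypt above could be read back by a standalone
// tool. It assumes the key derivation used in SetConfigPassword below
// (SHA-256 of "[" + password + "][rclone-config]", ignoring the trimming and
// normalization that checkPassword applies) and that the base64 body
// following the "RCLONE_ENCRYPT_V0:" line has been concatenated into a
// single string.
func decryptConfigSketch(password, encoded string) ([]byte, error) {
	key := sha256.Sum256([]byte("[" + password + "][rclone-config]"))
	box, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		return nil, err
	}
	if len(box) < 24+secretbox.Overhead {
		return nil, errors.New("configuration data too short")
	}
	// The 24-byte nonce written by Encrypt precedes the secretbox ciphertext.
	var nonce [24]byte
	copy(nonce[:], box[:24])
	out, ok := secretbox.Open(nil, box[24:], &nonce, &key)
	if !ok {
		return nil, errors.New("couldn't decrypt - wrong password?")
	}
	return out, nil
}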
// getConfigPassword will query the user for a password the
// first time it is required.
func getConfigPassword(q string) {
if len(configKey) != 0 {
return
}
for {
password := GetPassword(q)
err := SetConfigPassword(password)
if err == nil {
return
}
_, _ = fmt.Fprintln(os.Stderr, "Error:", err)
}
}
// SetConfigPassword will set the configKey to the hash of
// the password. If the length of the password is
// zero after trimming+normalization, an error is returned.
func SetConfigPassword(password string) error {
password, err := checkPassword(password)
if err != nil {
return err
}
	// Create SHA256 hash of the password
sha := sha256.New()
_, err = sha.Write([]byte("[" + password + "][rclone-config]"))
if err != nil {
return err
}
configKey = sha.Sum(nil)
if PassConfigKeyForDaemonization {
tempFile, err := os.CreateTemp("", "rclone")
if err != nil {
return fmt.Errorf("cannot create temp file to store configKey: %w", err)
}
_, err = tempFile.WriteString(obscure.MustObscure(string(configKey)))
if err != nil {
errRemove := os.Remove(tempFile.Name())
if errRemove != nil {
return fmt.Errorf("error writing configKey to temp file and also error deleting it: %w", err)
}
return fmt.Errorf("error writing configKey to temp file: %w", err)
}
err = tempFile.Close()
if err != nil {
errRemove := os.Remove(tempFile.Name())
if errRemove != nil {
return fmt.Errorf("error closing temp file with configKey and also error deleting it: %w", err)
}
return fmt.Errorf("error closing temp file with configKey: %w", err)
}
fs.Debugf(nil, "saving configKey to temp file")
err = os.Setenv("_RCLONE_CONFIG_KEY_FILE", tempFile.Name())
if err != nil {
errRemove := os.Remove(tempFile.Name())
if errRemove != nil {
return fmt.Errorf("unable to set environment variable _RCLONE_CONFIG_KEY_FILE and unable to delete the temp file: %w", err)
}
return fmt.Errorf("unable to set environment variable _RCLONE_CONFIG_KEY_FILE: %w", err)
}
}
return nil
}
// ClearConfigPassword sets the current password to empty
func ClearConfigPassword() {
configKey = nil
}
// changeConfigPassword will query the user twice
// for a password. If the same password is entered
// twice the key is updated.
//
// This will use --password-command if configured to read the password.
func changeConfigPassword() {
// Set RCLONE_PASSWORD_CHANGE to "1" when calling the --password-command tool
_ = os.Setenv("RCLONE_PASSWORD_CHANGE", "1")
defer func() {
_ = os.Unsetenv("RCLONE_PASSWORD_CHANGE")
}()
pass, err := GetPasswordCommand(context.Background())
if err != nil {
fmt.Printf("Failed to read new password with --password-command: %v\n", err)
return
}
if pass == "" {
pass = ChangePassword("NEW configuration")
} else {
fmt.Printf("Read password using --password-command\n")
}
err = SetConfigPassword(pass)
if err != nil {
fmt.Printf("Failed to set config password: %v\n", err)
return
}
}
// ChangeConfigPasswordAndSave will query the user twice
// for a password. If the same password is entered
// twice the key is updated.
//
// This will use --password-command if configured to read the password.
//
// It will then save the config
func ChangeConfigPasswordAndSave() {
changeConfigPassword()
SaveConfig()
}
// RemoveConfigPasswordAndSave will clear the config password and save
// the unencrypted config file.
func RemoveConfigPasswordAndSave() {
configKey = nil
SaveConfig()
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/config/rc.go | fs/config/rc.go | package config
import (
"context"
"errors"
"os"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/rc"
)
func init() {
rc.Add(rc.Call{
Path: "config/unlock",
Fn: rcConfigPassword,
Title: "Unlock the config file.",
AuthRequired: true,
Help: `
Unlocks the config file if it is locked.
Parameters:
- 'configPassword' - password to unlock the config file
A good idea is to disable AskPassword before making this call
`,
})
}
// Unlock the config file
// A good idea is to disable AskPassword before making this call
func rcConfigPassword(ctx context.Context, in rc.Params) (out rc.Params, err error) {
configPass, err := in.GetString("configPassword")
if err != nil {
var err2 error
configPass, err2 = in.GetString("config_password") // backwards compat
if err2 != nil {
return nil, err
}
}
if SetConfigPassword(configPass) != nil {
return nil, errors.New("failed to set config password")
}
return nil, nil
}
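// Illustrative sketch, not part of rclone: the handler above can also be
// driven in-process with rc.Params; over the remote control API the same
// parameter is sent as JSON to the "config/unlock" path. The password here
// is hypothetical.
func unlockConfigExample(ctx context.Context) error {
	_, err := rcConfigPassword(ctx, rc.Params{"configPassword": "my-config-password"})
	return err
}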
func init() {
rc.Add(rc.Call{
Path: "config/dump",
Fn: rcDump,
Title: "Dumps the config file.",
AuthRequired: true,
Help: `
Returns a JSON object:
- key: value
Where keys are remote names and values are the config parameters.
See the [config dump](/commands/rclone_config_dump/) command for more information on the above.
`,
})
}
// Return the config file dump
func rcDump(ctx context.Context, in rc.Params) (out rc.Params, err error) {
return DumpRcBlob(), nil
}
func init() {
rc.Add(rc.Call{
Path: "config/get",
Fn: rcGet,
Title: "Get a remote in the config file.",
AuthRequired: true,
Help: `
Parameters:
- name - name of remote to get
See the [config dump](/commands/rclone_config_dump/) command for more information on the above.
`,
})
}
// Return the config file get
func rcGet(ctx context.Context, in rc.Params) (out rc.Params, err error) {
name, err := in.GetString("name")
if err != nil {
return nil, err
}
return DumpRcRemote(name), nil
}
func init() {
rc.Add(rc.Call{
Path: "config/listremotes",
Fn: rcListRemotes,
Title: "Lists the remotes in the config file and defined in environment variables.",
AuthRequired: true,
Help: `
Returns
- remotes - array of remote names
See the [listremotes](/commands/rclone_listremotes/) command for more information on the above.
`,
})
}
// Return a list of remotes in the config file,
// including any defined by environment variables.
func rcListRemotes(ctx context.Context, in rc.Params) (out rc.Params, err error) {
remoteNames := GetRemoteNames()
if remoteNames == nil {
remoteNames = []string{}
}
out = rc.Params{
"remotes": remoteNames,
}
return out, nil
}
func init() {
rc.Add(rc.Call{
Path: "config/providers",
Fn: rcProviders,
Title: "Shows how providers are configured in the config file.",
AuthRequired: true,
Help: `
Returns a JSON object:
- providers - array of objects
See the [config providers](/commands/rclone_config_providers/) command
for more information on the above.
Note that the Options blocks are in the same format as returned by
"options/info". They are described in the
[option blocks](#option-blocks) section.
`,
})
}
// Return the config file providers
func rcProviders(ctx context.Context, in rc.Params) (out rc.Params, err error) {
out = rc.Params{
"providers": fs.Registry,
}
return out, nil
}
func init() {
for _, name := range []string{"create", "update", "password"} {
extraHelp := ""
if name == "create" {
extraHelp = "- type - type of the new remote\n"
}
if name == "create" || name == "update" {
extraHelp += `- opt - a dictionary of options to control the configuration
- obscure - declare passwords are plain and need obscuring
- noObscure - declare passwords are already obscured and don't need obscuring
- noOutput - don't print anything to stdout
- nonInteractive - don't interact with a user, return questions
- continue - continue the config process with an answer
- all - ask all the config questions not just the post config ones
- state - state to restart with - used with continue
- result - result to restart with - used with continue
`
}
rc.Add(rc.Call{
Path: "config/" + name,
AuthRequired: true,
Fn: func(ctx context.Context, in rc.Params) (rc.Params, error) {
return rcConfig(ctx, in, name)
},
Title: name + " the config for a remote.",
Help: `This takes the following parameters:
- name - name of remote
- parameters - a map of \{ "key": "value" \} pairs
` + extraHelp + `
See the [config ` + name + `](/commands/rclone_config_` + name + `/) command for more information on the above.`,
})
}
}
// Manipulate the config file
func rcConfig(ctx context.Context, in rc.Params, what string) (out rc.Params, err error) {
name, err := in.GetString("name")
if err != nil {
return nil, err
}
parameters := rc.Params{}
err = in.GetStruct("parameters", ¶meters)
if err != nil {
return nil, err
}
var opt UpdateRemoteOpt
err = in.GetStruct("opt", &opt)
if err != nil && !rc.IsErrParamNotFound(err) {
return nil, err
}
// Backwards compatibility
if value, err := in.GetBool("obscure"); err == nil {
opt.Obscure = value
}
if value, err := in.GetBool("noObscure"); err == nil {
opt.NoObscure = value
}
var configOut *fs.ConfigOut
switch what {
case "create":
remoteType, typeErr := in.GetString("type")
if typeErr != nil {
return nil, typeErr
}
configOut, err = CreateRemote(ctx, name, remoteType, parameters, opt)
case "update":
configOut, err = UpdateRemote(ctx, name, parameters, opt)
case "password":
err = PasswordRemote(ctx, name, parameters)
default:
err = errors.New("unknown rcConfig type")
}
if err != nil {
return nil, err
}
if !opt.NonInteractive {
return nil, nil
}
if configOut == nil {
configOut = &fs.ConfigOut{}
}
err = rc.Reshape(&out, configOut)
if err != nil {
return nil, err
}
return out, nil
}
func init() {
rc.Add(rc.Call{
Path: "config/delete",
Fn: rcDelete,
Title: "Delete a remote in the config file.",
AuthRequired: true,
Help: `
Parameters:
- name - name of remote to delete
See the [config delete](/commands/rclone_config_delete/) command for more information on the above.
`,
})
}
// Return the config file delete
func rcDelete(ctx context.Context, in rc.Params) (out rc.Params, err error) {
name, err := in.GetString("name")
if err != nil {
return nil, err
}
DeleteRemote(name)
return nil, nil
}
func init() {
rc.Add(rc.Call{
Path: "config/setpath",
Fn: rcSetPath,
Title: "Set the path of the config file",
AuthRequired: true,
Help: `
Parameters:
- path - path to the config file to use
`,
})
}
// Set the config file path
func rcSetPath(ctx context.Context, in rc.Params) (out rc.Params, err error) {
path, err := in.GetString("path")
if err != nil {
return nil, err
}
err = SetConfigPath(path)
return nil, err
}
func init() {
rc.Add(rc.Call{
Path: "config/paths",
Fn: rcPaths,
Title: "Reads the config file path and other important paths.",
AuthRequired: true,
Help: `
Returns a JSON object with the following keys:
- config: path to config file
- cache: path to root of cache directory
- temp: path to root of temporary directory
Eg
{
"cache": "/home/USER/.cache/rclone",
"config": "/home/USER/.rclone.conf",
"temp": "/tmp"
}
See the [config paths](/commands/rclone_config_paths/) command for more information on the above.
`,
})
}
// Return the config file path and other important paths
func rcPaths(ctx context.Context, in rc.Params) (out rc.Params, err error) {
return rc.Params{
"config": GetConfigPath(),
"cache": GetCacheDir(),
"temp": os.TempDir(),
}, nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/config/config.go | fs/config/config.go | // Package config reads, writes and edits the config file and deals with command line flags
package config
import (
"context"
"encoding/json"
"errors"
"fmt"
mathrand "math/rand"
"os"
"path/filepath"
"regexp"
"runtime"
"strings"
"time"
"github.com/mitchellh/go-homedir"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/random"
)
const (
configFileName = "rclone.conf"
hiddenConfigFileName = "." + configFileName
noConfigFile = "notfound"
// ConfigToken is the key used to store the token under
ConfigToken = "token"
// ConfigClientID is the config key used to store the client id
ConfigClientID = "client_id"
// ConfigClientSecret is the config key used to store the client secret
ConfigClientSecret = "client_secret"
// ConfigAuthURL is the config key used to store the auth server endpoint
ConfigAuthURL = "auth_url"
// ConfigTokenURL is the config key used to store the token server endpoint
ConfigTokenURL = "token_url"
// ConfigClientCredentials - use OAUTH2 client credentials
ConfigClientCredentials = "client_credentials"
// ConfigEncoding is the config key to change the encoding for a backend
ConfigEncoding = "encoding"
// ConfigEncodingHelp is the help for ConfigEncoding
ConfigEncodingHelp = "The encoding for the backend.\n\nSee the [encoding section in the overview](/overview/#encoding) for more info."
// ConfigAuthorize indicates that we just want "rclone authorize"
ConfigAuthorize = "config_authorize"
// ConfigAuthNoBrowser indicates that we do not want to open browser
ConfigAuthNoBrowser = "config_auth_no_browser"
// ConfigTemplate is the template content to be used in the authorization webserver
ConfigTemplate = "config_template"
// ConfigTemplateFile is the path to a template file to read into the value of `ConfigTemplate` above
ConfigTemplateFile = "config_template_file"
)
// Storage defines an interface for loading and saving config to
// persistent storage. Rclone provides a default implementation to
// load and save to a config file when this is imported
//
// import "github.com/rclone/rclone/fs/config/configfile"
// configfile.Install()
type Storage interface {
// GetSectionList returns a slice of strings with names for all the
// sections
GetSectionList() []string
// HasSection returns true if section exists in the config file
HasSection(section string) bool
// DeleteSection removes the named section and all config from the
// config file
DeleteSection(section string)
// GetKeyList returns the keys in this section
GetKeyList(section string) []string
// GetValue returns the key in section with a found flag
GetValue(section string, key string) (value string, found bool)
// SetValue sets the value under key in section
SetValue(section string, key string, value string)
// DeleteKey removes the key under section
DeleteKey(section string, key string) bool
// Load the config from permanent storage
Load() error
// Save the config to permanent storage
Save() error
// Serialize the config into a string
Serialize() (string, error)
}
// Global
var (
// Password can be used to configure the random password generator
Password = random.Password
)
var (
configPath string
cacheDir string
data Storage
dataLoaded bool
)
func init() {
// Set the function pointers up in fs
fs.ConfigFileGet = FileGetValue
fs.ConfigFileSet = SetValueAndSave
fs.ConfigFileHasSection = func(section string) bool {
return LoadedData().HasSection(section)
}
configPath = makeConfigPath()
cacheDir = makeCacheDir() // Has fallback to tempDir, so set that first
data = newDefaultStorage()
}
// Join directory with filename, and check if exists
func findFile(dir string, name string) string {
path := filepath.Join(dir, name)
if _, err := os.Stat(path); err != nil {
return ""
}
return path
}
// Find current user's home directory
func findHomeDir() (string, error) {
path, err := homedir.Dir()
if err != nil {
fs.Debugf(nil, "Home directory lookup failed and cannot be used as configuration location: %v", err)
} else if path == "" {
		// On Unix, homedir returns success but an empty string for a user with an empty home directory configured in the passwd file
fs.Debugf(nil, "Home directory not defined and cannot be used as configuration location")
}
return path, err
}
// Find rclone executable directory and look for existing rclone.conf there
// (<rclone_exe_dir>/rclone.conf)
func findLocalConfig() (configDir string, configFile string) {
if exePath, err := os.Executable(); err == nil {
configDir = filepath.Dir(exePath)
configFile = findFile(configDir, configFileName)
}
return
}
// Get path to Windows AppData config subdirectory for rclone and look for existing rclone.conf there
// ($AppData/rclone/rclone.conf)
func findAppDataConfig() (configDir string, configFile string) {
if appDataDir := os.Getenv("APPDATA"); appDataDir != "" {
configDir = filepath.Join(appDataDir, "rclone")
configFile = findFile(configDir, configFileName)
} else {
fs.Debugf(nil, "Environment variable APPDATA is not defined and cannot be used as configuration location")
}
return
}
// Get path to XDG config subdirectory for rclone and look for existing rclone.conf there
// (see XDG Base Directory specification: https://specifications.freedesktop.org/basedir-spec/latest/).
// ($XDG_CONFIG_HOME\rclone\rclone.conf)
func findXDGConfig() (configDir string, configFile string) {
if xdgConfigDir := os.Getenv("XDG_CONFIG_HOME"); xdgConfigDir != "" {
configDir = filepath.Join(xdgConfigDir, "rclone")
configFile = findFile(configDir, configFileName)
}
return
}
// Get path to .config subdirectory for rclone and look for existing rclone.conf there
// (~/.config/rclone/rclone.conf)
func findDotConfigConfig(home string) (configDir string, configFile string) {
if home != "" {
configDir = filepath.Join(home, ".config", "rclone")
configFile = findFile(configDir, configFileName)
}
return
}
// Look for existing .rclone.conf (legacy hidden filename) in root of user's home directory
// (~/.rclone.conf)
func findOldHomeConfig(home string) (configDir string, configFile string) {
if home != "" {
configDir = home
configFile = findFile(home, hiddenConfigFileName)
}
return
}
// Return the path to the configuration file
func makeConfigPath() string {
// Look for existing rclone.conf in prioritized list of known locations
// Also get configuration directory to use for new config file when no existing is found.
var (
configFile string
configDir string
primaryConfigDir string
fallbackConfigDir string
)
// <rclone_exe_dir>/rclone.conf
if _, configFile = findLocalConfig(); configFile != "" {
return configFile
}
// Windows: $AppData/rclone/rclone.conf
// This is also the default location for new config when no existing is found
if runtime.GOOS == "windows" {
if primaryConfigDir, configFile = findAppDataConfig(); configFile != "" {
return configFile
}
}
// $XDG_CONFIG_HOME/rclone/rclone.conf
// Also looking for this on Windows, for backwards compatibility reasons.
if configDir, configFile = findXDGConfig(); configFile != "" {
return configFile
}
if runtime.GOOS != "windows" {
// On Unix this is also the default location for new config when no existing is found
primaryConfigDir = configDir
}
// ~/.config/rclone/rclone.conf
// This is also the fallback location for new config
// (when $AppData on Windows and $XDG_CONFIG_HOME on Unix is not defined)
homeDir, homeDirErr := findHomeDir()
if fallbackConfigDir, configFile = findDotConfigConfig(homeDir); configFile != "" {
return configFile
}
// ~/.rclone.conf
if _, configFile = findOldHomeConfig(homeDir); configFile != "" {
return configFile
}
// No existing config file found, prepare proper default for a new one.
// But first check if user supplied a --config variable or environment
// variable, since then we skip actually trying to create the default
// and report any errors related to it (we can't use pflag for this because
// it isn't initialised yet so we search the command line manually).
_, configSupplied := os.LookupEnv("RCLONE_CONFIG")
if !configSupplied {
for _, item := range os.Args {
if item == "--config" || strings.HasPrefix(item, "--config=") {
configSupplied = true
break
}
}
}
// If we found a configuration directory to be used for new config during search
// above, then create it to be ready for rclone.conf file to be written into it
// later, and also as a test of permissions to use fallback if not even able to
// create the directory.
if primaryConfigDir != "" {
configDir = primaryConfigDir
} else if fallbackConfigDir != "" {
configDir = fallbackConfigDir
} else {
configDir = ""
}
if configDir != "" {
configFile = filepath.Join(configDir, configFileName)
if configSupplied {
// User supplied custom config option, just return the default path
// as is without creating any directories, since it will not be used
// anyway and we don't want to unnecessarily create empty directory.
return configFile
}
var mkdirErr error
if mkdirErr = file.MkdirAll(configDir, os.ModePerm); mkdirErr == nil {
return configFile
}
// Problem: Try a fallback location. If we did find a home directory then
// just assume file .rclone.conf (legacy hidden filename) can be written in
// its root (~/.rclone.conf).
if homeDir != "" {
fs.Debugf(nil, "Configuration directory could not be created and will not be used: %v", mkdirErr)
return filepath.Join(homeDir, hiddenConfigFileName)
}
if !configSupplied {
fs.Errorf(nil, "Couldn't find home directory nor create configuration directory: %v", mkdirErr)
}
} else if !configSupplied {
if homeDirErr != nil {
fs.Errorf(nil, "Couldn't find configuration directory nor home directory: %v", homeDirErr)
} else {
fs.Errorf(nil, "Couldn't find configuration directory nor home directory")
}
}
// No known location that can be used: Did possibly find a configDir
// (XDG_CONFIG_HOME or APPDATA) which couldn't be created, but in any case
// did not find a home directory!
// Report it as an error, and return as last resort the path relative to current
// working directory, of .rclone.conf (legacy hidden filename).
if !configSupplied {
fs.Errorf(nil, "Defaulting to storing config in current directory.")
fs.Errorf(nil, "Use --config flag to workaround.")
}
return hiddenConfigFileName
}
// GetConfigPath returns the current config file path
func GetConfigPath() string {
return configPath
}
// SetConfigPath sets new config file path
//
// Checks for empty string, os null device, or special path, all of which indicate in-memory config.
func SetConfigPath(path string) (err error) {
var cfgPath string
if path == "" || path == os.DevNull {
cfgPath = ""
} else if filepath.Base(path) == noConfigFile {
cfgPath = ""
} else if err = file.IsReserved(path); err != nil {
return err
} else if cfgPath, err = filepath.Abs(path); err != nil {
return err
}
configPath = cfgPath
return nil
}
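// Illustrative sketch, not part of rclone: any of the special values handled
// above selects a purely in-memory configuration.
func inMemoryConfigExample() error {
	// "", os.DevNull, or any path whose base name is "notfound" all result
	// in an empty configPath, i.e. in-memory config.
	return SetConfigPath("")
}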
// SetData sets new config file storage
func SetData(newData Storage) {
// If no config file, use in-memory config (which is the default)
if configPath == "" {
return
}
data = newData
dataLoaded = false
}
// Data returns current config file storage
func Data() Storage {
return data
}
// ErrorConfigFileNotFound is returned when the config file is not found
var ErrorConfigFileNotFound = errors.New("config file not found")
// LoadedData ensures the config file storage is loaded and returns it
func LoadedData() Storage {
if !dataLoaded {
// Set RCLONE_CONFIG_DIR for backend config and subprocesses
// If empty configPath (in-memory only) the value will be "."
_ = os.Setenv("RCLONE_CONFIG_DIR", filepath.Dir(configPath))
// Load configuration from file (or initialize sensible default if no file or error)
if err := data.Load(); err == nil {
fs.Debugf(nil, "Using config file from %q", configPath)
dataLoaded = true
} else if err == ErrorConfigFileNotFound {
if configPath == "" {
fs.Debugf(nil, "Config is memory-only - using defaults")
} else {
fs.Logf(nil, "Config file %q not found - using defaults", configPath)
}
dataLoaded = true
} else {
fs.Fatalf(nil, "Failed to load config file %q: %v", configPath, err)
}
}
return data
}
// SaveConfig saves the configuration file, retrying after a short random
// sleep if saving fails.
func SaveConfig() {
ctx := context.Background()
ci := fs.GetConfig(ctx)
var err error
for range ci.LowLevelRetries + 1 {
if err = LoadedData().Save(); err == nil {
return
}
waitingTimeMs := mathrand.Intn(1000)
time.Sleep(time.Duration(waitingTimeMs) * time.Millisecond)
}
fs.Errorf(nil, "Failed to save config after %d tries: %v", ci.LowLevelRetries, err)
}
// FileSections returns the sections in the config file
func FileSections() []string {
return LoadedData().GetSectionList()
}
// FileGetValue gets the config key under section, returning the
// value and true if found, or ("", false) otherwise
func FileGetValue(section, key string) (string, bool) {
return LoadedData().GetValue(section, key)
}
// FileSetValue sets the key in section to value.
// It doesn't save the config file.
func FileSetValue(section, key, value string) {
LoadedData().SetValue(section, key, value)
}
// FileDeleteKey deletes the config key in the config file.
// It returns true if the key was deleted,
// or returns false if the section or key didn't exist.
func FileDeleteKey(section, key string) bool {
return LoadedData().DeleteKey(section, key)
}
// GetValue gets the value for a config key from environment
// or config file under section returning the default if not set.
//
// Emulates the preference documented and normally used by rclone via
// configmap, which means environment variables before config file.
func GetValue(remote, key string) string {
envKey := fs.ConfigToEnv(remote, key)
value, found := os.LookupEnv(envKey)
if found {
return value
}
value, _ = LoadedData().GetValue(remote, key)
return value
}
// SetValueAndSave sets the key to the value and saves just that
// value in the config file. It loads the old config file in from
// disk first and overwrites the given value only.
func SetValueAndSave(remote, key, value string) error {
// Set the value in config in case we fail to reload it
FileSetValue(remote, key, value)
// Save it again
SaveConfig()
return nil
}
// Remote defines a remote with a name, type, source and description
type Remote struct {
Name string `json:"name"`
Type string `json:"type"`
Source string `json:"source"`
Description string `json:"description"`
}
var remoteEnvRe = regexp.MustCompile(`^RCLONE_CONFIG_(.+?)_TYPE=(.+)$`)
// GetRemotes returns the list of remotes defined in environment and config file.
//
// Emulates the preference documented and normally used by rclone via
// configmap, which means environment variables before config file.
func GetRemotes() []Remote {
var remotes []Remote
for _, item := range os.Environ() {
matches := remoteEnvRe.FindStringSubmatch(item)
if len(matches) == 3 {
remotes = append(remotes, Remote{
Name: strings.ToLower(matches[1]),
Type: strings.ToLower(matches[2]),
Source: "environment",
})
}
}
remoteExists := func(name string) bool {
for _, remote := range remotes {
if name == remote.Name {
return true
}
}
return false
}
sections := LoadedData().GetSectionList()
for _, section := range sections {
if !remoteExists(section) {
typeValue, found := LoadedData().GetValue(section, "type")
if found {
description, _ := LoadedData().GetValue(section, "description")
remotes = append(remotes, Remote{
Name: section,
Type: typeValue,
Source: "file",
Description: description,
})
}
}
}
return remotes
}
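// Illustrative sketch, not part of rclone: a remote defined purely through
// environment variables is reported by GetRemotes above with Source
// "environment", and GetValue consults the environment before the config
// file. The remote name "mys3" is hypothetical.
func environmentRemoteExample() []Remote {
	_ = os.Setenv("RCLONE_CONFIG_MYS3_TYPE", "s3")      // matched by remoteEnvRe
	_ = os.Setenv("RCLONE_CONFIG_MYS3_PROVIDER", "AWS") // extra option, not part of the remote list
	return GetRemotes() // now includes {Name: "mys3", Type: "s3", Source: "environment"}
}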
// GetRemoteNames returns the names of remotes defined in environment and config file.
func GetRemoteNames() []string {
remotes := GetRemotes()
var remoteNames []string
for _, remote := range remotes {
remoteNames = append(remoteNames, remote.Name)
}
return remoteNames
}
// UpdateRemoteOpt configures the remote update
type UpdateRemoteOpt struct {
// Treat all passwords as plain that need obscuring
Obscure bool `json:"obscure"`
// Treat all passwords as obscured
NoObscure bool `json:"noObscure"`
// Don't provide any output
NoOutput bool `json:"noOutput"`
// Don't interact with the user - return questions
NonInteractive bool `json:"nonInteractive"`
// If set then supply state and result parameters to continue the process
Continue bool `json:"continue"`
// If set then ask all the questions, not just the post config questions
All bool `json:"all"`
// State to restart with - used with Continue
State string `json:"state"`
// Result to return - used with Continue
Result string `json:"result"`
// If set then edit existing values
Edit bool `json:"edit"`
}
func updateRemote(ctx context.Context, name string, keyValues rc.Params, opt UpdateRemoteOpt) (out *fs.ConfigOut, err error) {
if opt.Obscure && opt.NoObscure {
return nil, errors.New("can't use --obscure and --no-obscure together")
}
err = fspath.CheckConfigName(name)
if err != nil {
return nil, err
}
interactive := !(opt.NonInteractive || opt.Continue)
if interactive && !opt.All {
ctx = suppressConfirm(ctx)
}
fsType := GetValue(name, "type")
if fsType == "" {
return nil, errors.New("couldn't find type field in config")
}
ri, err := fs.Find(fsType)
if err != nil {
return nil, fmt.Errorf("couldn't find backend for type %q", fsType)
}
// Work out which options need to be obscured
needsObscure := map[string]struct{}{}
if !opt.NoObscure {
for _, option := range ri.Options {
if option.IsPassword {
needsObscure[option.Name] = struct{}{}
}
}
}
choices := configmap.Simple{}
m := fs.ConfigMap(ri.Prefix, ri.Options, name, nil)
// Set the config
for k, v := range keyValues {
vStr := fmt.Sprint(v)
if strings.ContainsAny(k, "\n\r") || strings.ContainsAny(vStr, "\n\r") {
return nil, fmt.Errorf("update remote: invalid key or value contains \\n or \\r")
}
// Obscure parameter if necessary
if _, ok := needsObscure[k]; ok {
_, err := obscure.Reveal(vStr)
if err != nil || opt.Obscure {
// If error => not already obscured, so obscure it
// or we are forced to obscure
vStr, err = obscure.Obscure(vStr)
if err != nil {
return nil, fmt.Errorf("update remote: obscure failed: %w", err)
}
}
}
choices.Set(k, vStr)
if !strings.HasPrefix(k, fs.ConfigKeyEphemeralPrefix) {
m.Set(k, vStr)
}
}
if opt.Edit {
choices[fs.ConfigEdit] = "true"
}
if interactive {
var state = ""
if opt.All {
state = fs.ConfigAll
}
err = backendConfig(ctx, name, m, ri, choices, state)
} else {
// Start the config state machine
in := fs.ConfigIn{
State: opt.State,
Result: opt.Result,
}
if in.State == "" && opt.All {
in.State = fs.ConfigAll
}
out, err = fs.BackendConfig(ctx, name, m, ri, choices, in)
}
if err != nil {
return nil, err
}
SaveConfig()
cache.ClearConfig(name) // remove any remotes based on this config from the cache
return out, nil
}
// UpdateRemote adds the keyValues passed in to the remote of name.
// keyValues should be key, value pairs.
func UpdateRemote(ctx context.Context, name string, keyValues rc.Params, opt UpdateRemoteOpt) (out *fs.ConfigOut, err error) {
opt.Edit = true
return updateRemote(ctx, name, keyValues, opt)
}
// CreateRemote creates a new remote with name, type and a list of
// parameters which are key, value pairs. If update is set then it
// adds the new keys rather than replacing all of them.
func CreateRemote(ctx context.Context, name string, Type string, keyValues rc.Params, opts UpdateRemoteOpt) (out *fs.ConfigOut, err error) {
err = fspath.CheckConfigName(name)
if err != nil {
return nil, err
}
if !opts.Continue {
// Delete the old config if it exists
LoadedData().DeleteSection(name)
// Set the type
LoadedData().SetValue(name, "type", Type)
}
// Set the remaining values
return UpdateRemote(ctx, name, keyValues, opts)
}
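// Illustrative sketch, not part of rclone: creating and then updating a
// remote non-interactively, mirroring the usage in this package's tests. The
// remote name, backend type and options are hypothetical; the type must
// match a registered backend for this to succeed.
func createRemoteExample(ctx context.Context) error {
	_, err := CreateRemote(ctx, "mys3", "s3", rc.Params{
		"provider": "AWS",
	}, UpdateRemoteOpt{NonInteractive: true})
	if err != nil {
		return err
	}
	_, err = UpdateRemote(ctx, "mys3", rc.Params{
		"env_auth": true,
	}, UpdateRemoteOpt{NonInteractive: true})
	return err
}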
// PasswordRemote adds the keyValues passed in to the remote of name.
// keyValues should be key, value pairs.
func PasswordRemote(ctx context.Context, name string, keyValues rc.Params) error {
ctx = suppressConfirm(ctx)
err := fspath.CheckConfigName(name)
if err != nil {
return err
}
for k, v := range keyValues {
keyValues[k] = obscure.MustObscure(fmt.Sprint(v))
}
_, err = UpdateRemote(ctx, name, keyValues, UpdateRemoteOpt{
NoObscure: true,
})
return err
}
// JSONListProviders prints all the providers and options in JSON format
func JSONListProviders() error {
b, err := json.MarshalIndent(fs.Registry, "", " ")
if err != nil {
return fmt.Errorf("failed to marshal examples: %w", err)
}
_, err = os.Stdout.Write(b)
if err != nil {
return fmt.Errorf("failed to write providers list: %w", err)
}
return nil
}
// fsOption returns an Option describing the possible remotes
func fsOption() *fs.Option {
o := &fs.Option{
Name: "Storage",
Help: "Type of storage to configure.",
Default: "",
Required: true,
}
for _, item := range fs.Registry {
if item.Hide {
continue
}
example := fs.OptionExample{
Value: item.Name,
Help: item.Description,
}
o.Examples = append(o.Examples, example)
}
o.Examples.Sort()
return o
}
// DumpRcRemote dumps the config for a single remote
func DumpRcRemote(name string) (dump rc.Params) {
params := rc.Params{}
for _, key := range LoadedData().GetKeyList(name) {
params[key] = GetValue(name, key)
}
return params
}
// DumpRcBlob dumps all the config as an unstructured blob suitable
// for the rc
func DumpRcBlob() (dump rc.Params) {
dump = rc.Params{}
for _, name := range LoadedData().GetSectionList() {
dump[name] = DumpRcRemote(name)
}
return dump
}
// Dump dumps all the config as a JSON file
func Dump() error {
dump := DumpRcBlob()
b, err := json.MarshalIndent(dump, "", " ")
if err != nil {
return fmt.Errorf("failed to marshal config dump: %w", err)
}
_, err = os.Stdout.Write(b)
if err != nil {
return fmt.Errorf("failed to write config dump: %w", err)
}
return nil
}
// makeCacheDir returns a directory to use for caching.
func makeCacheDir() (dir string) {
dir, err := os.UserCacheDir()
if err != nil || dir == "" {
fs.Debugf(nil, "Failed to find user cache dir, using temporary directory: %v", err)
// if no dir found then use TempDir - we will have a cachedir!
dir = os.TempDir()
}
return filepath.Join(dir, "rclone")
}
// GetCacheDir returns the default directory for cache
//
// The directory is neither guaranteed to exist nor have accessible permissions.
// Users of this should make a subdirectory and use MkdirAll() to create it
// and any parents.
func GetCacheDir() string {
return cacheDir
}
// SetCacheDir sets new default directory for cache
func SetCacheDir(path string) (err error) {
cacheDir, err = filepath.Abs(path)
return
}
// SetTempDir sets new default directory to use for temporary files.
//
// Assuming golang's os.TempDir is used to get the directory:
// "On Unix systems, it returns $TMPDIR if non-empty, else /tmp. On Windows,
// it uses GetTempPath, returning the first non-empty value from %TMP%, %TEMP%,
// %USERPROFILE%, or the Windows directory."
//
// To override the default we therefore set environment variable TMPDIR
// on Unix systems, and both TMP and TEMP on Windows (they are almost exclusively
// aliases for the same path, and programs may refer to either of them).
// This should make all libraries and forked processes use the same.
func SetTempDir(path string) (err error) {
var tempDir string
if tempDir, err = filepath.Abs(path); err != nil {
return err
}
if runtime.GOOS == "windows" {
if err = os.Setenv("TMP", tempDir); err != nil {
return err
}
if err = os.Setenv("TEMP", tempDir); err != nil {
return err
}
} else {
return os.Setenv("TMPDIR", tempDir)
}
return nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/config/crypt_test.go | fs/config/crypt_test.go | // These are in an external package because we need to import configfile
//
// Internal tests are in crypt_internal_test.go
package config_test
import (
"context"
"os"
"path/filepath"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestConfigLoadEncrypted(t *testing.T) {
var err error
oldConfigPath := config.GetConfigPath()
assert.NoError(t, config.SetConfigPath("./testdata/encrypted.conf"))
defer func() {
assert.NoError(t, config.SetConfigPath(oldConfigPath))
config.ClearConfigPassword()
}()
// Set correct password
assert.False(t, config.IsEncrypted())
err = config.SetConfigPassword("asdf")
require.NoError(t, err)
assert.True(t, config.IsEncrypted())
err = config.Data().Load()
require.NoError(t, err)
sections := config.Data().GetSectionList()
var expect = []string{"nounc", "unc"}
assert.Equal(t, expect, sections)
keys := config.Data().GetKeyList("nounc")
expect = []string{"type", "nounc"}
assert.Equal(t, expect, keys)
}
func TestConfigLoadEncryptedWithValidPassCommand(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
oldConfigPath := config.GetConfigPath()
oldConfig := *ci
assert.NoError(t, config.SetConfigPath("./testdata/encrypted.conf"))
// using ci.PasswordCommand, correct password
ci.PasswordCommand = fs.SpaceSepList{"echo", "asdf"}
defer func() {
assert.NoError(t, config.SetConfigPath(oldConfigPath))
config.ClearConfigPassword()
*ci = oldConfig
ci.PasswordCommand = nil
}()
config.ClearConfigPassword()
err := config.Data().Load()
require.NoError(t, err)
sections := config.Data().GetSectionList()
var expect = []string{"nounc", "unc"}
assert.Equal(t, expect, sections)
keys := config.Data().GetKeyList("nounc")
expect = []string{"type", "nounc"}
assert.Equal(t, expect, keys)
}
func TestConfigLoadEncryptedWithInvalidPassCommand(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
oldConfigPath := config.GetConfigPath()
oldConfig := *ci
assert.NoError(t, config.SetConfigPath("./testdata/encrypted.conf"))
// using ci.PasswordCommand, incorrect password
ci.PasswordCommand = fs.SpaceSepList{"echo", "asdf-blurfl"}
defer func() {
assert.NoError(t, config.SetConfigPath(oldConfigPath))
config.ClearConfigPassword()
*ci = oldConfig
ci.PasswordCommand = nil
}()
config.ClearConfigPassword()
err := config.Data().Load()
require.Error(t, err)
assert.Contains(t, err.Error(), "using --password-command derived password")
}
func TestConfigLoadEncryptedFailures(t *testing.T) {
var err error
// This file should be too short to be decoded.
oldConfigPath := config.GetConfigPath()
assert.NoError(t, config.SetConfigPath("./testdata/enc-short.conf"))
defer func() { assert.NoError(t, config.SetConfigPath(oldConfigPath)) }()
err = config.Data().Load()
require.Error(t, err)
// This file contains invalid base64 characters.
assert.NoError(t, config.SetConfigPath("./testdata/enc-invalid.conf"))
err = config.Data().Load()
require.Error(t, err)
	// This file declares a newer, unsupported encryption version.
assert.NoError(t, config.SetConfigPath("./testdata/enc-too-new.conf"))
err = config.Data().Load()
require.Error(t, err)
// This file does not exist.
assert.NoError(t, config.SetConfigPath("./testdata/filenotfound.conf"))
err = config.Data().Load()
assert.Equal(t, config.ErrorConfigFileNotFound, err)
}
func TestGetPasswordCommand(t *testing.T) {
ctx, ci := fs.AddConfig(context.Background())
// Not configured
ci.PasswordCommand = fs.SpaceSepList{}
pass, err := config.GetPasswordCommand(ctx)
require.NoError(t, err)
assert.Equal(t, "", pass)
// With password - happy path
ci.PasswordCommand = fs.SpaceSepList{"echo", "asdf"}
pass, err = config.GetPasswordCommand(ctx)
require.NoError(t, err)
assert.Equal(t, "asdf", pass)
// Empty password returned
ci.PasswordCommand = fs.SpaceSepList{"echo", ""}
_, err = config.GetPasswordCommand(ctx)
assert.ErrorContains(t, err, "returned empty string")
// Error when running command
ci.PasswordCommand = fs.SpaceSepList{"XXX non-existent command XXX", ""}
_, err = config.GetPasswordCommand(ctx)
assert.ErrorContains(t, err, "not found")
// Check the state of the environment variable in --password-command
checkCode := `
package main
import (
"fmt"
"os"
)
func main() {
if _, found := os.LookupEnv("RCLONE_PASSWORD_CHANGE"); found {
fmt.Println("Env var set")
} else {
fmt.Println("OK")
}
}
`
dir := t.TempDir()
code := filepath.Join(dir, "file.go")
require.NoError(t, os.WriteFile(code, []byte(checkCode), 0777))
// Check the environment variable unset when called directly
ci.PasswordCommand = fs.SpaceSepList{"go", "run", code}
pass, err = config.GetPasswordCommand(ctx)
require.NoError(t, err)
assert.Equal(t, "OK", pass)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/config/default_storage.go | fs/config/default_storage.go | package config
import (
"encoding/json"
"sync"
)
// defaultStorage implements config.Storage, providing in-memory config.
// Indexed by section, then key.
type defaultStorage struct {
mu sync.RWMutex
sections map[string]map[string]string
}
func newDefaultStorage() *defaultStorage {
return &defaultStorage{
sections: map[string]map[string]string{},
}
}
// GetSectionList returns a slice of strings with names for all the sections.
func (s *defaultStorage) GetSectionList() []string {
s.mu.RLock()
defer s.mu.RUnlock()
sections := make([]string, 0, len(s.sections))
for section := range s.sections {
sections = append(sections, section)
}
return sections
}
// HasSection returns true if section exists in the config.
func (s *defaultStorage) HasSection(section string) bool {
s.mu.RLock()
defer s.mu.RUnlock()
_, hasSection := s.sections[section]
return hasSection
}
// DeleteSection deletes the specified section.
func (s *defaultStorage) DeleteSection(section string) {
s.mu.Lock()
defer s.mu.Unlock()
delete(s.sections, section)
}
// GetKeyList returns the keys in this section.
func (s *defaultStorage) GetKeyList(section string) []string {
s.mu.RLock()
defer s.mu.RUnlock()
theSection := s.sections[section]
keys := make([]string, 0, len(theSection))
for key := range theSection {
keys = append(keys, key)
}
return keys
}
// GetValue returns the key in section with a found flag.
func (s *defaultStorage) GetValue(section string, key string) (value string, found bool) {
s.mu.RLock()
defer s.mu.RUnlock()
theSection, hasSection := s.sections[section]
if !hasSection {
return "", false
}
value, hasValue := theSection[key]
return value, hasValue
}
func (s *defaultStorage) SetValue(section string, key string, value string) {
s.mu.Lock()
defer s.mu.Unlock()
theSection, hasSection := s.sections[section]
if !hasSection {
theSection = map[string]string{}
s.sections[section] = theSection
}
theSection[key] = value
}
func (s *defaultStorage) DeleteKey(section string, key string) bool {
s.mu.Lock()
defer s.mu.Unlock()
theSection, hasSection := s.sections[section]
if !hasSection {
return false
}
_, hasKey := theSection[key]
if !hasKey {
return false
}
delete(s.sections[section], key)
return true
}
func (s *defaultStorage) Load() error {
return nil
}
func (s *defaultStorage) Save() error {
return nil
}
// Serialize the config into a string
func (s *defaultStorage) Serialize() (string, error) {
s.mu.RLock()
defer s.mu.RUnlock()
j, err := json.Marshal(s.sections)
return string(j), err
}
// Check the interface is satisfied
var _ Storage = newDefaultStorage()
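// Illustrative sketch, not part of rclone: an alternative Storage
// implementation is installed with SetData, which is how rclone's
// configfile package replaces this in-memory default with file-backed
// storage. Note that SetData is a no-op when the config path is empty
// (in-memory only configuration).
func installStorageExample() {
	SetData(newDefaultStorage()) // any type satisfying Storage works here
}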
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/config/config_read_password_unsupported.go | fs/config/config_read_password_unsupported.go | // ReadPassword for OSes which are not supported by golang.org/x/term
// See https://github.com/golang/go/issues/14441 - plan9
//go:build plan9
package config
// ReadPassword reads a password, echoing it to the terminal (echo cannot be disabled on this platform).
func ReadPassword() string {
return ReadLine("")
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/config/ui_test.go | fs/config/ui_test.go | // These are in an external package because we need to import configfile
//
// Internal tests are in ui_internal_test.go
package config_test
import (
"context"
"fmt"
"os"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/rc"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var simpleOptions = []fs.Option{{
Name: "bool",
Default: false,
IsPassword: false,
}, {
Name: "pass",
Default: "",
IsPassword: true,
}}
func testConfigFile(t *testing.T, options []fs.Option, configFileName string) func() {
ctx := context.Background()
ci := fs.GetConfig(ctx)
config.ClearConfigPassword()
_ = os.Unsetenv("_RCLONE_CONFIG_KEY_FILE")
_ = os.Unsetenv("RCLONE_CONFIG_PASS")
// create temp config file
tempFile, err := os.CreateTemp("", configFileName)
assert.NoError(t, err)
path := tempFile.Name()
assert.NoError(t, tempFile.Close())
// temporarily adapt configuration
oldOsStdout := os.Stdout
oldConfigPath := config.GetConfigPath()
oldConfig := *ci
oldConfigFile := config.Data()
oldReadLine := config.ReadLine
oldPassword := config.Password
os.Stdout = nil
assert.NoError(t, config.SetConfigPath(path))
ci = &fs.ConfigInfo{}
configfile.Install()
assert.Equal(t, []string{}, config.Data().GetSectionList())
// Fake a filesystem/backend
backendName := "config_test_remote"
if regInfo, _ := fs.Find(backendName); regInfo != nil {
regInfo.Options = options
} else {
fs.Register(&fs.RegInfo{
Name: backendName,
Options: options,
})
}
// Undo the above (except registered backend, unfortunately)
return func() {
err := os.Remove(path)
assert.NoError(t, err)
os.Stdout = oldOsStdout
assert.NoError(t, config.SetConfigPath(oldConfigPath))
config.ReadLine = oldReadLine
config.Password = oldPassword
*ci = oldConfig
config.SetData(oldConfigFile)
_ = os.Unsetenv("_RCLONE_CONFIG_KEY_FILE")
_ = os.Unsetenv("RCLONE_CONFIG_PASS")
}
}
// makeReadLine makes a simple readLine which returns a fixed list of
// strings
func makeReadLine(answers []string) func(string) string {
i := 0
return func(string) string {
i++
return answers[i-1]
}
}
func TestCRUD(t *testing.T) {
defer testConfigFile(t, simpleOptions, "crud.conf")()
ctx := context.Background()
// script for creating remote
config.ReadLine = makeReadLine([]string{
"config_test_remote", // type
"true", // bool value
"y", // type my own password
"secret", // password
"secret", // repeat
"n", // don't edit advanced config
"y", // looks good, save
})
require.NoError(t, config.NewRemote(ctx, "test"))
assert.Equal(t, []string{"test"}, config.Data().GetSectionList())
assert.Equal(t, "config_test_remote", config.GetValue("test", "type"))
assert.Equal(t, "true", config.GetValue("test", "bool"))
assert.Equal(t, "secret", obscure.MustReveal(config.GetValue("test", "pass")))
// normal rename, test → asdf
config.ReadLine = makeReadLine([]string{
"asdf",
"asdf",
"asdf",
})
config.RenameRemote("test")
assert.Equal(t, []string{"asdf"}, config.Data().GetSectionList())
assert.Equal(t, "config_test_remote", config.GetValue("asdf", "type"))
assert.Equal(t, "true", config.GetValue("asdf", "bool"))
assert.Equal(t, "secret", obscure.MustReveal(config.GetValue("asdf", "pass")))
// delete remote
config.DeleteRemote("asdf")
assert.Equal(t, []string{}, config.Data().GetSectionList())
}
func TestChooseOption(t *testing.T) {
defer testConfigFile(t, simpleOptions, "crud.conf")()
ctx := context.Background()
// script for creating remote
config.ReadLine = makeReadLine([]string{
"config_test_remote", // type
"false", // bool value
"x", // bad choice
"g", // generate password
"1024", // very big
"y", // password OK
"y", // looks good, save
})
config.Password = func(bits int) (string, error) {
assert.Equal(t, 1024, bits)
return "not very random password", nil
}
require.NoError(t, config.NewRemote(ctx, "test"))
assert.Equal(t, "", config.GetValue("test", "bool")) // this is the default now
assert.Equal(t, "not very random password", obscure.MustReveal(config.GetValue("test", "pass")))
// script for creating remote
config.ReadLine = makeReadLine([]string{
"config_test_remote", // type
"true", // bool value
"n", // not required
"y", // looks good, save
})
require.NoError(t, config.NewRemote(ctx, "test"))
assert.Equal(t, "true", config.GetValue("test", "bool"))
assert.Equal(t, "", config.GetValue("test", "pass"))
}
func TestNewRemoteName(t *testing.T) {
defer testConfigFile(t, simpleOptions, "crud.conf")()
ctx := context.Background()
// script for creating remote
config.ReadLine = makeReadLine([]string{
"config_test_remote", // type
"true", // bool value
"n", // not required
"y", // looks good, save
})
require.NoError(t, config.NewRemote(ctx, "test"))
config.ReadLine = makeReadLine([]string{
"test", // already exists
"", // empty string not allowed
"bad^characters", // bad characters
"newname", // OK
})
assert.Equal(t, "newname", config.NewRemoteName())
}
func TestCreateUpdatePasswordRemote(t *testing.T) {
ctx := context.Background()
defer testConfigFile(t, simpleOptions, "update.conf")()
for _, doObscure := range []bool{false, true} {
for _, noObscure := range []bool{false, true} {
if doObscure && noObscure {
break
}
t.Run(fmt.Sprintf("doObscure=%v,noObscure=%v", doObscure, noObscure), func(t *testing.T) {
opt := config.UpdateRemoteOpt{
Obscure: doObscure,
NoObscure: noObscure,
}
_, err := config.CreateRemote(ctx, "test2", "config_test_remote", rc.Params{
"bool": true,
"pass": "potato",
}, opt)
require.NoError(t, err)
assert.Equal(t, []string{"test2"}, config.Data().GetSectionList())
assert.Equal(t, "config_test_remote", config.GetValue("test2", "type"))
assert.Equal(t, "true", config.GetValue("test2", "bool"))
gotPw := config.GetValue("test2", "pass")
if !noObscure {
gotPw = obscure.MustReveal(gotPw)
}
assert.Equal(t, "potato", gotPw)
wantPw := obscure.MustObscure("potato2")
_, err = config.UpdateRemote(ctx, "test2", rc.Params{
"bool": false,
"pass": wantPw,
"spare": "spare",
}, opt)
require.NoError(t, err)
assert.Equal(t, []string{"test2"}, config.Data().GetSectionList())
assert.Equal(t, "config_test_remote", config.GetValue("test2", "type"))
assert.Equal(t, "false", config.GetValue("test2", "bool"))
gotPw = config.GetValue("test2", "pass")
if doObscure {
gotPw = obscure.MustReveal(gotPw)
}
assert.Equal(t, wantPw, gotPw)
require.NoError(t, config.PasswordRemote(ctx, "test2", rc.Params{
"pass": "potato3",
}))
assert.Equal(t, []string{"test2"}, config.Data().GetSectionList())
assert.Equal(t, "config_test_remote", config.GetValue("test2", "type"))
assert.Equal(t, "false", config.GetValue("test2", "bool"))
assert.Equal(t, "potato3", obscure.MustReveal(config.GetValue("test2", "pass")))
})
}
}
}
func TestDefaultRequired(t *testing.T) {
	// By default options are optional (sic), regardless of whether a default value is defined.
// Setting Required=true means empty string is no longer allowed, except when
// a default value is set: Default value means empty string is always allowed!
options := []fs.Option{{
Name: "string_required",
Required: true,
}, {
Name: "string_default",
Default: "AAA",
}, {
Name: "string_required_default",
Default: "BBB",
Required: true,
}}
defer testConfigFile(t, options, "crud.conf")()
ctx := context.Background()
// script for creating remote
config.ReadLine = makeReadLine([]string{
"config_test_remote", // type
"111", // string_required
"222", // string_default
"333", // string_required_default
"y", // looks good, save
})
require.NoError(t, config.NewRemote(ctx, "test"))
assert.Equal(t, []string{"test"}, config.Data().GetSectionList())
assert.Equal(t, "config_test_remote", config.GetValue("test", "type"))
assert.Equal(t, "111", config.GetValue("test", "string_required"))
assert.Equal(t, "222", config.GetValue("test", "string_default"))
assert.Equal(t, "333", config.GetValue("test", "string_required_default"))
// delete remote
config.DeleteRemote("test")
assert.Equal(t, []string{}, config.Data().GetSectionList())
// script for creating remote
config.ReadLine = makeReadLine([]string{
"config_test_remote", // type
"", // string_required - invalid (empty string not allowed)
"111", // string_required - valid
"", // string_default (empty string allowed, means use default)
"", // string_required_default (empty string allowed, means use default)
"y", // looks good, save
})
require.NoError(t, config.NewRemote(ctx, "test"))
assert.Equal(t, []string{"test"}, config.Data().GetSectionList())
assert.Equal(t, "config_test_remote", config.GetValue("test", "type"))
assert.Equal(t, "111", config.GetValue("test", "string_required"))
assert.Equal(t, "", config.GetValue("test", "string_default"))
assert.Equal(t, "", config.GetValue("test", "string_required_default"))
}
func TestMultipleChoice(t *testing.T) {
// Multiple-choice options can be set to the number of a predefined choice, or
// its text. Unless Exclusive=true, tested later, any free text input is accepted.
//
	// By default options are optional, regardless of whether a default value is defined.
// Setting Required=true means empty string is no longer allowed, except when
// a default value is set: Default value means empty string is always allowed!
options := []fs.Option{{
Name: "multiple_choice",
Examples: []fs.OptionExample{{
Value: "AAA",
Help: "This is value AAA",
}, {
Value: "BBB",
Help: "This is value BBB",
}, {
Value: "CCC",
Help: "This is value CCC",
}},
}, {
Name: "multiple_choice_required",
Required: true,
Examples: []fs.OptionExample{{
Value: "AAA",
Help: "This is value AAA",
}, {
Value: "BBB",
Help: "This is value BBB",
}, {
Value: "CCC",
Help: "This is value CCC",
}},
}, {
Name: "multiple_choice_default",
Default: "BBB",
Examples: []fs.OptionExample{{
Value: "AAA",
Help: "This is value AAA",
}, {
Value: "BBB",
Help: "This is value BBB",
}, {
Value: "CCC",
Help: "This is value CCC",
}},
}, {
Name: "multiple_choice_required_default",
Required: true,
Default: "BBB",
Examples: []fs.OptionExample{{
Value: "AAA",
Help: "This is value AAA",
}, {
Value: "BBB",
Help: "This is value BBB",
}, {
Value: "CCC",
Help: "This is value CCC",
}},
}}
defer testConfigFile(t, options, "crud.conf")()
ctx := context.Background()
// script for creating remote
config.ReadLine = makeReadLine([]string{
"config_test_remote", // type
"3", // multiple_choice
"3", // multiple_choice_required
"3", // multiple_choice_default
"3", // multiple_choice_required_default
"y", // looks good, save
})
require.NoError(t, config.NewRemote(ctx, "test"))
assert.Equal(t, []string{"test"}, config.Data().GetSectionList())
assert.Equal(t, "config_test_remote", config.GetValue("test", "type"))
assert.Equal(t, "CCC", config.GetValue("test", "multiple_choice"))
assert.Equal(t, "CCC", config.GetValue("test", "multiple_choice_required"))
assert.Equal(t, "CCC", config.GetValue("test", "multiple_choice_default"))
assert.Equal(t, "CCC", config.GetValue("test", "multiple_choice_required_default"))
// delete remote
config.DeleteRemote("test")
assert.Equal(t, []string{}, config.Data().GetSectionList())
// script for creating remote
config.ReadLine = makeReadLine([]string{
"config_test_remote", // type
"XXX", // multiple_choice
"XXX", // multiple_choice_required
"XXX", // multiple_choice_default
"XXX", // multiple_choice_required_default
"y", // looks good, save
})
require.NoError(t, config.NewRemote(ctx, "test"))
assert.Equal(t, []string{"test"}, config.Data().GetSectionList())
assert.Equal(t, "config_test_remote", config.GetValue("test", "type"))
assert.Equal(t, "XXX", config.GetValue("test", "multiple_choice"))
assert.Equal(t, "XXX", config.GetValue("test", "multiple_choice_required"))
assert.Equal(t, "XXX", config.GetValue("test", "multiple_choice_default"))
assert.Equal(t, "XXX", config.GetValue("test", "multiple_choice_required_default"))
// delete remote
config.DeleteRemote("test")
assert.Equal(t, []string{}, config.Data().GetSectionList())
// script for creating remote
config.ReadLine = makeReadLine([]string{
"config_test_remote", // type
"", // multiple_choice (empty string allowed)
"", // multiple_choice_required - invalid (empty string not allowed)
"XXX", // multiple_choice_required - valid (value not restricted to examples)
"", // multiple_choice_default (empty string allowed)
"", // multiple_choice_required_default (required does nothing when default is set)
"y", // looks good, save
})
require.NoError(t, config.NewRemote(ctx, "test"))
assert.Equal(t, []string{"test"}, config.Data().GetSectionList())
assert.Equal(t, "config_test_remote", config.GetValue("test", "type"))
assert.Equal(t, "", config.GetValue("test", "multiple_choice"))
assert.Equal(t, "XXX", config.GetValue("test", "multiple_choice_required"))
assert.Equal(t, "", config.GetValue("test", "multiple_choice_default"))
assert.Equal(t, "", config.GetValue("test", "multiple_choice_required_default"))
}
func TestMultipleChoiceExclusive(t *testing.T) {
// Setting Exclusive=true on a multiple-choice option means any input
// value must be from the predefined list, but an empty string is still allowed.
// Setting a default value makes no difference.
options := []fs.Option{{
Name: "multiple_choice_exclusive",
Exclusive: true,
Examples: []fs.OptionExample{{
Value: "AAA",
Help: "This is value AAA",
}, {
Value: "BBB",
Help: "This is value BBB",
}, {
Value: "CCC",
Help: "This is value CCC",
}},
}, {
Name: "multiple_choice_exclusive_default",
Exclusive: true,
Default: "CCC",
Examples: []fs.OptionExample{{
Value: "AAA",
Help: "This is value AAA",
}, {
Value: "BBB",
Help: "This is value BBB",
}, {
Value: "CCC",
Help: "This is value CCC",
}},
}}
defer testConfigFile(t, options, "crud.conf")()
ctx := context.Background()
// script for creating remote
config.ReadLine = makeReadLine([]string{
"config_test_remote", // type
"XXX", // multiple_choice_exclusive - invalid (not a value from examples)
"", // multiple_choice_exclusive - valid (empty string allowed)
"YYY", // multiple_choice_exclusive_default - invalid (not a value from examples)
"", // multiple_choice_exclusive_default - valid (empty string allowed)
"y", // looks good, save
})
require.NoError(t, config.NewRemote(ctx, "test"))
assert.Equal(t, []string{"test"}, config.Data().GetSectionList())
assert.Equal(t, "config_test_remote", config.GetValue("test", "type"))
assert.Equal(t, "", config.GetValue("test", "multiple_choice_exclusive"))
assert.Equal(t, "", config.GetValue("test", "multiple_choice_exclusive_default"))
}
func TestMultipleChoiceExclusiveRequired(t *testing.T) {
// Setting Required=true together with Exclusive=true on a multiple-choice option
// means an empty string is no longer allowed, except when a default value is set
// (a default value means an empty string is always allowed).
options := []fs.Option{{
Name: "multiple_choice_exclusive_required",
Exclusive: true,
Required: true,
Examples: []fs.OptionExample{{
Value: "AAA",
Help: "This is value AAA",
}, {
Value: "BBB",
Help: "This is value BBB",
}, {
Value: "CCC",
Help: "This is value CCC",
}},
}, {
Name: "multiple_choice_exclusive_required_default",
Exclusive: true,
Required: true,
Default: "CCC",
Examples: []fs.OptionExample{{
Value: "AAA",
Help: "This is value AAA",
}, {
Value: "BBB",
Help: "This is value BBB",
}, {
Value: "CCC",
Help: "This is value CCC",
}},
}}
defer testConfigFile(t, options, "crud.conf")()
ctx := context.Background()
// script for creating remote
config.ReadLine = makeReadLine([]string{
"config_test_remote", // type
"XXX", // multiple_choice_exclusive_required - invalid (not a value from examples)
"", // multiple_choice_exclusive_required - invalid (empty string not allowed)
"CCC", // multiple_choice_exclusive_required - valid
"XXX", // multiple_choice_exclusive_required_default - invalid (not a value from examples)
"", // multiple_choice_exclusive_required_default - valid (empty string allowed)
"y", // looks good, save
})
require.NoError(t, config.NewRemote(ctx, "test"))
assert.Equal(t, []string{"test"}, config.Data().GetSectionList())
assert.Equal(t, "config_test_remote", config.GetValue("test", "type"))
assert.Equal(t, "CCC", config.GetValue("test", "multiple_choice_exclusive_required"))
assert.Equal(t, "", config.GetValue("test", "multiple_choice_exclusive_required_default"))
}
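// Editor's note (added comment, not part of the original file): the tests
// above script console input by swapping the package-level config.ReadLine
// variable. makeReadLine is defined elsewhere in this package; a minimal,
// hypothetical sketch of such a helper (invented name, not the actual
// implementation) could look like:
//
//	func makeScriptedReadLine(answers []string) func(string) string {
//		i := 0
//		return func(prompt string) string {
//			if i >= len(answers) {
//				return "" // script exhausted
//			}
//			answer := answers[i]
//			i++
//			return answer
//		}
//	}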
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/config/config_read_password.go | fs/config/config_read_password.go | // ReadPassword for OSes which are supported by golang.org/x/term
// See https://github.com/golang/go/issues/14441 - plan9
//go:build !plan9
package config
import (
"fmt"
"os"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/terminal"
)
// ReadPassword reads a password without echoing it to the terminal.
func ReadPassword() string {
stdin := int(os.Stdin.Fd())
if !terminal.IsTerminal(stdin) {
return ReadLine("")
}
line, err := terminal.ReadPassword(stdin)
_, _ = fmt.Fprintln(os.Stderr)
if err != nil {
fs.Fatalf(nil, "Failed to read password: %v", err)
}
return string(line)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/config/config_test.go | fs/config/config_test.go | // These are in an external package because we need to import configfile
package config_test
import (
"testing"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/stretchr/testify/assert"
)
func init() {
configfile.Install()
}
func TestConfigLoad(t *testing.T) {
oldConfigPath := config.GetConfigPath()
assert.NoError(t, config.SetConfigPath("./testdata/plain.conf"))
defer func() {
assert.NoError(t, config.SetConfigPath(oldConfigPath))
}()
config.ClearConfigPassword()
sections := config.Data().GetSectionList()
var expect = []string{"RCLONE_ENCRYPT_V0", "nounc", "unc"}
assert.Equal(t, expect, sections)
keys := config.Data().GetKeyList("nounc")
expect = []string{"type", "nounc"}
assert.Equal(t, expect, keys)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/config/default_storage_test.go | fs/config/default_storage_test.go | package config
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestDefaultStorage(t *testing.T) {
a := assert.New(t)
ds := newDefaultStorage()
section := "test"
key := "key"
val := "something"
ds.SetValue(section, key, val)
ds.SetValue("some other section", key, val)
v, hasVal := ds.GetValue(section, key)
a.True(hasVal)
a.Equal(val, v)
a.ElementsMatch([]string{section, "some other section"}, ds.GetSectionList())
a.True(ds.HasSection(section))
a.False(ds.HasSection("nope"))
a.Equal([]string{key}, ds.GetKeyList(section))
_, err := ds.Serialize()
a.NoError(err)
a.True(ds.DeleteKey(section, key))
a.False(ds.DeleteKey(section, key))
a.False(ds.DeleteKey("not there", key))
_, hasVal = ds.GetValue(section, key)
a.False(hasVal)
ds.DeleteSection(section)
a.False(ds.HasSection(section))
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/config/ui.go | fs/config/ui.go | // Textual user interface parts of the config system
package config
import (
"bufio"
"context"
"errors"
"fmt"
"io"
"os"
"slices"
"sort"
"strconv"
"strings"
"sync"
"unicode/utf8"
"github.com/peterh/liner"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/driveletter"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/lib/terminal"
"golang.org/x/text/unicode/norm"
)
var (
stdinBufOnce sync.Once
stdinBuf *bufio.Reader
)
// ReadLine reads an unlimited length line from stdin with a prompt.
var ReadLine = func(prompt string) string {
if !terminal.IsTerminal(int(os.Stdout.Fd())) {
stdinBufOnce.Do(func() {
stdinBuf = bufio.NewReader(os.Stdin)
})
line, err := stdinBuf.ReadString('\n')
if err != nil && (line == "" || err != io.EOF) {
fs.Fatalf(nil, "Failed to read line: %v", err)
}
return strings.TrimSpace(line)
}
l := liner.NewLiner()
defer func() {
_ = l.Close()
}()
l.SetMultiLineMode(true)
l.SetCtrlCAborts(true)
line, err := l.Prompt(prompt)
if err == io.EOF {
return ""
}
if err != nil {
_ = l.Close()
fs.Fatalf(nil, "Failed to read: %v", err)
}
return strings.TrimSpace(line)
}
// ReadNonEmptyLine prints prompt and calls ReadLine until it returns a non-empty string
func ReadNonEmptyLine(prompt string) string {
result := ""
for result == "" {
result = strings.TrimSpace(ReadLine(prompt))
}
return result
}
// CommandDefault - choose one. If return is pressed then it will
// choose the defaultIndex if it is >= 0
//
// Must not log anything via fs.Log* from here to avoid a deadlock with
// --interactive --progress
func CommandDefault(commands []string, defaultIndex int) byte {
opts := []string{}
for i, text := range commands {
def := ""
if i == defaultIndex {
def = " (default)"
}
fmt.Printf("%c) %s%s\n", text[0], text[1:], def)
opts = append(opts, text[:1])
}
optString := strings.Join(opts, "")
optHelp := strings.Join(opts, "/")
for {
result := strings.ToLower(ReadLine(fmt.Sprintf("%s> ", optHelp)))
if len(result) == 0 {
if defaultIndex >= 0 {
return optString[defaultIndex]
}
fmt.Printf("This value is required and it has no default.\n")
} else if len(result) == 1 {
i := strings.Index(optString, string(result[0]))
if i >= 0 {
return result[0]
}
fmt.Printf("This value must be one of the following characters: %s.\n", strings.Join(opts, ", "))
} else {
fmt.Printf("This value must be a single character, one of the following: %s.\n", strings.Join(opts, ", "))
}
}
}
// Command - choose one
func Command(commands []string) byte {
return CommandDefault(commands, -1)
}
// Confirm asks the user for Yes or No and returns true or false
//
// If the user presses enter then the Default will be used
func Confirm(Default bool) bool {
defaultIndex := 0
if !Default {
defaultIndex = 1
}
return CommandDefault([]string{"yYes", "nNo"}, defaultIndex) == 'y'
}
// Choose one of the choices, or default, or type a new string if newOk is set
func Choose(what string, kind string, choices, help []string, defaultValue string, required bool, newOk bool) string {
valueDescription := "an existing"
if newOk {
valueDescription = "your own"
}
fmt.Printf("Choose a number from below, or type in %s %s.\n", valueDescription, kind)
// Empty input is allowed if required is not set, or if
// it is set but there is a default value to use.
if defaultValue != "" {
fmt.Printf("Press Enter for the default (%s).\n", defaultValue)
} else if !required {
fmt.Printf("Press Enter to leave empty.\n")
}
attributes := []string{terminal.HiRedFg, terminal.HiGreenFg}
for i, text := range choices {
var lines []string
if help != nil && help[i] != "" {
parts := strings.Split(help[i], "\n")
lines = append(lines, parts...)
lines = append(lines, fmt.Sprintf("(%s)", text))
}
pos := i + 1
terminal.WriteString(attributes[i%len(attributes)])
if len(lines) == 0 {
fmt.Printf("%2d > %s\n", pos, text)
} else {
mid := (len(lines) - 1) / 2
for i, line := range lines {
var sep rune
switch i {
case 0:
sep = '/'
case len(lines) - 1:
sep = '\\'
default:
sep = '|'
}
number := " "
if i == mid {
number = fmt.Sprintf("%2d", pos)
}
fmt.Printf("%s %c %s\n", number, sep, line)
}
}
terminal.WriteString(terminal.Reset)
}
for {
result := ReadLine(fmt.Sprintf("%s> ", what))
i, err := strconv.Atoi(result)
if err != nil {
if slices.Contains(choices, result) {
return result
}
if result == "" {
// If empty string is in the predefined list of choices it has already been returned above.
// If parameter required is not set, then empty string is always a valid value.
if !required {
return result
}
// If parameter required is set, but there is a default, then empty input means default.
if defaultValue != "" {
return defaultValue
}
// If parameter required is set, and there is no default, then an input value is required.
fmt.Printf("This value is required and it has no default.\n")
} else if newOk {
// If legal input is not restricted to defined choices, then any nonzero input string is accepted.
return result
} else {
// A nonzero input string was specified but it did not match any of the strictly defined choices.
fmt.Printf("This value must match %s value.\n", valueDescription)
}
} else {
if i >= 1 && i <= len(choices) {
return choices[i-1]
}
fmt.Printf("No choices with this number.\n")
}
}
}
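// Editor's note: hypothetical usage sketch, not part of the original file.
// With required=true and a non-empty defaultValue, pressing Enter returns the
// default; with newOk=false, any free-text answer must match a listed choice
// (the choice values here are invented for illustration):
//
//	provider := Choose("provider", "value",
//		[]string{"s3", "swift"},            // choices
//		[]string{"Amazon S3", "OpenStack"}, // help text per choice
//		"swift",                            // defaultValue used on empty input
//		true,                               // required
//		false)                              // newOk: free text is rejected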
// Enter prompts for an input value of a specified type
func Enter(what string, kind string, defaultValue string, required bool) string {
// Empty input is allowed if required is not set, or if
// it is set but there is a default value to use.
fmt.Printf("Enter a %s.", kind)
if defaultValue != "" {
fmt.Printf(" Press Enter for the default (%s).\n", defaultValue)
} else if !required {
fmt.Println(" Press Enter to leave empty.")
} else {
fmt.Println()
}
for {
result := ReadLine(fmt.Sprintf("%s> ", what))
if !required || result != "" {
return result
}
if defaultValue != "" {
return defaultValue
}
fmt.Printf("This value is required and it has no default.\n")
}
}
// ChoosePassword asks the user for a password
func ChoosePassword(defaultValue string, required bool) string {
fmt.Printf("Choose an alternative below.")
actions := []string{"yYes, type in my own password", "gGenerate random password"}
defaultAction := -1
if defaultValue != "" {
defaultAction = len(actions)
actions = append(actions, "nNo, keep existing")
fmt.Printf(" Press Enter for the default (%s).", string(actions[defaultAction][0]))
} else if !required {
defaultAction = len(actions)
actions = append(actions, "nNo, leave this optional password blank")
fmt.Printf(" Press Enter for the default (%s).", string(actions[defaultAction][0]))
}
fmt.Println()
var password string
var err error
switch i := CommandDefault(actions, defaultAction); i {
case 'y':
password = ChangePassword("the")
case 'g':
for {
fmt.Printf("Password strength in bits.\n64 is just about memorable\n128 is secure\n1024 is the maximum\n")
bits := ChooseNumber("Bits", 64, 1024)
password, err = Password(bits)
if err != nil {
fs.Fatalf(nil, "Failed to make password: %v", err)
}
fmt.Printf("Your password is: %s\n", password)
fmt.Printf("Use this password? Please note that an obscured version of this \npassword (and not the " +
"password itself) will be stored under your \nconfiguration file, so keep this generated password " +
"in a safe place.\n")
if Confirm(true) {
break
}
}
case 'n':
return defaultValue
default:
fs.Errorf(nil, "Bad choice %c", i)
}
return obscure.MustObscure(password)
}
// ChooseNumber asks the user to enter a number between min and max
// inclusive prompting them with what.
func ChooseNumber(what string, min, max int) int {
for {
result := ReadLine(fmt.Sprintf("%s> ", what))
i, err := strconv.Atoi(result)
if err != nil {
fmt.Printf("Bad number: %v\n", err)
continue
}
if i < min || i > max {
fmt.Printf("Out of range - %d to %d inclusive\n", min, max)
continue
}
return i
}
}
// ShowRemotes shows an overview of the config file
func ShowRemotes() {
remotes := LoadedData().GetSectionList()
if len(remotes) == 0 {
return
}
sort.Strings(remotes)
fmt.Printf("%-20s %s\n", "Name", "Type")
fmt.Printf("%-20s %s\n", "====", "====")
for _, remote := range remotes {
fmt.Printf("%-20s %s\n", remote, GetValue(remote, "type"))
}
}
// ChooseRemote chooses a remote name
func ChooseRemote() string {
remotes := LoadedData().GetSectionList()
sort.Strings(remotes)
fmt.Println("Select remote.")
return Choose("remote", "value", remotes, nil, "", true, false)
}
// mustFindByName finds the RegInfo for the remote name passed in or
// exits with a fatal error.
func mustFindByName(name string) *fs.RegInfo {
fsType := GetValue(name, "type")
if fsType == "" {
fs.Fatalf(nil, "Couldn't find type of fs for %q", name)
}
return fs.MustFind(fsType)
}
// findByName finds the RegInfo for the remote name passed in or
// returns an error
func findByName(name string) (*fs.RegInfo, error) {
fsType := GetValue(name, "type")
if fsType == "" {
return nil, fmt.Errorf("couldn't find type of fs for %q", name)
}
return fs.Find(fsType)
}
// printRemoteOptions prints the options of the remote
func printRemoteOptions(name string, prefix string, sep string, redacted bool) {
fsInfo, err := findByName(name)
if err != nil {
fmt.Printf("# %v\n", err)
fsInfo = nil
}
for _, key := range LoadedData().GetKeyList(name) {
isPassword := false
isSensitive := false
if fsInfo != nil {
for _, option := range fsInfo.Options {
if option.Name == key {
if option.IsPassword {
isPassword = true
} else if option.Sensitive {
isSensitive = true
}
}
}
}
value := GetValue(name, key)
if redacted && (isSensitive || isPassword) && value != "" {
fmt.Printf("%s%s%sXXX\n", prefix, key, sep)
} else if isPassword && value != "" {
fmt.Printf("%s%s%s*** ENCRYPTED ***\n", prefix, key, sep)
} else {
fmt.Printf("%s%s%s%s\n", prefix, key, sep, value)
}
}
}
// listRemoteOptions lists the options of the remote
func listRemoteOptions(name string) {
printRemoteOptions(name, "- ", ": ", false)
}
// ShowRemote shows the contents of the remote in config file format
func ShowRemote(name string) {
fmt.Printf("[%s]\n", name)
printRemoteOptions(name, "", " = ", false)
}
// ShowRedactedRemote shows the contents of the remote in config file format
func ShowRedactedRemote(name string) {
fmt.Printf("[%s]\n", name)
printRemoteOptions(name, "", " = ", true)
}
// OkRemote prints the contents of the remote and ask if it is OK
func OkRemote(name string) bool {
fmt.Println("Configuration complete.")
fmt.Println("Options:")
listRemoteOptions(name)
fmt.Printf("Keep this %q remote?\n", name)
switch i := CommandDefault([]string{"yYes this is OK", "eEdit this remote", "dDelete this remote"}, 0); i {
case 'y':
return true
case 'e':
return false
case 'd':
LoadedData().DeleteSection(name)
return true
default:
fs.Errorf(nil, "Bad choice %c", i)
}
return false
}
// newSection prints an empty line to separate sections
func newSection() {
fmt.Println()
}
// backendConfig configures the backend starting from the state passed in
//
// This is the user interface loop that drives the post configuration backend config.
func backendConfig(ctx context.Context, name string, m configmap.Mapper, ri *fs.RegInfo, choices configmap.Getter, startState string) error {
in := fs.ConfigIn{
State: startState,
}
for {
out, err := fs.BackendConfig(ctx, name, m, ri, choices, in)
if err != nil {
return err
}
if out == nil {
break
}
if out.Error != "" {
fmt.Println(out.Error)
}
in.State = out.State
in.Result = out.Result
if out.Option != nil {
fs.Debugf(name, "config: reading config parameter %q", out.Option.Name)
if out.Option.Default == nil {
out.Option.Default = ""
}
if Default, isBool := out.Option.Default.(bool); isBool &&
len(out.Option.Examples) == 2 &&
out.Option.Examples[0].Help == "Yes" &&
out.Option.Examples[0].Value == "true" &&
out.Option.Examples[1].Help == "No" &&
out.Option.Examples[1].Value == "false" &&
out.Option.Exclusive {
// Use Confirm for Yes/No questions as it has a nicer interface
fmt.Println(out.Option.Help)
in.Result = fmt.Sprint(Confirm(Default))
} else {
value := ChooseOption(out.Option, name)
if value != "" {
err := out.Option.Set(value)
if err != nil {
return fmt.Errorf("failed to set option: %w", err)
}
}
in.Result = out.Option.String()
}
}
if out.State == "" {
break
}
newSection()
}
return nil
}
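// Editor's note (added comment, not part of the original file): each
// iteration of the loop above feeds the previous answer back in as
// fs.ConfigIn{State, Result}; the loop ends when fs.BackendConfig returns a
// nil output or an empty State, so the backend drives a simple
// question/answer state machine.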
// PostConfig configures the backend after the main config has been done
//
// This is the user interface loop that drives the post configuration backend config.
func PostConfig(ctx context.Context, name string, m configmap.Mapper, ri *fs.RegInfo) error {
if ri.Config == nil {
return errors.New("backend doesn't support reconnect or authorize")
}
return backendConfig(ctx, name, m, ri, configmap.Simple{}, "")
}
// RemoteConfig runs the config helper for the remote if needed
func RemoteConfig(ctx context.Context, name string) error {
fmt.Printf("Remote config\n")
ri := mustFindByName(name)
m := fs.ConfigMap(ri.Prefix, ri.Options, name, nil)
if ri.Config == nil {
return nil
}
return PostConfig(ctx, name, m, ri)
}
// ChooseOption asks the user to choose an option
func ChooseOption(o *fs.Option, name string) string {
fmt.Printf("Option %s.\n", o.Name)
if o.Help != "" {
// Show help string without empty lines.
help := strings.ReplaceAll(strings.TrimSpace(o.Help), "\n\n", "\n")
fmt.Println(help)
}
var defaultValue string
if o.Default == nil {
defaultValue = ""
} else {
defaultValue = fmt.Sprint(o.Default)
}
if o.IsPassword {
return ChoosePassword(defaultValue, o.Required)
}
what := "value"
if o.Default != "" {
switch o.Default.(type) {
case bool:
what = "boolean value (true or false)"
case fs.SizeSuffix:
what = "size with suffix K,M,G,T"
case fs.Duration:
what = "duration s,m,h,d,w,M,y"
case int, int8, int16, int32, int64:
what = "signed integer"
case uint, byte, uint16, uint32, uint64:
what = "unsigned integer"
default:
what = fmt.Sprintf("value of type %s", o.Type())
}
}
var in string
for {
if len(o.Examples) > 0 {
var values []string
var help []string
for _, example := range o.Examples {
values = append(values, example.Value)
help = append(help, example.Help)
}
in = Choose(o.Name, what, values, help, defaultValue, o.Required, !o.Exclusive)
} else {
in = Enter(o.Name, what, defaultValue, o.Required)
}
if in != "" {
newIn, err := configstruct.StringToInterface(o.Default, in)
if err != nil {
fmt.Printf("Failed to parse %q: %v\n", in, err)
continue
}
in = fmt.Sprint(newIn) // canonicalise
}
return in
}
}
// NewRemoteName asks the user for a name for a new remote
func NewRemoteName() (name string) {
for {
fmt.Println("Enter name for new remote.")
name = ReadLine("name> ")
if LoadedData().HasSection(name) {
fmt.Printf("Remote %q already exists.\n", name)
continue
}
err := fspath.CheckConfigName(name)
switch {
case name == "":
fmt.Printf("Can't use empty name.\n")
case driveletter.IsDriveLetter(name):
fmt.Printf("Can't use %q as it can be confused with a drive letter.\n", name)
case err != nil:
fmt.Printf("Can't use %q as %v.\n", name, err)
default:
return name
}
}
}
// NewRemote make a new remote from its name
func NewRemote(ctx context.Context, name string) error {
var (
newType string
ri *fs.RegInfo
err error
)
// Set the type first
for {
newType = ChooseOption(fsOption(), name)
ri, err = fs.Find(newType)
if err != nil {
fmt.Printf("Bad remote %q: %v\n", newType, err)
continue
}
break
}
LoadedData().SetValue(name, "type", newType)
newSection()
_, err = CreateRemote(ctx, name, newType, nil, UpdateRemoteOpt{
All: true,
})
if err != nil {
return err
}
if OkRemote(name) {
SaveConfig()
return nil
}
newSection()
return EditRemote(ctx, ri, name)
}
// EditRemote gets the user to edit a remote
func EditRemote(ctx context.Context, ri *fs.RegInfo, name string) error {
fmt.Printf("Editing existing %q remote with options:\n", name)
listRemoteOptions(name)
newSection()
for {
_, err := UpdateRemote(ctx, name, nil, UpdateRemoteOpt{
All: true,
})
if err != nil {
return err
}
if OkRemote(name) {
break
}
}
SaveConfig()
return nil
}
// DeleteRemote gets the user to delete a remote
func DeleteRemote(name string) {
LoadedData().DeleteSection(name)
SaveConfig()
}
// copyRemote asks the user for a new remote name and copies name into
// it. Returns the new name.
func copyRemote(name string) string {
newName := NewRemoteName()
// Copy the keys
for _, key := range LoadedData().GetKeyList(name) {
value, _ := FileGetValue(name, key)
LoadedData().SetValue(newName, key, value)
}
return newName
}
// RenameRemote renames a config section
func RenameRemote(name string) {
fmt.Printf("Enter new name for %q remote.\n", name)
newName := copyRemote(name)
if name != newName {
LoadedData().DeleteSection(name)
SaveConfig()
}
}
// CopyRemote copies a config section
func CopyRemote(name string) {
fmt.Printf("Enter name for copy of %q remote.\n", name)
copyRemote(name)
SaveConfig()
}
// ShowConfigLocation prints the location of the config file in use
func ShowConfigLocation() {
if configPath := GetConfigPath(); configPath == "" {
fmt.Println("Configuration is in memory only")
} else {
if _, err := os.Stat(configPath); os.IsNotExist(err) {
fmt.Println("Configuration file doesn't exist, but rclone will use this path:")
} else {
fmt.Println("Configuration file is stored at:")
}
fmt.Printf("%s\n", configPath)
}
}
// ShowConfig prints the (unencrypted) config options
func ShowConfig() {
str, err := LoadedData().Serialize()
if err != nil {
fs.Fatalf(nil, "Failed to serialize config: %v", err)
}
if str == "" {
str = "; empty config\n"
}
fmt.Printf("%s", str)
}
// ShowRedactedConfig prints the redacted (unencrypted) config options
func ShowRedactedConfig() {
remotes := LoadedData().GetSectionList()
if len(remotes) == 0 {
fmt.Println("; empty config")
return
}
sort.Strings(remotes)
for i, remote := range remotes {
if i != 0 {
fmt.Println()
}
ShowRedactedRemote(remote)
}
}
// EditConfig edits the config file interactively
func EditConfig(ctx context.Context) (err error) {
for {
haveRemotes := len(LoadedData().GetSectionList()) != 0
what := []string{"eEdit existing remote", "nNew remote", "dDelete remote", "rRename remote", "cCopy remote", "sSet configuration password", "qQuit config"}
if haveRemotes {
fmt.Printf("Current remotes:\n\n")
ShowRemotes()
fmt.Printf("\n")
} else {
fmt.Printf("No remotes found, make a new one?\n")
// take 2nd item and last 2 items of menu list
what = append(what[1:2], what[len(what)-2:]...)
}
switch i := Command(what); i {
case 'e':
newSection()
name := ChooseRemote()
newSection()
fs := mustFindByName(name)
err = EditRemote(ctx, fs, name)
if err != nil {
return err
}
case 'n':
newSection()
name := NewRemoteName()
newSection()
err = NewRemote(ctx, name)
if err != nil {
return err
}
case 'd':
newSection()
name := ChooseRemote()
newSection()
DeleteRemote(name)
case 'r':
newSection()
name := ChooseRemote()
newSection()
RenameRemote(name)
case 'c':
newSection()
name := ChooseRemote()
newSection()
CopyRemote(name)
case 's':
newSection()
SetPassword()
case 'q':
return nil
}
newSection()
}
}
// Suppress the confirm prompts by altering the context config
func suppressConfirm(ctx context.Context) context.Context {
newCtx, ci := fs.AddConfig(ctx)
ci.AutoConfirm = true
return newCtx
}
// checkPassword normalises and validates the password
func checkPassword(password string) (string, error) {
if !utf8.ValidString(password) {
return "", errors.New("password contains invalid utf8 characters")
}
// Check for leading/trailing whitespace
trimmedPassword := strings.TrimSpace(password)
// Warn user if password has leading+trailing whitespace
if len(password) != len(trimmedPassword) {
_, _ = fmt.Fprintln(os.Stderr, "Your password contains leading/trailing whitespace - in previous versions of rclone this was stripped")
}
// Normalize to reduce weird variations.
password = norm.NFKC.String(password)
if len(password) == 0 || len(trimmedPassword) == 0 {
return "", errors.New("no characters in password")
}
return password, nil
}
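// Editor's note (added comment, not part of the original file): because of
// the NFKC normalization above, visually equivalent inputs compare equal,
// e.g. "A\u030A" (capital A plus a combining ring) normalizes to the single
// rune "Å", so both spellings are treated as the same password.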
// GetPassword asks the user for a password with the prompt given.
func GetPassword(prompt string) string {
_, _ = fmt.Fprintln(PasswordPromptOutput, prompt)
for {
_, _ = fmt.Fprint(PasswordPromptOutput, "password:")
password := ReadPassword()
password, err := checkPassword(password)
if err == nil {
return password
}
_, _ = fmt.Fprintf(os.Stderr, "Bad password: %v\n", err)
}
}
// ChangePassword will query the user twice for the named password. If
// the same password is entered it is returned.
func ChangePassword(name string) string {
for {
a := GetPassword(fmt.Sprintf("Enter %s password:", name))
b := GetPassword(fmt.Sprintf("Confirm %s password:", name))
if a == b {
return a
}
fmt.Println("Passwords do not match!")
}
}
// SetPassword will allow the user to modify the current
// configuration encryption settings.
func SetPassword() {
for {
if len(configKey) > 0 {
fmt.Println("Your configuration is encrypted.")
what := []string{"cChange Password", "uUnencrypt configuration", "qQuit to main menu"}
switch i := Command(what); i {
case 'c':
ChangeConfigPasswordAndSave()
fmt.Println("Password changed")
continue
case 'u':
RemoveConfigPasswordAndSave()
continue
case 'q':
return
}
} else {
fmt.Println("Your configuration is not encrypted.")
fmt.Println("If you add a password, you will protect your login information to cloud services.")
what := []string{"aAdd Password", "qQuit to main menu"}
switch i := Command(what); i {
case 'a':
ChangeConfigPasswordAndSave()
fmt.Println("Password set")
continue
case 'q':
return
}
}
}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/config/crypt_internal_test.go | fs/config/crypt_internal_test.go | package config
import (
"context"
"os"
"path/filepath"
"testing"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func hashedKeyCompare(t *testing.T, a, b string, shouldMatch bool) {
err := SetConfigPassword(a)
require.NoError(t, err)
k1 := configKey
err = SetConfigPassword(b)
require.NoError(t, err)
k2 := configKey
if shouldMatch {
assert.Equal(t, k1, k2)
} else {
assert.NotEqual(t, k1, k2)
}
}
func TestPassword(t *testing.T) {
defer func() {
configKey = nil // reset password
}()
var err error
// Empty password should give error
err = SetConfigPassword(" \t ")
require.Error(t, err)
// Test invalid utf8 sequence
err = SetConfigPassword(string([]byte{0xff, 0xfe, 0xfd}) + "abc")
require.Error(t, err)
// Simple check of wrong passwords
hashedKeyCompare(t, "mis", "match", false)
// Check that passwords match after unicode normalization
hashedKeyCompare(t, "ff\u0041\u030A", "ffÅ", true)
// Check that passwords preserves case
hashedKeyCompare(t, "abcdef", "ABCDEF", false)
}
func TestChangeConfigPassword(t *testing.T) {
ci := fs.GetConfig(context.Background())
var err error
oldConfigPath := GetConfigPath()
assert.NoError(t, SetConfigPath("./testdata/encrypted.conf"))
defer func() {
assert.NoError(t, SetConfigPath(oldConfigPath))
ClearConfigPassword()
ci.PasswordCommand = nil
}()
// Get rid of any config password
ClearConfigPassword()
// Return the password, checking the state of the environment variable
checkCode := `
package main
import (
"fmt"
"os"
"log"
)
func main() {
v := os.Getenv("RCLONE_PASSWORD_CHANGE")
if v == "" {
log.Fatal("Env var not found")
} else if v != "1" {
log.Fatal("Env var wrong value")
} else {
fmt.Println("asdf")
}
}
`
dir := t.TempDir()
code := filepath.Join(dir, "file.go")
require.NoError(t, os.WriteFile(code, []byte(checkCode), 0777))
// Set correct password using --password-command
ci.PasswordCommand = fs.SpaceSepList{"go", "run", code}
changeConfigPassword()
err = Data().Load()
require.NoError(t, err)
sections := Data().GetSectionList()
var expect = []string{"nounc", "unc"}
assert.Equal(t, expect, sections)
keys := Data().GetKeyList("nounc")
expect = []string{"type", "nounc"}
assert.Equal(t, expect, keys)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/config/rc_test.go | fs/config/rc_test.go | package config_test
import (
"context"
"os"
"path/filepath"
"testing"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/rc"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const testName = "configTestNameForRc"
func TestRc(t *testing.T) {
ctx := context.Background()
oldConfigFile := config.GetConfigPath()
defer func() {
require.NoError(t, config.SetConfigPath(oldConfigFile))
}()
// Set a temporary config file
require.NoError(t, config.SetConfigPath(filepath.Join(t.TempDir(), "rclone.conf")))
configfile.Install()
// Create the test remote
call := rc.Calls.Get("config/create")
assert.NotNil(t, call)
in := rc.Params{
"name": testName,
"type": "local",
"parameters": rc.Params{
"test_key": "sausage",
},
}
out, err := call.Fn(ctx, in)
require.NoError(t, err)
require.Nil(t, out)
assert.Equal(t, "local", config.GetValue(testName, "type"))
assert.Equal(t, "sausage", config.GetValue(testName, "test_key"))
// The sub tests rely on the remote created above but they can
// all be run independently
t.Run("Dump", func(t *testing.T) {
call := rc.Calls.Get("config/dump")
assert.NotNil(t, call)
in := rc.Params{}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
require.NotNil(t, out)
require.NotNil(t, out[testName])
config := out[testName].(rc.Params)
assert.Equal(t, "local", config["type"])
assert.Equal(t, "sausage", config["test_key"])
})
t.Run("Get", func(t *testing.T) {
call := rc.Calls.Get("config/get")
assert.NotNil(t, call)
in := rc.Params{
"name": testName,
}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
require.NotNil(t, out)
assert.Equal(t, "local", out["type"])
assert.Equal(t, "sausage", out["test_key"])
})
t.Run("ListRemotes", func(t *testing.T) {
assert.NoError(t, os.Setenv("RCLONE_CONFIG_MY-LOCAL_TYPE", "local"))
defer func() {
assert.NoError(t, os.Unsetenv("RCLONE_CONFIG_MY-LOCAL_TYPE"))
}()
call := rc.Calls.Get("config/listremotes")
assert.NotNil(t, call)
in := rc.Params{}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
require.NotNil(t, out)
var remotes []string
err = out.GetStruct("remotes", &remotes)
require.NoError(t, err)
assert.Contains(t, remotes, testName)
assert.Contains(t, remotes, "my-local")
})
t.Run("Update", func(t *testing.T) {
call := rc.Calls.Get("config/update")
assert.NotNil(t, call)
in := rc.Params{
"name": testName,
"parameters": rc.Params{
"test_key": "rutabaga",
"test_key2": "cabbage",
},
}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
assert.Nil(t, out)
assert.Equal(t, "local", config.GetValue(testName, "type"))
assert.Equal(t, "rutabaga", config.GetValue(testName, "test_key"))
assert.Equal(t, "cabbage", config.GetValue(testName, "test_key2"))
})
t.Run("Password", func(t *testing.T) {
call := rc.Calls.Get("config/password")
assert.NotNil(t, call)
pw2 := obscure.MustObscure("password")
in := rc.Params{
"name": testName,
"parameters": rc.Params{
"test_key": "rutabaga",
"test_key2": pw2, // check we encode an already encoded password
},
}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
assert.Nil(t, out)
assert.Equal(t, "local", config.GetValue(testName, "type"))
assert.Equal(t, "rutabaga", obscure.MustReveal(config.GetValue(testName, "test_key")))
assert.Equal(t, pw2, obscure.MustReveal(config.GetValue(testName, "test_key2")))
})
// Delete the test remote
call = rc.Calls.Get("config/delete")
assert.NotNil(t, call)
in = rc.Params{
"name": testName,
}
out, err = call.Fn(context.Background(), in)
require.NoError(t, err)
assert.Nil(t, out)
assert.Equal(t, "", config.GetValue(testName, "type"))
assert.Equal(t, "", config.GetValue(testName, "test_key"))
t.Run("ListRemotes empty not nil", func(t *testing.T) {
call := rc.Calls.Get("config/listremotes")
assert.NotNil(t, call)
in := rc.Params{}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
require.NotNil(t, out)
var remotes []string
err = out.GetStruct("remotes", &remotes)
require.NoError(t, err)
assert.NotNil(t, remotes)
assert.Empty(t, remotes)
})
}
func TestRcProviders(t *testing.T) {
call := rc.Calls.Get("config/providers")
assert.NotNil(t, call)
in := rc.Params{}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
require.NotNil(t, out)
var registry []*fs.RegInfo
err = out.GetStruct("providers", ®istry)
require.NoError(t, err)
foundLocal := false
for _, provider := range registry {
if provider.Name == "local" {
foundLocal = true
break
}
}
assert.True(t, foundLocal, "didn't find local provider")
}
func TestRcSetPath(t *testing.T) {
oldPath := config.GetConfigPath()
newPath := oldPath + ".newPath"
call := rc.Calls.Get("config/setpath")
assert.NotNil(t, call)
in := rc.Params{
"path": newPath,
}
_, err := call.Fn(context.Background(), in)
require.NoError(t, err)
assert.Equal(t, newPath, config.GetConfigPath())
in["path"] = oldPath
_, err = call.Fn(context.Background(), in)
require.NoError(t, err)
assert.Equal(t, oldPath, config.GetConfigPath())
}
func TestRcPaths(t *testing.T) {
call := rc.Calls.Get("config/paths")
assert.NotNil(t, call)
out, err := call.Fn(context.Background(), nil)
require.NoError(t, err)
assert.Equal(t, config.GetConfigPath(), out["config"])
assert.Equal(t, config.GetCacheDir(), out["cache"])
assert.Equal(t, os.TempDir(), out["temp"])
}
func TestRcConfigUnlock(t *testing.T) {
call := rc.Calls.Get("config/unlock")
assert.NotNil(t, call)
in := rc.Params{
"configPassword": "test",
}
out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
assert.Nil(t, out)
in = rc.Params{
"config_password": "test",
}
out, err = call.Fn(context.Background(), in)
require.NoError(t, err)
assert.Nil(t, out)
in = rc.Params{
"bad_config_password": "test",
}
out, err = call.Fn(context.Background(), in)
require.Error(t, err)
assert.ErrorContains(t, err, `Didn't find key "configPassword" in input`)
assert.Nil(t, out)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/config/authorize.go | fs/config/authorize.go | package config
import (
"context"
"fmt"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
)
// Authorize is for remote authorization of headless machines.
//
// It expects 1, 2 or 3 arguments
//
// rclone authorize "backend name"
// rclone authorize "backend name" "base64 encoded JSON blob"
// rclone authorize "backend name" "client id" "client secret"
func Authorize(ctx context.Context, args []string, noAutoBrowser bool, templateFile string) error {
ctx = suppressConfirm(ctx)
ctx = fs.ConfigOAuthOnly(ctx)
switch len(args) {
case 1, 2, 3:
default:
return fmt.Errorf("invalid number of arguments: %d", len(args))
}
Type := args[0] // FIXME could read this from input
ri, err := fs.Find(Type)
if err != nil {
return err
}
if ri.Config == nil {
return fmt.Errorf("can't authorize fs %q", Type)
}
// Config map for remote
inM := configmap.Simple{}
// Indicate that we are running rclone authorize
inM[ConfigAuthorize] = "true"
if noAutoBrowser {
inM[ConfigAuthNoBrowser] = "true"
}
// Indicate if we specified a custom template via a file
if templateFile != "" {
inM[ConfigTemplateFile] = templateFile
}
// Add extra parameters if supplied
if len(args) == 2 {
err := inM.Decode(args[1])
if err != nil {
return err
}
} else if len(args) == 3 {
inM[ConfigClientID] = args[1]
inM[ConfigClientSecret] = args[2]
}
// Name used for temporary remote
name := "**temp-fs**"
m := fs.ConfigMap(ri.Prefix, ri.Options, name, inM)
outM := configmap.Simple{}
m.ClearSetters()
m.AddSetter(outM)
m.AddGetter(outM, configmap.PriorityNormal)
err = PostConfig(ctx, name, m, ri)
if err != nil {
return err
}
// Print the code for the user to paste
out := outM["token"]
// If received a config blob, then return one
if len(args) == 2 {
out, err = outM.Encode()
if err != nil {
return err
}
}
fmt.Printf("Paste the following into your remote machine --->\n%s\n<---End paste\n", out)
return nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fs/config/configstruct/configstruct.go | fs/config/configstruct/configstruct.go | // Package configstruct parses unstructured maps into structures
package configstruct
import (
"encoding/csv"
"errors"
"fmt"
"reflect"
"regexp"
"strconv"
"strings"
"time"
"github.com/rclone/rclone/fs/config/configmap"
)
var matchUpper = regexp.MustCompile("([A-Z]+)")
// camelToSnake converts CamelCase to snake_case
func camelToSnake(in string) string {
out := matchUpper.ReplaceAllString(in, "_$1")
out = strings.ToLower(out)
out = strings.Trim(out, "_")
return out
}
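// Editor's note (added comment, not part of the original file): illustrative
// conversions: "CamelCase" -> "camel_case", "CheckSum" -> "check_sum".
// A run of consecutive capitals is kept together, so "IDs" -> "ids".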
// StringToInterface turns in into an interface{} the same type as def
//
// This supports a subset of builtin types, string, integer types,
// bool, time.Duration and []string.
//
// Builtin types are expected to be encoded as their natural
// stringifications as produced by fmt.Sprint, except for []string which
// is expected to be encoded as a CSV with an empty array encoded as "".
//
// Any other types are expected to be encoded by their String()
// methods and decoded by their `Set(s string) error` methods.
func StringToInterface(def any, in string) (newValue any, err error) {
typ := reflect.TypeOf(def)
o := reflect.New(typ)
switch def.(type) {
case string:
// return strings unmodified
newValue = in
case int, int8, int16, int32, int64,
uint, uint8, uint16, uint32, uint64, uintptr,
float32, float64:
// As per Rob Pike's advice in https://github.com/golang/go/issues/43306
// we only use Sscan for numbers
var n int
n, err = fmt.Sscanln(in, o.Interface())
if err == nil && n != 1 {
err = errors.New("no items parsed")
}
newValue = o.Elem().Interface()
case bool:
newValue, err = strconv.ParseBool(in)
case time.Duration:
newValue, err = time.ParseDuration(in)
case []string:
// CSV decode arrays of strings - ideally we would use
// fs.CommaSepList here but we can't as it would cause
// a circular import.
if len(in) == 0 {
newValue = []string{}
} else {
r := csv.NewReader(strings.NewReader(in))
newValue, err = r.Read()
switch _err := err.(type) {
case *csv.ParseError:
err = _err.Err // remove line numbers from the error message
}
}
default:
// Try using a Set method
if do, ok := o.Interface().(interface{ Set(s string) error }); ok {
err = do.Set(in)
} else {
err = errors.New("don't know how to parse this type")
}
newValue = o.Elem().Interface()
}
if err != nil {
return nil, fmt.Errorf("parsing %q as %T failed: %w", in, def, err)
}
return newValue, nil
}
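// Editor's note: hypothetical examples, not part of the original file,
// illustrating the conventions described above:
//
//	StringToInterface(0, "42")                  // -> int(42)
//	StringToInterface(false, "true")            // -> true
//	StringToInterface(time.Duration(0), "10s")  // -> 10 * time.Second
//	StringToInterface([]string(nil), `a,"b,c"`) // -> []string{"a", "b,c"}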
// InterfaceToString turns in into a string
//
// This supports a subset of builtin types, string, integer types,
// bool, time.Duration and []string.
//
// Builtin types are expected to be encoded as their natural
// stringifications as produced by fmt.Sprint, except for []string which
// is expected to be encoded as a CSV with an empty array encoded as "".
//
// Any other types are expected to be encoded by their String()
// methods and decoded by their `Set(s string) error` methods.
func InterfaceToString(in any) (strValue string, err error) {
switch x := in.(type) {
case string:
// return strings unmodified
strValue = x
case int, int8, int16, int32, int64,
uint, uint8, uint16, uint32, uint64, uintptr,
float32, float64:
strValue = fmt.Sprint(in)
case bool:
strValue = fmt.Sprint(in)
case time.Duration:
strValue = fmt.Sprint(in)
case []string:
// CSV encode arrays of strings - ideally we would use
// fs.CommaSepList here but we can't as it would cause
// a circular import.
if len(x) == 0 {
strValue = ""
} else if len(x) == 1 && len(x[0]) == 0 {
strValue = `""`
} else {
var buf strings.Builder
w := csv.NewWriter(&buf)
err := w.Write(x)
if err != nil {
return "", err
}
w.Flush()
strValue = strings.TrimSpace(buf.String())
}
default:
// Try using a String method
if do, ok := in.(fmt.Stringer); ok {
strValue = do.String()
} else {
err = errors.New("don't know how to convert this")
}
}
if err != nil {
return "", fmt.Errorf("interpreting %T as string failed: %w", in, err)
}
return strValue, nil
}
// Item describes a single entry in the options structure
type Item struct {
Name string // snake_case
Field string // CamelCase
Set func(any) // set this field
Value any
}
// Items parses the opt struct and returns a slice of Item objects.
//
// opt must be a pointer to a struct. The struct should have entirely
// public fields.
//
// The config name is looked up in a struct tag called "config"; if that is
// not found, the field name converted from CamelCase to snake_case is used.
//
// Nested structs are looked up too. If the parent struct has a struct
// tag, this will be used as a prefix for the values in the sub
// struct, otherwise they will be embedded as they are.
func Items(opt any) (items []Item, err error) {
def := reflect.ValueOf(opt)
if def.Kind() != reflect.Ptr {
return nil, errors.New("argument must be a pointer")
}
def = def.Elem() // indirect the pointer
if def.Kind() != reflect.Struct {
return nil, errors.New("argument must be a pointer to a struct")
}
defType := def.Type()
for i := range def.NumField() {
field := def.Field(i)
fieldType := defType.Field(i)
fieldName := fieldType.Name
configName, hasTag := fieldType.Tag.Lookup("config")
if hasTag && configName == "-" {
// Skip items with config:"-"
continue
}
if !hasTag {
configName = camelToSnake(fieldName)
}
valuePtr := field.Addr().Interface() // pointer to the value as an interface
_, canSet := valuePtr.(interface{ Set(string) error }) // can we set this with the Option Set protocol
// If we have a nested struct that isn't a config item then recurse
if fieldType.Type.Kind() == reflect.Struct && !canSet {
newItems, err := Items(valuePtr)
if err != nil {
return nil, fmt.Errorf("error parsing field %q: %w", fieldName, err)
}
for _, newItem := range newItems {
if hasTag {
newItem.Name = configName + "_" + newItem.Name
}
newItem.Field = fieldName + "." + newItem.Field
items = append(items, newItem)
}
} else {
defaultItem := Item{
Name: configName,
Field: fieldName,
Set: func(newValue any) {
field.Set(reflect.ValueOf(newValue))
},
Value: field.Interface(),
}
items = append(items, defaultItem)
}
}
return items, nil
}
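// Editor's note: illustrative sketch, not part of the original file. Given a
// caller-defined struct such as
//
//	type exampleOptions struct {
//		CheckSum bool
//		UserName string `config:"user"`
//		Nested   struct {
//			Depth int
//		} `config:"nest"`
//	}
//
// Items(&exampleOptions{}) returns items named "check_sum", "user" and
// "nest_depth" (the parent's "nest" tag becomes a prefix for the nested
// field's name).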
// setValue sets newValue to configValue returning an updated newValue
func setValue(newValue any, configValue string) (any, error) {
newNewValue, err := StringToInterface(newValue, configValue)
if err != nil {
// Mask errors if setting an empty string as
// it isn't valid for all types. This makes
// empty string be the equivalent of unset.
if configValue != "" {
return nil, err
}
} else {
newValue = newNewValue
}
return newValue, nil
}
// Set interprets the field names in defaults and looks up config
// values in the config passed in. Any values found in config will be
// set in the opt structure.
//
// opt must be a pointer to a struct. The struct should have entirely
// public fields. The field names are converted from CamelCase to
// snake_case and looked up in the config supplied or a
// `config:"field_name"` is looked up.
//
// If items are found then they are converted from string to native
// types and set in opt.
//
// All the field types in the struct must implement fmt.Scanner.
func Set(config configmap.Getter, opt any) (err error) {
defaultItems, err := Items(opt)
if err != nil {
return err
}
for _, defaultItem := range defaultItems {
newValue := defaultItem.Value
if configValue, ok := config.Get(defaultItem.Name); ok {
newValue, err = setValue(newValue, configValue)
if err != nil {
return fmt.Errorf("couldn't parse config item %q = %q as %T: %w", defaultItem.Name, configValue, defaultItem.Value, err)
}
}
defaultItem.Set(newValue)
}
return nil
}
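// Editor's note: hypothetical usage sketch, not part of the original file
// (exampleOpt is an invented type):
//
//	type exampleOpt struct {
//		ChunkSize int
//		Verbose   bool
//	}
//	m := configmap.Simple{"chunk_size": "1024", "verbose": "true"}
//	var o exampleOpt
//	if err := Set(m, &o); err == nil {
//		// o.ChunkSize == 1024 and o.Verbose == true
//	}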
// setIfSameType sets aPtr to b if they are the same type, otherwise returns false.
func setIfSameType(aPtr any, b any) bool {
aVal := reflect.ValueOf(aPtr).Elem()
bVal := reflect.ValueOf(b)
if aVal.Type() != bVal.Type() {
return false
}
aVal.Set(bVal)
return true
}
// SetAny interprets the field names in defaults and looks up config
// values in the config passed in. Any values found in config will be
// set in the opt structure.
//
// opt must be a pointer to a struct. The struct should have entirely
// public fields. The field names are converted from CamelCase to
// snake_case and looked up in the config supplied or a
// `config:"field_name"` is looked up.
//
// If items are found then they are set directly if the correct type,
// otherwise they are converted to string and then converted from
// string to native types and set in opt.
//
// All the field types in the struct must implement fmt.Scanner.
func SetAny(config map[string]any, opt any) (err error) {
defaultItems, err := Items(opt)
if err != nil {
return err
}
for _, defaultItem := range defaultItems {
newValue := defaultItem.Value
if configValue, ok := config[defaultItem.Name]; ok {
if !setIfSameType(&newValue, configValue) {
// Convert the config value to be a string
stringConfigValue, err := InterfaceToString(configValue)
if err != nil {
return err
}
newValue, err = setValue(newValue, stringConfigValue)
if err != nil {
return fmt.Errorf("couldn't parse config item %q = %q as %T: %w", defaultItem.Name, stringConfigValue, defaultItem.Value, err)
}
}
}
defaultItem.Set(newValue)
}
return nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |