repo stringlengths 6 47 | file_url stringlengths 77 269 | file_path stringlengths 5 186 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-07 08:35:43 2026-01-07 08:55:24 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/encoder/encoder.go | lib/encoder/encoder.go | // Package encoder provides functionality to translate file names
// for usage on restrictive storage systems.
//
// The restricted set of characters are mapped to a unicode equivalent version
// (most to their FULLWIDTH variant) to increase compatibility with other
// storage systems.
// See: http://unicode-search.net/unicode-namesearch.pl?term=FULLWIDTH
//
// Encoders will also quote reserved characters to differentiate between
// the raw and encoded forms.
package encoder
import (
"bytes"
"fmt"
"io"
"sort"
"strconv"
"strings"
"unicode/utf8"
)
const (
	// fullOffset: adding this to any printable ASCII character turns it into the
	// FULLWIDTH variant (e.g. '*'+fullOffset == '＊'); subtracting it decodes.
	fullOffset = 0xFEE0
	// symbolOffset is the first rune of the SYMBOL FOR block for control
	// characters: '␀'+c maps control code c (0x00-0x1F) to its symbol.
	symbolOffset = '␀' // SYMBOL FOR NULL
	// QuoteRune is the rune used for quoting reserved characters, so a name
	// that already contains an encoded form survives an Encode/Decode round trip.
	QuoteRune = '‛' // SINGLE HIGH-REVERSED-9 QUOTATION MARK
)
// NB keep the tests in fstests/fstests/fstests.go FsEncoding up to date with this
// NB keep the aliases up to date below also
// Possible flags for the MultiEncoder
const (
	// EncodeZero is the zero value: no encoding flags set.
	EncodeZero MultiEncoder = 0 // NUL(0x00)
	// EncodeRaw disables all transformation — Encode and Decode return
	// their input unchanged when the mask equals EncodeRaw exactly.
	EncodeRaw MultiEncoder = 1 << (iota - 1)
	// Each following flag enables encoding of one character class.
	EncodeSlash         // /
	EncodeLtGt          // <>
	EncodeDoubleQuote   // "
	EncodeSingleQuote   // '
	EncodeBackQuote     // `
	EncodeDollar        // $
	EncodeColon         // :
	EncodeQuestion      // ?
	EncodeAsterisk      // *
	EncodePipe          // |
	EncodeHash          // #
	EncodePercent       // %
	EncodeBackSlash     // \
	EncodeCrLf          // CR(0x0D), LF(0x0A)
	EncodeDel           // DEL(0x7F)
	EncodeCtl           // CTRL(0x01-0x1F)
	EncodeLeftSpace     // Leading SPACE
	EncodeLeftPeriod    // Leading .
	EncodeLeftTilde     // Leading ~
	EncodeLeftCrLfHtVt  // Leading CR LF HT VT
	EncodeRightSpace    // Trailing SPACE
	EncodeRightPeriod   // Trailing .
	EncodeRightCrLfHtVt // Trailing CR LF HT VT
	EncodeInvalidUtf8   // Invalid UTF-8 bytes
	EncodeDot           // . and .. names
	EncodeSquareBracket // []
	EncodeSemicolon     // ;
	EncodeExclamation   // !

	// Synthetic flags — unions of the above for common platforms.
	EncodeWin         = EncodeColon | EncodeQuestion | EncodeDoubleQuote | EncodeAsterisk | EncodeLtGt | EncodePipe // :?"*<>|
	EncodeHashPercent = EncodeHash | EncodePercent                                                                 // #%
)
// Has returns true if flag is contained in mask.
//
// Note this tests for a non-zero intersection, so a multi-bit flag
// matches if any of its bits are set in mask.
func (mask MultiEncoder) Has(flag MultiEncoder) bool {
	return (mask & flag) != 0
}
// Encoder can transform names to and from the original and translated version.
//
// Implementations must guarantee that Decode(Encode(s)) == s for any s.
type Encoder interface {
	// Encode takes a raw name and substitutes any reserved characters and
	// patterns in it.
	Encode(string) string
	// Decode takes a name and undoes any substitutions made by Encode.
	Decode(string) string
	// FromStandardPath takes a / separated path in Standard encoding
	// and converts it to a / separated path in this encoding.
	FromStandardPath(string) string
	// FromStandardName takes name in Standard encoding and converts
	// it in this encoding.
	FromStandardName(string) string
	// ToStandardPath takes a / separated path in this encoding
	// and converts it to a / separated path in Standard encoding.
	ToStandardPath(string) string
	// ToStandardName takes name in this encoding and converts
	// it in Standard encoding.
	ToStandardName(string) string
}
// MultiEncoder is a configurable Encoder. The Encode* constants in this
// package can be combined using bitwise or (|) to enable handling of multiple
// character classes.
type MultiEncoder uint
// Aliases maps encodings to names and vice versa.
// Populated once by init via alias(); read-only afterwards.
var (
	encodingToName = map[MultiEncoder]string{}
	nameToEncoding = map[string]MultiEncoder{}
)
// alias adds an alias for MultiEncoder.String() and MultiEncoder.Set().
//
// The forward (name -> mask) mapping is always recorded; the reverse
// (mask -> name) mapping keeps whichever name was registered first.
func alias(name string, mask MultiEncoder) {
	nameToEncoding[name] = mask
	// don't overwrite existing reverse translations
	if _, taken := encodingToName[mask]; taken {
		return
	}
	encodingToName[mask] = name
}
func init() {
alias("Raw", EncodeRaw)
alias("None", EncodeZero)
alias("Slash", EncodeSlash)
alias("LtGt", EncodeLtGt)
alias("SquareBracket", EncodeSquareBracket)
alias("Semicolon", EncodeSemicolon)
alias("Exclamation", EncodeExclamation)
alias("DoubleQuote", EncodeDoubleQuote)
alias("SingleQuote", EncodeSingleQuote)
alias("BackQuote", EncodeBackQuote)
alias("Dollar", EncodeDollar)
alias("Colon", EncodeColon)
alias("Question", EncodeQuestion)
alias("Asterisk", EncodeAsterisk)
alias("Pipe", EncodePipe)
alias("Hash", EncodeHash)
alias("Percent", EncodePercent)
alias("BackSlash", EncodeBackSlash)
alias("CrLf", EncodeCrLf)
alias("Del", EncodeDel)
alias("Ctl", EncodeCtl)
alias("LeftSpace", EncodeLeftSpace)
alias("LeftPeriod", EncodeLeftPeriod)
alias("LeftTilde", EncodeLeftTilde)
alias("LeftCrLfHtVt", EncodeLeftCrLfHtVt)
alias("RightSpace", EncodeRightSpace)
alias("RightPeriod", EncodeRightPeriod)
alias("RightCrLfHtVt", EncodeRightCrLfHtVt)
alias("InvalidUtf8", EncodeInvalidUtf8)
alias("Dot", EncodeDot)
}
// ValidStrings returns all the valid MultiEncoder strings,
// sorted and joined with ", " for use in error messages.
func ValidStrings() string {
	names := make([]string, 0, len(nameToEncoding))
	for name := range nameToEncoding {
		names = append(names, name)
	}
	sort.Strings(names)
	return strings.Join(names, ", ")
}
// String converts the MultiEncoder into text.
//
// If the whole mask has a registered name that is returned, otherwise
// the mask is decomposed into individual bits, each rendered as its
// registered name or as hex, joined with ",".
func (mask MultiEncoder) String() string {
	// Exact translation available?
	if name, found := encodingToName[mask]; found {
		return name
	}
	// Decompose bit by bit.
	var names []string
	for bit := MultiEncoder(1); bit != 0; bit <<= 1 {
		if mask&bit == 0 {
			continue
		}
		name, found := encodingToName[bit]
		if !found {
			name = fmt.Sprintf("0x%X", uint(bit))
		}
		names = append(names, name)
	}
	return strings.Join(names, ",")
}
// Set converts a string into a MultiEncoder.
//
// The input is a comma separated list of flag names (see ValidStrings)
// and/or numeric values accepted by strconv.ParseUint with base 0.
// On success the parsed flags replace *mask entirely.
func (mask *MultiEncoder) Set(in string) error {
	var result MultiEncoder
	for _, part := range strings.Split(in, ",") {
		part = strings.TrimSpace(part)
		if bits, known := nameToEncoding[part]; known {
			result |= bits
			continue
		}
		value, err := strconv.ParseUint(part, 0, 0)
		if err != nil {
			return fmt.Errorf("bad encoding %q: possible values are: %s", part, ValidStrings())
		}
		result |= MultiEncoder(value)
	}
	*mask = result
	return nil
}
// Type returns a textual type of the MultiEncoder to satisfy the pflag.Value
// interface (used when the encoding is exposed as a command line flag).
func (mask MultiEncoder) Type() string {
	return "Encoding"
}
// Scan implements the fmt.Scanner interface by reading one
// whitespace-delimited token and parsing it with Set.
func (mask *MultiEncoder) Scan(s fmt.ScanState, ch rune) error {
	tok, err := s.Token(true, nil)
	if err != nil {
		return err
	}
	return mask.Set(string(tok))
}
// Encode takes a raw name and substitutes any reserved characters and
// patterns in it
func (mask MultiEncoder) Encode(in string) string {
if mask == EncodeRaw {
return in
}
if in == "" {
return ""
}
if mask.Has(EncodeDot) {
switch in {
case ".":
return "."
case "..":
return ".."
case ".":
return string(QuoteRune) + "."
case "..":
return string(QuoteRune) + "." + string(QuoteRune) + "."
}
}
// handle prefix only replacements
prefix := ""
if mask.Has(EncodeLeftSpace) { // Leading SPACE
if in[0] == ' ' {
prefix, in = "␠", in[1:] // SYMBOL FOR SPACE
} else if r, l := utf8.DecodeRuneInString(in); r == '␠' { // SYMBOL FOR SPACE
prefix, in = string(QuoteRune)+"␠", in[l:] // SYMBOL FOR SPACE
}
}
if mask.Has(EncodeLeftPeriod) && prefix == "" { // Leading PERIOD
if in[0] == '.' {
prefix, in = ".", in[1:] // FULLWIDTH FULL STOP
} else if r, l := utf8.DecodeRuneInString(in); r == '.' { // FULLWIDTH FULL STOP
prefix, in = string(QuoteRune)+".", in[l:] // FULLWIDTH FULL STOP
}
}
if mask.Has(EncodeLeftTilde) && prefix == "" { // Leading ~
if in[0] == '~' {
prefix, in = string('~'+fullOffset), in[1:] // FULLWIDTH TILDE
} else if r, l := utf8.DecodeRuneInString(in); r == '~'+fullOffset {
prefix, in = string(QuoteRune)+string('~'+fullOffset), in[l:] // FULLWIDTH TILDE
}
}
if mask.Has(EncodeLeftCrLfHtVt) && prefix == "" { // Leading CR LF HT VT
switch c := in[0]; c {
case '\t', '\n', '\v', '\r':
prefix, in = string('␀'+rune(c)), in[1:] // SYMBOL FOR NULL
default:
switch r, l := utf8.DecodeRuneInString(in); r {
case '␀' + '\t', '␀' + '\n', '␀' + '\v', '␀' + '\r':
prefix, in = string(QuoteRune)+string(r), in[l:]
}
}
}
// handle suffix only replacements
suffix := ""
if in != "" {
if mask.Has(EncodeRightSpace) { // Trailing SPACE
if in[len(in)-1] == ' ' {
suffix, in = "␠", in[:len(in)-1] // SYMBOL FOR SPACE
} else if r, l := utf8.DecodeLastRuneInString(in); r == '␠' {
suffix, in = string(QuoteRune)+"␠", in[:len(in)-l] // SYMBOL FOR SPACE
}
}
if mask.Has(EncodeRightPeriod) && suffix == "" { // Trailing .
if in[len(in)-1] == '.' {
suffix, in = ".", in[:len(in)-1] // FULLWIDTH FULL STOP
} else if r, l := utf8.DecodeLastRuneInString(in); r == '.' {
suffix, in = string(QuoteRune)+".", in[:len(in)-l] // FULLWIDTH FULL STOP
}
}
if mask.Has(EncodeRightCrLfHtVt) && suffix == "" { // Trailing .
switch c := in[len(in)-1]; c {
case '\t', '\n', '\v', '\r':
suffix, in = string('␀'+rune(c)), in[:len(in)-1] // FULLWIDTH FULL STOP
default:
switch r, l := utf8.DecodeLastRuneInString(in); r {
case '␀' + '\t', '␀' + '\n', '␀' + '\v', '␀' + '\r':
suffix, in = string(QuoteRune)+string(r), in[:len(in)-l]
}
}
}
}
index := 0
if prefix == "" && suffix == "" {
// find the first rune which (most likely) needs to be replaced
index = strings.IndexFunc(in, func(r rune) bool {
switch r {
case 0, '␀', QuoteRune, utf8.RuneError:
return true
}
if mask.Has(EncodeAsterisk) { // *
switch r {
case '*', '*':
return true
}
}
if mask.Has(EncodeLtGt) { // <>
switch r {
case '<', '>',
'<', '>':
return true
}
}
if mask.Has(EncodeSquareBracket) { // []
switch r {
case '[', ']',
'[', ']':
return true
}
}
if mask.Has(EncodeSemicolon) { // ;
switch r {
case ';', ';':
return true
}
}
if mask.Has(EncodeExclamation) { // !
switch r {
case '!', '!':
return true
}
}
if mask.Has(EncodeQuestion) { // ?
switch r {
case '?', '?':
return true
}
}
if mask.Has(EncodeColon) { // :
switch r {
case ':', ':':
return true
}
}
if mask.Has(EncodePipe) { // |
switch r {
case '|', '|':
return true
}
}
if mask.Has(EncodeDoubleQuote) { // "
switch r {
case '"', '"':
return true
}
}
if mask.Has(EncodeSingleQuote) { // '
switch r {
case '\'', ''':
return true
}
}
if mask.Has(EncodeBackQuote) { // `
switch r {
case '`', '`':
return true
}
}
if mask.Has(EncodeDollar) { // $
switch r {
case '$', '$':
return true
}
}
if mask.Has(EncodeSlash) { // /
switch r {
case '/', '/':
return true
}
}
if mask.Has(EncodeBackSlash) { // \
switch r {
case '\\', '\':
return true
}
}
if mask.Has(EncodeCrLf) { // CR LF
switch r {
case rune(0x0D), rune(0x0A),
'␍', '␊':
return true
}
}
if mask.Has(EncodeHash) { // #
switch r {
case '#', '#':
return true
}
}
if mask.Has(EncodePercent) { // %
switch r {
case '%', '%':
return true
}
}
if mask.Has(EncodeDel) { // DEL(0x7F)
switch r {
case rune(0x7F), '␡':
return true
}
}
if mask.Has(EncodeCtl) { // CTRL(0x01-0x1F)
if r >= 1 && r <= 0x1F {
return true
} else if r > symbolOffset && r <= symbolOffset+0x1F {
return true
}
}
return false
})
}
// nothing to replace, return input
if index == -1 {
return in
}
var out bytes.Buffer
out.Grow(len(in) + len(prefix) + len(suffix))
out.WriteString(prefix)
// copy the clean part of the input and skip it
out.WriteString(in[:index])
in = in[index:]
for i, r := range in {
switch r {
case 0:
out.WriteRune(symbolOffset)
continue
case '␀', QuoteRune:
out.WriteRune(QuoteRune)
out.WriteRune(r)
continue
case utf8.RuneError:
if mask.Has(EncodeInvalidUtf8) {
// only encode invalid sequences and not utf8.RuneError
if i+3 > len(in) || in[i:i+3] != string(utf8.RuneError) {
_, l := utf8.DecodeRuneInString(in[i:])
appendQuotedBytes(&out, in[i:i+l])
continue
}
} else {
// append the real bytes instead of utf8.RuneError
_, l := utf8.DecodeRuneInString(in[i:])
out.WriteString(in[i : i+l])
continue
}
}
if mask.Has(EncodeAsterisk) { // *
switch r {
case '*':
out.WriteRune(r + fullOffset)
continue
case '*':
out.WriteRune(QuoteRune)
out.WriteRune(r)
continue
}
}
if mask.Has(EncodeLtGt) { // <>
switch r {
case '<', '>':
out.WriteRune(r + fullOffset)
continue
case '<', '>':
out.WriteRune(QuoteRune)
out.WriteRune(r)
continue
}
}
if mask.Has(EncodeSquareBracket) { // []
switch r {
case '[', ']':
out.WriteRune(r + fullOffset)
continue
case '[', ']':
out.WriteRune(QuoteRune)
out.WriteRune(r)
continue
}
}
if mask.Has(EncodeSemicolon) { // ;
switch r {
case ';':
out.WriteRune(r + fullOffset)
continue
case ';':
out.WriteRune(QuoteRune)
out.WriteRune(r)
continue
}
}
if mask.Has(EncodeExclamation) { // !
switch r {
case '!':
out.WriteRune(r + fullOffset)
continue
case '!':
out.WriteRune(QuoteRune)
out.WriteRune(r)
continue
}
}
if mask.Has(EncodeQuestion) { // ?
switch r {
case '?':
out.WriteRune(r + fullOffset)
continue
case '?':
out.WriteRune(QuoteRune)
out.WriteRune(r)
continue
}
}
if mask.Has(EncodeColon) { // :
switch r {
case ':':
out.WriteRune(r + fullOffset)
continue
case ':':
out.WriteRune(QuoteRune)
out.WriteRune(r)
continue
}
}
if mask.Has(EncodePipe) { // |
switch r {
case '|':
out.WriteRune(r + fullOffset)
continue
case '|':
out.WriteRune(QuoteRune)
out.WriteRune(r)
continue
}
}
if mask.Has(EncodeDoubleQuote) { // "
switch r {
case '"':
out.WriteRune(r + fullOffset)
continue
case '"':
out.WriteRune(QuoteRune)
out.WriteRune(r)
continue
}
}
if mask.Has(EncodeSingleQuote) { // '
switch r {
case '\'':
out.WriteRune(r + fullOffset)
continue
case ''':
out.WriteRune(QuoteRune)
out.WriteRune(r)
continue
}
}
if mask.Has(EncodeBackQuote) { // `
switch r {
case '`':
out.WriteRune(r + fullOffset)
continue
case '`':
out.WriteRune(QuoteRune)
out.WriteRune(r)
continue
}
}
if mask.Has(EncodeDollar) { // $
switch r {
case '$':
out.WriteRune(r + fullOffset)
continue
case '$':
out.WriteRune(QuoteRune)
out.WriteRune(r)
continue
}
}
if mask.Has(EncodeSlash) { // /
switch r {
case '/':
out.WriteRune(r + fullOffset)
continue
case '/':
out.WriteRune(QuoteRune)
out.WriteRune(r)
continue
}
}
if mask.Has(EncodeBackSlash) { // \
switch r {
case '\\':
out.WriteRune(r + fullOffset)
continue
case '\':
out.WriteRune(QuoteRune)
out.WriteRune(r)
continue
}
}
if mask.Has(EncodeCrLf) { // CR LF
switch r {
case rune(0x0D), rune(0x0A):
out.WriteRune(r + symbolOffset)
continue
case '␍', '␊':
out.WriteRune(QuoteRune)
out.WriteRune(r)
continue
}
}
if mask.Has(EncodeHash) { // #
switch r {
case '#':
out.WriteRune(r + fullOffset)
continue
case '#':
out.WriteRune(QuoteRune)
out.WriteRune(r)
continue
}
}
if mask.Has(EncodePercent) { // %
switch r {
case '%':
out.WriteRune(r + fullOffset)
continue
case '%':
out.WriteRune(QuoteRune)
out.WriteRune(r)
continue
}
}
if mask.Has(EncodeDel) { // DEL(0x7F)
switch r {
case rune(0x7F):
out.WriteRune('␡') // SYMBOL FOR DELETE
continue
case '␡':
out.WriteRune(QuoteRune)
out.WriteRune(r)
continue
}
}
if mask.Has(EncodeCtl) { // CTRL(0x01-0x1F)
if r >= 1 && r <= 0x1F {
out.WriteRune('␀' + r) // SYMBOL FOR NULL
continue
} else if r > symbolOffset && r <= symbolOffset+0x1F {
out.WriteRune(QuoteRune)
out.WriteRune(r)
continue
}
}
out.WriteRune(r)
}
out.WriteString(suffix)
return out.String()
}
// Decode takes a name and undoes any substitutions made by Encode
func (mask MultiEncoder) Decode(in string) string {
if mask == EncodeRaw {
return in
}
if mask.Has(EncodeDot) {
switch in {
case ".":
return "."
case "..":
return ".."
case string(QuoteRune) + ".":
return "."
case string(QuoteRune) + "." + string(QuoteRune) + ".":
return ".."
}
}
// handle prefix only replacements
prefix := ""
if r, l1 := utf8.DecodeRuneInString(in); mask.Has(EncodeLeftSpace) && r == '␠' { // SYMBOL FOR SPACE
prefix, in = " ", in[l1:]
} else if mask.Has(EncodeLeftPeriod) && r == '.' { // FULLWIDTH FULL STOP
prefix, in = ".", in[l1:]
} else if mask.Has(EncodeLeftTilde) && r == '~' { // FULLWIDTH TILDE
prefix, in = "~", in[l1:]
} else if mask.Has(EncodeLeftCrLfHtVt) && (r == '␀'+'\t' || r == '␀'+'\n' || r == '␀'+'\v' || r == '␀'+'\r') {
prefix, in = string(r-'␀'), in[l1:]
} else if r == QuoteRune {
if r, l2 := utf8.DecodeRuneInString(in[l1:]); mask.Has(EncodeLeftSpace) && r == '␠' { // SYMBOL FOR SPACE
prefix, in = "␠", in[l1+l2:]
} else if mask.Has(EncodeLeftPeriod) && r == '.' { // FULLWIDTH FULL STOP
prefix, in = ".", in[l1+l2:]
} else if mask.Has(EncodeLeftTilde) && r == '~' { // FULLWIDTH TILDE
prefix, in = "~", in[l1+l2:]
} else if mask.Has(EncodeLeftCrLfHtVt) && (r == '␀'+'\t' || r == '␀'+'\n' || r == '␀'+'\v' || r == '␀'+'\r') {
prefix, in = string(r), in[l1+l2:]
}
}
// handle suffix only replacements
suffix := ""
if r, l := utf8.DecodeLastRuneInString(in); mask.Has(EncodeRightSpace) && r == '␠' { // SYMBOL FOR SPACE
in = in[:len(in)-l]
if q, l2 := utf8.DecodeLastRuneInString(in); q == QuoteRune {
suffix, in = "␠", in[:len(in)-l2]
} else {
suffix = " "
}
} else if mask.Has(EncodeRightPeriod) && r == '.' { // FULLWIDTH FULL STOP
in = in[:len(in)-l]
if q, l2 := utf8.DecodeLastRuneInString(in); q == QuoteRune {
suffix, in = ".", in[:len(in)-l2]
} else {
suffix = "."
}
} else if mask.Has(EncodeRightCrLfHtVt) && (r == '␀'+'\t' || r == '␀'+'\n' || r == '␀'+'\v' || r == '␀'+'\r') {
in = in[:len(in)-l]
if q, l2 := utf8.DecodeLastRuneInString(in); q == QuoteRune {
suffix, in = string(r), in[:len(in)-l2]
} else {
suffix = string(r - '␀')
}
}
index := 0
if prefix == "" && suffix == "" {
// find the first rune which (most likely) needs to be replaced
index = strings.IndexFunc(in, func(r rune) bool {
switch r {
case '␀', QuoteRune:
return true
}
if mask.Has(EncodeAsterisk) { // *
switch r {
case '*':
return true
}
}
if mask.Has(EncodeLtGt) { // <>
switch r {
case '<', '>':
return true
}
}
if mask.Has(EncodeSquareBracket) { // []
switch r {
case '[', ']':
return true
}
}
if mask.Has(EncodeSemicolon) { // ;
switch r {
case ';':
return true
}
}
if mask.Has(EncodeExclamation) { // !
switch r {
case '!':
return true
}
}
if mask.Has(EncodeQuestion) { // ?
switch r {
case '?':
return true
}
}
if mask.Has(EncodeColon) { // :
switch r {
case ':':
return true
}
}
if mask.Has(EncodePipe) { // |
switch r {
case '|':
return true
}
}
if mask.Has(EncodeDoubleQuote) { // "
switch r {
case '"':
return true
}
}
if mask.Has(EncodeSingleQuote) { // '
switch r {
case ''':
return true
}
}
if mask.Has(EncodeBackQuote) { // `
switch r {
case '`':
return true
}
}
if mask.Has(EncodeDollar) { // $
switch r {
case '$':
return true
}
}
if mask.Has(EncodeSlash) { // /
switch r {
case '/':
return true
}
}
if mask.Has(EncodeBackSlash) { // \
switch r {
case '\':
return true
}
}
if mask.Has(EncodeCrLf) { // CR LF
switch r {
case '␍', '␊':
return true
}
}
if mask.Has(EncodeHash) { // #
switch r {
case '#':
return true
}
}
if mask.Has(EncodePercent) { // %
switch r {
case '%':
return true
}
}
if mask.Has(EncodeDel) { // DEL(0x7F)
switch r {
case '␡':
return true
}
}
if mask.Has(EncodeCtl) { // CTRL(0x01-0x1F)
if r > symbolOffset && r <= symbolOffset+0x1F {
return true
}
}
return false
})
}
// nothing to replace, return input
if index == -1 {
return in
}
var out bytes.Buffer
out.Grow(len(in))
out.WriteString(prefix)
// copy the clean part of the input and skip it
out.WriteString(in[:index])
in = in[index:]
var unquote, unquoteNext, skipNext bool
for i, r := range in {
if skipNext {
skipNext = false
continue
}
unquote, unquoteNext = unquoteNext, false
switch r {
case '␀': // SYMBOL FOR NULL
if unquote {
out.WriteRune(r)
} else {
out.WriteRune(0)
}
continue
case QuoteRune:
if unquote {
out.WriteRune(r)
} else {
unquoteNext = true
}
continue
}
if mask.Has(EncodeAsterisk) { // *
switch r {
case '*':
if unquote {
out.WriteRune(r)
} else {
out.WriteRune(r - fullOffset)
}
continue
}
}
if mask.Has(EncodeLtGt) { // <>
switch r {
case '<', '>':
if unquote {
out.WriteRune(r)
} else {
out.WriteRune(r - fullOffset)
}
continue
}
}
if mask.Has(EncodeSquareBracket) { // []
switch r {
case '[', ']':
if unquote {
out.WriteRune(r)
} else {
out.WriteRune(r - fullOffset)
}
continue
}
}
if mask.Has(EncodeSemicolon) { // ;
switch r {
case ';':
if unquote {
out.WriteRune(r)
} else {
out.WriteRune(r - fullOffset)
}
continue
}
}
if mask.Has(EncodeExclamation) { // !
switch r {
case '!':
if unquote {
out.WriteRune(r)
} else {
out.WriteRune(r - fullOffset)
}
continue
}
}
if mask.Has(EncodeQuestion) { // ?
switch r {
case '?':
if unquote {
out.WriteRune(r)
} else {
out.WriteRune(r - fullOffset)
}
continue
}
}
if mask.Has(EncodeColon) { // :
switch r {
case ':':
if unquote {
out.WriteRune(r)
} else {
out.WriteRune(r - fullOffset)
}
continue
}
}
if mask.Has(EncodePipe) { // |
switch r {
case '|':
if unquote {
out.WriteRune(r)
} else {
out.WriteRune(r - fullOffset)
}
continue
}
}
if mask.Has(EncodeDoubleQuote) { // "
switch r {
case '"':
if unquote {
out.WriteRune(r)
} else {
out.WriteRune(r - fullOffset)
}
continue
}
}
if mask.Has(EncodeSingleQuote) { // '
switch r {
case ''':
if unquote {
out.WriteRune(r)
} else {
out.WriteRune(r - fullOffset)
}
continue
}
}
if mask.Has(EncodeBackQuote) { // `
switch r {
case '`':
if unquote {
out.WriteRune(r)
} else {
out.WriteRune(r - fullOffset)
}
continue
}
}
if mask.Has(EncodeDollar) { // $
switch r {
case '$':
if unquote {
out.WriteRune(r)
} else {
out.WriteRune(r - fullOffset)
}
continue
}
}
if mask.Has(EncodeSlash) { // /
switch r {
case '/': // FULLWIDTH SOLIDUS
if unquote {
out.WriteRune(r)
} else {
out.WriteRune(r - fullOffset)
}
continue
}
}
if mask.Has(EncodeBackSlash) { // \
switch r {
case '\': // FULLWIDTH REVERSE SOLIDUS
if unquote {
out.WriteRune(r)
} else {
out.WriteRune(r - fullOffset)
}
continue
}
}
if mask.Has(EncodeCrLf) { // CR LF
switch r {
case '␍', '␊':
if unquote {
out.WriteRune(r)
} else {
out.WriteRune(r - symbolOffset)
}
continue
}
}
if mask.Has(EncodeHash) { // %
switch r {
case '#':
if unquote {
out.WriteRune(r)
} else {
out.WriteRune(r - fullOffset)
}
continue
}
}
if mask.Has(EncodePercent) { // %
switch r {
case '%':
if unquote {
out.WriteRune(r)
} else {
out.WriteRune(r - fullOffset)
}
continue
}
}
if mask.Has(EncodeDel) { // DEL(0x7F)
switch r {
case '␡': // SYMBOL FOR DELETE
if unquote {
out.WriteRune(r)
} else {
out.WriteRune(0x7F)
}
continue
}
}
if mask.Has(EncodeCtl) { // CTRL(0x01-0x1F)
if r > symbolOffset && r <= symbolOffset+0x1F {
if unquote {
out.WriteRune(r)
} else {
out.WriteRune(r - symbolOffset)
}
continue
}
}
if unquote {
if mask.Has(EncodeInvalidUtf8) {
skipNext = appendUnquotedByte(&out, in[i:])
if skipNext {
continue
}
}
out.WriteRune(QuoteRune)
}
switch r {
case utf8.RuneError:
// append the real bytes instead of utf8.RuneError
_, l := utf8.DecodeRuneInString(in[i:])
out.WriteString(in[i : i+l])
continue
}
out.WriteRune(r)
}
if unquoteNext {
out.WriteRune(QuoteRune)
}
out.WriteString(suffix)
return out.String()
}
// FromStandardPath takes a / separated path in Standard encoding
// and converts it to a / separated path in this encoding.
// It delegates to the package-level FromStandardPath helper.
func (mask MultiEncoder) FromStandardPath(s string) string {
	return FromStandardPath(mask, s)
}

// FromStandardName takes name in Standard encoding and converts
// it in this encoding.
// It delegates to the package-level FromStandardName helper.
func (mask MultiEncoder) FromStandardName(s string) string {
	return FromStandardName(mask, s)
}

// ToStandardPath takes a / separated path in this encoding
// and converts it to a / separated path in Standard encoding.
// It delegates to the package-level ToStandardPath helper.
func (mask MultiEncoder) ToStandardPath(s string) string {
	return ToStandardPath(mask, s)
}

// ToStandardName takes name in this encoding and converts
// it in Standard encoding.
// It delegates to the package-level ToStandardName helper.
func (mask MultiEncoder) ToStandardName(s string) string {
	return ToStandardName(mask, s)
}
// appendQuotedBytes writes each byte of s to w as QuoteRune followed by
// its two-digit uppercase hex value (used to encode invalid UTF-8).
func appendQuotedBytes(w io.Writer, s string) {
	// Build the format string once instead of concatenating
	// string(QuoteRune)+"%02X" on every iteration.
	format := string(QuoteRune) + "%02X"
	for _, b := range []byte(s) {
		_, _ = fmt.Fprintf(w, format, b)
	}
}
func appendUnquotedByte(w io.Writer, s string) bool {
if len(s) < 2 {
return false
}
u, err := strconv.ParseUint(s[:2], 16, 8)
if err != nil {
return false
}
n, _ := w.Write([]byte{byte(u)})
return n == 1
}
// identity is an Encoder that performs no substitutions at all.
type identity struct{}

// Encode returns in unchanged.
func (identity) Encode(in string) string { return in }

// Decode returns in unchanged.
func (identity) Decode(in string) string { return in }

// FromStandardPath converts a Standard-encoded path to this (identity) encoding.
func (i identity) FromStandardPath(s string) string {
	return FromStandardPath(i, s)
}

// FromStandardName converts a Standard-encoded name to this (identity) encoding.
func (i identity) FromStandardName(s string) string {
	return FromStandardName(i, s)
}

// ToStandardPath converts a path in this (identity) encoding to Standard encoding.
func (i identity) ToStandardPath(s string) string {
	return ToStandardPath(i, s)
}

// ToStandardName converts a name in this (identity) encoding to Standard encoding.
func (i identity) ToStandardName(s string) string {
	return ToStandardName(i, s)
}

// Identity returns an Encoder that always returns the input value.
func Identity() Encoder {
	return identity{}
}
// FromStandardPath takes a / separated path in Standard encoding
// and converts it to a / separated path in the given encoding.
//
// The original string is returned untouched when no segment changes,
// avoiding an allocation in the common case.
func FromStandardPath(e Encoder, s string) string {
	if e == Standard {
		return s
	}
	segments := strings.Split(s, "/")
	converted := make([]string, len(segments))
	same := true
	for i, segment := range segments {
		converted[i] = FromStandardName(e, segment)
		if converted[i] != segment {
			same = false
		}
	}
	if same {
		return s
	}
	return strings.Join(converted, "/")
}
// FromStandardName takes name in Standard encoding and converts
// it in the given encoding.
//
// It decodes with the Standard encoder first, then re-encodes with e,
// so names pass through their raw form in between.
func FromStandardName(e Encoder, s string) string {
	if e == Standard {
		return s
	}
	return e.Encode(Standard.Decode(s))
}
// ToStandardPath takes a / separated path in the given encoding
// and converts it to a / separated path in Standard encoding.
//
// The original string is returned untouched when no segment changes,
// avoiding an allocation in the common case.
func ToStandardPath(e Encoder, s string) string {
	if e == Standard {
		return s
	}
	segments := strings.Split(s, "/")
	converted := make([]string, len(segments))
	same := true
	for i, segment := range segments {
		converted[i] = ToStandardName(e, segment)
		if converted[i] != segment {
			same = false
		}
	}
	if same {
		return s
	}
	return strings.Join(converted, "/")
}
// ToStandardName takes name in the given encoding and converts
// it in Standard encoding.
//
// It decodes with e first, then re-encodes with the Standard encoder,
// so names pass through their raw form in between.
func ToStandardName(e Encoder, s string) string {
	if e == Standard {
		return s
	}
	return Standard.Encode(e.Decode(s))
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/encoder/filename/fuzz.go | lib/encoder/filename/fuzz.go | //go:build gofuzz
package filename
import (
"bytes"
"fmt"
)
// Run like:
// go-fuzz-build -o=fuzz-build.zip -func=Fuzz . && go-fuzz -minimize=5s -bin=fuzz-build.zip -workdir=testdata/corpus -procs=24
// Fuzz test the provided input.
//
// It checks two properties: Decode must never crash on arbitrary input,
// and Encode followed by Decode must round-trip losslessly. Returns 1
// (input is interesting) when both hold; panics otherwise.
func Fuzz(data []byte) int {
	// First try to decode as is.
	// We don't care about the result, it just shouldn't crash.
	Decode(string(data))
	// Now encode
	enc := Encode(string(data))
	// And decoded must match
	decoded, err := Decode(enc)
	if err != nil {
		panic(fmt.Sprintf("error decoding %q, input %q: %v", enc, string(data), err))
	}
	if !bytes.Equal(data, []byte(decoded)) {
		// enc[0] selects the compression table; decodeMap stores index+1,
		// so subtract 1 to report the actual table number.
		table := decodeMap[enc[0]]
		table--
		panic(fmt.Sprintf("decode mismatch, encoded: %q, org: %q, got: %q, table %d", enc, string(data), decoded, int(table)))
	}
	// Everything is good.
	return 1
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/encoder/filename/init.go | lib/encoder/filename/init.go | // Package filename provides utilities for encoder.
package filename
import (
"encoding/base64"
"sync"
"github.com/klauspost/compress/huff0"
)
// encodeURL is base64 url encoding values (the standard URL-safe alphabet).
const encodeURL = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_"

// decodeMap will return x = decodeMap[encodeURL[byte(x)]] - 1 if x >= 0 and x < 64, otherwise -1 is returned.
// Entries store index+1 so the zero value marks bytes outside the alphabet;
// filled in by initCoders.
var decodeMap [256]byte

// maxLength is the maximum length that will be attempted to be compressed.
const maxLength = 256

var (
	initOnce      sync.Once          // Used to control init of tables.
	encTables     [64]*huff0.Scratch // Encoders.
	encTableLocks [64]sync.Mutex     // Temporary locks for encoders since they are stateful.
	decTables     [64]*huff0.Decoder // Stateless decoders.
)

// Table identifiers; the table number is carried in the first character
// of an encoded name.
const (
	tableUncompressed = 0
	tableSCSU         = 59
	tableSCSUPlain    = 60
	tableRLE          = 61
	tableCustom       = 62
	tableReserved     = 63
)
// predefined tables as base64 URL encoded string.
var tablesData = [64]string{
// Uncompressed
tableUncompressed: "",
// ncw home directory
1: "MRDIEtAAMAzDMAzDSjX_ybu0w97bb-L3b2mR-rUl5LXW3lZII43kIDMzM1NXu3okgQs=",
// ncw images
2: "IhDIAEAA______-Pou_4Sf5z-uS-39MVWjullFLKM7EBECs=",
// ncw Google Drive:
3: "JxDQAIIBMDMzMwOzbv7nJJCyd_m_9D2llCarnQX33nvvlFKEhUxAAQ==",
// Hex
4: "ExDoSTD___-tfXfhJ0hKSkryTxU=",
// Base64
5: "JRDIcQf_______8PgIiIiIgINkggARHlkQwSSCCBxHFYINHdfXI=",
// Hex plus a bit...
6: "E5CxwAHm9sYcAlmWZVvMHA4Y5jw=",
// Hex, upper case letters.
7: "FICxgAMMAGC3YwMthe3DWM_wDAAQ",
// Special tables:
// SCSU and a fairly generic table:
tableSCSU: "UxAgZmEB-RYPU8hrnAk6uMgpTNQMB5MGRBx0D3T0JjyUyY-yOi5CoGgktbAktSh7d36HtPTFu7SXJ7FYw_AYmA74ZH2vWgc8O6Z5jLnWnsFqU_4B",
// SCSU with no table...
tableSCSUPlain: "",
// Compressed data has its own table.
tableCustom: "",
// Reserved for extension.
tableReserved: "",
}
// initCoders initializes the base64 decode map and the predefined huff0
// encoder/decoder tables. It is safe to call repeatedly and from
// multiple goroutines; the work runs exactly once via initOnce.
func initCoders() {
	initOnce.Do(func() {
		// Init base 64 decoder.
		// Store index+1 so that 0 can mean "byte not in alphabet".
		for i, v := range encodeURL {
			decodeMap[v] = byte(i) + 1
		}
		// Initialize encoders and decoders.
		for i, dataString := range tablesData {
			if len(dataString) == 0 {
				// Slot has no predefined table (uncompressed/special slots).
				continue
			}
			data, err := base64.URLEncoding.DecodeString(dataString)
			if err != nil {
				// Tables are compiled in; a decode failure is a programmer error.
				panic(err)
			}
			s, _, err := huff0.ReadTable(data, nil)
			if err != nil {
				panic(err)
			}
			// We want to save at least len(in) >> 5
			s.WantLogLess = 5
			s.Reuse = huff0.ReusePolicyMust
			encTables[i] = s
			decTables[i] = s.Decoder()
		}
		// Add custom table type: encoder builds its own table per input,
		// so there is no predefined decoder.
		var s huff0.Scratch
		s.Reuse = huff0.ReusePolicyNone
		encTables[tableCustom] = &s
		decTables[tableCustom] = nil
	})
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/encoder/filename/gentable.go | lib/encoder/filename/gentable.go | //go:build ignore
package main
import (
"bufio"
"bytes"
"encoding/base64"
"flag"
"fmt"
"math"
"os"
"strings"
"unicode/utf8"
"github.com/dop251/scsu"
"github.com/klauspost/compress"
"github.com/klauspost/compress/huff0"
)
// execute go run gentable.go
var indexFile = flag.String("index", "", "Index this file for table")
// Allow non-represented characters.
var addUnused = flag.Bool("all", true, "Make all bytes possible")
var scsuEncode = flag.Bool("scsu", false, "SCSU encode on each line before table")
func main() {
flag.Parse()
histogram := [256]uint64{
// Replace/add histogram data and execute go run gentable.go
// ncw home directory
//0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 19442, 760, 0, 349, 570, 1520, 199, 76, 685, 654, 0, 40377, 1605, 395132, 935270, 0, 1156377, 887730, 811737, 712241, 693240, 689139, 675964, 656417, 666577, 657413, 532, 24, 0, 145, 0, 3, 946, 44932, 37362, 46126, 36752, 76346, 19338, 47457, 14288, 38163, 4350, 7867, 36541, 65011, 30255, 26792, 22097, 1803, 39191, 61965, 76585, 11887, 12896, 5931, 1935, 1731, 1385, 1279, 9, 1278, 1, 420185, 0, 1146359, 746359, 968896, 868703, 1393640, 745019, 354147, 159462, 483979, 169092, 75937, 385858, 322166, 466635, 571268, 447132, 13792, 446484, 736844, 732675, 170232, 112983, 63184, 142357, 173945, 21521, 250, 0, 250, 4140, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 39, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 4, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 15, 0, 0, 0, 10, 0, 5, 0, 0, 0, 0, 0, 0, 283, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
//Images:
//0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 765, 0, 0, 0, 0, 0, 8, 7, 3, 3, 0, 29, 53, 247265, 83587, 0, 265952, 233552, 229781, 71156, 78374, 65141, 46152, 43767, 55603, 39411, 0, 0, 0, 0, 0, 88, 84, 141, 70, 222, 191, 51, 52, 101, 60, 53, 23, 17, 49, 93, 53, 17, 92, 0, 158, 109, 41, 19, 43, 28, 10, 5, 1, 0, 0, 0, 0, 879, 0, 3415, 6770, 39823, 3566, 2491, 964, 42115, 825, 5178, 40755, 483, 1290, 3294, 1720, 6309, 42983, 10, 37739, 3454, 7028, 5077, 854, 227, 1259, 767, 218, 0, 0, 0, 163, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
// Google Drive:
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 459, 0, 0, 7, 0, 0, 0, 7, 1, 1, 0, 2, 1, 506, 706, 0, 3903, 3552, 3694, 3338, 3262, 3257, 3222, 3249, 3325, 3261, 5, 0, 0, 1, 0, 0, 0, 48, 31, 61, 53, 46, 17, 17, 34, 32, 9, 22, 17, 31, 27, 19, 52, 5, 46, 84, 38, 14, 5, 19, 2, 2, 0, 8, 0, 8, 0, 180, 0, 5847, 3282, 3729, 3695, 3842, 3356, 316, 139, 487, 117, 95, 476, 289, 428, 609, 467, 5, 446, 592, 955, 130, 112, 57, 390, 168, 14, 0, 2, 0, 44, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
}
if *indexFile != "" {
for i := range histogram[:] {
histogram[i] = 0
}
b, err := os.ReadFile(*indexFile)
if err != nil {
panic(err)
}
if *scsuEncode {
br := bufio.NewReader(bytes.NewBuffer(b))
var encoded []byte
for {
line, err := br.ReadString('\n')
if err != nil {
break
}
line = strings.TrimSpace(line)
if len(line) < 3 || !utf8.ValidString(line) {
continue
}
e, err := scsu.Encode(line, nil)
if err != nil {
panic(err)
}
if len(e) >= len([]byte(line)) {
continue
}
encoded = append(encoded, e...)
}
fmt.Println("scsu", len(b), "->", len(encoded), "(excluding bigger)")
b = encoded
}
for _, v := range b {
histogram[v]++
}
}
// Sum up distributions
var total uint64
for _, v := range histogram[:] {
total += v
}
// Scale the distribution to approx this size.
const scale = 100 << 10
var tmp []byte
for i, v := range histogram[:] {
if v == 0 && !*addUnused {
continue
}
nf := float64(v) / float64(total) * scale
if nf < 1 {
nf = 1
}
t2 := make([]byte, int(math.Ceil(nf)))
for j := range t2 {
t2[j] = byte(i)
}
tmp = append(tmp, t2...)
}
var s huff0.Scratch
s.Reuse = huff0.ReusePolicyNone
_, _, err := huff0.Compress1X(tmp, &s)
if err != nil {
panic(err)
}
fmt.Println("table:", base64.URLEncoding.EncodeToString(s.OutTable))
// Encode without ones:
s.Reuse = huff0.ReusePolicyPrefer
tmp = tmp[:0]
for i, v := range histogram[:] {
nf := float64(v) / float64(total) * scale
t2 := make([]byte, int(math.Ceil(nf)))
for j := range t2 {
t2[j] = byte(i)
}
tmp = append(tmp, t2...)
}
_, _, err = huff0.Compress1X(tmp, &s)
fmt.Println("sample", len(tmp), "byte, compressed size:", len(s.OutData))
fmt.Println("Shannon limit:", compress.ShannonEntropyBits(tmp)/8, "bytes")
if err != nil {
panic(err)
}
fmt.Printf("avg size: 1 -> %.02f", float64(len(s.OutData))/float64(len(tmp)))
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/encoder/filename/encode.go | lib/encoder/filename/encode.go | package filename
import (
"encoding/base64"
"encoding/binary"
"github.com/dop251/scsu"
"github.com/klauspost/compress/huff0"
)
// Encode will encode the string and return a base64 (url) compatible version of it.
// Calling Decode with the returned string should always succeed.
// It is not a requirement that the input string is valid utf-8.
func Encode(s string) string {
	tableID, compressed := EncodeBytes(s)
	prefix := string(encodeURL[tableID])
	return prefix + base64.URLEncoding.EncodeToString(compressed)
}
// EncodeBytes will compress the given string and return a table identifier and a payload.
func EncodeBytes(s string) (table byte, payload []byte) {
	initCoders()
	// The uncompressed form is the baseline every table has to beat.
	bestSize := len(s)
	bestTable := byte(tableUncompressed)
	org := []byte(s)
	bestOut := []byte(s)
	// Try all tables and choose the best
	for i, enc := range encTables[:] {
		// Shadow org: the SCSU branch below may replace it with a
		// transformed input for this iteration only.
		org := org
		if len(org) <= 1 || len(org) > maxLength {
			// Use the uncompressed
			break
		}
		if enc == nil {
			continue
		}
		if i == tableSCSU {
			// For the SCSU table the input is first SCSU-encoded; the
			// Huffman compression below then runs on that encoding.
			var err error
			olen := len(org)
			org, err = scsu.EncodeStrict(s, make([]byte, 0, len(org)))
			if err != nil || olen <= len(org) {
				// SCSU failed or did not shrink the input; skip this table.
				continue
			}
			if len(org) < bestSize {
				// This is already better, store so we can use if the table cannot.
				bestOut = bestOut[:len(org)]
				bestTable = tableSCSUPlain
				bestSize = len(org)
				copy(bestOut, org)
			}
		}
		// Try to encode using table.
		err := func() error {
			// The encoder scratch tables are shared; serialize access and
			// copy any winning output before the lock is released (out may
			// reuse scratch storage).
			encTableLocks[i].Lock()
			defer encTableLocks[i].Unlock()
			out, _, err := huff0.Compress1X(org, enc)
			if err != nil {
				return err
			}
			if len(out) < bestSize {
				bestOut = bestOut[:len(out)]
				bestTable = byte(i)
				bestSize = len(out)
				copy(bestOut, out)
			}
			return nil
		}()
		// If input is a single byte repeated store as RLE or save uncompressed.
		if err == huff0.ErrUseRLE && i != tableSCSU {
			if len(org) > 2 {
				// Encode as one byte repeated since it will be smaller than uncompressed.
				// Layout: uvarint(count) followed by the repeated byte.
				n := binary.PutUvarint(bestOut, uint64(len(org)))
				bestOut = bestOut[:n+1]
				bestOut[n] = org[0]
				bestSize = n + 1
				bestTable = tableRLE
			}
			break
		}
	}
	return bestTable, bestOut
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/encoder/filename/decode.go | lib/encoder/filename/decode.go | package filename
import (
"bytes"
"encoding/base64"
"encoding/binary"
"errors"
"sync"
"github.com/dop251/scsu"
"github.com/klauspost/compress/huff0"
)
// ErrCorrupted is returned if a provided encoded filename cannot be decoded.
var ErrCorrupted = errors.New("file name corrupt")
// ErrUnsupported is returned if a provided encoding may come from a future version or the file name is corrupt.
var ErrUnsupported = errors.New("file name possibly generated by future version of rclone")
// Custom decoder for tableCustom types. Stateful, so must have lock.
var customDec huff0.Scratch
var customDecMu sync.Mutex
// Decode an encoded string.
//
// The first character selects the decoding table and the remainder is a
// base64 (URL alphabet) payload. ErrCorrupted is returned for anything
// that cannot be decoded.
func Decode(s string) (string, error) {
	initCoders()
	if s == "" {
		return "", ErrCorrupted
	}
	tableIdx := decodeMap[s[0]]
	if tableIdx == 0 {
		// Zero marks characters that never start a valid encoding.
		return "", ErrCorrupted
	}
	payload := s[1:]
	buf := make([]byte, base64.URLEncoding.DecodedLen(len(payload)))
	n, err := base64.URLEncoding.Decode(buf, []byte(payload))
	if err != nil || n < 0 {
		return "", ErrCorrupted
	}
	// decodeMap stores table+1 so that 0 can mean "invalid".
	return DecodeBytes(tableIdx-1, buf[:n])
}
// DecodeBytes will decode raw id and data values.
func DecodeBytes(table byte, data []byte) (string, error) {
	initCoders()
	switch table {
	case tableUncompressed:
		// Payload is the name itself.
		return string(data), nil
	case tableReserved:
		return "", ErrUnsupported
	case tableSCSUPlain:
		// SCSU without Huffman compression on top.
		return scsu.Decode(data)
	case tableRLE:
		// Layout: uvarint(count) followed by the single repeated byte.
		if len(data) < 2 {
			return "", ErrCorrupted
		}
		n, used := binary.Uvarint(data[:len(data)-1])
		if used <= 0 || n > maxLength {
			return "", ErrCorrupted
		}
		return string(bytes.Repeat(data[len(data)-1:], int(n))), nil
	case tableCustom:
		// The Huffman table is embedded in the payload; customDec is a
		// shared stateful scratch, hence the mutex.
		customDecMu.Lock()
		defer customDecMu.Unlock()
		_, data, err := huff0.ReadTable(data, &customDec)
		if err != nil {
			return "", ErrCorrupted
		}
		// Cap output so corrupt input cannot allocate unbounded memory.
		customDec.MaxDecodedSize = maxLength
		decoded, err := customDec.Decompress1X(data)
		if err != nil {
			return "", ErrCorrupted
		}
		return string(decoded), nil
	default:
		// One of the predefined static tables.
		if table >= byte(len(decTables)) {
			return "", ErrCorrupted
		}
		dec := decTables[table]
		if dec == nil {
			return "", ErrUnsupported
		}
		var dst [maxLength]byte
		name, err := dec.Decompress1X(dst[:0], data)
		if err != nil {
			return "", ErrCorrupted
		}
		if table == tableSCSU {
			// Huffman-decompressed payload is itself SCSU-encoded.
			return scsu.Decode(name)
		}
		return string(name), nil
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/encoder/filename/decode_test.go | lib/encoder/filename/decode_test.go | package filename
import (
"testing"
)
// TestDecode checks that decoding each reference string yields the expected
// plain text, and that re-encoding the expected value does not produce a
// longer encoding than the stored reference (compression regression check).
func TestDecode(t *testing.T) {
	tests := []struct {
		name    string
		encoded string
		want    string
		wantErr bool
	}{
		{
			name: "uncompressed",
			// tableUncompressed
			encoded: "AYS5i",
			want:    "a.b",
		},
		{
			name: "uncompressed-long",
			// tableUncompressed
			encoded: "AQnpHUVl4cUhCQTZsalRzaXI4MGdVTTVZQnpHUVl4cUhCQTZsalRzaXI4MGdVTTVZQnpHUVl4cUhCQTZsalRzaXI4MGdVTTVZQnpHUVl4cUhCQTZsalRzaXI4MGdVTTVZQnpHUVl4cUhCQTZsalRzaXI4MGdVTTVZQnpHUVl4cUhCQTZsalRzaXI4MGdVTTVZQnpHUVl4cUhCQTZsalRzaXI4MGdVTTVZQnpHUVl4cUhCQTZsalRzaXI4MGdVTTVZQnpHUVl4cUhCQTZsalRzaXI4MGdVTTVZQnpHUVl4cUhCQTZsalRzaXI4MGdVTTVZQnpHUVl4cUhCQTZsalRzaXI4MGdVTTVZQnpHUVl4cUhCQTZsalRzaXI4MGdVTTVZQnpHUVl4cUhCQTZsalRzaXI4MGdVTTVZQnpHUVl4cUhCQTZsalRzaXI4MGdVTTVZQnpHUVl4cUhCQTZsalRzaXI4MGdVTTVZQnpHUVl4cUhCQTZsalRzaXI4MGdVTTVZQnpHUVl4cUhCQTZsalRzaXI4MGdVTTVZQnpHUVl4cUhCQTZsalRzaXI4MGdVTTVZQnpHUVl4cUhCQTZsalRzaXI4MGdVTTVZQnpHUVl4cUhCQTZsalRzaXI4MGdVTTVZQnpHUVl4cUhCQTZsalRzaXI4MGdVTTVZQnpHUVl4cUhCQTZsalRzaXI4MGdVTTVZQnpHUVl4cUhCQTZsalRzaXI4MGdVTTVZQnpHUVl4cUhCQTZsalRzaXI4MGdVTTVZ",
			want:    "BzGQYxqHBA6ljTsir80gUM5YBzGQYxqHBA6ljTsir80gUM5YBzGQYxqHBA6ljTsir80gUM5YBzGQYxqHBA6ljTsir80gUM5YBzGQYxqHBA6ljTsir80gUM5YBzGQYxqHBA6ljTsir80gUM5YBzGQYxqHBA6ljTsir80gUM5YBzGQYxqHBA6ljTsir80gUM5YBzGQYxqHBA6ljTsir80gUM5YBzGQYxqHBA6ljTsir80gUM5YBzGQYxqHBA6ljTsir80gUM5YBzGQYxqHBA6ljTsir80gUM5YBzGQYxqHBA6ljTsir80gUM5YBzGQYxqHBA6ljTsir80gUM5YBzGQYxqHBA6ljTsir80gUM5YBzGQYxqHBA6ljTsir80gUM5YBzGQYxqHBA6ljTsir80gUM5YBzGQYxqHBA6ljTsir80gUM5YBzGQYxqHBA6ljTsir80gUM5YBzGQYxqHBA6ljTsir80gUM5YBzGQYxqHBA6ljTsir80gUM5YBzGQYxqHBA6ljTsir80gUM5YBzGQYxqHBA6ljTsir80gUM5YBzGQYxqHBA6ljTsir80gUM5Y",
		},
		{
			name: "plain-1",
			// Table 2
			encoded: "BzGQYxqHBA6ljTsir80gUM5Y=",
			want:    "-Duplican99E8ZI4___9_",
		},
		{
			name: "hex-1",
			// Table 4
			encoded: "D_--tHZROQpqqJ9PafqNa6STF",
			want:    "13646871dfabbs43323564654bbefff",
		},
		{
			name: "hex-2",
			// Table 6
			encoded: "GhIEAIOBQMFQeWm4SClVpXVldCXFZLj4uOgoJHChQ4KBiXQ==",
			want:    "5368616e6e6f6e206c696d69743a203534353833206279746573-+._=!()",
		},
		{
			name: "hex-3",
			// Table 7
			encoded: "HohwXBXoJcVFSHgpdVQlxHXIuVgpNCR06Eg5aBg==",
			want:    "7461626C6520312073697A653A203335206572723A203C6E696C3E",
		},
		{
			name: "base64-1",
			// Table 5
			encoded: "FMpABB9Ef0KP8OrVxjnE3LzUePuLZi8pPg7eW8bgyW2d3Ucckf4rlE0mkAvlILVpOmF3L-rFbmNrpUO2HQFlF4SCMPVPeCEX6LeOg5JVpUVCXV1WSazD9vSpr",
			want:    "UxAYiB0FNTTkXRw9P8hwq-WmN7tYwbe-sFw8C3snDRG1d-yjrdOUVZQyLdtkJ8tuvhBSnuBiLjVieCAroWEZDIO4Hb_rKgdzPjMqFE7inwHJ2isF==",
		},
		{
			name: "custom-1",
			// Table 62, custom
			encoded: "-BeADJCoG_________________xc=",
			want:    "Uaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
		},
		{
			name: "custom-2",
			// Table 62, custom
			encoded: "-BPABDWUppYyllDKW0sYYSymljJQx",
			want:    "12312132123121321321321321312312312313132132131231213213213213123121321321321",
		},
		{
			name: "rle-1",
			// tableRLE
			encoded: "9a2E=",
			want:    "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
		},
		{
			name: "regular-1",
			// Table 1
			encoded: "BeSSrnzj0j3OXyR9K81M=",
			want:    "regular-filename.txt",
		},
		{
			name: "regular-3",
			// Table 2
			encoded: "COyCCD-42d9s=",
			want:    "00012312.JPG",
		},
		{
			name: "regular-4",
			// Table 3
			encoded: "DmqiJmrhNSDOJTCKTyCQ=",
			want:    ". . . .txta123123123",
		},
		{
			name: "unicode-1",
			// tableSCSUPlain
			encoded: "8D5V3MESVd-WEF7WuqaOvpKUWtYGEyw5UDQ==",
			want:    "長い長いUNICODEファイル名",
		},
		{
			name: "unicode-2",
			// tableSCSUPlain
			encoded: "8GyHV1N7u2OEg4ufQ3eHQ3Ngg6N3X0CDg4-HX0NXU2tg=",
			want:    "ვეპხის ტყაოსანი შოთა რუსთაველი",
		},
		{
			name: "unicode-3",
			// tableSCSU
			encoded: "7LpehMXOrWe7mcT_lpf2MN1Nmgu55jpXHLavZcXJb2UTJ-UmGU15iznkD",
			want:    "Sønderjysk: Æ ka æe glass uhen at det go mæ naue.,",
		},
		{
			name: "unicode-4",
			// tableSCSU
			encoded: "7TCSRm0liJDR0ulpBq4Lla_XB2mWdLFMEs8wEQKHAGa8FRr333ntJ6Ww6_f__N5VKeYM=",
			want:    "Hello------world 時危兵甲滿天涯,載道流離起怨咨.bin",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := Decode(tt.encoded)
			if (err != nil) != tt.wantErr {
				if tt.encoded == "" && tt.want != "" {
					// Helper mode for adding cases: print a proposed encoding.
					proposed := Encode(tt.want)
					table := decodeMap[proposed[0]] - 1
					t.Errorf("No encoded value, try '%s', table is %d", proposed, table)
					return
				}
				t.Errorf("Decode() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if err == nil {
				proposed := Encode(tt.want)
				table := decodeMap[proposed[0]] - 1
				if len(proposed) > len(tt.encoded) {
					t.Errorf("Got longer encoded value than reference. Likely compression regression. Got %s, table %d", proposed, table)
				}
				// BUGFIX: this previously duplicated the `>` comparison above,
				// so improvements were never logged. An improvement is a
				// *shorter* proposed encoding.
				if len(proposed) < len(tt.encoded) {
					t.Logf("Got better encoded value, improved length %d, was %d", len(proposed), len(tt.encoded))
				}
			}
			if got != tt.want {
				t.Errorf("Decode() got = %v, want %v", got, tt.want)
			}
		})
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/encoder/internal/gen/main.go | lib/encoder/internal/gen/main.go | // Package main provides utilities for encoder.
package main
import (
"flag"
"fmt"
"math/rand"
"os"
"slices"
"strconv"
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/encoder"
)
const (
edgeLeft = iota
edgeRight
)
type mapping struct {
mask encoder.MultiEncoder
src, dst []rune
}
type stringPair struct {
a, b string
}
const header = `// Code generated by ./internal/gen/main.go. DO NOT EDIT.
` + `//go:generate go run ./internal/gen/main.go
package encoder
`
var maskBits = []struct {
mask encoder.MultiEncoder
name string
}{
{encoder.EncodeZero, "EncodeZero"},
{encoder.EncodeSlash, "EncodeSlash"},
{encoder.EncodeSingleQuote, "EncodeSingleQuote"},
{encoder.EncodeBackQuote, "EncodeBackQuote"},
{encoder.EncodeLtGt, "EncodeLtGt"},
{encoder.EncodeSquareBracket, "EncodeSquareBracket"},
{encoder.EncodeSemicolon, "EncodeSemicolon"},
{encoder.EncodeExclamation, "EncodeExclamation"},
{encoder.EncodeDollar, "EncodeDollar"},
{encoder.EncodeDoubleQuote, "EncodeDoubleQuote"},
{encoder.EncodeColon, "EncodeColon"},
{encoder.EncodeQuestion, "EncodeQuestion"},
{encoder.EncodeAsterisk, "EncodeAsterisk"},
{encoder.EncodePipe, "EncodePipe"},
{encoder.EncodeHash, "EncodeHash"},
{encoder.EncodePercent, "EncodePercent"},
{encoder.EncodeBackSlash, "EncodeBackSlash"},
{encoder.EncodeCrLf, "EncodeCrLf"},
{encoder.EncodeDel, "EncodeDel"},
{encoder.EncodeCtl, "EncodeCtl"},
{encoder.EncodeLeftSpace, "EncodeLeftSpace"},
{encoder.EncodeLeftPeriod, "EncodeLeftPeriod"},
{encoder.EncodeLeftTilde, "EncodeLeftTilde"},
{encoder.EncodeLeftCrLfHtVt, "EncodeLeftCrLfHtVt"},
{encoder.EncodeRightSpace, "EncodeRightSpace"},
{encoder.EncodeRightPeriod, "EncodeRightPeriod"},
{encoder.EncodeRightCrLfHtVt, "EncodeRightCrLfHtVt"},
{encoder.EncodeInvalidUtf8, "EncodeInvalidUtf8"},
{encoder.EncodeDot, "EncodeDot"},
}
type edge struct {
mask encoder.MultiEncoder
name string
edge int
orig []rune
replace []rune
}
var allEdges = []edge{
{encoder.EncodeLeftSpace, "EncodeLeftSpace", edgeLeft, []rune{' '}, []rune{'␠'}},
{encoder.EncodeLeftPeriod, "EncodeLeftPeriod", edgeLeft, []rune{'.'}, []rune{'.'}},
{encoder.EncodeLeftTilde, "EncodeLeftTilde", edgeLeft, []rune{'~'}, []rune{'~'}},
{encoder.EncodeLeftCrLfHtVt, "EncodeLeftCrLfHtVt", edgeLeft,
[]rune{'\t', '\n', '\v', '\r'},
[]rune{'␀' + '\t', '␀' + '\n', '␀' + '\v', '␀' + '\r'},
},
{encoder.EncodeRightSpace, "EncodeRightSpace", edgeRight, []rune{' '}, []rune{'␠'}},
{encoder.EncodeRightPeriod, "EncodeRightPeriod", edgeRight, []rune{'.'}, []rune{'.'}},
{encoder.EncodeRightCrLfHtVt, "EncodeRightCrLfHtVt", edgeRight,
[]rune{'\t', '\n', '\v', '\r'},
[]rune{'␀' + '\t', '␀' + '\n', '␀' + '\v', '␀' + '\r'},
},
}
var allMappings = []mapping{{
encoder.EncodeZero, []rune{
0,
}, []rune{
'␀',
}}, {
encoder.EncodeSlash, []rune{
'/',
}, []rune{
'/',
}}, {
encoder.EncodeLtGt, []rune{
'<', '>',
}, []rune{
'<', '>',
}}, {
encoder.EncodeSquareBracket, []rune{
'[', ']',
}, []rune{
'[', ']',
}}, {
encoder.EncodeSemicolon, []rune{
';',
}, []rune{
';',
}}, {
encoder.EncodeExclamation, []rune{
'!',
}, []rune{
'!',
}}, {
encoder.EncodeDoubleQuote, []rune{
'"',
}, []rune{
'"',
}}, {
encoder.EncodeSingleQuote, []rune{
'\'',
}, []rune{
''',
}}, {
encoder.EncodeBackQuote, []rune{
'`',
}, []rune{
'`',
}}, {
encoder.EncodeDollar, []rune{
'$',
}, []rune{
'$',
}}, {
encoder.EncodeColon, []rune{
':',
}, []rune{
':',
}}, {
encoder.EncodeQuestion, []rune{
'?',
}, []rune{
'?',
}}, {
encoder.EncodeAsterisk, []rune{
'*',
}, []rune{
'*',
}}, {
encoder.EncodePipe, []rune{
'|',
}, []rune{
'|',
}}, {
encoder.EncodeHash, []rune{
'#',
}, []rune{
'#',
}}, {
encoder.EncodePercent, []rune{
'%',
}, []rune{
'%',
}}, {
encoder.EncodeSlash, []rune{
'/',
}, []rune{
'/',
}}, {
encoder.EncodeBackSlash, []rune{
'\\',
}, []rune{
'\',
}}, {
encoder.EncodeCrLf, []rune{
rune(0x0D), rune(0x0A),
}, []rune{
'␍', '␊',
}}, {
encoder.EncodeDel, []rune{
0x7F,
}, []rune{
'␡',
}}, {
encoder.EncodeCtl,
runeRange(0x01, 0x1F),
runeRange('␁', '␟'),
}}
var (
rng *rand.Rand
printables = runeRange(0x20, 0x7E)
fullwidthPrintables = runeRange(0xFF00, 0xFF5E)
encodables = collectEncodables(allMappings)
encoded = collectEncoded(allMappings)
greek = runeRange(0x03B1, 0x03C9)
)
func main() {
seed := flag.Int64("s", 42, "random seed")
flag.Parse()
rng = rand.New(rand.NewSource(*seed))
fd, err := os.Create("encoder_cases_test.go")
fatal(err, "Unable to open encoder_cases_test.go:")
defer func() {
fatal(fd.Close(), "Failed to close encoder_cases_test.go:")
}()
fatalW(fd.WriteString(header))("Failed to write header:")
fatalW(fd.WriteString("var testCasesSingle = []testCase{\n\t"))("Write:")
_i := 0
i := func() (r int) {
r, _i = _i, _i+1
return
}
for _, m := range maskBits {
if len(getMapping(m.mask).src) == 0 {
continue
}
if _i != 0 {
fatalW(fd.WriteString(" "))("Write:")
}
in, out := buildTestString(
[]mapping{getMapping(m.mask)}, // pick
[]mapping{getMapping(encoder.EncodeZero)}, // quote
printables, fullwidthPrintables, encodables, encoded, greek) // fill
fatalW(fmt.Fprintf(fd, `{ // %d
mask: %s,
in: %s,
out: %s,
},`, i(), m.name, strconv.Quote(in), strconv.Quote(out)))("Error writing test case:")
}
fatalW(fd.WriteString(`
}
var testCasesSingleEdge = []testCase{
`))("Write:")
_i = 0
for _, e := range allEdges {
for idx, orig := range e.orig {
if _i != 0 {
fatalW(fd.WriteString(" "))("Write:")
}
fatalW(fmt.Fprintf(fd, `{ // %d
mask: %s,
in: %s,
out: %s,
},`, i(), e.name, strconv.Quote(string(orig)), strconv.Quote(string(e.replace[idx]))))("Error writing test case:")
}
for _, m := range maskBits {
if len(getMapping(m.mask).src) == 0 || invalidMask(e.mask|m.mask) {
continue
}
for idx, orig := range e.orig {
replace := e.replace[idx]
pairs := buildEdgeTestString(
[]edge{e}, []mapping{getMapping(encoder.EncodeZero), getMapping(m.mask)}, // quote
[][]rune{printables, fullwidthPrintables, encodables, encoded, greek}, // fill
func(rIn, rOut []rune, quoteOut []bool, testMappings []mapping) (out []stringPair) {
testL := len(rIn)
skipOrig := false
for _, m := range testMappings {
if runePos(orig, m.src) != -1 || runePos(orig, m.dst) != -1 {
skipOrig = true
break
}
}
if !skipOrig {
rIn[10], rOut[10], quoteOut[10] = orig, orig, false
}
out = append(out, stringPair{string(rIn), quotedToString(rOut, quoteOut)})
for _, i := range []int{0, 1, testL - 2, testL - 1} {
for _, j := range []int{1, testL - 2, testL - 1} {
if j < i {
continue
}
rIn := slices.Clone(rIn)
rOut := slices.Clone(rOut)
quoteOut := slices.Clone(quoteOut)
for _, in := range []rune{orig, replace} {
expect, quote := in, false
if i == 0 && e.edge == edgeLeft ||
i == testL-1 && e.edge == edgeRight {
expect, quote = replace, in == replace
}
rIn[i], rOut[i], quoteOut[i] = in, expect, quote
if i != j {
for _, in := range []rune{orig, replace} {
expect, quote = in, false
if j == testL-1 && e.edge == edgeRight {
expect, quote = replace, in == replace
}
rIn[j], rOut[j], quoteOut[j] = in, expect, quote
}
}
out = append(out, stringPair{string(rIn), quotedToString(rOut, quoteOut)})
}
}
}
return
})
for _, p := range pairs {
fatalW(fmt.Fprintf(fd, ` { // %d
mask: %s | %s,
in: %s,
out: %s,
},`, i(), m.name, e.name, strconv.Quote(p.a), strconv.Quote(p.b)))("Error writing test case:")
}
}
}
}
fatalW(fmt.Fprintf(fd, ` { // %d
mask: EncodeLeftSpace,
in: " ",
out: "␠ ",
}, { // %d
mask: EncodeLeftPeriod,
in: "..",
out: "..",
}, { // %d
mask: EncodeLeftTilde,
in: "~~",
out: "~~",
}, { // %d
mask: EncodeRightSpace,
in: " ",
out: " ␠",
}, { // %d
mask: EncodeRightPeriod,
in: "..",
out: "..",
}, { // %d
mask: EncodeLeftSpace | EncodeRightPeriod,
in: " .",
out: "␠.",
}, { // %d
mask: EncodeLeftSpace | EncodeRightSpace,
in: " ",
out: "␠",
}, { // %d
mask: EncodeLeftSpace | EncodeRightSpace,
in: " ",
out: "␠␠",
}, { // %d
mask: EncodeLeftSpace | EncodeRightSpace,
in: " ",
out: "␠ ␠",
}, { // %d
mask: EncodeLeftPeriod | EncodeRightPeriod,
in: "...",
out: "...",
}, { // %d
mask: EncodeRightPeriod | EncodeRightSpace,
in: "a. ",
out: "a.␠",
}, { // %d
mask: EncodeRightPeriod | EncodeRightSpace,
in: "a .",
out: "a .",
},
}
var testCasesDoubleEdge = []testCase{
`, i(), i(), i(), i(), i(), i(), i(), i(), i(), i(), i(), i()))("Error writing test case:")
_i = 0
for _, e1 := range allEdges {
for _, e2 := range allEdges {
if e1.mask == e2.mask {
continue
}
for _, m := range maskBits {
if len(getMapping(m.mask).src) == 0 || invalidMask(m.mask|e1.mask|e2.mask) {
continue
}
orig, replace := e1.orig[0], e1.replace[0]
edges := []edge{e1, e2}
pairs := buildEdgeTestString(
edges, []mapping{getMapping(encoder.EncodeZero), getMapping(m.mask)}, // quote
[][]rune{printables, fullwidthPrintables, encodables, encoded, greek}, // fill
func(rIn, rOut []rune, quoteOut []bool, testMappings []mapping) (out []stringPair) {
testL := len(rIn)
for _, i := range []int{0, testL - 1} {
for _, secondOrig := range e2.orig {
rIn := slices.Clone(rIn)
rOut := slices.Clone(rOut)
quoteOut := slices.Clone(quoteOut)
rIn[1], rOut[1], quoteOut[1] = secondOrig, secondOrig, false
rIn[testL-2], rOut[testL-2], quoteOut[testL-2] = secondOrig, secondOrig, false
for _, in := range []rune{orig, replace} {
rIn[i], rOut[i], quoteOut[i] = in, in, false
fixEdges(rIn, rOut, quoteOut, edges)
out = append(out, stringPair{string(rIn), quotedToString(rOut, quoteOut)})
}
}
}
return
})
for _, p := range pairs {
if _i != 0 {
fatalW(fd.WriteString(" "))("Write:")
}
fatalW(fmt.Fprintf(fd, `{ // %d
mask: %s | %s | %s,
in: %s,
out: %s,
},`, i(), m.name, e1.name, e2.name, strconv.Quote(p.a), strconv.Quote(p.b)))("Error writing test case:")
}
}
}
}
fatalW(fmt.Fprint(fd, "\n}\n"))("Error writing test case:")
}
// fatal logs the message parts followed by err and exits, but only when
// err is non-nil; a nil err is a no-op.
func fatal(err error, s ...any) {
	if err == nil {
		return
	}
	fs.Fatal(nil, fmt.Sprint(append(s, err)))
}
// fatalW adapts a Write-style (n, err) result into a fatal-on-error hook:
// it returns a function that logs its arguments plus err and exits when
// err was non-nil, and a no-op otherwise. Call sites use it as
// fatalW(fd.WriteString(...))("context:").
func fatalW(_ int, err error) func(...any) {
	if err == nil {
		return func(...any) {}
	}
	return func(s ...any) {
		fs.Fatal(nil, fmt.Sprint(append(s, err)))
	}
}
// invalidMask reports whether mask combines whole-string control-character
// encoding (Ctl/CrLf) with edge-only CR/LF/HT/VT encoding — a combination
// the generator skips.
func invalidMask(mask encoder.MultiEncoder) bool {
	encodesCtl := mask&(encoder.EncodeCtl|encoder.EncodeCrLf) != 0
	encodesEdgeCtl := mask&(encoder.EncodeLeftCrLfHtVt|encoder.EncodeRightCrLfHtVt) != 0
	return encodesCtl && encodesEdgeCtl
}
// runeRange constructs a slice containing every rune from l to h, both
// inclusive. It panics if h is smaller than l.
func runeRange(l, h rune) []rune {
	if h < l {
		panic("invalid range")
	}
	out := make([]rune, 0, h-l+1)
	for r := l; r <= h; r++ {
		out = append(out, r)
	}
	return out
}
// getMapping returns the entry of allMappings whose mask equals the given
// mask, or the zero mapping when no entry matches.
func getMapping(mask encoder.MultiEncoder) mapping {
	idx := slices.IndexFunc(allMappings, func(m mapping) bool {
		return m.mask == mask
	})
	if idx < 0 {
		return mapping{}
	}
	return allMappings[idx]
}
// collectEncodables gathers the src (to-be-encoded) runes of all mappings
// into a single slice.
func collectEncodables(m []mapping) []rune {
	var out []rune
	for _, entry := range m {
		out = append(out, entry.src...)
	}
	return out
}
// collectEncoded gathers the dst (replacement) runes of all mappings into
// a single slice.
func collectEncoded(m []mapping) []rune {
	var out []rune
	for _, entry := range m {
		out = append(out, entry.dst...)
	}
	return out
}
// buildTestString creates one random (input, expected output) pair for the
// given mappings. The string starts with every src rune (expected to be
// replaced), then every dst rune (expected to be quoted), then random
// filler from the fill sets; finally all positions are shuffled. The
// expected output includes encoder.QuoteRune before quoted positions.
func buildTestString(mappings, testMappings []mapping, fill ...[]rune) (string, string) {
	combinedMappings := append(mappings, testMappings...)
	var (
		rIn  []rune // test input string
		rOut []rune // expected output, without quote runes
	)
	// Section 1: every src rune, expected to map to its dst rune.
	for _, m := range mappings {
		if len(m.src) == 0 || len(m.src) != len(m.dst) {
			panic("invalid length")
		}
		rIn = append(rIn, m.src...)
		rOut = append(rOut, m.dst...)
	}
	inL := len(rIn)
	testL := max(inL*3, 30)
	rIn = append(rIn, make([]rune, testL-inL)...)
	rOut = append(rOut, make([]rune, testL-inL)...)
	quoteOut := make([]bool, testL) // if true insert quote rune before the output rune
	set := func(i int, in, out rune, quote bool) {
		rIn[i] = in
		rOut[i] = out
		quoteOut[i] = quote
	}
	// Section 2: every dst rune fed back in, expected to come out quoted.
	for i, r := range rOut[:inL] {
		set(inL+i, r, r, true)
	}
	// Section 3: random filler, classified against all mappings so the
	// expected output is correct for encodable/encoded runes too.
outer:
	for pos := inL * 2; pos < testL; pos++ {
		m := pos % len(fill)
		i := rng.Intn(len(fill[m]))
		r := fill[m][i]
		for _, m := range combinedMappings {
			if pSrc := runePos(r, m.src); pSrc != -1 {
				// r is encoded by some mapping: expect its replacement.
				set(pos, r, m.dst[pSrc], false)
				continue outer
			} else if pDst := runePos(r, m.dst); pDst != -1 {
				// r is itself a replacement rune: expect it quoted.
				set(pos, r, r, true)
				continue outer
			}
		}
		set(pos, r, r, false)
	}
	// Shuffle all three slices in lockstep so positions stay aligned.
	rng.Shuffle(testL, func(i, j int) {
		rIn[i], rIn[j] = rIn[j], rIn[i]
		rOut[i], rOut[j] = rOut[j], rOut[i]
		quoteOut[i], quoteOut[j] = quoteOut[j], quoteOut[i]
	})
	// Render the expected output, inserting the quote rune where flagged.
	var bOut strings.Builder
	bOut.Grow(testL)
	for i, r := range rOut {
		if quoteOut[i] {
			bOut.WriteRune(encoder.QuoteRune)
		}
		bOut.WriteRune(r)
	}
	return string(rIn), bOut.String()
}
// buildEdgeTestString creates a length-30 random test string (input,
// expected output and quoting flags) from the fill rune sets, applies the
// edge replacement rules via fixEdges, and then hands the result to gen,
// which produces the final input/expected pairs for the test file.
func buildEdgeTestString(edges []edge, testMappings []mapping, fill [][]rune,
	gen func(rIn, rOut []rune, quoteOut []bool, testMappings []mapping) []stringPair,
) []stringPair {
	testL := 30
	rIn := make([]rune, testL)      // test input string
	rOut := make([]rune, testL)     // test output string without quote runes
	quoteOut := make([]bool, testL) // if true insert quote rune before the output rune
	set := func(i int, in, out rune, quote bool) {
		rIn[i] = in
		rOut[i] = out
		quoteOut[i] = quote
	}
	// populate test strings with values from the `fill` set
outer:
	for pos := range testL {
		m := pos % len(fill)
		i := rng.Intn(len(fill[m]))
		r := fill[m][i]
		for _, m := range testMappings {
			if pSrc := runePos(r, m.src); pSrc != -1 {
				// r is encoded by a test mapping: expect its replacement.
				set(pos, r, m.dst[pSrc], false)
				continue outer
			} else if pDst := runePos(r, m.dst); pDst != -1 {
				// r is itself a replacement rune: expect it quoted.
				set(pos, r, r, true)
				continue outer
			}
		}
		set(pos, r, r, false)
	}
	// Shuffle all three slices in lockstep so positions stay aligned.
	rng.Shuffle(testL, func(i, j int) {
		rIn[i], rIn[j] = rIn[j], rIn[i]
		rOut[i], rOut[j] = rOut[j], rOut[i]
		quoteOut[i], quoteOut[j] = quoteOut[j], quoteOut[i]
	})
	fixEdges(rIn, rOut, quoteOut, edges)
	return gen(rIn, rOut, quoteOut, testMappings)
}
// fixEdges adjusts the expected output for edge-sensitive rules: when the
// first (edgeLeft) or last (edgeRight) input rune is an original rune it
// must be replaced by its substitute, and when it already equals the
// substitute it must be quoted instead.
func fixEdges(rIn, rOut []rune, quoteOut []bool, edges []edge) {
	testL := len(rIn)
	for _, e := range edges {
		for idx, o := range e.orig {
			r := e.replace[idx]
			if e.edge == edgeLeft && rIn[0] == o {
				rOut[0], quoteOut[0] = r, false
			} else if e.edge == edgeLeft && rIn[0] == r {
				quoteOut[0] = true
			} else if e.edge == edgeRight && rIn[testL-1] == o {
				rOut[testL-1], quoteOut[testL-1] = r, false
			} else if e.edge == edgeRight && rIn[testL-1] == r {
				quoteOut[testL-1] = true
			}
		}
	}
}
// runePos returns the index of the first occurrence of r in s, or -1 if r
// is not present. Delegates to the standard library instead of a
// hand-rolled linear scan (the file already imports slices).
func runePos(r rune, s []rune) int {
	return slices.Index(s, r)
}
// quotedToString returns a string for the chars slice where an encoder.QuoteRune is
// inserted before a char[i] when quoted[i] is true.
func quotedToString(chars []rune, quoted []bool) string {
var out strings.Builder
out.Grow(len(chars))
for i, r := range chars {
if quoted[i] {
out.WriteRune(encoder.QuoteRune)
}
out.WriteRune(r)
}
return out.String()
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/caller/caller_test.go | lib/caller/caller_test.go | package caller
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestPresent verifies that Present ignores its direct caller: called
// straight from the test, TestPresent itself is not visible, but it is
// visible one frame deeper (inside the closure).
func TestPresent(t *testing.T) {
	assert.False(t, Present("NotFound"))
	// Direct call: Present skips its caller, so TestPresent is not seen.
	assert.False(t, Present("TestPresent"))
	f := func() {
		// One frame deeper: TestPresent is now on the visible stack.
		assert.True(t, Present("TestPresent"))
	}
	f()
}
// BenchmarkPresent measures a stack search that never matches, from a
// shallow call stack (worst case: every frame is inspected).
func BenchmarkPresent(b *testing.B) {
	for b.Loop() {
		_ = Present("NotFound")
	}
}
// BenchmarkPresent100 measures the same non-matching search with 100 extra
// frames on the stack, showing how cost grows with stack depth.
func BenchmarkPresent100(b *testing.B) {
	var fn func(level int)
	fn = func(level int) {
		if level > 0 {
			// Recurse to build up the extra stack frames.
			fn(level - 1)
			return
		}
		// Deepest frame: run the benchmark loop here so every call walks
		// the full stack.
		for b.Loop() {
			_ = Present("NotFound")
		}
	}
	fn(100)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/caller/caller.go | lib/caller/caller.go | // Package caller contains functions to examine the call stack.
package caller
import (
"runtime"
"strings"
)
// Present looks for functionName in the call stack and return true if found
//
// Note that this ignores the caller.
func Present(functionName string) bool {
	var pcs [48]uintptr
	// skip runtime.Callers, Present and the immediate caller
	depth := runtime.Callers(3, pcs[:])
	frames := runtime.CallersFrames(pcs[:depth])
	for more := true; more; {
		var frame runtime.Frame
		frame, more = frames.Next()
		if strings.HasSuffix(frame.Function, functionName) {
			return true
		}
	}
	return false
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/sdactivation/sdactivation_stub.go | lib/sdactivation/sdactivation_stub.go | //go:build windows || plan9
// Package sdactivation provides support for systemd socket activation,
// wrapping the coreos/go-systemd package.
// This wraps the underlying go-systemd binary, as it fails to build on plan9
// https://github.com/coreos/go-systemd/pull/440
package sdactivation
import (
"net"
)
// ListenersWithNames maps a listener name to a set of net.Listener instances.
// This wraps the underlying go-systemd binary, as it fails to build on plan9
// https://github.com/coreos/go-systemd/pull/440
//
// On this platform socket activation is unavailable, so the map is always
// empty (but non-nil).
func ListenersWithNames() (map[string][]net.Listener, error) {
	named := map[string][]net.Listener{}
	return named, nil
}
// Listeners returns a slice containing a net.Listener for each matching socket type passed to this process.
//
// Stub implementation: socket activation is not available on this
// platform, so there are never any activated listeners.
func Listeners() ([]net.Listener, error) {
	return nil, nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/sdactivation/sdactivation_unix.go | lib/sdactivation/sdactivation_unix.go | //go:build !windows && !plan9
// Package sdactivation provides support for systemd socket activation, wrapping
// the coreos/go-systemd package.
// This wraps the underlying go-systemd library, as it fails to build on plan9
// https://github.com/coreos/go-systemd/pull/440
package sdactivation
import (
"net"
sdActivation "github.com/coreos/go-systemd/v22/activation"
)
// ListenersWithNames maps a listener name to a set of net.Listener instances.
//
// Thin wrapper over github.com/coreos/go-systemd/v22/activation; the
// indirection exists because go-systemd does not build on plan9 (see the
// package comment).
func ListenersWithNames() (map[string][]net.Listener, error) {
	return sdActivation.ListenersWithNames()
}
// Listeners returns a slice containing a net.Listener for each matching socket type passed to this process.
//
// Thin wrapper over github.com/coreos/go-systemd/v22/activation (see the
// package comment for why the wrapper exists).
func Listeners() ([]net.Listener, error) {
	return sdActivation.Listeners()
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/mmap/mmap_unsupported.go | lib/mmap/mmap_unsupported.go | // Fallback Alloc and Free for unsupported OSes
//go:build plan9 || js
package mmap
// Alloc allocates size bytes and returns a slice containing them. If
// the allocation fails it will return with an error. This is best
// used for allocations which are a multiple of the Pagesize.
//
// Fallback implementation: the memory comes from the ordinary Go heap.
func Alloc(size int) ([]byte, error) {
	buf := make([]byte, size)
	return buf, nil
}
// Free frees buffers allocated by Alloc. Note it should be passed
// the same slice (not a derived slice) that Alloc returned. If the
// free fails it will return with an error.
//
// Fallback implementation: the buffer came from make, so the garbage
// collector reclaims it and there is nothing to do here.
func Free(mem []byte) error {
	return nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/mmap/mmap.go | lib/mmap/mmap.go | // Package mmap provides memory mapped related utilities.
package mmap
import "os"
// PageSize is the minimum allocation size. Allocations will use at
// least this size and are likely to be multiplied up to a multiple of
// this size.
var PageSize = os.Getpagesize()
// MustAlloc allocates size bytes and returns a slice containing them. If
// the allocation fails it will panic. This is best used for
// allocations which are a multiple of the PageSize.
func MustAlloc(size int) []byte {
	mem, err := Alloc(size)
	if err == nil {
		return mem
	}
	panic(err)
}
// MustFree frees buffers allocated by Alloc. Note it should be passed
// the same slice (not a derived slice) that Alloc returned. If the
// free fails it will panic.
func MustFree(mem []byte) {
	if err := Free(mem); err != nil {
		panic(err)
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/mmap/mmap_windows.go | lib/mmap/mmap_windows.go | // Package mmap implements a large block memory allocator using
// anonymous memory maps.
//go:build windows
package mmap
import (
"fmt"
"unsafe"
"golang.org/x/sys/windows"
)
// Alloc allocates size bytes and returns a slice containing them. If
// the allocation fails it will return with an error. This is best
// used for allocations which are a multiple of the PageSize.
func Alloc(size int) ([]byte, error) {
	// Committed, read-write, zero-filled pages straight from the OS.
	p, err := windows.VirtualAlloc(0, uintptr(size), windows.MEM_COMMIT, windows.PAGE_READWRITE)
	if err != nil {
		return nil, fmt.Errorf("mmap: failed to allocate memory for buffer: %w", err)
	}
	// Convert the uintptr returned by VirtualAlloc into an
	// unsafe.Pointer via a round-trip through the address of p.
	// NOTE(review): presumably written this way to avoid a direct
	// uintptr->unsafe.Pointer conversion (which go vet flags) - the
	// memory is OS-allocated rather than Go-heap managed, so it will
	// not move under the garbage collector. Confirm before changing.
	pp := unsafe.Pointer(&p)
	up := *(*unsafe.Pointer)(pp)
	return unsafe.Slice((*byte)(up), size), nil
}
// Free frees buffers allocated by Alloc. Note it should be passed
// the same slice (not a derived slice) that Alloc returned. If the
// free fails it will return with an error.
func Free(mem []byte) error {
	// Recover the base address of the allocation from the slice and
	// release the whole region (size 0 + MEM_RELEASE frees it all).
	start := unsafe.SliceData(mem)
	if err := windows.VirtualFree(uintptr(unsafe.Pointer(start)), 0, windows.MEM_RELEASE); err != nil {
		return fmt.Errorf("mmap: failed to unmap memory: %w", err)
	}
	return nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/mmap/mmap_test.go | lib/mmap/mmap_test.go | package mmap
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
// Constants to control the benchmarking
const (
	// maxAllocs is the largest number of outstanding allocations used
	// by the benchmarks below.
	maxAllocs = 16 * 1024
)
// TestAllocFree checks an Alloc/Free round trip: the returned buffer
// has the requested length and is fully writable.
func TestAllocFree(t *testing.T) {
	const Size = 4096
	b := MustAlloc(Size)
	assert.Equal(t, Size, len(b))
	// check we can write to all the memory
	for i := range b {
		b[i] = byte(i)
	}
	// Now free the memory
	MustFree(b)
}
// BenchmarkAllocFree measures a single Alloc/Free cycle across a range
// of sizes, both leaving the memory untouched and dirtying a byte
// (which forces the page in).
func BenchmarkAllocFree(b *testing.B) {
	for _, dirty := range []bool{false, true} {
		for size := 4096; size <= 32*1024*1024; size *= 2 {
			b.Run(fmt.Sprintf("%dk,dirty=%v", size>>10, dirty), func(b *testing.B) {
				for b.Loop() {
					mem := MustAlloc(size)
					if dirty {
						// Touch the memory so the page is actually mapped in.
						mem[0] ^= 0xFF
					}
					MustFree(mem)
				}
			})
		}
	}
}
// benchmark the time alloc/free takes with lots of allocations already
func BenchmarkAllocFreeWithLotsOfAllocations(b *testing.B) {
	const size = 4096
	// alloc makes n dirtied allocations and returns them.
	alloc := func(n int) (allocs [][]byte) {
		for range n {
			mem := MustAlloc(size)
			mem[0] ^= 0xFF
			allocs = append(allocs, mem)
		}
		return allocs
	}
	// free releases everything alloc returned.
	free := func(allocs [][]byte) {
		for _, mem := range allocs {
			MustFree(mem)
		}
	}
	for preAllocs := 1; preAllocs <= maxAllocs; preAllocs *= 2 {
		// Hold preAllocs buffers live while timing one alloc/free cycle.
		allocs := alloc(preAllocs)
		b.Run(fmt.Sprintf("%d", preAllocs), func(b *testing.B) {
			for b.Loop() {
				mem := MustAlloc(size)
				mem[0] ^= 0xFF
				MustFree(mem)
			}
		})
		free(allocs)
	}
}
// benchmark the time alloc/free takes for lots of allocations
func BenchmarkAllocFreeForLotsOfAllocations(b *testing.B) {
	const size = 4096
	// alloc makes n dirtied allocations and returns them.
	alloc := func(n int) (allocs [][]byte) {
		for range n {
			mem := MustAlloc(size)
			mem[0] ^= 0xFF
			allocs = append(allocs, mem)
		}
		return allocs
	}
	// free releases everything alloc returned.
	free := func(allocs [][]byte) {
		for _, mem := range allocs {
			MustFree(mem)
		}
	}
	for preAllocs := 1; preAllocs <= maxAllocs; preAllocs *= 2 {
		b.Run(fmt.Sprintf("%d", preAllocs), func(b *testing.B) {
			for b.Loop() {
				// Time the whole batch of preAllocs alloc/free pairs.
				allocs := alloc(preAllocs)
				free(allocs)
			}
		})
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/mmap/mmap_unix.go | lib/mmap/mmap_unix.go | // Package mmap implements a large block memory allocator using
// anonymous memory maps.
//go:build !plan9 && !windows && !js
package mmap
import (
"fmt"
"golang.org/x/sys/unix"
)
// Alloc allocates size bytes and returns a slice containing them. If
// the allocation fails it will return with an error. This is best
// used for allocations which are a multiple of the PageSize.
func Alloc(size int) ([]byte, error) {
	// Anonymous private mapping: zero-filled memory backed by no file
	// (fd -1, offset 0).
	mem, err := unix.Mmap(-1, 0, size, unix.PROT_READ|unix.PROT_WRITE, unix.MAP_PRIVATE|unix.MAP_ANON)
	if err == nil {
		return mem, nil
	}
	return nil, fmt.Errorf("mmap: failed to allocate memory for buffer: %w", err)
}
// Free frees buffers allocated by Alloc. Note it should be passed
// the same slice (not a derived slice) that Alloc returned. If the
// free fails it will return with an error.
func Free(mem []byte) error {
	if err := unix.Munmap(mem); err != nil {
		return fmt.Errorf("mmap: failed to unmap memory: %w", err)
	}
	return nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/pacer/pacers.go | lib/pacer/pacers.go | package pacer
import (
"math/rand"
"time"
"golang.org/x/time/rate"
)
// Option value types shared by the Calculator implementations below.
// Each type implements the relevant ApplyXxx method(s) so the same
// option value can configure several different calculators.
type (
	// MinSleep configures the minimum sleep time of a Calculator
	MinSleep time.Duration
	// MaxSleep configures the maximum sleep time of a Calculator
	MaxSleep time.Duration
	// DecayConstant configures the decay constant time of a Calculator
	DecayConstant uint
	// AttackConstant configures the attack constant of a Calculator
	AttackConstant uint
	// Burst configures the number of API calls to allow without sleeping
	Burst int
)
// Default is a truncated exponential attack and decay.
//
// On retries the sleep time is doubled, on non errors then sleeptime decays
// according to the decay constant as set with SetDecayConstant.
//
// The sleep never goes below that set with SetMinSleep or above that set
// with SetMaxSleep.
type Default struct {
	minSleep       time.Duration // minimum sleep time
	maxSleep       time.Duration // maximum sleep time
	decayConstant  uint          // decay constant - shrinks sleep on success
	attackConstant uint          // attack constant - grows sleep on retry
}
// DefaultOption is the interface implemented by all options for the Default Calculator
type DefaultOption interface {
	ApplyDefault(*Default)
}
// NewDefault creates a Calculator used by Pacer as the default.
//
// Defaults: 10ms minimum sleep, 2s maximum sleep, decay constant 2,
// attack constant 1, overridable via the opts.
func NewDefault(opts ...DefaultOption) *Default {
	d := &Default{
		minSleep:       10 * time.Millisecond,
		maxSleep:       2 * time.Second,
		decayConstant:  2,
		attackConstant: 1,
	}
	d.Update(opts...)
	return d
}
// Update applies the Calculator options.
func (c *Default) Update(opts ...DefaultOption) {
	for _, opt := range opts {
		opt.ApplyDefault(c)
	}
}
// ApplyDefault sets the minimum sleep time on the Default Calculator.
func (o MinSleep) ApplyDefault(c *Default) {
	c.minSleep = time.Duration(o)
}
// ApplyDefault sets the maximum sleep time on the Default Calculator.
func (o MaxSleep) ApplyDefault(c *Default) {
	c.maxSleep = time.Duration(o)
}
// ApplyDefault sets the decay constant on the Default Calculator.
func (o DecayConstant) ApplyDefault(c *Default) {
	c.decayConstant = uint(o)
}
// ApplyDefault sets the attack constant on the Default Calculator.
func (o AttackConstant) ApplyDefault(c *Default) {
	c.attackConstant = uint(o)
}
// Calculate takes the current Pacer state and return the wait time until the next try.
func (c *Default) Calculate(state State) time.Duration {
	// A RetryAfterError overrides the normal algorithm, but never
	// sleep for less than the configured minimum.
	if t, ok := IsRetryAfter(state.LastError); ok {
		return max(t, c.minSleep)
	}
	if state.ConsecutiveRetries > 0 {
		// Attack: grow the sleep time towards maxSleep.
		if c.attackConstant == 0 {
			return c.maxSleep
		}
		grown := (state.SleepTime << c.attackConstant) / ((1 << c.attackConstant) - 1)
		return min(grown, c.maxSleep)
	}
	// Decay: shrink the sleep time towards minSleep on success.
	decayed := (state.SleepTime<<c.decayConstant - state.SleepTime) >> c.decayConstant
	return max(decayed, c.minSleep)
}
// ZeroDelayCalculator is a Calculator that never delays.
type ZeroDelayCalculator struct {
}
// Calculate takes the current Pacer state and return the wait time until the next try.
//
// Always zero, regardless of retries or errors.
func (c *ZeroDelayCalculator) Calculate(state State) time.Duration {
	return 0
}
// AzureIMDS is a pacer for the Azure instance metadata service.
type AzureIMDS struct {
}
// NewAzureIMDS returns a new Azure IMDS calculator.
func NewAzureIMDS() *AzureIMDS {
	c := &AzureIMDS{}
	return c
}
// Calculate takes the current Pacer state and return the wait time until the next try.
func (c *AzureIMDS) Calculate(state State) time.Duration {
	switch n := state.ConsecutiveRetries; {
	case n == 0:
		// Initial condition: no backoff.
		return 0
	case n > 4:
		// The number of consecutive retries shouldn't exceed five.
		// In case it does for some reason, stop adding backoff.
		return state.SleepTime
	default:
		// Add an exponentially growing delay (2, 4, 8, 16 seconds)
		// on top of the accumulated sleep time.
		return state.SleepTime + time.Duration(2<<uint(n-1))*time.Second
	}
}
// GoogleDrive is a specialized pacer for Google Drive
//
// It implements a truncated exponential backoff strategy with randomization.
// Normally operations are paced at the interval set with SetMinSleep. On errors
// the sleep timer is set to (2 ^ n) + random_number_milliseconds seconds.
//
// See https://developers.google.com/drive/v2/web/handle-errors#exponential-backoff
type GoogleDrive struct {
	minSleep time.Duration // minimum sleep time
	burst    int           // number of requests without sleeping
	limiter  *rate.Limiter // rate limiter for the minSleep
}
// GoogleDriveOption is the interface implemented by all options for the GoogleDrive Calculator
type GoogleDriveOption interface {
	ApplyGoogleDrive(*GoogleDrive)
}
// NewGoogleDrive returns a new GoogleDrive Calculator with default values
//
// Defaults: 10ms minimum sleep and a burst of 100 calls.
func NewGoogleDrive(opts ...GoogleDriveOption) *GoogleDrive {
	g := &GoogleDrive{
		minSleep: 10 * time.Millisecond,
		burst:    100,
	}
	g.Update(opts...)
	return g
}
// Update applies the Calculator options.
func (c *GoogleDrive) Update(opts ...GoogleDriveOption) {
	for _, opt := range opts {
		opt.ApplyGoogleDrive(c)
	}
	// A non-positive burst makes no sense for the limiter - use 1.
	if c.burst < 1 {
		c.burst = 1
	}
	// Rebuild the token-bucket limiter so new settings take effect.
	c.limiter = rate.NewLimiter(rate.Every(c.minSleep), c.burst)
}
// ApplyGoogleDrive sets the minimum sleep time on the GoogleDrive Calculator.
func (o MinSleep) ApplyGoogleDrive(c *GoogleDrive) {
	c.minSleep = time.Duration(o)
}
// ApplyGoogleDrive sets the burst size on the GoogleDrive Calculator.
func (o Burst) ApplyGoogleDrive(c *GoogleDrive) {
	c.burst = int(o)
}
// Calculate takes the current Pacer state and return the wait time until the next try.
func (c *GoogleDrive) Calculate(state State) time.Duration {
	// A RetryAfterError overrides the backoff algorithm, clamped to
	// the configured minimum sleep.
	if t, ok := IsRetryAfter(state.LastError); ok {
		return max(t, c.minSleep)
	}
	if state.ConsecutiveRetries == 0 {
		// Happy path: pace via the token-bucket rate limiter.
		return c.limiter.Reserve().Delay()
	}
	// consecutiveRetries starts at 1 so go from 1,2,3,4,5,5 => 1,2,4,8,16,16
	// maxSleep is 2**(consecutiveRetries-1) seconds + random milliseconds
	n := min(state.ConsecutiveRetries, 5)
	return time.Second<<uint(n-1) + time.Duration(rand.Int63n(int64(time.Second)))
}
// S3 implements a pacer compatible with our expectations of S3, where it tries to not
// delay at all between successful calls, but backs off in the default fashion in response
// to any errors.
// The assumption is that errors should be exceedingly rare (S3 seems to have largely solved
// the sort of stability questions rclone is likely to run into), and in the happy case
// it can handle calls with no delays between them.
//
// Basically defaultPacer, but with some handling of sleepTime going to/from 0ms
type S3 struct {
	minSleep       time.Duration // minimum sleep time
	maxSleep       time.Duration // maximum sleep time
	decayConstant  uint          // decay constant
	attackConstant uint          // attack constant
}
// S3Option is the interface implemented by all options for the S3 Calculator
type S3Option interface {
	ApplyS3(*S3)
}
// NewS3 returns a new S3 Calculator with default values
//
// Note minSleep is left at its zero value so that successful calls
// are not delayed at all (see the S3 type comment).
func NewS3(opts ...S3Option) *S3 {
	s := &S3{
		maxSleep:       2 * time.Second,
		decayConstant:  2,
		attackConstant: 1,
	}
	s.Update(opts...)
	return s
}
// Update applies the Calculator options.
func (c *S3) Update(opts ...S3Option) {
	for _, opt := range opts {
		opt.ApplyS3(c)
	}
}
// ApplyS3 sets the maximum sleep time on the S3 Calculator.
func (o MaxSleep) ApplyS3(c *S3) {
	c.maxSleep = time.Duration(o)
}
// ApplyS3 sets the minimum sleep time on the S3 Calculator.
func (o MinSleep) ApplyS3(c *S3) {
	c.minSleep = time.Duration(o)
}
// ApplyS3 sets the decay constant on the S3 Calculator.
func (o DecayConstant) ApplyS3(c *S3) {
	c.decayConstant = uint(o)
}
// ApplyS3 sets the attack constant on the S3 Calculator.
func (o AttackConstant) ApplyS3(c *S3) {
	c.attackConstant = uint(o)
}
// Calculate takes the current Pacer state and return the wait time until the next try.
func (c *S3) Calculate(state State) time.Duration {
	// A RetryAfterError overrides the algorithm, clamped to minSleep.
	if t, ok := IsRetryAfter(state.LastError); ok {
		return max(t, c.minSleep)
	}
	if state.ConsecutiveRetries == 0 {
		// Success: decay the sleep time, dropping straight to zero
		// once it would fall below minSleep.
		decayed := (state.SleepTime<<c.decayConstant - state.SleepTime) >> c.decayConstant
		if decayed < c.minSleep {
			return 0
		}
		return decayed
	}
	// Failure: back off exponentially towards maxSleep.
	if c.attackConstant == 0 {
		return c.maxSleep
	}
	if state.SleepTime == 0 {
		// Coming from the no-delay happy state - restart at minSleep.
		return c.minSleep
	}
	return min((state.SleepTime<<c.attackConstant)/((1<<c.attackConstant)-1), c.maxSleep)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/pacer/pacer_test.go | lib/pacer/pacer_test.go | package pacer
import (
"errors"
"fmt"
"strings"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestNew checks that New applies the given options and fills in the
// documented defaults (Default calculator, one pacing token, full set
// of connection tokens).
func TestNew(t *testing.T) {
	const expectedRetries = 7
	const expectedConnections = 9
	p := New(RetriesOption(expectedRetries), MaxConnectionsOption(expectedConnections))
	if d, ok := p.calculator.(*Default); ok {
		assert.Equal(t, 10*time.Millisecond, d.minSleep)
		assert.Equal(t, 2*time.Second, d.maxSleep)
		assert.Equal(t, d.minSleep, p.state.SleepTime)
		assert.Equal(t, uint(2), d.decayConstant)
		assert.Equal(t, uint(1), d.attackConstant)
	} else {
		t.Errorf("calculator")
	}
	assert.Equal(t, expectedRetries, p.retries)
	assert.Equal(t, 1, cap(p.pacer))
	assert.Equal(t, 1, len(p.pacer))
	assert.Equal(t, expectedConnections, p.maxConnections)
	assert.Equal(t, expectedConnections, cap(p.connTokens))
	assert.Equal(t, 0, p.state.ConsecutiveRetries)
}
// TestMaxConnections checks that SetMaxConnections resizes the token
// channel and that 0 removes it entirely (unlimited).
func TestMaxConnections(t *testing.T) {
	p := New()
	p.SetMaxConnections(20)
	assert.Equal(t, 20, p.maxConnections)
	assert.Equal(t, 20, cap(p.connTokens))
	p.SetMaxConnections(0)
	assert.Equal(t, 0, p.maxConnections)
	assert.Nil(t, p.connTokens)
}
// TestDecay checks the Default calculator's decay path (no consecutive
// retries) for several decay constants.
//
// Fix: the table field was previously named attackConstant even though
// it is assigned to c.decayConstant - renamed to match what it
// actually configures.
func TestDecay(t *testing.T) {
	c := NewDefault(MinSleep(1*time.Microsecond), MaxSleep(1*time.Second))
	for _, test := range []struct {
		in            State
		decayConstant uint
		want          time.Duration
	}{
		{State{SleepTime: 8 * time.Millisecond}, 1, 4 * time.Millisecond},
		{State{SleepTime: 1 * time.Millisecond}, 0, 1 * time.Microsecond},
		{State{SleepTime: 1 * time.Millisecond}, 2, (3 * time.Millisecond) / 4},
		{State{SleepTime: 1 * time.Millisecond}, 3, (7 * time.Millisecond) / 8},
	} {
		c.decayConstant = test.decayConstant
		got := c.Calculate(test.in)
		assert.Equal(t, test.want, got, "test: %+v", test)
	}
}
// TestAttack checks the Default calculator's attack (retry backoff)
// path for several attack constants, including 0 which jumps straight
// to maxSleep.
func TestAttack(t *testing.T) {
	c := NewDefault(MinSleep(1*time.Microsecond), MaxSleep(1*time.Second))
	for _, test := range []struct {
		in             State
		attackConstant uint
		want           time.Duration
	}{
		{State{SleepTime: 1 * time.Millisecond, ConsecutiveRetries: 1}, 1, 2 * time.Millisecond},
		{State{SleepTime: 1 * time.Millisecond, ConsecutiveRetries: 1}, 0, 1 * time.Second},
		{State{SleepTime: 1 * time.Millisecond, ConsecutiveRetries: 1}, 2, (4 * time.Millisecond) / 3},
		{State{SleepTime: 1 * time.Millisecond, ConsecutiveRetries: 1}, 3, (8 * time.Millisecond) / 7},
	} {
		c.attackConstant = test.attackConstant
		got := c.Calculate(test.in)
		assert.Equal(t, test.want, got, "test: %+v", test)
	}
}
// TestSetRetries checks that SetRetries updates the retry count.
func TestSetRetries(t *testing.T) {
	p := New()
	p.SetRetries(18)
	assert.Equal(t, 18, p.retries)
}
// emptyTokens empties the pacer of all its tokens
func emptyTokens(p *Pacer) {
	for len(p.pacer) != 0 {
		<-p.pacer
	}
	for len(p.connTokens) != 0 {
		<-p.connTokens
	}
}
// waitForPace waits for duration for the pace to arrive
// returns the time that it arrived or a zero time
func waitForPace(p *Pacer, duration time.Duration) (when time.Time) {
	select {
	case <-time.After(duration):
		// Timed out - return the zero time.
		return
	case <-p.pacer:
		return time.Now()
	}
}
// TestBeginCall checks that beginCall blocks until both a pacing token
// and (when limiting) a connection token are available, and only then
// re-arms the pacer. Timing-sensitive: relies on real sleeps.
func TestBeginCall(t *testing.T) {
	p := New(MaxConnectionsOption(10), CalculatorOption(NewDefault(MinSleep(1*time.Millisecond))))
	emptyTokens(p)
	go p.beginCall(true)
	if !waitForPace(p, 10*time.Millisecond).IsZero() {
		t.Errorf("beginSleep fired too early #1")
	}
	startTime := time.Now()
	p.pacer <- struct{}{}
	time.Sleep(1 * time.Millisecond)
	connTime := time.Now()
	p.connTokens <- struct{}{}
	time.Sleep(1 * time.Millisecond)
	paceTime := waitForPace(p, 1000*time.Millisecond)
	if paceTime.IsZero() {
		t.Errorf("beginSleep didn't fire")
	} else if paceTime.Sub(startTime) < 0 {
		t.Errorf("pace arrived before returning pace token")
	} else if paceTime.Sub(connTime) < 0 {
		t.Errorf("pace arrived before sending conn token")
	}
}
// TestBeginCallZeroConnections is the same as TestBeginCall but with
// connection limiting disabled, so only the pacing token matters.
func TestBeginCallZeroConnections(t *testing.T) {
	p := New(MaxConnectionsOption(0), CalculatorOption(NewDefault(MinSleep(1*time.Millisecond))))
	emptyTokens(p)
	go p.beginCall(false)
	if !waitForPace(p, 10*time.Millisecond).IsZero() {
		t.Errorf("beginSleep fired too early #1")
	}
	startTime := time.Now()
	p.pacer <- struct{}{}
	time.Sleep(1 * time.Millisecond)
	paceTime := waitForPace(p, 1000*time.Millisecond)
	if paceTime.IsZero() {
		t.Errorf("beginSleep didn't fire")
	} else if paceTime.Sub(startTime) < 0 {
		t.Errorf("pace arrived before returning pace token")
	}
}
// TestDefaultPacer table-tests the Default calculator's attack, cap
// and decay behaviour.
func TestDefaultPacer(t *testing.T) {
	c := NewDefault(MinSleep(1*time.Millisecond), MaxSleep(1*time.Second), DecayConstant(2))
	for _, test := range []struct {
		state State
		want  time.Duration
	}{
		{State{SleepTime: 1 * time.Millisecond, ConsecutiveRetries: 1}, 2 * time.Millisecond},
		{State{SleepTime: 1 * time.Second, ConsecutiveRetries: 1}, 1 * time.Second},
		{State{SleepTime: (3 * time.Second) / 4, ConsecutiveRetries: 1}, 1 * time.Second},
		{State{SleepTime: 1 * time.Second}, 750 * time.Millisecond},
		{State{SleepTime: 1000 * time.Microsecond}, 1 * time.Millisecond},
		{State{SleepTime: 1200 * time.Microsecond}, 1 * time.Millisecond},
	} {
		got := c.Calculate(test.state)
		assert.Equal(t, test.want, got, "test: %+v", test)
	}
}
// TestAzureIMDSPacer checks the cumulative 2/4/8/16 second backoff of
// the Azure IMDS calculator.
func TestAzureIMDSPacer(t *testing.T) {
	c := NewAzureIMDS()
	for _, test := range []struct {
		state State
		want  time.Duration
	}{
		{State{SleepTime: 0, ConsecutiveRetries: 0}, 0},
		{State{SleepTime: 0, ConsecutiveRetries: 1}, 2 * time.Second},
		{State{SleepTime: 2 * time.Second, ConsecutiveRetries: 2}, 6 * time.Second},
		{State{SleepTime: 6 * time.Second, ConsecutiveRetries: 3}, 14 * time.Second},
		{State{SleepTime: 14 * time.Second, ConsecutiveRetries: 4}, 30 * time.Second},
	} {
		got := c.Calculate(test.state)
		assert.Equal(t, test.want, got, "test: %+v", test)
	}
}
// TestGoogleDrivePacer checks the randomized exponential backoff (by
// averaging many samples) and the burst behaviour of the rate limiter.
func TestGoogleDrivePacer(t *testing.T) {
	// Do lots of times because of the random number!
	for _, test := range []struct {
		state State
		want  time.Duration
	}{
		{State{SleepTime: 1 * time.Millisecond}, 0},
		{State{SleepTime: 10 * time.Millisecond}, 0},
		{State{SleepTime: 1 * time.Second, ConsecutiveRetries: 1}, 1*time.Second + 500*time.Millisecond},
		{State{SleepTime: 1 * time.Second, ConsecutiveRetries: 2}, 2*time.Second + 500*time.Millisecond},
		{State{SleepTime: 1 * time.Second, ConsecutiveRetries: 3}, 4*time.Second + 500*time.Millisecond},
		{State{SleepTime: 1 * time.Second, ConsecutiveRetries: 4}, 8*time.Second + 500*time.Millisecond},
		{State{SleepTime: 1 * time.Second, ConsecutiveRetries: 5}, 16*time.Second + 500*time.Millisecond},
		{State{SleepTime: 1 * time.Second, ConsecutiveRetries: 6}, 16*time.Second + 500*time.Millisecond},
		{State{SleepTime: 1 * time.Second, ConsecutiveRetries: 7}, 16*time.Second + 500*time.Millisecond},
	} {
		const n = 1000
		var sum time.Duration
		// measure average time over n cycles
		for range n {
			c := NewGoogleDrive(MinSleep(1 * time.Millisecond))
			sum += c.Calculate(test.state)
		}
		got := sum / n
		// Accept the average within +/-10% of the expected value.
		assert.False(t, got < (test.want*9)/10 || got > (test.want*11)/10, "test: %+v, got: %v", test, got)
	}
	const minSleep = 2 * time.Millisecond
	for _, test := range []struct {
		calls int
		want  int
	}{
		{1, 0},
		{9, 0},
		{10, 0},
		{11, 1},
		{12, 2},
	} {
		// The first `burst` calls should be free; later ones delayed.
		c := NewGoogleDrive(MinSleep(minSleep), Burst(10))
		count := 0
		for range test.calls {
			sleep := c.Calculate(State{})
			if sleep != 0 {
				count++
			}
		}
		assert.Equalf(t, test.want, count, "test: %+v, got: %v", test, count)
	}
}
// TestS3Pacer table-tests the S3 calculator, in particular its
// transitions between zero delay and minSleep.
func TestS3Pacer(t *testing.T) {
	c := NewS3(MinSleep(10*time.Millisecond), MaxSleep(1*time.Second), DecayConstant(2))
	for _, test := range []struct {
		state State
		want  time.Duration
	}{
		{State{SleepTime: 0, ConsecutiveRetries: 1}, 10 * time.Millisecond},                       //Things were going ok, we failed once, back off to minSleep
		{State{SleepTime: 10 * time.Millisecond, ConsecutiveRetries: 1}, 20 * time.Millisecond},   //Another fail, double the backoff
		{State{SleepTime: 10 * time.Millisecond}, 0},                                              //Things start going ok when we're at minSleep; should result in no sleep
		{State{SleepTime: 12 * time.Millisecond}, 0},                                              //*near* minsleep and going ok, decay would take below minSleep, should go to 0
		{State{SleepTime: 0}, 0},                                                                  //Things have been going ok; not retrying should keep sleep at 0
		{State{SleepTime: 1 * time.Second, ConsecutiveRetries: 1}, 1 * time.Second},               //Check maxSleep is enforced
		{State{SleepTime: (3 * time.Second) / 4, ConsecutiveRetries: 1}, 1 * time.Second},         //Check attack heading to maxSleep doesn't exceed maxSleep
		{State{SleepTime: 1 * time.Second}, 750 * time.Millisecond},                               //Check decay from maxSleep
		{State{SleepTime: 48 * time.Millisecond}, 36 * time.Millisecond},                          //Check simple decay above minSleep
	} {
		got := c.Calculate(test.state)
		assert.Equal(t, test.want, got, "test: %+v", test)
	}
}
// TestEndCall checks that endCall returns the connection token and
// increments the consecutive retry counter on retry.
func TestEndCall(t *testing.T) {
	p := New(MaxConnectionsOption(5))
	emptyTokens(p)
	p.state.ConsecutiveRetries = 1
	p.endCall(true, nil, true)
	assert.Equal(t, 1, len(p.connTokens))
	assert.Equal(t, 2, p.state.ConsecutiveRetries)
}
// TestEndCallZeroConnections checks that without connection limiting
// no token is returned and a success resets the retry counter.
func TestEndCallZeroConnections(t *testing.T) {
	p := New(MaxConnectionsOption(0))
	emptyTokens(p)
	p.state.ConsecutiveRetries = 1
	p.endCall(false, nil, false)
	assert.Equal(t, 0, len(p.connTokens))
	assert.Equal(t, 0, p.state.ConsecutiveRetries)
}
// errFoo is the sentinel error returned by every dummyPaced call.
var errFoo = errors.New("foo")
// dummyPaced is a Paced implementation for the tests. It counts its
// calls and optionally blocks on a condition variable so tests can
// control when calls complete.
type dummyPaced struct {
	retry  bool       // value returned as the retry flag
	called int        // number of times fn has been invoked
	wait   *sync.Cond // if non-nil, fn blocks on this until Broadcast
}
// fn is the Paced function under test. It always returns errFoo and
// the configured retry flag.
func (dp *dummyPaced) fn() (bool, error) {
	if dp.wait != nil {
		// Count the call under the lock, then park until released.
		dp.wait.L.Lock()
		dp.called++
		dp.wait.Wait()
		dp.wait.L.Unlock()
	} else {
		dp.called++
	}
	return dp.retry, errFoo
}
// TestCallFixed checks that a non-retrying function is called exactly
// once even when more retries are allowed.
func TestCallFixed(t *testing.T) {
	p := New(CalculatorOption(NewDefault(MinSleep(1*time.Millisecond), MaxSleep(2*time.Millisecond))))
	dp := &dummyPaced{retry: false}
	err := p.call(dp.fn, 10)
	assert.Equal(t, 1, dp.called)
	assert.Equal(t, errFoo, err)
}
// Test_callRetry checks that a retrying function is called exactly the
// number of allowed tries.
// NOTE(review): name deviates from Go convention (would be
// TestCallRetry) - kept as-is since renaming changes the test name.
func Test_callRetry(t *testing.T) {
	p := New(CalculatorOption(NewDefault(MinSleep(1*time.Millisecond), MaxSleep(2*time.Millisecond))))
	dp := &dummyPaced{retry: true}
	err := p.call(dp.fn, 10)
	assert.Equal(t, 10, dp.called)
	assert.Equal(t, errFoo, err)
}
// TestCall checks that Call uses the retry count configured with
// RetriesOption.
func TestCall(t *testing.T) {
	p := New(RetriesOption(20), CalculatorOption(NewDefault(MinSleep(1*time.Millisecond), MaxSleep(2*time.Millisecond))))
	dp := &dummyPaced{retry: true}
	err := p.Call(dp.fn)
	assert.Equal(t, 20, dp.called)
	assert.Equal(t, errFoo, err)
}
// TestCallParallel checks that with MaxConnections(3) only three of
// five parallel calls run at once; after releasing them the remaining
// two run. Timing-sensitive: uses real sleeps and a shared Cond.
func TestCallParallel(t *testing.T) {
	p := New(MaxConnectionsOption(3), RetriesOption(1), CalculatorOption(NewDefault(MinSleep(100*time.Microsecond), MaxSleep(1*time.Millisecond))))
	wait := sync.NewCond(&sync.Mutex{})
	funcs := make([]*dummyPaced, 5)
	for i := range funcs {
		dp := &dummyPaced{wait: wait}
		funcs[i] = dp
		go func() {
			assert.Equal(t, errFoo, p.CallNoRetry(dp.fn))
		}()
	}
	time.Sleep(250 * time.Millisecond)
	called := 0
	wait.L.Lock()
	for _, dp := range funcs {
		called += dp.called
	}
	wait.L.Unlock()
	// Only maxConnections calls should have started.
	assert.Equal(t, 3, called)
	wait.Broadcast()
	time.Sleep(250 * time.Millisecond)
	called = 0
	wait.L.Lock()
	for _, dp := range funcs {
		called += dp.called
	}
	wait.L.Unlock()
	// The remaining two calls should have run after the release.
	assert.Equal(t, 5, called)
	wait.Broadcast()
}
// TestCallMaxConnectionsRecursiveDeadlock checks that a Paced function
// which itself calls the pacer does not deadlock when only one
// connection token exists (the recursive call skips connection
// limiting).
func TestCallMaxConnectionsRecursiveDeadlock(t *testing.T) {
	p := New(CalculatorOption(NewDefault(MinSleep(1*time.Millisecond), MaxSleep(2*time.Millisecond))))
	p.SetMaxConnections(1)
	dp := &dummyPaced{retry: false}
	err := p.Call(func() (bool, error) {
		// check we have taken the connection token
		// no tokens left means deadlock on the recursive call
		assert.Equal(t, 0, len(p.connTokens))
		return false, p.Call(dp.fn)
	})
	assert.Equal(t, 1, dp.called)
	assert.Equal(t, errFoo, err)
}
// TestRetryAfterError_NonNilErr checks that RetryAfterError wraps a
// non-nil error: message and duration appear in Error(), the duration
// is stored, and errors.Is still finds the original.
func TestRetryAfterError_NonNilErr(t *testing.T) {
	orig := errors.New("test failure")
	dur := 2 * time.Second
	err := RetryAfterError(orig, dur)
	rErr, ok := err.(*retryAfterError)
	if !ok {
		t.Fatalf("expected *retryAfterError, got %T", err)
	}
	if !strings.Contains(err.Error(), "test failure") {
		t.Errorf("Error() = %q, want it to contain original message", err.Error())
	}
	if !strings.Contains(err.Error(), dur.String()) {
		t.Errorf("Error() = %q, want it to contain retryAfter %v", err.Error(), dur)
	}
	if rErr.retryAfter != dur {
		t.Errorf("retryAfter = %v, want %v", rErr.retryAfter, dur)
	}
	if !errors.Is(err, orig) {
		t.Error("errors.Is(err, orig) = false, want true")
	}
}
// TestRetryAfterError_NilErr checks that a nil error is replaced by
// the default "too many requests" message.
func TestRetryAfterError_NilErr(t *testing.T) {
	dur := 5 * time.Second
	err := RetryAfterError(nil, dur)
	if !strings.Contains(err.Error(), "too many requests") {
		t.Errorf("Error() = %q, want it to mention default message", err.Error())
	}
	if !strings.Contains(err.Error(), dur.String()) {
		t.Errorf("Error() = %q, want it to contain retryAfter %v", err.Error(), dur)
	}
}
// TestCauseMethod checks the legacy Cause accessor returns the
// wrapped error.
func TestCauseMethod(t *testing.T) {
	orig := errors.New("underlying")
	dur := time.Second
	rErr := RetryAfterError(orig, dur).(*retryAfterError)
	cause := rErr.Cause()
	if !errors.Is(cause, orig) {
		t.Errorf("Cause() does not wrap original: got %v", cause)
	}
}
// TestIsRetryAfter_True checks IsRetryAfter recognises a direct
// retry-after error and returns its duration.
func TestIsRetryAfter_True(t *testing.T) {
	orig := errors.New("oops")
	dur := 3 * time.Second
	err := RetryAfterError(orig, dur)
	gotDur, ok := IsRetryAfter(err)
	if !ok {
		t.Error("IsRetryAfter returned false, want true")
	}
	if gotDur != dur {
		t.Errorf("got %v, want %v", gotDur, dur)
	}
}
// TestIsRetryAfter_Nested checks IsRetryAfter sees through %w
// wrapping.
func TestIsRetryAfter_Nested(t *testing.T) {
	orig := errors.New("fail")
	dur := 4 * time.Second
	retryErr := RetryAfterError(orig, dur)
	nested := fmt.Errorf("wrapped: %w", retryErr)
	gotDur, ok := IsRetryAfter(nested)
	if !ok {
		t.Error("IsRetryAfter on nested error returned false, want true")
	}
	if gotDur != dur {
		t.Errorf("got %v, want %v", gotDur, dur)
	}
}
// TestIsRetryAfter_False checks a plain error is not misidentified.
func TestIsRetryAfter_False(t *testing.T) {
	if _, ok := IsRetryAfter(errors.New("other")); ok {
		t.Error("IsRetryAfter = true for non-retry error, want false")
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/pacer/pacer.go | lib/pacer/pacer.go | // Package pacer makes pacing and retrying API calls easy
package pacer
import (
"errors"
"fmt"
"sync"
"time"
"github.com/rclone/rclone/lib/caller"
liberrors "github.com/rclone/rclone/lib/errors"
)
// State represents the public Pacer state that will be passed to the
// configured Calculator
type State struct {
	SleepTime          time.Duration // current time to sleep before adding the pacer token back
	ConsecutiveRetries int           // number of consecutive retries, will be 0 when the last invoker call returned false
	LastError          error         // the error returned by the last invoker call or nil
}
// Calculator is a generic calculation function for a Pacer.
type Calculator interface {
	// Calculate takes the current Pacer state and returns the sleep time after which
	// the next Pacer call will be done.
	Calculate(state State) time.Duration
}
// Pacer is the primary type of the pacer package. It allows to retry calls
// with a configurable delay in between.
type Pacer struct {
	pacerOptions
	mu         sync.Mutex    // Protecting read/writes
	pacer      chan struct{} // To pace the operations
	connTokens chan struct{} // Connection tokens - nil when unlimited
	state      State         // passed to the Calculator; guarded by mu
}
// pacerOptions holds the configurable parts of a Pacer, set via the
// Option functions passed to New.
type pacerOptions struct {
	maxConnections int         // Maximum number of concurrent connections
	retries        int         // Max number of retries
	calculator     Calculator  // switchable pacing algorithm - call with mu held
	invoker        InvokerFunc // wrapper function used to invoke the target function
}
// InvokerFunc is the signature of the wrapper function used to invoke the
// target function in Pacer.
type InvokerFunc func(try, tries int, f Paced) (bool, error)
// Option can be used in New to configure the Pacer.
type Option func(*pacerOptions)
// CalculatorOption sets a Calculator for the new Pacer.
func CalculatorOption(c Calculator) Option {
	return func(p *pacerOptions) { p.calculator = c }
}
// RetriesOption sets the retries number for the new Pacer.
func RetriesOption(retries int) Option {
	return func(p *pacerOptions) { p.retries = retries }
}
// MaxConnectionsOption sets the maximum connections number for the new Pacer.
func MaxConnectionsOption(maxConnections int) Option {
	return func(p *pacerOptions) { p.maxConnections = maxConnections }
}
// InvokerOption sets an InvokerFunc for the new Pacer.
func InvokerOption(invoker InvokerFunc) Option {
	return func(p *pacerOptions) { p.invoker = invoker }
}
// Paced is a function which is called by the Call and CallNoRetry
// methods. It should return a boolean, true if it would like to be
// retried, and an error. This error may be returned or returned
// wrapped in a RetryError.
type Paced func() (bool, error)
// New returns a Pacer with sensible defaults.
//
// Defaults: 3 retries, unlimited connections, the Default calculator
// and the plain invoke wrapper. Note the initialization order matters:
// the calculator must be set before the initial SleepTime is computed.
func New(options ...Option) *Pacer {
	opts := pacerOptions{
		maxConnections: 0,
		retries:        3,
	}
	for _, o := range options {
		o(&opts)
	}
	p := &Pacer{
		pacerOptions: opts,
		pacer:        make(chan struct{}, 1),
	}
	if p.calculator == nil {
		// SetCalculator(nil) installs the Default calculator.
		p.SetCalculator(nil)
	}
	p.state.SleepTime = p.calculator.Calculate(p.state)
	if p.invoker == nil {
		p.invoker = invoke
	}
	p.SetMaxConnections(p.maxConnections)
	// Put the first pacing token in
	p.pacer <- struct{}{}
	return p
}
// SetMaxConnections sets the maximum number of concurrent connections.
// Setting the value to 0 will allow unlimited number of connections.
// Should not be changed once you have started calling the pacer.
// By default this will be 0.
func (p *Pacer) SetMaxConnections(n int) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.maxConnections = n
	if n <= 0 {
		// Unlimited: no token channel at all.
		p.connTokens = nil
		return
	}
	// Fill a fresh channel with one token per allowed connection.
	tokens := make(chan struct{}, n)
	for range n {
		tokens <- struct{}{}
	}
	p.connTokens = tokens
}
// SetRetries sets the max number of retries for Call
func (p *Pacer) SetRetries(retries int) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.retries = retries
}

// SetCalculator sets the pacing algorithm. Don't modify the Calculator object
// afterwards, use the ModifyCalculator method when needed.
//
// It will choose the default algorithm if nil is passed in.
func (p *Pacer) SetCalculator(c Calculator) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if c == nil {
		p.calculator = NewDefault()
		return
	}
	p.calculator = c
}

// ModifyCalculator calls the given function with the currently configured
// Calculator and the Pacer lock held.
func (p *Pacer) ModifyCalculator(f func(Calculator)) {
	p.mu.Lock()
	f(p.calculator)
	p.mu.Unlock()
}
// Start a call to the API
//
// This must be called as a pair with endCall.
//
// This waits for the pacer token
func (p *Pacer) beginCall(limitConnections bool) {
	// pacer starts with a token in and whenever we take one out
	// XXX ms later we put another in. We could do this with a
	// Ticker more accurately, but then we'd have to work out how
	// not to run it when it wasn't needed
	<-p.pacer
	if limitConnections {
		// Also take a connection token; blocks when maxConnections
		// calls are already in flight.
		<-p.connTokens
	}
	p.mu.Lock()
	// Restart the timer
	// The lock is held only to read p.state.SleepTime consistently;
	// the goroutine returns the pacing token after that long.
	go func(t time.Duration) {
		time.Sleep(t)
		p.pacer <- struct{}{}
	}(p.state.SleepTime)
	p.mu.Unlock()
}
// endCall implements the pacing algorithm
//
// This should calculate a new sleepTime. It takes a boolean as to
// whether the operation should be retried or not.
func (p *Pacer) endCall(retry bool, err error, limitConnections bool) {
	if limitConnections {
		// Hand the connection token back first.
		p.connTokens <- struct{}{}
	}
	p.mu.Lock()
	// Track the run of consecutive retries for the Calculator.
	if !retry {
		p.state.ConsecutiveRetries = 0
	} else {
		p.state.ConsecutiveRetries++
	}
	p.state.LastError = err
	// Ask the configured Calculator for the next sleep time.
	p.state.SleepTime = p.calculator.Calculate(p.state)
	p.mu.Unlock()
}
// call implements Call but with settable retries
//
// This detects the pacer being called reentrantly.
//
// This looks for Pacer.call in the call stack and returns true if it
// is found.
//
// Ideally we would do this by passing a context about but there are
// an awful lot of Pacer calls!
//
// This is only needed when p.maxConnections > 0 which isn't a common
// configuration so adding a bit of extra slowdown here is not a
// problem.
func (p *Pacer) call(fn Paced, retries int) (err error) {
	var retry bool
	limitConnections := false
	// Skip connection limiting for reentrant calls - otherwise a
	// Paced function that itself calls the pacer could deadlock
	// waiting for a token its own caller holds.
	if p.maxConnections > 0 && !caller.Present("(*Pacer).call") {
		limitConnections = true
	}
	for i := 1; i <= retries; i++ {
		p.beginCall(limitConnections)
		retry, err = p.invoker(i, retries, fn)
		p.endCall(retry, err, limitConnections)
		if !retry {
			break
		}
	}
	// Returns the last error from the invoker (may be nil).
	return err
}
// Call paces the remote operations to not exceed the limits and retry
// on rate limit exceeded
//
// This calls fn, expecting it to return a retry flag and an
// error. This error may be returned wrapped in a RetryError if the
// number of retries is exceeded.
func (p *Pacer) Call(fn Paced) (err error) {
	// Snapshot the configured retry count under the lock
	p.mu.Lock()
	maxTries := p.retries
	p.mu.Unlock()
	return p.call(fn, maxTries)
}
// CallNoRetry paces the remote operations to not exceed the limits
// and return a retry error on rate limit exceeded
//
// This calls fn and wraps the output in a RetryError if it would like
// it to be retried
func (p *Pacer) CallNoRetry(fn Paced) error {
	// A single attempt only: any retry request surfaces via the invoker
	return p.call(fn, 1)
}

// invoke is the default invoker - it calls the function directly,
// ignoring the try/tries counters.
func invoke(try, tries int, f Paced) (bool, error) {
	return f()
}
// retryAfterError wraps an error together with the delay a caller
// should wait before retrying.
type retryAfterError struct {
	error
	retryAfter time.Duration
}

// Error implements the error interface, including the retry delay.
func (r *retryAfterError) Error() string {
	return fmt.Sprintf("%v: trying again in %v", r.error, r.retryAfter)
}

// Cause returns the wrapped error (github.com/pkg/errors style).
func (r *retryAfterError) Cause() error {
	return r.error
}

// Unwrap returns the wrapped error (errors.Is/errors.As style).
func (r *retryAfterError) Unwrap() error {
	return r.error
}

// RetryAfterError returns a wrapped error that can be used by Calculator implementations
func RetryAfterError(err error, retryAfter time.Duration) error {
	wrapped := err
	if wrapped == nil {
		// Supply a generic error when the caller has none
		wrapped = errors.New("too many requests")
	}
	return &retryAfterError{error: wrapped, retryAfter: retryAfter}
}
// IsRetryAfter returns true if the error or any of its Causes is an error
// returned by RetryAfterError. It also returns the associated Duration if possible.
func IsRetryAfter(err error) (retryAfter time.Duration, isRetryAfter bool) {
	liberrors.Walk(err, func(err error) bool {
		if r, ok := err.(*retryAfterError); ok {
			retryAfter, isRetryAfter = r.retryAfter, true
			// NOTE(review): returning true presumably stops the walk at the
			// first (outermost) retryAfterError - confirm against liberrors.Walk
			return true
		}
		return false
	})
	return
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/pacer/tokens_test.go | lib/pacer/tokens_test.go | package pacer
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestTokenDispenser(t *testing.T) {
	dispenser := NewTokenDispenser(5)
	// Pool starts full
	assert.Equal(t, 5, len(dispenser.tokens))
	// Taking a token shrinks the pool by one
	dispenser.Get()
	assert.Equal(t, 4, len(dispenser.tokens))
	// Returning it restores the pool
	dispenser.Put()
	assert.Equal(t, 5, len(dispenser.tokens))
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/pacer/tokens.go | lib/pacer/tokens.go | // Tokens for controlling concurrency
package pacer
// TokenDispenser is for controlling concurrency
type TokenDispenser struct {
	tokens chan struct{}
}

// NewTokenDispenser makes a pool of n tokens
func NewTokenDispenser(n int) *TokenDispenser {
	pool := make(chan struct{}, n)
	// Start with every token available
	for i := 0; i < n; i++ {
		pool <- struct{}{}
	}
	return &TokenDispenser{tokens: pool}
}

// Get gets a token from the pool - don't forget to return it with Put
func (td *TokenDispenser) Get() {
	<-td.tokens
}

// Put returns a token
func (td *TokenDispenser) Put() {
	td.tokens <- struct{}{}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/diskusage/diskusage_unix.go | lib/diskusage/diskusage_unix.go | //go:build aix || android || darwin || dragonfly || freebsd || ios || linux
package diskusage
import (
"golang.org/x/sys/unix"
)
// New returns the disk status for dir.
//
// May return Unsupported error if it doesn't work on this platform.
func New(dir string) (info Info, err error) {
	var st unix.Statfs_t
	if err = unix.Statfs(dir, &st); err != nil {
		return info, err
	}
	// Block counts and sizes have platform dependent widths, so widen
	// everything to uint64 before multiplying
	//nolint:unconvert
	info.Free = uint64(st.Bfree) * uint64(st.Bsize)
	//nolint:unconvert
	info.Available = uint64(st.Bavail) * uint64(st.Bsize)
	//nolint:unconvert
	info.Total = uint64(st.Blocks) * uint64(st.Bsize)
	return info, nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/diskusage/diskusage_netbsd.go | lib/diskusage/diskusage_netbsd.go | //go:build netbsd
package diskusage
import (
"golang.org/x/sys/unix"
)
// New returns the disk status for dir.
//
// May return Unsupported error if it doesn't work on this platform.
func New(dir string) (info Info, err error) {
	var st unix.Statvfs_t
	if err = unix.Statvfs(dir, &st); err != nil {
		return info, err
	}
	// Block counts and sizes have platform dependent widths, so widen
	// everything to uint64 before multiplying
	//nolint:unconvert
	info.Free = uint64(st.Bfree) * uint64(st.Bsize)
	//nolint:unconvert
	info.Available = uint64(st.Bavail) * uint64(st.Bsize)
	//nolint:unconvert
	info.Total = uint64(st.Blocks) * uint64(st.Bsize)
	return info, nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/diskusage/diskusage_openbsd.go | lib/diskusage/diskusage_openbsd.go | //go:build openbsd
package diskusage
import (
"golang.org/x/sys/unix"
)
// New returns the disk status for dir.
//
// May return Unsupported error if it doesn't work on this platform.
func New(dir string) (info Info, err error) {
	var st unix.Statfs_t
	if err = unix.Statfs(dir, &st); err != nil {
		return info, err
	}
	// Block counts and sizes have platform dependent widths, so widen
	// everything to uint64 before multiplying
	//nolint:unconvert
	info.Free = uint64(st.F_bfree) * uint64(st.F_bsize)
	//nolint:unconvert
	info.Available = uint64(st.F_bavail) * uint64(st.F_bsize)
	//nolint:unconvert
	info.Total = uint64(st.F_blocks) * uint64(st.F_bsize)
	return info, nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/diskusage/diskusage_windows.go | lib/diskusage/diskusage_windows.go | //go:build windows
package diskusage
import (
"golang.org/x/sys/windows"
)
// New returns the disk status for dir.
//
// May return Unsupported error if it doesn't work on this platform.
func New(dir string) (info Info, err error) {
	// Use UTF16PtrFromString rather than the deprecated StringToUTF16Ptr,
	// which panics if dir contains a NUL byte; return the error instead.
	dir16, err := windows.UTF16PtrFromString(dir)
	if err != nil {
		return info, err
	}
	err = windows.GetDiskFreeSpaceEx(dir16, &info.Available, &info.Total, &info.Free)
	return info, err
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/diskusage/diskusage_unsupported.go | lib/diskusage/diskusage_unsupported.go | //go:build illumos || js || plan9 || solaris
package diskusage
// New returns the disk status for dir.
//
// May return Unsupported error if it doesn't work on this platform.
func New(dir string) (info Info, err error) {
	// No statfs equivalent is wired up for these platforms; always
	// report ErrUnsupported with a zero Info.
	return info, ErrUnsupported
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/diskusage/diskusage_test.go | lib/diskusage/diskusage_test.go | package diskusage
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestNew sanity-checks the disk usage numbers for the current
// directory, skipping on platforms where the stub returns ErrUnsupported.
func TestNew(t *testing.T) {
	info, err := New(".")
	if err == ErrUnsupported {
		t.Skip(err)
	}
	require.NoError(t, err)
	t.Logf("Free %16d", info.Free)
	t.Logf("Available %16d", info.Available)
	t.Logf("Total %16d", info.Total)
	// Basic invariants: the disk has some space, and free/available
	// never exceed the total
	assert.True(t, info.Total != 0)
	assert.True(t, info.Total > info.Free)
	assert.True(t, info.Total > info.Available)
	assert.True(t, info.Free >= info.Available)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/diskusage/diskusage.go | lib/diskusage/diskusage.go | // Package diskusage provides a cross platform version of the statfs
// system call to read disk space usage.
package diskusage
import "errors"
// Info is returned from New showing details about the disk.
//
// Free can be larger than Available on systems that reserve
// blocks for privileged users.
type Info struct {
	Free      uint64 // total free bytes
	Available uint64 // free bytes available to the current user
	Total     uint64 // total bytes on disk
}

// ErrUnsupported is returned if this platform doesn't support disk usage.
var ErrUnsupported = errors.New("disk usage unsupported on this platform")
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/terminal/terminal_normal.go | lib/terminal/terminal_normal.go | //go:build !js
package terminal
import (
"fmt"
"os"
"golang.org/x/term"
)
// GetSize reads the dimensions of the current terminal or returns a
// sensible default (80x25) if stdout isn't a terminal.
func GetSize() (w, h int) {
	w, h, err := term.GetSize(int(os.Stdout.Fd()))
	if err != nil {
		// Not a terminal (or size unavailable) - use classic defaults
		w, h = 80, 25
	}
	return w, h
}

// IsTerminal returns whether the fd passed in is a terminal or not
func IsTerminal(fd int) bool {
	return term.IsTerminal(fd)
}

// ReadPassword reads a line of input from a terminal without local echo. This
// is commonly used for inputting passwords and other sensitive data. The slice
// returned does not include the \n.
func ReadPassword(fd int) ([]byte, error) {
	return term.ReadPassword(fd)
}

// WriteTerminalTitle writes a string to the terminal title using the
// xterm escape sequence (ChangeTitle ... BEL).
func WriteTerminalTitle(title string) {
	fmt.Print(ChangeTitle + title + BEL)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/terminal/terminal.go | lib/terminal/terminal.go | // Package terminal provides VT100 terminal codes and a windows
// implementation of that.
package terminal
import (
"context"
"io"
"os"
"runtime"
"sync"
colorable "github.com/mattn/go-colorable"
"github.com/rclone/rclone/fs"
)
// VT100 codes
const (
	// Line editing / cursor movement
	EraseLine         = "\x1b[2K"
	MoveToStartOfLine = "\x1b[1G"
	MoveUp            = "\x1b[1A"

	// Text attributes
	Reset      = "\x1b[0m"
	Bright     = "\x1b[1m"
	Dim        = "\x1b[2m"
	Underscore = "\x1b[4m"
	Blink      = "\x1b[5m"
	Reverse    = "\x1b[7m"
	Hidden     = "\x1b[8m"

	// Standard foreground colors
	BlackFg   = "\x1b[30m"
	RedFg     = "\x1b[31m"
	GreenFg   = "\x1b[32m"
	YellowFg  = "\x1b[33m"
	BlueFg    = "\x1b[34m"
	MagentaFg = "\x1b[35m"
	CyanFg    = "\x1b[36m"
	WhiteFg   = "\x1b[37m"

	// Standard background colors
	BlackBg   = "\x1b[40m"
	RedBg     = "\x1b[41m"
	GreenBg   = "\x1b[42m"
	YellowBg  = "\x1b[43m"
	BlueBg    = "\x1b[44m"
	MagentaBg = "\x1b[45m"
	CyanBg    = "\x1b[46m"
	WhiteBg   = "\x1b[47m"

	// High intensity foreground colors
	HiBlackFg   = "\x1b[90m"
	HiRedFg     = "\x1b[91m"
	HiGreenFg   = "\x1b[92m"
	HiYellowFg  = "\x1b[93m"
	HiBlueFg    = "\x1b[94m"
	HiMagentaFg = "\x1b[95m"
	HiCyanFg    = "\x1b[96m"
	HiWhiteFg   = "\x1b[97m"

	// High intensity background colors
	HiBlackBg   = "\x1b[100m"
	HiRedBg     = "\x1b[101m"
	HiGreenBg   = "\x1b[102m"
	HiYellowBg  = "\x1b[103m"
	HiBlueBg    = "\x1b[104m"
	HiMagentaBg = "\x1b[105m"
	HiCyanBg    = "\x1b[106m"
	HiWhiteBg   = "\x1b[107m"

	// Window title control (xterm): ChangeTitle + title + BEL
	ChangeTitle = "\033]0;"
	BEL         = "\007"
)

var (
	// make sure that start is only called once
	once sync.Once
)
// Start the terminal - must be called before use
func Start() {
	once.Do(func() {
		ci := fs.GetConfig(context.Background())
		f := os.Stdout
		switch {
		case !IsTerminal(int(f.Fd())):
			// If stdout is not a tty, remove escape codes EXCEPT if terminal color mode equals "ALWAYS"
			if ci.TerminalColorMode == fs.TerminalColorModeAlways {
				Out = colorable.NewColorable(f)
			} else {
				Out = colorable.NewNonColorable(f)
			}
		case runtime.GOOS == "windows" && os.Getenv("TERM") != "":
			// If TERM is set just use stdout
			Out = f
		case ci.TerminalColorMode == fs.TerminalColorModeNever:
			Out = colorable.NewNonColorable(f)
		default:
			Out = colorable.NewColorable(f)
		}
	})
}
// WriteString writes the string passed in to the terminal
func WriteString(s string) {
	Write([]byte(s))
}

// Out is an io.Writer which can be used to write to the terminal
// e.g. for use with fmt.Fprintf(terminal.Out, "terminal fun: %d\n", n)
//
// It is initialised by Start (called automatically by Write).
var Out io.Writer

// Write sends out to the VT100 terminal.
// It will initialise the terminal if this is the first call.
func Write(out []byte) {
	Start()
	// Write errors to the terminal are deliberately ignored
	_, _ = Out.Write(out)
}

// EnableColorsStdout enable colors if possible.
// This enables virtual terminal processing on Windows 10 console,
// adding native support for VT100 escape codes. When this terminal
// package is used for output, the result is that the colorable library
// don't have to decode the escapes and explicitly write text with color
// formatting to the console using Windows API functions, but can simply
// relay everything to stdout.
func EnableColorsStdout() {
	_ = colorable.EnableColorsStdout(nil)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/terminal/hidden_windows.go | lib/terminal/hidden_windows.go | //go:build windows
package terminal
import (
"golang.org/x/sys/windows"
)
// HideConsole hides the console window and activates another window
func HideConsole() {
	// Resolve the Win32 entry points lazily; Find returns nil only if
	// the proc exists, so missing procs degrade this to a no-op.
	getConsoleWindow := windows.NewLazySystemDLL("kernel32.dll").NewProc("GetConsoleWindow")
	showWindow := windows.NewLazySystemDLL("user32.dll").NewProc("ShowWindow")
	if getConsoleWindow.Find() == nil && showWindow.Find() == nil {
		hwnd, _, _ := getConsoleWindow.Call()
		if hwnd != 0 {
			// Second argument 0 corresponds to SW_HIDE
			_, _, _ = showWindow.Call(hwnd, 0)
		}
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/terminal/hidden_other.go | lib/terminal/hidden_other.go | //go:build !windows
package terminal
// HideConsole is only supported on windows - elsewhere it is a no-op.
func HideConsole() {
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/terminal/terminal_unsupported.go | lib/terminal/terminal_unsupported.go | //go:build js
package terminal
import "errors"
// GetSize reads the dimensions of the current terminal or returns a
// sensible default
func GetSize() (w, h int) {
	// There is no terminal under js/wasm - report a conventional 80x25 screen
	const defaultWidth, defaultHeight = 80, 25
	return defaultWidth, defaultHeight
}
// IsTerminal returns whether the fd passed in is a terminal or not
func IsTerminal(fd int) bool {
	// No fd is ever a terminal under js/wasm
	return false
}
// ReadPassword reads a line of input from a terminal without local echo. This
// is commonly used for inputting passwords and other sensitive data. The slice
// returned does not include the \n.
func ReadPassword(fd int) ([]byte, error) {
	// Unsupported on this platform - always fail
	return nil, errors.New("can't read password")
}
// WriteTerminalTitle writes a string to the terminal title
func WriteTerminalTitle(title string) {
	// There is no terminal title to set on this platform, so this is a NOOP
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/exitcode/exitcode.go | lib/exitcode/exitcode.go | // Package exitcode exports rclone's exit status numbers.
package exitcode
const (
	// NB these codes are allocated sequentially by iota and form part
	// of rclone's command line interface - only append new codes at
	// the end, never reorder.

	// Success is returned when rclone finished without error.
	Success = iota
	// UncategorizedError is returned for any error not categorised otherwise.
	UncategorizedError
	// UsageError is returned when there was a syntax or usage error in the arguments.
	UsageError
	// DirNotFound is returned when a source or destination directory is not found.
	DirNotFound
	// FileNotFound is returned when a source or destination file is not found.
	FileNotFound
	// RetryError is returned for temporary errors during operations which may be retried.
	RetryError
	// NoRetryError is returned for errors from operations which can't/shouldn't be retried.
	NoRetryError
	// FatalError is returned for errors one or more retries won't resolve.
	FatalError
	// TransferExceeded is returned when network I/O exceeded the quota.
	TransferExceeded
	// NoFilesTransferred everything succeeded, but no transfer was made.
	NoFilesTransferred
	// DurationExceeded is returned when transfer duration exceeded the quota.
	DurationExceeded
)
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/transform/gen_help.go | lib/transform/gen_help.go | // Create the help text for transform
//
// Run with go generate (defined in transform.go)
//
//go:build none
package main
import (
"context"
"fmt"
"os"
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/transform"
)
// commands describes one --name-transform flag for the generated docs table.
type commands struct {
	command     string // the flag as typed on the command line
	description string // human readable description shown in the table
}

// example is a worked example: an input path plus the transform flags to apply.
type example struct {
	path  string   // input path to transform
	flags []string // --name-transform values applied in order
}
// commandList is the table of all supported --name-transform commands
// rendered into the generated documentation.
var commandList = []commands{
	{command: "--name-transform prefix=XXXX", description: "Prepends XXXX to the file name."},
	{command: "--name-transform suffix=XXXX", description: "Appends XXXX to the file name after the extension."},
	{command: "--name-transform suffix_keep_extension=XXXX", description: "Appends XXXX to the file name while preserving the original file extension."},
	{command: "--name-transform trimprefix=XXXX", description: "Removes XXXX if it appears at the start of the file name."},
	{command: "--name-transform trimsuffix=XXXX", description: "Removes XXXX if it appears at the end of the file name."},
	{command: "--name-transform regex=pattern/replacement", description: "Applies a regex-based transformation."},
	{command: "--name-transform replace=old:new", description: "Replaces occurrences of old with new in the file name."},
	{command: "--name-transform date={YYYYMMDD}", description: "Appends or prefixes the specified date format."},
	{command: "--name-transform truncate=N", description: "Truncates the file name to a maximum of N characters."},
	{command: "--name-transform truncate_keep_extension=N", description: "Truncates the file name to a maximum of N characters while preserving the original file extension."},
	{command: "--name-transform truncate_bytes=N", description: "Truncates the file name to a maximum of N bytes (not characters)."},
	{command: "--name-transform truncate_bytes_keep_extension=N", description: "Truncates the file name to a maximum of N bytes (not characters) while preserving the original file extension."},
	{command: "--name-transform base64encode", description: "Encodes the file name in Base64."},
	{command: "--name-transform base64decode", description: "Decodes a Base64-encoded file name."},
	{command: "--name-transform encoder=ENCODING", description: "Converts the file name to the specified encoding (e.g., ISO-8859-1, Windows-1252, Macintosh)."},
	{command: "--name-transform decoder=ENCODING", description: "Decodes the file name from the specified encoding."},
	{command: "--name-transform charmap=MAP", description: "Applies a character mapping transformation."},
	{command: "--name-transform lowercase", description: "Converts the file name to lowercase."},
	{command: "--name-transform uppercase", description: "Converts the file name to UPPERCASE."},
	{command: "--name-transform titlecase", description: "Converts the file name to Title Case."},
	{command: "--name-transform ascii", description: "Strips non-ASCII characters."},
	{command: "--name-transform url", description: "URL-encodes the file name."},
	{command: "--name-transform nfc", description: "Converts the file name to NFC Unicode normalization form."},
	{command: "--name-transform nfd", description: "Converts the file name to NFD Unicode normalization form."},
	{command: "--name-transform nfkc", description: "Converts the file name to NFKC Unicode normalization form."},
	{command: "--name-transform nfkd", description: "Converts the file name to NFKD Unicode normalization form."},
	{command: "--name-transform command=/path/to/my/programfile names.", description: "Executes an external program to transform."},
}

// examples are rendered into the docs with their actual transform
// output (computed at generation time by example.output).
var examples = []example{
	{"stories/The Quick Brown Fox!.txt", []string{"all,uppercase"}},
	{"stories/The Quick Brown Fox!.txt", []string{"all,replace=Fox:Turtle", "all,replace=Quick:Slow"}},
	{"stories/The Quick Brown Fox!.txt", []string{"all,base64encode"}},
	{"c3Rvcmllcw==/VGhlIFF1aWNrIEJyb3duIEZveCEudHh0", []string{"all,base64decode"}},
	{"stories/The Quick Brown 🦊 Fox Went to the Café!.txt", []string{"all,nfc"}},
	{"stories/The Quick Brown 🦊 Fox Went to the Café!.txt", []string{"all,nfd"}},
	{"stories/The Quick Brown 🦊 Fox!.txt", []string{"all,ascii"}},
	{"stories/The Quick Brown Fox!.txt", []string{"all,trimsuffix=.txt"}},
	{"stories/The Quick Brown Fox!.txt", []string{"all,prefix=OLD_"}},
	{"stories/The Quick Brown 🦊 Fox Went to the Café!.txt", []string{"all,charmap=ISO-8859-7"}},
	{"stories/The Quick Brown Fox: A Memoir [draft].txt", []string{"all,encoder=Colon,SquareBracket"}},
	{"stories/The Quick Brown 🦊 Fox Went to the Café!.txt", []string{"all,truncate=21"}},
	{"stories/The Quick Brown Fox!.txt", []string{"all,command=echo"}},
	{"stories/The Quick Brown Fox!", []string{"date=-{YYYYMMDD}"}},
	{"stories/The Quick Brown Fox!", []string{"date=-{macfriendlytime}"}},
	{"stories/The Quick Brown Fox!.txt", []string{"all,regex=[\\.\\w]/ab"}},
}
// command renders the example as a runnable rclone command line.
func (e example) command() string {
	var b strings.Builder
	fmt.Fprintf(&b, "rclone convmv %q", e.path)
	for _, flag := range e.flags {
		fmt.Fprintf(&b, " --name-transform %q", flag)
	}
	return b.String()
}
// output applies the example's transform flags and returns the
// transformed path, for embedding in the generated docs.
func (e example) output() string {
	ctx := context.Background()
	err := transform.SetOptions(ctx, e.flags...)
	if err != nil {
		// Log and continue - a broken example shows up in the docs diff
		fs.Errorf(nil, "error generating help text: %v", err)
	}
	return transform.Path(ctx, e.path, false)
}
// sprintExamples renders the worked examples as markdown console blocks.
// (Preview with: go run ./ convmv --help)
func sprintExamples() string {
	var b strings.Builder
	b.WriteString("Examples:\n")
	for _, e := range examples {
		fmt.Fprintf(&b, "\n```console\n%s\n", e.command())
		fmt.Fprintf(&b, "// Output: %s\n```\n", e.output())
	}
	return b.String()
}
// commandTable renders commandList as a markdown table.
func commandTable() string {
	var b strings.Builder
	b.WriteString(`| Command | Description |
|------|------|`)
	for _, c := range commandList {
		fmt.Fprintf(&b, "\n| `%s` | %s |", c.command, c.description)
	}
	b.WriteString("\n")
	return b.String()
}
// SprintList returns the example help text as a string
func SprintList() string {
	// Zero values are only used to enumerate the Choices() lists
	var algos transform.Algo
	var charmaps transform.CharmapChoices
	s := commandTable()
	s += "\nConversion modes:\n\n```text\n"
	for _, v := range algos.Choices() {
		s += v + "\n"
	}
	s += "```\n\n"
	s += "Char maps:\n\n```text\n"
	for _, v := range charmaps.Choices() {
		s += v + "\n"
	}
	s += "```\n\n"
	s += "Encoding masks:\n\n```text\n"
	for _, v := range strings.Split(encoder.ValidStrings(), ", ") {
		s += v + "\n"
	}
	s += "```\n\n"
	s += sprintExamples()
	return s
}
// Output the help to stdout, or to the file named in the first
// command line argument if given.
func main() {
	out := os.Stdout
	if len(os.Args) > 1 {
		f, err := os.Create(os.Args[1])
		if err != nil {
			fs.Fatalf(nil, "Open output failed: %v", err)
		}
		defer f.Close()
		out = f
	}
	fmt.Fprintf(out, "<!--- Docs generated by help.go - use go generate to rebuild - DO NOT EDIT --->\n\n")
	fmt.Fprint(out, SprintList())
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/transform/transform_test.go | lib/transform/transform_test.go | package transform
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// sync tests are in fs/sync/sync_transform_test.go to avoid import cycle issues
// newOptions returns a background context with the given name-transform
// options applied, along with any error from parsing them.
func newOptions(s ...string) (context.Context, error) {
	ctx := context.Background()
	return ctx, SetOptions(ctx, s...)
}
// TestPath checks that multiple transforms are applied in flag order
// to every path segment.
func TestPath(t *testing.T) {
	for _, test := range []struct {
		path string
		want string
	}{
		{"", ""},
		{"toe/toe/toe", "tictactoe/tictactoe/tictactoe"},
		{"a/b/c", "tictaca/tictacb/tictacc"},
	} {
		ctx, err := newOptions("all,prefix=tac", "all,prefix=tic")
		require.NoError(t, err)
		got := Path(ctx, test.path, false)
		assert.Equal(t, test.want, got)
	}
}

// TestFileTagOnFile checks a file-tagged transform only changes the base name.
func TestFileTagOnFile(t *testing.T) {
	for _, test := range []struct {
		path string
		want string
	}{
		{"a/b/c.txt", "a/b/1c.txt"},
	} {
		ctx, err := newOptions("file,prefix=1")
		require.NoError(t, err)
		got := Path(ctx, test.path, false)
		assert.Equal(t, test.want, got)
	}
}

// TestDirTagOnFile checks a dir-tagged transform only changes the parents.
func TestDirTagOnFile(t *testing.T) {
	for _, test := range []struct {
		path string
		want string
	}{
		{"a/b/c.txt", "1a/1b/c.txt"},
	} {
		ctx, err := newOptions("dir,prefix=1")
		require.NoError(t, err)
		got := Path(ctx, test.path, false)
		assert.Equal(t, test.want, got)
	}
}

// TestAllTag checks an all-tagged transform changes every segment.
func TestAllTag(t *testing.T) {
	for _, test := range []struct {
		path string
		want string
	}{
		{"a/b/c.txt", "1a/1b/1c.txt"},
	} {
		ctx, err := newOptions("all,prefix=1")
		require.NoError(t, err)
		got := Path(ctx, test.path, false)
		assert.Equal(t, test.want, got)
	}
}

// TestFileTagOnDir checks a file-tagged transform leaves directories alone.
func TestFileTagOnDir(t *testing.T) {
	for _, test := range []struct {
		path string
		want string
	}{
		{"a/b", "a/b"},
	} {
		ctx, err := newOptions("file,prefix=1")
		require.NoError(t, err)
		got := Path(ctx, test.path, true)
		assert.Equal(t, test.want, got)
	}
}

// TestDirTagOnDir checks a dir-tagged transform changes directory paths.
func TestDirTagOnDir(t *testing.T) {
	for _, test := range []struct {
		path string
		want string
	}{
		{"a/b", "1a/1b"},
	} {
		ctx, err := newOptions("dir,prefix=1")
		require.NoError(t, err)
		got := Path(ctx, test.path, true)
		assert.Equal(t, test.want, got)
	}
}
// TestVarious exercises one example of each transform algorithm end to
// end, mirroring the examples in the generated documentation.
func TestVarious(t *testing.T) {
	for _, test := range []struct {
		path  string
		want  string
		flags []string
	}{
		{"stories/The Quick Brown Fox!.txt", "STORIES/THE QUICK BROWN FOX!.TXT", []string{"all,uppercase"}},
		{"stories/The Quick Brown Fox!.txt", "stories/The Slow Brown Turtle!.txt", []string{"all,replace=Fox:Turtle", "all,replace=Quick:Slow"}},
		{"stories/The Quick Brown Fox!.txt", "c3Rvcmllcw==/VGhlIFF1aWNrIEJyb3duIEZveCEudHh0", []string{"all,base64encode"}},
		{"c3Rvcmllcw==/VGhlIFF1aWNrIEJyb3duIEZveCEudHh0", "stories/The Quick Brown Fox!.txt", []string{"all,base64decode"}},
		{"stories/The Quick Brown 🦊 Fox Went to the Café!.txt", "stories/The Quick Brown 🦊 Fox Went to the Café!.txt", []string{"all,nfc"}},
		{"stories/The Quick Brown 🦊 Fox Went to the Café!.txt", "stories/The Quick Brown 🦊 Fox Went to the Café!.txt", []string{"all,nfd"}},
		{"stories/The Quick Brown 🦊 Fox!.txt", "stories/The Quick Brown Fox!.txt", []string{"all,ascii"}},
		{"stories/The Quick Brown 🦊 Fox!.txt", "stories/The+Quick+Brown+%F0%9F%A6%8A+Fox%21.txt", []string{"all,url"}},
		{"stories/The Quick Brown Fox!.txt", "stories/The Quick Brown Fox!", []string{"all,trimsuffix=.txt"}},
		{"stories/The Quick Brown Fox!.txt", "OLD_stories/OLD_The Quick Brown Fox!.txt", []string{"all,prefix=OLD_"}},
		{"stories/The Quick Brown 🦊 Fox Went to the Café!.txt", "stories/The Quick Brown _ Fox Went to the Caf_!.txt", []string{"all,charmap=ISO-8859-7"}},
		{"stories/The Quick Brown Fox: A Memoir [draft].txt", "stories/The Quick Brown Fox: A Memoir [draft].txt", []string{"all,encoder=Colon,SquareBracket"}},
		{"stories/The Quick Brown 🦊 Fox Went to the Café!.txt", "stories/The Quick Brown 🦊 Fox", []string{"all,truncate=21"}},
		{"stories/Вот русское предложение, в котором байтов больше, чем символов.txt", "stories/Вот русское предложение, в котором байтов больше, чем символов.txt", []string{"truncate=70"}},
		{"stories/Вот русское предложение, в котором байтов больше, чем символов.txt", "stories/Вот русское предложение, в котором байтов больше, чем символ", []string{"truncate=60"}},
		{"stories/Вот русское предложение, в котором байтов больше, чем символов.txt", "stories/Вот русское предложение, в котором байтов больше, чем символов.txt", []string{"truncate_bytes=300"}},
		{"stories/Вот русское предложение, в котором байтов больше, чем символов.txt", "stories/Вот русское предложение, в котором бай", []string{"truncate_bytes=70"}},
		{"stories/Вот русское предложение, в котором байтов больше, чем символов.txt", "stories/Вот русское предложение, в котором байтов больше, чем си.txt", []string{"truncate_keep_extension=60"}},
		{"stories/Вот русское предложение, в котором байтов больше, чем символов.txt", "stories/Вот русское предложение, в котором б.txt", []string{"truncate_bytes_keep_extension=70"}},
		{"stories/The Quick Brown Fox!.txt", "stories/The Quick Brown Fox!.txt", []string{"all,command=echo"}},
		{"stories/The Quick Brown Fox!.txt", "stories/The Quick Brown Fox!.txt-" + time.Now().Local().Format("20060102"), []string{"date=-{YYYYMMDD}"}},
		{"stories/The Quick Brown Fox!.txt", "stories/The Quick Brown Fox!.txt-" + time.Now().Local().Format("2006-01-02 0304PM"), []string{"date=-{macfriendlytime}"}},
		{"stories/The Quick Brown Fox!.txt", "ababababababab/ababab ababababab ababababab ababab!abababab", []string{"all,regex=[\\.\\w]/ab"}},
	} {
		ctx, err := newOptions(test.flags...)
		require.NoError(t, err)
		got := Path(ctx, test.path, false)
		assert.Equal(t, test.want, got)
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/transform/transform.go | lib/transform/transform.go | // Package transform holds functions for path name transformations
//
//go:generate go run gen_help.go transform.md
package transform
import (
"bytes"
"context"
_ "embed"
"encoding/base64"
"errors"
"fmt"
"mime"
"net/url"
"os/exec"
"path"
"regexp"
"strconv"
"strings"
"time"
"unicode/utf8"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/encoder"
"golang.org/x/text/encoding/charmap"
"golang.org/x/text/unicode/norm"
)
// help holds the generated help text - rebuilt by gen_help.go via go generate
//go:embed transform.md
var help string
// Help returns the help string cleaned up to simplify appending
func Help() string {
	// Drop the auto generated first line, then normalise trailing whitespace
	_, rest, _ := strings.Cut(help, "\n")
	return strings.TrimSpace(rest) + "\n\n"
}
// Path transforms a path s according to the --name-transform options in use
//
// If no transforms are in use, s is returned unchanged
func Path(ctx context.Context, s string, isDir bool) string {
	if !Transforming(ctx) {
		return s
	}
	old := s
	opt, err := getOptions(ctx)
	if err != nil {
		err = fs.CountError(ctx, err)
		fs.Errorf(s, "Failed to parse transform flags: %v", err)
	}
	// Apply each transform in the order given on the command line
	for _, t := range opt {
		if isDir && t.tag == file {
			// file-only transforms don't apply to directories
			continue
		}
		// A file-tagged transform on a file path only changes the base name
		baseOnly := !isDir && t.tag == file
		if t.tag == dir && !isDir {
			// dir-tagged transform on a file path: transform the parents only
			s, err = transformDir(s, t)
		} else {
			s, err = transformPath(s, t, baseOnly)
		}
		if err != nil {
			// NOTE(review): on error s may now be empty and later
			// transforms operate on that empty value - confirm intended
			err = fs.CountError(ctx, fserrors.NoRetryError(err))
			fs.Errorf(s, "Failed to transform: %v", err)
		}
	}
	if old != s {
		fs.Debugf(old, "transformed to: %v", s)
	}
	// A transform must never add or remove path separators; if the
	// segment count changed, refuse the result and keep the original
	if strings.Count(old, "/") != strings.Count(s, "/") {
		err = fs.CountError(ctx, fserrors.NoRetryError(fmt.Errorf("number of path segments must match: %v (%v), %v (%v)", old, strings.Count(old, "/"), s, strings.Count(s, "/"))))
		fs.Errorf(old, "%v", err)
		return old
	}
	return s
}
// transformPath transforms a path string according to the chosen TransformAlgo.
// Each path segment is transformed separately, to preserve path separators.
// If baseOnly is true, only the base will be transformed (useful for renaming while walking a dir tree recursively.)
// for example, "some/nested/path" -> "some/nested/CONVERTEDPATH"
// otherwise, the entire path is transformed.
//
// It returns ("", err) on any transform or validation error.
func transformPath(s string, t transform, baseOnly bool) (string, error) {
	// Nothing to do for empty or root-ish paths
	if s == "" || s == "/" || s == "\\" || s == "." {
		return "", nil
	}
	if baseOnly {
		transformedBase, err := transformPathSegment(path.Base(s), t)
		if err != nil {
			// Check the transform error before validating, and return ""
			// on error for consistency with the full-path branch below
			return "", err
		}
		if err := validateSegment(transformedBase); err != nil {
			return "", err
		}
		return path.Join(path.Dir(s), transformedBase), nil
	}
	segments := strings.Split(s, "/")
	// Preallocate capacity only: make([]string, len(segments)) followed by
	// append would prepend len(segments) empty strings (previously masked
	// because path.Join drops empty elements)
	transformedSegments := make([]string, 0, len(segments))
	for _, seg := range segments {
		convSeg, err := transformPathSegment(seg, t)
		if err != nil {
			return "", err
		}
		if err := validateSegment(convSeg); err != nil {
			return "", err
		}
		transformedSegments = append(transformedSegments, convSeg)
	}
	return path.Join(transformedSegments...), nil
}
// transformDir transforms every path segment except the last one,
// leaving the leaf name untouched.
func transformDir(s string, t transform) (string, error) {
	leaf := path.Base(s)
	transformedDir, err := transformPath(path.Dir(s), t, false)
	if err != nil {
		return "", err
	}
	return path.Join(transformedDir, leaf), nil
}
// transformPathSegment transforms one path segment (or really any string) according to the chosen TransformAlgo.
// It assumes path separators have already been trimmed.
func transformPathSegment(s string, t transform) (string, error) {
	switch t.key {
	case ConvNone:
		return s, nil
	case ConvToNFC:
		return norm.NFC.String(s), nil
	case ConvToNFD:
		return norm.NFD.String(s), nil
	case ConvToNFKC:
		return norm.NFKC.String(s), nil
	case ConvToNFKD:
		return norm.NFKD.String(s), nil
	case ConvBase64Encode:
		return base64.URLEncoding.EncodeToString([]byte(s)), nil // URLEncoding to avoid slashes
	case ConvBase64Decode:
		// .DS_Store files written by macOS are not valid base64 -- pass them through
		if s == ".DS_Store" {
			return s, nil
		}
		b, err := base64.URLEncoding.DecodeString(s)
		if err != nil {
			// include the underlying error rather than a bare "base64 error"
			fs.Errorf(s, "base64 error: %v", err)
		}
		return string(b), err
	case ConvFindReplace:
		// value is "old:new"
		split := strings.Split(t.value, ":")
		if len(split) != 2 {
			return s, fmt.Errorf("wrong number of values: %v", t.value)
		}
		return strings.ReplaceAll(s, split[0], split[1]), nil
	case ConvPrefix:
		return t.value + s, nil
	case ConvSuffix:
		return s + t.value, nil
	case ConvSuffixKeepExtension:
		return SuffixKeepExtension(s, t.value), nil
	case ConvTrimPrefix:
		return strings.TrimPrefix(s, t.value), nil
	case ConvTrimSuffix:
		return strings.TrimSuffix(s, t.value), nil
	case ConvTruncate:
		maxChars, err := strconv.Atoi(t.value)
		if err != nil {
			return s, err
		}
		return truncateChars(s, maxChars, false), nil
	case ConvTruncateKeepExtension:
		maxChars, err := strconv.Atoi(t.value)
		if err != nil {
			return s, err
		}
		return truncateChars(s, maxChars, true), nil
	case ConvTruncateBytes:
		maxBytes, err := strconv.Atoi(t.value)
		if err != nil {
			return s, err
		}
		return truncateBytes(s, maxBytes, false)
	case ConvTruncateBytesKeepExtension:
		maxBytes, err := strconv.Atoi(t.value)
		if err != nil {
			return s, err
		}
		return truncateBytes(s, maxBytes, true)
	case ConvEncoder:
		var enc encoder.MultiEncoder
		err := enc.Set(t.value)
		if err != nil {
			return s, err
		}
		return enc.Encode(s), nil
	case ConvDecoder:
		var enc encoder.MultiEncoder
		err := enc.Set(t.value)
		if err != nil {
			return s, err
		}
		return enc.Decode(s), nil
	case ConvISO8859_1:
		return encodeWithReplacement(s, charmap.ISO8859_1), nil
	case ConvWindows1252:
		return encodeWithReplacement(s, charmap.Windows1252), nil
	case ConvMacintosh:
		return encodeWithReplacement(s, charmap.Macintosh), nil
	case ConvCharmap:
		var cmapType CharmapChoices
		err := cmapType.Set(t.value)
		if err != nil {
			return s, err
		}
		c := charmapByID(cmapType)
		return encodeWithReplacement(s, c), nil
	case ConvLowercase:
		return strings.ToLower(s), nil
	case ConvUppercase:
		return strings.ToUpper(s), nil
	case ConvTitlecase:
		// NOTE: strings.ToTitle maps every rune to its title case, which
		// for most scripts is equivalent to upper-casing the whole string.
		return strings.ToTitle(s), nil
	case ConvASCII:
		return toASCII(s), nil
	case ConvURL:
		return url.QueryEscape(s), nil
	case ConvDate:
		return s + AppyTimeGlobs(t.value, time.Now()), nil
	case ConvRegex:
		// value is "pattern/replacement"
		split := strings.Split(t.value, "/")
		if len(split) != 2 {
			return s, fmt.Errorf("regex syntax error: %v", t.value)
		}
		// Compile (not MustCompile) so an invalid user-supplied pattern
		// returns an error instead of panicking.
		re, err := regexp.Compile(split[0])
		if err != nil {
			return s, err
		}
		return re.ReplaceAllString(s, split[1]), nil
	case ConvCommand:
		return mapper(s, t.value)
	default:
		return "", errors.New("this option is not yet implemented")
	}
}
// SuffixKeepExtension adds a suffix while keeping extension
//
// i.e. file.txt becomes file_somesuffix.txt not file.txt_somesuffix
//
// Multi-part extensions recognised by splitExtension (e.g. ".tar.gz")
// are kept together after the suffix.
func SuffixKeepExtension(remote string, suffix string) string {
	base, exts := splitExtension(remote)
	return base + suffix + exts
}
// splitExtension splits remote into a base and its extension chain.
//
// The final extension is always kept. Earlier extensions are only
// included when they are registered mime types, so "a.tar.gz" can split
// as ("a", ".tar.gz") but "a.b.txt" splits as ("a.b", ".txt").
func splitExtension(remote string) (base, exts string) {
	base = remote
	for first := true; ; first = false {
		ext := path.Ext(base)
		if ext == "" {
			break
		}
		// Second and subsequent extensions must be known mime types
		// to count as part of the extension chain.
		if !first && mime.TypeByExtension(ext) == "" {
			break
		}
		base = strings.TrimSuffix(base, ext)
		exts = ext + exts
	}
	return base, exts
}
// truncateChars shortens s to at most max UTF-8 characters.
//
// If keepExtension is true the extension (as found by splitExtension) is
// preserved and the base is shortened instead. If the extension alone is
// max characters or longer it cannot be preserved, so the whole string is
// truncated plainly -- the original code panicked on a negative slice
// index in that case.
func truncateChars(s string, max int, keepExtension bool) string {
	if max <= 0 || utf8.RuneCountInString(s) <= max {
		return s
	}
	base, exts := s, ""
	if keepExtension {
		base, exts = splitExtension(s)
		if utf8.RuneCountInString(exts) >= max {
			// Extension does not fit -- fall back to plain truncation.
			base, exts = s, ""
		}
	}
	runes := []rune(base)
	keep := max - utf8.RuneCountInString(exts)
	if keep > len(runes) {
		keep = len(runes)
	}
	return string(runes[:keep]) + exts
}
// truncateBytes is like truncateChars but counts the number of bytes, not UTF-8 characters
func truncateBytes(s string, max int, keepExtension bool) (string, error) {
	if max <= 0 || len(s) <= max {
		return s, nil
	}
	base, exts := s, ""
	if keepExtension {
		base, exts = splitExtension(s)
	}
	// Shrink the kept prefix of base until base+exts fits in max bytes
	// without ending in the middle of a multi-byte UTF-8 sequence.
	for keep := max - len(exts); keep > 0; keep-- {
		candidate := append([]byte(base)[:keep], exts...)
		if len(candidate) <= max && utf8.Valid(candidate) {
			return string(candidate), nil
		}
	}
	return "", errors.New("could not truncate to valid UTF-8")
}
// validateSegment rejects transformed segments that would alter path
// structure: blank (or all-whitespace) names and names containing a
// path separator.
func validateSegment(s string) error {
	switch {
	case strings.TrimSpace(s) == "":
		return errors.New("transform cannot render path segments empty")
	case strings.ContainsRune(s, '/'):
		return fmt.Errorf("transform cannot add path separators: %v", s)
	default:
		return nil
	}
}
// ParseGlobs determines whether a string contains {brackets}
// and returns the substring (including both brackets) for replacing
// substring is first opening bracket to last closing bracket --
// good for {{this}} but not {this}{this}
func ParseGlobs(s string) (hasGlobs bool, substring string) {
	start := strings.IndexByte(s, '{')
	if start < 0 {
		return false, ""
	}
	end := strings.LastIndexByte(s, '}')
	if end <= start {
		// no closing bracket after the opening one
		return false, ""
	}
	return true, s[start : end+1]
}
// TrimBrackets converts {{this}} to this
//
// Both '{' and '}' are stripped from both ends of the string.
func TrimBrackets(s string) string {
	return strings.TrimRight(strings.TrimLeft(s, "{}"), "{}")
}
// TimeFormat converts a user-supplied string to a Go time constant, if possible
//
// Unrecognised strings are returned unchanged so they can be used as
// custom layouts directly.
func TimeFormat(timeFormat string) string {
	layouts := map[string]string{
		"Layout":      time.Layout,
		"ANSIC":       time.ANSIC,
		"UnixDate":    time.UnixDate,
		"RubyDate":    time.RubyDate,
		"RFC822":      time.RFC822,
		"RFC822Z":     time.RFC822Z,
		"RFC850":      time.RFC850,
		"RFC1123":     time.RFC1123,
		"RFC1123Z":    time.RFC1123Z,
		"RFC3339":     time.RFC3339,
		"RFC3339Nano": time.RFC3339Nano,
		"Kitchen":     time.Kitchen,
		"Stamp":       time.Stamp,
		"StampMilli":  time.StampMilli,
		"StampMicro":  time.StampMicro,
		"StampNano":   time.StampNano,
		"DateTime":    time.DateTime,
		"DateOnly":    time.DateOnly,
		"TimeOnly":    time.TimeOnly,
		// not actually a Go constant -- but useful as macOS filenames can't have colons
		"MacFriendlyTime": "2006-01-02 0304PM",
		"macfriendlytime": "2006-01-02 0304PM",
		"mac":             "2006-01-02 0304PM",
		"YYYYMMDD":        "20060102",
	}
	if layout, ok := layouts[timeFormat]; ok {
		return layout
	}
	return timeFormat
}
// AppyTimeGlobs converts "myfile-{DateOnly}.txt" to "myfile-2006-01-02.txt"
//
// The time is rendered in the local timezone.
func AppyTimeGlobs(s string, t time.Time) string {
	found, glob := ParseGlobs(s)
	if !found {
		return s
	}
	formatted := t.Local().Format(TimeFormat(TrimBrackets(glob)))
	return strings.ReplaceAll(s, glob, formatted)
}
// mapper renames s by running an external command with s as its single
// argument; the command's trimmed combined output becomes the new name.
func mapper(s string, command string) (string, error) {
	out, err := exec.Command(command, s).CombinedOutput()
	trimmed := bytes.TrimSpace(out)
	if err != nil {
		return s, fmt.Errorf("%s: error running command %q: %v", trimmed, command+" "+s, err)
	}
	return string(trimmed), nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/transform/options.go | lib/transform/options.go | package transform
import (
"context"
"errors"
"slices"
"strings"
"sync"
"github.com/rclone/rclone/fs"
)
// transform is a single parsed --name-transform rule.
type transform struct {
	key   Algo   // for example, "prefix"
	value string // for example, "some_prefix_"
	tag   tag    // file, dir, or all
}

// tag controls which part of the file path is affected (file, dir, all)
type tag int

// tag modes
const (
	file tag = iota // Only transform the leaf name of files (default)
	dir             // Only transform name of directories - these may appear anywhere in the path
	all             // Transform the entire path for files and directories
)
// Transforming returns true when transforms are in use
func Transforming(ctx context.Context) bool {
	return len(fs.GetConfig(ctx).NameTransform) > 0
}
// SetOptions sets the options in ctx from flags passed in.
// Any existing flags will be overwritten.
// s should be in the same format as cmd line flags, i.e. "all,prefix=XXX"
func SetOptions(ctx context.Context, s ...string) error {
	fs.GetConfig(ctx).NameTransform = s
	// Parse immediately so invalid flags are reported now, not on first use.
	_, err := getOptions(ctx)
	return err
}
// cache to minimize re-parsing
//
// cachedNameTransform is the last flag slice parsed and cachedOpt the
// corresponding parsed transforms. Both are guarded by cacheLock.
var (
	cachedNameTransform []string
	cachedOpt           []transform
	cacheLock           sync.Mutex
)
// getOptions sets the options from flags passed in.
//
// Results for a given flag slice are cached; the cache is re-read and
// re-written under cacheLock since updateCache runs from any goroutine.
func getOptions(ctx context.Context) (opt []transform, err error) {
	if !Transforming(ctx) {
		return opt, nil
	}
	ci := fs.GetConfig(ctx)
	// return cached opt if available -- read under cacheLock to avoid
	// racing with updateCache (the original read these unlocked)
	cacheLock.Lock()
	if cachedNameTransform != nil && slices.Equal(ci.NameTransform, cachedNameTransform) {
		opt = cachedOpt
		cacheLock.Unlock()
		return opt, nil
	}
	cacheLock.Unlock()
	for _, flag := range ci.NameTransform {
		t, err := parse(flag)
		if err != nil {
			return opt, err
		}
		opt = append(opt, t)
	}
	updateCache(ci.NameTransform, opt)
	return opt, nil
}
// updateCache records the parsed transforms for nt so later calls with
// the same --name-transform flags can skip re-parsing.
func updateCache(nt []string, o []transform) {
	cacheLock.Lock()
	defer cacheLock.Unlock()
	cachedNameTransform = slices.Clone(nt)
	cachedOpt = o
}
// parse a single instance of --name-transform
func parse(s string) (transform, error) {
	var t transform
	if s == "" {
		return t, nil
	}
	rest := t.parseTag(s)
	if err := t.parseKeyVal(rest); err != nil {
		return t, err
	}
	return t, nil
}
// parse the tag (file/dir/all), set the option accordingly, and return the trimmed string
//
// we don't worry about errors here because it will error anyway as an invalid key
func (t *transform) parseTag(s string) string {
	if rest, ok := strings.CutPrefix(s, "file,"); ok {
		t.tag = file
		return rest
	}
	if rest, ok := strings.CutPrefix(s, "dir,"); ok {
		t.tag = dir
		return rest
	}
	if rest, ok := strings.CutPrefix(s, "all,"); ok {
		t.tag = all
		return rest
	}
	return s
}
// parse key and value (if any) by splitting at the first '=' sign
// (file/dir/all tag has already been trimmed)
//
// Splitting at the first '=' (rather than requiring exactly one '=' in
// the whole string) lets values themselves contain '=' -- e.g. base64
// padding or replacement strings.
func (t *transform) parseKeyVal(s string) (err error) {
	key, value, found := strings.Cut(s, "=")
	if !found {
		// No value supplied -- valid only for keys that don't need one.
		if err := t.key.Set(s); err != nil {
			return err
		}
		if t.requiresValue() {
			fs.Debugf(nil, "received %v", s)
			return errors.New("value is required for " + t.key.String())
		}
		return nil
	}
	if key == "" {
		return errors.New("key cannot be blank")
	}
	if err := t.key.Set(key); err != nil {
		return err
	}
	t.value = value
	return nil
}
// requiresValue reports whether this particular algorithm needs a
// "key=value" form rather than a bare key.
func (t *transform) requiresValue() bool {
	switch t.key {
	case ConvFindReplace, ConvPrefix, ConvSuffix, ConvSuffixKeepExtension,
		ConvTrimPrefix, ConvTrimSuffix, ConvIndex, ConvDate,
		ConvTruncate, ConvTruncateKeepExtension, ConvTruncateBytes,
		ConvTruncateBytesKeepExtension, ConvEncoder, ConvDecoder,
		ConvRegex, ConvCommand:
		return true
	}
	return false
}
// Algo describes conversion setting
type Algo = fs.Enum[transformChoices]

// Supported transform options
//
// The order here must match the string list in transformChoices.Choices.
const (
	ConvNone Algo = iota
	ConvToNFC
	ConvToNFD
	ConvToNFKC
	ConvToNFKD
	ConvFindReplace
	ConvPrefix
	ConvSuffix
	ConvSuffixKeepExtension
	ConvTrimPrefix
	ConvTrimSuffix
	ConvIndex
	ConvDate
	ConvTruncate
	ConvTruncateKeepExtension
	ConvTruncateBytes
	ConvTruncateBytesKeepExtension
	ConvBase64Encode
	ConvBase64Decode
	ConvEncoder
	ConvDecoder
	ConvISO8859_1
	ConvWindows1252
	ConvMacintosh
	ConvCharmap
	ConvLowercase
	ConvUppercase
	ConvTitlecase
	ConvASCII
	ConvURL
	ConvRegex
	ConvCommand
)
// transformChoices implements the choices for the Algo enum.
type transformChoices struct{}

// Choices returns the flag names for the transforms; the slice index of
// each name corresponds to its Conv* constant value.
func (transformChoices) Choices() []string {
	return []string{
		ConvNone:                       "none",
		ConvToNFC:                      "nfc",
		ConvToNFD:                      "nfd",
		ConvToNFKC:                     "nfkc",
		ConvToNFKD:                     "nfkd",
		ConvFindReplace:                "replace",
		ConvPrefix:                     "prefix",
		ConvSuffix:                     "suffix",
		ConvSuffixKeepExtension:        "suffix_keep_extension",
		ConvTrimPrefix:                 "trimprefix",
		ConvTrimSuffix:                 "trimsuffix",
		ConvIndex:                      "index",
		ConvDate:                       "date",
		ConvTruncate:                   "truncate",
		ConvTruncateKeepExtension:      "truncate_keep_extension",
		ConvTruncateBytes:              "truncate_bytes",
		ConvTruncateBytesKeepExtension: "truncate_bytes_keep_extension",
		ConvBase64Encode:               "base64encode",
		ConvBase64Decode:               "base64decode",
		ConvEncoder:                    "encoder",
		ConvDecoder:                    "decoder",
		ConvISO8859_1:                  "ISO-8859-1",
		ConvWindows1252:                "Windows-1252",
		ConvMacintosh:                  "Macintosh",
		ConvCharmap:                    "charmap",
		ConvLowercase:                  "lowercase",
		ConvUppercase:                  "uppercase",
		ConvTitlecase:                  "titlecase",
		ConvASCII:                      "ascii",
		ConvURL:                        "url",
		ConvRegex:                      "regex",
		ConvCommand:                    "command",
	}
}

// Type describes the enum for command line help.
func (transformChoices) Type() string {
	return "string"
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/transform/cmap.go | lib/transform/cmap.go | package transform
import (
"fmt"
"strings"
"sync"
"github.com/rclone/rclone/fs"
"golang.org/x/text/encoding/charmap"
)
var (
	// cmaps maps the enum index used by cmapChoices to its charmap.
	// It is populated by Choices and guarded by lock.
	cmaps = map[int]*charmap.Charmap{}
	lock  sync.Mutex
)
// CharmapChoices is an enum of the character map choices.
type CharmapChoices = fs.Enum[cmapChoices]

// cmapChoices provides the choices backing CharmapChoices.
type cmapChoices struct{}
// Choices returns the display names of all available charmaps.
//
// As a side effect it (re)populates the package-level cmaps registry used
// by charmapByID. The lock is taken once for the whole rebuild rather
// than once per entry as before.
func (cmapChoices) Choices() []string {
	choices := []string{}
	i := 0
	lock.Lock()
	defer lock.Unlock()
	for _, enc := range charmap.All {
		c, ok := enc.(*charmap.Charmap)
		if !ok {
			continue
		}
		// Flag-friendly name: spaces replaced by dashes.
		name := strings.ReplaceAll(c.String(), " ", "-")
		if name == "" {
			name = fmt.Sprintf("unknown-%d", i)
		}
		cmaps[i] = c
		choices = append(choices, name)
		i++
	}
	return choices
}
// Type describes the enum for command line help.
func (cmapChoices) Type() string {
	return "string"
}
// charmapByID looks up the charmap registered for the given enum value,
// returning nil if it is unknown (e.g. Choices has not run yet).
func charmapByID(cm fs.Enum[cmapChoices]) *charmap.Charmap {
	lock.Lock()
	defer lock.Unlock()
	// A missing key yields the zero value, which is nil.
	return cmaps[int(cm)]
}
// encodeWithReplacement maps s onto the given 8-bit charmap,
// substituting '_' for any rune the charmap cannot represent.
func encodeWithReplacement(s string, cmap *charmap.Charmap) string {
	return strings.Map(func(r rune) rune {
		if b, ok := cmap.EncodeRune(r); ok {
			// Round-trip through the charmap to normalise the rune.
			return cmap.DecodeByte(b)
		}
		return '_'
	}, s)
}
// toASCII drops every non-ASCII rune from s (runes above 127 are
// removed entirely rather than replaced).
func toASCII(s string) string {
	return strings.Map(func(r rune) rune {
		if r > 127 {
			return -1 // -1 tells strings.Map to drop the rune
		}
		return r
	}, s)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/structs/structs_test.go | lib/structs/structs_test.go | package structs
import (
"fmt"
"net/http"
"testing"
"github.com/stretchr/testify/assert"
)
// returns the "%p" representation of the thing passed in
func ptr(p any) string {
return fmt.Sprintf("%p", p)
}
// TestSetDefaults checks that SetDefaults copies every public field of
// http.Transport, including the function-valued ones.
func TestSetDefaults(t *testing.T) {
	old := http.DefaultTransport.(*http.Transport)
	newT := new(http.Transport)
	SetDefaults(newT, old)
	// Can't use assert.Equal or reflect.DeepEqual for this as it has functions in
	// Check functions by comparing the "%p" representations of them
	assert.Equal(t, ptr(old.Proxy), ptr(newT.Proxy), "when checking .Proxy")
	assert.Equal(t, ptr(old.DialContext), ptr(newT.DialContext), "when checking .DialContext")
	assert.Equal(t, ptr(old.DialTLSContext), ptr(newT.DialTLSContext), "when checking .DialTLSContext")
	assert.Equal(t, old.TLSClientConfig, newT.TLSClientConfig, "when checking .TLSClientConfig")
	assert.Equal(t, old.TLSHandshakeTimeout, newT.TLSHandshakeTimeout, "when checking .TLSHandshakeTimeout")
	assert.Equal(t, old.DisableKeepAlives, newT.DisableKeepAlives, "when checking .DisableKeepAlives")
	assert.Equal(t, old.DisableCompression, newT.DisableCompression, "when checking .DisableCompression")
	assert.Equal(t, old.MaxIdleConns, newT.MaxIdleConns, "when checking .MaxIdleConns")
	assert.Equal(t, old.MaxIdleConnsPerHost, newT.MaxIdleConnsPerHost, "when checking .MaxIdleConnsPerHost")
	assert.Equal(t, old.IdleConnTimeout, newT.IdleConnTimeout, "when checking .IdleConnTimeout")
	assert.Equal(t, old.ResponseHeaderTimeout, newT.ResponseHeaderTimeout, "when checking .ResponseHeaderTimeout")
	assert.Equal(t, old.ExpectContinueTimeout, newT.ExpectContinueTimeout, "when checking .ExpectContinueTimeout")
	assert.Equal(t, old.TLSNextProto, newT.TLSNextProto, "when checking .TLSNextProto")
	assert.Equal(t, old.MaxResponseHeaderBytes, newT.MaxResponseHeaderBytes, "when checking .MaxResponseHeaderBytes")
}
// aType and bType are deliberately overlapping-but-different structs
// used to exercise SetFrom's name/type matching rules.
type aType struct {
	Matching      string // present in both with the same type -- copied
	OnlyA         string // only in aType -- untouched
	MatchingInt   int    // present in both with the same type -- copied
	DifferentType string // same name, different type in bType -- not copied
}

type bType struct {
	Matching      string
	OnlyB         string // only in bType -- ignored by SetFrom(&a, &b)
	MatchingInt   int
	DifferentType int
	Unused        string // never referenced by aType
}
// TestSetFrom checks that only fields matching in both name and type are
// copied from b to a, and that b is left unchanged.
func TestSetFrom(t *testing.T) {
	a := aType{
		Matching:      "a",
		OnlyA:         "onlyA",
		MatchingInt:   1,
		DifferentType: "surprise",
	}

	b := bType{
		Matching:      "b",
		OnlyB:         "onlyB",
		MatchingInt:   2,
		DifferentType: 7,
		Unused:        "Ha",
	}
	bBefore := b

	SetFrom(&a, &b)

	assert.Equal(t, aType{
		Matching:      "b",
		OnlyA:         "onlyA",
		MatchingInt:   2,
		DifferentType: "surprise",
	}, a)

	// the source must not be modified
	assert.Equal(t, bBefore, b)
}
// TestSetFromReversed is TestSetFrom with the argument roles swapped to
// check the copy is symmetric in the matching rules.
func TestSetFromReversed(t *testing.T) {
	a := aType{
		Matching:      "a",
		OnlyA:         "onlyA",
		MatchingInt:   1,
		DifferentType: "surprise",
	}
	aBefore := a

	b := bType{
		Matching:      "b",
		OnlyB:         "onlyB",
		MatchingInt:   2,
		DifferentType: 7,
		Unused:        "Ha",
	}

	SetFrom(&b, &a)

	assert.Equal(t, bType{
		Matching:      "a",
		OnlyB:         "onlyB",
		MatchingInt:   1,
		DifferentType: 7,
		Unused:        "Ha",
	}, b)

	// the source must not be modified
	assert.Equal(t, aBefore, a)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/structs/structs.go | lib/structs/structs.go | // Package structs is for manipulating structures with reflection
package structs
import (
"reflect"
)
// SetFrom sets the public members of a from b
//
// a and b should be pointers to structs
//
// a can be a different type from b
//
// Only the Fields which have the same name and assignable type on a
// and b will be set.
//
// This is useful for copying between almost identical structures that
// are frequently present in auto-generated code for cloud storage
// interfaces.
func SetFrom(a, b any) {
	dstVal := reflect.ValueOf(a).Elem()
	dstType := dstVal.Type()
	srcVal := reflect.ValueOf(b).Elem()
	srcType := srcVal.Type()
	for i := 0; i < srcType.NumField(); i++ {
		srcField := srcType.Field(i)
		// The destination must have a field of the same name...
		dstField, found := dstType.FieldByName(srcField.Name)
		if !found {
			continue
		}
		dst := dstVal.FieldByName(srcField.Name)
		// ...which is settable (exported) and type-compatible.
		if dst.IsValid() && dst.CanSet() && srcField.Type.AssignableTo(dstField.Type) {
			dst.Set(srcVal.Field(i))
		}
	}
}
// SetDefaults for a from b
//
// a and b should be pointers to the same kind of struct
//
// This copies the public members only from b to a. This is useful if
// you can't just use a struct copy because it contains a private
// mutex, e.g. as http.Transport.
func SetDefaults(a, b any) {
	dst := reflect.ValueOf(a).Elem()
	src := reflect.ValueOf(b).Elem()
	n := dst.Type().NumField()
	for i := 0; i < n; i++ {
		// Only exported fields are settable; private ones (mutexes
		// etc.) are skipped.
		if field := dst.Field(i); field.CanSet() {
			field.Set(src.Field(i))
		}
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/kv/types.go | lib/kv/types.go | package kv
import (
"context"
"errors"
)
// package errors
var (
	ErrEmpty       = errors.New("database empty")         // no database file or bucket yet
	ErrInactive    = errors.New("database stopped")       // operation attempted after Stop
	ErrUnsupported = errors.New("unsupported on this OS") // returned by the plan9/js stubs
)
// Op represents a database operation
//
// Do is called with the bucket for the operation's facility inside a
// transaction.
type Op interface {
	Do(context.Context, Bucket) error
}

// Bucket decouples bbolt.Bucket from key-val operations
type Bucket interface {
	Get([]byte) []byte
	Put([]byte, []byte) error
	Delete([]byte) error
	ForEach(func(bkey, data []byte) error) error
	Cursor() Cursor
}

// Cursor decouples bbolt.Cursor from key-val operations
type Cursor interface {
	First() ([]byte, []byte)
	Next() ([]byte, []byte)
	Seek([]byte) ([]byte, []byte)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/kv/unsupported.go | lib/kv/unsupported.go | //go:build plan9 || js
// Package kv provides key/value database.
package kv
import (
"context"
"github.com/rclone/rclone/fs"
)
// DB represents a key-value database
type DB struct{}

// Supported returns true on supported OSes
func Supported() bool { return false }

// Start a key-value database
func Start(ctx context.Context, facility string, f fs.Fs) (*DB, error) {
	return nil, ErrUnsupported
}

// Get returns database for given filesystem and facility
//
// The parameter order matches the bbolt implementation in bolt.go so
// that callers compile identically on every platform (the stub
// previously took (f, facility) while bolt.go takes (facility, f)).
func Get(facility string, f fs.Fs) *DB { return nil }

// Path returns database path
func (*DB) Path() string { return "UNSUPPORTED" }

// Do submits a key-value request and waits for results
func (*DB) Do(write bool, op Op) error {
	return ErrUnsupported
}

// Stop a database loop, optionally removing the file
func (*DB) Stop(remove bool) error {
	return ErrUnsupported
}

// IsStopped returns true if db is already stopped
func (db *DB) IsStopped() bool {
	return true
}

// Exit stops all databases
func Exit() {}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/kv/internal_test.go | lib/kv/internal_test.go | //go:build !plan9 && !js
package kv
import (
"context"
"fmt"
"sync"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestKvConcurrency checks that concurrent Start calls for the same
// facility share one reference-counted DB, and that it only closes after
// a matching number of Stop calls.
func TestKvConcurrency(t *testing.T) {
	require.Equal(t, 0, len(dbMap), "no databases can be started initially")

	const threadNum = 5
	var wg sync.WaitGroup
	ctx := context.Background()
	results := make([]*DB, threadNum)
	wg.Add(threadNum)
	for i := range threadNum {
		go func(i int) {
			db, err := Start(ctx, "test", nil)
			require.NoError(t, err)
			require.NotNil(t, db)
			results[i] = db
			wg.Done()
		}(i)
	}
	wg.Wait()

	// must have a single multi-referenced db
	db := results[0]
	assert.Equal(t, 1, len(dbMap))
	assert.Equal(t, threadNum, db.refs)
	for i := range threadNum {
		assert.Equal(t, db, results[i])
	}

	// each Stop drops one reference; the db stays registered until the last
	for i := range threadNum {
		assert.Equal(t, 1, len(dbMap))
		err := db.Stop(false)
		assert.NoError(t, err, "unexpected error %v at retry %d", err, i)
	}
	assert.Equal(t, 0, len(dbMap), "must be closed in the end")

	// stopping an already-stopped db reports ErrInactive
	err := db.Stop(false)
	assert.ErrorIs(t, err, ErrInactive, "missing expected stop indication")
}
// TestKvExit checks that Exit closes every open database regardless of
// how many references each one holds.
func TestKvExit(t *testing.T) {
	require.Equal(t, 0, len(dbMap), "no databases can be started initially")
	const dbNum = 5
	ctx := context.Background()
	for i := range dbNum {
		facility := fmt.Sprintf("test-%d", i)
		// take i+1 references on the i-th database
		for j := 0; j <= i; j++ {
			db, err := Start(ctx, facility, nil)
			require.NoError(t, err)
			require.NotNil(t, db)
		}
	}
	assert.Equal(t, dbNum, len(dbMap))

	Exit()
	assert.Equal(t, 0, len(dbMap))
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/kv/bolt.go | lib/kv/bolt.go | //go:build !plan9 && !js
// Package kv provides key/value database.
package kv
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/lib/encoder"
"go.etcd.io/bbolt"
)
const (
	initTime   = 24 * time.Hour // something reasonably long
	dbFileMode = 0600
	dbDirMode  = 0700
	queueSize  = 2 // buffered requests in the loop queue
)

// DB represents a key-value database
type DB struct {
	name     string // file name of the database (see makeName)
	path     string // full path of the database file in the cache dir
	facility string // bucket name used for all operations
	refs     int    // reference count, guarded by mu
	bolt     *bbolt.DB
	mu       sync.Mutex
	canWrite bool // whether bolt is currently open read-write
	queue    chan *request
	lockTime time.Duration // how long to hold the file open after a write
	idleTime time.Duration // how long to hold the file open when idle
	openTime time.Duration // bbolt open timeout
	idleTimer *time.Timer
	lockTimer *time.Timer
}

var (
	dbMap  = map[string]*DB{} // open databases by name, guarded by dbMut
	dbMut  sync.Mutex
	atExit bool // true while Exit is tearing everything down
)

// Supported returns true on supported OSes
func Supported() bool { return true }
// makeName makes a store name from the facility and (optionally) the
// remote's name, e.g. "remote~facility.bolt" or just "facility.bolt".
func makeName(facility string, f fs.Fs) string {
	if f == nil {
		return facility + ".bolt"
	}
	name := f.Name()
	// Strip any connection-string modifier, e.g. `remote{XXXX}`.
	if before, _, found := strings.Cut(name, "{"); found {
		name = before
	}
	return encoder.OS.FromStandardPath(name) + "~" + facility + ".bolt"
}
// Start a new key-value database
//
// If a database for this facility/fs is already running, a new reference
// to it is returned instead of opening a second one. Otherwise the bolt
// file is created under <cachedir>/kv and a service goroutine (loop) is
// started to serialise operations.
func Start(ctx context.Context, facility string, f fs.Fs) (*DB, error) {
	dbMut.Lock()
	defer dbMut.Unlock()

	// reuse an existing database if possible (this takes a reference)
	if db := lockedGet(facility, f); db != nil {
		return db, nil
	}

	dir := filepath.Join(config.GetCacheDir(), "kv")
	if err := os.MkdirAll(dir, dbDirMode); err != nil {
		return nil, err
	}

	name := makeName(facility, f)
	lockTime := time.Duration(fs.GetConfig(ctx).KvLockTime)
	db := &DB{
		name:     name,
		path:     filepath.Join(dir, name),
		facility: facility,
		refs:     1,
		lockTime: lockTime,
		// idle/open times are derived from the configured lock time
		idleTime:  lockTime / 4,
		openTime:  lockTime * 2,
		idleTimer: time.NewTimer(initTime),
		lockTimer: time.NewTimer(initTime),
		queue:     make(chan *request, queueSize),
	}

	// Remove stale or empty database files left over from unit tests.
	fi, err := os.Stat(db.path)
	if strings.HasSuffix(os.Args[0], ".test") || (err == nil && fi.Size() == 0) {
		_ = os.Remove(db.path)
		fs.Infof(db.name, "drop cache remaining after unit test")
	}

	// ErrEmpty just means the file doesn't exist yet -- that's fine.
	if err = db.open(ctx, false); err != nil && err != ErrEmpty {
		return nil, fmt.Errorf("cannot open db: %s: %w", db.path, err)
	}

	dbMap[name] = db
	go db.loop()
	return db, nil
}
// Get returns database record for given filesystem and facility
//
// Returns nil if no database is currently running for that pair; on
// success the returned DB holds an extra reference (release with Stop).
func Get(facility string, f fs.Fs) *DB {
	dbMut.Lock()
	defer dbMut.Unlock()
	return lockedGet(facility, f)
}
// lockedGet returns the running database for facility/f, taking an extra
// reference on it. The caller must hold dbMut.
func lockedGet(facility string, f fs.Fs) *DB {
	db, found := dbMap[makeName(facility, f)]
	if !found {
		return nil
	}
	db.mu.Lock()
	db.refs++
	db.mu.Unlock()
	return db
}
// Path returns database path
func (db *DB) Path() string { return db.path }

// modeNames maps the forWrite flag to a human-readable mode for logging.
var modeNames = map[bool]string{false: "reading", true: "writing"}
// open the bolt file for reading or writing, reusing the current handle
// if it is already open in a sufficient mode. Resets the idle and lock
// timers on success.
func (db *DB) open(ctx context.Context, forWrite bool) (err error) {
	// Already open and good enough (read-write covers read-only use).
	if db.bolt != nil && (db.canWrite || !forWrite) {
		return nil
	}
	// Reopen in the required mode.
	_ = db.close()
	db.canWrite = forWrite

	if !forWrite {
		// mitigate https://github.com/etcd-io/bbolt/issues/98
		// (opening a missing file read-only misbehaves)
		_, err = os.Stat(db.path)
		if os.IsNotExist(err) {
			return ErrEmpty
		}
	}

	opt := &bbolt.Options{
		Timeout:  db.openTime,
		ReadOnly: !forWrite,
	}
	openMode := modeNames[forWrite]
	startTime := time.Now()

	// Retry the open up to LowLevelRetries times -- another process may
	// be holding the file lock (bbolt.Open itself waits up to Timeout).
	var bolt *bbolt.DB
	retry := 1
	maxRetries := fs.GetConfig(ctx).LowLevelRetries
	for {
		bolt, err = bbolt.Open(db.path, dbFileMode, opt)
		if err == nil || retry >= maxRetries {
			break
		}
		fs.Debugf(db.name, "Retry #%d opening for %s: %v", retry, openMode, err)
		retry++
	}
	if err != nil {
		return err
	}
	fs.Debugf(db.name, "Opened for %s in %v", openMode, time.Since(startTime))

	// Restart both close-down timers now the file is held open.
	_ = db.lockTimer.Reset(db.lockTime)
	_ = db.idleTimer.Reset(db.idleTime)
	db.bolt = bolt
	return nil
}
// close releases the bolt file handle (if open) and stops the timers
// that would otherwise trigger a redundant close.
func (db *DB) close() error {
	if db.bolt == nil {
		return nil
	}
	_ = db.lockTimer.Stop()
	_ = db.idleTimer.Stop()
	err := db.bolt.Close()
	db.bolt = nil
	fs.Debugf(db.name, "released")
	return err
}
// loop over database operations sequentially
//
// This is the single goroutine that owns the bolt handle: requests come
// in over db.queue and are handled one at a time; the idle and lock
// timers close the file between bursts of activity. The loop exits when
// an opStop request drops the reference count to zero.
func (db *DB) loop() {
	ctx := context.Background()
	var req *request
	quit := false
	for !quit {
		select {
		case req = <-db.queue:
			// handle returns true for the final opStop; in that case
			// wg.Done is deferred until after cleanup below.
			if quit = req.handle(ctx, db); !quit {
				req.wg.Done()
				_ = db.idleTimer.Reset(db.idleTime)
			}
		case <-db.idleTimer.C:
			// no requests for a while -- release the file lock
			_ = db.close()
		case <-db.lockTimer.C:
			// held the file long enough -- release the file lock
			_ = db.close()
		}
	}
	// Mark the db inactive and deregister it (Exit clears dbMap itself).
	db.queue = nil
	if !atExit {
		dbMut.Lock()
		delete(dbMap, db.name)
		dbMut.Unlock()
	}
	// Release the caller waiting on the final opStop.
	req.wg.Done()
}
// Do a key-value operation and return error when done
//
// write selects a writable transaction; the request is serialised
// through the loop goroutine and this call blocks until it completes.
//
// NOTE(review): db.queue is read here and set to nil by loop() without
// synchronisation -- presumably benign in practice, but worth confirming.
func (db *DB) Do(write bool, op Op) error {
	if db == nil || db.queue == nil {
		return ErrInactive
	}
	r := &request{
		op: op,
		wr: write,
	}
	r.wg.Add(1)
	db.queue <- r
	r.wg.Wait()
	return r.err
}
// request encapsulates a synchronous operation and its results
type request struct {
	op  Op
	wr  bool  // true for a writable transaction
	err error // result, valid once wg is released
	wg  sync.WaitGroup
}
// handle a key-value request with given DB
// returns true as a signal to quit the loop
func (r *request) handle(ctx context.Context, db *DB) bool {
	db.mu.Lock()
	defer db.mu.Unlock()
	if op, stop := r.op.(*opStop); stop {
		r.err = db.close()
		if op.remove {
			// Only report a real removal failure: a successful Remove
			// (err == nil) or a missing file must not clobber the close
			// error already in r.err. The original overwrote r.err with
			// nil whenever Remove succeeded.
			if err := os.Remove(db.path); err != nil && !os.IsNotExist(err) {
				r.err = err
			}
		}
		db.refs--
		return db.refs <= 0
	}
	r.err = db.execute(ctx, r.op, r.wr)
	return false
}
// execute runs a key-value DB operation inside a bolt transaction,
// opening the database in the required mode first. Reads use a View
// transaction; writes use Update and create the facility bucket on
// demand.
func (db *DB) execute(ctx context.Context, op Op, write bool) error {
	if err := db.open(ctx, write); err != nil {
		return err
	}
	facility := []byte(db.facility)
	if !write {
		return db.bolt.View(func(tx *bbolt.Tx) error {
			b := tx.Bucket(facility)
			if b == nil {
				return ErrEmpty
			}
			return op.Do(ctx, &bucketAdapter{b})
		})
	}
	return db.bolt.Update(func(tx *bbolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists(facility)
		if err != nil || b == nil {
			return ErrEmpty
		}
		return op.Do(ctx, &bucketAdapter{b})
	})
}
// bucketAdapter is a thin wrapper adapting kv.Bucket to bbolt.Bucket
//
// The embedded bucket satisfies Get/Put/Delete/ForEach directly; only
// Cursor needs wrapping to return the kv.Cursor interface type.
type bucketAdapter struct {
	*bbolt.Bucket
}

func (b *bucketAdapter) Cursor() Cursor {
	return b.Bucket.Cursor()
}
// Stop a database loop, optionally removing the file
//
// This drops one reference; the loop only exits when the count hits zero.
func (db *DB) Stop(remove bool) error {
	return db.Do(false, &opStop{remove: remove})
}

// IsStopped returns true if db is already stopped
//
// NOTE(review): this checks the global dbMap being empty, not whether
// this particular db has stopped, and reads the map unsynchronised --
// confirm whether that is the intended semantics.
func (db *DB) IsStopped() bool {
	return len(dbMap) == 0
}

// opStop: close database and stop operation loop
type opStop struct {
	remove bool // also delete the database file
}

// Do is a no-op: opStop is recognised and handled specially in
// request.handle before any bucket operation runs.
func (*opStop) Do(context.Context, Bucket) error {
	return nil
}
// Exit immediately stops all databases
func Exit() {
	dbMut.Lock()
	atExit = true
	for _, db := range dbMap {
		// Zero the reference count under the db mutex (request.handle
		// also modifies refs under db.mu) so the single Stop below shuts
		// the loop down regardless of outstanding references.
		db.mu.Lock()
		db.refs = 0
		db.mu.Unlock()
		_ = db.Stop(false)
	}
	dbMap = map[string]*DB{}
	atExit = false
	dbMut.Unlock()
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/readers/gzip_test.go | lib/readers/gzip_test.go | package readers
import (
"bytes"
"compress/gzip"
"io"
"testing"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// checkClose wraps a Reader and records whether Close was called, so
// tests can verify that wrappers propagate Close to the underlying stream.
type checkClose struct {
	io.Reader
	closed bool
}

func (cc *checkClose) Close() error {
	cc.closed = true
	return nil
}
// TestGzipReader checks that NewGzipReader decompresses correctly and
// closes the underlying stream when it is closed.
func TestGzipReader(t *testing.T) {
	// Create some compressed data
	data := random.String(1000)
	var out bytes.Buffer
	zw := gzip.NewWriter(&out)
	_, err := io.Copy(zw, bytes.NewBufferString(data))
	require.NoError(t, err)
	require.NoError(t, zw.Close())
	gzData := out.Bytes()

	// Check we can decompress it
	cc := &checkClose{Reader: bytes.NewBuffer(gzData)}
	var decompressed bytes.Buffer
	zr, err := NewGzipReader(cc)
	require.NoError(t, err)
	_, err = io.Copy(&decompressed, zr)
	require.NoError(t, err)
	assert.Equal(t, data, decompressed.String())

	// Check the underlying close gets called
	assert.False(t, cc.closed)
	require.NoError(t, zr.Close())
	assert.True(t, cc.closed)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/readers/pattern_reader_test.go | lib/readers/pattern_reader_test.go | package readers
import (
"io"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestPatternReader checks the zero-length and short cases: reads yield
// the expected pattern bytes and further reads return io.EOF.
func TestPatternReader(t *testing.T) {
	b2 := make([]byte, 1)

	r := NewPatternReader(0)
	b, err := io.ReadAll(r)
	require.NoError(t, err)
	assert.Equal(t, []byte{}, b)
	n, err := r.Read(b2)
	require.Equal(t, io.EOF, err)
	require.Equal(t, 0, n)

	r = NewPatternReader(10)
	b, err = io.ReadAll(r)
	require.NoError(t, err)
	assert.Equal(t, []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, b)
	n, err = r.Read(b2)
	require.Equal(t, io.EOF, err)
	require.Equal(t, 0, n)
}
// TestPatternReaderSeek checks seeking from start, current and end, and
// that invalid whence values and negative positions error.
func TestPatternReaderSeek(t *testing.T) {
	r := NewPatternReader(1024)
	b, err := io.ReadAll(r)
	require.NoError(t, err)
	// the pattern repeats with period 251
	for i := range b {
		assert.Equal(t, byte(i%251), b[i])
	}

	n, err := r.Seek(1, io.SeekStart)
	require.NoError(t, err)
	assert.Equal(t, int64(1), n)

	// pos 1
	b2 := make([]byte, 10)
	nn, err := r.Read(b2)
	require.NoError(t, err)
	assert.Equal(t, 10, nn)
	assert.Equal(t, b[1:11], b2)

	// pos 11
	n, err = r.Seek(9, io.SeekCurrent)
	require.NoError(t, err)
	assert.Equal(t, int64(20), n)

	// pos 20
	nn, err = r.Read(b2)
	require.NoError(t, err)
	assert.Equal(t, 10, nn)
	assert.Equal(t, b[20:30], b2)

	n, err = r.Seek(-24, io.SeekEnd)
	require.NoError(t, err)
	assert.Equal(t, int64(1000), n)

	// pos 1000
	nn, err = r.Read(b2)
	require.NoError(t, err)
	assert.Equal(t, 10, nn)
	assert.Equal(t, b[1000:1010], b2)

	// Now test errors
	n, err = r.Seek(1, 400)
	require.Error(t, err)
	assert.Contains(t, err.Error(), "invalid whence")
	assert.Equal(t, int64(0), n)

	n, err = r.Seek(-1, io.SeekStart)
	require.Error(t, err)
	assert.Contains(t, err.Error(), "negative position")
	assert.Equal(t, int64(0), n)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/readers/gzip.go | lib/readers/gzip.go | package readers
import (
"compress/gzip"
"io"
)
// gzipReader wraps a *gzip.Reader so it closes the underlying stream
// which the gzip library doesn't.
type gzipReader struct {
*gzip.Reader
in io.ReadCloser
}
// NewGzipReader returns an io.ReadCloser which will read the stream
// and close it when Close is called.
//
// Unfortunately gz.Reader does not close the underlying stream so we
// can't use that directly.
func NewGzipReader(in io.ReadCloser) (io.ReadCloser, error) {
zr, err := gzip.NewReader(in)
if err != nil {
return nil, err
}
return &gzipReader{
Reader: zr,
in: in,
}, nil
}
// Close the underlying stream and the gzip reader
func (gz *gzipReader) Close() error {
zrErr := gz.Reader.Close()
inErr := gz.in.Close()
if inErr != nil {
return inErr
}
if zrErr != nil {
return zrErr
}
return nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/readers/error.go | lib/readers/error.go | package readers
// ErrorReader wraps an error to return on Read
type ErrorReader struct {
Err error
}
// Read always returns the error
func (er ErrorReader) Read(p []byte) (n int, err error) {
return 0, er.Err
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/readers/readfill.go | lib/readers/readfill.go | package readers
import "io"
// ReadFill reads as much data from r into buf as it can
//
// It reads until the buffer is full or r.Read returned an error.
//
// This is io.ReadFull but when you just want as much data as
// possible, not an exact size of block.
func ReadFill(r io.Reader, buf []byte) (n int, err error) {
var nn int
for n < len(buf) && err == nil {
nn, err = r.Read(buf[n:])
n += nn
}
return n, err
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/readers/error_test.go | lib/readers/error_test.go | package readers
import (
"errors"
"testing"
"github.com/stretchr/testify/assert"
)
// TestErrorReader checks that ErrorReader returns its configured error
// and no data on Read.
func TestErrorReader(t *testing.T) {
	errRead := errors.New("boom")
	r := ErrorReader{errRead}
	buf := make([]byte, 16)
	n, err := r.Read(buf)
	// the configured error comes straight back with zero bytes read
	assert.Equal(t, errRead, err)
	assert.Equal(t, 0, n)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/readers/context_test.go | lib/readers/context_test.go | package readers
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestContextReader checks reads pass through while the context is
// live and fail with context.Canceled after cancellation.
func TestContextReader(t *testing.T) {
	r := NewPatternReader(100)
	ctx, cancel := context.WithCancel(context.Background())
	cr := NewContextReader(ctx, r)
	var buf = make([]byte, 3)
	// read works normally before cancellation
	n, err := cr.Read(buf)
	require.NoError(t, err)
	assert.Equal(t, 3, n)
	assert.Equal(t, []byte{0, 1, 2}, buf)
	cancel()
	// after cancellation the context error is returned immediately
	n, err = cr.Read(buf)
	assert.Equal(t, context.Canceled, err)
	assert.Equal(t, 0, n)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/readers/fakeseeker_test.go | lib/readers/fakeseeker_test.go | package readers
import (
"bytes"
"io"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Check interface
var _ io.ReadSeeker = &FakeSeeker{}
func TestFakeSeeker(t *testing.T) {
// Test that passing in an io.ReadSeeker just passes it through
bufReader := bytes.NewReader([]byte{1})
r := NewFakeSeeker(bufReader, 5)
assert.Equal(t, r, bufReader)
in := bytes.NewBufferString("hello")
buf := make([]byte, 16)
r = NewFakeSeeker(in, 5)
assert.NotEqual(t, r, in)
// check the seek offset is as passed in
checkPos := func(pos int64) {
abs, err := r.Seek(0, io.SeekCurrent)
require.NoError(t, err)
assert.Equal(t, pos, abs)
}
// Test some seeking
checkPos(0)
abs, err := r.Seek(2, io.SeekStart)
require.NoError(t, err)
assert.Equal(t, int64(2), abs)
checkPos(2)
abs, err = r.Seek(-1, io.SeekEnd)
require.NoError(t, err)
assert.Equal(t, int64(4), abs)
checkPos(4)
// Check can't read if not at start
_, err = r.Read(buf)
require.ErrorContains(t, err, "not at start")
// Seek back to start
abs, err = r.Seek(-4, io.SeekCurrent)
require.NoError(t, err)
assert.Equal(t, int64(0), abs)
checkPos(0)
_, err = r.Seek(42, 17)
require.ErrorContains(t, err, "invalid whence")
_, err = r.Seek(-1, io.SeekStart)
require.ErrorContains(t, err, "negative position")
// Test reading now seeked back to the start
n, err := r.Read(buf)
require.NoError(t, err)
assert.Equal(t, 5, n)
assert.Equal(t, []byte("hello"), buf[:5])
// Seeking should give an error now
_, err = r.Seek(-1, io.SeekEnd)
require.ErrorContains(t, err, "after reading")
}
func TestFakeSeekerError(t *testing.T) {
in := bytes.NewBufferString("hello")
r := NewFakeSeeker(in, 5)
assert.NotEqual(t, r, in)
buf, err := io.ReadAll(r)
require.NoError(t, err)
assert.Equal(t, []byte("hello"), buf)
_, err = r.Read(buf)
assert.Equal(t, io.EOF, err)
_, err = r.Seek(0, io.SeekStart)
assert.Equal(t, io.EOF, err)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/readers/noclose.go | lib/readers/noclose.go | package readers
import "io"
// noClose is used to wrap an io.Reader to stop it being upgraded
type noClose struct {
in io.Reader
}
// Read implements io.Closer by passing it straight on
func (nc noClose) Read(p []byte) (n int, err error) {
return nc.in.Read(p)
}
// NoCloser makes sure that the io.Reader passed in can't upgraded to
// an io.Closer.
//
// This is for use with http.NewRequest to make sure the body doesn't
// get upgraded to an io.Closer and the body closed unexpectedly.
func NoCloser(in io.Reader) io.Reader {
if in == nil {
return in
}
// if in doesn't implement io.Closer, just return it
if _, canClose := in.(io.Closer); !canClose {
return in
}
return noClose{in: in}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/readers/noseeker.go | lib/readers/noseeker.go | package readers
import (
"errors"
"io"
)
var (
	// errCantSeek is returned by every call to NoSeeker.Seek
	errCantSeek = errors.New("can't Seek")
)

// NoSeeker adapts an io.Reader into an io.ReadSeeker.
//
// However if Seek() is called it will return an error.
type NoSeeker struct {
	io.Reader // Read is passed through to the wrapped reader unchanged
}

// Seek the stream - returns an error
//
// The offset and whence are ignored and the stream position is unchanged.
func (r NoSeeker) Seek(offset int64, whence int) (abs int64, err error) {
	return 0, errCantSeek
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/readers/fakeseeker.go | lib/readers/fakeseeker.go | package readers
import (
"errors"
"fmt"
"io"
)
// FakeSeeker adapts an io.Seeker into an io.ReadSeeker
type FakeSeeker struct {
in io.Reader
readErr error
length int64
offset int64
read bool
}
// NewFakeSeeker creates a fake io.ReadSeeker from an io.Reader
//
// This can be seeked before reading to discover the length passed in.
func NewFakeSeeker(in io.Reader, length int64) io.ReadSeeker {
if rs, ok := in.(io.ReadSeeker); ok {
return rs
}
return &FakeSeeker{
in: in,
length: length,
}
}
// Seek the stream - possible only before reading
func (r *FakeSeeker) Seek(offset int64, whence int) (abs int64, err error) {
if r.readErr != nil {
return 0, r.readErr
}
if r.read {
return 0, fmt.Errorf("FakeSeeker: can't Seek(%d, %d) after reading", offset, whence)
}
switch whence {
case io.SeekStart:
abs = offset
case io.SeekCurrent:
abs = r.offset + offset
case io.SeekEnd:
abs = r.length + offset
default:
return 0, errors.New("FakeSeeker: invalid whence")
}
if abs < 0 {
return 0, errors.New("FakeSeeker: negative position")
}
r.offset = abs
return abs, nil
}
// Read data from the stream. Will give an error if seeked.
func (r *FakeSeeker) Read(p []byte) (n int, err error) {
if r.readErr != nil {
return 0, r.readErr
}
if !r.read && r.offset != 0 {
return 0, errors.New("FakeSeeker: not at start: can't read")
}
n, err = r.in.Read(p)
if n != 0 {
r.read = true
}
if err != nil {
r.readErr = err
}
return n, err
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/readers/repeatable_test.go | lib/readers/repeatable_test.go | package readers
import (
"bytes"
"io"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestRepeatableReader(t *testing.T) {
var dst []byte
var n int
var pos int64
var err error
b := []byte("Testbuffer")
buf := bytes.NewBuffer(b)
r := NewRepeatableReader(buf)
dst = make([]byte, 100)
n, err = r.Read(dst)
assert.Nil(t, err)
assert.Equal(t, 10, n)
require.Equal(t, b, dst[0:10])
// Test read EOF
n, err = r.Read(dst)
assert.Equal(t, io.EOF, err)
assert.Equal(t, 0, n)
// Test Seek Back to start
dst = make([]byte, 10)
pos, err = r.Seek(0, io.SeekStart)
assert.Nil(t, err)
require.Equal(t, 0, int(pos))
n, err = r.Read(dst)
assert.Nil(t, err)
assert.Equal(t, 10, n)
require.Equal(t, b, dst)
// Test partial read
buf = bytes.NewBuffer(b)
r = NewRepeatableReader(buf)
dst = make([]byte, 5)
n, err = r.Read(dst)
assert.Nil(t, err)
assert.Equal(t, 5, n)
require.Equal(t, b[0:5], dst)
n, err = r.Read(dst)
assert.Nil(t, err)
assert.Equal(t, 5, n)
require.Equal(t, b[5:], dst)
// Test Seek
buf = bytes.NewBuffer(b)
r = NewRepeatableReader(buf)
// Should not allow seek past cache index
pos, err = r.Seek(5, io.SeekCurrent)
assert.NotNil(t, err)
assert.Equal(t, "fs.RepeatableReader.Seek: offset is unavailable", err.Error())
assert.Equal(t, 0, int(pos))
// Should not allow seek to negative position start
pos, err = r.Seek(-1, io.SeekCurrent)
assert.NotNil(t, err)
assert.Equal(t, "fs.RepeatableReader.Seek: negative position", err.Error())
assert.Equal(t, 0, int(pos))
// Should not allow seek with invalid whence
pos, err = r.Seek(0, 3)
assert.NotNil(t, err)
assert.Equal(t, "fs.RepeatableReader.Seek: invalid whence", err.Error())
assert.Equal(t, 0, int(pos))
// Should seek from index with io.SeekCurrent(1) whence
dst = make([]byte, 5)
_, _ = r.Read(dst)
pos, err = r.Seek(-3, io.SeekCurrent)
assert.Nil(t, err)
require.Equal(t, 2, int(pos))
pos, err = r.Seek(1, io.SeekCurrent)
assert.Nil(t, err)
require.Equal(t, 3, int(pos))
// Should seek from cache end with io.SeekEnd(2) whence
pos, err = r.Seek(-3, io.SeekEnd)
assert.Nil(t, err)
require.Equal(t, 2, int(pos))
// Should read from seek position and past it
dst = make([]byte, 5)
n, err = io.ReadFull(r, dst)
assert.Nil(t, err)
assert.Equal(t, 5, n)
assert.Equal(t, b[2:7], dst)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/readers/noseeker_test.go | lib/readers/noseeker_test.go | package readers
import (
"bytes"
"io"
"testing"
"github.com/stretchr/testify/assert"
)
func TestNoSeeker(t *testing.T) {
r := bytes.NewBufferString("hello")
rs := NoSeeker{Reader: r}
// Check read
b := make([]byte, 4)
n, err := rs.Read(b)
assert.NoError(t, err)
assert.Equal(t, 4, n)
assert.Equal(t, []byte("hell"), b)
// Check seek
_, err = rs.Seek(0, io.SeekCurrent)
assert.Equal(t, errCantSeek, err)
}
// check interfaces
var (
_ io.Reader = NoSeeker{}
_ io.Seeker = NoSeeker{}
)
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/readers/limited.go | lib/readers/limited.go | package readers
import (
"io"
"github.com/rclone/rclone/fs"
)
// LimitedReadCloser adds io.Closer to io.LimitedReader. Create one with NewLimitedReadCloser
type LimitedReadCloser struct {
	*io.LimitedReader // limits how many bytes can be read
	io.Closer // closes the underlying stream
}

// Close closes the underlying io.Closer. The error, if any, will be ignored if data is read completely
func (lrc *LimitedReadCloser) Close() error {
	err := lrc.Closer.Close()
	// N == 0 means the limit was reached, so we already have all the
	// data we wanted - a close error at that point is harmless
	if err != nil && lrc.N == 0 {
		fs.Debugf(nil, "ignoring close error because we already got all the data")
		err = nil
	}
	return err
}
// NewLimitedReadCloser returns a LimitedReadCloser wrapping rc to
// limit it to reading limit bytes. If limit < 0 then it does not
// wrap rc, it just returns it.
func NewLimitedReadCloser(rc io.ReadCloser, limit int64) (lrc io.ReadCloser) {
	if limit < 0 {
		// a negative limit means unlimited - no wrapping needed
		return rc
	}
	limited := &io.LimitedReader{R: rc, N: limit}
	return &LimitedReadCloser{LimitedReader: limited, Closer: rc}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/readers/noclose_test.go | lib/readers/noclose_test.go | package readers
import (
"errors"
"io"
"testing"
"github.com/stretchr/testify/assert"
)
var errRead = errors.New("read error")
type readOnly struct{}
func (readOnly) Read(p []byte) (n int, err error) {
return 0, io.EOF
}
type readClose struct{}
func (readClose) Read(p []byte) (n int, err error) {
return 0, errRead
}
func (readClose) Close() (err error) {
return io.EOF
}
func TestNoCloser(t *testing.T) {
assert.Equal(t, nil, NoCloser(nil))
ro := readOnly{}
assert.Equal(t, ro, NoCloser(ro))
rc := readClose{}
nc := NoCloser(rc)
assert.NotEqual(t, nc, rc)
_, hasClose := nc.(io.Closer)
assert.False(t, hasClose)
_, err := nc.Read(nil)
assert.Equal(t, errRead, err)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/readers/context.go | lib/readers/context.go | // Package readers provides io.Reader related utilities.
package readers
import (
"context"
"io"
)
// NewContextReader creates a reader, that returns any errors that ctx gives
func NewContextReader(ctx context.Context, r io.Reader) io.Reader {
return &contextReader{
ctx: ctx,
r: r,
}
}
type contextReader struct {
ctx context.Context
r io.Reader
}
// Read bytes as per io.Reader interface
func (cr *contextReader) Read(p []byte) (n int, err error) {
err = cr.ctx.Err()
if err != nil {
return 0, err
}
return cr.r.Read(p)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/readers/readfill_test.go | lib/readers/readfill_test.go | package readers
import (
"io"
"testing"
"github.com/stretchr/testify/assert"
)
// byteReader is a test io.Reader which produces the bytes c, c-1, ..., 1
// one byte per Read call, then io.EOF.
type byteReader struct {
	c byte // next byte to return; 0 means exhausted
}

// Read returns at most one byte per call, counting c down to zero.
func (br *byteReader) Read(p []byte) (n int, err error) {
	if br.c == 0 {
		err = io.EOF
	} else if len(p) >= 1 {
		p[0] = br.c
		n = 1
		br.c--
	}
	return
}
// TestReadFill checks ReadFill with an empty source, a source shorter
// than the buffer, and a source longer than the buffer.
func TestReadFill(t *testing.T) {
	buf := []byte{9, 9, 9, 9, 9}
	// empty source - nothing written, EOF returned
	n, err := ReadFill(&byteReader{0}, buf)
	assert.Equal(t, io.EOF, err)
	assert.Equal(t, 0, n)
	assert.Equal(t, []byte{9, 9, 9, 9, 9}, buf)
	// short source - partial fill plus EOF
	n, err = ReadFill(&byteReader{3}, buf)
	assert.Equal(t, io.EOF, err)
	assert.Equal(t, 3, n)
	assert.Equal(t, []byte{3, 2, 1, 9, 9}, buf)
	// long source - buffer filled completely with no error
	n, err = ReadFill(&byteReader{8}, buf)
	assert.Equal(t, nil, err)
	assert.Equal(t, 5, n)
	assert.Equal(t, []byte{8, 7, 6, 5, 4}, buf)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/readers/repeatable.go | lib/readers/repeatable.go | package readers
import (
"errors"
"io"
"sync"
)
// A RepeatableReader implements io.ReadSeeker. It allows seeking back
// and forth within data it has already read and cached, but will only
// read data from the internal Reader as necessary, and so plays nicely
// with the Account and io.LimitedReader to reflect current speed.
type RepeatableReader struct {
	mu sync.Mutex // protect against concurrent use
	in io.Reader  // Input reader
	i  int64      // current reading index
	b  []byte     // internal cache buffer
}

// compile-time check that the io.ReadSeeker interface is satisfied
var _ io.ReadSeeker = (*RepeatableReader)(nil)
// Seek implements the io.Seeker interface.
// If seek position is passed the cache buffer length the function will return
// the maximum offset that can be used and "fs.RepeatableReader.Seek: offset is unavailable" Error
func (r *RepeatableReader) Seek(offset int64, whence int) (int64, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	cacheLen := int64(len(r.b))
	// translate (offset, whence) into an absolute position
	var abs int64
	switch whence {
	case io.SeekStart:
		abs = offset
	case io.SeekCurrent:
		abs = r.i + offset
	case io.SeekEnd:
		abs = cacheLen + offset
	default:
		return 0, errors.New("fs.RepeatableReader.Seek: invalid whence")
	}
	switch {
	case abs < 0:
		return 0, errors.New("fs.RepeatableReader.Seek: negative position")
	case abs > cacheLen:
		// can only seek within already-cached data - report how far we could go
		return offset - (abs - cacheLen), errors.New("fs.RepeatableReader.Seek: offset is unavailable")
	}
	r.i = abs
	return abs, nil
}
// Read data from original Reader into bytes
// Data is either served from the underlying Reader or from cache if was already read
func (r *RepeatableReader) Read(b []byte) (n int, err error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	cacheLen := int64(len(r.b))
	if r.i == cacheLen {
		// at the end of the cache - read fresh data and remember it
		n, err = r.in.Read(b)
		if n > 0 {
			r.b = append(r.b, b[:n]...)
		}
	} else {
		// replay previously read data from the cache
		// NOTE: serves only the cached tail - it does not top up from
		// the underlying reader within the same call
		n = copy(b, r.b[r.i:])
	}
	r.i += int64(n)
	return n, err
}
// NewRepeatableReader create new repeatable reader from Reader r
func NewRepeatableReader(r io.Reader) *RepeatableReader {
	return &RepeatableReader{in: r}
}

// NewRepeatableReaderSized create new repeatable reader from Reader r
// with an initial buffer of size.
func NewRepeatableReaderSized(r io.Reader, size int) *RepeatableReader {
	return &RepeatableReader{
		in: r,
		// pre-allocate the cache to avoid repeated growth
		b: make([]byte, 0, size),
	}
}

// NewRepeatableLimitReader create new repeatable reader from Reader r
// with an initial buffer of size wrapped in an io.LimitReader to read
// only size.
func NewRepeatableLimitReader(r io.Reader, size int) *RepeatableReader {
	return NewRepeatableReaderSized(io.LimitReader(r, int64(size)), size)
}

// NewRepeatableReaderBuffer create new repeatable reader from Reader r
// using the buffer passed in.
func NewRepeatableReaderBuffer(r io.Reader, buf []byte) *RepeatableReader {
	return &RepeatableReader{
		in: r,
		// reuse the caller's buffer as the cache, starting empty
		b: buf[:0],
	}
}

// NewRepeatableLimitReaderBuffer create new repeatable reader from
// Reader r and buf wrapped in an io.LimitReader to read only size.
func NewRepeatableLimitReaderBuffer(r io.Reader, buf []byte, size int64) *RepeatableReader {
	return NewRepeatableReaderBuffer(io.LimitReader(r, size), buf)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/readers/pattern_reader.go | lib/readers/pattern_reader.go | package readers
import (
"errors"
"io"
)
// This is the smallest prime less than 256
//
// Using a prime here means we are less likely to hit repeating patterns
const patternReaderModulo = 251
// NewPatternReader creates a reader, that returns a deterministic byte pattern.
// After length bytes are read
func NewPatternReader(length int64) io.ReadSeeker {
return &patternReader{
length: length,
}
}
type patternReader struct {
offset int64
length int64
c byte
}
func (r *patternReader) Read(p []byte) (n int, err error) {
for i := range p {
if r.offset >= r.length {
return n, io.EOF
}
p[i] = r.c
r.c = (r.c + 1) % patternReaderModulo
r.offset++
n++
}
return
}
// Seek implements the io.Seeker interface.
func (r *patternReader) Seek(offset int64, whence int) (abs int64, err error) {
switch whence {
case io.SeekStart:
abs = offset
case io.SeekCurrent:
abs = r.offset + offset
case io.SeekEnd:
abs = r.length + offset
default:
return 0, errors.New("patternReader: invalid whence")
}
if abs < 0 {
return 0, errors.New("patternReader: negative position")
}
r.offset = abs
r.c = byte(abs % patternReaderModulo)
return abs, nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/readers/counting_reader.go | lib/readers/counting_reader.go | package readers
import "io"
// NewCountingReader returns a CountingReader, which will read from the given
// reader while keeping track of how many bytes were read.
func NewCountingReader(in io.Reader) *CountingReader {
return &CountingReader{in: in}
}
// CountingReader holds a reader and a read count of how many bytes were read
// so far.
type CountingReader struct {
in io.Reader
read uint64
}
// Read reads from the underlying reader.
func (cr *CountingReader) Read(b []byte) (int, error) {
n, err := cr.in.Read(b)
cr.read += uint64(n)
return n, err
}
// BytesRead returns how many bytes were read from the underlying reader so far.
func (cr *CountingReader) BytesRead() uint64 {
return cr.read
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/bucket/bucket.go | lib/bucket/bucket.go | // Package bucket is contains utilities for managing bucket-based backends
package bucket
import (
"errors"
"strings"
"sync"
)
var (
// ErrAlreadyDeleted is returned when an already deleted
// bucket is passed to Remove
ErrAlreadyDeleted = errors.New("bucket already deleted")
)
// Split takes an absolute path which includes the bucket and
// splits it into a bucket and a path in that bucket
// bucketPath
func Split(absPath string) (bucket, bucketPath string) {
	// Cut at the first "/" - if there is none the whole string is the
	// bucket with an empty path, which also covers absPath == "".
	bucket, bucketPath, _ = strings.Cut(absPath, "/")
	return bucket, bucketPath
}
// Join path1 and path2
//
// Like path.Join but does not clean the path - useful to preserve trailing /.
//
// It also does not clean multiple // in the path.
func Join(path1, path2 string) string {
	switch {
	case path1 == "":
		return path2
	case path2 == "":
		return path1
	}
	return path1 + "/" + path2
}
// IsAllSlashes returns true if s is all / characters.
//
// It returns false if s is "".
func IsAllSlashes(s string) bool {
	// trimming every "/" from a non-empty all-slash string leaves ""
	return s != "" && strings.Trim(s, "/") == ""
}
// Cache stores whether buckets are available and their IDs
type Cache struct {
	mu sync.Mutex // mutex to protect created and deleted
	status map[string]bool // true if we have created the container, false if deleted
	createMu sync.Mutex // mutex to protect against simultaneous Create
	removeMu sync.Mutex // mutex to protect against simultaneous Remove
}
// NewCache creates an empty Cache
func NewCache() *Cache {
	return &Cache{
		// size hint of 1 since most remotes use a single bucket
		status: make(map[string]bool, 1),
	}
}
// MarkOK marks the bucket as being present
func (c *Cache) MarkOK(bucket string) {
	// the root "" is never tracked
	if bucket == "" {
		return
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	c.status[bucket] = true
}

// MarkDeleted marks the bucket as being deleted
func (c *Cache) MarkDeleted(bucket string) {
	// the root "" is never tracked
	if bucket == "" {
		return
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	c.status[bucket] = false
}
type (
// ExistsFn should be passed to Create to see if a bucket
// exists or not
ExistsFn func() (found bool, err error)
// CreateFn should be passed to Create to make a bucket
CreateFn func() error
)
// Create the bucket with create() if it doesn't exist
//
// If exists is set then if the bucket has been deleted it will call
// exists() to see if it still exists.
//
// If create returns an error we assume the bucket was not created
func (c *Cache) Create(bucket string, create CreateFn, exists ExistsFn) (err error) {
	// if we are at the root, then it is OK
	if bucket == "" {
		return nil
	}
	// serialize concurrent Create calls so create() runs one at a time
	c.createMu.Lock()
	defer c.createMu.Unlock()
	c.mu.Lock()
	defer c.mu.Unlock()
	// if have exists function and bucket has been deleted, check
	// it still exists
	if created, ok := c.status[bucket]; ok && !created && exists != nil {
		found, err := exists()
		if err == nil {
			c.status[bucket] = found
		}
		// return on a probe error or if the bucket turned out to exist
		if err != nil || found {
			return err
		}
	}
	// If bucket already exists then it is OK
	if created, ok := c.status[bucket]; ok && created {
		return nil
	}
	// Create the bucket
	// NOTE: drop the status lock around the (possibly slow) create()
	// call - createMu is still held so no other Create can interleave
	c.mu.Unlock()
	err = create()
	c.mu.Lock()
	if err != nil {
		return err
	}
	// Mark OK if successful
	c.status[bucket] = true
	return nil
}
// Remove the bucket with f if it exists
//
// If f returns an error we assume the bucket was not removed.
//
// If the bucket has already been deleted it returns ErrAlreadyDeleted
func (c *Cache) Remove(bucket string, f func() error) error {
	// if we are at the root, then it is OK
	if bucket == "" {
		return nil
	}
	// serialize concurrent Remove calls so f() runs one at a time
	c.removeMu.Lock()
	defer c.removeMu.Unlock()
	c.mu.Lock()
	defer c.mu.Unlock()
	// If bucket already deleted then it is OK
	if created, ok := c.status[bucket]; ok && !created {
		return ErrAlreadyDeleted
	}
	// Remove the bucket
	// NOTE: drop the status lock around the (possibly slow) f() call -
	// removeMu is still held so no other Remove can interleave
	c.mu.Unlock()
	err := f()
	c.mu.Lock()
	if err != nil {
		return err
	}
	// Mark removed if successful
	c.status[bucket] = false
	return err
}
// IsDeleted returns true if the bucket has definitely been deleted by
// us, false otherwise.
func (c *Cache) IsDeleted(bucket string) bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	// buckets with unknown status are not reported as deleted
	if created, ok := c.status[bucket]; ok {
		return !created
	}
	return false
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/bucket/bucket_test.go | lib/bucket/bucket_test.go | package bucket
import (
"errors"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func TestSplit(t *testing.T) {
for _, test := range []struct {
in string
wantBucket string
wantPath string
}{
{in: "", wantBucket: "", wantPath: ""},
{in: "bucket", wantBucket: "bucket", wantPath: ""},
{in: "bucket/path", wantBucket: "bucket", wantPath: "path"},
{in: "bucket/path/subdir", wantBucket: "bucket", wantPath: "path/subdir"},
} {
gotBucket, gotPath := Split(test.in)
assert.Equal(t, test.wantBucket, gotBucket, test.in)
assert.Equal(t, test.wantPath, gotPath, test.in)
}
}
func TestJoin(t *testing.T) {
for _, test := range []struct {
in1, in2 string
want string
}{
{in1: "", in2: "", want: ""},
{in1: "in1", in2: "", want: "in1"},
{in1: "", in2: "in2", want: "in2"},
{in1: "in1", in2: "in2", want: "in1/in2"},
{in1: "in1/", in2: "in2", want: "in1//in2"},
{in1: "in1", in2: "/in2", want: "in1//in2"},
{in1: "in1", in2: "in2/", want: "in1/in2/"},
{in1: "/in1", in2: "/in2", want: "/in1//in2"},
{in1: "/in1", in2: "../in2", want: "/in1/../in2"},
} {
got := Join(test.in1, test.in2)
assert.Equal(t, test.want, got, fmt.Sprintf("in1=%q, in2=%q", test.in1, test.in2))
}
}
func TestIsAllSlashes(t *testing.T) {
for _, test := range []struct {
in string
want bool
}{
{in: "", want: false},
{in: "/", want: true},
{in: "x/", want: false},
{in: "/x", want: false},
{in: "//", want: true},
{in: "/x/", want: false},
{in: "///", want: true},
} {
got := IsAllSlashes(test.in)
assert.Equal(t, test.want, got, test.in)
}
}
func TestCache(t *testing.T) {
c := NewCache()
errBoom := errors.New("boom")
assert.Equal(t, 0, len(c.status))
// IsDeleted before creation
assert.False(t, c.IsDeleted("bucket"))
// MarkOK
c.MarkOK("")
assert.Equal(t, 0, len(c.status))
// MarkOK again
c.MarkOK("bucket")
assert.Equal(t, map[string]bool{"bucket": true}, c.status)
// MarkDeleted
c.MarkDeleted("bucket")
assert.Equal(t, map[string]bool{"bucket": false}, c.status)
// MarkOK again
c.MarkOK("bucket")
assert.Equal(t, map[string]bool{"bucket": true}, c.status)
// IsDeleted after creation
assert.False(t, c.IsDeleted("bucket"))
// Create from root
err := c.Create("", nil, nil)
assert.NoError(t, err)
assert.Equal(t, map[string]bool{"bucket": true}, c.status)
// Create bucket that is already OK
err = c.Create("bucket", nil, nil)
assert.NoError(t, err)
assert.Equal(t, map[string]bool{"bucket": true}, c.status)
// Create new bucket
err = c.Create("bucket2", func() error {
return nil
}, func() (bool, error) {
return true, nil
})
assert.NoError(t, err)
assert.Equal(t, map[string]bool{"bucket": true, "bucket2": true}, c.status)
// Create bucket that has been deleted with error
c.status["bucket2"] = false // mark bucket deleted
err = c.Create("bucket2", nil, func() (bool, error) {
return false, errBoom
})
assert.Equal(t, errBoom, err)
assert.Equal(t, map[string]bool{"bucket": true, "bucket2": false}, c.status)
// Create bucket that has been deleted with no error
err = c.Create("bucket2", nil, func() (bool, error) {
return true, nil
})
assert.NoError(t, err)
assert.Equal(t, map[string]bool{"bucket": true, "bucket2": true}, c.status)
// Create a new bucket with no exists function
err = c.Create("bucket3", func() error {
return nil
}, nil)
assert.NoError(t, err)
assert.Equal(t, map[string]bool{"bucket": true, "bucket2": true, "bucket3": true}, c.status)
// Create a new bucket with no exists function with an error
err = c.Create("bucket4", func() error {
return errBoom
}, nil)
assert.Equal(t, errBoom, err)
assert.Equal(t, map[string]bool{"bucket": true, "bucket2": true, "bucket3": true}, c.status)
// Remove root
err = c.Remove("", func() error {
return nil
})
assert.NoError(t, err)
assert.Equal(t, map[string]bool{"bucket": true, "bucket2": true, "bucket3": true}, c.status)
// Remove existing bucket
err = c.Remove("bucket3", func() error {
return nil
})
assert.NoError(t, err)
assert.Equal(t, map[string]bool{"bucket": true, "bucket2": true, "bucket3": false}, c.status)
// IsDeleted after removal
assert.True(t, c.IsDeleted("bucket3"))
// Remove it again
err = c.Remove("bucket3", func() error {
return errBoom
})
assert.Equal(t, ErrAlreadyDeleted, err)
assert.Equal(t, map[string]bool{"bucket": true, "bucket2": true, "bucket3": false}, c.status)
// Remove bucket with error
err = c.Remove("bucket2", func() error {
return errBoom
})
assert.Equal(t, errBoom, err)
assert.Equal(t, map[string]bool{"bucket": true, "bucket2": true, "bucket3": false}, c.status)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/ranges/ranges.go | lib/ranges/ranges.go | // Package ranges provides the Ranges type for keeping track of byte
// ranges which may or may not be present in an object.
package ranges
import (
"sort"
)
// Range describes a single byte range
type Range struct {
	Pos  int64 // offset of the first byte
	Size int64 // number of bytes in the range
}

// End returns the offset of the first byte after the Range
func (r Range) End() int64 {
	return r.Pos + r.Size
}

// IsEmpty true if the range has no size
func (r Range) IsEmpty() bool {
	return r.Size <= 0
}

// Clip ensures r.End() <= offset by modifying r.Size if necessary
//
// if r.Pos > offset then a Range{Pos:0, Size:0} will be returned.
func (r *Range) Clip(offset int64) {
	excess := r.End() - offset
	if excess <= 0 {
		// already within the limit - nothing to do
		return
	}
	r.Size -= excess
	if r.Size < 0 {
		// the whole range was beyond offset - collapse to the zero Range
		*r = Range{}
	}
}

// Intersection returns the common Range for two Range~s
//
// If there is no intersection then the Range returned will have
// IsEmpty() true
func (r Range) Intersection(b Range) (intersection Range) {
	overlaps := (r.Pos >= b.Pos && r.Pos < b.End()) || (b.Pos >= r.Pos && b.Pos < r.End())
	if overlaps {
		intersection.Pos = max(r.Pos, b.Pos)
		intersection.Size = min(r.End(), b.End()) - intersection.Pos
	}
	return intersection
}
// Ranges describes a number of Range segments. These should only be
// added with the Ranges.Insert function. The Ranges are kept sorted
// by Pos and coalesced to the minimum number of segments.
type Ranges []Range
// merge the Range new into dst if possible
//
// dst.Pos must be >= new.Pos
//
// return true if merged
func merge(new, dst *Range) bool {
	// A gap between new and dst - can't merge. Note that touching
	// ranges (new.End() == dst.Pos) are merged.
	if new.End() < dst.Pos {
		return false
	}
	if new.End() > dst.End() {
		// new completely covers dst so dst becomes new
		dst.Size = new.Size
	} else {
		// extend dst backwards so it starts at new.Pos
		dst.Size += dst.Pos - new.Pos
	}
	dst.Pos = new.Pos
	return true
}
// coalesce ranges assuming an element has been inserted at i
//
// Merges the element at i with its predecessor (if it overlaps or
// touches) and with as many successors as possible, then compacts
// the slice in place.
func (rs *Ranges) coalesce(i int) {
	ranges := *rs
	var j int
	// ranges[startChop:endChop+1] will be collapsed into a single element
	startChop := i
	endChop := i
	// look at previous element too
	if i > 0 && merge(&ranges[i-1], &ranges[i]) {
		startChop = i - 1
	}
	// merge following elements forward while they overlap or touch
	for j = i; j < len(ranges)-1; j++ {
		if !merge(&ranges[j], &ranges[j+1]) {
			break
		}
		endChop = j + 1
	}
	if endChop > startChop {
		// chop the unneeded ranges out
		copy(ranges[startChop:], ranges[endChop:])
		*rs = ranges[:len(ranges)-endChop+startChop]
	}
}
// search finds the first Range in rs that has Pos >= r.Pos
//
// The return takes on values 0..len(rs) so may point beyond the end
// of the slice.
func (rs Ranges) search(r Range) int {
	startsAtOrAfter := func(i int) bool {
		return rs[i].Pos >= r.Pos
	}
	return sort.Search(len(rs), startsAtOrAfter)
}
// Insert the new Range into a sorted and coalesced slice of
// Ranges. The result will be sorted and coalesced.
func (rs *Ranges) Insert(r Range) {
	// Empty ranges are never stored
	if r.IsEmpty() {
		return
	}
	ranges := *rs
	// Fast path - first range inserted
	if len(ranges) == 0 {
		ranges = append(ranges, r)
		*rs = ranges
		return
	}
	// i is the index of the first element with Pos >= r.Pos
	i := ranges.search(r)
	if i == len(ranges) || !merge(&r, &ranges[i]) {
		// insert into the range
		ranges = append(ranges, Range{})
		copy(ranges[i+1:], ranges[i:])
		ranges[i] = r
		*rs = ranges
	}
	// merge the new element with its neighbours if possible
	rs.coalesce(i)
}
// Find searches for r in rs and returns the next present or absent
// Range. It returns:
//
//	curr which is the Range found
//	next is the Range which should be presented to Find next
//	present shows whether curr is present or absent
//
// if !next.IsEmpty() then Find should be called again with r = next
// to retrieve the next Range.
//
// Note that r.Pos == curr.Pos always
func (rs Ranges) Find(r Range) (curr, next Range, present bool) {
	// Nothing to find in an empty request
	if r.IsEmpty() {
		return r, next, false
	}
	var intersection Range
	// i is the first element with Pos >= r.Pos
	i := rs.search(r)
	// Check whether the preceding element covers the start of r
	if i > 0 {
		prev := rs[i-1]
		// we know prev.Pos < r.Pos so intersection.Pos == r.Pos
		intersection = prev.Intersection(r)
		if !intersection.IsEmpty() {
			// consume the present part and return the remainder as next
			r.Pos = intersection.End()
			r.Size -= intersection.Size
			return intersection, r, true
		}
	}
	// No element at or after r.Pos - all of r is absent
	if i >= len(rs) {
		return r, Range{}, false
	}
	found := rs[i]
	intersection = found.Intersection(r)
	if intersection.IsEmpty() {
		// rs[i] starts beyond r.End() - all of r is absent
		return r, Range{}, false
	}
	if r.Pos < intersection.Pos {
		// There is an absent gap before rs[i] - return it first
		curr = Range{
			Pos:  r.Pos,
			Size: intersection.Pos - r.Pos,
		}
		r.Pos = curr.End()
		r.Size -= curr.Size
		return curr, r, false
	}
	// r starts inside rs[i] - consume the present part
	r.Pos = intersection.End()
	r.Size -= intersection.Size
	return intersection, r, true
}
// FoundRange is returned from FindAll
//
// It contains a Range and a boolean as to whether the range was
// Present or not.
type FoundRange struct {
	R       Range // the range found
	Present bool  // true if R is present in the Ranges searched
}
// FindAll repeatedly calls Find searching for r in rs and returning
// present or absent ranges.
//
// It returns a slice of FoundRange. Each element has a range and an
// indication of whether it was present or not.
func (rs Ranges) FindAll(r Range) (frs []FoundRange) {
	for {
		if r.IsEmpty() {
			return frs
		}
		var found FoundRange
		found.R, r, found.Present = rs.Find(r)
		frs = append(frs, found)
	}
}
// Present returns whether r can be satisfied by rs
func (rs Ranges) Present(r Range) (present bool) {
	// An empty range is trivially present
	if r.IsEmpty() {
		return true
	}
	_, next, present := rs.Find(r)
	// r is fully present only if the first segment found was present
	// and covered all of r (so there is nothing left to search for).
	return present && next.IsEmpty()
}
// Intersection works out which ranges out of rs are entirely
// contained within r and returns a new Ranges
func (rs Ranges) Intersection(r Range) (newRs Ranges) {
	// Return rs itself when empty to preserve its nil/non-nil state
	if len(rs) == 0 {
		return rs
	}
	for !r.IsEmpty() {
		var (
			segment Range
			present bool
		)
		segment, r, present = rs.Find(r)
		if present {
			newRs.Insert(segment)
		}
	}
	return newRs
}
// Equal returns true if rs == bs
//
// Note that a nil Ranges compares equal to an empty one.
func (rs Ranges) Equal(bs Ranges) bool {
	if len(rs) != len(bs) {
		return false
	}
	// Equal lengths - compare element by element. A nil slice has
	// length zero so falls straight through to true here.
	for i, r := range rs {
		if r != bs[i] {
			return false
		}
	}
	return true
}
// Size returns the total size of all the segments
func (rs Ranges) Size() (size int64) {
	for i := range rs {
		size += rs[i].Size
	}
	return size
}
// FindMissing finds the initial part of r that is not in rs
//
// If r is entirely present in rs then the returned block will be
// empty and positioned at r.End().
//
// If no initial part of r is present in rs then r is returned
// unchanged.
//
// If r is partially present in rs then a new block will be returned
// which starts with the first part of r that isn't present in rs. The
// End() for this block will be the same as originally passed in.
//
// For all returns rout.End() == r.End()
func (rs Ranges) FindMissing(r Range) (rout Range) {
	rout = r
	if r.IsEmpty() {
		return rout
	}
	// Only a present initial segment can shrink the missing part
	curr, _, present := rs.Find(r)
	if !present {
		// Initial block is not present
		return rout
	}
	// Chop the present initial segment off the front, keeping End() fixed
	rout.Size -= curr.End() - rout.Pos
	rout.Pos = curr.End()
	return rout
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/ranges/ranges_test.go | lib/ranges/ranges_test.go | package ranges
import (
"fmt"
"math/rand"
"slices"
"testing"
"github.com/stretchr/testify/assert"
)
func TestRangeEnd(t *testing.T) {
assert.Equal(t, int64(3), Range{Pos: 1, Size: 2}.End())
}
func TestRangeIsEmpty(t *testing.T) {
assert.Equal(t, false, Range{Pos: 1, Size: 2}.IsEmpty())
assert.Equal(t, true, Range{Pos: 1, Size: 0}.IsEmpty())
assert.Equal(t, true, Range{Pos: 1, Size: -1}.IsEmpty())
}
func TestRangeClip(t *testing.T) {
r := Range{Pos: 1, Size: 2}
r.Clip(5)
assert.Equal(t, Range{Pos: 1, Size: 2}, r)
r = Range{Pos: 1, Size: 6}
r.Clip(5)
assert.Equal(t, Range{Pos: 1, Size: 4}, r)
r = Range{Pos: 5, Size: 6}
r.Clip(5)
assert.Equal(t, Range{Pos: 5, Size: 0}, r)
r = Range{Pos: 7, Size: 6}
r.Clip(5)
assert.Equal(t, Range{Pos: 0, Size: 0}, r)
}
func TestRangeIntersection(t *testing.T) {
for _, test := range []struct {
r Range
b Range
want Range
}{
{
r: Range{1, 1},
b: Range{3, 1},
want: Range{},
},
{
r: Range{1, 1},
b: Range{1, 1},
want: Range{1, 1},
},
{
r: Range{1, 9},
b: Range{3, 2},
want: Range{3, 2},
},
{
r: Range{1, 5},
b: Range{3, 5},
want: Range{3, 3},
},
} {
what := fmt.Sprintf("test r=%v, b=%v", test.r, test.b)
got := test.r.Intersection(test.b)
assert.Equal(t, test.want, got, what)
got = test.b.Intersection(test.r)
assert.Equal(t, test.want, got, what)
}
}
func TestRangeMerge(t *testing.T) {
for _, test := range []struct {
new Range
dst Range
want Range
wantMerged bool
}{
{
new: Range{Pos: 1, Size: 1}, // .N.......
dst: Range{Pos: 3, Size: 3}, // ...DDD...
want: Range{Pos: 3, Size: 3}, // ...DDD...
wantMerged: false,
},
{
new: Range{Pos: 1, Size: 2}, // .NN......
dst: Range{Pos: 3, Size: 3}, // ...DDD...
want: Range{Pos: 1, Size: 5}, // .XXXXX...
wantMerged: true,
},
{
new: Range{Pos: 1, Size: 3}, // .NNN.....
dst: Range{Pos: 3, Size: 3}, // ...DDD...
want: Range{Pos: 1, Size: 5}, // .XXXXX...
wantMerged: true,
},
{
new: Range{Pos: 1, Size: 5}, // .NNNNN...
dst: Range{Pos: 3, Size: 3}, // ...DDD...
want: Range{Pos: 1, Size: 5}, // .XXXXX...
wantMerged: true,
},
{
new: Range{Pos: 1, Size: 6}, // .NNNNNN..
dst: Range{Pos: 3, Size: 3}, // ...DDD...
want: Range{Pos: 1, Size: 6}, // .XXXXXX..
wantMerged: true,
},
{
new: Range{Pos: 3, Size: 3}, // ...NNN...
dst: Range{Pos: 3, Size: 3}, // ...DDD...
want: Range{Pos: 3, Size: 3}, // ...XXX...
wantMerged: true,
},
{
new: Range{Pos: 3, Size: 2}, // ...NN....
dst: Range{Pos: 3, Size: 3}, // ...DDD...
want: Range{Pos: 3, Size: 3}, // ...XXX...
wantMerged: true,
},
{
new: Range{Pos: 3, Size: 4}, // ...NNNN..
dst: Range{Pos: 3, Size: 3}, // ...DDD...
want: Range{Pos: 3, Size: 4}, // ...XXXX..
wantMerged: true,
},
} {
what := fmt.Sprintf("test new=%v, dst=%v", test.new, test.dst)
gotMerged := merge(&test.new, &test.dst)
assert.Equal(t, test.wantMerged, gotMerged)
assert.Equal(t, test.want, test.dst, what)
}
}
// checkRanges asserts that rs is sorted, non-overlapping and
// coalesced, reporting failures via t. It returns true if rs is
// well formed.
//
// Note: testify's Failf signature is
// Failf(t, failureMessage, msg, args...) - the failure message comes
// first and the printf-style format second. The previous version
// passed the format string as failureMessage and the pre-formatted
// `what` as the format, producing "%!(EXTRA ...)" garbage in test
// output.
func checkRanges(t *testing.T, rs Ranges, what string) bool {
	if len(rs) < 2 {
		return true
	}
	ok := true
	for i := range len(rs) - 1 {
		a := rs[i]
		b := rs[i+1]
		if a.Pos >= b.Pos {
			assert.Failf(t, "ranges in wrong order", "%s: at %d in: %v", what, i, rs)
			ok = false
		}
		if a.End() > b.Pos {
			assert.Failf(t, "ranges overlap", "%s: at %d in: %v", what, i, rs)
			ok = false
		}
		if a.End() == b.Pos {
			assert.Failf(t, "ranges not coalesced", "%s: at %d in: %v", what, i, rs)
			ok = false
		}
	}
	return ok
}
func TestRangeCoalesce(t *testing.T) {
for _, test := range []struct {
rs Ranges
i int
want Ranges
}{
{
rs: Ranges{},
want: Ranges{},
},
{
rs: Ranges{
{Pos: 1, Size: 1},
},
want: Ranges{
{Pos: 1, Size: 1},
},
i: 0,
},
{
rs: Ranges{
{Pos: 1, Size: 1},
{Pos: 2, Size: 1},
{Pos: 3, Size: 1},
},
want: Ranges{
{Pos: 1, Size: 3},
},
i: 0,
},
{
rs: Ranges{
{Pos: 1, Size: 1},
{Pos: 3, Size: 1},
{Pos: 4, Size: 1},
{Pos: 5, Size: 1},
},
want: Ranges{
{Pos: 1, Size: 1},
{Pos: 3, Size: 3},
},
i: 2,
},
{
rs: Ranges{{38, 8}, {51, 10}, {60, 3}},
want: Ranges{{38, 8}, {51, 12}},
i: 1,
},
} {
got := slices.Clone(test.rs)
got.coalesce(test.i)
what := fmt.Sprintf("test rs=%v, i=%d", test.rs, test.i)
assert.Equal(t, test.want, got, what)
checkRanges(t, got, what)
}
}
func TestRangeInsert(t *testing.T) {
for _, test := range []struct {
new Range
rs Ranges
want Ranges
}{
{
new: Range{Pos: 1, Size: 0},
rs: Ranges{},
want: Ranges{},
},
{
new: Range{Pos: 1, Size: 1}, // .N.......
rs: Ranges{}, // .........
want: Ranges{ // .N.......
{Pos: 1, Size: 1},
},
},
{
new: Range{Pos: 1, Size: 1}, // .N.......
rs: Ranges{{Pos: 5, Size: 1}}, // .....R...
want: Ranges{ // .N...R...
{Pos: 1, Size: 1},
{Pos: 5, Size: 1},
},
},
{
new: Range{Pos: 5, Size: 1}, // .....R...
rs: Ranges{{Pos: 1, Size: 1}}, // .N.......
want: Ranges{ // .N...R...
{Pos: 1, Size: 1},
{Pos: 5, Size: 1},
},
},
{
new: Range{Pos: 1, Size: 1}, // .N.......
rs: Ranges{{Pos: 2, Size: 1}}, // ..R......
want: Ranges{ // .XX......
{Pos: 1, Size: 2},
},
},
{
new: Range{Pos: 2, Size: 1}, // ..N.......
rs: Ranges{{Pos: 1, Size: 1}}, // .R......
want: Ranges{ // .XX......
{Pos: 1, Size: 2},
},
},
{
new: Range{Pos: 51, Size: 10},
rs: Ranges{{38, 8}, {57, 2}, {60, 3}},
want: Ranges{{38, 8}, {51, 12}},
},
} {
got := slices.Clone(test.rs)
got.Insert(test.new)
what := fmt.Sprintf("test new=%v, rs=%v", test.new, test.rs)
assert.Equal(t, test.want, got, what)
checkRanges(t, test.rs, what)
checkRanges(t, got, what)
}
}
func TestRangeInsertRandom(t *testing.T) {
for range 100 {
var rs Ranges
for range 100 {
var r = Range{
Pos: rand.Int63n(100),
Size: rand.Int63n(10) + 1,
}
what := fmt.Sprintf("inserting %v into %v\n", r, rs)
rs.Insert(r)
if !checkRanges(t, rs, what) {
break
}
//fmt.Printf("%d: %d: %v\n", i, j, rs)
}
}
}
func TestRangeFind(t *testing.T) {
for _, test := range []struct {
rs Ranges
r Range
wantCurr Range
wantNext Range
wantPresent bool
}{
{
r: Range{Pos: 1, Size: 0},
rs: Ranges{},
wantCurr: Range{Pos: 1, Size: 0},
wantNext: Range{},
wantPresent: false,
},
{
r: Range{Pos: 1, Size: 1},
rs: Ranges{},
wantCurr: Range{Pos: 1, Size: 1},
wantNext: Range{},
wantPresent: false,
},
{
r: Range{Pos: 1, Size: 2},
rs: Ranges{
Range{Pos: 1, Size: 10},
},
wantCurr: Range{Pos: 1, Size: 2},
wantNext: Range{Pos: 3, Size: 0},
wantPresent: true,
},
{
r: Range{Pos: 1, Size: 10},
rs: Ranges{
Range{Pos: 1, Size: 2},
},
wantCurr: Range{Pos: 1, Size: 2},
wantNext: Range{Pos: 3, Size: 8},
wantPresent: true,
},
{
r: Range{Pos: 1, Size: 2},
rs: Ranges{
Range{Pos: 5, Size: 2},
},
wantCurr: Range{Pos: 1, Size: 2},
wantNext: Range{Pos: 0, Size: 0},
wantPresent: false,
},
{
r: Range{Pos: 2, Size: 10},
rs: Ranges{
Range{Pos: 1, Size: 2},
},
wantCurr: Range{Pos: 2, Size: 1},
wantNext: Range{Pos: 3, Size: 9},
wantPresent: true,
},
{
r: Range{Pos: 1, Size: 9},
rs: Ranges{
Range{Pos: 2, Size: 1},
Range{Pos: 4, Size: 1},
},
wantCurr: Range{Pos: 1, Size: 1},
wantNext: Range{Pos: 2, Size: 8},
wantPresent: false,
},
{
r: Range{Pos: 2, Size: 8},
rs: Ranges{
Range{Pos: 2, Size: 1},
Range{Pos: 4, Size: 1},
},
wantCurr: Range{Pos: 2, Size: 1},
wantNext: Range{Pos: 3, Size: 7},
wantPresent: true,
},
{
r: Range{Pos: 3, Size: 7},
rs: Ranges{
Range{Pos: 2, Size: 1},
Range{Pos: 4, Size: 1},
},
wantCurr: Range{Pos: 3, Size: 1},
wantNext: Range{Pos: 4, Size: 6},
wantPresent: false,
},
{
r: Range{Pos: 4, Size: 6},
rs: Ranges{
Range{Pos: 2, Size: 1},
Range{Pos: 4, Size: 1},
},
wantCurr: Range{Pos: 4, Size: 1},
wantNext: Range{Pos: 5, Size: 5},
wantPresent: true,
},
{
r: Range{Pos: 5, Size: 5},
rs: Ranges{
Range{Pos: 2, Size: 1},
Range{Pos: 4, Size: 1},
},
wantCurr: Range{Pos: 5, Size: 5},
wantNext: Range{Pos: 0, Size: 0},
wantPresent: false,
},
} {
what := fmt.Sprintf("test r=%v, rs=%v", test.r, test.rs)
checkRanges(t, test.rs, what)
gotCurr, gotNext, gotPresent := test.rs.Find(test.r)
assert.Equal(t, test.r.Pos, gotCurr.Pos, what)
assert.Equal(t, test.wantCurr, gotCurr, what)
assert.Equal(t, test.wantNext, gotNext, what)
assert.Equal(t, test.wantPresent, gotPresent, what)
}
}
func TestRangeFindAll(t *testing.T) {
for _, test := range []struct {
rs Ranges
r Range
want []FoundRange
wantNext Range
wantPresent bool
}{
{
r: Range{Pos: 1, Size: 0},
rs: Ranges{},
want: []FoundRange(nil),
},
{
r: Range{Pos: 1, Size: 1},
rs: Ranges{},
want: []FoundRange{
{
R: Range{Pos: 1, Size: 1},
Present: false,
},
},
},
{
r: Range{Pos: 1, Size: 2},
rs: Ranges{
Range{Pos: 1, Size: 10},
},
want: []FoundRange{
{
R: Range{Pos: 1, Size: 2},
Present: true,
},
},
},
{
r: Range{Pos: 1, Size: 10},
rs: Ranges{
Range{Pos: 1, Size: 2},
},
want: []FoundRange{
{
R: Range{Pos: 1, Size: 2},
Present: true,
},
{
R: Range{Pos: 3, Size: 8},
Present: false,
},
},
},
{
r: Range{Pos: 5, Size: 5},
rs: Ranges{
Range{Pos: 4, Size: 2},
Range{Pos: 7, Size: 1},
Range{Pos: 9, Size: 2},
},
want: []FoundRange{
{
R: Range{Pos: 5, Size: 1},
Present: true,
},
{
R: Range{Pos: 6, Size: 1},
Present: false,
},
{
R: Range{Pos: 7, Size: 1},
Present: true,
},
{
R: Range{Pos: 8, Size: 1},
Present: false,
},
{
R: Range{Pos: 9, Size: 1},
Present: true,
},
},
},
} {
what := fmt.Sprintf("test r=%v, rs=%v", test.r, test.rs)
checkRanges(t, test.rs, what)
got := test.rs.FindAll(test.r)
assert.Equal(t, test.want, got, what)
}
}
func TestRangePresent(t *testing.T) {
for _, test := range []struct {
rs Ranges
r Range
wantPresent bool
}{
{
r: Range{Pos: 1, Size: 0},
rs: Ranges{},
wantPresent: true,
},
{
r: Range{Pos: 1, Size: 0},
rs: Ranges(nil),
wantPresent: true,
},
{
r: Range{Pos: 0, Size: 1},
rs: Ranges{},
wantPresent: false,
},
{
r: Range{Pos: 0, Size: 1},
rs: Ranges(nil),
wantPresent: false,
},
{
r: Range{Pos: 1, Size: 2},
rs: Ranges{
Range{Pos: 1, Size: 1},
},
wantPresent: false,
},
{
r: Range{Pos: 1, Size: 2},
rs: Ranges{
Range{Pos: 1, Size: 2},
},
wantPresent: true,
},
{
r: Range{Pos: 1, Size: 2},
rs: Ranges{
Range{Pos: 1, Size: 10},
},
wantPresent: true,
},
{
r: Range{Pos: 1, Size: 2},
rs: Ranges{
Range{Pos: 5, Size: 2},
},
wantPresent: false,
},
{
r: Range{Pos: 1, Size: 9},
rs: Ranges{
Range{Pos: 2, Size: 1},
Range{Pos: 4, Size: 1},
},
wantPresent: false,
},
{
r: Range{Pos: 2, Size: 8},
rs: Ranges{
Range{Pos: 2, Size: 1},
Range{Pos: 4, Size: 1},
},
wantPresent: false,
},
{
r: Range{Pos: 3, Size: 7},
rs: Ranges{
Range{Pos: 2, Size: 1},
Range{Pos: 4, Size: 1},
},
wantPresent: false,
},
{
r: Range{Pos: 4, Size: 6},
rs: Ranges{
Range{Pos: 2, Size: 1},
Range{Pos: 4, Size: 1},
},
wantPresent: false,
},
{
r: Range{Pos: 5, Size: 5},
rs: Ranges{
Range{Pos: 2, Size: 1},
Range{Pos: 4, Size: 1},
},
wantPresent: false,
},
} {
what := fmt.Sprintf("test r=%v, rs=%v", test.r, test.rs)
checkRanges(t, test.rs, what)
gotPresent := test.rs.Present(test.r)
assert.Equal(t, test.wantPresent, gotPresent, what)
checkRanges(t, test.rs, what)
}
}
func TestRangesIntersection(t *testing.T) {
for _, test := range []struct {
rs Ranges
r Range
want Ranges
}{
{
rs: Ranges(nil),
r: Range{},
want: Ranges(nil),
},
{
rs: Ranges{},
r: Range{},
want: Ranges{},
},
{
rs: Ranges{},
r: Range{Pos: 1, Size: 0},
want: Ranges{},
},
{
rs: Ranges{},
r: Range{Pos: 1, Size: 1},
want: Ranges{},
},
{
rs: Ranges{{Pos: 1, Size: 5}},
r: Range{Pos: 1, Size: 3},
want: Ranges{
{Pos: 1, Size: 3},
},
},
{
rs: Ranges{{Pos: 1, Size: 5}},
r: Range{Pos: 1, Size: 10},
want: Ranges{
{Pos: 1, Size: 5},
},
},
{
rs: Ranges{{Pos: 1, Size: 5}},
r: Range{Pos: 3, Size: 10},
want: Ranges{
{Pos: 3, Size: 3},
},
},
{
rs: Ranges{{Pos: 1, Size: 5}},
r: Range{Pos: 6, Size: 10},
want: Ranges(nil),
},
{
rs: Ranges{
{Pos: 1, Size: 2},
{Pos: 11, Size: 2},
{Pos: 21, Size: 2},
{Pos: 31, Size: 2},
{Pos: 41, Size: 2},
},
r: Range{Pos: 12, Size: 20},
want: Ranges{
{Pos: 12, Size: 1},
{Pos: 21, Size: 2},
{Pos: 31, Size: 1},
},
},
} {
got := test.rs.Intersection(test.r)
what := fmt.Sprintf("test ra=%v, r=%v", test.rs, test.r)
assert.Equal(t, test.want, got, what)
checkRanges(t, test.rs, what)
checkRanges(t, got, what)
}
}
func TestRangesEqual(t *testing.T) {
for _, test := range []struct {
rs Ranges
bs Ranges
want bool
}{
{
rs: Ranges(nil),
bs: Ranges(nil),
want: true,
},
{
rs: Ranges{},
bs: Ranges(nil),
want: true,
},
{
rs: Ranges(nil),
bs: Ranges{},
want: true,
},
{
rs: Ranges{},
bs: Ranges{},
want: true,
},
{
rs: Ranges{
{Pos: 0, Size: 1},
},
bs: Ranges{},
want: false,
},
{
rs: Ranges{
{Pos: 0, Size: 1},
},
bs: Ranges{
{Pos: 0, Size: 1},
},
want: true,
},
{
rs: Ranges{
{Pos: 0, Size: 1},
{Pos: 10, Size: 9},
{Pos: 20, Size: 21},
},
bs: Ranges{
{Pos: 0, Size: 1},
{Pos: 10, Size: 9},
{Pos: 20, Size: 22},
},
want: false,
},
{
rs: Ranges{
{Pos: 0, Size: 1},
{Pos: 10, Size: 9},
{Pos: 20, Size: 21},
},
bs: Ranges{
{Pos: 0, Size: 1},
{Pos: 10, Size: 9},
{Pos: 20, Size: 21},
},
want: true,
},
} {
got := test.rs.Equal(test.bs)
what := fmt.Sprintf("test rs=%v, bs=%v", test.rs, test.bs)
assert.Equal(t, test.want, got, what)
checkRanges(t, test.bs, what)
checkRanges(t, test.rs, what)
}
}
func TestRangesSize(t *testing.T) {
for _, test := range []struct {
rs Ranges
want int64
}{
{
rs: Ranges(nil),
want: 0,
},
{
rs: Ranges{},
want: 0,
},
{
rs: Ranges{
{Pos: 7, Size: 11},
},
want: 11,
},
{
rs: Ranges{
{Pos: 0, Size: 1},
{Pos: 10, Size: 9},
{Pos: 20, Size: 21},
},
want: 31,
},
} {
got := test.rs.Size()
what := fmt.Sprintf("test rs=%v", test.rs)
assert.Equal(t, test.want, got, what)
checkRanges(t, test.rs, what)
}
}
func TestFindMissing(t *testing.T) {
for _, test := range []struct {
r Range
rs Ranges
want Range
}{
{
r: Range{},
rs: Ranges(nil),
want: Range{},
},
{
r: Range{},
rs: Ranges{},
want: Range{},
},
{
r: Range{Pos: 3, Size: 5},
rs: Ranges{
{Pos: 10, Size: 5},
{Pos: 20, Size: 5},
},
want: Range{Pos: 3, Size: 5},
},
{
r: Range{Pos: 3, Size: 15},
rs: Ranges{
{Pos: 10, Size: 5},
{Pos: 20, Size: 5},
},
want: Range{Pos: 3, Size: 15},
},
{
r: Range{Pos: 10, Size: 5},
rs: Ranges{
{Pos: 10, Size: 5},
{Pos: 20, Size: 5},
},
want: Range{Pos: 15, Size: 0},
},
{
r: Range{Pos: 10, Size: 7},
rs: Ranges{
{Pos: 10, Size: 5},
{Pos: 20, Size: 5},
},
want: Range{Pos: 15, Size: 2},
},
{
r: Range{Pos: 11, Size: 7},
rs: Ranges{
{Pos: 10, Size: 5},
{Pos: 20, Size: 5},
},
want: Range{Pos: 15, Size: 3},
},
} {
got := test.rs.FindMissing(test.r)
what := fmt.Sprintf("test r=%v, rs=%v", test.r, test.rs)
assert.Equal(t, test.want, got, what)
assert.Equal(t, test.r.End(), got.End())
checkRanges(t, test.rs, what)
}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/daemonize/daemon_unix.go | lib/daemonize/daemon_unix.go | //go:build unix && !aix
// Package daemonize provides daemonization interface for Unix platforms.
package daemonize
import (
"fmt"
"os"
"strings"
"syscall"
"github.com/rclone/rclone/fs"
"golang.org/x/sys/unix"
)
// StartDaemon runs background twin of current process.
// It executes separate parts of code in child and parent processes.
// Returns child process pid in the parent or nil in the child.
// The method looks like a fork but safe for goroutines.
func StartDaemon(args []string) (*os.Process, error) {
	if fs.IsDaemon() {
		// This process is already daemonized
		return nil, nil
	}

	// Mark the child via the environment so fs.IsDaemon() detects it there
	env := append(os.Environ(), fs.DaemonMarkVar+"="+fs.DaemonMarkChild)

	me, err := os.Executable()
	if err != nil {
		// Fall back to the (possibly relative) invocation name
		me = os.Args[0]
	}
	// os.Executable might have resolved symbolic link to the executable
	// so we run the background process with pre-converted CLI arguments.
	// Double conversion is still probable but isn't a problem as it should
	// preserve the converted command line.
	if len(args) != 0 {
		args[0] = me
	}
	if fs.PassDaemonArgsAsEnviron {
		args, env = argsToEnv(args, env)
	}

	// Detach all three standard streams to /dev/null
	null, err := os.Open(os.DevNull)
	if err != nil {
		return nil, err
	}
	files := []*os.File{
		null, // (0) stdin
		null, // (1) stdout
		null, // (2) stderr
	}
	sysAttr := &syscall.SysProcAttr{
		// setsid (https://linux.die.net/man/2/setsid) in the child process will reset
		// its process group id (PGID) to its PID thus detaching it from parent.
		// This would make autofs fail because it detects mounting process by its PGID.
		Setsid: false,
	}
	attr := &os.ProcAttr{
		Env:   env,
		Files: files,
		Sys:   sysAttr,
	}
	daemon, err := os.StartProcess(me, args, attr)
	if err != nil {
		return nil, err
	}

	return daemon, nil
}
// Processed command line flags of mount helper have simple structure:
// `--flag` or `--flag=value` but never `--flag value` or `-x`
// so we can easily pass them as environment variables.
//
// Each `--some-flag[=val]` becomes `RCLONE_SOME_FLAG=val` (or "true"
// for a bare flag), replacing an existing entry of the same name in
// the environment if present. Non-flag arguments are passed through.
func argsToEnv(origArgs, origEnv []string) (args, env []string) {
	env = origEnv
	if len(origArgs) == 0 {
		return args, env
	}
	args = []string{origArgs[0]}
	for _, arg := range origArgs[1:] {
		if !strings.HasPrefix(arg, "--") {
			// Not a long flag - keep as a plain argument
			args = append(args, arg)
			continue
		}
		key, val, hasValue := strings.Cut(arg[2:], "=")
		if !hasValue {
			// A bare --flag means "true"
			val = "true"
		}
		envName := "RCLONE_" + strings.ToUpper(strings.ReplaceAll(key, "-", "_"))
		prefix := envName + "="
		entry := envName + "=" + val
		replaced := false
		for i, existing := range env {
			if strings.HasPrefix(existing, prefix) {
				env[i] = entry
				replaced = true
			}
		}
		if !replaced {
			env = append(env, entry)
		}
	}
	return args, env
}
// Check returns non nil if the daemon process has died
//
// It reaps the child with a non-blocking wait4. A zero pid from
// wait4 means no state change, i.e. the daemon is still running.
//
// Previously a daemon terminated by a signal (status.Signaled())
// fell through and was reported as healthy - now it returns an
// error like a normal exit does.
func Check(daemon *os.Process) error {
	var status unix.WaitStatus
	wpid, err := unix.Wait4(daemon.Pid, &status, unix.WNOHANG, nil)
	// fs.Debugf(nil, "wait4 returned wpid=%d, err=%v, status=%d", wpid, err, status)
	if err != nil {
		return err
	}
	if wpid == 0 {
		// No state change - daemon is still running
		return nil
	}
	if status.Exited() {
		return fmt.Errorf("daemon exited with error code %d", status.ExitStatus())
	}
	if status.Signaled() {
		return fmt.Errorf("daemon was terminated by signal %d", status.Signal())
	}
	return nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/daemonize/daemon_other.go | lib/daemonize/daemon_other.go | //go:build !unix || aix
// Package daemonize provides daemonization stub for non-Unix platforms.
package daemonize
import (
"fmt"
"os"
"runtime"
)
var errNotSupported = fmt.Errorf("daemon mode is not supported on the %s platform", runtime.GOOS)
// StartDaemon runs background twin of current process.
//
// Daemonization is not available on this platform so this always
// fails with errNotSupported. args is ignored.
func StartDaemon(args []string) (*os.Process, error) {
	return nil, errNotSupported
}
// Check returns non nil if the daemon process has died
//
// Always fails with errNotSupported on this platform.
func Check(daemon *os.Process) error {
	return errNotSupported
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/israce/israce.go | lib/israce/israce.go | //go:build race
// Package israce reports if the Go race detector is enabled.
//
// From https://stackoverflow.com/questions/44944959/how-can-i-check-if-the-race-detector-is-enabled-at-runtime
package israce
// Enabled reports if the race detector is enabled.
const Enabled = true
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/israce/norace.go | lib/israce/norace.go | //go:build !race
// Package israce reports if the Go race detector is enabled.
//
// From https://stackoverflow.com/questions/44944959/how-can-i-check-if-the-race-detector-is-enabled-at-runtime
package israce
// Enabled reports if the race detector is enabled.
const Enabled = false
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/atexit/atexit_test.go | lib/atexit/atexit_test.go | package atexit
import (
"os"
"runtime"
"testing"
"github.com/rclone/rclone/lib/exitcode"
"github.com/stretchr/testify/assert"
)
type fakeSignal struct{}
func (*fakeSignal) String() string {
return "fake"
}
func (*fakeSignal) Signal() {
}
var _ os.Signal = (*fakeSignal)(nil)
func TestExitCode(t *testing.T) {
switch runtime.GOOS {
case "windows", "plan9":
for _, i := range []os.Signal{
os.Interrupt,
os.Kill,
} {
assert.Equal(t, exitCode(i), exitcode.UncategorizedError)
}
default:
// SIGINT (2) and SIGKILL (9) are portable numbers specified by POSIX.
assert.Equal(t, exitCode(os.Interrupt), 128+2)
assert.Equal(t, exitCode(os.Kill), 128+9)
}
// Never a real signal
assert.Equal(t, exitCode(&fakeSignal{}), exitcode.UncategorizedError)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/atexit/atexit.go | lib/atexit/atexit.go | // Package atexit provides handling for functions you want called when
// the program exits unexpectedly due to a signal.
//
// You should also make sure you call Run in the normal exit path.
package atexit
import (
"os"
"os/signal"
"sync"
"sync/atomic"
"github.com/rclone/rclone/fs"
)
var (
fns = make(map[FnHandle]bool)
fnsMutex sync.Mutex
exitChan chan os.Signal
exitOnce sync.Once
registerOnce sync.Once
signalled atomic.Int32
runCalled atomic.Int32
)
// FnHandle is the type of the handle returned by function `Register`
// that can be used to unregister an at-exit function
//
// It is a pointer to the registered function value, so each call to
// Register yields a distinct handle even for the same function.
type FnHandle *func()
// Register a function to be called on exit.
// Returns a handle which can be used to unregister the function with `Unregister`.
//
// Returns nil (a no-op handle) if Run has already been called.
func Register(fn func()) FnHandle {
	if running() {
		return nil
	}
	fnsMutex.Lock()
	fns[&fn] = true
	fnsMutex.Unlock()

	// Run AtExit handlers on exitSignals so everything gets tidied up properly
	registerOnce.Do(func() {
		exitChan = make(chan os.Signal, 1)
		signal.Notify(exitChan, exitSignals...)
		go func() {
			sig := <-exitChan
			if sig == nil {
				// Channel closed by IgnoreSignals - don't run the handlers
				return
			}
			// Restore default signal behaviour so a second signal kills us
			signal.Stop(exitChan)
			signalled.Store(1)
			fs.Infof(nil, "Signal received: %s", sig)
			Run()
			fs.Infof(nil, "Exiting...")
			os.Exit(exitCode(sig))
		}()
	})

	return &fn
}
// Signalled returns true if an exit signal has been received
func Signalled() bool {
	received := signalled.Load()
	return received != 0
}
// running returns true if Run has been called
func running() bool {
	called := runCalled.Load()
	return called != 0
}
// Unregister a function using the handle returned by `Register`
func Unregister(handle FnHandle) {
	// Too late to unregister once the exit handlers are running
	if running() {
		return
	}
	fnsMutex.Lock()
	delete(fns, handle)
	fnsMutex.Unlock()
}
// IgnoreSignals disables the signal handler and prevents Run from being executed automatically
//
// Consuming registerOnce here means a later Register won't install
// the signal handler either.
func IgnoreSignals() {
	if running() {
		return
	}
	registerOnce.Do(func() {})
	if exitChan != nil {
		signal.Stop(exitChan)
		// Closing the channel makes the handler goroutine (if one was
		// started) receive nil and return without running the handlers.
		close(exitChan)
		exitChan = nil
	}
}
// Run all the at exit functions if they haven't been run already
func Run() {
	// Mark as running so Register/Unregister become no-ops from now on
	runCalled.Store(1)
	// Take the lock here (not inside the exitOnce) so we wait
	// until the exit handlers have run before any calls to Run()
	// return.
	fnsMutex.Lock()
	defer fnsMutex.Unlock()
	exitOnce.Do(func() {
		// Note: map iteration order, and hence handler order, is unspecified
		for fnHandle := range fns {
			(*fnHandle)()
		}
	})
}
// OnError registers fn with atexit and returns a function which
// runs fn() if *perr != nil and deregisters fn
//
// It should be used in a defer statement normally so
//
//	defer OnError(&err, cancelFunc)()
//
// So cancelFunc will be run if the function exits with an error or
// at exit.
//
// cancelFunc will only be run once.
func OnError(perr *error, fn func()) func() {
	var once sync.Once
	runOnce := func() {
		once.Do(fn)
	}
	handle := Register(runOnce)
	return func() {
		// Deregister after deciding whether to run
		defer Unregister(handle)
		if *perr != nil {
			runOnce()
		}
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/atexit/atexit_other.go | lib/atexit/atexit_other.go | //go:build windows || plan9
package atexit
import (
"os"
"github.com/rclone/rclone/lib/exitcode"
)
var exitSignals = []os.Signal{os.Interrupt}
// exitCode returns the exit code to use for the given signal.
//
// There is no 128+signum convention on Windows or Plan 9 so an
// uncategorized error code is always returned.
func exitCode(_ os.Signal) int {
	return exitcode.UncategorizedError
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/atexit/atexit_unix.go | lib/atexit/atexit_unix.go | //go:build !windows && !plan9
package atexit
import (
"os"
"syscall"
"github.com/rclone/rclone/lib/exitcode"
)
var exitSignals = []os.Signal{syscall.SIGINT, syscall.SIGTERM} // Not syscall.SIGQUIT as we want the default behaviour
// exitCode calculates the exit code for the given signal. Many Unix programs
// exit with 128+signum if they handle signals. Most shell also implement the
// same convention if a program is terminated by an uncaught and/or fatal
// signal.
func exitCode(sig os.Signal) int {
	real, ok := sig.(syscall.Signal)
	if !ok || int(real) <= 0 {
		// Not a real Unix signal - fall back to a generic error code
		return exitcode.UncategorizedError
	}
	return 128 + int(real)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/multipart/multipart.go | lib/multipart/multipart.go | // Package multipart implements generic multipart uploading.
package multipart
import (
"context"
"fmt"
"io"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/pool"
"golang.org/x/sync/errgroup"
)
const (
// BufferSize is the default size of the pages used in the reader
BufferSize = pool.BufferSize
)
// NewRW gets a pool.RW using the global pool
func NewRW() *pool.RW {
	globalPool := pool.Global()
	return pool.NewRW(globalPool)
}
// UploadMultipartOptions options for the generic multipart upload
type UploadMultipartOptions struct {
	Open        fs.OpenChunkWriter // thing to call OpenChunkWriter on
	OpenOptions []fs.OpenOption    // options for OpenChunkWriter
}
// UploadMultipart does a generic multipart upload from src using f as OpenChunkWriter.
//
// in is read sequentially and chunks from it are uploaded in parallel.
//
// It returns the chunkWriter used in case the caller needs to extract any private info from it.
func UploadMultipart(ctx context.Context, src fs.ObjectInfo, in io.Reader, opt UploadMultipartOptions) (chunkWriterOut fs.ChunkWriter, err error) {
	info, chunkWriter, err := opt.Open.OpenChunkWriter(ctx, src.Remote(), src, opt.OpenOptions...)
	if err != nil {
		return nil, fmt.Errorf("multipart upload failed to initialise: %w", err)
	}

	// make concurrency machinery
	// info.Concurrency <= 0 would stall the token dispenser so clamp to 1
	concurrency := max(info.Concurrency, 1)
	tokens := pacer.NewTokenDispenser(concurrency)

	uploadCtx, cancel := context.WithCancel(ctx)
	defer cancel()
	// Abort the upload if we return with an error or exit on a signal,
	// unless the backend wants parts left for a later resume.
	defer atexit.OnError(&err, func() {
		cancel()
		if info.LeavePartsOnError {
			return
		}
		fs.Debugf(src, "Cancelling multipart upload")
		// Use ctx (not uploadCtx) as uploadCtx is cancelled by this point
		errCancel := chunkWriter.Abort(ctx)
		if errCancel != nil {
			fs.Debugf(src, "Failed to cancel multipart upload: %v", errCancel)
		}
	})()

	var (
		g, gCtx   = errgroup.WithContext(uploadCtx)
		finished  = false
		off       int64 // running offset of the next chunk in the source
		size      = src.Size()
		chunkSize = info.ChunkSize
	)

	// Do the accounting manually
	in, acc := accounting.UnWrapAccounting(in)

	for partNum := int64(0); !finished; partNum++ {
		// Get a block of memory from the pool and token which limits concurrency.
		tokens.Get()
		rw := NewRW().Reserve(chunkSize)
		if acc != nil {
			rw.SetAccounting(acc.AccountRead)
		}

		free := func() {
			// return the memory and token
			_ = rw.Close() // Can't return an error
			tokens.Put()
		}

		// Fail fast, in case an errgroup managed function returns an error
		// gCtx is cancelled. There is no point in uploading all the other parts.
		if gCtx.Err() != nil {
			free()
			break
		}

		// Read the chunk
		var n int64
		n, err = io.CopyN(rw, in, chunkSize)
		if err == io.EOF {
			if n == 0 && partNum != 0 { // end if no data and if not first chunk
				free()
				break
			}
			// A short final chunk (or an empty source) - upload it then stop
			finished = true
		} else if err != nil {
			free()
			return nil, fmt.Errorf("multipart upload: failed to read source: %w", err)
		}

		// Capture per-iteration values for the closure below
		partNum := partNum
		partOff := off
		off += n
		g.Go(func() (err error) {
			defer free()
			fs.Debugf(src, "multipart upload: starting chunk %d size %v offset %v/%v", partNum, fs.SizeSuffix(n), fs.SizeSuffix(partOff), fs.SizeSuffix(size))
			_, err = chunkWriter.WriteChunk(gCtx, int(partNum), rw)
			return err
		})
	}

	// Wait for all in-flight chunk uploads to finish
	err = g.Wait()
	if err != nil {
		return nil, err
	}

	// Finalise with ctx (not uploadCtx) so finalisation isn't cancelled by us
	err = chunkWriter.Close(ctx)
	if err != nil {
		return nil, fmt.Errorf("multipart upload: failed to finalise: %w", err)
	}

	return chunkWriter, nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/rest/rest.go | lib/rest/rest.go | // Package rest implements a simple REST wrapper
//
// All methods are safe for concurrent calling.
package rest
import (
"bytes"
"context"
"encoding/json"
"encoding/xml"
"errors"
"fmt"
"io"
"maps"
"mime/multipart"
"net/http"
"net/url"
"sync"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/readers"
)
// Client contains the info to sustain the API
type Client struct {
	mu           sync.RWMutex // protects the fields below
	c            *http.Client // underlying http client used for the transactions
	rootURL      string       // default URL prepended to Opts.Path (see SetRoot)
	errorHandler func(resp *http.Response) error // decodes a non-2xx response into an error
	headers      map[string]string // headers added to every request
	signer       SignerFn // optional function called to sign each request
}

// NewClient takes an oauth http.Client and makes a new api instance
//
// The client starts with the default error handler - use the Set*
// methods to configure it further.
func NewClient(c *http.Client) *Client {
	api := &Client{
		c:            c,
		errorHandler: defaultErrorHandler,
		headers:      make(map[string]string),
	}
	return api
}
// ReadBody reads resp.Body into result, closing the body
//
// Any error from closing the body is recorded in err via
// fs.CheckClose.
func ReadBody(resp *http.Response) (result []byte, err error) {
	defer fs.CheckClose(resp.Body, &err)
	return io.ReadAll(resp.Body)
}
// defaultErrorHandler doesn't attempt to parse the http body, just
// returns it in the error message closing resp.Body
//
// This is the error handler installed by NewClient - replace it with
// SetErrorHandler for API specific error decoding.
func defaultErrorHandler(resp *http.Response) (err error) {
	var body []byte
	if body, err = ReadBody(resp); err != nil {
		return fmt.Errorf("error reading error out of body: %w", err)
	}
	return fmt.Errorf("HTTP error %v (%v) returned body: %q", resp.StatusCode, resp.Status, body)
}
// SetErrorHandler sets the handler to decode an error response when
// the HTTP status code is not 2xx. The handler should close resp.Body.
//
// It returns the Client so calls can be chained.
func (api *Client) SetErrorHandler(fn func(resp *http.Response) error) *Client {
	api.mu.Lock()
	defer api.mu.Unlock()
	api.errorHandler = fn
	return api
}

// SetRoot sets the default RootURL. You can override this on a per
// call basis using the RootURL field in Opts.
//
// It returns the Client so calls can be chained.
func (api *Client) SetRoot(RootURL string) *Client {
	api.mu.Lock()
	defer api.mu.Unlock()
	api.rootURL = RootURL
	return api
}

// SetHeader sets a header for all requests
// Start the key with "*" for don't canonicalise
//
// It returns the Client so calls can be chained.
func (api *Client) SetHeader(key, value string) *Client {
	api.mu.Lock()
	defer api.mu.Unlock()
	api.headers[key] = value
	return api
}

// RemoveHeader unsets a header for all requests
//
// It returns the Client so calls can be chained.
func (api *Client) RemoveHeader(key string) *Client {
	api.mu.Lock()
	defer api.mu.Unlock()
	delete(api.headers, key)
	return api
}

// SignerFn is used to sign an outgoing request
type SignerFn func(*http.Request) error

// SetSigner sets a signer for all requests
//
// The signer is called with the assembled request just before it is
// sent. It returns the Client so calls can be chained.
func (api *Client) SetSigner(signer SignerFn) *Client {
	api.mu.Lock()
	defer api.mu.Unlock()
	api.signer = signer
	return api
}
// SetUserPass creates an Authorization header for all requests with
// the UserName and Password passed in
func (api *Client) SetUserPass(UserName, Password string) *Client {
	// Use a throwaway request so the standard library builds the
	// Basic Auth header value. The error is ignored as the request
	// parameters are constant and known to be valid.
	req, _ := http.NewRequest("GET", "http://example.com", nil)
	req.SetBasicAuth(UserName, Password)
	api.SetHeader("Authorization", req.Header.Get("Authorization"))
	return api
}
// SetCookie creates a Cookies Header for all requests with the supplied
// cookies passed in.
// All cookies have to be supplied at once, all cookies will be overwritten
// on a new call to the method
func (api *Client) SetCookie(cks ...*http.Cookie) *Client {
	// Use a throwaway request so the standard library serialises the
	// cookies into a single Cookie header value. The error is ignored
	// as the request parameters are constant and known to be valid.
	req, _ := http.NewRequest("GET", "http://example.com", nil)
	for _, ck := range cks {
		req.AddCookie(ck)
	}
	api.SetHeader("Cookie", req.Header.Get("Cookie"))
	return api
}
// Opts contains parameters for Call, CallJSON, etc.
type Opts struct {
Method string // GET, POST, etc.
Path string // relative to RootURL
RootURL string // override RootURL passed into SetRoot()
Body io.Reader
GetBody func() (io.ReadCloser, error) // body builder, needed to enable low-level HTTP/2 retries
NoResponse bool // set to close Body
ContentType string
ContentLength *int64
ContentRange string
ExtraHeaders map[string]string // extra headers, start them with "*" for don't canonicalise
UserName string // username for Basic Auth
Password string // password for Basic Auth
Options []fs.OpenOption
IgnoreStatus bool // if set then we don't check error status or parse error body
MultipartParams url.Values // if set do multipart form upload with attached file
MultipartMetadataName string // ..this is used for the name of the metadata form part if set
MultipartContentName string // ..name of the parameter which is the attached file
MultipartFileName string // ..name of the file for the attached file
Parameters url.Values // any parameters for the final URL
TransferEncoding []string // transfer encoding, set to "identity" to disable chunked encoding
Trailer *http.Header // set the request trailer
Close bool // set to close the connection after this transaction
NoRedirect bool // if this is set then the client won't follow redirects
// On Redirects, call this function - see the http.Client docs: https://pkg.go.dev/net/http#Client
CheckRedirect func(req *http.Request, via []*http.Request) error
AuthRedirect bool // if this is set then the client will redirect with Auth
}
// Copy creates a copy of the options
//
// Note this is a shallow copy - reference fields (eg Parameters,
// ExtraHeaders, Body) are shared with the original.
func (o *Opts) Copy() *Opts {
	newOpts := *o
	return &newOpts
}
// drainLimit is the maximum number of bytes read from a body before
// giving up and closing it anyway.
const drainLimit = 10 * 1024 * 1024

// drainAndClose discards up to drainLimit bytes from r and closes
// it. Any errors from the Read or Close are returned.
//
// Draining the body allows the underlying HTTP connection to be
// reused. A read error takes precedence over a close error.
func drainAndClose(r io.ReadCloser) (err error) {
	_, drainErr := io.CopyN(io.Discard, r, drainLimit)
	if drainErr == io.EOF {
		// EOF just means the body was shorter than drainLimit
		drainErr = nil
	}
	closeErr := r.Close()
	if drainErr != nil {
		return drainErr
	}
	return closeErr
}
// checkDrainAndClose is a utility function used to check the return
// from drainAndClose in a defer statement.
//
// It only writes the drain/close error into *err if *err is nil, so
// an earlier error from the caller takes precedence.
func checkDrainAndClose(r io.ReadCloser, err *error) {
	cerr := drainAndClose(r)
	if *err == nil {
		*err = cerr
	}
}
// DecodeJSON decodes resp.Body into result
//
// The body is drained and closed afterwards (see drainAndClose). A
// close error is only returned if the decode itself succeeded.
func DecodeJSON(resp *http.Response, result any) (err error) {
	defer checkDrainAndClose(resp.Body, &err)
	decoder := json.NewDecoder(resp.Body)
	return decoder.Decode(result)
}
// DecodeXML decodes resp.Body into result
//
// The body is drained and closed afterwards (see drainAndClose). A
// close error is only returned if the decode itself succeeded.
func DecodeXML(resp *http.Response, result any) (err error) {
	defer checkDrainAndClose(resp.Body, &err)
	decoder := xml.NewDecoder(resp.Body)
	// MEGAcmd has included escaped HTML entities in its XML output, so we have to be able to
	// decode them.
	decoder.Strict = false
	decoder.Entity = xml.HTMLEntity
	return decoder.Decode(result)
}
// ClientWithNoRedirects makes a new http client which won't follow redirects
//
// The client passed in is shallow copied so is left unchanged.
func ClientWithNoRedirects(c *http.Client) *http.Client {
	noRedirects := *c
	noRedirects.CheckRedirect = func(*http.Request, []*http.Request) error {
		// Tells net/http to return the redirect response itself
		// rather than following it.
		return http.ErrUseLastResponse
	}
	return &noRedirects
}
// Do calls the internal http.Client.Do method
//
// Unlike Call this does not apply the rootURL, default headers or the
// signer - it sends the request exactly as given.
func (api *Client) Do(req *http.Request) (*http.Response, error) {
	return api.c.Do(req)
}
// ClientWithAuthRedirects makes a new http client which will re-apply Auth on redirects
//
// The client passed in is shallow copied so is left unchanged. The
// Authorization header from the previous request is copied onto the
// redirected request when the previous response was a redirect.
func ClientWithAuthRedirects(c *http.Client) *http.Client {
	authRedirects := *c
	authRedirects.CheckRedirect = func(req *http.Request, via []*http.Request) error {
		switch {
		case len(via) >= 10:
			return errors.New("stopped after 10 redirects")
		case len(via) == 0:
			return nil
		}
		resp := req.Response
		if resp == nil {
			return nil
		}
		// Look at previous response to see if it was a redirect and preserve auth if so
		switch resp.StatusCode {
		case http.StatusMovedPermanently, http.StatusFound, http.StatusSeeOther, http.StatusTemporaryRedirect, http.StatusPermanentRedirect:
			// Reapply Auth (if any) from previous request on redirect
			if auth := via[len(via)-1].Header.Get("Authorization"); auth != "" {
				req.Header.Add("Authorization", auth)
			}
		}
		return nil
	}
	return &authRedirects
}
// Call makes the call and returns the http.Response
//
// if err == nil then resp.Body will need to be closed unless
// opt.NoResponse is set
//
// if err != nil then resp.Body will have been closed
//
// it will return resp if at all possible, even if err is set
func (api *Client) Call(ctx context.Context, opts *Opts) (resp *http.Response, err error) {
	// Hold the read lock while building the request so concurrent
	// configuration changes (SetRoot, SetHeader etc) can't race with us.
	api.mu.RLock()
	defer api.mu.RUnlock()
	if opts == nil {
		return nil, errors.New("call() called with nil opts")
	}
	// Work out the full URL - a per-call RootURL overrides the client's.
	url := api.rootURL
	if opts.RootURL != "" {
		url = opts.RootURL
	}
	if url == "" {
		return nil, errors.New("RootURL not set")
	}
	url += opts.Path
	if len(opts.Parameters) > 0 {
		url += "?" + opts.Parameters.Encode()
	}
	body := readers.NoCloser(opts.Body)
	// If length is set and zero then nil out the body to stop use
	// use of chunked encoding and insert a "Content-Length: 0"
	// header.
	//
	// If we don't do this we get "Content-Length" headers for all
	// files except 0 length files.
	if opts.ContentLength != nil && *opts.ContentLength == 0 {
		body = nil
	}
	req, err := http.NewRequestWithContext(ctx, opts.Method, url, body)
	if err != nil {
		return
	}
	headers := make(map[string]string)
	// Set default headers
	maps.Copy(headers, api.headers)
	if opts.ContentType != "" {
		headers["Content-Type"] = opts.ContentType
	}
	if opts.ContentLength != nil {
		req.ContentLength = *opts.ContentLength
	}
	if opts.ContentRange != "" {
		headers["Content-Range"] = opts.ContentRange
	}
	if len(opts.TransferEncoding) != 0 {
		req.TransferEncoding = opts.TransferEncoding
	}
	if opts.GetBody != nil {
		req.GetBody = opts.GetBody
	}
	if opts.Trailer != nil {
		req.Trailer = *opts.Trailer
	}
	if opts.Close {
		req.Close = true
	}
	// Set any extra headers - these override the defaults with the same key
	maps.Copy(headers, opts.ExtraHeaders)
	// add any options to the headers
	fs.OpenOptionAddHeaders(opts.Options, headers)
	// Now set the headers
	for k, v := range headers {
		if k != "" && v != "" {
			if k[0] == '*' {
				// Add non-canonical version if header starts with *
				k = k[1:]
				req.Header[k] = append(req.Header[k], v)
			} else {
				req.Header.Add(k, v)
			}
		}
	}
	if opts.UserName != "" || opts.Password != "" {
		req.SetBasicAuth(opts.UserName, opts.Password)
	}
	// Choose the http client, wrapping it to adjust redirect behaviour
	// if requested in the opts.
	var c *http.Client
	if opts.NoRedirect {
		c = ClientWithNoRedirects(api.c)
	} else if opts.CheckRedirect != nil {
		clientCopy := *api.c
		clientCopy.CheckRedirect = opts.CheckRedirect
		c = &clientCopy
	} else if opts.AuthRedirect {
		c = ClientWithAuthRedirects(api.c)
	} else {
		c = api.c
	}
	if api.signer != nil {
		// Release the read lock while the signer runs - presumably so
		// a slow or re-entrant signer can't block other users of the
		// Client. The deferred RUnlock pairs with the RLock below.
		api.mu.RUnlock()
		err = api.signer(req)
		api.mu.RLock()
		if err != nil {
			return nil, fmt.Errorf("signer failed: %w", err)
		}
	}
	// Release the read lock for the duration of the network round trip too.
	api.mu.RUnlock()
	resp, err = c.Do(req)
	api.mu.RLock()
	if err != nil {
		return nil, err
	}
	if !opts.IgnoreStatus {
		if resp.StatusCode < 200 || resp.StatusCode > 299 {
			err = api.errorHandler(resp)
			if err.Error() == "" {
				// replace empty errors with something
				err = fmt.Errorf("http error %d: %v", resp.StatusCode, resp.Status)
			}
			return resp, err
		}
	}
	if opts.NoResponse {
		// Caller doesn't want the body so drain and close it here
		return resp, drainAndClose(resp.Body)
	}
	return resp, nil
}
// MultipartUpload creates an io.Reader which produces an encoded
// multipart form upload from the params passed in and the passed in
//
// in - the body of the file (may be nil)
// params - the form parameters
// fileName - is the name of the attached file
// contentName - the name of the parameter for the file
//
// the int64 returned is the overhead in addition to the file contents, in case Content-Length is required
//
// NB This doesn't allow setting the content type of the attachment
func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, contentName, fileName string) (io.ReadCloser, string, int64, error) {
	bodyReader, bodyWriter := io.Pipe()
	writer := multipart.NewWriter(bodyWriter)
	contentType := writer.FormDataContentType()
	// Create a Multipart Writer as base for calculating the Content-Length
	//
	// The dummy writer emits the same fields (but not the file data)
	// into a buffer so the size of the framing can be measured.
	buf := &bytes.Buffer{}
	dummyMultipartWriter := multipart.NewWriter(buf)
	err := dummyMultipartWriter.SetBoundary(writer.Boundary())
	if err != nil {
		return nil, "", 0, err
	}
	for key, vals := range params {
		for _, val := range vals {
			err := dummyMultipartWriter.WriteField(key, val)
			if err != nil {
				return nil, "", 0, err
			}
		}
	}
	if in != nil {
		_, err = dummyMultipartWriter.CreateFormFile(contentName, fileName)
		if err != nil {
			return nil, "", 0, err
		}
	}
	err = dummyMultipartWriter.Close()
	if err != nil {
		return nil, "", 0, err
	}
	multipartLength := int64(buf.Len())
	// Make sure we close the pipe writer to release the reader on context cancel
	quit := make(chan struct{})
	go func() {
		select {
		case <-quit:
			// Pump goroutine finished normally
			break
		case <-ctx.Done():
			_ = bodyWriter.CloseWithError(ctx.Err())
		}
	}()
	// Pump the data in the background
	//
	// Writes to the pipe block until the returned reader is read, so
	// the form must be produced in a goroutine. Errors are delivered
	// to the reader via CloseWithError.
	go func() {
		defer close(quit)
		var err error
		for key, vals := range params {
			for _, val := range vals {
				err = writer.WriteField(key, val)
				if err != nil {
					_ = bodyWriter.CloseWithError(fmt.Errorf("create metadata part: %w", err))
					return
				}
			}
		}
		if in != nil {
			part, err := writer.CreateFormFile(contentName, fileName)
			if err != nil {
				_ = bodyWriter.CloseWithError(fmt.Errorf("failed to create form file: %w", err))
				return
			}
			_, err = io.Copy(part, in)
			if err != nil {
				_ = bodyWriter.CloseWithError(fmt.Errorf("failed to copy data: %w", err))
				return
			}
		}
		err = writer.Close()
		if err != nil {
			_ = bodyWriter.CloseWithError(fmt.Errorf("failed to close form: %w", err))
			return
		}
		_ = bodyWriter.Close()
	}()
	return bodyReader, contentType, multipartLength, nil
}
// CallJSON runs Call and decodes the body as a JSON object into response (if not nil)
//
// If request is not nil then it will be JSON encoded as the body of the request.
//
// If response is not nil then the response will be JSON decoded into
// it and resp.Body will be closed.
//
// If response is nil then the resp.Body will be closed only if
// opts.NoResponse is set.
//
// If (opts.MultipartParams or opts.MultipartContentName) and
// opts.Body are set then CallJSON will do a multipart upload with a
// file attached. opts.MultipartContentName is the name of the
// parameter and opts.MultipartFileName is the name of the file. If
// MultipartContentName is set, and request != nil is supplied, then
// the request will be marshalled into JSON and added to the form with
// parameter name MultipartMetadataName.
//
// It will return resp if at all possible, even if err is set
func (api *Client) CallJSON(ctx context.Context, opts *Opts, request any, response any) (resp *http.Response, err error) {
return api.callCodec(ctx, opts, request, response, json.Marshal, DecodeJSON, "application/json")
}
// CallXML runs Call and decodes the body as an XML object into response (if not nil)
//
// If request is not nil then it will be XML encoded as the body of the request.
//
// If response is not nil then the response will be XML decoded into
// it and resp.Body will be closed.
//
// If response is nil then the resp.Body will be closed only if
// opts.NoResponse is set.
//
// See CallJSON for a description of MultipartParams and related opts.
//
// It will return resp if at all possible, even if err is set
func (api *Client) CallXML(ctx context.Context, opts *Opts, request any, response any) (resp *http.Response, err error) {
return api.callCodec(ctx, opts, request, response, xml.Marshal, DecodeXML, "application/xml")
}
// marshalFn encodes v into bytes (eg json.Marshal, xml.Marshal)
type marshalFn func(v any) ([]byte, error)

// decodeFn decodes resp.Body into result, closing the body (eg DecodeJSON, DecodeXML)
type decodeFn func(resp *http.Response, result any) (err error)

// callCodec is the engine behind CallJSON and CallXML - it marshals
// request (if not nil) with marshal, optionally wraps it in a
// multipart form, makes the call, then decodes the response into
// response (if not nil) with decode.
func (api *Client) callCodec(ctx context.Context, opts *Opts, request any, response any, marshal marshalFn, decode decodeFn, contentType string) (resp *http.Response, err error) {
	var requestBody []byte
	// Marshal the request if given
	if request != nil {
		requestBody, err = marshal(request)
		if err != nil {
			return nil, err
		}
		// Set the body up as a marshalled object if no body passed in
		if opts.Body == nil {
			// Copy opts so the caller's copy isn't modified
			opts = opts.Copy()
			opts.ContentType = contentType
			opts.Body = bytes.NewBuffer(requestBody)
		}
	}
	if opts.MultipartParams != nil || opts.MultipartContentName != "" {
		params := opts.MultipartParams
		if params == nil {
			params = url.Values{}
		}
		// Attach the marshalled request as a form field if requested
		if opts.MultipartMetadataName != "" {
			params.Add(opts.MultipartMetadataName, string(requestBody))
		}
		opts = opts.Copy()

		var overhead int64
		opts.Body, opts.ContentType, overhead, err = MultipartUpload(ctx, opts.Body, params, opts.MultipartContentName, opts.MultipartFileName)
		if err != nil {
			return nil, err
		}
		// Adjust Content-Length for the multipart framing
		if opts.ContentLength != nil {
			*opts.ContentLength += overhead
		}
	}
	resp, err = api.Call(ctx, opts)
	if err != nil {
		return resp, err
	}
	// if opts.NoResponse is set, resp.Body will have been closed by Call()
	if response == nil || opts.NoResponse {
		return resp, nil
	}
	err = decode(resp, response)
	return resp, err
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/rest/url.go | lib/rest/url.go | package rest
import (
"fmt"
"net/url"
"strings"
)
// URLJoin joins a URL and a path returning a new URL
//
// path should be URL escaped
func URLJoin(base *url.URL, path string) (*url.URL, error) {
	rel, parseErr := url.Parse(path)
	if parseErr != nil {
		return nil, fmt.Errorf("error parsing %q as URL: %w", path, parseErr)
	}
	joined := base.ResolveReference(rel)
	return joined, nil
}
// URLPathEscape escapes URL path the in string using URL escaping rules
//
// This mimics url.PathEscape which only available from go 1.8
func URLPathEscape(in string) string {
	// Let url.URL do the escaping by treating in as the path component.
	u := url.URL{Path: in}
	return u.String()
}
// URLPathEscapeAll escapes URL path the in string using URL escaping rules
//
// It escapes every character except [A-Za-z0-9] and /
func URLPathEscapeAll(in string) string {
	const hexDigits = "0123456789ABCDEF"
	var out strings.Builder
	out.Grow(3 * len(in)) // worst case: every byte escaped
	for _, c := range []byte(in) {
		switch {
		case c >= 'A' && c <= 'Z', c >= 'a' && c <= 'z', c >= '0' && c <= '9', c == '/':
			out.WriteByte(c)
		default:
			out.WriteByte('%')
			out.WriteByte(hexDigits[c>>4])
			out.WriteByte(hexDigits[c&0x0F])
		}
	}
	return out.String()
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/rest/headers_test.go | lib/rest/headers_test.go | package rest
import (
"net/http"
"testing"
"github.com/stretchr/testify/assert"
)
func TestParseSizeFromHeaders(t *testing.T) {
testCases := []struct {
ContentLength, ContentRange string
Size int64
}{{
"", "", -1,
}, {
"42", "", 42,
}, {
"42", "invalid", -1,
}, {
"", "bytes 22-33/42", 42,
}, {
"12", "bytes 22-33/42", 42,
}, {
"12", "otherUnit 22-33/42", -1,
}, {
"12", "bytes 22-33/*", -1,
}, {
"0", "bytes */42", 42,
}}
for _, testCase := range testCases {
headers := make(http.Header, 2)
if len(testCase.ContentLength) > 0 {
headers.Set("Content-Length", testCase.ContentLength)
}
if len(testCase.ContentRange) > 0 {
headers.Set("Content-Range", testCase.ContentRange)
}
assert.Equalf(t, testCase.Size, ParseSizeFromHeaders(headers), "%+v", testCase)
}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/rest/url_test.go | lib/rest/url_test.go | package rest
import (
"fmt"
"net/url"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestURLJoin(t *testing.T) {
for i, test := range []struct {
base string
path string
wantOK bool
want string
}{
{"http://example.com/", "potato", true, "http://example.com/potato"},
{"http://example.com/dir/", "potato", true, "http://example.com/dir/potato"},
{"http://example.com/dir/", "../dir/potato", true, "http://example.com/dir/potato"},
{"http://example.com/dir/", "..", true, "http://example.com/"},
{"http://example.com/dir/", "http://example.com/", true, "http://example.com/"},
{"http://example.com/dir/", "http://example.com/dir/", true, "http://example.com/dir/"},
{"http://example.com/dir/", "http://example.com/dir/potato", true, "http://example.com/dir/potato"},
{"http://example.com/dir/", "/dir/", true, "http://example.com/dir/"},
{"http://example.com/dir/", "/dir/potato", true, "http://example.com/dir/potato"},
{"http://example.com/dir/", "subdir/potato", true, "http://example.com/dir/subdir/potato"},
{"http://example.com/dir/", "With percent %25.txt", true, "http://example.com/dir/With%20percent%20%25.txt"},
{"http://example.com/dir/", "With colon :", false, ""},
{"http://example.com/dir/", URLPathEscape("With colon :"), true, "http://example.com/dir/With%20colon%20:"},
} {
u, err := url.Parse(test.base)
require.NoError(t, err)
got, err := URLJoin(u, test.path)
gotOK := err == nil
what := fmt.Sprintf("test %d base=%q, val=%q", i, test.base, test.path)
assert.Equal(t, test.wantOK, gotOK, what)
var gotString string
if gotOK {
gotString = got.String()
}
assert.Equal(t, test.want, gotString, what)
}
}
func TestURLPathEscape(t *testing.T) {
for i, test := range []struct {
path string
want string
}{
{"", ""},
{"/hello.txt", "/hello.txt"},
{"With Space", "With%20Space"},
{"With Colon:", "./With%20Colon:"},
{"With Percent%", "With%20Percent%25"},
} {
got := URLPathEscape(test.path)
assert.Equal(t, test.want, got, fmt.Sprintf("Test %d path = %q", i, test.path))
}
}
func TestURLPathEscapeAll(t *testing.T) {
tests := []struct {
in string
want string
}{
{"", ""},
{"/hello.txt", "/hello%2Etxt"},
{"With Space", "With%20Space"},
{"With Colon:", "With%20Colon%3A"},
{"With Percent%", "With%20Percent%25"},
{"abc/XYZ123", "abc/XYZ123"},
{"hello world", "hello%20world"},
{"$test", "%24test"},
{"ümlaut", "%C3%BCmlaut"},
{"", ""},
{" /?", "%20/%3F"},
}
for _, test := range tests {
got := URLPathEscapeAll(test.in)
assert.Equal(t, test.want, got)
}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/rest/headers.go | lib/rest/headers.go | package rest
import (
"net/http"
"strconv"
"strings"
)
// ParseSizeFromHeaders parses HTTP response headers to get the full file size.
// Returns -1 if the headers did not exist or were invalid.
//
// Content-Range (eg "bytes 22-33/42") takes precedence over
// Content-Length as it carries the full size of a partial response.
func ParseSizeFromHeaders(headers http.Header) (size int64) {
	size = -1
	if contentLength := headers.Get("Content-Length"); contentLength != "" {
		parsed, err := strconv.ParseInt(contentLength, 10, 64)
		if err != nil {
			return -1
		}
		size = parsed
	}
	contentRange := headers.Get("Content-Range")
	if contentRange == "" {
		return size
	}
	rest, ok := strings.CutPrefix(contentRange, "bytes ")
	if !ok {
		return -1
	}
	slash := strings.IndexByte(rest, '/')
	if slash < 0 {
		return -1
	}
	// The full size follows the slash; "*" (unknown) fails the parse
	// and so returns -1.
	total, err := strconv.ParseInt(rest[slash+1:], 10, 64)
	if err != nil {
		return -1
	}
	return total
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/random/random_test.go | lib/random/random_test.go | package random
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestStringLength(t *testing.T) {
for i := range 100 {
s := String(i)
assert.Equal(t, i, len(s))
}
}
func TestStringDuplicates(t *testing.T) {
seen := map[string]bool{}
for range 100 {
s := String(8)
assert.False(t, seen[s])
assert.Equal(t, 8, len(s))
seen[s] = true
}
}
func TestPasswordLength(t *testing.T) {
for i := 0; i <= 128; i++ {
s, err := Password(i)
require.NoError(t, err)
// expected length is number of bytes rounded up
expected := i / 8
if i%8 != 0 {
expected++
}
// then converted to base 64
expected = (expected*8 + 5) / 6
assert.Equal(t, expected, len(s), i)
}
}
func TestPasswordDuplicates(t *testing.T) {
seen := map[string]bool{}
for range 100 {
s, err := Password(64)
require.NoError(t, err)
assert.False(t, seen[s])
seen[s] = true
}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/random/random.go | lib/random/random.go | // Package random holds a few functions for working with random numbers
package random
import (
cryptorand "crypto/rand"
"encoding/base64"
"fmt"
"io"
)
// StringFn create a random string for test purposes using the random
// number generator function passed in.
//
// Do not use these for passwords.
//
// The output alternates consonant/vowel pairs ending each group of 8
// with a digit so the result is pronounceable and so just about
// memorable.
func StringFn(n int, randReader io.Reader) string {
	const (
		vowel     = "aeiou"
		consonant = "bcdfghjklmnpqrstvwxyz"
		digit     = "0123456789"
	)
	pattern := []string{consonant, vowel, consonant, vowel, consonant, vowel, consonant, digit}
	buf := make([]byte, n)
	if _, err := io.ReadFull(randReader, buf); err != nil {
		panic(fmt.Sprintf("internal error: failed to read from random reader: %v", err))
	}
	for i, b := range buf {
		alphabet := pattern[i%len(pattern)]
		// this generation method means the distribution is slightly biased. However these
		// strings are not for passwords so this is deemed OK.
		buf[i] = alphabet[b%byte(len(alphabet))]
	}
	return string(buf)
}
// String create a random string for test purposes.
//
// Do not use these for passwords.
//
// Randomness comes from crypto/rand but the mapping in StringFn is
// slightly biased, hence the warning above.
func String(n int) string {
	return StringFn(n, cryptorand.Reader)
}
// Password creates a crypto strong password which is just about
// memorable. The password is composed of printable ASCII characters
// from the URL encoding base64 alphabet (A-Za-z0-9_-).
//
// Requires password strength in bits.
// 64 is just about memorable
// 128 is secure
func Password(bits int) (password string, err error) {
	// Round the bit count up to whole bytes
	nBytes := bits / 8
	if bits%8 != 0 {
		nBytes++
	}
	pw := make([]byte, nBytes)
	n, err := cryptorand.Read(pw)
	if err != nil {
		return "", fmt.Errorf("password read failed: %w", err)
	}
	if n != nBytes {
		return "", fmt.Errorf("password short read: %d", n)
	}
	return base64.RawURLEncoding.EncodeToString(pw), nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/batcher/batcher_test.go | lib/batcher/batcher_test.go | package batcher
import (
"context"
"errors"
"fmt"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type (
Result string
Item string
)
func TestBatcherNew(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
opt := Options{
Mode: "async",
Size: 100,
Timeout: 1 * time.Second,
MaxBatchSize: 1000,
DefaultTimeoutSync: 500 * time.Millisecond,
DefaultTimeoutAsync: 10 * time.Second,
DefaultBatchSizeAsync: 100,
}
commitBatch := func(ctx context.Context, items []Item, results []Result, errors []error) (err error) {
return nil
}
b, err := New[Item, Result](ctx, nil, commitBatch, opt)
require.NoError(t, err)
require.True(t, b.Batching())
b.Shutdown()
opt.Mode = "sync"
b, err = New[Item, Result](ctx, nil, commitBatch, opt)
require.NoError(t, err)
require.True(t, b.Batching())
b.Shutdown()
opt.Mode = "off"
b, err = New[Item, Result](ctx, nil, commitBatch, opt)
require.NoError(t, err)
require.False(t, b.Batching())
b.Shutdown()
opt.Mode = "bad"
_, err = New[Item, Result](ctx, nil, commitBatch, opt)
require.ErrorContains(t, err, "batch mode")
opt.Mode = "async"
opt.Size = opt.MaxBatchSize + 1
_, err = New[Item, Result](ctx, nil, commitBatch, opt)
require.ErrorContains(t, err, "batch size")
opt.Mode = "sync"
opt.Size = 0
opt.Timeout = 0
b, err = New[Item, Result](ctx, nil, commitBatch, opt)
require.NoError(t, err)
assert.Equal(t, ci.Transfers, b.opt.Size)
assert.Equal(t, opt.DefaultTimeoutSync, b.opt.Timeout)
b.Shutdown()
opt.Mode = "async"
opt.Size = 0
opt.Timeout = 0
b, err = New[Item, Result](ctx, nil, commitBatch, opt)
require.NoError(t, err)
assert.Equal(t, opt.DefaultBatchSizeAsync, b.opt.Size)
assert.Equal(t, opt.DefaultTimeoutAsync, b.opt.Timeout)
b.Shutdown()
// Check we get an error on commit
_, err = b.Commit(ctx, "last", Item("last"))
require.ErrorContains(t, err, "shutting down")
}
func TestBatcherCommit(t *testing.T) {
ctx := context.Background()
opt := Options{
Mode: "sync",
Size: 3,
Timeout: 1 * time.Second,
MaxBatchSize: 1000,
DefaultTimeoutSync: 500 * time.Millisecond,
DefaultTimeoutAsync: 10 * time.Second,
DefaultBatchSizeAsync: 100,
}
var wg sync.WaitGroup
errFail := errors.New("fail")
var commits int
var totalSize int
commitBatch := func(ctx context.Context, items []Item, results []Result, errors []error) (err error) {
commits += 1
totalSize += len(items)
for i := range items {
if items[i] == "5" {
errors[i] = errFail
} else {
results[i] = Result(items[i]) + " result"
}
}
return nil
}
b, err := New[Item, Result](ctx, nil, commitBatch, opt)
require.NoError(t, err)
defer b.Shutdown()
for i := range 10 {
wg.Add(1)
s := fmt.Sprintf("%d", i)
go func() {
defer wg.Done()
result, err := b.Commit(ctx, s, Item(s))
if s == "5" {
assert.True(t, errors.Is(err, errFail))
} else {
require.NoError(t, err)
assert.Equal(t, Result(s+" result"), result)
}
}()
}
wg.Wait()
assert.Equal(t, 4, commits)
assert.Equal(t, 10, totalSize)
}
func TestBatcherCommitFail(t *testing.T) {
ctx := context.Background()
opt := Options{
Mode: "sync",
Size: 3,
Timeout: 1 * time.Second,
MaxBatchSize: 1000,
DefaultTimeoutSync: 500 * time.Millisecond,
DefaultTimeoutAsync: 10 * time.Second,
DefaultBatchSizeAsync: 100,
}
var wg sync.WaitGroup
errFail := errors.New("fail")
var commits int
var totalSize int
commitBatch := func(ctx context.Context, items []Item, results []Result, errors []error) (err error) {
commits += 1
totalSize += len(items)
return errFail
}
b, err := New[Item, Result](ctx, nil, commitBatch, opt)
require.NoError(t, err)
defer b.Shutdown()
for i := range 10 {
wg.Add(1)
s := fmt.Sprintf("%d", i)
go func() {
defer wg.Done()
_, err := b.Commit(ctx, s, Item(s))
assert.True(t, errors.Is(err, errFail))
}()
}
wg.Wait()
assert.Equal(t, 4, commits)
assert.Equal(t, 10, totalSize)
}
func TestBatcherCommitShutdown(t *testing.T) {
ctx := context.Background()
opt := Options{
Mode: "sync",
Size: 3,
Timeout: 1 * time.Second,
MaxBatchSize: 1000,
DefaultTimeoutSync: 500 * time.Millisecond,
DefaultTimeoutAsync: 10 * time.Second,
DefaultBatchSizeAsync: 100,
}
var wg sync.WaitGroup
var commits int
var totalSize int
commitBatch := func(ctx context.Context, items []Item, results []Result, errors []error) (err error) {
commits += 1
totalSize += len(items)
for i := range items {
results[i] = Result(items[i])
}
return nil
}
b, err := New[Item, Result](ctx, nil, commitBatch, opt)
require.NoError(t, err)
for i := range 10 {
wg.Add(1)
s := fmt.Sprintf("%d", i)
go func() {
defer wg.Done()
result, err := b.Commit(ctx, s, Item(s))
assert.NoError(t, err)
assert.Equal(t, Result(s), result)
}()
}
time.Sleep(100 * time.Millisecond)
b.Shutdown() // shutdown with batches outstanding
wg.Wait()
assert.Equal(t, 4, commits)
assert.Equal(t, 10, totalSize)
}
func TestBatcherCommitAsync(t *testing.T) {
ctx := context.Background()
opt := Options{
Mode: "async",
Size: 3,
Timeout: 1 * time.Second,
MaxBatchSize: 1000,
DefaultTimeoutSync: 500 * time.Millisecond,
DefaultTimeoutAsync: 10 * time.Second,
DefaultBatchSizeAsync: 100,
}
var wg sync.WaitGroup
errFail := errors.New("fail")
var commits atomic.Int32
var totalSize atomic.Int32
commitBatch := func(ctx context.Context, items []Item, results []Result, errors []error) (err error) {
wg.Add(1)
defer wg.Done()
// t.Logf("commit %d", len(items))
commits.Add(1)
totalSize.Add(int32(len(items)))
for i := range items {
if items[i] == "5" {
errors[i] = errFail
} else {
results[i] = Result(items[i]) + " result"
}
}
return nil
}
b, err := New[Item, Result](ctx, nil, commitBatch, opt)
require.NoError(t, err)
defer b.Shutdown()
for i := range 10 {
wg.Add(1)
s := fmt.Sprintf("%d", i)
go func() {
defer wg.Done()
result, err := b.Commit(ctx, s, Item(s))
// Async just returns straight away
require.NoError(t, err)
assert.Equal(t, Result(""), result)
}()
}
time.Sleep(2 * time.Second) // wait for batch timeout - needed with async
wg.Wait()
assert.Equal(t, int32(4), commits.Load())
assert.Equal(t, int32(10), totalSize.Load())
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/batcher/options.go | lib/batcher/options.go | package batcher
import (
"fmt"
"time"
"github.com/rclone/rclone/fs"
)
// FsOptions returns the batch mode fs.Options
func (opt *Options) FsOptions(extra string) []fs.Option {
return []fs.Option{{
Name: "batch_mode",
Help: fmt.Sprintf(`Upload file batching sync|async|off.
This sets the batch mode used by rclone.
%sThis has 3 possible values
- off - no batching
- sync - batch uploads and check completion (default)
- async - batch upload and don't check completion
Rclone will close any outstanding batches when it exits which may make
a delay on quit.
`, extra),
Default: "sync",
Advanced: true,
}, {
Name: "batch_size",
Help: fmt.Sprintf(`Max number of files in upload batch.
This sets the batch size of files to upload. It has to be less than %d.
By default this is 0 which means rclone will calculate the batch size
depending on the setting of batch_mode.
- batch_mode: async - default batch_size is %d
- batch_mode: sync - default batch_size is the same as --transfers
- batch_mode: off - not in use
Rclone will close any outstanding batches when it exits which may make
a delay on quit.
Setting this is a great idea if you are uploading lots of small files
as it will make them a lot quicker. You can use --transfers 32 to
maximise throughput.
`, opt.MaxBatchSize, opt.DefaultBatchSizeAsync),
Default: 0,
Advanced: true,
}, {
Name: "batch_timeout",
Help: fmt.Sprintf(`Max time to allow an idle upload batch before uploading.
If an upload batch is idle for more than this long then it will be
uploaded.
The default for this is 0 which means rclone will choose a sensible
default based on the batch_mode in use.
- batch_mode: async - default batch_timeout is %v
- batch_mode: sync - default batch_timeout is %v
- batch_mode: off - not in use
`, opt.DefaultTimeoutAsync, opt.DefaultTimeoutSync),
Default: fs.Duration(0),
Advanced: true,
}, {
Name: "batch_commit_timeout",
Help: `Max time to wait for a batch to finish committing. (no longer used)`,
Default: fs.Duration(10 * time.Minute),
Advanced: true,
Hide: fs.OptionHideBoth,
}}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/batcher/batcher.go | lib/batcher/batcher.go | // Package batcher implements a generic batcher.
//
// It uses two types:
//
// Item - the thing to be batched
// Result - the result from the batching
//
// And one function of type CommitBatchFn which is called to do the actual batching.
package batcher
import (
"context"
"errors"
"fmt"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/atexit"
)
// Options for configuring the batcher
type Options struct {
Mode string // mode of the batcher "sync", "async" or "off"
Size int // size of batch
Timeout time.Duration // timeout before committing the batch
MaxBatchSize int // max size the batch can be
DefaultTimeoutSync time.Duration // default time to kick off the batch if nothing added for this long (sync)
DefaultTimeoutAsync time.Duration // default time to kick off the batch if nothing added for this long (async)
DefaultBatchSizeAsync int // default batch size if async
}
// CommitBatchFn is called to commit a batch of Item and return Result to the callers.
//
// It should commit the batch of items then for each result i (of
// which there should be len(items)) it should set either results[i]
// or errors[i]
type CommitBatchFn[Item, Result any] func(ctx context.Context, items []Item, results []Result, errors []error) (err error)
// Batcher holds info about the current items waiting to be acted on.
type Batcher[Item, Result any] struct {
opt Options // options for configuring the batcher
f any // logging identity for fs.Debugf(f, ...)
commit CommitBatchFn[Item, Result] // User defined function to commit the batch
async bool // whether we are using async batching
in chan request[Item, Result] // incoming items to batch
closed chan struct{} // close to indicate batcher shut down
atexit atexit.FnHandle // atexit handle
shutOnce sync.Once // make sure we shutdown once only
wg sync.WaitGroup // wait for shutdown
}
// request holds an incoming request with a place for a reply
type request[Item, Result any] struct {
item Item
name string
result chan<- response[Result]
quit bool // if set then quit
}
// response holds a response to be delivered to clients waiting
// for a batch to complete.
type response[Result any] struct {
err error
entry Result
}
// New creates a Batcher for Item and Result calling commit to do the actual committing.
func New[Item, Result any](ctx context.Context, f any, commit CommitBatchFn[Item, Result], opt Options) (*Batcher[Item, Result], error) {
// fs.Debugf(f, "Creating batcher with mode %q, size %d, timeout %v", mode, size, timeout)
if opt.Size > opt.MaxBatchSize || opt.Size < 0 {
return nil, fmt.Errorf("batcher: batch size must be < %d and >= 0 - it is currently %d", opt.MaxBatchSize, opt.Size)
}
async := false
switch opt.Mode {
case "sync":
if opt.Size <= 0 {
ci := fs.GetConfig(ctx)
opt.Size = ci.Transfers
}
if opt.Timeout <= 0 {
opt.Timeout = opt.DefaultTimeoutSync
}
case "async":
if opt.Size <= 0 {
opt.Size = opt.DefaultBatchSizeAsync
}
if opt.Timeout <= 0 {
opt.Timeout = opt.DefaultTimeoutAsync
}
async = true
case "off":
opt.Size = 0
default:
return nil, fmt.Errorf("batcher: batch mode must be sync|async|off not %q", opt.Mode)
}
b := &Batcher[Item, Result]{
opt: opt,
f: f,
commit: commit,
async: async,
in: make(chan request[Item, Result], opt.Size),
closed: make(chan struct{}),
}
if b.Batching() {
b.atexit = atexit.Register(b.Shutdown)
b.wg.Add(1)
go b.commitLoop(context.Background())
}
return b, nil
}
// Batching returns true if batching is active
func (b *Batcher[Item, Result]) Batching() bool {
return b.opt.Size > 0
}
// commit a batch calling the user defined commit function then distributing the results.
func (b *Batcher[Item, Result]) commitBatch(ctx context.Context, requests []request[Item, Result]) (err error) {
// If commit fails then signal clients if sync
var signalled = b.async
defer func() {
if err != nil && !signalled {
// Signal to clients that there was an error
for _, req := range requests {
req.result <- response[Result]{err: err}
}
}
}()
desc := fmt.Sprintf("%s batch length %d starting with: %s", b.opt.Mode, len(requests), requests[0].name)
fs.Debugf(b.f, "Committing %s", desc)
var (
items = make([]Item, len(requests))
results = make([]Result, len(requests))
errors = make([]error, len(requests))
)
for i := range requests {
items[i] = requests[i].item
}
// Commit the batch
err = b.commit(ctx, items, results, errors)
if err != nil {
return err
}
// Report results to clients
var (
lastError error
errorCount = 0
)
for i, req := range requests {
result := results[i]
err := errors[i]
resp := response[Result]{}
if err == nil {
resp.entry = result
} else {
errorCount++
lastError = err
resp.err = fmt.Errorf("batch upload failed: %w", err)
}
if !b.async {
req.result <- resp
}
}
// show signalled so no need to report error to clients from now on
signalled = true
// Report an error if any failed in the batch
if lastError != nil {
return fmt.Errorf("batch had %d errors: last error: %w", errorCount, lastError)
}
fs.Debugf(b.f, "Committed %s", desc)
return nil
}
// commitLoop runs the commit engine in the background
func (b *Batcher[Item, Result]) commitLoop(ctx context.Context) {
var (
requests []request[Item, Result] // current batch of uncommitted Items
idleTimer = time.NewTimer(b.opt.Timeout)
commit = func() {
err := b.commitBatch(ctx, requests)
if err != nil {
fs.Errorf(b.f, "%s batch commit: failed to commit batch length %d: %v", b.opt.Mode, len(requests), err)
}
requests = nil
}
)
defer b.wg.Done()
defer idleTimer.Stop()
idleTimer.Stop()
outer:
for {
select {
case req := <-b.in:
if req.quit {
break outer
}
requests = append(requests, req)
idleTimer.Stop()
if len(requests) >= b.opt.Size {
commit()
} else {
idleTimer.Reset(b.opt.Timeout)
}
case <-idleTimer.C:
if len(requests) > 0 {
fs.Debugf(b.f, "Batch idle for %v so committing", b.opt.Timeout)
commit()
}
}
}
// commit any remaining items
if len(requests) > 0 {
commit()
}
}
// Shutdown finishes any pending batches then shuts everything down.
//
// This is registered as an atexit handler by New.
func (b *Batcher[Item, Result]) Shutdown() {
if !b.Batching() {
return
}
b.shutOnce.Do(func() {
atexit.Unregister(b.atexit)
fs.Infof(b.f, "Committing uploads - please wait...")
// show that batcher is shutting down
close(b.closed)
// quit the commitLoop by sending a quitRequest message
//
// Note that we don't close b.in because that will
// cause write to closed channel in Commit when we are
// exiting due to a signal.
b.in <- request[Item, Result]{quit: true}
b.wg.Wait()
})
}
// Commit commits the Item getting a Result or error using a batch
// call, first adding it to the batch and then waiting for the batch
// to complete in a synchronous way if async is not set.
//
// If async is set then this will return no error and a nil/empty
// Result.
//
// This should not be called if batching is off - check first with
// IsBatching.
func (b *Batcher[Item, Result]) Commit(ctx context.Context, name string, item Item) (entry Result, err error) {
select {
case <-b.closed:
return entry, fserrors.FatalError(errors.New("batcher is shutting down"))
default:
}
fs.Debugf(b.f, "Adding %q to batch", name)
resp := make(chan response[Result], 1)
b.in <- request[Item, Result]{
item: item,
name: name,
result: resp,
}
// If running async then don't wait for the result
if b.async {
return entry, nil
}
result := <-resp
return result.entry, result.err
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/debug/common.go | lib/debug/common.go | // Package debug contains functions for dealing with runtime/debug functions across go versions
package debug
import (
"runtime/debug"
)
// SetGCPercent calls the runtime/debug.SetGCPercent function to set the garbage
// collection percentage.
func SetGCPercent(percent int) int {
return debug.SetGCPercent(percent)
}
// SetMemoryLimit calls the runtime/debug.SetMemoryLimit function to set the
// soft-memory limit.
func SetMemoryLimit(limit int64) int64 {
return debug.SetMemoryLimit(limit)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/dircache/dircache.go | lib/dircache/dircache.go | // Package dircache provides a simple cache for caching directory ID
// to path lookups and the inverse.
package dircache
// _methods are called without the lock
import (
"bytes"
"context"
"errors"
"fmt"
"path"
"strings"
"sync"
"github.com/rclone/rclone/fs"
)
// DirCache caches paths to directory IDs and vice versa
type DirCache struct {
cacheMu sync.RWMutex // protects cache and invCache
cache map[string]string
invCache map[string]string
mu sync.Mutex // protects the below
fs DirCacher // Interface to find and make directories
trueRootID string // ID of the absolute root
root string // the path the cache is rooted on
rootID string // ID of the root directory
rootParentID string // ID of the root's parent directory
foundRoot bool // Whether we have found the root or not
}
// DirCacher describes an interface for doing the low level directory work
//
// This should be implemented by the backend and will be called by the
// dircache package when appropriate.
type DirCacher interface {
FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error)
CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error)
}
// New makes a DirCache
//
// This is created with the true root ID and the root path.
//
// In order to use the cache FindRoot() must be called on it without
// error. This isn't done at initialization as it isn't known whether
// the root and intermediate directories need to be created or not.
//
// Most of the utility functions will call FindRoot() on the caller's
// behalf with the create flag passed in.
//
// The cache is safe for concurrent use
func New(root string, trueRootID string, fs DirCacher) *DirCache {
d := &DirCache{
trueRootID: trueRootID,
root: root,
fs: fs,
}
d.Flush()
d.ResetRoot()
return d
}
// String returns the directory cache in string form for debugging
func (dc *DirCache) String() string {
dc.cacheMu.RLock()
defer dc.cacheMu.RUnlock()
var buf bytes.Buffer
_, _ = buf.WriteString("DirCache{\n")
_, _ = fmt.Fprintf(&buf, "\ttrueRootID: %q,\n", dc.trueRootID)
_, _ = fmt.Fprintf(&buf, "\troot: %q,\n", dc.root)
_, _ = fmt.Fprintf(&buf, "\trootID: %q,\n", dc.rootID)
_, _ = fmt.Fprintf(&buf, "\trootParentID: %q,\n", dc.rootParentID)
_, _ = fmt.Fprintf(&buf, "\tfoundRoot: %v,\n", dc.foundRoot)
_, _ = buf.WriteString("\tcache: {\n")
for k, v := range dc.cache {
_, _ = fmt.Fprintf(&buf, "\t\t%q: %q,\n", k, v)
}
_, _ = buf.WriteString("\t},\n")
_, _ = buf.WriteString("\tinvCache: {\n")
for k, v := range dc.invCache {
_, _ = fmt.Fprintf(&buf, "\t\t%q: %q,\n", k, v)
}
_, _ = buf.WriteString("\t},\n")
_, _ = buf.WriteString("}\n")
return buf.String()
}
// Get a directory ID given a path
//
// Returns the ID and a boolean as to whether it was found or not in
// the cache.
func (dc *DirCache) Get(path string) (id string, ok bool) {
dc.cacheMu.RLock()
id, ok = dc.cache[path]
dc.cacheMu.RUnlock()
return id, ok
}
// GetInv gets a path given a directory ID
//
// Returns the path and a boolean as to whether it was found or not in
// the cache.
func (dc *DirCache) GetInv(id string) (path string, ok bool) {
dc.cacheMu.RLock()
path, ok = dc.invCache[id]
dc.cacheMu.RUnlock()
return path, ok
}
// Put a (path, directory ID) pair into the cache
func (dc *DirCache) Put(path, id string) {
dc.cacheMu.Lock()
dc.cache[path] = id
dc.invCache[id] = path
dc.cacheMu.Unlock()
}
// Flush the cache of all data
func (dc *DirCache) Flush() {
dc.cacheMu.Lock()
dc.cache = make(map[string]string)
dc.invCache = make(map[string]string)
dc.cacheMu.Unlock()
}
// SetRootIDAlias sets the rootID to that passed in. This assumes that
// the new ID is just an alias for the old ID so does not flush
// anything.
//
// This should be called from FindLeaf (and only from FindLeaf) if it
// is discovered that the root ID is incorrect. For example some
// backends use "0" as a root ID, but it has a real ID which is needed
// for some operations.
func (dc *DirCache) SetRootIDAlias(rootID string) {
// No locking as this is called from FindLeaf
dc.rootID = rootID
dc.Put("", dc.rootID)
}
// FlushDir flushes the map of all data starting with the path
// dir.
//
// If dir is empty string then this is equivalent to calling ResetRoot
func (dc *DirCache) FlushDir(dir string) {
if dir == "" {
dc.ResetRoot()
return
}
dc.cacheMu.Lock()
// Delete the root dir
ID, ok := dc.cache[dir]
if ok {
delete(dc.cache, dir)
delete(dc.invCache, ID)
}
// And any sub directories
dir += "/"
for key, ID := range dc.cache {
if strings.HasPrefix(key, dir) {
delete(dc.cache, key)
delete(dc.invCache, ID)
}
}
dc.cacheMu.Unlock()
}
// SplitPath splits a path into directory, leaf
//
// Path shouldn't start or end with a /
//
// If there are no slashes then directory will be "" and leaf = path
func SplitPath(path string) (directory, leaf string) {
lastSlash := strings.LastIndex(path, "/")
if lastSlash >= 0 {
directory = path[:lastSlash]
leaf = path[lastSlash+1:]
} else {
directory = ""
leaf = path
}
return
}
// FindDir finds the directory passed in returning the directory ID
// starting from pathID
//
// Path shouldn't start or end with a /
//
// If create is set it will make the directory if not found.
//
// It will call FindRoot if it hasn't been called already
func (dc *DirCache) FindDir(ctx context.Context, path string, create bool) (pathID string, err error) {
dc.mu.Lock()
defer dc.mu.Unlock()
err = dc._findRoot(ctx, create)
if err != nil {
return "", err
}
return dc._findDir(ctx, path, create)
}
// Unlocked findDir
//
// Call with a lock on mu
func (dc *DirCache) _findDir(ctx context.Context, path string, create bool) (pathID string, err error) {
// If it is the root, then return it
if path == "" {
return dc.rootID, nil
}
// If it is in the cache then return it
pathID, ok := dc.Get(path)
if ok {
return pathID, nil
}
// Split the path into directory, leaf
directory, leaf := SplitPath(path)
// Recurse and find pathID for parent directory
parentPathID, err := dc._findDir(ctx, directory, create)
if err != nil {
return "", err
}
// Find the leaf in parentPathID
pathID, found, err := dc.fs.FindLeaf(ctx, parentPathID, leaf)
if err != nil {
return "", err
}
// If not found create the directory if required or return an error
if !found {
if create {
pathID, err = dc.fs.CreateDir(ctx, parentPathID, leaf)
if err != nil {
return "", fmt.Errorf("failed to make directory: %w", err)
}
} else {
return "", fs.ErrorDirNotFound
}
}
// Store the leaf directory in the cache
dc.Put(path, pathID)
// fmt.Println("Dir", path, "is", pathID)
return pathID, nil
}
// FindPath finds the leaf and directoryID from a path
//
// If called with path == "" then it will return the ID of the parent
// directory of the root and the leaf name of the root in that
// directory. Note that it won't create the root directory in this
// case even if create is true.
//
// If create is set parent directories will be created if they don't exist
//
// It will call FindRoot if it hasn't been called already
func (dc *DirCache) FindPath(ctx context.Context, path string, create bool) (leaf, directoryID string, err error) {
if path == "" {
_, leaf = SplitPath(dc.root)
directoryID, err = dc.RootParentID(ctx, create)
} else {
var directory string
directory, leaf = SplitPath(path)
directoryID, err = dc.FindDir(ctx, directory, create)
}
return leaf, directoryID, err
}
// FindRoot finds the root directory if not already found
//
// If successful this changes the root of the cache from the true root
// to the root specified by the path passed into New.
//
// Resets the root directory.
//
// If create is set it will make the directory if not found
func (dc *DirCache) FindRoot(ctx context.Context, create bool) error {
dc.mu.Lock()
defer dc.mu.Unlock()
return dc._findRoot(ctx, create)
}
// _findRoot finds the root directory if not already found
//
// Resets the root directory.
//
// If create is set it will make the directory if not found.
//
// Call with mu held
func (dc *DirCache) _findRoot(ctx context.Context, create bool) error {
if dc.foundRoot {
return nil
}
rootID, err := dc._findDir(ctx, dc.root, create)
if err != nil {
return err
}
dc.foundRoot = true
dc.rootID = rootID
// Find the parent of the root while we still have the root
// directory tree cached
rootParentPath, _ := SplitPath(dc.root)
dc.rootParentID, _ = dc.Get(rootParentPath)
// Reset the tree based on dc.root
dc.Flush()
// Put the root directory in
dc.Put("", dc.rootID)
return nil
}
// FoundRoot returns whether the root directory has been found yet
func (dc *DirCache) FoundRoot() bool {
dc.mu.Lock()
defer dc.mu.Unlock()
return dc.foundRoot
}
// RootID returns the ID of the root directory
//
// If create is set it will make the root directory if not found
func (dc *DirCache) RootID(ctx context.Context, create bool) (ID string, err error) {
dc.mu.Lock()
defer dc.mu.Unlock()
err = dc._findRoot(ctx, create)
if err != nil {
return "", err
}
return dc.rootID, nil
}
// RootParentID returns the ID of the parent of the root directory
//
// If create is set it will make the root parent directory if not found (but not the root)
func (dc *DirCache) RootParentID(ctx context.Context, create bool) (ID string, err error) {
dc.mu.Lock()
defer dc.mu.Unlock()
if !dc.foundRoot {
if dc.root == "" {
return "", errors.New("is root directory")
}
// Find the rootParentID without creating the root
rootParent, _ := SplitPath(dc.root)
rootParentID, err := dc._findDir(ctx, rootParent, create)
if err != nil {
return "", err
}
dc.rootParentID = rootParentID
} else if dc.rootID == dc.trueRootID {
return "", errors.New("is root directory")
}
if dc.rootParentID == "" {
return "", errors.New("internal error: didn't find rootParentID")
}
return dc.rootParentID, nil
}
// ResetRoot resets the root directory to the absolute root and clears
// the DirCache
func (dc *DirCache) ResetRoot() {
dc.mu.Lock()
defer dc.mu.Unlock()
dc.foundRoot = false
dc.Flush()
// Put the true root in
dc.rootID = dc.trueRootID
// Put the root directory in
dc.Put("", dc.rootID)
}
// DirMove prepares to move the directory (srcDC, srcRoot, srcRemote)
// into the directory (dc, dstRoot, dstRemote)
//
// It does all the checking, creates intermediate directories and
// returns leafs and IDs ready for the move.
//
// This returns:
//
// - srcID - ID of the source directory
// - srcDirectoryID - ID of the parent of the source directory
// - srcLeaf - leaf name of the source directory
// - dstDirectoryID - ID of the parent of the destination directory
// - dstLeaf - leaf name of the destination directory
//
// These should be used to do the actual move then
// srcDC.FlushDir(srcRemote) should be called.
func (dc *DirCache) DirMove(
ctx context.Context, srcDC *DirCache, srcRoot, srcRemote, dstRoot, dstRemote string) (srcID, srcDirectoryID, srcLeaf, dstDirectoryID, dstLeaf string, err error) {
var (
dstDC = dc
srcPath = path.Join(srcRoot, srcRemote)
dstPath = path.Join(dstRoot, dstRemote)
)
// Refuse to move to or from the root
if srcPath == "" || dstPath == "" {
// fs.Debugf(src, "DirMove error: Can't move root")
err = errors.New("can't move root directory")
return
}
// Find ID of dst parent, creating subdirs if necessary
dstLeaf, dstDirectoryID, err = dstDC.FindPath(ctx, dstRemote, true)
if err != nil {
return
}
// Check destination does not exist
_, err = dstDC.FindDir(ctx, dstRemote, false)
if err == fs.ErrorDirNotFound {
// OK
} else if err != nil {
return
} else {
err = fs.ErrorDirExists
return
}
// Find ID of src parent
srcLeaf, srcDirectoryID, err = srcDC.FindPath(ctx, srcRemote, false)
if err != nil {
return
}
// Find ID of src
srcID, err = srcDC.FindDir(ctx, srcRemote, false)
if err != nil {
return
}
return
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/http/middleware.go | lib/http/middleware.go | package http
import (
"context"
"encoding/base64"
"fmt"
"net/http"
"regexp"
"strings"
"sync"
goauth "github.com/abbot/go-http-auth"
"github.com/rclone/rclone/fs"
)
// parseAuthorization parses the Authorization header into user, pass
// it returns a boolean as to whether the parse was successful
func parseAuthorization(r *http.Request) (user, pass string, ok bool) {
authHeader := r.Header.Get("Authorization")
if authHeader != "" {
s := strings.SplitN(authHeader, " ", 2)
if len(s) == 2 && s[0] == "Basic" {
b, err := base64.StdEncoding.DecodeString(s[1])
if err == nil {
parts := strings.SplitN(string(b), ":", 2)
user = parts[0]
if len(parts) > 1 {
pass = parts[1]
ok = true
}
}
}
}
return
}
// LoggedBasicAuth simply wraps the goauth.BasicAuth struct
type LoggedBasicAuth struct {
goauth.BasicAuth
}
// CheckAuth extends BasicAuth.CheckAuth to emit a log entry for unauthorised requests
func (a *LoggedBasicAuth) CheckAuth(r *http.Request) string {
username := a.BasicAuth.CheckAuth(r)
if username == "" {
user, _, _ := parseAuthorization(r)
fs.Infof(r.URL.Path, "%s: Unauthorized request from %s", r.RemoteAddr, user)
}
return username
}
// NewLoggedBasicAuthenticator instantiates a new instance of LoggedBasicAuthenticator
func NewLoggedBasicAuthenticator(realm string, secrets goauth.SecretProvider) *LoggedBasicAuth {
return &LoggedBasicAuth{BasicAuth: goauth.BasicAuth{Realm: realm, Secrets: secrets}}
}
// Helper to generate required interface for middleware
func basicAuth(authenticator *LoggedBasicAuth) func(next http.Handler) http.Handler {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// skip auth for CORS preflight
if r.Method == "OPTIONS" {
next.ServeHTTP(w, r)
return
}
username := authenticator.CheckAuth(r)
if username == "" {
authenticator.RequireAuth(w, r)
return
}
ctx := context.WithValue(r.Context(), ctxKeyUser, username)
next.ServeHTTP(w, r.WithContext(ctx))
})
}
}
// MiddlewareAuthCertificateUser instantiates middleware that extracts the authenticated user via client certificate common name
func MiddlewareAuthCertificateUser() Middleware {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
for _, cert := range r.TLS.PeerCertificates {
if cert.Subject.CommonName != "" {
r = r.WithContext(context.WithValue(r.Context(), ctxKeyUser, cert.Subject.CommonName))
next.ServeHTTP(w, r)
return
}
}
code := http.StatusUnauthorized
w.Header().Set("Content-Type", "text/plain")
http.Error(w, http.StatusText(code), code)
})
}
}
// MiddlewareAuthHtpasswd instantiates middleware that authenticates against the passed htpasswd file
func MiddlewareAuthHtpasswd(path, realm string) Middleware {
fs.Infof(nil, "Using %q as htpasswd storage", path)
secretProvider := goauth.HtpasswdFileProvider(path)
authenticator := NewLoggedBasicAuthenticator(realm, secretProvider)
return basicAuth(authenticator)
}
// MiddlewareAuthBasic instantiates middleware that authenticates for a single user
func MiddlewareAuthBasic(user, pass, realm, salt string) Middleware {
fs.Infof(nil, "Using --user %s --pass XXXX as authenticated user", user)
pass = string(goauth.MD5Crypt([]byte(pass), []byte(salt), []byte("$1$")))
secretProvider := func(u, r string) string {
if user == u {
return pass
}
return ""
}
authenticator := NewLoggedBasicAuthenticator(realm, secretProvider)
return basicAuth(authenticator)
}
// MiddlewareAuthCustom instantiates middleware that authenticates using a custom function
func MiddlewareAuthCustom(fn CustomAuthFn, realm string, userFromContext bool) Middleware {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// skip auth for CORS preflight
if r.Method == "OPTIONS" {
next.ServeHTTP(w, r)
return
}
user, pass, ok := parseAuthorization(r)
if !ok && userFromContext {
user, ok = CtxGetUser(r.Context())
}
if !ok {
code := http.StatusUnauthorized
w.Header().Set("Content-Type", "text/plain")
w.Header().Set("WWW-Authenticate", fmt.Sprintf(`Basic realm=%q, charset="UTF-8"`, realm))
http.Error(w, http.StatusText(code), code)
return
}
value, err := fn(user, pass)
if err != nil {
fs.Infof(r.URL.Path, "%s: Auth failed from %s: %v", r.RemoteAddr, user, err)
goauth.NewBasicAuthenticator(realm, func(user, realm string) string { return "" }).RequireAuth(w, r) //Reuse BasicAuth error reporting
return
}
if value != nil {
r = r.WithContext(context.WithValue(r.Context(), ctxKeyAuth, value))
}
next.ServeHTTP(w, r)
})
}
}
var validUsernameRegexp = regexp.MustCompile(`^[\p{L}\d@._-]+$`)
// MiddlewareAuthGetUserFromHeader middleware that bypasses authentication and extracts the user via a specified HTTP header(ideal for proxied setups).
func MiddlewareAuthGetUserFromHeader(header string) Middleware {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
username := strings.TrimSpace(r.Header.Get(header))
if username != "" && validUsernameRegexp.MatchString(username) {
r = r.WithContext(context.WithValue(r.Context(), ctxKeyUser, username))
next.ServeHTTP(w, r)
return
}
code := http.StatusUnauthorized
w.Header().Set("Content-Type", "text/plain")
http.Error(w, http.StatusText(code), code)
})
}
}
var onlyOnceWarningAllowOrigin sync.Once
// MiddlewareCORS instantiates middleware that handles basic CORS protections for rcd
func MiddlewareCORS(allowOrigin string) Middleware {
onlyOnceWarningAllowOrigin.Do(func() {
if allowOrigin == "*" {
fs.Logf(nil, "Warning: Allow origin set to *. This can cause serious security problems.")
}
})
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if allowOrigin != "" {
w.Header().Add("Access-Control-Allow-Origin", allowOrigin)
w.Header().Add("Access-Control-Allow-Headers", "authorization, Content-Type")
w.Header().Add("Access-Control-Allow-Methods", "COPY, DELETE, GET, HEAD, LOCK, MKCOL, MOVE, OPTIONS, POST, PROPFIND, PROPPATCH, PUT, TRACE, UNLOCK")
w.Header().Add("Access-Control-Max-Age", "86400")
}
next.ServeHTTP(w, r)
})
}
}
// MiddlewareStripPrefix instantiates middleware that removes the BaseURL from the path
func MiddlewareStripPrefix(prefix string) Middleware {
return func(next http.Handler) http.Handler {
stripPrefixHandler := http.StripPrefix(prefix, next)
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Allow OPTIONS on the root only
if r.URL.Path == "/" && r.Method == "OPTIONS" {
next.ServeHTTP(w, r)
return
}
stripPrefixHandler.ServeHTTP(w, r)
})
}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/http/middleware_test.go | lib/http/middleware_test.go | package http
import (
"context"
"crypto/tls"
"errors"
"fmt"
"net/http"
"strings"
"testing"
"github.com/stretchr/testify/require"
)
// TestMiddlewareAuth exercises the auth middleware with basic auth,
// htpasswd (MD5/SHA/bcrypt), a custom auth function and user-from-header
// configurations, checking missing, bad and good credentials for each.
func TestMiddlewareAuth(t *testing.T) {
	servers := []struct {
		name         string
		expectedUser string
		remoteUser   string
		http         Config
		auth         AuthConfig
		user         string
		pass         string
	}{
		{
			name: "Basic",
			http: Config{
				ListenAddr: []string{"127.0.0.1:0"},
			},
			auth: AuthConfig{
				Realm:     "test",
				BasicUser: "test",
				BasicPass: "test",
			},
			user: "test",
			pass: "test",
		},
		{
			name: "Htpasswd/MD5",
			http: Config{
				ListenAddr: []string{"127.0.0.1:0"},
			},
			auth: AuthConfig{
				Realm:    "test",
				HtPasswd: "./testdata/.htpasswd",
			},
			user: "md5",
			pass: "md5",
		},
		{
			name: "Htpasswd/SHA",
			http: Config{
				ListenAddr: []string{"127.0.0.1:0"},
			},
			auth: AuthConfig{
				Realm:    "test",
				HtPasswd: "./testdata/.htpasswd",
			},
			user: "sha",
			pass: "sha",
		},
		{
			name: "Htpasswd/Bcrypt",
			http: Config{
				ListenAddr: []string{"127.0.0.1:0"},
			},
			auth: AuthConfig{
				Realm:    "test",
				HtPasswd: "./testdata/.htpasswd",
			},
			user: "bcrypt",
			pass: "bcrypt",
		},
		{
			name: "Custom",
			http: Config{
				ListenAddr: []string{"127.0.0.1:0"},
			},
			auth: AuthConfig{
				Realm: "test",
				CustomAuthFn: func(user, pass string) (value any, err error) {
					if user == "custom" && pass == "custom" {
						return true, nil
					}
					return nil, errors.New("invalid credentials")
				},
			},
			user: "custom",
			pass: "custom",
		}, {
			name:         "UserFromHeader",
			remoteUser:   "remoteUser",
			expectedUser: "remoteUser",
			http: Config{
				ListenAddr: []string{"127.0.0.1:0"},
			},
			auth: AuthConfig{
				UserFromHeader: "X-Remote-User",
			},
		}, {
			name:         "UserFromHeader/MixedWithHtPasswd",
			remoteUser:   "remoteUser",
			expectedUser: "md5",
			http: Config{
				ListenAddr: []string{"127.0.0.1:0"},
			},
			auth: AuthConfig{
				UserFromHeader: "X-Remote-User",
				Realm:          "test",
				HtPasswd:       "./testdata/.htpasswd",
			},
			user: "md5",
			pass: "md5",
		},
	}

	for _, ss := range servers {
		t.Run(ss.name, func(t *testing.T) {
			s, err := NewServer(context.Background(), WithConfig(ss.http), WithAuth(ss.auth))
			require.NoError(t, err)
			defer func() {
				require.NoError(t, s.Shutdown())
			}()

			expected := []byte("secret-page")
			if ss.expectedUser != "" {
				s.Router().Mount("/", testAuthUserHandler())
			} else {
				s.Router().Mount("/", testEchoHandler(expected))
			}
			s.Serve()

			url := testGetServerURL(t, s)

			t.Run("NoCreds", func(t *testing.T) {
				client := &http.Client{}
				req, err := http.NewRequest("GET", url, nil)
				require.NoError(t, err)

				resp, err := client.Do(req)
				require.NoError(t, err)
				defer func() {
					_ = resp.Body.Close()
				}()

				require.Equal(t, http.StatusUnauthorized, resp.StatusCode, "using no creds should return unauthorized")

				// Header-based auth does not challenge, so no WWW-Authenticate expected.
				if ss.auth.UserFromHeader == "" {
					wwwAuthHeader := resp.Header.Get("WWW-Authenticate")
					require.NotEmpty(t, wwwAuthHeader, "resp should contain WWW-Authenticate header")
					require.Contains(t, wwwAuthHeader, fmt.Sprintf("realm=%q", ss.auth.Realm), "WWW-Authenticate header should contain realm")
				}
			})

			t.Run("BadCreds", func(t *testing.T) {
				client := &http.Client{}
				req, err := http.NewRequest("GET", url, nil)
				require.NoError(t, err)

				if ss.user != "" {
					req.SetBasicAuth(ss.user+"BAD", ss.pass+"BAD")
				}
				if ss.auth.UserFromHeader != "" {
					req.Header.Set(ss.auth.UserFromHeader, "/test:")
				}

				resp, err := client.Do(req)
				require.NoError(t, err)
				defer func() {
					_ = resp.Body.Close()
				}()

				require.Equal(t, http.StatusUnauthorized, resp.StatusCode, "using bad creds should return unauthorized")

				if ss.auth.UserFromHeader == "" {
					wwwAuthHeader := resp.Header.Get("WWW-Authenticate")
					require.NotEmpty(t, wwwAuthHeader, "resp should contain WWW-Authenticate header")
					require.Contains(t, wwwAuthHeader, fmt.Sprintf("realm=%q", ss.auth.Realm), "WWW-Authenticate header should contain realm")
				}
			})

			t.Run("GoodCreds", func(t *testing.T) {
				client := &http.Client{}
				req, err := http.NewRequest("GET", url, nil)
				require.NoError(t, err)

				if ss.user != "" {
					req.SetBasicAuth(ss.user, ss.pass)
				}
				if ss.auth.UserFromHeader != "" {
					req.Header.Set(ss.auth.UserFromHeader, ss.remoteUser)
				}

				resp, err := client.Do(req)
				require.NoError(t, err)
				defer func() {
					_ = resp.Body.Close()
				}()

				require.Equal(t, http.StatusOK, resp.StatusCode, "using good creds should return ok")
				if ss.expectedUser != "" {
					testExpectRespBody(t, resp, []byte(ss.expectedUser))
				} else {
					testExpectRespBody(t, resp, expected)
				}
			})
		})
	}
}
// TestMiddlewareAuthCertificateUser checks mutual-TLS client certificate
// authentication: missing/invalid certs fail the handshake, an empty
// CommonName is rejected with 401, a valid cert maps the CN to the user,
// and a CustomAuthFn can accept or reject the CN as the username.
func TestMiddlewareAuthCertificateUser(t *testing.T) {
	serverCertBytes := testReadTestdataFile(t, "local.crt")
	serverKeyBytes := testReadTestdataFile(t, "local.key")
	clientCertBytes := testReadTestdataFile(t, "client.crt")
	clientKeyBytes := testReadTestdataFile(t, "client.key")
	clientCert, err := tls.X509KeyPair(clientCertBytes, clientKeyBytes)
	require.NoError(t, err)
	emptyCertBytes := testReadTestdataFile(t, "emptyclient.crt")
	emptyKeyBytes := testReadTestdataFile(t, "emptyclient.key")
	emptyCert, err := tls.X509KeyPair(emptyCertBytes, emptyKeyBytes)
	require.NoError(t, err)
	invalidCert, err := tls.X509KeyPair(serverCertBytes, serverKeyBytes)
	require.NoError(t, err)

	servers := []struct {
		name        string
		wantErr     bool
		status      int
		result      string
		http        Config
		auth        AuthConfig
		clientCerts []tls.Certificate
	}{
		{
			name:    "Missing",
			wantErr: true,
			http: Config{
				ListenAddr:    []string{"127.0.0.1:0"},
				TLSCertBody:   serverCertBytes,
				TLSKeyBody:    serverKeyBytes,
				MinTLSVersion: "tls1.0",
				ClientCA:      "./testdata/client-ca.crt",
			},
		},
		{
			name:        "Invalid",
			wantErr:     true,
			clientCerts: []tls.Certificate{invalidCert},
			http: Config{
				ListenAddr:    []string{"127.0.0.1:0"},
				TLSCertBody:   serverCertBytes,
				TLSKeyBody:    serverKeyBytes,
				MinTLSVersion: "tls1.0",
				ClientCA:      "./testdata/client-ca.crt",
			},
		},
		{
			name:        "EmptyCommonName",
			status:      http.StatusUnauthorized,
			result:      fmt.Sprintf("%s\n", http.StatusText(http.StatusUnauthorized)),
			clientCerts: []tls.Certificate{emptyCert},
			http: Config{
				ListenAddr:    []string{"127.0.0.1:0"},
				TLSCertBody:   serverCertBytes,
				TLSKeyBody:    serverKeyBytes,
				MinTLSVersion: "tls1.0",
				ClientCA:      "./testdata/client-ca.crt",
			},
		},
		{
			name:        "Valid",
			status:      http.StatusOK,
			result:      "rclone-dev-client",
			clientCerts: []tls.Certificate{clientCert},
			http: Config{
				ListenAddr:    []string{"127.0.0.1:0"},
				TLSCertBody:   serverCertBytes,
				TLSKeyBody:    serverKeyBytes,
				MinTLSVersion: "tls1.0",
				ClientCA:      "./testdata/client-ca.crt",
			},
		},
		{
			name:        "CustomAuth/Invalid",
			status:      http.StatusUnauthorized,
			result:      fmt.Sprintf("%d %s\n", http.StatusUnauthorized, http.StatusText(http.StatusUnauthorized)),
			clientCerts: []tls.Certificate{clientCert},
			http: Config{
				ListenAddr:    []string{"127.0.0.1:0"},
				TLSCertBody:   serverCertBytes,
				TLSKeyBody:    serverKeyBytes,
				MinTLSVersion: "tls1.0",
				ClientCA:      "./testdata/client-ca.crt",
			},
			auth: AuthConfig{
				Realm: "test",
				CustomAuthFn: func(user, pass string) (value any, err error) {
					if user == "custom" && pass == "custom" {
						return true, nil
					}
					return nil, errors.New("invalid credentials")
				},
			},
		},
		{
			name:        "CustomAuth/Valid",
			status:      http.StatusOK,
			result:      "rclone-dev-client",
			clientCerts: []tls.Certificate{clientCert},
			http: Config{
				ListenAddr:    []string{"127.0.0.1:0"},
				TLSCertBody:   serverCertBytes,
				TLSKeyBody:    serverKeyBytes,
				MinTLSVersion: "tls1.0",
				ClientCA:      "./testdata/client-ca.crt",
			},
			auth: AuthConfig{
				Realm: "test",
				// The CN of the client certificate arrives as the user with an
				// empty password. (Debug print removed - it polluted test output.)
				CustomAuthFn: func(user, pass string) (value any, err error) {
					if user == "rclone-dev-client" && pass == "" {
						return true, nil
					}
					return nil, errors.New("invalid credentials")
				},
			},
		},
	}

	for _, ss := range servers {
		t.Run(ss.name, func(t *testing.T) {
			s, err := NewServer(context.Background(), WithConfig(ss.http), WithAuth(ss.auth))
			require.NoError(t, err)
			defer func() {
				require.NoError(t, s.Shutdown())
			}()

			s.Router().Mount("/", testAuthUserHandler())
			s.Serve()

			url := testGetServerURL(t, s)

			client := &http.Client{
				Transport: &http.Transport{
					TLSClientConfig: &tls.Config{
						Certificates:       ss.clientCerts,
						InsecureSkipVerify: true,
					},
				},
			}

			req, err := http.NewRequest("GET", url, nil)
			require.NoError(t, err)

			resp, err := client.Do(req)
			if ss.wantErr {
				require.Error(t, err)
				return
			}
			require.NoError(t, err)
			defer func() {
				_ = resp.Body.Close()
			}()

			require.Equal(t, ss.status, resp.StatusCode, fmt.Sprintf("should return status %d", ss.status))

			testExpectRespBody(t, resp, []byte(ss.result))
		})
	}
}
// _testCORSHeaderKeys lists the response headers that MiddlewareCORS is
// expected to set; the CORS tests assert their presence or absence.
var _testCORSHeaderKeys = []string{
	"Access-Control-Allow-Origin",
	"Access-Control-Allow-Headers",
	"Access-Control-Allow-Methods",
}
// TestMiddlewareCORS checks that CORS headers are returned when an allow
// origin is configured, both at the root and behind a BaseURL, including the
// OPTIONS-on-root special case handled by MiddlewareStripPrefix.
func TestMiddlewareCORS(t *testing.T) {
	servers := []struct {
		name    string
		http    Config
		tryRoot bool // query the server root, ignoring the configured BaseURL
		method  string
		status  int // expected HTTP status code
	}{
		{
			name: "CustomOrigin",
			http: Config{
				ListenAddr:  []string{"127.0.0.1:0"},
				AllowOrigin: "http://test.rclone.org",
			},
			method: "GET",
			status: http.StatusOK,
		},
		{
			name: "WithBaseURL",
			http: Config{
				ListenAddr:  []string{"127.0.0.1:0"},
				AllowOrigin: "http://test.rclone.org",
				BaseURL:     "/baseurl/",
			},
			method: "GET",
			status: http.StatusOK,
		},
		{
			name: "WithBaseURLTryRootGET",
			http: Config{
				ListenAddr:  []string{"127.0.0.1:0"},
				AllowOrigin: "http://test.rclone.org",
				BaseURL:     "/baseurl/",
			},
			method:  "GET",
			status:  http.StatusNotFound,
			tryRoot: true,
		},
		{
			name: "WithBaseURLTryRootOPTIONS",
			http: Config{
				ListenAddr:  []string{"127.0.0.1:0"},
				AllowOrigin: "http://test.rclone.org",
				BaseURL:     "/baseurl/",
			},
			method:  "OPTIONS",
			status:  http.StatusOK,
			tryRoot: true,
		},
	}

	for _, ss := range servers {
		t.Run(ss.name, func(t *testing.T) {
			s, err := NewServer(context.Background(), WithConfig(ss.http))
			require.NoError(t, err)
			defer func() {
				require.NoError(t, s.Shutdown())
			}()

			expected := []byte("data")
			s.Router().Mount("/", testEchoHandler(expected))
			s.Serve()

			url := testGetServerURL(t, s)

			// Try the query on the root, ignoring the baseURL
			if ss.tryRoot {
				slash := strings.LastIndex(url[:len(url)-1], "/")
				url = url[:slash+1]
			}

			client := &http.Client{}
			req, err := http.NewRequest(ss.method, url, nil)
			require.NoError(t, err)

			resp, err := client.Do(req)
			require.NoError(t, err)
			defer func() {
				_ = resp.Body.Close()
			}()

			require.Equal(t, ss.status, resp.StatusCode, "should return expected error code")
			// A 404 from the root query carries no body/headers worth checking.
			if ss.status == http.StatusNotFound {
				return
			}

			testExpectRespBody(t, resp, expected)

			for _, key := range _testCORSHeaderKeys {
				require.Contains(t, resp.Header, key, "CORS headers should be sent")
			}

			expectedOrigin := url
			if ss.http.AllowOrigin != "" {
				expectedOrigin = ss.http.AllowOrigin
			}
			require.Equal(t, expectedOrigin, resp.Header.Get("Access-Control-Allow-Origin"), "allow origin should match")
		})
	}
}
// TestMiddlewareCORSEmptyOrigin verifies that no CORS headers are emitted
// when AllowOrigin is left empty.
func TestMiddlewareCORSEmptyOrigin(t *testing.T) {
	servers := []struct {
		name string
		http Config
	}{
		{
			name: "EmptyOrigin",
			http: Config{
				ListenAddr:  []string{"127.0.0.1:0"},
				AllowOrigin: "",
			},
		},
	}

	for _, tc := range servers {
		t.Run(tc.name, func(t *testing.T) {
			srv, err := NewServer(context.Background(), WithConfig(tc.http))
			require.NoError(t, err)
			defer func() {
				require.NoError(t, srv.Shutdown())
			}()

			payload := []byte("data")
			srv.Router().Mount("/", testEchoHandler(payload))
			srv.Serve()

			req, err := http.NewRequest("GET", testGetServerURL(t, srv), nil)
			require.NoError(t, err)

			resp, err := (&http.Client{}).Do(req)
			require.NoError(t, err)
			defer func() {
				_ = resp.Body.Close()
			}()

			require.Equal(t, http.StatusOK, resp.StatusCode, "should return ok")
			testExpectRespBody(t, resp, payload)

			// With no origin configured none of the CORS headers may appear.
			for _, key := range _testCORSHeaderKeys {
				require.NotContains(t, resp.Header, key, "CORS headers should not be sent")
			}
		})
	}
}
// TestMiddlewareCORSWithAuth checks that an unauthenticated OPTIONS
// (preflight) request succeeds and receives CORS headers even when the
// server requires basic auth.
func TestMiddlewareCORSWithAuth(t *testing.T) {
	authServers := []struct {
		name string
		http Config
		auth AuthConfig
	}{
		{
			name: "ServerWithAuth",
			http: Config{
				ListenAddr:  []string{"127.0.0.1:0"},
				AllowOrigin: "http://test.rclone.org",
			},
			auth: AuthConfig{
				Realm:     "test",
				BasicUser: "test_user",
				BasicPass: "test_pass",
			},
		},
	}

	for _, ss := range authServers {
		t.Run(ss.name, func(t *testing.T) {
			// Was NewServer(ctx, WithConfig(ss.http)) - without WithAuth the
			// table's auth config was never installed, so the test did not
			// actually exercise an authenticated server.
			s, err := NewServer(context.Background(), WithConfig(ss.http), WithAuth(ss.auth))
			require.NoError(t, err)
			defer func() {
				require.NoError(t, s.Shutdown())
			}()

			s.Router().Mount("/", testEmptyHandler())
			s.Serve()

			url := testGetServerURL(t, s)

			client := &http.Client{}
			req, err := http.NewRequest("OPTIONS", url, nil)
			require.NoError(t, err)

			resp, err := client.Do(req)
			require.NoError(t, err)
			defer func() {
				_ = resp.Body.Close()
			}()

			require.Equal(t, http.StatusOK, resp.StatusCode, "OPTIONS should return ok even if not authenticated")

			testExpectRespBody(t, resp, []byte{})

			for _, key := range _testCORSHeaderKeys {
				require.Contains(t, resp.Header, key, "CORS headers should be sent even if not authenticated")
			}

			expectedOrigin := url
			if ss.http.AllowOrigin != "" {
				expectedOrigin = ss.http.AllowOrigin
			}
			require.Equal(t, expectedOrigin, resp.Header.Get("Access-Control-Allow-Origin"), "allow origin should match")
		})
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/http/server_test.go | lib/http/server_test.go | package http
import (
"context"
"crypto/tls"
"fmt"
"io"
"net"
"net/http"
"os"
"path/filepath"
"strings"
"testing"
"github.com/stretchr/testify/require"
)
func testEmptyHandler() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})
}
func testEchoHandler(data []byte) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
_, _ = w.Write(data)
})
}
// testAuthUserHandler returns a handler that echoes the authenticated user
// from the request context, or responds 500 if no user is present.
func testAuthUserHandler() http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		userID, ok := CtxGetUser(r.Context())
		if !ok {
			http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
			// Was missing: without this return the (empty) userID was
			// appended to the error response body.
			return
		}
		_, _ = w.Write([]byte(userID))
	})
}
// testExpectRespBody reads the whole response body and asserts it equals
// the expected bytes.
func testExpectRespBody(t *testing.T, resp *http.Response, expected []byte) {
	actual, err := io.ReadAll(resp.Body)
	require.NoError(t, err)
	require.Equal(t, expected, actual)
}
// testGetServerURL returns the first URL the server is listening on,
// failing the test if the server reports none.
func testGetServerURL(t *testing.T, s *Server) string {
	addrs := s.URLs()
	require.GreaterOrEqual(t, len(addrs), 1, "server should return at least one url")
	return addrs[0]
}
// testNewHTTPClientUnix returns an http.Client whose transport dials the
// unix domain socket at path regardless of the request host.
func testNewHTTPClientUnix(path string) *http.Client {
	dial := func(_ context.Context, _, _ string) (net.Conn, error) {
		return net.Dial("unix", path)
	}
	return &http.Client{
		Transport: &http.Transport{DialContext: dial},
	}
}
// testReadTestdataFile reads a fixture from the ./testdata directory,
// failing the test on any read error.
func testReadTestdataFile(t *testing.T, path string) []byte {
	contents, err := os.ReadFile(filepath.Join("./testdata", path))
	require.NoError(t, err, "")
	return contents
}
// TestNewServerUnix runs the server on a unix domain socket, checks basic
// auth combinations over the socket, and verifies the socket file is
// removed on shutdown.
func TestNewServerUnix(t *testing.T) {
	tempDir := t.TempDir()
	path := filepath.Join(tempDir, "rclone.sock")

	servers := []struct {
		name   string
		status int    // expected HTTP status code
		result string // expected response body ("" = don't check)
		cfg    Config
		auth   AuthConfig
		user   string
		pass   string
	}{
		{
			name:   "ServerWithoutAuth/NoCreds",
			status: http.StatusOK,
			result: "hello world",
			cfg: Config{
				ListenAddr: []string{path},
			},
		}, {
			name:   "ServerWithAuth/NoCreds",
			status: http.StatusUnauthorized,
			cfg: Config{
				ListenAddr: []string{path},
			},
			auth: AuthConfig{
				BasicUser: "test",
				BasicPass: "test",
			},
		}, {
			name:   "ServerWithAuth/GoodCreds",
			status: http.StatusOK,
			result: "hello world",
			cfg: Config{
				ListenAddr: []string{path},
			},
			auth: AuthConfig{
				BasicUser: "test",
				BasicPass: "test",
			},
			user: "test",
			pass: "test",
		}, {
			name:   "ServerWithAuth/BadCreds",
			status: http.StatusUnauthorized,
			cfg: Config{
				ListenAddr: []string{path},
			},
			auth: AuthConfig{
				BasicUser: "test",
				BasicPass: "test",
			},
			user: "testBAD",
			pass: "testBAD",
		},
	}

	for _, ss := range servers {
		t.Run(ss.name, func(t *testing.T) {
			s, err := NewServer(context.Background(), WithConfig(ss.cfg), WithAuth(ss.auth))
			require.NoError(t, err)
			defer func() {
				require.NoError(t, s.Shutdown())
				// The socket file must be cleaned up on shutdown.
				_, err := os.Stat(path)
				require.ErrorIs(t, err, os.ErrNotExist, "shutdown should remove socket")
			}()

			// Unix sockets have no host:port so they are excluded from URLs().
			require.Empty(t, s.URLs(), "unix socket should not appear in URLs")

			s.Router().Mount("/", testEchoHandler([]byte(ss.result)))
			s.Serve()

			client := testNewHTTPClientUnix(path)
			req, err := http.NewRequest("GET", "http://unix", nil)
			require.NoError(t, err)

			req.SetBasicAuth(ss.user, ss.pass)

			resp, err := client.Do(req)
			require.NoError(t, err)
			defer func() {
				_ = resp.Body.Close()
			}()

			require.Equal(t, ss.status, resp.StatusCode, fmt.Sprintf("should return status %d", ss.status))

			if ss.result != "" {
				testExpectRespBody(t, resp, []byte(ss.result))
			}
		})
	}
}
// TestNewServerHTTP starts a plain HTTP server with basic auth and checks
// that requests without credentials are rejected and requests with the
// configured credentials succeed.
func TestNewServerHTTP(t *testing.T) {
	ctx := context.Background()

	cfg := DefaultCfg()
	cfg.ListenAddr = []string{"127.0.0.1:0"} // port 0 lets the OS pick a free port

	auth := AuthConfig{
		BasicUser: "test",
		BasicPass: "test",
	}

	s, err := NewServer(ctx, WithConfig(cfg), WithAuth(auth))
	require.NoError(t, err)
	defer func() {
		require.NoError(t, s.Shutdown())
	}()

	url := testGetServerURL(t, s)
	require.True(t, strings.HasPrefix(url, "http://"), "url should have http scheme")

	expected := []byte("hello world")
	s.Router().Mount("/", testEchoHandler(expected))
	s.Serve()

	t.Run("StatusUnauthorized", func(t *testing.T) {
		client := &http.Client{}
		req, err := http.NewRequest("GET", url, nil)
		require.NoError(t, err)

		resp, err := client.Do(req)
		require.NoError(t, err)
		defer func() {
			_ = resp.Body.Close()
		}()

		require.Equal(t, http.StatusUnauthorized, resp.StatusCode, "no basic auth creds should return unauthorized")
	})

	t.Run("StatusOK", func(t *testing.T) {
		client := &http.Client{}
		req, err := http.NewRequest("GET", url, nil)
		require.NoError(t, err)

		req.SetBasicAuth(auth.BasicUser, auth.BasicPass)

		resp, err := client.Do(req)
		require.NoError(t, err)
		defer func() {
			_ = resp.Body.Close()
		}()

		require.Equal(t, http.StatusOK, resp.StatusCode, "using basic auth creds should return ok")
		testExpectRespBody(t, resp, expected)
	})
}
// TestNewServerBaseURL checks that BaseURL values with and without leading
// and trailing slashes are normalized to the same served prefix.
func TestNewServerBaseURL(t *testing.T) {
	servers := []struct {
		name   string
		cfg    Config
		suffix string // expected normalized path suffix of the server URL
	}{
		{
			name: "Empty",
			cfg: Config{
				ListenAddr: []string{"127.0.0.1:0"},
				BaseURL:    "",
			},
			suffix: "/",
		},
		{
			name: "Single/NoTrailingSlash",
			cfg: Config{
				ListenAddr: []string{"127.0.0.1:0"},
				BaseURL:    "/rclone",
			},
			suffix: "/rclone/",
		},
		{
			name: "Single/TrailingSlash",
			cfg: Config{
				ListenAddr: []string{"127.0.0.1:0"},
				BaseURL:    "/rclone/",
			},
			suffix: "/rclone/",
		},
		{
			name: "Multi/NoTrailingSlash",
			cfg: Config{
				ListenAddr: []string{"127.0.0.1:0"},
				BaseURL:    "/rclone/test/base/url",
			},
			suffix: "/rclone/test/base/url/",
		},
		{
			name: "Multi/TrailingSlash",
			cfg: Config{
				ListenAddr: []string{"127.0.0.1:0"},
				BaseURL:    "/rclone/test/base/url/",
			},
			suffix: "/rclone/test/base/url/",
		},
	}

	for _, ss := range servers {
		t.Run(ss.name, func(t *testing.T) {
			s, err := NewServer(context.Background(), WithConfig(ss.cfg))
			require.NoError(t, err)
			defer func() {
				require.NoError(t, s.Shutdown())
			}()

			expected := []byte("data")
			s.Router().Get("/", testEchoHandler(expected).ServeHTTP)
			s.Serve()

			url := testGetServerURL(t, s)
			require.True(t, strings.HasPrefix(url, "http://"), "url should have http scheme")
			require.True(t, strings.HasSuffix(url, ss.suffix), "url should have the expected suffix")

			client := &http.Client{}
			req, err := http.NewRequest("GET", url, nil)
			require.NoError(t, err)

			resp, err := client.Do(req)
			require.NoError(t, err)
			defer func() {
				_ = resp.Body.Close()
			}()

			t.Log(url, resp.Request.URL)
			require.Equal(t, http.StatusOK, resp.StatusCode, "should return ok")
			testExpectRespBody(t, resp, expected)
		})
	}
}
// TestNewServerTLS covers TLS server configuration: certificates and keys
// supplied by file path or in-memory body, invalid/mismatched material,
// MinTLSVersion validation, and mutual TLS against a client CA.
func TestNewServerTLS(t *testing.T) {
	serverCertBytes := testReadTestdataFile(t, "local.crt")
	serverKeyBytes := testReadTestdataFile(t, "local.key")
	clientCertBytes := testReadTestdataFile(t, "client.crt")
	clientKeyBytes := testReadTestdataFile(t, "client.key")
	clientCert, err := tls.X509KeyPair(clientCertBytes, clientKeyBytes)
	require.NoError(t, err)

	// TODO: generate a proper cert with SAN

	servers := []struct {
		name          string
		clientCerts   []tls.Certificate
		wantErr       bool // NewServer itself should fail
		wantClientErr bool // server starts, but the client handshake should fail
		err           error
		http          Config
	}{
		{
			name: "FromFile/Valid",
			http: Config{
				ListenAddr:    []string{"127.0.0.1:0"},
				TLSCert:       "./testdata/local.crt",
				TLSKey:        "./testdata/local.key",
				MinTLSVersion: "tls1.0",
			},
		},
		{
			name:    "FromFile/NoCert",
			wantErr: true,
			err:     ErrTLSFileMismatch,
			http: Config{
				ListenAddr:    []string{"127.0.0.1:0"},
				TLSCert:       "",
				TLSKey:        "./testdata/local.key",
				MinTLSVersion: "tls1.0",
			},
		},
		{
			name:    "FromFile/InvalidCert",
			wantErr: true,
			http: Config{
				ListenAddr:    []string{"127.0.0.1:0"},
				TLSCert:       "./testdata/local.crt.invalid",
				TLSKey:        "./testdata/local.key",
				MinTLSVersion: "tls1.0",
			},
		},
		{
			name:    "FromFile/NoKey",
			wantErr: true,
			err:     ErrTLSFileMismatch,
			http: Config{
				ListenAddr:    []string{"127.0.0.1:0"},
				TLSCert:       "./testdata/local.crt",
				TLSKey:        "",
				MinTLSVersion: "tls1.0",
			},
		},
		{
			name:    "FromFile/InvalidKey",
			wantErr: true,
			http: Config{
				ListenAddr:    []string{"127.0.0.1:0"},
				TLSCert:       "./testdata/local.crt",
				TLSKey:        "./testdata/local.key.invalid",
				MinTLSVersion: "tls1.0",
			},
		},
		{
			name: "FromBody/Valid",
			http: Config{
				ListenAddr:    []string{"127.0.0.1:0"},
				TLSCertBody:   serverCertBytes,
				TLSKeyBody:    serverKeyBytes,
				MinTLSVersion: "tls1.0",
			},
		},
		{
			name:    "FromBody/NoCert",
			wantErr: true,
			err:     ErrTLSBodyMismatch,
			http: Config{
				ListenAddr:    []string{"127.0.0.1:0"},
				TLSCertBody:   nil,
				TLSKeyBody:    serverKeyBytes,
				MinTLSVersion: "tls1.0",
			},
		},
		{
			name:    "FromBody/InvalidCert",
			wantErr: true,
			http: Config{
				ListenAddr:    []string{"127.0.0.1:0"},
				TLSCertBody:   []byte("JUNK DATA"),
				TLSKeyBody:    serverKeyBytes,
				MinTLSVersion: "tls1.0",
			},
		},
		{
			name:    "FromBody/NoKey",
			wantErr: true,
			err:     ErrTLSBodyMismatch,
			http: Config{
				ListenAddr:    []string{"127.0.0.1:0"},
				TLSCertBody:   serverCertBytes,
				TLSKeyBody:    nil,
				MinTLSVersion: "tls1.0",
			},
		},
		{
			name:    "FromBody/InvalidKey",
			wantErr: true,
			http: Config{
				ListenAddr:    []string{"127.0.0.1:0"},
				TLSCertBody:   serverCertBytes,
				TLSKeyBody:    []byte("JUNK DATA"),
				MinTLSVersion: "tls1.0",
			},
		},
		{
			name: "MinTLSVersion/Valid/1.1",
			http: Config{
				ListenAddr:    []string{"127.0.0.1:0"},
				TLSCertBody:   serverCertBytes,
				TLSKeyBody:    serverKeyBytes,
				MinTLSVersion: "tls1.1",
			},
		},
		{
			name: "MinTLSVersion/Valid/1.2",
			http: Config{
				ListenAddr:    []string{"127.0.0.1:0"},
				TLSCertBody:   serverCertBytes,
				TLSKeyBody:    serverKeyBytes,
				MinTLSVersion: "tls1.2",
			},
		},
		{
			name: "MinTLSVersion/Valid/1.3",
			http: Config{
				ListenAddr:    []string{"127.0.0.1:0"},
				TLSCertBody:   serverCertBytes,
				TLSKeyBody:    serverKeyBytes,
				MinTLSVersion: "tls1.3",
			},
		},
		{
			name:    "MinTLSVersion/Invalid",
			wantErr: true,
			err:     ErrInvalidMinTLSVersion,
			http: Config{
				ListenAddr:    []string{"127.0.0.1:0"},
				TLSCertBody:   serverCertBytes,
				TLSKeyBody:    serverKeyBytes,
				MinTLSVersion: "tls0.9",
			},
		},
		{
			name:        "MutualTLS/InvalidCA",
			clientCerts: []tls.Certificate{clientCert},
			wantErr:     true,
			http: Config{
				ListenAddr:    []string{"127.0.0.1:0"},
				TLSCertBody:   serverCertBytes,
				TLSKeyBody:    serverKeyBytes,
				MinTLSVersion: "tls1.0",
				ClientCA:      "./testdata/client-ca.crt.invalid",
			},
		},
		{
			name:          "MutualTLS/InvalidClient",
			clientCerts:   []tls.Certificate{},
			wantClientErr: true,
			http: Config{
				ListenAddr:    []string{"127.0.0.1:0"},
				TLSCertBody:   serverCertBytes,
				TLSKeyBody:    serverKeyBytes,
				MinTLSVersion: "tls1.0",
				ClientCA:      "./testdata/client-ca.crt",
			},
		},
		{
			name:        "MutualTLS/Valid",
			clientCerts: []tls.Certificate{clientCert},
			http: Config{
				ListenAddr:    []string{"127.0.0.1:0"},
				TLSCertBody:   serverCertBytes,
				TLSKeyBody:    serverKeyBytes,
				MinTLSVersion: "tls1.0",
				ClientCA:      "./testdata/client-ca.crt",
			},
		},
	}

	for _, ss := range servers {
		t.Run(ss.name, func(t *testing.T) {
			s, err := NewServer(context.Background(), WithConfig(ss.http))
			if ss.wantErr == true {
				if ss.err != nil {
					require.ErrorIs(t, err, ss.err, "new server should return the expected error")
				} else {
					require.Error(t, err, "new server should return error for invalid TLS config")
				}
				return
			}
			require.NoError(t, err)
			defer func() {
				require.NoError(t, s.Shutdown())
			}()

			expected := []byte("secret-page")
			s.Router().Mount("/", testEchoHandler(expected))
			s.Serve()

			url := testGetServerURL(t, s)
			require.True(t, strings.HasPrefix(url, "https://"), "url should have https scheme")

			// Dial the test server's real address while requesting a fixed
			// hostname, so certificate verification is exercised consistently.
			client := &http.Client{
				Transport: &http.Transport{
					DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
						dest := strings.TrimPrefix(url, "https://")
						dest = strings.TrimSuffix(dest, "/")
						return net.Dial("tcp", dest)
					},
					TLSClientConfig: &tls.Config{
						Certificates:       ss.clientCerts,
						InsecureSkipVerify: true,
					},
				},
			}

			req, err := http.NewRequest("GET", "https://dev.rclone.org", nil)
			require.NoError(t, err)

			resp, err := client.Do(req)
			if ss.wantClientErr {
				require.Error(t, err, "new server client should return error")
				return
			}
			require.NoError(t, err)
			defer func() {
				_ = resp.Body.Close()
			}()

			require.Equal(t, http.StatusOK, resp.StatusCode, "should return ok")
			testExpectRespBody(t, resp, expected)
		})
	}
}
// TestHelpPrefixServer checks that the flag prefix is substituted into the
// rendered help text.
func TestHelpPrefixServer(t *testing.T) {
	// This test assumes template variables are placed correctly.
	const testPrefix = "server-help-test"
	if !strings.Contains(Help(testPrefix), testPrefix) {
		t.Fatal("flag prefix not found")
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/http/server.go | lib/http/server.go | // Package http provides a registration interface for http services
package http
import (
"bytes"
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"html/template"
"net"
"net/http"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/go-chi/chi/v5"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/lib/atexit"
sdActivation "github.com/rclone/rclone/lib/sdactivation"
"github.com/spf13/pflag"
)
// Help returns text describing the http server to add to the command
// help.
//
// The text is a template with {{ .Prefix }} placeholders which are
// replaced by the supplied flag prefix.
func Help(prefix string) string {
	help := `### Server options

Use ` + "`--{{ .Prefix }}addr`" + ` to specify which IP address and port the server should
listen on, eg ` + "`--{{ .Prefix }}addr 1.2.3.4:8000` or `--{{ .Prefix }}addr :8080`" + ` to listen to all
IPs. By default it only listens on localhost. You can use port
:0 to let the OS choose an available port.

If you set ` + "`--{{ .Prefix }}addr`" + ` to listen on a public or LAN accessible IP address
then using Authentication is advised - see the next section for info.

You can use a unix socket by setting the url to ` + "`unix:///path/to/socket`" + `
or just by using an absolute path name.

` + "`--{{ .Prefix }}addr`" + ` may be repeated to listen on multiple IPs/ports/sockets.
Socket activation, described further below, can also be used to accomplish the same.

` + "`--{{ .Prefix }}server-read-timeout` and `--{{ .Prefix }}server-write-timeout`" + ` can be used to
control the timeouts on the server. Note that this is the total time
for a transfer.

` + "`--{{ .Prefix }}max-header-bytes`" + ` controls the maximum number of bytes the server will
accept in the HTTP header.

` + "`--{{ .Prefix }}baseurl`" + ` controls the URL prefix that rclone serves from. By default
rclone will serve from the root. If you used ` + "`--{{ .Prefix }}baseurl \"/rclone\"`" + ` then
rclone would serve from a URL starting with "/rclone/". This is
useful if you wish to proxy rclone serve. Rclone automatically
inserts leading and trailing "/" on ` + "`--{{ .Prefix }}baseurl`" + `, so ` + "`--{{ .Prefix }}baseurl \"rclone\"`" + `,
` + "`--{{ .Prefix }}baseurl \"/rclone\"` and `--{{ .Prefix }}baseurl \"/rclone/\"`" + ` are all treated
identically.

` + "`--{{ .Prefix }}disable-zip`" + ` may be set to disable the zipping download option.

#### TLS (SSL)

By default this will serve over http. If you want you can serve over
https. You will need to supply the ` + "`--{{ .Prefix }}cert` and `--{{ .Prefix }}key`" + ` flags.
If you wish to do client side certificate validation then you will need to
supply ` + "`--{{ .Prefix }}client-ca`" + ` also.

` + "`--{{ .Prefix }}cert`" + ` must be set to the path of a file containing
either a PEM encoded certificate, or a concatenation of that with the CA
certificate. ` + "`--{{ .Prefix }}key`" + ` must be set to the path of a file
with the PEM encoded private key. ` + "If setting `--{{ .Prefix }}client-ca`" + `,
it should be set to the path of a file with PEM encoded client certificate
authority certificates.

` + "`--{{ .Prefix }}min-tls-version`" + ` is minimum TLS version that is acceptable. Valid
  values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default "tls1.0").

### Socket activation

Instead of the listening addresses specified above, rclone will listen to all
FDs passed by the service manager, if any (and ignore any arguments passed
by ` + "`--{{ .Prefix }}addr`" + `).

This allows rclone to be a socket-activated service.
It can be configured with .socket and .service unit files as described in
<https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html>.

Socket activation can be tested ad-hoc with the ` + "`systemd-socket-activate`" + `command

` + "```console" + `
systemd-socket-activate -l 8000 -- rclone serve
` + "```" + `

This will socket-activate rclone on the first connection to port 8000 over TCP.
`
	// Substitute the flag prefix into every {{ .Prefix }} placeholder.
	// Both failure paths are fatal: the template is a compile-time constant,
	// so an error here is a programming bug, not a runtime condition.
	tmpl, err := template.New("server help").Parse(help)
	if err != nil {
		fs.Fatal(nil, fmt.Sprint("Fatal error parsing template", err))
	}

	data := struct {
		Prefix string
	}{
		Prefix: prefix,
	}
	buf := &bytes.Buffer{}
	err = tmpl.Execute(buf, data)
	if err != nil {
		fs.Fatal(nil, fmt.Sprint("Fatal error executing template", err))
	}

	return buf.String()
}
// Middleware is the function signature required by chi.Router.Use(): it
// wraps an http.Handler and returns the wrapped handler.
type Middleware func(http.Handler) http.Handler
// ConfigInfo describes the Options in use
//
// NB keep this in sync with DefaultCfg below.
var ConfigInfo = fs.Options{{
	Name:    "addr",
	Default: []string{"127.0.0.1:8080"},
	Help:    "IPaddress:Port or :Port to bind server to",
}, {
	Name:    "server_read_timeout",
	Default: fs.Duration(1 * time.Hour),
	Help:    "Timeout for server reading data",
}, {
	Name:    "server_write_timeout",
	Default: fs.Duration(1 * time.Hour),
	Help:    "Timeout for server writing data",
}, {
	Name:    "max_header_bytes",
	Default: 4096,
	Help:    "Maximum size of request header",
}, {
	Name:    "cert",
	Default: "",
	Help:    "TLS PEM key (concatenation of certificate and CA certificate)",
}, {
	Name:    "key",
	Default: "",
	Help:    "TLS PEM Private key",
}, {
	Name:    "client_ca",
	Default: "",
	Help:    "Client certificate authority to verify clients with",
}, {
	Name:    "baseurl",
	Default: "",
	Help:    "Prefix for URLs - leave blank for root",
}, {
	Name:    "min_tls_version",
	Default: "tls1.0",
	Help:    "Minimum TLS version that is acceptable",
}, {
	Name:    "allow_origin",
	Default: "",
	Help:    "Origin which cross-domain request (CORS) can be executed from",
}}
// Config contains options for the http Server
//
// Note that the TLSCertBody/TLSKeyBody fields, when set, take precedence
// over the corresponding TLSCert/TLSKey file paths.
type Config struct {
	ListenAddr         []string    `config:"addr"`                 // Port to listen on
	BaseURL            string      `config:"baseurl"`              // prefix to strip from URLs
	ServerReadTimeout  fs.Duration `config:"server_read_timeout"`  // Timeout for server reading data
	ServerWriteTimeout fs.Duration `config:"server_write_timeout"` // Timeout for server writing data
	MaxHeaderBytes     int         `config:"max_header_bytes"`     // Maximum size of request header
	TLSCert            string      `config:"cert"`                 // Path to TLS PEM public key certificate file (can also include intermediate/CA certificates)
	TLSKey             string      `config:"key"`                  // Path to TLS PEM private key file
	TLSCertBody        []byte      `config:"-"`                    // TLS PEM public key certificate body (can also include intermediate/CA certificates), ignores TLSCert
	TLSKeyBody         []byte      `config:"-"`                    // TLS PEM private key body, ignores TLSKey
	ClientCA           string      `config:"client_ca"`            // Path to TLS PEM CA file with certificate authorities to verify clients with
	MinTLSVersion      string      `config:"min_tls_version"`      // MinTLSVersion contains the minimum TLS version that is acceptable.
	AllowOrigin        string      `config:"allow_origin"`         // AllowOrigin sets the Access-Control-Allow-Origin header
}
// AddFlagsPrefix adds flags for the httplib
//
// Each flag name is prefixed with the supplied prefix. Note that
// TLSCertBody/TLSKeyBody have no flag equivalents - they can only be set
// programmatically.
func (cfg *Config) AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string) {
	flags.StringArrayVarP(flagSet, &cfg.ListenAddr, prefix+"addr", "", cfg.ListenAddr, "IPaddress:Port, :Port or [unix://]/path/to/socket to bind server to", prefix)
	flags.FVarP(flagSet, &cfg.ServerReadTimeout, prefix+"server-read-timeout", "", "Timeout for server reading data", prefix)
	flags.FVarP(flagSet, &cfg.ServerWriteTimeout, prefix+"server-write-timeout", "", "Timeout for server writing data", prefix)
	flags.IntVarP(flagSet, &cfg.MaxHeaderBytes, prefix+"max-header-bytes", "", cfg.MaxHeaderBytes, "Maximum size of request header", prefix)
	flags.StringVarP(flagSet, &cfg.TLSCert, prefix+"cert", "", cfg.TLSCert, "Path to TLS PEM public key certificate file (can also include intermediate/CA certificates)", prefix)
	flags.StringVarP(flagSet, &cfg.TLSKey, prefix+"key", "", cfg.TLSKey, "Path to TLS PEM private key file", prefix)
	flags.StringVarP(flagSet, &cfg.ClientCA, prefix+"client-ca", "", cfg.ClientCA, "Path to TLS PEM CA file with certificate authorities to verify clients with", prefix)
	flags.StringVarP(flagSet, &cfg.BaseURL, prefix+"baseurl", "", cfg.BaseURL, "Prefix for URLs - leave blank for root", prefix)
	flags.StringVarP(flagSet, &cfg.MinTLSVersion, prefix+"min-tls-version", "", cfg.MinTLSVersion, "Minimum TLS version that is acceptable", prefix)
	flags.StringVarP(flagSet, &cfg.AllowOrigin, prefix+"allow-origin", "", cfg.AllowOrigin, "Origin which cross-domain request (CORS) can be executed from", prefix)
}
// AddHTTPFlagsPrefix adds flags for the httplib
//
// Thin free-function wrapper around (*Config).AddFlagsPrefix kept for
// callers that prefer the functional form.
func AddHTTPFlagsPrefix(flagSet *pflag.FlagSet, prefix string, cfg *Config) {
	cfg.AddFlagsPrefix(flagSet, prefix)
}
// DefaultCfg is the default values used for Config
//
// Note that this needs to be kept in sync with ConfigInfo above and
// can be removed when all callers have been converted.
func DefaultCfg() Config {
	return Config{
		ListenAddr:         []string{"127.0.0.1:8080"}, // loopback only by default
		ServerReadTimeout:  fs.Duration(1 * time.Hour),
		ServerWriteTimeout: fs.Duration(1 * time.Hour),
		MaxHeaderBytes:     4096,
		MinTLSVersion:      "tls1.0",
	}
}
// instance couples one listener with the http.Server that serves it
// and the URL string it is advertised under.
type instance struct {
	url        string
	listener   net.Listener
	httpServer *http.Server
}
// serve runs the HTTP server on the instance's listener until the
// server is shut down, then marks wg as done.
//
// http.ErrServerClosed is the expected result of a graceful
// Shutdown/Close and is not logged; any other error is.
func (s instance) serve(wg *sync.WaitGroup) {
	defer wg.Done()
	err := s.httpServer.Serve(s.listener)
	// Check err != nil first and use errors.Is so wrapped
	// http.ErrServerClosed values are also recognised.
	if err != nil && !errors.Is(err, http.ErrServerClosed) {
		fs.Logf(nil, "%s: unexpected error: %s", s.listener.Addr(), err.Error())
	}
}
// Server contains info about the running http server
type Server struct {
	wg           sync.WaitGroup
	mux          chi.Router
	tlsConfig    *tls.Config // nil unless initTLS configured TLS
	instances    []instance  // one per listener
	auth         AuthConfig
	cfg          Config
	template     *TemplateConfig
	htmlTemplate *template.Template // parsed template, set by initTemplate
	usingAuth    bool               // set if we are using auth middleware
	mu           sync.Mutex         // mutex protects RW variables below
	atexitHandle atexit.FnHandle
}
// Option allows customizing the server. Options are applied in order
// by NewServer before any listeners are created.
type Option func(*Server)
// WithAuth option initializes the appropriate auth middleware
func WithAuth(cfg AuthConfig) Option {
	return func(s *Server) {
		s.auth = cfg
	}
}
// WithConfig option applies the Config to the server, overriding defaults
func WithConfig(cfg Config) Option {
	return func(s *Server) {
		s.cfg = cfg
	}
}
// WithTemplate option allows the parsing of a template
func WithTemplate(cfg TemplateConfig) Option {
	return func(s *Server) {
		// Store a copy of the caller's config by address.
		s.template = &cfg
	}
}
// For a given listener, and optional tlsConfig, construct an instance.
// The url string ends up in the `url` field of the `instance`.
// This unconditionally wraps the listener with the provided TLS config if one
// is specified, so all decision logic on whether to use TLS needs to live at
// the callsite.
func newInstance(ctx context.Context, s *Server, listener net.Listener, tlsCfg *tls.Config, url string) *instance {
	if tlsCfg != nil {
		listener = tls.NewListener(listener, tlsCfg)
	}
	return &instance{
		url:      url,
		listener: listener,
		httpServer: &http.Server{
			Handler:           s.mux,
			ReadTimeout:       time.Duration(s.cfg.ServerReadTimeout),
			WriteTimeout:      time.Duration(s.cfg.ServerWriteTimeout),
			MaxHeaderBytes:    s.cfg.MaxHeaderBytes,
			ReadHeaderTimeout: 10 * time.Second, // time to send the headers
			IdleTimeout:       60 * time.Second, // time to keep idle connections open
			TLSConfig:         tlsCfg,
			BaseContext:       NewBaseContext(ctx, url), // propagates ctx and the URL to every request
		},
	}
}
// NewServer instantiates a new http server using provided listeners and options
// This function is provided if the default http server does not meet a services requirements and should not generally be used
// A http server can listen using multiple listeners. For example, a listener for port 80, and a listener for port 443.
// tlsListeners are ignored if opt.TLSKey is not provided
func NewServer(ctx context.Context, options ...Option) (*Server, error) {
	s := &Server{
		mux: chi.NewRouter(),
		cfg: DefaultCfg(),
	}
	// Make sure default logger is logging where everything else is
	// middleware.DefaultLogger = middleware.RequestLogger(&middleware.DefaultLogFormatter{Logger: log.Default(), NoColor: true})
	// Log requests
	// s.mux.Use(middleware.Logger)

	// Apply caller options (auth/config/template) before anything is
	// derived from the config.
	for _, opt := range options {
		opt(s)
	}
	// Build base router
	s.mux.MethodNotAllowed(func(w http.ResponseWriter, _ *http.Request) {
		http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
	})
	s.mux.NotFound(func(w http.ResponseWriter, _ *http.Request) {
		http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
	})
	// Ignore passing "/" for BaseURL
	s.cfg.BaseURL = strings.Trim(s.cfg.BaseURL, "/")
	if s.cfg.BaseURL != "" {
		// Normalize to a single leading slash and strip it from requests.
		s.cfg.BaseURL = "/" + s.cfg.BaseURL
		s.mux.Use(MiddlewareStripPrefix(s.cfg.BaseURL))
	}
	err := s.initTemplate()
	if err != nil {
		return nil, err
	}
	err = s.initTLS()
	if err != nil {
		return nil, err
	}
	s.mux.Use(MiddlewareCORS(s.cfg.AllowOrigin))
	s.initAuth()
	// (Only) listen on FDs provided by the service manager, if any.
	sdListeners, err := sdActivation.ListenersWithNames()
	if err != nil {
		return nil, fmt.Errorf("unable to acquire listeners: %w", err)
	}
	if len(sdListeners) != 0 {
		// Socket activation takes precedence: cfg.ListenAddr is
		// ignored entirely when the service manager passed sockets.
		for listenerName, listeners := range sdListeners {
			for i, listener := range listeners {
				url := fmt.Sprintf("sd-listen:%s-%d/%s", listenerName, i, s.cfg.BaseURL)
				if s.tlsConfig != nil {
					url = fmt.Sprintf("sd-listen+tls:%s-%d/%s", listenerName, i, s.cfg.BaseURL)
				}
				instance := newInstance(ctx, s, listener, s.tlsConfig, url)
				s.instances = append(s.instances, *instance)
			}
		}
		return s, nil
	}
	// Process all listeners specified in the CLI Args.
	for _, addr := range s.cfg.ListenAddr {
		var instance *instance
		if strings.HasPrefix(addr, "unix://") || filepath.IsAbs(addr) {
			// Unix socket: explicit unix:// scheme or a bare absolute path.
			addr = strings.TrimPrefix(addr, "unix://")
			listener, err := net.Listen("unix", addr)
			if err != nil {
				return nil, err
			}
			instance = newInstance(ctx, s, listener, s.tlsConfig, addr)
		} else if strings.HasPrefix(addr, "tls://") || (len(s.cfg.ListenAddr) == 1 && s.tlsConfig != nil) {
			// TLS: explicit tls:// scheme, or the only address while TLS is configured.
			addr = strings.TrimPrefix(addr, "tls://")
			listener, err := net.Listen("tcp", addr)
			if err != nil {
				return nil, err
			}
			instance = newInstance(ctx, s, listener, s.tlsConfig, fmt.Sprintf("https://%s%s/", listener.Addr().String(), s.cfg.BaseURL))
		} else {
			// HTTP case
			addr = strings.TrimPrefix(addr, "http://")
			listener, err := net.Listen("tcp", addr)
			if err != nil {
				return nil, err
			}
			instance = newInstance(ctx, s, listener, nil, fmt.Sprintf("http://%s%s/", listener.Addr().String(), s.cfg.BaseURL))
		}
		s.instances = append(s.instances, *instance)
	}
	return s, nil
}
// initAuth installs the authentication middleware on the router.
//
// When neither an htpasswd file nor a basic-auth user is configured, an
// alternative username source (trusted header, or TLS client certificate
// when client certs are required) may be used instead. After that, the
// first of CustomAuthFn, HtPasswd and BasicUser that is set installs its
// middleware and wins.
func (s *Server) initAuth() {
	s.usingAuth = false
	// Alternative username sources only apply when no password-based
	// auth is configured.
	altUsernameEnabled := s.auth.HtPasswd == "" && s.auth.BasicUser == ""
	if altUsernameEnabled {
		s.usingAuth = true
		if s.auth.UserFromHeader != "" {
			s.mux.Use(MiddlewareAuthGetUserFromHeader(s.auth.UserFromHeader))
		} else if s.tlsConfig != nil && s.tlsConfig.ClientAuth != tls.NoClientCert {
			s.mux.Use(MiddlewareAuthCertificateUser())
		} else {
			// No alternative source available after all.
			s.usingAuth = false
			altUsernameEnabled = false
		}
	}
	if s.auth.CustomAuthFn != nil {
		s.usingAuth = true
		s.mux.Use(MiddlewareAuthCustom(s.auth.CustomAuthFn, s.auth.Realm, altUsernameEnabled))
		return
	}
	if s.auth.HtPasswd != "" {
		s.usingAuth = true
		s.mux.Use(MiddlewareAuthHtpasswd(s.auth.HtPasswd, s.auth.Realm))
		return
	}
	if s.auth.BasicUser != "" {
		s.usingAuth = true
		s.mux.Use(MiddlewareAuthBasic(s.auth.BasicUser, s.auth.BasicPass, s.auth.Realm, s.auth.Salt))
		return
	}
}
// initTemplate parses the configured HTML template, if any, storing the
// result for later retrieval via HTMLTemplate. It is a no-op when no
// template config was supplied.
func (s *Server) initTemplate() error {
	if s.template == nil {
		return nil
	}
	tpl, err := GetTemplate(s.template.Path)
	s.htmlTemplate = tpl
	if err != nil {
		return fmt.Errorf("failed to get template: %w", err)
	}
	return nil
}
// Sentinel errors returned by initTLS; hard coded to allow for easier
// testing with errors.Is.
var (
	// ErrInvalidMinTLSVersion - hard coded errors, allowing for easier testing
	ErrInvalidMinTLSVersion = errors.New("invalid value for --min-tls-version")
	// ErrTLSBodyMismatch - hard coded errors, allowing for easier testing
	ErrTLSBodyMismatch = errors.New("need both TLSCertBody and TLSKeyBody to use TLS")
	// ErrTLSFileMismatch - hard coded errors, allowing for easier testing
	ErrTLSFileMismatch = errors.New("need both --cert and --key to use TLS")
	// ErrTLSParseCA - hard coded errors, allowing for easier testing
	ErrTLSParseCA = errors.New("unable to parse client certificate authority")
)
// initTLS loads the certificate/key pair and builds s.tlsConfig.
//
// It is a no-op when no certificate material is configured. In-memory
// bodies (TLSCertBody/TLSKeyBody) take precedence over file paths. If
// ClientCA is set, clients must present a certificate signed by one of
// those CAs.
func (s *Server) initTLS() error {
	if s.cfg.TLSCert == "" && s.cfg.TLSKey == "" && len(s.cfg.TLSCertBody) == 0 && len(s.cfg.TLSKeyBody) == 0 {
		return nil
	}
	// Cert and key must be supplied together, whichever form is used.
	if (len(s.cfg.TLSCertBody) > 0) != (len(s.cfg.TLSKeyBody) > 0) {
		return ErrTLSBodyMismatch
	}
	if (s.cfg.TLSCert != "") != (s.cfg.TLSKey != "") {
		return ErrTLSFileMismatch
	}
	var cert tls.Certificate
	var err error
	if len(s.cfg.TLSCertBody) > 0 {
		cert, err = tls.X509KeyPair(s.cfg.TLSCertBody, s.cfg.TLSKeyBody)
	} else {
		cert, err = tls.LoadX509KeyPair(s.cfg.TLSCert, s.cfg.TLSKey)
	}
	if err != nil {
		return err
	}
	// Map the config string onto a crypto/tls version constant.
	var minTLSVersion uint16
	switch s.cfg.MinTLSVersion {
	case "tls1.0":
		minTLSVersion = tls.VersionTLS10
	case "tls1.1":
		minTLSVersion = tls.VersionTLS11
	case "tls1.2":
		minTLSVersion = tls.VersionTLS12
	case "tls1.3":
		minTLSVersion = tls.VersionTLS13
	default:
		return fmt.Errorf("%w: %s", ErrInvalidMinTLSVersion, s.cfg.MinTLSVersion)
	}
	s.tlsConfig = &tls.Config{
		MinVersion:   minTLSVersion,
		Certificates: []tls.Certificate{cert},
	}
	if s.cfg.ClientCA != "" {
		// if !useTLS {
		// err := errors.New("can't use --client-ca without --cert and --key")
		// log.Fatalf(err.Error())
		// }
		certpool := x509.NewCertPool()
		pem, err := os.ReadFile(s.cfg.ClientCA)
		if err != nil {
			return err
		}
		if !certpool.AppendCertsFromPEM(pem) {
			return ErrTLSParseCA
		}
		s.tlsConfig.ClientCAs = certpool
		s.tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
	}
	return nil
}
// Serve starts the HTTP server on each listener
func (s *Server) Serve() {
	s.wg.Add(len(s.instances))
	for _, ii := range s.instances {
		go ii.serve(&s.wg)
	}
	// Install an atexit handler to shutdown gracefully
	s.mu.Lock()
	s.atexitHandle = atexit.Register(func() { _ = s.Shutdown() })
	s.mu.Unlock()
}
// Wait blocks while the server is serving requests
func (s *Server) Wait() {
	s.wg.Wait()
}
// Router returns the server base router
func (s *Server) Router() chi.Router {
	return s.mux
}
// Time to wait to Shutdown an HTTP server (per instance).
const gracefulShutdownTime = 10 * time.Second
// Shutdown gracefully shuts down the server.
//
// Each instance gets its own gracefulShutdownTime window to drain
// in-flight requests; shutdown errors are logged, not returned. The
// atexit handler installed by Serve is removed first so the shutdown
// does not run twice. Always returns nil.
func (s *Server) Shutdown() error {
	// Stop the atexit handler
	s.mu.Lock()
	if s.atexitHandle != nil {
		atexit.Unregister(s.atexitHandle)
		s.atexitHandle = nil
	}
	s.mu.Unlock()
	for _, ii := range s.instances {
		// context.WithTimeout is the idiomatic spelling of
		// WithDeadline(Now().Add(d)).
		ctx, cancel := context.WithTimeout(context.Background(), gracefulShutdownTime)
		if err := ii.httpServer.Shutdown(ctx); err != nil {
			fs.Logf(nil, "error shutting down server: %s", err)
		}
		cancel()
	}
	s.wg.Wait()
	return nil
}
// HTMLTemplate returns the parsed template, if WithTemplate option was passed.
// Returns nil otherwise.
func (s *Server) HTMLTemplate() *template.Template {
	return s.htmlTemplate
}
// URLs returns all configured URLS
//
// Unix socket listeners are omitted as they have no usable URL.
func (s *Server) URLs() []string {
	var urls []string
	for _, inst := range s.instances {
		if inst.listener.Addr().Network() != "unix" {
			urls = append(urls, inst.url)
		}
	}
	return urls
}
// Addr returns the first configured address
//
// Returns nil if there are no instances or the first has no listener.
func (s *Server) Addr() net.Addr {
	if len(s.instances) == 0 || s.instances[0].listener == nil {
		return nil
	}
	return s.instances[0].listener.Addr()
}
// UsingAuth returns true if authentication is required
func (s *Server) UsingAuth() bool {
	return s.usingAuth
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/http/auth.go | lib/http/auth.go | package http
import (
"bytes"
"fmt"
"html/template"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/spf13/pflag"
)
// AuthHelp returns text describing the http authentication to add to the command help.
//
// prefix is interpolated into the {{ .Prefix }} markers of the help
// text so flag names match the calling command. The template is
// compiled in, so parse/execute failures are programmer errors and
// abort via fs.Fatal.
func AuthHelp(prefix string) string {
	help := `#### Authentication
By default this will serve files without needing a login.
You can either use an htpasswd file which can take lots of users, or
set a single username and password with the ` + "`--{{ .Prefix }}user` and `--{{ .Prefix }}pass`" + ` flags.
Alternatively, you can have the reverse proxy manage authentication and use the
username provided in the configured header with ` + "`--user-from-header`" + ` (e.g., ` + "`--{{ .Prefix }}user-from-header=x-remote-user`" + `).
Ensure the proxy is trusted and headers cannot be spoofed, as misconfiguration
may lead to unauthorized access.
If either of the above authentication methods is not configured and client
certificates are required by the ` + "`--client-ca`" + ` flag passed to the server, the
client certificate common name will be considered as the username.
Use ` + "`--{{ .Prefix }}htpasswd /path/to/htpasswd`" + ` to provide an htpasswd file. This is
in standard apache format and supports MD5, SHA1 and BCrypt for basic
authentication. Bcrypt is recommended.
To create an htpasswd file:
` + "```console" + `
touch htpasswd
htpasswd -B htpasswd user
htpasswd -B htpasswd anotherUser
` + "```" + `
The password file can be updated while rclone is running.
Use ` + "`--{{ .Prefix }}realm`" + ` to set the authentication realm.
Use ` + "`--{{ .Prefix }}salt`" + ` to change the password hashing salt from the default.
`
	tmpl, err := template.New("auth help").Parse(help)
	if err != nil {
		fs.Fatal(nil, fmt.Sprint("Fatal error parsing template", err))
	}
	data := struct {
		Prefix string
	}{
		Prefix: prefix,
	}
	buf := &bytes.Buffer{}
	err = tmpl.Execute(buf, data)
	if err != nil {
		fs.Fatal(nil, fmt.Sprint("Fatal error executing template", err))
	}
	return buf.String()
}
// CustomAuthFn if used will be used to authenticate user, pass. If an error
// is returned then the user is not authenticated.
//
// If a non nil value is returned then it is added to the context under the key
// read back by CtxGetAuth (NOTE(review): inferred from the context helpers in
// this package — confirm against the auth middleware implementation).
type CustomAuthFn func(user, pass string) (value any, err error)
// AuthConfigInfo describes the Options in use. Keep in sync with
// AuthConfig and DefaultAuthCfg below.
var AuthConfigInfo = fs.Options{{
	Name:    "htpasswd",
	Default: "",
	Help:    "A htpasswd file - if not provided no authentication is done",
}, {
	Name:    "realm",
	Default: "",
	Help:    "Realm for authentication",
}, {
	Name:    "user",
	Default: "",
	Help:    "User name for authentication",
}, {
	Name:    "pass",
	Default: "",
	Help:    "Password for authentication",
}, {
	Name:    "salt",
	Default: "dlPL2MqE",
	Help:    "Password hashing salt",
}, {
	Name:    "user_from_header",
	Default: "",
	Help:    "User name from a defined HTTP header",
}}
// AuthConfig contains options for the http authentication
type AuthConfig struct {
	HtPasswd       string       `config:"htpasswd"`         // htpasswd file - if not provided no authentication is done
	Realm          string       `config:"realm"`            // realm for authentication
	BasicUser      string       `config:"user"`             // single username for basic auth if not using Htpasswd
	BasicPass      string       `config:"pass"`             // password for BasicUser
	Salt           string       `config:"salt"`             // password hashing salt
	UserFromHeader string       `config:"user_from_header"` // retrieve user name from a defined HTTP header
	CustomAuthFn   CustomAuthFn `json:"-" config:"-"`       // custom Auth (not set by command line flags)
}
// AddFlagsPrefix adds flags to the flag set for AuthConfig
//
// Each flag is registered as prefix+name to avoid collisions between
// multiple servers sharing one flag set.
func (cfg *AuthConfig) AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string) {
	flags.StringVarP(flagSet, &cfg.HtPasswd, prefix+"htpasswd", "", cfg.HtPasswd, "A htpasswd file - if not provided no authentication is done", prefix)
	flags.StringVarP(flagSet, &cfg.Realm, prefix+"realm", "", cfg.Realm, "Realm for authentication", prefix)
	flags.StringVarP(flagSet, &cfg.BasicUser, prefix+"user", "", cfg.BasicUser, "User name for authentication", prefix)
	flags.StringVarP(flagSet, &cfg.BasicPass, prefix+"pass", "", cfg.BasicPass, "Password for authentication", prefix)
	flags.StringVarP(flagSet, &cfg.Salt, prefix+"salt", "", cfg.Salt, "Password hashing salt", prefix)
	flags.StringVarP(flagSet, &cfg.UserFromHeader, prefix+"user-from-header", "", cfg.UserFromHeader, "Retrieve the username from a specified HTTP header if no other authentication methods are configured (ideal for proxied setups)", prefix)
}
// AddAuthFlagsPrefix adds flags to the flag set for AuthConfig
//
// Thin free-function wrapper around (*AuthConfig).AddFlagsPrefix.
func AddAuthFlagsPrefix(flagSet *pflag.FlagSet, prefix string, cfg *AuthConfig) {
	cfg.AddFlagsPrefix(flagSet, prefix)
}
// DefaultAuthCfg returns a new config which can be customized by command line flags
//
// Note that this needs to be kept in sync with AuthConfigInfo above and
// can be removed when all callers have been converted.
func DefaultAuthCfg() AuthConfig {
	return AuthConfig{
		Salt: "dlPL2MqE", // must match the "salt" default in AuthConfigInfo
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/http/context.go | lib/http/context.go | package http
import (
"context"
"net"
"net/http"
)
// ctxKey is a private type for this package's context keys so they can
// never collide with keys set by other packages.
type ctxKey int

// Context keys used by the middleware and helpers in this package.
const (
	ctxKeyAuth ctxKey = iota
	ctxKeyPublicURL
	ctxKeyUnixSock
	ctxKeyUser
)
// NewBaseContext initializes the context for all requests, adding info for use in middleware and handlers
func NewBaseContext(ctx context.Context, url string) func(l net.Listener) context.Context {
return func(l net.Listener) context.Context {
if l.Addr().Network() == "unix" {
return context.WithValue(ctx, ctxKeyUnixSock, true)
}
return context.WithValue(ctx, ctxKeyPublicURL, url)
}
}
// IsAuthenticated checks if this request was authenticated via a middleware
//
// A request counts as authenticated when either an auth value or a
// username was stored on its context.
func IsAuthenticated(r *http.Request) bool {
	ctx := r.Context()
	return ctx.Value(ctxKeyAuth) != nil || ctx.Value(ctxKeyUser) != nil
}
// PublicURL returns the URL defined in NewBaseContext, used for logging & CORS
//
// Returns "" for unix socket listeners, which store no URL.
func PublicURL(r *http.Request) string {
	v, _ := r.Context().Value(ctxKeyPublicURL).(string)
	return v
}
// CtxGetAuth is a wrapper over the private Auth context key.
// Returns nil if no auth value was stored on the context.
func CtxGetAuth(ctx context.Context) any {
	return ctx.Value(ctxKeyAuth)
}
// CtxGetUser is a wrapper over the private User context key.
// The second return is false when no username was stored.
func CtxGetUser(ctx context.Context) (string, bool) {
	v, ok := ctx.Value(ctxKeyUser).(string)
	return v, ok
}
// CtxSetUser is a test helper that injects a User value into context
func CtxSetUser(ctx context.Context, value string) context.Context {
	return context.WithValue(ctx, ctxKeyUser, value)
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/http/template_test.go | lib/http/template_test.go | package http
import (
"strings"
"testing"
)
// TestHelpPrefixTemplate checks that the flag prefix is substituted
// into the template help text.
func TestHelpPrefixTemplate(t *testing.T) {
	// This test assumes template variables are placed correctly.
	const testPrefix = "template-help-test"
	helpMessage := TemplateHelp(testPrefix)
	if !strings.Contains(helpMessage, testPrefix) {
		t.Fatal("flag prefix not found")
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/http/template.go | lib/http/template.go | package http
import (
"bytes"
"embed"
"fmt"
"html/template"
"os"
"strings"
"time"
"github.com/spf13/pflag"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
)
// TemplateHelp returns a string that describes how to use a custom template
//
// prefix is interpolated into the {{ .Prefix }} markers so the flag
// names match the calling command. The template is compiled in, so
// parse/execute failures are programmer errors and abort via fs.Fatal.
func TemplateHelp(prefix string) string {
	help := `#### Template
` + "`--{{ .Prefix }}template`" + ` allows a user to specify a custom markup template for HTTP
and WebDAV serve functions. The server exports the following markup
to be used within the template to server pages:
| Parameter | Subparameter | Description |
| :---------- | :----------- | :---------- |
| .Name | | The full path of a file/directory. |
| .Title | | Directory listing of '.Name'. |
| .Sort | | The current sort used. This is changeable via '?sort=' parameter. Possible values: namedirfirst, name, size, time (default namedirfirst). |
| .Order | | The current ordering used. This is changeable via '?order=' parameter. Possible values: asc, desc (default asc). |
| .Query | | Currently unused. |
| .Breadcrumb | | Allows for creating a relative navigation. |
| | .Link | The link of the Text relative to the root. |
| | .Text | The Name of the directory. |
| .Entries | | Information about a specific file/directory. |
| | .URL | The url of an entry. |
| | .Leaf | Currently same as '.URL' but intended to be just the name. |
| | .IsDir | Boolean for if an entry is a directory or not. |
| | .Size | Size in bytes of the entry. |
| | .ModTime | The UTC timestamp of an entry. |
The server also makes the following functions available so that they can be used
within the template. These functions help extend the options for dynamic
rendering of HTML. They can be used to render HTML based on specific conditions.
| Function | Description |
| :---------- | :---------- |
| afterEpoch | Returns the time since the epoch for the given time. |
| contains | Checks whether a given substring is present or not in a given string. |
| hasPrefix | Checks whether the given string begins with the specified prefix. |
| hasSuffix | Checks whether the given string end with the specified suffix. |
`
	tmpl, err := template.New("template help").Parse(help)
	if err != nil {
		fs.Fatal(nil, fmt.Sprint("Fatal error parsing template", err))
	}
	data := struct {
		Prefix string
	}{
		Prefix: prefix,
	}
	buf := &bytes.Buffer{}
	err = tmpl.Execute(buf, data)
	if err != nil {
		fs.Fatal(nil, fmt.Sprint("Fatal error executing template", err))
	}
	return buf.String()
}
// TemplateConfigInfo describes the Options in use. Keep in sync with
// TemplateConfig below.
var TemplateConfigInfo = fs.Options{{
	Name:    "template",
	Default: "",
	Help:    "User-specified template",
}}
// TemplateConfig for the templating functionality
type TemplateConfig struct {
	Path string `config:"template"` // path to a user-specified template file; "" uses the embedded default
}
// AddFlagsPrefix for the templating functionality
func (cfg *TemplateConfig) AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string) {
	flags.StringVarP(flagSet, &cfg.Path, prefix+"template", "", cfg.Path, "User-specified template", prefix)
}
// AddTemplateFlagsPrefix for the templating functionality
//
// Thin free-function wrapper around (*TemplateConfig).AddFlagsPrefix.
func AddTemplateFlagsPrefix(flagSet *pflag.FlagSet, prefix string, cfg *TemplateConfig) {
	cfg.AddFlagsPrefix(flagSet, prefix)
}
// DefaultTemplateCfg returns a new config which can be customized by command line flags
//
// Note that this needs to be kept in sync with TemplateConfigInfo above and
// can be removed when all callers have been converted.
func DefaultTemplateCfg() TemplateConfig {
	return TemplateConfig{}
}
// AfterEpoch returns the time since the epoch for the given time
func AfterEpoch(t time.Time) bool {
return t.After(time.Time{})
}
// Assets holds the embedded filesystem for the default template
// (used by GetTemplate when no template path is supplied).
//
//go:embed templates
var Assets embed.FS
// GetTemplate returns the HTML template for serving directories via HTTP/WebDAV
//
// If tmpl is empty the embedded default template is used, otherwise the
// file at the given path is read from disk. Returns an error if the
// template cannot be read or parsed.
func GetTemplate(tmpl string) (*template.Template, error) {
	var readFile = os.ReadFile
	if tmpl == "" {
		tmpl = "templates/index.html"
		readFile = Assets.ReadFile
	}
	data, err := readFile(tmpl)
	if err != nil {
		return nil, err
	}
	// Helper functions made available to user templates.
	funcMap := template.FuncMap{
		"afterEpoch": AfterEpoch,
		"contains":   strings.Contains,
		"hasPrefix":  strings.HasPrefix,
		"hasSuffix":  strings.HasSuffix,
	}
	tpl, err := template.New("index").Funcs(funcMap).Parse(string(data))
	if err != nil {
		return nil, err
	}
	return tpl, nil
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
rclone/rclone | https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/lib/http/auth_test.go | lib/http/auth_test.go | package http
import (
"strings"
"testing"
)
// TestHelpPrefixAuth checks that the flag prefix is substituted into
// the auth help text.
func TestHelpPrefixAuth(t *testing.T) {
	// This test assumes template variables are placed correctly.
	const testPrefix = "server-help-test"
	helpMessage := AuthHelp(testPrefix)
	if !strings.Contains(helpMessage, testPrefix) {
		t.Fatal("flag prefix not found")
	}
}
| go | MIT | 5f4e4b1a200708f5f36999a9d289823b742e4fd3 | 2026-01-07T08:35:43.525317Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.