repo
stringlengths
6
47
file_url
stringlengths
77
269
file_path
stringlengths
5
186
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-07 08:35:43
2026-01-07 08:55:24
truncated
bool
2 classes
gogs/gogs
https://github.com/gogs/gogs/blob/e68949dd1307d1b72a2fe885976ea0be72ee31d5/internal/netutil/netutil_test.go
internal/netutil/netutil_test.go
// Copyright 2022 The Gogs Authors. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package netutil import ( "testing" "github.com/stretchr/testify/assert" ) func TestIsLocalHostname(t *testing.T) { tests := []struct { hostname string allowlist []string want bool }{ {hostname: "localhost", want: true}, // #00 {hostname: "127.0.0.1", want: true}, // #01 {hostname: "::1", want: true}, // #02 {hostname: "0:0:0:0:0:0:0:1", want: true}, // #03 {hostname: "127.0.0.95", want: true}, // #04 {hostname: "0.0.0.0", want: true}, // #05 {hostname: "192.168.123.45", want: true}, // #06 {hostname: "gogs.io", want: false}, // #07 {hostname: "google.com", want: false}, // #08 {hostname: "165.232.140.255", want: false}, // #09 {hostname: "192.168.123.45", allowlist: []string{"10.0.0.17"}, want: true}, // #10 {hostname: "gogs.local", allowlist: []string{"gogs.local"}, want: false}, // #11 {hostname: "192.168.123.45", allowlist: []string{"*"}, want: false}, // #12 } for _, test := range tests { t.Run("", func(t *testing.T) { assert.Equal(t, test.want, IsBlockedLocalHostname(test.hostname, test.allowlist)) }) } }
go
MIT
e68949dd1307d1b72a2fe885976ea0be72ee31d5
2026-01-07T08:35:43.578986Z
false
gogs/gogs
https://github.com/gogs/gogs/blob/e68949dd1307d1b72a2fe885976ea0be72ee31d5/internal/netutil/netutil.go
internal/netutil/netutil.go
// Copyright 2022 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package netutil

import (
	"fmt"
	"net"
)

// localCIDRs holds the parsed local/reserved network ranges that
// IsBlockedLocalHostname treats as implicitly blocked. Populated once in init.
var localCIDRs []*net.IPNet

func init() {
	// Parsing hardcoded CIDR strings should never fail, if in case it does, let's
	// fail it at start.
	rawCIDRs := []string{
		// https://datatracker.ietf.org/doc/html/rfc5735:
		"127.0.0.0/8",        // Loopback
		"0.0.0.0/8",          // "This" network
		"100.64.0.0/10",      // Shared address space
		"169.254.0.0/16",     // Link local
		"172.16.0.0/12",      // Private-use networks
		"192.0.0.0/24",       // IETF Protocol assignments
		"192.0.2.0/24",       // TEST-NET-1
		"192.88.99.0/24",     // 6to4 Relay anycast
		"192.168.0.0/16",     // Private-use networks
		"198.18.0.0/15",      // Network interconnect
		"198.51.100.0/24",    // TEST-NET-2
		"203.0.113.0/24",     // TEST-NET-3
		"255.255.255.255/32", // Limited broadcast

		// https://datatracker.ietf.org/doc/html/rfc1918:
		"10.0.0.0/8", // Private-use networks

		// https://datatracker.ietf.org/doc/html/rfc6890:
		"::1/128",  // Loopback
		"FC00::/7", // Unique local address
		// NOTE(review): FE80::/10 is the IPv6 *link-local unicast* range, not
		// multicast (multicast is FF00::/8); the previous comment here was wrong.
		"FE80::/10", // Link-local unicast
	}
	for _, raw := range rawCIDRs {
		_, cidr, err := net.ParseCIDR(raw)
		if err != nil {
			// Hardcoded input; any failure is a programmer error, so crash at startup.
			panic(fmt.Sprintf("parse CIDR %q: %v", raw, err))
		}
		localCIDRs = append(localCIDRs, cidr)
	}
}

// IsBlockedLocalHostname returns true if given hostname is resolved to a local
// network address that is implicitly blocked (i.e. not exempted from the
// allowlist).
//
// An exact match in allowlist, or the wildcard entry "*", exempts the hostname
// unconditionally. A hostname that fails DNS resolution is treated as blocked
// (fail closed). Otherwise the hostname is blocked if ANY of its resolved IPs
// falls inside one of the localCIDRs ranges above.
func IsBlockedLocalHostname(hostname string, allowlist []string) bool {
	for _, allow := range allowlist {
		if hostname == allow || allow == "*" {
			return false
		}
	}

	ips, err := net.LookupIP(hostname)
	if err != nil {
		// Unresolvable hostnames are blocked rather than allowed.
		return true
	}
	for _, ip := range ips {
		for _, cidr := range localCIDRs {
			if cidr.Contains(ip) {
				return true
			}
		}
	}
	return false
}
go
MIT
e68949dd1307d1b72a2fe885976ea0be72ee31d5
2026-01-07T08:35:43.578986Z
false
gogs/gogs
https://github.com/gogs/gogs/blob/e68949dd1307d1b72a2fe885976ea0be72ee31d5/internal/markup/markup_test.go
internal/markup/markup_test.go
// Copyright 2017 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package markup_test

import (
	"testing"

	"github.com/stretchr/testify/assert"

	. "gogs.io/gogs/internal/markup"
)

// Test_IsReadmeFile checks that README detection accepts the "readme" prefix
// regardless of case or extension.
func Test_IsReadmeFile(t *testing.T) {
	tests := []struct {
		name   string
		expVal bool
	}{
		{name: "readme", expVal: true},
		{name: "README", expVal: true},
		{name: "readme.md", expVal: true},
		{name: "readme.markdown", expVal: true},
		{name: "readme.mdown", expVal: true},
		{name: "readme.mkd", expVal: true},
		{name: "readme.org", expVal: true},
		{name: "readme.rst", expVal: true},
		{name: "readme.asciidoc", expVal: true},
		{name: "readme_ZH", expVal: true},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			assert.Equal(t, test.expVal, IsReadmeFile(test.name))
		})
	}
}

// Test_FindAllMentions checks extraction of @-mentions (usernames returned
// without the leading "@").
func Test_FindAllMentions(t *testing.T) {
	tests := []struct {
		input      string
		expMatches []string
	}{
		{input: "@unknwon, what do you think?", expMatches: []string{"unknwon"}},
		{input: "@unknwon what do you think?", expMatches: []string{"unknwon"}},
		{input: "Hi @unknwon, sounds good to me", expMatches: []string{"unknwon"}},
		{input: "cc/ @unknwon @eddycjy", expMatches: []string{"unknwon", "eddycjy"}},
	}
	for _, test := range tests {
		t.Run("", func(t *testing.T) {
			assert.Equal(t, test.expMatches, FindAllMentions(test.input))
		})
	}
}

// Test_RenderIssueIndexPattern checks rewriting of issue references (#123 or
// ABC-123) into links, for the internal tracker and for external trackers in
// both numeric and alphanumeric styles.
func Test_RenderIssueIndexPattern(t *testing.T) {
	urlPrefix := "/prefix"

	t.Run("render to internal issue tracker", func(t *testing.T) {
		tests := []struct {
			input  string
			expVal string
		}{
			// Inputs that must NOT be linked: malformed, embedded, or bare "#".
			{input: "", expVal: ""},
			{input: "this is a test", expVal: "this is a test"},
			{input: "test 123 123 1234", expVal: "test 123 123 1234"},
			{input: "#", expVal: "#"},
			{input: "# # #", expVal: "# # #"},
			{input: "# 123", expVal: "# 123"},
			{input: "#abcd", expVal: "#abcd"},
			{input: "##1234", expVal: "##1234"},
			{input: "test#1234", expVal: "test#1234"},
			{input: "#1234test", expVal: "#1234test"},
			{input: " test #1234test", expVal: " test #1234test"},
			// Valid references at start/middle/end, in parentheses and brackets.
			{input: "#1234 test", expVal: "<a href=\"/prefix/issues/1234\">#1234</a> test"},
			{input: "test #1234 issue", expVal: "test <a href=\"/prefix/issues/1234\">#1234</a> issue"},
			{input: "test issue #1234", expVal: "test issue <a href=\"/prefix/issues/1234\">#1234</a>"},
			{input: "#5 test", expVal: "<a href=\"/prefix/issues/5\">#5</a> test"},
			{input: "test #5 issue", expVal: "test <a href=\"/prefix/issues/5\">#5</a> issue"},
			{input: "test issue #5", expVal: "test issue <a href=\"/prefix/issues/5\">#5</a>"},
			{input: "(#54321 issue)", expVal: "(<a href=\"/prefix/issues/54321\">#54321</a> issue)"},
			{input: "test (#54321) issue", expVal: "test (<a href=\"/prefix/issues/54321\">#54321</a>) issue"},
			{input: "test (#54321 extra) issue", expVal: "test (<a href=\"/prefix/issues/54321\">#54321</a> extra) issue"},
			{input: "test (#54321 issue)", expVal: "test (<a href=\"/prefix/issues/54321\">#54321</a> issue)"},
			{input: "test (#54321)", expVal: "test (<a href=\"/prefix/issues/54321\">#54321</a>)"},
			{input: "[#54321 issue]", expVal: "[<a href=\"/prefix/issues/54321\">#54321</a> issue]"},
			{input: "test [#54321] issue", expVal: "test [<a href=\"/prefix/issues/54321\">#54321</a>] issue"},
			{input: "test [#54321 extra] issue", expVal: "test [<a href=\"/prefix/issues/54321\">#54321</a> extra] issue"},
			{input: "test [#54321 issue]", expVal: "test [<a href=\"/prefix/issues/54321\">#54321</a> issue]"},
			{input: "test [#54321]", expVal: "test [<a href=\"/prefix/issues/54321\">#54321</a>]"},
			// Multiple references in one input.
			{input: "#54321 #1243", expVal: "<a href=\"/prefix/issues/54321\">#54321</a> <a href=\"/prefix/issues/1243\">#1243</a>"},
			{input: "test #54321 #1243", expVal: "test <a href=\"/prefix/issues/54321\">#54321</a> <a href=\"/prefix/issues/1243\">#1243</a>"},
			{input: "(#54321 #1243)", expVal: "(<a href=\"/prefix/issues/54321\">#54321</a> <a href=\"/prefix/issues/1243\">#1243</a>)"},
			{input: "(#54321)(#1243)", expVal: "(<a href=\"/prefix/issues/54321\">#54321</a>)(<a href=\"/prefix/issues/1243\">#1243</a>)"},
			{input: "text #54321 test #1243 issue", expVal: "text <a href=\"/prefix/issues/54321\">#54321</a> test <a href=\"/prefix/issues/1243\">#1243</a> issue"},
			{input: "#1 (#4321) test", expVal: "<a href=\"/prefix/issues/1\">#1</a> (<a href=\"/prefix/issues/4321\">#4321</a>) test"},
		}
		for _, test := range tests {
			t.Run(test.input, func(t *testing.T) {
				// nil metas selects the internal tracker path.
				assert.Equal(t, test.expVal, string(RenderIssueIndexPattern([]byte(test.input), urlPrefix, nil)))
			})
		}
	})

	t.Run("render to external issue tracker", func(t *testing.T) {
		t.Run("numeric style", func(t *testing.T) {
			metas := map[string]string{
				"format": "https://someurl.com/{user}/{repo}/{index}",
				"user":   "someuser",
				"repo":   "somerepo",
				"style":  IssueNameStyleNumeric,
			}
			tests := []struct {
				input  string
				expVal string
			}{
				{input: "this is a test", expVal: "this is a test"},
				{input: "test 123 123 1234", expVal: "test 123 123 1234"},
				{input: "#", expVal: "#"},
				{input: "# # #", expVal: "# # #"},
				{input: "# 123", expVal: "# 123"},
				{input: "#abcd", expVal: "#abcd"},
				{input: "#1234 test", expVal: "<a href=\"https://someurl.com/someuser/somerepo/1234\">#1234</a> test"},
				{input: "test #1234 issue", expVal: "test <a href=\"https://someurl.com/someuser/somerepo/1234\">#1234</a> issue"},
				{input: "test issue #1234", expVal: "test issue <a href=\"https://someurl.com/someuser/somerepo/1234\">#1234</a>"},
				{input: "#5 test", expVal: "<a href=\"https://someurl.com/someuser/somerepo/5\">#5</a> test"},
				{input: "test #5 issue", expVal: "test <a href=\"https://someurl.com/someuser/somerepo/5\">#5</a> issue"},
				{input: "test issue #5", expVal: "test issue <a href=\"https://someurl.com/someuser/somerepo/5\">#5</a>"},
				{input: "(#54321 issue)", expVal: "(<a href=\"https://someurl.com/someuser/somerepo/54321\">#54321</a> issue)"},
				{input: "test (#54321) issue", expVal: "test (<a href=\"https://someurl.com/someuser/somerepo/54321\">#54321</a>) issue"},
				{input: "test (#54321 extra) issue", expVal: "test (<a href=\"https://someurl.com/someuser/somerepo/54321\">#54321</a> extra) issue"},
				{input: "test (#54321 issue)", expVal: "test (<a href=\"https://someurl.com/someuser/somerepo/54321\">#54321</a> issue)"},
				{input: "test (#54321)", expVal: "test (<a href=\"https://someurl.com/someuser/somerepo/54321\">#54321</a>)"},
				{input: "#54321 #1243", expVal: "<a href=\"https://someurl.com/someuser/somerepo/54321\">#54321</a> <a href=\"https://someurl.com/someuser/somerepo/1243\">#1243</a>"},
				{input: "test #54321 #1243", expVal: "test <a href=\"https://someurl.com/someuser/somerepo/54321\">#54321</a> <a href=\"https://someurl.com/someuser/somerepo/1243\">#1243</a>"},
				{input: "(#54321 #1243)", expVal: "(<a href=\"https://someurl.com/someuser/somerepo/54321\">#54321</a> <a href=\"https://someurl.com/someuser/somerepo/1243\">#1243</a>)"},
				{input: "(#54321)(#1243)", expVal: "(<a href=\"https://someurl.com/someuser/somerepo/54321\">#54321</a>)(<a href=\"https://someurl.com/someuser/somerepo/1243\">#1243</a>)"},
				{input: "text #54321 test #1243 issue", expVal: "text <a href=\"https://someurl.com/someuser/somerepo/54321\">#54321</a> test <a href=\"https://someurl.com/someuser/somerepo/1243\">#1243</a> issue"},
				{input: "#1 (#4321) test", expVal: "<a href=\"https://someurl.com/someuser/somerepo/1\">#1</a> (<a href=\"https://someurl.com/someuser/somerepo/4321\">#4321</a>) test"},
			}
			for _, test := range tests {
				t.Run(test.input, func(t *testing.T) {
					assert.Equal(t, test.expVal, string(RenderIssueIndexPattern([]byte(test.input), urlPrefix, metas)))
				})
			}
		})

		t.Run("alphanumeric style", func(t *testing.T) {
			metas := map[string]string{
				"format": "https://someurl.com/{user}/{repo}/?b={index}",
				"user":   "someuser",
				"repo":   "somerepo",
				"style":  IssueNameStyleAlphanumeric,
			}
			tests := []struct {
				input  string
				expVal string
			}{
				{input: "", expVal: ""},
				{input: "this is a test", expVal: "this is a test"},
				{input: "test 123 123 1234", expVal: "test 123 123 1234"},
				{input: "#", expVal: "#"},
				{input: "##1234", expVal: "##1234"},
				{input: "# 123", expVal: "# 123"},
				{input: "#abcd", expVal: "#abcd"},
				{input: "test #123", expVal: "test #123"},
				{input: "abc-1234", expVal: "abc-1234"},                 // issue prefix must be capital
				{input: "ABc-1234", expVal: "ABc-1234"},                 // issue prefix must be _all_ capital
				{input: "ABCDEFGHIJK-1234", expVal: "ABCDEFGHIJK-1234"}, // the limit is 10 characters in the prefix
				{input: "ABC1234", expVal: "ABC1234"},                   // dash is required
				{input: "test ABC- test", expVal: "test ABC- test"},     // number is required
				{input: "test -1234 test", expVal: "test -1234 test"},   // prefix is required
				{input: "testABC-123 test", expVal: "testABC-123 test"}, // leading space is required
				{input: "test ABC-123test", expVal: "test ABC-123test"}, // trailing space is required
				{input: "ABC-0123", expVal: "ABC-0123"},                 // no leading zero
				{input: "OTT-1234 test", expVal: "<a href=\"https://someurl.com/someuser/somerepo/?b=OTT-1234\">OTT-1234</a> test"},
				{input: "test T-12 issue", expVal: "test <a href=\"https://someurl.com/someuser/somerepo/?b=T-12\">T-12</a> issue"},
				{input: "test issue ABCDEFGHIJ-1234567890", expVal: "test issue <a href=\"https://someurl.com/someuser/somerepo/?b=ABCDEFGHIJ-1234567890\">ABCDEFGHIJ-1234567890</a>"},
				{input: "A-1 test", expVal: "<a href=\"https://someurl.com/someuser/somerepo/?b=A-1\">A-1</a> test"},
				{input: "test ZED-1 issue", expVal: "test <a href=\"https://someurl.com/someuser/somerepo/?b=ZED-1\">ZED-1</a> issue"},
				{input: "test issue DEED-7154", expVal: "test issue <a href=\"https://someurl.com/someuser/somerepo/?b=DEED-7154\">DEED-7154</a>"},
				{input: "(ABG-124 issue)", expVal: "(<a href=\"https://someurl.com/someuser/somerepo/?b=ABG-124\">ABG-124</a> issue)"},
				{input: "test (ABG-124) issue", expVal: "test (<a href=\"https://someurl.com/someuser/somerepo/?b=ABG-124\">ABG-124</a>) issue"},
				{input: "test (ABG-124 extra) issue", expVal: "test (<a href=\"https://someurl.com/someuser/somerepo/?b=ABG-124\">ABG-124</a> extra) issue"},
				{input: "test (ABG-124 issue)", expVal: "test (<a href=\"https://someurl.com/someuser/somerepo/?b=ABG-124\">ABG-124</a> issue)"},
				{input: "test (ABG-124)", expVal: "test (<a href=\"https://someurl.com/someuser/somerepo/?b=ABG-124\">ABG-124</a>)"},
				{input: "[ABG-124] issue", expVal: "[<a href=\"https://someurl.com/someuser/somerepo/?b=ABG-124\">ABG-124</a>] issue"},
				{input: "test [ABG-124] issue", expVal: "test [<a href=\"https://someurl.com/someuser/somerepo/?b=ABG-124\">ABG-124</a>] issue"},
				{input: "test [ABG-124 extra] issue", expVal: "test [<a href=\"https://someurl.com/someuser/somerepo/?b=ABG-124\">ABG-124</a> extra] issue"},
				{input: "test [ABG-124 issue]", expVal: "test [<a href=\"https://someurl.com/someuser/somerepo/?b=ABG-124\">ABG-124</a> issue]"},
				{input: "test [ABG-124]", expVal: "test [<a href=\"https://someurl.com/someuser/somerepo/?b=ABG-124\">ABG-124</a>]"},
				{input: "ABG-124 OTT-4321", expVal: "<a href=\"https://someurl.com/someuser/somerepo/?b=ABG-124\">ABG-124</a> <a href=\"https://someurl.com/someuser/somerepo/?b=OTT-4321\">OTT-4321</a>"},
				{input: "test ABG-124 OTT-4321", expVal: "test <a href=\"https://someurl.com/someuser/somerepo/?b=ABG-124\">ABG-124</a> <a href=\"https://someurl.com/someuser/somerepo/?b=OTT-4321\">OTT-4321</a>"},
				{input: "(ABG-124 OTT-4321)", expVal: "(<a href=\"https://someurl.com/someuser/somerepo/?b=ABG-124\">ABG-124</a> <a href=\"https://someurl.com/someuser/somerepo/?b=OTT-4321\">OTT-4321</a>)"},
				{input: "(ABG-124)(OTT-4321)", expVal: "(<a href=\"https://someurl.com/someuser/somerepo/?b=ABG-124\">ABG-124</a>)(<a href=\"https://someurl.com/someuser/somerepo/?b=OTT-4321\">OTT-4321</a>)"},
				{input: "text ABG-124 test OTT-4321 issue", expVal: "text <a href=\"https://someurl.com/someuser/somerepo/?b=ABG-124\">ABG-124</a> test <a href=\"https://someurl.com/someuser/somerepo/?b=OTT-4321\">OTT-4321</a> issue"},
				{input: "A-1 (RRE-345) test", expVal: "<a href=\"https://someurl.com/someuser/somerepo/?b=A-1\">A-1</a> (<a href=\"https://someurl.com/someuser/somerepo/?b=RRE-345\">RRE-345</a>) test"},
			}
			for _, test := range tests {
				t.Run(test.input, func(t *testing.T) {
					assert.Equal(t, test.expVal, string(RenderIssueIndexPattern([]byte(test.input), urlPrefix, metas)))
				})
			}
		})
	})
}

// TestRenderSha1CurrentPattern checks commit-SHA linking: hex strings of 7-40
// characters are linked (abbreviated to a short SHA for display), while pure
// numbers are left untouched.
func TestRenderSha1CurrentPattern(t *testing.T) {
	metas := map[string]string{
		"repoLink": "/someuser/somerepo",
	}
	tests := []struct {
		desc   string
		input  string
		prefix string
		expVal string
	}{
		{
			desc:   "Full SHA (40 symbols)",
			input:  "ad8ced4f57d9068cb2874557245be3c7f341149d",
			prefix: metas["repoLink"],
			expVal: `<a href="/someuser/somerepo/commit/ad8ced4f57d9068cb2874557245be3c7f341149d"><code>ad8ced4f57</code></a>`,
		},
		{
			desc:   "Short SHA (8 symbols)",
			input:  "ad8ced4f",
			prefix: metas["repoLink"],
			expVal: `<a href="/someuser/somerepo/commit/ad8ced4f"><code>ad8ced4f</code></a>`,
		},
		{
			// Pure numbers are not treated as SHAs even though they match the hex pattern.
			desc:   "9 digits",
			input:  "123456789",
			prefix: metas["repoLink"],
			expVal: "123456789",
		},
	}
	for _, test := range tests {
		t.Run(test.desc, func(t *testing.T) {
			assert.Equal(t, test.expVal, string(RenderSha1CurrentPattern([]byte(test.input), test.prefix)))
		})
	}
}
go
MIT
e68949dd1307d1b72a2fe885976ea0be72ee31d5
2026-01-07T08:35:43.578986Z
false
gogs/gogs
https://github.com/gogs/gogs/blob/e68949dd1307d1b72a2fe885976ea0be72ee31d5/internal/markup/markdown_test.go
internal/markup/markdown_test.go
// Copyright 2016 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package markup_test

import (
	"bytes"
	"strings"
	"testing"

	"github.com/russross/blackfriday"
	"github.com/stretchr/testify/assert"

	"gogs.io/gogs/internal/conf"
	. "gogs.io/gogs/internal/markup"
)

// Test_IsMarkdownFile checks extension-based Markdown detection against the
// configured extension list.
//
// NOTE(review): mutates package-level conf.Markdown.FileExtensions; tests in
// this package therefore should not run in parallel.
func Test_IsMarkdownFile(t *testing.T) {
	// TODO: Refactor to accept a list of extensions
	conf.Markdown.FileExtensions = strings.Split(".md,.markdown,.mdown,.mkd", ",")
	tests := []struct {
		ext    string
		expVal bool
	}{
		{ext: ".md", expVal: true},
		{ext: ".markdown", expVal: true},
		{ext: ".mdown", expVal: true},
		{ext: ".mkd", expVal: true},
		{ext: ".org", expVal: false},
		{ext: ".rst", expVal: false},
		{ext: ".asciidoc", expVal: false},
	}
	for _, test := range tests {
		assert.Equal(t, test.expVal, IsMarkdownFile(test.ext))
	}
}

// Test_Markdown checks MarkdownRenderer.AutoLink output for issue and commit
// URLs: internal issue URLs collapse to "#NNN" links, internal commit URLs
// collapse to short-SHA <code> links, and external URLs are linked verbatim.
//
// NOTE(review): mutates package-level conf.Server.ExternalURL (see above).
func Test_Markdown(t *testing.T) {
	// TODO: Refactor to accept URL
	conf.Server.ExternalURL = "http://localhost:3000/"

	htmlFlags := 0
	htmlFlags |= blackfriday.HTML_SKIP_STYLE
	htmlFlags |= blackfriday.HTML_OMIT_CONTENTS
	renderer := &MarkdownRenderer{
		Renderer: blackfriday.HtmlRenderer(htmlFlags, "", ""),
	}

	tests := []struct {
		input  string
		expVal string
	}{
		// Issue URL
		{input: "http://localhost:3000/user/repo/issues/3333", expVal: "<a href=\"http://localhost:3000/user/repo/issues/3333\">#3333</a>"},
		{input: "http://1111/2222/ssss-issues/3333?param=blah&blahh=333", expVal: "<a href=\"http://1111/2222/ssss-issues/3333?param=blah&amp;blahh=333\">http://1111/2222/ssss-issues/3333?param=blah&amp;blahh=333</a>"},
		{input: "http://test.com/issues/33333", expVal: "<a href=\"http://test.com/issues/33333\">http://test.com/issues/33333</a>"},
		{input: "http://test.com/issues/3", expVal: "<a href=\"http://test.com/issues/3\">http://test.com/issues/3</a>"},
		{input: "http://issues/333", expVal: "<a href=\"http://issues/333\">http://issues/333</a>"},
		{input: "https://issues/333", expVal: "<a href=\"https://issues/333\">https://issues/333</a>"},
		{input: "http://tissues/0", expVal: "<a href=\"http://tissues/0\">http://tissues/0</a>"},

		// Commit URL
		{input: "http://localhost:3000/user/project/commit/d8a994ef243349f321568f9e36d5c3f444b99cae", expVal: " <code><a href=\"http://localhost:3000/user/project/commit/d8a994ef243349f321568f9e36d5c3f444b99cae\">d8a994ef24</a></code>"},
		{input: "http://localhost:3000/user/project/commit/d8a994ef243349f321568f9e36d5c3f444b99cae#diff-2", expVal: " <code><a href=\"http://localhost:3000/user/project/commit/d8a994ef243349f321568f9e36d5c3f444b99cae#diff-2\">d8a994ef24</a></code>"},
		{input: "https://external-link.gogs.io/gogs/gogs/commit/d8a994ef243349f321568f9e36d5c3f444b99cae#diff-2", expVal: "<a href=\"https://external-link.gogs.io/gogs/gogs/commit/d8a994ef243349f321568f9e36d5c3f444b99cae#diff-2\">https://external-link.gogs.io/gogs/gogs/commit/d8a994ef243349f321568f9e36d5c3f444b99cae#diff-2</a>"},
		{input: "https://commit/d8a994ef243349f321568f9e36d5c3f444b99cae", expVal: "<a href=\"https://commit/d8a994ef243349f321568f9e36d5c3f444b99cae\">https://commit/d8a994ef243349f321568f9e36d5c3f444b99cae</a>"},
	}
	for _, test := range tests {
		t.Run("", func(t *testing.T) {
			buf := new(bytes.Buffer)
			renderer.AutoLink(buf, []byte(test.input), blackfriday.LINK_TYPE_NORMAL)
			assert.Equal(t, test.expVal, buf.String())
		})
	}
}
go
MIT
e68949dd1307d1b72a2fe885976ea0be72ee31d5
2026-01-07T08:35:43.578986Z
false
gogs/gogs
https://github.com/gogs/gogs/blob/e68949dd1307d1b72a2fe885976ea0be72ee31d5/internal/markup/markup.go
internal/markup/markup.go
// Copyright 2017 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package markup

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"github.com/unknwon/com"
	"golang.org/x/net/html"

	"gogs.io/gogs/internal/conf"
	"gogs.io/gogs/internal/lazyregexp"
	"gogs.io/gogs/internal/tool"
)

// IsReadmeFile reports whether name looks like a README file based on its extension.
// The match is case-insensitive and only requires the "readme" prefix.
func IsReadmeFile(name string) bool {
	return strings.HasPrefix(strings.ToLower(name), "readme")
}

// IsIPythonNotebook reports whether name looks like an IPython notebook based on its extension.
func IsIPythonNotebook(name string) bool {
	return strings.HasSuffix(name, ".ipynb")
}

// Issue reference styles accepted in the "style" meta of an external tracker.
const (
	IssueNameStyleNumeric      = "numeric"
	IssueNameStyleAlphanumeric = "alphanumeric"
)

var (
	// MentionPattern matches string that mentions someone, e.g. @Unknwon
	MentionPattern = lazyregexp.New(`(\s|^|\W)@[0-9a-zA-Z-_\.]+`)

	// CommitPattern matches link to certain commit with or without trailing hash,
	// e.g. https://try.gogs.io/gogs/gogs/commit/d8a994ef243349f321568f9e36d5c3f444b99cae#diff-2
	CommitPattern = lazyregexp.New(`(\s|^)https?.*commit/[0-9a-zA-Z]+(#+[0-9a-zA-Z-]*)?`)

	// IssueFullPattern matches link to an issue with or without trailing hash,
	// e.g. https://try.gogs.io/gogs/gogs/issues/4#issue-685
	IssueFullPattern = lazyregexp.New(`(\s|^)https?.*issues/[0-9]+(#+[0-9a-zA-Z-]*)?`)

	// IssueNumericPattern matches a string that references a numeric issue, e.g. #1287
	IssueNumericPattern = lazyregexp.New(`( |^|\(|\[)#[0-9]+\b`)

	// IssueAlphanumericPattern matches a string that references an alphanumeric issue, e.g. ABC-1234
	IssueAlphanumericPattern = lazyregexp.New(`( |^|\(|\[)[A-Z]{1,10}-[1-9][0-9]*\b`)

	// CrossReferenceIssueNumericPattern matches a string that references a numeric issue in a different repository,
	// e.g. gogs/gogs#12345
	CrossReferenceIssueNumericPattern = lazyregexp.New(`( |^)[0-9a-zA-Z-_\.]+/[0-9a-zA-Z-_\.]+#[0-9]+\b`)

	// Sha1CurrentPattern matches string that represents a commit SHA, e.g. d8a994ef243349f321568f9e36d5c3f444b99cae
	// FIXME: this pattern matches pure numbers as well, right now we do a hack to check in RenderSha1CurrentPattern by converting string to a number.
	Sha1CurrentPattern = lazyregexp.New(`\b[0-9a-f]{7,40}\b`)
)

// FindAllMentions matches mention patterns in given content
// and returns a list of found user names without @ prefix.
func FindAllMentions(content string) []string {
	mentions := MentionPattern.FindAllString(content, -1)
	for i := range mentions {
		mentions[i] = mentions[i][strings.Index(mentions[i], "@")+1:] // Strip @ character
	}
	return mentions
}

// cutoutVerbosePrefix cutouts URL prefix including sub-path to
// return a clean unified string of request URL path.
// It truncates at the (3 + SubpathDepth)-th slash, i.e. after
// "/<subpath...>/<user>/<repo>".
func cutoutVerbosePrefix(prefix string) string {
	if prefix == "" || prefix[0] != '/' {
		return prefix
	}
	count := 0
	for i := 0; i < len(prefix); i++ {
		if prefix[i] == '/' {
			count++
		}
		if count >= 3+conf.Server.SubpathDepth {
			return prefix[:i]
		}
	}
	return prefix
}

// RenderIssueIndexPattern renders issue indexes to corresponding links.
// With nil metas or an empty "format" meta, references link to the internal
// tracker at "<urlPrefix>/issues/<n>"; otherwise the "format" template is
// expanded with the metas map ("index" is set per match).
func RenderIssueIndexPattern(rawBytes []byte, urlPrefix string, metas map[string]string) []byte {
	urlPrefix = cutoutVerbosePrefix(urlPrefix)

	pattern := IssueNumericPattern
	if metas["style"] == IssueNameStyleAlphanumeric {
		pattern = IssueAlphanumericPattern
	}

	ms := pattern.FindAll(rawBytes, -1)
	for _, m := range ms {
		if m[0] == ' ' || m[0] == '(' || m[0] == '[' {
			// ignore leading space, opening parentheses, or opening square brackets
			m = m[1:]
		}
		var link string
		if metas == nil || metas["format"] == "" {
			link = fmt.Sprintf(`<a href="%s/issues/%s">%s</a>`, urlPrefix, m[1:], m)
		} else {
			// Support for external issue tracker
			if metas["style"] == IssueNameStyleAlphanumeric {
				metas["index"] = string(m)
			} else {
				metas["index"] = string(m[1:]) // numeric style: drop the leading '#'
			}
			link = fmt.Sprintf(`<a href="%s">%s</a>`, com.Expand(metas["format"], metas), m)
		}
		rawBytes = bytes.Replace(rawBytes, m, []byte(link), 1)
	}
	return rawBytes
}

// Note: this section is for the purpose of increasing performance and
// reducing memory allocation at runtime since they are constant literals.
var pound = []byte("#")

// RenderCrossReferenceIssueIndexPattern renders issue indexes from other repositories to corresponding links.
// The urlPrefix and metas parameters are unused; links are built from conf.Server.ExternalURL.
func RenderCrossReferenceIssueIndexPattern(rawBytes []byte, _ string, _ map[string]string) []byte {
	ms := CrossReferenceIssueNumericPattern.FindAll(rawBytes, -1)
	for _, m := range ms {
		if m[0] == ' ' || m[0] == '(' {
			m = m[1:] // ignore leading space or opening parentheses
		}

		delimIdx := bytes.Index(m, pound)
		repo := string(m[:delimIdx])
		index := string(m[delimIdx+1:])
		link := fmt.Sprintf(`<a href="%s%s/issues/%s">%s</a>`, conf.Server.ExternalURL, repo, index, m)
		rawBytes = bytes.Replace(rawBytes, m, []byte(link), 1)
	}
	return rawBytes
}

// RenderSha1CurrentPattern renders SHA1 strings to corresponding links, assuming they are in the same repository.
// Pure-number matches are skipped (see the FIXME on Sha1CurrentPattern).
func RenderSha1CurrentPattern(rawBytes []byte, urlPrefix string) []byte {
	return []byte(Sha1CurrentPattern.ReplaceAllStringFunc(string(rawBytes), func(m string) string {
		if com.StrTo(m).MustInt() > 0 {
			return m
		}
		return fmt.Sprintf(`<a href="%s/commit/%s"><code>%s</code></a>`, urlPrefix, m, tool.ShortSHA1(m))
	}))
}

// RenderSpecialLink renders mentions, indexes and SHA1 strings to corresponding links.
// The SHA1 pass uses metas["repoLink"] (empty when metas is nil) as its URL prefix.
func RenderSpecialLink(rawBytes []byte, urlPrefix string, metas map[string]string) []byte {
	ms := MentionPattern.FindAll(rawBytes, -1)
	for _, m := range ms {
		m = m[bytes.Index(m, []byte("@")):]
		rawBytes = bytes.ReplaceAll(rawBytes, m, []byte(fmt.Sprintf(`<a href="%s/%s">%s</a>`, conf.Server.Subpath, m[1:], m)))
	}

	rawBytes = RenderIssueIndexPattern(rawBytes, urlPrefix, metas)
	rawBytes = RenderCrossReferenceIssueIndexPattern(rawBytes, urlPrefix, metas)
	rawBytes = RenderSha1CurrentPattern(rawBytes, metas["repoLink"])
	return rawBytes
}

var (
	leftAngleBracket  = []byte("</")
	rightAngleBracket = []byte(">")
)

// noEndTags lists void elements that have no matching end tag.
var noEndTags = []string{"input", "br", "hr", "img"}

// wrapImgWithLink wraps a link around standalone <img> tags, prepending the
// repository raw-file base URL to relative "src" values.
func wrapImgWithLink(urlPrefix string, buf *bytes.Buffer, token html.Token) {
	// Extract "src" and "alt" attributes
	var src, alt string
	for i := range token.Attr {
		switch token.Attr[i].Key {
		case "src":
			src = token.Attr[i].Val
		case "alt":
			alt = token.Attr[i].Val
		}
	}

	// Skip in case the "src" is empty
	if src == "" {
		buf.WriteString(token.String())
		return
	}

	// Skip in case the "src" is data url
	if strings.HasPrefix(src, "data:") {
		buf.WriteString(token.String())
		return
	}

	// Prepend repository base URL for internal links.
	// NOTE(review): isLink is defined elsewhere in this package — presumably it
	// reports whether src is an absolute URL; confirm against its definition.
	needPrepend := !isLink([]byte(src))
	if needPrepend {
		urlPrefix = strings.Replace(urlPrefix, "/src/", "/raw/", 1)
		if src[0] != '/' {
			urlPrefix += "/"
		}
	}

	buf.WriteString(`<a href="`)
	if needPrepend {
		buf.WriteString(urlPrefix)
		buf.WriteString(src)
	} else {
		buf.WriteString(src)
	}
	buf.WriteString(`">`)

	if needPrepend {
		src = strings.ReplaceAll(urlPrefix+src, " ", "%20")
		buf.WriteString(`<img src="`)
		buf.WriteString(src)
		buf.WriteString(`"`)
		if len(alt) > 0 {
			buf.WriteString(` alt="`)
			buf.WriteString(alt)
			buf.WriteString(`"`)
		}
		buf.WriteString(`>`)
	} else {
		buf.WriteString(token.String())
	}
	buf.WriteString(`</a>`)
}

// postProcessHTML treats different types of HTML differently,
// and only renders special links for plain text blocks. Content inside
// <a>, <code> and <pre> is copied verbatim; <img> tags are wrapped in links;
// unmatched end tags are rebalanced against the startTags stack.
func postProcessHTML(rawHTML []byte, urlPrefix string, metas map[string]string) []byte {
	startTags := make([]string, 0, 5)
	buf := bytes.NewBuffer(nil)
	tokenizer := html.NewTokenizer(bytes.NewReader(rawHTML))

outerLoop:
	for html.ErrorToken != tokenizer.Next() {
		token := tokenizer.Token()
		switch token.Type {
		case html.TextToken:
			buf.Write(RenderSpecialLink([]byte(token.String()), urlPrefix, metas))

		case html.StartTagToken:
			tagName := token.Data
			if tagName == "img" {
				wrapImgWithLink(urlPrefix, buf, token)
				continue outerLoop
			}

			buf.WriteString(token.String())
			// If this is an excluded tag, we skip processing all output until a close tag is encountered.
			if strings.EqualFold("a", tagName) || strings.EqualFold("code", tagName) || strings.EqualFold("pre", tagName) {
				stackNum := 1
				for html.ErrorToken != tokenizer.Next() {
					token = tokenizer.Token()

					// Copy the token to the output verbatim
					buf.WriteString(token.String())

					// Stack number doesn't increase for tags without end tags.
					if token.Type == html.StartTagToken && !com.IsSliceContainsStr(noEndTags, token.Data) {
						stackNum++
					}

					// If this is the close tag to the outer-most, we are done
					if token.Type == html.EndTagToken {
						stackNum--
						if stackNum <= 0 && strings.EqualFold(tagName, token.Data) {
							break
						}
					}
				}
				continue outerLoop
			}

			if !com.IsSliceContainsStr(noEndTags, tagName) {
				startTags = append(startTags, tagName)
			}

		case html.EndTagToken:
			if len(startTags) == 0 {
				buf.WriteString(token.String())
				break
			}

			// Emit the close tag for the most recently opened tracked tag.
			buf.Write(leftAngleBracket)
			buf.WriteString(startTags[len(startTags)-1])
			buf.Write(rightAngleBracket)
			startTags = startTags[:len(startTags)-1]

		default:
			buf.WriteString(token.String())
		}
	}

	if io.EOF == tokenizer.Err() {
		return buf.Bytes()
	}

	// If we are not at the end of the input, then some other parsing error has occurred,
	// so return the input verbatim.
	return rawHTML
}

// Type is the markup syntax of a file, detected from its name.
type Type string

const (
	TypeUnrecognized    Type = "unrecognized"
	TypeMarkdown        Type = "markdown"
	TypeOrgMode         Type = "orgmode"
	TypeIPythonNotebook Type = "ipynb"
)

// Detect returns best guess of a markup type based on file name.
func Detect(filename string) Type {
	switch {
	case IsMarkdownFile(filename):
		return TypeMarkdown
	case IsOrgModeFile(filename):
		return TypeOrgMode
	case IsIPythonNotebook(filename):
		return TypeIPythonNotebook
	default:
		return TypeUnrecognized
	}
}

// Render takes a string or []byte and renders to sanitized HTML in given type of syntax with special links.
// It panics if input is neither string nor []byte. Unrecognized types are returned unmodified.
func Render(typ Type, input any, urlPrefix string, metas map[string]string) []byte {
	var rawBytes []byte
	switch v := input.(type) {
	case []byte:
		rawBytes = v
	case string:
		rawBytes = []byte(v)
	default:
		panic(fmt.Sprintf("unrecognized input content type: %T", input))
	}

	urlPrefix = strings.TrimRight(strings.ReplaceAll(urlPrefix, " ", "%20"), "/")
	var rawHTML []byte
	switch typ {
	case TypeMarkdown:
		rawHTML = RawMarkdown(rawBytes, urlPrefix)
	case TypeOrgMode:
		rawHTML = RawOrgMode(rawBytes, urlPrefix)
	default:
		return rawBytes // Do nothing if syntax type is not recognized
	}

	rawHTML = postProcessHTML(rawHTML, urlPrefix, metas)
	return SanitizeBytes(rawHTML)
}
go
MIT
e68949dd1307d1b72a2fe885976ea0be72ee31d5
2026-01-07T08:35:43.578986Z
false
gogs/gogs
https://github.com/gogs/gogs/blob/e68949dd1307d1b72a2fe885976ea0be72ee31d5/internal/markup/sanitizer_test.go
internal/markup/sanitizer_test.go
// Copyright 2017 The Gogs Authors. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package markup_test import ( "testing" "github.com/stretchr/testify/assert" . "gogs.io/gogs/internal/markup" ) func Test_Sanitizer(t *testing.T) { NewSanitizer() tests := []struct { input string expVal string }{ // Regular {input: `<a onblur="alert(secret)" href="http://www.google.com">Google</a>`, expVal: `<a href="http://www.google.com" rel="nofollow">Google</a>`}, // Code highlighting class {input: `<code class="random string"></code>`, expVal: `<code></code>`}, {input: `<code class="language-random ui tab active menu attached animating sidebar following bar center"></code>`, expVal: `<code></code>`}, {input: `<code class="language-go"></code>`, expVal: `<code class="language-go"></code>`}, // Input checkbox {input: `<input type="hidden">`, expVal: ``}, {input: `<input type="checkbox">`, expVal: `<input type="checkbox">`}, {input: `<input checked disabled autofocus>`, expVal: `<input checked="" disabled="">`}, } for _, test := range tests { t.Run(test.input, func(t *testing.T) { assert.Equal(t, test.expVal, Sanitize(test.input)) assert.Equal(t, test.expVal, string(SanitizeBytes([]byte(test.input)))) }) } }
go
MIT
e68949dd1307d1b72a2fe885976ea0be72ee31d5
2026-01-07T08:35:43.578986Z
false
gogs/gogs
https://github.com/gogs/gogs/blob/e68949dd1307d1b72a2fe885976ea0be72ee31d5/internal/markup/orgmode.go
internal/markup/orgmode.go
// Copyright 2017 The Gogs Authors. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package markup import ( "bytes" "path/filepath" "strings" "github.com/niklasfasching/go-org/org" ) var orgModeExtensions = []string{".org"} // IsOrgModeFile reports whether name looks like a Org-mode file based on its extension. func IsOrgModeFile(name string) bool { extension := strings.ToLower(filepath.Ext(name)) for _, ext := range orgModeExtensions { if strings.ToLower(ext) == extension { return true } } return false } // RawOrgMode renders content in Org-mode syntax to HTML without handling special links. func RawOrgMode(body []byte, urlPrefix string) (result []byte) { html, err := org.New().Silent().Parse(bytes.NewReader(body), urlPrefix).Write(org.NewHTMLWriter()) if err != nil { return []byte(err.Error()) } return []byte(html) } // OrgMode takes a string or []byte and renders to HTML in Org-mode syntax with special links. func OrgMode(input any, urlPrefix string, metas map[string]string) []byte { return Render(TypeOrgMode, input, urlPrefix, metas) }
go
MIT
e68949dd1307d1b72a2fe885976ea0be72ee31d5
2026-01-07T08:35:43.578986Z
false
gogs/gogs
https://github.com/gogs/gogs/blob/e68949dd1307d1b72a2fe885976ea0be72ee31d5/internal/markup/sanitizer.go
internal/markup/sanitizer.go
// Copyright 2017 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package markup

import (
	"sync"

	"github.com/microcosm-cc/bluemonday"

	"gogs.io/gogs/internal/conf"
	"gogs.io/gogs/internal/lazyregexp"
)

// Sanitizer is a protection wrapper of *bluemonday.Policy which does not allow
// any modification to the underlying policies once it's been created.
type Sanitizer struct {
	policy *bluemonday.Policy
	// init guarantees the policy is extended exactly once, no matter how many
	// times NewSanitizer is called.
	init sync.Once
}

// sanitizer is the package-level singleton; it starts from bluemonday's
// user-generated-content baseline policy.
var sanitizer = &Sanitizer{
	policy: bluemonday.UGCPolicy(),
}

// NewSanitizer initializes sanitizer with allowed attributes based on settings.
// Multiple calls to this function will only create one instance of Sanitizer during
// entire application lifecycle.
func NewSanitizer() {
	sanitizer.init.Do(func() {
		// We only want to allow HighlightJS specific classes for code blocks
		sanitizer.policy.AllowAttrs("class").Matching(lazyregexp.New(`^language-\w+$`).Regexp()).OnElements("code")

		// Checkboxes (rendered for task-list items); only type="checkbox" is
		// permitted so other input types are stripped.
		sanitizer.policy.AllowAttrs("type").Matching(lazyregexp.New(`^checkbox$`).Regexp()).OnElements("input")
		sanitizer.policy.AllowAttrs("checked", "disabled").OnElements("input")

		// Data URLs
		sanitizer.policy.AllowURLSchemes("data")

		// Custom URL-Schemes configured by the administrator.
		sanitizer.policy.AllowURLSchemes(conf.Markdown.CustomURLSchemes...)
	})
}

// Sanitize takes a string that contains a HTML fragment or document and applies policy whitelist.
func Sanitize(s string) string {
	return sanitizer.policy.Sanitize(s)
}

// SanitizeBytes takes a []byte slice that contains a HTML fragment or document and applies policy whitelist.
func SanitizeBytes(b []byte) []byte {
	return sanitizer.policy.SanitizeBytes(b)
}
go
MIT
e68949dd1307d1b72a2fe885976ea0be72ee31d5
2026-01-07T08:35:43.578986Z
false
gogs/gogs
https://github.com/gogs/gogs/blob/e68949dd1307d1b72a2fe885976ea0be72ee31d5/internal/markup/markdown.go
internal/markup/markdown.go
// Copyright 2014 The Gogs Authors. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package markup import ( "bytes" "fmt" "path" "path/filepath" "strings" "github.com/russross/blackfriday" "gogs.io/gogs/internal/conf" "gogs.io/gogs/internal/lazyregexp" "gogs.io/gogs/internal/tool" ) // IsMarkdownFile reports whether name looks like a Markdown file based on its extension. func IsMarkdownFile(name string) bool { extension := strings.ToLower(filepath.Ext(name)) for _, ext := range conf.Markdown.FileExtensions { if strings.ToLower(ext) == extension { return true } } return false } // MarkdownRenderer is a extended version of underlying Markdown render object. type MarkdownRenderer struct { blackfriday.Renderer urlPrefix string } var validLinksPattern = lazyregexp.New(`^[a-z][\w-]+://|^mailto:`) // isLink reports whether link fits valid format. func isLink(link []byte) bool { return validLinksPattern.Match(link) } // Link defines how formal links should be processed to produce corresponding HTML elements. func (r *MarkdownRenderer) Link(out *bytes.Buffer, link, title, content []byte) { if len(link) > 0 && !isLink(link) { if link[0] != '#' { link = []byte(path.Join(r.urlPrefix, string(link))) } } r.Renderer.Link(out, link, title, content) } // AutoLink defines how auto-detected links should be processed to produce corresponding HTML elements. // Reference for kind: https://github.com/russross/blackfriday/blob/master/markdown.go#L69-L76 func (r *MarkdownRenderer) AutoLink(out *bytes.Buffer, link []byte, kind int) { if kind != blackfriday.LINK_TYPE_NORMAL { r.Renderer.AutoLink(out, link, kind) return } // Since this method could only possibly serve one link at a time, // we do not need to find all. 
if bytes.HasPrefix(link, []byte(conf.Server.ExternalURL)) { m := CommitPattern.Find(link) if m != nil { m = bytes.TrimSpace(m) i := bytes.Index(m, []byte("commit/")) j := bytes.Index(m, []byte("#")) if j == -1 { j = len(m) } _, _ = fmt.Fprintf(out, ` <code><a href="%s">%s</a></code>`, m, tool.ShortSHA1(string(m[i+7:j]))) return } m = IssueFullPattern.Find(link) if m != nil { m = bytes.TrimSpace(m) i := bytes.Index(m, []byte("issues/")) j := bytes.Index(m, []byte("#")) if j == -1 { j = len(m) } index := string(m[i+7 : j]) fullRepoURL := conf.Server.ExternalURL + strings.TrimPrefix(r.urlPrefix, "/") var link string if strings.HasPrefix(string(m), fullRepoURL) { // Use a short issue reference if the URL refers to this repository link = fmt.Sprintf(`<a href="%s">#%s</a>`, m, index) } else { // Use a cross-repository issue reference if the URL refers to a different repository repo := string(m[len(conf.Server.ExternalURL) : i-1]) link = fmt.Sprintf(`<a href="%s">%s#%s</a>`, m, repo, index) } out.WriteString(link) return } } r.Renderer.AutoLink(out, link, kind) } // ListItem defines how list items should be processed to produce corresponding HTML elements. func (r *MarkdownRenderer) ListItem(out *bytes.Buffer, text []byte, flags int) { // Detect procedures to draw checkboxes. switch { case bytes.HasPrefix(text, []byte("[ ] ")): text = append([]byte(`<input type="checkbox" disabled="" />`), text[3:]...) case bytes.HasPrefix(text, []byte("[x] ")): text = append([]byte(`<input type="checkbox" disabled="" checked="" />`), text[3:]...) } r.Renderer.ListItem(out, text, flags) } // RawMarkdown renders content in Markdown syntax to HTML without handling special links. 
func RawMarkdown(body []byte, urlPrefix string) []byte { htmlFlags := 0 htmlFlags |= blackfriday.HTML_SKIP_STYLE htmlFlags |= blackfriday.HTML_OMIT_CONTENTS if conf.Smartypants.Enabled { htmlFlags |= blackfriday.HTML_USE_SMARTYPANTS if conf.Smartypants.Fractions { htmlFlags |= blackfriday.HTML_SMARTYPANTS_FRACTIONS } if conf.Smartypants.Dashes { htmlFlags |= blackfriday.HTML_SMARTYPANTS_DASHES } if conf.Smartypants.LatexDashes { htmlFlags |= blackfriday.HTML_SMARTYPANTS_LATEX_DASHES } if conf.Smartypants.AngledQuotes { htmlFlags |= blackfriday.HTML_SMARTYPANTS_ANGLED_QUOTES } } renderer := &MarkdownRenderer{ Renderer: blackfriday.HtmlRenderer(htmlFlags, "", ""), urlPrefix: urlPrefix, } // set up the parser extensions := 0 extensions |= blackfriday.EXTENSION_NO_INTRA_EMPHASIS extensions |= blackfriday.EXTENSION_TABLES extensions |= blackfriday.EXTENSION_FENCED_CODE extensions |= blackfriday.EXTENSION_AUTOLINK extensions |= blackfriday.EXTENSION_STRIKETHROUGH extensions |= blackfriday.EXTENSION_SPACE_HEADERS extensions |= blackfriday.EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK if conf.Markdown.EnableHardLineBreak { extensions |= blackfriday.EXTENSION_HARD_LINE_BREAK } return blackfriday.Markdown(body, renderer, extensions) } // Markdown takes a string or []byte and renders to HTML in Markdown syntax with special links. func Markdown(input any, urlPrefix string, metas map[string]string) []byte { return Render(TypeMarkdown, input, urlPrefix, metas) }
go
MIT
e68949dd1307d1b72a2fe885976ea0be72ee31d5
2026-01-07T08:35:43.578986Z
false
gogs/gogs
https://github.com/gogs/gogs/blob/e68949dd1307d1b72a2fe885976ea0be72ee31d5/internal/sync/exclusive_pool.go
internal/sync/exclusive_pool.go
// Copyright 2016 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package sync

import (
	"sync"
)

// ExclusivePool is a pool of non-identical instances
// that only one instance with same identity is in the pool at a time.
// In other words, only instances with different identities can be in
// the pool the same time. If another instance with same identity tries
// to get into the pool, it hangs until previous instance left the pool.
//
// This pool is particularly useful for performing tasks on same resource
// on the file system in different goroutines.
type ExclusivePool struct {
	// lock guards both maps below.
	lock sync.Mutex

	// pool maintains locks for each instance in the pool.
	pool map[string]*sync.Mutex

	// count maintains the number of times an instance with same identity checks in
	// to the pool, and should be reduced to 0 (removed from map) by checking out
	// with same number of times.
	// The purpose of count is to delete lock when count down to 0 and recycle memory
	// from map object.
	count map[string]int
}

// NewExclusivePool initializes and returns a new ExclusivePool object.
func NewExclusivePool() *ExclusivePool {
	return &ExclusivePool{
		pool:  make(map[string]*sync.Mutex),
		count: make(map[string]int),
	}
}

// CheckIn checks in an instance to the pool and hangs while instance
// with same identity is using the lock.
func (p *ExclusivePool) CheckIn(identity string) {
	p.lock.Lock()

	// Create the per-identity mutex lazily on first check-in.
	lock, has := p.pool[identity]
	if !has {
		lock = &sync.Mutex{}
		p.pool[identity] = lock
	}
	p.count[identity]++

	// Release the pool lock BEFORE blocking on the per-identity lock,
	// otherwise a second caller would deadlock the whole pool.
	p.lock.Unlock()
	lock.Lock()
}

// CheckOut checks out an instance from the pool and releases the lock
// to let other instances with same identity to grab the lock.
//
// NOTE(review): calling CheckOut for an identity that was never checked in
// dereferences a missing map entry and panics — callers must pair calls.
func (p *ExclusivePool) CheckOut(identity string) {
	p.lock.Lock()
	defer p.lock.Unlock()

	p.pool[identity].Unlock()
	// Last check-out for this identity: drop both map entries so the maps
	// do not grow without bound.
	if p.count[identity] == 1 {
		delete(p.pool, identity)
		delete(p.count, identity)
	} else {
		p.count[identity]--
	}
}
go
MIT
e68949dd1307d1b72a2fe885976ea0be72ee31d5
2026-01-07T08:35:43.578986Z
false
gogs/gogs
https://github.com/gogs/gogs/blob/e68949dd1307d1b72a2fe885976ea0be72ee31d5/internal/sync/status_table.go
internal/sync/status_table.go
// Copyright 2016 The Gogs Authors. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package sync import ( "sync" ) // StatusTable is a table maintains true/false values. // // This table is particularly useful for un/marking and checking values // in different goroutines. type StatusTable struct { sync.RWMutex pool map[string]bool } // NewStatusTable initializes and returns a new StatusTable object. func NewStatusTable() *StatusTable { return &StatusTable{ pool: make(map[string]bool), } } // Start sets value of given name to true in the pool. func (p *StatusTable) Start(name string) { p.Lock() defer p.Unlock() p.pool[name] = true } // Stop sets value of given name to false in the pool. func (p *StatusTable) Stop(name string) { p.Lock() defer p.Unlock() p.pool[name] = false } // IsRunning checks if value of given name is set to true in the pool. func (p *StatusTable) IsRunning(name string) bool { p.RLock() defer p.RUnlock() return p.pool[name] }
go
MIT
e68949dd1307d1b72a2fe885976ea0be72ee31d5
2026-01-07T08:35:43.578986Z
false
gogs/gogs
https://github.com/gogs/gogs/blob/e68949dd1307d1b72a2fe885976ea0be72ee31d5/internal/sync/unique_queue.go
internal/sync/unique_queue.go
// Copyright 2016 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package sync

import (
	"github.com/unknwon/com"
)

// UniqueQueue is a queue which guarantees only one instance of same
// identity is in the line. Instances with same identity will be
// discarded if there is already one in the line.
//
// This queue is particularly useful for preventing duplicated task
// of same purpose.
type UniqueQueue struct {
	// table records which identities are currently queued.
	table *StatusTable
	// queue carries the string form of queued identities to consumers.
	queue chan string
}

// NewUniqueQueue initializes and returns a new UniqueQueue object.
// A non-positive queueLength falls back to a default capacity of 100.
func NewUniqueQueue(queueLength int) *UniqueQueue {
	if queueLength <= 0 {
		queueLength = 100
	}

	return &UniqueQueue{
		table: NewStatusTable(),
		queue: make(chan string, queueLength),
	}
}

// Queue returns channel of queue for retrieving instances.
func (q *UniqueQueue) Queue() <-chan string {
	return q.queue
}

// Exist returns true if there is an instance with given identity
// exists in the queue.
func (q *UniqueQueue) Exist(id any) bool {
	return q.table.IsRunning(com.ToStr(id))
}

// AddFunc adds new instance to the queue with a custom runnable function,
// the queue is blocked until the function exits.
//
// NOTE(review): the pool map is written directly (not via table.Start) so the
// identity is marked while still holding the table lock for the duration of
// fn; Exist therefore reports true before the id is actually on the channel.
// The final channel send blocks when the queue is full.
func (q *UniqueQueue) AddFunc(id any, fn func()) {
	if q.Exist(id) {
		return
	}

	idStr := com.ToStr(id)
	q.table.Lock()
	q.table.pool[idStr] = true
	if fn != nil {
		fn()
	}
	q.table.Unlock()
	q.queue <- idStr
}

// Add adds new instance to the queue.
func (q *UniqueQueue) Add(id any) {
	q.AddFunc(id, nil)
}

// Remove removes instance from the queue.
func (q *UniqueQueue) Remove(id any) {
	q.table.Stop(com.ToStr(id))
}
go
MIT
e68949dd1307d1b72a2fe885976ea0be72ee31d5
2026-01-07T08:35:43.578986Z
false
gogs/gogs
https://github.com/gogs/gogs/blob/e68949dd1307d1b72a2fe885976ea0be72ee31d5/internal/tool/tool.go
internal/tool/tool.go
// Copyright 2014 The Gogs Authors. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package tool import ( "crypto/sha1" "encoding/base64" "encoding/hex" "fmt" "html/template" "strings" "time" "unicode" "unicode/utf8" "github.com/unknwon/com" "github.com/unknwon/i18n" log "unknwon.dev/clog/v2" "github.com/gogs/chardet" "gogs.io/gogs/internal/conf" "gogs.io/gogs/internal/cryptoutil" ) // ShortSHA1 truncates SHA1 string length to at most 10. func ShortSHA1(sha1 string) string { if len(sha1) > 10 { return sha1[:10] } return sha1 } // DetectEncoding returns best guess of encoding of given content. func DetectEncoding(content []byte) (string, error) { if utf8.Valid(content) { log.Trace("Detected encoding: UTF-8 (fast)") return "UTF-8", nil } result, err := chardet.NewTextDetector().DetectBest(content) if result.Charset != "UTF-8" && len(conf.Repository.ANSICharset) > 0 { log.Trace("Using default ANSICharset: %s", conf.Repository.ANSICharset) return conf.Repository.ANSICharset, err } log.Trace("Detected encoding: %s", result.Charset) return result.Charset, err } // BasicAuthDecode decodes username and password portions of HTTP Basic Authentication // from encoded content. 
func BasicAuthDecode(encoded string) (string, string, error) { s, err := base64.StdEncoding.DecodeString(encoded) if err != nil { return "", "", err } auth := strings.SplitN(string(s), ":", 2) return auth[0], auth[1], nil } // verify time limit code func VerifyTimeLimitCode(data string, minutes int, code string) bool { if len(code) <= 18 { return false } // split code start := code[:12] lives := code[12:18] if d, err := com.StrTo(lives).Int(); err == nil { minutes = d } // right active code retCode := CreateTimeLimitCode(data, minutes, start) if retCode == code && minutes > 0 { // check time is expired or not before, _ := time.ParseInLocation("200601021504", start, time.Local) now := time.Now() if before.Add(time.Minute*time.Duration(minutes)).Unix() > now.Unix() { return true } } return false } const TimeLimitCodeLength = 12 + 6 + 40 // CreateTimeLimitCode generates a time limit code based on given input data. // Format: 12 length date time string + 6 minutes string + 40 sha1 encoded string func CreateTimeLimitCode(data string, minutes int, startInf any) string { format := "200601021504" var start, end time.Time var startStr, endStr string if startInf == nil { // Use now time create code start = time.Now() startStr = start.Format(format) } else { // use start string create code startStr = startInf.(string) start, _ = time.ParseInLocation(format, startStr, time.Local) startStr = start.Format(format) } end = start.Add(time.Minute * time.Duration(minutes)) endStr = end.Format(format) // create sha1 encode string sh := sha1.New() _, _ = sh.Write([]byte(data + conf.Security.SecretKey + startStr + endStr + com.ToStr(minutes))) encoded := hex.EncodeToString(sh.Sum(nil)) code := fmt.Sprintf("%s%06d%s", startStr, minutes, encoded) return code } // HashEmail hashes email address to MD5 string. 
// https://en.gravatar.com/site/implement/hash/ func HashEmail(email string) string { return cryptoutil.MD5(strings.ToLower(strings.TrimSpace(email))) } // AvatarLink returns relative avatar link to the site domain by given email, // which includes app sub-url as prefix. However, it is possible // to return full URL if user enables Gravatar-like service. func AvatarLink(email string) (url string) { if conf.Picture.EnableFederatedAvatar && conf.Picture.LibravatarService != nil && strings.Contains(email, "@") { var err error url, err = conf.Picture.LibravatarService.FromEmail(email) if err != nil { log.Warn("AvatarLink.LibravatarService.FromEmail [%s]: %v", email, err) } } if url == "" && !conf.Picture.DisableGravatar { url = conf.Picture.GravatarSource + HashEmail(email) + "?d=identicon" } if url == "" { url = conf.Server.Subpath + "/img/avatar_default.png" } return url } // AppendAvatarSize appends avatar size query parameter to the URL in the correct format. func AppendAvatarSize(url string, size int) string { if strings.Contains(url, "?") { return url + "&s=" + com.ToStr(size) } return url + "?s=" + com.ToStr(size) } // Seconds-based time units const ( Minute = 60 Hour = 60 * Minute Day = 24 * Hour Week = 7 * Day Month = 30 * Day Year = 12 * Month ) func computeTimeDiff(diff int64) (int64, string) { diffStr := "" switch { case diff <= 0: diff = 0 diffStr = "now" case diff < 2: diff = 0 diffStr = "1 second" case diff < 1*Minute: diffStr = fmt.Sprintf("%d seconds", diff) diff = 0 case diff < 2*Minute: diff -= 1 * Minute diffStr = "1 minute" case diff < 1*Hour: diffStr = fmt.Sprintf("%d minutes", diff/Minute) diff -= diff / Minute * Minute case diff < 2*Hour: diff -= 1 * Hour diffStr = "1 hour" case diff < 1*Day: diffStr = fmt.Sprintf("%d hours", diff/Hour) diff -= diff / Hour * Hour case diff < 2*Day: diff -= 1 * Day diffStr = "1 day" case diff < 1*Week: diffStr = fmt.Sprintf("%d days", diff/Day) diff -= diff / Day * Day case diff < 2*Week: diff -= 1 * Week diffStr 
= "1 week" case diff < 1*Month: diffStr = fmt.Sprintf("%d weeks", diff/Week) diff -= diff / Week * Week case diff < 2*Month: diff -= 1 * Month diffStr = "1 month" case diff < 1*Year: diffStr = fmt.Sprintf("%d months", diff/Month) diff -= diff / Month * Month case diff < 2*Year: diff -= 1 * Year diffStr = "1 year" default: diffStr = fmt.Sprintf("%d years", diff/Year) diff = 0 } return diff, diffStr } // TimeSincePro calculates the time interval and generate full user-friendly string. func TimeSincePro(then time.Time) string { now := time.Now() diff := now.Unix() - then.Unix() if then.After(now) { return "future" } var timeStr, diffStr string for diff != 0 { diff, diffStr = computeTimeDiff(diff) timeStr += ", " + diffStr } return strings.TrimPrefix(timeStr, ", ") } func timeSince(then time.Time, lang string) string { now := time.Now() lbl := i18n.Tr(lang, "tool.ago") diff := now.Unix() - then.Unix() if then.After(now) { lbl = i18n.Tr(lang, "tool.from_now") diff = then.Unix() - now.Unix() } switch { case diff <= 0: return i18n.Tr(lang, "tool.now") case diff <= 2: return i18n.Tr(lang, "tool.1s", lbl) case diff < 1*Minute: return i18n.Tr(lang, "tool.seconds", diff, lbl) case diff < 2*Minute: return i18n.Tr(lang, "tool.1m", lbl) case diff < 1*Hour: return i18n.Tr(lang, "tool.minutes", diff/Minute, lbl) case diff < 2*Hour: return i18n.Tr(lang, "tool.1h", lbl) case diff < 1*Day: return i18n.Tr(lang, "tool.hours", diff/Hour, lbl) case diff < 2*Day: return i18n.Tr(lang, "tool.1d", lbl) case diff < 1*Week: return i18n.Tr(lang, "tool.days", diff/Day, lbl) case diff < 2*Week: return i18n.Tr(lang, "tool.1w", lbl) case diff < 1*Month: return i18n.Tr(lang, "tool.weeks", diff/Week, lbl) case diff < 2*Month: return i18n.Tr(lang, "tool.1mon", lbl) case diff < 1*Year: return i18n.Tr(lang, "tool.months", diff/Month, lbl) case diff < 2*Year: return i18n.Tr(lang, "tool.1y", lbl) default: return i18n.Tr(lang, "tool.years", diff/Year, lbl) } } func RawTimeSince(t time.Time, lang string) 
string { return timeSince(t, lang) } // TimeSince calculates the time interval and generate user-friendly string. func TimeSince(t time.Time, lang string) template.HTML { return template.HTML(fmt.Sprintf(`<span class="time-since" title="%s">%s</span>`, t.Format(conf.Time.FormatLayout), timeSince(t, lang))) } // Subtract deals with subtraction of all types of number. func Subtract(left, right any) any { var rleft, rright int64 var fleft, fright float64 isInt := true switch left := left.(type) { case int: rleft = int64(left) case int8: rleft = int64(left) case int16: rleft = int64(left) case int32: rleft = int64(left) case int64: rleft = left case float32: fleft = float64(left) isInt = false case float64: fleft = left isInt = false } switch right := right.(type) { case int: rright = int64(right) case int8: rright = int64(right) case int16: rright = int64(right) case int32: rright = int64(right) case int64: rright = right case float32: fright = float64(left.(float32)) isInt = false case float64: fleft = left.(float64) isInt = false } if isInt { return rleft - rright } else { return fleft + float64(rleft) - (fright + float64(rright)) } } // StringsToInt64s converts a slice of string to a slice of int64. func StringsToInt64s(strs []string) []int64 { ints := make([]int64, len(strs)) for i := range strs { ints[i] = com.StrTo(strs[i]).MustInt64() } return ints } // Int64sToStrings converts a slice of int64 to a slice of string. func Int64sToStrings(ints []int64) []string { strs := make([]string, len(ints)) for i := range ints { strs[i] = com.ToStr(ints[i]) } return strs } // Int64sToMap converts a slice of int64 to a int64 map. func Int64sToMap(ints []int64) map[int64]bool { m := make(map[int64]bool) for _, i := range ints { m[i] = true } return m } // IsLetter reports whether the rune is a letter (category L). 
// https://github.com/golang/go/blob/master/src/go/scanner/scanner.go#L257 func IsLetter(ch rune) bool { return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) }
go
MIT
e68949dd1307d1b72a2fe885976ea0be72ee31d5
2026-01-07T08:35:43.578986Z
false
gogs/gogs
https://github.com/gogs/gogs/blob/e68949dd1307d1b72a2fe885976ea0be72ee31d5/internal/tool/file.go
internal/tool/file.go
// Copyright 2017 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package tool

import (
	"fmt"
	"math"
	"net/http"
	"strings"
)

// IsTextFile returns true if file content format is plain text or empty.
func IsTextFile(data []byte) bool {
	if len(data) == 0 {
		return true
	}
	return strings.Contains(http.DetectContentType(data), "text/")
}

// IsImageFile returns true if the sniffed content type is an image.
func IsImageFile(data []byte) bool {
	return strings.Contains(http.DetectContentType(data), "image/")
}

// IsPDFFile returns true if the sniffed content type is a PDF document.
func IsPDFFile(data []byte) bool {
	return strings.Contains(http.DetectContentType(data), "application/pdf")
}

// IsVideoFile returns true if the sniffed content type is a video.
func IsVideoFile(data []byte) bool {
	return strings.Contains(http.DetectContentType(data), "video/")
}

// logn returns the logarithm of n in base b.
func logn(n, b float64) float64 {
	return math.Log(n) / math.Log(b)
}

// humanateBytes formats s using the given base (e.g. 1024) and unit suffixes,
// picking the largest unit and printing one decimal for values below 10.
func humanateBytes(s uint64, base float64, sizes []string) string {
	// Values under 10 bytes are printed as-is.
	if s < 10 {
		return fmt.Sprintf("%d B", s)
	}
	// e is the index of the unit suffix to use.
	e := math.Floor(logn(float64(s), base))
	suffix := sizes[int(e)]
	val := float64(s) / math.Pow(base, math.Floor(e))
	f := "%.0f"
	if val < 10 {
		f = "%.1f"
	}

	return fmt.Sprintf(f+" %s", val, suffix)
}

// FileSize calculates the file size and generate user-friendly string.
func FileSize(s int64) string {
	sizes := []string{"B", "KB", "MB", "GB", "TB", "PB", "EB"}
	return humanateBytes(uint64(s), 1024, sizes)
}
go
MIT
e68949dd1307d1b72a2fe885976ea0be72ee31d5
2026-01-07T08:35:43.578986Z
false
gogs/gogs
https://github.com/gogs/gogs/blob/e68949dd1307d1b72a2fe885976ea0be72ee31d5/internal/tool/path.go
internal/tool/path.go
// Copyright 2018 The Gogs Authors. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package tool import ( "path/filepath" "strings" ) // IsSameSiteURLPath returns true if the URL path belongs to the same site, false otherwise. // False: //url, http://url, /\url // True: /url func IsSameSiteURLPath(url string) bool { return len(url) >= 2 && url[0] == '/' && url[1] != '/' && url[1] != '\\' } // IsMaliciousPath returns true if given path is an absolute path or contains malicious content // which has potential to traverse upper level directories. func IsMaliciousPath(path string) bool { return filepath.IsAbs(path) || strings.Contains(path, "..") }
go
MIT
e68949dd1307d1b72a2fe885976ea0be72ee31d5
2026-01-07T08:35:43.578986Z
false
gogs/gogs
https://github.com/gogs/gogs/blob/e68949dd1307d1b72a2fe885976ea0be72ee31d5/internal/tool/path_test.go
internal/tool/path_test.go
// Copyright 2018 The Gogs Authors. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package tool import ( "testing" "github.com/stretchr/testify/assert" ) func Test_IsSameSiteURLPath(t *testing.T) { tests := []struct { url string expVal bool }{ {url: "//github.com", expVal: false}, {url: "http://github.com", expVal: false}, {url: "https://github.com", expVal: false}, {url: "/\\github.com", expVal: false}, {url: "/admin", expVal: true}, {url: "/user/repo", expVal: true}, } for _, test := range tests { t.Run(test.url, func(t *testing.T) { assert.Equal(t, test.expVal, IsSameSiteURLPath(test.url)) }) } } func Test_IsMaliciousPath(t *testing.T) { tests := []struct { path string expVal bool }{ {path: "../../../../../../../../../data/gogs/data/sessions/a/9/a9f0ab6c3ef63dd8", expVal: true}, {path: "..\\/..\\/../data/gogs/data/sessions/a/9/a9f0ab6c3ef63dd8", expVal: true}, {path: "data/gogs/../../../../../../../../../data/sessions/a/9/a9f0ab6c3ef63dd8", expVal: true}, {path: "..\\..\\..\\..\\..\\..\\..\\..\\..\\data\\gogs\\data\\sessions\\a\\9\\a9f0ab6c3ef63dd8", expVal: true}, {path: "data\\gogs\\..\\..\\..\\..\\..\\..\\..\\..\\..\\data\\sessions\\a\\9\\a9f0ab6c3ef63dd8", expVal: true}, {path: "data/sessions/a/9/a9f0ab6c3ef63dd8", expVal: false}, {path: "data\\sessions\\a\\9\\a9f0ab6c3ef63dd8", expVal: false}, } for _, test := range tests { t.Run(test.path, func(t *testing.T) { assert.Equal(t, test.expVal, IsMaliciousPath(test.path)) }) } }
go
MIT
e68949dd1307d1b72a2fe885976ea0be72ee31d5
2026-01-07T08:35:43.578986Z
false
gogs/gogs
https://github.com/gogs/gogs/blob/e68949dd1307d1b72a2fe885976ea0be72ee31d5/templates/embed.go
templates/embed.go
// Copyright 2020 The Gogs Authors. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package templates import ( "bytes" "embed" "fmt" "io" "io/fs" "os" "path" "strings" "gopkg.in/macaron.v1" "gogs.io/gogs/internal/osutil" ) //go:embed *.tmpl **/* var files embed.FS // fileSystem implements the macaron.TemplateFileSystem interface. type fileSystem struct { files []macaron.TemplateFile } func (fs *fileSystem) ListFiles() []macaron.TemplateFile { return fs.files } func (fs *fileSystem) Get(name string) (io.Reader, error) { for i := range fs.files { if fs.files[i].Name()+fs.files[i].Ext() == name { return bytes.NewReader(fs.files[i].Data()), nil } } return nil, fmt.Errorf("file %q not found", name) } func mustNames(fsys fs.FS) []string { var names []string walkDirFunc := func(path string, d fs.DirEntry, err error) error { if !d.IsDir() { names = append(names, path) } return nil } if err := fs.WalkDir(fsys, ".", walkDirFunc); err != nil { panic("assetNames failure: " + err.Error()) } return names } // NewTemplateFileSystem returns a macaron.TemplateFileSystem instance for embedded assets. // The argument "dir" can be used to serve subset of embedded assets. Template file // found under the "customDir" on disk has higher precedence over embedded assets. 
func NewTemplateFileSystem(dir, customDir string) macaron.TemplateFileSystem {
	// Ensure a trailing slash so the prefix check below matches whole path
	// segments only.
	if dir != "" && !strings.HasSuffix(dir, "/") {
		dir += "/"
	}

	var err error
	var tmplFiles []macaron.TemplateFile
	names := mustNames(files)
	for _, name := range names {
		// Only serve embedded assets under the requested subdirectory.
		if !strings.HasPrefix(name, dir) {
			continue
		}

		// Check if corresponding custom file exists; a file with the same
		// relative path under "customDir" overrides the embedded copy.
		var data []byte
		fpath := path.Join(customDir, name)
		if osutil.IsFile(fpath) {
			data, err = os.ReadFile(fpath)
		} else {
			data, err = files.ReadFile(name)
		}
		if err != nil {
			panic(err)
		}

		// macaron expects the template name without the serving directory
		// prefix and with the extension split off.
		name = strings.TrimPrefix(name, dir)
		ext := path.Ext(name)
		name = strings.TrimSuffix(name, ext)
		tmplFiles = append(tmplFiles, macaron.NewTplFile(name, data, ext))
	}
	return &fileSystem{files: tmplFiles}
}
go
MIT
e68949dd1307d1b72a2fe885976ea0be72ee31d5
2026-01-07T08:35:43.578986Z
false
gogs/gogs
https://github.com/gogs/gogs/blob/e68949dd1307d1b72a2fe885976ea0be72ee31d5/conf/embed.go
conf/embed.go
// Copyright 2022 The Gogs Authors. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package conf import ( "embed" ) //go:embed app.ini **/* var Files embed.FS // FileNames returns a list of filenames exists in the given direction within // Files. The list includes names of subdirectories. func FileNames(dir string) ([]string, error) { entries, err := Files.ReadDir(dir) if err != nil { return nil, err } fileNames := make([]string, 0, len(entries)) for _, entry := range entries { fileNames = append(fileNames, entry.Name()) } return fileNames, nil }
go
MIT
e68949dd1307d1b72a2fe885976ea0be72ee31d5
2026-01-07T08:35:43.578986Z
false
gogs/gogs
https://github.com/gogs/gogs/blob/e68949dd1307d1b72a2fe885976ea0be72ee31d5/conf/embed_test.go
conf/embed_test.go
// Copyright 2022 The Gogs Authors. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package conf import ( "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestFileNames(t *testing.T) { names, err := FileNames(".") require.NoError(t, err) want := []string{"app.ini", "auth.d", "gitignore", "label", "license", "locale", "readme"} assert.Equal(t, want, names) }
go
MIT
e68949dd1307d1b72a2fe885976ea0be72ee31d5
2026-01-07T08:35:43.578986Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/rclone.go
rclone.go
// Sync files and directories to and from local and remote object stores // // Nick Craig-Wood <nick@craig-wood.com> package main import ( _ "github.com/rclone/rclone/backend/all" // import all backends "github.com/rclone/rclone/cmd" _ "github.com/rclone/rclone/cmd/all" // import all commands _ "github.com/rclone/rclone/lib/plugin" // import plugins ) func main() { cmd.Main() }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/librclone/librclone.go
librclone/librclone.go
// Package librclone exports shims for C library use // // This directory contains code to build rclone as a C library and the // shims for accessing rclone from C. // // The shims are a thin wrapper over the rclone RPC. // // Build a shared library like this: // // go build --buildmode=c-shared -o librclone.so github.com/rclone/rclone/librclone // // Build a static library like this: // // go build --buildmode=c-archive -o librclone.a github.com/rclone/rclone/librclone // // Both the above commands will also generate `librclone.h` which should // be `#include`d in `C` programs wishing to use the library. // // The library will depend on `libdl` and `libpthread`. package main /* #include <stdlib.h> struct RcloneRPCResult { char* Output; int Status; }; */ import "C" import ( "unsafe" "github.com/rclone/rclone/librclone/librclone" _ "github.com/rclone/rclone/backend/all" // import all backends _ "github.com/rclone/rclone/cmd/cmount" // import cmount _ "github.com/rclone/rclone/cmd/mount" // import mount _ "github.com/rclone/rclone/cmd/mount2" // import mount2 _ "github.com/rclone/rclone/fs/operations" // import operations/* rc commands _ "github.com/rclone/rclone/fs/sync" // import sync/* _ "github.com/rclone/rclone/lib/plugin" // import plugins ) // RcloneInitialize initializes rclone as a library // //export RcloneInitialize func RcloneInitialize() { librclone.Initialize() } // RcloneFinalize finalizes the library // //export RcloneFinalize func RcloneFinalize() { librclone.Finalize() } // RcloneRPCResult is returned from RcloneRPC // // Output will be returned as a serialized JSON object // Status is a HTTP status return (200=OK anything else fail) type RcloneRPCResult struct { //nolint:deadcode Output *C.char Status C.int } // RcloneRPC does a single RPC call. The inputs are (method, input) // and the output is (output, status). 
This is an exported interface // to the rclone API as described in https://rclone.org/rc/ // // method is a string, eg "operations/list" // input should be a string with a serialized JSON object // result.Output will be returned as a string with a serialized JSON object // result.Status is a HTTP status return (200=OK anything else fail) // // All strings are UTF-8 encoded, on all platforms. // // Caller is responsible for freeing the memory for result.Output // (see RcloneFreeString), result itself is passed on the stack. // //export RcloneRPC func RcloneRPC(method *C.char, input *C.char) (result C.struct_RcloneRPCResult) { //nolint:golint output, status := librclone.RPC(C.GoString(method), C.GoString(input)) result.Output = C.CString(output) result.Status = C.int(status) return result } // RcloneFreeString may be used to free the string returned by RcloneRPC // // If the caller has access to the C standard library, the free function can // normally be called directly instead. In some cases the caller uses a // runtime library which is not compatible, and then this function can be // used to release the memory with the same library that allocated it. // //export RcloneFreeString func RcloneFreeString(str *C.char) { C.free(unsafe.Pointer(str)) } // do nothing here - necessary for building into a C library func main() {}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/librclone/librclone/librclone.go
librclone/librclone/librclone.go
// Package librclone exports shims for library use // // This is the internal implementation which is used for C and // Gomobile libraries which need slightly different export styles. // // The shims are a thin wrapper over the rclone RPC. package librclone import ( "context" "encoding/json" "fmt" "net/http" "runtime" "runtime/debug" "strings" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/config/configfile" "github.com/rclone/rclone/fs/log" "github.com/rclone/rclone/fs/rc" "github.com/rclone/rclone/fs/rc/jobs" ) // Initialize initializes rclone as a library // //export Initialize func Initialize() { // A subset of initialisation copied from cmd.go // Note that we don't want to pull in anything which depends on pflags ctx := context.Background() // Start the logger log.InitLogging() // Load the config - this may need to be configurable configfile.Install() // Start accounting accounting.Start(ctx) } // Finalize finalizes the library func Finalize() { // TODO: how to clean up? what happens when rcserver terminates? // what about unfinished async jobs? runtime.GC() } // writeError returns a formatted error string and the status passed in func writeError(path string, in rc.Params, err error, status int) (string, int) { fs.Errorf(nil, "rc: %q: error: %v", path, err) params, status := rc.Error(path, in, err, status) var w strings.Builder err = rc.WriteJSON(&w, params) if err != nil { // ultimate fallback error fs.Errorf(nil, "writeError: failed to write JSON output from %#v: %v", in, err) status = http.StatusInternalServerError w.Reset() fmt.Fprintf(&w, `{ "error": %q, "path": %q, "status": %d }`, err, path, status) } return w.String(), status } // RPC runs a transaction over the RC // // Calling an rc function using JSON to input parameters and output the resulted JSON. 
// // operations/uploadfile and core/command are not supported as they need request or response object // modified from handlePost in rcserver.go func RPC(method string, input string) (output string, status int) { in := make(rc.Params) // Catch panics defer func() { if r := recover(); r != nil { output, status = writeError(method, in, fmt.Errorf("panic: %v\n%s", r, debug.Stack()), http.StatusInternalServerError) return } }() // create a buffer to capture the output if input != "" { err := json.NewDecoder(strings.NewReader(input)).Decode(&in) if err != nil { return writeError(method, in, fmt.Errorf("failed to read input JSON: %w", err), http.StatusBadRequest) } } // Find the call call := rc.Calls.Get(method) if call == nil { return writeError(method, in, fmt.Errorf("couldn't find method %q", method), http.StatusNotFound) } // TODO: handle these cases if call.NeedsRequest { return writeError(method, in, fmt.Errorf("method %q needs request, not supported", method), http.StatusNotFound) // Add the request to RC //in["_request"] = r } if call.NeedsResponse { return writeError(method, in, fmt.Errorf("method %q need response, not supported", method), http.StatusNotFound) //in["_response"] = w } fs.Debugf(nil, "rc: %q: with parameters %+v", method, in) _, out, err := jobs.NewJob(context.Background(), call.Fn, in) if err != nil { return writeError(method, in, err, http.StatusInternalServerError) } if out == nil { out = make(rc.Params) } fs.Debugf(nil, "rc: %q: reply %+v: %v", method, out, err) var w strings.Builder err = rc.WriteJSON(&w, out) if err != nil { fs.Errorf(nil, "rc: failed to write JSON output: %v", err) return writeError(method, in, err, http.StatusInternalServerError) } return w.String(), http.StatusOK }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/librclone/gomobile/gomobile.go
librclone/gomobile/gomobile.go
// Package gomobile exports shims for gomobile use package gomobile import ( "github.com/rclone/rclone/librclone/librclone" _ "github.com/rclone/rclone/backend/all" // import all backends _ "github.com/rclone/rclone/lib/plugin" // import plugins _ "golang.org/x/mobile/event/key" // make go.mod add this as a dependency ) // RcloneInitialize initializes rclone as a library func RcloneInitialize() { librclone.Initialize() } // RcloneFinalize finalizes the library func RcloneFinalize() { librclone.Finalize() } // RcloneRPCResult is returned from RcloneRPC // // Output will be returned as a serialized JSON object // Status is a HTTP status return (200=OK anything else fail) type RcloneRPCResult struct { Output string Status int } // RcloneRPC has an interface optimised for gomobile, in particular // the function signature is valid under gobind rules. // // https://pkg.go.dev/golang.org/x/mobile/cmd/gobind#hdr-Type_restrictions func RcloneRPC(method string, input string) (result *RcloneRPCResult) { //nolint:deadcode output, status := librclone.RPC(method, input) return &RcloneRPCResult{ Output: output, Status: status, } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fstest/fstest.go
fstest/fstest.go
// Package fstest provides utilities for testing the Fs package fstest // FIXME put name of test FS in Fs structure import ( "bytes" "compress/gzip" "context" "flag" "fmt" "io" "os" "path" "path/filepath" "regexp" "runtime" "sort" "strings" "testing" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configfile" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/log" "github.com/rclone/rclone/fs/walk" "github.com/rclone/rclone/fstest/testy" "github.com/rclone/rclone/lib/random" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/text/unicode/norm" ) // Globals var ( RemoteName = flag.String("remote", "", "Remote to test with, defaults to local filesystem") Verbose = flag.Bool("verbose", false, "Set to enable logging") DumpHeaders = flag.Bool("dump-headers", false, "Set to dump headers (needs -verbose)") DumpBodies = flag.Bool("dump-bodies", false, "Set to dump bodies (needs -verbose)") Individual = flag.Bool("individual", false, "Make individual bucket/container/directory for each test - much slower") LowLevelRetries = flag.Int("low-level-retries", 10, "Number of low level retries") UseListR = flag.Bool("fast-list", false, "Use recursive list if available. 
Uses more memory but fewer transactions.") // SizeLimit signals tests to skip maximum test file size and skip inappropriate runs SizeLimit = flag.Int64("size-limit", 0, "Limit maximum test file size") // ListRetries is the number of times to retry a listing to overcome eventual consistency ListRetries = flag.Int("list-retries", 3, "Number or times to retry listing") // MatchTestRemote matches the remote names used for testing MatchTestRemote = regexp.MustCompile(`^rclone-test-[abcdefghijklmnopqrstuvwxyz0123456789]{12}$`) ) // Initialise rclone for testing func Initialise() { ctx := context.Background() ci := fs.GetConfig(ctx) // Never ask for passwords, fail instead. // If your local config is encrypted set environment variable // "RCLONE_CONFIG_PASS=hunter2" (or your password) ci.AskPassword = false // Override the config file from the environment - we don't // parse the flags any more so this doesn't happen // automatically if envConfig := os.Getenv("RCLONE_CONFIG"); envConfig != "" { _ = config.SetConfigPath(envConfig) } if *RemoteName == "local" { *RemoteName = "" } configfile.Install() accounting.Start(ctx) if *Verbose { ci.LogLevel = fs.LogLevelDebug } if *DumpHeaders { ci.Dump |= fs.DumpHeaders } if *DumpBodies { ci.Dump |= fs.DumpBodies } ci.LowLevelRetries = *LowLevelRetries ci.UseListR = *UseListR log.InitLogging() _ = fs.LogReload(ci) } // Item represents an item for checking type Item struct { Path string Hashes map[hash.Type]string ModTime time.Time Size int64 } // NewItem creates an item from a string content func NewItem(Path, Content string, modTime time.Time) Item { i := Item{ Path: Path, ModTime: modTime, Size: int64(len(Content)), } hash := hash.NewMultiHasher() buf := bytes.NewBufferString(Content) _, err := io.Copy(hash, buf) if err != nil { fs.Fatalf(nil, "Failed to create item: %v", err) } i.Hashes = hash.Sums() return i } // CheckTimeEqualWithPrecision checks the times are equal within the // precision, returns the delta and a flag func 
CheckTimeEqualWithPrecision(t0, t1 time.Time, precision time.Duration) (time.Duration, bool) { dt := t0.Sub(t1) if dt >= precision || dt <= -precision { return dt, false } return dt, true } // AssertTimeEqualWithPrecision checks that want is within precision // of got, asserting that with t and logging remote func AssertTimeEqualWithPrecision(t *testing.T, remote string, want, got time.Time, precision time.Duration) { dt, ok := CheckTimeEqualWithPrecision(want, got, precision) assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (want %s vs got %s) (precision %s)", remote, dt, precision, want, got, precision)) } // CheckModTime checks the mod time to the given precision func (i *Item) CheckModTime(t *testing.T, obj fs.Object, modTime time.Time, precision time.Duration) { AssertTimeEqualWithPrecision(t, obj.Remote(), i.ModTime, modTime, precision) } // CheckHashes checks all the hashes the object supports are correct func (i *Item) CheckHashes(t *testing.T, obj fs.Object) { require.NotNil(t, obj) types := obj.Fs().Hashes().Array() for _, Hash := range types { // Check attributes sum, err := obj.Hash(context.Background(), Hash) require.NoError(t, err) assert.True(t, hash.Equals(i.Hashes[Hash], sum), fmt.Sprintf("%s/%s: %v hash incorrect - expecting %q got %q", obj.Fs().String(), obj.Remote(), Hash, i.Hashes[Hash], sum)) } } // Check checks all the attributes of the object are correct func (i *Item) Check(t *testing.T, obj fs.Object, precision time.Duration) { i.CheckHashes(t, obj) assert.Equal(t, i.Size, obj.Size(), fmt.Sprintf("%s: size incorrect file=%d vs obj=%d", i.Path, i.Size, obj.Size())) i.CheckModTime(t, obj, obj.ModTime(context.Background()), precision) } // Normalize runs a utf8 normalization on the string if running on OS // X. This is because OS X denormalizes file names it writes to the // local file system. 
func Normalize(name string) string { if runtime.GOOS == "darwin" { name = norm.NFC.String(name) } return name } // Items represents all items for checking type Items struct { byName map[string]*Item byNameAlt map[string]*Item items []Item } // NewItems makes an Items func NewItems(items []Item) *Items { is := &Items{ byName: make(map[string]*Item), byNameAlt: make(map[string]*Item), items: items, } // Fill up byName for i := range items { is.byName[Normalize(items[i].Path)] = &items[i] } return is } // Find checks off an item func (is *Items) Find(t *testing.T, obj fs.Object, precision time.Duration) { remote := Normalize(obj.Remote()) i, ok := is.byName[remote] if !ok { i, ok = is.byNameAlt[remote] assert.True(t, ok, fmt.Sprintf("Unexpected file %q", remote)) } if i != nil { delete(is.byName, i.Path) i.Check(t, obj, precision) } } // Done checks all finished func (is *Items) Done(t *testing.T) { if len(is.byName) != 0 { for name := range is.byName { t.Logf("Not found %q", name) } } assert.Equal(t, 0, len(is.byName), fmt.Sprintf("%d objects not found", len(is.byName))) } // makeListingFromItems returns a string representation of the items // // it returns two possible strings, one normal and one for windows func makeListingFromItems(items []Item) string { nameLengths := make([]string, len(items)) for i, item := range items { remote := Normalize(item.Path) nameLengths[i] = fmt.Sprintf("%s (%d)", remote, item.Size) } sort.Strings(nameLengths) return strings.Join(nameLengths, ", ") } // makeListingFromObjects returns a string representation of the objects func makeListingFromObjects(objs []fs.Object) string { nameLengths := make([]string, len(objs)) for i, obj := range objs { nameLengths[i] = fmt.Sprintf("%s (%d)", Normalize(obj.Remote()), obj.Size()) } sort.Strings(nameLengths) return strings.Join(nameLengths, ", ") } // filterEmptyDirs removes any empty (or containing only directories) // directories from expectedDirs func filterEmptyDirs(t *testing.T, items []Item, 
expectedDirs []string) (newExpectedDirs []string) { dirs := map[string]struct{}{"": {}} for _, item := range items { base := item.Path for { base = path.Dir(base) if base == "." || base == "/" { break } dirs[base] = struct{}{} } } for _, expectedDir := range expectedDirs { if _, found := dirs[expectedDir]; found { newExpectedDirs = append(newExpectedDirs, expectedDir) } else { t.Logf("Filtering empty directory %q", expectedDir) } } return newExpectedDirs } // CheckListingWithRoot checks the fs to see if it has the // expected contents with the given precision. // // If expectedDirs is non nil then we check those too. Note that no // directories returned is also OK as some remotes don't return // directories. // // dir is the directory used for the listing. func CheckListingWithRoot(t *testing.T, f fs.Fs, dir string, items []Item, expectedDirs []string, precision time.Duration) { if expectedDirs != nil && !f.Features().CanHaveEmptyDirectories { expectedDirs = filterEmptyDirs(t, items, expectedDirs) } is := NewItems(items) ctx := context.Background() oldErrors := accounting.Stats(ctx).GetErrors() var objs []fs.Object var dirs []fs.Directory var err error retries := *ListRetries sleep := time.Second / 2 wantListing := makeListingFromItems(items) gotListing := "<unset>" listingOK := false for i := 1; i <= retries; i++ { objs, dirs, err = walk.GetAll(ctx, f, dir, true, -1) if err != nil && err != fs.ErrorDirNotFound { t.Fatalf("Error listing: %v", err) } gotListing = makeListingFromObjects(objs) listingOK = wantListing == gotListing if listingOK && (expectedDirs == nil || len(dirs) == len(expectedDirs)) { // Put an extra sleep in if we did any retries just to make sure it really // is consistent if i != 1 { extraSleep := 5*time.Second + sleep t.Logf("Sleeping for %v just to make sure", extraSleep) time.Sleep(extraSleep) } break } sleep *= 2 t.Logf("Sleeping for %v for list eventual consistency: %d/%d", sleep, i, retries) time.Sleep(sleep) if doDirCacheFlush := 
f.Features().DirCacheFlush; doDirCacheFlush != nil { t.Logf("Flushing the directory cache") doDirCacheFlush() } } assert.True(t, listingOK, fmt.Sprintf("listing wrong, want\n %s got\n %s", wantListing, gotListing)) for _, obj := range objs { require.NotNil(t, obj) is.Find(t, obj, precision) } is.Done(t) // Don't notice an error when listing an empty directory if len(items) == 0 && oldErrors == 0 && accounting.Stats(ctx).GetErrors() == 1 { accounting.Stats(ctx).ResetErrors() } // Check the directories if expectedDirs != nil { expectedDirsCopy := make([]string, len(expectedDirs)) for i, dir := range expectedDirs { expectedDirsCopy[i] = Normalize(dir) } actualDirs := []string{} for _, dir := range dirs { actualDirs = append(actualDirs, Normalize(dir.Remote())) } sort.Strings(actualDirs) sort.Strings(expectedDirsCopy) assert.Equal(t, expectedDirsCopy, actualDirs, "directories") } } // CheckListingWithPrecision checks the fs to see if it has the // expected contents with the given precision. // // If expectedDirs is non nil then we check those too. Note that no // directories returned is also OK as some remotes don't return // directories. func CheckListingWithPrecision(t *testing.T, f fs.Fs, items []Item, expectedDirs []string, precision time.Duration) { CheckListingWithRoot(t, f, "", items, expectedDirs, precision) } // CheckListing checks the fs to see if it has the expected contents func CheckListing(t *testing.T, f fs.Fs, items []Item) { precision := f.Precision() CheckListingWithPrecision(t, f, items, nil, precision) } // CheckItemsWithPrecision checks the fs with the specified precision // to see if it has the expected items. 
func CheckItemsWithPrecision(t *testing.T, f fs.Fs, precision time.Duration, items ...Item) { CheckListingWithPrecision(t, f, items, nil, precision) } // CheckItems checks the fs to see if it has only the items passed in // using a precision of fs.Config.ModifyWindow func CheckItems(t *testing.T, f fs.Fs, items ...Item) { CheckListingWithPrecision(t, f, items, nil, fs.GetModifyWindow(context.TODO(), f)) } // CompareItems compares a set of DirEntries to a slice of items and a list of dirs // The modtimes are compared with the precision supplied func CompareItems(t *testing.T, entries fs.DirEntries, items []Item, expectedDirs []string, precision time.Duration, what string) { is := NewItems(items) var objs []fs.Object var dirs []fs.Directory wantListing := makeListingFromItems(items) for _, entry := range entries { switch x := entry.(type) { case fs.Directory: dirs = append(dirs, x) case fs.Object: objs = append(objs, x) // do nothing default: t.Fatalf("unknown object type %T", entry) } } gotListing := makeListingFromObjects(objs) listingOK := wantListing == gotListing assert.True(t, listingOK, fmt.Sprintf("%s not equal, want\n %s got\n %s", what, wantListing, gotListing)) for _, obj := range objs { require.NotNil(t, obj) is.Find(t, obj, precision) } is.Done(t) // Check the directories if expectedDirs != nil { expectedDirsCopy := make([]string, len(expectedDirs)) for i, dir := range expectedDirs { expectedDirsCopy[i] = Normalize(dir) } actualDirs := []string{} for _, dir := range dirs { actualDirs = append(actualDirs, Normalize(dir.Remote())) } sort.Strings(actualDirs) sort.Strings(expectedDirsCopy) assert.Equal(t, expectedDirsCopy, actualDirs, "directories not equal") } } // Time parses a time string or logs a fatal error func Time(timeString string) time.Time { t, err := time.Parse(time.RFC3339Nano, timeString) if err != nil { fs.Fatalf(nil, "Failed to parse time %q: %v", timeString, err) } return t } // LocalRemote creates a temporary directory name for local 
remotes func LocalRemote() (path string, err error) { path, err = os.MkdirTemp("", "rclone") if err == nil { // Now remove the directory err = os.Remove(path) } path = filepath.ToSlash(path) return } // RandomRemoteName makes a random bucket or subdirectory name // // Returns a random remote name plus the leaf name func RandomRemoteName(remoteName string) (string, string, error) { var err error var leafName string // Make a directory if remote name is null if remoteName == "" { remoteName, err = LocalRemote() if err != nil { return "", "", err } } else { if !strings.HasSuffix(remoteName, ":") { remoteName += "/" } leafName = "rclone-test-" + random.String(12) if !MatchTestRemote.MatchString(leafName) { fs.Fatalf(nil, "%q didn't match the test remote name regexp", leafName) } remoteName += leafName } return remoteName, leafName, nil } // RandomRemote makes a random bucket or subdirectory on the remote // from the -remote parameter // // Call the finalise function returned to Purge the fs at the end (and // the parent if necessary) // // Returns the remote, its url, a finaliser and an error func RandomRemote() (fs.Fs, string, func(), error) { var err error var parentRemote fs.Fs remoteName := *RemoteName remoteName, _, err = RandomRemoteName(remoteName) if err != nil { return nil, "", nil, err } remote, err := fs.NewFs(context.Background(), remoteName) if err != nil { return nil, "", nil, err } finalise := func() { Purge(remote) if parentRemote != nil { Purge(parentRemote) if err != nil { fs.Logf(nil, "Failed to purge %v: %v", parentRemote, err) } } } return remote, remoteName, finalise, nil } // Purge is a simplified re-implementation of operations.Purge for the // test routine cleanup to avoid circular dependencies. // // It logs errors rather than returning them func Purge(f fs.Fs) { // Create a stats group here so errors in the cleanup don't // interfere with the global stats. 
ctx := accounting.WithStatsGroup(context.Background(), "test-cleanup") var err error doFallbackPurge := true if doPurge := f.Features().Purge; doPurge != nil { doFallbackPurge = false fs.Debugf(f, "Purge remote") err = doPurge(ctx, "") if err == fs.ErrorCantPurge { doFallbackPurge = true } } if doFallbackPurge { dirs := []string{""} err = walk.ListR(ctx, f, "", true, -1, walk.ListAll, func(entries fs.DirEntries) error { var err error entries.ForObject(func(obj fs.Object) { fs.Debugf(f, "Purge object %q", obj.Remote()) err = obj.Remove(ctx) if err != nil { fs.Logf(nil, "purge failed to remove %q: %v", obj.Remote(), err) } }) entries.ForDir(func(dir fs.Directory) { dirs = append(dirs, dir.Remote()) }) return nil }) sort.Strings(dirs) for i := len(dirs) - 1; i >= 0; i-- { dir := dirs[i] fs.Debugf(f, "Purge dir %q", dir) err := f.Rmdir(ctx, dir) if err != nil { fs.Logf(nil, "purge failed to rmdir %q: %v", dir, err) } } } if err != nil { fs.Logf(nil, "purge failed: %v", err) } } // NewObject finds the object on the remote func NewObject(ctx context.Context, t *testing.T, f fs.Fs, remote string) fs.Object { var obj fs.Object var err error sleepTime := 1 * time.Second for i := 1; i <= *ListRetries; i++ { obj, err = f.NewObject(ctx, remote) if err == nil { break } t.Logf("Sleeping for %v for findObject eventual consistency: %d/%d (%v)", sleepTime, i, *ListRetries, err) time.Sleep(sleepTime) sleepTime = (sleepTime * 3) / 2 } require.NoError(t, err) return obj } // NewDirectoryRetries finds the directory with remote in f // // If directory can't be found it returns an error wrapping fs.ErrorDirNotFound // // One day this will be an rclone primitive func NewDirectoryRetries(ctx context.Context, t *testing.T, f fs.Fs, remote string, retries int) (fs.Directory, error) { var err error var dir fs.Directory sleepTime := 1 * time.Second root := path.Dir(remote) if root == "." 
{ root = "" } for i := 1; i <= retries; i++ { var entries fs.DirEntries entries, err = f.List(ctx, root) if err != nil { continue } for _, entry := range entries { var ok bool dir, ok = entry.(fs.Directory) if ok && dir.Remote() == remote { return dir, nil } } err = fmt.Errorf("directory %q not found in %q: %w", remote, root, fs.ErrorDirNotFound) if i < retries { t.Logf("Sleeping for %v for NewDirectoryRetries eventual consistency: %d/%d (%v)", sleepTime, i, retries, err) time.Sleep(sleepTime) sleepTime = (sleepTime * 3) / 2 } } return dir, err } // NewDirectory finds the directory with remote in f // // One day this will be an rclone primitive func NewDirectory(ctx context.Context, t *testing.T, f fs.Fs, remote string) fs.Directory { dir, err := NewDirectoryRetries(ctx, t, f, remote, *ListRetries) require.NoError(t, err) return dir } // CheckEntryMetadata checks the metadata on the directory // // This checks a limited set of metadata on the directory func CheckEntryMetadata(ctx context.Context, t *testing.T, f fs.Fs, entry fs.DirEntry, wantMeta fs.Metadata) { features := f.Features() do, ok := entry.(fs.Metadataer) require.True(t, ok, "Didn't find expected Metadata() method on %T", entry) gotMeta, err := do.Metadata(ctx) require.NoError(t, err) for k, v := range wantMeta { switch k { case "mtime", "atime", "btime", "ctime": // Check the system time Metadata wantT, err := time.Parse(time.RFC3339, v) require.NoError(t, err) gotT, err := time.Parse(time.RFC3339, gotMeta[k]) require.NoError(t, err) AssertTimeEqualWithPrecision(t, entry.Remote(), wantT, gotT, f.Precision()) default: // Check the User metadata if we can _, isDir := entry.(fs.Directory) if (isDir && features.UserDirMetadata) || (!isDir && features.UserMetadata) { assert.Equal(t, v, gotMeta[k]) } } } } // CheckDirModTime checks the modtime on the directory func CheckDirModTime(ctx context.Context, t *testing.T, f fs.Fs, dir fs.Directory, wantT time.Time) { if f.Features().DirSetModTime == nil && 
f.Features().MkdirMetadata == nil { fs.Debugf(f, "Skipping modtime test as remote does not support DirSetModTime or MkdirMetadata") return } gotT := dir.ModTime(ctx) precision := f.Precision() // For unknown reasons the precision of modification times of // directories on the CI is about >15mS. The tests work fine // when run in Virtualbox though so I conjecture this is // something to do with the file system used there. if runtime.GOOS == "windows" && testy.CI() { precision = 100 * time.Millisecond } AssertTimeEqualWithPrecision(t, dir.Remote(), wantT, gotT, precision) } // Gz returns a compressed version of its input string func Gz(t *testing.T, s string) string { var buf bytes.Buffer zw := gzip.NewWriter(&buf) _, err := zw.Write([]byte(s)) require.NoError(t, err) err = zw.Close() require.NoError(t, err) return buf.String() }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fstest/run.go
fstest/run.go
/* This provides Run for use in creating test suites To use this declare a TestMain // TestMain drives the tests func TestMain(m *testing.M) { fstest.TestMain(m) } And then make and destroy a Run in each test func TestMkdir(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() // test stuff } This will make r.Fremote and r.Flocal for a remote and a local remote. The remote is determined by the -remote flag passed in. */ package fstest import ( "bytes" "context" "flag" "fmt" "log" "os" "path" "path/filepath" "sort" "testing" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/cache" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/fs/walk" "github.com/rclone/rclone/lib/file" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // Run holds the remotes for a test run type Run struct { LocalName string Flocal fs.Fs Fremote fs.Fs FremoteName string Precision time.Duration cleanRemote func() mkdir map[string]bool // whether the remote has been made yet for the fs name Logf, Fatalf func(text string, args ...any) } // ResetRun re-reads the command line arguments into the global run. func ResetRun() { oneRun = newRun() } // TestMain drives the tests func TestMain(m *testing.M) { flag.Parse() if !*Individual { ResetRun() } rc := m.Run() if !*Individual { oneRun.Finalise() } os.Exit(rc) } // oneRun holds the master run data if individual is not set var oneRun *Run // newRun initialise the remote and local for testing and returns a // run object. // // r.Flocal is an empty local Fs // r.Fremote is an empty remote Fs // // Finalise() will tidy them away when done. 
func newRun() *Run { r := &Run{ Logf: log.Printf, Fatalf: log.Fatalf, mkdir: make(map[string]bool), } Initialise() var err error r.Fremote, r.FremoteName, r.cleanRemote, err = RandomRemote() if err != nil { r.Fatalf("Failed to open remote %q: %v", *RemoteName, err) } r.LocalName, err = os.MkdirTemp("", "rclone") if err != nil { r.Fatalf("Failed to create temp dir: %v", err) } r.LocalName = filepath.ToSlash(r.LocalName) r.Flocal, err = fs.NewFs(context.Background(), r.LocalName) if err != nil { r.Fatalf("Failed to make %q: %v", r.LocalName, err) } r.Precision = fs.GetModifyWindow(context.Background(), r.Fremote, r.Flocal) return r } // run f(), retrying it until it returns with no error or the limit // expires and it calls t.Fatalf func retry(t *testing.T, what string, f func() error) { var err error for try := 1; try <= *ListRetries; try++ { err = f() if err == nil { return } t.Logf("%s failed - try %d/%d: %v", what, try, *ListRetries, err) time.Sleep(time.Second) } t.Logf("%s failed: %v", what, err) } // newRunIndividual initialise the remote and local for testing and // returns a run object. Pass in true to make individual tests or // false to use the global one. // // r.Flocal is an empty local Fs // r.Fremote is an empty remote Fs // // Finalise() will tidy them away when done. 
func newRunIndividual(t *testing.T, individual bool) *Run {
	ctx := context.Background()
	var r *Run
	if individual {
		r = newRun()
	} else {
		// If not individual, use the global one with the clean method overridden
		r = new(Run)
		*r = *oneRun
		r.cleanRemote = func() {
			// Empty the shared remote in place rather than deleting it:
			// remove every file as it is listed, and remember directories
			// for removal afterwards (they must outlive their contents).
			var toDelete []string
			err := walk.ListR(ctx, r.Fremote, "", true, -1, walk.ListAll, func(entries fs.DirEntries) error {
				for _, entry := range entries {
					switch x := entry.(type) {
					case fs.Object:
						retry(t, fmt.Sprintf("removing file %q", x.Remote()), func() error { return x.Remove(ctx) })
					case fs.Directory:
						toDelete = append(toDelete, x.Remote())
					}
				}
				return nil
			})
			if err == fs.ErrorDirNotFound {
				// Nothing to clean if the remote root doesn't exist yet
				return
			}
			require.NoError(t, err)
			// Sort then walk backwards so children are removed before
			// their parent directories.
			sort.Strings(toDelete)
			for i := len(toDelete) - 1; i >= 0; i-- {
				dir := toDelete[i]
				retry(t, fmt.Sprintf("removing dir %q", dir), func() error {
					return r.Fremote.Rmdir(ctx, dir)
				})
			}
			// Check remote is empty
			CheckListingWithPrecision(t, r.Fremote, []Item{}, []string{}, r.Fremote.Precision())
			// Clear the remote cache
			cache.Clear()
		}
	}
	r.Logf = t.Logf
	r.Fatalf = t.Fatalf
	r.Logf("Remote %q, Local %q, Modify Window %q", r.Fremote, r.Flocal, fs.GetModifyWindow(ctx, r.Fremote))
	// Finalise is registered as a cleanup so it runs automatically
	// when the test (or subtest) finishes.
	t.Cleanup(r.Finalise)
	return r
}

// NewRun initialise the remote and local for testing and returns a
// run object.  Call this from the tests.
// // r.Flocal is an empty local Fs // r.Fremote is an empty remote Fs func NewRun(t *testing.T) *Run { return newRunIndividual(t, *Individual) } // NewRunIndividual as per NewRun but makes an individual remote for this test func NewRunIndividual(t *testing.T) *Run { return newRunIndividual(t, true) } // RenameFile renames a file in local func (r *Run) RenameFile(item Item, newpath string) Item { oldFilepath := path.Join(r.LocalName, item.Path) newFilepath := path.Join(r.LocalName, newpath) if err := os.Rename(oldFilepath, newFilepath); err != nil { r.Fatalf("Failed to rename file from %q to %q: %v", item.Path, newpath, err) } item.Path = newpath return item } // WriteFile writes a file to local func (r *Run) WriteFile(filePath, content string, t time.Time) Item { item := NewItem(filePath, content, t) // FIXME make directories? filePath = path.Join(r.LocalName, filePath) dirPath := path.Dir(filePath) err := file.MkdirAll(dirPath, 0770) if err != nil { r.Fatalf("Failed to make directories %q: %v", dirPath, err) } err = os.WriteFile(filePath, []byte(content), 0600) if err != nil { r.Fatalf("Failed to write file %q: %v", filePath, err) } err = os.Chtimes(filePath, t, t) if err != nil { r.Fatalf("Failed to chtimes file %q: %v", filePath, err) } return item } // ForceMkdir creates the remote func (r *Run) ForceMkdir(ctx context.Context, f fs.Fs) { err := f.Mkdir(ctx, "") if err != nil { r.Fatalf("Failed to mkdir %q: %v", f, err) } r.mkdir[f.String()] = true } // Mkdir creates the remote if it hasn't been created already func (r *Run) Mkdir(ctx context.Context, f fs.Fs) { if !r.mkdir[f.String()] { r.ForceMkdir(ctx, f) } } // WriteObjectTo writes an object to the fs, remote passed in func (r *Run) WriteObjectTo(ctx context.Context, f fs.Fs, remote, content string, modTime time.Time, useUnchecked bool) Item { put := f.Put if useUnchecked { put = f.Features().PutUnchecked if put == nil { r.Fatalf("Fs doesn't support PutUnchecked") } } r.Mkdir(ctx, f) // calculate all hashes 
f supports for content hash, err := hash.NewMultiHasherTypes(f.Hashes()) if err != nil { r.Fatalf("Failed to make new multi hasher: %v", err) } _, err = hash.Write([]byte(content)) if err != nil { r.Fatalf("Failed to make write to hash: %v", err) } hashSums := hash.Sums() const maxTries = 10 for tries := 1; ; tries++ { in := bytes.NewBufferString(content) objinfo := object.NewStaticObjectInfo(remote, modTime, int64(len(content)), true, hashSums, nil) _, err := put(ctx, in, objinfo) if err == nil { break } // Retry if err returned a retry error if fserrors.IsRetryError(err) && tries < maxTries { r.Logf("Retry Put of %q to %v: %d/%d (%v)", remote, f, tries, maxTries, err) time.Sleep(2 * time.Second) continue } r.Fatalf("Failed to put %q to %q: %v", remote, f, err) } return NewItem(remote, content, modTime) } // WriteObject writes an object to the remote func (r *Run) WriteObject(ctx context.Context, remote, content string, modTime time.Time) Item { return r.WriteObjectTo(ctx, r.Fremote, remote, content, modTime, false) } // WriteUncheckedObject writes an object to the remote not checking for duplicates func (r *Run) WriteUncheckedObject(ctx context.Context, remote, content string, modTime time.Time) Item { return r.WriteObjectTo(ctx, r.Fremote, remote, content, modTime, true) } // WriteBoth calls WriteObject and WriteFile with the same arguments func (r *Run) WriteBoth(ctx context.Context, remote, content string, modTime time.Time) Item { r.WriteFile(remote, content, modTime) return r.WriteObject(ctx, remote, content, modTime) } // CheckWithDuplicates does a test but allows duplicates func (r *Run) CheckWithDuplicates(t *testing.T, items ...Item) { var want, got []string // construct a []string of desired items for _, item := range items { want = append(want, fmt.Sprintf("%q %d", item.Path, item.Size)) } sort.Strings(want) // do the listing objs, _, err := walk.GetAll(context.Background(), r.Fremote, "", true, -1) if err != nil && err != fs.ErrorDirNotFound { 
t.Fatalf("Error listing: %v", err) } // construct a []string of actual items for _, o := range objs { got = append(got, fmt.Sprintf("%q %d", o.Remote(), o.Size())) } sort.Strings(got) assert.Equal(t, want, got) } // CheckLocalItems checks the local fs with proper precision // to see if it has the expected items. func (r *Run) CheckLocalItems(t *testing.T, items ...Item) { CheckItemsWithPrecision(t, r.Flocal, r.Precision, items...) } // CheckRemoteItems checks the remote fs with proper precision // to see if it has the expected items. func (r *Run) CheckRemoteItems(t *testing.T, items ...Item) { CheckItemsWithPrecision(t, r.Fremote, r.Precision, items...) } // CheckLocalListing checks the local fs with proper precision // to see if it has the expected contents. // // If expectedDirs is non nil then we check those too. Note that no // directories returned is also OK as some remotes don't return // directories. func (r *Run) CheckLocalListing(t *testing.T, items []Item, expectedDirs []string) { CheckListingWithPrecision(t, r.Flocal, items, expectedDirs, r.Precision) } // CheckRemoteListing checks the remote fs with proper precision // to see if it has the expected contents. // // If expectedDirs is non nil then we check those too. Note that no // directories returned is also OK as some remotes don't return // directories. 
func (r *Run) CheckRemoteListing(t *testing.T, items []Item, expectedDirs []string) {
	CheckListingWithPrecision(t, r.Fremote, items, expectedDirs, r.Precision)
}

// CheckDirectoryModTimes checks that the directory names in r.Flocal has the correct modtime compared to r.Fremote
func (r *Run) CheckDirectoryModTimes(t *testing.T, names ...string) {
	features := r.Fremote.Features()
	if features.DirSetModTime == nil && features.MkdirMetadata == nil {
		// The remote can't set directory modtimes, so nothing to compare
		fs.Debugf(r.Fremote, "Skipping modtime test as remote does not support DirSetModTime or MkdirMetadata")
		return
	}
	ctx := context.Background()
	for _, name := range names {
		expected := NewDirectory(ctx, t, r.Flocal, name).ModTime(ctx)
		remoteDir := NewDirectory(ctx, t, r.Fremote, name)
		CheckDirModTime(ctx, t, r.Fremote, remoteDir, expected)
	}
}

// cleanTempDir removes the local temporary directory, logging (but
// not failing) on error.
func (r *Run) cleanTempDir() {
	if err := os.RemoveAll(r.LocalName); err != nil {
		r.Logf("Failed to clean temporary directory %q: %v", r.LocalName, err)
	}
}

// Finalise cleans the remote and local
func (r *Run) Finalise() {
	r.cleanRemote()
	r.cleanTempDir()
	// Clear the remote cache
	cache.Clear()
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fstest/mockobject/mockobject.go
fstest/mockobject/mockobject.go
// Package mockobject provides a mock object which can be created from a string
package mockobject

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
)

// errNotImpl is returned by every mock method which has no useful
// implementation.
var errNotImpl = errors.New("not implemented")

// Object is a mock fs.Object useful for testing
//
// The underlying string is used as the remote path.
type Object string

// New returns mock fs.Object useful for testing
func New(name string) Object {
	return Object(name)
}

// String returns a description of the Object
func (o Object) String() string {
	return string(o)
}

// Fs returns read only access to the Fs that this object is part of
func (o Object) Fs() fs.Info {
	return nil
}

// Remote returns the remote path
func (o Object) Remote() string {
	return string(o)
}

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	return "", errNotImpl
}

// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
//
// The mock always returns the zero time.
func (o Object) ModTime(ctx context.Context) (t time.Time) {
	return t
}

// Size returns the size of the file
func (o Object) Size() int64 { return 0 }

// Storable says whether this object can be stored
func (o Object) Storable() bool {
	return true
}

// SetModTime sets the metadata on the object to set the modification date
func (o Object) SetModTime(ctx context.Context, t time.Time) error {
	return errNotImpl
}

// Open opens the file for read.  Call Close() on the returned io.ReadCloser
func (o Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
	return nil, errNotImpl
}

// Update in to the object with the modTime given of the given size
func (o Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	return errNotImpl
}

// Remove this object
func (o Object) Remove(ctx context.Context) error {
	return errNotImpl
}

// SeekMode specifies the optional Seek interface for the ReadCloser returned by Open
type SeekMode int

const (
	// SeekModeNone specifies no seek interface
	SeekModeNone SeekMode = iota
	// SeekModeRegular specifies the regular io.Seek interface
	SeekModeRegular
	// SeekModeRange specifies the fs.RangeSeek interface
	SeekModeRange
)

// SeekModes contains all valid SeekMode's
var SeekModes = []SeekMode{SeekModeNone, SeekModeRegular, SeekModeRange}

// ContentMockObject mocks an fs.Object and has content, mod time
type ContentMockObject struct {
	Object                  // embedded name-only mock supplies Remote()/String()
	content     []byte      // the data returned by Open
	seekMode    SeekMode    // which seek interface Open's reader exposes
	f           fs.Fs       // returned by Fs() once SetFs has been called
	unknownSize bool        // if set, Size() reports -1
	modTime     time.Time   // returned by ModTime
}

// WithContent returns an fs.Object with the given content.
func (o Object) WithContent(content []byte, mode SeekMode) *ContentMockObject {
	return &ContentMockObject{
		Object:   o,
		content:  content,
		seekMode: mode,
	}
}

// SetFs sets the return value of the Fs() call
func (o *ContentMockObject) SetFs(f fs.Fs) {
	o.f = f
}

// SetUnknownSize makes the mock object return -1 for size if true
func (o *ContentMockObject) SetUnknownSize(unknownSize bool) {
	o.unknownSize = unknownSize
}

// Fs returns read only access to the Fs that this object is part of
//
// This is nil unless SetFs has been called
func (o *ContentMockObject) Fs() fs.Info {
	return o.f
}

// Open opens the file for read.
// Call Close() on the returned io.ReadCloser
func (o *ContentMockObject) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
	size := int64(len(o.content))
	// Decode any seek/range options; offset/limit describe the
	// requested byte window (limit == -1 means "to the end").
	var offset, limit int64 = 0, -1
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
			offset = x.Offset
		case *fs.RangeOption:
			offset, limit = x.Decode(size)
		default:
			if option.Mandatory() {
				return nil, fmt.Errorf("unsupported mandatory option: %v", option)
			}
		}
	}
	if limit == -1 || offset+limit > size {
		limit = size - offset
	}
	var r *bytes.Reader
	if o.seekMode == SeekModeNone {
		// Non-seekable reader serves exactly the requested window
		r = bytes.NewReader(o.content[offset : offset+limit])
	} else {
		// Seekable readers are positioned at offset over the whole
		// content; note limit is not enforced in these modes.
		r = bytes.NewReader(o.content)
		_, err := r.Seek(offset, io.SeekStart)
		if err != nil {
			return nil, err
		}
	}
	// Wrap the reader so it exposes exactly the seek interface the
	// configured SeekMode promises.
	switch o.seekMode {
	case SeekModeNone:
		return &readCloser{r}, nil
	case SeekModeRegular:
		return &readSeekCloser{r}, nil
	case SeekModeRange:
		return &readRangeSeekCloser{r}, nil
	default:
		return nil, errors.New(o.seekMode.String())
	}
}

// Size returns the size of the file
func (o *ContentMockObject) Size() int64 {
	if o.unknownSize {
		return -1
	}
	return int64(len(o.content))
}

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *ContentMockObject) Hash(ctx context.Context, t hash.Type) (string, error) {
	hasher, err := hash.NewMultiHasherTypes(hash.NewHashSet(t))
	if err != nil {
		return "", err
	}
	_, err = hasher.Write(o.content)
	if err != nil {
		return "", err
	}
	return hasher.Sums()[t], nil
}

// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (o *ContentMockObject) ModTime(ctx context.Context) time.Time {
	return o.modTime
}

// SetModTime sets the metadata on the object to set the modification date
func (o *ContentMockObject) SetModTime(ctx context.Context, t time.Time) error {
	o.modTime = t
	return nil
}

// readCloser adds a no-op Close to an io.Reader.
type readCloser struct{ io.Reader }

func (r *readCloser) Close() error { return nil }

// readSeekCloser adds a no-op Close to an io.ReadSeeker.
type readSeekCloser struct{ io.ReadSeeker }

func (r *readSeekCloser) Close() error { return nil }

// readRangeSeekCloser adds fs.RangeSeek and a no-op Close to an io.ReadSeeker.
type readRangeSeekCloser struct{ io.ReadSeeker }

// RangeSeek implements fs.RangeSeeker; the length argument is ignored.
func (r *readRangeSeekCloser) RangeSeek(offset int64, whence int, length int64) (int64, error) {
	return r.ReadSeeker.Seek(offset, whence)
}

func (r *readRangeSeekCloser) Close() error { return nil }

// String returns a human readable name for the SeekMode.
func (m SeekMode) String() string {
	switch m {
	case SeekModeNone:
		return "SeekModeNone"
	case SeekModeRegular:
		return "SeekModeRegular"
	case SeekModeRange:
		return "SeekModeRange"
	default:
		return fmt.Sprintf("SeekModeInvalid(%d)", m)
	}
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fstest/fstests/fstests.go
fstest/fstests/fstests.go
// Package fstests provides generic integration tests for the Fs and
// Object interfaces.
//
// These tests are concerned with the basic functionality of a
// backend. The tests in fs/sync and fs/operations tests more
// cornercases that these tests don't.
package fstests

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"math/bits"
	"os"
	"path"
	"path/filepath"
	"reflect"
	"slices"
	"sort"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/cache"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fspath"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/object"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/testserver"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/random"
	"github.com/rclone/rclone/lib/readers"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// InternalTester is an optional interface for Fs which allows to execute internal tests
//
// This interface should be implemented in 'backend'_internal_test.go and not in 'backend'.go
type InternalTester interface {
	InternalTest(*testing.T)
}

// ChunkedUploadConfig contains the values used by TestFsPutChunked
// to determine the limits of chunked uploading
type ChunkedUploadConfig struct {
	// Minimum allowed chunk size
	MinChunkSize fs.SizeSuffix
	// Maximum allowed chunk size, 0 is no limit
	MaxChunkSize fs.SizeSuffix
	// Rounds the given chunk size up to the next valid value
	// nil will disable rounding
	// e.g. the next power of 2
	CeilChunkSize func(fs.SizeSuffix) fs.SizeSuffix
	// More than one chunk is required on upload
	NeedMultipleChunks bool
	// Skip this particular remote
	Skip bool
}

// SetUploadChunkSizer is a test only interface to change the upload chunk size at runtime
type SetUploadChunkSizer interface {
	// Change the configured UploadChunkSize.
	// Will only be called while no transfer is in progress.
	SetUploadChunkSize(fs.SizeSuffix) (fs.SizeSuffix, error)
}

// SetUploadCutoffer is a test only interface to change the upload cutoff size at runtime
type SetUploadCutoffer interface {
	// Change the configured UploadCutoff.
	// Will only be called while no transfer is in progress.
	SetUploadCutoff(fs.SizeSuffix) (fs.SizeSuffix, error)
}

// SetCopyCutoffer is a test only interface to change the copy cutoff size at runtime
type SetCopyCutoffer interface {
	// Change the configured CopyCutoff.
	// Will only be called while no transfer is in progress.
	// Return fs.ErrorNotImplemented if you can't implement this
	SetCopyCutoff(fs.SizeSuffix) (fs.SizeSuffix, error)
}

// NextPowerOfTwo returns the current or next bigger power of two.
// All values less or equal 0 will return 0
func NextPowerOfTwo(i fs.SizeSuffix) fs.SizeSuffix {
	// Counting the leading zeros of i-1 rounds i up to the next power
	// of two; for i <= 0 the shift count reaches 64 and the result
	// wraps to 0.
	return 1 << uint(64-bits.LeadingZeros64(uint64(i)-1))
}

// NextMultipleOf returns a function that can be used as a CeilChunkSize function.
// This function will return the next multiple of m that is equal or bigger than i.
// All values less or equal 0 will return 0.
func NextMultipleOf(m fs.SizeSuffix) func(fs.SizeSuffix) fs.SizeSuffix {
	if m <= 0 {
		panic(fmt.Sprintf("invalid multiplier %s", m))
	}
	return func(i fs.SizeSuffix) fs.SizeSuffix {
		if i <= 0 {
			return 0
		}
		// Round i up to the next multiple of m
		return (((i - 1) / m) + 1) * m
	}
}

// dirsToNames returns a sorted list of names
func dirsToNames(dirs []fs.Directory) []string {
	names := []string{}
	for _, dir := range dirs {
		names = append(names, fstest.Normalize(dir.Remote()))
	}
	sort.Strings(names)
	return names
}

// objsToNames returns a sorted list of object names
func objsToNames(objs []fs.Object) []string {
	names := []string{}
	for _, obj := range objs {
		names = append(names, fstest.Normalize(obj.Remote()))
	}
	sort.Strings(names)
	return names
}

// retry f() until no retriable error
//
// Unlike the logging-only retry in fstest, this fails the test via
// require.NoError if the last attempt still errors.
func retry(t *testing.T, what string, f func() error) {
	const maxTries = 10
	var err error
	for tries := 1; tries <= maxTries; tries++ {
		err = f()
		// exit if no error, or error is not retriable
		if err == nil || !fserrors.IsRetryError(err) {
			break
		}
		t.Logf("%s error: %v - low level retry %d/%d", what, err, tries, maxTries)
		time.Sleep(2 * time.Second)
	}
	require.NoError(t, err, what)
}

// check interface

// PutTestContentsMetadata puts file with given contents to the remote and checks it but unlike TestPutLarge doesn't remove
//
// It uploads the object with the mimeType and metadata passed in if set.
//
// It returns the object which will have been checked if check is set
func PutTestContentsMetadata(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item, useFileHashes bool, contents string, check bool, mimeType string, metadata fs.Metadata, options ...fs.OpenOption) fs.Object {
	var (
		err        error
		obj        fs.Object
		uploadHash *hash.MultiHasher
	)
	retry(t, "Put", func() error {
		buf := bytes.NewBufferString(contents)
		// Tee the upload through a hasher so we can record the hashes
		// of what was actually sent.
		uploadHash = hash.NewMultiHasher()
		in := io.TeeReader(buf, uploadHash)

		file.Size = int64(buf.Len())
		// The caller explicitly indicates whether the hashes in the file parameter should be used. If hashes is nil,
		// then NewStaticObjectInfo will calculate default hashes for use in the check.
		hashes := file.Hashes
		if !useFileHashes {
			hashes = nil
		}
		obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, hashes, nil)
		if mimeType != "" || metadata != nil {
			// force the --metadata flag on temporarily
			if metadata != nil {
				ci := fs.GetConfig(ctx)
				previousMetadata := ci.Metadata
				ci.Metadata = true
				defer func() {
					ci.Metadata = previousMetadata
				}()
			}
			obji.WithMetadata(metadata).WithMimeType(mimeType)
		}
		obj, err = f.Put(ctx, in, obji, options...)
		return err
	})
	file.Hashes = uploadHash.Sums()
	if check {
		// Overwrite time with that in metadata if it is already specified
		mtime, ok := metadata["mtime"]
		if ok {
			modTime, err := time.Parse(time.RFC3339Nano, mtime)
			require.NoError(t, err)
			file.ModTime = modTime
		}
		file.Check(t, obj, f.Precision())
		// Re-read the object and check again
		obj = fstest.NewObject(ctx, t, f, file.Path)
		file.Check(t, obj, f.Precision())
	}
	return obj
}

// PutTestContents puts file with given contents to the remote and checks it but unlike TestPutLarge doesn't remove
func PutTestContents(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item, contents string, check bool) fs.Object {
	return PutTestContentsMetadata(ctx, t, f, file, false, contents, check, "", nil)
}

// testPut puts file with random contents to the remote
func testPut(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item) (string, fs.Object) {
	return testPutMimeType(ctx, t, f, file, "", nil)
}

// testPutMimeType puts file with random contents to the remote and the mime type given
func testPutMimeType(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item, mimeType string, metadata fs.Metadata) (string, fs.Object) {
	contents := random.String(100)
	// We just generated new contents, but file may contain hashes generated by a previous operation
	if len(file.Hashes) > 0 {
		file.Hashes = make(map[hash.Type]string)
	}
	return contents, PutTestContentsMetadata(ctx, t, f, file, false, contents, true, mimeType, metadata)
}

// testPutLarge puts file to the remote, checks it and removes it on success.
//
// If stream is set, then it uploads the file with size -1
func testPutLarge(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item, stream bool) {
	var (
		err        error
		obj        fs.Object
		uploadHash *hash.MultiHasher
	)
	retry(t, "PutLarge", func() error {
		// Generate deterministic content of file.Size bytes and hash
		// it as it is uploaded.
		r := readers.NewPatternReader(file.Size)
		uploadHash = hash.NewMultiHasher()
		in := io.TeeReader(r, uploadHash)

		size := file.Size
		if stream {
			size = -1
		}
		obji := object.NewStaticObjectInfo(file.Path, file.ModTime, size, true, nil, nil)
		obj, err = f.Put(ctx, in, obji)
		if file.Size == 0 && err == fs.ErrorCantUploadEmptyFiles {
			t.Skip("Can't upload zero length files")
		}
		return err
	})
	file.Hashes = uploadHash.Sums()
	file.Check(t, obj, f.Precision())

	// Re-read the object and check again
	obj = fstest.NewObject(ctx, t, f, file.Path)
	file.Check(t, obj, f.Precision())

	// Download the object and check it is OK
	downloadHash := hash.NewMultiHasher()
	download, err := obj.Open(ctx)
	require.NoError(t, err)
	n, err := io.Copy(downloadHash, download)
	require.NoError(t, err)
	assert.Equal(t, file.Size, n)
	require.NoError(t, download.Close())
	assert.Equal(t, file.Hashes, downloadHash.Sums())

	// Remove the object
	require.NoError(t, obj.Remove(ctx))
}

// TestPutLarge puts file to the remote, checks it and removes it on success.
func TestPutLarge(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item) {
	testPutLarge(ctx, t, f, file, false)
}

// TestPutLargeStreamed puts file of unknown size to the remote, checks it and removes it on success.
func TestPutLargeStreamed(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item) { testPutLarge(ctx, t, f, file, true) } // ReadObject reads the contents of an object as a string func ReadObject(ctx context.Context, t *testing.T, obj fs.Object, limit int64, options ...fs.OpenOption) string { what := fmt.Sprintf("readObject(%q) limit=%d, options=%+v", obj, limit, options) in, err := obj.Open(ctx, options...) require.NoError(t, err, what) var r io.Reader = in if limit >= 0 { r = &io.LimitedReader{R: r, N: limit} } contents, err := io.ReadAll(r) require.NoError(t, err, what) err = in.Close() require.NoError(t, err, what) return string(contents) } // ExtraConfigItem describes a config item for the tests type ExtraConfigItem struct{ Name, Key, Value string } // Opt is options for Run type Opt struct { RemoteName string NilObject fs.Object ExtraConfig []ExtraConfigItem SkipBadWindowsCharacters bool // skips unusable characters for windows if set SkipFsMatch bool // if set skip exact matching of Fs value TiersToTest []string // List of tiers which can be tested in setTier test ChunkedUpload ChunkedUploadConfig UnimplementableFsMethods []string // List of Fs methods which can't be implemented in this wrapping Fs UnimplementableObjectMethods []string // List of Object methods which can't be implemented in this wrapping Fs UnimplementableDirectoryMethods []string // List of Directory methods which can't be implemented in this wrapping Fs SkipFsCheckWrap bool // if set skip FsCheckWrap SkipObjectCheckWrap bool // if set skip ObjectCheckWrap SkipDirectoryCheckWrap bool // if set skip DirectoryCheckWrap SkipInvalidUTF8 bool // if set skip invalid UTF-8 checks SkipLeadingDot bool // if set skip leading dot checks QuickTestOK bool // if set, run this test with make quicktest } // returns true if x is found in ss func stringsContains(x string, ss []string) bool { return slices.Contains(ss, x) } // toUpperASCII returns a copy of the string s with all Unicode // letters 
mapped to their upper case. func toUpperASCII(s string) string { return strings.Map(func(r rune) rune { if 'a' <= r && r <= 'z' { r -= 'a' - 'A' } return r }, s) } // removeConfigID removes any {xyz} parts of the name put in for // config disambiguation func removeConfigID(s string) string { bra := strings.IndexRune(s, '{') ket := strings.IndexRune(s, '}') if bra >= 0 && ket > bra { s = s[:bra] + s[ket+1:] } return s } // InternalTestFiles is the state of the remote at the moment the internal tests are called var InternalTestFiles []fstest.Item // Run runs the basic integration tests for a remote using the options passed in. // // They are structured in a hierarchical way so that dependencies for the tests can be created. // // For example some tests require the directory to be created - these // are inside the "FsMkdir" test. Some tests require some tests files // - these are inside the "FsPutFiles" test. func Run(t *testing.T, opt *Opt) { var ( f fs.Fs remoteName = opt.RemoteName subRemoteName string subRemoteLeaf string file1 = fstest.Item{ ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"), Path: "file name.txt", } file1Contents string file1MimeType = "text/csv" file1Metadata = fs.Metadata{"rclonetest": "potato"} file2 = fstest.Item{ ModTime: fstest.Time("2001-02-03T04:05:10.123123123Z"), Path: `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? 
+ ≠/z.txt`, } isLocalRemote bool purged bool // whether the dir has been purged or not ctx = context.Background() ci = fs.GetConfig(ctx) unwrappableFsMethods = []string{"Command"} // these Fs methods don't need to be wrapped ever ) if strings.HasSuffix(os.Getenv("RCLONE_CONFIG"), "/notfound") && *fstest.RemoteName == "" && !opt.QuickTestOK { t.Skip("quicktest only") } // Skip the test if the remote isn't configured skipIfNotOk := func(t *testing.T) { if f == nil { t.Skipf("WARN: %q not configured", remoteName) } } // Skip if remote is not ListR capable, otherwise set the useListR // flag, returning a function to restore its value skipIfNotListR := func(t *testing.T) func() { skipIfNotOk(t) if f.Features().ListR == nil { t.Skip("FS has no ListR interface") } previous := ci.UseListR ci.UseListR = true return func() { ci.UseListR = previous } } // Skip if remote is not SetTier and GetTier capable skipIfNotSetTier := func(t *testing.T) { skipIfNotOk(t) if !f.Features().SetTier || !f.Features().GetTier { t.Skip("FS has no SetTier & GetTier interfaces") } } // Return true if f (or any of the things it wraps) is bucket // based but not at the root. 
isBucketBasedButNotRoot := func(f fs.Fs) bool { f = fs.UnWrapFs(f) return f.Features().BucketBased && strings.Contains(strings.Trim(f.Root(), "/"), "/") } // Initialise the remote fstest.Initialise() // Set extra config if supplied for _, item := range opt.ExtraConfig { config.FileSetValue(item.Name, item.Key, item.Value) } if *fstest.RemoteName != "" { remoteName = *fstest.RemoteName } oldFstestRemoteName := fstest.RemoteName fstest.RemoteName = &remoteName defer func() { fstest.RemoteName = oldFstestRemoteName }() t.Logf("Using remote %q", remoteName) var err error if remoteName == "" { remoteName, err = fstest.LocalRemote() require.NoError(t, err) isLocalRemote = true } // Start any test servers if required finish, err := testserver.Start(remoteName) require.NoError(t, err) defer finish() // Make the Fs we are testing with, initialising the local variables // subRemoteName - name of the remote after the TestRemote: // subRemoteLeaf - a subdirectory to use under that // remote - the result of fs.NewFs(TestRemote:subRemoteName) subRemoteName, subRemoteLeaf, err = fstest.RandomRemoteName(remoteName) require.NoError(t, err) f, err = fs.NewFs(context.Background(), subRemoteName) if errors.Is(err, fs.ErrorNotFoundInConfigFile) { t.Logf("Didn't find %q in config file - skipping tests", remoteName) return } require.NoError(t, err, fmt.Sprintf("unexpected error: %v", err)) // Get fsInfo which contains type, etc. 
of the fs fsInfo, _, _, _, err := fs.ConfigFs(subRemoteName) require.NoError(t, err, fmt.Sprintf("unexpected error: %v", err)) // Skip the rest if it failed skipIfNotOk(t) // Check to see if Fs that wrap other Fs implement all the optional methods t.Run("FsCheckWrap", func(t *testing.T) { skipIfNotOk(t) if opt.SkipFsCheckWrap { t.Skip("Skipping FsCheckWrap on this Fs") } ft := new(fs.Features).Fill(ctx, f) if ft.UnWrap == nil && !f.Features().Overlay { t.Skip("Not a wrapping Fs") } v := reflect.ValueOf(ft).Elem() vType := v.Type() for i := range v.NumField() { vName := vType.Field(i).Name if stringsContains(vName, opt.UnimplementableFsMethods) { continue } if stringsContains(vName, unwrappableFsMethods) { continue } field := v.Field(i) // skip the bools if field.Type().Kind() == reflect.Bool { continue } if field.IsNil() { t.Errorf("Missing Fs wrapper for %s", vName) } } }) // Check to see if Fs advertises commands and they work and have docs t.Run("FsCommand", func(t *testing.T) { skipIfNotOk(t) doCommand := f.Features().Command if doCommand == nil { t.Skip("No commands in this remote") } // Check the correct error is generated _, err := doCommand(context.Background(), "NOTFOUND", nil, nil) assert.Equal(t, fs.ErrorCommandNotFound, err, "Incorrect error generated on command not found") // Check there are some commands in the fsInfo fsInfo, _, _, _, err := fs.ConfigFs(remoteName) require.NoError(t, err) assert.True(t, len(fsInfo.CommandHelp) > 0, "Command is declared, must return some help in CommandHelp") }) // TestFsRmdirNotFound tests deleting a nonexistent directory t.Run("FsRmdirNotFound", func(t *testing.T) { skipIfNotOk(t) if isBucketBasedButNotRoot(f) { t.Skip("Skipping test as non root bucket-based remote") } err := f.Rmdir(ctx, "") assert.Error(t, err, "Expecting error on Rmdir nonexistent") }) // Make the directory err = f.Mkdir(ctx, "") require.NoError(t, err) fstest.CheckListing(t, f, []fstest.Item{}) // TestFsString tests the String method 
t.Run("FsString", func(t *testing.T) { skipIfNotOk(t) str := f.String() require.NotEqual(t, "", str) }) // TestFsName tests the Name method t.Run("FsName", func(t *testing.T) { skipIfNotOk(t) got := removeConfigID(f.Name()) var want string if isLocalRemote { want = "local" } else { want = remoteName[:strings.LastIndex(remoteName, ":")] comma := strings.IndexRune(remoteName, ',') if comma >= 0 { want = want[:comma] } } require.Equal(t, want, got) }) // TestFsRoot tests the Root method t.Run("FsRoot", func(t *testing.T) { skipIfNotOk(t) got := f.Root() want := subRemoteName colon := strings.LastIndex(want, ":") if colon >= 0 { want = want[colon+1:] } if isLocalRemote { // only check last path element on local require.Equal(t, filepath.Base(subRemoteName), filepath.Base(got)) } else { require.Equal(t, want, got) } }) // TestFsRmdirEmpty tests deleting an empty directory t.Run("FsRmdirEmpty", func(t *testing.T) { skipIfNotOk(t) err := f.Rmdir(ctx, "") require.NoError(t, err) }) // TestFsMkdir tests making a directory // // Tests that require the directory to be made are within this t.Run("FsMkdir", func(t *testing.T) { skipIfNotOk(t) err := f.Mkdir(ctx, "") require.NoError(t, err) fstest.CheckListing(t, f, []fstest.Item{}) err = f.Mkdir(ctx, "") require.NoError(t, err) // TestFsMkdirRmdirSubdir tests making and removing a sub directory t.Run("FsMkdirRmdirSubdir", func(t *testing.T) { skipIfNotOk(t) dir := "dir/subdir" err := operations.Mkdir(ctx, f, dir) require.NoError(t, err) fstest.CheckListingWithPrecision(t, f, []fstest.Item{}, []string{"dir", "dir/subdir"}, fs.GetModifyWindow(ctx, f)) err = operations.Rmdir(ctx, f, dir) require.NoError(t, err) fstest.CheckListingWithPrecision(t, f, []fstest.Item{}, []string{"dir"}, fs.GetModifyWindow(ctx, f)) err = operations.Rmdir(ctx, f, "dir") require.NoError(t, err) fstest.CheckListingWithPrecision(t, f, []fstest.Item{}, []string{}, fs.GetModifyWindow(ctx, f)) }) // TestFsListEmpty tests listing an empty directory 
t.Run("FsListEmpty", func(t *testing.T) { skipIfNotOk(t) fstest.CheckListing(t, f, []fstest.Item{}) }) // TestFsListDirEmpty tests listing the directories from an empty directory TestFsListDirEmpty := func(t *testing.T) { skipIfNotOk(t) objs, dirs, err := walk.GetAll(ctx, f, "", true, 1) if !f.Features().CanHaveEmptyDirectories { if err != fs.ErrorDirNotFound { require.NoError(t, err) } } else { require.NoError(t, err) } assert.Equal(t, []string{}, objsToNames(objs)) assert.Equal(t, []string{}, dirsToNames(dirs)) } t.Run("FsListDirEmpty", TestFsListDirEmpty) // TestFsListRDirEmpty tests listing the directories from an empty directory using ListR t.Run("FsListRDirEmpty", func(t *testing.T) { defer skipIfNotListR(t)() TestFsListDirEmpty(t) }) // TestFsListDirNotFound tests listing the directories from an empty directory TestFsListDirNotFound := func(t *testing.T) { skipIfNotOk(t) objs, dirs, err := walk.GetAll(ctx, f, "does not exist", true, 1) if !f.Features().CanHaveEmptyDirectories { if err != fs.ErrorDirNotFound { assert.NoError(t, err) assert.Equal(t, 0, len(objs)+len(dirs)) } } else { assert.Equal(t, fs.ErrorDirNotFound, err) } } t.Run("FsListDirNotFound", TestFsListDirNotFound) // TestFsListRDirNotFound tests listing the directories from an empty directory using ListR t.Run("FsListRDirNotFound", func(t *testing.T) { defer skipIfNotListR(t)() TestFsListDirNotFound(t) }) // FsEncoding tests that file name encodings are // working by uploading a series of unusual files // Must be run in an empty directory t.Run("FsEncoding", func(t *testing.T) { skipIfNotOk(t) if testing.Short() { t.Skip("not running with -short") } // check no files or dirs as pre-requisite fstest.CheckListingWithPrecision(t, f, []fstest.Item{}, []string{}, fs.GetModifyWindow(ctx, f)) for _, test := range []struct { name string path string }{ // See lib/encoder/encoder.go for list of things that go here {"control chars", 
"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F\x7F"}, {"dot", "."}, {"dot dot", ".."}, {"punctuation", "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~"}, {"leading space", " leading space"}, {"leading tilde", "~leading tilde"}, {"leading CR", "\rleading CR"}, {"leading LF", "\nleading LF"}, {"leading HT", "\tleading HT"}, {"leading VT", "\vleading VT"}, {"leading dot", ".leading dot"}, {"trailing space", "trailing space "}, {"trailing CR", "trailing CR\r"}, {"trailing LF", "trailing LF\n"}, {"trailing HT", "trailing HT\t"}, {"trailing VT", "trailing VT\v"}, {"trailing dot", "trailing dot."}, {"invalid UTF-8", "invalid utf-8\xfe"}, {"URL encoding", "test%46.txt"}, } { t.Run(test.name, func(t *testing.T) { if opt.SkipInvalidUTF8 && test.name == "invalid UTF-8" { t.Skip("Skipping " + test.name) } if opt.SkipLeadingDot && test.name == "leading dot" { t.Skip("Skipping " + test.name) } // turn raw strings into Standard encoding fileName := encoder.Standard.Encode(test.path) dirName := fileName t.Logf("testing %q", fileName) assert.NoError(t, f.Mkdir(ctx, dirName)) file := fstest.Item{ ModTime: time.Now(), Path: dirName + "/" + fileName, // test creating a file and dir with that name } _, o := testPut(context.Background(), t, f, &file) fstest.CheckListingWithPrecision(t, f, []fstest.Item{file}, []string{dirName}, fs.GetModifyWindow(ctx, f)) assert.NoError(t, o.Remove(ctx)) assert.NoError(t, f.Rmdir(ctx, dirName)) fstest.CheckListingWithPrecision(t, f, []fstest.Item{}, []string{}, fs.GetModifyWindow(ctx, f)) }) } }) // TestFsNewObjectNotFound tests not finding an object t.Run("FsNewObjectNotFound", func(t *testing.T) { skipIfNotOk(t) // Object in an existing directory o, err := f.NewObject(ctx, "potato") assert.Nil(t, o) assert.Equal(t, fs.ErrorObjectNotFound, err) // Now try an object in a nonexistent directory o, err = f.NewObject(ctx, "directory/not/found/potato") assert.Nil(t, o) assert.Equal(t, 
fs.ErrorObjectNotFound, err) }) // TestFsPutError tests uploading a file where there is an error // // It makes sure that aborting a file half way through does not create // a file on the remote. // // go test -v -run 'TestIntegration/Test(Setup|Init|FsMkdir|FsPutError)$' t.Run("FsPutError", func(t *testing.T) { skipIfNotOk(t) var N int64 = 5 * 1024 if *fstest.SizeLimit > 0 && N > *fstest.SizeLimit { N = *fstest.SizeLimit t.Logf("Reduce file size due to limit %d", N) } // Read N bytes then produce an error contents := random.String(int(N)) buf := bytes.NewBufferString(contents) er := &readers.ErrorReader{Err: errors.New("potato")} in := io.MultiReader(buf, er) obji := object.NewStaticObjectInfo(file2.Path, file2.ModTime, 2*N, true, nil, nil) _, err := f.Put(ctx, in, obji) // assert.Nil(t, obj) - FIXME some remotes return the object even on nil assert.NotNil(t, err) retry(t, "FsPutError: test object does not exist", func() error { obj, err := f.NewObject(ctx, file2.Path) if err == nil { return fserrors.RetryErrorf("object is present") } assert.Nil(t, obj) assert.Equal(t, fs.ErrorObjectNotFound, err) return nil }) }) t.Run("FsPutZeroLength", func(t *testing.T) { skipIfNotOk(t) TestPutLarge(ctx, t, f, &fstest.Item{ ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"), Path: "zero-length-file", Size: int64(0), }) }) t.Run("FsOpenWriterAt", func(t *testing.T) { skipIfNotOk(t) openWriterAt := f.Features().OpenWriterAt if openWriterAt == nil { t.Skip("FS has no OpenWriterAt interface") } path := "writer-at-subdir/writer-at-file" out, err := openWriterAt(ctx, path, -1) require.NoError(t, err) var n int n, err = out.WriteAt([]byte("def"), 3) assert.NoError(t, err) assert.Equal(t, 3, n) n, err = out.WriteAt([]byte("ghi"), 6) assert.NoError(t, err) assert.Equal(t, 3, n) n, err = out.WriteAt([]byte("abc"), 0) assert.NoError(t, err) assert.Equal(t, 3, n) assert.NoError(t, out.Close()) obj := fstest.NewObject(ctx, t, f, path) assert.Equal(t, "abcdefghi", ReadObject(ctx, t, 
obj, -1), "contents of file differ") assert.NoError(t, obj.Remove(ctx)) assert.NoError(t, f.Rmdir(ctx, "writer-at-subdir")) }) // TestFsOpenChunkWriter tests writing in chunks to fs // then reads back the contents and check if they match // go test -v -run 'TestIntegration/FsMkdir/FsOpenChunkWriter' t.Run("FsOpenChunkWriter", func(t *testing.T) { skipIfNotOk(t) openChunkWriter := f.Features().OpenChunkWriter if openChunkWriter == nil { t.Skip("FS has no OpenChunkWriter interface") } size5MBs := 5 * 1024 * 1024 contents1 := random.String(size5MBs) contents2 := random.String(size5MBs) size1MB := 1 * 1024 * 1024 contents3 := random.String(size1MB) path := "writer-at-subdir/writer-at-file" objSrc := object.NewStaticObjectInfo(path+"-WRONG-REMOTE", file1.ModTime, -1, true, nil, nil) _, out, err := openChunkWriter(ctx, path, objSrc, &fs.ChunkOption{ ChunkSize: int64(size5MBs), }) require.NoError(t, err) var n int64 n, err = out.WriteChunk(ctx, 1, strings.NewReader(contents2)) assert.NoError(t, err) assert.Equal(t, int64(size5MBs), n) n, err = out.WriteChunk(ctx, 2, strings.NewReader(contents3)) assert.NoError(t, err) assert.Equal(t, int64(size1MB), n) n, err = out.WriteChunk(ctx, 0, strings.NewReader(contents1)) assert.NoError(t, err) assert.Equal(t, int64(size5MBs), n) assert.NoError(t, out.Close(ctx)) obj := fstest.NewObject(ctx, t, f, path) originalContents := contents1 + contents2 + contents3 fileContents := ReadObject(ctx, t, obj, -1) isEqual := originalContents == fileContents assert.True(t, isEqual, "contents of file differ") assert.NoError(t, obj.Remove(ctx)) assert.NoError(t, f.Rmdir(ctx, "writer-at-subdir")) }) // TestFsChangeNotify tests that changes are properly // propagated // // go test -v -remote TestDrive: -run '^Test(Setup|Init|FsChangeNotify)$' -verbose t.Run("FsChangeNotify", func(t *testing.T) { skipIfNotOk(t) // Check have ChangeNotify doChangeNotify := f.Features().ChangeNotify if doChangeNotify == nil { t.Skip("FS has no ChangeNotify interface") } 
err := operations.Mkdir(ctx, f, "dir") require.NoError(t, err) pollInterval := make(chan time.Duration) dirChanges := map[string]struct{}{} objChanges := map[string]struct{}{} doChangeNotify(ctx, func(x string, e fs.EntryType) { fs.Debugf(nil, "doChangeNotify(%q, %+v)", x, e) if strings.HasPrefix(x, file1.Path[:5]) || strings.HasPrefix(x, file2.Path[:5]) { fs.Debugf(nil, "Ignoring notify for file1 or file2: %q, %v", x, e) return } if e == fs.EntryDirectory { dirChanges[x] = struct{}{} } else if e == fs.EntryObject { objChanges[x] = struct{}{} } }, pollInterval) defer func() { close(pollInterval) }() pollInterval <- time.Second var dirs []string for _, idx := range []int{1, 3, 2} { dir := fmt.Sprintf("dir/subdir%d", idx) err = operations.Mkdir(ctx, f, dir) require.NoError(t, err) dirs = append(dirs, dir) } var objs []fs.Object for _, idx := range []int{2, 4, 3} { file := fstest.Item{ ModTime: time.Now(), Path: fmt.Sprintf("dir/file%d", idx), } _, o := testPut(ctx, t, f, &file) objs = append(objs, o) } // Looks for each item in wants in changes - // if they are all found it returns true contains := func(changes map[string]struct{}, wants []string) bool { for _, want := range wants { _, ok := changes[want] if !ok { return false } } return true } // Wait a little while for the changes to come in wantDirChanges := []string{"dir/subdir1", "dir/subdir3", "dir/subdir2"} wantObjChanges := []string{"dir/file2", "dir/file4", "dir/file3"} ok := false for tries := 1; tries < 10; tries++ { ok = contains(dirChanges, wantDirChanges) && contains(objChanges, wantObjChanges) if ok { break } t.Logf("Try %d/10 waiting for dirChanges and objChanges", tries) time.Sleep(3 * time.Second) } if !ok { t.Errorf("%+v does not contain %+v or \n%+v does not contain %+v", dirChanges, wantDirChanges, objChanges, wantObjChanges) } // tidy up afterwards for _, o := range objs { assert.NoError(t, o.Remove(ctx)) } dirs = append(dirs, "dir") for _, dir := range dirs { assert.NoError(t, f.Rmdir(ctx, 
dir)) } }) // TestFsPut files writes file1, file2 and tests an update // // Tests that require file1, file2 are within this t.Run("FsPutFiles", func(t *testing.T) { skipIfNotOk(t) file1Contents, _ = testPut(ctx, t, f, &file1) /* file2Contents = */ testPut(ctx, t, f, &file2) file1Contents, _ = testPutMimeType(ctx, t, f, &file1, file1MimeType, file1Metadata) // Note that the next test will check there are no duplicated file names // TestFsListDirFile2 tests the files are correctly uploaded by doing // Depth 1 directory listings TestFsListDirFile2 := func(t *testing.T) { skipIfNotOk(t) list := func(dir string, expectedDirNames, expectedObjNames []string) { var objNames, dirNames []string for i := 1; i <= *fstest.ListRetries; i++ { objs, dirs, err := walk.GetAll(ctx, f, dir, true, 1) if errors.Is(err, fs.ErrorDirNotFound) { objs, dirs, err = walk.GetAll(ctx, f, dir, true, 1) } require.NoError(t, err) objNames = objsToNames(objs) dirNames = dirsToNames(dirs) if len(objNames) >= len(expectedObjNames) && len(dirNames) >= len(expectedDirNames) { break } t.Logf("Sleeping for 1 second for TestFsListDirFile2 eventual consistency: %d/%d", i, *fstest.ListRetries) time.Sleep(1 * time.Second) } assert.Equal(t, expectedDirNames, dirNames) assert.Equal(t, expectedObjNames, objNames) } dir := file2.Path deepest := true for dir != "" { expectedObjNames := []string{} expectedDirNames := []string{} child := dir dir = path.Dir(dir) if dir == "." { dir = "" expectedObjNames = append(expectedObjNames, file1.Path) } if deepest { expectedObjNames = append(expectedObjNames, file2.Path) deepest = false } else { expectedDirNames = append(expectedDirNames, child) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
true
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fstest/test_all/test_all.go
fstest/test_all/test_all.go
// Run tests for all the remotes. Run this with package names which // need integration testing. // // See the `test` target in the Makefile. package main /* FIXME Make TesTrun have a []string of flags to try - that then makes it generic */ import ( "encoding/csv" "flag" "fmt" "math/rand" "os" "path" "strings" "time" _ "github.com/rclone/rclone/backend/all" // import all fs "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/configfile" "github.com/rclone/rclone/fstest/runs" "github.com/rclone/rclone/lib/pacer" ) func init() { // Flags flag.IntVar(&Opt.MaxTries, "maxtries", 5, "Number of times to try each test") flag.IntVar(&Opt.MaxN, "n", 20, "Maximum number of tests to run at once") flag.StringVar(&Opt.TestRemotes, "remotes", "", "Comma separated list of remotes to test, e.g. 'TestSwift:,TestS3'") flag.StringVar(&Opt.TestBackends, "backends", "", "Comma separated list of backends to test, e.g. 's3,googlecloudstorage") flag.StringVar(&Opt.TestTests, "tests", "", "Comma separated list of tests to test, e.g. 
'fs/sync,fs/operations'") flag.BoolVar(&Opt.Clean, "clean", false, "Instead of testing, clean all left over test directories") flag.StringVar(&Opt.RunOnly, "run", "", "Run only those tests matching the regexp supplied") flag.DurationVar(&Opt.Timeout, "timeout", 60*time.Minute, "Maximum time to run each test for before giving up") flag.BoolVar(&Opt.Race, "race", false, "If set run the tests under the race detector") flag.StringVar(&Opt.ConfigFile, "config", "fstest/test_all/config.yaml", "Path to config file") flag.StringVar(&Opt.OutputDir, "output", path.Join(os.TempDir(), "rclone-integration-tests"), "Place to store results") flag.StringVar(&Opt.EmailReport, "email", "", "Set to email the report to the address supplied") flag.BoolVar(&Opt.DryRun, "dry-run", false, "Print commands which would be executed only") flag.StringVar(&Opt.URLBase, "url-base", "https://pub.rclone.org/integration-tests/", "Base for the online version") flag.StringVar(&Opt.UploadPath, "upload", "", "Set this to an rclone path to upload the results here") flag.BoolVar(&Opt.Verbose, "verbose", false, "Set to enable verbose logging in the tests") flag.IntVar(&Opt.ListRetries, "list-retries", -1, "Number or times to retry listing - set to override the default") } var Opt = &runs.RunOpt{} func main() { flag.Parse() conf, err := runs.NewConfig(Opt.ConfigFile) if err != nil { fs.Log(nil, "test_all should be run from the root of the rclone source code") fs.Fatal(nil, fmt.Sprint(err)) } configfile.Install() // Seed the random number generator randInstance := rand.New(rand.NewSource(time.Now().UTC().UnixNano())) // Filter selection if Opt.TestRemotes != "" { // CSV parse to support connection string remotes with commas like -remotes local,\"TestGoogleCloudStorage,directory_markers:\" r := csv.NewReader(strings.NewReader(Opt.TestRemotes)) remotes, err := r.Read() if err != nil { fs.Fatalf(Opt.TestRemotes, "error CSV-parsing -remotes string: %v", err) } fs.Debugf(Opt.TestRemotes, "using remotes: %v", 
remotes) conf.FilterBackendsByRemotes(remotes) } if Opt.TestBackends != "" { conf.FilterBackendsByBackends(strings.Split(Opt.TestBackends, ",")) } if Opt.TestTests != "" { conf.FilterTests(strings.Split(Opt.TestTests, ",")) } // Just clean the directories if required if Opt.Clean { err := cleanRemotes(conf, *Opt) if err != nil { fs.Fatalf(nil, "Failed to clean: %v", err) } return } var names []string for _, remote := range conf.Backends { names = append(names, remote.Remote) } fs.Logf(nil, "Testing remotes: %s", strings.Join(names, ", ")) // Runs we will do for this test in random order testRuns := conf.MakeRuns() randInstance.Shuffle(len(testRuns), testRuns.Swap) // Create Report report := runs.NewReport(*Opt) // Make the test binaries, one per Path found in the tests done := map[string]struct{}{} for _, run := range testRuns { if _, found := done[run.Path]; !found { done[run.Path] = struct{}{} if !run.NoBinary { run.MakeTestBinary(*Opt) defer run.RemoveTestBinary(*Opt) } } } // workaround for cache backend as we run simultaneous tests _ = os.Setenv("RCLONE_CACHE_DB_WAIT_TIME", "30m") // start the tests results := make(chan *runs.Run, len(testRuns)) awaiting := 0 tokens := pacer.NewTokenDispenser(Opt.MaxN) for _, run := range testRuns { tokens.Get() go func(run *runs.Run) { defer tokens.Put() run.Run(*Opt, report.LogDir, results) }(run) awaiting++ } // Wait for the tests to finish for ; awaiting > 0; awaiting-- { t := <-results report.RecordResult(t) } // Log and exit report.End() report.LogSummary() report.LogJSON() report.LogHTML() report.EmailHTML(*Opt) report.Upload(*Opt) if !report.AllPassed() { os.Exit(1) } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fstest/test_all/clean.go
fstest/test_all/clean.go
// Clean the left over test files package main import ( "context" "fmt" "regexp" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/list" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fstest/runs" ) // MatchTestRemote matches the remote names used for testing (copied // from fstest/fstest.go so we don't have to import that and get all // its flags) var MatchTestRemote = regexp.MustCompile(`^rclone-test-[abcdefghijklmnopqrstuvwxyz0123456789]{12,24}(_segments)?$`) // cleanFs runs a single clean fs for left over directories func cleanFs(ctx context.Context, remote string, cleanup bool, Opt runs.RunOpt) error { f, err := fs.NewFs(context.Background(), remote) if err != nil { return err } var lastErr error if cleanup { fs.Logf(nil, "%q - running cleanup", remote) err = operations.CleanUp(ctx, f) if err != nil { lastErr = err fs.Errorf(f, "Cleanup failed: %v", err) } } entries, err := list.DirSorted(ctx, f, true, "") if err != nil { return err } err = entries.ForDirError(func(dir fs.Directory) error { dirPath := dir.Remote() fullPath := fspath.JoinRootPath(remote, dirPath) if MatchTestRemote.MatchString(dirPath) { if Opt.DryRun { fs.Logf(nil, "Not Purging %s - -dry-run", fullPath) return nil } fs.Logf(nil, "Purging %s", fullPath) dir, err := fs.NewFs(context.Background(), fullPath) if err != nil { err = fmt.Errorf("NewFs failed: %w", err) lastErr = err fs.Errorf(fullPath, "%v", err) return nil } err = operations.Purge(ctx, dir, "") if err != nil { err = fmt.Errorf("purge failed: %w", err) lastErr = err fs.Errorf(dir, "%v", err) return nil } } return nil }) if err != nil { return err } return lastErr } // cleanRemotes cleans the list of remotes passed in func cleanRemotes(conf *runs.Config, Opt runs.RunOpt) error { var lastError error for _, backend := range conf.Backends { remote := backend.Remote fs.Logf(nil, "%q - Cleaning", remote) err := cleanFs(context.Background(), remote, backend.CleanUp, Opt) if 
err != nil { lastError = err fs.Logf(nil, "Failed to purge %q: %v", remote, err) } } return lastError }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fstest/mockdir/dir.go
fstest/mockdir/dir.go
// Package mockdir makes a mock fs.Directory object package mockdir import ( "time" "github.com/rclone/rclone/fs" ) // New makes a mock directory object with the name given func New(name string) fs.Directory { return fs.NewDir(name, time.Time{}) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fstest/testy/testy.go
fstest/testy/testy.go
// Package testy contains test utilities for rclone package testy import ( "os" "testing" ) // CI returns true if we are running on the CI server func CI() bool { return os.Getenv("CI") != "" } // SkipUnreliable skips this test if running on CI func SkipUnreliable(t *testing.T) { if !CI() { return } t.Skip("Skipping Unreliable Test on CI") }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fstest/mockfs/mockfs.go
fstest/mockfs/mockfs.go
// Package mockfs provides mock Fs for testing. package mockfs import ( "context" "errors" "fmt" "io" "path" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/hash" ) // Register with Fs func Register() { fs.Register(&fs.RegInfo{ Name: "mockfs", Description: "Mock FS", NewFs: NewFs, Options: []fs.Option{{ Name: "potato", Help: "Does it have a potato?.", Required: true, }}, }) } // Fs is a minimal mock Fs type Fs struct { name string // the name of the remote root string // The root directory (OS path) features *fs.Features // optional features rootDir fs.DirEntries // directory listing of root hashes hash.Set // which hashes we support } // ErrNotImplemented is returned by unimplemented methods var ErrNotImplemented = errors.New("not implemented") // NewFs returns a new mock Fs func NewFs(ctx context.Context, name string, root string, config configmap.Mapper) (fs.Fs, error) { f := &Fs{ name: name, root: root, } f.features = (&fs.Features{}).Fill(ctx, f) return f, nil } // AddObject adds an Object for List to return // Only works for the root for the moment func (f *Fs) AddObject(o fs.Object) { f.rootDir = append(f.rootDir, o) // Make this object part of mockfs if possible do, ok := o.(interface{ SetFs(f fs.Fs) }) if ok { do.SetFs(f) } } // Name of the remote (as passed into NewFs) func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) func (f *Fs) Root() string { return f.root } // String returns a description of the FS func (f *Fs) String() string { return fmt.Sprintf("Mock file system at %s", f.root) } // Precision of the ModTimes in this Fs func (f *Fs) Precision() time.Duration { return time.Second } // Hashes returns the supported hash types of the filesystem func (f *Fs) Hashes() hash.Set { return f.hashes } // SetHashes sets the hashes that this supports func (f *Fs) SetHashes(hashes hash.Set) { f.hashes = hashes } // Features returns the optional features of 
this Fs func (f *Fs) Features() *fs.Features { return f.features } // List the objects and directories in dir into entries. The // entries can be returned in any order but should be for a // complete directory. // // dir should be "" to list the root, and should not have // trailing slashes. // // This should return ErrDirNotFound if the directory isn't // found. func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { if dir == "" { return f.rootDir, nil } return entries, fs.ErrorDirNotFound } // NewObject finds the Object at remote. If it can't be found // it returns the error ErrorObjectNotFound. func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { dirPath := path.Dir(remote) if dirPath == "" || dirPath == "." { for _, entry := range f.rootDir { if entry.Remote() == remote { return entry.(fs.Object), nil } } } return nil, fs.ErrorObjectNotFound } // Put in to the remote path with the modTime given of the given size // // May create the object even if it returns an error - if so // will return the object and the error, otherwise will return // nil and the error func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { return nil, ErrNotImplemented } // Mkdir makes the directory (container, bucket) // // Shouldn't return an error if it already exists func (f *Fs) Mkdir(ctx context.Context, dir string) error { return ErrNotImplemented } // Rmdir removes the directory (container, bucket) if empty // // Return an error if it doesn't exist or isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) error { return ErrNotImplemented } // Assert it is the correct type var _ fs.Fs = (*Fs)(nil)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fstest/testserver/testserver.go
fstest/testserver/testserver.go
// Package testserver starts and stops test servers if required package testserver import ( "bytes" "errors" "fmt" "net" "os" "os/exec" "path/filepath" "regexp" "strings" "sync" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/fspath" ) var ( findConfigOnce sync.Once configDir string // where the config is stored ) // Assume we are run somewhere within the rclone root func findConfig() (string, error) { dir := filepath.Join("fstest", "testserver", "init.d") for range 5 { fi, err := os.Stat(dir) if err == nil && fi.IsDir() { return filepath.Abs(dir) } else if !os.IsNotExist(err) { return "", err } dir = filepath.Join("..", dir) } return "", errors.New("couldn't find testserver config files - run from within rclone source") } // returns path to a script to start this server func cmdPath(name string) string { return filepath.Join(configDir, name) } // return true if the server with name has a start command func hasStartCommand(name string) bool { fi, err := os.Stat(cmdPath(name)) return err == nil && !fi.IsDir() } // run the command returning the output and an error func run(name, command string) (out []byte, err error) { script := cmdPath(name) cmd := exec.Command(script, command) out, err = cmd.CombinedOutput() if err != nil { err = fmt.Errorf("failed to run %s %s\n%s: %w", script, command, string(out), err) } return out, err } // envKey returns the environment variable name to set name, key func envKey(name, key string) string { return fmt.Sprintf("RCLONE_CONFIG_%s_%s", strings.ToUpper(name), strings.ToUpper(key)) } // match a line of config var=value var matchLine = regexp.MustCompile(`^([a-zA-Z_]+)=(.*)$`) // Start the server and env vars so rclone can use it func start(name string) error { fs.Logf(name, "Starting server") out, err := run(name, "start") if err != nil { return err } // parse the output and set environment vars from it var connect string var connectDelay time.Duration for line := range bytes.SplitSeq(out, []byte("\n")) { line = 
bytes.TrimSpace(line) part := matchLine.FindSubmatch(line) if part != nil { key, value := part[1], part[2] if string(key) == "_connect" { connect = string(value) continue } else if string(key) == "_connect_delay" { connectDelay, err = time.ParseDuration(string(value)) if err != nil { return fmt.Errorf("bad _connect_delay: %w", err) } continue } // fs.Debugf(name, "key = %q, envKey = %q, value = %q", key, envKey(name, string(key)), value) err = os.Setenv(envKey(name, string(key)), string(value)) if err != nil { return err } } } if connect == "" { fs.Logf(name, "Started server") return nil } // If we got a _connect value then try to connect to it const maxTries = 100 var rdBuf = make([]byte, 1) for i := 1; i <= maxTries; i++ { if i != 0 { time.Sleep(time.Second) } fs.Logf(name, "Attempting to connect to %q try %d/%d", connect, i, maxTries) conn, err := net.DialTimeout("tcp", connect, time.Second) if err != nil { fs.Debugf(name, "Connection to %q failed try %d/%d: %v", connect, i, maxTries, err) continue } err = conn.SetReadDeadline(time.Now().Add(time.Second)) if err != nil { return fmt.Errorf("failed to set deadline: %w", err) } n, err := conn.Read(rdBuf) _ = conn.Close() fs.Debugf(name, "Read %d, error: %v", n, err) if err != nil && !errors.Is(err, os.ErrDeadlineExceeded) { // Try again continue } if connectDelay > 0 { fs.Logf(name, "Connect delay %v", connectDelay) time.Sleep(connectDelay) } fs.Logf(name, "Started server and connected to %q", connect) return nil } return fmt.Errorf("failed to connect to %q on %q", name, connect) } // Stops the named test server func stop(name string) { fs.Logf(name, "Stopping server") _, err := run(name, "stop") if err != nil { fs.Errorf(name, "Failed to stop server: %v", err) } } // No server to stop so do nothing func stopNothing() { } // Start starts the test server for remoteName. // // This must be stopped by calling the function returned when finished. 
func Start(remote string) (fn func(), err error) { // don't start the local backend if remote == "" { return stopNothing, nil } parsed, err := fspath.Parse(remote) if err != nil { return nil, err } name := parsed.ConfigString // don't start the local backend if name == "" { return stopNothing, nil } // Make sure we know where the config is findConfigOnce.Do(func() { configDir, err = findConfig() }) if err != nil { return nil, err } // If remote has no start command then do nothing if !hasStartCommand(name) { return stopNothing, nil } // Start the server err = start(name) if err != nil { return nil, err } // And return a function to stop it return func() { stop(name) }, nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fstest/runs/config.go
fstest/runs/config.go
// Config handling // Package runs provides the types used by test_all package runs import ( "fmt" "os" "path" "slices" "github.com/rclone/rclone/fs" yaml "gopkg.in/yaml.v3" ) // Test describes an integration test to run with `go test` type Test struct { Path string // path to the source directory FastList bool // if it is possible to add -fast-list to tests Short bool // if it is possible to run the test with -short AddBackend bool // set if Path needs the current backend appending NoRetries bool // set if no retries should be performed NoBinary bool // set to not build a binary in advance LocalOnly bool // if set only run with the local backend } // Backend describes a backend test // // FIXME make bucket-based remotes set sub-dir automatically??? type Backend struct { Backend string // name of the backend directory Remote string // name of the test remote FastList bool // set to test with -fast-list Short bool // set to test with -short OneOnly bool // set to run only one backend test at once MaxFile string // file size limit CleanUp bool // when running clean, run cleanup first Ignore []string // test names to ignore the failure of Tests []string // paths of tests to run, blank for all IgnoreTests []string // paths of tests not to run, blank for none ListRetries int // -list-retries if > 0 ExtraTime float64 // factor to multiply the timeout by Env []string // environment variables to set in form KEY=VALUE } // includeTest returns true if this backend should be included in this // test func (b *Backend) includeTest(t *Test) bool { // Is this test ignored if slices.Contains(b.IgnoreTests, t.Path) { return false } // Empty b.Tests imples do all of them except the ignored if len(b.Tests) == 0 { return true } return slices.Contains(b.Tests, t.Path) } // MakeRuns creates Run objects the Backend and Test // // There can be several created, one for each combination of optional // flags (e.g. 
FastList) func (b *Backend) MakeRuns(t *Test) (runs []*Run) { if !b.includeTest(t) { return runs } maxSize := fs.SizeSuffix(0) if b.MaxFile != "" { if err := maxSize.Set(b.MaxFile); err != nil { fs.Logf(nil, "Invalid maxfile value %q: %v", b.MaxFile, err) } } fastlists := []bool{false} if b.FastList && t.FastList { fastlists = append(fastlists, true) } ignore := make(map[string]struct{}, len(b.Ignore)) for _, item := range b.Ignore { ignore[item] = struct{}{} } for _, fastlist := range fastlists { if t.LocalOnly && b.Backend != "local" { continue } run := &Run{ Remote: b.Remote, Backend: b.Backend, Path: t.Path, FastList: fastlist, Short: (b.Short && t.Short), NoRetries: t.NoRetries, OneOnly: b.OneOnly, NoBinary: t.NoBinary, SizeLimit: int64(maxSize), Ignore: ignore, ListRetries: b.ListRetries, ExtraTime: b.ExtraTime, Env: b.Env, } if t.AddBackend { run.Path = path.Join(run.Path, b.Backend) } runs = append(runs, run) } return runs } // Config describes the config for this program type Config struct { Tests []Test Backends []Backend } // NewConfig reads the config file func NewConfig(configFile string) (*Config, error) { d, err := os.ReadFile(configFile) if err != nil { return nil, fmt.Errorf("failed to read config file: %w", err) } config := &Config{} err = yaml.Unmarshal(d, &config) if err != nil { return nil, fmt.Errorf("failed to parse config file: %w", err) } // d, err = yaml.Marshal(&config) // if err != nil { // log.Fatalf("error: %v", err) // } // fmt.Printf("--- m dump:\n%s\n\n", string(d)) return config, nil } // MakeRuns makes Run objects for each combination of Backend and Test // in the config func (c *Config) MakeRuns() (runs Runs) { for _, backend := range c.Backends { for _, test := range c.Tests { runs = append(runs, backend.MakeRuns(&test)...) } } return runs } // FilterBackendsByRemotes filters the Backends with the remotes passed in. 
// // If no backend is found with a remote is found then synthesize one func (c *Config) FilterBackendsByRemotes(remotes []string) { var newBackends []Backend for _, name := range remotes { found := false for i := range c.Backends { if c.Backends[i].Remote == name { newBackends = append(newBackends, c.Backends[i]) found = true } } if !found { fs.Logf(nil, "Remote %q not found - inserting with default flags", name) // Lookup which backend fsInfo, _, _, _, err := fs.ConfigFs(name) if err != nil { fs.Fatalf(nil, "couldn't find remote %q: %v", name, err) } newBackends = append(newBackends, Backend{Backend: fsInfo.FileName(), Remote: name}) } } c.Backends = newBackends } // FilterBackendsByBackends filters the Backends with the backendNames passed in func (c *Config) FilterBackendsByBackends(backendNames []string) { var newBackends []Backend for _, name := range backendNames { for i := range c.Backends { if c.Backends[i].Backend == name { newBackends = append(newBackends, c.Backends[i]) } } } c.Backends = newBackends } // FilterTests filters the incoming tests into the backends selected func (c *Config) FilterTests(paths []string) { var newTests []Test for _, path := range paths { for i := range c.Tests { if c.Tests[i].Path == path { newTests = append(newTests, c.Tests[i]) } } } c.Tests = newTests }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fstest/runs/run_test.go
fstest/runs/run_test.go
package runs import ( "fmt" "os/exec" "regexp" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestTestsToRegexp(t *testing.T) { for _, test := range []struct { in []string want string }{ { in: []string{}, want: "", }, { in: []string{"TestOne"}, want: "^TestOne$", }, { in: []string{"TestOne", "TestTwo"}, want: "^(TestOne|TestTwo)$", }, { in: []string{"TestOne", "TestTwo", "TestThree"}, want: "^(TestOne|TestThree|TestTwo)$", }, { in: []string{"TestOne/Sub1"}, want: "^TestOne$/^Sub1$", }, { in: []string{ "TestOne/Sub1", "TestTwo", }, want: "^TestOne$/^Sub1$|^TestTwo$", }, { in: []string{ "TestOne/Sub1", "TestOne/Sub2", "TestTwo", }, want: "^TestOne$/^(Sub1|Sub2)$|^TestTwo$", }, { in: []string{ "TestOne/Sub1", "TestOne/Sub2/SubSub1", "TestTwo", }, want: "^TestOne$/^Sub1$|^TestOne$/^Sub2$/^SubSub1$|^TestTwo$", }, { in: []string{ "TestTests/A1", "TestTests/B/B1", "TestTests/C/C3/C31", }, want: "^TestTests$/^A1$|^TestTests$/^B$/^B1$|^TestTests$/^C$/^C3$/^C31$", }, } { got := testsToRegexp(test.in) assert.Equal(t, test.want, got, fmt.Sprintf("in=%v want=%q got=%q", test.in, test.want, got)) } } var runRe = regexp.MustCompile(`(?m)^\s*=== RUN\s*(Test.*?)\s*$`) // Test the regexp work with the -run flag in actually selecting the right tests func TestTestsToRegexpLive(t *testing.T) { for _, test := range []struct { in []string want []string }{ { in: []string{ "TestTests/A1", "TestTests/C/C3", }, want: []string{ "TestTests", "TestTests/A1", "TestTests/C", "TestTests/C/C3", "TestTests/C/C3/C31", "TestTests/C/C3/C32", }, }, { in: []string{ "TestTests", "TestTests/A1", "TestTests/B", "TestTests/B/B1", "TestTests/C", }, want: []string{ "TestTests", "TestTests/A1", "TestTests/B", "TestTests/B/B1", "TestTests/C", "TestTests/C/C1", "TestTests/C/C2", "TestTests/C/C3", "TestTests/C/C3/C31", "TestTests/C/C3/C32", }, }, { in: []string{ "TestTests/A1", "TestTests/B/B1", "TestTests/C/C3/C31", }, want: []string{ "TestTests", "TestTests/A1", 
"TestTests/B", "TestTests/B/B1", "TestTests/C", "TestTests/C/C3", "TestTests/C/C3/C31", }, }, { in: []string{ "TestTests/B/B1", "TestTests/C/C3/C31", }, want: []string{ "TestTests", "TestTests/B", "TestTests/B/B1", "TestTests/C", "TestTests/C/C3", "TestTests/C/C3/C31", }, }, } { runRegexp := testsToRegexp(test.in) cmd := exec.Command("go", "test", "-v", "-run", runRegexp) out, err := cmd.CombinedOutput() require.NoError(t, err) var got []string for _, match := range runRe.FindAllSubmatch(out, -1) { got = append(got, string(match[1])) } assert.Equal(t, test.want, got, fmt.Sprintf("in=%v want=%v got=%v, runRegexp=%q", test.in, test.want, got, runRegexp)) } } var nilTest = func(t *testing.T) {} // Nested tests for TestTestsToRegexpLive to run func TestTests(t *testing.T) { t.Run("A1", nilTest) t.Run("A2", nilTest) t.Run("B", func(t *testing.T) { t.Run("B1", nilTest) t.Run("B2", nilTest) }) t.Run("C", func(t *testing.T) { t.Run("C1", nilTest) t.Run("C2", nilTest) t.Run("C3", func(t *testing.T) { t.Run("C31", nilTest) t.Run("C32", nilTest) }) }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fstest/runs/run.go
fstest/runs/run.go
// Run a test package runs import ( "bytes" "context" "fmt" "go/build" "io" "os" "os/exec" "path" "regexp" "runtime" "sort" "strconv" "strings" "sync" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fstest/testserver" ) // Control concurrency per backend if required var ( oneOnlyMu sync.Mutex oneOnly = map[string]*sync.Mutex{} ) // RunOpt holds the options for the Run type RunOpt struct { MaxTries int // Number of times to try each test MaxN int // Maximum number of tests to run at once TestRemotes string // Comma separated list of remotes to test, e.g. 'TestSwift:,TestS3' TestBackends string // Comma separated list of backends to test, e.g. 's3,googlecloudstorage TestTests string // Comma separated list of tests to test, e.g. 'fs/sync,fs/operations' Clean bool // Instead of testing, clean all left over test directories RunOnly string // Run only those tests matching the regexp supplied Timeout time.Duration // Maximum time to run each test for before giving up Race bool // If set run the tests under the race detector ConfigFile string // Path to config file OutputDir string // Place to store results EmailReport string // Set to email the report to the address supplied DryRun bool // Print commands which would be executed only URLBase string // Base for the online version UploadPath string // Set this to an rclone path to upload the results here Verbose bool // Set to enable verbose logging in the tests ListRetries int // Number or times to retry listing - set to override the default } // Run holds info about a running test // // A run just runs one command line, but it can be run multiple times // if retries are needed. 
type Run struct { // Config Remote string // name of the test remote Backend string // name of the backend Path string // path to the source directory FastList bool // add -fast-list to tests Short bool // add -short NoRetries bool // don't retry if set OneOnly bool // only run test for this backend at once NoBinary bool // set to not build a binary SizeLimit int64 // maximum test file size Ignore map[string]struct{} ListRetries int // -list-retries if > 0 ExtraTime float64 // multiply the timeout by this Env []string // environment variables in form KEY=VALUE // Internals CmdLine []string CmdString string Try int err error output []byte FailedTests []string RunFlag string LogDir string // directory to place the logs TrialName string // name/log file name of current trial TrialNames []string // list of all the trials } // Runs records multiple Run objects type Runs []*Run // Sort interface func (rs Runs) Len() int { return len(rs) } func (rs Runs) Swap(i, j int) { rs[i], rs[j] = rs[j], rs[i] } func (rs Runs) Less(i, j int) bool { a, b := rs[i], rs[j] if a.Backend < b.Backend { return true } else if a.Backend > b.Backend { return false } if a.Remote < b.Remote { return true } else if a.Remote > b.Remote { return false } if a.Path < b.Path { return true } else if a.Path > b.Path { return false } if !a.FastList && b.FastList { return true } else if a.FastList && !b.FastList { return false } return false } // dumpOutput prints the error output func (r *Run) dumpOutput() { fs.Log(nil, "------------------------------------------------------------") fs.Logf(nil, "---- %q ----", r.CmdString) fs.Log(nil, string(r.output)) fs.Log(nil, "------------------------------------------------------------") } // trie for storing runs type trie map[string]trie // turn a trie into multiple regexp matches // // We can't ever have a / in a regexp as it doesn't work. 
func match(current trie) []string { var names []string var parts []string for name, value := range current { matchName := "^" + name + "$" if len(value) == 0 { names = append(names, name) } else { for _, part := range match(value) { parts = append(parts, matchName+"/"+part) } } } sort.Strings(names) if len(names) > 1 { parts = append(parts, "^("+strings.Join(names, "|")+")$") } else if len(names) == 1 { parts = append(parts, "^"+names[0]+"$") } sort.Strings(parts) return parts } // This converts a slice of test names into a regexp which matches // them. func testsToRegexp(tests []string) string { split := trie{} // Make a trie showing which parts are used at each level for _, test := range tests { parent := split for name := range strings.SplitSeq(test, "/") { current := parent[name] if current == nil { current = trie{} parent[name] = current } parent = current } } parts := match(split) return strings.Join(parts, "|") } var failRe = regexp.MustCompile(`(?m)^\s*--- FAIL: (Test.*?) \(`) // findFailures looks for all the tests which failed func (r *Run) findFailures() { oldFailedTests := r.FailedTests r.FailedTests = nil excludeParents := map[string]struct{}{} ignored := 0 for _, matches := range failRe.FindAllSubmatch(r.output, -1) { failedTest := string(matches[1]) // Skip any ignored failures if _, found := r.Ignore[failedTest]; found { ignored++ } else { r.FailedTests = append(r.FailedTests, failedTest) } // Find all the parents of this test parts := strings.Split(failedTest, "/") for i := len(parts) - 1; i >= 1; i-- { excludeParents[strings.Join(parts[:i], "/")] = struct{}{} } } // Exclude the parents newTests := r.FailedTests[:0] for _, failedTest := range r.FailedTests { if _, excluded := excludeParents[failedTest]; !excluded { newTests = append(newTests, failedTest) } } r.FailedTests = newTests if len(r.FailedTests) == 0 && ignored > 0 { fs.Logf(nil, "%q - Found %d ignored errors only - marking as good", r.CmdString, ignored) r.err = nil r.dumpOutput() return 
} if len(r.FailedTests) != 0 { r.RunFlag = testsToRegexp(r.FailedTests) } else { r.RunFlag = "" } if r.passed() && len(r.FailedTests) != 0 { fs.Logf(nil, "%q - Expecting no errors but got: %v", r.CmdString, r.FailedTests) r.dumpOutput() } else if !r.passed() && len(r.FailedTests) == 0 { fs.Logf(nil, "%q - Expecting errors but got none: %v", r.CmdString, r.FailedTests) r.dumpOutput() r.FailedTests = oldFailedTests } } // nextCmdLine returns the next command line func (r *Run) nextCmdLine() []string { CmdLine := r.CmdLine if r.RunFlag != "" { CmdLine = append(CmdLine, "-test.run", r.RunFlag) } return CmdLine } // trial runs a single test func (r *Run) trial(Opt RunOpt) { CmdLine := r.nextCmdLine() CmdString := toShell(CmdLine) msg := fmt.Sprintf("%q - Starting (try %d/%d)", CmdString, r.Try, Opt.MaxTries) fs.Log(nil, msg) logName := path.Join(r.LogDir, r.TrialName) out, err := os.Create(logName) if err != nil { fs.Fatalf(nil, "Couldn't create log file: %v", err) } defer func() { err := out.Close() if err != nil { fs.Fatalf(nil, "Failed to close log file: %v", err) } }() _, _ = fmt.Fprintln(out, msg) // Early exit if --try-run if Opt.DryRun { fs.Logf(nil, "Not executing as --dry-run: %v", CmdLine) _, _ = fmt.Fprintln(out, "--dry-run is set - not running") return } // Start the test server if required finish, err := testserver.Start(r.Remote) if err != nil { fs.Logf(nil, "%s: Failed to start test server: %v", r.Remote, err) _, _ = fmt.Fprintf(out, "%s: Failed to start test server: %v\n", r.Remote, err) r.err = err return } defer finish() // Internal buffer var b bytes.Buffer multiOut := io.MultiWriter(out, &b) cmd := exec.Command(CmdLine[0], CmdLine[1:]...) cmd.Stderr = multiOut cmd.Stdout = multiOut cmd.Dir = r.Path cmd.Env = append(os.Environ(), r.Env...) 
start := time.Now() r.err = cmd.Run() r.output = b.Bytes() duration := time.Since(start) r.findFailures() if r.passed() { msg = fmt.Sprintf("%q - Finished OK in %v (try %d/%d)", CmdString, duration, r.Try, Opt.MaxTries) } else { msg = fmt.Sprintf("%q - Finished ERROR in %v (try %d/%d): %v: Failed %v", CmdString, duration, r.Try, Opt.MaxTries, r.err, r.FailedTests) } fs.Log(nil, msg) _, _ = fmt.Fprintln(out, msg) } // passed returns true if the test passed func (r *Run) passed() bool { return r.err == nil } // GOPATH returns the current GOPATH func GOPATH() string { gopath := os.Getenv("GOPATH") if gopath == "" { gopath = build.Default.GOPATH } return gopath } // BinaryName turns a package name into a binary name func (r *Run) BinaryName() string { binary := path.Base(r.Path) + ".test" if runtime.GOOS == "windows" { binary += ".exe" } return binary } // BinaryPath turns a package name into a binary path func (r *Run) BinaryPath() string { return path.Join(r.Path, r.BinaryName()) } // PackagePath returns the path to the package func (r *Run) PackagePath() string { return path.Join(GOPATH(), "src", r.Path) } // MakeTestBinary makes the binary we will run func (r *Run) MakeTestBinary(Opt RunOpt) { binary := r.BinaryPath() binaryName := r.BinaryName() fs.Logf(nil, "%s: Making test binary %q", r.Path, binaryName) CmdLine := []string{"go", "test", "-c"} if Opt.Race { CmdLine = append(CmdLine, "-race") } if Opt.DryRun { fs.Logf(nil, "Not executing: %v", CmdLine) return } cmd := exec.Command(CmdLine[0], CmdLine[1:]...) 
cmd.Dir = r.Path err := cmd.Run() if err != nil { fs.Fatalf(nil, "Failed to make test binary: %v", err) } if _, err := os.Stat(binary); err != nil { fs.Fatalf(nil, "Couldn't find test binary %q", binary) } } // RemoveTestBinary removes the binary made in makeTestBinary func (r *Run) RemoveTestBinary(Opt RunOpt) { if Opt.DryRun { return } binary := r.BinaryPath() err := os.Remove(binary) // Delete the binary when finished if err != nil { fs.Logf(nil, "Error removing test binary %q: %v", binary, err) } } // Name returns the run name as a file name friendly string func (r *Run) Name() string { ns := []string{ r.Backend, strings.ReplaceAll(r.Path, "/", "."), r.Remote, } if r.FastList { ns = append(ns, "fastlist") } ns = append(ns, fmt.Sprintf("%d", r.Try)) s := strings.Join(ns, "-") s = strings.ReplaceAll(s, ":", "") return s } // Init the Run func (r *Run) Init(Opt RunOpt) { prefix := "-test." if r.NoBinary { prefix = "-" r.CmdLine = []string{"go", "test"} } else { r.CmdLine = []string{"./" + r.BinaryName()} } testTimeout := Opt.Timeout if r.ExtraTime > 0 { testTimeout = time.Duration(float64(testTimeout) * r.ExtraTime) } r.CmdLine = append(r.CmdLine, prefix+"v", prefix+"timeout", testTimeout.String(), "-remote", r.Remote) listRetries := Opt.ListRetries if r.ListRetries > 0 { listRetries = r.ListRetries } if listRetries > 0 { r.CmdLine = append(r.CmdLine, "-list-retries", fmt.Sprint(listRetries)) } r.Try = 1 ci := fs.GetConfig(context.Background()) if Opt.Verbose { r.CmdLine = append(r.CmdLine, "-verbose") ci.LogLevel = fs.LogLevelDebug } if Opt.RunOnly != "" { r.CmdLine = append(r.CmdLine, prefix+"run", Opt.RunOnly) } if r.FastList { r.CmdLine = append(r.CmdLine, "-fast-list") } if r.Short { r.CmdLine = append(r.CmdLine, "-short") } if r.SizeLimit > 0 { r.CmdLine = append(r.CmdLine, "-size-limit", strconv.FormatInt(r.SizeLimit, 10)) } r.CmdString = toShell(r.CmdLine) } // Logs returns all the log names func (r *Run) Logs() []string { return r.TrialNames } // 
FailedTestsCSV returns the failed tests as a comma separated string, limiting the number func (r *Run) FailedTestsCSV() string { const maxTests = 5 ts := r.FailedTests if len(ts) > maxTests { ts = ts[:maxTests:maxTests] ts = append(ts, fmt.Sprintf("… (%d more)", len(r.FailedTests)-maxTests)) } return strings.Join(ts, ", ") } // Run runs all the trials for this test func (r *Run) Run(Opt RunOpt, LogDir string, result chan<- *Run) { if r.OneOnly { oneOnlyMu.Lock() mu := oneOnly[r.Backend] if mu == nil { mu = new(sync.Mutex) oneOnly[r.Backend] = mu } oneOnlyMu.Unlock() mu.Lock() defer mu.Unlock() } r.Init(Opt) r.LogDir = LogDir for r.Try = 1; r.Try <= Opt.MaxTries; r.Try++ { r.TrialName = r.Name() + ".txt" r.TrialNames = append(r.TrialNames, r.TrialName) fs.Logf(nil, "Starting run with log %q", r.TrialName) r.trial(Opt) if r.passed() || r.NoRetries { break } } if !r.passed() { r.dumpOutput() } result <- r } // if matches then is definitely OK in the shell var shellOK = regexp.MustCompile("^[A-Za-z0-9./_:-]+$") // converts an argv style input into a shell command func toShell(args []string) (result string) { for _, arg := range args { if result != "" { result += " " } if shellOK.MatchString(arg) { result += arg } else { result += "'" + arg + "'" } } return result }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/fstest/runs/report.go
fstest/runs/report.go
package runs import ( "bytes" "encoding/json" "fmt" "html/template" "os" "os/exec" "path" "runtime" "sort" "strings" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/lib/file" "github.com/skratchdot/open-golang/open" ) const timeFormat = "2006-01-02-150405" // Report holds the info to make a report on a series of test runs type Report struct { LogDir string // output directory for logs and report StartTime time.Time // time started DateTime string // directory name for output Duration time.Duration // time the run took Failed Runs // failed runs Passed Runs // passed runs Runs []ReportRun // runs to report Version string // rclone version Previous string // previous test name if known IndexHTML string // path to the index.html file URL string // online version Branch string // rclone branch Commit string // rclone commit GOOS string // Go OS GOARCH string // Go Arch GoVersion string // Go Version } // ReportRun is used in the templates to report on a test run type ReportRun struct { Name string Runs Runs } // FIXME take -issue or -pr parameter... // NewReport initialises and returns a Report func NewReport(Opt RunOpt) *Report { r := &Report{ StartTime: time.Now(), Version: fs.Version, GOOS: runtime.GOOS, GOARCH: runtime.GOARCH, GoVersion: runtime.Version(), } r.DateTime = r.StartTime.Format(timeFormat) // Find previous log directory if possible names, err := os.ReadDir(Opt.OutputDir) if err == nil && len(names) > 0 { r.Previous = names[len(names)-1].Name() } // Create output directory for logs and report r.LogDir = path.Join(Opt.OutputDir, r.DateTime) err = file.MkdirAll(r.LogDir, 0777) if err != nil { fs.Fatalf(nil, "Failed to make log directory: %v", err) } // Online version r.URL = Opt.URLBase + r.DateTime + "/index.html" // Get branch/commit r.Branch, r.Commit = gitBranchAndCommit() return r } // gitBranchAndCommit returns the current branch and commit hash. // // It returns "" on error. 
func gitBranchAndCommit() (branch, commit string) { // branch (empty if detached) var b bytes.Buffer cmdB := exec.Command("git", "symbolic-ref", "--short", "-q", "HEAD") cmdB.Stdout = &b if e := cmdB.Run(); e == nil { branch = strings.TrimSpace(b.String()) } // commit (full SHA) var c bytes.Buffer cmdC := exec.Command("git", "rev-parse", "HEAD") cmdC.Stdout = &c if e := cmdC.Run(); e == nil { commit = strings.TrimSpace(c.String()) } return branch, commit } // End should be called when the tests are complete func (r *Report) End() { r.Duration = time.Since(r.StartTime) sort.Sort(r.Failed) sort.Sort(r.Passed) r.Runs = []ReportRun{ {Name: "Failed", Runs: r.Failed}, {Name: "Passed", Runs: r.Passed}, } } // AllPassed returns true if there were no failed tests func (r *Report) AllPassed() bool { return len(r.Failed) == 0 } // RecordResult should be called with a Run when it has finished to be // recorded into the Report func (r *Report) RecordResult(t *Run) { if !t.passed() { r.Failed = append(r.Failed, t) } else { r.Passed = append(r.Passed, t) } } // Title returns a human-readable summary title for the Report func (r *Report) Title() string { if r.AllPassed() { return fmt.Sprintf("PASS: All tests finished OK in %v", r.Duration) } return fmt.Sprintf("FAIL: %d tests failed in %v", len(r.Failed), r.Duration) } // LogSummary writes the summary to the log file func (r *Report) LogSummary() { fs.Logf(nil, "Logs in %q", r.LogDir) // Summarise results fs.Logf(nil, "SUMMARY") fs.Log(nil, r.Title()) if !r.AllPassed() { for _, t := range r.Failed { fs.Logf(nil, " * %s", toShell(t.nextCmdLine())) fs.Logf(nil, " * Failed tests: %v", t.FailedTests) } } } // LogJSON writes the summary to index.json in LogDir func (r *Report) LogJSON() { out, err := json.MarshalIndent(r, "", "\t") if err != nil { fs.Fatalf(nil, "Failed to marshal data for index.json: %v", err) } err = os.WriteFile(path.Join(r.LogDir, "index.json"), out, 0666) if err != nil { fs.Fatalf(nil, "Failed to write index.json: 
%v", err) } } // LogHTML writes the summary to index.html in LogDir func (r *Report) LogHTML() { r.IndexHTML = path.Join(r.LogDir, "index.html") out, err := os.Create(r.IndexHTML) if err != nil { fs.Fatalf(nil, "Failed to open index.html: %v", err) } defer func() { err := out.Close() if err != nil { fs.Fatalf(nil, "Failed to close index.html: %v", err) } }() err = reportTemplate.Execute(out, r) if err != nil { fs.Fatalf(nil, "Failed to execute template: %v", err) } _ = open.Start("file://" + r.IndexHTML) } var reportHTML = `<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <title>{{ .Title }}</title> <style> table { border-collapse: collapse; border-spacing: 0; border: 1px solid #ddd; } table.tests { width: 100%; } table, th, td { border: 1px solid #264653; } .Failed { color: #BE5B43; } .Passed { color: #17564E; } .false { font-weight: lighter; } .true { font-weight: bold; } th, td { text-align: left; padding: 4px; } tr:nth-child(even) { background-color: #f2f2f2; } a { color: #5B1955; text-decoration: none; } a:hover, a:focus { color: #F4A261; text-decoration:underline; } a:focus { outline: thin dotted; outline: 5px auto; } </style> </head> <body> <h1>{{ .Title }}</h1> <table> <tr><th>Version</th><td>{{ .Version }}</td></tr> <tr><th>Test</th><td><a href="{{ .URL }}">{{ .DateTime}}</a></td></tr> <tr><th>Branch</th><td><a href="https://github.com/rclone/rclone/tree/{{ .Branch }}">{{ .Branch }}</a></td></tr> {{ if .Commit}}<tr><th>Commit</th><td><a href="https://github.com/rclone/rclone/commit/{{ .Commit }}">{{ .Commit }}</a></td></tr>{{ end }} <tr><th>Go</th><td>{{ .GoVersion }} {{ .GOOS }}/{{ .GOARCH }}</td></tr> <tr><th>Duration</th><td>{{ .Duration }}</td></tr> {{ if .Previous}}<tr><th>Previous</th><td><a href="../{{ .Previous }}/index.html">{{ .Previous }}</a></td></tr>{{ end }} <tr><th>Up</th><td><a href="../">Older Tests</a></td></tr> </table> {{ range .Runs }} {{ if .Runs }} <h2 class="{{ .Name }}">{{ .Name }}: {{ len .Runs }}</h2> <table 
class="{{ .Name }} tests"> <tr> <th>Backend</th> <th>Remote</th> <th>Test</th> <th>FastList</th> <th>Failed</th> <th>Logs</th> </tr> {{ $prevBackend := "" }} {{ $prevRemote := "" }} {{ range .Runs}} <tr> <td>{{ if ne $prevBackend .Backend }}{{ .Backend }}{{ end }}{{ $prevBackend = .Backend }}</td> <td>{{ if ne $prevRemote .Remote }}{{ .Remote }}{{ end }}{{ $prevRemote = .Remote }}</td> <td>{{ .Path }}</td> <td><span class="{{ .FastList }}">{{ .FastList }}</span></td> <td>{{ .FailedTestsCSV }}</td> <td>{{ range $i, $v := .Logs }}<a href="{{ $v }}">#{{ $i }}</a> {{ end }}</td> </tr> {{ end }} </table> {{ end }} {{ end }} </body> </html> ` var reportTemplate = template.Must(template.New("Report").Parse(reportHTML)) // EmailHTML sends the summary report to the email address supplied func (r *Report) EmailHTML(Opt RunOpt) { if Opt.EmailReport == "" || r.IndexHTML == "" { return } fs.Logf(nil, "Sending email summary to %q", Opt.EmailReport) cmdLine := []string{"mail", "-a", "Content-Type: text/html", Opt.EmailReport, "-s", "rclone integration tests: " + r.Title()} cmd := exec.Command(cmdLine[0], cmdLine[1:]...) in, err := os.Open(r.IndexHTML) if err != nil { fs.Fatalf(nil, "Failed to open index.html: %v", err) } cmd.Stdin = in cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr err = cmd.Run() if err != nil { fs.Fatalf(nil, "Failed to send email: %v", err) } _ = in.Close() } // uploadTo uploads a copy of the report online to the dir given func (r *Report) uploadTo(Opt RunOpt, uploadDir string) { dst := path.Join(Opt.UploadPath, uploadDir) fs.Logf(nil, "Uploading results to %q", dst) cmdLine := []string{"rclone", "sync", "--stats-log-level", "NOTICE", r.LogDir, dst} cmd := exec.Command(cmdLine[0], cmdLine[1:]...) 
cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr err := cmd.Run() if err != nil { fs.Fatalf(nil, "Failed to upload results: %v", err) } } // Upload uploads a copy of the report online func (r *Report) Upload(Opt RunOpt) { if Opt.UploadPath == "" || r.IndexHTML == "" { return } // Upload into dated directory r.uploadTo(Opt, r.DateTime) // And again into current r.uploadTo(Opt, "current") }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/selfupdate_enabled.go
cmd/selfupdate_enabled.go
//go:build !noselfupdate

package cmd

// This constant must be in the `cmd` package rather than `cmd/selfupdate`
// to prevent build failure due to dependency loop.
//
// selfupdateEnabled is true because this build was made without the
// `noselfupdate` build tag.
const selfupdateEnabled = true
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/progress.go
cmd/progress.go
// Show the dynamic progress bar package cmd import ( "bytes" "fmt" "log/slog" "strings" "sync" "time" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/log" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/lib/terminal" ) const ( // interval between progress prints defaultProgressInterval = 500 * time.Millisecond ) // startProgress starts the progress bar printing // // It returns a func which should be called to stop the stats. func startProgress() func() { stopStats := make(chan struct{}) oldSyncPrint := operations.SyncPrintf if !log.Redirected() { // Intercept the log calls if not logging to file or syslog log.Handler.SetOutput(func(level slog.Level, text string) { printProgress(text) }) } // Intercept output from functions such as HashLister to stdout operations.SyncPrintf = func(format string, a ...any) { printProgress(fmt.Sprintf(format, a...)) } var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() progressInterval := defaultProgressInterval if ShowStats() && *statsInterval > 0 { progressInterval = *statsInterval } ticker := time.NewTicker(progressInterval) for { select { case <-ticker.C: printProgress("") case <-stopStats: ticker.Stop() printProgress("") if !log.Redirected() { // Reset intercept of the log calls log.Handler.ResetOutput() } operations.SyncPrintf = oldSyncPrint fmt.Println("") return } } }() return func() { close(stopStats) wg.Wait() } } // state for the progress printing var ( nlines = 0 // number of lines in the previous stats block ) // printProgress prints the progress with an optional log func printProgress(logMessage string) { operations.StdoutMutex.Lock() defer operations.StdoutMutex.Unlock() var buf bytes.Buffer w, _ := terminal.GetSize() stats := strings.TrimSpace(accounting.GlobalStats().String()) logMessage = strings.TrimSpace(logMessage) out := func(s string) { buf.WriteString(s) } if logMessage != "" { out("\n") out(terminal.MoveUp) } // Move to the start of the block we wrote erasing 
all the previous lines for range nlines - 1 { out(terminal.EraseLine) out(terminal.MoveUp) } out(terminal.EraseLine) out(terminal.MoveToStartOfLine) if logMessage != "" { out(terminal.EraseLine) out(logMessage + "\n") } fixedLines := strings.Split(stats, "\n") nlines = len(fixedLines) for i, line := range fixedLines { if len(line) > w { line = line[:w] } out(line) if i != nlines-1 { out("\n") } } terminal.Write(buf.Bytes()) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/selfupdate_disabled.go
cmd/selfupdate_disabled.go
//go:build noselfupdate

package cmd

// selfupdateEnabled is false because this binary was built with the
// `noselfupdate` build tag, which removes the selfupdate functionality.
const selfupdateEnabled = false
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/siginfo_bsd.go
cmd/siginfo_bsd.go
//go:build darwin || freebsd || netbsd || dragonfly || openbsd package cmd import ( "os" "os/signal" "syscall" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" ) // SigInfoHandler creates SigInfo handler func SigInfoHandler() { signals := make(chan os.Signal, 1) signal.Notify(signals, syscall.SIGINFO) go func() { for range signals { fs.Printf(nil, "%v\n", accounting.GlobalStats()) } }() }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/cmd.go
cmd/cmd.go
// Package cmd implements the rclone command // // It is in a sub package so it's internals can be reused elsewhere package cmd // FIXME only attach the remote flags when using a remote??? // would probably mean bringing all the flags in to here? Or define some flagsets in fs... import ( "context" "errors" "fmt" "os" "os/exec" "path" "runtime" "runtime/pprof" "strconv" "strings" "sync" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/cache" "github.com/rclone/rclone/fs/config/configfile" "github.com/rclone/rclone/fs/config/configflags" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/filter" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fspath" fslog "github.com/rclone/rclone/fs/log" "github.com/rclone/rclone/fs/rc" "github.com/rclone/rclone/fs/rc/rcserver" fssync "github.com/rclone/rclone/fs/sync" "github.com/rclone/rclone/lib/atexit" "github.com/rclone/rclone/lib/buildinfo" "github.com/rclone/rclone/lib/exitcode" "github.com/rclone/rclone/lib/terminal" "github.com/spf13/cobra" "github.com/spf13/pflag" ) // Globals var ( // Flags cpuProfile = flags.StringP("cpuprofile", "", "", "Write cpu profile to file", "Debugging") memProfile = flags.StringP("memprofile", "", "", "Write memory profile to file", "Debugging") statsInterval = flags.DurationP("stats", "", time.Minute*1, "Interval between printing stats, e.g. 
500ms, 60s, 5m (0 to disable)", "Logging") version bool // Errors errorCommandNotFound = errors.New("command not found") errorNotEnoughArguments = errors.New("not enough arguments") errorTooManyArguments = errors.New("too many arguments") ) // ShowVersion prints the version to stdout func ShowVersion() { osVersion, osKernel := buildinfo.GetOSVersion() if osVersion == "" { osVersion = "unknown" } if osKernel == "" { osKernel = "unknown" } linking, tagString := buildinfo.GetLinkingAndTags() arch := buildinfo.GetArch() fmt.Printf("rclone %s\n", fs.Version) fmt.Printf("- os/version: %s\n", osVersion) fmt.Printf("- os/kernel: %s\n", osKernel) fmt.Printf("- os/type: %s\n", runtime.GOOS) fmt.Printf("- os/arch: %s\n", arch) fmt.Printf("- go/version: %s\n", runtime.Version()) fmt.Printf("- go/linking: %s\n", linking) fmt.Printf("- go/tags: %s\n", tagString) } // NewFsFile creates an Fs from a name but may point to a file. // // It returns a string with the file name if points to a file // otherwise "". func NewFsFile(remote string) (fs.Fs, string) { ctx := context.Background() _, fsPath, err := fspath.SplitFs(remote) if err != nil { err = fs.CountError(ctx, err) fs.Fatalf(nil, "Failed to create file system for %q: %v", remote, err) } f, err := cache.Get(ctx, remote) switch err { case fs.ErrorIsFile: cache.Pin(f) // pin indefinitely since it was on the CLI return f, path.Base(fsPath) case nil: cache.Pin(f) // pin indefinitely since it was on the CLI return f, "" default: err = fs.CountError(ctx, err) fs.Fatalf(nil, "Failed to create file system for %q: %v", remote, err) } return nil, "" } // newFsFileAddFilter creates an src Fs from a name // // This works the same as NewFsFile however it adds filters to the Fs // to limit it to a single file if the remote pointed to a file. 
func newFsFileAddFilter(remote string) (fs.Fs, string) { ctx := context.Background() fi := filter.GetConfig(ctx) f, fileName := NewFsFile(remote) if fileName != "" { if !fi.InActive() { err := fmt.Errorf("can't limit to single files when using filters: %v", remote) err = fs.CountError(ctx, err) fs.Fatal(nil, err.Error()) } // Limit transfers to this file err := fi.AddFile(fileName) if err != nil { err = fs.CountError(ctx, err) fs.Fatalf(nil, "Failed to limit to single file %q: %v", remote, err) } } return f, fileName } // NewFsSrc creates a new src fs from the arguments. // // The source can be a file or a directory - if a file then it will // limit the Fs to a single file. func NewFsSrc(args []string) fs.Fs { fsrc, _ := newFsFileAddFilter(args[0]) return fsrc } // newFsDir creates an Fs from a name // // This must point to a directory func newFsDir(remote string) fs.Fs { ctx := context.Background() f, err := cache.Get(ctx, remote) if err != nil { err = fs.CountError(ctx, err) fs.Fatalf(nil, "Failed to create file system for %q: %v", remote, err) } cache.Pin(f) // pin indefinitely since it was on the CLI return f } // NewFsDir creates a new Fs from the arguments // // The argument must point a directory func NewFsDir(args []string) fs.Fs { fdst := newFsDir(args[0]) return fdst } // NewFsSrcDst creates a new src and dst fs from the arguments func NewFsSrcDst(args []string) (fs.Fs, fs.Fs) { fsrc, _ := newFsFileAddFilter(args[0]) fdst := newFsDir(args[1]) return fsrc, fdst } // NewFsSrcFileDst creates a new src and dst fs from the arguments // // The source may be a file, in which case the source Fs and file name is returned func NewFsSrcFileDst(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs) { fsrc, srcFileName = NewFsFile(args[0]) fdst = newFsDir(args[1]) return fsrc, srcFileName, fdst } // NewFsSrcDstFiles creates a new src and dst fs from the arguments // If src is a file then srcFileName and dstFileName will be non-empty func NewFsSrcDstFiles(args 
[]string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs, dstFileName string) { ctx := context.Background() fsrc, srcFileName = newFsFileAddFilter(args[0]) // If copying a file... dstRemote := args[1] // If file exists then srcFileName != "", however if the file // doesn't exist then we assume it is a directory... if srcFileName != "" { var err error dstRemote, dstFileName, err = fspath.Split(dstRemote) if err != nil { fs.Fatalf(nil, "Parsing %q failed: %v", args[1], err) } if dstRemote == "" { dstRemote = "." } if dstFileName == "" { fs.Fatalf(nil, "%q is a directory", args[1]) } } fdst, err := cache.Get(ctx, dstRemote) switch err { case fs.ErrorIsFile: _ = fs.CountError(ctx, err) fs.Fatalf(nil, "Source doesn't exist or is a directory and destination is a file") case nil: default: _ = fs.CountError(ctx, err) fs.Fatalf(nil, "Failed to create file system for destination %q: %v", dstRemote, err) } cache.Pin(fdst) // pin indefinitely since it was on the CLI return } // NewFsDstFile creates a new dst fs with a destination file name from the arguments func NewFsDstFile(args []string) (fdst fs.Fs, dstFileName string) { dstRemote, dstFileName, err := fspath.Split(args[0]) if err != nil { fs.Fatalf(nil, "Parsing %q failed: %v", args[0], err) } if dstRemote == "" { dstRemote = "." } if dstFileName == "" { fs.Fatalf(nil, "%q is a directory", args[0]) } fdst = newFsDir(dstRemote) return } // ShowStats returns true if the user added a `--stats` flag to the command line. // // This is called by Run to override the default value of the // showStats passed in. 
func ShowStats() bool { statsIntervalFlag := pflag.Lookup("stats") return statsIntervalFlag != nil && statsIntervalFlag.Changed } // Run the function with stats and retries if required func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) { ctx := context.Background() ci := fs.GetConfig(ctx) var cmdErr error stopStats := func() {} if !showStats && ShowStats() { showStats = true } if ci.Progress { stopStats = startProgress() } else if showStats { stopStats = StartStats() } SigInfoHandler() for try := 1; try <= ci.Retries; try++ { cmdErr = f() cmdErr = fs.CountError(ctx, cmdErr) lastErr := accounting.GlobalStats().GetLastError() if cmdErr == nil { cmdErr = lastErr } if !Retry || !accounting.GlobalStats().Errored() { if try > 1 { fs.Errorf(nil, "Attempt %d/%d succeeded", try, ci.Retries) } break } if accounting.GlobalStats().HadFatalError() { fs.Errorf(nil, "Fatal error received - not attempting retries") break } if accounting.GlobalStats().Errored() && !accounting.GlobalStats().HadRetryError() { fs.Errorf(nil, "Can't retry any of the errors - not attempting retries") break } if retryAfter := accounting.GlobalStats().RetryAfter(); !retryAfter.IsZero() { d := time.Until(retryAfter) if d > 0 { fs.Logf(nil, "Received retry after error - sleeping until %s (%v)", retryAfter.Format(time.RFC3339Nano), d) time.Sleep(d) } } if lastErr != nil { fs.Errorf(nil, "Attempt %d/%d failed with %d errors and: %v", try, ci.Retries, accounting.GlobalStats().GetErrors(), lastErr) } else { fs.Errorf(nil, "Attempt %d/%d failed with %d errors", try, ci.Retries, accounting.GlobalStats().GetErrors()) } if try < ci.Retries { accounting.GlobalStats().ResetErrors() } if ci.RetriesInterval > 0 { time.Sleep(time.Duration(ci.RetriesInterval)) } } stopStats() if showStats && (accounting.GlobalStats().Errored() || *statsInterval > 0) { accounting.GlobalStats().Log() } fs.Debugf(nil, "%d go routines active\n", runtime.NumGoroutine()) if ci.Progress && ci.ProgressTerminalTitle { // 
Clear terminal title terminal.WriteTerminalTitle("") } // dump all running go-routines if ci.Dump&fs.DumpGoRoutines != 0 { err := pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) if err != nil { fs.Errorf(nil, "Failed to dump goroutines: %v", err) } } // dump open files if ci.Dump&fs.DumpOpenFiles != 0 { c := exec.Command("lsof", "-p", strconv.Itoa(os.Getpid())) c.Stdout = os.Stdout c.Stderr = os.Stderr err := c.Run() if err != nil { fs.Errorf(nil, "Failed to list open files: %v", err) } } // clear cache and shutdown backends cache.Clear() if lastErr := accounting.GlobalStats().GetLastError(); cmdErr == nil { cmdErr = lastErr } // Log the final error message and exit if cmdErr != nil { nerrs := accounting.GlobalStats().GetErrors() if nerrs <= 1 { fs.Logf(nil, "Failed to %s: %v", cmd.Name(), cmdErr) } else { fs.Logf(nil, "Failed to %s with %d errors: last error was: %v", cmd.Name(), nerrs, cmdErr) } } resolveExitCode(cmdErr) } // CheckArgs checks there are enough arguments and prints a message if not func CheckArgs(MinArgs, MaxArgs int, cmd *cobra.Command, args []string) { if len(args) < MinArgs { _ = cmd.Usage() _, _ = fmt.Fprintf(os.Stderr, "Command %s needs %d arguments minimum: you provided %d non flag arguments: %q\n", cmd.Name(), MinArgs, len(args), args) resolveExitCode(errorNotEnoughArguments) } else if len(args) > MaxArgs { _ = cmd.Usage() _, _ = fmt.Fprintf(os.Stderr, "Command %s needs %d arguments maximum: you provided %d non flag arguments: %q\n", cmd.Name(), MaxArgs, len(args), args) resolveExitCode(errorTooManyArguments) } } // StartStats prints the stats every statsInterval // // It returns a func which should be called to stop the stats. 
func StartStats() func() { if *statsInterval <= 0 { return func() {} } stopStats := make(chan struct{}) var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() ticker := time.NewTicker(*statsInterval) for { select { case <-ticker.C: accounting.GlobalStats().Log() case <-stopStats: ticker.Stop() return } } }() return func() { close(stopStats) wg.Wait() } } // initConfig is run by cobra after initialising the flags func initConfig() { // Set the global options from the flags err := fs.GlobalOptionsInit() if err != nil { fs.Fatalf(nil, "Failed to initialise global options: %v", err) } ctx := context.Background() ci := fs.GetConfig(ctx) // Start the logger fslog.InitLogging() // Finish parsing any command line flags configflags.SetFlags(ci) // Load the config configfile.Install() // Start accounting accounting.Start(ctx) // Configure console if ci.NoConsole { // Hide the console window terminal.HideConsole() } else { // Enable color support on stdout if possible. // This enables virtual terminal processing on Windows 10, // adding native support for ANSI/VT100 escape sequences. 
terminal.EnableColorsStdout() } // Write the args for debug purposes fs.Debugf("rclone", "Version %q starting with parameters %q", fs.Version, os.Args) // Inform user about systemd log support now that we have a logger if fslog.Opt.LogSystemdSupport { fs.Debugf("rclone", "systemd logging support activated") } // Start the remote control server if configured _, err = rcserver.Start(ctx, &rc.Opt) if err != nil { fs.Fatalf(nil, "Failed to start remote control: %v", err) } // Start the metrics server if configured and not running the "rc" command if len(os.Args) >= 2 && os.Args[1] != "rc" { _, err = rcserver.MetricsStart(ctx, &rc.Opt) if err != nil { fs.Fatalf(nil, "Failed to start metrics server: %v", err) } } // Setup CPU profiling if desired if *cpuProfile != "" { fs.Infof(nil, "Creating CPU profile %q\n", *cpuProfile) f, err := os.Create(*cpuProfile) if err != nil { err = fs.CountError(ctx, err) fs.Fatal(nil, fmt.Sprint(err)) } err = pprof.StartCPUProfile(f) if err != nil { err = fs.CountError(ctx, err) fs.Fatal(nil, fmt.Sprint(err)) } atexit.Register(func() { pprof.StopCPUProfile() err := f.Close() if err != nil { err = fs.CountError(ctx, err) fs.Fatal(nil, fmt.Sprint(err)) } }) } // Setup memory profiling if desired if *memProfile != "" { atexit.Register(func() { fs.Infof(nil, "Saving Memory profile %q\n", *memProfile) f, err := os.Create(*memProfile) if err != nil { err = fs.CountError(ctx, err) fs.Fatal(nil, fmt.Sprint(err)) } err = pprof.WriteHeapProfile(f) if err != nil { err = fs.CountError(ctx, err) fs.Fatal(nil, fmt.Sprint(err)) } err = f.Close() if err != nil { err = fs.CountError(ctx, err) fs.Fatal(nil, fmt.Sprint(err)) } }) } } func resolveExitCode(err error) { ctx := context.Background() ci := fs.GetConfig(ctx) atexit.Run() if err == nil { if ci.ErrorOnNoTransfer { if accounting.GlobalStats().GetTransfers() == 0 { os.Exit(exitcode.NoFilesTransferred) } } os.Exit(exitcode.Success) } switch { case errors.Is(err, fs.ErrorDirNotFound): 
os.Exit(exitcode.DirNotFound) case errors.Is(err, fs.ErrorObjectNotFound): os.Exit(exitcode.FileNotFound) case errors.Is(err, accounting.ErrorMaxTransferLimitReached): os.Exit(exitcode.TransferExceeded) case errors.Is(err, fssync.ErrorMaxDurationReached): os.Exit(exitcode.DurationExceeded) case fserrors.ShouldRetry(err): os.Exit(exitcode.RetryError) case fserrors.IsNoRetryError(err), fserrors.IsNoLowLevelRetryError(err): os.Exit(exitcode.NoRetryError) case fserrors.IsFatalError(err): os.Exit(exitcode.FatalError) case errors.Is(err, errorCommandNotFound), errors.Is(err, errorNotEnoughArguments), errors.Is(err, errorTooManyArguments): os.Exit(exitcode.UsageError) default: os.Exit(exitcode.UncategorizedError) } } var backendFlags map[string]struct{} // AddBackendFlags creates flags for all the backend options func AddBackendFlags() { backendFlags = map[string]struct{}{} for _, fsInfo := range fs.Registry { flags.AddFlagsFromOptions(pflag.CommandLine, fsInfo.Prefix, fsInfo.Options) // Store the backend flag names for the help generator for i := range fsInfo.Options { opt := &fsInfo.Options[i] name := opt.FlagName(fsInfo.Prefix) backendFlags[name] = struct{}{} } } } // Main runs rclone interpreting flags and commands out of os.Args func Main() { setupRootCommand(Root) AddBackendFlags() if err := Root.Execute(); err != nil { if strings.HasPrefix(err.Error(), "unknown command") && selfupdateEnabled { Root.PrintErrf("You could use '%s selfupdate' to get latest features.\n\n", Root.CommandPath()) } fs.Logf(nil, "Fatal error: %v", err) os.Exit(exitcode.UsageError) } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/siginfo_others.go
cmd/siginfo_others.go
//go:build !darwin && !freebsd && !netbsd && !dragonfly && !openbsd

package cmd

// SigInfoHandler creates SigInfo handler
//
// On platforms excluded by the build tags above there is no SIGINFO
// signal, so this is deliberately a no-op.
func SigInfoHandler() {
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/completion.go
cmd/completion.go
package cmd import ( "context" "fmt" "os" "path/filepath" "strings" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/cache" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/fspath" "github.com/spf13/cobra" ) // Make a debug message while doing the completion. // // These end up in the file specified by BASH_COMP_DEBUG_FILE func compLogf(format string, a ...any) { cobra.CompDebugln(fmt.Sprintf(format, a...), true) } // Add remotes to the completions being built up func addRemotes(toComplete string, completions []string) []string { remotes := config.FileSections() for _, remote := range remotes { remote += ":" if strings.HasPrefix(remote, toComplete) { completions = append(completions, remote) } } return completions } // Add local files to the completions being built up func addLocalFiles(toComplete string, result cobra.ShellCompDirective, completions []string) (cobra.ShellCompDirective, []string) { path := filepath.Clean(toComplete) dir, file := filepath.Split(path) if dir == "" { dir = "." 
} if len(dir) > 0 && dir[0] != filepath.Separator && dir[0] != '/' { dir = strings.TrimRight(dir, string(filepath.Separator)) dir = strings.TrimRight(dir, "/") } fi, err := os.Stat(toComplete) if err == nil { if fi.IsDir() { dir = toComplete file = "" } } fis, err := os.ReadDir(dir) if err != nil { compLogf("Failed to read directory %q: %v", dir, err) return result, completions } for _, fi := range fis { name := fi.Name() if strings.HasPrefix(name, file) { path := filepath.Join(dir, name) if fi.IsDir() { path += string(filepath.Separator) result |= cobra.ShellCompDirectiveNoSpace } completions = append(completions, path) } } return result, completions } // Add remote files to the completions being built up func addRemoteFiles(toComplete string, result cobra.ShellCompDirective, completions []string) (cobra.ShellCompDirective, []string) { ctx := context.Background() parent, _, err := fspath.Split(toComplete) if err != nil { compLogf("Failed to split path %q: %v", toComplete, err) return result, completions } f, err := cache.Get(ctx, parent) if err == fs.ErrorIsFile { completions = append(completions, toComplete) return result, completions } else if err != nil { compLogf("Failed to make Fs %q: %v", parent, err) return result, completions } fis, err := f.List(ctx, "") if err != nil { compLogf("Failed to list Fs %q: %v", parent, err) return result, completions } for _, fi := range fis { remote := fi.Remote() path := parent + remote if strings.HasPrefix(path, toComplete) { if _, ok := fi.(fs.Directory); ok { path += "/" result |= cobra.ShellCompDirectiveNoSpace } completions = append(completions, path) } } return result, completions } // Workaround doesn't seem to be needed for BashCompletionV2 const useColonWorkaround = false // do command completion // // This is called by the command completion scripts using a hidden __complete or __completeNoDesc commands. 
func validArgs(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { compLogf("ValidArgsFunction called with args=%q toComplete=%q", args, toComplete) fixBug := -1 if useColonWorkaround { // Work around what I think is a bug in cobra's bash // completion which seems to be splitting the arguments on : // Or there is something I don't understand - ncw args = append(args, toComplete) colonArg := -1 for i, arg := range args { if arg == ":" { colonArg = i } } if colonArg > 0 { newToComplete := strings.Join(args[colonArg-1:], "") fixBug = len(newToComplete) - len(toComplete) toComplete = newToComplete } compLogf("...shuffled args=%q toComplete=%q", args, toComplete) } result := cobra.ShellCompDirectiveDefault completions := []string{} // See whether we have a valid remote yet _, err := fspath.Parse(toComplete) parseOK := err == nil hasColon := strings.ContainsRune(toComplete, ':') validRemote := parseOK && hasColon compLogf("valid remote = %v", validRemote) // Add remotes for completion if !validRemote { completions = addRemotes(toComplete, completions) } // Add local files for completion if !validRemote { result, completions = addLocalFiles(toComplete, result, completions) } // Add remote files for completion if validRemote { result, completions = addRemoteFiles(toComplete, result, completions) } // If using bug workaround, adjust completions to start with : if useColonWorkaround && fixBug >= 0 { for i := range completions { if len(completions[i]) >= fixBug { completions[i] = completions[i][fixBug:] } } } return completions, result }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/help.go
cmd/help.go
package cmd import ( "context" "fmt" "os" "regexp" "sort" "strings" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/configflags" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/filter" "github.com/rclone/rclone/fs/filter/filterflags" "github.com/rclone/rclone/fs/log/logflags" "github.com/rclone/rclone/fs/rc/rcflags" "github.com/rclone/rclone/lib/atexit" "github.com/spf13/cobra" "github.com/spf13/pflag" "golang.org/x/text/cases" "golang.org/x/text/language" ) // Root is the main rclone command var Root = &cobra.Command{ Use: "rclone", Short: "Show help for rclone commands, flags and backends.", Long: `Rclone syncs files to and from cloud storage providers as well as mounting them, listing them in lots of different ways. See the home page (https://rclone.org/) for installation, usage, documentation, changelog and configuration walkthroughs.`, PersistentPostRun: func(cmd *cobra.Command, args []string) { fs.Debugf("rclone", "Version %q finishing with parameters %q", fs.Version, os.Args) atexit.Run() }, ValidArgsFunction: validArgs, DisableAutoGenTag: true, } // GeneratingDocs is set by rclone gendocs to alter the format of the // output suitable for the documentation. 
var GeneratingDocs = false // root help command var helpCommand = &cobra.Command{ Use: "help", Short: Root.Short, Long: Root.Long, Run: func(command *cobra.Command, args []string) { Root.SetOut(os.Stdout) _ = Root.Usage() }, } // to filter the flags with var ( filterFlagsGroup string filterFlagsRe *regexp.Regexp filterFlagsNamesOnly bool ) // Show the flags var helpFlags = &cobra.Command{ Use: "flags [<filter>]", Short: "Show the global flags for rclone", Run: func(command *cobra.Command, args []string) { command.Flags() if GeneratingDocs { Root.SetUsageTemplate(docFlagsTemplate) } else { if len(args) > 0 { re, err := filter.GlobStringToRegexp(args[0], false, true) if err != nil { fs.Fatalf(nil, "Invalid flag filter: %v", err) } fs.Debugf(nil, "Flag filter: %s", re.String()) filterFlagsRe = re } if filterFlagsGroup != "" { Root.SetUsageTemplate(filterFlagsSingleGroupTemplate) } else if len(args) > 0 { Root.SetUsageTemplate(filterFlagsMultiGroupTemplate) } Root.SetOut(os.Stdout) } _ = command.Usage() }, } // Show the backends var helpBackends = &cobra.Command{ Use: "backends", Short: "List the backends available", Run: func(command *cobra.Command, args []string) { showBackends() }, } // Show a single backend var helpBackend = &cobra.Command{ Use: "backend <name>", Short: "List full info about a backend", Run: func(command *cobra.Command, args []string) { if len(args) == 0 { Root.SetOut(os.Stdout) _ = command.Usage() return } showBackend(args[0]) }, } // runRoot implements the main rclone command with no subcommands func runRoot(cmd *cobra.Command, args []string) { if version { ShowVersion() resolveExitCode(nil) } else { _ = cmd.Usage() if len(args) > 0 { _, _ = fmt.Fprintf(os.Stderr, "Command not found.\n") } resolveExitCode(errorCommandNotFound) } } // setupRootCommand sets default usage, help, and error handling for // the root command. 
// // Helpful example: https://github.com/moby/moby/blob/master/cli/cobra.go func setupRootCommand(rootCmd *cobra.Command) { ci := fs.GetConfig(context.Background()) // Add global flags configflags.AddFlags(ci, pflag.CommandLine) filterflags.AddFlags(pflag.CommandLine) rcflags.AddFlags(pflag.CommandLine) logflags.AddFlags(pflag.CommandLine) Root.Run = runRoot Root.Flags().BoolVarP(&version, "version", "V", false, "Print the version number") cobra.AddTemplateFunc("showGlobalFlags", func(cmd *cobra.Command) bool { return cmd.CalledAs() == "flags" || cmd.Annotations["groups"] != "" }) cobra.AddTemplateFunc("showCommands", func(cmd *cobra.Command) bool { return cmd.CalledAs() != "flags" }) cobra.AddTemplateFunc("showLocalFlags", func(cmd *cobra.Command) bool { // Don't show local flags (which are the global ones on the root) on "rclone" and // "rclone help" (which shows the global help) return cmd.CalledAs() != "rclone" && cmd.CalledAs() != "" }) cobra.AddTemplateFunc("flagGroups", func(cmd *cobra.Command) []*flags.Group { // Add the backend flags and check all flags backendGroup := flags.All.NewGroup("Backend", "Backend-only flags (these can be set in the config file also)") allRegistered := flags.All.AllRegistered() cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { if _, ok := backendFlags[flag.Name]; ok { backendGroup.Add(flag) } else if _, ok := allRegistered[flag]; ok { // flag is in a group already } else { fs.Errorf(nil, "Flag --%s is unknown", flag.Name) } }) groups := flags.All.Filter(filterFlagsGroup, filterFlagsRe, filterFlagsNamesOnly).Include(cmd.Annotations["groups"]) return groups.Groups }) rootCmd.SetUsageTemplate(usageTemplate) // rootCmd.SetHelpTemplate(helpTemplate) // rootCmd.SetFlagErrorFunc(FlagErrorFunc) rootCmd.SetHelpCommand(helpCommand) // rootCmd.PersistentFlags().BoolP("help", "h", false, "Print usage") // rootCmd.PersistentFlags().MarkShorthandDeprecated("help", "please use --help") rootCmd.AddCommand(helpCommand) 
helpCommand.AddCommand(helpFlags) helpFlagsFlags := helpFlags.Flags() flags.StringVarP(helpFlagsFlags, &filterFlagsGroup, "group", "", "", "Only include flags from specific group", "") flags.BoolVarP(helpFlagsFlags, &filterFlagsNamesOnly, "name", "", false, "Apply filter only on flag names", "") helpCommand.AddCommand(helpBackends) helpCommand.AddCommand(helpBackend) // Set command completion for all functions to be the same traverseCommands(rootCmd, func(cmd *cobra.Command) { cmd.ValidArgsFunction = validArgs }) cobra.OnInitialize(initConfig) } // Traverse the tree of commands running fn on each // // I was surprised there wasn't a cobra command to do this func traverseCommands(cmd *cobra.Command, fn func(*cobra.Command)) { fn(cmd) for _, childCmd := range cmd.Commands() { traverseCommands(childCmd, fn) } } var usageTemplate = `Usage:{{if .Runnable}} {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}} {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}} Aliases: {{.NameAndAliases}}{{end}}{{if .HasExample}} Examples: {{.Example}}{{end}}{{if and (showCommands .) .HasAvailableSubCommands}} Available commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}} {{rpad .Name .NamePadding}} {{.Short}}{{end}}{{end}}{{end}}{{if and (showLocalFlags .) .HasAvailableLocalFlags}} Flags: {{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if and (showGlobalFlags .) .HasAvailableInheritedFlags}}{{range flagGroups .}}{{if .Flags.HasFlags}} {{.Help}} (flag group {{.Name}}): {{.Flags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{end}}{{end}}{{if .HasHelpSubCommands}} Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}} {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}} Use "rclone [command] --help" for more information about a command. Use "rclone help flags" for to see the global flags. Use "rclone help backends" for a list of supported services. 
` var filterFlagsSingleGroupTemplate = `{{range flagGroups .}}{{if .Flags.HasFlags}}{{.Flags.FlagUsages | trimTrailingWhitespaces}} {{end}}{{end}} ` var filterFlagsMultiGroupTemplate = `{{range flagGroups .}}{{if .Flags.HasFlags}}{{.Help}} (flag group {{.Name}}): {{.Flags.FlagUsages | trimTrailingWhitespaces}} {{end}}{{end}}` var docFlagsTemplate = `--- title: "Global Flags" description: "Rclone Global Flags" # autogenerated - DO NOT EDIT --- # Global Flags This describes the global flags available to every rclone command split into groups. {{range flagGroups .}}{{if .Flags.HasFlags}} ## {{.Name}} {{.Help}}. ` + "```" + ` {{.Flags.FlagUsages | trimTrailingWhitespaces}} ` + "```" + ` {{end}}{{end}} ` // show all the backends func showBackends() { fmt.Printf("All rclone backends:\n\n") for _, backend := range fs.Registry { fmt.Printf(" %-12s %s\n", backend.Prefix, backend.Description) } fmt.Printf("\nTo see more info about a particular backend use:\n") fmt.Printf(" rclone help backend <name>\n") } func quoteString(v any) string { switch v.(type) { case string: return fmt.Sprintf("%q", v) } return fmt.Sprint(v) } // show a single backend func showBackend(name string) { backend, err := fs.Find(name) if err != nil { fs.Fatal(nil, fmt.Sprint(err)) } var standardOptions, advancedOptions fs.Options done := map[string]struct{}{} for _, opt := range backend.Options { // Skip if done already (e.g. 
with Provider options) if _, doneAlready := done[opt.Name]; doneAlready { continue } done[opt.Name] = struct{}{} if opt.Advanced { advancedOptions = append(advancedOptions, opt) } else { standardOptions = append(standardOptions, opt) } } optionsType := "standard" for _, opts := range []fs.Options{standardOptions, advancedOptions} { if len(opts) == 0 { optionsType = "advanced" continue } optionsType = cases.Title(language.Und, cases.NoLower).String(optionsType) fmt.Printf("### %s options\n\n", optionsType) fmt.Printf("Here are the %s options specific to %s (%s).\n\n", optionsType, backend.Name, backend.Description) optionsType = "advanced" for _, opt := range opts { done[opt.Name] = struct{}{} shortOpt := "" if opt.ShortOpt != "" { shortOpt = fmt.Sprintf(" / -%s", opt.ShortOpt) } fmt.Printf("#### --%s%s\n\n", opt.FlagName(backend.Prefix), shortOpt) fmt.Printf("%s\n\n", opt.Help) if opt.IsPassword { fmt.Printf("**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).\n\n") } fmt.Printf("Properties:\n\n") fmt.Printf("- Config: %s\n", opt.Name) fmt.Printf("- Env Var: %s\n", opt.EnvVarName(backend.Prefix)) if opt.Provider != "" { fmt.Printf("- Provider: %s\n", opt.Provider) } fmt.Printf("- Type: %s\n", opt.Type()) defaultValue := opt.GetValue() // Default value and Required are related: Required means option must // have a value, but if there is a default then a value does not have // to be explicitly set and then Required makes no difference. 
if defaultValue != "" { fmt.Printf("- Default: %s\n", quoteString(defaultValue)) } else { fmt.Printf("- Required: %v\n", opt.Required) } // List examples / possible choices if len(opt.Examples) > 0 { if opt.Exclusive { fmt.Printf("- Choices:\n") } else { fmt.Printf("- Examples:\n") } for _, ex := range opt.Examples { fmt.Printf(" - %s\n", quoteString(ex.Value)) for line := range strings.SplitSeq(ex.Help, "\n") { fmt.Printf(" - %s\n", line) } if ex.Provider != "" { fmt.Printf(" - Provider: %s\n", ex.Provider) } } } fmt.Printf("\n") } } if backend.MetadataInfo != nil { fmt.Printf("### Metadata\n\n") fmt.Printf("%s\n\n", strings.TrimSpace(backend.MetadataInfo.Help)) if len(backend.MetadataInfo.System) > 0 { fmt.Printf("Here are the possible system metadata items for the %s backend.\n\n", backend.Name) keys := []string{} for k := range backend.MetadataInfo.System { keys = append(keys, k) } sort.Strings(keys) fmt.Printf("| Name | Help | Type | Example | Read Only |\n") fmt.Printf("|------|------|------|---------|-----------|\n") for _, k := range keys { v := backend.MetadataInfo.System[k] ro := "N" if v.ReadOnly { ro = "**Y**" } fmt.Printf("| %s | %s | %s | %s | %s |\n", k, v.Help, v.Type, v.Example, ro) } fmt.Printf("\n") } fmt.Printf("See the [metadata](/docs/#metadata) docs for more info.\n\n") } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/dedupe/dedupe.go
cmd/dedupe/dedupe.go
// Package dedupe provides the dedupe command.
package dedupe

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/flags"
	"github.com/rclone/rclone/fs/operations"
	"github.com/spf13/cobra"
)

var (
	// dedupeMode selects how remaining duplicates are resolved (interactive by default).
	dedupeMode = operations.DeduplicateInteractive
	// byHash switches deduping from duplicate names to duplicate hashes.
	byHash = false
)

// init registers the dedupe command and its flags with the root command.
func init() {
	cmd.Root.AddCommand(commandDefinition)
	cmdFlag := commandDefinition.Flags()
	flags.FVarP(cmdFlag, &dedupeMode, "dedupe-mode", "", "Dedupe mode interactive|skip|first|newest|oldest|largest|smallest|rename", "")
	flags.BoolVarP(cmdFlag, &byHash, "by-hash", "", false, "Find identical hashes rather than names", "")
}

var commandDefinition = &cobra.Command{
	Use:   "dedupe [mode] remote:path",
	Short: `Interactively find duplicate filenames and delete/rename them.`,
	Long: `By default ` + "`dedupe`" + ` interactively finds files with duplicate names and
offers to delete all but one or rename them to be different. This is
known as deduping by name.

Deduping by name is only useful with a small group of backends (e.g. Google Drive,
Opendrive) that can have duplicate file names. It can be run on wrapping backends
(e.g. crypt) if they wrap a backend which supports duplicate file names.

However if ` + "`--by-hash`" + ` is passed in then dedupe will find files with
duplicate hashes instead which will work on any backend which supports
at least one hash. This can be used to find files with duplicate content.
This is known as deduping by hash.

If deduping by name, first rclone will merge directories with the same
name. It will do this iteratively until all the identically named
directories have been merged.

Next, if deduping by name, for every group of duplicate file names /
hashes, it will delete all but one identical file it finds without
confirmation. This means that for most duplicated files the ` + "`dedupe`" + `
command will not be interactive.

` + "`dedupe`" + ` considers files to be identical if they have the same file path
and the same hash. If the backend does not support hashes (e.g. crypt wrapping
Google Drive) then they will never be found to be identical. If you use the
` + "`--size-only`" + ` flag then files will be considered identical if they have
the same size (any hash will be ignored). This can be useful on crypt backends
which do not support hashes.

Next rclone will resolve the remaining duplicates. Exactly which action
is taken depends on the dedupe mode. By default, rclone will
interactively query the user for each one.

**Important**: Since this can cause data loss, test first with the
` + "`--dry-run` or the `--interactive`/`-i`" + ` flag.

Here is an example run.

Before - with duplicates

` + "```console" + `
$ rclone lsl drive:dupes
6048320 2016-03-05 16:23:16.798000000 one.txt
6048320 2016-03-05 16:23:11.775000000 one.txt
564374 2016-03-05 16:23:06.731000000 one.txt
6048320 2016-03-05 16:18:26.092000000 one.txt
6048320 2016-03-05 16:22:46.185000000 two.txt
1744073 2016-03-05 16:22:38.104000000 two.txt
564374 2016-03-05 16:22:52.118000000 two.txt
` + "```" + `

Now the ` + "`dedupe`" + ` session

` + "```console" + `
$ rclone dedupe drive:dupes
2016/03/05 16:24:37 Google drive root 'dupes': Looking for duplicates using interactive mode.
one.txt: Found 4 files with duplicate names
one.txt: Deleting 2/3 identical duplicates (MD5 "1eedaa9fe86fd4b8632e2ac549403b36")
one.txt: 2 duplicates remain
1: 6048320 bytes, 2016-03-05 16:23:16.798000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
2: 564374 bytes, 2016-03-05 16:23:06.731000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
s) Skip and do nothing
k) Keep just one (choose which in next step)
r) Rename all to be different (by changing file.jpg to file-1.jpg)
s/k/r> k
Enter the number of the file to keep> 1
one.txt: Deleted 1 extra copies
two.txt: Found 3 files with duplicate names
two.txt: 3 duplicates remain
1: 564374 bytes, 2016-03-05 16:22:52.118000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
2: 6048320 bytes, 2016-03-05 16:22:46.185000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
3: 1744073 bytes, 2016-03-05 16:22:38.104000000, MD5 851957f7fb6f0bc4ce76be966d336802
s) Skip and do nothing
k) Keep just one (choose which in next step)
r) Rename all to be different (by changing file.jpg to file-1.jpg)
s/k/r> r
two-1.txt: renamed from: two.txt
two-2.txt: renamed from: two.txt
two-3.txt: renamed from: two.txt
` + "```" + `

The result being

` + "```console" + `
$ rclone lsl drive:dupes
6048320 2016-03-05 16:23:16.798000000 one.txt
564374 2016-03-05 16:22:52.118000000 two-1.txt
6048320 2016-03-05 16:22:46.185000000 two-2.txt
1744073 2016-03-05 16:22:38.104000000 two-3.txt
` + "```" + `

Dedupe can be run non interactively using the ` + "`" + `--dedupe-mode` + "`" + ` flag
or by using an extra parameter with the same value

- ` + "`" + `--dedupe-mode interactive` + "`" + ` - interactive as above.
- ` + "`" + `--dedupe-mode skip` + "`" + ` - removes identical files then skips anything left.
- ` + "`" + `--dedupe-mode first` + "`" + ` - removes identical files then keeps the first one.
- ` + "`" + `--dedupe-mode newest` + "`" + ` - removes identical files then keeps the newest one.
- ` + "`" + `--dedupe-mode oldest` + "`" + ` - removes identical files then keeps the oldest one.
- ` + "`" + `--dedupe-mode largest` + "`" + ` - removes identical files then keeps the largest one.
- ` + "`" + `--dedupe-mode smallest` + "`" + ` - removes identical files then keeps the smallest one.
- ` + "`" + `--dedupe-mode rename` + "`" + ` - removes identical files then renames the rest to be different.
- ` + "`" + `--dedupe-mode list` + "`" + ` - lists duplicate dirs and files only and changes nothing.

For example, to rename all the identically named photos in your Google Photos
directory, do

` + "```console" + `
rclone dedupe --dedupe-mode rename "drive:Google Photos"
` + "```" + `

Or

` + "```console" + `
rclone dedupe rename "drive:Google Photos"
` + "```",
	Annotations: map[string]string{
		"versionIntroduced": "v1.27",
		"groups":            "Important",
	},
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(1, 2, command, args)
		if len(args) > 1 {
			// An optional first positional argument sets the dedupe mode,
			// equivalent to --dedupe-mode.
			err := dedupeMode.Set(args[0])
			if err != nil {
				fs.Fatal(nil, fmt.Sprint(err))
			}
			args = args[1:]
		}
		fdst := cmd.NewFsSrc(args)
		// Deduping by name only makes sense on backends that can hold
		// duplicate names; warn (but continue) otherwise.
		if !byHash && !fdst.Features().DuplicateFiles {
			fs.Logf(fdst, "Can't have duplicate names here. Perhaps you wanted --by-hash ? Continuing anyway.")
		}
		cmd.Run(false, false, command, func() error {
			return operations.Deduplicate(context.Background(), fdst, dedupeMode, byHash)
		})
	},
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/selfupdate/verify.go
cmd/selfupdate/verify.go
//go:build !noselfupdate

package selfupdate

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"strings"

	"github.com/ProtonMail/go-crypto/openpgp"
	"github.com/ProtonMail/go-crypto/openpgp/clearsign"
	"github.com/rclone/rclone/fs"
)

// ncwPublicKeyPGP is the armored PGP public key used to verify the
// clearsigned SHA256SUMS files published with rclone releases.
var ncwPublicKeyPGP = `-----BEGIN PGP PUBLIC KEY BLOCK-----

mQGiBDuy3V0RBADVQOAF5aFiCxD3t2h6iAF2WMiaMlgZ6kX2i/u7addNkzX71VU9
7NpI0SnsP5YWt+gEedST6OmFbtLfZWCR4KWn5XnNdjCMNhxaH6WccVqNm4ALPIqT
59uVjkgf8RISmmoNJ1d+2wMWjQTUfwOEmoIgH6n+2MYNUKuctBrwAACflwCg1I1Q
O/prv/5hczdpQCs+fL87DxsD/Rt7pIXvsIOZyQWbIhSvNpGalJuMkW5Jx92UjsE9
1Ipo3Xr6SGRPgW9+NxAZAsiZfCX/19knAyNrN9blwL0rcPDnkhdGwK69kfjF+wq+
QbogRGodbKhqY4v+cMNkKiemBuTQiWPkpKjifwNsD1fNjNKfDP3pJ64Yz7a4fuzV
X1YwBACpKVuEen34lmcX6ziY4jq8rKibKBs4JjQCRO24kYoHDULVe+RS9krQWY5b
e0foDhru4dsKccefK099G+WEzKVCKxupstWkTT/iJwajR8mIqd4AhD0wO9W3MCfV
Ov8ykMDZ7qBWk1DHc87Ep3W1o8t8wq74ifV+HjhhWg8QAylXg7QlTmljayBDcmFp
Zy1Xb29kIDxuaWNrQGNyYWlnLXdvb2QuY29tPoh0BBMRCAA0BQsHCgMEAxUDAgMW
AgECF4ACGQEWIQT79zfs6firGGBL0qyTk14C/ztU+gUCZS/mXAIbIwAKCRCTk14C
/ztU+tX+AJ9CUAnPvT4w5yRAPRfDiwWIPUqBOgCgiTelkzvUxvLWnYmpowwzKmsx
qaSJAjMEEAEIAB0WIQTjs1jchY+zB/SBcLnLDb68XzLIHQUCZPRnNAAKCRDLDb68
XzLIHZSAD/oCk9Z0xJfbpriphTBxFy7bWyPKF1lM1GZZaLKkktGfunf1i0Q7rhwp
Nu+u1launlOTp6ZoY36Ce2Qa1eSxWAQdjVajw9kOHXCAewrTREOMY/mb7RVGjajo
0Egl8T9iD3JRyaxu2iVtbpZYuqehtGG28CaCzmtqE+EJcx1cGqAGSuuaDWRYlVX8
KDip44GQB5Lut30vwSIoZG1CPCR6VE82u4cl3mYZUfcJkCHsiLzoeadVzb+fOd+2
ybzBn8Y77ifGgM+dSFSHe03mFfcHPdp0QImF9HQR7XI0UMZmEJsw7c2vDrRa+kRY
2A4/amGn4Tahuazq8g2yqgGm3yAj49qGNarAau849lDr7R49j73ESnNVBGJ9ShzU
4Ls+S1A5gohZVu2s1fkE3mbAmoTfU4JCrpRydOuL9xRJk5gbL44sKeuGODNshyTP
JzG9DmRHpLsBn59v8mg5tqSfBIGqcqBxxnYHJnkK801MkaLW2m7wDmtz6P3TW86g
GukzfIN3/OufLjnpN3Nx376JwWDDIyif7sn6/q+ZMwGz9uLKZkAeM5c3Dh4ygpgl
iSLoV2bZzDz0iLxKWW7QOVVdWHmlEqbTldpQ7gUEPG7mxpzVo0xd6nHncSq0M91x
29It4B3fATx/iJB2eardMzSsbzHiwTg0eswhYYGpSKZLgp4RShnVAbkCDQQ7st2B
EAgAjpB0UGDf/FrWAUo9jLWKFX15J0arBZkYm+iRax8K8fLnXzS2P+9Q04sAmt2q
CUxK9681Nd7xtPrkPrjbcACwuFyH3Cr9o2qseiVNgAHPFGKCNxLX/9PKWfmdoZTO
VVBcNV+sOTcx382uR04WPuv9jIwXT6JbCkXPaoCMv3mLnB9VnWRYatPYCaK8TXAP
WxZP8lrcUMjQ1GRTQ1vP9rRMp7iaXyItW1lelNFvHEII92QddeBLK7V5ng2sX/BM
m6/AafXZMnUQX3lpWQfEBTDT4qYsZ1zIEb4gq4dqauyNYgBcZdX//8oDE+BS2Fxx
DTccyOW0Wyt2Z6flDTfhgzd46wADBQf+MAqIgADwulmZk+e30Znj46VmnbZUB/J8
M4WXg6X5xaOQsCCMAWybmCc4pxFIT/1c/GdCqSHDv5nKBi5QyBMMn33/kgzVRAve
ihL6gWsNoT31Lxst457XuyRx1dwD8rzdWoP2b3etBGdu0P7vnOoqRmf1Y0XIoJeD
k/o8U901hG2VAo5zAVH2YdEtSZqlBIAzxjakKAAtnsZWIpBxrz9NPVOBmT18kxlg
Z7P4iU4/FMnGOfzT6/LCTj/B0hZKJCP7y7lHNP2yOabvvBsxU0ZGph1b8R6Zb1nP
2+LQIi8kaBs8ypy7HDx7/mWe5DoyLe4NHQ/ZE0gCEWt1mlVIwTzFBohGBBgRAgAG
BQI7st2BAAoJEJOTXgL/O1T6YsEAoLZx0XLt4tpAC/LNwTZUrodUiOckAKC4DTRv
EtC4nj5EImssVk/xmU3axw==
=VUqh
-----END PGP PUBLIC KEY BLOCK-----
`

// verifyHashsum downloads the SHA256SUMS file for the given release version
// from siteURL and checks that hash matches the entry recorded for archive.
func verifyHashsum(ctx context.Context, siteURL, version, archive string, hash []byte) error {
	sumsURL := fmt.Sprintf("%s/%s/SHA256SUMS", siteURL, version)
	sumsBuf, err := downloadFile(ctx, sumsURL)
	if err != nil {
		return err
	}
	fs.Debugf(nil, "downloaded hashsum list: %s", sumsURL)
	return verifyHashsumDownloaded(ctx, sumsBuf, archive, hash)
}

// verifyHashsumDownloaded verifies the PGP clearsign signature of the
// downloaded SHA256SUMS buffer against ncwPublicKeyPGP, then compares the
// hash listed for archive with the supplied hash.
func verifyHashsumDownloaded(ctx context.Context, sumsBuf []byte, archive string, hash []byte) error {
	keyRing, err := openpgp.ReadArmoredKeyRing(strings.NewReader(ncwPublicKeyPGP))
	if err != nil {
		return fmt.Errorf("unsupported signing key: %w", err)
	}
	// Separate the signed text from its armored detached signature.
	block, rest := clearsign.Decode(sumsBuf)
	if block == nil {
		return errors.New("invalid hashsum signature: couldn't find detached signature")
	}
	if len(rest) > 0 {
		// Any bytes outside the signed block are unsigned and untrusted.
		return fmt.Errorf("invalid hashsum signature: %d bytes of unsigned data", len(rest))
	}
	_, err = openpgp.CheckDetachedSignature(keyRing, bytes.NewReader(block.Bytes), block.ArmoredSignature.Body, nil)
	if err != nil {
		return fmt.Errorf("invalid hashsum signature: %w", err)
	}
	// findFileHash (defined elsewhere in this package) extracts the expected
	// hash for the named archive from the sums file.
	wantHash, err := findFileHash(sumsBuf, archive)
	if err != nil {
		return err
	}
	if !bytes.Equal(hash, wantHash) {
		return fmt.Errorf("archive hash mismatch: want %02x vs got %02x", wantHash, hash)
	}
	return nil
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/selfupdate/writable_windows.go
cmd/selfupdate/writable_windows.go
//go:build windows && !noselfupdate package selfupdate import ( "os" ) func writable(path string) bool { info, err := os.Stat(path) const UserWritableBit = 128 if err == nil { return info.Mode().Perm()&UserWritableBit != 0 } return false }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/selfupdate/noselfupdate.go
cmd/selfupdate/noselfupdate.go
//go:build noselfupdate

package selfupdate

import (
	"github.com/rclone/rclone/lib/buildinfo"
)

// init records the noselfupdate build tag in the build info so the build
// configuration is visible (presumably surfaced by version/build-info
// output — confirm against lib/buildinfo usage).
func init() {
	buildinfo.Tags = append(buildinfo.Tags, "noselfupdate")
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/selfupdate/selfupdate_test.go
cmd/selfupdate/selfupdate_test.go
//go:build !noselfupdate

package selfupdate

import (
	"context"
	"os"
	"os/exec"
	"path/filepath"
	"regexp"
	"runtime"
	"testing"
	"time"

	"github.com/rclone/rclone/fs"
	_ "github.com/rclone/rclone/fstest" // needed to run under integration tests
	"github.com/rclone/rclone/fstest/testy"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestGetVersion checks the version-argument validation and normalisation
// done by GetVersion (requires network for the latest/micro-release lookups,
// hence marked unreliable).
func TestGetVersion(t *testing.T) {
	testy.SkipUnreliable(t)
	ctx := context.Background()

	// a beta version can only have "v" prepended
	resultVer, _, err := GetVersion(ctx, true, "1.2.3.4")
	assert.NoError(t, err)
	assert.Equal(t, "v1.2.3.4", resultVer)

	// but a stable version syntax should be checked
	_, _, err = GetVersion(ctx, false, "1")
	assert.Error(t, err)
	_, _, err = GetVersion(ctx, false, "1.")
	assert.Error(t, err)
	_, _, err = GetVersion(ctx, false, "1.2.")
	assert.Error(t, err)
	_, _, err = GetVersion(ctx, false, "1.2.3.4")
	assert.Error(t, err)

	// incomplete stable version should have micro release added
	resultVer, _, err = GetVersion(ctx, false, "1.52")
	assert.NoError(t, err)
	assert.Equal(t, "v1.52.3", resultVer)
}

// TestInstallOnLinux exercises the whole install flow against a temporary
// directory: no-op update, permission failures, permission preservation,
// temp-file cleanup and that the installed binary runs (Linux only;
// downloads a real build, hence marked unreliable).
func TestInstallOnLinux(t *testing.T) {
	testy.SkipUnreliable(t)
	if runtime.GOOS != "linux" {
		t.Skip("this is a Linux only test")
	}

	// Prepare for test
	ctx := context.Background()
	testDir := t.TempDir()
	path := filepath.Join(testDir, "rclone")
	regexVer := regexp.MustCompile(`v[0-9]\S+`)
	betaVer, _, err := GetVersion(ctx, true, "")
	assert.NoError(t, err)

	// Must do nothing if version isn't changing
	assert.NoError(t, InstallUpdate(ctx, &Options{Beta: true, Output: path, Version: fs.Version}))

	// Must fail on non-writable file
	assert.NoError(t, os.WriteFile(path, []byte("test"), 0644))
	assert.NoError(t, os.Chmod(path, 0000))
	defer func() {
		_ = os.Chmod(path, 0644)
	}()
	err = (InstallUpdate(ctx, &Options{Beta: true, Output: path}))
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "run self-update as root")

	// Must keep non-standard permissions
	assert.NoError(t, os.Chmod(path, 0644))
	require.NoError(t, InstallUpdate(ctx, &Options{Beta: true, Output: path}))
	info, err := os.Stat(path)
	assert.NoError(t, err)
	assert.Equal(t, os.FileMode(0644), info.Mode().Perm())

	// Must remove temporary files
	files, err := os.ReadDir(testDir)
	assert.NoError(t, err)
	assert.Equal(t, 1, len(files))

	// Must contain valid executable
	assert.NoError(t, os.Chmod(path, 0755))
	cmd := exec.Command(path, "version")
	output, err := cmd.CombinedOutput()
	assert.NoError(t, err)
	assert.True(t, cmd.ProcessState.Success())
	assert.Equal(t, betaVer, regexVer.FindString(string(output)))
}

// TestRenameOnWindows checks the Windows-specific rename dance: a running
// executable cannot be deleted, so the updater renames it to *.old.exe (or a
// randomized name when that is also in use) before installing the new binary
// (Windows only; downloads real builds, hence marked unreliable).
func TestRenameOnWindows(t *testing.T) {
	testy.SkipUnreliable(t)
	if runtime.GOOS != "windows" {
		t.Skip("this is a Windows only test")
	}

	// Prepare for test
	ctx := context.Background()
	testDir := t.TempDir()
	path := filepath.Join(testDir, "rclone.exe")
	regexVer := regexp.MustCompile(`v[0-9]\S+`)
	stableVer, _, err := GetVersion(ctx, false, "")
	assert.NoError(t, err)
	betaVer, _, err := GetVersion(ctx, true, "")
	assert.NoError(t, err)

	// Must not create temporary files when target doesn't exist
	assert.NoError(t, InstallUpdate(ctx, &Options{Beta: true, Output: path}))
	files, err := os.ReadDir(testDir)
	assert.NoError(t, err)
	assert.Equal(t, 1, len(files))

	// Must save running executable as the "old" file
	cmdWait := exec.Command(path, "config")
	stdinWait, err := cmdWait.StdinPipe() // Make it run waiting for input
	assert.NoError(t, err)
	assert.NoError(t, cmdWait.Start())
	assert.NoError(t, InstallUpdate(ctx, &Options{Beta: false, Output: path}))
	files, err = os.ReadDir(testDir)
	assert.NoError(t, err)
	assert.Equal(t, 2, len(files))
	pathOld := filepath.Join(testDir, "rclone.old.exe")
	_, err = os.Stat(pathOld)
	assert.NoError(t, err)
	// New binary reports the stable version, saved binary the old beta one.
	cmd := exec.Command(path, "version")
	output, err := cmd.CombinedOutput()
	assert.NoError(t, err)
	assert.True(t, cmd.ProcessState.Success())
	assert.Equal(t, stableVer, regexVer.FindString(string(output)))
	cmdOld := exec.Command(pathOld, "version")
	output, err = cmdOld.CombinedOutput()
	assert.NoError(t, err)
	assert.True(t, cmdOld.ProcessState.Success())
	assert.Equal(t, betaVer, regexVer.FindString(string(output)))

	// Stop previous waiting executable, run new and saved executables
	_ = stdinWait.Close()
	_ = cmdWait.Wait()
	time.Sleep(100 * time.Millisecond)
	cmdWait = exec.Command(path, "config")
	stdinWait, err = cmdWait.StdinPipe()
	assert.NoError(t, err)
	assert.NoError(t, cmdWait.Start())
	cmdWaitOld := exec.Command(pathOld, "config")
	stdinWaitOld, err := cmdWaitOld.StdinPipe()
	assert.NoError(t, err)
	assert.NoError(t, cmdWaitOld.Start())

	// Updating when the "old" executable is running must produce a random "old" file
	assert.NoError(t, InstallUpdate(ctx, &Options{Beta: true, Output: path}))
	files, err = os.ReadDir(testDir)
	assert.NoError(t, err)
	assert.Equal(t, 3, len(files))

	// Stop all waiting executables
	_ = stdinWait.Close()
	_ = cmdWait.Wait()
	_ = stdinWaitOld.Close()
	_ = cmdWaitOld.Wait()
	time.Sleep(100 * time.Millisecond)
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/selfupdate/selfupdate.go
cmd/selfupdate/selfupdate.go
//go:build !noselfupdate // Package selfupdate provides the selfupdate command. package selfupdate import ( "archive/zip" "bufio" "bytes" "context" "crypto/sha256" _ "embed" "encoding/hex" "errors" "fmt" "io" "net/http" "os" "os/exec" "path/filepath" "regexp" "runtime" "strings" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/cmd/cmount" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/lib/buildinfo" "github.com/rclone/rclone/lib/random" "github.com/spf13/cobra" versionCmd "github.com/rclone/rclone/cmd/version" ) //go:embed selfupdate.md var selfUpdateHelp string // Options contains options for the self-update command type Options struct { Check bool Output string // output path Beta bool // mutually exclusive with Stable (false means "stable") Stable bool // mutually exclusive with Beta Version string Package string // package format: zip, deb, rpm (empty string means "zip") } // Opt is options set via command line var Opt = Options{} func init() { cmd.Root.AddCommand(cmdSelfUpdate) cmdFlags := cmdSelfUpdate.Flags() flags.BoolVarP(cmdFlags, &Opt.Check, "check", "", Opt.Check, "Check for latest release, do not download", "") flags.StringVarP(cmdFlags, &Opt.Output, "output", "", Opt.Output, "Save the downloaded binary at a given path (default: replace running binary)", "") flags.BoolVarP(cmdFlags, &Opt.Stable, "stable", "", Opt.Stable, "Install stable release (this is the default)", "") flags.BoolVarP(cmdFlags, &Opt.Beta, "beta", "", Opt.Beta, "Install beta release", "") flags.StringVarP(cmdFlags, &Opt.Version, "version", "", Opt.Version, "Install the given rclone version (default: latest)", "") flags.StringVarP(cmdFlags, &Opt.Package, "package", "", Opt.Package, "Package format: zip|deb|rpm (default: zip)", "") } var cmdSelfUpdate = &cobra.Command{ Use: "selfupdate", Aliases: []string{"self-update"}, Short: `Update the rclone binary.`, Long: 
strings.TrimSpace(selfUpdateHelp), Annotations: map[string]string{ "versionIntroduced": "v1.55", }, Run: func(command *cobra.Command, args []string) { ctx := context.Background() cmd.CheckArgs(0, 0, command, args) if Opt.Package == "" { Opt.Package = "zip" } gotActionFlags := Opt.Stable || Opt.Beta || Opt.Output != "" || Opt.Version != "" || Opt.Package != "zip" if Opt.Check && !gotActionFlags { versionCmd.CheckVersion(ctx) return } if Opt.Package != "zip" { if Opt.Package != "deb" && Opt.Package != "rpm" { fs.Fatalf(nil, "--package should be one of zip|deb|rpm") } if runtime.GOOS != "linux" { fs.Fatalf(nil, ".deb and .rpm packages are supported only on Linux") } else if os.Geteuid() != 0 && !Opt.Check { fs.Fatalf(nil, ".deb and .rpm must be installed by root") } if Opt.Output != "" && !Opt.Check { fmt.Println("Warning: --output is ignored with --package deb|rpm") } } if err := InstallUpdate(context.Background(), &Opt); err != nil { fs.Fatalf(nil, "Error: %v", err) } }, } // GetVersion can get the latest release number from the download site // or massage a stable release number - prepend semantic "v" prefix // or find the latest micro release for a given major.minor release. // Note: this will not be applied to beta releases. 
func GetVersion(ctx context.Context, beta bool, version string) (newVersion, siteURL string, err error) { siteURL = "https://downloads.rclone.org" if beta { siteURL = "https://beta.rclone.org" } if version == "" { // Request the latest release number from the download site _, newVersion, _, err = versionCmd.GetVersion(ctx, siteURL+"/version.txt") return } newVersion = version if version[0] != 'v' { newVersion = "v" + version } if beta { return } if valid, _ := regexp.MatchString(`^v\d+\.\d+(\.\d+)?$`, newVersion); !valid { return "", siteURL, errors.New("invalid semantic version") } // Find the latest stable micro release if strings.Count(newVersion, ".") == 1 { html, err := downloadFile(ctx, siteURL) if err != nil { return "", siteURL, fmt.Errorf("failed to get list of releases: %w", err) } reSubver := fmt.Sprintf(`href="\./%s\.\d+/"`, regexp.QuoteMeta(newVersion)) allSubvers := regexp.MustCompile(reSubver).FindAllString(string(html), -1) if allSubvers == nil { return "", siteURL, errors.New("could not find the minor release") } // Use the fact that releases in the index are sorted by date lastSubver := allSubvers[len(allSubvers)-1] newVersion = lastSubver[8 : len(lastSubver)-2] } return } // InstallUpdate performs rclone self-update func InstallUpdate(ctx context.Context, opt *Options) error { // Find the latest release number if opt.Stable && opt.Beta { return errors.New("--stable and --beta are mutually exclusive") } // The `cmount` tag is added by cmd/cmount/mount.go only if build is static. 
_, tags := buildinfo.GetLinkingAndTags()
	// Refuse to update if the running binary has FUSE mount support but the
	// replacement would not (cmount is a build-tag-dependent capability).
	if strings.Contains(" "+tags+" ", " cmount ") && !cmount.ProvidedBy(runtime.GOOS) {
		return errors.New("updating would discard the mount FUSE capability, aborting")
	}

	newVersion, siteURL, err := GetVersion(ctx, opt.Beta, opt.Version)
	if err != nil {
		return fmt.Errorf("unable to detect new version: %w", err)
	}

	oldVersion := fs.Version
	if newVersion == oldVersion {
		fs.Logf(nil, "rclone is up to date")
		return nil
	}

	// Install .deb/.rpm package if requested by user
	if opt.Package == "deb" || opt.Package == "rpm" {
		if opt.Check {
			fmt.Println("Warning: --package flag is ignored in --check mode")
		} else {
			err := installPackage(ctx, opt.Beta, newVersion, siteURL, opt.Package)
			if err == nil {
				fs.Logf(nil, "Successfully updated rclone package from version %s to version %s", oldVersion, newVersion)
			}
			return err
		}
	}

	// Get the current executable path
	executable, err := os.Executable()
	if err != nil {
		return fmt.Errorf("unable to find executable: %w", err)
	}

	targetFile := opt.Output
	if targetFile == "" {
		targetFile = executable
	}

	if opt.Check {
		fmt.Printf("Without --check this would install rclone version %s at %s\n", newVersion, targetFile)
		return nil
	}

	// Make temporary file names and check for possible access errors in advance
	var newFile string
	if newFile, err = makeRandomExeName(targetFile, "new"); err != nil {
		return err
	}
	savedFile := ""
	if runtime.GOOS == "windows" {
		savedFile = targetFile
		savedFile = strings.TrimSuffix(savedFile, ".exe")
		savedFile += ".old.exe"
	}
	if savedFile == executable || newFile == executable {
		return fmt.Errorf("%s: a temporary file would overwrite the executable, specify a different --output path", targetFile)
	}
	if err := verifyAccess(targetFile); err != nil {
		return err
	}

	// Download the update as a temporary file
	err = downloadUpdate(ctx, opt.Beta, newVersion, siteURL, newFile, "zip")
	if err != nil {
		return fmt.Errorf("failed to update rclone: %w", err)
	}

	err = replaceExecutable(targetFile, newFile, savedFile)
	if err == nil {
		fs.Logf(nil, "Successfully updated rclone from version %s to version %s", oldVersion, newVersion)
	}
	return err
}

// installPackage downloads the release package (.deb or .rpm) for the given
// version into a temporary file and installs it with the matching system
// package manager (dpkg or rpm). The temporary file is always removed.
func installPackage(ctx context.Context, beta bool, version, siteURL, packageFormat string) error {
	tempFile, err := os.CreateTemp("", "rclone.*."+packageFormat)
	if err != nil {
		return fmt.Errorf("unable to write temporary package: %w", err)
	}
	packageFile := tempFile.Name()
	_ = tempFile.Close()
	defer func() {
		if rmErr := os.Remove(packageFile); rmErr != nil {
			fs.Errorf(nil, "%s: could not remove temporary package: %v", packageFile, rmErr)
		}
	}()
	if err := downloadUpdate(ctx, beta, version, siteURL, packageFile, packageFormat); err != nil {
		return err
	}

	packageCommand := "dpkg"
	if packageFormat == "rpm" {
		packageCommand = "rpm"
	}
	cmd := exec.Command(packageCommand, "-i", packageFile)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("failed to run %s: %v", packageCommand, err)
	}
	return nil
}

// replaceExecutable atomically swaps newFile in place of targetFile.
// On Windows (savedFile != "") a running executable cannot be removed, so the
// old binary is renamed aside instead; savedFile may be randomized if the
// preferred ".old.exe" name is itself unavailable.
func replaceExecutable(targetFile, newFile, savedFile string) error {
	// Copy permission bits from the old executable
	// (it was extracted with mode 0755)
	fileInfo, err := os.Lstat(targetFile)
	if err == nil {
		if err = os.Chmod(newFile, fileInfo.Mode()); err != nil {
			return fmt.Errorf("failed to set permission: %w", err)
		}
	}

	if err = os.Remove(targetFile); os.IsNotExist(err) {
		err = nil
	}

	if err != nil && savedFile != "" {
		// Windows forbids removal of a running executable so we rename it.
		// For starters, rename download as the original file with ".old.exe" appended.
		var saveErr error
		if saveErr = os.Remove(savedFile); os.IsNotExist(saveErr) {
			saveErr = nil
		}
		if saveErr == nil {
			saveErr = os.Rename(targetFile, savedFile)
		}

		if saveErr != nil {
			// The ".old" file cannot be removed or cannot be renamed to.
			// This usually means that the running executable has a name with ".old".
			// This can happen in very rare cases, but we ought to handle it.
			// Try inserting a randomness in the name to mitigate it.
			fs.Debugf(nil, "%s: cannot replace old file, randomizing name", savedFile)
			savedFile, saveErr = makeRandomExeName(targetFile, "old")
			if saveErr == nil {
				if saveErr = os.Remove(savedFile); os.IsNotExist(saveErr) {
					saveErr = nil
				}
			}
			if saveErr == nil {
				saveErr = os.Rename(targetFile, savedFile)
			}
		}

		if saveErr == nil {
			fs.Infof(nil, "The old executable was saved as %s", savedFile)
			err = nil
		}
	}

	if err == nil {
		err = os.Rename(newFile, targetFile)
	}
	if err != nil {
		if rmErr := os.Remove(newFile); rmErr != nil {
			fs.Errorf(nil, "%s: could not remove temporary file: %v", newFile, rmErr)
		}
		return err
	}
	return nil
}

// makeRandomExeName returns a file name of the form "base.XXXX.extension"
// (with ".exe" appended on Windows) that does not currently exist, trying a
// few random suffixes before giving up.
func makeRandomExeName(baseName, extension string) (string, error) {
	const maxAttempts = 5

	if runtime.GOOS == "windows" {
		baseName = strings.TrimSuffix(baseName, ".exe")
		extension += ".exe"
	}

	for range maxAttempts {
		filename := fmt.Sprintf("%s.%s.%s", baseName, random.String(4), extension)
		if _, err := os.Stat(filename); os.IsNotExist(err) {
			return filename, nil
		}
	}

	return "", fmt.Errorf("cannot find a file name like %s.xxxx.%s", baseName, extension)
}

// downloadUpdate fetches the release archive for version from siteURL,
// verifies its SHA-256 hashsum (non-beta only), and either writes the raw
// package (.deb/.rpm) or extracts the rclone binary from the zip to newFile.
func downloadUpdate(ctx context.Context, beta bool, version, siteURL, newFile, packageFormat string) error {
	osName := runtime.GOOS
	if osName == "darwin" {
		osName = "osx"
	}
	arch := runtime.GOARCH
	if arch == "arm" {
		// Check the ARM compatibility level of the current CPU.
		// We don't know if this matches the rclone binary currently running, it
		// could for example be a ARMv6 variant running on a ARMv7 compatible CPU,
		// so we will simply pick the best possible variant.
		switch buildinfo.GetSupportedGOARM() {
		case 7:
			// This system can run any binaries built with GOARCH=arm, including GOARM=7.
			// Pick the ARMv7 variant of rclone, published with suffix "arm-v7".
			arch = "arm-v7"
		case 6:
			// This system can run binaries built with GOARCH=arm and GOARM=6 or lower.
			// Pick the ARMv6 variant of rclone, published with suffix "arm-v6".
			arch = "arm-v6"
		case 5:
			// This system can only run binaries built with GOARCH=arm and GOARM=5.
			// Pick the ARMv5 variant of rclone, which also works without hardfloat,
			// published with suffix "arm".
			arch = "arm"
		}
	}
	archiveFilename := fmt.Sprintf("rclone-%s-%s-%s.%s", version, osName, arch, packageFormat)
	archiveURL := fmt.Sprintf("%s/%s/%s", siteURL, version, archiveFilename)
	archiveBuf, err := downloadFile(ctx, archiveURL)
	if err != nil {
		return err
	}
	gotHash := sha256.Sum256(archiveBuf)
	strHash := hex.EncodeToString(gotHash[:])
	fs.Debugf(nil, "downloaded release archive with hashsum %s from %s", strHash, archiveURL)

	// CI/CD does not provide hashsums for beta releases
	if !beta {
		if err := verifyHashsum(ctx, siteURL, version, archiveFilename, gotHash[:]); err != nil {
			return err
		}
	}

	if packageFormat == "deb" || packageFormat == "rpm" {
		if err := os.WriteFile(newFile, archiveBuf, 0644); err != nil {
			return fmt.Errorf("cannot write temporary .%s: %w", packageFormat, err)
		}
		return nil
	}

	entryName := fmt.Sprintf("rclone-%s-%s-%s/rclone", version, osName, arch)
	if runtime.GOOS == "windows" {
		entryName += ".exe"
	}

	// Extract executable to a temporary file, then replace it by an instant rename
	err = extractZipToFile(archiveBuf, entryName, newFile)
	if err != nil {
		return err
	}
	fs.Debugf(nil, "extracted %s to %s", entryName, newFile)
	return nil
}

// verifyAccess checks that file (or, if it does not exist yet, its parent
// directory) is a writable regular-file target, returning a user-facing error
// suggesting --output or elevated privileges otherwise.
func verifyAccess(file string) error {
	admin := "root"
	if runtime.GOOS == "windows" {
		admin = "Administrator"
	}

	fileInfo, fileErr := os.Lstat(file)

	if fileErr != nil {
		// Target does not exist (or cannot be stat'ed): validate the directory instead.
		dir := filepath.Dir(file)
		dirInfo, dirErr := os.Lstat(dir)
		if dirErr != nil {
			return dirErr
		}
		if !dirInfo.Mode().IsDir() {
			return fmt.Errorf("%s: parent path is not a directory, specify a different path using --output", dir)
		}
		if !writable(dir) {
			return fmt.Errorf("%s: directory is not writable, please run self-update as %s", dir, admin)
		}
	}

	if fileErr == nil && !fileInfo.Mode().IsRegular() {
		return fmt.Errorf("%s: path is not a normal file, specify a different path using --output", file)
	}
	if fileErr == nil && !writable(file) {
		return fmt.Errorf("%s: file is not writable, run self-update as %s", file, admin)
	}
	return nil
}

// findFileHash scans a SHA256SUMS-style buffer ("<hexhash> <filename>" per
// line) and returns the decoded hash for filename.
// NOTE(review): sha256sum's standard output separates hash and name with TWO
// spaces; if that is the input format here, this single-space Split yields 3
// tokens and never matches — confirm the separator against the published
// SHA256SUMS files.
func findFileHash(buf []byte, filename string) (hash []byte, err error) {
	lines := bufio.NewScanner(bytes.NewReader(buf))
	for lines.Scan() {
		tokens := strings.Split(lines.Text(), " ")
		if len(tokens) == 2 && tokens[1] == filename {
			if hash, err := hex.DecodeString(tokens[0]); err == nil {
				return hash, nil
			}
		}
	}
	return nil, fmt.Errorf("%s: unable to find hash", filename)
}

// extractZipToFile extracts the archive member entryName from the in-memory
// zip buf into newFile (mode 0755), replacing any pre-existing newFile.
func extractZipToFile(buf []byte, entryName, newFile string) error {
	zipReader, err := zip.NewReader(bytes.NewReader(buf), int64(len(buf)))
	if err != nil {
		return err
	}

	var reader io.ReadCloser
	for _, entry := range zipReader.File {
		if entry.Name == entryName {
			reader, err = entry.Open()
			break
		}
	}
	// Report an Open failure as such rather than conflating it with a
	// missing archive member (the original message hid the real error).
	if err != nil {
		return fmt.Errorf("%s: cannot open file in archive: %w", entryName, err)
	}
	if reader == nil {
		return fmt.Errorf("%s: file not found in archive", entryName)
	}
	defer func() { _ = reader.Close() }()

	err = os.Remove(newFile)
	if err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("%s: unable to create new file: %v", newFile, err)
	}
	writer, err := os.OpenFile(newFile, os.O_CREATE|os.O_EXCL|os.O_WRONLY, os.FileMode(0755))
	if err != nil {
		return err
	}

	_, err = io.Copy(writer, reader)
	_ = writer.Close()
	if err != nil {
		if rmErr := os.Remove(newFile); rmErr != nil {
			fs.Errorf(nil, "%s: could not remove temporary file: %v", newFile, rmErr)
		}
	}
	return err
}

// downloadFile GETs url and returns the whole body. The results are named so
// that the deferred fs.CheckClose can record a body-close error in err —
// previously the close error was silently discarded.
func downloadFile(ctx context.Context, url string) (buf []byte, err error) {
	resp, err := fshttp.NewClient(ctx).Get(url)
	if err != nil {
		return nil, err
	}
	defer fs.CheckClose(resp.Body, &err)
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("failed with %s downloading %s", resp.Status, url)
	}
	return io.ReadAll(resp.Body)
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/selfupdate/writable_unix.go
cmd/selfupdate/writable_unix.go
//go:build !windows && !plan9 && !js && !noselfupdate package selfupdate import ( "golang.org/x/sys/unix" ) func writable(path string) bool { return unix.Access(path, unix.W_OK) == nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/selfupdate/verify_test.go
cmd/selfupdate/verify_test.go
//go:build !noselfupdate package selfupdate import ( "context" "encoding/hex" "os" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestVerify(t *testing.T) { ctx := context.Background() sumsBuf, err := os.ReadFile("testdata/verify/SHA256SUMS") require.NoError(t, err) hash, err := hex.DecodeString("b20b47f579a2c790ca752fb5d8e5651fade7d5867cbac0a4f71e805fc5c468d0") require.NoError(t, err) t.Run("NoError", func(t *testing.T) { err = verifyHashsumDownloaded(ctx, sumsBuf, "archive.zip", hash) require.NoError(t, err) }) t.Run("BadSig", func(t *testing.T) { sumsBuf[0x60] ^= 1 // change the signature by one bit err = verifyHashsumDownloaded(ctx, sumsBuf, "archive.zip", hash) assert.ErrorContains(t, err, "invalid signature") sumsBuf[0x60] ^= 1 // undo the change }) t.Run("BadSum", func(t *testing.T) { hash[0] ^= 1 // change the SHA256 by one bit err = verifyHashsumDownloaded(ctx, sumsBuf, "archive.zip", hash) assert.ErrorContains(t, err, "archive hash mismatch") hash[0] ^= 1 // undo the change }) t.Run("BadName", func(t *testing.T) { err = verifyHashsumDownloaded(ctx, sumsBuf, "archive.zipX", hash) assert.ErrorContains(t, err, "unable to find hash") }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/selfupdate/writable_unsupported.go
cmd/selfupdate/writable_unsupported.go
//go:build (plan9 || js) && !noselfupdate package selfupdate func writable(path string) bool { return true }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/rmdir/rmdir.go
cmd/rmdir/rmdir.go
// Package rmdir provides the rmdir command. package rmdir import ( "context" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs/operations" "github.com/spf13/cobra" ) func init() { cmd.Root.AddCommand(commandDefinition) } var commandDefinition = &cobra.Command{ Use: "rmdir remote:path", Short: `Remove the empty directory at path.`, Long: `This removes empty directory given by path. Will not remove the path if it has any objects in it, not even empty subdirectories. Use command [rmdirs](/commands/rclone_rmdirs/) (or [delete](/commands/rclone_delete/) with option ` + "`--rmdirs`" + `) to do that. To delete a path and any objects in it, use [purge](/commands/rclone_purge/) command.`, Annotations: map[string]string{ "groups": "Important", }, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(1, 1, command, args) fdst := cmd.NewFsDir(args) cmd.Run(true, false, command, func() error { return operations.Rmdir(context.Background(), fdst, "") }) }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/copyto/copyto.go
cmd/copyto/copyto.go
// Package copyto provides the copyto command. package copyto import ( "context" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/operations/operationsflags" "github.com/rclone/rclone/fs/sync" "github.com/spf13/cobra" ) var ( loggerOpt = operations.LoggerOpt{} loggerFlagsOpt = operationsflags.AddLoggerFlagsOptions{} ) func init() { cmd.Root.AddCommand(commandDefinition) cmdFlags := commandDefinition.Flags() operationsflags.AddLoggerFlags(cmdFlags, &loggerOpt, &loggerFlagsOpt) loggerOpt.LoggerFn = operations.NewDefaultLoggerFn(&loggerOpt) } var commandDefinition = &cobra.Command{ Use: "copyto source:path dest:path", Short: `Copy files from source to dest, skipping identical files.`, Long: `If source:path is a file or directory then it copies it to a file or directory named dest:path. This can be used to upload single files to other than their current name. If the source is a directory then it acts exactly like the [copy](/commands/rclone_copy/) command. So ` + "```console" + ` rclone copyto src dst ` + "```" + ` where src and dst are rclone paths, either ` + "`remote:path`" + ` or ` + "`/path/to/local`" + ` or ` + "`C:\\windows\\path\\if\\on\\windows`" + `. This will: ` + "```text" + ` if src is file copy it to dst, overwriting an existing file if it exists if src is directory copy it to dst, overwriting existing files if they exist see copy command for full details ` + "```" + ` This doesn't transfer files that are identical on src and dst, testing by size and modification time or MD5SUM. It doesn't delete files from the destination. *If you are looking to copy just a byte range of a file, please see ` + "`rclone cat --offset X --count Y`" + `.* **Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics. 
` + operationsflags.Help(), Annotations: map[string]string{ "versionIntroduced": "v1.35", "groups": "Copy,Filter,Listing,Important", }, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(2, 2, command, args) fsrc, srcFileName, fdst, dstFileName := cmd.NewFsSrcDstFiles(args) cmd.Run(true, true, command, func() error { ctx := context.Background() close, err := operationsflags.ConfigureLoggers(ctx, fdst, command, &loggerOpt, loggerFlagsOpt) if err != nil { return err } defer close() if loggerFlagsOpt.AnySet() { ctx = operations.WithSyncLogger(ctx, loggerOpt) } if srcFileName == "" { return sync.CopyDir(ctx, fdst, fsrc, false) } return operations.CopyFile(ctx, fdst, fsrc, dstFileName, srcFileName) }) }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/md5sum/md5sum.go
cmd/md5sum/md5sum.go
// Package md5sum provides the md5sum command. package md5sum import ( "context" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/cmd/hashsum" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/operations" "github.com/spf13/cobra" ) func init() { cmd.Root.AddCommand(commandDefinition) cmdFlags := commandDefinition.Flags() hashsum.AddHashsumFlags(cmdFlags) } var commandDefinition = &cobra.Command{ Use: "md5sum remote:path", Short: `Produces an md5sum file for all the objects in the path.`, Long: `Produces an md5sum file for all the objects in the path. This is in the same format as the standard md5sum tool produces. By default, the hash is requested from the remote. If MD5 is not supported by the remote, no hash will be returned. With the download flag, the file will be downloaded from the remote and hashed locally enabling MD5 for any remote. For other algorithms, see the [hashsum](/commands/rclone_hashsum/) command. Running ` + "`rclone md5sum remote:path`" + ` is equivalent to running ` + "`rclone hashsum MD5 remote:path`" + `. 
This command can also hash data received on standard input (stdin), by not passing a remote:path, or by passing a hyphen as remote:path when there is data to read (if not, the hyphen will be treated literally, as a relative path).`, Annotations: map[string]string{ "versionIntroduced": "v1.02", "groups": "Filter,Listing", }, RunE: func(command *cobra.Command, args []string) error { cmd.CheckArgs(0, 1, command, args) if found, err := hashsum.CreateFromStdinArg(hash.MD5, args, 0); found { return err } fsrc := cmd.NewFsSrc(args) cmd.Run(false, false, command, func() error { if hashsum.ChecksumFile != "" { fsum, sumFile := cmd.NewFsFile(hashsum.ChecksumFile) return operations.CheckSum(context.Background(), fsrc, fsum, sumFile, hash.MD5, nil, hashsum.DownloadFlag) } if hashsum.HashsumOutfile == "" { return operations.HashLister(context.Background(), hash.MD5, hashsum.OutputBase64, hashsum.DownloadFlag, fsrc, nil) } output, close, err := hashsum.GetHashsumOutput(hashsum.HashsumOutfile) if err != nil { return err } defer close() return operations.HashLister(context.Background(), hash.MD5, hashsum.OutputBase64, hashsum.DownloadFlag, fsrc, output) }) return nil }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/delete/delete.go
cmd/delete/delete.go
// Package delete provides the delete command. package delete import ( "context" "strings" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/operations" "github.com/spf13/cobra" ) var ( rmdirs = false ) func init() { cmd.Root.AddCommand(commandDefinition) cmdFlags := commandDefinition.Flags() flags.BoolVarP(cmdFlags, &rmdirs, "rmdirs", "", rmdirs, "rmdirs removes empty directories but leaves root intact", "") } var commandDefinition = &cobra.Command{ Use: "delete remote:path", Short: `Remove the files in path.`, // Warning! "|" will be replaced by backticks below Long: strings.ReplaceAll(`Remove the files in path. Unlike [purge](/commands/rclone_purge/) it obeys include/exclude filters so can be used to selectively delete files. |rclone delete| only deletes files but leaves the directory structure alone. If you want to delete a directory and all of its contents use the [purge](/commands/rclone_purge/) command. If you supply the |--rmdirs| flag, it will remove all empty directories along with it. You can also use the separate command [rmdir](/commands/rclone_rmdir/) or [rmdirs](/commands/rclone_rmdirs/) to delete empty directories only. For example, to delete all files bigger than 100 MiB, you may first want to check what would be deleted (use either): |||sh rclone --min-size 100M lsl remote:path rclone --dry-run --min-size 100M delete remote:path ||| Then proceed with the actual delete: |||sh rclone --min-size 100M delete remote:path ||| That reads "delete everything with a minimum size of 100 MiB", hence delete all files bigger than 100 MiB. 
**Important**: Since this can cause data loss, test first with the |--dry-run| or the |--interactive|/|-i| flag.`, "|", "`"), Annotations: map[string]string{ "versionIntroduced": "v1.27", "groups": "Important,Filter,Listing", }, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(1, 1, command, args) fsrc := cmd.NewFsSrc(args) cmd.Run(true, false, command, func() error { if err := operations.Delete(context.Background(), fsrc); err != nil { return err } if rmdirs { fdst := cmd.NewFsDir(args) return operations.Rmdirs(context.Background(), fdst, "", true) } return nil }) }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/convmv/convmv_test.go
cmd/convmv/convmv_test.go
// Package convmv provides the convmv command. package convmv import ( "cmp" "context" "fmt" "path" "slices" "strings" "testing" _ "github.com/rclone/rclone/backend/all" // import all backends "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/filter" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/sync" "github.com/rclone/rclone/fs/walk" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/lib/transform" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/text/unicode/norm" ) // Some times used in the tests var ( t1 = fstest.Time("2001-02-03T04:05:06.499999999Z") debug = `` ) // TestMain drives the tests func TestMain(m *testing.M) { fstest.TestMain(m) } func TestTransform(t *testing.T) { type args struct { TransformOpt []string TransformBackOpt []string Lossless bool // whether the TransformBackAlgo is always losslessly invertible } tests := []struct { name string args args }{ {name: "NFC", args: args{ TransformOpt: []string{"all,nfc"}, TransformBackOpt: []string{"all,nfd"}, Lossless: false, }}, {name: "NFD", args: args{ TransformOpt: []string{"all,nfd"}, TransformBackOpt: []string{"all,nfc"}, Lossless: false, }}, {name: "base64", args: args{ TransformOpt: []string{"all,base64encode"}, TransformBackOpt: []string{"all,base64encode"}, Lossless: false, }}, {name: "prefix", args: args{ TransformOpt: []string{"all,prefix=PREFIX"}, TransformBackOpt: []string{"all,trimprefix=PREFIX"}, Lossless: true, }}, {name: "suffix", args: args{ TransformOpt: []string{"all,suffix=SUFFIX"}, TransformBackOpt: []string{"all,trimsuffix=SUFFIX"}, Lossless: true, }}, {name: "truncate", args: args{ TransformOpt: []string{"all,truncate=10"}, TransformBackOpt: []string{"all,truncate=10"}, Lossless: false, }}, {name: "encoder", args: args{ TransformOpt: []string{"all,encoder=Colon,SquareBracket"}, TransformBackOpt: []string{"all,decoder=Colon,SquareBracket"}, Lossless: true, }}, {name: "ISO-8859-1", args: args{ 
TransformOpt: []string{"all,ISO-8859-1"}, TransformBackOpt: []string{"all,ISO-8859-1"}, Lossless: false, }}, {name: "charmap", args: args{ TransformOpt: []string{"all,charmap=ISO-8859-7"}, TransformBackOpt: []string{"all,charmap=ISO-8859-7"}, Lossless: false, }}, {name: "lowercase", args: args{ TransformOpt: []string{"all,lowercase"}, TransformBackOpt: []string{"all,lowercase"}, Lossless: false, }}, {name: "ascii", args: args{ TransformOpt: []string{"all,ascii"}, TransformBackOpt: []string{"all,ascii"}, Lossless: false, }}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() ctx := context.Background() r.Mkdir(ctx, r.Flocal) r.Mkdir(ctx, r.Fremote) items := makeTestFiles(t, r, "dir1") err := r.Fremote.Mkdir(ctx, "empty/empty") require.NoError(t, err) err = r.Flocal.Mkdir(ctx, "empty/empty") require.NoError(t, err) deleteDSStore(t, r) r.CheckRemoteListing(t, items, []string{"dir1", "empty", "empty/empty"}) r.CheckLocalListing(t, items, []string{"dir1", "empty", "empty/empty"}) err = transform.SetOptions(ctx, tt.args.TransformOpt...) require.NoError(t, err) err = sync.Transform(ctx, r.Fremote, true, true) assert.NoError(t, err) compareNames(ctx, t, r, items) transformedItems := transformItems(ctx, t, items) r.CheckRemoteListing(t, transformedItems, []string{transform.Path(ctx, "dir1", true), transform.Path(ctx, "empty", true), transform.Path(ctx, "empty/empty", true)}) err = transform.SetOptions(ctx, tt.args.TransformBackOpt...) 
require.NoError(t, err) err = sync.Transform(ctx, r.Fremote, true, true) assert.NoError(t, err) compareNames(ctx, t, r, transformedItems) if tt.args.Lossless { deleteDSStore(t, r) r.CheckRemoteListing(t, items, []string{"dir1", "empty", "empty/empty"}) } }) } } // const alphabet = "ƀɀɠʀҠԀڀڠݠހ߀ကႠᄀᄠᅀᆀᇠሀሠበዠጠᎠᏀᐠᑀᑠᒀᒠᓀᓠᔀᔠᕀᕠᖀᖠᗀᗠᘀᘠᙀᚠᛀកᠠᡀᣀᦀ᧠ᨠᯀᰀᴀ⇠⋀⍀⍠⎀⎠⏀␀─┠╀╠▀■◀◠☀☠♀♠⚀⚠⛀⛠✀✠❀➀➠⠀⠠⡀⡠⢀⢠⣀⣠⤀⤠⥀⥠⦠⨠⩀⪀⪠⫠⬀⬠⭀ⰀⲀⲠⳀⴀⵀ⺠⻀㇀㐀㐠㑀㑠㒀㒠㓀㓠㔀㔠㕀㕠㖀㖠㗀㗠㘀㘠㙀㙠㚀㚠㛀㛠㜀㜠㝀㝠㞀㞠㟀㟠㠀㠠㡀㡠㢀㢠㣀㣠㤀㤠㥀㥠㦀㦠㧀㧠㨀㨠㩀㩠㪀㪠㫀㫠㬀㬠㭀㭠㮀㮠㯀㯠㰀㰠㱀㱠㲀㲠㳀㳠㴀㴠㵀㵠㶀㶠㷀㷠㸀㸠㹀㹠㺀㺠㻀㻠㼀㼠㽀㽠㾀㾠㿀㿠䀀䀠䁀䁠䂀䂠䃀䃠䄀䄠䅀䅠䆀䆠䇀䇠䈀䈠䉀䉠䊀䊠䋀䋠䌀䌠䍀䍠䎀䎠䏀䏠䐀䐠䑀䑠䒀䒠䓀䓠䔀䔠䕀䕠䖀䖠䗀䗠䘀䘠䙀䙠䚀䚠䛀䛠䜀䜠䝀䝠䞀䞠䟀䟠䠀䠠䡀䡠䢀䢠䣀䣠䤀䤠䥀䥠䦀䦠䧀䧠䨀䨠䩀䩠䪀䪠䫀䫠䬀䬠䭀䭠䮀䮠䯀䯠䰀䰠䱀䱠䲀䲠䳀䳠䴀䴠䵀䵠䶀䷀䷠一丠乀习亀亠什仠伀传佀你侀侠俀俠倀倠偀偠傀傠僀僠儀儠兀兠冀冠净几刀删剀剠劀加勀勠匀匠區占厀厠叀叠吀吠呀呠咀咠哀哠唀唠啀啠喀喠嗀嗠嘀嘠噀噠嚀嚠囀因圀圠址坠垀垠埀埠堀堠塀塠墀墠壀壠夀夠奀奠妀妠姀姠娀娠婀婠媀媠嫀嫠嬀嬠孀孠宀宠寀寠尀尠局屠岀岠峀峠崀崠嵀嵠嶀嶠巀巠帀帠幀幠庀庠廀廠开张彀彠往徠忀忠怀怠恀恠悀悠惀惠愀愠慀慠憀憠懀懠戀戠所扠技抠拀拠挀挠捀捠掀掠揀揠搀搠摀摠撀撠擀擠攀攠敀敠斀斠旀无昀映晀晠暀暠曀曠最朠杀杠枀枠柀柠栀栠桀桠梀梠检棠椀椠楀楠榀榠槀槠樀樠橀橠檀檠櫀櫠欀欠歀歠殀殠毀毠氀氠汀池沀沠泀泠洀洠浀浠涀涠淀淠渀渠湀湠満溠滀滠漀漠潀潠澀澠激濠瀀瀠灀灠炀炠烀烠焀焠煀煠熀熠燀燠爀爠牀牠犀犠狀狠猀猠獀獠玀玠珀珠琀琠瑀瑠璀璠瓀瓠甀甠畀畠疀疠痀痠瘀瘠癀癠皀皠盀盠眀眠着睠瞀瞠矀矠砀砠础硠碀碠磀磠礀礠祀祠禀禠秀秠稀稠穀穠窀窠竀章笀笠筀筠简箠節篠簀簠籀籠粀粠糀糠紀素絀絠綀綠緀締縀縠繀繠纀纠绀绠缀缠罀罠羀羠翀翠耀耠聀聠肀肠胀胠脀脠腀腠膀膠臀臠舀舠艀艠芀芠苀苠茀茠荀荠莀莠菀菠萀萠葀葠蒀蒠蓀蓠蔀蔠蕀蕠薀薠藀藠蘀蘠虀虠蚀蚠蛀蛠蜀蜠蝀蝠螀螠蟀蟠蠀蠠血衠袀袠裀裠褀褠襀襠覀覠觀觠言訠詀詠誀誠諀諠謀謠譀譠讀讠诀诠谀谠豀豠貀負賀賠贀贠赀赠趀趠跀跠踀踠蹀蹠躀躠軀軠輀輠轀轠辀辠迀迠退造遀遠邀邠郀郠鄀鄠酀酠醀醠釀釠鈀鈠鉀鉠銀銠鋀鋠錀錠鍀鍠鎀鎠鏀鏠鐀鐠鑀鑠钀钠铀铠销锠镀镠門閠闀闠阀阠陀陠隀隠雀雠需霠靀靠鞀鞠韀韠頀頠顀顠颀颠飀飠餀餠饀饠馀馠駀駠騀騠驀驠骀骠髀髠鬀鬠魀魠鮀鮠鯀鯠鰀鰠鱀鱠鲀鲠鳀鳠鴀鴠鵀鵠鶀鶠鷀鷠鸀鸠鹀鹠麀麠黀黠鼀鼠齀齠龀龠ꀀꀠꁀꁠꂀꂠꃀꃠꄀꄠꅀꅠꆀꆠꇀꇠꈀꈠꉀꉠꊀꊠꋀꋠꌀꌠꍀꍠꎀꎠꏀꏠꐀꐠꑀꑠ꒠ꔀꔠꕀꕠꖀꖠꗀꗠꙀꚠꛀ꜀꜠ꝀꞀꡀ測試_Русский___ě_áñ" const alphabet = "abcdefg123456789Ü" var extras = []string{"apple", "banana", "appleappleapplebanana", "splitbananasplit"} func makeTestFiles(t *testing.T, r *fstest.Run, dir string) []fstest.Item { t.Helper() n := 0 // Create test files items := []fstest.Item{} for _, c := range alphabet { var out strings.Builder for i := range rune(7) { out.WriteRune(c + i) } fileName := path.Join(dir, fmt.Sprintf("%04d-%s.txt", n, out.String())) fileName = strings.ToValidUTF8(fileName, "") fileName = strings.NewReplacer(":", "", "<", "", ">", "", "?", "").Replace(fileName) // remove characters illegal on windows if debug != "" { fileName = debug } item := 
r.WriteObject(context.Background(), fileName, fileName, t1) r.WriteFile(fileName, fileName, t1) items = append(items, item) n++ if debug != "" { break } } for _, extra := range extras { item := r.WriteObject(context.Background(), extra, extra, t1) r.WriteFile(extra, extra, t1) items = append(items, item) } return items } func deleteDSStore(t *testing.T, r *fstest.Run) { ctxDSStore, fi := filter.AddConfig(context.Background()) err := fi.AddRule(`+ *.DS_Store`) assert.NoError(t, err) err = fi.AddRule(`- **`) assert.NoError(t, err) err = operations.Delete(ctxDSStore, r.Fremote) assert.NoError(t, err) } func compareNames(ctx context.Context, t *testing.T, r *fstest.Run, items []fstest.Item) { var entries fs.DirEntries deleteDSStore(t, r) err := walk.ListR(context.Background(), r.Fremote, "", true, -1, walk.ListObjects, func(e fs.DirEntries) error { entries = append(entries, e...) return nil }) assert.NoError(t, err) entries = slices.DeleteFunc(entries, func(E fs.DirEntry) bool { // remove those pesky .DS_Store files if strings.Contains(E.Remote(), ".DS_Store") { err := operations.DeleteFile(context.Background(), E.(fs.Object)) assert.NoError(t, err) return true } return false }) require.Equal(t, len(items), entries.Len()) // sort by CONVERTED name slices.SortStableFunc(items, func(a, b fstest.Item) int { aConv := transform.Path(ctx, a.Path, false) bConv := transform.Path(ctx, b.Path, false) return cmp.Compare(aConv, bConv) }) slices.SortStableFunc(entries, func(a, b fs.DirEntry) int { return cmp.Compare(a.Remote(), b.Remote()) }) for i, e := range entries { expect := transform.Path(ctx, items[i].Path, false) msg := fmt.Sprintf("expected %v, got %v", detectEncoding(expect), detectEncoding(e.Remote())) assert.Equal(t, expect, e.Remote(), msg) } } func transformItems(ctx context.Context, t *testing.T, items []fstest.Item) []fstest.Item { transformedItems := []fstest.Item{} for _, item := range items { newPath := transform.Path(ctx, item.Path, false) newItem := item 
newItem.Path = newPath transformedItems = append(transformedItems, newItem) } return transformedItems } func detectEncoding(s string) string { if norm.NFC.IsNormalString(s) && norm.NFD.IsNormalString(s) { return "BOTH" } if !norm.NFC.IsNormalString(s) && norm.NFD.IsNormalString(s) { return "NFD" } if norm.NFC.IsNormalString(s) && !norm.NFD.IsNormalString(s) { return "NFC" } return "OTHER" } func TestUnicodeEquivalence(t *testing.T) { r := fstest.NewRun(t) defer r.Finalise() ctx := context.Background() r.Mkdir(ctx, r.Fremote) const remote = "Über" item := r.WriteObject(ctx, remote, "", t1) obj, err := r.Fremote.NewObject(ctx, remote) // can't use r.CheckRemoteListing here as it forces NFC require.NoError(t, err) require.NotEmpty(t, obj) err = transform.SetOptions(ctx, "all,nfc") require.NoError(t, err) err = sync.Transform(ctx, r.Fremote, true, true) assert.NoError(t, err) item.Path = norm.NFC.String(item.Path) r.CheckRemoteListing(t, []fstest.Item{item}, nil) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/convmv/convmv.go
cmd/convmv/convmv.go
// Package convmv provides the convmv command. package convmv import ( "context" "errors" "strings" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/sync" "github.com/rclone/rclone/lib/transform" "github.com/spf13/cobra" ) // Globals var ( deleteEmptySrcDirs = false createEmptySrcDirs = false ) func init() { cmd.Root.AddCommand(commandDefinition) cmdFlags := commandDefinition.Flags() flags.BoolVarP(cmdFlags, &deleteEmptySrcDirs, "delete-empty-src-dirs", "", deleteEmptySrcDirs, "Delete empty source dirs after move", "") flags.BoolVarP(cmdFlags, &createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after move", "") } var commandDefinition = &cobra.Command{ Use: "convmv dest:path --name-transform XXX", Short: `Convert file and directory names in place.`, // Warning¡ "¡" will be replaced by backticks below Long: strings.ReplaceAll(`convmv supports advanced path name transformations for converting and renaming files and directories by applying prefixes, suffixes, and other alterations. `+transform.Help()+`The regex command generally accepts Perl-style regular expressions, the exact syntax is defined in the [Go regular expression reference](https://golang.org/pkg/regexp/syntax/). The replacement string may contain capturing group variables, referencing capturing groups using the syntax ¡$name¡ or ¡${name}¡, where the name can refer to a named capturing group or it can simply be the index as a number. To insert a literal $, use $$. Multiple transformations can be used in sequence, applied in the order they are specified on the command line. The ¡--name-transform¡ flag is also available in ¡sync¡, ¡copy¡, and ¡move¡. ### Files vs Directories By default ¡--name-transform¡ will only apply to file names. The means only the leaf file name will be transformed. 
However some of the transforms would be better applied to the whole path or just directories. To choose which which part of the file path is affected some tags can be added to the ¡--name-transform¡. | Tag | Effect | |------|------| | ¡file¡ | Only transform the leaf name of files (DEFAULT) | | ¡dir¡ | Only transform name of directories - these may appear anywhere in the path | | ¡all¡ | Transform the entire path for files and directories | This is used by adding the tag into the transform name like this: ¡--name-transform file,prefix=ABC¡ or ¡--name-transform dir,prefix=DEF¡. For some conversions using all is more likely to be useful, for example ¡--name-transform all,nfc¡. Note that ¡--name-transform¡ may not add path separators ¡/¡ to the name. This will cause an error. ### Ordering and Conflicts - Transformations will be applied in the order specified by the user. - If the ¡file¡ tag is in use (the default) then only the leaf name of files will be transformed. - If the ¡dir¡ tag is in use then directories anywhere in the path will be transformed - If the ¡all¡ tag is in use then directories and files anywhere in the path will be transformed - Each transformation will be run one path segment at a time. - If a transformation adds a ¡/¡ or ends up with an empty path segment then that will be an error. - It is up to the user to put the transformations in a sensible order. - Conflicting transformations, such as ¡prefix¡ followed by ¡trimprefix¡ or ¡nfc¡ followed by ¡nfd¡, are possible. - Instead of enforcing mutual exclusivity, transformations are applied in sequence as specified by the user, allowing for intentional use cases (e.g., trimming one prefix before adding another). - Users should be aware that certain combinations may lead to unexpected results and should verify transformations using ¡--dry-run¡ before execution. 
### Race Conditions and Non-Deterministic Behavior Some transformations, such as ¡replace=old:new¡, may introduce conflicts where multiple source files map to the same destination name. This can lead to race conditions when performing concurrent transfers. It is up to the user to anticipate these. - If two files from the source are transformed into the same name at the destination, the final state may be non-deterministic. - Running rclone check after a sync using such transformations may erroneously report missing or differing files due to overwritten results. To minimize risks, users should: - Carefully review transformations that may introduce conflicts. - Use ¡--dry-run¡ to inspect changes before executing a sync (but keep in mind that it won't show the effect of non-deterministic transformations). - Avoid transformations that cause multiple distinct source files to map to the same destination name. - Consider disabling concurrency with ¡--transfers=1¡ if necessary. - Certain transformations (e.g. ¡prefix¡) will have a multiplying effect every time they are used. Avoid these when using ¡bisync¡.`, "¡", "`"), Annotations: map[string]string{ "versionIntroduced": "v1.70", "groups": "Filter,Listing,Important,Copy", }, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(1, 1, command, args) fdst, srcFileName := cmd.NewFsFile(args[0]) cmd.Run(false, true, command, func() error { if !transform.Transforming(context.Background()) { return errors.New("--name-transform must be set") } if srcFileName == "" { return sync.Transform(context.Background(), fdst, deleteEmptySrcDirs, createEmptySrcDirs) } return operations.TransformFile(context.Background(), fdst, srcFileName) }) }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/version/version_test.go
cmd/version/version_test.go
package version import ( "os" "runtime" "testing" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs/config" "github.com/stretchr/testify/assert" ) func TestVersionWorksWithoutAccessibleConfigFile(t *testing.T) { // create temp config file tempFile, err := os.CreateTemp("", "unreadable_config.conf") assert.NoError(t, err) path := tempFile.Name() defer func() { err := os.Remove(path) assert.NoError(t, err) }() assert.NoError(t, tempFile.Close()) if runtime.GOOS != "windows" { assert.NoError(t, os.Chmod(path, 0000)) } // re-wire oldOsStdout := os.Stdout oldConfigPath := config.GetConfigPath() assert.NoError(t, config.SetConfigPath(path)) os.Stdout = nil defer func() { os.Stdout = oldOsStdout assert.NoError(t, config.SetConfigPath(oldConfigPath)) }() cmd.Root.SetArgs([]string{"version"}) assert.NotPanics(t, func() { assert.NoError(t, cmd.Root.Execute()) }) // This causes rclone to exit and the tests to stop! // cmd.Root.SetArgs([]string{"--version"}) // assert.NotPanics(t, func() { // assert.NoError(t, cmd.Root.Execute()) // }) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/version/version.go
cmd/version/version.go
// Package version provides the version command. package version import ( "context" "debug/buildinfo" "errors" "fmt" "io" "net/http" "os" "runtime/debug" "strings" "time" "github.com/coreos/go-semver/semver" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/fshttp" "github.com/spf13/cobra" ) var ( check = false deps = false ) func init() { cmd.Root.AddCommand(commandDefinition) cmdFlags := commandDefinition.Flags() flags.BoolVarP(cmdFlags, &check, "check", "", false, "Check for new version", "") flags.BoolVarP(cmdFlags, &deps, "deps", "", false, "Show the Go dependencies", "") } var commandDefinition = &cobra.Command{ Use: "version", Short: `Show the version number.`, Long: `Show the rclone version number, the go version, the build target OS and architecture, the runtime OS and kernel version and bitness, build tags and the type of executable (static or dynamic). For example: ` + "```console" + ` $ rclone version rclone v1.55.0 - os/version: ubuntu 18.04 (64 bit) - os/kernel: 4.15.0-136-generic (x86_64) - os/type: linux - os/arch: amd64 - go/version: go1.16 - go/linking: static - go/tags: none ` + "```" + ` Note: before rclone version 1.55 the os/type and os/arch lines were merged, and the "go/version" line was tagged as "go version". If you supply the --check flag, then it will do an online check to compare your version with the latest release and the latest beta. 
` + "```console" + ` $ rclone version --check yours: 1.42.0.6 latest: 1.42 (released 2018-06-16) beta: 1.42.0.5 (released 2018-06-17) ` + "```" + ` Or ` + "```console" + ` $ rclone version --check yours: 1.41 latest: 1.42 (released 2018-06-16) upgrade: https://downloads.rclone.org/v1.42 beta: 1.42.0.5 (released 2018-06-17) upgrade: https://beta.rclone.org/v1.42-005-g56e1e820 ` + "```" + ` If you supply the --deps flag then rclone will print a list of all the packages it depends on and their versions along with some other information about the build.`, Annotations: map[string]string{ "versionIntroduced": "v1.33", }, RunE: func(command *cobra.Command, args []string) error { ctx := context.Background() cmd.CheckArgs(0, 0, command, args) if deps { return printDependencies() } if check { CheckVersion(ctx) } else { cmd.ShowVersion() } return nil }, } // strip a leading v off the string func stripV(s string) string { if len(s) > 0 && s[0] == 'v' { return s[1:] } return s } // GetVersion gets the version available for download func GetVersion(ctx context.Context, url string) (v *semver.Version, vs string, date time.Time, err error) { resp, err := fshttp.NewClient(ctx).Get(url) if err != nil { return v, vs, date, err } defer fs.CheckClose(resp.Body, &err) if resp.StatusCode != http.StatusOK { return v, vs, date, errors.New(resp.Status) } bodyBytes, err := io.ReadAll(resp.Body) if err != nil { return v, vs, date, err } vs = strings.TrimSpace(string(bodyBytes)) vs = strings.TrimPrefix(vs, "rclone ") vs = strings.TrimRight(vs, "β") date, err = http.ParseTime(resp.Header.Get("Last-Modified")) if err != nil { return v, vs, date, err } v, err = semver.NewVersion(stripV(vs)) return v, vs, date, err } // CheckVersion checks the installed version against available downloads func CheckVersion(ctx context.Context) { vCurrent, err := semver.NewVersion(stripV(fs.Version)) if err != nil { fs.Errorf(nil, "Failed to parse version: %v", err) } const timeFormat = "2006-01-02" printVersion := 
func(what, url string) { v, vs, t, err := GetVersion(ctx, url+"version.txt") if err != nil { fs.Errorf(nil, "Failed to get rclone %s version: %v", what, err) return } fmt.Printf("%-8s%-40v %20s\n", what+":", v, "(released "+t.Format(timeFormat)+")", ) if v.Compare(*vCurrent) > 0 { fmt.Printf(" upgrade: %s\n", url+vs) } } fmt.Printf("yours: %-13s\n", vCurrent) printVersion( "latest", "https://downloads.rclone.org/", ) printVersion( "beta", "https://beta.rclone.org/", ) if strings.HasSuffix(fs.Version, "-DEV") { fmt.Println("Your version is compiled from git so comparisons may be wrong.") } } // Print info about a build module func printModule(module *debug.Module) { if module.Replace != nil { fmt.Printf("- %s %s (replaced by %s %s)\n", module.Path, module.Version, module.Replace.Path, module.Replace.Version) } else { fmt.Printf("- %s %s\n", module.Path, module.Version) } } // printDependencies shows the packages we use in a format like go.mod func printDependencies() error { info, err := buildinfo.ReadFile(os.Args[0]) if err != nil { return fmt.Errorf("error reading build info: %w", err) } fmt.Println("Go Version:") fmt.Printf("- %s\n", info.GoVersion) fmt.Println("Main package:") printModule(&info.Main) fmt.Println("Binary path:") fmt.Printf("- %s\n", info.Path) fmt.Println("Settings:") for _, setting := range info.Settings { fmt.Printf("- %s: %s\n", setting.Key, setting.Value) } fmt.Println("Dependencies:") for _, dep := range info.Deps { printModule(dep) } return nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/ncdu/ncdu_unsupported.go
cmd/ncdu/ncdu_unsupported.go
// Build for ncdu for unsupported platforms to stop go complaining // about "no buildable Go source files " //go:build plan9 || js || aix // Package ncdu implements a text based user interface for exploring a remote package ncdu
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/ncdu/ncdu.go
cmd/ncdu/ncdu.go
//go:build !plan9 && !js && !aix // Package ncdu implements a text based user interface for exploring a remote package ncdu import ( "context" "fmt" "log/slog" "os" "path" "reflect" "sort" "strings" "github.com/atotto/clipboard" "github.com/gdamore/tcell/v2" runewidth "github.com/mattn/go-runewidth" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/cmd/ncdu/scan" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/log" "github.com/rclone/rclone/fs/operations" "github.com/rivo/uniseg" "github.com/spf13/cobra" ) func init() { cmd.Root.AddCommand(commandDefinition) } var commandDefinition = &cobra.Command{ Use: "ncdu remote:path", Short: `Explore a remote with a text based user interface.`, Long: `This displays a text based user interface allowing the navigation of a remote. It is most useful for answering the question - "What is using all my disk space?". {{< asciinema 157793 >}} To make the user interface it first scans the entire remote given and builds an in memory representation. rclone ncdu can be used during this scanning phase and you will see it building up the directory structure as it goes along. You can interact with the user interface using key presses, press '?' to toggle the help on and off. The supported keys are: ` + "```text" + ` ` + strings.Join(helpText()[1:], "\n") + ` ` + "```" + ` Listed files/directories may be prefixed by a one-character flag, some of them combined with a description in brackets at end of line. These flags have the following meaning: ` + "```text" + ` e means this is an empty directory, i.e. contains no files (but may contain empty subdirectories) ~ means this is a directory where some of the files (possibly in subdirectories) have unknown size, and therefore the directory size may be underestimated (and average size inaccurate, as it is average of the files with known sizes). . 
means an error occurred while reading a subdirectory, and therefore the directory size may be underestimated (and average size inaccurate) ! means an error occurred while reading this directory ` + "```" + ` This an homage to the [ncdu tool](https://dev.yorhel.nl/ncdu) but for rclone remotes. It is missing lots of features at the moment but is useful as it stands. Unlike ncdu it does not show excluded files. Note that it might take some time to delete big files/directories. The UI won't respond in the meantime since the deletion is done synchronously. For a non-interactive listing of the remote, see the [tree](/commands/rclone_tree/) command. To just get the total size of the remote you can also use the [size](/commands/rclone_size/) command.`, Annotations: map[string]string{ "versionIntroduced": "v1.37", "groups": "Filter,Listing", }, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(1, 1, command, args) fsrc := cmd.NewFsSrc(args) cmd.Run(false, false, command, func() error { return NewUI(fsrc).Run() }) }, } // helpText returns help text for ncdu func helpText() (tr []string) { tr = []string{ "rclone ncdu", " ↑,↓ or k,j to Move", " →,l to enter", " ←,h to return", " g toggle graph", " c toggle counts", " a toggle average size in directory", " m toggle modified time", " u toggle human-readable format", " n,s,C,A,M sort by name,size,count,asize,mtime", " d delete file/directory", " v select file/directory", " V enter visual select mode", " D delete selected files/directories", } if !clipboard.Unsupported { tr = append(tr, " y copy current path to clipboard") } tr = append(tr, []string{ " Y display current path", " ^L refresh screen (fix screen corruption)", " r recalculate file sizes", " ? to toggle help on and off", " ESC to close the menu box", " q/^c to quit", }...) 
return } // UI contains the state of the user interface type UI struct { s tcell.Screen f fs.Fs // fs being displayed cancel func() // cancel the current scanning process fsName string // human name of Fs root *scan.Dir // root directory d *scan.Dir // current directory being displayed path string // path of current directory showBox bool // whether to show a box boxText []string // text to show in box boxMenu []string // box menu options boxMenuButton int boxMenuHandler func(fs fs.Fs, path string, option int) (string, error) entries fs.DirEntries // entries of current directory sortPerm []int // order to display entries in after sorting invSortPerm []int // inverse order dirListHeight int // height of listing listing bool // whether listing is in progress showGraph bool // toggle showing graph showCounts bool // toggle showing counts showDirAverageSize bool // toggle average size showModTime bool // toggle showing timestamps humanReadable bool // toggle human-readable format visualSelectMode bool // toggle visual selection mode sortByName int8 // +1 for normal (lexical), 0 for off, -1 for reverse sortBySize int8 // +1 for normal (largest first), 0 for off, -1 for reverse (smallest first) sortByCount int8 sortByAverageSize int8 sortByModTime int8 // +1 for normal (newest first), 0 for off, -1 for reverse (oldest first) dirPosMap map[string]dirPos // store for directory positions selectedEntries map[string]dirPos // selected entries of current directory } // Where we have got to in the directory listing type dirPos struct { entry int offset int } // graphemeWidth returns the number of cells in rs. // // The original [runewidth.StringWidth] iterates through graphemes // and uses this same logic. To avoid iterating through graphemes // repeatedly, we separate that out into its own function. 
func graphemeWidth(rs []rune) (wd int) { // copied/adapted from [runewidth.StringWidth] for _, r := range rs { wd = runewidth.RuneWidth(r) if wd > 0 { break } } return } // Print a string func (u *UI) Print(x, y int, style tcell.Style, msg string) { g := uniseg.NewGraphemes(msg) for g.Next() { rs := g.Runes() u.s.SetContent(x, y, rs[0], rs[1:], style) x += graphemeWidth(rs) } } // Printf a string func (u *UI) Printf(x, y int, style tcell.Style, format string, args ...any) { s := fmt.Sprintf(format, args...) u.Print(x, y, style, s) } // Line prints a string to given xmax, with given space func (u *UI) Line(x, y, xmax int, style tcell.Style, spacer rune, msg string) { g := uniseg.NewGraphemes(msg) for g.Next() { rs := g.Runes() u.s.SetContent(x, y, rs[0], rs[1:], style) x += graphemeWidth(rs) if x >= xmax { return } } for ; x < xmax; x++ { u.s.SetContent(x, y, spacer, nil, style) } } // Linef a string func (u *UI) Linef(x, y, xmax int, style tcell.Style, spacer rune, format string, args ...any) { s := fmt.Sprintf(format, args...) 
u.Line(x, y, xmax, style, spacer, s) } // LineOptions Print line of selectable options func (u *UI) LineOptions(x, y, xmax int, style tcell.Style, options []string, selected int) { for x := x; x < xmax; x++ { u.s.SetContent(x, y, ' ', nil, style) // fill } x += ((xmax - x) - lineOptionLength(options)) / 2 // center for i, o := range options { u.s.SetContent(x, y, ' ', nil, style) x++ ostyle := style if i == selected { ostyle = tcell.StyleDefault } u.s.SetContent(x, y, '<', nil, ostyle) x++ g := uniseg.NewGraphemes(o) for g.Next() { rs := g.Runes() u.s.SetContent(x, y, rs[0], rs[1:], ostyle) x += graphemeWidth(rs) } u.s.SetContent(x, y, '>', nil, ostyle) x++ u.s.SetContent(x, y, ' ', nil, style) x++ } } func lineOptionLength(o []string) int { count := 0 for _, i := range o { count += len(i) } return count + 4*len(o) // spacer and arrows <entry> } // Box the u.boxText onto the screen func (u *UI) Box() { w, h := u.s.Size() // Find dimensions of text boxWidth := 10 for _, s := range u.boxText { if len(s) > boxWidth && len(s) < w-4 { boxWidth = len(s) } } boxHeight := len(u.boxText) // position x := (w - boxWidth) / 2 y := (h - boxHeight) / 2 xmax := x + boxWidth if len(u.boxMenu) != 0 { count := lineOptionLength(u.boxMenu) xmax = max(x+boxWidth, x+count) } ymax := y + len(u.boxText) // draw text style := tcell.StyleDefault.Background(tcell.ColorRed).Reverse(true) for i, s := range u.boxText { u.Line(x, y+i, xmax, style, ' ', s) style = tcell.StyleDefault.Reverse(true) } if len(u.boxMenu) != 0 { u.LineOptions(x, ymax, xmax, style, u.boxMenu, u.boxMenuButton) ymax++ } // draw top border for i := y; i < ymax; i++ { u.s.SetContent(x-1, i, tcell.RuneVLine, nil, style) u.s.SetContent(xmax, i, tcell.RuneVLine, nil, style) } for j := x; j < xmax; j++ { u.s.SetContent(j, y-1, tcell.RuneHLine, nil, style) u.s.SetContent(j, ymax, tcell.RuneHLine, nil, style) } u.s.SetContent(x-1, y-1, tcell.RuneULCorner, nil, style) u.s.SetContent(xmax, y-1, tcell.RuneURCorner, nil, style) 
u.s.SetContent(x-1, ymax, tcell.RuneLLCorner, nil, style) u.s.SetContent(xmax, ymax, tcell.RuneLRCorner, nil, style) } func (u *UI) moveBox(to int) { if len(u.boxMenu) == 0 { return } if to > 0 { // move right u.boxMenuButton++ } else { // move left u.boxMenuButton-- } if u.boxMenuButton >= len(u.boxMenu) { u.boxMenuButton = len(u.boxMenu) - 1 } else if u.boxMenuButton < 0 { u.boxMenuButton = 0 } } // find the biggest entry in the current listing func (u *UI) biggestEntry() (biggest int64) { if u.d == nil { return } for i := range u.entries { attrs, _ := u.d.AttrI(u.sortPerm[i]) if attrs.Size > biggest { biggest = attrs.Size } } return } // hasEmptyDir returns true if there is empty folder in current listing func (u *UI) hasEmptyDir() bool { if u.d == nil { return false } for i := range u.entries { attrs, _ := u.d.AttrI(u.sortPerm[i]) if attrs.IsDir && attrs.Count == 0 { return true } } return false } // Draw the current screen func (u *UI) Draw() { ctx := context.Background() w, h := u.s.Size() u.dirListHeight = h - 3 // Plot u.s.Clear() // Header line u.Linef(0, 0, w, tcell.StyleDefault.Reverse(true), ' ', "rclone ncdu %s - use the arrow keys to navigate, press ? 
for help", fs.Version) // Directory line u.Linef(0, 1, w, tcell.StyleDefault, '-', "-- %s ", u.path) // graphs const ( graphBars = 10 graph = "########## " ) // Directory listing if u.d != nil { y := 2 perBar := u.biggestEntry() / graphBars if perBar == 0 { perBar = 1 } showEmptyDir := u.hasEmptyDir() dirPos := u.dirPosMap[u.path] // Check to see if a rescan has invalidated the position if dirPos.offset >= len(u.sortPerm) { delete(u.dirPosMap, u.path) dirPos.offset = 0 dirPos.entry = 0 } for i, j := range u.sortPerm[dirPos.offset:] { entry := u.entries[j] n := i + dirPos.offset if y >= h-1 { break } var attrs scan.Attrs var err error if u.showModTime { attrs, err = u.d.AttrWithModTimeI(ctx, u.sortPerm[n]) } else { attrs, err = u.d.AttrI(u.sortPerm[n]) } _, isSelected := u.selectedEntries[entry.String()] style := tcell.StyleDefault if attrs.EntriesHaveErrors { style = style.Foreground(tcell.ColorYellow) } if err != nil { style = style.Foreground(tcell.ColorRed) } if isSelected { style = style.Foreground(tcell.ColorLightYellow) } if n == dirPos.entry { style = style.Reverse(true) } mark := ' ' if attrs.IsDir { mark = '/' } fileFlag := ' ' message := "" if !attrs.Readable { message = " [not read yet]" } if attrs.CountUnknownSize > 0 { message = fmt.Sprintf(" [%d of %d files have unknown size, size may be underestimated]", attrs.CountUnknownSize, attrs.Count) fileFlag = '~' } if attrs.EntriesHaveErrors { message = " [some subdirectories could not be read, size may be underestimated]" fileFlag = '.' } if err != nil { message = fmt.Sprintf(" [%s]", err) fileFlag = '!' 
} extras := "" if u.showCounts { ss := operations.CountStringField(attrs.Count, u.humanReadable, 9) + " " if attrs.Count > 0 { extras += ss } else { extras += strings.Repeat(" ", len(ss)) } } if u.showDirAverageSize { avg := attrs.AverageSize() ss := operations.SizeStringField(int64(avg), u.humanReadable, 9) + " " if avg > 0 { extras += ss } else { extras += strings.Repeat(" ", len(ss)) } } if u.showModTime { extras += attrs.ModTime.Local().Format("2006-01-02 15:04:05") + " " } if showEmptyDir { if attrs.IsDir && attrs.Count == 0 && fileFlag == ' ' { fileFlag = 'e' } } if u.showGraph { bars := (attrs.Size + perBar/2 - 1) / perBar // clip if necessary - only happens during startup if bars > 10 { bars = 10 } else if bars < 0 { bars = 0 } extras += "[" + graph[graphBars-bars:2*graphBars-bars] + "] " } u.Linef(0, y, w, style, ' ', "%c %s %s%c%s%s", fileFlag, operations.SizeStringField(attrs.Size, u.humanReadable, 12), extras, mark, path.Base(entry.Remote()), message) y++ } } // Footer if u.d == nil { u.Line(0, h-1, w, tcell.StyleDefault.Reverse(true), ' ', "Waiting for root directory...") } else { message := "" if u.listing { message = " [listing in progress]" } size, count := u.d.Attr() u.Linef(0, h-1, w, tcell.StyleDefault.Reverse(true), ' ', "Total usage: %s, Objects: %s%s", operations.SizeString(size, u.humanReadable), operations.CountString(count, u.humanReadable), message) } // Show the box on top if required if u.showBox { u.Box() } } // Move the cursor this many spaces adjusting the viewport as necessary func (u *UI) move(d int) { if u.d == nil { return } absD := d if d < 0 { absD = -d } entries := len(u.entries) // Fetch current dirPos dirPos := u.dirPosMap[u.path] dirPos.entry += d // check entry in range if dirPos.entry < 0 { dirPos.entry = 0 } else if dirPos.entry >= entries { dirPos.entry = entries - 1 } // check cursor still on screen p := dirPos.entry - dirPos.offset // where dirPos.entry appears on the screen if p < 0 { dirPos.offset -= absD } else if p 
>= u.dirListHeight { dirPos.offset += absD } // check dirPos.offset in bounds if entries == 0 || dirPos.offset < 0 { dirPos.offset = 0 } else if dirPos.offset >= entries { dirPos.offset = entries - 1 } // toggle the current file for selection in selection mode if u.visualSelectMode { u.toggleSelectForCursor() } // write dirPos back for later u.dirPosMap[u.path] = dirPos } func (u *UI) removeEntry(pos int) { u.d.Remove(pos) u.setCurrentDir(u.d) } func (u *UI) delete() { if u.d == nil || len(u.entries) == 0 { return } if len(u.selectedEntries) > 0 { u.deleteSelected() } else { u.deleteSingle() } } // delete the entry at the current position func (u *UI) deleteSingle() { ctx := context.Background() cursorPos := u.dirPosMap[u.path] dirPos := u.sortPerm[cursorPos.entry] dirEntry := u.entries[dirPos] u.boxMenu = []string{"cancel", "confirm"} if obj, isFile := dirEntry.(fs.Object); isFile { u.boxMenuHandler = func(f fs.Fs, p string, o int) (string, error) { if o != 1 { return "Aborted!", nil } err := operations.DeleteFile(ctx, obj) if err != nil { return "", err } u.removeEntry(dirPos) if cursorPos.entry >= len(u.entries) { u.move(-1) // move back onto a valid entry } return "Successfully deleted file!", nil } u.popupBox([]string{ "Delete this file?", fspath.JoinRootPath(u.fsName, dirEntry.String())}) } else { u.boxMenuHandler = func(f fs.Fs, p string, o int) (string, error) { if o != 1 { return "Aborted!", nil } err := operations.Purge(ctx, f, dirEntry.String()) if err != nil { return "", err } u.removeEntry(dirPos) if cursorPos.entry >= len(u.entries) { u.move(-1) // move back onto a valid entry } return "Successfully purged folder!", nil } u.popupBox([]string{ "Purge this directory?", "ALL files in it will be deleted", fspath.JoinRootPath(u.fsName, dirEntry.String())}) } } func (u *UI) deleteSelected() { ctx := context.Background() u.boxMenu = []string{"cancel", "confirm"} u.boxMenuHandler = func(f fs.Fs, p string, o int) (string, error) { if o != 1 { return 
"Aborted!", nil } positionsToDelete := make([]int, len(u.selectedEntries)) i := 0 for key, cursorPos := range u.selectedEntries { dirPos := u.sortPerm[cursorPos.entry] dirEntry := u.entries[dirPos] var err error if obj, isFile := dirEntry.(fs.Object); isFile { err = operations.DeleteFile(ctx, obj) } else { err = operations.Purge(ctx, f, dirEntry.String()) } if err != nil { return "", err } delete(u.selectedEntries, key) positionsToDelete[i] = dirPos i++ } // deleting all entries at once, as doing it during the deletions // could cause issues. sort.Slice(positionsToDelete, func(i, j int) bool { return positionsToDelete[i] > positionsToDelete[j] }) for _, dirPos := range positionsToDelete { u.removeEntry(dirPos) } // move cursor at end if needed cursorPos := u.dirPosMap[u.path] if cursorPos.entry >= len(u.entries) { u.move(-1) } return "Successfully deleted all items!", nil } u.popupBox([]string{ "Delete selected items?", fmt.Sprintf("ALL %d items will be deleted", len(u.selectedEntries))}) } func (u *UI) displayPath() { u.togglePopupBox([]string{ "Current Path", u.path, }) } func (u *UI) copyPath() { if !clipboard.Unsupported { _ = clipboard.WriteAll(u.path) } } // Sort by the configured sort method type ncduSort struct { sortPerm []int entries fs.DirEntries d *scan.Dir u *UI } // Less is part of sort.Interface. 
func (ds *ncduSort) Less(i, j int) bool { var iAvgSize, jAvgSize float64 var iattrs, jattrs scan.Attrs if ds.u.sortByModTime != 0 { ctx := context.Background() iattrs, _ = ds.d.AttrWithModTimeI(ctx, ds.sortPerm[i]) jattrs, _ = ds.d.AttrWithModTimeI(ctx, ds.sortPerm[j]) } else { iattrs, _ = ds.d.AttrI(ds.sortPerm[i]) jattrs, _ = ds.d.AttrI(ds.sortPerm[j]) } iname, jname := ds.entries[ds.sortPerm[i]].Remote(), ds.entries[ds.sortPerm[j]].Remote() if iattrs.Count > 0 { iAvgSize = iattrs.AverageSize() } if jattrs.Count > 0 { jAvgSize = jattrs.AverageSize() } switch { case ds.u.sortByName < 0: return iname > jname case ds.u.sortByName > 0: break case ds.u.sortBySize < 0: if iattrs.Size != jattrs.Size { return iattrs.Size < jattrs.Size } case ds.u.sortBySize > 0: if iattrs.Size != jattrs.Size { return iattrs.Size > jattrs.Size } case ds.u.sortByModTime < 0: if iattrs.ModTime != jattrs.ModTime { return iattrs.ModTime.Before(jattrs.ModTime) } case ds.u.sortByModTime > 0: if iattrs.ModTime != jattrs.ModTime { return iattrs.ModTime.After(jattrs.ModTime) } case ds.u.sortByCount < 0: if iattrs.Count != jattrs.Count { return iattrs.Count < jattrs.Count } case ds.u.sortByCount > 0: if iattrs.Count != jattrs.Count { return iattrs.Count > jattrs.Count } case ds.u.sortByAverageSize < 0: if iAvgSize != jAvgSize { return iAvgSize < jAvgSize } // if avgSize is equal, sort by size if iattrs.Size != jattrs.Size { return iattrs.Size < jattrs.Size } case ds.u.sortByAverageSize > 0: if iAvgSize != jAvgSize { return iAvgSize > jAvgSize } // if avgSize is equal, sort by size if iattrs.Size != jattrs.Size { return iattrs.Size > jattrs.Size } } // if everything equal, sort by name return iname < jname } // Swap is part of sort.Interface. func (ds *ncduSort) Swap(i, j int) { ds.sortPerm[i], ds.sortPerm[j] = ds.sortPerm[j], ds.sortPerm[i] } // Len is part of sort.Interface. 
func (ds *ncduSort) Len() int { return len(ds.sortPerm) } // sort the permutation map of the current directory func (u *UI) sortCurrentDir() { u.sortPerm = u.sortPerm[:0] for i := range u.entries { u.sortPerm = append(u.sortPerm, i) } data := ncduSort{ sortPerm: u.sortPerm, entries: u.entries, d: u.d, u: u, } sort.Sort(&data) if len(u.invSortPerm) < len(u.sortPerm) { u.invSortPerm = make([]int, len(u.sortPerm)) } for i, j := range u.sortPerm { u.invSortPerm[j] = i } } // setCurrentDir sets the current directory func (u *UI) setCurrentDir(d *scan.Dir) { u.d = d u.entries = d.Entries() u.path = fspath.JoinRootPath(u.fsName, d.Path()) u.selectedEntries = make(map[string]dirPos) u.visualSelectMode = false u.sortCurrentDir() } // enters the current entry func (u *UI) enter() { if u.d == nil || len(u.entries) == 0 { return } dirPos := u.dirPosMap[u.path] d, _ := u.d.GetDir(u.sortPerm[dirPos.entry]) if d == nil { return } u.setCurrentDir(d) } // handles a box option that was selected func (u *UI) handleBoxOption() { msg, err := u.boxMenuHandler(u.f, u.path, u.boxMenuButton) // reset u.boxMenuButton = 0 u.boxMenu = []string{} u.boxMenuHandler = nil if err != nil { u.popupBox([]string{ "error:", err.Error(), }) return } u.popupBox([]string{"Finished:", msg}) } // up goes up to the parent directory func (u *UI) up() { if u.d == nil { return } parent := u.d.Parent() if parent != nil { u.setCurrentDir(parent) } } // popupBox shows a box with the text in func (u *UI) popupBox(text []string) { u.boxText = text u.showBox = true } // togglePopupBox shows a box with the text in func (u *UI) togglePopupBox(text []string) { if u.showBox && reflect.DeepEqual(u.boxText, text) { u.showBox = false } else { u.popupBox(text) } } // toggle the sorting for the flag passed in func (u *UI) toggleSort(sortType *int8) { old := *sortType u.sortBySize = 0 u.sortByCount = 0 u.sortByName = 0 u.sortByAverageSize = 0 if old == 0 { *sortType = 1 } else { *sortType = -old } u.sortCurrentDir() } func (u 
*UI) toggleSelectForCursor() { cursorPos := u.dirPosMap[u.path] dirPos := u.sortPerm[cursorPos.entry] dirEntry := u.entries[dirPos] _, present := u.selectedEntries[dirEntry.String()] if present { delete(u.selectedEntries, dirEntry.String()) } else { u.selectedEntries[dirEntry.String()] = cursorPos } } // NewUI creates a new user interface for ncdu on f func NewUI(f fs.Fs) *UI { return &UI{ f: f, path: "Waiting for root...", dirListHeight: 20, // updated in Draw fsName: fs.ConfigString(f), showGraph: true, showCounts: false, showDirAverageSize: false, humanReadable: true, sortByName: 0, sortBySize: 1, // Sort by largest first sortByModTime: 0, sortByCount: 0, dirPosMap: make(map[string]dirPos), selectedEntries: make(map[string]dirPos), } } func (u *UI) scan() (chan *scan.Dir, chan error, chan struct{}) { if cancel := u.cancel; cancel != nil { cancel() } u.listing = true ctx := context.Background() ctx, u.cancel = context.WithCancel(ctx) return scan.Scan(ctx, u.f) } // Run shows the user interface func (u *UI) Run() error { var err error u.s, err = tcell.NewScreen() if err != nil { return fmt.Errorf("screen new: %w", err) } err = u.s.Init() if err != nil { return fmt.Errorf("screen init: %w", err) } // Hijack log output so that it doesn't corrupt the screen. 
if !log.Redirected() { var logs []string log.Handler.SetOutput(func(level slog.Level, text string) { if len(logs) > 100 { logs = logs[len(logs)-100:] } logs = append(logs, text) }) defer func() { log.Handler.ResetOutput() for _, text := range logs { _, _ = os.Stderr.WriteString(text) } }() } defer u.s.Fini() // scan the disk in the background rootChan, errChan, updated := u.scan() // Poll the events into a channel events := make(chan tcell.Event) go u.s.ChannelEvents(events, nil) // Main loop, waiting for events and channels outer: for { select { case root := <-rootChan: u.root = root u.setCurrentDir(root) case err := <-errChan: if err != nil { return fmt.Errorf("ncdu directory listing: %w", err) } u.listing = false case <-updated: // TODO: might want to limit updates per second u.sortCurrentDir() case ev := <-events: switch ev := ev.(type) { case *tcell.EventResize: u.Draw() u.s.Sync() continue // don't draw again case *tcell.EventKey: var c rune if k := ev.Key(); k == tcell.KeyRune { c = ev.Rune() } else { c = key(k) } switch c { case key(tcell.KeyEsc), key(tcell.KeyCtrlC), 'q': if u.showBox || c == key(tcell.KeyEsc) { u.showBox = false } else { break outer } case key(tcell.KeyDown), 'j': u.move(1) case key(tcell.KeyUp), 'k': u.move(-1) case key(tcell.KeyPgDn), '-', '_': u.move(u.dirListHeight) case key(tcell.KeyPgUp), '=', '+': u.move(-u.dirListHeight) case key(tcell.KeyLeft), 'h': if u.showBox { u.moveBox(-1) break } u.up() case key(tcell.KeyEnter): if len(u.boxMenu) > 0 { u.handleBoxOption() break } u.enter() case key(tcell.KeyRight), 'l': if u.showBox { u.moveBox(1) break } u.enter() case 'c': u.showCounts = !u.showCounts case 'm': u.showModTime = !u.showModTime case 'g': u.showGraph = !u.showGraph case 'a': u.showDirAverageSize = !u.showDirAverageSize case 'n': u.toggleSort(&u.sortByName) case 's': u.toggleSort(&u.sortBySize) case 'M': u.toggleSort(&u.sortByModTime) case 'v': u.toggleSelectForCursor() case 'V': u.visualSelectMode = !u.visualSelectMode case 
'C': u.toggleSort(&u.sortByCount) case 'A': u.toggleSort(&u.sortByAverageSize) case 'y': u.copyPath() case 'Y': u.displayPath() case 'd': u.delete() case 'u': u.humanReadable = !u.humanReadable case 'D': u.deleteSelected() case '?': u.togglePopupBox(helpText()) case 'r': // restart scan rootChan, errChan, updated = u.scan() // Refresh the screen. Not obvious what key to map // this onto, but ^L is a common choice. case key(tcell.KeyCtrlL): u.Draw() u.s.Sync() continue // don't draw again } } } u.Draw() u.s.Show() } return nil } // key returns a rune representing the key k. It is a negative value, to not collide with Unicode code-points. func key(k tcell.Key) rune { return rune(-k) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/ncdu/scan/scan.go
cmd/ncdu/scan/scan.go
// Package scan does concurrent scanning of an Fs building up a directory tree. package scan import ( "context" "fmt" "path" "slices" "sync" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/walk" ) // Dir represents a directory found in the remote type Dir struct { parent *Dir path string mu sync.Mutex size int64 count int64 countUnknownSize int64 entries fs.DirEntries dirs map[string]*Dir readError error entriesHaveErrors bool } // Attrs contains accumulated properties for a directory entry // // Files with unknown size are counted separately but also included // in the total count. They are not included in the size, i.e. treated // as empty files, which means the size may be underestimated. type Attrs struct { ModTime time.Time Size int64 Count int64 CountUnknownSize int64 IsDir bool Readable bool EntriesHaveErrors bool } // AverageSize calculates average size of files in directory // // If there are files with unknown size, this returns the average over // files with known sizes, which means it may be under- or // overestimated. 
func (a *Attrs) AverageSize() float64 { countKnownSize := a.Count - a.CountUnknownSize if countKnownSize > 0 { return float64(a.Size) / float64(countKnownSize) } return 0 } // Parent returns the directory above this one func (d *Dir) Parent() *Dir { // no locking needed since these are write once in newDir() return d.parent } // Path returns the position of the dir in the filesystem func (d *Dir) Path() string { // no locking needed since these are write once in newDir() return d.path } // make a new directory func newDir(parent *Dir, dirPath string, entries fs.DirEntries, err error) *Dir { d := &Dir{ parent: parent, path: dirPath, entries: entries, dirs: make(map[string]*Dir), readError: err, } // Count size in this dir for _, entry := range entries { if o, ok := entry.(fs.Object); ok { d.count++ size := o.Size() if size < 0 { // Some backends may return -1 because size of object is not known d.countUnknownSize++ } else { d.size += size } } } // Set my directory entry in parent if parent != nil { parent.mu.Lock() leaf := path.Base(dirPath) d.parent.dirs[leaf] = d parent.mu.Unlock() } // Accumulate counts in parents for ; parent != nil; parent = parent.parent { parent.mu.Lock() parent.size += d.size parent.count += d.count parent.countUnknownSize += d.countUnknownSize if d.readError != nil { parent.entriesHaveErrors = true } parent.mu.Unlock() } return d } // Entries returns a copy of the entries in the directory func (d *Dir) Entries() fs.DirEntries { return slices.Clone(d.entries) } // Remove removes the i-th entry from the // in-memory representation of the remote directory func (d *Dir) Remove(i int) { d.mu.Lock() defer d.mu.Unlock() d.remove(i) } // removes the i-th entry from the // in-memory representation of the remote directory // // Call with d.mu held func (d *Dir) remove(i int) { size := d.entries[i].Size() countUnknownSize := int64(0) if size < 0 { size = 0 countUnknownSize = 1 } count := int64(1) subDir, ok := d.getDir(i) if ok { size = subDir.size 
count = subDir.count countUnknownSize = subDir.countUnknownSize delete(d.dirs, path.Base(subDir.path)) } d.size -= size d.count -= count d.countUnknownSize -= countUnknownSize d.entries = slices.Delete(d.entries, i, i+1) dir := d // populate changed size and count to parent(s) for parent := d.parent; parent != nil; parent = parent.parent { parent.mu.Lock() parent.dirs[path.Base(dir.path)] = dir parent.size -= size parent.count -= count parent.countUnknownSize -= countUnknownSize dir = parent parent.mu.Unlock() } } // gets the directory of the i-th entry // // returns nil if it is a file // returns a flag as to whether is directory or not // // Call with d.mu held func (d *Dir) getDir(i int) (subDir *Dir, isDir bool) { obj := d.entries[i] dir, ok := obj.(fs.Directory) if !ok { return nil, false } leaf := path.Base(dir.Remote()) subDir = d.dirs[leaf] return subDir, true } // GetDir returns the Dir of the i-th entry // // returns nil if it is a file // returns a flag as to whether is directory or not func (d *Dir) GetDir(i int) (subDir *Dir, isDir bool) { d.mu.Lock() defer d.mu.Unlock() return d.getDir(i) } // Attr returns the size and count for the directory func (d *Dir) Attr() (size int64, count int64) { d.mu.Lock() defer d.mu.Unlock() return d.size, d.count } // attrI returns the size, count and flags for the i-th directory entry func (d *Dir) attrI(i int) (attrs Attrs, err error) { subDir, isDir := d.getDir(i) if !isDir { return Attrs{time.Time{}, d.entries[i].Size(), 0, 0, false, true, d.entriesHaveErrors}, d.readError } if subDir == nil { return Attrs{time.Time{}, 0, 0, 0, true, false, false}, nil } size, count := subDir.Attr() return Attrs{time.Time{}, size, count, subDir.countUnknownSize, true, true, subDir.entriesHaveErrors}, subDir.readError } // AttrI returns the size, count and flags for the i-th directory entry func (d *Dir) AttrI(i int) (attrs Attrs, err error) { d.mu.Lock() defer d.mu.Unlock() return d.attrI(i) } // AttrWithModTimeI returns the 
modtime, size, count and flags for the i-th directory entry func (d *Dir) AttrWithModTimeI(ctx context.Context, i int) (attrs Attrs, err error) { d.mu.Lock() defer d.mu.Unlock() attrs, err = d.attrI(i) attrs.ModTime = d.entries[i].ModTime(ctx) return } // Scan the Fs passed in, returning a root directory channel and an // error channel func Scan(ctx context.Context, f fs.Fs) (chan *Dir, chan error, chan struct{}) { ci := fs.GetConfig(ctx) root := make(chan *Dir, 1) errChan := make(chan error, 1) updated := make(chan struct{}, 1) go func() { parents := map[string]*Dir{} err := walk.Walk(ctx, f, "", false, ci.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error { var parent *Dir if dirPath != "" { parentPath := path.Dir(dirPath) if parentPath == "." { parentPath = "" } var ok bool parent, ok = parents[parentPath] if !ok { errChan <- fmt.Errorf("couldn't find parent for %q", dirPath) } } d := newDir(parent, dirPath, entries, err) parents[dirPath] = d if dirPath == "" { root <- d } // Mark updated select { case updated <- struct{}{}: default: break } return nil }) if err != nil { errChan <- fmt.Errorf("ncdu listing failed: %w", err) } errChan <- nil }() return root, errChan, updated }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/rcd/rcd.go
cmd/rcd/rcd.go
// Package rcd provides the rcd command. package rcd import ( "context" "strings" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/rc" "github.com/rclone/rclone/fs/rc/rcflags" "github.com/rclone/rclone/fs/rc/rcserver" libhttp "github.com/rclone/rclone/lib/http" "github.com/rclone/rclone/lib/systemd" "github.com/spf13/cobra" ) func init() { cmd.Root.AddCommand(commandDefinition) } var commandDefinition = &cobra.Command{ Use: "rcd <path to files to serve>*", Short: `Run rclone listening to remote control commands only.`, Long: `This runs rclone so that it only listens to remote control commands. This is useful if you are controlling rclone via the rc API. If you pass in a path to a directory, rclone will serve that directory for GET requests on the URL passed in. It will also open the URL in the browser when rclone is run. See the [rc documentation](/rc/) for more info on the rc flags. ` + strings.TrimSpace(libhttp.Help(rcflags.FlagPrefix)+libhttp.TemplateHelp(rcflags.FlagPrefix)+libhttp.AuthHelp(rcflags.FlagPrefix)), Annotations: map[string]string{ "versionIntroduced": "v1.45", "groups": "RC", }, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(0, 1, command, args) if rc.Opt.Enabled { fs.Fatalf(nil, "Don't supply --rc flag when using rcd") } // Start the rc rc.Opt.Enabled = true if len(args) > 0 { rc.Opt.Files = args[0] } s, err := rcserver.Start(context.Background(), &rc.Opt) if err != nil { fs.Fatalf(nil, "Failed to start remote control: %v", err) } if s == nil { fs.Fatal(nil, "rc server not configured") } // Notify stopping on exit defer systemd.Notify()() s.Wait() }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/lsd/lsd.go
cmd/lsd/lsd.go
// Package lsd provides the lsd command. package lsd import ( "context" "os" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/cmd/ls/lshelp" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/operations" "github.com/spf13/cobra" ) var ( recurse bool ) func init() { cmd.Root.AddCommand(commandDefinition) cmdFlags := commandDefinition.Flags() flags.BoolVarP(cmdFlags, &recurse, "recursive", "R", false, "Recurse into the listing", "") } var commandDefinition = &cobra.Command{ Use: "lsd remote:path", Short: `List all directories/containers/buckets in the path.`, Long: `Lists the directories in the source path to standard output. Does not recurse by default. Use the ` + "`-R`" + ` flag to recurse. This command lists the total size of the directory (if known, -1 if not), the modification time (if known, the current time if not), the number of objects in the directory (if known, -1 if not) and the name of the directory, E.g. ` + "```console" + ` $ rclone lsd swift: 494000 2018-04-26 08:43:20 10000 10000files 65 2018-04-26 08:43:20 1 1File ` + "```" + ` Or ` + "```console" + ` $ rclone lsd drive:test -1 2016-10-17 17:41:53 -1 1000files -1 2017-01-03 14:40:54 -1 2500files -1 2017-07-08 14:39:28 -1 4000files ` + "```" + ` If you just want the directory names use ` + "`rclone lsf --dirs-only`" + `. ` + lshelp.Help, Annotations: map[string]string{ "groups": "Filter,Listing", }, Run: func(command *cobra.Command, args []string) { ci := fs.GetConfig(context.Background()) cmd.CheckArgs(1, 1, command, args) if recurse { ci.MaxDepth = 0 } fsrc := cmd.NewFsSrc(args) cmd.Run(false, false, command, func() error { return operations.ListDir(context.Background(), fsrc, os.Stdout) }) }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/archive/archive_unsupported.go
cmd/archive/archive_unsupported.go
// Build for unsupported platforms to stop go complaining // about "no buildable Go source files " //go:build plan9 // Package archive implements 'rclone archive'. package archive
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/archive/archive.go
cmd/archive/archive.go
//go:build !plan9 // Package archive implements 'rclone archive'. package archive import ( "errors" "github.com/rclone/rclone/cmd" "github.com/spf13/cobra" ) func init() { cmd.Root.AddCommand(Command) } // Command - archive command var Command = &cobra.Command{ Use: "archive <action> [opts] <source> [<destination>]", Short: `Perform an action on an archive.`, Long: `Perform an action on an archive. Requires the use of a subcommand to specify the protocol, e.g. rclone archive list remote:file.zip Each subcommand has its own options which you can see in their help. See [rclone archive create](/commands/rclone_archive_create/) for the archive formats supported. `, Annotations: map[string]string{ "versionIntroduced": "v1.72", }, RunE: func(command *cobra.Command, args []string) error { if len(args) == 0 { return errors.New("archive requires an action, e.g. 'rclone archive list remote:'") } return errors.New("unknown action") }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/archive/archive_test.go
cmd/archive/archive_test.go
package archive_test

import (
	"context"
	"strings"
	"testing"

	"github.com/mholt/archives"
	_ "github.com/rclone/rclone/backend/local"
	_ "github.com/rclone/rclone/backend/memory"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fstest"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/rclone/rclone/cmd/archive/create"
	"github.com/rclone/rclone/cmd/archive/extract"
	"github.com/rclone/rclone/cmd/archive/list"
)

var (
	// fixed modification time used for all test files
	t1 = fstest.Time("2017-02-03T04:05:06.499999999Z")
)

// TestMain drives the tests
func TestMain(m *testing.M) {
	fstest.TestMain(m)
}

// TestCheckValidDestination checks create.CheckValidDestination for
// existing files, missing files, directories and missing parents.
func TestCheckValidDestination(t *testing.T) {
	var err error
	ctx := context.Background()
	r := fstest.NewRun(t)
	// create file
	r.WriteObject(ctx, "file1.txt", "111", t1)
	// test checkValidDestination when file exists
	err = create.CheckValidDestination(ctx, r.Fremote, "file1.txt")
	require.NoError(t, err)
	// test checkValidDestination when file does not exist
	err = create.CheckValidDestination(ctx, r.Fremote, "file2.txt")
	require.NoError(t, err)
	// test checkValidDestination when dest is a directory
	if r.Fremote.Features().CanHaveEmptyDirectories {
		err = create.CheckValidDestination(ctx, r.Fremote, "")
		require.ErrorIs(t, err, fs.ErrorIsDir)
	}
	// test checkValidDestination when dest does not exists
	err = create.CheckValidDestination(ctx, r.Fremote, "dir/file.txt")
	require.NoError(t, err)
}

// testArchiveRemote round-trips a small directory tree through an
// archive of the given extension: create, list, then extract into
// subDir ("" means the root) and check the result.
//
// fromLocal selects whether the files start on the local or the
// remote file system (the archive is always written to the other).
func testArchiveRemote(t *testing.T, fromLocal bool, subDir string, extension string) {
	var err error
	ctx := context.Background()
	r := fstest.NewRun(t)
	var src, dst fs.Fs
	var f1, f2, f3 fstest.Item
	// create files to archive on src
	if fromLocal {
		// create files to archive on local
		src = r.Flocal
		dst = r.Fremote
		f1 = r.WriteFile("file1.txt", "content 1", t1)
		f2 = r.WriteFile("dir1/sub1.txt", "sub content 1", t1)
		f3 = r.WriteFile("dir2/sub2a.txt", "sub content 2a", t1)
	} else {
		// create files to archive on remote
		src = r.Fremote
		dst = r.Flocal
		f1 = r.WriteObject(ctx, "file1.txt", "content 1", t1)
		f2 = r.WriteObject(ctx, "dir1/sub1.txt", "sub content 1", t1)
		f3 = r.WriteObject(ctx, "dir2/sub2a.txt", "sub content 2a", t1)
	}
	fstest.CheckItems(t, src, f1, f2, f3)
	// create archive on dst
	archiveName := "test." + extension
	err = create.ArchiveCreate(ctx, dst, archiveName, src, "", "")
	require.NoError(t, err)
	// list archive on dst - every entry must appear exactly once with
	// the expected size (directories count as size 0)
	expected := map[string]int64{
		"file1.txt":      9,
		"dir1/":          0,
		"dir1/sub1.txt":  13,
		"dir2/":          0,
		"dir2/sub2a.txt": 14,
	}
	listFile := func(ctx context.Context, f archives.FileInfo) error {
		name := f.NameInArchive
		gotSize := f.Size()
		if f.IsDir() && !strings.HasSuffix(name, "/") {
			name += "/"
			gotSize = 0
		}
		wantSize, found := expected[name]
		assert.True(t, found, name)
		assert.Equal(t, wantSize, gotSize)
		delete(expected, name)
		return nil
	}
	err = list.ArchiveList(ctx, dst, archiveName, listFile)
	require.NoError(t, err)
	assert.Equal(t, 0, len(expected), expected)
	// clear the src
	require.NoError(t, operations.Purge(ctx, src, ""))
	require.NoError(t, src.Mkdir(ctx, ""))
	fstest.CheckItems(t, src)
	// extract dst archive back to src
	err = extract.ArchiveExtract(ctx, src, subDir, dst, archiveName)
	require.NoError(t, err)
	// check files on src are restored from the archive on dst
	items := []fstest.Item{f1, f2, f3}
	if subDir != "" {
		for i := range items {
			item := &items[i]
			item.Path = subDir + "/" + item.Path
		}
	}
	fstest.CheckListingWithPrecision(t, src, items, nil, fs.ModTimeNotSupported)
}

// testArchive runs the round-trip test over all supported archive
// extensions, extracting both to the archive root and to a
// subdirectory, in both local->remote and remote->local directions.
func testArchive(t *testing.T) {
	var extensions = []string{
		"zip", "tar", "tar.gz", "tar.bz2", "tar.lz", "tar.lz4", "tar.xz", "tar.zst", "tar.br", "tar.sz", "tar.mz",
	}
	for _, extension := range extensions {
		t.Run(extension, func(t *testing.T) {
			for _, subDir := range []string{"", "subdir"} {
				// name is only the subtest label; pass subDir itself to
				// testArchiveRemote. Previously name ("root") was passed,
				// so extraction into the archive root (subDir == "") was
				// never actually tested.
				name := subDir
				if name == "" {
					name = "root"
				}
				t.Run(name, func(t *testing.T) {
					t.Run("local", func(t *testing.T) {
						testArchiveRemote(t, true, subDir, extension)
					})
					t.Run("remote", func(t *testing.T) {
						testArchiveRemote(t, false, subDir, extension)
					})
				})
			}
		})
	}
}

// TestIntegration runs the archive tests against the -remote given on
// the command line (or the default).
func TestIntegration(t *testing.T) {
	testArchive(t)
}

// TestMemory runs the archive tests against the :memory: backend
// unless -remote was set explicitly.
func TestMemory(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("skipping as -remote is set")
	}
	// Reset -remote to point to :memory:
	oldFstestRemoteName := fstest.RemoteName
	remoteName := ":memory:"
	fstest.RemoteName = &remoteName
	defer func() {
		fstest.RemoteName = oldFstestRemoteName
	}()
	fstest.ResetRun()
	testArchive(t)
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/archive/extract/extract.go
cmd/archive/extract/extract.go
//go:build !plan9 // Package extract implements 'rclone archive extract' package extract import ( "context" "errors" "fmt" "path" "strings" "github.com/mholt/archives" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/cmd/archive" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/filter" "github.com/rclone/rclone/fs/operations" "github.com/spf13/cobra" ) func init() { archive.Command.AddCommand(Command) } // Command - extract var Command = &cobra.Command{ Use: "extract [flags] <source> <destination>", Short: `Extract archives from source to destination.`, Long: strings.ReplaceAll(` Extract the archive contents to a destination directory auto detecting the format. See [rclone archive create](/commands/rclone_archive_create/) for the archive formats supported. For example on this archive: ||| $ rclone archive list --long remote:archive.zip 6 2025-10-30 09:46:23.000000000 file.txt 0 2025-10-30 09:46:57.000000000 dir/ 4 2025-10-30 09:46:57.000000000 dir/bye.txt ||| You can run extract like this ||| $ rclone archive extract remote:archive.zip remote:extracted ||| Which gives this result ||| $ rclone tree remote:extracted / ├── dir │ └── bye.txt └── file.txt ||| The source or destination or both can be local or remote. Filters can be used to only extract certain files: ||| $ rclone archive extract archive.zip partial --include "bye.*" $ rclone tree partial / └── dir └── bye.txt ||| The [archive backend](/archive/) can also be used to extract files. It can be used to read only mount archives also but it supports a different set of archive formats to the archive commands. 
`, "|", "`"), Annotations: map[string]string{ "versionIntroduced": "v1.72", }, RunE: func(command *cobra.Command, args []string) error { cmd.CheckArgs(2, 2, command, args) src, srcFile := cmd.NewFsFile(args[0]) dst, dstFile := cmd.NewFsFile(args[1]) cmd.Run(false, false, command, func() error { return ArchiveExtract(context.Background(), dst, dstFile, src, srcFile) }) return nil }, } // ArchiveExtract extracts files from (src, srcFile) to (dst, dstDir) func ArchiveExtract(ctx context.Context, dst fs.Fs, dstDir string, src fs.Fs, srcFile string) error { var srcObj fs.Object var filesExtracted = 0 var err error fi := filter.GetConfig(ctx) ci := fs.GetConfig(ctx) // get source object srcObj, err = src.NewObject(ctx, srcFile) fs.Debugf(nil, "srcFile: %q, src : %v", srcFile, src) if errors.Is(err, fs.ErrorIsDir) { return fmt.Errorf("source can't be a directory: %w", err) } else if errors.Is(err, fs.ErrorObjectNotFound) { return fmt.Errorf("source not found: %w", err) } else if err != nil { return fmt.Errorf("unable to access source: %w", err) } fs.Debugf(nil, "Source archive file: %s/%s", src.Root(), srcFile) // Create destination directory err = dst.Mkdir(ctx, dstDir) if err != nil { return fmt.Errorf("unable to access destination: %w", err) } fs.Debugf(dst, "Destination for extracted files: %q", dstDir) // start accounting tr := accounting.Stats(ctx).NewTransfer(srcObj, nil) defer tr.Done(ctx, err) // open source var options []fs.OpenOption for _, option := range fs.GetConfig(ctx).DownloadHeaders { options = append(options, option) } in0, err := operations.Open(ctx, srcObj, options...) 
if err != nil { return fmt.Errorf("failed to open file %s: %w", srcFile, err) } // account and buffer the transfer // in = tr.Account(ctx, in).WithBuffer() in := tr.Account(ctx, in0) // identify format format, _, err := archives.Identify(ctx, "", in) if err != nil { return fmt.Errorf("failed to open check file type: %w", err) } fs.Debugf(nil, "Extract %s/%s, format %s to %s", src.Root(), srcFile, strings.TrimPrefix(format.Extension(), "."), dst.Root()) // check if extract is supported by format ex, isExtract := format.(archives.Extraction) if !isExtract { return fmt.Errorf("extraction for %s not supported", strings.TrimPrefix(format.Extension(), ".")) } // extract files err = ex.Extract(ctx, in, func(ctx context.Context, f archives.FileInfo) error { remote := f.NameInArchive if dstDir != "" { remote = path.Join(dstDir, remote) } // check if file should be extracted if !fi.Include(remote, f.Size(), f.ModTime(), fs.Metadata{}) { return nil } // process directory if f.IsDir() { // directory fs.Debugf(nil, "mkdir %s", remote) // leave if --dry-run set if ci.DryRun { return nil } // create the directory return operations.Mkdir(ctx, dst, remote) } // process file fs.Debugf(nil, "Extract %s", remote) // leave if --dry-run set if ci.DryRun { filesExtracted++ return nil } // open file fin, err := f.Open() if err != nil { return err } // extract the file to destination _, err = operations.Rcat(ctx, dst, remote, fin, f.ModTime(), nil) if err == nil { filesExtracted++ } return err }) fs.Infof(nil, "Total files extracted %d", filesExtracted) return err }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/archive/extract/extract_unsupported.go
cmd/archive/extract/extract_unsupported.go
// Build for unsupported platforms to stop go complaining // about "no buildable Go source files " //go:build plan9 // Package archive implements 'rclone archive extract'. package extract
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/archive/list/list.go
cmd/archive/list/list.go
//go:build !plan9 // Package list implements 'rclone archive list' package list import ( "context" "fmt" "os" "strings" "github.com/mholt/archives" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/cmd/archive" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/filter" "github.com/rclone/rclone/fs/operations" "github.com/spf13/cobra" ) var ( longList = false plainList = false filesOnly = false dirsOnly = false ) func init() { flagSet := Command.Flags() flags.BoolVarP(flagSet, &longList, "long", "", longList, "List extra attributtes", "") flags.BoolVarP(flagSet, &plainList, "plain", "", plainList, "Only list file names", "") flags.BoolVarP(flagSet, &filesOnly, "files-only", "", false, "Only list files", "") flags.BoolVarP(flagSet, &dirsOnly, "dirs-only", "", false, "Only list directories", "") archive.Command.AddCommand(Command) } // Command - list var Command = &cobra.Command{ Use: "list [flags] <source>", Short: `List archive contents from source.`, Long: strings.ReplaceAll(` List the contents of an archive to the console, auto detecting the format. See [rclone archive create](/commands/rclone_archive_create/) for the archive formats supported. 
For example: ||| $ rclone archive list remote:archive.zip 6 file.txt 0 dir/ 4 dir/bye.txt ||| Or with |--long| flag for more info: ||| $ rclone archive list --long remote:archive.zip 6 2025-10-30 09:46:23.000000000 file.txt 0 2025-10-30 09:46:57.000000000 dir/ 4 2025-10-30 09:46:57.000000000 dir/bye.txt ||| Or with |--plain| flag which is useful for scripting: ||| $ rclone archive list --plain /path/to/archive.zip file.txt dir/ dir/bye.txt ||| Or with |--dirs-only|: ||| $ rclone archive list --plain --dirs-only /path/to/archive.zip dir/ ||| Or with |--files-only|: ||| $ rclone archive list --plain --files-only /path/to/archive.zip file.txt dir/bye.txt ||| Filters may also be used: ||| $ rclone archive list --long archive.zip --include "bye.*" 4 2025-10-30 09:46:57.000000000 dir/bye.txt ||| The [archive backend](/archive/) can also be used to list files. It can be used to read only mount archives also but it supports a different set of archive formats to the archive commands. `, "|", "`"), Annotations: map[string]string{ "versionIntroduced": "v1.72", }, RunE: func(command *cobra.Command, args []string) error { cmd.CheckArgs(1, 1, command, args) src, srcFile := cmd.NewFsFile(args[0]) cmd.Run(false, false, command, func() error { return ArchiveList(context.Background(), src, srcFile, listFile) }) return nil }, } func listFile(ctx context.Context, f archives.FileInfo) error { ci := fs.GetConfig(ctx) fi := filter.GetConfig(ctx) // check if excluded if !fi.Include(f.NameInArchive, f.Size(), f.ModTime(), fs.Metadata{}) { return nil } if filesOnly && f.IsDir() { return nil } if dirsOnly && !f.IsDir() { return nil } // get entry name name := f.NameInArchive if f.IsDir() && !strings.HasSuffix(name, "/") { name += "/" } // print info if longList { operations.SyncFprintf(os.Stdout, "%s %s %s\n", operations.SizeStringField(f.Size(), ci.HumanReadable, 9), f.ModTime().Format("2006-01-02 15:04:05.000000000"), name) } else if plainList { operations.SyncFprintf(os.Stdout, "%s\n", 
name) } else { operations.SyncFprintf(os.Stdout, "%s %s\n", operations.SizeStringField(f.Size(), ci.HumanReadable, 9), name) } return nil } // ArchiveList -- print a list of the files in the archive func ArchiveList(ctx context.Context, src fs.Fs, srcFile string, listFn archives.FileHandler) error { var srcObj fs.Object var err error // get object srcObj, err = src.NewObject(ctx, srcFile) if err != nil { return fmt.Errorf("source is not a file, %w", err) } fs.Debugf(nil, "Source archive file: %s/%s", src.Root(), srcFile) // start accounting tr := accounting.Stats(ctx).NewTransfer(srcObj, nil) defer func() { tr.Done(ctx, err) }() // open source var options []fs.OpenOption for _, option := range fs.GetConfig(ctx).DownloadHeaders { options = append(options, option) } in0, err := operations.Open(ctx, srcObj, options...) if err != nil { return fmt.Errorf("failed to open file %s: %w", srcFile, err) } // account and buffer the transfer // in = tr.Account(ctx, in).WithBuffer() in := tr.Account(ctx, in0) // identify format format, _, err := archives.Identify(ctx, "", in) if err != nil { return fmt.Errorf("failed to open check file type, %w", err) } fs.Debugf(nil, "Listing %s/%s, format %s", src.Root(), srcFile, strings.TrimPrefix(format.Extension(), ".")) // check if extract is supported by format ex, isExtract := format.(archives.Extraction) if !isExtract { return fmt.Errorf("extraction for %s not supported", strings.TrimPrefix(format.Extension(), ".")) } // list files return ex.Extract(ctx, in, listFn) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/archive/list/list_unsupported.go
cmd/archive/list/list_unsupported.go
// Build for unsupported platforms to stop go complaining // about "no buildable Go source files " //go:build plan9 // Package archive implements 'rclone archive list'. package list
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/archive/files/countwriter_test.go
cmd/archive/files/countwriter_test.go
package files

import (
	"errors"
	"io"
	"sync"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// stubWriter is a Writer that reports at most n bytes written and
// always returns err (which may be nil). It lets the tests exercise
// short and failing writes.
type stubWriter struct {
	n   int
	err error
}

// Write reports min(s.n, len(p)) bytes written along with s.err.
func (s stubWriter) Write(p []byte) (int, error) {
	if s.n > len(p) {
		return len(p), s.err
	}
	return s.n, s.err
}

// TestCountWriter checks the single-goroutine behaviour of
// CountWriter: only bytes actually reported written are counted,
// even on short or failing writes.
func TestCountWriter(t *testing.T) {
	t.Parallel()
	t.Run("initial count is zero", func(t *testing.T) {
		cw := NewCountWriter(io.Discard)
		require.Equal(t, uint64(0), cw.Count())
	})
	t.Run("counts bytes with real writes", func(t *testing.T) {
		cw := NewCountWriter(io.Discard)
		n, err := cw.Write([]byte("abcd"))
		require.NoError(t, err)
		require.Equal(t, 4, n)
		assert.Equal(t, uint64(4), cw.Count())
		n, err = cw.Write([]byte("xyz"))
		require.NoError(t, err)
		require.Equal(t, 3, n)
		assert.Equal(t, uint64(7), cw.Count())
	})
	t.Run("nil writer uses io.Discard", func(t *testing.T) {
		cw := NewCountWriter(nil)
		n, err := cw.Write([]byte("ok"))
		require.NoError(t, err)
		require.Equal(t, 2, n)
		assert.Equal(t, uint64(2), cw.Count())
	})
	t.Run("zero-length write does not change count", func(t *testing.T) {
		cw := NewCountWriter(io.Discard)
		n, err := cw.Write(nil)
		require.NoError(t, err)
		require.Equal(t, 0, n)
		assert.Equal(t, uint64(0), cw.Count())
	})
	t.Run("partial write with error counts n and returns error", func(t *testing.T) {
		// 3 of 6 bytes "written" before the error - only 3 counted
		s := stubWriter{n: 3, err: errors.New("boom")}
		cw := NewCountWriter(s)
		n, err := cw.Write([]byte("abcdef"))
		require.Error(t, err)
		require.Equal(t, 3, n)
		assert.Equal(t, uint64(3), cw.Count())
	})
	t.Run("short successful write counts returned n", func(t *testing.T) {
		s := stubWriter{n: 1}
		cw := NewCountWriter(s)
		n, err := cw.Write([]byte("hi"))
		require.NoError(t, err)
		require.Equal(t, 1, n)
		assert.Equal(t, uint64(1), cw.Count())
	})
}

// TestCountWriterConcurrent hammers one CountWriter from many
// goroutines and checks no writes are lost from the total (run with
// -race to also catch data races).
func TestCountWriterConcurrent(t *testing.T) {
	t.Parallel()
	const (
		goroutines = 32
		loops      = 200
		chunkSize  = 64
	)
	data := make([]byte, chunkSize)
	cw := NewCountWriter(io.Discard)
	var wg sync.WaitGroup
	wg.Add(goroutines)
	for g := 0; g < goroutines; g++ {
		go func() {
			defer wg.Done()
			for i := 0; i < loops; i++ {
				n, err := cw.Write(data)
				assert.NoError(t, err)
				assert.Equal(t, chunkSize, n)
			}
		}()
	}
	wg.Wait()
	// every byte from every goroutine must be accounted for
	want := uint64(goroutines * loops * chunkSize)
	assert.Equal(t, want, cw.Count())
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/archive/files/countwriter.go
cmd/archive/files/countwriter.go
package files import ( "io" "sync/atomic" ) // CountWriter counts bytes written through it. // It is safe for concurrent Count/Reset; Write is as safe as the wrapped Writer. type CountWriter struct { w io.Writer count atomic.Uint64 } // NewCountWriter wraps w (use nil if you want to drop data). func NewCountWriter(w io.Writer) *CountWriter { if w == nil { w = io.Discard } return &CountWriter{w: w} } func (cw *CountWriter) Write(p []byte) (int, error) { n, err := cw.w.Write(p) if n > 0 { cw.count.Add(uint64(n)) } return n, err } // Count returns the total bytes written. func (cw *CountWriter) Count() uint64 { return cw.count.Load() }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/archive/files/files.go
cmd/archive/files/files.go
// Package files implements io/fs objects package files import ( "archive/tar" "context" "fmt" "io" stdfs "io/fs" "path" "strconv" "strings" "time" "github.com/mholt/archives" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/operations" ) // fill tar.Header with metadata if available (too bad username/groupname is not available) func metadataToHeader(metadata fs.Metadata, header *tar.Header) { var val string var ok bool var err error var mode, uid, gid int64 var atime, ctime time.Time var uname, gname string // check if metadata is valid if metadata != nil { // mode val, ok = metadata["mode"] if !ok { mode = 0644 } else { mode, err = strconv.ParseInt(val, 8, 64) if err != nil { mode = 0664 } } // uid val, ok = metadata["uid"] if !ok { uid = 0 } else { uid, err = strconv.ParseInt(val, 10, 32) if err != nil { uid = 0 } } // gid val, ok = metadata["gid"] if !ok { gid = 0 } else { gid, err = strconv.ParseInt(val, 10, 32) if err != nil { gid = 0 } } // access time val, ok := metadata["atime"] if !ok { atime = time.Unix(0, 0) } else { atime, err = time.Parse(time.RFC3339Nano, val) if err != nil { atime = time.Unix(0, 0) } } // set uname/gname if uid == 0 { uname = "root" } else { uname = strconv.FormatInt(uid, 10) } if gid == 0 { gname = "root" } else { gname = strconv.FormatInt(gid, 10) } } else { mode = 0644 uid = 0 gid = 0 uname = "root" gname = "root" atime = header.ModTime ctime = header.ModTime } // set values header.Mode = mode header.Uid = int(uid) header.Gid = int(gid) header.Uname = uname header.Gname = gname header.AccessTime = atime header.ChangeTime = ctime } // structs for fs.FileInfo,fs.File,SeekableFile type fileInfoImpl struct { header *tar.Header } type fileImpl struct { entry stdfs.FileInfo ctx context.Context reader io.ReadSeekCloser transfer *accounting.Transfer err error } func newFileInfo(ctx context.Context, entry fs.DirEntry, prefix string, metadata fs.Metadata) stdfs.FileInfo { var fi = 
new(fileInfoImpl) fi.header = new(tar.Header) if prefix != "" { fi.header.Name = path.Join(strings.TrimPrefix(prefix, "/"), entry.Remote()) } else { fi.header.Name = entry.Remote() } fi.header.Size = entry.Size() fi.header.ModTime = entry.ModTime(ctx) // set metadata metadataToHeader(metadata, fi.header) // flag if directory _, isDir := entry.(fs.Directory) if isDir { fi.header.Mode = int64(stdfs.ModeDir) | fi.header.Mode } return fi } func (a *fileInfoImpl) Name() string { return a.header.Name } func (a *fileInfoImpl) Size() int64 { return a.header.Size } func (a *fileInfoImpl) Mode() stdfs.FileMode { return stdfs.FileMode(a.header.Mode) } func (a *fileInfoImpl) ModTime() time.Time { return a.header.ModTime } func (a *fileInfoImpl) IsDir() bool { return (a.header.Mode & int64(stdfs.ModeDir)) != 0 } func (a *fileInfoImpl) Sys() any { return a.header } func (a *fileInfoImpl) String() string { return fmt.Sprintf("Name=%v Size=%v IsDir=%v UID=%v GID=%v", a.Name(), a.Size(), a.IsDir(), a.header.Uid, a.header.Gid) } // create a fs.File compatible struct func newFile(ctx context.Context, obj fs.Object, fi stdfs.FileInfo) (stdfs.File, error) { var f = new(fileImpl) // create stdfs.File f.entry = fi f.ctx = ctx f.err = nil // create transfer f.transfer = accounting.Stats(ctx).NewTransfer(obj, nil) // get open options var options []fs.OpenOption for _, option := range fs.GetConfig(ctx).DownloadHeaders { options = append(options, option) } // open file f.reader, f.err = operations.Open(ctx, obj, options...) 
if f.err != nil { defer f.transfer.Done(ctx, f.err) return nil, f.err } // Account the transfer f.reader = f.transfer.Account(ctx, f.reader) return f, f.err } func (a *fileImpl) Stat() (stdfs.FileInfo, error) { return a.entry, nil } func (a *fileImpl) Read(data []byte) (int, error) { if a.reader == nil { a.err = fmt.Errorf("file %s not open", a.entry.Name()) return 0, a.err } i, err := a.reader.Read(data) a.err = err return i, a.err } func (a *fileImpl) Close() error { // close file if a.reader == nil { a.err = fmt.Errorf("file %s not open", a.entry.Name()) } else { a.err = a.reader.Close() } // close transfer a.transfer.Done(a.ctx, a.err) return a.err } // NewArchiveFileInfo will take a fs.DirEntry and return a archives.Fileinfo func NewArchiveFileInfo(ctx context.Context, entry fs.DirEntry, prefix string, metadata fs.Metadata) archives.FileInfo { fi := newFileInfo(ctx, entry, prefix, metadata) return archives.FileInfo{ FileInfo: fi, NameInArchive: fi.Name(), LinkTarget: "", Open: func() (stdfs.File, error) { obj, isObject := entry.(fs.Object) if isObject { return newFile(ctx, obj, fi) } return nil, fmt.Errorf("%s is not a file", fi.Name()) }, } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/archive/create/create.go
cmd/archive/create/create.go
//go:build !plan9 // Package create implements 'rclone archive create'. package create import ( "context" "errors" "fmt" "io" "os" "path" "sort" "strings" "time" "github.com/mholt/archives" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/cmd/archive" "github.com/rclone/rclone/cmd/archive/files" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/filter" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/walk" "github.com/spf13/cobra" ) var ( fullPath = false prefix = "" format = "" ) func init() { flagSet := Command.Flags() flags.BoolVarP(flagSet, &fullPath, "full-path", "", fullPath, "Set prefix for files in archive to source path", "") flags.StringVarP(flagSet, &prefix, "prefix", "", prefix, "Set prefix for files in archive to entered value or source path", "") flags.StringVarP(flagSet, &format, "format", "", format, "Create the archive with format or guess from extension.", "") archive.Command.AddCommand(Command) } // Command - create var Command = &cobra.Command{ Use: "create [flags] <source> [<destination>]", Short: `Archive source file(s) to destination.`, // Warning! "!" will be replaced by backticks below Long: strings.ReplaceAll(` Creates an archive from the files in source:path and saves the archive to dest:path. If dest:path is missing, it will write to the console. The valid formats for the !--format! flag are listed below. If !--format! is not set rclone will guess it from the extension of dest:path. | Format | Extensions | |:-------|:-----------| | zip | .zip | | tar | .tar | | tar.gz | .tar.gz, .tgz, .taz | | tar.bz2| .tar.bz2, .tb2, .tbz, .tbz2, .tz2 | | tar.lz | .tar.lz | | tar.lz4| .tar.lz4 | | tar.xz | .tar.xz, .txz | | tar.zst| .tar.zst, .tzst | | tar.br | .tar.br | | tar.sz | .tar.sz | | tar.mz | .tar.mz | The !--prefix! and !--full-path! flags control the prefix for the files in the archive. If the flag !--full-path! 
is set then the files will have the full source path as the prefix. If the flag !--prefix=<value>! is set then the files will have !<value>! as prefix. It's possible to create invalid file names with !--prefix=<value>! so use with caution. Flag !--prefix! has priority over !--full-path!. Given a directory !/sourcedir! with the following: file1.txt dir1/file2.txt Running the command !rclone archive create /sourcedir /dest.tar.gz! will make an archive with the contents: file1.txt dir1/ dir1/file2.txt Running the command !rclone archive create --full-path /sourcedir /dest.tar.gz! will make an archive with the contents: sourcedir/file1.txt sourcedir/dir1/ sourcedir/dir1/file2.txt Running the command !rclone archive create --prefix=my_new_path /sourcedir /dest.tar.gz! will make an archive with the contents: my_new_path/file1.txt my_new_path/dir1/ my_new_path/dir1/file2.txt `, "!", "`"), Annotations: map[string]string{ "versionIntroduced": "v1.72", }, RunE: func(command *cobra.Command, args []string) error { var src, dst fs.Fs var dstFile string if len(args) == 1 { // source only, archive to stdout src = cmd.NewFsSrc(args) } else if len(args) == 2 { src = cmd.NewFsSrc(args) dst, dstFile = cmd.NewFsDstFile(args[1:2]) } else { cmd.CheckArgs(1, 2, command, args) } cmd.Run(false, false, command, func() error { fmt.Printf("dst=%v, dstFile=%q, src=%v, format=%q, prefix=%q\n", dst, dstFile, src, format, prefix) if prefix != "" { return ArchiveCreate(context.Background(), dst, dstFile, src, format, prefix) } else if fullPath { return ArchiveCreate(context.Background(), dst, dstFile, src, format, src.Root()) } return ArchiveCreate(context.Background(), dst, dstFile, src, format, "") }) return nil }, } // Globals var ( archiveFormats = map[string]archives.CompressedArchive{ "zip": archives.CompressedArchive{ Archival: archives.Zip{ContinueOnError: true}, }, "tar": archives.CompressedArchive{ Archival: archives.Tar{ContinueOnError: true}, }, "tar.gz": archives.CompressedArchive{ 
Compression: archives.Gz{}, Archival: archives.Tar{ContinueOnError: true}, }, "tar.bz2": archives.CompressedArchive{ Compression: archives.Bz2{}, Archival: archives.Tar{ContinueOnError: true}, }, "tar.lz": archives.CompressedArchive{ Compression: archives.Lzip{}, Archival: archives.Tar{ContinueOnError: true}, }, "tar.lz4": archives.CompressedArchive{ Compression: archives.Lz4{}, Archival: archives.Tar{ContinueOnError: true}, }, "tar.xz": archives.CompressedArchive{ Compression: archives.Xz{}, Archival: archives.Tar{ContinueOnError: true}, }, "tar.zst": archives.CompressedArchive{ Compression: archives.Zstd{}, Archival: archives.Tar{ContinueOnError: true}, }, "tar.br": archives.CompressedArchive{ Compression: archives.Brotli{}, Archival: archives.Tar{ContinueOnError: true}, }, "tar.sz": archives.CompressedArchive{ Compression: archives.Sz{}, Archival: archives.Tar{ContinueOnError: true}, }, "tar.mz": archives.CompressedArchive{ Compression: archives.MinLZ{}, Archival: archives.Tar{ContinueOnError: true}, }, } archiveExtensions = map[string]string{ // zip "*.zip": "zip", // tar "*.tar": "tar", // tar.gz "*.tar.gz": "tar.gz", "*.tgz": "tar.gz", "*.taz": "tar.gz", // tar.bz2 "*.tar.bz2": "tar.bz2", "*.tb2": "tar.bz2", "*.tbz": "tar.bz2", "*.tbz2": "tar.bz2", "*.tz2": "tar.bz2", // tar.lz "*.tar.lz": "tar.lz", // tar.lz4 "*.tar.lz4": "tar.lz4", // tar.xz "*.tar.xz": "tar.xz", "*.txz": "tar.xz", // tar.zst "*.tar.zst": "tar.zst", "*.tzst": "tar.zst", // tar.br "*.tar.br": "tar.br", // tar.sz "*.tar.sz": "tar.sz", // tar.mz "*.tar.mz": "tar.mz", } ) // sorted FileInfo list type archivesFileInfoList []archives.FileInfo func (a archivesFileInfoList) Len() int { return len(a) } func (a archivesFileInfoList) Less(i, j int) bool { if a[i].FileInfo.IsDir() == a[j].FileInfo.IsDir() { // both are same type, order by name return strings.Compare(a[i].NameInArchive, a[j].NameInArchive) < 0 } else if a[i].FileInfo.IsDir() { return 
strings.Compare(strings.TrimSuffix(a[i].NameInArchive, "/"), path.Dir(a[j].NameInArchive)) < 0 } return strings.Compare(path.Dir(a[i].NameInArchive), strings.TrimSuffix(a[j].NameInArchive, "/")) < 0 } func (a archivesFileInfoList) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func getCompressor(format string, filename string) (archives.CompressedArchive, error) { var compressor archives.CompressedArchive var found bool // make filename lowercase for checks filename = strings.ToLower(filename) if format == "" { // format flag not set, get format from the file extension for pattern, formatName := range archiveExtensions { ok, err := path.Match(pattern, filename) if err != nil { // error in pattern return archives.CompressedArchive{}, fmt.Errorf("invalid extension pattern '%s'", pattern) } else if ok { // pattern matches filename, get compressor compressor, found = archiveFormats[formatName] break } } } else { // format flag set, look for it compressor, found = archiveFormats[format] } if found { return compressor, nil } else if format == "" { return archives.CompressedArchive{}, fmt.Errorf("format not set and can't be guessed from extension") } return archives.CompressedArchive{}, fmt.Errorf("invalid format '%s'", format) } // CheckValidDestination - takes (dst, dstFile) and checks it is valid func CheckValidDestination(ctx context.Context, dst fs.Fs, dstFile string) error { var err error // check if dst + dstFile is a file _, err = dst.NewObject(ctx, dstFile) if err == nil { // (dst, dstFile) is a valid file we can overwrite return nil } else if errors.Is(err, fs.ErrorIsDir) { // dst is a directory return fmt.Errorf("destination must not be a directory: %w", err) } else if !errors.Is(err, fs.ErrorObjectNotFound) { // dst is a directory (we need a filename) or some other error happened // not good, leave return fmt.Errorf("error reading destination: %w", err) } // if we are here dst points to a non existent path return nil } func loadMetadata(ctx context.Context, o 
fs.DirEntry) fs.Metadata { meta, err := fs.GetMetadata(ctx, o) if err != nil { meta = make(fs.Metadata, 0) } return meta } // ArchiveCreate - compresses/archive source to destination func ArchiveCreate(ctx context.Context, dst fs.Fs, dstFile string, src fs.Fs, format string, prefix string) error { var err error var list archivesFileInfoList var compArchive archives.CompressedArchive var totalLength int64 // check id dst is valid err = CheckValidDestination(ctx, dst, dstFile) if err != nil { return err } ci := fs.GetConfig(ctx) fi := filter.GetConfig(ctx) // get archive format compArchive, err = getCompressor(format, dstFile) if err != nil { return err } // get source files err = walk.ListR(ctx, src, "", false, ci.MaxDepth, walk.ListAll, func(entries fs.DirEntries) error { // get directories entries.ForDir(func(o fs.Directory) { var metadata fs.Metadata if ci.Metadata { metadata = loadMetadata(ctx, o) } if fi.Include(o.Remote(), o.Size(), o.ModTime(ctx), metadata) { info := files.NewArchiveFileInfo(ctx, o, prefix, metadata) list = append(list, info) } }) // get files entries.ForObject(func(o fs.Object) { var metadata fs.Metadata if ci.Metadata { metadata = loadMetadata(ctx, o) } if fi.Include(o.Remote(), o.Size(), o.ModTime(ctx), metadata) { info := files.NewArchiveFileInfo(ctx, o, prefix, metadata) list = append(list, info) totalLength += o.Size() } }) return nil }) if err != nil { return err } else if list.Len() == 0 { return fmt.Errorf("no files found in source") } sort.Stable(list) // create archive if ci.DryRun { // write nowhere counter := files.NewCountWriter(nil) err = compArchive.Archive(ctx, counter, list) // log totals fs.Infof(nil, "Total files added %d", list.Len()) fs.Infof(nil, "Total bytes read %d", totalLength) fs.Infof(nil, "Compressed file size %d", counter.Count()) return err } else if dst == nil { // write to stdout counter := files.NewCountWriter(os.Stdout) err = compArchive.Archive(ctx, counter, list) // log totals fs.Infof(nil, "Total files 
added %d", list.Len()) fs.Infof(nil, "Total bytes read %d", totalLength) fs.Infof(nil, "Compressed file size %d", counter.Count()) return err } // write to remote pipeReader, pipeWriter := io.Pipe() // write to pipewriter in background counter := files.NewCountWriter(pipeWriter) go func() { err := compArchive.Archive(ctx, counter, list) pipeWriter.CloseWithError(err) }() // rcat to remote from pipereader _, err = operations.Rcat(ctx, dst, dstFile, pipeReader, time.Now(), nil) // log totals fs.Infof(nil, "Total files added %d", list.Len()) fs.Infof(nil, "Total bytes read %d", totalLength) fs.Infof(nil, "Compressed file size %d", counter.Count()) return err }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/archive/create/create_unsupported.go
cmd/archive/create/create_unsupported.go
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "

//go:build plan9

// Package create implements 'rclone archive create'.
package create
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/mount2/file.go
cmd/mount2/file.go
//go:build linux || (darwin && amd64) package mount2 import ( "context" "fmt" "io" "syscall" fusefs "github.com/hanwen/go-fuse/v2/fs" "github.com/hanwen/go-fuse/v2/fuse" "github.com/rclone/rclone/fs/log" "github.com/rclone/rclone/vfs" ) // FileHandle is a resource identifier for opened files. Usually, a // FileHandle should implement some of the FileXxxx interfaces. // // All of the FileXxxx operations can also be implemented at the // InodeEmbedder level, for example, one can implement NodeReader // instead of FileReader. // // FileHandles are useful in two cases: First, if the underlying // storage systems needs a handle for reading/writing. This is the // case with Unix system calls, which need a file descriptor (See also // the function `NewLoopbackFile`). Second, it is useful for // implementing files whose contents are not tied to an inode. For // example, a file like `/proc/interrupts` has no fixed content, but // changes on each open call. This means that each file handle must // have its own view of the content; this view can be tied to a // FileHandle. Files that have such dynamic content should return the // FOPEN_DIRECT_IO flag from their `Open` method. See directio_test.go // for an example. type FileHandle struct { h vfs.Handle fsys *FS } // Create a new FileHandle func newFileHandle(h vfs.Handle, fsys *FS) *FileHandle { return &FileHandle{ h: h, fsys: fsys, } } // Check interface satisfied var _ fusefs.FileHandle = (*FileHandle)(nil) // The String method is for debug printing. func (f *FileHandle) String() string { return fmt.Sprintf("fh=%p(%s)", f, f.h.Node().Path()) } // Read data from a file. The data should be returned as // ReadResult, which may be constructed from the incoming // `dest` buffer. 
func (f *FileHandle) Read(ctx context.Context, dest []byte, off int64) (res fuse.ReadResult, errno syscall.Errno) { var n int var err error defer log.Trace(f, "off=%d", off)("n=%d, off=%d, errno=%v", &n, &off, &errno) n, err = f.h.ReadAt(dest, off) if err == io.EOF { err = nil } return fuse.ReadResultData(dest[:n]), translateError(err) } var _ fusefs.FileReader = (*FileHandle)(nil) // Write the data into the file handle at given offset. After // returning, the data will be reused and may not referenced. func (f *FileHandle) Write(ctx context.Context, data []byte, off int64) (written uint32, errno syscall.Errno) { var n int var err error defer log.Trace(f, "off=%d", off)("n=%d, off=%d, errno=%v", &n, &off, &errno) n, err = f.h.WriteAt(data, off) return uint32(n), translateError(err) } var _ fusefs.FileWriter = (*FileHandle)(nil) // Flush is called for the close(2) call on a file descriptor. In case // of a descriptor that was duplicated using dup(2), it may be called // more than once for the same FileHandle. func (f *FileHandle) Flush(ctx context.Context) syscall.Errno { return translateError(f.h.Flush()) } var _ fusefs.FileFlusher = (*FileHandle)(nil) // Release is called to before a FileHandle is forgotten. The // kernel ignores the return value of this method, // so any cleanup that requires specific synchronization or // could fail with I/O errors should happen in Flush instead. func (f *FileHandle) Release(ctx context.Context) syscall.Errno { return translateError(f.h.Release()) } var _ fusefs.FileReleaser = (*FileHandle)(nil) // Fsync is a signal to ensure writes to the Inode are flushed // to stable storage. func (f *FileHandle) Fsync(ctx context.Context, flags uint32) (errno syscall.Errno) { return translateError(f.h.Sync()) } var _ fusefs.FileFsyncer = (*FileHandle)(nil) // Getattr reads attributes for an Inode. The library will ensure that // Mode and Ino are set correctly. 
For files that are not opened with // FOPEN_DIRECTIO, Size should be set so it can be read correctly. If // returning zeroed permissions, the default behavior is to change the // mode of 0755 (directory) or 0644 (files). This can be switched off // with the Options.NullPermissions setting. If blksize is unset, 4096 // is assumed, and the 'blocks' field is set accordingly. func (f *FileHandle) Getattr(ctx context.Context, out *fuse.AttrOut) (errno syscall.Errno) { defer log.Trace(f, "")("attr=%v, errno=%v", &out, &errno) f.fsys.setAttrOut(f.h.Node(), out) return 0 } var _ fusefs.FileGetattrer = (*FileHandle)(nil) // Setattr sets attributes for an Inode. func (f *FileHandle) Setattr(ctx context.Context, in *fuse.SetAttrIn, out *fuse.AttrOut) (errno syscall.Errno) { defer log.Trace(f, "in=%v", in)("attr=%v, errno=%v", &out, &errno) var err error f.fsys.setAttrOut(f.h.Node(), out) size, ok := in.GetSize() if ok { err = f.h.Truncate(int64(size)) if err != nil { return translateError(err) } out.Attr.Size = size } mtime, ok := in.GetMTime() if ok { err = f.h.Node().SetModTime(mtime) if err != nil { return translateError(err) } out.Attr.Mtime = uint64(mtime.Unix()) out.Attr.Mtimensec = uint32(mtime.Nanosecond()) } return 0 } var _ fusefs.FileSetattrer = (*FileHandle)(nil)
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/mount2/node.go
cmd/mount2/node.go
//go:build linux || (darwin && amd64)

package mount2

import (
	"context"
	"os"
	"path"
	"syscall"

	fusefs "github.com/hanwen/go-fuse/v2/fs"
	"github.com/hanwen/go-fuse/v2/fuse"

	"github.com/rclone/rclone/cmd/mountlib"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/log"
	"github.com/rclone/rclone/vfs"
)

// Node represents a directory or file
type Node struct {
	fusefs.Inode
	node vfs.Node // backing VFS node
	fsys *FS      // owning filesystem
}

// Node types must be InodeEmbedders
var _ fusefs.InodeEmbedder = (*Node)(nil)

// newNode creates a new fusefs.Node from a vfs Node
func newNode(fsys *FS, vfsNode vfs.Node) (node *Node) {
	// Check the vfsNode to see if it has a fuse Node cached
	// We must return the same fuse nodes for vfs Nodes
	node, ok := vfsNode.Sys().(*Node)
	if ok {
		return node
	}
	node = &Node{
		node: vfsNode,
		fsys: fsys,
	}
	// Cache the node for later
	vfsNode.SetSys(node)
	return node
}

// String used for pretty printing.
func (n *Node) String() string {
	return n.node.Path()
}

// lookup a Node in a directory - returns ENOTDIR if n is not a directory
func (n *Node) lookupVfsNodeInDir(leaf string) (vfsNode vfs.Node, errno syscall.Errno) {
	dir, ok := n.node.(*vfs.Dir)
	if !ok {
		return nil, syscall.ENOTDIR
	}
	vfsNode, err := dir.Stat(leaf)
	return vfsNode, translateError(err)
}

// // lookup a Dir given a path
// func (n *Node) lookupDir(path string) (dir *vfs.Dir, code fuse.Status) {
// 	node, code := fsys.lookupVfsNodeInDir(path)
// 	if !code.Ok() {
// 		return nil, code
// 	}
// 	dir, ok := n.(*vfs.Dir)
// 	if !ok {
// 		return nil, fuse.ENOTDIR
// 	}
// 	return dir, fuse.OK
// }

// // lookup a parent Dir given a path returning the dir and the leaf
// func (n *Node) lookupParentDir(filePath string) (leaf string, dir *vfs.Dir, code fuse.Status) {
// 	parentDir, leaf := path.Split(filePath)
// 	dir, code = fsys.lookupDir(parentDir)
// 	return leaf, dir, code
// }

// Statfs implements statistics for the filesystem that holds this
// Inode. If not defined, the `out` argument will zeroed with an OK
// result. This is because OSX filesystems must Statfs, or the mount
// will not work.
func (n *Node) Statfs(ctx context.Context, out *fuse.StatfsOut) syscall.Errno {
	defer log.Trace(n, "")("out=%+v", &out)
	const blockSize = 4096
	total, _, free := n.fsys.VFS.Statfs()
	out.Blocks = uint64(total) / blockSize // Total data blocks in file system.
	out.Bfree = uint64(free) / blockSize   // Free blocks in file system.
	out.Bavail = out.Bfree                 // Free blocks in file system if you're not root.
	out.Files = 1e9                        // Total files in file system.
	out.Ffree = 1e9                        // Free files in file system.
	out.Bsize = blockSize                  // Block size
	out.NameLen = 255                      // Maximum file name length?
	out.Frsize = blockSize                 // Fragment size, smallest addressable data size in the file system.
	mountlib.ClipBlocks(&out.Blocks)
	mountlib.ClipBlocks(&out.Bfree)
	mountlib.ClipBlocks(&out.Bavail)
	return 0
}

var _ = (fusefs.NodeStatfser)((*Node)(nil))

// Getattr reads attributes for an Inode. The library will ensure that
// Mode and Ino are set correctly. For files that are not opened with
// FOPEN_DIRECTIO, Size should be set so it can be read correctly. If
// returning zeroed permissions, the default behavior is to change the
// mode of 0755 (directory) or 0644 (files). This can be switched off
// with the Options.NullPermissions setting. If blksize is unset, 4096
// is assumed, and the 'blocks' field is set accordingly.
func (n *Node) Getattr(ctx context.Context, f fusefs.FileHandle, out *fuse.AttrOut) syscall.Errno {
	n.fsys.setAttrOut(n.node, out)
	return 0
}

var _ = (fusefs.NodeGetattrer)((*Node)(nil))

// Setattr sets attributes for an Inode.
// Handles size (truncate) and mtime changes, mirroring them into out.
func (n *Node) Setattr(ctx context.Context, f fusefs.FileHandle, in *fuse.SetAttrIn, out *fuse.AttrOut) (errno syscall.Errno) {
	defer log.Trace(n, "in=%v", in)("out=%#v, errno=%v", &out, &errno)
	var err error
	n.fsys.setAttrOut(n.node, out)
	size, ok := in.GetSize()
	if ok {
		err = n.node.Truncate(int64(size))
		if err != nil {
			return translateError(err)
		}
		out.Attr.Size = size
	}
	mtime, ok := in.GetMTime()
	if ok {
		err = n.node.SetModTime(mtime)
		if err != nil {
			return translateError(err)
		}
		out.Attr.Mtime = uint64(mtime.Unix())
		out.Attr.Mtimensec = uint32(mtime.Nanosecond())
	}
	return 0
}

var _ = (fusefs.NodeSetattrer)((*Node)(nil))

// Open opens an Inode (of regular file type) for reading. It
// is optional but recommended to return a FileHandle.
func (n *Node) Open(ctx context.Context, flags uint32) (fh fusefs.FileHandle, fuseFlags uint32, errno syscall.Errno) {
	defer log.Trace(n, "flags=%#o", flags)("errno=%v", &errno)
	// fuse flags are based off syscall flags as are os flags, so
	// should be compatible
	handle, err := n.node.Open(int(flags))
	if err != nil {
		return nil, 0, translateError(err)
	}
	// If size unknown then use direct io to read
	if entry := n.node.DirEntry(); entry != nil && entry.Size() < 0 {
		fuseFlags |= fuse.FOPEN_DIRECT_IO
	}
	if n.fsys.opt.DirectIO {
		fuseFlags |= fuse.FOPEN_DIRECT_IO
	}
	return newFileHandle(handle, n.fsys), fuseFlags, 0
}

var _ = (fusefs.NodeOpener)((*Node)(nil))

// Lookup should find a direct child of a directory by the child's name. If
// the entry does not exist, it should return ENOENT and optionally
// set a NegativeTimeout in `out`. If it does exist, it should return
// attribute data in `out` and return the Inode for the child. A new
// inode can be created using `Inode.NewInode`. The new Inode will be
// added to the FS tree automatically if the return status is OK.
//
// If a directory does not implement NodeLookuper, the library looks
// for an existing child with the given name.
//
// The input to a Lookup is {parent directory, name string}.
//
// Lookup, if successful, must return an *Inode. Once the Inode is
// returned to the kernel, the kernel can issue further operations,
// such as Open or Getxattr on that node.
//
// A successful Lookup also returns an EntryOut. Among others, this
// contains file attributes (mode, size, mtime, etc.).
//
// FUSE supports other operations that modify the namespace. For
// example, the Symlink, Create, Mknod, Link methods all create new
// children in directories. Hence, they also return *Inode and must
// populate their fuse.EntryOut arguments.
func (n *Node) Lookup(ctx context.Context, name string, out *fuse.EntryOut) (inode *fusefs.Inode, errno syscall.Errno) {
	defer log.Trace(n, "name=%q", name)("inode=%v, attr=%v, errno=%v", &inode, &out, &errno)
	vfsNode, errno := n.lookupVfsNodeInDir(name)
	if errno != 0 {
		return nil, errno
	}
	newNode := newNode(n.fsys, vfsNode)
	// FIXME
	// out.SetEntryTimeout(dt time.Duration)
	// out.SetAttrTimeout(dt time.Duration)
	n.fsys.setEntryOut(vfsNode, out)
	return n.NewInode(ctx, newNode, fusefs.StableAttr{Mode: out.Attr.Mode}), 0
}

var _ = (fusefs.NodeLookuper)((*Node)(nil))

// Opendir opens a directory Inode for reading its
// contents. The actual reading is driven from Readdir, so
// this method is just for performing sanity/permission
// checks. The default is to return success.
func (n *Node) Opendir(ctx context.Context) syscall.Errno {
	if !n.node.IsDir() {
		return syscall.ENOTDIR
	}
	return 0
}

var _ = (fusefs.NodeOpendirer)((*Node)(nil))

// dirStream iterates over a directory listing, synthesising "." and
// ".." entries before the real ones.
type dirStream struct {
	nodes []os.FileInfo // real directory entries
	i     int           // 0 and 1 are "." and ".."; nodes[i-2] afterwards
}

// HasNext indicates if there are further entries. HasNext
// might be called on already closed streams.
// The +2 accounts for the synthetic "." and ".." entries.
func (ds *dirStream) HasNext() bool {
	return ds.i < len(ds.nodes)+2
}

// Next retrieves the next entry. It is only called if HasNext
// has previously returned true. The Errno return may be used to
// indicate I/O errors
func (ds *dirStream) Next() (de fuse.DirEntry, errno syscall.Errno) {
	// defer log.Trace(nil, "")("de=%+v, errno=%v", &de, &errno)
	if ds.i == 0 {
		ds.i++
		return fuse.DirEntry{
			Mode: fuse.S_IFDIR,
			Name: ".",
			Ino:  0, // FIXME
		}, 0
	} else if ds.i == 1 {
		ds.i++
		return fuse.DirEntry{
			Mode: fuse.S_IFDIR,
			Name: "..",
			Ino:  0, // FIXME
		}, 0
	}
	fi := ds.nodes[ds.i-2]
	de = fuse.DirEntry{
		// Mode is the file's mode. Only the high bits (e.g. S_IFDIR)
		// are considered.
		Mode: getMode(fi),
		// Name is the basename of the file in the directory.
		Name: path.Base(fi.Name()),
		// Ino is the inode number.
		Ino: 0, // FIXME
	}
	ds.i++
	return de, 0
}

// Close releases resources related to this directory
// stream.
func (ds *dirStream) Close() {
}

var _ fusefs.DirStream = (*dirStream)(nil)

// Readdir opens a stream of directory entries.
//
// Readdir essentially returns a list of strings, and it is allowed
// for Readdir to return different results from Lookup. For example,
// you can return nothing for Readdir ("ls my-fuse-mount" is empty),
// while still implementing Lookup ("ls my-fuse-mount/a-specific-file"
// shows a single file).
//
// If a directory does not implement NodeReaddirer, a list of
// currently known children from the tree is returned. This means that
// static in-memory file systems need not implement NodeReaddirer.
func (n *Node) Readdir(ctx context.Context) (ds fusefs.DirStream, errno syscall.Errno) {
	defer log.Trace(n, "")("ds=%v, errno=%v", &ds, &errno)
	if !n.node.IsDir() {
		return nil, syscall.ENOTDIR
	}
	fh, err := n.node.Open(os.O_RDONLY)
	if err != nil {
		return nil, translateError(err)
	}
	// Close the handle on exit, surfacing a close error if nothing
	// else failed first.
	defer func() {
		closeErr := fh.Close()
		if errno == 0 && closeErr != nil {
			errno = translateError(closeErr)
		}
	}()
	items, err := fh.Readdir(-1)
	if err != nil {
		return nil, translateError(err)
	}
	return &dirStream{
		nodes: items,
	}, 0
}

var _ = (fusefs.NodeReaddirer)((*Node)(nil))

// Mkdir is similar to Lookup, but must create a directory entry and Inode.
// Default is to return EROFS.
func (n *Node) Mkdir(ctx context.Context, name string, mode uint32, out *fuse.EntryOut) (inode *fusefs.Inode, errno syscall.Errno) {
	defer log.Trace(name, "mode=0%o", mode)("inode=%v, errno=%v", &inode, &errno)
	dir, ok := n.node.(*vfs.Dir)
	if !ok {
		return nil, syscall.ENOTDIR
	}
	newDir, err := dir.Mkdir(name)
	if err != nil {
		return nil, translateError(err)
	}
	newNode := newNode(n.fsys, newDir)
	n.fsys.setEntryOut(newNode.node, out)
	newInode := n.NewInode(ctx, newNode, fusefs.StableAttr{Mode: out.Attr.Mode})
	return newInode, 0
}

var _ = (fusefs.NodeMkdirer)((*Node)(nil))

// Create is similar to Lookup, but should create a new
// child. It typically also returns a FileHandle as a
// reference for future reads/writes.
// Default is to return EROFS.
func (n *Node) Create(ctx context.Context, name string, flags uint32, mode uint32, out *fuse.EntryOut) (node *fusefs.Inode, fh fusefs.FileHandle, fuseFlags uint32, errno syscall.Errno) {
	defer log.Trace(n, "name=%q, flags=%#o, mode=%#o", name, flags, mode)("node=%v, fh=%v, flags=%#o, errno=%v", &node, &fh, &fuseFlags, &errno)
	dir, ok := n.node.(*vfs.Dir)
	if !ok {
		return nil, nil, 0, syscall.ENOTDIR
	}
	// translate the fuse flags to os flags
	osFlags := int(flags) | os.O_CREATE
	file, err := dir.Create(name, osFlags)
	if err != nil {
		return nil, nil, 0, translateError(err)
	}
	handle, err := file.Open(osFlags)
	if err != nil {
		return nil, nil, 0, translateError(err)
	}
	fh = newFileHandle(handle, n.fsys)
	// FIXME
	// fh = &fusefs.WithFlags{
	// 	File: fh,
	// 	//FuseFlags: fuse.FOPEN_NONSEEKABLE,
	// 	OpenFlags: flags,
	// }

	// Find the created node
	vfsNode, errno := n.lookupVfsNodeInDir(name)
	if errno != 0 {
		return nil, nil, 0, errno
	}
	n.fsys.setEntryOut(vfsNode, out)
	newNode := newNode(n.fsys, vfsNode)
	fs.Debugf(nil, "attr=%#v", out.Attr)
	newInode := n.NewInode(ctx, newNode, fusefs.StableAttr{Mode: out.Attr.Mode})
	return newInode, fh, 0, 0
}

var _ = (fusefs.NodeCreater)((*Node)(nil))

// Unlink should remove a child from this directory. If the
// return status is OK, the Inode is removed as child in the
// FS tree automatically. Default is to return EROFS.
func (n *Node) Unlink(ctx context.Context, name string) (errno syscall.Errno) {
	defer log.Trace(n, "name=%q", name)("errno=%v", &errno)
	vfsNode, errno := n.lookupVfsNodeInDir(name)
	if errno != 0 {
		return errno
	}
	return translateError(vfsNode.Remove())
}

var _ = (fusefs.NodeUnlinker)((*Node)(nil))

// Rmdir is like Unlink but for directories.
// Default is to return EROFS.
func (n *Node) Rmdir(ctx context.Context, name string) (errno syscall.Errno) {
	defer log.Trace(n, "name=%q", name)("errno=%v", &errno)
	vfsNode, errno := n.lookupVfsNodeInDir(name)
	if errno != 0 {
		return errno
	}
	return translateError(vfsNode.Remove())
}

var _ = (fusefs.NodeRmdirer)((*Node)(nil))

// Rename should move a child from one directory to a different
// one. The change is effected in the FS tree if the return status is
// OK. Default is to return EROFS.
func (n *Node) Rename(ctx context.Context, oldName string, newParent fusefs.InodeEmbedder, newName string, flags uint32) (errno syscall.Errno) {
	defer log.Trace(n, "oldName=%q, newParent=%v, newName=%q", oldName, newParent, newName)("errno=%v", &errno)
	oldDir, ok := n.node.(*vfs.Dir)
	if !ok {
		return syscall.ENOTDIR
	}
	newParentNode, ok := newParent.(*Node)
	if !ok {
		fs.Errorf(n, "newParent was not a *Node")
		return syscall.EIO
	}
	newDir, ok := newParentNode.node.(*vfs.Dir)
	if !ok {
		return syscall.ENOTDIR
	}
	return translateError(oldDir.Rename(oldName, newName, newDir))
}

var _ = (fusefs.NodeRenamer)((*Node)(nil))

// Getxattr should read data for the given attribute into
// `dest` and return the number of bytes. If `dest` is too
// small, it should return ERANGE and the size of the attribute.
// If not defined, Getxattr will return ENOATTR.
func (n *Node) Getxattr(ctx context.Context, attr string, dest []byte) (uint32, syscall.Errno) {
	return 0, syscall.ENOSYS // we never implement this
}

var _ fusefs.NodeGetxattrer = (*Node)(nil)

// Setxattr should store data for the given attribute. See
// setxattr(2) for information about flags.
// If not defined, Setxattr will return ENOATTR.
func (n *Node) Setxattr(ctx context.Context, attr string, data []byte, flags uint32) syscall.Errno {
	return syscall.ENOSYS // we never implement this
}

var _ fusefs.NodeSetxattrer = (*Node)(nil)

// Removexattr should delete the given attribute.
// If not defined, Removexattr will return ENOATTR.
func (n *Node) Removexattr(ctx context.Context, attr string) syscall.Errno {
	return syscall.ENOSYS // we never implement this
}

var _ fusefs.NodeRemovexattrer = (*Node)(nil)

// Listxattr should read all attributes (null terminated) into
// `dest`. If the `dest` buffer is too small, it should return ERANGE
// and the correct size. If not defined, return an empty list and
// success.
func (n *Node) Listxattr(ctx context.Context, dest []byte) (uint32, syscall.Errno) {
	return 0, syscall.ENOSYS // we never implement this
}

var _ fusefs.NodeListxattrer = (*Node)(nil)

var _ fusefs.NodeReadlinker = (*Node)(nil)

// Readlink read symbolic link target.
func (n *Node) Readlink(ctx context.Context) (ret []byte, err syscall.Errno) {
	defer log.Trace(n, "")("ret=%v, err=%v", &ret, &err)
	path := n.node.Path()
	s, serr := n.node.VFS().Readlink(path)
	return []byte(s), translateError(serr)
}

var _ fusefs.NodeSymlinker = (*Node)(nil)

// Symlink create symbolic link.
func (n *Node) Symlink(ctx context.Context, target, name string, out *fuse.EntryOut) (node *fusefs.Inode, err syscall.Errno) {
	defer log.Trace(n, "name=%v, target=%v", name, target)("node=%v, err=%v", &node, &err)
	fullPath := path.Join(n.node.Path(), name)
	vfsNode, serr := n.node.VFS().CreateSymlink(target, fullPath)
	if serr != nil {
		return nil, translateError(serr)
	}
	n.fsys.setEntryOut(vfsNode, out)
	newNode := newNode(n.fsys, vfsNode)
	newInode := n.NewInode(ctx, newNode, fusefs.StableAttr{Mode: out.Attr.Mode})
	return newInode, 0
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/mount2/mount_test.go
cmd/mount2/mount_test.go
//go:build linux package mount2 import ( "testing" "github.com/rclone/rclone/vfs/vfscommon" "github.com/rclone/rclone/vfs/vfstest" ) func TestMount(t *testing.T) { vfstest.RunTests(t, false, vfscommon.CacheModeOff, true, mount) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/mount2/mount.go
cmd/mount2/mount.go
//go:build linux || (darwin && amd64) // Package mount2 implements a FUSE mounting system for rclone remotes. package mount2 import ( "fmt" "runtime" "time" fusefs "github.com/hanwen/go-fuse/v2/fs" "github.com/hanwen/go-fuse/v2/fuse" "github.com/rclone/rclone/cmd/mountlib" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/vfs" ) func init() { mountlib.NewMountCommand("mount2", true, mount) mountlib.AddRc("mount2", mount) } // mountOptions configures the options from the command line flags // // man mount.fuse for more info and note the -o flag for other options func mountOptions(fsys *FS, f fs.Fs, opt *mountlib.Options) (mountOpts *fuse.MountOptions) { mountOpts = &fuse.MountOptions{ AllowOther: fsys.opt.AllowOther, FsName: opt.DeviceName, Name: "rclone", DisableXAttrs: true, Debug: fsys.opt.DebugFUSE, MaxReadAhead: int(fsys.opt.MaxReadAhead), MaxWrite: 1024 * 1024, // Linux v4.20+ caps requests at 1 MiB DisableReadDirPlus: true, // RememberInodes: true, // SingleThreaded: true, /* AllowOther bool // Options are passed as -o string to fusermount. Options []string // Default is _DEFAULT_BACKGROUND_TASKS, 12. This numbers // controls the allowed number of requests that relate to // async I/O. Concurrency for synchronous I/O is not limited. MaxBackground int // MaxWrite is the max size for read and write requests. If 0, use // go-fuse default (currently 64 kiB). // This number is internally capped at MAX_KERNEL_WRITE (higher values don't make // sense). // // Non-direct-io reads are mostly served via kernel readahead, which is // additionally subject to the MaxReadAhead limit. // // Implementation notes: // // There's four values the Linux kernel looks at when deciding the request size: // * MaxWrite, passed via InitOut.MaxWrite. Limits the WRITE size. // * max_read, passed via a string mount option. Limits the READ size. // go-fuse sets max_read equal to MaxWrite. // You can see the current max_read value in /proc/self/mounts . 
// * MaxPages, passed via InitOut.MaxPages. In Linux 4.20 and later, the value // can go up to 1 MiB and go-fuse calculates the MaxPages value acc. // to MaxWrite, rounding up. // On older kernels, the value is fixed at 128 kiB and the // passed value is ignored. No request can be larger than MaxPages, so // READ and WRITE are effectively capped at MaxPages. // * MaxReadAhead, passed via InitOut.MaxReadAhead. MaxWrite int // MaxReadAhead is the max read ahead size to use. It controls how much data the // kernel reads in advance to satisfy future read requests from applications. // How much exactly is subject to clever heuristics in the kernel // (see https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/mm/readahead.c?h=v6.2-rc5#n375 // if you are brave) and hence also depends on the kernel version. // // If 0, use kernel default. This number is capped at the kernel maximum // (128 kiB on Linux) and cannot be larger than MaxWrite. // // MaxReadAhead only affects buffered reads (=non-direct-io), but even then, the // kernel can and does send larger reads to satisfy read requests from applications // (up to MaxWrite or VM_READAHEAD_PAGES=128 kiB, whichever is less). MaxReadAhead int // If IgnoreSecurityLabels is set, all security related xattr // requests will return NO_DATA without passing through the // user defined filesystem. You should only set this if you // file system implements extended attributes, and you are not // interested in security labels. IgnoreSecurityLabels bool // ignoring labels should be provided as a fusermount mount option. // If RememberInodes is set, we will never forget inodes. // This may be useful for NFS. RememberInodes bool // Values shown in "df -T" and friends // First column, "Filesystem" FsName string // Second column, "Type", will be shown as "fuse." + Name Name string // If set, wrap the file system in a single-threaded locking wrapper. 
SingleThreaded bool // If set, return ENOSYS for Getxattr calls, so the kernel does not issue any // Xattr operations at all. DisableXAttrs bool // If set, print debugging information. Debug bool // If set, ask kernel to forward file locks to FUSE. If using, // you must implement the GetLk/SetLk/SetLkw methods. EnableLocks bool // If set, the kernel caches all Readlink return values. The // filesystem must use content notification to force the // kernel to issue a new Readlink call. EnableSymlinkCaching bool // If set, ask kernel not to do automatic data cache invalidation. // The filesystem is fully responsible for invalidating data cache. ExplicitDataCacheControl bool // Disable ReadDirPlus capability so ReadDir is used instead. Simple // directory queries (i.e. 'ls' without '-l') can be faster with // ReadDir, as no per-file stat calls are needed DisableReadDirPlus bool */ } var opts []string // FIXME doesn't work opts = append(opts, fmt.Sprintf("max_readahead=%d", maxReadAhead)) if fsys.opt.AllowOther { opts = append(opts, "allow_other") } if fsys.opt.AllowRoot { opts = append(opts, "allow_root") } if fsys.opt.DefaultPermissions { opts = append(opts, "default_permissions") } if fsys.VFS.Opt.ReadOnly { opts = append(opts, "ro") } if fsys.opt.WritebackCache { fs.Printf(nil, "FIXME --write-back-cache not supported") // FIXME opts = append(opts,fuse.WritebackCache()) } // Some OS X only options if runtime.GOOS == "darwin" { opts = append(opts, // VolumeName sets the volume name shown in Finder. fmt.Sprintf("volname=%s", opt.VolumeName), // NoAppleXattr makes OSXFUSE disallow extended attributes with the // prefix "com.apple.". This disables persistent Finder state and // other such information. "noapplexattr", // NoAppleDouble makes OSXFUSE disallow files with names used by OS X // to store extended attributes on file systems that do not support // them natively. 
// // Such file names are: // // ._* // .DS_Store "noappledouble", ) } mountOpts.Options = opts return mountOpts } // mount the file system // // The mount point will be ready when this returns. // // returns an error, and an error channel for the serve process to // report an error when fusermount is called. func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error, func() error, error) { f := VFS.Fs() if err := mountlib.CheckOverlap(f, mountpoint); err != nil { return nil, nil, err } if err := mountlib.CheckAllowNonEmpty(mountpoint, opt); err != nil { return nil, nil, err } fs.Debugf(f, "Mounting on %q", mountpoint) fsys := NewFS(VFS, opt) // nodeFsOpts := &fusefs.PathNodeFsOptions{ // ClientInodes: false, // Debug: mountlib.DebugFUSE, // } // nodeFs := fusefs.NewPathNodeFs(fsys, nodeFsOpts) //mOpts := fusefs.NewOptions() // default options // FIXME // mOpts.EntryTimeout = 10 * time.Second // mOpts.AttrTimeout = 10 * time.Second // mOpts.NegativeTimeout = 10 * time.Second //mOpts.Debug = mountlib.DebugFUSE //conn := fusefs.NewFileSystemConnector(nodeFs.Root(), mOpts) mountOpts := mountOptions(fsys, f, opt) // FIXME fill out opts := fusefs.Options{ MountOptions: *mountOpts, EntryTimeout: (*time.Duration)(&opt.AttrTimeout), AttrTimeout: (*time.Duration)(&opt.AttrTimeout), GID: VFS.Opt.GID, UID: VFS.Opt.UID, } root, err := fsys.Root() if err != nil { return nil, nil, err } rawFS := fusefs.NewNodeFS(root, &opts) server, err := fuse.NewServer(rawFS, mountpoint, &opts.MountOptions) if err != nil { return nil, nil, err } //mountOpts := &fuse.MountOptions{} //server, err := fusefs.Mount(mountpoint, fsys, &opts) // server, err := fusefs.Mount(mountpoint, root, &opts) // if err != nil { // return nil, nil, err // } umount := func() error { // Shutdown the VFS fsys.VFS.Shutdown() return server.Unmount() } // serverSettings := server.KernelSettings() // fs.Debugf(f, "Server settings %+v", serverSettings) // Serve the mount point in the background 
returning error to errChan errs := make(chan error, 1) go func() { server.Serve() errs <- nil }() fs.Debugf(f, "Waiting for the mount to start...") err = server.WaitMount() if err != nil { return nil, nil, err } fs.Debugf(f, "Mount started") return errs, umount, nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/mount2/mount_unsupported.go
cmd/mount2/mount_unsupported.go
//go:build !linux && (!darwin || !amd64) // Package mount2 implements a FUSE mounting system for rclone remotes. // // Build for mount for unsupported platforms to stop go complaining // about "no buildable Go source files". package mount2
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/mount2/fs.go
cmd/mount2/fs.go
// FUSE main Fs //go:build linux || (darwin && amd64) package mount2 import ( "os" "syscall" "time" "github.com/hanwen/go-fuse/v2/fuse" "github.com/rclone/rclone/cmd/mountlib" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/log" "github.com/rclone/rclone/vfs" ) // FS represents the top level filing system type FS struct { VFS *vfs.VFS f fs.Fs opt *mountlib.Options } // NewFS creates a pathfs.FileSystem from the fs.Fs passed in func NewFS(VFS *vfs.VFS, opt *mountlib.Options) *FS { fsys := &FS{ VFS: VFS, f: VFS.Fs(), opt: opt, } return fsys } // Root returns the root node func (f *FS) Root() (node *Node, err error) { defer log.Trace("", "")("node=%+v, err=%v", &node, &err) root, err := f.VFS.Root() if err != nil { return nil, err } return newNode(f, root), nil } // SetDebug if called, provide debug output through the log package. func (f *FS) SetDebug(debug bool) { fs.Debugf(f.f, "SetDebug %v", debug) } // get the Mode from a vfs Node func getMode(node os.FileInfo) uint32 { vfsMode := node.Mode() Mode := vfsMode.Perm() if vfsMode&os.ModeDir != 0 { Mode |= fuse.S_IFDIR } else if vfsMode&os.ModeSymlink != 0 { Mode |= fuse.S_IFLNK } else if vfsMode&os.ModeNamedPipe != 0 { Mode |= fuse.S_IFIFO } else { Mode |= fuse.S_IFREG } return uint32(Mode) } // fill in attr from node func setAttr(node vfs.Node, attr *fuse.Attr) { Size := uint64(node.Size()) const BlockSize = 512 Blocks := (Size + BlockSize - 1) / BlockSize modTime := node.ModTime() // set attributes vfs := node.VFS() attr.Owner.Gid = vfs.Opt.GID attr.Owner.Uid = vfs.Opt.UID attr.Mode = getMode(node) attr.Size = Size attr.Nlink = 1 attr.Blocks = Blocks // attr.Blksize = BlockSize // not supported in freebsd/darwin, defaults to 4k if not set s := uint64(modTime.Unix()) ns := uint32(modTime.Nanosecond()) attr.Atime = s attr.Atimensec = ns attr.Mtime = s attr.Mtimensec = ns attr.Ctime = s attr.Ctimensec = ns //attr.Rdev } // fill in AttrOut from node func (f *FS) 
setAttrOut(node vfs.Node, out *fuse.AttrOut) { setAttr(node, &out.Attr) out.SetTimeout(time.Duration(f.opt.AttrTimeout)) } // fill in EntryOut from node func (f *FS) setEntryOut(node vfs.Node, out *fuse.EntryOut) { setAttr(node, &out.Attr) out.SetEntryTimeout(time.Duration(f.opt.AttrTimeout)) out.SetAttrTimeout(time.Duration(f.opt.AttrTimeout)) } // Translate errors from mountlib into Syscall error numbers func translateError(err error) syscall.Errno { if err == nil { return 0 } _, uErr := fserrors.Cause(err) switch uErr { case vfs.OK: return 0 case vfs.ENOENT, fs.ErrorDirNotFound, fs.ErrorObjectNotFound: return syscall.ENOENT case vfs.EEXIST, fs.ErrorDirExists: return syscall.EEXIST case vfs.EPERM, fs.ErrorPermissionDenied: return syscall.EPERM case vfs.ECLOSED: return syscall.EBADF case vfs.ENOTEMPTY: return syscall.ENOTEMPTY case vfs.ESPIPE: return syscall.ESPIPE case vfs.EBADF: return syscall.EBADF case vfs.EROFS: return syscall.EROFS case vfs.ENOSYS, fs.ErrorNotImplemented: return syscall.ENOSYS case vfs.EINVAL: return syscall.EINVAL case vfs.ELOOP: return syscall.ELOOP } fs.Errorf(nil, "IO error: %v", err) return syscall.EIO }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/cryptdecode/cryptdecode.go
cmd/cryptdecode/cryptdecode.go
// Package cryptdecode provides the cryptdecode command. package cryptdecode import ( "errors" "fmt" "strings" "github.com/rclone/rclone/backend/crypt" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/flags" "github.com/spf13/cobra" ) // Options set by command line flags var ( Reverse = false ) func init() { cmd.Root.AddCommand(commandDefinition) cmdFlags := commandDefinition.Flags() flags.BoolVarP(cmdFlags, &Reverse, "reverse", "", Reverse, "Reverse cryptdecode, encrypts filenames", "") } var commandDefinition = &cobra.Command{ Use: "cryptdecode encryptedremote: encryptedfilename", Short: `Cryptdecode returns unencrypted file names.`, Long: `Returns unencrypted file names when provided with a list of encrypted file names. List limit is 10 items. If you supply the ` + "`--reverse`" + ` flag, it will return encrypted file names. use it like this ` + "```console" + ` rclone cryptdecode encryptedremote: encryptedfilename1 encryptedfilename2 rclone cryptdecode --reverse encryptedremote: filename1 filename2 ` + "```" + ` Another way to accomplish this is by using the ` + "`rclone backend encode` (or `decode`)" + ` command. 
See the documentation on the [crypt](/crypt/) overlay for more info.`, Annotations: map[string]string{ "versionIntroduced": "v1.38", }, Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(2, 11, command, args) cmd.Run(false, false, command, func() error { fsInfo, _, _, config, err := fs.ConfigFs(args[0]) if err != nil { return err } if fsInfo.Name != "crypt" { return errors.New("the remote needs to be of type \"crypt\"") } cipher, err := crypt.NewCipher(config) if err != nil { return err } if Reverse { return cryptEncode(cipher, args[1:]) } return cryptDecode(cipher, args[1:]) }) }, } // cryptDecode returns the unencrypted file name func cryptDecode(cipher *crypt.Cipher, args []string) error { var output strings.Builder for _, encryptedFileName := range args { fileName, err := cipher.DecryptFileName(encryptedFileName) if err != nil { output.WriteString(fmt.Sprintln(encryptedFileName, "\t", "Failed to decrypt")) } else { output.WriteString(fmt.Sprintln(encryptedFileName, "\t", fileName)) } } fmt.Print(output.String()) return nil } // cryptEncode returns the encrypted file name func cryptEncode(cipher *crypt.Cipher, args []string) error { var output strings.Builder for _, fileName := range args { encryptedFileName := cipher.EncryptFileName(fileName) output.WriteString(fmt.Sprintln(fileName, "\t", encryptedFileName)) } fmt.Print(output.String()) return nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/mount/file.go
cmd/mount/file.go
//go:build linux package mount import ( "context" "os" "syscall" "time" "bazil.org/fuse" fusefs "bazil.org/fuse/fs" "github.com/rclone/rclone/fs/log" "github.com/rclone/rclone/vfs" ) // File represents a file type File struct { *vfs.File fsys *FS } // Check interface satisfied var _ fusefs.Node = (*File)(nil) // Attr fills out the attributes for the file func (f *File) Attr(ctx context.Context, a *fuse.Attr) (err error) { defer log.Trace(f, "")("a=%+v, err=%v", a, &err) a.Valid = time.Duration(f.fsys.opt.AttrTimeout) modTime := f.File.ModTime() Size := uint64(f.File.Size()) Blocks := (Size + 511) / 512 a.Gid = f.VFS().Opt.GID a.Uid = f.VFS().Opt.UID a.Mode = f.File.Mode() &^ os.ModeAppend a.Size = Size a.Atime = modTime a.Mtime = modTime a.Ctime = modTime a.Blocks = Blocks return nil } // Check interface satisfied var _ fusefs.NodeSetattrer = (*File)(nil) // Setattr handles attribute changes from FUSE. Currently supports ModTime and Size only func (f *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) (err error) { defer log.Trace(f, "a=%+v", req)("err=%v", &err) if !f.VFS().Opt.NoModTime { if req.Valid.Mtime() { err = f.File.SetModTime(req.Mtime) } else if req.Valid.MtimeNow() { err = f.File.SetModTime(time.Now()) } } if req.Valid.Size() { err = f.File.Truncate(int64(req.Size)) } return translateError(err) } // Check interface satisfied var _ fusefs.NodeOpener = (*File)(nil) // Open the file for read or write func (f *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fh fusefs.Handle, err error) { defer log.Trace(f, "flags=%v", req.Flags)("fh=%v, err=%v", &fh, &err) // fuse flags are based off syscall flags as are os flags, so // should be compatible handle, err := f.File.Open(int(req.Flags)) if err != nil { return nil, translateError(err) } // If size unknown then use direct io to read if entry := handle.Node().DirEntry(); entry != nil && entry.Size() < 0 { resp.Flags |= fuse.OpenDirectIO } if 
f.fsys.opt.DirectIO { resp.Flags |= fuse.OpenDirectIO } return &FileHandle{handle}, nil } // Check interface satisfied var _ fusefs.NodeFsyncer = (*File)(nil) // Fsync the file // // Note that we don't do anything except return OK func (f *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) (err error) { defer log.Trace(f, "")("err=%v", &err) return nil } // Getxattr gets an extended attribute by the given name from the // node. // // If there is no xattr by that name, returns fuse.ErrNoXattr. func (f *File) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { return syscall.ENOSYS // we never implement this } var _ fusefs.NodeGetxattrer = (*File)(nil) // Listxattr lists the extended attributes recorded for the node. func (f *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error { return syscall.ENOSYS // we never implement this } var _ fusefs.NodeListxattrer = (*File)(nil) // Setxattr sets an extended attribute with the given name and // value for the node. func (f *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error { return syscall.ENOSYS // we never implement this } var _ fusefs.NodeSetxattrer = (*File)(nil) // Removexattr removes an extended attribute for the name. // // If there is no xattr by that name, returns fuse.ErrNoXattr. func (f *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error { return syscall.ENOSYS // we never implement this } var _ fusefs.NodeRemovexattrer = (*File)(nil) var _ fusefs.NodeReadlinker = (*File)(nil) // Readlink read symbolic link target. func (f *File) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (ret string, err error) { defer log.Trace(f, "")("ret=%v, err=%v", &ret, &err) return f.VFS().Readlink(f.Path()) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/mount/handle.go
cmd/mount/handle.go
//go:build linux package mount import ( "context" "io" "bazil.org/fuse" fusefs "bazil.org/fuse/fs" "github.com/rclone/rclone/fs/log" "github.com/rclone/rclone/vfs" ) // FileHandle is an open for read file handle on a File type FileHandle struct { vfs.Handle } // Check interface satisfied var _ fusefs.HandleReader = (*FileHandle)(nil) // Read from the file handle func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) (err error) { var n int defer log.Trace(fh, "len=%d, offset=%d", req.Size, req.Offset)("read=%d, err=%v", &n, &err) data := resp.Data[:req.Size] n, err = fh.Handle.ReadAt(data, req.Offset) resp.Data = data[:n] if err == io.EOF { err = nil } return translateError(err) } // Check interface satisfied var _ fusefs.HandleWriter = (*FileHandle)(nil) // Write data to the file handle func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) (err error) { defer log.Trace(fh, "len=%d, offset=%d", len(req.Data), req.Offset)("written=%d, err=%v", &resp.Size, &err) n, err := fh.Handle.WriteAt(req.Data, req.Offset) if err != nil { return translateError(err) } resp.Size = n return nil } // Check interface satisfied var _ fusefs.HandleFlusher = (*FileHandle)(nil) // Flush is called on each close() of a file descriptor. So if a // filesystem wants to return write errors in close() and the file has // cached dirty data, this is a good place to write back data and // return any errors. Since many applications ignore close() errors // this is not always useful. // // NOTE: The flush() method may be called more than once for each // open(). This happens if more than one file descriptor refers to an // opened file due to dup(), dup2() or fork() calls. It is not // possible to determine if a flush is final, so each flush should be // treated equally. Multiple write-flush sequences are relatively // rare, so this shouldn't be a problem. 
// // Filesystems shouldn't assume that flush will always be called after // some writes, or that if will be called at all. func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) (err error) { defer log.Trace(fh, "")("err=%v", &err) return translateError(fh.Handle.Flush()) } var _ fusefs.HandleReleaser = (*FileHandle)(nil) // Release is called when we are finished with the file handle // // It isn't called directly from userspace so the error is ignored by // the kernel func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) (err error) { defer log.Trace(fh, "")("err=%v", &err) return translateError(fh.Handle.Release()) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/mount/mount_test.go
cmd/mount/mount_test.go
//go:build linux package mount import ( "testing" "github.com/rclone/rclone/vfs/vfscommon" "github.com/rclone/rclone/vfs/vfstest" ) func TestMount(t *testing.T) { vfstest.RunTests(t, false, vfscommon.CacheModeOff, true, mount) }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/mount/mount.go
cmd/mount/mount.go
//go:build linux // Package mount implements a FUSE mounting system for rclone remotes. package mount import ( "fmt" "time" "bazil.org/fuse" fusefs "bazil.org/fuse/fs" "github.com/rclone/rclone/cmd/mountlib" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/vfs" ) func init() { mountlib.NewMountCommand("mount", false, mount) mountlib.AddRc("mount", mount) } // mountOptions configures the options from the command line flags func mountOptions(VFS *vfs.VFS, device string, opt *mountlib.Options) (options []fuse.MountOption) { options = []fuse.MountOption{ fuse.MaxReadahead(uint32(opt.MaxReadAhead)), fuse.Subtype("rclone"), fuse.FSName(device), // Options from benchmarking in the fuse module //fuse.MaxReadahead(64 * 1024 * 1024), //fuse.WritebackCache(), } if opt.AsyncRead { options = append(options, fuse.AsyncRead()) } if opt.AllowOther { options = append(options, fuse.AllowOther()) } if opt.AllowRoot { // options = append(options, fuse.AllowRoot()) fs.Errorf(nil, "Ignoring --allow-root. Support has been removed upstream - see https://github.com/bazil/fuse/issues/144 for more info") } if opt.DefaultPermissions { options = append(options, fuse.DefaultPermissions()) } if VFS.Opt.ReadOnly { options = append(options, fuse.ReadOnly()) } if opt.WritebackCache { options = append(options, fuse.WritebackCache()) } if opt.DaemonTimeout != 0 { options = append(options, fuse.DaemonTimeout(fmt.Sprint(int(time.Duration(opt.DaemonTimeout).Seconds())))) } if len(opt.ExtraOptions) > 0 { fs.Errorf(nil, "-o/--option not supported with this FUSE backend") } if len(opt.ExtraFlags) > 0 { fs.Errorf(nil, "--fuse-flag not supported with this FUSE backend") } return options } // mount the file system // // The mount point will be ready when this returns. // // returns an error, and an error channel for the serve process to // report an error when fusermount is called. 
func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error, func() error, error) { f := VFS.Fs() if err := mountlib.CheckOverlap(f, mountpoint); err != nil { return nil, nil, err } if err := mountlib.CheckAllowNonEmpty(mountpoint, opt); err != nil { return nil, nil, err } fs.Debugf(f, "Mounting on %q", mountpoint) if opt.DebugFUSE { fuse.Debug = func(msg any) { fs.Debugf("fuse", "%v", msg) } } c, err := fuse.Mount(mountpoint, mountOptions(VFS, opt.DeviceName, opt)...) if err != nil { return nil, nil, err } filesys := NewFS(VFS, opt) filesys.server = fusefs.New(c, nil) // Serve the mount point in the background returning error to errChan errChan := make(chan error, 1) go func() { err := filesys.server.Serve(filesys) closeErr := c.Close() if err == nil { err = closeErr } errChan <- err }() unmount := func() error { // Shutdown the VFS filesys.VFS.Shutdown() return fuse.Unmount(mountpoint) } return errChan, unmount, nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/mount/mount_unsupported.go
cmd/mount/mount_unsupported.go
//go:build !linux // Package mount implements a FUSE mounting system for rclone remotes. // // Build for mount for unsupported platforms to stop go complaining // about "no buildable Go source files". package mount
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/mount/fs.go
cmd/mount/fs.go
// FUSE main Fs

//go:build linux

package mount

import (
	"context"
	"syscall"

	"bazil.org/fuse"
	fusefs "bazil.org/fuse/fs"
	"github.com/rclone/rclone/cmd/mountlib"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/log"
	"github.com/rclone/rclone/vfs"
)

// FS represents the top level filing system
type FS struct {
	*vfs.VFS                   // the VFS the mount is serving
	f        fs.Fs             // the remote backing the VFS
	opt      *mountlib.Options // mount options in force
	server   *fusefs.Server    // FUSE server, set by mount() after creation
}

// Check interface satisfied
var _ fusefs.FS = (*FS)(nil)

// NewFS makes a new FS
func NewFS(VFS *vfs.VFS, opt *mountlib.Options) *FS {
	fsys := &FS{
		VFS: VFS,
		f:   VFS.Fs(),
		opt: opt,
	}
	return fsys
}

// Root returns the root node
func (f *FS) Root() (node fusefs.Node, err error) {
	defer log.Trace("", "")("node=%+v, err=%v", &node, &err)
	root, err := f.VFS.Root()
	if err != nil {
		return nil, translateError(err)
	}
	return &Dir{root, f}, nil
}

// Check interface satisfied
var _ fusefs.FSStatfser = (*FS)(nil)

// Statfs is called to obtain file system metadata.
// It should write that data to resp.
//
// Sizes from the VFS are reported in fixed 4096 byte blocks; file counts
// are made up since the VFS has no cheap way to count them.
func (f *FS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.StatfsResponse) (err error) {
	defer log.Trace("", "")("stat=%+v, err=%v", resp, &err)
	const blockSize = 4096
	total, _, free := f.VFS.Statfs()
	resp.Blocks = uint64(total) / blockSize // Total data blocks in file system.
	resp.Bfree = uint64(free) / blockSize   // Free blocks in file system.
	resp.Bavail = resp.Bfree                // Free blocks in file system if you're not root.
	resp.Files = 1e9                        // Total files in file system.
	resp.Ffree = 1e9                        // Free files in file system.
	resp.Bsize = blockSize                  // Block size
	resp.Namelen = 255                      // Maximum file name length?
	resp.Frsize = blockSize                 // Fragment size, smallest addressable data size in the file system.
	// Clip the block counts so they fit in the sizes the kernel expects
	mountlib.ClipBlocks(&resp.Blocks)
	mountlib.ClipBlocks(&resp.Bfree)
	mountlib.ClipBlocks(&resp.Bavail)
	return nil
}

// Translate errors from mountlib
//
// VFS and fs errors are mapped onto the equivalent errno for FUSE;
// unrecognised errors are logged and passed through unchanged.
func translateError(err error) error {
	if err == nil {
		return nil
	}
	_, uErr := fserrors.Cause(err)
	switch uErr {
	case vfs.OK:
		return nil
	case vfs.ENOENT, fs.ErrorDirNotFound, fs.ErrorObjectNotFound:
		return fuse.Errno(syscall.ENOENT)
	case vfs.EEXIST, fs.ErrorDirExists:
		return fuse.Errno(syscall.EEXIST)
	case vfs.EPERM, fs.ErrorPermissionDenied:
		return fuse.Errno(syscall.EPERM)
	case vfs.ECLOSED:
		return fuse.Errno(syscall.EBADF)
	case vfs.ENOTEMPTY:
		return fuse.Errno(syscall.ENOTEMPTY)
	case vfs.ESPIPE:
		return fuse.Errno(syscall.ESPIPE)
	case vfs.EBADF:
		return fuse.Errno(syscall.EBADF)
	case vfs.EROFS:
		return fuse.Errno(syscall.EROFS)
	case vfs.ENOSYS, fs.ErrorNotImplemented:
		// NOTE(review): returned as a bare syscall.Errno unlike the
		// fuse.Errno wrapping used above - confirm bazil/fuse treats
		// both identically before normalising.
		return syscall.ENOSYS
	case vfs.EINVAL:
		return fuse.Errno(syscall.EINVAL)
	case vfs.ELOOP:
		return fuse.Errno(syscall.ELOOP)
	}
	fs.Errorf(nil, "IO error: %v", err)
	return err
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/mount/dir.go
cmd/mount/dir.go
//go:build linux package mount import ( "context" "fmt" "io" "os" "path" "syscall" "time" "bazil.org/fuse" fusefs "bazil.org/fuse/fs" "github.com/rclone/rclone/cmd/mountlib" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/log" "github.com/rclone/rclone/vfs" ) // Dir represents a directory entry type Dir struct { *vfs.Dir fsys *FS } // Check interface satisfied var _ fusefs.Node = (*Dir)(nil) // Attr updates the attributes of a directory func (d *Dir) Attr(ctx context.Context, a *fuse.Attr) (err error) { defer log.Trace(d, "")("attr=%+v, err=%v", a, &err) a.Valid = time.Duration(d.fsys.opt.AttrTimeout) a.Gid = d.VFS().Opt.GID a.Uid = d.VFS().Opt.UID a.Mode = d.Mode() modTime := d.ModTime() a.Atime = modTime a.Mtime = modTime a.Ctime = modTime // FIXME include Valid so get some caching? // FIXME fs.Debugf(d.path, "Dir.Attr %+v", a) return nil } // Check interface satisfied var _ fusefs.NodeSetattrer = (*Dir)(nil) // Setattr handles attribute changes from FUSE. Currently supports ModTime only. func (d *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) (err error) { defer log.Trace(d, "stat=%+v", req)("err=%v", &err) if d.VFS().Opt.NoModTime { return nil } if req.Valid.MtimeNow() { err = d.SetModTime(time.Now()) } else if req.Valid.Mtime() { err = d.SetModTime(req.Mtime) } return translateError(err) } // Check interface satisfied var _ fusefs.NodeRequestLookuper = (*Dir)(nil) // Lookup looks up a specific entry in the receiver. // // Lookup should return a Node corresponding to the entry. If the // name does not exist in the directory, Lookup should return ENOENT. // // Lookup need not to handle the names "." and "..". 
func (d *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fusefs.Node, err error) { defer log.Trace(d, "name=%q", req.Name)("node=%+v, err=%v", &node, &err) mnode, err := d.Dir.Stat(req.Name) if err != nil { return nil, translateError(err) } resp.EntryValid = time.Duration(d.fsys.opt.AttrTimeout) // Check the mnode to see if it has a fuse Node cached // We must return the same fuse nodes for vfs Nodes node, ok := mnode.Sys().(fusefs.Node) if ok { return node, nil } switch x := mnode.(type) { case *vfs.File: node = &File{x, d.fsys} case *vfs.Dir: node = &Dir{x, d.fsys} default: panic("bad type") } // Cache the node for later mnode.SetSys(node) return node, nil } // Check interface satisfied var _ fusefs.HandleReadDirAller = (*Dir)(nil) // ReadDirAll reads the contents of the directory func (d *Dir) ReadDirAll(ctx context.Context) (dirents []fuse.Dirent, err error) { itemsRead := -1 defer log.Trace(d, "")("item=%d, err=%v", &itemsRead, &err) items, err := d.Dir.ReadDirAll() if err != nil { return nil, translateError(err) } dirents = append(dirents, fuse.Dirent{ Type: fuse.DT_Dir, Name: ".", }, fuse.Dirent{ Type: fuse.DT_Dir, Name: "..", }) for _, node := range items { name := node.Name() if len(name) > mountlib.MaxLeafSize { fs.Errorf(d, "Name too long (%d bytes) for FUSE, skipping: %s", len(name), name) continue } var dirent = fuse.Dirent{ // Inode FIXME ??? 
Type: fuse.DT_File, Name: name, } if node.IsDir() { dirent.Type = fuse.DT_Dir } switch node := node.(type) { case *vfs.File: if node.IsSymlink() { dirent.Type = fuse.DT_Link } } dirents = append(dirents, dirent) } itemsRead = len(dirents) return dirents, nil } var _ fusefs.NodeCreater = (*Dir)(nil) // Create makes a new file func (d *Dir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (node fusefs.Node, handle fusefs.Handle, err error) { defer log.Trace(d, "name=%q", req.Name)("node=%v, handle=%v, err=%v", &node, &handle, &err) // translate the fuse flags to os flags osFlags := int(req.Flags) | os.O_CREATE file, err := d.Dir.Create(req.Name, osFlags) if err != nil { return nil, nil, translateError(err) } fh, err := file.Open(osFlags) if err != nil { return nil, nil, translateError(err) } node = &File{file, d.fsys} file.SetSys(node) // cache the FUSE node for later return node, &FileHandle{fh}, err } var _ fusefs.NodeMkdirer = (*Dir)(nil) // Mkdir creates a new directory func (d *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (node fusefs.Node, err error) { defer log.Trace(d, "name=%q", req.Name)("node=%+v, err=%v", &node, &err) dir, err := d.Dir.Mkdir(req.Name) if err != nil { return nil, translateError(err) } node = &Dir{dir, d.fsys} dir.SetSys(node) // cache the FUSE node for later return node, nil } var _ fusefs.NodeRemover = (*Dir)(nil) // Remove removes the entry with the given name from // the receiver, which must be a directory. The entry to be removed // may correspond to a file (unlink) or to a directory (rmdir). 
func (d *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) (err error) { defer log.Trace(d, "name=%q", req.Name)("err=%v", &err) err = d.Dir.RemoveName(req.Name) if err != nil { return translateError(err) } return nil } // Invalidate a leaf in a directory func (d *Dir) invalidateEntry(dirNode fusefs.Node, leaf string) { fs.Debugf(dirNode, "Invalidating %q", leaf) err := d.fsys.server.InvalidateEntry(dirNode, leaf) if err != nil { fs.Debugf(dirNode, "Failed to invalidate %q: %v", leaf, err) } } // Check interface satisfied var _ fusefs.NodeRenamer = (*Dir)(nil) // Rename the file func (d *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fusefs.Node) (err error) { defer log.Trace(d, "oldName=%q, newName=%q, newDir=%+v", req.OldName, req.NewName, newDir)("err=%v", &err) destDir, ok := newDir.(*Dir) if !ok { return fmt.Errorf("unknown Dir type %T", newDir) } err = d.Dir.Rename(req.OldName, req.NewName, destDir.Dir) if err != nil { return translateError(err) } // Invalidate the new directory entry so it gets re-read (in // the background otherwise we cause a deadlock) // // See https://github.com/rclone/rclone/issues/4977 for why go d.invalidateEntry(newDir, req.NewName) //go d.invalidateEntry(d, req.OldName) return nil } // Check interface satisfied var _ fusefs.NodeFsyncer = (*Dir)(nil) // Fsync the directory func (d *Dir) Fsync(ctx context.Context, req *fuse.FsyncRequest) (err error) { defer log.Trace(d, "")("err=%v", &err) err = d.Dir.Sync() if err != nil { return translateError(err) } return nil } // Check interface satisfied var _ fusefs.NodeLinker = (*Dir)(nil) // Link creates a new directory entry in the receiver based on an // existing Node. Receiver must be a directory. 
func (d *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fusefs.Node) (newNode fusefs.Node, err error) { defer log.Trace(d, "req=%v, old=%v", req, old)("new=%v, err=%v", &newNode, &err) return nil, syscall.ENOSYS } var _ fusefs.NodeSymlinker = (*Dir)(nil) // Symlink create a symbolic link. func (d *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (node fusefs.Node, err error) { defer log.Trace(d, "newname=%v, target=%v", req.NewName, req.Target)("node=%v, err=%v", &node, &err) newName := path.Join(d.Path(), req.NewName) target := req.Target n, err := d.VFS().CreateSymlink(target, newName) if err != nil { return nil, err } node = &File{n.(*vfs.File), d.fsys} return node, nil } // Check interface satisfied var _ fusefs.NodeMknoder = (*Dir)(nil) // Mknod is called to create a file. Since we define create this will // be called in preference, however NFS likes to call it for some // reason. We don't actually create a file here just the Node. func (d *Dir) Mknod(ctx context.Context, req *fuse.MknodRequest) (node fusefs.Node, err error) { defer log.Trace(d, "name=%v, mode=%d, rdev=%d", req.Name, req.Mode, req.Rdev)("node=%v, err=%v", &node, &err) if req.Rdev != 0 { fs.Errorf(d, "Can't create device node %q", req.Name) return nil, fuse.Errno(syscall.EIO) } var cReq = fuse.CreateRequest{ Name: req.Name, Flags: fuse.OpenFlags(os.O_CREATE | os.O_WRONLY), Mode: req.Mode, Umask: req.Umask, } var cResp fuse.CreateResponse node, handle, err := d.Create(ctx, &cReq, &cResp) if err != nil { return nil, err } err = handle.(io.Closer).Close() if err != nil { return nil, err } return node, nil }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/mount/test/seekers.go
cmd/mount/test/seekers.go
//go:build ignore // Read lots files with lots of simultaneous seeking to stress test the seek code package main import ( "flag" "io" "log" "math/rand" "os" "path/filepath" "sort" "sync" "time" ) var ( // Flags iterations = flag.Int("n", 1e6, "Iterations to try") maxBlockSize = flag.Int("b", 1024*1024, "Max block size to read") simultaneous = flag.Int("transfers", 16, "Number of simultaneous files to open") seeksPerFile = flag.Int("seeks", 8, "Seeks per file") mask = flag.Int64("mask", 0, "mask for seek, e.g. 0x7fff") ) func init() { rand.Seed(time.Now().UnixNano()) } func seekTest(n int, file string) { in, err := os.Open(file) if err != nil { log.Fatalf("Couldn't open %q: %v", file, err) } fi, err := in.Stat() if err != nil { log.Fatalf("Couldn't stat %q: %v", file, err) } size := fi.Size() // FIXME make sure we try start and end maxBlockSize := *maxBlockSize if int64(maxBlockSize) > size { maxBlockSize = int(size) } for i := 0; i < n; i++ { start := rand.Int63n(size) if *mask != 0 { start &^= *mask } blockSize := rand.Intn(maxBlockSize) beyondEnd := false switch rand.Intn(10) { case 0: start = 0 case 1: start = size - int64(blockSize) case 2: // seek beyond the end start = size + int64(blockSize) beyondEnd = true default: } if !beyondEnd && int64(blockSize) > size-start { blockSize = int(size - start) } log.Printf("%s: Reading %d from %d", file, blockSize, start) _, err = in.Seek(start, io.SeekStart) if err != nil { log.Fatalf("Seek failed on %q: %v", file, err) } buf := make([]byte, blockSize) n, err := io.ReadFull(in, buf) if beyondEnd && err == io.EOF { // OK } else if err != nil { log.Fatalf("Read failed on %q: %v (%d)", file, err, n) } } err = in.Close() if err != nil { log.Fatalf("Error closing %q: %v", file, err) } } // Find all the files in dir func findFiles(dir string) (files []string) { filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { if info.Mode().IsRegular() && info.Size() > 0 { files = append(files, path) } return nil }) 
sort.Strings(files) return files } func main() { flag.Parse() args := flag.Args() if len(args) != 1 { log.Fatalf("Require a directory as argument") } dir := args[0] files := findFiles(dir) jobs := make(chan string, *simultaneous) var wg sync.WaitGroup wg.Add(*simultaneous) for i := 0; i < *simultaneous; i++ { go func() { defer wg.Done() for file := range jobs { seekTest(*seeksPerFile, file) } }() } for i := 0; i < *iterations; i++ { i := rand.Intn(len(files)) jobs <- files[i] //jobs <- files[i] } close(jobs) wg.Wait() }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/mount/test/seek_speed.go
cmd/mount/test/seek_speed.go
//go:build ignore // Read blocks out of a single file to time the seeking code package main import ( "flag" "io" "log" "math/rand" "os" "time" ) var ( // Flags iterations = flag.Int("n", 25, "Iterations to try") maxBlockSize = flag.Int("b", 1024*1024, "Max block size to read") randSeed = flag.Int64("seed", 1, "Seed for the random number generator") ) func randomSeekTest(size int64, in *os.File, name string) { startTime := time.Now() start := rand.Int63n(size) blockSize := rand.Intn(*maxBlockSize) if int64(blockSize) > size-start { blockSize = int(size - start) } _, err := in.Seek(start, io.SeekStart) if err != nil { log.Fatalf("Seek failed on %q: %v", name, err) } buf := make([]byte, blockSize) _, err = io.ReadFull(in, buf) if err != nil { log.Fatalf("Read failed on %q: %v", name, err) } log.Printf("Reading %d from %d took %v ", blockSize, start, time.Since(startTime)) } func main() { flag.Parse() args := flag.Args() if len(args) != 1 { log.Fatalf("Require 1 file as argument") } rand.Seed(*randSeed) name := args[0] openStart := time.Now() in, err := os.Open(name) if err != nil { log.Fatalf("Couldn't open %q: %v", name, err) } log.Printf("File Open took %v", time.Since(openStart)) fi, err := in.Stat() if err != nil { log.Fatalf("Couldn't stat %q: %v", name, err) } start := time.Now() for i := 0; i < *iterations; i++ { randomSeekTest(fi.Size(), in, name) } dt := time.Since(start) log.Printf("That took %v for %d iterations, %v per iteration", dt, *iterations, dt/time.Duration(*iterations)) err = in.Close() if err != nil { log.Fatalf("Error closing %q: %v", name, err) } }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/mount/test/seeker.go
cmd/mount/test/seeker.go
//go:build ignore

// Read two files with lots of seeking to stress test the seek code
package main

import (
	"bytes"
	"flag"
	"io"
	"log"
	"math/rand"
	"os"
	"time"
)

var (
	// Flags
	iterations   = flag.Int("n", 1e6, "Iterations to try")
	maxBlockSize = flag.Int("b", 1024*1024, "Max block size to read")
)

func init() {
	rand.Seed(time.Now().UnixNano())
}

// randomSeekTest reads the same randomly chosen block from in1 and in2
// (two files assumed to have identical contents of the given size) and
// dies if the data differs. On mismatch the two blocks are dumped to
// /tmp/z1 and /tmp/z2 for inspection before exiting.
func randomSeekTest(size int64, in1, in2 *os.File, file1, file2 string) {
	start := rand.Int63n(size)
	blockSize := rand.Intn(*maxBlockSize)
	// Clamp the read so it doesn't run past the end of the files
	if int64(blockSize) > size-start {
		blockSize = int(size - start)
	}
	log.Printf("Reading %d from %d", blockSize, start)

	_, err := in1.Seek(start, io.SeekStart)
	if err != nil {
		log.Fatalf("Seek failed on %q: %v", file1, err)
	}
	_, err = in2.Seek(start, io.SeekStart)
	if err != nil {
		log.Fatalf("Seek failed on %q: %v", file2, err)
	}

	buf1 := make([]byte, blockSize)
	n1, err := io.ReadFull(in1, buf1)
	if err != nil {
		log.Fatalf("Read failed on %q: %v", file1, err)
	}

	buf2 := make([]byte, blockSize)
	n2, err := io.ReadFull(in2, buf2)
	if err != nil {
		log.Fatalf("Read failed on %q: %v", file2, err)
	}

	if n1 != n2 {
		log.Fatalf("Read different lengths %d (%q) != %d (%q)", n1, file1, n2, file2)
	}
	if !bytes.Equal(buf1, buf2) {
		// Save both blocks so the difference can be examined offline
		log.Printf("Dumping different blocks")
		err = os.WriteFile("/tmp/z1", buf1, 0777)
		if err != nil {
			log.Fatalf("Failed to write /tmp/z1: %v", err)
		}
		err = os.WriteFile("/tmp/z2", buf2, 0777)
		if err != nil {
			log.Fatalf("Failed to write /tmp/z2: %v", err)
		}
		log.Fatalf("Read different contents - saved in /tmp/z1 and /tmp/z2")
	}
}

func main() {
	flag.Parse()
	args := flag.Args()
	if len(args) != 2 {
		log.Fatalf("Require 2 files as argument")
	}

	file1, file2 := args[0], args[1]

	in1, err := os.Open(file1)
	if err != nil {
		log.Fatalf("Couldn't open %q: %v", file1, err)
	}
	in2, err := os.Open(file2)
	if err != nil {
		log.Fatalf("Couldn't open %q: %v", file2, err)
	}

	fi1, err := in1.Stat()
	if err != nil {
		log.Fatalf("Couldn't stat %q: %v", file1, err)
	}
	fi2, err := in2.Stat()
	if err != nil {
		log.Fatalf("Couldn't stat %q: %v", file2, err)
	}
	// The comparison only makes sense on identical files
	if fi1.Size() != fi2.Size() {
		log.Fatalf("Files not the same size")
	}

	for i := 0; i < *iterations; i++ {
		randomSeekTest(fi1.Size(), in1, in2, file1, file2)
	}

	err = in1.Close()
	if err != nil {
		log.Fatalf("Error closing %q: %v", file1, err)
	}
	err = in2.Close()
	if err != nil {
		log.Fatalf("Error closing %q: %v", file2, err)
	}
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/checksum/checksum.go
cmd/checksum/checksum.go
// Package checksum provides the checksum command. package checksum import ( "context" "fmt" "strings" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/cmd/check" // for common flags "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/operations" "github.com/spf13/cobra" ) var download = false func init() { cmd.Root.AddCommand(commandDefinition) cmdFlags := commandDefinition.Flags() flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by hashing the contents", "") check.AddFlags(cmdFlags) } var commandDefinition = &cobra.Command{ Use: "checksum <hash> sumfile dst:path", Short: `Checks the files in the destination against a SUM file.`, Long: strings.ReplaceAll(`Checks that hashsums of destination files match the SUM file. It compares hashes (MD5, SHA1, etc) and logs a report of files which don't match. It doesn't alter the file system. The sumfile is treated as the source and the dst:path is treated as the destination for the purposes of the output. If you supply the |--download| flag, it will download the data from the remote and calculate the content hash on the fly. This can be useful for remotes that don't support hashes or if you really want to check all the data. Note that hash values in the SUM file are treated as case insensitive. `, "|", "`") + check.FlagsHelp, Annotations: map[string]string{ "versionIntroduced": "v1.56", "groups": "Filter,Listing", }, RunE: func(command *cobra.Command, args []string) error { cmd.CheckArgs(3, 3, command, args) var hashType hash.Type if err := hashType.Set(args[0]); err != nil { fmt.Println(hash.HelpString(0)) return err } fsum, sumFile, fsrc := cmd.NewFsSrcFileDst(args[1:]) cmd.Run(false, true, command, func() error { opt, close, err := check.GetCheckOpt(nil, fsrc) if err != nil { return err } defer close() return operations.CheckSum(context.Background(), fsrc, fsum, sumFile, hashType, opt, download) }) return nil }, }
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/about/about.go
cmd/about/about.go
// Package about provides the about command.
package about

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"os"

	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/flags"
	"github.com/spf13/cobra"
)

var (
	jsonOutput bool // --json: emit machine-readable JSON instead of text
	fullOutput bool // --full: print raw byte counts instead of human-readable sizes
)

func init() {
	cmd.Root.AddCommand(commandDefinition)
	cmdFlags := commandDefinition.Flags()
	flags.BoolVarP(cmdFlags, &jsonOutput, "json", "", false, "Format output as JSON", "")
	flags.BoolVarP(cmdFlags, &fullOutput, "full", "", false, "Full numbers instead of human-readable", "")
}

// printValue formats uv to be output.
//
// Nothing is printed when uv is nil (the backend didn't supply the
// value). isSize selects byte-unit formatting over a plain count when
// not in --full mode.
func printValue(what string, uv *int64, isSize bool) {
	what += ":"
	if uv == nil {
		return
	}
	var val string
	if fullOutput {
		val = fmt.Sprintf("%d", *uv)
	} else if isSize {
		val = fs.SizeSuffix(*uv).ByteUnit()
	} else {
		val = fs.CountSuffix(*uv).String()
	}
	// Left-align the label in a 9 character column so values line up
	fmt.Printf("%-9s%v\n", what, val)
}

var commandDefinition = &cobra.Command{
	Use:   "about remote:",
	Short: `Get quota information from the remote.`,
	Long: `Prints quota information about a remote to standard
output. The output is typically used, free, quota and trash contents.

E.g. Typical output from ` + "`rclone about remote:`" + ` is:

` + "```text" + `
Total:   17 GiB
Used:    7.444 GiB
Free:    1.315 GiB
Trashed: 100.000 MiB
Other:   8.241 GiB
` + "```" + `

Where the fields are:

- Total: Total size available.
- Used: Total size used.
- Free: Total space available to this user.
- Trashed: Total space used by trash.
- Other: Total amount in other storage (e.g. Gmail, Google Photos).
- Objects: Total number of objects in the storage.

All sizes are in number of bytes.

Applying a ` + "`--full`" + ` flag to the command prints the bytes in full, e.g.

` + "```text" + `
Total:   18253611008
Used:    7993453766
Free:    1411001220
Trashed: 104857602
Other:   8849156022
` + "```" + `

A ` + "`--json`" + ` flag generates conveniently machine-readable output, e.g.

` + "```json" + `
{
    "total": 18253611008,
    "used": 7993453766,
    "trashed": 104857602,
    "other": 8849156022,
    "free": 1411001220
}
` + "```" + `

Not all backends print all fields. Information is not included if it is not
provided by a backend. Where the value is unlimited it is omitted.

Some backends does not support the ` + "`rclone about`" + ` command at all,
see complete list in [documentation](https://rclone.org/overview/#optional-features).`,
	Annotations: map[string]string{
		"versionIntroduced": "v1.41",
		// "groups": "",
	},
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(1, 1, command, args)
		f := cmd.NewFsSrc(args)
		cmd.Run(false, false, command, func() error {
			doAbout := f.Features().About
			if doAbout == nil {
				return fmt.Errorf("%v doesn't support about", f)
			}
			u, err := doAbout(context.Background())
			if err != nil {
				return fmt.Errorf("about call failed: %w", err)
			}
			if u == nil {
				return errors.New("nil usage returned")
			}
			if jsonOutput {
				out := json.NewEncoder(os.Stdout)
				out.SetIndent("", "\t")
				return out.Encode(u)
			}
			// Fields the backend didn't supply are skipped by printValue
			printValue("Total", u.Total, true)
			printValue("Used", u.Used, true)
			printValue("Free", u.Free, true)
			printValue("Trashed", u.Trashed, true)
			printValue("Other", u.Other, true)
			printValue("Objects", u.Objects, false)
			return nil
		})
	},
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/deletefile/deletefile.go
cmd/deletefile/deletefile.go
// Package deletefile provides the deletefile command.
package deletefile

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/operations"
	"github.com/spf13/cobra"
)

func init() {
	cmd.Root.AddCommand(commandDefinition)
}

var commandDefinition = &cobra.Command{
	Use:   "deletefile remote:path",
	Short: `Remove a single file from remote.`,
	Long: `Remove a single file from remote.

Unlike ` + "`" + `delete` + "`" + ` it cannot be used to remove a directory and it
doesn't obey include/exclude filters - if the specified file exists,
it will always be removed.`,
	Annotations: map[string]string{
		"versionIntroduced": "v1.42",
		"groups":            "Important",
	},
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(1, 1, command, args)
		f, fileName := cmd.NewFsFile(args[0])
		cmd.Run(true, false, command, func() error {
			// An empty fileName means the argument resolved to a
			// directory (or nothing) rather than a single file
			if fileName == "" {
				return fmt.Errorf("%s is a directory or doesn't exist: %w", args[0], fs.ErrorObjectNotFound)
			}
			fileObj, err := f.NewObject(context.Background(), fileName)
			if err != nil {
				return err
			}
			return operations.DeleteFile(context.Background(), fileObj)
		})
	},
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false
rclone/rclone
https://github.com/rclone/rclone/blob/5f4e4b1a200708f5f36999a9d289823b742e4fd3/cmd/lsl/lsl.go
cmd/lsl/lsl.go
// Package lsl provides the lsl command.
package lsl

import (
	"context"
	"os"

	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/cmd/ls/lshelp"
	"github.com/rclone/rclone/fs/operations"
	"github.com/spf13/cobra"
)

func init() {
	cmd.Root.AddCommand(commandDefinition)
}

var commandDefinition = &cobra.Command{
	Use:   "lsl remote:path",
	Short: `List the objects in path with modification time, size and path.`,
	Long: `Lists the objects in the source path to standard output in a human
readable format with modification time, size and path. Recurses by default.

E.g.

` + "```console" + `
$ rclone lsl swift:bucket
    60295 2016-06-25 18:55:41.062626927 bevajer5jef
    90613 2016-06-25 18:55:43.302607074 canole
    94467 2016-06-25 18:55:43.046609333 diwogej7
    37600 2016-06-25 18:55:40.814629136 fubuwic
` + "```" + `

` + lshelp.Help,
	Annotations: map[string]string{
		"versionIntroduced": "v1.02",
		"groups":            "Filter,Listing",
	},
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(1, 1, command, args)
		fsrc := cmd.NewFsSrc(args)
		cmd.Run(false, false, command, func() error {
			// ListLong writes one line per object to stdout
			return operations.ListLong(context.Background(), fsrc, os.Stdout)
		})
	},
}
go
MIT
5f4e4b1a200708f5f36999a9d289823b742e4fd3
2026-01-07T08:35:43.525317Z
false