repo stringlengths 6 47 | file_url stringlengths 77 269 | file_path stringlengths 5 186 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-07 08:35:43 2026-01-07 08:55:24 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/archive/create.go | tools/archive/create.go | package archive
import (
"archive/zip"
"compress/flate"
"errors"
"io"
"io/fs"
"os"
"path/filepath"
"strings"
)
// Create creates a new zip archive from src dir content and saves it in dest path.
//
// You can specify skipPaths to skip/ignore certain directories and files (relative to src)
// preventing adding them in the final archive.
func Create(src string, dest string, skipPaths ...string) error {
	// make sure that the parent directory of dest exists
	if err := os.MkdirAll(filepath.Dir(dest), os.ModePerm); err != nil {
		return err
	}

	f, err := os.Create(dest)
	if err != nil {
		return err
	}

	w := zip.NewWriter(f)

	// register a custom Deflate compressor (tuned for speed)
	w.RegisterCompressor(zip.Deflate, func(out io.Writer) (io.WriteCloser, error) {
		return flate.NewWriter(out, flate.BestSpeed)
	})

	if err := zipAddFS(w, os.DirFS(src), skipPaths...); err != nil {
		// try to cleanup at least the created zip file
		return errors.Join(err, w.Close(), f.Close(), os.Remove(dest))
	}

	return errors.Join(w.Close(), f.Close())
}
// note remove after similar method is added in the std lib (https://github.com/golang/go/issues/54898)
//
// zipAddFS walks fsys and writes every regular file entry into w,
// skipping the paths (and their entire subtrees) listed in skipPaths.
func zipAddFS(w *zip.Writer, fsys fs.FS, skipPaths ...string) error {
	return fs.WalkDir(fsys, ".", func(name string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}

		// directories are not written as explicit archive entries
		if d.IsDir() {
			return nil
		}

		// skip the ignored paths
		// (the appended trailing separator ensures that only whole path
		// segments match, e.g. "a/b" skips "a/b/c" but not "a/bc")
		for _, ignore := range skipPaths {
			if ignore == name ||
				strings.HasPrefix(filepath.Clean(name)+string(os.PathSeparator), filepath.Clean(ignore)+string(os.PathSeparator)) {
				return nil
			}
		}

		info, err := d.Info()
		if err != nil {
			return err
		}

		h, err := zip.FileInfoHeader(info)
		if err != nil {
			return err
		}
		// FileInfoHeader only stores the base name, so restore the full relative path
		h.Name = name
		h.Method = zip.Deflate

		fw, err := w.CreateHeader(h)
		if err != nil {
			return err
		}

		f, err := fsys.Open(name)
		if err != nil {
			return err
		}
		defer f.Close()

		_, err = io.Copy(fw, f)

		return err
	})
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/archive/create_test.go | tools/archive/create_test.go | package archive_test
import (
"os"
"path/filepath"
"testing"
"github.com/pocketbase/pocketbase/tools/archive"
)
func TestCreateFailure(t *testing.T) {
	srcDir := createTestDir(t)
	defer os.RemoveAll(srcDir)

	dst := filepath.Join(os.TempDir(), "pb_test.zip")
	defer os.RemoveAll(dst)

	// point the archiver at a directory that doesn't exist
	nonexistent := filepath.Join(os.TempDir(), "missing")

	if archive.Create(nonexistent, dst) == nil {
		t.Fatal("Expected to fail due to missing directory or file")
	}

	// the partially written zip must have been cleaned up
	if _, err := os.Stat(dst); err == nil {
		t.Fatalf("Expected the zip file not to be created")
	}
}
// TestCreateSuccess verifies that Create produces a zip file at the target
// path while honoring the provided skip paths.
func TestCreateSuccess(t *testing.T) {
	testDir := createTestDir(t)
	defer os.RemoveAll(testDir)

	zipName := "pb_test.zip"
	zipPath := filepath.Join(os.TempDir(), zipName)
	defer os.RemoveAll(zipPath)

	// zip testDir content (excluding test and a/b/c dir)
	if err := archive.Create(testDir, zipPath, "a/b/c", "test"); err != nil {
		t.Fatalf("Failed to create archive: %v", err)
	}

	info, err := os.Stat(zipPath)
	if err != nil {
		t.Fatalf("Failed to retrieve the generated zip file: %v", err)
	}

	if name := info.Name(); name != zipName {
		t.Fatalf("Expected zip with name %q, got %q", zipName, name)
	}

	// NOTE(review): the exact byte size depends on the zip/flate encoder
	// output staying stable across Go releases — confirm on toolchain bumps.
	expectedSize := int64(544)
	if size := info.Size(); size != expectedSize {
		t.Fatalf("Expected zip with size %d, got %d", expectedSize, size)
	}
}
// -------------------------------------------------------------------
// note: make sure to call os.RemoveAll(dir) after you are done
// working with the created test dir.
func createTestDir(t *testing.T) string {
dir, err := os.MkdirTemp(os.TempDir(), "pb_zip_test")
if err != nil {
t.Fatal(err)
}
if err := os.MkdirAll(filepath.Join(dir, "a/b/c"), os.ModePerm); err != nil {
t.Fatal(err)
}
{
f, err := os.OpenFile(filepath.Join(dir, "test"), os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
t.Fatal(err)
}
f.Close()
}
{
f, err := os.OpenFile(filepath.Join(dir, "test2"), os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
t.Fatal(err)
}
f.Close()
}
{
f, err := os.OpenFile(filepath.Join(dir, "a/test"), os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
t.Fatal(err)
}
f.Close()
}
{
f, err := os.OpenFile(filepath.Join(dir, "a/b/sub1"), os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
t.Fatal(err)
}
f.Close()
}
{
f, err := os.OpenFile(filepath.Join(dir, "a/b/c/sub2"), os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
t.Fatal(err)
}
f.Close()
}
{
f, err := os.OpenFile(filepath.Join(dir, "a/b/c/sub3"), os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
t.Fatal(err)
}
f.Close()
}
// symbolic link
if err := os.Symlink(filepath.Join(dir, "test"), filepath.Join(dir, "test_symlink")); err != nil {
t.Fatal(err)
}
return dir
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/archive/extract.go | tools/archive/extract.go | package archive
import (
"archive/zip"
"fmt"
"io"
"os"
"path/filepath"
"strings"
)
// Extract extracts the zip archive at "src" to "dest".
//
// Note that only dirs and regular files will be extracted.
// Symbolic links, named pipes, sockets, or any other irregular files
// are skipped because they come with too many edge cases and ambiguities.
func Extract(src, dest string) error {
	zr, err := zip.OpenReader(src)
	if err != nil {
		return err
	}
	defer zr.Close()

	// normalize dest path to check later for Zip Slip
	base := filepath.Clean(dest) + string(os.PathSeparator)

	for _, zf := range zr.File {
		if err := extractFile(zf, base); err != nil {
			return err
		}
	}

	return nil
}
// extractFile extracts the provided zipFile into "basePath/zipFileName" path,
// creating all the necessary path directories.
//
// basePath is expected to be cleaned and to end with a trailing path
// separator (see Extract), otherwise the Zip Slip prefix check below could
// be bypassed by sibling directories sharing a name prefix.
func extractFile(zipFile *zip.File, basePath string) error {
	path := filepath.Join(basePath, zipFile.Name)

	// check for Zip Slip
	// (entry names such as "../../x" would otherwise escape basePath after the Join)
	if !strings.HasPrefix(path, basePath) {
		return fmt.Errorf("invalid file path: %s", path)
	}

	r, err := zipFile.Open()
	if err != nil {
		return err
	}
	defer r.Close()

	// allow only dirs or regular files
	// (symlinks, pipes, sockets and other irregular entries are silently skipped)
	if zipFile.FileInfo().IsDir() {
		if err := os.MkdirAll(path, os.ModePerm); err != nil {
			return err
		}
	} else if zipFile.FileInfo().Mode().IsRegular() {
		// ensure that the file path directories are created
		if err := os.MkdirAll(filepath.Dir(path), os.ModePerm); err != nil {
			return err
		}

		// recreate the file with the permissions recorded in the archive
		f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, zipFile.Mode())
		if err != nil {
			return err
		}
		defer f.Close()

		_, err = io.Copy(f, r)
		if err != nil {
			return err
		}
	}

	return nil
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/archive/extract_test.go | tools/archive/extract_test.go | package archive_test
import (
"io/fs"
"os"
"path/filepath"
"testing"
"github.com/pocketbase/pocketbase/tools/archive"
)
func TestExtractFailure(t *testing.T) {
	srcDir := createTestDir(t)
	defer os.RemoveAll(srcDir)

	// a zip archive that was never created
	missingZip := filepath.Join(os.TempDir(), "pb_missing_test.zip")

	outDir := filepath.Join(os.TempDir(), "pb_zip_extract")
	defer os.RemoveAll(outDir)

	if archive.Extract(missingZip, outDir) == nil {
		t.Fatal("Expected Extract to fail due to missing zipPath")
	}

	if _, err := os.Stat(outDir); err == nil {
		t.Fatalf("Expected %q to not be created", outDir)
	}
}
// TestExtractSuccess round-trips a test directory through Create+Extract and
// checks that exactly the expected regular files survive.
func TestExtractSuccess(t *testing.T) {
	testDir := createTestDir(t)
	defer os.RemoveAll(testDir)

	zipPath := filepath.Join(os.TempDir(), "pb_test.zip")
	defer os.RemoveAll(zipPath)

	extractedPath := filepath.Join(os.TempDir(), "pb_zip_extract")
	defer os.RemoveAll(extractedPath)

	// zip testDir content (with exclude)
	if err := archive.Create(testDir, zipPath, "a/b/c", "test2", "sub2"); err != nil {
		t.Fatalf("Failed to create archive: %v", err)
	}

	if err := archive.Extract(zipPath, extractedPath); err != nil {
		t.Fatalf("Failed to extract %q in %q", zipPath, extractedPath)
	}

	// collect all extracted regular entries
	availableFiles := []string{}
	walkErr := filepath.WalkDir(extractedPath, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() {
			return nil
		}
		availableFiles = append(availableFiles, path)
		return nil
	})
	if walkErr != nil {
		t.Fatalf("Failed to read the extracted dir: %v", walkErr)
	}

	// (note: symbolic links and other irregular files should be missing)
	expectedFiles := []string{
		filepath.Join(extractedPath, "test"),
		filepath.Join(extractedPath, "a/test"),
		filepath.Join(extractedPath, "a/b/sub1"),
	}
	if len(availableFiles) != len(expectedFiles) {
		t.Fatalf("Expected \n%v, \ngot \n%v", expectedFiles, availableFiles)
	}

	// order-insensitive membership check
ExpectedLoop:
	for _, expected := range expectedFiles {
		for _, available := range availableFiles {
			if available == expected {
				continue ExpectedLoop
			}
		}
		t.Fatalf("Missing file %q in \n%v", expected, availableFiles)
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/inflector/singularize_test.go | tools/inflector/singularize_test.go | package inflector_test
import (
"testing"
"github.com/pocketbase/pocketbase/tools/inflector"
)
// TestSingularize table-tests Singularize against one representative word per
// singularization rule (uncountables, irregulars, latin plurals, suffix rules
// and the generic trailing "s" fallback).
func TestSingularize(t *testing.T) {
	scenarios := []struct {
		word     string
		expected string
	}{
		{"abcnese", "abcnese"},
		{"deer", "deer"},
		{"sheep", "sheep"},
		{"measles", "measles"},
		{"pox", "pox"},
		{"media", "media"},
		{"bliss", "bliss"},
		{"sea-bass", "sea-bass"},
		{"Statuses", "Status"},
		{"Feet", "Foot"},
		{"Teeth", "Tooth"},
		{"abcmenus", "abcmenu"},
		{"Quizzes", "Quiz"},
		{"Matrices", "Matrix"},
		{"Vertices", "Vertex"},
		{"Indices", "Index"},
		{"Aliases", "Alias"},
		{"Alumni", "Alumnus"},
		{"Bacilli", "Bacillus"},
		{"Cacti", "Cactus"},
		{"Fungi", "Fungus"},
		{"Nuclei", "Nucleus"},
		{"Radii", "Radius"},
		{"Stimuli", "Stimulus"},
		{"Syllabi", "Syllabus"},
		{"Termini", "Terminus"},
		{"Viri", "Virus"},
		{"Faxes", "Fax"},
		{"Crises", "Crisis"},
		{"Axes", "Axis"},
		{"Shoes", "Shoe"},
		{"abcoes", "abco"},
		{"Houses", "House"},
		{"Mice", "Mouse"},
		{"abcxes", "abcx"},
		{"Movies", "Movie"},
		{"Series", "Series"},
		{"abcquies", "abcquy"},
		{"Relatives", "Relative"},
		{"Drives", "Drive"},
		{"aardwolves", "aardwolf"},
		{"Analyses", "Analysis"},
		{"Diagnoses", "Diagnosis"},
		{"People", "Person"},
		{"Men", "Man"},
		{"Children", "Child"},
		{"News", "News"},
		{"Netherlands", "Netherlands"},
		{"Tableaus", "Tableau"},
		{"Currencies", "Currency"},
		{"abcs", "abc"},
		{"abc", "abc"},
	}

	for _, s := range scenarios {
		t.Run(s.word, func(t *testing.T) {
			result := inflector.Singularize(s.word)
			if result != s.expected {
				t.Fatalf("Expected %q, got %q", s.expected, result)
			}
		})
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/inflector/inflector_test.go | tools/inflector/inflector_test.go | package inflector_test
import (
"fmt"
"testing"
"github.com/pocketbase/pocketbase/tools/inflector"
)
func TestUcFirst(t *testing.T) {
	scenarios := []struct {
		val      string
		expected string
	}{
		{"", ""},
		{" ", " "},
		{"Test", "Test"},
		{"test", "Test"},
		{"test test2", "Test test2"},
	}

	for i, sc := range scenarios {
		t.Run(fmt.Sprintf("%d_%#v", i, sc.val), func(t *testing.T) {
			if got := inflector.UcFirst(sc.val); got != sc.expected {
				t.Fatalf("Expected %q, got %q", sc.expected, got)
			}
		})
	}
}
// TestColumnify checks that only word characters and . * - _ @ # survive.
func TestColumnify(t *testing.T) {
	scenarios := []struct {
		val      string
		expected string
	}{
		{"", ""},
		{" ", ""},
		{"123", "123"},
		{"Test.", "Test."},
		{" test ", "test"},
		{"test1.test2", "test1.test2"},
		{"@test!abc", "@testabc"},
		{"#test?abc", "#testabc"},
		{"123test(123)#", "123test123#"},
		{"test1--test2", "test1--test2"},
	}

	for i, s := range scenarios {
		t.Run(fmt.Sprintf("%d_%#v", i, s.val), func(t *testing.T) {
			result := inflector.Columnify(s.val)
			if result != s.expected {
				t.Fatalf("Expected %q, got %q", s.expected, result)
			}
		})
	}
}
// TestSentenize covers trimming, first-letter capitalization and the
// conditional trailing dot.
func TestSentenize(t *testing.T) {
	scenarios := []struct {
		val      string
		expected string
	}{
		{"", ""},
		{" ", ""},
		{".", "."},
		{"?", "?"},
		{"!", "!"},
		{"Test", "Test."},
		{" test ", "Test."},
		{"hello world", "Hello world."},
		{"hello world.", "Hello world."},
		{"hello world!", "Hello world!"},
		{"hello world?", "Hello world?"},
	}

	for i, s := range scenarios {
		t.Run(fmt.Sprintf("%d_%#v", i, s.val), func(t *testing.T) {
			result := inflector.Sentenize(s.val)
			if result != s.expected {
				t.Fatalf("Expected %q, got %q", s.expected, result)
			}
		})
	}
}
// TestSanitize covers both successful pattern removal and the error path
// for an invalid regex pattern.
func TestSanitize(t *testing.T) {
	scenarios := []struct {
		val       string
		pattern   string
		expected  string
		expectErr bool
	}{
		{"", ``, "", false},
		{" ", ``, " ", false},
		{" ", ` `, "", false},
		{"", `[A-Z]`, "", false},
		{"abcABC", `[A-Z]`, "abc", false},
		{"abcABC", `[A-Z`, "", true}, // invalid pattern
	}

	for i, s := range scenarios {
		t.Run(fmt.Sprintf("%d_%#v", i, s.val), func(t *testing.T) {
			result, err := inflector.Sanitize(s.val, s.pattern)
			hasErr := err != nil

			if s.expectErr != hasErr {
				t.Fatalf("Expected hasErr %v, got %v (%v)", s.expectErr, hasErr, err)
			}

			if result != s.expected {
				t.Fatalf("Expected %q, got %q", s.expected, result)
			}
		})
	}
}
// TestSnakecase covers separator stripping, camelCase splitting and
// abbreviation preservation.
func TestSnakecase(t *testing.T) {
	scenarios := []struct {
		val      string
		expected string
	}{
		{"", ""},
		{" ", ""},
		{"!@#$%^", ""},
		{"...", ""},
		{"_", ""},
		{"John Doe", "john_doe"},
		{"John_Doe", "john_doe"},
		{".a!b@c#d$e%123. ", "a_b_c_d_e_123"},
		{"HelloWorld", "hello_world"},
		{"HelloWorld1HelloWorld2", "hello_world1_hello_world2"},
		{"TEST", "test"},
		{"testABR", "test_abr"},
	}

	for i, s := range scenarios {
		t.Run(fmt.Sprintf("%d_%#v", i, s.val), func(t *testing.T) {
			result := inflector.Snakecase(s.val)
			if result != s.expected {
				t.Fatalf("Expected %q, got %q", s.expected, result)
			}
		})
	}
}
// TestCamelize covers word-boundary capitalization and removal of
// non-alphanumeric characters.
func TestCamelize(t *testing.T) {
	scenarios := []struct {
		val      string
		expected string
	}{
		{"", ""},
		{" ", ""},
		{"Test", "Test"},
		{"test", "Test"},
		{"testTest2", "TestTest2"},
		{"TestTest2", "TestTest2"},
		{"test test2", "TestTest2"},
		{"test-test2", "TestTest2"},
		{"test'test2", "TestTest2"},
		{"test1test2", "Test1test2"},
		{"1test-test2", "1testTest2"},
		{"123", "123"},
		{"123a", "123a"},
	}

	for i, s := range scenarios {
		t.Run(fmt.Sprintf("%d_%#v", i, s.val), func(t *testing.T) {
			result := inflector.Camelize(s.val)
			if result != s.expected {
				t.Fatalf("Expected %q, got %q", s.expected, result)
			}
		})
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/inflector/inflector.go | tools/inflector/inflector.go | package inflector
import (
"regexp"
"strings"
"unicode"
)
// columnifyRemoveRegex matches the characters stripped by Columnify
// (everything except word characters and . * - _ @ #).
var columnifyRemoveRegex = regexp.MustCompile(`[^\w\.\*\-\_\@\#]+`)

// snakecaseSplitRegex matches the word boundaries used by Snakecase
// (any run of non-word characters or underscores).
var snakecaseSplitRegex = regexp.MustCompile(`[\W_]+`)
// UcFirst converts the first character of a string into uppercase.
func UcFirst(str string) string {
	runes := []rune(str)
	if len(runes) == 0 {
		return ""
	}

	runes[0] = unicode.ToUpper(runes[0])

	return string(runes)
}
// Columnify strips invalid db identifier characters.
// Only word characters and . * - _ @ # are kept.
func Columnify(str string) string {
	return columnifyRemoveRegex.ReplaceAllString(str, "")
}
// Sentenize converts and normalizes string into a sentence.
//
// The result is trimmed, starts with an uppercase character and
// ends with a sentence terminator (".", "?" or "!").
func Sentenize(str string) string {
	str = strings.TrimSpace(str)
	if str == "" {
		return ""
	}

	str = UcFirst(str)

	// append a closing dot unless the sentence already ends with a terminator
	switch str[len(str)-1] {
	case '.', '?', '!':
		return str
	default:
		return str + "."
	}
}
// Sanitize sanitizes `str` by removing all characters satisfying `removePattern`.
// Returns an error if the pattern is not valid regex string.
func Sanitize(str string, removePattern string) (string, error) {
	re, err := regexp.Compile(removePattern)
	if err != nil {
		return "", err
	}

	return re.ReplaceAllString(str, ""), nil
}
// Snakecase removes all non word characters and converts any english text into a snakecase.
// "ABBREVIATIONS" are preserved, eg. "myTestDB" will become "my_test_db".
func Snakecase(str string) string {
	var result strings.Builder

	// split at any non word character and underscore
	words := snakecaseSplitRegex.Split(str, -1)

	for _, word := range words {
		if word == "" {
			continue
		}

		if result.Len() > 0 {
			result.WriteString("_")
		}

		// insert "_" before an uppercase character that follows a
		// non-uppercase one (keeping abbreviation runs together);
		// the previous rune is tracked explicitly because indexing
		// word[i-1] reads a single byte and misdetects the case of
		// multi-byte (non-ASCII) characters
		var prev rune
		for i, c := range word {
			if i > 0 && unicode.IsUpper(c) && !unicode.IsUpper(prev) {
				result.WriteString("_")
			}
			result.WriteRune(c)
			prev = c
		}
	}

	return strings.ToLower(result.String())
}
// Camelize converts the provided string to its "CamelCased" version
// (non alphanumeric characters are removed).
//
// For example:
//
//	inflector.Camelize("send_email") // "SendEmail"
func Camelize(str string) string {
	var b strings.Builder

	// the very first letter is also treated as a word start
	wordStart := true

	for _, c := range str {
		switch {
		case !unicode.IsLetter(c) && !unicode.IsNumber(c):
			// separators are dropped and mark the next rune as a word start
			wordStart = true
		case wordStart:
			wordStart = false
			b.WriteRune(unicode.ToUpper(c))
		default:
			b.WriteRune(c)
		}
	}

	return b.String()
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/inflector/singularize.go | tools/inflector/singularize.go | package inflector
import (
"log"
"regexp"
"github.com/pocketbase/pocketbase/tools/store"
)
// compiledPatterns caches the lazily compiled singularRules patterns.
var compiledPatterns = store.New[string, *regexp.Regexp](nil)

// note: the patterns are extracted from popular Ruby/PHP/Node.js inflector packages
//
// The rules are evaluated in order and the first matching pattern wins,
// so the more specific rules must stay before the generic "s$" fallback.
var singularRules = []struct {
	pattern     string // lazily compiled
	replacement string
}{
	{"(?i)([nrlm]ese|deer|fish|sheep|measles|ois|pox|media|ss)$", "${1}"},
	{"(?i)^(sea[- ]bass)$", "${1}"},
	{"(?i)(s)tatuses$", "${1}tatus"},
	{"(?i)(f)eet$", "${1}oot"},
	{"(?i)(t)eeth$", "${1}ooth"},
	{"(?i)^(.*)(menu)s$", "${1}${2}"},
	{"(?i)(quiz)zes$", "${1}"},
	{"(?i)(matr)ices$", "${1}ix"},
	{"(?i)(vert|ind)ices$", "${1}ex"},
	{"(?i)^(ox)en", "${1}"},
	{"(?i)(alias)es$", "${1}"},
	{"(?i)(alumn|bacill|cact|foc|fung|nucle|radi|stimul|syllab|termin|viri?)i$", "${1}us"},
	{"(?i)([ftw]ax)es", "${1}"},
	{"(?i)(cris|ax|test)es$", "${1}is"},
	{"(?i)(shoe)s$", "${1}"},
	{"(?i)(o)es$", "${1}"},
	{"(?i)ouses$", "ouse"},
	{"(?i)([^a])uses$", "${1}us"},
	{"(?i)([m|l])ice$", "${1}ouse"},
	{"(?i)(x|ch|ss|sh)es$", "${1}"},
	{"(?i)(m)ovies$", "${1}ovie"},
	{"(?i)(s)eries$", "${1}eries"},
	{"(?i)([^aeiouy]|qu)ies$", "${1}y"},
	{"(?i)([lr])ves$", "${1}f"},
	{"(?i)(tive)s$", "${1}"},
	{"(?i)(hive)s$", "${1}"},
	{"(?i)(drive)s$", "${1}"},
	{"(?i)([^fo])ves$", "${1}fe"},
	{"(?i)(^analy)ses$", "${1}sis"},
	{"(?i)(analy|diagno|^ba|(p)arenthe|(p)rogno|(s)ynop|(t)he)ses$", "${1}${2}sis"},
	{"(?i)([ti])a$", "${1}um"},
	{"(?i)(p)eople$", "${1}erson"},
	{"(?i)(m)en$", "${1}an"},
	{"(?i)(c)hildren$", "${1}hild"},
	{"(?i)(n)ews$", "${1}ews"},
	{"(?i)(n)etherlands$", "${1}etherlands"},
	{"(?i)eaus$", "eau"},
	{"(?i)(currenc)ies$", "${1}y"},
	{"(?i)^(.*us)$", "${1}"},
	{"(?i)s$", ""},
}
// Singularize converts the specified word into its singular version.
//
// For example:
//
//	inflector.Singularize("people") // "person"
//
// The first matching rule from singularRules wins; if no rule matches,
// the word is returned unchanged.
func Singularize(word string) string {
	if word == "" {
		return ""
	}

	for _, rule := range singularRules {
		// compile the rule pattern only once and cache it for later calls
		re := compiledPatterns.GetOrSet(rule.pattern, func() *regexp.Regexp {
			re, err := regexp.Compile(rule.pattern)
			if err != nil {
				return nil
			}
			return re
		})

		if re == nil {
			// log only for debug purposes
			log.Println("[Singularize] failed to retrieve/compile rule pattern " + rule.pattern)
			continue
		}

		if re.MatchString(word) {
			return re.ReplaceAllString(word, rule.replacement)
		}
	}

	return word
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/tokenizer/tokenizer.go | tools/tokenizer/tokenizer.go | // Package tokenizer implements a rudimentary tokens parser of buffered
// io.Reader while respecting quotes and parenthesis boundaries.
//
// Example
//
// tk := tokenizer.NewFromString("a, b, (c, d)")
// result, _ := tk.ScanAll() // ["a", "b", "(c, d)"]
package tokenizer
import (
"bufio"
"bytes"
"fmt"
"io"
"strings"
)
// eof represents a marker rune for the end of the reader.
const eof = rune(0)

// DefaultSeparators is a list with the default token separator characters.
var DefaultSeparators = []rune{','}

// whitespaceChars lists the runes trimmed from both ends of each token,
// unless registered as separators (see rebuildTrimCutset).
var whitespaceChars = []rune{'\t', '\n', '\v', '\f', '\r', ' ', 0x85, 0xA0}
// NewFromString creates new Tokenizer from the provided string
// (configured with DefaultSeparators).
func NewFromString(str string) *Tokenizer {
	return New(strings.NewReader(str))
}
// NewFromBytes creates new Tokenizer from the provided bytes slice
// (configured with DefaultSeparators).
func NewFromBytes(b []byte) *Tokenizer {
	return New(bytes.NewReader(b))
}
// New creates new Tokenizer from the provided reader with DefaultSeparators.
func New(r io.Reader) *Tokenizer {
	tk := &Tokenizer{r: bufio.NewReader(r)}

	// also initializes the trim cutset
	tk.Separators(DefaultSeparators...)

	return tk
}
// Tokenizer defines a struct that parses a reader into tokens while
// respecting quotes and parenthesis boundaries.
type Tokenizer struct {
	r *bufio.Reader

	trimCutset        string // characters trimmed from both token ends (derived from the separators)
	separators        []rune // token separator characters
	keepSeparator     bool   // keep the separator rune as part of the token
	keepEmptyTokens   bool   // don't skip empty tokens on Scan()
	ignoreParenthesis bool   // treat '(' and ')' as regular characters
}
// Separators defines the provided separators of the current Tokenizer
// and rebuilds its trim cutset accordingly.
func (t *Tokenizer) Separators(separators ...rune) {
	t.separators = separators
	t.rebuildTrimCutset()
}
// KeepSeparator defines whether to keep the separator rune as part
// of the token (default to false).
func (t *Tokenizer) KeepSeparator(state bool) {
	t.keepSeparator = state
}
// KeepEmptyTokens defines whether to keep empty tokens on Scan() (default to false).
func (t *Tokenizer) KeepEmptyTokens(state bool) {
	t.keepEmptyTokens = state
}
// IgnoreParenthesis defines whether to ignore the parenthesis boundaries
// and to treat the '(' and ')' as regular characters.
func (t *Tokenizer) IgnoreParenthesis(state bool) {
	t.ignoreParenthesis = state
}
// Scan reads and returns the next available token from the Tokenizer's buffer (trimmed!).
//
// Empty tokens are skipped if t.keepEmptyTokens is not set (which is the default).
//
// Returns [io.EOF] error when there are no more tokens to scan.
func (t *Tokenizer) Scan() (string, error) {
	for {
		ch := t.read()
		if ch == eof {
			return "", io.EOF
		}
		t.unread()

		token, err := t.readToken()
		if err != nil {
			return "", err
		}

		// by default empty tokens are skipped and the scan continues
		if token != "" || t.keepEmptyTokens {
			return token, err
		}
	}
}
// ScanAll reads the entire Tokenizer's buffer and return all found tokens.
func (t *Tokenizer) ScanAll() ([]string, error) {
	tokens := []string{}

	for {
		token, err := t.Scan()
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, err
		}

		tokens = append(tokens, token)
	}

	return tokens, nil
}
// readToken reads a single token from the buffer and returns it.
//
// Reading stops at the first separator rune that is not inside an open
// parenthesis group or a quoted segment, or at the end of the buffer.
// Returns an error if a parenthesis group or quote is left unclosed.
func (t *Tokenizer) readToken() (string, error) {
	var buf bytes.Buffer
	var parenthesis int
	var quoteCh rune
	var prevCh rune

	for {
		ch := t.read()
		if ch == eof {
			break
		}

		// track the parenthesis/quote nesting state
		// (a character preceded by the escape rune doesn't change it)
		if !t.isEscapeRune(prevCh) {
			if !t.ignoreParenthesis && ch == '(' && quoteCh == eof {
				parenthesis++ // opening parenthesis
			} else if !t.ignoreParenthesis && ch == ')' && parenthesis > 0 && quoteCh == eof {
				parenthesis-- // closing parenthesis
			} else if t.isQuoteRune(ch) {
				switch quoteCh {
				case ch:
					quoteCh = eof // closing quote
				case eof:
					quoteCh = ch // opening quote
				}
			}
		}

		// a separator ends the token only at the top level
		// (i.e. outside of any parenthesis group or quoted segment)
		if t.isSeperatorRune(ch) && parenthesis == 0 && quoteCh == eof {
			if t.keepSeparator {
				buf.WriteRune(ch)
			}
			break
		}

		prevCh = ch
		buf.WriteRune(ch)
	}

	if parenthesis > 0 || quoteCh != eof {
		return "", fmt.Errorf("unbalanced parenthesis or quoted expression: %q", buf.String())
	}

	return strings.Trim(buf.String(), t.trimCutset), nil
}
// read reads the next rune from the buffered reader.
// Returns the `rune(0)` if an error or `io.EOF` occurs.
func (t *Tokenizer) read() rune {
	ch, _, err := t.r.ReadRune()
	if err != nil {
		return eof
	}

	return ch
}
// unread places the previously read rune back on the reader
// (only valid immediately after a successful read, per bufio.Reader.UnreadRune).
func (t *Tokenizer) unread() error {
	return t.r.UnreadRune()
}
// rebuildTrimCutset rebuilds the tokenizer trimCutset based on its separator runes.
//
// Whitespace characters that are registered as separators are excluded
// from the cutset so that they are not trimmed from the tokens.
func (t *Tokenizer) rebuildTrimCutset() {
	var b strings.Builder

	for _, ch := range whitespaceChars {
		if !t.isSeperatorRune(ch) {
			b.WriteRune(ch)
		}
	}

	t.trimCutset = b.String()
}
// isSeperatorRune checks if a rune is a token part separator.
func (t *Tokenizer) isSeperatorRune(ch rune) bool {
	for i := range t.separators {
		if t.separators[i] == ch {
			return true
		}
	}

	return false
}
// isQuoteRune checks if a rune is a quote (single, double or backtick).
func (t *Tokenizer) isQuoteRune(ch rune) bool {
	switch ch {
	case '\'', '"', '`':
		return true
	default:
		return false
	}
}
// isEscapeRune checks if a rune is an escape character (backslash).
func (t *Tokenizer) isEscapeRune(ch rune) bool {
	return ch == '\\'
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/tokenizer/tokenizer_test.go | tools/tokenizer/tokenizer_test.go | package tokenizer
import (
"io"
"strings"
"testing"
)
// TestFactories verifies that all Tokenizer constructors wrap the input
// content and apply the default configuration.
func TestFactories(t *testing.T) {
	expectedContent := "test"

	scenarios := []struct {
		name string
		tk   *Tokenizer
	}{
		{
			"New()",
			New(strings.NewReader(expectedContent)),
		},
		{
			"NewFromString()",
			NewFromString(expectedContent),
		},
		{
			"NewFromBytes()",
			NewFromBytes([]byte(expectedContent)),
		},
	}

	for _, s := range scenarios {
		t.Run(s.name, func(t *testing.T) {
			content, _ := s.tk.r.ReadString(0)
			if content != expectedContent {
				t.Fatalf("Expected reader with content %q, got %q", expectedContent, content)
			}

			if s.tk.keepSeparator != false {
				t.Fatal("Expected keepSeparator false, got true")
			}

			if s.tk.ignoreParenthesis != false {
				t.Fatal("Expected ignoreParenthesis false, got true")
			}

			if len(s.tk.separators) != len(DefaultSeparators) {
				t.Fatalf("Expected \n%v, \ngot \n%v", DefaultSeparators, s.tk.separators)
			}

			// compare against DefaultSeparators
			// (previously the inner loop ranged over s.tk.separators itself,
			// making the membership check a no-op)
			for _, r := range s.tk.separators {
				exists := false
				for _, def := range DefaultSeparators {
					if r == def {
						exists = true
						break
					}
				}
				if !exists {
					t.Fatalf("Unexpected sepator %s", string(r))
				}
			}
		})
	}
}
// TestScan checks the token-by-token scanning behavior and that the
// exhausted tokenizer reports io.EOF with an empty token.
func TestScan(t *testing.T) {
	tk := NewFromString("abc, 123.456, (abc)")

	expectedTokens := []string{"abc", "123.456", "(abc)"}

	for _, expected := range expectedTokens {
		token, err := tk.Scan()
		if err != nil {
			t.Fatalf("Expected token %q, got error %v", expected, err)
		}
		if token != expected {
			// (previously this message also said "got error" while printing the token)
			t.Fatalf("Expected token %q, got %q", expected, token)
		}
	}

	// the tokenizer is exhausted - no more tokens
	token, err := tk.Scan()
	if err != io.EOF {
		t.Fatalf("Expected EOF error, got %v", err)
	}
	// (the duplicated err != io.EOF condition from the original check was dead code)
	if token != "" {
		t.Fatalf("Expected empty token, got %q", token)
	}
}
// TestScanAll table-tests the tokenizer configuration knobs
// (separators, keepSeparator, keepEmptyTokens, ignoreParenthesis)
// as well as the unbalanced parenthesis/quote error paths.
func TestScanAll(t *testing.T) {
	scenarios := []struct {
		name              string
		content           string
		separators        []rune
		keepSeparator     bool
		keepEmptyTokens   bool
		ignoreParenthesis bool
		expectError       bool
		expectTokens      []string
	}{
		{
			name:              "empty string",
			content:           "",
			separators:        DefaultSeparators,
			keepSeparator:     false,
			keepEmptyTokens:   false,
			ignoreParenthesis: false,
			expectError:       false,
			expectTokens:      nil,
		},
		{
			name:              "unbalanced parenthesis",
			content:           `(a,b() c`,
			separators:        DefaultSeparators,
			keepSeparator:     false,
			keepEmptyTokens:   false,
			ignoreParenthesis: false,
			expectError:       true,
			expectTokens:      []string{},
		},
		{
			name:              "unmatching quotes",
			content:           `'asd"`,
			separators:        DefaultSeparators,
			keepSeparator:     false,
			keepEmptyTokens:   false,
			ignoreParenthesis: false,
			expectError:       true,
			expectTokens:      []string{},
		},
		{
			name:              "no separators",
			content:           `a, b, c, d, e 123, "abc"`,
			separators:        nil,
			keepSeparator:     false,
			keepEmptyTokens:   false,
			ignoreParenthesis: false,
			expectError:       false,
			expectTokens:      []string{`a, b, c, d, e 123, "abc"`},
		},
		{
			name: "default separators",
			content: `a, b , c , d e , "a,b, c " , ,, , (123, 456)
			`,
			separators:        DefaultSeparators,
			keepSeparator:     false,
			keepEmptyTokens:   false,
			ignoreParenthesis: false,
			expectError:       false,
			expectTokens: []string{
				"a",
				"b",
				"c",
				"d e",
				`"a,b, c "`,
				`(123, 456)`,
			},
		},
		{
			name:              "keep separators",
			content:           `a, b, c, d e, "a,b, c ", (123, 456)`,
			separators:        []rune{',', ' '}, // the space should be removed from the cutset
			keepSeparator:     true,
			keepEmptyTokens:   true,
			ignoreParenthesis: false,
			expectError:       false,
			expectTokens: []string{
				"a,",
				" ",
				"b,",
				" ",
				"c,",
				" ",
				"d ",
				" ",
				"e,",
				" ",
				`"a,b, c ",`,
				`(123, 456)`,
			},
		},
		{
			name:              "custom separators",
			content:           `a | b c d &(e + f) & "g & h" & & &`,
			separators:        []rune{'|', '&'},
			keepSeparator:     false,
			keepEmptyTokens:   false,
			ignoreParenthesis: false,
			expectError:       false,
			expectTokens: []string{
				"a",
				"b c d",
				"(e + f)",
				`"g & h"`,
			},
		},
		{
			name:              "ignoring parenthesis",
			content:           `a, b, (c,d)`,
			separators:        DefaultSeparators,
			keepSeparator:     false,
			keepEmptyTokens:   false,
			ignoreParenthesis: true,
			expectError:       false,
			expectTokens: []string{
				"a",
				"b",
				"(c",
				"d)",
			},
		},
		{
			name:              "keep empty tokens",
			content:           `a, b, (c, d), ,, , e, , f`,
			separators:        DefaultSeparators,
			keepSeparator:     false,
			keepEmptyTokens:   true,
			ignoreParenthesis: false,
			expectError:       false,
			expectTokens: []string{
				"a",
				"b",
				"(c, d)",
				"",
				"",
				"",
				"e",
				"",
				"f",
			},
		},
	}

	for _, s := range scenarios {
		t.Run(s.name, func(t *testing.T) {
			tk := NewFromString(s.content)

			tk.Separators(s.separators...)
			tk.KeepSeparator(s.keepSeparator)
			tk.KeepEmptyTokens(s.keepEmptyTokens)
			tk.IgnoreParenthesis(s.ignoreParenthesis)

			tokens, err := tk.ScanAll()

			hasErr := err != nil
			if hasErr != s.expectError {
				t.Fatalf("Expected hasErr %v, got %v (%v)", s.expectError, hasErr, err)
			}

			if len(tokens) != len(s.expectTokens) {
				t.Fatalf("Expected \n%v (%d), \ngot \n%v (%d)", s.expectTokens, len(s.expectTokens), tokens, len(tokens))
			}

			// NOTE(review): this comparison is membership-only (no order or
			// duplicate-count check); the "keep separators" expected list is
			// not in actual scan order, so tightening this to an element-wise
			// comparison would require fixing that list first.
			for _, tok := range tokens {
				exists := false
				for _, def := range s.expectTokens {
					if tok == def {
						exists = true
						break
					}
				}
				if !exists {
					t.Fatalf("Unexpected token %q", tok)
				}
			}
		})
	}
}
// TestTrimCutset checks that whitespace runes registered as separators are
// excluded from the trim cutset.
func TestTrimCutset(t *testing.T) {
	scenarios := []struct {
		name           string
		separators     []rune
		expectedCutset string
	}{
		{
			"default factory separators",
			nil,
			"\t\n\v\f\r \u0085\u00a0",
		},
		{
			"custom separators",
			[]rune{'\t', ' ', '\r', ','},
			"\n\v\f\u0085\u00a0",
		},
	}

	for _, s := range scenarios {
		t.Run(s.name, func(t *testing.T) {
			tk := NewFromString("")

			if len(s.separators) > 0 {
				tk.Separators(s.separators...)
			}

			if tk.trimCutset != s.expectedCutset {
				t.Fatalf("Expected cutset %q, got %q", s.expectedCutset, tk.trimCutset)
			}
		})
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/cron/job_test.go | tools/cron/job_test.go | package cron
import (
"encoding/json"
"testing"
)
func TestJobId(t *testing.T) {
	const expected = "test"

	j := Job{id: expected}

	if got := j.Id(); got != expected {
		t.Fatalf("Expected job with id %q, got %q", expected, got)
	}
}
// TestJobExpr checks that Expression returns the raw cron expression the
// schedule was parsed from.
func TestJobExpr(t *testing.T) {
	expected := "1 2 3 4 5"

	s, err := NewSchedule(expected)
	if err != nil {
		t.Fatal(err)
	}

	j := Job{schedule: s}

	if j.Expression() != expected {
		t.Fatalf("Expected job with cron expression %q, got %q", expected, j.Expression())
	}
}
// TestJobRun checks that Run executes the registered function and that a
// job without a function is a safe no-op (doesn't panic).
func TestJobRun(t *testing.T) {
	defer func() {
		if r := recover(); r != nil {
			t.Errorf("Shouldn't panic: %v", r)
		}
	}()

	calls := ""

	j1 := Job{} // no registered function
	j2 := Job{fn: func() { calls += "2" }}

	j1.Run()
	j2.Run()

	expected := "2"
	if calls != expected {
		t.Fatalf("Expected calls %q, got %q", expected, calls)
	}
}
// TestJobMarshalJSON checks the custom JSON serialization of Job
// (only the id and the raw expression are exported).
func TestJobMarshalJSON(t *testing.T) {
	s, err := NewSchedule("1 2 3 4 5")
	if err != nil {
		t.Fatal(err)
	}

	j := Job{id: "test_id", schedule: s}

	raw, err := json.Marshal(j)
	if err != nil {
		t.Fatal(err)
	}

	expected := `{"id":"test_id","expression":"1 2 3 4 5"}`
	if str := string(raw); str != expected {
		t.Fatalf("Expected\n%s\ngot\n%s", expected, str)
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/cron/job.go | tools/cron/job.go | package cron
import "encoding/json"
// Job defines a single registered cron job.
type Job struct {
fn func()
schedule *Schedule
id string
}
// Id returns the cron job id.
func (j *Job) Id() string {
return j.id
}
// Expression returns the plain cron job schedule expression.
func (j *Job) Expression() string {
return j.schedule.rawExpr
}
// Run runs the cron job function.
func (j *Job) Run() {
if j.fn != nil {
j.fn()
}
}
// MarshalJSON implements [json.Marshaler] and export the current
// jobs data into valid JSON.
func (j Job) MarshalJSON() ([]byte, error) {
plain := struct {
Id string `json:"id"`
Expression string `json:"expression"`
}{
Id: j.Id(),
Expression: j.Expression(),
}
return json.Marshal(plain)
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/cron/cron.go | tools/cron/cron.go | // Package cron implements a crontab-like service to execute and schedule
// repeative tasks/jobs.
//
// Example:
//
// c := cron.New()
// c.MustAdd("dailyReport", "0 0 * * *", func() { ... })
// c.Start()
package cron
import (
"errors"
"fmt"
"slices"
"sync"
"time"
)
// Cron is a crontab-like struct for tasks/jobs scheduling.
type Cron struct {
timezone *time.Location
ticker *time.Ticker
startTimer *time.Timer
tickerDone chan bool
jobs []*Job
interval time.Duration
mux sync.RWMutex
}
// New create a new Cron struct with default tick interval of 1 minute
// and timezone in UTC.
//
// You can change the default tick interval with Cron.SetInterval().
// You can change the default timezone with Cron.SetTimezone().
func New() *Cron {
return &Cron{
interval: 1 * time.Minute,
timezone: time.UTC,
jobs: []*Job{},
tickerDone: make(chan bool),
}
}
// SetInterval changes the current cron tick interval
// (it usually should be >= 1 minute).
func (c *Cron) SetInterval(d time.Duration) {
// update interval
c.mux.Lock()
wasStarted := c.ticker != nil
c.interval = d
c.mux.Unlock()
// restart the ticker
if wasStarted {
c.Start()
}
}
// SetTimezone changes the current cron tick timezone.
func (c *Cron) SetTimezone(l *time.Location) {
c.mux.Lock()
defer c.mux.Unlock()
c.timezone = l
}
// MustAdd is similar to Add() but panic on failure.
func (c *Cron) MustAdd(jobId string, cronExpr string, run func()) {
if err := c.Add(jobId, cronExpr, run); err != nil {
panic(err)
}
}
// Add registers a single cron job.
//
// If there is already a job with the provided id, then the old job
// will be replaced with the new one.
//
// cronExpr is a regular cron expression, eg. "0 */3 * * *" (aka. at minute 0 past every 3rd hour).
// Check cron.NewSchedule() for the supported tokens.
func (c *Cron) Add(jobId string, cronExpr string, fn func()) error {
if fn == nil {
return errors.New("failed to add new cron job: fn must be non-nil function")
}
schedule, err := NewSchedule(cronExpr)
if err != nil {
return fmt.Errorf("failed to add new cron job: %w", err)
}
c.mux.Lock()
defer c.mux.Unlock()
// remove previous (if any)
c.jobs = slices.DeleteFunc(c.jobs, func(j *Job) bool {
return j.Id() == jobId
})
// add new
c.jobs = append(c.jobs, &Job{
id: jobId,
fn: fn,
schedule: schedule,
})
return nil
}
// Remove removes a single cron job by its id.
func (c *Cron) Remove(jobId string) {
c.mux.Lock()
defer c.mux.Unlock()
if c.jobs == nil {
return // nothing to remove
}
c.jobs = slices.DeleteFunc(c.jobs, func(j *Job) bool {
return j.Id() == jobId
})
}
// RemoveAll removes all registered cron jobs.
func (c *Cron) RemoveAll() {
c.mux.Lock()
defer c.mux.Unlock()
c.jobs = []*Job{}
}
// Total returns the current total number of registered cron jobs.
func (c *Cron) Total() int {
c.mux.RLock()
defer c.mux.RUnlock()
return len(c.jobs)
}
// Jobs returns a shallow copy of the currently registered cron jobs.
func (c *Cron) Jobs() []*Job {
c.mux.RLock()
defer c.mux.RUnlock()
copy := make([]*Job, len(c.jobs))
for i, j := range c.jobs {
copy[i] = j
}
return copy
}
// Stop stops the current cron ticker (if not already).
//
// You can resume the ticker by calling Start().
func (c *Cron) Stop() {
c.mux.Lock()
defer c.mux.Unlock()
if c.startTimer != nil {
c.startTimer.Stop()
c.startTimer = nil
}
if c.ticker == nil {
return // already stopped
}
c.tickerDone <- true
c.ticker.Stop()
c.ticker = nil
}
// Start starts the cron ticker.
//
// Calling Start() on already started cron will restart the ticker.
func (c *Cron) Start() {
c.Stop()
// delay the ticker to start at 00 of 1 c.interval duration
now := time.Now()
next := now.Add(c.interval).Truncate(c.interval)
delay := next.Sub(now)
c.mux.Lock()
c.startTimer = time.AfterFunc(delay, func() {
c.mux.Lock()
c.ticker = time.NewTicker(c.interval)
c.mux.Unlock()
// run immediately at 00
c.runDue(time.Now())
// run after each tick
go func() {
for {
select {
case <-c.tickerDone:
return
case t := <-c.ticker.C:
c.runDue(t)
}
}
}()
})
c.mux.Unlock()
}
// HasStarted checks whether the current Cron ticker has been started.
func (c *Cron) HasStarted() bool {
c.mux.RLock()
defer c.mux.RUnlock()
return c.ticker != nil
}
// runDue runs all registered jobs that are scheduled for the provided time.
func (c *Cron) runDue(t time.Time) {
c.mux.RLock()
defer c.mux.RUnlock()
moment := NewMoment(t.In(c.timezone))
for _, j := range c.jobs {
if j.schedule.IsDue(moment) {
go j.Run()
}
}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/cron/schedule_test.go | tools/cron/schedule_test.go | package cron_test
import (
"encoding/json"
"fmt"
"testing"
"time"
"github.com/pocketbase/pocketbase/tools/cron"
)
func TestNewMoment(t *testing.T) {
t.Parallel()
date, err := time.Parse("2006-01-02 15:04", "2023-05-09 15:20")
if err != nil {
t.Fatal(err)
}
m := cron.NewMoment(date)
if m.Minute != 20 {
t.Fatalf("Expected m.Minute %d, got %d", 20, m.Minute)
}
if m.Hour != 15 {
t.Fatalf("Expected m.Hour %d, got %d", 15, m.Hour)
}
if m.Day != 9 {
t.Fatalf("Expected m.Day %d, got %d", 9, m.Day)
}
if m.Month != 5 {
t.Fatalf("Expected m.Month %d, got %d", 5, m.Month)
}
if m.DayOfWeek != 2 {
t.Fatalf("Expected m.DayOfWeek %d, got %d", 2, m.DayOfWeek)
}
}
func TestNewSchedule(t *testing.T) {
t.Parallel()
scenarios := []struct {
cronExpr string
expectError bool
expectSchedule string
}{
{
"invalid",
true,
"",
},
{
"* * * *",
true,
"",
},
{
"* * * * * *",
true,
"",
},
{
"2/3 * * * *",
true,
"",
},
{
"* * * * *",
false,
`{"minutes":{"0":{},"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"32":{},"33":{},"34":{},"35":{},"36":{},"37":{},"38":{},"39":{},"4":{},"40":{},"41":{},"42":{},"43":{},"44":{},"45":{},"46":{},"47":{},"48":{},"49":{},"5":{},"50":{},"51":{},"52":{},"53":{},"54":{},"55":{},"56":{},"57":{},"58":{},"59":{},"6":{},"7":{},"8":{},"9":{}},"hours":{"0":{},"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"days":{"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"months":{"1":{},"10":{},"11":{},"12":{},"2":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"daysOfWeek":{"0":{},"1":{},"2":{},"3":{},"4":{},"5":{},"6":{}}}`,
},
{
"*/2 */3 */5 */4 */2",
false,
`{"minutes":{"0":{},"10":{},"12":{},"14":{},"16":{},"18":{},"2":{},"20":{},"22":{},"24":{},"26":{},"28":{},"30":{},"32":{},"34":{},"36":{},"38":{},"4":{},"40":{},"42":{},"44":{},"46":{},"48":{},"50":{},"52":{},"54":{},"56":{},"58":{},"6":{},"8":{}},"hours":{"0":{},"12":{},"15":{},"18":{},"21":{},"3":{},"6":{},"9":{}},"days":{"1":{},"11":{},"16":{},"21":{},"26":{},"31":{},"6":{}},"months":{"1":{},"5":{},"9":{}},"daysOfWeek":{"0":{},"2":{},"4":{},"6":{}}}`,
},
// minute segment
{
"-1 * * * *",
true,
"",
},
{
"60 * * * *",
true,
"",
},
{
"0 * * * *",
false,
`{"minutes":{"0":{}},"hours":{"0":{},"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"days":{"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"months":{"1":{},"10":{},"11":{},"12":{},"2":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"daysOfWeek":{"0":{},"1":{},"2":{},"3":{},"4":{},"5":{},"6":{}}}`,
},
{
"59 * * * *",
false,
`{"minutes":{"59":{}},"hours":{"0":{},"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"days":{"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"months":{"1":{},"10":{},"11":{},"12":{},"2":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"daysOfWeek":{"0":{},"1":{},"2":{},"3":{},"4":{},"5":{},"6":{}}}`,
},
{
"1,2,5,7,40-50/2 * * * *",
false,
`{"minutes":{"1":{},"2":{},"40":{},"42":{},"44":{},"46":{},"48":{},"5":{},"50":{},"7":{}},"hours":{"0":{},"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"days":{"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"months":{"1":{},"10":{},"11":{},"12":{},"2":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"daysOfWeek":{"0":{},"1":{},"2":{},"3":{},"4":{},"5":{},"6":{}}}`,
},
// hour segment
{
"* -1 * * *",
true,
"",
},
{
"* 24 * * *",
true,
"",
},
{
"* 0 * * *",
false,
`{"minutes":{"0":{},"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"32":{},"33":{},"34":{},"35":{},"36":{},"37":{},"38":{},"39":{},"4":{},"40":{},"41":{},"42":{},"43":{},"44":{},"45":{},"46":{},"47":{},"48":{},"49":{},"5":{},"50":{},"51":{},"52":{},"53":{},"54":{},"55":{},"56":{},"57":{},"58":{},"59":{},"6":{},"7":{},"8":{},"9":{}},"hours":{"0":{}},"days":{"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"months":{"1":{},"10":{},"11":{},"12":{},"2":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"daysOfWeek":{"0":{},"1":{},"2":{},"3":{},"4":{},"5":{},"6":{}}}`,
},
{
"* 23 * * *",
false,
`{"minutes":{"0":{},"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"32":{},"33":{},"34":{},"35":{},"36":{},"37":{},"38":{},"39":{},"4":{},"40":{},"41":{},"42":{},"43":{},"44":{},"45":{},"46":{},"47":{},"48":{},"49":{},"5":{},"50":{},"51":{},"52":{},"53":{},"54":{},"55":{},"56":{},"57":{},"58":{},"59":{},"6":{},"7":{},"8":{},"9":{}},"hours":{"23":{}},"days":{"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"months":{"1":{},"10":{},"11":{},"12":{},"2":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"daysOfWeek":{"0":{},"1":{},"2":{},"3":{},"4":{},"5":{},"6":{}}}`,
},
{
"* 3,4,8-16/3,7 * * *",
false,
`{"minutes":{"0":{},"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"32":{},"33":{},"34":{},"35":{},"36":{},"37":{},"38":{},"39":{},"4":{},"40":{},"41":{},"42":{},"43":{},"44":{},"45":{},"46":{},"47":{},"48":{},"49":{},"5":{},"50":{},"51":{},"52":{},"53":{},"54":{},"55":{},"56":{},"57":{},"58":{},"59":{},"6":{},"7":{},"8":{},"9":{}},"hours":{"11":{},"14":{},"3":{},"4":{},"7":{},"8":{}},"days":{"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"months":{"1":{},"10":{},"11":{},"12":{},"2":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"daysOfWeek":{"0":{},"1":{},"2":{},"3":{},"4":{},"5":{},"6":{}}}`,
},
// day segment
{
"* * 0 * *",
true,
"",
},
{
"* * 32 * *",
true,
"",
},
{
"* * 1 * *",
false,
`{"minutes":{"0":{},"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"32":{},"33":{},"34":{},"35":{},"36":{},"37":{},"38":{},"39":{},"4":{},"40":{},"41":{},"42":{},"43":{},"44":{},"45":{},"46":{},"47":{},"48":{},"49":{},"5":{},"50":{},"51":{},"52":{},"53":{},"54":{},"55":{},"56":{},"57":{},"58":{},"59":{},"6":{},"7":{},"8":{},"9":{}},"hours":{"0":{},"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"days":{"1":{}},"months":{"1":{},"10":{},"11":{},"12":{},"2":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"daysOfWeek":{"0":{},"1":{},"2":{},"3":{},"4":{},"5":{},"6":{}}}`,
},
{
"* * 31 * *",
false,
`{"minutes":{"0":{},"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"32":{},"33":{},"34":{},"35":{},"36":{},"37":{},"38":{},"39":{},"4":{},"40":{},"41":{},"42":{},"43":{},"44":{},"45":{},"46":{},"47":{},"48":{},"49":{},"5":{},"50":{},"51":{},"52":{},"53":{},"54":{},"55":{},"56":{},"57":{},"58":{},"59":{},"6":{},"7":{},"8":{},"9":{}},"hours":{"0":{},"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"days":{"31":{}},"months":{"1":{},"10":{},"11":{},"12":{},"2":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"daysOfWeek":{"0":{},"1":{},"2":{},"3":{},"4":{},"5":{},"6":{}}}`,
},
{
"* * 5,6,20-30/3,1 * *",
false,
`{"minutes":{"0":{},"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"32":{},"33":{},"34":{},"35":{},"36":{},"37":{},"38":{},"39":{},"4":{},"40":{},"41":{},"42":{},"43":{},"44":{},"45":{},"46":{},"47":{},"48":{},"49":{},"5":{},"50":{},"51":{},"52":{},"53":{},"54":{},"55":{},"56":{},"57":{},"58":{},"59":{},"6":{},"7":{},"8":{},"9":{}},"hours":{"0":{},"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"days":{"1":{},"20":{},"23":{},"26":{},"29":{},"5":{},"6":{}},"months":{"1":{},"10":{},"11":{},"12":{},"2":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"daysOfWeek":{"0":{},"1":{},"2":{},"3":{},"4":{},"5":{},"6":{}}}`,
},
// month segment
{
"* * * 0 *",
true,
"",
},
{
"* * * 13 *",
true,
"",
},
{
"* * * 1 *",
false,
`{"minutes":{"0":{},"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"32":{},"33":{},"34":{},"35":{},"36":{},"37":{},"38":{},"39":{},"4":{},"40":{},"41":{},"42":{},"43":{},"44":{},"45":{},"46":{},"47":{},"48":{},"49":{},"5":{},"50":{},"51":{},"52":{},"53":{},"54":{},"55":{},"56":{},"57":{},"58":{},"59":{},"6":{},"7":{},"8":{},"9":{}},"hours":{"0":{},"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"days":{"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"months":{"1":{}},"daysOfWeek":{"0":{},"1":{},"2":{},"3":{},"4":{},"5":{},"6":{}}}`,
},
{
"* * * 12 *",
false,
`{"minutes":{"0":{},"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"32":{},"33":{},"34":{},"35":{},"36":{},"37":{},"38":{},"39":{},"4":{},"40":{},"41":{},"42":{},"43":{},"44":{},"45":{},"46":{},"47":{},"48":{},"49":{},"5":{},"50":{},"51":{},"52":{},"53":{},"54":{},"55":{},"56":{},"57":{},"58":{},"59":{},"6":{},"7":{},"8":{},"9":{}},"hours":{"0":{},"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"days":{"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"months":{"12":{}},"daysOfWeek":{"0":{},"1":{},"2":{},"3":{},"4":{},"5":{},"6":{}}}`,
},
{
"* * * 1,4,5-10/2 *",
false,
`{"minutes":{"0":{},"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"32":{},"33":{},"34":{},"35":{},"36":{},"37":{},"38":{},"39":{},"4":{},"40":{},"41":{},"42":{},"43":{},"44":{},"45":{},"46":{},"47":{},"48":{},"49":{},"5":{},"50":{},"51":{},"52":{},"53":{},"54":{},"55":{},"56":{},"57":{},"58":{},"59":{},"6":{},"7":{},"8":{},"9":{}},"hours":{"0":{},"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"days":{"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"months":{"1":{},"4":{},"5":{},"7":{},"9":{}},"daysOfWeek":{"0":{},"1":{},"2":{},"3":{},"4":{},"5":{},"6":{}}}`,
},
// day of week segment
{
"* * * * -1",
true,
"",
},
{
"* * * * 7",
true,
"",
},
{
"* * * * 0",
false,
`{"minutes":{"0":{},"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"32":{},"33":{},"34":{},"35":{},"36":{},"37":{},"38":{},"39":{},"4":{},"40":{},"41":{},"42":{},"43":{},"44":{},"45":{},"46":{},"47":{},"48":{},"49":{},"5":{},"50":{},"51":{},"52":{},"53":{},"54":{},"55":{},"56":{},"57":{},"58":{},"59":{},"6":{},"7":{},"8":{},"9":{}},"hours":{"0":{},"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"days":{"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"months":{"1":{},"10":{},"11":{},"12":{},"2":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"daysOfWeek":{"0":{}}}`,
},
{
"* * * * 6",
false,
`{"minutes":{"0":{},"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"32":{},"33":{},"34":{},"35":{},"36":{},"37":{},"38":{},"39":{},"4":{},"40":{},"41":{},"42":{},"43":{},"44":{},"45":{},"46":{},"47":{},"48":{},"49":{},"5":{},"50":{},"51":{},"52":{},"53":{},"54":{},"55":{},"56":{},"57":{},"58":{},"59":{},"6":{},"7":{},"8":{},"9":{}},"hours":{"0":{},"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"days":{"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"months":{"1":{},"10":{},"11":{},"12":{},"2":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"daysOfWeek":{"6":{}}}`,
},
{
"* * * * 1,2-5/2",
false,
`{"minutes":{"0":{},"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"32":{},"33":{},"34":{},"35":{},"36":{},"37":{},"38":{},"39":{},"4":{},"40":{},"41":{},"42":{},"43":{},"44":{},"45":{},"46":{},"47":{},"48":{},"49":{},"5":{},"50":{},"51":{},"52":{},"53":{},"54":{},"55":{},"56":{},"57":{},"58":{},"59":{},"6":{},"7":{},"8":{},"9":{}},"hours":{"0":{},"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"days":{"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"months":{"1":{},"10":{},"11":{},"12":{},"2":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"daysOfWeek":{"1":{},"2":{},"4":{}}}`,
},
// macros
{
"@yearly",
false,
`{"minutes":{"0":{}},"hours":{"0":{}},"days":{"1":{}},"months":{"1":{}},"daysOfWeek":{"0":{},"1":{},"2":{},"3":{},"4":{},"5":{},"6":{}}}`,
},
{
"@annually",
false,
`{"minutes":{"0":{}},"hours":{"0":{}},"days":{"1":{}},"months":{"1":{}},"daysOfWeek":{"0":{},"1":{},"2":{},"3":{},"4":{},"5":{},"6":{}}}`,
},
{
"@monthly",
false,
`{"minutes":{"0":{}},"hours":{"0":{}},"days":{"1":{}},"months":{"1":{},"10":{},"11":{},"12":{},"2":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"daysOfWeek":{"0":{},"1":{},"2":{},"3":{},"4":{},"5":{},"6":{}}}`,
},
{
"@weekly",
false,
`{"minutes":{"0":{}},"hours":{"0":{}},"days":{"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"months":{"1":{},"10":{},"11":{},"12":{},"2":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"daysOfWeek":{"0":{}}}`,
},
{
"@daily",
false,
`{"minutes":{"0":{}},"hours":{"0":{}},"days":{"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"months":{"1":{},"10":{},"11":{},"12":{},"2":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"daysOfWeek":{"0":{},"1":{},"2":{},"3":{},"4":{},"5":{},"6":{}}}`,
},
{
"@midnight",
false,
`{"minutes":{"0":{}},"hours":{"0":{}},"days":{"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"months":{"1":{},"10":{},"11":{},"12":{},"2":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"daysOfWeek":{"0":{},"1":{},"2":{},"3":{},"4":{},"5":{},"6":{}}}`,
},
{
"@hourly",
false,
`{"minutes":{"0":{}},"hours":{"0":{},"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"days":{"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"months":{"1":{},"10":{},"11":{},"12":{},"2":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"daysOfWeek":{"0":{},"1":{},"2":{},"3":{},"4":{},"5":{},"6":{}}}`,
},
}
for _, s := range scenarios {
t.Run(s.cronExpr, func(t *testing.T) {
schedule, err := cron.NewSchedule(s.cronExpr)
hasErr := err != nil
if hasErr != s.expectError {
t.Fatalf("Expected hasErr to be %v, got %v (%v)", s.expectError, hasErr, err)
}
if hasErr {
return
}
encoded, err := json.Marshal(schedule)
if err != nil {
t.Fatalf("Failed to marshalize the result schedule: %v", err)
}
encodedStr := string(encoded)
if encodedStr != s.expectSchedule {
t.Fatalf("Expected \n%s, \ngot \n%s", s.expectSchedule, encodedStr)
}
})
}
}
func TestScheduleIsDue(t *testing.T) {
t.Parallel()
scenarios := []struct {
cronExpr string
moment *cron.Moment
expected bool
}{
{
"* * * * *",
&cron.Moment{},
false,
},
{
"* * * * *",
&cron.Moment{
Minute: 1,
Hour: 1,
Day: 1,
Month: 1,
DayOfWeek: 1,
},
true,
},
{
"5 * * * *",
&cron.Moment{
Minute: 1,
Hour: 1,
Day: 1,
Month: 1,
DayOfWeek: 1,
},
false,
},
{
"5 * * * *",
&cron.Moment{
Minute: 5,
Hour: 1,
Day: 1,
Month: 1,
DayOfWeek: 1,
},
true,
},
{
"* 2-6 * * 2,3",
&cron.Moment{
Minute: 1,
Hour: 2,
Day: 1,
Month: 1,
DayOfWeek: 1,
},
false,
},
{
"* 2-6 * * 2,3",
&cron.Moment{
Minute: 1,
Hour: 2,
Day: 1,
Month: 1,
DayOfWeek: 3,
},
true,
},
{
"* * 1,2,5,15-18 * *",
&cron.Moment{
Minute: 1,
Hour: 1,
Day: 6,
Month: 1,
DayOfWeek: 1,
},
false,
},
{
"* * 1,2,5,15-18/2 * *",
&cron.Moment{
Minute: 1,
Hour: 1,
Day: 2,
Month: 1,
DayOfWeek: 1,
},
true,
},
{
"* * 1,2,5,15-18/2 * *",
&cron.Moment{
Minute: 1,
Hour: 1,
Day: 18,
Month: 1,
DayOfWeek: 1,
},
false,
},
{
"* * 1,2,5,15-18/2 * *",
&cron.Moment{
Minute: 1,
Hour: 1,
Day: 17,
Month: 1,
DayOfWeek: 1,
},
true,
},
}
for i, s := range scenarios {
t.Run(fmt.Sprintf("%d-%s", i, s.cronExpr), func(t *testing.T) {
schedule, err := cron.NewSchedule(s.cronExpr)
if err != nil {
t.Fatalf("Unexpected cron error: %v", err)
}
result := schedule.IsDue(s.moment)
if result != s.expected {
t.Fatalf("Expected %v, got %v", s.expected, result)
}
})
}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/cron/schedule.go | tools/cron/schedule.go | package cron
import (
"errors"
"fmt"
"strconv"
"strings"
"time"
)
// Moment represents a parsed single time moment.
type Moment struct {
Minute int `json:"minute"`
Hour int `json:"hour"`
Day int `json:"day"`
Month int `json:"month"`
DayOfWeek int `json:"dayOfWeek"`
}
// NewMoment creates a new Moment from the specified time.
func NewMoment(t time.Time) *Moment {
return &Moment{
Minute: t.Minute(),
Hour: t.Hour(),
Day: t.Day(),
Month: int(t.Month()),
DayOfWeek: int(t.Weekday()),
}
}
// Schedule stores parsed information for each time component when a cron job should run.
type Schedule struct {
Minutes map[int]struct{} `json:"minutes"`
Hours map[int]struct{} `json:"hours"`
Days map[int]struct{} `json:"days"`
Months map[int]struct{} `json:"months"`
DaysOfWeek map[int]struct{} `json:"daysOfWeek"`
rawExpr string
}
// IsDue checks whether the provided Moment satisfies the current Schedule.
func (s *Schedule) IsDue(m *Moment) bool {
if _, ok := s.Minutes[m.Minute]; !ok {
return false
}
if _, ok := s.Hours[m.Hour]; !ok {
return false
}
if _, ok := s.Days[m.Day]; !ok {
return false
}
if _, ok := s.DaysOfWeek[m.DayOfWeek]; !ok {
return false
}
if _, ok := s.Months[m.Month]; !ok {
return false
}
return true
}
var macros = map[string]string{
"@yearly": "0 0 1 1 *",
"@annually": "0 0 1 1 *",
"@monthly": "0 0 1 * *",
"@weekly": "0 0 * * 0",
"@daily": "0 0 * * *",
"@midnight": "0 0 * * *",
"@hourly": "0 * * * *",
}
// NewSchedule creates a new Schedule from a cron expression.
//
// A cron expression could be a macro OR 5 segments separated by space,
// representing: minute, hour, day of the month, month and day of the week.
//
// The following segment formats are supported:
// - wildcard: *
// - range: 1-30
// - step: */n or 1-30/n
// - list: 1,2,3,10-20/n
//
// The following macros are supported:
// - @yearly (or @annually)
// - @monthly
// - @weekly
// - @daily (or @midnight)
// - @hourly
func NewSchedule(cronExpr string) (*Schedule, error) {
if v, ok := macros[cronExpr]; ok {
cronExpr = v
}
segments := strings.Split(cronExpr, " ")
if len(segments) != 5 {
return nil, errors.New("invalid cron expression - must be a valid macro or to have exactly 5 space separated segments")
}
minutes, err := parseCronSegment(segments[0], 0, 59)
if err != nil {
return nil, err
}
hours, err := parseCronSegment(segments[1], 0, 23)
if err != nil {
return nil, err
}
days, err := parseCronSegment(segments[2], 1, 31)
if err != nil {
return nil, err
}
months, err := parseCronSegment(segments[3], 1, 12)
if err != nil {
return nil, err
}
daysOfWeek, err := parseCronSegment(segments[4], 0, 6)
if err != nil {
return nil, err
}
return &Schedule{
Minutes: minutes,
Hours: hours,
Days: days,
Months: months,
DaysOfWeek: daysOfWeek,
rawExpr: cronExpr,
}, nil
}
// parseCronSegment parses a single cron expression segment (eg. "*/5",
// "1-30/2", "1,2,3") and returns its time schedule slots as a set.
//
// min and max define the inclusive boundaries of the segment
// (eg. 0-59 for the minutes segment).
func parseCronSegment(segment string, min int, max int) (map[int]struct{}, error) {
	slots := map[int]struct{}{}

	list := strings.Split(segment, ",")

	for _, p := range list {
		stepParts := strings.Split(p, "/")

		// step (*/n, 1-30/n)
		var step int
		switch len(stepParts) {
		case 1:
			step = 1
		case 2:
			parsedStep, err := strconv.Atoi(stepParts[1])
			if err != nil {
				return nil, err
			}
			if parsedStep < 1 || parsedStep > max {
				return nil, fmt.Errorf("invalid segment step boundary - the step must be between 1 and %d", max)
			}
			step = parsedStep
		default:
			return nil, errors.New("invalid segment step format - must be in the format */n or 1-30/n")
		}

		// find the min and max range of the segment part
		var rangeMin, rangeMax int
		if stepParts[0] == "*" {
			rangeMin = min
			rangeMax = max
		} else {
			// single digit (1) or range (1-30)
			rangeParts := strings.Split(stepParts[0], "-")
			switch len(rangeParts) {
			case 1:
				if step != 1 {
					return nil, errors.New("invalid segment step - step > 1 could be used only with the wildcard or range format")
				}
				parsed, err := strconv.Atoi(rangeParts[0])
				if err != nil {
					return nil, err
				}
				if parsed < min || parsed > max {
					return nil, errors.New("invalid segment value - must be between the min and max of the segment")
				}
				rangeMin = parsed
				rangeMax = rangeMin
			case 2:
				parsedMin, err := strconv.Atoi(rangeParts[0])
				if err != nil {
					return nil, err
				}
				if parsedMin < min || parsedMin > max {
					return nil, fmt.Errorf("invalid segment range minimum - must be between %d and %d", min, max)
				}
				rangeMin = parsedMin

				parsedMax, err := strconv.Atoi(rangeParts[1])
				if err != nil {
					return nil, err
				}
				if parsedMax < parsedMin || parsedMax > max {
					return nil, fmt.Errorf("invalid segment range maximum - must be between %d and %d", rangeMin, max)
				}
				rangeMax = parsedMax
			default:
				return nil, errors.New("invalid segment range format - the range must have 1 or 2 parts")
			}
		}

		// fill the slots
		for i := rangeMin; i <= rangeMax; i += step {
			slots[i] = struct{}{}
		}
	}

	return slots, nil
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/cron/cron_test.go | tools/cron/cron_test.go | package cron
import (
	"encoding/json"
	"slices"
	"sync"
	"testing"
	"time"
)
// TestCronNew verifies the defaults of a newly created Cron instance:
// 1 minute interval, UTC timezone, no jobs and no running ticker.
func TestCronNew(t *testing.T) {
	t.Parallel()
	c := New()
	expectedInterval := 1 * time.Minute
	if c.interval != expectedInterval {
		t.Fatalf("Expected default interval %v, got %v", expectedInterval, c.interval)
	}
	expectedTimezone := time.UTC
	if c.timezone.String() != expectedTimezone.String() {
		t.Fatalf("Expected default timezone %v, got %v", expectedTimezone, c.timezone)
	}
	if len(c.jobs) != 0 {
		t.Fatalf("Expected no jobs by default, got \n%v", c.jobs)
	}
	if c.ticker != nil {
		t.Fatal("Expected the ticker NOT to be initialized")
	}
}
// TestCronSetInterval verifies that SetInterval stores the provided tick duration.
func TestCronSetInterval(t *testing.T) {
	t.Parallel()
	c := New()
	interval := 2 * time.Minute
	c.SetInterval(interval)
	if c.interval != interval {
		t.Fatalf("Expected interval %v, got %v", interval, c.interval)
	}
}
// TestCronSetTimezone verifies that SetTimezone stores the provided location.
func TestCronSetTimezone(t *testing.T) {
	t.Parallel()
	c := New()
	// error ignored on purpose - a nil location would surface in the assertion below
	timezone, _ := time.LoadLocation("Asia/Tokyo")
	c.SetTimezone(timezone)
	if c.timezone.String() != timezone.String() {
		t.Fatalf("Expected timezone %v, got %v", timezone, c.timezone)
	}
}
// TestCronAddAndRemove verifies job registration (including error cases and
// overwriting an existing id), removal, and the resulting parsed schedules
// (compared against golden JSON serializations).
func TestCronAddAndRemove(t *testing.T) {
	t.Parallel()
	c := New()
	if err := c.Add("test0", "* * * * *", nil); err == nil {
		t.Fatal("Expected nil function error")
	}
	if err := c.Add("test1", "invalid", func() {}); err == nil {
		t.Fatal("Expected invalid cron expression error")
	}
	if err := c.Add("test2", "* * * * *", func() {}); err != nil {
		t.Fatal(err)
	}
	if err := c.Add("test3", "* * * * *", func() {}); err != nil {
		t.Fatal(err)
	}
	if err := c.Add("test4", "* * * * *", func() {}); err != nil {
		t.Fatal(err)
	}
	// overwrite test2
	if err := c.Add("test2", "1 2 3 4 5", func() {}); err != nil {
		t.Fatal(err)
	}
	if err := c.Add("test5", "1 2 3 4 5", func() {}); err != nil {
		t.Fatal(err)
	}
	// mock job deletion
	c.Remove("test4")
	// try to remove non-existing (should be no-op)
	c.Remove("missing")
	// index the remaining jobs by id for easier lookup
	indexedJobs := make(map[string]*Job, len(c.jobs))
	for _, j := range c.jobs {
		indexedJobs[j.Id()] = j
	}
	// check job keys
	{
		expectedKeys := []string{"test3", "test2", "test5"}
		if v := len(c.jobs); v != len(expectedKeys) {
			t.Fatalf("Expected %d jobs, got %d", len(expectedKeys), v)
		}
		for _, k := range expectedKeys {
			if indexedJobs[k] == nil {
				t.Fatalf("Expected job with key %s, got nil", k)
			}
		}
	}
	// check the jobs schedule
	{
		expectedSchedules := map[string]string{
			"test2": `{"minutes":{"1":{}},"hours":{"2":{}},"days":{"3":{}},"months":{"4":{}},"daysOfWeek":{"5":{}}}`,
			"test3": `{"minutes":{"0":{},"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"32":{},"33":{},"34":{},"35":{},"36":{},"37":{},"38":{},"39":{},"4":{},"40":{},"41":{},"42":{},"43":{},"44":{},"45":{},"46":{},"47":{},"48":{},"49":{},"5":{},"50":{},"51":{},"52":{},"53":{},"54":{},"55":{},"56":{},"57":{},"58":{},"59":{},"6":{},"7":{},"8":{},"9":{}},"hours":{"0":{},"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"days":{"1":{},"10":{},"11":{},"12":{},"13":{},"14":{},"15":{},"16":{},"17":{},"18":{},"19":{},"2":{},"20":{},"21":{},"22":{},"23":{},"24":{},"25":{},"26":{},"27":{},"28":{},"29":{},"3":{},"30":{},"31":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"months":{"1":{},"10":{},"11":{},"12":{},"2":{},"3":{},"4":{},"5":{},"6":{},"7":{},"8":{},"9":{}},"daysOfWeek":{"0":{},"1":{},"2":{},"3":{},"4":{},"5":{},"6":{}}}`,
			"test5": `{"minutes":{"1":{}},"hours":{"2":{}},"days":{"3":{}},"months":{"4":{}},"daysOfWeek":{"5":{}}}`,
		}
		for k, v := range expectedSchedules {
			raw, err := json.Marshal(indexedJobs[k].schedule)
			if err != nil {
				t.Fatal(err)
			}
			if string(raw) != v {
				t.Fatalf("Expected %q schedule \n%s, \ngot \n%s", k, v, raw)
			}
		}
	}
}
// TestCronMustAdd verifies that MustAdd panics for an invalid job (nil function)
// and successfully registers a valid one.
//
// The panic check is wrapped in its own closure so that the panic doesn't
// unwind the whole test function - in the previous version the deferred
// recover was installed on the test itself, making everything after the
// panicking MustAdd call unreachable (the "test2" registration and its
// assertion never ran).
func TestCronMustAdd(t *testing.T) {
	t.Parallel()

	c := New()

	// nil function - expected to panic
	func() {
		defer func() {
			if r := recover(); r == nil {
				t.Error("test1 didn't panic")
			}
		}()
		c.MustAdd("test1", "* * * * *", nil)
	}()

	// valid job - expected to be registered
	c.MustAdd("test2", "* * * * *", func() {})

	if !slices.ContainsFunc(c.jobs, func(j *Job) bool { return j.Id() == "test2" }) {
		t.Fatal("Couldn't find job test2")
	}
}
// TestCronRemoveAll verifies that RemoveAll drops every registered job.
func TestCronRemoveAll(t *testing.T) {
	t.Parallel()
	c := New()
	if err := c.Add("test1", "* * * * *", func() {}); err != nil {
		t.Fatal(err)
	}
	if err := c.Add("test2", "* * * * *", func() {}); err != nil {
		t.Fatal(err)
	}
	if err := c.Add("test3", "* * * * *", func() {}); err != nil {
		t.Fatal(err)
	}
	if v := len(c.jobs); v != 3 {
		t.Fatalf("Expected %d jobs, got %d", 3, v)
	}
	c.RemoveAll()
	if v := len(c.jobs); v != 0 {
		t.Fatalf("Expected %d jobs, got %d", 0, v)
	}
}
// TestCronTotal verifies that Total reports the number of registered jobs
// and that re-adding an existing id overwrites instead of duplicating.
func TestCronTotal(t *testing.T) {
	t.Parallel()
	c := New()
	if v := c.Total(); v != 0 {
		t.Fatalf("Expected 0 jobs, got %v", v)
	}
	if err := c.Add("test1", "* * * * *", func() {}); err != nil {
		t.Fatal(err)
	}
	if err := c.Add("test2", "* * * * *", func() {}); err != nil {
		t.Fatal(err)
	}
	// overwrite
	if err := c.Add("test1", "* * * * *", func() {}); err != nil {
		t.Fatal(err)
	}
	if v := c.Total(); v != 2 {
		t.Fatalf("Expected 2 jobs, got %v", v)
	}
}
// TestCronJobs verifies that Jobs returns the deduplicated job list and that
// each returned job still invokes its (latest) registered function.
func TestCronJobs(t *testing.T) {
	t.Parallel()
	c := New()
	calls := ""
	if err := c.Add("a", "1 * * * *", func() { calls += "a" }); err != nil {
		t.Fatal(err)
	}
	if err := c.Add("b", "2 * * * *", func() { calls += "b" }); err != nil {
		t.Fatal(err)
	}
	// overwrite
	if err := c.Add("b", "3 * * * *", func() { calls += "b" }); err != nil {
		t.Fatal(err)
	}
	jobs := c.Jobs()
	if len(jobs) != 2 {
		t.Fatalf("Expected 2 jobs, got %v", len(jobs))
	}
	for _, j := range jobs {
		j.Run()
	}
	expectedCalls := "ab"
	if calls != expectedCalls {
		t.Fatalf("Expected %q calls, got %q", expectedCalls, calls)
	}
}
// TestCronStartStop verifies that Start/Stop control the ticker
// (double Start resets, double Stop is a no-op) and that jobs fire
// once per tick.
//
// Changes from the previous version:
//   - uses MustAdd instead of silently ignoring the Add error returns;
//   - guards the call counters with a mutex since the job functions run
//     on the cron goroutine while the assertions read them from the test
//     goroutine (the unsynchronized version fails under -race).
func TestCronStartStop(t *testing.T) {
	t.Parallel()

	var mu sync.Mutex
	test1 := 0
	test2 := 0

	// snapshot reads both counters under the lock
	snapshot := func() (int, int) {
		mu.Lock()
		defer mu.Unlock()
		return test1, test2
	}

	c := New()

	c.SetInterval(250 * time.Millisecond)

	c.MustAdd("test1", "* * * * *", func() {
		mu.Lock()
		test1++
		mu.Unlock()
	})

	c.MustAdd("test2", "* * * * *", func() {
		mu.Lock()
		test2++
		mu.Unlock()
	})

	expectedCalls := 2

	// call twice Start to check if the previous ticker will be reset
	c.Start()
	c.Start()

	time.Sleep(505 * time.Millisecond) // slightly larger to minimize flakiness

	// call twice Stop to ensure that the second stop is no-op
	c.Stop()
	c.Stop()

	v1, v2 := snapshot()
	if v1 != expectedCalls {
		t.Fatalf("Expected %d test1, got %d", expectedCalls, v1)
	}
	if v2 != expectedCalls {
		t.Fatalf("Expected %d test2, got %d", expectedCalls, v2)
	}

	// resume for 1 second
	c.Start()
	time.Sleep(1005 * time.Millisecond) // slightly larger to minimize flakiness
	c.Stop()

	expectedCalls += 4

	v1, v2 = snapshot()
	if v1 != expectedCalls {
		t.Fatalf("Expected %d test1, got %d", expectedCalls, v1)
	}
	if v2 != expectedCalls {
		t.Fatalf("Expected %d test2, got %d", expectedCalls, v2)
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/file.go | tools/filesystem/file.go | package filesystem
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"mime/multipart"
"net/http"
"os"
"path"
"regexp"
"strings"
"github.com/gabriel-vasile/mimetype"
"github.com/pocketbase/pocketbase/tools/inflector"
"github.com/pocketbase/pocketbase/tools/security"
)
// FileReader defines an interface for a file resource reader.
type FileReader interface {
	Open() (io.ReadSeekCloser, error)
}

// File defines a single file [io.ReadSeekCloser] resource.
//
// The file could be from a local path, multipart/form-data header, etc.
type File struct {
	// Reader provides access to the underlying file content.
	Reader FileReader `form:"-" json:"-" xml:"-"`

	// Name is the normalized, storage-safe file name (see normalizeName).
	Name string `form:"name" json:"name" xml:"name"`

	// OriginalName is the name the file had at its source (path, upload, url).
	OriginalName string `form:"originalName" json:"originalName" xml:"originalName"`

	// Size is the file content length in bytes.
	Size int64 `form:"size" json:"size" xml:"size"`
}
// AsMap implements [core.mapExtractor] and returns a value suitable
// to be used in an API rule expression.
func (f *File) AsMap() map[string]any {
	result := make(map[string]any, 3)

	result["name"] = f.Name
	result["originalName"] = f.OriginalName
	result["size"] = f.Size

	return result
}
// NewFileFromPath creates a new File instance from the provided local file path.
func NewFileFromPath(path string) (*File, error) {
	info, err := os.Stat(path)
	if err != nil {
		return nil, err
	}

	reader := &PathReader{Path: path}

	return &File{
		Reader:       reader,
		Size:         info.Size(),
		OriginalName: info.Name(),
		Name:         normalizeName(reader, info.Name()),
	}, nil
}
// NewFileFromBytes creates a new File instance from the provided byte slice.
func NewFileFromBytes(b []byte, name string) (*File, error) {
	if len(b) == 0 {
		return nil, errors.New("cannot create an empty file")
	}

	reader := &BytesReader{b}

	return &File{
		Reader:       reader,
		Size:         int64(len(b)),
		OriginalName: name,
		Name:         normalizeName(reader, name),
	}, nil
}
// NewFileFromMultipart creates a new File from the provided multipart header.
func NewFileFromMultipart(mh *multipart.FileHeader) (*File, error) {
	reader := &MultipartReader{Header: mh}

	return &File{
		Reader:       reader,
		Size:         mh.Size,
		OriginalName: mh.Filename,
		Name:         normalizeName(reader, mh.Filename),
	}, nil
}
// NewFileFromURL creates a new File from the provided url by
// downloading the resource and load it as BytesReader.
//
// The request has no built-in timeout - control it via the provided ctx.
//
// Example
//
//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//	defer cancel()
//
//	file, err := filesystem.NewFileFromURL(ctx, "https://example.com/image.png")
func NewFileFromURL(ctx context.Context, url string) (*File, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()

	// treat any non 2xx/3xx response as a failure
	if res.StatusCode < 200 || res.StatusCode > 399 {
		return nil, fmt.Errorf("failed to download url %s (%d)", url, res.StatusCode)
	}

	// simpler than the previous manual bytes.Buffer + io.Copy combination
	raw, err := io.ReadAll(res.Body)
	if err != nil {
		return nil, err
	}

	return NewFileFromBytes(raw, path.Base(url))
}
// -------------------------------------------------------------------
var _ FileReader = (*MultipartReader)(nil)

// MultipartReader defines a FileReader from [multipart.FileHeader].
type MultipartReader struct {
	Header *multipart.FileHeader
}

// Open implements the [filesystem.FileReader] interface.
func (r *MultipartReader) Open() (io.ReadSeekCloser, error) {
	return r.Header.Open()
}

// -------------------------------------------------------------------

var _ FileReader = (*PathReader)(nil)

// PathReader defines a FileReader from a local file path.
type PathReader struct {
	Path string
}

// Open implements the [filesystem.FileReader] interface.
func (r *PathReader) Open() (io.ReadSeekCloser, error) {
	return os.Open(r.Path)
}

// -------------------------------------------------------------------

var _ FileReader = (*BytesReader)(nil)

// BytesReader defines a FileReader from bytes content.
type BytesReader struct {
	Bytes []byte
}

// Open implements the [filesystem.FileReader] interface.
func (r *BytesReader) Open() (io.ReadSeekCloser, error) {
	return &bytesReadSeekCloser{bytes.NewReader(r.Bytes)}, nil
}

// bytesReadSeekCloser adds a no-op Close to *bytes.Reader so that it
// satisfies io.ReadSeekCloser.
type bytesReadSeekCloser struct {
	*bytes.Reader
}

// Close implements the [io.ReadSeekCloser] interface.
//
// It is a no-op because there is no underlying resource to release.
func (r *bytesReadSeekCloser) Close() error {
	return nil
}

// -------------------------------------------------------------------

var _ FileReader = (openFuncAsReader)(nil)

// openFuncAsReader defines a FileReader from a bare Open function.
type openFuncAsReader func() (io.ReadSeekCloser, error)

// Open implements the [filesystem.FileReader] interface.
func (r openFuncAsReader) Open() (io.ReadSeekCloser, error) {
	return r()
}
// -------------------------------------------------------------------
// extInvalidCharsRegex matches runs of characters that are not allowed in a file extension.
var extInvalidCharsRegex = regexp.MustCompile(`[^\w\.\*\-\+\=\#]+`)

// randomAlphabet is the character set used for the random filename suffixes.
const randomAlphabet = "abcdefghijklmnopqrstuvwxyz0123456789"

// normalizeName sanitizes name into a storage-safe filename of the form
// "<cleanName>_<random10><ext>", detecting the extension from the file
// content (via fr) when the name doesn't carry a usable one.
func normalizeName(fr FileReader, name string) string {
	// cut the name even if it is not multibyte safe to avoid operating on too large strings
	// ---
	originalLength := len(name)
	if originalLength > 300 {
		name = name[originalLength-300:]
	}
	// extension
	// ---
	originalExt := extractExtension(name)
	cleanExt := "." + strings.Trim(extInvalidCharsRegex.ReplaceAllString(originalExt, ""), ".")
	if cleanExt == "." {
		// try to detect the extension from the file content
		// (detection error ignored on purpose - the extension simply stays empty)
		cleanExt, _ = detectExtension(fr)
	}
	if extLength := len(cleanExt); extLength > 20 {
		// keep only the last 20 characters (it is multibyte safe after the regex replace)
		cleanExt = "." + strings.Trim(cleanExt[extLength-20:], ".")
	}
	// name
	//
	// note: leading dot is trimmed to prevent various subtle issues with files
	// sync programs as they sometimes have special handling for "invisible" files
	// ---
	cleanName := inflector.Snakecase(strings.Trim(strings.TrimSuffix(name, originalExt), "."))
	if length := len(cleanName); length < 3 {
		// the name is too short so we concatenate an additional random part
		cleanName += security.RandomStringWithAlphabet(10, randomAlphabet)
	} else if length > 100 {
		// keep only the first 100 characters (it is multibyte safe after Snakecase)
		cleanName = cleanName[:100]
	}
	return fmt.Sprintf(
		"%s_%s%s",
		cleanName,
		security.RandomStringWithAlphabet(10, randomAlphabet), // ensure that there is always a random part
		cleanExt,
	)
}
// extractExtension extracts the extension (with leading dot) from name.
//
// This differ from filepath.Ext() by supporting double extensions (eg. ".tar.gz").
//
// Returns an empty string if no match is found.
//
// Example:
//	extractExtension("test.txt") // .txt
//	extractExtension("test.tar.gz") // .tar.gz
//	extractExtension("test.a.tar.gz") // .tar.gz
func extractExtension(name string) string {
	lastDot := strings.LastIndex(name, ".")
	if lastDot < 0 {
		return ""
	}

	// check for a secondary extension right before the last one (eg. ".tar" in ".tar.gz")
	if prevDot := strings.LastIndex(name[:lastDot], "."); prevDot >= 0 {
		return name[prevDot:]
	}

	return name[lastDot:]
}
// detectExtension tries to detect the extension from file mime type
// by sniffing the beginning of the file content.
//
// Returns the extension with a leading dot (as reported by mimetype)
// or an error if the reader cannot be opened or sniffed.
func detectExtension(fr FileReader) (string, error) {
	r, err := fr.Open()
	if err != nil {
		return "", err
	}
	defer r.Close()
	mt, err := mimetype.DetectReader(r)
	if err != nil {
		return "", err
	}
	return mt.Extension(), nil
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/file_test.go | tools/filesystem/file_test.go | package filesystem_test
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"testing"
"github.com/pocketbase/pocketbase/tests"
"github.com/pocketbase/pocketbase/tools/filesystem"
)
// TestFileAsMap verifies the keys and values of the File.AsMap result.
func TestFileAsMap(t *testing.T) {
	file, err := filesystem.NewFileFromBytes([]byte("test"), "test123.txt")
	if err != nil {
		t.Fatal(err)
	}
	result := file.AsMap()
	if len(result) != 3 {
		t.Fatalf("Expected map with %d keys, got\n%v", 3, result)
	}
	if result["size"] != int64(4) {
		t.Fatalf("Expected size %d, got %#v", 4, result["size"])
	}
	// the normalized name has a random suffix, so only the prefix is checked
	if str, ok := result["name"].(string); !ok || !strings.HasPrefix(str, "test123") {
		t.Fatalf("Expected name to have prefix %q, got %#v", "test123", result["name"])
	}
	if result["originalName"] != "test123.txt" {
		t.Fatalf("Expected originalName %q, got %#v", "test123.txt", result["originalName"])
	}
}
// TestNewFileFromPath verifies File construction from a local path:
// missing file error, original vs normalized name, size and reader type.
func TestNewFileFromPath(t *testing.T) {
	testDir := createTestDir(t)
	defer os.RemoveAll(testDir)
	// missing file
	_, err := filesystem.NewFileFromPath("missing")
	if err == nil {
		t.Fatal("Expected error, got nil")
	}
	// existing file
	originalName := "image_!@ special"
	normalizedNamePattern := regexp.QuoteMeta("image_special_") + `\w{10}` + regexp.QuoteMeta(".png")
	f, err := filesystem.NewFileFromPath(filepath.Join(testDir, originalName))
	if err != nil {
		t.Fatalf("Expected nil error, got %v", err)
	}
	if f.OriginalName != originalName {
		t.Fatalf("Expected OriginalName %q, got %q", originalName, f.OriginalName)
	}
	if match, err := regexp.Match(normalizedNamePattern, []byte(f.Name)); !match {
		t.Fatalf("Expected Name to match %v, got %q (%v)", normalizedNamePattern, f.Name, err)
	}
	if f.Size != 73 {
		t.Fatalf("Expected Size %v, got %v", 73, f.Size)
	}
	if _, ok := f.Reader.(*filesystem.PathReader); !ok {
		t.Fatalf("Expected Reader to be PathReader, got %v", f.Reader)
	}
}
// TestNewFileFromBytes verifies File construction from a byte slice:
// nil/empty content errors, size, original and normalized names.
func TestNewFileFromBytes(t *testing.T) {
	// nil bytes
	if _, err := filesystem.NewFileFromBytes(nil, "photo.jpg"); err == nil {
		t.Fatal("Expected error, got nil")
	}
	// zero bytes
	if _, err := filesystem.NewFileFromBytes([]byte{}, "photo.jpg"); err == nil {
		t.Fatal("Expected error, got nil")
	}
	originalName := "image_!@ special"
	// .txt is expected because the extension is detected from the content
	normalizedNamePattern := regexp.QuoteMeta("image_special_") + `\w{10}` + regexp.QuoteMeta(".txt")
	f, err := filesystem.NewFileFromBytes([]byte("text\n"), originalName)
	if err != nil {
		t.Fatal(err)
	}
	if f.Size != 5 {
		t.Fatalf("Expected Size %v, got %v", 5, f.Size)
	}
	if f.OriginalName != originalName {
		t.Fatalf("Expected OriginalName %q, got %q", originalName, f.OriginalName)
	}
	if match, err := regexp.Match(normalizedNamePattern, []byte(f.Name)); !match {
		t.Fatalf("Expected Name to match %v, got %q (%v)", normalizedNamePattern, f.Name, err)
	}
}
// TestNewFileFromMultipart verifies File construction from a multipart
// form file header (names, size and reader type).
func TestNewFileFromMultipart(t *testing.T) {
	formData, mp, err := tests.MockMultipartData(nil, "test")
	if err != nil {
		t.Fatal(err)
	}
	req := httptest.NewRequest("", "/", formData)
	req.Header.Set("Content-Type", mp.FormDataContentType())
	// parse error ignored on purpose - FormFile below would fail if parsing failed
	req.ParseMultipartForm(32 << 20)
	_, mh, err := req.FormFile("test")
	if err != nil {
		t.Fatal(err)
	}
	f, err := filesystem.NewFileFromMultipart(mh)
	if err != nil {
		t.Fatal(err)
	}
	originalNamePattern := regexp.QuoteMeta("tmpfile-") + `\w+` + regexp.QuoteMeta(".txt")
	if match, err := regexp.Match(originalNamePattern, []byte(f.OriginalName)); !match {
		t.Fatalf("Expected OriginalName to match %v, got %q (%v)", originalNamePattern, f.OriginalName, err)
	}
	normalizedNamePattern := regexp.QuoteMeta("tmpfile_") + `\w+\_\w{10}` + regexp.QuoteMeta(".txt")
	if match, err := regexp.Match(normalizedNamePattern, []byte(f.Name)); !match {
		t.Fatalf("Expected Name to match %v, got %q (%v)", normalizedNamePattern, f.Name, err)
	}
	if f.Size != 4 {
		t.Fatalf("Expected Size %v, got %v", 4, f.Size)
	}
	if _, ok := f.Reader.(*filesystem.MultipartReader); !ok {
		t.Fatalf("Expected Reader to be MultipartReader, got %v", f.Reader)
	}
}
// TestNewFileFromURLTimeout verifies NewFileFromURL against a local test
// server: cancelled context, non-2xx response and a successful download.
func TestNewFileFromURLTimeout(t *testing.T) {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path == "/error" {
			w.WriteHeader(http.StatusInternalServerError)
		}
		fmt.Fprintf(w, "test")
	}))
	defer srv.Close()
	// cancelled context
	{
		ctx, cancel := context.WithCancel(context.Background())
		cancel()
		f, err := filesystem.NewFileFromURL(ctx, srv.URL+"/cancel")
		if err == nil {
			t.Fatal("[ctx_cancel] Expected error, got nil")
		}
		if f != nil {
			t.Fatalf("[ctx_cancel] Expected file to be nil, got %v", f)
		}
	}
	// error response
	{
		f, err := filesystem.NewFileFromURL(context.Background(), srv.URL+"/error")
		if err == nil {
			t.Fatal("[error_status] Expected error, got nil")
		}
		if f != nil {
			t.Fatalf("[error_status] Expected file to be nil, got %v", f)
		}
	}
	// valid response
	{
		originalName := "image_!@ special"
		normalizedNamePattern := regexp.QuoteMeta("image_special_") + `\w{10}` + regexp.QuoteMeta(".txt")
		f, err := filesystem.NewFileFromURL(context.Background(), srv.URL+"/"+originalName)
		if err != nil {
			t.Fatalf("[valid] Unexpected error %v", err)
		}
		if f == nil {
			t.Fatal("[valid] Expected non-nil file")
		}
		// check the created file fields
		if f.OriginalName != originalName {
			t.Fatalf("Expected OriginalName %q, got %q", originalName, f.OriginalName)
		}
		if match, err := regexp.Match(normalizedNamePattern, []byte(f.Name)); !match {
			t.Fatalf("Expected Name to match %v, got %q (%v)", normalizedNamePattern, f.Name, err)
		}
		if f.Size != 4 {
			t.Fatalf("Expected Size %v, got %v", 4, f.Size)
		}
		if _, ok := f.Reader.(*filesystem.BytesReader); !ok {
			t.Fatalf("Expected Reader to be BytesReader, got %v", f.Reader)
		}
	}
}
// TestFileNameNormalizations is a table test for the filename normalization
// rules (random padding, extension detection/truncation, length limits).
func TestFileNameNormalizations(t *testing.T) {
	scenarios := []struct {
		name    string
		pattern string
	}{
		{"", `^\w{10}_\w{10}\.txt$`},
		{".png", `^\w{10}_\w{10}\.png$`},
		{".tar.gz", `^\w{10}_\w{10}\.tar\.gz$`},
		{"a.tar.gz", `^a\w{10}_\w{10}\.tar\.gz$`},
		{"....abc", `^\w{10}_\w{10}\.abc$`},
		{"a.b.c.?.?.?.2", `^a_b_c_\w{10}\.2$`},
		{"a.b.c.d.tar.gz", `^a_b_c_d_\w{10}\.tar\.gz$`},
		{"abcd", `^abcd_\w{10}\.txt$`},
		{".abcd.123.", `^abcd_\w{10}\.123$`},
		{"a b! c d . 456", `^a_b_c_d_\w{10}\.456$`}, // normalize spaces
		{strings.Repeat("a", 101) + "." + strings.Repeat("b", 21), `^a{100}_\w{10}\.b{20}$`}, // name and extension length cut
		{"abc" + strings.Repeat("d", 290) + "." + strings.Repeat("b", 9), `^d{100}_\w{10}\.b{9}$`}, // initial total length cut
	}
	for i, s := range scenarios {
		t.Run(strconv.Itoa(i)+"_"+s.name, func(t *testing.T) {
			f, err := filesystem.NewFileFromBytes([]byte("abc"), s.name)
			if err != nil {
				t.Fatal(err)
			}
			match, err := regexp.Match(s.pattern, []byte(f.Name))
			if !match {
				t.Fatalf("Expected Name to match %v, got %q (%v)", s.pattern, f.Name, err)
			}
		})
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/filesystem_test.go | tools/filesystem/filesystem_test.go | package filesystem_test
import (
"bytes"
"errors"
"image"
"image/jpeg"
"image/png"
"io"
"mime/multipart"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"strings"
"testing"
"github.com/gabriel-vasile/mimetype"
"github.com/pocketbase/pocketbase/tools/filesystem"
)
// TestFileSystemExists is a table test for System.Exists over a prepopulated local dir.
func TestFileSystemExists(t *testing.T) {
	dir := createTestDir(t)
	defer os.RemoveAll(dir)
	fsys, err := filesystem.NewLocal(dir)
	if err != nil {
		t.Fatal(err)
	}
	defer fsys.Close()
	scenarios := []struct {
		file   string
		exists bool
	}{
		{"sub1.txt", false},
		{"test/sub1.txt", true},
		{"test/sub2.txt", true},
		{"image.png", true},
	}
	for _, s := range scenarios {
		t.Run(s.file, func(t *testing.T) {
			exists, err := fsys.Exists(s.file)
			if err != nil {
				t.Fatal(err)
			}
			if exists != s.exists {
				t.Fatalf("Expected exists %v, got %v", s.exists, exists)
			}
		})
	}
}
// TestFileSystemAttributes is a table test for System.Attributes
// (missing keys surface ErrNotFound; existing keys report a content type).
func TestFileSystemAttributes(t *testing.T) {
	dir := createTestDir(t)
	defer os.RemoveAll(dir)
	fsys, err := filesystem.NewLocal(dir)
	if err != nil {
		t.Fatal(err)
	}
	defer fsys.Close()
	scenarios := []struct {
		file              string
		expectError       bool
		expectContentType string
	}{
		{"sub1.txt", true, ""},
		{"test/sub1.txt", false, "application/octet-stream"},
		{"test/sub2.txt", false, "application/octet-stream"},
		{"image.png", false, "image/png"},
	}
	for _, s := range scenarios {
		t.Run(s.file, func(t *testing.T) {
			attr, err := fsys.Attributes(s.file)
			hasErr := err != nil
			if hasErr != s.expectError {
				t.Fatalf("Expected hasErr %v, got %v", s.expectError, hasErr)
			}
			if hasErr && !errors.Is(err, filesystem.ErrNotFound) {
				t.Fatalf("Expected ErrNotFound err, got %q", err)
			}
			if !hasErr && attr.ContentType != s.expectContentType {
				t.Fatalf("Expected attr.ContentType to be %q, got %q", s.expectContentType, attr.ContentType)
			}
		})
	}
}
// TestFileSystemDelete verifies System.Delete for a missing key
// (ErrNotFound) and an existing one.
func TestFileSystemDelete(t *testing.T) {
	dir := createTestDir(t)
	defer os.RemoveAll(dir)
	fsys, err := filesystem.NewLocal(dir)
	if err != nil {
		t.Fatal(err)
	}
	defer fsys.Close()
	if err := fsys.Delete("missing.txt"); err == nil || !errors.Is(err, filesystem.ErrNotFound) {
		t.Fatalf("Expected ErrNotFound error, got %v", err)
	}
	if err := fsys.Delete("image.png"); err != nil {
		t.Fatalf("Expected nil, got error %v", err)
	}
}
// TestFileSystemDeletePrefixWithoutTrailingSlash verifies that DeletePrefix
// removes the files under a prefix but keeps the directory itself when the
// prefix has no trailing slash.
func TestFileSystemDeletePrefixWithoutTrailingSlash(t *testing.T) {
	dir := createTestDir(t)
	defer os.RemoveAll(dir)
	fsys, err := filesystem.NewLocal(dir)
	if err != nil {
		t.Fatal(err)
	}
	defer fsys.Close()
	if errs := fsys.DeletePrefix(""); len(errs) == 0 {
		t.Fatal("Expected error, got nil", errs)
	}
	if errs := fsys.DeletePrefix("missing"); len(errs) != 0 {
		t.Fatalf("Not existing prefix shouldn't error, got %v", errs)
	}
	if errs := fsys.DeletePrefix("test"); len(errs) != 0 {
		t.Fatalf("Expected nil, got errors %v", errs)
	}
	// ensure that the test/* files are deleted
	if exists, _ := fsys.Exists("test/sub1.txt"); exists {
		t.Fatalf("Expected test/sub1.txt to be deleted")
	}
	if exists, _ := fsys.Exists("test/sub2.txt"); exists {
		t.Fatalf("Expected test/sub2.txt to be deleted")
	}
	// the test directory should remain since the prefix didn't have trailing slash
	if _, err := os.Stat(filepath.Join(dir, "test")); os.IsNotExist(err) {
		t.Fatal("Expected the prefix dir to remain")
	}
}
// TestFileSystemDeletePrefixWithTrailingSlash verifies that DeletePrefix
// removes both the files and the directory itself when the prefix ends
// with a trailing slash.
func TestFileSystemDeletePrefixWithTrailingSlash(t *testing.T) {
	dir := createTestDir(t)
	defer os.RemoveAll(dir)
	fsys, err := filesystem.NewLocal(dir)
	if err != nil {
		t.Fatal(err)
	}
	defer fsys.Close()
	if errs := fsys.DeletePrefix("missing/"); len(errs) != 0 {
		t.Fatalf("Not existing prefix shouldn't error, got %v", errs)
	}
	if errs := fsys.DeletePrefix("test/"); len(errs) != 0 {
		t.Fatalf("Expected nil, got errors %v", errs)
	}
	// ensure that the test/* files are deleted
	if exists, _ := fsys.Exists("test/sub1.txt"); exists {
		t.Fatalf("Expected test/sub1.txt to be deleted")
	}
	if exists, _ := fsys.Exists("test/sub2.txt"); exists {
		t.Fatalf("Expected test/sub2.txt to be deleted")
	}
	// the test directory should be also deleted since the prefix has trailing slash
	if _, err := os.Stat(filepath.Join(dir, "test")); !os.IsNotExist(err) {
		t.Fatal("Expected the prefix dir to be deleted")
	}
}
// TestFileSystemIsEmptyDir is a table test for System.IsEmptyDir
// covering the root, missing and empty dirs, with and without trailing slash.
func TestFileSystemIsEmptyDir(t *testing.T) {
	dir := createTestDir(t)
	defer os.RemoveAll(dir)
	fsys, err := filesystem.NewLocal(dir)
	if err != nil {
		t.Fatal(err)
	}
	defer fsys.Close()
	scenarios := []struct {
		dir      string
		expected bool
	}{
		{"", false}, // special case that shouldn't be suffixed with delimiter to search for any files within the bucket
		{"/", true},
		{"missing", true},
		{"missing/", true},
		{"test", false},
		{"test/", false},
		{"empty", true},
		{"empty/", true},
	}
	for _, s := range scenarios {
		t.Run(s.dir, func(t *testing.T) {
			result := fsys.IsEmptyDir(s.dir)
			if result != s.expected {
				t.Fatalf("Expected %v, got %v", s.expected, result)
			}
		})
	}
}
// TestFileSystemUploadMultipart verifies System.UploadMultipart and that
// the uploaded object stores the original filename in its metadata.
func TestFileSystemUploadMultipart(t *testing.T) {
	dir := createTestDir(t)
	defer os.RemoveAll(dir)
	// create multipart form file
	body := new(bytes.Buffer)
	mp := multipart.NewWriter(body)
	w, err := mp.CreateFormFile("test", "test")
	if err != nil {
		t.Fatalf("Failed creating form file: %v", err)
	}
	w.Write([]byte("demo"))
	mp.Close()
	req := httptest.NewRequest(http.MethodPost, "/", body)
	req.Header.Add("Content-Type", mp.FormDataContentType())
	file, fh, err := req.FormFile("test")
	if err != nil {
		t.Fatalf("Failed to fetch form file: %v", err)
	}
	defer file.Close()
	// ---
	fsys, err := filesystem.NewLocal(dir)
	if err != nil {
		t.Fatal(err)
	}
	defer fsys.Close()
	fileKey := "newdir/newkey.txt"
	uploadErr := fsys.UploadMultipart(fh, fileKey)
	if uploadErr != nil {
		t.Fatal(uploadErr)
	}
	if exists, _ := fsys.Exists(fileKey); !exists {
		t.Fatalf("Expected %q to exist", fileKey)
	}
	attrs, err := fsys.Attributes(fileKey)
	if err != nil {
		t.Fatalf("Failed to fetch file attributes: %v", err)
	}
	if name, ok := attrs.Metadata["original-filename"]; !ok || name != "test" {
		t.Fatalf("Expected original-filename to be %q, got %q", "test", name)
	}
}
// TestFileSystemUploadFile verifies System.UploadFile and that the uploaded
// object stores the (overridden) original filename in its metadata.
func TestFileSystemUploadFile(t *testing.T) {
	dir := createTestDir(t)
	defer os.RemoveAll(dir)
	fsys, err := filesystem.NewLocal(dir)
	if err != nil {
		t.Fatal(err)
	}
	defer fsys.Close()
	fileKey := "newdir/newkey.txt"
	file, err := filesystem.NewFileFromPath(filepath.Join(dir, "image.svg"))
	if err != nil {
		t.Fatalf("Failed to load test file: %v", err)
	}
	file.OriginalName = "test.txt"
	uploadErr := fsys.UploadFile(file, fileKey)
	if uploadErr != nil {
		t.Fatal(uploadErr)
	}
	if exists, _ := fsys.Exists(fileKey); !exists {
		t.Fatalf("Expected %q to exist", fileKey)
	}
	attrs, err := fsys.Attributes(fileKey)
	if err != nil {
		t.Fatalf("Failed to fetch file attributes: %v", err)
	}
	if name, ok := attrs.Metadata["original-filename"]; !ok || name != file.OriginalName {
		t.Fatalf("Expected original-filename to be %q, got %q", file.OriginalName, name)
	}
}
// TestFileSystemUpload verifies the raw bytes System.Upload method.
func TestFileSystemUpload(t *testing.T) {
	dir := createTestDir(t)
	defer os.RemoveAll(dir)
	fsys, err := filesystem.NewLocal(dir)
	if err != nil {
		t.Fatal(err)
	}
	defer fsys.Close()
	fileKey := "newdir/newkey.txt"
	uploadErr := fsys.Upload([]byte("demo"), fileKey)
	if uploadErr != nil {
		t.Fatal(uploadErr)
	}
	if exists, _ := fsys.Exists(fileKey); !exists {
		t.Fatalf("Expected %s to exist", fileKey)
	}
}
// TestFileSystemServe is a table test for System.Serve covering missing keys,
// inline vs attachment dispositions, the forced ?download=1 query, the
// svg/css/js/mjs forced-attachment exceptions and custom response headers.
func TestFileSystemServe(t *testing.T) {
	dir := createTestDir(t)
	defer os.RemoveAll(dir)
	fsys, err := filesystem.NewLocal(dir)
	if err != nil {
		t.Fatal(err)
	}
	defer fsys.Close()
	csp := "default-src 'none'; media-src 'self'; style-src 'unsafe-inline'; sandbox"
	cacheControl := "max-age=2592000, stale-while-revalidate=86400"
	scenarios := []struct {
		path          string
		name          string
		query         map[string]string
		headers       map[string]string
		expectError   bool
		expectHeaders map[string]string
	}{
		{
			// missing
			"missing.txt",
			"test_name.txt",
			nil,
			nil,
			true,
			nil,
		},
		{
			// existing regular file
			"test/sub1.txt",
			"test_name.txt",
			nil,
			nil,
			false,
			map[string]string{
				"Content-Disposition":     "attachment; filename=test_name.txt",
				"Content-Type":            "application/octet-stream",
				"Content-Length":          "4",
				"Content-Security-Policy": csp,
				"Cache-Control":           cacheControl,
			},
		},
		{
			// png inline
			"image.png",
			"test_name.png",
			nil,
			nil,
			false,
			map[string]string{
				"Content-Disposition":     "inline; filename=test_name.png",
				"Content-Type":            "image/png",
				"Content-Length":          "73",
				"Content-Security-Policy": csp,
				"Cache-Control":           cacheControl,
			},
		},
		{
			// png with forced attachment
			"image.png",
			"test_name_download.png",
			map[string]string{"download": "1"},
			nil,
			false,
			map[string]string{
				"Content-Disposition":     "attachment; filename=test_name_download.png",
				"Content-Type":            "image/png",
				"Content-Length":          "73",
				"Content-Security-Policy": csp,
				"Cache-Control":           cacheControl,
			},
		},
		{
			// svg exception
			"image.svg",
			"test_name.svg",
			nil,
			nil,
			false,
			map[string]string{
				"Content-Disposition":     "attachment; filename=test_name.svg",
				"Content-Type":            "image/svg+xml",
				"Content-Length":          "0",
				"Content-Security-Policy": csp,
				"Cache-Control":           cacheControl,
			},
		},
		{
			// css exception
			"style.css",
			"test_name.css",
			nil,
			nil,
			false,
			map[string]string{
				"Content-Disposition":     "attachment; filename=test_name.css",
				"Content-Type":            "text/css",
				"Content-Length":          "0",
				"Content-Security-Policy": csp,
				"Cache-Control":           cacheControl,
			},
		},
		{
			// js exception
			"main.js",
			"test_name.js",
			nil,
			nil,
			false,
			map[string]string{
				"Content-Disposition":     "attachment; filename=test_name.js",
				"Content-Type":            "text/javascript",
				"Content-Length":          "0",
				"Content-Security-Policy": csp,
				"Cache-Control":           cacheControl,
			},
		},
		{
			// mjs exception
			"main.mjs",
			"test_name.mjs",
			nil,
			nil,
			false,
			map[string]string{
				"Content-Disposition":     "attachment; filename=test_name.mjs",
				"Content-Type":            "text/javascript",
				"Content-Length":          "0",
				"Content-Security-Policy": csp,
				"Cache-Control":           cacheControl,
			},
		},
		{
			// custom header
			"test/sub2.txt",
			"test_name.txt",
			nil,
			map[string]string{
				"Content-Disposition":     "1",
				"Content-Type":            "2",
				"Content-Length":          "1",
				"Content-Security-Policy": "4",
				"Cache-Control":           "5",
				"X-Custom":                "6",
			},
			false,
			map[string]string{
				"Content-Disposition":     "1",
				"Content-Type":            "2",
				"Content-Length":          "4", // overwritten by http.ServeContent
				"Content-Security-Policy": "4",
				"Cache-Control":           "5",
				"X-Custom":                "6",
			},
		},
	}
	for _, s := range scenarios {
		t.Run(s.path, func(t *testing.T) {
			res := httptest.NewRecorder()
			req := httptest.NewRequest("GET", "/", nil)
			query := req.URL.Query()
			for k, v := range s.query {
				query.Set(k, v)
			}
			req.URL.RawQuery = query.Encode()
			// pre-set headers on the recorder to simulate custom response headers
			for k, v := range s.headers {
				res.Header().Set(k, v)
			}
			err := fsys.Serve(res, req, s.path, s.name)
			hasErr := err != nil
			if hasErr != s.expectError {
				t.Fatalf("Expected hasError %v, got %v (%v)", s.expectError, hasErr, err)
			}
			if s.expectError {
				return
			}
			result := res.Result()
			defer result.Body.Close()
			for hName, hValue := range s.expectHeaders {
				v := result.Header.Get(hName)
				if v != hValue {
					t.Errorf("Expected value %q for header %q, got %q", hValue, hName, v)
				}
			}
		})
	}
}
// TestFileSystemGetReader verifies reading both missing and existing file keys.
func TestFileSystemGetReader(t *testing.T) {
	dir := createTestDir(t)
	defer os.RemoveAll(dir)

	fsys, err := filesystem.NewLocal(dir)
	if err != nil {
		t.Fatal(err)
	}
	defer fsys.Close()

	scenarios := []struct {
		file            string
		expectError     bool
		expectedContent string
	}{
		{"test/missing.txt", true, ""},
		{"test/sub1.txt", false, "sub1"},
	}

	for _, s := range scenarios {
		t.Run(s.file, func(t *testing.T) {
			r, err := fsys.GetReader(s.file)
			defer func() {
				if r != nil {
					r.Close()
				}
			}()

			hasErr := err != nil
			if hasErr != s.expectError {
				t.Fatalf("Expected hasErr %v, got %v", s.expectError, hasErr)
			}

			if hasErr {
				// missing keys must surface as ErrNotFound
				if !errors.Is(err, filesystem.ErrNotFound) {
					t.Fatalf("Expected ErrNotFound error, got %v", err)
				}
				return
			}

			content, readErr := io.ReadAll(r)
			if readErr != nil {
				t.Fatal(readErr)
			}
			if str := string(content); str != s.expectedContent {
				t.Fatalf("Expected content\n%s\ngot\n%s", s.expectedContent, str)
			}
		})
	}
}
// TestFileSystemGetReuploadableFile verifies that GetReuploadableFile exposes
// a stored blob as a reuploadable File value, both when preserving the
// original name and when generating a new randomized name.
func TestFileSystemGetReuploadableFile(t *testing.T) {
	dir := createTestDir(t)
	defer os.RemoveAll(dir)

	fsys, err := filesystem.NewLocal(dir)
	if err != nil {
		t.Fatal(err)
	}
	defer fsys.Close()

	t.Run("missing.txt", func(t *testing.T) {
		_, err := fsys.GetReuploadableFile("missing.txt", false)
		if err == nil {
			t.Fatal("Expected error, got nil")
		}
	})

	// testReader asserts that opening and fully reading the file's Reader
	// yields exactly expectedContent.
	testReader := func(t *testing.T, f *filesystem.File, expectedContent string) {
		r, err := f.Reader.Open()
		if err != nil {
			t.Fatal(err)
		}
		defer r.Close()
		raw, err := io.ReadAll(r)
		if err != nil {
			t.Fatal(err)
		}
		rawStr := string(raw)
		if rawStr != expectedContent {
			t.Fatalf("Expected content %q, got %q", expectedContent, rawStr)
		}
	}

	t.Run("existing (preserve name)", func(t *testing.T) {
		file, err := fsys.GetReuploadableFile("test/sub1.txt", true)
		if err != nil {
			t.Fatal(err)
		}
		if v := file.OriginalName; v != "sub1.txt" {
			t.Fatalf("Expected originalName %q, got %q", "sub1.txt", v)
		}
		if v := file.Size; v != 4 {
			t.Fatalf("Expected size %d, got %d", 4, v)
		}
		if v := file.Name; v != "sub1.txt" {
			t.Fatalf("Expected name to be preserved, got %q", v)
		}
		testReader(t, file, "sub1")
	})

	t.Run("existing (new random suffix name)", func(t *testing.T) {
		file, err := fsys.GetReuploadableFile("test/sub1.txt", false)
		if err != nil {
			t.Fatal(err)
		}
		if v := file.OriginalName; v != "sub1.txt" {
			t.Fatalf("Expected originalName %q, got %q", "sub1.txt", v)
		}
		if v := file.Size; v != 4 {
			t.Fatalf("Expected size %d, got %d", 4, v)
		}
		// the generated name is expected to differ from the original
		// and to be longer (a random suffix is appended)
		if v := file.Name; v == "sub1.txt" || len(v) <= len("sub1.txt.png") {
			t.Fatalf("Expected name to have new random suffix, got %q", v)
		}
		testReader(t, file, "sub1")
	})
}
// TestFileSystemCopy verifies copying a missing source (error) and an
// existing source (new object with the same size).
func TestFileSystemCopy(t *testing.T) {
	dir := createTestDir(t)
	defer os.RemoveAll(dir)

	fsys, err := filesystem.NewLocal(dir)
	if err != nil {
		t.Fatal(err)
	}
	defer fsys.Close()

	const (
		src = "image.png"
		dst = "image.png_copy"
	)

	// copy missing file
	if err := fsys.Copy(dst, src); err == nil {
		t.Fatalf("Expected to fail copying %q to %q, got nil", dst, src)
	}

	// copy existing file
	if err := fsys.Copy(src, dst); err != nil {
		t.Fatalf("Failed to copy %q to %q: %v", src, dst, err)
	}

	copied, err := fsys.GetReader(dst)
	if err != nil {
		t.Fatalf("Missing copied file %q: %v", dst, err)
	}
	defer copied.Close()

	if size := copied.Size(); size != 73 {
		t.Fatalf("Expected file size %d, got %d", 73, size)
	}
}
// TestFileSystemList verifies flat listing with empty, matching and
// non-matching prefixes.
func TestFileSystemList(t *testing.T) {
	dir := createTestDir(t)
	defer os.RemoveAll(dir)

	fsys, err := filesystem.NewLocal(dir)
	if err != nil {
		t.Fatal(err)
	}
	defer fsys.Close()

	scenarios := []struct {
		prefix   string
		expected []string
	}{
		{
			"",
			[]string{
				"image.png",
				"image.jpg",
				"image.svg",
				"image.webp",
				"image_!@ special",
				"image_noext",
				"style.css",
				"main.js",
				"main.mjs",
				"test/sub1.txt",
				"test/sub2.txt",
			},
		},
		{
			"test",
			[]string{
				"test/sub1.txt",
				"test/sub2.txt",
			},
		},
		{
			"missing",
			[]string{},
		},
	}

	for _, s := range scenarios {
		t.Run("prefix_"+s.prefix, func(t *testing.T) {
			objs, err := fsys.List(s.prefix)
			if err != nil {
				t.Fatal(err)
			}

			if len(s.expected) != len(objs) {
				t.Fatalf("Expected %d files, got \n%v", len(s.expected), objs)
			}

			// index the expected keys for O(1) membership checks
			expectedSet := make(map[string]struct{}, len(s.expected))
			for _, name := range s.expected {
				expectedSet[name] = struct{}{}
			}

			for _, obj := range objs {
				if _, ok := expectedSet[obj.Key]; !ok {
					t.Fatalf("Unexpected file %q", obj.Key)
				}
			}
		})
	}
}
// TestFileSystemServeSingleRange verifies that Serve honors a single
// byte-range request (206 + Content-Range + partial Content-Length).
func TestFileSystemServeSingleRange(t *testing.T) {
	dir := createTestDir(t)
	defer os.RemoveAll(dir)

	fsys, err := filesystem.NewLocal(dir)
	if err != nil {
		t.Fatal(err)
	}
	defer fsys.Close()

	rec := httptest.NewRecorder()
	req := httptest.NewRequest("GET", "/", nil)
	req.Header.Add("Range", "bytes=0-20")

	if err := fsys.Serve(rec, req, "image.png", "image.png"); err != nil {
		t.Fatal(err)
	}

	resp := rec.Result()

	if resp.StatusCode != http.StatusPartialContent {
		t.Fatalf("Expected StatusCode %d, got %d", http.StatusPartialContent, resp.StatusCode)
	}

	wantRange := "bytes 0-20/73"
	if cr := resp.Header.Get("Content-Range"); cr != wantRange {
		t.Fatalf("Expected Content-Range %q, got %q", wantRange, cr)
	}

	if l := resp.Header.Get("Content-Length"); l != "21" {
		t.Fatalf("Expected Content-Length %v, got %v", 21, l)
	}
}
// TestFileSystemServeMultiRange verifies that Serve answers a multi
// byte-range request with a multipart/byteranges response.
func TestFileSystemServeMultiRange(t *testing.T) {
	dir := createTestDir(t)
	defer os.RemoveAll(dir)

	fsys, err := filesystem.NewLocal(dir)
	if err != nil {
		t.Fatal(err)
	}
	defer fsys.Close()

	rec := httptest.NewRecorder()
	req := httptest.NewRequest("GET", "/", nil)
	req.Header.Add("Range", "bytes=0-20, 25-30")

	if err := fsys.Serve(rec, req, "image.png", "image.png"); err != nil {
		t.Fatal(err)
	}

	resp := rec.Result()

	if resp.StatusCode != http.StatusPartialContent {
		t.Fatalf("Expected StatusCode %d, got %d", http.StatusPartialContent, resp.StatusCode)
	}

	if ct := resp.Header.Get("Content-Type"); !strings.HasPrefix(ct, "multipart/byteranges; boundary=") {
		t.Fatalf("Expected Content-Type to be multipart/byteranges, got %v", ct)
	}
}
// TestFileSystemCreateThumb verifies thumb generation for missing files,
// non-image files, invalid sizes and all supported size/crop variants,
// checking both the actual encoded bytes and the stored ContentType attribute.
func TestFileSystemCreateThumb(t *testing.T) {
	dir := createTestDir(t)
	defer os.RemoveAll(dir)

	fsys, err := filesystem.NewLocal(dir)
	if err != nil {
		t.Fatal(err)
	}
	defer fsys.Close()

	scenarios := []struct {
		file             string
		thumb            string
		size             string
		expectedMimeType string // empty means an error is expected
	}{
		// missing
		{"missing.txt", "thumb_test_missing", "100x100", ""},
		// non-image existing file
		{"test/sub1.txt", "thumb_test_sub1", "100x100", ""},
		// existing image file with existing thumb path = should fail
		{"image.png", "test", "100x100", ""},
		// existing image file with invalid thumb size
		{"image.png", "thumb0", "invalid", ""},
		// existing image file with 0xH thumb size
		{"image.png", "thumb_0xH", "0x100", "image/png"},
		// existing image file with Wx0 thumb size
		{"image.png", "thumb_Wx0", "100x0", "image/png"},
		// existing image file with WxH thumb size
		{"image.png", "thumb_WxH", "100x100", "image/png"},
		// existing image file with WxHt thumb size
		{"image.png", "thumb_WxHt", "100x100t", "image/png"},
		// existing image file with WxHb thumb size
		{"image.png", "thumb_WxHb", "100x100b", "image/png"},
		// existing image file with WxHf thumb size
		{"image.png", "thumb_WxHf", "100x100f", "image/png"},
		// jpg
		{"image.jpg", "thumb.jpg", "100x100", "image/jpeg"},
		// webp (should produce png)
		{"image.webp", "thumb.webp", "100x100", "image/png"},
		// without extension (should extract the mimetype from its stored ContentType)
		{"image_noext", "image_noext.jpeg", "100x100", "image/jpeg"},
	}

	for _, s := range scenarios {
		t.Run(s.file+"_"+s.thumb+"_"+s.size, func(t *testing.T) {
			err := fsys.CreateThumb(s.file, s.thumb, s.size)

			expectErr := s.expectedMimeType == ""
			hasErr := err != nil
			if hasErr != expectErr {
				t.Fatalf("Expected hasErr to be %v, got %v (%v)", expectErr, hasErr, err)
			}
			if hasErr {
				return
			}

			f, err := fsys.GetReader(s.thumb)
			if err != nil {
				t.Fatalf("Missing expected thumb %s (%v)", s.thumb, err)
			}
			defer f.Close()

			// the ContentType attribute stored with the thumb
			attrsMimeType := f.ContentType()

			// the mimetype detected from the thumb's actual bytes
			mt, err := mimetype.DetectReader(f)
			if err != nil {
				t.Fatalf("Failed to detect thumb %s mimetype (%v)", s.thumb, err)
			}
			fileMimeType := mt.String()

			if fileMimeType != s.expectedMimeType {
				t.Fatalf("Expected thumb file %s MimeType %q, got %q", s.thumb, s.expectedMimeType, fileMimeType)
			}
			if attrsMimeType != s.expectedMimeType {
				t.Fatalf("Expected thumb attrs %s MimeType %q, got %q", s.thumb, s.expectedMimeType, attrsMimeType)
			}
		})
	}
}
// ---
func createTestDir(t *testing.T) string {
dir, err := os.MkdirTemp(os.TempDir(), "pb_test")
if err != nil {
t.Fatal(err)
}
err = os.MkdirAll(filepath.Join(dir, "empty"), os.ModePerm)
if err != nil {
t.Fatal(err)
}
err = os.MkdirAll(filepath.Join(dir, "test"), os.ModePerm)
if err != nil {
t.Fatal(err)
}
err = os.WriteFile(filepath.Join(dir, "test/sub1.txt"), []byte("sub1"), 0644)
if err != nil {
t.Fatal(err)
}
err = os.WriteFile(filepath.Join(dir, "test/sub2.txt"), []byte("sub2"), 0644)
if err != nil {
t.Fatal(err)
}
// png
{
file, err := os.OpenFile(filepath.Join(dir, "image.png"), os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
t.Fatal(err)
}
imgRect := image.Rect(0, 0, 1, 1) // tiny 1x1 png
_ = png.Encode(file, imgRect)
file.Close()
err = os.WriteFile(filepath.Join(dir, "image.png.attrs"), []byte(`{"user.cache_control":"","user.content_disposition":"","user.content_encoding":"","user.content_language":"","user.content_type":"image/png","user.metadata":null}`), 0644)
if err != nil {
t.Fatal(err)
}
}
// jpg
{
file, err := os.OpenFile(filepath.Join(dir, "image.jpg"), os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
t.Fatal(err)
}
imgRect := image.Rect(0, 0, 1, 1) // tiny 1x1 jpg
_ = jpeg.Encode(file, imgRect, nil)
file.Close()
err = os.WriteFile(filepath.Join(dir, "image.jpg.attrs"), []byte(`{"user.cache_control":"","user.content_disposition":"","user.content_encoding":"","user.content_language":"","user.content_type":"image/jpeg","user.metadata":null}`), 0644)
if err != nil {
t.Fatal(err)
}
}
// svg
{
file, err := os.OpenFile(filepath.Join(dir, "image.svg"), os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
t.Fatal(err)
}
file.Close()
}
// webp
{
err := os.WriteFile(filepath.Join(dir, "image.webp"), []byte{
82, 73, 70, 70, 36, 0, 0, 0, 87, 69, 66, 80, 86, 80, 56, 32,
24, 0, 0, 0, 48, 1, 0, 157, 1, 42, 1, 0, 1, 0, 2, 0, 52, 37,
164, 0, 3, 112, 0, 254, 251, 253, 80, 0,
}, 0644)
if err != nil {
t.Fatal(err)
}
}
// invalid/special characters
{
file, err := os.OpenFile(filepath.Join(dir, "image_!@ special"), os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
t.Fatal(err)
}
imgRect := image.Rect(0, 0, 1, 1) // tiny 1x1 png
_ = png.Encode(file, imgRect)
file.Close()
}
// no extension
{
fullPath := filepath.Join(dir, "image_noext")
file, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
t.Fatal(err)
}
imgRect := image.Rect(0, 0, 1, 1) // tiny 1x1 jpg
_ = jpeg.Encode(file, imgRect, nil)
file.Close()
err = os.WriteFile(fullPath+".attrs", []byte(`{"user.cache_control":"","user.content_disposition":"","user.content_encoding":"","user.content_language":"","user.content_type":"image/jpeg","user.metadata":null}`), 0644)
if err != nil {
t.Fatal(err)
}
}
// css
{
file, err := os.OpenFile(filepath.Join(dir, "style.css"), os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
t.Fatal(err)
}
file.Close()
}
// js
{
file, err := os.OpenFile(filepath.Join(dir, "main.js"), os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
t.Fatal(err)
}
file.Close()
}
// mjs
{
file, err := os.OpenFile(filepath.Join(dir, "main.mjs"), os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
t.Fatal(err)
}
file.Close()
}
return dir
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/filesystem.go | tools/filesystem/filesystem.go | package filesystem
import (
"context"
"errors"
"image"
"io"
"mime/multipart"
"net/http"
"os"
"path"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"github.com/disintegration/imaging"
"github.com/fatih/color"
"github.com/gabriel-vasile/mimetype"
"github.com/pocketbase/pocketbase/tools/filesystem/blob"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/fileblob"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
"github.com/pocketbase/pocketbase/tools/list"
// explicit webp decoder because disintegration/imaging does not support webp
_ "golang.org/x/image/webp"
)
// note: the same as blob.ErrNotFound for backward compatibility with earlier versions
var ErrNotFound = blob.ErrNotFound
const metadataOriginalName = "original-filename"
type System struct {
ctx context.Context
bucket *blob.Bucket
}
// NewS3 initializes an S3 filesystem instance.
//
// NB! Make sure to call `Close()` after you are done working with it.
func NewS3(
	bucketName string,
	region string,
	endpoint string,
	accessKey string,
	secretKey string,
	s3ForcePathStyle bool,
) (*System, error) {
	drv, err := s3blob.New(&s3.S3{
		Bucket:       bucketName,
		Region:       region,
		Endpoint:     endpoint,
		AccessKey:    accessKey,
		SecretKey:    secretKey,
		UsePathStyle: s3ForcePathStyle,
	})
	if err != nil {
		return nil, err
	}

	return &System{
		ctx:    context.Background(), // default context
		bucket: blob.NewBucket(drv),
	}, nil
}
// NewLocal initializes a new local filesystem instance.
//
// NB! Make sure to call `Close()` after you are done working with it.
func NewLocal(dirPath string) (*System, error) {
	// ensure that the root directory exists
	if err := os.MkdirAll(dirPath, os.ModePerm); err != nil {
		return nil, err
	}

	drv, err := fileblob.New(dirPath, &fileblob.Options{NoTempDir: true})
	if err != nil {
		return nil, err
	}

	return &System{
		ctx:    context.Background(), // default context
		bucket: blob.NewBucket(drv),
	}, nil
}
// SetContext assigns the specified context to the current filesystem.
//
// The assigned context is used for all subsequent storage operations.
func (s *System) SetContext(ctx context.Context) {
	s.ctx = ctx
}
// Close releases any resources used for the related filesystem
// by closing the underlying bucket.
func (s *System) Close() error {
	return s.bucket.Close()
}
// Exists checks if file with fileKey path exists or not.
//
// The check is delegated to the underlying bucket/driver.
func (s *System) Exists(fileKey string) (bool, error) {
	return s.bucket.Exists(s.ctx, fileKey)
}
// Attributes returns the attributes for the file with fileKey path
// (size, content type, metadata, etc.).
//
// If the file doesn't exist it returns ErrNotFound.
func (s *System) Attributes(fileKey string) (*blob.Attributes, error) {
	return s.bucket.Attributes(s.ctx, fileKey)
}
// GetReader returns a file content reader for the given fileKey.
//
// NB! Make sure to call Close() on the file after you are done working with it.
//
// If the file doesn't exist returns ErrNotFound.
func (s *System) GetReader(fileKey string) (*blob.Reader, error) {
	return s.bucket.NewReader(s.ctx, fileKey)
}
// GetFile returns a file content reader for the given fileKey.
//
// It prints a deprecation warning and delegates to GetReader.
//
// Deprecated: Please use GetReader(fileKey) instead.
func (s *System) GetFile(fileKey string) (*blob.Reader, error) {
	color.Yellow("Deprecated: Please replace GetFile with GetReader.")
	return s.GetReader(fileKey)
}
// GetReuploadableFile constructs a new reuploadable File value
// from the associated fileKey blob.Reader.
//
// If preserveName is false then the returned File.Name will have
// a new randomly generated suffix, otherwise it will reuse the original one.
//
// This method could be useful in case you want to clone an existing
// Record file and assign it to a new Record (e.g. in a Record duplicate action).
//
// If you simply want to copy an existing file to a new location you
// could check the Copy(srcKey, dstKey) method.
func (s *System) GetReuploadableFile(fileKey string, preserveName bool) (*File, error) {
	attrs, err := s.Attributes(fileKey)
	if err != nil {
		return nil, err
	}

	baseName := path.Base(fileKey)

	// fallback to the key's base name when no original name was stored
	originalName := attrs.Metadata[metadataOriginalName]
	if originalName == "" {
		originalName = baseName
	}

	// lazily reopen the blob on each Open call
	reader := openFuncAsReader(func() (io.ReadSeekCloser, error) {
		return s.GetReader(fileKey)
	})

	file := &File{
		Size:         attrs.Size,
		OriginalName: originalName,
		Reader:       reader,
	}

	if preserveName {
		file.Name = baseName
	} else {
		file.Name = normalizeName(reader, originalName)
	}

	return file, nil
}
// Copy copies the file stored at srcKey to dstKey.
//
// If srcKey file doesn't exist, it returns ErrNotFound.
//
// If dstKey file already exists, it is overwritten.
//
// Note that the underlying bucket API accepts the destination first.
func (s *System) Copy(srcKey, dstKey string) error {
	return s.bucket.Copy(s.ctx, dstKey, srcKey)
}
// List returns a flat list with info for all files under the specified prefix.
func (s *System) List(prefix string) ([]*blob.ListObject, error) {
	result := []*blob.ListObject{}

	iter := s.bucket.List(&blob.ListOptions{Prefix: prefix})

	for {
		obj, err := iter.Next(s.ctx)
		if errors.Is(err, io.EOF) {
			// the iterator is exhausted
			return result, nil
		}
		if err != nil {
			return nil, err
		}
		result = append(result, obj)
	}
}
// Upload writes content into the fileKey location.
//
// The content type is auto-detected from the provided bytes.
func (s *System) Upload(content []byte, fileKey string) error {
	opts := &blob.WriterOptions{
		ContentType: mimetype.Detect(content).String(),
	}

	w, err := s.bucket.NewWriter(s.ctx, fileKey, opts)
	if err != nil {
		return err
	}

	_, writeErr := w.Write(content)
	if writeErr != nil {
		// surface both the write and any writer cleanup error
		return errors.Join(writeErr, w.Close())
	}

	return w.Close()
}
// UploadFile uploads the provided File to the fileKey location.
//
// The content type is detected from the beginning of the file content
// and the (truncated) original filename is stored as blob metadata.
func (s *System) UploadFile(file *File, fileKey string) error {
	f, err := file.Reader.Open()
	if err != nil {
		return err
	}
	defer f.Close()

	mt, err := mimetype.DetectReader(f)
	if err != nil {
		return err
	}

	// rewind since the mimetype detection consumed part of the reader
	if _, err = f.Seek(0, io.SeekStart); err != nil {
		return err
	}

	originalName := file.OriginalName
	if len(originalName) > 255 {
		// keep only the first 255 chars as a very rudimentary measure
		// to prevent the metadata to grow too big in size
		originalName = originalName[:255]
	}

	opts := &blob.WriterOptions{
		ContentType: mt.String(),
		Metadata: map[string]string{
			metadataOriginalName: originalName,
		},
	}

	w, err := s.bucket.NewWriter(s.ctx, fileKey, opts)
	if err != nil {
		return err
	}

	if _, err := w.ReadFrom(f); err != nil {
		// surface both the write and any writer cleanup error
		return errors.Join(err, w.Close())
	}

	return w.Close()
}
// UploadMultipart uploads the provided multipart file to the fileKey location.
//
// The content type is detected from the beginning of the file content
// and the (truncated) original filename is stored as blob metadata.
func (s *System) UploadMultipart(fh *multipart.FileHeader, fileKey string) error {
	f, err := fh.Open()
	if err != nil {
		return err
	}
	defer f.Close()

	mt, err := mimetype.DetectReader(f)
	if err != nil {
		return err
	}

	// rewind since the mimetype detection consumed part of the reader
	if _, err = f.Seek(0, io.SeekStart); err != nil {
		return err
	}

	originalName := fh.Filename
	if len(originalName) > 255 {
		// keep only the first 255 chars as a very rudimentary measure
		// to prevent the metadata to grow too big in size
		originalName = originalName[:255]
	}

	opts := &blob.WriterOptions{
		ContentType: mt.String(),
		Metadata: map[string]string{
			metadataOriginalName: originalName,
		},
	}

	w, err := s.bucket.NewWriter(s.ctx, fileKey, opts)
	if err != nil {
		return err
	}

	if _, err := w.ReadFrom(f); err != nil {
		// surface both the write and any writer cleanup error
		return errors.Join(err, w.Close())
	}

	return w.Close()
}
// Delete deletes stored file at fileKey location.
//
// If the file doesn't exist returns ErrNotFound.
func (s *System) Delete(fileKey string) error {
	return s.bucket.Delete(s.ctx, fileKey)
}
// DeletePrefix deletes everything starting with the specified prefix.
//
// The prefix could be subpath (ex. "/a/b/") or filename prefix (ex. "/a/b/file_").
//
// It returns a slice with the errors of the failed deletions (empty on full success).
func (s *System) DeletePrefix(prefix string) []error {
	failed := []error{}

	if prefix == "" {
		failed = append(failed, errors.New("prefix mustn't be empty"))
		return failed
	}

	// collects the "directory" objects that may need to be deleted afterwards
	dirsMap := map[string]struct{}{}

	var isPrefixDir bool

	// treat the prefix as directory only if it ends with trailing slash
	if strings.HasSuffix(prefix, "/") {
		isPrefixDir = true
		dirsMap[strings.TrimRight(prefix, "/")] = struct{}{}
	}

	// delete all files with the prefix
	// ---
	iter := s.bucket.List(&blob.ListOptions{
		Prefix: prefix,
	})
	for {
		obj, err := iter.Next(s.ctx)
		if err != nil {
			// io.EOF marks normal iterator exhaustion; anything else is a failure
			if !errors.Is(err, io.EOF) {
				failed = append(failed, err)
			}
			break
		}

		if err := s.Delete(obj.Key); err != nil {
			failed = append(failed, err)
		} else if isPrefixDir {
			// remember the parent "dir" of the deleted object
			slashIdx := strings.LastIndex(obj.Key, "/")
			if slashIdx > -1 {
				dirsMap[obj.Key[:slashIdx]] = struct{}{}
			}
		}
	}
	// ---

	// try to delete the empty remaining dir objects
	// (this operation usually is optional and there is no need to strictly check the result)
	// ---
	// fill dirs slice
	dirs := make([]string, 0, len(dirsMap))
	for d := range dirsMap {
		dirs = append(dirs, d)
	}

	// sort the child dirs first, aka. ["a/b/c", "a/b", "a"]
	sort.SliceStable(dirs, func(i, j int) bool {
		return len(strings.Split(dirs[i], "/")) > len(strings.Split(dirs[j], "/"))
	})

	// delete dirs (best effort - errors are deliberately ignored)
	for _, d := range dirs {
		if d != "" {
			s.Delete(d)
		}
	}
	// ---

	return failed
}
// IsEmptyDir checks if the provided dir prefix doesn't have any files.
//
// A trailing slash will be appended to a non-empty dir string argument
// to ensure that the checked prefix is a "directory".
//
// Returns "false" in case the dir has at least one file, otherwise - "true".
func (s *System) IsEmptyDir(dir string) bool {
	prefix := dir
	if prefix != "" && !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}

	iter := s.bucket.List(&blob.ListOptions{Prefix: prefix})

	// the dir is empty only when the very first iteration reports exhaustion
	_, err := iter.Next(s.ctx)

	return errors.Is(err, io.EOF)
}
// inlineServeContentTypes lists the media types that Serve is allowed to
// deliver with "Content-Disposition: inline"
// (everything else defaults to "attachment").
var inlineServeContentTypes = []string{
	// image
	"image/png", "image/jpg", "image/jpeg", "image/gif", "image/webp", "image/x-icon", "image/bmp",
	// video
	"video/webm", "video/mp4", "video/3gpp", "video/quicktime", "video/x-ms-wmv",
	// audio
	"audio/basic", "audio/aiff", "audio/mpeg", "audio/midi", "audio/mp3", "audio/wave",
	"audio/wav", "audio/x-wav", "audio/x-mpeg", "audio/x-m4a", "audio/aac",
	// document
	"application/pdf", "application/x-pdf",
}

// manualExtensionContentTypes is a map of file extensions to content types.
//
// In Serve these take precedence over the stored blob content type.
var manualExtensionContentTypes = map[string]string{
	".svg": "image/svg+xml",   // (see https://github.com/whatwg/mimesniff/issues/7)
	".css": "text/css",        // (see https://github.com/gabriel-vasile/mimetype/pull/113)
	".js":  "text/javascript", // (see https://github.com/pocketbase/pocketbase/issues/6597)
	".mjs": "text/javascript",
}

// forceAttachmentParam is the name of the request query parameter to
// force "Content-Disposition: attachment" header.
const forceAttachmentParam = "download"
// Serve serves the file at fileKey location to an HTTP response.
//
// If the `download` query parameter is used the file will be always served for
// download no matter of its type (aka. with "Content-Disposition: attachment").
//
// Internally this method uses [http.ServeContent] so Range requests,
// If-Match, If-Unmodified-Since, etc. headers are handled transparently.
func (s *System) Serve(res http.ResponseWriter, req *http.Request, fileKey string, name string) error {
	br, readErr := s.GetReader(fileKey)
	if readErr != nil {
		return readErr
	}
	defer br.Close()

	var forceAttachment bool
	if raw := req.URL.Query().Get(forceAttachmentParam); raw != "" {
		forceAttachment, _ = strconv.ParseBool(raw)
	}

	// serve inline only the well-known preview-safe content types,
	// otherwise force a download
	disposition := "attachment"
	realContentType := br.ContentType()
	if !forceAttachment && list.ExistInSlice(realContentType, inlineServeContentTypes) {
		disposition = "inline"
	}

	// make an exception for specific content types and force a custom
	// content type to send in the response so that it can be loaded properly
	extContentType := realContentType
	if ct, found := manualExtensionContentTypes[filepath.Ext(name)]; found {
		extContentType = ct
	}

	// note: each header is applied only if not already set by the caller
	setHeaderIfMissing(res, "Content-Disposition", disposition+"; filename="+name)
	setHeaderIfMissing(res, "Content-Type", extContentType)
	setHeaderIfMissing(res, "Content-Security-Policy", "default-src 'none'; media-src 'self'; style-src 'unsafe-inline'; sandbox")

	// set a default cache-control header
	// (valid for 30 days but the cache is allowed to reuse the file for any requests
	// that are made in the last day while revalidating the res in the background)
	setHeaderIfMissing(res, "Cache-Control", "max-age=2592000, stale-while-revalidate=86400")

	http.ServeContent(res, req, name, br.ModTime(), br)

	return nil
}
// note: expects key to be in a canonical form (eg. "accept-encoding" should be "Accept-Encoding").
func setHeaderIfMissing(res http.ResponseWriter, key string, value string) {
if _, ok := res.Header()[key]; !ok {
res.Header().Set(key, value)
}
}
// ThumbSizeRegex validates the supported thumb size formats,
// capturing the width, the height and an optional crop modifier (t|b|f)
// (see CreateThumb for the semantics of each variant).
var ThumbSizeRegex = regexp.MustCompile(`^(\d+)x(\d+)(t|b|f)?$`)
// CreateThumb creates a new thumb image for the file at originalKey location.
// The new thumb file is stored at thumbKey location.
//
// thumbSize is in the format:
//   - 0xH  (eg. 0x100)    - resize to H height preserving the aspect ratio
//   - Wx0  (eg. 300x0)    - resize to W width preserving the aspect ratio
//   - WxH  (eg. 300x100)  - resize and crop to WxH viewbox (from center)
//   - WxHt (eg. 300x100t) - resize and crop to WxH viewbox (from top)
//   - WxHb (eg. 300x100b) - resize and crop to WxH viewbox (from bottom)
//   - WxHf (eg. 300x100f) - fit inside a WxH viewbox (without cropping)
func (s *System) CreateThumb(originalKey string, thumbKey, thumbSize string) error {
	sizeParts := ThumbSizeRegex.FindStringSubmatch(thumbSize)
	if len(sizeParts) != 4 {
		return errors.New("thumb size must be in WxH, WxHt, WxHb or WxHf format")
	}

	// the regex guarantees sizeParts[1] and sizeParts[2] are digits
	width, _ := strconv.Atoi(sizeParts[1])
	height, _ := strconv.Atoi(sizeParts[2])
	resizeType := sizeParts[3]

	if width == 0 && height == 0 {
		return errors.New("thumb width and height cannot be zero at the same time")
	}

	// fetch the original
	r, readErr := s.GetReader(originalKey)
	if readErr != nil {
		return readErr
	}
	defer r.Close()

	// create imaging object from the original reader
	// (note: only the first frame for animated image formats)
	img, decodeErr := imaging.Decode(r, imaging.AutoOrientation(true))
	if decodeErr != nil {
		return decodeErr
	}

	var thumbImg *image.NRGBA

	if width == 0 || height == 0 {
		// force resize preserving aspect ratio
		thumbImg = imaging.Resize(img, width, height, imaging.Linear)
	} else {
		switch resizeType {
		case "f":
			// fit
			thumbImg = imaging.Fit(img, width, height, imaging.Linear)
		case "t":
			// fill and crop from top
			thumbImg = imaging.Fill(img, width, height, imaging.Top, imaging.Linear)
		case "b":
			// fill and crop from bottom
			thumbImg = imaging.Fill(img, width, height, imaging.Bottom, imaging.Linear)
		default:
			// fill and crop from center
			thumbImg = imaging.Fill(img, width, height, imaging.Center, imaging.Linear)
		}
	}

	// keep the original content type for the thumb, unless the encoder
	// doesn't support it (see the default case below)
	originalContentType := r.ContentType()

	opts := &blob.WriterOptions{
		ContentType: originalContentType,
	}

	var format imaging.Format
	switch originalContentType {
	case "image/jpeg":
		format = imaging.JPEG
	case "image/gif":
		format = imaging.GIF
	case "image/tiff":
		format = imaging.TIFF
	case "image/bmp":
		format = imaging.BMP
	default:
		// fallback to PNG (this includes webp!)
		opts.ContentType = "image/png"
		format = imaging.PNG
	}

	// open a thumb storage writer (aka. prepare for upload)
	w, err := s.bucket.NewWriter(s.ctx, thumbKey, opts)
	if err != nil {
		return err
	}

	// thumb encode (aka. upload)
	err = imaging.Encode(w, thumbImg, format)
	if err != nil {
		w.Close()
		return err
	}

	// check for close errors to ensure that the thumb was really saved
	return w.Close()
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/blob/driver.go | tools/filesystem/blob/driver.go | package blob
import (
"context"
"io"
"time"
)
// ReaderAttributes contains a subset of attributes about a blob that are
// accessible from Reader (drivers populate it when opening a blob for reading).
type ReaderAttributes struct {
	// ContentType is the MIME type of the blob object. It must not be empty.
	// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type
	ContentType string `json:"contentType"`

	// ModTime is the time the blob object was last modified.
	ModTime time.Time `json:"modTime"`

	// Size is the size of the object in bytes.
	Size int64 `json:"size"`
}
// DriverReader reads an object from the blob.
//
// It embeds io.ReadCloser, so callers must Close it when done.
type DriverReader interface {
	io.ReadCloser

	// Attributes returns a subset of attributes about the blob.
	// The portable type will not modify the returned ReaderAttributes.
	Attributes() *ReaderAttributes
}
// DriverWriter writes an object to the blob.
//
// Per the NewTypedWriter contract, the written object may not be
// visible until Close has been called.
type DriverWriter interface {
	io.WriteCloser
}
// Driver provides read, write and delete operations on objects within it on the
// blob service.
type Driver interface {
	// NormalizeError converts driver-specific errors into portable ones
	// (e.g. mapping a service "missing key" error to ErrNotFound).
	NormalizeError(err error) error

	// Attributes returns attributes for the blob. If the specified object does
	// not exist, Attributes must return an ErrNotFound.
	// The portable type will not modify the returned Attributes.
	Attributes(ctx context.Context, key string) (*Attributes, error)

	// ListPaged lists objects in the bucket, in lexicographical order by
	// UTF-8-encoded key, returning pages of objects at a time.
	// Services are only required to be eventually consistent with respect
	// to recently written or deleted objects. That is to say, there is no
	// guarantee that an object that's been written will immediately be returned
	// from ListPaged.
	ListPaged(ctx context.Context, opts *ListOptions) (*ListPage, error)

	// NewRangeReader returns a Reader that reads part of an object, reading at
	// most length bytes starting at the given offset. If length is negative, it
	// will read until the end of the object. If the specified object does not
	// exist, NewRangeReader must return an ErrNotFound.
	NewRangeReader(ctx context.Context, key string, offset, length int64) (DriverReader, error)

	// NewTypedWriter returns Writer that writes to an object associated with key.
	//
	// A new object will be created unless an object with this key already exists.
	// Otherwise any previous object with the same key will be replaced.
	// The object may not be available (and any previous object will remain)
	// until Close has been called.
	//
	// contentType sets the MIME type of the object to be written.
	// opts is guaranteed to be non-nil.
	//
	// The caller must call Close on the returned Writer when done writing.
	//
	// Implementations should abort an ongoing write if ctx is later canceled,
	// and do any necessary cleanup in Close. Close should then return ctx.Err().
	//
	// The returned Writer *may* also implement Uploader if the underlying
	// implementation can take advantage of that. The Upload call is guaranteed
	// to be the only non-Close call to the Writer.
	NewTypedWriter(ctx context.Context, key, contentType string, opts *WriterOptions) (DriverWriter, error)

	// Copy copies the object associated with srcKey to dstKey.
	//
	// If the source object does not exist, Copy must return an ErrNotFound.
	//
	// If the destination object already exists, it should be overwritten.
	Copy(ctx context.Context, dstKey, srcKey string) error

	// Delete deletes the object associated with key. If the specified object does
	// not exist, Delete must return an ErrNotFound.
	Delete(ctx context.Context, key string) error

	// Close cleans up any resources used by the Bucket. Once Close is called,
	// there will be no method calls to the Bucket other than As, ErrorAs, and
	// ErrorCode. There may be open readers or writers that will receive calls.
	// It is up to the driver as to how these will be handled.
	Close() error
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/blob/bucket.go | tools/filesystem/blob/bucket.go | // Package blob defines a lightweight abstraction for interacting with
// various storage services (local filesystem, S3, etc.).
//
// NB!
// For compatibility with earlier PocketBase versions and to prevent
// unnecessary breaking changes, this package is based and implemented
// as a minimal, stripped down version of the previously used gocloud.dev/blob.
// While there is no promise that it won't diverge in the future to accommodate
// better some PocketBase specific use cases, currently it copies and
// tries to follow as close as possible the same implementations,
// conventions and rules for the key escaping/unescaping, blob read/write
// interfaces and struct options as gocloud.dev/blob, therefore the
// credits goes to the original Go Cloud Development Kit Authors.
package blob
import (
"bytes"
"context"
"crypto/md5"
"errors"
"fmt"
"io"
"log"
"mime"
"runtime"
"strings"
"sync"
"time"
"unicode/utf8"
)
var (
	// ErrNotFound is returned when the requested blob/resource doesn't exist.
	ErrNotFound = errors.New("resource not found")

	// ErrClosed is returned when operating on an already closed bucket or blob.
	ErrClosed = errors.New("bucket or blob is closed")
)
// Bucket provides an easy and portable way to interact with blobs
// within a "bucket", including read, write, and list operations.
// To create a Bucket, use constructors found in driver subpackages.
type Bucket struct {
	// drv is the underlying storage driver implementation.
	drv Driver

	// mu protects the closed variable.
	// Read locks are kept to allow holding a read lock for long-running calls,
	// and thereby prevent closing until a call finishes.
	mu     sync.RWMutex
	closed bool
}
// NewBucket creates a new *Bucket based on a specific driver implementation.
func NewBucket(drv Driver) *Bucket {
	b := new(Bucket)
	b.drv = drv
	return b
}
// ListOptions sets options for listing blobs via Bucket.List.
type ListOptions struct {
	// Prefix indicates that only blobs with a key starting with this prefix
	// should be returned.
	Prefix string

	// Delimiter sets the delimiter used to define a hierarchical namespace,
	// like a filesystem with "directories". It is highly recommended that you
	// use "" or "/" as the Delimiter. Other values should work through this API,
	// but service UIs generally assume "/".
	//
	// An empty delimiter means that the bucket is treated as a single flat
	// namespace.
	//
	// A non-empty delimiter means that any result with the delimiter in its key
	// after Prefix is stripped will be returned with ListObject.IsDir = true,
	// ListObject.Key truncated after the delimiter, and zero values for other
	// ListObject fields. These results represent "directories". Multiple results
	// in a "directory" are returned as a single result.
	Delimiter string

	// PageSize sets the maximum number of objects to be returned.
	// 0 means no maximum; driver implementations should choose a reasonable
	// max. It is guaranteed to be >= 0.
	PageSize int

	// PageToken may be filled in with the NextPageToken from a previous
	// ListPaged call.
	PageToken []byte
}
// ListPage represents a page of results returned from ListPaged.
type ListPage struct {
	// Objects is the slice of objects found. If ListOptions.PageSize > 0,
	// it should have at most ListOptions.PageSize entries.
	//
	// Objects should be returned in lexicographical order of UTF-8 encoded keys,
	// including across pages. I.e., all objects returned from a ListPage request
	// made using a PageToken from a previous ListPage request's NextPageToken
	// should have Key >= the Key for all objects from the previous request.
	Objects []*ListObject `json:"objects"`

	// NextPageToken should be left empty unless there are more objects
	// to return. The value may be returned as ListOptions.PageToken on a
	// subsequent ListPaged call, to fetch the next page of results.
	// It can be an arbitrary []byte; it need not be a valid key.
	NextPageToken []byte `json:"nextPageToken"`
}
// ListIterator iterates over List results.
type ListIterator struct {
	// b is the bucket whose driver is queried for pages.
	b *Bucket
	// opts carries the Prefix/Delimiter and the evolving PageToken.
	opts *ListOptions
	// page is the most recently fetched page, or nil before the first fetch.
	page *ListPage
	// nextIdx is the index into page.Objects of the next object to return.
	nextIdx int
}
// Next returns a *ListObject for the next blob.
// It returns (nil, io.EOF) if there are no more.
func (i *ListIterator) Next(ctx context.Context) (*ListObject, error) {
	for {
		if i.page != nil {
			// We've already got a page of results.
			if i.nextIdx < len(i.page.Objects) {
				// Next object is in the page; return a copy of it.
				dobj := i.page.Objects[i.nextIdx]
				i.nextIdx++
				return &ListObject{
					Key:     dobj.Key,
					ModTime: dobj.ModTime,
					Size:    dobj.Size,
					MD5:     dobj.MD5,
					IsDir:   dobj.IsDir,
				}, nil
			}
			if len(i.page.NextPageToken) == 0 {
				// Done with the current page, and there are no more.
				return nil, io.EOF
			}
			// We need to load the next page.
			i.opts.PageToken = i.page.NextPageToken
		}

		// Load a new page.
		//
		// NOTE: unlike the previous recursive implementation, the bucket's
		// read lock is acquired per page fetch instead of being held
		// (nested) across recursive calls, which could deadlock against
		// Bucket.Close's write lock if a driver returned consecutive
		// empty pages with non-empty NextPageTokens.
		p, err := i.loadPage(ctx)
		if err != nil {
			return nil, err
		}
		i.page = p
		i.nextIdx = 0
	}
}

// loadPage fetches a single page of results from the driver while holding
// the bucket's read lock, returning ErrClosed if the bucket was closed.
func (i *ListIterator) loadPage(ctx context.Context) (*ListPage, error) {
	i.b.mu.RLock()
	defer i.b.mu.RUnlock()

	if i.b.closed {
		return nil, ErrClosed
	}

	p, err := i.b.drv.ListPaged(ctx, i.opts)
	if err != nil {
		return nil, wrapError(i.b.drv, err, "")
	}

	return p, nil
}
// ListObject represents a single blob returned from List.
type ListObject struct {
	// Key is the key for this blob.
	Key string `json:"key"`

	// ModTime is the time the blob was last modified.
	ModTime time.Time `json:"modTime"`

	// Size is the size of the blob's content in bytes.
	Size int64 `json:"size"`

	// MD5 is an MD5 hash of the blob contents or nil if not available.
	MD5 []byte `json:"md5"`

	// IsDir indicates that this result represents a "directory" in the
	// hierarchical namespace, ending in ListOptions.Delimiter. Key can be
	// passed as ListOptions.Prefix to list items in the "directory".
	// Fields other than Key and IsDir will not be set if IsDir is true.
	IsDir bool `json:"isDir"`
}
// List returns a ListIterator that can be used to iterate over blobs in a
// bucket, in lexicographical order of UTF-8 encoded keys. The underlying
// implementation fetches results in pages.
//
// A nil ListOptions is treated the same as the zero value.
//
// List is not guaranteed to include all recently-written blobs;
// some services are only eventually consistent.
func (b *Bucket) List(opts *ListOptions) *ListIterator {
	// Only Prefix and Delimiter are honored here; paging fields are
	// managed internally by the iterator.
	var prefix, delimiter string
	if opts != nil {
		prefix = opts.Prefix
		delimiter = opts.Delimiter
	}

	return &ListIterator{
		b: b,
		opts: &ListOptions{
			Prefix:    prefix,
			Delimiter: delimiter,
		},
	}
}
// FirstPageToken is the pageToken to pass to ListPage to retrieve the first page of results.
// (Internally drivers use a nil token for the first page; see ListPage for why
// the public API uses a non-nil sentinel instead.)
var FirstPageToken = []byte("first page")
// ListPage returns a page of ListObject results for blobs in a bucket, in lexicographical
// order of UTF-8 encoded keys.
//
// To fetch the first page, pass FirstPageToken as the pageToken. For subsequent pages, pass
// the pageToken returned from a previous call to ListPage.
// It is not possible to "skip ahead" pages.
//
// Each call will return pageSize results, unless there are not enough blobs to fill the
// page, in which case it will return fewer results (possibly 0).
//
// If there are no more blobs available, ListPage will return an empty pageToken. Note that
// this may happen regardless of the number of returned results -- the last page might have
// 0 results (i.e., if the last item was deleted), pageSize results, or anything in between.
//
// Calling ListPage with an empty pageToken will immediately return io.EOF. When looping
// over pages, callers can either check for an empty pageToken, or they can make one more
// call and check for io.EOF.
//
// The underlying implementation fetches results in pages, but one call to ListPage may
// require multiple page fetches (and therefore, multiple calls to the BeforeList callback).
//
// A nil ListOptions is treated the same as the zero value.
//
// ListPage is not guaranteed to include all recently-written blobs;
// some services are only eventually consistent.
func (b *Bucket) ListPage(ctx context.Context, pageToken []byte, pageSize int, opts *ListOptions) (retval []*ListObject, nextPageToken []byte, err error) {
	if opts == nil {
		opts = &ListOptions{}
	}
	if pageSize <= 0 {
		return nil, nil, fmt.Errorf("pageSize must be > 0 (%d)", pageSize)
	}

	// Nil pageToken means no more results.
	if len(pageToken) == 0 {
		return nil, nil, io.EOF
	}

	// FirstPageToken fetches the first page. Drivers use nil.
	// The public API doesn't use nil for the first page because it would be too easy to
	// keep fetching forever (since the last page returns nil for the next pageToken).
	if bytes.Equal(pageToken, FirstPageToken) {
		pageToken = nil
	}

	b.mu.RLock()
	defer b.mu.RUnlock()

	if b.closed {
		return nil, nil, ErrClosed
	}

	dopts := &ListOptions{
		Prefix:    opts.Prefix,
		Delimiter: opts.Delimiter,
		PageToken: pageToken,
		PageSize:  pageSize,
	}

	retval = make([]*ListObject, 0, pageSize)
	for len(retval) < pageSize {
		p, err := b.drv.ListPaged(ctx, dopts)
		if err != nil {
			return nil, nil, wrapError(b.drv, err, "")
		}
		for _, dobj := range p.Objects {
			retval = append(retval, &ListObject{
				Key:     dobj.Key,
				ModTime: dobj.ModTime,
				Size:    dobj.Size,
				MD5:     dobj.MD5,
				IsDir:   dobj.IsDir,
			})
		}
		// ListPaged may return fewer results than pageSize. If there are more results
		// available, signalled by non-empty p.NextPageToken, try to fetch the remainder
		// of the page.
		// It does not work to ask for more results than we need, because then we'd have
		// a NextPageToken on a non-page boundary.
		dopts.PageSize = pageSize - len(retval)
		dopts.PageToken = p.NextPageToken
		if len(dopts.PageToken) == 0 {
			dopts.PageToken = nil
			break
		}
	}

	return retval, dopts.PageToken, nil
}
// Attributes contains attributes about a blob.
type Attributes struct {
	// CacheControl specifies caching attributes that services may use
	// when serving the blob.
	// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control
	CacheControl string `json:"cacheControl"`

	// ContentDisposition specifies whether the blob content is expected to be
	// displayed inline or as an attachment.
	// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition
	ContentDisposition string `json:"contentDisposition"`

	// ContentEncoding specifies the encoding used for the blob's content, if any.
	// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding
	ContentEncoding string `json:"contentEncoding"`

	// ContentLanguage specifies the language used in the blob's content, if any.
	// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Language
	ContentLanguage string `json:"contentLanguage"`

	// ContentType is the MIME type of the blob. It will not be empty.
	// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type
	ContentType string `json:"contentType"`

	// Metadata holds key/value pairs associated with the blob.
	// Keys are guaranteed to be in lowercase, even if the backend service
	// has case-sensitive keys (although note that Metadata written via
	// this package will always be lowercased). If there are duplicate
	// case-insensitive keys (e.g., "foo" and "FOO"), only one value
	// will be kept, and it is undefined which one.
	Metadata map[string]string `json:"metadata"`

	// CreateTime is the time the blob was created, if available. If not available,
	// CreateTime will be the zero time.
	CreateTime time.Time `json:"createTime"`

	// ModTime is the time the blob was last modified.
	ModTime time.Time `json:"modTime"`

	// Size is the size of the blob's content in bytes.
	Size int64 `json:"size"`

	// MD5 is an MD5 hash of the blob contents or nil if not available.
	MD5 []byte `json:"md5"`

	// ETag for the blob; see https://en.wikipedia.org/wiki/HTTP_ETag.
	ETag string `json:"etag"`
}
// Attributes returns attributes for the blob stored at key.
//
// If the blob does not exist, Attributes returns an error that
// satisfies errors.Is(err, ErrNotFound).
func (b *Bucket) Attributes(ctx context.Context, key string) (_ *Attributes, err error) {
	if !utf8.ValidString(key) {
		return nil, fmt.Errorf("Attributes key must be a valid UTF-8 string: %q", key)
	}

	b.mu.RLock()
	defer b.mu.RUnlock()

	if b.closed {
		return nil, ErrClosed
	}

	a, err := b.drv.Attributes(ctx, key)
	if err != nil {
		return nil, wrapError(b.drv, err, key)
	}

	var md map[string]string
	if len(a.Metadata) > 0 {
		// Services are inconsistent, but at least some treat keys
		// as case-insensitive. To make the behavior consistent, we
		// force-lowercase them when writing and reading.
		md = make(map[string]string, len(a.Metadata))
		for k, v := range a.Metadata {
			md[strings.ToLower(k)] = v
		}
	}

	return &Attributes{
		CacheControl:       a.CacheControl,
		ContentDisposition: a.ContentDisposition,
		ContentEncoding:    a.ContentEncoding,
		ContentLanguage:    a.ContentLanguage,
		ContentType:        a.ContentType,
		Metadata:           md,
		CreateTime:         a.CreateTime,
		ModTime:            a.ModTime,
		Size:               a.Size,
		MD5:                a.MD5,
		ETag:               a.ETag,
	}, nil
}
// Exists returns true if a blob exists at key, false if it does not exist, or an error.
//
// It is a shortcut for calling Attributes and checking whether the
// returned error wraps ErrNotFound.
func (b *Bucket) Exists(ctx context.Context, key string) (bool, error) {
	switch _, err := b.Attributes(ctx, key); {
	case err == nil:
		return true, nil
	case errors.Is(err, ErrNotFound):
		// A missing blob is not an error for Exists.
		return false, nil
	default:
		return false, err
	}
}
// NewReader is a shortcut for NewRangeReader with offset=0 and length=-1
// (i.e. it reads the whole blob).
func (b *Bucket) NewReader(ctx context.Context, key string) (*Reader, error) {
	return b.newRangeReader(ctx, key, 0, -1)
}
// NewRangeReader returns a Reader to read content from the blob stored at key.
// It reads at most length bytes starting at offset (>= 0).
// If length is negative, it will read till the end of the blob.
//
// For the purposes of Seek, the returned Reader will start at offset and
// end at the minimum of the actual end of the blob or (if length > 0) offset + length.
//
// Note that ctx is used for all reads performed during the lifetime of the reader.
//
// If the blob does not exist, NewRangeReader returns an ErrNotFound.
// Exists is a lighter-weight way to check for existence.
//
// The caller must call Close on the returned Reader when done reading.
func (b *Bucket) NewRangeReader(ctx context.Context, key string, offset, length int64) (_ *Reader, err error) {
	return b.newRangeReader(ctx, key, offset, length)
}
// newRangeReader performs the shared work of NewReader and NewRangeReader:
// it validates the arguments, asks the driver for a DriverReader and wraps
// it in a *Reader that can recreate the driver reader after Seeks.
func (b *Bucket) newRangeReader(ctx context.Context, key string, offset, length int64) (_ *Reader, err error) {
	b.mu.RLock()
	defer b.mu.RUnlock()

	if b.closed {
		return nil, ErrClosed
	}

	if offset < 0 {
		return nil, fmt.Errorf("NewRangeReader offset must be non-negative (%d)", offset)
	}

	if !utf8.ValidString(key) {
		return nil, fmt.Errorf("NewRangeReader key must be a valid UTF-8 string: %q", key)
	}

	var dr DriverReader
	dr, err = b.drv.NewRangeReader(ctx, key, offset, length)
	if err != nil {
		return nil, wrapError(b.drv, err, key)
	}

	r := &Reader{
		drv:        b.drv,
		r:          dr,
		key:        key,
		ctx:        ctx,
		baseOffset: offset,
		baseLength: length,
		// -1 signals "no Seek since the last read"; see Reader.Read.
		savedOffset: -1,
	}

	// Leak detection: remember the caller of NewReader/NewRangeReader
	// (2 frames up) and log from a finalizer if Close was never called.
	_, file, lineno, ok := runtime.Caller(2)
	runtime.SetFinalizer(r, func(r *Reader) {
		if !r.closed {
			var caller string
			if ok {
				caller = fmt.Sprintf(" (%s:%d)", file, lineno)
			}
			log.Printf("A blob.Reader reading from %q was never closed%s", key, caller)
		}
	})

	return r, nil
}
// WriterOptions sets options for NewWriter.
type WriterOptions struct {
	// BufferSize changes the default size in bytes of the chunks that
	// Writer will upload in a single request; larger blobs will be split into
	// multiple requests.
	//
	// This option may be ignored by some drivers.
	//
	// If 0, the driver will choose a reasonable default.
	//
	// If the Writer is used to do many small writes concurrently, using a
	// smaller BufferSize may reduce memory usage.
	BufferSize int

	// MaxConcurrency changes the default concurrency for parts of an upload.
	//
	// This option may be ignored by some drivers.
	//
	// If 0, the driver will choose a reasonable default.
	MaxConcurrency int

	// CacheControl specifies caching attributes that services may use
	// when serving the blob.
	// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control
	CacheControl string

	// ContentDisposition specifies whether the blob content is expected to be
	// displayed inline or as an attachment.
	// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition
	ContentDisposition string

	// ContentEncoding specifies the encoding used for the blob's content, if any.
	// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding
	ContentEncoding string

	// ContentLanguage specifies the language used in the blob's content, if any.
	// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Language
	ContentLanguage string

	// ContentType specifies the MIME type of the blob being written. If not set,
	// it will be inferred from the content using the algorithm described at
	// http://mimesniff.spec.whatwg.org/.
	// Set DisableContentTypeDetection to true to disable the above and force
	// the ContentType to stay empty.
	// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type
	ContentType string

	// When true, if ContentType is the empty string, it will stay the empty
	// string rather than being inferred from the content.
	// Note that while the blob will be written with an empty string ContentType,
	// most providers will fill one in during reads, so don't expect an empty
	// ContentType if you read the blob back.
	DisableContentTypeDetection bool

	// ContentMD5 is used as a message integrity check.
	// If len(ContentMD5) > 0, the MD5 hash of the bytes written must match
	// ContentMD5, or Close will return an error without completing the write.
	// https://tools.ietf.org/html/rfc1864
	ContentMD5 []byte

	// Metadata holds key/value strings to be associated with the blob, or nil.
	// Keys may not be empty, and are lowercased before being written.
	// Duplicate case-insensitive keys (e.g., "foo" and "FOO") will result in
	// an error.
	Metadata map[string]string
}
// NewWriter returns a Writer that writes to the blob stored at key.
// A nil WriterOptions is treated the same as the zero value.
//
// If a blob with this key already exists, it will be replaced.
// The blob being written is not guaranteed to be readable until Close
// has been called; until then, any previous blob will still be readable.
// Even after Close is called, newly written blobs are not guaranteed to be
// returned from List; some services are only eventually consistent.
//
// The returned Writer will store ctx for later use in Write and/or Close.
// To abort a write, cancel ctx; otherwise, it must remain open until
// Close is called.
//
// The caller must call Close on the returned Writer, even if the write is
// aborted.
func (b *Bucket) NewWriter(ctx context.Context, key string, opts *WriterOptions) (_ *Writer, err error) {
	if !utf8.ValidString(key) {
		return nil, fmt.Errorf("NewWriter key must be a valid UTF-8 string: %q", key)
	}

	if opts == nil {
		opts = &WriterOptions{}
	}

	// Copy the caller's options so later mutations of opts don't leak in.
	dopts := &WriterOptions{
		CacheControl:                opts.CacheControl,
		ContentDisposition:          opts.ContentDisposition,
		ContentEncoding:             opts.ContentEncoding,
		ContentLanguage:             opts.ContentLanguage,
		ContentMD5:                  opts.ContentMD5,
		BufferSize:                  opts.BufferSize,
		MaxConcurrency:              opts.MaxConcurrency,
		DisableContentTypeDetection: opts.DisableContentTypeDetection,
	}

	if len(opts.Metadata) > 0 {
		// Services are inconsistent, but at least some treat keys
		// as case-insensitive. To make the behavior consistent, we
		// force-lowercase them when writing and reading.
		md := make(map[string]string, len(opts.Metadata))
		for k, v := range opts.Metadata {
			if k == "" {
				return nil, errors.New("WriterOptions.Metadata keys may not be empty strings")
			}
			if !utf8.ValidString(k) {
				return nil, fmt.Errorf("WriterOptions.Metadata keys must be valid UTF-8 strings: %q", k)
			}
			if !utf8.ValidString(v) {
				return nil, fmt.Errorf("WriterOptions.Metadata values must be valid UTF-8 strings: %q", v)
			}
			lowerK := strings.ToLower(k)
			if _, found := md[lowerK]; found {
				return nil, fmt.Errorf("WriterOptions.Metadata has a duplicate case-insensitive metadata key: %q", lowerK)
			}
			md[lowerK] = v
		}
		dopts.Metadata = md
	}

	b.mu.RLock()
	defer b.mu.RUnlock()

	if b.closed {
		return nil, ErrClosed
	}

	// The derived ctx lets Close abort the driver write on MD5 mismatch.
	ctx, cancel := context.WithCancel(ctx)

	w := &Writer{
		drv:        b.drv,
		cancel:     cancel,
		key:        key,
		contentMD5: opts.ContentMD5,
		md5hash:    md5.New(),
	}

	if opts.ContentType != "" || opts.DisableContentTypeDetection {
		// The content type is known up front (or explicitly disabled),
		// so the driver writer can be created immediately.
		var ct string
		if opts.ContentType != "" {
			t, p, err := mime.ParseMediaType(opts.ContentType)
			if err != nil {
				cancel()
				return nil, err
			}
			ct = mime.FormatMediaType(t, p)
		}
		dw, err := b.drv.NewTypedWriter(ctx, key, ct, dopts)
		if err != nil {
			cancel()
			return nil, wrapError(b.drv, err, key)
		}
		w.w = dw
	} else {
		// Save the fields needed to call NewTypedWriter later, once we've gotten
		// sniffLen bytes; see the comment on Writer.
		w.ctx = ctx
		w.opts = dopts
		w.buf = bytes.NewBuffer([]byte{})
	}

	// Leak detection: remember the caller of NewWriter (1 frame up) and
	// log from a finalizer if Close was never called.
	_, file, lineno, ok := runtime.Caller(1)
	runtime.SetFinalizer(w, func(w *Writer) {
		if !w.closed {
			var caller string
			if ok {
				caller = fmt.Sprintf(" (%s:%d)", file, lineno)
			}
			log.Printf("A blob.Writer writing to %q was never closed%s", key, caller)
		}
	})

	return w, nil
}
// Copy the blob stored at srcKey to dstKey.
// A nil CopyOptions is treated the same as the zero value.
//
// If the source blob does not exist, Copy returns an ErrNotFound.
//
// If the destination blob already exists, it is overwritten.
func (b *Bucket) Copy(ctx context.Context, dstKey, srcKey string) (err error) {
	if !utf8.ValidString(srcKey) {
		return fmt.Errorf("Copy srcKey must be a valid UTF-8 string: %q", srcKey)
	}
	if !utf8.ValidString(dstKey) {
		return fmt.Errorf("Copy dstKey must be a valid UTF-8 string: %q", dstKey)
	}

	b.mu.RLock()
	defer b.mu.RUnlock()

	if b.closed {
		return ErrClosed
	}

	// Use "src -> dst" as the key in the wrapped error for easier tracing.
	copyErr := b.drv.Copy(ctx, dstKey, srcKey)
	return wrapError(b.drv, copyErr, srcKey+" -> "+dstKey)
}
// Delete deletes the blob stored at key.
//
// If the blob does not exist, Delete returns an error that
// satisfies errors.Is(err, ErrNotFound).
func (b *Bucket) Delete(ctx context.Context, key string) (err error) {
	if !utf8.ValidString(key) {
		return fmt.Errorf("Delete key must be a valid UTF-8 string: %q", key)
	}

	b.mu.RLock()
	defer b.mu.RUnlock()

	if b.closed {
		return ErrClosed
	}

	deleteErr := b.drv.Delete(ctx, key)
	return wrapError(b.drv, deleteErr, key)
}
// Close releases any resources used for the bucket.
// Calling Close more than once returns ErrClosed.
//
// @todo Consider removing it.
func (b *Bucket) Close() error {
	// Flip the closed flag under the write lock so in-flight calls
	// (which hold read locks) finish first.
	b.mu.Lock()
	alreadyClosed := b.closed
	b.closed = true
	b.mu.Unlock()

	if alreadyClosed {
		return ErrClosed
	}

	return wrapError(b.drv, b.drv.Close(), "")
}
// wrapError normalizes a driver error and, when key is non-empty,
// annotates it with the blob key. nil and io.EOF pass through untouched.
func wrapError(b Driver, err error, key string) error {
	switch {
	case err == nil:
		return nil
	case errors.Is(err, io.EOF):
		// don't wrap or normalize EOF errors since there are many places
		// in the standard library (e.g. io.ReadAll) that rely on checks
		// such as "err == io.EOF" and they will fail
		return err
	}

	normalized := b.NormalizeError(err)
	if key == "" {
		return normalized
	}

	return fmt.Errorf("[key: %s] %w", key, normalized)
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/blob/writer.go | tools/filesystem/blob/writer.go | package blob
import (
"bytes"
"context"
"fmt"
"hash"
"io"
"net/http"
)
// Largely copied from gocloud.dev/blob.Writer to minimize breaking changes.
//
// -------------------------------------------------------------------
// Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// -------------------------------------------------------------------
// Compile-time check that *Writer satisfies io.WriteCloser.
var _ io.WriteCloser = (*Writer)(nil)

// Writer writes bytes to a blob.
//
// It implements io.WriteCloser (https://golang.org/pkg/io/#Closer), and must be
// closed after all writes are done.
type Writer struct {
	drv          Driver
	w            DriverWriter
	key          string
	cancel       func() // cancels the ctx provided to NewTypedWriter if contentMD5 verification fails
	contentMD5   []byte
	md5hash      hash.Hash
	bytesWritten int
	closed       bool

	// These fields are non-zero values only when w is nil (not yet created).
	//
	// A ctx is stored in the Writer since we need to pass it into NewTypedWriter
	// when we finish detecting the content type of the blob and create the
	// underlying driver.Writer. This step happens inside Write or Close and
	// neither of them take a context.Context as an argument.
	//
	// All 3 fields are only initialized when we create the Writer without
	// setting the w field, and are reset to zero values after w is created.
	ctx  context.Context
	opts *WriterOptions
	buf  *bytes.Buffer
}
// sniffLen is the byte size of Writer.buf used to detect content-type
// (matches the amount http.DetectContentType considers).
const sniffLen = 512
// Write implements the io.Writer interface (https://golang.org/pkg/io/#Writer).
//
// Writes may happen asynchronously, so the returned error can be nil
// even if the actual write eventually fails. The write is only guaranteed to
// have succeeded if Close returns no error.
func (w *Writer) Write(p []byte) (int, error) {
	// Feed the running MD5 hash only when integrity checking was requested.
	if len(w.contentMD5) > 0 {
		if _, err := w.md5hash.Write(p); err != nil {
			return 0, err
		}
	}

	if w.w != nil {
		// The driver writer already exists; write straight through.
		return w.write(p)
	}

	// If w is not yet created due to no content-type being passed in, try to sniff
	// the MIME type based on at most 512 bytes of the blob content of p.

	// Detect the content-type directly if the first chunk is at least 512 bytes.
	if w.buf.Len() == 0 && len(p) >= sniffLen {
		return w.open(p)
	}

	// Store p in w.buf and detect the content-type when the size of content in
	// w.buf is at least 512 bytes.
	n, err := w.buf.Write(p)
	if err != nil {
		return 0, err
	}

	if w.buf.Len() >= sniffLen {
		// Note that w.open will return the full length of the buffer; we don't want
		// to return that as the length of this write since some of them were written in
		// previous writes. Instead, we return the n from this write, above.
		_, err := w.open(w.buf.Bytes())
		return n, err
	}

	return n, nil
}
// Close closes the blob writer. The write operation is not guaranteed
// to have succeeded until Close returns with no error.
//
// Close may return an error if the context provided to create the
// Writer is canceled or reaches its deadline.
func (w *Writer) Close() (err error) {
	w.closed = true

	// Verify the MD5 hash of what was written matches the ContentMD5 provided by the user.
	if len(w.contentMD5) > 0 {
		md5sum := w.md5hash.Sum(nil)
		if !bytes.Equal(md5sum, w.contentMD5) {
			// No match! Return an error, but first cancel the context and call the
			// driver's Close function to ensure the write is aborted.
			w.cancel()
			if w.w != nil {
				_ = w.w.Close()
			}
			return fmt.Errorf("the WriterOptions.ContentMD5 you specified (%X) did not match what was written (%X)", w.contentMD5, md5sum)
		}
	}

	defer w.cancel()

	if w.w != nil {
		return wrapError(w.drv, w.w.Close(), w.key)
	}

	// The driver writer was never created (fewer than sniffLen bytes were
	// buffered); create it now so the buffered content is flushed.
	if _, err := w.open(w.buf.Bytes()); err != nil {
		return err
	}

	return wrapError(w.drv, w.w.Close(), w.key)
}
// open tries to detect the MIME type of p and write it to the blob.
// The error it returns is wrapped.
func (w *Writer) open(p []byte) (int, error) {
	ct := http.DetectContentType(p)

	var err error
	w.w, err = w.drv.NewTypedWriter(w.ctx, w.key, ct, w.opts)
	if err != nil {
		return 0, wrapError(w.drv, err, w.key)
	}

	// Set the 3 fields needed for lazy NewTypedWriter back to zero values
	// (see the comment on Writer).
	w.buf = nil
	w.ctx = nil
	w.opts = nil

	return w.write(p)
}
// write forwards p to the underlying driver writer, tracking the total
// bytes written and wrapping any driver error with the blob key.
func (w *Writer) write(p []byte) (int, error) {
	n, err := w.w.Write(p)
	w.bytesWritten += n
	return n, wrapError(w.drv, err, w.key)
}
// ReadFrom reads from r and writes to w until EOF or error.
// The return value is the number of bytes read from r.
//
// It implements the io.ReaderFrom interface.
func (w *Writer) ReadFrom(r io.Reader) (int64, error) {
	// Delegate to the reader's WriteTo fast path when available, except for
	// our own *Reader (its WriteTo would call back into us -> infinite
	// recursion). Avoids an allocation and a copy.
	if _, isOwnReader := r.(*Reader); !isOwnReader {
		if wt, ok := r.(io.WriterTo); ok {
			return wt.WriteTo(w)
		}
	}

	nr, _, err := readFromWriteTo(r, w)
	return nr, err
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/blob/reader.go | tools/filesystem/blob/reader.go | package blob
import (
"context"
"fmt"
"io"
"log"
"time"
)
// Largely copied from gocloud.dev/blob.Reader to minimize breaking changes.
//
// -------------------------------------------------------------------
// Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// -------------------------------------------------------------------
// Compile-time check that *Reader satisfies io.ReadSeekCloser.
var _ io.ReadSeekCloser = (*Reader)(nil)

// Reader reads bytes from a blob.
// It implements io.ReadSeekCloser, and must be closed after reads are finished.
type Reader struct {
	ctx            context.Context // Used to recreate r after Seeks
	r              DriverReader
	drv            Driver
	key            string
	baseOffset     int64 // The base offset provided to NewRangeReader.
	baseLength     int64 // The length provided to NewRangeReader (may be negative).
	relativeOffset int64 // Current offset (relative to baseOffset).
	savedOffset    int64 // Last relativeOffset for r, saved after relativeOffset is changed in Seek, or -1 if no Seek.
	closed         bool
}
// Read implements io.Reader (https://golang.org/pkg/io/#Reader).
func (r *Reader) Read(p []byte) (int, error) {
	if r.savedOffset != -1 {
		// We've done one or more Seeks since the last read. We may have
		// to recreate the Reader.
		//
		// Note that remembering the savedOffset and lazily resetting the
		// reader like this allows the caller to Seek, then Seek again back,
		// to the original offset, without having to recreate the reader.
		// We only have to recreate the reader if we actually read after a Seek.
		// This is an important optimization because it's common to Seek
		// to (SeekEnd, 0) and use the return value to determine the size
		// of the data, then Seek back to (SeekStart, 0).
		saved := r.savedOffset
		if r.relativeOffset == saved {
			// Nope! We're at the same place we left off.
			r.savedOffset = -1
		} else {
			// Yep! We've changed the offset. Recreate the reader.
			length := r.baseLength
			if length >= 0 {
				length -= r.relativeOffset
				if length < 0 {
					// Shouldn't happen based on checks in Seek.
					return 0, fmt.Errorf("invalid Seek (base length %d, relative offset %d)", r.baseLength, r.relativeOffset)
				}
			}
			newR, err := r.drv.NewRangeReader(r.ctx, r.key, r.baseOffset+r.relativeOffset, length)
			if err != nil {
				return 0, wrapError(r.drv, err, r.key)
			}
			// Close the stale reader before swapping in the new one;
			// its error is intentionally ignored (best-effort cleanup).
			_ = r.r.Close()
			r.savedOffset = -1
			r.r = newR
		}
	}

	n, err := r.r.Read(p)
	r.relativeOffset += int64(n)

	return n, wrapError(r.drv, err, r.key)
}
// Seek implements io.Seeker (https://golang.org/pkg/io/#Seeker).
func (r *Reader) Seek(offset int64, whence int) (int64, error) {
	if r.savedOffset == -1 {
		// Save the current offset for our reader. If the Seek changes the
		// offset, and then we try to read, we'll need to recreate the reader.
		// See comment above in Read for why we do it lazily.
		r.savedOffset = r.relativeOffset
	}

	// The maximum relative offset is the minimum of:
	// 1. The actual size of the blob, minus our initial baseOffset.
	// 2. The length provided to NewRangeReader (if it was non-negative).
	maxRelativeOffset := r.Size() - r.baseOffset
	if r.baseLength >= 0 && r.baseLength < maxRelativeOffset {
		maxRelativeOffset = r.baseLength
	}

	switch whence {
	case io.SeekStart:
		r.relativeOffset = offset
	case io.SeekCurrent:
		r.relativeOffset += offset
	case io.SeekEnd:
		r.relativeOffset = maxRelativeOffset + offset
	}

	if r.relativeOffset < 0 {
		// "Seeking to an offset before the start of the file is an error."
		invalidOffset := r.relativeOffset
		r.relativeOffset = 0
		return 0, fmt.Errorf("Seek resulted in invalid offset %d, using 0", invalidOffset)
	}

	if r.relativeOffset > maxRelativeOffset {
		// "Seeking to any positive offset is legal, but the behavior of subsequent
		// I/O operations on the underlying object is implementation-dependent."
		// We'll choose to set the offset to the EOF.
		log.Printf("blob.Reader.Seek set an offset after EOF (base offset/length from NewRangeReader %d, %d; actual blob size %d; relative offset %d -> absolute offset %d).", r.baseOffset, r.baseLength, r.Size(), r.relativeOffset, r.baseOffset+r.relativeOffset)
		r.relativeOffset = maxRelativeOffset
	}

	return r.relativeOffset, nil
}
// Close implements io.Closer (https://golang.org/pkg/io/#Closer).
// It marks the reader as closed (disarming the leak-detection finalizer)
// and closes the underlying driver reader.
func (r *Reader) Close() error {
	r.closed = true
	return wrapError(r.drv, r.r.Close(), r.key)
}
// ContentType returns the MIME type of the blob, as reported by the driver.
func (r *Reader) ContentType() string {
	return r.r.Attributes().ContentType
}
// ModTime returns the time the blob was last modified.
func (r *Reader) ModTime() time.Time {
return r.r.Attributes().ModTime
}
// Size returns the size of the blob content in bytes.
func (r *Reader) Size() int64 {
return r.r.Attributes().Size
}
// WriteTo reads from r and writes to w until there's no more data or
// an error occurs.
// The return value is the number of bytes written to w.
//
// It implements the io.WriterTo interface.
func (r *Reader) WriteTo(w io.Writer) (int64, error) {
	// Fast path: delegate to the destination's ReaderFrom when available;
	// it avoids an allocation and a copy. Our own *Writer is excluded to
	// prevent infinite recursion between WriteTo and ReadFrom.
	if _, isOwnWriter := w.(*Writer); !isOwnWriter {
		if rf, ok := w.(io.ReaderFrom); ok {
			return rf.ReadFrom(r)
		}
	}

	_, written, err := readFromWriteTo(r, w)
	return written, err
}
// readFromWriteTo is a helper for ReadFrom and WriteTo.
// It reads data from r and writes to w, until EOF or a read/write error.
// It returns the number of bytes read from r and the number of bytes
// written to w.
func readFromWriteTo(r io.Reader, w io.Writer) (int64, int64, error) {
	// io.Copy is intentionally avoided: it would delegate back to r.WriteTo
	// or w.ReadFrom, which is recursive in this context.
	var (
		buf          = make([]byte, 4096)
		totalRead    int64
		totalWritten int64
	)
	for {
		nr, readErr := r.Read(buf)
		if nr > 0 {
			totalRead += int64(nr)
			nw, writeErr := w.Write(buf[:nr])
			totalWritten += int64(nw)
			if writeErr != nil {
				return totalRead, totalWritten, writeErr
			}
		}
		switch {
		case readErr == io.EOF:
			// Done!
			return totalRead, totalWritten, nil
		case readErr != nil:
			return totalRead, totalWritten, readErr
		}
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/blob/hex.go | tools/filesystem/blob/hex.go | package blob
import (
"fmt"
"strconv"
)
// Copied from gocloud.dev/blob to avoid nuances around the specific
// HEX escaping/unescaping rules.
//
// -------------------------------------------------------------------
// Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// -------------------------------------------------------------------
// HexEscape returns s, with all runes for which shouldEscape returns true
// escaped to "__0xXXX__", where XXX is the hex representation of the rune
// value. For example, " " would escape to "__0x20__".
//
// Non-UTF-8 strings will have their non-UTF-8 characters escaped to
// unicode.ReplacementChar; the original value is lost. Please file an
// issue if you need non-UTF8 support.
//
// Note: shouldEscape takes the whole string as a slice of runes and an
// index. Passing it a single byte or a single rune doesn't provide
// enough context for some escape decisions; for example, the caller might
// want to escape the second "/" in "//" but not the first one.
// We pass a slice of runes instead of the string or a slice of bytes
// because some decisions will be made on a rune basis (e.g., encode
// all non-ASCII runes).
func HexEscape(s string, shouldEscape func(s []rune, i int) bool) string {
	runes := []rune(s)

	// First pass: collect the indexes of the runes that must be escaped.
	var toEscape []int
	for i := range runes {
		if shouldEscape(runes, i) {
			toEscape = append(toEscape, i)
		}
	}
	if len(toEscape) == 0 {
		// Nothing to escape -> return the input untouched.
		return s
	}

	// Second pass: rebuild the string. Each escaped rune expands to at most
	// 14 runes ("__0x7fffffff__"), i.e. 13 extra, so reserve that much.
	escaped := make([]rune, 0, len(runes)+13*len(toEscape))
	next := 0 // current index into toEscape
	for i, r := range runes {
		if next < len(toEscape) && i == toEscape[next] {
			// We were asked to escape this rune.
			escaped = append(escaped, []rune(fmt.Sprintf("__%#x__", r))...)
			next++
		} else {
			escaped = append(escaped, r)
		}
	}

	return string(escaped)
}
// unescape tries to unescape starting at r[i].
// It returns a boolean indicating whether the unescaping was successful,
// and (if true) the unescaped rune and the last index of r that was used
// during unescaping.
func unescape(r []rune, i int) (bool, rune, int) {
	// Match the "__0x" prefix. The first rune is assumed to be in range
	// (callers iterate over valid indexes); the rest are bounds-checked.
	if r[i] != '_' {
		return false, 0, 0
	}
	i++
	for _, expected := range []rune("_0x") {
		if i >= len(r) || r[i] != expected {
			return false, 0, 0
		}
		i++
	}

	// Capture the hex digits up to the next "_" (if any).
	digitsStart := i
	for i < len(r) && r[i] != '_' {
		i++
	}
	hexdigits := r[digitsStart:i]

	// Require the trailing "__"; i is left pointing at the second '_'.
	if i >= len(r) || r[i] != '_' {
		return false, 0, 0
	}
	i++
	if i >= len(r) || r[i] != '_' {
		return false, 0, 0
	}

	// Parse the hex digits into an int32 (rune).
	parsed, err := strconv.ParseInt(string(hexdigits), 16, 32)
	if err != nil {
		return false, 0, 0
	}

	return true, rune(parsed), i
}
// HexUnescape reverses HexEscape.
//
// The result slice is allocated lazily: if no "__0xXXX__" sequence is
// found, the input string is returned as-is without any allocation.
func HexUnescape(s string) string {
	runes := []rune(s)
	var out []rune // nil until the first escape sequence is found

	for i := 0; i < len(runes); i++ {
		ok, decoded, last := unescape(runes, i)
		if ok {
			if out == nil {
				// First escape sequence: allocate and copy everything
				// that came before it.
				out = make([]rune, i)
				copy(out, runes)
			}
			out = append(out, decoded)
			i = last // skip past the consumed "__0xXXX__" runes
			continue
		}
		if out != nil {
			out = append(out, runes[i])
		}
	}

	if out == nil {
		return s
	}
	return string(out)
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/internal/s3blob/s3blob_test.go | tools/filesystem/internal/s3blob/s3blob_test.go | package s3blob_test
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strings"
"testing"
"github.com/pocketbase/pocketbase/tools/filesystem/blob"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3/tests"
)
// TestNew verifies that s3blob.New rejects S3 clients missing any of the
// required Bucket, Region or Endpoint fields and returns a non-nil driver
// otherwise.
func TestNew(t *testing.T) {
	t.Parallel()

	scenarios := []struct {
		name        string
		s3Client    *s3.S3
		expectError bool
	}{
		{
			"blank",
			&s3.S3{},
			true,
		},
		{
			"no bucket",
			&s3.S3{Region: "b", Endpoint: "c"},
			true,
		},
		{
			"no endpoint",
			&s3.S3{Bucket: "a", Region: "b"},
			true,
		},
		{
			"no region",
			&s3.S3{Bucket: "a", Endpoint: "c"},
			true,
		},
		{
			"with bucket, endpoint and region",
			&s3.S3{Bucket: "a", Region: "b", Endpoint: "c"},
			false,
		},
	}

	for _, s := range scenarios {
		t.Run(s.name, func(t *testing.T) {
			drv, err := s3blob.New(s.s3Client)

			hasErr := err != nil
			if hasErr != s.expectError {
				t.Fatalf("Expected hasErr %v, got %v (%v)", s.expectError, hasErr, err)
			}

			if err == nil && drv == nil {
				t.Fatal("Expected non-nil driver instance")
			}
		})
	}
}
// TestDriverClose ensures that closing the S3 driver never errors
// (it is a noop for this driver).
func TestDriverClose(t *testing.T) {
	t.Parallel()

	drv, err := s3blob.New(&s3.S3{Bucket: "a", Region: "b", Endpoint: "c"})
	if err != nil {
		t.Fatal(err)
	}

	if err := drv.Close(); err != nil {
		t.Fatalf("Expected nil, got error %v", err)
	}
}
// TestDriverNormilizeError checks that driver.NormalizeError maps S3
// "not found" responses (status 404 or the NoSuchBucket/NoSuchKey/NotFound
// codes, including wrapped ones) to blob.ErrNotFound and leaves other
// errors untouched.
//
// NOTE(review): "Normilize" is a typo of "Normalize" in the test name;
// kept as-is since renaming the test function is out of scope here.
func TestDriverNormilizeError(t *testing.T) {
	t.Parallel()

	drv, err := s3blob.New(&s3.S3{Bucket: "a", Region: "b", Endpoint: "c"})
	if err != nil {
		t.Fatal(err)
	}

	scenarios := []struct {
		name              string
		err               error
		expectErrNotFound bool
	}{
		{
			"plain error",
			errors.New("test"),
			false,
		},
		{
			"response error with only status (non-404)",
			&s3.ResponseError{Status: 123},
			false,
		},
		{
			"response error with only status (404)",
			&s3.ResponseError{Status: 404},
			true,
		},
		{
			"response error with custom code",
			&s3.ResponseError{Code: "test"},
			false,
		},
		{
			"response error with NoSuchBucket code",
			&s3.ResponseError{Code: "NoSuchBucket"},
			true,
		},
		{
			"response error with NoSuchKey code",
			&s3.ResponseError{Code: "NoSuchKey"},
			true,
		},
		{
			"response error with NotFound code",
			&s3.ResponseError{Code: "NotFound"},
			true,
		},
		{
			"wrapped response error with NotFound code", // ensures that the entire error's tree is checked
			fmt.Errorf("test: %w", &s3.ResponseError{Code: "NotFound"}),
			true,
		},
		{
			"already normalized error",
			fmt.Errorf("test: %w", blob.ErrNotFound),
			true,
		},
	}

	for _, s := range scenarios {
		t.Run(s.name, func(t *testing.T) {
			err := drv.NormalizeError(s.err)
			if err == nil {
				t.Fatal("Expected non-nil error")
			}

			isErrNotFound := errors.Is(err, blob.ErrNotFound)
			if isErrNotFound != s.expectErrNotFound {
				t.Fatalf("Expected isErrNotFound %v, got %v (%v)", s.expectErrNotFound, isErrNotFound, err)
			}
		})
	}
}
// TestDriverDeleteEscaping verifies that the blob key is hex escaped
// ("../" -> "..__0x2f__") in the URL of the issued DELETE request.
func TestDriverDeleteEscaping(t *testing.T) {
	t.Parallel()

	// stub the single expected DELETE request with the escaped key
	httpClient := tests.NewClient(&tests.RequestStub{
		Method: http.MethodDelete,
		URL:    "https://test_bucket.example.com/..__0x2f__abc/test/",
	})

	drv, err := s3blob.New(&s3.S3{
		Bucket:   "test_bucket",
		Region:   "test_region",
		Endpoint: "https://example.com",
		Client:   httpClient,
	})
	if err != nil {
		t.Fatal(err)
	}

	err = drv.Delete(context.Background(), "../abc/test/")
	if err != nil {
		t.Fatal(err)
	}

	// ensure all stubbed requests were actually performed
	err = httpClient.AssertNoRemaining()
	if err != nil {
		t.Fatal(err)
	}
}
// TestDriverCopyEscaping verifies that both the destination key (request
// URL) and the source key (x-amz-copy-source header) are hex escaped
// when issuing a CopyObject request.
func TestDriverCopyEscaping(t *testing.T) {
	t.Parallel()

	httpClient := tests.NewClient(&tests.RequestStub{
		Method: http.MethodPut,
		URL:    "https://test_bucket.example.com/..__0x2f__a/",
		Match: func(req *http.Request) bool {
			// the copy source is additionally URL-encoded ("/" -> "%2F")
			return tests.ExpectHeaders(req.Header, map[string]string{
				"x-amz-copy-source": "test_bucket%2F..__0x2f__b%2F",
			})
		},
		Response: &http.Response{
			Body: io.NopCloser(strings.NewReader(`<CopyObjectResult></CopyObjectResult>`)),
		},
	})

	drv, err := s3blob.New(&s3.S3{
		Bucket:   "test_bucket",
		Region:   "test_region",
		Endpoint: "https://example.com",
		Client:   httpClient,
	})
	if err != nil {
		t.Fatal(err)
	}

	err = drv.Copy(context.Background(), "../a/", "../b/")
	if err != nil {
		t.Fatal(err)
	}

	err = httpClient.AssertNoRemaining()
	if err != nil {
		t.Fatal(err)
	}
}
// TestDriverAttributes verifies the mapping of HeadObject response headers
// to blob.Attributes: content headers, size, md5 derived from the ETag,
// and unescaping of the x-amz-meta-* metadata keys/values.
func TestDriverAttributes(t *testing.T) {
	t.Parallel()

	httpClient := tests.NewClient(&tests.RequestStub{
		Method: http.MethodHead,
		URL:    "https://test_bucket.example.com/..__0x2f__a/",
		Response: &http.Response{
			Header: http.Header{
				"Last-Modified":       []string{"Mon, 01 Feb 2025 03:04:05 GMT"},
				"Cache-Control":       []string{"test_cache"},
				"Content-Disposition": []string{"test_disposition"},
				"Content-Encoding":    []string{"test_encoding"},
				"Content-Language":    []string{"test_language"},
				"Content-Type":        []string{"test_type"},
				"Content-Range":       []string{"test_range"},
				"Etag":                []string{`"ce5be8b6f53645c596306c4572ece521"`},
				"Content-Length":      []string{"100"},
				// metadata keys/values are URL-escaped and should be unescaped
				"x-amz-meta-AbC%40": []string{"%40test_meta_a"},
				"x-amz-meta-Def":    []string{"test_meta_b"},
			},
			Body: http.NoBody,
		},
	})

	drv, err := s3blob.New(&s3.S3{
		Bucket:   "test_bucket",
		Region:   "test_region",
		Endpoint: "https://example.com",
		Client:   httpClient,
	})
	if err != nil {
		t.Fatal(err)
	}

	attrs, err := drv.Attributes(context.Background(), "../a/")
	if err != nil {
		t.Fatal(err)
	}

	// compare the full attributes via their JSON serialization
	raw, err := json.Marshal(attrs)
	if err != nil {
		t.Fatal(err)
	}

	expected := `{"cacheControl":"test_cache","contentDisposition":"test_disposition","contentEncoding":"test_encoding","contentLanguage":"test_language","contentType":"test_type","metadata":{"abc@":"@test_meta_a","def":"test_meta_b"},"createTime":"0001-01-01T00:00:00Z","modTime":"2025-02-01T03:04:05Z","size":100,"md5":"zlvotvU2RcWWMGxFcuzlIQ==","etag":"\"ce5be8b6f53645c596306c4572ece521\""}`

	if str := string(raw); str != expected {
		t.Fatalf("Expected attributes\n%s\ngot\n%s", expected, str)
	}

	err = httpClient.AssertNoRemaining()
	if err != nil {
		t.Fatal(err)
	}
}
// TestDriverListPaged verifies the ListObjects (v2) request construction
// (default vs explicit list options) and the conversion of the XML response
// into a blob.ListPage: key unescaping, md5 from ETag, CommonPrefixes as
// directories, sorting, and base64-encoded NextPageToken.
//
// NOTE: the stubbed requests are consumed in registration order, matching
// the scenarios order below.
func TestDriverListPaged(t *testing.T) {
	t.Parallel()

	// listResponse returns a fresh response body since it is consumed per request.
	listResponse := func() *http.Response {
		return &http.Response{
			Body: io.NopCloser(strings.NewReader(`
<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>example</Name>
<ContinuationToken>ct</ContinuationToken>
<NextContinuationToken>test_next</NextContinuationToken>
<StartAfter>example0.txt</StartAfter>
<KeyCount>1</KeyCount>
<MaxKeys>3</MaxKeys>
<Contents>
<Key>..__0x2f__prefixB/test/example.txt</Key>
<LastModified>2025-01-01T01:02:03.123Z</LastModified>
<ETag>"ce5be8b6f53645c596306c4572ece521"</ETag>
<Size>123</Size>
</Contents>
<Contents>
<Key>prefixA/..__0x2f__escape.txt</Key>
<LastModified>2025-01-02T01:02:03.123Z</LastModified>
<Size>456</Size>
</Contents>
<CommonPrefixes>
<Prefix>prefixA</Prefix>
</CommonPrefixes>
<CommonPrefixes>
<Prefix>..__0x2f__prefixB</Prefix>
</CommonPrefixes>
</ListBucketResult>
`)),
		}
	}

	expectedPage := `{"objects":[{"key":"../prefixB","modTime":"0001-01-01T00:00:00Z","size":0,"md5":null,"isDir":true},{"key":"../prefixB/test/example.txt","modTime":"2025-01-01T01:02:03.123Z","size":123,"md5":"zlvotvU2RcWWMGxFcuzlIQ==","isDir":false},{"key":"prefixA","modTime":"0001-01-01T00:00:00Z","size":0,"md5":null,"isDir":true},{"key":"prefixA/../escape.txt","modTime":"2025-01-02T01:02:03.123Z","size":456,"md5":null,"isDir":false}],"nextPageToken":"dGVzdF9uZXh0"}`

	httpClient := tests.NewClient(
		// first scenario: empty options -> only the defaults are sent
		&tests.RequestStub{
			Method:   http.MethodGet,
			URL:      "https://test_bucket.example.com/?list-type=2&max-keys=1000",
			Response: listResponse(),
		},
		// second scenario: all list options set
		&tests.RequestStub{
			Method:   http.MethodGet,
			URL:      "https://test_bucket.example.com/?continuation-token=test_token&delimiter=test_delimiter&list-type=2&max-keys=123&prefix=test_prefix",
			Response: listResponse(),
		},
	)

	drv, err := s3blob.New(&s3.S3{
		Bucket:   "test_bucket",
		Region:   "test_region",
		Endpoint: "https://example.com",
		Client:   httpClient,
	})
	if err != nil {
		t.Fatal(err)
	}

	scenarios := []struct {
		name     string
		opts     *blob.ListOptions
		expected string
	}{
		{
			"empty options",
			&blob.ListOptions{},
			expectedPage,
		},
		{
			"filled options",
			&blob.ListOptions{Prefix: "test_prefix", Delimiter: "test_delimiter", PageSize: 123, PageToken: []byte("test_token")},
			expectedPage,
		},
	}

	for _, s := range scenarios {
		t.Run(s.name, func(t *testing.T) {
			page, err := drv.ListPaged(context.Background(), s.opts)
			if err != nil {
				t.Fatal(err)
			}

			raw, err := json.Marshal(page)
			if err != nil {
				t.Fatal(err)
			}

			if str := string(raw); s.expected != str {
				t.Fatalf("Expected page result\n%s\ngot\n%s", s.expected, str)
			}
		})
	}

	err = httpClient.AssertNoRemaining()
	if err != nil {
		t.Fatal(err)
	}
}
// TestDriverNewRangeReader verifies the Range header construction for the
// different offset/length combinations and the size resolution from the
// Content-Range/Content-Length response headers. It also checks that a
// zero-length read replaces the response body with http.NoBody.
func TestDriverNewRangeReader(t *testing.T) {
	t.Parallel()

	scenarios := []struct {
		offset        int64
		length        int64
		httpClient    *tests.Client
		expectedAttrs string
	}{
		// offset 0, length 0 -> 1 byte is requested but the body is discarded
		{
			0,
			0,
			tests.NewClient(&tests.RequestStub{
				Method: http.MethodGet,
				URL:    "https://test_bucket.example.com/..__0x2f__abc/test.txt",
				Match: func(req *http.Request) bool {
					return tests.ExpectHeaders(req.Header, map[string]string{
						"Range": "bytes=0-0",
					})
				},
				Response: &http.Response{
					Header: http.Header{
						"Last-Modified":  []string{"Mon, 01 Feb 2025 03:04:05 GMT"},
						"Content-Type":   []string{"test_ct"},
						"Content-Length": []string{"123"},
					},
					Body: io.NopCloser(strings.NewReader("test")),
				},
			}),
			`{"contentType":"test_ct","modTime":"2025-02-01T03:04:05Z","size":123}`,
		},
		// negative length -> open-ended range
		{
			10,
			-1,
			tests.NewClient(&tests.RequestStub{
				Method: http.MethodGet,
				URL:    "https://test_bucket.example.com/..__0x2f__abc/test.txt",
				Match: func(req *http.Request) bool {
					return tests.ExpectHeaders(req.Header, map[string]string{
						"Range": "bytes=10-",
					})
				},
				Response: &http.Response{
					Header: http.Header{
						"Last-Modified":  []string{"Mon, 01 Feb 2025 03:04:05 GMT"},
						"Content-Type":   []string{"test_ct"},
						"Content-Range":  []string{"bytes 1-1/456"}, // should take precedence over content-length
						"Content-Length": []string{"123"},
					},
					Body: io.NopCloser(strings.NewReader("test")),
				},
			}),
			`{"contentType":"test_ct","modTime":"2025-02-01T03:04:05Z","size":456}`,
		},
		// no size headers at all -> size defaults to 0
		{
			10,
			0,
			tests.NewClient(&tests.RequestStub{
				Method: http.MethodGet,
				URL:    "https://test_bucket.example.com/..__0x2f__abc/test.txt",
				Match: func(req *http.Request) bool {
					return tests.ExpectHeaders(req.Header, map[string]string{
						"Range": "bytes=10-10",
					})
				},
				Response: &http.Response{
					Header: http.Header{
						"Last-Modified": []string{"Mon, 01 Feb 2025 03:04:05 GMT"},
						"Content-Type":  []string{"test_ct"},
						// no range and length headers
						// "Content-Range": []string{"bytes 1-1/456"},
						// "Content-Length": []string{"123"},
					},
					Body: io.NopCloser(strings.NewReader("test")),
				},
			}),
			`{"contentType":"test_ct","modTime":"2025-02-01T03:04:05Z","size":0}`,
		},
		// malformed Content-Range -> falls back to Content-Length
		{
			10,
			20,
			tests.NewClient(&tests.RequestStub{
				Method: http.MethodGet,
				URL:    "https://test_bucket.example.com/..__0x2f__abc/test.txt",
				Match: func(req *http.Request) bool {
					return tests.ExpectHeaders(req.Header, map[string]string{
						"Range": "bytes=10-29",
					})
				},
				Response: &http.Response{
					Header: http.Header{
						"Last-Modified": []string{"Mon, 01 Feb 2025 03:04:05 GMT"},
						"Content-Type":  []string{"test_ct"},
						// with range header but invalid format -> content-length takes precedence
						"Content-Range":  []string{"bytes invalid-456"},
						"Content-Length": []string{"123"},
					},
					Body: io.NopCloser(strings.NewReader("test")),
				},
			}),
			`{"contentType":"test_ct","modTime":"2025-02-01T03:04:05Z","size":123}`,
		},
	}

	for _, s := range scenarios {
		t.Run(fmt.Sprintf("offset_%d_length_%d", s.offset, s.length), func(t *testing.T) {
			drv, err := s3blob.New(&s3.S3{
				Bucket:   "test_bucket",
				Region:   "tesst_region",
				Endpoint: "https://example.com",
				Client:   s.httpClient,
			})
			if err != nil {
				t.Fatal(err)
			}

			r, err := drv.NewRangeReader(context.Background(), "../abc/test.txt", s.offset, s.length)
			if err != nil {
				t.Fatal(err)
			}
			defer r.Close()

			// the response body should be always replaced with http.NoBody
			if s.length == 0 {
				body := make([]byte, 1)
				n, err := r.Read(body)
				if n != 0 || !errors.Is(err, io.EOF) {
					t.Fatalf("Expected body to be http.NoBody, got %v (%v)", body, err)
				}
			}

			rawAttrs, err := json.Marshal(r.Attributes())
			if err != nil {
				t.Fatal(err)
			}

			if str := string(rawAttrs); str != s.expectedAttrs {
				t.Fatalf("Expected attributes\n%s\ngot\n%s", s.expectedAttrs, str)
			}

			err = s.httpClient.AssertNoRemaining()
			if err != nil {
				t.Fatal(err)
			}
		})
	}
}
// TestDriverNewTypedWriter verifies that the writer uploads the written
// bytes with the expected headers (the explicit contentType argument must
// win over WriterOptions.ContentType and ContentMD5 is base64-encoded),
// and that zero-length writes don't start the upload.
func TestDriverNewTypedWriter(t *testing.T) {
	t.Parallel()

	httpClient := tests.NewClient(
		&tests.RequestStub{
			Method: http.MethodPut,
			URL:    "https://test_bucket.example.com/..__0x2f__abc/test/",
			Match: func(req *http.Request) bool {
				body, err := io.ReadAll(req.Body)
				if err != nil {
					return false
				}

				return string(body) == "test" && tests.ExpectHeaders(req.Header, map[string]string{
					"cache-control":       "test_cache_control",
					"content-disposition": "test_content_disposition",
					"content-encoding":    "test_content_encoding",
					"content-language":    "test_content_language",
					"content-type":        "test_ct",
					"content-md5":         "dGVzdA==",
				})
			},
		},
	)

	drv, err := s3blob.New(&s3.S3{
		Bucket:   "test_bucket",
		Region:   "test_region",
		Endpoint: "https://example.com",
		Client:   httpClient,
	})
	if err != nil {
		t.Fatal(err)
	}

	options := &blob.WriterOptions{
		CacheControl:       "test_cache_control",
		ContentDisposition: "test_content_disposition",
		ContentEncoding:    "test_content_encoding",
		ContentLanguage:    "test_content_language",
		ContentType:        "test_content_type", // should be ignored
		ContentMD5:         []byte("test"),
		Metadata:           map[string]string{"@test_meta_a": "@test"},
	}

	w, err := drv.NewTypedWriter(context.Background(), "../abc/test/", "test_ct", options)
	if err != nil {
		t.Fatal(err)
	}

	// a zero-length write is a noop and must report 0 bytes written
	n, err := w.Write(nil)
	if err != nil {
		t.Fatal(err)
	}
	if n != 0 {
		t.Fatalf("Expected nil write to result in %d written bytes, got %d", 0, n)
	}

	n, err = w.Write([]byte("test"))
	if err != nil {
		t.Fatal(err)
	}
	if n != 4 {
		t.Fatalf("Expected nil write to result in %d written bytes, got %d", 4, n)
	}

	// Close completes the upload
	err = w.Close()
	if err != nil {
		t.Fatal(err)
	}

	err = httpClient.AssertNoRemaining()
	if err != nil {
		t.Fatal(err)
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/internal/s3blob/s3blob.go | tools/filesystem/internal/s3blob/s3blob.go | // Package s3blob provides a blob.Bucket S3 driver implementation.
//
// NB! To minimize breaking changes with older PocketBase releases,
// the driver is based of the previously used gocloud.dev/blob/s3blob,
// hence many of the below doc comments, struct options and interface
// implementations are the same.
//
// The blob abstraction supports all UTF-8 strings; to make this work with services lacking
// full UTF-8 support, strings must be escaped (during writes) and unescaped
// (during reads). The following escapes are performed for s3blob:
// - Blob keys: ASCII characters 0-31 are escaped to "__0x<hex>__".
// Additionally, the "/" in "../" is escaped in the same way.
// - Metadata keys: Escaped using URL encoding, then additionally "@:=" are
// escaped using "__0x<hex>__". These characters were determined by
// experimentation.
// - Metadata values: Escaped using URL encoding.
//
// Example:
//
// drv, _ := s3blob.New(&s3.S3{
// Bucket: "bucketName",
// Region: "region",
// Endpoint: "endpoint",
// AccessKey: "accessKey",
// SecretKey: "secretKey",
// })
// bucket := blob.NewBucket(drv)
package s3blob
import (
"context"
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"github.com/pocketbase/pocketbase/tools/filesystem/blob"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
)
// defaultPageSize is the number of keys requested per ListObjects call
// when blob.ListOptions.PageSize is not set.
const defaultPageSize = 1000
// New creates a new instance of the S3 driver backed by the internal S3 client.
//
// It returns an error if any of the required Bucket, Endpoint or Region
// client fields is missing.
func New(s3Client *s3.S3) (blob.Driver, error) {
	switch {
	case s3Client.Bucket == "":
		return nil, errors.New("s3blob.New: missing bucket name")
	case s3Client.Endpoint == "":
		return nil, errors.New("s3blob.New: missing endpoint")
	case s3Client.Region == "":
		return nil, errors.New("s3blob.New: missing region")
	}

	return &driver{s3: s3Client}, nil
}
// driver implements blob.Driver on top of the internal S3 client.
type driver struct {
	s3 *s3.S3
}
// Close implements [blob/Driver.Close].
//
// The S3 client holds no persistent resources, so there is nothing to release.
func (drv *driver) Close() error {
	return nil // nothing to close
}
// NormalizeError implements [blob/Driver.NormalizeError].
//
// S3 "not found" responses (HTTP 404 or the NoSuchBucket/NoSuchKey/NotFound
// error codes, anywhere in the error tree) are joined with blob.ErrNotFound
// so callers can detect them with errors.Is. Other errors pass through.
func (drv *driver) NormalizeError(err error) error {
	if errors.Is(err, blob.ErrNotFound) {
		return err // already normalized
	}

	var respErr *s3.ResponseError
	if errors.As(err, &respErr) {
		switch {
		case respErr.Status == 404,
			respErr.Code == "NoSuchBucket",
			respErr.Code == "NoSuchKey",
			respErr.Code == "NotFound":
			return errors.Join(err, blob.ErrNotFound)
		}
	}

	return err
}
// ListPaged implements [blob/Driver.ListPaged].
//
// It performs a single ListObjects (v2) call, treating CommonPrefixes
// entries as directory objects and reusing the S3 continuation token as
// the page token.
func (drv *driver) ListPaged(ctx context.Context, opts *blob.ListOptions) (*blob.ListPage, error) {
	pageSize := opts.PageSize
	if pageSize == 0 {
		pageSize = defaultPageSize
	}

	listParams := s3.ListParams{
		MaxKeys: pageSize,
	}
	if len(opts.PageToken) > 0 {
		listParams.ContinuationToken = string(opts.PageToken)
	}
	if opts.Prefix != "" {
		// prefix/delimiter must be escaped the same way as the stored keys
		listParams.Prefix = escapeKey(opts.Prefix)
	}
	if opts.Delimiter != "" {
		listParams.Delimiter = escapeKey(opts.Delimiter)
	}

	resp, err := drv.s3.ListObjects(ctx, listParams)
	if err != nil {
		return nil, err
	}

	page := blob.ListPage{}
	if resp.NextContinuationToken != "" {
		page.NextPageToken = []byte(resp.NextContinuationToken)
	}

	if n := len(resp.Contents) + len(resp.CommonPrefixes); n > 0 {
		page.Objects = make([]*blob.ListObject, n)
		for i, obj := range resp.Contents {
			page.Objects[i] = &blob.ListObject{
				Key:     unescapeKey(obj.Key),
				ModTime: obj.LastModified,
				Size:    obj.Size,
				MD5:     eTagToMD5(obj.ETag),
			}
		}

		// common prefixes are appended after the regular blobs as "directories"
		for i, prefix := range resp.CommonPrefixes {
			page.Objects[i+len(resp.Contents)] = &blob.ListObject{
				Key:   unescapeKey(prefix.Prefix),
				IsDir: true,
			}
		}

		if len(resp.Contents) > 0 && len(resp.CommonPrefixes) > 0 {
			// S3 gives us blobs and "directories" in separate lists; sort them.
			sort.Slice(page.Objects, func(i, j int) bool {
				return page.Objects[i].Key < page.Objects[j].Key
			})
		}
	}

	return &page, nil
}
// Attributes implements [blob/Driver.Attributes].
//
// It issues a HeadObject request and maps the response to blob.Attributes,
// unescaping the metadata keys and values.
func (drv *driver) Attributes(ctx context.Context, key string) (*blob.Attributes, error) {
	key = escapeKey(key)

	resp, err := drv.s3.HeadObject(ctx, key)
	if err != nil {
		return nil, err
	}

	md := make(map[string]string, len(resp.Metadata))
	for k, v := range resp.Metadata {
		// See the package comments for more details on escaping of metadata keys & values.
		md[blob.HexUnescape(urlUnescape(k))] = urlUnescape(v)
	}

	return &blob.Attributes{
		CacheControl:       resp.CacheControl,
		ContentDisposition: resp.ContentDisposition,
		ContentEncoding:    resp.ContentEncoding,
		ContentLanguage:    resp.ContentLanguage,
		ContentType:        resp.ContentType,
		Metadata:           md,
		// CreateTime not supported; left as the zero time.
		ModTime: resp.LastModified,
		Size:    resp.ContentLength,
		MD5:     eTagToMD5(resp.ETag),
		ETag:    resp.ETag,
	}, nil
}
// NewRangeReader implements [blob/Driver.NewRangeReader].
//
// length semantics: a negative length reads from offset to the end, 0
// performs a metadata-only read (1 byte is requested and the body is
// replaced with http.NoBody), and a positive length reads
// [offset, offset+length).
func (drv *driver) NewRangeReader(ctx context.Context, key string, offset, length int64) (blob.DriverReader, error) {
	key = escapeKey(key)

	var byteRange string
	if offset > 0 && length < 0 {
		byteRange = fmt.Sprintf("bytes=%d-", offset)
	} else if length == 0 {
		// AWS doesn't support a zero-length read; we'll read 1 byte and then
		// ignore it in favor of http.NoBody below.
		byteRange = fmt.Sprintf("bytes=%d-%d", offset, offset)
	} else if length >= 0 {
		byteRange = fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)
	}

	reqOpt := func(req *http.Request) {
		if byteRange != "" {
			req.Header.Set("Range", byteRange)
		}
	}

	resp, err := drv.s3.GetObject(ctx, key, reqOpt)
	if err != nil {
		return nil, err
	}

	body := resp.Body
	if length == 0 {
		body = http.NoBody
	}

	return &reader{
		body: body,
		attrs: &blob.ReaderAttributes{
			ContentType: resp.ContentType,
			ModTime:     resp.LastModified,
			// Content-Range (when present and valid) holds the full blob
			// size for partial reads, so it takes precedence over
			// Content-Length (see getSize).
			Size: getSize(resp.ContentLength, resp.ContentRange),
		},
	}, nil
}
// NewTypedWriter implements [blob/Driver.NewTypedWriter].
//
// The returned writer lazily starts a multipart-capable upload on the
// first non-empty Write (see writer.Write). The explicit contentType
// argument is applied via a request option and takes precedence over
// opts.ContentType, which is intentionally not used here.
func (drv *driver) NewTypedWriter(ctx context.Context, key string, contentType string, opts *blob.WriterOptions) (blob.DriverWriter, error) {
	key = escapeKey(key)

	u := &s3.Uploader{
		S3:  drv.s3,
		Key: key,
	}

	if opts.BufferSize != 0 {
		u.MinPartSize = opts.BufferSize
	}

	if opts.MaxConcurrency != 0 {
		u.MaxConcurrency = opts.MaxConcurrency
	}

	md := make(map[string]string, len(opts.Metadata))
	for k, v := range opts.Metadata {
		// See the package comments for more details on escaping of metadata keys & values.
		k = blob.HexEscape(url.PathEscape(k), func(runes []rune, i int) bool {
			c := runes[i]
			return c == '@' || c == ':' || c == '='
		})
		md[k] = url.PathEscape(v)
	}
	u.Metadata = md

	var reqOptions []func(*http.Request)
	reqOptions = append(reqOptions, func(r *http.Request) {
		r.Header.Set("Content-Type", contentType)
		if opts.CacheControl != "" {
			r.Header.Set("Cache-Control", opts.CacheControl)
		}
		if opts.ContentDisposition != "" {
			r.Header.Set("Content-Disposition", opts.ContentDisposition)
		}
		if opts.ContentEncoding != "" {
			r.Header.Set("Content-Encoding", opts.ContentEncoding)
		}
		if opts.ContentLanguage != "" {
			r.Header.Set("Content-Language", opts.ContentLanguage)
		}
		if len(opts.ContentMD5) > 0 {
			// S3 expects the Content-MD5 header base64-encoded
			r.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(opts.ContentMD5))
		}
	})

	return &writer{
		ctx:        ctx,
		uploader:   u,
		donec:      make(chan struct{}),
		reqOptions: reqOptions,
	}, nil
}
// Copy implements [blob/Driver.Copy].
//
// Both keys are escaped before issuing the S3 CopyObject request.
func (drv *driver) Copy(ctx context.Context, dstKey, srcKey string) error {
	_, err := drv.s3.CopyObject(ctx, escapeKey(srcKey), escapeKey(dstKey))
	return err
}
// Delete implements [blob/Driver.Delete].
//
// The key is escaped before issuing the S3 DeleteObject request.
func (drv *driver) Delete(ctx context.Context, key string) error {
	return drv.s3.DeleteObject(ctx, escapeKey(key))
}
// -------------------------------------------------------------------

// reader reads an S3 object. It implements io.ReadCloser.
type reader struct {
	// attrs holds the response-derived attributes exposed via Attributes().
	attrs *blob.ReaderAttributes
	// body is the (possibly http.NoBody) response body of the GetObject call.
	body io.ReadCloser
}
// Read implements [io/ReadCloser.Read] by delegating to the response body.
func (r *reader) Read(p []byte) (int, error) {
	return r.body.Read(p)
}

// Close closes the reader itself. It must be called when done reading.
func (r *reader) Close() error {
	return r.body.Close()
}

// Attributes implements [blob/DriverReader.Attributes].
func (r *reader) Attributes() *blob.ReaderAttributes {
	return r.attrs
}
// -------------------------------------------------------------------

// writer writes an S3 object, it implements io.WriteCloser.
//
// The actual upload is performed by a background goroutine (see open)
// fed through an io.Pipe that is created lazily on the first non-empty
// Write.
type writer struct {
	ctx context.Context

	err      error // written before donec closes
	uploader *s3.Uploader

	// Ends of an io.Pipe, created when the first byte is written.
	pw *io.PipeWriter
	pr *io.PipeReader

	donec chan struct{} // closed when done writing

	reqOptions []func(*http.Request)
}
// Write appends p to w.pw. User must call Close to close the w after done writing.
//
// The upload goroutine and the connecting pipe are started lazily on the
// first non-empty write so that empty blobs can be created without a pipe.
func (w *writer) Write(p []byte) (int, error) {
	// Avoid opening the pipe for a zero-length write;
	// the concrete can do these for empty blobs.
	if len(p) == 0 {
		return 0, nil
	}

	if w.pw == nil {
		// We'll write into pw and use pr as an io.Reader for the
		// Upload call to S3.
		w.pr, w.pw = io.Pipe()
		w.open(w.pr, true)
	}

	return w.pw.Write(p)
}
// open starts the background upload goroutine reading from r.
//
// r may be nil if we're Closing and no data was written.
// If closePipeOnError is true, w.pr will be closed if there's an
// error uploading to S3 (unblocking any pending Write call).
func (w *writer) open(r io.Reader, closePipeOnError bool) {
	// This goroutine will keep running until Close, unless there's an error.
	go func() {
		defer func() {
			// signal Close that the upload finished (w.err is set before this)
			close(w.donec)
		}()

		if r == nil {
			// AWS doesn't like a nil Body.
			r = http.NoBody
		}

		w.uploader.Payload = r

		err := w.uploader.Upload(w.ctx, w.reqOptions...)
		if err != nil {
			if closePipeOnError {
				w.pr.CloseWithError(err)
			}
			w.err = err
		}
	}()
}
// Close completes the writer and closes it. Any error occurring during write
// will be returned. If a writer is closed before any Write is called, Close
// will create an empty file at the given key.
func (w *writer) Close() error {
	if w.pr != nil {
		defer w.pr.Close()
	}

	if w.pw == nil {
		// We never got any bytes written. We'll write an http.NoBody.
		w.open(nil, false)
	} else if err := w.pw.Close(); err != nil {
		return err
	}

	// wait for the upload goroutine to finish before reading w.err
	<-w.donec

	return w.err
}
// -------------------------------------------------------------------

// eTagToMD5 processes an ETag header and returns an MD5 hash if possible.
// S3's ETag header is sometimes a quoted hexstring of the MD5. Other times,
// notably when the object was uploaded in multiple parts, it is not.
// We do the best we can.
// Some links about ETag:
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html
// https://github.com/aws/aws-sdk-net/issues/815
// https://teppen.io/2018/06/23/aws_s3_etags/
func eTagToMD5(etag string) []byte {
	// Require a non-empty value wrapped in double quotes
	// (this also covers the missing-header case).
	if len(etag) < 2 || etag[0] != '"' || etag[len(etag)-1] != '"' {
		return nil
	}

	// Un-hex the quoted content; multi-part uploaded blobs have an ETag
	// containing "-" which is never a legal hex encoding, so they (and any
	// other non-hex value) simply yield nil.
	decoded, err := hex.DecodeString(etag[1 : len(etag)-1])
	if err != nil {
		return nil
	}

	return decoded
}
// getSize resolves the full blob size from the response headers.
//
// ContentLength is only the size of the returned body, which is wrong for
// partial-length reads; Content-Range (e.g. "bytes 10-14/27") carries the
// full size after the "/", so it takes precedence when present and valid.
func getSize(contentLength int64, contentRange string) int64 {
	if contentRange == "" {
		return contentLength
	}

	_, total, found := strings.Cut(contentRange, "/")
	if !found {
		return contentLength
	}

	size, err := strconv.ParseInt(total, 10, 64)
	if err != nil {
		return contentLength
	}

	return size
}
// escapeKey does all required escaping for UTF-8 strings to work with S3.
func escapeKey(key string) string {
	return blob.HexEscape(key, func(r []rune, i int) bool {
		switch c := r[i]; {
		case c < 32:
			// S3 doesn't handle control characters (determined via experimentation).
			return true
		case c == '/' && i > 1 && r[i-1] == '.' && r[i-2] == '.':
			// For "../", escape the trailing slash.
			return true
		default:
			return false
		}
	})
}
// unescapeKey reverses escapeKey.
func unescapeKey(key string) string {
return blob.HexUnescape(key)
}
// urlUnescape reverses URLEscape using url.PathUnescape. If the unescape
// returns an error, it returns s.
func urlUnescape(s string) string {
if u, err := url.PathUnescape(s); err == nil {
return u
}
return s
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/internal/s3blob/s3/head_object_test.go | tools/filesystem/internal/s3blob/s3/head_object_test.go | package s3_test
import (
"context"
"encoding/json"
"net/http"
"testing"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3/tests"
)
func TestS3HeadObject(t *testing.T) {
t.Parallel()
httpClient := tests.NewClient(
&tests.RequestStub{
Method: http.MethodHead,
URL: "http://test_bucket.example.com/test_key",
Match: func(req *http.Request) bool {
return tests.ExpectHeaders(req.Header, map[string]string{
"test_header": "test",
"Authorization": "^.+Credential=123/.+$",
})
},
Response: &http.Response{
Header: http.Header{
"Last-Modified": []string{"Mon, 01 Feb 2025 03:04:05 GMT"},
"Cache-Control": []string{"test_cache"},
"Content-Disposition": []string{"test_disposition"},
"Content-Encoding": []string{"test_encoding"},
"Content-Language": []string{"test_language"},
"Content-Type": []string{"test_type"},
"Content-Range": []string{"test_range"},
"Etag": []string{"test_etag"},
"Content-Length": []string{"100"},
"x-amz-meta-AbC": []string{"test_meta_a"},
"x-amz-meta-Def": []string{"test_meta_b"},
},
Body: http.NoBody,
},
},
)
s3Client := &s3.S3{
Client: httpClient,
Region: "test_region",
Bucket: "test_bucket",
Endpoint: "http://example.com",
AccessKey: "123",
SecretKey: "abc",
}
resp, err := s3Client.HeadObject(context.Background(), "test_key", func(r *http.Request) {
r.Header.Set("test_header", "test")
})
if err != nil {
t.Fatal(err)
}
err = httpClient.AssertNoRemaining()
if err != nil {
t.Fatal(err)
}
raw, err := json.Marshal(resp)
if err != nil {
t.Fatal(err)
}
rawStr := string(raw)
expected := `{"metadata":{"abc":"test_meta_a","def":"test_meta_b"},"lastModified":"2025-02-01T03:04:05Z","cacheControl":"test_cache","contentDisposition":"test_disposition","contentEncoding":"test_encoding","contentLanguage":"test_language","contentType":"test_type","contentRange":"test_range","etag":"test_etag","contentLength":100}`
if rawStr != expected {
t.Fatalf("Expected response\n%s\ngot\n%s", expected, rawStr)
}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/internal/s3blob/s3/copy_object_test.go | tools/filesystem/internal/s3blob/s3/copy_object_test.go | package s3_test
import (
"context"
"io"
"net/http"
"strings"
"testing"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3/tests"
)
func TestS3CopyObject(t *testing.T) {
t.Parallel()
httpClient := tests.NewClient(
&tests.RequestStub{
Method: http.MethodPut,
URL: "http://test_bucket.example.com/%40dst_test",
Match: func(req *http.Request) bool {
return tests.ExpectHeaders(req.Header, map[string]string{
"test_header": "test",
"x-amz-copy-source": "test_bucket%2F@src_test",
"Authorization": "^.+Credential=123/.+$",
})
},
Response: &http.Response{
Body: io.NopCloser(strings.NewReader(`
<CopyObjectResult>
<LastModified>2025-01-01T01:02:03.456Z</LastModified>
<ETag>test_etag</ETag>
</CopyObjectResult>
`)),
},
},
)
s3Client := &s3.S3{
Client: httpClient,
Region: "test_region",
Bucket: "test_bucket",
Endpoint: "http://example.com",
AccessKey: "123",
SecretKey: "abc",
}
copyResp, err := s3Client.CopyObject(context.Background(), "@src_test", "@dst_test", func(r *http.Request) {
r.Header.Set("test_header", "test")
})
if err != nil {
t.Fatal(err)
}
err = httpClient.AssertNoRemaining()
if err != nil {
t.Fatal(err)
}
if copyResp.ETag != "test_etag" {
t.Fatalf("Expected ETag %q, got %q", "test_etag", copyResp.ETag)
}
if date := copyResp.LastModified.Format("2006-01-02T15:04:05.000Z"); date != "2025-01-01T01:02:03.456Z" {
t.Fatalf("Expected LastModified %q, got %q", "2025-01-01T01:02:03.456Z", date)
}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/internal/s3blob/s3/s3_test.go | tools/filesystem/internal/s3blob/s3/s3_test.go | package s3_test
import (
"io"
"net/http"
"strings"
"testing"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3/tests"
)
func TestS3URL(t *testing.T) {
t.Parallel()
path := "/test_key/a/b c@d?a=@1&b=!2#@a b c"
// note: query params and fragments are kept as it is
// since they are later escaped if necessery by the Go HTTP client
expectedPath := "/test_key/a/b%20c%40d?a=@1&b=!2#@a b c"
scenarios := []struct {
name string
s3Client *s3.S3
expected string
}{
{
"no schema",
&s3.S3{
Region: "test_region",
Bucket: "test_bucket",
Endpoint: "example.com/",
AccessKey: "123",
SecretKey: "abc",
},
"https://test_bucket.example.com" + expectedPath,
},
{
"with https schema",
&s3.S3{
Region: "test_region",
Bucket: "test_bucket",
Endpoint: "https://example.com/",
AccessKey: "123",
SecretKey: "abc",
},
"https://test_bucket.example.com" + expectedPath,
},
{
"with http schema",
&s3.S3{
Region: "test_region",
Bucket: "test_bucket",
Endpoint: "http://example.com/",
AccessKey: "123",
SecretKey: "abc",
},
"http://test_bucket.example.com" + expectedPath,
},
{
"path style addressing (non-explicit schema)",
&s3.S3{
Region: "test_region",
Bucket: "test_bucket",
Endpoint: "example.com/",
AccessKey: "123",
SecretKey: "abc",
UsePathStyle: true,
},
"https://example.com/test_bucket" + expectedPath,
},
{
"path style addressing (explicit schema)",
&s3.S3{
Region: "test_region",
Bucket: "test_bucket",
Endpoint: "http://example.com/",
AccessKey: "123",
SecretKey: "abc",
UsePathStyle: true,
},
"http://example.com/test_bucket" + expectedPath,
},
}
for _, s := range scenarios {
t.Run(s.name, func(t *testing.T) {
result := s.s3Client.URL(path)
if result != s.expected {
t.Fatalf("Expected URL\n%s\ngot\n%s", s.expected, result)
}
})
}
}
func TestS3SignAndSend(t *testing.T) {
t.Parallel()
testResponse := func() *http.Response {
return &http.Response{
Body: io.NopCloser(strings.NewReader("test_response")),
}
}
scenarios := []struct {
name string
path string
reqFunc func(req *http.Request)
s3Client *s3.S3
}{
{
"minimal",
"/test",
func(req *http.Request) {
req.Header.Set("x-amz-date", "20250102T150405Z")
},
&s3.S3{
Region: "test_region",
Bucket: "test_bucket",
Endpoint: "https://example.com/",
AccessKey: "123",
SecretKey: "abc",
Client: tests.NewClient(&tests.RequestStub{
Method: http.MethodGet,
URL: "https://test_bucket.example.com/test",
Response: testResponse(),
Match: func(req *http.Request) bool {
return tests.ExpectHeaders(req.Header, map[string]string{
"Authorization": "AWS4-HMAC-SHA256 Credential=123/20250102/test_region/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=ea093662bc1deef08dfb4ac35453dfaad5ea89edf102e9dd3b7156c9a27e4c1f",
"Host": "test_bucket.example.com",
"X-Amz-Content-Sha256": "UNSIGNED-PAYLOAD",
"X-Amz-Date": "20250102T150405Z",
})
},
}),
},
},
{
"minimal with different access and secret keys",
"/test",
func(req *http.Request) {
req.Header.Set("x-amz-date", "20250102T150405Z")
},
&s3.S3{
Region: "test_region",
Bucket: "test_bucket",
Endpoint: "https://example.com/",
AccessKey: "456",
SecretKey: "def",
Client: tests.NewClient(&tests.RequestStub{
Method: http.MethodGet,
URL: "https://test_bucket.example.com/test",
Response: testResponse(),
Match: func(req *http.Request) bool {
return tests.ExpectHeaders(req.Header, map[string]string{
"Authorization": "AWS4-HMAC-SHA256 Credential=456/20250102/test_region/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=17510fa1f724403dd0a563b61c9b31d1d718f877fcbd75455620d17a8afce5fb",
"Host": "test_bucket.example.com",
"X-Amz-Content-Sha256": "UNSIGNED-PAYLOAD",
"X-Amz-Date": "20250102T150405Z",
})
},
}),
},
},
{
"minimal with special characters",
"/ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789 -_.~!@&*():=$()?a=1&@b=@2#@a b c",
func(req *http.Request) {
req.Header.Set("x-amz-date", "20250102T150405Z")
},
&s3.S3{
Region: "test_region",
Bucket: "test_bucket",
Endpoint: "https://example.com/",
AccessKey: "456",
SecretKey: "def",
Client: tests.NewClient(&tests.RequestStub{
Method: http.MethodGet,
URL: "https://test_bucket.example.com/ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789%20-_.~%21%40%26%2A%28%29%3A%3D%24%28%29?a=1&@b=@2#@a%20b%20c",
Response: testResponse(),
Match: func(req *http.Request) bool {
return tests.ExpectHeaders(req.Header, map[string]string{
"Authorization": "AWS4-HMAC-SHA256 Credential=456/20250102/test_region/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=9458a033554f52913801b3de16f54409b36ed25c6da3aed14e64439500e2c5e1",
"Host": "test_bucket.example.com",
"X-Amz-Content-Sha256": "UNSIGNED-PAYLOAD",
"X-Amz-Date": "20250102T150405Z",
})
},
}),
},
},
{
"with extra headers",
"/test",
func(req *http.Request) {
req.Header.Set("x-amz-date", "20250102T150405Z")
req.Header.Set("x-amz-content-sha256", "test_sha256")
req.Header.Set("x-amz-example", "123")
req.Header.Set("x-amz-meta-a", "456")
req.Header.Set("content-type", "image/png")
req.Header.Set("x-test", "789") // shouldn't be included in the signing headers
},
&s3.S3{
Region: "test_region",
Bucket: "test_bucket",
Endpoint: "https://example.com/",
AccessKey: "123",
SecretKey: "abc",
Client: tests.NewClient(&tests.RequestStub{
Method: http.MethodGet,
URL: "https://test_bucket.example.com/test",
Response: testResponse(),
Match: func(req *http.Request) bool {
return tests.ExpectHeaders(req.Header, map[string]string{
"authorization": "AWS4-HMAC-SHA256 Credential=123/20250102/test_region/s3/aws4_request, SignedHeaders=content-type;host;x-amz-content-sha256;x-amz-date;x-amz-example;x-amz-meta-a, Signature=86dccbcd012c33073dc99e9d0a9e0b717a4d8c11c37848cfa9a4a02716bc0db3",
"host": "test_bucket.example.com",
"x-amz-date": "20250102T150405Z",
"x-amz-content-sha256": "test_sha256",
"x-amz-example": "123",
"x-amz-meta-a": "456",
"x-test": "789",
})
},
}),
},
},
}
for _, s := range scenarios {
t.Run(s.name, func(t *testing.T) {
req, err := http.NewRequest(http.MethodGet, s.s3Client.URL(s.path), strings.NewReader("test_request"))
if err != nil {
t.Fatal(err)
}
if s.reqFunc != nil {
s.reqFunc(req)
}
resp, err := s.s3Client.SignAndSend(req)
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()
err = s.s3Client.Client.(*tests.Client).AssertNoRemaining()
if err != nil {
t.Fatal(err)
}
expectedBody := "test_response"
body, err := io.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}
if str := string(body); str != expectedBody {
t.Fatalf("Expected body %q, got %q", expectedBody, str)
}
})
}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/internal/s3blob/s3/delete_object_test.go | tools/filesystem/internal/s3blob/s3/delete_object_test.go | package s3_test
import (
"context"
"net/http"
"testing"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3/tests"
)
func TestS3DeleteObject(t *testing.T) {
t.Parallel()
httpClient := tests.NewClient(
&tests.RequestStub{
Method: http.MethodDelete,
URL: "http://test_bucket.example.com/test_key",
Match: func(req *http.Request) bool {
return tests.ExpectHeaders(req.Header, map[string]string{
"test_header": "test",
"Authorization": "^.+Credential=123/.+$",
})
},
},
)
s3Client := &s3.S3{
Client: httpClient,
Region: "test_region",
Bucket: "test_bucket",
Endpoint: "http://example.com",
AccessKey: "123",
SecretKey: "abc",
}
err := s3Client.DeleteObject(context.Background(), "test_key", func(r *http.Request) {
r.Header.Set("test_header", "test")
})
if err != nil {
t.Fatal(err)
}
err = httpClient.AssertNoRemaining()
if err != nil {
t.Fatal(err)
}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/internal/s3blob/s3/error.go | tools/filesystem/internal/s3blob/s3/error.go | package s3
import (
"encoding/xml"
"strconv"
"strings"
)
var _ error = (*ResponseError)(nil)
// ResponseError defines a general S3 response error.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
type ResponseError struct {
XMLName xml.Name `json:"-" xml:"Error"`
Code string `json:"code" xml:"Code"`
Message string `json:"message" xml:"Message"`
RequestId string `json:"requestId" xml:"RequestId"`
Resource string `json:"resource" xml:"Resource"`
Raw []byte `json:"-" xml:"-"`
Status int `json:"status" xml:"Status"`
}
// Error implements the std error interface.
func (err *ResponseError) Error() string {
var strBuilder strings.Builder
strBuilder.WriteString(strconv.Itoa(err.Status))
strBuilder.WriteString(" ")
if err.Code != "" {
strBuilder.WriteString(err.Code)
} else {
strBuilder.WriteString("S3ResponseError")
}
if err.Message != "" {
strBuilder.WriteString(": ")
strBuilder.WriteString(err.Message)
}
if len(err.Raw) > 0 {
strBuilder.WriteString("\n(RAW: ")
strBuilder.Write(err.Raw)
strBuilder.WriteString(")")
}
return strBuilder.String()
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/internal/s3blob/s3/s3.go | tools/filesystem/internal/s3blob/s3/s3.go | // Package s3 implements a lightweight client for interacting with the
// REST APIs of any S3 compatible service.
//
// It implements only the minimal functionality required by PocketBase
// such as objects list, get, copy, delete and upload.
//
// For more details why we don't use the official aws-sdk-go-v2, you could check
// https://github.com/pocketbase/pocketbase/discussions/6562.
//
// Example:
//
// client := &s3.S3{
// Endpoint: "example.com",
// Region: "us-east-1",
// Bucket: "test",
// AccessKey: "...",
// SecretKey: "...",
// UsePathStyle: true,
// }
// resp, err := client.GetObject(context.Background(), "abc.txt")
package s3
import (
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"encoding/xml"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"slices"
"strings"
"time"
)
const (
awsS3ServiceCode = "s3"
awsSignAlgorithm = "AWS4-HMAC-SHA256"
awsTerminationString = "aws4_request"
metadataPrefix = "x-amz-meta-"
dateTimeFormat = "20060102T150405Z"
)
type HTTPClient interface {
Do(req *http.Request) (*http.Response, error)
}
type S3 struct {
// Client specifies a custom HTTP client to send the request with.
//
// If not explicitly set, fallbacks to http.DefaultClient.
Client HTTPClient
Bucket string
Region string
Endpoint string // can be with or without the schema
AccessKey string
SecretKey string
UsePathStyle bool
}
// URL constructs an S3 request URL based on the current configuration.
//
// Note that the path will be URL escaped based on the AWS [UriEncode rules]
// for broader compatibility with some providers that expect the same
// path format as the one in the canonical signed header
// (see also https://github.com/pocketbase/pocketbase/issues/7153).
//
// [UriEncode rules]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv-create-signed-request.html
func (s3 *S3) URL(path string) string {
scheme := "https"
endpoint := strings.TrimRight(s3.Endpoint, "/")
if after, ok := strings.CutPrefix(endpoint, "https://"); ok {
endpoint = after
} else if after, ok := strings.CutPrefix(endpoint, "http://"); ok {
endpoint = after
scheme = "http"
}
// to prevent double escaping we first parse/unescape it
parsed, err := url.Parse(path)
if err != nil {
// truly rare case, keep the path as it is
} else {
path = escapePath(parsed.Path)
// the rest is usually not expected to be part of the S3 path but it is kept to avoid surprises
// (it will be further escaped if necessery by the Go HTTP client)
if parsed.RawQuery != "" {
path += "?" + parsed.RawQuery
}
if parsed.RawFragment != "" {
path += "#" + parsed.RawFragment
}
}
path = strings.TrimLeft(path, "/")
if s3.UsePathStyle {
return fmt.Sprintf("%s://%s/%s/%s", scheme, endpoint, s3.Bucket, path)
}
return fmt.Sprintf("%s://%s.%s/%s", scheme, s3.Bucket, endpoint, path)
}
// SignAndSend signs the provided request per AWS Signature v4 and sends it.
//
// It automatically normalizes all 40x/50x responses to ResponseError.
//
// Note: Don't forget to call resp.Body.Close() after done with the result.
func (s3 *S3) SignAndSend(req *http.Request) (*http.Response, error) {
s3.sign(req)
client := s3.Client
if client == nil {
client = http.DefaultClient
}
resp, err := client.Do(req)
if err != nil {
return nil, err
}
if resp.StatusCode >= 400 {
defer resp.Body.Close()
respErr := &ResponseError{
Status: resp.StatusCode,
}
respErr.Raw, err = io.ReadAll(resp.Body)
if err != nil && !errors.Is(err, io.EOF) {
return nil, errors.Join(err, respErr)
}
if len(respErr.Raw) > 0 {
err = xml.Unmarshal(respErr.Raw, respErr)
if err != nil {
return nil, errors.Join(err, respErr)
}
}
return nil, respErr
}
return resp, nil
}
// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv-create-signed-request.html#create-signed-request-steps
func (s3 *S3) sign(req *http.Request) {
// fallback to the Unsigned payload option
// (data integrity checks could be still applied via the content-md5 or x-amz-checksum-* headers)
if req.Header.Get("x-amz-content-sha256") == "" {
req.Header.Set("x-amz-content-sha256", "UNSIGNED-PAYLOAD")
}
reqDateTime, _ := time.Parse(dateTimeFormat, req.Header.Get("x-amz-date"))
if reqDateTime.IsZero() {
reqDateTime = time.Now().UTC()
req.Header.Set("x-amz-date", reqDateTime.Format(dateTimeFormat))
}
req.Header.Set("host", req.URL.Host)
date := reqDateTime.Format("20060102")
dateTime := reqDateTime.Format(dateTimeFormat)
// 1. Create canonical request
// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv-create-signed-request.html#create-canonical-request
// ---------------------------------------------------------------
canonicalHeaders, signedHeaders := canonicalAndSignedHeaders(req)
canonicalParts := []string{
req.Method,
escapePath(req.URL.Path),
escapeQuery(req.URL.Query()),
canonicalHeaders,
signedHeaders,
req.Header.Get("x-amz-content-sha256"),
}
// 2. Create a hash of the canonical request
// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv-create-signed-request.html#create-canonical-request-hash
// ---------------------------------------------------------------
hashedCanonicalRequest := sha256Hex([]byte(strings.Join(canonicalParts, "\n")))
// 3. Create a string to sign
// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv-create-signed-request.html#create-string-to-sign
// ---------------------------------------------------------------
scope := strings.Join([]string{
date,
s3.Region,
awsS3ServiceCode,
awsTerminationString,
}, "/")
stringToSign := strings.Join([]string{
awsSignAlgorithm,
dateTime,
scope,
hashedCanonicalRequest,
}, "\n")
// 4. Derive a signing key for SigV4
// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv-create-signed-request.html#derive-signing-key
// ---------------------------------------------------------------
dateKey := hmacSHA256([]byte("AWS4"+s3.SecretKey), date)
dateRegionKey := hmacSHA256(dateKey, s3.Region)
dateRegionServiceKey := hmacSHA256(dateRegionKey, awsS3ServiceCode)
signingKey := hmacSHA256(dateRegionServiceKey, awsTerminationString)
signature := hex.EncodeToString(hmacSHA256(signingKey, stringToSign))
// 5. Add the signature to the request
// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv-create-signed-request.html#add-signature-to-request
authorization := fmt.Sprintf(
"%s Credential=%s/%s, SignedHeaders=%s, Signature=%s",
awsSignAlgorithm,
s3.AccessKey,
scope,
signedHeaders,
signature,
)
req.Header.Set("authorization", authorization)
}
func sha256Hex(content []byte) string {
h := sha256.New()
h.Write(content)
return hex.EncodeToString(h.Sum(nil))
}
func hmacSHA256(key []byte, content string) []byte {
mac := hmac.New(sha256.New, key)
mac.Write([]byte(content))
return mac.Sum(nil)
}
func canonicalAndSignedHeaders(req *http.Request) (string, string) {
signed := []string{}
canonical := map[string]string{}
for key, values := range req.Header {
normalizedKey := strings.ToLower(key)
if normalizedKey != "host" &&
normalizedKey != "content-type" &&
!strings.HasPrefix(normalizedKey, "x-amz-") {
continue
}
signed = append(signed, normalizedKey)
// for each value:
// trim any leading or trailing spaces
// convert sequential spaces to a single space
normalizedValues := make([]string, len(values))
for i, v := range values {
normalizedValues[i] = strings.ReplaceAll(strings.TrimSpace(v), " ", " ")
}
canonical[normalizedKey] = strings.Join(normalizedValues, ",")
}
slices.Sort(signed)
var sortedCanonical strings.Builder
for _, key := range signed {
sortedCanonical.WriteString(key)
sortedCanonical.WriteString(":")
sortedCanonical.WriteString(canonical[key])
sortedCanonical.WriteString("\n")
}
return sortedCanonical.String(), strings.Join(signed, ";")
}
// extractMetadata parses and extracts and the metadata from the specified request headers.
//
// The metadata keys are all lowercased and without the "x-amz-meta-" prefix.
func extractMetadata(headers http.Header) map[string]string {
result := map[string]string{}
for k, v := range headers {
if len(v) == 0 {
continue
}
metadataKey, ok := strings.CutPrefix(strings.ToLower(k), metadataPrefix)
if !ok {
continue
}
result[metadataKey] = v[0]
}
return result
}
// escapeQuery returns the URI encoded request query parameters according to the AWS S3 spec requirements
// (it is similar to url.Values.Encode but instead of url.QueryEscape uses our own escape method).
func escapeQuery(values url.Values) string {
if len(values) == 0 {
return ""
}
var buf strings.Builder
keys := make([]string, 0, len(values))
for k := range values {
keys = append(keys, k)
}
slices.Sort(keys)
for _, k := range keys {
vs := values[k]
keyEscaped := escape(k)
for _, values := range vs {
if buf.Len() > 0 {
buf.WriteByte('&')
}
buf.WriteString(keyEscaped)
buf.WriteByte('=')
buf.WriteString(escape(values))
}
}
return buf.String()
}
// escapePath returns the URI encoded request path according to the AWS S3 spec requirements.
func escapePath(path string) string {
parts := strings.Split(path, "/")
for i, part := range parts {
parts[i] = escape(part)
}
return strings.Join(parts, "/")
}
const upperhex = "0123456789ABCDEF"
// escape is similar to the std url.escape but implements the AWS [UriEncode requirements]:
// - URI encode every byte except the unreserved characters: 'A'-'Z', 'a'-'z', '0'-'9', '-', '.', '_', and '~'.
// - The space character is a reserved character and must be encoded as "%20" (and not as "+").
// - Each URI encoded byte is formed by a '%' and the two-digit hexadecimal value of the byte.
// - Letters in the hexadecimal value must be uppercase, for example "%1A".
//
// [UriEncode requirements]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv-create-signed-request.html
func escape(s string) string {
hexCount := 0
for i := 0; i < len(s); i++ {
c := s[i]
if shouldEscape(c) {
hexCount++
}
}
if hexCount == 0 {
return s
}
result := make([]byte, len(s)+2*hexCount)
j := 0
for i := 0; i < len(s); i++ {
c := s[i]
if shouldEscape(c) {
result[j] = '%'
result[j+1] = upperhex[c>>4]
result[j+2] = upperhex[c&15]
j += 3
} else {
result[j] = c
j++
}
}
return string(result)
}
// > "URI encode every byte except the unreserved characters: 'A'-'Z', 'a'-'z', '0'-'9', '-', '.', '_', and '~'."
func shouldEscape(c byte) bool {
isUnreserved := (c >= 'A' && c <= 'Z') ||
(c >= 'a' && c <= 'z') ||
(c >= '0' && c <= '9') ||
c == '-' || c == '.' || c == '_' || c == '~'
return !isUnreserved
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/internal/s3blob/s3/get_object_test.go | tools/filesystem/internal/s3blob/s3/get_object_test.go | package s3_test
import (
"context"
"encoding/json"
"io"
"net/http"
"strings"
"testing"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3/tests"
)
func TestS3GetObject(t *testing.T) {
t.Parallel()
httpClient := tests.NewClient(
&tests.RequestStub{
Method: http.MethodGet,
URL: "http://test_bucket.example.com/test_key",
Match: func(req *http.Request) bool {
return tests.ExpectHeaders(req.Header, map[string]string{
"test_header": "test",
"Authorization": "^.+Credential=123/.+$",
})
},
Response: &http.Response{
Header: http.Header{
"Last-Modified": []string{"Mon, 01 Feb 2025 03:04:05 GMT"},
"Cache-Control": []string{"test_cache"},
"Content-Disposition": []string{"test_disposition"},
"Content-Encoding": []string{"test_encoding"},
"Content-Language": []string{"test_language"},
"Content-Type": []string{"test_type"},
"Content-Range": []string{"test_range"},
"Etag": []string{"test_etag"},
"Content-Length": []string{"100"},
"x-amz-meta-AbC": []string{"test_meta_a"},
"x-amz-meta-Def": []string{"test_meta_b"},
},
Body: io.NopCloser(strings.NewReader("test")),
},
},
)
s3Client := &s3.S3{
Client: httpClient,
Region: "test_region",
Bucket: "test_bucket",
Endpoint: "http://example.com",
AccessKey: "123",
SecretKey: "abc",
}
resp, err := s3Client.GetObject(context.Background(), "test_key", func(r *http.Request) {
r.Header.Set("test_header", "test")
})
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()
err = httpClient.AssertNoRemaining()
if err != nil {
t.Fatal(err)
}
// check body
body, err := io.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}
bodyStr := string(body)
if bodyStr != "test" {
t.Fatalf("Expected body\n%q\ngot\n%q", "test", bodyStr)
}
// check serialized attributes
raw, err := json.Marshal(resp)
if err != nil {
t.Fatal(err)
}
rawStr := string(raw)
expected := `{"metadata":{"abc":"test_meta_a","def":"test_meta_b"},"lastModified":"2025-02-01T03:04:05Z","cacheControl":"test_cache","contentDisposition":"test_disposition","contentEncoding":"test_encoding","contentLanguage":"test_language","contentType":"test_type","contentRange":"test_range","etag":"test_etag","contentLength":100}`
if rawStr != expected {
t.Fatalf("Expected attributes\n%s\ngot\n%s", expected, rawStr)
}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/internal/s3blob/s3/list_objects.go | tools/filesystem/internal/s3blob/s3/list_objects.go | package s3
import (
"context"
"encoding/xml"
"net/http"
"net/url"
"strconv"
"time"
)
// ListParams defines optional parameters for the ListObject request.
type ListParams struct {
// ContinuationToken indicates that the list is being continued on this bucket with a token.
// ContinuationToken is obfuscated and is not a real key.
// You can use this ContinuationToken for pagination of the list results.
ContinuationToken string `json:"continuationToken"`
// Delimiter is a character that you use to group keys.
//
// For directory buckets, "/" is the only supported delimiter.
Delimiter string `json:"delimiter"`
// Prefix limits the response to keys that begin with the specified prefix.
Prefix string `json:"prefix"`
// Encoding type is used to encode the object keys in the response.
// Responses are encoded only in UTF-8.
// An object key can contain any Unicode character.
// However, the XML 1.0 parser can't parse certain characters,
// such as characters with an ASCII value from 0 to 10.
// For characters that aren't supported in XML 1.0, you can add
// this parameter to request that S3 encode the keys in the response.
//
// Valid Values: url
EncodingType string `json:"encodingType"`
// StartAfter is where you want S3 to start listing from.
// S3 starts listing after this specified key.
// StartAfter can be any key in the bucket.
//
// This functionality is not supported for directory buckets.
StartAfter string `json:"startAfter"`
// MaxKeys Sets the maximum number of keys returned in the response.
// By default, the action returns up to 1,000 key names.
// The response might contain fewer keys but will never contain more.
MaxKeys int `json:"maxKeys"`
// FetchOwner returns the owner field with each key in the result.
FetchOwner bool `json:"fetchOwner"`
}
// Encode encodes the parameters in a properly formatted query string.
func (l *ListParams) Encode() string {
query := url.Values{}
query.Add("list-type", "2")
if l.ContinuationToken != "" {
query.Add("continuation-token", l.ContinuationToken)
}
if l.Delimiter != "" {
query.Add("delimiter", l.Delimiter)
}
if l.Prefix != "" {
query.Add("prefix", l.Prefix)
}
if l.EncodingType != "" {
query.Add("encoding-type", l.EncodingType)
}
if l.FetchOwner {
query.Add("fetch-owner", "true")
}
if l.MaxKeys > 0 {
query.Add("max-keys", strconv.Itoa(l.MaxKeys))
}
if l.StartAfter != "" {
query.Add("start-after", l.StartAfter)
}
return query.Encode()
}
// ListObjects retrieves paginated objects list.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html
func (s3 *S3) ListObjects(ctx context.Context, params ListParams, optReqFuncs ...func(*http.Request)) (*ListObjectsResponse, error) {
req, err := http.NewRequestWithContext(ctx, http.MethodGet, s3.URL("?"+params.Encode()), nil)
if err != nil {
return nil, err
}
// apply optional request funcs
for _, fn := range optReqFuncs {
if fn != nil {
fn(req)
}
}
resp, err := s3.SignAndSend(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
result := &ListObjectsResponse{}
err = xml.NewDecoder(resp.Body).Decode(result)
if err != nil {
return nil, err
}
return result, nil
}
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html#API_ListObjectsV2_ResponseSyntax
type ListObjectsResponse struct {
XMLName xml.Name `json:"-" xml:"ListBucketResult"`
EncodingType string `json:"encodingType" xml:"EncodingType"`
Name string `json:"name" xml:"Name"`
Prefix string `json:"prefix" xml:"Prefix"`
Delimiter string `json:"delimiter" xml:"Delimiter"`
ContinuationToken string `json:"continuationToken" xml:"ContinuationToken"`
NextContinuationToken string `json:"nextContinuationToken" xml:"NextContinuationToken"`
StartAfter string `json:"startAfter" xml:"StartAfter"`
CommonPrefixes []*ListObjectCommonPrefix `json:"commonPrefixes" xml:"CommonPrefixes"`
Contents []*ListObjectContent `json:"contents" xml:"Contents"`
KeyCount int `json:"keyCount" xml:"KeyCount"`
MaxKeys int `json:"maxKeys" xml:"MaxKeys"`
IsTruncated bool `json:"isTruncated" xml:"IsTruncated"`
}
type ListObjectCommonPrefix struct {
Prefix string `json:"prefix" xml:"Prefix"`
}
// ListObjectContent is a single <Contents> (object) entry of a
// ListObjectsV2 response.
//
// Note: the field order is kept as-is because it determines the key
// order of the JSON serialization of the entry.
type ListObjectContent struct {
	// Owner holds the object's owner information
	// (see also the ListParams.FetchOwner option).
	Owner struct {
		DisplayName string `json:"displayName" xml:"DisplayName"`
		ID          string `json:"id" xml:"ID"`
	} `json:"owner" xml:"Owner"`

	ChecksumAlgorithm string `json:"checksumAlgorithm" xml:"ChecksumAlgorithm"`
	ETag              string `json:"etag" xml:"ETag"`
	Key               string `json:"key" xml:"Key"`
	StorageClass      string `json:"storageClass" xml:"StorageClass"`
	LastModified      time.Time `json:"lastModified" xml:"LastModified"`

	// RestoreStatus reflects the archive restoration state of the object (if any).
	RestoreStatus struct {
		RestoreExpiryDate   time.Time `json:"restoreExpiryDate" xml:"RestoreExpiryDate"`
		IsRestoreInProgress bool      `json:"isRestoreInProgress" xml:"IsRestoreInProgress"`
	} `json:"restoreStatus" xml:"RestoreStatus"`

	// Size is the object size in bytes.
	Size int64 `json:"size" xml:"Size"`
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/internal/s3blob/s3/list_objects_test.go | tools/filesystem/internal/s3blob/s3/list_objects_test.go | package s3_test
import (
"context"
"encoding/json"
"io"
"net/http"
"strings"
"testing"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3/tests"
)
// TestS3ListParamsEncode verifies the query string serialization of ListParams.
func TestS3ListParamsEncode(t *testing.T) {
	t.Parallel()

	testCases := []struct {
		name     string
		params   s3.ListParams
		expected string
	}{
		{
			"blank",
			s3.ListParams{},
			"list-type=2",
		},
		{
			"filled",
			s3.ListParams{
				ContinuationToken: "test_ct",
				Delimiter:         "test_delimiter",
				Prefix:            "test_prefix",
				EncodingType:      "test_et",
				StartAfter:        "test_sa",
				MaxKeys:           1,
				FetchOwner:        true,
			},
			"continuation-token=test_ct&delimiter=test_delimiter&encoding-type=test_et&fetch-owner=true&list-type=2&max-keys=1&prefix=test_prefix&start-after=test_sa",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			encoded := tc.params.Encode()
			if encoded != tc.expected {
				t.Fatalf("Expected\n%s\ngot\n%s", tc.expected, encoded)
			}
		})
	}
}
// TestS3ListObjects verifies that ListObjects sends a properly signed
// ListObjectsV2 request and correctly parses the XML response payload.
func TestS3ListObjects(t *testing.T) {
	t.Parallel()

	listParams := s3.ListParams{
		ContinuationToken: "test_ct",
		Delimiter:         "test_delimiter",
		Prefix:            "test_prefix",
		EncodingType:      "test_et",
		StartAfter:        "test_sa",
		MaxKeys:           10,
		FetchOwner:        true,
	}

	// stub the expected request and its canned XML response
	httpClient := tests.NewClient(
		&tests.RequestStub{
			Method: http.MethodGet,
			URL:    "http://test_bucket.example.com/?" + listParams.Encode(),
			Match: func(req *http.Request) bool {
				return tests.ExpectHeaders(req.Header, map[string]string{
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				Body: io.NopCloser(strings.NewReader(`
<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>example</Name>
<EncodingType>test_encoding</EncodingType>
<Prefix>a/</Prefix>
<Delimiter>/</Delimiter>
<ContinuationToken>ct</ContinuationToken>
<NextContinuationToken>nct</NextContinuationToken>
<StartAfter>example0.txt</StartAfter>
<KeyCount>1</KeyCount>
<MaxKeys>3</MaxKeys>
<IsTruncated>true</IsTruncated>
<Contents>
<Key>example1.txt</Key>
<LastModified>2025-01-01T01:02:03.123Z</LastModified>
<ChecksumAlgorithm>test_ca</ChecksumAlgorithm>
<ETag>test_etag1</ETag>
<Size>123</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<DisplayName>owner_dn</DisplayName>
<ID>owner_id</ID>
</Owner>
<RestoreStatus>
<RestoreExpiryDate>2025-01-02T01:02:03.123Z</RestoreExpiryDate>
<IsRestoreInProgress>true</IsRestoreInProgress>
</RestoreStatus>
</Contents>
<Contents>
<Key>example2.txt</Key>
<LastModified>2025-01-02T01:02:03.123Z</LastModified>
<ETag>test_etag2</ETag>
<Size>456</Size>
<StorageClass>STANDARD</StorageClass>
</Contents>
<CommonPrefixes>
<Prefix>a/b/</Prefix>
</CommonPrefixes>
<CommonPrefixes>
<Prefix>a/c/</Prefix>
</CommonPrefixes>
</ListBucketResult>
`)),
			},
		},
	)

	s3Client := &s3.S3{
		Client:    httpClient,
		Region:    "test_region",
		Bucket:    "test_bucket",
		Endpoint:  "http://example.com",
		AccessKey: "123",
		SecretKey: "abc",
	}

	resp, err := s3Client.ListObjects(context.Background(), listParams, func(r *http.Request) {
		r.Header.Set("test_header", "test")
	})
	if err != nil {
		t.Fatal(err)
	}

	// ensure that the registered stub was consumed
	err = httpClient.AssertNoRemaining()
	if err != nil {
		t.Fatal(err)
	}

	// compare the parsed result via its JSON serialization
	raw, err := json.Marshal(resp)
	if err != nil {
		t.Fatal(err)
	}
	rawStr := string(raw)

	expected := `{"encodingType":"test_encoding","name":"example","prefix":"a/","delimiter":"/","continuationToken":"ct","nextContinuationToken":"nct","startAfter":"example0.txt","commonPrefixes":[{"prefix":"a/b/"},{"prefix":"a/c/"}],"contents":[{"owner":{"displayName":"owner_dn","id":"owner_id"},"checksumAlgorithm":"test_ca","etag":"test_etag1","key":"example1.txt","storageClass":"STANDARD","lastModified":"2025-01-01T01:02:03.123Z","restoreStatus":{"restoreExpiryDate":"2025-01-02T01:02:03.123Z","isRestoreInProgress":true},"size":123},{"owner":{"displayName":"","id":""},"checksumAlgorithm":"","etag":"test_etag2","key":"example2.txt","storageClass":"STANDARD","lastModified":"2025-01-02T01:02:03.123Z","restoreStatus":{"restoreExpiryDate":"0001-01-01T00:00:00Z","isRestoreInProgress":false},"size":456}],"keyCount":1,"maxKeys":3,"isTruncated":true}`

	if rawStr != expected {
		t.Fatalf("Expected response\n%s\ngot\n%s", expected, rawStr)
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/internal/s3blob/s3/get_object.go | tools/filesystem/internal/s3blob/s3/get_object.go | package s3
import (
"context"
"io"
"net/http"
)
// GetObjectResponse is the result of a GetObject call.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html#API_GetObject_ResponseElements
type GetObjectResponse struct {
	// Body holds the object content stream.
	//
	// It must be closed by the caller after being consumed.
	Body io.ReadCloser `json:"-" xml:"-"`

	// HeadObjectResponse exposes the parsed object header values
	// (metadata, content type, length, etc.).
	HeadObjectResponse
}
// GetObject retrieves a single object by its key.
//
// NB! Make sure to call GetObjectResponse.Body.Close() after done working with the result.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
func (s3 *S3) GetObject(ctx context.Context, key string, optFuncs ...func(*http.Request)) (*GetObjectResponse, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, s3.URL(key), nil)
	if err != nil {
		return nil, err
	}

	// apply the optional request modifiers
	for _, optFunc := range optFuncs {
		if optFunc == nil {
			continue
		}
		optFunc(req)
	}

	res, err := s3.SignAndSend(req)
	if err != nil {
		return nil, err
	}

	// note: the body is intentionally left open - closing it after
	// consuming the object content is the caller's responsibility
	result := new(GetObjectResponse)
	result.Body = res.Body
	result.load(res.Header)

	return result, nil
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/internal/s3blob/s3/error_test.go | tools/filesystem/internal/s3blob/s3/error_test.go | package s3_test
import (
"encoding/json"
"encoding/xml"
"testing"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
)
// TestResponseErrorSerialization checks that a ResponseError can be
// populated from an XML error payload and serialized back to JSON.
func TestResponseErrorSerialization(t *testing.T) {
	raw := `
<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>test_code</Code>
<Message>test_message</Message>
<RequestId>test_request_id</RequestId>
<Resource>test_resource</Resource>
</Error>
`

	// prefill Status and Raw to ensure that the XML values are
	// unmarshaled on top of an already partially populated struct
	respErr := &s3.ResponseError{
		Status: 123,
		Raw:    []byte("test"),
	}

	err := xml.Unmarshal([]byte(raw), &respErr)
	if err != nil {
		t.Fatal(err)
	}

	jsonRaw, err := json.Marshal(respErr)
	if err != nil {
		t.Fatal(err)
	}
	jsonStr := string(jsonRaw)

	// note: the Raw field is expected to be excluded from the JSON output
	expected := `{"code":"test_code","message":"test_message","requestId":"test_request_id","resource":"test_resource","status":123}`

	if expected != jsonStr {
		t.Fatalf("Expected JSON\n%s\ngot\n%s", expected, jsonStr)
	}
}
// TestResponseErrorErrorInterface verifies the ResponseError.Error() string formatting.
func TestResponseErrorErrorInterface(t *testing.T) {
	testCases := []struct {
		name     string
		err      *s3.ResponseError
		expected string
	}{
		{
			"empty",
			&s3.ResponseError{},
			"0 S3ResponseError",
		},
		{
			"with code and message (nil raw)",
			&s3.ResponseError{
				Status:  123,
				Code:    "test_code",
				Message: "test_message",
			},
			"123 test_code: test_message",
		},
		{
			"with code and message (non-nil raw)",
			&s3.ResponseError{
				Status:  123,
				Code:    "test_code",
				Message: "test_message",
				Raw:     []byte("test_raw"),
			},
			"123 test_code: test_message\n(RAW: test_raw)",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			got := tc.err.Error()
			if got != tc.expected {
				t.Fatalf("Expected\n%s\ngot\n%s", tc.expected, got)
			}
		})
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/internal/s3blob/s3/uploader_test.go | tools/filesystem/internal/s3blob/s3/uploader_test.go | package s3_test
import (
"context"
"io"
"net/http"
"strconv"
"strings"
"testing"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3/tests"
)
// TestUploaderRequiredFields ensures that Upload fails unless
// S3, Key and Payload are all set.
func TestUploaderRequiredFields(t *testing.T) {
	t.Parallel()

	s3Client := &s3.S3{
		Client:    tests.NewClient(&tests.RequestStub{Method: "PUT", URL: `^.+$`}), // match every upload
		Region:    "test_region",
		Bucket:    "test_bucket",
		Endpoint:  "http://example.com",
		AccessKey: "123",
		SecretKey: "abc",
	}

	payload := strings.NewReader("test")

	testCases := []struct {
		name          string
		uploader      *s3.Uploader
		expectedError bool
	}{
		{"blank", &s3.Uploader{}, true},
		{"no Key", &s3.Uploader{S3: s3Client, Payload: payload}, true},
		{"no S3", &s3.Uploader{Key: "abc", Payload: payload}, true},
		{"no Payload", &s3.Uploader{S3: s3Client, Key: "abc"}, true},
		{"with S3, Key and Payload", &s3.Uploader{S3: s3Client, Key: "abc", Payload: payload}, false},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			err := tc.uploader.Upload(context.Background())

			hasErr := err != nil
			if hasErr != tc.expectedError {
				t.Fatalf("Expected hasErr %v, got %v", tc.expectedError, hasErr)
			}
		})
	}
}
// TestUploaderSingleUpload verifies that a payload smaller than MinPartSize
// is sent as a single signed PutObject request incl. the metadata headers.
func TestUploaderSingleUpload(t *testing.T) {
	t.Parallel()

	httpClient := tests.NewClient(
		&tests.RequestStub{
			Method: http.MethodPut,
			URL:    "http://test_bucket.example.com/test_key",
			Match: func(req *http.Request) bool {
				body, err := io.ReadAll(req.Body)
				if err != nil {
					return false
				}

				return string(body) == "abcdefg" && tests.ExpectHeaders(req.Header, map[string]string{
					"Content-Length": "7",
					"x-amz-meta-a":   "123",
					"x-amz-meta-b":   "456",
					"test_header":    "test",
					"Authorization":  "^.+Credential=123/.+$",
				})
			},
		},
	)

	uploader := &s3.Uploader{
		S3: &s3.S3{
			Client:    httpClient,
			Region:    "test_region",
			Bucket:    "test_bucket",
			Endpoint:  "http://example.com",
			AccessKey: "123",
			SecretKey: "abc",
		},
		Key:         "test_key",
		Payload:     strings.NewReader("abcdefg"),
		Metadata:    map[string]string{"a": "123", "b": "456"},
		MinPartSize: 8, // larger than the 7 bytes payload -> single upload path
	}

	err := uploader.Upload(context.Background(), func(r *http.Request) {
		r.Header.Set("test_header", "test")
	})
	if err != nil {
		t.Fatal(err)
	}

	// ensure that the stubbed request was consumed
	err = httpClient.AssertNoRemaining()
	if err != nil {
		t.Fatal(err)
	}
}
// TestUploaderMultipartUploadSuccess verifies the happy multipart path:
// init -> 3 part uploads (2 full parts + 1 short tail) -> complete,
// for several MaxConcurrency values (incl. the <=0 default fallback).
func TestUploaderMultipartUploadSuccess(t *testing.T) {
	t.Parallel()

	maxConcurrencies := []int{-1, 0, 1, 10}

	for _, mc := range maxConcurrencies {
		t.Run("MaxConcurrency_"+strconv.Itoa(mc), func(t *testing.T) {
			httpClient := tests.NewClient(
				// multipart init
				&tests.RequestStub{
					Method: http.MethodPost,
					URL:    "http://test_bucket.example.com/test_key?uploads",
					Match: func(req *http.Request) bool {
						return tests.ExpectHeaders(req.Header, map[string]string{
							"x-amz-meta-a":  "123",
							"x-amz-meta-b":  "456",
							"test_header":   "test",
							"Authorization": "^.+Credential=123/.+$",
						})
					},
					Response: &http.Response{
						Body: io.NopCloser(strings.NewReader(`
<?xml version="1.0" encoding="UTF-8"?>
<InitiateMultipartUploadResult>
<Bucket>test_bucket</Bucket>
<Key>test_key</Key>
<UploadId>test_id</UploadId>
</InitiateMultipartUploadResult>
`)),
					},
				},
				// part 1 ("abc")
				&tests.RequestStub{
					Method: http.MethodPut,
					URL:    "http://test_bucket.example.com/test_key?partNumber=1&uploadId=test_id",
					Match: func(req *http.Request) bool {
						body, err := io.ReadAll(req.Body)
						if err != nil {
							return false
						}

						return string(body) == "abc" && tests.ExpectHeaders(req.Header, map[string]string{
							"Content-Length": "3",
							"test_header":    "test",
							"Authorization":  "^.+Credential=123/.+$",
						})
					},
					Response: &http.Response{
						Header: http.Header{"Etag": []string{"etag1"}},
					},
				},
				// part 2 ("def")
				&tests.RequestStub{
					Method: http.MethodPut,
					URL:    "http://test_bucket.example.com/test_key?partNumber=2&uploadId=test_id",
					Match: func(req *http.Request) bool {
						body, err := io.ReadAll(req.Body)
						if err != nil {
							return false
						}

						return string(body) == "def" && tests.ExpectHeaders(req.Header, map[string]string{
							"Content-Length": "3",
							"test_header":    "test",
							"Authorization":  "^.+Credential=123/.+$",
						})
					},
					Response: &http.Response{
						Header: http.Header{"Etag": []string{"etag2"}},
					},
				},
				// part 3 - the short tail ("g")
				&tests.RequestStub{
					Method: http.MethodPut,
					URL:    "http://test_bucket.example.com/test_key?partNumber=3&uploadId=test_id",
					Match: func(req *http.Request) bool {
						body, err := io.ReadAll(req.Body)
						if err != nil {
							return false
						}

						return string(body) == "g" && tests.ExpectHeaders(req.Header, map[string]string{
							"Content-Length": "1",
							"test_header":    "test",
							"Authorization":  "^.+Credential=123/.+$",
						})
					},
					Response: &http.Response{
						Header: http.Header{"Etag": []string{"etag3"}},
					},
				},
				// multipart complete with the sorted parts list
				&tests.RequestStub{
					Method: http.MethodPost,
					URL:    "http://test_bucket.example.com/test_key?uploadId=test_id",
					Match: func(req *http.Request) bool {
						body, err := io.ReadAll(req.Body)
						if err != nil {
							return false
						}

						expected := `<CompleteMultipartUpload><Part><ETag>etag1</ETag><PartNumber>1</PartNumber></Part><Part><ETag>etag2</ETag><PartNumber>2</PartNumber></Part><Part><ETag>etag3</ETag><PartNumber>3</PartNumber></Part></CompleteMultipartUpload>`

						return strings.Contains(string(body), expected) && tests.ExpectHeaders(req.Header, map[string]string{
							"test_header":   "test",
							"Authorization": "^.+Credential=123/.+$",
						})
					},
				},
			)

			uploader := &s3.Uploader{
				S3: &s3.S3{
					Client:    httpClient,
					Region:    "test_region",
					Bucket:    "test_bucket",
					Endpoint:  "http://example.com",
					AccessKey: "123",
					SecretKey: "abc",
				},
				Key:            "test_key",
				Payload:        strings.NewReader("abcdefg"),
				Metadata:       map[string]string{"a": "123", "b": "456"},
				MinPartSize:    3, // payload (7 bytes) > MinPartSize -> multipart path
				MaxConcurrency: mc,
			}

			err := uploader.Upload(context.Background(), func(r *http.Request) {
				r.Header.Set("test_header", "test")
			})
			if err != nil {
				t.Fatal(err)
			}

			// ensure that all stubbed requests were consumed
			err = httpClient.AssertNoRemaining()
			if err != nil {
				t.Fatal(err)
			}
		})
	}
}
// TestUploaderMultipartUploadPartFailure verifies that a failed part
// upload results in an error and triggers a multipart abort request.
func TestUploaderMultipartUploadPartFailure(t *testing.T) {
	t.Parallel()

	httpClient := tests.NewClient(
		// multipart init
		&tests.RequestStub{
			Method: http.MethodPost,
			URL:    "http://test_bucket.example.com/test_key?uploads",
			Match: func(req *http.Request) bool {
				return tests.ExpectHeaders(req.Header, map[string]string{
					"x-amz-meta-a":  "123",
					"x-amz-meta-b":  "456",
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				Body: io.NopCloser(strings.NewReader(`
<?xml version="1.0" encoding="UTF-8"?>
<InitiateMultipartUploadResult>
<Bucket>test_bucket</Bucket>
<Key>test_key</Key>
<UploadId>test_id</UploadId>
</InitiateMultipartUploadResult>
`)),
			},
		},
		// part 1 - succeeds
		&tests.RequestStub{
			Method: http.MethodPut,
			URL:    "http://test_bucket.example.com/test_key?partNumber=1&uploadId=test_id",
			Match: func(req *http.Request) bool {
				body, err := io.ReadAll(req.Body)
				if err != nil {
					return false
				}

				return string(body) == "abc" && tests.ExpectHeaders(req.Header, map[string]string{
					"Content-Length": "3",
					"test_header":    "test",
					"Authorization":  "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				Header: http.Header{"Etag": []string{"etag1"}},
			},
		},
		// part 2 - fails with a 400 response
		&tests.RequestStub{
			Method: http.MethodPut,
			URL:    "http://test_bucket.example.com/test_key?partNumber=2&uploadId=test_id",
			Match: func(req *http.Request) bool {
				return tests.ExpectHeaders(req.Header, map[string]string{
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				StatusCode: 400,
			},
		},
		// expected multipart abort cleanup request
		&tests.RequestStub{
			Method: http.MethodDelete,
			URL:    "http://test_bucket.example.com/test_key?uploadId=test_id",
			Match: func(req *http.Request) bool {
				return tests.ExpectHeaders(req.Header, map[string]string{
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
		},
	)

	uploader := &s3.Uploader{
		S3: &s3.S3{
			Client:    httpClient,
			Region:    "test_region",
			Bucket:    "test_bucket",
			Endpoint:  "http://example.com",
			AccessKey: "123",
			SecretKey: "abc",
		},
		Key:         "test_key",
		Payload:     strings.NewReader("abcdefg"),
		Metadata:    map[string]string{"a": "123", "b": "456"},
		MinPartSize: 3, // force the multipart path
	}

	err := uploader.Upload(context.Background(), func(r *http.Request) {
		r.Header.Set("test_header", "test")
	})
	if err == nil {
		t.Fatal("Expected non-nil error")
	}

	// ensure that all stubbed requests (incl. the abort) were consumed
	err = httpClient.AssertNoRemaining()
	if err != nil {
		t.Fatal(err)
	}
}
// TestUploaderMultipartUploadCompleteFailure verifies that a failed
// complete request results in an error and triggers a multipart abort request.
func TestUploaderMultipartUploadCompleteFailure(t *testing.T) {
	t.Parallel()

	httpClient := tests.NewClient(
		// multipart init
		&tests.RequestStub{
			Method: http.MethodPost,
			URL:    "http://test_bucket.example.com/test_key?uploads",
			Match: func(req *http.Request) bool {
				return tests.ExpectHeaders(req.Header, map[string]string{
					"x-amz-meta-a":  "123",
					"x-amz-meta-b":  "456",
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				Body: io.NopCloser(strings.NewReader(`
<?xml version="1.0" encoding="UTF-8"?>
<InitiateMultipartUploadResult>
<Bucket>test_bucket</Bucket>
<Key>test_key</Key>
<UploadId>test_id</UploadId>
</InitiateMultipartUploadResult>
`)),
			},
		},
		// part 1 - succeeds
		&tests.RequestStub{
			Method: http.MethodPut,
			URL:    "http://test_bucket.example.com/test_key?partNumber=1&uploadId=test_id",
			Match: func(req *http.Request) bool {
				body, err := io.ReadAll(req.Body)
				if err != nil {
					return false
				}

				return string(body) == "abc" && tests.ExpectHeaders(req.Header, map[string]string{
					"Content-Length": "3",
					"test_header":    "test",
					"Authorization":  "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				Header: http.Header{"Etag": []string{"etag1"}},
			},
		},
		// part 2 - succeeds
		&tests.RequestStub{
			Method: http.MethodPut,
			URL:    "http://test_bucket.example.com/test_key?partNumber=2&uploadId=test_id",
			Match: func(req *http.Request) bool {
				body, err := io.ReadAll(req.Body)
				if err != nil {
					return false
				}

				return string(body) == "def" && tests.ExpectHeaders(req.Header, map[string]string{
					"Content-Length": "3",
					"test_header":    "test",
					"Authorization":  "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				Header: http.Header{"Etag": []string{"etag2"}},
			},
		},
		// multipart complete - fails with a 400 response
		&tests.RequestStub{
			Method: http.MethodPost,
			URL:    "http://test_bucket.example.com/test_key?uploadId=test_id",
			Match: func(req *http.Request) bool {
				return tests.ExpectHeaders(req.Header, map[string]string{
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				StatusCode: 400,
			},
		},
		// expected multipart abort cleanup request
		&tests.RequestStub{
			Method: http.MethodDelete,
			URL:    "http://test_bucket.example.com/test_key?uploadId=test_id",
			Match: func(req *http.Request) bool {
				return tests.ExpectHeaders(req.Header, map[string]string{
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
		},
	)

	uploader := &s3.Uploader{
		S3: &s3.S3{
			Client:    httpClient,
			Region:    "test_region",
			Bucket:    "test_bucket",
			Endpoint:  "http://example.com",
			AccessKey: "123",
			SecretKey: "abc",
		},
		Key:         "test_key",
		Payload:     strings.NewReader("abcdef"),
		Metadata:    map[string]string{"a": "123", "b": "456"},
		MinPartSize: 3, // force the multipart path
	}

	err := uploader.Upload(context.Background(), func(r *http.Request) {
		r.Header.Set("test_header", "test")
	})
	if err == nil {
		t.Fatal("Expected non-nil error")
	}

	// ensure that all stubbed requests (incl. the abort) were consumed
	err = httpClient.AssertNoRemaining()
	if err != nil {
		t.Fatal(err)
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/internal/s3blob/s3/uploader.go | tools/filesystem/internal/s3blob/s3/uploader.go | package s3
import (
"bytes"
"context"
"encoding/xml"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"slices"
"strconv"
"strings"
"sync"
"golang.org/x/sync/errgroup"
)
// ErrUsedUploader is returned when an Uploader instance is reused
// after a previous Upload call has already consumed it.
var ErrUsedUploader = errors.New("the Uploader has been already used")

const (
	// defaultMaxConcurrency is the fallback for Uploader.MaxConcurrency.
	defaultMaxConcurrency int = 5

	// defaultMinPartSize is the fallback for Uploader.MinPartSize (~6MB).
	defaultMinPartSize int = 6 << 20
)
// Uploader handles the upload of a single S3 object.
//
// If the Payload size is less than the configured MinPartSize it sends
// a single (PutObject) request, otherwise performs chunked/multipart upload.
type Uploader struct {
	// S3 is the S3 client instance performing the upload object request (required).
	S3 *S3

	// Payload is the object content to upload (required).
	Payload io.Reader

	// Key is the destination key of the uploaded object (required).
	Key string

	// Metadata specifies the optional metadata to write with the object upload.
	Metadata map[string]string

	// MaxConcurrency specifies the max number of workers to use when
	// performing chunked/multipart upload.
	//
	// If zero or negative, defaults to 5.
	//
	// This option is used only when the Payload size is > MinPartSize.
	MaxConcurrency int

	// MinPartSize specifies the min Payload size required to perform
	// chunked/multipart upload.
	//
	// If zero or negative, defaults to ~6MB.
	MinPartSize int

	// uploadId is the multipart upload id returned by the init request.
	uploadId string

	// uploadedParts accumulates the successfully uploaded parts.
	uploadedParts []*mpPart

	// lastPartNumber is the number assigned to the most recently read part.
	lastPartNumber int

	mu sync.Mutex // guards lastPartNumber and the uploadedParts slice

	// used marks the Uploader as consumed (see ErrUsedUploader).
	used bool
}
// Upload processes the current Uploader instance.
//
// Users can specify an optional optReqFuncs that will be passed down to all Upload internal requests
// (single upload, multipart init, multipart parts upload, multipart complete, multipart abort).
//
// Note that after this call the Uploader should be discarded (aka. no longer can be used).
func (u *Uploader) Upload(ctx context.Context, optReqFuncs ...func(*http.Request)) error {
	if u.used {
		return ErrUsedUploader
	}

	err := u.validateAndNormalize()
	if err != nil {
		return err
	}

	// read the first chunk to decide between single and multipart upload
	// (io.EOF here just means an empty payload and is not treated as a failure)
	initPart, _, err := u.nextPart()
	if err != nil && !errors.Is(err, io.EOF) {
		return err
	}

	// the entire payload fits in a single part -> plain PutObject request
	if len(initPart) < u.MinPartSize {
		return u.singleUpload(ctx, initPart, optReqFuncs...)
	}

	err = u.multipartInit(ctx, optReqFuncs...)
	if err != nil {
		return fmt.Errorf("multipart init error: %w", err)
	}

	err = u.multipartUpload(ctx, initPart, optReqFuncs...)
	if err != nil {
		// best effort cleanup of the already initialized multipart upload
		return errors.Join(
			u.multipartAbort(ctx, optReqFuncs...),
			fmt.Errorf("multipart upload error: %w", err),
		)
	}

	err = u.multipartComplete(ctx, optReqFuncs...)
	if err != nil {
		// best effort cleanup of the already initialized multipart upload
		return errors.Join(
			u.multipartAbort(ctx, optReqFuncs...),
			fmt.Errorf("multipart complete error: %w", err),
		)
	}

	return nil
}
// -------------------------------------------------------------------
// validateAndNormalize checks the required Uploader fields and
// applies the documented default values for the optional ones.
func (u *Uploader) validateAndNormalize() error {
	if u.S3 == nil {
		return errors.New("Uploader.S3 must be a non-empty and properly initialized S3 client instance")
	}

	if u.Key == "" {
		return errors.New("Uploader.Key is required")
	}

	if u.Payload == nil {
		// note: fixed error message typo ("non-nill" -> "non-nil")
		return errors.New("Uploader.Payload must be non-nil")
	}

	if u.MaxConcurrency <= 0 {
		u.MaxConcurrency = defaultMaxConcurrency
	}

	if u.MinPartSize <= 0 {
		u.MinPartSize = defaultMinPartSize
	}

	return nil
}
// singleUpload performs a plain PutObject request with the (already read) payload part.
//
// It also marks the Uploader as used so that subsequent Upload calls
// return ErrUsedUploader — previously only the multipart flow
// (multipartAbort/multipartComplete) set the flag, allowing a
// small-payload Uploader to be silently reused even though its
// Payload reader was already drained.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
func (u *Uploader) singleUpload(ctx context.Context, part []byte, optReqFuncs ...func(*http.Request)) error {
	if u.used {
		return ErrUsedUploader
	}
	// the payload reader is consumed - the instance must not be reused
	u.used = true

	req, err := http.NewRequestWithContext(ctx, http.MethodPut, u.S3.URL(u.Key), bytes.NewReader(part))
	if err != nil {
		return err
	}

	req.Header.Set("Content-Length", strconv.Itoa(len(part)))

	// write the custom user metadata headers
	for k, v := range u.Metadata {
		req.Header.Set(metadataPrefix+k, v)
	}

	// apply optional request funcs
	for _, fn := range optReqFuncs {
		if fn != nil {
			fn(req)
		}
	}

	resp, err := u.S3.SignAndSend(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	return nil
}
// -------------------------------------------------------------------
// mpPart describes a single uploaded part of a multipart upload.
//
// It doubles as the <Part> XML element submitted with the
// multipart complete request (field order matters for the output).
type mpPart struct {
	XMLName xml.Name `xml:"Part"`

	ETag       string `xml:"ETag"`
	PartNumber int    `xml:"PartNumber"`
}
// multipartInit starts a new multipart upload and stores the returned upload id.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html
func (u *Uploader) multipartInit(ctx context.Context, optReqFuncs ...func(*http.Request)) error {
	if u.used {
		return ErrUsedUploader
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, u.S3.URL(u.Key+"?uploads"), nil)
	if err != nil {
		return err
	}

	// write the custom user metadata on the init request
	for name, value := range u.Metadata {
		req.Header.Set(metadataPrefix+name, value)
	}

	// apply the optional request modifiers
	for _, reqFunc := range optReqFuncs {
		if reqFunc == nil {
			continue
		}
		reqFunc(req)
	}

	resp, err := u.S3.SignAndSend(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	body := &struct {
		XMLName  xml.Name `xml:"InitiateMultipartUploadResult"`
		UploadId string   `xml:"UploadId"`
	}{}
	if err := xml.NewDecoder(resp.Body).Decode(body); err != nil {
		return err
	}

	u.uploadId = body.UploadId

	return nil
}
// multipartAbort cancels a previously initialized multipart upload
// and marks the Uploader as used.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html
func (u *Uploader) multipartAbort(ctx context.Context, optReqFuncs ...func(*http.Request)) error {
	u.mu.Lock()
	defer u.mu.Unlock()

	u.used = true

	// ensure that the specified abort context is always valid to allow cleanup
	// (the upload may have failed exactly because ctx was canceled)
	var abortCtx = ctx
	if abortCtx.Err() != nil {
		abortCtx = context.Background()
	}

	query := url.Values{"uploadId": []string{u.uploadId}}

	req, err := http.NewRequestWithContext(abortCtx, http.MethodDelete, u.S3.URL(u.Key+"?"+query.Encode()), nil)
	if err != nil {
		return err
	}

	// apply optional request funcs
	for _, fn := range optReqFuncs {
		if fn != nil {
			fn(req)
		}
	}

	resp, err := u.S3.SignAndSend(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	return nil
}
// multipartComplete finalizes the multipart upload by submitting the
// list of successfully uploaded parts and marks the Uploader as used.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
func (u *Uploader) multipartComplete(ctx context.Context, optReqFuncs ...func(*http.Request)) error {
	u.mu.Lock()
	defer u.mu.Unlock()

	u.used = true

	// the list of parts must be sorted in ascending order
	slices.SortFunc(u.uploadedParts, func(a, b *mpPart) int {
		switch {
		case a.PartNumber < b.PartNumber:
			return -1
		case a.PartNumber > b.PartNumber:
			return 1
		default:
			return 0
		}
	})

	// serialize the uploaded parts as the request payload
	xmlParts := &struct {
		XMLName xml.Name `xml:"CompleteMultipartUpload"`
		Parts   []*mpPart
	}{
		Parts: u.uploadedParts,
	}
	rawXMLParts, err := xml.Marshal(xmlParts)
	if err != nil {
		return err
	}
	reqPayload := strings.NewReader(xml.Header + string(rawXMLParts))

	query := url.Values{"uploadId": []string{u.uploadId}}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, u.S3.URL(u.Key+"?"+query.Encode()), reqPayload)
	if err != nil {
		return err
	}

	// apply the optional request modifiers
	for _, reqFunc := range optReqFuncs {
		if reqFunc == nil {
			continue
		}
		reqFunc(req)
	}

	resp, err := u.S3.SignAndSend(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	return nil
}
// nextPart reads up to MinPartSize bytes from the payload and returns
// the read chunk together with its assigned (1-based) part number.
//
// io.EOF is returned only when there is nothing left to read;
// a short final chunk is returned with a nil error instead.
func (u *Uploader) nextPart() ([]byte, int, error) {
	u.mu.Lock()
	defer u.mu.Unlock()

	part := make([]byte, u.MinPartSize)

	n, err := io.ReadFull(u.Payload, part)

	// normalize io.EOF errors and ensure that io.EOF is returned only when there were no read bytes
	if err != nil && (errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF)) {
		if n == 0 {
			err = io.EOF
		} else {
			err = nil
		}
	}

	u.lastPartNumber++

	return part[0:n], u.lastPartNumber, err
}
// multipartUpload uploads the already read initPart and the remaining
// payload chunks concurrently using up to MaxConcurrency workers.
//
// The first encountered error cancels the group and is returned.
func (u *Uploader) multipartUpload(ctx context.Context, initPart []byte, optReqFuncs ...func(*http.Request)) error {
	var g errgroup.Group
	g.SetLimit(u.MaxConcurrency)

	totalWorkers := u.MaxConcurrency

	// dedicate one worker to the part that was already read by Upload
	if len(initPart) != 0 {
		totalWorkers--

		initPartNumber := u.lastPartNumber
		g.Go(func() error {
			mp, err := u.uploadPart(ctx, initPartNumber, initPart, optReqFuncs...)
			if err != nil {
				return err
			}

			u.mu.Lock()
			u.uploadedParts = append(u.uploadedParts, mp)
			u.mu.Unlock()

			return nil
		})
	}

	// ensure that at least one worker remains for the rest of the payload
	totalWorkers = max(totalWorkers, 1)

	for i := 0; i < totalWorkers; i++ {
		g.Go(func() error {
			// each worker keeps pulling parts until the payload is exhausted
			for {
				part, num, err := u.nextPart()
				if err != nil {
					if errors.Is(err, io.EOF) {
						break
					}
					return err
				}

				mp, err := u.uploadPart(ctx, num, part, optReqFuncs...)
				if err != nil {
					return err
				}

				u.mu.Lock()
				u.uploadedParts = append(u.uploadedParts, mp)
				u.mu.Unlock()
			}

			return nil
		})
	}

	return g.Wait()
}
// uploadPart uploads a single part of an initialized multipart upload
// and returns its resulting part number and ETag pair.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html
func (u *Uploader) uploadPart(ctx context.Context, partNumber int, partData []byte, optReqFuncs ...func(*http.Request)) (*mpPart, error) {
	query := url.Values{
		"uploadId":   []string{u.uploadId},
		"partNumber": []string{strconv.Itoa(partNumber)},
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPut, u.S3.URL(u.Key+"?"+query.Encode()), bytes.NewReader(partData))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Length", strconv.Itoa(len(partData)))

	// apply the optional request modifiers
	for _, reqFunc := range optReqFuncs {
		if reqFunc == nil {
			continue
		}
		reqFunc(req)
	}

	resp, err := u.S3.SignAndSend(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	return &mpPart{
		PartNumber: partNumber,
		ETag:       resp.Header.Get("ETag"),
	}, nil
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/internal/s3blob/s3/s3_escape_test.go | tools/filesystem/internal/s3blob/s3/s3_escape_test.go | package s3
import (
"net/url"
"testing"
)
// TestEscapePath verifies the path escaping (slashes are preserved,
// everything outside the unreserved set is percent-encoded).
func TestEscapePath(t *testing.T) {
	t.Parallel()

	const input = "/ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~ !@#$%^&*()+={}[]?><\\|,`'\"/@sub1/@sub2/a/b/c/1/2/3"
	const expected = "/ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~%20%21%40%23%24%25%5E%26%2A%28%29%2B%3D%7B%7D%5B%5D%3F%3E%3C%5C%7C%2C%60%27%22/%40sub1/%40sub2/a/b/c/1/2/3"

	if got := escapePath(input); got != expected {
		t.Fatalf("Expected\n%s\ngot\n%s", expected, got)
	}
}
// TestEscapeQuery verifies the query string escaping of both keys and
// values (unlike escapePath, slashes are percent-encoded here too) and
// that the resulting pairs are sorted by key.
func TestEscapeQuery(t *testing.T) {
	t.Parallel()

	escaped := escapeQuery(url.Values{
		"abc": []string{"123"},
		"/ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~ !@#$%^&*()+={}[]?><\\|,`'\"": []string{
			"/ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~ !@#$%^&*()+={}[]?><\\|,`'\"",
		},
	})

	expected := "%2FABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~%20%21%40%23%24%25%5E%26%2A%28%29%2B%3D%7B%7D%5B%5D%3F%3E%3C%5C%7C%2C%60%27%22=%2FABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~%20%21%40%23%24%25%5E%26%2A%28%29%2B%3D%7B%7D%5B%5D%3F%3E%3C%5C%7C%2C%60%27%22&abc=123"

	if escaped != expected {
		t.Fatalf("Expected\n%s\ngot\n%s", expected, escaped)
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/internal/s3blob/s3/copy_object.go | tools/filesystem/internal/s3blob/s3/copy_object.go | package s3
import (
"context"
"encoding/xml"
"net/http"
"net/url"
"strings"
"time"
)
// CopyObjectResponse is the parsed payload of a CopyObject call.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html#API_CopyObject_ResponseSyntax
type CopyObjectResponse struct {
	// CopyObjectResult binds the struct to the <CopyObjectResult> response root element.
	CopyObjectResult xml.Name `json:"copyObjectResult" xml:"CopyObjectResult"`

	ETag         string    `json:"etag" xml:"ETag"`
	LastModified time.Time `json:"lastModified" xml:"LastModified"`

	// checksum related response elements
	ChecksumType      string `json:"checksumType" xml:"ChecksumType"`
	ChecksumCRC32     string `json:"checksumCRC32" xml:"ChecksumCRC32"`
	ChecksumCRC32C    string `json:"checksumCRC32C" xml:"ChecksumCRC32C"`
	ChecksumCRC64NVME string `json:"checksumCRC64NVME" xml:"ChecksumCRC64NVME"`
	ChecksumSHA1      string `json:"checksumSHA1" xml:"ChecksumSHA1"`
	ChecksumSHA256    string `json:"checksumSHA256" xml:"ChecksumSHA256"`
}
// CopyObject copies a single object from srcKey to dstKey destination.
// (both keys are expected to be operating within the same bucket).
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
func (s3 *S3) CopyObject(ctx context.Context, srcKey string, dstKey string, optReqFuncs ...func(*http.Request)) (*CopyObjectResponse, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodPut, s3.URL(dstKey), nil)
	if err != nil {
		return nil, err
	}

	// per the doc the header value must be URL-encoded
	copySource := s3.Bucket + "/" + strings.TrimLeft(srcKey, "/")
	req.Header.Set("x-amz-copy-source", url.PathEscape(copySource))

	// apply the optional request modifiers
	for _, reqFunc := range optReqFuncs {
		if reqFunc == nil {
			continue
		}
		reqFunc(req)
	}

	res, err := s3.SignAndSend(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()

	result := new(CopyObjectResponse)
	if err := xml.NewDecoder(res.Body).Decode(result); err != nil {
		return nil, err
	}

	return result, nil
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/internal/s3blob/s3/head_object.go | tools/filesystem/internal/s3blob/s3/head_object.go | package s3
import (
"context"
"net/http"
"strconv"
"time"
)
// HeadObjectResponse holds the object metadata extracted from a HeadObject response's headers.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html#API_HeadObject_ResponseElements
type HeadObjectResponse struct {
	// Metadata is the extra data that is stored with the S3 object (aka. the "x-amz-meta-*" header values).
	//
	// The map keys are normalized to lower-case.
	Metadata map[string]string `json:"metadata"`

	// LastModified date and time when the object was last modified.
	LastModified time.Time `json:"lastModified"`

	// CacheControl specifies caching behavior along the request/reply chain.
	CacheControl string `json:"cacheControl"`

	// ContentDisposition specifies presentational information for the object.
	ContentDisposition string `json:"contentDisposition"`

	// ContentEncoding indicates what content encodings have been applied to the object
	// and thus what decoding mechanisms must be applied to obtain the
	// media-type referenced by the Content-Type header field.
	ContentEncoding string `json:"contentEncoding"`

	// ContentLanguage indicates the language the content is in.
	ContentLanguage string `json:"contentLanguage"`

	// ContentType is a standard MIME type describing the format of the object data.
	ContentType string `json:"contentType"`

	// ContentRange is the portion of the object usually returned in the response for a GET request.
	ContentRange string `json:"contentRange"`

	// ETag is an opaque identifier assigned by a web
	// server to a specific version of a resource found at a URL.
	ETag string `json:"etag"`

	// ContentLength is size of the body in bytes.
	ContentLength int64 `json:"contentLength"`
}
// load populates the current HeadObjectResponse fields from the
// provided raw response headers.
func (o *HeadObjectResponse) load(headers http.Header) {
	o.Metadata = extractMetadata(headers)

	o.CacheControl = headers.Get("Cache-Control")
	o.ContentDisposition = headers.Get("Content-Disposition")
	o.ContentEncoding = headers.Get("Content-Encoding")
	o.ContentLanguage = headers.Get("Content-Language")
	o.ContentType = headers.Get("Content-Type")
	o.ContentRange = headers.Get("Content-Range")
	o.ETag = headers.Get("ETag")

	// parse failures are ignored and the zero values are kept
	o.LastModified, _ = time.Parse(time.RFC1123, headers.Get("Last-Modified"))
	o.ContentLength, _ = strconv.ParseInt(headers.Get("Content-Length"), 10, 0)
}
// HeadObject sends a HEAD request for a single object to check its
// existence and to retrieve its metadata.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html
func (s3 *S3) HeadObject(ctx context.Context, key string, optFuncs ...func(*http.Request)) (*HeadObjectResponse, error) {
	httpReq, err := http.NewRequestWithContext(ctx, http.MethodHead, s3.URL(key), nil)
	if err != nil {
		return nil, err
	}

	// apply optional request funcs
	for _, fn := range optFuncs {
		if fn == nil {
			continue
		}
		fn(httpReq)
	}

	resp, err := s3.SignAndSend(httpReq)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	result := new(HeadObjectResponse)
	result.load(resp.Header)

	return result, nil
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/internal/s3blob/s3/delete_object.go | tools/filesystem/internal/s3blob/s3/delete_object.go | package s3
import (
"context"
"net/http"
)
// DeleteObject deletes a single object by its key.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
func (s3 *S3) DeleteObject(ctx context.Context, key string, optFuncs ...func(*http.Request)) error {
	httpReq, err := http.NewRequestWithContext(ctx, http.MethodDelete, s3.URL(key), nil)
	if err != nil {
		return err
	}

	// apply optional request funcs
	for _, fn := range optFuncs {
		if fn == nil {
			continue
		}
		fn(httpReq)
	}

	resp, err := s3.SignAndSend(httpReq)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// the response body carries no information needed by the caller
	return nil
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/internal/s3blob/s3/tests/client.go | tools/filesystem/internal/s3blob/s3/tests/client.go | // Package tests contains various tests helpers and utilities to assist
// with the S3 client testing.
package tests
import (
"errors"
"fmt"
"io"
"net/http"
"regexp"
"slices"
"strings"
"sync"
)
// NewClient creates a new test Client loaded with the specified RequestStubs.
func NewClient(stubs ...*RequestStub) *Client {
	c := new(Client)
	c.stubs = stubs
	return c
}
// RequestStub describes a single expected request and the canned
// response the test Client should reply with.
type RequestStub struct {
	// Method is the expected HTTP method (exact match).
	Method string

	URL string // plain string or regex pattern wrapped in "^pattern$"

	// Match is an optional extra predicate the request must satisfy.
	Match func(req *http.Request) bool

	// Response is returned when the stub matches (a default empty one is used when nil).
	Response *http.Response
}
// Client is a stub-backed HTTP client for tests - each matched
// request consumes its stub (see Do).
type Client struct {
	stubs []*RequestStub // remaining unprocessed stubs
	mu    sync.Mutex     // guards stubs
}
// AssertNoRemaining asserts that current client has no unprocessed requests remaining.
func (c *Client) AssertNoRemaining() error {
	c.mu.Lock()
	defer c.mu.Unlock()

	if len(c.stubs) == 0 {
		return nil
	}

	var sb strings.Builder
	sb.WriteString("not all stub requests were processed:")
	for _, stub := range c.stubs {
		sb.WriteString("\n- " + stub.Method + " " + stub.URL)
	}

	return errors.New(sb.String())
}
// Do implements the [s3.HTTPClient] interface.
//
// It replies with the first stub that matches the request's method, URL
// and optional Match predicate, removing the stub from the remaining list.
// An error is returned when no stub matches.
func (c *Client) Do(req *http.Request) (*http.Response, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	for i, stub := range c.stubs {
		if stub.Method != req.Method {
			continue
		}

		pattern := stub.URL
		if !strings.HasPrefix(pattern, "^") && !strings.HasSuffix(pattern, "$") {
			pattern = "^" + regexp.QuoteMeta(pattern) + "$"
		}
		re, err := regexp.Compile(pattern)
		if err != nil {
			return nil, err
		}
		if !re.MatchString(req.URL.String()) {
			continue
		}

		if stub.Match != nil && !stub.Match(req) {
			continue
		}

		// consume the matched stub
		c.stubs = slices.Delete(c.stubs, i, i+1)

		resp := stub.Response
		if resp == nil {
			resp = &http.Response{}
		}
		if resp.Header == nil {
			resp.Header = http.Header{}
		}
		if resp.Body == nil {
			resp.Body = http.NoBody
		}
		resp.Request = req

		return resp, nil
	}

	var body []byte
	if req.Body != nil {
		defer req.Body.Close()
		body, _ = io.ReadAll(req.Body)
	}

	return nil, fmt.Errorf(
		"the below request doesn't have a corresponding stub:\n%s %s\nHeaders: %v\nBody: %q",
		req.Method,
		req.URL.String(),
		req.Header,
		body,
	)
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/internal/s3blob/s3/tests/headers.go | tools/filesystem/internal/s3blob/s3/tests/headers.go | package tests
import (
"net/http"
"regexp"
"strings"
)
// ExpectHeaders checks whether specified headers match the expectations.
// The expectations map entry key is the header name.
// The expectations map entry value is the first header value. If wrapped with `^...$`
// it is compared as regular expression.
func ExpectHeaders(headers http.Header, expectations map[string]string) bool {
for h, expected := range expectations {
v := headers.Get(h)
pattern := expected
if !strings.HasPrefix(pattern, "^") && !strings.HasSuffix(pattern, "$") {
pattern = "^" + regexp.QuoteMeta(pattern) + "$"
}
expectedRegex, err := regexp.Compile(pattern)
if err != nil {
return false
}
if !expectedRegex.MatchString(v) {
return false
}
}
return true
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/internal/fileblob/attrs.go | tools/filesystem/internal/fileblob/attrs.go | package fileblob
import (
"encoding/json"
"fmt"
"os"
)
// Largely copied from gocloud.dev/blob/fileblob to apply the same
// retrieve and write side-car .attrs rules.
//
// -------------------------------------------------------------------
// Copyright 2018 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// -------------------------------------------------------------------
// attrsExt is the reserved extension of the side-car metadata files.
const attrsExt = ".attrs"

var errAttrsExt = fmt.Errorf("file extension %q is reserved", attrsExt)

// xattrs stores extended attributes for an object. The format is like
// filesystem extended attributes, see
// https://www.freedesktop.org/wiki/CommonExtendedAttributes.
type xattrs struct {
	CacheControl       string            `json:"user.cache_control"`
	ContentDisposition string            `json:"user.content_disposition"`
	ContentEncoding    string            `json:"user.content_encoding"`
	ContentLanguage    string            `json:"user.content_language"`
	ContentType        string            `json:"user.content_type"`
	Metadata           map[string]string `json:"user.metadata"`
	MD5                []byte            `json:"md5"`
}

// setAttrs persists xa as JSON into a "path.attrs" side-car file
// created next to the blob.
func setAttrs(path string, xa xattrs) error {
	f, err := os.Create(path + attrsExt)
	if err != nil {
		return err
	}

	if encodeErr := json.NewEncoder(f).Encode(xa); encodeErr != nil {
		// best effort cleanup of the partially written side-car file
		f.Close()
		os.Remove(f.Name())
		return encodeErr
	}

	return f.Close()
}

// getAttrs reads the "path.attrs" side-car file back into a xattrs struct.
//
// A missing side-car file is not an error - default attributes
// (application/octet-stream content type) are returned instead.
func getAttrs(path string) (xattrs, error) {
	f, err := os.Open(path + attrsExt)
	if err != nil {
		if os.IsNotExist(err) {
			// gracefully fallback to the defaults
			return xattrs{ContentType: "application/octet-stream"}, nil
		}
		return xattrs{}, err
	}

	var xa xattrs
	if decodeErr := json.NewDecoder(f).Decode(&xa); decodeErr != nil {
		f.Close()
		return xattrs{}, decodeErr
	}

	return xa, f.Close()
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/filesystem/internal/fileblob/fileblob.go | tools/filesystem/internal/fileblob/fileblob.go | // Package fileblob provides a blob.Bucket driver implementation.
//
// NB! To minimize breaking changes with older PocketBase releases,
// the driver is a stripped down and adapted version of the previously
// used gocloud.dev/blob/fileblob, hence many of the below doc comments,
// struct options and interface implementations are the same.
//
// To avoid partial writes, fileblob writes to a temporary file and then renames
// the temporary file to the final path on Close. By default, it creates these
// temporary files in `os.TempDir`. If `os.TempDir` is on a different mount than
// your base bucket path, the `os.Rename` will fail with `invalid cross-device link`.
// To avoid this, either configure the temp dir to use by setting the environment
// variable `TMPDIR`, or set `Options.NoTempDir` to `true` (fileblob will create
// the temporary files next to the actual files instead of in a temporary directory).
//
// By default fileblob stores blob metadata in "sidecar" files under the original
// filename with an additional ".attrs" suffix.
// This behaviour can be changed via `Options.Metadata`;
// writing of those metadata files can be suppressed by setting it to
// `MetadataDontWrite` or its equivalent "metadata=skip" in the URL for the opener.
// In either case, absent any stored metadata many `blob.Attributes` fields
// will be set to default values.
//
// The blob abstraction supports all UTF-8 strings; to make this work with services lacking
// full UTF-8 support, strings must be escaped (during writes) and unescaped
// (during reads). The following escapes are performed for fileblob:
// - Blob keys: ASCII characters 0-31 are escaped to "__0x<hex>__".
// If os.PathSeparator != "/", it is also escaped.
// Additionally, the "/" in "../", the trailing "/" in "//", and a trailing
// "/" is key names are escaped in the same way.
// On Windows, the characters "<>:"|?*" are also escaped.
//
// Example:
//
// drv, _ := fileblob.New("/path/to/dir", nil)
// bucket := blob.NewBucket(drv)
package fileblob
import (
"context"
"crypto/md5"
"errors"
"fmt"
"hash"
"io"
"io/fs"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/pocketbase/pocketbase/tools/filesystem/blob"
)
// defaultPageSize is the number of list results per page used by
// ListPaged when blob.ListOptions.PageSize is not set.
const defaultPageSize = 1000

type metadataOption string // Not exported as subject to change.

// Settings for Options.Metadata.
const (
	// Metadata gets written to a separate file.
	MetadataInSidecar metadataOption = ""
	// Writes won't carry metadata, as per the package docstring.
	MetadataDontWrite metadataOption = "skip"
)
// Options sets options for constructing a *blob.Bucket backed by fileblob.
type Options struct {
	// Refers to the strategy for how to deal with metadata (such as blob.Attributes).
	// For supported values please see the Metadata* constants.
	// If left unchanged, 'MetadataInSidecar' will be used.
	// With MetadataDontWrite no ".attrs" side-car files are written.
	Metadata metadataOption

	// The FileMode to use when creating directories for the top-level directory
	// backing the bucket (when CreateDir is true), and for subdirectories for keys.
	// Defaults to 0777.
	DirFileMode os.FileMode

	// If true, create the directory backing the Bucket if it does not exist
	// (using os.MkdirAll).
	CreateDir bool

	// If true, don't use os.TempDir for temporary files, but instead place them
	// next to the actual files. This may result in "stranded" temporary files
	// (e.g., if the application is killed before the file cleanup runs).
	//
	// If your bucket directory is on a different mount than os.TempDir, you will
	// need to set this to true, as os.Rename will fail across mount points.
	NoTempDir bool
}
// New creates a new instance of the fileblob driver backed by the
// filesystem and rooted at dir, which must exist.
func New(dir string, opts *Options) (blob.Driver, error) {
	if opts == nil {
		opts = &Options{}
	}
	if opts.DirFileMode == 0 {
		opts.DirFileMode = os.FileMode(0o777)
	}

	absdir, err := filepath.Abs(dir)
	if err != nil {
		return nil, fmt.Errorf("failed to convert %s into an absolute path: %v", dir, err)
	}

	info, err := os.Stat(absdir)

	// optionally create the missing bucket directory
	if err != nil && os.IsNotExist(err) && opts.CreateDir {
		if mkErr := os.MkdirAll(absdir, opts.DirFileMode); mkErr != nil {
			return nil, fmt.Errorf("tried to create directory but failed: %v", mkErr)
		}
		info, err = os.Stat(absdir)
	}
	if err != nil {
		return nil, err
	}

	if !info.IsDir() {
		return nil, fmt.Errorf("%s is not a directory", absdir)
	}

	return &driver{dir: absdir, opts: opts}, nil
}
// driver is the fileblob implementation of blob.Driver.
type driver struct {
	opts *Options // driver configuration
	dir  string   // absolute path of the bucket root directory
}

// Close implements [blob/Driver.Close].
//
// It is a no-op since the driver holds no long-lived resources.
func (drv *driver) Close() error {
	return nil
}
// NormalizeError implements [blob/Driver.NormalizeError].
//
// Missing file errors are additionally tagged with blob.ErrNotFound
// so that callers can match them with errors.Is.
func (drv *driver) NormalizeError(err error) error {
	if !os.IsNotExist(err) {
		return err
	}
	return errors.Join(err, blob.ErrNotFound)
}
// path returns the full filesystem path for a key.
func (drv *driver) path(key string) (string, error) {
	fullPath := filepath.Join(drv.dir, escapeKey(key))

	// the .attrs extension is reserved for the side-car metadata files
	if strings.HasSuffix(fullPath, attrsExt) {
		return "", errAttrsExt
	}

	return fullPath, nil
}
// forKey returns the full path, os.FileInfo, and attributes for key.
func (drv *driver) forKey(key string) (string, os.FileInfo, *xattrs, error) {
	fullPath, err := drv.path(key)
	if err != nil {
		return "", nil, nil, err
	}

	info, err := os.Stat(fullPath)
	if err != nil {
		return "", nil, nil, err
	}

	// directories are not valid blob keys
	if info.IsDir() {
		return "", nil, nil, os.ErrNotExist
	}

	xa, err := getAttrs(fullPath)
	if err != nil {
		return "", nil, nil, err
	}

	return fullPath, info, &xa, nil
}
// ListPaged implements [blob/Driver.ListPaged].
//
// It recursively walks the bucket directory, returning up to
// opts.PageSize entries (default 1000) per call and resuming after
// opts.PageToken (the last returned key) on subsequent calls.
// When opts.Delimiter is set, keys below a common "sub-directory"
// prefix are collapsed into a single IsDir entry.
func (drv *driver) ListPaged(ctx context.Context, opts *blob.ListOptions) (*blob.ListPage, error) {
	var pageToken string
	if len(opts.PageToken) > 0 {
		pageToken = string(opts.PageToken)
	}
	pageSize := opts.PageSize
	if pageSize == 0 {
		pageSize = defaultPageSize
	}
	// If opts.Delimiter != "", lastPrefix contains the last "directory" key we
	// added. It is used to avoid adding it again; all files in this "directory"
	// are collapsed to the single directory entry.
	var lastPrefix string
	var lastKeyAdded string
	// If the Prefix contains a "/", we can set the root of the Walk
	// to the path specified by the Prefix as any files below the path will not
	// match the Prefix.
	// Note that we use "/" explicitly and not os.PathSeparator, as the opts.Prefix
	// is in the unescaped form.
	root := drv.dir
	if i := strings.LastIndex(opts.Prefix, "/"); i > -1 {
		root = filepath.Join(root, opts.Prefix[:i])
	}
	var result blob.ListPage
	// Do a full recursive scan of the root directory.
	err := filepath.WalkDir(root, func(path string, info fs.DirEntry, err error) error {
		if err != nil {
			// Couldn't read this file/directory for some reason; just skip it.
			return nil
		}
		// Skip the self-generated attribute files.
		if strings.HasSuffix(path, attrsExt) {
			return nil
		}
		// os.Walk returns the root directory; skip it.
		if path == drv.dir {
			return nil
		}
		// Strip the <drv.dir> prefix from path.
		prefixLen := len(drv.dir)
		// Include the separator for non-root.
		if drv.dir != "/" {
			prefixLen++
		}
		path = path[prefixLen:]
		// Unescape the path to get the key.
		key := unescapeKey(path)
		// Skip all directories. If opts.Delimiter is set, we'll create
		// pseudo-directories later.
		// Note that returning nil means that we'll still recurse into it;
		// we're just not adding a result for the directory itself.
		if info.IsDir() {
			key += "/"
			// Avoid recursing into subdirectories if the directory name already
			// doesn't match the prefix; any files in it are guaranteed not to match.
			if len(key) > len(opts.Prefix) && !strings.HasPrefix(key, opts.Prefix) {
				return filepath.SkipDir
			}
			// Similarly, avoid recursing into subdirectories if we're making
			// "directories" and all of the files in this subdirectory are guaranteed
			// to collapse to a "directory" that we've already added.
			if lastPrefix != "" && strings.HasPrefix(key, lastPrefix) {
				return filepath.SkipDir
			}
			return nil
		}
		// Skip files/directories that don't match the Prefix.
		if !strings.HasPrefix(key, opts.Prefix) {
			return nil
		}
		var md5 []byte
		// NOTE(review): at this point path has been stripped to be relative
		// to drv.dir (see above), yet it is passed to getAttrs as-is —
		// verify the side-car file resolves correctly here; on failure the
		// error is swallowed and md5 simply stays nil.
		if xa, err := getAttrs(path); err == nil {
			// Note: we only have the MD5 hash for blobs that we wrote.
			// For other blobs, md5 will remain nil.
			md5 = xa.MD5
		}
		fi, err := info.Info()
		if err != nil {
			return err
		}
		obj := &blob.ListObject{
			Key:     key,
			ModTime: fi.ModTime(),
			Size:    fi.Size(),
			MD5:     md5,
		}
		// If using Delimiter, collapse "directories".
		if opts.Delimiter != "" {
			// Strip the prefix, which may contain Delimiter.
			keyWithoutPrefix := key[len(opts.Prefix):]
			// See if the key still contains Delimiter.
			// If no, it's a file and we just include it.
			// If yes, it's a file in a "sub-directory" and we want to collapse
			// all files in that "sub-directory" into a single "directory" result.
			if idx := strings.Index(keyWithoutPrefix, opts.Delimiter); idx != -1 {
				prefix := opts.Prefix + keyWithoutPrefix[0:idx+len(opts.Delimiter)]
				// We've already included this "directory"; don't add it.
				if prefix == lastPrefix {
					return nil
				}
				// Update the object to be a "directory".
				obj = &blob.ListObject{
					Key:   prefix,
					IsDir: true,
				}
				lastPrefix = prefix
			}
		}
		// If there's a pageToken, skip anything before it.
		if pageToken != "" && obj.Key <= pageToken {
			return nil
		}
		// If we've already got a full page of results, set NextPageToken and stop.
		// Unless the current object is a directory, in which case there may
		// still be objects coming that are alphabetically before it (since
		// we appended the delimiter). In that case, keep going; we'll trim the
		// extra entries (if any) before returning.
		if len(result.Objects) == pageSize && !obj.IsDir {
			result.NextPageToken = []byte(result.Objects[pageSize-1].Key)
			return io.EOF
		}
		result.Objects = append(result.Objects, obj)
		// Normally, objects are added in the correct order (by Key).
		// However, sometimes adding the file delimiter messes that up
		// (e.g., if the file delimiter is later in the alphabet than the last character of a key).
		// Detect if this happens and swap if needed.
		if len(result.Objects) > 1 && obj.Key < lastKeyAdded {
			i := len(result.Objects) - 1
			result.Objects[i-1], result.Objects[i] = result.Objects[i], result.Objects[i-1]
			lastKeyAdded = result.Objects[i].Key
		} else {
			lastKeyAdded = obj.Key
		}
		return nil
	})
	// io.EOF is the sentinel used above to stop the walk early; it is not an error.
	if err != nil && err != io.EOF {
		return nil, err
	}
	if len(result.Objects) > pageSize {
		result.Objects = result.Objects[0:pageSize]
		result.NextPageToken = []byte(result.Objects[pageSize-1].Key)
	}
	return &result, nil
}
// Attributes implements [blob/Driver.Attributes].
func (drv *driver) Attributes(ctx context.Context, key string) (*blob.Attributes, error) {
	_, info, xa, err := drv.forKey(key)
	if err != nil {
		return nil, err
	}

	attrs := &blob.Attributes{
		CacheControl:       xa.CacheControl,
		ContentDisposition: xa.ContentDisposition,
		ContentEncoding:    xa.ContentEncoding,
		ContentLanguage:    xa.ContentLanguage,
		ContentType:        xa.ContentType,
		Metadata:           xa.Metadata,
		// CreateTime left as the zero time.
		ModTime: info.ModTime(),
		Size:    info.Size(),
		MD5:     xa.MD5,
		ETag:    fmt.Sprintf("\"%x-%x\"", info.ModTime().UnixNano(), info.Size()),
	}

	return attrs, nil
}
// NewRangeReader implements [blob/Driver.NewRangeReader].
//
// offset is the number of bytes to skip from the start of the blob
// and a negative length means "read until the end".
func (drv *driver) NewRangeReader(ctx context.Context, key string, offset, length int64) (blob.DriverReader, error) {
	path, info, xa, err := drv.forKey(key)
	if err != nil {
		return nil, err
	}

	// @todo consider replacing with os.Root
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}

	if offset > 0 {
		if _, err := f.Seek(offset, io.SeekStart); err != nil {
			f.Close() // don't leak the file descriptor on seek failure
			return nil, err
		}
	}

	r := io.Reader(f)
	if length >= 0 {
		r = io.LimitReader(r, length)
	}

	return &reader{
		r: r,
		c: f,
		attrs: &blob.ReaderAttributes{
			ContentType: xa.ContentType,
			ModTime:     info.ModTime(),
			Size:        info.Size(),
		},
	}, nil
}
// createTemp creates a uniquely named temp file for path, either in
// os.TempDir (default) or next to path itself (noTempDir=true).
//
// A custom implementation is used rather than os.CreateTemp() because
// os.CreateTemp() sets the permissions of the tempfile to 0600, rather than
// 0666, making it inconsistent with the directories and attribute files.
func createTemp(path string, noTempDir bool) (*os.File, error) {
	for attempt := 0; ; {
		// Suffix the base path with the current time in nanosecond precision
		// and ".tmp". Nanosecond changes enough between each iteration to make
		// a conflict unlikely, while using the full time lowers the chance of
		// a collision with a file using a similar pattern (note: this has
		// undefined behavior after the year 2262).
		base := path
		if !noTempDir {
			base = filepath.Join(os.TempDir(), filepath.Base(path))
		}
		name := base + "." + strconv.FormatInt(time.Now().UnixNano(), 16) + ".tmp"

		f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o666)
		if !os.IsExist(err) {
			return f, err
		}

		// name collision -> retry (up to 10000 times)
		attempt++
		if attempt >= 10000 {
			return nil, &os.PathError{Op: "createtemp", Path: path + ".*.tmp", Err: os.ErrExist}
		}
	}
}
// NewTypedWriter implements [blob/Driver.NewTypedWriter].
//
// The returned writer streams into a temp file that is renamed to the
// final key path on Close (see writer / writerWithSidecar).
func (drv *driver) NewTypedWriter(ctx context.Context, key, contentType string, opts *blob.WriterOptions) (blob.DriverWriter, error) {
	path, err := drv.path(key)
	if err != nil {
		return nil, err
	}

	if err = os.MkdirAll(filepath.Dir(path), drv.opts.DirFileMode); err != nil {
		return nil, err
	}

	f, err := createTemp(path, drv.opts.NoTempDir)
	if err != nil {
		return nil, err
	}

	// metadata writing is disabled -> use the plain writer without a side-car file
	if drv.opts.Metadata == MetadataDontWrite {
		return &writer{
			ctx:  ctx,
			File: f,
			path: path,
		}, nil
	}

	var metadata map[string]string
	if len(opts.Metadata) > 0 {
		metadata = opts.Metadata
	}

	return &writerWithSidecar{
		ctx:        ctx,
		f:          f,
		path:       path,
		contentMD5: opts.ContentMD5,
		md5hash:    md5.New(),
		attrs: xattrs{
			CacheControl:       opts.CacheControl,
			ContentDisposition: opts.ContentDisposition,
			ContentEncoding:    opts.ContentEncoding,
			ContentLanguage:    opts.ContentLanguage,
			ContentType:        contentType,
			Metadata:           metadata,
		},
	}, nil
}
// Copy implements [blob/Driver.Copy].
func (drv *driver) Copy(ctx context.Context, dstKey, srcKey string) error {
	// Read the source attributes directly (instead of via NewRangeReader)
	// so that all of the metadata (from xa) can be carried over as well.
	srcPath, _, xa, err := drv.forKey(srcKey)
	if err != nil {
		return err
	}

	src, err := os.Open(srcPath)
	if err != nil {
		return err
	}
	defer src.Close()

	// Write the copy using Writer, to avoid re-implementing making of a
	// temp file, cleaning up after partial failures, etc.
	wopts := blob.WriterOptions{
		CacheControl:       xa.CacheControl,
		ContentDisposition: xa.ContentDisposition,
		ContentEncoding:    xa.ContentEncoding,
		ContentLanguage:    xa.ContentLanguage,
		Metadata:           xa.Metadata,
	}

	// Create a cancelable context so we can cancel the write if there are problems.
	writeCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	w, err := drv.NewTypedWriter(writeCtx, dstKey, xa.ContentType, &wopts)
	if err != nil {
		return err
	}

	if _, err = io.Copy(w, src); err != nil {
		cancel() // cancel before Close cancels the write
		w.Close()
		return err
	}

	return w.Close()
}
// Delete implements [blob/Driver.Delete].
//
// It removes both the blob file and its optional ".attrs" side-car file
// (a missing side-car file is not treated as an error).
//
// Note: the receiver is named drv for consistency with the other driver methods.
func (drv *driver) Delete(ctx context.Context, key string) error {
	path, err := drv.path(key)
	if err != nil {
		return err
	}

	// remove the blob file itself
	if err = os.Remove(path); err != nil {
		return err
	}

	// remove the side-car metadata file, if any
	if err = os.Remove(path + attrsExt); err != nil && !os.IsNotExist(err) {
		return err
	}

	return nil
}
// -------------------------------------------------------------------
// reader streams a blob's content and exposes its read attributes.
type reader struct {
	r     io.Reader // the (possibly length-limited) content reader
	c     io.Closer // the underlying file handle
	attrs *blob.ReaderAttributes
}

func (r *reader) Read(p []byte) (int, error) {
	if r.r != nil {
		return r.r.Read(p)
	}
	return 0, io.EOF
}

func (r *reader) Close() error {
	if r.c != nil {
		return r.c.Close()
	}
	return nil
}

// Attributes implements [blob/DriverReader.Attributes].
func (r *reader) Attributes() *blob.ReaderAttributes {
	return r.attrs
}
// -------------------------------------------------------------------
// writerWithSidecar implements the strategy of storing metadata in a distinct file.
type writerWithSidecar struct {
	ctx        context.Context
	md5hash    hash.Hash // incrementally hashes everything written so far
	f          *os.File  // the temp file receiving the writes
	path       string    // the final blob destination
	attrs      xattrs    // metadata persisted into the side-car file on Close
	contentMD5 []byte
}

func (w *writerWithSidecar) Write(p []byte) (n int, err error) {
	n, err = w.f.Write(p)
	if err != nil {
		// Don't hash the unwritten tail twice when writing is resumed.
		w.md5hash.Write(p[:n])
		return n, err
	}

	if _, err = w.md5hash.Write(p); err != nil {
		return n, err
	}

	return n, nil
}

func (w *writerWithSidecar) Close() error {
	if err := w.f.Close(); err != nil {
		return err
	}

	// Always delete the temp file. On success, it will have been
	// renamed so the Remove will fail.
	defer func() {
		_ = os.Remove(w.f.Name())
	}()

	// Check if the write was cancelled.
	if err := w.ctx.Err(); err != nil {
		return err
	}

	w.attrs.MD5 = w.md5hash.Sum(nil)

	// Write the attributes side-car file.
	if err := setAttrs(w.path, w.attrs); err != nil {
		return err
	}

	// Rename the temp file to its final path.
	if err := os.Rename(w.f.Name(), w.path); err != nil {
		_ = os.Remove(w.path + attrsExt)
		return err
	}

	return nil
}
// writer is a file with a temporary name until closed.
//
// Embedding os.File allows the likes of io.Copy to use optimizations,
// which is why it is not folded into writerWithSidecar.
type writer struct {
	*os.File
	ctx  context.Context
	path string
}

func (w *writer) Close() error {
	if err := w.File.Close(); err != nil {
		return err
	}

	// Always delete the temp file. On success, it will have been renamed so
	// the Remove will fail.
	tmpName := w.Name()
	defer os.Remove(tmpName)

	// Check if the write was cancelled.
	if err := w.ctx.Err(); err != nil {
		return err
	}

	// Rename the temp file to its final path.
	return os.Rename(tmpName, w.path)
}
// -------------------------------------------------------------------
// escapeKey does all required escaping for UTF-8 strings to work the filesystem.
func escapeKey(s string) string {
	s = blob.HexEscape(s, func(r []rune, i int) bool {
		c := r[i]

		// control characters
		if c < 32 {
			return true
		}

		// We're going to replace '/' with os.PathSeparator below. In order for this
		// to be reversible, we need to escape raw os.PathSeparators.
		if os.PathSeparator != '/' && c == os.PathSeparator {
			return true
		}

		if c == '/' {
			// For "../", escape the trailing slash.
			if i > 1 && r[i-1] == '.' && r[i-2] == '.' {
				return true
			}
			// For "//", escape the trailing slash.
			if i > 0 && r[i-1] == '/' {
				return true
			}
			// Escape the trailing slash in a key.
			if i == len(r)-1 {
				return true
			}
		}

		// https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
		if os.PathSeparator == '\\' && (c == '>' || c == '<' || c == ':' || c == '"' || c == '|' || c == '?' || c == '*') {
			return true
		}

		return false
	})

	// Replace "/" with os.PathSeparator if needed, so that the local filesystem
	// can use subdirectories.
	if os.PathSeparator != '/' {
		s = strings.ReplaceAll(s, "/", string(os.PathSeparator))
	}

	return s
}
// unescapeKey reverses escapeKey.
func unescapeKey(s string) string {
	// restore "/" as the key separator before hex unescaping
	if sep := string(os.PathSeparator); sep != "/" {
		s = strings.ReplaceAll(s, sep, "/")
	}

	return blob.HexUnescape(s)
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/types/types_test.go | tools/types/types_test.go | package types_test
import (
"testing"
"github.com/pocketbase/pocketbase/tools/types"
)
// TestPointer verifies that types.Pointer returns a non-nil pointer
// to a value equal to its argument for different types.
func TestPointer(t *testing.T) {
	s1 := types.Pointer("")
	if s1 == nil || *s1 != "" {
		t.Fatalf("Expected empty string pointer, got %#v", s1)
	}

	s2 := types.Pointer("test")
	if s2 == nil || *s2 != "test" {
		t.Fatalf("Expected 'test' string pointer, got %#v", s2)
	}

	s3 := types.Pointer(123)
	if s3 == nil || *s3 != 123 {
		// note: fixed copy-paste leftover that previously said "string pointer"
		t.Fatalf("Expected 123 int pointer, got %#v", s3)
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/types/types.go | tools/types/types.go | // Package types implements some commonly used db serializable types
// like datetime, json, etc.
package types
// Pointer is a generic helper that returns val as *T.
func Pointer[T any](val T) *T {
	p := val
	return &p
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/types/json_map_test.go | tools/types/json_map_test.go | package types_test
import (
"database/sql/driver"
"fmt"
"testing"
"github.com/pocketbase/pocketbase/tools/types"
)
// TestJSONMapMarshalJSON verifies that JSONMap serializes to a plain JSON
// object and that a nil map marshals as "{}" rather than "null".
func TestJSONMapMarshalJSON(t *testing.T) {
	scenarios := []struct {
		json     types.JSONMap[any]
		expected string
	}{
		{nil, "{}"},
		{types.JSONMap[any]{}, `{}`},
		{types.JSONMap[any]{"test1": 123, "test2": "lorem"}, `{"test1":123,"test2":"lorem"}`},
		{types.JSONMap[any]{"test": []int{1, 2, 3}}, `{"test":[1,2,3]}`},
	}

	for i, s := range scenarios {
		t.Run(fmt.Sprintf("%d_%#v", i, s.expected), func(t *testing.T) {
			result, err := s.json.MarshalJSON()
			if err != nil {
				t.Fatal(err)
			}
			if string(result) != s.expected {
				t.Fatalf("Expected\n%s\ngot\n%s", s.expected, result)
			}
		})
	}
}
// TestJSONMapMarshalString verifies that JSONMap.String() returns the same
// serialized JSON object form as MarshalJSON (with "{}" for nil maps).
func TestJSONMapMarshalString(t *testing.T) {
	scenarios := []struct {
		json     types.JSONMap[any]
		expected string
	}{
		{nil, "{}"},
		{types.JSONMap[any]{}, `{}`},
		{types.JSONMap[any]{"test1": 123, "test2": "lorem"}, `{"test1":123,"test2":"lorem"}`},
		{types.JSONMap[any]{"test": []int{1, 2, 3}}, `{"test":[1,2,3]}`},
	}

	for i, s := range scenarios {
		t.Run(fmt.Sprintf("%d_%#v", i, s.expected), func(t *testing.T) {
			result := s.json.String()
			if result != s.expected {
				t.Fatalf("Expected\n%s\ngot\n%s", s.expected, result)
			}
		})
	}
}
// TestJSONMapGet verifies that JSONMap.Get returns the stored value
// for existing keys and nil for missing keys or nil maps.
func TestJSONMapGet(t *testing.T) {
	scenarios := []struct {
		json     types.JSONMap[any]
		key      string
		expected any
	}{
		{nil, "test", nil},
		{types.JSONMap[any]{"test": 123}, "test", 123},
		{types.JSONMap[any]{"test": 123}, "missing", nil},
	}

	for i, sc := range scenarios {
		t.Run(fmt.Sprintf("%d_%s", i, sc.key), func(t *testing.T) {
			if result := sc.json.Get(sc.key); result != sc.expected {
				t.Fatalf("Expected %s, got %#v", sc.expected, result)
			}
		})
	}
}
// TestJSONMapSet verifies that JSONMap.Set stores the provided key-value pair.
func TestJSONMapSet(t *testing.T) {
	scenarios := []struct {
		key   string
		value any
	}{
		{"a", nil},
		{"a", 123},
		{"b", "test"},
	}

	for i, s := range scenarios {
		t.Run(fmt.Sprintf("%d_%s", i, s.key), func(t *testing.T) {
			j := types.JSONMap[any]{}

			j.Set(s.key, s.value)

			if v := j[s.key]; v != s.value {
				// %v instead of %s - value can be a non-string (int, nil)
				t.Fatalf("Expected %v, got %#v", s.value, v)
			}
		})
	}
}
func TestJSONMapValue(t *testing.T) {
scenarios := []struct {
json types.JSONMap[any]
expected driver.Value
}{
{nil, `{}`},
{types.JSONMap[any]{}, `{}`},
{types.JSONMap[any]{"test1": 123, "test2": "lorem"}, `{"test1":123,"test2":"lorem"}`},
{types.JSONMap[any]{"test": []int{1, 2, 3}}, `{"test":[1,2,3]}`},
}
for i, s := range scenarios {
t.Run(fmt.Sprintf("%d_%#v", i, s.expected), func(t *testing.T) {
result, err := s.json.Value()
if err != nil {
t.Fatal(err)
}
if result != s.expected {
t.Fatalf("Expected %s, got %#v", s.expected, result)
}
})
}
}
func TestJSONArrayMapScan(t *testing.T) {
scenarios := []struct {
value any
expectError bool
expectJSON string
}{
{``, false, `{}`},
{nil, false, `{}`},
{[]byte{}, false, `{}`},
{`{}`, false, `{}`},
{123, true, `{}`},
{`""`, true, `{}`},
{`invalid_json`, true, `{}`},
{`"test"`, true, `{}`},
{`1,2,3`, true, `{}`},
{`{"test": 1`, true, `{}`},
{`{"test": 1}`, false, `{"test":1}`},
{[]byte(`{"test": 1}`), false, `{"test":1}`},
}
for i, s := range scenarios {
t.Run(fmt.Sprintf("%d_%#v", i, s.value), func(t *testing.T) {
arr := types.JSONMap[any]{}
scanErr := arr.Scan(s.value)
hasErr := scanErr != nil
if hasErr != s.expectError {
t.Fatalf("Expected %v, got %v (%v)", s.expectError, hasErr, scanErr)
}
result, _ := arr.MarshalJSON()
if string(result) != s.expectJSON {
t.Fatalf("Expected %s, got %s", s.expectJSON, result)
}
})
}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/types/datetime.go | tools/types/datetime.go | package types
import (
"database/sql/driver"
"encoding/json"
"time"
"github.com/spf13/cast"
)
// DefaultDateLayout specifies the default app date strings layout.
const DefaultDateLayout = "2006-01-02 15:04:05.000Z"
// NowDateTime returns new DateTime instance with the current local time.
func NowDateTime() DateTime {
	var d DateTime
	d.t = time.Now()
	return d
}
// ParseDateTime creates a new DateTime from the provided value
// (could be [cast.ToTime] supported string, [time.Time], etc.).
func ParseDateTime(value any) (DateTime, error) {
	var d DateTime
	if err := d.Scan(value); err != nil {
		return d, err
	}
	return d, nil
}
// DateTime represents a [time.Time] instance in UTC that is wrapped
// and serialized using the app default date layout.
type DateTime struct {
	t time.Time // wrapped time value (normalized to UTC only during serialization)
}
// Time returns the internal [time.Time] instance.
//
// Note: the returned value keeps the location/offset it was scanned with;
// UTC normalization happens only in String().
func (d DateTime) Time() time.Time {
	return d.t
}
// Add returns a new DateTime based on the current DateTime + the specified duration.
// The receiver is left unchanged.
func (d DateTime) Add(duration time.Duration) DateTime {
	return DateTime{t: d.t.Add(duration)}
}
// Sub returns a [time.Duration] by subtracting the specified DateTime from the current one.
//
// If the result exceeds the maximum (or minimum) value that can be stored in a [time.Duration],
// the maximum (or minimum) duration will be returned.
func (d DateTime) Sub(u DateTime) time.Duration {
	return d.t.Sub(u.t)
}
// AddDate returns a new DateTime based on the current one + duration.
// The receiver is left unchanged.
//
// It follows the same rules as [time.AddDate].
func (d DateTime) AddDate(years, months, days int) DateTime {
	return DateTime{t: d.t.AddDate(years, months, days)}
}
// After reports whether the current DateTime instance is after u.
func (d DateTime) After(u DateTime) bool {
	return d.t.After(u.t)
}
// Before reports whether the current DateTime instance is before u.
func (d DateTime) Before(u DateTime) bool {
	return d.t.Before(u.t)
}
// Compare compares the current DateTime instance with u.
// If the current instance is before u, it returns -1.
// If the current instance is after u, it returns +1.
// If they're the same, it returns 0.
func (d DateTime) Compare(u DateTime) int {
	return d.t.Compare(u.t)
}
// Equal reports whether the current DateTime and u represent the same time instant.
// Two DateTime can be equal even if they are in different locations.
// For example, 6:00 +0200 and 4:00 UTC are Equal.
func (d DateTime) Equal(u DateTime) bool {
	return d.t.Equal(u.t)
}
// Unix returns the current DateTime as a Unix time, aka.
// the number of seconds elapsed since January 1, 1970 UTC.
func (d DateTime) Unix() int64 {
	return d.t.Unix()
}
// IsZero checks whether the current DateTime instance has zero time value.
func (d DateTime) IsZero() bool {
	return d.t.IsZero()
}
// String serializes the current DateTime instance into a formatted
// UTC date string.
//
// The zero value is serialized to an empty string.
func (d DateTime) String() string {
	if d.t.IsZero() {
		return ""
	}
	return d.t.UTC().Format(DefaultDateLayout)
}
// MarshalJSON implements the [json.Marshaler] interface.
//
// The formatted date string never contains characters that require
// json escaping, so it is safe to quote it manually.
func (d DateTime) MarshalJSON() ([]byte, error) {
	out := make([]byte, 0, len(DefaultDateLayout)+2)
	out = append(out, '"')
	out = append(out, d.String()...)
	out = append(out, '"')
	return out, nil
}
// UnmarshalJSON implements the [json.Unmarshaler] interface.
//
// b must be a json encoded string, otherwise an error is returned.
func (d *DateTime) UnmarshalJSON(b []byte) error {
	var str string
	if err := json.Unmarshal(b, &str); err != nil {
		return err
	}
	return d.Scan(str)
}
// Value implements the [driver.Valuer] interface.
//
// The value is always stored as a formatted UTC date string
// (an empty string for the zero value).
func (d DateTime) Value() (driver.Value, error) {
	return d.String(), nil
}
// Scan implements [sql.Scanner] interface to scan the provided value
// into the current DateTime instance.
//
// Supported values: [time.Time], DateTime, strings in DefaultDateLayout
// (with a cast.ToTime fallback for other common layouts), integer Unix
// timestamps in seconds, and any other value that can be stringified.
//
// It always returns a nil error - unparsable values silently end up
// as the zero time (see the datetime tests for examples).
func (d *DateTime) Scan(value any) error {
	switch v := value.(type) {
	case time.Time:
		d.t = v
	case DateTime:
		d.t = v.Time()
	case string:
		if v == "" {
			d.t = time.Time{}
		} else {
			t, err := time.Parse(DefaultDateLayout, v)
			if err != nil {
				// check for other common date layouts
				// (cast.ToTime returns the zero time on failure)
				t = cast.ToTime(v)
			}
			d.t = t
		}
	case int, int64, int32, uint, uint64, uint32:
		// numeric values are treated as Unix timestamps in seconds
		d.t = cast.ToTime(v)
	default:
		// best-effort fallback: stringify the value and let cast try to parse it
		str := cast.ToString(v)
		if str == "" {
			d.t = time.Time{}
		} else {
			d.t = cast.ToTime(str)
		}
	}
	return nil
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/types/geo_point_test.go | tools/types/geo_point_test.go | package types_test
import (
"fmt"
"testing"
"github.com/pocketbase/pocketbase/tools/types"
)
func TestGeoPointAsMap(t *testing.T) {
t.Parallel()
scenarios := []struct {
name string
point types.GeoPoint
expected map[string]any
}{
{"zero", types.GeoPoint{}, map[string]any{"lon": 0.0, "lat": 0.0}},
{"non-zero", types.GeoPoint{Lon: -10, Lat: 20.123}, map[string]any{"lon": -10.0, "lat": 20.123}},
}
for _, s := range scenarios {
t.Run(s.name, func(t *testing.T) {
result := s.point.AsMap()
if len(result) != len(s.expected) {
t.Fatalf("Expected %d keys, got %d: %v", len(s.expected), len(result), result)
}
for k, v := range s.expected {
found, ok := result[k]
if !ok {
t.Fatalf("Missing expected %q key: %v", k, result)
}
if found != v {
t.Fatalf("Expected %q key value %v, got %v", k, v, found)
}
}
})
}
}
func TestGeoPointStringAndValue(t *testing.T) {
t.Parallel()
scenarios := []struct {
name string
point types.GeoPoint
expected string
}{
{"zero", types.GeoPoint{}, `{"lon":0,"lat":0}`},
{"non-zero", types.GeoPoint{Lon: -10, Lat: 20.123}, `{"lon":-10,"lat":20.123}`},
}
for _, s := range scenarios {
t.Run(s.name, func(t *testing.T) {
str := s.point.String()
val, err := s.point.Value()
if err != nil {
t.Fatal(err)
}
if str != val {
t.Fatalf("Expected String and Value to return the same value")
}
if str != s.expected {
t.Fatalf("Expected\n%s\ngot\n%s", s.expected, str)
}
})
}
}
func TestGeoPointScan(t *testing.T) {
t.Parallel()
scenarios := []struct {
value any
expectErr bool
expectStr string
}{
{nil, false, `{"lon":1,"lat":2}`},
{"", false, `{"lon":1,"lat":2}`},
{types.JSONRaw{}, false, `{"lon":1,"lat":2}`},
{[]byte{}, false, `{"lon":1,"lat":2}`},
{`{}`, false, `{"lon":1,"lat":2}`},
{`[]`, true, `{"lon":1,"lat":2}`},
{0, true, `{"lon":1,"lat":2}`},
{`{"lon":"1.23","lat":"4.56"}`, true, `{"lon":1,"lat":2}`},
{`{"lon":1.23,"lat":4.56}`, false, `{"lon":1.23,"lat":4.56}`},
{[]byte(`{"lon":1.23,"lat":4.56}`), false, `{"lon":1.23,"lat":4.56}`},
{types.JSONRaw(`{"lon":1.23,"lat":4.56}`), false, `{"lon":1.23,"lat":4.56}`},
{types.GeoPoint{}, false, `{"lon":0,"lat":0}`},
{types.GeoPoint{Lon: 1.23, Lat: 4.56}, false, `{"lon":1.23,"lat":4.56}`},
{&types.GeoPoint{Lon: 1.23, Lat: 4.56}, false, `{"lon":1.23,"lat":4.56}`},
}
for i, s := range scenarios {
t.Run(fmt.Sprintf("%d_%#v", i, s.value), func(t *testing.T) {
point := types.GeoPoint{Lon: 1, Lat: 2}
err := point.Scan(s.value)
hasErr := err != nil
if hasErr != s.expectErr {
t.Errorf("Expected hasErr %v, got %v (%v)", s.expectErr, hasErr, err)
}
if str := point.String(); str != s.expectStr {
t.Errorf("Expected\n%s\ngot\n%s", s.expectStr, str)
}
})
}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/types/json_raw.go | tools/types/json_raw.go | package types
import (
"database/sql/driver"
"encoding/json"
"errors"
)
// JSONRaw defines a json value type that is safe for db read/write.
type JSONRaw []byte

// ParseJSONRaw creates a new JSONRaw instance from the provided value
// (could be JSONRaw, int, float, string, []byte, etc.).
func ParseJSONRaw(value any) (JSONRaw, error) {
	raw := JSONRaw{}
	if err := raw.Scan(value); err != nil {
		return raw, err
	}
	return raw, nil
}

// String returns the current JSONRaw instance as a json encoded string.
func (j JSONRaw) String() string {
	encoded, _ := j.MarshalJSON()
	return string(encoded)
}

// MarshalJSON implements the [json.Marshaler] interface.
//
// An empty instance is serialized as the json "null" keyword.
func (j JSONRaw) MarshalJSON() ([]byte, error) {
	if len(j) > 0 {
		return j, nil
	}
	return []byte("null"), nil
}

// UnmarshalJSON implements the [json.Unmarshaler] interface.
//
// The raw bytes are copied as-is without validation
// (invalid json is stored verbatim).
func (j *JSONRaw) UnmarshalJSON(b []byte) error {
	if j == nil {
		return errors.New("JSONRaw: UnmarshalJSON on nil pointer")
	}
	*j = append((*j)[:0], b...)
	return nil
}

// Value implements the [driver.Valuer] interface.
//
// Empty instances are stored as NULL.
func (j JSONRaw) Value() (driver.Value, error) {
	if len(j) == 0 {
		return nil, nil
	}
	return j.String(), nil
}

// Scan implements [sql.Scanner] interface to scan the provided value
// into the current JSONRaw instance.
func (j *JSONRaw) Scan(value any) error {
	var data []byte

	switch v := value.(type) {
	case nil:
		// leave data empty
	case []byte:
		if len(v) > 0 {
			data = v
		}
	case string:
		if v != "" {
			data = []byte(v)
		}
	case JSONRaw:
		if len(v) > 0 {
			data = []byte(v)
		}
	default:
		// anything else gets json encoded first
		encoded, err := json.Marshal(v)
		if err != nil {
			return err
		}
		data = encoded
	}

	return j.UnmarshalJSON(data)
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/types/json_map.go | tools/types/json_map.go | package types
import (
"database/sql/driver"
"encoding/json"
"fmt"
)
// JSONMap defines a map that is safe for json and db read/write.
type JSONMap[T any] map[string]T

// internal alias to prevent recursion during marshalization
// (package-level for consistency with jsonArrayAlias; some Go toolchains
// reject type declarations referencing type parameters inside generic methods).
type jsonMapAlias[T any] JSONMap[T]

// MarshalJSON implements the [json.Marshaler] interface.
//
// A nil map is serialized as an empty json object ("{}").
func (m JSONMap[T]) MarshalJSON() ([]byte, error) {
	// initialize an empty map to ensure that `{}` is returned as json
	if m == nil {
		m = JSONMap[T]{}
	}

	return json.Marshal(jsonMapAlias[T](m))
}

// String returns the string representation of the current json map.
func (m JSONMap[T]) String() string {
	v, _ := m.MarshalJSON()
	return string(v)
}

// Get retrieves a single value from the current JSONMap[T]
// (returns the zero value of T for a missing key or a nil map).
//
// This helper was added primarily to assist the goja integration since custom map types
// don't have direct access to the map keys (https://pkg.go.dev/github.com/dop251/goja#hdr-Maps_with_methods).
func (m JSONMap[T]) Get(key string) T {
	return m[key]
}

// Set sets a single value in the current JSONMap[T]
// (the map must be initialized, otherwise it panics like any nil map write).
//
// This helper was added primarily to assist the goja integration since custom map types
// don't have direct access to the map keys (https://pkg.go.dev/github.com/dop251/goja#hdr-Maps_with_methods).
func (m JSONMap[T]) Set(key string, value T) {
	m[key] = value
}

// Value implements the [driver.Valuer] interface.
func (m JSONMap[T]) Value() (driver.Value, error) {
	data, err := json.Marshal(m)

	return string(data), err
}

// Scan implements [sql.Scanner] interface to scan the provided value
// into the current JSONMap[T] instance.
//
// Nil and empty values are scanned as an empty json object ("{}").
func (m *JSONMap[T]) Scan(value any) error {
	var data []byte

	switch v := value.(type) {
	case nil:
		// no cast needed
	case []byte:
		data = v
	case string:
		data = []byte(v)
	default:
		return fmt.Errorf("failed to unmarshal JSONMap[T] value: %q", value)
	}

	if len(data) == 0 {
		data = []byte("{}")
	}

	return json.Unmarshal(data, m)
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/types/json_raw_test.go | tools/types/json_raw_test.go | package types_test
import (
"database/sql/driver"
"fmt"
"testing"
"github.com/pocketbase/pocketbase/tools/types"
)
func TestParseJSONRaw(t *testing.T) {
scenarios := []struct {
value any
expectError bool
expectJSON string
}{
{nil, false, `null`},
{``, false, `null`},
{[]byte{}, false, `null`},
{types.JSONRaw{}, false, `null`},
{`{}`, false, `{}`},
{`[]`, false, `[]`},
{123, false, `123`},
{`""`, false, `""`},
{`test`, false, `test`},
{`{"invalid"`, false, `{"invalid"`}, // treated as a byte casted string
{`{"test":1}`, false, `{"test":1}`},
{[]byte(`[1,2,3]`), false, `[1,2,3]`},
{[]int{1, 2, 3}, false, `[1,2,3]`},
{map[string]int{"test": 1}, false, `{"test":1}`},
}
for i, s := range scenarios {
t.Run(fmt.Sprintf("%d_%#v", i, s.value), func(t *testing.T) {
raw, parseErr := types.ParseJSONRaw(s.value)
hasErr := parseErr != nil
if hasErr != s.expectError {
t.Fatalf("Expected %v, got %v (%v)", s.expectError, hasErr, parseErr)
}
result, _ := raw.MarshalJSON()
if string(result) != s.expectJSON {
t.Fatalf("Expected %s, got %s", s.expectJSON, string(result))
}
})
}
}
func TestJSONRawString(t *testing.T) {
scenarios := []struct {
json types.JSONRaw
expected string
}{
{nil, `null`},
{types.JSONRaw{}, `null`},
{types.JSONRaw([]byte(`123`)), `123`},
{types.JSONRaw(`{"demo":123}`), `{"demo":123}`},
}
for i, s := range scenarios {
t.Run(fmt.Sprintf("%d_%s", i, s.expected), func(t *testing.T) {
result := s.json.String()
if result != s.expected {
t.Fatalf("Expected %q, got %q", s.expected, result)
}
})
}
}
func TestJSONRawMarshalJSON(t *testing.T) {
scenarios := []struct {
json types.JSONRaw
expected string
}{
{nil, `null`},
{types.JSONRaw{}, `null`},
{types.JSONRaw([]byte(`123`)), `123`},
{types.JSONRaw(`{"demo":123}`), `{"demo":123}`},
}
for i, s := range scenarios {
t.Run(fmt.Sprintf("%d_%s", i, s.expected), func(t *testing.T) {
result, err := s.json.MarshalJSON()
if err != nil {
t.Fatal(err)
}
if string(result) != s.expected {
t.Fatalf("Expected %q, got %q", s.expected, string(result))
}
})
}
}
func TestJSONRawUnmarshalJSON(t *testing.T) {
scenarios := []struct {
json []byte
expectString string
}{
{nil, `null`},
{[]byte{0, 1, 2}, "\x00\x01\x02"},
{[]byte("123"), "123"},
{[]byte("test"), "test"},
{[]byte(`{"test":123}`), `{"test":123}`},
}
for i, s := range scenarios {
t.Run(fmt.Sprintf("%d_%s", i, s.expectString), func(t *testing.T) {
raw := types.JSONRaw{}
err := raw.UnmarshalJSON(s.json)
if err != nil {
t.Fatal(err)
}
if raw.String() != s.expectString {
t.Fatalf("Expected %q, got %q", s.expectString, raw.String())
}
})
}
}
func TestJSONRawValue(t *testing.T) {
scenarios := []struct {
json types.JSONRaw
expected driver.Value
}{
{nil, nil},
{types.JSONRaw{}, nil},
{types.JSONRaw(``), nil},
{types.JSONRaw(`test`), `test`},
}
for i, s := range scenarios {
t.Run(fmt.Sprintf("%d_%#v", i, s.json), func(t *testing.T) {
result, err := s.json.Value()
if err != nil {
t.Fatal(err)
}
if result != s.expected {
t.Fatalf("Expected %s, got %v", s.expected, result)
}
})
}
}
func TestJSONRawScan(t *testing.T) {
scenarios := []struct {
value any
expectError bool
expectJSON string
}{
{nil, false, `null`},
{``, false, `null`},
{[]byte{}, false, `null`},
{types.JSONRaw{}, false, `null`},
{types.JSONRaw(`test`), false, `test`},
{`{}`, false, `{}`},
{`[]`, false, `[]`},
{123, false, `123`},
{`""`, false, `""`},
{`test`, false, `test`},
{`{"invalid"`, false, `{"invalid"`}, // treated as a byte casted string
{`{"test":1}`, false, `{"test":1}`},
{[]byte(`[1,2,3]`), false, `[1,2,3]`},
{[]int{1, 2, 3}, false, `[1,2,3]`},
{map[string]int{"test": 1}, false, `{"test":1}`},
}
for i, s := range scenarios {
t.Run(fmt.Sprintf("%d_%#v", i, s.value), func(t *testing.T) {
raw := types.JSONRaw{}
scanErr := raw.Scan(s.value)
hasErr := scanErr != nil
if hasErr != s.expectError {
t.Fatalf("Expected %v, got %v (%v)", s.expectError, hasErr, scanErr)
}
result, _ := raw.MarshalJSON()
if string(result) != s.expectJSON {
t.Fatalf("Expected %s, got %v", s.expectJSON, string(result))
}
})
}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/types/geo_point.go | tools/types/geo_point.go | package types
import (
"database/sql/driver"
"encoding/json"
"fmt"
)
// GeoPoint defines a struct for storing geo coordinates as serialized json object
// (e.g. {lon:0,lat:0}).
//
// Note: using object notation and not a plain array to avoid the confusion
// as there doesn't seem to be a fixed standard for the coordinates order.
type GeoPoint struct {
	Lon float64 `form:"lon" json:"lon"` // longitude
	Lat float64 `form:"lat" json:"lat"` // latitude
}

// String returns the string representation of the current GeoPoint instance.
func (p GeoPoint) String() string {
	encoded, _ := json.Marshal(p)
	return string(encoded)
}

// AsMap implements [core.mapExtractor] and returns a value suitable
// to be used in an API rule expression.
func (p GeoPoint) AsMap() map[string]any {
	result := make(map[string]any, 2)
	result["lon"] = p.Lon
	result["lat"] = p.Lat
	return result
}

// Value implements the [driver.Valuer] interface.
func (p GeoPoint) Value() (driver.Value, error) {
	encoded, err := json.Marshal(p)
	return string(encoded), err
}

// Scan implements [sql.Scanner] interface to scan the provided value
// into the current GeoPoint instance.
//
// The value argument could be nil (no-op), another GeoPoint instance,
// map or serialized json object with lat-lon props.
func (p *GeoPoint) Scan(value any) error {
	var err error

	switch v := value.(type) {
	case nil:
		// no cast needed
	case *GeoPoint:
		p.Lon, p.Lat = v.Lon, v.Lat
	case GeoPoint:
		p.Lon, p.Lat = v.Lon, v.Lat
	case JSONRaw:
		if len(v) > 0 {
			err = json.Unmarshal(v, p)
		}
	case []byte:
		if len(v) > 0 {
			err = json.Unmarshal(v, p)
		}
	case string:
		if v != "" {
			err = json.Unmarshal([]byte(v), p)
		}
	default:
		// best-effort: round-trip any other value through json
		var raw []byte
		raw, err = json.Marshal(v)
		if err != nil {
			err = fmt.Errorf("unable to marshalize value for scanning: %w", err)
		} else {
			err = json.Unmarshal(raw, p)
		}
	}

	if err != nil {
		return fmt.Errorf("[GeoPoint] unable to scan value %v: %w", value, err)
	}

	return nil
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/types/datetime_test.go | tools/types/datetime_test.go | package types_test
import (
"fmt"
"strings"
"testing"
"time"
"github.com/pocketbase/pocketbase/tools/types"
)
func TestNowDateTime(t *testing.T) {
now := time.Now().UTC().Format("2006-01-02 15:04:05") // without ms part for test consistency
dt := types.NowDateTime()
if !strings.Contains(dt.String(), now) {
t.Fatalf("Expected %q, got %q", now, dt.String())
}
}
func TestParseDateTime(t *testing.T) {
nowTime := time.Now().UTC()
nowDateTime, _ := types.ParseDateTime(nowTime)
nowStr := nowTime.Format(types.DefaultDateLayout)
scenarios := []struct {
value any
expected string
}{
{nil, ""},
{"", ""},
{"invalid", ""},
{nowDateTime, nowStr},
{nowTime, nowStr},
{1641024040, "2022-01-01 08:00:40.000Z"},
{int32(1641024040), "2022-01-01 08:00:40.000Z"},
{int64(1641024040), "2022-01-01 08:00:40.000Z"},
{uint(1641024040), "2022-01-01 08:00:40.000Z"},
{uint64(1641024040), "2022-01-01 08:00:40.000Z"},
{uint32(1641024040), "2022-01-01 08:00:40.000Z"},
{"2022-01-01 11:23:45.678", "2022-01-01 11:23:45.678Z"},
}
for i, s := range scenarios {
t.Run(fmt.Sprintf("%d_%#v", i, s.value), func(t *testing.T) {
dt, err := types.ParseDateTime(s.value)
if err != nil {
t.Fatalf("Failed to parse %v: %v", s.value, err)
}
if dt.String() != s.expected {
t.Fatalf("Expected %q, got %q", s.expected, dt.String())
}
})
}
}
func TestDateTimeTime(t *testing.T) {
str := "2022-01-01 11:23:45.678Z"
expected, err := time.Parse(types.DefaultDateLayout, str)
if err != nil {
t.Fatal(err)
}
dt, err := types.ParseDateTime(str)
if err != nil {
t.Fatal(err)
}
result := dt.Time()
if !expected.Equal(result) {
t.Fatalf("Expected time %v, got %v", expected, result)
}
}
func TestDateTimeAdd(t *testing.T) {
t.Parallel()
d1, _ := types.ParseDateTime("2024-01-01 10:00:00.123Z")
d2 := d1.Add(1 * time.Hour)
if d1.String() != "2024-01-01 10:00:00.123Z" {
t.Fatalf("Expected d1 to remain unchanged, got %s", d1.String())
}
expected := "2024-01-01 11:00:00.123Z"
if d2.String() != expected {
t.Fatalf("Expected d2 %s, got %s", expected, d2.String())
}
}
func TestDateTimeSub(t *testing.T) {
t.Parallel()
d1, _ := types.ParseDateTime("2024-01-01 10:00:00.123Z")
d2, _ := types.ParseDateTime("2024-01-01 10:30:00.123Z")
result := d2.Sub(d1)
if result.Minutes() != 30 {
t.Fatalf("Expected %v minutes diff, got %v", 30, result.Minutes())
}
}
func TestDateTimeAddDate(t *testing.T) {
t.Parallel()
d1, _ := types.ParseDateTime("2024-01-01 10:00:00.123Z")
d2 := d1.AddDate(1, 2, 3)
if d1.String() != "2024-01-01 10:00:00.123Z" {
t.Fatalf("Expected d1 to remain unchanged, got %s", d1.String())
}
expected := "2025-03-04 10:00:00.123Z"
if d2.String() != expected {
t.Fatalf("Expected d2 %s, got %s", expected, d2.String())
}
}
func TestDateTimeAfter(t *testing.T) {
t.Parallel()
d1, _ := types.ParseDateTime("2024-01-01 10:00:00.123Z")
d2, _ := types.ParseDateTime("2024-01-02 10:00:00.123Z")
d3, _ := types.ParseDateTime("2024-01-03 10:00:00.123Z")
scenarios := []struct {
a types.DateTime
b types.DateTime
expect bool
}{
// d1
{d1, d1, false},
{d1, d2, false},
{d1, d3, false},
// d2
{d2, d1, true},
{d2, d2, false},
{d2, d3, false},
// d3
{d3, d1, true},
{d3, d2, true},
{d3, d3, false},
}
for i, s := range scenarios {
t.Run(fmt.Sprintf("after_%d", i), func(t *testing.T) {
if v := s.a.After(s.b); v != s.expect {
t.Fatalf("Expected %v, got %v", s.expect, v)
}
})
}
}
func TestDateTimeBefore(t *testing.T) {
t.Parallel()
d1, _ := types.ParseDateTime("2024-01-01 10:00:00.123Z")
d2, _ := types.ParseDateTime("2024-01-02 10:00:00.123Z")
d3, _ := types.ParseDateTime("2024-01-03 10:00:00.123Z")
scenarios := []struct {
a types.DateTime
b types.DateTime
expect bool
}{
// d1
{d1, d1, false},
{d1, d2, true},
{d1, d3, true},
// d2
{d2, d1, false},
{d2, d2, false},
{d2, d3, true},
// d3
{d3, d1, false},
{d3, d2, false},
{d3, d3, false},
}
for i, s := range scenarios {
t.Run(fmt.Sprintf("before_%d", i), func(t *testing.T) {
if v := s.a.Before(s.b); v != s.expect {
t.Fatalf("Expected %v, got %v", s.expect, v)
}
})
}
}
func TestDateTimeCompare(t *testing.T) {
t.Parallel()
d1, _ := types.ParseDateTime("2024-01-01 10:00:00.123Z")
d2, _ := types.ParseDateTime("2024-01-02 10:00:00.123Z")
d3, _ := types.ParseDateTime("2024-01-03 10:00:00.123Z")
scenarios := []struct {
a types.DateTime
b types.DateTime
expect int
}{
// d1
{d1, d1, 0},
{d1, d2, -1},
{d1, d3, -1},
// d2
{d2, d1, 1},
{d2, d2, 0},
{d2, d3, -1},
// d3
{d3, d1, 1},
{d3, d2, 1},
{d3, d3, 0},
}
for i, s := range scenarios {
t.Run(fmt.Sprintf("compare_%d", i), func(t *testing.T) {
if v := s.a.Compare(s.b); v != s.expect {
t.Fatalf("Expected %v, got %v", s.expect, v)
}
})
}
}
func TestDateTimeEqual(t *testing.T) {
t.Parallel()
d1, _ := types.ParseDateTime("2024-01-01 10:00:00.123Z")
d2, _ := types.ParseDateTime("2024-01-01 10:00:00.123Z")
d3, _ := types.ParseDateTime("2024-01-01 10:00:00.124Z")
scenarios := []struct {
a types.DateTime
b types.DateTime
expect bool
}{
{d1, d1, true},
{d1, d2, true},
{d1, d3, false},
}
for i, s := range scenarios {
t.Run(fmt.Sprintf("equal_%d", i), func(t *testing.T) {
if v := s.a.Equal(s.b); v != s.expect {
t.Fatalf("Expected %v, got %v", s.expect, v)
}
})
}
}
func TestDateTimeUnix(t *testing.T) {
scenarios := []struct {
date string
expected int64
}{
{"", -62135596800},
{"2022-01-01 11:23:45.678Z", 1641036225},
}
for i, s := range scenarios {
t.Run(fmt.Sprintf("%d_%s", i, s.date), func(t *testing.T) {
dt, err := types.ParseDateTime(s.date)
if err != nil {
t.Fatal(err)
}
v := dt.Unix()
if v != s.expected {
t.Fatalf("Expected %d, got %d", s.expected, v)
}
})
}
}
func TestDateTimeIsZero(t *testing.T) {
dt0 := types.DateTime{}
if !dt0.IsZero() {
t.Fatalf("Expected zero datatime, got %v", dt0)
}
dt1 := types.NowDateTime()
if dt1.IsZero() {
t.Fatalf("Expected non-zero datatime, got %v", dt1)
}
}
func TestDateTimeString(t *testing.T) {
dt0 := types.DateTime{}
if dt0.String() != "" {
t.Fatalf("Expected empty string for zer datetime, got %q", dt0.String())
}
expected := "2022-01-01 11:23:45.678Z"
dt1, _ := types.ParseDateTime(expected)
if dt1.String() != expected {
t.Fatalf("Expected %q, got %v", expected, dt1)
}
}
func TestDateTimeMarshalJSON(t *testing.T) {
scenarios := []struct {
date string
expected string
}{
{"", `""`},
{"2022-01-01 11:23:45.678", `"2022-01-01 11:23:45.678Z"`},
}
for i, s := range scenarios {
t.Run(fmt.Sprintf("%d_%s", i, s.date), func(t *testing.T) {
dt, err := types.ParseDateTime(s.date)
if err != nil {
t.Fatal(err)
}
result, err := dt.MarshalJSON()
if err != nil {
t.Fatal(err)
}
if string(result) != s.expected {
t.Fatalf("Expected %q, got %q", s.expected, string(result))
}
})
}
}
func TestDateTimeUnmarshalJSON(t *testing.T) {
scenarios := []struct {
date string
expected string
}{
{"", ""},
{"invalid_json", ""},
{"'123'", ""},
{"2022-01-01 11:23:45.678", ""},
{`"2022-01-01 11:23:45.678"`, "2022-01-01 11:23:45.678Z"},
}
for i, s := range scenarios {
t.Run(fmt.Sprintf("%d_%s", i, s.date), func(t *testing.T) {
dt := types.DateTime{}
dt.UnmarshalJSON([]byte(s.date))
if dt.String() != s.expected {
t.Fatalf("Expected %q, got %q", s.expected, dt.String())
}
})
}
}
func TestDateTimeValue(t *testing.T) {
scenarios := []struct {
value any
expected string
}{
{"", ""},
{"invalid", ""},
{1641024040, "2022-01-01 08:00:40.000Z"},
{"2022-01-01 11:23:45.678", "2022-01-01 11:23:45.678Z"},
{types.NowDateTime(), types.NowDateTime().String()},
}
for i, s := range scenarios {
t.Run(fmt.Sprintf("%d_%s", i, s.value), func(t *testing.T) {
dt, _ := types.ParseDateTime(s.value)
result, err := dt.Value()
if err != nil {
t.Fatal(err)
}
if result != s.expected {
t.Fatalf("Expected %q, got %q", s.expected, result)
}
})
}
}
func TestDateTimeScan(t *testing.T) {
now := time.Now().UTC().Format("2006-01-02 15:04:05") // without ms part for test consistency
scenarios := []struct {
value any
expected string
}{
{nil, ""},
{"", ""},
{"invalid", ""},
{types.NowDateTime(), now},
{time.Now(), now},
{1.0, ""},
{1641024040, "2022-01-01 08:00:40.000Z"},
{"2022-01-01 11:23:45.678", "2022-01-01 11:23:45.678Z"},
}
for i, s := range scenarios {
t.Run(fmt.Sprintf("%d_%#v", i, s.value), func(t *testing.T) {
dt := types.DateTime{}
err := dt.Scan(s.value)
if err != nil {
t.Fatalf("Failed to parse %v: %v", s.value, err)
}
if !strings.Contains(dt.String(), s.expected) {
t.Fatalf("Expected %q, got %q", s.expected, dt.String())
}
})
}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/types/json_array_test.go | tools/types/json_array_test.go | package types_test
import (
"database/sql/driver"
"encoding/json"
"fmt"
"testing"
"github.com/pocketbase/pocketbase/tools/types"
)
func TestJSONArrayMarshalJSON(t *testing.T) {
scenarios := []struct {
json json.Marshaler
expected string
}{
{new(types.JSONArray[any]), "[]"},
{types.JSONArray[any]{}, `[]`},
{types.JSONArray[int]{1, 2, 3}, `[1,2,3]`},
{types.JSONArray[string]{"test1", "test2", "test3"}, `["test1","test2","test3"]`},
{types.JSONArray[any]{1, "test"}, `[1,"test"]`},
}
for i, s := range scenarios {
t.Run(fmt.Sprintf("%d_%#v", i, s.expected), func(t *testing.T) {
result, err := s.json.MarshalJSON()
if err != nil {
t.Fatal(err)
}
if string(result) != s.expected {
t.Fatalf("Expected %s, got %s", s.expected, result)
}
})
}
}
func TestJSONArrayString(t *testing.T) {
scenarios := []struct {
json fmt.Stringer
expected string
}{
{new(types.JSONArray[any]), "[]"},
{types.JSONArray[any]{}, `[]`},
{types.JSONArray[int]{1, 2, 3}, `[1,2,3]`},
{types.JSONArray[string]{"test1", "test2", "test3"}, `["test1","test2","test3"]`},
{types.JSONArray[any]{1, "test"}, `[1,"test"]`},
}
for i, s := range scenarios {
t.Run(fmt.Sprintf("%d_%#v", i, s.expected), func(t *testing.T) {
result := s.json.String()
if result != s.expected {
t.Fatalf("Expected\n%s\ngot\n%s", s.expected, result)
}
})
}
}
func TestJSONArrayValue(t *testing.T) {
scenarios := []struct {
json driver.Valuer
expected driver.Value
}{
{new(types.JSONArray[any]), `[]`},
{types.JSONArray[any]{}, `[]`},
{types.JSONArray[int]{1, 2, 3}, `[1,2,3]`},
{types.JSONArray[string]{"test1", "test2", "test3"}, `["test1","test2","test3"]`},
{types.JSONArray[any]{1, "test"}, `[1,"test"]`},
}
for i, s := range scenarios {
t.Run(fmt.Sprintf("%d_%#v", i, s.expected), func(t *testing.T) {
result, err := s.json.Value()
if err != nil {
t.Fatal(err)
}
if result != s.expected {
t.Fatalf("Expected %s, got %#v", s.expected, result)
}
})
}
}
func TestJSONArrayScan(t *testing.T) {
scenarios := []struct {
value any
expectError bool
expectJSON string
}{
{``, false, `[]`},
{[]byte{}, false, `[]`},
{nil, false, `[]`},
{123, true, `[]`},
{`""`, true, `[]`},
{`invalid_json`, true, `[]`},
{`"test"`, true, `[]`},
{`1,2,3`, true, `[]`},
{`[1, 2, 3`, true, `[]`},
{`[1, 2, 3]`, false, `[1,2,3]`},
{[]byte(`[1, 2, 3]`), false, `[1,2,3]`},
{`[1, "test"]`, false, `[1,"test"]`},
{`[]`, false, `[]`},
}
for i, s := range scenarios {
arr := types.JSONArray[any]{}
scanErr := arr.Scan(s.value)
hasErr := scanErr != nil
if hasErr != s.expectError {
t.Errorf("(%d) Expected %v, got %v (%v)", i, s.expectError, hasErr, scanErr)
continue
}
result, _ := arr.MarshalJSON()
if string(result) != s.expectJSON {
t.Errorf("(%d) Expected %s, got %v", i, s.expectJSON, string(result))
}
}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/types/json_array.go | tools/types/json_array.go | package types
import (
"database/sql/driver"
"encoding/json"
"fmt"
)
// JSONArray defines a slice that is safe for json and db read/write.
type JSONArray[T any] []T
// internal alias to prevent recursion during marshalization.
type jsonArrayAlias[T any] JSONArray[T]
// MarshalJSON implements the [json.Marshaler] interface.
func (m JSONArray[T]) MarshalJSON() ([]byte, error) {
// initialize an empty map to ensure that `[]` is returned as json
if m == nil {
m = JSONArray[T]{}
}
return json.Marshal(jsonArrayAlias[T](m))
}
// String returns the string representation of the current json array.
func (m JSONArray[T]) String() string {
v, _ := m.MarshalJSON()
return string(v)
}
// Value implements the [driver.Valuer] interface.
func (m JSONArray[T]) Value() (driver.Value, error) {
data, err := json.Marshal(m)
return string(data), err
}
// Scan implements [sql.Scanner] interface to scan the provided value
// into the current JSONArray[T] instance.
//
// Accepts nil, []byte and string values; any other type results in an error.
// nil and empty inputs are treated as an empty JSON array (`[]`).
func (m *JSONArray[T]) Scan(value any) error {
	var data []byte
	switch v := value.(type) {
	case nil:
		// no cast needed
	case []byte:
		data = v
	case string:
		data = []byte(v)
	default:
		return fmt.Errorf("failed to unmarshal JSONArray value: %q", value)
	}
	if len(data) == 0 {
		// normalize nil/empty input to an empty JSON array
		data = []byte("[]")
	}
	return json.Unmarshal(data, m)
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/security/random_test.go | tools/security/random_test.go | package security_test
import (
"fmt"
"regexp"
"slices"
"testing"
"github.com/pocketbase/pocketbase/tools/security"
)
// TestRandomString verifies the crypto/rand-backed generator via the shared helper.
func TestRandomString(t *testing.T) {
	testRandomString(t, security.RandomString)
}
// TestRandomStringWithAlphabet verifies the crypto/rand-backed generator with custom alphabets.
func TestRandomStringWithAlphabet(t *testing.T) {
	testRandomStringWithAlphabet(t, security.RandomStringWithAlphabet)
}
// TestPseudorandomString verifies the math/rand-backed generator via the shared helper.
func TestPseudorandomString(t *testing.T) {
	testRandomString(t, security.PseudorandomString)
}
// TestPseudorandomStringWithAlphabet verifies the math/rand-backed generator with custom alphabets.
func TestPseudorandomStringWithAlphabet(t *testing.T) {
	testRandomStringWithAlphabet(t, security.PseudorandomStringWithAlphabet)
}
// -------------------------------------------------------------------
// testRandomStringWithAlphabet generates 500 strings per scenario and checks
// length, allowed characters and (best effort, with up to 3 retries to avoid
// flakes) uniqueness of each generated string.
func testRandomStringWithAlphabet(t *testing.T, randomFunc func(n int, alphabet string) string) {
	scenarios := []struct {
		alphabet      string
		expectPattern string
	}{
		{"0123456789_", `[0-9_]+`},
		{"abcdef123", `[abcdef123]+`},
		{"!@#$%^&*()", `[\!\@\#\$\%\^\&\*\(\)]+`},
	}
	for i, s := range scenarios {
		t.Run(fmt.Sprintf("%d_%q", i, s.alphabet), func(t *testing.T) {
			generated := make([]string, 0, 500)
			length := 10
			for j := 0; j < 500; j++ {
				var run func(int)
				run = func(attempt int) {
					result := randomFunc(length, s.alphabet)
					if len(result) != length {
						t.Fatalf("(%d) Expected the length of the string to be %d, got %d", j, length, len(result))
					}
					reg := regexp.MustCompile(s.expectPattern)
					if match := reg.MatchString(result); !match {
						t.Fatalf("(%d) The generated string should have only %s characters, got %q", j, s.expectPattern, result)
					}
					if slices.Contains(generated, result) {
						// duplicates are statistically possible -> retry a few times before failing
						if attempt > 3 {
							t.Fatalf("(%d) Repeating random string - found %q in %q", j, result, generated)
						}
						// rerun
						run(attempt + 1)
						return
					}
					generated = append(generated, result)
				}
				run(1)
			}
		})
	}
}
// testRandomString is the default-alphabet ([a-zA-Z0-9]) variant of the
// helper above with the same length/charset/uniqueness checks.
func testRandomString(t *testing.T, randomFunc func(n int) string) {
	generated := make([]string, 0, 500)
	reg := regexp.MustCompile(`[a-zA-Z0-9]+`)
	length := 10
	for i := 0; i < 500; i++ {
		var run func(int)
		run = func(attempt int) {
			result := randomFunc(length)
			if len(result) != length {
				t.Fatalf("(%d) Expected the length of the string to be %d, got %d", i, length, len(result))
			}
			if match := reg.MatchString(result); !match {
				t.Fatalf("(%d) The generated string should have only [a-zA-Z0-9]+ characters, got %q", i, result)
			}
			if slices.Contains(generated, result) {
				// duplicates are statistically possible -> retry a few times before failing
				if attempt > 3 {
					t.Fatalf("(%d) Repeating random string - found %q in \n%v", i, result, generated)
				}
				// rerun
				run(attempt + 1)
				return
			}
			generated = append(generated, result)
		}
		run(1)
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/security/jwt_test.go | tools/security/jwt_test.go | package security_test
import (
"testing"
"time"
"github.com/golang-jwt/jwt/v5"
"github.com/pocketbase/pocketbase/tools/security"
)
// TestParseUnverifiedJWT checks that claims are extracted without signature
// verification but that exp/iat/nbf claim validation is still applied.
func TestParseUnverifiedJWT(t *testing.T) {
	// invalid formatted JWT
	result1, err1 := security.ParseUnverifiedJWT("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYW1lIjoidGVzdCJ9")
	if err1 == nil {
		t.Error("Expected error got nil")
	}
	if len(result1) > 0 {
		t.Error("Expected no parsed claims, got", result1)
	}
	// properly formatted JWT with INVALID claims
	// {"name": "test", "exp":1516239022}
	result2, err2 := security.ParseUnverifiedJWT("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYW1lIjoidGVzdCIsImV4cCI6MTUxNjIzOTAyMn0.xYHirwESfSEW3Cq2BL47CEASvD_p_ps3QCA54XtNktU")
	if err2 == nil {
		t.Error("Expected error got nil")
	}
	if len(result2) != 2 || result2["name"] != "test" {
		t.Errorf("Expected to have 2 claims, got %v", result2)
	}
	// properly formatted JWT with VALID claims (missing exp)
	// {"name": "test"}
	result3, err3 := security.ParseUnverifiedJWT("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYW1lIjoidGVzdCJ9.ml0QsTms3K9wMygTu41ZhKlTyjmW9zHQtoS8FUsCCjU")
	if err3 != nil {
		t.Error("Expected nil, got", err3)
	}
	if len(result3) != 1 || result3["name"] != "test" {
		t.Errorf("Expected to have 1 claim, got %v", result3)
	}
	// properly formatted JWT with VALID claims (valid exp)
	// {"name": "test", "exp": 2524604461}
	result4, err4 := security.ParseUnverifiedJWT("eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJuYW1lIjoidGVzdCIsImV4cCI6MjUyNDYwNDQ2MX0.VIEO73GP5QRQOSfHgQhaqeuYqcx59vL3xlxmFP-fytQ")
	if err4 != nil {
		t.Error("Expected nil, got", err4)
	}
	if len(result4) != 2 || result4["name"] != "test" {
		t.Errorf("Expected to have 2 claims, got %v", result4)
	}
}
// TestParseJWT covers the signature-verified parse: wrong format, wrong
// secret, expired claims, and the happy path.
func TestParseJWT(t *testing.T) {
	scenarios := []struct {
		name         string
		token        string
		secret       string
		expectError  bool
		expectClaims jwt.MapClaims
	}{
		{
			"invalid formatted JWT",
			"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYW1lIjoidGVzdCJ9",
			"test",
			true,
			nil,
		},
		{
			"properly formatted JWT with INVALID claims and INVALID secret",
			// {"name": "test", "exp": 1516239022}
			"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYW1lIjoidGVzdCIsImV4cCI6MTUxNjIzOTAyMn0.xYHirwESfSEW3Cq2BL47CEASvD_p_ps3QCA54XtNktU",
			"invalid",
			true,
			nil,
		},
		{
			"properly formatted JWT with INVALID claims and VALID secret",
			// {"name": "test", "exp": 1516239022}
			"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYW1lIjoidGVzdCIsImV4cCI6MTUxNjIzOTAyMn0.xYHirwESfSEW3Cq2BL47CEASvD_p_ps3QCA54XtNktU",
			"test",
			true,
			nil,
		},
		{
			"properly formatted JWT with VALID claims and INVALID secret",
			// {"name": "test", "exp": 2524604461}
			"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJuYW1lIjoidGVzdCIsImV4cCI6MjUyNDYwNDQ2MX0.VIEO73GP5QRQOSfHgQhaqeuYqcx59vL3xlxmFP-fytQ",
			"invalid",
			true,
			nil,
		},
		{
			"properly formatted JWT with VALID claims and VALID secret",
			// {"name": "test", "exp": 2524604461}
			"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJuYW1lIjoidGVzdCIsImV4cCI6MjUyNDYwNDQ2MX0.VIEO73GP5QRQOSfHgQhaqeuYqcx59vL3xlxmFP-fytQ",
			"test",
			false,
			jwt.MapClaims{"name": "test", "exp": 2524604461.0},
		},
		{
			"properly formatted JWT with VALID claims (without exp) and VALID secret",
			// {"name": "test"}
			"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYW1lIjoidGVzdCJ9.ml0QsTms3K9wMygTu41ZhKlTyjmW9zHQtoS8FUsCCjU",
			"test",
			false,
			jwt.MapClaims{"name": "test"},
		},
	}
	for _, s := range scenarios {
		t.Run(s.name, func(t *testing.T) {
			result, err := security.ParseJWT(s.token, s.secret)
			hasErr := err != nil
			if hasErr != s.expectError {
				t.Fatalf("Expected hasErr %v, got %v (%v)", s.expectError, hasErr, err)
			}
			if len(result) != len(s.expectClaims) {
				t.Fatalf("Expected %v claims got %v", s.expectClaims, result)
			}
			for k, v := range s.expectClaims {
				v2, ok := result[k]
				if !ok {
					t.Fatalf("Missing expected claim %q", k)
				}
				if v != v2 {
					t.Fatalf("Expected %v for %q claim, got %v", v, k, v2)
				}
			}
		})
	}
}
// TestNewJWT generates tokens and verifies them via ParseJWT, including
// the auto-added "exp" claim and expiration behavior on zero duration.
func TestNewJWT(t *testing.T) {
	scenarios := []struct {
		name        string
		claims      jwt.MapClaims
		key         string
		duration    time.Duration
		expectError bool
	}{
		{"empty, zero duration", jwt.MapClaims{}, "", 0, true},
		{"empty, 10 seconds duration", jwt.MapClaims{}, "", 10 * time.Second, false},
		{"non-empty, 10 seconds duration", jwt.MapClaims{"name": "test"}, "test", 10 * time.Second, false},
	}
	for _, s := range scenarios {
		t.Run(s.name, func(t *testing.T) {
			token, tokenErr := security.NewJWT(s.claims, s.key, s.duration)
			if tokenErr != nil {
				t.Fatalf("Expected NewJWT to succeed, got error %v", tokenErr)
			}
			claims, parseErr := security.ParseJWT(token, s.key)
			hasParseErr := parseErr != nil
			if hasParseErr != s.expectError {
				t.Fatalf("Expected hasParseErr to be %v, got %v (%v)", s.expectError, hasParseErr, parseErr)
			}
			if s.expectError {
				return
			}
			if _, ok := claims["exp"]; !ok {
				t.Fatalf("Missing required claim exp, got %v", claims)
			}
			// clear exp claim to match with the scenario ones
			delete(claims, "exp")
			if len(claims) != len(s.claims) {
				t.Fatalf("Expected %v claims, got %v", s.claims, claims)
			}
			for k, v := range claims {
				if v != s.claims[k] {
					t.Fatalf("Expected %v for %q claim, got %v", s.claims[k], k, v)
				}
			}
		})
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/security/crypto_test.go | tools/security/crypto_test.go | package security_test
import (
"fmt"
"testing"
"github.com/pocketbase/pocketbase/tools/security"
)
// TestS256Challenge checks the PKCE S256 challenge against known vectors
// (padding-stripped base64url of the sha256 digest).
func TestS256Challenge(t *testing.T) {
	scenarios := []struct {
		code     string
		expected string
	}{
		{"", "47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU"},
		{"123", "pmWkWSBCL51Bfkhn79xPuKBKHz__H6B-mY6G9_eieuM"},
	}
	for _, s := range scenarios {
		t.Run(s.code, func(t *testing.T) {
			result := security.S256Challenge(s.code)
			if result != s.expected {
				t.Fatalf("Expected %q, got %q", s.expected, result)
			}
		})
	}
}
// TestMD5 checks the hex encoded md5 digest against known vectors.
func TestMD5(t *testing.T) {
	scenarios := []struct {
		code     string
		expected string
	}{
		{"", "d41d8cd98f00b204e9800998ecf8427e"},
		{"123", "202cb962ac59075b964b07152d234b70"},
	}
	for _, s := range scenarios {
		t.Run(s.code, func(t *testing.T) {
			result := security.MD5(s.code)
			if result != s.expected {
				t.Fatalf("Expected %v, got %v", s.expected, result)
			}
		})
	}
}
// TestSHA256 checks the hex encoded sha256 digest against known vectors.
func TestSHA256(t *testing.T) {
	scenarios := []struct {
		code     string
		expected string
	}{
		{"", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
		{"123", "a665a45920422f9d417e4867efdc4fb8a04a1f3fff1fa07e998e86f7f7a27ae3"},
	}
	for _, s := range scenarios {
		t.Run(s.code, func(t *testing.T) {
			result := security.SHA256(s.code)
			if result != s.expected {
				t.Fatalf("Expected %v, got %v", s.expected, result)
			}
		})
	}
}
// TestSHA512 checks the hex encoded sha512 digest against known vectors.
func TestSHA512(t *testing.T) {
	scenarios := []struct {
		code     string
		expected string
	}{
		{"", "cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e"},
		{"123", "3c9909afec25354d551dae21590bb26e38d53f2173b8d3dc3eee4c047e7ab1c1eb8b85103e3be7ba613b31bb5c9c36214dc9f14a42fd7a2fdb84856bca5c44c2"},
	}
	for _, s := range scenarios {
		t.Run(s.code, func(t *testing.T) {
			result := security.SHA512(s.code)
			if result != s.expected {
				t.Fatalf("Expected %v, got %v", s.expected, result)
			}
		})
	}
}
// TestHS256 checks the hex encoded HMAC-SHA256 against known text/secret vectors.
func TestHS256(t *testing.T) {
	scenarios := []struct {
		text     string
		secret   string
		expected string
	}{
		{" ", "test", "9fb4e4a12d50728683a222b4fc466a69ee977332cfcdd6b9ebb44c7121dbd99f"},
		{" ", "test2", "d792417a504716e22805d940125ec12e68e8cb18fc84674703bd96c59f1e1228"},
		{"hello", "test", "f151ea24bda91a18e89b8bb5793ef324b2a02133cce15a28a719acbd2e58a986"},
		{"hello", "test2", "16436e8dcbf3d7b5b0455573b27e6372699beb5bfe94e6a2a371b14b4ae068f4"},
	}
	for i, s := range scenarios {
		t.Run(fmt.Sprintf("%d-%s", i, s.text), func(t *testing.T) {
			result := security.HS256(s.text, s.secret)
			if result != s.expected {
				t.Fatalf("Expected \n%v, \ngot \n%v", s.expected, result)
			}
		})
	}
}
// TestHS512 checks the hex encoded HMAC-SHA512 against known text/secret vectors.
func TestHS512(t *testing.T) {
	scenarios := []struct {
		text     string
		secret   string
		expected string
	}{
		{" ", "test", "eb3bdb0352c95c38880c1f645fc7e1d1332644f938f50de0d73876e42d6f302e599bb526531ba79940e8b314369aaef3675322d8d851f9fc6ea9ed121286d196"},
		{" ", "test2", "8b69e84e9252af78ae8b1c4bed3c9f737f69a3df33064cfbefe76b36d19d1827285e543cdf066cdc8bd556cc0cd0e212d52e9c12a50cd16046181ff127f4cf7f"},
		{"hello", "test", "44f280e11103e295c26cd61dd1cdd8178b531b860466867c13b1c37a26b6389f8af110efbe0bb0717b9d9c87f6fe1c97b3b1690936578890e5669abf279fe7fd"},
		{"hello", "test2", "d7f10b1b66941b20817689b973ca9dfc971090e28cfb8becbddd6824569b323eca6a0cdf2c387aa41e15040007dca5a011dd4e4bb61cfd5011aa7354d866f6ef"},
	}
	for i, s := range scenarios {
		t.Run(fmt.Sprintf("%d-%q", i, s.text), func(t *testing.T) {
			result := security.HS512(s.text, s.secret)
			if result != s.expected {
				t.Fatalf("Expected \n%v, \ngot \n%v", s.expected, result)
			}
		})
	}
}
// TestEqual checks the constant-time string comparison helper.
func TestEqual(t *testing.T) {
	scenarios := []struct {
		hash1    string
		hash2    string
		expected bool
	}{
		{"", "", true},
		{"abc", "abd", false},
		{"abc", "abc", true},
	}
	for _, s := range scenarios {
		t.Run(fmt.Sprintf("%qVS%q", s.hash1, s.hash2), func(t *testing.T) {
			result := security.Equal(s.hash1, s.hash2)
			if result != s.expected {
				t.Fatalf("Expected %v, got %v", s.expected, result)
			}
		})
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/security/encrypt_test.go | tools/security/encrypt_test.go | package security_test
import (
"fmt"
"testing"
"github.com/pocketbase/pocketbase/tools/security"
)
// TestEncrypt checks key validation and that the produced ciphertext
// round-trips back to the original data via Decrypt.
func TestEncrypt(t *testing.T) {
	scenarios := []struct {
		data        string
		key         string
		expectError bool
	}{
		{"", "", true},
		{"123", "test", true}, // key must be valid 32 char aes string
		{"123", "abcdabcdabcdabcdabcdabcdabcdabcd", false},
	}
	for i, s := range scenarios {
		t.Run(fmt.Sprintf("%d_%s", i, s.data), func(t *testing.T) {
			result, err := security.Encrypt([]byte(s.data), s.key)
			hasErr := err != nil
			if hasErr != s.expectError {
				t.Fatalf("Expected hasErr %v, got %v (%v)", s.expectError, hasErr, err)
			}
			if hasErr {
				if result != "" {
					t.Fatalf("Expected empty Encrypt result on error, got %q", result)
				}
				return
			}
			// try to decrypt
			decrypted, err := security.Decrypt(result, s.key)
			if err != nil || string(decrypted) != s.data {
				t.Fatalf("Expected decrypted value to match with the data input, got %q (%v)", decrypted, err)
			}
		})
	}
}
// TestDecrypt checks key validation, malformed base64 handling
// and decryption of a known good cipher text.
func TestDecrypt(t *testing.T) {
	scenarios := []struct {
		cipher       string
		key          string
		expectError  bool
		expectedData string
	}{
		{"", "", true, ""},
		{"123", "test", true, ""}, // key must be valid 32 char aes string
		{"8kcEqilvvYKYcfnSr0aSC54gmnQCsB02SaB8ATlnA==", "abcdabcdabcdabcdabcdabcdabcdabcd", true, ""}, // illegal base64 encoded cipherText
		{"8kcEqilvv+YKYcfnSr0aSC54gmnQCsB02SaB8ATlnA==", "abcdabcdabcdabcdabcdabcdabcdabcd", false, "123"},
	}
	for i, s := range scenarios {
		t.Run(fmt.Sprintf("%d_%s", i, s.key), func(t *testing.T) {
			result, err := security.Decrypt(s.cipher, s.key)
			hasErr := err != nil
			if hasErr != s.expectError {
				t.Fatalf("Expected hasErr %v, got %v (%v)", s.expectError, hasErr, err)
			}
			if hasErr {
				return
			}
			if str := string(result); str != s.expectedData {
				t.Fatalf("Expected %q, got %q", s.expectedData, str)
			}
		})
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/security/encrypt.go | tools/security/encrypt.go | package security
import (
	"crypto/aes"
	"crypto/cipher"
	crand "crypto/rand"
	"encoding/base64"
	"errors"
	"io"
)
// Encrypt encrypts "data" with the specified "key" (must be valid 32 char AES key).
//
// This method uses AES-256-GCM block cypher mode and returns the result
// as base64 encoded string with the random nonce prepended to the cipher text.
func Encrypt(data []byte, key string) (string, error) {
	aesCipher, err := aes.NewCipher([]byte(key))
	if err != nil {
		return "", err
	}

	gcm, err := cipher.NewGCM(aesCipher)
	if err != nil {
		return "", err
	}

	// generate a cryptographically secure random nonce
	nonce := make([]byte, gcm.NonceSize())
	if _, err = io.ReadFull(crand.Reader, nonce); err != nil {
		return "", err
	}

	// prepend the nonce to the sealed data so that Decrypt can extract it back
	sealed := gcm.Seal(nonce, nonce, data, nil)

	return base64.StdEncoding.EncodeToString(sealed), nil
}
// Decrypt decrypts encrypted text with key (must be valid 32 chars AES key).
//
// This method uses AES-256-GCM block cypher mode and expects the nonce
// to be prepended to the cipher text (as produced by Encrypt).
func Decrypt(cipherText string, key string) ([]byte, error) {
	block, err := aes.NewCipher([]byte(key))
	if err != nil {
		return nil, err
	}

	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}

	cipherByte, err := base64.StdEncoding.DecodeString(cipherText)
	if err != nil {
		return nil, err
	}

	nonceSize := gcm.NonceSize()

	// guard against malformed/truncated input that would otherwise
	// cause a slice bounds panic when extracting the nonce below
	if len(cipherByte) < nonceSize {
		return nil, errors.New("the decoded cipher text is shorter than the expected nonce size")
	}

	nonce, cipherByteClean := cipherByte[:nonceSize], cipherByte[nonceSize:]

	return gcm.Open(nil, nonce, cipherByteClean, nil)
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/security/random_by_regex_test.go | tools/security/random_by_regex_test.go | package security_test
import (
"fmt"
"regexp"
"regexp/syntax"
"slices"
"testing"
"github.com/pocketbase/pocketbase/tools/security"
)
// TestRandomStringByRegex generates strings for various patterns/flags and
// checks flag errors, that the output matches the source pattern and
// (best effort, with retries) that outputs are not repeating.
func TestRandomStringByRegex(t *testing.T) {
	generated := []string{}
	scenarios := []struct {
		pattern     string
		flags       []syntax.Flags
		expectError bool
	}{
		{``, nil, true},
		{`test`, nil, false},
		{`\d+`, []syntax.Flags{syntax.POSIX}, true},
		{`\d+`, nil, false},
		{`\d*`, nil, false},
		{`\d{1,20}`, nil, false},
		{`\d{5}`, nil, false},
		{`\d{0,}-abc`, nil, false},
		{`[a-zA-Z_]*`, nil, false},
		{`[^a-zA-Z]{5,30}`, nil, false},
		{`\w+_abc`, nil, false},
		{`[2-9]{10}-\w+`, nil, false},
		{`(a|b|c)`, nil, false},
	}
	for i, s := range scenarios {
		t.Run(fmt.Sprintf("%d_%q", i, s.pattern), func(t *testing.T) {
			var run func(int)
			run = func(attempt int) {
				str, err := security.RandomStringByRegex(s.pattern, s.flags...)
				hasErr := err != nil
				if hasErr != s.expectError {
					t.Fatalf("Expected hasErr %v, got %v (%v)", s.expectError, hasErr, err)
				}
				if hasErr {
					return
				}
				r, err := regexp.Compile(s.pattern)
				if err != nil {
					t.Fatal(err)
				}
				if !r.Match([]byte(str)) {
					t.Fatalf("Expected %q to match pattern %v", str, s.pattern)
				}
				if slices.Contains(generated, str) {
					// duplicates are statistically possible -> retry a few times before failing
					if attempt > 3 {
						t.Fatalf("The generated string %q already exists in\n%v", str, generated)
					}
					// rerun
					run(attempt + 1)
					return
				}
				generated = append(generated, str)
			}
			run(1)
		})
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/security/jwt.go | tools/security/jwt.go | package security
import (
"errors"
"time"
"github.com/golang-jwt/jwt/v5"
)
// ParseUnverifiedJWT parses JWT and returns its claims
// but DOES NOT verify the signature.
//
// It verifies only the exp, iat and nbf claims.
func ParseUnverifiedJWT(token string) (jwt.MapClaims, error) {
	claims := jwt.MapClaims{}

	parser := &jwt.Parser{}
	if _, _, err := parser.ParseUnverified(token, claims); err != nil {
		return claims, err
	}

	// validate only the time related claims
	err := jwt.NewValidator(jwt.WithIssuedAt()).Validate(claims)

	return claims, err
}
// ParseJWT verifies and parses JWT and returns its claims.
//
// Only HS256 signed tokens are accepted.
func ParseJWT(token string, verificationKey string) (jwt.MapClaims, error) {
	parser := jwt.NewParser(jwt.WithValidMethods([]string{"HS256"}))

	parsedToken, err := parser.Parse(token, func(t *jwt.Token) (any, error) {
		return []byte(verificationKey), nil
	})
	if err != nil {
		return nil, err
	}

	claims, ok := parsedToken.Claims.(jwt.MapClaims)
	if !ok || !parsedToken.Valid {
		return nil, errors.New("unable to parse token")
	}

	return claims, nil
}
// NewJWT generates and returns new HS256 signed JWT.
//
// An "exp" claim derived from duration is set automatically,
// unless the payload explicitly provides its own "exp".
func NewJWT(payload jwt.MapClaims, signingKey string, duration time.Duration) (string, error) {
	claims := make(jwt.MapClaims, len(payload)+1)
	claims["exp"] = time.Now().Add(duration).Unix()
	for k, v := range payload {
		claims[k] = v // payload entries (incl. a custom "exp") take precedence
	}

	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)

	return token.SignedString([]byte(signingKey))
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/security/crypto.go | tools/security/crypto.go | package security
import (
"crypto/hmac"
"crypto/md5"
"crypto/sha256"
"crypto/sha512"
"crypto/subtle"
"encoding/base64"
"fmt"
"strings"
)
// S256Challenge creates base64 encoded sha256 challenge string derived from code.
// The padding of the result base64 string is stripped per [RFC 7636].
//
// [RFC 7636]: https://datatracker.ietf.org/doc/html/rfc7636#section-4.2
func S256Challenge(code string) string {
	digest := sha256.Sum256([]byte(code))
	encoded := base64.URLEncoding.EncodeToString(digest[:])
	return strings.TrimRight(encoded, "=")
}
// MD5 creates md5 hash from the provided plain text
// and returns it as lowercase hex encoded string.
func MD5(text string) string {
	digest := md5.Sum([]byte(text))
	return fmt.Sprintf("%x", digest[:])
}
// SHA256 creates sha256 hash as defined in FIPS 180-4 from the provided text
// and returns it as lowercase hex encoded string.
func SHA256(text string) string {
	digest := sha256.Sum256([]byte(text))
	return fmt.Sprintf("%x", digest[:])
}
// SHA512 creates sha512 hash as defined in FIPS 180-4 from the provided text
// and returns it as lowercase hex encoded string.
func SHA512(text string) string {
	digest := sha512.Sum512([]byte(text))
	return fmt.Sprintf("%x", digest[:])
}
// HS256 creates a HMAC hash with sha256 digest algorithm
// and returns it as lowercase hex encoded string.
func HS256(text string, secret string) string {
	mac := hmac.New(sha256.New, []byte(secret))
	mac.Write([]byte(text))
	return fmt.Sprintf("%x", mac.Sum(nil))
}
// HS512 creates a HMAC hash with sha512 digest algorithm
// and returns it as lowercase hex encoded string.
func HS512(text string, secret string) string {
	mac := hmac.New(sha512.New, []byte(secret))
	mac.Write([]byte(text))
	return fmt.Sprintf("%x", mac.Sum(nil))
}
// Equal compares two hash strings for equality without leaking timing information.
//
// It is a thin wrapper over [subtle.ConstantTimeCompare] (which also
// compares the lengths of the two inputs).
func Equal(hash1 string, hash2 string) bool {
	return subtle.ConstantTimeCompare([]byte(hash1), []byte(hash2)) == 1
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/security/random_by_regex.go | tools/security/random_by_regex.go | package security
import (
cryptoRand "crypto/rand"
"errors"
"fmt"
"math/big"
"regexp/syntax"
"strings"
)
// defaultMaxRepeat is the implicit upper bound applied to open-ended
// quantifiers like `*` and `+`.
const defaultMaxRepeat = 6
// anyCharNotNLPairs defines the inclusive rune ranges (A-Z, a-z, 0-9)
// used when generating a character for the "any char" regex operators.
var anyCharNotNLPairs = []rune{'A', 'Z', 'a', 'z', '0', '9'}
// RandomStringByRegex generates a random string matching the regex pattern.
// If optFlags is not set, fallbacks to [syntax.Perl].
//
// NB! While the source of the randomness comes from [crypto/rand] this method
// is not recommended to be used on its own in critical secure contexts because
// the generated length could vary too much on the used pattern and may not be
// as secure as simply calling [security.RandomString].
// If you still insist on using it for such purposes, consider at least
// a large enough minimum length for the generated string, e.g. `[a-z0-9]{30}`.
//
// This function is inspired by github.com/pipe01/revregexp, github.com/lucasjones/reggen and other similar packages.
func RandomStringByRegex(pattern string, optFlags ...syntax.Flags) (string, error) {
	flags := syntax.Perl
	if len(optFlags) > 0 {
		flags = 0
		for _, f := range optFlags {
			flags |= f
		}
	}

	parsed, err := syntax.Parse(pattern, flags)
	if err != nil {
		return "", err
	}

	sb := new(strings.Builder)
	if err := writeRandomStringByRegex(parsed, sb); err != nil {
		return "", err
	}

	return sb.String(), nil
}
// writeRandomStringByRegex walks the parsed regex tree and writes a random
// string satisfying it into sb, dispatching on the node operator.
//
// Unsupported operators result in an error.
func writeRandomStringByRegex(r *syntax.Regexp, sb *strings.Builder) error {
	// https://pkg.go.dev/regexp/syntax#Op
	switch r.Op {
	case syntax.OpCharClass:
		// pick a random rune from the class ranges (r.Rune holds [start, end] pairs)
		c, err := randomRuneFromPairs(r.Rune)
		if err != nil {
			return err
		}
		_, err = sb.WriteRune(c)
		return err
	case syntax.OpAnyChar, syntax.OpAnyCharNotNL:
		// restrict "any char" to the alphanumeric ranges
		c, err := randomRuneFromPairs(anyCharNotNLPairs)
		if err != nil {
			return err
		}
		_, err = sb.WriteRune(c)
		return err
	case syntax.OpAlternate:
		// pick one random alternative branch
		idx, err := randomNumber(len(r.Sub))
		if err != nil {
			return err
		}
		return writeRandomStringByRegex(r.Sub[idx], sb)
	case syntax.OpConcat:
		// generate each sub-expression in order
		var err error
		for _, sub := range r.Sub {
			err = writeRandomStringByRegex(sub, sb)
			if err != nil {
				break
			}
		}
		return err
	case syntax.OpRepeat:
		return repeatRandomStringByRegex(r.Sub[0], sb, r.Min, r.Max)
	case syntax.OpQuest:
		return repeatRandomStringByRegex(r.Sub[0], sb, 0, 1)
	case syntax.OpPlus:
		return repeatRandomStringByRegex(r.Sub[0], sb, 1, -1)
	case syntax.OpStar:
		return repeatRandomStringByRegex(r.Sub[0], sb, 0, -1)
	case syntax.OpCapture:
		return writeRandomStringByRegex(r.Sub[0], sb)
	case syntax.OpLiteral:
		_, err := sb.WriteString(string(r.Rune))
		return err
	default:
		return fmt.Errorf("unsupported pattern operator %d", r.Op)
	}
}
// repeatRandomStringByRegex writes between min and max (inclusive) random
// repetitions of the provided sub-expression into sb.
//
// A negative max (open-ended quantifiers like `*` and `+`) is capped
// to defaultMaxRepeat.
func repeatRandomStringByRegex(r *syntax.Regexp, sb *strings.Builder, min int, max int) error {
	if max < 0 {
		max = defaultMaxRepeat
	}

	if max < min {
		max = min
	}

	n := min
	if max != min {
		// sample from [0, max-min] so that the upper bound is reachable
		// (previously randomNumber(max-min) sampled [0, max-min), meaning
		// e.g. `a?` could never produce "a" and `a{1,3}` never "aaa")
		randRange, err := randomNumber(max - min + 1)
		if err != nil {
			return err
		}
		n += randRange
	}

	var err error
	for i := 0; i < n; i++ {
		err = writeRandomStringByRegex(r, sb)
		if err != nil {
			return err
		}
	}

	return nil
}
// randomRuneFromPairs returns a random rune drawn uniformly from the
// provided inclusive [start, end] rune range pairs.
//
// The pairs slice must contain an even number of elements and each
// range must satisfy start <= end.
func randomRuneFromPairs(pairs []rune) (rune, error) {
	if len(pairs)%2 != 0 {
		return 0, fmt.Errorf("invalid pairs slice: odd number of elements")
	}
	// Pre-calculate the cumulative size of all ranges to make the selection process more efficient.
	cumulativeSizes := make([]int, len(pairs)/2)
	totalRunes := 0
	for i := 0; i < len(pairs); i += 2 {
		start, end := pairs[i], pairs[i+1]
		if start > end {
			return 0, fmt.Errorf("invalid range: start '%c' > end '%c'", start, end)
		}
		totalRunes += int(end - start + 1)
		cumulativeSizes[i/2] = totalRunes
	}
	if totalRunes == 0 {
		return 0, errors.New("no runes to choose from")
	}
	// Select a random number in the range of total runes.
	runeNumber, err := randomNumber(totalRunes)
	if err != nil {
		return 0, fmt.Errorf("failed to generate random number: %w", err)
	}
	// Find which range the selected number falls into using the pre-calculated cumulative sizes.
	for i, size := range cumulativeSizes {
		if runeNumber < size {
			startRune := pairs[i*2]
			previousSize := 0
			if i > 0 {
				previousSize = cumulativeSizes[i-1]
			}
			// offset within the matched range
			return startRune + rune(runeNumber-previousSize), nil
		}
	}
	// This part should be unreachable if the logic is correct.
	// It indicates a bug in this function or in randomNumber.
	panic("unreachable: failed to find a rune")
}
// randomNumber returns a cryptographically random int in [0, maxSoft).
//
// maxSoft must be a positive number (previously a non-positive value
// caused [cryptoRand.Int] to panic, and a reader error caused a nil
// pointer dereference before the error could be returned).
func randomNumber(maxSoft int) (int, error) {
	if maxSoft <= 0 {
		return 0, errors.New("maxSoft must be a positive number")
	}

	n, err := cryptoRand.Int(cryptoRand.Reader, big.NewInt(int64(maxSoft)))
	if err != nil {
		return 0, err
	}

	return int(n.Int64()), nil
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/security/random.go | tools/security/random.go | package security
import (
cryptoRand "crypto/rand"
"math/big"
mathRand "math/rand/v2"
)
// defaultRandomAlphabet is the charset used by RandomString and
// PseudorandomString ([A-Za-z0-9], safe for URL-encoding).
const defaultRandomAlphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
// RandomString generates a cryptographically random string with the specified length.
//
// The generated string matches [A-Za-z0-9]+ and it's transparent to URL-encoding.
func RandomString(length int) string {
	return RandomStringWithAlphabet(length, defaultRandomAlphabet)
}
// RandomStringWithAlphabet generates a cryptographically random string
// with the specified length and characters set.
//
// It panics if for some reason rand.Int returns a non-nil error.
func RandomStringWithAlphabet(length int, alphabet string) string {
	result := make([]byte, length)
	alphabetSize := big.NewInt(int64(len(alphabet)))

	for i := 0; i < length; i++ {
		idx, err := cryptoRand.Int(cryptoRand.Reader, alphabetSize)
		if err != nil {
			panic(err)
		}
		result[i] = alphabet[idx.Int64()]
	}

	return string(result)
}
// PseudorandomString generates a pseudorandom string with the specified length.
//
// The generated string matches [A-Za-z0-9]+ and it's transparent to URL-encoding.
//
// For a cryptographically random string (but a little bit slower) use RandomString instead.
func PseudorandomString(length int) string {
	return PseudorandomStringWithAlphabet(length, defaultRandomAlphabet)
}
// PseudorandomStringWithAlphabet generates a pseudorandom string
// with the specified length and characters set.
//
// For a cryptographically random (but a little bit slower) use RandomStringWithAlphabet instead.
func PseudorandomStringWithAlphabet(length int, alphabet string) string {
b := make([]byte, length)
max := len(alphabet)
for i := range b {
b[i] = alphabet[mathRand.IntN(max)]
}
return string(b)
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/template/registry.go | tools/template/registry.go | // Package template is a thin wrapper around the standard html/template
// and text/template packages that implements a convenient registry to
// load and cache templates on the fly concurrently.
//
// It was created to assist the JSVM plugin HTML rendering, but could be used in other Go code.
//
// Example:
//
// registry := template.NewRegistry()
//
// html1, err := registry.LoadFiles(
// // the files set wil be parsed only once and then cached
// "layout.html",
// "content.html",
// ).Render(map[string]any{"name": "John"})
//
// html2, err := registry.LoadFiles(
// // reuse the already parsed and cached files set
// "layout.html",
// "content.html",
// ).Render(map[string]any{"name": "Jane"})
package template
import (
	"errors"
	"fmt"
	"html/template"
	"io/fs"
	"path/filepath"
	"strings"

	"github.com/pocketbase/pocketbase/tools/store"
)
// NewRegistry creates and initializes a new templates registry with
// some defaults (eg. global "raw" template function for unescaped HTML).
//
// Use the Registry.Load* methods to load templates into the registry.
func NewRegistry() *Registry {
	return &Registry{
		cache: store.New[string, *Renderer](nil),
		funcs: template.FuncMap{
			// "raw" marks a string as trusted HTML, bypassing the
			// default html/template auto-escaping
			"raw": func(str string) template.HTML {
				return template.HTML(str)
			},
		},
	}
}
// Registry defines a templates registry that is safe to be used by multiple goroutines.
//
// Use the Registry.Load* methods to load templates into the registry.
type Registry struct {
	// cache maps a load key (joined filenames, raw text or fs+patterns)
	// to its parsed Renderer
	cache *store.Store[string, *Renderer]
	// funcs holds the global template functions applied to every loaded template
	funcs template.FuncMap
}
// AddFuncs registers new global template functions.
//
// The key of each map entry is the function name that will be used in the templates.
// If a function with the map entry name already exists it will be replaced with the new one.
//
// The value of each map entry is a function that must have either a
// single return value, or two return values of which the second has type error.
//
// Example:
//
//	r.AddFuncs(map[string]any{
//	  "toUpper": func(str string) string {
//	      return strings.ToUppser(str)
//	  },
//	  ...
//	})
//
// NOTE(review): the funcs map is mutated without synchronization — presumably
// AddFuncs is intended to be called once during app bootstrap before any
// concurrent Load* calls; confirm before relying on it at runtime.
func (r *Registry) AddFuncs(funcs map[string]any) *Registry {
	for name, f := range funcs {
		r.funcs[name] = f
	}

	return r
}
// LoadFiles caches (if not already) the specified filenames set as a
// single template and returns a ready to use Renderer instance.
//
// There must be at least 1 filename specified, otherwise the returned
// Renderer will produce an error on Render.
func (r *Registry) LoadFiles(filenames ...string) *Renderer {
	if len(filenames) == 0 {
		// surface the misuse as a render error instead of panicking
		// with an index out of range on filenames[0] below
		return &Renderer{parseError: errors.New("missing template file(s) to load")}
	}

	key := strings.Join(filenames, ",")

	found := r.cache.Get(key)

	if found == nil {
		// parse and cache
		// (the template is named after the first file so that Funcs can be applied)
		tpl, err := template.New(filepath.Base(filenames[0])).Funcs(r.funcs).ParseFiles(filenames...)
		found = &Renderer{template: tpl, parseError: err}
		r.cache.Set(key, found)
	}

	return found
}
// LoadString caches (if not already) the specified inline string as a
// single template and returns a ready to use Renderer instance.
func (r *Registry) LoadString(text string) *Renderer {
	found := r.cache.Get(text)
	if found == nil {
		// parse and cache (using the text as key)
		tpl, err := template.New("").Funcs(r.funcs).Parse(text)
		found = &Renderer{template: tpl, parseError: err}
		r.cache.Set(text, found)
	}

	return found
}
// LoadFS caches (if not already) the specified fs and globPatterns
// pair as single template and returns a ready to use Renderer instance.
//
// There must be at least 1 file matching the provided globPattern(s)
// (note that most file names serves as glob patterns matching themselves).
func (r *Registry) LoadFS(fsys fs.FS, globPatterns ...string) *Renderer {
	// NOTE(review): the cache key relies on the fmt representation of fsys —
	// distinct fs.FS values with identical formatting would share an entry; confirm
	// the expected fs implementations format uniquely.
	key := fmt.Sprintf("%v%v", fsys, globPatterns)

	found := r.cache.Get(key)

	if found == nil {
		// find the first file to use as template name (it is required when specifying Funcs)
		var firstFilename string
		if len(globPatterns) > 0 {
			list, _ := fs.Glob(fsys, globPatterns[0])
			if len(list) > 0 {
				firstFilename = filepath.Base(list[0])
			}
		}

		tpl, err := template.New(firstFilename).Funcs(r.funcs).ParseFS(fsys, globPatterns...)
		found = &Renderer{template: tpl, parseError: err}
		r.cache.Set(key, found)
	}

	return found
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/template/renderer.go | tools/template/renderer.go | package template
import (
"bytes"
"errors"
"html/template"
)
// Renderer defines a single parsed template.
type Renderer struct {
	template   *template.Template
	parseError error
}

// Render executes the template with the specified data as the dot object
// and returns the result as plain string.
//
// A previously stored parse error (or a missing template) is reported here.
func (r *Renderer) Render(data any) (string, error) {
	switch {
	case r.parseError != nil:
		return "", r.parseError
	case r.template == nil:
		return "", errors.New("invalid or nil template")
	}

	var buf bytes.Buffer
	if err := r.template.Execute(&buf, data); err != nil {
		return "", err
	}

	return buf.String(), nil
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/template/renderer_test.go | tools/template/renderer_test.go | package template
import (
"errors"
"html/template"
"testing"
)
// TestRendererRender covers the Render() outcomes:
// nil template, stored parse error, execute error and success.
func TestRendererRender(t *testing.T) {
	tpl, _ := template.New("").Parse("Hello {{.Name}}!")
	tpl.Option("missingkey=error") // enforce execute errors

	scenarios := map[string]struct {
		renderer       *Renderer
		data           any
		expectedHasErr bool
		expectedResult string
	}{
		"with nil template": {
			&Renderer{},
			nil,
			true,
			"",
		},
		"with parse error": {
			&Renderer{
				template:   tpl,
				parseError: errors.New("test"),
			},
			nil,
			true,
			"",
		},
		"with execute error": {
			// nil data makes {{.Name}} fail due to the missingkey=error option
			&Renderer{template: tpl},
			nil,
			true,
			"",
		},
		"no error": {
			&Renderer{template: tpl},
			struct{ Name string }{"world"},
			false,
			"Hello world!",
		},
	}

	for name, s := range scenarios {
		t.Run(name, func(t *testing.T) {
			result, err := s.renderer.Render(s.data)

			hasErr := err != nil
			if s.expectedHasErr != hasErr {
				t.Fatalf("Expected hasErr %v, got %v (%v)", s.expectedHasErr, hasErr, err)
			}

			if s.expectedResult != result {
				t.Fatalf("Expected result %v, got %v", s.expectedResult, result)
			}
		})
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/template/registry_test.go | tools/template/registry_test.go | package template
import (
"fmt"
"os"
"path/filepath"
"strings"
"testing"
)
// checkRegistryFuncs is a test helper that asserts that the registry
// funcs map contains exactly the expectedFuncs names.
func checkRegistryFuncs(t *testing.T, r *Registry, expectedFuncs ...string) {
	if total := len(r.funcs); total != len(expectedFuncs) {
		t.Fatalf("Expected total %d funcs, got %d", len(expectedFuncs), total)
	}

	for _, name := range expectedFuncs {
		if _, exists := r.funcs[name]; !exists {
			t.Fatalf("Missing %q func", name)
		}
	}
}
func TestNewRegistry(t *testing.T) {
	registry := NewRegistry()

	if registry.cache == nil {
		t.Fatalf("Expected cache store to be initialized, got nil")
	}

	if total := registry.cache.Length(); total != 0 {
		t.Fatalf("Expected cache store length to be 0, got %d", total)
	}

	// only the default "raw" helper should be registered out of the box
	checkRegistryFuncs(t, registry, "raw")
}
func TestRegistryAddFuncs(t *testing.T) {
	registry := NewRegistry()

	registry.AddFuncs(map[string]any{
		"test": func(a string) string { return a + "-TEST" },
	})

	// the default "raw" func should remain registered alongside the new one
	checkRegistryFuncs(t, registry, "raw", "test")

	rendered, err := registry.LoadString(`{{.|test}}`).Render("example")
	if err != nil {
		t.Fatalf("Unexpected Render() error, got %v", err)
	}

	expected := "example-TEST"
	if rendered != expected {
		t.Fatalf("Expected Render() result %q, got %q", expected, rendered)
	}
}
// TestRegistryLoadFiles checks that LoadFiles caches both invalid and
// valid file templates and that the cached renderer renders correctly.
func TestRegistryLoadFiles(t *testing.T) {
	r := NewRegistry()

	t.Run("invalid or missing files", func(t *testing.T) {
		r.LoadFiles("file1.missing", "file2.missing")

		key := "file1.missing,file2.missing"

		renderer := r.cache.Get(key)
		if renderer == nil {
			t.Fatal("Expected renderer to be initialized even if invalid, got nil")
		}
		if renderer.template != nil {
			t.Fatalf("Expected renderer template to be nil, got %v", renderer.template)
		}
		if renderer.parseError == nil {
			t.Fatalf("Expected renderer parseError to be set, got nil")
		}
	})

	t.Run("valid files", func(t *testing.T) {
		// create the test templates inside an auto-removed temp dir
		// (t.TempDir is always cleaned up, unlike the previous manual
		// os.MkdirTemp + defer os.RemoveAll which leaked the dir when
		// an earlier t.Fatal fired before the defer was registered)
		dir := t.TempDir()

		if err := os.WriteFile(filepath.Join(dir, "base.html"), []byte(`Base:{{template "content" .}}`), 0644); err != nil {
			t.Fatal(err)
		}
		if err := os.WriteFile(filepath.Join(dir, "content.html"), []byte(`{{define "content"}}Content:{{.|raw}}{{end}}`), 0644); err != nil {
			t.Fatal(err)
		}

		files := []string{filepath.Join(dir, "base.html"), filepath.Join(dir, "content.html")}

		r.LoadFiles(files...)

		renderer := r.cache.Get(strings.Join(files, ","))
		if renderer == nil {
			t.Fatal("Expected renderer to be initialized even if invalid, got nil")
		}
		if renderer.template == nil {
			t.Fatal("Expected renderer template to be set, got nil")
		}
		if renderer.parseError != nil {
			t.Fatalf("Expected renderer parseError to be nil, got %v", renderer.parseError)
		}

		result, err := renderer.Render("<h1>123</h1>")
		if err != nil {
			t.Fatalf("Unexpected Render() error, got %v", err)
		}

		expected := "Base:Content:<h1>123</h1>"
		if result != expected {
			t.Fatalf("Expected Render() result %q, got %q", expected, result)
		}
	})
}
// TestRegistryLoadString checks that LoadString caches both invalid and
// valid inline templates (keyed by the raw text) and that rendering works.
func TestRegistryLoadString(t *testing.T) {
	registry := NewRegistry()

	t.Run("invalid template string", func(t *testing.T) {
		txt := `test {{define "content"}}`

		registry.LoadString(txt)

		cached := registry.cache.Get(txt)
		if cached == nil {
			t.Fatal("Expected renderer to be initialized even if invalid, got nil")
		}
		if cached.template != nil {
			t.Fatalf("Expected renderer template to be nil, got %v", cached.template)
		}
		if cached.parseError == nil {
			t.Fatalf("Expected renderer parseError to be set, got nil")
		}
	})

	t.Run("valid template string", func(t *testing.T) {
		txt := `test {{.|raw}}`

		registry.LoadString(txt)

		cached := registry.cache.Get(txt)
		if cached == nil {
			t.Fatal("Expected renderer to be initialized even if invalid, got nil")
		}
		if cached.template == nil {
			t.Fatal("Expected renderer template to be set, got nil")
		}
		if cached.parseError != nil {
			t.Fatalf("Expected renderer parseError to be nil, got %v", cached.parseError)
		}

		result, err := cached.Render("<h1>123</h1>")
		if err != nil {
			t.Fatalf("Unexpected Render() error, got %v", err)
		}

		expected := "test <h1>123</h1>"
		if result != expected {
			t.Fatalf("Expected Render() result %q, got %q", expected, result)
		}
	})
}
// TestRegistryLoadFS checks that LoadFS caches both invalid and valid
// fs+glob pairs and that the cached renderer renders correctly.
func TestRegistryLoadFS(t *testing.T) {
	r := NewRegistry()

	t.Run("invalid fs", func(t *testing.T) {
		fs := os.DirFS("__missing__")
		files := []string{"missing1", "missing2"}
		key := fmt.Sprintf("%v%v", fs, files)

		r.LoadFS(fs, files...)

		renderer := r.cache.Get(key)
		if renderer == nil {
			t.Fatal("Expected renderer to be initialized even if invalid, got nil")
		}
		if renderer.template != nil {
			t.Fatalf("Expected renderer template to be nil, got %v", renderer.template)
		}
		if renderer.parseError == nil {
			t.Fatalf("Expected renderer parseError to be set, got nil")
		}
	})

	t.Run("valid fs", func(t *testing.T) {
		// create the test templates inside an auto-removed temp dir
		// (t.TempDir is always cleaned up, unlike the previous manual
		// os.MkdirTemp + defer os.RemoveAll which leaked the dir when
		// an earlier t.Fatal fired before the defer was registered)
		dir := t.TempDir()

		if err := os.WriteFile(filepath.Join(dir, "base.html"), []byte(`Base:{{template "content" .}}`), 0644); err != nil {
			t.Fatal(err)
		}
		if err := os.WriteFile(filepath.Join(dir, "content.html"), []byte(`{{define "content"}}Content:{{.|raw}}{{end}}`), 0644); err != nil {
			t.Fatal(err)
		}

		fs := os.DirFS(dir)
		files := []string{"base.html", "content.html"}
		key := fmt.Sprintf("%v%v", fs, files)

		r.LoadFS(fs, files...)

		renderer := r.cache.Get(key)
		if renderer == nil {
			t.Fatal("Expected renderer to be initialized even if invalid, got nil")
		}
		if renderer.template == nil {
			t.Fatal("Expected renderer template to be set, got nil")
		}
		if renderer.parseError != nil {
			t.Fatalf("Expected renderer parseError to be nil, got %v", renderer.parseError)
		}

		result, err := renderer.Render("<h1>123</h1>")
		if err != nil {
			t.Fatalf("Unexpected Render() error, got %v", err)
		}

		expected := "Base:Content:<h1>123</h1>"
		if result != expected {
			t.Fatalf("Expected Render() result %q, got %q", expected, result)
		}
	})
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/search/token_functions_test.go | tools/search/token_functions_test.go | package search
import (
"errors"
"fmt"
"strings"
"testing"
"github.com/ganigeorgiev/fexpr"
"github.com/pocketbase/dbx"
"github.com/pocketbase/pocketbase/tools/security"
)
func TestTokenFunctionsGeoDistance(t *testing.T) {
t.Parallel()
testDB, err := createTestDB()
if err != nil {
t.Fatal(err)
}
defer testDB.Close()
fn, ok := TokenFunctions["geoDistance"]
if !ok {
t.Error("Expected geoDistance token function to be registered.")
}
baseTokenResolver := func(t fexpr.Token) (*ResolverResult, error) {
placeholder := "t" + security.PseudorandomString(5)
return &ResolverResult{Identifier: "{:" + placeholder + "}", Params: map[string]any{placeholder: t.Literal}}, nil
}
scenarios := []struct {
name string
args []fexpr.Token
resolver func(t fexpr.Token) (*ResolverResult, error)
result *ResolverResult
expectErr bool
}{
{
"no args",
nil,
baseTokenResolver,
nil,
true,
},
{
"< 4 args",
[]fexpr.Token{
{Literal: "1", Type: fexpr.TokenNumber},
{Literal: "2", Type: fexpr.TokenNumber},
{Literal: "3", Type: fexpr.TokenNumber},
},
baseTokenResolver,
nil,
true,
},
{
"> 4 args",
[]fexpr.Token{
{Literal: "1", Type: fexpr.TokenNumber},
{Literal: "2", Type: fexpr.TokenNumber},
{Literal: "3", Type: fexpr.TokenNumber},
{Literal: "4", Type: fexpr.TokenNumber},
{Literal: "5", Type: fexpr.TokenNumber},
},
baseTokenResolver,
nil,
true,
},
{
"unsupported function argument",
[]fexpr.Token{
{Literal: "1", Type: fexpr.TokenFunction},
{Literal: "2", Type: fexpr.TokenNumber},
{Literal: "3", Type: fexpr.TokenNumber},
{Literal: "4", Type: fexpr.TokenNumber},
},
baseTokenResolver,
nil,
true,
},
{
"unsupported text argument",
[]fexpr.Token{
{Literal: "1", Type: fexpr.TokenText},
{Literal: "2", Type: fexpr.TokenNumber},
{Literal: "3", Type: fexpr.TokenNumber},
{Literal: "4", Type: fexpr.TokenNumber},
},
baseTokenResolver,
nil,
true,
},
{
"4 valid arguments but with resolver error",
[]fexpr.Token{
{Literal: "1", Type: fexpr.TokenNumber},
{Literal: "2", Type: fexpr.TokenNumber},
{Literal: "3", Type: fexpr.TokenNumber},
{Literal: "4", Type: fexpr.TokenNumber},
},
func(t fexpr.Token) (*ResolverResult, error) {
return nil, errors.New("test")
},
nil,
true,
},
{
"4 valid arguments",
[]fexpr.Token{
{Literal: "1", Type: fexpr.TokenNumber},
{Literal: "2", Type: fexpr.TokenNumber},
{Literal: "3", Type: fexpr.TokenNumber},
{Literal: "4", Type: fexpr.TokenNumber},
},
baseTokenResolver,
&ResolverResult{
NoCoalesce: true,
Identifier: `(6371 * acos(cos(radians({:latA})) * cos(radians({:latB})) * cos(radians({:lonB}) - radians({:lonA})) + sin(radians({:latA})) * sin(radians({:latB}))))`,
Params: map[string]any{
"lonA": 1,
"latA": 2,
"lonB": 3,
"latB": 4,
},
},
false,
},
{
"mixed arguments",
[]fexpr.Token{
{Literal: "null", Type: fexpr.TokenIdentifier},
{Literal: "2", Type: fexpr.TokenNumber},
{Literal: "false", Type: fexpr.TokenIdentifier},
{Literal: "4", Type: fexpr.TokenNumber},
},
baseTokenResolver,
&ResolverResult{
NoCoalesce: true,
Identifier: `(6371 * acos(cos(radians({:latA})) * cos(radians({:latB})) * cos(radians({:lonB}) - radians({:lonA})) + sin(radians({:latA})) * sin(radians({:latB}))))`,
Params: map[string]any{
"lonA": "null",
"latA": 2,
"lonB": false,
"latB": 4,
},
},
false,
},
}
for _, s := range scenarios {
t.Run(s.name, func(t *testing.T) {
result, err := fn(s.resolver, s.args...)
hasErr := err != nil
if hasErr != s.expectErr {
t.Fatalf("Expected hasErr %v, got %v (%v)", s.expectErr, hasErr, err)
}
testCompareResults(t, s.result, result)
})
}
}
// TestTokenFunctionsGeoDistanceExec executes the generated geoDistance
// SQL expression against a test db and checks the calculated distance.
func TestTokenFunctionsGeoDistanceExec(t *testing.T) {
	t.Parallel()

	testDB, err := createTestDB()
	if err != nil {
		t.Fatal(err)
	}
	defer testDB.Close()

	fn, ok := TokenFunctions["geoDistance"]
	if !ok {
		// t.Fatal (and not t.Error) - continuing with a nil fn would panic below
		t.Fatal("Expected geoDistance token function to be registered.")
	}

	result, err := fn(
		func(t fexpr.Token) (*ResolverResult, error) {
			placeholder := "t" + security.PseudorandomString(5)
			return &ResolverResult{Identifier: "{:" + placeholder + "}", Params: map[string]any{placeholder: t.Literal}}, nil
		},
		fexpr.Token{Literal: "23.23033854945808", Type: fexpr.TokenNumber},
		fexpr.Token{Literal: "42.713146090563384", Type: fexpr.TokenNumber},
		fexpr.Token{Literal: "23.44920680886216", Type: fexpr.TokenNumber},
		fexpr.Token{Literal: "42.7078484153991", Type: fexpr.TokenNumber},
	)
	if err != nil {
		t.Fatal(err)
	}

	column := []float64{}
	err = testDB.NewQuery("select " + result.Identifier).Bind(result.Params).Column(&column)
	if err != nil {
		t.Fatal(err)
	}

	if len(column) != 1 {
		t.Fatalf("Expected exactly 1 column value as result, got %v", column)
	}

	// the distance between the two sample points rounded to 2 decimals
	expected := "17.89"
	distance := fmt.Sprintf("%.2f", column[0])
	if distance != expected {
		t.Fatalf("Expected distance value %s, got %s", expected, distance)
	}
}
// -------------------------------------------------------------------

// testCompareResults is a test helper that asserts that two ResolverResult
// values are equivalent: same nil-ness, same AfterBuild output, same
// multi-match subquery SQL, same NoCoalesce flag and the same identifier
// after a loose bind-params substitution (so that results with different
// pseudorandom placeholder names can still be compared).
func testCompareResults(t *testing.T, a, b *ResolverResult) {
	testDB, err := createTestDB()
	if err != nil {
		t.Fatal(err)
	}
	defer testDB.Close()

	aIsNil := a == nil
	bIsNil := b == nil
	if aIsNil != bIsNil {
		t.Fatalf("Expected aIsNil and bIsNil to be the same, got %v vs %v", aIsNil, bIsNil)
	}
	if aIsNil && bIsNil {
		return // nothing else to compare
	}

	// (renamed from the previous inverted "aHasAfterBuild := a.AfterBuild == nil")
	aHasAfterBuild := a.AfterBuild != nil
	bHasAfterBuild := b.AfterBuild != nil
	if aHasAfterBuild != bHasAfterBuild {
		t.Fatalf("Expected aHasAfterBuild and bHasAfterBuild to be the same, got %v vs %v", aHasAfterBuild, bHasAfterBuild)
	}

	var aAfterBuild string
	if a.AfterBuild != nil {
		aAfterBuild = a.AfterBuild(dbx.NewExp("test")).Build(testDB.DB, a.Params)
	}
	var bAfterBuild string
	if b.AfterBuild != nil {
		// build with b's own params (previously a.Params was used by mistake)
		bAfterBuild = b.AfterBuild(dbx.NewExp("test")).Build(testDB.DB, b.Params)
	}
	if aAfterBuild != bAfterBuild {
		t.Fatalf("Expected aAfterBuild and bAfterBuild to be the same, got\n%s\nvs\n%s", aAfterBuild, bAfterBuild)
	}

	var aMultiMatchSubQuery string
	if a.MultiMatchSubQuery != nil {
		aMultiMatchSubQuery = a.MultiMatchSubQuery.Build(testDB.DB, a.Params)
	}
	var bMultiMatchSubQuery string
	if b.MultiMatchSubQuery != nil {
		bMultiMatchSubQuery = b.MultiMatchSubQuery.Build(testDB.DB, b.Params)
	}
	if aMultiMatchSubQuery != bMultiMatchSubQuery {
		t.Fatalf("Expected aMultiMatchSubQuery and bMultiMatchSubQuery to be the same, got\n%s\nvs\n%s", aMultiMatchSubQuery, bMultiMatchSubQuery)
	}

	if a.NoCoalesce != b.NoCoalesce {
		t.Fatalf("Expected NoCoalesce to match, got %v vs %v", a.NoCoalesce, b.NoCoalesce)
	}

	// loose placeholders replacement
	aResolved := a.Identifier
	for k, v := range a.Params {
		aResolved = strings.ReplaceAll(aResolved, "{:"+k+"}", fmt.Sprintf("%v", v))
	}
	bResolved := b.Identifier
	for k, v := range b.Params {
		bResolved = strings.ReplaceAll(bResolved, "{:"+k+"}", fmt.Sprintf("%v", v))
	}
	if aResolved != bResolved {
		t.Fatalf("Expected resolved identifiers to match, got\n%s\nvs\n%s", aResolved, bResolved)
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/search/sort.go | tools/search/sort.go | package search
import (
"fmt"
"strings"
)
// special sort field names handled directly by SortField.BuildExpr
// (they bypass the regular field resolver)
const (
	randomSortKey string = "@random"
	rowidSortKey  string = "@rowid"
)

// sort field directions
const (
	SortAsc  string = "ASC"
	SortDesc string = "DESC"
)

// SortField defines a single search sort field.
type SortField struct {
	Name      string `json:"name"`
	Direction string `json:"direction"` // one of SortAsc or SortDesc
}
// BuildExpr resolves the sort field into a valid db sort expression.
func (s *SortField) BuildExpr(fieldResolver FieldResolver) (string, error) {
	switch s.Name {
	case randomSortKey:
		// special case for random sort (direction is irrelevant)
		return "RANDOM()", nil
	case rowidSortKey:
		// special case for the builtin SQLite rowid column
		return fmt.Sprintf("[[_rowid_]] %s", s.Direction), nil
	}

	result, err := fieldResolver.Resolve(s.Name)

	// invalidate empty fields and non-column identifiers
	if err != nil ||
		len(result.Params) > 0 ||
		result.Identifier == "" ||
		strings.ToLower(result.Identifier) == "null" {
		return "", fmt.Errorf("invalid sort field %q", s.Name)
	}

	return fmt.Sprintf("%s %s", result.Identifier, s.Direction), nil
}
// ParseSortFromString parses the provided string expression
// into a slice of SortFields.
//
// Example:
//
//	fields := search.ParseSortFromString("-name,+created")
func ParseSortFromString(str string) (fields []SortField) {
	for _, raw := range strings.Split(str, ",") {
		name := strings.TrimSpace(raw)

		if strings.HasPrefix(name, "-") {
			// "-" prefix -> descending
			fields = append(fields, SortField{strings.TrimPrefix(name, "-"), SortDesc})
			continue
		}

		// plain or explicit "+" prefix -> ascending
		fields = append(fields, SortField{strings.TrimPrefix(name, "+"), SortAsc})
	}

	return
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/search/identifier_macros.go | tools/search/identifier_macros.go | package search
import (
"fmt"
"time"
"github.com/pocketbase/pocketbase/tools/types"
)
// timeNow returns the current time and is declared as a variable so
// that it can be stubbed.
// note: used primarily for the tests
var timeNow = func() time.Time {
	return time.Now()
}

// identifierMacros maps the supported @-prefixed filter identifiers to
// functions that compute their current value (all based on UTC time).
//
// Date-like macros return a types.DateTime string, while the component
// macros (@second, @minute, etc.) return plain ints.
var identifierMacros = map[string]func() (any, error){
	"@now": func() (any, error) {
		today := timeNow().UTC()

		d, err := types.ParseDateTime(today)
		if err != nil {
			return "", fmt.Errorf("@now: %w", err)
		}

		return d.String(), nil
	},
	"@yesterday": func() (any, error) {
		yesterday := timeNow().UTC().AddDate(0, 0, -1)

		d, err := types.ParseDateTime(yesterday)
		if err != nil {
			return "", fmt.Errorf("@yesterday: %w", err)
		}

		return d.String(), nil
	},
	"@tomorrow": func() (any, error) {
		tomorrow := timeNow().UTC().AddDate(0, 0, 1)

		d, err := types.ParseDateTime(tomorrow)
		if err != nil {
			return "", fmt.Errorf("@tomorrow: %w", err)
		}

		return d.String(), nil
	},
	"@second": func() (any, error) {
		return timeNow().UTC().Second(), nil
	},
	"@minute": func() (any, error) {
		return timeNow().UTC().Minute(), nil
	},
	"@hour": func() (any, error) {
		return timeNow().UTC().Hour(), nil
	},
	"@day": func() (any, error) {
		return timeNow().UTC().Day(), nil
	},
	"@month": func() (any, error) {
		return int(timeNow().UTC().Month()), nil
	},
	"@weekday": func() (any, error) {
		return int(timeNow().UTC().Weekday()), nil
	},
	"@year": func() (any, error) {
		return timeNow().UTC().Year(), nil
	},
	"@todayStart": func() (any, error) {
		today := timeNow().UTC()
		start := time.Date(today.Year(), today.Month(), today.Day(), 0, 0, 0, 0, time.UTC)

		d, err := types.ParseDateTime(start)
		if err != nil {
			return "", fmt.Errorf("@todayStart: %w", err)
		}

		return d.String(), nil
	},
	"@todayEnd": func() (any, error) {
		today := timeNow().UTC()
		start := time.Date(today.Year(), today.Month(), today.Day(), 23, 59, 59, 999999999, time.UTC)

		d, err := types.ParseDateTime(start)
		if err != nil {
			return "", fmt.Errorf("@todayEnd: %w", err)
		}

		return d.String(), nil
	},
	"@monthStart": func() (any, error) {
		today := timeNow().UTC()
		start := time.Date(today.Year(), today.Month(), 1, 0, 0, 0, 0, time.UTC)

		d, err := types.ParseDateTime(start)
		if err != nil {
			return "", fmt.Errorf("@monthStart: %w", err)
		}

		return d.String(), nil
	},
	"@monthEnd": func() (any, error) {
		today := timeNow().UTC()
		// end-of-day on day 1 shifted by +1 month -1 day
		// = the last day of the current month at 23:59:59.999999999
		start := time.Date(today.Year(), today.Month(), 1, 23, 59, 59, 999999999, time.UTC)
		end := start.AddDate(0, 1, -1)

		d, err := types.ParseDateTime(end)
		if err != nil {
			return "", fmt.Errorf("@monthEnd: %w", err)
		}

		return d.String(), nil
	},
	"@yearStart": func() (any, error) {
		today := timeNow().UTC()
		start := time.Date(today.Year(), 1, 1, 0, 0, 0, 0, time.UTC)

		d, err := types.ParseDateTime(start)
		if err != nil {
			return "", fmt.Errorf("@yearStart: %w", err)
		}

		return d.String(), nil
	},
	"@yearEnd": func() (any, error) {
		today := timeNow().UTC()
		end := time.Date(today.Year(), 12, 31, 23, 59, 59, 999999999, time.UTC)

		d, err := types.ParseDateTime(end)
		if err != nil {
			return "", fmt.Errorf("@yearEnd: %w", err)
		}

		return d.String(), nil
	},
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/search/simple_field_resolver_test.go | tools/search/simple_field_resolver_test.go | package search_test
import (
"fmt"
"testing"
"github.com/pocketbase/dbx"
"github.com/pocketbase/pocketbase/tools/search"
)
// TestSimpleFieldResolverUpdateQuery verifies that resolving fields
// followed by UpdateQuery leaves the select query unchanged.
//
// NOTE(review): UpdateQuery is invoked with a nil query argument and the
// built query is still expected to match - presumably
// SimpleFieldResolver.UpdateQuery is a no-op; confirm against its implementation.
func TestSimpleFieldResolverUpdateQuery(t *testing.T) {
	r := search.NewSimpleFieldResolver("test")

	scenarios := []struct {
		fieldName   string
		expectQuery string
	}{
		// missing field (the query shouldn't change)
		{"", `SELECT "id" FROM "test"`},
		// unknown field (the query shouldn't change)
		{"unknown", `SELECT "id" FROM "test"`},
		// allowed field (the query shouldn't change)
		{"test", `SELECT "id" FROM "test"`},
	}

	for i, s := range scenarios {
		t.Run(fmt.Sprintf("%d_%s", i, s.fieldName), func(t *testing.T) {
			db := dbx.NewFromDB(nil, "")
			query := db.Select("id").From("test")

			r.Resolve(s.fieldName)

			if err := r.UpdateQuery(nil); err != nil {
				t.Fatalf("UpdateQuery failed with error %v", err)
			}

			rawQuery := query.Build().SQL()

			if rawQuery != s.expectQuery {
				t.Fatalf("Expected query %v, got \n%v", s.expectQuery, rawQuery)
			}
		})
	}
}
// TestSimpleFieldResolverResolve verifies which field names the simple
// resolver accepts and the identifiers it produces for them.
func TestSimpleFieldResolverResolve(t *testing.T) {
	resolver := search.NewSimpleFieldResolver("test", `^test_regex\d+$`, "Test columnify!", "data.test")

	scenarios := []struct {
		fieldName   string
		expectError bool
		expectName  string
	}{
		{"", true, ""},
		{" ", true, ""},
		{"unknown", true, ""},
		{"test", false, "[[test]]"},
		{"test.sub", true, ""},
		{"test_regex", true, ""},
		{"test_regex1", false, "[[test_regex1]]"},
		{"Test columnify!", false, "[[Testcolumnify]]"},
		{"data.test", false, "JSON_EXTRACT([[data]], '$.test')"},
	}

	for i, s := range scenarios {
		t.Run(fmt.Sprintf("%d_%s", i, s.fieldName), func(t *testing.T) {
			// distinct name for the result to avoid shadowing the
			// resolver variable (previously both were named "r")
			result, err := resolver.Resolve(s.fieldName)

			hasErr := err != nil
			if hasErr != s.expectError {
				t.Fatalf("Expected hasErr %v, got %v (%v)", s.expectError, hasErr, err)
			}
			if hasErr {
				return
			}

			if result.Identifier != s.expectName {
				t.Fatalf("Expected r.Identifier %q, got %q", s.expectName, result.Identifier)
			}

			if len(result.Params) != 0 {
				t.Fatalf("r.Params should be empty, got %v", result.Params)
			}
		})
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/search/filter.go | tools/search/filter.go | package search
import (
"encoding/json"
"errors"
"fmt"
"strconv"
"strings"
"github.com/ganigeorgiev/fexpr"
"github.com/pocketbase/dbx"
"github.com/pocketbase/pocketbase/tools/security"
"github.com/pocketbase/pocketbase/tools/store"
"github.com/spf13/cast"
)
// FilterData is a filter expression string following the `fexpr` package grammar.
//
// The filter string can also contain dbx placeholder parameters (eg. "title = {:name}"),
// that will be safely replaced and properly quoted inplace with the placeholderReplacements values.
//
// Example:
//
//	var filter FilterData = "id = null || (name = 'test' && status = true) || (total >= {:min} && total <= {:max})"
//	resolver := search.NewSimpleFieldResolver("id", "name", "status")
//	expr, err := filter.BuildExpr(resolver, dbx.Params{"min": 100, "max": 200})
type FilterData string

// parsedFilterData holds a cache with previously parsed filter data expressions
// (initialized with some preallocated empty data map)
//
// Keys combine the raw filter string with the expressions limit
// (see BuildExprWithLimit).
var parsedFilterData = store.New(make(map[string][]fexpr.ExprGroup, 50))
// BuildExpr parses the current filter data and returns a new db WHERE expression.
//
// The filter string can also contain dbx placeholder parameters (eg. "title = {:name}"),
// that will be safely replaced and properly quoted inplace with the placeholderReplacements values.
//
// The parsed expressions are limited up to DefaultFilterExprLimit.
// Use [FilterData.BuildExprWithLimit] if you want to set a custom limit.
func (f FilterData) BuildExpr(
	fieldResolver FieldResolver,
	placeholderReplacements ...dbx.Params,
) (dbx.Expression, error) {
	// thin wrapper around BuildExprWithLimit with the default limit
	return f.BuildExprWithLimit(fieldResolver, DefaultFilterExprLimit, placeholderReplacements...)
}
// BuildExprWithLimit parses the current filter data and returns a new db WHERE expression,
// limiting the total parsed expressions up to maxExpressions.
//
// The filter string can also contain dbx placeholder parameters (eg. "title = {:name}"),
// that will be safely replaced and properly quoted inplace with the placeholderReplacements values.
func (f FilterData) BuildExprWithLimit(
	fieldResolver FieldResolver,
	maxExpressions int,
	placeholderReplacements ...dbx.Params,
) (dbx.Expression, error) {
	raw := string(f)

	// replace the placeholder params in the raw string filter
	for _, p := range placeholderReplacements {
		for key, value := range p {
			var replacement string
			switch v := value.(type) {
			case nil:
				replacement = "null"
			case bool, float64, float32, int, int64, int32, int16, int8, uint, uint64, uint32, uint16, uint8:
				// numeric and boolean values are inlined as-is
				replacement = cast.ToString(v)
			default:
				replacement = cast.ToString(v)

				// try to json serialize as fallback
				if replacement == "" {
					raw, _ := json.Marshal(v)
					replacement = string(raw)
				}

				// quote so the value is treated as a filter text literal
				replacement = strconv.Quote(replacement)
			}
			raw = strings.ReplaceAll(raw, "{:"+key+"}", replacement)
		}
	}

	// the limit participates in the cache key since it affects the build result
	cacheKey := raw + "/" + strconv.Itoa(maxExpressions)

	if data, ok := parsedFilterData.GetOk(cacheKey); ok {
		return buildParsedFilterExpr(data, fieldResolver, &maxExpressions)
	}

	data, err := fexpr.Parse(raw)
	if err != nil {
		// depending on the users demand we may allow empty expressions
		// (aka. expressions consisting only of whitespaces or comments)
		// but for now disallow them as it seems unnecessary
		// if errors.Is(err, fexpr.ErrEmpty) {
		// 	return dbx.NewExp("1=1"), nil
		// }
		return nil, err
	}

	// store in cache
	// (the limit size is arbitrary and it is there to prevent the cache growing too big)
	parsedFilterData.SetIfLessThanLimit(cacheKey, data, 500)

	return buildParsedFilterExpr(data, fieldResolver, &maxExpressions)
}
// buildParsedFilterExpr converts a parsed fexpr expressions tree into a
// single dbx expression.
//
// maxExpressions is passed by pointer so that the remaining budget is
// shared across the recursive calls for nested groups; once it reaches 0
// ErrFilterExprLimit is returned.
func buildParsedFilterExpr(data []fexpr.ExprGroup, fieldResolver FieldResolver, maxExpressions *int) (dbx.Expression, error) {
	if len(data) == 0 {
		return nil, fexpr.ErrEmpty
	}

	result := &concatExpr{separator: " "}

	for _, group := range data {
		var expr dbx.Expression
		var exprErr error

		switch item := group.Item.(type) {
		case fexpr.Expr:
			// leaf expression -> consume 1 from the shared budget
			if *maxExpressions <= 0 {
				return nil, ErrFilterExprLimit
			}
			*maxExpressions--
			expr, exprErr = resolveTokenizedExpr(item, fieldResolver)
		case fexpr.ExprGroup:
			// single nested group
			expr, exprErr = buildParsedFilterExpr([]fexpr.ExprGroup{item}, fieldResolver, maxExpressions)
		case []fexpr.ExprGroup:
			// nested list of groups
			expr, exprErr = buildParsedFilterExpr(item, fieldResolver, maxExpressions)
		default:
			exprErr = errors.New("unsupported expression item")
		}

		if exprErr != nil {
			return nil, exprErr
		}

		// join with the group's operator (only between parts, not before the first)
		if len(result.parts) > 0 {
			var op string
			if group.Join == fexpr.JoinOr {
				op = "OR"
			} else {
				op = "AND"
			}
			result.parts = append(result.parts, &opExpr{op})
		}

		result.parts = append(result.parts, expr)
	}

	return result, nil
}
// resolveTokenizedExpr resolves the left and right tokens of a single
// parsed filter expression and combines them into a db expression.
func resolveTokenizedExpr(expr fexpr.Expr, fieldResolver FieldResolver) (dbx.Expression, error) {
	lResult, lErr := resolveToken(expr.Left, fieldResolver)
	// the extra nil check guards against resolvers that return a nil
	// result together with a nil error (would otherwise panic below)
	if lErr != nil || lResult == nil || lResult.Identifier == "" {
		return nil, fmt.Errorf("invalid left operand %q - %v", expr.Left.Literal, lErr)
	}

	rResult, rErr := resolveToken(expr.Right, fieldResolver)
	if rErr != nil || rResult == nil || rResult.Identifier == "" {
		return nil, fmt.Errorf("invalid right operand %q - %v", expr.Right.Literal, rErr)
	}

	return buildResolversExpr(lResult, expr.Op, rResult)
}
// buildResolversExpr combines the already resolved left and right operands
// with the given operator into a single dbx expression, attaching the
// multi-match subquery constraints and AfterBuild hooks when present.
func buildResolversExpr(
	left *ResolverResult,
	op fexpr.SignOp,
	right *ResolverResult,
) (dbx.Expression, error) {
	var expr dbx.Expression

	switch op {
	case fexpr.SignEq, fexpr.SignAnyEq:
		expr = resolveEqualExpr(true, left, right)
	case fexpr.SignNeq, fexpr.SignAnyNeq:
		expr = resolveEqualExpr(false, left, right)
	case fexpr.SignLike, fexpr.SignAnyLike:
		// the right side is a column and therefor wrap it with "%" for contains like behavior
		if len(right.Params) == 0 {
			expr = dbx.NewExp(fmt.Sprintf("%s LIKE ('%%' || %s || '%%') ESCAPE '\\'", left.Identifier, right.Identifier), left.Params)
		} else {
			expr = dbx.NewExp(fmt.Sprintf("%s LIKE %s ESCAPE '\\'", left.Identifier, right.Identifier), mergeParams(left.Params, wrapLikeParams(right.Params)))
		}
	case fexpr.SignNlike, fexpr.SignAnyNlike:
		// the right side is a column and therefor wrap it with "%" for not-contains like behavior
		if len(right.Params) == 0 {
			expr = dbx.NewExp(fmt.Sprintf("%s NOT LIKE ('%%' || %s || '%%') ESCAPE '\\'", left.Identifier, right.Identifier), left.Params)
		} else {
			expr = dbx.NewExp(fmt.Sprintf("%s NOT LIKE %s ESCAPE '\\'", left.Identifier, right.Identifier), mergeParams(left.Params, wrapLikeParams(right.Params)))
		}
	case fexpr.SignLt, fexpr.SignAnyLt:
		expr = dbx.NewExp(fmt.Sprintf("%s < %s", left.Identifier, right.Identifier), mergeParams(left.Params, right.Params))
	case fexpr.SignLte, fexpr.SignAnyLte:
		expr = dbx.NewExp(fmt.Sprintf("%s <= %s", left.Identifier, right.Identifier), mergeParams(left.Params, right.Params))
	case fexpr.SignGt, fexpr.SignAnyGt:
		expr = dbx.NewExp(fmt.Sprintf("%s > %s", left.Identifier, right.Identifier), mergeParams(left.Params, right.Params))
	case fexpr.SignGte, fexpr.SignAnyGte:
		expr = dbx.NewExp(fmt.Sprintf("%s >= %s", left.Identifier, right.Identifier), mergeParams(left.Params, right.Params))
	}

	if expr == nil {
		return nil, fmt.Errorf("unknown expression operator %q", op)
	}

	// multi-match expressions
	// (skipped for the "any" operator variants, per the isAnyMatchOp check)
	if !isAnyMatchOp(op) {
		if left.MultiMatchSubQuery != nil && right.MultiMatchSubQuery != nil {
			mm := &manyVsManyExpr{
				left:  left,
				right: right,
				op:    op,
			}
			expr = dbx.Enclose(dbx.And(expr, mm))
		} else if left.MultiMatchSubQuery != nil {
			mm := &manyVsOneExpr{
				noCoalesce:   left.NoCoalesce,
				subQuery:     left.MultiMatchSubQuery,
				op:           op,
				otherOperand: right,
			}
			expr = dbx.Enclose(dbx.And(expr, mm))
		} else if right.MultiMatchSubQuery != nil {
			mm := &manyVsOneExpr{
				noCoalesce:   right.NoCoalesce,
				subQuery:     right.MultiMatchSubQuery,
				op:           op,
				otherOperand: left,
				inverse:      true,
			}
			expr = dbx.Enclose(dbx.And(expr, mm))
		}
	}

	// allow the operands to post-process the final expression
	if left.AfterBuild != nil {
		expr = left.AfterBuild(expr)
	}
	if right.AfterBuild != nil {
		expr = right.AfterBuild(expr)
	}

	return expr, nil
}
// normalizedIdentifiers provides fallback SQL tokens for a few special
// literals when the field resolver doesn't define same-named fields
// (matched case-insensitively in resolveToken).
var normalizedIdentifiers = map[string]string{
	// if `null` field is missing, treat `null` identifier as NULL token
	"null": "NULL",
	// if `true` field is missing, treat `true` identifier as TRUE token
	"true": "1",
	// if `false` field is missing, treat `false` identifier as FALSE token
	"false": "0",
}
// resolveToken converts a single fexpr token into a ResolverResult:
//   - identifiers are checked against the identifierMacros first, then
//     delegated to the provided fieldResolver (with a case-insensitive
//     fallback to the normalizedIdentifiers SQL tokens)
//   - text and number tokens become bind params with pseudorandom names
//   - function tokens are dispatched to the registered TokenFunctions
func resolveToken(token fexpr.Token, fieldResolver FieldResolver) (*ResolverResult, error) {
	switch token.Type {
	case fexpr.TokenIdentifier:
		// check for macros
		// ---
		if macroFunc, ok := identifierMacros[token.Literal]; ok {
			placeholder := "t" + security.PseudorandomString(8)

			macroValue, err := macroFunc()
			if err != nil {
				return nil, err
			}

			return &ResolverResult{
				Identifier: "{:" + placeholder + "}",
				Params:     dbx.Params{placeholder: macroValue},
			}, nil
		}

		// custom resolver
		// ---
		result, err := fieldResolver.Resolve(token.Literal)
		if err != nil || result.Identifier == "" {
			// fallback to the special literal tokens (null/true/false)
			for k, v := range normalizedIdentifiers {
				if strings.EqualFold(k, token.Literal) {
					return &ResolverResult{Identifier: v}, nil
				}
			}
			return nil, err
		}

		return result, err
	case fexpr.TokenText:
		placeholder := "t" + security.PseudorandomString(8)

		return &ResolverResult{
			Identifier: "{:" + placeholder + "}",
			Params:     dbx.Params{placeholder: token.Literal},
		}, nil
	case fexpr.TokenNumber:
		placeholder := "t" + security.PseudorandomString(8)

		// numbers are always bound as float64
		return &ResolverResult{
			Identifier: "{:" + placeholder + "}",
			Params:     dbx.Params{placeholder: cast.ToFloat64(token.Literal)},
		}, nil
	case fexpr.TokenFunction:
		fn, ok := TokenFunctions[token.Literal]
		if !ok {
			return nil, fmt.Errorf("unknown function %q", token.Literal)
		}

		args, _ := token.Meta.([]fexpr.Token)
		// function arguments are resolved recursively with the same resolver
		return fn(func(argToken fexpr.Token) (*ResolverResult, error) {
			return resolveToken(argToken, fieldResolver)
		}, args...)
	}

	return nil, fmt.Errorf("unsupported token type %q", token.Type)
}
// Resolves = and != expressions in an attempt to minimize the COALESCE
// usage and to gracefully handle null vs empty string normalizations.
//
// The expression `a = "" OR a is null` tends to perform better than
// `COALESCE(a, "") = ""` since the direct match can be accomplished
// with a seek while the COALESCE will induce a table scan.
func resolveEqualExpr(equal bool, left, right *ResolverResult) dbx.Expression {
	// an operand is considered "empty" when it is an empty-ish identifier
	// (NULL, '', "", ``) or binds exactly one nil/"" parameter
	isLeftEmpty := isEmptyIdentifier(left) || (len(left.Params) == 1 && hasEmptyParamValue(left))
	isRightEmpty := isEmptyIdentifier(right) || (len(right.Params) == 1 && hasEmptyParamValue(right))
	equalOp := "="
	nullEqualOp := "IS"
	concatOp := "OR"
	nullExpr := "IS NULL"
	if !equal {
		// always use `IS NOT` instead of `!=` because direct non-equal comparisons
		// to nullable column values that are actually NULL yields to NULL instead of TRUE, eg.:
		// `'example' != nullableColumn` -> NULL even if nullableColumn row value is NULL
		equalOp = "IS NOT"
		nullEqualOp = equalOp
		concatOp = "AND"
		nullExpr = "IS NOT NULL"
	}
	// no coalesce (eg. compare to a json field)
	// a IS b
	// a IS NOT b
	if left.NoCoalesce || right.NoCoalesce {
		return dbx.NewExp(
			fmt.Sprintf("%s %s %s", left.Identifier, nullEqualOp, right.Identifier),
			mergeParams(left.Params, right.Params),
		)
	}
	// both operands are empty
	if isLeftEmpty && isRightEmpty {
		return dbx.NewExp(fmt.Sprintf("'' %s ''", equalOp), mergeParams(left.Params, right.Params))
	}
	// direct compare since at least one of the operands is known to be non-empty
	// eg. a = 'example'
	if isKnownNonEmptyIdentifier(left) || isKnownNonEmptyIdentifier(right) {
		leftIdentifier := left.Identifier
		if isLeftEmpty {
			leftIdentifier = "''"
		}
		rightIdentifier := right.Identifier
		if isRightEmpty {
			rightIdentifier = "''"
		}
		return dbx.NewExp(
			fmt.Sprintf("%s %s %s", leftIdentifier, equalOp, rightIdentifier),
			mergeParams(left.Params, right.Params),
		)
	}
	// "" = b OR b IS NULL
	// "" IS NOT b AND b IS NOT NULL
	if isLeftEmpty {
		return dbx.NewExp(
			fmt.Sprintf("('' %s %s %s %s %s)", equalOp, right.Identifier, concatOp, right.Identifier, nullExpr),
			mergeParams(left.Params, right.Params),
		)
	}
	// a = "" OR a IS NULL
	// a IS NOT "" AND a IS NOT NULL
	if isRightEmpty {
		return dbx.NewExp(
			fmt.Sprintf("(%s %s '' %s %s %s)", left.Identifier, equalOp, concatOp, left.Identifier, nullExpr),
			mergeParams(left.Params, right.Params),
		)
	}
	// fallback to a COALESCE comparison
	return dbx.NewExp(
		fmt.Sprintf(
			"COALESCE(%s, '') %s COALESCE(%s, '')",
			left.Identifier,
			equalOp,
			right.Identifier,
		),
		mergeParams(left.Params, right.Params),
	)
}
// hasEmptyParamValue reports whether at least one of the result's bound
// parameter values is nil or an empty string.
func hasEmptyParamValue(result *ResolverResult) bool {
	for _, value := range result.Params {
		if value == nil {
			return true
		}
		if s, ok := value.(string); ok && s == "" {
			return true
		}
	}
	return false
}
// isKnownNonEmptyIdentifier reports whether the result is guaranteed to
// hold a non-empty value - either a recognized boolean-ish literal or a
// parameterized operand with no empty bound values.
func isKnownNonEmptyIdentifier(result *ResolverResult) bool {
	ident := strings.ToLower(result.Identifier)
	if ident == "1" || ident == "0" || ident == "false" || ident == "true" {
		return true
	}

	return len(result.Params) > 0 && !hasEmptyParamValue(result) && !isEmptyIdentifier(result)
}
// isEmptyIdentifier reports whether the result's identifier is one of the
// empty-ish SQL forms (blank, NULL, or an empty quoted string).
func isEmptyIdentifier(result *ResolverResult) bool {
	ident := strings.ToLower(result.Identifier)
	for _, empty := range []string{"", "null", "''", `""`, "``"} {
		if ident == empty {
			return true
		}
	}
	return false
}
// isAnyMatchOp reports whether op is one of the "any/at-least-one" match
// operator variants.
func isAnyMatchOp(op fexpr.SignOp) bool {
	return op == fexpr.SignAnyEq ||
		op == fexpr.SignAnyNeq ||
		op == fexpr.SignAnyLike ||
		op == fexpr.SignAnyNlike ||
		op == fexpr.SignAnyLt ||
		op == fexpr.SignAnyLte ||
		op == fexpr.SignAnyGt ||
		op == fexpr.SignAnyGte
}
// mergeParams returns a new dbx.Params where each provided params item
// is merged in the order it is specified (later keys win).
func mergeParams(params ...dbx.Params) dbx.Params {
	merged := make(dbx.Params)
	for _, group := range params {
		for key, value := range group {
			merged[key] = value
		}
	}
	return merged
}
// @todo consider adding support for custom single character wildcard
//
// wrapLikeParams wraps each provided param value string with `%`
// if the param doesn't contain an explicit wildcard (`%`) character already.
func wrapLikeParams(params dbx.Params) dbx.Params {
	wrapped := make(dbx.Params, len(params))
	for key, value := range params {
		str := cast.ToString(value)
		if containsUnescapedChar(str, '%') {
			// already has an explicit wildcard - leave untouched
			wrapped[key] = str
			continue
		}
		// note: this is done to minimize the breaking changes and to preserve the original autoescape behavior
		wrapped[key] = "%" + escapeUnescapedChars(str, '\\', '%', '_') + "%"
	}
	return wrapped
}
// escapeUnescapedChars inserts a `\` before every occurrence of the
// specified escapeChars that isn't already escaped.
//
// The scan runs right-to-left so that each escapable rune greedily pairs
// with the backslash immediately before it (that backslash is then
// consumed and not treated as an escapable rune itself).
func escapeUnescapedChars(str string, escapeChars ...rune) string {
	needsEscape := func(r rune) bool {
		for _, ec := range escapeChars {
			if r == ec {
				return true
			}
		}
		return false
	}

	runes := []rune(str)
	out := make([]rune, 0, len(runes))
	pending := false // an escapable rune was emitted and still needs an escaper

	for i := len(runes) - 1; i >= 0; i-- {
		if pending {
			// insert the escaper unless the rune before already is one
			if runes[i] != '\\' {
				out = append(out, '\\')
			}
			pending = false
		} else if needsEscape(runes[i]) {
			pending = true
		}

		out = append(out, runes[i])

		// the escapable rune sits at the very beginning of the string
		if i == 0 && pending {
			out = append(out, '\\')
		}
	}

	// out was built right-to-left - flip it back
	for l, r := 0, len(out)-1; l < r; l, r = l+1, r-1 {
		out[l], out[r] = out[r], out[l]
	}

	return string(out)
}
// containsUnescapedChar reports whether str contains at least one
// occurrence of ch that isn't escaped by a preceding `\`.
// Backslash pairs are consumed left-to-right, so in `\\%` the `%` counts
// as unescaped (the second backslash is itself escaped).
func containsUnescapedChar(str string, ch rune) bool {
	escaped := false
	for _, cur := range str {
		if escaped {
			// this rune is consumed by the preceding backslash
			escaped = false
			continue
		}
		if cur == ch {
			return true
		}
		if cur == '\\' {
			escaped = true
		}
	}
	return false
}
// -------------------------------------------------------------------

var _ dbx.Expression = (*opExpr)(nil)

// opExpr defines an expression that contains a raw sql operator string.
type opExpr struct {
	op string
}

// Build converts the expression into a SQL fragment.
//
// The operator string is emitted as-is, without quoting or parameter binding.
//
// Implements [dbx.Expression] interface.
func (e *opExpr) Build(db *dbx.DB, params dbx.Params) string {
	return e.op
}
// -------------------------------------------------------------------

var _ dbx.Expression = (*concatExpr)(nil)

// concatExpr defines an expression that concatenates multiple
// other expressions with a specified separator.
type concatExpr struct {
	separator string
	parts     []dbx.Expression
}

// Build converts the expression into a SQL fragment.
//
// Implements [dbx.Expression] interface.
func (e *concatExpr) Build(db *dbx.DB, params dbx.Params) string {
	if len(e.parts) == 0 {
		return ""
	}

	fragments := make([]string, 0, len(e.parts))
	for _, part := range e.parts {
		if part == nil {
			continue
		}
		if fragment := part.Build(db, params); fragment != "" {
			fragments = append(fragments, fragment)
		}
	}

	// skip the extra parenthesis for a single concat expression, unless it
	// is an already concatenated raw/plain expression
	if len(fragments) == 1 {
		upper := strings.ToUpper(fragments[0])
		if !strings.Contains(upper, " AND ") && !strings.Contains(upper, " OR ") {
			return fragments[0]
		}
	}

	return "(" + strings.Join(fragments, e.separator) + ")"
}
// -------------------------------------------------------------------

var _ dbx.Expression = (*manyVsManyExpr)(nil)

// manyVsManyExpr constructs a multi-match many<->many db where expression.
//
// Expects leftSubQuery and rightSubQuery to return a subquery with a
// single "multiMatchValue" column.
type manyVsManyExpr struct {
	left  *ResolverResult
	right *ResolverResult
	op    fexpr.SignOp
}

// Build converts the expression into a SQL fragment.
//
// The "all must match" semantics are implemented as a NOT EXISTS over the
// joined subqueries with the inner condition negated (via AfterBuild).
//
// Implements [dbx.Expression] interface.
func (e *manyVsManyExpr) Build(db *dbx.DB, params dbx.Params) string {
	// missing subqueries - return an always-false expression
	if e.left.MultiMatchSubQuery == nil || e.right.MultiMatchSubQuery == nil {
		return "0=1"
	}

	// random aliases to avoid collisions between nested multi-match expressions
	lAlias := "__ml" + security.PseudorandomString(8)
	rAlias := "__mr" + security.PseudorandomString(8)

	whereExpr, buildErr := buildResolversExpr(
		&ResolverResult{
			NoCoalesce: e.left.NoCoalesce,
			Identifier: "[[" + lAlias + ".multiMatchValue]]",
		},
		e.op,
		&ResolverResult{
			NoCoalesce: e.right.NoCoalesce,
			Identifier: "[[" + rAlias + ".multiMatchValue]]",
			// note: the AfterBuild needs to be handled only once and it
			// doesn't matter whether it is applied on the left or right subquery operand
			AfterBuild: dbx.Not, // inverse for the not-exist expression
		},
	)
	if buildErr != nil {
		return "0=1"
	}

	return fmt.Sprintf(
		"NOT EXISTS (SELECT 1 FROM (%s) {{%s}} LEFT JOIN (%s) {{%s}} WHERE %s)",
		e.left.MultiMatchSubQuery.Build(db, params),
		lAlias,
		e.right.MultiMatchSubQuery.Build(db, params),
		rAlias,
		whereExpr.Build(db, params),
	)
}
// -------------------------------------------------------------------

var _ dbx.Expression = (*manyVsOneExpr)(nil)

// manyVsOneExpr constructs a multi-match many<->one db where expression.
//
// Expects subQuery to return a subquery with a single "multiMatchValue" column.
//
// You can set inverse=false to reverse the condition sides (aka. one<->many).
type manyVsOneExpr struct {
	otherOperand *ResolverResult
	subQuery     dbx.Expression
	op           fexpr.SignOp
	inverse      bool
	noCoalesce   bool
}

// Build converts the expression into a SQL fragment.
//
// As with manyVsManyExpr, the "all must match" semantics come from a
// NOT EXISTS over the subquery with the inner condition negated.
//
// Implements [dbx.Expression] interface.
func (e *manyVsOneExpr) Build(db *dbx.DB, params dbx.Params) string {
	// missing subquery - return an always-false expression
	if e.subQuery == nil {
		return "0=1"
	}

	// random alias to avoid collisions between nested multi-match expressions
	alias := "__sm" + security.PseudorandomString(8)

	r1 := &ResolverResult{
		NoCoalesce: e.noCoalesce,
		Identifier: "[[" + alias + ".multiMatchValue]]",
		AfterBuild: dbx.Not, // inverse for the not-exist expression
	}

	r2 := &ResolverResult{
		Identifier: e.otherOperand.Identifier,
		Params:     e.otherOperand.Params,
	}

	var whereExpr dbx.Expression
	var buildErr error

	// inverse swaps which side of the operator the subquery column is on
	if e.inverse {
		whereExpr, buildErr = buildResolversExpr(r2, e.op, r1)
	} else {
		whereExpr, buildErr = buildResolversExpr(r1, e.op, r2)
	}

	if buildErr != nil {
		return "0=1"
	}

	return fmt.Sprintf(
		"NOT EXISTS (SELECT 1 FROM (%s) {{%s}} WHERE %s)",
		e.subQuery.Build(db, params),
		alias,
		whereExpr.Build(db, params),
	)
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/search/filter_test.go | tools/search/filter_test.go | package search_test
import (
"context"
"database/sql"
"fmt"
"regexp"
"strings"
"testing"
"time"
"github.com/pocketbase/dbx"
"github.com/pocketbase/pocketbase/tools/search"
)
// TestFilterDataBuildExpr verifies that FilterData.BuildExpr produces the
// expected SQL shape for a range of filter expressions (placeholders are
// matched via a TEST -> \w+ regex substitution since their names are random).
func TestFilterDataBuildExpr(t *testing.T) {
	resolver := search.NewSimpleFieldResolver("test1", "test2", "test3", `^test4_\w+$`, `^test5\.[\w\.\:]*\w+$`)

	scenarios := []struct {
		name          string
		filterData    search.FilterData
		expectError   bool
		expectPattern string
	}{
		{
			"empty",
			"",
			true,
			"",
		},
		{
			"invalid format",
			"(test1 > 1",
			true,
			"",
		},
		{
			"invalid operator",
			"test1 + 123",
			true,
			"",
		},
		{
			"unknown field",
			"test1 = 'example' && unknown > 1",
			true,
			"",
		},
		{
			"simple expression",
			"test1 > 1",
			false,
			"[[test1]] > {:TEST}",
		},
		{
			"empty string vs null",
			"'' = null && null != ''",
			false,
			"('' = '' AND '' IS NOT '')",
		},
		{
			"like with 2 columns",
			"test1 ~ test2",
			false,
			"[[test1]] LIKE ('%' || [[test2]] || '%') ESCAPE '\\'",
		},
		{
			"like with right column operand",
			"'lorem' ~ test1",
			false,
			"{:TEST} LIKE ('%' || [[test1]] || '%') ESCAPE '\\'",
		},
		{
			"like with left column operand and text as right operand",
			"test1 ~ 'lorem'",
			false,
			"[[test1]] LIKE {:TEST} ESCAPE '\\'",
		},
		{
			"not like with 2 columns",
			"test1 !~ test2",
			false,
			"[[test1]] NOT LIKE ('%' || [[test2]] || '%') ESCAPE '\\'",
		},
		{
			"not like with right column operand",
			"'lorem' !~ test1",
			false,
			"{:TEST} NOT LIKE ('%' || [[test1]] || '%') ESCAPE '\\'",
		},
		{
			"like with left column operand and text as right operand",
			"test1 !~ 'lorem'",
			false,
			"[[test1]] NOT LIKE {:TEST} ESCAPE '\\'",
		},
		{
			"nested json no coalesce",
			"test5.a = test5.b || test5.c != test5.d",
			false,
			"(JSON_EXTRACT([[test5]], '$.a') IS JSON_EXTRACT([[test5]], '$.b') OR JSON_EXTRACT([[test5]], '$.c') IS NOT JSON_EXTRACT([[test5]], '$.d'))",
		},
		{
			"macros",
			`
				test4_1 > @now &&
				test4_2 > @second &&
				test4_3 > @minute &&
				test4_4 > @hour &&
				test4_5 > @day &&
				test4_6 > @year &&
				test4_7 > @month &&
				test4_9 > @weekday &&
				test4_9 > @todayStart &&
				test4_10 > @todayEnd &&
				test4_11 > @monthStart &&
				test4_12 > @monthEnd &&
				test4_13 > @yearStart &&
				test4_14 > @yearEnd
			`,
			false,
			"([[test4_1]] > {:TEST} AND [[test4_2]] > {:TEST} AND [[test4_3]] > {:TEST} AND [[test4_4]] > {:TEST} AND [[test4_5]] > {:TEST} AND [[test4_6]] > {:TEST} AND [[test4_7]] > {:TEST} AND [[test4_9]] > {:TEST} AND [[test4_9]] > {:TEST} AND [[test4_10]] > {:TEST} AND [[test4_11]] > {:TEST} AND [[test4_12]] > {:TEST} AND [[test4_13]] > {:TEST} AND [[test4_14]] > {:TEST})",
		},
		{
			"complex expression",
			"((test1 > 1) || (test2 != 2)) && test3 ~ '%%example' && test4_sub = null",
			false,
			"(([[test1]] > {:TEST} OR [[test2]] IS NOT {:TEST}) AND [[test3]] LIKE {:TEST} ESCAPE '\\' AND ([[test4_sub]] = '' OR [[test4_sub]] IS NULL))",
		},
		{
			"combination of special literals (null, true, false)",
			"test1=true && test2 != false && null = test3 || null != test4_sub",
			false,
			"([[test1]] = 1 AND [[test2]] IS NOT 0 AND ('' = [[test3]] OR [[test3]] IS NULL) OR ('' IS NOT [[test4_sub]] AND [[test4_sub]] IS NOT NULL))",
		},
		{
			"all operators",
			"(test1 = test2 || test2 != test3) && (test2 ~ 'example' || test2 !~ '%%abc') && 'switch1%%' ~ test1 && 'switch2' !~ test2 && test3 > 1 && test3 >= 0 && test3 <= 4 && 2 < 5",
			false,
			"((COALESCE([[test1]], '') = COALESCE([[test2]], '') OR COALESCE([[test2]], '') IS NOT COALESCE([[test3]], '')) AND ([[test2]] LIKE {:TEST} ESCAPE '\\' OR [[test2]] NOT LIKE {:TEST} ESCAPE '\\') AND {:TEST} LIKE ('%' || [[test1]] || '%') ESCAPE '\\' AND {:TEST} NOT LIKE ('%' || [[test2]] || '%') ESCAPE '\\' AND [[test3]] > {:TEST} AND [[test3]] >= {:TEST} AND [[test3]] <= {:TEST} AND {:TEST} < {:TEST})",
		},
		{
			"geoDistance function",
			"geoDistance(1,2,3,4) < 567",
			false,
			"(6371 * acos(cos(radians({:TEST})) * cos(radians({:TEST})) * cos(radians({:TEST}) - radians({:TEST})) + sin(radians({:TEST})) * sin(radians({:TEST})))) < {:TEST}",
		},
	}

	for _, s := range scenarios {
		t.Run(s.name, func(t *testing.T) {
			expr, err := s.filterData.BuildExpr(resolver)

			hasErr := err != nil
			if hasErr != s.expectError {
				t.Fatalf("[%s] Expected hasErr %v, got %v (%v)", s.name, s.expectError, hasErr, err)
			}

			if hasErr {
				return
			}

			// building against a zero-value db is enough to render the raw SQL
			dummyDB := &dbx.DB{}

			rawSql := expr.Build(dummyDB, dbx.Params{})

			// replace TEST placeholder with .+ regex pattern
			expectPattern := strings.ReplaceAll(
				"^"+regexp.QuoteMeta(s.expectPattern)+"$",
				"TEST",
				`\w+`,
			)

			pattern := regexp.MustCompile(expectPattern)
			if !pattern.MatchString(rawSql) {
				t.Fatalf("[%s] Pattern %v don't match with expression: \n%v", s.name, expectPattern, rawSql)
			}
		})
	}
}
// TestFilterDataBuildExprWithParams verifies that external {:placeholder}
// parameters of different Go types are bound and rendered correctly in the
// final SQL (captured through the db query/exec log hooks).
func TestFilterDataBuildExprWithParams(t *testing.T) {
	// create a dummy db
	sqlDB, err := sql.Open("sqlite", "file::memory:?cache=shared")
	if err != nil {
		t.Fatal(err)
	}
	db := dbx.NewFromDB(sqlDB, "sqlite")

	// capture every generated statement instead of inspecting results
	calledQueries := []string{}
	db.QueryLogFunc = func(ctx context.Context, t time.Duration, sql string, rows *sql.Rows, err error) {
		calledQueries = append(calledQueries, sql)
	}
	db.ExecLogFunc = func(ctx context.Context, t time.Duration, sql string, result sql.Result, err error) {
		calledQueries = append(calledQueries, sql)
	}

	date, err := time.Parse("2006-01-02", "2023-01-01")
	if err != nil {
		t.Fatal(err)
	}

	resolver := search.NewSimpleFieldResolver(`^test\w+$`)

	filter := search.FilterData(`
		test1 = {:test1} ||
		test2 = {:test2} ||
		test3a = {:test3} ||
		test3b = {:test3} ||
		test4 = {:test4} ||
		test5 = {:test5} ||
		test6 = {:test6} ||
		test7 = {:test7} ||
		test8 = {:test8} ||
		test9 = {:test9} ||
		test10 = {:test10} ||
		test11 = {:test11} ||
		test12 = {:test12}
	`)

	replacements := []dbx.Params{
		{"test1": true},
		{"test2": false},
		{"test3": 123.456},
		{"test4": nil},
		{"test5": "", "test6": "simple", "test7": `'single_quotes'`, "test8": `"double_quotes"`, "test9": `escape\"quote`},
		{"test10": date},
		{"test11": []string{"a", "b", `"quote`}},
		{"test12": map[string]any{"a": 123, "b": `quote"`}},
	}

	expr, err := filter.BuildExpr(resolver, replacements...)
	if err != nil {
		t.Fatal(err)
	}

	db.Select().Where(expr).Build().Execute()

	if len(calledQueries) != 1 {
		t.Fatalf("Expected 1 query, got %d", len(calledQueries))
	}

	expectedQuery := `SELECT * WHERE ([[test1]] = 1 OR [[test2]] = 0 OR [[test3a]] = 123.456 OR [[test3b]] = 123.456 OR ([[test4]] = '' OR [[test4]] IS NULL) OR [[test5]] = '""' OR [[test6]] = 'simple' OR [[test7]] = '''single_quotes''' OR [[test8]] = '"double_quotes"' OR [[test9]] = 'escape\\"quote' OR [[test10]] = '2023-01-01 00:00:00 +0000 UTC' OR [[test11]] = '["a","b","\\"quote"]' OR [[test12]] = '{"a":123,"b":"quote\\""}')`
	if expectedQuery != calledQueries[0] {
		t.Fatalf("Expected query \n%s, \ngot \n%s", expectedQuery, calledQueries[0])
	}
}
// TestFilterDataBuildExprWithLimit verifies that BuildExprWithLimit fails
// once the number of filter expressions exceeds the provided limit.
func TestFilterDataBuildExprWithLimit(t *testing.T) {
	resolver := search.NewSimpleFieldResolver(`^\w+$`)

	scenarios := []struct {
		limit       int
		filter      search.FilterData
		expectError bool
	}{
		{1, "1 = 1", false},
		{0, "1 = 1", true}, // new cache entry should be created
		{2, "1 = 1 || 1 = 1", false},
		{1, "1 = 1 || 1 = 1", true},
		{3, "1 = 1 || 1 = 1", false},
		{6, "(1=1 || 1=1) && (1=1 || (1=1 || 1=1)) && (1=1)", false},
		{5, "(1=1 || 1=1) && (1=1 || (1=1 || 1=1)) && (1=1)", true},
	}

	for i, sc := range scenarios {
		t.Run(fmt.Sprintf("limit_%d:%d", i, sc.limit), func(t *testing.T) {
			_, err := sc.filter.BuildExprWithLimit(resolver, sc.limit)

			if gotErr := err != nil; gotErr != sc.expectError {
				t.Fatalf("Expected hasErr %v, got %v", sc.expectError, gotErr)
			}
		})
	}
}
// TestLikeParamsWrapping verifies the `~` operator autowrap behavior:
// params without an explicit unescaped `%` wildcard are escaped and
// wrapped in `%...%`, while params that already contain one are kept as-is.
func TestLikeParamsWrapping(t *testing.T) {
	// create a dummy db
	sqlDB, err := sql.Open("sqlite", "file::memory:?cache=shared")
	if err != nil {
		t.Fatal(err)
	}
	db := dbx.NewFromDB(sqlDB, "sqlite")

	// capture every generated statement instead of inspecting results
	calledQueries := []string{}
	db.QueryLogFunc = func(ctx context.Context, t time.Duration, sql string, rows *sql.Rows, err error) {
		calledQueries = append(calledQueries, sql)
	}
	db.ExecLogFunc = func(ctx context.Context, t time.Duration, sql string, result sql.Result, err error) {
		calledQueries = append(calledQueries, sql)
	}

	resolver := search.NewSimpleFieldResolver(`^test\w+$`)

	filter := search.FilterData(`
		test1 ~ {:p1} ||
		test2 ~ {:p2} ||
		test3 ~ {:p3} ||
		test4 ~ {:p4} ||
		test5 ~ {:p5} ||
		test6 ~ {:p6} ||
		test7 ~ {:p7} ||
		test8 ~ {:p8} ||
		test9 ~ {:p9} ||
		test10 ~ {:p10} ||
		test11 ~ {:p11} ||
		test12 ~ {:p12}
	`)

	replacements := []dbx.Params{
		{"p1": `abc`},
		{"p2": `ab%c`},
		{"p3": `ab\%c`},
		{"p4": `%ab\%c`},
		{"p5": `ab\\%c`},
		{"p6": `ab\\\%c`},
		{"p7": `ab_c`},
		{"p8": `ab\_c`},
		{"p9": `%ab_c`},
		{"p10": `ab\c`},
		{"p11": `_ab\c_`},
		{"p12": `ab\c%`},
	}

	expr, err := filter.BuildExpr(resolver, replacements...)
	if err != nil {
		t.Fatal(err)
	}

	db.Select().Where(expr).Build().Execute()

	if len(calledQueries) != 1 {
		t.Fatalf("Expected 1 query, got %d", len(calledQueries))
	}

	expectedQuery := `SELECT * WHERE ([[test1]] LIKE '%abc%' ESCAPE '\' OR [[test2]] LIKE 'ab%c' ESCAPE '\' OR [[test3]] LIKE 'ab\\%c' ESCAPE '\' OR [[test4]] LIKE '%ab\\%c' ESCAPE '\' OR [[test5]] LIKE 'ab\\\\%c' ESCAPE '\' OR [[test6]] LIKE 'ab\\\\\\%c' ESCAPE '\' OR [[test7]] LIKE '%ab\_c%' ESCAPE '\' OR [[test8]] LIKE '%ab\\\_c%' ESCAPE '\' OR [[test9]] LIKE '%ab_c' ESCAPE '\' OR [[test10]] LIKE '%ab\\c%' ESCAPE '\' OR [[test11]] LIKE '%\_ab\\c\_%' ESCAPE '\' OR [[test12]] LIKE 'ab\\c%' ESCAPE '\')`
	if expectedQuery != calledQueries[0] {
		t.Fatalf("Expected query \n%s, \ngot \n%s", expectedQuery, calledQueries[0])
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/search/simple_field_resolver.go | tools/search/simple_field_resolver.go | package search
import (
"fmt"
"strconv"
"strings"
"github.com/pocketbase/dbx"
"github.com/pocketbase/pocketbase/tools/inflector"
"github.com/pocketbase/pocketbase/tools/list"
)
// ResolverResult defines a single FieldResolver.Resolve() successfully parsed result.
type ResolverResult struct {
	// Identifier is the plain SQL identifier/column that will be used
	// in the final db expression as left or right operand.
	Identifier string

	// NoCoalesce instructs to not use COALESCE or NULL fallbacks
	// when building the identifier expression
	// (eg. for JSON_EXTRACT-based identifiers).
	NoCoalesce bool

	// Params is a map with db placeholder->value pairs that will be added
	// to the query when building both resolved operands/sides in a single expression.
	Params dbx.Params

	// MultiMatchSubQuery is an optional sub query expression that will be added
	// in addition to the combined ResolverResult expression during build.
	MultiMatchSubQuery dbx.Expression

	// AfterBuild is an optional function that will be called after building
	// and combining the result of both resolved operands/sides in a single expression.
	AfterBuild func(expr dbx.Expression) dbx.Expression
}
// FieldResolver defines an interface for managing search fields.
type FieldResolver interface {
	// UpdateQuery allows updating the provided db query based on the
	// resolved search fields (eg. adding joins aliases, etc.).
	//
	// Called internally by `search.Provider` before executing the search request.
	UpdateQuery(query *dbx.SelectQuery) error

	// Resolve parses the provided field and returns a properly
	// formatted db identifier (eg. NULL, quoted column, placeholder parameter, etc.).
	Resolve(field string) (*ResolverResult, error)
}
// NewSimpleFieldResolver creates a new `SimpleFieldResolver` with the
// provided `allowedFields`.
//
// Each `allowedFields` entry could be a plain string (eg. "name")
// or a regexp pattern (eg. `^\w+[\w\.]*$`).
func NewSimpleFieldResolver(allowedFields ...string) *SimpleFieldResolver {
	resolver := &SimpleFieldResolver{}
	resolver.allowedFields = allowedFields
	return resolver
}

// SimpleFieldResolver defines a generic search resolver that allows
// only its listed fields to be resolved and take part in a search query.
//
// If `allowedFields` are empty no fields filtering is applied.
type SimpleFieldResolver struct {
	allowedFields []string
}
// UpdateQuery implements `search.UpdateQuery` interface.
//
// It is a no-op since the simple resolver requires no joins or aliases.
func (r *SimpleFieldResolver) UpdateQuery(query *dbx.SelectQuery) error {
	// nothing to update...
	return nil
}
// Resolve implements `search.Resolve` interface.
//
// Plain fields resolve to a quoted column; dotted fields resolve to a
// JSON_EXTRACT over the first part, with the remaining parts forming the
// JSON path (numeric parts become array indexes).
//
// Returns error if `field` is not in `r.allowedFields`.
func (r *SimpleFieldResolver) Resolve(field string) (*ResolverResult, error) {
	if !list.ExistInSliceWithRegex(field, r.allowedFields) {
		return nil, fmt.Errorf("failed to resolve field %q", field)
	}

	parts := strings.Split(field, ".")

	// single regular field
	if len(parts) == 1 {
		return &ResolverResult{
			Identifier: "[[" + inflector.Columnify(parts[0]) + "]]",
		}, nil
	}

	// treat as json path
	var path strings.Builder
	path.WriteString("$")
	for _, part := range parts[1:] {
		col := inflector.Columnify(part)
		if _, numErr := strconv.Atoi(part); numErr == nil {
			// numeric part -> array index
			path.WriteString("[" + col + "]")
		} else {
			path.WriteString("." + col)
		}
	}

	return &ResolverResult{
		NoCoalesce: true,
		Identifier: fmt.Sprintf(
			"JSON_EXTRACT([[%s]], '%s')",
			inflector.Columnify(parts[0]),
			path.String(),
		),
	}, nil
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/search/identifier_macros_test.go | tools/search/identifier_macros_test.go | package search
import (
"testing"
"time"
)
// TestIdentifierMacros verifies all registered identifier macros against a
// frozen clock.
func TestIdentifierMacros(t *testing.T) {
	originalTimeNow := timeNow
	// restore via defer so the stubbed clock doesn't leak into other tests
	// even when the test fails early (t.Fatal still runs deferred calls)
	defer func() {
		timeNow = originalTimeNow
	}()

	// freeze the clock so the macro results are deterministic
	timeNow = func() time.Time {
		return time.Date(2023, 2, 3, 4, 5, 6, 7, time.UTC)
	}

	testMacros := map[string]any{
		"@now":        "2023-02-03 04:05:06.000Z",
		"@yesterday":  "2023-02-02 04:05:06.000Z",
		"@tomorrow":   "2023-02-04 04:05:06.000Z",
		"@second":     6,
		"@minute":     5,
		"@hour":       4,
		"@day":        3,
		"@month":      2,
		"@weekday":    5,
		"@year":       2023,
		"@todayStart": "2023-02-03 00:00:00.000Z",
		"@todayEnd":   "2023-02-03 23:59:59.999Z",
		"@monthStart": "2023-02-01 00:00:00.000Z",
		"@monthEnd":   "2023-02-28 23:59:59.999Z",
		"@yearStart":  "2023-01-01 00:00:00.000Z",
		"@yearEnd":    "2023-12-31 23:59:59.999Z",
	}

	// guard against newly added macros missing from the expectations above
	if len(testMacros) != len(identifierMacros) {
		t.Fatalf("Expected %d macros, got %d", len(testMacros), len(identifierMacros))
	}

	for key, expected := range testMacros {
		t.Run(key, func(t *testing.T) {
			macro, ok := identifierMacros[key]
			if !ok {
				t.Fatalf("Missing macro %s", key)
			}

			result, err := macro()
			if err != nil {
				t.Fatal(err)
			}

			if result != expected {
				t.Fatalf("Expected %q, got %q", expected, result)
			}
		})
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/search/token_functions.go | tools/search/token_functions.go | package search
import (
"fmt"
"github.com/ganigeorgiev/fexpr"
)
// TokenFunctions defines the registered filter function tokens, keyed by
// function name. Each entry receives a resolver callback for its raw
// argument tokens and returns the combined ResolverResult.
var TokenFunctions = map[string]func(
	argTokenResolverFunc func(fexpr.Token) (*ResolverResult, error),
	args ...fexpr.Token,
) (*ResolverResult, error){
	// geoDistance(lonA, latA, lonB, latB) calculates the Haversine
	// distance between 2 points in kilometres (https://www.movable-type.co.uk/scripts/latlong.html).
	//
	// The accepted arguments at the moment could be either a plain number or a column identifier (including NULL).
	// If the column identifier cannot be resolved and converted to a numeric value, it resolves to NULL.
	//
	// Similar to the built-in SQLite functions, geoDistance doesn't apply
	// a "match-all" constraints in case there are multiple relation fields arguments.
	// Or in other words, if a collection has "orgs" multiple relation field pointing to "orgs" collection that has "office" as "geoPoint" field,
	// then the filter: `geoDistance(orgs.office.lon, orgs.office.lat, 1, 2) < 200`
	// will evaluate to true if for at-least-one of the "orgs.office" records the function result in a value satisfying the condition (aka. "result < 200").
	"geoDistance": func(argTokenResolverFunc func(fexpr.Token) (*ResolverResult, error), args ...fexpr.Token) (*ResolverResult, error) {
		if len(args) != 4 {
			return nil, fmt.Errorf("[geoDistance] expected 4 arguments, got %d", len(args))
		}

		// resolve each argument, rejecting anything that isn't an identifier or number
		resolvedArgs := make([]*ResolverResult, 4)
		for i, arg := range args {
			if arg.Type != fexpr.TokenIdentifier && arg.Type != fexpr.TokenNumber {
				return nil, fmt.Errorf("[geoDistance] argument %d must be an identifier or number", i)
			}
			resolved, err := argTokenResolverFunc(arg)
			if err != nil {
				return nil, fmt.Errorf("[geoDistance] failed to resolve argument %d: %w", i, err)
			}
			resolvedArgs[i] = resolved
		}

		lonA := resolvedArgs[0].Identifier
		latA := resolvedArgs[1].Identifier
		lonB := resolvedArgs[2].Identifier
		latB := resolvedArgs[3].Identifier

		// 6371 is the mean Earth radius in km used by the Haversine formula
		return &ResolverResult{
			NoCoalesce: true,
			Identifier: `(6371 * acos(` +
				`cos(radians(` + latA + `)) * cos(radians(` + latB + `)) * ` +
				`cos(radians(` + lonB + `) - radians(` + lonA + `)) + ` +
				`sin(radians(` + latA + `)) * sin(radians(` + latB + `))` +
				`))`,
			Params: mergeParams(resolvedArgs[0].Params, resolvedArgs[1].Params, resolvedArgs[2].Params, resolvedArgs[3].Params),
		}, nil
	},
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/search/provider.go | tools/search/provider.go | package search
import (
"errors"
"math"
"net/url"
"strconv"
"strings"
"github.com/pocketbase/dbx"
"github.com/pocketbase/pocketbase/tools/inflector"
"golang.org/x/sync/errgroup"
)
// Default and maximum limits applied by the search provider.
const (
	// DefaultPerPage specifies the default number of returned search result items.
	DefaultPerPage int = 30

	// DefaultFilterExprLimit specifies the default filter expressions limit.
	DefaultFilterExprLimit int = 200

	// DefaultSortExprLimit specifies the default sort expressions limit.
	DefaultSortExprLimit int = 8

	// MaxPerPage specifies the max allowed search result items returned in a single page.
	MaxPerPage int = 1000

	// MaxFilterLength specifies the max allowed individual search filter parsable length.
	MaxFilterLength int = 3500

	// MaxSortFieldLength specifies the max allowed individual sort field parsable length.
	MaxSortFieldLength int = 255
)
// Common search errors returned by Provider.Exec.
var (
	ErrEmptyQuery           = errors.New("search query is not set")
	ErrSortExprLimit        = errors.New("max sort expressions limit reached")
	ErrFilterExprLimit      = errors.New("max filter expressions limit reached")
	ErrFilterLengthLimit    = errors.New("max filter length limit reached")
	ErrSortFieldLengthLimit = errors.New("max sort field length limit reached")
)
// URL search query params recognized by Provider.Parse.
const (
	PageQueryParam      string = "page"
	PerPageQueryParam   string = "perPage"
	SortQueryParam      string = "sort"
	FilterQueryParam    string = "filter"
	SkipTotalQueryParam string = "skipTotal"
)
// Result defines the returned search result structure.
//
// TotalItems/TotalPages are -1 when the search was executed with skipTotal.
type Result struct {
	Items      any `json:"items"`
	Page       int `json:"page"`
	PerPage    int `json:"perPage"`
	TotalItems int `json:"totalItems"`
	TotalPages int `json:"totalPages"`
}
// Provider represents a single configured search provider instance.
type Provider struct {
	fieldResolver      FieldResolver    // resolves filter/sort fields into db expressions
	query              *dbx.SelectQuery // base query the search is applied on
	countCol           string           // column used for the COUNT statement (ignored when skipTotal)
	sort               []SortField
	filter             []FilterData
	page               int
	perPage            int
	skipTotal          bool
	maxFilterExprLimit int
	maxSortExprLimit   int
}
// NewProvider initializes and returns a new search provider.
//
// Example:
//
//	baseQuery := db.Select("*").From("user")
//	fieldResolver := search.NewSimpleFieldResolver("id", "name")
//	models := []*YourDataStruct{}
//
//	result, err := search.NewProvider(fieldResolver).
//	    Query(baseQuery).
//	    ParseAndExec("page=2&filter=id>0&sort=-email", &models)
func NewProvider(fieldResolver FieldResolver) *Provider {
	p := &Provider{
		fieldResolver:      fieldResolver,
		countCol:           "id",
		page:               1,
		perPage:            DefaultPerPage,
		maxFilterExprLimit: DefaultFilterExprLimit,
		maxSortExprLimit:   DefaultSortExprLimit,
	}
	p.sort = []SortField{}
	p.filter = []FilterData{}
	return p
}
// MaxFilterExprLimit changes the default max allowed filter expressions.
//
// Note that currently the limit is applied individually for each separate filter.
func (s *Provider) MaxFilterExprLimit(limit int) *Provider {
	// (renamed the parameter from "max" to avoid shadowing the Go 1.21 builtin)
	s.maxFilterExprLimit = limit
	return s
}
// MaxSortExprLimit changes the default max allowed sort expressions.
func (s *Provider) MaxSortExprLimit(limit int) *Provider {
	// (renamed the parameter from "max" to avoid shadowing the Go 1.21 builtin)
	s.maxSortExprLimit = limit
	return s
}
// Query sets the base query that will be used to fetch the search items.
func (s *Provider) Query(query *dbx.SelectQuery) *Provider {
	s.query = query
	return s
}
// SkipTotal changes the `skipTotal` field of the current search provider.
//
// When true, the extra COUNT query is skipped during Exec.
func (s *Provider) SkipTotal(skipTotal bool) *Provider {
	s.skipTotal = skipTotal
	return s
}
// CountCol allows changing the default column (id) that is used
// to generate the COUNT SQL query statement.
//
// This field is ignored if skipTotal is true.
func (s *Provider) CountCol(name string) *Provider {
	s.countCol = name
	return s
}
// Page sets the `page` field of the current search provider.
//
// Normalization on the `page` value is done during `Exec()`.
func (s *Provider) Page(page int) *Provider {
	s.page = page
	return s
}
// PerPage sets the `perPage` field of the current search provider.
//
// Normalization on the `perPage` value is done during `Exec()`.
func (s *Provider) PerPage(perPage int) *Provider {
	s.perPage = perPage
	return s
}
// Sort sets (replaces) the `sort` field of the current search provider.
func (s *Provider) Sort(sort []SortField) *Provider {
	s.sort = sort
	return s
}
// AddSort appends the provided SortField to the existing provider's sort field.
func (s *Provider) AddSort(field SortField) *Provider {
	s.sort = append(s.sort, field)
	return s
}
// Filter sets (replaces) the `filter` field of the current search provider.
func (s *Provider) Filter(filter []FilterData) *Provider {
	s.filter = filter
	return s
}
// AddFilter appends the provided FilterData to the existing provider's filter field.
//
// Empty filters are silently ignored.
func (s *Provider) AddFilter(filter FilterData) *Provider {
	if filter != "" {
		s.filter = append(s.filter, filter)
	}
	return s
}
// Parse parses the search query parameter from the provided query string
// and assigns the found fields to the current search provider.
//
// The data from the "sort" and "filter" query parameters are appended
// to the existing provider's `sort` and `filter` fields
// (aka. using `AddSort` and `AddFilter`).
func (s *Provider) Parse(urlQuery string) error {
	params, err := url.ParseQuery(urlQuery)
	if err != nil {
		return err
	}

	if rawSkipTotal := params.Get(SkipTotalQueryParam); rawSkipTotal != "" {
		skipTotal, parseErr := strconv.ParseBool(rawSkipTotal)
		if parseErr != nil {
			return parseErr
		}
		s.SkipTotal(skipTotal)
	}

	if rawPage := params.Get(PageQueryParam); rawPage != "" {
		page, parseErr := strconv.Atoi(rawPage)
		if parseErr != nil {
			return parseErr
		}
		s.Page(page)
	}

	if rawPerPage := params.Get(PerPageQueryParam); rawPerPage != "" {
		perPage, parseErr := strconv.Atoi(rawPerPage)
		if parseErr != nil {
			return parseErr
		}
		s.PerPage(perPage)
	}

	if rawSort := params.Get(SortQueryParam); rawSort != "" {
		for _, sortField := range ParseSortFromString(rawSort) {
			s.AddSort(sortField)
		}
	}

	if rawFilter := params.Get(FilterQueryParam); rawFilter != "" {
		s.AddFilter(FilterData(rawFilter))
	}

	return nil
}
// Exec executes the search provider and fills/scans
// the provided `items` slice with the found models.
//
// Note that Exec normalizes the provider's page/perPage state in place,
// so the normalized values are reflected in the returned Result.
func (s *Provider) Exec(items any) (*Result, error) {
	if s.query == nil {
		return nil, ErrEmptyQuery
	}

	// shallow clone the provider's query
	// (in-place modifications of nested slice/map fields would leak into s.query)
	modelsQuery := *s.query

	// build filters
	for _, f := range s.filter {
		if len(f) > MaxFilterLength {
			return nil, ErrFilterLengthLimit
		}
		expr, err := f.BuildExprWithLimit(s.fieldResolver, s.maxFilterExprLimit)
		if err != nil {
			return nil, err
		}
		if expr != nil {
			modelsQuery.AndWhere(expr)
		}
	}

	// apply sorting
	if len(s.sort) > s.maxSortExprLimit {
		return nil, ErrSortExprLimit
	}
	for _, sortField := range s.sort {
		if len(sortField.Name) > MaxSortFieldLength {
			return nil, ErrSortFieldLengthLimit
		}
		expr, err := sortField.BuildExpr(s.fieldResolver)
		if err != nil {
			return nil, err
		}
		if expr != "" {
			// ensure that _rowid_ expressions are always prefixed with the first FROM table
			if sortField.Name == rowidSortKey && !strings.Contains(expr, ".") {
				queryInfo := modelsQuery.Info()
				if len(queryInfo.From) > 0 {
					expr = "[[" + inflector.Columnify(queryInfo.From[0]) + "]]." + expr
				}
			}
			modelsQuery.AndOrderBy(expr)
		}
	}

	// apply field resolver query modifications (if any)
	if err := s.fieldResolver.UpdateQuery(&modelsQuery); err != nil {
		return nil, err
	}

	// normalize page
	if s.page <= 0 {
		s.page = 1
	}

	// normalize perPage
	if s.perPage <= 0 {
		s.perPage = DefaultPerPage
	} else if s.perPage > MaxPerPage {
		s.perPage = MaxPerPage
	}

	// negative value to differentiate from the zero default
	totalCount := -1
	totalPages := -1

	// prepare a count query from the base one
	countQuery := modelsQuery // shallow clone
	countExec := func() error {
		queryInfo := countQuery.Info()
		countCol := s.countCol
		if len(queryInfo.From) > 0 {
			countCol = queryInfo.From[0] + "." + countCol
		}

		// note: countQuery is shallow cloned and slice/map in-place modifications should be avoided
		err := countQuery.Distinct(false).
			Select("COUNT(DISTINCT [[" + countCol + "]])").
			OrderBy( /* reset */ ).
			Row(&totalCount)
		if err != nil {
			return err
		}

		totalPages = int(math.Ceil(float64(totalCount) / float64(s.perPage)))

		return nil
	}

	// apply pagination to the original query and fetch the models
	modelsExec := func() error {
		modelsQuery.Limit(int64(s.perPage))
		modelsQuery.Offset(int64(s.perPage * (s.page - 1)))

		return modelsQuery.All(items)
	}

	if !s.skipTotal {
		// execute the count and models queries concurrently
		errg := new(errgroup.Group)
		errg.Go(countExec)
		errg.Go(modelsExec)
		if err := errg.Wait(); err != nil {
			return nil, err
		}
	} else {
		// the total count is skipped -> only fetch the models
		// (totalCount/totalPages remain -1 to signal "not computed")
		if err := modelsExec(); err != nil {
			return nil, err
		}
	}

	result := &Result{
		Page:       s.page,
		PerPage:    s.perPage,
		TotalItems: totalCount,
		TotalPages: totalPages,
		Items:      items,
	}

	return result, nil
}
// ParseAndExec is a short convenient method to trigger both
// `Parse()` and `Exec()` in a single call.
func (s *Provider) ParseAndExec(urlQuery string, modelsSlice any) (*Result, error) {
	err := s.Parse(urlQuery)
	if err != nil {
		return nil, err
	}

	return s.Exec(modelsSlice)
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/search/provider_test.go | tools/search/provider_test.go | package search
import (
"context"
"database/sql"
"encoding/json"
"errors"
"fmt"
"strconv"
"strings"
"testing"
"time"
"github.com/pocketbase/dbx"
"github.com/pocketbase/pocketbase/tools/list"
_ "modernc.org/sqlite"
)
func TestNewProvider(t *testing.T) {
r := &testFieldResolver{}
p := NewProvider(r)
if p.page != 1 {
t.Fatalf("Expected page %d, got %d", 1, p.page)
}
if p.perPage != DefaultPerPage {
t.Fatalf("Expected perPage %d, got %d", DefaultPerPage, p.perPage)
}
if p.maxFilterExprLimit != DefaultFilterExprLimit {
t.Fatalf("Expected maxFilterExprLimit %d, got %d", DefaultFilterExprLimit, p.maxFilterExprLimit)
}
if p.maxSortExprLimit != DefaultSortExprLimit {
t.Fatalf("Expected maxSortExprLimit %d, got %d", DefaultSortExprLimit, p.maxSortExprLimit)
}
}
func TestMaxFilterExprLimit(t *testing.T) {
p := NewProvider(&testFieldResolver{})
testVals := []int{0, -10, 10}
for _, val := range testVals {
t.Run("max_"+strconv.Itoa(val), func(t *testing.T) {
p.MaxFilterExprLimit(val)
if p.maxFilterExprLimit != val {
t.Fatalf("Expected maxFilterExprLimit to change to %d, got %d", val, p.maxFilterExprLimit)
}
})
}
}
func TestMaxSortExprLimit(t *testing.T) {
p := NewProvider(&testFieldResolver{})
testVals := []int{0, -10, 10}
for _, val := range testVals {
t.Run("max_"+strconv.Itoa(val), func(t *testing.T) {
p.MaxSortExprLimit(val)
if p.maxSortExprLimit != val {
t.Fatalf("Expected maxSortExprLimit to change to %d, got %d", val, p.maxSortExprLimit)
}
})
}
}
func TestProviderQuery(t *testing.T) {
db := dbx.NewFromDB(nil, "")
query := db.Select("id").From("test")
querySql := query.Build().SQL()
r := &testFieldResolver{}
p := NewProvider(r).Query(query)
expected := p.query.Build().SQL()
if querySql != expected {
t.Fatalf("Expected %v, got %v", expected, querySql)
}
}
func TestProviderSkipTotal(t *testing.T) {
p := NewProvider(&testFieldResolver{})
if p.skipTotal {
t.Fatalf("Expected the default skipTotal to be %v, got %v", false, p.skipTotal)
}
p.SkipTotal(true)
if !p.skipTotal {
t.Fatalf("Expected skipTotal to change to %v, got %v", true, p.skipTotal)
}
}
func TestProviderCountCol(t *testing.T) {
p := NewProvider(&testFieldResolver{})
if p.countCol != "id" {
t.Fatalf("Expected the default countCol to be %s, got %s", "id", p.countCol)
}
p.CountCol("test")
if p.countCol != "test" {
t.Fatalf("Expected colCount to change to %s, got %s", "test", p.countCol)
}
}
func TestProviderPage(t *testing.T) {
r := &testFieldResolver{}
p := NewProvider(r).Page(10)
if p.page != 10 {
t.Fatalf("Expected page %v, got %v", 10, p.page)
}
}
func TestProviderPerPage(t *testing.T) {
r := &testFieldResolver{}
p := NewProvider(r).PerPage(456)
if p.perPage != 456 {
t.Fatalf("Expected perPage %v, got %v", 456, p.perPage)
}
}
func TestProviderSort(t *testing.T) {
initialSort := []SortField{{"test1", SortAsc}, {"test2", SortAsc}}
r := &testFieldResolver{}
p := NewProvider(r).
Sort(initialSort).
AddSort(SortField{"test3", SortDesc})
encoded, _ := json.Marshal(p.sort)
expected := `[{"name":"test1","direction":"ASC"},{"name":"test2","direction":"ASC"},{"name":"test3","direction":"DESC"}]`
if string(encoded) != expected {
t.Fatalf("Expected sort %v, got \n%v", expected, string(encoded))
}
}
func TestProviderFilter(t *testing.T) {
initialFilter := []FilterData{"test1", "test2"}
r := &testFieldResolver{}
p := NewProvider(r).
Filter(initialFilter).
AddFilter("test3")
encoded, _ := json.Marshal(p.filter)
expected := `["test1","test2","test3"]`
if string(encoded) != expected {
t.Fatalf("Expected filter %v, got \n%v", expected, string(encoded))
}
}
func TestProviderParse(t *testing.T) {
initialPage := 2
initialPerPage := 123
initialSort := []SortField{{"test1", SortAsc}, {"test2", SortAsc}}
initialFilter := []FilterData{"test1", "test2"}
scenarios := []struct {
query string
expectError bool
expectPage int
expectPerPage int
expectSort string
expectFilter string
}{
// empty
{
"",
false,
initialPage,
initialPerPage,
`[{"name":"test1","direction":"ASC"},{"name":"test2","direction":"ASC"}]`,
`["test1","test2"]`,
},
// invalid query
{
"invalid;",
true,
initialPage,
initialPerPage,
`[{"name":"test1","direction":"ASC"},{"name":"test2","direction":"ASC"}]`,
`["test1","test2"]`,
},
// invalid page
{
"page=a",
true,
initialPage,
initialPerPage,
`[{"name":"test1","direction":"ASC"},{"name":"test2","direction":"ASC"}]`,
`["test1","test2"]`,
},
// invalid perPage
{
"perPage=a",
true,
initialPage,
initialPerPage,
`[{"name":"test1","direction":"ASC"},{"name":"test2","direction":"ASC"}]`,
`["test1","test2"]`,
},
// valid query parameters
{
"page=3&perPage=456&filter=test3&sort=-a,b,+c&other=123",
false,
3,
456,
`[{"name":"test1","direction":"ASC"},{"name":"test2","direction":"ASC"},{"name":"a","direction":"DESC"},{"name":"b","direction":"ASC"},{"name":"c","direction":"ASC"}]`,
`["test1","test2","test3"]`,
},
}
for i, s := range scenarios {
t.Run(fmt.Sprintf("%d_%s", i, s.query), func(t *testing.T) {
r := &testFieldResolver{}
p := NewProvider(r).
Page(initialPage).
PerPage(initialPerPage).
Sort(initialSort).
Filter(initialFilter)
err := p.Parse(s.query)
hasErr := err != nil
if hasErr != s.expectError {
t.Fatalf("Expected hasErr %v, got %v (%v)", s.expectError, hasErr, err)
}
if p.page != s.expectPage {
t.Fatalf("Expected page %v, got %v", s.expectPage, p.page)
}
if p.perPage != s.expectPerPage {
t.Fatalf("Expected perPage %v, got %v", s.expectPerPage, p.perPage)
}
encodedSort, _ := json.Marshal(p.sort)
if string(encodedSort) != s.expectSort {
t.Fatalf("Expected sort %v, got \n%v", s.expectSort, string(encodedSort))
}
encodedFilter, _ := json.Marshal(p.filter)
if string(encodedFilter) != s.expectFilter {
t.Fatalf("Expected filter %v, got \n%v", s.expectFilter, string(encodedFilter))
}
})
}
}
func TestProviderExecEmptyQuery(t *testing.T) {
p := NewProvider(&testFieldResolver{}).
Query(nil)
_, err := p.Exec(&[]testTableStruct{})
if err == nil {
t.Fatalf("Expected error with empty query, got nil")
}
}
func TestProviderExecNonEmptyQuery(t *testing.T) {
testDB, err := createTestDB()
if err != nil {
t.Fatal(err)
}
defer testDB.Close()
query := testDB.Select("*").
From("test").
Where(dbx.Not(dbx.HashExp{"test1": nil})).
OrderBy("test1 ASC")
scenarios := []struct {
name string
page int
perPage int
sort []SortField
filter []FilterData
skipTotal bool
expectError bool
expectResult string
expectQueries []string
}{
{
"page normalization",
-1,
10,
[]SortField{},
[]FilterData{},
false,
false,
`{"items":[{"test1":1,"test2":"test2.1","test3":""},{"test1":2,"test2":"test2.2","test3":""}],"page":1,"perPage":10,"totalItems":2,"totalPages":1}`,
[]string{
"SELECT COUNT(DISTINCT [[test.id]]) FROM `test` WHERE NOT (`test1` IS NULL)",
"SELECT * FROM `test` WHERE NOT (`test1` IS NULL) ORDER BY `test1` ASC LIMIT 10",
},
},
{
"perPage normalization",
10,
0, // fallback to default
[]SortField{},
[]FilterData{},
false,
false,
`{"items":[],"page":10,"perPage":30,"totalItems":2,"totalPages":1}`,
[]string{
"SELECT COUNT(DISTINCT [[test.id]]) FROM `test` WHERE NOT (`test1` IS NULL)",
"SELECT * FROM `test` WHERE NOT (`test1` IS NULL) ORDER BY `test1` ASC LIMIT 30 OFFSET 270",
},
},
{
"invalid sort field",
1,
10,
[]SortField{{"unknown", SortAsc}},
[]FilterData{},
false,
true,
"",
nil,
},
{
"invalid filter",
1,
10,
[]SortField{},
[]FilterData{"test2 = 'test2.1'", "invalid"},
false,
true,
"",
nil,
},
{
"valid sort and filter fields",
1,
5555, // will be limited by MaxPerPage
[]SortField{{"test2", SortDesc}},
[]FilterData{"test2 != null", "test1 >= 2"},
false,
false,
`{"items":[{"test1":2,"test2":"test2.2","test3":""}],"page":1,"perPage":` + fmt.Sprint(MaxPerPage) + `,"totalItems":1,"totalPages":1}`,
[]string{
"SELECT COUNT(DISTINCT [[test.id]]) FROM `test` WHERE ((NOT (`test1` IS NULL)) AND (((test2 IS NOT '' AND test2 IS NOT NULL)))) AND (test1 >= 2)",
"SELECT * FROM `test` WHERE ((NOT (`test1` IS NULL)) AND (((test2 IS NOT '' AND test2 IS NOT NULL)))) AND (test1 >= 2) ORDER BY `test1` ASC, `test2` DESC LIMIT " + fmt.Sprint(MaxPerPage),
},
},
{
"valid sort and filter fields (skipTotal=1)",
1,
5555, // will be limited by MaxPerPage
[]SortField{{"test2", SortDesc}},
[]FilterData{"test2 != null", "test1 >= 2"},
true,
false,
`{"items":[{"test1":2,"test2":"test2.2","test3":""}],"page":1,"perPage":` + fmt.Sprint(MaxPerPage) + `,"totalItems":-1,"totalPages":-1}`,
[]string{
"SELECT * FROM `test` WHERE ((NOT (`test1` IS NULL)) AND (((test2 IS NOT '' AND test2 IS NOT NULL)))) AND (test1 >= 2) ORDER BY `test1` ASC, `test2` DESC LIMIT " + fmt.Sprint(MaxPerPage),
},
},
{
"valid sort and filter fields (zero results)",
1,
10,
[]SortField{{"test3", SortAsc}},
[]FilterData{"test3 != ''"},
false,
false,
`{"items":[],"page":1,"perPage":10,"totalItems":0,"totalPages":0}`,
[]string{
"SELECT COUNT(DISTINCT [[test.id]]) FROM `test` WHERE (NOT (`test1` IS NULL)) AND (((test3 IS NOT '' AND test3 IS NOT NULL)))",
"SELECT * FROM `test` WHERE (NOT (`test1` IS NULL)) AND (((test3 IS NOT '' AND test3 IS NOT NULL))) ORDER BY `test1` ASC, `test3` ASC LIMIT 10",
},
},
{
"valid sort and filter fields (zero results; skipTotal=1)",
1,
10,
[]SortField{{"test3", SortAsc}},
[]FilterData{"test3 != ''"},
true,
false,
`{"items":[],"page":1,"perPage":10,"totalItems":-1,"totalPages":-1}`,
[]string{
"SELECT * FROM `test` WHERE (NOT (`test1` IS NULL)) AND (((test3 IS NOT '' AND test3 IS NOT NULL))) ORDER BY `test1` ASC, `test3` ASC LIMIT 10",
},
},
{
"pagination test",
2,
1,
[]SortField{},
[]FilterData{},
false,
false,
`{"items":[{"test1":2,"test2":"test2.2","test3":""}],"page":2,"perPage":1,"totalItems":2,"totalPages":2}`,
[]string{
"SELECT COUNT(DISTINCT [[test.id]]) FROM `test` WHERE NOT (`test1` IS NULL)",
"SELECT * FROM `test` WHERE NOT (`test1` IS NULL) ORDER BY `test1` ASC LIMIT 1 OFFSET 1",
},
},
{
"pagination test (skipTotal=1)",
2,
1,
[]SortField{},
[]FilterData{},
true,
false,
`{"items":[{"test1":2,"test2":"test2.2","test3":""}],"page":2,"perPage":1,"totalItems":-1,"totalPages":-1}`,
[]string{
"SELECT * FROM `test` WHERE NOT (`test1` IS NULL) ORDER BY `test1` ASC LIMIT 1 OFFSET 1",
},
},
}
for _, s := range scenarios {
t.Run(s.name, func(t *testing.T) {
testDB.CalledQueries = []string{} // reset
testResolver := &testFieldResolver{}
p := NewProvider(testResolver).
Query(query).
Page(s.page).
PerPage(s.perPage).
Sort(s.sort).
SkipTotal(s.skipTotal).
Filter(s.filter)
result, err := p.Exec(&[]testTableStruct{})
hasErr := err != nil
if hasErr != s.expectError {
t.Fatalf("Expected hasErr %v, got %v (%v)", s.expectError, hasErr, err)
}
if hasErr {
return
}
if testResolver.UpdateQueryCalls != 1 {
t.Fatalf("Expected resolver.Update to be called %d, got %d", 1, testResolver.UpdateQueryCalls)
}
encoded, _ := json.Marshal(result)
if string(encoded) != s.expectResult {
t.Fatalf("Expected result %v, got \n%v", s.expectResult, string(encoded))
}
if len(s.expectQueries) != len(testDB.CalledQueries) {
t.Fatalf("Expected %d queries, got %d: \n%v", len(s.expectQueries), len(testDB.CalledQueries), testDB.CalledQueries)
}
for _, q := range testDB.CalledQueries {
if !list.ExistInSliceWithRegex(q, s.expectQueries) {
t.Fatalf("Didn't expect query \n%v \nin \n%v", q, s.expectQueries)
}
}
})
}
}
func TestProviderFilterAndSortLimits(t *testing.T) {
testDB, err := createTestDB()
if err != nil {
t.Fatal(err)
}
defer testDB.Close()
query := testDB.Select("*").
From("test").
Where(dbx.Not(dbx.HashExp{"test1": nil})).
OrderBy("test1 ASC")
scenarios := []struct {
name string
filter []FilterData
sort []SortField
maxFilterExprLimit int
maxSortExprLimit int
expectError bool
}{
// filter
{
"<= max filter length",
[]FilterData{
"1=2",
FilterData("1='" + strings.Repeat("a", MaxFilterLength-4) + "'"),
},
[]SortField{},
1,
0,
false,
},
{
"> max filter length",
[]FilterData{
"1=2",
FilterData("1='" + strings.Repeat("a", MaxFilterLength-3) + "'"),
},
[]SortField{},
1,
0,
true,
},
{
"<= max filter exprs",
[]FilterData{
"1=2",
"(1=1 || 1=1) && (1=1 || (1=1 || 1=1)) && (1=1)",
},
[]SortField{},
6,
0,
false,
},
{
"> max filter exprs",
[]FilterData{
"1=2",
"(1=1 || 1=1) && (1=1 || (1=1 || 1=1)) && (1=1)",
},
[]SortField{},
5,
0,
true,
},
// sort
{
"<= max sort field length",
[]FilterData{},
[]SortField{
{"id", SortAsc},
{"test1", SortDesc},
{strings.Repeat("a", MaxSortFieldLength), SortDesc},
},
0,
10,
false,
},
{
"> max sort field length",
[]FilterData{},
[]SortField{
{"id", SortAsc},
{"test1", SortDesc},
{strings.Repeat("b", MaxSortFieldLength+1), SortDesc},
},
0,
10,
true,
},
{
"<= max sort exprs",
[]FilterData{},
[]SortField{
{"id", SortAsc},
{"test1", SortDesc},
},
0,
2,
false,
},
{
"> max sort exprs",
[]FilterData{},
[]SortField{
{"id", SortAsc},
{"test1", SortDesc},
},
0,
1,
true,
},
}
for _, s := range scenarios {
t.Run(s.name, func(t *testing.T) {
testResolver := &testFieldResolver{}
p := NewProvider(testResolver).
Query(query).
Sort(s.sort).
Filter(s.filter).
MaxFilterExprLimit(s.maxFilterExprLimit).
MaxSortExprLimit(s.maxSortExprLimit)
_, err := p.Exec(&[]testTableStruct{})
hasErr := err != nil
if hasErr != s.expectError {
t.Fatalf("Expected hasErr %v, got %v", s.expectError, hasErr)
}
})
}
}
func TestProviderParseAndExec(t *testing.T) {
testDB, err := createTestDB()
if err != nil {
t.Fatal(err)
}
defer testDB.Close()
query := testDB.Select("*").
From("test").
Where(dbx.Not(dbx.HashExp{"test1": nil})).
OrderBy("test1 ASC")
scenarios := []struct {
name string
queryString string
expectError bool
expectResult string
}{
{
"no extra query params (aka. use the provider presets)",
"",
false,
`{"items":[],"page":2,"perPage":123,"totalItems":2,"totalPages":1}`,
},
{
"invalid query",
"invalid;",
true,
"",
},
{
"invalid page",
"page=a",
true,
"",
},
{
"invalid perPage",
"perPage=a",
true,
"",
},
{
"invalid skipTotal",
"skipTotal=a",
true,
"",
},
{
"invalid sorting field",
"sort=-unknown",
true,
"",
},
{
"invalid filter field",
"filter=unknown>1",
true,
"",
},
{
"page > existing",
"page=3&perPage=9999",
false,
`{"items":[],"page":3,"perPage":1000,"totalItems":2,"totalPages":1}`,
},
{
"valid query params",
"page=1&perPage=9999&filter=test1>1&sort=-test2,test3",
false,
`{"items":[{"test1":2,"test2":"test2.2","test3":""}],"page":1,"perPage":1000,"totalItems":1,"totalPages":1}`,
},
{
"valid query params with skipTotal=1",
"page=1&perPage=9999&filter=test1>1&sort=-test2,test3&skipTotal=1",
false,
`{"items":[{"test1":2,"test2":"test2.2","test3":""}],"page":1,"perPage":1000,"totalItems":-1,"totalPages":-1}`,
},
}
for _, s := range scenarios {
t.Run(s.name, func(t *testing.T) {
testDB.CalledQueries = []string{} // reset
testResolver := &testFieldResolver{}
provider := NewProvider(testResolver).
Query(query).
Page(2).
PerPage(123).
Sort([]SortField{{"test2", SortAsc}}).
Filter([]FilterData{"test1 > 0"})
result, err := provider.ParseAndExec(s.queryString, &[]testTableStruct{})
hasErr := err != nil
if hasErr != s.expectError {
t.Fatalf("Expected hasErr %v, got %v (%v)", s.expectError, hasErr, err)
}
if hasErr {
return
}
if testResolver.UpdateQueryCalls != 1 {
t.Fatalf("Expected resolver.Update to be called %d, got %d", 1, testResolver.UpdateQueryCalls)
}
expectedQueries := 2
if provider.skipTotal {
expectedQueries = 1
}
if len(testDB.CalledQueries) != expectedQueries {
t.Fatalf("Expected %d db queries, got %d: \n%v", expectedQueries, len(testDB.CalledQueries), testDB.CalledQueries)
}
encoded, _ := json.Marshal(result)
if string(encoded) != s.expectResult {
t.Fatalf("Expected result \n%v\ngot\n%v", s.expectResult, string(encoded))
}
})
}
}
// -------------------------------------------------------------------
// Helpers
// -------------------------------------------------------------------
type testTableStruct struct {
Test1 int `db:"test1" json:"test1"`
Test2 string `db:"test2" json:"test2"`
Test3 string `db:"test3" json:"test3"`
}
type testDB struct {
*dbx.DB
CalledQueries []string
}
// createTestDB initializes an in-memory SQLite test database
// with a "test" table and 2 prefilled rows.
//
// NB! Don't forget to call `db.Close()` at the end of the test.
func createTestDB() (*testDB, error) {
	// using a shared cache to allow multiple connections access to
	// the same in memory database https://www.sqlite.org/inmemorydb.html
	sqlDB, err := sql.Open("sqlite", "file::memory:?cache=shared")
	if err != nil {
		return nil, err
	}

	db := testDB{DB: dbx.NewFromDB(sqlDB, "sqlite")}

	// fail fast on schema/seed errors instead of silently discarding
	// the Execute() results (previously ignored)
	if _, err := db.CreateTable("test", map[string]string{
		"id":    "int default 0",
		"test1": "int default 0",
		"test2": "text default ''",
		"test3": "text default ''",
		strings.Repeat("a", MaxSortFieldLength):   "text default ''",
		strings.Repeat("b", MaxSortFieldLength+1): "text default ''",
	}).Execute(); err != nil {
		return nil, err
	}

	if _, err := db.Insert("test", dbx.Params{"id": 1, "test1": 1, "test2": "test2.1"}).Execute(); err != nil {
		return nil, err
	}
	if _, err := db.Insert("test", dbx.Params{"id": 2, "test1": 2, "test2": "test2.2"}).Execute(); err != nil {
		return nil, err
	}

	// record every executed query for assertions
	db.QueryLogFunc = func(ctx context.Context, t time.Duration, sql string, rows *sql.Rows, err error) {
		db.CalledQueries = append(db.CalledQueries, sql)
	}

	return &db, nil
}
// ---
type testFieldResolver struct {
UpdateQueryCalls int
ResolveCalls int
}
// UpdateQuery is a no-op that only tracks the number of calls for test assertions.
func (t *testFieldResolver) UpdateQuery(query *dbx.SelectQuery) error {
	t.UpdateQueryCalls++
	return nil
}
// Resolve tracks the call count and errors only for the special "unknown" test field.
func (t *testFieldResolver) Resolve(field string) (*ResolverResult, error) {
	t.ResolveCalls++

	switch field {
	case "unknown":
		return nil, errors.New("test error")
	default:
		return &ResolverResult{Identifier: field}, nil
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/search/sort_test.go | tools/search/sort_test.go | package search_test
import (
"encoding/json"
"fmt"
"testing"
"github.com/pocketbase/pocketbase/tools/search"
)
func TestSortFieldBuildExpr(t *testing.T) {
	resolver := search.NewSimpleFieldResolver("test1", "test2", "test3", "test4.sub")

	scenarios := []struct {
		sortField        search.SortField
		expectError      bool
		expectExpression string
	}{
		// empty
		{search.SortField{"", search.SortDesc}, true, ""},
		// unknown field
		{search.SortField{"unknown", search.SortAsc}, true, ""},
		// placeholder field
		{search.SortField{"'test'", search.SortAsc}, true, ""},
		// null field
		{search.SortField{"null", search.SortAsc}, true, ""},
		// allowed field - asc
		{search.SortField{"test1", search.SortAsc}, false, "[[test1]] ASC"},
		// allowed field - desc
		{search.SortField{"test1", search.SortDesc}, false, "[[test1]] DESC"},
		// special @random field (ignore direction)
		{search.SortField{"@random", search.SortDesc}, false, "RANDOM()"},
		// special _rowid_ field
		{search.SortField{"@rowid", search.SortDesc}, false, "[[_rowid_]] DESC"},
	}

	for _, s := range scenarios {
		// use name+direction for the subtest name
		// (previously the name was duplicated, producing redundant subtest names)
		t.Run(fmt.Sprintf("%s_%s", s.sortField.Name, s.sortField.Direction), func(t *testing.T) {
			result, err := s.sortField.BuildExpr(resolver)

			hasErr := err != nil
			if hasErr != s.expectError {
				t.Fatalf("Expected hasErr %v, got %v (%v)", s.expectError, hasErr, err)
			}

			if result != s.expectExpression {
				t.Fatalf("Expected expression %v, got %v", s.expectExpression, result)
			}
		})
	}
}
func TestParseSortFromString(t *testing.T) {
scenarios := []struct {
value string
expected string
}{
{"", `[{"name":"","direction":"ASC"}]`},
{"test", `[{"name":"test","direction":"ASC"}]`},
{"+test", `[{"name":"test","direction":"ASC"}]`},
{"-test", `[{"name":"test","direction":"DESC"}]`},
{"test1,-test2,+test3", `[{"name":"test1","direction":"ASC"},{"name":"test2","direction":"DESC"},{"name":"test3","direction":"ASC"}]`},
{"@random,-test", `[{"name":"@random","direction":"ASC"},{"name":"test","direction":"DESC"}]`},
{"-@rowid,-test", `[{"name":"@rowid","direction":"DESC"},{"name":"test","direction":"DESC"}]`},
}
for _, s := range scenarios {
t.Run(s.value, func(t *testing.T) {
result := search.ParseSortFromString(s.value)
encoded, _ := json.Marshal(result)
encodedStr := string(encoded)
if encodedStr != s.expected {
t.Fatalf("Expected expression %s, got %s", s.expected, encodedStr)
}
})
}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/router/rereadable_read_closer_test.go | tools/router/rereadable_read_closer_test.go | package router_test
import (
"io"
"strings"
"testing"
"github.com/pocketbase/pocketbase/tools/router"
)
func TestRereadableReadCloser(t *testing.T) {
	const content = "test"

	rereadable := &router.RereadableReadCloser{
		ReadCloser: io.NopCloser(strings.NewReader(content)),
	}

	// every read attempt should return the full content
	for attempt := 0; attempt < 3; attempt++ {
		raw, err := io.ReadAll(rereadable)
		if err != nil {
			t.Fatalf("[read:%d] %v", attempt, err)
		}

		if string(raw) != content {
			t.Fatalf("[read:%d] Expected %q, got %q", attempt, content, raw)
		}
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/router/error.go | tools/router/error.go | package router
import (
"database/sql"
"errors"
"io/fs"
"net/http"
"strings"
validation "github.com/go-ozzo/ozzo-validation/v4"
"github.com/pocketbase/pocketbase/tools/inflector"
)
// SafeErrorItem defines a common error interface for a printable public safe error.
type SafeErrorItem interface {
	// Code represents a fixed unique identifier of the error (usually used as translation key).
	Code() string

	// Error is the default English human readable error message that will be returned.
	Error() string
}

// SafeErrorParamsResolver defines an optional interface for specifying dynamic error parameters.
type SafeErrorParamsResolver interface {
	// Params defines a map with dynamic parameters to return as part of the public safe error view.
	Params() map[string]any
}

// SafeErrorResolver defines an error interface for resolving the public safe error fields.
type SafeErrorResolver interface {
	// Resolve allows modifying and returning a new public safe error data map.
	Resolve(errData map[string]any) any
}

// ApiError defines the struct for a basic api error response.
type ApiError struct {
	rawData any // the original unformatted error data (not serialized)

	Data    map[string]any `json:"data"` // public safe error details extracted from rawData
	Message string         `json:"message"`
	Status  int            `json:"status"`
}
// Error makes it compatible with the `error` interface.
//
// Only the public safe message is returned (rawData is not exposed).
func (e *ApiError) Error() string {
	return e.Message
}
// RawData returns the unformatted error data (could be an internal error, text, etc.)
//
// Note that the raw data is intentionally excluded from the JSON serialization.
func (e *ApiError) RawData() any {
	return e.rawData
}
// Is reports whether the current ApiError wraps the target.
//
// When the raw data is itself an error, the check is delegated to errors.Is;
// otherwise two ApiError values only match by identity.
func (e *ApiError) Is(target error) bool {
	if wrapped, ok := e.rawData.(error); ok {
		return errors.Is(wrapped, target)
	}

	if other, ok := target.(*ApiError); ok {
		return e == other
	}

	return false
}
// NewNotFoundError creates and returns 404 ApiError.
func NewNotFoundError(message string, rawErrData any) *ApiError {
	if message == "" {
		message = "The requested resource wasn't found."
	}

	return NewApiError(http.StatusNotFound, message, rawErrData)
}

// NewBadRequestError creates and returns 400 ApiError.
func NewBadRequestError(message string, rawErrData any) *ApiError {
	if message == "" {
		message = "Something went wrong while processing your request."
	}

	return NewApiError(http.StatusBadRequest, message, rawErrData)
}

// NewForbiddenError creates and returns 403 ApiError.
func NewForbiddenError(message string, rawErrData any) *ApiError {
	if message == "" {
		message = "You are not allowed to perform this request."
	}

	return NewApiError(http.StatusForbidden, message, rawErrData)
}

// NewUnauthorizedError creates and returns 401 ApiError.
func NewUnauthorizedError(message string, rawErrData any) *ApiError {
	if message == "" {
		message = "Missing or invalid authentication."
	}

	return NewApiError(http.StatusUnauthorized, message, rawErrData)
}

// NewInternalServerError creates and returns 500 ApiError.
func NewInternalServerError(message string, rawErrData any) *ApiError {
	if message == "" {
		message = "Something went wrong while processing your request."
	}

	return NewApiError(http.StatusInternalServerError, message, rawErrData)
}

// NewTooManyRequestsError creates and returns 429 ApiError.
func NewTooManyRequestsError(message string, rawErrData any) *ApiError {
	if message == "" {
		message = "Too Many Requests."
	}

	return NewApiError(http.StatusTooManyRequests, message, rawErrData)
}
// NewApiError creates and returns new normalized ApiError instance.
//
// Empty messages fallback to the standard text for the status code.
func NewApiError(status int, message string, rawErrData any) *ApiError {
	msg := message
	if msg == "" {
		msg = http.StatusText(status)
	}
	msg = strings.TrimSpace(inflector.Sentenize(msg))

	return &ApiError{
		rawData: rawErrData,
		Data:    safeErrorsData(rawErrData),
		Status:  status,
		Message: msg,
	}
}
// ToApiError wraps err into ApiError instance (if not already).
//
// "Not found"-like errors map to 404, everything else to a generic 400.
func ToApiError(err error) *ApiError {
	var apiErr *ApiError
	if errors.As(err, &apiErr) {
		return apiErr
	}

	// no ApiError found -> assign a generic one
	if errors.Is(err, sql.ErrNoRows) || errors.Is(err, fs.ErrNotExist) {
		return NewNotFoundError("", err)
	}

	return NewBadRequestError("", err)
}
// -------------------------------------------------------------------
// safeErrorsData extracts a map with the public safe error fields from the
// provided raw error data.
//
// note: the case order is significant - validation.Errors is matched before
// the generic `error` and map cases so that it gets the structured treatment.
func safeErrorsData(data any) map[string]any {
	switch v := data.(type) {
	case validation.Errors:
		return resolveSafeErrorsData(v)
	case error:
		// check whether the error chain contains validation errors
		validationErrors := validation.Errors{}
		if errors.As(v, &validationErrors) {
			return resolveSafeErrorsData(validationErrors)
		}
		return map[string]any{} // not nil to ensure that is json serialized as object
	case map[string]validation.Error:
		return resolveSafeErrorsData(v)
	case map[string]SafeErrorItem:
		return resolveSafeErrorsData(v)
	case map[string]error:
		return resolveSafeErrorsData(v)
	case map[string]string:
		return resolveSafeErrorsData(v)
	case map[string]any:
		return resolveSafeErrorsData(v)
	default:
		return map[string]any{} // not nil to ensure that is json serialized as object
	}
}
// resolveSafeErrorsData converts each map entry into its public safe
// representation, recursing into nested error maps.
func resolveSafeErrorsData[T any](data map[string]T) map[string]any {
	result := make(map[string]any, len(data))

	for key, item := range data {
		if isNestedError(item) {
			result[key] = safeErrorsData(item)
			continue
		}
		result[key] = resolveSafeErrorItem(item)
	}

	return result
}
// isNestedError reports whether the provided value is itself a map of
// errors that should be resolved recursively.
func isNestedError(err any) bool {
	switch err.(type) {
	case validation.Errors:
		return true
	case map[string]validation.Error:
		return true
	case map[string]SafeErrorItem:
		return true
	case map[string]error:
		return true
	case map[string]string:
		return true
	case map[string]any:
		return true
	default:
		return false
	}
}
// resolveSafeErrorItem extracts from each validation error its
// public safe error code and message.
//
// Values that don't implement SafeErrorItem fallback to a generic
// "validation_invalid_value" entry.
func resolveSafeErrorItem(err any) any {
	data := make(map[string]any, 3)

	if item, ok := err.(SafeErrorItem); ok {
		// extract the specific error code and message
		data["code"] = item.Code()
		data["message"] = inflector.Sentenize(item.Error())
	} else {
		// fallback to the default public safe values
		data["code"] = "validation_invalid_value"
		data["message"] = "Invalid value."
	}

	if withParams, ok := err.(SafeErrorParamsResolver); ok {
		if params := withParams.Params(); len(params) > 0 {
			data["params"] = params
		}
	}

	// allow the error to further customize its public view
	if resolver, ok := err.(SafeErrorResolver); ok {
		return resolver.Resolve(data)
	}

	return data
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/router/group.go | tools/router/group.go | package router
import (
"net/http"
"regexp"
"strings"
"github.com/pocketbase/pocketbase/tools/hook"
)
// (note: the struct is named RouterGroup instead of Group so that it can
// be embedded in the Router without conflicting with the Group method)

// RouterGroup represents a collection of routes and other sub groups
// that share common pattern prefix and middlewares.
type RouterGroup[T hook.Resolver] struct {
	excludedMiddlewares map[string]struct{} // ids marked as removed via Unbind
	children            []any               // Route or RouterGroup

	Prefix      string
	Middlewares []*hook.Handler[T]
}
// Group creates and register a new child Group into the current one
// with the specified prefix.
//
// The prefix follows the standard Go net/http ServeMux pattern format ("[HOST]/[PATH]")
// and will be concatenated recursively into the final route path, meaning that
// only the root level group could have HOST as part of the prefix.
//
// Returns the newly created group to allow chaining and registering
// sub-routes and group specific middlewares.
func (group *RouterGroup[T]) Group(prefix string) *RouterGroup[T] {
	sub := &RouterGroup[T]{Prefix: prefix}

	group.children = append(group.children, sub)

	return sub
}
// BindFunc registers one or multiple middleware functions to the current group.
//
// The registered middleware functions are "anonymous" and with default priority,
// aka. executes in the order they were registered.
//
// If you need to specify a named middleware (ex. so that it can be removed)
// or middleware with custom exec prirority, use [RouterGroup.Bind] method.
func (group *RouterGroup[T]) BindFunc(middlewareFuncs ...func(e T) error) *RouterGroup[T] {
	for _, fn := range middlewareFuncs {
		group.Middlewares = append(group.Middlewares, &hook.Handler[T]{Func: fn})
	}

	return group
}
// Bind registers one or multiple middleware handlers to the current group.
//
// Rebinding a handler whose id was previously removed with [RouterGroup.Unbind]
// makes that id eligible again for this group.
func (group *RouterGroup[T]) Bind(middlewares ...*hook.Handler[T]) *RouterGroup[T] {
	group.Middlewares = append(group.Middlewares, middlewares...)
	// clear the "excluded" mark for the (re)added middleware ids
	// (delete is a safe no-op when the map is nil)
	for _, m := range middlewares {
		if m.Id != "" {
			delete(group.excludedMiddlewares, m.Id)
		}
	}
	return group
}
// Unbind removes one or more middlewares with the specified id(s)
// from the current group and its children (if any).
//
// Anonymous middlewares are not removable, aka. this method does nothing
// if the middleware id is an empty string.
func (group *RouterGroup[T]) Unbind(middlewareIds ...string) *RouterGroup[T] {
	for _, id := range middlewareIds {
		if id == "" {
			continue // anonymous middlewares cannot be targeted
		}
		// drop matching handlers from the group's own middlewares
		// (iterating backwards so that removals don't shift unvisited indexes)
		for i := len(group.Middlewares) - 1; i >= 0; i-- {
			if group.Middlewares[i].Id == id {
				group.Middlewares = append(group.Middlewares[:i], group.Middlewares[i+1:]...)
			}
		}
		// propagate the removal to all child groups and routes
		for i := len(group.children) - 1; i >= 0; i-- {
			switch c := group.children[i].(type) {
			case *RouterGroup[T]:
				c.Unbind(id)
			case *Route[T]:
				c.Unbind(id)
			}
		}
		// remember the id so that inherited parent middlewares with it are skipped
		if group.excludedMiddlewares == nil {
			group.excludedMiddlewares = map[string]struct{}{}
		}
		group.excludedMiddlewares[id] = struct{}{}
	}
	return group
}
// Route registers a single route into the current group.
//
// Note that the final route path will be the concatenation of all parent groups prefixes + the route path.
// The path follows the standard Go net/http ServeMux format ("[HOST]/[PATH]"),
// meaning that only a top level group route could have HOST as part of the prefix.
//
// Returns the newly created route to allow attaching route-only middlewares.
func (group *RouterGroup[T]) Route(method string, path string, action func(e T) error) *Route[T] {
	r := &Route[T]{
		Method: method,
		Path:   path,
		Action: action,
	}
	group.children = append(group.children, r)
	return r
}
// Any is a shorthand for [RouterGroup.Route] with "" as route method (aka. matches any method).
func (group *RouterGroup[T]) Any(path string, action func(e T) error) *Route[T] {
	return group.Route("", path, action)
}
// GET is a shorthand for [RouterGroup.Route] with GET as route method.
func (group *RouterGroup[T]) GET(path string, action func(e T) error) *Route[T] {
	return group.Route(http.MethodGet, path, action)
}
// SEARCH is a shorthand for [RouterGroup.Route] with SEARCH as route method
// (a non-standard method, hence the string literal instead of an http.Method* constant).
func (group *RouterGroup[T]) SEARCH(path string, action func(e T) error) *Route[T] {
	return group.Route("SEARCH", path, action)
}
// POST is a shorthand for [RouterGroup.Route] with POST as route method.
func (group *RouterGroup[T]) POST(path string, action func(e T) error) *Route[T] {
	return group.Route(http.MethodPost, path, action)
}
// DELETE is a shorthand for [RouterGroup.Route] with DELETE as route method.
func (group *RouterGroup[T]) DELETE(path string, action func(e T) error) *Route[T] {
	return group.Route(http.MethodDelete, path, action)
}
// PATCH is a shorthand for [RouterGroup.Route] with PATCH as route method.
func (group *RouterGroup[T]) PATCH(path string, action func(e T) error) *Route[T] {
	return group.Route(http.MethodPatch, path, action)
}
// PUT is a shorthand for [RouterGroup.Route] with PUT as route method.
func (group *RouterGroup[T]) PUT(path string, action func(e T) error) *Route[T] {
	return group.Route(http.MethodPut, path, action)
}
// HEAD is a shorthand for [RouterGroup.Route] with HEAD as route method.
func (group *RouterGroup[T]) HEAD(path string, action func(e T) error) *Route[T] {
	return group.Route(http.MethodHead, path, action)
}
// OPTIONS is a shorthand for [RouterGroup.Route] with OPTIONS as route method.
func (group *RouterGroup[T]) OPTIONS(path string, action func(e T) error) *Route[T] {
	return group.Route(http.MethodOptions, path, action)
}
// HasRoute checks whether the specified route pattern (method + path)
// is registered in the current group or its children.
//
// This could be useful to conditionally register routes and to
// prevent panics caused by duplicated route registrations.
//
// Note that routes with anonymous and named wildcard placeholder are treated as equal,
// aka. "GET /abc/" is considered the same as "GET /abc/{something...}".
func (group *RouterGroup[T]) HasRoute(method string, path string) bool {
	var pattern string
	if method == "" {
		pattern = path
	} else {
		pattern = strings.ToUpper(method) + " " + path
	}
	return group.hasRoute(pattern, nil)
}
// hasRoute recursively reports whether the group or any of its children
// contains a route whose full pattern ("METHOD " + concatenated parent
// prefixes + group prefix + route path) matches the provided pattern,
// either exactly or after trailing wildcard normalization (see stripWildcard).
func (group *RouterGroup[T]) hasRoute(pattern string, parents []*RouterGroup[T]) bool {
	for _, child := range group.children {
		switch v := child.(type) {
		case *RouterGroup[T]:
			if v.hasRoute(pattern, append(parents, group)) {
				return true
			}
		case *Route[T]:
			var b strings.Builder
			if v.Method != "" {
				b.WriteString(v.Method)
				b.WriteString(" ")
			}
			// prepend the prefixes of all parent groups
			for _, p := range parents {
				b.WriteString(p.Prefix)
			}
			b.WriteString(group.Prefix)
			b.WriteString(v.Path)
			candidate := b.String()
			// direct match
			if candidate == pattern {
				return true
			}
			// compare without the named wildcard, aka. /abc/{test...} is equal to /abc/
			if stripWildcard(candidate) == stripWildcard(pattern) {
				return true
			}
		}
	}
	return false
}
// wildcardPlaceholderRegex matches a trailing named wildcard route segment,
// e.g. the "/{rest...}" part of "/abc/{rest...}".
var wildcardPlaceholderRegex = regexp.MustCompile(`/{.+\.\.\.}$`)

// stripWildcard normalizes a trailing named wildcard placeholder to a plain
// trailing slash (e.g. "/abc/{rest...}" -> "/abc/") so that patterns differing
// only by the wildcard name compare as equal.
func stripWildcard(pattern string) string {
	if !wildcardPlaceholderRegex.MatchString(pattern) {
		return pattern
	}
	return wildcardPlaceholderRegex.ReplaceAllString(pattern, "/")
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/router/route_test.go | tools/router/route_test.go | package router
import (
"slices"
"testing"
"github.com/pocketbase/pocketbase/tools/hook"
)
// TestRouteBindFunc verifies that Route.BindFunc appends anonymous
// middleware functions in registration order.
func TestRouteBindFunc(t *testing.T) {
	t.Parallel()
	r := Route[*Event]{}
	calls := ""
	// append one function
	r.BindFunc(func(e *Event) error {
		calls += "a"
		return nil
	})
	// append multiple functions
	r.BindFunc(
		func(e *Event) error {
			calls += "b"
			return nil
		},
		func(e *Event) error {
			calls += "c"
			return nil
		},
	)
	if total := len(r.Middlewares); total != 3 {
		t.Fatalf("Expected %d middlewares, got %v", 3, total)
	}
	for _, h := range r.Middlewares {
		_ = h.Func(nil)
	}
	if calls != "abc" {
		t.Fatalf("Expected calls sequence %q, got %q", "abc", calls)
	}
}
// TestRouteBind verifies that Route.Bind appends handlers in order and
// re-allows previously excluded middleware ids.
func TestRouteBind(t *testing.T) {
	t.Parallel()
	r := Route[*Event]{
		// mock excluded middlewares to check whether the entry will be deleted
		excludedMiddlewares: map[string]struct{}{"test2": {}},
	}
	calls := ""
	// append one handler
	r.Bind(&hook.Handler[*Event]{
		Func: func(e *Event) error {
			calls += "a"
			return nil
		},
	})
	// append multiple handlers
	r.Bind(
		&hook.Handler[*Event]{
			Id: "test1",
			Func: func(e *Event) error {
				calls += "b"
				return nil
			},
		},
		&hook.Handler[*Event]{
			Id: "test2",
			Func: func(e *Event) error {
				calls += "c"
				return nil
			},
		},
	)
	if total := len(r.Middlewares); total != 3 {
		t.Fatalf("Expected %d middlewares, got %v", 3, total)
	}
	for _, h := range r.Middlewares {
		_ = h.Func(nil)
	}
	if calls != "abc" {
		t.Fatalf("Expected calls %q, got %q", "abc", calls)
	}
	// ensures that the previously excluded middleware was removed
	if len(r.excludedMiddlewares) != 0 {
		t.Fatalf("Expected test2 to be removed from the excludedMiddlewares list, got %v", r.excludedMiddlewares)
	}
}
// TestRouteUnbind verifies that Route.Unbind removes named middlewares,
// ignores empty ids and records the removed ids as excluded.
func TestRouteUnbind(t *testing.T) {
	t.Parallel()
	r := Route[*Event]{}
	calls := ""
	// anonymous middlewares
	r.Bind(&hook.Handler[*Event]{
		Func: func(e *Event) error {
			calls += "a"
			return nil // unused value
		},
	})
	// middlewares with id
	r.Bind(&hook.Handler[*Event]{
		Id: "test1",
		Func: func(e *Event) error {
			calls += "b"
			return nil // unused value
		},
	})
	r.Bind(&hook.Handler[*Event]{
		Id: "test2",
		Func: func(e *Event) error {
			calls += "c"
			return nil // unused value
		},
	})
	r.Bind(&hook.Handler[*Event]{
		Id: "test3",
		Func: func(e *Event) error {
			calls += "d"
			return nil // unused value
		},
	})
	// remove
	r.Unbind("") // should be no-op
	r.Unbind("test1", "test3")
	if total := len(r.Middlewares); total != 2 {
		t.Fatalf("Expected %d middlewares, got %v", 2, total)
	}
	for _, h := range r.Middlewares {
		if err := h.Func(nil); err != nil {
			continue
		}
	}
	if calls != "ac" {
		t.Fatalf("Expected calls %q, got %q", "ac", calls)
	}
	// ensure that the id was added in the exclude list
	excluded := []string{"test1", "test3"}
	if len(r.excludedMiddlewares) != len(excluded) {
		t.Fatalf("Expected excludes %v, got %v", excluded, r.excludedMiddlewares)
	}
	for id := range r.excludedMiddlewares {
		if !slices.Contains(excluded, id) {
			t.Fatalf("Expected %q to be marked as excluded", id)
		}
	}
}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/router/group_test.go | tools/router/group_test.go | package router
import (
"errors"
"fmt"
"net/http"
"slices"
"testing"
"github.com/pocketbase/pocketbase/tools/hook"
)
// TestRouterGroupGroup verifies that Group registers child groups with the
// expected prefixes.
func TestRouterGroupGroup(t *testing.T) {
	t.Parallel()
	g0 := RouterGroup[*Event]{}
	g1 := g0.Group("test1")
	g2 := g0.Group("test2")
	if total := len(g0.children); total != 2 {
		t.Fatalf("Expected %d child groups, got %d", 2, total)
	}
	if g1.Prefix != "test1" {
		t.Fatalf("Expected g1 with prefix %q, got %q", "test1", g1.Prefix)
	}
	if g2.Prefix != "test2" {
		t.Fatalf("Expected g2 with prefix %q, got %q", "test2", g2.Prefix)
	}
}
// TestRouterGroupBindFunc verifies that RouterGroup.BindFunc appends
// anonymous middleware functions in registration order.
func TestRouterGroupBindFunc(t *testing.T) {
	t.Parallel()
	g := RouterGroup[*Event]{}
	calls := ""
	// append one function
	g.BindFunc(func(e *Event) error {
		calls += "a"
		return nil
	})
	// append multiple functions
	g.BindFunc(
		func(e *Event) error {
			calls += "b"
			return nil
		},
		func(e *Event) error {
			calls += "c"
			return nil
		},
	)
	if total := len(g.Middlewares); total != 3 {
		t.Fatalf("Expected %d middlewares, got %v", 3, total)
	}
	for _, h := range g.Middlewares {
		_ = h.Func(nil)
	}
	if calls != "abc" {
		t.Fatalf("Expected calls sequence %q, got %q", "abc", calls)
	}
}
// TestRouterGroupBind verifies that RouterGroup.Bind appends handlers in
// order and re-allows previously excluded middleware ids.
func TestRouterGroupBind(t *testing.T) {
	t.Parallel()
	g := RouterGroup[*Event]{
		// mock excluded middlewares to check whether the entry will be deleted
		excludedMiddlewares: map[string]struct{}{"test2": {}},
	}
	calls := ""
	// append one handler
	g.Bind(&hook.Handler[*Event]{
		Func: func(e *Event) error {
			calls += "a"
			return nil
		},
	})
	// append multiple handlers
	g.Bind(
		&hook.Handler[*Event]{
			Id: "test1",
			Func: func(e *Event) error {
				calls += "b"
				return nil
			},
		},
		&hook.Handler[*Event]{
			Id: "test2",
			Func: func(e *Event) error {
				calls += "c"
				return nil
			},
		},
	)
	if total := len(g.Middlewares); total != 3 {
		t.Fatalf("Expected %d middlewares, got %v", 3, total)
	}
	for _, h := range g.Middlewares {
		_ = h.Func(nil)
	}
	if calls != "abc" {
		t.Fatalf("Expected calls %q, got %q", "abc", calls)
	}
	// ensures that the previously excluded middleware was removed
	if len(g.excludedMiddlewares) != 0 {
		t.Fatalf("Expected test2 to be removed from the excludedMiddlewares list, got %v", g.excludedMiddlewares)
	}
}
// TestRouterGroupUnbind verifies that RouterGroup.Unbind removes named
// middlewares, ignores empty ids and records the removed ids as excluded.
func TestRouterGroupUnbind(t *testing.T) {
	t.Parallel()
	g := RouterGroup[*Event]{}
	calls := ""
	// anonymous middlewares
	g.Bind(&hook.Handler[*Event]{
		Func: func(e *Event) error {
			calls += "a"
			return nil // unused value
		},
	})
	// middlewares with id
	g.Bind(&hook.Handler[*Event]{
		Id: "test1",
		Func: func(e *Event) error {
			calls += "b"
			return nil // unused value
		},
	})
	g.Bind(&hook.Handler[*Event]{
		Id: "test2",
		Func: func(e *Event) error {
			calls += "c"
			return nil // unused value
		},
	})
	g.Bind(&hook.Handler[*Event]{
		Id: "test3",
		Func: func(e *Event) error {
			calls += "d"
			return nil // unused value
		},
	})
	// remove
	g.Unbind("") // should be no-op
	g.Unbind("test1", "test3")
	if total := len(g.Middlewares); total != 2 {
		t.Fatalf("Expected %d middlewares, got %v", 2, total)
	}
	for _, h := range g.Middlewares {
		if err := h.Func(nil); err != nil {
			continue
		}
	}
	if calls != "ac" {
		t.Fatalf("Expected calls %q, got %q", "ac", calls)
	}
	// ensure that the ids were added in the exclude list
	excluded := []string{"test1", "test3"}
	if len(g.excludedMiddlewares) != len(excluded) {
		t.Fatalf("Expected excludes %v, got %v", excluded, g.excludedMiddlewares)
	}
	for id := range g.excludedMiddlewares {
		if !slices.Contains(excluded, id) {
			t.Fatalf("Expected %q to be marked as excluded", id)
		}
	}
}
// TestRouterGroupRoute verifies that Route registers the route only in the
// receiver group and preserves the method, path and action.
func TestRouterGroupRoute(t *testing.T) {
	t.Parallel()
	group := RouterGroup[*Event]{}
	sub := group.Group("sub")
	var called bool
	route := group.Route(http.MethodPost, "/test", func(e *Event) error {
		called = true
		return nil
	})
	// ensure that the route was registered only to the main one
	// ---
	if len(sub.children) != 0 {
		t.Fatalf("Expected no sub children, got %d", len(sub.children))
	}
	if len(group.children) != 2 {
		t.Fatalf("Expected %d group children, got %d", 2, len(group.children))
	}
	// ---
	// check the registered route
	// ---
	if route != group.children[1] {
		t.Fatalf("Expected group children %v, got %v", route, group.children[1])
	}
	if route.Method != http.MethodPost {
		t.Fatalf("Expected route method %q, got %q", http.MethodPost, route.Method)
	}
	if route.Path != "/test" {
		t.Fatalf("Expected route path %q, got %q", "/test", route.Path)
	}
	route.Action(nil)
	if !called {
		t.Fatal("Expected route action to be called")
	}
}
// TestRouterGroupRouteAliases verifies that every HTTP method shorthand
// (Any, GET, SEARCH, POST, ...) registers a route with the expected
// method, path and action.
func TestRouterGroupRouteAliases(t *testing.T) {
	t.Parallel()
	group := RouterGroup[*Event]{}
	testErr := errors.New("test")
	testAction := func(e *Event) error {
		return testErr
	}
	scenarios := []struct {
		route *Route[*Event]
		expectMethod string
		expectPath string
	}{
		{
			group.Any("/test", testAction),
			"",
			"/test",
		},
		{
			group.GET("/test", testAction),
			http.MethodGet,
			"/test",
		},
		{
			group.SEARCH("/test", testAction),
			"SEARCH",
			"/test",
		},
		{
			group.POST("/test", testAction),
			http.MethodPost,
			"/test",
		},
		{
			group.DELETE("/test", testAction),
			http.MethodDelete,
			"/test",
		},
		{
			group.PATCH("/test", testAction),
			http.MethodPatch,
			"/test",
		},
		{
			group.PUT("/test", testAction),
			http.MethodPut,
			"/test",
		},
		{
			group.HEAD("/test", testAction),
			http.MethodHead,
			"/test",
		},
		{
			group.OPTIONS("/test", testAction),
			http.MethodOptions,
			"/test",
		},
	}
	for i, s := range scenarios {
		t.Run(fmt.Sprintf("%d_%s_%s", i, s.expectMethod, s.expectPath), func(t *testing.T) {
			if s.route.Method != s.expectMethod {
				t.Fatalf("Expected method %q, got %q", s.expectMethod, s.route.Method)
			}
			if s.route.Path != s.expectPath {
				t.Fatalf("Expected path %q, got %q", s.expectPath, s.route.Path)
			}
			if err := s.route.Action(nil); !errors.Is(err, testErr) {
				t.Fatal("Expected test action")
			}
		})
	}
}
// TestRouterGroupHasRoute verifies the method+path lookup against nested
// groups, including the trailing wildcard normalization special cases.
func TestRouterGroupHasRoute(t *testing.T) {
	t.Parallel()
	group := RouterGroup[*Event]{}
	group.Any("/any", nil)
	group.GET("/base", nil)
	group.DELETE("/base", nil)
	sub := group.Group("/sub1")
	sub.GET("/a", nil)
	sub.POST("/a", nil)
	sub2 := sub.Group("/sub2")
	sub2.GET("/b", nil)
	sub2.GET("/b/{test}", nil)
	// special cases to test the normalizations
	group.GET("/c/", nil) // the same as /c/{test...}
	group.GET("/d/{test...}", nil) // the same as /d/
	scenarios := []struct {
		method string
		path string
		expected bool
	}{
		{
			http.MethodGet,
			"",
			false,
		},
		{
			"",
			"/any",
			true,
		},
		{
			http.MethodPost,
			"/base",
			false,
		},
		{
			http.MethodGet,
			"/base",
			true,
		},
		{
			http.MethodDelete,
			"/base",
			true,
		},
		{
			http.MethodGet,
			"/sub1",
			false,
		},
		{
			http.MethodGet,
			"/sub1/a",
			true,
		},
		{
			http.MethodPost,
			"/sub1/a",
			true,
		},
		{
			http.MethodDelete,
			"/sub1/a",
			false,
		},
		{
			http.MethodGet,
			"/sub2/b",
			false,
		},
		{
			http.MethodGet,
			"/sub1/sub2/b",
			true,
		},
		{
			http.MethodGet,
			"/sub1/sub2/b/{test}",
			true,
		},
		{
			http.MethodGet,
			"/sub1/sub2/b/{test2}",
			false,
		},
		{
			http.MethodGet,
			"/c/{test...}",
			true,
		},
		{
			http.MethodGet,
			"/d/",
			true,
		},
	}
	for _, s := range scenarios {
		t.Run(s.method+"_"+s.path, func(t *testing.T) {
			has := group.HasRoute(s.method, s.path)
			if has != s.expected {
				t.Fatalf("Expected %v, got %v", s.expected, has)
			}
		})
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/router/error_test.go | tools/router/error_test.go | package router_test
import (
"database/sql"
"encoding/json"
"errors"
"fmt"
"io/fs"
"strconv"
"testing"
validation "github.com/go-ozzo/ozzo-validation/v4"
"github.com/pocketbase/pocketbase/tools/router"
)
// TestNewApiErrorWithRawData verifies that non-validation raw data is kept
// internally (RawData) but not serialized in the JSON output.
func TestNewApiErrorWithRawData(t *testing.T) {
	t.Parallel()
	e := router.NewApiError(
		300,
		"message_test",
		"rawData_test",
	)
	result, _ := json.Marshal(e)
	expected := `{"data":{},"message":"Message_test.","status":300}`
	if string(result) != expected {
		t.Errorf("Expected\n%v\ngot\n%v", expected, string(result))
	}
	if e.Error() != "Message_test." {
		t.Errorf("Expected %q, got %q", "Message_test.", e.Error())
	}
	if e.RawData() != "rawData_test" {
		t.Errorf("Expected rawData\n%v\ngot\n%v", "rawData_test", e.RawData())
	}
}
// TestNewApiErrorWithValidationData verifies that nested validation errors,
// plain errors, error maps and custom SafeErrorItem implementations are all
// normalized into the serialized "data" payload.
func TestNewApiErrorWithValidationData(t *testing.T) {
	t.Parallel()
	e := router.NewApiError(
		300,
		"message_test",
		map[string]any{
			"err1": errors.New("test error"), // should be normalized
			"err2": validation.ErrRequired,
			"err3": validation.Errors{
				"err3.1": errors.New("test error"), // should be normalized
				"err3.2": validation.ErrRequired,
				"err3.3": validation.Errors{
					"err3.3.1": validation.ErrRequired,
				},
			},
			"err4": &mockSafeErrorItem{},
			"err5": map[string]error{
				"err5.1": validation.ErrRequired,
			},
		},
	)
	result, _ := json.Marshal(e)
	expected := `{"data":{"err1":{"code":"validation_invalid_value","message":"Invalid value."},"err2":{"code":"validation_required","message":"Cannot be blank."},"err3":{"err3.1":{"code":"validation_invalid_value","message":"Invalid value."},"err3.2":{"code":"validation_required","message":"Cannot be blank."},"err3.3":{"err3.3.1":{"code":"validation_required","message":"Cannot be blank."}}},"err4":{"code":"mock_code","message":"Mock_error.","mock_resolve":123},"err5":{"err5.1":{"code":"validation_required","message":"Cannot be blank."}}},"message":"Message_test.","status":300}`
	if string(result) != expected {
		t.Errorf("Expected \n%v, \ngot \n%v", expected, string(result))
	}
	if e.Error() != "Message_test." {
		t.Errorf("Expected %q, got %q", "Message_test.", e.Error())
	}
	if e.RawData() == nil {
		t.Error("Expected non-nil rawData")
	}
}
// TestNewNotFoundError verifies the 404 constructor's default message and
// validation data serialization.
func TestNewNotFoundError(t *testing.T) {
	t.Parallel()
	scenarios := []struct {
		message string
		data any
		expected string
	}{
		{"", nil, `{"data":{},"message":"The requested resource wasn't found.","status":404}`},
		{"demo", "rawData_test", `{"data":{},"message":"Demo.","status":404}`},
		{"demo", validation.Errors{"err1": validation.NewError("test_code", "test_message")}, `{"data":{"err1":{"code":"test_code","message":"Test_message."}},"message":"Demo.","status":404}`},
	}
	for i, s := range scenarios {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			e := router.NewNotFoundError(s.message, s.data)
			result, _ := json.Marshal(e)
			if str := string(result); str != s.expected {
				t.Fatalf("Expected\n%v\ngot\n%v", s.expected, str)
			}
		})
	}
}
// TestNewBadRequestError verifies the 400 constructor's default message and
// validation data serialization.
func TestNewBadRequestError(t *testing.T) {
	t.Parallel()
	scenarios := []struct {
		message string
		data any
		expected string
	}{
		{"", nil, `{"data":{},"message":"Something went wrong while processing your request.","status":400}`},
		{"demo", "rawData_test", `{"data":{},"message":"Demo.","status":400}`},
		{"demo", validation.Errors{"err1": validation.NewError("test_code", "test_message")}, `{"data":{"err1":{"code":"test_code","message":"Test_message."}},"message":"Demo.","status":400}`},
	}
	for i, s := range scenarios {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			e := router.NewBadRequestError(s.message, s.data)
			result, _ := json.Marshal(e)
			if str := string(result); str != s.expected {
				t.Fatalf("Expected\n%v\ngot\n%v", s.expected, str)
			}
		})
	}
}
// TestNewForbiddenError verifies the 403 constructor's default message and
// validation data serialization.
func TestNewForbiddenError(t *testing.T) {
	t.Parallel()
	scenarios := []struct {
		message string
		data any
		expected string
	}{
		{"", nil, `{"data":{},"message":"You are not allowed to perform this request.","status":403}`},
		{"demo", "rawData_test", `{"data":{},"message":"Demo.","status":403}`},
		{"demo", validation.Errors{"err1": validation.NewError("test_code", "test_message")}, `{"data":{"err1":{"code":"test_code","message":"Test_message."}},"message":"Demo.","status":403}`},
	}
	for i, s := range scenarios {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			e := router.NewForbiddenError(s.message, s.data)
			result, _ := json.Marshal(e)
			if str := string(result); str != s.expected {
				t.Fatalf("Expected\n%v\ngot\n%v", s.expected, str)
			}
		})
	}
}
// TestNewUnauthorizedError verifies the 401 constructor's default message
// and validation data serialization.
func TestNewUnauthorizedError(t *testing.T) {
	t.Parallel()
	scenarios := []struct {
		message string
		data any
		expected string
	}{
		{"", nil, `{"data":{},"message":"Missing or invalid authentication.","status":401}`},
		{"demo", "rawData_test", `{"data":{},"message":"Demo.","status":401}`},
		{"demo", validation.Errors{"err1": validation.NewError("test_code", "test_message")}, `{"data":{"err1":{"code":"test_code","message":"Test_message."}},"message":"Demo.","status":401}`},
	}
	for i, s := range scenarios {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			e := router.NewUnauthorizedError(s.message, s.data)
			result, _ := json.Marshal(e)
			if str := string(result); str != s.expected {
				t.Fatalf("Expected\n%v\ngot\n%v", s.expected, str)
			}
		})
	}
}
// TestNewInternalServerError verifies the 500 constructor's default message
// and validation data serialization.
func TestNewInternalServerError(t *testing.T) {
	t.Parallel()
	scenarios := []struct {
		message string
		data any
		expected string
	}{
		{"", nil, `{"data":{},"message":"Something went wrong while processing your request.","status":500}`},
		{"demo", "rawData_test", `{"data":{},"message":"Demo.","status":500}`},
		{"demo", validation.Errors{"err1": validation.NewError("test_code", "test_message")}, `{"data":{"err1":{"code":"test_code","message":"Test_message."}},"message":"Demo.","status":500}`},
	}
	for i, s := range scenarios {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			e := router.NewInternalServerError(s.message, s.data)
			result, _ := json.Marshal(e)
			if str := string(result); str != s.expected {
				t.Fatalf("Expected\n%v\ngot\n%v", s.expected, str)
			}
		})
	}
}
// TestNewTooManyRequestsError verifies the 429 constructor's default
// message and validation data (incl. error params) serialization.
func TestNewTooManyRequestsError(t *testing.T) {
	t.Parallel()
	scenarios := []struct {
		message string
		data any
		expected string
	}{
		{"", nil, `{"data":{},"message":"Too Many Requests.","status":429}`},
		{"demo", "rawData_test", `{"data":{},"message":"Demo.","status":429}`},
		{"demo", validation.Errors{"err1": validation.NewError("test_code", "test_message").SetParams(map[string]any{"test": 123})}, `{"data":{"err1":{"code":"test_code","message":"Test_message.","params":{"test":123}}},"message":"Demo.","status":429}`},
	}
	for i, s := range scenarios {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			e := router.NewTooManyRequestsError(s.message, s.data)
			result, _ := json.Marshal(e)
			if str := string(result); str != s.expected {
				t.Fatalf("Expected\n%v\ngot\n%v", s.expected, str)
			}
		})
	}
}
// TestApiErrorIs verifies errors.Is comparisons for ApiError values
// (identity-based matching, incl. through error wrapping).
func TestApiErrorIs(t *testing.T) {
	t.Parallel()
	err0 := router.NewInternalServerError("", nil)
	err1 := router.NewInternalServerError("", nil)
	err2 := errors.New("test")
	err3 := fmt.Errorf("wrapped: %w", err0)
	scenarios := []struct {
		name string
		err error
		target error
		expected bool
	}{
		{
			"nil error",
			err0,
			nil,
			false,
		},
		{
			"non ApiError",
			err0,
			err1,
			false,
		},
		{
			"different ApiError",
			err0,
			err2,
			false,
		},
		{
			"same ApiError",
			err0,
			err0,
			true,
		},
		{
			"wrapped ApiError",
			err3,
			err0,
			true,
		},
	}
	for _, s := range scenarios {
		t.Run(s.name, func(t *testing.T) {
			is := errors.Is(s.err, s.target)
			if is != s.expected {
				t.Fatalf("Expected %v, got %v", s.expected, is)
			}
		})
	}
}
// TestToApiError verifies the conversion of arbitrary errors into ApiError
// values (generic 400, not-found sentinels as 404, pass-through/unwrap of
// existing ApiErrors).
func TestToApiError(t *testing.T) {
	t.Parallel()
	scenarios := []struct {
		name string
		err error
		expected string
	}{
		{
			"regular error",
			errors.New("test"),
			`{"data":{},"message":"Something went wrong while processing your request.","status":400}`,
		},
		{
			"fs.ErrNotExist",
			fs.ErrNotExist,
			`{"data":{},"message":"The requested resource wasn't found.","status":404}`,
		},
		{
			"sql.ErrNoRows",
			sql.ErrNoRows,
			`{"data":{},"message":"The requested resource wasn't found.","status":404}`,
		},
		{
			"ApiError",
			router.NewForbiddenError("test", nil),
			`{"data":{},"message":"Test.","status":403}`,
		},
		{
			"wrapped ApiError",
			fmt.Errorf("wrapped: %w", router.NewForbiddenError("test", nil)),
			`{"data":{},"message":"Test.","status":403}`,
		},
	}
	for _, s := range scenarios {
		t.Run(s.name, func(t *testing.T) {
			raw, err := json.Marshal(router.ToApiError(s.err))
			if err != nil {
				t.Fatal(err)
			}
			rawStr := string(raw)
			if rawStr != s.expected {
				t.Fatalf("Expected error\n%vgot\n%v", s.expected, rawStr)
			}
		})
	}
}
// -------------------------------------------------------------------
// compile-time checks that the mock satisfies the router error interfaces
var (
	_ router.SafeErrorItem = (*mockSafeErrorItem)(nil)
	_ router.SafeErrorResolver = (*mockSafeErrorItem)(nil)
)
// mockSafeErrorItem is a stub error implementing [router.SafeErrorItem]
// and [router.SafeErrorResolver] for exercising the custom error
// serialization in the tests above.
type mockSafeErrorItem struct {
}
// Code returns the mock safe error code.
func (m *mockSafeErrorItem) Code() string {
	return "mock_code"
}
// Error implements the error interface.
func (m *mockSafeErrorItem) Error() string {
	return "mock_error"
}
// Resolve adds a marker entry to errData to verify that custom resolvers
// are invoked during the error serialization.
func (m *mockSafeErrorItem) Resolve(errData map[string]any) any {
	errData["mock_resolve"] = 123
	return errData
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/router/router.go | tools/router/router.go | package router
import (
"bufio"
"encoding/json"
"errors"
"io"
"log"
"net"
"net/http"
"github.com/pocketbase/pocketbase/tools/hook"
)
// EventCleanupFunc is a callback optionally returned by an [EventFactoryFunc]
// and invoked right after the route execution.
type EventCleanupFunc func()
// EventFactoryFunc defines the function responsible for creating a Route specific event
// based on the provided request handler ServeHTTP data.
//
// Optionally return a clean up function that will be invoked right after the route execution.
type EventFactoryFunc[T hook.Resolver] func(w http.ResponseWriter, r *http.Request) (T, EventCleanupFunc)
// Router defines a thin wrapper around the standard Go [http.ServeMux] by
// adding support for routing sub-groups, middlewares and other common utils.
//
// Example:
//
// r := NewRouter[*MyEvent](eventFactory)
//
// // middlewares
// r.BindFunc(m1, m2)
//
// // routes
// r.GET("/test", handler1)
//
// // sub-routers/groups
// api := r.Group("/api")
// api.GET("/admins", handler2)
//
// // generate a http.ServeMux instance based on the router configurations
// mux, _ := r.BuildMux()
//
// http.ListenAndServe("localhost:8090", mux)
type Router[T hook.Resolver] struct {
	// @todo consider renaming the type to just Group and replace the embed type
	// with an alias after Go 1.24 adds support for generic type aliases
	// RouterGroup is the root group holding all registered routes, sub groups and middlewares.
	*RouterGroup[T]
	// eventFactory creates the per-request event passed through the middlewares and route action.
	eventFactory EventFactoryFunc[T]
}
// NewRouter creates a new Router instance with the provided event factory function.
func NewRouter[T hook.Resolver](eventFactory EventFactoryFunc[T]) *Router[T] {
	r := &Router[T]{eventFactory: eventFactory}
	r.RouterGroup = &RouterGroup[T]{}
	return r
}
// BuildMux constructs a new mux [http.Handler] instance from the current router configurations.
func (r *Router[T]) BuildMux() (http.Handler, error) {
	// Some of the default std Go handlers like the [http.NotFoundHandler]
	// cannot currently be extended, so a custom "catch-all" route is
	// registered to guarantee that the group middlewares always execute.
	//
	// https://github.com/golang/go/issues/65648
	if !r.HasRoute("", "/") {
		r.Route("", "/", func(e T) error {
			return NewNotFoundError("", nil)
		})
	}

	mux := http.NewServeMux()

	err := r.loadMux(mux, r.RouterGroup, nil)
	if err != nil {
		return nil, err
	}

	return mux, nil
}
// loadMux recursively registers the routes of group (and its sub groups) into mux.
//
// For every route it assembles the final ServeMux pattern
// ("METHOD parentPrefixes+groupPrefix+path") and a hook chain with the
// inherited middlewares, skipping any middleware whose id was excluded
// on the owning group, the current group or the route itself.
func (r *Router[T]) loadMux(mux *http.ServeMux, group *RouterGroup[T], parents []*RouterGroup[T]) error {
	for _, child := range group.children {
		switch v := child.(type) {
		case *RouterGroup[T]:
			if err := r.loadMux(mux, v, append(parents, group)); err != nil {
				return err
			}
		case *Route[T]:
			routeHook := &hook.Hook[T]{}

			var pattern string
			if v.Method != "" {
				pattern = v.Method + " "
			}

			excludedInRoute := func(id string) bool {
				_, ok := v.excludedMiddlewares[id]
				return ok
			}
			excludedInGroup := func(id string) bool {
				_, ok := group.excludedMiddlewares[id]
				return ok
			}

			// inherit the parent groups middlewares
			for _, p := range parents {
				pattern += p.Prefix
				for _, h := range p.Middlewares {
					if _, skip := p.excludedMiddlewares[h.Id]; skip {
						continue
					}
					if excludedInGroup(h.Id) || excludedInRoute(h.Id) {
						continue
					}
					routeHook.Bind(h)
				}
			}

			// current group middlewares
			pattern += group.Prefix
			for _, h := range group.Middlewares {
				if excludedInGroup(h.Id) || excludedInRoute(h.Id) {
					continue
				}
				routeHook.Bind(h)
			}

			// route-specific middlewares
			pattern += v.Path
			for _, h := range v.Middlewares {
				if excludedInRoute(h.Id) {
					continue
				}
				routeHook.Bind(h)
			}

			mux.HandleFunc(pattern, func(resp http.ResponseWriter, req *http.Request) {
				// wrap the response to add write and status tracking
				resp = &ResponseWriter{ResponseWriter: resp}

				// wrap the request body to allow multiple reads
				req.Body = &RereadableReadCloser{ReadCloser: req.Body}

				event, cleanupFunc := r.eventFactory(resp, req)

				// trigger the handler hook chain
				if err := routeHook.Trigger(event, v.Action); err != nil {
					ErrorHandler(resp, req, err)
				}

				if cleanupFunc != nil {
					cleanupFunc()
				}
			})
		default:
			return errors.New("invalid Group item type")
		}
	}

	return nil
}
// ErrorHandler serializes err as a JSON ApiError response, unless a
// response was already written or err is nil.
//
// HEAD requests receive only the status code without a body.
func ErrorHandler(resp http.ResponseWriter, req *http.Request, err error) {
	if err == nil {
		return
	}

	if written, _ := getWritten(resp); written {
		return // a response was already written (aka. already handled)
	}

	if resp.Header().Get("Content-Type") == "" {
		resp.Header().Set("Content-Type", "application/json")
	}

	apiErr := ToApiError(err)

	resp.WriteHeader(apiErr.Status)

	if req.Method == http.MethodHead {
		return // HEAD responses must not include a body
	}

	if encodeErr := json.NewEncoder(resp).Encode(apiErr); encodeErr != nil {
		log.Println(encodeErr) // truly rare case, log to stderr only for dev purposes
	}
}
// -------------------------------------------------------------------
// WriteTracker is implemented by response writers that can report
// whether anything was written to the response.
type WriteTracker interface {
	// Written reports whether a write operation has occurred.
	Written() bool
}
// StatusTracker is implemented by response writers that can report
// the written response status code.
type StatusTracker interface {
	// Status reports the written response status code.
	Status() int
}
// flushErrorer is implemented by writers whose flush can report an error
// (see [ResponseWriter.FlushError]).
type flushErrorer interface {
	FlushError() error
}
// compile-time interface implementation checks
var (
	_ WriteTracker = (*ResponseWriter)(nil)
	_ StatusTracker = (*ResponseWriter)(nil)
	_ http.Flusher = (*ResponseWriter)(nil)
	_ http.Hijacker = (*ResponseWriter)(nil)
	_ http.Pusher = (*ResponseWriter)(nil)
	_ io.ReaderFrom = (*ResponseWriter)(nil)
	_ flushErrorer = (*ResponseWriter)(nil)
)
// ResponseWriter wraps a http.ResponseWriter to track its write state.
type ResponseWriter struct {
	http.ResponseWriter
	written bool // whether the response header/body has been written (or flushed)
	status int // the status code passed to the first WriteHeader call
}
// WriteHeader records the first written status code and forwards it to the
// wrapped writer. Subsequent calls are ignored.
func (rw *ResponseWriter) WriteHeader(status int) {
	if rw.written {
		return // only the first status write wins
	}

	rw.status = status
	rw.written = true

	rw.ResponseWriter.WriteHeader(status)
}
// Write forwards b to the wrapped writer, implicitly sending a
// 200 OK header first if none was written yet.
func (rw *ResponseWriter) Write(b []byte) (int, error) {
	if rw.written {
		return rw.ResponseWriter.Write(b)
	}

	rw.WriteHeader(http.StatusOK)

	return rw.ResponseWriter.Write(b)
}
// Written implements [WriteTracker] and returns whether the current response body has been already written.
func (rw *ResponseWriter) Written() bool {
	return rw.written
}
// Status implements [StatusTracker] and returns the written status code of the current response.
func (rw *ResponseWriter) Status() int {
	return rw.status
}
// Flush implements [http.Flusher] and allows an HTTP handler to flush buffered data to the client.
// This method is a no-op if the wrapped writer doesn't support it.
func (rw *ResponseWriter) Flush() {
	_ = rw.FlushError()
}

// FlushError is similar to [ResponseWriter.Flush] but returns
// [http.ErrNotSupported] if the wrapped writer doesn't support flushing.
func (rw *ResponseWriter) FlushError() error {
	err := http.NewResponseController(rw.ResponseWriter).Flush()
	if errors.Is(err, http.ErrNotSupported) {
		return err // unsupported -> write state stays unchanged
	}
	// a flush attempt (successful or failed for another reason) may have
	// started the response, so mark it as written
	rw.written = true
	return err
}
// Hijack implements [http.Hijacker] and allows an HTTP handler to take over the current connection.
// The call is delegated to [http.ResponseController], which also handles wrapped writers.
func (rw *ResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
	return http.NewResponseController(rw.ResponseWriter).Hijack()
}
// Push implements [http.Pusher] to indicate HTTP/2 server push support.
//
// It unwraps the writer chain until a pusher is found and returns
// [http.ErrNotSupported] when no wrapped writer supports push.
func (rw *ResponseWriter) Push(target string, opts *http.PushOptions) error {
	current := rw.ResponseWriter
	for {
		if p, ok := current.(http.Pusher); ok {
			return p.Push(target, opts)
		}
		u, ok := current.(RWUnwrapper)
		if !ok {
			return http.ErrNotSupported
		}
		current = u.Unwrap()
	}
}
// ReadFrom implements [io.ReaderFrom] by delegating to the first wrapped
// writer that supports it, falling back to [io.Copy] otherwise.
//
// A 200 OK header is implicitly written first if none was written yet.
func (rw *ResponseWriter) ReadFrom(r io.Reader) (n int64, err error) {
	if !rw.written {
		rw.WriteHeader(http.StatusOK)
	}

	current := rw.ResponseWriter
	for {
		if rf, ok := current.(io.ReaderFrom); ok {
			return rf.ReadFrom(r)
		}
		u, ok := current.(RWUnwrapper)
		if !ok {
			return io.Copy(rw.ResponseWriter, r)
		}
		current = u.Unwrap()
	}
}
// Unwrap returns the underlying ResponseWriter instance (usually used by [http.ResponseController]).
func (rw *ResponseWriter) Unwrap() http.ResponseWriter {
	return rw.ResponseWriter
}
// getWritten reports whether rw (or any writer it wraps) already wrote
// its response body, returning http.ErrNotSupported when no writer in
// the chain implements [WriteTracker].
func getWritten(rw http.ResponseWriter) (bool, error) {
	for {
		if tracker, ok := rw.(WriteTracker); ok {
			return tracker.Written(), nil
		}

		unwrapper, ok := rw.(RWUnwrapper)
		if !ok {
			return false, http.ErrNotSupported
		}
		rw = unwrapper.Unwrap()
	}
}
// getStatus returns the status code written by rw (or any writer it
// wraps), returning http.ErrNotSupported when no writer in the chain
// implements [StatusTracker].
func getStatus(rw http.ResponseWriter) (int, error) {
	for {
		if tracker, ok := rw.(StatusTracker); ok {
			return tracker.Status(), nil
		}

		unwrapper, ok := rw.(RWUnwrapper)
		if !ok {
			return 0, http.ErrNotSupported
		}
		rw = unwrapper.Unwrap()
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/router/unmarshal_request_data_test.go | tools/router/unmarshal_request_data_test.go | package router_test
import (
"bytes"
"encoding/json"
"testing"
"time"
"github.com/pocketbase/pocketbase/tools/router"
)
// pointer returns the address of a copy of val, handy for building
// pointer-typed literals inline in test fixtures.
func pointer[T any](val T) *T {
	out := val
	return &out
}
// TestUnmarshalRequestData exercises router.UnmarshalRequestData against
// both map destinations and a struct covering every supported field kind
// (scalars, pointers, slices, slices-of-pointers, TextUnmarshaler,
// @jsonPayload merging, unexported/skipped fields, nested and embedded
// structs), plus custom tag keys and key prefixes.
func TestUnmarshalRequestData(t *testing.T) {
	t.Parallel()

	// generic data used with the map destination scenarios
	mapData := map[string][]string{
		"number1": {"1"},
		"number2": {"2", "3"},
		"number3": {"2.1", "-3.4"},
		"number4": {"0", "-0", "0.0001"},
		"string0": {""},
		"string1": {"a"},
		"string2": {"b", "c"},
		// values that look numeric but must stay strings
		"string3": {
			"0.0",
			"-0.0",
			"000.1",
			"000001",
			"-000001",
			"1.6E-35",
			"-1.6E-35",
			"10e100",
			"1_000_000",
			"1.000.000",
			" 123 ",
			"0b1",
			"0xFF",
			"1234A",
			"Infinity",
			"-Infinity",
			"undefined",
			"null",
		},
		"bool1": {"true"},
		"bool2": {"true", "false"},
		"mixed": {"true", "123", "test"},
		"@jsonPayload": {`{"json_a":null,"json_b":123}`, `{"json_c":[1,2,3]}`},
	}

	// data used with the TestStruct destination scenarios
	structData := map[string][]string{
		"anyTag":         {"a", "b"},
		"AnyPtr":         {"b"},
		"AnySlice":       {"a", "b", "false", ""},
		"anySlicePtrTag": {"d", "true"},
		"AnySliceOfPtr":  {"f", "123.456"},

		"stringTag":         {"a", "b"},
		"StringPtr":         {"b"},
		"StringSlice":       {"a", "b", "c", ""},
		"stringSlicePtrTag": {"d", "e"},
		"StringSliceOfPtr":  {"f", "g"},

		"boolTag":         {"true"},
		"BoolPtr":         {"true"},
		"BoolSlice":       {"true", "false", ""},
		"boolSlicePtrTag": {"false", "false", "true"},
		"BoolSliceOfPtr":  {"false", "true", "false"},

		"int8Tag":         {"-1", "2"},
		"Int8Ptr":         {"3"},
		"Int8Slice":       {"4", "5", ""},
		"int8SlicePtrTag": {"5", "6"},
		"Int8SliceOfPtr":  {"7", "8"},

		"int16Tag":         {"-1", "2"},
		"Int16Ptr":         {"3"},
		"Int16Slice":       {"4", "5", ""},
		"int16SlicePtrTag": {"5", "6"},
		"Int16SliceOfPtr":  {"7", "8"},

		"int32Tag":         {"-1", "2"},
		"Int32Ptr":         {"3"},
		"Int32Slice":       {"4", "5", ""},
		"int32SlicePtrTag": {"5", "6"},
		"Int32SliceOfPtr":  {"7", "8"},

		"int64Tag":         {"-1", "2"},
		"Int64Ptr":         {"3"},
		"Int64Slice":       {"4", "5", ""},
		"int64SlicePtrTag": {"5", "6"},
		"Int64SliceOfPtr":  {"7", "8"},

		"intTag":         {"-1", "2"},
		"IntPtr":         {"3"},
		"IntSlice":       {"4", "5", ""},
		"intSlicePtrTag": {"5", "6"},
		"IntSliceOfPtr":  {"7", "8"},

		"uint8Tag":         {"1", "2"},
		"Uint8Ptr":         {"3"},
		"Uint8Slice":       {"4", "5", ""},
		"uint8SlicePtrTag": {"5", "6"},
		"Uint8SliceOfPtr":  {"7", "8"},

		"uint16Tag":         {"1", "2"},
		"Uint16Ptr":         {"3"},
		"Uint16Slice":       {"4", "5", ""},
		"uint16SlicePtrTag": {"5", "6"},
		"Uint16SliceOfPtr":  {"7", "8"},

		"uint32Tag":         {"1", "2"},
		"Uint32Ptr":         {"3"},
		"Uint32Slice":       {"4", "5", ""},
		"uint32SlicePtrTag": {"5", "6"},
		"Uint32SliceOfPtr":  {"7", "8"},

		"uint64Tag":         {"1", "2"},
		"Uint64Ptr":         {"3"},
		"Uint64Slice":       {"4", "5", ""},
		"uint64SlicePtrTag": {"5", "6"},
		"Uint64SliceOfPtr":  {"7", "8"},

		"uintTag":         {"1", "2"},
		"UintPtr":         {"3"},
		"UintSlice":       {"4", "5", ""},
		"uintSlicePtrTag": {"5", "6"},
		"UintSliceOfPtr":  {"7", "8"},

		"float32Tag":         {"-1.2"},
		"Float32Ptr":         {"1.5", "2.0"},
		"Float32Slice":       {"1", "2.3", "-0.3", ""},
		"float32SlicePtrTag": {"-1.3", "3"},
		"Float32SliceOfPtr":  {"0", "1.2"},

		"float64Tag":         {"-1.2"},
		"Float64Ptr":         {"1.5", "2.0"},
		"Float64Slice":       {"1", "2.3", "-0.3", ""},
		"float64SlicePtrTag": {"-1.3", "3"},
		"Float64SliceOfPtr":  {"0", "1.2"},

		"timeTag":         {"2009-11-10T15:00:00Z"},
		"TimePtr":         {"2009-11-10T14:00:00Z", "2009-11-10T15:00:00Z"},
		"TimeSlice":       {"2009-11-10T14:00:00Z", "2009-11-10T15:00:00Z"},
		"timeSlicePtrTag": {"2009-11-10T15:00:00Z", "2009-11-10T16:00:00Z"},
		"TimeSliceOfPtr":  {"2009-11-10T17:00:00Z", "2009-11-10T18:00:00Z"},

		// @jsonPayload fields
		"@jsonPayload": {
			`{"payloadA":"test", "shouldBeIgnored": "abc"}`,
			`{"payloadB":[1,2,3], "payloadC":true}`,
		},

		// unexported fields or `-` tags
		"unexperted":   {"test"},
		"SkipExported": {"test"},
		"unexportedStructFieldWithoutTag.Name": {"test"},
		"unexportedStruct.Name":                {"test"},

		// structs
		"StructWithoutTag.Name": {"test1"},
		"exportedStruct.Name":   {"test2"},

		// embedded
		"embed_name":        {"test3"},
		"embed2.embed_name2": {"test4"},
	}

	type embed1 struct {
		Name string `form:"embed_name" json:"embed_name"`
	}
	type embed2 struct {
		Name string `form:"embed_name2" json:"embed_name2"`
	}

	//nolint
	type TestStruct struct {
		Any           any `form:"anyTag" query:"anyTag2"`
		AnyPtr        *any
		AnySlice      []any
		AnySlicePtr   *[]any `form:"anySlicePtrTag"`
		AnySliceOfPtr []*any

		String           string `form:"stringTag" query:"stringTag2"`
		StringPtr        *string
		StringSlice      []string
		StringSlicePtr   *[]string `form:"stringSlicePtrTag"`
		StringSliceOfPtr []*string

		Bool           bool `form:"boolTag" query:"boolTag2"`
		BoolPtr        *bool
		BoolSlice      []bool
		BoolSlicePtr   *[]bool `form:"boolSlicePtrTag"`
		BoolSliceOfPtr []*bool

		Int8           int8 `form:"int8Tag" query:"int8Tag2"`
		Int8Ptr        *int8
		Int8Slice      []int8
		Int8SlicePtr   *[]int8 `form:"int8SlicePtrTag"`
		Int8SliceOfPtr []*int8

		Int16           int16 `form:"int16Tag" query:"int16Tag2"`
		Int16Ptr        *int16
		Int16Slice      []int16
		Int16SlicePtr   *[]int16 `form:"int16SlicePtrTag"`
		Int16SliceOfPtr []*int16

		Int32           int32 `form:"int32Tag" query:"int32Tag2"`
		Int32Ptr        *int32
		Int32Slice      []int32
		Int32SlicePtr   *[]int32 `form:"int32SlicePtrTag"`
		Int32SliceOfPtr []*int32

		Int64           int64 `form:"int64Tag" query:"int64Tag2"`
		Int64Ptr        *int64
		Int64Slice      []int64
		Int64SlicePtr   *[]int64 `form:"int64SlicePtrTag"`
		Int64SliceOfPtr []*int64

		Int           int `form:"intTag" query:"intTag2"`
		IntPtr        *int
		IntSlice      []int
		IntSlicePtr   *[]int `form:"intSlicePtrTag"`
		IntSliceOfPtr []*int

		Uint8           uint8 `form:"uint8Tag" query:"uint8Tag2"`
		Uint8Ptr        *uint8
		Uint8Slice      []uint8
		Uint8SlicePtr   *[]uint8 `form:"uint8SlicePtrTag"`
		Uint8SliceOfPtr []*uint8

		Uint16           uint16 `form:"uint16Tag" query:"uint16Tag2"`
		Uint16Ptr        *uint16
		Uint16Slice      []uint16
		Uint16SlicePtr   *[]uint16 `form:"uint16SlicePtrTag"`
		Uint16SliceOfPtr []*uint16

		Uint32           uint32 `form:"uint32Tag" query:"uint32Tag2"`
		Uint32Ptr        *uint32
		Uint32Slice      []uint32
		Uint32SlicePtr   *[]uint32 `form:"uint32SlicePtrTag"`
		Uint32SliceOfPtr []*uint32

		Uint64           uint64 `form:"uint64Tag" query:"uint64Tag2"`
		Uint64Ptr        *uint64
		Uint64Slice      []uint64
		Uint64SlicePtr   *[]uint64 `form:"uint64SlicePtrTag"`
		Uint64SliceOfPtr []*uint64

		Uint           uint `form:"uintTag" query:"uintTag2"`
		UintPtr        *uint
		UintSlice      []uint
		UintSlicePtr   *[]uint `form:"uintSlicePtrTag"`
		UintSliceOfPtr []*uint

		Float32           float32 `form:"float32Tag" query:"float32Tag2"`
		Float32Ptr        *float32
		Float32Slice      []float32
		Float32SlicePtr   *[]float32 `form:"float32SlicePtrTag"`
		Float32SliceOfPtr []*float32

		Float64           float64 `form:"float64Tag" query:"float64Tag2"`
		Float64Ptr        *float64
		Float64Slice      []float64
		Float64SlicePtr   *[]float64 `form:"float64SlicePtrTag"`
		Float64SliceOfPtr []*float64

		// encoding.TextUnmarshaler
		Time           time.Time `form:"timeTag" query:"timeTag2"`
		TimePtr        *time.Time
		TimeSlice      []time.Time
		TimeSlicePtr   *[]time.Time `form:"timeSlicePtrTag"`
		TimeSliceOfPtr []*time.Time

		// @jsonPayload fields
		JSONPayloadA string `form:"shouldBeIgnored" json:"payloadA"`
		JSONPayloadB []int  `json:"payloadB"`
		JSONPayloadC bool   `json:"-"`

		// unexported fields or `-` tags
		unexported   string
		SkipExported string `form:"-"`

		unexportedStructFieldWithoutTag struct {
			Name string `json:"unexportedStructFieldWithoutTag_name"`
		}
		unexportedStructFieldWithTag struct {
			Name string `json:"unexportedStructFieldWithTag_name"`
		} `form:"unexportedStruct"`

		// structs
		StructWithoutTag struct {
			Name string `json:"StructWithoutTag_name"`
		}
		StructWithTag struct {
			Name string `json:"StructWithTag_name"`
		} `form:"exportedStruct"`

		// embedded
		embed1
		embed2 `form:"embed2"`
	}

	scenarios := []struct {
		name   string
		data   map[string][]string
		dst    any
		tag    string
		prefix string
		error  bool
		result string // expected JSON serialization of dst after a successful unmarshal
	}{
		{
			name:   "nil data",
			data:   nil,
			dst:    pointer(map[string]any{}),
			error:  false,
			result: `{}`,
		},
		{
			name:  "non-pointer map[string]any",
			data:  mapData,
			dst:   map[string]any{},
			error: true,
		},
		{
			name:  "unsupported *map[string]string",
			data:  mapData,
			dst:   pointer(map[string]string{}),
			error: true,
		},
		{
			name:  "unsupported *map[string][]string",
			data:  mapData,
			dst:   pointer(map[string][]string{}),
			error: true,
		},
		{
			name:   "*map[string]any",
			data:   mapData,
			dst:    pointer(map[string]any{}),
			result: `{"bool1":true,"bool2":[true,false],"json_a":null,"json_b":123,"json_c":[1,2,3],"mixed":[true,123,"test"],"number1":1,"number2":[2,3],"number3":[2.1,-3.4],"number4":[0,-0,0.0001],"string0":"","string1":"a","string2":["b","c"],"string3":["0.0","-0.0","000.1","000001","-000001","1.6E-35","-1.6E-35","10e100","1_000_000","1.000.000"," 123 ","0b1","0xFF","1234A","Infinity","-Infinity","undefined","null"]}`,
		},
		{
			name:   "valid pointer struct (all fields)",
			data:   structData,
			dst:    &TestStruct{},
			result: `{"Any":"a","AnyPtr":"b","AnySlice":["a","b",false,""],"AnySlicePtr":["d",true],"AnySliceOfPtr":["f",123.456],"String":"a","StringPtr":"b","StringSlice":["a","b","c",""],"StringSlicePtr":["d","e"],"StringSliceOfPtr":["f","g"],"Bool":true,"BoolPtr":true,"BoolSlice":[true,false,false],"BoolSlicePtr":[false,false,true],"BoolSliceOfPtr":[false,true,false],"Int8":-1,"Int8Ptr":3,"Int8Slice":[4,5,0],"Int8SlicePtr":[5,6],"Int8SliceOfPtr":[7,8],"Int16":-1,"Int16Ptr":3,"Int16Slice":[4,5,0],"Int16SlicePtr":[5,6],"Int16SliceOfPtr":[7,8],"Int32":-1,"Int32Ptr":3,"Int32Slice":[4,5,0],"Int32SlicePtr":[5,6],"Int32SliceOfPtr":[7,8],"Int64":-1,"Int64Ptr":3,"Int64Slice":[4,5,0],"Int64SlicePtr":[5,6],"Int64SliceOfPtr":[7,8],"Int":-1,"IntPtr":3,"IntSlice":[4,5,0],"IntSlicePtr":[5,6],"IntSliceOfPtr":[7,8],"Uint8":1,"Uint8Ptr":3,"Uint8Slice":"BAUA","Uint8SlicePtr":"BQY=","Uint8SliceOfPtr":[7,8],"Uint16":1,"Uint16Ptr":3,"Uint16Slice":[4,5,0],"Uint16SlicePtr":[5,6],"Uint16SliceOfPtr":[7,8],"Uint32":1,"Uint32Ptr":3,"Uint32Slice":[4,5,0],"Uint32SlicePtr":[5,6],"Uint32SliceOfPtr":[7,8],"Uint64":1,"Uint64Ptr":3,"Uint64Slice":[4,5,0],"Uint64SlicePtr":[5,6],"Uint64SliceOfPtr":[7,8],"Uint":1,"UintPtr":3,"UintSlice":[4,5,0],"UintSlicePtr":[5,6],"UintSliceOfPtr":[7,8],"Float32":-1.2,"Float32Ptr":1.5,"Float32Slice":[1,2.3,-0.3,0],"Float32SlicePtr":[-1.3,3],"Float32SliceOfPtr":[0,1.2],"Float64":-1.2,"Float64Ptr":1.5,"Float64Slice":[1,2.3,-0.3,0],"Float64SlicePtr":[-1.3,3],"Float64SliceOfPtr":[0,1.2],"Time":"2009-11-10T15:00:00Z","TimePtr":"2009-11-10T14:00:00Z","TimeSlice":["2009-11-10T14:00:00Z","2009-11-10T15:00:00Z"],"TimeSlicePtr":["2009-11-10T15:00:00Z","2009-11-10T16:00:00Z"],"TimeSliceOfPtr":["2009-11-10T17:00:00Z","2009-11-10T18:00:00Z"],"payloadA":"test","payloadB":[1,2,3],"SkipExported":"","StructWithoutTag":{"StructWithoutTag_name":"test1"},"StructWithTag":{"StructWithTag_name":"test2"},"embed_name":"test3","embed_name2":"test4"}`,
		},
		{
			name:  "non-pointer struct",
			data:  structData,
			dst:   TestStruct{},
			error: true,
		},
		{
			name:  "invalid struct uint value",
			data:  map[string][]string{"uintTag": {"-1"}},
			dst:   &TestStruct{},
			error: true,
		},
		{
			name:  "invalid struct int value",
			data:  map[string][]string{"intTag": {"abc"}},
			dst:   &TestStruct{},
			error: true,
		},
		{
			name:  "invalid struct bool value",
			data:  map[string][]string{"boolTag": {"abc"}},
			dst:   &TestStruct{},
			error: true,
		},
		{
			name:  "invalid struct float value",
			data:  map[string][]string{"float64Tag": {"abc"}},
			dst:   &TestStruct{},
			error: true,
		},
		{
			name:  "invalid struct TextUnmarshaler value",
			data:  map[string][]string{"timeTag": {"123"}},
			dst:   &TestStruct{},
			error: true,
		},
		{
			name: "custom tagKey",
			data: map[string][]string{
				"tag1": {"a"},
				"tag2": {"b"},
				"tag3": {"c"},
				"Item": {"d"},
			},
			dst: &struct {
				Item string `form:"tag1" query:"tag2" json:"tag2"`
			}{},
			tag:    "query",
			result: `{"tag2":"b"}`,
		},
		{
			name: "custom prefix",
			data: map[string][]string{
				"test.A":     {"1"},
				"A":          {"2"},
				"test.alias": {"3"},
			},
			dst: &struct {
				A string
				B string `form:"alias"`
			}{},
			prefix: "test",
			result: `{"A":"1","B":"3"}`,
		},
	}

	for _, s := range scenarios {
		t.Run(s.name, func(t *testing.T) {
			err := router.UnmarshalRequestData(s.data, s.dst, s.tag, s.prefix)

			hasErr := err != nil
			if hasErr != s.error {
				t.Fatalf("Expected hasErr %v, got %v (%v)", s.error, hasErr, err)
			}
			if hasErr {
				return
			}

			raw, err := json.Marshal(s.dst)
			if err != nil {
				t.Fatal(err)
			}

			if !bytes.Equal(raw, []byte(s.result)) {
				t.Fatalf("Expected dst \n%s\ngot\n%s", s.result, raw)
			}
		})
	}
}
// note: extra unexported checks in addition to the above test as there
// is no easy way to print nested structs with all their fields.
func TestUnmarshalRequestDataUnexportedFields(t *testing.T) {
	t.Parallel()

	//nolint:all
	type TestStruct struct {
		Exported   string
		unexported string

		// to ensure that the reflection doesn't take tags with higher priority than the exported state
		unexportedWithTag string `form:"unexportedWithTag" json:"unexportedWithTag"`
	}

	data := map[string][]string{
		"Exported":          {"test"}, // just for reference
		"Unexported":        {"test"},
		"unexported":        {"test"},
		"UnexportedWithTag": {"test"},
		"unexportedWithTag": {"test"},
	}

	dst := new(TestStruct)
	if err := router.UnmarshalRequestData(data, dst, "", ""); err != nil {
		t.Fatal(err)
	}

	if dst.Exported != "test" {
		t.Fatalf("Expected the Exported field to be %q, got %q", "test", dst.Exported)
	}
	if dst.unexported != "" {
		t.Fatalf("Expected the unexported field to remain empty, got %q", dst.unexported)
	}
	if dst.unexportedWithTag != "" {
		t.Fatalf("Expected the unexportedWithTag field to remain empty, got %q", dst.unexportedWithTag)
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/router/event_test.go | tools/router/event_test.go | package router_test
import (
"bytes"
"crypto/tls"
"encoding/json"
"encoding/xml"
"errors"
"fmt"
"io"
"mime/multipart"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"testing"
validation "github.com/go-ozzo/ozzo-validation/v4"
"github.com/pocketbase/pocketbase/tools/router"
)
// unwrapTester is a minimal http.ResponseWriter wrapper used to exercise
// the router's RWUnwrapper-based unwrap chains in tests.
type unwrapTester struct {
	http.ResponseWriter
}
// Unwrap exposes the wrapped writer (satisfies the router's RWUnwrapper-style interface).
func (ut unwrapTester) Unwrap() http.ResponseWriter {
	return ut.ResponseWriter
}
// TestEventWritten verifies that Event.Written reports true only when the
// response writer implements WriteTracker AND a body write has occurred.
func TestEventWritten(t *testing.T) {
	t.Parallel()

	res1 := httptest.NewRecorder()

	res2 := httptest.NewRecorder()
	res2.Write([]byte("test"))

	res3 := &router.ResponseWriter{ResponseWriter: unwrapTester{httptest.NewRecorder()}}

	res4 := &router.ResponseWriter{ResponseWriter: unwrapTester{httptest.NewRecorder()}}
	res4.Write([]byte("test"))

	scenarios := []struct {
		name     string
		response http.ResponseWriter
		expected bool
	}{
		{
			name:     "non-written non-WriteTracker",
			response: res1,
			expected: false,
		},
		{
			name:     "written non-WriteTracker",
			response: res2,
			expected: false, // plain recorder doesn't track writes
		},
		{
			name:     "non-written WriteTracker",
			response: res3,
			expected: false,
		},
		{
			name:     "written WriteTracker",
			response: res4,
			expected: true,
		},
	}

	for _, s := range scenarios {
		t.Run(s.name, func(t *testing.T) {
			event := router.Event{
				Response: s.response,
			}

			result := event.Written()

			if result != s.expected {
				t.Fatalf("Expected %v, got %v", s.expected, result)
			}
		})
	}
}
// TestEventStatus verifies that Event.Status returns the written status
// code only when the response writer implements StatusTracker.
func TestEventStatus(t *testing.T) {
	t.Parallel()

	res1 := httptest.NewRecorder()

	res2 := httptest.NewRecorder()
	res2.WriteHeader(123)

	res3 := &router.ResponseWriter{ResponseWriter: unwrapTester{httptest.NewRecorder()}}

	res4 := &router.ResponseWriter{ResponseWriter: unwrapTester{httptest.NewRecorder()}}
	res4.WriteHeader(123)

	scenarios := []struct {
		name     string
		response http.ResponseWriter
		expected int
	}{
		{
			name:     "non-written non-StatusTracker",
			response: res1,
			expected: 0,
		},
		{
			name:     "written non-StatusTracker",
			response: res2,
			expected: 0, // plain recorder doesn't track the status
		},
		{
			name:     "non-written StatusTracker",
			response: res3,
			expected: 0,
		},
		{
			name:     "written StatusTracker",
			response: res4,
			expected: 123,
		},
	}

	for _, s := range scenarios {
		t.Run(s.name, func(t *testing.T) {
			event := router.Event{
				Response: s.response,
			}

			result := event.Status()

			if result != s.expected {
				t.Fatalf("Expected %d, got %d", s.expected, result)
			}
		})
	}
}
// TestEventIsTLS verifies that Event.IsTLS reflects the presence of a
// TLS connection state on the underlying request.
func TestEventIsTLS(t *testing.T) {
	t.Parallel()

	req, err := http.NewRequest(http.MethodGet, "/", nil)
	if err != nil {
		t.Fatal(err)
	}

	event := router.Event{Request: req}

	// plain request -> not TLS
	if event.IsTLS() {
		t.Fatalf("Expected IsTLS false")
	}

	// attach a dummy TLS connection state -> TLS
	req.TLS = &tls.ConnectionState{}
	if !event.IsTLS() {
		t.Fatalf("Expected IsTLS true")
	}
}
// TestEventSetCookie verifies that Event.SetCookie appends a Set-Cookie
// header to the response.
func TestEventSetCookie(t *testing.T) {
	t.Parallel()

	event := router.Event{Response: httptest.NewRecorder()}

	if got := event.Response.Header().Get("set-cookie"); got != "" {
		t.Fatalf("Expected empty cookie string, got %q", got)
	}

	event.SetCookie(&http.Cookie{Name: "test", Value: "a"})

	if got, want := event.Response.Header().Get("set-cookie"), "test=a"; got != want {
		t.Fatalf("Expected cookie %q, got %q", want, got)
	}
}
// TestEventRemoteIP verifies that Event.RemoteIP parses the request's
// RemoteAddr host:port value and normalizes IPv6 to its expanded form.
func TestEventRemoteIP(t *testing.T) {
	t.Parallel()

	scenarios := []struct {
		remoteAddr string
		expected   string
	}{
		{"", "invalid IP"},
		{"1.2.3.4", "invalid IP"}, // missing port
		{"1.2.3.4:8090", "1.2.3.4"},
		{"[0000:0000:0000:0000:0000:0000:0000:0002]:80", "0000:0000:0000:0000:0000:0000:0000:0002"},
		{"[::2]:80", "0000:0000:0000:0000:0000:0000:0000:0002"}, // should always return the expanded version
	}

	for _, s := range scenarios {
		t.Run(s.remoteAddr, func(t *testing.T) {
			req, err := http.NewRequest(http.MethodGet, "/", nil)
			if err != nil {
				t.Fatal(err)
			}
			req.RemoteAddr = s.remoteAddr

			event := router.Event{Request: req}

			ip := event.RemoteIP()

			if ip != s.expected {
				t.Fatalf("Expected IP %q, got %q", s.expected, ip)
			}
		})
	}
}
// TestFindUploadedFiles verifies that Event.FindUploadedFiles extracts a
// multipart form file and normalizes its name (sanitized, truncated and
// suffixed with a random 10-char fragment before the extension).
func TestFindUploadedFiles(t *testing.T) {
	scenarios := []struct {
		filename        string
		expectedPattern string
	}{
		{"ab.png", `^ab\w{10}_\w{10}\.png$`},
		{"test", `^test_\w{10}\.txt$`}, // missing extension -> .txt fallback
		{"a b c d!@$.j!@$pg", `^a_b_c_d_\w{10}\.jpg$`},
		{strings.Repeat("a", 150), `^a{100}_\w{10}\.txt$`}, // overlong names are truncated to 100 chars
	}

	for _, s := range scenarios {
		t.Run(s.filename, func(t *testing.T) {
			// create multipart form file body
			body := new(bytes.Buffer)
			mp := multipart.NewWriter(body)
			w, err := mp.CreateFormFile("test", s.filename)
			if err != nil {
				t.Fatal(err)
			}
			w.Write([]byte("test"))
			mp.Close()
			// ---

			req := httptest.NewRequest(http.MethodPost, "/", body)
			req.Header.Add("Content-Type", mp.FormDataContentType())

			event := router.Event{Request: req}

			result, err := event.FindUploadedFiles("test")
			if err != nil {
				t.Fatal(err)
			}

			if len(result) != 1 {
				t.Fatalf("Expected 1 file, got %d", len(result))
			}

			if result[0].Size != 4 {
				t.Fatalf("Expected the file size to be 4 bytes, got %d", result[0].Size)
			}

			pattern, err := regexp.Compile(s.expectedPattern)
			if err != nil {
				t.Fatalf("Invalid filename pattern %q: %v", s.expectedPattern, err)
			}
			if !pattern.MatchString(result[0].Name) {
				t.Fatalf("Expected filename to match %s, got filename %s", s.expectedPattern, result[0].Name)
			}
		})
	}
}
// TestFindUploadedFilesMissing verifies that FindUploadedFiles errors
// (and returns nil) when the multipart form has no file under the key.
func TestFindUploadedFilesMissing(t *testing.T) {
	body := new(bytes.Buffer)
	mp := multipart.NewWriter(body)
	mp.Close()

	req := httptest.NewRequest(http.MethodPost, "/", body)
	req.Header.Add("Content-Type", mp.FormDataContentType())

	event := router.Event{Request: req}

	files, err := event.FindUploadedFiles("test")
	if err == nil {
		t.Error("Expected error, got nil")
	}
	if files != nil {
		t.Errorf("Expected result to be nil, got %v", files)
	}
}
// TestEventSetGet verifies the Event per-request key-value store:
// Get on an unset store is nil-safe and Set/Get round-trip values.
func TestEventSetGet(t *testing.T) {
	event := router.Event{}

	// get before any set (ensures that doesn't panic)
	if v := event.Get("test"); v != nil {
		t.Fatalf("Expected nil value, got %v", v)
	}

	event.Set("a", 123)
	event.Set("b", 456)

	scenarios := []struct {
		key      string
		expected any
	}{
		{"", nil},
		{"missing", nil},
		{"a", 123},
		{"b", 456},
	}

	for i, s := range scenarios {
		t.Run(fmt.Sprintf("%d_%s", i, s.key), func(t *testing.T) {
			result := event.Get(s.key)
			if result != s.expected {
				t.Fatalf("Expected %v, got %v", s.expected, result)
			}
		})
	}
}
// TestEventSetAllGetAll verifies that SetAll stores a shallow copy of the
// provided map (later mutations of the source don't leak into the event)
// and that GetAll returns the stored values.
func TestEventSetAllGetAll(t *testing.T) {
	data := map[string]any{
		"a": 123,
		"b": 456,
	}

	rawData, err := json.Marshal(data)
	if err != nil {
		t.Fatal(err)
	}

	event := router.Event{}
	event.SetAll(data)

	// modify the data to ensure that the map was shallow copied
	data["c"] = 789

	result := event.GetAll()

	rawResult, err := json.Marshal(result)
	if err != nil {
		t.Fatal(err)
	}

	if len(rawResult) == 0 || !bytes.Equal(rawData, rawResult) {
		t.Fatalf("Expected\n%v\ngot\n%v", rawData, rawResult)
	}
}
// TestEventString verifies Event.String: the body is written as-is and the
// content-type defaults to text/plain unless explicitly set.
func TestEventString(t *testing.T) {
	scenarios := []testResponseWriteScenario[string]{
		{
			name:            "no explicit content-type",
			status:          123,
			headers:         nil,
			body:            "test",
			expectedStatus:  123,
			expectedHeaders: map[string]string{"content-type": "text/plain; charset=utf-8"},
			expectedBody:    "test",
		},
		{
			name:            "with explicit content-type",
			status:          123,
			headers:         map[string]string{"content-type": "text/test"},
			body:            "test",
			expectedStatus:  123,
			expectedHeaders: map[string]string{"content-type": "text/test"},
			expectedBody:    "test",
		},
	}

	for _, s := range scenarios {
		testEventResponseWrite(t, s, func(e *router.Event) error {
			return e.String(s.status, s.body)
		})
	}
}
// TestEventHTML verifies Event.HTML: the body is written as-is and the
// content-type defaults to text/html unless explicitly set.
func TestEventHTML(t *testing.T) {
	scenarios := []testResponseWriteScenario[string]{
		{
			name:            "no explicit content-type",
			status:          123,
			headers:         nil,
			body:            "test",
			expectedStatus:  123,
			expectedHeaders: map[string]string{"content-type": "text/html; charset=utf-8"},
			expectedBody:    "test",
		},
		{
			name:            "with explicit content-type",
			status:          123,
			headers:         map[string]string{"content-type": "text/test"},
			body:            "test",
			expectedStatus:  123,
			expectedHeaders: map[string]string{"content-type": "text/test"},
			expectedBody:    "test",
		},
	}

	for _, s := range scenarios {
		testEventResponseWrite(t, s, func(e *router.Event) error {
			return e.HTML(s.status, s.body)
		})
	}
}
// TestEventJSON verifies Event.JSON: bodies are JSON-encoded, the
// content-type defaults to application/json, and for 2xx responses the
// "fields" query param filters the serialized keys (no filtering on 4xx).
func TestEventJSON(t *testing.T) {
	body := map[string]any{"a": 123, "b": 456, "c": "test"}

	expectedPickedBody := `{"a":123,"c":"test"}` + "\n"
	expectedFullBody := `{"a":123,"b":456,"c":"test"}` + "\n"

	scenarios := []testResponseWriteScenario[any]{
		{
			name:            "no explicit content-type",
			status:          200,
			headers:         nil,
			body:            body,
			expectedStatus:  200,
			expectedHeaders: map[string]string{"content-type": "application/json"},
			expectedBody:    expectedPickedBody,
		},
		{
			name:            "with explicit content-type (200)",
			status:          200,
			headers:         map[string]string{"content-type": "application/test"},
			body:            body,
			expectedStatus:  200,
			expectedHeaders: map[string]string{"content-type": "application/test"},
			expectedBody:    expectedPickedBody,
		},
		{
			name:            "with explicit content-type (400)", // no fields picker
			status:          400,
			headers:         map[string]string{"content-type": "application/test"},
			body:            body,
			expectedStatus:  400,
			expectedHeaders: map[string]string{"content-type": "application/test"},
			expectedBody:    expectedFullBody,
		},
	}

	for _, s := range scenarios {
		testEventResponseWrite(t, s, func(e *router.Event) error {
			e.Request.URL.RawQuery = "fields=a,c" // ensures that the picker is invoked
			return e.JSON(s.status, s.body)
		})
	}
}
// TestEventXML verifies Event.XML: the body is XML-encoded with the
// standard xml.Header prefix and the content-type defaults to
// application/xml unless explicitly set.
func TestEventXML(t *testing.T) {
	scenarios := []testResponseWriteScenario[string]{
		{
			name:            "no explicit content-type",
			status:          234,
			headers:         nil,
			body:            "test",
			expectedStatus:  234,
			expectedHeaders: map[string]string{"content-type": "application/xml; charset=utf-8"},
			expectedBody:    xml.Header + "<string>test</string>",
		},
		{
			name:            "with explicit content-type",
			status:          234,
			headers:         map[string]string{"content-type": "text/test"},
			body:            "test",
			expectedStatus:  234,
			expectedHeaders: map[string]string{"content-type": "text/test"},
			expectedBody:    xml.Header + "<string>test</string>",
		},
	}

	for _, s := range scenarios {
		testEventResponseWrite(t, s, func(e *router.Event) error {
			return e.XML(s.status, s.body)
		})
	}
}
// TestEventStream verifies Event.Stream: the reader's content is copied to
// the response with the provided status and content-type.
func TestEventStream(t *testing.T) {
	scenarios := []testResponseWriteScenario[string]{
		{
			name:            "stream",
			status:          234,
			headers:         map[string]string{"content-type": "text/test"},
			body:            "test",
			expectedStatus:  234,
			expectedHeaders: map[string]string{"content-type": "text/test"},
			expectedBody:    "test",
		},
	}

	for _, s := range scenarios {
		testEventResponseWrite(t, s, func(e *router.Event) error {
			return e.Stream(s.status, s.headers["content-type"], strings.NewReader(s.body))
		})
	}
}
// TestEventBlob verifies Event.Blob: the raw bytes are written with the
// provided status and content-type.
func TestEventBlob(t *testing.T) {
	scenarios := []testResponseWriteScenario[[]byte]{
		{
			name:            "blob",
			status:          234,
			headers:         map[string]string{"content-type": "text/test"},
			body:            []byte("test"),
			expectedStatus:  234,
			expectedHeaders: map[string]string{"content-type": "text/test"},
			expectedBody:    "test",
		},
	}

	for _, s := range scenarios {
		testEventResponseWrite(t, s, func(e *router.Event) error {
			return e.Blob(s.status, s.headers["content-type"], s.body)
		})
	}
}
func TestEventNoContent(t *testing.T) {
s := testResponseWriteScenario[any]{
name: "no content",
status: 234,
headers: map[string]string{"content-type": "text/test"},
body: nil,
expectedStatus: 234,
expectedHeaders: map[string]string{"content-type": "text/test"},
expectedBody: "",
}
testEventResponseWrite(t, s, func(e *router.Event) error {
return e.NoContent(s.status)
})
}
// TestEventFlush verifies that Event.Flush reaches the innermost recorder
// through the wrapped (unwrapTester -> ResponseWriter) chain.
func TestEventFlush(t *testing.T) {
	rec := httptest.NewRecorder()

	event := &router.Event{
		Response: unwrapTester{&router.ResponseWriter{ResponseWriter: rec}},
	}
	event.Response.Write([]byte("test"))
	event.Flush()

	if !rec.Flushed {
		t.Fatal("Expected response to be flushed")
	}
}
// TestEventRedirect verifies that Event.Redirect only accepts 30x status
// codes and overwrites any preset Location header with its argument.
func TestEventRedirect(t *testing.T) {
	scenarios := []testResponseWriteScenario[any]{
		{
			name:           "non-30x status",
			status:         200,
			expectedStatus: 200,
			expectedError:  router.ErrInvalidRedirectStatusCode,
		},
		{
			name:            "30x status",
			status:          302,
			headers:         map[string]string{"location": "test"}, // should be overwritten with the argument
			expectedStatus:  302,
			expectedHeaders: map[string]string{"location": "example"},
		},
	}

	for _, s := range scenarios {
		testEventResponseWrite(t, s, func(e *router.Event) error {
			return e.Redirect(s.status, "example")
		})
	}
}
// TestEventFileFS verifies Event.FileFS against a temp directory tree:
// explicit files are served directly, a directory falls back to its
// index.html (error when missing), and file headers (Content-Length)
// indicate that http.ServeContent was used.
func TestEventFileFS(t *testing.T) {
	// stub test files
	// ---
	dir, err := os.MkdirTemp("", "EventFileFS")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir)

	err = os.WriteFile(filepath.Join(dir, "index.html"), []byte("index"), 0644)
	if err != nil {
		t.Fatal(err)
	}

	err = os.WriteFile(filepath.Join(dir, "test.txt"), []byte("test"), 0644)
	if err != nil {
		t.Fatal(err)
	}

	// create sub directory with an index.html file inside it
	err = os.MkdirAll(filepath.Join(dir, "sub1"), os.ModePerm)
	if err != nil {
		t.Fatal(err)
	}
	err = os.WriteFile(filepath.Join(dir, "sub1", "index.html"), []byte("sub1 index"), 0644)
	if err != nil {
		t.Fatal(err)
	}

	err = os.MkdirAll(filepath.Join(dir, "sub2"), os.ModePerm)
	if err != nil {
		t.Fatal(err)
	}
	err = os.WriteFile(filepath.Join(dir, "sub2", "test.txt"), []byte("sub2 test"), 0644)
	if err != nil {
		t.Fatal(err)
	}
	// ---

	// NOTE(review): the first two scenarios are identical ({"", ""}) —
	// "root with no explicit file" presumably intended a different path; verify.
	scenarios := []struct {
		name     string
		path     string
		expected string // empty string means an error is expected
	}{
		{"missing file", "", ""},
		{"root with no explicit file", "", ""},
		{"root with explicit file", "test.txt", "test"},
		{"sub dir with no explicit file", "sub1", "sub1 index"},
		{"sub dir with no explicit file (no index.html)", "sub2", ""},
		{"sub dir explicit file", "sub2/test.txt", "sub2 test"},
	}

	for _, s := range scenarios {
		t.Run(s.name, func(t *testing.T) {
			req, err := http.NewRequest(http.MethodGet, "/", nil)
			if err != nil {
				t.Fatal(err)
			}

			rec := httptest.NewRecorder()

			event := &router.Event{
				Request:  req,
				Response: rec,
			}

			err = event.FileFS(os.DirFS(dir), s.path)

			hasErr := err != nil
			expectErr := s.expected == ""
			if hasErr != expectErr {
				t.Fatalf("Expected hasErr %v, got %v (%v)", expectErr, hasErr, err)
			}

			result := rec.Result()

			raw, err := io.ReadAll(result.Body)
			result.Body.Close()
			if err != nil {
				t.Fatal(err)
			}

			if string(raw) != s.expected {
				t.Fatalf("Expected body\n%s\ngot\n%s", s.expected, raw)
			}

			// ensure that the proper file headers are added
			// (aka. http.ServeContent is invoked)
			length, _ := strconv.Atoi(result.Header.Get("content-length"))
			if length != len(s.expected) {
				t.Fatalf("Expected Content-Length %d, got %d", len(s.expected), length)
			}
		})
	}
}
// TestEventError verifies that Event.Error builds an ApiError-like value
// with the given status, a normalized message and validation error data.
func TestEventError(t *testing.T) {
	err := new(router.Event).Error(123, "message_test", map[string]any{"a": validation.Required, "b": "test"})

	result, _ := json.Marshal(err)
	expected := `{"data":{"a":{"code":"validation_invalid_value","message":"Invalid value."},"b":{"code":"validation_invalid_value","message":"Invalid value."}},"message":"Message_test.","status":123}`

	if string(result) != expected {
		t.Errorf("Expected\n%s\ngot\n%s", expected, result)
	}
}
// TestEventBadRequestError verifies the 400 shortcut of Event.Error.
func TestEventBadRequestError(t *testing.T) {
	err := new(router.Event).BadRequestError("message_test", map[string]any{"a": validation.Required, "b": "test"})

	result, _ := json.Marshal(err)
	expected := `{"data":{"a":{"code":"validation_invalid_value","message":"Invalid value."},"b":{"code":"validation_invalid_value","message":"Invalid value."}},"message":"Message_test.","status":400}`

	if string(result) != expected {
		t.Errorf("Expected\n%s\ngot\n%s", expected, result)
	}
}
// TestEventNotFoundError verifies the 404 shortcut of Event.Error.
func TestEventNotFoundError(t *testing.T) {
	err := new(router.Event).NotFoundError("message_test", map[string]any{"a": validation.Required, "b": "test"})

	result, _ := json.Marshal(err)
	expected := `{"data":{"a":{"code":"validation_invalid_value","message":"Invalid value."},"b":{"code":"validation_invalid_value","message":"Invalid value."}},"message":"Message_test.","status":404}`

	if string(result) != expected {
		t.Errorf("Expected\n%s\ngot\n%s", expected, result)
	}
}
// TestEventForbiddenError verifies the 403 shortcut of Event.Error.
func TestEventForbiddenError(t *testing.T) {
	err := new(router.Event).ForbiddenError("message_test", map[string]any{"a": validation.Required, "b": "test"})

	result, _ := json.Marshal(err)
	expected := `{"data":{"a":{"code":"validation_invalid_value","message":"Invalid value."},"b":{"code":"validation_invalid_value","message":"Invalid value."}},"message":"Message_test.","status":403}`

	if string(result) != expected {
		t.Errorf("Expected\n%s\ngot\n%s", expected, result)
	}
}
// TestEventUnauthorizedError verifies the 401 shortcut of Event.Error.
func TestEventUnauthorizedError(t *testing.T) {
	err := new(router.Event).UnauthorizedError("message_test", map[string]any{"a": validation.Required, "b": "test"})

	result, _ := json.Marshal(err)
	expected := `{"data":{"a":{"code":"validation_invalid_value","message":"Invalid value."},"b":{"code":"validation_invalid_value","message":"Invalid value."}},"message":"Message_test.","status":401}`

	if string(result) != expected {
		t.Errorf("Expected\n%s\ngot\n%s", expected, result)
	}
}
// TestEventTooManyRequestsError verifies the 429 shortcut of Event.Error.
func TestEventTooManyRequestsError(t *testing.T) {
	err := new(router.Event).TooManyRequestsError("message_test", map[string]any{"a": validation.Required, "b": "test"})

	result, _ := json.Marshal(err)
	expected := `{"data":{"a":{"code":"validation_invalid_value","message":"Invalid value."},"b":{"code":"validation_invalid_value","message":"Invalid value."}},"message":"Message_test.","status":429}`

	if string(result) != expected {
		t.Errorf("Expected\n%s\ngot\n%s", expected, result)
	}
}
// TestEventInternalServerError verifies the 500 shortcut of Event.Error.
func TestEventInternalServerError(t *testing.T) {
	err := new(router.Event).InternalServerError("message_test", map[string]any{"a": validation.Required, "b": "test"})

	result, _ := json.Marshal(err)
	expected := `{"data":{"a":{"code":"validation_invalid_value","message":"Invalid value."},"b":{"code":"validation_invalid_value","message":"Invalid value."}},"message":"Message_test.","status":500}`

	if string(result) != expected {
		t.Errorf("Expected\n%s\ngot\n%s", expected, result)
	}
}
func TestEventBindBody(t *testing.T) {
type testDstStruct struct {
A int `json:"a" xml:"a" form:"a"`
B int `json:"b" xml:"b" form:"b"`
C string `json:"c" xml:"c" form:"c"`
}
emptyDst := `{"a":0,"b":0,"c":""}`
queryDst := `a=123&b=-456&c=test`
xmlDst := `
<?xml version="1.0" encoding="UTF-8" ?>
<root>
<a>123</a>
<b>-456</b>
<c>test</c>
</root>
`
jsonDst := `{"a":123,"b":-456,"c":"test"}`
// multipart
mpBody := &bytes.Buffer{}
mpWriter := multipart.NewWriter(mpBody)
mpWriter.WriteField("@jsonPayload", `{"a":123}`)
mpWriter.WriteField("b", "-456")
mpWriter.WriteField("c", "test")
if err := mpWriter.Close(); err != nil {
t.Fatal(err)
}
scenarios := []struct {
contentType string
body io.Reader
expectDst string
expectError bool
}{
{
contentType: "",
body: strings.NewReader(jsonDst),
expectDst: emptyDst,
expectError: true,
},
{
contentType: "application/rtf", // unsupported
body: strings.NewReader(jsonDst),
expectDst: emptyDst,
expectError: true,
},
// empty body
{
contentType: "application/json;charset=emptybody",
body: strings.NewReader(""),
expectDst: emptyDst,
},
// json
{
contentType: "application/json",
body: strings.NewReader(jsonDst),
expectDst: jsonDst,
},
{
contentType: "application/json;charset=abc",
body: strings.NewReader(jsonDst),
expectDst: jsonDst,
},
// xml
{
contentType: "text/xml",
body: strings.NewReader(xmlDst),
expectDst: jsonDst,
},
{
contentType: "text/xml;charset=abc",
body: strings.NewReader(xmlDst),
expectDst: jsonDst,
},
{
contentType: "application/xml",
body: strings.NewReader(xmlDst),
expectDst: jsonDst,
},
{
contentType: "application/xml;charset=abc",
body: strings.NewReader(xmlDst),
expectDst: jsonDst,
},
// x-www-form-urlencoded
{
contentType: "application/x-www-form-urlencoded",
body: strings.NewReader(queryDst),
expectDst: jsonDst,
},
{
contentType: "application/x-www-form-urlencoded;charset=abc",
body: strings.NewReader(queryDst),
expectDst: jsonDst,
},
// multipart
{
contentType: mpWriter.FormDataContentType(),
body: mpBody,
expectDst: jsonDst,
},
}
for _, s := range scenarios {
t.Run(s.contentType, func(t *testing.T) {
req, err := http.NewRequest(http.MethodPost, "/", s.body)
if err != nil {
t.Fatal(err)
}
req.Header.Add("content-type", s.contentType)
event := &router.Event{Request: req}
dst := testDstStruct{}
err = event.BindBody(&dst)
hasErr := err != nil
if hasErr != s.expectError {
t.Fatalf("Expected hasErr %v, got %v (%v)", s.expectError, hasErr, err)
}
dstRaw, err := json.Marshal(dst)
if err != nil {
t.Fatal(err)
}
if string(dstRaw) != s.expectDst {
t.Fatalf("Expected dst\n%s\ngot\n%s", s.expectDst, dstRaw)
}
})
}
}
// -------------------------------------------------------------------
// testResponseWriteScenario describes a single table-driven case for
// exercising one of the Event response write helpers (String, JSON, XML, etc.).
type testResponseWriteScenario[T any] struct {
	name            string            // subtest name
	status          int               // status code passed to the write helper
	headers         map[string]string // response headers set before writing
	body            T                 // body value passed to the write helper
	expectedStatus  int               // expected recorded status code
	expectedHeaders map[string]string // expected headers (only the listed ones are checked)
	expectedBody    string            // expected raw response body
	expectedError   error             // expected error returned by the write helper
}
// testEventResponseWrite runs a single response write scenario against a fresh
// router.Event backed by an httptest recorder and asserts the returned error,
// the recorded status code, body and headers.
//
// writeFunc is expected to invoke one of the Event write helpers using the
// scenario data (ex. return e.JSON(scenario.status, scenario.body)).
func testEventResponseWrite[T any](
	t *testing.T,
	scenario testResponseWriteScenario[T],
	writeFunc func(e *router.Event) error,
) {
	t.Run(scenario.name, func(t *testing.T) {
		req, err := http.NewRequest(http.MethodGet, "/", nil)
		if err != nil {
			t.Fatal(err)
		}
		rec := httptest.NewRecorder()
		event := &router.Event{
			Request:  req,
			Response: &router.ResponseWriter{ResponseWriter: rec},
		}
		// apply the scenario headers before invoking the write helper
		for k, v := range scenario.headers {
			event.Response.Header().Add(k, v)
		}
		err = writeFunc(event)
		// errors.Is is used so that wrapped scenario errors also match
		if (scenario.expectedError != nil || err != nil) && !errors.Is(err, scenario.expectedError) {
			t.Fatalf("Expected error %v, got %v", scenario.expectedError, err)
		}
		result := rec.Result()
		if result.StatusCode != scenario.expectedStatus {
			t.Fatalf("Expected status code %d, got %d", scenario.expectedStatus, result.StatusCode)
		}
		resultBody, err := io.ReadAll(result.Body)
		result.Body.Close()
		if err != nil {
			t.Fatalf("Failed to read response body: %v", err)
		}
		// both bodies are json encoded as plain strings to normalize the
		// comparison (quotes escaping, raw newlines, etc.)
		resultBody, err = json.Marshal(string(resultBody))
		if err != nil {
			t.Fatal(err)
		}
		expectedBody, err := json.Marshal(scenario.expectedBody)
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(resultBody, expectedBody) {
			t.Fatalf("Expected body\n%s\ngot\n%s", expectedBody, resultBody)
		}
		// extra response headers not listed in the scenario are ignored
		for k, ev := range scenario.expectedHeaders {
			if v := result.Header.Get(k); v != ev {
				t.Fatalf("Expected %q header to be %q, got %q", k, ev, v)
			}
		}
	})
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/router/unmarshal_request_data.go | tools/router/unmarshal_request_data.go | package router
import (
"encoding"
"encoding/json"
"errors"
"reflect"
"regexp"
"strconv"
)
var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
// JSONPayloadKey is the key for the special UnmarshalRequestData case
// used for reading serialized json payload without normalization.
const JSONPayloadKey string = "@jsonPayload"
// UnmarshalRequestData unmarshals url.Values type of data (query, multipart/form-data, etc.) into dst.
//
// dst must be a pointer to a map[string]any or struct.
//
// If dst is a map[string]any, each data value will be inferred and
// converted to its bool, numeric, or string equivalent value
// (refer to inferValue() for the exact rules).
//
// If dst is a struct, the following field types are supported:
// - bool
// - string
// - int, int8, int16, int32, int64
// - uint, uint8, uint16, uint32, uint64
// - float32, float64
// - serialized json string if submitted under the special "@jsonPayload" key
// - encoding.TextUnmarshaler
// - pointer and slice variations of the above primitives (ex. *string, []string, *[]string []*string, etc.)
// - named/anonymous struct fields
// Dot-notation is used to target nested fields, ex. "nestedStructField.title".
// - embedded struct fields
// The embedded struct fields are treated by default as if they were defined in their parent struct.
// If the embedded struct has a tag matching structTagKey then to set its fields the data keys must be prefixed with that tag
// similar to the regular nested struct fields.
//
// structTagKey and structPrefix are used only when dst is a struct.
//
// structTagKey represents the tag to use to match a data entry with a struct field (defaults to "form").
// If the struct field doesn't have the structTagKey tag, then the exported struct field name will be used as it is.
//
// structPrefix could be provided if all of the data keys are prefixed with a common string
// and you want the struct field to match only the value without the structPrefix
// (ex. for "user.name", "user.email" data keys and structPrefix "user", it will match "name" and "email" struct fields).
//
// Note that while the method was inspired by binders from echo, gorilla/schema, ozzo-routing
// and other similar common routing packages, it is not intended to be a drop-in replacement.
//
// @todo Consider adding support for dot-notation keys, in addition to the prefix, (ex. parent.child.title) to express nested object keys.
func UnmarshalRequestData(data map[string][]string, dst any, structTagKey string, structPrefix string) error {
	if len(data) == 0 {
		return nil // nothing to unmarshal
	}
	dstValue := reflect.ValueOf(dst)
	if dstValue.Kind() != reflect.Pointer {
		return errors.New("dst must be a pointer")
	}
	// resolve (and lazily initialize) the value dst points to
	dstValue = dereference(dstValue)
	dstType := dstValue.Type()
	switch dstType.Kind() {
	case reflect.Map: // map[string]any
		if dstType.Elem().Kind() != reflect.Interface {
			return errors.New("dst map value type must be any/interface{}")
		}
		for k, v := range data {
			if k == JSONPayloadKey {
				continue // unmarshaled separately
			}
			total := len(v)
			if total == 1 {
				// single value -> store the inferred scalar directly
				dstValue.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(inferValue(v[0])))
			} else {
				// multiple values -> store a slice with each entry inferred
				normalized := make([]any, total)
				for i, vItem := range v {
					normalized[i] = inferValue(vItem)
				}
				dstValue.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(normalized))
			}
		}
	case reflect.Struct:
		// set a default tag key
		if structTagKey == "" {
			structTagKey = "form"
		}
		err := unmarshalInStructValue(data, dstValue, structTagKey, structPrefix)
		if err != nil {
			return err
		}
	default:
		return errors.New("dst must be a map[string]any or struct")
	}
	// @jsonPayload
	//
	// Special case to scan serialized json string without
	// normalization alongside the other data values
	// ---------------------------------------------------------------
	jsonPayloadValues := data[JSONPayloadKey]
	for _, payload := range jsonPayloadValues {
		if err := json.Unmarshal([]byte(payload), dst); err != nil {
			return err
		}
	}
	return nil
}
// unmarshalInStructValue unmarshals data into the provided struct reflect.Value fields.
// unmarshalInStructValue unmarshals data into the provided struct reflect.Value fields.
//
// Each field is matched against a data key derived from its structTagKey tag
// (or the exported field name when the tag is missing), dot-prefixed with
// structPrefix when structPrefix is non-empty.
func unmarshalInStructValue(
	data map[string][]string,
	dstStructValue reflect.Value,
	structTagKey string,
	structPrefix string,
) error {
	dstStructType := dstStructValue.Type()

	for i := 0; i < dstStructValue.NumField(); i++ {
		fieldType := dstStructType.Field(i)

		tag := fieldType.Tag.Get(structTagKey)
		if tag == "-" || (!fieldType.Anonymous && !fieldType.IsExported()) {
			continue // disabled or unexported non-anonymous struct field
		}

		fieldValue := dereference(dstStructValue.Field(i))

		ft := fieldType.Type
		if ft.Kind() == reflect.Ptr {
			ft = ft.Elem()
		}

		isSlice := ft.Kind() == reflect.Slice
		if isSlice {
			ft = ft.Elem()
		}

		// resolve the data key for the field:
		// the tag (or exported field name) dot-prefixed with structPrefix
		name := tag
		if name == "" && !fieldType.Anonymous {
			name = fieldType.Name
		}
		if name != "" && structPrefix != "" {
			name = structPrefix + "." + name
		}

		// (*)encoding.TextUnmarshaler field
		// ---
		if ft.Implements(textUnmarshalerType) || reflect.PointerTo(ft).Implements(textUnmarshalerType) {
			values, ok := data[name]
			if !ok || len(values) == 0 || !fieldValue.CanSet() {
				continue // no value to load or the field cannot be set
			}

			if isSlice {
				n := len(values)
				slice := reflect.MakeSlice(fieldValue.Type(), n, n)
				for i, v := range values {
					unmarshaler, ok := dereference(slice.Index(i)).Addr().Interface().(encoding.TextUnmarshaler)
					if ok {
						if err := unmarshaler.UnmarshalText([]byte(v)); err != nil {
							return err
						}
					}
				}
				fieldValue.Set(slice)
			} else {
				unmarshaler, ok := fieldValue.Addr().Interface().(encoding.TextUnmarshaler)
				if ok {
					if err := unmarshaler.UnmarshalText([]byte(values[0])); err != nil {
						return err
					}
				}
			}

			continue
		}

		// "regular" field
		// ---
		if ft.Kind() != reflect.Struct {
			values, ok := data[name]
			if !ok || len(values) == 0 || !fieldValue.CanSet() {
				continue // no value to load
			}

			if isSlice {
				n := len(values)
				slice := reflect.MakeSlice(fieldValue.Type(), n, n)
				for i, v := range values {
					if err := setRegularReflectedValue(dereference(slice.Index(i)), v); err != nil {
						return err
					}
				}
				fieldValue.Set(slice)
			} else {
				if err := setRegularReflectedValue(fieldValue, values[0]); err != nil {
					return err
				}
			}

			continue
		}

		// structs (embedded or nested)
		// ---

		// slice of structs
		if isSlice {
			// populating slice of structs is not supported at the moment
			// because the filling rules are ambiguous
			continue
		}

		// fix: use a new local prefix variable instead of reassigning the
		// structPrefix parameter - the previous reassignment leaked the
		// nested prefix into all subsequent fields of the same struct and,
		// for tagged fields, dropped the parent prefix entirely;
		// name already contains both structPrefix and the tag/field name,
		// and is empty only for tagless embedded (anonymous) structs whose
		// fields are treated as if defined in the parent struct (aka. the
		// parent prefix is kept)
		fieldPrefix := name
		if fieldPrefix == "" {
			fieldPrefix = structPrefix
		}

		if err := unmarshalInStructValue(data, fieldValue, structTagKey, fieldPrefix); err != nil {
			return err
		}
	}

	return nil
}
// dereference returns the underlying value v points to.
func dereference(v reflect.Value) reflect.Value {
for v.Kind() == reflect.Ptr {
if v.IsNil() {
// initialize with a new value and continue searching
v.Set(reflect.New(v.Type().Elem()))
}
v = v.Elem()
}
return v
}
// setRegularReflectedValue sets and casts value into rv.
func setRegularReflectedValue(rv reflect.Value, value string) error {
switch rv.Kind() {
case reflect.String:
rv.SetString(value)
case reflect.Bool:
if value == "" {
value = "f"
}
v, err := strconv.ParseBool(value)
if err != nil {
return err
}
rv.SetBool(v)
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
if value == "" {
value = "0"
}
v, err := strconv.ParseInt(value, 0, 64)
if err != nil {
return err
}
rv.SetInt(v)
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
if value == "" {
value = "0"
}
v, err := strconv.ParseUint(value, 0, 64)
if err != nil {
return err
}
rv.SetUint(v)
case reflect.Float32, reflect.Float64:
if value == "" {
value = "0"
}
v, err := strconv.ParseFloat(value, 64)
if err != nil {
return err
}
rv.SetFloat(v)
case reflect.Interface:
if rv.CanSet() {
rv.Set(reflect.ValueOf(inferValue(value)))
}
default:
return errors.New("unknown value type " + rv.Kind().String())
}
return nil
}
// inferNumberCharsRegex matches strings consisting only of digits, '-' and '.'
// (a cheap pre-filter before attempting the actual float parse).
var inferNumberCharsRegex = regexp.MustCompile(`^[\-\.\d]+$`)

// In order to support more seamlessly both json and multipart/form-data requests,
// the following normalization rules are applied for plain multipart string values:
//   - "true" is converted to the json "true"
//   - "false" is converted to the json "false"
//   - numeric strings are converted to json number ONLY if the resulted
//     minimal number string representation is the same as the provided raw string
//     (aka. scientific notations, "Infinity", "0.0", "0001", etc. are kept as string)
//   - any other string (empty string too) is left as it is
func inferValue(raw string) any {
	switch raw {
	case "":
		return raw
	case "true":
		return true
	case "false":
		return false
	default:
		// try to convert to number
		//
		// note: expects the provided raw string to match exactly with the
		// minimal string representation of the parsed float
		// (MatchString is used instead of Match to avoid the []byte conversion allocation)
		if (raw[0] == '-' || (raw[0] >= '0' && raw[0] <= '9')) &&
			inferNumberCharsRegex.MatchString(raw) {
			v, err := strconv.ParseFloat(raw, 64)
			if err == nil && strconv.FormatFloat(v, 'f', -1, 64) == raw {
				return v
			}
		}

		return raw
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/router/router_test.go | tools/router/router_test.go | package router_test
import (
"errors"
"net/http"
"net/http/httptest"
"testing"
"github.com/pocketbase/pocketbase/tools/hook"
"github.com/pocketbase/pocketbase/tools/router"
)
func TestRouter(t *testing.T) {
calls := ""
r := router.NewRouter(func(w http.ResponseWriter, r *http.Request) (*router.Event, router.EventCleanupFunc) {
return &router.Event{
Response: w,
Request: r,
},
func() {
calls += ":cleanup"
}
})
r.BindFunc(func(e *router.Event) error {
calls += "root_m:"
err := e.Next()
if err != nil {
calls += "/error"
}
return err
})
r.Any("/any", func(e *router.Event) error {
calls += "/any"
return nil
})
r.GET("/a", func(e *router.Event) error {
calls += "/a"
return nil
})
g1 := r.Group("/a/b").BindFunc(func(e *router.Event) error {
calls += "a_b_group_m:"
return e.Next()
})
g1.GET("/1", func(e *router.Event) error {
calls += "/1_get"
return nil
}).BindFunc(func(e *router.Event) error {
calls += "1_get_m:"
return e.Next()
})
g1.POST("/1", func(e *router.Event) error {
calls += "/1_post"
return nil
})
g1.GET("/{param}", func(e *router.Event) error {
calls += "/" + e.Request.PathValue("param")
return errors.New("test") // should be normalized to an ApiError
})
mux, err := r.BuildMux()
if err != nil {
t.Fatal(err)
}
ts := httptest.NewServer(mux)
defer ts.Close()
client := ts.Client()
scenarios := []struct {
method string
path string
calls string
}{
{http.MethodGet, "/any", "root_m:/any:cleanup"},
{http.MethodOptions, "/any", "root_m:/any:cleanup"},
{http.MethodPatch, "/any", "root_m:/any:cleanup"},
{http.MethodPut, "/any", "root_m:/any:cleanup"},
{http.MethodPost, "/any", "root_m:/any:cleanup"},
{http.MethodDelete, "/any", "root_m:/any:cleanup"},
// ---
{http.MethodPost, "/a", "root_m:/error:cleanup"}, // missing
{http.MethodGet, "/a", "root_m:/a:cleanup"},
{http.MethodHead, "/a", "root_m:/a:cleanup"}, // auto registered with the GET
{http.MethodGet, "/a/b/1", "root_m:a_b_group_m:1_get_m:/1_get:cleanup"},
{http.MethodHead, "/a/b/1", "root_m:a_b_group_m:1_get_m:/1_get:cleanup"},
{http.MethodPost, "/a/b/1", "root_m:a_b_group_m:/1_post:cleanup"},
{http.MethodGet, "/a/b/456", "root_m:a_b_group_m:/456/error:cleanup"},
}
for _, s := range scenarios {
t.Run(s.method+"_"+s.path, func(t *testing.T) {
calls = "" // reset
req, err := http.NewRequest(s.method, ts.URL+s.path, nil)
if err != nil {
t.Fatal(err)
}
_, err = client.Do(req)
if err != nil {
t.Fatal(err)
}
if calls != s.calls {
t.Fatalf("Expected calls\n%q\ngot\n%q", s.calls, calls)
}
})
}
}
func TestRouterUnbind(t *testing.T) {
calls := ""
r := router.NewRouter(func(w http.ResponseWriter, r *http.Request) (*router.Event, router.EventCleanupFunc) {
return &router.Event{
Response: w,
Request: r,
},
func() {
calls += ":cleanup"
}
})
r.Bind(&hook.Handler[*router.Event]{
Id: "root_1",
Func: func(e *router.Event) error {
calls += "root_1:"
return e.Next()
},
})
r.Bind(&hook.Handler[*router.Event]{
Id: "root_2",
Func: func(e *router.Event) error {
calls += "root_2:"
return e.Next()
},
})
r.Bind(&hook.Handler[*router.Event]{
Id: "root_3",
Func: func(e *router.Event) error {
calls += "root_3:"
return e.Next()
},
})
r.GET("/action", func(e *router.Event) error {
calls += "root_action"
return nil
}).Unbind("root_1")
ga := r.Group("/group_a")
ga.Unbind("root_1")
ga.Bind(&hook.Handler[*router.Event]{
Id: "group_a_1",
Func: func(e *router.Event) error {
calls += "group_a_1:"
return e.Next()
},
})
ga.Bind(&hook.Handler[*router.Event]{
Id: "group_a_2",
Func: func(e *router.Event) error {
calls += "group_a_2:"
return e.Next()
},
})
ga.Bind(&hook.Handler[*router.Event]{
Id: "group_a_3",
Func: func(e *router.Event) error {
calls += "group_a_3:"
return e.Next()
},
})
ga.GET("/action", func(e *router.Event) error {
calls += "group_a_action"
return nil
}).Unbind("root_2", "group_b_1", "group_a_1")
gb := r.Group("/group_b")
gb.Unbind("root_2")
gb.Bind(&hook.Handler[*router.Event]{
Id: "group_b_1",
Func: func(e *router.Event) error {
calls += "group_b_1:"
return e.Next()
},
})
gb.Bind(&hook.Handler[*router.Event]{
Id: "group_b_2",
Func: func(e *router.Event) error {
calls += "group_b_2:"
return e.Next()
},
})
gb.Bind(&hook.Handler[*router.Event]{
Id: "group_b_3",
Func: func(e *router.Event) error {
calls += "group_b_3:"
return e.Next()
},
})
gb.GET("/action", func(e *router.Event) error {
calls += "group_b_action"
return nil
}).Unbind("group_b_3", "group_a_3", "root_3")
mux, err := r.BuildMux()
if err != nil {
t.Fatal(err)
}
ts := httptest.NewServer(mux)
defer ts.Close()
client := ts.Client()
scenarios := []struct {
method string
path string
calls string
}{
{http.MethodGet, "/action", "root_2:root_3:root_action:cleanup"},
{http.MethodGet, "/group_a/action", "root_3:group_a_2:group_a_3:group_a_action:cleanup"},
{http.MethodGet, "/group_b/action", "root_1:group_b_1:group_b_2:group_b_action:cleanup"},
}
for _, s := range scenarios {
t.Run(s.method+"_"+s.path, func(t *testing.T) {
calls = "" // reset
req, err := http.NewRequest(s.method, ts.URL+s.path, nil)
if err != nil {
t.Fatal(err)
}
_, err = client.Do(req)
if err != nil {
t.Fatal(err)
}
if calls != s.calls {
t.Fatalf("Expected calls\n%q\ngot\n%q", s.calls, calls)
}
})
}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/router/event.go | tools/router/event.go | package router
import (
"encoding/json"
"encoding/xml"
"errors"
"io"
"io/fs"
"net"
"net/http"
"net/netip"
"path/filepath"
"strings"
"github.com/pocketbase/pocketbase/tools/filesystem"
"github.com/pocketbase/pocketbase/tools/hook"
"github.com/pocketbase/pocketbase/tools/picker"
"github.com/pocketbase/pocketbase/tools/store"
)
var ErrUnsupportedContentType = NewBadRequestError("Unsupported Content-Type", nil)
var ErrInvalidRedirectStatusCode = NewInternalServerError("Invalid redirect status code", nil)
var ErrFileNotFound = NewNotFoundError("File not found", nil)
const IndexPage = "index.html"
// Event specifies based Route handler event that is usually intended
// to be embedded as part of a custom event struct.
//
// NB! It is expected that the Response and Request fields are always set.
type Event struct {
Response http.ResponseWriter
Request *http.Request
hook.Event
data store.Store[string, any]
}
// RWUnwrapper specifies that an http.ResponseWriter could be "unwrapped"
// (usually used with [http.ResponseController]).
type RWUnwrapper interface {
Unwrap() http.ResponseWriter
}
// Written reports whether the current response has already been written.
//
// This method always returns false if e.Response doesn't implement the WriteTracker interface
// (all router package handlers receive a ResponseWriter that implements it unless explicitly replaced with a custom one).
func (e *Event) Written() bool {
written, _ := getWritten(e.Response)
return written
}
// Status reports the status code of the current response.
//
// This method always returns 0 if e.Response doesn't implement the StatusTracker interface
// (all router package handlers receive a ResponseWriter that implements it unless explicitly replaced with a custom one).
func (e *Event) Status() int {
status, _ := getStatus(e.Response)
return status
}
// Flush flushes buffered data to the current response.
//
// Returns [http.ErrNotSupported] if e.Response doesn't implement the [http.Flusher] interface
// (all router package handlers receive a ResponseWriter that implements it unless explicitly replaced with a custom one).
func (e *Event) Flush() error {
return http.NewResponseController(e.Response).Flush()
}
// IsTLS reports whether the connection on which the request was received is TLS.
func (e *Event) IsTLS() bool {
return e.Request.TLS != nil
}
// SetCookie is an alias for [http.SetCookie].
//
// SetCookie adds a Set-Cookie header to the current response's headers.
// The provided cookie must have a valid Name.
// Invalid cookies may be silently dropped.
func (e *Event) SetCookie(cookie *http.Cookie) {
http.SetCookie(e.Response, cookie)
}
// RemoteIP returns the IP address of the client that sent the request.
//
// IPv6 addresses are returned expanded.
// For example, "2001:db8::1" becomes "2001:0db8:0000:0000:0000:0000:0000:0001".
//
// Note that if you are behind reverse proxy(ies), this method returns
// the IP of the last connecting proxy.
func (e *Event) RemoteIP() string {
ip, _, _ := net.SplitHostPort(e.Request.RemoteAddr)
parsed, _ := netip.ParseAddr(ip)
return parsed.StringExpanded()
}
// FindUploadedFiles extracts all form files of "key" from a http request
// and returns a slice with filesystem.File instances (if any).
func (e *Event) FindUploadedFiles(key string) ([]*filesystem.File, error) {
if e.Request.MultipartForm == nil {
err := e.Request.ParseMultipartForm(DefaultMaxMemory)
if err != nil {
return nil, err
}
}
if e.Request.MultipartForm == nil || e.Request.MultipartForm.File == nil || len(e.Request.MultipartForm.File[key]) == 0 {
return nil, http.ErrMissingFile
}
result := make([]*filesystem.File, 0, len(e.Request.MultipartForm.File[key]))
for _, fh := range e.Request.MultipartForm.File[key] {
file, err := filesystem.NewFileFromMultipart(fh)
if err != nil {
return nil, err
}
result = append(result, file)
}
return result, nil
}
// Store
// -------------------------------------------------------------------
// Get retrieves single value from the current event data store.
func (e *Event) Get(key string) any {
return e.data.Get(key)
}
// GetAll returns a copy of the current event data store.
func (e *Event) GetAll() map[string]any {
return e.data.GetAll()
}
// Set saves single value into the current event data store.
func (e *Event) Set(key string, value any) {
e.data.Set(key, value)
}
// SetAll saves all items from m into the current event data store.
func (e *Event) SetAll(m map[string]any) {
for k, v := range m {
e.Set(k, v)
}
}
// Response writers
// -------------------------------------------------------------------
const headerContentType = "Content-Type"
func (e *Event) setResponseHeaderIfEmpty(key, value string) {
header := e.Response.Header()
if header.Get(key) == "" {
header.Set(key, value)
}
}
// String writes a plain string response.
func (e *Event) String(status int, data string) error {
e.setResponseHeaderIfEmpty(headerContentType, "text/plain; charset=utf-8")
e.Response.WriteHeader(status)
_, err := e.Response.Write([]byte(data))
return err
}
// HTML writes an HTML response.
func (e *Event) HTML(status int, data string) error {
e.setResponseHeaderIfEmpty(headerContentType, "text/html; charset=utf-8")
e.Response.WriteHeader(status)
_, err := e.Response.Write([]byte(data))
return err
}
const jsonFieldsParam = "fields"
// JSON writes a JSON response.
//
// It also provides a generic response data fields picker if the "fields" query parameter is set.
// For example, if you are requesting `?fields=a,b` for `e.JSON(200, map[string]int{ "a":1, "b":2, "c":3 })`,
// it should result in a JSON response like: `{"a":1, "b": 2}`.
func (e *Event) JSON(status int, data any) error {
e.setResponseHeaderIfEmpty(headerContentType, "application/json")
e.Response.WriteHeader(status)
rawFields := e.Request.URL.Query().Get(jsonFieldsParam)
// error response or no fields to pick
if rawFields == "" || status < 200 || status > 299 {
return json.NewEncoder(e.Response).Encode(data)
}
// pick only the requested fields
modified, err := picker.Pick(data, rawFields)
if err != nil {
return err
}
return json.NewEncoder(e.Response).Encode(modified)
}
// XML writes an XML response.
// It automatically prepends the generic [xml.Header] string to the response.
func (e *Event) XML(status int, data any) error {
e.setResponseHeaderIfEmpty(headerContentType, "application/xml; charset=utf-8")
e.Response.WriteHeader(status)
if _, err := e.Response.Write([]byte(xml.Header)); err != nil {
return err
}
return xml.NewEncoder(e.Response).Encode(data)
}
// Stream streams the specified reader into the response.
func (e *Event) Stream(status int, contentType string, reader io.Reader) error {
e.Response.Header().Set(headerContentType, contentType)
e.Response.WriteHeader(status)
_, err := io.Copy(e.Response, reader)
return err
}
// Blob writes a blob (bytes slice) response.
func (e *Event) Blob(status int, contentType string, b []byte) error {
e.setResponseHeaderIfEmpty(headerContentType, contentType)
e.Response.WriteHeader(status)
_, err := e.Response.Write(b)
return err
}
// FileFS serves the specified filename from fsys.
//
// It is similar to [echo.FileFS] for consistency with earlier versions.
func (e *Event) FileFS(fsys fs.FS, filename string) error {
f, err := fsys.Open(filename)
if err != nil {
return ErrFileNotFound
}
defer f.Close()
fi, err := f.Stat()
if err != nil {
return err
}
// if it is a directory try to open its index.html file
if fi.IsDir() {
filename = filepath.ToSlash(filepath.Join(filename, IndexPage))
f, err = fsys.Open(filename)
if err != nil {
return ErrFileNotFound
}
defer f.Close()
fi, err = f.Stat()
if err != nil {
return err
}
}
ff, ok := f.(io.ReadSeeker)
if !ok {
return errors.New("[FileFS] file does not implement io.ReadSeeker")
}
http.ServeContent(e.Response, e.Request, fi.Name(), fi.ModTime(), ff)
return nil
}
// NoContent writes a response with no body (ex. 204).
func (e *Event) NoContent(status int) error {
e.Response.WriteHeader(status)
return nil
}
// Redirect writes a redirect response to the specified url.
// The status code must be in between 300 – 399 range.
func (e *Event) Redirect(status int, url string) error {
	if status >= 300 && status <= 399 {
		e.Response.Header().Set("Location", url)
		e.Response.WriteHeader(status)
		return nil
	}

	return ErrInvalidRedirectStatusCode
}
// ApiError helpers
// -------------------------------------------------------------------
func (e *Event) Error(status int, message string, errData any) *ApiError {
return NewApiError(status, message, errData)
}
func (e *Event) BadRequestError(message string, errData any) *ApiError {
return NewBadRequestError(message, errData)
}
func (e *Event) NotFoundError(message string, errData any) *ApiError {
return NewNotFoundError(message, errData)
}
func (e *Event) ForbiddenError(message string, errData any) *ApiError {
return NewForbiddenError(message, errData)
}
func (e *Event) UnauthorizedError(message string, errData any) *ApiError {
return NewUnauthorizedError(message, errData)
}
func (e *Event) TooManyRequestsError(message string, errData any) *ApiError {
return NewTooManyRequestsError(message, errData)
}
func (e *Event) InternalServerError(message string, errData any) *ApiError {
return NewInternalServerError(message, errData)
}
// Binders
// -------------------------------------------------------------------
const DefaultMaxMemory = 32 << 20 // 32mb
// BindBody unmarshals the request body into the provided dst.
//
// dst must be either a struct pointer or map[string]any.
//
// The rules how the body will be scanned depends on the request Content-Type.
//
// Currently the following Content-Types are supported:
// - application/json
// - text/xml, application/xml
// - multipart/form-data, application/x-www-form-urlencoded
//
// Respectively the following struct tags are supported (again, which one will be used depends on the Content-Type):
// - "json" (json body)- uses the builtin Go json package for unmarshaling.
// - "xml" (xml body) - uses the builtin Go xml package for unmarshaling.
// - "form" (form data) - utilizes the custom [router.UnmarshalRequestData] method.
//
// NB! When dst is a struct make sure that it doesn't have public fields
// that shouldn't be bindable and it is advisable for such fields to be unexported
// or have a separate struct just for the binding. For example:
//
// data := struct{
// somethingPrivate string
//
// Title string `json:"title" form:"title"`
// Total int `json:"total" form:"total"`
// }
// err := e.BindBody(&data)
func (e *Event) BindBody(dst any) error {
	if e.Request.ContentLength == 0 {
		return nil // nothing to bind
	}

	contentType := e.Request.Header.Get(headerContentType)

	switch {
	case strings.HasPrefix(contentType, "application/json"):
		err := json.NewDecoder(e.Request.Body).Decode(dst)
		if err == nil {
			// manually call Reread because single call of json.Decoder.Decode()
			// doesn't ensure that the entire body is a valid json string
			// and it is not guaranteed that it will reach EOF to trigger the reread reset
			// (ex. in case of trailing spaces or invalid trailing parts like: `{"test":1},something`)
			if body, ok := e.Request.Body.(Rereader); ok {
				body.Reread()
			}
		}
		return err
	case strings.HasPrefix(contentType, "multipart/form-data"):
		if err := e.Request.ParseMultipartForm(DefaultMaxMemory); err != nil {
			return err
		}
		return UnmarshalRequestData(e.Request.Form, dst, "", "")
	case strings.HasPrefix(contentType, "application/x-www-form-urlencoded"):
		if err := e.Request.ParseForm(); err != nil {
			return err
		}
		return UnmarshalRequestData(e.Request.Form, dst, "", "")
	case strings.HasPrefix(contentType, "text/xml"),
		strings.HasPrefix(contentType, "application/xml"):
		return xml.NewDecoder(e.Request.Body).Decode(dst)
	default:
		return ErrUnsupportedContentType
	}
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/router/rereadable_read_closer.go | tools/router/rereadable_read_closer.go | package router
import (
"bytes"
"io"
)
var (
_ io.ReadCloser = (*RereadableReadCloser)(nil)
_ Rereader = (*RereadableReadCloser)(nil)
)
// Rereader defines an interface for rewindable readers.
type Rereader interface {
Reread()
}
// RereadableReadCloser defines a wrapper around a io.ReadCloser reader
// allowing to read the original reader multiple times.
type RereadableReadCloser struct {
io.ReadCloser
copy *bytes.Buffer
active io.Reader
}
// Read implements the standard io.Reader interface.
//
// It reads up to len(b) bytes into b and at at the same time writes
// the read data into an internal bytes buffer.
//
// On EOF the r is "rewinded" to allow reading from r multiple times.
func (r *RereadableReadCloser) Read(b []byte) (int, error) {
if r.active == nil {
if r.copy == nil {
r.copy = &bytes.Buffer{}
}
r.active = io.TeeReader(r.ReadCloser, r.copy)
}
n, err := r.active.Read(b)
if err == io.EOF {
r.Reread()
}
return n, err
}
// Reread satisfies the [Rereader] interface and resets the r internal state to allow rereads.
//
// note: not named Reset to avoid conflicts with other reader interfaces.
func (r *RereadableReadCloser) Reread() {
if r.copy == nil || r.copy.Len() == 0 {
return // nothing to reset or it has been already reset
}
oldCopy := r.copy
r.copy = &bytes.Buffer{}
r.active = io.TeeReader(oldCopy, r.copy)
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
pocketbase/pocketbase | https://github.com/pocketbase/pocketbase/blob/b1da83e5165f938453fbb21e748bca317b08239d/tools/router/route.go | tools/router/route.go | package router
import "github.com/pocketbase/pocketbase/tools/hook"
// Route describes a single registered route - its HTTP method, path
// pattern, handler action and route-specific middlewares.
type Route[T hook.Resolver] struct {
	excludedMiddlewares map[string]struct{} // ids of middlewares to skip from the execution chain (populated by Unbind)

	Action func(e T) error // the route handler

	Method string // HTTP method/verb (ex. "GET"); presumably matched against the incoming request - confirm with the router registration code

	Path string // the route path pattern

	Middlewares []*hook.Handler[T] // middlewares registered directly on this route
}
// BindFunc registers one or multiple middleware functions to the current route.
//
// The registered middleware functions are "anonymous" and with default priority,
// aka. executes in the order they were registered.
//
// If you need to specify a named middleware (ex. so that it can be removed)
// or middleware with custom exec priority, use the [Route.Bind] method.
func (route *Route[T]) BindFunc(middlewareFuncs ...func(e T) error) *Route[T] {
	for i := range middlewareFuncs {
		handler := &hook.Handler[T]{Func: middlewareFuncs[i]}
		route.Middlewares = append(route.Middlewares, handler)
	}

	return route
}
// Bind registers one or multiple middleware handlers to the current route.
//
// If any of the newly added handlers has an id that was previously
// "excluded" via [Route.Unbind], that exclusion is lifted.
func (route *Route[T]) Bind(middlewares ...*hook.Handler[T]) *Route[T] {
	route.Middlewares = append(route.Middlewares, middlewares...)

	if route.excludedMiddlewares == nil {
		return route
	}

	// unmark the newly added middlewares in case they were previously "excluded"
	for _, handler := range middlewares {
		if handler.Id != "" {
			delete(route.excludedMiddlewares, handler.Id)
		}
	}

	return route
}
// Unbind removes one or more middlewares with the specified id(s) from the current route.
//
// It also adds the removed middleware ids to an exclude list so that they could be skipped from
// the execution chain in case the middleware is registered in a parent group.
//
// Anonymous middlewares are considered non-removable, aka. this method
// does nothing if the middleware id is an empty string.
func (route *Route[T]) Unbind(middlewareIds ...string) *Route[T] {
	for _, id := range middlewareIds {
		if id == "" {
			continue // anonymous middlewares cannot be removed
		}

		// drop all matching handlers (in-place filter reusing the backing array)
		kept := route.Middlewares[:0]
		for _, handler := range route.Middlewares {
			if handler.Id != id {
				kept = append(kept, handler)
			}
		}
		route.Middlewares = kept

		// remember the id so it is skipped even when registered in a parent group
		if route.excludedMiddlewares == nil {
			route.excludedMiddlewares = map[string]struct{}{}
		}
		route.excludedMiddlewares[id] = struct{}{}
	}

	return route
}
| go | MIT | b1da83e5165f938453fbb21e748bca317b08239d | 2026-01-07T08:35:43.517402Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.