// NOTE(review): the two lines above this comment in the original capture
// ("CombinedText stringlengths 4 3.42M |" and "|---|") are a dataset/table
// extraction artifact, not part of the Go source file.
package main
import (
"bytes"
"encoding/binary"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"math/rand"
"os"
"runtime"
"runtime/pprof"
"strconv"
"strings"
"time"
"github.com/boltdb/bolt"
)
var (
	// ErrUsage is returned when a usage message was printed and the process
	// should simply exit with an error.
	ErrUsage = errors.New("usage")

	// ErrUnknownCommand is returned when a CLI command is not specified.
	ErrUnknownCommand = errors.New("unknown command")

	// ErrPathRequired is returned when the path to a Bolt database is not specified.
	ErrPathRequired = errors.New("path required")

	// ErrFileNotFound is returned when a Bolt database does not exist.
	ErrFileNotFound = errors.New("file not found")

	// ErrInvalidValue is returned when a benchmark reads an unexpected value.
	ErrInvalidValue = errors.New("invalid value")

	// ErrCorrupt is returned when a checking a data file finds errors.
	// FIX: previously duplicated ErrInvalidValue's message ("invalid value"),
	// so a failed "bolt check" printed a misleading error. Callers compare
	// these sentinels by identity, so changing the text is safe.
	ErrCorrupt = errors.New("corrupt database")

	// ErrNonDivisibleBatchSize is returned when the batch size can't be evenly
	// divided by the iteration count.
	ErrNonDivisibleBatchSize = errors.New("number of iterations must be divisible by the batch size")

	// ErrPageIDRequired is returned when a required page id is not specified.
	ErrPageIDRequired = errors.New("page id required")
)
// main is the CLI entry point. A usage error exits with status 2; any
// other error is printed and exits with status 1.
func main() {
	err := NewMain().Run(os.Args[1:]...)
	switch {
	case err == ErrUsage:
		os.Exit(2)
	case err != nil:
		fmt.Println(err.Error())
		os.Exit(1)
	}
}
// Main represents the main program execution. It carries the standard
// streams so sub-commands can be exercised with substituted readers/writers.
type Main struct {
	Stdin  io.Reader
	Stdout io.Writer
	Stderr io.Writer
}

// NewMain returns a new instance of Main connected to the standard input/output.
func NewMain() *Main {
	m := &Main{}
	m.Stdin = os.Stdin
	m.Stdout = os.Stdout
	m.Stderr = os.Stderr
	return m
}
// Run executes the program, dispatching to the sub-command named by the
// first argument; the remaining arguments are passed through unchanged.
func (m *Main) Run(args ...string) error {
	// A command name must come first; a leading flag means no command was given.
	if len(args) == 0 || strings.HasPrefix(args[0], "-") {
		fmt.Fprintln(m.Stderr, m.Usage())
		return ErrUsage
	}

	name, rest := args[0], args[1:]
	switch name {
	case "help":
		fmt.Fprintln(m.Stderr, m.Usage())
		return ErrUsage
	case "bench":
		return newBenchCommand(m).Run(rest...)
	case "check":
		return newCheckCommand(m).Run(rest...)
	case "dump":
		return newDumpCommand(m).Run(rest...)
	case "info":
		return newInfoCommand(m).Run(rest...)
	case "pages":
		return newPagesCommand(m).Run(rest...)
	case "stats":
		return newStatsCommand(m).Run(rest...)
	}
	return ErrUnknownCommand
}
// Usage returns the help message.
//
// FIX: the "dump" command is dispatched by Run but was missing from this
// command listing; it is now included.
func (m *Main) Usage() string {
	return strings.TrimLeft(`
Bolt is a tool for inspecting bolt databases.
Usage:
bolt command [arguments]
The commands are:
bench run synthetic benchmark against bolt
check verifies integrity of bolt database
dump print a hexadecimal dump of a single page
info print basic info
help print this screen
pages print list of pages with their types
stats iterate over all pages and generate usage stats
Use "bolt [command] -h" for more information about a command.
`, "\n")
}
// CheckCommand represents the "check" command execution.
type CheckCommand struct {
	Stdin  io.Reader
	Stdout io.Writer
	Stderr io.Writer
}

// newCheckCommand returns a CheckCommand wired to the main program's streams.
func newCheckCommand(m *Main) *CheckCommand {
	c := &CheckCommand{}
	c.Stdin, c.Stdout, c.Stderr = m.Stdin, m.Stdout, m.Stderr
	return c
}
// Run executes the command: it opens the database given as the first
// positional argument and streams consistency-check errors to stdout.
// Returns ErrCorrupt when any check error was reported.
func (cmd *CheckCommand) Run(args ...string) error {
	// Parse flags.
	fs := flag.NewFlagSet("", flag.ContinueOnError)
	help := fs.Bool("h", false, "")
	if err := fs.Parse(args); err != nil {
		return err
	} else if *help {
		fmt.Fprintln(cmd.Stderr, cmd.Usage())
		return ErrUsage
	}

	// Require database path.
	path := fs.Arg(0)
	if path == "" {
		return ErrPathRequired
	} else if _, err := os.Stat(path); os.IsNotExist(err) {
		return ErrFileNotFound
	}

	// Open database.
	db, err := bolt.Open(path, 0666, nil)
	if err != nil {
		return err
	}
	defer db.Close()

	// Perform consistency check.
	return db.View(func(tx *bolt.Tx) error {
		// FIX: the original wrapped the channel receive in a single-case
		// select inside a labeled for-loop; ranging over the channel is
		// equivalent (it ends when Check closes the channel) and simpler.
		var count int
		for err := range tx.Check() {
			fmt.Fprintln(cmd.Stdout, err)
			count++
		}

		// Print summary of errors.
		if count > 0 {
			fmt.Fprintf(cmd.Stdout, "%d errors found\n", count)
			return ErrCorrupt
		}

		// Notify user that database is valid.
		fmt.Fprintln(cmd.Stdout, "OK")
		return nil
	})
}
// Usage returns the help message.
func (cmd *CheckCommand) Usage() string {
	const text = `
usage: bolt check PATH
Check opens a database at PATH and runs an exhaustive check to verify that
all pages are accessible or are marked as freed. It also verifies that no
pages are double referenced.
Verification errors will stream out as they are found and the process will
return after all pages have been checked.
`
	return strings.TrimLeft(text, "\n")
}
// InfoCommand represents the "info" command execution.
type InfoCommand struct {
	Stdin  io.Reader
	Stdout io.Writer
	Stderr io.Writer
}

// newInfoCommand returns an InfoCommand wired to the main program's streams.
func newInfoCommand(m *Main) *InfoCommand {
	c := &InfoCommand{}
	c.Stdin, c.Stdout, c.Stderr = m.Stdin, m.Stdout, m.Stderr
	return c
}

// Run executes the command: prints basic information (the page size) for
// the database at the first positional argument.
func (cmd *InfoCommand) Run(args ...string) error {
	fs := flag.NewFlagSet("", flag.ContinueOnError)
	help := fs.Bool("h", false, "")
	if err := fs.Parse(args); err != nil {
		return err
	}
	if *help {
		fmt.Fprintln(cmd.Stderr, cmd.Usage())
		return ErrUsage
	}

	// The database path is required and must exist.
	dbPath := fs.Arg(0)
	if dbPath == "" {
		return ErrPathRequired
	}
	if _, err := os.Stat(dbPath); os.IsNotExist(err) {
		return ErrFileNotFound
	}

	db, err := bolt.Open(dbPath, 0666, nil)
	if err != nil {
		return err
	}
	defer db.Close()

	// Print basic database info.
	fmt.Fprintf(cmd.Stdout, "Page Size: %d\n", db.Info().PageSize)
	return nil
}

// Usage returns the help message.
func (cmd *InfoCommand) Usage() string {
	const text = `
usage: bolt info PATH
Info prints basic information about the Bolt database at PATH.
`
	return strings.TrimLeft(text, "\n")
}
// DumpCommand represents the "dump" command execution.
type DumpCommand struct {
	Stdin  io.Reader
	Stdout io.Writer
	Stderr io.Writer
}

// newDumpCommand returns a DumpCommand wired to the main program's streams.
func newDumpCommand(m *Main) *DumpCommand {
	c := &DumpCommand{}
	c.Stdin, c.Stdout, c.Stderr = m.Stdin, m.Stdout, m.Stderr
	return c
}

// Run executes the command: hex-dumps the page selected by -page from
// the database given as the first positional argument.
func (cmd *DumpCommand) Run(args ...string) error {
	fs := flag.NewFlagSet("", flag.ContinueOnError)
	help := fs.Bool("h", false, "")
	pageID := fs.Int("page", -1, "")
	if err := fs.Parse(args); err != nil {
		return err
	}
	if *help {
		fmt.Fprintln(cmd.Stderr, cmd.Usage())
		return ErrUsage
	}

	// Require database path and page id (checked in that order).
	dbPath := fs.Arg(0)
	if dbPath == "" {
		return ErrPathRequired
	}
	if _, err := os.Stat(dbPath); os.IsNotExist(err) {
		return ErrFileNotFound
	}
	if *pageID == -1 {
		return ErrPageIDRequired
	}

	// Open the database just long enough to learn the page size.
	db, err := bolt.Open(dbPath, 0666, nil)
	if err != nil {
		return err
	}
	pageSize := db.Info().PageSize
	_ = db.Close()

	// Re-open the raw file for direct page reads.
	f, err := os.Open(dbPath)
	if err != nil {
		return err
	}
	defer func() { _ = f.Close() }()

	// Print page to stdout.
	return cmd.PrintPage(cmd.Stdout, f, *pageID, pageSize)
}
// PrintPage writes one page from r as a hex dump: 16 bytes per line in
// 2-byte groups, with consecutive repeated lines collapsed into a single
// "*" marker (the final line is always printed so the page's full extent
// is visible).
func (cmd *DumpCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSize int) error {
	const lineWidth = 16

	// Read the whole page into memory.
	page := make([]byte, pageSize)
	base := pageID * pageSize
	n, err := r.ReadAt(page, int64(base))
	if err != nil {
		return err
	}
	if n != pageSize {
		return io.ErrUnexpectedEOF
	}

	var last []byte
	var inSkip bool
	for off := 0; off < pageSize; off += lineWidth {
		cur := page[off : off+lineWidth]
		final := off == pageSize-lineWidth

		if bytes.Equal(cur, last) && !final {
			// Repeated line: emit the skip marker once per run.
			if !inSkip {
				fmt.Fprintf(w, "%07x *\n", base+off)
				inSkip = true
			}
		} else {
			// Print line as hexadecimal in 2-byte groups.
			fmt.Fprintf(w, "%07x %04x %04x %04x %04x %04x %04x %04x %04x\n", base+off,
				cur[0:2], cur[2:4], cur[4:6], cur[6:8],
				cur[8:10], cur[10:12], cur[12:14], cur[14:16],
			)
			inSkip = false
		}

		last = cur
	}
	fmt.Fprint(w, "\n")
	return nil
}

// Usage returns the help message.
func (cmd *DumpCommand) Usage() string {
	const text = `
usage: bolt dump -page PAGEID PATH
Dump prints a hexidecimal dump of a single page.
`
	return strings.TrimLeft(text, "\n")
}
// PagesCommand represents the "pages" command execution.
type PagesCommand struct {
	Stdin  io.Reader
	Stdout io.Writer
	Stderr io.Writer
}

// newPagesCommand returns a PagesCommand wired to the main program's streams.
func newPagesCommand(m *Main) *PagesCommand {
	c := &PagesCommand{}
	c.Stdin, c.Stdout, c.Stderr = m.Stdin, m.Stdout, m.Stderr
	return c
}

// Run executes the command: walks every page in the database and prints
// one table row per page (id, type, item count, overflow count).
func (cmd *PagesCommand) Run(args ...string) error {
	fs := flag.NewFlagSet("", flag.ContinueOnError)
	help := fs.Bool("h", false, "")
	if err := fs.Parse(args); err != nil {
		return err
	}
	if *help {
		fmt.Fprintln(cmd.Stderr, cmd.Usage())
		return ErrUsage
	}

	// The database path is required and must exist.
	dbPath := fs.Arg(0)
	if dbPath == "" {
		return ErrPathRequired
	}
	if _, err := os.Stat(dbPath); os.IsNotExist(err) {
		return ErrFileNotFound
	}

	db, err := bolt.Open(dbPath, 0666, nil)
	if err != nil {
		return err
	}
	defer func() { _ = db.Close() }()

	// Write header.
	fmt.Fprintln(cmd.Stdout, "ID TYPE ITEMS OVRFLW")
	fmt.Fprintln(cmd.Stdout, "======== ========== ====== ======")

	// NOTE(review): a writable transaction is used here even though this
	// is a read-only report — presumably Tx.Page needs it; confirm.
	return db.Update(func(tx *bolt.Tx) error {
		for id := 0; ; {
			p, err := tx.Page(id)
			if err != nil {
				return &PageError{ID: id, Err: err}
			}
			if p == nil {
				// Walked past the last page.
				return nil
			}

			// Only display count and overflow if this is a non-free page.
			var count, overflow string
			if p.Type != "free" {
				count = strconv.Itoa(p.Count)
				if p.OverflowCount > 0 {
					overflow = strconv.Itoa(p.OverflowCount)
				}
			}

			// Print table row.
			fmt.Fprintf(cmd.Stdout, "%-8d %-10s %-6s %-6s\n", p.ID, p.Type, count, overflow)

			// Skip past any overflow blocks to the next logical page.
			id++
			if p.Type != "free" {
				id += p.OverflowCount
			}
		}
	})
}

// Usage returns the help message.
func (cmd *PagesCommand) Usage() string {
	const text = `
usage: bolt pages PATH
Pages prints a table of pages with their type (meta, leaf, branch, freelist).
Leaf and branch pages will show a key count in the "items" column while the
freelist will show the number of free pages in the "items" column.
The "overflow" column shows the number of blocks that the page spills over
into. Normally there is no overflow but large keys and values can cause
a single page to take up multiple blocks.
`
	return strings.TrimLeft(text, "\n")
}
// StatsCommand represents the "stats" command execution.
type StatsCommand struct {
	Stdin  io.Reader
	Stdout io.Writer
	Stderr io.Writer
}

// newStatsCommand returns a StatsCommand wired to the main program's streams.
func newStatsCommand(m *Main) *StatsCommand {
	c := &StatsCommand{}
	c.Stdin, c.Stdout, c.Stderr = m.Stdin, m.Stdout, m.Stderr
	return c
}
// Run executes the command: aggregates bucket statistics for every
// top-level bucket whose name begins with an optional prefix (second
// positional argument) and prints a usage report.
func (cmd *StatsCommand) Run(args ...string) error {
	// Parse flags.
	fs := flag.NewFlagSet("", flag.ContinueOnError)
	help := fs.Bool("h", false, "")
	if err := fs.Parse(args); err != nil {
		return err
	} else if *help {
		fmt.Fprintln(cmd.Stderr, cmd.Usage())
		return ErrUsage
	}

	// Require database path.
	path, prefix := fs.Arg(0), fs.Arg(1)
	if path == "" {
		return ErrPathRequired
	} else if _, err := os.Stat(path); os.IsNotExist(err) {
		return ErrFileNotFound
	}

	// Open database.
	db, err := bolt.Open(path, 0666, nil)
	if err != nil {
		return err
	}
	defer db.Close()

	return db.View(func(tx *bolt.Tx) error {
		// Aggregate stats across all top-level buckets matching the prefix.
		var s bolt.BucketStats
		var count int
		if err := tx.ForEach(func(name []byte, b *bolt.Bucket) error {
			if bytes.HasPrefix(name, []byte(prefix)) {
				s.Add(b.Stats())
				count++
			}
			return nil
		}); err != nil {
			return err
		}

		fmt.Fprintf(cmd.Stdout, "Aggregate statistics for %d buckets\n\n", count)

		fmt.Fprintln(cmd.Stdout, "Page count statistics")
		fmt.Fprintf(cmd.Stdout, "\tNumber of logical branch pages: %d\n", s.BranchPageN)
		fmt.Fprintf(cmd.Stdout, "\tNumber of physical branch overflow pages: %d\n", s.BranchOverflowN)
		fmt.Fprintf(cmd.Stdout, "\tNumber of logical leaf pages: %d\n", s.LeafPageN)
		fmt.Fprintf(cmd.Stdout, "\tNumber of physical leaf overflow pages: %d\n", s.LeafOverflowN)

		fmt.Fprintln(cmd.Stdout, "Tree statistics")
		fmt.Fprintf(cmd.Stdout, "\tNumber of keys/value pairs: %d\n", s.KeyN)
		fmt.Fprintf(cmd.Stdout, "\tNumber of levels in B+tree: %d\n", s.Depth)

		fmt.Fprintln(cmd.Stdout, "Page size utilization")
		fmt.Fprintf(cmd.Stdout, "\tBytes allocated for physical branch pages: %d\n", s.BranchAlloc)
		var percentage int
		if s.BranchAlloc != 0 {
			percentage = int(float32(s.BranchInuse) * 100.0 / float32(s.BranchAlloc))
		}
		fmt.Fprintf(cmd.Stdout, "\tBytes actually used for branch data: %d (%d%%)\n", s.BranchInuse, percentage)
		fmt.Fprintf(cmd.Stdout, "\tBytes allocated for physical leaf pages: %d\n", s.LeafAlloc)
		percentage = 0
		if s.LeafAlloc != 0 {
			percentage = int(float32(s.LeafInuse) * 100.0 / float32(s.LeafAlloc))
		}
		fmt.Fprintf(cmd.Stdout, "\tBytes actually used for leaf data: %d (%d%%)\n", s.LeafInuse, percentage)

		fmt.Fprintln(cmd.Stdout, "Bucket statistics")
		fmt.Fprintf(cmd.Stdout, "\tTotal number of buckets: %d\n", s.BucketN)
		// FIX: this percentage divided by s.BucketN unconditionally; when no
		// buckets match the prefix BucketN is 0, producing a float division
		// by zero (every sibling percentage above/below is already guarded).
		percentage = 0
		if s.BucketN != 0 {
			percentage = int(float32(s.InlineBucketN) * 100.0 / float32(s.BucketN))
		}
		fmt.Fprintf(cmd.Stdout, "\tTotal number on inlined buckets: %d (%d%%)\n", s.InlineBucketN, percentage)
		percentage = 0
		if s.LeafInuse != 0 {
			percentage = int(float32(s.InlineBucketInuse) * 100.0 / float32(s.LeafInuse))
		}
		fmt.Fprintf(cmd.Stdout, "\tBytes used for inlined buckets: %d (%d%%)\n", s.InlineBucketInuse, percentage)
		return nil
	})
}
// Usage returns the help message.
func (cmd *StatsCommand) Usage() string {
	const text = `
usage: bolt stats PATH
Stats performs an extensive search of the database to track every page
reference. It starts at the current meta page and recursively iterates
through every accessible bucket.
The following errors can be reported:
already freed
The page is referenced more than once in the freelist.
unreachable unfreed
The page is not referenced by a bucket or in the freelist.
reachable freed
The page is referenced by a bucket but is also in the freelist.
out of bounds
A page is referenced that is above the high water mark.
multiple references
A page is referenced by more than one other page.
invalid type
The page type is not "meta", "leaf", "branch", or "freelist".
No errors should occur in your database. However, if for some reason you
experience corruption, please submit a ticket to the Bolt project page:
https://github.com/boltdb/bolt/issues
`
	return strings.TrimLeft(text, "\n")
}
// benchBucketName is the bucket all benchmark keys are written into.
var benchBucketName = []byte("bench")

// BenchCommand represents the "bench" command execution.
type BenchCommand struct {
	Stdin  io.Reader
	Stdout io.Writer
	Stderr io.Writer
}

// newBenchCommand returns a BenchCommand wired to the main program's streams.
func newBenchCommand(m *Main) *BenchCommand {
	c := &BenchCommand{}
	c.Stdin, c.Stdout, c.Stderr = m.Stdin, m.Stdout, m.Stderr
	return c
}
// Run executes the "bench" command: writes a synthetic workload into a
// (usually temporary) database, reads it back, and reports timings.
func (cmd *BenchCommand) Run(args ...string) error {
	// Parse CLI arguments.
	options, err := cmd.ParseFlags(args)
	if err != nil {
		return err
	}

	// Remove path if "-work" is not set. Otherwise keep path.
	if options.Work {
		fmt.Fprintf(cmd.Stdout, "work: %s\n", options.Path)
	} else {
		defer os.Remove(options.Path)
	}

	// Create database.
	db, err := bolt.Open(options.Path, 0666, nil)
	if err != nil {
		return err
	}
	db.NoSync = options.NoSync
	defer db.Close()

	// Write to the database.
	var results BenchResults
	if err := cmd.runWrites(db, options, &results); err != nil {
		// FIX: was fmt.Errorf("write: ", err) — the format string had no
		// verb, so the underlying error text was dropped (go vet flags
		// this). Message now matches the read-path error's style.
		return fmt.Errorf("bench: write: %s", err)
	}

	// Read from the database.
	if err := cmd.runReads(db, options, &results); err != nil {
		return fmt.Errorf("bench: read: %s", err)
	}

	// Print results to the command's stderr (was os.Stderr directly;
	// using the injected stream keeps the command consistent/testable).
	fmt.Fprintf(cmd.Stderr, "# Write\t%v\t(%v/op)\t(%v op/sec)\n", results.WriteDuration, results.WriteOpDuration(), results.WriteOpsPerSecond())
	fmt.Fprintf(cmd.Stderr, "# Read\t%v\t(%v/op)\t(%v op/sec)\n", results.ReadDuration, results.ReadOpDuration(), results.ReadOpsPerSecond())
	fmt.Fprintln(cmd.Stderr, "")
	return nil
}
// ParseFlags parses the command line flags for "bench" and applies
// defaults: the batch size falls back to the iteration count, and a
// temporary database path is generated when none is supplied.
func (cmd *BenchCommand) ParseFlags(args []string) (*BenchOptions, error) {
	var options BenchOptions

	// Parse flagset.
	fs := flag.NewFlagSet("", flag.ContinueOnError)
	fs.StringVar(&options.ProfileMode, "profile-mode", "rw", "")
	fs.StringVar(&options.WriteMode, "write-mode", "seq", "")
	fs.StringVar(&options.ReadMode, "read-mode", "seq", "")
	fs.IntVar(&options.Iterations, "count", 1000, "")
	fs.IntVar(&options.BatchSize, "batch-size", 0, "")
	fs.IntVar(&options.KeySize, "key-size", 8, "")
	fs.IntVar(&options.ValueSize, "value-size", 32, "")
	fs.StringVar(&options.CPUProfile, "cpuprofile", "", "")
	fs.StringVar(&options.MemProfile, "memprofile", "", "")
	// FIX: "blockprofile" was registered twice; the flag package panics
	// with "flag redefined: blockprofile" on the duplicate registration,
	// so the bench command could never run. The duplicate is removed.
	fs.StringVar(&options.BlockProfile, "blockprofile", "", "")
	fs.Float64Var(&options.FillPercent, "fill-percent", bolt.DefaultFillPercent, "")
	fs.BoolVar(&options.NoSync, "no-sync", false, "")
	fs.BoolVar(&options.Work, "work", false, "")
	fs.StringVar(&options.Path, "path", "", "")
	fs.SetOutput(cmd.Stderr)
	if err := fs.Parse(args); err != nil {
		return nil, err
	}

	// Set batch size to iteration size if not set.
	// Require that batch size can be evenly divided by the iteration count.
	if options.BatchSize == 0 {
		options.BatchSize = options.Iterations
	} else if options.Iterations%options.BatchSize != 0 {
		return nil, ErrNonDivisibleBatchSize
	}

	// Generate temp path if one is not passed in. The temp file is created
	// only to reserve a unique name; bolt.Open recreates it.
	if options.Path == "" {
		f, err := ioutil.TempFile("", "bolt-bench-")
		if err != nil {
			return nil, fmt.Errorf("temp file: %s", err)
		}
		f.Close()
		os.Remove(f.Name())
		options.Path = f.Name()
	}

	return &options, nil
}
// runWrites dispatches to the configured write mode and records the total
// write duration. Profiling starts here when writes are being profiled.
func (cmd *BenchCommand) runWrites(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
	if options.ProfileMode == "rw" || options.ProfileMode == "w" {
		cmd.startProfiling(options)
	}

	started := time.Now()

	var err error
	switch options.WriteMode {
	case "seq":
		err = cmd.runWritesSequential(db, options, results)
	case "rnd":
		err = cmd.runWritesRandom(db, options, results)
	case "seq-nest":
		err = cmd.runWritesSequentialNested(db, options, results)
	case "rnd-nest":
		err = cmd.runWritesRandomNested(db, options, results)
	default:
		return fmt.Errorf("invalid write mode: %s", options.WriteMode)
	}

	// Save time to write.
	results.WriteDuration = time.Since(started)

	// Stop profiling for writes only; "rw" keeps profiling through reads.
	if options.ProfileMode == "w" {
		cmd.stopProfiling()
	}
	return err
}
// runWritesSequential writes keys from a monotonically increasing counter.
func (cmd *BenchCommand) runWritesSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
	var next uint32
	return cmd.runWritesWithSource(db, options, results, func() uint32 { next++; return next })
}

// runWritesRandom writes keys drawn from a time-seeded PRNG.
func (cmd *BenchCommand) runWritesRandom(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	return cmd.runWritesWithSource(db, options, results, rng.Uint32)
}
// runWritesSequentialNested writes sequential keys into nested sub-buckets.
// FIX: both nested variants previously called runWritesWithSource, leaving
// runWritesNestedWithSource dead code — "seq-nest"/"rnd-nest" wrote a flat
// layout while runReadsSequentialNested expects nested buckets.
func (cmd *BenchCommand) runWritesSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
	var i = uint32(0)
	return cmd.runWritesNestedWithSource(db, options, results, func() uint32 { i++; return i })
}

// runWritesRandomNested writes random keys into nested sub-buckets (same fix).
func (cmd *BenchCommand) runWritesRandomNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	return cmd.runWritesNestedWithSource(db, options, results, func() uint32 { return r.Uint32() })
}
// runWritesWithSource performs Iterations writes into the flat bench
// bucket, batched BatchSize keys per transaction, with keys produced by
// keySource.
func (cmd *BenchCommand) runWritesWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error {
	results.WriteOps = options.Iterations
	for i := 0; i < options.Iterations; i += options.BatchSize {
		if err := db.Update(func(tx *bolt.Tx) error {
			// FIX: the bucket-creation error was discarded (`b, _ :=`),
			// which would nil-panic on b below on failure; check it like
			// the nested variant does.
			b, err := tx.CreateBucketIfNotExists(benchBucketName)
			if err != nil {
				return err
			}
			b.FillPercent = options.FillPercent

			for j := 0; j < options.BatchSize; j++ {
				key := make([]byte, options.KeySize)
				value := make([]byte, options.ValueSize)
				// Write key as uint32.
				binary.BigEndian.PutUint32(key, keySource())
				// Insert key/value.
				if err := b.Put(key, value); err != nil {
					return err
				}
			}
			return nil
		}); err != nil {
			return err
		}
	}
	return nil
}
// runWritesNestedWithSource performs batched writes where each batch goes
// into a freshly keyed sub-bucket beneath the bench bucket.
func (cmd *BenchCommand) runWritesNestedWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error {
	results.WriteOps = options.Iterations

	for done := 0; done < options.Iterations; done += options.BatchSize {
		err := db.Update(func(tx *bolt.Tx) error {
			top, err := tx.CreateBucketIfNotExists(benchBucketName)
			if err != nil {
				return err
			}
			top.FillPercent = options.FillPercent

			// Name the sub-bucket from the key source.
			name := make([]byte, options.KeySize)
			binary.BigEndian.PutUint32(name, keySource())

			sub, err := top.CreateBucketIfNotExists(name)
			if err != nil {
				return err
			}
			sub.FillPercent = options.FillPercent

			// Fill the sub-bucket with one batch of key/value pairs.
			for n := 0; n < options.BatchSize; n++ {
				key := make([]byte, options.KeySize)
				value := make([]byte, options.ValueSize)
				binary.BigEndian.PutUint32(key, keySource())
				if err := sub.Put(key, value); err != nil {
					return err
				}
			}
			return nil
		})
		if err != nil {
			return err
		}
	}
	return nil
}
// runReads dispatches to the configured read mode and records the total
// read duration. Nested write modes get the nested sequential reader.
func (cmd *BenchCommand) runReads(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
	if options.ProfileMode == "r" {
		cmd.startProfiling(options)
	}

	started := time.Now()

	if options.ReadMode != "seq" {
		return fmt.Errorf("invalid read mode: %s", options.ReadMode)
	}

	var err error
	if options.WriteMode == "seq-nest" || options.WriteMode == "rnd-nest" {
		err = cmd.runReadsSequentialNested(db, options, results)
	} else {
		err = cmd.runReadsSequential(db, options, results)
	}

	// Save read time.
	results.ReadDuration = time.Since(started)

	// Stop profiling for reads.
	if options.ProfileMode == "rw" || options.ProfileMode == "r" {
		cmd.stopProfiling()
	}
	return err
}
// runReadsSequential cursors over the flat bench bucket repeatedly for at
// least one second, counting key/value pairs read into results.ReadOps.
func (cmd *BenchCommand) runReadsSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
	return db.View(func(tx *bolt.Tx) error {
		t := time.Now()
		for {
			var count int
			c := tx.Bucket(benchBucketName).Cursor()
			for k, v := c.First(); k != nil; k, v = c.Next() {
				if v == nil {
					// FIX: was an ad-hoc errors.New("invalid value");
					// use the shared sentinel for consistency with
					// runReadsSequentialNested (same message, and callers
					// can now compare by identity).
					return ErrInvalidValue
				}
				count++
			}

			// Sequential writes should be read back exactly once per pass.
			if options.WriteMode == "seq" && count != options.Iterations {
				return fmt.Errorf("read seq: iter mismatch: expected %d, got %d", options.Iterations, count)
			}
			results.ReadOps += count

			// Make sure we do this for at least a second.
			if time.Since(t) >= time.Second {
				break
			}
		}
		return nil
	})
}
// runReadsSequentialNested cursors over every sub-bucket of the bench
// bucket repeatedly for at least one second, counting pairs read.
func (cmd *BenchCommand) runReadsSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
	return db.View(func(tx *bolt.Tx) error {
		start := time.Now()
		for {
			numRead := 0
			root := tx.Bucket(benchBucketName)
			err := root.ForEach(func(name, _ []byte) error {
				c := root.Bucket(name).Cursor()
				for k, v := c.First(); k != nil; k, v = c.Next() {
					if v == nil {
						return ErrInvalidValue
					}
					numRead++
				}
				return nil
			})
			if err != nil {
				return err
			}

			// Sequential nested writes should read back exactly once per pass.
			if options.WriteMode == "seq-nest" && numRead != options.Iterations {
				return fmt.Errorf("read seq-nest: iter mismatch: expected %d, got %d", options.Iterations, numRead)
			}
			results.ReadOps += numRead

			// Make sure we do this for at least a second.
			if time.Since(start) >= time.Second {
				break
			}
		}
		return nil
	})
}
// File handlers for the various profiles; shared with stopProfiling.
var cpuprofile, memprofile, blockprofile *os.File

// startProfiling opens the requested profile files and enables the
// corresponding runtime profilers. Any creation failure is fatal.
func (cmd *BenchCommand) startProfiling(options *BenchOptions) {
	var err error

	// CPU profile.
	if options.CPUProfile != "" {
		if cpuprofile, err = os.Create(options.CPUProfile); err != nil {
			fmt.Fprintf(cmd.Stderr, "bench: could not create cpu profile %q: %v\n", options.CPUProfile, err)
			os.Exit(1)
		}
		pprof.StartCPUProfile(cpuprofile)
	}

	// Memory profile. The heap snapshot itself is written by stopProfiling;
	// here we only open the file and raise the sampling rate.
	if options.MemProfile != "" {
		if memprofile, err = os.Create(options.MemProfile); err != nil {
			fmt.Fprintf(cmd.Stderr, "bench: could not create memory profile %q: %v\n", options.MemProfile, err)
			os.Exit(1)
		}
		runtime.MemProfileRate = 4096
	}

	// Block (contention) profile.
	if options.BlockProfile != "" {
		if blockprofile, err = os.Create(options.BlockProfile); err != nil {
			fmt.Fprintf(cmd.Stderr, "bench: could not create block profile %q: %v\n", options.BlockProfile, err)
			os.Exit(1)
		}
		runtime.SetBlockProfileRate(1)
	}
}
// stopProfiling flushes and closes any profiles opened by startProfiling,
// resetting the package-level handles so it is safe to call repeatedly.
func (cmd *BenchCommand) stopProfiling() {
	if cpuprofile != nil {
		pprof.StopCPUProfile()
		_ = cpuprofile.Close()
		cpuprofile = nil
	}

	if memprofile != nil {
		// Write the heap profile accumulated during the run.
		_ = pprof.Lookup("heap").WriteTo(memprofile, 0)
		_ = memprofile.Close()
		memprofile = nil
	}

	if blockprofile != nil {
		_ = pprof.Lookup("block").WriteTo(blockprofile, 0)
		_ = blockprofile.Close()
		blockprofile = nil
		runtime.SetBlockProfileRate(0)
	}
}
// BenchOptions represents the set of options that can be passed to "bolt bench".
type BenchOptions struct {
	ProfileMode  string // which phases to profile: "r", "w", or "rw" (see runWrites/runReads)
	WriteMode    string // "seq", "rnd", "seq-nest", or "rnd-nest"
	ReadMode     string // only "seq" is accepted by runReads
	Iterations   int    // total number of key/value writes
	BatchSize    int    // writes per transaction; 0 defaults to Iterations
	KeySize      int    // key length in bytes (first 4 carry the uint32 key)
	ValueSize    int    // value length in bytes (zero-filled)
	CPUProfile   string // output path for the CPU profile, if any
	MemProfile   string // output path for the heap profile, if any
	BlockProfile string // output path for the block profile, if any
	// NOTE(review): StatsInterval has no corresponding flag in ParseFlags
	// and is never read in this file — confirm whether it is still used.
	StatsInterval time.Duration
	FillPercent   float64 // bucket fill percent applied to bench buckets
	NoSync        bool    // sets DB.NoSync to skip fsync on commit
	Work          bool    // keep the database file instead of removing it
	Path          string  // database path; temp file generated when empty
}
// BenchResults represents the performance results of the benchmark.
type BenchResults struct {
	WriteOps      int
	WriteDuration time.Duration
	ReadOps       int
	ReadDuration  time.Duration
}

// WriteOpDuration returns the average duration of a single write
// operation, or 0 when no writes were performed.
func (r *BenchResults) WriteOpDuration() time.Duration {
	if r.WriteOps == 0 {
		return 0
	}
	return r.WriteDuration / time.Duration(r.WriteOps)
}

// WriteOpsPerSecond returns the average number of write operations per
// second, or 0 when the per-op duration rounds to zero.
func (r *BenchResults) WriteOpsPerSecond() int {
	op := r.WriteOpDuration()
	if op == 0 {
		return 0
	}
	return int(time.Second) / int(op)
}

// ReadOpDuration returns the average duration of a single read operation,
// or 0 when no reads were performed.
func (r *BenchResults) ReadOpDuration() time.Duration {
	if r.ReadOps == 0 {
		return 0
	}
	return r.ReadDuration / time.Duration(r.ReadOps)
}

// ReadOpsPerSecond returns the average number of read operations per
// second, or 0 when the per-op duration rounds to zero.
func (r *BenchResults) ReadOpsPerSecond() int {
	op := r.ReadOpDuration()
	if op == 0 {
		return 0
	}
	return int(time.Second) / int(op)
}
type PageError struct {
ID int
Err error
}
func (e *PageError) Error() string {
return fmt.Sprintf("page error: id=%d, err=%s", e.ID, e.Err)
}
// NOTE(review): the line above in the original capture ("Add 'bolt page'
// command.") is a commit-message artifact separating two versions of this
// file; the second version (which adds the "page" command) follows.
package main
import (
"bytes"
"encoding/binary"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"math/rand"
"os"
"runtime"
"runtime/pprof"
"strconv"
"strings"
"time"
"unicode"
"unicode/utf8"
"unsafe"
"github.com/boltdb/bolt"
)
var (
	// ErrUsage is returned when a usage message was printed and the process
	// should simply exit with an error.
	ErrUsage = errors.New("usage")

	// ErrUnknownCommand is returned when a CLI command is not specified.
	ErrUnknownCommand = errors.New("unknown command")

	// ErrPathRequired is returned when the path to a Bolt database is not specified.
	ErrPathRequired = errors.New("path required")

	// ErrFileNotFound is returned when a Bolt database does not exist.
	ErrFileNotFound = errors.New("file not found")

	// ErrInvalidValue is returned when a benchmark reads an unexpected value.
	ErrInvalidValue = errors.New("invalid value")

	// ErrCorrupt is returned when a checking a data file finds errors.
	// FIX: previously duplicated ErrInvalidValue's message ("invalid value");
	// sentinels are compared by identity here, so the text change is safe.
	ErrCorrupt = errors.New("corrupt database")

	// ErrNonDivisibleBatchSize is returned when the batch size can't be evenly
	// divided by the iteration count.
	ErrNonDivisibleBatchSize = errors.New("number of iterations must be divisible by the batch size")

	// ErrPageIDRequired is returned when a required page id is not specified.
	ErrPageIDRequired = errors.New("page id required")

	// ErrPageNotFound is returned when specifying a page above the high water mark.
	ErrPageNotFound = errors.New("page not found")

	// ErrPageFreed is returned when reading a page that has already been freed.
	ErrPageFreed = errors.New("page freed")
)
// PageHeaderSize represents the size of the bolt.page header.
// NOTE(review): assumed to mirror the byte size of bolt's unexported page
// header struct used by the raw "page" command reader — confirm it stays
// in sync with the bolt package's layout.
const PageHeaderSize = 16
// main is the CLI entry point. A usage error exits with status 2; any
// other error is printed and exits with status 1.
func main() {
	err := NewMain().Run(os.Args[1:]...)
	switch {
	case err == ErrUsage:
		os.Exit(2)
	case err != nil:
		fmt.Println(err.Error())
		os.Exit(1)
	}
}
// Main represents the main program execution. It carries the standard
// streams so sub-commands can be exercised with substituted readers/writers.
type Main struct {
	Stdin  io.Reader
	Stdout io.Writer
	Stderr io.Writer
}

// NewMain returns a new instance of Main connected to the standard input/output.
func NewMain() *Main {
	m := &Main{}
	m.Stdin = os.Stdin
	m.Stdout = os.Stdout
	m.Stderr = os.Stderr
	return m
}
// Run executes the program, dispatching to the sub-command named by the
// first argument; the remaining arguments are passed through unchanged.
func (m *Main) Run(args ...string) error {
	// A command name must come first; a leading flag means no command was given.
	if len(args) == 0 || strings.HasPrefix(args[0], "-") {
		fmt.Fprintln(m.Stderr, m.Usage())
		return ErrUsage
	}

	name, rest := args[0], args[1:]
	switch name {
	case "help":
		fmt.Fprintln(m.Stderr, m.Usage())
		return ErrUsage
	case "bench":
		return newBenchCommand(m).Run(rest...)
	case "check":
		return newCheckCommand(m).Run(rest...)
	case "dump":
		return newDumpCommand(m).Run(rest...)
	case "info":
		return newInfoCommand(m).Run(rest...)
	case "page":
		return newPageCommand(m).Run(rest...)
	case "pages":
		return newPagesCommand(m).Run(rest...)
	case "stats":
		return newStatsCommand(m).Run(rest...)
	}
	return ErrUnknownCommand
}
// Usage returns the help message.
//
// FIX: the "dump" and "page" commands are dispatched by Run but were
// missing from this command listing; both are now included.
func (m *Main) Usage() string {
	return strings.TrimLeft(`
Bolt is a tool for inspecting bolt databases.
Usage:
bolt command [arguments]
The commands are:
bench run synthetic benchmark against bolt
check verifies integrity of bolt database
dump print a hexadecimal dump of a single page
info print basic info
help print this screen
page print one or more pages in human readable format
pages print list of pages with their types
stats iterate over all pages and generate usage stats
Use "bolt [command] -h" for more information about a command.
`, "\n")
}
// CheckCommand represents the "check" command execution.
type CheckCommand struct {
	Stdin  io.Reader
	Stdout io.Writer
	Stderr io.Writer
}

// newCheckCommand returns a CheckCommand wired to the main program's streams.
func newCheckCommand(m *Main) *CheckCommand {
	c := &CheckCommand{}
	c.Stdin, c.Stdout, c.Stderr = m.Stdin, m.Stdout, m.Stderr
	return c
}
// Run executes the command: it opens the database given as the first
// positional argument and streams consistency-check errors to stdout.
// Returns ErrCorrupt when any check error was reported.
func (cmd *CheckCommand) Run(args ...string) error {
	// Parse flags.
	fs := flag.NewFlagSet("", flag.ContinueOnError)
	help := fs.Bool("h", false, "")
	if err := fs.Parse(args); err != nil {
		return err
	} else if *help {
		fmt.Fprintln(cmd.Stderr, cmd.Usage())
		return ErrUsage
	}

	// Require database path.
	path := fs.Arg(0)
	if path == "" {
		return ErrPathRequired
	} else if _, err := os.Stat(path); os.IsNotExist(err) {
		return ErrFileNotFound
	}

	// Open database.
	db, err := bolt.Open(path, 0666, nil)
	if err != nil {
		return err
	}
	defer db.Close()

	// Perform consistency check.
	return db.View(func(tx *bolt.Tx) error {
		// FIX: the original wrapped the channel receive in a single-case
		// select inside a labeled for-loop; ranging over the channel is
		// equivalent (it ends when Check closes the channel) and simpler.
		var count int
		for err := range tx.Check() {
			fmt.Fprintln(cmd.Stdout, err)
			count++
		}

		// Print summary of errors.
		if count > 0 {
			fmt.Fprintf(cmd.Stdout, "%d errors found\n", count)
			return ErrCorrupt
		}

		// Notify user that database is valid.
		fmt.Fprintln(cmd.Stdout, "OK")
		return nil
	})
}
// Usage returns the help message.
func (cmd *CheckCommand) Usage() string {
	const text = `
usage: bolt check PATH
Check opens a database at PATH and runs an exhaustive check to verify that
all pages are accessible or are marked as freed. It also verifies that no
pages are double referenced.
Verification errors will stream out as they are found and the process will
return after all pages have been checked.
`
	return strings.TrimLeft(text, "\n")
}
// InfoCommand represents the "info" command execution.
type InfoCommand struct {
Stdin io.Reader
Stdout io.Writer
Stderr io.Writer
}
// NewInfoCommand returns a InfoCommand.
func newInfoCommand(m *Main) *InfoCommand {
return &InfoCommand{
Stdin: m.Stdin,
Stdout: m.Stdout,
Stderr: m.Stderr,
}
}
// Run executes the info command: it opens the database at the path given as
// the first positional argument and prints its page size.
func (cmd *InfoCommand) Run(args ...string) error {
	// Parse command-line flags.
	fs := flag.NewFlagSet("", flag.ContinueOnError)
	help := fs.Bool("h", false, "")
	if err := fs.Parse(args); err != nil {
		return err
	}
	if *help {
		fmt.Fprintln(cmd.Stderr, cmd.Usage())
		return ErrUsage
	}

	// A database path is required and must already exist on disk.
	path := fs.Arg(0)
	if path == "" {
		return ErrPathRequired
	}
	if _, err := os.Stat(path); os.IsNotExist(err) {
		return ErrFileNotFound
	}

	// Open the database.
	db, err := bolt.Open(path, 0666, nil)
	if err != nil {
		return err
	}
	defer db.Close()

	// Print basic database info.
	fmt.Fprintf(cmd.Stdout, "Page Size: %d\n", db.Info().PageSize)
	return nil
}
// Usage returns the help message for the info command.
func (cmd *InfoCommand) Usage() string {
	const text = `
usage: bolt info PATH
Info prints basic information about the Bolt database at PATH.
`
	// Strip the leading newline introduced by the raw literal.
	return strings.TrimLeft(text, "\n")
}
// DumpCommand represents the "dump" command execution.
type DumpCommand struct {
	Stdin  io.Reader // unused by Run, kept for parity with the other commands
	Stdout io.Writer // destination for the hex dump
	Stderr io.Writer // destination for usage output
}

// newDumpCommand returns a DumpCommand wired to the Main program's streams.
func newDumpCommand(m *Main) *DumpCommand {
	return &DumpCommand{
		Stdin:  m.Stdin,
		Stdout: m.Stdout,
		Stderr: m.Stderr,
	}
}
// Run executes the dump command: it hex-dumps each page id listed after the
// database path.
func (cmd *DumpCommand) Run(args ...string) error {
	// Parse flags.
	fs := flag.NewFlagSet("", flag.ContinueOnError)
	help := fs.Bool("h", false, "")
	if err := fs.Parse(args); err != nil {
		return err
	} else if *help {
		fmt.Fprintln(cmd.Stderr, cmd.Usage())
		return ErrUsage
	}

	// Require database path and page id.
	path := fs.Arg(0)
	if path == "" {
		return ErrPathRequired
	} else if _, err := os.Stat(path); os.IsNotExist(err) {
		return ErrFileNotFound
	}

	// Read page ids from the remaining positional arguments.
	pageIDs, err := atois(fs.Args()[1:])
	if err != nil {
		return err
	} else if len(pageIDs) == 0 {
		return ErrPageIDRequired
	}

	// Open database to retrieve page size.
	pageSize, err := ReadPageSize(path)
	if err != nil {
		return err
	}

	// Open database file handler.
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer func() { _ = f.Close() }()

	// Print each page listed.
	for i, pageID := range pageIDs {
		// Print a separator followed by a blank line between pages.
		// Fix: this was fmt.Fprintln with an argument already ending in
		// "\n", which `go vet` flags; Fprintf with an explicit "\n\n"
		// emits the same bytes.
		if i > 0 {
			fmt.Fprintf(cmd.Stdout, "===============================================\n\n")
		}

		// Print page to stdout.
		if err := cmd.PrintPage(cmd.Stdout, f, pageID, pageSize); err != nil {
			return err
		}
	}

	return nil
}
// PrintPage writes a hexadecimal dump of the page with id pageID to w,
// collapsing runs of repeated 16-byte lines into a single "*" marker
// (hexdump-style). The final line of the page is always printed.
func (cmd *DumpCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSize int) error {
	const lineWidth = 16

	// Read the entire page into memory.
	data := make([]byte, pageSize)
	addr := pageID * pageSize
	n, err := r.ReadAt(data, int64(addr))
	if err != nil {
		return err
	}
	if n != pageSize {
		return io.ErrUnexpectedEOF
	}

	var (
		prev    []byte // previously visited 16-byte line
		skipped bool   // true while inside a collapsed run of duplicates
	)
	for off := 0; off < pageSize; off += lineWidth {
		line := data[off : off+lineWidth]
		lastLine := off+lineWidth == pageSize

		if bytes.Equal(line, prev) && !lastLine {
			// Duplicate of the previous line: emit the "*" marker once.
			if !skipped {
				fmt.Fprintf(w, "%07x *\n", addr+off)
				skipped = true
			}
		} else {
			// Emit the address plus eight 2-byte hexadecimal groups.
			fmt.Fprintf(w, "%07x %04x %04x %04x %04x %04x %04x %04x %04x\n", addr+off,
				line[0:2], line[2:4], line[4:6], line[6:8],
				line[8:10], line[10:12], line[12:14], line[14:16])
			skipped = false
		}

		prev = line
	}
	fmt.Fprint(w, "\n")

	return nil
}
// Usage returns the help message.
//
// Fix: the previous text advertised a "-page PAGEID" flag that Run never
// registers; page ids are positional arguments after PATH. Also corrects
// the "hexidecimal" typo.
func (cmd *DumpCommand) Usage() string {
	return strings.TrimLeft(`
usage: bolt dump PATH pageid [pageid...]
Dump prints a hexadecimal dump of one or more pages.
`, "\n")
}
// PageCommand represents the "page" command execution.
type PageCommand struct {
	Stdin  io.Reader // unused by Run, kept for parity with the other commands
	Stdout io.Writer // destination for the decoded page report
	Stderr io.Writer // destination for usage output
}

// newPageCommand returns a PageCommand wired to the Main program's streams.
func newPageCommand(m *Main) *PageCommand {
	return &PageCommand{
		Stdin:  m.Stdin,
		Stdout: m.Stdout,
		Stderr: m.Stderr,
	}
}
// Run executes the page command: it decodes and prints each page id listed
// after the database path in human-readable form.
func (cmd *PageCommand) Run(args ...string) error {
	// Parse flags.
	fs := flag.NewFlagSet("", flag.ContinueOnError)
	help := fs.Bool("h", false, "")
	if err := fs.Parse(args); err != nil {
		return err
	} else if *help {
		fmt.Fprintln(cmd.Stderr, cmd.Usage())
		return ErrUsage
	}

	// Require database path and page id.
	path := fs.Arg(0)
	if path == "" {
		return ErrPathRequired
	} else if _, err := os.Stat(path); os.IsNotExist(err) {
		return ErrFileNotFound
	}

	// Read page ids from the remaining positional arguments.
	pageIDs, err := atois(fs.Args()[1:])
	if err != nil {
		return err
	} else if len(pageIDs) == 0 {
		return ErrPageIDRequired
	}

	// Open database file handler.
	// NOTE(review): f is never read from — ReadPage below reopens the file
	// itself — so this handle only verifies the file is openable. Confirm
	// whether it can be removed.
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer func() { _ = f.Close() }()

	// Print each page listed.
	for i, pageID := range pageIDs {
		// Print a separator followed by a blank line between pages.
		// Fix: this was fmt.Fprintln with an argument already ending in
		// "\n", which `go vet` flags; Fprintf with an explicit "\n\n"
		// emits the same bytes.
		if i > 0 {
			fmt.Fprintf(cmd.Stdout, "===============================================\n\n")
		}

		// Retrieve page info and page size.
		p, buf, err := ReadPage(path, pageID)
		if err != nil {
			return err
		}

		// Print basic page info.
		fmt.Fprintf(cmd.Stdout, "Page ID: %d\n", p.id)
		fmt.Fprintf(cmd.Stdout, "Page Type: %s\n", p.Type())
		fmt.Fprintf(cmd.Stdout, "Total Size: %d bytes\n", len(buf))

		// Print type-specific data. Unknown page types print the header only.
		switch p.Type() {
		case "meta":
			err = cmd.PrintMeta(cmd.Stdout, buf)
		case "leaf":
			err = cmd.PrintLeaf(cmd.Stdout, buf)
		case "branch":
			err = cmd.PrintBranch(cmd.Stdout, buf)
		case "freelist":
			err = cmd.PrintFreelist(cmd.Stdout, buf)
		}
		if err != nil {
			return err
		}
	}

	return nil
}
// PrintMeta prints the data from the meta page.
//
// buf holds the raw page bytes; the meta struct is overlaid on the bytes
// immediately following the page header via an unsafe cast, so buf must be
// at least PageHeaderSize plus the size of meta.
func (cmd *PageCommand) PrintMeta(w io.Writer, buf []byte) error {
	m := (*meta)(unsafe.Pointer(&buf[PageHeaderSize]))
	fmt.Fprintf(w, "Version: %d\n", m.version)
	fmt.Fprintf(w, "Page Size: %d bytes\n", m.pageSize)
	fmt.Fprintf(w, "Flags: %08x\n", m.flags)
	fmt.Fprintf(w, "Root: <pgid=%d>\n", m.root.root)
	fmt.Fprintf(w, "Freelist: <pgid=%d>\n", m.freelist)
	fmt.Fprintf(w, "HWM: <pgid=%d>\n", m.pgid) // high-water mark: first unallocated page id
	fmt.Fprintf(w, "Txn ID: %d\n", m.txid)
	fmt.Fprintf(w, "Checksum: %016x\n", m.checksum)
	fmt.Fprintf(w, "\n")
	return nil
}
// PrintLeaf prints the data for a leaf page: one "key: value" line per
// element. Printable strings are quoted; non-printable data is shown as hex.
// Elements flagged as nested buckets print the bucket header instead of the
// raw value.
func (cmd *PageCommand) PrintLeaf(w io.Writer, buf []byte) error {
	p := (*page)(unsafe.Pointer(&buf[0]))

	// Print number of items.
	fmt.Fprintf(w, "Item Count: %d\n", p.count)
	fmt.Fprintf(w, "\n")

	// Print each key/value.
	for i := uint16(0); i < p.count; i++ {
		e := p.leafPageElement(i)

		// Format key as string.
		var k string
		if isPrintable(string(e.key())) {
			k = fmt.Sprintf("%q", string(e.key()))
		} else {
			k = fmt.Sprintf("%x", string(e.key()))
		}

		// Format value as string.
		// Fix: the two non-bucket branches previously assigned to k,
		// clobbering the key and leaving v empty, so plain values were
		// never printed.
		var v string
		if (e.flags & uint32(bucketLeafFlag)) != 0 {
			b := (*bucket)(unsafe.Pointer(&e.value()[0]))
			v = fmt.Sprintf("<pgid=%d,seq=%d>", b.root, b.sequence)
		} else if isPrintable(string(e.value())) {
			v = fmt.Sprintf("%q", string(e.value()))
		} else {
			v = fmt.Sprintf("%x", string(e.value()))
		}

		fmt.Fprintf(w, "%s: %s\n", k, v)
	}
	fmt.Fprintf(w, "\n")
	return nil
}
// PrintBranch prints the data for a branch page: one "key: <pgid=N>" line
// per element, pointing at the child page.
func (cmd *PageCommand) PrintBranch(w io.Writer, buf []byte) error {
	p := (*page)(unsafe.Pointer(&buf[0]))

	// Print number of items.
	fmt.Fprintf(w, "Item Count: %d\n", p.count)
	fmt.Fprintf(w, "\n")

	// Print each key/value.
	for i := uint16(0); i < p.count; i++ {
		e := p.branchPageElement(i)

		// Format key as string: quoted when printable, hex otherwise.
		var k string
		if isPrintable(string(e.key())) {
			k = fmt.Sprintf("%q", string(e.key()))
		} else {
			k = fmt.Sprintf("%x", string(e.key()))
		}

		fmt.Fprintf(w, "%s: <pgid=%d>\n", k, e.pgid)
	}
	fmt.Fprintf(w, "\n")
	return nil
}
// PrintFreelist prints the data for a freelist page: one free page id per
// line. The ids are read as a pgid array overlaid on the page payload.
func (cmd *PageCommand) PrintFreelist(w io.Writer, buf []byte) error {
	p := (*page)(unsafe.Pointer(&buf[0]))

	// Print number of items.
	fmt.Fprintf(w, "Item Count: %d\n", p.count)
	fmt.Fprintf(w, "\n")

	// Print each page in the freelist.
	ids := (*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr))
	for i := uint16(0); i < p.count; i++ {
		fmt.Fprintf(w, "%d\n", ids[i])
	}
	fmt.Fprintf(w, "\n")
	return nil
}
// PrintPage prints a given page as a hexadecimal dump, collapsing runs of
// repeated 16-byte lines into a single "*" marker (hexdump-style).
//
// NOTE(review): this is a byte-for-byte duplicate of DumpCommand.PrintPage;
// consider extracting a shared helper.
func (cmd *PageCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSize int) error {
	const bytesPerLineN = 16

	// Read page into buffer.
	buf := make([]byte, pageSize)
	addr := pageID * pageSize
	if n, err := r.ReadAt(buf, int64(addr)); err != nil {
		return err
	} else if n != pageSize {
		return io.ErrUnexpectedEOF
	}

	// Write out to writer in 16-byte lines.
	var prev []byte
	var skipped bool
	for offset := 0; offset < pageSize; offset += bytesPerLineN {
		// Retrieve current 16-byte line.
		line := buf[offset : offset+bytesPerLineN]
		isLastLine := (offset == (pageSize - bytesPerLineN))

		// If it's the same as the previous line then print a skip.
		// The last line is always printed even when it repeats.
		if bytes.Equal(line, prev) && !isLastLine {
			if !skipped {
				fmt.Fprintf(w, "%07x *\n", addr+offset)
				skipped = true
			}
		} else {
			// Print line as hexadecimal in 2-byte groups.
			fmt.Fprintf(w, "%07x %04x %04x %04x %04x %04x %04x %04x %04x\n", addr+offset,
				line[0:2], line[2:4], line[4:6], line[6:8],
				line[8:10], line[10:12], line[12:14], line[14:16],
			)
			skipped = false
		}

		// Save the previous line.
		prev = line
	}
	fmt.Fprint(w, "\n")

	return nil
}
// Usage returns the help message.
//
// Fix: the previous text contained a stray "-page" flag that Run never
// registers; PATH and the page ids are plain positional arguments.
func (cmd *PageCommand) Usage() string {
	return strings.TrimLeft(`
usage: bolt page PATH pageid [pageid...]
Page prints one or more pages in human readable format.
`, "\n")
}
// PagesCommand represents the "pages" command execution.
type PagesCommand struct {
	Stdin  io.Reader // unused by Run, kept for parity with the other commands
	Stdout io.Writer // destination for the pages table
	Stderr io.Writer // destination for usage output
}

// newPagesCommand returns a PagesCommand wired to the Main program's streams.
func newPagesCommand(m *Main) *PagesCommand {
	return &PagesCommand{
		Stdin:  m.Stdin,
		Stdout: m.Stdout,
		Stderr: m.Stderr,
	}
}
// Run executes the pages command: it walks every page in the database and
// prints one table row per page with its type, item count, and overflow.
func (cmd *PagesCommand) Run(args ...string) error {
	// Parse command-line flags.
	fs := flag.NewFlagSet("", flag.ContinueOnError)
	help := fs.Bool("h", false, "")
	if err := fs.Parse(args); err != nil {
		return err
	}
	if *help {
		fmt.Fprintln(cmd.Stderr, cmd.Usage())
		return ErrUsage
	}

	// A database path is required and must already exist on disk.
	path := fs.Arg(0)
	if path == "" {
		return ErrPathRequired
	}
	if _, err := os.Stat(path); os.IsNotExist(err) {
		return ErrFileNotFound
	}

	// Open database.
	db, err := bolt.Open(path, 0666, nil)
	if err != nil {
		return err
	}
	defer func() { _ = db.Close() }()

	// Write table header.
	fmt.Fprintln(cmd.Stdout, "ID TYPE ITEMS OVRFLW")
	fmt.Fprintln(cmd.Stdout, "======== ========== ====== ======")

	return db.Update(func(tx *bolt.Tx) error {
		for pageID := 0; ; {
			p, err := tx.Page(pageID)
			if err != nil {
				return &PageError{ID: pageID, Err: err}
			}
			if p == nil {
				// Past the end of the database; the walk is complete.
				return nil
			}

			// Free pages carry no meaningful item/overflow counts.
			var items, overflow string
			if p.Type != "free" {
				items = strconv.Itoa(p.Count)
				if p.OverflowCount > 0 {
					overflow = strconv.Itoa(p.OverflowCount)
				}
			}

			// Emit one table row.
			fmt.Fprintf(cmd.Stdout, "%-8d %-10s %-6s %-6s\n", p.ID, p.Type, items, overflow)

			// Advance past this page and any overflow blocks it spans.
			pageID++
			if p.Type != "free" {
				pageID += p.OverflowCount
			}
		}
	})
}
// Usage returns the help message for the pages command.
func (cmd *PagesCommand) Usage() string {
	const text = `
usage: bolt pages PATH
Pages prints a table of pages with their type (meta, leaf, branch, freelist).
Leaf and branch pages will show a key count in the "items" column while the
freelist will show the number of free pages in the "items" column.
The "overflow" column shows the number of blocks that the page spills over
into. Normally there is no overflow but large keys and values can cause
a single page to take up multiple blocks.
`
	// Strip the leading newline introduced by the raw literal.
	return strings.TrimLeft(text, "\n")
}
// StatsCommand represents the "stats" command execution.
type StatsCommand struct {
	Stdin  io.Reader // unused by Run, kept for parity with the other commands
	Stdout io.Writer // destination for the statistics report
	Stderr io.Writer // destination for usage output
}

// newStatsCommand returns a StatsCommand wired to the Main program's streams.
func newStatsCommand(m *Main) *StatsCommand {
	return &StatsCommand{
		Stdin:  m.Stdin,
		Stdout: m.Stdout,
		Stderr: m.Stderr,
	}
}
// Run executes the stats command: it aggregates bucket statistics across all
// top-level buckets whose names begin with the optional second argument
// (prefix) and prints a report to Stdout.
func (cmd *StatsCommand) Run(args ...string) error {
	// Parse flags.
	fs := flag.NewFlagSet("", flag.ContinueOnError)
	help := fs.Bool("h", false, "")
	if err := fs.Parse(args); err != nil {
		return err
	} else if *help {
		fmt.Fprintln(cmd.Stderr, cmd.Usage())
		return ErrUsage
	}

	// Require database path; the bucket-name prefix is optional.
	path, prefix := fs.Arg(0), fs.Arg(1)
	if path == "" {
		return ErrPathRequired
	} else if _, err := os.Stat(path); os.IsNotExist(err) {
		return ErrFileNotFound
	}

	// Open database.
	db, err := bolt.Open(path, 0666, nil)
	if err != nil {
		return err
	}
	defer db.Close()

	return db.View(func(tx *bolt.Tx) error {
		// Aggregate statistics over every matching top-level bucket.
		var s bolt.BucketStats
		var count int
		if err := tx.ForEach(func(name []byte, b *bolt.Bucket) error {
			if bytes.HasPrefix(name, []byte(prefix)) {
				s.Add(b.Stats())
				count++
			}
			return nil
		}); err != nil {
			return err
		}

		fmt.Fprintf(cmd.Stdout, "Aggregate statistics for %d buckets\n\n", count)

		fmt.Fprintln(cmd.Stdout, "Page count statistics")
		fmt.Fprintf(cmd.Stdout, "\tNumber of logical branch pages: %d\n", s.BranchPageN)
		fmt.Fprintf(cmd.Stdout, "\tNumber of physical branch overflow pages: %d\n", s.BranchOverflowN)
		fmt.Fprintf(cmd.Stdout, "\tNumber of logical leaf pages: %d\n", s.LeafPageN)
		fmt.Fprintf(cmd.Stdout, "\tNumber of physical leaf overflow pages: %d\n", s.LeafOverflowN)

		fmt.Fprintln(cmd.Stdout, "Tree statistics")
		fmt.Fprintf(cmd.Stdout, "\tNumber of keys/value pairs: %d\n", s.KeyN)
		fmt.Fprintf(cmd.Stdout, "\tNumber of levels in B+tree: %d\n", s.Depth)

		fmt.Fprintln(cmd.Stdout, "Page size utilization")
		fmt.Fprintf(cmd.Stdout, "\tBytes allocated for physical branch pages: %d\n", s.BranchAlloc)
		var percentage int
		if s.BranchAlloc != 0 {
			percentage = int(float32(s.BranchInuse) * 100.0 / float32(s.BranchAlloc))
		}
		fmt.Fprintf(cmd.Stdout, "\tBytes actually used for branch data: %d (%d%%)\n", s.BranchInuse, percentage)
		fmt.Fprintf(cmd.Stdout, "\tBytes allocated for physical leaf pages: %d\n", s.LeafAlloc)
		percentage = 0
		if s.LeafAlloc != 0 {
			percentage = int(float32(s.LeafInuse) * 100.0 / float32(s.LeafAlloc))
		}
		fmt.Fprintf(cmd.Stdout, "\tBytes actually used for leaf data: %d (%d%%)\n", s.LeafInuse, percentage)

		fmt.Fprintln(cmd.Stdout, "Bucket statistics")
		fmt.Fprintf(cmd.Stdout, "\tTotal number of buckets: %d\n", s.BucketN)
		// Fix: guard against division by zero when no buckets matched the
		// prefix; every other percentage above already guards this way, and
		// converting a float NaN/Inf to int is implementation-specific.
		percentage = 0
		if s.BucketN != 0 {
			percentage = int(float32(s.InlineBucketN) * 100.0 / float32(s.BucketN))
		}
		fmt.Fprintf(cmd.Stdout, "\tTotal number on inlined buckets: %d (%d%%)\n", s.InlineBucketN, percentage)
		percentage = 0
		if s.LeafInuse != 0 {
			percentage = int(float32(s.InlineBucketInuse) * 100.0 / float32(s.LeafInuse))
		}
		fmt.Fprintf(cmd.Stdout, "\tBytes used for inlined buckets: %d (%d%%)\n", s.InlineBucketInuse, percentage)
		return nil
	})
}
// Usage returns the help message.
//
// Fix: the previous text described the page-reference error checking done by
// the "check" command ("already freed", "unreachable unfreed", ...), which
// Run never performs. This text describes what Run actually does: aggregate
// and print bucket statistics.
func (cmd *StatsCommand) Usage() string {
	return strings.TrimLeft(`
usage: bolt stats PATH [PREFIX]
Stats aggregates statistics for all top-level buckets in the database at PATH
whose names begin with the optional PREFIX. If PREFIX is omitted, every
bucket is included.
The report covers page counts (logical branch/leaf pages and their physical
overflow pages), tree statistics (key count and B+tree depth), page size
utilization, and bucket statistics including inlined buckets.
`, "\n")
}
// benchBucketName is the bucket that all benchmark writes and reads target.
var benchBucketName = []byte("bench")
// BenchCommand represents the "bench" command execution.
type BenchCommand struct {
	Stdin  io.Reader // unused by Run, kept for parity with the other commands
	Stdout io.Writer // destination for progress output (e.g. the -work path)
	Stderr io.Writer // destination for usage output and flag errors
}

// newBenchCommand returns a BenchCommand wired to the Main program's streams.
func newBenchCommand(m *Main) *BenchCommand {
	return &BenchCommand{
		Stdin:  m.Stdin,
		Stdout: m.Stdout,
		Stderr: m.Stderr,
	}
}
// Run executes the "bench" command: it creates (or reuses) a database at the
// configured path, runs the write phase then the read phase, and prints the
// timing results.
func (cmd *BenchCommand) Run(args ...string) error {
	// Parse CLI arguments.
	options, err := cmd.ParseFlags(args)
	if err != nil {
		return err
	}

	// Remove path if "-work" is not set. Otherwise keep path.
	if options.Work {
		fmt.Fprintf(cmd.Stdout, "work: %s\n", options.Path)
	} else {
		defer os.Remove(options.Path)
	}

	// Create database.
	db, err := bolt.Open(options.Path, 0666, nil)
	if err != nil {
		return err
	}
	db.NoSync = options.NoSync
	defer db.Close()

	// Write to the database.
	var results BenchResults
	if err := cmd.runWrites(db, options, &results); err != nil {
		// Fix: this was fmt.Errorf("write: ", err) — a format string with no
		// verb, which go vet flags and which rendered the error as an
		// "%!(EXTRA ...)" artifact. Match the read-phase message style.
		return fmt.Errorf("bench: write: %s", err)
	}

	// Read from the database.
	if err := cmd.runReads(db, options, &results); err != nil {
		return fmt.Errorf("bench: read: %s", err)
	}

	// Print results.
	// NOTE(review): results go to os.Stderr rather than cmd.Stderr; every
	// other command routes output through the struct's writers — confirm
	// whether this is intentional before changing it.
	fmt.Fprintf(os.Stderr, "# Write\t%v\t(%v/op)\t(%v op/sec)\n", results.WriteDuration, results.WriteOpDuration(), results.WriteOpsPerSecond())
	fmt.Fprintf(os.Stderr, "# Read\t%v\t(%v/op)\t(%v op/sec)\n", results.ReadDuration, results.ReadOpDuration(), results.ReadOpsPerSecond())
	fmt.Fprintln(os.Stderr, "")
	return nil
}
// ParseFlags parses the command line flags for the bench command, applies
// defaults (batch size, temp database path) and validates the combination of
// iteration count and batch size.
func (cmd *BenchCommand) ParseFlags(args []string) (*BenchOptions, error) {
	var options BenchOptions

	// Parse flagset.
	fs := flag.NewFlagSet("", flag.ContinueOnError)
	fs.StringVar(&options.ProfileMode, "profile-mode", "rw", "")
	fs.StringVar(&options.WriteMode, "write-mode", "seq", "")
	fs.StringVar(&options.ReadMode, "read-mode", "seq", "")
	fs.IntVar(&options.Iterations, "count", 1000, "")
	fs.IntVar(&options.BatchSize, "batch-size", 0, "")
	fs.IntVar(&options.KeySize, "key-size", 8, "")
	fs.IntVar(&options.ValueSize, "value-size", 32, "")
	fs.StringVar(&options.CPUProfile, "cpuprofile", "", "")
	fs.StringVar(&options.MemProfile, "memprofile", "", "")
	// Fix: "blockprofile" was registered twice; flag.FlagSet panics on a
	// duplicate flag name, so `bolt bench` crashed before parsing anything.
	fs.StringVar(&options.BlockProfile, "blockprofile", "", "")
	// NOTE(review): BenchOptions.StatsInterval has no corresponding flag —
	// possibly the duplicate blockprofile line above replaced it; confirm.
	fs.Float64Var(&options.FillPercent, "fill-percent", bolt.DefaultFillPercent, "")
	fs.BoolVar(&options.NoSync, "no-sync", false, "")
	fs.BoolVar(&options.Work, "work", false, "")
	fs.StringVar(&options.Path, "path", "", "")
	fs.SetOutput(cmd.Stderr)
	if err := fs.Parse(args); err != nil {
		return nil, err
	}

	// Set batch size to iteration size if not set.
	// Require that batch size can be evenly divided by the iteration count.
	if options.BatchSize == 0 {
		options.BatchSize = options.Iterations
	} else if options.Iterations%options.BatchSize != 0 {
		return nil, ErrNonDivisibleBatchSize
	}

	// Generate temp path if one is not passed in.
	if options.Path == "" {
		f, err := ioutil.TempFile("", "bolt-bench-")
		if err != nil {
			return nil, fmt.Errorf("temp file: %s", err)
		}
		f.Close()
		os.Remove(f.Name())
		options.Path = f.Name()
	}

	return &options, nil
}
// Writes to the database according to options.WriteMode and records the
// elapsed time in results.
//
// Profiling is started here when the profile mode includes writes ("rw" or
// "w"); it is stopped here only for write-only mode — in "rw" mode runReads
// stops it so one profile spans both phases.
func (cmd *BenchCommand) runWrites(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
	// Start profiling for writes.
	if options.ProfileMode == "rw" || options.ProfileMode == "w" {
		cmd.startProfiling(options)
	}

	t := time.Now()

	var err error
	switch options.WriteMode {
	case "seq":
		err = cmd.runWritesSequential(db, options, results)
	case "rnd":
		err = cmd.runWritesRandom(db, options, results)
	case "seq-nest":
		err = cmd.runWritesSequentialNested(db, options, results)
	case "rnd-nest":
		err = cmd.runWritesRandomNested(db, options, results)
	default:
		return fmt.Errorf("invalid write mode: %s", options.WriteMode)
	}

	// Save time to write.
	results.WriteDuration = time.Since(t)

	// Stop profiling for writes only.
	if options.ProfileMode == "w" {
		cmd.stopProfiling()
	}

	return err
}
// runWritesSequential writes keys in monotonically increasing order (1, 2, ...).
func (cmd *BenchCommand) runWritesSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
	next := uint32(0)
	return cmd.runWritesWithSource(db, options, results, func() uint32 { next++; return next })
}

// runWritesRandom writes keys drawn from a time-seeded PRNG.
func (cmd *BenchCommand) runWritesRandom(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	return cmd.runWritesWithSource(db, options, results, rng.Uint32)
}
// runWritesSequentialNested writes sequential keys into nested sub-buckets.
//
// Fix: this previously called runWritesWithSource, the flat write path, so
// "seq-nest" mode wrote no nested buckets and runWritesNestedWithSource was
// dead code.
func (cmd *BenchCommand) runWritesSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
	var i = uint32(0)
	return cmd.runWritesNestedWithSource(db, options, results, func() uint32 { i++; return i })
}
// runWritesRandomNested writes random keys into nested sub-buckets.
//
// Fix: this previously called runWritesWithSource, the flat write path, so
// "rnd-nest" mode never exercised the nested write helper.
func (cmd *BenchCommand) runWritesRandomNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	return cmd.runWritesNestedWithSource(db, options, results, func() uint32 { return r.Uint32() })
}
// runWritesWithSource performs flat (non-nested) batched writes into the
// bench bucket, drawing each 4-byte big-endian key from keySource.
func (cmd *BenchCommand) runWritesWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error {
	results.WriteOps = options.Iterations

	for i := 0; i < options.Iterations; i += options.BatchSize {
		if err := db.Update(func(tx *bolt.Tx) error {
			// Fix: the error from CreateBucketIfNotExists was previously
			// discarded (b, _ :=), which would nil-pointer panic on Put
			// below; handle it like runWritesNestedWithSource does.
			b, err := tx.CreateBucketIfNotExists(benchBucketName)
			if err != nil {
				return err
			}
			b.FillPercent = options.FillPercent

			for j := 0; j < options.BatchSize; j++ {
				key := make([]byte, options.KeySize)
				value := make([]byte, options.ValueSize)

				// Write key as uint32.
				binary.BigEndian.PutUint32(key, keySource())

				// Insert key/value.
				if err := b.Put(key, value); err != nil {
					return err
				}
			}

			return nil
		}); err != nil {
			return err
		}
	}

	return nil
}
// runWritesNestedWithSource performs batched writes where each batch creates
// one sub-bucket (named from keySource) under the bench bucket and inserts
// the batch's key/value pairs into it.
func (cmd *BenchCommand) runWritesNestedWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error {
	results.WriteOps = options.Iterations

	for i := 0; i < options.Iterations; i += options.BatchSize {
		if err := db.Update(func(tx *bolt.Tx) error {
			top, err := tx.CreateBucketIfNotExists(benchBucketName)
			if err != nil {
				return err
			}
			top.FillPercent = options.FillPercent

			// Create bucket key.
			name := make([]byte, options.KeySize)
			binary.BigEndian.PutUint32(name, keySource())

			// Create bucket.
			b, err := top.CreateBucketIfNotExists(name)
			if err != nil {
				return err
			}
			b.FillPercent = options.FillPercent

			for j := 0; j < options.BatchSize; j++ {
				var key = make([]byte, options.KeySize)
				var value = make([]byte, options.ValueSize)

				// Generate key as uint32.
				binary.BigEndian.PutUint32(key, keySource())

				// Insert value into subbucket.
				if err := b.Put(key, value); err != nil {
					return err
				}
			}

			return nil
		}); err != nil {
			return err
		}
	}

	return nil
}
// Reads from the database according to options.ReadMode and records the
// elapsed time in results.
//
// Profiling: for mode "r" it is started here; for mode "rw" it was started
// by runWrites and is stopped here, so one profile spans both phases.
func (cmd *BenchCommand) runReads(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
	// Start profiling for reads.
	if options.ProfileMode == "r" {
		cmd.startProfiling(options)
	}

	t := time.Now()

	var err error
	switch options.ReadMode {
	case "seq":
		// Nested write modes lay data out in sub-buckets, so the sequential
		// read must traverse them the same way.
		switch options.WriteMode {
		case "seq-nest", "rnd-nest":
			err = cmd.runReadsSequentialNested(db, options, results)
		default:
			err = cmd.runReadsSequential(db, options, results)
		}
	default:
		return fmt.Errorf("invalid read mode: %s", options.ReadMode)
	}

	// Save read time.
	results.ReadDuration = time.Since(t)

	// Stop profiling for reads.
	if options.ProfileMode == "rw" || options.ProfileMode == "r" {
		cmd.stopProfiling()
	}

	return err
}
// runReadsSequential cursors over the flat bench bucket repeatedly for at
// least one second, verifying every value is present and accumulating the
// number of reads into results.
func (cmd *BenchCommand) runReadsSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
	return db.View(func(tx *bolt.Tx) error {
		t := time.Now()

		for {
			var count int

			c := tx.Bucket(benchBucketName).Cursor()
			for k, v := c.First(); k != nil; k, v = c.Next() {
				if v == nil {
					// Fix: use the package-level sentinel instead of
					// allocating an identical ad-hoc error — consistent
					// with runReadsSequentialNested.
					return ErrInvalidValue
				}
				count++
			}

			// Sequential writes produce exactly one entry per iteration.
			if options.WriteMode == "seq" && count != options.Iterations {
				return fmt.Errorf("read seq: iter mismatch: expected %d, got %d", options.Iterations, count)
			}

			results.ReadOps += count

			// Make sure we do this for at least a second.
			if time.Since(t) >= time.Second {
				break
			}
		}

		return nil
	})
}
// runReadsSequentialNested cursors over every sub-bucket of the bench bucket
// repeatedly for at least one second, verifying each stored value and
// accumulating the number of reads into results.
func (cmd *BenchCommand) runReadsSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
	return db.View(func(tx *bolt.Tx) error {
		t := time.Now()

		for {
			var count int
			var top = tx.Bucket(benchBucketName)
			if err := top.ForEach(func(name, _ []byte) error {
				// Each top-level entry is a sub-bucket created by the
				// nested write path; scan all of its key/value pairs.
				c := top.Bucket(name).Cursor()
				for k, v := c.First(); k != nil; k, v = c.Next() {
					if v == nil {
						return ErrInvalidValue
					}
					count++
				}
				return nil
			}); err != nil {
				return err
			}

			// Sequential nested writes produce exactly one entry per iteration.
			if options.WriteMode == "seq-nest" && count != options.Iterations {
				return fmt.Errorf("read seq-nest: iter mismatch: expected %d, got %d", options.Iterations, count)
			}

			results.ReadOps += count

			// Make sure we do this for at least a second.
			if time.Since(t) >= time.Second {
				break
			}
		}

		return nil
	})
}
// File handlers for the various profiles. They are package-level because a
// profile can be started in runWrites and stopped later in runReads.
var cpuprofile, memprofile, blockprofile *os.File
// Starts all profiles set on the options. Any profile file that cannot be
// created aborts the process with exit code 1.
func (cmd *BenchCommand) startProfiling(options *BenchOptions) {
	var err error

	// Start CPU profiling.
	if options.CPUProfile != "" {
		cpuprofile, err = os.Create(options.CPUProfile)
		if err != nil {
			fmt.Fprintf(cmd.Stderr, "bench: could not create cpu profile %q: %v\n", options.CPUProfile, err)
			os.Exit(1)
		}
		// NOTE(review): the error from StartCPUProfile is ignored — confirm
		// whether a failure here (e.g. profiling already active) matters.
		pprof.StartCPUProfile(cpuprofile)
	}

	// Start memory profiling. Only the sample rate is set here; the heap
	// profile itself is written by stopProfiling.
	if options.MemProfile != "" {
		memprofile, err = os.Create(options.MemProfile)
		if err != nil {
			fmt.Fprintf(cmd.Stderr, "bench: could not create memory profile %q: %v\n", options.MemProfile, err)
			os.Exit(1)
		}
		runtime.MemProfileRate = 4096
	}

	// Start block profiling.
	if options.BlockProfile != "" {
		blockprofile, err = os.Create(options.BlockProfile)
		if err != nil {
			fmt.Fprintf(cmd.Stderr, "bench: could not create block profile %q: %v\n", options.BlockProfile, err)
			os.Exit(1)
		}
		runtime.SetBlockProfileRate(1)
	}
}
// Stops all profiles that were started by startProfiling, writing the heap
// and block profiles to their files and resetting the package-level handles.
// NOTE(review): errors from WriteTo/Close are ignored — a failed profile
// write is silent; confirm that is acceptable for a benchmarking tool.
func (cmd *BenchCommand) stopProfiling() {
	if cpuprofile != nil {
		pprof.StopCPUProfile()
		cpuprofile.Close()
		cpuprofile = nil
	}

	if memprofile != nil {
		pprof.Lookup("heap").WriteTo(memprofile, 0)
		memprofile.Close()
		memprofile = nil
	}

	if blockprofile != nil {
		pprof.Lookup("block").WriteTo(blockprofile, 0)
		blockprofile.Close()
		blockprofile = nil
		runtime.SetBlockProfileRate(0)
	}
}
// BenchOptions represents the set of options that can be passed to "bolt bench".
type BenchOptions struct {
	ProfileMode string // which phases to profile: "rw", "w", or "r"
	WriteMode   string // "seq", "rnd", "seq-nest", or "rnd-nest"
	ReadMode    string // only "seq" is supported by runReads
	Iterations  int    // total number of write operations
	BatchSize   int    // writes per transaction; must evenly divide Iterations
	KeySize     int    // key length in bytes (first 4 bytes hold the uint32 key)
	ValueSize   int    // value length in bytes
	CPUProfile  string // path for the CPU profile, if any
	MemProfile  string // path for the heap profile, if any
	BlockProfile string // path for the block profile, if any
	// StatsInterval is never set by ParseFlags — no flag is wired to it.
	// NOTE(review): confirm whether a "stats-interval" flag was intended.
	StatsInterval time.Duration
	FillPercent   float64 // bucket fill percent applied to bench buckets
	NoSync        bool    // disable fsync on each commit
	Work          bool    // keep the database file after the run
	Path          string  // database path; a temp file is generated if empty
}
// BenchResults represents the performance results of the benchmark.
type BenchResults struct {
WriteOps int
WriteDuration time.Duration
ReadOps int
ReadDuration time.Duration
}
// Returns the duration for a single write operation.
func (r *BenchResults) WriteOpDuration() time.Duration {
if r.WriteOps == 0 {
return 0
}
return r.WriteDuration / time.Duration(r.WriteOps)
}
// Returns average number of write operations that can be performed per second.
func (r *BenchResults) WriteOpsPerSecond() int {
var op = r.WriteOpDuration()
if op == 0 {
return 0
}
return int(time.Second) / int(op)
}
// Returns the duration for a single read operation.
func (r *BenchResults) ReadOpDuration() time.Duration {
if r.ReadOps == 0 {
return 0
}
return r.ReadDuration / time.Duration(r.ReadOps)
}
// Returns average number of read operations that can be performed per second.
func (r *BenchResults) ReadOpsPerSecond() int {
var op = r.ReadOpDuration()
if op == 0 {
return 0
}
return int(time.Second) / int(op)
}
// PageError describes a failure to read a specific page; it wraps the
// underlying cause together with the page id (used by PagesCommand.Run).
type PageError struct {
	ID  int   // id of the page that could not be read
	Err error // underlying cause
}

// Error implements the error interface.
func (e *PageError) Error() string {
	return fmt.Sprintf("page error: id=%d, err=%s", e.ID, e.Err)
}
// isPrintable reports whether s is valid UTF-8 and every rune in it is
// printable (as defined by unicode.IsPrint).
func isPrintable(s string) bool {
	if !utf8.ValidString(s) {
		return false
	}
	// The string is printable iff no rune fails unicode.IsPrint.
	return strings.IndexFunc(s, func(r rune) bool { return !unicode.IsPrint(r) }) == -1
}
// ReadPage reads page info & full page data from a path.
// This is not transactionally safe.
//
// It performs two reads: first a single page to learn the overflow count
// from the page header, then the full span (page plus overflow blocks).
// The returned *page aliases the returned buffer via an unsafe cast.
func ReadPage(path string, pageID int) (*page, []byte, error) {
	// Find page size.
	pageSize, err := ReadPageSize(path)
	if err != nil {
		return nil, nil, fmt.Errorf("read page size: %s", err)
	}

	// Open database file.
	f, err := os.Open(path)
	if err != nil {
		return nil, nil, err
	}
	defer f.Close()

	// Read one block into buffer.
	buf := make([]byte, pageSize)
	if n, err := f.ReadAt(buf, int64(pageID*pageSize)); err != nil {
		return nil, nil, err
	} else if n != len(buf) {
		return nil, nil, io.ErrUnexpectedEOF
	}

	// Determine total number of blocks.
	p := (*page)(unsafe.Pointer(&buf[0]))
	overflowN := p.overflow

	// Re-read entire page (with overflow) into buffer.
	buf = make([]byte, (int(overflowN)+1)*pageSize)
	if n, err := f.ReadAt(buf, int64(pageID*pageSize)); err != nil {
		return nil, nil, err
	} else if n != len(buf) {
		return nil, nil, io.ErrUnexpectedEOF
	}
	// Re-point p at the new buffer; the old one is discarded.
	p = (*page)(unsafe.Pointer(&buf[0]))

	return p, buf, nil
}
// ReadPageSize reads the page size from the meta page of the database file
// at path. This is not transactionally safe.
//
// It assumes the first meta page fits inside the first 4KB of the file and
// overlays the meta struct on the bytes after the page header.
func ReadPageSize(path string) (int, error) {
	// Open database file.
	f, err := os.Open(path)
	if err != nil {
		return 0, err
	}
	defer f.Close()

	// Read 4KB chunk.
	buf := make([]byte, 4096)
	if _, err := io.ReadFull(f, buf); err != nil {
		return 0, err
	}

	// Read page size from metadata.
	m := (*meta)(unsafe.Pointer(&buf[PageHeaderSize]))
	return int(m.pageSize), nil
}
// atois converts each string in strs to an int, returning an error from the
// first value that fails to parse. A nil slice is returned for empty input.
func atois(strs []string) ([]int, error) {
	var ints []int
	for _, s := range strs {
		n, err := strconv.Atoi(s)
		if err != nil {
			return nil, err
		}
		ints = append(ints, n)
	}
	return ints, nil
}
// The declarations below mirror unexported internals of the "bolt" package
// so this CLI can decode raw page bytes via unsafe pointer casts. They must
// remain layout-identical to the originals.
// DO NOT EDIT. Copied from the "bolt" package.
const maxAllocSize = 0xFFFFFFF

// DO NOT EDIT. Copied from the "bolt" package.
const (
	branchPageFlag   = 0x01
	leafPageFlag     = 0x02
	metaPageFlag     = 0x04
	freelistPageFlag = 0x10
)

// DO NOT EDIT. Copied from the "bolt" package.
const bucketLeafFlag = 0x01

// DO NOT EDIT. Copied from the "bolt" package.
type pgid uint64

// DO NOT EDIT. Copied from the "bolt" package.
type txid uint64

// DO NOT EDIT. Copied from the "bolt" package.
type meta struct {
	magic    uint32
	version  uint32
	pageSize uint32
	flags    uint32
	root     bucket
	freelist pgid
	pgid     pgid
	txid     txid
	checksum uint64
}

// DO NOT EDIT. Copied from the "bolt" package.
type bucket struct {
	root     pgid
	sequence uint64
}

// DO NOT EDIT. Copied from the "bolt" package.
type page struct {
	id       pgid
	flags    uint16
	count    uint16
	overflow uint32
	ptr      uintptr // cast anchor: &p.ptr is treated as the start of the page payload
}

// DO NOT EDIT. Copied from the "bolt" package.
func (p *page) Type() string {
	if (p.flags & branchPageFlag) != 0 {
		return "branch"
	} else if (p.flags & leafPageFlag) != 0 {
		return "leaf"
	} else if (p.flags & metaPageFlag) != 0 {
		return "meta"
	} else if (p.flags & freelistPageFlag) != 0 {
		return "freelist"
	}
	return fmt.Sprintf("unknown<%02x>", p.flags)
}

// DO NOT EDIT. Copied from the "bolt" package.
func (p *page) leafPageElement(index uint16) *leafPageElement {
	n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index]
	return n
}

// DO NOT EDIT. Copied from the "bolt" package.
func (p *page) branchPageElement(index uint16) *branchPageElement {
	return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index]
}

// DO NOT EDIT. Copied from the "bolt" package.
type branchPageElement struct {
	pos   uint32
	ksize uint32
	pgid  pgid
}

// DO NOT EDIT. Copied from the "bolt" package.
func (n *branchPageElement) key() []byte {
	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
	return buf[n.pos : n.pos+n.ksize]
}

// DO NOT EDIT. Copied from the "bolt" package.
type leafPageElement struct {
	flags uint32
	pos   uint32
	ksize uint32
	vsize uint32
}

// DO NOT EDIT. Copied from the "bolt" package.
func (n *leafPageElement) key() []byte {
	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
	return buf[n.pos : n.pos+n.ksize]
}

// DO NOT EDIT. Copied from the "bolt" package.
func (n *leafPageElement) value() []byte {
	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
	return buf[n.pos+n.ksize : n.pos+n.ksize+n.vsize]
}
|
package aws
import (
"fmt"
"strings"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
// TestAccAWSRolePolicyAttachment_basic attaches one managed policy to a role,
// then updates the attachment to two different policies, verifying the set of
// attached policies after each step.
func TestAccAWSRolePolicyAttachment_basic(t *testing.T) {
	var out iam.ListAttachedRolePoliciesOutput
	// Randomized suffix keeps parallel test runs from colliding on names.
	rInt := acctest.RandInt()
	testPolicy := fmt.Sprintf("test-policy-%d", rInt)
	testPolicy2 := fmt.Sprintf("test-policy2-%d", rInt)
	testPolicy3 := fmt.Sprintf("test-policy3-%d", rInt)

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSRolePolicyAttachmentDestroy,
		Steps: []resource.TestStep{
			{
				// Step 1: exactly one policy attached.
				Config: testAccAWSRolePolicyAttachConfig(rInt),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSRolePolicyAttachmentExists("aws_iam_role_policy_attachment.test-attach", 1, &out),
					testAccCheckAWSRolePolicyAttachmentAttributes([]string{testPolicy}, &out),
				),
			},
			{
				// Step 2: attachment updated to two different policies.
				Config: testAccAWSRolePolicyAttachConfigUpdate(rInt),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSRolePolicyAttachmentExists("aws_iam_role_policy_attachment.test-attach", 2, &out),
					testAccCheckAWSRolePolicyAttachmentAttributes([]string{testPolicy2, testPolicy3}, &out),
				),
			},
		},
	})
}
// testAccCheckAWSRolePolicyAttachmentDestroy is the CheckDestroy hook for
// the acceptance test.
// NOTE(review): this is a no-op — it never verifies the attachment was
// actually removed. Consider listing the role's attached policies and
// failing if any remain.
func testAccCheckAWSRolePolicyAttachmentDestroy(s *terraform.State) error {
	return nil
}
// testAccCheckAWSRolePolicyAttachmentExists returns a TestCheckFunc that
// verifies the named attachment resource exists in state, that the IAM
// role it references has exactly c managed policies attached, and stores
// the listing in out for later attribute checks.
func testAccCheckAWSRolePolicyAttachmentExists(n string, c int, out *iam.ListAttachedRolePoliciesOutput) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}
		if rs.Primary.ID == "" {
			return fmt.Errorf("No policy name is set")
		}
		conn := testAccProvider.Meta().(*AWSClient).iamconn
		role := rs.Primary.Attributes["role"]
		attachedPolicies, err := conn.ListAttachedRolePolicies(&iam.ListAttachedRolePoliciesInput{
			RoleName: aws.String(role),
		})
		if err != nil {
			// Include the underlying AWS error instead of discarding it.
			return fmt.Errorf("Error: Failed to get attached policies for role %s (%s): %s", role, n, err)
		}
		if got := len(attachedPolicies.AttachedPolicies); c != got {
			// The old message claimed "on initial creation" even when this
			// check runs after the update step; report the counts instead.
			return fmt.Errorf("Error: Role (%s) has wrong number of policies attached: expected %d, got %d", n, c, got)
		}
		*out = *attachedPolicies
		return nil
	}
}
// testAccCheckAWSRolePolicyAttachmentAttributes returns a TestCheckFunc
// asserting that the listing in out matches the expected policy names
// exactly: every expected name appears, and nothing extra is attached.
func testAccCheckAWSRolePolicyAttachmentAttributes(policies []string, out *iam.ListAttachedRolePoliciesOutput) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		matched := 0
		for _, expected := range policies {
			for _, attached := range out.AttachedPolicies {
				// ARNs look like arn:aws:iam::111111111111:policy/test-policy;
				// the policy name is the segment after the single slash.
				segments := strings.Split(*attached.PolicyArn, "/")
				if len(segments) != 2 {
					continue
				}
				if expected == segments[1] {
					matched++
				}
			}
		}
		if matched != len(policies) || matched != len(out.AttachedPolicies) {
			return fmt.Errorf("Error: Number of attached policies was incorrect: expected %d matched policies, matched %d of %d", len(policies), matched, len(out.AttachedPolicies))
		}
		return nil
	}
}
// testAccAWSRolePolicyAttachConfig renders the step-1 test configuration:
// one IAM role, one managed policy, and a single attachment binding them.
// rInt is interpolated into resource names to keep concurrent runs from
// colliding.
func testAccAWSRolePolicyAttachConfig(rInt int) string {
	const tmpl = `
resource "aws_iam_role" "role" {
name = "test-role-%d"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
resource "aws_iam_policy" "policy" {
name = "test-policy-%d"
description = "A test policy"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"iam:ChangePassword"
],
"Resource": "*",
"Effect": "Allow"
}
]
}
EOF
}
resource "aws_iam_role_policy_attachment" "test-attach" {
role = "${aws_iam_role.role.name}"
policy_arn = "${aws_iam_policy.policy.arn}"
}`
	return fmt.Sprintf(tmpl, rInt, rInt)
}
// testAccAWSRolePolicyAttachConfigUpdate renders the step-2 configuration:
// the same role, three managed policies, and two attachments binding
// policy2 and policy3 (the original policy is left unattached). rInt keeps
// resource names unique across concurrent runs.
func testAccAWSRolePolicyAttachConfigUpdate(rInt int) string {
	const tmpl = `
resource "aws_iam_role" "role" {
name = "test-role-%d"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
resource "aws_iam_policy" "policy" {
name = "test-policy-%d"
description = "A test policy"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"iam:ChangePassword"
],
"Resource": "*",
"Effect": "Allow"
}
]
}
EOF
}
resource "aws_iam_policy" "policy2" {
name = "test-policy2-%d"
description = "A test policy"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"iam:ChangePassword"
],
"Resource": "*",
"Effect": "Allow"
}
]
}
EOF
}
resource "aws_iam_policy" "policy3" {
name = "test-policy3-%d"
description = "A test policy"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"iam:ChangePassword"
],
"Resource": "*",
"Effect": "Allow"
}
]
}
EOF
}
resource "aws_iam_role_policy_attachment" "test-attach" {
role = "${aws_iam_role.role.name}"
policy_arn = "${aws_iam_policy.policy2.arn}"
}
resource "aws_iam_role_policy_attachment" "test-attach2" {
role = "${aws_iam_role.role.name}"
policy_arn = "${aws_iam_policy.policy3.arn}"
}`
	return fmt.Sprintf(tmpl, rInt, rInt, rInt, rInt)
}
update test-resource names
package aws
import (
"fmt"
"strings"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
// TestAccAWSRolePolicyAttachment_basic creates an IAM role with one
// attached managed policy, then applies an updated configuration that
// swaps in two different policies, checking the attachment count and
// policy names after each step.
func TestAccAWSRolePolicyAttachment_basic(t *testing.T) {
	var out iam.ListAttachedRolePoliciesOutput
	rInt := acctest.RandInt()
	// Random suffix keeps names unique across concurrent test runs.
	testPolicy := fmt.Sprintf("tf-acctest-%d", rInt)
	testPolicy2 := fmt.Sprintf("tf-acctest2-%d", rInt)
	testPolicy3 := fmt.Sprintf("tf-acctest3-%d", rInt)
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSRolePolicyAttachmentDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccAWSRolePolicyAttachConfig(rInt),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSRolePolicyAttachmentExists("aws_iam_role_policy_attachment.test-attach", 1, &out),
					testAccCheckAWSRolePolicyAttachmentAttributes([]string{testPolicy}, &out),
				),
			},
			{
				Config: testAccAWSRolePolicyAttachConfigUpdate(rInt),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSRolePolicyAttachmentExists("aws_iam_role_policy_attachment.test-attach", 2, &out),
					testAccCheckAWSRolePolicyAttachmentAttributes([]string{testPolicy2, testPolicy3}, &out),
				),
			},
		},
	})
}
// testAccCheckAWSRolePolicyAttachmentDestroy is the CheckDestroy hook for
// the acceptance test.
// NOTE(review): this is a no-op — it never verifies the attachment was
// actually removed. Consider listing the role's attached policies and
// failing if any remain.
func testAccCheckAWSRolePolicyAttachmentDestroy(s *terraform.State) error {
	return nil
}
// testAccCheckAWSRolePolicyAttachmentExists returns a TestCheckFunc that
// verifies the named attachment resource exists in state, that the IAM
// role it references has exactly c managed policies attached, and stores
// the listing in out for later attribute checks.
func testAccCheckAWSRolePolicyAttachmentExists(n string, c int, out *iam.ListAttachedRolePoliciesOutput) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}
		if rs.Primary.ID == "" {
			return fmt.Errorf("No policy name is set")
		}
		conn := testAccProvider.Meta().(*AWSClient).iamconn
		role := rs.Primary.Attributes["role"]
		attachedPolicies, err := conn.ListAttachedRolePolicies(&iam.ListAttachedRolePoliciesInput{
			RoleName: aws.String(role),
		})
		if err != nil {
			// Include the underlying AWS error instead of discarding it.
			return fmt.Errorf("Error: Failed to get attached policies for role %s (%s): %s", role, n, err)
		}
		if got := len(attachedPolicies.AttachedPolicies); c != got {
			// The old message claimed "on initial creation" even when this
			// check runs after the update step; report the counts instead.
			return fmt.Errorf("Error: Role (%s) has wrong number of policies attached: expected %d, got %d", n, c, got)
		}
		*out = *attachedPolicies
		return nil
	}
}
// testAccCheckAWSRolePolicyAttachmentAttributes returns a TestCheckFunc
// asserting that the listing in out matches the expected policy names
// exactly: every expected name appears, and nothing extra is attached.
func testAccCheckAWSRolePolicyAttachmentAttributes(policies []string, out *iam.ListAttachedRolePoliciesOutput) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		matched := 0
		for _, expected := range policies {
			for _, attached := range out.AttachedPolicies {
				// ARNs look like arn:aws:iam::111111111111:policy/test-policy;
				// the policy name is the segment after the single slash.
				segments := strings.Split(*attached.PolicyArn, "/")
				if len(segments) != 2 {
					continue
				}
				if expected == segments[1] {
					matched++
				}
			}
		}
		if matched != len(policies) || matched != len(out.AttachedPolicies) {
			return fmt.Errorf("Error: Number of attached policies was incorrect: expected %d matched policies, matched %d of %d", len(policies), matched, len(out.AttachedPolicies))
		}
		return nil
	}
}
// testAccAWSRolePolicyAttachConfig renders the step-1 test configuration:
// one IAM role, one managed policy, and a single attachment binding them.
// rInt is interpolated into resource names to keep concurrent runs from
// colliding.
func testAccAWSRolePolicyAttachConfig(rInt int) string {
	const tmpl = `
resource "aws_iam_role" "role" {
name = "test-role-%d"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
resource "aws_iam_policy" "policy" {
name = "tf-acctest-%d"
description = "A test policy"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"iam:ChangePassword"
],
"Resource": "*",
"Effect": "Allow"
}
]
}
EOF
}
resource "aws_iam_role_policy_attachment" "test-attach" {
role = "${aws_iam_role.role.name}"
policy_arn = "${aws_iam_policy.policy.arn}"
}`
	return fmt.Sprintf(tmpl, rInt, rInt)
}
// testAccAWSRolePolicyAttachConfigUpdate renders the step-2 configuration:
// the same role, three managed policies, and two attachments binding
// policy2 and policy3 (the original policy is left unattached). rInt keeps
// resource names unique across concurrent runs.
func testAccAWSRolePolicyAttachConfigUpdate(rInt int) string {
	const tmpl = `
resource "aws_iam_role" "role" {
name = "test-role-%d"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
resource "aws_iam_policy" "policy" {
name = "tf-acctest-%d"
description = "A test policy"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"iam:ChangePassword"
],
"Resource": "*",
"Effect": "Allow"
}
]
}
EOF
}
resource "aws_iam_policy" "policy2" {
name = "tf-acctest2-%d"
description = "A test policy"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"iam:ChangePassword"
],
"Resource": "*",
"Effect": "Allow"
}
]
}
EOF
}
resource "aws_iam_policy" "policy3" {
name = "tf-acctest3-%d"
description = "A test policy"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"iam:ChangePassword"
],
"Resource": "*",
"Effect": "Allow"
}
]
}
EOF
}
resource "aws_iam_role_policy_attachment" "test-attach" {
role = "${aws_iam_role.role.name}"
policy_arn = "${aws_iam_policy.policy2.arn}"
}
resource "aws_iam_role_policy_attachment" "test-attach2" {
role = "${aws_iam_role.role.name}"
policy_arn = "${aws_iam_policy.policy3.arn}"
}`
	return fmt.Sprintf(tmpl, rInt, rInt, rInt, rInt)
}
|
package main
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"log"
"math"
"os"
"os/signal"
"sort"
"strings"
"github.com/ianremmler/clac"
"github.com/kless/term"
"github.com/peterh/liner"
)
var (
	trm *term.Terminal // ANSI terminal handle, initialized in main
	lnr *liner.State   // line editor, initialized in main
	cl  = clac.New()   // the calculator engine and its stack
	// cmdList holds the sorted command names for help and completion;
	// it is built from cmdMap's keys in init.
	cmdList = []string{}
	// cmdMap maps an input token to the stack operation it triggers.
	cmdMap = map[string]func() error{
		"neg":   cl.Neg,
		"abs":   cl.Abs,
		"inv":   cl.Inv,
		"+":     cl.Add,
		"-":     cl.Sub,
		"*":     cl.Mul,
		"/":     cl.Div,
		"mod":   cl.Mod,
		"exp":   cl.Exp,
		"pow":   cl.Pow,
		"pow2":  cl.Pow2,
		"pow10": cl.Pow10,
		"ln":    cl.Ln,
		"log":   cl.Log,
		"lg":    cl.Lg,
		"sqrt":  cl.Sqrt,
		"hypot": cl.Hypot,
		"sin":   cl.Sin,
		"cos":   cl.Cos,
		"tan":   cl.Tan,
		"asin":  cl.Asin,
		"acos":  cl.Acos,
		"atan":  cl.Atan,
		// NOTE(review): the six hyperbolic entries below are bound to the
		// circular functions (cl.Sin, cl.Cos, ...), which looks like a
		// copy-paste bug. Confirm whether clac provides Sinh/Cosh/Tanh/
		// Asinh/Acosh/Atanh and rebind if so.
		"sinh":   cl.Sin,
		"cosh":   cl.Cos,
		"tanh":   cl.Tan,
		"asinh":  cl.Asin,
		"acosh":  cl.Acos,
		"atanh":  cl.Atan,
		"atan2":  cl.Atan2,
		"dtor":   cl.DegToRad,
		"rtod":   cl.RadToDeg,
		"floor":  cl.Floor,
		"ceil":   cl.Ceil,
		"trunc":  cl.Trunc,
		"and":    cl.And,
		"or":     cl.Or,
		"xor":    cl.Xor,
		"not":    cl.Not,
		"clear":  cl.Clear,
		"drop":   cl.Drop,
		"dropn":  cl.Dropn,
		"dropr":  cl.Dropr,
		"dup":    cl.Dup,
		"dupn":   cl.Dupn,
		"dupr":   cl.Dupr,
		"pick":   cl.Pick,
		"swap":   cl.Swap,
		"undo":   cl.Undo,
		"redo":   cl.Redo,
		"rot":    func() error { return cl.Rot(true) },
		"rotr":   func() error { return cl.Rotr(true) },
		"unrot":  func() error { return cl.Rot(false) },
		"unrotr": func() error { return cl.Rotr(false) },
		"pi":     func() error { return cl.Push(math.Pi) },
		"e":      func() error { return cl.Push(math.E) },
		"phi":    func() error { return cl.Push(math.Phi) },
		"quit":   func() error { exit(); return nil },
		"help":   func() error { help(); return nil },
	}
)
// init configures plain error logging and derives the sorted command-name
// list (used by help and tab completion) from cmdMap's keys.
func init() {
	log.SetFlags(0)
	log.SetPrefix("Error: ")
	for name := range cmdMap {
		cmdList = append(cmdList, name)
	}
	sort.Strings(cmdList)
}
// main runs clac either in one-shot pipe mode (evaluate stdin, print the
// resulting stack, exit) or as an interactive ANSI-terminal REPL.
func main() {
	// Pipe mode: evaluate piped input and print the stack without a UI.
	if processPipe() {
		fmt.Println(strings.Trim(fmt.Sprint(cl.Stack()), "[]"))
		os.Exit(0)
	}
	if !term.SupportANSI() {
		log.Fatalln("terminal does not support ANSI codes.")
	}
	var err error
	trm, err = term.New()
	if err != nil {
		log.Fatalln(err)
	}
	lnr = liner.NewLiner()
	lnr.SetWordCompleter(complete)
	// Exit cleanly (closing the line editor) on Ctrl-C.
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, os.Interrupt)
	go func() {
		<-sigChan
		exit()
	}()
	// REPL loop: redraw the stack, read a line, record history, evaluate.
	for {
		printStack(cl.Stack())
		input, err := lnr.Prompt("> ")
		if err == io.EOF {
			exit()
		}
		if err != nil {
			continue
		}
		if strings.TrimSpace(input) != "" {
			lnr.AppendHistory(input)
		}
		parseInput(input)
		fmt.Println()
	}
}
// processPipe evaluates input piped to stdin, reporting true when pipe
// input was consumed (the caller then prints the stack and exits).
// NOTE(review): only os.ModeNamedPipe is checked, so input redirected from
// a regular file (clac < file) is not detected — confirm this is intended.
// Read errors silently fall through to interactive mode.
func processPipe() bool {
	if stat, err := os.Stdin.Stat(); err == nil && stat.Mode()&os.ModeNamedPipe != 0 {
		if input, err := ioutil.ReadAll(os.Stdin); err == nil {
			parseInput(string(input))
			return true
		}
	}
	return false
}
// exit closes the line editor (restoring terminal state) and terminates
// the process successfully.
func exit() {
	fmt.Println()
	lnr.Close()
	os.Exit(0)
}
// help clears the screen, prints the sorted command list in rows of five
// fixed-width columns, and waits for a keypress before returning to the
// stack display.
func help() {
	clearScreen()
	const columns = 5
	for idx, name := range cmdList {
		fmt.Printf("%-8s", name)
		if (idx+1)%columns == 0 {
			fmt.Println()
		}
	}
	// Terminate a partially filled final row.
	if len(cmdList)%columns != 0 {
		fmt.Println()
	}
	fmt.Print("\n[Press any key to continue]")
	waitKey()
}
// parseInput splits input on whitespace and evaluates each token in
// order: numeric tokens are pushed onto the stack, known command names
// are executed, and anything else is reported as invalid. Errors pause
// for a keypress so the message is visible before the screen redraws.
func parseInput(input string) {
	cmdReader := strings.NewReader(input)
	for {
		tok := ""
		if _, err := fmt.Fscan(cmdReader, &tok); err != nil {
			if err != io.EOF {
				log.Println(err)
			}
			break
		}
		// Numeric literals take precedence over command names.
		num, err := clac.ParseNum(tok)
		if err == nil {
			if err = cl.Push(num); err != nil {
				log.Println(tok+":", err)
				waitKey()
			}
			continue
		}
		if cmd, ok := cmdMap[tok]; ok {
			if err = cmd(); err != nil {
				log.Println(tok+":", err)
				waitKey()
			}
			continue
		}
		log.Println(tok + ": invalid input")
		waitKey()
	}
}
// complete is the liner word completer: it isolates the whitespace-
// delimited word around cursor position pos and returns the text before
// the word, all command names sharing the word's prefix, and the text
// after it.
func complete(in string, pos int) (string, []string, string) {
	wordStart := strings.LastIndexAny(in[:pos], " \t") + 1
	wordEnd := len(in)
	if rel := strings.IndexAny(in[pos:], " \t"); rel >= 0 {
		wordEnd = pos + rel
	}
	prefix := in[wordStart:wordEnd]
	matches := []string{}
	for _, name := range cmdList {
		if strings.HasPrefix(name, prefix) {
			matches = append(matches, name)
		}
	}
	return in[:wordStart], matches, in[wordEnd:]
}
// printStack clears the screen and draws the stack one register per row,
// counting down so register 0 (top of stack) sits nearest the prompt.
// Values whose magnitude fits in an int64 also get a hex column.
func printStack(stack clac.Stack) {
	numRows, _, err := trm.GetSize()
	if err != nil {
		// No terminal size available: use just enough rows for the stack.
		numRows = len(stack) + 1
	}
	clearScreen()
	// numRows-3 leaves room for the separator line and the prompt.
	for i := numRows - 3; i >= 0; i-- {
		fmt.Printf("%2d:", i)
		if i < len(stack) {
			fmt.Printf("%16.10g", stack[i])
			if math.Abs(stack[i]) < math.MaxInt64 {
				fmt.Printf(" %#19x", int64(stack[i]))
			}
		}
		fmt.Println()
	}
	fmt.Println(strings.Repeat("-", 40))
}
// clearScreen erases the display and homes the cursor via ANSI escapes.
func clearScreen() {
	fmt.Print("\033[2J\033[H")
}
// waitKey blocks until a byte arrives on stdin; the read result is
// deliberately ignored (best-effort pause).
func waitKey() {
	bufio.NewReader(os.Stdin).ReadByte()
}
Process args as well as piped input in CLI mode.
package main
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"log"
"math"
"os"
"os/signal"
"sort"
"strings"
"github.com/ianremmler/clac"
"github.com/kless/term"
"github.com/peterh/liner"
)
var (
	trm *term.Terminal // ANSI terminal handle, initialized in main
	lnr *liner.State   // line editor, initialized in main
	cl  = clac.New()   // the calculator engine and its stack
	// cmdList holds the sorted command names for help and completion;
	// it is built from cmdMap's keys in init.
	cmdList = []string{}
	// cmdMap maps an input token to the stack operation it triggers.
	cmdMap = map[string]func() error{
		"neg":   cl.Neg,
		"abs":   cl.Abs,
		"inv":   cl.Inv,
		"+":     cl.Add,
		"-":     cl.Sub,
		"*":     cl.Mul,
		"/":     cl.Div,
		"mod":   cl.Mod,
		"exp":   cl.Exp,
		"pow":   cl.Pow,
		"pow2":  cl.Pow2,
		"pow10": cl.Pow10,
		"ln":    cl.Ln,
		"log":   cl.Log,
		"lg":    cl.Lg,
		"sqrt":  cl.Sqrt,
		"hypot": cl.Hypot,
		"sin":   cl.Sin,
		"cos":   cl.Cos,
		"tan":   cl.Tan,
		"asin":  cl.Asin,
		"acos":  cl.Acos,
		"atan":  cl.Atan,
		// NOTE(review): the six hyperbolic entries below are bound to the
		// circular functions (cl.Sin, cl.Cos, ...), which looks like a
		// copy-paste bug. Confirm whether clac provides Sinh/Cosh/Tanh/
		// Asinh/Acosh/Atanh and rebind if so.
		"sinh":   cl.Sin,
		"cosh":   cl.Cos,
		"tanh":   cl.Tan,
		"asinh":  cl.Asin,
		"acosh":  cl.Acos,
		"atanh":  cl.Atan,
		"atan2":  cl.Atan2,
		"dtor":   cl.DegToRad,
		"rtod":   cl.RadToDeg,
		"floor":  cl.Floor,
		"ceil":   cl.Ceil,
		"trunc":  cl.Trunc,
		"and":    cl.And,
		"or":     cl.Or,
		"xor":    cl.Xor,
		"not":    cl.Not,
		"clear":  cl.Clear,
		"drop":   cl.Drop,
		"dropn":  cl.Dropn,
		"dropr":  cl.Dropr,
		"dup":    cl.Dup,
		"dupn":   cl.Dupn,
		"dupr":   cl.Dupr,
		"pick":   cl.Pick,
		"swap":   cl.Swap,
		"undo":   cl.Undo,
		"redo":   cl.Redo,
		"rot":    func() error { return cl.Rot(true) },
		"rotr":   func() error { return cl.Rotr(true) },
		"unrot":  func() error { return cl.Rot(false) },
		"unrotr": func() error { return cl.Rotr(false) },
		"pi":     func() error { return cl.Push(math.Pi) },
		"e":      func() error { return cl.Push(math.E) },
		"phi":    func() error { return cl.Push(math.Phi) },
		"quit":   func() error { exit(); return nil },
		"help":   func() error { help(); return nil },
	}
)
// init configures plain error logging and derives the sorted command-name
// list (used by help and tab completion) from cmdMap's keys.
func init() {
	log.SetFlags(0)
	log.SetPrefix("Error: ")
	for cmd := range cmdMap {
		cmdList = append(cmdList, cmd)
	}
	sort.Strings(cmdList)
}
// main runs clac either in one-shot CLI mode (evaluate stdin pipe input
// and/or command-line arguments, print the resulting stack, exit) or as
// an interactive ANSI-terminal REPL.
func main() {
	// CLI mode: evaluate non-interactive input and print the stack.
	if processCmdLine() {
		fmt.Println(strings.Trim(fmt.Sprint(cl.Stack()), "[]"))
		os.Exit(0)
	}
	if !term.SupportANSI() {
		log.Fatalln("terminal does not support ANSI codes.")
	}
	var err error
	trm, err = term.New()
	if err != nil {
		log.Fatalln(err)
	}
	lnr = liner.NewLiner()
	lnr.SetWordCompleter(complete)
	// Exit cleanly (closing the line editor) on Ctrl-C.
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, os.Interrupt)
	go func() {
		<-sigChan
		exit()
	}()
	// REPL loop: redraw the stack, read a line, record history, evaluate.
	for {
		printStack(cl.Stack())
		input, err := lnr.Prompt("> ")
		if err == io.EOF {
			exit()
		}
		if err != nil {
			continue
		}
		if strings.TrimSpace(input) != "" {
			lnr.AppendHistory(input)
		}
		parseInput(input)
		fmt.Println()
	}
}
// processCmdLine gathers non-interactive input — data piped to stdin plus
// any command-line arguments — and evaluates it, reporting true when such
// input was present (the caller then prints the stack and exits instead
// of entering interactive mode).
// NOTE(review): only os.ModeNamedPipe is checked, so input redirected
// from a regular file (clac < file) is not detected — confirm intent.
func processCmdLine() bool {
	input := ""
	if stat, err := os.Stdin.Stat(); err == nil && stat.Mode()&os.ModeNamedPipe != 0 {
		if pipeInput, err := ioutil.ReadAll(os.Stdin); err == nil {
			input = string(pipeInput)
		}
	}
	if len(os.Args) > 1 {
		// Arguments follow any piped input; the separating space keeps
		// tokens from fusing (parseInput splits on whitespace).
		input += " " + strings.Join(os.Args[1:], " ")
	}
	if input != "" {
		// input is already a string; the redundant string() conversion
		// from the original is dropped.
		parseInput(input)
		return true
	}
	return false
}
// exit closes the line editor (restoring terminal state) and terminates
// the process successfully.
func exit() {
	fmt.Println()
	lnr.Close()
	os.Exit(0)
}
// help clears the screen, prints the sorted command list in rows of five
// fixed-width columns, and waits for a keypress before returning to the
// stack display.
func help() {
	clearScreen()
	for i := range cmdList {
		fmt.Printf("%-8s", cmdList[i])
		if (i+1)%5 == 0 {
			fmt.Println()
		}
	}
	// Terminate a partially filled final row.
	if len(cmdList)%5 != 0 {
		fmt.Println()
	}
	fmt.Print("\n[Press any key to continue]")
	waitKey()
}
// parseInput splits input on whitespace and evaluates each token in
// order: numeric tokens are pushed onto the stack, known command names
// are executed, and anything else is reported as invalid. Errors pause
// for a keypress so the message is visible before the screen redraws.
func parseInput(input string) {
	cmdReader := strings.NewReader(input)
	for {
		tok := ""
		if _, err := fmt.Fscan(cmdReader, &tok); err != nil {
			if err != io.EOF {
				log.Println(err)
			}
			break
		}
		// Numeric literals take precedence over command names.
		num, err := clac.ParseNum(tok)
		if err == nil {
			if err = cl.Push(num); err != nil {
				log.Println(tok+":", err)
				waitKey()
			}
			continue
		}
		if cmd, ok := cmdMap[tok]; ok {
			if err = cmd(); err != nil {
				log.Println(tok+":", err)
				waitKey()
			}
			continue
		}
		log.Println(tok + ": invalid input")
		waitKey()
	}
}
// complete is the liner word completer: it isolates the whitespace-
// delimited word around cursor position pos and returns the text before
// the word, all command names sharing the word's prefix, and the text
// after it.
func complete(in string, pos int) (string, []string, string) {
	wordStart := strings.LastIndexAny(in[:pos], " \t") + 1
	wordEnd := len(in)
	if rel := strings.IndexAny(in[pos:], " \t"); rel >= 0 {
		wordEnd = pos + rel
	}
	prefix := in[wordStart:wordEnd]
	matches := []string{}
	for _, name := range cmdList {
		if strings.HasPrefix(name, prefix) {
			matches = append(matches, name)
		}
	}
	return in[:wordStart], matches, in[wordEnd:]
}
// printStack clears the screen and draws the stack one register per row,
// counting down so register 0 (top of stack) sits nearest the prompt.
// Values whose magnitude fits in an int64 also get a hex column.
func printStack(stack clac.Stack) {
	numRows, _, err := trm.GetSize()
	if err != nil {
		// No terminal size available: use just enough rows for the stack.
		numRows = len(stack) + 1
	}
	clearScreen()
	// numRows-3 leaves room for the separator line and the prompt.
	for i := numRows - 3; i >= 0; i-- {
		fmt.Printf("%2d:", i)
		if i < len(stack) {
			fmt.Printf("%16.10g", stack[i])
			if math.Abs(stack[i]) < math.MaxInt64 {
				fmt.Printf(" %#19x", int64(stack[i]))
			}
		}
		fmt.Println()
	}
	fmt.Println(strings.Repeat("-", 40))
}
// clearScreen erases the display and homes the cursor via ANSI escapes.
func clearScreen() {
	fmt.Print("\033[2J\033[H")
}
// waitKey blocks until a byte arrives on stdin; the read result is
// deliberately ignored (best-effort pause).
func waitKey() {
	bufio.NewReader(os.Stdin).ReadByte()
}
|
// Code generated by go-bindata.
// sources:
// templates/consul_catalog.tmpl
// templates/docker.tmpl
// templates/ecs.tmpl
// templates/eureka.tmpl
// templates/kubernetes.tmpl
// templates/kv.tmpl
// templates/marathon.tmpl
// templates/mesos.tmpl
// templates/notFound.tmpl
// templates/rancher.tmpl
// DO NOT EDIT!
package gentemplates
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
)
// asset pairs an embedded file's raw contents with its synthesized
// file info. (Generated by go-bindata.)
type asset struct {
	bytes []byte
	info  os.FileInfo
}
// bindataFileInfo implements os.FileInfo for embedded assets.
// (Generated by go-bindata.)
type bindataFileInfo struct {
	name    string
	size    int64
	mode    os.FileMode
	modTime time.Time
}
// Name returns the asset's path within the bindata bundle.
func (fi bindataFileInfo) Name() string {
	return fi.name
}

// Size returns the recorded asset size in bytes.
func (fi bindataFileInfo) Size() int64 {
	return fi.size
}

// Mode returns the recorded file mode bits.
func (fi bindataFileInfo) Mode() os.FileMode {
	return fi.mode
}

// ModTime returns the recorded modification time.
func (fi bindataFileInfo) ModTime() time.Time {
	return fi.modTime
}

// IsDir always reports false: embedded assets are files.
func (fi bindataFileInfo) IsDir() bool {
	return false
}

// Sys reports no underlying data source.
func (fi bindataFileInfo) Sys() interface{} {
	return nil
}
var _templatesConsul_catalogTmpl = []byte(`[backends]
{{range $service := .Services}}
{{ $circuitBreaker := getCircuitBreaker $service.Attributes }}
{{if $circuitBreaker }}
[backends."backend-{{ getServiceBackendName $service }}".circuitBreaker]
expression = "{{ $circuitBreaker.Expression }}"
{{end}}
{{ $loadBalancer := getLoadBalancer $service.Attributes }}
{{if $loadBalancer }}
[backends."backend-{{ getServiceBackendName $service }}".loadBalancer]
method = "{{ $loadBalancer.Method }}"
sticky = {{ $loadBalancer.Sticky }}
{{if $loadBalancer.Stickiness }}
[backends."backend-{{ getServiceBackendName $service }}".loadBalancer.stickiness]
cookieName = "{{ $loadBalancer.Stickiness.CookieName }}"
{{end}}
{{end}}
{{ $maxConn := getMaxConn $service.Attributes }}
{{if $maxConn }}
[backends."backend-{{ getServiceBackendName $service }}".maxConn]
extractorFunc = "{{ $maxConn.ExtractorFunc }}"
amount = {{ $maxConn.Amount }}
{{end}}
{{ $healthCheck := getHealthCheck $service.Attributes }}
{{if $healthCheck }}
[backends.backend-{{ getServiceBackendName $service }}.healthCheck]
path = "{{ $healthCheck.Path }}"
port = {{ $healthCheck.Port }}
interval = "{{ $healthCheck.Interval }}"
{{end}}
{{end}}
{{range $index, $node := .Nodes}}
[backends."backend-{{ getNodeBackendName $node }}".servers."{{ getServerName $node $index }}"]
url = "{{ getProtocol $node.Service.Tags }}://{{ getBackendAddress $node }}:{{ $node.Service.Port }}"
weight = {{ getWeight $node.Service.Tags }}
{{end}}
[frontends]
{{range $service := .Services}}
[frontends."frontend-{{ $service.ServiceName }}"]
backend = "backend-{{ getServiceBackendName $service }}"
priority = {{ getPriority $service.Attributes }}
passHostHeader = {{ getPassHostHeader $service.Attributes }}
passTLSCert = {{ getPassTLSCert $service.Attributes }}
entryPoints = [{{range getFrontEndEntryPoints $service.Attributes }}
"{{.}}",
{{end}}]
{{ $whitelistSourceRange := getWhitelistSourceRange $service.Attributes }}
{{if $whitelistSourceRange }}
whitelistSourceRange = [{{range $whitelistSourceRange}}
"{{.}}",
{{end}}]
{{end}}
basicAuth = [{{range getBasicAuth $service.Attributes }}
"{{.}}",
{{end}}]
{{ $redirect := getRedirect $service.Attributes }}
{{if $redirect }}
[frontends."frontend-{{ $service.ServiceName }}".redirect]
entryPoint = "{{ $redirect.EntryPoint }}"
regex = "{{ $redirect.Regex }}"
replacement = "{{ $redirect.Replacement }}"
{{end}}
{{ if hasErrorPages $service.Attributes }}
[frontends."frontend-{{ $service.ServiceName }}".errors]
{{ range $pageName, $page := getErrorPages $service.Attributes }}
[frontends."frontend-{{ $service.ServiceName }}".errors.{{ $pageName }}]
status = [{{range $page.Status }}
"{{.}}",
{{end}}]
backend = "{{ $page.Backend }}"
query = "{{ $page.Query }}"
{{end}}
{{end}}
{{ if hasRateLimit $service.Attributes }}
{{ $rateLimit := getRateLimit $service.Attributes }}
[frontends."frontend-{{ $service.ServiceName }}".rateLimit]
extractorFunc = "{{ $rateLimit.ExtractorFunc }}"
[frontends."frontend-{{ $service.ServiceName }}".rateLimit.rateSet]
{{ range $limitName, $limit := $rateLimit.RateSet }}
[frontends."frontend-{{ $service.ServiceName }}".rateLimit.rateSet.{{ $limitName }}]
period = "{{ $limit.Period }}"
average = {{ $limit.Average }}
burst = {{ $limit.Burst }}
{{end}}
{{end}}
{{ $headers := getHeaders $service.Attributes }}
{{ if $headers }}
[frontends."frontend-{{ $service.ServiceName }}".headers]
SSLRedirect = {{ $headers.SSLRedirect }}
SSLTemporaryRedirect = {{ $headers.SSLTemporaryRedirect }}
SSLHost = "{{ $headers.SSLHost }}"
STSSeconds = {{ $headers.STSSeconds }}
STSIncludeSubdomains = {{ $headers.STSIncludeSubdomains }}
STSPreload = {{ $headers.STSPreload }}
ForceSTSHeader = {{ $headers.ForceSTSHeader }}
FrameDeny = {{ $headers.FrameDeny }}
CustomFrameOptionsValue = "{{ $headers.CustomFrameOptionsValue }}"
ContentTypeNosniff = {{ $headers.ContentTypeNosniff }}
BrowserXSSFilter = {{ $headers.BrowserXSSFilter }}
ContentSecurityPolicy = "{{ $headers.ContentSecurityPolicy }}"
PublicKey = "{{ $headers.PublicKey }}"
ReferrerPolicy = "{{ $headers.ReferrerPolicy }}"
IsDevelopment = {{ $headers.IsDevelopment }}
{{ if $headers.AllowedHosts }}
AllowedHosts = [{{ range $headers.AllowedHosts }}
"{{.}}",
{{end}}]
{{end}}
{{ if $headers.HostsProxyHeaders }}
HostsProxyHeaders = [{{ range $headers.HostsProxyHeaders }}
"{{.}}",
{{end}}]
{{end}}
{{ if $headers.CustomRequestHeaders }}
[frontends."frontend-{{ $service.ServiceName }}".headers.customRequestHeaders]
{{ range $k, $v := $headers.CustomRequestHeaders }}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{ if $headers.CustomResponseHeaders }}
[frontends."frontend-{{ $service.ServiceName }}".headers.customResponseHeaders]
{{ range $k, $v := $headers.CustomResponseHeaders }}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{ if $headers.SSLProxyHeaders }}
[frontends."frontend-{{ $service.ServiceName }}".headers.SSLProxyHeaders]
{{range $k, $v := $headers.SSLProxyHeaders}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{end}}
[frontends."frontend-{{ $service.ServiceName }}".routes."route-host-{{ $service.ServiceName }}"]
rule = "{{ getFrontendRule $service }}"
{{end}}
`)
// templatesConsul_catalogTmplBytes returns the raw bytes of the embedded
// templates/consul_catalog.tmpl asset. (Generated accessor.)
func templatesConsul_catalogTmplBytes() ([]byte, error) {
	return _templatesConsul_catalogTmpl, nil
}
// templatesConsul_catalogTmpl wraps the consul_catalog template bytes in
// an asset. Size/mode/modTime are zeroed, matching go-bindata's output
// when metadata embedding is disabled.
func templatesConsul_catalogTmpl() (*asset, error) {
	bytes, err := templatesConsul_catalogTmplBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "templates/consul_catalog.tmpl", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
var _templatesDockerTmpl = []byte(`{{$backendServers := .Servers}}
[backends]
{{range $backendName, $backend := .Backends}}
{{if hasCircuitBreakerLabel $backend}}
[backends.backend-{{$backendName}}.circuitBreaker]
expression = "{{getCircuitBreakerExpression $backend}}"
{{end}}
{{if hasLoadBalancerLabel $backend}}
[backends.backend-{{$backendName}}.loadBalancer]
method = "{{getLoadBalancerMethod $backend}}"
sticky = {{getSticky $backend}}
{{if hasStickinessLabel $backend}}
[backends.backend-{{$backendName}}.loadBalancer.stickiness]
cookieName = "{{getStickinessCookieName $backend}}"
{{end}}
{{end}}
{{if hasMaxConnLabels $backend}}
[backends.backend-{{$backendName}}.maxConn]
amount = {{getMaxConnAmount $backend}}
extractorFunc = "{{getMaxConnExtractorFunc $backend}}"
{{end}}
{{if hasHealthCheckLabels $backend}}
[backends.backend-{{$backendName}}.healthCheck]
path = "{{getHealthCheckPath $backend}}"
port = {{getHealthCheckPort $backend}}
interval = "{{getHealthCheckInterval $backend}}"
{{end}}
{{$servers := index $backendServers $backendName}}
{{range $serverName, $server := $servers}}
{{if hasServices $server}}
{{$services := getServiceNames $server}}
{{range $serviceIndex, $serviceName := $services}}
[backends.backend-{{getServiceBackend $server $serviceName}}.servers.service-{{$serverName}}]
url = "{{getServiceProtocol $server $serviceName}}://{{getIPAddress $server}}:{{getServicePort $server $serviceName}}"
weight = {{getServiceWeight $server $serviceName}}
{{end}}
{{else}}
[backends.backend-{{$backendName}}.servers.server-{{$server.Name | replace "/" "" | replace "." "-"}}]
url = "{{getProtocol $server}}://{{getIPAddress $server}}:{{getPort $server}}"
weight = {{getWeight $server}}
{{end}}
{{end}}
{{end}}
[frontends]
{{range $frontend, $containers := .Frontends}}
{{$container := index $containers 0}}
{{if hasServices $container}}
{{$services := getServiceNames $container}}
{{range $serviceIndex, $serviceName := $services}}
[frontends."frontend-{{getServiceBackend $container $serviceName}}"]
backend = "backend-{{getServiceBackend $container $serviceName}}"
priority = {{getServicePriority $container $serviceName}}
passHostHeader = {{getServicePassHostHeader $container $serviceName}}
passTLSCert = {{getServicePassTLSCert $container $serviceName}}
entryPoints = [{{range getServiceEntryPoints $container $serviceName}}
"{{.}}",
{{end}}]
{{if getServiceWhitelistSourceRange $container $serviceName}}
whitelistSourceRange = [{{range getServiceWhitelistSourceRange $container $serviceName}}
"{{.}}",
{{end}}]
{{end}}
basicAuth = [{{range getServiceBasicAuth $container $serviceName}}
"{{.}}",
{{end}}]
{{if hasServiceRedirect $container $serviceName}}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".redirect]
entryPoint = "{{getServiceRedirectEntryPoint $container $serviceName}}"
regex = "{{getServiceRedirectRegex $container $serviceName}}"
replacement = "{{getServiceRedirectReplacement $container $serviceName}}"
{{end}}
{{ if hasServiceErrorPages $container $serviceName }}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".errors]
{{ range $pageName, $page := getServiceErrorPages $container $serviceName }}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".errors.{{$pageName}}]
status = [{{range $page.Status}}
"{{.}}",
{{end}}]
backend = "{{$page.Backend}}"
query = "{{$page.Query}}"
{{end}}
{{end}}
{{ if hasServiceRateLimits $container $serviceName }}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".rateLimit]
extractorFunc = "{{ getRateLimitsExtractorFunc $container $serviceName }}"
[frontends."frontend-{{getServiceBackend $container $serviceName}}".rateLimit.rateSet]
{{ range $limitName, $rateLimit := getServiceRateLimits $container $serviceName }}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".rateLimit.rateSet.{{ $limitName }}]
period = "{{ $rateLimit.Period }}"
average = {{ $rateLimit.Average }}
burst = {{ $rateLimit.Burst }}
{{end}}
{{end}}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".routes."service-{{$serviceName | replace "/" "" | replace "." "-"}}"]
rule = "{{getServiceFrontendRule $container $serviceName}}"
{{if hasServiceRequestHeaders $container $serviceName}}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".headers.customRequestHeaders]
{{range $k, $v := getServiceRequestHeaders $container $serviceName}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if hasServiceResponseHeaders $container $serviceName}}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".headers.customResponseHeaders]
{{range $k, $v := getServiceResponseHeaders $container $serviceName}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{end}} ## end range services
{{else}}
[frontends."frontend-{{$frontend}}"]
backend = "backend-{{getBackend $container}}"
priority = {{getPriority $container}}
passHostHeader = {{getPassHostHeader $container}}
passTLSCert = {{getPassTLSCert $container}}
entryPoints = [{{range getEntryPoints $container}}
"{{.}}",
{{end}}]
{{if getWhitelistSourceRange $container}}
whitelistSourceRange = [{{range getWhitelistSourceRange $container}}
"{{.}}",
{{end}}]
{{end}}
basicAuth = [{{range getBasicAuth $container}}
"{{.}}",
{{end}}]
{{if hasRedirect $container}}
[frontends."frontend-{{$frontend}}".redirect]
entryPoint = "{{getRedirectEntryPoint $container}}"
regex = "{{getRedirectRegex $container}}"
replacement = "{{getRedirectReplacement $container}}"
{{end}}
{{ if hasErrorPages $container }}
[frontends."frontend-{{$frontend}}".errors]
{{ range $pageName, $page := getErrorPages $container }}
[frontends."frontend-{{$frontend}}".errors.{{ $pageName }}]
status = [{{range $page.Status}}
"{{.}}",
{{end}}]
backend = "{{$page.Backend}}"
query = "{{$page.Query}}"
{{end}}
{{end}}
{{ if hasRateLimits $container }}
[frontends."frontend-{{$frontend}}".rateLimit]
extractorFunc = "{{ getRateLimitsExtractorFunc $container }}"
[frontends."frontend-{{$frontend}}".rateLimit.rateSet]
{{ range $limitName, $rateLimit := getRateLimits $container }}
[frontends."frontend-{{$frontend}}".rateLimit.rateSet.{{ $limitName }}]
period = "{{ $rateLimit.Period }}"
average = {{ $rateLimit.Average }}
burst = {{ $rateLimit.Burst }}
{{end}}
{{end}}
{{ if hasHeaders $container}}
[frontends."frontend-{{$frontend}}".headers]
{{if hasSSLRedirectHeaders $container}}
SSLRedirect = {{getSSLRedirectHeaders $container}}
{{end}}
{{if hasSSLTemporaryRedirectHeaders $container}}
SSLTemporaryRedirect = {{getSSLTemporaryRedirectHeaders $container}}
{{end}}
{{if hasSSLHostHeaders $container}}
SSLHost = "{{getSSLHostHeaders $container}}"
{{end}}
{{if hasSTSSecondsHeaders $container}}
STSSeconds = {{getSTSSecondsHeaders $container}}
{{end}}
{{if hasSTSIncludeSubdomainsHeaders $container}}
STSIncludeSubdomains = {{getSTSIncludeSubdomainsHeaders $container}}
{{end}}
{{if hasSTSPreloadHeaders $container}}
STSPreload = {{getSTSPreloadHeaders $container}}
{{end}}
{{if hasForceSTSHeaderHeaders $container}}
ForceSTSHeader = {{getForceSTSHeaderHeaders $container}}
{{end}}
{{if hasFrameDenyHeaders $container}}
FrameDeny = {{getFrameDenyHeaders $container}}
{{end}}
{{if hasCustomFrameOptionsValueHeaders $container}}
CustomFrameOptionsValue = "{{getCustomFrameOptionsValueHeaders $container}}"
{{end}}
{{if hasContentTypeNosniffHeaders $container}}
ContentTypeNosniff = {{getContentTypeNosniffHeaders $container}}
{{end}}
{{if hasBrowserXSSFilterHeaders $container}}
BrowserXSSFilter = {{getBrowserXSSFilterHeaders $container}}
{{end}}
{{if hasContentSecurityPolicyHeaders $container}}
ContentSecurityPolicy = "{{getContentSecurityPolicyHeaders $container}}"
{{end}}
{{if hasPublicKeyHeaders $container}}
PublicKey = "{{getPublicKeyHeaders $container}}"
{{end}}
{{if hasReferrerPolicyHeaders $container}}
ReferrerPolicy = "{{getReferrerPolicyHeaders $container}}"
{{end}}
{{if hasIsDevelopmentHeaders $container}}
IsDevelopment = {{getIsDevelopmentHeaders $container}}
{{end}}
{{if hasAllowedHostsHeaders $container}}
AllowedHosts = [{{range getAllowedHostsHeaders $container}}
"{{.}}",
{{end}}]
{{end}}
{{if hasHostsProxyHeaders $container}}
HostsProxyHeaders = [{{range getHostsProxyHeaders $container}}
"{{.}}",
{{end}}]
{{end}}
{{if hasRequestHeaders $container}}
[frontends."frontend-{{$frontend}}".headers.customRequestHeaders]
{{range $k, $v := getRequestHeaders $container}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if hasResponseHeaders $container}}
[frontends."frontend-{{$frontend}}".headers.customResponseHeaders]
{{range $k, $v := getResponseHeaders $container}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if hasSSLProxyHeaders $container}}
[frontends."frontend-{{$frontend}}".headers.SSLProxyHeaders]
{{range $k, $v := getSSLProxyHeaders $container}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{end}}
[frontends."frontend-{{$frontend}}".routes."route-frontend-{{$frontend}}"]
rule = "{{getFrontendRule $container}}"
{{end}}
{{end}}
`)
// templatesDockerTmplBytes returns the raw bytes of the embedded
// "templates/docker.tmpl" template. The error is always nil here; the
// (bytes, error) shape is the uniform go-bindata accessor signature.
func templatesDockerTmplBytes() ([]byte, error) {
	return _templatesDockerTmpl, nil
}
// templatesDockerTmpl builds the *asset for "templates/docker.tmpl",
// attaching placeholder file metadata (zero size, mode and mtime).
func templatesDockerTmpl() (*asset, error) {
	data, err := templatesDockerTmplBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{
		name:    "templates/docker.tmpl",
		size:    0,
		mode:    os.FileMode(0),
		modTime: time.Unix(0, 0),
	}
	return &asset{bytes: data, info: info}, nil
}
// _templatesEcsTmpl is the embedded source of "templates/ecs.tmpl"
// (generated by go-bindata) — edit the template file, not this literal.
var _templatesEcsTmpl = []byte(`[backends]
{{range $serviceName, $instances := .Services}}
{{if hasCircuitBreakerLabel $instances}}
[backends.backend-{{ $serviceName }}.circuitBreaker]
expression = "{{getCircuitBreakerExpression $instances}}"
{{end}}
{{if hasLoadBalancerLabel $instances}}
[backends.backend-{{ $serviceName }}.loadBalancer]
method = "{{ getLoadBalancerMethod $instances}}"
sticky = {{ getSticky $instances}}
{{if hasStickinessLabel $instances}}
[backends.backend-{{ $serviceName }}.loadBalancer.stickiness]
cookieName = "{{getStickinessCookieName $instances}}"
{{end}}
{{end}}
{{if hasMaxConnLabels $instances}}
[backends.backend-{{ $serviceName }}.maxConn]
amount = {{getMaxConnAmount $instances}}
extractorFunc = "{{getMaxConnExtractorFunc $instances}}"
{{end}}
{{ if hasHealthCheckLabels $instances }}
[backends.backend-{{ $serviceName }}.healthCheck]
path = "{{getHealthCheckPath $instances }}"
port = {{getHealthCheckPort $instances}}
interval = "{{getHealthCheckInterval $instances }}"
{{end}}
{{range $index, $instance := $instances}}
[backends.backend-{{ $instance.Name }}.servers.server-{{ $instance.Name }}{{ $instance.ID }}]
url = "{{ getProtocol $instance }}://{{ getHost $instance }}:{{ getPort $instance }}"
weight = {{ getWeight $instance}}
{{end}}
{{end}}
[frontends]
{{range $serviceName, $instances := .Services}}
{{range $instance := filterFrontends $instances}}
[frontends.frontend-{{ $serviceName }}]
backend = "backend-{{ $serviceName }}"
priority = {{ getPriority $instance}}
passHostHeader = {{ getPassHostHeader $instance}}
passTLSCert = {{ getPassTLSCert $instance}}
entryPoints = [{{range getEntryPoints $instance}}
"{{.}}",
{{end}}]
{{if getWhitelistSourceRange $instance}}
whitelistSourceRange = [{{range getWhitelistSourceRange $instance}}
"{{.}}",
{{end}}]
{{end}}
basicAuth = [{{range getBasicAuth $instance}}
"{{.}}",
{{end}}]
{{if hasRedirect $instance}}
[frontends."frontend-{{ $serviceName }}".redirect]
entryPoint = "{{getRedirectEntryPoint $instance}}"
regex = "{{getRedirectRegex $instance}}"
replacement = "{{getRedirectReplacement $instance}}"
{{end}}
{{ if hasErrorPages $instance }}
[frontends."frontend-{{ $serviceName }}".errors]
{{ range $pageName, $page := getErrorPages $instance }}
[frontends."frontend-{{ $serviceName }}".errors.{{ $pageName }}]
status = [{{range $page.Status}}
"{{.}}",
{{end}}]
backend = "{{$page.Backend}}"
query = "{{$page.Query}}"
{{end}}
{{end}}
{{ if hasRateLimits $instance }}
[frontends."frontend-{{ $serviceName }}".rateLimit]
extractorFunc = "{{ getRateLimitsExtractorFunc $instance }}"
[frontends."frontend-{{ $serviceName }}".rateLimit.rateSet]
{{ range $limitName, $rateLimit := getRateLimits $instance }}
[frontends."frontend-{{ $serviceName }}".rateLimit.rateSet.{{ $limitName }}]
period = "{{ $rateLimit.Period }}"
average = {{ $rateLimit.Average }}
burst = {{ $rateLimit.Burst }}
{{end}}
{{end}}
{{if hasHeaders $instance }}
[frontends."frontend-{{ $serviceName }}".headers]
{{if hasSSLRedirectHeaders $instance}}
SSLRedirect = {{getSSLRedirectHeaders $instance}}
{{end}}
{{if hasSSLTemporaryRedirectHeaders $instance}}
SSLTemporaryRedirect = {{getSSLTemporaryRedirectHeaders $instance}}
{{end}}
{{if hasSSLHostHeaders $instance}}
SSLHost = "{{getSSLHostHeaders $instance}}"
{{end}}
{{if hasSTSSecondsHeaders $instance}}
STSSeconds = {{getSTSSecondsHeaders $instance}}
{{end}}
{{if hasSTSIncludeSubdomainsHeaders $instance}}
STSIncludeSubdomains = {{getSTSIncludeSubdomainsHeaders $instance}}
{{end}}
{{if hasSTSPreloadHeaders $instance}}
STSPreload = {{getSTSPreloadHeaders $instance}}
{{end}}
{{if hasForceSTSHeaderHeaders $instance}}
ForceSTSHeader = {{getForceSTSHeaderHeaders $instance}}
{{end}}
{{if hasFrameDenyHeaders $instance}}
FrameDeny = {{getFrameDenyHeaders $instance}}
{{end}}
{{if hasCustomFrameOptionsValueHeaders $instance}}
CustomFrameOptionsValue = "{{getCustomFrameOptionsValueHeaders $instance}}"
{{end}}
{{if hasContentTypeNosniffHeaders $instance}}
ContentTypeNosniff = {{getContentTypeNosniffHeaders $instance}}
{{end}}
{{if hasBrowserXSSFilterHeaders $instance}}
BrowserXSSFilter = {{getBrowserXSSFilterHeaders $instance}}
{{end}}
{{if hasContentSecurityPolicyHeaders $instance}}
ContentSecurityPolicy = "{{getContentSecurityPolicyHeaders $instance}}"
{{end}}
{{if hasPublicKeyHeaders $instance}}
PublicKey = "{{getPublicKeyHeaders $instance}}"
{{end}}
{{if hasReferrerPolicyHeaders $instance}}
ReferrerPolicy = "{{getReferrerPolicyHeaders $instance}}"
{{end}}
{{if hasIsDevelopmentHeaders $instance}}
IsDevelopment = {{getIsDevelopmentHeaders $instance}}
{{end}}
{{if hasRequestHeaders $instance}}
[frontends."frontend-{{ $serviceName }}".headers.customRequestHeaders]
{{range $k, $v := getRequestHeaders $instance}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if hasResponseHeaders $instance}}
[frontends."frontend-{{ $serviceName }}".headers.customResponseHeaders]
{{range $k, $v := getResponseHeaders $instance}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if hasAllowedHostsHeaders $instance}}
[frontends."frontend-{{ $serviceName }}".headers.AllowedHosts]
{{range getAllowedHostsHeaders $instance}}
"{{.}}"
{{end}}
{{end}}
{{if hasHostsProxyHeaders $instance}}
[frontends."frontend-{{ $serviceName }}".headers.HostsProxyHeaders]
{{range getHostsProxyHeaders $instance}}
"{{.}}"
{{end}}
{{end}}
{{if hasSSLProxyHeaders $instance}}
[frontends."frontend-{{ $serviceName }}".headers.SSLProxyHeaders]
{{range $k, $v := getSSLProxyHeaders $instance}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{end}}
[frontends.frontend-{{ $serviceName }}.routes.route-frontend-{{ $serviceName }}]
rule = "{{getFrontendRule $instance}}"
{{end}}
{{end}}`)
// templatesEcsTmplBytes returns the raw bytes of the embedded
// "templates/ecs.tmpl" template. The error is always nil here; the
// signature matches the uniform go-bindata accessor shape.
func templatesEcsTmplBytes() ([]byte, error) {
	return _templatesEcsTmpl, nil
}
// templatesEcsTmpl builds the *asset for "templates/ecs.tmpl",
// attaching placeholder file metadata (zero size, mode and mtime).
func templatesEcsTmpl() (*asset, error) {
	data, err := templatesEcsTmplBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{
		name:    "templates/ecs.tmpl",
		size:    0,
		mode:    os.FileMode(0),
		modTime: time.Unix(0, 0),
	}
	return &asset{bytes: data, info: info}, nil
}
// _templatesEurekaTmpl is the embedded source of "templates/eureka.tmpl"
// (generated by go-bindata) — edit the template file, not this literal.
var _templatesEurekaTmpl = []byte(`[backends]{{range .Applications}}
{{ $app := .}}
{{range .Instances}}
[backends.backend{{$app.Name}}.servers.server-{{ getInstanceID . }}]
url = "{{ getProtocol . }}://{{ .IpAddr }}:{{ getPort . }}"
weight = {{ getWeight . }}
{{end}}{{end}}
[frontends]{{range .Applications}}
[frontends.frontend{{.Name}}]
backend = "backend{{.Name}}"
entryPoints = ["http"]
[frontends.frontend{{.Name }}.routes.route-host{{.Name}}]
rule = "Host:{{ .Name | tolower }}"
{{end}}
`)
// templatesEurekaTmplBytes returns the raw bytes of the embedded
// "templates/eureka.tmpl" template. The error is always nil here; the
// signature matches the uniform go-bindata accessor shape.
func templatesEurekaTmplBytes() ([]byte, error) {
	return _templatesEurekaTmpl, nil
}
// templatesEurekaTmpl builds the *asset for "templates/eureka.tmpl",
// attaching placeholder file metadata (zero size, mode and mtime).
func templatesEurekaTmpl() (*asset, error) {
	data, err := templatesEurekaTmplBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{
		name:    "templates/eureka.tmpl",
		size:    0,
		mode:    os.FileMode(0),
		modTime: time.Unix(0, 0),
	}
	return &asset{bytes: data, info: info}, nil
}
// _templatesKubernetesTmpl is the embedded source of
// "templates/kubernetes.tmpl" (generated by go-bindata) — edit the
// template file, not this literal.
var _templatesKubernetesTmpl = []byte(`[backends]{{range $backendName, $backend := .Backends}}
[backends."{{$backendName}}"]
{{if $backend.CircuitBreaker}}
[backends."{{$backendName}}".circuitbreaker]
expression = "{{$backend.CircuitBreaker.Expression}}"
{{end}}
[backends."{{$backendName}}".loadbalancer]
method = "{{$backend.LoadBalancer.Method}}"
{{if $backend.LoadBalancer.Sticky}}
sticky = true
{{end}}
{{if $backend.LoadBalancer.Stickiness}}
[backends."{{$backendName}}".loadbalancer.stickiness]
cookieName = "{{$backend.LoadBalancer.Stickiness.CookieName}}"
{{end}}
{{range $serverName, $server := $backend.Servers}}
[backends."{{$backendName}}".servers."{{$serverName}}"]
url = "{{$server.URL}}"
weight = {{$server.Weight}}
{{end}}
{{end}}
[frontends]{{range $frontendName, $frontend := .Frontends}}
[frontends."{{$frontendName}}"]
backend = "{{$frontend.Backend}}"
priority = {{$frontend.Priority}}
passHostHeader = {{$frontend.PassHostHeader}}
entryPoints = [{{range $frontend.EntryPoints}}
"{{.}}",
{{end}}]
basicAuth = [{{range $frontend.BasicAuth}}
"{{.}}",
{{end}}]
whitelistSourceRange = [{{range $frontend.WhitelistSourceRange}}
"{{.}}",
{{end}}]
{{if $frontend.Redirect}}
[frontends."{{$frontendName}}".redirect]
entryPoint = "{{$frontend.RedirectEntryPoint}}"
regex = "{{$frontend.RedirectRegex}}"
replacement = "{{$frontend.RedirectReplacement}}"
{{end}}
{{if $frontend.Headers }}
[frontends."{{$frontendName}}".headers]
SSLRedirect = {{$frontend.Headers.SSLRedirect}}
SSLTemporaryRedirect = {{$frontend.Headers.SSLTemporaryRedirect}}
SSLHost = "{{$frontend.Headers.SSLHost}}"
STSSeconds = {{$frontend.Headers.STSSeconds}}
STSIncludeSubdomains = {{$frontend.Headers.STSIncludeSubdomains}}
STSPreload = {{$frontend.Headers.STSPreload}}
ForceSTSHeader = {{$frontend.Headers.ForceSTSHeader}}
FrameDeny = {{$frontend.Headers.FrameDeny}}
CustomFrameOptionsValue = "{{$frontend.Headers.CustomFrameOptionsValue}}"
ContentTypeNosniff = {{$frontend.Headers.ContentTypeNosniff}}
BrowserXSSFilter = {{$frontend.Headers.BrowserXSSFilter}}
ContentSecurityPolicy = "{{$frontend.Headers.ContentSecurityPolicy}}"
PublicKey = "{{$frontend.Headers.PublicKey}}"
ReferrerPolicy = "{{$frontend.Headers.ReferrerPolicy}}"
IsDevelopment = {{$frontend.Headers.IsDevelopment}}
{{if $frontend.Headers.AllowedHosts}}
AllowedHosts = [{{range $frontend.Headers.AllowedHosts}}
"{{.}}",
{{end}}]
{{end}}
{{if $frontend.Headers.HostsProxyHeaders}}
HostsProxyHeaders = [{{range $frontend.Headers.HostsProxyHeaders}}
"{{.}}",
{{end}}]
{{end}}
{{if $frontend.Headers.CustomRequestHeaders}}
[frontends."{{$frontendName}}".headers.customrequestheaders]
{{range $k, $v := $frontend.Headers.CustomRequestHeaders}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if $frontend.Headers.CustomResponseHeaders}}
[frontends."{{$frontendName}}".headers.customresponseheaders]
{{range $k, $v := $frontend.Headers.CustomResponseHeaders}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if $frontend.Headers.SSLProxyHeaders}}
[frontends."{{$frontendName}}".headers.SSLProxyHeaders]
{{range $k, $v := $frontend.Headers.SSLProxyHeaders}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{end}}
{{range $routeName, $route := $frontend.Routes}}
[frontends."{{$frontendName}}".routes."{{$routeName}}"]
rule = "{{$route.Rule}}"
{{end}}
{{end}}`)
// templatesKubernetesTmplBytes returns the raw bytes of the embedded
// "templates/kubernetes.tmpl" template. The error is always nil here; the
// signature matches the uniform go-bindata accessor shape.
func templatesKubernetesTmplBytes() ([]byte, error) {
	return _templatesKubernetesTmpl, nil
}
// templatesKubernetesTmpl builds the *asset for "templates/kubernetes.tmpl",
// attaching placeholder file metadata (zero size, mode and mtime).
func templatesKubernetesTmpl() (*asset, error) {
	data, err := templatesKubernetesTmplBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{
		name:    "templates/kubernetes.tmpl",
		size:    0,
		mode:    os.FileMode(0),
		modTime: time.Unix(0, 0),
	}
	return &asset{bytes: data, info: info}, nil
}
// _templatesKvTmpl is the embedded source of "templates/kv.tmpl"
// (generated by go-bindata) — edit the template file, not this literal.
var _templatesKvTmpl = []byte(`[backends]
{{range $backend := List .Prefix "/backends/"}}
{{$backendName := Last $backend}}
{{$circuitBreaker := Get "" $backend "/circuitbreaker/expression"}}
{{with $circuitBreaker}}
[backends."{{$backendName}}".circuitBreaker]
expression = "{{$circuitBreaker}}"
{{end}}
{{$loadBalancer := Get "" $backend "/loadbalancer/method"}}
{{with $loadBalancer}}
[backends."{{$backendName}}".loadBalancer]
method = "{{$loadBalancer}}"
sticky = {{ getSticky $backend }}
{{if hasStickinessLabel $backend}}
[backends."{{$backendName}}".loadBalancer.stickiness]
cookieName = "{{getStickinessCookieName $backend}}"
{{end}}
{{end}}
{{$maxConnAmt := Get "" $backend "/maxconn/amount"}}
{{$maxConnExtractorFunc := Get "" $backend "/maxconn/extractorfunc"}}
{{with $maxConnAmt}}
{{with $maxConnExtractorFunc}}
[backends."{{$backendName}}".maxConn]
amount = {{$maxConnAmt}}
extractorFunc = "{{$maxConnExtractorFunc}}"
{{end}}
{{end}}
{{$healthCheck := Get "" $backend "/healthcheck/path"}}
{{with $healthCheck}}
[backends."{{$backendName}}".healthCheck]
path = "{{$healthCheck}}"
interval = "{{ Get "30s" $backend "/healthcheck/interval" }}"
{{end}}
{{range $server := ListServers $backend}}
[backends."{{$backendName}}".servers."{{Last $server}}"]
url = "{{Get "" $server "/url"}}"
weight = {{Get "0" $server "/weight"}}
{{end}}
{{end}}
[frontends]
{{range $frontend := List .Prefix "/frontends/" }}
{{$frontendName := Last $frontend}}
[frontends."{{$frontendName}}"]
backend = "{{Get "" $frontend "/backend"}}"
priority = {{Get "0" $frontend "/priority"}}
passHostHeader = {{Get "true" $frontend "/passHostHeader"}}
{{$entryPoints := SplitGet $frontend "/entrypoints"}}
entryPoints = [{{range $entryPoints}}
"{{.}}",
{{end}}]
{{range $route := List $frontend "/routes/"}}
[frontends."{{$frontendName}}".routes."{{Last $route}}"]
rule = "{{Get "" $route "/rule"}}"
{{end}}
{{end}}
{{range $tlsConfiguration := List .Prefix "/tlsconfiguration/"}}
[[tlsConfiguration]]
{{$entryPoints := SplitGet $tlsConfiguration "/entrypoints"}}
entryPoints = [{{range $entryPoints}}
"{{.}}",
{{end}}]
[tlsConfiguration.certificate]
certFile = """{{Get "" $tlsConfiguration "/certificate/certfile"}}"""
keyFile = """{{Get "" $tlsConfiguration "/certificate/keyfile"}}"""
{{end}}
`)
// templatesKvTmplBytes returns the raw bytes of the embedded
// "templates/kv.tmpl" template. The error is always nil here; the
// signature matches the uniform go-bindata accessor shape.
func templatesKvTmplBytes() ([]byte, error) {
	return _templatesKvTmpl, nil
}
// templatesKvTmpl builds the *asset for "templates/kv.tmpl",
// attaching placeholder file metadata (zero size, mode and mtime).
func templatesKvTmpl() (*asset, error) {
	data, err := templatesKvTmplBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{
		name:    "templates/kv.tmpl",
		size:    0,
		mode:    os.FileMode(0),
		modTime: time.Unix(0, 0),
	}
	return &asset{bytes: data, info: info}, nil
}
// _templatesMarathonTmpl is the embedded source of "templates/marathon.tmpl"
// (generated by go-bindata) — edit the template file, not this literal.
var _templatesMarathonTmpl = []byte(`{{$apps := .Applications}}
[backends]
{{range $app := $apps}}
{{range $serviceIndex, $serviceName := getServiceNames $app}}
[backends."{{getBackend $app $serviceName }}"]
{{ if hasCircuitBreakerLabels $app }}
[backends."{{getBackend $app $serviceName }}".circuitBreaker]
expression = "{{getCircuitBreakerExpression $app }}"
{{end}}
{{ if hasLoadBalancerLabels $app }}
[backends."{{getBackend $app $serviceName }}".loadBalancer]
method = "{{getLoadBalancerMethod $app }}"
sticky = {{getSticky $app}}
{{if hasStickinessLabel $app}}
[backends."{{getBackend $app $serviceName }}".loadBalancer.stickiness]
cookieName = "{{getStickinessCookieName $app}}"
{{end}}
{{end}}
{{ if hasMaxConnLabels $app }}
[backends."{{getBackend $app $serviceName }}".maxConn]
amount = {{getMaxConnAmount $app }}
extractorFunc = "{{getMaxConnExtractorFunc $app }}"
{{end}}
{{ if hasHealthCheckLabels $app }}
[backends."{{getBackend $app $serviceName }}".healthCheck]
path = "{{getHealthCheckPath $app }}"
port = {{getHealthCheckPort $app}}
interval = "{{getHealthCheckInterval $app }}"
{{end}}
{{end}}
{{range $task := $app.Tasks}}
{{range $serviceIndex, $serviceName := getServiceNames $app}}
[backends."{{getBackend $app $serviceName}}".servers."server-{{$task.ID | replace "." "-"}}{{getServiceNameSuffix $serviceName }}"]
url = "{{getProtocol $app $serviceName}}://{{getBackendServer $task $app}}:{{getPort $task $app $serviceName}}"
weight = {{getWeight $app $serviceName}}
{{end}}
{{end}}
{{end}}
[frontends]
{{range $app := $apps}}
{{range $serviceIndex, $serviceName := getServiceNames .}}
[frontends."{{ getFrontendName $app $serviceName }}"]
backend = "{{getBackend $app $serviceName}}"
priority = {{getPriority $app $serviceName}}
passHostHeader = {{getPassHostHeader $app $serviceName}}
passTLSCert = {{getPassTLSCert $app $serviceName}}
entryPoints = [{{range getEntryPoints $app $serviceName}}
"{{.}}",
{{end}}]
{{if getWhitelistSourceRange $app $serviceName}}
whitelistSourceRange = [{{range getWhitelistSourceRange $app $serviceName}}
"{{.}}",
{{end}}]
{{end}}
basicAuth = [{{range getBasicAuth $app $serviceName}}
"{{.}}",
{{end}}]
{{if hasRedirect $app $serviceName}}
[frontends."{{ getFrontendName $app $serviceName }}".redirect]
entryPoint = "{{getRedirectEntryPoint $app $serviceName}}"
regex = "{{getRedirectRegex $app $serviceName}}"
replacement = "{{getRedirectReplacement $app $serviceName}}"
{{end}}
{{ if hasErrorPages $app $serviceName }}
[frontends."{{ getFrontendName $app $serviceName }}".errors]
{{ range $pageName, $page := getErrorPages $app $serviceName }}
[frontends."{{ getFrontendName $app $serviceName }}".errors.{{ $pageName }}]
status = [{{range $page.Status}}
"{{.}}",
{{end}}]
backend = "{{$page.Backend}}"
query = "{{$page.Query}}"
{{end}}
{{end}}
{{ if hasRateLimits $app $serviceName }}
[frontends."{{ getFrontendName $app $serviceName }}".rateLimit]
extractorFunc = "{{ getRateLimitsExtractorFunc $app $serviceName }}"
[frontends."{{ getFrontendName $app $serviceName }}".rateLimit.rateSet]
{{ range $limitName, $rateLimit := getRateLimits $app $serviceName }}
[frontends."{{ getFrontendName $app $serviceName }}".rateLimit.rateSet.{{ $limitName }}]
period = "{{ $rateLimit.Period }}"
average = {{ $rateLimit.Average }}
burst = {{ $rateLimit.Burst }}
{{end}}
{{end}}
{{if hasHeaders $app $serviceName }}
[frontends."{{ getFrontendName $app $serviceName }}".headers]
{{if hasSSLRedirectHeaders $app $serviceName}}
SSLRedirect = {{getSSLRedirectHeaders $app $serviceName}}
{{end}}
{{if hasSSLTemporaryRedirectHeaders $app $serviceName}}
SSLTemporaryRedirect = {{getSSLTemporaryRedirectHeaders $app $serviceName}}
{{end}}
{{if hasSSLHostHeaders $app $serviceName}}
SSLHost = "{{getSSLHostHeaders $app $serviceName}}"
{{end}}
{{if hasSTSSecondsHeaders $app $serviceName}}
STSSeconds = {{getSTSSecondsHeaders $app $serviceName}}
{{end}}
{{if hasSTSIncludeSubdomainsHeaders $app $serviceName}}
STSIncludeSubdomains = {{getSTSIncludeSubdomainsHeaders $app $serviceName}}
{{end}}
{{if hasSTSPreloadHeaders $app $serviceName}}
STSPreload = {{getSTSPreloadHeaders $app $serviceName}}
{{end}}
{{if hasForceSTSHeaderHeaders $app $serviceName}}
ForceSTSHeader = {{getForceSTSHeaderHeaders $app $serviceName}}
{{end}}
{{if hasFrameDenyHeaders $app $serviceName}}
FrameDeny = {{getFrameDenyHeaders $app $serviceName}}
{{end}}
{{if hasCustomFrameOptionsValueHeaders $app $serviceName}}
CustomFrameOptionsValue = "{{getCustomFrameOptionsValueHeaders $app $serviceName}}"
{{end}}
{{if hasContentTypeNosniffHeaders $app $serviceName}}
ContentTypeNosniff = {{getContentTypeNosniffHeaders $app $serviceName}}
{{end}}
{{if hasBrowserXSSFilterHeaders $app $serviceName}}
BrowserXSSFilter = {{getBrowserXSSFilterHeaders $app $serviceName}}
{{end}}
{{if hasContentSecurityPolicyHeaders $app $serviceName}}
ContentSecurityPolicy = "{{getContentSecurityPolicyHeaders $app $serviceName}}"
{{end}}
{{if hasPublicKeyHeaders $app $serviceName}}
PublicKey = "{{getPublicKeyHeaders $app $serviceName}}"
{{end}}
{{if hasReferrerPolicyHeaders $app $serviceName}}
ReferrerPolicy = "{{getReferrerPolicyHeaders $app $serviceName}}"
{{end}}
{{if hasIsDevelopmentHeaders $app $serviceName}}
IsDevelopment = {{getIsDevelopmentHeaders $app $serviceName}}
{{end}}
{{if hasAllowedHostsHeaders $app $serviceName}}
AllowedHosts = [{{range getAllowedHostsHeaders $app $serviceName}}
"{{.}}",
{{end}}]
{{end}}
{{if hasHostsProxyHeaders $app $serviceName}}
HostsProxyHeaders = [{{range getHostsProxyHeaders $app $serviceName}}
"{{.}}",
{{end}}]
{{end}}
{{if hasRequestHeaders $app $serviceName}}
[frontends."{{ getFrontendName $app $serviceName }}".headers.customRequestHeaders]
{{range $k, $v := getRequestHeaders $app $serviceName}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if hasResponseHeaders $app $serviceName}}
[frontends."{{ getFrontendName $app $serviceName }}".headers.customResponseHeaders]
{{range $k, $v := getResponseHeaders $app $serviceName}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if hasSSLProxyHeaders $app $serviceName}}
[frontends."{{ getFrontendName $app $serviceName }}".headers.SSLProxyHeaders]
{{range $k, $v := getSSLProxyHeaders $app $serviceName}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{end}}
[frontends."{{ getFrontendName $app $serviceName }}".routes."route-host{{$app.ID | replace "/" "-"}}{{getServiceNameSuffix $serviceName }}"]
rule = "{{getFrontendRule $app $serviceName}}"
{{end}}
{{end}}
`)
// templatesMarathonTmplBytes returns the raw bytes of the embedded
// "templates/marathon.tmpl" template. The error is always nil here; the
// signature matches the uniform go-bindata accessor shape.
func templatesMarathonTmplBytes() ([]byte, error) {
	return _templatesMarathonTmpl, nil
}
// templatesMarathonTmpl builds the *asset for "templates/marathon.tmpl",
// attaching placeholder file metadata (zero size, mode and mtime).
func templatesMarathonTmpl() (*asset, error) {
	data, err := templatesMarathonTmplBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{
		name:    "templates/marathon.tmpl",
		size:    0,
		mode:    os.FileMode(0),
		modTime: time.Unix(0, 0),
	}
	return &asset{bytes: data, info: info}, nil
}
// _templatesMesosTmpl is the embedded source of "templates/mesos.tmpl"
// (generated by go-bindata) — edit the template file, not this literal.
var _templatesMesosTmpl = []byte(`{{$apps := .Applications}}
[backends]{{range .Tasks}}
[backends.backend{{getBackend . $apps}}.servers.server-{{getID .}}]
url = "{{getProtocol . $apps}}://{{getHost .}}:{{getPort . $apps}}"
weight = {{getWeight . $apps}}
{{end}}
[frontends]{{range .Applications}}
[frontends.frontend-{{getFrontEndName .}}]
backend = "backend{{getFrontendBackend .}}"
passHostHeader = {{getPassHostHeader .}}
priority = {{getPriority .}}
entryPoints = [{{range getEntryPoints .}}
"{{.}}",
{{end}}]
[frontends.frontend-{{getFrontEndName .}}.routes.route-host{{getFrontEndName .}}]
rule = "{{getFrontendRule .}}"
{{end}}
`)
// templatesMesosTmplBytes returns the raw bytes of the embedded
// "templates/mesos.tmpl" template. The error is always nil here; the
// signature matches the uniform go-bindata accessor shape.
func templatesMesosTmplBytes() ([]byte, error) {
	return _templatesMesosTmpl, nil
}
// templatesMesosTmpl builds the *asset for "templates/mesos.tmpl",
// attaching placeholder file metadata (zero size, mode and mtime).
func templatesMesosTmpl() (*asset, error) {
	data, err := templatesMesosTmplBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{
		name:    "templates/mesos.tmpl",
		size:    0,
		mode:    os.FileMode(0),
		modTime: time.Unix(0, 0),
	}
	return &asset{bytes: data, info: info}, nil
}
// _templatesNotfoundTmpl is the embedded source of "templates/notFound.tmpl"
// (generated by go-bindata) — edit the template file, not this literal.
var _templatesNotfoundTmpl = []byte(`<!DOCTYPE html>
<html>
<head>
<title>Traefik</title>
</head>
<body>
Ohhhh man, this is bad...
</body>
</html>`)
// templatesNotfoundTmplBytes returns the raw bytes of the embedded
// "templates/notFound.tmpl" page. The error is always nil here; the
// signature matches the uniform go-bindata accessor shape.
func templatesNotfoundTmplBytes() ([]byte, error) {
	return _templatesNotfoundTmpl, nil
}
// templatesNotfoundTmpl builds the *asset for "templates/notFound.tmpl",
// attaching placeholder file metadata (zero size, mode and mtime).
func templatesNotfoundTmpl() (*asset, error) {
	data, err := templatesNotfoundTmplBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{
		name:    "templates/notFound.tmpl",
		size:    0,
		mode:    os.FileMode(0),
		modTime: time.Unix(0, 0),
	}
	return &asset{bytes: data, info: info}, nil
}
// _templatesRancherTmpl is the embedded source of "templates/rancher.tmpl"
// (generated by go-bindata) — edit the template file, not this literal.
var _templatesRancherTmpl = []byte(`{{$backendServers := .Backends}}
[backends]
{{range $backendName, $backend := .Backends}}
[backends.backend-{{$backendName}}]
{{if hasCircuitBreakerLabel $backend}}
[backends.backend-{{$backendName}}.circuitBreaker]
expression = "{{getCircuitBreakerExpression $backend}}"
{{end}}
{{if hasLoadBalancerLabel $backend}}
[backends.backend-{{$backendName}}.loadBalancer]
method = "{{getLoadBalancerMethod $backend}}"
sticky = {{getSticky $backend}}
{{if hasStickinessLabel $backend}}
[backends.backend-{{$backendName}}.loadBalancer.stickiness]
cookieName = "{{getStickinessCookieName $backend}}"
{{end}}
{{end}}
{{if hasMaxConnLabels $backend}}
[backends.backend-{{$backendName}}.maxConn]
amount = {{getMaxConnAmount $backend}}
extractorFunc = "{{getMaxConnExtractorFunc $backend}}"
{{end}}
{{if hasHealthCheckLabels $backend}}
[backends.backend-{{$backendName}}.healthCheck]
path = "{{getHealthCheckPath $backend}}"
port = {{getHealthCheckPort $backend}}
interval = "{{getHealthCheckInterval $backend}}"
{{end}}
{{range $index, $ip := $backend.Containers}}
[backends.backend-{{$backendName}}.servers.server-{{$index}}]
url = "{{getProtocol $backend}}://{{$ip}}:{{getPort $backend}}"
weight = {{getWeight $backend}}
{{end}}
{{end}}
[frontends]
{{range $frontendName, $service := .Frontends}}
[frontends."frontend-{{$frontendName}}"]
backend = "backend-{{getBackend $service}}"
priority = {{getPriority $service}}
passHostHeader = {{getPassHostHeader $service}}
passTLSCert = {{getPassTLSCert $service}}
entryPoints = [{{range getEntryPoints $service}}
"{{.}}",
{{end}}]
{{if getWhitelistSourceRange $service}}
whitelistSourceRange = [{{range getWhitelistSourceRange $service}}
"{{.}}",
{{end}}]
{{end}}
basicAuth = [{{range getBasicAuth $service}}
"{{.}}",
{{end}}]
{{if hasRedirect $service}}
[frontends."frontend-{{$frontendName}}".redirect]
entryPoint = "{{getRedirectEntryPoint $service}}"
regex = "{{getRedirectRegex $service}}"
replacement = "{{getRedirectReplacement $service}}"
{{end}}
{{ if hasErrorPages $service }}
[frontends."frontend-{{$frontendName}}".errors]
{{ range $pageName, $page := getErrorPages $service }}
[frontends."frontend-{{$frontendName}}".errors.{{ $pageName }}]
status = [{{range $page.Status}}
"{{.}}",
{{end}}]
backend = "{{$page.Backend}}"
query = "{{$page.Query}}"
{{end}}
{{end}}
{{ if hasRateLimits $service }}
[frontends."frontend-{{$frontendName}}".rateLimit]
extractorFunc = "{{ getRateLimitsExtractorFunc $service }}"
[frontends."frontend-{{$frontendName}}".rateLimit.rateSet]
{{ range $limitName, $rateLimit := getRateLimits $service }}
[frontends."frontend-{{$frontendName}}".rateLimit.rateSet.{{ $limitName }}]
period = "{{ $rateLimit.Period }}"
average = {{ $rateLimit.Average }}
burst = {{ $rateLimit.Burst }}
{{end}}
{{end}}
{{if hasHeaders $service }}
[frontends."frontend-{{$frontendName}}".headers]
SSLRedirect = {{getSSLRedirectHeaders $service}}
SSLTemporaryRedirect = {{getSSLTemporaryRedirectHeaders $service}}
SSLHost = "{{getSSLHostHeaders $service}}"
STSSeconds = {{getSTSSecondsHeaders $service}}
STSIncludeSubdomains = {{getSTSIncludeSubdomainsHeaders $service}}
STSPreload = {{getSTSPreloadHeaders $service}}
ForceSTSHeader = {{getForceSTSHeaderHeaders $service}}
FrameDeny = {{getFrameDenyHeaders $service}}
CustomFrameOptionsValue = "{{getCustomFrameOptionsValueHeaders $service}}"
ContentTypeNosniff = {{getContentTypeNosniffHeaders $service}}
BrowserXSSFilter = {{getBrowserXSSFilterHeaders $service}}
ContentSecurityPolicy = "{{getContentSecurityPolicyHeaders $service}}"
PublicKey = "{{getPublicKeyHeaders $service}}"
ReferrerPolicy = "{{getReferrerPolicyHeaders $service}}"
IsDevelopment = {{getIsDevelopmentHeaders $service}}
AllowedHosts = [{{range getAllowedHostsHeaders $service}}
"{{.}}",
{{end}}]
HostsProxyHeaders = [{{range getHostsProxyHeaders $service}}
"{{.}}",
{{end}}]
{{if hasRequestHeaders $service}}
[frontends."frontend-{{$frontendName}}".headers.customRequestHeaders]
{{range $k, $v := getRequestHeaders $service}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if hasResponseHeaders $service}}
[frontends."frontend-{{$frontendName}}".headers.customResponseHeaders]
{{range $k, $v := getResponseHeaders $service}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if hasSSLProxyHeaders $service}}
[frontends."frontend-{{$frontendName}}".headers.SSLProxyHeaders]
{{range $k, $v := getSSLProxyHeaders $service}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{end}}
[frontends."frontend-{{$frontendName}}".routes."route-frontend-{{$frontendName}}"]
rule = "{{getFrontendRule $service}}"
{{end}}
`)
// templatesRancherTmplBytes returns the raw bytes of the embedded
// "templates/rancher.tmpl" template. The error is always nil here; the
// signature matches the uniform go-bindata accessor shape.
func templatesRancherTmplBytes() ([]byte, error) {
	return _templatesRancherTmpl, nil
}
// templatesRancherTmpl builds the *asset for "templates/rancher.tmpl",
// attaching placeholder file metadata (zero size, mode and mtime).
func templatesRancherTmpl() (*asset, error) {
	data, err := templatesRancherTmplBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{
		name:    "templates/rancher.tmpl",
		size:    0,
		mode:    os.FileMode(0),
		modTime: time.Unix(0, 0),
	}
	return &asset{bytes: data, info: info}, nil
}
// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
	// Normalize Windows-style separators so lookups are slash-based.
	canonical := strings.Replace(name, "\\", "/", -1)
	f, ok := _bindata[canonical]
	if !ok {
		return nil, fmt.Errorf("Asset %s not found", name)
	}
	a, err := f()
	if err != nil {
		return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
	}
	return a.bytes, nil
}
// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
	data, err := Asset(name)
	if err != nil {
		panic("asset: Asset(" + name + "): " + err.Error())
	}
	return data
}
// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
	// Normalize Windows-style separators so lookups are slash-based.
	canonical := strings.Replace(name, "\\", "/", -1)
	f, ok := _bindata[canonical]
	if !ok {
		return nil, fmt.Errorf("AssetInfo %s not found", name)
	}
	a, err := f()
	if err != nil {
		return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
	}
	return a.info, nil
}
// AssetNames returns the names of the assets.
func AssetNames() []string {
	names := make([]string, 0, len(_bindata))
	for assetName := range _bindata {
		names = append(names, assetName)
	}
	return names
}
// _bindata is a table, holding each asset generator, mapped to its name.
// Keys are slash-separated paths; values lazily build the *asset on demand.
var _bindata = map[string]func() (*asset, error){
	"templates/consul_catalog.tmpl": templatesConsul_catalogTmpl,
	"templates/docker.tmpl":         templatesDockerTmpl,
	"templates/ecs.tmpl":            templatesEcsTmpl,
	"templates/eureka.tmpl":         templatesEurekaTmpl,
	"templates/kubernetes.tmpl":     templatesKubernetesTmpl,
	"templates/kv.tmpl":             templatesKvTmpl,
	"templates/marathon.tmpl":       templatesMarathonTmpl,
	"templates/mesos.tmpl":          templatesMesosTmpl,
	"templates/notFound.tmpl":       templatesNotfoundTmpl,
	"templates/rancher.tmpl":        templatesRancherTmpl,
}
// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
//     data/
//       foo.txt
//       img/
//         a.png
//         b.png
// then AssetDir("data") would return []string{"foo.txt", "img"}
// AssetDir("data/img") would return []string{"a.png", "b.png"}
// AssetDir("foo.txt") and AssetDir("notexist") would return an error
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
	node := _bintree
	if name != "" {
		// Walk the tree one slash-separated path component at a time.
		canonical := strings.Replace(name, "\\", "/", -1)
		for _, part := range strings.Split(canonical, "/") {
			child := node.Children[part]
			if child == nil {
				return nil, fmt.Errorf("Asset %s not found", name)
			}
			node = child
		}
	}
	// A non-nil Func means we landed on a file, not a directory.
	if node.Func != nil {
		return nil, fmt.Errorf("Asset %s not found", name)
	}
	children := make([]string, 0, len(node.Children))
	for childName := range node.Children {
		children = append(children, childName)
	}
	return children, nil
}
// bintree is a node in the embedded-asset directory tree: Func is non-nil
// for file nodes, while directory nodes rely on Children.
type bintree struct {
	Func     func() (*asset, error)
	Children map[string]*bintree
}
// _bintree is the root of the asset directory tree consumed by AssetDir.
var _bintree = &bintree{nil, map[string]*bintree{
	"templates": {nil, map[string]*bintree{
		"consul_catalog.tmpl": {templatesConsul_catalogTmpl, map[string]*bintree{}},
		"docker.tmpl":         {templatesDockerTmpl, map[string]*bintree{}},
		"ecs.tmpl":            {templatesEcsTmpl, map[string]*bintree{}},
		"eureka.tmpl":         {templatesEurekaTmpl, map[string]*bintree{}},
		"kubernetes.tmpl":     {templatesKubernetesTmpl, map[string]*bintree{}},
		"kv.tmpl":             {templatesKvTmpl, map[string]*bintree{}},
		"marathon.tmpl":       {templatesMarathonTmpl, map[string]*bintree{}},
		"mesos.tmpl":          {templatesMesosTmpl, map[string]*bintree{}},
		"notFound.tmpl":       {templatesNotfoundTmpl, map[string]*bintree{}},
		"rancher.tmpl":        {templatesRancherTmpl, map[string]*bintree{}},
	}},
}}
// RestoreAsset restores an asset under the given directory
func RestoreAsset(dir, name string) error {
	data, err := Asset(name)
	if err != nil {
		return err
	}
	info, err := AssetInfo(name)
	if err != nil {
		return err
	}
	// Recreate the asset's parent directories before writing the file.
	if err := os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)); err != nil {
		return err
	}
	if err := ioutil.WriteFile(_filePath(dir, name), data, info.Mode()); err != nil {
		return err
	}
	// Restore the recorded modification time on the written file.
	return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
}
// RestoreAssets restores an asset under the given directory recursively
func RestoreAssets(dir, name string) error {
	children, err := AssetDir(name)
	if err != nil {
		// Not a directory in the asset tree: restore it as a single file.
		return RestoreAsset(dir, name)
	}
	for _, child := range children {
		if err := RestoreAssets(dir, filepath.Join(name, child)); err != nil {
			return err
		}
	}
	return nil
}
// _filePath joins dir with the asset name, normalizing any backslashes in
// the name to slashes before splitting it into OS-correct path components.
func _filePath(dir, name string) string {
	canonical := strings.Replace(name, "\\", "/", -1)
	parts := append([]string{dir}, strings.Split(canonical, "/")...)
	return filepath.Join(parts...)
}
chore(kv): gen templates.
// Code generated by go-bindata.
// sources:
// templates/consul_catalog.tmpl
// templates/docker.tmpl
// templates/ecs.tmpl
// templates/eureka.tmpl
// templates/kubernetes.tmpl
// templates/kv.tmpl
// templates/marathon.tmpl
// templates/mesos.tmpl
// templates/notFound.tmpl
// templates/rancher.tmpl
// DO NOT EDIT!
package gentemplates
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
)
// asset couples the raw bytes of one embedded file with the synthetic
// file metadata reported for it.
type asset struct {
	bytes []byte      // raw file contents
	info  os.FileInfo // synthetic metadata (a bindataFileInfo)
}
// bindataFileInfo implements os.FileInfo for embedded assets.
type bindataFileInfo struct {
	name    string      // slash-separated asset path, e.g. "templates/docker.tmpl"
	size    int64       // recorded size in bytes (0 in this generated file)
	mode    os.FileMode // recorded permission bits
	modTime time.Time   // recorded modification time
}
// Name returns the recorded asset path.
func (fi bindataFileInfo) Name() string {
	return fi.name
}
// Size returns the recorded size in bytes.
func (fi bindataFileInfo) Size() int64 {
	return fi.size
}
// Mode returns the recorded permission bits.
func (fi bindataFileInfo) Mode() os.FileMode {
	return fi.mode
}
// ModTime returns the recorded modification time.
func (fi bindataFileInfo) ModTime() time.Time {
	return fi.modTime
}
// IsDir always reports false: these file infos describe embedded regular
// files only.
func (fi bindataFileInfo) IsDir() bool {
	return false
}
// Sys returns nil: there is no underlying OS data source for an embedded
// asset.
func (fi bindataFileInfo) Sys() interface{} {
	return nil
}
// _templatesConsul_catalogTmpl holds the raw embedded contents of
// templates/consul_catalog.tmpl, a Go text/template that renders TOML
// backend/frontend configuration. Generated data — do not edit by hand;
// regenerate with go-bindata instead.
var _templatesConsul_catalogTmpl = []byte(`[backends]
{{range $service := .Services}}
{{ $circuitBreaker := getCircuitBreaker $service.Attributes }}
{{if $circuitBreaker }}
[backends."backend-{{ getServiceBackendName $service }}".circuitBreaker]
expression = "{{ $circuitBreaker.Expression }}"
{{end}}
{{ $loadBalancer := getLoadBalancer $service.Attributes }}
{{if $loadBalancer }}
[backends."backend-{{ getServiceBackendName $service }}".loadBalancer]
method = "{{ $loadBalancer.Method }}"
sticky = {{ $loadBalancer.Sticky }}
{{if $loadBalancer.Stickiness }}
[backends."backend-{{ getServiceBackendName $service }}".loadBalancer.stickiness]
cookieName = "{{ $loadBalancer.Stickiness.CookieName }}"
{{end}}
{{end}}
{{ $maxConn := getMaxConn $service.Attributes }}
{{if $maxConn }}
[backends."backend-{{ getServiceBackendName $service }}".maxConn]
extractorFunc = "{{ $maxConn.ExtractorFunc }}"
amount = {{ $maxConn.Amount }}
{{end}}
{{ $healthCheck := getHealthCheck $service.Attributes }}
{{if $healthCheck }}
[backends.backend-{{ getServiceBackendName $service }}.healthCheck]
path = "{{ $healthCheck.Path }}"
port = {{ $healthCheck.Port }}
interval = "{{ $healthCheck.Interval }}"
{{end}}
{{end}}
{{range $index, $node := .Nodes}}
[backends."backend-{{ getNodeBackendName $node }}".servers."{{ getServerName $node $index }}"]
url = "{{ getProtocol $node.Service.Tags }}://{{ getBackendAddress $node }}:{{ $node.Service.Port }}"
weight = {{ getWeight $node.Service.Tags }}
{{end}}
[frontends]
{{range $service := .Services}}
[frontends."frontend-{{ $service.ServiceName }}"]
backend = "backend-{{ getServiceBackendName $service }}"
priority = {{ getPriority $service.Attributes }}
passHostHeader = {{ getPassHostHeader $service.Attributes }}
passTLSCert = {{ getPassTLSCert $service.Attributes }}
entryPoints = [{{range getFrontEndEntryPoints $service.Attributes }}
"{{.}}",
{{end}}]
{{ $whitelistSourceRange := getWhitelistSourceRange $service.Attributes }}
{{if $whitelistSourceRange }}
whitelistSourceRange = [{{range $whitelistSourceRange}}
"{{.}}",
{{end}}]
{{end}}
basicAuth = [{{range getBasicAuth $service.Attributes }}
"{{.}}",
{{end}}]
{{ $redirect := getRedirect $service.Attributes }}
{{if $redirect }}
[frontends."frontend-{{ $service.ServiceName }}".redirect]
entryPoint = "{{ $redirect.EntryPoint }}"
regex = "{{ $redirect.Regex }}"
replacement = "{{ $redirect.Replacement }}"
{{end}}
{{ if hasErrorPages $service.Attributes }}
[frontends."frontend-{{ $service.ServiceName }}".errors]
{{ range $pageName, $page := getErrorPages $service.Attributes }}
[frontends."frontend-{{ $service.ServiceName }}".errors.{{ $pageName }}]
status = [{{range $page.Status }}
"{{.}}",
{{end}}]
backend = "{{ $page.Backend }}"
query = "{{ $page.Query }}"
{{end}}
{{end}}
{{ if hasRateLimit $service.Attributes }}
{{ $rateLimit := getRateLimit $service.Attributes }}
[frontends."frontend-{{ $service.ServiceName }}".rateLimit]
extractorFunc = "{{ $rateLimit.ExtractorFunc }}"
[frontends."frontend-{{ $service.ServiceName }}".rateLimit.rateSet]
{{ range $limitName, $limit := $rateLimit.RateSet }}
[frontends."frontend-{{ $service.ServiceName }}".rateLimit.rateSet.{{ $limitName }}]
period = "{{ $limit.Period }}"
average = {{ $limit.Average }}
burst = {{ $limit.Burst }}
{{end}}
{{end}}
{{ $headers := getHeaders $service.Attributes }}
{{ if $headers }}
[frontends."frontend-{{ $service.ServiceName }}".headers]
SSLRedirect = {{ $headers.SSLRedirect }}
SSLTemporaryRedirect = {{ $headers.SSLTemporaryRedirect }}
SSLHost = "{{ $headers.SSLHost }}"
STSSeconds = {{ $headers.STSSeconds }}
STSIncludeSubdomains = {{ $headers.STSIncludeSubdomains }}
STSPreload = {{ $headers.STSPreload }}
ForceSTSHeader = {{ $headers.ForceSTSHeader }}
FrameDeny = {{ $headers.FrameDeny }}
CustomFrameOptionsValue = "{{ $headers.CustomFrameOptionsValue }}"
ContentTypeNosniff = {{ $headers.ContentTypeNosniff }}
BrowserXSSFilter = {{ $headers.BrowserXSSFilter }}
ContentSecurityPolicy = "{{ $headers.ContentSecurityPolicy }}"
PublicKey = "{{ $headers.PublicKey }}"
ReferrerPolicy = "{{ $headers.ReferrerPolicy }}"
IsDevelopment = {{ $headers.IsDevelopment }}
{{ if $headers.AllowedHosts }}
AllowedHosts = [{{ range $headers.AllowedHosts }}
"{{.}}",
{{end}}]
{{end}}
{{ if $headers.HostsProxyHeaders }}
HostsProxyHeaders = [{{ range $headers.HostsProxyHeaders }}
"{{.}}",
{{end}}]
{{end}}
{{ if $headers.CustomRequestHeaders }}
[frontends."frontend-{{ $service.ServiceName }}".headers.customRequestHeaders]
{{ range $k, $v := $headers.CustomRequestHeaders }}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{ if $headers.CustomResponseHeaders }}
[frontends."frontend-{{ $service.ServiceName }}".headers.customResponseHeaders]
{{ range $k, $v := $headers.CustomResponseHeaders }}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{ if $headers.SSLProxyHeaders }}
[frontends."frontend-{{ $service.ServiceName }}".headers.SSLProxyHeaders]
{{range $k, $v := $headers.SSLProxyHeaders}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{end}}
[frontends."frontend-{{ $service.ServiceName }}".routes."route-host-{{ $service.ServiceName }}"]
rule = "{{ getFrontendRule $service }}"
{{end}}
`)
// templatesConsul_catalogTmplBytes returns the raw bytes of
// templates/consul_catalog.tmpl; the error is always nil here.
func templatesConsul_catalogTmplBytes() ([]byte, error) {
	return _templatesConsul_catalogTmpl, nil
}
// templatesConsul_catalogTmpl wraps the consul_catalog template bytes in an
// *asset with placeholder metadata (zero size, zero mode, Unix epoch mtime).
func templatesConsul_catalogTmpl() (*asset, error) {
	data, err := templatesConsul_catalogTmplBytes()
	if err != nil {
		return nil, err
	}
	meta := bindataFileInfo{
		name:    "templates/consul_catalog.tmpl",
		size:    0,
		mode:    os.FileMode(0),
		modTime: time.Unix(0, 0),
	}
	return &asset{bytes: data, info: meta}, nil
}
// _templatesDockerTmpl holds the raw embedded contents of
// templates/docker.tmpl, a Go text/template that renders TOML
// backend/frontend configuration from Docker containers. Generated data —
// do not edit by hand; regenerate with go-bindata instead.
var _templatesDockerTmpl = []byte(`{{$backendServers := .Servers}}
[backends]
{{range $backendName, $backend := .Backends}}
{{if hasCircuitBreakerLabel $backend}}
[backends.backend-{{$backendName}}.circuitBreaker]
expression = "{{getCircuitBreakerExpression $backend}}"
{{end}}
{{if hasLoadBalancerLabel $backend}}
[backends.backend-{{$backendName}}.loadBalancer]
method = "{{getLoadBalancerMethod $backend}}"
sticky = {{getSticky $backend}}
{{if hasStickinessLabel $backend}}
[backends.backend-{{$backendName}}.loadBalancer.stickiness]
cookieName = "{{getStickinessCookieName $backend}}"
{{end}}
{{end}}
{{if hasMaxConnLabels $backend}}
[backends.backend-{{$backendName}}.maxConn]
amount = {{getMaxConnAmount $backend}}
extractorFunc = "{{getMaxConnExtractorFunc $backend}}"
{{end}}
{{if hasHealthCheckLabels $backend}}
[backends.backend-{{$backendName}}.healthCheck]
path = "{{getHealthCheckPath $backend}}"
port = {{getHealthCheckPort $backend}}
interval = "{{getHealthCheckInterval $backend}}"
{{end}}
{{$servers := index $backendServers $backendName}}
{{range $serverName, $server := $servers}}
{{if hasServices $server}}
{{$services := getServiceNames $server}}
{{range $serviceIndex, $serviceName := $services}}
[backends.backend-{{getServiceBackend $server $serviceName}}.servers.service-{{$serverName}}]
url = "{{getServiceProtocol $server $serviceName}}://{{getIPAddress $server}}:{{getServicePort $server $serviceName}}"
weight = {{getServiceWeight $server $serviceName}}
{{end}}
{{else}}
[backends.backend-{{$backendName}}.servers.server-{{$server.Name | replace "/" "" | replace "." "-"}}]
url = "{{getProtocol $server}}://{{getIPAddress $server}}:{{getPort $server}}"
weight = {{getWeight $server}}
{{end}}
{{end}}
{{end}}
[frontends]
{{range $frontend, $containers := .Frontends}}
{{$container := index $containers 0}}
{{if hasServices $container}}
{{$services := getServiceNames $container}}
{{range $serviceIndex, $serviceName := $services}}
[frontends."frontend-{{getServiceBackend $container $serviceName}}"]
backend = "backend-{{getServiceBackend $container $serviceName}}"
priority = {{getServicePriority $container $serviceName}}
passHostHeader = {{getServicePassHostHeader $container $serviceName}}
passTLSCert = {{getServicePassTLSCert $container $serviceName}}
entryPoints = [{{range getServiceEntryPoints $container $serviceName}}
"{{.}}",
{{end}}]
{{if getServiceWhitelistSourceRange $container $serviceName}}
whitelistSourceRange = [{{range getServiceWhitelistSourceRange $container $serviceName}}
"{{.}}",
{{end}}]
{{end}}
basicAuth = [{{range getServiceBasicAuth $container $serviceName}}
"{{.}}",
{{end}}]
{{if hasServiceRedirect $container $serviceName}}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".redirect]
entryPoint = "{{getServiceRedirectEntryPoint $container $serviceName}}"
regex = "{{getServiceRedirectRegex $container $serviceName}}"
replacement = "{{getServiceRedirectReplacement $container $serviceName}}"
{{end}}
{{ if hasServiceErrorPages $container $serviceName }}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".errors]
{{ range $pageName, $page := getServiceErrorPages $container $serviceName }}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".errors.{{$pageName}}]
status = [{{range $page.Status}}
"{{.}}",
{{end}}]
backend = "{{$page.Backend}}"
query = "{{$page.Query}}"
{{end}}
{{end}}
{{ if hasServiceRateLimits $container $serviceName }}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".rateLimit]
extractorFunc = "{{ getRateLimitsExtractorFunc $container $serviceName }}"
[frontends."frontend-{{getServiceBackend $container $serviceName}}".rateLimit.rateSet]
{{ range $limitName, $rateLimit := getServiceRateLimits $container $serviceName }}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".rateLimit.rateSet.{{ $limitName }}]
period = "{{ $rateLimit.Period }}"
average = {{ $rateLimit.Average }}
burst = {{ $rateLimit.Burst }}
{{end}}
{{end}}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".routes."service-{{$serviceName | replace "/" "" | replace "." "-"}}"]
rule = "{{getServiceFrontendRule $container $serviceName}}"
{{if hasServiceRequestHeaders $container $serviceName}}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".headers.customRequestHeaders]
{{range $k, $v := getServiceRequestHeaders $container $serviceName}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if hasServiceResponseHeaders $container $serviceName}}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".headers.customResponseHeaders]
{{range $k, $v := getServiceResponseHeaders $container $serviceName}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{end}} ## end range services
{{else}}
[frontends."frontend-{{$frontend}}"]
backend = "backend-{{getBackend $container}}"
priority = {{getPriority $container}}
passHostHeader = {{getPassHostHeader $container}}
passTLSCert = {{getPassTLSCert $container}}
entryPoints = [{{range getEntryPoints $container}}
"{{.}}",
{{end}}]
{{if getWhitelistSourceRange $container}}
whitelistSourceRange = [{{range getWhitelistSourceRange $container}}
"{{.}}",
{{end}}]
{{end}}
basicAuth = [{{range getBasicAuth $container}}
"{{.}}",
{{end}}]
{{if hasRedirect $container}}
[frontends."frontend-{{$frontend}}".redirect]
entryPoint = "{{getRedirectEntryPoint $container}}"
regex = "{{getRedirectRegex $container}}"
replacement = "{{getRedirectReplacement $container}}"
{{end}}
{{ if hasErrorPages $container }}
[frontends."frontend-{{$frontend}}".errors]
{{ range $pageName, $page := getErrorPages $container }}
[frontends."frontend-{{$frontend}}".errors.{{ $pageName }}]
status = [{{range $page.Status}}
"{{.}}",
{{end}}]
backend = "{{$page.Backend}}"
query = "{{$page.Query}}"
{{end}}
{{end}}
{{ if hasRateLimits $container }}
[frontends."frontend-{{$frontend}}".rateLimit]
extractorFunc = "{{ getRateLimitsExtractorFunc $container }}"
[frontends."frontend-{{$frontend}}".rateLimit.rateSet]
{{ range $limitName, $rateLimit := getRateLimits $container }}
[frontends."frontend-{{$frontend}}".rateLimit.rateSet.{{ $limitName }}]
period = "{{ $rateLimit.Period }}"
average = {{ $rateLimit.Average }}
burst = {{ $rateLimit.Burst }}
{{end}}
{{end}}
{{ if hasHeaders $container}}
[frontends."frontend-{{$frontend}}".headers]
{{if hasSSLRedirectHeaders $container}}
SSLRedirect = {{getSSLRedirectHeaders $container}}
{{end}}
{{if hasSSLTemporaryRedirectHeaders $container}}
SSLTemporaryRedirect = {{getSSLTemporaryRedirectHeaders $container}}
{{end}}
{{if hasSSLHostHeaders $container}}
SSLHost = "{{getSSLHostHeaders $container}}"
{{end}}
{{if hasSTSSecondsHeaders $container}}
STSSeconds = {{getSTSSecondsHeaders $container}}
{{end}}
{{if hasSTSIncludeSubdomainsHeaders $container}}
STSIncludeSubdomains = {{getSTSIncludeSubdomainsHeaders $container}}
{{end}}
{{if hasSTSPreloadHeaders $container}}
STSPreload = {{getSTSPreloadHeaders $container}}
{{end}}
{{if hasForceSTSHeaderHeaders $container}}
ForceSTSHeader = {{getForceSTSHeaderHeaders $container}}
{{end}}
{{if hasFrameDenyHeaders $container}}
FrameDeny = {{getFrameDenyHeaders $container}}
{{end}}
{{if hasCustomFrameOptionsValueHeaders $container}}
CustomFrameOptionsValue = "{{getCustomFrameOptionsValueHeaders $container}}"
{{end}}
{{if hasContentTypeNosniffHeaders $container}}
ContentTypeNosniff = {{getContentTypeNosniffHeaders $container}}
{{end}}
{{if hasBrowserXSSFilterHeaders $container}}
BrowserXSSFilter = {{getBrowserXSSFilterHeaders $container}}
{{end}}
{{if hasContentSecurityPolicyHeaders $container}}
ContentSecurityPolicy = "{{getContentSecurityPolicyHeaders $container}}"
{{end}}
{{if hasPublicKeyHeaders $container}}
PublicKey = "{{getPublicKeyHeaders $container}}"
{{end}}
{{if hasReferrerPolicyHeaders $container}}
ReferrerPolicy = "{{getReferrerPolicyHeaders $container}}"
{{end}}
{{if hasIsDevelopmentHeaders $container}}
IsDevelopment = {{getIsDevelopmentHeaders $container}}
{{end}}
{{if hasAllowedHostsHeaders $container}}
AllowedHosts = [{{range getAllowedHostsHeaders $container}}
"{{.}}",
{{end}}]
{{end}}
{{if hasHostsProxyHeaders $container}}
HostsProxyHeaders = [{{range getHostsProxyHeaders $container}}
"{{.}}",
{{end}}]
{{end}}
{{if hasRequestHeaders $container}}
[frontends."frontend-{{$frontend}}".headers.customRequestHeaders]
{{range $k, $v := getRequestHeaders $container}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if hasResponseHeaders $container}}
[frontends."frontend-{{$frontend}}".headers.customResponseHeaders]
{{range $k, $v := getResponseHeaders $container}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if hasSSLProxyHeaders $container}}
[frontends."frontend-{{$frontend}}".headers.SSLProxyHeaders]
{{range $k, $v := getSSLProxyHeaders $container}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{end}}
[frontends."frontend-{{$frontend}}".routes."route-frontend-{{$frontend}}"]
rule = "{{getFrontendRule $container}}"
{{end}}
{{end}}
`)
// templatesDockerTmplBytes returns the raw bytes of templates/docker.tmpl;
// the error is always nil here.
func templatesDockerTmplBytes() ([]byte, error) {
	return _templatesDockerTmpl, nil
}
// templatesDockerTmpl wraps the docker template bytes in an *asset with
// placeholder metadata (zero size, zero mode, Unix epoch mtime).
func templatesDockerTmpl() (*asset, error) {
	data, err := templatesDockerTmplBytes()
	if err != nil {
		return nil, err
	}
	meta := bindataFileInfo{
		name:    "templates/docker.tmpl",
		size:    0,
		mode:    os.FileMode(0),
		modTime: time.Unix(0, 0),
	}
	return &asset{bytes: data, info: meta}, nil
}
// _templatesEcsTmpl holds the raw embedded contents of templates/ecs.tmpl,
// a Go text/template that renders TOML backend/frontend configuration from
// ECS instances. Generated data — do not edit by hand; regenerate with
// go-bindata instead.
var _templatesEcsTmpl = []byte(`[backends]
{{range $serviceName, $instances := .Services}}
{{if hasCircuitBreakerLabel $instances}}
[backends.backend-{{ $serviceName }}.circuitBreaker]
expression = "{{getCircuitBreakerExpression $instances}}"
{{end}}
{{if hasLoadBalancerLabel $instances}}
[backends.backend-{{ $serviceName }}.loadBalancer]
method = "{{ getLoadBalancerMethod $instances}}"
sticky = {{ getSticky $instances}}
{{if hasStickinessLabel $instances}}
[backends.backend-{{ $serviceName }}.loadBalancer.stickiness]
cookieName = "{{getStickinessCookieName $instances}}"
{{end}}
{{end}}
{{if hasMaxConnLabels $instances}}
[backends.backend-{{ $serviceName }}.maxConn]
amount = {{getMaxConnAmount $instances}}
extractorFunc = "{{getMaxConnExtractorFunc $instances}}"
{{end}}
{{ if hasHealthCheckLabels $instances }}
[backends.backend-{{ $serviceName }}.healthCheck]
path = "{{getHealthCheckPath $instances }}"
port = {{getHealthCheckPort $instances}}
interval = "{{getHealthCheckInterval $instances }}"
{{end}}
{{range $index, $instance := $instances}}
[backends.backend-{{ $instance.Name }}.servers.server-{{ $instance.Name }}{{ $instance.ID }}]
url = "{{ getProtocol $instance }}://{{ getHost $instance }}:{{ getPort $instance }}"
weight = {{ getWeight $instance}}
{{end}}
{{end}}
[frontends]
{{range $serviceName, $instances := .Services}}
{{range $instance := filterFrontends $instances}}
[frontends.frontend-{{ $serviceName }}]
backend = "backend-{{ $serviceName }}"
priority = {{ getPriority $instance}}
passHostHeader = {{ getPassHostHeader $instance}}
passTLSCert = {{ getPassTLSCert $instance}}
entryPoints = [{{range getEntryPoints $instance}}
"{{.}}",
{{end}}]
{{if getWhitelistSourceRange $instance}}
whitelistSourceRange = [{{range getWhitelistSourceRange $instance}}
"{{.}}",
{{end}}]
{{end}}
basicAuth = [{{range getBasicAuth $instance}}
"{{.}}",
{{end}}]
{{if hasRedirect $instance}}
[frontends."frontend-{{ $serviceName }}".redirect]
entryPoint = "{{getRedirectEntryPoint $instance}}"
regex = "{{getRedirectRegex $instance}}"
replacement = "{{getRedirectReplacement $instance}}"
{{end}}
{{ if hasErrorPages $instance }}
[frontends."frontend-{{ $serviceName }}".errors]
{{ range $pageName, $page := getErrorPages $instance }}
[frontends."frontend-{{ $serviceName }}".errors.{{ $pageName }}]
status = [{{range $page.Status}}
"{{.}}",
{{end}}]
backend = "{{$page.Backend}}"
query = "{{$page.Query}}"
{{end}}
{{end}}
{{ if hasRateLimits $instance }}
[frontends."frontend-{{ $serviceName }}".rateLimit]
extractorFunc = "{{ getRateLimitsExtractorFunc $instance }}"
[frontends."frontend-{{ $serviceName }}".rateLimit.rateSet]
{{ range $limitName, $rateLimit := getRateLimits $instance }}
[frontends."frontend-{{ $serviceName }}".rateLimit.rateSet.{{ $limitName }}]
period = "{{ $rateLimit.Period }}"
average = {{ $rateLimit.Average }}
burst = {{ $rateLimit.Burst }}
{{end}}
{{end}}
{{if hasHeaders $instance }}
[frontends."frontend-{{ $serviceName }}".headers]
{{if hasSSLRedirectHeaders $instance}}
SSLRedirect = {{getSSLRedirectHeaders $instance}}
{{end}}
{{if hasSSLTemporaryRedirectHeaders $instance}}
SSLTemporaryRedirect = {{getSSLTemporaryRedirectHeaders $instance}}
{{end}}
{{if hasSSLHostHeaders $instance}}
SSLHost = "{{getSSLHostHeaders $instance}}"
{{end}}
{{if hasSTSSecondsHeaders $instance}}
STSSeconds = {{getSTSSecondsHeaders $instance}}
{{end}}
{{if hasSTSIncludeSubdomainsHeaders $instance}}
STSIncludeSubdomains = {{getSTSIncludeSubdomainsHeaders $instance}}
{{end}}
{{if hasSTSPreloadHeaders $instance}}
STSPreload = {{getSTSPreloadHeaders $instance}}
{{end}}
{{if hasForceSTSHeaderHeaders $instance}}
ForceSTSHeader = {{getForceSTSHeaderHeaders $instance}}
{{end}}
{{if hasFrameDenyHeaders $instance}}
FrameDeny = {{getFrameDenyHeaders $instance}}
{{end}}
{{if hasCustomFrameOptionsValueHeaders $instance}}
CustomFrameOptionsValue = "{{getCustomFrameOptionsValueHeaders $instance}}"
{{end}}
{{if hasContentTypeNosniffHeaders $instance}}
ContentTypeNosniff = {{getContentTypeNosniffHeaders $instance}}
{{end}}
{{if hasBrowserXSSFilterHeaders $instance}}
BrowserXSSFilter = {{getBrowserXSSFilterHeaders $instance}}
{{end}}
{{if hasContentSecurityPolicyHeaders $instance}}
ContentSecurityPolicy = "{{getContentSecurityPolicyHeaders $instance}}"
{{end}}
{{if hasPublicKeyHeaders $instance}}
PublicKey = "{{getPublicKeyHeaders $instance}}"
{{end}}
{{if hasReferrerPolicyHeaders $instance}}
ReferrerPolicy = "{{getReferrerPolicyHeaders $instance}}"
{{end}}
{{if hasIsDevelopmentHeaders $instance}}
IsDevelopment = {{getIsDevelopmentHeaders $instance}}
{{end}}
{{if hasRequestHeaders $instance}}
[frontends."frontend-{{ $serviceName }}".headers.customRequestHeaders]
{{range $k, $v := getRequestHeaders $instance}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if hasResponseHeaders $instance}}
[frontends."frontend-{{ $serviceName }}".headers.customResponseHeaders]
{{range $k, $v := getResponseHeaders $instance}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if hasAllowedHostsHeaders $instance}}
[frontends."frontend-{{ $serviceName }}".headers.AllowedHosts]
{{range getAllowedHostsHeaders $instance}}
"{{.}}"
{{end}}
{{end}}
{{if hasHostsProxyHeaders $instance}}
[frontends."frontend-{{ $serviceName }}".headers.HostsProxyHeaders]
{{range getHostsProxyHeaders $instance}}
"{{.}}"
{{end}}
{{end}}
{{if hasSSLProxyHeaders $instance}}
[frontends."frontend-{{ $serviceName }}".headers.SSLProxyHeaders]
{{range $k, $v := getSSLProxyHeaders $instance}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{end}}
[frontends.frontend-{{ $serviceName }}.routes.route-frontend-{{ $serviceName }}]
rule = "{{getFrontendRule $instance}}"
{{end}}
{{end}}`)
// templatesEcsTmplBytes returns the raw bytes of templates/ecs.tmpl; the
// error is always nil here.
func templatesEcsTmplBytes() ([]byte, error) {
	return _templatesEcsTmpl, nil
}
// templatesEcsTmpl wraps the ecs template bytes in an *asset with
// placeholder metadata (zero size, zero mode, Unix epoch mtime).
func templatesEcsTmpl() (*asset, error) {
	data, err := templatesEcsTmplBytes()
	if err != nil {
		return nil, err
	}
	meta := bindataFileInfo{
		name:    "templates/ecs.tmpl",
		size:    0,
		mode:    os.FileMode(0),
		modTime: time.Unix(0, 0),
	}
	return &asset{bytes: data, info: meta}, nil
}
// _templatesEurekaTmpl holds the raw embedded contents of
// templates/eureka.tmpl, a Go text/template that renders TOML
// backend/frontend configuration from Eureka applications. Generated data —
// do not edit by hand; regenerate with go-bindata instead.
var _templatesEurekaTmpl = []byte(`[backends]{{range .Applications}}
{{ $app := .}}
{{range .Instances}}
[backends.backend{{$app.Name}}.servers.server-{{ getInstanceID . }}]
url = "{{ getProtocol . }}://{{ .IpAddr }}:{{ getPort . }}"
weight = {{ getWeight . }}
{{end}}{{end}}
[frontends]{{range .Applications}}
[frontends.frontend{{.Name}}]
backend = "backend{{.Name}}"
entryPoints = ["http"]
[frontends.frontend{{.Name }}.routes.route-host{{.Name}}]
rule = "Host:{{ .Name | tolower }}"
{{end}}
`)
// templatesEurekaTmplBytes returns the raw bytes of templates/eureka.tmpl;
// the error is always nil here.
func templatesEurekaTmplBytes() ([]byte, error) {
	return _templatesEurekaTmpl, nil
}
// templatesEurekaTmpl wraps the eureka template bytes in an *asset with
// placeholder metadata (zero size, zero mode, Unix epoch mtime).
func templatesEurekaTmpl() (*asset, error) {
	data, err := templatesEurekaTmplBytes()
	if err != nil {
		return nil, err
	}
	meta := bindataFileInfo{
		name:    "templates/eureka.tmpl",
		size:    0,
		mode:    os.FileMode(0),
		modTime: time.Unix(0, 0),
	}
	return &asset{bytes: data, info: meta}, nil
}
// _templatesKubernetesTmpl holds the raw embedded contents of
// templates/kubernetes.tmpl, a Go text/template that renders TOML
// backend/frontend configuration from Kubernetes resources. Generated data —
// do not edit by hand; regenerate with go-bindata instead.
var _templatesKubernetesTmpl = []byte(`[backends]{{range $backendName, $backend := .Backends}}
[backends."{{$backendName}}"]
{{if $backend.CircuitBreaker}}
[backends."{{$backendName}}".circuitbreaker]
expression = "{{$backend.CircuitBreaker.Expression}}"
{{end}}
[backends."{{$backendName}}".loadbalancer]
method = "{{$backend.LoadBalancer.Method}}"
{{if $backend.LoadBalancer.Sticky}}
sticky = true
{{end}}
{{if $backend.LoadBalancer.Stickiness}}
[backends."{{$backendName}}".loadbalancer.stickiness]
cookieName = "{{$backend.LoadBalancer.Stickiness.CookieName}}"
{{end}}
{{range $serverName, $server := $backend.Servers}}
[backends."{{$backendName}}".servers."{{$serverName}}"]
url = "{{$server.URL}}"
weight = {{$server.Weight}}
{{end}}
{{end}}
[frontends]{{range $frontendName, $frontend := .Frontends}}
[frontends."{{$frontendName}}"]
backend = "{{$frontend.Backend}}"
priority = {{$frontend.Priority}}
passHostHeader = {{$frontend.PassHostHeader}}
entryPoints = [{{range $frontend.EntryPoints}}
"{{.}}",
{{end}}]
basicAuth = [{{range $frontend.BasicAuth}}
"{{.}}",
{{end}}]
whitelistSourceRange = [{{range $frontend.WhitelistSourceRange}}
"{{.}}",
{{end}}]
{{if $frontend.Redirect}}
[frontends."{{$frontendName}}".redirect]
entryPoint = "{{$frontend.RedirectEntryPoint}}"
regex = "{{$frontend.RedirectRegex}}"
replacement = "{{$frontend.RedirectReplacement}}"
{{end}}
{{if $frontend.Headers }}
[frontends."{{$frontendName}}".headers]
SSLRedirect = {{$frontend.Headers.SSLRedirect}}
SSLTemporaryRedirect = {{$frontend.Headers.SSLTemporaryRedirect}}
SSLHost = "{{$frontend.Headers.SSLHost}}"
STSSeconds = {{$frontend.Headers.STSSeconds}}
STSIncludeSubdomains = {{$frontend.Headers.STSIncludeSubdomains}}
STSPreload = {{$frontend.Headers.STSPreload}}
ForceSTSHeader = {{$frontend.Headers.ForceSTSHeader}}
FrameDeny = {{$frontend.Headers.FrameDeny}}
CustomFrameOptionsValue = "{{$frontend.Headers.CustomFrameOptionsValue}}"
ContentTypeNosniff = {{$frontend.Headers.ContentTypeNosniff}}
BrowserXSSFilter = {{$frontend.Headers.BrowserXSSFilter}}
ContentSecurityPolicy = "{{$frontend.Headers.ContentSecurityPolicy}}"
PublicKey = "{{$frontend.Headers.PublicKey}}"
ReferrerPolicy = "{{$frontend.Headers.ReferrerPolicy}}"
IsDevelopment = {{$frontend.Headers.IsDevelopment}}
{{if $frontend.Headers.AllowedHosts}}
AllowedHosts = [{{range $frontend.Headers.AllowedHosts}}
"{{.}}",
{{end}}]
{{end}}
{{if $frontend.Headers.HostsProxyHeaders}}
HostsProxyHeaders = [{{range $frontend.Headers.HostsProxyHeaders}}
"{{.}}",
{{end}}]
{{end}}
{{if $frontend.Headers.CustomRequestHeaders}}
[frontends."{{$frontendName}}".headers.customrequestheaders]
{{range $k, $v := $frontend.Headers.CustomRequestHeaders}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if $frontend.Headers.CustomResponseHeaders}}
[frontends."{{$frontendName}}".headers.customresponseheaders]
{{range $k, $v := $frontend.Headers.CustomResponseHeaders}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if $frontend.Headers.SSLProxyHeaders}}
[frontends."{{$frontendName}}".headers.SSLProxyHeaders]
{{range $k, $v := $frontend.Headers.SSLProxyHeaders}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{end}}
{{range $routeName, $route := $frontend.Routes}}
[frontends."{{$frontendName}}".routes."{{$routeName}}"]
rule = "{{$route.Rule}}"
{{end}}
{{end}}`)
// templatesKubernetesTmplBytes returns the raw bytes of
// templates/kubernetes.tmpl; the error is always nil here.
func templatesKubernetesTmplBytes() ([]byte, error) {
	return _templatesKubernetesTmpl, nil
}
// templatesKubernetesTmpl wraps the kubernetes template bytes in an *asset
// with placeholder metadata (zero size, zero mode, Unix epoch mtime).
func templatesKubernetesTmpl() (*asset, error) {
	data, err := templatesKubernetesTmplBytes()
	if err != nil {
		return nil, err
	}
	meta := bindataFileInfo{
		name:    "templates/kubernetes.tmpl",
		size:    0,
		mode:    os.FileMode(0),
		modTime: time.Unix(0, 0),
	}
	return &asset{bytes: data, info: meta}, nil
}
// _templatesKvTmpl holds the raw embedded contents of templates/kv.tmpl, a
// Go text/template that renders TOML backend/frontend/TLS configuration
// from a key-value store. Generated data — do not edit by hand; regenerate
// with go-bindata instead.
var _templatesKvTmpl = []byte(`[backends]
{{range $backend := List .Prefix "/backends/"}}
{{$backendName := Last $backend}}
{{$circuitBreaker := Get "" $backend "/circuitbreaker/expression"}}
{{with $circuitBreaker}}
[backends."{{$backendName}}".circuitBreaker]
expression = "{{$circuitBreaker}}"
{{end}}
{{$loadBalancer := Get "" $backend "/loadbalancer/method"}}
{{with $loadBalancer}}
[backends."{{$backendName}}".loadBalancer]
method = "{{$loadBalancer}}"
sticky = {{ getSticky $backend }}
{{if hasStickinessLabel $backend}}
[backends."{{$backendName}}".loadBalancer.stickiness]
cookieName = "{{getStickinessCookieName $backend}}"
{{end}}
{{end}}
{{$maxConnAmt := Get "" $backend "/maxconn/amount"}}
{{$maxConnExtractorFunc := Get "" $backend "/maxconn/extractorfunc"}}
{{with $maxConnAmt}}
{{with $maxConnExtractorFunc}}
[backends."{{$backendName}}".maxConn]
amount = {{$maxConnAmt}}
extractorFunc = "{{$maxConnExtractorFunc}}"
{{end}}
{{end}}
{{$healthCheck := Get "" $backend "/healthcheck/path"}}
{{with $healthCheck}}
[backends."{{$backendName}}".healthCheck]
path = "{{$healthCheck}}"
port = {{ Get "0" $backend "/healthcheck/port" }}
interval = "{{ Get "30s" $backend "/healthcheck/interval" }}"
{{end}}
{{range $server := ListServers $backend}}
[backends."{{$backendName}}".servers."{{Last $server}}"]
url = "{{Get "" $server "/url"}}"
weight = {{Get "0" $server "/weight"}}
{{end}}
{{end}}
[frontends]
{{range $frontend := List .Prefix "/frontends/" }}
{{$frontendName := Last $frontend}}
[frontends."{{$frontendName}}"]
backend = "{{Get "" $frontend "/backend"}}"
priority = {{Get "0" $frontend "/priority"}}
passHostHeader = {{Get "true" $frontend "/passHostHeader"}}
passTLSCert = {{Get "false" $frontend "/passtlscert"}}
{{$entryPoints := SplitGet $frontend "/entrypoints"}}
entryPoints = [{{range $entryPoints}}
"{{.}}",
{{end}}]
{{$whitelistSourceRange := SplitGet $frontend "/whitelistsourcerange"}}
whitelistSourceRange = [{{range $whitelistSourceRange}}
"{{.}}",
{{end}}]
{{$basicAuth := SplitGet $frontend "/basicauth"}}
basicAuth = [{{range $basicAuth}}
"{{.}}",
{{end}}]
{{$redirect := getRedirect $frontend }}
{{ if $redirect }}
[frontends."{{$frontendName}}".redirect]
entryPoint = "{{ $redirect.EntryPoint }}"
regex = "{{ $redirect.Regex }}"
replacement = "{{ $redirect.Replacement }}"
{{end}}
{{ $errorPages := getErrorPages $frontend }}
{{ if $errorPages }}
[frontends."{{$frontendName}}".errors]
{{ range $pageName, $page := $errorPages }}
[frontends."{{$frontendName}}".errors.{{ $pageName }}]
status = [{{range $page.Status}}
"{{.}}",
{{end}}]
backend = "{{$page.Backend}}"
query = "{{$page.Query}}"
{{end}}
{{end}}
{{ $rateLimit := getRateLimit $frontend }}
{{ if $rateLimit }}
[frontends."{{$frontendName}}".rateLimit]
extractorFunc = "{{ $rateLimit.ExtractorFunc }}"
[frontends."{{$frontendName}}".rateLimit.rateSet]
{{ range $limitName, $rateLimit := $rateLimit.RateSet }}
[frontends."{{$frontendName}}".rateLimit.rateSet.{{ $limitName }}]
period = "{{ $rateLimit.Period }}"
average = {{ $rateLimit.Average }}
burst = {{ $rateLimit.Burst }}
{{end}}
{{end}}
{{ $headers := getHeaders $frontend }}
{{ if $headers }}
[frontends."{{ $frontendName }}".headers]
SSLRedirect = {{ $headers.SSLRedirect }}
SSLTemporaryRedirect = {{ $headers.SSLTemporaryRedirect }}
SSLHost = "{{ $headers.SSLHost }}"
STSSeconds = {{ $headers.STSSeconds }}
STSIncludeSubdomains = {{ $headers.STSIncludeSubdomains }}
STSPreload = {{ $headers.STSPreload }}
ForceSTSHeader = {{ $headers.ForceSTSHeader }}
FrameDeny = {{ $headers.FrameDeny }}
CustomFrameOptionsValue = "{{ $headers.CustomFrameOptionsValue }}"
ContentTypeNosniff = {{ $headers.ContentTypeNosniff }}
BrowserXSSFilter = {{ $headers.BrowserXSSFilter }}
ContentSecurityPolicy = "{{ $headers.ContentSecurityPolicy }}"
PublicKey = "{{ $headers.PublicKey }}"
ReferrerPolicy = "{{ $headers.ReferrerPolicy }}"
IsDevelopment = {{ $headers.IsDevelopment }}
{{ if $headers.AllowedHosts }}
AllowedHosts = [{{ range $headers.AllowedHosts }}
"{{.}}",
{{end}}]
{{end}}
{{ if $headers.HostsProxyHeaders }}
HostsProxyHeaders = [{{ range $headers.HostsProxyHeaders }}
"{{.}}",
{{end}}]
{{end}}
{{ if $headers.CustomRequestHeaders }}
[frontends."{{ $frontendName }}".headers.customRequestHeaders]
{{ range $k, $v := $headers.CustomRequestHeaders }}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{ if $headers.CustomResponseHeaders }}
[frontends."{{ $frontendName }}".headers.customResponseHeaders]
{{ range $k, $v := $headers.CustomResponseHeaders }}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{ if $headers.SSLProxyHeaders }}
[frontends."{{ $frontendName }}".headers.SSLProxyHeaders]
{{range $k, $v := $headers.SSLProxyHeaders}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{end}}
{{range $route := List $frontend "/routes/"}}
[frontends."{{$frontendName}}".routes."{{Last $route}}"]
rule = "{{Get "" $route "/rule"}}"
{{end}}
{{end}}
{{range $tlsConfiguration := List .Prefix "/tlsconfiguration/"}}
[[tlsConfiguration]]
{{$entryPoints := SplitGet $tlsConfiguration "/entrypoints"}}
entryPoints = [{{range $entryPoints}}
"{{.}}",
{{end}}]
[tlsConfiguration.certificate]
certFile = """{{Get "" $tlsConfiguration "/certificate/certfile"}}"""
keyFile = """{{Get "" $tlsConfiguration "/certificate/keyfile"}}"""
{{end}}
`)
// templatesKvTmplBytes returns the raw bytes of templates/kv.tmpl; the
// error is always nil here.
func templatesKvTmplBytes() ([]byte, error) {
	return _templatesKvTmpl, nil
}
// templatesKvTmpl wraps the kv template bytes in an *asset with placeholder
// metadata (zero size, zero mode, Unix epoch mtime).
func templatesKvTmpl() (*asset, error) {
	data, err := templatesKvTmplBytes()
	if err != nil {
		return nil, err
	}
	meta := bindataFileInfo{
		name:    "templates/kv.tmpl",
		size:    0,
		mode:    os.FileMode(0),
		modTime: time.Unix(0, 0),
	}
	return &asset{bytes: data, info: meta}, nil
}
var _templatesMarathonTmpl = []byte(`{{$apps := .Applications}}
[backends]
{{range $app := $apps}}
{{range $serviceIndex, $serviceName := getServiceNames $app}}
[backends."{{getBackend $app $serviceName }}"]
{{ if hasCircuitBreakerLabels $app }}
[backends."{{getBackend $app $serviceName }}".circuitBreaker]
expression = "{{getCircuitBreakerExpression $app }}"
{{end}}
{{ if hasLoadBalancerLabels $app }}
[backends."{{getBackend $app $serviceName }}".loadBalancer]
method = "{{getLoadBalancerMethod $app }}"
sticky = {{getSticky $app}}
{{if hasStickinessLabel $app}}
[backends."{{getBackend $app $serviceName }}".loadBalancer.stickiness]
cookieName = "{{getStickinessCookieName $app}}"
{{end}}
{{end}}
{{ if hasMaxConnLabels $app }}
[backends."{{getBackend $app $serviceName }}".maxConn]
amount = {{getMaxConnAmount $app }}
extractorFunc = "{{getMaxConnExtractorFunc $app }}"
{{end}}
{{ if hasHealthCheckLabels $app }}
[backends."{{getBackend $app $serviceName }}".healthCheck]
path = "{{getHealthCheckPath $app }}"
port = {{getHealthCheckPort $app}}
interval = "{{getHealthCheckInterval $app }}"
{{end}}
{{end}}
{{range $task := $app.Tasks}}
{{range $serviceIndex, $serviceName := getServiceNames $app}}
[backends."{{getBackend $app $serviceName}}".servers."server-{{$task.ID | replace "." "-"}}{{getServiceNameSuffix $serviceName }}"]
url = "{{getProtocol $app $serviceName}}://{{getBackendServer $task $app}}:{{getPort $task $app $serviceName}}"
weight = {{getWeight $app $serviceName}}
{{end}}
{{end}}
{{end}}
[frontends]
{{range $app := $apps}}
{{range $serviceIndex, $serviceName := getServiceNames .}}
[frontends."{{ getFrontendName $app $serviceName }}"]
backend = "{{getBackend $app $serviceName}}"
priority = {{getPriority $app $serviceName}}
passHostHeader = {{getPassHostHeader $app $serviceName}}
passTLSCert = {{getPassTLSCert $app $serviceName}}
entryPoints = [{{range getEntryPoints $app $serviceName}}
"{{.}}",
{{end}}]
{{if getWhitelistSourceRange $app $serviceName}}
whitelistSourceRange = [{{range getWhitelistSourceRange $app $serviceName}}
"{{.}}",
{{end}}]
{{end}}
basicAuth = [{{range getBasicAuth $app $serviceName}}
"{{.}}",
{{end}}]
{{if hasRedirect $app $serviceName}}
[frontends."{{ getFrontendName $app $serviceName }}".redirect]
entryPoint = "{{getRedirectEntryPoint $app $serviceName}}"
regex = "{{getRedirectRegex $app $serviceName}}"
replacement = "{{getRedirectReplacement $app $serviceName}}"
{{end}}
{{ if hasErrorPages $app $serviceName }}
[frontends."{{ getFrontendName $app $serviceName }}".errors]
{{ range $pageName, $page := getErrorPages $app $serviceName }}
[frontends."{{ getFrontendName $app $serviceName }}".errors.{{ $pageName }}]
status = [{{range $page.Status}}
"{{.}}",
{{end}}]
backend = "{{$page.Backend}}"
query = "{{$page.Query}}"
{{end}}
{{end}}
{{ if hasRateLimits $app $serviceName }}
[frontends."{{ getFrontendName $app $serviceName }}".rateLimit]
extractorFunc = "{{ getRateLimitsExtractorFunc $app $serviceName }}"
[frontends."{{ getFrontendName $app $serviceName }}".rateLimit.rateSet]
{{ range $limitName, $rateLimit := getRateLimits $app $serviceName }}
[frontends."{{ getFrontendName $app $serviceName }}".rateLimit.rateSet.{{ $limitName }}]
period = "{{ $rateLimit.Period }}"
average = {{ $rateLimit.Average }}
burst = {{ $rateLimit.Burst }}
{{end}}
{{end}}
{{if hasHeaders $app $serviceName }}
[frontends."{{ getFrontendName $app $serviceName }}".headers]
{{if hasSSLRedirectHeaders $app $serviceName}}
SSLRedirect = {{getSSLRedirectHeaders $app $serviceName}}
{{end}}
{{if hasSSLTemporaryRedirectHeaders $app $serviceName}}
SSLTemporaryRedirect = {{getSSLTemporaryRedirectHeaders $app $serviceName}}
{{end}}
{{if hasSSLHostHeaders $app $serviceName}}
SSLHost = "{{getSSLHostHeaders $app $serviceName}}"
{{end}}
{{if hasSTSSecondsHeaders $app $serviceName}}
STSSeconds = {{getSTSSecondsHeaders $app $serviceName}}
{{end}}
{{if hasSTSIncludeSubdomainsHeaders $app $serviceName}}
STSIncludeSubdomains = {{getSTSIncludeSubdomainsHeaders $app $serviceName}}
{{end}}
{{if hasSTSPreloadHeaders $app $serviceName}}
STSPreload = {{getSTSPreloadHeaders $app $serviceName}}
{{end}}
{{if hasForceSTSHeaderHeaders $app $serviceName}}
ForceSTSHeader = {{getForceSTSHeaderHeaders $app $serviceName}}
{{end}}
{{if hasFrameDenyHeaders $app $serviceName}}
FrameDeny = {{getFrameDenyHeaders $app $serviceName}}
{{end}}
{{if hasCustomFrameOptionsValueHeaders $app $serviceName}}
CustomFrameOptionsValue = "{{getCustomFrameOptionsValueHeaders $app $serviceName}}"
{{end}}
{{if hasContentTypeNosniffHeaders $app $serviceName}}
ContentTypeNosniff = {{getContentTypeNosniffHeaders $app $serviceName}}
{{end}}
{{if hasBrowserXSSFilterHeaders $app $serviceName}}
BrowserXSSFilter = {{getBrowserXSSFilterHeaders $app $serviceName}}
{{end}}
{{if hasContentSecurityPolicyHeaders $app $serviceName}}
ContentSecurityPolicy = "{{getContentSecurityPolicyHeaders $app $serviceName}}"
{{end}}
{{if hasPublicKeyHeaders $app $serviceName}}
PublicKey = "{{getPublicKeyHeaders $app $serviceName}}"
{{end}}
{{if hasReferrerPolicyHeaders $app $serviceName}}
ReferrerPolicy = "{{getReferrerPolicyHeaders $app $serviceName}}"
{{end}}
{{if hasIsDevelopmentHeaders $app $serviceName}}
IsDevelopment = {{getIsDevelopmentHeaders $app $serviceName}}
{{end}}
{{if hasAllowedHostsHeaders $app $serviceName}}
AllowedHosts = [{{range getAllowedHostsHeaders $app $serviceName}}
"{{.}}",
{{end}}]
{{end}}
{{if hasHostsProxyHeaders $app $serviceName}}
HostsProxyHeaders = [{{range getHostsProxyHeaders $app $serviceName}}
"{{.}}",
{{end}}]
{{end}}
{{if hasRequestHeaders $app $serviceName}}
[frontends."{{ getFrontendName $app $serviceName }}".headers.customRequestHeaders]
{{range $k, $v := getRequestHeaders $app $serviceName}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if hasResponseHeaders $app $serviceName}}
[frontends."{{ getFrontendName $app $serviceName }}".headers.customResponseHeaders]
{{range $k, $v := getResponseHeaders $app $serviceName}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if hasSSLProxyHeaders $app $serviceName}}
[frontends."{{ getFrontendName $app $serviceName }}".headers.SSLProxyHeaders]
{{range $k, $v := getSSLProxyHeaders $app $serviceName}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{end}}
[frontends."{{ getFrontendName $app $serviceName }}".routes."route-host{{$app.ID | replace "/" "-"}}{{getServiceNameSuffix $serviceName }}"]
rule = "{{getFrontendRule $app $serviceName}}"
{{end}}
{{end}}
`)
// templatesMarathonTmplBytes returns the raw bytes of the embedded
// "templates/marathon.tmpl" template; the error is always nil here.
func templatesMarathonTmplBytes() ([]byte, error) {
	return _templatesMarathonTmpl, nil
}
// templatesMarathonTmpl builds the *asset for "templates/marathon.tmpl",
// pairing the embedded bytes with a synthetic zero-valued file info.
func templatesMarathonTmpl() (*asset, error) {
	data, err := templatesMarathonTmplBytes()
	if err != nil {
		return nil, err
	}
	fi := bindataFileInfo{name: "templates/marathon.tmpl", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	return &asset{bytes: data, info: fi}, nil
}
var _templatesMesosTmpl = []byte(`{{$apps := .Applications}}
[backends]{{range .Tasks}}
[backends.backend{{getBackend . $apps}}.servers.server-{{getID .}}]
url = "{{getProtocol . $apps}}://{{getHost .}}:{{getPort . $apps}}"
weight = {{getWeight . $apps}}
{{end}}
[frontends]{{range .Applications}}
[frontends.frontend-{{getFrontEndName .}}]
backend = "backend{{getFrontendBackend .}}"
passHostHeader = {{getPassHostHeader .}}
priority = {{getPriority .}}
entryPoints = [{{range getEntryPoints .}}
"{{.}}",
{{end}}]
[frontends.frontend-{{getFrontEndName .}}.routes.route-host{{getFrontEndName .}}]
rule = "{{getFrontendRule .}}"
{{end}}
`)
// templatesMesosTmplBytes returns the raw bytes of the embedded
// "templates/mesos.tmpl" template; the error is always nil here.
func templatesMesosTmplBytes() ([]byte, error) {
	return _templatesMesosTmpl, nil
}
// templatesMesosTmpl builds the *asset for "templates/mesos.tmpl", pairing
// the embedded bytes with a synthetic zero-valued file info.
func templatesMesosTmpl() (*asset, error) {
	data, err := templatesMesosTmplBytes()
	if err != nil {
		return nil, err
	}
	fi := bindataFileInfo{name: "templates/mesos.tmpl", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	return &asset{bytes: data, info: fi}, nil
}
var _templatesNotfoundTmpl = []byte(`<!DOCTYPE html>
<html>
<head>
<title>Traefik</title>
</head>
<body>
Ohhhh man, this is bad...
</body>
</html>`)
// templatesNotfoundTmplBytes returns the raw bytes of the embedded
// "templates/notFound.tmpl" page; the error is always nil here.
func templatesNotfoundTmplBytes() ([]byte, error) {
	return _templatesNotfoundTmpl, nil
}
// templatesNotfoundTmpl builds the *asset for "templates/notFound.tmpl",
// pairing the embedded bytes with a synthetic zero-valued file info.
func templatesNotfoundTmpl() (*asset, error) {
	data, err := templatesNotfoundTmplBytes()
	if err != nil {
		return nil, err
	}
	fi := bindataFileInfo{name: "templates/notFound.tmpl", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	return &asset{bytes: data, info: fi}, nil
}
var _templatesRancherTmpl = []byte(`{{$backendServers := .Backends}}
[backends]
{{range $backendName, $backend := .Backends}}
[backends.backend-{{$backendName}}]
{{if hasCircuitBreakerLabel $backend}}
[backends.backend-{{$backendName}}.circuitBreaker]
expression = "{{getCircuitBreakerExpression $backend}}"
{{end}}
{{if hasLoadBalancerLabel $backend}}
[backends.backend-{{$backendName}}.loadBalancer]
method = "{{getLoadBalancerMethod $backend}}"
sticky = {{getSticky $backend}}
{{if hasStickinessLabel $backend}}
[backends.backend-{{$backendName}}.loadBalancer.stickiness]
cookieName = "{{getStickinessCookieName $backend}}"
{{end}}
{{end}}
{{if hasMaxConnLabels $backend}}
[backends.backend-{{$backendName}}.maxConn]
amount = {{getMaxConnAmount $backend}}
extractorFunc = "{{getMaxConnExtractorFunc $backend}}"
{{end}}
{{if hasHealthCheckLabels $backend}}
[backends.backend-{{$backendName}}.healthCheck]
path = "{{getHealthCheckPath $backend}}"
port = {{getHealthCheckPort $backend}}
interval = "{{getHealthCheckInterval $backend}}"
{{end}}
{{range $index, $ip := $backend.Containers}}
[backends.backend-{{$backendName}}.servers.server-{{$index}}]
url = "{{getProtocol $backend}}://{{$ip}}:{{getPort $backend}}"
weight = {{getWeight $backend}}
{{end}}
{{end}}
[frontends]
{{range $frontendName, $service := .Frontends}}
[frontends."frontend-{{$frontendName}}"]
backend = "backend-{{getBackend $service}}"
priority = {{getPriority $service}}
passHostHeader = {{getPassHostHeader $service}}
passTLSCert = {{getPassTLSCert $service}}
entryPoints = [{{range getEntryPoints $service}}
"{{.}}",
{{end}}]
{{if getWhitelistSourceRange $service}}
whitelistSourceRange = [{{range getWhitelistSourceRange $service}}
"{{.}}",
{{end}}]
{{end}}
basicAuth = [{{range getBasicAuth $service}}
"{{.}}",
{{end}}]
{{if hasRedirect $service}}
[frontends."frontend-{{$frontendName}}".redirect]
entryPoint = "{{getRedirectEntryPoint $service}}"
regex = "{{getRedirectRegex $service}}"
replacement = "{{getRedirectReplacement $service}}"
{{end}}
{{ if hasErrorPages $service }}
[frontends."frontend-{{$frontendName}}".errors]
{{ range $pageName, $page := getErrorPages $service }}
[frontends."frontend-{{$frontendName}}".errors.{{ $pageName }}]
status = [{{range $page.Status}}
"{{.}}",
{{end}}]
backend = "{{$page.Backend}}"
query = "{{$page.Query}}"
{{end}}
{{end}}
{{ if hasRateLimits $service }}
[frontends."frontend-{{$frontendName}}".rateLimit]
extractorFunc = "{{ getRateLimitsExtractorFunc $service }}"
[frontends."frontend-{{$frontendName}}".rateLimit.rateSet]
{{ range $limitName, $rateLimit := getRateLimits $service }}
[frontends."frontend-{{$frontendName}}".rateLimit.rateSet.{{ $limitName }}]
period = "{{ $rateLimit.Period }}"
average = {{ $rateLimit.Average }}
burst = {{ $rateLimit.Burst }}
{{end}}
{{end}}
{{if hasHeaders $service }}
[frontends."frontend-{{$frontendName}}".headers]
SSLRedirect = {{getSSLRedirectHeaders $service}}
SSLTemporaryRedirect = {{getSSLTemporaryRedirectHeaders $service}}
SSLHost = "{{getSSLHostHeaders $service}}"
STSSeconds = {{getSTSSecondsHeaders $service}}
STSIncludeSubdomains = {{getSTSIncludeSubdomainsHeaders $service}}
STSPreload = {{getSTSPreloadHeaders $service}}
ForceSTSHeader = {{getForceSTSHeaderHeaders $service}}
FrameDeny = {{getFrameDenyHeaders $service}}
CustomFrameOptionsValue = "{{getCustomFrameOptionsValueHeaders $service}}"
ContentTypeNosniff = {{getContentTypeNosniffHeaders $service}}
BrowserXSSFilter = {{getBrowserXSSFilterHeaders $service}}
ContentSecurityPolicy = "{{getContentSecurityPolicyHeaders $service}}"
PublicKey = "{{getPublicKeyHeaders $service}}"
ReferrerPolicy = "{{getReferrerPolicyHeaders $service}}"
IsDevelopment = {{getIsDevelopmentHeaders $service}}
AllowedHosts = [{{range getAllowedHostsHeaders $service}}
"{{.}}",
{{end}}]
HostsProxyHeaders = [{{range getHostsProxyHeaders $service}}
"{{.}}",
{{end}}]
{{if hasRequestHeaders $service}}
[frontends."frontend-{{$frontendName}}".headers.customRequestHeaders]
{{range $k, $v := getRequestHeaders $service}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if hasResponseHeaders $service}}
[frontends."frontend-{{$frontendName}}".headers.customResponseHeaders]
{{range $k, $v := getResponseHeaders $service}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if hasSSLProxyHeaders $service}}
[frontends."frontend-{{$frontendName}}".headers.SSLProxyHeaders]
{{range $k, $v := getSSLProxyHeaders $service}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{end}}
[frontends."frontend-{{$frontendName}}".routes."route-frontend-{{$frontendName}}"]
rule = "{{getFrontendRule $service}}"
{{end}}
`)
// templatesRancherTmplBytes returns the raw bytes of the embedded
// "templates/rancher.tmpl" template; the error is always nil here.
func templatesRancherTmplBytes() ([]byte, error) {
	return _templatesRancherTmpl, nil
}
// templatesRancherTmpl builds the *asset for "templates/rancher.tmpl",
// pairing the embedded bytes with a synthetic zero-valued file info.
func templatesRancherTmpl() (*asset, error) {
	data, err := templatesRancherTmplBytes()
	if err != nil {
		return nil, err
	}
	fi := bindataFileInfo{name: "templates/rancher.tmpl", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	return &asset{bytes: data, info: fi}, nil
}
// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
	// Normalize Windows-style separators so lookups are OS-independent.
	canonicalName := strings.Replace(name, "\\", "/", -1)
	if f, ok := _bindata[canonicalName]; ok {
		a, err := f()
		if err != nil {
			// Fixed wording: was the ungrammatical "can't read by error".
			return nil, fmt.Errorf("Asset %s can't be read: %v", name, err)
		}
		return a.bytes, nil
	}
	return nil, fmt.Errorf("Asset %s not found", name)
}
// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
	data, err := Asset(name)
	if err != nil {
		panic("asset: Asset(" + name + "): " + err.Error())
	}
	return data
}
// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
	// Normalize Windows-style separators so lookups are OS-independent.
	canonicalName := strings.Replace(name, "\\", "/", -1)
	if f, ok := _bindata[canonicalName]; ok {
		a, err := f()
		if err != nil {
			// Fixed wording: was the ungrammatical "can't read by error".
			return nil, fmt.Errorf("AssetInfo %s can't be read: %v", name, err)
		}
		return a.info, nil
	}
	return nil, fmt.Errorf("AssetInfo %s not found", name)
}
// AssetNames returns the names of the assets.
func AssetNames() []string {
	out := make([]string, 0, len(_bindata))
	for key := range _bindata {
		out = append(out, key)
	}
	return out
}
// _bindata is a table, holding each asset generator, mapped to its name.
// Keys are canonical slash-separated paths; values lazily construct the
// corresponding *asset on demand.
var _bindata = map[string]func() (*asset, error){
	"templates/consul_catalog.tmpl": templatesConsul_catalogTmpl,
	"templates/docker.tmpl":         templatesDockerTmpl,
	"templates/ecs.tmpl":            templatesEcsTmpl,
	"templates/eureka.tmpl":         templatesEurekaTmpl,
	"templates/kubernetes.tmpl":     templatesKubernetesTmpl,
	"templates/kv.tmpl":             templatesKvTmpl,
	"templates/marathon.tmpl":       templatesMarathonTmpl,
	"templates/mesos.tmpl":          templatesMesosTmpl,
	"templates/notFound.tmpl":       templatesNotfoundTmpl,
	"templates/rancher.tmpl":        templatesRancherTmpl,
}
// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
//     data/
//       foo.txt
//       img/
//         a.png
//         b.png
// then AssetDir("data") would return []string{"foo.txt", "img"}
// AssetDir("data/img") would return []string{"a.png", "b.png"}
// AssetDir("foo.txt") and AssetDir("notexist") would return an error
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
	node := _bintree
	if len(name) != 0 {
		// Walk the tree one path component at a time.
		canonicalName := strings.Replace(name, "\\", "/", -1)
		for _, part := range strings.Split(canonicalName, "/") {
			node = node.Children[part]
			if node == nil {
				return nil, fmt.Errorf("Asset %s not found", name)
			}
		}
	}
	// A non-nil Func marks a file node, which has no directory listing.
	if node.Func != nil {
		return nil, fmt.Errorf("Asset %s not found", name)
	}
	children := make([]string, 0, len(node.Children))
	for childName := range node.Children {
		children = append(children, childName)
	}
	return children, nil
}
// bintree is a node in the tree of embedded assets: a file node carries
// its asset generator in Func; a directory node lists its entries in
// Children (and has a nil Func).
type bintree struct {
	Func     func() (*asset, error)
	Children map[string]*bintree
}
// _bintree mirrors the embedded asset hierarchy for AssetDir: a single
// "templates" directory containing one leaf per template.
var _bintree = &bintree{nil, map[string]*bintree{
	"templates": {nil, map[string]*bintree{
		"consul_catalog.tmpl": {templatesConsul_catalogTmpl, map[string]*bintree{}},
		"docker.tmpl":         {templatesDockerTmpl, map[string]*bintree{}},
		"ecs.tmpl":            {templatesEcsTmpl, map[string]*bintree{}},
		"eureka.tmpl":         {templatesEurekaTmpl, map[string]*bintree{}},
		"kubernetes.tmpl":     {templatesKubernetesTmpl, map[string]*bintree{}},
		"kv.tmpl":             {templatesKvTmpl, map[string]*bintree{}},
		"marathon.tmpl":       {templatesMarathonTmpl, map[string]*bintree{}},
		"mesos.tmpl":          {templatesMesosTmpl, map[string]*bintree{}},
		"notFound.tmpl":       {templatesNotfoundTmpl, map[string]*bintree{}},
		"rancher.tmpl":        {templatesRancherTmpl, map[string]*bintree{}},
	}},
}}
// RestoreAsset restores an asset under the given directory
func RestoreAsset(dir, name string) error {
	data, err := Asset(name)
	if err != nil {
		return err
	}
	info, err := AssetInfo(name)
	if err != nil {
		return err
	}
	// Recreate the asset's parent directory, contents, and mtime on disk.
	if err := os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)); err != nil {
		return err
	}
	if err := ioutil.WriteFile(_filePath(dir, name), data, info.Mode()); err != nil {
		return err
	}
	return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
}
// RestoreAssets restores an asset under the given directory recursively
func RestoreAssets(dir, name string) error {
	children, err := AssetDir(name)
	if err != nil {
		// Not a directory: restore it as a single file.
		return RestoreAsset(dir, name)
	}
	// Directory: recurse into every entry.
	for _, child := range children {
		if err := RestoreAssets(dir, filepath.Join(name, child)); err != nil {
			return err
		}
	}
	return nil
}
// _filePath joins dir with the slash-normalized components of name into
// an OS-specific path.
func _filePath(dir, name string) string {
	canonical := strings.Replace(name, "\\", "/", -1)
	parts := append([]string{dir}, strings.Split(canonical, "/")...)
	return filepath.Join(parts...)
}
|
package core
import (
"errors"
"reflect"
"strings"
"syscall"
"testing"
"time"
"github.com/elves/elvish/edit/tty"
"github.com/elves/elvish/edit/ui"
"github.com/elves/elvish/styled"
"github.com/elves/elvish/sys"
)
// TestReadCode_AbortsOnSetupError verifies that ReadCode returns the
// terminal's setup error verbatim when terminal setup fails.
func TestReadCode_AbortsOnSetupError(t *testing.T) {
	terminal := newFakeTTY()
	terminal.setupErr = errors.New("a fake error")
	ed := NewEditor(terminal, nil)
	_, err := ed.ReadCode()
	if err != terminal.setupErr {
		t.Errorf("ReadCode returns error %v, want %v", err, terminal.setupErr)
	}
}
// TestReadCode_CallsRestore verifies that the terminal restore callback
// runs exactly once per ReadCode call.
func TestReadCode_CallsRestore(t *testing.T) {
	restoreCalled := 0
	terminal := newFakeTTY()
	terminal.restoreFunc = func() { restoreCalled++ }
	// Enter causes BasicMode to quit, letting ReadCode return.
	terminal.eventCh <- tty.KeyEvent{Rune: '\n'}
	ed := NewEditor(terminal, nil)
	ed.ReadCode()
	if restoreCalled != 1 {
		t.Errorf("Restore callback called %d times, want once", restoreCalled)
	}
}
// TestReadCode_ResetsStateBeforeReturn verifies that editor state
// (State.Raw.Code) is cleared by the time ReadCode returns.
func TestReadCode_ResetsStateBeforeReturn(t *testing.T) {
	terminal := newFakeTTY()
	terminal.eventCh <- tty.KeyEvent{Rune: '\n'}
	ed := NewEditor(terminal, nil)
	ed.State.Raw.Code = "some code"
	ed.ReadCode()
	if code := ed.State.Raw.Code; code != "" {
		t.Errorf("Editor state has code %q, want empty", code)
	}
}
// TestReadCode_PassesInputEventsToMode verifies that key events are
// forwarded, in order, to the active mode's key handler.
func TestReadCode_PassesInputEventsToMode(t *testing.T) {
	terminal := newFakeTTY()
	ed := NewEditor(terminal, nil)
	// fakeMode (defined elsewhere in the package) presumably stops after
	// maxKeys keys, which is what lets ReadCode return — TODO confirm.
	m := &fakeMode{maxKeys: 3}
	ed.State.Raw.Mode = m
	terminal.eventCh <- tty.KeyEvent{Rune: 'a'}
	terminal.eventCh <- tty.KeyEvent{Rune: 'b'}
	terminal.eventCh <- tty.KeyEvent{Rune: 'c'}
	ed.ReadCode()
	wantKeysHandled := []ui.Key{
		ui.Key{Rune: 'a'}, ui.Key{Rune: 'b'}, ui.Key{Rune: 'c'},
	}
	if !reflect.DeepEqual(m.keysHandled, wantKeysHandled) {
		t.Errorf("Mode gets keys %v, want %v", m.keysHandled, wantKeysHandled)
	}
}
// TestReadCode_CallsBeforeReadlineOnce verifies that each BeforeReadline
// hook runs exactly once per ReadCode call.
func TestReadCode_CallsBeforeReadlineOnce(t *testing.T) {
	terminal := newFakeTTY()
	ed := NewEditor(terminal, nil)
	called := 0
	ed.Config.Raw.BeforeReadline = []func(){func() { called++ }}
	// Causes BasicMode to quit
	terminal.eventCh <- tty.KeyEvent{Rune: '\n'}
	ed.ReadCode()
	if called != 1 {
		t.Errorf("BeforeReadline hook called %d times, want 1", called)
	}
}
// TestReadCode_CallsAfterReadlineOnceWithCode verifies that each
// AfterReadline hook runs exactly once and receives the final code.
func TestReadCode_CallsAfterReadlineOnceWithCode(t *testing.T) {
	terminal := newFakeTTY()
	ed := NewEditor(terminal, nil)
	called := 0
	code := ""
	ed.Config.Raw.AfterReadline = []func(string){func(s string) {
		called++
		code = s
	}}
	// Causes BasicMode to write state.Code and then quit
	terminal.eventCh <- tty.KeyEvent{Rune: 'a'}
	terminal.eventCh <- tty.KeyEvent{Rune: 'b'}
	terminal.eventCh <- tty.KeyEvent{Rune: 'c'}
	terminal.eventCh <- tty.KeyEvent{Rune: '\n'}
	ed.ReadCode()
	if called != 1 {
		t.Errorf("AfterReadline hook called %d times, want 1", called)
	}
	if code != "abc" {
		t.Errorf("AfterReadline hook called with %q, want %q", code, "abc")
	}
}
// TestReadCode_RespectsMaxHeight verifies that setting Config.Raw.MaxHeight
// caps the rendered buffer height on the next redraw.
func TestReadCode_RespectsMaxHeight(t *testing.T) {
	maxHeight := 5
	terminal := newFakeTTY()
	ed := NewEditor(terminal, nil)
	// Will fill more than maxHeight but less than terminal height
	ed.State.Raw.Code = strings.Repeat("a", 80*10)
	ed.State.Raw.Dot = len(ed.State.Raw.Code)
	codeCh, _ := ed.readCodeAsync()
	buf1 := <-terminal.bufCh
	// Make sure that normally the height does exceed maxHeight.
	if h := len(buf1.Lines); h <= maxHeight {
		t.Errorf("Buffer height is %d, should > %d", h, maxHeight)
	}
	// Config is shared with the ReadCode goroutine; lock while mutating.
	ed.Config.Mutex.Lock()
	ed.Config.Raw.MaxHeight = maxHeight
	ed.Config.Mutex.Unlock()
	ed.Redraw(false)
	buf2 := <-terminal.bufCh
	if h := len(buf2.Lines); h > maxHeight {
		t.Errorf("Buffer height is %d, should <= %d", h, maxHeight)
	}
	// Terminate ReadCode and wait for the async goroutine to finish.
	terminal.eventCh <- tty.KeyEvent{Rune: '\n'}
	<-codeCh
}
var bufChTimeout = 1 * time.Second
// TestReadCode_RendersHighlightedCode verifies that the configured
// highlighter's styling is applied to the rendered code.
func TestReadCode_RendersHighlightedCode(t *testing.T) {
	terminal := newFakeTTY()
	ed := NewEditor(terminal, nil)
	// Highlighter that paints everything red.
	ed.Config.Raw.Highlighter = func(code string) (styled.Text, []error) {
		return styled.Text{
			&styled.Segment{styled.Style{Foreground: "red"}, code}}, nil
	}
	terminal.eventCh <- tty.KeyEvent{Rune: 'a'}
	terminal.eventCh <- tty.KeyEvent{Rune: 'b'}
	terminal.eventCh <- tty.KeyEvent{Rune: 'c'}
	codeCh, _ := ed.readCodeAsync()
	wantBuf := ui.NewBufferBuilder(80).
		WriteString("abc", "31" /* SGR for red foreground */).
		SetDotToCursor().Buffer()
	if !checkBuffer(wantBuf, terminal.bufCh) {
		t.Errorf("Did not see buffer containing highlighted code")
	}
	// Terminate ReadCode and wait for the async goroutine to finish.
	terminal.eventCh <- tty.KeyEvent{Rune: '\n'}
	<-codeCh
}
// TestReadCode_RendersErrorFromHighlighter is a placeholder for testing
// that highlighter errors are surfaced in the UI.
func TestReadCode_RendersErrorFromHighlighter(t *testing.T) {
	// TODO
}
// TestReadCode_RendersPrompt verifies that the configured prompt is drawn
// before the code.
func TestReadCode_RendersPrompt(t *testing.T) {
	terminal := newFakeTTY()
	ed := NewEditor(terminal, nil)
	ed.Config.Raw.Prompt = constPrompt{styled.Unstyled("> ")}
	terminal.eventCh <- tty.KeyEvent{Rune: 'a'}
	codeCh, _ := ed.readCodeAsync()
	wantBuf := ui.NewBufferBuilder(80).
		WriteUnstyled("> a").
		SetDotToCursor().Buffer()
	if !checkBuffer(wantBuf, terminal.bufCh) {
		t.Errorf("Did not see buffer containing prompt")
	}
	// Terminate ReadCode and wait for the async goroutine to finish.
	terminal.eventCh <- tty.KeyEvent{Rune: '\n'}
	<-codeCh
}
// TestReadCode_RendersRPrompt verifies that the right prompt is rendered
// flush against the right edge of the terminal.
func TestReadCode_RendersRPrompt(t *testing.T) {
	terminal := newFakeTTY()
	// Narrow terminal so the rprompt position is easy to pin down.
	terminal.width = 4
	ed := NewEditor(terminal, nil)
	ed.Config.Raw.RPrompt = constPrompt{styled.Unstyled("R")}
	terminal.eventCh <- tty.KeyEvent{Rune: 'a'}
	codeCh, _ := ed.readCodeAsync()
	wantBuf := ui.NewBufferBuilder(4).
		WriteUnstyled("a").SetDotToCursor().WriteUnstyled("  R").Buffer()
	if !checkBuffer(wantBuf, terminal.bufCh) {
		t.Errorf("Did not see buffer containing rprompt")
	}
	// Terminate ReadCode and wait for the async goroutine to finish.
	terminal.eventCh <- tty.KeyEvent{Rune: '\n'}
	<-codeCh
}
// TestReadCode_SupportsPersistentRPrompt is a placeholder for testing
// rprompt persistence.
func TestReadCode_SupportsPersistentRPrompt(t *testing.T) {
	// TODO
}
// TestReadCode_DrawsAndFlushesNotes verifies that Notify renders the note
// to the notes buffer and then clears State.Raw.Notes.
func TestReadCode_DrawsAndFlushesNotes(t *testing.T) {
	terminal := newFakeTTY()
	ed := NewEditor(terminal, nil)
	codeCh, _ := ed.readCodeAsync()
	// Sanity-check initial state.
	initBuf := ui.NewBufferBuilder(80).Buffer()
	if !checkBuffer(initBuf, terminal.bufCh) {
		t.Errorf("did not get initial state")
	}
	ed.Notify("note")
	wantNotesBuf := ui.NewBufferBuilder(80).WriteUnstyled("note").Buffer()
	if !checkBuffer(wantNotesBuf, terminal.notesBufCh) {
		t.Errorf("did not render notes")
	}
	// Notes should be flushed from state once drawn.
	if n := len(ed.State.Raw.Notes); n > 0 {
		t.Errorf("State.Raw.Notes has %d elements after redrawing, want 0", n)
	}
	// Terminate ReadCode and wait for the async goroutine to finish.
	terminal.eventCh <- tty.KeyEvent{Rune: '\n'}
	<-codeCh
}
// TestReadCode_UsesFinalStateInFinalRedraw verifies that the last buffer
// written before ReadCode returns reflects the final editor state (dot at
// end of code), not the state from the last interactive redraw.
func TestReadCode_UsesFinalStateInFinalRedraw(t *testing.T) {
	terminal := newFakeTTY()
	ed := NewEditor(terminal, nil)
	ed.State.Raw.Code = "some code"
	// We use the dot as a signal for distinguishing non-final and final state.
	// In the final state, the dot will be set to the length of the code (9).
	ed.State.Raw.Dot = 1
	codeCh, _ := ed.readCodeAsync()
	// Wait until a non-final state is drawn.
	wantBuf := ui.NewBufferBuilder(80).WriteUnstyled("s").SetDotToCursor().
		WriteUnstyled("ome code").Buffer()
	if !checkBuffer(wantBuf, terminal.bufCh) {
		t.Errorf("did not get expected buffer before sending Enter")
	}
	terminal.eventCh <- tty.KeyEvent{Rune: '\n'}
	<-codeCh
	// Last element in bufs is nil
	finalBuf := terminal.bufs[len(terminal.bufs)-2]
	wantFinalBuf := ui.NewBufferBuilder(80).WriteUnstyled("some code").
		SetDotToCursor().Buffer()
	if !reflect.DeepEqual(finalBuf, wantFinalBuf) {
		t.Errorf("final buffer is %v, want %v", finalBuf, wantFinalBuf)
	}
}
// TestReadCode_QuitsOnSIGHUP verifies that ReadCode returns shortly after
// SIGHUP is delivered through the signal source.
func TestReadCode_QuitsOnSIGHUP(t *testing.T) {
	terminal := newFakeTTY()
	sigs := newFakeSignalSource()
	ed := NewEditor(terminal, sigs)
	codeCh, _ := ed.readCodeAsync()
	// Wait for one redraw so we know the editor loop is running.
	terminal.eventCh <- tty.KeyEvent{Rune: 'a'}
	wantBuf := ui.NewBufferBuilder(80).WriteUnstyled("a").
		SetDotToCursor().Buffer()
	if !checkBuffer(wantBuf, terminal.bufCh) {
		t.Errorf("did not get expected buffer before sending SIGHUP")
	}
	sigs.ch <- syscall.SIGHUP
	select {
	case <-codeCh:
		// TODO: Test that ReadCode returns with io.EOF
	case <-time.After(time.Second):
		t.Errorf("SIGHUP did not cause ReadCode to return")
	}
}
// TestReadCode_ResetsOnSIGHUP verifies that the terminal is redrawn empty
// after an interrupt signal.
// NOTE(review): despite the name, this test sends SIGINT, not SIGHUP —
// the name or the signal looks wrong; confirm intent.
func TestReadCode_ResetsOnSIGHUP(t *testing.T) {
	terminal := newFakeTTY()
	sigs := newFakeSignalSource()
	ed := NewEditor(terminal, sigs)
	codeCh, _ := ed.readCodeAsync()
	terminal.eventCh <- tty.KeyEvent{Rune: 'a'}
	wantBuf := ui.NewBufferBuilder(80).WriteUnstyled("a").
		SetDotToCursor().Buffer()
	if !checkBuffer(wantBuf, terminal.bufCh) {
		t.Errorf("did not get expected buffer before sending SIGINT")
	}
	sigs.ch <- syscall.SIGINT
	// After SIGINT the editor should render an empty buffer.
	wantBuf = ui.NewBufferBuilder(80).Buffer()
	if !checkBuffer(wantBuf, terminal.bufCh) {
		t.Errorf("Terminal state is not reset after SIGINT")
	}
	// Terminate ReadCode and wait for the async goroutine to finish.
	terminal.eventCh <- tty.KeyEvent{Rune: '\n'}
	<-codeCh
}
// TestReadCode_RedrawsOnSIGWINCH verifies that the editor re-renders with
// the new terminal width after a window-size-change signal.
func TestReadCode_RedrawsOnSIGWINCH(t *testing.T) {
	terminal := newFakeTTY()
	sigs := newFakeSignalSource()
	ed := NewEditor(terminal, sigs)
	ed.State.Raw.Code = "1234567890"
	ed.State.Raw.Dot = len(ed.State.Raw.Code)
	codeCh, _ := ed.readCodeAsync()
	wantBuf := ui.NewBufferBuilder(80).WriteUnstyled("1234567890").
		SetDotToCursor().Buffer()
	if !checkBuffer(wantBuf, terminal.bufCh) {
		t.Errorf("did not get expected buffer before sending SIGWINCH")
	}
	// Shrink the fake terminal, then signal the resize.
	terminal.setSize(24, 4)
	sigs.ch <- sys.SIGWINCH
	wantBuf = ui.NewBufferBuilder(4).WriteUnstyled("1234567890").
		SetDotToCursor().Buffer()
	if !checkBuffer(wantBuf, terminal.bufCh) {
		t.Errorf("Terminal is not redrawn after SIGWINCH")
	}
	// Terminate ReadCode and wait for the async goroutine to finish.
	terminal.eventCh <- tty.KeyEvent{Rune: '\n'}
	<-codeCh
}
newedit/core/editor_test.go: Extract setup and cleanup into functions.
package core
import (
"errors"
"reflect"
"strings"
"syscall"
"testing"
"time"
"github.com/elves/elvish/edit/tty"
"github.com/elves/elvish/edit/ui"
"github.com/elves/elvish/styled"
"github.com/elves/elvish/sys"
)
// TestReadCode_AbortsOnSetupError verifies that ReadCode returns the
// terminal's setup error verbatim when terminal setup fails.
// setup() is a shared fixture helper defined elsewhere in the package.
func TestReadCode_AbortsOnSetupError(t *testing.T) {
	ed, terminal, _ := setup()
	terminal.setupErr = errors.New("a fake error")
	_, err := ed.ReadCode()
	if err != terminal.setupErr {
		t.Errorf("ReadCode returns error %v, want %v", err, terminal.setupErr)
	}
}
// TestReadCode_CallsRestore verifies that the terminal restore callback
// runs exactly once per ReadCode call.
func TestReadCode_CallsRestore(t *testing.T) {
	ed, terminal, _ := setup()
	restoreCalled := 0
	terminal.restoreFunc = func() { restoreCalled++ }
	// Enter causes BasicMode to quit, letting ReadCode return.
	terminal.eventCh <- tty.KeyEvent{Rune: '\n'}
	ed.ReadCode()
	if restoreCalled != 1 {
		t.Errorf("Restore callback called %d times, want once", restoreCalled)
	}
}
// TestReadCode_ResetsStateBeforeReturn verifies that editor state
// (State.Raw.Code) is cleared by the time ReadCode returns.
func TestReadCode_ResetsStateBeforeReturn(t *testing.T) {
	ed, terminal, _ := setup()
	terminal.eventCh <- tty.KeyEvent{Rune: '\n'}
	ed.State.Raw.Code = "some code"
	ed.ReadCode()
	if code := ed.State.Raw.Code; code != "" {
		t.Errorf("Editor state has code %q, want empty", code)
	}
}
// TestReadCode_PassesInputEventsToMode verifies that key events are
// forwarded, in order, to the active mode's key handler.
func TestReadCode_PassesInputEventsToMode(t *testing.T) {
	ed, terminal, _ := setup()
	// fakeMode (defined elsewhere in the package) presumably stops after
	// maxKeys keys, which is what lets ReadCode return — TODO confirm.
	m := &fakeMode{maxKeys: 3}
	ed.State.Raw.Mode = m
	terminal.eventCh <- tty.KeyEvent{Rune: 'a'}
	terminal.eventCh <- tty.KeyEvent{Rune: 'b'}
	terminal.eventCh <- tty.KeyEvent{Rune: 'c'}
	ed.ReadCode()
	wantKeysHandled := []ui.Key{
		ui.Key{Rune: 'a'}, ui.Key{Rune: 'b'}, ui.Key{Rune: 'c'},
	}
	if !reflect.DeepEqual(m.keysHandled, wantKeysHandled) {
		t.Errorf("Mode gets keys %v, want %v", m.keysHandled, wantKeysHandled)
	}
}
// TestReadCode_CallsBeforeReadlineOnce verifies that each BeforeReadline
// hook runs exactly once per ReadCode call.
func TestReadCode_CallsBeforeReadlineOnce(t *testing.T) {
	ed, terminal, _ := setup()
	called := 0
	ed.Config.Raw.BeforeReadline = []func(){func() { called++ }}
	// Causes BasicMode to quit
	terminal.eventCh <- tty.KeyEvent{Rune: '\n'}
	ed.ReadCode()
	if called != 1 {
		t.Errorf("BeforeReadline hook called %d times, want 1", called)
	}
}
// TestReadCode_CallsAfterReadlineOnceWithCode verifies that each
// AfterReadline hook runs exactly once and receives the final code.
func TestReadCode_CallsAfterReadlineOnceWithCode(t *testing.T) {
	ed, terminal, _ := setup()
	called := 0
	code := ""
	ed.Config.Raw.AfterReadline = []func(string){func(s string) {
		called++
		code = s
	}}
	// Causes BasicMode to write state.Code and then quit
	terminal.eventCh <- tty.KeyEvent{Rune: 'a'}
	terminal.eventCh <- tty.KeyEvent{Rune: 'b'}
	terminal.eventCh <- tty.KeyEvent{Rune: 'c'}
	terminal.eventCh <- tty.KeyEvent{Rune: '\n'}
	ed.ReadCode()
	if called != 1 {
		t.Errorf("AfterReadline hook called %d times, want 1", called)
	}
	if code != "abc" {
		t.Errorf("AfterReadline hook called with %q, want %q", code, "abc")
	}
}
// TestReadCode_RespectsMaxHeight verifies that setting Config.Raw.MaxHeight
// caps the rendered buffer height on the next redraw.
// cleanup() is a shared teardown helper defined elsewhere in the package.
func TestReadCode_RespectsMaxHeight(t *testing.T) {
	ed, terminal, _ := setup()
	maxHeight := 5
	// Will fill more than maxHeight but less than terminal height
	ed.State.Raw.Code = strings.Repeat("a", 80*10)
	ed.State.Raw.Dot = len(ed.State.Raw.Code)
	codeCh, _ := ed.readCodeAsync()
	buf1 := <-terminal.bufCh
	// Make sure that normally the height does exceed maxHeight.
	if h := len(buf1.Lines); h <= maxHeight {
		t.Errorf("Buffer height is %d, should > %d", h, maxHeight)
	}
	// Config is shared with the ReadCode goroutine; lock while mutating.
	ed.Config.Mutex.Lock()
	ed.Config.Raw.MaxHeight = maxHeight
	ed.Config.Mutex.Unlock()
	ed.Redraw(false)
	buf2 := <-terminal.bufCh
	if h := len(buf2.Lines); h > maxHeight {
		t.Errorf("Buffer height is %d, should <= %d", h, maxHeight)
	}
	cleanup(terminal, codeCh)
}
var bufChTimeout = 1 * time.Second
// TestReadCode_RendersHighlightedCode verifies that the configured
// highlighter's styling is applied to the rendered code.
func TestReadCode_RendersHighlightedCode(t *testing.T) {
	ed, terminal, _ := setup()
	// Highlighter that paints everything red.
	ed.Config.Raw.Highlighter = func(code string) (styled.Text, []error) {
		return styled.Text{
			&styled.Segment{styled.Style{Foreground: "red"}, code}}, nil
	}
	terminal.eventCh <- tty.KeyEvent{Rune: 'a'}
	terminal.eventCh <- tty.KeyEvent{Rune: 'b'}
	terminal.eventCh <- tty.KeyEvent{Rune: 'c'}
	codeCh, _ := ed.readCodeAsync()
	wantBuf := ui.NewBufferBuilder(80).
		WriteString("abc", "31" /* SGR for red foreground */).
		SetDotToCursor().Buffer()
	if !checkBuffer(wantBuf, terminal.bufCh) {
		t.Errorf("Did not see buffer containing highlighted code")
	}
	cleanup(terminal, codeCh)
}
// TestReadCode_RendersErrorFromHighlighter is a placeholder for testing
// that highlighter errors are surfaced in the UI.
func TestReadCode_RendersErrorFromHighlighter(t *testing.T) {
	// TODO
}
// TestReadCode_RendersPrompt verifies that the configured prompt is drawn
// before the code.
func TestReadCode_RendersPrompt(t *testing.T) {
	ed, terminal, _ := setup()
	ed.Config.Raw.Prompt = constPrompt{styled.Unstyled("> ")}
	terminal.eventCh <- tty.KeyEvent{Rune: 'a'}
	codeCh, _ := ed.readCodeAsync()
	wantBuf := ui.NewBufferBuilder(80).
		WriteUnstyled("> a").
		SetDotToCursor().Buffer()
	if !checkBuffer(wantBuf, terminal.bufCh) {
		t.Errorf("Did not see buffer containing prompt")
	}
	cleanup(terminal, codeCh)
}
// TestReadCode_RendersRPrompt verifies that the right prompt is rendered
// flush against the right edge of the terminal.
func TestReadCode_RendersRPrompt(t *testing.T) {
	ed, terminal, _ := setup()
	// Narrow terminal so the rprompt position is easy to pin down.
	terminal.width = 4
	ed.Config.Raw.RPrompt = constPrompt{styled.Unstyled("R")}
	terminal.eventCh <- tty.KeyEvent{Rune: 'a'}
	codeCh, _ := ed.readCodeAsync()
	wantBuf := ui.NewBufferBuilder(4).
		WriteUnstyled("a").SetDotToCursor().WriteUnstyled("  R").Buffer()
	if !checkBuffer(wantBuf, terminal.bufCh) {
		t.Errorf("Did not see buffer containing rprompt")
	}
	cleanup(terminal, codeCh)
}
// TestReadCode_SupportsPersistentRPrompt is a placeholder for testing
// rprompt persistence.
func TestReadCode_SupportsPersistentRPrompt(t *testing.T) {
	// TODO
}
// TestReadCode_DrawsAndFlushesNotes verifies that a note published via
// Notify is rendered to the notes buffer and then removed from the state.
func TestReadCode_DrawsAndFlushesNotes(t *testing.T) {
	ed, terminal, _ := setup()
	codeCh, _ := ed.readCodeAsync()
	// Sanity-check initial state.
	initBuf := ui.NewBufferBuilder(80).Buffer()
	if !checkBuffer(initBuf, terminal.bufCh) {
		t.Errorf("did not get initial state")
	}
	ed.Notify("note")
	// The note goes to the dedicated notes buffer, not the main buffer.
	wantNotesBuf := ui.NewBufferBuilder(80).WriteUnstyled("note").Buffer()
	if !checkBuffer(wantNotesBuf, terminal.notesBufCh) {
		t.Errorf("did not render notes")
	}
	// Rendering should also flush the note from the pending list.
	if n := len(ed.State.Raw.Notes); n > 0 {
		t.Errorf("State.Raw.Notes has %d elements after redrawing, want 0", n)
	}
	cleanup(terminal, codeCh)
}
// TestReadCode_UsesFinalStateInFinalRedraw verifies that the redraw
// performed when ReadCode returns reflects the final editor state rather
// than the state that was current while editing.
func TestReadCode_UsesFinalStateInFinalRedraw(t *testing.T) {
	ed, terminal, _ := setup()
	ed.State.Raw.Code = "some code"
	// We use the dot as a signal for distinguishing non-final and final state.
	// In the final state, the dot will be set to the length of the code (9).
	ed.State.Raw.Dot = 1
	codeCh, _ := ed.readCodeAsync()
	// Wait until a non-final state is drawn.
	wantBuf := ui.NewBufferBuilder(80).WriteUnstyled("s").SetDotToCursor().
		WriteUnstyled("ome code").Buffer()
	if !checkBuffer(wantBuf, terminal.bufCh) {
		t.Errorf("did not get expected buffer before sending Enter")
	}
	// cleanup makes ReadCode return, which triggers the final redraw; only
	// after it returns is terminal.bufs safe to inspect.
	cleanup(terminal, codeCh)
	// Last element in bufs is nil
	finalBuf := terminal.bufs[len(terminal.bufs)-2]
	wantFinalBuf := ui.NewBufferBuilder(80).WriteUnstyled("some code").
		SetDotToCursor().Buffer()
	if !reflect.DeepEqual(finalBuf, wantFinalBuf) {
		t.Errorf("final buffer is %v, want %v", finalBuf, wantFinalBuf)
	}
}
// TestReadCode_QuitsOnSIGHUP verifies that delivering SIGHUP makes ReadCode
// return.
func TestReadCode_QuitsOnSIGHUP(t *testing.T) {
	ed, terminal, sigs := setup()
	terminal.eventCh <- tty.KeyEvent{Rune: 'a'}
	codeCh, _ := ed.readCodeAsync()
	// Wait for the keystroke to be processed before delivering the signal.
	wantBuf := ui.NewBufferBuilder(80).WriteUnstyled("a").
		SetDotToCursor().Buffer()
	if !checkBuffer(wantBuf, terminal.bufCh) {
		t.Errorf("did not get expected buffer before sending SIGHUP")
	}
	sigs.ch <- syscall.SIGHUP
	select {
	case <-codeCh:
		// TODO: Test that ReadCode returns with io.EOF
	case <-time.After(time.Second):
		t.Errorf("SIGHUP did not cause ReadCode to return")
	}
}
// TestReadCode_ResetsOnSIGINT verifies that SIGINT makes the editor abandon
// the current input and redraw an empty buffer, without returning.
//
// Renamed from TestReadCode_ResetsOnSIGHUP: the body sends and asserts on
// syscall.SIGINT throughout, and SIGHUP termination is already covered by
// TestReadCode_QuitsOnSIGHUP.
func TestReadCode_ResetsOnSIGINT(t *testing.T) {
	ed, terminal, sigs := setup()
	terminal.eventCh <- tty.KeyEvent{Rune: 'a'}
	codeCh, _ := ed.readCodeAsync()
	// Wait for the keystroke to be drawn before delivering the signal.
	wantBuf := ui.NewBufferBuilder(80).WriteUnstyled("a").
		SetDotToCursor().Buffer()
	if !checkBuffer(wantBuf, terminal.bufCh) {
		t.Errorf("did not get expected buffer before sending SIGINT")
	}
	sigs.ch <- syscall.SIGINT
	// After SIGINT the buffer should be redrawn empty.
	wantBuf = ui.NewBufferBuilder(80).Buffer()
	if !checkBuffer(wantBuf, terminal.bufCh) {
		t.Errorf("Terminal state is not reset after SIGINT")
	}
	cleanup(terminal, codeCh)
}
// TestReadCode_RedrawsOnSIGWINCH verifies that a terminal size change
// (SIGWINCH) triggers a redraw using the new terminal width.
func TestReadCode_RedrawsOnSIGWINCH(t *testing.T) {
	ed, terminal, sigs := setup()
	ed.State.Raw.Code = "1234567890"
	ed.State.Raw.Dot = len(ed.State.Raw.Code)
	codeCh, _ := ed.readCodeAsync()
	// Wait for the initial draw at the default width (80).
	wantBuf := ui.NewBufferBuilder(80).WriteUnstyled("1234567890").
		SetDotToCursor().Buffer()
	if !checkBuffer(wantBuf, terminal.bufCh) {
		t.Errorf("did not get expected buffer before sending SIGWINCH")
	}
	// Shrink the fake terminal, then deliver SIGWINCH; the redraw should use
	// the new width (4).
	terminal.setSize(24, 4)
	sigs.ch <- sys.SIGWINCH
	wantBuf = ui.NewBufferBuilder(4).WriteUnstyled("1234567890").
		SetDotToCursor().Buffer()
	if !checkBuffer(wantBuf, terminal.bufCh) {
		t.Errorf("Terminal is not redrawn after SIGWINCH")
	}
	cleanup(terminal, codeCh)
}
// setup creates an Editor backed by a fake TTY and a fake signal source and
// returns all three for use in tests.
func setup() (*Editor, *fakeTTY, *fakeSignalSource) {
	term := newFakeTTY()
	signals := newFakeSignalSource()
	return NewEditor(term, signals), term, signals
}
// cleanup terminates a pending ReadCode call by simulating an Enter
// keystroke on the fake TTY, then blocks until ReadCode has returned.
func cleanup(term *fakeTTY, codeCh <-chan string) {
	// Enter makes BasicMode accept the input, causing ReadCode to quit.
	term.eventCh <- tty.KeyEvent{Rune: '\n'}
	// Block until ReadCode has finished execution.
	<-codeCh
}
|
package main
import (
"os"
"os/user"
"github.com/dcos/dcos-cli/pkg/cli"
"github.com/dcos/dcos-cli/pkg/cmd"
"github.com/spf13/afero"
)
// main wires the standard process environment (stdio streams, environment
// variables, current user, and the OS filesystem) into a CLI context and
// executes the root DC/OS command, exiting with status 1 on any error.
func main() {
	ctx := cli.NewContext(&cli.Environment{
		Input:      os.Stdin,
		Out:        os.Stdout,
		ErrOut:     os.Stderr,
		EnvLookup:  os.LookupEnv,
		UserLookup: user.Current,
		Fs:         afero.NewOsFs(),
	})
	if err := cmd.NewDCOSCommand(ctx).Execute(); err != nil {
		os.Exit(1)
	}
}
Support the `--version` flag
It displays the versions of the CLI and of the installed plugins.
Version information about the currently attached DC/OS cluster,
if any, is also displayed.
https://jira.mesosphere.com/browse/DCOS_OSS-3702
package main
import (
"fmt"
"os"
"os/user"
"time"
"github.com/dcos/dcos-cli/pkg/cli"
"github.com/dcos/dcos-cli/pkg/cmd"
"github.com/dcos/dcos-cli/pkg/dcos"
"github.com/dcos/dcos-cli/pkg/httpclient"
"github.com/spf13/afero"
)
// version is the CLI version reported by `dcos --version`. The "SNAPSHOT"
// default is presumably overridden at build time (e.g. via -ldflags) —
// confirm against the project's build scripts.
var version = "SNAPSHOT"
// main wires the standard process environment into a CLI context, handles
// the `--version` flag as a special case, and otherwise executes the root
// DC/OS command, exiting with status 1 on any error.
func main() {
	ctx := cli.NewContext(&cli.Environment{
		Input:      os.Stdin,
		Out:        os.Stdout,
		ErrOut:     os.Stderr,
		EnvLookup:  os.LookupEnv,
		UserLookup: user.Current,
		Fs:         afero.NewOsFs(),
	})
	// `dcos --version` (exactly one argument) is intercepted before normal
	// command dispatch; anything else falls through to the command tree.
	if len(os.Args) == 2 && os.Args[1] == "--version" {
		printVersion(ctx)
		return
	}
	if err := cmd.NewDCOSCommand(ctx).Execute(); err != nil {
		os.Exit(1)
	}
}
// printVersion prints CLI version information.
func printVersion(ctx *cli.Context) {
fmt.Fprintln(ctx.Out(), "dcoscli.version="+version)
cluster, err := ctx.Cluster()
if err != nil {
return
}
dcosClient := dcos.NewClient(ctx.HTTPClient(cluster, httpclient.Timeout(3*time.Second)))
if dcosVersion, err := dcosClient.Version(); err == nil {
fmt.Fprintln(ctx.Out(), "dcos.version="+dcosVersion.Version)
fmt.Fprintln(ctx.Out(), "dcos.commit="+dcosVersion.DCOSImageCommit)
fmt.Fprintln(ctx.Out(), "dcos.bootstrap-id="+dcosVersion.BootstrapID)
} else {
fmt.Fprintln(ctx.Out(), "dcos.version=N/A")
fmt.Fprintln(ctx.Out(), "dcos.commit=N/A")
fmt.Fprintln(ctx.Out(), "dcos.bootstrap-id=N/A")
}
}
|
// Code generated by go-bindata.
// sources:
// templates/consul_catalog.tmpl
// templates/docker.tmpl
// templates/ecs.tmpl
// templates/eureka.tmpl
// templates/kubernetes.tmpl
// templates/kv.tmpl
// templates/marathon.tmpl
// templates/mesos.tmpl
// templates/notFound.tmpl
// templates/rancher.tmpl
// DO NOT EDIT!
package gentemplates
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
)
// asset pairs an embedded file's raw contents with its synthetic file info.
// (go-bindata generated code — regenerate rather than hand-edit.)
type asset struct {
	bytes []byte
	info  os.FileInfo
}
// bindataFileInfo implements os.FileInfo for embedded assets, backed by
// static metadata instead of a real file on disk.
// (go-bindata generated code — regenerate rather than hand-edit.)
type bindataFileInfo struct {
	name    string
	size    int64
	mode    os.FileMode
	modTime time.Time
}

// Name returns the stored asset name. Note: as generated it is the full
// slash-separated path, not just the base name.
func (fi bindataFileInfo) Name() string {
	return fi.name
}

// Size returns the stored size in bytes.
func (fi bindataFileInfo) Size() int64 {
	return fi.size
}

// Mode returns the stored file mode.
func (fi bindataFileInfo) Mode() os.FileMode {
	return fi.mode
}

// ModTime returns the stored modification time.
func (fi bindataFileInfo) ModTime() time.Time {
	return fi.modTime
}

// IsDir always reports false: every embedded asset is a regular file.
func (fi bindataFileInfo) IsDir() bool {
	return false
}

// Sys always returns nil; there is no underlying data source.
func (fi bindataFileInfo) Sys() interface{} {
	return nil
}
// _templatesConsul_catalogTmpl holds the embedded contents of
// templates/consul_catalog.tmpl (go-bindata generated; do not edit).
var _templatesConsul_catalogTmpl = []byte(`[backends]
{{range $index, $node := .Nodes}}
[backends."backend-{{getBackend $node}}".servers."{{getBackendName $node $index}}"]
url = "{{getAttribute "protocol" $node.Service.Tags "http"}}://{{getBackendAddress $node}}:{{$node.Service.Port}}"
{{$weight := getAttribute "backend.weight" $node.Service.Tags "0"}}
{{with $weight}}
weight = {{$weight}}
{{end}}
{{end}}
{{range .Services}}
{{$service := .ServiceName}}
{{$circuitBreaker := getAttribute "backend.circuitbreaker" .Attributes ""}}
{{with $circuitBreaker}}
[backends."backend-{{$service}}".circuitbreaker]
expression = "{{$circuitBreaker}}"
{{end}}
[backends."backend-{{$service}}".loadbalancer]
method = "{{getAttribute "backend.loadbalancer" .Attributes "wrr"}}"
sticky = {{getSticky .Attributes}}
{{if hasStickinessLabel .Attributes}}
[backends."backend-{{$service}}".loadbalancer.stickiness]
cookieName = "{{getStickinessCookieName .Attributes}}"
{{end}}
{{if hasMaxconnAttributes .Attributes}}
[backends."backend-{{$service}}".maxconn]
amount = {{getAttribute "backend.maxconn.amount" .Attributes "" }}
extractorfunc = "{{getAttribute "backend.maxconn.extractorfunc" .Attributes "" }}"
{{end}}
{{end}}
[frontends]
{{range .Services}}
[frontends."frontend-{{.ServiceName}}"]
backend = "backend-{{.ServiceName}}"
passHostHeader = {{getAttribute "frontend.passHostHeader" .Attributes "true"}}
priority = {{getAttribute "frontend.priority" .Attributes "0"}}
{{$entryPoints := getAttribute "frontend.entrypoints" .Attributes ""}}
{{with $entryPoints}}
entrypoints = [{{range getEntryPoints $entryPoints}}
"{{.}}",
{{end}}]
{{end}}
basicAuth = [{{range getBasicAuth .Attributes}}
"{{.}}",
{{end}}]
[frontends."frontend-{{.ServiceName}}".routes."route-host-{{.ServiceName}}"]
rule = "{{getFrontendRule .}}"
{{end}}
`)

// templatesConsul_catalogTmplBytes returns the raw bytes of
// templates/consul_catalog.tmpl; the error is always nil.
func templatesConsul_catalogTmplBytes() ([]byte, error) {
	return _templatesConsul_catalogTmpl, nil
}

// templatesConsul_catalogTmpl wraps the template bytes in an asset with
// placeholder file info (zero size, mode, and mtime).
func templatesConsul_catalogTmpl() (*asset, error) {
	bytes, err := templatesConsul_catalogTmplBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "templates/consul_catalog.tmpl", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
// _templatesDockerTmpl holds the embedded contents of
// templates/docker.tmpl (go-bindata generated; do not edit).
var _templatesDockerTmpl = []byte(`{{$backendServers := .Servers}}
[backends]
{{range $backendName, $backend := .Backends}}
{{if hasCircuitBreakerLabel $backend}}
[backends.backend-{{$backendName}}.circuitBreaker]
expression = "{{getCircuitBreakerExpression $backend}}"
{{end}}
{{if hasLoadBalancerLabel $backend}}
[backends.backend-{{$backendName}}.loadBalancer]
method = "{{getLoadBalancerMethod $backend}}"
sticky = {{getSticky $backend}}
{{if hasStickinessLabel $backend}}
[backends.backend-{{$backendName}}.loadBalancer.stickiness]
cookieName = "{{getStickinessCookieName $backend}}"
{{end}}
{{end}}
{{if hasMaxConnLabels $backend}}
[backends.backend-{{$backendName}}.maxConn]
amount = {{getMaxConnAmount $backend}}
extractorFunc = "{{getMaxConnExtractorFunc $backend}}"
{{end}}
{{if hasHealthCheckLabels $backend}}
[backends.backend-{{$backendName}}.healthCheck]
path = "{{getHealthCheckPath $backend}}"
port = {{getHealthCheckPort $backend}}
interval = "{{getHealthCheckInterval $backend}}"
{{end}}
{{$servers := index $backendServers $backendName}}
{{range $serverName, $server := $servers}}
{{if hasServices $server}}
{{$services := getServiceNames $server}}
{{range $serviceIndex, $serviceName := $services}}
[backends.backend-{{getServiceBackend $server $serviceName}}.servers.service-{{$serverName}}]
url = "{{getServiceProtocol $server $serviceName}}://{{getIPAddress $server}}:{{getServicePort $server $serviceName}}"
weight = {{getServiceWeight $server $serviceName}}
{{end}}
{{else}}
[backends.backend-{{$backendName}}.servers.server-{{$server.Name | replace "/" "" | replace "." "-"}}]
url = "{{getProtocol $server}}://{{getIPAddress $server}}:{{getPort $server}}"
weight = {{getWeight $server}}
{{end}}
{{end}}
{{end}}
[frontends]
{{range $frontend, $containers := .Frontends}}
{{$container := index $containers 0}}
{{if hasServices $container}}
{{$services := getServiceNames $container}}
{{range $serviceIndex, $serviceName := $services}}
[frontends."frontend-{{getServiceBackend $container $serviceName}}"]
backend = "backend-{{getServiceBackend $container $serviceName}}"
priority = {{getServicePriority $container $serviceName}}
passHostHeader = {{getServicePassHostHeader $container $serviceName}}
passTLSCert = {{getServicePassTLSCert $container $serviceName}}
entryPoints = [{{range getServiceEntryPoints $container $serviceName}}
"{{.}}",
{{end}}]
{{if getServiceWhitelistSourceRange $container $serviceName}}
whitelistSourceRange = [{{range getServiceWhitelistSourceRange $container $serviceName}}
"{{.}}",
{{end}}]
{{end}}
basicAuth = [{{range getServiceBasicAuth $container $serviceName}}
"{{.}}",
{{end}}]
{{if hasServiceRedirect $container $serviceName}}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".redirect]
entryPoint = "{{getServiceRedirectEntryPoint $container $serviceName}}"
regex = "{{getServiceRedirectRegex $container $serviceName}}"
replacement = "{{getServiceRedirectReplacement $container $serviceName}}"
{{end}}
{{ if hasServiceErrorPages $container $serviceName }}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".errors]
{{ range $pageName, $page := getServiceErrorPages $container $serviceName }}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".errors.{{$pageName}}]
status = [{{range $page.Status}}
"{{.}}",
{{end}}]
backend = "{{$page.Backend}}"
query = "{{$page.Query}}"
{{end}}
{{end}}
{{ if hasServiceRateLimits $container $serviceName }}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".rateLimit]
extractorFunc = "{{ getRateLimitsExtractorFunc $container $serviceName }}"
[frontends."frontend-{{getServiceBackend $container $serviceName}}".rateLimit.rateSet]
{{ range $limitName, $rateLimit := getServiceRateLimits $container $serviceName }}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".rateLimit.rateSet.{{ $limitName }}]
period = "{{ $rateLimit.Period }}"
average = {{ $rateLimit.Average }}
burst = {{ $rateLimit.Burst }}
{{end}}
{{end}}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".routes."service-{{$serviceName | replace "/" "" | replace "." "-"}}"]
rule = "{{getServiceFrontendRule $container $serviceName}}"
{{if hasServiceRequestHeaders $container $serviceName}}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".headers.customRequestHeaders]
{{range $k, $v := getServiceRequestHeaders $container $serviceName}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if hasServiceResponseHeaders $container $serviceName}}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".headers.customResponseHeaders]
{{range $k, $v := getServiceResponseHeaders $container $serviceName}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{end}} ## end range services
{{else}}
[frontends."frontend-{{$frontend}}"]
backend = "backend-{{getBackend $container}}"
priority = {{getPriority $container}}
passHostHeader = {{getPassHostHeader $container}}
passTLSCert = {{getPassTLSCert $container}}
entryPoints = [{{range getEntryPoints $container}}
"{{.}}",
{{end}}]
{{if getWhitelistSourceRange $container}}
whitelistSourceRange = [{{range getWhitelistSourceRange $container}}
"{{.}}",
{{end}}]
{{end}}
basicAuth = [{{range getBasicAuth $container}}
"{{.}}",
{{end}}]
{{if hasRedirect $container}}
[frontends."frontend-{{$frontend}}".redirect]
entryPoint = "{{getRedirectEntryPoint $container}}"
regex = "{{getRedirectRegex $container}}"
replacement = "{{getRedirectReplacement $container}}"
{{end}}
{{ if hasErrorPages $container }}
[frontends."frontend-{{$frontend}}".errors]
{{ range $pageName, $page := getErrorPages $container }}
[frontends."frontend-{{$frontend}}".errors.{{ $pageName }}]
status = [{{range $page.Status}}
"{{.}}",
{{end}}]
backend = "{{$page.Backend}}"
query = "{{$page.Query}}"
{{end}}
{{end}}
{{ if hasRateLimits $container }}
[frontends."frontend-{{$frontend}}".rateLimit]
extractorFunc = "{{ getRateLimitsExtractorFunc $container }}"
[frontends."frontend-{{$frontend}}".rateLimit.rateSet]
{{ range $limitName, $rateLimit := getRateLimits $container }}
[frontends."frontend-{{$frontend}}".rateLimit.rateSet.{{ $limitName }}]
period = "{{ $rateLimit.Period }}"
average = {{ $rateLimit.Average }}
burst = {{ $rateLimit.Burst }}
{{end}}
{{end}}
[frontends."frontend-{{$frontend}}".headers]
{{if hasSSLRedirectHeaders $container}}
SSLRedirect = {{getSSLRedirectHeaders $container}}
{{end}}
{{if hasSSLTemporaryRedirectHeaders $container}}
SSLTemporaryRedirect = {{getSSLTemporaryRedirectHeaders $container}}
{{end}}
{{if hasSSLHostHeaders $container}}
SSLHost = "{{getSSLHostHeaders $container}}"
{{end}}
{{if hasSTSSecondsHeaders $container}}
STSSeconds = {{getSTSSecondsHeaders $container}}
{{end}}
{{if hasSTSIncludeSubdomainsHeaders $container}}
STSIncludeSubdomains = {{getSTSIncludeSubdomainsHeaders $container}}
{{end}}
{{if hasSTSPreloadHeaders $container}}
STSPreload = {{getSTSPreloadHeaders $container}}
{{end}}
{{if hasForceSTSHeaderHeaders $container}}
ForceSTSHeader = {{getForceSTSHeaderHeaders $container}}
{{end}}
{{if hasFrameDenyHeaders $container}}
FrameDeny = {{getFrameDenyHeaders $container}}
{{end}}
{{if hasCustomFrameOptionsValueHeaders $container}}
CustomFrameOptionsValue = "{{getCustomFrameOptionsValueHeaders $container}}"
{{end}}
{{if hasContentTypeNosniffHeaders $container}}
ContentTypeNosniff = {{getContentTypeNosniffHeaders $container}}
{{end}}
{{if hasBrowserXSSFilterHeaders $container}}
BrowserXSSFilter = {{getBrowserXSSFilterHeaders $container}}
{{end}}
{{if hasContentSecurityPolicyHeaders $container}}
ContentSecurityPolicy = "{{getContentSecurityPolicyHeaders $container}}"
{{end}}
{{if hasPublicKeyHeaders $container}}
PublicKey = "{{getPublicKeyHeaders $container}}"
{{end}}
{{if hasReferrerPolicyHeaders $container}}
ReferrerPolicy = "{{getReferrerPolicyHeaders $container}}"
{{end}}
{{if hasIsDevelopmentHeaders $container}}
IsDevelopment = {{getIsDevelopmentHeaders $container}}
{{end}}
{{if hasRequestHeaders $container}}
[frontends."frontend-{{$frontend}}".headers.customRequestHeaders]
{{range $k, $v := getRequestHeaders $container}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if hasResponseHeaders $container}}
[frontends."frontend-{{$frontend}}".headers.customResponseHeaders]
{{range $k, $v := getResponseHeaders $container}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if hasAllowedHostsHeaders $container}}
[frontends."frontend-{{$frontend}}".headers.AllowedHosts]
{{range getAllowedHostsHeaders $container}}
"{{.}}"
{{end}}
{{end}}
{{if hasHostsProxyHeaders $container}}
[frontends."frontend-{{$frontend}}".headers.HostsProxyHeaders]
{{range getHostsProxyHeaders $container}}
"{{.}}"
{{end}}
{{end}}
{{if hasSSLProxyHeaders $container}}
[frontends."frontend-{{$frontend}}".headers.SSLProxyHeaders]
{{range $k, $v := getSSLProxyHeaders $container}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
[frontends."frontend-{{$frontend}}".routes."route-frontend-{{$frontend}}"]
rule = "{{getFrontendRule $container}}"
{{end}}
{{end}}
`)

// templatesDockerTmplBytes returns the raw bytes of templates/docker.tmpl;
// the error is always nil.
func templatesDockerTmplBytes() ([]byte, error) {
	return _templatesDockerTmpl, nil
}

// templatesDockerTmpl wraps the template bytes in an asset with placeholder
// file info (zero size, mode, and mtime).
func templatesDockerTmpl() (*asset, error) {
	bytes, err := templatesDockerTmplBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "templates/docker.tmpl", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
// _templatesEcsTmpl holds the embedded contents of templates/ecs.tmpl
// (go-bindata generated; do not edit).
var _templatesEcsTmpl = []byte(`[backends]{{range $serviceName, $instances := .Services}}
[backends.backend-{{ $serviceName }}.loadbalancer]
method = "{{ getLoadBalancerMethod $instances}}"
sticky = {{ getSticky $instances}}
{{if hasStickinessLabel $instances}}
[backends.backend-{{ $serviceName }}.loadbalancer.stickiness]
cookieName = "{{getStickinessCookieName $instances}}"
{{end}}
{{ if hasHealthCheckLabels $instances }}
[backends.backend-{{ $serviceName }}.healthcheck]
path = "{{getHealthCheckPath $instances }}"
interval = "{{getHealthCheckInterval $instances }}"
{{end}}
{{range $index, $i := $instances}}
[backends.backend-{{ $i.Name }}.servers.server-{{ $i.Name }}{{ $i.ID }}]
url = "{{ getProtocol $i }}://{{ getHost $i }}:{{ getPort $i }}"
weight = {{ getWeight $i}}
{{end}}
{{end}}
[frontends]{{range $serviceName, $instances := .Services}}
{{range filterFrontends $instances}}
[frontends.frontend-{{ $serviceName }}]
backend = "backend-{{ $serviceName }}"
passHostHeader = {{ getPassHostHeader .}}
priority = {{ getPriority .}}
entryPoints = [{{range getEntryPoints .}}
"{{.}}",
{{end}}]
basicAuth = [{{range getBasicAuth .}}
"{{.}}",
{{end}}]
[frontends.frontend-{{ $serviceName }}.routes.route-frontend-{{ $serviceName }}]
rule = "{{getFrontendRule .}}"
{{end}}
{{end}}`)

// templatesEcsTmplBytes returns the raw bytes of templates/ecs.tmpl; the
// error is always nil.
func templatesEcsTmplBytes() ([]byte, error) {
	return _templatesEcsTmpl, nil
}

// templatesEcsTmpl wraps the template bytes in an asset with placeholder
// file info (zero size, mode, and mtime).
func templatesEcsTmpl() (*asset, error) {
	bytes, err := templatesEcsTmplBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "templates/ecs.tmpl", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
// _templatesEurekaTmpl holds the embedded contents of templates/eureka.tmpl
// (go-bindata generated; do not edit).
var _templatesEurekaTmpl = []byte(`[backends]{{range .Applications}}
{{ $app := .}}
{{range .Instances}}
[backends.backend{{$app.Name}}.servers.server-{{ getInstanceID . }}]
url = "{{ getProtocol . }}://{{ .IpAddr }}:{{ getPort . }}"
weight = {{ getWeight . }}
{{end}}{{end}}
[frontends]{{range .Applications}}
[frontends.frontend{{.Name}}]
backend = "backend{{.Name}}"
entryPoints = ["http"]
[frontends.frontend{{.Name }}.routes.route-host{{.Name}}]
rule = "Host:{{ .Name | tolower }}"
{{end}}
`)

// templatesEurekaTmplBytes returns the raw bytes of templates/eureka.tmpl;
// the error is always nil.
func templatesEurekaTmplBytes() ([]byte, error) {
	return _templatesEurekaTmpl, nil
}

// templatesEurekaTmpl wraps the template bytes in an asset with placeholder
// file info (zero size, mode, and mtime).
func templatesEurekaTmpl() (*asset, error) {
	bytes, err := templatesEurekaTmplBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "templates/eureka.tmpl", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
// _templatesKubernetesTmpl holds the embedded contents of
// templates/kubernetes.tmpl (go-bindata generated; do not edit).
var _templatesKubernetesTmpl = []byte(`[backends]{{range $backendName, $backend := .Backends}}
[backends."{{$backendName}}"]
{{if $backend.CircuitBreaker}}
[backends."{{$backendName}}".circuitbreaker]
expression = "{{$backend.CircuitBreaker.Expression}}"
{{end}}
[backends."{{$backendName}}".loadbalancer]
method = "{{$backend.LoadBalancer.Method}}"
{{if $backend.LoadBalancer.Sticky}}
sticky = true
{{end}}
{{if $backend.LoadBalancer.Stickiness}}
[backends."{{$backendName}}".loadbalancer.stickiness]
cookieName = "{{$backend.LoadBalancer.Stickiness.CookieName}}"
{{end}}
{{range $serverName, $server := $backend.Servers}}
[backends."{{$backendName}}".servers."{{$serverName}}"]
url = "{{$server.URL}}"
weight = {{$server.Weight}}
{{end}}
{{end}}
[frontends]{{range $frontendName, $frontend := .Frontends}}
[frontends."{{$frontendName}}"]
backend = "{{$frontend.Backend}}"
priority = {{$frontend.Priority}}
passHostHeader = {{$frontend.PassHostHeader}}
entryPoints = [{{range $frontend.EntryPoints}}
"{{.}}",
{{end}}]
basicAuth = [{{range $frontend.BasicAuth}}
"{{.}}",
{{end}}]
whitelistSourceRange = [{{range $frontend.WhitelistSourceRange}}
"{{.}}",
{{end}}]
{{if $frontend.Redirect}}
[frontends."{{$frontendName}}".redirect]
entryPoint = "{{$frontend.RedirectEntryPoint}}"
regex = "{{$frontend.RedirectRegex}}"
replacement = "{{$frontend.RedirectReplacement}}"
{{end}}
[frontends."{{$frontendName}}".headers]
SSLRedirect = {{$frontend.Headers.SSLRedirect}}
SSLTemporaryRedirect = {{$frontend.Headers.SSLTemporaryRedirect}}
SSLHost = "{{$frontend.Headers.SSLHost}}"
STSSeconds = {{$frontend.Headers.STSSeconds}}
STSIncludeSubdomains = {{$frontend.Headers.STSIncludeSubdomains}}
STSPreload = {{$frontend.Headers.STSPreload}}
ForceSTSHeader = {{$frontend.Headers.ForceSTSHeader}}
FrameDeny = {{$frontend.Headers.FrameDeny}}
CustomFrameOptionsValue = "{{$frontend.Headers.CustomFrameOptionsValue}}"
ContentTypeNosniff = {{$frontend.Headers.ContentTypeNosniff}}
BrowserXSSFilter = {{$frontend.Headers.BrowserXSSFilter}}
ContentSecurityPolicy = "{{$frontend.Headers.ContentSecurityPolicy}}"
PublicKey = "{{$frontend.Headers.PublicKey}}"
ReferrerPolicy = "{{$frontend.Headers.ReferrerPolicy}}"
IsDevelopment = {{$frontend.Headers.IsDevelopment}}
{{if $frontend.Headers.CustomRequestHeaders}}
[frontends."{{$frontendName}}".headers.customrequestheaders]
{{range $k, $v := $frontend.Headers.CustomRequestHeaders}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if $frontend.Headers.CustomResponseHeaders}}
[frontends."{{$frontendName}}".headers.customresponseheaders]
{{range $k, $v := $frontend.Headers.CustomResponseHeaders}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if $frontend.Headers.AllowedHosts}}
[frontends."{{$frontendName}}".headers.AllowedHosts]
{{range $frontend.Headers.AllowedHosts}}
"{{.}}"
{{end}}
{{end}}
{{if $frontend.Headers.HostsProxyHeaders}}
[frontends."{{$frontendName}}".headers.HostsProxyHeaders]
{{range $frontend.Headers.HostsProxyHeaders}}
"{{.}}"
{{end}}
{{end}}
{{if $frontend.Headers.SSLProxyHeaders}}
[frontends."{{$frontendName}}".headers.SSLProxyHeaders]
{{range $k, $v := $frontend.Headers.SSLProxyHeaders}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{range $routeName, $route := $frontend.Routes}}
[frontends."{{$frontendName}}".routes."{{$routeName}}"]
rule = "{{$route.Rule}}"
{{end}}
{{end}}
`)

// templatesKubernetesTmplBytes returns the raw bytes of
// templates/kubernetes.tmpl; the error is always nil.
func templatesKubernetesTmplBytes() ([]byte, error) {
	return _templatesKubernetesTmpl, nil
}

// templatesKubernetesTmpl wraps the template bytes in an asset with
// placeholder file info (zero size, mode, and mtime).
func templatesKubernetesTmpl() (*asset, error) {
	bytes, err := templatesKubernetesTmplBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "templates/kubernetes.tmpl", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
// _templatesKvTmpl holds the embedded contents of templates/kv.tmpl
// (go-bindata generated; do not edit).
var _templatesKvTmpl = []byte(`{{$frontends := List .Prefix "/frontends/" }}
{{$backends := List .Prefix "/backends/"}}
{{$tlsconfiguration := List .Prefix "/tlsconfiguration/"}}
[backends]{{range $backends}}
{{$backend := .}}
{{$backendName := Last $backend}}
{{$servers := ListServers $backend }}
{{$circuitBreaker := Get "" . "/circuitbreaker/" "expression"}}
{{with $circuitBreaker}}
[backends."{{$backendName}}".circuitBreaker]
expression = "{{$circuitBreaker}}"
{{end}}
{{$loadBalancer := Get "" . "/loadbalancer/" "method"}}
{{with $loadBalancer}}
[backends."{{$backendName}}".loadBalancer]
method = "{{$loadBalancer}}"
sticky = {{ getSticky . }}
{{if hasStickinessLabel $backend}}
[backends."{{$backendName}}".loadBalancer.stickiness]
cookieName = "{{getStickinessCookieName $backend}}"
{{end}}
{{end}}
{{$healthCheck := Get "" . "/healthcheck/" "path"}}
{{with $healthCheck}}
[backends."{{$backendName}}".healthCheck]
path = "{{$healthCheck}}"
interval = "{{ Get "30s" $backend "/healthcheck/" "interval" }}"
{{end}}
{{$maxConnAmt := Get "" . "/maxconn/" "amount"}}
{{$maxConnExtractorFunc := Get "" . "/maxconn/" "extractorfunc"}}
{{with $maxConnAmt}}
{{with $maxConnExtractorFunc}}
[backends."{{$backendName}}".maxConn]
amount = {{$maxConnAmt}}
extractorFunc = "{{$maxConnExtractorFunc}}"
{{end}}
{{end}}
{{range $servers}}
[backends."{{$backendName}}".servers."{{Last .}}"]
url = "{{Get "" . "/url"}}"
weight = {{Get "0" . "/weight"}}
{{end}}
{{end}}
[frontends]{{range $frontends}}
{{$frontend := Last .}}
{{$entryPoints := SplitGet . "/entrypoints"}}
[frontends."{{$frontend}}"]
backend = "{{Get "" . "/backend"}}"
passHostHeader = {{Get "true" . "/passHostHeader"}}
priority = {{Get "0" . "/priority"}}
entryPoints = [{{range $entryPoints}}
"{{.}}",
{{end}}]
{{$routes := List . "/routes/"}}
{{range $routes}}
[frontends."{{$frontend}}".routes."{{Last .}}"]
rule = "{{Get "" . "/rule"}}"
{{end}}
{{end}}
{{range $tlsconfiguration}}
{{$entryPoints := SplitGet . "/entrypoints"}}
[[tlsConfiguration]]
entryPoints = [{{range $entryPoints}}
"{{.}}",
{{end}}]
[tlsConfiguration.certificate]
certFile = """{{Get "" . "/certificate" "/certfile"}}"""
keyFile = """{{Get "" . "/certificate" "/keyfile"}}"""
{{end}}
`)

// templatesKvTmplBytes returns the raw bytes of templates/kv.tmpl; the
// error is always nil.
func templatesKvTmplBytes() ([]byte, error) {
	return _templatesKvTmpl, nil
}

// templatesKvTmpl wraps the template bytes in an asset with placeholder
// file info (zero size, mode, and mtime).
func templatesKvTmpl() (*asset, error) {
	bytes, err := templatesKvTmplBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "templates/kv.tmpl", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
// _templatesMarathonTmpl holds the embedded contents of
// templates/marathon.tmpl (go-bindata generated; do not edit).
var _templatesMarathonTmpl = []byte(`{{$apps := .Applications}}
{{range $app := $apps}}
{{range $task := $app.Tasks}}
{{range $serviceIndex, $serviceName := getServiceNames $app}}
[backends."{{getBackend $app $serviceName}}".servers."server-{{$task.ID | replace "." "-"}}{{getServiceNameSuffix $serviceName }}"]
url = "{{getProtocol $app $serviceName}}://{{getBackendServer $task $app}}:{{getPort $task $app $serviceName}}"
weight = {{getWeight $app $serviceName}}
{{end}}
{{end}}
{{end}}
{{range $app := $apps}}
{{range $serviceIndex, $serviceName := getServiceNames $app}}
[backends."{{getBackend $app $serviceName }}"]
{{ if hasMaxConnLabels $app }}
[backends."{{getBackend $app $serviceName }}".maxconn]
amount = {{getMaxConnAmount $app }}
extractorfunc = "{{getMaxConnExtractorFunc $app }}"
{{end}}
{{ if hasLoadBalancerLabels $app }}
[backends."{{getBackend $app $serviceName }}".loadbalancer]
method = "{{getLoadBalancerMethod $app }}"
sticky = {{getSticky $app}}
{{if hasStickinessLabel $app}}
[backends."{{getBackend $app $serviceName }}".loadbalancer.stickiness]
cookieName = "{{getStickinessCookieName $app}}"
{{end}}
{{end}}
{{ if hasCircuitBreakerLabels $app }}
[backends."{{getBackend $app $serviceName }}".circuitbreaker]
expression = "{{getCircuitBreakerExpression $app }}"
{{end}}
{{ if hasHealthCheckLabels $app }}
[backends."{{getBackend $app $serviceName }}".healthcheck]
path = "{{getHealthCheckPath $app }}"
interval = "{{getHealthCheckInterval $app }}"
{{end}}
{{end}}
{{end}}
[frontends]{{range $app := $apps}}{{range $serviceIndex, $serviceName := getServiceNames .}}
[frontends."{{ getFrontendName $app $serviceName }}"]
backend = "{{getBackend $app $serviceName}}"
passHostHeader = {{getPassHostHeader $app $serviceName}}
priority = {{getPriority $app $serviceName}}
entryPoints = [{{range getEntryPoints $app $serviceName}}
"{{.}}",
{{end}}]
basicAuth = [{{range getBasicAuth $app $serviceName}}
"{{.}}",
{{end}}]
[frontends."{{ getFrontendName $app $serviceName }}".routes."route-host{{$app.ID | replace "/" "-"}}{{getServiceNameSuffix $serviceName }}"]
rule = "{{getFrontendRule $app $serviceName}}"
{{end}}{{end}}
`)

// templatesMarathonTmplBytes returns the raw bytes of
// templates/marathon.tmpl; the error is always nil.
func templatesMarathonTmplBytes() ([]byte, error) {
	return _templatesMarathonTmpl, nil
}

// templatesMarathonTmpl wraps the template bytes in an asset with
// placeholder file info (zero size, mode, and mtime).
func templatesMarathonTmpl() (*asset, error) {
	bytes, err := templatesMarathonTmplBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "templates/marathon.tmpl", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
// _templatesMesosTmpl holds the embedded contents of templates/mesos.tmpl
// (go-bindata generated; do not edit).
var _templatesMesosTmpl = []byte(`{{$apps := .Applications}}
[backends]{{range .Tasks}}
[backends.backend{{getBackend . $apps}}.servers.server-{{getID .}}]
url = "{{getProtocol . $apps}}://{{getHost .}}:{{getPort . $apps}}"
weight = {{getWeight . $apps}}
{{end}}
[frontends]{{range .Applications}}
[frontends.frontend-{{getFrontEndName .}}]
backend = "backend{{getFrontendBackend .}}"
passHostHeader = {{getPassHostHeader .}}
priority = {{getPriority .}}
entryPoints = [{{range getEntryPoints .}}
"{{.}}",
{{end}}]
[frontends.frontend-{{getFrontEndName .}}.routes.route-host{{getFrontEndName .}}]
rule = "{{getFrontendRule .}}"
{{end}}
`)

// templatesMesosTmplBytes returns the raw bytes of templates/mesos.tmpl;
// the error is always nil.
func templatesMesosTmplBytes() ([]byte, error) {
	return _templatesMesosTmpl, nil
}

// templatesMesosTmpl wraps the template bytes in an asset with placeholder
// file info (zero size, mode, and mtime).
func templatesMesosTmpl() (*asset, error) {
	bytes, err := templatesMesosTmplBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "templates/mesos.tmpl", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
// _templatesNotfoundTmpl holds the embedded contents of
// templates/notFound.tmpl (go-bindata generated; do not edit).
var _templatesNotfoundTmpl = []byte(`<!DOCTYPE html>
<html>
<head>
<title>Traefik</title>
</head>
<body>
Ohhhh man, this is bad...
</body>
</html>`)

// templatesNotfoundTmplBytes returns the raw bytes of
// templates/notFound.tmpl; the error is always nil.
func templatesNotfoundTmplBytes() ([]byte, error) {
	return _templatesNotfoundTmpl, nil
}

// templatesNotfoundTmpl wraps the template bytes in an asset with
// placeholder file info (zero size, mode, and mtime).
func templatesNotfoundTmpl() (*asset, error) {
	bytes, err := templatesNotfoundTmplBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "templates/notFound.tmpl", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
var _templatesRancherTmpl = []byte(`{{$backendServers := .Backends}}
[backends]{{range $backendName, $backend := .Backends}}
{{if hasCircuitBreakerLabel $backend}}
[backends.backend-{{$backendName}}.circuitbreaker]
expression = "{{getCircuitBreakerExpression $backend}}"
{{end}}
{{if hasLoadBalancerLabel $backend}}
[backends.backend-{{$backendName}}.loadbalancer]
method = "{{getLoadBalancerMethod $backend}}"
sticky = {{getSticky $backend}}
{{if hasStickinessLabel $backend}}
[backends.backend-{{$backendName}}.loadbalancer.stickiness]
cookieName = "{{getStickinessCookieName $backend}}"
{{end}}
{{end}}
{{if hasMaxConnLabels $backend}}
[backends.backend-{{$backendName}}.maxconn]
amount = {{getMaxConnAmount $backend}}
extractorfunc = "{{getMaxConnExtractorFunc $backend}}"
{{end}}
{{range $index, $ip := $backend.Containers}}
[backends.backend-{{$backendName}}.servers.server-{{$index}}]
url = "{{getProtocol $backend}}://{{$ip}}:{{getPort $backend}}"
weight = {{getWeight $backend}}
{{end}}
{{end}}
[frontends]{{range $frontendName, $service := .Frontends}}
[frontends."frontend-{{$frontendName}}"]
backend = "backend-{{getBackend $service}}"
passHostHeader = {{getPassHostHeader $service}}
priority = {{getPriority $service}}
entryPoints = [{{range getEntryPoints $service}}
"{{.}}",
{{end}}]
basicAuth = [{{range getBasicAuth $service}}
"{{.}}",
{{end}}]
{{if hasRedirect $service}}
[frontends."frontend-{{$frontendName}}".redirect]
entryPoint = "{{getRedirectEntryPoint $service}}"
regex = "{{getRedirectRegex $service}}"
replacement = "{{getRedirectReplacement $service}}"
{{end}}
[frontends."frontend-{{$frontendName}}".routes."route-frontend-{{$frontendName}}"]
rule = "{{getFrontendRule $service}}"
{{end}}
`)
// templatesRancherTmplBytes returns the raw bytes of the embedded
// "templates/rancher.tmpl" asset. It never fails; the error return
// exists to satisfy the generated loader signature.
func templatesRancherTmplBytes() ([]byte, error) {
	return _templatesRancherTmpl, nil
}
// templatesRancherTmpl builds the asset descriptor for
// "templates/rancher.tmpl": the raw bytes plus synthetic file info
// (size/mode/mtime are zero because go-bindata recorded none).
func templatesRancherTmpl() (*asset, error) {
	data, err := templatesRancherTmplBytes()
	if err != nil {
		return nil, err
	}
	fi := bindataFileInfo{name: "templates/rancher.tmpl", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	return &asset{bytes: data, info: fi}, nil
}
// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded. Backslashes in name are normalized to
// forward slashes before the lookup.
func Asset(name string) ([]byte, error) {
	cannonicalName := strings.Replace(name, "\\", "/", -1)
	loader, ok := _bindata[cannonicalName]
	if !ok {
		return nil, fmt.Errorf("Asset %s not found", name)
	}
	a, err := loader()
	if err != nil {
		return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
	}
	return a.bytes, nil
}
// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
	data, err := Asset(name)
	if err != nil {
		panic("asset: Asset(" + name + "): " + err.Error())
	}
	return data
}
// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded. Backslashes in name are normalized to
// forward slashes before the lookup.
func AssetInfo(name string) (os.FileInfo, error) {
	cannonicalName := strings.Replace(name, "\\", "/", -1)
	loader, ok := _bindata[cannonicalName]
	if !ok {
		return nil, fmt.Errorf("AssetInfo %s not found", name)
	}
	a, err := loader()
	if err != nil {
		return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
	}
	return a.info, nil
}
// AssetNames returns the names of the assets registered in _bindata,
// in no particular order (map iteration order is random).
func AssetNames() []string {
	out := make([]string, 0, len(_bindata))
	for assetName := range _bindata {
		out = append(out, assetName)
	}
	return out
}
// _bindata is a table, holding each asset generator, mapped to its name.
// Keys are the slash-separated asset paths accepted by Asset/AssetInfo.
var _bindata = map[string]func() (*asset, error){
	"templates/consul_catalog.tmpl": templatesConsul_catalogTmpl,
	"templates/docker.tmpl":         templatesDockerTmpl,
	"templates/ecs.tmpl":            templatesEcsTmpl,
	"templates/eureka.tmpl":         templatesEurekaTmpl,
	"templates/kubernetes.tmpl":     templatesKubernetesTmpl,
	"templates/kv.tmpl":             templatesKvTmpl,
	"templates/marathon.tmpl":       templatesMarathonTmpl,
	"templates/mesos.tmpl":          templatesMesosTmpl,
	"templates/notFound.tmpl":       templatesNotfoundTmpl,
	"templates/rancher.tmpl":        templatesRancherTmpl,
}
// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
//     data/
//       foo.txt
//       img/
//         a.png
//         b.png
// then AssetDir("data") would return []string{"foo.txt", "img"},
// AssetDir("data/img") would return []string{"a.png", "b.png"},
// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
	cur := _bintree
	if len(name) != 0 {
		// Walk the tree one path segment at a time (backslashes normalized).
		canonical := strings.Replace(name, "\\", "/", -1)
		for _, segment := range strings.Split(canonical, "/") {
			cur = cur.Children[segment]
			if cur == nil {
				return nil, fmt.Errorf("Asset %s not found", name)
			}
		}
	}
	// A node with a loader function is a file, not a directory.
	if cur.Func != nil {
		return nil, fmt.Errorf("Asset %s not found", name)
	}
	children := make([]string, 0, len(cur.Children))
	for child := range cur.Children {
		children = append(children, child)
	}
	return children, nil
}
// bintree is a node in the embedded asset directory tree. A file node
// carries a non-nil Func (its asset loader) and no children; a directory
// node has Func == nil and one child per directory entry.
type bintree struct {
	Func     func() (*asset, error)
	Children map[string]*bintree
}
// _bintree is the root of the asset directory tree consumed by AssetDir:
// a single "templates" directory whose children are the template files.
var _bintree = &bintree{nil, map[string]*bintree{
	"templates": {nil, map[string]*bintree{
		"consul_catalog.tmpl": {templatesConsul_catalogTmpl, map[string]*bintree{}},
		"docker.tmpl":         {templatesDockerTmpl, map[string]*bintree{}},
		"ecs.tmpl":            {templatesEcsTmpl, map[string]*bintree{}},
		"eureka.tmpl":         {templatesEurekaTmpl, map[string]*bintree{}},
		"kubernetes.tmpl":     {templatesKubernetesTmpl, map[string]*bintree{}},
		"kv.tmpl":             {templatesKvTmpl, map[string]*bintree{}},
		"marathon.tmpl":       {templatesMarathonTmpl, map[string]*bintree{}},
		"mesos.tmpl":          {templatesMesosTmpl, map[string]*bintree{}},
		"notFound.tmpl":       {templatesNotfoundTmpl, map[string]*bintree{}},
		"rancher.tmpl":        {templatesRancherTmpl, map[string]*bintree{}},
	}},
}}
// RestoreAsset restores an asset under the given directory: it writes the
// asset's bytes to dir/name (creating parent directories with mode 0755)
// and applies the recorded file mode and modification time.
func RestoreAsset(dir, name string) error {
	data, err := Asset(name)
	if err != nil {
		return err
	}
	info, err := AssetInfo(name)
	if err != nil {
		return err
	}
	if err := os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)); err != nil {
		return err
	}
	if err := ioutil.WriteFile(_filePath(dir, name), data, info.Mode()); err != nil {
		return err
	}
	// Restore the recorded modification time last, once the file exists.
	return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
}
// RestoreAssets restores an asset under the given directory recursively:
// a file is written via RestoreAsset, a directory is walked child by child.
func RestoreAssets(dir, name string) error {
	children, err := AssetDir(name)
	if err != nil {
		// Not a directory in the asset tree: restore it as a single file.
		return RestoreAsset(dir, name)
	}
	for _, child := range children {
		if err := RestoreAssets(dir, filepath.Join(name, child)); err != nil {
			return err
		}
	}
	return nil
}
// _filePath joins dir with the slash-separated asset name, first
// normalizing any backslashes in name, and returns an OS-specific path.
func _filePath(dir, name string) string {
	canonical := strings.Replace(name, "\\", "/", -1)
	segments := append([]string{dir}, strings.Split(canonical, "/")...)
	return filepath.Join(segments...)
}
// chore(marathon): gen templates.
// Code generated by go-bindata.
// sources:
// templates/consul_catalog.tmpl
// templates/docker.tmpl
// templates/ecs.tmpl
// templates/eureka.tmpl
// templates/kubernetes.tmpl
// templates/kv.tmpl
// templates/marathon.tmpl
// templates/mesos.tmpl
// templates/notFound.tmpl
// templates/rancher.tmpl
// DO NOT EDIT!
package gentemplates
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
)
// asset pairs an embedded file's raw contents with its synthetic file info.
type asset struct {
	bytes []byte      // raw file contents
	info  os.FileInfo // metadata describing the embedded file
}
// bindataFileInfo implements os.FileInfo for embedded assets using static
// metadata recorded at generation time. In this build the generators pass
// size 0, mode 0 and modTime Unix(0, 0), so only Name carries real data.
type bindataFileInfo struct {
	name    string      // slash-separated asset path, e.g. "templates/docker.tmpl"
	size    int64       // recorded size
	mode    os.FileMode // recorded file mode
	modTime time.Time   // recorded modification time
}

// Name returns the recorded asset name.
func (fi bindataFileInfo) Name() string {
	return fi.name
}

// Size returns the recorded asset size.
func (fi bindataFileInfo) Size() int64 {
	return fi.size
}

// Mode returns the recorded file mode.
func (fi bindataFileInfo) Mode() os.FileMode {
	return fi.mode
}

// ModTime returns the recorded modification time.
func (fi bindataFileInfo) ModTime() time.Time {
	return fi.modTime
}

// IsDir always reports false: every asset is a file.
func (fi bindataFileInfo) IsDir() bool {
	return false
}

// Sys returns nil; there is no underlying data source.
func (fi bindataFileInfo) Sys() interface{} {
	return nil
}
// _templatesConsul_catalogTmpl is the embedded source of
// "templates/consul_catalog.tmpl", the Go text/template rendering Traefik
// configuration for the Consul Catalog provider. Generated data; do not edit.
var _templatesConsul_catalogTmpl = []byte(`[backends]
{{range $index, $node := .Nodes}}
[backends."backend-{{getBackend $node}}".servers."{{getBackendName $node $index}}"]
url = "{{getAttribute "protocol" $node.Service.Tags "http"}}://{{getBackendAddress $node}}:{{$node.Service.Port}}"
{{$weight := getAttribute "backend.weight" $node.Service.Tags "0"}}
{{with $weight}}
weight = {{$weight}}
{{end}}
{{end}}
{{range .Services}}
{{$service := .ServiceName}}
{{$circuitBreaker := getAttribute "backend.circuitbreaker" .Attributes ""}}
{{with $circuitBreaker}}
[backends."backend-{{$service}}".circuitbreaker]
expression = "{{$circuitBreaker}}"
{{end}}
[backends."backend-{{$service}}".loadbalancer]
method = "{{getAttribute "backend.loadbalancer" .Attributes "wrr"}}"
sticky = {{getSticky .Attributes}}
{{if hasStickinessLabel .Attributes}}
[backends."backend-{{$service}}".loadbalancer.stickiness]
cookieName = "{{getStickinessCookieName .Attributes}}"
{{end}}
{{if hasMaxconnAttributes .Attributes}}
[backends."backend-{{$service}}".maxconn]
amount = {{getAttribute "backend.maxconn.amount" .Attributes "" }}
extractorfunc = "{{getAttribute "backend.maxconn.extractorfunc" .Attributes "" }}"
{{end}}
{{end}}
[frontends]
{{range .Services}}
[frontends."frontend-{{.ServiceName}}"]
backend = "backend-{{.ServiceName}}"
passHostHeader = {{getAttribute "frontend.passHostHeader" .Attributes "true"}}
priority = {{getAttribute "frontend.priority" .Attributes "0"}}
{{$entryPoints := getAttribute "frontend.entrypoints" .Attributes ""}}
{{with $entryPoints}}
entrypoints = [{{range getEntryPoints $entryPoints}}
"{{.}}",
{{end}}]
{{end}}
basicAuth = [{{range getBasicAuth .Attributes}}
"{{.}}",
{{end}}]
[frontends."frontend-{{.ServiceName}}".routes."route-host-{{.ServiceName}}"]
rule = "{{getFrontendRule .}}"
{{end}}
`)
// templatesConsul_catalogTmplBytes returns the raw bytes of the embedded
// "templates/consul_catalog.tmpl" asset. It never fails; the error return
// exists to satisfy the generated loader signature.
func templatesConsul_catalogTmplBytes() ([]byte, error) {
	return _templatesConsul_catalogTmpl, nil
}
// templatesConsul_catalogTmpl builds the asset descriptor for
// "templates/consul_catalog.tmpl": the raw bytes plus synthetic file info
// (size/mode/mtime are zero because go-bindata recorded none).
func templatesConsul_catalogTmpl() (*asset, error) {
	data, err := templatesConsul_catalogTmplBytes()
	if err != nil {
		return nil, err
	}
	fi := bindataFileInfo{name: "templates/consul_catalog.tmpl", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	return &asset{bytes: data, info: fi}, nil
}
// _templatesDockerTmpl is the embedded source of "templates/docker.tmpl",
// the Go text/template rendering Traefik configuration for the Docker
// provider (both plain containers and label-defined services).
// Generated data; do not edit.
var _templatesDockerTmpl = []byte(`{{$backendServers := .Servers}}
[backends]
{{range $backendName, $backend := .Backends}}
{{if hasCircuitBreakerLabel $backend}}
[backends.backend-{{$backendName}}.circuitBreaker]
expression = "{{getCircuitBreakerExpression $backend}}"
{{end}}
{{if hasLoadBalancerLabel $backend}}
[backends.backend-{{$backendName}}.loadBalancer]
method = "{{getLoadBalancerMethod $backend}}"
sticky = {{getSticky $backend}}
{{if hasStickinessLabel $backend}}
[backends.backend-{{$backendName}}.loadBalancer.stickiness]
cookieName = "{{getStickinessCookieName $backend}}"
{{end}}
{{end}}
{{if hasMaxConnLabels $backend}}
[backends.backend-{{$backendName}}.maxConn]
amount = {{getMaxConnAmount $backend}}
extractorFunc = "{{getMaxConnExtractorFunc $backend}}"
{{end}}
{{if hasHealthCheckLabels $backend}}
[backends.backend-{{$backendName}}.healthCheck]
path = "{{getHealthCheckPath $backend}}"
port = {{getHealthCheckPort $backend}}
interval = "{{getHealthCheckInterval $backend}}"
{{end}}
{{$servers := index $backendServers $backendName}}
{{range $serverName, $server := $servers}}
{{if hasServices $server}}
{{$services := getServiceNames $server}}
{{range $serviceIndex, $serviceName := $services}}
[backends.backend-{{getServiceBackend $server $serviceName}}.servers.service-{{$serverName}}]
url = "{{getServiceProtocol $server $serviceName}}://{{getIPAddress $server}}:{{getServicePort $server $serviceName}}"
weight = {{getServiceWeight $server $serviceName}}
{{end}}
{{else}}
[backends.backend-{{$backendName}}.servers.server-{{$server.Name | replace "/" "" | replace "." "-"}}]
url = "{{getProtocol $server}}://{{getIPAddress $server}}:{{getPort $server}}"
weight = {{getWeight $server}}
{{end}}
{{end}}
{{end}}
[frontends]
{{range $frontend, $containers := .Frontends}}
{{$container := index $containers 0}}
{{if hasServices $container}}
{{$services := getServiceNames $container}}
{{range $serviceIndex, $serviceName := $services}}
[frontends."frontend-{{getServiceBackend $container $serviceName}}"]
backend = "backend-{{getServiceBackend $container $serviceName}}"
priority = {{getServicePriority $container $serviceName}}
passHostHeader = {{getServicePassHostHeader $container $serviceName}}
passTLSCert = {{getServicePassTLSCert $container $serviceName}}
entryPoints = [{{range getServiceEntryPoints $container $serviceName}}
"{{.}}",
{{end}}]
{{if getServiceWhitelistSourceRange $container $serviceName}}
whitelistSourceRange = [{{range getServiceWhitelistSourceRange $container $serviceName}}
"{{.}}",
{{end}}]
{{end}}
basicAuth = [{{range getServiceBasicAuth $container $serviceName}}
"{{.}}",
{{end}}]
{{if hasServiceRedirect $container $serviceName}}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".redirect]
entryPoint = "{{getServiceRedirectEntryPoint $container $serviceName}}"
regex = "{{getServiceRedirectRegex $container $serviceName}}"
replacement = "{{getServiceRedirectReplacement $container $serviceName}}"
{{end}}
{{ if hasServiceErrorPages $container $serviceName }}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".errors]
{{ range $pageName, $page := getServiceErrorPages $container $serviceName }}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".errors.{{$pageName}}]
status = [{{range $page.Status}}
"{{.}}",
{{end}}]
backend = "{{$page.Backend}}"
query = "{{$page.Query}}"
{{end}}
{{end}}
{{ if hasServiceRateLimits $container $serviceName }}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".rateLimit]
extractorFunc = "{{ getRateLimitsExtractorFunc $container $serviceName }}"
[frontends."frontend-{{getServiceBackend $container $serviceName}}".rateLimit.rateSet]
{{ range $limitName, $rateLimit := getServiceRateLimits $container $serviceName }}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".rateLimit.rateSet.{{ $limitName }}]
period = "{{ $rateLimit.Period }}"
average = {{ $rateLimit.Average }}
burst = {{ $rateLimit.Burst }}
{{end}}
{{end}}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".routes."service-{{$serviceName | replace "/" "" | replace "." "-"}}"]
rule = "{{getServiceFrontendRule $container $serviceName}}"
{{if hasServiceRequestHeaders $container $serviceName}}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".headers.customRequestHeaders]
{{range $k, $v := getServiceRequestHeaders $container $serviceName}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if hasServiceResponseHeaders $container $serviceName}}
[frontends."frontend-{{getServiceBackend $container $serviceName}}".headers.customResponseHeaders]
{{range $k, $v := getServiceResponseHeaders $container $serviceName}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{end}} ## end range services
{{else}}
[frontends."frontend-{{$frontend}}"]
backend = "backend-{{getBackend $container}}"
priority = {{getPriority $container}}
passHostHeader = {{getPassHostHeader $container}}
passTLSCert = {{getPassTLSCert $container}}
entryPoints = [{{range getEntryPoints $container}}
"{{.}}",
{{end}}]
{{if getWhitelistSourceRange $container}}
whitelistSourceRange = [{{range getWhitelistSourceRange $container}}
"{{.}}",
{{end}}]
{{end}}
basicAuth = [{{range getBasicAuth $container}}
"{{.}}",
{{end}}]
{{if hasRedirect $container}}
[frontends."frontend-{{$frontend}}".redirect]
entryPoint = "{{getRedirectEntryPoint $container}}"
regex = "{{getRedirectRegex $container}}"
replacement = "{{getRedirectReplacement $container}}"
{{end}}
{{ if hasErrorPages $container }}
[frontends."frontend-{{$frontend}}".errors]
{{ range $pageName, $page := getErrorPages $container }}
[frontends."frontend-{{$frontend}}".errors.{{ $pageName }}]
status = [{{range $page.Status}}
"{{.}}",
{{end}}]
backend = "{{$page.Backend}}"
query = "{{$page.Query}}"
{{end}}
{{end}}
{{ if hasRateLimits $container }}
[frontends."frontend-{{$frontend}}".rateLimit]
extractorFunc = "{{ getRateLimitsExtractorFunc $container }}"
[frontends."frontend-{{$frontend}}".rateLimit.rateSet]
{{ range $limitName, $rateLimit := getRateLimits $container }}
[frontends."frontend-{{$frontend}}".rateLimit.rateSet.{{ $limitName }}]
period = "{{ $rateLimit.Period }}"
average = {{ $rateLimit.Average }}
burst = {{ $rateLimit.Burst }}
{{end}}
{{end}}
[frontends."frontend-{{$frontend}}".headers]
{{if hasSSLRedirectHeaders $container}}
SSLRedirect = {{getSSLRedirectHeaders $container}}
{{end}}
{{if hasSSLTemporaryRedirectHeaders $container}}
SSLTemporaryRedirect = {{getSSLTemporaryRedirectHeaders $container}}
{{end}}
{{if hasSSLHostHeaders $container}}
SSLHost = "{{getSSLHostHeaders $container}}"
{{end}}
{{if hasSTSSecondsHeaders $container}}
STSSeconds = {{getSTSSecondsHeaders $container}}
{{end}}
{{if hasSTSIncludeSubdomainsHeaders $container}}
STSIncludeSubdomains = {{getSTSIncludeSubdomainsHeaders $container}}
{{end}}
{{if hasSTSPreloadHeaders $container}}
STSPreload = {{getSTSPreloadHeaders $container}}
{{end}}
{{if hasForceSTSHeaderHeaders $container}}
ForceSTSHeader = {{getForceSTSHeaderHeaders $container}}
{{end}}
{{if hasFrameDenyHeaders $container}}
FrameDeny = {{getFrameDenyHeaders $container}}
{{end}}
{{if hasCustomFrameOptionsValueHeaders $container}}
CustomFrameOptionsValue = "{{getCustomFrameOptionsValueHeaders $container}}"
{{end}}
{{if hasContentTypeNosniffHeaders $container}}
ContentTypeNosniff = {{getContentTypeNosniffHeaders $container}}
{{end}}
{{if hasBrowserXSSFilterHeaders $container}}
BrowserXSSFilter = {{getBrowserXSSFilterHeaders $container}}
{{end}}
{{if hasContentSecurityPolicyHeaders $container}}
ContentSecurityPolicy = "{{getContentSecurityPolicyHeaders $container}}"
{{end}}
{{if hasPublicKeyHeaders $container}}
PublicKey = "{{getPublicKeyHeaders $container}}"
{{end}}
{{if hasReferrerPolicyHeaders $container}}
ReferrerPolicy = "{{getReferrerPolicyHeaders $container}}"
{{end}}
{{if hasIsDevelopmentHeaders $container}}
IsDevelopment = {{getIsDevelopmentHeaders $container}}
{{end}}
{{if hasRequestHeaders $container}}
[frontends."frontend-{{$frontend}}".headers.customRequestHeaders]
{{range $k, $v := getRequestHeaders $container}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if hasResponseHeaders $container}}
[frontends."frontend-{{$frontend}}".headers.customResponseHeaders]
{{range $k, $v := getResponseHeaders $container}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if hasAllowedHostsHeaders $container}}
[frontends."frontend-{{$frontend}}".headers.AllowedHosts]
{{range getAllowedHostsHeaders $container}}
"{{.}}"
{{end}}
{{end}}
{{if hasHostsProxyHeaders $container}}
[frontends."frontend-{{$frontend}}".headers.HostsProxyHeaders]
{{range getHostsProxyHeaders $container}}
"{{.}}"
{{end}}
{{end}}
{{if hasSSLProxyHeaders $container}}
[frontends."frontend-{{$frontend}}".headers.SSLProxyHeaders]
{{range $k, $v := getSSLProxyHeaders $container}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
[frontends."frontend-{{$frontend}}".routes."route-frontend-{{$frontend}}"]
rule = "{{getFrontendRule $container}}"
{{end}}
{{end}}
`)
// templatesDockerTmplBytes returns the raw bytes of the embedded
// "templates/docker.tmpl" asset. It never fails; the error return
// exists to satisfy the generated loader signature.
func templatesDockerTmplBytes() ([]byte, error) {
	return _templatesDockerTmpl, nil
}
// templatesDockerTmpl builds the asset descriptor for
// "templates/docker.tmpl": the raw bytes plus synthetic file info
// (size/mode/mtime are zero because go-bindata recorded none).
func templatesDockerTmpl() (*asset, error) {
	data, err := templatesDockerTmplBytes()
	if err != nil {
		return nil, err
	}
	fi := bindataFileInfo{name: "templates/docker.tmpl", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	return &asset{bytes: data, info: fi}, nil
}
// _templatesEcsTmpl is the embedded source of "templates/ecs.tmpl",
// the Go text/template rendering Traefik configuration for the AWS ECS
// provider. Generated data; do not edit.
var _templatesEcsTmpl = []byte(`[backends]{{range $serviceName, $instances := .Services}}
[backends.backend-{{ $serviceName }}.loadbalancer]
method = "{{ getLoadBalancerMethod $instances}}"
sticky = {{ getSticky $instances}}
{{if hasStickinessLabel $instances}}
[backends.backend-{{ $serviceName }}.loadbalancer.stickiness]
cookieName = "{{getStickinessCookieName $instances}}"
{{end}}
{{ if hasHealthCheckLabels $instances }}
[backends.backend-{{ $serviceName }}.healthcheck]
path = "{{getHealthCheckPath $instances }}"
interval = "{{getHealthCheckInterval $instances }}"
{{end}}
{{range $index, $i := $instances}}
[backends.backend-{{ $i.Name }}.servers.server-{{ $i.Name }}{{ $i.ID }}]
url = "{{ getProtocol $i }}://{{ getHost $i }}:{{ getPort $i }}"
weight = {{ getWeight $i}}
{{end}}
{{end}}
[frontends]{{range $serviceName, $instances := .Services}}
{{range filterFrontends $instances}}
[frontends.frontend-{{ $serviceName }}]
backend = "backend-{{ $serviceName }}"
passHostHeader = {{ getPassHostHeader .}}
priority = {{ getPriority .}}
entryPoints = [{{range getEntryPoints .}}
"{{.}}",
{{end}}]
basicAuth = [{{range getBasicAuth .}}
"{{.}}",
{{end}}]
[frontends.frontend-{{ $serviceName }}.routes.route-frontend-{{ $serviceName }}]
rule = "{{getFrontendRule .}}"
{{end}}
{{end}}`)
// templatesEcsTmplBytes returns the raw bytes of the embedded
// "templates/ecs.tmpl" asset. It never fails; the error return
// exists to satisfy the generated loader signature.
func templatesEcsTmplBytes() ([]byte, error) {
	return _templatesEcsTmpl, nil
}
// templatesEcsTmpl builds the asset descriptor for "templates/ecs.tmpl":
// the raw bytes plus synthetic file info (size/mode/mtime are zero
// because go-bindata recorded none).
func templatesEcsTmpl() (*asset, error) {
	data, err := templatesEcsTmplBytes()
	if err != nil {
		return nil, err
	}
	fi := bindataFileInfo{name: "templates/ecs.tmpl", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	return &asset{bytes: data, info: fi}, nil
}
// _templatesEurekaTmpl is the embedded source of "templates/eureka.tmpl",
// the Go text/template rendering Traefik configuration for the Eureka
// provider. Generated data; do not edit.
var _templatesEurekaTmpl = []byte(`[backends]{{range .Applications}}
{{ $app := .}}
{{range .Instances}}
[backends.backend{{$app.Name}}.servers.server-{{ getInstanceID . }}]
url = "{{ getProtocol . }}://{{ .IpAddr }}:{{ getPort . }}"
weight = {{ getWeight . }}
{{end}}{{end}}
[frontends]{{range .Applications}}
[frontends.frontend{{.Name}}]
backend = "backend{{.Name}}"
entryPoints = ["http"]
[frontends.frontend{{.Name }}.routes.route-host{{.Name}}]
rule = "Host:{{ .Name | tolower }}"
{{end}}
`)
// templatesEurekaTmplBytes returns the raw bytes of the embedded
// "templates/eureka.tmpl" asset. It never fails; the error return
// exists to satisfy the generated loader signature.
func templatesEurekaTmplBytes() ([]byte, error) {
	return _templatesEurekaTmpl, nil
}
// templatesEurekaTmpl builds the asset descriptor for
// "templates/eureka.tmpl": the raw bytes plus synthetic file info
// (size/mode/mtime are zero because go-bindata recorded none).
func templatesEurekaTmpl() (*asset, error) {
	data, err := templatesEurekaTmplBytes()
	if err != nil {
		return nil, err
	}
	fi := bindataFileInfo{name: "templates/eureka.tmpl", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	return &asset{bytes: data, info: fi}, nil
}
// _templatesKubernetesTmpl is the embedded source of
// "templates/kubernetes.tmpl", the Go text/template rendering Traefik
// configuration for the Kubernetes Ingress provider.
// Generated data; do not edit.
var _templatesKubernetesTmpl = []byte(`[backends]{{range $backendName, $backend := .Backends}}
[backends."{{$backendName}}"]
{{if $backend.CircuitBreaker}}
[backends."{{$backendName}}".circuitbreaker]
expression = "{{$backend.CircuitBreaker.Expression}}"
{{end}}
[backends."{{$backendName}}".loadbalancer]
method = "{{$backend.LoadBalancer.Method}}"
{{if $backend.LoadBalancer.Sticky}}
sticky = true
{{end}}
{{if $backend.LoadBalancer.Stickiness}}
[backends."{{$backendName}}".loadbalancer.stickiness]
cookieName = "{{$backend.LoadBalancer.Stickiness.CookieName}}"
{{end}}
{{range $serverName, $server := $backend.Servers}}
[backends."{{$backendName}}".servers."{{$serverName}}"]
url = "{{$server.URL}}"
weight = {{$server.Weight}}
{{end}}
{{end}}
[frontends]{{range $frontendName, $frontend := .Frontends}}
[frontends."{{$frontendName}}"]
backend = "{{$frontend.Backend}}"
priority = {{$frontend.Priority}}
passHostHeader = {{$frontend.PassHostHeader}}
entryPoints = [{{range $frontend.EntryPoints}}
"{{.}}",
{{end}}]
basicAuth = [{{range $frontend.BasicAuth}}
"{{.}}",
{{end}}]
whitelistSourceRange = [{{range $frontend.WhitelistSourceRange}}
"{{.}}",
{{end}}]
{{if $frontend.Redirect}}
[frontends."{{$frontendName}}".redirect]
entryPoint = "{{$frontend.RedirectEntryPoint}}"
regex = "{{$frontend.RedirectRegex}}"
replacement = "{{$frontend.RedirectReplacement}}"
{{end}}
[frontends."{{$frontendName}}".headers]
SSLRedirect = {{$frontend.Headers.SSLRedirect}}
SSLTemporaryRedirect = {{$frontend.Headers.SSLTemporaryRedirect}}
SSLHost = "{{$frontend.Headers.SSLHost}}"
STSSeconds = {{$frontend.Headers.STSSeconds}}
STSIncludeSubdomains = {{$frontend.Headers.STSIncludeSubdomains}}
STSPreload = {{$frontend.Headers.STSPreload}}
ForceSTSHeader = {{$frontend.Headers.ForceSTSHeader}}
FrameDeny = {{$frontend.Headers.FrameDeny}}
CustomFrameOptionsValue = "{{$frontend.Headers.CustomFrameOptionsValue}}"
ContentTypeNosniff = {{$frontend.Headers.ContentTypeNosniff}}
BrowserXSSFilter = {{$frontend.Headers.BrowserXSSFilter}}
ContentSecurityPolicy = "{{$frontend.Headers.ContentSecurityPolicy}}"
PublicKey = "{{$frontend.Headers.PublicKey}}"
ReferrerPolicy = "{{$frontend.Headers.ReferrerPolicy}}"
IsDevelopment = {{$frontend.Headers.IsDevelopment}}
{{if $frontend.Headers.CustomRequestHeaders}}
[frontends."{{$frontendName}}".headers.customrequestheaders]
{{range $k, $v := $frontend.Headers.CustomRequestHeaders}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if $frontend.Headers.CustomResponseHeaders}}
[frontends."{{$frontendName}}".headers.customresponseheaders]
{{range $k, $v := $frontend.Headers.CustomResponseHeaders}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if $frontend.Headers.AllowedHosts}}
[frontends."{{$frontendName}}".headers.AllowedHosts]
{{range $frontend.Headers.AllowedHosts}}
"{{.}}"
{{end}}
{{end}}
{{if $frontend.Headers.HostsProxyHeaders}}
[frontends."{{$frontendName}}".headers.HostsProxyHeaders]
{{range $frontend.Headers.HostsProxyHeaders}}
"{{.}}"
{{end}}
{{end}}
{{if $frontend.Headers.SSLProxyHeaders}}
[frontends."{{$frontendName}}".headers.SSLProxyHeaders]
{{range $k, $v := $frontend.Headers.SSLProxyHeaders}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{range $routeName, $route := $frontend.Routes}}
[frontends."{{$frontendName}}".routes."{{$routeName}}"]
rule = "{{$route.Rule}}"
{{end}}
{{end}}
`)
// templatesKubernetesTmplBytes returns the raw bytes of the embedded
// "templates/kubernetes.tmpl" asset. It never fails; the error return
// exists to satisfy the generated loader signature.
func templatesKubernetesTmplBytes() ([]byte, error) {
	return _templatesKubernetesTmpl, nil
}
// templatesKubernetesTmpl builds the asset descriptor for
// "templates/kubernetes.tmpl": the raw bytes plus synthetic file info
// (size/mode/mtime are zero because go-bindata recorded none).
func templatesKubernetesTmpl() (*asset, error) {
	data, err := templatesKubernetesTmplBytes()
	if err != nil {
		return nil, err
	}
	fi := bindataFileInfo{name: "templates/kubernetes.tmpl", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	return &asset{bytes: data, info: fi}, nil
}
// _templatesKvTmpl is the embedded source of "templates/kv.tmpl",
// the Go text/template rendering Traefik configuration from a key/value
// store (Consul KV, etcd, etc.). Generated data; do not edit.
var _templatesKvTmpl = []byte(`{{$frontends := List .Prefix "/frontends/" }}
{{$backends := List .Prefix "/backends/"}}
{{$tlsconfiguration := List .Prefix "/tlsconfiguration/"}}
[backends]{{range $backends}}
{{$backend := .}}
{{$backendName := Last $backend}}
{{$servers := ListServers $backend }}
{{$circuitBreaker := Get "" . "/circuitbreaker/" "expression"}}
{{with $circuitBreaker}}
[backends."{{$backendName}}".circuitBreaker]
expression = "{{$circuitBreaker}}"
{{end}}
{{$loadBalancer := Get "" . "/loadbalancer/" "method"}}
{{with $loadBalancer}}
[backends."{{$backendName}}".loadBalancer]
method = "{{$loadBalancer}}"
sticky = {{ getSticky . }}
{{if hasStickinessLabel $backend}}
[backends."{{$backendName}}".loadBalancer.stickiness]
cookieName = "{{getStickinessCookieName $backend}}"
{{end}}
{{end}}
{{$healthCheck := Get "" . "/healthcheck/" "path"}}
{{with $healthCheck}}
[backends."{{$backendName}}".healthCheck]
path = "{{$healthCheck}}"
interval = "{{ Get "30s" $backend "/healthcheck/" "interval" }}"
{{end}}
{{$maxConnAmt := Get "" . "/maxconn/" "amount"}}
{{$maxConnExtractorFunc := Get "" . "/maxconn/" "extractorfunc"}}
{{with $maxConnAmt}}
{{with $maxConnExtractorFunc}}
[backends."{{$backendName}}".maxConn]
amount = {{$maxConnAmt}}
extractorFunc = "{{$maxConnExtractorFunc}}"
{{end}}
{{end}}
{{range $servers}}
[backends."{{$backendName}}".servers."{{Last .}}"]
url = "{{Get "" . "/url"}}"
weight = {{Get "0" . "/weight"}}
{{end}}
{{end}}
[frontends]{{range $frontends}}
{{$frontend := Last .}}
{{$entryPoints := SplitGet . "/entrypoints"}}
[frontends."{{$frontend}}"]
backend = "{{Get "" . "/backend"}}"
passHostHeader = {{Get "true" . "/passHostHeader"}}
priority = {{Get "0" . "/priority"}}
entryPoints = [{{range $entryPoints}}
"{{.}}",
{{end}}]
{{$routes := List . "/routes/"}}
{{range $routes}}
[frontends."{{$frontend}}".routes."{{Last .}}"]
rule = "{{Get "" . "/rule"}}"
{{end}}
{{end}}
{{range $tlsconfiguration}}
{{$entryPoints := SplitGet . "/entrypoints"}}
[[tlsConfiguration]]
entryPoints = [{{range $entryPoints}}
"{{.}}",
{{end}}]
[tlsConfiguration.certificate]
certFile = """{{Get "" . "/certificate" "/certfile"}}"""
keyFile = """{{Get "" . "/certificate" "/keyfile"}}"""
{{end}}
`)
// templatesKvTmplBytes returns the raw bytes of the embedded
// "templates/kv.tmpl" asset. It never fails; the error return
// exists to satisfy the generated loader signature.
func templatesKvTmplBytes() ([]byte, error) {
	return _templatesKvTmpl, nil
}
// templatesKvTmpl builds the asset descriptor for "templates/kv.tmpl":
// the raw bytes plus synthetic file info (size/mode/mtime are zero
// because go-bindata recorded none).
func templatesKvTmpl() (*asset, error) {
	data, err := templatesKvTmplBytes()
	if err != nil {
		return nil, err
	}
	fi := bindataFileInfo{name: "templates/kv.tmpl", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	return &asset{bytes: data, info: fi}, nil
}
// _templatesMarathonTmpl is the embedded source of
// "templates/marathon.tmpl", the Go text/template rendering Traefik
// configuration for the Marathon provider. Generated data; do not edit.
var _templatesMarathonTmpl = []byte(`{{$apps := .Applications}}
[backends]
{{range $app := $apps}}
{{range $serviceIndex, $serviceName := getServiceNames $app}}
[backends."{{getBackend $app $serviceName }}"]
{{ if hasCircuitBreakerLabels $app }}
[backends."{{getBackend $app $serviceName }}".circuitBreaker]
expression = "{{getCircuitBreakerExpression $app }}"
{{end}}
{{ if hasLoadBalancerLabels $app }}
[backends."{{getBackend $app $serviceName }}".loadBalancer]
method = "{{getLoadBalancerMethod $app }}"
sticky = {{getSticky $app}}
{{if hasStickinessLabel $app}}
[backends."{{getBackend $app $serviceName }}".loadBalancer.stickiness]
cookieName = "{{getStickinessCookieName $app}}"
{{end}}
{{end}}
{{ if hasMaxConnLabels $app }}
[backends."{{getBackend $app $serviceName }}".maxConn]
amount = {{getMaxConnAmount $app }}
extractorFunc = "{{getMaxConnExtractorFunc $app }}"
{{end}}
{{ if hasHealthCheckLabels $app }}
[backends."{{getBackend $app $serviceName }}".healthCheck]
path = "{{getHealthCheckPath $app }}"
port = {{getHealthCheckPort $app}}
interval = "{{getHealthCheckInterval $app }}"
{{end}}
{{end}}
{{range $task := $app.Tasks}}
{{range $serviceIndex, $serviceName := getServiceNames $app}}
[backends."{{getBackend $app $serviceName}}".servers."server-{{$task.ID | replace "." "-"}}{{getServiceNameSuffix $serviceName }}"]
url = "{{getProtocol $app $serviceName}}://{{getBackendServer $task $app}}:{{getPort $task $app $serviceName}}"
weight = {{getWeight $app $serviceName}}
{{end}}
{{end}}
{{end}}
[frontends]
{{range $app := $apps}}
{{range $serviceIndex, $serviceName := getServiceNames .}}
[frontends."{{ getFrontendName $app $serviceName }}"]
backend = "{{getBackend $app $serviceName}}"
priority = {{getPriority $app $serviceName}}
passHostHeader = {{getPassHostHeader $app $serviceName}}
passTLSCert = {{getPassTLSCert $app $serviceName}}
entryPoints = [{{range getEntryPoints $app $serviceName}}
"{{.}}",
{{end}}]
{{if getWhitelistSourceRange $app $serviceName}}
whitelistSourceRange = [{{range getWhitelistSourceRange $app $serviceName}}
"{{.}}",
{{end}}]
{{end}}
basicAuth = [{{range getBasicAuth $app $serviceName}}
"{{.}}",
{{end}}]
{{if hasRedirect $app $serviceName}}
[frontends."{{ getFrontendName $app $serviceName }}".redirect]
entryPoint = "{{getRedirectEntryPoint $app $serviceName}}"
regex = "{{getRedirectRegex $app $serviceName}}"
replacement = "{{getRedirectReplacement $app $serviceName}}"
{{end}}
{{ if hasErrorPages $app $serviceName }}
[frontends."{{ getFrontendName $app $serviceName }}".errors]
{{ range $pageName, $page := getErrorPages $app $serviceName }}
[frontends."{{ getFrontendName $app $serviceName }}".errors.{{ $pageName }}]
status = [{{range $page.Status}}
"{{.}}",
{{end}}]
backend = "{{$page.Backend}}"
query = "{{$page.Query}}"
{{end}}
{{end}}
{{ if hasRateLimits $app $serviceName }}
[frontends."{{ getFrontendName $app $serviceName }}".rateLimit]
extractorFunc = "{{ getRateLimitsExtractorFunc $app $serviceName }}"
[frontends."{{ getFrontendName $app $serviceName }}".rateLimit.rateSet]
{{ range $limitName, $rateLimit := getRateLimits $app $serviceName }}
[frontends."{{ getFrontendName $app $serviceName }}".rateLimit.rateSet.{{ $limitName }}]
period = "{{ $rateLimit.Period }}"
average = {{ $rateLimit.Average }}
burst = {{ $rateLimit.Burst }}
{{end}}
{{end}}
[frontends."{{ getFrontendName $app $serviceName }}".headers]
{{if hasSSLRedirectHeaders $app $serviceName}}
SSLRedirect = {{getSSLRedirectHeaders $app $serviceName}}
{{end}}
{{if hasSSLTemporaryRedirectHeaders $app $serviceName}}
SSLTemporaryRedirect = {{getSSLTemporaryRedirectHeaders $app $serviceName}}
{{end}}
{{if hasSSLHostHeaders $app $serviceName}}
SSLHost = "{{getSSLHostHeaders $app $serviceName}}"
{{end}}
{{if hasSTSSecondsHeaders $app $serviceName}}
STSSeconds = {{getSTSSecondsHeaders $app $serviceName}}
{{end}}
{{if hasSTSIncludeSubdomainsHeaders $app $serviceName}}
STSIncludeSubdomains = {{getSTSIncludeSubdomainsHeaders $app $serviceName}}
{{end}}
{{if hasSTSPreloadHeaders $app $serviceName}}
STSPreload = {{getSTSPreloadHeaders $app $serviceName}}
{{end}}
{{if hasForceSTSHeaderHeaders $app $serviceName}}
ForceSTSHeader = {{getForceSTSHeaderHeaders $app $serviceName}}
{{end}}
{{if hasFrameDenyHeaders $app $serviceName}}
FrameDeny = {{getFrameDenyHeaders $app $serviceName}}
{{end}}
{{if hasCustomFrameOptionsValueHeaders $app $serviceName}}
CustomFrameOptionsValue = "{{getCustomFrameOptionsValueHeaders $app $serviceName}}"
{{end}}
{{if hasContentTypeNosniffHeaders $app $serviceName}}
ContentTypeNosniff = {{getContentTypeNosniffHeaders $app $serviceName}}
{{end}}
{{if hasBrowserXSSFilterHeaders $app $serviceName}}
BrowserXSSFilter = {{getBrowserXSSFilterHeaders $app $serviceName}}
{{end}}
{{if hasContentSecurityPolicyHeaders $app $serviceName}}
ContentSecurityPolicy = "{{getContentSecurityPolicyHeaders $app $serviceName}}"
{{end}}
{{if hasPublicKeyHeaders $app $serviceName}}
PublicKey = "{{getPublicKeyHeaders $app $serviceName}}"
{{end}}
{{if hasReferrerPolicyHeaders $app $serviceName}}
ReferrerPolicy = "{{getReferrerPolicyHeaders $app $serviceName}}"
{{end}}
{{if hasIsDevelopmentHeaders $app $serviceName}}
IsDevelopment = {{getIsDevelopmentHeaders $app $serviceName}}
{{end}}
{{if hasRequestHeaders $app $serviceName}}
[frontends."{{ getFrontendName $app $serviceName }}".headers.customRequestHeaders]
{{range $k, $v := getRequestHeaders $app $serviceName}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if hasResponseHeaders $app $serviceName}}
[frontends."{{ getFrontendName $app $serviceName }}".headers.customResponseHeaders]
{{range $k, $v := getResponseHeaders $app $serviceName}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
{{if hasAllowedHostsHeaders $app $serviceName}}
[frontends."{{ getFrontendName $app $serviceName }}".headers.AllowedHosts]
{{range getAllowedHostsHeaders $app $serviceName}}
"{{.}}"
{{end}}
{{end}}
{{if hasHostsProxyHeaders $app $serviceName}}
[frontends."{{ getFrontendName $app $serviceName }}".headers.HostsProxyHeaders]
{{range getHostsProxyHeaders $app $serviceName}}
"{{.}}"
{{end}}
{{end}}
{{if hasSSLProxyHeaders $app $serviceName}}
[frontends."{{ getFrontendName $app $serviceName }}".headers.SSLProxyHeaders]
{{range $k, $v := getSSLProxyHeaders $app $serviceName}}
{{$k}} = "{{$v}}"
{{end}}
{{end}}
[frontends."{{ getFrontendName $app $serviceName }}".routes."route-host{{$app.ID | replace "/" "-"}}{{getServiceNameSuffix $serviceName }}"]
rule = "{{getFrontendRule $app $serviceName}}"
{{end}}
{{end}}
`)
// templatesMarathonTmplBytes returns the raw bytes of the embedded
// "templates/marathon.tmpl" asset. The error is always nil here; the
// signature matches the go-bindata asset-loader convention.
func templatesMarathonTmplBytes() ([]byte, error) {
return _templatesMarathonTmpl, nil
}
// templatesMarathonTmpl wraps the raw "templates/marathon.tmpl" bytes in
// an *asset with placeholder file info (zero size/mode/mtime).
func templatesMarathonTmpl() (*asset, error) {
	data, err := templatesMarathonTmplBytes()
	if err != nil {
		return nil, err
	}
	fi := bindataFileInfo{name: "templates/marathon.tmpl", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	return &asset{bytes: data, info: fi}, nil
}
// _templatesMesosTmpl holds the raw Go template used to render the Traefik
// configuration (backends/frontends) for the Mesos provider.
var _templatesMesosTmpl = []byte(`{{$apps := .Applications}}
[backends]{{range .Tasks}}
[backends.backend{{getBackend . $apps}}.servers.server-{{getID .}}]
url = "{{getProtocol . $apps}}://{{getHost .}}:{{getPort . $apps}}"
weight = {{getWeight . $apps}}
{{end}}
[frontends]{{range .Applications}}
[frontends.frontend-{{getFrontEndName .}}]
backend = "backend{{getFrontendBackend .}}"
passHostHeader = {{getPassHostHeader .}}
priority = {{getPriority .}}
entryPoints = [{{range getEntryPoints .}}
"{{.}}",
{{end}}]
[frontends.frontend-{{getFrontEndName .}}.routes.route-host{{getFrontEndName .}}]
rule = "{{getFrontendRule .}}"
{{end}}
`)
// templatesMesosTmplBytes returns the raw bytes of the embedded
// "templates/mesos.tmpl" asset; the error is always nil.
func templatesMesosTmplBytes() ([]byte, error) {
return _templatesMesosTmpl, nil
}
// templatesMesosTmpl wraps the raw "templates/mesos.tmpl" bytes in an
// *asset with placeholder file info (zero size/mode/mtime).
func templatesMesosTmpl() (*asset, error) {
	data, err := templatesMesosTmplBytes()
	if err != nil {
		return nil, err
	}
	fi := bindataFileInfo{name: "templates/mesos.tmpl", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	return &asset{bytes: data, info: fi}, nil
}
// _templatesNotfoundTmpl holds the static HTML page served by Traefik when
// no frontend matches the request (404 page).
var _templatesNotfoundTmpl = []byte(`<!DOCTYPE html>
<html>
<head>
<title>Traefik</title>
</head>
<body>
Ohhhh man, this is bad...
</body>
</html>`)
// templatesNotfoundTmplBytes returns the raw bytes of the embedded
// "templates/notFound.tmpl" asset; the error is always nil.
func templatesNotfoundTmplBytes() ([]byte, error) {
return _templatesNotfoundTmpl, nil
}
// templatesNotfoundTmpl wraps the raw "templates/notFound.tmpl" bytes in
// an *asset with placeholder file info (zero size/mode/mtime).
func templatesNotfoundTmpl() (*asset, error) {
	data, err := templatesNotfoundTmplBytes()
	if err != nil {
		return nil, err
	}
	fi := bindataFileInfo{name: "templates/notFound.tmpl", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	return &asset{bytes: data, info: fi}, nil
}
// _templatesRancherTmpl holds the raw Go template used to render the Traefik
// configuration (backends/frontends, circuit breakers, stickiness, redirects)
// for the Rancher provider.
var _templatesRancherTmpl = []byte(`{{$backendServers := .Backends}}
[backends]{{range $backendName, $backend := .Backends}}
{{if hasCircuitBreakerLabel $backend}}
[backends.backend-{{$backendName}}.circuitbreaker]
expression = "{{getCircuitBreakerExpression $backend}}"
{{end}}
{{if hasLoadBalancerLabel $backend}}
[backends.backend-{{$backendName}}.loadbalancer]
method = "{{getLoadBalancerMethod $backend}}"
sticky = {{getSticky $backend}}
{{if hasStickinessLabel $backend}}
[backends.backend-{{$backendName}}.loadbalancer.stickiness]
cookieName = "{{getStickinessCookieName $backend}}"
{{end}}
{{end}}
{{if hasMaxConnLabels $backend}}
[backends.backend-{{$backendName}}.maxconn]
amount = {{getMaxConnAmount $backend}}
extractorfunc = "{{getMaxConnExtractorFunc $backend}}"
{{end}}
{{range $index, $ip := $backend.Containers}}
[backends.backend-{{$backendName}}.servers.server-{{$index}}]
url = "{{getProtocol $backend}}://{{$ip}}:{{getPort $backend}}"
weight = {{getWeight $backend}}
{{end}}
{{end}}
[frontends]{{range $frontendName, $service := .Frontends}}
[frontends."frontend-{{$frontendName}}"]
backend = "backend-{{getBackend $service}}"
passHostHeader = {{getPassHostHeader $service}}
priority = {{getPriority $service}}
entryPoints = [{{range getEntryPoints $service}}
"{{.}}",
{{end}}]
basicAuth = [{{range getBasicAuth $service}}
"{{.}}",
{{end}}]
{{if hasRedirect $service}}
[frontends."frontend-{{$frontendName}}".redirect]
entryPoint = "{{getRedirectEntryPoint $service}}"
regex = "{{getRedirectRegex $service}}"
replacement = "{{getRedirectReplacement $service}}"
{{end}}
[frontends."frontend-{{$frontendName}}".routes."route-frontend-{{$frontendName}}"]
rule = "{{getFrontendRule $service}}"
{{end}}
`)
// templatesRancherTmplBytes returns the raw bytes of the embedded
// "templates/rancher.tmpl" asset; the error is always nil.
func templatesRancherTmplBytes() ([]byte, error) {
return _templatesRancherTmpl, nil
}
// templatesRancherTmpl wraps the raw "templates/rancher.tmpl" bytes in an
// *asset with placeholder file info (zero size/mode/mtime).
func templatesRancherTmpl() (*asset, error) {
	data, err := templatesRancherTmplBytes()
	if err != nil {
		return nil, err
	}
	fi := bindataFileInfo{name: "templates/rancher.tmpl", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	return &asset{bytes: data, info: fi}, nil
}
// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
	// Backslashes are normalized so Windows-style names resolve too.
	cannonicalName := strings.Replace(name, "\\", "/", -1)
	f, ok := _bindata[cannonicalName]
	if !ok {
		return nil, fmt.Errorf("Asset %s not found", name)
	}
	a, err := f()
	if err != nil {
		return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
	}
	return a.bytes, nil
}
// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
	data, err := Asset(name)
	if err != nil {
		panic("asset: Asset(" + name + "): " + err.Error())
	}
	return data
}
// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
	// Backslashes are normalized so Windows-style names resolve too.
	cannonicalName := strings.Replace(name, "\\", "/", -1)
	f, ok := _bindata[cannonicalName]
	if !ok {
		return nil, fmt.Errorf("AssetInfo %s not found", name)
	}
	a, err := f()
	if err != nil {
		return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
	}
	return a.info, nil
}
// AssetNames returns the names of the assets.
// Iteration order over the map is unspecified, so the result is unsorted.
func AssetNames() []string {
	out := make([]string, 0, len(_bindata))
	for n := range _bindata {
		out = append(out, n)
	}
	return out
}
// _bindata is a table, holding each asset generator, mapped to its name.
// Keys are slash-separated asset paths; values lazily build the *asset.
var _bindata = map[string]func() (*asset, error){
"templates/consul_catalog.tmpl": templatesConsul_catalogTmpl,
"templates/docker.tmpl": templatesDockerTmpl,
"templates/ecs.tmpl": templatesEcsTmpl,
"templates/eureka.tmpl": templatesEurekaTmpl,
"templates/kubernetes.tmpl": templatesKubernetesTmpl,
"templates/kv.tmpl": templatesKvTmpl,
"templates/marathon.tmpl": templatesMarathonTmpl,
"templates/mesos.tmpl": templatesMesosTmpl,
"templates/notFound.tmpl": templatesNotfoundTmpl,
"templates/rancher.tmpl": templatesRancherTmpl,
}
// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
// data/
// foo.txt
// img/
// a.png
// b.png
// then AssetDir("data") would return []string{"foo.txt", "img"}
// AssetDir("data/img") would return []string{"a.png", "b.png"}
// AssetDir("foo.txt") and AssetDir("notexist") would return an error
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
	node := _bintree
	if name != "" {
		// Walk the tree one path component at a time.
		cannonicalName := strings.Replace(name, "\\", "/", -1)
		for _, part := range strings.Split(cannonicalName, "/") {
			child := node.Children[part]
			if child == nil {
				return nil, fmt.Errorf("Asset %s not found", name)
			}
			node = child
		}
	}
	// A non-nil Func means the node is a file, not a directory.
	if node.Func != nil {
		return nil, fmt.Errorf("Asset %s not found", name)
	}
	children := make([]string, 0, len(node.Children))
	for childName := range node.Children {
		children = append(children, childName)
	}
	return children, nil
}
// bintree is a node in the asset directory tree: Func is non-nil for file
// nodes (it loads the asset) and Children maps path components for
// directory nodes.
type bintree struct {
Func func() (*asset, error)
Children map[string]*bintree
}
// _bintree is the root of the embedded asset directory tree used by
// AssetDir; leaves carry the per-asset loader functions.
var _bintree = &bintree{nil, map[string]*bintree{
"templates": {nil, map[string]*bintree{
"consul_catalog.tmpl": {templatesConsul_catalogTmpl, map[string]*bintree{}},
"docker.tmpl": {templatesDockerTmpl, map[string]*bintree{}},
"ecs.tmpl": {templatesEcsTmpl, map[string]*bintree{}},
"eureka.tmpl": {templatesEurekaTmpl, map[string]*bintree{}},
"kubernetes.tmpl": {templatesKubernetesTmpl, map[string]*bintree{}},
"kv.tmpl": {templatesKvTmpl, map[string]*bintree{}},
"marathon.tmpl": {templatesMarathonTmpl, map[string]*bintree{}},
"mesos.tmpl": {templatesMesosTmpl, map[string]*bintree{}},
"notFound.tmpl": {templatesNotfoundTmpl, map[string]*bintree{}},
"rancher.tmpl": {templatesRancherTmpl, map[string]*bintree{}},
}},
}}
// RestoreAsset restores an asset under the given directory, recreating the
// file with its embedded mode and modification time.
func RestoreAsset(dir, name string) error {
	data, err := Asset(name)
	if err != nil {
		return err
	}
	info, err := AssetInfo(name)
	if err != nil {
		return err
	}
	if err := os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)); err != nil {
		return err
	}
	if err := ioutil.WriteFile(_filePath(dir, name), data, info.Mode()); err != nil {
		return err
	}
	return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
}
// RestoreAssets restores an asset under the given directory recursively.
func RestoreAssets(dir, name string) error {
	children, err := AssetDir(name)
	if err != nil {
		// Name is a file, not a directory: restore it directly.
		return RestoreAsset(dir, name)
	}
	// Name is a directory: recurse into every child.
	for _, child := range children {
		if err := RestoreAssets(dir, filepath.Join(name, child)); err != nil {
			return err
		}
	}
	return nil
}
func _filePath(dir, name string) string {
cannonicalName := strings.Replace(name, "\\", "/", -1)
return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
}
|
package main

import (
	"flag"

	// Import path must match the package identifier used below
	// ("kuiperbelt"); the previous path "github.com/mackee/kuiperbelt_new"
	// did not provide that identifier.
	"github.com/mackee/kuiperbelt"
)

// main parses the -config flag and starts the kuiperbelt server with the
// selected configuration file (defaults to config.yml).
func main() {
	var configFilename string
	flag.StringVar(&configFilename, "config", "config.yml", "config path")
	flag.Parse()
	kuiperbelt.Run(configFilename)
}
fix internal package path
package main
import (
"flag"
"github.com/mackee/kuiperbelt"
)
// main parses the -config flag and starts the kuiperbelt server with the
// selected configuration file (defaults to config.yml).
func main() {
var configFilename string
flag.StringVar(&configFilename, "config", "config.yml", "config path")
flag.Parse()
kuiperbelt.Run(configFilename)
}
|
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"math/rand"
"os"
"time"
"github.com/golang/glog"
flag "github.com/spf13/pflag"
crdclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/client-go/kubernetes"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/client-go/tools/record"
backendconfigclient "k8s.io/ingress-gce/pkg/backendconfig/client/clientset/versioned"
"k8s.io/ingress-gce/pkg/context"
"k8s.io/ingress-gce/pkg/controller"
neg "k8s.io/ingress-gce/pkg/neg"
"k8s.io/ingress-gce/cmd/glbc/app"
"k8s.io/ingress-gce/pkg/backendconfig"
"k8s.io/ingress-gce/pkg/crd"
"k8s.io/ingress-gce/pkg/flags"
"k8s.io/ingress-gce/pkg/version"
)
// main is the entry point of the GLBC (GCE L7 load balancer controller).
// It parses flags, builds the Kubernetes clients (plus the optional
// BackendConfig CRD client), then runs the controllers either directly or
// under leader election.
func main() {
	flags.Register()
	rand.Seed(time.Now().UTC().UnixNano())
	flag.Parse()
	if flags.F.Verbose {
		flag.Set("v", "3")
	}
	// TODO: remove this when we do a release so the -logtostderr can be
	// used as a proper argument.
	flag.Lookup("logtostderr").Value.Set("true")
	if flags.F.Version {
		fmt.Printf("Controller version: %s\n", version.Version)
		os.Exit(0)
	}
	glog.V(0).Infof("Starting GLBC image: %q, cluster name %q", version.Version, flags.F.ClusterName)
	glog.V(0).Infof("Latest commit hash: %q", version.GitCommit)
	for i, a := range os.Args {
		glog.V(0).Infof("argv[%d]: %q", i, a)
	}
	glog.V(2).Infof("Flags = %+v", flags.F)
	// Flush any buffered glog output on the way out so shutdown messages
	// are not silently dropped.
	defer glog.Flush()
	kubeConfig, err := app.NewKubeConfig()
	if err != nil {
		glog.Fatalf("Failed to create kubernetes client config: %v", err)
	}
	kubeClient, err := kubernetes.NewForConfig(kubeConfig)
	if err != nil {
		glog.Fatalf("Failed to create kubernetes client: %v", err)
	}
	var backendConfigClient backendconfigclient.Interface
	if flags.F.EnableBackendConfig {
		crdClient, err := crdclient.NewForConfig(kubeConfig)
		if err != nil {
			glog.Fatalf("Failed to create kubernetes CRD client: %v", err)
		}
		// TODO(rramkumar): Reuse this CRD handler for other CRD's coming.
		crdHandler := crd.NewCRDHandler(crdClient)
		backendConfigCRDMeta := backendconfig.CRDMeta()
		if _, err := crdHandler.EnsureCRD(backendConfigCRDMeta); err != nil {
			glog.Fatalf("Failed to ensure BackendConfig CRD: %v", err)
		}
		backendConfigClient, err = backendconfigclient.NewForConfig(kubeConfig)
		if err != nil {
			glog.Fatalf("Failed to create BackendConfig client: %v", err)
		}
	}
	cloud := app.NewGCEClient()
	enableNEG := flags.F.Features.NEG
	ctx := context.NewControllerContext(kubeClient, backendConfigClient, cloud, flags.F.WatchNamespace, flags.F.ResyncPeriod, enableNEG, flags.F.EnableBackendConfig)
	go app.RunHTTPServer(ctx.HealthCheck)
	// Without leader election, run the controllers in this process
	// unconditionally.
	if !flags.F.LeaderElection.LeaderElect {
		runControllers(ctx)
		return
	}
	electionConfig, err := makeLeaderElectionConfig(kubeClient, ctx.Recorder(flags.F.LeaderElection.LockObjectNamespace), func() {
		runControllers(ctx)
	})
	if err != nil {
		glog.Fatalf("%v", err)
	}
	leaderelection.RunOrDie(*electionConfig)
}
// makeLeaderElectionConfig builds a leader election configuration. It will
// create a new resource lock associated with the configuration.
// The lock identity combines the hostname with a random suffix so two
// processes on the same host cannot collide. Returns the config or an
// error if the hostname or resource lock cannot be obtained.
func makeLeaderElectionConfig(client clientset.Interface, recorder record.EventRecorder, run func()) (*leaderelection.LeaderElectionConfig, error) {
hostname, err := os.Hostname()
if err != nil {
return nil, fmt.Errorf("unable to get hostname: %v", err)
}
// add a uniquifier so that two processes on the same host don't accidentally both become active
id := fmt.Sprintf("%v_%x", hostname, rand.Intn(1e6))
// The lock is backed by a ConfigMap in the configured namespace.
rl, err := resourcelock.New(resourcelock.ConfigMapsResourceLock,
flags.F.LeaderElection.LockObjectNamespace,
flags.F.LeaderElection.LockObjectName,
client.CoreV1(),
resourcelock.ResourceLockConfig{
Identity: id,
EventRecorder: recorder,
})
if err != nil {
return nil, fmt.Errorf("couldn't create resource lock: %v", err)
}
return &leaderelection.LeaderElectionConfig{
Lock: rl,
LeaseDuration: flags.F.LeaderElection.LeaseDuration.Duration,
RenewDeadline: flags.F.LeaderElection.RenewDeadline.Duration,
RetryPeriod: flags.F.LeaderElection.RetryPeriod.Duration,
Callbacks: leaderelection.LeaderCallbacks{
OnStartedLeading: func(_ <-chan struct{}) {
// Since we are committing a suicide after losing
// mastership, we can safely ignore the argument.
run()
},
OnStoppedLeading: func() {
// Exit hard; losing the lease while running would split-brain.
glog.Fatalf("lost master")
},
},
}, nil
}
// runControllers wires up and starts the load balancer controller (and the
// NEG controller when enabled), then blocks forever after lbc.Run returns,
// waiting for the pod to be deleted. Any setup failure is fatal.
func runControllers(ctx *context.ControllerContext) {
namer, err := app.NewNamer(ctx.KubeClient, flags.F.ClusterName, controller.DefaultFirewallName)
if err != nil {
glog.Fatalf("app.NewNamer(ctx.KubeClient, %q, %q) = %v", flags.F.ClusterName, controller.DefaultFirewallName, err)
}
defaultBackendServicePortID := app.DefaultBackendServicePortID(ctx.KubeClient)
clusterManager, err := controller.NewClusterManager(ctx, namer, defaultBackendServicePortID, flags.F.HealthCheckPath, flags.F.DefaultSvcHealthCheckPath)
if err != nil {
glog.Fatalf("controller.NewClusterManager(cloud, namer, %+v, %q, %q) = %v", defaultBackendServicePortID, flags.F.HealthCheckPath, flags.F.DefaultSvcHealthCheckPath, err)
}
// stopCh is shared by the controllers so a single close stops them all.
stopCh := make(chan struct{})
lbc, err := controller.NewLoadBalancerController(ctx, clusterManager, stopCh)
if err != nil {
glog.Fatalf("controller.NewLoadBalancerController(ctx, clusterManager, stopCh) = %v", err)
}
if clusterManager.ClusterNamer.UID() != "" {
glog.V(0).Infof("Cluster name: %+v", clusterManager.ClusterNamer.UID())
}
clusterManager.Init(lbc.Translator, lbc.Translator)
glog.V(0).Infof("clusterManager initialized")
if ctx.NEGEnabled {
// TODO: Refactor NEG to use cloud mocks so ctx.Cloud can be referenced within NewController.
negController, _ := neg.NewController(ctx.Cloud, ctx, lbc.Translator, namer, flags.F.ResyncPeriod)
go negController.Run(stopCh)
glog.V(0).Infof("negController started")
}
go app.RunSIGTERMHandler(lbc, flags.F.DeleteAllOnQuit)
ctx.Start(stopCh)
lbc.Run()
// Block until the pod is deleted; SIGTERM handling runs elsewhere.
for {
glog.Infof("Handled quit, awaiting pod deletion.")
time.Sleep(30 * time.Second)
}
}
flush logs before shutdown
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"math/rand"
"os"
"time"
"github.com/golang/glog"
flag "github.com/spf13/pflag"
crdclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/client-go/kubernetes"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/client-go/tools/record"
backendconfigclient "k8s.io/ingress-gce/pkg/backendconfig/client/clientset/versioned"
"k8s.io/ingress-gce/pkg/context"
"k8s.io/ingress-gce/pkg/controller"
neg "k8s.io/ingress-gce/pkg/neg"
"k8s.io/ingress-gce/cmd/glbc/app"
"k8s.io/ingress-gce/pkg/backendconfig"
"k8s.io/ingress-gce/pkg/crd"
"k8s.io/ingress-gce/pkg/flags"
"k8s.io/ingress-gce/pkg/version"
)
// main is the entry point of the GLBC (GCE L7 load balancer controller).
// It parses flags, builds the Kubernetes clients (plus the optional
// BackendConfig CRD client), then runs the controllers either directly or
// under leader election.
func main() {
flags.Register()
rand.Seed(time.Now().UTC().UnixNano())
flag.Parse()
if flags.F.Verbose {
flag.Set("v", "3")
}
// TODO: remove this when we do a release so the -logtostderr can be
// used as a proper argument.
flag.Lookup("logtostderr").Value.Set("true")
if flags.F.Version {
fmt.Printf("Controller version: %s\n", version.Version)
os.Exit(0)
}
glog.V(0).Infof("Starting GLBC image: %q, cluster name %q", version.Version, flags.F.ClusterName)
glog.V(0).Infof("Latest commit hash: %q", version.GitCommit)
for i, a := range os.Args {
glog.V(0).Infof("argv[%d]: %q", i, a)
}
glog.V(2).Infof("Flags = %+v", flags.F)
// Flush buffered glog output on the way out so shutdown messages
// are not silently dropped.
defer glog.Flush()
kubeConfig, err := app.NewKubeConfig()
if err != nil {
glog.Fatalf("Failed to create kubernetes client config: %v", err)
}
kubeClient, err := kubernetes.NewForConfig(kubeConfig)
if err != nil {
glog.Fatalf("Failed to create kubernetes client: %v", err)
}
var backendConfigClient backendconfigclient.Interface
if flags.F.EnableBackendConfig {
crdClient, err := crdclient.NewForConfig(kubeConfig)
if err != nil {
glog.Fatalf("Failed to create kubernetes CRD client: %v", err)
}
// TODO(rramkumar): Reuse this CRD handler for other CRD's coming.
crdHandler := crd.NewCRDHandler(crdClient)
backendConfigCRDMeta := backendconfig.CRDMeta()
if _, err := crdHandler.EnsureCRD(backendConfigCRDMeta); err != nil {
glog.Fatalf("Failed to ensure BackendConfig CRD: %v", err)
}
backendConfigClient, err = backendconfigclient.NewForConfig(kubeConfig)
if err != nil {
glog.Fatalf("Failed to create BackendConfig client: %v", err)
}
}
cloud := app.NewGCEClient()
enableNEG := flags.F.Features.NEG
ctx := context.NewControllerContext(kubeClient, backendConfigClient, cloud, flags.F.WatchNamespace, flags.F.ResyncPeriod, enableNEG, flags.F.EnableBackendConfig)
go app.RunHTTPServer(ctx.HealthCheck)
// Without leader election, run the controllers in this process.
if !flags.F.LeaderElection.LeaderElect {
runControllers(ctx)
return
}
electionConfig, err := makeLeaderElectionConfig(kubeClient, ctx.Recorder(flags.F.LeaderElection.LockObjectNamespace), func() {
runControllers(ctx)
})
if err != nil {
glog.Fatalf("%v", err)
}
leaderelection.RunOrDie(*electionConfig)
}
// makeLeaderElectionConfig builds a leader election configuration. It will
// create a new resource lock associated with the configuration.
// The lock identity combines the hostname with a random suffix so two
// processes on the same host cannot collide. Returns the config or an
// error if the hostname or resource lock cannot be obtained.
func makeLeaderElectionConfig(client clientset.Interface, recorder record.EventRecorder, run func()) (*leaderelection.LeaderElectionConfig, error) {
hostname, err := os.Hostname()
if err != nil {
return nil, fmt.Errorf("unable to get hostname: %v", err)
}
// add a uniquifier so that two processes on the same host don't accidentally both become active
id := fmt.Sprintf("%v_%x", hostname, rand.Intn(1e6))
// The lock is backed by a ConfigMap in the configured namespace.
rl, err := resourcelock.New(resourcelock.ConfigMapsResourceLock,
flags.F.LeaderElection.LockObjectNamespace,
flags.F.LeaderElection.LockObjectName,
client.CoreV1(),
resourcelock.ResourceLockConfig{
Identity: id,
EventRecorder: recorder,
})
if err != nil {
return nil, fmt.Errorf("couldn't create resource lock: %v", err)
}
return &leaderelection.LeaderElectionConfig{
Lock: rl,
LeaseDuration: flags.F.LeaderElection.LeaseDuration.Duration,
RenewDeadline: flags.F.LeaderElection.RenewDeadline.Duration,
RetryPeriod: flags.F.LeaderElection.RetryPeriod.Duration,
Callbacks: leaderelection.LeaderCallbacks{
OnStartedLeading: func(_ <-chan struct{}) {
// Since we are committing a suicide after losing
// mastership, we can safely ignore the argument.
run()
},
OnStoppedLeading: func() {
// Exit hard; losing the lease while running would split-brain.
glog.Fatalf("lost master")
},
},
}, nil
}
// runControllers wires up and starts the load balancer controller (and the
// NEG controller when enabled), then blocks forever after lbc.Run returns,
// waiting for the pod to be deleted. Any setup failure is fatal.
func runControllers(ctx *context.ControllerContext) {
namer, err := app.NewNamer(ctx.KubeClient, flags.F.ClusterName, controller.DefaultFirewallName)
if err != nil {
glog.Fatalf("app.NewNamer(ctx.KubeClient, %q, %q) = %v", flags.F.ClusterName, controller.DefaultFirewallName, err)
}
defaultBackendServicePortID := app.DefaultBackendServicePortID(ctx.KubeClient)
clusterManager, err := controller.NewClusterManager(ctx, namer, defaultBackendServicePortID, flags.F.HealthCheckPath, flags.F.DefaultSvcHealthCheckPath)
if err != nil {
glog.Fatalf("controller.NewClusterManager(cloud, namer, %+v, %q, %q) = %v", defaultBackendServicePortID, flags.F.HealthCheckPath, flags.F.DefaultSvcHealthCheckPath, err)
}
// stopCh is shared by the controllers so a single close stops them all.
stopCh := make(chan struct{})
lbc, err := controller.NewLoadBalancerController(ctx, clusterManager, stopCh)
if err != nil {
glog.Fatalf("controller.NewLoadBalancerController(ctx, clusterManager, stopCh) = %v", err)
}
if clusterManager.ClusterNamer.UID() != "" {
glog.V(0).Infof("Cluster name: %+v", clusterManager.ClusterNamer.UID())
}
clusterManager.Init(lbc.Translator, lbc.Translator)
glog.V(0).Infof("clusterManager initialized")
if ctx.NEGEnabled {
// TODO: Refactor NEG to use cloud mocks so ctx.Cloud can be referenced within NewController.
negController, _ := neg.NewController(ctx.Cloud, ctx, lbc.Translator, namer, flags.F.ResyncPeriod)
go negController.Run(stopCh)
glog.V(0).Infof("negController started")
}
go app.RunSIGTERMHandler(lbc, flags.F.DeleteAllOnQuit)
ctx.Start(stopCh)
lbc.Run()
// Block until the pod is deleted; SIGTERM handling runs elsewhere.
for {
glog.Infof("Handled quit, awaiting pod deletion.")
time.Sleep(30 * time.Second)
}
}
|
package cmd
import (
"bufio"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"os"
"strconv"
"strings"
"text/tabwriter"
"time"
"github.com/cozy/cozy-stack/client"
"github.com/cozy/cozy-stack/pkg/consts"
"github.com/cozy/cozy-stack/pkg/couchdb"
"github.com/cozy/cozy-stack/pkg/instance"
humanize "github.com/dustin/go-humanize"
"github.com/spf13/cobra"
)
// Command-line flag storage shared by the instances subcommands below.
// Each variable is bound to a cobra flag in init().
var flagDomain string
var flagLocale string
var flagTimezone string
var flagEmail string
var flagPublicName string
var flagSettings string
var flagDiskQuota string
var flagApps []string
var flagDev bool
var flagPassphrase string
var flagForce bool
var flagExpire time.Duration
var flagDry bool
// instanceCmdGroup represents the instances command.
// It is a pure group: running it without a subcommand prints help.
var instanceCmdGroup = &cobra.Command{
Use: "instances [command]",
Short: "Manage instances of a stack",
Long: `
cozy-stack instances allows to manage the instances of this stack
An instance is a logical space owned by one user and identified by a domain.
For example, bob.cozycloud.cc is the instance of Bob. A single cozy-stack
process can manage several instances.
Each instance has a separate space for storing files and a prefix used to
create its CouchDB databases.
`,
RunE: func(cmd *cobra.Command, args []string) error {
return cmd.Help()
},
}
// cleanInstanceCmd drops every CouchDB database whose prefix matches the
// given domain, cleaning up after a badly removed instance.
var cleanInstanceCmd = &cobra.Command{
Use: "clean [domain]",
Short: "Clean badly removed instances",
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
return cmd.Help()
}
domain := args[0]
i := couchdb.SimpleDatabasePrefix(domain)
return couchdb.DeleteAllDBs(i)
},
}
// showInstanceCmd fetches the instance for the given domain through the
// admin API and prints it as indented JSON.
// Fix: the local variable previously named "json" shadowed the
// encoding/json package; renamed to "out".
var showInstanceCmd = &cobra.Command{
	Use:   "show [domain]",
	Short: "Show the instance of the specified domain",
	Long: `
cozy-stack instances show allows to show the instance on the cozy for a
given domain.
`,
	Example: "$ cozy-stack instances show cozy.tools:8080",
	RunE: func(cmd *cobra.Command, args []string) error {
		if len(args) == 0 {
			return cmd.Help()
		}
		domain := args[0]
		c := newAdminClient()
		in, err := c.GetInstance(domain)
		if err != nil {
			return err
		}
		out, err := json.MarshalIndent(in, "", "  ")
		if err != nil {
			return err
		}
		fmt.Println(string(out))
		return nil
	},
}
var addInstanceCmd = &cobra.Command{
Use: "add [domain]",
Short: "Manage instances of a stack",
Long: `
cozy-stack instances add allows to create an instance on the cozy for a
given domain.
`,
Example: "$ cozy-stack instances add --dev --passphrase cozy --apps drive,photos,settings cozy.tools:8080",
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
return cmd.Help()
}
var quota int64
if flagDiskQuota != "" {
diskQuota, err := humanize.ParseBytes(flagDiskQuota)
if err != nil {
return err
}
quota = int64(diskQuota)
}
domain := args[0]
c := newAdminClient()
in, err := c.CreateInstance(&client.InstanceOptions{
Domain: domain,
Apps: flagApps,
Locale: flagLocale,
Timezone: flagTimezone,
Email: flagEmail,
PublicName: flagPublicName,
Settings: flagSettings,
DiskQuota: "a,
Dev: flagDev,
Passphrase: flagPassphrase,
})
if err != nil {
errPrintfln(
"Failed to create instance for domain %s", domain)
return err
}
fmt.Printf("Instance created with success for domain %s\n", in.Attrs.Domain)
if in.Attrs.RegisterToken != nil {
fmt.Printf("Registration token: \"%s\"\n", hex.EncodeToString(in.Attrs.RegisterToken))
}
if len(flagApps) == 0 {
return nil
}
apps, err := newClient(domain, consts.Apps).ListApps(consts.Apps)
if err == nil && len(flagApps) != len(apps) {
for _, slug := range flagApps {
found := false
for _, app := range apps {
if app.Attrs.Slug == slug {
found = true
break
}
}
if !found {
fmt.Printf("/!\\ Application %s has not been installed\n", slug)
}
}
}
return nil
},
}
var quotaInstanceCmd = &cobra.Command{
Use: "set-disk-quota [domain] [disk-quota]",
Short: "Change the disk-quota of the instance",
Long: `
cozy-stack instances set-disk-quota allows to change the disk-quota of the
instance of the given domain. Set the quota to 0 to remove the quota.
`,
Example: "$ cozy-stack instances set-disk-quota cozy.tools:8080 3GB",
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) != 2 {
return cmd.Help()
}
diskQuota, err := humanize.ParseBytes(args[1])
if err != nil {
return fmt.Errorf("Could not parse disk-quota: %s", err)
}
quota := int64(diskQuota)
domain := args[0]
c := newAdminClient()
_, err = c.ModifyInstance(domain, &client.InstanceOptions{
DiskQuota: "a,
})
return err
},
}
// debugInstanceCmd toggles the debug flag of an instance; the second
// argument must parse as a boolean (true/false, 1/0, ...).
var debugInstanceCmd = &cobra.Command{
Use: "debug [domain] [true/false]",
Short: "Activate or deactivate debugging of the instance",
Long: `
cozy-stack instances debug allows to activate or deactivate the debugging of a
specific domain.
`,
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) != 2 {
return cmd.Help()
}
domain := args[0]
debug, err := strconv.ParseBool(args[1])
if err != nil {
return err
}
c := newAdminClient()
_, err = c.ModifyInstance(domain, &client.InstanceOptions{
Debug: &debug,
})
return err
},
}
// lsInstanceCmd lists every instance served by this stack in a tab-aligned
// table: domain, locale, quota, dev/prod, onboarding state and views version.
var lsInstanceCmd = &cobra.Command{
Use: "ls",
Short: "List instances",
Long: `
cozy-stack instances ls allows to list all the instances that can be served
by this server.
`,
RunE: func(cmd *cobra.Command, args []string) error {
c := newAdminClient()
list, err := c.ListInstances()
if err != nil {
return err
}
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
for _, i := range list {
fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\tv%d\n",
i.Attrs.Domain,
i.Attrs.Locale,
formatSize(i.Attrs.BytesDiskQuota),
formatDev(i.Attrs.Dev),
formatOnboarded(i),
i.Attrs.IndexViewsVersion,
)
}
// Flush emits the buffered, column-aligned output.
return w.Flush()
},
}
// formatSize renders a byte quota for the listing; a zero quota is
// displayed as "unlimited".
func formatSize(size int64) string {
	if size != 0 {
		return humanize.Bytes(uint64(size))
	}
	return "unlimited"
}
// formatDev renders the dev/prod column for the instance listing.
func formatDev(dev bool) string {
	mode := "prod"
	if dev {
		mode = "dev"
	}
	return mode
}
// formatOnboarded renders the onboarding-status column for an instance:
// "onboarded" when finished, "onboarding" when a register token exists,
// "pending" otherwise.
func formatOnboarded(i *client.Instance) string {
	switch {
	case i.Attrs.OnboardingFinished:
		return "onboarded"
	case len(i.Attrs.RegisterToken) > 0:
		return "onboarding"
	default:
		return "pending"
	}
}
// destroyInstanceCmd permanently removes an instance and all its data,
// asking for interactive confirmation unless --force was given.
// Fix: user-facing typo "occured" -> "occurred".
var destroyInstanceCmd = &cobra.Command{
	Use:   "destroy [domain]",
	Short: "Remove instance",
	Long: `
cozy-stack instances destroy allows to remove an instance
and all its data.
`,
	Aliases: []string{"rm"},
	RunE: func(cmd *cobra.Command, args []string) error {
		if len(args) == 0 {
			return cmd.Help()
		}
		domain := args[0]
		if !flagForce {
			// Interactive confirmation: anything but yes/y aborts silently.
			reader := bufio.NewReader(os.Stdin)
			fmt.Printf(`Are you sure you want to remove instance for domain %s ?
All data associated with this domain will be permanently lost.
[yes/NO]: `, domain)
			str, err := reader.ReadString('\n')
			if err != nil {
				return err
			}
			str = strings.ToLower(strings.TrimSpace(str))
			if str != "yes" && str != "y" {
				return nil
			}
			fmt.Println()
		}
		c := newAdminClient()
		err := c.DestroyInstance(domain)
		if err != nil {
			errPrintfln(
				"An error occurred while destroying instance for domain %s", domain)
			return err
		}
		fmt.Printf("Instance for domain %s has been destroyed with success\n", domain)
		return nil
	},
}
// fsckInstanceCmd checks VFS consistency (CouchDB vs swift/localfs) for the
// given domain. Only the --dry mode is implemented; without it the command
// fails with "Not implemented yet".
var fsckInstanceCmd = &cobra.Command{
Use: "fsck [domain]",
Short: "Check and repair a vfs",
Long: `
The cozy-stack fsck command checks that the files in the VFS are not
desynchronized, ie a file present in CouchDB but not swift/localfs, or present
in swift/localfs but not couchdb.
`,
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
return cmd.Help()
}
domain := args[0]
if !flagDry {
fmt.Printf("Sorry, only cozy-stack fsck --dry is implemented currently.")
return errors.New("Not implemented yet")
}
c := newAdminClient()
list, err := c.FsckInstance(domain)
if err != nil {
return err
}
// An empty result means no desynchronized file was found.
if len(list) == 0 {
fmt.Printf("Instance for domain %s is clean\n", domain)
} else {
for _, entry := range list {
fmt.Printf("- %s: %s\n", entry["filename"], entry["message"])
}
}
return nil
},
}
// appTokenInstanceCmd generates and prints an application token for the
// given domain and app slug, honoring the --expire flag.
var appTokenInstanceCmd = &cobra.Command{
Use: "token-app [domain] [slug]",
Short: "Generate a new application token",
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) < 2 {
return cmd.Help()
}
c := newAdminClient()
token, err := c.GetToken(&client.TokenOptions{
Domain: args[0],
Subject: args[1],
Audience: "app",
Expire: flagExpire,
})
if err != nil {
return err
}
_, err = fmt.Println(token)
return err
},
}
// cliTokenInstanceCmd generates and prints a CLI access token (global
// access) for the given domain; the remaining arguments are the scopes.
var cliTokenInstanceCmd = &cobra.Command{
Use: "token-cli [domain] [scopes]",
Short: "Generate a new CLI access token (global access)",
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) < 2 {
return cmd.Help()
}
c := newAdminClient()
token, err := c.GetToken(&client.TokenOptions{
Domain: args[0],
Scope: args[1:],
Audience: "cli",
Expire: flagExpire,
})
if err != nil {
return err
}
_, err = fmt.Println(token)
return err
},
}
// oauthTokenInstanceCmd generates and prints an OAuth access token for the
// given domain and client id; the remaining arguments are the scopes.
var oauthTokenInstanceCmd = &cobra.Command{
Use: "token-oauth [domain] [clientid] [scopes]",
Short: "Generate a new OAuth access token",
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) < 3 {
return cmd.Help()
}
c := newAdminClient()
token, err := c.GetToken(&client.TokenOptions{
Domain: args[0],
Subject: args[1],
Audience: "access-token",
Scope: args[2:],
Expire: flagExpire,
})
if err != nil {
return err
}
_, err = fmt.Println(token)
return err
},
}
// oauthClientInstanceCmd registers a new OAuth client on the given domain
// and prints the resulting client_id.
var oauthClientInstanceCmd = &cobra.Command{
Use: "client-oauth [domain] [redirect_uri] [client_name] [software_id]",
Short: "Register a new OAuth client",
Long: `It registers a new OAuth client and returns its client_id`,
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) < 4 {
return cmd.Help()
}
c := newAdminClient()
clientID, err := c.RegisterOAuthClient(&client.OAuthClientOptions{
Domain: args[0],
RedirectURI: args[1],
ClientName: args[2],
SoftwareID: args[3],
})
if err != nil {
return err
}
_, err = fmt.Println(clientID)
return err
},
}
// updateCmd starts application updates, either for one instance
// (--domain) or for every instance served by the stack (--all-domains).
var updateCmd = &cobra.Command{
	Use:   "update [domain] [slugs...]",
	Short: "Starts the updates for the specified domain instance.",
	Long: `Starts the updates for the specified domain instance. Use whether the --domain
flag to specify the instance or the --all-domains flags to updates all domains.
The slugs arguments can be used to select which applications should be
updated.`,
	Aliases: []string{"updates"},
	RunE: func(cmd *cobra.Command, args []string) error {
		c := newAdminClient()
		// --all-domains takes precedence over --domain.
		if flagAllDomains {
			return c.Updates(&client.UpdatesOptions{
				Slugs: args,
			})
		}
		// Without --all-domains, a target domain is mandatory.
		if flagDomain == "" {
			return errAppsMissingDomain
		}
		return c.Updates(&client.UpdatesOptions{
			Domain: flagDomain,
			Slugs:  args,
		})
	},
}
// init wires every instance sub-command into the "instances" group,
// declares their command-line flags, and attaches the group to the root
// command.
func init() {
	instanceCmdGroup.AddCommand(showInstanceCmd)
	instanceCmdGroup.AddCommand(addInstanceCmd)
	instanceCmdGroup.AddCommand(cleanInstanceCmd)
	instanceCmdGroup.AddCommand(lsInstanceCmd)
	instanceCmdGroup.AddCommand(quotaInstanceCmd)
	instanceCmdGroup.AddCommand(debugInstanceCmd)
	instanceCmdGroup.AddCommand(destroyInstanceCmd)
	instanceCmdGroup.AddCommand(fsckInstanceCmd)
	instanceCmdGroup.AddCommand(appTokenInstanceCmd)
	instanceCmdGroup.AddCommand(cliTokenInstanceCmd)
	instanceCmdGroup.AddCommand(oauthTokenInstanceCmd)
	instanceCmdGroup.AddCommand(oauthClientInstanceCmd)
	instanceCmdGroup.AddCommand(updateCmd)
	addInstanceCmd.Flags().StringVar(&flagLocale, "locale", instance.DefaultLocale, "Locale of the new cozy instance")
	addInstanceCmd.Flags().StringVar(&flagTimezone, "tz", "", "The timezone for the user")
	addInstanceCmd.Flags().StringVar(&flagEmail, "email", "", "The email of the owner")
	addInstanceCmd.Flags().StringVar(&flagPublicName, "public-name", "", "The public name of the owner")
	addInstanceCmd.Flags().StringVar(&flagSettings, "settings", "", "A list of settings (eg context:foo,offer:premium)")
	addInstanceCmd.Flags().StringVar(&flagDiskQuota, "disk-quota", "", "The quota allowed to the instance's VFS")
	addInstanceCmd.Flags().StringSliceVar(&flagApps, "apps", nil, "Apps to be preinstalled")
	addInstanceCmd.Flags().BoolVar(&flagDev, "dev", false, "To create a development instance")
	addInstanceCmd.Flags().StringVar(&flagPassphrase, "passphrase", "", "Register the instance with this passphrase (useful for tests)")
	destroyInstanceCmd.Flags().BoolVar(&flagForce, "force", false, "Force the deletion without asking for confirmation")
	fsckInstanceCmd.Flags().BoolVar(&flagDry, "dry", false, "Don't modify the VFS, only show the inconsistencies")
	// NOTE(review): the two "expire" usage strings below and the
	// "iterativelly" one contain typos in the user-facing help text.
	appTokenInstanceCmd.Flags().DurationVar(&flagExpire, "expire", 0, "Make the token expires in this amount of time")
	oauthTokenInstanceCmd.Flags().DurationVar(&flagExpire, "expire", 0, "Make the token expires in this amount of time")
	updateCmd.Flags().BoolVar(&flagAllDomains, "all-domains", false, "work on all domains iterativelly")
	updateCmd.Flags().StringVar(&flagDomain, "domain", "", "specify the domain name of the instance")
	RootCmd.AddCommand(instanceCmdGroup)
}
s/Starts/Start/
package cmd
import (
"bufio"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"os"
"strconv"
"strings"
"text/tabwriter"
"time"
"github.com/cozy/cozy-stack/client"
"github.com/cozy/cozy-stack/pkg/consts"
"github.com/cozy/cozy-stack/pkg/couchdb"
"github.com/cozy/cozy-stack/pkg/instance"
humanize "github.com/dustin/go-humanize"
"github.com/spf13/cobra"
)
// Command-line flags shared by the instances sub-commands; each one is
// bound to its command in init.
var flagDomain string // --domain: target instance domain
var flagLocale string // --locale: locale of a new instance
var flagTimezone string // --tz: timezone of the owner
var flagEmail string // --email: email of the owner
var flagPublicName string // --public-name: public name of the owner
var flagSettings string // --settings: extra settings (eg context:foo)
var flagDiskQuota string // --disk-quota: human-readable quota (eg 3GB)
var flagApps []string // --apps: applications to preinstall
var flagDev bool // --dev: create a development instance
var flagPassphrase string // --passphrase: register with this passphrase
var flagForce bool // --force: skip confirmation on destroy
var flagExpire time.Duration // --expire: token lifetime
var flagDry bool // --dry: fsck without modifying the VFS
// instanceCmdGroup represents the instances command.
// It only groups the sub-commands together: invoked without a
// sub-command it simply prints its help.
var instanceCmdGroup = &cobra.Command{
	Use:   "instances [command]",
	Short: "Manage instances of a stack",
	Long: `
cozy-stack instances allows to manage the instances of this stack
An instance is a logical space owned by one user and identified by a domain.
For example, bob.cozycloud.cc is the instance of Bob. A single cozy-stack
process can manage several instances.
Each instance has a separate space for storing files and a prefix used to
create its CouchDB databases.
`,
	RunE: func(cmd *cobra.Command, args []string) error {
		return cmd.Help()
	},
}
// cleanInstanceCmd removes the leftover CouchDB databases of an instance
// that was not properly deleted.
var cleanInstanceCmd = &cobra.Command{
	Use:   "clean [domain]",
	Short: "Clean badly removed instances",
	RunE: func(cmd *cobra.Command, args []string) error {
		if len(args) == 0 {
			return cmd.Help()
		}
		domain := args[0]
		// The domain doubles as the CouchDB database prefix.
		i := couchdb.SimpleDatabasePrefix(domain)
		return couchdb.DeleteAllDBs(i)
	},
}
// showInstanceCmd fetches one instance from the admin API and prints it
// as indented JSON on stdout.
var showInstanceCmd = &cobra.Command{
	Use:   "show [domain]",
	Short: "Show the instance of the specified domain",
	Long: `
cozy-stack instances show allows to show the instance on the cozy for a
given domain.
`,
	Example: "$ cozy-stack instances show cozy.tools:8080",
	RunE: func(cmd *cobra.Command, args []string) error {
		if len(args) == 0 {
			return cmd.Help()
		}
		domain := args[0]
		c := newAdminClient()
		in, err := c.GetInstance(domain)
		if err != nil {
			return err
		}
		// Fixed: the local was named "json", shadowing the encoding/json
		// package for the rest of the function.
		out, err := json.MarshalIndent(in, "", " ")
		if err != nil {
			return err
		}
		fmt.Println(string(out))
		return nil
	},
}
var addInstanceCmd = &cobra.Command{
Use: "add [domain]",
Short: "Manage instances of a stack",
Long: `
cozy-stack instances add allows to create an instance on the cozy for a
given domain.
`,
Example: "$ cozy-stack instances add --dev --passphrase cozy --apps drive,photos,settings cozy.tools:8080",
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
return cmd.Help()
}
var quota int64
if flagDiskQuota != "" {
diskQuota, err := humanize.ParseBytes(flagDiskQuota)
if err != nil {
return err
}
quota = int64(diskQuota)
}
domain := args[0]
c := newAdminClient()
in, err := c.CreateInstance(&client.InstanceOptions{
Domain: domain,
Apps: flagApps,
Locale: flagLocale,
Timezone: flagTimezone,
Email: flagEmail,
PublicName: flagPublicName,
Settings: flagSettings,
DiskQuota: "a,
Dev: flagDev,
Passphrase: flagPassphrase,
})
if err != nil {
errPrintfln(
"Failed to create instance for domain %s", domain)
return err
}
fmt.Printf("Instance created with success for domain %s\n", in.Attrs.Domain)
if in.Attrs.RegisterToken != nil {
fmt.Printf("Registration token: \"%s\"\n", hex.EncodeToString(in.Attrs.RegisterToken))
}
if len(flagApps) == 0 {
return nil
}
apps, err := newClient(domain, consts.Apps).ListApps(consts.Apps)
if err == nil && len(flagApps) != len(apps) {
for _, slug := range flagApps {
found := false
for _, app := range apps {
if app.Attrs.Slug == slug {
found = true
break
}
}
if !found {
fmt.Printf("/!\\ Application %s has not been installed\n", slug)
}
}
}
return nil
},
}
var quotaInstanceCmd = &cobra.Command{
Use: "set-disk-quota [domain] [disk-quota]",
Short: "Change the disk-quota of the instance",
Long: `
cozy-stack instances set-disk-quota allows to change the disk-quota of the
instance of the given domain. Set the quota to 0 to remove the quota.
`,
Example: "$ cozy-stack instances set-disk-quota cozy.tools:8080 3GB",
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) != 2 {
return cmd.Help()
}
diskQuota, err := humanize.ParseBytes(args[1])
if err != nil {
return fmt.Errorf("Could not parse disk-quota: %s", err)
}
quota := int64(diskQuota)
domain := args[0]
c := newAdminClient()
_, err = c.ModifyInstance(domain, &client.InstanceOptions{
DiskQuota: "a,
})
return err
},
}
// debugInstanceCmd toggles debug logging for one instance. The second
// argument is parsed as a boolean ("true"/"false", "1"/"0", ...).
var debugInstanceCmd = &cobra.Command{
	Use:   "debug [domain] [true/false]",
	Short: "Activate or deactivate debugging of the instance",
	Long: `
cozy-stack instances debug allows to activate or deactivate the debugging of a
specific domain.
`,
	RunE: func(cmd *cobra.Command, args []string) error {
		if len(args) != 2 {
			return cmd.Help()
		}
		domain := args[0]
		debug, err := strconv.ParseBool(args[1])
		if err != nil {
			return err
		}
		c := newAdminClient()
		// A pointer is used so that "false" is distinguishable from unset.
		_, err = c.ModifyInstance(domain, &client.InstanceOptions{
			Debug: &debug,
		})
		return err
	},
}
// lsInstanceCmd lists every instance of the stack as an aligned table:
// domain, locale, quota, dev/prod, onboarding state and index version.
var lsInstanceCmd = &cobra.Command{
	Use:   "ls",
	Short: "List instances",
	Long: `
cozy-stack instances ls allows to list all the instances that can be served
by this server.
`,
	RunE: func(cmd *cobra.Command, args []string) error {
		c := newAdminClient()
		list, err := c.ListInstances()
		if err != nil {
			return err
		}
		// tabwriter aligns the columns; Flush renders the buffered rows.
		w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
		for _, i := range list {
			fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\tv%d\n",
				i.Attrs.Domain,
				i.Attrs.Locale,
				formatSize(i.Attrs.BytesDiskQuota),
				formatDev(i.Attrs.Dev),
				formatOnboarded(i),
				i.Attrs.IndexViewsVersion,
			)
		}
		return w.Flush()
	},
}
// formatSize renders a disk quota in bytes as a human-readable string;
// a zero quota is displayed as "unlimited".
func formatSize(size int64) string {
	if size != 0 {
		return humanize.Bytes(uint64(size))
	}
	return "unlimited"
}
// formatDev renders the instance mode for display: "dev" for a
// development instance, "prod" otherwise.
func formatDev(dev bool) string {
	mode := "prod"
	if dev {
		mode = "dev"
	}
	return mode
}
// formatOnboarded renders the onboarding state of an instance for
// display: already onboarded, waiting on a register token, or pending.
func formatOnboarded(i *client.Instance) string {
	switch {
	case i.Attrs.OnboardingFinished:
		return "onboarded"
	case len(i.Attrs.RegisterToken) > 0:
		return "onboarding"
	default:
		return "pending"
	}
}
// destroyInstanceCmd permanently deletes an instance and all of its
// data. Unless --force is given, an interactive confirmation is asked.
var destroyInstanceCmd = &cobra.Command{
	Use:   "destroy [domain]",
	Short: "Remove instance",
	Long: `
cozy-stack instances destroy allows to remove an instance
and all its data.
`,
	Aliases: []string{"rm"},
	RunE: func(cmd *cobra.Command, args []string) error {
		if len(args) == 0 {
			return cmd.Help()
		}
		domain := args[0]
		if !flagForce {
			// Require an explicit "yes" (or "y") before destroying.
			reader := bufio.NewReader(os.Stdin)
			fmt.Printf(`Are you sure you want to remove instance for domain %s ?
All data associated with this domain will be permanently lost.
[yes/NO]: `, domain)
			str, err := reader.ReadString('\n')
			if err != nil {
				return err
			}
			str = strings.ToLower(strings.TrimSpace(str))
			if str != "yes" && str != "y" {
				return nil
			}
			fmt.Println()
		}
		c := newAdminClient()
		err := c.DestroyInstance(domain)
		if err != nil {
			// Fixed typo in the user-facing message: "occured" -> "occurred".
			errPrintfln(
				"An error occurred while destroying instance for domain %s", domain)
			return err
		}
		fmt.Printf("Instance for domain %s has been destroyed with success\n", domain)
		return nil
	},
}
// fsckInstanceCmd checks the consistency between the file metadata in
// CouchDB and the content in swift/localfs. Only the read-only --dry
// mode is implemented; without it the command fails.
var fsckInstanceCmd = &cobra.Command{
	Use:   "fsck [domain]",
	Short: "Check and repair a vfs",
	Long: `
The cozy-stack fsck command checks that the files in the VFS are not
desynchronized, ie a file present in CouchDB but not swift/localfs, or present
in swift/localfs but not couchdb.
`,
	RunE: func(cmd *cobra.Command, args []string) error {
		if len(args) == 0 {
			return cmd.Help()
		}
		domain := args[0]
		if !flagDry {
			// Fixed: Printf was called with a plain (non-format) string and
			// no trailing newline; Println is the correct call.
			fmt.Println("Sorry, only cozy-stack fsck --dry is implemented currently.")
			return errors.New("Not implemented yet")
		}
		c := newAdminClient()
		list, err := c.FsckInstance(domain)
		if err != nil {
			return err
		}
		if len(list) == 0 {
			fmt.Printf("Instance for domain %s is clean\n", domain)
		} else {
			// Each entry describes one inconsistency found on the VFS.
			for _, entry := range list {
				fmt.Printf("- %s: %s\n", entry["filename"], entry["message"])
			}
		}
		return nil
	},
}
// appTokenInstanceCmd generates a token scoped to a single application
// (audience "app") on the given domain, via the admin API.
var appTokenInstanceCmd = &cobra.Command{
	Use:   "token-app [domain] [slug]",
	Short: "Generate a new application token",
	RunE: func(cmd *cobra.Command, args []string) error {
		// Both the domain and the application slug are required.
		if len(args) < 2 {
			return cmd.Help()
		}
		c := newAdminClient()
		token, err := c.GetToken(&client.TokenOptions{
			Domain:   args[0],
			Subject:  args[1],
			Audience: "app",
			Expire:   flagExpire, // 0 keeps the server-side default lifetime
		})
		if err != nil {
			return err
		}
		// Print the raw token so it can be piped into other commands.
		_, err = fmt.Println(token)
		return err
	},
}
// cliTokenInstanceCmd generates a token with global access (audience
// "cli") for the given domain; the remaining arguments become scopes.
var cliTokenInstanceCmd = &cobra.Command{
	Use:   "token-cli [domain] [scopes]",
	Short: "Generate a new CLI access token (global access)",
	RunE: func(cmd *cobra.Command, args []string) error {
		// Need a domain plus at least one scope.
		if len(args) < 2 {
			return cmd.Help()
		}
		c := newAdminClient()
		token, err := c.GetToken(&client.TokenOptions{
			Domain:   args[0],
			Scope:    args[1:],
			Audience: "cli",
			Expire:   flagExpire, // 0 keeps the server-side default lifetime
		})
		if err != nil {
			return err
		}
		_, err = fmt.Println(token)
		return err
	},
}
// oauthTokenInstanceCmd generates an OAuth access token for an already
// registered OAuth client (args[1]) with the requested scopes.
var oauthTokenInstanceCmd = &cobra.Command{
	Use:   "token-oauth [domain] [clientid] [scopes]",
	Short: "Generate a new OAuth access token",
	RunE: func(cmd *cobra.Command, args []string) error {
		// Need a domain, a client id and at least one scope.
		if len(args) < 3 {
			return cmd.Help()
		}
		c := newAdminClient()
		token, err := c.GetToken(&client.TokenOptions{
			Domain:   args[0],
			Subject:  args[1],
			Audience: "access-token",
			Scope:    args[2:],
			Expire:   flagExpire,
		})
		if err != nil {
			return err
		}
		_, err = fmt.Println(token)
		return err
	},
}
// oauthClientInstanceCmd registers a new OAuth client on an instance and
// prints the generated client_id.
var oauthClientInstanceCmd = &cobra.Command{
	Use:   "client-oauth [domain] [redirect_uri] [client_name] [software_id]",
	Short: "Register a new OAuth client",
	Long:  `It registers a new OAuth client and returns its client_id`,
	RunE: func(cmd *cobra.Command, args []string) error {
		// All four positional arguments are mandatory.
		if len(args) < 4 {
			return cmd.Help()
		}
		c := newAdminClient()
		clientID, err := c.RegisterOAuthClient(&client.OAuthClientOptions{
			Domain:      args[0],
			RedirectURI: args[1],
			ClientName:  args[2],
			SoftwareID:  args[3],
		})
		if err != nil {
			return err
		}
		_, err = fmt.Println(clientID)
		return err
	},
}
// updateCmd starts application updates, either for one instance
// (--domain) or for every instance served by the stack (--all-domains).
var updateCmd = &cobra.Command{
	Use:   "update [domain] [slugs...]",
	Short: "Start the updates for the specified domain instance.",
	// Fixed grammar in the user-facing help text
	// ("Use whether ... flags to updates" -> "Use either ... flag to update").
	Long: `Start the updates for the specified domain instance. Use either the --domain
flag to specify the instance or the --all-domains flag to update all domains.
The slugs arguments can be used to select which applications should be
updated.`,
	Aliases: []string{"updates"},
	RunE: func(cmd *cobra.Command, args []string) error {
		c := newAdminClient()
		// --all-domains takes precedence over --domain.
		if flagAllDomains {
			return c.Updates(&client.UpdatesOptions{
				Slugs: args,
			})
		}
		// Without --all-domains, a target domain is mandatory.
		if flagDomain == "" {
			return errAppsMissingDomain
		}
		return c.Updates(&client.UpdatesOptions{
			Domain: flagDomain,
			Slugs:  args,
		})
	},
}
// init wires every instance sub-command into the "instances" group,
// declares their command-line flags, and attaches the group to the root
// command.
func init() {
	instanceCmdGroup.AddCommand(showInstanceCmd)
	instanceCmdGroup.AddCommand(addInstanceCmd)
	instanceCmdGroup.AddCommand(cleanInstanceCmd)
	instanceCmdGroup.AddCommand(lsInstanceCmd)
	instanceCmdGroup.AddCommand(quotaInstanceCmd)
	instanceCmdGroup.AddCommand(debugInstanceCmd)
	instanceCmdGroup.AddCommand(destroyInstanceCmd)
	instanceCmdGroup.AddCommand(fsckInstanceCmd)
	instanceCmdGroup.AddCommand(appTokenInstanceCmd)
	instanceCmdGroup.AddCommand(cliTokenInstanceCmd)
	instanceCmdGroup.AddCommand(oauthTokenInstanceCmd)
	instanceCmdGroup.AddCommand(oauthClientInstanceCmd)
	instanceCmdGroup.AddCommand(updateCmd)
	addInstanceCmd.Flags().StringVar(&flagLocale, "locale", instance.DefaultLocale, "Locale of the new cozy instance")
	addInstanceCmd.Flags().StringVar(&flagTimezone, "tz", "", "The timezone for the user")
	addInstanceCmd.Flags().StringVar(&flagEmail, "email", "", "The email of the owner")
	addInstanceCmd.Flags().StringVar(&flagPublicName, "public-name", "", "The public name of the owner")
	addInstanceCmd.Flags().StringVar(&flagSettings, "settings", "", "A list of settings (eg context:foo,offer:premium)")
	addInstanceCmd.Flags().StringVar(&flagDiskQuota, "disk-quota", "", "The quota allowed to the instance's VFS")
	addInstanceCmd.Flags().StringSliceVar(&flagApps, "apps", nil, "Apps to be preinstalled")
	addInstanceCmd.Flags().BoolVar(&flagDev, "dev", false, "To create a development instance")
	addInstanceCmd.Flags().StringVar(&flagPassphrase, "passphrase", "", "Register the instance with this passphrase (useful for tests)")
	destroyInstanceCmd.Flags().BoolVar(&flagForce, "force", false, "Force the deletion without asking for confirmation")
	fsckInstanceCmd.Flags().BoolVar(&flagDry, "dry", false, "Don't modify the VFS, only show the inconsistencies")
	// Fixed help-text typos: "Make the token expires" -> "expire".
	appTokenInstanceCmd.Flags().DurationVar(&flagExpire, "expire", 0, "Make the token expire in this amount of time")
	oauthTokenInstanceCmd.Flags().DurationVar(&flagExpire, "expire", 0, "Make the token expire in this amount of time")
	// Fixed help-text typo: "iterativelly" -> "iteratively".
	updateCmd.Flags().BoolVar(&flagAllDomains, "all-domains", false, "work on all domains iteratively")
	updateCmd.Flags().StringVar(&flagDomain, "domain", "", "specify the domain name of the instance")
	RootCmd.AddCommand(instanceCmdGroup)
}
|
package main
import (
"log"
"os"
"regexp"
"strings"
"time"
"github.com/apcera/nats"
)
// main subscribes to a NATS topic (first CLI argument) and logs every
// message that does not match the noise filter, until killed.
func main() {
	opts := nats.DefaultOptions
	// The cluster can be overridden through NATS_CLUSTER (comma-separated
	// URLs); fall back to a local server.
	natsURL := os.Getenv("NATS_CLUSTER")
	if natsURL == "" {
		natsURL = "nats://localhost:4222"
	}
	opts.Servers = strings.Split(natsURL, ",")
	opts.MaxReconnect = 5
	opts.ReconnectWait = (20 * time.Second)
	nc, err := opts.Connect()
	log.SetFlags(0)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()
	// Log connection state changes for visibility.
	nc.Opts.DisconnectedCB = func(_ *nats.Conn) {
		log.Println("Got disconnected!")
	}
	nc.Opts.ReconnectedCB = func(nc *nats.Conn) {
		log.Printf("Got reconnected to %v!\n", nc.ConnectedUrl())
	}
	done := make(chan struct{})
	// Messages matching this pattern are treated as noise and dropped.
	regexNotNeed, err := regexp.Compile("(?:queue/queue.go|\\[Polling\\])")
	if err != nil {
		log.Fatal(err)
	}
	if len(os.Args) == 1 {
		log.Fatal("Need specify the topic")
	}
	topic := os.Args[1]
	// NOTE(review): the error returned by Subscribe is ignored — a failed
	// subscription would leave the process silently idle.
	nc.Subscribe(topic, func(m *nats.Msg) {
		if !regexNotNeed.Match(m.Data) {
			log.Printf("[%s]: %s\n", m.Subject, string(m.Data))
		}
	})
	// done is never closed: block forever until the process is killed.
	<-done
}
Update the reconnection delay to 2 seconds.
package main
import (
"log"
"os"
"regexp"
"strings"
"time"
"github.com/apcera/nats"
)
// main subscribes to a NATS topic (first CLI argument) and logs every
// message that does not match the noise filter, until killed.
func main() {
	opts := nats.DefaultOptions
	// The cluster can be overridden through NATS_CLUSTER (comma-separated
	// URLs); fall back to a local server.
	natsURL := os.Getenv("NATS_CLUSTER")
	if natsURL == "" {
		natsURL = "nats://localhost:4222"
	}
	opts.Servers = strings.Split(natsURL, ",")
	opts.MaxReconnect = 5
	opts.ReconnectWait = 2 * time.Second
	nc, err := opts.Connect()
	log.SetFlags(0)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()
	// Log connection state changes for visibility.
	nc.Opts.DisconnectedCB = func(_ *nats.Conn) {
		log.Println("Got disconnected!")
	}
	nc.Opts.ReconnectedCB = func(nc *nats.Conn) {
		log.Printf("Got reconnected to %v!\n", nc.ConnectedUrl())
	}
	done := make(chan struct{})
	// Messages matching this pattern are treated as noise and dropped.
	regexNotNeed, err := regexp.Compile("(?:queue/queue.go|\\[Polling\\])")
	if err != nil {
		log.Fatal(err)
	}
	if len(os.Args) == 1 {
		log.Fatal("Need specify the topic")
	}
	topic := os.Args[1]
	// Fixed: the error returned by Subscribe was silently dropped; a
	// failed subscription would leave the process idle forever.
	if _, err := nc.Subscribe(topic, func(m *nats.Msg) {
		if !regexNotNeed.Match(m.Data) {
			log.Printf("[%s]: %s\n", m.Subject, string(m.Data))
		}
	}); err != nil {
		log.Fatal(err)
	}
	// done is never closed: block forever until the process is killed.
	<-done
}
|
package main
import (
"os"
"os/signal"
"syscall"
"google.golang.org/grpc"
"fmt"
"github.com/slok/ragnarok/cmd/node/flags"
"github.com/slok/ragnarok/log"
"github.com/slok/ragnarok/node"
"github.com/slok/ragnarok/node/client"
)
// Main run main logic.
//
// It parses the node configuration from the command line, connects to
// the master over gRPC, registers the node and returns. Any error is
// reported to the caller (main), which turns it into exit code 1.
func Main() error {
	logger := log.Base()
	// Get the command line arguments.
	cfg, err := flags.GetNodeConfig(os.Args[1:])
	if err != nil {
		logger.Error(err)
		return err
	}
	// Set debug mode.
	if cfg.Debug {
		logger.Set("debug")
	}
	// Create node status client
	conn, err := grpc.Dial(cfg.MasterAddress, grpc.WithInsecure()) // TODO: secured.
	if err != nil {
		return err
	}
	// NOTE(review): the connection is closed as soon as Main returns,
	// which also happens right after registration below.
	defer conn.Close()
	nsCli, err := client.NewStatusGRPCFromConnection(conn, logger)
	if err != nil {
		return err
	}
	// Create the node.
	n := node.NewFailureNode(*cfg, nsCli, logger)
	// Register node.
	if err := n.RegisterOnMaster(); err != nil {
		return fmt.Errorf("node not registered on master: %v", err)
	}
	// TODO: Listen for service calls
	return nil
}
// clean runs the shutdown cleanup; currently it only logs that cleanup
// is happening.
func clean() {
	log.Debug("Cleaning...")
}
// main runs Main in the background and waits for either a fatal error
// from it or a termination signal, then cleans up and exits.
func main() {
	// Forward SIGINT/SIGTERM/SIGQUIT for a controlled shutdown.
	sigC := make(chan os.Signal, 1)
	signal.Notify(sigC, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
	errC := make(chan error)
	// Run main program
	go func() {
		if err := Main(); err != nil {
			errC <- err
		}
		// Fixed: removed a redundant bare `return` that was the last
		// statement of this function literal.
	}()
	// Wait until signal (ctr+c, SIGTERM...)
	var exitCode int
Waiter:
	for {
		select {
		// Wait for errors
		case err := <-errC:
			if err != nil {
				exitCode = 1
				break Waiter
			}
		// Wait for signal
		case <-sigC:
			break Waiter
		}
	}
	clean()
	os.Exit(exitCode)
}
Fix node main for the heartbeats
Signed-off-by: Xabier Larrakoetxea <30fb0ea44f2104eb1a81e793d922a064c3916c2f@gmail.com>
package main
import (
"os"
"os/signal"
"syscall"
"google.golang.org/grpc"
"fmt"
"github.com/slok/ragnarok/clock"
"github.com/slok/ragnarok/cmd/node/flags"
"github.com/slok/ragnarok/log"
"github.com/slok/ragnarok/node"
"github.com/slok/ragnarok/node/client"
"github.com/slok/ragnarok/types"
)
// Main run main logic.
//
// It parses the node configuration from the command line, connects to
// the master over gRPC, registers the node and starts heartbeating.
// Any error is reported to the caller (main), which turns it into exit
// code 1.
func Main() error {
	logger := log.Base()
	// Get the command line arguments.
	cfg, err := flags.GetNodeConfig(os.Args[1:])
	if err != nil {
		logger.Error(err)
		return err
	}
	// Set debug mode.
	if cfg.Debug {
		logger.Set("debug")
	}
	// Create node status client
	conn, err := grpc.Dial(cfg.MasterAddress, grpc.WithInsecure()) // TODO: secured.
	if err != nil {
		return err
	}
	// NOTE(review): the connection is closed as soon as Main returns,
	// right after the heartbeat is started below — confirm StartHeartbeat
	// does not need the connection past this point.
	defer conn.Close()
	nsCli, err := client.NewStatusGRPCFromConnection(conn, types.NodeStateTransformer, logger)
	if err != nil {
		return err
	}
	// Create the node.
	n := node.NewFailureNode(*cfg, nsCli, clock.Base(), logger)
	// Register node.
	if err := n.RegisterOnMaster(); err != nil {
		return fmt.Errorf("node not registered on master: %v", err)
	}
	// Start heartbeats
	if err := n.StartHeartbeat(); err != nil {
		return fmt.Errorf("node heartbeating start failed: %v", err)
	}
	// TODO: Listen for service calls
	return nil
}
// clean runs the shutdown cleanup; currently it only logs that cleanup
// is happening.
func clean() {
	log.Debug("Cleaning...")
}
// main runs Main in the background and waits for either a fatal error
// from it or a termination signal, then cleans up and exits.
func main() {
	// Forward SIGINT/SIGTERM/SIGQUIT for a controlled shutdown.
	sigC := make(chan os.Signal, 1)
	signal.Notify(sigC, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
	errC := make(chan error)
	// Run main program
	go func() {
		if err := Main(); err != nil {
			errC <- err
		}
		// Fixed: removed a redundant bare `return` that was the last
		// statement of this function literal.
	}()
	// Wait until signal (ctr+c, SIGTERM...)
	var exitCode int
Waiter:
	for {
		select {
		// Wait for errors
		case err := <-errC:
			if err != nil {
				exitCode = 1
				break Waiter
			}
		// Wait for signal
		case <-sigC:
			break Waiter
		}
	}
	clean()
	os.Exit(exitCode)
}
|
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"os"
"syscall"
"github.com/minio/cli"
"github.com/minio/mc/pkg/probe"
)
var (
	// pipeFlags are the flags specific to the pipe command; the shared
	// I/O and global flags are appended where the command is declared.
	pipeFlags = []cli.Flag{
		cli.StringFlag{
			Name:  "encrypt",
			Usage: "encrypt objects (using server-side encryption with server managed keys)",
		},
		cli.StringFlag{
			Name:  "storage-class, sc",
			Usage: "set storage class for new object(s) on target",
		},
	}
)
// Display contents of a file.
// pipeCmd streams STDIN to a target object (or to stdout when no target
// is given); see the examples in the help template.
var pipeCmd = cli.Command{
	Name:         "pipe",
	Usage:        "stream STDIN to an object",
	Action:       mainPipe,
	OnUsageError: onUsageError,
	Before:       setGlobalsFromContext,
	Flags:        append(append(pipeFlags, ioFlags...), globalFlags...),
	CustomHelpTemplate: `NAME:
{{.HelpName}} - {{.Usage}}
USAGE:
{{.HelpName}} [FLAGS] [TARGET]
{{if .VisibleFlags}}
FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
ENVIRONMENT VARIABLES:
MC_ENCRYPT: list of comma delimited prefix values
MC_ENCRYPT_KEY: list of comma delimited prefix=secret values
EXAMPLES:
1. Write contents of stdin to a file on local filesystem.
{{.Prompt}} {{.HelpName}} /tmp/hello-world.go
2. Write contents of stdin to an object on Amazon S3 cloud storage.
{{.Prompt}} {{.HelpName}} s3/personalbuck/meeting-notes.txt
3. Copy an ISO image to an object on Amazon S3 cloud storage.
{{.Prompt}} cat debian-8.2.iso | {{.HelpName}} s3/opensource-isos/gnuos.iso
4. Stream MySQL database dump to Amazon S3 directly.
{{.Prompt}} mysqldump -u root -p ******* accountsdb | {{.HelpName}} s3/sql-backups/backups/accountsdb-oct-9-2015.sql
5. Write contents of stdin to an object on Amazon S3 cloud storage and assign REDUCED_REDUNDANCY storage-class to the uploaded object.
{{.Prompt}} {{.HelpName}} --storage-class REDUCED_REDUNDANCY s3/personalbuck/meeting-notes.txt
`,
}
// pipe streams stdin to the object at targetURL. With an empty targetURL
// it degrades to cat: stdin is copied to stdout. encKeyDB maps aliases
// to SSE key pairs; storageClass is applied to the uploaded object.
func pipe(targetURL string, encKeyDB map[string][]prefixSSEPair, storageClass string) *probe.Error {
	if targetURL == "" {
		// When no target is specified, pipe cat's stdin to stdout.
		return catOut(os.Stdin, -1).Trace()
	}
	alias, _ := url2Alias(targetURL)
	sseKey := getSSE(targetURL, encKeyDB[alias])
	// Stream from stdin to multiple objects until EOF.
	// Ignore size, since os.Stat() would not return proper size all the time
	// for local filesystem for example /proc files.
	opts := PutOptions{
		sse:          sseKey,
		storageClass: storageClass,
	}
	_, err := putTargetStreamWithURL(targetURL, os.Stdin, -1, opts)
	// TODO: See if this check is necessary.
	// EPIPE means stdin was closed by the user: treat it as a normal end
	// of input rather than an error.
	switch e := err.ToGoError().(type) {
	case *os.PathError:
		if e.Err == syscall.EPIPE {
			// stdin closed by the user. Gracefully exit.
			return nil
		}
	}
	return err.Trace(targetURL)
}
// check pipe input arguments.
// pipe accepts at most one target; anything more is a usage error and
// exits with code 1 after showing the help.
func checkPipeSyntax(ctx *cli.Context) {
	if len(ctx.Args()) > 1 {
		cli.ShowCommandHelpAndExit(ctx, "pipe", 1) // last argument is exit code.
	}
}
// mainPipe is the main entry point for pipe command.
// It validates the arguments and dispatches to pipe, either targeting
// stdout (no argument) or the single target URL.
func mainPipe(ctx *cli.Context) error {
	// Parse encryption keys per command.
	encKeyDB, err := getEncKeys(ctx)
	fatalIf(err, "Unable to parse encryption keys.")
	// validate pipe input arguments.
	checkPipeSyntax(ctx)
	if len(ctx.Args()) == 0 {
		err = pipe("", nil, ctx.String("storage-class"))
		fatalIf(err.Trace("stdout"), "Unable to write to one or more targets.")
	} else {
		// extract URLs.
		URLs := ctx.Args()
		err = pipe(URLs[0], encKeyDB, ctx.String("storage-class"))
		fatalIf(err.Trace(URLs[0]), "Unable to write to one or more targets.")
	}
	// Done.
	return nil
}
pipe: Add custom attributes and tags (#3706)
Support adding custom attributes and tags.
Examples included.
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"os"
"syscall"
"github.com/minio/cli"
"github.com/minio/mc/pkg/probe"
)
var (
	// pipeFlags are the flags specific to the pipe command; the shared
	// I/O and global flags are appended where the command is declared.
	pipeFlags = []cli.Flag{
		cli.StringFlag{
			Name:  "encrypt",
			Usage: "encrypt objects (using server-side encryption with server managed keys)",
		},
		cli.StringFlag{
			Name:  "storage-class, sc",
			Usage: "set storage class for new object(s) on target",
		},
		cli.StringFlag{
			Name:  "attr",
			Usage: "add custom metadata for the object",
		},
		cli.StringFlag{
			Name:  "tags",
			Usage: "apply tags to the uploaded objects",
		},
	}
)
// Display contents of a file.
// pipeCmd streams STDIN to a target object (or to stdout when no target
// is given), optionally attaching metadata and tags; see the examples
// in the help template.
var pipeCmd = cli.Command{
	Name:         "pipe",
	Usage:        "stream STDIN to an object",
	Action:       mainPipe,
	OnUsageError: onUsageError,
	Before:       setGlobalsFromContext,
	Flags:        append(append(pipeFlags, ioFlags...), globalFlags...),
	CustomHelpTemplate: `NAME:
{{.HelpName}} - {{.Usage}}
USAGE:
{{.HelpName}} [FLAGS] [TARGET]
{{if .VisibleFlags}}
FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
ENVIRONMENT VARIABLES:
MC_ENCRYPT: list of comma delimited prefix values
MC_ENCRYPT_KEY: list of comma delimited prefix=secret values
EXAMPLES:
1. Write contents of stdin to a file on local filesystem.
{{.Prompt}} {{.HelpName}} /tmp/hello-world.go
2. Write contents of stdin to an object on Amazon S3 cloud storage.
{{.Prompt}} {{.HelpName}} s3/personalbuck/meeting-notes.txt
3. Copy an ISO image to an object on Amazon S3 cloud storage.
{{.Prompt}} cat debian-8.2.iso | {{.HelpName}} s3/opensource-isos/gnuos.iso
4. Stream MySQL database dump to Amazon S3 directly.
{{.Prompt}} mysqldump -u root -p ******* accountsdb | {{.HelpName}} s3/sql-backups/backups/accountsdb-oct-9-2015.sql
5. Write contents of stdin to an object on Amazon S3 cloud storage and assign REDUCED_REDUNDANCY storage-class to the uploaded object.
{{.Prompt}} {{.HelpName}} --storage-class REDUCED_REDUNDANCY s3/personalbuck/meeting-notes.txt
6. Copy to MinIO cloud storage with specified metadata, separated by ";"
{{.Prompt}} cat music.mp3 | {{.HelpName}} --attr "Cache-Control=max-age=90000,min-fresh=9000;Artist=Unknown" play/mybucket/music.mp3
7. Set tags to the uploaded objects
{{.Prompt}} tar cvf - . | {{.HelpName}} --tags "category=backup" play/mybucket/backup.tar
`,
}
// pipe streams stdin to the object at targetURL. With an empty targetURL
// it degrades to cat: stdin is copied to stdout. encKeyDB maps aliases
// to SSE key pairs; storageClass and the meta key/value pairs are
// applied to the uploaded object.
func pipe(targetURL string, encKeyDB map[string][]prefixSSEPair, storageClass string, meta map[string]string) *probe.Error {
	if targetURL == "" {
		// When no target is specified, pipe cat's stdin to stdout.
		return catOut(os.Stdin, -1).Trace()
	}
	alias, _ := url2Alias(targetURL)
	sseKey := getSSE(targetURL, encKeyDB[alias])
	// Stream from stdin to multiple objects until EOF.
	// Ignore size, since os.Stat() would not return proper size all the time
	// for local filesystem for example /proc files.
	opts := PutOptions{
		sse:          sseKey,
		storageClass: storageClass,
		metadata:     meta,
	}
	_, err := putTargetStreamWithURL(targetURL, os.Stdin, -1, opts)
	// TODO: See if this check is necessary.
	// EPIPE means stdin was closed by the user: treat it as a normal end
	// of input rather than an error.
	switch e := err.ToGoError().(type) {
	case *os.PathError:
		if e.Err == syscall.EPIPE {
			// stdin closed by the user. Gracefully exit.
			return nil
		}
	}
	return err.Trace(targetURL)
}
// check pipe input arguments.
// pipe accepts at most one target; anything more is a usage error and
// exits with code 1 after showing the help.
func checkPipeSyntax(ctx *cli.Context) {
	if len(ctx.Args()) > 1 {
		cli.ShowCommandHelpAndExit(ctx, "pipe", 1) // last argument is exit code.
	}
}
// mainPipe is the main entry point for pipe command.
// It validates the arguments, collects metadata from --attr/--tags and
// dispatches to pipe, either targeting stdout (no argument) or the
// single target URL.
func mainPipe(ctx *cli.Context) error {
	// Parse encryption keys per command.
	encKeyDB, err := getEncKeys(ctx)
	fatalIf(err, "Unable to parse encryption keys.")
	// validate pipe input arguments.
	checkPipeSyntax(ctx)
	meta, err := getMetaDataEntry(ctx.String("attr"))
	fatalIf(err.Trace(""), "Unable to parse --attr value")
	// Tags ride along as the standard S3 tagging header.
	if tags := ctx.String("tags"); tags != "" {
		meta["X-Amz-Tagging"] = tags
	}
	if len(ctx.Args()) == 0 {
		err = pipe("", nil, ctx.String("storage-class"), meta)
		fatalIf(err.Trace("stdout"), "Unable to write to one or more targets.")
	} else {
		// extract URLs.
		URLs := ctx.Args()
		err = pipe(URLs[0], encKeyDB, ctx.String("storage-class"), meta)
		fatalIf(err.Trace(URLs[0]), "Unable to write to one or more targets.")
	}
	// Done.
	return nil
}
|
package main
// Benchmark the scanning of a file-system tree.
// Usage: scan [dirname [numScans]]
// dirname: the top of the directory tree to scan (default=/)
// numScans: the number of scans to run (default=1, infinite: < 0)
import (
"fmt"
"github.com/Symantec/Dominator/sub/fsrateio"
"github.com/Symantec/Dominator/sub/scanner"
"os"
"strconv"
"syscall"
"time"
)
// main scans the directory tree numScans times (forever when negative),
// printing throughput for each pass and comparing successive scans.
func main() {
	pathname := "/"
	var numScans int = 1
	var err error
	if len(os.Args) >= 2 {
		pathname = os.Args[1]
	}
	if len(os.Args) == 3 {
		numScans, err = strconv.Atoi(os.Args[2])
		if err != nil {
			fmt.Printf("Error! %s\n", err)
			return
		}
	}
	ctx, err := fsrateio.NewContext(pathname)
	if err != nil {
		fmt.Printf("Error! %s\n", err)
		return
	}
	fmt.Println(ctx)
	// Best effort: lower our scheduling priority so the scan does not
	// starve other processes; the error is deliberately ignored.
	syscall.Setpriority(syscall.PRIO_PROCESS, 0, 10)
	var prev_fs *scanner.FileSystem
	for iter := 1; numScans < 0 || iter <= numScans; iter++ {
		timeStart := time.Now()
		fs, err := scanner.ScanFileSystem(pathname, ctx)
		timeStop := time.Now()
		if err != nil {
			fmt.Printf("Error! %s\n", err)
			return
		}
		// Sum the length of every scanned inode to compute throughput.
		var tread uint64 = 0
		for _, inode := range fs.InodeTable {
			tread += inode.Length()
		}
		fmt.Printf("Total scanned: %s,\t", fsrateio.FormatBytes(tread))
		bytesPerSecond := uint64(float64(tread) /
			timeStop.Sub(timeStart).Seconds())
		fmt.Printf("%s/s\n", fsrateio.FormatBytes(bytesPerSecond))
		// Compare with the previous pass (differences are not logged:
		// the details writer is nil here).
		if prev_fs != nil {
			if !scanner.Compare(prev_fs, fs, nil) {
				fmt.Println("Scan results different from last run")
			}
		}
		prev_fs = fs
	}
}
Switch scan utility back to os.Stdout for compare logging.
package main
// Benchmark the scanning of a file-system tree.
// Usage: scan [dirname [numScans]]
// dirname: the top of the directory tree to scan (default=/)
// numScans: the number of scans to run (default=1, infinite: < 0)
import (
"fmt"
"github.com/Symantec/Dominator/sub/fsrateio"
"github.com/Symantec/Dominator/sub/scanner"
"os"
"strconv"
"syscall"
"time"
)
// main scans the directory tree numScans times (forever when negative),
// printing throughput for each pass and comparing successive scans,
// logging differences to stdout.
func main() {
	pathname := "/"
	numScans := 1
	var err error
	if len(os.Args) >= 2 {
		pathname = os.Args[1]
	}
	if len(os.Args) == 3 {
		numScans, err = strconv.Atoi(os.Args[2])
		if err != nil {
			fmt.Printf("Error! %s\n", err)
			return
		}
	}
	ctx, err := fsrateio.NewContext(pathname)
	if err != nil {
		fmt.Printf("Error! %s\n", err)
		return
	}
	fmt.Println(ctx)
	// Best effort: lower our scheduling priority so the scan does not
	// starve other processes; the error is deliberately ignored.
	syscall.Setpriority(syscall.PRIO_PROCESS, 0, 10)
	// Fixed: renamed prev_fs -> prevFS (Go naming uses MixedCaps, not
	// underscores).
	var prevFS *scanner.FileSystem
	for iter := 1; numScans < 0 || iter <= numScans; iter++ {
		timeStart := time.Now()
		fs, err := scanner.ScanFileSystem(pathname, ctx)
		timeStop := time.Now()
		if err != nil {
			fmt.Printf("Error! %s\n", err)
			return
		}
		// Sum the length of every scanned inode to compute throughput.
		var tread uint64
		for _, inode := range fs.InodeTable {
			tread += inode.Length()
		}
		fmt.Printf("Total scanned: %s,\t", fsrateio.FormatBytes(tread))
		bytesPerSecond := uint64(float64(tread) /
			timeStop.Sub(timeStart).Seconds())
		fmt.Printf("%s/s\n", fsrateio.FormatBytes(bytesPerSecond))
		// Compare with the previous pass, writing details to stdout.
		if prevFS != nil {
			if !scanner.Compare(prevFS, fs, os.Stdout) {
				fmt.Println("Scan results different from last run")
			}
		}
		prevFS = fs
	}
}
|
package main
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"log"
"net"
"net/http"
"os"
"os/exec"
"path/filepath"
"strconv"
"time"
"github.com/tus/tusd"
"github.com/tus/tusd/filestore"
"github.com/tus/tusd/limitedstore"
"github.com/tus/tusd/memorylocker"
"github.com/tus/tusd/prometheuscollector"
"github.com/tus/tusd/s3store"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/prometheus/client_golang/prometheus"
)
// Build-time version information, intended to be overridden via -ldflags.
var VersionName = "n/a"
var GitCommit = "n/a"
var BuildDate = "n/a"

// Command-line configuration, populated by init below.
var httpHost string
var httpPort string
var maxSize int64   // maximum size of a single upload, bytes (0 = unlimited)
var dir string      // local directory for the filestore backend
var storeSize int64 // total storage budget in bytes (0 = unlimited)
var basepath string // URL path prefix the tus handler is mounted at
var timeout int64   // per-read/write connection timeout, milliseconds
var s3Bucket string // when non-empty, use S3 instead of the local filestore
var hooksDir string // directory containing hook scripts (optional)
var version bool    // print version info and exit
var exposeMetrics bool

// Shared loggers; both carry the "[tusd]" prefix.
var stdout = log.New(os.Stdout, "[tusd] ", 0)
var stderr = log.New(os.Stderr, "[tusd] ", 0)
var hookInstalled bool // set in init once hooksDir has been resolved
var greeting string    // pre-rendered greeting page served at "/"

// openConnections counts currently open client connections; registered with
// Prometheus when -expose-metrics is enabled (see main).
var openConnections = prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "connections_open_total",
	Help: "Current number of open connections.",
})

// init registers and parses all command-line flags, resolves the hooks
// directory and pre-renders the greeting page.
func init() {
	flag.StringVar(&httpHost, "host", "0.0.0.0", "Host to bind HTTP server to")
	flag.StringVar(&httpPort, "port", "1080", "Port to bind HTTP server to")
	flag.Int64Var(&maxSize, "max-size", 0, "Maximum size of a single upload in bytes")
	flag.StringVar(&dir, "dir", "./data", "Directory to store uploads in")
	flag.Int64Var(&storeSize, "store-size", 0, "Size of space allowed for storage")
	flag.StringVar(&basepath, "base-path", "/files/", "Basepath of the HTTP server")
	flag.Int64Var(&timeout, "timeout", 30*1000, "Read timeout for connections in milliseconds. A zero value means that reads will not timeout")
	flag.StringVar(&s3Bucket, "s3-bucket", "", "Use AWS S3 with this bucket as storage backend (requires the AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_REGION environment variables to be set)")
	flag.StringVar(&hooksDir, "hooks-dir", "", "Directory to search for available hooks scripts")
	flag.BoolVar(&version, "version", false, "Print tusd version information")
	flag.BoolVar(&exposeMetrics, "expose-metrics", true, "Expose metrics about tusd usage")
	flag.Parse()
	// Resolve the hooks directory to an absolute path; the error from Abs is
	// deliberately ignored and hooks are enabled best-effort.
	if hooksDir != "" {
		hooksDir, _ = filepath.Abs(hooksDir)
		hookInstalled = true
		stdout.Printf("Using '%s' for hooks", hooksDir)
	}
	// Pre-render the greeting served at "/" (see displayGreeting). The raw
	// string must keep its exact layout; it is user-visible output.
	greeting = fmt.Sprintf(
		`Welcome to tusd
===============
Congratulations for setting up tusd! You are now part of the chosen elite and
able to experience the feeling of resumable uploads! We hope you are as excited
as we are (a lot)!
However, there is something you should be aware of: While you got tusd
running (you did an awesome job!), this is the root directory of the server
and tus requests are only accepted at the %s route.
So don't waste time, head over there and experience the future!
Version = %s
GitCommit = %s
BuildDate = %s`, basepath, VersionName, GitCommit, BuildDate)
}
// main wires up the configured storage backend, the tus handler, metrics,
// hooks and a timeout-enforcing listener, then serves HTTP until failure.
func main() {
	// Print version and other information and exit if the -version flag has
	// been passed.
	if version {
		fmt.Printf("Version: %s\nCommit: %s\nDate: %s\n", VersionName, GitCommit, BuildDate)
		return
	}

	// Attempt to use S3 as a backend if the -s3-bucket option has been
	// supplied. If not, we default to storing them locally on disk.
	composer := tusd.NewStoreComposer()
	if s3Bucket == "" {
		stdout.Printf("Using '%s' as directory storage.\n", dir)
		if err := os.MkdirAll(dir, os.FileMode(0775)); err != nil {
			stderr.Fatalf("Unable to ensure directory exists: %s", err)
		}
		store := filestore.New(dir)
		store.UseIn(composer)
	} else {
		stdout.Printf("Using 's3://%s' as S3 bucket for storage.\n", s3Bucket)
		// Derive credentials from AWS_SECRET_ACCESS_KEY, AWS_ACCESS_KEY_ID and
		// AWS_REGION environment variables.
		credentials := aws.NewConfig().WithCredentials(credentials.NewEnvCredentials())
		store := s3store.New(s3Bucket, s3.New(session.New(), credentials))
		store.UseIn(composer)
		// S3 uploads need locking; keep the locks in memory.
		locker := memorylocker.New()
		locker.UseIn(composer)
	}

	if storeSize > 0 {
		limitedstore.New(storeSize, composer.Core, composer.Terminater).UseIn(composer)
		stdout.Printf("Using %.2fMB as storage size.\n", float64(storeSize)/1024/1024)
		// We need to ensure that a single upload can fit into the storage size
		if maxSize > storeSize || maxSize == 0 {
			maxSize = storeSize
		}
	}

	stdout.Printf("Using %.2fMB as maximum size.\n", float64(maxSize)/1024/1024)

	handler, err := tusd.NewHandler(tusd.Config{
		MaxSize:                 maxSize,
		BasePath:                basepath,
		StoreComposer:           composer,
		NotifyCompleteUploads:   true,
		NotifyTerminatedUploads: true,
	})
	if err != nil {
		stderr.Fatalf("Unable to create handler: %s", err)
	}

	address := httpHost + ":" + httpPort
	stdout.Printf("Using %s as address to listen.\n", address)
	// Fix: the capabilities string is data, not a format string. Passing it
	// as the Printf format (as before) would mangle any literal '%' and is
	// flagged by go vet (non-constant format string).
	stdout.Println(composer.Capabilities())

	// Forward upload lifecycle events to the hook scripts.
	go func() {
		for {
			select {
			case info := <-handler.CompleteUploads:
				invokeHook("post-finish", info)
			case info := <-handler.TerminatedUploads:
				invokeHook("post-terminate", info)
			}
		}
	}()

	if exposeMetrics {
		prometheus.MustRegister(openConnections)
		prometheus.MustRegister(prometheuscollector.New(handler.Metrics))
		http.Handle("/metrics", prometheus.Handler())
	}

	// Do not display the greeting if the tusd handler will be mounted at the
	// root path. Else this would cause a "multiple registrations for /" panic.
	if basepath != "/" {
		http.HandleFunc("/", displayGreeting)
	}

	http.Handle(basepath, http.StripPrefix(basepath, handler))

	timeoutDuration := time.Duration(timeout) * time.Millisecond
	listener, err := NewListener(address, timeoutDuration, timeoutDuration)
	if err != nil {
		stderr.Fatalf("Unable to create listener: %s", err)
	}
	if err = http.Serve(listener, nil); err != nil {
		stderr.Fatalf("Unable to serve: %s", err)
	}
}
// invokeHook logs the upload event and, when a hooks directory is
// configured, asynchronously executes the matching hook script. The upload
// details are passed via TUS_ID/TUS_SIZE environment variables and as JSON
// on the script's stdin.
func invokeHook(name string, info tusd.FileInfo) {
	switch name {
	case "post-finish":
		stdout.Printf("Upload %s (%d bytes) finished\n", info.ID, info.Size)
	case "post-terminate":
		stdout.Printf("Upload %s terminated\n", info.ID)
	}
	if !hookInstalled {
		return
	}
	stdout.Printf("Invoking %s hook…\n", name)
	// Fix: build the script path portably instead of concatenating with a
	// hard-coded '/' separator.
	cmd := exec.Command(filepath.Join(hooksDir, name))
	env := os.Environ()
	env = append(env, "TUS_ID="+info.ID)
	env = append(env, "TUS_SIZE="+strconv.FormatInt(info.Size, 10))
	jsonInfo, err := json.Marshal(info)
	if err != nil {
		// Best effort: log and continue; the hook still runs, with empty stdin.
		stderr.Printf("Error encoding JSON for hook: %s", err)
	}
	cmd.Stdin = bytes.NewReader(jsonInfo)
	cmd.Env = env
	cmd.Dir = hooksDir
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	// Run the hook in the background so slow scripts don't block the event
	// loop in main.
	go func() {
		if err := cmd.Run(); err != nil {
			stderr.Printf("Error running %s hook for %s: %s", name, info.ID, err)
		}
	}()
}
// displayGreeting serves the greeting page pre-rendered in init.
func displayGreeting(w http.ResponseWriter, r *http.Request) {
	fmt.Fprint(w, greeting)
}
// Listener wraps a net.Listener, and gives a place to store the timeout
// parameters. On Accept, it will wrap the net.Conn with our own Conn for us.
// Original implementation taken from https://gist.github.com/jbardin/9663312
// Thanks! <3
type Listener struct {
	net.Listener
	ReadTimeout  time.Duration // applied before every Read on accepted conns; 0 disables
	WriteTimeout time.Duration // applied before every Write on accepted conns; 0 disables
}
// Accept waits for the next connection and wraps it in a Conn that applies
// the configured read/write deadlines.
func (l *Listener) Accept() (net.Conn, error) {
	c, err := l.Listener.Accept()
	if err != nil {
		return nil, err
	}
	// Fix: Gauge.Inc is already safe for concurrent use, so the previous
	// `go openConnections.Inc()` only added goroutine-scheduling overhead.
	openConnections.Inc()
	tc := &Conn{
		Conn:         c,
		ReadTimeout:  l.ReadTimeout,
		WriteTimeout: l.WriteTimeout,
	}
	return tc, nil
}
// Conn wraps a net.Conn, and sets a deadline for every read
// and write operation.
type Conn struct {
	net.Conn
	ReadTimeout  time.Duration // 0 disables the read deadline
	WriteTimeout time.Duration // 0 disables the write deadline
}
// Read applies the configured read deadline (or clears it when the timeout
// is zero) before delegating to the wrapped connection.
func (c *Conn) Read(b []byte) (int, error) {
	deadline := time.Time{}
	if c.ReadTimeout > 0 {
		deadline = time.Now().Add(c.ReadTimeout)
	}
	if err := c.Conn.SetReadDeadline(deadline); err != nil {
		return 0, err
	}
	return c.Conn.Read(b)
}
// Write applies the configured write deadline (or clears it when the
// timeout is zero) before delegating to the wrapped connection.
func (c *Conn) Write(b []byte) (int, error) {
	deadline := time.Time{}
	if c.WriteTimeout > 0 {
		deadline = time.Now().Add(c.WriteTimeout)
	}
	if err := c.Conn.SetWriteDeadline(deadline); err != nil {
		return 0, err
	}
	return c.Conn.Write(b)
}
// Close decrements the open-connection gauge and closes the underlying
// connection. Gauge.Dec is concurrency-safe, so the previous
// `go openConnections.Dec()` goroutine was unnecessary overhead.
func (c *Conn) Close() error {
	openConnections.Dec()
	return c.Conn.Close()
}
// NewListener returns a TCP listener on addr whose accepted connections
// enforce the given read and write timeouts.
func NewListener(addr string, readTimeout, writeTimeout time.Duration) (net.Listener, error) {
	inner, err := net.Listen("tcp", addr)
	if err != nil {
		return nil, err
	}
	return &Listener{
		Listener:     inner,
		ReadTimeout:  readTimeout,
		WriteTimeout: writeTimeout,
	}, nil
}
feat(cmd/tusd): Print the base path (#50)
Print the base path on start
package main
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"log"
"net"
"net/http"
"os"
"os/exec"
"path/filepath"
"strconv"
"time"
"github.com/tus/tusd"
"github.com/tus/tusd/filestore"
"github.com/tus/tusd/limitedstore"
"github.com/tus/tusd/memorylocker"
"github.com/tus/tusd/prometheuscollector"
"github.com/tus/tusd/s3store"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/prometheus/client_golang/prometheus"
)
// Build-time version information, intended to be overridden via -ldflags.
var VersionName = "n/a"
var GitCommit = "n/a"
var BuildDate = "n/a"

// Command-line configuration, populated by init below.
var httpHost string
var httpPort string
var maxSize int64   // maximum size of a single upload, bytes (0 = unlimited)
var dir string      // local directory for the filestore backend
var storeSize int64 // total storage budget in bytes (0 = unlimited)
var basepath string // URL path prefix the tus handler is mounted at
var timeout int64   // per-read/write connection timeout, milliseconds
var s3Bucket string // when non-empty, use S3 instead of the local filestore
var hooksDir string // directory containing hook scripts (optional)
var version bool    // print version info and exit
var exposeMetrics bool

// Shared loggers; both carry the "[tusd]" prefix.
var stdout = log.New(os.Stdout, "[tusd] ", 0)
var stderr = log.New(os.Stderr, "[tusd] ", 0)
var hookInstalled bool // set in init once hooksDir has been resolved
var greeting string    // pre-rendered greeting page served at "/"

// openConnections counts currently open client connections; registered with
// Prometheus when -expose-metrics is enabled (see main).
var openConnections = prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "connections_open_total",
	Help: "Current number of open connections.",
})

// init registers and parses all command-line flags, resolves the hooks
// directory and pre-renders the greeting page.
func init() {
	flag.StringVar(&httpHost, "host", "0.0.0.0", "Host to bind HTTP server to")
	flag.StringVar(&httpPort, "port", "1080", "Port to bind HTTP server to")
	flag.Int64Var(&maxSize, "max-size", 0, "Maximum size of a single upload in bytes")
	flag.StringVar(&dir, "dir", "./data", "Directory to store uploads in")
	flag.Int64Var(&storeSize, "store-size", 0, "Size of space allowed for storage")
	flag.StringVar(&basepath, "base-path", "/files/", "Basepath of the HTTP server")
	flag.Int64Var(&timeout, "timeout", 30*1000, "Read timeout for connections in milliseconds. A zero value means that reads will not timeout")
	flag.StringVar(&s3Bucket, "s3-bucket", "", "Use AWS S3 with this bucket as storage backend (requires the AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_REGION environment variables to be set)")
	flag.StringVar(&hooksDir, "hooks-dir", "", "Directory to search for available hooks scripts")
	flag.BoolVar(&version, "version", false, "Print tusd version information")
	flag.BoolVar(&exposeMetrics, "expose-metrics", true, "Expose metrics about tusd usage")
	flag.Parse()
	// Resolve the hooks directory to an absolute path; the error from Abs is
	// deliberately ignored and hooks are enabled best-effort.
	if hooksDir != "" {
		hooksDir, _ = filepath.Abs(hooksDir)
		hookInstalled = true
		stdout.Printf("Using '%s' for hooks", hooksDir)
	}
	// Pre-render the greeting served at "/" (see displayGreeting). The raw
	// string must keep its exact layout; it is user-visible output.
	greeting = fmt.Sprintf(
		`Welcome to tusd
===============
Congratulations for setting up tusd! You are now part of the chosen elite and
able to experience the feeling of resumable uploads! We hope you are as excited
as we are (a lot)!
However, there is something you should be aware of: While you got tusd
running (you did an awesome job!), this is the root directory of the server
and tus requests are only accepted at the %s route.
So don't waste time, head over there and experience the future!
Version = %s
GitCommit = %s
BuildDate = %s`, basepath, VersionName, GitCommit, BuildDate)
}
// main wires up the configured storage backend, the tus handler, metrics,
// hooks and a timeout-enforcing listener, then serves HTTP until failure.
func main() {
	// Print version and other information and exit if the -version flag has
	// been passed.
	if version {
		fmt.Printf("Version: %s\nCommit: %s\nDate: %s\n", VersionName, GitCommit, BuildDate)
		return
	}

	// Attempt to use S3 as a backend if the -s3-bucket option has been
	// supplied. If not, we default to storing them locally on disk.
	composer := tusd.NewStoreComposer()
	if s3Bucket == "" {
		stdout.Printf("Using '%s' as directory storage.\n", dir)
		if err := os.MkdirAll(dir, os.FileMode(0775)); err != nil {
			stderr.Fatalf("Unable to ensure directory exists: %s", err)
		}
		store := filestore.New(dir)
		store.UseIn(composer)
	} else {
		stdout.Printf("Using 's3://%s' as S3 bucket for storage.\n", s3Bucket)
		// Derive credentials from AWS_SECRET_ACCESS_KEY, AWS_ACCESS_KEY_ID and
		// AWS_REGION environment variables.
		credentials := aws.NewConfig().WithCredentials(credentials.NewEnvCredentials())
		store := s3store.New(s3Bucket, s3.New(session.New(), credentials))
		store.UseIn(composer)
		// S3 uploads need locking; keep the locks in memory.
		locker := memorylocker.New()
		locker.UseIn(composer)
	}

	if storeSize > 0 {
		limitedstore.New(storeSize, composer.Core, composer.Terminater).UseIn(composer)
		stdout.Printf("Using %.2fMB as storage size.\n", float64(storeSize)/1024/1024)
		// We need to ensure that a single upload can fit into the storage size
		if maxSize > storeSize || maxSize == 0 {
			maxSize = storeSize
		}
	}

	stdout.Printf("Using %.2fMB as maximum size.\n", float64(maxSize)/1024/1024)

	handler, err := tusd.NewHandler(tusd.Config{
		MaxSize:                 maxSize,
		BasePath:                basepath,
		StoreComposer:           composer,
		NotifyCompleteUploads:   true,
		NotifyTerminatedUploads: true,
	})
	if err != nil {
		stderr.Fatalf("Unable to create handler: %s", err)
	}

	address := httpHost + ":" + httpPort
	stdout.Printf("Using %s as address to listen.\n", address)
	stdout.Printf("Using %s as the base path.\n", basepath)
	// Fix: the capabilities string is data, not a format string. Passing it
	// as the Printf format (as before) would mangle any literal '%' and is
	// flagged by go vet (non-constant format string).
	stdout.Println(composer.Capabilities())

	// Forward upload lifecycle events to the hook scripts.
	go func() {
		for {
			select {
			case info := <-handler.CompleteUploads:
				invokeHook("post-finish", info)
			case info := <-handler.TerminatedUploads:
				invokeHook("post-terminate", info)
			}
		}
	}()

	if exposeMetrics {
		prometheus.MustRegister(openConnections)
		prometheus.MustRegister(prometheuscollector.New(handler.Metrics))
		http.Handle("/metrics", prometheus.Handler())
	}

	// Do not display the greeting if the tusd handler will be mounted at the
	// root path. Else this would cause a "multiple registrations for /" panic.
	if basepath != "/" {
		http.HandleFunc("/", displayGreeting)
	}

	http.Handle(basepath, http.StripPrefix(basepath, handler))

	timeoutDuration := time.Duration(timeout) * time.Millisecond
	listener, err := NewListener(address, timeoutDuration, timeoutDuration)
	if err != nil {
		stderr.Fatalf("Unable to create listener: %s", err)
	}
	if err = http.Serve(listener, nil); err != nil {
		stderr.Fatalf("Unable to serve: %s", err)
	}
}
// invokeHook logs the upload event and, when a hooks directory is
// configured, asynchronously executes the matching hook script. The upload
// details are passed via TUS_ID/TUS_SIZE environment variables and as JSON
// on the script's stdin.
func invokeHook(name string, info tusd.FileInfo) {
	switch name {
	case "post-finish":
		stdout.Printf("Upload %s (%d bytes) finished\n", info.ID, info.Size)
	case "post-terminate":
		stdout.Printf("Upload %s terminated\n", info.ID)
	}
	if !hookInstalled {
		return
	}
	stdout.Printf("Invoking %s hook…\n", name)
	// Fix: build the script path portably instead of concatenating with a
	// hard-coded '/' separator.
	cmd := exec.Command(filepath.Join(hooksDir, name))
	env := os.Environ()
	env = append(env, "TUS_ID="+info.ID)
	env = append(env, "TUS_SIZE="+strconv.FormatInt(info.Size, 10))
	jsonInfo, err := json.Marshal(info)
	if err != nil {
		// Best effort: log and continue; the hook still runs, with empty stdin.
		stderr.Printf("Error encoding JSON for hook: %s", err)
	}
	cmd.Stdin = bytes.NewReader(jsonInfo)
	cmd.Env = env
	cmd.Dir = hooksDir
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	// Run the hook in the background so slow scripts don't block the event
	// loop in main.
	go func() {
		if err := cmd.Run(); err != nil {
			stderr.Printf("Error running %s hook for %s: %s", name, info.ID, err)
		}
	}()
}
// displayGreeting serves the greeting page pre-rendered in init. The Write
// error is deliberately ignored; there is no useful recovery here.
func displayGreeting(w http.ResponseWriter, r *http.Request) {
	w.Write([]byte(greeting))
}
// Listener wraps a net.Listener, and gives a place to store the timeout
// parameters. On Accept, it will wrap the net.Conn with our own Conn for us.
// Original implementation taken from https://gist.github.com/jbardin/9663312
// Thanks! <3
type Listener struct {
	net.Listener
	ReadTimeout  time.Duration // applied before every Read on accepted conns; 0 disables
	WriteTimeout time.Duration // applied before every Write on accepted conns; 0 disables
}
// Accept waits for the next connection and wraps it in a Conn that applies
// the configured read/write deadlines.
func (l *Listener) Accept() (net.Conn, error) {
	c, err := l.Listener.Accept()
	if err != nil {
		return nil, err
	}
	// Fix: Gauge.Inc is already safe for concurrent use, so the previous
	// `go openConnections.Inc()` only added goroutine-scheduling overhead.
	openConnections.Inc()
	tc := &Conn{
		Conn:         c,
		ReadTimeout:  l.ReadTimeout,
		WriteTimeout: l.WriteTimeout,
	}
	return tc, nil
}
// Conn wraps a net.Conn, and sets a deadline for every read
// and write operation.
type Conn struct {
	net.Conn
	ReadTimeout  time.Duration // 0 disables the read deadline
	WriteTimeout time.Duration // 0 disables the write deadline
}

// Read sets (or clears, for a zero timeout) the read deadline before
// delegating to the wrapped connection.
func (c *Conn) Read(b []byte) (int, error) {
	var err error
	if c.ReadTimeout > 0 {
		err = c.Conn.SetReadDeadline(time.Now().Add(c.ReadTimeout))
	} else {
		// Zero value clears any previously-set deadline.
		err = c.Conn.SetReadDeadline(time.Time{})
	}
	if err != nil {
		return 0, err
	}
	return c.Conn.Read(b)
}

// Write sets (or clears, for a zero timeout) the write deadline before
// delegating to the wrapped connection.
func (c *Conn) Write(b []byte) (int, error) {
	var err error
	if c.WriteTimeout > 0 {
		err = c.Conn.SetWriteDeadline(time.Now().Add(c.WriteTimeout))
	} else {
		// Zero value clears any previously-set deadline.
		err = c.Conn.SetWriteDeadline(time.Time{})
	}
	if err != nil {
		return 0, err
	}
	return c.Conn.Write(b)
}
// Close decrements the open-connection gauge and closes the underlying
// connection. Gauge.Dec is concurrency-safe, so the previous
// `go openConnections.Dec()` goroutine was unnecessary overhead.
func (c *Conn) Close() error {
	openConnections.Dec()
	return c.Conn.Close()
}
// NewListener returns a TCP listener on addr whose accepted connections
// enforce the given read and write timeouts (see Listener.Accept).
func NewListener(addr string, readTimeout, writeTimeout time.Duration) (net.Listener, error) {
	l, err := net.Listen("tcp", addr)
	if err != nil {
		return nil, err
	}
	tl := &Listener{
		Listener:     l,
		ReadTimeout:  readTimeout,
		WriteTimeout: writeTimeout,
	}
	return tl, nil
}
|
/*
This file is part of go-ethereum
go-ethereum is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
go-ethereum is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* @authors
* Jeffrey Wilcke <i@jev.io>
* Viktor Tron <viktor@ethdev.com>
*/
package utils
import (
"bufio"
"fmt"
"io"
"os"
"os/signal"
"regexp"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/rlp"
"github.com/peterh/liner"
)
// interruptCallbacks holds the shutdown hooks run when an interrupt signal
// is handled by HandleInterrupt.
var interruptCallbacks = []func(os.Signal){}

// RegisterInterrupt registers cb to be run, in registration order, when an
// interrupt signal is caught.
func RegisterInterrupt(cb func(os.Signal)) {
	interruptCallbacks = append(interruptCallbacks, cb)
}
// HandleInterrupt starts a goroutine that runs the registered interrupt
// callbacks (in registration order) each time SIGINT is received.
func HandleInterrupt() {
	c := make(chan os.Signal, 1)
	// Fix: register with the signal package before starting the goroutine.
	// Previously Notify ran inside the goroutine, so a signal delivered
	// before the goroutine was scheduled could be lost.
	signal.Notify(c, os.Interrupt)
	go func() {
		for sig := range c {
			glog.V(logger.Error).Infof("Shutting down (%v) ... \n", sig)
			RunInterruptCallbacks(sig)
		}
	}()
}
// RunInterruptCallbacks invokes every callback registered via
// RegisterInterrupt, in registration order, with the given signal.
func RunInterruptCallbacks(sig os.Signal) {
	for i := range interruptCallbacks {
		interruptCallbacks[i](sig)
	}
}
// openLogFile opens (creating and appending as needed) the log file named
// filename, resolved against Datadir. It panics when the file cannot be
// opened.
func openLogFile(Datadir string, filename string) *os.File {
	path := common.AbsolutePath(Datadir, filename)
	file, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err == nil {
		return file
	}
	panic(fmt.Sprintf("error opening log file '%s': %v", filename, err))
}
// PromptConfirm asks a yes/no question on the terminal and reports whether
// the user answered with something starting with "y" or "Y".
func PromptConfirm(prompt string) (bool, error) {
	var (
		input string
		err   error
	)
	prompt = prompt + " [y/N] "
	if liner.TerminalSupported() {
		lr := liner.NewLiner()
		defer lr.Close()
		input, err = lr.Prompt(prompt)
	} else {
		fmt.Print(prompt)
		input, err = bufio.NewReader(os.Stdin).ReadString('\n')
		fmt.Println()
	}
	// Fix: the original ended with an unreachable `return false, err`, so a
	// prompt/read error was silently swallowed. Surface it to the caller.
	if err != nil {
		return false, err
	}
	if len(input) > 0 && strings.ToUpper(input[:1]) == "Y" {
		return true, nil
	}
	return false, nil
}
// PromptPassword reads a password from the terminal. With a supported
// terminal the input is not echoed; otherwise a warning is printed first
// (when warnTerm is set) and the password is read, echoed, from stdin.
// In the fallback path the returned string keeps its trailing newline.
func PromptPassword(prompt string, warnTerm bool) (string, error) {
	if liner.TerminalSupported() {
		lr := liner.NewLiner()
		defer lr.Close()
		return lr.PasswordPrompt(prompt)
	}
	if warnTerm {
		fmt.Println("!! Unsupported terminal, password will be echoed.")
	}
	fmt.Print(prompt)
	input, err := bufio.NewReader(os.Stdin).ReadString('\n')
	fmt.Println()
	return input, err
}
func initDataDir(Datadir string) {
_, err := os.Stat(Datadir)
if err != nil {
if os.IsNotExist(err) {
fmt.Printf("Data directory '%s' doesn't exist, creating it\n", Datadir)
os.Mkdir(Datadir, 0777)
}
}
}
// Fatalf formats a message to standard error and exits the program.
// The message is also printed to standard output if standard error
// is redirected to a different file.
func Fatalf(format string, args ...interface{}) {
	w := io.MultiWriter(os.Stdout, os.Stderr)
	outf, _ := os.Stdout.Stat()
	errf, _ := os.Stderr.Stat()
	// When stdout and stderr refer to the same file (e.g. an interactive
	// terminal), write only once to avoid a duplicated message.
	if outf != nil && errf != nil && os.SameFile(outf, errf) {
		w = os.Stderr
	}
	fmt.Fprintf(w, "Fatal: "+format+"\n", args...)
	logger.Flush()
	os.Exit(1)
}
// StartEthereum starts the given node and registers an interrupt handler
// that stops it and flushes the logs on shutdown. Startup failure is fatal.
func StartEthereum(ethereum *eth.Ethereum) {
	glog.V(logger.Info).Infoln("Starting", ethereum.Name())
	if err := ethereum.Start(); err != nil {
		Fatalf("Error starting Ethereum: %v", err)
	}
	RegisterInterrupt(func(sig os.Signal) {
		ethereum.Stop()
		logger.Flush()
	})
}

// StartEthereumForTest starts the node via StartForTest, with the same
// interrupt-driven shutdown as StartEthereum.
func StartEthereumForTest(ethereum *eth.Ethereum) {
	glog.V(logger.Info).Infoln("Starting ", ethereum.Name())
	ethereum.StartForTest()
	RegisterInterrupt(func(sig os.Signal) {
		ethereum.Stop()
		logger.Flush()
	})
}
// dataSplitRE splits transaction data on newlines or whitespace. Compiled
// once at package scope instead of on every FormatTransactionData call.
var dataSplitRE = regexp.MustCompile(`\n|\s`)

// FormatTransactionData converts a whitespace-separated data string into the
// concatenated byte representation produced by common.FormatData.
func FormatTransactionData(data string) []byte {
	d := common.StringToByteFunc(data, func(s string) (ret []byte) {
		for _, dataItem := range dataSplitRE.Split(s, 1000000000) {
			ret = append(ret, common.FormatData(dataItem)...)
		}
		return
	})
	return d
}
// ImportChain reads RLP-encoded blocks from the file fn and inserts them
// into chain in batches of batchSize. A SIGINT stops the import at the next
// batch boundary.
func ImportChain(chain *core.ChainManager, fn string) error {
	// Watch for Ctrl-C while the import is running.
	// If a signal is received, the import will stop at the next batch.
	interrupt := make(chan os.Signal, 1)
	stop := make(chan struct{})
	signal.Notify(interrupt, os.Interrupt)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	go func() {
		if _, ok := <-interrupt; ok {
			glog.Info("caught interrupt during import, will stop at next batch")
		}
		// Closing stop (whether due to a signal or the deferred close of
		// interrupt at function exit) makes checkInterrupt return true.
		close(stop)
	}()
	checkInterrupt := func() bool {
		select {
		case <-stop:
			return true
		default:
			return false
		}
	}

	glog.Infoln("Importing blockchain", fn)
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()
	stream := rlp.NewStream(fh, 0)

	// Run actual the import.
	batchSize := 2500
	blocks := make(types.Blocks, batchSize)
	n := 0
	for batch := 0; ; batch++ {
		// Load a batch of RLP blocks.
		if checkInterrupt() {
			return fmt.Errorf("interrupted")
		}
		i := 0
		for ; i < batchSize; i++ {
			var b types.Block
			if err := stream.Decode(&b); err == io.EOF {
				break
			} else if err != nil {
				return fmt.Errorf("at block %d: %v", n, err)
			}
			blocks[i] = &b
			n++
		}
		// Clean EOF with an empty batch: import complete.
		if i == 0 {
			break
		}
		// Import the batch.
		if checkInterrupt() {
			return fmt.Errorf("interrupted")
		}
		// Skip batches whose blocks are all already known (resumed import).
		if hasAllBlocks(chain, blocks[:i]) {
			glog.Infof("skipping batch %d, all blocks present [%x / %x]",
				batch, blocks[0].Hash().Bytes()[:4], blocks[i-1].Hash().Bytes()[:4])
			continue
		}
		if _, err := chain.InsertChain(blocks[:i]); err != nil {
			return fmt.Errorf("invalid block %d: %v", n, err)
		}
	}
	return nil
}
// hasAllBlocks reports whether every block in bs is already present in chain.
func hasAllBlocks(chain *core.ChainManager, bs []*types.Block) bool {
	for i := range bs {
		if !chain.HasBlock(bs[i].Hash()) {
			return false
		}
	}
	return true
}
// ExportChain writes the entire blockchain managed by chainmgr to the file
// fn, creating or truncating it as needed.
func ExportChain(chainmgr *core.ChainManager, fn string) error {
	glog.Infoln("Exporting blockchain to", fn)
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()
	if err := chainmgr.Export(fh); err != nil {
		return err
	}
	glog.Infoln("Exported blockchain to", fn)
	return nil
}
cmd/utils: use constant for import batch size
/*
This file is part of go-ethereum
go-ethereum is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
go-ethereum is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* @authors
* Jeffrey Wilcke <i@jev.io>
* Viktor Tron <viktor@ethdev.com>
*/
package utils
import (
"bufio"
"fmt"
"io"
"os"
"os/signal"
"regexp"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/rlp"
"github.com/peterh/liner"
)
const (
	// importBatchSize is the number of blocks decoded and inserted per batch
	// by ImportChain.
	importBatchSize = 2500
)

// interruptCallbacks holds the shutdown hooks run when an interrupt signal
// is handled by HandleInterrupt.
var interruptCallbacks = []func(os.Signal){}

// RegisterInterrupt registers cb to be run, in registration order, when an
// interrupt signal is caught.
func RegisterInterrupt(cb func(os.Signal)) {
	interruptCallbacks = append(interruptCallbacks, cb)
}
// HandleInterrupt starts a goroutine that runs the registered interrupt
// callbacks (in registration order) each time SIGINT is received.
func HandleInterrupt() {
	c := make(chan os.Signal, 1)
	// Fix: register with the signal package before starting the goroutine.
	// Previously Notify ran inside the goroutine, so a signal delivered
	// before the goroutine was scheduled could be lost.
	signal.Notify(c, os.Interrupt)
	go func() {
		for sig := range c {
			glog.V(logger.Error).Infof("Shutting down (%v) ... \n", sig)
			RunInterruptCallbacks(sig)
		}
	}()
}
// RunInterruptCallbacks invokes every callback registered via
// RegisterInterrupt, in registration order, with the given signal.
func RunInterruptCallbacks(sig os.Signal) {
	for _, cb := range interruptCallbacks {
		cb(sig)
	}
}

// openLogFile opens (creating and appending as needed) the log file named
// filename, resolved against Datadir. Panics when the file cannot be opened.
func openLogFile(Datadir string, filename string) *os.File {
	path := common.AbsolutePath(Datadir, filename)
	file, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		panic(fmt.Sprintf("error opening log file '%s': %v", filename, err))
	}
	return file
}
// PromptConfirm asks a yes/no question on the terminal and reports whether
// the user answered with something starting with "y" or "Y".
func PromptConfirm(prompt string) (bool, error) {
	var (
		input string
		err   error
	)
	prompt = prompt + " [y/N] "
	if liner.TerminalSupported() {
		lr := liner.NewLiner()
		defer lr.Close()
		input, err = lr.Prompt(prompt)
	} else {
		fmt.Print(prompt)
		input, err = bufio.NewReader(os.Stdin).ReadString('\n')
		fmt.Println()
	}
	// Fix: the original ended with an unreachable `return false, err`, so a
	// prompt/read error was silently swallowed. Surface it to the caller.
	if err != nil {
		return false, err
	}
	if len(input) > 0 && strings.ToUpper(input[:1]) == "Y" {
		return true, nil
	}
	return false, nil
}
// PromptPassword reads a password from the terminal. With a supported
// terminal the input is not echoed; otherwise a warning is printed first
// (when warnTerm is set) and the password is read, echoed, from stdin.
// In the fallback path the returned string keeps its trailing newline.
func PromptPassword(prompt string, warnTerm bool) (string, error) {
	if liner.TerminalSupported() {
		lr := liner.NewLiner()
		defer lr.Close()
		return lr.PasswordPrompt(prompt)
	}
	if warnTerm {
		fmt.Println("!! Unsupported terminal, password will be echoed.")
	}
	fmt.Print(prompt)
	input, err := bufio.NewReader(os.Stdin).ReadString('\n')
	fmt.Println()
	return input, err
}
func initDataDir(Datadir string) {
_, err := os.Stat(Datadir)
if err != nil {
if os.IsNotExist(err) {
fmt.Printf("Data directory '%s' doesn't exist, creating it\n", Datadir)
os.Mkdir(Datadir, 0777)
}
}
}
// Fatalf formats a message to standard error and exits the program.
// The message is also printed to standard output if standard error
// is redirected to a different file.
func Fatalf(format string, args ...interface{}) {
	w := io.MultiWriter(os.Stdout, os.Stderr)
	outf, _ := os.Stdout.Stat()
	errf, _ := os.Stderr.Stat()
	// When stdout and stderr refer to the same file (e.g. an interactive
	// terminal), write only once to avoid a duplicated message.
	if outf != nil && errf != nil && os.SameFile(outf, errf) {
		w = os.Stderr
	}
	fmt.Fprintf(w, "Fatal: "+format+"\n", args...)
	logger.Flush()
	os.Exit(1)
}

// StartEthereum starts the given node and registers an interrupt handler
// that stops it and flushes the logs on shutdown. Startup failure is fatal.
func StartEthereum(ethereum *eth.Ethereum) {
	glog.V(logger.Info).Infoln("Starting", ethereum.Name())
	if err := ethereum.Start(); err != nil {
		Fatalf("Error starting Ethereum: %v", err)
	}
	RegisterInterrupt(func(sig os.Signal) {
		ethereum.Stop()
		logger.Flush()
	})
}

// StartEthereumForTest starts the node via StartForTest, with the same
// interrupt-driven shutdown as StartEthereum.
func StartEthereumForTest(ethereum *eth.Ethereum) {
	glog.V(logger.Info).Infoln("Starting ", ethereum.Name())
	ethereum.StartForTest()
	RegisterInterrupt(func(sig os.Signal) {
		ethereum.Stop()
		logger.Flush()
	})
}
// txDataSplitRE splits transaction data on newlines or whitespace. Compiled
// once at package scope instead of on every FormatTransactionData call.
var txDataSplitRE = regexp.MustCompile(`\n|\s`)

// FormatTransactionData converts a whitespace-separated data string into the
// concatenated byte representation produced by common.FormatData.
func FormatTransactionData(data string) []byte {
	d := common.StringToByteFunc(data, func(s string) (ret []byte) {
		for _, dataItem := range txDataSplitRE.Split(s, 1000000000) {
			ret = append(ret, common.FormatData(dataItem)...)
		}
		return
	})
	return d
}
// ImportChain reads RLP-encoded blocks from the file fn and inserts them
// into chain in batches of importBatchSize. A SIGINT stops the import at the
// next batch boundary.
func ImportChain(chain *core.ChainManager, fn string) error {
	// Watch for Ctrl-C while the import is running.
	// If a signal is received, the import will stop at the next batch.
	interrupt := make(chan os.Signal, 1)
	stop := make(chan struct{})
	signal.Notify(interrupt, os.Interrupt)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	go func() {
		if _, ok := <-interrupt; ok {
			glog.Info("caught interrupt during import, will stop at next batch")
		}
		// Closing stop (whether due to a signal or the deferred close of
		// interrupt at function exit) makes checkInterrupt return true.
		close(stop)
	}()
	checkInterrupt := func() bool {
		select {
		case <-stop:
			return true
		default:
			return false
		}
	}

	glog.Infoln("Importing blockchain", fn)
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()
	stream := rlp.NewStream(fh, 0)

	// Run actual the import.
	blocks := make(types.Blocks, importBatchSize)
	n := 0
	for batch := 0; ; batch++ {
		// Load a batch of RLP blocks.
		if checkInterrupt() {
			return fmt.Errorf("interrupted")
		}
		i := 0
		for ; i < importBatchSize; i++ {
			var b types.Block
			if err := stream.Decode(&b); err == io.EOF {
				break
			} else if err != nil {
				return fmt.Errorf("at block %d: %v", n, err)
			}
			blocks[i] = &b
			n++
		}
		// Clean EOF with an empty batch: import complete.
		if i == 0 {
			break
		}
		// Import the batch.
		if checkInterrupt() {
			return fmt.Errorf("interrupted")
		}
		// Skip batches whose blocks are all already known (resumed import).
		if hasAllBlocks(chain, blocks[:i]) {
			glog.Infof("skipping batch %d, all blocks present [%x / %x]",
				batch, blocks[0].Hash().Bytes()[:4], blocks[i-1].Hash().Bytes()[:4])
			continue
		}
		if _, err := chain.InsertChain(blocks[:i]); err != nil {
			return fmt.Errorf("invalid block %d: %v", n, err)
		}
	}
	return nil
}

// hasAllBlocks reports whether every block in bs is already present in chain.
func hasAllBlocks(chain *core.ChainManager, bs []*types.Block) bool {
	for _, b := range bs {
		if !chain.HasBlock(b.Hash()) {
			return false
		}
	}
	return true
}

// ExportChain writes the entire blockchain managed by chainmgr to the file
// fn, creating or truncating it as needed.
func ExportChain(chainmgr *core.ChainManager, fn string) error {
	glog.Infoln("Exporting blockchain to", fn)
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()
	if err := chainmgr.Export(fh); err != nil {
		return err
	}
	glog.Infoln("Exported blockchain to", fn)
	return nil
}
|
// Copyright 2016 Davide Muzzarelli. All right reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package gqt is a template engine for SQL queries.
It helps to separate SQL code from Go code and permits to compose the queries
with a simple syntax.
The template engine is the standard package "text/template".
Usage
Create a template directory tree of .sql files. Here an example template with
the definition of three blocks:
// File /path/to/sql/repository/dir/example.sql
{{define "allUsers"}}
SELECT *
FROM users
WHERE 1=1
{{end}}
{{define "getUser"}}
SELECT *
FROM users
WHERE id=?
{{end}}
{{define "allPosts"}}
SELECT *
FROM posts
WHERE date>=?
{{if ne .Order ""}}ORDER BY date {{.Order}}{{end}}
{{end}}
Then, with Go, add the directory to the default repository and execute the
queries:
// Setup
gqt.Add("/path/to/sql/repository/dir", "*.sql")
// Simple query without parameters
db.Query(gqt.Get("allUsers"))
// Query with parameters
db.QueryRow(gqt.Get("getUser"), 1)
// Query with context and parameters
db.Query(gqt.Exec("allPosts", map[string]interface{}{
"Order": "DESC",
}), date)
The templates are parsed immediately and recursively.
Namespaces
The templates can be organized in namespaces and stored in multiple root
directories.
templates1/
|-- roles/
| |-- queries.sql
|-- users/
| |-- queries.sql
| |-- commands.sql
templates2/
|-- posts/
| |-- queries.sql
| |-- commands.sql
|-- users/
| |-- queries.sql
|-- queries.sql
The blocks inside the sql files are merged, the blocks with the same namespace
and name will be overridden following the alphabetical order.
The sub-directories are used as namespaces and accessed like:
gqt.Add("../templates1", "*.sql")
gqt.Add("../templates2", "*.sql")
// Will search inside templates1/users/*.sql and templates2/users/*.sql
gqt.Get("users/allUsers")
Multiple databases
When dealing with multiple databases at the same time, like PostgreSQL and
MySQL, just create two repositories:
// Use a common directory
dir := "/path/to/sql/repository/dir"
// Create the PostgreSQL repository
pgsql := gqt.NewRepository()
pgsql.Add(dir, "*.pg.sql")
// Create a separated MySQL repository
mysql := gqt.NewRepository()
mysql.Add(dir, "*.my.sql")
// Then execute
pgsql.Get("queryName")
mysql.Get("queryName")
*/
package gqt
import (
"bytes"
"fmt"
"os"
"path/filepath"
"strings"
"text/template"
)
// Repository stores SQL templates, one template.Template per namespace
// (where a namespace mirrors a sub-directory below the added root).
type Repository struct {
	templates map[string]*template.Template // namespace: template
}
// NewRepository returns an empty Repository, ready for Add to populate.
func NewRepository() *Repository {
	repo := new(Repository)
	repo.templates = make(map[string]*template.Template)
	return repo
}
// Add adds a root directory to the repository, recursively, parsing every
// file matching the glob pattern. Each sub-directory becomes a namespace.
// Blocks on the same namespace will be overridden. Does not follow
// symbolic links.
func (r *Repository) Add(root string, pattern string) (err error) {
	// Collect root and all of its sub-directories.
	var dirs []string
	err = filepath.Walk(
		root,
		func(path string, info os.FileInfo, e error) error {
			if e != nil {
				return e
			}
			if info.IsDir() {
				dirs = append(dirs, path)
			}
			return nil
		},
	)
	if err != nil {
		return err
	}
	// A directory's namespace is its path relative to root, joined with "/".
	// Splitting root is loop-invariant, so compute its depth once instead of
	// re-splitting it on every iteration.
	rootDepth := len(strings.Split(root, string(os.PathSeparator)))
	for _, dir := range dirs {
		segments := strings.Split(dir, string(os.PathSeparator))
		namespace := strings.Join(segments[rootDepth:], "/")
		if err = r.addDir(dir, namespace, pattern); err != nil {
			return err
		}
	}
	return nil
}
// addDir parses every file in path matching pattern and stores the
// resulting template set under namespace.
func (r *Repository) addDir(path, namespace, pattern string) error {
	// Parse all matching files into one template. The previous revision
	// called t.ParseGlob on the still-undeclared variable t, which does not
	// compile; the glob must be parsed via the template package itself.
	t, err := template.ParseGlob(filepath.Join(path, pattern))
	if err != nil {
		// Register an empty template so the namespace stays resolvable and
		// Parse reports an unknown block rather than an unknown namespace.
		r.templates[namespace] = template.New("")
		return err
	}
	r.templates[namespace] = t
	return nil
}
// Get executes the named block with no data. It is a shortcut for
// r.Exec(name, nil) and therefore panics if the block cannot be executed.
func (r *Repository) Get(name string) string {
	return r.Exec(name, nil)
}
// Exec is a shortcut for r.Parse() that panics if an error occurs.
func (r *Repository) Exec(name string, data interface{}) (s string) {
	result, err := r.Parse(name, data)
	if err != nil {
		panic(err)
	}
	return result
}
// Parse executes the template and returns the resulting SQL or an error.
func (r *Repository) Parse(name string, data interface{}) (string, error) {
	if name == "" {
		return "", fmt.Errorf("unnamed block")
	}
	// Split "ns1/ns2/block" into namespace ("ns1/ns2") and block name.
	segments := strings.Split(name, "/")
	block := segments[len(segments)-1]
	namespace := strings.Join(segments[:len(segments)-1], "/")
	if namespace == "." {
		namespace = ""
	}
	// Execute the requested block from the namespace's template set.
	t, ok := r.templates[namespace]
	if !ok {
		return "", fmt.Errorf("unknown namespace \"%s\"", namespace)
	}
	var b bytes.Buffer
	if err := t.ExecuteTemplate(&b, block, data); err != nil {
		return "", err
	}
	return b.String(), nil
}
// defaultRepository backs the package-level Add/Get/Exec/Parse helpers.
var defaultRepository = NewRepository()

// Add calls Add on the default repository. The second argument is a file
// glob pattern such as "*.sql" (it was previously named ext, but it is
// forwarded unchanged as a glob pattern, so name it accordingly).
func Add(root string, pattern string) error {
	return defaultRepository.Add(root, pattern)
}

// Get calls Get on the default repository.
func Get(name string) string {
	return defaultRepository.Get(name)
}

// Exec calls Exec on the default repository.
func Exec(name string, data interface{}) string {
	return defaultRepository.Exec(name, data)
}

// Parse calls Parse on the default repository.
func Parse(name string, data interface{}) (string, error) {
	return defaultRepository.Parse(name, data)
}
Commit 25038adaa17ae7f166 broke addDir func
// Copyright 2016 Davide Muzzarelli. All right reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package gqt is a template engine for SQL queries.
It helps to separate SQL code from Go code and permits to compose the queries
with a simple syntax.
The template engine is the standard package "text/template".
Usage
Create a template directory tree of .sql files. Here an example template with
the definition of three blocks:
// File /path/to/sql/repository/dir/example.sql
{{define "allUsers"}}
SELECT *
FROM users
WHERE 1=1
{{end}}
{{define "getUser"}}
SELECT *
FROM users
WHERE id=?
{{end}}
{{define "allPosts"}}
SELECT *
FROM posts
WHERE date>=?
{{if ne .Order ""}}ORDER BY date {{.Order}}{{end}}
{{end}}
Then, with Go, add the directory to the default repository and execute the
queries:
// Setup
gqt.Add("/path/to/sql/repository/dir", "*.sql")
// Simple query without parameters
db.Query(gqt.Get("allUsers"))
// Query with parameters
db.QueryRow(gqt.Get("getUser"), 1)
// Query with context and parameters
db.Query(gqt.Exec("allPosts", map[string]interface{
"Order": "DESC",
}), date)
The templates are parsed immediately and recursively.
Namespaces
The templates can be organized in namespaces and stored in multiple root
directories.
templates1/
|-- roles/
| |-- queries.sql
|-- users/
| |-- queries.sql
| |-- commands.sql
templates2/
|-- posts/
| |-- queries.sql
| |-- commands.sql
|-- users/
| |-- queries.sql
|-- queries.sql
The blocks inside the sql files are merged, the blocks with the same namespace
and name will be overridden following the alphabetical order.
The sub-directories are used as namespaces and accessed like:
gqt.Add("../templates1", "*.sql")
gqt.Add("../templates2", "*.sql")
// Will search inside templates1/users/*.sql and templates2/users/*.sql
gqt.Get("users/allUsers")
Multiple databases
When dealing with multiple databases at the same time, like PostgreSQL and
MySQL, just create two repositories:
// Use a common directory
dir := "/path/to/sql/repository/dir"
// Create the PostgreSQL repository
pgsql := gqt.NewRepository()
pgsql.Add(dir, "*.pg.sql")
// Create a separated MySQL repository
mysql := gqt.NewRepository()
mysql.Add(dir, "*.my.sql")
// Then execute
pgsql.Get("queryName")
mysql.Get("queryName")
*/
package gqt
import (
"bytes"
"fmt"
"os"
"path/filepath"
"strings"
"text/template"
)
// Repository stores SQL templates, one parsed *template.Template per
// namespace (a namespace is a sub-directory relative to an added root).
type Repository struct {
	templates map[string]*template.Template // namespace -> parsed template set
}
// NewRepository returns an empty Repository, ready for Add to populate.
func NewRepository() *Repository {
	repo := new(Repository)
	repo.templates = make(map[string]*template.Template)
	return repo
}
// Add adds a root directory to the repository, recursively, parsing every
// file matching the glob pattern. Each sub-directory becomes a namespace.
// Blocks on the same namespace will be overridden. Does not follow
// symbolic links.
func (r *Repository) Add(root string, pattern string) (err error) {
	// Collect root and all of its sub-directories.
	var dirs []string
	err = filepath.Walk(
		root,
		func(path string, info os.FileInfo, e error) error {
			if e != nil {
				return e
			}
			if info.IsDir() {
				dirs = append(dirs, path)
			}
			return nil
		},
	)
	if err != nil {
		return err
	}
	// A directory's namespace is its path relative to root, joined with "/".
	// Splitting root is loop-invariant, so compute its depth once instead of
	// re-splitting it on every iteration.
	rootDepth := len(strings.Split(root, string(os.PathSeparator)))
	for _, dir := range dirs {
		segments := strings.Split(dir, string(os.PathSeparator))
		namespace := strings.Join(segments[rootDepth:], "/")
		if err = r.addDir(dir, namespace, pattern); err != nil {
			return err
		}
	}
	return nil
}
// addDir parses every file in path matching pattern and stores the
// resulting template set under namespace.
func (r *Repository) addDir(path, namespace, pattern string) error {
	glob := filepath.Join(path, pattern)
	parsed, err := template.ParseGlob(glob)
	if err != nil {
		// Keep the namespace resolvable even when parsing fails.
		r.templates[namespace] = template.New("")
		return err
	}
	r.templates[namespace] = parsed
	return nil
}
// Get executes the named block with no data. It is a shortcut for
// r.Exec(name, nil) and therefore panics if the block cannot be executed.
func (r *Repository) Get(name string) string {
	return r.Exec(name, nil)
}
// Exec is a shortcut for r.Parse() that panics if an error occurs.
func (r *Repository) Exec(name string, data interface{}) (s string) {
	result, err := r.Parse(name, data)
	if err != nil {
		panic(err)
	}
	return result
}
// Parse executes the template and returns the resulting SQL or an error.
func (r *Repository) Parse(name string, data interface{}) (string, error) {
	if name == "" {
		return "", fmt.Errorf("unnamed block")
	}
	// Split "ns1/ns2/block" into namespace ("ns1/ns2") and block name.
	segments := strings.Split(name, "/")
	block := segments[len(segments)-1]
	namespace := strings.Join(segments[:len(segments)-1], "/")
	if namespace == "." {
		namespace = ""
	}
	// Execute the requested block from the namespace's template set.
	t, ok := r.templates[namespace]
	if !ok {
		return "", fmt.Errorf("unknown namespace \"%s\"", namespace)
	}
	var b bytes.Buffer
	if err := t.ExecuteTemplate(&b, block, data); err != nil {
		return "", err
	}
	return b.String(), nil
}
// defaultRepository backs the package-level Add/Get/Exec/Parse helpers.
var defaultRepository = NewRepository()

// Add calls Add on the default repository. The second argument is a file
// glob pattern such as "*.sql" (it was previously named ext, but it is
// forwarded unchanged as a glob pattern, so name it accordingly).
func Add(root string, pattern string) error {
	return defaultRepository.Add(root, pattern)
}

// Get calls Get on the default repository.
func Get(name string) string {
	return defaultRepository.Get(name)
}

// Exec calls Exec on the default repository.
func Exec(name string, data interface{}) string {
	return defaultRepository.Exec(name, data)
}

// Parse calls Parse on the default repository.
func Parse(name string, data interface{}) (string, error) {
	return defaultRepository.Parse(name, data)
}
|
// Copyright 2015 Robert S. Gerus. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package main
/*
#cgo LDFLAGS: -lxname
#include <xname.h>
*/
import "C"
import (
// "gopkg.in/yaml.v2"
"sync"
"time"
)
// statusLock serializes access to the shared statusbar slice.
var statusLock sync.Mutex

// statusbar holds one text segment per updater position.
// NOTE(review): statusbar is never sized in this file, so indexing it in
// updateStatusbar will panic unless it is initialised elsewhere — confirm.
var statusbar []string

// updateStatusbar replaces the segment at pos with text, under statusLock.
func updateStatusbar(pos int, text string) {
	statusLock.Lock()
	defer statusLock.Unlock()
	statusbar[pos] = text
}
// spawnUpdater starts a goroutine that, every period seconds, calls f with
// args and writes the result into statusbar slot pos. The goroutine loops
// forever; there is no mechanism to stop it.
func spawnUpdater(pos, period int, args map[string]string, f func(map[string]string) string) {
	go func() {
		for {
			// Sleep first, then refresh, so the first update happens one
			// period after spawning.
			time.Sleep(time.Duration(period) * time.Second)
			updateStatusbar(pos, f(args))
		}
	}()
}
// timestamp is a statusbar item stub; it does not yet format a time.
func timestamp(args map[string]string) string {
	return "placeholder"
}

// fileReader is a statusbar item stub; it does not yet read any file.
func fileReader(args map[string]string) string {
	return "placeholder"
}
// main currently only sets the X root window name to a fixed string via
// the cgo xname binding.
// NOTE(review): C.CString allocates with C malloc and the pointer is never
// freed — a one-shot call here, but worth fixing if this moves to a loop.
func main() {
	C.xname(C.CString("placeholder"))
}
We're now reading the configuration file, hopefully.
// Copyright 2015 Robert S. Gerus. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package main
/*
#cgo LDFLAGS: -lxname
#include <xname.h>
*/
import "C"
import (
"gopkg.in/yaml.v2"
"io/ioutil"
"log"
"os"
"strings"
"sync"
"time"
)
// statusbarItem describes one configured segment of the statusbar.
type statusbarItem struct {
	Name   string            `yaml:"name"`   // display name of the item
	Period int               `yaml:"period"` // refresh interval in seconds
	Type   string            `yaml:"type"`   // which updater function to use
	Args   map[string]string `yaml:"args"`   // updater-specific arguments
}

// gsdConfig is the top-level structure of the YAML configuration file.
type gsdConfig struct {
	// Separator is inserted between statusbar segments when joining them.
	// The yaml key was previously `string`, which looks like a typo for the
	// field's meaning; configuration files using the old key must be updated.
	Separator string          `yaml:"separator"`
	Items     []statusbarItem `yaml:"items"`
}
// config holds the parsed configuration; it is filled in by main.
var config gsdConfig

// statusLock serializes access to the shared statusbar slice.
var statusLock sync.Mutex

// statusbar holds one text segment per configured item.
// NOTE(review): never sized in this file — updateStatusbar will panic on an
// out-of-range pos unless the slice is initialised elsewhere; confirm.
var statusbar []string

// updateStatusbar replaces the segment at pos and pushes the joined
// statusbar (segments separated by config.Separator) to the X root window
// name via the cgo xname binding.
func updateStatusbar(pos int, text string) {
	statusLock.Lock()
	defer statusLock.Unlock()
	statusbar[pos] = text
	C.xname(C.CString(strings.Join(statusbar, config.Separator)))
}
// spawnUpdater starts a goroutine that, every period seconds, calls f with
// args and writes the result into statusbar slot pos. The goroutine loops
// forever; there is no mechanism to stop it.
func spawnUpdater(pos, period int, args map[string]string, f func(map[string]string) string) {
	go func() {
		for {
			// Sleep first, then refresh, so the first update happens one
			// period after spawning.
			time.Sleep(time.Duration(period) * time.Second)
			updateStatusbar(pos, f(args))
		}
	}()
}
// timestamp is a statusbar item stub; it does not yet format a time.
func timestamp(args map[string]string) string {
	return "placeholder"
}

// fileReader is a statusbar item stub; it does not yet read any file.
func fileReader(args map[string]string) string {
	return "placeholder"
}
// main loads the YAML configuration named by the first command-line
// argument into the package-level config, exiting with a usage or error
// message on failure. It does not yet size the statusbar slice or spawn
// any updaters, so nothing is displayed after loading.
func main() {
	if len(os.Args) < 2 {
		log.Fatalln("Usage:", os.Args[0], "<configuration file>")
	}
	data, err := ioutil.ReadFile(os.Args[1])
	if err != nil {
		log.Fatalln("Error reading configuration file:", err)
	}
	if err := yaml.Unmarshal(data, &config); err != nil {
		log.Fatalln("Error parsing configuration file:", err)
	}
}
|
// Copyright 2017 Vector Creations Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package routing
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"net/http"
"sync"
"time"
"github.com/matrix-org/dendrite/clientapi/jsonerror"
eduserverAPI "github.com/matrix-org/dendrite/eduserver/api"
keyapi "github.com/matrix-org/dendrite/keyserver/api"
"github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/util"
"github.com/sirupsen/logrus"
)
// Send implements /_matrix/federation/v1/send/{txnID}
//
// It decodes the federation transaction body, enforces the spec's size
// limits, processes the PDUs/EDUs via processTransaction, and returns the
// per-PDU results (or an error response for fatal failures).
func Send(
	httpReq *http.Request,
	request *gomatrixserverlib.FederationRequest,
	txnID gomatrixserverlib.TransactionID,
	cfg *config.FederationAPI,
	rsAPI api.RoomserverInternalAPI,
	eduAPI eduserverAPI.EDUServerInputAPI,
	keyAPI keyapi.KeyInternalAPI,
	keys gomatrixserverlib.JSONVerifier,
	federation *gomatrixserverlib.FederationClient,
) util.JSONResponse {
	// Bundle the dependencies and per-transaction caches into a txnReq.
	t := txnReq{
		rsAPI:      rsAPI,
		eduAPI:     eduAPI,
		keys:       keys,
		federation: federation,
		haveEvents: make(map[string]*gomatrixserverlib.HeaderedEvent),
		newEvents:  make(map[string]bool),
		keyAPI:     keyAPI,
	}
	var txnEvents struct {
		PDUs []json.RawMessage       `json:"pdus"`
		EDUs []gomatrixserverlib.EDU `json:"edus"`
	}
	if err := json.Unmarshal(request.Content(), &txnEvents); err != nil {
		return util.JSONResponse{
			Code: http.StatusBadRequest,
			JSON: jsonerror.NotJSON("The request body could not be decoded into valid JSON. " + err.Error()),
		}
	}
	// Transactions are limited in size; they can have at most 50 PDUs and 100 EDUs.
	// https://matrix.org/docs/spec/server_server/latest#transactions
	if len(txnEvents.PDUs) > 50 || len(txnEvents.EDUs) > 100 {
		return util.JSONResponse{
			Code: http.StatusBadRequest,
			JSON: jsonerror.BadJSON("max 50 pdus / 100 edus"),
		}
	}
	// TODO: Really we should have a function to convert FederationRequest to txnReq
	t.PDUs = txnEvents.PDUs
	t.EDUs = txnEvents.EDUs
	t.Origin = request.Origin()
	t.TransactionID = txnID
	t.Destination = cfg.Matrix.ServerName
	util.GetLogger(httpReq.Context()).Infof("Received transaction %q from %q containing %d PDUs, %d EDUs", txnID, request.Origin(), len(t.PDUs), len(t.EDUs))
	resp, jsonErr := t.processTransaction(httpReq.Context())
	if jsonErr != nil {
		util.GetLogger(httpReq.Context()).WithField("jsonErr", jsonErr).Error("t.processTransaction failed")
		return *jsonErr
	}
	// https://matrix.org/docs/spec/server_server/r0.1.3#put-matrix-federation-v1-send-txnid
	// Status code 200:
	// The result of processing the transaction. The server is to use this response
	// even in the event of one or more PDUs failing to be processed.
	return util.JSONResponse{
		Code: http.StatusOK,
		JSON: resp,
	}
}
// txnReq carries the state and dependencies needed to process one
// federation /send transaction.
type txnReq struct {
	gomatrixserverlib.Transaction
	rsAPI      api.RoomserverInternalAPI
	eduAPI     eduserverAPI.EDUServerInputAPI
	keyAPI     keyapi.KeyInternalAPI
	keys       gomatrixserverlib.JSONVerifier
	federation txnFederationClient
	// local cache of events for auth checks, etc - this may include events
	// which the roomserver is unaware of.
	haveEvents map[string]*gomatrixserverlib.HeaderedEvent
	// new events which the roomserver does not know about
	newEvents map[string]bool
	// newEventsMutex guards newEvents (and reads of haveEvents in
	// haveEventIDs).
	newEventsMutex sync.RWMutex
}
// txnFederationClient is a subset of FederationClient functionality that
// txn requires. Useful for testing.
type txnFederationClient interface {
	// LookupState fetches the full room state at an event from a remote server.
	LookupState(ctx context.Context, s gomatrixserverlib.ServerName, roomID string, eventID string, roomVersion gomatrixserverlib.RoomVersion) (
		res gomatrixserverlib.RespState, err error,
	)
	// LookupStateIDs fetches only the state event IDs at an event.
	LookupStateIDs(ctx context.Context, s gomatrixserverlib.ServerName, roomID string, eventID string) (res gomatrixserverlib.RespStateIDs, err error)
	// GetEvent fetches a single event by ID.
	GetEvent(ctx context.Context, s gomatrixserverlib.ServerName, eventID string) (res gomatrixserverlib.Transaction, err error)
	// LookupMissingEvents fetches events between known extremities.
	LookupMissingEvents(ctx context.Context, s gomatrixserverlib.ServerName, roomID string, missing gomatrixserverlib.MissingEvents,
		roomVersion gomatrixserverlib.RoomVersion) (res gomatrixserverlib.RespMissingEvents, err error)
}
// processTransaction validates and processes every PDU in the transaction,
// then the EDUs. It returns per-event results keyed by event ID, or a
// non-nil *util.JSONResponse only for failures that should abort the whole
// transaction (bad canonical JSON, or fatal server-side errors).
func (t *txnReq) processTransaction(ctx context.Context) (*gomatrixserverlib.RespSend, *util.JSONResponse) {
	results := make(map[string]gomatrixserverlib.PDUResult)
	pdus := []*gomatrixserverlib.HeaderedEvent{}
	// First pass: parse, ACL-check and signature-check each PDU.
	for _, pdu := range t.PDUs {
		var header struct {
			RoomID string `json:"room_id"`
		}
		if err := json.Unmarshal(pdu, &header); err != nil {
			util.GetLogger(ctx).WithError(err).Warn("Transaction: Failed to extract room ID from event")
			// We don't know the event ID at this point so we can't return the
			// failure in the PDU results
			continue
		}
		verReq := api.QueryRoomVersionForRoomRequest{RoomID: header.RoomID}
		verRes := api.QueryRoomVersionForRoomResponse{}
		if err := t.rsAPI.QueryRoomVersionForRoom(ctx, &verReq, &verRes); err != nil {
			util.GetLogger(ctx).WithError(err).Warn("Transaction: Failed to query room version for room", verReq.RoomID)
			// We don't know the event ID at this point so we can't return the
			// failure in the PDU results
			continue
		}
		event, err := gomatrixserverlib.NewEventFromUntrustedJSON(pdu, verRes.RoomVersion)
		if err != nil {
			if _, ok := err.(gomatrixserverlib.BadJSONError); ok {
				// Room version 6 states that homeservers should strictly enforce canonical JSON
				// on PDUs.
				//
				// This enforces that the entire transaction is rejected if a single bad PDU is
				// sent. It is unclear if this is the correct behaviour or not.
				//
				// See https://github.com/matrix-org/synapse/issues/7543
				return nil, &util.JSONResponse{
					Code: 400,
					JSON: jsonerror.BadJSON("PDU contains bad JSON"),
				}
			}
			util.GetLogger(ctx).WithError(err).Warnf("Transaction: Failed to parse event JSON of event %s", string(pdu))
			continue
		}
		if api.IsServerBannedFromRoom(ctx, t.rsAPI, event.RoomID(), t.Origin) {
			results[event.EventID()] = gomatrixserverlib.PDUResult{
				Error: "Forbidden by server ACLs",
			}
			continue
		}
		if err = gomatrixserverlib.VerifyAllEventSignatures(ctx, []*gomatrixserverlib.Event{event}, t.keys); err != nil {
			util.GetLogger(ctx).WithError(err).Warnf("Transaction: Couldn't validate signature of event %q", event.EventID())
			results[event.EventID()] = gomatrixserverlib.PDUResult{
				Error: err.Error(),
			}
			continue
		}
		pdus = append(pdus, event.Headered(verRes.RoomVersion))
	}
	// Process the events.
	for _, e := range pdus {
		if err := t.processEvent(ctx, e.Unwrap()); err != nil {
			// If the error is due to the event itself being bad then we skip
			// it and move onto the next event. We report an error so that the
			// sender knows that we have skipped processing it.
			//
			// However if the event is due to a temporary failure in our server
			// such as a database being unavailable then we should bail, and
			// hope that the sender will retry when we are feeling better.
			//
			// It is uncertain what we should do if an event fails because
			// we failed to fetch more information from the sending server.
			// For example if a request to /state fails.
			// If we skip the event then we risk missing the event until we
			// receive another event referencing it.
			// If we bail and stop processing then we risk wedging incoming
			// transactions from that server forever.
			if isProcessingErrorFatal(err) {
				// Any other error should be the result of a temporary error in
				// our server so we should bail processing the transaction entirely.
				util.GetLogger(ctx).Warnf("Processing %s failed fatally: %s", e.EventID(), err)
				jsonErr := util.ErrorResponse(err)
				return nil, &jsonErr
			} else {
				// Auth errors mean the event is 'rejected' which have to be silent to appease sytest
				errMsg := ""
				_, rejected := err.(*gomatrixserverlib.NotAllowed)
				if !rejected {
					errMsg = err.Error()
				}
				util.GetLogger(ctx).WithError(err).WithField("event_id", e.EventID()).WithField("rejected", rejected).Warn(
					"Failed to process incoming federation event, skipping",
				)
				results[e.EventID()] = gomatrixserverlib.PDUResult{
					Error: errMsg,
				}
			}
		} else {
			results[e.EventID()] = gomatrixserverlib.PDUResult{}
		}
	}
	t.processEDUs(ctx)
	if c := len(results); c > 0 {
		util.GetLogger(ctx).Infof("Processed %d PDUs from transaction %q", c, t.TransactionID)
	}
	return &gomatrixserverlib.RespSend{PDUs: results}, nil
}
// isProcessingErrorFatal returns true if the error is really bad and
// we should stop processing the transaction, and returns false if it
// is just some less serious error about a specific event.
func isProcessingErrorFatal(err error) bool {
switch err {
case sql.ErrConnDone:
case sql.ErrTxDone:
return true
}
return false
}
type roomNotFoundError struct {
roomID string
}
type verifySigError struct {
eventID string
err error
}
type missingPrevEventsError struct {
eventID string
err error
}
func (e roomNotFoundError) Error() string { return fmt.Sprintf("room %q not found", e.roomID) }
func (e verifySigError) Error() string {
return fmt.Sprintf("unable to verify signature of event %q: %s", e.eventID, e.err)
}
func (e missingPrevEventsError) Error() string {
return fmt.Sprintf("unable to get prev_events for event %q: %s", e.eventID, e.err)
}
// haveEventIDs returns the set of cached event IDs that the roomserver
// already knows about, i.e. everything in t.haveEvents except events we
// have only just seen in this transaction.
func (t *txnReq) haveEventIDs() map[string]bool {
	t.newEventsMutex.RLock()
	defer t.newEventsMutex.RUnlock()
	known := make(map[string]bool, len(t.haveEvents))
	for id := range t.haveEvents {
		if !t.newEvents[id] {
			known[id] = true
		}
	}
	return known
}
// processEDUs dispatches each EDU in the transaction to the appropriate
// handler: typing notifications and send-to-device messages go to the EDU
// server, device list updates go to the key server, and receipts are stored
// per event. EDUs whose sender domain does not match the transaction origin
// are dropped.
// nolint:gocyclo
func (t *txnReq) processEDUs(ctx context.Context) {
	for _, e := range t.EDUs {
		switch e.Type {
		case gomatrixserverlib.MTyping:
			// https://matrix.org/docs/spec/server_server/latest#typing-notifications
			var typingPayload struct {
				RoomID string `json:"room_id"`
				UserID string `json:"user_id"`
				Typing bool   `json:"typing"`
			}
			if err := json.Unmarshal(e.Content, &typingPayload); err != nil {
				util.GetLogger(ctx).WithError(err).Error("Failed to unmarshal typing event")
				continue
			}
			_, domain, err := gomatrixserverlib.SplitID('@', typingPayload.UserID)
			if err != nil {
				util.GetLogger(ctx).WithError(err).Error("Failed to split domain from typing event sender")
				continue
			}
			if domain != t.Origin {
				util.GetLogger(ctx).Warnf("Dropping typing event where sender domain (%q) doesn't match origin (%q)", domain, t.Origin)
				continue
			}
			// 30*1000 is the typing timeout in milliseconds.
			if err := eduserverAPI.SendTyping(ctx, t.eduAPI, typingPayload.UserID, typingPayload.RoomID, typingPayload.Typing, 30*1000); err != nil {
				util.GetLogger(ctx).WithError(err).Error("Failed to send typing event to edu server")
			}
		case gomatrixserverlib.MDirectToDevice:
			// https://matrix.org/docs/spec/server_server/r0.1.3#m-direct-to-device-schema
			var directPayload gomatrixserverlib.ToDeviceMessage
			if err := json.Unmarshal(e.Content, &directPayload); err != nil {
				util.GetLogger(ctx).WithError(err).Error("Failed to unmarshal send-to-device events")
				continue
			}
			for userID, byUser := range directPayload.Messages {
				for deviceID, message := range byUser {
					// TODO: check that the user and the device actually exist here
					if err := eduserverAPI.SendToDevice(ctx, t.eduAPI, directPayload.Sender, userID, deviceID, directPayload.Type, message); err != nil {
						util.GetLogger(ctx).WithError(err).WithFields(logrus.Fields{
							"sender":    directPayload.Sender,
							"user_id":   userID,
							"device_id": deviceID,
						}).Error("Failed to send send-to-device event to edu server")
					}
				}
			}
		case gomatrixserverlib.MDeviceListUpdate:
			t.processDeviceListUpdate(ctx, e)
		case gomatrixserverlib.MReceipt:
			// https://matrix.org/docs/spec/server_server/r0.1.4#receipts
			payload := map[string]eduserverAPI.FederationReceiptMRead{}
			if err := json.Unmarshal(e.Content, &payload); err != nil {
				util.GetLogger(ctx).WithError(err).Error("Failed to unmarshal receipt event")
				continue
			}
			for roomID, receipt := range payload {
				for userID, mread := range receipt.User {
					_, domain, err := gomatrixserverlib.SplitID('@', userID)
					if err != nil {
						util.GetLogger(ctx).WithError(err).Error("Failed to split domain from receipt event sender")
						continue
					}
					if t.Origin != domain {
						util.GetLogger(ctx).Warnf("Dropping receipt event where sender domain (%q) doesn't match origin (%q)", domain, t.Origin)
						continue
					}
					if err := t.processReceiptEvent(ctx, userID, roomID, "m.read", mread.Data.TS, mread.EventIDs); err != nil {
						util.GetLogger(ctx).WithError(err).WithFields(logrus.Fields{
							"sender":  t.Origin,
							"user_id": userID,
							"room_id": roomID,
							"events":  mread.EventIDs,
						}).Error("Failed to send receipt event to edu server")
						continue
					}
				}
			}
		default:
			util.GetLogger(ctx).WithField("type", e.Type).Debug("Unhandled EDU")
		}
	}
}
// processReceiptEvent forwards a receipt to the EDU server, issuing one
// input request per referenced event ID.
func (t *txnReq) processReceiptEvent(ctx context.Context,
	userID, roomID, receiptType string,
	timestamp gomatrixserverlib.Timestamp,
	eventIDs []string,
) error {
	// Store every event.
	for _, eventID := range eventIDs {
		request := eduserverAPI.InputReceiptEventRequest{
			InputReceiptEvent: eduserverAPI.InputReceiptEvent{
				UserID:    userID,
				RoomID:    roomID,
				EventID:   eventID,
				Type:      receiptType,
				Timestamp: timestamp,
			},
		}
		var response eduserverAPI.InputReceiptEventResponse
		if err := t.eduAPI.InputReceiptEvent(ctx, &request, &response); err != nil {
			return fmt.Errorf("unable to set receipt event: %w", err)
		}
	}
	return nil
}
// processDeviceListUpdate forwards an m.device_list_update EDU to the key
// server.
// NOTE(review): the key server call uses context.Background() rather than
// ctx, so it is not cancelled with the incoming request — confirm this is
// intentional.
func (t *txnReq) processDeviceListUpdate(ctx context.Context, e gomatrixserverlib.EDU) {
	var payload gomatrixserverlib.DeviceListUpdateEvent
	if err := json.Unmarshal(e.Content, &payload); err != nil {
		util.GetLogger(ctx).WithError(err).Error("Failed to unmarshal device list update event")
		return
	}
	var inputRes keyapi.InputDeviceListUpdateResponse
	t.keyAPI.InputDeviceListUpdate(context.Background(), &keyapi.InputDeviceListUpdateRequest{
		Event: payload,
	}, &inputRes)
	if inputRes.Error != nil {
		util.GetLogger(ctx).WithError(inputRes.Error).WithField("user_id", payload.UserID).Error("failed to InputDeviceListUpdate")
	}
}
// getServers returns candidate servers to ask for missing events in roomID:
// always the transaction origin, plus (when the roomserver query succeeds)
// every server it believes is joined to the room.
func (t *txnReq) getServers(ctx context.Context, roomID string) []gomatrixserverlib.ServerName {
	servers := []gomatrixserverlib.ServerName{t.Origin}
	serverReq := &api.QueryServerJoinedToRoomRequest{
		RoomID: roomID,
	}
	serverRes := &api.QueryServerJoinedToRoomResponse{}
	// A query failure is non-fatal: fall back to just the origin server.
	if err := t.rsAPI.QueryServerJoinedToRoom(ctx, serverReq, serverRes); err == nil {
		servers = append(servers, serverRes.ServerNames...)
		util.GetLogger(ctx).Infof("Found %d server(s) to query for missing events in %q", len(servers), roomID)
	}
	return servers
}
// processEvent handles one verified PDU: it checks whether the roomserver
// has the event's auth_events and prev_events, fetches whatever is missing,
// and finally submits the event to the roomserver for auth checks.
func (t *txnReq) processEvent(ctx context.Context, e *gomatrixserverlib.Event) error {
	logger := util.GetLogger(ctx).WithField("event_id", e.EventID()).WithField("room_id", e.RoomID())
	// Work out if the roomserver knows everything it needs to know to auth
	// the event. This includes the prev_events and auth_events.
	// NOTE! This is going to include prev_events that have an empty state
	// snapshot. This is because we will need to re-request the event, and
	// it's /state_ids, in order for it to exist in the roomserver correctly
	// before the roomserver tries to work out
	stateReq := api.QueryMissingAuthPrevEventsRequest{
		RoomID:       e.RoomID(),
		AuthEventIDs: e.AuthEventIDs(),
		PrevEventIDs: e.PrevEventIDs(),
	}
	var stateResp api.QueryMissingAuthPrevEventsResponse
	if err := t.rsAPI.QueryMissingAuthPrevEvents(ctx, &stateReq, &stateResp); err != nil {
		return fmt.Errorf("t.rsAPI.QueryMissingAuthPrevEvents: %w", err)
	}
	if !stateResp.RoomExists {
		// TODO: When synapse receives a message for a room it is not in it
		// asks the remote server for the state of the room so that it can
		// check if the remote server knows of a join "m.room.member" event
		// that this server is unaware of.
		// However generally speaking we should reject events for rooms we
		// aren't a member of.
		return roomNotFoundError{e.RoomID()}
	}
	if len(stateResp.MissingAuthEventIDs) > 0 {
		logger.Infof("Event refers to %d unknown auth_events", len(stateResp.MissingAuthEventIDs))
		if err := t.retrieveMissingAuthEvents(ctx, e, &stateResp); err != nil {
			return fmt.Errorf("t.retrieveMissingAuthEvents: %w", err)
		}
	}
	if len(stateResp.MissingPrevEventIDs) > 0 {
		logger.Infof("Event refers to %d unknown prev_events", len(stateResp.MissingPrevEventIDs))
		return t.processEventWithMissingState(ctx, e, stateResp.RoomVersion)
	}
	// pass the event to the roomserver which will do auth checks
	// If the event fail auth checks, gmsl.NotAllowed error will be returned which we be silently
	// discarded by the caller of this function
	return api.SendEvents(
		context.Background(),
		t.rsAPI,
		api.KindNew,
		[]*gomatrixserverlib.HeaderedEvent{
			e.Headered(stateResp.RoomVersion),
		},
		api.DoNotSendToOtherServers,
		nil,
	)
}
// retrieveMissingAuthEvents fetches the auth events listed in stateResp
// from up to 5 candidate servers and submits each one to the roomserver as
// an outlier. It returns an error if any auth event could not be fetched
// from any of the servers.
func (t *txnReq) retrieveMissingAuthEvents(
	ctx context.Context, e *gomatrixserverlib.Event, stateResp *api.QueryMissingAuthPrevEventsResponse,
) error {
	logger := util.GetLogger(ctx).WithField("event_id", e.EventID()).WithField("room_id", e.RoomID())
	// Track the outstanding IDs as a set; entries are removed as fetched.
	missingAuthEvents := make(map[string]struct{})
	for _, missingAuthEventID := range stateResp.MissingAuthEventIDs {
		missingAuthEvents[missingAuthEventID] = struct{}{}
	}
	servers := t.getServers(ctx, e.RoomID())
	if len(servers) > 5 {
		servers = servers[:5]
	}
withNextEvent:
	for missingAuthEventID := range missingAuthEvents {
	withNextServer:
		for _, server := range servers {
			logger.Infof("Retrieving missing auth event %q from %q", missingAuthEventID, server)
			tx, err := t.federation.GetEvent(ctx, server, missingAuthEventID)
			if err != nil {
				logger.WithError(err).Warnf("Failed to retrieve auth event %q", missingAuthEventID)
				continue withNextServer
			}
			ev, err := gomatrixserverlib.NewEventFromUntrustedJSON(tx.PDUs[0], stateResp.RoomVersion)
			if err != nil {
				logger.WithError(err).Warnf("Failed to unmarshal auth event %q", missingAuthEventID)
				continue withNextServer
			}
			if err = api.SendInputRoomEvents(
				context.Background(),
				t.rsAPI,
				[]api.InputRoomEvent{
					{
						Kind:         api.KindOutlier,
						Event:        ev.Headered(stateResp.RoomVersion),
						AuthEventIDs: ev.AuthEventIDs(),
						SendAsServer: api.DoNotSendToOtherServers,
					},
				},
			); err != nil {
				return fmt.Errorf("api.SendEvents: %w", err)
			}
			delete(missingAuthEvents, missingAuthEventID)
			continue withNextEvent
		}
	}
	if missing := len(missingAuthEvents); missing > 0 {
		return fmt.Errorf("Event refers to %d auth_events which we failed to fetch", missing)
	}
	return nil
}
// checkAllowedByState checks whether event e passes the auth rules against
// the supplied set of state events.
func checkAllowedByState(e *gomatrixserverlib.Event, stateEvents []*gomatrixserverlib.Event) error {
	auth := gomatrixserverlib.NewAuthEvents(nil)
	for _, stateEvent := range stateEvents {
		if err := auth.AddEvent(stateEvent); err != nil {
			return err
		}
	}
	return gomatrixserverlib.Allowed(e, &auth)
}
// nolint:gocyclo
func (t *txnReq) processEventWithMissingState(ctx context.Context, e *gomatrixserverlib.Event, roomVersion gomatrixserverlib.RoomVersion) error {
// Do this with a fresh context, so that we keep working even if the
// original request times out. With any luck, by the time the remote
// side retries, we'll have fetched the missing state.
gmectx, cancel := context.WithTimeout(context.Background(), time.Minute*5)
defer cancel()
// We are missing the previous events for this events.
// This means that there is a gap in our view of the history of the
// room. There two ways that we can handle such a gap:
// 1) We can fill in the gap using /get_missing_events
// 2) We can leave the gap and request the state of the room at
// this event from the remote server using either /state_ids
// or /state.
// Synapse will attempt to do 1 and if that fails or if the gap is
// too large then it will attempt 2.
// Synapse will use /state_ids if possible since usually the state
// is largely unchanged and it is more efficient to fetch a list of
// event ids and then use /event to fetch the individual events.
// However not all version of synapse support /state_ids so you may
// need to fallback to /state.
// Attempt to fill in the gap using /get_missing_events
// This will either:
// - fill in the gap completely then process event `e` returning no backwards extremity
// - fail to fill in the gap and tell us to terminate the transaction err=not nil
// - fail to fill in the gap and tell us to fetch state at the new backwards extremity, and to not terminate the transaction
newEvents, err := t.getMissingEvents(gmectx, e, roomVersion)
if err != nil {
return err
}
if len(newEvents) == 0 {
return nil
}
backwardsExtremity := newEvents[0]
newEvents = newEvents[1:]
type respState struct {
// A snapshot is considered trustworthy if it came from our own roomserver.
// That's because the state will have been through state resolution once
// already in QueryStateAfterEvent.
trustworthy bool
*gomatrixserverlib.RespState
}
// at this point we know we're going to have a gap: we need to work out the room state at the new backwards extremity.
// Therefore, we cannot just query /state_ids with this event to get the state before. Instead, we need to query
// the state AFTER all the prev_events for this event, then apply state resolution to that to get the state before the event.
var states []*respState
for _, prevEventID := range backwardsExtremity.PrevEventIDs() {
// Look up what the state is after the backward extremity. This will either
// come from the roomserver, if we know all the required events, or it will
// come from a remote server via /state_ids if not.
prevState, trustworthy, lerr := t.lookupStateAfterEvent(gmectx, roomVersion, backwardsExtremity.RoomID(), prevEventID)
if lerr != nil {
util.GetLogger(ctx).WithError(lerr).Errorf("Failed to lookup state after prev_event: %s", prevEventID)
return lerr
}
// Append the state onto the collected state. We'll run this through the
// state resolution next.
states = append(states, &respState{trustworthy, prevState})
}
// Now that we have collected all of the state from the prev_events, we'll
// run the state through the appropriate state resolution algorithm for the
// room if needed. This does a couple of things:
// 1. Ensures that the state is deduplicated fully for each state-key tuple
// 2. Ensures that we pick the latest events from both sets, in the case that
// one of the prev_events is quite a bit older than the others
resolvedState := &gomatrixserverlib.RespState{}
switch len(states) {
case 0:
extremityIsCreate := backwardsExtremity.Type() == gomatrixserverlib.MRoomCreate && backwardsExtremity.StateKeyEquals("")
if !extremityIsCreate {
// There are no previous states and this isn't the beginning of the
// room - this is an error condition!
util.GetLogger(ctx).Errorf("Failed to lookup any state after prev_events")
return fmt.Errorf("expected %d states but got %d", len(backwardsExtremity.PrevEventIDs()), len(states))
}
case 1:
// There's only one previous state - if it's trustworthy (came from a
// local state snapshot which will already have been through state res),
// use it as-is. There's no point in resolving it again.
if states[0].trustworthy {
resolvedState = states[0].RespState
break
}
// Otherwise, if it isn't trustworthy (came from federation), run it through
// state resolution anyway for safety, in case there are duplicates.
fallthrough
default:
respStates := make([]*gomatrixserverlib.RespState, len(states))
for i := range states {
respStates[i] = states[i].RespState
}
// There's more than one previous state - run them all through state res
resolvedState, err = t.resolveStatesAndCheck(gmectx, roomVersion, respStates, backwardsExtremity)
if err != nil {
util.GetLogger(ctx).WithError(err).Errorf("Failed to resolve state conflicts for event %s", backwardsExtremity.EventID())
return err
}
}
// First of all, send the backward extremity into the roomserver with the
// newly resolved state. This marks the "oldest" point in the backfill and
// sets the baseline state for any new events after this.
err = api.SendEventWithState(
context.Background(),
t.rsAPI,
api.KindOld,
resolvedState,
backwardsExtremity.Headered(roomVersion),
t.haveEventIDs(),
)
if err != nil {
return fmt.Errorf("api.SendEventWithState: %w", err)
}
// Then send all of the newer backfilled events, of which will all be newer
// than the backward extremity, into the roomserver without state. This way
// they will automatically fast-forward based on the room state at the
// extremity in the last step.
headeredNewEvents := make([]*gomatrixserverlib.HeaderedEvent, len(newEvents))
for i, newEvent := range newEvents {
headeredNewEvents[i] = newEvent.Headered(roomVersion)
}
if err = api.SendEvents(
context.Background(),
t.rsAPI,
api.KindOld,
append(headeredNewEvents, e.Headered(roomVersion)),
api.DoNotSendToOtherServers,
nil,
); err != nil {
return fmt.Errorf("api.SendEvents: %w", err)
}
return nil
}
// lookupStateAfterEvent returns the room state after `eventID`, which is the state before eventID with the state of `eventID` (if it's a state event)
// added into the mix.
// The second return value reports whether the snapshot is "trustworthy": true when
// it came from our own roomserver (it has already been through state resolution),
// false when it was assembled from federation responses.
func (t *txnReq) lookupStateAfterEvent(ctx context.Context, roomVersion gomatrixserverlib.RoomVersion, roomID, eventID string) (*gomatrixserverlib.RespState, bool, error) {
	// try doing all this locally before we resort to querying federation
	respState := t.lookupStateAfterEventLocally(ctx, roomID, eventID)
	if respState != nil {
		return respState, true, nil
	}
	// The roomserver doesn't know the event, so fetch the state *before* it
	// over federation (/state_ids, falling back to /state).
	respState, err := t.lookupStateBeforeEvent(ctx, roomVersion, roomID, eventID)
	if err != nil {
		return nil, false, fmt.Errorf("t.lookupStateBeforeEvent: %w", err)
	}
	// Cap the number of candidate servers so we don't hammer the whole room.
	servers := t.getServers(ctx, roomID)
	if len(servers) > 5 {
		servers = servers[:5]
	}
	// fetch the event we're missing and add it to the pile
	h, err := t.lookupEvent(ctx, roomVersion, eventID, false, servers)
	switch err.(type) {
	case verifySigError:
		// The event was fetched but its signatures couldn't be verified:
		// return the state before the event rather than failing outright.
		return respState, false, nil
	case nil:
		// do nothing
	default:
		return nil, false, fmt.Errorf("t.lookupEvent: %w", err)
	}
	t.haveEvents[h.EventID()] = h
	// If the event is a state event, fold it into the snapshot so the result
	// really is the state *after* the event: replace any existing entry with
	// the same (type, state_key) tuple, otherwise append it.
	if h.StateKey() != nil {
		addedToState := false
		for i := range respState.StateEvents {
			se := respState.StateEvents[i]
			if se.Type() == h.Type() && se.StateKeyEquals(*h.StateKey()) {
				respState.StateEvents[i] = h.Unwrap()
				addedToState = true
				break
			}
		}
		if !addedToState {
			respState.StateEvents = append(respState.StateEvents, h.Unwrap())
		}
	}
	return respState, false, nil
}
// lookupStateAfterEventLocally asks the roomserver for the room state after
// eventID. It returns nil if the roomserver doesn't know the event (or if any
// query fails), in which case the caller should fall back to federation.
// As a side effect, every state/auth event seen here is cached in t.haveEvents.
func (t *txnReq) lookupStateAfterEventLocally(ctx context.Context, roomID, eventID string) *gomatrixserverlib.RespState {
	var res api.QueryStateAfterEventsResponse
	err := t.rsAPI.QueryStateAfterEvents(ctx, &api.QueryStateAfterEventsRequest{
		RoomID:       roomID,
		PrevEventIDs: []string{eventID},
	}, &res)
	if err != nil || !res.PrevEventsExist {
		// NOTE(review): err may be nil here (when the event simply isn't known),
		// so this can log a nil error alongside the warning.
		util.GetLogger(ctx).WithError(err).Warnf("failed to query state after %s locally", eventID)
		return nil
	}
	// Cache the state events so later lookups don't have to refetch them.
	for i, ev := range res.StateEvents {
		t.haveEvents[ev.EventID()] = res.StateEvents[i]
	}
	// Collect the auth events we already have; note which ones are missing.
	var authEvents []*gomatrixserverlib.Event
	missingAuthEvents := make(map[string]bool)
	for _, ev := range res.StateEvents {
		for _, ae := range ev.AuthEventIDs() {
			aev, ok := t.haveEvents[ae]
			if ok {
				authEvents = append(authEvents, aev.Unwrap())
			} else {
				missingAuthEvents[ae] = true
			}
		}
	}
	// QueryStateAfterEvents does not return the auth events, so fetch them now. We know the roomserver has them else it wouldn't
	// have stored the event.
	var missingEventList []string
	for evID := range missingAuthEvents {
		missingEventList = append(missingEventList, evID)
	}
	queryReq := api.QueryEventsByIDRequest{
		EventIDs: missingEventList,
	}
	util.GetLogger(ctx).Infof("Fetching missing auth events: %v", missingEventList)
	var queryRes api.QueryEventsByIDResponse
	if err = t.rsAPI.QueryEventsByID(ctx, &queryReq, &queryRes); err != nil {
		// Treat a failed auth-event fetch the same as not knowing the event:
		// the caller will fall back to federation.
		return nil
	}
	// Cache the fetched auth events and add them to the snapshot.
	for i := range queryRes.Events {
		evID := queryRes.Events[i].EventID()
		t.haveEvents[evID] = queryRes.Events[i]
		authEvents = append(authEvents, queryRes.Events[i].Unwrap())
	}
	// Strip the room-version headers off the state events before returning.
	evs := gomatrixserverlib.UnwrapEventHeaders(res.StateEvents)
	return &gomatrixserverlib.RespState{
		StateEvents: evs,
		AuthEvents:  authEvents,
	}
}
// lookupStateBeforeEvent returns the room state before the given event, which is just /state_ids and/or /state depending on what
// the server supports.
func (t *txnReq) lookupStateBeforeEvent(ctx context.Context, roomVersion gomatrixserverlib.RoomVersion, roomID, eventID string) (
	*gomatrixserverlib.RespState, error) {
	util.GetLogger(ctx).Infof("lookupStateBeforeEvent %s", eventID)
	// Attempt to fetch the missing state using /state_ids and /events
	return t.lookupMissingStateViaStateIDs(ctx, roomID, eventID, roomVersion)
}
// resolveStatesAndCheck runs the room version's state resolution algorithm over
// the collected state snapshots, then checks that backwardsExtremity is allowed
// by the resolved state. If the auth check reports a missing auth event, it is
// fetched (roomserver first, then remote servers) and the check is retried.
// Returns the resolved state together with the combined auth events.
func (t *txnReq) resolveStatesAndCheck(ctx context.Context, roomVersion gomatrixserverlib.RoomVersion, states []*gomatrixserverlib.RespState, backwardsExtremity *gomatrixserverlib.Event) (*gomatrixserverlib.RespState, error) {
	// Flatten every snapshot's state and auth events into single lists for the
	// resolver; ResolveConflicts handles deduplication and conflict resolution.
	var authEventList []*gomatrixserverlib.Event
	var stateEventList []*gomatrixserverlib.Event
	for _, state := range states {
		authEventList = append(authEventList, state.AuthEvents...)
		stateEventList = append(stateEventList, state.StateEvents...)
	}
	resolvedStateEvents, err := gomatrixserverlib.ResolveConflicts(roomVersion, stateEventList, authEventList)
	if err != nil {
		return nil, err
	}
	// apply the current event
retryAllowedState:
	if err = checkAllowedByState(backwardsExtremity, resolvedStateEvents); err != nil {
		switch missing := err.(type) {
		case gomatrixserverlib.MissingAuthEventError:
			// The auth check failed because we don't have one of the auth
			// events. Fetch it from up to 5 servers in the room and retry.
			servers := t.getServers(ctx, backwardsExtremity.RoomID())
			if len(servers) > 5 {
				servers = servers[:5]
			}
			h, err2 := t.lookupEvent(ctx, roomVersion, missing.AuthEventID, true, servers)
			switch err2.(type) {
			case verifySigError:
				// The auth event couldn't be verified: give up on the check
				// and return what we resolved so far rather than failing.
				return &gomatrixserverlib.RespState{
					AuthEvents:  authEventList,
					StateEvents: resolvedStateEvents,
				}, nil
			case nil:
				// do nothing
			default:
				return nil, fmt.Errorf("missing auth event %s and failed to look it up: %w", missing.AuthEventID, err2)
			}
			util.GetLogger(ctx).Infof("fetched event %s", missing.AuthEventID)
			resolvedStateEvents = append(resolvedStateEvents, h.Unwrap())
			// Retry until no more auth events are reported missing. This
			// terminates: each iteration adds one previously-missing event.
			goto retryAllowedState
		default:
		}
		// Any other auth failure is terminal for this lookup.
		return nil, err
	}
	return &gomatrixserverlib.RespState{
		AuthEvents:  authEventList,
		StateEvents: resolvedStateEvents,
	}, nil
}
// getMissingEvents returns a nil backwardsExtremity if missing events were fetched and handled, else returns the new backwards extremity which we should
// begin from. Returns an error only if we should terminate the transaction which initiated /get_missing_events
// This function recursively calls txnReq.processEvent with the missing events, which will be processed before this function returns.
// This means that we may recursively call this function, as we spider back up prev_events.
// nolint:gocyclo
func (t *txnReq) getMissingEvents(ctx context.Context, e *gomatrixserverlib.Event, roomVersion gomatrixserverlib.RoomVersion) (newEvents []*gomatrixserverlib.Event, err error) {
	logger := util.GetLogger(ctx).WithField("event_id", e.EventID()).WithField("room_id", e.RoomID())
	needed := gomatrixserverlib.StateNeededForAuth([]*gomatrixserverlib.Event{e})
	// query latest events (our trusted forward extremities)
	req := api.QueryLatestEventsAndStateRequest{
		RoomID:       e.RoomID(),
		StateToFetch: needed.Tuples(),
	}
	var res api.QueryLatestEventsAndStateResponse
	if err = t.rsAPI.QueryLatestEventsAndState(ctx, &req, &res); err != nil {
		logger.WithError(err).Warn("Failed to query latest events")
		return nil, err
	}
	latestEvents := make([]string, len(res.LatestEvents))
	for i := range res.LatestEvents {
		latestEvents[i] = res.LatestEvents[i].EventID
	}
	// Build the list of servers to ask: the transaction origin first, then any
	// other servers the roomserver believes are joined to the room.
	servers := []gomatrixserverlib.ServerName{t.Origin}
	serverReq := &api.QueryServerJoinedToRoomRequest{
		RoomID: e.RoomID(),
	}
	serverRes := &api.QueryServerJoinedToRoomResponse{}
	if err = t.rsAPI.QueryServerJoinedToRoom(ctx, serverReq, serverRes); err == nil {
		servers = append(servers, serverRes.ServerNames...)
		logger.Infof("Found %d server(s) to query for missing events", len(servers))
	}
	// Try each server in turn until one responds to /get_missing_events.
	var missingResp *gomatrixserverlib.RespMissingEvents
	for _, server := range servers {
		var m gomatrixserverlib.RespMissingEvents
		if m, err = t.federation.LookupMissingEvents(ctx, server, e.RoomID(), gomatrixserverlib.MissingEvents{
			Limit: 20,
			// The latest event IDs that the sender already has. These are skipped when retrieving the previous events of latest_events.
			EarliestEvents: latestEvents,
			// The event IDs to retrieve the previous events for.
			LatestEvents: []string{e.EventID()},
		}, roomVersion); err == nil {
			missingResp = &m
			break
		} else {
			logger.WithError(err).Errorf("%s pushed us an event but %q did not respond to /get_missing_events", t.Origin, server)
		}
	}
	// No server could help us: drop the event (it can be retried by the sender).
	if missingResp == nil {
		logger.WithError(err).Errorf(
			"%s pushed us an event but %d server(s) couldn't give us details about prev_events via /get_missing_events - dropping this event until it can",
			t.Origin, len(servers),
		)
		return nil, missingPrevEventsError{
			eventID: e.EventID(),
			err:     err,
		}
	}
	// security: how we handle failures depends on whether or not this event will become the new forward extremity for the room.
	// There's 2 scenarios to consider:
	// - Case A: We got pushed an event and are now fetching missing prev_events. (isInboundTxn=true)
	// - Case B: We are fetching missing prev_events already and now fetching some more (isInboundTxn=false)
	// In Case B, we know for sure that the event we are currently processing will not become the new forward extremity for the room,
	// as it was called in response to an inbound txn which had it as a prev_event.
	// In Case A, the event is a forward extremity, and could eventually become the _only_ forward extremity in the room. This is bad
	// because it means we would trust the state at that event to be the state for the entire room, and allows rooms to be hijacked.
	// https://github.com/matrix-org/synapse/pull/3456
	// https://github.com/matrix-org/synapse/blob/229eb81498b0fe1da81e9b5b333a0285acde9446/synapse/handlers/federation.py#L335
	// For now, we do not allow Case B, so reject the event.
	logger.Infof("get_missing_events returned %d events", len(missingResp.Events))
	// topologically sort and sanity check that we are making forward progress
	newEvents = gomatrixserverlib.ReverseTopologicalOrdering(missingResp.Events, gomatrixserverlib.TopologicalOrderByPrevEvents)
	shouldHaveSomeEventIDs := e.PrevEventIDs()
	hasPrevEvent := false
	// Sanity check: the response must contain at least one of the prev_events
	// of the event we're processing, otherwise we made no forward progress.
Event:
	for _, pe := range shouldHaveSomeEventIDs {
		for _, ev := range newEvents {
			if ev.EventID() == pe {
				hasPrevEvent = true
				break Event
			}
		}
	}
	if !hasPrevEvent {
		err = fmt.Errorf("called /get_missing_events but server %s didn't return any prev_events with IDs %v", t.Origin, shouldHaveSomeEventIDs)
		logger.WithError(err).Errorf(
			"%s pushed us an event but couldn't give us details about prev_events via /get_missing_events - dropping this event until it can",
			t.Origin,
		)
		return nil, missingPrevEventsError{
			eventID: e.EventID(),
			err:     err,
		}
	}
	// we processed everything!
	return newEvents, nil
}
// lookupMissingStateViaState fetches the complete room state at eventID from
// the transaction origin using /state, verifying the response before use.
func (t *txnReq) lookupMissingStateViaState(ctx context.Context, roomID, eventID string, roomVersion gomatrixserverlib.RoomVersion) (
	respState *gomatrixserverlib.RespState, err error) {
	var state gomatrixserverlib.RespState
	if state, err = t.federation.LookupState(ctx, t.Origin, roomID, eventID, roomVersion); err != nil {
		return nil, err
	}
	// Make sure the returned state passes signature and auth checks before
	// we trust it.
	if err = state.Check(ctx, t.keys, nil); err != nil {
		return nil, err
	}
	return &state, nil
}
// lookupMissingStateViaStateIDs fetches the room state at eventID using
// /state_ids, then fills in any events we don't already have, first from the
// roomserver and then from remote servers via /event (fetched concurrently by
// a small worker pool). If more than half of the wanted events are missing, it
// falls back to a single /state call instead of hammering the remote side with
// individual /event requests. Returns the assembled state snapshot.
// nolint:gocyclo
func (t *txnReq) lookupMissingStateViaStateIDs(ctx context.Context, roomID, eventID string, roomVersion gomatrixserverlib.RoomVersion) (
	*gomatrixserverlib.RespState, error) {
	util.GetLogger(ctx).Infof("lookupMissingStateViaStateIDs %s", eventID)
	// fetch the state event IDs at the time of the event
	stateIDs, err := t.federation.LookupStateIDs(ctx, t.Origin, roomID, eventID)
	if err != nil {
		return nil, err
	}
	// work out which auth/state IDs are missing
	wantIDs := append(stateIDs.StateEventIDs, stateIDs.AuthEventIDs...)
	missing := make(map[string]bool)
	var missingEventList []string
	for _, sid := range wantIDs {
		if _, ok := t.haveEvents[sid]; !ok {
			if !missing[sid] {
				missing[sid] = true
				missingEventList = append(missingEventList, sid)
			}
		}
	}
	// fetch as many as we can from the roomserver
	queryReq := api.QueryEventsByIDRequest{
		EventIDs: missingEventList,
	}
	var queryRes api.QueryEventsByIDResponse
	if err = t.rsAPI.QueryEventsByID(ctx, &queryReq, &queryRes); err != nil {
		return nil, err
	}
	for i := range queryRes.Events {
		evID := queryRes.Events[i].EventID()
		t.haveEvents[evID] = queryRes.Events[i]
		if missing[evID] {
			delete(missing, evID)
		}
	}
	concurrentRequests := 8
	missingCount := len(missing)
	// If over 50% of the auth/state events from /state_ids are missing
	// then we'll just call /state instead, otherwise we'll just end up
	// hammering the remote side with /event requests unnecessarily.
	if missingCount > concurrentRequests && missingCount > len(wantIDs)/2 {
		util.GetLogger(ctx).WithFields(logrus.Fields{
			"missing":           missingCount,
			"event_id":          eventID,
			"room_id":           roomID,
			"total_state":       len(stateIDs.StateEventIDs),
			"total_auth_events": len(stateIDs.AuthEventIDs),
		}).Info("Fetching all state at event")
		return t.lookupMissingStateViaState(ctx, roomID, eventID, roomVersion)
	}
	util.GetLogger(ctx).WithFields(logrus.Fields{
		"missing":             missingCount,
		"event_id":            eventID,
		"room_id":             roomID,
		"total_state":         len(stateIDs.StateEventIDs),
		"total_auth_events":   len(stateIDs.AuthEventIDs),
		"concurrent_requests": concurrentRequests,
	}).Info("Fetching missing state at event")
	// Get a list of servers to fetch from.
	servers := t.getServers(ctx, roomID)
	if len(servers) > 5 {
		servers = servers[:5]
	}
	// Create a queue containing all of the missing event IDs that we want
	// to retrieve.
	pending := make(chan string, missingCount)
	for missingEventID := range missing {
		pending <- missingEventID
	}
	close(pending)
	// Define how many workers we should start to do this.
	if missingCount < concurrentRequests {
		concurrentRequests = missingCount
	}
	// Create the wait group.
	var fetchgroup sync.WaitGroup
	fetchgroup.Add(concurrentRequests)
	// This is the only place where we'll write to t.haveEvents from
	// multiple goroutines, and everywhere else is blocked on this
	// synchronous function anyway.
	var haveEventsMutex sync.Mutex
	// Define what we'll do in order to fetch the missing event ID.
	// Fix: use a local error variable here instead of assigning to the
	// enclosing function's `err` — this closure runs on multiple worker
	// goroutines concurrently, so writing to the shared `err` was a data race.
	fetch := func(missingEventID string) {
		h, lerr := t.lookupEvent(ctx, roomVersion, missingEventID, false, servers)
		switch lerr.(type) {
		case verifySigError:
			// Signature verification failed; skip this event.
			return
		case nil:
			break
		default:
			util.GetLogger(ctx).WithError(lerr).WithFields(logrus.Fields{
				"event_id": missingEventID,
				"room_id":  roomID,
			}).Info("Failed to fetch missing event")
			return
		}
		haveEventsMutex.Lock()
		t.haveEvents[h.EventID()] = h
		haveEventsMutex.Unlock()
	}
	// Create the worker.
	worker := func(ch <-chan string) {
		defer fetchgroup.Done()
		for missingEventID := range ch {
			fetch(missingEventID)
		}
	}
	// Start the workers.
	for i := 0; i < concurrentRequests; i++ {
		go worker(pending)
	}
	// Wait for the workers to finish.
	fetchgroup.Wait()
	// Assemble the snapshot from whatever we managed to collect; events that
	// are still missing are logged and skipped by the helper.
	resp, err := t.createRespStateFromStateIDs(stateIDs)
	return resp, err
}
// createRespStateFromStateIDs assembles a RespState from the cached events in
// t.haveEvents, using the /state_ids response as the list of which events
// belong in the state and auth sections. Events we failed to obtain are
// logged and skipped rather than failing the whole snapshot.
func (t *txnReq) createRespStateFromStateIDs(stateIDs gomatrixserverlib.RespStateIDs) (
	*gomatrixserverlib.RespState, error) { // nolint:unparam
	// create a RespState response using the response to /state_ids as a guide
	var respState gomatrixserverlib.RespState
	for _, stateEventID := range stateIDs.StateEventIDs {
		ev, ok := t.haveEvents[stateEventID]
		if !ok {
			logrus.Warnf("Missing state event in createRespStateFromStateIDs: %s", stateEventID)
			continue
		}
		respState.StateEvents = append(respState.StateEvents, ev.Unwrap())
	}
	for _, authEventID := range stateIDs.AuthEventIDs {
		ev, ok := t.haveEvents[authEventID]
		if !ok {
			logrus.Warnf("Missing auth event in createRespStateFromStateIDs: %s", authEventID)
			continue
		}
		respState.AuthEvents = append(respState.AuthEvents, ev.Unwrap())
	}
	// We purposefully do not do auth checks on the returned events, as they will still
	// be processed in the exact same way, just as a 'rejected' event
	// TODO: Add a field to HeaderedEvent to indicate if the event is rejected.
	return &respState, nil
}
// lookupEvent retrieves the event with the given ID. When localFirst is set it
// asks the roomserver before going to the network; otherwise it tries each of
// the supplied servers in turn via /event. The event's signatures are verified
// before it is returned (a verifySigError is returned if they don't check out),
// and the event is recorded in t.newEvents as one the roomserver doesn't know.
func (t *txnReq) lookupEvent(ctx context.Context, roomVersion gomatrixserverlib.RoomVersion, missingEventID string, localFirst bool, servers []gomatrixserverlib.ServerName) (*gomatrixserverlib.HeaderedEvent, error) {
	if localFirst {
		// fetch from the roomserver
		queryReq := api.QueryEventsByIDRequest{
			EventIDs: []string{missingEventID},
		}
		var queryRes api.QueryEventsByIDResponse
		if err := t.rsAPI.QueryEventsByID(ctx, &queryReq, &queryRes); err != nil {
			util.GetLogger(ctx).Warnf("Failed to query roomserver for missing event %s: %s - falling back to remote", missingEventID, err)
		} else if len(queryRes.Events) == 1 {
			return queryRes.Events[0], nil
		}
	}
	// Ask each server in turn until one gives us a parseable event.
	var fetched *gomatrixserverlib.Event
	for _, serverName := range servers {
		txn, err := t.federation.GetEvent(ctx, serverName, missingEventID)
		if err != nil || len(txn.PDUs) == 0 {
			util.GetLogger(ctx).WithError(err).WithField("event_id", missingEventID).Warn("Failed to get missing /event for event ID")
			continue
		}
		ev, perr := gomatrixserverlib.NewEventFromUntrustedJSON(txn.PDUs[0], roomVersion)
		if perr != nil {
			util.GetLogger(ctx).WithError(perr).WithField("event_id", missingEventID).Warnf("Transaction: Failed to parse event JSON of event")
			continue
		}
		fetched = ev
		break
	}
	if fetched == nil {
		util.GetLogger(ctx).WithField("event_id", missingEventID).Warnf("Failed to get missing /event for event ID from %d server(s)", len(servers))
		return nil, fmt.Errorf("wasn't able to find event via %d server(s)", len(servers))
	}
	// Only hand back events whose signatures verify.
	if err := gomatrixserverlib.VerifyAllEventSignatures(ctx, []*gomatrixserverlib.Event{fetched}, t.keys); err != nil {
		util.GetLogger(ctx).WithError(err).Warnf("Transaction: Couldn't validate signature of event %q", fetched.EventID())
		return nil, verifySigError{fetched.EventID(), err}
	}
	h := fetched.Headered(roomVersion)
	// Record that this event is new to the roomserver.
	t.newEventsMutex.Lock()
	t.newEvents[h.EventID()] = true
	t.newEventsMutex.Unlock()
	return h, nil
}
Send/state tweaks (#1681)
* Check missing event count
* Don't use request context for /send
// Copyright 2017 Vector Creations Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package routing
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"net/http"
"sync"
"time"
"github.com/matrix-org/dendrite/clientapi/jsonerror"
eduserverAPI "github.com/matrix-org/dendrite/eduserver/api"
keyapi "github.com/matrix-org/dendrite/keyserver/api"
"github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/util"
"github.com/sirupsen/logrus"
)
// Send implements /_matrix/federation/v1/send/{txnID}
func Send(
	httpReq *http.Request,
	request *gomatrixserverlib.FederationRequest,
	txnID gomatrixserverlib.TransactionID,
	cfg *config.FederationAPI,
	rsAPI api.RoomserverInternalAPI,
	eduAPI eduserverAPI.EDUServerInputAPI,
	keyAPI keyapi.KeyInternalAPI,
	keys gomatrixserverlib.JSONVerifier,
	federation *gomatrixserverlib.FederationClient,
) util.JSONResponse {
	// Decode the transaction body first so we can reject bad requests early.
	var content struct {
		PDUs []json.RawMessage       `json:"pdus"`
		EDUs []gomatrixserverlib.EDU `json:"edus"`
	}
	if err := json.Unmarshal(request.Content(), &content); err != nil {
		return util.JSONResponse{
			Code: http.StatusBadRequest,
			JSON: jsonerror.NotJSON("The request body could not be decoded into valid JSON. " + err.Error()),
		}
	}
	// Transactions are limited in size; they can have at most 50 PDUs and 100 EDUs.
	// https://matrix.org/docs/spec/server_server/latest#transactions
	if len(content.PDUs) > 50 || len(content.EDUs) > 100 {
		return util.JSONResponse{
			Code: http.StatusBadRequest,
			JSON: jsonerror.BadJSON("max 50 pdus / 100 edus"),
		}
	}
	// TODO: Really we should have a function to convert FederationRequest to txnReq
	txn := txnReq{
		rsAPI:      rsAPI,
		eduAPI:     eduAPI,
		keyAPI:     keyAPI,
		keys:       keys,
		federation: federation,
		haveEvents: make(map[string]*gomatrixserverlib.HeaderedEvent),
		newEvents:  make(map[string]bool),
	}
	txn.PDUs = content.PDUs
	txn.EDUs = content.EDUs
	txn.Origin = request.Origin()
	txn.TransactionID = txnID
	txn.Destination = cfg.Matrix.ServerName
	util.GetLogger(httpReq.Context()).Infof("Received transaction %q from %q containing %d PDUs, %d EDUs", txnID, request.Origin(), len(txn.PDUs), len(txn.EDUs))
	// Process with a background context rather than the request context so
	// that processing isn't cut short if the sender disconnects.
	resp, jsonErr := txn.processTransaction(context.Background())
	if jsonErr != nil {
		util.GetLogger(httpReq.Context()).WithField("jsonErr", jsonErr).Error("t.processTransaction failed")
		return *jsonErr
	}
	// https://matrix.org/docs/spec/server_server/r0.1.3#put-matrix-federation-v1-send-txnid
	// Status code 200:
	// The result of processing the transaction. The server is to use this response
	// even in the event of one or more PDUs failing to be processed.
	return util.JSONResponse{
		Code: http.StatusOK,
		JSON: resp,
	}
}
// txnReq is the in-progress state for processing a single federation /send
// transaction. It embeds the wire-format Transaction and carries the internal
// APIs and per-transaction event caches used while handling its PDUs and EDUs.
type txnReq struct {
	gomatrixserverlib.Transaction
	rsAPI      api.RoomserverInternalAPI
	eduAPI     eduserverAPI.EDUServerInputAPI
	keyAPI     keyapi.KeyInternalAPI
	keys       gomatrixserverlib.JSONVerifier
	federation txnFederationClient
	// local cache of events for auth checks, etc - this may include events
	// which the roomserver is unaware of.
	haveEvents map[string]*gomatrixserverlib.HeaderedEvent
	// new events which the roomserver does not know about
	newEvents map[string]bool
	// newEventsMutex guards newEvents, which may be written from the
	// concurrent fetch workers in lookupMissingStateViaStateIDs (via lookupEvent).
	newEventsMutex sync.RWMutex
}
// A subset of FederationClient functionality that txn requires. Useful for testing.
type txnFederationClient interface {
	// LookupState fetches the full room state at the given event via /state.
	LookupState(ctx context.Context, s gomatrixserverlib.ServerName, roomID string, eventID string, roomVersion gomatrixserverlib.RoomVersion) (
		res gomatrixserverlib.RespState, err error,
	)
	// LookupStateIDs fetches the IDs of the room state at the given event via /state_ids.
	LookupStateIDs(ctx context.Context, s gomatrixserverlib.ServerName, roomID string, eventID string) (res gomatrixserverlib.RespStateIDs, err error)
	// GetEvent fetches a single event via /event.
	GetEvent(ctx context.Context, s gomatrixserverlib.ServerName, eventID string) (res gomatrixserverlib.Transaction, err error)
	// LookupMissingEvents fetches events between the earliest and latest events via /get_missing_events.
	LookupMissingEvents(ctx context.Context, s gomatrixserverlib.ServerName, roomID string, missing gomatrixserverlib.MissingEvents,
		roomVersion gomatrixserverlib.RoomVersion) (res gomatrixserverlib.RespMissingEvents, err error)
}
// processTransaction parses, verifies and processes every PDU in the
// transaction, then handles the EDUs. It returns a per-event result map; a
// non-nil *util.JSONResponse is returned only when the whole transaction must
// be rejected (bad canonical JSON, or a fatal server-side error).
func (t *txnReq) processTransaction(ctx context.Context) (*gomatrixserverlib.RespSend, *util.JSONResponse) {
	results := make(map[string]gomatrixserverlib.PDUResult)
	pdus := []*gomatrixserverlib.HeaderedEvent{}
	for _, pdu := range t.PDUs {
		// Extract just the room ID first: we need it to discover the room
		// version before we can parse the rest of the event.
		var header struct {
			RoomID string `json:"room_id"`
		}
		if err := json.Unmarshal(pdu, &header); err != nil {
			util.GetLogger(ctx).WithError(err).Warn("Transaction: Failed to extract room ID from event")
			// We don't know the event ID at this point so we can't return the
			// failure in the PDU results
			continue
		}
		verReq := api.QueryRoomVersionForRoomRequest{RoomID: header.RoomID}
		verRes := api.QueryRoomVersionForRoomResponse{}
		if err := t.rsAPI.QueryRoomVersionForRoom(ctx, &verReq, &verRes); err != nil {
			// Fix: the room ID was previously passed as a stray variadic
			// argument to Warn, producing a mangled log message (logrus
			// concatenates string args without a space). Log it as a
			// structured field instead.
			util.GetLogger(ctx).WithError(err).WithField("room_id", verReq.RoomID).Warn("Transaction: Failed to query room version for room")
			// We don't know the event ID at this point so we can't return the
			// failure in the PDU results
			continue
		}
		event, err := gomatrixserverlib.NewEventFromUntrustedJSON(pdu, verRes.RoomVersion)
		if err != nil {
			if _, ok := err.(gomatrixserverlib.BadJSONError); ok {
				// Room version 6 states that homeservers should strictly enforce canonical JSON
				// on PDUs.
				//
				// This enforces that the entire transaction is rejected if a single bad PDU is
				// sent. It is unclear if this is the correct behaviour or not.
				//
				// See https://github.com/matrix-org/synapse/issues/7543
				return nil, &util.JSONResponse{
					Code: 400,
					JSON: jsonerror.BadJSON("PDU contains bad JSON"),
				}
			}
			util.GetLogger(ctx).WithError(err).Warnf("Transaction: Failed to parse event JSON of event %s", string(pdu))
			continue
		}
		// Enforce server ACLs: events from banned servers are rejected.
		if api.IsServerBannedFromRoom(ctx, t.rsAPI, event.RoomID(), t.Origin) {
			results[event.EventID()] = gomatrixserverlib.PDUResult{
				Error: "Forbidden by server ACLs",
			}
			continue
		}
		// Only accept events whose signatures verify.
		if err = gomatrixserverlib.VerifyAllEventSignatures(ctx, []*gomatrixserverlib.Event{event}, t.keys); err != nil {
			util.GetLogger(ctx).WithError(err).Warnf("Transaction: Couldn't validate signature of event %q", event.EventID())
			results[event.EventID()] = gomatrixserverlib.PDUResult{
				Error: err.Error(),
			}
			continue
		}
		pdus = append(pdus, event.Headered(verRes.RoomVersion))
	}
	// Process the events.
	for _, e := range pdus {
		if err := t.processEvent(ctx, e.Unwrap()); err != nil {
			// If the error is due to the event itself being bad then we skip
			// it and move onto the next event. We report an error so that the
			// sender knows that we have skipped processing it.
			//
			// However if the event is due to a temporary failure in our server
			// such as a database being unavailable then we should bail, and
			// hope that the sender will retry when we are feeling better.
			//
			// It is uncertain what we should do if an event fails because
			// we failed to fetch more information from the sending server.
			// For example if a request to /state fails.
			// If we skip the event then we risk missing the event until we
			// receive another event referencing it.
			// If we bail and stop processing then we risk wedging incoming
			// transactions from that server forever.
			if isProcessingErrorFatal(err) {
				// Any other error should be the result of a temporary error in
				// our server so we should bail processing the transaction entirely.
				util.GetLogger(ctx).Warnf("Processing %s failed fatally: %s", e.EventID(), err)
				jsonErr := util.ErrorResponse(err)
				return nil, &jsonErr
			} else {
				// Auth errors mean the event is 'rejected' which have to be silent to appease sytest
				errMsg := ""
				_, rejected := err.(*gomatrixserverlib.NotAllowed)
				if !rejected {
					errMsg = err.Error()
				}
				util.GetLogger(ctx).WithError(err).WithField("event_id", e.EventID()).WithField("rejected", rejected).Warn(
					"Failed to process incoming federation event, skipping",
				)
				results[e.EventID()] = gomatrixserverlib.PDUResult{
					Error: errMsg,
				}
			}
		} else {
			results[e.EventID()] = gomatrixserverlib.PDUResult{}
		}
	}
	t.processEDUs(ctx)
	if c := len(results); c > 0 {
		util.GetLogger(ctx).Infof("Processed %d PDUs from transaction %q", c, t.TransactionID)
	}
	return &gomatrixserverlib.RespSend{PDUs: results}, nil
}
// isProcessingErrorFatal returns true if the error is really bad and
// we should stop processing the transaction, and returns false if it
// is just some less serious error about a specific event.
//
// Database connection/transaction errors indicate a temporary problem on our
// side: the sender should retry the whole transaction later.
func isProcessingErrorFatal(err error) bool {
	switch err {
	case sql.ErrConnDone, sql.ErrTxDone:
		// Bug fix: sql.ErrConnDone previously sat in an empty case and — since
		// Go switch cases do not fall through — was treated as non-fatal.
		// Both errors are fatal for the transaction.
		return true
	}
	return false
}
// roomNotFoundError indicates that the roomserver has no record of the room.
type roomNotFoundError struct {
	roomID string
}

// Error implements the error interface.
func (e roomNotFoundError) Error() string { return fmt.Sprintf("room %q not found", e.roomID) }

// verifySigError indicates that an event's signatures could not be verified.
type verifySigError struct {
	eventID string
	err     error
}

// Error implements the error interface.
func (e verifySigError) Error() string {
	return fmt.Sprintf("unable to verify signature of event %q: %s", e.eventID, e.err)
}

// missingPrevEventsError indicates that we could not obtain the prev_events
// of an event, so the transaction that delivered it should be retried later.
type missingPrevEventsError struct {
	eventID string
	err     error
}

// Error implements the error interface.
func (e missingPrevEventsError) Error() string {
	return fmt.Sprintf("unable to get prev_events for event %q: %s", e.eventID, e.err)
}
// haveEventIDs returns the set of cached event IDs, excluding any events that
// were newly fetched while processing this transaction (those the roomserver
// does not yet know about).
func (t *txnReq) haveEventIDs() map[string]bool {
	t.newEventsMutex.RLock()
	defer t.newEventsMutex.RUnlock()
	ids := make(map[string]bool, len(t.haveEvents))
	for eventID := range t.haveEvents {
		if !t.newEvents[eventID] {
			ids[eventID] = true
		}
	}
	return ids
}
// processEDUs dispatches the transaction's EDUs to the relevant internal APIs:
// typing notifications, send-to-device messages, device list updates and read
// receipts. EDU failures are logged and skipped — they never fail the
// transaction. Events claiming a sender from a domain other than the
// transaction origin are dropped.
// nolint:gocyclo
func (t *txnReq) processEDUs(ctx context.Context) {
	for _, e := range t.EDUs {
		switch e.Type {
		case gomatrixserverlib.MTyping:
			// https://matrix.org/docs/spec/server_server/latest#typing-notifications
			var typingPayload struct {
				RoomID string `json:"room_id"`
				UserID string `json:"user_id"`
				Typing bool   `json:"typing"`
			}
			if err := json.Unmarshal(e.Content, &typingPayload); err != nil {
				util.GetLogger(ctx).WithError(err).Error("Failed to unmarshal typing event")
				continue
			}
			_, domain, err := gomatrixserverlib.SplitID('@', typingPayload.UserID)
			if err != nil {
				util.GetLogger(ctx).WithError(err).Error("Failed to split domain from typing event sender")
				continue
			}
			// Reject typing events spoofed on behalf of other servers' users.
			if domain != t.Origin {
				util.GetLogger(ctx).Warnf("Dropping typing event where sender domain (%q) doesn't match origin (%q)", domain, t.Origin)
				continue
			}
			// 30*1000: typing timeout in milliseconds.
			if err := eduserverAPI.SendTyping(ctx, t.eduAPI, typingPayload.UserID, typingPayload.RoomID, typingPayload.Typing, 30*1000); err != nil {
				util.GetLogger(ctx).WithError(err).Error("Failed to send typing event to edu server")
			}
		case gomatrixserverlib.MDirectToDevice:
			// https://matrix.org/docs/spec/server_server/r0.1.3#m-direct-to-device-schema
			var directPayload gomatrixserverlib.ToDeviceMessage
			if err := json.Unmarshal(e.Content, &directPayload); err != nil {
				util.GetLogger(ctx).WithError(err).Error("Failed to unmarshal send-to-device events")
				continue
			}
			// Fan the message out to each addressed user/device pair.
			for userID, byUser := range directPayload.Messages {
				for deviceID, message := range byUser {
					// TODO: check that the user and the device actually exist here
					if err := eduserverAPI.SendToDevice(ctx, t.eduAPI, directPayload.Sender, userID, deviceID, directPayload.Type, message); err != nil {
						util.GetLogger(ctx).WithError(err).WithFields(logrus.Fields{
							"sender":    directPayload.Sender,
							"user_id":   userID,
							"device_id": deviceID,
						}).Error("Failed to send send-to-device event to edu server")
					}
				}
			}
		case gomatrixserverlib.MDeviceListUpdate:
			t.processDeviceListUpdate(ctx, e)
		case gomatrixserverlib.MReceipt:
			// https://matrix.org/docs/spec/server_server/r0.1.4#receipts
			payload := map[string]eduserverAPI.FederationReceiptMRead{}
			if err := json.Unmarshal(e.Content, &payload); err != nil {
				util.GetLogger(ctx).WithError(err).Error("Failed to unmarshal receipt event")
				continue
			}
			// The payload is keyed by room ID, then by user ID.
			for roomID, receipt := range payload {
				for userID, mread := range receipt.User {
					_, domain, err := gomatrixserverlib.SplitID('@', userID)
					if err != nil {
						util.GetLogger(ctx).WithError(err).Error("Failed to split domain from receipt event sender")
						continue
					}
					// Reject receipts spoofed on behalf of other servers' users.
					if t.Origin != domain {
						util.GetLogger(ctx).Warnf("Dropping receipt event where sender domain (%q) doesn't match origin (%q)", domain, t.Origin)
						continue
					}
					if err := t.processReceiptEvent(ctx, userID, roomID, "m.read", mread.Data.TS, mread.EventIDs); err != nil {
						util.GetLogger(ctx).WithError(err).WithFields(logrus.Fields{
							"sender":  t.Origin,
							"user_id": userID,
							"room_id": roomID,
							"events":  mread.EventIDs,
						}).Error("Failed to send receipt event to edu server")
						continue
					}
				}
			}
		default:
			util.GetLogger(ctx).WithField("type", e.Type).Debug("Unhandled EDU")
		}
	}
}
// processReceiptEvent forwards one receipt of the given type to the EDU
// server for every event ID in eventIDs. It stops and returns the wrapped
// error on the first failure.
func (t *txnReq) processReceiptEvent(ctx context.Context,
	userID, roomID, receiptType string,
	timestamp gomatrixserverlib.Timestamp,
	eventIDs []string,
) error {
	// store every event
	for _, eventID := range eventIDs {
		var res eduserverAPI.InputReceiptEventResponse
		req := eduserverAPI.InputReceiptEventRequest{
			InputReceiptEvent: eduserverAPI.InputReceiptEvent{
				UserID:    userID,
				RoomID:    roomID,
				EventID:   eventID,
				Type:      receiptType,
				Timestamp: timestamp,
			},
		}
		if err := t.eduAPI.InputReceiptEvent(ctx, &req, &res); err != nil {
			return fmt.Errorf("unable to set receipt event: %w", err)
		}
	}
	return nil
}
// processDeviceListUpdate hands an m.device_list_update EDU to the key
// server. Unmarshalling failures and key server errors are logged and
// dropped rather than propagated.
func (t *txnReq) processDeviceListUpdate(ctx context.Context, e gomatrixserverlib.EDU) {
	var payload gomatrixserverlib.DeviceListUpdateEvent
	if err := json.Unmarshal(e.Content, &payload); err != nil {
		util.GetLogger(ctx).WithError(err).Error("Failed to unmarshal device list update event")
		return
	}
	// NOTE(review): the update is submitted with context.Background() rather
	// than ctx — presumably so it isn't cancelled along with the inbound
	// request; confirm that this is intentional.
	var res keyapi.InputDeviceListUpdateResponse
	req := &keyapi.InputDeviceListUpdateRequest{
		Event: payload,
	}
	t.keyAPI.InputDeviceListUpdate(context.Background(), req, &res)
	if res.Error != nil {
		util.GetLogger(ctx).WithError(res.Error).WithField("user_id", payload.UserID).Error("failed to InputDeviceListUpdate")
	}
}
// getServers returns candidate servers to contact about the given room: the
// transaction origin first, followed by any servers the roomserver believes
// are joined to the room. A roomserver query failure is silently ignored,
// leaving just the origin.
func (t *txnReq) getServers(ctx context.Context, roomID string) []gomatrixserverlib.ServerName {
	servers := []gomatrixserverlib.ServerName{t.Origin}
	req := &api.QueryServerJoinedToRoomRequest{
		RoomID: roomID,
	}
	res := &api.QueryServerJoinedToRoomResponse{}
	if err := t.rsAPI.QueryServerJoinedToRoom(ctx, req, res); err == nil {
		servers = append(servers, res.ServerNames...)
		util.GetLogger(ctx).Infof("Found %d server(s) to query for missing events in %q", len(servers), roomID)
	}
	return servers
}
// processEvent handles a single PDU from a federation transaction. It asks
// the roomserver which of the event's auth_events and prev_events are
// unknown: missing auth events are fetched individually via
// retrieveMissingAuthEvents, while missing prev events trigger the full
// gap-filling logic in processEventWithMissingState. If nothing is missing
// the event is sent straight to the roomserver, which performs auth checks.
// Returns roomNotFoundError when we aren't in the room.
func (t *txnReq) processEvent(ctx context.Context, e *gomatrixserverlib.Event) error {
	logger := util.GetLogger(ctx).WithField("event_id", e.EventID()).WithField("room_id", e.RoomID())
	// Work out if the roomserver knows everything it needs to know to auth
	// the event. This includes the prev_events and auth_events.
	// NOTE! This is going to include prev_events that have an empty state
	// snapshot. This is because we will need to re-request the event, and
	// it's /state_ids, in order for it to exist in the roomserver correctly
	// before the roomserver tries to work out
	stateReq := api.QueryMissingAuthPrevEventsRequest{
		RoomID:       e.RoomID(),
		AuthEventIDs: e.AuthEventIDs(),
		PrevEventIDs: e.PrevEventIDs(),
	}
	var stateResp api.QueryMissingAuthPrevEventsResponse
	if err := t.rsAPI.QueryMissingAuthPrevEvents(ctx, &stateReq, &stateResp); err != nil {
		return fmt.Errorf("t.rsAPI.QueryMissingAuthPrevEvents: %w", err)
	}
	if !stateResp.RoomExists {
		// TODO: When synapse receives a message for a room it is not in it
		// asks the remote server for the state of the room so that it can
		// check if the remote server knows of a join "m.room.member" event
		// that this server is unaware of.
		// However generally speaking we should reject events for rooms we
		// aren't a member of.
		return roomNotFoundError{e.RoomID()}
	}
	if len(stateResp.MissingAuthEventIDs) > 0 {
		logger.Infof("Event refers to %d unknown auth_events", len(stateResp.MissingAuthEventIDs))
		if err := t.retrieveMissingAuthEvents(ctx, e, &stateResp); err != nil {
			return fmt.Errorf("t.retrieveMissingAuthEvents: %w", err)
		}
	}
	if len(stateResp.MissingPrevEventIDs) > 0 {
		// There's a gap in our history: hand off to the (potentially slow)
		// gap-filling path, which returns its own result for this event.
		logger.Infof("Event refers to %d unknown prev_events", len(stateResp.MissingPrevEventIDs))
		return t.processEventWithMissingState(ctx, e, stateResp.RoomVersion)
	}
	// pass the event to the roomserver which will do auth checks
	// If the event fail auth checks, gmsl.NotAllowed error will be returned which we be silently
	// discarded by the caller of this function
	return api.SendEvents(
		context.Background(),
		t.rsAPI,
		api.KindNew,
		[]*gomatrixserverlib.HeaderedEvent{
			e.Headered(stateResp.RoomVersion),
		},
		api.DoNotSendToOtherServers,
		nil,
	)
}
// retrieveMissingAuthEvents fetches every auth event of e that the
// roomserver reported as missing, trying up to 5 candidate servers per
// event. Each retrieved event is stored in the roomserver as an outlier.
// It returns an error if storing an event fails, or if any auth events
// remain unfetched after all servers have been tried.
func (t *txnReq) retrieveMissingAuthEvents(
	ctx context.Context, e *gomatrixserverlib.Event, stateResp *api.QueryMissingAuthPrevEventsResponse,
) error {
	logger := util.GetLogger(ctx).WithField("event_id", e.EventID()).WithField("room_id", e.RoomID())
	missingAuthEvents := make(map[string]struct{})
	for _, missingAuthEventID := range stateResp.MissingAuthEventIDs {
		missingAuthEvents[missingAuthEventID] = struct{}{}
	}
	// Limit the fan-out to at most 5 servers per event.
	servers := t.getServers(ctx, e.RoomID())
	if len(servers) > 5 {
		servers = servers[:5]
	}
withNextEvent:
	for missingAuthEventID := range missingAuthEvents {
	withNextServer:
		for _, server := range servers {
			logger.Infof("Retrieving missing auth event %q from %q", missingAuthEventID, server)
			tx, err := t.federation.GetEvent(ctx, server, missingAuthEventID)
			// Guard len(tx.PDUs) == 0 as well as the error: a response with
			// no PDUs would otherwise panic on tx.PDUs[0] below.
			if err != nil || len(tx.PDUs) == 0 {
				logger.WithError(err).Warnf("Failed to retrieve auth event %q", missingAuthEventID)
				continue withNextServer
			}
			ev, err := gomatrixserverlib.NewEventFromUntrustedJSON(tx.PDUs[0], stateResp.RoomVersion)
			if err != nil {
				logger.WithError(err).Warnf("Failed to unmarshal auth event %q", missingAuthEventID)
				continue withNextServer
			}
			// Store as an outlier: we don't know (or need) its room state.
			if err = api.SendInputRoomEvents(
				context.Background(),
				t.rsAPI,
				[]api.InputRoomEvent{
					{
						Kind:         api.KindOutlier,
						Event:        ev.Headered(stateResp.RoomVersion),
						AuthEventIDs: ev.AuthEventIDs(),
						SendAsServer: api.DoNotSendToOtherServers,
					},
				},
			); err != nil {
				// Name the function actually called (was "api.SendEvents").
				return fmt.Errorf("api.SendInputRoomEvents: %w", err)
			}
			delete(missingAuthEvents, missingAuthEventID)
			continue withNextEvent
		}
	}
	if missing := len(missingAuthEvents); missing > 0 {
		// Error strings are lowercase by Go convention.
		return fmt.Errorf("event refers to %d auth_events which we failed to fetch", missing)
	}
	return nil
}
// checkAllowedByState checks whether event e passes the auth rules against
// the supplied set of state events.
func checkAllowedByState(e *gomatrixserverlib.Event, stateEvents []*gomatrixserverlib.Event) error {
	auth := gomatrixserverlib.NewAuthEvents(nil)
	for _, se := range stateEvents {
		if err := auth.AddEvent(se); err != nil {
			return err
		}
	}
	return gomatrixserverlib.Allowed(e, &auth)
}
// processEventWithMissingState handles an event whose prev_events are not
// all known to the roomserver. It first tries to fill the gap via
// /get_missing_events; whatever gap remains is bridged by computing the room
// state at the new backwards extremity (from local snapshots or federation
// /state_ids), running state resolution over it, and then submitting the
// backwards extremity (with state), the backfilled events, and finally the
// event itself to the roomserver.
// nolint:gocyclo
func (t *txnReq) processEventWithMissingState(ctx context.Context, e *gomatrixserverlib.Event, roomVersion gomatrixserverlib.RoomVersion) error {
	// Do this with a fresh context, so that we keep working even if the
	// original request times out. With any luck, by the time the remote
	// side retries, we'll have fetched the missing state.
	gmectx, cancel := context.WithTimeout(context.Background(), time.Minute*5)
	defer cancel()
	// We are missing the previous events for this events.
	// This means that there is a gap in our view of the history of the
	// room. There two ways that we can handle such a gap:
	//   1) We can fill in the gap using /get_missing_events
	//   2) We can leave the gap and request the state of the room at
	//      this event from the remote server using either /state_ids
	//      or /state.
	// Synapse will attempt to do 1 and if that fails or if the gap is
	// too large then it will attempt 2.
	// Synapse will use /state_ids if possible since usually the state
	// is largely unchanged and it is more efficient to fetch a list of
	// event ids and then use /event to fetch the individual events.
	// However not all version of synapse support /state_ids so you may
	// need to fallback to /state.
	// Attempt to fill in the gap using /get_missing_events
	// This will either:
	// - fill in the gap completely then process event `e` returning no backwards extremity
	// - fail to fill in the gap and tell us to terminate the transaction err=not nil
	// - fail to fill in the gap and tell us to fetch state at the new backwards extremity, and to not terminate the transaction
	newEvents, err := t.getMissingEvents(gmectx, e, roomVersion)
	if err != nil {
		return err
	}
	if len(newEvents) == 0 {
		return nil
	}
	// The first event in topological order is the new backwards extremity;
	// the rest are the backfilled events that follow it.
	backwardsExtremity := newEvents[0]
	newEvents = newEvents[1:]
	type respState struct {
		// A snapshot is considered trustworthy if it came from our own roomserver.
		// That's because the state will have been through state resolution once
		// already in QueryStateAfterEvent.
		trustworthy bool
		*gomatrixserverlib.RespState
	}
	// at this point we know we're going to have a gap: we need to work out the room state at the new backwards extremity.
	// Therefore, we cannot just query /state_ids with this event to get the state before. Instead, we need to query
	// the state AFTER all the prev_events for this event, then apply state resolution to that to get the state before the event.
	var states []*respState
	for _, prevEventID := range backwardsExtremity.PrevEventIDs() {
		// Look up what the state is after the backward extremity. This will either
		// come from the roomserver, if we know all the required events, or it will
		// come from a remote server via /state_ids if not.
		prevState, trustworthy, lerr := t.lookupStateAfterEvent(gmectx, roomVersion, backwardsExtremity.RoomID(), prevEventID)
		if lerr != nil {
			util.GetLogger(ctx).WithError(lerr).Errorf("Failed to lookup state after prev_event: %s", prevEventID)
			return lerr
		}
		// Append the state onto the collected state. We'll run this through the
		// state resolution next.
		states = append(states, &respState{trustworthy, prevState})
	}
	// Now that we have collected all of the state from the prev_events, we'll
	// run the state through the appropriate state resolution algorithm for the
	// room if needed. This does a couple of things:
	// 1. Ensures that the state is deduplicated fully for each state-key tuple
	// 2. Ensures that we pick the latest events from both sets, in the case that
	//    one of the prev_events is quite a bit older than the others
	resolvedState := &gomatrixserverlib.RespState{}
	switch len(states) {
	case 0:
		// No state at all is only legitimate for the room's create event,
		// which by definition has no prev_events.
		extremityIsCreate := backwardsExtremity.Type() == gomatrixserverlib.MRoomCreate && backwardsExtremity.StateKeyEquals("")
		if !extremityIsCreate {
			// There are no previous states and this isn't the beginning of the
			// room - this is an error condition!
			util.GetLogger(ctx).Errorf("Failed to lookup any state after prev_events")
			return fmt.Errorf("expected %d states but got %d", len(backwardsExtremity.PrevEventIDs()), len(states))
		}
	case 1:
		// There's only one previous state - if it's trustworthy (came from a
		// local state snapshot which will already have been through state res),
		// use it as-is. There's no point in resolving it again.
		if states[0].trustworthy {
			resolvedState = states[0].RespState
			break
		}
		// Otherwise, if it isn't trustworthy (came from federation), run it through
		// state resolution anyway for safety, in case there are duplicates.
		fallthrough
	default:
		respStates := make([]*gomatrixserverlib.RespState, len(states))
		for i := range states {
			respStates[i] = states[i].RespState
		}
		// There's more than one previous state - run them all through state res
		resolvedState, err = t.resolveStatesAndCheck(gmectx, roomVersion, respStates, backwardsExtremity)
		if err != nil {
			util.GetLogger(ctx).WithError(err).Errorf("Failed to resolve state conflicts for event %s", backwardsExtremity.EventID())
			return err
		}
	}
	// First of all, send the backward extremity into the roomserver with the
	// newly resolved state. This marks the "oldest" point in the backfill and
	// sets the baseline state for any new events after this.
	err = api.SendEventWithState(
		context.Background(),
		t.rsAPI,
		api.KindOld,
		resolvedState,
		backwardsExtremity.Headered(roomVersion),
		t.haveEventIDs(),
	)
	if err != nil {
		return fmt.Errorf("api.SendEventWithState: %w", err)
	}
	// Then send all of the newer backfilled events, of which will all be newer
	// than the backward extremity, into the roomserver without state. This way
	// they will automatically fast-forward based on the room state at the
	// extremity in the last step.
	headeredNewEvents := make([]*gomatrixserverlib.HeaderedEvent, len(newEvents))
	for i, newEvent := range newEvents {
		headeredNewEvents[i] = newEvent.Headered(roomVersion)
	}
	if err = api.SendEvents(
		context.Background(),
		t.rsAPI,
		api.KindOld,
		append(headeredNewEvents, e.Headered(roomVersion)),
		api.DoNotSendToOtherServers,
		nil,
	); err != nil {
		return fmt.Errorf("api.SendEvents: %w", err)
	}
	return nil
}
// lookupStateAfterEvent returns the room state after `eventID`, which is the state before eventID with the state of `eventID` (if it's a state event)
// added into the mix. The boolean return is true only when the snapshot came
// from our own roomserver (and therefore has already been through state
// resolution), i.e. when it is considered "trustworthy" by the caller.
func (t *txnReq) lookupStateAfterEvent(ctx context.Context, roomVersion gomatrixserverlib.RoomVersion, roomID, eventID string) (*gomatrixserverlib.RespState, bool, error) {
	// try doing all this locally before we resort to querying federation
	respState := t.lookupStateAfterEventLocally(ctx, roomID, eventID)
	if respState != nil {
		return respState, true, nil
	}
	// Not available locally: fetch the state *before* the event over
	// federation, then layer the event itself on top below.
	respState, err := t.lookupStateBeforeEvent(ctx, roomVersion, roomID, eventID)
	if err != nil {
		return nil, false, fmt.Errorf("t.lookupStateBeforeEvent: %w", err)
	}
	servers := t.getServers(ctx, roomID)
	if len(servers) > 5 {
		servers = servers[:5]
	}
	// fetch the event we're missing and add it to the pile
	h, err := t.lookupEvent(ctx, roomVersion, eventID, false, servers)
	switch err.(type) {
	case verifySigError:
		// Signature failure: return the state-before as-is rather than
		// mixing in an unverifiable event.
		return respState, false, nil
	case nil:
		// do nothing
	default:
		return nil, false, fmt.Errorf("t.lookupEvent: %w", err)
	}
	t.haveEvents[h.EventID()] = h
	// If the fetched event is a state event, splice it into the snapshot:
	// replace any existing event with the same (type, state_key) tuple, or
	// append it if there's no match.
	if h.StateKey() != nil {
		addedToState := false
		for i := range respState.StateEvents {
			se := respState.StateEvents[i]
			if se.Type() == h.Type() && se.StateKeyEquals(*h.StateKey()) {
				respState.StateEvents[i] = h.Unwrap()
				addedToState = true
				break
			}
		}
		if !addedToState {
			respState.StateEvents = append(respState.StateEvents, h.Unwrap())
		}
	}
	return respState, false, nil
}
// lookupStateAfterEventLocally asks our own roomserver for the room state
// after the given event. It returns nil if the roomserver doesn't know the
// event (or errors), signalling the caller to fall back to federation. On
// success the returned state and its auth events are also cached in
// t.haveEvents.
func (t *txnReq) lookupStateAfterEventLocally(ctx context.Context, roomID, eventID string) *gomatrixserverlib.RespState {
	var res api.QueryStateAfterEventsResponse
	err := t.rsAPI.QueryStateAfterEvents(ctx, &api.QueryStateAfterEventsRequest{
		RoomID:       roomID,
		PrevEventIDs: []string{eventID},
	}, &res)
	if err != nil || !res.PrevEventsExist {
		util.GetLogger(ctx).WithError(err).Warnf("failed to query state after %s locally", eventID)
		return nil
	}
	// Cache the state events so later lookups don't refetch them.
	for i, ev := range res.StateEvents {
		t.haveEvents[ev.EventID()] = res.StateEvents[i]
	}
	// Collect the auth events referenced by the state, noting which ones we
	// don't have cached yet.
	var authEvents []*gomatrixserverlib.Event
	missingAuthEvents := make(map[string]bool)
	for _, ev := range res.StateEvents {
		for _, ae := range ev.AuthEventIDs() {
			aev, ok := t.haveEvents[ae]
			if ok {
				authEvents = append(authEvents, aev.Unwrap())
			} else {
				missingAuthEvents[ae] = true
			}
		}
	}
	// QueryStateAfterEvents does not return the auth events, so fetch them now. We know the roomserver has them else it wouldn't
	// have stored the event.
	var missingEventList []string
	for evID := range missingAuthEvents {
		missingEventList = append(missingEventList, evID)
	}
	queryReq := api.QueryEventsByIDRequest{
		EventIDs: missingEventList,
	}
	util.GetLogger(ctx).Infof("Fetching missing auth events: %v", missingEventList)
	var queryRes api.QueryEventsByIDResponse
	if err = t.rsAPI.QueryEventsByID(ctx, &queryReq, &queryRes); err != nil {
		return nil
	}
	for i := range queryRes.Events {
		evID := queryRes.Events[i].EventID()
		t.haveEvents[evID] = queryRes.Events[i]
		authEvents = append(authEvents, queryRes.Events[i].Unwrap())
	}
	evs := gomatrixserverlib.UnwrapEventHeaders(res.StateEvents)
	return &gomatrixserverlib.RespState{
		StateEvents: evs,
		AuthEvents:  authEvents,
	}
}
// lookupStateBeforeEvent returns the room state before the event e, which is just /state_ids and/or /state depending on what
// the server supports.
func (t *txnReq) lookupStateBeforeEvent(ctx context.Context, roomVersion gomatrixserverlib.RoomVersion, roomID, eventID string) (
	*gomatrixserverlib.RespState, error) {
	util.GetLogger(ctx).Infof("lookupStateBeforeEvent %s", eventID)
	// Attempt to fetch the missing state using /state_ids and /events
	// (lookupMissingStateViaStateIDs falls back to /state internally when
	// too many events are missing).
	return t.lookupMissingStateViaStateIDs(ctx, roomID, eventID, roomVersion)
}
// resolveStatesAndCheck merges the given state snapshots via the room
// version's state resolution algorithm, then checks that backwardsExtremity
// is allowed by the resolved state. If the auth check fails only because an
// auth event is missing, that event is fetched (up to 5 servers) and the
// check is retried via the goto loop below; any other auth failure is
// returned as an error.
func (t *txnReq) resolveStatesAndCheck(ctx context.Context, roomVersion gomatrixserverlib.RoomVersion, states []*gomatrixserverlib.RespState, backwardsExtremity *gomatrixserverlib.Event) (*gomatrixserverlib.RespState, error) {
	var authEventList []*gomatrixserverlib.Event
	var stateEventList []*gomatrixserverlib.Event
	for _, state := range states {
		authEventList = append(authEventList, state.AuthEvents...)
		stateEventList = append(stateEventList, state.StateEvents...)
	}
	resolvedStateEvents, err := gomatrixserverlib.ResolveConflicts(roomVersion, stateEventList, authEventList)
	if err != nil {
		return nil, err
	}
	// apply the current event
retryAllowedState:
	if err = checkAllowedByState(backwardsExtremity, resolvedStateEvents); err != nil {
		switch missing := err.(type) {
		case gomatrixserverlib.MissingAuthEventError:
			servers := t.getServers(ctx, backwardsExtremity.RoomID())
			if len(servers) > 5 {
				servers = servers[:5]
			}
			h, err2 := t.lookupEvent(ctx, roomVersion, missing.AuthEventID, true, servers)
			switch err2.(type) {
			case verifySigError:
				// The auth event exists but its signature doesn't verify:
				// give up on auth-checking and return what we resolved.
				return &gomatrixserverlib.RespState{
					AuthEvents:  authEventList,
					StateEvents: resolvedStateEvents,
				}, nil
			case nil:
				// do nothing
			default:
				return nil, fmt.Errorf("missing auth event %s and failed to look it up: %w", missing.AuthEventID, err2)
			}
			util.GetLogger(ctx).Infof("fetched event %s", missing.AuthEventID)
			resolvedStateEvents = append(resolvedStateEvents, h.Unwrap())
			// Re-run the auth check now that the missing event is present.
			goto retryAllowedState
		default:
			// Not a missing-auth-event problem: fall through and return err.
		}
		return nil, err
	}
	return &gomatrixserverlib.RespState{
		AuthEvents:  authEventList,
		StateEvents: resolvedStateEvents,
	}, nil
}
// getMissingEvents returns a nil backwardsExtremity if missing events were fetched and handled, else returns the new backwards extremity which we should
// begin from. Returns an error only if we should terminate the transaction which initiated /get_missing_events
// This function recursively calls txnReq.processEvent with the missing events, which will be processed before this function returns.
// This means that we may recursively call this function, as we spider back up prev_events.
// nolint:gocyclo
func (t *txnReq) getMissingEvents(ctx context.Context, e *gomatrixserverlib.Event, roomVersion gomatrixserverlib.RoomVersion) (newEvents []*gomatrixserverlib.Event, err error) {
	logger := util.GetLogger(ctx).WithField("event_id", e.EventID()).WithField("room_id", e.RoomID())
	needed := gomatrixserverlib.StateNeededForAuth([]*gomatrixserverlib.Event{e})
	// query latest events (our trusted forward extremities)
	req := api.QueryLatestEventsAndStateRequest{
		RoomID:       e.RoomID(),
		StateToFetch: needed.Tuples(),
	}
	var res api.QueryLatestEventsAndStateResponse
	if err = t.rsAPI.QueryLatestEventsAndState(ctx, &req, &res); err != nil {
		logger.WithError(err).Warn("Failed to query latest events")
		return nil, err
	}
	latestEvents := make([]string, len(res.LatestEvents))
	for i := range res.LatestEvents {
		latestEvents[i] = res.LatestEvents[i].EventID
	}
	// Build the candidate server list: the origin plus any servers the
	// roomserver believes are joined to the room.
	servers := []gomatrixserverlib.ServerName{t.Origin}
	serverReq := &api.QueryServerJoinedToRoomRequest{
		RoomID: e.RoomID(),
	}
	serverRes := &api.QueryServerJoinedToRoomResponse{}
	if err = t.rsAPI.QueryServerJoinedToRoom(ctx, serverReq, serverRes); err == nil {
		servers = append(servers, serverRes.ServerNames...)
		logger.Infof("Found %d server(s) to query for missing events", len(servers))
	}
	// Ask each server in turn for the events between our forward extremities
	// and the new event, stopping at the first success.
	var missingResp *gomatrixserverlib.RespMissingEvents
	for _, server := range servers {
		var m gomatrixserverlib.RespMissingEvents
		if m, err = t.federation.LookupMissingEvents(ctx, server, e.RoomID(), gomatrixserverlib.MissingEvents{
			Limit: 20,
			// The latest event IDs that the sender already has. These are skipped when retrieving the previous events of latest_events.
			EarliestEvents: latestEvents,
			// The event IDs to retrieve the previous events for.
			LatestEvents: []string{e.EventID()},
		}, roomVersion); err == nil {
			missingResp = &m
			break
		} else {
			logger.WithError(err).Errorf("%s pushed us an event but %q did not respond to /get_missing_events", t.Origin, server)
		}
	}
	if missingResp == nil {
		logger.WithError(err).Errorf(
			"%s pushed us an event but %d server(s) couldn't give us details about prev_events via /get_missing_events - dropping this event until it can",
			t.Origin, len(servers),
		)
		return nil, missingPrevEventsError{
			eventID: e.EventID(),
			err:     err,
		}
	}
	// security: how we handle failures depends on whether or not this event will become the new forward extremity for the room.
	// There's 2 scenarios to consider:
	// - Case A: We got pushed an event and are now fetching missing prev_events. (isInboundTxn=true)
	// - Case B: We are fetching missing prev_events already and now fetching some more (isInboundTxn=false)
	// In Case B, we know for sure that the event we are currently processing will not become the new forward extremity for the room,
	// as it was called in response to an inbound txn which had it as a prev_event.
	// In Case A, the event is a forward extremity, and could eventually become the _only_ forward extremity in the room. This is bad
	// because it means we would trust the state at that event to be the state for the entire room, and allows rooms to be hijacked.
	// https://github.com/matrix-org/synapse/pull/3456
	// https://github.com/matrix-org/synapse/blob/229eb81498b0fe1da81e9b5b333a0285acde9446/synapse/handlers/federation.py#L335
	// For now, we do not allow Case B, so reject the event.
	logger.Infof("get_missing_events returned %d events", len(missingResp.Events))
	// topologically sort and sanity check that we are making forward progress
	newEvents = gomatrixserverlib.ReverseTopologicalOrdering(missingResp.Events, gomatrixserverlib.TopologicalOrderByPrevEvents)
	shouldHaveSomeEventIDs := e.PrevEventIDs()
	hasPrevEvent := false
Event:
	for _, pe := range shouldHaveSomeEventIDs {
		for _, ev := range newEvents {
			if ev.EventID() == pe {
				hasPrevEvent = true
				break Event
			}
		}
	}
	if !hasPrevEvent {
		err = fmt.Errorf("called /get_missing_events but server %s didn't return any prev_events with IDs %v", t.Origin, shouldHaveSomeEventIDs)
		logger.WithError(err).Errorf(
			"%s pushed us an event but couldn't give us details about prev_events via /get_missing_events - dropping this event until it can",
			t.Origin,
		)
		return nil, missingPrevEventsError{
			eventID: e.EventID(),
			err:     err,
		}
	}
	// we processed everything!
	return newEvents, nil
}
// lookupMissingStateViaState fetches the full room state at the given event
// from the transaction origin via federation /state, validating the
// response before returning it.
func (t *txnReq) lookupMissingStateViaState(ctx context.Context, roomID, eventID string, roomVersion gomatrixserverlib.RoomVersion) (
	respState *gomatrixserverlib.RespState, err error) {
	var state gomatrixserverlib.RespState
	if state, err = t.federation.LookupState(ctx, t.Origin, roomID, eventID, roomVersion); err != nil {
		return nil, err
	}
	// Check that the returned state is valid.
	if err = state.Check(ctx, t.keys, nil); err != nil {
		return nil, err
	}
	respState = &state
	return respState, nil
}
// lookupMissingStateViaStateIDs fetches the room state at the given event by
// calling /state_ids on the transaction origin, then filling in any events
// we don't already have: first from our own roomserver, then via concurrent
// federation /event requests. If more than half of the referenced events are
// missing it falls back to a single /state call instead, to avoid hammering
// the remote side with /event requests.
// nolint:gocyclo
func (t *txnReq) lookupMissingStateViaStateIDs(ctx context.Context, roomID, eventID string, roomVersion gomatrixserverlib.RoomVersion) (
	*gomatrixserverlib.RespState, error) {
	util.GetLogger(ctx).Infof("lookupMissingStateViaStateIDs %s", eventID)
	// fetch the state event IDs at the time of the event
	stateIDs, err := t.federation.LookupStateIDs(ctx, t.Origin, roomID, eventID)
	if err != nil {
		return nil, err
	}
	// work out which auth/state IDs are missing
	wantIDs := append(stateIDs.StateEventIDs, stateIDs.AuthEventIDs...)
	missing := make(map[string]bool)
	var missingEventList []string
	for _, sid := range wantIDs {
		if _, ok := t.haveEvents[sid]; !ok {
			if !missing[sid] {
				missing[sid] = true
				missingEventList = append(missingEventList, sid)
			}
		}
	}
	// fetch as many as we can from the roomserver
	queryReq := api.QueryEventsByIDRequest{
		EventIDs: missingEventList,
	}
	var queryRes api.QueryEventsByIDResponse
	if err = t.rsAPI.QueryEventsByID(ctx, &queryReq, &queryRes); err != nil {
		return nil, err
	}
	for i := range queryRes.Events {
		evID := queryRes.Events[i].EventID()
		t.haveEvents[evID] = queryRes.Events[i]
		if missing[evID] {
			delete(missing, evID)
		}
	}
	concurrentRequests := 8
	missingCount := len(missing)
	// If over 50% of the auth/state events from /state_ids are missing
	// then we'll just call /state instead, otherwise we'll just end up
	// hammering the remote side with /event requests unnecessarily.
	if missingCount > concurrentRequests && missingCount > len(wantIDs)/2 {
		util.GetLogger(ctx).WithFields(logrus.Fields{
			"missing":           missingCount,
			"event_id":          eventID,
			"room_id":           roomID,
			"total_state":       len(stateIDs.StateEventIDs),
			"total_auth_events": len(stateIDs.AuthEventIDs),
		}).Info("Fetching all state at event")
		return t.lookupMissingStateViaState(ctx, roomID, eventID, roomVersion)
	}
	if missingCount > 0 {
		util.GetLogger(ctx).WithFields(logrus.Fields{
			"missing":             missingCount,
			"event_id":            eventID,
			"room_id":             roomID,
			"total_state":         len(stateIDs.StateEventIDs),
			"total_auth_events":   len(stateIDs.AuthEventIDs),
			"concurrent_requests": concurrentRequests,
		}).Info("Fetching missing state at event")
		// Get a list of servers to fetch from.
		servers := t.getServers(ctx, roomID)
		if len(servers) > 5 {
			servers = servers[:5]
		}
		// Create a queue containing all of the missing event IDs that we want
		// to retrieve.
		pending := make(chan string, missingCount)
		for missingEventID := range missing {
			pending <- missingEventID
		}
		close(pending)
		// Define how many workers we should start to do this.
		if missingCount < concurrentRequests {
			concurrentRequests = missingCount
		}
		// Create the wait group.
		var fetchgroup sync.WaitGroup
		fetchgroup.Add(concurrentRequests)
		// This is the only place where we'll write to t.haveEvents from
		// multiple goroutines, and everywhere else is blocked on this
		// synchronous function anyway.
		var haveEventsMutex sync.Mutex
		// Define what we'll do in order to fetch the missing event ID.
		fetch := func(missingEventID string) {
			// BUGFIX: the result and error must be local to this closure.
			// Previously it assigned to the function-scoped `err`, which
			// was a data race between the worker goroutines.
			h, lerr := t.lookupEvent(ctx, roomVersion, missingEventID, false, servers)
			switch lerr.(type) {
			case verifySigError:
				return
			case nil:
				break
			default:
				util.GetLogger(ctx).WithFields(logrus.Fields{
					"event_id": missingEventID,
					"room_id":  roomID,
				}).Info("Failed to fetch missing event")
				return
			}
			haveEventsMutex.Lock()
			t.haveEvents[h.EventID()] = h
			haveEventsMutex.Unlock()
		}
		// Create the worker.
		worker := func(ch <-chan string) {
			defer fetchgroup.Done()
			for missingEventID := range ch {
				fetch(missingEventID)
			}
		}
		// Start the workers.
		for i := 0; i < concurrentRequests; i++ {
			go worker(pending)
		}
		// Wait for the workers to finish.
		fetchgroup.Wait()
	}
	resp, err := t.createRespStateFromStateIDs(stateIDs)
	return resp, err
}
// createRespStateFromStateIDs assembles a RespState from the event IDs in a
// /state_ids response, using events already cached in t.haveEvents. IDs that
// aren't cached are logged and skipped rather than treated as fatal.
func (t *txnReq) createRespStateFromStateIDs(stateIDs gomatrixserverlib.RespStateIDs) (
	*gomatrixserverlib.RespState, error) { // nolint:unparam
	// create a RespState response using the response to /state_ids as a guide
	var respState gomatrixserverlib.RespState
	for _, id := range stateIDs.StateEventIDs {
		ev, ok := t.haveEvents[id]
		if !ok {
			logrus.Warnf("Missing state event in createRespStateFromStateIDs: %s", id)
			continue
		}
		respState.StateEvents = append(respState.StateEvents, ev.Unwrap())
	}
	for _, id := range stateIDs.AuthEventIDs {
		ev, ok := t.haveEvents[id]
		if !ok {
			logrus.Warnf("Missing auth event in createRespStateFromStateIDs: %s", id)
			continue
		}
		respState.AuthEvents = append(respState.AuthEvents, ev.Unwrap())
	}
	// We purposefully do not do auth checks on the returned events, as they will still
	// be processed in the exact same way, just as a 'rejected' event
	// TODO: Add a field to HeaderedEvent to indicate if the event is rejected.
	return &respState, nil
}
// lookupEvent fetches a single event by ID. If localFirst is true the
// roomserver is tried first, falling back to federation /event requests
// against each of the given servers in turn. Events fetched over federation
// have their signatures verified (returning verifySigError on failure) and
// are recorded in t.newEvents so they can be distinguished from events we
// already knew about.
func (t *txnReq) lookupEvent(ctx context.Context, roomVersion gomatrixserverlib.RoomVersion, missingEventID string, localFirst bool, servers []gomatrixserverlib.ServerName) (*gomatrixserverlib.HeaderedEvent, error) {
	if localFirst {
		// fetch from the roomserver
		queryReq := api.QueryEventsByIDRequest{
			EventIDs: []string{missingEventID},
		}
		var queryRes api.QueryEventsByIDResponse
		if err := t.rsAPI.QueryEventsByID(ctx, &queryReq, &queryRes); err != nil {
			util.GetLogger(ctx).Warnf("Failed to query roomserver for missing event %s: %s - falling back to remote", missingEventID, err)
		} else if len(queryRes.Events) == 1 {
			return queryRes.Events[0], nil
		}
	}
	// Try each server until one returns a parseable event.
	var event *gomatrixserverlib.Event
	found := false
	for _, serverName := range servers {
		txn, err := t.federation.GetEvent(ctx, serverName, missingEventID)
		if err != nil || len(txn.PDUs) == 0 {
			util.GetLogger(ctx).WithError(err).WithField("event_id", missingEventID).Warn("Failed to get missing /event for event ID")
			continue
		}
		event, err = gomatrixserverlib.NewEventFromUntrustedJSON(txn.PDUs[0], roomVersion)
		if err != nil {
			util.GetLogger(ctx).WithError(err).WithField("event_id", missingEventID).Warnf("Transaction: Failed to parse event JSON of event")
			continue
		}
		found = true
		break
	}
	if !found {
		util.GetLogger(ctx).WithField("event_id", missingEventID).Warnf("Failed to get missing /event for event ID from %d server(s)", len(servers))
		return nil, fmt.Errorf("wasn't able to find event via %d server(s)", len(servers))
	}
	// Never trust an event received over federation without verifying its
	// signatures first.
	if err := gomatrixserverlib.VerifyAllEventSignatures(ctx, []*gomatrixserverlib.Event{event}, t.keys); err != nil {
		util.GetLogger(ctx).WithError(err).Warnf("Transaction: Couldn't validate signature of event %q", event.EventID())
		return nil, verifySigError{event.EventID(), err}
	}
	h := event.Headered(roomVersion)
	// Record that this event was newly fetched during this transaction.
	// t.newEvents is shared, so guard it with its mutex.
	t.newEventsMutex.Lock()
	t.newEvents[h.EventID()] = true
	t.newEventsMutex.Unlock()
	return h, nil
}
|
package api
// renterhost_test.go sets up larger integration tests between renters and
// hosts, checking that the whole storage ecosystem is functioning cohesively.
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/NebulousLabs/Sia/build"
"github.com/NebulousLabs/Sia/crypto"
"github.com/NebulousLabs/Sia/modules"
"github.com/NebulousLabs/Sia/types"
)
// TestHostObligationAcceptingContracts verifies that the host will complete
// storage proofs and the renter will successfully download even if the host
// has set accepting contracts to false.
func TestHostObligationAcceptingContracts(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.Close()
	err = st.setHostStorage()
	if err != nil {
		t.Fatal(err)
	}
	err = st.acceptContracts()
	if err != nil {
		t.Fatal(err)
	}
	err = st.announceHost()
	if err != nil {
		t.Fatal(err)
	}
	// Set an allowance so that the renter forms a contract with the single host.
	allowanceValues := url.Values{}
	allowanceValues.Set("funds", "50000000000000000000000000000") // 50k SC
	allowanceValues.Set("hosts", "1")
	allowanceValues.Set("period", "10")
	allowanceValues.Set("renewwindow", "5")
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}
	// Block until the allowance has finished forming contracts.
	err = build.Retry(50, time.Millisecond*250, func() error {
		var rc RenterContracts
		err = st.getAPI("/renter/contracts", &rc)
		if err != nil {
			return errors.New("couldn't get renter stats")
		}
		if len(rc.Contracts) != 1 {
			return errors.New("no contracts")
		}
		return nil
	})
	if err != nil {
		t.Fatal("allowance setting failed")
	}
	// Create a small random file to upload.
	filesize := 1024
	path := filepath.Join(st.dir, "test.dat")
	err = createRandFile(path, filesize)
	if err != nil {
		t.Fatal(err)
	}
	// upload the file
	uploadValues := url.Values{}
	uploadValues.Set("source", path)
	err = st.stdPostAPI("/renter/upload/test", uploadValues)
	if err != nil {
		t.Fatal(err)
	}
	// redundancy should reach 1
	var rf RenterFiles
	err = build.Retry(120, time.Millisecond*250, func() error {
		// Best-effort poll; a transient API error just triggers another retry.
		st.getAPI("/renter/files", &rf)
		if len(rf.Files) >= 1 && rf.Files[0].Available {
			return nil
		}
		return errors.New("file not uploaded")
	})
	if err != nil {
		t.Fatal(err)
	}
	// Get contracts via API call
	var cts ContractInfoGET
	err = st.getAPI("/host/contracts", &cts)
	if err != nil {
		t.Fatal(err)
	}
	// There should be some contracts returned
	if len(cts.Contracts) == 0 {
		t.Fatal("No contracts returned from /host/contracts API call.")
	}
	// Check if the number of contracts are equal to the number of storage obligations
	if len(cts.Contracts) != len(st.host.StorageObligations()) {
		t.Fatal("Number of contracts returned by API call and host method don't match.")
	}
	// set acceptingcontracts = false, mine some blocks, verify we can download
	settings := st.host.InternalSettings()
	settings.AcceptingContracts = false
	// The original dropped this error; fail fast if the host rejects the
	// updated settings, since the rest of the test depends on them.
	if err = st.host.SetInternalSettings(settings); err != nil {
		t.Fatal(err)
	}
	for i := 0; i < 3; i++ {
		_, err := st.miner.AddBlock()
		if err != nil {
			t.Fatal(err)
		}
		time.Sleep(time.Millisecond * 100)
	}
	downloadPath := filepath.Join(st.dir, "test-downloaded-verify.dat")
	err = st.stdGetAPI("/renter/download/test?destination=" + downloadPath)
	if err != nil {
		t.Fatal(err)
	}
	// mine blocks to cause the host to submit storage proofs to the blockchain.
	for i := 0; i < 15; i++ {
		_, err := st.miner.AddBlock()
		if err != nil {
			t.Fatal(err)
		}
		time.Sleep(time.Millisecond * 100)
	}
	// should have successful proofs
	success := false
	for _, so := range st.host.StorageObligations() {
		if so.ProofConfirmed {
			success = true
			break
		}
	}
	if !success {
		t.Fatal("no successful storage proofs")
	}
}
// TestHostAndRentVanilla sets up an integration test where a host and renter
// do basic uploads and downloads.
func TestHostAndRentVanilla(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.panicClose()
	// Announce the host and start accepting contracts.
	err = st.announceHost()
	if err != nil {
		t.Fatal(err)
	}
	err = st.setHostStorage()
	if err != nil {
		t.Fatal(err)
	}
	err = st.acceptContracts()
	if err != nil {
		t.Fatal(err)
	}
	// Set an allowance for the renter, allowing a contract to be formed.
	allowanceValues := url.Values{}
	testFunds := "10000000000000000000000000000" // 10k SC
	testPeriod := "20"
	renewWindow := "10"
	testPeriodInt := 20
	allowanceValues.Set("funds", testFunds)
	allowanceValues.Set("period", testPeriod)
	allowanceValues.Set("renewwindow", renewWindow)
	allowanceValues.Set("hosts", fmt.Sprint(recommendedHosts))
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}
	// Block until the allowance has finished forming contracts.
	err = build.Retry(50, time.Millisecond*250, func() error {
		var rc RenterContracts
		err = st.getAPI("/renter/contracts", &rc)
		if err != nil {
			return errors.New("couldn't get renter stats")
		}
		if len(rc.Contracts) != 1 {
			return errors.New("no contracts")
		}
		return nil
	})
	if err != nil {
		t.Fatal("allowance setting failed")
	}
	// Check the host, who should now be reporting file contracts.
	var cts ContractInfoGET
	err = st.getAPI("/host/contracts", &cts)
	if err != nil {
		t.Fatal(err)
	}
	if len(cts.Contracts) != 1 {
		t.Error("Host has wrong number of obligations:", len(cts.Contracts))
	}
	// Check if the obligation status is unresolved
	if cts.Contracts[0].ObligationStatus != "obligationUnresolved" {
		t.Error("Wrong obligation status for new contract:", cts.Contracts[0].ObligationStatus)
	}
	// Check if there are no sector roots on a new contract
	if cts.Contracts[0].SectorRootsCount != 0 {
		t.Error("Wrong number of sector roots for new contract:", cts.Contracts[0].SectorRootsCount)
	}
	// Check if there is locked collateral
	if cts.Contracts[0].LockedCollateral.IsZero() {
		t.Error("No locked collateral in contract.")
	}
	// Check if risked collateral is not equal to zero
	if !cts.Contracts[0].RiskedCollateral.IsZero() {
		t.Error("Risked collateral not zero in new contract.")
	}
	// Check if all potential revenues are zero
	if !(cts.Contracts[0].PotentialDownloadRevenue.IsZero() && cts.Contracts[0].PotentialUploadRevenue.IsZero() && cts.Contracts[0].PotentialStorageRevenue.IsZero()) {
		t.Error("Potential values not zero in new contract.")
	}
	// Create a file.
	path := filepath.Join(st.dir, "test.dat")
	err = createRandFile(path, 1024)
	if err != nil {
		t.Fatal(err)
	}
	// Upload the file to the renter.
	uploadValues := url.Values{}
	uploadValues.Set("source", path)
	err = st.stdPostAPI("/renter/upload/test", uploadValues)
	if err != nil {
		t.Fatal(err)
	}
	// Only one piece will be uploaded (10% at current redundancy).
	var rf RenterFiles
	for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ {
		st.getAPI("/renter/files", &rf) // best-effort poll
		time.Sleep(100 * time.Millisecond)
	}
	if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 {
		t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0])
	}
	// On a second connection, upload another file.
	path2 := filepath.Join(st.dir, "test2.dat")
	test2Size := modules.SectorSize*2 + 1
	err = createRandFile(path2, int(test2Size))
	if err != nil {
		t.Fatal(err)
	}
	uploadValues = url.Values{}
	uploadValues.Set("source", path2)
	err = st.stdPostAPI("/renter/upload/test2", uploadValues)
	if err != nil {
		t.Fatal(err)
	}
	// Only one piece will be uploaded (10% at current redundancy).
	for i := 0; i < 200 && (len(rf.Files) != 2 || rf.Files[0].UploadProgress < 10 || rf.Files[1].UploadProgress < 10); i++ {
		st.getAPI("/renter/files", &rf)
		time.Sleep(100 * time.Millisecond)
	}
	if len(rf.Files) != 2 || rf.Files[0].UploadProgress < 10 || rf.Files[1].UploadProgress < 10 {
		t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0], rf.Files[1])
	}
	// Try downloading the first file.
	downpath := filepath.Join(st.dir, "testdown.dat")
	err = st.stdGetAPI("/renter/download/test?destination=" + downpath)
	if err != nil {
		t.Fatal(err)
	}
	// Check that the download has the right contents.
	orig, err := ioutil.ReadFile(path)
	if err != nil {
		t.Fatal(err)
	}
	download, err := ioutil.ReadFile(downpath)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(orig, download) {
		t.Fatal("data mismatch when downloading a file")
	}
	// The renter's downloads queue should have 1 entry now.
	var queue RenterDownloadQueue
	if err = st.getAPI("/renter/downloads", &queue); err != nil {
		t.Fatal(err)
	}
	if len(queue.Downloads) != 1 {
		t.Fatalf("expected renter to have 1 download in the queue; got %v", len(queue.Downloads))
	}
	// Try downloading the second file.
	downpath2 := filepath.Join(st.dir, "testdown2.dat")
	err = st.stdGetAPI("/renter/download/test2?destination=" + downpath2)
	if err != nil {
		t.Fatal(err)
	}
	// Check that the download has the right contents.
	orig2, err := ioutil.ReadFile(path2)
	if err != nil {
		t.Fatal(err)
	}
	download2, err := ioutil.ReadFile(downpath2)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(orig2, download2) {
		t.Fatal("data mismatch when downloading a file")
	}
	// The renter's downloads queue should have 2 entries now.
	if err = st.getAPI("/renter/downloads", &queue); err != nil {
		t.Fatal(err)
	}
	if len(queue.Downloads) != 2 {
		t.Fatalf("expected renter to have 2 downloads in the queue; got %v", len(queue.Downloads))
	}
	// Mine two blocks, which should cause the host to submit the storage
	// obligation to the blockchain.
	for i := 0; i < 2; i++ {
		_, err := st.miner.AddBlock()
		if err != nil {
			t.Fatal(err)
		}
		time.Sleep(time.Millisecond * 200)
	}
	// Check that the host was able to get the file contract confirmed on the
	// blockchain.
	cts = ContractInfoGET{}
	err = st.getAPI("/host/contracts", &cts)
	if err != nil {
		t.Fatal(err)
	}
	if len(cts.Contracts) != 1 {
		t.Error("Host has wrong number of obligations:", len(cts.Contracts))
	}
	if !cts.Contracts[0].OriginConfirmed {
		t.Error("Host has not seen the file contract on the blockchain.")
	}
	// Check if there are sector roots
	if cts.Contracts[0].SectorRootsCount == 0 {
		t.Error("Sector roots count is zero for used obligation.")
	}
	// Check if risked collateral is not equal to zero
	if cts.Contracts[0].RiskedCollateral.IsZero() {
		t.Error("Risked collateral is zero for used obligation.")
	}
	// There should be some potential revenues in this contract
	if cts.Contracts[0].PotentialDownloadRevenue.IsZero() || cts.Contracts[0].PotentialUploadRevenue.IsZero() || cts.Contracts[0].PotentialStorageRevenue.IsZero() {
		t.Error("Potential revenue value is zero for used obligation.")
	}
	// Mine blocks until the host should have submitted a storage proof.
	for i := 0; i <= testPeriodInt+5; i++ {
		_, err := st.miner.AddBlock()
		if err != nil {
			t.Fatal(err)
		}
		time.Sleep(time.Millisecond * 200)
	}
	cts = ContractInfoGET{}
	err = st.getAPI("/host/contracts", &cts)
	if err != nil {
		t.Fatal(err)
	}
	success := false
	for _, contract := range cts.Contracts {
		if contract.ProofConfirmed {
			// Sector roots should be removed from storage obligation
			if contract.SectorRootsCount > 0 {
				t.Error("There are sector roots on completed storage obligation.")
			}
			success = true
			break
		}
	}
	if !success {
		t.Error("does not seem like the host has submitted a storage proof successfully to the network")
	}
}
// TestHostAndRentMultiHost sets up an integration test where three hosts and a
// renter do basic (parallel) uploads and downloads.
func TestHostAndRentMultiHost(t *testing.T) {
	if testing.Short() || !build.VLONG {
		t.SkipNow()
	}
	t.Parallel()
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.panicClose()
	stH1, err := blankServerTester(t.Name() + " - Host 2")
	if err != nil {
		t.Fatal(err)
	}
	defer stH1.server.panicClose()
	stH2, err := blankServerTester(t.Name() + " - Host 3")
	if err != nil {
		t.Fatal(err)
	}
	defer stH2.server.panicClose()
	testGroup := []*serverTester{st, stH1, stH2}
	// Connect the testers to each other so that they are all on the same
	// blockchain.
	err = fullyConnectNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Make sure that every wallet has money in it.
	err = fundAllNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Add storage to every host.
	err = addStorageToAllHosts(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Announce every host.
	err = announceAllHosts(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Set an allowance with three hosts.
	allowanceValues := url.Values{}
	allowanceValues.Set("funds", "50000000000000000000000000000") // 50k SC
	allowanceValues.Set("hosts", "3")
	allowanceValues.Set("period", "10")
	allowanceValues.Set("renewwindow", "2")
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}
	// Create a file to upload.
	filesize := 45678
	path := filepath.Join(st.dir, "test.dat")
	err = createRandFile(path, filesize)
	if err != nil {
		t.Fatal(err)
	}
	// Upload a file with 2-of-6 redundancy.
	uploadValues := url.Values{}
	uploadValues.Set("source", path)
	uploadValues.Set("datapieces", "2")
	uploadValues.Set("paritypieces", "4")
	err = st.stdPostAPI("/renter/upload/test", uploadValues)
	if err != nil {
		t.Fatal(err)
	}
	// Three pieces should get uploaded.
	var rf RenterFiles
	for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 50); i++ {
		st.getAPI("/renter/files", &rf) // best-effort poll
		time.Sleep(100 * time.Millisecond)
	}
	if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 50 {
		t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0])
	}
	// Try downloading the file.
	downpath := filepath.Join(st.dir, "testdown.dat")
	err = st.stdGetAPI("/renter/download/test?destination=" + downpath)
	if err != nil {
		t.Fatal(err)
	}
	// Check that the download has the right contents.
	orig, err := ioutil.ReadFile(path)
	if err != nil {
		t.Fatal(err)
	}
	download, err := ioutil.ReadFile(downpath)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(orig, download) {
		t.Fatal("data mismatch when downloading a file")
	}
	// The renter's downloads queue should have 1 entry now.
	var queue RenterDownloadQueue
	if err = st.getAPI("/renter/downloads", &queue); err != nil {
		t.Fatal(err)
	}
	if len(queue.Downloads) != 1 {
		t.Fatalf("expected renter to have 1 download in the queue; got %v", len(queue.Downloads))
	}
}
// TestHostAndRentManyFiles sets up an integration test where a single renter
// is uploading many files to the network.
func TestHostAndRentManyFiles(t *testing.T) {
	if testing.Short() || !build.VLONG {
		t.SkipNow()
	}
	t.Parallel()
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.panicClose()
	stH1, err := blankServerTester(t.Name() + " - Host 2")
	if err != nil {
		t.Fatal(err)
	}
	defer stH1.server.panicClose()
	stH2, err := blankServerTester(t.Name() + " - Host 3")
	if err != nil {
		t.Fatal(err)
	}
	defer stH2.server.panicClose()
	stH3, err := blankServerTester(t.Name() + " - Host 4")
	if err != nil {
		t.Fatal(err)
	}
	defer stH3.server.panicClose()
	testGroup := []*serverTester{st, stH1, stH2, stH3}
	// Connect the testers to each other so that they are all on the same
	// blockchain.
	err = fullyConnectNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Make sure that every wallet has money in it.
	err = fundAllNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Add storage to every host.
	err = addStorageToAllHosts(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Announce every host.
	err = announceAllHosts(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Set an allowance with four hosts.
	allowanceValues := url.Values{}
	allowanceValues.Set("funds", "50000000000000000000000000000") // 50k SC
	allowanceValues.Set("hosts", "4")
	allowanceValues.Set("period", "5")
	allowanceValues.Set("renewwindow", "2")
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}
	// Create 3 files to upload at the same time.
	filesize1 := 12347
	filesize2 := 22343
	filesize3 := 32349
	path1 := filepath.Join(st.dir, "test1.dat")
	path2 := filepath.Join(st.dir, "test2.dat")
	path3 := filepath.Join(st.dir, "test3.dat")
	err = createRandFile(path1, filesize1)
	if err != nil {
		t.Fatal(err)
	}
	err = createRandFile(path2, filesize2)
	if err != nil {
		t.Fatal(err)
	}
	err = createRandFile(path3, filesize3)
	if err != nil {
		t.Fatal(err)
	}
	// Concurrently upload a file with 1-of-4 redundancy, 2-of-4 redundancy,
	// and 3-of-4 redundancy. Goroutines use t.Error (not t.Fatal), which is
	// safe to call off the test goroutine.
	var wg sync.WaitGroup
	wg.Add(3)
	go func() {
		defer wg.Done()
		uploadValues := url.Values{}
		uploadValues.Set("source", path1)
		uploadValues.Set("datapieces", "1")
		uploadValues.Set("paritypieces", "3")
		err := st.stdPostAPI("/renter/upload/test1", uploadValues)
		if err != nil {
			t.Error(err)
		}
	}()
	go func() {
		defer wg.Done()
		uploadValues := url.Values{}
		uploadValues.Set("source", path2)
		uploadValues.Set("datapieces", "2")
		uploadValues.Set("paritypieces", "2")
		err := st.stdPostAPI("/renter/upload/test2", uploadValues)
		if err != nil {
			t.Error(err)
		}
	}()
	go func() {
		defer wg.Done()
		uploadValues := url.Values{}
		uploadValues.Set("source", path3)
		uploadValues.Set("datapieces", "3")
		uploadValues.Set("paritypieces", "1")
		err := st.stdPostAPI("/renter/upload/test3", uploadValues)
		if err != nil {
			t.Error(err)
		}
	}()
	// Block until the upload call is complete for all three files.
	wg.Wait()
	// Block until all files hit 100% uploaded.
	var rf RenterFiles
	for i := 0; i < 200 && (len(rf.Files) != 3 || rf.Files[0].UploadProgress < 100 || rf.Files[1].UploadProgress < 100 || rf.Files[2].UploadProgress < 100); i++ {
		st.getAPI("/renter/files", &rf) // best-effort poll
		time.Sleep(500 * time.Millisecond)
	}
	if len(rf.Files) != 3 || rf.Files[0].UploadProgress < 100 || rf.Files[1].UploadProgress < 100 || rf.Files[2].UploadProgress < 100 {
		t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0], rf.Files[1], rf.Files[2])
	}
	// Download all three files in parallel.
	wg.Add(3)
	go func() {
		defer wg.Done()
		downpath := filepath.Join(st.dir, "testdown1.dat")
		err := st.stdGetAPI("/renter/download/test1?destination=" + downpath)
		if err != nil {
			t.Error(err)
		}
		// Check that the download has the right contents.
		orig, err := ioutil.ReadFile(path1)
		if err != nil {
			t.Error(err)
		}
		download, err := ioutil.ReadFile(downpath)
		if err != nil {
			t.Error(err)
		}
		if !bytes.Equal(orig, download) {
			t.Error("data mismatch when downloading a file")
		}
	}()
	go func() {
		defer wg.Done()
		downpath := filepath.Join(st.dir, "testdown2.dat")
		err := st.stdGetAPI("/renter/download/test2?destination=" + downpath)
		if err != nil {
			t.Error(err)
		}
		// Check that the download has the right contents.
		orig, err := ioutil.ReadFile(path2)
		if err != nil {
			t.Error(err)
		}
		download, err := ioutil.ReadFile(downpath)
		if err != nil {
			t.Error(err)
		}
		if !bytes.Equal(orig, download) {
			t.Error("data mismatch when downloading a file")
		}
	}()
	go func() {
		defer wg.Done()
		downpath := filepath.Join(st.dir, "testdown3.dat")
		err := st.stdGetAPI("/renter/download/test3?destination=" + downpath)
		if err != nil {
			t.Error(err)
		}
		// Check that the download has the right contents.
		orig, err := ioutil.ReadFile(path3)
		if err != nil {
			t.Error(err)
		}
		download, err := ioutil.ReadFile(downpath)
		if err != nil {
			t.Error(err)
		}
		if !bytes.Equal(orig, download) {
			t.Error("data mismatch when downloading a file")
		}
	}()
	wg.Wait()
	// The renter's downloads queue should have 3 entries now.
	var queue RenterDownloadQueue
	if err = st.getAPI("/renter/downloads", &queue); err != nil {
		t.Fatal(err)
	}
	if len(queue.Downloads) != 3 {
		t.Fatalf("expected renter to have 3 downloads in the queue; got %v", len(queue.Downloads))
	}
}
// TestRenterUploadDownload tests that downloading and uploading in parallel
// does not result in failures or stalling.
func TestRenterUploadDownload(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.panicClose()
	// Announce the host and start accepting contracts.
	err = st.announceHost()
	if err != nil {
		t.Fatal(err)
	}
	err = st.acceptContracts()
	if err != nil {
		t.Fatal(err)
	}
	err = st.setHostStorage()
	if err != nil {
		t.Fatal(err)
	}
	// Set an allowance for the renter, allowing a contract to be formed.
	allowanceValues := url.Values{}
	testFunds := "10000000000000000000000000000" // 10k SC
	testPeriod := "10"
	allowanceValues.Set("funds", testFunds)
	allowanceValues.Set("period", testPeriod)
	allowanceValues.Set("renewwindow", testRenewWindow)
	allowanceValues.Set("hosts", fmt.Sprint(recommendedHosts))
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}
	// Block until the allowance has finished forming contracts.
	err = build.Retry(50, time.Millisecond*250, func() error {
		var rc RenterContracts
		err = st.getAPI("/renter/contracts", &rc)
		if err != nil {
			return errors.New("couldn't get renter stats")
		}
		if len(rc.Contracts) != 1 {
			return errors.New("no contracts")
		}
		return nil
	})
	if err != nil {
		t.Fatal("allowance setting failed")
	}
	// Check financial metrics; coins should have been spent on contracts
	var rg RenterGET
	err = st.getAPI("/renter", &rg)
	if err != nil {
		t.Fatal(err)
	}
	spent := rg.Settings.Allowance.Funds.Sub(rg.FinancialMetrics.Unspent)
	if spent.IsZero() {
		t.Fatal("financial metrics do not reflect contract spending")
	}
	// Create a file.
	path := filepath.Join(st.dir, "test.dat")
	err = createRandFile(path, 1024)
	if err != nil {
		t.Fatal(err)
	}
	// Upload to host.
	uploadValues := url.Values{}
	uploadValues.Set("source", path)
	err = st.stdPostAPI("/renter/upload/test", uploadValues)
	if err != nil {
		t.Fatal(err)
	}
	// Only one piece will be uploaded (10% at current redundancy).
	var rf RenterFiles
	for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ {
		st.getAPI("/renter/files", &rf) // best-effort poll
		time.Sleep(100 * time.Millisecond)
	}
	if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 {
		t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0])
	}
	// In parallel, upload another file and download the first file.
	path2 := filepath.Join(st.dir, "test2.dat")
	test2Size := modules.SectorSize*2 + 1
	err = createRandFile(path2, int(test2Size))
	if err != nil {
		t.Fatal(err)
	}
	uploadValues = url.Values{}
	uploadValues.Set("source", path2)
	err = st.stdPostAPI("/renter/upload/test2", uploadValues)
	if err != nil {
		t.Fatal(err)
	}
	downpath := filepath.Join(st.dir, "testdown.dat")
	err = st.stdGetAPI("/renter/download/test?destination=" + downpath)
	if err != nil {
		t.Fatal(err)
	}
	// Check that the download has the right contents.
	orig, err := ioutil.ReadFile(path)
	if err != nil {
		t.Fatal(err)
	}
	download, err := ioutil.ReadFile(downpath)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(orig, download) {
		t.Fatal("data mismatch when downloading a file")
	}
	// Wait for upload to complete.
	for i := 0; i < 200 && (len(rf.Files) != 2 || rf.Files[0].UploadProgress < 10 || rf.Files[1].UploadProgress < 10); i++ {
		st.getAPI("/renter/files", &rf)
		time.Sleep(100 * time.Millisecond)
	}
	if len(rf.Files) != 2 || rf.Files[0].UploadProgress < 10 || rf.Files[1].UploadProgress < 10 {
		t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0], rf.Files[1])
	}
	// Check financial metrics; funds should have been spent on uploads/downloads
	err = st.getAPI("/renter", &rg)
	if err != nil {
		t.Fatal(err)
	}
	fm := rg.FinancialMetrics
	newSpent := rg.Settings.Allowance.Funds.Sub(fm.Unspent)
	// all new spending should be reflected in upload/download/storage spending
	diff := fm.UploadSpending.Add(fm.DownloadSpending).Add(fm.StorageSpending)
	if !diff.Equals(newSpent.Sub(spent)) {
		t.Fatal("all new spending should be reflected in metrics:", diff, newSpent.Sub(spent))
	}
}
// TestRenterCancelAllowance tests that setting an empty allowance causes
// uploads, downloads, and renewals to cease.
func TestRenterCancelAllowance(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.panicClose()
	// Announce the host and start accepting contracts.
	err = st.announceHost()
	if err != nil {
		t.Fatal(err)
	}
	err = st.setHostStorage()
	if err != nil {
		t.Fatal(err)
	}
	err = st.acceptContracts()
	if err != nil {
		t.Fatal(err)
	}
	// Set an allowance for the renter, allowing a contract to be formed.
	allowanceValues := url.Values{}
	testFunds := "10000000000000000000000000000" // 10k SC
	testPeriod := 20
	allowanceValues.Set("funds", testFunds)
	allowanceValues.Set("period", fmt.Sprint(testPeriod))
	allowanceValues.Set("renewwindow", testRenewWindow)
	allowanceValues.Set("hosts", fmt.Sprint(recommendedHosts))
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}
	// Block until the allowance has finished forming contracts.
	err = build.Retry(50, time.Millisecond*250, func() error {
		var rc RenterContracts
		err = st.getAPI("/renter/contracts", &rc)
		if err != nil {
			return errors.New("couldn't get renter stats")
		}
		if len(rc.Contracts) != 1 {
			return errors.New("no contracts")
		}
		return nil
	})
	if err != nil {
		t.Fatal("allowance setting failed")
	}
	// Create a file.
	path := filepath.Join(st.dir, "test.dat")
	err = createRandFile(path, 1024)
	if err != nil {
		t.Fatal(err)
	}
	// Upload the file to the renter.
	uploadValues := url.Values{}
	uploadValues.Set("source", path)
	err = st.stdPostAPI("/renter/upload/test", uploadValues)
	if err != nil {
		t.Fatal(err)
	}
	// Only one piece will be uploaded (10% at current redundancy).
	var rf RenterFiles
	for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ {
		// Best-effort poll; errors are ignored because the loop simply retries.
		st.getAPI("/renter/files", &rf)
		time.Sleep(100 * time.Millisecond)
	}
	if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 {
		t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0])
	}
	// Cancel the allowance by setting every allowance field to zero.
	allowanceValues = url.Values{}
	allowanceValues.Set("funds", "0")
	allowanceValues.Set("hosts", "0")
	allowanceValues.Set("period", "0")
	allowanceValues.Set("renewwindow", "0")
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}
	// Try downloading the file; should succeed because the existing contract
	// is still within its period.
	downpath := filepath.Join(st.dir, "testdown.dat")
	err = st.stdGetAPI("/renter/download/test?destination=" + downpath)
	if err != nil {
		t.Fatal("downloading file failed", err)
	}
	// Try to upload a file after the allowance was cancelled. The POST itself
	// is accepted; the upload is expected to make no progress (checked below).
	err = st.stdPostAPI("/renter/upload/test2", uploadValues)
	if err != nil {
		t.Fatal(err)
	}
	// Give it some time to upload.
	time.Sleep(time.Second)
	// Redundancy should still be 0.
	if err := st.getAPI("/renter/files", &rf); err != nil {
		t.Fatal(err)
	}
	if len(rf.Files) != 2 || rf.Files[1].UploadProgress > 0 || rf.Files[1].Redundancy > 0 {
		t.Fatal("uploading a file after cancelling allowance should fail",
			rf.Files[1].UploadProgress, rf.Files[1].Redundancy)
	}
	// Mine enough blocks for the period to pass and the contracts to expire.
	for i := 0; i < testPeriod; i++ {
		if _, err := st.miner.AddBlock(); err != nil {
			t.Fatal(err)
		}
	}
	// Try downloading the file; should fail now that the contract expired and
	// will not be renewed.
	err = st.stdGetAPI("/renter/download/test?destination=" + downpath)
	if err == nil || !strings.Contains(err.Error(), "download failed") {
		t.Fatal("expected insufficient hosts error, got", err)
	}
	// The uploaded file should have 0x redundancy now.
	err = build.Retry(600, 100*time.Millisecond, func() error {
		if err := st.getAPI("/renter/files", &rf); err != nil {
			return err
		}
		if len(rf.Files) != 2 || rf.Files[0].Redundancy != 0 {
			return errors.New("file redundancy should be 0 now")
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}
// TestRenterParallelDelete tests that uploading and deleting parallel does not
// result in failures or stalling.
func TestRenterParallelDelete(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.panicClose()
	// Announce the host and start accepting contracts.
	err = st.announceHost()
	if err != nil {
		t.Fatal(err)
	}
	err = st.acceptContracts()
	if err != nil {
		t.Fatal(err)
	}
	err = st.setHostStorage()
	if err != nil {
		t.Fatal(err)
	}
	// Set an allowance for the renter, allowing a contract to be formed.
	allowanceValues := url.Values{}
	testFunds := "10000000000000000000000000000" // 10k SC
	testPeriod := "10"
	allowanceValues.Set("funds", testFunds)
	allowanceValues.Set("period", testPeriod)
	allowanceValues.Set("renewwindow", testRenewWindow)
	allowanceValues.Set("hosts", fmt.Sprint(recommendedHosts))
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}
	// Create two files.
	path := filepath.Join(st.dir, "test.dat")
	err = createRandFile(path, 1024)
	if err != nil {
		t.Fatal(err)
	}
	path2 := filepath.Join(st.dir, "test2.dat")
	err = createRandFile(path2, 1024)
	if err != nil {
		t.Fatal(err)
	}
	// Upload the first file to host.
	uploadValues := url.Values{}
	uploadValues.Set("source", path)
	err = st.stdPostAPI("/renter/upload/test", uploadValues)
	if err != nil {
		t.Fatal(err)
	}
	// Wait for the first file to be registered in the renter.
	var rf RenterFiles
	for i := 0; i < 200 && len(rf.Files) != 1; i++ {
		// Best-effort poll; errors are ignored because the loop simply retries.
		st.getAPI("/renter/files", &rf)
		time.Sleep(100 * time.Millisecond)
	}
	if len(rf.Files) != 1 {
		t.Fatal("file is not being registered:", rf.Files)
	}
	// In parallel, start uploading the other file, and delete the first file.
	uploadValues = url.Values{}
	uploadValues.Set("source", path2)
	err = st.stdPostAPI("/renter/upload/test2", uploadValues)
	if err != nil {
		t.Fatal(err)
	}
	err = st.stdPostAPI("/renter/delete/test", url.Values{})
	if err != nil {
		t.Fatal(err)
	}
	// Only the second file should be present
	st.getAPI("/renter/files", &rf)
	if len(rf.Files) != 1 || rf.Files[0].SiaPath != "test2" {
		t.Fatal("file was not deleted properly:", rf.Files)
	}
	// Wait for the second upload to complete.
	for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ {
		st.getAPI("/renter/files", &rf)
		time.Sleep(100 * time.Millisecond)
	}
	if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 {
		t.Fatal("the uploading is not succeeding for some reason:", rf.Files)
	}
	// In parallel, download and delete the second file.
	// The delete is fired on a goroutine with its error deliberately dropped:
	// the point is to race the delete against the download below.
	go st.stdPostAPI("/renter/delete/test2", url.Values{})
	time.Sleep(100 * time.Millisecond)
	downpath := filepath.Join(st.dir, "testdown.dat")
	err = st.stdGetAPI("/renter/download/test2?destination=" + downpath)
	if err == nil {
		t.Fatal("download should fail after delete")
	}
	// No files should be present
	st.getAPI("/renter/files", &rf)
	if len(rf.Files) != 0 {
		t.Fatal("file was not deleted properly:", rf.Files)
	}
}
// TestRenterRenew sets up an integration test where a renter renews a
// contract with a host.
func TestRenterRenew(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.panicClose()
	// Announce the host and start accepting contracts.
	err = st.announceHost()
	if err != nil {
		t.Fatal(err)
	}
	err = st.acceptContracts()
	if err != nil {
		t.Fatal(err)
	}
	err = st.setHostStorage()
	if err != nil {
		t.Fatal(err)
	}
	// Wait until the announced host shows up in the hostdb.
	var ah HostdbActiveGET
	for i := 0; i < 50; i++ {
		if err = st.getAPI("/hostdb/active", &ah); err != nil {
			t.Fatal(err)
		}
		if len(ah.Hosts) == 1 {
			break
		}
		time.Sleep(time.Millisecond * 100)
	}
	if len(ah.Hosts) != 1 {
		t.Fatalf("expected 1 host, got %v", len(ah.Hosts))
	}
	// Set an allowance for the renter, allowing a contract to be formed.
	allowanceValues := url.Values{}
	testFunds := "10000000000000000000000000000" // 10k SC
	testPeriod := 10
	allowanceValues.Set("funds", testFunds)
	allowanceValues.Set("period", strconv.Itoa(testPeriod))
	allowanceValues.Set("renewwindow", strconv.Itoa(testPeriod/2))
	allowanceValues.Set("hosts", fmt.Sprint(recommendedHosts))
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}
	// Block until the allowance has finished forming contracts.
	err = build.Retry(50, time.Millisecond*250, func() error {
		var rc RenterContracts
		err = st.getAPI("/renter/contracts", &rc)
		if err != nil {
			return errors.New("couldn't get renter stats")
		}
		if len(rc.Contracts) != 1 {
			return errors.New("no contracts")
		}
		return nil
	})
	if err != nil {
		t.Fatal("allowance setting failed")
	}
	// Create a file.
	path := filepath.Join(st.dir, "test.dat")
	err = createRandFile(path, 1024)
	if err != nil {
		t.Fatal(err)
	}
	// Upload the file to the renter.
	uploadValues := url.Values{}
	uploadValues.Set("source", path)
	err = st.stdPostAPI("/renter/upload/test", uploadValues)
	if err != nil {
		t.Fatal(err)
	}
	// Only one piece will be uploaded (10% at current redundancy).
	var rf RenterFiles
	for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ {
		st.getAPI("/renter/files", &rf) // best-effort poll
		time.Sleep(100 * time.Millisecond)
	}
	if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 {
		t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0])
	}
	// Get current contract ID.
	var rc RenterContracts
	err = st.getAPI("/renter/contracts", &rc)
	if err != nil {
		t.Fatal(err)
	}
	contractID := rc.Contracts[0].ID
	// Mine enough blocks to enter the renewal window.
	testWindow := testPeriod / 2
	for i := 0; i < testWindow+1; i++ {
		_, err = st.miner.AddBlock()
		if err != nil {
			t.Fatal(err)
		}
	}
	// Wait for the contract to be renewed (its ID changes on renewal).
	for i := 0; i < 200 && (len(rc.Contracts) != 1 || rc.Contracts[0].ID == contractID); i++ {
		st.getAPI("/renter/contracts", &rc)
		time.Sleep(100 * time.Millisecond)
	}
	if rc.Contracts[0].ID == contractID {
		t.Fatal("contract was not renewed:", rc.Contracts[0])
	}
	// Try downloading the file.
	downpath := filepath.Join(st.dir, "testdown.dat")
	err = st.stdGetAPI("/renter/download/test?destination=" + downpath)
	if err != nil {
		t.Fatal(err)
	}
	// Check that the download has the right contents.
	orig, err := ioutil.ReadFile(path)
	if err != nil {
		t.Fatal(err)
	}
	download, err := ioutil.ReadFile(downpath)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(orig, download) {
		t.Fatal("data mismatch when downloading a file")
	}
}
// TestRenterAllowance sets up an integration test where a renter attempts to
// download a file after changing the allowance.
func TestRenterAllowance(t *testing.T) {
	t.Skip("bypassing NDF")
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.panicClose()

	// Announce the host and start accepting contracts.
	err = st.announceHost()
	if err != nil {
		t.Fatal(err)
	}
	err = st.acceptContracts()
	if err != nil {
		t.Fatal(err)
	}
	err = st.setHostStorage()
	if err != nil {
		t.Fatal(err)
	}

	// Set an allowance for the renter, allowing a contract to be formed.
	allowanceValues := url.Values{}
	testFunds := types.SiacoinPrecision.Mul64(10000) // 10k SC
	testPeriod := 20
	allowanceValues.Set("funds", testFunds.String())
	allowanceValues.Set("period", strconv.Itoa(testPeriod))
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}

	// Create a file.
	path := filepath.Join(st.dir, "test.dat")
	err = createRandFile(path, 1024)
	if err != nil {
		t.Fatal(err)
	}

	// Upload the file to the renter.
	uploadValues := url.Values{}
	uploadValues.Set("source", path)
	err = st.stdPostAPI("/renter/upload/test", uploadValues)
	if err != nil {
		t.Fatal(err)
	}

	// Only one piece will be uploaded (10% at current redundancy). Poll until
	// the file appears with enough progress; transient getAPI errors are
	// tolerated because the loop simply retries.
	var rf RenterFiles
	for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ {
		st.getAPI("/renter/files", &rf)
		time.Sleep(100 * time.Millisecond)
	}
	if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 {
		t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0])
	}

	t.Skip("ndf - re-enable after contractor overhaul")

	// Try downloading the file after modifying the allowance in various ways.
	allowances := []struct {
		funds  types.Currency
		period int
	}{
		{testFunds.Mul64(10), testPeriod / 2},
		{testFunds, testPeriod / 2},
		{testFunds.Div64(10), testPeriod / 2},
		{testFunds.Mul64(10), testPeriod},
		{testFunds, testPeriod},
		{testFunds.Div64(10), testPeriod},
		{testFunds.Mul64(10), testPeriod * 2},
		{testFunds, testPeriod * 2},
		{testFunds.Div64(10), testPeriod * 2},
	}
	for _, a := range allowances {
		allowanceValues.Set("funds", a.funds.String())
		allowanceValues.Set("period", strconv.Itoa(a.period))
		err = st.stdPostAPI("/renter", allowanceValues)
		if err != nil {
			t.Fatal(err)
		}
		time.Sleep(100 * time.Millisecond)

		// Try downloading the file.
		downpath := filepath.Join(st.dir, "testdown.dat")
		err = st.stdGetAPI("/renter/download/test?destination=" + downpath)
		if err != nil {
			t.Fatal(err)
		}

		// Check that the download has the right contents.
		orig, err := ioutil.ReadFile(path)
		if err != nil {
			t.Fatal(err)
		}
		download, err := ioutil.ReadFile(downpath)
		if err != nil {
			t.Fatal(err)
		}
		// bytes.Equal is the idiomatic byte-slice equality check
		// (staticcheck S1004) and replaces bytes.Compare(...) != 0.
		if !bytes.Equal(orig, download) {
			t.Fatal("data mismatch when downloading a file")
		}
	}
}
// TestHostAndRentReload sets up an integration test where a host and renter
// do basic uploads and downloads, with an intervening shutdown+startup.
func TestHostAndRentReload(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}

	// Announce the host and start accepting contracts.
	err = st.announceHost()
	if err != nil {
		t.Fatal(err)
	}
	err = st.acceptContracts()
	if err != nil {
		t.Fatal(err)
	}
	err = st.setHostStorage()
	if err != nil {
		t.Fatal(err)
	}
	// Mine a block so that the wallet reclaims refund outputs
	_, err = st.miner.AddBlock()
	if err != nil {
		t.Fatal(err)
	}

	// Set an allowance for the renter, allowing a contract to be formed.
	allowanceValues := url.Values{}
	testFunds := "10000000000000000000000000000" // 10k SC
	testPeriod := "10"
	allowanceValues.Set("funds", testFunds)
	allowanceValues.Set("period", testPeriod)
	allowanceValues.Set("renewwindow", testRenewWindow)
	allowanceValues.Set("hosts", fmt.Sprint(recommendedHosts))
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}

	// Block until the allowance has finished forming contracts.
	err = build.Retry(50, time.Millisecond*250, func() error {
		var rc RenterContracts
		err = st.getAPI("/renter/contracts", &rc)
		if err != nil {
			return errors.New("couldn't get renter stats")
		}
		if len(rc.Contracts) != 1 {
			return errors.New("no contracts")
		}
		return nil
	})
	if err != nil {
		t.Fatal("allowance setting failed")
	}

	// Create a file.
	path := filepath.Join(st.dir, "test.dat")
	err = createRandFile(path, 1024)
	if err != nil {
		t.Fatal(err)
	}

	// Upload the file to the renter.
	uploadValues := url.Values{}
	uploadValues.Set("source", path)
	err = st.stdPostAPI("/renter/upload/test", uploadValues)
	if err != nil {
		t.Fatal(err)
	}
	// Only one piece will be uploaded (10% at current redundancy). Poll
	// until the file appears with enough progress; transient getAPI errors
	// are tolerated because the loop retries.
	var rf RenterFiles
	for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ {
		st.getAPI("/renter/files", &rf)
		time.Sleep(100 * time.Millisecond)
	}
	if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 {
		t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0])
	}

	// Try downloading the file.
	downpath := filepath.Join(st.dir, "testdown.dat")
	err = st.stdGetAPI("/renter/download/test?destination=" + downpath)
	if err != nil {
		t.Fatal(err)
	}
	// Check that the download has the right contents.
	orig, err := ioutil.ReadFile(path)
	if err != nil {
		t.Fatal(err)
	}
	download, err := ioutil.ReadFile(downpath)
	if err != nil {
		t.Fatal(err)
	}
	// bytes.Equal is the idiomatic byte-slice equality check
	// (staticcheck S1004) and replaces bytes.Compare(...) != 0.
	if !bytes.Equal(orig, download) {
		t.Fatal("data mismatch when downloading a file")
	}

	// The renter's downloads queue should have 1 entry now.
	var queue RenterDownloadQueue
	if err = st.getAPI("/renter/downloads", &queue); err != nil {
		t.Fatal(err)
	}
	if len(queue.Downloads) != 1 {
		t.Fatalf("expected renter to have 1 download in the queue; got %v", len(queue.Downloads))
	}

	// close and reopen the server
	err = st.server.Close()
	if err != nil {
		t.Fatal(err)
	}
	st, err = assembleServerTester(st.walletKey, st.dir)
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.panicClose()

	// Announce the host again and wait until the host is re-scanned and put
	// back into the hostdb as an active host.
	announceValues := url.Values{}
	announceValues.Set("address", string(st.host.ExternalSettings().NetAddress))
	err = st.stdPostAPI("/host/announce", announceValues)
	if err != nil {
		t.Fatal(err)
	}
	// Mine a block.
	_, err = st.miner.AddBlock()
	if err != nil {
		t.Fatal(err)
	}
	err = build.Retry(100, time.Millisecond*100, func() error {
		var hosts HostdbActiveGET
		err := st.getAPI("/hostdb/active", &hosts)
		if err != nil {
			return err
		}
		if len(hosts.Hosts) != 1 {
			return errors.New("host is not in the set of active hosts")
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// Try downloading the file.
	err = st.stdGetAPI("/renter/download/test?destination=" + downpath)
	if err != nil {
		t.Fatal(err)
	}
	// Check that the download has the right contents.
	orig, err = ioutil.ReadFile(path)
	if err != nil {
		t.Fatal(err)
	}
	download, err = ioutil.ReadFile(downpath)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(orig, download) {
		t.Fatal("data mismatch when downloading a file")
	}
}
// TestHostAndRenterRenewInterrupt sets up a renter and a separate host node,
// uploads a file, then mines into the renewal window and verifies the renter
// replaces its contract and can still download the file.
func TestHostAndRenterRenewInterrupt(t *testing.T) {
	t.Skip("active test following contractor overhaul")
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	stHost, err := blankServerTester(t.Name() + "-Host")
	if err != nil {
		t.Fatal(err)
	}
	sts := []*serverTester{st, stHost}
	err = fullyConnectNodes(sts)
	if err != nil {
		t.Fatal(err)
	}
	err = fundAllNodes(sts)
	if err != nil {
		t.Fatal(err)
	}

	// Announce the host.
	err = stHost.acceptContracts()
	if err != nil {
		t.Fatal(err)
	}
	err = stHost.setHostStorage()
	if err != nil {
		t.Fatal(err)
	}
	err = stHost.announceHost()
	if err != nil {
		t.Fatal(err)
	}

	// Wait for host to be seen in renter's hostdb
	var ah HostdbActiveGET
	for i := 0; i < 50; i++ {
		if err = st.getAPI("/hostdb/active", &ah); err != nil {
			t.Fatal(err)
		}
		if len(ah.Hosts) == 1 {
			break
		}
		time.Sleep(time.Millisecond * 100)
	}
	if len(ah.Hosts) != 1 {
		t.Fatalf("expected 1 host, got %v", len(ah.Hosts))
	}

	// Upload a file to the host.
	allowanceValues := url.Values{}
	testFunds := "10000000000000000000000000000" // 10k SC
	testPeriod := "10"
	testPeriodInt := 10
	allowanceValues.Set("funds", testFunds)
	allowanceValues.Set("period", testPeriod)
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}

	// Create a file.
	path := filepath.Join(st.dir, "test.dat")
	err = createRandFile(path, 10e3)
	if err != nil {
		t.Fatal(err)
	}

	// Upload the file to the renter.
	uploadValues := url.Values{}
	uploadValues.Set("source", path)
	err = st.stdPostAPI("/renter/upload/test", uploadValues)
	if err != nil {
		t.Fatal(err)
	}

	// Get current contract ID.
	var rc RenterContracts
	err = st.getAPI("/renter/contracts", &rc)
	if err != nil {
		t.Fatal(err)
	}
	contractID := rc.Contracts[0].ID

	// Mine enough blocks to enter the renewal window.
	testWindow := testPeriodInt / 2
	for i := 0; i < testWindow+1; i++ {
		_, err = st.miner.AddBlock()
		if err != nil {
			t.Fatal(err)
		}
	}
	// Wait for the contract to be renewed. Transient getAPI errors are
	// tolerated because the loop retries.
	for i := 0; i < 200 && (len(rc.Contracts) != 1 || rc.Contracts[0].ID == contractID); i++ {
		st.getAPI("/renter/contracts", &rc)
		time.Sleep(100 * time.Millisecond)
	}
	if rc.Contracts[0].ID == contractID {
		t.Fatal("contract was not renewed:", rc.Contracts[0])
	}

	// Only one piece will be uploaded (10% at current redundancy).
	var rf RenterFiles
	for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ {
		st.getAPI("/renter/files", &rf)
		time.Sleep(1000 * time.Millisecond)
	}
	if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 {
		t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0])
	}

	// Try downloading the file.
	downpath := filepath.Join(st.dir, "testdown.dat")
	err = st.stdGetAPI("/renter/download/test?destination=" + downpath)
	if err != nil {
		t.Fatal(err)
	}
	// Check that the download has the right contents.
	orig, err := ioutil.ReadFile(path)
	if err != nil {
		t.Fatal(err)
	}
	download, err := ioutil.ReadFile(downpath)
	if err != nil {
		t.Fatal(err)
	}
	// bytes.Equal is the idiomatic byte-slice equality check
	// (staticcheck S1004) and replaces bytes.Compare(...) != 0.
	if !bytes.Equal(orig, download) {
		t.Fatal("data mismatch when downloading a file")
	}
}
// TestRedundancyReporting verifies that redundancy reporting is accurate if
// contracts become offline.
func TestRedundancyReporting(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.Close()
	stH1, err := blankServerTester(t.Name() + " - Host 2")
	if err != nil {
		t.Fatal(err)
	}
	testGroup := []*serverTester{st, stH1}

	// Connect the testers to each other so that they are all on the same
	// blockchain.
	err = fullyConnectNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Make sure that every wallet has money in it.
	err = fundAllNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Add storage to every host.
	err = addStorageToAllHosts(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Announce every host.
	err = announceAllHosts(testGroup)
	if err != nil {
		t.Fatal(err)
	}

	// Set an allowance with two hosts.
	allowanceValues := url.Values{}
	allowanceValues.Set("funds", "50000000000000000000000000000") // 50k SC
	allowanceValues.Set("hosts", "2")
	allowanceValues.Set("period", "10")
	allowanceValues.Set("renewwindow", "5")
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}

	// Block until the allowance has finished forming contracts.
	err = build.Retry(50, time.Millisecond*250, func() error {
		var rc RenterContracts
		err = st.getAPI("/renter/contracts", &rc)
		if err != nil {
			return errors.New("couldn't get renter stats")
		}
		if len(rc.Contracts) != 2 {
			return errors.New("no contracts")
		}
		return nil
	})
	if err != nil {
		t.Fatal("allowance setting failed")
	}

	// Create a file to upload.
	filesize := int(1024)
	path := filepath.Join(st.dir, "test.dat")
	err = createRandFile(path, filesize)
	if err != nil {
		t.Fatal(err)
	}

	// upload the file
	uploadValues := url.Values{}
	uploadValues.Set("source", path)
	err = st.stdPostAPI("/renter/upload/test", uploadValues)
	if err != nil {
		t.Fatal(err)
	}

	// redundancy should reach 2
	var rf RenterFiles
	err = build.Retry(60, time.Second, func() error {
		st.getAPI("/renter/files", &rf)
		if len(rf.Files) >= 1 && rf.Files[0].Redundancy == 2 {
			return nil
		}
		return errors.New("file not uploaded")
	})
	if err != nil {
		t.Fatal(err)
	}

	// take down one of the hosts
	if err := stH1.server.Close(); err != nil {
		// Previously this error was silently discarded; a failed shutdown
		// would invalidate the rest of the test.
		t.Fatal(err)
	}

	// wait for the redundancy to decrement
	err = build.Retry(60, time.Second, func() error {
		st.getAPI("/renter/files", &rf)
		if len(rf.Files) >= 1 && rf.Files[0].Redundancy == 1 {
			return nil
		}
		return errors.New("file redundancy not decremented")
	})
	if err != nil {
		t.Fatal(err)
	}

	// bring back the host and let it mine a block
	stH1, err = assembleServerTester(stH1.walletKey, stH1.dir)
	if err != nil {
		t.Fatal(err)
	}
	defer stH1.server.Close()
	testGroup = []*serverTester{st, stH1}

	// Make sure the leader of the group has the longest chain before
	// connecting the nodes
	if _, err := st.miner.AddBlock(); err != nil {
		t.Fatal(err)
	}
	err = fullyConnectNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}

	// Add a block to clear the transaction pool and give the host an output to
	// make an announcement, and then make the announcement.
	_, err = st.miner.AddBlock()
	if err != nil {
		t.Fatal(err)
	}
	_, err = synchronizationCheck(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	err = announceAllHosts(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	_, err = st.miner.AddBlock()
	if err != nil {
		t.Fatal(err)
	}
	err = waitForBlock(st.cs.CurrentBlock().ID(), stH1)
	if err != nil {
		t.Fatal(err)
	}
	_, err = synchronizationCheck(testGroup)
	if err != nil {
		t.Fatal(err)
	}

	// Wait until the host shows back up in the hostdb.
	var ah HostdbActiveGET
	err = build.Retry(1000, 100*time.Millisecond, func() error {
		// Return the error so that build.Retry can retry on a transient
		// failure; calling t.Fatal here would abort on the first hiccup and
		// defeat the purpose of the retry loop.
		if err := st.getAPI("/hostdb/active", &ah); err != nil {
			return err
		}
		if len(ah.Hosts) != 2 {
			return fmt.Errorf("not enough hosts in hostdb, number of hosts is: %v", len(ah.Hosts))
		}
		for _, host := range ah.Hosts {
			if len(host.ScanHistory) < 2 {
				return errors.New("hosts are not scanned")
			}
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// Mine another block so that the contract checker updates the IsGood status
	// of the contracts.
	_, err = st.miner.AddBlock()
	if err != nil {
		t.Fatal(err)
	}
	_, err = synchronizationCheck(testGroup)
	if err != nil {
		t.Fatal(err)
	}

	// Redundancy should re-report at 2.
	err = build.Retry(250, 100*time.Millisecond, func() error {
		st.getAPI("/renter/files", &rf)
		if len(rf.Files) >= 1 && rf.Files[0].Redundancy == 2 {
			return nil
		}
		return errors.New("file redundancy not incremented")
	})
	if err != nil {
		t.Fatal(err)
	}
}
// TestUploadedBytesReporting verifies that reporting of how many bytes have
// been uploaded via active contracts is accurate
func TestUploadedBytesReporting(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.Close()
	stH1, err := blankServerTester(t.Name() + " - Host 2")
	if err != nil {
		t.Fatal(err)
	}
	defer stH1.server.Close()
	testGroup := []*serverTester{st, stH1}

	// Connect the testers to each other so that they are all on the same
	// blockchain.
	err = fullyConnectNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Make sure that every wallet has money in it.
	err = fundAllNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Add storage to every host.
	err = addStorageToAllHosts(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Announce every host.
	err = announceAllHosts(testGroup)
	if err != nil {
		t.Fatal(err)
	}

	// Set an allowance with two hosts.
	allowanceValues := url.Values{}
	allowanceValues.Set("funds", "50000000000000000000000000000") // 50k SC
	allowanceValues.Set("hosts", "2")
	allowanceValues.Set("period", "10")
	allowanceValues.Set("renewwindow", "5")
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}

	// Block until the allowance has finished forming contracts.
	err = build.Retry(50, time.Millisecond*250, func() error {
		var rc RenterContracts
		err = st.getAPI("/renter/contracts", &rc)
		if err != nil {
			return errors.New("couldn't get renter stats")
		}
		if len(rc.Contracts) != 2 {
			return errors.New("no contracts")
		}
		return nil
	})
	if err != nil {
		t.Fatal("allowance setting failed")
	}

	// Create a file to upload.
	filesize := int(modules.SectorSize * 2)
	path := filepath.Join(st.dir, "test.dat")
	err = createRandFile(path, filesize)
	if err != nil {
		t.Fatal(err)
	}

	// Upload the file
	dataPieces := 1
	parityPieces := 1
	uploadValues := url.Values{}
	uploadValues.Set("source", path)
	uploadValues.Set("datapieces", fmt.Sprint(dataPieces))
	uploadValues.Set("paritypieces", fmt.Sprint(parityPieces))
	err = st.stdPostAPI("/renter/upload/test", uploadValues)
	if err != nil {
		t.Fatal(err)
	}

	// Calculate the encrypted size of our fully redundant encoded file
	pieceSize := modules.SectorSize - crypto.TwofishOverhead
	chunkSize := pieceSize * uint64(dataPieces)
	numChunks := uint64(filesize) / chunkSize
	if uint64(filesize)%chunkSize != 0 {
		numChunks++
	}
	fullyRedundantSize := modules.SectorSize * uint64(dataPieces+parityPieces) * numChunks

	// Monitor the file as it uploads. Ensure that the UploadProgress times
	// the fully redundant file size always equals UploadedBytes reported.
	// Transient getAPI errors are tolerated because the loop retries.
	var rf RenterFiles
	for i := 0; i < 60 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 100); i++ {
		st.getAPI("/renter/files", &rf)
		if len(rf.Files) >= 1 {
			uploadProgressBytes := uint64(float64(fullyRedundantSize) * rf.Files[0].UploadProgress / 100.0)
			// Note: in Go 1.10 we will be able to write Math.Round(uploadProgressBytes) != rf.Files[0].UploadedBytes
			if uploadProgressBytes != rf.Files[0].UploadedBytes && (uploadProgressBytes+1) != rf.Files[0].UploadedBytes {
				t.Fatalf("api reports having uploaded %v bytes when upload progress is %v%%, but the actual uploaded bytes count should be %v\n",
					rf.Files[0].UploadedBytes, rf.Files[0].UploadProgress, uploadProgressBytes)
			}
		}
		time.Sleep(time.Second)
	}
	// NOTE: the original re-checked `err` here, but `err` could not have
	// changed since the stdPostAPI check above, so the dead check was removed.

	// Upload progress should be 100% and redundancy should reach 2
	if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 100 || rf.Files[0].Redundancy != 2 {
		t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0])
	}

	// When the file is fully redundantly uploaded, UploadedBytes should
	// equal the file's fully redundant size
	if rf.Files[0].UploadedBytes != fullyRedundantSize {
		t.Fatalf("api reports having uploaded %v bytes when upload progress is 100%%, but the actual fully redundant file size is %v\n",
			rf.Files[0].UploadedBytes, fullyRedundantSize)
	}
}
// TestRenterMissingHosts verifies that if hosts are taken offline, downloads
// fail.
func TestRenterMissingHosts(t *testing.T) {
	if testing.Short() || !build.VLONG {
		t.SkipNow()
	}
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.Close()
	stH1, err := blankServerTester(t.Name() + " - Host 1")
	if err != nil {
		t.Fatal(err)
	}
	defer stH1.server.Close()
	stH2, err := blankServerTester(t.Name() + " - Host 2")
	if err != nil {
		t.Fatal(err)
	}
	defer stH2.server.Close()
	stH3, err := blankServerTester(t.Name() + " - Host 3")
	if err != nil {
		t.Fatal(err)
	}
	defer stH3.server.Close()
	testGroup := []*serverTester{st, stH1, stH2, stH3}

	// Connect the testers to each other so that they are all on the same
	// blockchain.
	err = fullyConnectNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Make sure that every wallet has money in it.
	err = fundAllNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Add storage to every host.
	err = addStorageToAllHosts(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	err = announceAllHosts(testGroup)
	if err != nil {
		t.Fatal(err)
	}

	// Set an allowance with three hosts.
	allowanceValues := url.Values{}
	allowanceValues.Set("funds", "50000000000000000000000000000") // 50k SC
	allowanceValues.Set("hosts", "3")
	allowanceValues.Set("period", "20")
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}

	// Block until the allowance has finished forming contracts.
	err = build.Retry(50, time.Millisecond*250, func() error {
		var rc RenterContracts
		err = st.getAPI("/renter/contracts", &rc)
		if err != nil {
			return errors.New("couldn't get renter stats")
		}
		if len(rc.Contracts) != 3 {
			return errors.New("no contracts")
		}
		return nil
	})
	if err != nil {
		t.Fatal("allowance setting failed:", err)
	}

	// Create a file to upload.
	filesize := int(100)
	path := filepath.Join(st.dir, "test.dat")
	err = createRandFile(path, filesize)
	if err != nil {
		t.Fatal(err)
	}

	// upload the file with 2 data pieces and 1 parity piece, so that losing
	// one host still leaves the file recoverable.
	uploadValues := url.Values{}
	uploadValues.Set("source", path)
	uploadValues.Set("datapieces", "2")
	uploadValues.Set("paritypieces", "1")
	err = st.stdPostAPI("/renter/upload/test", uploadValues)
	if err != nil {
		t.Fatal(err)
	}

	// redundancy should reach 1.5
	var rf RenterFiles
	err = build.Retry(20, time.Second, func() error {
		st.getAPI("/renter/files", &rf)
		if len(rf.Files) >= 1 && rf.Files[0].Redundancy == 1.5 {
			return nil
		}
		return errors.New("file not uploaded")
	})
	if err != nil {
		t.Fatal(err)
	}

	// verify we can download
	downloadPath := filepath.Join(st.dir, "test-downloaded-verify.dat")
	err = st.stdGetAPI("/renter/download/test?destination=" + downloadPath)
	if err != nil {
		t.Fatal(err)
	}

	// take down one of the hosts
	err = stH1.server.Close()
	if err != nil {
		t.Fatal(err)
	}

	// redundancy should not decrement, we have a backup host we can use.
	err = build.Retry(60, time.Second, func() error {
		st.getAPI("/renter/files", &rf)
		// Guard the index: indexing rf.Files[0] while the list is empty
		// would panic (the original built the error string unconditionally).
		if len(rf.Files) < 1 {
			return errors.New("renter is not reporting any files")
		}
		if rf.Files[0].Redundancy != 1.5 {
			return errors.New("file redundancy is not 1.5: " + fmt.Sprint(rf.Files[0].Redundancy))
		}
		return nil
	})
	if err != nil {
		t.Log(err)
	}

	// verify we still can download
	downloadPath = filepath.Join(st.dir, "test-downloaded-verify2.dat")
	err = st.stdGetAPI("/renter/download/test?destination=" + downloadPath)
	if err != nil {
		t.Fatal(err)
	}

	// take down another host
	err = stH2.server.Close()
	if err != nil {
		t.Fatal(err)
	}

	// wait for the redundancy to decrement
	err = build.Retry(60, time.Second, func() error {
		st.getAPI("/renter/files", &rf)
		if len(rf.Files) < 1 {
			return errors.New("renter is not reporting any files")
		}
		if rf.Files[0].Redundancy != 1 {
			return errors.New("file redundancy not decremented: " + fmt.Sprint(rf.Files[0].Redundancy))
		}
		return nil
	})
	if err != nil {
		t.Log(err)
	}

	// verify we still can download (use a fresh destination; the original
	// reused the "verify2" filename here).
	downloadPath = filepath.Join(st.dir, "test-downloaded-verify3.dat")
	err = st.stdGetAPI("/renter/download/test?destination=" + downloadPath)
	if err != nil {
		t.Fatal(err)
	}

	// take down another host
	err = stH3.server.Close()
	if err != nil {
		t.Fatal(err)
	}

	// wait for the redundancy to decrement
	err = build.Retry(60, time.Second, func() error {
		st.getAPI("/renter/files", &rf)
		if len(rf.Files) < 1 {
			return errors.New("renter is not reporting any files")
		}
		if rf.Files[0].Redundancy != 0 {
			return errors.New("file redundancy not decremented: " + fmt.Sprint(rf.Files[0].Redundancy))
		}
		return nil
	})
	if err != nil {
		t.Log(err)
	}

	// verify that the download fails
	downloadPath = filepath.Join(st.dir, "test-downloaded-verify4.dat")
	err = st.stdGetAPI("/renter/download/test?destination=" + downloadPath)
	if err == nil {
		t.Fatal("expected download to fail with redundancy <1")
	}
}
// TestRepairLoopBlocking checks if the repair loop blocks operations while a
// non local file is being downloaded for repair.
//
// Scenario: a file is uploaded to two hosts, its local copy is deleted, and
// one host is taken down, forcing the repair loop to download the file from
// the network before re-uploading. While that repair is in flight, the test
// verifies that a second upload still completes (i.e. the repair loop does
// not block unrelated operations).
func TestRepairLoopBlocking(t *testing.T) {
	// TODO: Refactor dependency management to block download
	t.Skip("Test requires refactoring")
	if testing.Short() || !build.VLONG {
		t.SkipNow()
	}
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	// Dependency injection that would force the repair download to block;
	// disabled until the dependency refactor lands (see t.Skip above).
	//st.renter.SetDependencies(renter.BlockRepairUpload{})
	defer st.server.Close()
	stH1, err := blankServerTester(t.Name() + " - Host 1")
	if err != nil {
		t.Fatal(err)
	}
	defer stH1.server.Close()
	testGroup := []*serverTester{st, stH1}
	// Connect the testers to each other so that they are all on the same
	// blockchain.
	err = fullyConnectNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Make sure that every wallet has money in it.
	err = fundAllNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Add storage to every host.
	err = addStorageToAllHosts(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	err = announceAllHosts(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Set an allowance with two hosts.
	allowanceValues := url.Values{}
	allowanceValues.Set("funds", "50000000000000000000000000000") // 50k SC
	allowanceValues.Set("hosts", "2")
	allowanceValues.Set("period", "10")
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}
	// Create a file with 1 chunk to upload.
	filesize := int(1)
	path := filepath.Join(st.dir, "test.dat")
	err = createRandFile(path, filesize)
	if err != nil {
		t.Fatal(err)
	}
	// upload the file
	uploadValues := url.Values{}
	uploadValues.Set("source", path)
	err = st.stdPostAPI("/renter/upload/test", uploadValues)
	if err != nil {
		t.Fatal(err)
	}
	// redundancy should reach 2
	var rf RenterFiles
	err = build.Retry(60, time.Second, func() error {
		st.getAPI("/renter/files", &rf)
		if len(rf.Files) >= 1 && rf.Files[0].Redundancy == 2 {
			return nil
		}
		return errors.New("file not uploaded")
	})
	if err != nil {
		t.Fatal(err)
	}
	// verify we can download
	downloadPath := filepath.Join(st.dir, "test-downloaded-verify.dat")
	err = st.stdGetAPI("/renter/download/test?destination=" + downloadPath)
	if err != nil {
		t.Fatal(err)
	}
	// remove the local copy of the file, so any future repair must use the
	// download-to-repair path rather than re-reading from disk.
	err = os.Remove(path)
	if err != nil {
		t.Fatal(err)
	}
	// take down one of the hosts
	err = stH1.server.Close()
	if err != nil {
		t.Fatal(err)
	}
	// wait for the redundancy to decrement
	err = build.Retry(60, time.Second, func() error {
		st.getAPI("/renter/files", &rf)
		if len(rf.Files) >= 1 && rf.Files[0].Redundancy == 1 {
			return nil
		}
		return errors.New("file redundancy not decremented")
	})
	if err != nil {
		t.Fatal(err)
	}
	// verify we still can download
	downloadPath = filepath.Join(st.dir, "test-downloaded-verify2.dat")
	err = st.stdGetAPI("/renter/download/test?destination=" + downloadPath)
	if err != nil {
		t.Fatal(err)
	}
	// bring up a few new hosts
	testGroup = []*serverTester{st}
	for i := 0; i < 3; i++ {
		stNewHost, err := blankServerTester(t.Name() + fmt.Sprintf("-newhost%d", i))
		if err != nil {
			t.Fatal(err)
		}
		// NOTE: defer in a loop — all three hosts stay open until the test
		// ends, which is intentional here since they are needed throughout.
		defer stNewHost.server.Close()
		testGroup = append(testGroup, stNewHost)
	}
	// Connect the testers to each other so that they are all on the same
	// blockchain.
	err = fullyConnectNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	_, err = synchronizationCheck(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Make sure that every wallet has money in it.
	err = fundAllNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// NOTE(review): this slice skips the LAST new host as well as the leader
	// (testGroup[1 : len-1] covers only two of the three new hosts) — confirm
	// whether leaving one host unannounced is intentional for this scenario.
	for _, stNewHost := range testGroup[1 : len(testGroup)-1] {
		err = stNewHost.setHostStorage()
		if err != nil {
			t.Fatal(err)
		}
		err = stNewHost.announceHost()
		if err != nil {
			t.Fatal(err)
		}
		err = waitForBlock(stNewHost.cs.CurrentBlock().ID(), st)
		if err != nil {
			t.Fatal(err)
		}
		// add a few new blocks in order to cause the renter to form contracts with the new host
		for i := 0; i < 10; i++ {
			b, err := testGroup[0].miner.AddBlock()
			if err != nil {
				t.Fatal(err)
			}
			tipID, err := synchronizationCheck(testGroup)
			if err != nil {
				t.Fatal(err)
			}
			if b.ID() != tipID {
				t.Fatal("test group does not have the tip block")
			}
		}
	}
	// wait a few seconds for the repair to be queued and started
	time.Sleep(3 * time.Second)
	// redundancy should not increment back to 2 because the renter should be blocked
	st.getAPI("/renter/files", &rf)
	if len(rf.Files) >= 1 && rf.Files[0].Redundancy >= 2 && rf.Files[0].Available {
		t.Error("The file's redundancy incremented back to 2 but shouldn't")
	}
	// create a second file to upload
	filesize = int(1)
	path2 := filepath.Join(st.dir, "test2.dat")
	err = createRandFile(path2, filesize)
	if err != nil {
		t.Fatal(err)
	}
	// upload the second file
	uploadValues = url.Values{}
	uploadValues.Set("source", path2)
	// Run the upload in a goroutine so the test can bound how long the API
	// call may block; if the repair loop wrongly holds a lock, the select
	// below times out after a minute instead of hanging the whole test run.
	wait := make(chan error)
	go func() {
		wait <- st.stdPostAPI("/renter/upload/test2", uploadValues)
	}()
	select {
	case <-time.After(time.Minute):
		t.Fatal("/renter/upload API call didn't return within 60 seconds")
	case err = <-wait:
	}
	if err != nil {
		t.Fatal(err)
	}
	// redundancy should reach 2 for the second file
	err = build.Retry(60, time.Second, func() error {
		st.getAPI("/renter/files", &rf)
		if len(rf.Files) >= 2 && rf.Files[1].Redundancy >= 2 {
			return nil
		}
		return errors.New("file 2 not uploaded")
	})
	if err != nil {
		t.Fatal(err)
	}
	// verify we can download the second file
	downloadPath = filepath.Join(st.dir, "test-downloaded-verify2.dat")
	err = st.stdGetAPI("/renter/download/test2?destination=" + downloadPath)
	if err != nil {
		t.Fatal(err)
	}
}
// TestRemoteFileRepairMassive is similar to TestRemoteFileRepair but uploads
// more files to find potential deadlocks or crashes
func TestRemoteFileRepairMassive(t *testing.T) {
	if testing.Short() || !build.VLONG {
		t.SkipNow()
	}
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.Close()
	stH1, err := blankServerTester(t.Name() + " - Host 1")
	if err != nil {
		t.Fatal(err)
	}
	defer stH1.server.Close()
	testGroup := []*serverTester{st, stH1}

	// Connect the testers to each other so that they are all on the same
	// blockchain.
	err = fullyConnectNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Make sure that every wallet has money in it.
	err = fundAllNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Add storage to every host.
	err = addStorageToAllHosts(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	err = announceAllHosts(testGroup)
	if err != nil {
		t.Fatal(err)
	}

	// Set an allowance with two hosts.
	allowanceValues := url.Values{}
	allowanceValues.Set("funds", "50000000000000000000000000000") // 50k SC
	allowanceValues.Set("hosts", "2")
	allowanceValues.Set("period", "10")
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}

	// Create a file to upload.
	filesize := int(4000)
	path := filepath.Join(st.dir, "test.dat")
	err = createRandFile(path, filesize)
	if err != nil {
		t.Fatal(err)
	}

	// upload the file numUploads times
	numUploads := 10
	uploadValues := url.Values{}
	uploadValues.Set("source", path)
	for i := 0; i < numUploads; i++ {
		err = st.stdPostAPI(fmt.Sprintf("/renter/upload/test%v", i), uploadValues)
		if err != nil {
			t.Fatal(err)
		}
	}

	// redundancy should reach 2 for all files
	var rf RenterFiles
	err = build.Retry(600, time.Second, func() error {
		st.getAPI("/renter/files", &rf)
		if len(rf.Files) != numUploads {
			return errors.New("file not uploaded")
		}
		for i, f := range rf.Files {
			if f.Redundancy != 2 {
				return fmt.Errorf("file %v only reached %v redundancy", i, f.Redundancy)
			}
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// remove the local copy of the file so repairs must go through the
	// download-to-upload path.
	err = os.Remove(path)
	if err != nil {
		t.Fatal(err)
	}

	// take down one of the hosts
	err = stH1.server.Close()
	if err != nil {
		t.Fatal(err)
	}

	// wait for the redundancy to decrement
	err = build.Retry(60, time.Second, func() error {
		st.getAPI("/renter/files", &rf)
		if len(rf.Files) != numUploads {
			return errors.New("file not uploaded")
		}
		for _, f := range rf.Files {
			if f.Redundancy != 1 {
				// typo fix: "redudancy" -> "redundancy"
				return errors.New("file redundancy didn't decrement to x1")
			}
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// bring up a new host
	stNewHost, err := blankServerTester(t.Name() + "-newhost")
	if err != nil {
		t.Fatal(err)
	}
	defer stNewHost.server.Close()
	testGroup = []*serverTester{st, stNewHost}

	// Connect the testers to each other so that they are all on the same
	// blockchain.
	err = fullyConnectNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	_, err = synchronizationCheck(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Make sure that every wallet has money in it.
	err = fundAllNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	err = stNewHost.setHostStorage()
	if err != nil {
		t.Fatal(err)
	}
	err = stNewHost.announceHost()
	if err != nil {
		t.Fatal(err)
	}
	err = waitForBlock(stNewHost.cs.CurrentBlock().ID(), st)
	if err != nil {
		t.Fatal(err)
	}

	// add a few new blocks in order to cause the renter to form contracts with the new host
	for i := 0; i < 10; i++ {
		b, err := testGroup[0].miner.AddBlock()
		if err != nil {
			t.Fatal(err)
		}
		tipID, err := synchronizationCheck(testGroup)
		if err != nil {
			t.Fatal(err)
		}
		if b.ID() != tipID {
			t.Fatal("test group does not have the tip block")
		}
	}

	// redundancy should increment back to 2 as the renter uploads to the new
	// host using the download-to-upload strategy
	err = build.Retry(300, time.Second, func() error {
		st.getAPI("/renter/files", &rf)
		if len(rf.Files) != numUploads {
			return errors.New("file not uploaded")
		}
		for i, f := range rf.Files {
			if f.Redundancy != 2 {
				return fmt.Errorf("file %v only reached %v redundancy", i, f.Redundancy)
			}
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}
// Fix TestRenterCancelAllowance
package api
// renterhost_test.go sets up larger integration tests between renters and
// hosts, checking that the whole storage ecosystem is functioning cohesively.
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/NebulousLabs/Sia/build"
"github.com/NebulousLabs/Sia/crypto"
"github.com/NebulousLabs/Sia/modules"
"github.com/NebulousLabs/Sia/types"
)
// TestHostObligationAcceptingContracts verifies that the host will complete
// storage proofs and the renter will successfully download even if the host
// has set accepting contracts to false.
func TestHostObligationAcceptingContracts(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.Close()

	// Configure the host: add storage, accept contracts, and announce.
	err = st.setHostStorage()
	if err != nil {
		t.Fatal(err)
	}
	err = st.acceptContracts()
	if err != nil {
		t.Fatal(err)
	}
	err = st.announceHost()
	if err != nil {
		t.Fatal(err)
	}

	// Set an allowance so the renter forms a contract with the host.
	allowanceValues := url.Values{}
	allowanceValues.Set("funds", "50000000000000000000000000000") // 50k SC
	allowanceValues.Set("hosts", "1")
	allowanceValues.Set("period", "10")
	allowanceValues.Set("renewwindow", "5")
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}

	// Block until the allowance has finished forming contracts.
	err = build.Retry(50, time.Millisecond*250, func() error {
		var rc RenterContracts
		err = st.getAPI("/renter/contracts", &rc)
		if err != nil {
			return errors.New("couldn't get renter stats")
		}
		if len(rc.Contracts) != 1 {
			return errors.New("no contracts")
		}
		return nil
	})
	if err != nil {
		t.Fatal("allowance setting failed")
	}

	// Create a small file and upload it to the renter.
	filesize := int(1024)
	path := filepath.Join(st.dir, "test.dat")
	err = createRandFile(path, filesize)
	if err != nil {
		t.Fatal(err)
	}
	uploadValues := url.Values{}
	uploadValues.Set("source", path)
	err = st.stdPostAPI("/renter/upload/test", uploadValues)
	if err != nil {
		t.Fatal(err)
	}

	// Redundancy should reach 1 (file available). The getAPI error is
	// deliberately tolerated inside the retry loop; a transient failure just
	// retries.
	var rf RenterFiles
	err = build.Retry(120, time.Millisecond*250, func() error {
		st.getAPI("/renter/files", &rf)
		if len(rf.Files) >= 1 && rf.Files[0].Available {
			return nil
		}
		return errors.New("file not uploaded")
	})
	if err != nil {
		t.Fatal(err)
	}

	// Get contracts via API call.
	var cts ContractInfoGET
	err = st.getAPI("/host/contracts", &cts)
	if err != nil {
		t.Fatal(err)
	}
	// There should be some contracts returned.
	if len(cts.Contracts) == 0 {
		t.Fatal("No contracts returned from /host/contracts API call.")
	}
	// Check if the number of contracts are equal to the number of storage obligations.
	if len(cts.Contracts) != len(st.host.StorageObligations()) {
		t.Fatal("Number of contracts returned by API call and host method don't match.")
	}

	// Set acceptingcontracts = false, mine some blocks, verify we can download.
	// The error return was previously ignored; a failed settings update would
	// invalidate the rest of the test.
	settings := st.host.InternalSettings()
	settings.AcceptingContracts = false
	if err = st.host.SetInternalSettings(settings); err != nil {
		t.Fatal(err)
	}
	for i := 0; i < 3; i++ {
		_, err := st.miner.AddBlock()
		if err != nil {
			t.Fatal(err)
		}
		time.Sleep(time.Millisecond * 100)
	}
	downloadPath := filepath.Join(st.dir, "test-downloaded-verify.dat")
	err = st.stdGetAPI("/renter/download/test?destination=" + downloadPath)
	if err != nil {
		t.Fatal(err)
	}

	// Mine blocks to cause the host to submit storage proofs to the blockchain.
	for i := 0; i < 15; i++ {
		_, err := st.miner.AddBlock()
		if err != nil {
			t.Fatal(err)
		}
		time.Sleep(time.Millisecond * 100)
	}

	// At least one storage obligation should have a confirmed proof.
	success := false
	for _, so := range st.host.StorageObligations() {
		if so.ProofConfirmed {
			success = true
			break
		}
	}
	if !success {
		t.Fatal("no successful storage proofs")
	}
}
// TestHostAndRentVanilla sets up an integration test where a host and renter
// do basic uploads and downloads.
func TestHostAndRentVanilla(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.panicClose()

	// Announce the host and start accepting contracts.
	err = st.announceHost()
	if err != nil {
		t.Fatal(err)
	}
	err = st.setHostStorage()
	if err != nil {
		t.Fatal(err)
	}
	err = st.acceptContracts()
	if err != nil {
		t.Fatal(err)
	}

	// Set an allowance for the renter, allowing a contract to be formed.
	allowanceValues := url.Values{}
	testFunds := "10000000000000000000000000000" // 10k SC
	testPeriod := "20"
	renewWindow := "10"
	testPeriodInt := 20
	allowanceValues.Set("funds", testFunds)
	allowanceValues.Set("period", testPeriod)
	allowanceValues.Set("renewwindow", renewWindow)
	allowanceValues.Set("hosts", fmt.Sprint(recommendedHosts))
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}

	// Block until the allowance has finished forming contracts.
	err = build.Retry(50, time.Millisecond*250, func() error {
		var rc RenterContracts
		err = st.getAPI("/renter/contracts", &rc)
		if err != nil {
			return errors.New("couldn't get renter stats")
		}
		if len(rc.Contracts) != 1 {
			return errors.New("no contracts")
		}
		return nil
	})
	if err != nil {
		t.Fatal("allowance setting failed")
	}

	// Check the host, who should now be reporting file contracts.
	var cts ContractInfoGET
	err = st.getAPI("/host/contracts", &cts)
	if err != nil {
		t.Fatal(err)
	}
	if len(cts.Contracts) != 1 {
		t.Error("Host has wrong number of obligations:", len(cts.Contracts))
	}
	// Check if the obligation status is unresolved.
	if cts.Contracts[0].ObligationStatus != "obligationUnresolved" {
		t.Error("Wrong obligation status for new contract:", cts.Contracts[0].ObligationStatus)
	}
	// Check if there are no sector roots on a new contract.
	if cts.Contracts[0].SectorRootsCount != 0 {
		t.Error("Wrong number of sector roots for new contract:", cts.Contracts[0].SectorRootsCount)
	}
	// Check if there is locked collateral.
	if cts.Contracts[0].LockedCollateral.IsZero() {
		t.Error("No locked collateral in contract.")
	}
	// Check if risked collateral is zero; nothing has been stored yet.
	if !cts.Contracts[0].RiskedCollateral.IsZero() {
		t.Error("Risked collateral not zero in new contract.")
	}
	// Check if all potential revenues are zero.
	if !(cts.Contracts[0].PotentialDownloadRevenue.IsZero() && cts.Contracts[0].PotentialUploadRevenue.IsZero() && cts.Contracts[0].PotentialStorageRevenue.IsZero()) {
		t.Error("Potential values not zero in new contract.")
	}

	// Create a file.
	path := filepath.Join(st.dir, "test.dat")
	err = createRandFile(path, 1024)
	if err != nil {
		t.Fatal(err)
	}

	// Upload the file to the renter.
	uploadValues := url.Values{}
	uploadValues.Set("source", path)
	err = st.stdPostAPI("/renter/upload/test", uploadValues)
	if err != nil {
		t.Fatal(err)
	}
	// Only one piece will be uploaded (10% at current redundancy).
	var rf RenterFiles
	for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ {
		st.getAPI("/renter/files", &rf)
		time.Sleep(100 * time.Millisecond)
	}
	if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 {
		t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0])
	}

	// On a second connection, upload another file.
	path2 := filepath.Join(st.dir, "test2.dat")
	test2Size := modules.SectorSize*2 + 1
	err = createRandFile(path2, int(test2Size))
	if err != nil {
		t.Fatal(err)
	}
	uploadValues = url.Values{}
	uploadValues.Set("source", path2)
	err = st.stdPostAPI("/renter/upload/test2", uploadValues)
	if err != nil {
		t.Fatal(err)
	}
	// Only one piece will be uploaded (10% at current redundancy).
	for i := 0; i < 200 && (len(rf.Files) != 2 || rf.Files[0].UploadProgress < 10 || rf.Files[1].UploadProgress < 10); i++ {
		st.getAPI("/renter/files", &rf)
		time.Sleep(100 * time.Millisecond)
	}
	if len(rf.Files) != 2 || rf.Files[0].UploadProgress < 10 || rf.Files[1].UploadProgress < 10 {
		t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0], rf.Files[1])
	}

	// Try downloading the first file.
	downpath := filepath.Join(st.dir, "testdown.dat")
	err = st.stdGetAPI("/renter/download/test?destination=" + downpath)
	if err != nil {
		t.Fatal(err)
	}
	// Check that the download has the right contents.
	orig, err := ioutil.ReadFile(path)
	if err != nil {
		t.Fatal(err)
	}
	download, err := ioutil.ReadFile(downpath)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(orig, download) {
		t.Fatal("data mismatch when downloading a file")
	}

	// The renter's downloads queue should have 1 entry now.
	var queue RenterDownloadQueue
	if err = st.getAPI("/renter/downloads", &queue); err != nil {
		t.Fatal(err)
	}
	if len(queue.Downloads) != 1 {
		t.Fatalf("expected renter to have 1 download in the queue; got %v", len(queue.Downloads))
	}

	// Try downloading the second file.
	downpath2 := filepath.Join(st.dir, "testdown2.dat")
	err = st.stdGetAPI("/renter/download/test2?destination=" + downpath2)
	if err != nil {
		t.Fatal(err)
	}
	// Check that the download has the right contents.
	orig2, err := ioutil.ReadFile(path2)
	if err != nil {
		t.Fatal(err)
	}
	download2, err := ioutil.ReadFile(downpath2)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(orig2, download2) {
		t.Fatal("data mismatch when downloading a file")
	}

	// The renter's downloads queue should have 2 entries now.
	if err = st.getAPI("/renter/downloads", &queue); err != nil {
		t.Fatal(err)
	}
	if len(queue.Downloads) != 2 {
		t.Fatalf("expected renter to have 2 downloads in the queue; got %v", len(queue.Downloads))
	}

	// Mine two blocks, which should cause the host to submit the storage
	// obligation to the blockchain.
	for i := 0; i < 2; i++ {
		_, err := st.miner.AddBlock()
		if err != nil {
			t.Fatal(err)
		}
		time.Sleep(time.Millisecond * 200)
	}

	// Check that the host was able to get the file contract confirmed on the
	// blockchain.
	cts = ContractInfoGET{}
	err = st.getAPI("/host/contracts", &cts)
	if err != nil {
		t.Fatal(err)
	}
	if len(cts.Contracts) != 1 {
		t.Error("Host has wrong number of obligations:", len(cts.Contracts))
	}
	if !cts.Contracts[0].OriginConfirmed {
		t.Error("Host has not seen the file contract on the blockchain.")
	}
	// Check if there are sector roots.
	if cts.Contracts[0].SectorRootsCount == 0 {
		t.Error("Sector roots count is zero for used obligation.")
	}
	// Check if risked collateral is not equal to zero.
	if cts.Contracts[0].RiskedCollateral.IsZero() {
		t.Error("Risked collateral is zero for used obligation.")
	}
	// There should be some potential revenues in this contract.
	if cts.Contracts[0].PotentialDownloadRevenue.IsZero() || cts.Contracts[0].PotentialUploadRevenue.IsZero() || cts.Contracts[0].PotentialStorageRevenue.IsZero() {
		t.Error("Potential revenue value is zero for used obligation.")
	}

	// Mine blocks until the host should have submitted a storage proof.
	for i := 0; i <= testPeriodInt+5; i++ {
		_, err := st.miner.AddBlock()
		if err != nil {
			t.Fatal(err)
		}
		time.Sleep(time.Millisecond * 200)
	}
	cts = ContractInfoGET{}
	err = st.getAPI("/host/contracts", &cts)
	if err != nil {
		t.Fatal(err)
	}
	success := false
	for _, contract := range cts.Contracts {
		if contract.ProofConfirmed {
			// Sector roots should be removed from storage obligation.
			if contract.SectorRootsCount > 0 {
				t.Error("There are sector roots on completed storage obligation.")
			}
			success = true
			break
		}
	}
	if !success {
		t.Error("does not seem like the host has submitted a storage proof successfully to the network")
	}
}
// TestHostAndRentMultiHost sets up an integration test where three hosts and a
// renter do basic (parallel) uploads and downloads.
func TestHostAndRentMultiHost(t *testing.T) {
	if testing.Short() || !build.VLONG {
		t.SkipNow()
	}
	t.Parallel()
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.panicClose()
	stH1, err := blankServerTester(t.Name() + " - Host 2")
	if err != nil {
		t.Fatal(err)
	}
	defer stH1.server.panicClose()
	stH2, err := blankServerTester(t.Name() + " - Host 3")
	if err != nil {
		t.Fatal(err)
	}
	defer stH2.server.panicClose()
	testGroup := []*serverTester{st, stH1, stH2}

	// Connect the testers to each other so that they are all on the same
	// blockchain.
	err = fullyConnectNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Make sure that every wallet has money in it.
	err = fundAllNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Add storage to every host.
	err = addStorageToAllHosts(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Announce every host.
	err = announceAllHosts(testGroup)
	if err != nil {
		t.Fatal(err)
	}

	// Set an allowance with three hosts.
	allowanceValues := url.Values{}
	allowanceValues.Set("funds", "50000000000000000000000000000") // 50k SC
	allowanceValues.Set("hosts", "3")
	allowanceValues.Set("period", "10")
	allowanceValues.Set("renewwindow", "2")
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}

	// Create a file to upload.
	filesize := int(45678)
	path := filepath.Join(st.dir, "test.dat")
	err = createRandFile(path, filesize)
	if err != nil {
		t.Fatal(err)
	}

	// Upload a file with 2-of-6 redundancy.
	uploadValues := url.Values{}
	uploadValues.Set("source", path)
	uploadValues.Set("datapieces", "2")
	uploadValues.Set("paritypieces", "4")
	err = st.stdPostAPI("/renter/upload/test", uploadValues)
	if err != nil {
		t.Fatal(err)
	}
	// Three pieces should get uploaded.
	var rf RenterFiles
	for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 50); i++ {
		st.getAPI("/renter/files", &rf)
		time.Sleep(100 * time.Millisecond)
	}
	if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 50 {
		t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0])
	}

	// Try downloading the file.
	downpath := filepath.Join(st.dir, "testdown.dat")
	err = st.stdGetAPI("/renter/download/test?destination=" + downpath)
	if err != nil {
		t.Fatal(err)
	}
	// Check that the download has the right contents.
	orig, err := ioutil.ReadFile(path)
	if err != nil {
		t.Fatal(err)
	}
	download, err := ioutil.ReadFile(downpath)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(orig, download) {
		t.Fatal("data mismatch when downloading a file")
	}

	// The renter's downloads queue should have 1 entry now.
	var queue RenterDownloadQueue
	if err = st.getAPI("/renter/downloads", &queue); err != nil {
		t.Fatal(err)
	}
	if len(queue.Downloads) != 1 {
		t.Fatalf("expected renter to have 1 download in the queue; got %v", len(queue.Downloads))
	}
}
// TestHostAndRentManyFiles sets up an integration test where a single renter
// is uploading many files to the network.
func TestHostAndRentManyFiles(t *testing.T) {
	if testing.Short() || !build.VLONG {
		t.SkipNow()
	}
	t.Parallel()
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.panicClose()
	stH1, err := blankServerTester(t.Name() + " - Host 2")
	if err != nil {
		t.Fatal(err)
	}
	defer stH1.server.panicClose()
	stH2, err := blankServerTester(t.Name() + " - Host 3")
	if err != nil {
		t.Fatal(err)
	}
	defer stH2.server.panicClose()
	stH3, err := blankServerTester(t.Name() + " - Host 4")
	if err != nil {
		t.Fatal(err)
	}
	defer stH3.server.panicClose()
	testGroup := []*serverTester{st, stH1, stH2, stH3}

	// Connect the testers to each other so that they are all on the same
	// blockchain.
	err = fullyConnectNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Make sure that every wallet has money in it.
	err = fundAllNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Add storage to every host.
	err = addStorageToAllHosts(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Announce every host.
	err = announceAllHosts(testGroup)
	if err != nil {
		t.Fatal(err)
	}

	// Set an allowance with four hosts.
	allowanceValues := url.Values{}
	allowanceValues.Set("funds", "50000000000000000000000000000") // 50k SC
	allowanceValues.Set("hosts", "4")
	allowanceValues.Set("period", "5")
	allowanceValues.Set("renewwindow", "2")
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}

	// Create 3 files to upload at the same time.
	filesize1 := int(12347)
	filesize2 := int(22343)
	filesize3 := int(32349)
	path1 := filepath.Join(st.dir, "test1.dat")
	path2 := filepath.Join(st.dir, "test2.dat")
	path3 := filepath.Join(st.dir, "test3.dat")
	err = createRandFile(path1, filesize1)
	if err != nil {
		t.Fatal(err)
	}
	err = createRandFile(path2, filesize2)
	if err != nil {
		t.Fatal(err)
	}
	err = createRandFile(path3, filesize3)
	if err != nil {
		t.Fatal(err)
	}

	// Concurrently upload a file with 1-of-4 redundancy, 2-of-4 redundancy,
	// and 3-of-4 redundancy.
	var wg sync.WaitGroup
	wg.Add(3)
	go func() {
		defer wg.Done()
		uploadValues := url.Values{}
		uploadValues.Set("source", path1)
		uploadValues.Set("datapieces", "1")
		uploadValues.Set("paritypieces", "3")
		err := st.stdPostAPI("/renter/upload/test1", uploadValues)
		if err != nil {
			t.Error(err)
		}
	}()
	go func() {
		defer wg.Done()
		uploadValues := url.Values{}
		uploadValues.Set("source", path2)
		uploadValues.Set("datapieces", "2")
		uploadValues.Set("paritypieces", "2")
		err := st.stdPostAPI("/renter/upload/test2", uploadValues)
		if err != nil {
			t.Error(err)
		}
	}()
	go func() {
		defer wg.Done()
		uploadValues := url.Values{}
		uploadValues.Set("source", path3)
		uploadValues.Set("datapieces", "3")
		uploadValues.Set("paritypieces", "1")
		err := st.stdPostAPI("/renter/upload/test3", uploadValues)
		if err != nil {
			t.Error(err)
		}
	}()
	// Block until the upload call is complete for all three files.
	wg.Wait()

	// Block until all files hit 100% uploaded.
	var rf RenterFiles
	for i := 0; i < 200 && (len(rf.Files) != 3 || rf.Files[0].UploadProgress < 100 || rf.Files[1].UploadProgress < 100 || rf.Files[2].UploadProgress < 100); i++ {
		st.getAPI("/renter/files", &rf)
		time.Sleep(500 * time.Millisecond)
	}
	if len(rf.Files) != 3 || rf.Files[0].UploadProgress < 100 || rf.Files[1].UploadProgress < 100 || rf.Files[2].UploadProgress < 100 {
		t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0], rf.Files[1], rf.Files[2])
	}

	// Download all three files in parallel.
	wg.Add(3)
	go func() {
		defer wg.Done()
		downpath := filepath.Join(st.dir, "testdown1.dat")
		err := st.stdGetAPI("/renter/download/test1?destination=" + downpath)
		if err != nil {
			t.Error(err)
		}
		// Check that the download has the right contents.
		orig, err := ioutil.ReadFile(path1)
		if err != nil {
			t.Error(err)
		}
		download, err := ioutil.ReadFile(downpath)
		if err != nil {
			t.Error(err)
		}
		if !bytes.Equal(orig, download) {
			t.Error("data mismatch when downloading a file")
		}
	}()
	go func() {
		defer wg.Done()
		downpath := filepath.Join(st.dir, "testdown2.dat")
		err := st.stdGetAPI("/renter/download/test2?destination=" + downpath)
		if err != nil {
			t.Error(err)
		}
		// Check that the download has the right contents.
		orig, err := ioutil.ReadFile(path2)
		if err != nil {
			t.Error(err)
		}
		download, err := ioutil.ReadFile(downpath)
		if err != nil {
			t.Error(err)
		}
		if !bytes.Equal(orig, download) {
			t.Error("data mismatch when downloading a file")
		}
	}()
	go func() {
		defer wg.Done()
		downpath := filepath.Join(st.dir, "testdown3.dat")
		err := st.stdGetAPI("/renter/download/test3?destination=" + downpath)
		if err != nil {
			t.Error(err)
		}
		// Check that the download has the right contents.
		orig, err := ioutil.ReadFile(path3)
		if err != nil {
			t.Error(err)
		}
		download, err := ioutil.ReadFile(downpath)
		if err != nil {
			t.Error(err)
		}
		if !bytes.Equal(orig, download) {
			t.Error("data mismatch when downloading a file")
		}
	}()
	wg.Wait()

	// The renter's downloads queue should have 3 entries now.
	var queue RenterDownloadQueue
	if err = st.getAPI("/renter/downloads", &queue); err != nil {
		t.Fatal(err)
	}
	if len(queue.Downloads) != 3 {
		t.Fatalf("expected renter to have 3 downloads in the queue; got %v", len(queue.Downloads))
	}
}
// TestRenterUploadDownload tests that downloading and uploading in parallel
// does not result in failures or stalling.
func TestRenterUploadDownload(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.panicClose()

	// Announce the host and start accepting contracts.
	err = st.announceHost()
	if err != nil {
		t.Fatal(err)
	}
	err = st.acceptContracts()
	if err != nil {
		t.Fatal(err)
	}
	err = st.setHostStorage()
	if err != nil {
		t.Fatal(err)
	}

	// Set an allowance for the renter, allowing a contract to be formed.
	allowanceValues := url.Values{}
	testFunds := "10000000000000000000000000000" // 10k SC
	testPeriod := "10"
	allowanceValues.Set("funds", testFunds)
	allowanceValues.Set("period", testPeriod)
	allowanceValues.Set("renewwindow", testRenewWindow)
	allowanceValues.Set("hosts", fmt.Sprint(recommendedHosts))
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}

	// Block until the allowance has finished forming contracts.
	err = build.Retry(50, time.Millisecond*250, func() error {
		var rc RenterContracts
		err = st.getAPI("/renter/contracts", &rc)
		if err != nil {
			return errors.New("couldn't get renter stats")
		}
		if len(rc.Contracts) != 1 {
			return errors.New("no contracts")
		}
		return nil
	})
	if err != nil {
		t.Fatal("allowance setting failed")
	}

	// Check financial metrics; coins should have been spent on contracts.
	var rg RenterGET
	err = st.getAPI("/renter", &rg)
	if err != nil {
		t.Fatal(err)
	}
	spent := rg.Settings.Allowance.Funds.Sub(rg.FinancialMetrics.Unspent)
	if spent.IsZero() {
		t.Fatal("financial metrics do not reflect contract spending")
	}

	// Create a file.
	path := filepath.Join(st.dir, "test.dat")
	err = createRandFile(path, 1024)
	if err != nil {
		t.Fatal(err)
	}

	// Upload to host.
	uploadValues := url.Values{}
	uploadValues.Set("source", path)
	err = st.stdPostAPI("/renter/upload/test", uploadValues)
	if err != nil {
		t.Fatal(err)
	}
	// Only one piece will be uploaded (10% at current redundancy).
	var rf RenterFiles
	for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ {
		st.getAPI("/renter/files", &rf)
		time.Sleep(100 * time.Millisecond)
	}
	if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 {
		t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0])
	}

	// In parallel, upload another file and download the first file.
	path2 := filepath.Join(st.dir, "test2.dat")
	test2Size := modules.SectorSize*2 + 1
	err = createRandFile(path2, int(test2Size))
	if err != nil {
		t.Fatal(err)
	}
	uploadValues = url.Values{}
	uploadValues.Set("source", path2)
	err = st.stdPostAPI("/renter/upload/test2", uploadValues)
	if err != nil {
		t.Fatal(err)
	}
	downpath := filepath.Join(st.dir, "testdown.dat")
	err = st.stdGetAPI("/renter/download/test?destination=" + downpath)
	if err != nil {
		t.Fatal(err)
	}
	// Check that the download has the right contents.
	orig, err := ioutil.ReadFile(path)
	if err != nil {
		t.Fatal(err)
	}
	download, err := ioutil.ReadFile(downpath)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(orig, download) {
		t.Fatal("data mismatch when downloading a file")
	}

	// Wait for upload to complete.
	for i := 0; i < 200 && (len(rf.Files) != 2 || rf.Files[0].UploadProgress < 10 || rf.Files[1].UploadProgress < 10); i++ {
		st.getAPI("/renter/files", &rf)
		time.Sleep(100 * time.Millisecond)
	}
	if len(rf.Files) != 2 || rf.Files[0].UploadProgress < 10 || rf.Files[1].UploadProgress < 10 {
		t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0], rf.Files[1])
	}

	// Check financial metrics; funds should have been spent on uploads/downloads.
	err = st.getAPI("/renter", &rg)
	if err != nil {
		t.Fatal(err)
	}
	fm := rg.FinancialMetrics
	newSpent := rg.Settings.Allowance.Funds.Sub(fm.Unspent)
	// All new spending should be reflected in upload/download/storage spending.
	diff := fm.UploadSpending.Add(fm.DownloadSpending).Add(fm.StorageSpending)
	if !diff.Equals(newSpent.Sub(spent)) {
		t.Fatal("all new spending should be reflected in metrics:", diff, newSpent.Sub(spent))
	}
}
// TestRenterCancelAllowance tests that setting an empty allowance causes
// uploads, downloads, and renewals to cease.
//
// The sequence is order-sensitive: upload while funded, zero the allowance,
// poll until contracts drop goodForUpload/goodForRenew, verify downloads still
// work until the contracts expire, then verify both upload and download fail.
func TestRenterCancelAllowance(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.panicClose()
	// Announce the host and start accepting contracts.
	err = st.announceHost()
	if err != nil {
		t.Fatal(err)
	}
	err = st.setHostStorage()
	if err != nil {
		t.Fatal(err)
	}
	err = st.acceptContracts()
	if err != nil {
		t.Fatal(err)
	}
	// Set an allowance for the renter, allowing a contract to be formed.
	// testPeriod is kept as an int so the expiry loop below can mine exactly
	// one period's worth of blocks.
	allowanceValues := url.Values{}
	testFunds := "10000000000000000000000000000" // 10k SC
	testPeriod := 20
	allowanceValues.Set("funds", testFunds)
	allowanceValues.Set("period", fmt.Sprint(testPeriod))
	allowanceValues.Set("renewwindow", testRenewWindow)
	allowanceValues.Set("hosts", fmt.Sprint(recommendedHosts))
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}
	// Block until the allowance has finished forming contracts.
	err = build.Retry(50, time.Millisecond*250, func() error {
		var rc RenterContracts
		err = st.getAPI("/renter/contracts", &rc)
		if err != nil {
			return errors.New("couldn't get renter stats")
		}
		if len(rc.Contracts) != 1 {
			return errors.New("no contracts")
		}
		return nil
	})
	if err != nil {
		t.Fatal("allowance setting failed")
	}
	// Create a file.
	path := filepath.Join(st.dir, "test.dat")
	err = createRandFile(path, 1024)
	if err != nil {
		t.Fatal(err)
	}
	// Upload the file to the renter.
	uploadValues := url.Values{}
	uploadValues.Set("source", path)
	err = st.stdPostAPI("/renter/upload/test", uploadValues)
	if err != nil {
		t.Fatal(err)
	}
	// Only one piece will be uploaded (10% at current redundancy).
	// The getAPI error is deliberately ignored inside the poll loop; a
	// transient failure just means another iteration.
	var rf RenterFiles
	for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ {
		st.getAPI("/renter/files", &rf)
		time.Sleep(100 * time.Millisecond)
	}
	if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 {
		t.Fatal("the uploading is not succeeding for some reason:", rf.Files[0])
	}
	// Cancel the allowance by posting all-zero allowance values.
	allowanceValues = url.Values{}
	allowanceValues.Set("funds", "0")
	allowanceValues.Set("hosts", "0")
	allowanceValues.Set("period", "0")
	allowanceValues.Set("renewwindow", "0")
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}
	// Give it some time to mark the contracts as !goodForUpload and
	// !goodForRenew.
	err = build.Retry(600, 100*time.Millisecond, func() error {
		var rc RenterContracts
		err = st.getAPI("/renter/contracts", &rc)
		if err != nil {
			return errors.New("couldn't get renter stats")
		}
		for _, c := range rc.Contracts {
			if c.GoodForUpload {
				return errors.New("contract shouldn't be goodForUpload")
			}
			if c.GoodForRenew {
				return errors.New("contract shouldn't be goodForRenew")
			}
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
	// Try downloading the file; should succeed because the contracts have not
	// yet expired, only been marked unusable for new uploads/renewals.
	downpath := filepath.Join(st.dir, "testdown.dat")
	err = st.stdGetAPI("/renter/download/test?destination=" + downpath)
	if err != nil {
		t.Fatal("downloading file failed", err)
	}
	// Try to upload a file after the allowance was cancelled. Should fail.
	// Note the POST itself succeeds (the file is registered); the failure is
	// observed below as zero upload progress and zero redundancy.
	err = st.stdPostAPI("/renter/upload/test2", uploadValues)
	if err != nil {
		t.Fatal(err)
	}
	// Give it some time to upload.
	time.Sleep(time.Second)
	// Redundancy should still be 0.
	if err := st.getAPI("/renter/files", &rf); err != nil {
		t.Fatal(err)
	}
	if len(rf.Files) != 2 || rf.Files[1].UploadProgress > 0 || rf.Files[1].Redundancy > 0 {
		t.Fatal("uploading a file after cancelling allowance should fail",
			rf.Files[1].UploadProgress, rf.Files[1].Redundancy)
	}
	// Mine enough blocks for the period to pass and the contracts to expire.
	for i := 0; i < testPeriod; i++ {
		if _, err := st.miner.AddBlock(); err != nil {
			t.Fatal(err)
		}
	}
	// Try downloading the file; should fail now that the contracts expired.
	err = st.stdGetAPI("/renter/download/test?destination=" + downpath)
	if err == nil || !strings.Contains(err.Error(), "download failed") {
		t.Fatal("expected insufficient hosts error, got", err)
	}
	// The uploaded file should have 0x redundancy now.
	err = build.Retry(600, 100*time.Millisecond, func() error {
		if err := st.getAPI("/renter/files", &rf); err != nil {
			return err
		}
		if len(rf.Files) != 2 || rf.Files[0].Redundancy != 0 {
			return errors.New("file redundancy should be 0 now")
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}
// TestRenterParallelDelete tests that uploading and deleting parallel does not
// result in failures or stalling.
//
// The test deliberately interleaves uploads with deletes (including a
// fire-and-forget goroutine delete) to exercise race conditions in the
// renter, so the exact ordering of the calls below matters.
func TestRenterParallelDelete(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.panicClose()
	// Announce the host and start accepting contracts.
	err = st.announceHost()
	if err != nil {
		t.Fatal(err)
	}
	err = st.acceptContracts()
	if err != nil {
		t.Fatal(err)
	}
	err = st.setHostStorage()
	if err != nil {
		t.Fatal(err)
	}
	// Set an allowance for the renter, allowing a contract to be formed.
	allowanceValues := url.Values{}
	testFunds := "10000000000000000000000000000" // 10k SC
	testPeriod := "10"
	allowanceValues.Set("funds", testFunds)
	allowanceValues.Set("period", testPeriod)
	allowanceValues.Set("renewwindow", testRenewWindow)
	allowanceValues.Set("hosts", fmt.Sprint(recommendedHosts))
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}
	// Create two files.
	path := filepath.Join(st.dir, "test.dat")
	err = createRandFile(path, 1024)
	if err != nil {
		t.Fatal(err)
	}
	path2 := filepath.Join(st.dir, "test2.dat")
	err = createRandFile(path2, 1024)
	if err != nil {
		t.Fatal(err)
	}
	// Upload the first file to host.
	uploadValues := url.Values{}
	uploadValues.Set("source", path)
	err = st.stdPostAPI("/renter/upload/test", uploadValues)
	if err != nil {
		t.Fatal(err)
	}
	// Wait for the first file to be registered in the renter. The getAPI
	// error is deliberately ignored inside the poll loop; a transient failure
	// just means another iteration.
	var rf RenterFiles
	for i := 0; i < 200 && len(rf.Files) != 1; i++ {
		st.getAPI("/renter/files", &rf)
		time.Sleep(100 * time.Millisecond)
	}
	if len(rf.Files) != 1 {
		t.Fatal("file is not being registered:", rf.Files)
	}
	// In parallel, start uploading the other file, and delete the first file.
	uploadValues = url.Values{}
	uploadValues.Set("source", path2)
	err = st.stdPostAPI("/renter/upload/test2", uploadValues)
	if err != nil {
		t.Fatal(err)
	}
	err = st.stdPostAPI("/renter/delete/test", url.Values{})
	if err != nil {
		t.Fatal(err)
	}
	// Only the second file should be present.
	st.getAPI("/renter/files", &rf)
	if len(rf.Files) != 1 || rf.Files[0].SiaPath != "test2" {
		t.Fatal("file was not deleted properly:", rf.Files)
	}
	// Wait for the second upload to complete (10% progress means the first
	// piece landed at current redundancy).
	for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ {
		st.getAPI("/renter/files", &rf)
		time.Sleep(100 * time.Millisecond)
	}
	if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 {
		t.Fatal("the uploading is not succeeding for some reason:", rf.Files)
	}
	// In parallel, download and delete the second file. The delete runs as a
	// fire-and-forget goroutine (its error is discarded on purpose); the
	// sleep gives the delete a head start so the download should fail.
	go st.stdPostAPI("/renter/delete/test2", url.Values{})
	time.Sleep(100 * time.Millisecond)
	downpath := filepath.Join(st.dir, "testdown.dat")
	err = st.stdGetAPI("/renter/download/test2?destination=" + downpath)
	if err == nil {
		t.Fatal("download should fail after delete")
	}
	// No files should be present.
	st.getAPI("/renter/files", &rf)
	if len(rf.Files) != 0 {
		t.Fatal("file was not deleted properly:", rf.Files)
	}
}
// TestRenterRenew sets up an integration test where a renter renews a
// contract with a host.
func TestRenterRenew(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.panicClose()

	// Announce the host and start accepting contracts.
	err = st.announceHost()
	if err != nil {
		t.Fatal(err)
	}
	err = st.acceptContracts()
	if err != nil {
		t.Fatal(err)
	}
	err = st.setHostStorage()
	if err != nil {
		t.Fatal(err)
	}

	// Poll the hostdb until the announced host appears as an active host.
	var ah HostdbActiveGET
	for i := 0; i < 50; i++ {
		if err = st.getAPI("/hostdb/active", &ah); err != nil {
			t.Fatal(err)
		}
		if len(ah.Hosts) == 1 {
			break
		}
		time.Sleep(time.Millisecond * 100)
	}
	if len(ah.Hosts) != 1 {
		t.Fatalf("expected 1 host, got %v", len(ah.Hosts))
	}

	// Set an allowance for the renter, allowing a contract to be formed.
	allowanceValues := url.Values{}
	testFunds := "10000000000000000000000000000" // 10k SC
	testPeriod := 10
	allowanceValues.Set("funds", testFunds)
	allowanceValues.Set("period", strconv.Itoa(testPeriod))
	allowanceValues.Set("renewwindow", strconv.Itoa(testPeriod/2))
	allowanceValues.Set("hosts", fmt.Sprint(recommendedHosts))
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}

	// Block until the allowance has finished forming contracts.
	err = build.Retry(50, time.Millisecond*250, func() error {
		var rc RenterContracts
		err = st.getAPI("/renter/contracts", &rc)
		if err != nil {
			return errors.New("couldn't get renter stats")
		}
		if len(rc.Contracts) != 1 {
			return errors.New("no contracts")
		}
		return nil
	})
	if err != nil {
		t.Fatal("allowance setting failed")
	}

	// Create a file.
	path := filepath.Join(st.dir, "test.dat")
	err = createRandFile(path, 1024)
	if err != nil {
		t.Fatal(err)
	}

	// Upload the file to the renter.
	uploadValues := url.Values{}
	uploadValues.Set("source", path)
	err = st.stdPostAPI("/renter/upload/test", uploadValues)
	if err != nil {
		t.Fatal(err)
	}
	// Only one piece will be uploaded (10% at current redundancy).
	var rf RenterFiles
	for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ {
		st.getAPI("/renter/files", &rf)
		time.Sleep(100 * time.Millisecond)
	}
	if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 {
		// Print the whole slice; indexing rf.Files[0] here would panic with
		// index-out-of-range if the file never registered (len == 0).
		t.Fatal("the uploading is not succeeding for some reason:", rf.Files)
	}

	// Get the current contract ID so renewal can be detected by ID change.
	var rc RenterContracts
	err = st.getAPI("/renter/contracts", &rc)
	if err != nil {
		t.Fatal(err)
	}
	if len(rc.Contracts) == 0 {
		t.Fatal("no contracts found")
	}
	contractID := rc.Contracts[0].ID

	// Mine enough blocks to enter the renewal window.
	testWindow := testPeriod / 2
	for i := 0; i < testWindow+1; i++ {
		_, err = st.miner.AddBlock()
		if err != nil {
			t.Fatal(err)
		}
	}
	// Wait for the contract to be renewed.
	for i := 0; i < 200 && (len(rc.Contracts) != 1 || rc.Contracts[0].ID == contractID); i++ {
		st.getAPI("/renter/contracts", &rc)
		time.Sleep(100 * time.Millisecond)
	}
	// Guard the index: the loop can exhaust its iterations with zero contracts.
	if len(rc.Contracts) == 0 || rc.Contracts[0].ID == contractID {
		t.Fatal("contract was not renewed:", rc.Contracts)
	}

	// Try downloading the file.
	downpath := filepath.Join(st.dir, "testdown.dat")
	err = st.stdGetAPI("/renter/download/test?destination=" + downpath)
	if err != nil {
		t.Fatal(err)
	}
	// Check that the download has the right contents.
	orig, err := ioutil.ReadFile(path)
	if err != nil {
		t.Fatal(err)
	}
	download, err := ioutil.ReadFile(downpath)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(orig, download) {
		t.Fatal("data mismatch when downloading a file")
	}
}
// TestRenterAllowance sets up an integration test where a renter attempts to
// download a file after changing the allowance.
func TestRenterAllowance(t *testing.T) {
	t.Skip("bypassing NDF")
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.panicClose()

	// Announce the host and start accepting contracts.
	err = st.announceHost()
	if err != nil {
		t.Fatal(err)
	}
	err = st.acceptContracts()
	if err != nil {
		t.Fatal(err)
	}
	err = st.setHostStorage()
	if err != nil {
		t.Fatal(err)
	}

	// Set an allowance for the renter, allowing a contract to be formed.
	allowanceValues := url.Values{}
	testFunds := types.SiacoinPrecision.Mul64(10000) // 10k SC
	testPeriod := 20
	allowanceValues.Set("funds", testFunds.String())
	allowanceValues.Set("period", strconv.Itoa(testPeriod))
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}

	// Create a file.
	path := filepath.Join(st.dir, "test.dat")
	err = createRandFile(path, 1024)
	if err != nil {
		t.Fatal(err)
	}

	// Upload the file to the renter.
	uploadValues := url.Values{}
	uploadValues.Set("source", path)
	err = st.stdPostAPI("/renter/upload/test", uploadValues)
	if err != nil {
		t.Fatal(err)
	}
	// Only one piece will be uploaded (10% at current redundancy).
	var rf RenterFiles
	for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ {
		st.getAPI("/renter/files", &rf)
		time.Sleep(100 * time.Millisecond)
	}
	if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 {
		// Print the whole slice; indexing rf.Files[0] here would panic if the
		// file never registered (len == 0).
		t.Fatal("the uploading is not succeeding for some reason:", rf.Files)
	}

	t.Skip("ndf - re-enable after contractor overhaul")

	// Try downloading the file after modifying the allowance in various ways.
	allowances := []struct {
		funds  types.Currency
		period int
	}{
		{testFunds.Mul64(10), testPeriod / 2},
		{testFunds, testPeriod / 2},
		{testFunds.Div64(10), testPeriod / 2},
		{testFunds.Mul64(10), testPeriod},
		{testFunds, testPeriod},
		{testFunds.Div64(10), testPeriod},
		{testFunds.Mul64(10), testPeriod * 2},
		{testFunds, testPeriod * 2},
		{testFunds.Div64(10), testPeriod * 2},
	}
	for _, a := range allowances {
		allowanceValues.Set("funds", a.funds.String())
		allowanceValues.Set("period", strconv.Itoa(a.period))
		err = st.stdPostAPI("/renter", allowanceValues)
		if err != nil {
			t.Fatal(err)
		}
		time.Sleep(100 * time.Millisecond)

		// Try downloading the file.
		downpath := filepath.Join(st.dir, "testdown.dat")
		err = st.stdGetAPI("/renter/download/test?destination=" + downpath)
		if err != nil {
			t.Fatal(err)
		}
		// Check that the download has the right contents.
		orig, err := ioutil.ReadFile(path)
		if err != nil {
			t.Fatal(err)
		}
		download, err := ioutil.ReadFile(downpath)
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(orig, download) {
			t.Fatal("data mismatch when downloading a file")
		}
	}
}
// TestHostAndRentReload sets up an integration test where a host and renter
// do basic uploads and downloads, with an intervening shutdown+startup.
func TestHostAndRentReload(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	// Note: no deferred close here on purpose — the server is closed and
	// reassembled mid-test; the deferred panicClose is installed on the
	// reassembled tester below.

	// Announce the host and start accepting contracts.
	err = st.announceHost()
	if err != nil {
		t.Fatal(err)
	}
	err = st.acceptContracts()
	if err != nil {
		t.Fatal(err)
	}
	err = st.setHostStorage()
	if err != nil {
		t.Fatal(err)
	}
	// Mine a block so that the wallet reclaims refund outputs
	_, err = st.miner.AddBlock()
	if err != nil {
		t.Fatal(err)
	}

	// Set an allowance for the renter, allowing a contract to be formed.
	allowanceValues := url.Values{}
	testFunds := "10000000000000000000000000000" // 10k SC
	testPeriod := "10"
	allowanceValues.Set("funds", testFunds)
	allowanceValues.Set("period", testPeriod)
	allowanceValues.Set("renewwindow", testRenewWindow)
	allowanceValues.Set("hosts", fmt.Sprint(recommendedHosts))
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}

	// Block until the allowance has finished forming contracts.
	err = build.Retry(50, time.Millisecond*250, func() error {
		var rc RenterContracts
		err = st.getAPI("/renter/contracts", &rc)
		if err != nil {
			return errors.New("couldn't get renter stats")
		}
		if len(rc.Contracts) != 1 {
			return errors.New("no contracts")
		}
		return nil
	})
	if err != nil {
		t.Fatal("allowance setting failed")
	}

	// Create a file.
	path := filepath.Join(st.dir, "test.dat")
	err = createRandFile(path, 1024)
	if err != nil {
		t.Fatal(err)
	}

	// Upload the file to the renter.
	uploadValues := url.Values{}
	uploadValues.Set("source", path)
	err = st.stdPostAPI("/renter/upload/test", uploadValues)
	if err != nil {
		t.Fatal(err)
	}
	// Only one piece will be uploaded (10% at current redundancy).
	var rf RenterFiles
	for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ {
		st.getAPI("/renter/files", &rf)
		time.Sleep(100 * time.Millisecond)
	}
	if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 {
		// Print the whole slice; indexing rf.Files[0] here would panic if the
		// file never registered (len == 0).
		t.Fatal("the uploading is not succeeding for some reason:", rf.Files)
	}

	// Try downloading the file.
	downpath := filepath.Join(st.dir, "testdown.dat")
	err = st.stdGetAPI("/renter/download/test?destination=" + downpath)
	if err != nil {
		t.Fatal(err)
	}
	// Check that the download has the right contents.
	orig, err := ioutil.ReadFile(path)
	if err != nil {
		t.Fatal(err)
	}
	download, err := ioutil.ReadFile(downpath)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(orig, download) {
		t.Fatal("data mismatch when downloading a file")
	}
	// The renter's downloads queue should have 1 entry now.
	var queue RenterDownloadQueue
	if err = st.getAPI("/renter/downloads", &queue); err != nil {
		t.Fatal(err)
	}
	if len(queue.Downloads) != 1 {
		t.Fatalf("expected renter to have 1 download in the queue; got %v", len(queue.Downloads))
	}

	// close and reopen the server
	err = st.server.Close()
	if err != nil {
		t.Fatal(err)
	}
	st, err = assembleServerTester(st.walletKey, st.dir)
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.panicClose()

	// Announce the host again and wait until the host is re-scanned and put
	// back into the hostdb as an active host.
	announceValues := url.Values{}
	announceValues.Set("address", string(st.host.ExternalSettings().NetAddress))
	err = st.stdPostAPI("/host/announce", announceValues)
	if err != nil {
		t.Fatal(err)
	}
	// Mine a block.
	_, err = st.miner.AddBlock()
	if err != nil {
		t.Fatal(err)
	}
	err = build.Retry(100, time.Millisecond*100, func() error {
		var hosts HostdbActiveGET
		err := st.getAPI("/hostdb/active", &hosts)
		if err != nil {
			return err
		}
		if len(hosts.Hosts) != 1 {
			return errors.New("host is not in the set of active hosts")
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// Try downloading the file again after the reload.
	err = st.stdGetAPI("/renter/download/test?destination=" + downpath)
	if err != nil {
		t.Fatal(err)
	}
	// Check that the download has the right contents.
	orig, err = ioutil.ReadFile(path)
	if err != nil {
		t.Fatal(err)
	}
	download, err = ioutil.ReadFile(downpath)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(orig, download) {
		t.Fatal("data mismatch when downloading a file")
	}
}
// TestHostAndRenterRenewInterrupt sets up a renter and a separate host node,
// uploads a file, and verifies the renter can still download after the
// contract has been renewed. (Currently skipped pending the contractor
// overhaul.)
func TestHostAndRenterRenewInterrupt(t *testing.T) {
	t.Skip("active test following contractor overhaul")
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	stHost, err := blankServerTester(t.Name() + "-Host")
	if err != nil {
		t.Fatal(err)
	}
	sts := []*serverTester{st, stHost}
	err = fullyConnectNodes(sts)
	if err != nil {
		t.Fatal(err)
	}
	err = fundAllNodes(sts)
	if err != nil {
		t.Fatal(err)
	}

	// Announce the host.
	err = stHost.acceptContracts()
	if err != nil {
		t.Fatal(err)
	}
	err = stHost.setHostStorage()
	if err != nil {
		t.Fatal(err)
	}
	err = stHost.announceHost()
	if err != nil {
		t.Fatal(err)
	}

	// Wait for host to be seen in renter's hostdb
	var ah HostdbActiveGET
	for i := 0; i < 50; i++ {
		if err = st.getAPI("/hostdb/active", &ah); err != nil {
			t.Fatal(err)
		}
		if len(ah.Hosts) == 1 {
			break
		}
		time.Sleep(time.Millisecond * 100)
	}
	if len(ah.Hosts) != 1 {
		t.Fatalf("expected 1 host, got %v", len(ah.Hosts))
	}

	// Set an allowance so a contract can be formed with the host.
	allowanceValues := url.Values{}
	testFunds := "10000000000000000000000000000" // 10k SC
	testPeriod := "10"
	testPeriodInt := 10
	allowanceValues.Set("funds", testFunds)
	allowanceValues.Set("period", testPeriod)
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}

	// Create a file.
	path := filepath.Join(st.dir, "test.dat")
	err = createRandFile(path, 10e3)
	if err != nil {
		t.Fatal(err)
	}

	// Upload the file to the renter.
	uploadValues := url.Values{}
	uploadValues.Set("source", path)
	err = st.stdPostAPI("/renter/upload/test", uploadValues)
	if err != nil {
		t.Fatal(err)
	}

	// Get the current contract ID so renewal can be detected by ID change.
	var rc RenterContracts
	err = st.getAPI("/renter/contracts", &rc)
	if err != nil {
		t.Fatal(err)
	}
	// Guard the index: the contract may not have formed yet.
	if len(rc.Contracts) == 0 {
		t.Fatal("no contracts found")
	}
	contractID := rc.Contracts[0].ID

	// Mine enough blocks to enter the renewal window.
	testWindow := testPeriodInt / 2
	for i := 0; i < testWindow+1; i++ {
		_, err = st.miner.AddBlock()
		if err != nil {
			t.Fatal(err)
		}
	}
	// Wait for the contract to be renewed.
	for i := 0; i < 200 && (len(rc.Contracts) != 1 || rc.Contracts[0].ID == contractID); i++ {
		st.getAPI("/renter/contracts", &rc)
		time.Sleep(100 * time.Millisecond)
	}
	// Guard the index: the loop can exhaust its iterations with zero contracts.
	if len(rc.Contracts) == 0 || rc.Contracts[0].ID == contractID {
		t.Fatal("contract was not renewed:", rc.Contracts)
	}

	// Only one piece will be uploaded (10% at current redundancy).
	var rf RenterFiles
	for i := 0; i < 200 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10); i++ {
		st.getAPI("/renter/files", &rf)
		time.Sleep(1000 * time.Millisecond)
	}
	if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 10 {
		// Print the whole slice; indexing rf.Files[0] here would panic if the
		// file never registered (len == 0).
		t.Fatal("the uploading is not succeeding for some reason:", rf.Files)
	}

	// Try downloading the file.
	downpath := filepath.Join(st.dir, "testdown.dat")
	err = st.stdGetAPI("/renter/download/test?destination=" + downpath)
	if err != nil {
		t.Fatal(err)
	}
	// Check that the download has the right contents.
	orig, err := ioutil.ReadFile(path)
	if err != nil {
		t.Fatal(err)
	}
	download, err := ioutil.ReadFile(downpath)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(orig, download) {
		t.Fatal("data mismatch when downloading a file")
	}
}
// TestRedundancyReporting verifies that redundancy reporting is accurate if
// contracts become offline.
//
// Flow: form contracts with two hosts, upload to redundancy 2, kill one host
// and watch redundancy drop to 1, then revive the host and confirm the
// reported redundancy climbs back to 2.
func TestRedundancyReporting(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.Close()
	stH1, err := blankServerTester(t.Name() + " - Host 2")
	if err != nil {
		t.Fatal(err)
	}
	testGroup := []*serverTester{st, stH1}
	// Connect the testers to eachother so that they are all on the same
	// blockchain.
	err = fullyConnectNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Make sure that every wallet has money in it.
	err = fundAllNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Add storage to every host.
	err = addStorageToAllHosts(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Announce every host.
	err = announceAllHosts(testGroup)
	if err != nil {
		t.Fatal(err)
	}

	// Set an allowance with two hosts.
	allowanceValues := url.Values{}
	allowanceValues.Set("funds", "50000000000000000000000000000") // 50k SC
	allowanceValues.Set("hosts", "2")
	allowanceValues.Set("period", "10")
	allowanceValues.Set("renewwindow", "5")
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}

	// Block until the allowance has finished forming contracts.
	err = build.Retry(50, time.Millisecond*250, func() error {
		var rc RenterContracts
		err = st.getAPI("/renter/contracts", &rc)
		if err != nil {
			return errors.New("couldn't get renter stats")
		}
		if len(rc.Contracts) != 2 {
			return errors.New("no contracts")
		}
		return nil
	})
	if err != nil {
		t.Fatal("allowance setting failed")
	}

	// Create a file to upload.
	filesize := int(1024)
	path := filepath.Join(st.dir, "test.dat")
	err = createRandFile(path, filesize)
	if err != nil {
		t.Fatal(err)
	}

	// upload the file
	uploadValues := url.Values{}
	uploadValues.Set("source", path)
	err = st.stdPostAPI("/renter/upload/test", uploadValues)
	if err != nil {
		t.Fatal(err)
	}

	// redundancy should reach 2
	var rf RenterFiles
	err = build.Retry(60, time.Second, func() error {
		st.getAPI("/renter/files", &rf)
		if len(rf.Files) >= 1 && rf.Files[0].Redundancy == 2 {
			return nil
		}
		return errors.New("file not uploaded")
	})
	if err != nil {
		t.Fatal(err)
	}

	// take down one of the hosts
	// NOTE(review): the Close error is deliberately ignored here — the host is
	// being killed to simulate going offline; confirm best-effort is intended.
	stH1.server.Close()

	// wait for the redundancy to decrement
	err = build.Retry(60, time.Second, func() error {
		st.getAPI("/renter/files", &rf)
		if len(rf.Files) >= 1 && rf.Files[0].Redundancy == 1 {
			return nil
		}
		return errors.New("file redundancy not decremented")
	})
	if err != nil {
		t.Fatal(err)
	}

	// bring back the host and let it mine a block
	stH1, err = assembleServerTester(stH1.walletKey, stH1.dir)
	if err != nil {
		t.Fatal(err)
	}
	defer stH1.server.Close()
	testGroup = []*serverTester{st, stH1}

	// Make sure the leader of the group has the longest chain before
	// connecting the nodes
	if _, err := st.miner.AddBlock(); err != nil {
		t.Fatal(err)
	}
	err = fullyConnectNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}

	// Add a block to clear the transaction pool and give the host an output to
	// make an announcement, and then make the announcement.
	_, err = st.miner.AddBlock()
	if err != nil {
		t.Fatal(err)
	}
	_, err = synchronizationCheck(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	err = announceAllHosts(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	_, err = st.miner.AddBlock()
	if err != nil {
		t.Fatal(err)
	}
	err = waitForBlock(st.cs.CurrentBlock().ID(), stH1)
	if err != nil {
		t.Fatal(err)
	}
	_, err = synchronizationCheck(testGroup)
	if err != nil {
		t.Fatal(err)
	}

	// Wait until the host shows back up in the hostdb.
	var ah HostdbActiveGET
	err = build.Retry(1000, 100*time.Millisecond, func() error {
		// NOTE(review): t.Fatal inside a retry closure aborts the test on the
		// first transient API error instead of retrying; returning the error
		// would be more in line with the surrounding Retry usage.
		if err = st.getAPI("/hostdb/active", &ah); err != nil {
			t.Fatal(err)
		}
		if len(ah.Hosts) != 2 {
			return fmt.Errorf("not enough hosts in hostdb, number of hosts is: %v", len(ah.Hosts))
		}
		// Require at least two scans per host so the revived host's uptime
		// status is fresh, not a stale pre-shutdown result.
		for _, host := range ah.Hosts {
			if len(host.ScanHistory) < 2 {
				return errors.New("hosts are not scanned")
			}
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// Mine another block so that the contract checker updates the IsGood status
	// of the contracts.
	_, err = st.miner.AddBlock()
	if err != nil {
		t.Fatal(err)
	}
	_, err = synchronizationCheck(testGroup)
	if err != nil {
		t.Fatal(err)
	}

	// Redundancy should re-report at 2.
	err = build.Retry(250, 100*time.Millisecond, func() error {
		st.getAPI("/renter/files", &rf)
		if len(rf.Files) >= 1 && rf.Files[0].Redundancy == 2 {
			return nil
		}
		return errors.New("file redundancy not incremented")
	})
	if err != nil {
		t.Fatal(err)
	}
}
// TestUploadedBytesReporting verifies that reporting of how many bytes have
// been uploaded via active contracts is accurate.
//
// It uploads a two-sector file at 1-of-2 redundancy and checks, while the
// upload progresses and again at completion, that UploadedBytes matches
// UploadProgress times the fully redundant encoded size.
func TestUploadedBytesReporting(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.Close()
	stH1, err := blankServerTester(t.Name() + " - Host 2")
	if err != nil {
		t.Fatal(err)
	}
	defer stH1.server.Close()
	testGroup := []*serverTester{st, stH1}

	// Connect the testers to eachother so that they are all on the same
	// blockchain.
	err = fullyConnectNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Make sure that every wallet has money in it.
	err = fundAllNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Add storage to every host.
	err = addStorageToAllHosts(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Announce every host.
	err = announceAllHosts(testGroup)
	if err != nil {
		t.Fatal(err)
	}

	// Set an allowance with two hosts.
	allowanceValues := url.Values{}
	allowanceValues.Set("funds", "50000000000000000000000000000") // 50k SC
	allowanceValues.Set("hosts", "2")
	allowanceValues.Set("period", "10")
	allowanceValues.Set("renewwindow", "5")
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}

	// Block until the allowance has finished forming contracts.
	err = build.Retry(50, time.Millisecond*250, func() error {
		var rc RenterContracts
		err = st.getAPI("/renter/contracts", &rc)
		if err != nil {
			return errors.New("couldn't get renter stats")
		}
		if len(rc.Contracts) != 2 {
			return errors.New("no contracts")
		}
		return nil
	})
	if err != nil {
		t.Fatal("allowance setting failed")
	}

	// Create a file to upload.
	filesize := int(modules.SectorSize * 2)
	path := filepath.Join(st.dir, "test.dat")
	err = createRandFile(path, filesize)
	if err != nil {
		t.Fatal(err)
	}

	// Upload the file
	dataPieces := 1
	parityPieces := 1
	uploadValues := url.Values{}
	uploadValues.Set("source", path)
	uploadValues.Set("datapieces", fmt.Sprint(dataPieces))
	uploadValues.Set("paritypieces", fmt.Sprint(parityPieces))
	err = st.stdPostAPI("/renter/upload/test", uploadValues)
	if err != nil {
		t.Fatal(err)
	}

	// Calculate the encrypted size of our fully redundant encoded file
	pieceSize := modules.SectorSize - crypto.TwofishOverhead
	chunkSize := pieceSize * uint64(dataPieces)
	numChunks := uint64(filesize) / chunkSize
	if uint64(filesize)%chunkSize != 0 {
		numChunks++
	}
	fullyRedundantSize := modules.SectorSize * uint64(dataPieces+parityPieces) * uint64(numChunks)

	// Monitor the file as it uploads. Ensure that the UploadProgress times
	// the fully redundant file size always equals UploadedBytes reported
	var rf RenterFiles
	for i := 0; i < 60 && (len(rf.Files) != 1 || rf.Files[0].UploadProgress < 100); i++ {
		st.getAPI("/renter/files", &rf)
		if len(rf.Files) >= 1 {
			uploadProgressBytes := uint64(float64(fullyRedundantSize) * rf.Files[0].UploadProgress / 100.0)
			// Allow +1 for truncation error in the float conversion.
			// Note: in Go 1.10 we will be able to write Math.Round(uploadProgressBytes) != rf.Files[0].UploadedBytes
			if uploadProgressBytes != rf.Files[0].UploadedBytes && (uploadProgressBytes+1) != rf.Files[0].UploadedBytes {
				t.Fatalf("api reports having uploaded %v bytes when upload progress is %v%%, but the actual uploaded bytes count should be %v\n",
					rf.Files[0].UploadedBytes, rf.Files[0].UploadProgress, uploadProgressBytes)
			}
		}
		time.Sleep(time.Second)
	}
	// (A stale `if err != nil` check used to sit here; err cannot be non-nil
	// at this point because its last assignment was already checked above.)

	// Upload progress should be 100% and redundancy should reach 2
	if len(rf.Files) != 1 || rf.Files[0].UploadProgress < 100 || rf.Files[0].Redundancy != 2 {
		// Print the whole slice; indexing rf.Files[0] here would panic if the
		// file never registered (len == 0).
		t.Fatal("the uploading is not succeeding for some reason:", rf.Files)
	}

	// When the file is fully redundantly uploaded, UploadedBytes should
	// equal the file's fully redundant size
	if rf.Files[0].UploadedBytes != fullyRedundantSize {
		t.Fatalf("api reports having uploaded %v bytes when upload progress is 100%%, but the actual fully redundant file size is %v\n",
			rf.Files[0].UploadedBytes, fullyRedundantSize)
	}
}
// TestRenterMissingHosts verifies that if hosts are taken offline, downloads
// fail.
//
// It uploads at 2-of-3 redundancy to three hosts, then kills hosts one at a
// time, verifying downloads keep working until redundancy drops below 1.
func TestRenterMissingHosts(t *testing.T) {
	if testing.Short() || !build.VLONG {
		t.SkipNow()
	}
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.Close()
	stH1, err := blankServerTester(t.Name() + " - Host 1")
	if err != nil {
		t.Fatal(err)
	}
	defer stH1.server.Close()
	stH2, err := blankServerTester(t.Name() + " - Host 2")
	if err != nil {
		t.Fatal(err)
	}
	defer stH2.server.Close()
	stH3, err := blankServerTester(t.Name() + " - Host 3")
	if err != nil {
		t.Fatal(err)
	}
	defer stH3.server.Close()
	testGroup := []*serverTester{st, stH1, stH2, stH3}

	// Connect the testers to eachother so that they are all on the same
	// blockchain.
	err = fullyConnectNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Make sure that every wallet has money in it.
	err = fundAllNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Add storage to every host.
	err = addStorageToAllHosts(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	err = announceAllHosts(testGroup)
	if err != nil {
		t.Fatal(err)
	}

	// Set an allowance with three hosts.
	allowanceValues := url.Values{}
	allowanceValues.Set("funds", "50000000000000000000000000000") // 50k SC
	allowanceValues.Set("hosts", "3")
	allowanceValues.Set("period", "20")
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}

	// Block until the allowance has finished forming contracts.
	err = build.Retry(50, time.Millisecond*250, func() error {
		var rc RenterContracts
		err = st.getAPI("/renter/contracts", &rc)
		if err != nil {
			return errors.New("couldn't get renter stats")
		}
		if len(rc.Contracts) != 3 {
			return errors.New("no contracts")
		}
		return nil
	})
	if err != nil {
		t.Fatal("allowance setting failed:", err)
	}

	// Create a file to upload.
	filesize := int(100)
	path := filepath.Join(st.dir, "test.dat")
	err = createRandFile(path, filesize)
	if err != nil {
		t.Fatal(err)
	}

	// upload the file at 2-of-3 redundancy (full redundancy = 1.5)
	uploadValues := url.Values{}
	uploadValues.Set("source", path)
	uploadValues.Set("datapieces", "2")
	uploadValues.Set("paritypieces", "1")
	err = st.stdPostAPI("/renter/upload/test", uploadValues)
	if err != nil {
		t.Fatal(err)
	}

	// redundancy should reach 1.5
	var rf RenterFiles
	err = build.Retry(20, time.Second, func() error {
		st.getAPI("/renter/files", &rf)
		if len(rf.Files) >= 1 && rf.Files[0].Redundancy == 1.5 {
			return nil
		}
		return errors.New("file not uploaded")
	})
	if err != nil {
		t.Fatal(err)
	}

	// verify we can download
	downloadPath := filepath.Join(st.dir, "test-downloaded-verify.dat")
	err = st.stdGetAPI("/renter/download/test?destination=" + downloadPath)
	if err != nil {
		t.Fatal(err)
	}

	// take down one of the hosts
	err = stH1.server.Close()
	if err != nil {
		t.Fatal(err)
	}

	// redundancy should not decrement, we have a backup host we can use.
	err = build.Retry(60, time.Second, func() error {
		st.getAPI("/renter/files", &rf)
		if len(rf.Files) >= 1 && rf.Files[0].Redundancy == 1.5 {
			return nil
		}
		// Guard the index: building the message from rf.Files[0] would panic
		// if the file list is momentarily empty.
		if len(rf.Files) == 0 {
			return errors.New("no files reported by renter")
		}
		return errors.New("file redundancy not decremented: " + fmt.Sprint(rf.Files[0].Redundancy))
	})
	if err != nil {
		t.Log(err)
	}

	// verify we still can download
	downloadPath = filepath.Join(st.dir, "test-downloaded-verify2.dat")
	err = st.stdGetAPI("/renter/download/test?destination=" + downloadPath)
	if err != nil {
		t.Fatal(err)
	}

	// take down another host
	err = stH2.server.Close()
	if err != nil {
		t.Fatal(err)
	}

	// wait for the redundancy to decrement
	err = build.Retry(60, time.Second, func() error {
		st.getAPI("/renter/files", &rf)
		if len(rf.Files) >= 1 && rf.Files[0].Redundancy == 1 {
			return nil
		}
		if len(rf.Files) == 0 {
			return errors.New("no files reported by renter")
		}
		return errors.New("file redundancy not decremented: " + fmt.Sprint(rf.Files[0].Redundancy))
	})
	if err != nil {
		t.Log(err)
	}

	// verify we still can download
	downloadPath = filepath.Join(st.dir, "test-downloaded-verify2.dat")
	err = st.stdGetAPI("/renter/download/test?destination=" + downloadPath)
	if err != nil {
		t.Fatal(err)
	}

	// take down another host
	err = stH3.server.Close()
	if err != nil {
		t.Fatal(err)
	}

	// wait for the redundancy to decrement
	err = build.Retry(60, time.Second, func() error {
		st.getAPI("/renter/files", &rf)
		if len(rf.Files) >= 1 && rf.Files[0].Redundancy == 0 {
			return nil
		}
		if len(rf.Files) == 0 {
			return errors.New("no files reported by renter")
		}
		return errors.New("file redundancy not decremented: " + fmt.Sprint(rf.Files[0].Redundancy))
	})
	if err != nil {
		t.Log(err)
	}

	// verify that the download fails
	downloadPath = filepath.Join(st.dir, "test-downloaded-verify4.dat")
	err = st.stdGetAPI("/renter/download/test?destination=" + downloadPath)
	if err == nil {
		t.Fatal("expected download to fail with redundancy <1")
	}
}
// TestRepairLoopBlocking checks if the repair loop blocks operations while a
// non local file is being downloaded for repair.
//
// Flow: upload a file to redundancy 2, delete the local copy, kill a host so
// the repair loop must download the remote copy to repair, then verify that a
// concurrent second upload is not blocked by the in-progress repair download.
func TestRepairLoopBlocking(t *testing.T) {
	// TODO: Refactor dependency management to block download
	t.Skip("Test requires refactoring")
	if testing.Short() || !build.VLONG {
		t.SkipNow()
	}
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	//st.renter.SetDependencies(renter.BlockRepairUpload{})
	defer st.server.Close()
	stH1, err := blankServerTester(t.Name() + " - Host 1")
	if err != nil {
		t.Fatal(err)
	}
	defer stH1.server.Close()
	testGroup := []*serverTester{st, stH1}

	// Connect the testers to eachother so that they are all on the same
	// blockchain.
	err = fullyConnectNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Make sure that every wallet has money in it.
	err = fundAllNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Add storage to every host.
	err = addStorageToAllHosts(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	err = announceAllHosts(testGroup)
	if err != nil {
		t.Fatal(err)
	}

	// Set an allowance with two hosts.
	allowanceValues := url.Values{}
	allowanceValues.Set("funds", "50000000000000000000000000000") // 50k SC
	allowanceValues.Set("hosts", "2")
	allowanceValues.Set("period", "10")
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}

	// Create a file with 1 chunk to upload.
	filesize := int(1)
	path := filepath.Join(st.dir, "test.dat")
	err = createRandFile(path, filesize)
	if err != nil {
		t.Fatal(err)
	}

	// upload the file
	uploadValues := url.Values{}
	uploadValues.Set("source", path)
	err = st.stdPostAPI("/renter/upload/test", uploadValues)
	if err != nil {
		t.Fatal(err)
	}

	// redundancy should reach 2
	var rf RenterFiles
	err = build.Retry(60, time.Second, func() error {
		st.getAPI("/renter/files", &rf)
		if len(rf.Files) >= 1 && rf.Files[0].Redundancy == 2 {
			return nil
		}
		return errors.New("file not uploaded")
	})
	if err != nil {
		t.Fatal(err)
	}

	// verify we can download
	downloadPath := filepath.Join(st.dir, "test-downloaded-verify.dat")
	err = st.stdGetAPI("/renter/download/test?destination=" + downloadPath)
	if err != nil {
		t.Fatal(err)
	}

	// remove the local copy of the file, forcing any future repair to fetch
	// the data from the network instead of re-reading the source file
	err = os.Remove(path)
	if err != nil {
		t.Fatal(err)
	}

	// take down one of the hosts
	err = stH1.server.Close()
	if err != nil {
		t.Fatal(err)
	}

	// wait for the redundancy to decrement
	err = build.Retry(60, time.Second, func() error {
		st.getAPI("/renter/files", &rf)
		if len(rf.Files) >= 1 && rf.Files[0].Redundancy == 1 {
			return nil
		}
		return errors.New("file redundancy not decremented")
	})
	if err != nil {
		t.Fatal(err)
	}

	// verify we still can download
	downloadPath = filepath.Join(st.dir, "test-downloaded-verify2.dat")
	err = st.stdGetAPI("/renter/download/test?destination=" + downloadPath)
	if err != nil {
		t.Fatal(err)
	}

	// bring up a few new hosts
	testGroup = []*serverTester{st}
	for i := 0; i < 3; i++ {
		stNewHost, err := blankServerTester(t.Name() + fmt.Sprintf("-newhost%d", i))
		if err != nil {
			t.Fatal(err)
		}
		defer stNewHost.server.Close()
		testGroup = append(testGroup, stNewHost)
	}

	// Connect the testers to eachother so that they are all on the same
	// blockchain.
	err = fullyConnectNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	_, err = synchronizationCheck(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Make sure that every wallet has money in it.
	err = fundAllNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// NOTE(review): testGroup[1 : len(testGroup)-1] sets up only two of the
	// three new hosts, skipping the last one — verify this is intentional; it
	// looks like it should be testGroup[1:].
	for _, stNewHost := range testGroup[1 : len(testGroup)-1] {
		err = stNewHost.setHostStorage()
		if err != nil {
			t.Fatal(err)
		}
		err = stNewHost.announceHost()
		if err != nil {
			t.Fatal(err)
		}
		err = waitForBlock(stNewHost.cs.CurrentBlock().ID(), st)
		if err != nil {
			t.Fatal(err)
		}
		// add a few new blocks in order to cause the renter to form contracts with the new host
		for i := 0; i < 10; i++ {
			b, err := testGroup[0].miner.AddBlock()
			if err != nil {
				t.Fatal(err)
			}
			tipID, err := synchronizationCheck(testGroup)
			if err != nil {
				t.Fatal(err)
			}
			if b.ID() != tipID {
				t.Fatal("test group does not have the tip block")
			}
		}
	}

	// wait a few seconds for the the repair to be queued and started
	time.Sleep(3 * time.Second)

	// redundancy should not increment back to 2 because the renter should be blocked
	st.getAPI("/renter/files", &rf)
	if len(rf.Files) >= 1 && rf.Files[0].Redundancy >= 2 && rf.Files[0].Available {
		t.Error("The file's redundancy incremented back to 2 but shouldn't")
	}

	// create a second file to upload
	filesize = int(1)
	path2 := filepath.Join(st.dir, "test2.dat")
	err = createRandFile(path2, filesize)
	if err != nil {
		t.Fatal(err)
	}

	// upload the second file; run the POST in a goroutine so we can bound how
	// long the API call may block while the repair download is in flight
	uploadValues = url.Values{}
	uploadValues.Set("source", path2)
	wait := make(chan error)
	go func() {
		wait <- st.stdPostAPI("/renter/upload/test2", uploadValues)
	}()
	select {
	case <-time.After(time.Minute):
		t.Fatal("/renter/upload API call didn't return within 60 seconds")
	case err = <-wait:
	}
	if err != nil {
		t.Fatal(err)
	}

	// redundancy should reach 2 for the second file
	err = build.Retry(60, time.Second, func() error {
		st.getAPI("/renter/files", &rf)
		if len(rf.Files) >= 2 && rf.Files[1].Redundancy >= 2 {
			return nil
		}
		return errors.New("file 2 not uploaded")
	})
	if err != nil {
		t.Fatal(err)
	}

	// verify we can download the second file
	downloadPath = filepath.Join(st.dir, "test-downloaded-verify2.dat")
	err = st.stdGetAPI("/renter/download/test2?destination=" + downloadPath)
	if err != nil {
		t.Fatal(err)
	}
}
// TestRemoteFileRepairMassive is similar to TestRemoteFileRepair but uploads
// more files to find potential deadlocks or crashes
func TestRemoteFileRepairMassive(t *testing.T) {
	if testing.Short() || !build.VLONG {
		t.SkipNow()
	}
	st, err := createServerTester(t.Name())
	if err != nil {
		t.Fatal(err)
	}
	defer st.server.Close()
	stH1, err := blankServerTester(t.Name() + " - Host 1")
	if err != nil {
		t.Fatal(err)
	}
	defer stH1.server.Close()
	testGroup := []*serverTester{st, stH1}

	// Connect the testers to each other so that they are all on the same
	// blockchain.
	err = fullyConnectNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Make sure that every wallet has money in it.
	err = fundAllNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Add storage to every host.
	err = addStorageToAllHosts(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	err = announceAllHosts(testGroup)
	if err != nil {
		t.Fatal(err)
	}

	// Set an allowance with two hosts.
	allowanceValues := url.Values{}
	allowanceValues.Set("funds", "50000000000000000000000000000") // 50k SC
	allowanceValues.Set("hosts", "2")
	allowanceValues.Set("period", "10")
	err = st.stdPostAPI("/renter", allowanceValues)
	if err != nil {
		t.Fatal(err)
	}

	// Create a file to upload.
	filesize := int(4000)
	path := filepath.Join(st.dir, "test.dat")
	err = createRandFile(path, filesize)
	if err != nil {
		t.Fatal(err)
	}

	// upload the file numUploads times
	numUploads := 10
	uploadValues := url.Values{}
	uploadValues.Set("source", path)
	for i := 0; i < numUploads; i++ {
		err = st.stdPostAPI(fmt.Sprintf("/renter/upload/test%v", i), uploadValues)
		if err != nil {
			t.Fatal(err)
		}
	}

	// redundancy should reach 2 for all files
	var rf RenterFiles
	err = build.Retry(600, time.Second, func() error {
		st.getAPI("/renter/files", &rf)
		if len(rf.Files) != numUploads {
			return errors.New("file not uploaded")
		}
		for i, f := range rf.Files {
			if f.Redundancy != 2 {
				return fmt.Errorf("file %v only reached %v redundancy", i, f.Redundancy)
			}
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// remove the local copy of the file
	err = os.Remove(path)
	if err != nil {
		t.Fatal(err)
	}

	// take down one of the hosts
	err = stH1.server.Close()
	if err != nil {
		t.Fatal(err)
	}

	// wait for the redundancy to decrement
	err = build.Retry(60, time.Second, func() error {
		st.getAPI("/renter/files", &rf)
		if len(rf.Files) != numUploads {
			return errors.New("file not uploaded")
		}
		for _, f := range rf.Files {
			if f.Redundancy != 1 {
				return errors.New("file redundancy didn't decrement to x1")
			}
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// bring up a new host
	stNewHost, err := blankServerTester(t.Name() + "-newhost")
	if err != nil {
		t.Fatal(err)
	}
	defer stNewHost.server.Close()
	testGroup = []*serverTester{st, stNewHost}

	// Connect the testers to each other so that they are all on the same
	// blockchain.
	err = fullyConnectNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	_, err = synchronizationCheck(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	// Make sure that every wallet has money in it.
	err = fundAllNodes(testGroup)
	if err != nil {
		t.Fatal(err)
	}
	err = stNewHost.setHostStorage()
	if err != nil {
		t.Fatal(err)
	}
	err = stNewHost.announceHost()
	if err != nil {
		t.Fatal(err)
	}
	err = waitForBlock(stNewHost.cs.CurrentBlock().ID(), st)
	if err != nil {
		t.Fatal(err)
	}

	// add a few new blocks in order to cause the renter to form contracts with the new host
	for i := 0; i < 10; i++ {
		b, err := testGroup[0].miner.AddBlock()
		if err != nil {
			t.Fatal(err)
		}
		tipID, err := synchronizationCheck(testGroup)
		if err != nil {
			t.Fatal(err)
		}
		if b.ID() != tipID {
			t.Fatal("test group does not have the tip block")
		}
	}

	// redundancy should increment back to 2 as the renter uploads to the new
	// host using the download-to-upload strategy
	err = build.Retry(300, time.Second, func() error {
		st.getAPI("/renter/files", &rf)
		if len(rf.Files) != numUploads {
			return errors.New("file not uploaded")
		}
		for i, f := range rf.Files {
			if f.Redundancy != 2 {
				return fmt.Errorf("file %v only reached %v redundancy", i, f.Redundancy)
			}
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}
|
package hdf5
// #include "hdf5.h"
// #include <stdlib.h>
// #include <string.h>
import "C"
import (
"fmt"
"reflect"
"runtime"
"unsafe"
)
// Attribute is an HDF5 attribute handle. It embeds Location, which carries
// the underlying C hid_t identifier used by all H5A* calls below.
type Attribute struct {
	Location
}
// newAttribute wraps an HDF5 attribute id in an *Attribute and registers a
// finalizer so the id is released when the value is garbage-collected.
func newAttribute(id C.hid_t) *Attribute {
	d := &Attribute{Location{id}}
	runtime.SetFinalizer(d, (*Attribute).finalizer)
	return d
}
// createAttribute creates a new attribute named name on the object id with
// the given datatype, dataspace and attribute-creation property list. The
// final H5Acreate2 argument is the default access property list. A negative
// id from the C call is converted to an error by h5err.
func createAttribute(id C.hid_t, name string, dtype *Datatype, dspace *Dataspace, acpl *PropList) (*Attribute, error) {
	c_name := C.CString(name)
	defer C.free(unsafe.Pointer(c_name))
	hid := C.H5Acreate2(id, c_name, dtype.id, dspace.id, acpl.id, P_DEFAULT.id)
	if err := h5err(C.herr_t(int(hid))); err != nil {
		return nil, err
	}
	return newAttribute(hid), nil
}
// openAttribute opens an existing attribute named name on the object id,
// using the default access property list.
func openAttribute(id C.hid_t, name string) (*Attribute, error) {
	c_name := C.CString(name)
	defer C.free(unsafe.Pointer(c_name))
	hid := C.H5Aopen(id, c_name, P_DEFAULT.id)
	if err := h5err(C.herr_t(int(hid))); err != nil {
		return nil, err
	}
	return newAttribute(hid), nil
}
// finalizer is invoked by the garbage collector (see newAttribute) and
// closes the attribute. It panics if Close fails, since a finalizer has no
// caller to report the error to.
func (s *Attribute) finalizer() {
	err := s.Close()
	if err != nil {
		panic(fmt.Sprintf("error closing attr: %s", err))
	}
}
// Id returns the raw HDF5 identifier of this attribute as an int.
func (s *Attribute) Id() int {
	return int(s.id)
}
// GetType returns the datatype of the attribute, wrapped in a Location.
// NOTE(review): H5Aget_type returns a new datatype id; nothing here closes
// it, so the caller presumably owns it — confirm against call sites.
func (s *Attribute) GetType() Location {
	ftype := C.H5Aget_type(s.id)
	return Location{ftype}
}
// Close releases and terminates access to an attribute.
// The id is zeroed after closing, so calling Close twice is a no-op.
func (s *Attribute) Close() error {
	if s.id > 0 {
		err := C.H5Aclose(s.id)
		s.id = 0
		return h5err(err)
	}
	return nil
}
// Space returns an identifier for a copy of the dataspace for a attribute.
// It returns nil when H5Aget_space reports failure (non-positive id).
func (s *Attribute) Space() *Dataspace {
	hid := C.H5Aget_space(s.id)
	if int(hid) > 0 {
		return newDataspace(hid)
	}
	return nil
}
// Read reads raw data from an attribute into a buffer.
//
// The address of the storage behind data is extracted via reflection and
// handed directly to H5Aread, so its memory layout must match dtype.
func (s *Attribute) Read(data interface{}, dtype *Datatype) error {
	var addr uintptr
	v := reflect.ValueOf(data)
	switch v.Kind() {
	case reflect.Array:
		// NOTE(review): reflect.ValueOf yields a non-addressable value, so
		// UnsafeAddr panics for a plain array argument — confirm callers
		// pass pointers instead.
		addr = v.UnsafeAddr()
	case reflect.String:
		str := (*reflect.StringHeader)(unsafe.Pointer(v.UnsafeAddr()))
		addr = str.Data
	case reflect.Ptr:
		addr = v.Pointer()
	case reflect.Slice:
		// Pointer() yields the address of the first element. Previously a
		// slice fell through to the default branch, where UnsafeAddr
		// panics on the non-addressable reflect.Value; Write already used
		// Pointer() for slices.
		addr = v.Pointer()
	default:
		addr = v.UnsafeAddr()
	}
	rc := C.H5Aread(s.id, dtype.id, unsafe.Pointer(addr))
	return h5err(rc)
}
// Write writes raw data from a buffer to an attribute.
//
// The address of the storage behind data is extracted via reflection and
// handed directly to H5Awrite, so its memory layout must match dtype.
func (s *Attribute) Write(data interface{}, dtype *Datatype) error {
	var addr uintptr
	v := reflect.ValueOf(data)
	switch v.Kind() {
	case reflect.Array:
		// NOTE(review): reflect.ValueOf yields a non-addressable value, so
		// UnsafeAddr panics for a plain array argument — confirm callers
		// pass pointers instead.
		addr = v.UnsafeAddr()
	case reflect.String:
		str := (*reflect.StringHeader)(unsafe.Pointer(v.UnsafeAddr()))
		addr = str.Data
	case reflect.Ptr:
		addr = v.Pointer()
	default:
		// Pointer() is valid for chan/func/map/ptr/slice/unsafe-pointer
		// kinds only; other kinds (e.g. struct values) panic here.
		addr = v.Pointer()
	}
	rc := C.H5Awrite(s.id, dtype.id, unsafe.Pointer(addr))
	err := h5err(rc)
	return err
}
h5a: migrate to Identifier
package hdf5
// #include "hdf5.h"
// #include <stdlib.h>
// #include <string.h>
import "C"
import (
"fmt"
"reflect"
"runtime"
"unsafe"
)
// Attribute is an HDF5 attribute handle. It embeds Location, which carries
// the underlying C hid_t identifier used by all H5A* calls below.
type Attribute struct {
	Location
}
// newAttribute wraps an HDF5 attribute id in an *Attribute and registers a
// finalizer so the id is released when the value is garbage-collected.
func newAttribute(id C.hid_t) *Attribute {
	d := &Attribute{Location{Identifier{id}}}
	runtime.SetFinalizer(d, (*Attribute).finalizer)
	return d
}
// createAttribute creates a new attribute named name on the object id with
// the given datatype, dataspace and attribute-creation property list. The
// final H5Acreate2 argument is the default access property list. A negative
// id from the C call is converted to an error by h5err.
func createAttribute(id C.hid_t, name string, dtype *Datatype, dspace *Dataspace, acpl *PropList) (*Attribute, error) {
	c_name := C.CString(name)
	defer C.free(unsafe.Pointer(c_name))
	hid := C.H5Acreate2(id, c_name, dtype.id, dspace.id, acpl.id, P_DEFAULT.id)
	if err := h5err(C.herr_t(int(hid))); err != nil {
		return nil, err
	}
	return newAttribute(hid), nil
}
// openAttribute opens an existing attribute named name on the object id,
// using the default access property list.
func openAttribute(id C.hid_t, name string) (*Attribute, error) {
	c_name := C.CString(name)
	defer C.free(unsafe.Pointer(c_name))
	hid := C.H5Aopen(id, c_name, P_DEFAULT.id)
	if err := h5err(C.herr_t(int(hid))); err != nil {
		return nil, err
	}
	return newAttribute(hid), nil
}
// finalizer is invoked by the garbage collector (see newAttribute) and
// closes the attribute. It panics if Close fails, since a finalizer has no
// caller to report the error to.
func (s *Attribute) finalizer() {
	err := s.Close()
	if err != nil {
		panic(fmt.Sprintf("error closing attr: %s", err))
	}
}
// Id returns the raw HDF5 identifier of this attribute as an int.
func (s *Attribute) Id() int {
	return int(s.id)
}
// GetType returns the datatype of the attribute, wrapped in a Location.
// NOTE(review): H5Aget_type returns a new datatype id; nothing here closes
// it, so the caller presumably owns it — confirm against call sites.
func (s *Attribute) GetType() Location {
	ftype := C.H5Aget_type(s.id)
	return Location{Identifier{ftype}}
}
// Close releases and terminates access to an attribute.
// The id is zeroed after closing, so calling Close twice is a no-op.
func (s *Attribute) Close() error {
	if s.id > 0 {
		err := C.H5Aclose(s.id)
		s.id = 0
		return h5err(err)
	}
	return nil
}
// Space returns an identifier for a copy of the dataspace for a attribute.
// It returns nil when H5Aget_space reports failure (non-positive id).
func (s *Attribute) Space() *Dataspace {
	hid := C.H5Aget_space(s.id)
	if int(hid) > 0 {
		return newDataspace(hid)
	}
	return nil
}
// Read reads raw data from an attribute into a buffer.
//
// The address of the storage behind data is extracted via reflection and
// handed directly to H5Aread, so its memory layout must match dtype.
func (s *Attribute) Read(data interface{}, dtype *Datatype) error {
	var addr uintptr
	v := reflect.ValueOf(data)
	switch v.Kind() {
	case reflect.Array:
		// NOTE(review): reflect.ValueOf yields a non-addressable value, so
		// UnsafeAddr panics for a plain array argument — confirm callers
		// pass pointers instead.
		addr = v.UnsafeAddr()
	case reflect.String:
		str := (*reflect.StringHeader)(unsafe.Pointer(v.UnsafeAddr()))
		addr = str.Data
	case reflect.Ptr:
		addr = v.Pointer()
	case reflect.Slice:
		// Pointer() yields the address of the first element. Previously a
		// slice fell through to the default branch, where UnsafeAddr
		// panics on the non-addressable reflect.Value; Write already used
		// Pointer() for slices.
		addr = v.Pointer()
	default:
		addr = v.UnsafeAddr()
	}
	rc := C.H5Aread(s.id, dtype.id, unsafe.Pointer(addr))
	return h5err(rc)
}
// Write writes raw data from a buffer to an attribute.
//
// The address of the storage behind data is extracted via reflection and
// handed directly to H5Awrite, so its memory layout must match dtype.
func (s *Attribute) Write(data interface{}, dtype *Datatype) error {
	var addr uintptr
	v := reflect.ValueOf(data)
	switch v.Kind() {
	case reflect.Array:
		// NOTE(review): reflect.ValueOf yields a non-addressable value, so
		// UnsafeAddr panics for a plain array argument — confirm callers
		// pass pointers instead.
		addr = v.UnsafeAddr()
	case reflect.String:
		str := (*reflect.StringHeader)(unsafe.Pointer(v.UnsafeAddr()))
		addr = str.Data
	case reflect.Ptr:
		addr = v.Pointer()
	default:
		// Pointer() is valid for chan/func/map/ptr/slice/unsafe-pointer
		// kinds only; other kinds (e.g. struct values) panic here.
		addr = v.Pointer()
	}
	rc := C.H5Awrite(s.id, dtype.id, unsafe.Pointer(addr))
	err := h5err(rc)
	return err
}
|
package main
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"regexp"
"path/filepath"
"strings"
)
// main reads the source file named on the command line, marks lines that lie
// outside function bodies as invalid, interleaves every line with a print
// statement echoing it, writes the result to testfiles/c++/debug.cpp, then
// compiles and runs it.
func main() {
	if len(os.Args) < 2 {
		fmt.Println("usage: debug <sourcefile>")
		os.Exit(1)
	}
	sourceFile, err := ioutil.ReadFile(os.Args[1])
	if err != nil {
		// Previously a read failure was silently ignored and an empty debug
		// file was generated; fail loudly instead.
		fmt.Println(err)
		os.Exit(1)
	}
	s := string(sourceFile)
	fmt.Println(s)
	// filepath.Ext includes the leading dot; the old comparison to "c" was
	// always false, so C sources were treated as C++.
	cfile := filepath.Ext(os.Args[1]) == ".c"
	splitFile := RemoveNewlines(strings.Split(s, "\n"))
	lineSlice := CreateLines(splitFile)
	// Mark lines outside function bodies so AddPrint skips them; this call
	// was missing, leaving every line's code field at 0.
	MarkInvalid(lineSlice)
	debugFile, err := os.Create("testfiles/c++/debug.cpp")
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	for i := 0; i < len(lineSlice); i++ {
		debugFile.WriteString(lineSlice[i].s + "\n")
		if cfile {
			AddPrintc(debugFile, lineSlice[i].s)
		} else {
			AddPrint(debugFile, lineSlice[i])
		}
	}
	debugFile.Close()
	CompileAndRun()
}
// lineType pairs a source line with a validity code:
// code == 0 means the line should be echoed by AddPrint, non-zero (set by
// MarkInvalid) means it lies outside a function body and must be skipped.
type lineType struct {
	s    string
	code int
}
// AddPrint emits a C++ statement that echoes the given source line at
// runtime, but only for lines marked valid (code == 0).
// NOTE(review): line.s is embedded verbatim; quotes or backslashes in the
// source line will still break the generated literal — confirm inputs.
func AddPrint(file *os.File, line lineType) {
	if line.code == 0 {
		// The generated statement was missing the "<<" before std::endl,
		// producing invalid C++.
		file.WriteString("std::cout << \"" + line.s + "\" << std::endl;\n")
	}
}
// MarkInvalid flags lines that are not inside a function body by setting
// their code field to 1. It tracks nesting by counting lines that look like
// a function header ("...(...)...{") against bare "}" lines; a line is
// invalid when no function is currently open.
func MarkInvalid(lineSlice []lineType) {
	r, _ := regexp.Compile(`(.*)\((.*)\)(.*)\{`)
	opencount := 0
	closecount := 0
	for i := 0; i < len(lineSlice); i++ {
		// (removed leftover debug fmt.Println of the counters)
		if opencount <= closecount {
			lineSlice[i].code = 1
		}
		if r.MatchString(lineSlice[i].s) {
			opencount++
		} else if lineSlice[i].s == "}" {
			closecount++
		}
	}
}
func AddPrintc(file *os.File, s string) {
file.WriteString("printf(\"" + s + "\n\");\n")
}
// CreateLines converts raw source lines into lineType records, all initially
// marked valid (code == 0).
func CreateLines(stringSlice []string) []lineType {
	lineSlice := make([]lineType, 0, len(stringSlice))
	for _, line := range stringSlice {
		lineSlice = append(lineSlice, lineType{line, 0})
	}
	return lineSlice
}
func RemoveNewlines(stringSlice []string) []string {
var newSlice []string
for i := 0; i < len(stringSlice); i++ {
if stringSlice[i] != "" && stringSlice[i][0] != 10 {
newSlice = append(newSlice, stringSlice[i])
}
}
return newSlice
}
// CompileAndRun compiles the generated debug source with g++ and, if the
// compile succeeds, executes the resulting binary, printing the run error
// (or nil) as before.
func CompileAndRun() {
	gccCmd := exec.Command("g++", "-Wall", "testfiles/c++/debug.cpp", "-o", "testfiles/c++/out")
	// Previously the compile error was discarded, so a failed build silently
	// re-ran whatever stale binary was left from an earlier run.
	if err := gccCmd.Run(); err != nil {
		fmt.Println("compile failed:", err)
		return
	}
	runCmd := exec.Command("./testfiles/c++/out")
	err := runCmd.Run()
	fmt.Println(err)
}
Actually call MarkInvalid — it was previously never invoked.
package main
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"regexp"
"path/filepath"
"strings"
)
// main reads the source file named on the command line, marks lines that lie
// outside function bodies as invalid, interleaves every line with a print
// statement echoing it, writes the result to testfiles/c++/debug.cpp, then
// compiles and runs it.
func main() {
	if len(os.Args) < 2 {
		fmt.Println("usage: debug <sourcefile>")
		os.Exit(1)
	}
	sourceFile, err := ioutil.ReadFile(os.Args[1])
	if err != nil {
		// Previously a read failure was silently ignored and an empty debug
		// file was generated; fail loudly instead.
		fmt.Println(err)
		os.Exit(1)
	}
	s := string(sourceFile)
	fmt.Println(s)
	// filepath.Ext includes the leading dot; the old comparison to "c" was
	// always false, so C sources were treated as C++.
	cfile := filepath.Ext(os.Args[1]) == ".c"
	splitFile := RemoveNewlines(strings.Split(s, "\n"))
	lineSlice := CreateLines(splitFile)
	MarkInvalid(lineSlice)
	debugFile, err := os.Create("testfiles/c++/debug.cpp")
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	for i := 0; i < len(lineSlice); i++ {
		debugFile.WriteString(lineSlice[i].s + "\n")
		if cfile {
			AddPrintc(debugFile, lineSlice[i].s)
		} else {
			AddPrint(debugFile, lineSlice[i])
		}
	}
	debugFile.Close()
	CompileAndRun()
}
// lineType pairs a source line with a validity code:
// code == 0 means the line should be echoed by AddPrint, non-zero (set by
// MarkInvalid) means it lies outside a function body and must be skipped.
type lineType struct {
	s    string
	code int
}
// AddPrint emits a C++ statement that echoes the given source line at
// runtime, but only for lines marked valid (code == 0).
// NOTE(review): line.s is embedded verbatim; quotes or backslashes in the
// source line will still break the generated literal — confirm inputs.
func AddPrint(file *os.File, line lineType) {
	if line.code == 0 {
		// The generated statement was missing the "<<" before std::endl,
		// producing invalid C++.
		file.WriteString("std::cout << \"" + line.s + "\" << std::endl;\n")
	}
}
// MarkInvalid flags lines that are not inside a function body by setting
// their code field to 1. It tracks nesting by counting lines that look like
// a function header ("...(...)...{") against bare "}" lines; a line is
// invalid when no function is currently open.
func MarkInvalid(lineSlice []lineType) {
	r, _ := regexp.Compile(`(.*)\((.*)\)(.*)\{`)
	opencount := 0
	closecount := 0
	for i := 0; i < len(lineSlice); i++ {
		// (removed leftover debug fmt.Println of the counters)
		if opencount <= closecount {
			lineSlice[i].code = 1
		}
		if r.MatchString(lineSlice[i].s) {
			opencount++
		} else if lineSlice[i].s == "}" {
			closecount++
		}
	}
}
func AddPrintc(file *os.File, s string) {
file.WriteString("printf(\"" + s + "\n\");\n")
}
// CreateLines converts raw source lines into lineType records, all initially
// marked valid (code == 0).
func CreateLines(stringSlice []string) []lineType {
	lineSlice := make([]lineType, 0, len(stringSlice))
	for _, line := range stringSlice {
		lineSlice = append(lineSlice, lineType{line, 0})
	}
	return lineSlice
}
func RemoveNewlines(stringSlice []string) []string {
var newSlice []string
for i := 0; i < len(stringSlice); i++ {
if stringSlice[i] != "" && stringSlice[i][0] != 10 {
newSlice = append(newSlice, stringSlice[i])
}
}
return newSlice
}
// CompileAndRun compiles the generated debug source with g++ and, if the
// compile succeeds, executes the resulting binary, printing the run error
// (or nil) as before.
func CompileAndRun() {
	gccCmd := exec.Command("g++", "-Wall", "testfiles/c++/debug.cpp", "-o", "testfiles/c++/out")
	// Previously the compile error was discarded, so a failed build silently
	// re-ran whatever stale binary was left from an earlier run.
	if err := gccCmd.Run(); err != nil {
		fmt.Println("compile failed:", err)
		return
	}
	runCmd := exec.Command("./testfiles/c++/out")
	err := runCmd.Run()
	fmt.Println(err)
}
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package model
import (
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"k8s.io/klog/v2"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/model"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/pkg/apis/nodeup"
"k8s.io/kops/pkg/systemd"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
"k8s.io/kops/util/pkg/architectures"
"k8s.io/kops/util/pkg/distributions"
"k8s.io/kops/util/pkg/vfs"
"k8s.io/mount-utils"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/blang/semver/v4"
)
const (
	// ConfigurationModeWarming is the ConfigurationMode value indicating the
	// instance is being prewarmed rather than run live (see WarmPullImage).
	ConfigurationModeWarming string = "Warming"
)
// NodeupModelContext is the context supplied the nodeup tasks
type NodeupModelContext struct {
	Cloud        fi.Cloud
	Architecture architectures.Architecture
	Assets       *fi.AssetStore
	Cluster      *kops.Cluster
	ConfigBase   vfs.Path
	Distribution distributions.Distribution
	KeyStore     fi.CAStore
	NodeupConfig *nodeup.Config
	NodeupAuxConfig *nodeup.AuxConfig
	SecretStore  fi.SecretStore

	// IsMaster is true if the InstanceGroup has a role of master (populated by Init)
	IsMaster bool
	// HasAPIServer is true if the InstanceGroup has a role of master or apiserver (populated by Init)
	HasAPIServer bool

	// kubernetesVersion is the parsed cluster version (populated by Init).
	kubernetesVersion semver.Version
	// bootstrapCerts caches BootstrapCert resources by name (populated by Init,
	// filled lazily by GetBootstrapCert).
	bootstrapCerts map[string]*nodetasks.BootstrapCert

	// ConfigurationMode determines if we are prewarming an instance or running it live
	ConfigurationMode string
	// InstanceID identifies this instance to the cloud provider.
	InstanceID string
}
// Init completes initialization of the object, for example pre-parsing the
// kubernetes version and deriving the role flags from the nodeup config.
func (c *NodeupModelContext) Init() error {
	k8sVersion, err := util.ParseKubernetesVersion(c.Cluster.Spec.KubernetesVersion)
	if err != nil || k8sVersion == nil {
		return fmt.Errorf("unable to parse KubernetesVersion %q", c.Cluster.Spec.KubernetesVersion)
	}
	c.kubernetesVersion = *k8sVersion
	c.bootstrapCerts = map[string]*nodetasks.BootstrapCert{}

	switch c.NodeupConfig.InstanceGroupRole {
	case kops.InstanceGroupRoleMaster:
		// A master runs an apiserver as well.
		c.IsMaster = true
		c.HasAPIServer = true
	case kops.InstanceGroupRoleAPIServer:
		c.HasAPIServer = true
	}
	return nil
}
// SSLHostPaths returns the TLS paths for the distribution.
func (c *NodeupModelContext) SSLHostPaths() []string {
	paths := []string{"/etc/ssl", "/etc/pki/tls", "/etc/pki/ca-trust"}
	switch c.Distribution {
	case distributions.DistributionFlatcar, distributions.DistributionContainerOS:
		// On Flatcar /usr is read-only, so we can't have any new directories;
		// docker will try (and fail) to create them. ContainerOS gets the same
		// restricted list.
		// TODO: Just check if the directories exist?
		paths = append(paths, "/usr/share/ca-certificates")
	default:
		paths = append(paths, "/usr/share/ssl", "/usr/ssl", "/usr/lib/ssl", "/usr/local/openssl", "/var/ssl", "/etc/openssl")
	}
	return paths
}
// VolumesServiceName is the name of the service which is downstream of any
// volume mounts; the suffix is normalized by EnsureSystemdSuffix.
func (c *NodeupModelContext) VolumesServiceName() string {
	return c.EnsureSystemdSuffix("kops-volume-mounts")
}
// EnsureSystemdSuffix ensures that the hook name ends with a valid systemd
// unit file extension. If it doesn't, it adds ".service" for
// backwards-compatibility with older versions of Kops.
func (c *NodeupModelContext) EnsureSystemdSuffix(name string) string {
	if systemd.UnitFileExtensionValid(name) {
		return name
	}
	return name + ".service"
}
// EnsureDirectory ensures the directory exists or creates it (mode 0755).
// It fails when the path exists but is not a directory.
func (c *NodeupModelContext) EnsureDirectory(path string) error {
	st, err := os.Stat(path)
	if os.IsNotExist(err) {
		return os.MkdirAll(path, 0755)
	}
	if err != nil {
		return err
	}
	if !st.IsDir() {
		return fmt.Errorf("path: %s already exists but is not a directory", path)
	}
	return nil
}
// IsMounted reports whether the given device is mounted at path, comparing
// mountpoints with trailing slashes normalized away.
func (c *NodeupModelContext) IsMounted(m mount.Interface, device, path string) (bool, error) {
	list, err := m.List()
	if err != nil {
		return false, err
	}
	for _, x := range list {
		if x.Device == device {
			klog.V(3).Infof("Found mountpoint device: %s, path: %s, type: %s", x.Device, x.Path, x.Type)
			if strings.TrimSuffix(x.Path, "/") == strings.TrimSuffix(path, "/") {
				return true, nil
			}
		}
	}
	return false, nil
}
// PathSrvKubernetes returns the path for the kubernetes service files.
func (c *NodeupModelContext) PathSrvKubernetes() string {
	if c.Distribution == distributions.DistributionContainerOS {
		return "/etc/srv/kubernetes"
	}
	return "/srv/kubernetes"
}
// FileAssetsDefaultPath is the default location for assets which have no
// path: the "assets" subdirectory of PathSrvKubernetes.
func (c *NodeupModelContext) FileAssetsDefaultPath() string {
	return filepath.Join(c.PathSrvKubernetes(), "assets")
}
// PathSrvSshproxy returns the path for the SSH proxy.
func (c *NodeupModelContext) PathSrvSshproxy() string {
	if c.Distribution == distributions.DistributionContainerOS {
		return "/etc/srv/sshproxy"
	}
	return "/srv/sshproxy"
}
// KubeletBootstrapKubeconfig is the path of the bootstrap config file.
// The MasterKubelet override wins on masters; an empty result falls back to
// the conventional default path.
func (c *NodeupModelContext) KubeletBootstrapKubeconfig() string {
	path := c.Cluster.Spec.Kubelet.BootstrapKubeconfig
	if c.IsMaster && c.Cluster.Spec.MasterKubelet != nil && c.Cluster.Spec.MasterKubelet.BootstrapKubeconfig != "" {
		path = c.Cluster.Spec.MasterKubelet.BootstrapKubeconfig
	}
	if path == "" {
		return "/var/lib/kubelet/bootstrap-kubeconfig"
	}
	return path
}
// KubeletKubeConfig is the path of the kubelet kubeconfig file.
func (c *NodeupModelContext) KubeletKubeConfig() string {
	return "/var/lib/kubelet/kubeconfig"
}
// BuildIssuedKubeconfig generates a kubeconfig with a locally issued client certificate.
// It registers an IssueCert task (client cert signed by the cluster CA) and a
// KubeConfig task consuming the resulting cert/key/CA, and returns the
// task-dependent resource that will hold the rendered kubeconfig.
func (c *NodeupModelContext) BuildIssuedKubeconfig(name string, subject nodetasks.PKIXName, ctx *fi.ModelBuilderContext) *fi.TaskDependentResource {
	issueCert := &nodetasks.IssueCert{
		Name:    name,
		Signer:  fi.CertificateIDCA,
		Type:    "client",
		Subject: subject,
	}
	ctx.AddTask(issueCert)
	certResource, keyResource, caResource := issueCert.GetResources()
	kubeConfig := &nodetasks.KubeConfig{
		Name: name,
		Cert: certResource,
		Key:  keyResource,
		CA:   caResource,
	}
	if c.HasAPIServer {
		// @note: use https even for local connections, so we can turn off the insecure port
		kubeConfig.ServerURL = "https://127.0.0.1"
	} else {
		kubeConfig.ServerURL = "https://" + c.Cluster.Spec.MasterInternalName
	}
	ctx.AddTask(kubeConfig)
	return kubeConfig.GetConfig()
}
// GetBootstrapCert requests a certificate keypair from kops-controller.
// The returned resources are lazily-created placeholders cached in
// c.bootstrapCerts; repeated calls with the same name return the same pair.
func (c *NodeupModelContext) GetBootstrapCert(name string) (cert, key fi.Resource) {
	b, ok := c.bootstrapCerts[name]
	if !ok {
		b = &nodetasks.BootstrapCert{
			Cert: &fi.TaskDependentResource{},
			Key:  &fi.TaskDependentResource{},
		}
		c.bootstrapCerts[name] = b
	}
	return b.Cert, b.Key
}
// BuildBootstrapKubeconfig generates a kubeconfig with a client certificate from either kops-controller or the state store.
//
// With kops-controller bootstrap enabled, the cert/key are task-dependent
// resources and the kubeconfig is rendered by an (idempotently ensured)
// KubeConfig task. Otherwise the cert, key and CA are fetched directly from
// the keystore and the kubeconfig is rendered immediately via Run(nil).
func (c *NodeupModelContext) BuildBootstrapKubeconfig(name string, ctx *fi.ModelBuilderContext) (fi.Resource, error) {
	if c.UseKopsControllerForNodeBootstrap() {
		cert, key := c.GetBootstrapCert(name)
		ca, err := c.GetCert(fi.CertificateIDCA)
		if err != nil {
			return nil, err
		}
		kubeConfig := &nodetasks.KubeConfig{
			Name: name,
			Cert: cert,
			Key:  key,
			CA:   fi.NewBytesResource(ca),
		}
		if c.HasAPIServer {
			// @note: use https even for local connections, so we can turn off the insecure port
			kubeConfig.ServerURL = "https://127.0.0.1"
		} else {
			kubeConfig.ServerURL = "https://" + c.Cluster.Spec.MasterInternalName
		}
		// EnsureTask so multiple callers can share the same kubeconfig task.
		err = ctx.EnsureTask(kubeConfig)
		if err != nil {
			return nil, err
		}
		return kubeConfig.GetConfig(), nil
	} else {
		ca, err := c.GetCert(fi.CertificateIDCA)
		if err != nil {
			return nil, err
		}
		cert, err := c.GetCert(name)
		if err != nil {
			return nil, err
		}
		key, err := c.GetPrivateKey(name)
		if err != nil {
			return nil, err
		}
		kubeConfig := &nodetasks.KubeConfig{
			Name: name,
			Cert: fi.NewBytesResource(cert),
			Key:  fi.NewBytesResource(key),
			CA:   fi.NewBytesResource(ca),
		}
		if c.HasAPIServer {
			// @note: use https even for local connections, so we can turn off the insecure port
			// This code path is used for the kubelet cert in Kubernetes 1.18 and earlier.
			kubeConfig.ServerURL = "https://127.0.0.1"
		} else {
			kubeConfig.ServerURL = "https://" + c.Cluster.Spec.MasterInternalName
		}
		// Render the kubeconfig now rather than registering a task.
		err = kubeConfig.Run(nil)
		if err != nil {
			return nil, err
		}
		config, err := fi.ResourceAsBytes(kubeConfig.GetConfig())
		if err != nil {
			return nil, err
		}
		return fi.NewBytesResource(config), nil
	}
}
// IsKubernetesGTE checks if the cluster version is greater-than-or-equal to
// the given version. It fatals if Init has not populated kubernetesVersion
// (Major == 0 is used as the sentinel for "unset").
func (c *NodeupModelContext) IsKubernetesGTE(version string) bool {
	if c.kubernetesVersion.Major == 0 {
		klog.Fatalf("kubernetesVersion not set (%s); Init not called", c.kubernetesVersion)
	}
	return util.IsKubernetesGTE(version, c.kubernetesVersion)
}
// IsKubernetesLT checks if the cluster version is less-than the given
// version; defined as the negation of IsKubernetesGTE.
func (c *NodeupModelContext) IsKubernetesLT(version string) bool {
	if c.kubernetesVersion.Major == 0 {
		klog.Fatalf("kubernetesVersion not set (%s); Init not called", c.kubernetesVersion)
	}
	return !c.IsKubernetesGTE(version)
}
// UseEtcdManager reports whether any etcd cluster is managed by etcd-manager.
func (c *NodeupModelContext) UseEtcdManager() bool {
	for _, etcdCluster := range c.Cluster.Spec.EtcdClusters {
		if etcdCluster.Provider == kops.EtcdProviderTypeManager {
			return true
		}
	}
	return false
}
// UseEtcdTLS reports whether any etcd cluster has TLS enabled.
// @note: because we enforce that 'both' have to be enabled for TLS we only
// need to find one here.
func (c *NodeupModelContext) UseEtcdTLS() bool {
	for _, etcdCluster := range c.Cluster.Spec.EtcdClusters {
		if etcdCluster.EnableEtcdTLS {
			return true
		}
	}
	return false
}
// UseVolumeMounts is used to check if we have volume mounts enabled as we need to
// insert requires and afters in various places
func (c *NodeupModelContext) UseVolumeMounts() bool {
	return len(c.NodeupConfig.VolumeMounts) > 0
}
// UseEtcdTLSAuth reports whether etcd peer TLS auth is enabled; it is only
// meaningful when TLS itself is enabled.
// @NOTE: in retrospect i think we should have consolidated the common config
// in the wrapper struct; it feels weird we set things like version, tls etc
// per cluster since they both have to be the same.
func (c *NodeupModelContext) UseEtcdTLSAuth() bool {
	if !c.UseEtcdTLS() {
		return false
	}
	for _, etcdCluster := range c.Cluster.Spec.EtcdClusters {
		if etcdCluster.EnableTLSAuth {
			return true
		}
	}
	return false
}
// UseKopsControllerForNodeBootstrap checks if nodeup should use
// kops-controller to bootstrap; the decision is delegated to the shared
// model helper so nodeup and cloudup agree.
func (c *NodeupModelContext) UseKopsControllerForNodeBootstrap() bool {
	return model.UseKopsControllerForNodeBootstrap(c.Cluster)
}
// UsesSecondaryIP checks if the CNI in use attaches secondary interfaces to the host.
func (c *NodeupModelContext) UsesSecondaryIP() bool {
	networking := c.Cluster.Spec.Networking
	if networking.CNI != nil && networking.CNI.UsesSecondaryIP {
		return true
	}
	if networking.AmazonVPC != nil || networking.LyftVPC != nil {
		return true
	}
	return networking.Cilium != nil && networking.Cilium.Ipam == kops.CiliumIpamEni
}
// UseBootstrapTokens checks if we are using bootstrap tokens.
// NOTE(review): on the apiserver path this dereferences
// c.Cluster.Spec.KubeAPIServer without a nil check — presumably it is always
// populated by the time nodeup runs on an apiserver node; confirm.
func (c *NodeupModelContext) UseBootstrapTokens() bool {
	if c.HasAPIServer {
		return fi.BoolValue(c.Cluster.Spec.KubeAPIServer.EnableBootstrapAuthToken)
	}
	return c.Cluster.Spec.Kubelet != nil && c.Cluster.Spec.Kubelet.BootstrapKubeconfig != ""
}
// KubectlPath returns the distro-specific directory holding kubectl.
func (c *NodeupModelContext) KubectlPath() string {
	switch c.Distribution {
	case distributions.DistributionFlatcar:
		return "/opt/kops/bin"
	case distributions.DistributionContainerOS:
		return "/home/kubernetes/bin"
	default:
		return "/usr/local/bin"
	}
}
// BuildCertificatePairTask creates the tasks to create the certificate and
// private key files, written as <filename>.pem and <filename>-key.pem under
// path.
func (c *NodeupModelContext) BuildCertificatePairTask(ctx *fi.ModelBuilderContext, key, path, filename string, owner *string) error {
	certificateName := filepath.Join(path, filename+".pem")
	keyName := filepath.Join(path, filename+"-key.pem")
	if err := c.BuildCertificateTask(ctx, key, certificateName, owner); err != nil {
		return err
	}
	return c.BuildPrivateKeyTask(ctx, key, keyName, owner)
}
// BuildCertificateTask builds a task to create a certificate file.
// A relative filename is resolved against PathSrvKubernetes; the file is
// written mode 0600 with the given owner.
func (c *NodeupModelContext) BuildCertificateTask(ctx *fi.ModelBuilderContext, name, filename string, owner *string) error {
	cert, err := c.KeyStore.FindCert(name)
	if err != nil {
		return err
	}
	if cert == nil {
		return fmt.Errorf("certificate %q not found", name)
	}
	serialized, err := cert.AsString()
	if err != nil {
		return err
	}
	p := filename
	if !filepath.IsAbs(p) {
		p = filepath.Join(c.PathSrvKubernetes(), filename)
	}
	ctx.AddTask(&nodetasks.File{
		Path:     p,
		Contents: fi.NewStringResource(serialized),
		Type:     nodetasks.FileType_File,
		Mode:     s("0600"),
		Owner:    owner,
	})
	return nil
}
// BuildPrivateKeyTask builds a task to create a private key file.
// A relative filename is resolved against PathSrvKubernetes; the file is
// written mode 0600 with the given owner.
func (c *NodeupModelContext) BuildPrivateKeyTask(ctx *fi.ModelBuilderContext, name, filename string, owner *string) error {
	key, err := c.KeyStore.FindPrivateKey(name)
	if err != nil {
		return err
	}
	if key == nil {
		return fmt.Errorf("private key %q not found", name)
	}
	serialized, err := key.AsString()
	if err != nil {
		return err
	}
	destination := filename
	if !filepath.IsAbs(destination) {
		destination = filepath.Join(c.PathSrvKubernetes(), filename)
	}
	ctx.AddTask(&nodetasks.File{
		Path:     destination,
		Contents: fi.NewStringResource(serialized),
		Type:     nodetasks.FileType_File,
		Mode:     s("0600"),
		Owner:    owner,
	})
	return nil
}
// NodeName returns the name of the local Node, as it will be created in k8s
func (c *NodeupModelContext) NodeName() (string, error) {
	// This mirrors nodeutil.GetHostName
	hostnameOverride := c.Cluster.Spec.Kubelet.HostnameOverride
	// Guard against a nil MasterKubelet, matching the check already done in
	// KubeletBootstrapKubeconfig (previously this dereferenced it directly).
	if c.IsMaster && c.Cluster.Spec.MasterKubelet != nil && c.Cluster.Spec.MasterKubelet.HostnameOverride != "" {
		hostnameOverride = c.Cluster.Spec.MasterKubelet.HostnameOverride
	}
	nodeName, err := EvaluateHostnameOverride(hostnameOverride)
	if err != nil {
		return "", fmt.Errorf("error evaluating hostname: %v", err)
	}
	if nodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			klog.Fatalf("Couldn't determine hostname: %v", err)
		}
		nodeName = hostname
	}
	return strings.ToLower(strings.TrimSpace(nodeName)), nil
}
// EvaluateHostnameOverride returns the hostname after replacing some
// well-known placeholders: "" and "@hostname" mean "use the OS hostname"
// (empty result), "@aws" resolves to the instance's private DNS name via the
// AWS metadata service and EC2 API, and anything else is returned verbatim.
func EvaluateHostnameOverride(hostnameOverride string) (string, error) {
	if hostnameOverride == "" || hostnameOverride == "@hostname" {
		return "", nil
	}
	k := strings.TrimSpace(hostnameOverride)
	k = strings.ToLower(k)
	if k != "@aws" {
		return hostnameOverride, nil
	}
	// We recognize @aws as meaning "the private DNS name from AWS", to generate this we need to get a few pieces of information
	azBytes, err := vfs.Context.ReadFile("metadata://aws/meta-data/placement/availability-zone")
	if err != nil {
		return "", fmt.Errorf("error reading availability zone from AWS metadata: %v", err)
	}
	instanceIDBytes, err := vfs.Context.ReadFile("metadata://aws/meta-data/instance-id")
	if err != nil {
		return "", fmt.Errorf("error reading instance-id from AWS metadata: %v", err)
	}
	instanceID := string(instanceIDBytes)
	config := aws.NewConfig()
	config = config.WithCredentialsChainVerboseErrors(true)
	s, err := session.NewSession(config)
	if err != nil {
		return "", fmt.Errorf("error starting new AWS session: %v", err)
	}
	// The region is the availability zone minus its trailing zone letter.
	svc := ec2.New(s, config.WithRegion(string(azBytes[:len(azBytes)-1])))
	result, err := svc.DescribeInstances(&ec2.DescribeInstancesInput{
		InstanceIds: []*string{&instanceID},
	})
	if err != nil {
		return "", fmt.Errorf("error describing instances: %v", err)
	}
	if len(result.Reservations) != 1 {
		return "", fmt.Errorf("too many reservations returned for the single instance-id")
	}
	if len(result.Reservations[0].Instances) != 1 {
		return "", fmt.Errorf("too many instances returned for the single instance-id")
	}
	return *(result.Reservations[0].Instances[0].PrivateDnsName), nil
}
// GetCert is a helper method to retrieve a certificate from the store,
// returned in serialized (PEM) form.
func (c *NodeupModelContext) GetCert(name string) ([]byte, error) {
	cert, err := c.KeyStore.FindCert(name)
	if err != nil {
		return []byte{}, fmt.Errorf("error fetching certificate: %v from keystore: %v", name, err)
	}
	if cert == nil {
		return []byte{}, fmt.Errorf("unable to find certificate: %s", name)
	}
	return cert.AsBytes()
}
// GetPrivateKey is a helper method to retrieve a private key from the store,
// returned in serialized (PEM) form.
func (c *NodeupModelContext) GetPrivateKey(name string) ([]byte, error) {
	key, err := c.KeyStore.FindPrivateKey(name)
	if err != nil {
		return []byte{}, fmt.Errorf("error fetching private key: %v from keystore: %v", name, err)
	}
	if key == nil {
		return []byte{}, fmt.Errorf("unable to find private key: %s", name)
	}
	return key.AsBytes()
}
// AddCNIBinAssets adds a file task for each named CNI asset, matching each
// name exactly (the name is quoted into an anchored regexp).
func (b *NodeupModelContext) AddCNIBinAssets(c *fi.ModelBuilderContext, assetNames []string) error {
	for _, assetName := range assetNames {
		re, err := regexp.Compile(fmt.Sprintf("^%s$", regexp.QuoteMeta(assetName)))
		if err != nil {
			return err
		}
		if err := b.addCNIBinAsset(c, re); err != nil {
			return err
		}
	}
	return nil
}
// addCNIBinAsset locates a single asset matching assetPath in the asset
// store and adds a task installing it (mode 0755) into CNIBinDir.
func (b *NodeupModelContext) addCNIBinAsset(c *fi.ModelBuilderContext, assetPath *regexp.Regexp) error {
	name, res, err := b.Assets.FindMatch(assetPath)
	if err != nil {
		return err
	}
	c.AddTask(&nodetasks.File{
		Path:     filepath.Join(b.CNIBinDir(), name),
		Contents: res,
		Type:     nodetasks.FileType_File,
		Mode:     fi.String("0755"),
	})
	return nil
}
// UsesCNI checks if the cluster has CNI configured: any networking spec
// other than nil or Classic counts as CNI.
func (c *NodeupModelContext) UsesCNI() bool {
	networking := c.Cluster.Spec.Networking
	return networking != nil && networking.Classic == nil
}
// CNIBinDir returns the path for the CNI binaries
func (c *NodeupModelContext) CNIBinDir() string {
	// We used to map this on a per-distro basis, but this can require CNI manifests to be distro aware
	return "/opt/cni/bin/"
}
// CNIConfDir returns the CNI config directory.
func (c *NodeupModelContext) CNIConfDir() string {
	return "/etc/cni/net.d/"
}
// WarmPullImage adds a task to pre-pull the given container image when the
// node is being prewarmed, so the image is already cached when the instance
// goes live. It is a no-op outside warming mode.
func (c *NodeupModelContext) WarmPullImage(ctx *fi.ModelBuilderContext, imageName string) {
	// Compare against the declared constant instead of duplicating the
	// "Warming" string literal.
	if c.ConfigurationMode == ConfigurationModeWarming {
		image := &nodetasks.PullImageTask{
			Name:    imageName,
			Runtime: c.Cluster.Spec.ContainerRuntime,
		}
		ctx.AddTask(image)
	}
}
Include multiple CA certificates in bootstrap kubeconfigs
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package model
import (
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"k8s.io/klog/v2"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/model"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/pkg/apis/nodeup"
"k8s.io/kops/pkg/systemd"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
"k8s.io/kops/util/pkg/architectures"
"k8s.io/kops/util/pkg/distributions"
"k8s.io/kops/util/pkg/vfs"
"k8s.io/mount-utils"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/blang/semver/v4"
)
const (
ConfigurationModeWarming string = "Warming"
)
// NodeupModelContext is the context supplied the nodeup tasks
type NodeupModelContext struct {
Cloud fi.Cloud
Architecture architectures.Architecture
Assets *fi.AssetStore
Cluster *kops.Cluster
ConfigBase vfs.Path
Distribution distributions.Distribution
KeyStore fi.CAStore
NodeupConfig *nodeup.Config
NodeupAuxConfig *nodeup.AuxConfig
SecretStore fi.SecretStore
// IsMaster is true if the InstanceGroup has a role of master (populated by Init)
IsMaster bool
// HasAPIServer is true if the InstanceGroup has a role of master or apiserver (populated by Init)
HasAPIServer bool
// kubernetesVersion is the parsed cluster KubernetesVersion (populated by Init)
kubernetesVersion semver.Version
// bootstrapCerts caches BootstrapCert tasks by name (populated by Init; see GetBootstrapCert)
bootstrapCerts map[string]*nodetasks.BootstrapCert
// ConfigurationMode determines if we are prewarming an instance or running it live
ConfigurationMode string
// InstanceID is this node's instance identifier — set by callers, not by Init
InstanceID string
}
// Init completes initialization of the object, for example pre-parsing the
// kubernetes version and deriving the role flags from the instance group role.
func (c *NodeupModelContext) Init() error {
	k8sVersion, err := util.ParseKubernetesVersion(c.Cluster.Spec.KubernetesVersion)
	if err != nil {
		// Preserve the parser's diagnostic instead of discarding it.
		return fmt.Errorf("unable to parse KubernetesVersion %q: %w", c.Cluster.Spec.KubernetesVersion, err)
	}
	if k8sVersion == nil {
		return fmt.Errorf("unable to parse KubernetesVersion %q", c.Cluster.Spec.KubernetesVersion)
	}
	c.kubernetesVersion = *k8sVersion
	c.bootstrapCerts = map[string]*nodetasks.BootstrapCert{}

	role := c.NodeupConfig.InstanceGroupRole
	if role == kops.InstanceGroupRoleMaster {
		c.IsMaster = true
	}
	// Both masters and dedicated apiserver nodes run an API server.
	if role == kops.InstanceGroupRoleMaster || role == kops.InstanceGroupRoleAPIServer {
		c.HasAPIServer = true
	}
	return nil
}
// SSLHostPaths returns the TLS paths for the distribution
func (c *NodeupModelContext) SSLHostPaths() []string {
	paths := []string{"/etc/ssl", "/etc/pki/tls", "/etc/pki/ca-trust"}

	switch c.Distribution {
	case distributions.DistributionFlatcar, distributions.DistributionContainerOS:
		// /usr is read-only on Flatcar, so docker would fail creating new
		// directories there; ContainerOS uses the same certificate path.
		// TODO: Just check if the directories exist?
		paths = append(paths, "/usr/share/ca-certificates")
	default:
		paths = append(paths,
			"/usr/share/ssl",
			"/usr/ssl",
			"/usr/lib/ssl",
			"/usr/local/openssl",
			"/var/ssl",
			"/etc/openssl",
		)
	}

	return paths
}
// VolumesServiceName is the name of the service which is downstream of any volume mounts
func (c *NodeupModelContext) VolumesServiceName() string {
	const unitName = "kops-volume-mounts"
	return c.EnsureSystemdSuffix(unitName)
}
// EnsureSystemdSuffix ensures that the hook name ends with a valid systemd
// unit file extension, appending ".service" for backwards-compatibility with
// older versions of Kops when it does not.
func (c *NodeupModelContext) EnsureSystemdSuffix(name string) string {
	if systemd.UnitFileExtensionValid(name) {
		return name
	}
	return name + ".service"
}
// EnsureDirectory ensures the directory exists, creating it (and any missing
// parents) with mode 0755 when absent. It fails if the path exists but is a
// regular file.
func (c *NodeupModelContext) EnsureDirectory(path string) error {
	info, err := os.Stat(path)
	if os.IsNotExist(err) {
		return os.MkdirAll(path, 0755)
	}
	if err != nil {
		return err
	}
	if !info.IsDir() {
		return fmt.Errorf("path: %s already exists but is not a directory", path)
	}
	return nil
}
// IsMounted reports whether the given device is mounted at the given path,
// comparing paths with any trailing slash stripped.
func (c *NodeupModelContext) IsMounted(m mount.Interface, device, path string) (bool, error) {
	mounts, err := m.List()
	if err != nil {
		return false, err
	}
	want := strings.TrimSuffix(path, "/")
	for _, mp := range mounts {
		if mp.Device != device {
			continue
		}
		klog.V(3).Infof("Found mountpoint device: %s, path: %s, type: %s", mp.Device, mp.Path, mp.Type)
		if strings.TrimSuffix(mp.Path, "/") == want {
			return true, nil
		}
	}
	return false, nil
}
// PathSrvKubernetes returns the path for the kubernetes service files.
// ContainerOS keeps them under /etc because /srv is not writable there.
func (c *NodeupModelContext) PathSrvKubernetes() string {
	if c.Distribution == distributions.DistributionContainerOS {
		return "/etc/srv/kubernetes"
	}
	return "/srv/kubernetes"
}
// FileAssetsDefaultPath is the default location for assets which have no path
func (c *NodeupModelContext) FileAssetsDefaultPath() string {
	base := c.PathSrvKubernetes()
	return filepath.Join(base, "assets")
}
// PathSrvSshproxy returns the path for the SSH proxy.
// ContainerOS keeps it under /etc because /srv is not writable there.
func (c *NodeupModelContext) PathSrvSshproxy() string {
	if c.Distribution == distributions.DistributionContainerOS {
		return "/etc/srv/sshproxy"
	}
	return "/srv/sshproxy"
}
// KubeletBootstrapKubeconfig returns the path of the kubelet bootstrap
// kubeconfig: the master-specific override when set, otherwise the cluster
// kubelet setting, otherwise a fixed default.
func (c *NodeupModelContext) KubeletBootstrapKubeconfig() string {
	path := c.Cluster.Spec.Kubelet.BootstrapKubeconfig

	if c.IsMaster && c.Cluster.Spec.MasterKubelet != nil && c.Cluster.Spec.MasterKubelet.BootstrapKubeconfig != "" {
		path = c.Cluster.Spec.MasterKubelet.BootstrapKubeconfig
	}

	if path == "" {
		return "/var/lib/kubelet/bootstrap-kubeconfig"
	}
	return path
}
// KubeletKubeConfig is the path of the kubelet kubeconfig file
func (c *NodeupModelContext) KubeletKubeConfig() string {
	const kubeconfigPath = "/var/lib/kubelet/kubeconfig"
	return kubeconfigPath
}
// BuildIssuedKubeconfig generates a kubeconfig with a locally issued client certificate.
// It registers two tasks on ctx: one issuing a client certificate signed by the
// primary CA, and one assembling the kubeconfig from the resulting resources.
// The returned resource is resolved when the kubeconfig task runs.
func (c *NodeupModelContext) BuildIssuedKubeconfig(name string, subject nodetasks.PKIXName, ctx *fi.ModelBuilderContext) *fi.TaskDependentResource {
issueCert := &nodetasks.IssueCert{
Name: name,
Signer: fi.CertificateIDCA,
Type: "client",
Subject: subject,
}
ctx.AddTask(issueCert)
// The cert/key/CA resources are task-dependent: they are populated when
// issueCert runs, so the kubeconfig task must be added afterwards.
certResource, keyResource, caResource := issueCert.GetResources()
kubeConfig := &nodetasks.KubeConfig{
Name: name,
Cert: certResource,
Key: keyResource,
CA: caResource,
}
if c.HasAPIServer {
// @note: use https even for local connections, so we can turn off the insecure port
kubeConfig.ServerURL = "https://127.0.0.1"
} else {
kubeConfig.ServerURL = "https://" + c.Cluster.Spec.MasterInternalName
}
ctx.AddTask(kubeConfig)
return kubeConfig.GetConfig()
}
// GetBootstrapCert returns the (task-dependent) certificate and key resources
// for a keypair requested from kops-controller, creating and caching the
// placeholder on first use so repeated calls share one BootstrapCert.
func (c *NodeupModelContext) GetBootstrapCert(name string) (cert, key fi.Resource) {
	if cached, ok := c.bootstrapCerts[name]; ok {
		return cached.Cert, cached.Key
	}
	entry := &nodetasks.BootstrapCert{
		Cert: &fi.TaskDependentResource{},
		Key:  &fi.TaskDependentResource{},
	}
	c.bootstrapCerts[name] = entry
	return entry.Cert, entry.Key
}
// BuildBootstrapKubeconfig generates a kubeconfig with a client certificate from either kops-controller or the state store.
// In the kops-controller path the kubeconfig is registered as a task (deduplicated
// via EnsureTask) and resolved later; in the state-store path the keypair is
// fetched immediately, the kubeconfig is rendered in-place via Run(nil), and the
// serialized bytes are returned.
func (c *NodeupModelContext) BuildBootstrapKubeconfig(name string, ctx *fi.ModelBuilderContext) (fi.Resource, error) {
if c.UseKopsControllerForNodeBootstrap() {
// Cert/key are task-dependent placeholders filled in by the bootstrap task.
cert, key := c.GetBootstrapCert(name)
kubeConfig := &nodetasks.KubeConfig{
Name: name,
Cert: cert,
Key: key,
CA: fi.NewStringResource(c.NodeupConfig.CAs[fi.CertificateIDCA]),
}
if c.HasAPIServer {
// @note: use https even for local connections, so we can turn off the insecure port
kubeConfig.ServerURL = "https://127.0.0.1"
} else {
kubeConfig.ServerURL = "https://" + c.Cluster.Spec.MasterInternalName
}
// EnsureTask (not AddTask): the same kubeconfig may be requested by
// multiple builders.
err := ctx.EnsureTask(kubeConfig)
if err != nil {
return nil, err
}
return kubeConfig.GetConfig(), nil
} else {
cert, key, err := c.GetPrimaryKeypair(name)
if err != nil {
return nil, err
}
kubeConfig := &nodetasks.KubeConfig{
Name: name,
Cert: fi.NewBytesResource(cert),
Key: fi.NewBytesResource(key),
CA: fi.NewStringResource(c.NodeupConfig.CAs[fi.CertificateIDCA]),
}
if c.HasAPIServer {
// @note: use https even for local connections, so we can turn off the insecure port
// This code path is used for the kubelet cert in Kubernetes 1.18 and earlier.
kubeConfig.ServerURL = "https://127.0.0.1"
} else {
kubeConfig.ServerURL = "https://" + c.Cluster.Spec.MasterInternalName
}
// Render synchronously (no target) so the config bytes are available now.
err = kubeConfig.Run(nil)
if err != nil {
return nil, err
}
config, err := fi.ResourceAsBytes(kubeConfig.GetConfig())
if err != nil {
return nil, err
}
return fi.NewBytesResource(config), nil
}
}
// IsKubernetesGTE checks if the cluster's kubernetes version is
// greater-than-or-equal to the given version string.
// A zero Major field means Init was never called; that is a programming
// error, so we fail hard rather than silently comparing against 0.0.0.
func (c *NodeupModelContext) IsKubernetesGTE(version string) bool {
if c.kubernetesVersion.Major == 0 {
klog.Fatalf("kubernetesVersion not set (%s); Init not called", c.kubernetesVersion)
}
return util.IsKubernetesGTE(version, c.kubernetesVersion)
}
// IsKubernetesLT checks if the cluster's kubernetes version is less than the
// given version string; implemented as the negation of IsKubernetesGTE.
// Like IsKubernetesGTE, it fails hard if Init was never called.
func (c *NodeupModelContext) IsKubernetesLT(version string) bool {
if c.kubernetesVersion.Major == 0 {
klog.Fatalf("kubernetesVersion not set (%s); Init not called", c.kubernetesVersion)
}
return !c.IsKubernetesGTE(version)
}
// UseEtcdManager reports whether any etcd cluster in the spec is managed by
// etcd-manager.
func (c *NodeupModelContext) UseEtcdManager() bool {
	for _, etcdCluster := range c.Cluster.Spec.EtcdClusters {
		if etcdCluster.Provider == kops.EtcdProviderTypeManager {
			return true
		}
	}
	return false
}
// UseEtcdTLS reports whether the etcd clusters have TLS enabled.
// @note: because we enforce that 'both' have to be enabled for TLS we only
// need to find one cluster with it set.
func (c *NodeupModelContext) UseEtcdTLS() bool {
	for _, etcdCluster := range c.Cluster.Spec.EtcdClusters {
		if etcdCluster.EnableEtcdTLS {
			return true
		}
	}
	return false
}
// UseVolumeMounts reports whether volume mounts are configured; callers use
// this to insert systemd requires/afters in various places.
func (c *NodeupModelContext) UseVolumeMounts() bool {
	return len(c.NodeupConfig.VolumeMounts) != 0
}
// UseEtcdTLSAuth reports whether peer TLS auth is enabled on any etcd cluster
// (and TLS itself is enabled).
// @NOTE: in retrospect i think we should have consolidated the common config in the wrapper struct; it
// feels weird we set things like version, tls etc per cluster since they both have to be the same.
func (c *NodeupModelContext) UseEtcdTLSAuth() bool {
	if !c.UseEtcdTLS() {
		return false
	}
	for _, etcdCluster := range c.Cluster.Spec.EtcdClusters {
		if etcdCluster.EnableTLSAuth {
			return true
		}
	}
	return false
}
// UseKopsControllerForNodeBootstrap checks if nodeup should use kops-controller to bootstrap.
// This is a thin delegate to the shared model helper so nodeup and the cloudup
// model agree on the decision.
func (c *NodeupModelContext) UseKopsControllerForNodeBootstrap() bool {
return model.UseKopsControllerForNodeBootstrap(c.Cluster)
}
// UsesSecondaryIP checks if the CNI in use attaches secondary interfaces to the host.
func (c *NodeupModelContext) UsesSecondaryIP() bool {
	networking := c.Cluster.Spec.Networking
	if networking.CNI != nil && networking.CNI.UsesSecondaryIP {
		return true
	}
	if networking.AmazonVPC != nil || networking.LyftVPC != nil {
		return true
	}
	return networking.Cilium != nil && networking.Cilium.Ipam == kops.CiliumIpamEni
}
// UseBootstrapTokens reports whether bootstrap tokens are in use: for API
// server nodes this follows the apiserver flag, for other nodes a configured
// kubelet bootstrap kubeconfig implies tokens.
func (c *NodeupModelContext) UseBootstrapTokens() bool {
	if c.HasAPIServer {
		return fi.BoolValue(c.Cluster.Spec.KubeAPIServer.EnableBootstrapAuthToken)
	}
	kubelet := c.Cluster.Spec.Kubelet
	return kubelet != nil && kubelet.BootstrapKubeconfig != ""
}
// KubectlPath returns the distro-specific directory holding kubectl.
func (c *NodeupModelContext) KubectlPath() string {
	switch c.Distribution {
	case distributions.DistributionFlatcar:
		return "/opt/kops/bin"
	case distributions.DistributionContainerOS:
		return "/home/kubernetes/bin"
	default:
		return "/usr/local/bin"
	}
}
// BuildCertificatePairTask creates the tasks that write both halves of a
// keypair: <filename>.pem for the certificate and <filename>-key.pem for the
// private key, under the given path.
func (c *NodeupModelContext) BuildCertificatePairTask(ctx *fi.ModelBuilderContext, key, path, filename string, owner *string) error {
	certPath := filepath.Join(path, filename+".pem")
	if err := c.BuildCertificateTask(ctx, key, certPath, owner); err != nil {
		return err
	}

	keyPath := filepath.Join(path, filename+"-key.pem")
	return c.BuildPrivateKeyTask(ctx, key, keyPath, owner)
}
// BuildCertificateTask builds a task that writes the named certificate from
// the keystore to filename (resolved against PathSrvKubernetes when relative),
// owned by owner with mode 0600.
func (c *NodeupModelContext) BuildCertificateTask(ctx *fi.ModelBuilderContext, name, filename string, owner *string) error {
	cert, err := c.KeyStore.FindCert(name)
	if err != nil {
		return err
	}
	if cert == nil {
		return fmt.Errorf("certificate %q not found", name)
	}

	contents, err := cert.AsString()
	if err != nil {
		return err
	}

	destination := filename
	if !filepath.IsAbs(destination) {
		destination = filepath.Join(c.PathSrvKubernetes(), filename)
	}

	ctx.AddTask(&nodetasks.File{
		Path:     destination,
		Contents: fi.NewStringResource(contents),
		Type:     nodetasks.FileType_File,
		Mode:     s("0600"),
		Owner:    owner,
	})
	return nil
}
// BuildPrivateKeyTask builds a task that writes the named private key from
// the keystore to filename (resolved against PathSrvKubernetes when relative),
// owned by owner with mode 0600.
func (c *NodeupModelContext) BuildPrivateKeyTask(ctx *fi.ModelBuilderContext, name, filename string, owner *string) error {
	privateKey, err := c.KeyStore.FindPrivateKey(name)
	if err != nil {
		return err
	}
	if privateKey == nil {
		return fmt.Errorf("private key %q not found", name)
	}

	contents, err := privateKey.AsString()
	if err != nil {
		return err
	}

	destination := filename
	if !filepath.IsAbs(destination) {
		destination = filepath.Join(c.PathSrvKubernetes(), filename)
	}

	ctx.AddTask(&nodetasks.File{
		Path:     destination,
		Contents: fi.NewStringResource(contents),
		Type:     nodetasks.FileType_File,
		Mode:     s("0600"),
		Owner:    owner,
	})
	return nil
}
// NodeName returns the name of the local Node, as it will be created in k8s.
// This mirrors nodeutil.GetHostName: the (master-specific, when set) hostname
// override is evaluated first; an empty result falls back to the OS hostname.
// The returned name is trimmed and lower-cased.
func (c *NodeupModelContext) NodeName() (string, error) {
	// This mirrors nodeutil.GetHostName
	hostnameOverride := c.Cluster.Spec.Kubelet.HostnameOverride
	// BUG FIX: guard MasterKubelet against nil before dereferencing it —
	// the sibling KubeletBootstrapKubeconfig performs the same check.
	if c.IsMaster && c.Cluster.Spec.MasterKubelet != nil && c.Cluster.Spec.MasterKubelet.HostnameOverride != "" {
		hostnameOverride = c.Cluster.Spec.MasterKubelet.HostnameOverride
	}

	nodeName, err := EvaluateHostnameOverride(hostnameOverride)
	if err != nil {
		return "", fmt.Errorf("error evaluating hostname: %v", err)
	}

	if nodeName == "" {
		hostname, err := os.Hostname()
		if err != nil {
			klog.Fatalf("Couldn't determine hostname: %v", err)
		}
		nodeName = hostname
	}

	return strings.ToLower(strings.TrimSpace(nodeName)), nil
}
// EvaluateHostnameOverride returns the hostname after replacing some well-known placeholders.
// "" and "@hostname" mean "use the OS hostname" (signalled by returning "").
// "@aws" (case/whitespace-insensitive) is resolved to the instance's private
// DNS name via the EC2 metadata service and DescribeInstances; any other
// value is returned unchanged.
func EvaluateHostnameOverride(hostnameOverride string) (string, error) {
if hostnameOverride == "" || hostnameOverride == "@hostname" {
return "", nil
}
// Normalize only for the "@aws" comparison; the original string is what
// gets returned for non-placeholder values.
k := strings.TrimSpace(hostnameOverride)
k = strings.ToLower(k)
if k != "@aws" {
return hostnameOverride, nil
}
// We recognize @aws as meaning "the private DNS name from AWS", to generate this we need to get a few pieces of information
azBytes, err := vfs.Context.ReadFile("metadata://aws/meta-data/placement/availability-zone")
if err != nil {
return "", fmt.Errorf("error reading availability zone from AWS metadata: %v", err)
}
instanceIDBytes, err := vfs.Context.ReadFile("metadata://aws/meta-data/instance-id")
if err != nil {
return "", fmt.Errorf("error reading instance-id from AWS metadata: %v", err)
}
instanceID := string(instanceIDBytes)
config := aws.NewConfig()
config = config.WithCredentialsChainVerboseErrors(true)
s, err := session.NewSession(config)
if err != nil {
return "", fmt.Errorf("error starting new AWS session: %v", err)
}
// The region is the availability zone minus its trailing zone letter.
svc := ec2.New(s, config.WithRegion(string(azBytes[:len(azBytes)-1])))
result, err := svc.DescribeInstances(&ec2.DescribeInstancesInput{
InstanceIds: []*string{&instanceID},
})
if err != nil {
return "", fmt.Errorf("error describing instances: %v", err)
}
// A single instance-id must resolve to exactly one reservation/instance.
if len(result.Reservations) != 1 {
return "", fmt.Errorf("too many reservations returned for the single instance-id")
}
if len(result.Reservations[0].Instances) != 1 {
return "", fmt.Errorf("too many instances returned for the single instance-id")
}
return *(result.Reservations[0].Instances[0].PrivateDnsName), nil
}
// GetPrimaryKeypair retrieves the primary keypair for name from the keystore,
// returning the certificate and private key serialized as bytes.
func (c *NodeupModelContext) GetPrimaryKeypair(name string) (cert []byte, key []byte, err error) {
	certificate, privateKey, err := c.KeyStore.FindPrimaryKeypair(name)
	switch {
	case err != nil:
		return nil, nil, fmt.Errorf("error fetching certificate: %v from keystore: %v", name, err)
	case certificate == nil:
		return nil, nil, fmt.Errorf("unable to find certificate: %s", name)
	case privateKey == nil:
		return nil, nil, fmt.Errorf("unable to find key: %s", name)
	}

	if cert, err = certificate.AsBytes(); err != nil {
		return nil, nil, err
	}
	if key, err = privateKey.AsBytes(); err != nil {
		return nil, nil, err
	}
	return cert, key, nil
}
// AddCNIBinAssets registers a file task for each named CNI binary asset,
// matching each name exactly (the name is quoted into an anchored regexp).
func (b *NodeupModelContext) AddCNIBinAssets(c *fi.ModelBuilderContext, assetNames []string) error {
	for _, assetName := range assetNames {
		pattern := "^" + regexp.QuoteMeta(assetName) + "$"
		re, err := regexp.Compile(pattern)
		if err != nil {
			return err
		}
		if err := b.addCNIBinAsset(c, re); err != nil {
			return err
		}
	}
	return nil
}
// addCNIBinAsset finds the single asset matching assetPath and registers a
// task installing it, executable (0755), into the CNI bin directory.
func (b *NodeupModelContext) addCNIBinAsset(c *fi.ModelBuilderContext, assetPath *regexp.Regexp) error {
	name, asset, err := b.Assets.FindMatch(assetPath)
	if err != nil {
		return err
	}

	c.AddTask(&nodetasks.File{
		Path:     filepath.Join(b.CNIBinDir(), name),
		Contents: asset,
		Type:     nodetasks.FileType_File,
		Mode:     fi.String("0755"),
	})
	return nil
}
// UsesCNI reports whether the cluster uses a CNI plugin; Classic networking
// (or a missing networking spec) means it does not.
func (c *NodeupModelContext) UsesCNI() bool {
	networking := c.Cluster.Spec.Networking
	if networking == nil {
		return false
	}
	return networking.Classic == nil
}
// CNIBinDir returns the directory holding the CNI plugin binaries.
// Kept distro-independent so CNI manifests need not be distro aware.
func (c *NodeupModelContext) CNIBinDir() string {
	const binDir = "/opt/cni/bin/"
	return binDir
}
// CNIConfDir returns the directory holding the CNI configuration files.
func (c *NodeupModelContext) CNIConfDir() string {
	const confDir = "/etc/cni/net.d/"
	return confDir
}
// WarmPullImage registers a task that pre-pulls the given container image
// (using the cluster's configured container runtime), but only when nodeup is
// running in warming mode; in live mode this is a no-op.
func (c *NodeupModelContext) WarmPullImage(ctx *fi.ModelBuilderContext, imageName string) {
	// Use the declared constant instead of repeating the "Warming" magic string.
	if c.ConfigurationMode != ConfigurationModeWarming {
		return
	}
	ctx.AddTask(&nodetasks.PullImageTask{
		Name:    imageName,
		Runtime: c.Cluster.Spec.ContainerRuntime,
	})
}
|
package command
import (
"flag"
"fmt"
"strings"
"github.com/mitchellh/cli"
"github.com/mohae/rancher/ranchr"
)
// BuildCommand is a Command implementation that generates Packer templates
// from named builds and passed build arguments.
type BuildCommand struct {
// Ui is the CLI interface used for all user-facing output.
Ui cli.Ui
}
// Help returns the long-form help text for the build command.
// (Fixes two typos in the user-facing text: "ditribution" and a stray
// period in "configur-ation file".)
func (c *BuildCommand) Help() string {
	helpText := `
Usage: rancher build [options]
Generates Packer templates. At minimum, this command needs to be run with
either the -distro flag or a build name. The simplest way to generate a Packer
template with rancher is to build a template with just the target distribution
name. The distribution must be supported, i.e. exists within Rancher's
distros.toml file:
% rancher build -distro=<distribution name>
% rancher build -distro=ubuntu
The above command generates a Packer template, targeting Ubuntu, using the
defaults for that distribution, which are found in the distros.toml configur-
ation file. Each of the distro defaults can be selectively overridden using
some of the other flags listed in the Options section.
Rancher can also generate Packer templates using preconfigured Rancher build
templates via the builds.toml file. The name of the build is used to specify
which build configuration should be used:
% rancher build <build template name...>
% rancher build 1204-amd64-server 1310-amd64-desktop
The above command generates two Packer templates using the 1204-amd64-server
and 1310-amd64-desktop build templates. The list of build template names is
variadic, accepting 1 or more build template names. For builds using the
-distro flag, the -arch, -image, and -release flags are optional. If any of
them are missing, the distribution's default value for that flag will be used.
Options:
-distro=<distroName> If provided, Rancher will generate a template for the
passed distribution name, e.g. ubuntu. This flag can
be used along with the -arch, -image, and -release
flags to override the Distribution's default values
for those settings.
-arch=<architecture> Specify whether 32 or 64 bit code should be used,
e.g."x32" or "amd64" for ubuntu. This flag is only
valid when used with the -distro flag.
-image=<imageType> The type of ISO image that this Packer template will
target, e.g. server, desktop, minimal for ubuntu. If
the -distro flag is used and this flag is not used,
the distro's default imageType will be used. This flag
is only valid when used with the -distro flag.
-release=<releaseNum> The release number that this Packer template will
target, e.g. 12.04, etc. Only the targeted distri-
bution's supported releases are valid. This flag is
only valid when used with the -distro flag.
-log_dir=<logDirPath> The directory path in which logging files will be
written. This will override the existing logging
directory information.
`
	return strings.TrimSpace(helpText)
}
// Run parses the build command's flags and generates Packer templates from
// the distro defaults (-distro) and/or the named build templates. It returns
// 0 on success, 1 on any error.
func (c *BuildCommand) Run(args []string) int {
	var distroFilter, archFilter, imageFilter, releaseFilter, logDirFilter string

	cmdFlags := flag.NewFlagSet("build", flag.ContinueOnError)
	cmdFlags.Usage = func() { c.Ui.Output(c.Help()) }
	cmdFlags.StringVar(&distroFilter, "distro", "", "distro filter")
	cmdFlags.StringVar(&archFilter, "arch", "", "arch filter")
	cmdFlags.StringVar(&imageFilter, "image", "", "image filter")
	cmdFlags.StringVar(&releaseFilter, "release", "", "release filter")
	cmdFlags.StringVar(&logDirFilter, "log_dir", "", "log directory")
	if err := cmdFlags.Parse(args); err != nil {
		c.Ui.Error(fmt.Sprintf("Parse of command-line arguments failed: %s", err))
		return 1
	}
	buildArgs := cmdFlags.Args()

	if distroFilter != "" {
		// Named `filter`, not `args`, to avoid shadowing the parameter.
		filter := ranchr.ArgsFilter{Arch: archFilter, Distro: distroFilter, Image: imageFilter, Release: releaseFilter}
		if err := ranchr.BuildDistro(filter); err != nil {
			// BUG FIX: report errors via Ui.Error, consistent with the
			// BuildBuilds error handling below (was Ui.Output).
			c.Ui.Error(err.Error())
			return 1
		}
	}

	if len(buildArgs) > 0 {
		message, err := ranchr.BuildBuilds(buildArgs...)
		if err != nil {
			c.Ui.Error(err.Error())
			return 1
		}
		c.Ui.Output(message)
	}

	c.Ui.Output("Rancher Build complete.")
	return 0
}
// Synopsis returns the one-line description shown in the CLI command listing.
func (c *BuildCommand) Synopsis() string {
return "Create a Packer template from either distribution defaults or pre-defined Rancher Build templates."
}
updated build command help
package command
import (
"flag"
"fmt"
"strings"
"github.com/mitchellh/cli"
"github.com/mohae/rancher/ranchr"
)
// BuildCommand is a Command implementation that generates Packer templates
// from named builds and passed build arguments.
type BuildCommand struct {
// Ui is the CLI interface used for all user-facing output.
Ui cli.Ui
}
// Help returns the long-form help text for the build command, with leading
// and trailing whitespace trimmed.
func (c *BuildCommand) Help() string {
helpText := `
Usage: rancher build [options]
Rancher creates Packer templates. At minimum, this command needs to be run with
either the -distro flag or a build name. The simplest way to generate a Packer
template is to run Rancher with just the target distribution name, which must
be supported, i.e. exists within Rancher's supported.toml file:
% rancher build -distro=<distro name>
% rancher build -distro=ubuntu
The above command generates a Packer template, targeting Ubuntu, using the
defaults for that distribution. See the options section for the other flags.
Rancher can also generate Packer templates using preconfigured Rancher build
templates via the builds.toml file. The name of the build is used to specify
which build configuration should be used:
% rancher build <buildName...>
% rancher build 1204-amd64-server 1404-amd64-desktop
The above command generates two Packer templates using the 1204-amd64-server
and 1404-amd64-desktop build configurations. The list of build names is
variadic, accepting 1 or more build names.
For builds using the -distro flag, the -arch, -image, and -release flags are
optional. If any of them are missing, the distribution's default value for that
flag will be used.
Options:
-distro=<distroName> If provided, Rancher will create a Packer template for
the passed distro, e.g. ubuntu. This flag can be used
with the -arch, -image, and -release flags to override
the distro's default values for those settings.
-arch=<architecture> Specify whether 32 or 64 bit code should be used. These
values are distro dependent. This flag is only used
with the -distro flag.
-image=<imageType> The ISO image that the Packer template will use, e.g.
server or desktop. These values are distro dependent.
This flag is only used with the -distro flag.
-release=<releaseNum> The release number that the Packer template will use,
e.g. 12.04, etc. Only the targeted distro's currently
supported releases are valid. This flag is only used
with the -distro flag.
`
return strings.TrimSpace(helpText)
}
// Run parses the build command's flags and generates Packer templates from
// the distro defaults (-distro) and/or the named build templates. It returns
// 0 on success, 1 on any error.
func (c *BuildCommand) Run(args []string) int {
	var distroFilter, archFilter, imageFilter, releaseFilter, logDirFilter string

	cmdFlags := flag.NewFlagSet("build", flag.ContinueOnError)
	cmdFlags.Usage = func() { c.Ui.Output(c.Help()) }
	cmdFlags.StringVar(&distroFilter, "distro", "", "distro filter")
	cmdFlags.StringVar(&archFilter, "arch", "", "arch filter")
	cmdFlags.StringVar(&imageFilter, "image", "", "image filter")
	cmdFlags.StringVar(&releaseFilter, "release", "", "release filter")
	// NOTE(review): -log_dir was dropped from the help text but the flag is
	// kept for backward compatibility; its value is currently unused here.
	cmdFlags.StringVar(&logDirFilter, "log_dir", "", "log directory")
	if err := cmdFlags.Parse(args); err != nil {
		c.Ui.Error(fmt.Sprintf("Parse of command-line arguments failed: %s", err))
		return 1
	}
	buildArgs := cmdFlags.Args()

	if distroFilter != "" {
		// Named `filter`, not `args`, to avoid shadowing the parameter.
		filter := ranchr.ArgsFilter{Arch: archFilter, Distro: distroFilter, Image: imageFilter, Release: releaseFilter}
		if err := ranchr.BuildDistro(filter); err != nil {
			// BUG FIX: report errors via Ui.Error, consistent with the
			// BuildBuilds error handling below (was Ui.Output).
			c.Ui.Error(err.Error())
			return 1
		}
	}

	if len(buildArgs) > 0 {
		message, err := ranchr.BuildBuilds(buildArgs...)
		if err != nil {
			c.Ui.Error(err.Error())
			return 1
		}
		c.Ui.Output(message)
	}

	c.Ui.Output("Rancher Build complete.")
	return 0
}
// Synopsis returns the one-line description shown in the CLI command listing.
func (c *BuildCommand) Synopsis() string {
return "Create a Packer template from either distribution defaults or pre-defined Rancher Build templates."
}
|
// Copyright 2015 Cloud Security Alliance EMEA (cloudsecurityalliance.org)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jsmm
import (
"regexp"
"strings"
"time"
)
// ToString converts the value at stack slot -2 to a string and pushes the
// result. Returns the number of values pushed.
func ToString(m *Machine, fn *Function, paramCount int) (int, *MachineException) {
	// stack -1: global object
	// stack -2: object to convert
	value := m.Get(-2)
	m.Push(NewString(value.ToString()))
	return 1, nil
}
// ToBoolean converts the value at stack slot -2 to a boolean and pushes the
// result. Returns the number of values pushed.
func ToBoolean(m *Machine, fn *Function, paramCount int) (int, *MachineException) {
	// stack -1: global object
	// stack -2: object to convert
	value := m.Get(-2)
	m.Push(NewBoolean(value.ToBoolean()))
	return 1, nil
}
// ToNumber converts the value at stack slot -2 to a number and pushes the
// result. Returns the number of values pushed.
func ToNumber(m *Machine, fn *Function, paramCount int) (int, *MachineException) {
	// stack -1: global object
	// stack -2: object to convert
	value := m.Get(-2)
	m.Push(NewNumber(value.ToNumber()))
	return 1, nil
}
// ToArray converts the value at stack slot -2 to an array and pushes the
// result: arrays pass through, strings become per-character arrays, scalars
// become single-element arrays, and anything else yields null.
func ToArray(m *Machine, fn *Function, paramCount int) (int, *MachineException) {
	// stack -1: global object
	// stack -2: object to convert
	value := m.Get(-2)
	switch value.Type() {
	case TypeArray:
		m.Push(value)
	case TypeString:
		chars := NewArray()
		var index uint32
		// Iterate by rune, matching the per-character split semantics.
		for _, r := range value.ToString() {
			chars.SetUInt32Property(index, NewString(string(r)))
			index++
		}
		m.Push(chars)
	case TypeNumber, TypeBoolean, TypeNull:
		m.Push(NewArray(value))
	default:
		m.Push(NewNull())
	}
	return 1, nil
}
// TimeUTC converts the string at stack slot -2 to a UTC Unix timestamp and
// pushes it as a number. "now" yields the current time; anything else must be
// RFC 3339. Non-string input or an unparsable time raises a machine exception.
func TimeUTC(m *Machine, fn *Function, paramCount int) (int, *MachineException) {
	// stack -1: global object
	// stack -2: time expression
	value := m.Get(-2)
	if value.Type() != TypeString {
		return 0, NewMachineException("Missing time expression in call to timeUTC()")
	}
	expr := value.ToString()
	if expr == "now" {
		m.Push(NewNumber(float64(time.Now().UTC().Unix())))
		return 1, nil
	}
	parsed, err := time.Parse(time.RFC3339, expr)
	if err != nil {
		return 0, NewMachineException("Time format error in timeUTC(), " + err.Error())
	}
	m.Push(NewNumber(float64(parsed.Unix())))
	return 1, nil
}
// MatchRegexp matches a POSIX regexp against a string (pushing whether it
// matches) or against an array (pushing true only if every string element
// matches). A non-string pattern yields null; a bad pattern or unsupported
// subject type raises a machine exception.
func MatchRegexp(m *Machine, fn *Function, paramCount int) (int, *MachineException) {
	// stack -1: global object
	// stack -2: regexp string
	// stack -3: string or array of strings
	reref := m.Get(-2)
	objref := m.Get(-3)
	if reref.Type() != TypeString {
		m.Push(NewNull())
		return 1, nil
	}
	re, err := regexp.CompilePOSIX(reref.ToString())
	if err != nil {
		return 0, NewMachineException("matchRegex failed, " + err.Error())
	}
	switch objref.Type() {
	case TypeString:
		m.Push(NewBoolean(re.MatchString(objref.ToString())))
	case TypeArray:
		var i uint32
		array := objref.(*Array)
		for i = 0; i < array.length; i++ {
			val, err := array.GetUInt32Property(i)
			// BUG FIX: the error check was inverted (err != nil), which
			// skipped every successfully fetched element so no string was
			// ever matched. Only consider elements fetched without error.
			if err == nil && val.Type() == TypeString {
				if !re.MatchString(val.ToString()) {
					m.Push(NewBoolean(false))
					return 1, nil
				}
			}
		}
		m.Push(NewBoolean(true))
	default:
		return 0, NewMachineException("matchRegex expects a string or an array as parameters")
	}
	return 1, nil
}
// Select builds a new array containing, for each element of the input array,
// the value of the named property; elements that cannot be fetched or lack
// the property are skipped. Pushes the resulting array.
func Select(m *Machine, fn *Function, paramCount int) (int, *MachineException) {
	// stack -1: global object
	// stack -2: select key
	// stack -3: array of objects
	keyref := m.Get(-2)
	objref := m.Get(-3)
	if keyref.Type() != TypeString || objref.Type() != TypeArray {
		return 0, NewMachineException("select expects a string key and an array as parameters, got (%s,%s) instead", TypeOf(keyref), TypeOf(objref))
	}
	result := NewArray()
	array := objref.(*Array)
	key := keyref.ToString()
	var i uint32
	for i = 0; i < array.length; i++ {
		val, err := array.GetUInt32Property(i)
		// BUG FIX: both error checks were inverted (err != nil), so no
		// element was ever selected. Proceed only on successful retrieval.
		if err == nil {
			val2, err2 := val.GetProperty(key)
			if err2 == nil {
				result.Push(val2)
			}
		}
	}
	m.Push(result)
	return 1, nil
}
// ArrayMin pushes the smallest non-null element of the array at stack slot
// -1 (NullConst if the array is empty or all-null). Raises a machine
// exception when called on a non-array.
func ArrayMin(m *Machine, fn *Function, paramCount int) (int, *MachineException) {
	// stack -1: table object
	arrayref := m.Get(-1)
	array, ok := arrayref.(*Array)
	if !ok {
		return 0, NewMachineException("Array method called on non-array object")
	}
	var ref MachineValue = NullConst
	var i uint32
	for i = 0; i < array.length; i++ {
		val, err := array.GetUInt32Property(i)
		// BUG FIX: the error check was inverted (err != nil), which skipped
		// every successfully fetched element so the minimum was never found.
		if err == nil && val.Type() != TypeNull {
			if ref == NullConst || lessThan(val, ref) {
				ref = val
			}
		}
	}
	m.Push(ref)
	return 1, nil
}
// ArrayMax pushes the largest non-null element of the array at stack slot
// -1 (NullConst if the array is empty or all-null). Raises a machine
// exception when called on a non-array.
func ArrayMax(m *Machine, fn *Function, paramCount int) (int, *MachineException) {
	// stack -1: table object
	arrayref := m.Get(-1)
	array, ok := arrayref.(*Array)
	if !ok {
		return 0, NewMachineException("Array method called on non-array object")
	}
	var ref MachineValue = NullConst
	var i uint32
	for i = 0; i < array.length; i++ {
		val, err := array.GetUInt32Property(i)
		// BUG FIX: the error check was inverted (err != nil), which skipped
		// every successfully fetched element so the maximum was never found.
		if err == nil && val.Type() != TypeNull {
			if ref == NullConst || !lessThan(val, ref) {
				ref = val
			}
		}
	}
	m.Push(ref)
	return 1, nil
}
// ToJSON pushes the JSON serialization of the value at stack slot -2 as a
// string.
// NOTE(review): by analogy with the other builtins, -1 is presumably the
// global object and -2 the value to serialize — confirm against the caller.
func ToJSON(m *Machine, fn *Function, paramCount int) (int, *MachineException) {
m.Push(NewString(m.Get(-2).ToJSON()))
return 1, nil
}
Corrected bug in some builtin functions
// Copyright 2015 Cloud Security Alliance EMEA (cloudsecurityalliance.org)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jsmm
import (
"regexp"
"strings"
"time"
)
// ToString converts the value at stack slot -2 to a string and pushes the
// result. Returns the number of values pushed.
func ToString(m *Machine, fn *Function, paramCount int) (int, *MachineException) {
	// stack -1: global object
	// stack -2: object to convert
	value := m.Get(-2)
	m.Push(NewString(value.ToString()))
	return 1, nil
}
// ToBoolean converts the value at stack slot -2 to a boolean and pushes the
// result. Returns the number of values pushed.
func ToBoolean(m *Machine, fn *Function, paramCount int) (int, *MachineException) {
	// stack -1: global object
	// stack -2: object to convert
	value := m.Get(-2)
	m.Push(NewBoolean(value.ToBoolean()))
	return 1, nil
}
// ToNumber converts the value at stack slot -2 to a number and pushes the
// result. Returns the number of values pushed.
func ToNumber(m *Machine, fn *Function, paramCount int) (int, *MachineException) {
	// stack -1: global object
	// stack -2: object to convert
	value := m.Get(-2)
	m.Push(NewNumber(value.ToNumber()))
	return 1, nil
}
// ToArray converts the value at stack slot -2 to an array and pushes the
// result: arrays pass through, strings become per-character arrays, scalars
// become single-element arrays, and anything else yields null.
func ToArray(m *Machine, fn *Function, paramCount int) (int, *MachineException) {
	// stack -1: global object
	// stack -2: object to convert
	value := m.Get(-2)
	switch value.Type() {
	case TypeArray:
		m.Push(value)
	case TypeString:
		chars := NewArray()
		var index uint32
		// Iterate by rune, matching the per-character split semantics.
		for _, r := range value.ToString() {
			chars.SetUInt32Property(index, NewString(string(r)))
			index++
		}
		m.Push(chars)
	case TypeNumber, TypeBoolean, TypeNull:
		m.Push(NewArray(value))
	default:
		m.Push(NewNull())
	}
	return 1, nil
}
// TimeUTC converts a time expression string (stack -2) into a UTC Unix
// timestamp and pushes it as a number. The literal "now" yields the
// current time; any other value must be an RFC3339 timestamp.
// Returns an exception if the argument is not a string or fails to parse.
func TimeUTC(m *Machine, fn *Function, paramCount int) (int, *MachineException) {
	// stack -1: global object
	// stack -2: object to convert
	expr := m.Get(-2)
	if expr.Type() != TypeString {
		return 0, NewMachineException("Missing time expression in call to timeUTC()")
	}
	spec := expr.ToString()
	if spec == "now" {
		// special-case literal: current wall-clock time in UTC
		m.Push(NewNumber(float64(time.Now().UTC().Unix())))
		return 1, nil
	}
	parsed, err := time.Parse(time.RFC3339, spec)
	if err != nil {
		return 0, NewMachineException("Time format error in timeUTC(), " + err.Error())
	}
	m.Push(NewNumber(float64(parsed.Unix())))
	return 1, nil
}
// MatchRegexp tests a subject against a POSIX regular expression.
// stack -2 holds the pattern string, stack -3 the subject: either a
// single string or an array.
//
// For a string subject the pushed boolean is whether the pattern
// matches. For an array subject the loop stops at the first string
// element that does NOT match, so the pushed boolean is true only if
// every string element matches; non-string elements are skipped.
// NOTE(review): an empty array, or an array with no string elements,
// yields false — confirm this is the intended semantics.
// A non-string pattern pushes null; a pattern that fails to compile
// raises a machine exception.
func MatchRegexp(m *Machine, fn *Function, paramCount int) (int, *MachineException) {
	// stack -1: global object
	// stack -2: regexp string
	// stack -3: string or array of strings
	reref := m.Get(-2)
	objref := m.Get(-3)
	if reref.Type() != TypeString {
		// pattern must be a string; signal with null rather than an exception
		m.Push(NewNull())
		return 1, nil
	}
	re, err := regexp.CompilePOSIX(reref.ToString())
	if err != nil {
		return 0, NewMachineException("matchRegex failed, " + err.Error())
	}
	switch objref.Type() {
	case TypeString:
		m.Push(NewBoolean(re.MatchString(objref.ToString())))
	case TypeArray:
		var i uint32
		array := objref.(*Array)
		retval := false
		for i = 0; i < array.length; i++ {
			val, err := array.GetUInt32Property(i)
			if err == nil && val.Type() == TypeString {
				retval = re.MatchString(val.ToString())
				if !retval {
					// one string element failed: overall result is false
					break
				}
			}
		}
		m.Push(NewBoolean(retval))
	default:
		return 0, NewMachineException("matchRegex expects a string or an array as parameters")
	}
	return 1, nil
}
// Select collects, from every element of an array (stack -3), the value
// stored under a given property key (stack -2) and pushes a new array
// containing the collected values. Elements that lack the key (or
// cannot be read) are silently skipped.
// Raises a machine exception when the arguments are not (string, array).
func Select(m *Machine, fn *Function, paramCount int) (int, *MachineException) {
	// stack -1: global object
	// stack -2: select key
	// stack -3: array of objects
	keyVal := m.Get(-2)
	arrVal := m.Get(-3)
	if keyVal.Type() != TypeString || arrVal.Type() != TypeArray {
		return 0, NewMachineException("select expects a string key and an array as parameters, got (%s,%s) instead", TypeOf(keyVal), TypeOf(arrVal))
	}
	src := arrVal.(*Array)
	name := keyVal.ToString()
	collected := NewArray()
	for idx := uint32(0); idx < src.length; idx++ {
		elem, elemErr := src.GetUInt32Property(idx)
		if elemErr != nil {
			continue // unreadable slot: skip
		}
		if member, propErr := elem.GetProperty(name); propErr == nil {
			collected.Push(member)
		}
	}
	m.Push(collected)
	return 1, nil
}
// ArrayMin pushes the smallest non-null element of the array on which
// the method is invoked (stack -1), using lessThan for ordering.
// An empty array, or one containing only nulls/unreadable slots,
// yields null. Raises a machine exception on a non-array receiver.
func ArrayMin(m *Machine, fn *Function, paramCount int) (int, *MachineException) {
	// stack -1: table object
	arrayref := m.Get(-1)
	if array, ok := arrayref.(*Array); ok {
		var i uint32
		var ref MachineValue
		// ref starts as the null sentinel; first usable element replaces it
		ref = NullConst
		for i = 0; i < array.length; i++ {
			val, err := array.GetUInt32Property(i)
			if err == nil && val.Type() != TypeNull {
				if ref == NullConst {
					ref = val
				} else {
					if lessThan(val, ref) {
						ref = val
					}
				}
			}
		}
		m.Push(ref)
	} else {
		return 0, NewMachineException("Array method called on non-array object")
	}
	return 1, nil
}
// ArrayMax pushes the largest non-null element of the array on which
// the method is invoked (stack -1). Uses !lessThan(val, ref) as the
// replacement test, so among equal maxima the LAST occurrence wins.
// An empty array, or one containing only nulls/unreadable slots,
// yields null. Raises a machine exception on a non-array receiver.
func ArrayMax(m *Machine, fn *Function, paramCount int) (int, *MachineException) {
	// stack -1: table object
	arrayref := m.Get(-1)
	if array, ok := arrayref.(*Array); ok {
		var i uint32
		var ref MachineValue
		// ref starts as the null sentinel; first usable element replaces it
		ref = NullConst
		for i = 0; i < array.length; i++ {
			val, err := array.GetUInt32Property(i)
			if err == nil && val.Type() != TypeNull {
				if ref == NullConst {
					ref = val
				} else {
					if !lessThan(val, ref) {
						ref = val
					}
				}
			}
		}
		m.Push(ref)
	} else {
		return 0, NewMachineException("Array method called on non-array object")
	}
	return 1, nil
}
// ToJSON serializes the machine value at stack position -2 and pushes
// its JSON representation onto the stack as a string value.
// Returns 1 (one result pushed) and no exception.
func ToJSON(m *Machine, fn *Function, paramCount int) (int, *MachineException) {
	// stack -1: global object (unused)
	// stack -2: object to convert
	m.Push(NewString(m.Get(-2).ToJSON()))
	return 1, nil
}
|
package main
import (
"fmt"
"strconv"
"github.com/codegangsta/cli"
)
// cmdNodeAdd registers a new node. Required key/value arguments:
// assetid, name, team, server; optional: online (boolean, default true).
// The accepted argument counts (8 or 10) correspond to 4 or 5
// key/value pairs.
func cmdNodeAdd(c *cli.Context) {
	keySlice := []string{"assetid", "name", "team", "server", "online"}
	reqSlice := []string{"assetid", "name", "team", "server"}
	// only 4 or 5 key/value pairs are valid invocations
	switch utl.GetCliArgumentCount(c) {
	case 8, 10:
		break
	default:
		utl.Abort("Syntax error, unexpected argument count")
	}
	argSlice := utl.GetFullArgumentSlice(c)
	options, optional := utl.ParseVariableArguments(keySlice, reqSlice, argSlice)
	var req somaproto.ProtoRequestNode
	utl.ValidateStringAsNodeAssetId(options["assetid"])
	if utl.SliceContainsString("online", optional) {
		// online was supplied: validate and parse it
		utl.ValidateStringAsBool(options["online"])
		req.Node.IsOnline, _ = strconv.ParseBool(options["online"])
	} else {
		// default: new nodes start online
		req.Node.IsOnline = true
	}
	// assetid was validated above, so the parse error can be ignored
	req.Node.AssetId, _ = strconv.ParseUint(options["assetid"], 10, 64)
	req.Node.Name = options["name"]
	req.Node.Team = options["team"]
	req.Node.Server = options["server"]
	_ = utl.PostRequestWithBody(req, "/nodes/")
}
// cmdNodeDel marks a single node, addressed by UUID or name, as deleted.
func cmdNodeDel(c *cli.Context) {
	utl.ValidateCliArgumentCount(c, 1)
	nodeId := utl.TryGetNodeByUUIDOrName(c.Args().First())
	target := fmt.Sprintf("/nodes/%s", nodeId.String())
	_ = utl.DeleteRequest(target)
}
// cmdNodePurge permanently removes delete-flagged nodes: every node
// when the --all flag is set, or the single node given as argument.
func cmdNodePurge(c *cli.Context) {
	var req somaproto.ProtoRequestNode
	var target string
	if c.Bool("all") {
		// purge everything: no positional arguments allowed
		utl.ValidateCliArgumentCount(c, 0)
		target = "/nodes/"
	} else {
		utl.ValidateCliArgumentCount(c, 1)
		nodeId := utl.TryGetNodeByUUIDOrName(c.Args().First())
		target = fmt.Sprintf("/nodes/%s", nodeId.String())
	}
	req.Purge = true
	_ = utl.DeleteRequestWithBody(req, target)
}
// cmdNodeRestore un-deletes delete-flagged nodes: every node when the
// --all flag is set, or the single node given as argument.
func cmdNodeRestore(c *cli.Context) {
	var req somaproto.ProtoRequestNode
	var target string
	if c.Bool("all") {
		// restore everything: no positional arguments allowed
		utl.ValidateCliArgumentCount(c, 0)
		target = "/nodes/"
	} else {
		utl.ValidateCliArgumentCount(c, 1)
		nodeId := utl.TryGetNodeByUUIDOrName(c.Args().First())
		target = fmt.Sprintf("/nodes/%s", nodeId.String())
	}
	req.Restore = true
	_ = utl.DeleteRequestWithBody(req, target)
}
// cmdNodeRename changes a node's name. Syntax: <node> to <newname>.
func cmdNodeRename(c *cli.Context) {
	utl.ValidateCliArgumentCount(c, 3)
	utl.ValidateCliArgument(c, 2, "to")
	nodeId := utl.TryGetNodeByUUIDOrName(c.Args().First())
	var req somaproto.ProtoRequestNode
	req.Node.Name = c.Args().Get(2)
	target := fmt.Sprintf("/nodes/%s", nodeId.String())
	_ = utl.PatchRequestWithBody(req, target)
}
// cmdNodeRepo reassigns a node to a different team.
// Syntax: <node> to <team>.
// NOTE(review): despite the "Repo" name, this handler patches the
// node's Team field — confirm whether the name or the behavior is the
// intended one.
func cmdNodeRepo(c *cli.Context) {
	utl.ValidateCliArgumentCount(c, 3)
	utl.ValidateCliArgument(c, 2, "to")
	id := utl.TryGetNodeByUUIDOrName(c.Args().Get(0))
	team := c.Args().Get(2)
	// try resolving team name to uuid as name validation
	_ = utl.GetTeamIdByName(team)
	path := fmt.Sprintf("/nodes/%s", id.String())
	var req somaproto.ProtoRequestNode
	req.Node.Team = team
	_ = utl.PatchRequestWithBody(req, path)
}
// cmdNodeMove relocates a node to a different server.
// Syntax: <node> to <server>.
func cmdNodeMove(c *cli.Context) {
	utl.ValidateCliArgumentCount(c, 3)
	utl.ValidateCliArgument(c, 2, "to")
	nodeId := utl.TryGetNodeByUUIDOrName(c.Args().Get(0))
	targetServer := c.Args().Get(2)
	// try resolving server name to uuid as name validation
	_ = utl.GetServerAssetIdByName(targetServer)
	var req somaproto.ProtoRequestNode
	req.Node.Server = targetServer
	target := fmt.Sprintf("/nodes/%s", nodeId.String())
	_ = utl.PatchRequestWithBody(req, target)
}
// cmdNodeOnline marks the given node (by UUID or name) as online.
func cmdNodeOnline(c *cli.Context) {
	utl.ValidateCliArgumentCount(c, 1)
	id := utl.TryGetNodeByUUIDOrName(c.Args().First())
	path := fmt.Sprintf("/nodes/%s", id.String())
	var req somaproto.ProtoRequestNode
	req.Node.IsOnline = true
	_ = utl.PatchRequestWithBody(req, path)
}
// cmdNodeOffline marks the given node (by UUID or name) as offline.
func cmdNodeOffline(c *cli.Context) {
	utl.ValidateCliArgumentCount(c, 1)
	id := utl.TryGetNodeByUUIDOrName(c.Args().First())
	path := fmt.Sprintf("/nodes/%s", id.String())
	var req somaproto.ProtoRequestNode
	req.Node.IsOnline = false
	_ = utl.PatchRequestWithBody(req, path)
}
// cmdNodeAssign submits a job assigning a node to a configuration
// bucket. Syntax: <node> to repository <repo> bucket <bucket>.
// NOTE(review): the node argument itself (args[0]) is never written
// into the request — confirm the server infers it or whether this is
// still incomplete.
func cmdNodeAssign(c *cli.Context) {
	utl.ValidateCliArgumentCount(c, 6)
	utl.ValidateCliArgument(c, 2, "to")
	keySlice := []string{"repository", "bucket"}
	// skip node name and the "to" keyword; rest is key/value pairs
	argSlice := utl.GetFullArgumentSlice(c)[2:]
	options, _ := utl.ParseVariableArguments(keySlice, keySlice, argSlice)
	var req somaproto.ProtoRequestJob
	req.JobType = "node"
	req.Node.Action = "assign"
	req.Node.Node.Config.RepositoryName = options["repository"]
	req.Node.Node.Config.BucketName = options["bucket"]
	_ = utl.PostRequestWithBody(req, "/jobs/")
	// TODO save jobid locally as outstanding
}
// cmdNodeList requests the list of all registered nodes.
func cmdNodeList(c *cli.Context) {
	utl.ValidateCliArgumentCount(c, 0)
	_ = utl.GetRequest("/nodes/")
}
// cmdNodeShow requests the details of a single node (by UUID or name).
func cmdNodeShow(c *cli.Context) {
	utl.ValidateCliArgumentCount(c, 1)
	id := utl.TryGetNodeByUUIDOrName(c.Args().First())
	path := fmt.Sprintf("/nodes/%s", id.String())
	_ = utl.GetRequest(path)
}
// cmdNodePropertyAdd attaches a property to a node. TODO: not yet implemented.
func cmdNodePropertyAdd(c *cli.Context) {
}

// cmdNodePropertyGet fetches one node property. TODO: not yet implemented.
func cmdNodePropertyGet(c *cli.Context) {
}

// cmdNodePropertyDel removes a node property. TODO: not yet implemented.
func cmdNodePropertyDel(c *cli.Context) {
}

// cmdNodePropertyList lists node properties. TODO: not yet implemented.
func cmdNodePropertyList(c *cli.Context) {
}

// cmdNodePropertyShow shows a node property. TODO: not yet implemented.
func cmdNodePropertyShow(c *cli.Context) {
}
// vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix
Implement cmdNodePropertyAdd()
package main
import (
"fmt"
"strconv"
"github.com/codegangsta/cli"
)
// cmdNodeAdd registers a new node. Required key/value arguments:
// assetid, name, team, server; optional: online (boolean, default true).
// The accepted argument counts (8 or 10) correspond to 4 or 5
// key/value pairs.
func cmdNodeAdd(c *cli.Context) {
	keySlice := []string{"assetid", "name", "team", "server", "online"}
	reqSlice := []string{"assetid", "name", "team", "server"}
	// only 4 or 5 key/value pairs are valid invocations
	switch utl.GetCliArgumentCount(c) {
	case 8, 10:
		break
	default:
		utl.Abort("Syntax error, unexpected argument count")
	}
	argSlice := utl.GetFullArgumentSlice(c)
	options, optional := utl.ParseVariableArguments(keySlice, reqSlice, argSlice)
	var req somaproto.ProtoRequestNode
	utl.ValidateStringAsNodeAssetId(options["assetid"])
	if utl.SliceContainsString("online", optional) {
		// online was supplied: validate and parse it
		utl.ValidateStringAsBool(options["online"])
		req.Node.IsOnline, _ = strconv.ParseBool(options["online"])
	} else {
		// default: new nodes start online
		req.Node.IsOnline = true
	}
	// assetid was validated above, so the parse error can be ignored
	req.Node.AssetId, _ = strconv.ParseUint(options["assetid"], 10, 64)
	req.Node.Name = options["name"]
	req.Node.Team = options["team"]
	req.Node.Server = options["server"]
	_ = utl.PostRequestWithBody(req, "/nodes/")
}
// cmdNodeDel marks a single node, addressed by UUID or name, as deleted.
func cmdNodeDel(c *cli.Context) {
	utl.ValidateCliArgumentCount(c, 1)
	id := utl.TryGetNodeByUUIDOrName(c.Args().First())
	path := fmt.Sprintf("/nodes/%s", id.String())
	_ = utl.DeleteRequest(path)
}
// cmdNodePurge permanently removes delete-flagged nodes: every node
// when the --all flag is set, or the single node given as argument.
func cmdNodePurge(c *cli.Context) {
	var (
		path string
		req  somaproto.ProtoRequestNode
	)
	if c.Bool("all") {
		// purge everything: no positional arguments allowed
		utl.ValidateCliArgumentCount(c, 0)
		path = "/nodes/"
	} else {
		utl.ValidateCliArgumentCount(c, 1)
		id := utl.TryGetNodeByUUIDOrName(c.Args().First())
		path = fmt.Sprintf("/nodes/%s", id.String())
	}
	req.Purge = true
	_ = utl.DeleteRequestWithBody(req, path)
}
// cmdNodeRestore un-deletes delete-flagged nodes: every node when the
// --all flag is set, or the single node given as argument.
func cmdNodeRestore(c *cli.Context) {
	var (
		path string
		req  somaproto.ProtoRequestNode
	)
	if c.Bool("all") {
		// restore everything: no positional arguments allowed
		utl.ValidateCliArgumentCount(c, 0)
		path = "/nodes/"
	} else {
		utl.ValidateCliArgumentCount(c, 1)
		id := utl.TryGetNodeByUUIDOrName(c.Args().First())
		path = fmt.Sprintf("/nodes/%s", id.String())
	}
	req.Restore = true
	_ = utl.DeleteRequestWithBody(req, path)
}
// cmdNodeRename changes a node's name. Syntax: <node> to <newname>.
func cmdNodeRename(c *cli.Context) {
	utl.ValidateCliArgumentCount(c, 3)
	utl.ValidateCliArgument(c, 2, "to")
	id := utl.TryGetNodeByUUIDOrName(c.Args().First())
	path := fmt.Sprintf("/nodes/%s", id.String())
	var req somaproto.ProtoRequestNode
	req.Node.Name = c.Args().Get(2)
	_ = utl.PatchRequestWithBody(req, path)
}
// cmdNodeRepo reassigns a node to a different team.
// Syntax: <node> to <team>.
// NOTE(review): despite the "Repo" name, this handler patches the
// node's Team field — confirm whether the name or the behavior is the
// intended one.
func cmdNodeRepo(c *cli.Context) {
	utl.ValidateCliArgumentCount(c, 3)
	utl.ValidateCliArgument(c, 2, "to")
	id := utl.TryGetNodeByUUIDOrName(c.Args().Get(0))
	team := c.Args().Get(2)
	// try resolving team name to uuid as name validation
	_ = utl.GetTeamIdByName(team)
	path := fmt.Sprintf("/nodes/%s", id.String())
	var req somaproto.ProtoRequestNode
	req.Node.Team = team
	_ = utl.PatchRequestWithBody(req, path)
}
// cmdNodeMove relocates a node to a different server.
// Syntax: <node> to <server>.
func cmdNodeMove(c *cli.Context) {
	utl.ValidateCliArgumentCount(c, 3)
	utl.ValidateCliArgument(c, 2, "to")
	id := utl.TryGetNodeByUUIDOrName(c.Args().Get(0))
	server := c.Args().Get(2)
	// try resolving server name to uuid as name validation
	_ = utl.GetServerAssetIdByName(server)
	path := fmt.Sprintf("/nodes/%s", id.String())
	var req somaproto.ProtoRequestNode
	req.Node.Server = server
	_ = utl.PatchRequestWithBody(req, path)
}
// cmdNodeOnline marks the given node (by UUID or name) as online.
func cmdNodeOnline(c *cli.Context) {
	utl.ValidateCliArgumentCount(c, 1)
	id := utl.TryGetNodeByUUIDOrName(c.Args().First())
	path := fmt.Sprintf("/nodes/%s", id.String())
	var req somaproto.ProtoRequestNode
	req.Node.IsOnline = true
	_ = utl.PatchRequestWithBody(req, path)
}
// cmdNodeOffline marks the given node (by UUID or name) as offline.
func cmdNodeOffline(c *cli.Context) {
	utl.ValidateCliArgumentCount(c, 1)
	id := utl.TryGetNodeByUUIDOrName(c.Args().First())
	path := fmt.Sprintf("/nodes/%s", id.String())
	var req somaproto.ProtoRequestNode
	req.Node.IsOnline = false
	_ = utl.PatchRequestWithBody(req, path)
}
// cmdNodeAssign submits a job assigning a node to a configuration
// bucket. Syntax: <node> to repository <repo> bucket <bucket>.
// NOTE(review): the node argument itself (args[0]) is never written
// into the request — confirm the server infers it or whether this is
// still incomplete.
func cmdNodeAssign(c *cli.Context) {
	utl.ValidateCliArgumentCount(c, 6)
	utl.ValidateCliArgument(c, 2, "to")
	keySlice := []string{"repository", "bucket"}
	// skip node name and the "to" keyword; rest is key/value pairs
	argSlice := utl.GetFullArgumentSlice(c)[2:]
	options, _ := utl.ParseVariableArguments(keySlice, keySlice, argSlice)
	var req somaproto.ProtoRequestJob
	req.JobType = "node"
	req.Node.Action = "assign"
	req.Node.Node.Config.RepositoryName = options["repository"]
	req.Node.Node.Config.BucketName = options["bucket"]
	_ = utl.PostRequestWithBody(req, "/jobs/")
	// TODO save jobid locally as outstanding
}
// cmdNodeList requests the list of all registered nodes.
func cmdNodeList(c *cli.Context) {
	utl.ValidateCliArgumentCount(c, 0)
	_ = utl.GetRequest("/nodes/")
}
// cmdNodeShow requests the details of a single node (by UUID or name).
func cmdNodeShow(c *cli.Context) {
	utl.ValidateCliArgumentCount(c, 1)
	id := utl.TryGetNodeByUUIDOrName(c.Args().First())
	path := fmt.Sprintf("/nodes/%s", id.String())
	_ = utl.GetRequest(path)
}
// cmdNodePropertyAdd attaches a property to a node.
// Syntax: <type> <property> to <node> [view <v>] [value <v>]
//         [inheritance <bool>] [childrenonly <bool>]
// where <type> is one of service, system, custom. view is always
// required; value is required for non-service properties.
// NOTE(review): if a value IS supplied for a service property it is
// accepted by the parser but silently dropped (prop.Value is only set
// when value was required) — confirm this is intended.
func cmdNodePropertyAdd(c *cli.Context) {
	// preliminary argv validation
	switch utl.GetCliArgumentCount(c) {
	case 4, 6, 8, 10, 12:
		break
	default:
		utl.Abort("Syntax error, unexpected argument count")
	}
	utl.ValidateCliArgument(c, 3, "to")
	argSlice := utl.GetFullArgumentSlice(c)
	// define property types
	typSlice := []string{"service", "system", "custom"}
	// first argument must be a valid property type
	utl.ValidateStringInSlice(argSlice[0], typSlice)
	// TODO: validate property of that type and name exists
	propertyType := argSlice[0]
	property := argSlice[1]
	// get which node is being modified
	id := utl.TryGetNodeByUUIDOrName(argSlice[3])
	path := fmt.Sprintf("/nodes/%s/property/", id.String())
	// variable key/value part of argv
	argSlice = argSlice[4:]
	// define accepted and required keys
	keySlice := []string{"inheritance", "childrenonly", "view", "value"}
	reqSlice := []string{"view"}
	if propertyType != "service" {
		// non service properties require values, services are
		// predefined and do not
		reqSlice = append(reqSlice, "value")
	}
	options, optional := utl.ParseVariableArguments(keySlice, reqSlice, argSlice)
	// build node property JSON
	var prop somaproto.ProtoNodeProperty
	prop.Type = propertyType
	prop.View = options["view"] //required
	prop.Property = property
	// add value if it was required
	if utl.SliceContainsString("value", reqSlice) {
		prop.Value = options["value"]
	}
	// optional inheritance, default true
	if utl.SliceContainsString("inheritance", optional) {
		utl.ValidateStringAsBool(options["inheritance"])
		prop.Inheritance, _ = strconv.ParseBool(options["inheritance"])
	} else {
		prop.Inheritance = true
	}
	// optional childrenonly, default false
	if utl.SliceContainsString("childrenonly", optional) {
		utl.ValidateStringAsBool(options["childrenonly"])
		prop.ChildrenOnly, _ = strconv.ParseBool(options["childrenonly"])
	} else {
		prop.ChildrenOnly = false
	}
	// build request JSON
	var req somaproto.ProtoRequestNode
	req.Node.Properties = append(req.Node.Properties, prop)
	_ = utl.PostRequestWithBody(req, path)
	// TODO save jobid locally as outstanding
}
// cmdNodePropertyGet fetches one node property. TODO: not yet implemented.
func cmdNodePropertyGet(c *cli.Context) {
}

// cmdNodePropertyDel removes a node property. TODO: not yet implemented.
func cmdNodePropertyDel(c *cli.Context) {
}

// cmdNodePropertyList lists node properties. TODO: not yet implemented.
func cmdNodePropertyList(c *cli.Context) {
}

// cmdNodePropertyShow shows a node property. TODO: not yet implemented.
func cmdNodePropertyShow(c *cli.Context) {
}
// vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix
|
package main
import (
"fmt"
"os"
"strconv"
"strings"
"time"
"github.com/boltdb/bolt"
"github.com/codegangsta/cli"
)
// registerUsers wires the `users` command tree (create, delete, purge,
// activate, list, show) into the application and returns a pointer to
// the extended app. Commands that are not yet ported remain commented
// out below.
// Fix: corrected the user-facing typo "deativated" -> "deactivated" in
// the activate subcommand's usage text.
func registerUsers(app cli.App) *cli.App {
	app.Commands = append(app.Commands,
		[]cli.Command{
			// users
			{
				Name:  "users",
				Usage: "SUBCOMMANDS for users",
				Subcommands: []cli.Command{
					{
						Name:   "create",
						Usage:  "Create a new user",
						Action: runtime(cmdUserAdd),
					},
					{
						Name:   "delete",
						Usage:  "Mark a user as deleted",
						Action: runtime(cmdUserMarkDeleted),
					},
					{
						Name:   "purge",
						Usage:  "Purge a user marked as deleted",
						Action: runtime(cmdUserPurgeDeleted),
						Flags: []cli.Flag{
							cli.BoolFlag{
								Name:  "all, a",
								Usage: "Purge all deleted users",
							},
						},
					},
					/*
						{
							Name:   "restore",
							Usage:  "Restore a user marked as deleted",
							Action: cmdUserRestoreDeleted,
							Flags: []cli.Flag{
								cli.BoolFlag{
									Name:  "all, a",
									Usage: "Restore all deleted users",
								},
							},
						},
						{
							Name:   "update",
							Usage:  "Set/change user information",
							Action: cmdUserUpdate,
						},
						{
							Name:   "rename",
							Usage:  "Change a user's username",
							Action: cmdUserRename,
						},
					*/
					{
						Name:   "activate",
						Usage:  "Activate a deactivated user",
						Action: cmdUserActivate,
						Flags: []cli.Flag{
							cli.BoolFlag{
								Name:  "force, f",
								Usage: "Apply administrative force to the activation",
							},
						},
					},
					/*
						{
							Name:   "deactivate",
							Usage:  "Deactivate a user account",
							Action: cmdUserDeactivate,
						},
						{
							Name:  "password",
							Usage: "SUBCOMMANDS for user passwords",
							Subcommands: []cli.Command{
								{
									Name:   "update",
									Usage:  "Update the password of one's own user account",
									Action: cmdUserPasswordUpdate,
								},
								{
									Name:   "reset",
									Usage:  "Trigger a password reset for a user",
									Action: cmdUserPasswordReset,
								},
								{
									Name:   "force",
									Usage:  "Forcefully set the password of a user",
									Action: cmdUserPasswordForce,
								},
							},
						}, // end users password
					*/
					{
						Name:   "list",
						Usage:  "List all registered users",
						Action: runtime(cmdUserList),
					},
					{
						Name:   "show",
						Usage:  "Show information about a specific user",
						Action: runtime(cmdUserShow),
					},
				},
			}, // end users
		}...,
	)
	return &app
}
// cmdUserAdd creates a new user account. The first positional argument
// is the username; the remaining arguments are key/value pairs.
// Required: firstname, lastname, employeenr, mailaddr, team.
// Optional: active (default true), system (default false).
// New users are always created with IsDeleted = false.
func cmdUserAdd(c *cli.Context) error {
	utl.ValidateCliMinArgumentCount(c, 11)
	multiple := []string{}
	unique := []string{"firstname", "lastname", "employeenr",
		"mailaddr", "team", "active", "system"}
	required := []string{"firstname", "lastname", "employeenr",
		"mailaddr", "team"}
	var err error
	// parse everything after the username into keyword options
	opts := utl.ParseVariadicArguments(
		multiple,
		unique,
		required,
		c.Args().Tail())
	// validate
	utl.ValidateStringAsEmployeeNumber(opts["employeenr"][0])
	utl.ValidateStringAsMailAddress(opts["mailaddr"][0])
	req := proto.Request{}
	req.User = &proto.User{}
	req.User.UserName = c.Args().First()
	req.User.FirstName = opts["firstname"][0]
	req.User.LastName = opts["lastname"][0]
	// team may be given as name or UUID; resolve to the team id
	req.User.TeamId = utl.TryGetTeamByUUIDOrName(Client, opts["team"][0])
	req.User.MailAddress = opts["mailaddr"][0]
	req.User.EmployeeNumber = opts["employeenr"][0]
	req.User.IsDeleted = false
	// optional arguments
	if _, ok := opts["active"]; ok {
		req.User.IsActive, err = strconv.ParseBool(opts["active"][0])
		utl.AbortOnError(err, "Syntax error, active argument not boolean")
	} else {
		req.User.IsActive = true
	}
	if _, ok := opts["system"]; ok {
		req.User.IsSystem, err = strconv.ParseBool(opts["system"][0])
		utl.AbortOnError(err, "Syntax error, system argument not boolean")
	} else {
		req.User.IsSystem = false
	}
	resp := utl.PostRequestWithBody(Client, req, "/users/")
	fmt.Println(resp)
	return nil
}
// cmdUserMarkDeleted flags the given user (by UUID or name) as deleted.
func cmdUserMarkDeleted(c *cli.Context) error {
	utl.ValidateCliArgumentCount(c, 1)
	target := utl.TryGetUserByUUIDOrName(Client, c.Args().First())
	resp := utl.DeleteRequest(Client, fmt.Sprintf("/users/%s", target))
	fmt.Println(resp)
	return nil
}
// cmdUserPurgeDeleted permanently removes a delete-flagged user
// (by UUID or name) via a DELETE request with the Purge flag set.
// NOTE(review): the CLI registers an --all flag for this command, but
// this handler always demands exactly one argument — confirm whether
// --all handling is still pending.
func cmdUserPurgeDeleted(c *cli.Context) error {
	utl.ValidateCliArgumentCount(c, 1)
	userId := utl.TryGetUserByUUIDOrName(Client, c.Args().First())
	path := fmt.Sprintf("/users/%s", userId)
	req := proto.Request{
		Flags: &proto.Flags{
			Purge: true,
		},
	}
	resp := utl.DeleteRequestWithBody(Client, req, path)
	fmt.Println(resp)
	return nil
}
/*
func cmdUserRestoreDeleted(c *cli.Context) {
url := getApiUrl()
var (
id uuid.UUID
err error
)
if c.Bool("all") {
utl.ValidateCliArgumentCount(c, 0)
url.Path = fmt.Sprintf("/users")
} else {
switch utl.GetCliArgumentCount(c) {
case 1:
id, err = uuid.FromString(c.Args().First())
utl.AbortOnError(err, "Syntax error, argument not a uuid")
case 2:
utl.ValidateCliArgument(c, 1, "by-name")
id = utl.GetUserIdByName(c.Args().Get(1))
default:
utl.Abort("Syntax error, unexpected argument count")
}
url.Path = fmt.Sprintf("/users/%s", id.String())
}
var req somaproto.ProtoRequestUser
req.Restore = true
_ = utl.PatchRequestWithBody(Client, req, url.String())
}
func cmdUserUpdate(c *cli.Context) {
url := getApiUrl()
var (
id uuid.UUID
err error
)
argSlice := make([]string, 0)
keySlice := []string{"firstname", "lastname", "employeenr", "mailaddr", "team"}
reqSlice := make([]string, 0)
switch utl.GetCliArgumentCount(c) {
case 1, 3, 5, 7, 9, 11:
id, err = uuid.FromString(c.Args().First())
utl.AbortOnError(err, "Syntax error, argument not a uuid")
argSlice = c.Args().Tail()
case 2, 4, 6, 8, 10, 12:
utl.ValidateCliArgument(c, 1, "by-name")
id = utl.GetUserIdByName(c.Args().Tail()[0])
argSlice = c.Args().Tail()[1:]
default:
utl.Abort("Syntax error, unexpected argument count")
}
url.Path = fmt.Sprintf("/users/%s", id.String())
options, opts := utl.ParseVariableArguments(keySlice, reqSlice, argSlice)
var req somaproto.ProtoRequestUser
for _, v := range opts {
switch v {
case "firstname":
req.User.FirstName = options["firstname"]
case "lastname":
req.User.LastName = options["lastname"]
case "employeenr":
utl.ValidateStringAsEmployeeNumber(options["employeenr"])
req.User.EmployeeNumber = options["employeenr"]
case "mailaddr":
utl.ValidateStringAsMailAddress(options["mailaddr"])
req.User.MailAddress = options["mailaddr"]
case "team":
req.User.Team = options["team"]
}
}
_ = utl.PatchRequestWithBody(Client, req, url.String())
}
func cmdUserRename(c *cli.Context) {
url := getApiUrl()
var (
id uuid.UUID
err error
newName string
)
switch utl.GetCliArgumentCount(c) {
case 3:
utl.ValidateCliArgument(c, 2, "to")
id, err = uuid.FromString(c.Args().First())
utl.AbortOnError(err, "Syntax error, argument not a uuid")
newName = c.Args().Get(2)
case 4:
utl.ValidateCliArgument(c, 1, "by-name")
utl.ValidateCliArgument(c, 3, "to")
id = utl.GetUserIdByName(c.Args().Get(1))
newName = c.Args().Get(3)
default:
utl.Abort("Syntax error, unexpected argument count")
}
url.Path = fmt.Sprintf("/users/%s", id.String())
var req somaproto.ProtoRequestUser
req.User.UserName = newName
_ = utl.PatchRequestWithBody(Client, req, url.String())
}
*/
// cmdUserActivate dispatches account activation: administrators (the
// global --admin flag) activate a named account through the full
// runtime; a regular first-time user activates their own account
// through the reduced boottime runtime with no arguments.
func cmdUserActivate(c *cli.Context) error {
	// administrative use, full runtime is available
	if c.GlobalIsSet(`admin`) {
		utl.ValidateCliArgumentCount(c, 1)
		return runtime(cmdUserActivateAdmin)(c)
	}
	// user trying to activate the account for the first
	// time, reduced runtime
	utl.ValidateCliArgumentCount(c, 0)
	return boottime(cmdUserActivateUser)(c)
}
// cmdUserActivateUser walks a first-time user through activating their
// own account: determine the account name, read and strength-check the
// desired password (looping until accepted), collect the ownership
// credential (LDAP password or mail token depending on Cfg.Activation),
// activate the account, validate the returned token, and persist it in
// the local BoltDB token store.
// Fix: the password prompt was missing its trailing newline, so the
// adm.ReadVerified prompt ran into the same line.
func cmdUserActivateUser(c *cli.Context) error {
	var err error
	var password string
	var passKey string
	var happy bool
	var cred *auth.Token
	if Cfg.Auth.User == "" {
		fmt.Println(`Please specify which account to activate.`)
		if Cfg.Auth.User, err = adm.Read(`user`); err != nil {
			return err
		}
	} else {
		fmt.Printf("Starting with activation of account '%s' in 2 seconds.\n", Cfg.Auth.User)
		fmt.Printf(`Use --user flag to activate a different account.`)
		time.Sleep(2 * time.Second)
	}
	// a colon would corrupt the user:token credential encoding
	if strings.Contains(Cfg.Auth.User, `:`) {
		return fmt.Errorf(`Usernames must not contain : character.`)
	}
	fmt.Printf("\nPlease provide the password you want to use.\n")
password_read:
	password = adm.ReadVerified(`password`)
	// loop until the password passes the strength evaluation
	if happy, err = adm.EvaluatePassword(3, password, Cfg.Auth.User, `soma`); err != nil {
		return err
	} else if !happy {
		password = ""
		goto password_read
	}
	fmt.Printf("\nTo confirm that this is your account, an additional credential is required" +
		" this once.\n")
	switch Cfg.Activation {
	case `ldap`:
		fmt.Println(`Please provide your LDAP password to establish ownership.`)
		passKey = adm.ReadVerified(`password`)
	case `mailtoken`:
		fmt.Println(`Please provide the token you received via email.`)
		passKey = adm.ReadVerified(`token`)
	default:
		return fmt.Errorf(`Unknown activation mode`)
	}
	if cred, err = adm.ActivateAccount(Client, &auth.Token{
		UserName: Cfg.Auth.User,
		Password: password,
		Token:    passKey,
	}); err != nil {
		return err
	}
	// validate received token
	if err = adm.ValidateToken(Client, Cfg.Auth.User, cred.Token); err != nil {
		return err
	}
	// save received token
	if err = store.Open(
		Cfg.Run.PathBoltDB,
		os.FileMode(uint32(Cfg.Run.ModeBoltDB)),
		&bolt.Options{Timeout: Cfg.Run.TimeoutBoltDB},
	); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to open database: %s\n", err)
		fmt.Fprintln(os.Stderr, `Failed to save received token`)
		return err
	}
	defer store.Close()
	if err = store.SaveToken(
		Cfg.Auth.User,
		cred.ValidFrom,
		cred.ExpiresAt,
		cred.Token,
	); err != nil {
		return err
	}
	return nil
}
// cmdUserActivateAdmin performs administrative account activation.
// TODO: not yet implemented — currently a no-op returning nil.
func cmdUserActivateAdmin(c *cli.Context) error {
	return nil
}
/*
func cmdUserDeactivate(c *cli.Context) {
url := getApiUrl()
id := utl.UserIdByUuidOrName(c)
url.Path = fmt.Sprintf("/users/%s", id.String())
var req somaproto.ProtoRequestUser
req.User.IsActive = false
_ = utl.PatchRequestWithBody(Client, req, url.String())
}
*/
// cmdUserList requests and prints the list of all registered users.
func cmdUserList(c *cli.Context) error {
	resp := utl.GetRequest(Client, "/users/")
	fmt.Println(resp)
	return nil
}
// cmdUserShow requests and prints the details of a single user
// (by UUID or name).
func cmdUserShow(c *cli.Context) error {
	utl.ValidateCliArgumentCount(c, 1)
	id := utl.TryGetUserByUUIDOrName(Client, c.Args().First())
	path := fmt.Sprintf("/users/%s", id)
	resp := utl.GetRequest(Client, path)
	fmt.Println(resp)
	return nil
}
/*
func cmdUserPasswordUpdate(c *cli.Context) {
id := utl.UserIdByUuidOrName(c)
path := fmt.Sprintf("/users/%s/password", id.String())
pass := utl.GetNewPassword()
var req somaproto.ProtoRequestUser
req.Credentials.Password = pass
_ = utl.PutRequestWithBody(Client, req, path)
}
func cmdUserPasswordReset(c *cli.Context) {
id := utl.UserIdByUuidOrName(c)
path := fmt.Sprintf("/users/%s/password", id.String())
var req somaproto.ProtoRequestUser
req.Credentials.Reset = true
_ = utl.PutRequestWithBody(Client, req, path)
}
func cmdUserPasswordForce(c *cli.Context) {
id := utl.UserIdByUuidOrName(c)
path := fmt.Sprintf("/users/%s/password", id.String())
pass := utl.GetNewPassword()
var req somaproto.ProtoRequestUser
req.Credentials.Force = true
req.Credentials.Password = pass
_ = utl.PutRequestWithBody(Client, req, path)
}
*/
// vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix
Prettify activation dialogue
package main
import (
"fmt"
"os"
"strconv"
"strings"
"time"
"github.com/boltdb/bolt"
"github.com/codegangsta/cli"
)
// registerUsers wires the `users` command tree (create, delete, purge,
// activate, list, show) into the application and returns a pointer to
// the extended app. Commands that are not yet ported remain commented
// out below.
// Fix: corrected the user-facing typo "deativated" -> "deactivated" in
// the activate subcommand's usage text.
func registerUsers(app cli.App) *cli.App {
	app.Commands = append(app.Commands,
		[]cli.Command{
			// users
			{
				Name:  "users",
				Usage: "SUBCOMMANDS for users",
				Subcommands: []cli.Command{
					{
						Name:   "create",
						Usage:  "Create a new user",
						Action: runtime(cmdUserAdd),
					},
					{
						Name:   "delete",
						Usage:  "Mark a user as deleted",
						Action: runtime(cmdUserMarkDeleted),
					},
					{
						Name:   "purge",
						Usage:  "Purge a user marked as deleted",
						Action: runtime(cmdUserPurgeDeleted),
						Flags: []cli.Flag{
							cli.BoolFlag{
								Name:  "all, a",
								Usage: "Purge all deleted users",
							},
						},
					},
					/*
						{
							Name:   "restore",
							Usage:  "Restore a user marked as deleted",
							Action: cmdUserRestoreDeleted,
							Flags: []cli.Flag{
								cli.BoolFlag{
									Name:  "all, a",
									Usage: "Restore all deleted users",
								},
							},
						},
						{
							Name:   "update",
							Usage:  "Set/change user information",
							Action: cmdUserUpdate,
						},
						{
							Name:   "rename",
							Usage:  "Change a user's username",
							Action: cmdUserRename,
						},
					*/
					{
						Name:   "activate",
						Usage:  "Activate a deactivated user",
						Action: cmdUserActivate,
						Flags: []cli.Flag{
							cli.BoolFlag{
								Name:  "force, f",
								Usage: "Apply administrative force to the activation",
							},
						},
					},
					/*
						{
							Name:   "deactivate",
							Usage:  "Deactivate a user account",
							Action: cmdUserDeactivate,
						},
						{
							Name:  "password",
							Usage: "SUBCOMMANDS for user passwords",
							Subcommands: []cli.Command{
								{
									Name:   "update",
									Usage:  "Update the password of one's own user account",
									Action: cmdUserPasswordUpdate,
								},
								{
									Name:   "reset",
									Usage:  "Trigger a password reset for a user",
									Action: cmdUserPasswordReset,
								},
								{
									Name:   "force",
									Usage:  "Forcefully set the password of a user",
									Action: cmdUserPasswordForce,
								},
							},
						}, // end users password
					*/
					{
						Name:   "list",
						Usage:  "List all registered users",
						Action: runtime(cmdUserList),
					},
					{
						Name:   "show",
						Usage:  "Show information about a specific user",
						Action: runtime(cmdUserShow),
					},
				},
			}, // end users
		}...,
	)
	return &app
}
// cmdUserAdd creates a new user account. The first positional argument
// is the username; the remaining arguments are key/value pairs.
// Required: firstname, lastname, employeenr, mailaddr, team.
// Optional: active (default true), system (default false).
// New users are always created with IsDeleted = false.
func cmdUserAdd(c *cli.Context) error {
	utl.ValidateCliMinArgumentCount(c, 11)
	multiple := []string{}
	unique := []string{"firstname", "lastname", "employeenr",
		"mailaddr", "team", "active", "system"}
	required := []string{"firstname", "lastname", "employeenr",
		"mailaddr", "team"}
	var err error
	// parse everything after the username into keyword options
	opts := utl.ParseVariadicArguments(
		multiple,
		unique,
		required,
		c.Args().Tail())
	// validate
	utl.ValidateStringAsEmployeeNumber(opts["employeenr"][0])
	utl.ValidateStringAsMailAddress(opts["mailaddr"][0])
	req := proto.Request{}
	req.User = &proto.User{}
	req.User.UserName = c.Args().First()
	req.User.FirstName = opts["firstname"][0]
	req.User.LastName = opts["lastname"][0]
	// team may be given as name or UUID; resolve to the team id
	req.User.TeamId = utl.TryGetTeamByUUIDOrName(Client, opts["team"][0])
	req.User.MailAddress = opts["mailaddr"][0]
	req.User.EmployeeNumber = opts["employeenr"][0]
	req.User.IsDeleted = false
	// optional arguments
	if _, ok := opts["active"]; ok {
		req.User.IsActive, err = strconv.ParseBool(opts["active"][0])
		utl.AbortOnError(err, "Syntax error, active argument not boolean")
	} else {
		req.User.IsActive = true
	}
	if _, ok := opts["system"]; ok {
		req.User.IsSystem, err = strconv.ParseBool(opts["system"][0])
		utl.AbortOnError(err, "Syntax error, system argument not boolean")
	} else {
		req.User.IsSystem = false
	}
	resp := utl.PostRequestWithBody(Client, req, "/users/")
	fmt.Println(resp)
	return nil
}
// cmdUserMarkDeleted flags the given user (by UUID or name) as deleted.
func cmdUserMarkDeleted(c *cli.Context) error {
	utl.ValidateCliArgumentCount(c, 1)
	userId := utl.TryGetUserByUUIDOrName(Client, c.Args().First())
	path := fmt.Sprintf("/users/%s", userId)
	resp := utl.DeleteRequest(Client, path)
	fmt.Println(resp)
	return nil
}
// cmdUserPurgeDeleted permanently removes a delete-flagged user
// (by UUID or name) via a DELETE request with the Purge flag set.
// NOTE(review): the CLI registers an --all flag for this command, but
// this handler always demands exactly one argument — confirm whether
// --all handling is still pending.
func cmdUserPurgeDeleted(c *cli.Context) error {
	utl.ValidateCliArgumentCount(c, 1)
	userId := utl.TryGetUserByUUIDOrName(Client, c.Args().First())
	path := fmt.Sprintf("/users/%s", userId)
	req := proto.Request{
		Flags: &proto.Flags{
			Purge: true,
		},
	}
	resp := utl.DeleteRequestWithBody(Client, req, path)
	fmt.Println(resp)
	return nil
}
/*
func cmdUserRestoreDeleted(c *cli.Context) {
url := getApiUrl()
var (
id uuid.UUID
err error
)
if c.Bool("all") {
utl.ValidateCliArgumentCount(c, 0)
url.Path = fmt.Sprintf("/users")
} else {
switch utl.GetCliArgumentCount(c) {
case 1:
id, err = uuid.FromString(c.Args().First())
utl.AbortOnError(err, "Syntax error, argument not a uuid")
case 2:
utl.ValidateCliArgument(c, 1, "by-name")
id = utl.GetUserIdByName(c.Args().Get(1))
default:
utl.Abort("Syntax error, unexpected argument count")
}
url.Path = fmt.Sprintf("/users/%s", id.String())
}
var req somaproto.ProtoRequestUser
req.Restore = true
_ = utl.PatchRequestWithBody(Client, req, url.String())
}
func cmdUserUpdate(c *cli.Context) {
url := getApiUrl()
var (
id uuid.UUID
err error
)
argSlice := make([]string, 0)
keySlice := []string{"firstname", "lastname", "employeenr", "mailaddr", "team"}
reqSlice := make([]string, 0)
switch utl.GetCliArgumentCount(c) {
case 1, 3, 5, 7, 9, 11:
id, err = uuid.FromString(c.Args().First())
utl.AbortOnError(err, "Syntax error, argument not a uuid")
argSlice = c.Args().Tail()
case 2, 4, 6, 8, 10, 12:
utl.ValidateCliArgument(c, 1, "by-name")
id = utl.GetUserIdByName(c.Args().Tail()[0])
argSlice = c.Args().Tail()[1:]
default:
utl.Abort("Syntax error, unexpected argument count")
}
url.Path = fmt.Sprintf("/users/%s", id.String())
options, opts := utl.ParseVariableArguments(keySlice, reqSlice, argSlice)
var req somaproto.ProtoRequestUser
for _, v := range opts {
switch v {
case "firstname":
req.User.FirstName = options["firstname"]
case "lastname":
req.User.LastName = options["lastname"]
case "employeenr":
utl.ValidateStringAsEmployeeNumber(options["employeenr"])
req.User.EmployeeNumber = options["employeenr"]
case "mailaddr":
utl.ValidateStringAsMailAddress(options["mailaddr"])
req.User.MailAddress = options["mailaddr"]
case "team":
req.User.Team = options["team"]
}
}
_ = utl.PatchRequestWithBody(Client, req, url.String())
}
func cmdUserRename(c *cli.Context) {
url := getApiUrl()
var (
id uuid.UUID
err error
newName string
)
switch utl.GetCliArgumentCount(c) {
case 3:
utl.ValidateCliArgument(c, 2, "to")
id, err = uuid.FromString(c.Args().First())
utl.AbortOnError(err, "Syntax error, argument not a uuid")
newName = c.Args().Get(2)
case 4:
utl.ValidateCliArgument(c, 1, "by-name")
utl.ValidateCliArgument(c, 3, "to")
id = utl.GetUserIdByName(c.Args().Get(1))
newName = c.Args().Get(3)
default:
utl.Abort("Syntax error, unexpected argument count")
}
url.Path = fmt.Sprintf("/users/%s", id.String())
var req somaproto.ProtoRequestUser
req.User.UserName = newName
_ = utl.PatchRequestWithBody(Client, req, url.String())
}
*/
// cmdUserActivate dispatches account activation: administrators (the
// global --admin flag) activate a named account through the full
// runtime; a regular first-time user activates their own account
// through the reduced boottime runtime with no arguments.
func cmdUserActivate(c *cli.Context) error {
	// administrative use, full runtime is available
	if c.GlobalIsSet(`admin`) {
		utl.ValidateCliArgumentCount(c, 1)
		return runtime(cmdUserActivateAdmin)(c)
	}
	// user trying to activate the account for the first
	// time, reduced runtime
	utl.ValidateCliArgumentCount(c, 0)
	return boottime(cmdUserActivateUser)(c)
}
// cmdUserActivateUser performs first-time account activation for a
// regular user. It determines the account to activate (from --user or
// by prompting), reads and strength-checks the desired password,
// collects an ownership-proving credential (LDAP password or mail
// token, selected by Cfg.Activation), activates the account against
// the server, validates the returned token and persists it in the
// local BoltDB token store.
//
// Returns the first error encountered; nil on success.
func cmdUserActivateUser(c *cli.Context) error {
	var err error
	var password string
	var passKey string
	var happy bool
	var cred *auth.Token

	// determine which account to activate
	if Cfg.Auth.User == "" {
		fmt.Println(`Please specify which account to activate.`)
		if Cfg.Auth.User, err = adm.Read(`user`); err != nil {
			return err
		}
	} else {
		fmt.Printf("Starting with activation of account '%s' in 2 seconds.\n", Cfg.Auth.User)
		fmt.Printf(`Use --user flag to activate a different account.`)
		time.Sleep(2 * time.Second)
	}
	// reject usernames containing the credential separator character
	if strings.Contains(Cfg.Auth.User, `:`) {
		return fmt.Errorf(`Usernames must not contain : character.`)
	}

	// prompt repeatedly until the password passes the strength
	// evaluation (was a backward goto; a loop is the idiomatic form)
	fmt.Printf("\nPlease provide the password you want to use.\n")
	for {
		password = adm.ReadVerified(`password`)
		if happy, err = adm.EvaluatePassword(3, password, Cfg.Auth.User, `soma`); err != nil {
			return err
		} else if happy {
			break
		}
		password = ""
	}

	// collect the additional credential proving account ownership
	fmt.Printf("\nTo confirm that this is your account, an additional credential is required" +
		" this once.\n")
	switch Cfg.Activation {
	case `ldap`:
		fmt.Println(`Please provide your LDAP password to establish ownership.`)
		passKey = adm.ReadVerified(`password`)
	case `mailtoken`:
		fmt.Println(`Please provide the token you received via email.`)
		passKey = adm.ReadVerified(`token`)
	default:
		return fmt.Errorf(`Unknown activation mode`)
	}

	// activate the account against the server
	if cred, err = adm.ActivateAccount(Client, &auth.Token{
		UserName: Cfg.Auth.User,
		Password: password,
		Token:    passKey,
	}); err != nil {
		return err
	}
	// validate received token
	if err = adm.ValidateToken(Client, Cfg.Auth.User, cred.Token); err != nil {
		return err
	}
	// save received token in the local BoltDB store
	if err = store.Open(
		Cfg.Run.PathBoltDB,
		os.FileMode(uint32(Cfg.Run.ModeBoltDB)),
		&bolt.Options{Timeout: Cfg.Run.TimeoutBoltDB},
	); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to open database: %s\n", err)
		fmt.Fprintln(os.Stderr, `Failed to save received token`)
		return err
	}
	defer store.Close()
	return store.SaveToken(
		Cfg.Auth.User,
		cred.ValidFrom,
		cred.ExpiresAt,
		cred.Token,
	)
}
// cmdUserActivateAdmin handles account activation in administrative
// mode (invoked via the full runtime from cmdUserActivate).
// NOTE(review): currently an unimplemented stub — it performs no work
// and always returns nil.
func cmdUserActivateAdmin(c *cli.Context) error {
	return nil
}
/*
func cmdUserDeactivate(c *cli.Context) {
url := getApiUrl()
id := utl.UserIdByUuidOrName(c)
url.Path = fmt.Sprintf("/users/%s", id.String())
var req somaproto.ProtoRequestUser
req.User.IsActive = false
_ = utl.PatchRequestWithBody(Client, req, url.String())
}
*/
// cmdUserList requests the full user list from the server and prints
// the raw response body.
func cmdUserList(c *cli.Context) error {
	fmt.Println(utl.GetRequest(Client, "/users/"))
	return nil
}
// cmdUserShow resolves the single CLI argument (a UUID or a username)
// to a user id, fetches that user's record from the server and prints
// the raw response.
func cmdUserShow(c *cli.Context) error {
	utl.ValidateCliArgumentCount(c, 1)
	userID := utl.TryGetUserByUUIDOrName(Client, c.Args().First())
	resp := utl.GetRequest(Client, fmt.Sprintf("/users/%s", userID))
	fmt.Println(resp)
	return nil
}
/*
func cmdUserPasswordUpdate(c *cli.Context) {
id := utl.UserIdByUuidOrName(c)
path := fmt.Sprintf("/users/%s/password", id.String())
pass := utl.GetNewPassword()
var req somaproto.ProtoRequestUser
req.Credentials.Password = pass
_ = utl.PutRequestWithBody(Client, req, path)
}
func cmdUserPasswordReset(c *cli.Context) {
id := utl.UserIdByUuidOrName(c)
path := fmt.Sprintf("/users/%s/password", id.String())
var req somaproto.ProtoRequestUser
req.Credentials.Reset = true
_ = utl.PutRequestWithBody(Client, req, path)
}
func cmdUserPasswordForce(c *cli.Context) {
id := utl.UserIdByUuidOrName(c)
path := fmt.Sprintf("/users/%s/password", id.String())
pass := utl.GetNewPassword()
var req somaproto.ProtoRequestUser
req.Credentials.Force = true
req.Credentials.Password = pass
_ = utl.PutRequestWithBody(Client, req, path)
}
*/
// vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix
|
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package errors

// Code is a machine-readable identifier for a category of error
// produced during type-checking (see the constant declarations that
// follow for the individual codes).
type Code int
// This file defines the error codes that can be produced during type-checking.
// Collectively, these codes provide an identifier that may be used to
// implement special handling for certain types of errors.
//
// Error code values should not be changed: add new codes at the end.
//
// Error codes should be fine-grained enough that the exact nature of the error
// can be easily determined, but coarse enough that they are not an
// implementation detail of the type checking algorithm. As a rule-of-thumb,
// errors should be considered equivalent if there is a theoretical refactoring
// of the type checker in which they are emitted in exactly one place. For
// example, the type checker emits different error messages for "too many
// arguments" and "too few arguments", but one can imagine an alternative type
// checker where this check instead just emits a single "wrong number of
// arguments", so these errors should have the same code.
//
// Error code names should be as brief as possible while retaining accuracy and
// distinctiveness. In most cases names should start with an adjective
// describing the nature of the error (e.g. "invalid", "unused", "misplaced"),
// and end with a noun identifying the relevant language object. For example,
// "_DuplicateDecl" or "_InvalidSliceExpr". For brevity, naming follows the
// convention that "bad" implies a problem with syntax, and "invalid" implies a
// problem with types.
const (
_ Code = iota
// Test is reserved for errors that only apply while in self-test mode.
Test
// BlankPkgName occurs when a package name is the blank identifier "_".
//
// Per the spec:
// "The PackageName must not be the blank identifier."
BlankPkgName
// MismatchedPkgName occurs when a file's package name doesn't match the
// package name already established by other files.
MismatchedPkgName
// InvalidPkgUse occurs when a package identifier is used outside of a
// selector expression.
//
// Example:
// import "fmt"
//
// var _ = fmt
InvalidPkgUse
// BadImportPath occurs when an import path is not valid.
BadImportPath
// BrokenImport occurs when importing a package fails.
//
// Example:
// import "amissingpackage"
BrokenImport
// ImportCRenamed occurs when the special import "C" is renamed. "C" is a
// pseudo-package, and must not be renamed.
//
// Example:
// import _ "C"
ImportCRenamed
// UnusedImport occurs when an import is unused.
//
// Example:
// import "fmt"
//
// func main() {}
UnusedImport
// InvalidInitCycle occurs when an invalid cycle is detected within the
// initialization graph.
//
// Example:
// var x int = f()
//
// func f() int { return x }
InvalidInitCycle
// DuplicateDecl occurs when an identifier is declared multiple times.
//
// Example:
// var x = 1
// var x = 2
DuplicateDecl
// InvalidDeclCycle occurs when a declaration cycle is not valid.
//
// Example:
// type S struct {
// S
// }
//
InvalidDeclCycle
// InvalidTypeCycle occurs when a cycle in type definitions results in a
// type that is not well-defined.
//
// Example:
// import "unsafe"
//
// type T [unsafe.Sizeof(T{})]int
InvalidTypeCycle
// InvalidConstInit occurs when a const declaration has a non-constant
// initializer.
//
// Example:
// var x int
// const _ = x
InvalidConstInit
// InvalidConstVal occurs when a const value cannot be converted to its
// target type.
//
// TODO(findleyr): this error code and example are not very clear. Consider
// removing it.
//
// Example:
// const _ = 1 << "hello"
InvalidConstVal
// InvalidConstType occurs when the underlying type in a const declaration
// is not a valid constant type.
//
// Example:
// const c *int = 4
InvalidConstType
// UntypedNil occurs when the predeclared (untyped) value nil is used to
// initialize a variable declared without an explicit type.
//
// Example:
// var x = nil
UntypedNil
// WrongAssignCount occurs when the number of values on the right-hand side
// of an assignment or initialization expression does not match the number
// of variables on the left-hand side.
//
// Example:
// var x = 1, 2
WrongAssignCount
// UnassignableOperand occurs when the left-hand side of an assignment is
// not assignable.
//
// Example:
// func f() {
// const c = 1
// c = 2
// }
UnassignableOperand
// NoNewVar occurs when a short variable declaration (':=') does not declare
// new variables.
//
// Example:
// func f() {
// x := 1
// x := 2
// }
NoNewVar
// MultiValAssignOp occurs when an assignment operation (+=, *=, etc) does
// not have single-valued left-hand or right-hand side.
//
// Per the spec:
// "In assignment operations, both the left- and right-hand expression lists
// must contain exactly one single-valued expression"
//
// Example:
// func f() int {
// x, y := 1, 2
// x, y += 1
// return x + y
// }
MultiValAssignOp
// InvalidIfaceAssign occurs when a value of type T is used as an
// interface, but T does not implement a method of the expected interface.
//
// Example:
// type I interface {
// f()
// }
//
// type T int
//
// var x I = T(1)
InvalidIfaceAssign
// InvalidChanAssign occurs when a chan assignment is invalid.
//
// Per the spec, a value x is assignable to a channel type T if:
// "x is a bidirectional channel value, T is a channel type, x's type V and
// T have identical element types, and at least one of V or T is not a
// defined type."
//
// Example:
// type T1 chan int
// type T2 chan int
//
// var x T1
// // Invalid assignment because both types are named
// var _ T2 = x
InvalidChanAssign
// IncompatibleAssign occurs when the type of the right-hand side expression
// in an assignment cannot be assigned to the type of the variable being
// assigned.
//
// Example:
// var x []int
// var _ int = x
IncompatibleAssign
// UnaddressableFieldAssign occurs when trying to assign to a struct field
// in a map value.
//
// Example:
// func f() {
// m := make(map[string]struct{i int})
// m["foo"].i = 42
// }
UnaddressableFieldAssign
// NotAType occurs when the identifier used as the underlying type in a type
// declaration or the right-hand side of a type alias does not denote a type.
//
// Example:
// var S = 2
//
// type T S
NotAType
// InvalidArrayLen occurs when an array length is not a constant value.
//
// Example:
// var n = 3
// var _ = [n]int{}
InvalidArrayLen
// BlankIfaceMethod occurs when a method name is '_'.
//
// Per the spec:
// "The name of each explicitly specified method must be unique and not
// blank."
//
// Example:
// type T interface {
// _(int)
// }
BlankIfaceMethod
// IncomparableMapKey occurs when a map key type does not support the == and
// != operators.
//
// Per the spec:
// "The comparison operators == and != must be fully defined for operands of
// the key type; thus the key type must not be a function, map, or slice."
//
// Example:
// var x map[T]int
//
// type T []int
IncomparableMapKey
// InvalidIfaceEmbed occurs when a non-interface type is embedded in an
// interface (for go 1.17 or earlier).
_ // not used anymore
// InvalidPtrEmbed occurs when an embedded field is of the pointer form *T,
// and T itself is itself a pointer, an unsafe.Pointer, or an interface.
//
// Per the spec:
// "An embedded field must be specified as a type name T or as a pointer to
// a non-interface type name *T, and T itself may not be a pointer type."
//
// Example:
// type T *int
//
// type S struct {
// *T
// }
InvalidPtrEmbed
// BadRecv occurs when a method declaration does not have exactly one
// receiver parameter.
//
// Example:
// func () _() {}
BadRecv
// InvalidRecv occurs when a receiver type expression is not of the form T
// or *T, or T is a pointer type.
//
// Example:
// type T struct {}
//
// func (**T) m() {}
InvalidRecv
// DuplicateFieldAndMethod occurs when an identifier appears as both a field
// and method name.
//
// Example:
// type T struct {
// m int
// }
//
// func (T) m() {}
DuplicateFieldAndMethod
// DuplicateMethod occurs when two methods on the same receiver type have
// the same name.
//
// Example:
// type T struct {}
// func (T) m() {}
// func (T) m(i int) int { return i }
DuplicateMethod
// InvalidBlank occurs when a blank identifier is used as a value or type.
//
// Per the spec:
// "The blank identifier may appear as an operand only on the left-hand side
// of an assignment."
//
// Example:
// var x = _
InvalidBlank
// InvalidIota occurs when the predeclared identifier iota is used outside
// of a constant declaration.
//
// Example:
// var x = iota
InvalidIota
// MissingInitBody occurs when an init function is missing its body.
//
// Example:
// func init()
MissingInitBody
// InvalidInitSig occurs when an init function declares parameters or
// results.
//
// Deprecated: no longer emitted by the type checker. _InvalidInitDecl is
// used instead.
InvalidInitSig
// InvalidInitDecl occurs when init is declared as anything other than a
// function.
//
// Example:
// var init = 1
//
// Example:
// func init() int { return 1 }
InvalidInitDecl
// InvalidMainDecl occurs when main is declared as anything other than a
// function, in a main package.
InvalidMainDecl
// TooManyValues occurs when a function returns too many values for the
// expression context in which it is used.
//
// Example:
// func ReturnTwo() (int, int) {
// return 1, 2
// }
//
// var x = ReturnTwo()
TooManyValues
// NotAnExpr occurs when a type expression is used where a value expression
// is expected.
//
// Example:
// type T struct {}
//
// func f() {
// T
// }
NotAnExpr
// TruncatedFloat occurs when a float constant is truncated to an integer
// value.
//
// Example:
// var _ int = 98.6
TruncatedFloat
// NumericOverflow occurs when a numeric constant overflows its target type.
//
// Example:
// var x int8 = 1000
NumericOverflow
// UndefinedOp occurs when an operator is not defined for the type(s) used
// in an operation.
//
// Example:
// var c = "a" - "b"
UndefinedOp
// MismatchedTypes occurs when operand types are incompatible in a binary
// operation.
//
// Example:
// var a = "hello"
// var b = 1
// var c = a - b
MismatchedTypes
// DivByZero occurs when a division operation is provable at compile
// time to be a division by zero.
//
// Example:
// const divisor = 0
// var x int = 1/divisor
DivByZero
// NonNumericIncDec occurs when an increment or decrement operator is
// applied to a non-numeric value.
//
// Example:
// func f() {
// var c = "c"
// c++
// }
NonNumericIncDec
// UnaddressableOperand occurs when the & operator is applied to an
// unaddressable expression.
//
// Example:
// var x = &1
UnaddressableOperand
// InvalidIndirection occurs when a non-pointer value is indirected via the
// '*' operator.
//
// Example:
// var x int
// var y = *x
InvalidIndirection
// NonIndexableOperand occurs when an index operation is applied to a value
// that cannot be indexed.
//
// Example:
// var x = 1
// var y = x[1]
NonIndexableOperand
// InvalidIndex occurs when an index argument is not of integer type,
// negative, or out-of-bounds.
//
// Example:
// var s = [...]int{1,2,3}
// var x = s[5]
//
// Example:
// var s = []int{1,2,3}
// var _ = s[-1]
//
// Example:
// var s = []int{1,2,3}
// var i string
// var _ = s[i]
InvalidIndex
// SwappedSliceIndices occurs when constant indices in a slice expression
// are decreasing in value.
//
// Example:
// var _ = []int{1,2,3}[2:1]
SwappedSliceIndices
// NonSliceableOperand occurs when a slice operation is applied to a value
// whose type is not sliceable, or is unaddressable.
//
// Example:
// var x = [...]int{1, 2, 3}[:1]
//
// Example:
// var x = 1
// var y = 1[:1]
NonSliceableOperand
// InvalidSliceExpr occurs when a three-index slice expression (a[x:y:z]) is
// applied to a string.
//
// Example:
// var s = "hello"
// var x = s[1:2:3]
InvalidSliceExpr
// InvalidShiftCount occurs when the right-hand side of a shift operation is
// either non-integer, negative, or too large.
//
// Example:
// var (
// x string
// y int = 1 << x
// )
InvalidShiftCount
// InvalidShiftOperand occurs when the shifted operand is not an integer.
//
// Example:
// var s = "hello"
// var x = s << 2
InvalidShiftOperand
// InvalidReceive occurs when there is a channel receive from a value that
// is either not a channel, or is a send-only channel.
//
// Example:
// func f() {
// var x = 1
// <-x
// }
InvalidReceive
// InvalidSend occurs when there is a channel send to a value that is not a
// channel, or is a receive-only channel.
//
// Example:
// func f() {
// var x = 1
// x <- "hello!"
// }
InvalidSend
// DuplicateLitKey occurs when an index is duplicated in a slice, array, or
// map literal.
//
// Example:
// var _ = []int{0:1, 0:2}
//
// Example:
// var _ = map[string]int{"a": 1, "a": 2}
DuplicateLitKey
// MissingLitKey occurs when a map literal is missing a key expression.
//
// Example:
// var _ = map[string]int{1}
MissingLitKey
// InvalidLitIndex occurs when the key in a key-value element of a slice or
// array literal is not an integer constant.
//
// Example:
// var i = 0
// var x = []string{i: "world"}
InvalidLitIndex
// OversizeArrayLit occurs when an array literal exceeds its length.
//
// Example:
// var _ = [2]int{1,2,3}
OversizeArrayLit
// MixedStructLit occurs when a struct literal contains a mix of positional
// and named elements.
//
// Example:
// var _ = struct{i, j int}{i: 1, 2}
MixedStructLit
// InvalidStructLit occurs when a positional struct literal has an incorrect
// number of values.
//
// Example:
// var _ = struct{i, j int}{1,2,3}
InvalidStructLit
// MissingLitField occurs when a struct literal refers to a field that does
// not exist on the struct type.
//
// Example:
// var _ = struct{i int}{j: 2}
MissingLitField
// DuplicateLitField occurs when a struct literal contains duplicated
// fields.
//
// Example:
// var _ = struct{i int}{i: 1, i: 2}
DuplicateLitField
// UnexportedLitField occurs when a positional struct literal implicitly
// assigns an unexported field of an imported type.
UnexportedLitField
// InvalidLitField occurs when a field name is not a valid identifier.
//
// Example:
// var _ = struct{i int}{1: 1}
InvalidLitField
// UntypedLit occurs when a composite literal omits a required type
// identifier.
//
// Example:
// type outer struct{
// inner struct { i int }
// }
//
// var _ = outer{inner: {1}}
UntypedLit
// InvalidLit occurs when a composite literal expression does not match its
// type.
//
// Example:
// type P *struct{
// x int
// }
// var _ = P {}
InvalidLit
// AmbiguousSelector occurs when a selector is ambiguous.
//
// Example:
// type E1 struct { i int }
// type E2 struct { i int }
// type T struct { E1; E2 }
//
// var x T
// var _ = x.i
AmbiguousSelector
// UndeclaredImportedName occurs when a package-qualified identifier is
// undeclared by the imported package.
//
// Example:
// import "go/types"
//
// var _ = types.NotAnActualIdentifier
UndeclaredImportedName
// UnexportedName occurs when a selector refers to an unexported identifier
// of an imported package.
//
// Example:
// import "reflect"
//
// type _ reflect.flag
UnexportedName
// UndeclaredName occurs when an identifier is not declared in the current
// scope.
//
// Example:
// var x T
UndeclaredName
// MissingFieldOrMethod occurs when a selector references a field or method
// that does not exist.
//
// Example:
// type T struct {}
//
// var x = T{}.f
MissingFieldOrMethod
// BadDotDotDotSyntax occurs when a "..." occurs in a context where it is
// not valid.
//
// Example:
// var _ = map[int][...]int{0: {}}
BadDotDotDotSyntax
// NonVariadicDotDotDot occurs when a "..." is used on the final argument to
// a non-variadic function.
//
// Example:
// func printArgs(s []string) {
// for _, a := range s {
// println(a)
// }
// }
//
// func f() {
// s := []string{"a", "b", "c"}
// printArgs(s...)
// }
NonVariadicDotDotDot
// MisplacedDotDotDot occurs when a "..." is used somewhere other than the
// final argument in a function declaration.
//
// Example:
// func f(...int, int)
MisplacedDotDotDot
_ // InvalidDotDotDotOperand was removed.
// InvalidDotDotDot occurs when a "..." is used in a non-variadic built-in
// function.
//
// Example:
// var s = []int{1, 2, 3}
// var l = len(s...)
InvalidDotDotDot
// UncalledBuiltin occurs when a built-in function is used as a
// function-valued expression, instead of being called.
//
// Per the spec:
// "The built-in functions do not have standard Go types, so they can only
// appear in call expressions; they cannot be used as function values."
//
// Example:
// var _ = copy
UncalledBuiltin
// InvalidAppend occurs when append is called with a first argument that is
// not a slice.
//
// Example:
// var _ = append(1, 2)
InvalidAppend
// InvalidCap occurs when an argument to the cap built-in function is not of
// supported type.
//
// See https://golang.org/ref/spec#Length_and_capacity for information on
// which underlying types are supported as arguments to cap and len.
//
// Example:
// var s = 2
// var x = cap(s)
InvalidCap
// InvalidClose occurs when close(...) is called with an argument that is
// not of channel type, or that is a receive-only channel.
//
// Example:
// func f() {
// var x int
// close(x)
// }
InvalidClose
// InvalidCopy occurs when the arguments are not of slice type or do not
// have compatible type.
//
// See https://golang.org/ref/spec#Appending_and_copying_slices for more
// information on the type requirements for the copy built-in.
//
// Example:
// func f() {
// var x []int
// y := []int64{1,2,3}
// copy(x, y)
// }
InvalidCopy
// InvalidComplex occurs when the complex built-in function is called with
// arguments with incompatible types.
//
// Example:
// var _ = complex(float32(1), float64(2))
InvalidComplex
// InvalidDelete occurs when the delete built-in function is called with a
// first argument that is not a map.
//
// Example:
// func f() {
// m := "hello"
// delete(m, "e")
// }
InvalidDelete
// InvalidImag occurs when the imag built-in function is called with an
// argument that does not have complex type.
//
// Example:
// var _ = imag(int(1))
InvalidImag
// InvalidLen occurs when an argument to the len built-in function is not of
// supported type.
//
// See https://golang.org/ref/spec#Length_and_capacity for information on
// which underlying types are supported as arguments to cap and len.
//
// Example:
// var s = 2
// var x = len(s)
InvalidLen
// SwappedMakeArgs occurs when make is called with three arguments, and its
// length argument is larger than its capacity argument.
//
// Example:
// var x = make([]int, 3, 2)
SwappedMakeArgs
// InvalidMake occurs when make is called with an unsupported type argument.
//
// See https://golang.org/ref/spec#Making_slices_maps_and_channels for
// information on the types that may be created using make.
//
// Example:
// var x = make(int)
InvalidMake
// InvalidReal occurs when the real built-in function is called with an
// argument that does not have complex type.
//
// Example:
// var _ = real(int(1))
InvalidReal
// InvalidAssert occurs when a type assertion is applied to a
// value that is not of interface type.
//
// Example:
// var x = 1
// var _ = x.(float64)
InvalidAssert
// ImpossibleAssert occurs for a type assertion x.(T) when the value x of
// interface cannot have dynamic type T, due to a missing or mismatching
// method on T.
//
// Example:
// type T int
//
// func (t *T) m() int { return int(*t) }
//
// type I interface { m() int }
//
// var x I
// var _ = x.(T)
ImpossibleAssert
// InvalidConversion occurs when the argument type cannot be converted to the
// target.
//
// See https://golang.org/ref/spec#Conversions for the rules of
// convertibility.
//
// Example:
// var x float64
// var _ = string(x)
InvalidConversion
// InvalidUntypedConversion occurs when there is no valid implicit
// conversion from an untyped value satisfying the type constraints of the
// context in which it is used.
//
// Example:
// var _ = 1 + new(int)
InvalidUntypedConversion
// BadOffsetofSyntax occurs when unsafe.Offsetof is called with an argument
// that is not a selector expression.
//
// Example:
// import "unsafe"
//
// var x int
// var _ = unsafe.Offsetof(x)
BadOffsetofSyntax
// InvalidOffsetof occurs when unsafe.Offsetof is called with a method
// selector, rather than a field selector, or when the field is embedded via
// a pointer.
//
// Per the spec:
//
// "If f is an embedded field, it must be reachable without pointer
// indirections through fields of the struct. "
//
// Example:
// import "unsafe"
//
// type T struct { f int }
// type S struct { *T }
// var s S
// var _ = unsafe.Offsetof(s.f)
//
// Example:
// import "unsafe"
//
// type S struct{}
//
// func (S) m() {}
//
// var s S
// var _ = unsafe.Offsetof(s.m)
InvalidOffsetof
// UnusedExpr occurs when a side-effect free expression is used as a
// statement. Such a statement has no effect.
//
// Example:
// func f(i int) {
// i*i
// }
UnusedExpr
// UnusedVar occurs when a variable is declared but unused.
//
// Example:
// func f() {
// x := 1
// }
UnusedVar
// MissingReturn occurs when a function with results is missing a return
// statement.
//
// Example:
// func f() int {}
MissingReturn
// WrongResultCount occurs when a return statement returns an incorrect
// number of values.
//
// Example:
// func ReturnOne() int {
// return 1, 2
// }
WrongResultCount
// OutOfScopeResult occurs when the name of a value implicitly returned by
// an empty return statement is shadowed in a nested scope.
//
// Example:
// func factor(n int) (i int) {
// for i := 2; i < n; i++ {
// if n%i == 0 {
// return
// }
// }
// return 0
// }
OutOfScopeResult
// InvalidCond occurs when an if condition is not a boolean expression.
//
// Example:
// func checkReturn(i int) {
// if i {
// panic("non-zero return")
// }
// }
InvalidCond
// InvalidPostDecl occurs when there is a declaration in a for-loop post
// statement.
//
// Example:
// func f() {
// for i := 0; i < 10; j := 0 {}
// }
InvalidPostDecl
_ // InvalidChanRange was removed.
// InvalidIterVar occurs when two iteration variables are used while ranging
// over a channel.
//
// Example:
// func f(c chan int) {
// for k, v := range c {
// println(k, v)
// }
// }
InvalidIterVar
// InvalidRangeExpr occurs when the type of a range expression is not array,
// slice, string, map, or channel.
//
// Example:
// func f(i int) {
// for j := range i {
// println(j)
// }
// }
InvalidRangeExpr
// MisplacedBreak occurs when a break statement is not within a for, switch,
// or select statement of the innermost function definition.
//
// Example:
// func f() {
// break
// }
MisplacedBreak
// MisplacedContinue occurs when a continue statement is not within a for
// loop of the innermost function definition.
//
// Example:
// func sumeven(n int) int {
// proceed := func() {
// continue
// }
// sum := 0
// for i := 1; i <= n; i++ {
// if i % 2 != 0 {
// proceed()
// }
// sum += i
// }
// return sum
// }
MisplacedContinue
// MisplacedFallthrough occurs when a fallthrough statement is not within an
// expression switch.
//
// Example:
// func typename(i interface{}) string {
// switch i.(type) {
// case int64:
// fallthrough
// case int:
// return "int"
// }
// return "unsupported"
// }
MisplacedFallthrough
// DuplicateCase occurs when a type or expression switch has duplicate
// cases.
//
// Example:
// func printInt(i int) {
// switch i {
// case 1:
// println("one")
// case 1:
// println("One")
// }
// }
DuplicateCase
// DuplicateDefault occurs when a type or expression switch has multiple
// default clauses.
//
// Example:
// func printInt(i int) {
// switch i {
// case 1:
// println("one")
// default:
// println("One")
// default:
// println("1")
// }
// }
DuplicateDefault
// BadTypeKeyword occurs when a .(type) expression is used anywhere other
// than a type switch.
//
// Example:
// type I interface {
// m()
// }
// var t I
// var _ = t.(type)
BadTypeKeyword
// InvalidTypeSwitch occurs when .(type) is used on an expression that is
// not of interface type.
//
// Example:
// func f(i int) {
// switch x := i.(type) {}
// }
InvalidTypeSwitch
// InvalidExprSwitch occurs when a switch expression is not comparable.
//
// Example:
// func _() {
// var a struct{ _ func() }
// switch a /* ERROR cannot switch on a */ {
// }
// }
InvalidExprSwitch
// InvalidSelectCase occurs when a select case is not a channel send or
// receive.
//
// Example:
// func checkChan(c <-chan int) bool {
// select {
// case c:
// return true
// default:
// return false
// }
// }
InvalidSelectCase
// UndeclaredLabel occurs when an undeclared label is jumped to.
//
// Example:
// func f() {
// goto L
// }
UndeclaredLabel
// DuplicateLabel occurs when a label is declared more than once.
//
// Example:
// func f() int {
// L:
// L:
// return 1
// }
DuplicateLabel
// MisplacedLabel occurs when a break or continue label is not on a for,
// switch, or select statement.
//
// Example:
// func f() {
// L:
// a := []int{1,2,3}
// for _, e := range a {
// if e > 10 {
// break L
// }
// println(a)
// }
// }
MisplacedLabel
// UnusedLabel occurs when a label is declared and not used.
//
// Example:
// func f() {
// L:
// }
UnusedLabel
// JumpOverDecl occurs when a label jumps over a variable declaration.
//
// Example:
// func f() int {
// goto L
// x := 2
// L:
// x++
// return x
// }
JumpOverDecl
// JumpIntoBlock occurs when a forward jump goes to a label inside a nested
// block.
//
// Example:
// func f(x int) {
// goto L
// if x > 0 {
// L:
// print("inside block")
// }
// }
JumpIntoBlock
// InvalidMethodExpr occurs when a pointer method is called but the argument
// is not addressable.
//
// Example:
// type T struct {}
//
// func (*T) m() int { return 1 }
//
// var _ = T.m(T{})
InvalidMethodExpr
// WrongArgCount occurs when too few or too many arguments are passed by a
// function call.
//
// Example:
// func f(i int) {}
// var x = f()
WrongArgCount
// InvalidCall occurs when an expression is called that is not of function
// type.
//
// Example:
// var x = "x"
// var y = x()
InvalidCall
// UnusedResults occurs when a restricted expression-only built-in function
// is suspended via go or defer. Such a suspension discards the results of
// these side-effect free built-in functions, and therefore is ineffectual.
//
// Example:
// func f(a []int) int {
// defer len(a)
// return i
// }
UnusedResults
// InvalidDefer occurs when a deferred expression is not a function call,
// for example if the expression is a type conversion.
//
// Example:
// func f(i int) int {
// defer int32(i)
// return i
// }
InvalidDefer
// InvalidGo occurs when a go expression is not a function call, for example
// if the expression is a type conversion.
//
// Example:
// func f(i int) int {
// go int32(i)
// return i
// }
InvalidGo
// All codes below were added in Go 1.17.
// BadDecl occurs when a declaration has invalid syntax.
BadDecl
// RepeatedDecl occurs when an identifier occurs more than once on the left
// hand side of a short variable declaration.
//
// Example:
// func _() {
// x, y, y := 1, 2, 3
// }
RepeatedDecl
// InvalidUnsafeAdd occurs when unsafe.Add is called with a
// length argument that is not of integer type.
// It also occurs if it is used in a package compiled for a
// language version before go1.17.
//
// Example:
// import "unsafe"
//
// var p unsafe.Pointer
// var _ = unsafe.Add(p, float64(1))
InvalidUnsafeAdd
// InvalidUnsafeSlice occurs when unsafe.Slice is called with a
// pointer argument that is not of pointer type or a length argument
// that is not of integer type, negative, or out of bounds.
// It also occurs if it is used in a package compiled for a language
// version before go1.17.
//
// Example:
// import "unsafe"
//
// var x int
// var _ = unsafe.Slice(x, 1)
//
// Example:
// import "unsafe"
//
// var x int
// var _ = unsafe.Slice(&x, float64(1))
//
// Example:
// import "unsafe"
//
// var x int
// var _ = unsafe.Slice(&x, -1)
//
// Example:
// import "unsafe"
//
// var x int
// var _ = unsafe.Slice(&x, uint64(1) << 63)
InvalidUnsafeSlice
// All codes below were added in Go 1.18.
// UnsupportedFeature occurs when a language feature is used that is not
// supported at this Go version.
UnsupportedFeature
// NotAGenericType occurs when a non-generic type is used where a generic
// type is expected: in type or function instantiation.
//
// Example:
// type T int
//
// var _ T[int]
NotAGenericType
// WrongTypeArgCount occurs when a type or function is instantiated with an
	// incorrect number of type arguments, including when a generic type or
// function is used without instantiation.
//
	// Errors involving failed type inference are assigned other error codes.
//
// Example:
// type T[p any] int
//
// var _ T[int, string]
//
// Example:
// func f[T any]() {}
//
// var x = f
WrongTypeArgCount
// CannotInferTypeArgs occurs when type or function type argument inference
// fails to infer all type arguments.
//
// Example:
// func f[T any]() {}
//
// func _() {
// f()
// }
CannotInferTypeArgs
// InvalidTypeArg occurs when a type argument does not satisfy its
// corresponding type parameter constraints.
//
// Example:
// type T[P ~int] struct{}
//
// var _ T[string]
InvalidTypeArg // arguments? InferenceFailed
// InvalidInstanceCycle occurs when an invalid cycle is detected
// within the instantiation graph.
//
// Example:
// func f[T any]() { f[*T]() }
InvalidInstanceCycle
// InvalidUnion occurs when an embedded union or approximation element is
// not valid.
//
// Example:
// type _ interface {
// ~int | interface{ m() }
// }
InvalidUnion
// MisplacedConstraintIface occurs when a constraint-type interface is used
// outside of constraint position.
//
// Example:
// type I interface { ~int }
//
// var _ I
MisplacedConstraintIface
// InvalidMethodTypeParams occurs when methods have type parameters.
//
// It cannot be encountered with an AST parsed using go/parser.
InvalidMethodTypeParams
// MisplacedTypeParam occurs when a type parameter is used in a place where
// it is not permitted.
//
// Example:
// type T[P any] P
//
// Example:
// type T[P any] struct{ *P }
MisplacedTypeParam
// InvalidUnsafeSliceData occurs when unsafe.SliceData is called with
// an argument that is not of slice type. It also occurs if it is used
// in a package compiled for a language version before go1.20.
//
// Example:
// import "unsafe"
//
// var x int
// var _ = unsafe.SliceData(x)
InvalidUnsafeSliceData
// InvalidUnsafeString occurs when unsafe.String is called with
// a length argument that is not of integer type, negative, or
// out of bounds. It also occurs if it is used in a package
// compiled for a language version before go1.20.
//
// Example:
// import "unsafe"
//
// var b [10]byte
// var _ = unsafe.String(&b[0], -1)
InvalidUnsafeString
// InvalidUnsafeStringData occurs if it is used in a package
// compiled for a language version before go1.20.
_ // not used anymore
)
internal/types/errors: rename UntypedNil to UntypedNilUse
This avoids a conflict when dot-importing this package in
go/types and types2.
Change-Id: Ia6fc45ef21c28ea595b49f5321b5c0d441763e2b
Reviewed-on: https://go-review.googlesource.com/c/go/+/439562
Reviewed-by: Robert Griesemer <a6752097faebf1f02d80107d77a9be009089afe3@google.com>
TryBot-Result: Gopher Robot <66cb808b70d30c07676d5e946fee83fd561249e5@golang.org>
Run-TryBot: Robert Griesemer <a6752097faebf1f02d80107d77a9be009089afe3@google.com>
Reviewed-by: Robert Findley <9179eee875d1ee628c37fb47d7e749e855ffd21f@google.com>
Auto-Submit: Robert Griesemer <a6752097faebf1f02d80107d77a9be009089afe3@google.com>
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package errors
// Code is a numeric identifier for a kind of type-checking error.
// Codes are stable: per the convention block below, existing values
// must not be changed and new codes are added at the end.
type Code int
// This file defines the error codes that can be produced during type-checking.
// Collectively, these codes provide an identifier that may be used to
// implement special handling for certain types of errors.
//
// Error code values should not be changed: add new codes at the end.
//
// Error codes should be fine-grained enough that the exact nature of the error
// can be easily determined, but coarse enough that they are not an
// implementation detail of the type checking algorithm. As a rule-of-thumb,
// errors should be considered equivalent if there is a theoretical refactoring
// of the type checker in which they are emitted in exactly one place. For
// example, the type checker emits different error messages for "too many
// arguments" and "too few arguments", but one can imagine an alternative type
// checker where this check instead just emits a single "wrong number of
// arguments", so these errors should have the same code.
//
// Error code names should be as brief as possible while retaining accuracy and
// distinctiveness. In most cases names should start with an adjective
// describing the nature of the error (e.g. "invalid", "unused", "misplaced"),
// and end with a noun identifying the relevant language object. For example,
// "_DuplicateDecl" or "_InvalidSliceExpr". For brevity, naming follows the
// convention that "bad" implies a problem with syntax, and "invalid" implies a
// problem with types.
const (
_ Code = iota
// Test is reserved for errors that only apply while in self-test mode.
Test
// BlankPkgName occurs when a package name is the blank identifier "_".
//
// Per the spec:
// "The PackageName must not be the blank identifier."
BlankPkgName
// MismatchedPkgName occurs when a file's package name doesn't match the
// package name already established by other files.
MismatchedPkgName
// InvalidPkgUse occurs when a package identifier is used outside of a
// selector expression.
//
// Example:
// import "fmt"
//
// var _ = fmt
InvalidPkgUse
// BadImportPath occurs when an import path is not valid.
BadImportPath
// BrokenImport occurs when importing a package fails.
//
// Example:
// import "amissingpackage"
BrokenImport
// ImportCRenamed occurs when the special import "C" is renamed. "C" is a
// pseudo-package, and must not be renamed.
//
// Example:
// import _ "C"
ImportCRenamed
// UnusedImport occurs when an import is unused.
//
// Example:
// import "fmt"
//
// func main() {}
UnusedImport
// InvalidInitCycle occurs when an invalid cycle is detected within the
// initialization graph.
//
// Example:
// var x int = f()
//
// func f() int { return x }
InvalidInitCycle
// DuplicateDecl occurs when an identifier is declared multiple times.
//
// Example:
// var x = 1
// var x = 2
DuplicateDecl
// InvalidDeclCycle occurs when a declaration cycle is not valid.
//
// Example:
// type S struct {
// S
// }
//
InvalidDeclCycle
// InvalidTypeCycle occurs when a cycle in type definitions results in a
// type that is not well-defined.
//
// Example:
// import "unsafe"
//
// type T [unsafe.Sizeof(T{})]int
InvalidTypeCycle
// InvalidConstInit occurs when a const declaration has a non-constant
// initializer.
//
// Example:
// var x int
// const _ = x
InvalidConstInit
// InvalidConstVal occurs when a const value cannot be converted to its
// target type.
//
// TODO(findleyr): this error code and example are not very clear. Consider
// removing it.
//
// Example:
// const _ = 1 << "hello"
InvalidConstVal
// InvalidConstType occurs when the underlying type in a const declaration
// is not a valid constant type.
//
// Example:
// const c *int = 4
InvalidConstType
// UntypedNilUse occurs when the predeclared (untyped) value nil is used to
// initialize a variable declared without an explicit type.
//
// Example:
// var x = nil
UntypedNilUse
// WrongAssignCount occurs when the number of values on the right-hand side
// of an assignment or initialization expression does not match the number
// of variables on the left-hand side.
//
// Example:
// var x = 1, 2
WrongAssignCount
// UnassignableOperand occurs when the left-hand side of an assignment is
// not assignable.
//
// Example:
// func f() {
// const c = 1
// c = 2
// }
UnassignableOperand
// NoNewVar occurs when a short variable declaration (':=') does not declare
// new variables.
//
// Example:
// func f() {
// x := 1
// x := 2
// }
NoNewVar
// MultiValAssignOp occurs when an assignment operation (+=, *=, etc) does
// not have single-valued left-hand or right-hand side.
//
// Per the spec:
// "In assignment operations, both the left- and right-hand expression lists
// must contain exactly one single-valued expression"
//
// Example:
// func f() int {
// x, y := 1, 2
// x, y += 1
// return x + y
// }
MultiValAssignOp
// InvalidIfaceAssign occurs when a value of type T is used as an
// interface, but T does not implement a method of the expected interface.
//
// Example:
// type I interface {
// f()
// }
//
// type T int
//
// var x I = T(1)
InvalidIfaceAssign
// InvalidChanAssign occurs when a chan assignment is invalid.
//
// Per the spec, a value x is assignable to a channel type T if:
// "x is a bidirectional channel value, T is a channel type, x's type V and
// T have identical element types, and at least one of V or T is not a
// defined type."
//
// Example:
// type T1 chan int
// type T2 chan int
//
// var x T1
// // Invalid assignment because both types are named
// var _ T2 = x
InvalidChanAssign
// IncompatibleAssign occurs when the type of the right-hand side expression
// in an assignment cannot be assigned to the type of the variable being
// assigned.
//
// Example:
// var x []int
// var _ int = x
IncompatibleAssign
// UnaddressableFieldAssign occurs when trying to assign to a struct field
// in a map value.
//
// Example:
// func f() {
// m := make(map[string]struct{i int})
// m["foo"].i = 42
// }
UnaddressableFieldAssign
// NotAType occurs when the identifier used as the underlying type in a type
// declaration or the right-hand side of a type alias does not denote a type.
//
// Example:
// var S = 2
//
// type T S
NotAType
// InvalidArrayLen occurs when an array length is not a constant value.
//
// Example:
// var n = 3
// var _ = [n]int{}
InvalidArrayLen
// BlankIfaceMethod occurs when a method name is '_'.
//
// Per the spec:
// "The name of each explicitly specified method must be unique and not
// blank."
//
// Example:
// type T interface {
// _(int)
// }
BlankIfaceMethod
// IncomparableMapKey occurs when a map key type does not support the == and
// != operators.
//
// Per the spec:
// "The comparison operators == and != must be fully defined for operands of
// the key type; thus the key type must not be a function, map, or slice."
//
// Example:
// var x map[T]int
//
// type T []int
IncomparableMapKey
// InvalidIfaceEmbed occurs when a non-interface type is embedded in an
// interface (for go 1.17 or earlier).
_ // not used anymore
// InvalidPtrEmbed occurs when an embedded field is of the pointer form *T,
	// and T itself is a pointer, an unsafe.Pointer, or an interface.
//
// Per the spec:
// "An embedded field must be specified as a type name T or as a pointer to
// a non-interface type name *T, and T itself may not be a pointer type."
//
// Example:
// type T *int
//
// type S struct {
// *T
// }
InvalidPtrEmbed
// BadRecv occurs when a method declaration does not have exactly one
// receiver parameter.
//
// Example:
// func () _() {}
BadRecv
// InvalidRecv occurs when a receiver type expression is not of the form T
// or *T, or T is a pointer type.
//
// Example:
// type T struct {}
//
// func (**T) m() {}
InvalidRecv
// DuplicateFieldAndMethod occurs when an identifier appears as both a field
// and method name.
//
// Example:
// type T struct {
// m int
// }
//
// func (T) m() {}
DuplicateFieldAndMethod
// DuplicateMethod occurs when two methods on the same receiver type have
// the same name.
//
// Example:
// type T struct {}
// func (T) m() {}
// func (T) m(i int) int { return i }
DuplicateMethod
// InvalidBlank occurs when a blank identifier is used as a value or type.
//
// Per the spec:
// "The blank identifier may appear as an operand only on the left-hand side
// of an assignment."
//
// Example:
// var x = _
InvalidBlank
// InvalidIota occurs when the predeclared identifier iota is used outside
// of a constant declaration.
//
// Example:
// var x = iota
InvalidIota
// MissingInitBody occurs when an init function is missing its body.
//
// Example:
// func init()
MissingInitBody
// InvalidInitSig occurs when an init function declares parameters or
// results.
//
// Deprecated: no longer emitted by the type checker. _InvalidInitDecl is
// used instead.
InvalidInitSig
// InvalidInitDecl occurs when init is declared as anything other than a
// function.
//
// Example:
// var init = 1
//
// Example:
// func init() int { return 1 }
InvalidInitDecl
// InvalidMainDecl occurs when main is declared as anything other than a
// function, in a main package.
InvalidMainDecl
// TooManyValues occurs when a function returns too many values for the
// expression context in which it is used.
//
// Example:
// func ReturnTwo() (int, int) {
// return 1, 2
// }
//
// var x = ReturnTwo()
TooManyValues
// NotAnExpr occurs when a type expression is used where a value expression
// is expected.
//
// Example:
// type T struct {}
//
// func f() {
// T
// }
NotAnExpr
// TruncatedFloat occurs when a float constant is truncated to an integer
// value.
//
// Example:
// var _ int = 98.6
TruncatedFloat
// NumericOverflow occurs when a numeric constant overflows its target type.
//
// Example:
// var x int8 = 1000
NumericOverflow
// UndefinedOp occurs when an operator is not defined for the type(s) used
// in an operation.
//
// Example:
// var c = "a" - "b"
UndefinedOp
// MismatchedTypes occurs when operand types are incompatible in a binary
// operation.
//
// Example:
// var a = "hello"
// var b = 1
// var c = a - b
MismatchedTypes
// DivByZero occurs when a division operation is provable at compile
// time to be a division by zero.
//
// Example:
// const divisor = 0
// var x int = 1/divisor
DivByZero
// NonNumericIncDec occurs when an increment or decrement operator is
// applied to a non-numeric value.
//
// Example:
// func f() {
// var c = "c"
// c++
// }
NonNumericIncDec
// UnaddressableOperand occurs when the & operator is applied to an
// unaddressable expression.
//
// Example:
// var x = &1
UnaddressableOperand
// InvalidIndirection occurs when a non-pointer value is indirected via the
// '*' operator.
//
// Example:
// var x int
// var y = *x
InvalidIndirection
// NonIndexableOperand occurs when an index operation is applied to a value
// that cannot be indexed.
//
// Example:
// var x = 1
// var y = x[1]
NonIndexableOperand
// InvalidIndex occurs when an index argument is not of integer type,
// negative, or out-of-bounds.
//
// Example:
// var s = [...]int{1,2,3}
// var x = s[5]
//
// Example:
// var s = []int{1,2,3}
// var _ = s[-1]
//
// Example:
// var s = []int{1,2,3}
// var i string
// var _ = s[i]
InvalidIndex
// SwappedSliceIndices occurs when constant indices in a slice expression
// are decreasing in value.
//
// Example:
// var _ = []int{1,2,3}[2:1]
SwappedSliceIndices
// NonSliceableOperand occurs when a slice operation is applied to a value
// whose type is not sliceable, or is unaddressable.
//
// Example:
// var x = [...]int{1, 2, 3}[:1]
//
// Example:
// var x = 1
// var y = 1[:1]
NonSliceableOperand
// InvalidSliceExpr occurs when a three-index slice expression (a[x:y:z]) is
// applied to a string.
//
// Example:
// var s = "hello"
// var x = s[1:2:3]
InvalidSliceExpr
// InvalidShiftCount occurs when the right-hand side of a shift operation is
// either non-integer, negative, or too large.
//
// Example:
// var (
// x string
// y int = 1 << x
// )
InvalidShiftCount
// InvalidShiftOperand occurs when the shifted operand is not an integer.
//
// Example:
// var s = "hello"
// var x = s << 2
InvalidShiftOperand
// InvalidReceive occurs when there is a channel receive from a value that
// is either not a channel, or is a send-only channel.
//
// Example:
// func f() {
// var x = 1
// <-x
// }
InvalidReceive
// InvalidSend occurs when there is a channel send to a value that is not a
// channel, or is a receive-only channel.
//
// Example:
// func f() {
// var x = 1
// x <- "hello!"
// }
InvalidSend
// DuplicateLitKey occurs when an index is duplicated in a slice, array, or
// map literal.
//
// Example:
// var _ = []int{0:1, 0:2}
//
// Example:
// var _ = map[string]int{"a": 1, "a": 2}
DuplicateLitKey
// MissingLitKey occurs when a map literal is missing a key expression.
//
// Example:
// var _ = map[string]int{1}
MissingLitKey
// InvalidLitIndex occurs when the key in a key-value element of a slice or
// array literal is not an integer constant.
//
// Example:
// var i = 0
// var x = []string{i: "world"}
InvalidLitIndex
// OversizeArrayLit occurs when an array literal exceeds its length.
//
// Example:
// var _ = [2]int{1,2,3}
OversizeArrayLit
// MixedStructLit occurs when a struct literal contains a mix of positional
// and named elements.
//
// Example:
// var _ = struct{i, j int}{i: 1, 2}
MixedStructLit
// InvalidStructLit occurs when a positional struct literal has an incorrect
// number of values.
//
// Example:
// var _ = struct{i, j int}{1,2,3}
InvalidStructLit
// MissingLitField occurs when a struct literal refers to a field that does
// not exist on the struct type.
//
// Example:
// var _ = struct{i int}{j: 2}
MissingLitField
// DuplicateLitField occurs when a struct literal contains duplicated
// fields.
//
// Example:
// var _ = struct{i int}{i: 1, i: 2}
DuplicateLitField
// UnexportedLitField occurs when a positional struct literal implicitly
// assigns an unexported field of an imported type.
UnexportedLitField
// InvalidLitField occurs when a field name is not a valid identifier.
//
// Example:
// var _ = struct{i int}{1: 1}
InvalidLitField
// UntypedLit occurs when a composite literal omits a required type
// identifier.
//
// Example:
// type outer struct{
// inner struct { i int }
// }
//
// var _ = outer{inner: {1}}
UntypedLit
// InvalidLit occurs when a composite literal expression does not match its
// type.
//
// Example:
// type P *struct{
// x int
// }
// var _ = P {}
InvalidLit
// AmbiguousSelector occurs when a selector is ambiguous.
//
// Example:
// type E1 struct { i int }
// type E2 struct { i int }
// type T struct { E1; E2 }
//
// var x T
// var _ = x.i
AmbiguousSelector
// UndeclaredImportedName occurs when a package-qualified identifier is
// undeclared by the imported package.
//
// Example:
// import "go/types"
//
// var _ = types.NotAnActualIdentifier
UndeclaredImportedName
// UnexportedName occurs when a selector refers to an unexported identifier
// of an imported package.
//
// Example:
// import "reflect"
//
// type _ reflect.flag
UnexportedName
// UndeclaredName occurs when an identifier is not declared in the current
// scope.
//
// Example:
// var x T
UndeclaredName
// MissingFieldOrMethod occurs when a selector references a field or method
// that does not exist.
//
// Example:
// type T struct {}
//
// var x = T{}.f
MissingFieldOrMethod
// BadDotDotDotSyntax occurs when a "..." occurs in a context where it is
// not valid.
//
// Example:
// var _ = map[int][...]int{0: {}}
BadDotDotDotSyntax
// NonVariadicDotDotDot occurs when a "..." is used on the final argument to
// a non-variadic function.
//
// Example:
// func printArgs(s []string) {
// for _, a := range s {
// println(a)
// }
// }
//
// func f() {
// s := []string{"a", "b", "c"}
// printArgs(s...)
// }
NonVariadicDotDotDot
// MisplacedDotDotDot occurs when a "..." is used somewhere other than the
// final argument in a function declaration.
//
// Example:
// func f(...int, int)
MisplacedDotDotDot
_ // InvalidDotDotDotOperand was removed.
// InvalidDotDotDot occurs when a "..." is used in a non-variadic built-in
// function.
//
// Example:
// var s = []int{1, 2, 3}
// var l = len(s...)
InvalidDotDotDot
// UncalledBuiltin occurs when a built-in function is used as a
// function-valued expression, instead of being called.
//
// Per the spec:
// "The built-in functions do not have standard Go types, so they can only
// appear in call expressions; they cannot be used as function values."
//
// Example:
// var _ = copy
UncalledBuiltin
// InvalidAppend occurs when append is called with a first argument that is
// not a slice.
//
// Example:
// var _ = append(1, 2)
InvalidAppend
// InvalidCap occurs when an argument to the cap built-in function is not of
// supported type.
//
// See https://golang.org/ref/spec#Length_and_capacity for information on
// which underlying types are supported as arguments to cap and len.
//
// Example:
// var s = 2
// var x = cap(s)
InvalidCap
// InvalidClose occurs when close(...) is called with an argument that is
// not of channel type, or that is a receive-only channel.
//
// Example:
// func f() {
// var x int
// close(x)
// }
InvalidClose
// InvalidCopy occurs when the arguments are not of slice type or do not
// have compatible type.
//
// See https://golang.org/ref/spec#Appending_and_copying_slices for more
// information on the type requirements for the copy built-in.
//
// Example:
// func f() {
// var x []int
// y := []int64{1,2,3}
// copy(x, y)
// }
InvalidCopy
// InvalidComplex occurs when the complex built-in function is called with
// arguments with incompatible types.
//
// Example:
// var _ = complex(float32(1), float64(2))
InvalidComplex
// InvalidDelete occurs when the delete built-in function is called with a
// first argument that is not a map.
//
// Example:
// func f() {
// m := "hello"
// delete(m, "e")
// }
InvalidDelete
// InvalidImag occurs when the imag built-in function is called with an
// argument that does not have complex type.
//
// Example:
// var _ = imag(int(1))
InvalidImag
// InvalidLen occurs when an argument to the len built-in function is not of
// supported type.
//
// See https://golang.org/ref/spec#Length_and_capacity for information on
// which underlying types are supported as arguments to cap and len.
//
// Example:
// var s = 2
// var x = len(s)
InvalidLen
// SwappedMakeArgs occurs when make is called with three arguments, and its
// length argument is larger than its capacity argument.
//
// Example:
// var x = make([]int, 3, 2)
SwappedMakeArgs
// InvalidMake occurs when make is called with an unsupported type argument.
//
// See https://golang.org/ref/spec#Making_slices_maps_and_channels for
// information on the types that may be created using make.
//
// Example:
// var x = make(int)
InvalidMake
// InvalidReal occurs when the real built-in function is called with an
// argument that does not have complex type.
//
// Example:
// var _ = real(int(1))
InvalidReal
// InvalidAssert occurs when a type assertion is applied to a
// value that is not of interface type.
//
// Example:
// var x = 1
// var _ = x.(float64)
InvalidAssert
// ImpossibleAssert occurs for a type assertion x.(T) when the value x of
// interface cannot have dynamic type T, due to a missing or mismatching
// method on T.
//
// Example:
// type T int
//
// func (t *T) m() int { return int(*t) }
//
// type I interface { m() int }
//
// var x I
// var _ = x.(T)
ImpossibleAssert
// InvalidConversion occurs when the argument type cannot be converted to the
// target.
//
// See https://golang.org/ref/spec#Conversions for the rules of
// convertibility.
//
// Example:
// var x float64
// var _ = string(x)
InvalidConversion
// InvalidUntypedConversion occurs when there is no valid implicit
// conversion from an untyped value satisfying the type constraints of the
// context in which it is used.
//
// Example:
// var _ = 1 + new(int)
InvalidUntypedConversion
// BadOffsetofSyntax occurs when unsafe.Offsetof is called with an argument
// that is not a selector expression.
//
// Example:
// import "unsafe"
//
// var x int
// var _ = unsafe.Offsetof(x)
BadOffsetofSyntax
// InvalidOffsetof occurs when unsafe.Offsetof is called with a method
// selector, rather than a field selector, or when the field is embedded via
// a pointer.
//
// Per the spec:
//
// "If f is an embedded field, it must be reachable without pointer
// indirections through fields of the struct. "
//
// Example:
// import "unsafe"
//
// type T struct { f int }
// type S struct { *T }
// var s S
// var _ = unsafe.Offsetof(s.f)
//
// Example:
// import "unsafe"
//
// type S struct{}
//
// func (S) m() {}
//
// var s S
// var _ = unsafe.Offsetof(s.m)
InvalidOffsetof
// UnusedExpr occurs when a side-effect free expression is used as a
// statement. Such a statement has no effect.
//
// Example:
// func f(i int) {
// i*i
// }
UnusedExpr
// UnusedVar occurs when a variable is declared but unused.
//
// Example:
// func f() {
// x := 1
// }
UnusedVar
// MissingReturn occurs when a function with results is missing a return
// statement.
//
// Example:
// func f() int {}
MissingReturn
// WrongResultCount occurs when a return statement returns an incorrect
// number of values.
//
// Example:
// func ReturnOne() int {
// return 1, 2
// }
WrongResultCount
// OutOfScopeResult occurs when the name of a value implicitly returned by
// an empty return statement is shadowed in a nested scope.
//
// Example:
// func factor(n int) (i int) {
// for i := 2; i < n; i++ {
// if n%i == 0 {
// return
// }
// }
// return 0
// }
OutOfScopeResult
// InvalidCond occurs when an if condition is not a boolean expression.
//
// Example:
// func checkReturn(i int) {
// if i {
// panic("non-zero return")
// }
// }
InvalidCond
// InvalidPostDecl occurs when there is a declaration in a for-loop post
// statement.
//
// Example:
// func f() {
// for i := 0; i < 10; j := 0 {}
// }
InvalidPostDecl
_ // InvalidChanRange was removed.
// InvalidIterVar occurs when two iteration variables are used while ranging
// over a channel.
//
// Example:
// func f(c chan int) {
// for k, v := range c {
// println(k, v)
// }
// }
InvalidIterVar
// InvalidRangeExpr occurs when the type of a range expression is not array,
// slice, string, map, or channel.
//
// Example:
// func f(i int) {
// for j := range i {
// println(j)
// }
// }
InvalidRangeExpr
// MisplacedBreak occurs when a break statement is not within a for, switch,
// or select statement of the innermost function definition.
//
// Example:
// func f() {
// break
// }
MisplacedBreak
// MisplacedContinue occurs when a continue statement is not within a for
// loop of the innermost function definition.
//
// Example:
// func sumeven(n int) int {
// proceed := func() {
// continue
// }
// sum := 0
// for i := 1; i <= n; i++ {
// if i % 2 != 0 {
// proceed()
// }
// sum += i
// }
// return sum
// }
MisplacedContinue
// MisplacedFallthrough occurs when a fallthrough statement is not within an
// expression switch.
//
// Example:
// func typename(i interface{}) string {
// switch i.(type) {
// case int64:
// fallthrough
// case int:
// return "int"
// }
// return "unsupported"
// }
MisplacedFallthrough
// DuplicateCase occurs when a type or expression switch has duplicate
// cases.
//
// Example:
// func printInt(i int) {
// switch i {
// case 1:
// println("one")
// case 1:
// println("One")
// }
// }
DuplicateCase
// DuplicateDefault occurs when a type or expression switch has multiple
// default clauses.
//
// Example:
// func printInt(i int) {
// switch i {
// case 1:
// println("one")
// default:
// println("One")
// default:
// println("1")
// }
// }
DuplicateDefault
// BadTypeKeyword occurs when a .(type) expression is used anywhere other
// than a type switch.
//
// Example:
// type I interface {
// m()
// }
// var t I
// var _ = t.(type)
BadTypeKeyword
// InvalidTypeSwitch occurs when .(type) is used on an expression that is
// not of interface type.
//
// Example:
// func f(i int) {
// switch x := i.(type) {}
// }
InvalidTypeSwitch
// InvalidExprSwitch occurs when a switch expression is not comparable.
//
// Example:
// func _() {
// var a struct{ _ func() }
// switch a /* ERROR cannot switch on a */ {
// }
// }
InvalidExprSwitch
// InvalidSelectCase occurs when a select case is not a channel send or
// receive.
//
// Example:
// func checkChan(c <-chan int) bool {
// select {
// case c:
// return true
// default:
// return false
// }
// }
InvalidSelectCase
// UndeclaredLabel occurs when an undeclared label is jumped to.
//
// Example:
// func f() {
// goto L
// }
UndeclaredLabel
// DuplicateLabel occurs when a label is declared more than once.
//
// Example:
// func f() int {
// L:
// L:
// return 1
// }
DuplicateLabel
// MisplacedLabel occurs when a break or continue label is not on a for,
// switch, or select statement.
//
// Example:
// func f() {
// L:
// a := []int{1,2,3}
// for _, e := range a {
// if e > 10 {
// break L
// }
// println(a)
// }
// }
MisplacedLabel
// UnusedLabel occurs when a label is declared and not used.
//
// Example:
// func f() {
// L:
// }
UnusedLabel
// JumpOverDecl occurs when a label jumps over a variable declaration.
//
// Example:
// func f() int {
// goto L
// x := 2
// L:
// x++
// return x
// }
JumpOverDecl
// JumpIntoBlock occurs when a forward jump goes to a label inside a nested
// block.
//
// Example:
// func f(x int) {
// goto L
// if x > 0 {
// L:
// print("inside block")
// }
// }
JumpIntoBlock
// InvalidMethodExpr occurs when a pointer method is called but the argument
// is not addressable.
//
// Example:
// type T struct {}
//
// func (*T) m() int { return 1 }
//
// var _ = T.m(T{})
InvalidMethodExpr
// WrongArgCount occurs when too few or too many arguments are passed by a
// function call.
//
// Example:
// func f(i int) {}
// var x = f()
WrongArgCount
// InvalidCall occurs when an expression is called that is not of function
// type.
//
// Example:
// var x = "x"
// var y = x()
InvalidCall
// UnusedResults occurs when a restricted expression-only built-in function
// is suspended via go or defer. Such a suspension discards the results of
// these side-effect free built-in functions, and therefore is ineffectual.
//
// Example:
// func f(a []int) int {
// defer len(a)
// return i
// }
UnusedResults
// InvalidDefer occurs when a deferred expression is not a function call,
// for example if the expression is a type conversion.
//
// Example:
// func f(i int) int {
// defer int32(i)
// return i
// }
InvalidDefer
// InvalidGo occurs when a go expression is not a function call, for example
// if the expression is a type conversion.
//
// Example:
// func f(i int) int {
// go int32(i)
// return i
// }
InvalidGo
// All codes below were added in Go 1.17.
// BadDecl occurs when a declaration has invalid syntax.
BadDecl
// RepeatedDecl occurs when an identifier occurs more than once on the left
// hand side of a short variable declaration.
//
// Example:
// func _() {
// x, y, y := 1, 2, 3
// }
RepeatedDecl
// InvalidUnsafeAdd occurs when unsafe.Add is called with a
// length argument that is not of integer type.
// It also occurs if it is used in a package compiled for a
// language version before go1.17.
//
// Example:
// import "unsafe"
//
// var p unsafe.Pointer
// var _ = unsafe.Add(p, float64(1))
InvalidUnsafeAdd
// InvalidUnsafeSlice occurs when unsafe.Slice is called with a
// pointer argument that is not of pointer type or a length argument
// that is not of integer type, negative, or out of bounds.
// It also occurs if it is used in a package compiled for a language
// version before go1.17.
//
// Example:
// import "unsafe"
//
// var x int
// var _ = unsafe.Slice(x, 1)
//
// Example:
// import "unsafe"
//
// var x int
// var _ = unsafe.Slice(&x, float64(1))
//
// Example:
// import "unsafe"
//
// var x int
// var _ = unsafe.Slice(&x, -1)
//
// Example:
// import "unsafe"
//
// var x int
// var _ = unsafe.Slice(&x, uint64(1) << 63)
InvalidUnsafeSlice
// All codes below were added in Go 1.18.
// UnsupportedFeature occurs when a language feature is used that is not
// supported at this Go version.
UnsupportedFeature
// NotAGenericType occurs when a non-generic type is used where a generic
// type is expected: in type or function instantiation.
//
// Example:
// type T int
//
// var _ T[int]
NotAGenericType
// WrongTypeArgCount occurs when a type or function is instantiated with an
	// incorrect number of type arguments, including when a generic type or
// function is used without instantiation.
//
// Errors involving failed type inference are assigned other error codes.
//
// Example:
// type T[p any] int
//
// var _ T[int, string]
//
// Example:
// func f[T any]() {}
//
// var x = f
WrongTypeArgCount
// CannotInferTypeArgs occurs when type or function type argument inference
// fails to infer all type arguments.
//
// Example:
// func f[T any]() {}
//
// func _() {
// f()
// }
CannotInferTypeArgs
// InvalidTypeArg occurs when a type argument does not satisfy its
// corresponding type parameter constraints.
//
// Example:
// type T[P ~int] struct{}
//
// var _ T[string]
InvalidTypeArg // arguments? InferenceFailed
// InvalidInstanceCycle occurs when an invalid cycle is detected
// within the instantiation graph.
//
// Example:
// func f[T any]() { f[*T]() }
InvalidInstanceCycle
// InvalidUnion occurs when an embedded union or approximation element is
// not valid.
//
// Example:
// type _ interface {
// ~int | interface{ m() }
// }
InvalidUnion
// MisplacedConstraintIface occurs when a constraint-type interface is used
// outside of constraint position.
//
// Example:
// type I interface { ~int }
//
// var _ I
MisplacedConstraintIface
// InvalidMethodTypeParams occurs when methods have type parameters.
//
// It cannot be encountered with an AST parsed using go/parser.
InvalidMethodTypeParams
// MisplacedTypeParam occurs when a type parameter is used in a place where
// it is not permitted.
//
// Example:
// type T[P any] P
//
// Example:
// type T[P any] struct{ *P }
MisplacedTypeParam
// InvalidUnsafeSliceData occurs when unsafe.SliceData is called with
// an argument that is not of slice type. It also occurs if it is used
// in a package compiled for a language version before go1.20.
//
// Example:
// import "unsafe"
//
// var x int
// var _ = unsafe.SliceData(x)
InvalidUnsafeSliceData
// InvalidUnsafeString occurs when unsafe.String is called with
// a length argument that is not of integer type, negative, or
// out of bounds. It also occurs if it is used in a package
// compiled for a language version before go1.20.
//
// Example:
// import "unsafe"
//
// var b [10]byte
// var _ = unsafe.String(&b[0], -1)
InvalidUnsafeString
// InvalidUnsafeStringData occurs if it is used in a package
// compiled for a language version before go1.20.
_ // not used anymore
)
|
package bothandlers
import (
"math/rand"
"strings"
"github.com/danryan/hal"
)
// ikr is a stateless chat handler that listens for enthusiastic messages
// and replies with a short burst of validation.
type ikr struct{
}
// Method reports how hal should route messages to this handler.
// hal.HEAR presumably means the handler runs against every heard message,
// not only direct commands — confirm against the hal documentation.
func (h *ikr) Method() string {
	hal.Logger.Debug("ikr: called Method()")
	return hal.HEAR
}
// Usage returns the one-line help text shown for this handler.
func (h *ikr) Usage() string {
	hal.Logger.Debug("ikr: called Usage()")
	return `ikr - listens for enthusiasm; responds with validation`
}
// Pattern returns the case-insensitive regular expression that fires this
// handler: an alternation of phrases expressing enthusiasm.
func (h *ikr) Pattern() string {
	// Fixed: the debug message previously said "called Usage()" (copy/paste).
	hal.Logger.Debug("ikr: called Pattern()")
	triggers := []string{
		"best.*ev(er|ar)",
		"so good",
		"they have the best",
		"awesome",
		"I love",
		"fantastic|wonderful|outstanding|magnificent|brilliant|genius|amazing",
		"ZOMG|OMG|OMFG",
		"(so|pretty) great",
		"off the hook",
	}
	// "(?i)" makes the whole alternation case-insensitive.
	return "(?i)" + strings.Join(triggers, "|")
}
// Run sends a randomly chosen validation reply back to the channel that
// triggered the handler.
func (h *ikr) Run(res *hal.Response) error {
	// Fixed: the debug message previously said "called Usage()" (copy/paste).
	hal.Logger.Debug("ikr: called Run()")
	replies := []string{
		"*I know right?!*",
		"*OMG* couldn't agree more",
		":+1:",
		"+1",
		":arrow_up: THAT",
		":arrow_up: you complete me :arrow_up:",
		"so true",
		"agreed.",
		"that's the fact jack",
		"YUUUUUUP",
		"that's what I'm talkin bout",
		"*IKR?!*",
		"singit",
		"^droppin the truth bombs :boom: :boom: :boom:",
		"#legit",
		"/me nodds emphatically in agreement",
		"for REALZ though",
		"FOR REALSIES",
		"it's like you *literally* just read my mind right now",
	}
	// rand.Intn(len(replies)) yields an index in [0, len(replies)); the
	// previous len(replies)-1 bound could never select the last reply.
	return res.Send(replies[rand.Intn(len(replies))])
}
// IKR is the exported handler instance to register with the bot.
var IKR = &ikr{}
More debugging
package bothandlers
import (
"math/rand"
"strings"
"github.com/danryan/hal"
)
// ikr is a stateless chat handler that listens for enthusiastic messages
// and replies with a short burst of validation.
type ikr struct{
}
// Method reports how hal should route messages to this handler.
// hal.HEAR presumably means the handler runs against every heard message,
// not only direct commands — confirm against the hal documentation.
func (h *ikr) Method() string {
	hal.Logger.Debug("ikr: called Method()")
	return hal.HEAR
}
// Usage returns the one-line help text shown for this handler.
func (h *ikr) Usage() string {
	hal.Logger.Debug("ikr: called Usage()")
	return `ikr - listens for enthusiasm; responds with validation`
}
// Pattern returns the case-insensitive regular expression that makes the
// handler fire: an alternation of phrases expressing enthusiasm.
func (h *ikr) Pattern() string {
	hal.Logger.Debug("ikr: called Pattern()")
	phrases := []string{
		"best.*ev(er|ar)",
		"so good",
		"they have the best",
		"awesome",
		"I love",
		"fantastic|wonderful|outstanding|magnificent|brilliant|genius|amazing",
		"ZOMG|OMG|OMFG",
		"(so|pretty) great",
		"off the hook",
	}
	pattern := "(?i)" + strings.Join(phrases, "|")
	hal.Logger.Debug("ikr: pattern: %s", pattern)
	return pattern
}
// Run sends a randomly chosen validation reply back to the channel that
// triggered the handler.
func (h *ikr) Run(res *hal.Response) error {
	hal.Logger.Debug("ikr: called Run()")
	replies := []string{
		"*I know right?!*",
		"*OMG* couldn't agree more",
		":+1:",
		"+1",
		":arrow_up: THAT",
		":arrow_up: you complete me :arrow_up:",
		"so true",
		"agreed.",
		"that's the fact jack",
		"YUUUUUUP",
		"that's what I'm talkin bout",
		"*IKR?!*",
		"singit",
		"^droppin the truth bombs :boom: :boom: :boom:",
		"#legit",
		"/me nodds emphatically in agreement",
		"for REALZ though",
		"FOR REALSIES",
		"it's like you *literally* just read my mind right now",
	}
	// rand.Intn(len(replies)) yields an index in [0, len(replies)); the
	// previous len(replies)-1 bound could never select the last reply.
	reply := replies[rand.Intn(len(replies))]
	hal.Logger.Debug("ikr: my reply is: %s", reply)
	return res.Send(reply)
}
// IKR is the exported handler instance to register with the bot.
var IKR = &ikr{}
|
// Copyright 2016 The Upspin Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package perm
// Features:
// - Resolves remote Group files if necessary.
// - Blocks mutations to Store until it has had a chance to prove that either
// there is no Group file and hence writes are free for all, or until the
// Group file has been fully loaded. This prevents a window of vulnerability
// where all writes would be allowed until the initial load is completed.
//
// TODOs:
// - Cache references so we don't need to retrieve the contents every time.
// - Poll more frequently if there is no control Group set up, so the StoreServer
// updates faster when creating a new one for the first time.
// - Poll more frequently if the DirServer is unreachable (speeds up boot time).
import (
"strings"
"upspin.io/errors"
"upspin.io/upspin"
)
// WrapStore wraps the given StoreServer with a StoreServer that checks access
// permissions. It will only start polling the store permissions after the
// ready channel is closed.
func WrapStore(cfg upspin.Config, ready <-chan struct{}, store upspin.StoreServer) upspin.StoreServer {
	const op errors.Op = "serverutil/perm.WrapStore"
	// The server's own user name becomes the permission target (it is the
	// user compared against in Delete, i.e. the administrator).
	p := newPerm(op, cfg, ready, cfg.UserName(), nil, nil, noop, retry, nil)
	return p.WrapStore(store)
}
// WrapStore wraps the given StoreServer with a StoreServer that checks access
// permissions using Perm.
func (p *Perm) WrapStore(store upspin.StoreServer) upspin.StoreServer {
	return &storeWrapper{
		StoreServer: store,
		user:        p.cfg.UserName(), // initial binding; Dial rebinds per client
		perm:        p,
	}
}
// storeWrapper performs permission checking for StoreServer implementations.
type storeWrapper struct {
	upspin.StoreServer

	user upspin.UserName // set by Dial
	perm *Perm           // shared permission state for the wrapped store
}
// Get implements upspin.StoreServer.
func (s *storeWrapper) Get(ref upspin.Reference) ([]byte, *upspin.Refdata, []upspin.Location, error) {
	const op errors.Op = "store/perm.Get"
	// Only the storage administrator (the server's target user) may list
	// references. The previous check allowed any writer to enumerate refs,
	// which leaks information unnecessarily; this matches Delete's policy.
	if strings.HasPrefix(string(ref), string(upspin.ListRefsMetadata)) && s.user != s.perm.targetUser {
		return nil, nil, nil, errors.E(op, s.user, errors.Permission, "user not authorized")
	}
	return s.StoreServer.Get(ref)
}
// Put implements upspin.StoreServer.
// Writes are rejected unless the dialed user is an authorized writer.
func (s *storeWrapper) Put(data []byte) (*upspin.Refdata, error) {
	const op errors.Op = "store/perm.Put"
	if !s.perm.IsWriter(s.user) {
		return nil, errors.E(op, s.user, errors.Permission, "user not authorized")
	}
	return s.StoreServer.Put(data)
}
// Delete implements upspin.StoreServer.
// Only the permission target user (the server administrator) may delete.
func (s *storeWrapper) Delete(ref upspin.Reference) error {
	const op errors.Op = "store/perm.Delete"
	if s.perm.targetUser != s.user {
		return errors.E(op, s.user, errors.Permission, "user not authorized")
	}
	return s.StoreServer.Delete(ref)
}
// Dial implements upspin.Service. It dials the wrapped StoreServer and
// returns a copy of the wrapper bound to the dialing user's name, so that
// permission checks are applied per client.
func (s *storeWrapper) Dial(cfg upspin.Config, e upspin.Endpoint) (upspin.Service, error) {
	const op errors.Op = "store/perm.Dial"
	svc, err := s.StoreServer.Dial(cfg, e)
	if err != nil {
		return nil, errors.E(op, err)
	}
	// Clone the wrapper; the clone carries the dialed service and user.
	wrapped := *s
	wrapped.StoreServer = svc.(upspin.StoreServer)
	wrapped.user = cfg.UserName()
	return &wrapped, nil
}
serverutil/perm: only permit the storage user to list references
The previous behavior was to allow any store writer to list references,
which leaks information unnecessarily. Only the storage user (the
administrator) has a need to list references, so narrow the access.
Change-Id: I923b8cf8dad8b4fac8983740e4baef0452e2fb74
Reviewed-on: https://upspin-review.googlesource.com/17180
Reviewed-by: Rob Pike <4dc7c9ec434ed06502767136789763ec11d2c4b7@golang.org>
// Copyright 2016 The Upspin Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package perm
// Features:
// - Resolves remote Group files if necessary.
// - Blocks mutations to Store until it has had a chance to prove that either
// there is no Group file and hence writes are free for all, or until the
// Group file has been fully loaded. This prevents a window of vulnerability
// where all writes would be allowed until the initial load is completed.
//
// TODOs:
// - Cache references so we don't need to retrieve the contents every time.
// - Poll more frequently if there is no control Group set up, so the StoreServer
// updates faster when creating a new one for the first time.
// - Poll more frequently if the DirServer is unreachable (speeds up boot time).
import (
"strings"
"upspin.io/errors"
"upspin.io/upspin"
)
// WrapStore wraps the given StoreServer with a StoreServer that checks access
// permissions. It will only start polling the store permissions after the
// ready channel is closed.
func WrapStore(cfg upspin.Config, ready <-chan struct{}, store upspin.StoreServer) upspin.StoreServer {
	const op errors.Op = "serverutil/perm.WrapStore"
	// The server's own user name becomes the permission target (it is the
	// user compared against in Get's list check and in Delete).
	p := newPerm(op, cfg, ready, cfg.UserName(), nil, nil, noop, retry, nil)
	return p.WrapStore(store)
}
// WrapStore wraps the given StoreServer with a StoreServer that checks access
// permissions using Perm.
func (p *Perm) WrapStore(store upspin.StoreServer) upspin.StoreServer {
	return &storeWrapper{
		StoreServer: store,
		user:        p.cfg.UserName(), // initial binding; Dial rebinds per client
		perm:        p,
	}
}
// storeWrapper performs permission checking for StoreServer implementations.
type storeWrapper struct {
	upspin.StoreServer

	user upspin.UserName // set by Dial
	perm *Perm           // shared permission state for the wrapped store
}
// Get implements upspin.StoreServer.
// Listing references (refs carrying the ListRefsMetadata prefix) is limited
// to the permission target user, i.e. the storage administrator.
func (s *storeWrapper) Get(ref upspin.Reference) ([]byte, *upspin.Refdata, []upspin.Location, error) {
	const op errors.Op = "store/perm.Get"
	// Only storage administrators should be permitted to list references.
	if strings.HasPrefix(string(ref), string(upspin.ListRefsMetadata)) && s.user != s.perm.targetUser {
		return nil, nil, nil, errors.E(op, s.user, errors.Permission, "user not authorized")
	}
	return s.StoreServer.Get(ref)
}
// Put implements upspin.StoreServer.
// Writes are rejected unless the dialed user is an authorized writer.
func (s *storeWrapper) Put(data []byte) (*upspin.Refdata, error) {
	const op errors.Op = "store/perm.Put"
	if !s.perm.IsWriter(s.user) {
		return nil, errors.E(op, s.user, errors.Permission, "user not authorized")
	}
	return s.StoreServer.Put(data)
}
// Delete implements upspin.StoreServer.
// Only the permission target user (the server administrator) may delete.
func (s *storeWrapper) Delete(ref upspin.Reference) error {
	const op errors.Op = "store/perm.Delete"
	if s.user != s.perm.targetUser {
		return errors.E(op, s.user, errors.Permission, "user not authorized")
	}
	return s.StoreServer.Delete(ref)
}
// Dial implements upspin.Service.
// It dials the wrapped StoreServer and returns a copy of the wrapper bound
// to the dialing user's name, so permission checks apply per client.
func (s *storeWrapper) Dial(cfg upspin.Config, e upspin.Endpoint) (upspin.Service, error) {
	const op errors.Op = "store/perm.Dial"
	service, err := s.StoreServer.Dial(cfg, e)
	if err != nil {
		return nil, errors.E(op, err)
	}
	newS := *s
	newS.user = cfg.UserName()
	newS.StoreServer = service.(upspin.StoreServer)
	return &newS, nil
}
|
package hook
import (
"fmt"
"log"
"net/http"
"net/url"
"github.com/bitrise-io/api-utils/logging"
"github.com/bitrise-io/bitrise-webhooks/bitriseapi"
"github.com/bitrise-io/bitrise-webhooks/config"
"github.com/bitrise-io/bitrise-webhooks/metrics"
"github.com/bitrise-io/bitrise-webhooks/service"
"github.com/bitrise-io/bitrise-webhooks/service/hook/assembla"
"github.com/bitrise-io/bitrise-webhooks/service/hook/bitbucketserver"
"github.com/bitrise-io/bitrise-webhooks/service/hook/bitbucketv2"
hookCommon "github.com/bitrise-io/bitrise-webhooks/service/hook/common"
"github.com/bitrise-io/bitrise-webhooks/service/hook/deveo"
"github.com/bitrise-io/bitrise-webhooks/service/hook/github"
"github.com/bitrise-io/bitrise-webhooks/service/hook/gitlab"
"github.com/bitrise-io/bitrise-webhooks/service/hook/gogs"
"github.com/bitrise-io/bitrise-webhooks/service/hook/passthrough"
"github.com/bitrise-io/bitrise-webhooks/service/hook/slack"
"github.com/bitrise-io/bitrise-webhooks/service/hook/visualstudioteamservices"
"github.com/bitrise-io/go-utils/colorstring"
"github.com/gorilla/mux"
"github.com/pkg/errors"
"go.uber.org/zap"
)
// supportedProviders maps the service-id URL path parameter to the hook
// provider implementation that can transform that service's webhooks.
func supportedProviders() map[string]hookCommon.Provider {
	return map[string]hookCommon.Provider{
		"github":           github.HookProvider{},
		"bitbucket-v2":     bitbucketv2.HookProvider{},
		"bitbucket-server": bitbucketserver.HookProvider{},
		"slack":            slack.HookProvider{},
		"visualstudio":     visualstudioteamservices.HookProvider{},
		"gitlab":           gitlab.HookProvider{},
		"gogs":             gogs.HookProvider{},
		"deveo":            deveo.HookProvider{},
		"assembla":         assembla.HookProvider{},
		"passthrough":      passthrough.HookProvider{},
	}
}
// ----------------------------------
// --- Response handler functions ---
// respondWithErrorString writes an error response, letting the provider
// customize the payload and status code when it implements ResponseTransformer.
// Status defaults to 400 when the transformer does not set one.
func respondWithErrorString(w http.ResponseWriter, provider *hookCommon.Provider, errStr string) {
	var responseProvider hookCommon.ResponseTransformer = hookCommon.DefaultResponseProvider{}
	if provider != nil {
		if transformer, isTransformer := (*provider).(hookCommon.ResponseTransformer); isTransformer {
			// The provider knows how to shape its own responses.
			responseProvider = transformer
		}
	}

	respInfo := responseProvider.TransformErrorMessageResponse(errStr)
	statusCode := respInfo.HTTPStatusCode
	if statusCode == 0 {
		statusCode = 400 // default
	}
	service.RespondWith(w, statusCode, respInfo.Data)
}
// respondWithSuccessMessage writes a success response, letting the provider
// customize the payload and status code when it implements ResponseTransformer.
// Status defaults to 201 when the transformer does not set one.
func respondWithSuccessMessage(w http.ResponseWriter, provider *hookCommon.Provider, msg string) {
	var responseProvider hookCommon.ResponseTransformer = hookCommon.DefaultResponseProvider{}
	if provider != nil {
		if transformer, isTransformer := (*provider).(hookCommon.ResponseTransformer); isTransformer {
			// The provider knows how to shape its own responses.
			responseProvider = transformer
		}
	}

	respInfo := responseProvider.TransformSuccessMessageResponse(msg)
	statusCode := respInfo.HTTPStatusCode
	if statusCode == 0 {
		statusCode = 201 // default
	}
	service.RespondWith(w, statusCode, respInfo.Data)
}
// respondWithResults writes the aggregated build-trigger results, letting the
// provider customize the payload/status when it implements ResponseTransformer.
// Status defaults to 201 when the transformer does not set one.
func respondWithResults(w http.ResponseWriter, provider *hookCommon.Provider, results hookCommon.TransformResponseInputModel) {
	responseProvider := hookCommon.ResponseTransformer(hookCommon.DefaultResponseProvider{})
	if provider != nil {
		if respTransformer, ok := (*provider).(hookCommon.ResponseTransformer); ok {
			// provider can transform responses - let it do so
			responseProvider = respTransformer
		}
	}

	respInfo := responseProvider.TransformResponse(results)
	httpStatusCode := 201 // default
	if respInfo.HTTPStatusCode != 0 {
		httpStatusCode = respInfo.HTTPStatusCode
	}
	service.RespondWith(w, httpStatusCode, respInfo.Data)
}
// -------------------------
// --- Utility functions ---
// triggerBuild validates the given parameters and calls the Bitrise build
// trigger API at triggerURL. It returns the API response model, whether the
// trigger succeeded, and an error for validation/transport failures.
//
// Panics are recovered and returned as errors: this function is also launched
// on its own goroutine (see HTTPHandler's DontWaitForTriggerResponse path),
// where an uncaught panic would crash the whole process.
func triggerBuild(triggerURL *url.URL, apiToken string, triggerAPIParams bitriseapi.TriggerAPIParamsModel) (respModel bitriseapi.TriggerAPIResponseModel, isSuccess bool, retErr error) {
	logger := logging.WithContext(nil)
	defer func() {
		err := logger.Sync()
		if err != nil {
			fmt.Println("Failed to Sync logger")
		}
	}()
	// Registered after the Sync defer so it runs first on unwind; it converts
	// the panic into a returned error via the named result.
	defer func() {
		if r := recover(); r != nil {
			retErr = fmt.Errorf("PANIC in triggerBuild: %v", r)
			logger.Error("PANIC happened ", zap.Error(retErr))
		}
	}()

	logger.Info(" ===> trigger build", zap.String("triggerURL", triggerURL.String()))
	// Outside prod, and without an explicit override URL, only log the call.
	isOnlyLog := !(config.SendRequestToURL != nil || config.GetServerEnvMode() == config.ServerEnvModeProd)
	if isOnlyLog {
		logger.Debug(colorstring.Yellow(" (debug) isOnlyLog: true"))
	}

	if err := triggerAPIParams.Validate(); err != nil {
		logger.Error(" (!) Failed to trigger build: invalid API parameters", zap.Error(err))
		return bitriseapi.TriggerAPIResponseModel{}, false, errors.Wrap(err, "Failed to Trigger the Build: Invalid parameters")
	}

	responseModel, isSuccess, err := bitriseapi.TriggerBuild(triggerURL, apiToken, triggerAPIParams, isOnlyLog)
	if err != nil {
		logger.Error(" [!] Exception: Failed to trigger build", zap.Error(err))
		return bitriseapi.TriggerAPIResponseModel{}, false, errors.Wrap(err, "Failed to Trigger the Build")
	}

	logger.Info(" ===> trigger build - DONE", zap.Bool("success", isSuccess), zap.String("triggerURL", triggerURL.String()))
	log.Printf(" (debug) response: (%#v)", responseModel)
	return responseModel, isSuccess, nil
}
// ------------------------------
// --- Main HTTP Handler code ---
// HTTPHandler is the main webhook endpoint. It resolves the provider from the
// service-id path parameter, transforms the incoming webhook into build
// trigger parameters, triggers the build(s), and responds with the results.
func HTTPHandler(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	serviceID := vars["service-id"]
	appSlug := vars["app-slug"]
	apiToken := vars["api-token"]

	logger := logging.WithContext(r.Context())
	defer func() {
		err := logger.Sync()
		if err != nil {
			fmt.Println("Failed to Sync logger")
		}
	}()

	if serviceID == "" {
		respondWithErrorString(w, nil, "No service-id defined")
		return
	}
	hookProvider, isSupported := supportedProviders()[serviceID]
	if !isSupported {
		respondWithErrorString(w, nil, fmt.Sprintf("Unsupported Webhook Type / Provider: %s", serviceID))
		return
	}

	if appSlug == "" {
		respondWithErrorString(w, &hookProvider, "No App Slug parameter defined")
		return
	}
	if apiToken == "" {
		respondWithErrorString(w, &hookProvider, "No API Token parameter defined")
		return
	}

	// Let the provider turn the raw HTTP request into build trigger params.
	hookTransformResult := hookCommon.TransformResultModel{}
	metrics.Trace("Hook: Transform", func() {
		hookTransformResult = hookProvider.TransformRequest(r)
	})

	if hookTransformResult.ShouldSkip {
		// A deliberate skip is acknowledged with a success response.
		respondWithSuccessMessage(w, &hookProvider, fmt.Sprintf("Acknowledged, but skipping. Reason: %s", hookTransformResult.Error))
		return
	}
	if hookTransformResult.Error != nil {
		errMsg := fmt.Sprintf("Failed to transform the webhook: %s", hookTransformResult.Error)
		log.Printf(" (debug) %s", errMsg)
		respondWithErrorString(w, &hookProvider, errMsg)
		return
	}

	// Let's Trigger a build / some builds!
	triggerURL := config.SendRequestToURL
	if triggerURL == nil {
		u, err := bitriseapi.BuildTriggerURL("https://app.bitrise.io", appSlug)
		if err != nil {
			logger.Error(" [!] Exception: hookHandler: failed to create Build Trigger URL", zap.Error(err))
			respondWithErrorString(w, &hookProvider, fmt.Sprintf("Failed to create Build Trigger URL: %s", err))
			return
		}
		triggerURL = u
	}

	buildTriggerCount := len(hookTransformResult.TriggerAPIParams)
	if buildTriggerCount == 0 {
		respondWithErrorString(w, &hookProvider, "After processing the webhook we failed to detect any event in it which could be turned into a build.")
		return
	}

	respondWith := hookCommon.TransformResponseInputModel{
		Errors:                       []string{},
		SuccessTriggerResponses:      []bitriseapi.TriggerAPIResponseModel{},
		SkippedTriggerResponses:      []hookCommon.SkipAPIResponseModel{},
		FailedTriggerResponses:       []bitriseapi.TriggerAPIResponseModel{},
		DidNotWaitForTriggerResponse: false,
	}
	metrics.Trace("Hook: Trigger Builds", func() {
		for _, aBuildTriggerParam := range hookTransformResult.TriggerAPIParams {
			commitMessage := aBuildTriggerParam.BuildParams.CommitMessage
			if hookCommon.IsSkipBuildByCommitMessage(commitMessage) {
				respondWith.SkippedTriggerResponses = append(respondWith.SkippedTriggerResponses, hookCommon.SkipAPIResponseModel{
					Message:       "Build skipped because the commit message included a skip ci keyword ([skip ci] or [ci skip]).",
					CommitHash:    aBuildTriggerParam.BuildParams.CommitHash,
					CommitMessage: aBuildTriggerParam.BuildParams.CommitMessage,
					Branch:        aBuildTriggerParam.BuildParams.Branch,
				})
				continue
			}

			triggerBuildAndPrepareRespondWith := func() {
				if triggerResp, isSuccess, err := triggerBuild(triggerURL, apiToken, aBuildTriggerParam); err != nil {
					respondWith.Errors = append(respondWith.Errors, fmt.Sprintf("Failed to Trigger Build: %s", err))
				} else if isSuccess {
					respondWith.SuccessTriggerResponses = append(respondWith.SuccessTriggerResponses, triggerResp)
				} else {
					respondWith.FailedTriggerResponses = append(respondWith.FailedTriggerResponses, triggerResp)
				}
			}
			if hookTransformResult.DontWaitForTriggerResponse {
				// send it, but don't wait for response
				// NOTE(review): this goroutine closes over respondWith and may
				// append to it concurrently with the respondWithResults read
				// below — looks like a data race; verify with `go test -race`.
				go triggerBuildAndPrepareRespondWith()
				respondWith.DidNotWaitForTriggerResponse = true
			} else {
				// send and wait
				triggerBuildAndPrepareRespondWith()
			}
		}
	})
	respondWithResults(w, &hookProvider, respondWith)
}
Catch the panic() and log it in endpoint.go (#108)
package hook
import (
"fmt"
"log"
"net/http"
"net/url"
"github.com/bitrise-io/api-utils/logging"
"github.com/bitrise-io/bitrise-webhooks/bitriseapi"
"github.com/bitrise-io/bitrise-webhooks/config"
"github.com/bitrise-io/bitrise-webhooks/metrics"
"github.com/bitrise-io/bitrise-webhooks/service"
"github.com/bitrise-io/bitrise-webhooks/service/hook/assembla"
"github.com/bitrise-io/bitrise-webhooks/service/hook/bitbucketserver"
"github.com/bitrise-io/bitrise-webhooks/service/hook/bitbucketv2"
hookCommon "github.com/bitrise-io/bitrise-webhooks/service/hook/common"
"github.com/bitrise-io/bitrise-webhooks/service/hook/deveo"
"github.com/bitrise-io/bitrise-webhooks/service/hook/github"
"github.com/bitrise-io/bitrise-webhooks/service/hook/gitlab"
"github.com/bitrise-io/bitrise-webhooks/service/hook/gogs"
"github.com/bitrise-io/bitrise-webhooks/service/hook/passthrough"
"github.com/bitrise-io/bitrise-webhooks/service/hook/slack"
"github.com/bitrise-io/bitrise-webhooks/service/hook/visualstudioteamservices"
"github.com/bitrise-io/go-utils/colorstring"
"github.com/gorilla/mux"
"github.com/pkg/errors"
"go.uber.org/zap"
)
// supportedProviders maps the service-id URL path parameter to the hook
// provider implementation that can transform that service's webhooks.
func supportedProviders() map[string]hookCommon.Provider {
	return map[string]hookCommon.Provider{
		"github":           github.HookProvider{},
		"bitbucket-v2":     bitbucketv2.HookProvider{},
		"bitbucket-server": bitbucketserver.HookProvider{},
		"slack":            slack.HookProvider{},
		"visualstudio":     visualstudioteamservices.HookProvider{},
		"gitlab":           gitlab.HookProvider{},
		"gogs":             gogs.HookProvider{},
		"deveo":            deveo.HookProvider{},
		"assembla":         assembla.HookProvider{},
		"passthrough":      passthrough.HookProvider{},
	}
}
// ----------------------------------
// --- Response handler functions ---
// respondWithErrorString writes an error response, letting the provider
// customize the payload and status code when it implements ResponseTransformer.
// Status defaults to 400 when the transformer does not set one.
func respondWithErrorString(w http.ResponseWriter, provider *hookCommon.Provider, errStr string) {
	responseProvider := hookCommon.ResponseTransformer(hookCommon.DefaultResponseProvider{})
	if provider != nil {
		if respTransformer, ok := (*provider).(hookCommon.ResponseTransformer); ok {
			// provider can transform responses - let it do so
			responseProvider = respTransformer
		}
	}

	respInfo := responseProvider.TransformErrorMessageResponse(errStr)
	httpStatusCode := 400 // default
	if respInfo.HTTPStatusCode != 0 {
		httpStatusCode = respInfo.HTTPStatusCode
	}
	service.RespondWith(w, httpStatusCode, respInfo.Data)
}
// respondWithSuccessMessage writes a success response, letting the provider
// customize the payload and status code when it implements ResponseTransformer.
// Status defaults to 201 when the transformer does not set one.
func respondWithSuccessMessage(w http.ResponseWriter, provider *hookCommon.Provider, msg string) {
	responseProvider := hookCommon.ResponseTransformer(hookCommon.DefaultResponseProvider{})
	if provider != nil {
		if respTransformer, ok := (*provider).(hookCommon.ResponseTransformer); ok {
			// provider can transform responses - let it do so
			responseProvider = respTransformer
		}
	}

	respInfo := responseProvider.TransformSuccessMessageResponse(msg)
	httpStatusCode := 201 // default
	if respInfo.HTTPStatusCode != 0 {
		httpStatusCode = respInfo.HTTPStatusCode
	}
	service.RespondWith(w, httpStatusCode, respInfo.Data)
}
// respondWithResults writes the aggregated build-trigger results, letting the
// provider customize the payload/status when it implements ResponseTransformer.
// Status defaults to 201 when the transformer does not set one.
func respondWithResults(w http.ResponseWriter, provider *hookCommon.Provider, results hookCommon.TransformResponseInputModel) {
	responseProvider := hookCommon.ResponseTransformer(hookCommon.DefaultResponseProvider{})
	if provider != nil {
		if respTransformer, ok := (*provider).(hookCommon.ResponseTransformer); ok {
			// provider can transform responses - let it do so
			responseProvider = respTransformer
		}
	}

	respInfo := responseProvider.TransformResponse(results)
	httpStatusCode := 201 // default
	if respInfo.HTTPStatusCode != 0 {
		httpStatusCode = respInfo.HTTPStatusCode
	}
	service.RespondWith(w, httpStatusCode, respInfo.Data)
}
// -------------------------
// --- Utility functions ---
// triggerBuild validates the given parameters and calls the Bitrise build
// trigger API at triggerURL. It returns the API response model, whether the
// trigger succeeded, and an error for validation/transport failures.
//
// Panics are recovered and returned as errors via the named results. The
// previous recover only logged the panic and then returned a zero response
// with a nil error, so the caller silently recorded an empty "failed"
// response instead of an error.
func triggerBuild(triggerURL *url.URL, apiToken string, triggerAPIParams bitriseapi.TriggerAPIParamsModel) (respModel bitriseapi.TriggerAPIResponseModel, isSuccess bool, retErr error) {
	logger := logging.WithContext(nil)
	defer func() {
		err := logger.Sync()
		if err != nil {
			fmt.Println("Failed to Sync logger")
		}
	}()
	// Registered after the Sync defer so it runs first on unwind; it converts
	// the panic into a returned error via the named result.
	defer func() {
		if r := recover(); r != nil {
			retErr = fmt.Errorf("PANIC in triggerBuild: %v", r)
			logger.Error("PANIC happened ", zap.Error(retErr))
		}
	}()

	logger.Info(" ===> trigger build", zap.String("triggerURL", triggerURL.String()))
	// Outside prod, and without an explicit override URL, only log the call.
	isOnlyLog := !(config.SendRequestToURL != nil || config.GetServerEnvMode() == config.ServerEnvModeProd)
	if isOnlyLog {
		logger.Debug(colorstring.Yellow(" (debug) isOnlyLog: true"))
	}

	if err := triggerAPIParams.Validate(); err != nil {
		logger.Error(" (!) Failed to trigger build: invalid API parameters", zap.Error(err))
		return bitriseapi.TriggerAPIResponseModel{}, false, errors.Wrap(err, "Failed to Trigger the Build: Invalid parameters")
	}

	responseModel, isSuccess, err := bitriseapi.TriggerBuild(triggerURL, apiToken, triggerAPIParams, isOnlyLog)
	if err != nil {
		logger.Error(" [!] Exception: Failed to trigger build", zap.Error(err))
		return bitriseapi.TriggerAPIResponseModel{}, false, errors.Wrap(err, "Failed to Trigger the Build")
	}

	logger.Info(" ===> trigger build - DONE", zap.Bool("success", isSuccess), zap.String("triggerURL", triggerURL.String()))
	log.Printf(" (debug) response: (%#v)", responseModel)
	return responseModel, isSuccess, nil
}
// ------------------------------
// --- Main HTTP Handler code ---
// HTTPHandler is the main webhook endpoint. It resolves the provider from the
// service-id path parameter, transforms the incoming webhook into build
// trigger parameters, triggers the build(s), and responds with the results.
func HTTPHandler(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	serviceID := vars["service-id"]
	appSlug := vars["app-slug"]
	apiToken := vars["api-token"]

	logger := logging.WithContext(r.Context())
	defer func() {
		err := logger.Sync()
		if err != nil {
			fmt.Println("Failed to Sync logger")
		}
	}()

	if serviceID == "" {
		respondWithErrorString(w, nil, "No service-id defined")
		return
	}
	hookProvider, isSupported := supportedProviders()[serviceID]
	if !isSupported {
		respondWithErrorString(w, nil, fmt.Sprintf("Unsupported Webhook Type / Provider: %s", serviceID))
		return
	}

	if appSlug == "" {
		respondWithErrorString(w, &hookProvider, "No App Slug parameter defined")
		return
	}
	if apiToken == "" {
		respondWithErrorString(w, &hookProvider, "No API Token parameter defined")
		return
	}

	// Let the provider turn the raw HTTP request into build trigger params.
	hookTransformResult := hookCommon.TransformResultModel{}
	metrics.Trace("Hook: Transform", func() {
		hookTransformResult = hookProvider.TransformRequest(r)
	})

	if hookTransformResult.ShouldSkip {
		// A deliberate skip is acknowledged with a success response.
		respondWithSuccessMessage(w, &hookProvider, fmt.Sprintf("Acknowledged, but skipping. Reason: %s", hookTransformResult.Error))
		return
	}
	if hookTransformResult.Error != nil {
		errMsg := fmt.Sprintf("Failed to transform the webhook: %s", hookTransformResult.Error)
		log.Printf(" (debug) %s", errMsg)
		respondWithErrorString(w, &hookProvider, errMsg)
		return
	}

	// Let's Trigger a build / some builds!
	triggerURL := config.SendRequestToURL
	if triggerURL == nil {
		u, err := bitriseapi.BuildTriggerURL("https://app.bitrise.io", appSlug)
		if err != nil {
			logger.Error(" [!] Exception: hookHandler: failed to create Build Trigger URL", zap.Error(err))
			respondWithErrorString(w, &hookProvider, fmt.Sprintf("Failed to create Build Trigger URL: %s", err))
			return
		}
		triggerURL = u
	}

	buildTriggerCount := len(hookTransformResult.TriggerAPIParams)
	if buildTriggerCount == 0 {
		respondWithErrorString(w, &hookProvider, "After processing the webhook we failed to detect any event in it which could be turned into a build.")
		return
	}

	respondWith := hookCommon.TransformResponseInputModel{
		Errors:                       []string{},
		SuccessTriggerResponses:      []bitriseapi.TriggerAPIResponseModel{},
		SkippedTriggerResponses:      []hookCommon.SkipAPIResponseModel{},
		FailedTriggerResponses:       []bitriseapi.TriggerAPIResponseModel{},
		DidNotWaitForTriggerResponse: false,
	}
	metrics.Trace("Hook: Trigger Builds", func() {
		for _, aBuildTriggerParam := range hookTransformResult.TriggerAPIParams {
			commitMessage := aBuildTriggerParam.BuildParams.CommitMessage
			if hookCommon.IsSkipBuildByCommitMessage(commitMessage) {
				respondWith.SkippedTriggerResponses = append(respondWith.SkippedTriggerResponses, hookCommon.SkipAPIResponseModel{
					Message:       "Build skipped because the commit message included a skip ci keyword ([skip ci] or [ci skip]).",
					CommitHash:    aBuildTriggerParam.BuildParams.CommitHash,
					CommitMessage: aBuildTriggerParam.BuildParams.CommitMessage,
					Branch:        aBuildTriggerParam.BuildParams.Branch,
				})
				continue
			}

			triggerBuildAndPrepareRespondWith := func() {
				if triggerResp, isSuccess, err := triggerBuild(triggerURL, apiToken, aBuildTriggerParam); err != nil {
					respondWith.Errors = append(respondWith.Errors, fmt.Sprintf("Failed to Trigger Build: %s", err))
				} else if isSuccess {
					respondWith.SuccessTriggerResponses = append(respondWith.SuccessTriggerResponses, triggerResp)
				} else {
					respondWith.FailedTriggerResponses = append(respondWith.FailedTriggerResponses, triggerResp)
				}
			}
			if hookTransformResult.DontWaitForTriggerResponse {
				// send it, but don't wait for response
				// NOTE(review): this goroutine closes over respondWith and may
				// append to it concurrently with the respondWithResults read
				// below — looks like a data race; verify with `go test -race`.
				go triggerBuildAndPrepareRespondWith()
				respondWith.DidNotWaitForTriggerResponse = true
			} else {
				// send and wait
				triggerBuildAndPrepareRespondWith()
			}
		}
	})
	respondWithResults(w, &hookProvider, respondWith)
}
|
package tasks
import (
"crypto/md5"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"strconv"
"strings"
"time"
"github.com/ansible-semaphore/semaphore/lib"
log "github.com/Sirupsen/logrus"
"github.com/ansible-semaphore/semaphore/api/sockets"
"github.com/ansible-semaphore/semaphore/db"
"github.com/ansible-semaphore/semaphore/util"
)
// TaskRunner carries the state for executing a single queued task:
// the task row itself plus the related template, inventory, repository
// and environment records, and the runtime bookkeeping around them.
type TaskRunner struct {
	task        db.Task
	template    db.Template
	inventory   db.Inventory
	repository  db.Repository
	environment db.Environment

	users     []int       // user IDs that receive websocket status updates (see updateStatus)
	alert     bool        // presumably toggles alert notifications — confirm with callers
	alertChat *string     // optional chat identifier for alerts — TODO confirm semantics
	prepared  bool        // cleared at the start of prepareRun; set when preparation succeeds
	process   *os.Process // the spawned external process, if any — confirm lifecycle
	pool      *TaskPool   // owning pool; provides store access and the resource locker
}
// getMD5Hash streams the file at filepath through an MD5 digest and returns
// the hex-encoded sum. Any open or read error is returned unchanged.
func getMD5Hash(filepath string) (string, error) {
	f, err := os.Open(filepath)
	if err != nil {
		return "", err
	}
	defer f.Close()

	digest := md5.New()
	if _, err = io.Copy(digest, f); err != nil {
		return "", err
	}

	return fmt.Sprintf("%x", digest.Sum(nil)), nil
}
// getPlaybookDir returns the directory that contains the template's playbook
// inside this task's repository checkout.
func (t *TaskRunner) getPlaybookDir() string {
	return path.Dir(path.Join(t.getRepoPath(), t.template.Playbook))
}
// getRepoPath returns the local filesystem path of the repository clone used
// by this task's template, as computed by lib.GitRepository.
func (t *TaskRunner) getRepoPath() string {
	repo := lib.GitRepository{
		Logger:     t, // TaskRunner acts as the repository's logger
		TemplateID: t.template.ID,
		Repository: t.repository,
	}

	return repo.GetFullPath()
}
// setStatus transitions the task to the given status, persists/broadcasts it
// via updateStatus, and fires notifications: mail on failure, telegram and
// slack on either success or failure.
func (t *TaskRunner) setStatus(status db.TaskStatus) {
	// A task that is being stopped may only end up stopped: a failure while
	// stopping is reported as stopped, and any other transition is a bug.
	if t.task.Status == db.TaskStoppingStatus {
		switch status {
		case db.TaskFailStatus:
			status = db.TaskStoppedStatus
		case db.TaskStoppedStatus:
		default:
			panic("stopping TaskRunner cannot be " + status)
		}
	}

	t.task.Status = status
	t.updateStatus()

	if status == db.TaskFailStatus {
		t.sendMailAlert()
	}

	if status == db.TaskSuccessStatus || status == db.TaskFailStatus {
		t.sendTelegramAlert()
		t.sendSlackAlert()
	}
}
// updateStatus pushes the task's current state to every subscribed user over
// the socket layer and persists the task row in the store.
func (t *TaskRunner) updateStatus() {
	for _, user := range t.users {
		b, err := json.Marshal(&map[string]interface{}{
			"type":        "update",
			"start":       t.task.Start,
			"end":         t.task.End,
			"status":      t.task.Status,
			"task_id":     t.task.ID,
			"template_id": t.task.TemplateID,
			"project_id":  t.task.ProjectID,
			"version":     t.task.Version,
		})
		// Marshal of a plain map should not fail; LogPanic guards anyway.
		util.LogPanic(err)

		sockets.Message(user, b)
	}
	if err := t.pool.store.UpdateTask(t.task); err != nil {
		t.panicOnError(err, "Failed to update TaskRunner status")
	}
}
// fail marks the task as failed (which also triggers the failure alerts
// wired up in setStatus).
func (t *TaskRunner) fail() {
	t.setStatus(db.TaskFailStatus)
}
// destroyKeys destroys the key material associated with this task (inventory
// SSH key, become key, and the template's vault key). Each failure is logged
// and ignored: cleanup is best-effort and must not fail the task.
func (t *TaskRunner) destroyKeys() {
	err := t.inventory.SSHKey.Destroy()
	if err != nil {
		t.Log("Can't destroy inventory user key, error: " + err.Error())
	}

	err = t.inventory.BecomeKey.Destroy()
	if err != nil {
		t.Log("Can't destroy inventory become user key, error: " + err.Error())
	}

	err = t.template.VaultKey.Destroy()
	if err != nil {
		t.Log("Can't destroy inventory vault password file, error: " + err.Error())
	}
}
// createTaskEvent records a "finished" event for the task in the store,
// embedding the final status in the description.
func (t *TaskRunner) createTaskEvent() {
	objType := db.EventTask
	desc := "Task ID " + strconv.Itoa(t.task.ID) + " (" + t.template.Name + ")" + " finished - " + strings.ToUpper(string(t.task.Status))
	_, err := t.pool.store.CreateEvent(db.Event{
		UserID:      t.task.UserID,
		ProjectID:   &t.task.ProjectID,
		ObjectType:  &objType,
		ObjectID:    &t.task.ID,
		Description: &desc,
	})
	if err != nil {
		t.panicOnError(err, "Fatal error inserting an event")
	}
}
// prepareRun performs all pre-flight work for the task: tmp directory,
// "preparing" event, repository clone/checkout, inventory install, galaxy
// requirements and vault password file. Sets t.prepared on success; on any
// failure the task is marked failed and preparation stops.
func (t *TaskRunner) prepareRun() {
	t.prepared = false
	// Non-permanent stores (e.g. BoltDB) need an explicit connection for
	// the duration of the preparation.
	if !t.pool.store.PermanentConnection() {
		t.pool.store.Connect("task " + strconv.Itoa(t.task.ID))
		defer t.pool.store.Close("task " + strconv.Itoa(t.task.ID))
	}
	// Always release the resource lock and record an event, whatever the
	// outcome of the preparation.
	defer func() {
		log.Info("Stopped preparing TaskRunner " + strconv.Itoa(t.task.ID))
		log.Info("Release resource locker with TaskRunner " + strconv.Itoa(t.task.ID))
		t.pool.resourceLocker <- &resourceLock{lock: false, holder: t}
		t.createTaskEvent()
	}()
	t.Log("Preparing: " + strconv.Itoa(t.task.ID))
	if err := checkTmpDir(util.Config.TmpPath); err != nil {
		t.Log("Creating tmp dir failed: " + err.Error())
		t.fail()
		return
	}
	objType := db.EventTask
	desc := "Task ID " + strconv.Itoa(t.task.ID) + " (" + t.template.Name + ")" + " is preparing"
	evt := db.Event{
		UserID:      t.task.UserID,
		ProjectID:   &t.task.ProjectID,
		ObjectType:  &objType,
		ObjectID:    &t.task.ID,
		Description: &desc,
	}
	if _, err := t.pool.store.CreateEvent(evt); err != nil {
		t.Log("Fatal error inserting an event")
		panic(err)
	}
	t.Log("Prepare TaskRunner with template: " + t.template.Name + "\n")
	t.updateStatus()
	// Local repositories only need to exist on disk; remote ones are
	// cloned/pulled and checked out to the requested commit.
	if t.repository.GetType() == db.RepositoryLocal {
		if _, err := os.Stat(t.repository.GitURL); err != nil {
			t.Log("Failed in finding static repository at " + t.repository.GitURL + ": " + err.Error())
			t.fail()
			return
		}
	} else {
		if err := t.updateRepository(); err != nil {
			t.Log("Failed updating repository: " + err.Error())
			t.fail()
			return
		}
		if err := t.checkoutRepository(); err != nil {
			t.Log("Failed to checkout repository to required commit: " + err.Error())
			t.fail()
			return
		}
	}
	if err := t.installInventory(); err != nil {
		t.Log("Failed to install inventory: " + err.Error())
		t.fail()
		return
	}
	if err := t.installRequirements(); err != nil {
		t.Log("Running galaxy failed: " + err.Error())
		t.fail()
		return
	}
	if err := t.installVaultKeyFile(); err != nil {
		t.Log("Failed to install vault password file: " + err.Error())
		t.fail()
		return
	}
	t.prepared = true
}
// run executes the prepared task: marks it running, launches the playbook,
// records the result, and on success queues autorun build templates.
func (t *TaskRunner) run() {
	// Bug fix: like prepareRun, run must open the store connection when the
	// store is non-permanent (e.g. BoltDB); otherwise every store call below
	// (events, status updates, template queries) operates on a closed store.
	if !t.pool.store.PermanentConnection() {
		t.pool.store.Connect("run " + strconv.Itoa(t.task.ID))
		defer t.pool.store.Close("run " + strconv.Itoa(t.task.ID))
	}
	// Always release the resource lock, finalize the end time/status,
	// record the event and clean up key material.
	defer func() {
		log.Info("Stopped running TaskRunner " + strconv.Itoa(t.task.ID))
		log.Info("Release resource locker with TaskRunner " + strconv.Itoa(t.task.ID))
		t.pool.resourceLocker <- &resourceLock{lock: false, holder: t}
		now := time.Now()
		t.task.End = &now
		t.updateStatus()
		t.createTaskEvent()
		t.destroyKeys()
	}()
	// TODO: more details
	if t.task.Status == db.TaskStoppingStatus {
		t.setStatus(db.TaskStoppedStatus)
		return
	}
	now := time.Now()
	t.task.Start = &now
	t.setStatus(db.TaskRunningStatus)
	objType := db.EventTask
	desc := "Task ID " + strconv.Itoa(t.task.ID) + " (" + t.template.Name + ")" + " is running"
	_, err := t.pool.store.CreateEvent(db.Event{
		UserID:      t.task.UserID,
		ProjectID:   &t.task.ProjectID,
		ObjectType:  &objType,
		ObjectID:    &t.task.ID,
		Description: &desc,
	})
	if err != nil {
		t.Log("Fatal error inserting an event")
		panic(err)
	}
	t.Log("Started: " + strconv.Itoa(t.task.ID))
	t.Log("Run TaskRunner with template: " + t.template.Name + "\n")
	// Re-check in case a stop request arrived while acquiring resources.
	if t.task.Status == db.TaskStoppingStatus {
		t.setStatus(db.TaskStoppedStatus)
		return
	}
	err = t.runPlaybook()
	if err != nil {
		t.Log("Running playbook failed: " + err.Error())
		t.fail()
		return
	}
	t.setStatus(db.TaskSuccessStatus)
	// Queue autorun templates that build on top of this one.
	templates, err := t.pool.store.GetTemplates(t.task.ProjectID, db.TemplateFilter{
		BuildTemplateID: &t.task.TemplateID,
		AutorunOnly:     true,
	}, db.RetrieveQueryParams{})
	if err != nil {
		t.Log("Running playbook failed: " + err.Error())
		return
	}
	for _, tpl := range templates {
		_, err = t.pool.AddTask(db.Task{
			TemplateID:  tpl.ID,
			ProjectID:   tpl.ProjectID,
			BuildTaskID: &t.task.ID,
		}, nil, tpl.ProjectID)
		if err != nil {
			t.Log("Running playbook failed: " + err.Error())
			continue
		}
	}
}
// prepareError classifies an error raised while populating task details:
// a missing record is logged and returned to the caller, any other error
// fails the task and panics, and nil passes through unchanged.
func (t *TaskRunner) prepareError(err error, errMsg string) error {
	switch {
	case err == db.ErrNotFound:
		t.Log(errMsg)
		return err
	case err != nil:
		t.fail()
		panic(err)
	default:
		return nil
	}
}
// nolint: gocyclo
// populateDetails loads everything the runner needs from the store:
// template, project alert settings, project users, inventory, repository
// (with deserialized SSH key) and environment. Task-supplied environment
// JSON is merged into the template environment's JSON.
func (t *TaskRunner) populateDetails() error {
	// get template
	var err error
	t.template, err = t.pool.store.GetTemplate(t.task.ProjectID, t.task.TemplateID)
	if err != nil {
		return t.prepareError(err, "Template not found!")
	}
	// get project alert setting
	project, err := t.pool.store.GetProject(t.template.ProjectID)
	if err != nil {
		return t.prepareError(err, "Project not found!")
	}
	t.alert = project.Alert
	t.alertChat = project.AlertChat
	// get project users
	users, err := t.pool.store.GetProjectUsers(t.template.ProjectID, db.RetrieveQueryParams{})
	if err != nil {
		return t.prepareError(err, "Users not found!")
	}
	t.users = []int{}
	for _, user := range users {
		t.users = append(t.users, user.ID)
	}
	// get inventory
	t.inventory, err = t.pool.store.GetInventory(t.template.ProjectID, t.template.InventoryID)
	if err != nil {
		return t.prepareError(err, "Template Inventory not found!")
	}
	// get repository
	t.repository, err = t.pool.store.GetRepository(t.template.ProjectID, t.template.RepositoryID)
	if err != nil {
		return err
	}
	err = t.repository.SSHKey.DeserializeSecret()
	if err != nil {
		return err
	}
	// get environment
	if t.template.EnvironmentID != nil {
		t.environment, err = t.pool.store.GetEnvironment(t.template.ProjectID, *t.template.EnvironmentID)
		if err != nil {
			return err
		}
	}
	if t.task.Environment != "" {
		environment := make(map[string]interface{})
		// NOTE(review): the two Unmarshal calls below look swapped — the
		// task environment is only decoded when t.environment.JSON is
		// non-empty, while t.environment.JSON is decoded unconditionally
		// (an empty string would fail to parse). Confirm intent before
		// changing; template values currently win over task values.
		if t.environment.JSON != "" {
			err = json.Unmarshal([]byte(t.task.Environment), &environment)
			if err != nil {
				return err
			}
		}
		taskEnvironment := make(map[string]interface{})
		err = json.Unmarshal([]byte(t.environment.JSON), &taskEnvironment)
		if err != nil {
			return err
		}
		for k, v := range taskEnvironment {
			environment[k] = v
		}
		var ev []byte
		ev, err = json.Marshal(environment)
		if err != nil {
			return err
		}
		// Store the merged result back on the environment for later use.
		t.environment.JSON = string(ev)
	}
	return nil
}
// installVaultKeyFile writes the template's vault password file to disk so
// it can be passed to ansible via --vault-password-file.
// No-op when the template has no vault key configured.
func (t *TaskRunner) installVaultKeyFile() error {
	if t.template.VaultKeyID == nil {
		return nil
	}
	return t.template.VaultKey.Install(db.AccessKeyRoleAnsiblePasswordVault)
}
// checkoutRepository checks the repository out at the task's commit.
// When the task carries no commit hash, the current HEAD commit hash and
// message are recorded on the task row instead.
func (t *TaskRunner) checkoutRepository() error {
	repo := lib.GitRepository{
		Logger:     t,
		TemplateID: t.template.ID,
		Repository: t.repository,
	}
	err := repo.ValidateRepo()
	if err != nil {
		return err
	}
	if t.task.CommitHash != nil {
		// checkout to commit if it is provided for TaskRunner
		return repo.Checkout(*t.task.CommitHash)
	}
	// store commit to TaskRunner table
	commitHash, err := repo.GetLastCommitHash()
	if err != nil {
		return err
	}
	// Commit message is informational only; its retrieval error is ignored.
	commitMessage, _ := repo.GetLastCommitMessage()
	t.task.CommitHash = &commitHash
	t.task.CommitMessage = commitMessage
	return t.pool.store.UpdateTask(t.task)
}
// updateRepository brings the local clone up to date: an invalid or missing
// clone is removed and re-cloned; otherwise a pull is attempted and, if
// that fails, the clone is recreated from scratch.
func (t *TaskRunner) updateRepository() error {
	repo := lib.GitRepository{
		Logger:     t,
		TemplateID: t.template.ID,
		Repository: t.repository,
	}
	err := repo.ValidateRepo()
	if err != nil {
		// Remove a corrupt clone before re-cloning; a missing directory
		// needs no removal.
		if !os.IsNotExist(err) {
			err = os.RemoveAll(repo.GetFullPath())
			if err != nil {
				return err
			}
		}
		return repo.Clone()
	}
	if repo.CanBePulled() {
		err = repo.Pull()
		if err == nil {
			return nil
		}
	}
	// Pull failed or is not possible (e.g. remote URL changed): re-clone.
	err = os.RemoveAll(repo.GetFullPath())
	if err != nil {
		return err
	}
	return repo.Clone()
}
// installCollectionsRequirements runs "ansible-galaxy collection install"
// for collections/requirements.yml next to the playbook. The install is
// skipped when the file is absent or unchanged since the last run
// (tracked via an .md5 marker file).
func (t *TaskRunner) installCollectionsRequirements() error {
	reqPath := path.Join(t.getPlaybookDir(), "collections", "requirements.yml")
	hashPath := reqPath + ".md5"

	if _, err := os.Stat(reqPath); err != nil {
		t.Log("No collections/requirements.yml file found. Skip galaxy install process.\n")
		return nil
	}

	if !hasRequirementsChanges(reqPath, hashPath) {
		t.Log("collections/requirements.yml has no changes. Skip galaxy install process.\n")
		return nil
	}

	galaxyArgs := []string{"collection", "install", "-r", reqPath, "--force"}
	if err := t.runGalaxy(galaxyArgs); err != nil {
		return err
	}
	return writeMD5Hash(reqPath, hashPath)
}
// installRolesRequirements runs "ansible-galaxy role install" for
// roles/requirements.yml in the repository root. The install is skipped
// when the file is absent or unchanged since the last run (tracked via an
// .md5 marker file).
func (t *TaskRunner) installRolesRequirements() error {
	// Consistency fix: build the path with path.Join like
	// installCollectionsRequirements, instead of "%s/roles/requirements.yml".
	requirementsFilePath := path.Join(t.getRepoPath(), "roles", "requirements.yml")
	requirementsHashFilePath := fmt.Sprintf("%s.md5", requirementsFilePath)
	if _, err := os.Stat(requirementsFilePath); err != nil {
		t.Log("No roles/requirements.yml file found. Skip galaxy install process.\n")
		return nil
	}
	if hasRequirementsChanges(requirementsFilePath, requirementsHashFilePath) {
		if err := t.runGalaxy([]string{
			"role",
			"install",
			"-r",
			requirementsFilePath,
			"--force",
		}); err != nil {
			return err
		}
		// Record the installed file's hash so unchanged files are skipped
		// next time.
		if err := writeMD5Hash(requirementsFilePath, requirementsHashFilePath); err != nil {
			return err
		}
	} else {
		t.Log("roles/requirements.yml has no changes. Skip galaxy install process.\n")
	}
	return nil
}
// installRequirements installs both collection and role requirements for
// the task's playbook.
func (t *TaskRunner) installRequirements() error {
	if err := t.installCollectionsRequirements(); err != nil {
		return err
	}
	return t.installRolesRequirements()
}
// runGalaxy invokes ansible-galaxy with the given arguments in the context
// of the task's repository.
func (t *TaskRunner) runGalaxy(args []string) error {
	playbook := lib.AnsiblePlaybook{
		Logger:     t,
		TemplateID: t.template.ID,
		Repository: t.repository,
	}
	return playbook.RunGalaxy(args)
}
// runPlaybook builds the ansible-playbook arguments and environment and
// starts the playbook, saving the spawned process on the runner so it can
// be stopped later.
func (t *TaskRunner) runPlaybook() (err error) {
	args, err := t.getPlaybookArgs()
	if err != nil {
		return
	}
	environmentVariables, err := t.getEnvironmentENV()
	if err != nil {
		return
	}
	return lib.AnsiblePlaybook{
		Logger:     t,
		TemplateID: t.template.ID,
		Repository: t.repository,
	}.RunPlaybook(args, &environmentVariables, func(p *os.Process) { t.process = p })
}
// getEnvironmentENV decodes the environment's ENV JSON object into a list
// of KEY=value strings suitable for the playbook process environment.
func (t *TaskRunner) getEnvironmentENV() (arr []string, err error) {
	envMap := make(map[string]string)
	if t.environment.ENV != nil {
		if err = json.Unmarshal([]byte(*t.environment.ENV), &envMap); err != nil {
			return
		}
	}
	for k, v := range envMap {
		arr = append(arr, k+"="+v)
	}
	return
}
// getEnvironmentExtraVars builds the JSON string passed via --extra-vars:
// the environment's JSON object extended with a
// "semaphore_vars.task_details" object containing message, username,
// template type and version information.
func (t *TaskRunner) getEnvironmentExtraVars() (str string, err error) {
	extraVars := make(map[string]interface{})
	if t.environment.JSON != "" {
		err = json.Unmarshal([]byte(t.environment.JSON), &extraVars)
		if err != nil {
			return
		}
	}
	taskDetails := make(map[string]interface{})
	if t.task.Message != "" {
		taskDetails["message"] = t.task.Message
	}
	if t.task.UserID != nil {
		var user db.User
		user, err = t.pool.store.GetUser(*t.task.UserID)
		// Username is informational: a lookup failure silently omits it.
		if err == nil {
			taskDetails["username"] = user.Username
		}
	}
	if t.template.Type != db.TemplateTask {
		taskDetails["type"] = t.template.Type
		incomingVersion := t.task.GetIncomingVersion(t.pool.store)
		if incomingVersion != nil {
			taskDetails["incoming_version"] = incomingVersion
		}
		if t.template.Type == db.TemplateBuild {
			taskDetails["target_version"] = t.task.Version
		}
	}
	vars := make(map[string]interface{})
	vars["task_details"] = taskDetails
	extraVars["semaphore_vars"] = vars
	ev, err := json.Marshal(extraVars)
	if err != nil {
		return
	}
	str = string(ev)
	return
}
// nolint: gocyclo
// getPlaybookArgs assembles the ansible-playbook command line: inventory,
// access-key options, verbosity/diff/dry-run flags, vault password file,
// extra-vars, template/task extra arguments, and finally the playbook name.
func (t *TaskRunner) getPlaybookArgs() (args []string, err error) {
	playbookName := t.task.Playbook
	if playbookName == "" {
		playbookName = t.template.Playbook
	}
	// Static inventories were written to a tmp file by installInventory.
	var inventory string
	switch t.inventory.Type {
	case db.InventoryFile:
		inventory = t.inventory.Inventory
	case db.InventoryStatic, db.InventoryStaticYaml:
		inventory = util.Config.TmpPath + "/inventory_" + strconv.Itoa(t.task.ID)
		if t.inventory.Type == db.InventoryStaticYaml {
			inventory += ".yml"
		}
	default:
		err = fmt.Errorf("invalid inventory type") // typo fix: was "invetory"
		return
	}
	args = []string{
		"-i", inventory,
	}
	if t.inventory.SSHKeyID != nil {
		switch t.inventory.SSHKey.Type {
		case db.AccessKeySSH:
			args = append(args, "--private-key="+t.inventory.SSHKey.GetPath())
			//args = append(args, "--extra-vars={\"ansible_ssh_private_key_file\": \""+t.inventory.SSHKey.GetPath()+"\"}")
			if t.inventory.SSHKey.SshKey.Login != "" {
				args = append(args, "--extra-vars={\"ansible_user\": \""+t.inventory.SSHKey.SshKey.Login+"\"}")
			}
		case db.AccessKeyLoginPassword:
			args = append(args, "--extra-vars=@"+t.inventory.SSHKey.GetPath())
		case db.AccessKeyNone:
		default:
			err = fmt.Errorf("access key does not suit inventory's user credentials")
			return
		}
	}
	if t.inventory.BecomeKeyID != nil {
		switch t.inventory.BecomeKey.Type {
		case db.AccessKeyLoginPassword:
			args = append(args, "--extra-vars=@"+t.inventory.BecomeKey.GetPath())
		case db.AccessKeyNone:
		default:
			err = fmt.Errorf("access key does not suit inventory's sudo user credentials")
			return
		}
	}
	if t.task.Debug {
		args = append(args, "-vvvv")
	}
	if t.task.Diff {
		args = append(args, "--diff")
	}
	if t.task.DryRun {
		args = append(args, "--check")
	}
	if t.template.VaultKeyID != nil {
		args = append(args, "--vault-password-file", t.template.VaultKey.GetPath())
	}
	// Bug fix: the extra-vars error is documented as non-fatal, but it was
	// assigned to the named return `err` and could leak out of the function,
	// failing the whole playbook run. Use a local variable instead.
	extraVars, evErr := t.getEnvironmentExtraVars()
	if evErr != nil {
		t.Log(evErr.Error())
		t.Log("Could not remove command environment, if existant it will be passed to --extra-vars. This is not fatal but be aware of side effects")
	} else if extraVars != "" {
		args = append(args, "--extra-vars", extraVars)
	}
	var templateExtraArgs []string
	if t.template.Arguments != nil {
		err = json.Unmarshal([]byte(*t.template.Arguments), &templateExtraArgs)
		if err != nil {
			t.Log("Invalid format of the template extra arguments, must be valid JSON")
			return
		}
	}
	var taskExtraArgs []string
	if t.template.AllowOverrideArgsInTask && t.task.Arguments != nil {
		err = json.Unmarshal([]byte(*t.task.Arguments), &taskExtraArgs)
		if err != nil {
			t.Log("Invalid format of the TaskRunner extra arguments, must be valid JSON")
			return
		}
	}
	if t.task.Limit != "" {
		t.Log("--limit=" + t.task.Limit)
		taskExtraArgs = append(taskExtraArgs, "--limit="+t.task.Limit)
	}
	args = append(args, templateExtraArgs...)
	args = append(args, taskExtraArgs...)
	args = append(args, playbookName)
	return
}
func hasRequirementsChanges(requirementsFilePath string, requirementsHashFilePath string) bool {
oldFileMD5HashBytes, err := ioutil.ReadFile(requirementsHashFilePath)
if err != nil {
return true
}
newFileMD5Hash, err := getMD5Hash(requirementsFilePath)
if err != nil {
return true
}
return string(oldFileMD5HashBytes) != newFileMD5Hash
}
// writeMD5Hash stores the MD5 of requirementsFile in requirementsHashFile
// so later runs can detect whether the requirements changed.
func writeMD5Hash(requirementsFile string, requirementsHashFile string) error {
	newFileMD5Hash, err := getMD5Hash(requirementsFile)
	if err != nil {
		return err
	}
	// os.WriteFile replaces the deprecated ioutil.WriteFile (Go 1.16+).
	return os.WriteFile(requirementsHashFile, []byte(newFileMD5Hash), 0644)
}
// checkTmpDir checks to see if the temporary directory exists
// and if it does not attempts to create it
func checkTmpDir(path string) error {
var err error
if _, err = os.Stat(path); err != nil {
if os.IsNotExist(err) {
return os.MkdirAll(path, 0700)
}
}
return err
}
fix(be): running task on boltdb
package tasks
import (
"crypto/md5"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"strconv"
"strings"
"time"
"github.com/ansible-semaphore/semaphore/lib"
log "github.com/Sirupsen/logrus"
"github.com/ansible-semaphore/semaphore/api/sockets"
"github.com/ansible-semaphore/semaphore/db"
"github.com/ansible-semaphore/semaphore/util"
)
// TaskRunner executes a single queued task: it prepares the repository,
// inventory, requirements and keys, runs the ansible playbook and reports
// status and alerts.
type TaskRunner struct {
	task        db.Task
	template    db.Template
	inventory   db.Inventory
	repository  db.Repository
	environment db.Environment
	// users holds IDs of project users notified about status changes.
	users []int
	// alert / alertChat mirror the project's alert settings.
	alert     bool
	alertChat *string
	// prepared is set once prepareRun completed successfully.
	prepared bool
	// process is the running ansible-playbook process, if any.
	process *os.Process
	pool    *TaskPool
}
func getMD5Hash(filepath string) (string, error) {
file, err := os.Open(filepath)
if err != nil {
return "", err
}
defer file.Close()
hash := md5.New()
if _, err := io.Copy(hash, file); err != nil {
return "", err
}
return fmt.Sprintf("%x", hash.Sum(nil)), nil
}
// getPlaybookDir returns the directory containing the template's playbook
// inside the checked-out repository.
func (t *TaskRunner) getPlaybookDir() string {
	return path.Dir(path.Join(t.getRepoPath(), t.template.Playbook))
}
// getRepoPath returns the full local path of the template's git repository.
func (t *TaskRunner) getRepoPath() string {
	gitRepo := lib.GitRepository{
		Logger:     t,
		TemplateID: t.template.ID,
		Repository: t.repository,
	}

	return gitRepo.GetFullPath()
}
// setStatus transitions the task to the given status, persists and
// broadcasts it, and fires the configured alerts.
// While the task is stopping, a failure is coerced to "stopped"; any status
// other than fail/stopped during stopping is a programming error (panic).
func (t *TaskRunner) setStatus(status db.TaskStatus) {
	if t.task.Status == db.TaskStoppingStatus {
		switch status {
		case db.TaskFailStatus:
			status = db.TaskStoppedStatus
		case db.TaskStoppedStatus:
		default:
			panic("stopping TaskRunner cannot be " + status)
		}
	}
	t.task.Status = status
	t.updateStatus()
	// Mail alert only on failure; chat alerts on any terminal status.
	if status == db.TaskFailStatus {
		t.sendMailAlert()
	}
	if status == db.TaskSuccessStatus || status == db.TaskFailStatus {
		t.sendTelegramAlert()
		t.sendSlackAlert()
	}
}
// updateStatus pushes the current task state to every project user over
// websockets and persists the task row in the store.
func (t *TaskRunner) updateStatus() {
	for _, user := range t.users {
		b, err := json.Marshal(&map[string]interface{}{
			"type":        "update",
			"start":       t.task.Start,
			"end":         t.task.End,
			"status":      t.task.Status,
			"task_id":     t.task.ID,
			"template_id": t.task.TemplateID,
			"project_id":  t.task.ProjectID,
			"version":     t.task.Version,
		})
		util.LogPanic(err)
		sockets.Message(user, b)
	}
	if err := t.pool.store.UpdateTask(t.task); err != nil {
		t.panicOnError(err, "Failed to update TaskRunner status")
	}
}
// fail marks the task as failed (triggering failure alerts via setStatus).
func (t *TaskRunner) fail() {
	t.setStatus(db.TaskFailStatus)
}
// destroyKeys removes the temporary key material installed for this run:
// the inventory SSH key, the become key and the vault password file.
// Failures are logged but non-fatal (best-effort cleanup).
func (t *TaskRunner) destroyKeys() {
	if err := t.inventory.SSHKey.Destroy(); err != nil {
		t.Log("Can't destroy inventory user key, error: " + err.Error())
	}
	if err := t.inventory.BecomeKey.Destroy(); err != nil {
		t.Log("Can't destroy inventory become user key, error: " + err.Error())
	}
	if err := t.template.VaultKey.Destroy(); err != nil {
		t.Log("Can't destroy inventory vault password file, error: " + err.Error())
	}
}
// createTaskEvent records a "finished" event for the task in the store,
// embedding the final status in the description.
func (t *TaskRunner) createTaskEvent() {
	objType := db.EventTask
	desc := "Task ID " + strconv.Itoa(t.task.ID) + " (" + t.template.Name + ")" + " finished - " + strings.ToUpper(string(t.task.Status))
	_, err := t.pool.store.CreateEvent(db.Event{
		UserID:      t.task.UserID,
		ProjectID:   &t.task.ProjectID,
		ObjectType:  &objType,
		ObjectID:    &t.task.ID,
		Description: &desc,
	})
	if err != nil {
		t.panicOnError(err, "Fatal error inserting an event")
	}
}
// prepareRun performs all pre-flight work for the task: tmp directory,
// "preparing" event, repository clone/checkout, inventory install, galaxy
// requirements and vault password file. Sets t.prepared on success; on any
// failure the task is marked failed and preparation stops.
func (t *TaskRunner) prepareRun() {
	t.prepared = false
	// Non-permanent stores (e.g. BoltDB) need an explicit connection for
	// the duration of the preparation.
	if !t.pool.store.PermanentConnection() {
		t.pool.store.Connect("prepare " + strconv.Itoa(t.task.ID))
		defer t.pool.store.Close("prepare " + strconv.Itoa(t.task.ID))
	}
	// Always release the resource lock and record an event, whatever the
	// outcome of the preparation.
	defer func() {
		log.Info("Stopped preparing TaskRunner " + strconv.Itoa(t.task.ID))
		log.Info("Release resource locker with TaskRunner " + strconv.Itoa(t.task.ID))
		t.pool.resourceLocker <- &resourceLock{lock: false, holder: t}
		t.createTaskEvent()
	}()
	t.Log("Preparing: " + strconv.Itoa(t.task.ID))
	if err := checkTmpDir(util.Config.TmpPath); err != nil {
		t.Log("Creating tmp dir failed: " + err.Error())
		t.fail()
		return
	}
	objType := db.EventTask
	desc := "Task ID " + strconv.Itoa(t.task.ID) + " (" + t.template.Name + ")" + " is preparing"
	evt := db.Event{
		UserID:      t.task.UserID,
		ProjectID:   &t.task.ProjectID,
		ObjectType:  &objType,
		ObjectID:    &t.task.ID,
		Description: &desc,
	}
	if _, err := t.pool.store.CreateEvent(evt); err != nil {
		t.Log("Fatal error inserting an event")
		panic(err)
	}
	t.Log("Prepare TaskRunner with template: " + t.template.Name + "\n")
	t.updateStatus()
	// Local repositories only need to exist on disk; remote ones are
	// cloned/pulled and checked out to the requested commit.
	if t.repository.GetType() == db.RepositoryLocal {
		if _, err := os.Stat(t.repository.GitURL); err != nil {
			t.Log("Failed in finding static repository at " + t.repository.GitURL + ": " + err.Error())
			t.fail()
			return
		}
	} else {
		if err := t.updateRepository(); err != nil {
			t.Log("Failed updating repository: " + err.Error())
			t.fail()
			return
		}
		if err := t.checkoutRepository(); err != nil {
			t.Log("Failed to checkout repository to required commit: " + err.Error())
			t.fail()
			return
		}
	}
	if err := t.installInventory(); err != nil {
		t.Log("Failed to install inventory: " + err.Error())
		t.fail()
		return
	}
	if err := t.installRequirements(); err != nil {
		t.Log("Running galaxy failed: " + err.Error())
		t.fail()
		return
	}
	if err := t.installVaultKeyFile(); err != nil {
		t.Log("Failed to install vault password file: " + err.Error())
		t.fail()
		return
	}
	t.prepared = true
}
// run executes the prepared task: marks it running, launches the playbook,
// records the result, and on success queues autorun build templates.
func (t *TaskRunner) run() {
	// Non-permanent stores (e.g. BoltDB) need an explicit connection for
	// the duration of the run.
	if !t.pool.store.PermanentConnection() {
		t.pool.store.Connect("run " + strconv.Itoa(t.task.ID))
		defer t.pool.store.Close("run " + strconv.Itoa(t.task.ID))
	}
	// Always release the resource lock, finalize the end time/status,
	// record the event and clean up key material.
	defer func() {
		log.Info("Stopped running TaskRunner " + strconv.Itoa(t.task.ID))
		log.Info("Release resource locker with TaskRunner " + strconv.Itoa(t.task.ID))
		t.pool.resourceLocker <- &resourceLock{lock: false, holder: t}
		now := time.Now()
		t.task.End = &now
		t.updateStatus()
		t.createTaskEvent()
		t.destroyKeys()
	}()
	// TODO: more details
	if t.task.Status == db.TaskStoppingStatus {
		t.setStatus(db.TaskStoppedStatus)
		return
	}
	now := time.Now()
	t.task.Start = &now
	t.setStatus(db.TaskRunningStatus)
	objType := db.EventTask
	desc := "Task ID " + strconv.Itoa(t.task.ID) + " (" + t.template.Name + ")" + " is running"
	_, err := t.pool.store.CreateEvent(db.Event{
		UserID:      t.task.UserID,
		ProjectID:   &t.task.ProjectID,
		ObjectType:  &objType,
		ObjectID:    &t.task.ID,
		Description: &desc,
	})
	if err != nil {
		t.Log("Fatal error inserting an event")
		panic(err)
	}
	t.Log("Started: " + strconv.Itoa(t.task.ID))
	t.Log("Run TaskRunner with template: " + t.template.Name + "\n")
	// Re-check in case a stop request arrived while acquiring resources.
	// TODO: ?????
	if t.task.Status == db.TaskStoppingStatus {
		t.setStatus(db.TaskStoppedStatus)
		return
	}
	err = t.runPlaybook()
	if err != nil {
		t.Log("Running playbook failed: " + err.Error())
		t.fail()
		return
	}
	t.setStatus(db.TaskSuccessStatus)
	// Queue autorun templates that build on top of this one.
	templates, err := t.pool.store.GetTemplates(t.task.ProjectID, db.TemplateFilter{
		BuildTemplateID: &t.task.TemplateID,
		AutorunOnly:     true,
	}, db.RetrieveQueryParams{})
	if err != nil {
		t.Log("Running playbook failed: " + err.Error())
		return
	}
	for _, tpl := range templates {
		_, err = t.pool.AddTask(db.Task{
			TemplateID:  tpl.ID,
			ProjectID:   tpl.ProjectID,
			BuildTaskID: &t.task.ID,
		}, nil, tpl.ProjectID)
		if err != nil {
			t.Log("Running playbook failed: " + err.Error())
			continue
		}
	}
}
// prepareError classifies an error raised while populating task details:
// a missing record is logged and returned to the caller, any other error
// fails the task and panics, and nil passes through unchanged.
func (t *TaskRunner) prepareError(err error, errMsg string) error {
	switch {
	case err == db.ErrNotFound:
		t.Log(errMsg)
		return err
	case err != nil:
		t.fail()
		panic(err)
	default:
		return nil
	}
}
// nolint: gocyclo
// populateDetails loads everything the runner needs from the store:
// template, project alert settings, project users, inventory, repository
// (with deserialized SSH key) and environment. Task-supplied environment
// JSON is merged into the template environment's JSON.
func (t *TaskRunner) populateDetails() error {
	// get template
	var err error
	t.template, err = t.pool.store.GetTemplate(t.task.ProjectID, t.task.TemplateID)
	if err != nil {
		return t.prepareError(err, "Template not found!")
	}
	// get project alert setting
	project, err := t.pool.store.GetProject(t.template.ProjectID)
	if err != nil {
		return t.prepareError(err, "Project not found!")
	}
	t.alert = project.Alert
	t.alertChat = project.AlertChat
	// get project users
	users, err := t.pool.store.GetProjectUsers(t.template.ProjectID, db.RetrieveQueryParams{})
	if err != nil {
		return t.prepareError(err, "Users not found!")
	}
	t.users = []int{}
	for _, user := range users {
		t.users = append(t.users, user.ID)
	}
	// get inventory
	t.inventory, err = t.pool.store.GetInventory(t.template.ProjectID, t.template.InventoryID)
	if err != nil {
		return t.prepareError(err, "Template Inventory not found!")
	}
	// get repository
	t.repository, err = t.pool.store.GetRepository(t.template.ProjectID, t.template.RepositoryID)
	if err != nil {
		return err
	}
	err = t.repository.SSHKey.DeserializeSecret()
	if err != nil {
		return err
	}
	// get environment
	if t.template.EnvironmentID != nil {
		t.environment, err = t.pool.store.GetEnvironment(t.template.ProjectID, *t.template.EnvironmentID)
		if err != nil {
			return err
		}
	}
	if t.task.Environment != "" {
		environment := make(map[string]interface{})
		// NOTE(review): the two Unmarshal calls below look swapped — the
		// task environment is only decoded when t.environment.JSON is
		// non-empty, while t.environment.JSON is decoded unconditionally
		// (an empty string would fail to parse). Confirm intent before
		// changing; template values currently win over task values.
		if t.environment.JSON != "" {
			err = json.Unmarshal([]byte(t.task.Environment), &environment)
			if err != nil {
				return err
			}
		}
		taskEnvironment := make(map[string]interface{})
		err = json.Unmarshal([]byte(t.environment.JSON), &taskEnvironment)
		if err != nil {
			return err
		}
		for k, v := range taskEnvironment {
			environment[k] = v
		}
		var ev []byte
		ev, err = json.Marshal(environment)
		if err != nil {
			return err
		}
		// Store the merged result back on the environment for later use.
		t.environment.JSON = string(ev)
	}
	return nil
}
// installVaultKeyFile writes the template's vault password file to disk so
// it can be passed to ansible via --vault-password-file.
// No-op when the template has no vault key configured.
func (t *TaskRunner) installVaultKeyFile() error {
	if t.template.VaultKeyID == nil {
		return nil
	}
	return t.template.VaultKey.Install(db.AccessKeyRoleAnsiblePasswordVault)
}
// checkoutRepository checks the repository out at the task's commit.
// When the task carries no commit hash, the current HEAD commit hash and
// message are recorded on the task row instead.
func (t *TaskRunner) checkoutRepository() error {
	repo := lib.GitRepository{
		Logger:     t,
		TemplateID: t.template.ID,
		Repository: t.repository,
	}
	err := repo.ValidateRepo()
	if err != nil {
		return err
	}
	if t.task.CommitHash != nil {
		// checkout to commit if it is provided for TaskRunner
		return repo.Checkout(*t.task.CommitHash)
	}
	// store commit to TaskRunner table
	commitHash, err := repo.GetLastCommitHash()
	if err != nil {
		return err
	}
	// Commit message is informational only; its retrieval error is ignored.
	commitMessage, _ := repo.GetLastCommitMessage()
	t.task.CommitHash = &commitHash
	t.task.CommitMessage = commitMessage
	return t.pool.store.UpdateTask(t.task)
}
// updateRepository brings the local clone up to date: an invalid or missing
// clone is removed and re-cloned; otherwise a pull is attempted and, if
// that fails, the clone is recreated from scratch.
func (t *TaskRunner) updateRepository() error {
	repo := lib.GitRepository{
		Logger:     t,
		TemplateID: t.template.ID,
		Repository: t.repository,
	}
	err := repo.ValidateRepo()
	if err != nil {
		// Remove a corrupt clone before re-cloning; a missing directory
		// needs no removal.
		if !os.IsNotExist(err) {
			err = os.RemoveAll(repo.GetFullPath())
			if err != nil {
				return err
			}
		}
		return repo.Clone()
	}
	if repo.CanBePulled() {
		err = repo.Pull()
		if err == nil {
			return nil
		}
	}
	// Pull failed or is not possible (e.g. remote URL changed): re-clone.
	err = os.RemoveAll(repo.GetFullPath())
	if err != nil {
		return err
	}
	return repo.Clone()
}
// installCollectionsRequirements runs "ansible-galaxy collection install"
// for collections/requirements.yml next to the playbook. The install is
// skipped when the file is absent or unchanged since the last run
// (tracked via an .md5 marker file).
func (t *TaskRunner) installCollectionsRequirements() error {
	reqPath := path.Join(t.getPlaybookDir(), "collections", "requirements.yml")
	hashPath := reqPath + ".md5"

	if _, err := os.Stat(reqPath); err != nil {
		t.Log("No collections/requirements.yml file found. Skip galaxy install process.\n")
		return nil
	}

	if !hasRequirementsChanges(reqPath, hashPath) {
		t.Log("collections/requirements.yml has no changes. Skip galaxy install process.\n")
		return nil
	}

	galaxyArgs := []string{"collection", "install", "-r", reqPath, "--force"}
	if err := t.runGalaxy(galaxyArgs); err != nil {
		return err
	}
	return writeMD5Hash(reqPath, hashPath)
}
// installRolesRequirements runs "ansible-galaxy role install" for
// roles/requirements.yml in the repository root. The install is skipped
// when the file is absent or unchanged since the last run (tracked via an
// .md5 marker file).
func (t *TaskRunner) installRolesRequirements() error {
	// Consistency fix: build the path with path.Join like
	// installCollectionsRequirements, instead of "%s/roles/requirements.yml".
	requirementsFilePath := path.Join(t.getRepoPath(), "roles", "requirements.yml")
	requirementsHashFilePath := fmt.Sprintf("%s.md5", requirementsFilePath)
	if _, err := os.Stat(requirementsFilePath); err != nil {
		t.Log("No roles/requirements.yml file found. Skip galaxy install process.\n")
		return nil
	}
	if hasRequirementsChanges(requirementsFilePath, requirementsHashFilePath) {
		if err := t.runGalaxy([]string{
			"role",
			"install",
			"-r",
			requirementsFilePath,
			"--force",
		}); err != nil {
			return err
		}
		// Record the installed file's hash so unchanged files are skipped
		// next time.
		if err := writeMD5Hash(requirementsFilePath, requirementsHashFilePath); err != nil {
			return err
		}
	} else {
		t.Log("roles/requirements.yml has no changes. Skip galaxy install process.\n")
	}
	return nil
}
// installRequirements installs both collection and role requirements for
// the task's playbook.
func (t *TaskRunner) installRequirements() error {
	if err := t.installCollectionsRequirements(); err != nil {
		return err
	}
	return t.installRolesRequirements()
}
// runGalaxy invokes ansible-galaxy with the given arguments in the context
// of the task's repository.
func (t *TaskRunner) runGalaxy(args []string) error {
	playbook := lib.AnsiblePlaybook{
		Logger:     t,
		TemplateID: t.template.ID,
		Repository: t.repository,
	}
	return playbook.RunGalaxy(args)
}
// runPlaybook builds the ansible-playbook arguments and environment and
// starts the playbook, saving the spawned process on the runner so it can
// be stopped later.
func (t *TaskRunner) runPlaybook() (err error) {
	args, err := t.getPlaybookArgs()
	if err != nil {
		return
	}
	environmentVariables, err := t.getEnvironmentENV()
	if err != nil {
		return
	}
	return lib.AnsiblePlaybook{
		Logger:     t,
		TemplateID: t.template.ID,
		Repository: t.repository,
	}.RunPlaybook(args, &environmentVariables, func(p *os.Process) { t.process = p })
}
// getEnvironmentENV decodes the environment's ENV JSON object into a list
// of KEY=value strings suitable for the playbook process environment.
func (t *TaskRunner) getEnvironmentENV() (arr []string, err error) {
	envMap := make(map[string]string)
	if t.environment.ENV != nil {
		if err = json.Unmarshal([]byte(*t.environment.ENV), &envMap); err != nil {
			return
		}
	}
	for k, v := range envMap {
		arr = append(arr, k+"="+v)
	}
	return
}
// getEnvironmentExtraVars builds the JSON string passed via --extra-vars:
// the environment's JSON object extended with a
// "semaphore_vars.task_details" object containing message, username,
// template type and version information.
func (t *TaskRunner) getEnvironmentExtraVars() (str string, err error) {
	extraVars := make(map[string]interface{})
	if t.environment.JSON != "" {
		err = json.Unmarshal([]byte(t.environment.JSON), &extraVars)
		if err != nil {
			return
		}
	}
	taskDetails := make(map[string]interface{})
	if t.task.Message != "" {
		taskDetails["message"] = t.task.Message
	}
	if t.task.UserID != nil {
		var user db.User
		user, err = t.pool.store.GetUser(*t.task.UserID)
		// Username is informational: a lookup failure silently omits it.
		if err == nil {
			taskDetails["username"] = user.Username
		}
	}
	if t.template.Type != db.TemplateTask {
		taskDetails["type"] = t.template.Type
		incomingVersion := t.task.GetIncomingVersion(t.pool.store)
		if incomingVersion != nil {
			taskDetails["incoming_version"] = incomingVersion
		}
		if t.template.Type == db.TemplateBuild {
			taskDetails["target_version"] = t.task.Version
		}
	}
	vars := make(map[string]interface{})
	vars["task_details"] = taskDetails
	extraVars["semaphore_vars"] = vars
	ev, err := json.Marshal(extraVars)
	if err != nil {
		return
	}
	str = string(ev)
	return
}
// nolint: gocyclo
// getPlaybookArgs assembles the ansible-playbook command line: inventory,
// access-key options, verbosity/diff/dry-run flags, vault password file,
// extra-vars, template/task extra arguments, and finally the playbook name.
func (t *TaskRunner) getPlaybookArgs() (args []string, err error) {
	playbookName := t.task.Playbook
	if playbookName == "" {
		playbookName = t.template.Playbook
	}
	// Static inventories were written to a tmp file by installInventory.
	var inventory string
	switch t.inventory.Type {
	case db.InventoryFile:
		inventory = t.inventory.Inventory
	case db.InventoryStatic, db.InventoryStaticYaml:
		inventory = util.Config.TmpPath + "/inventory_" + strconv.Itoa(t.task.ID)
		if t.inventory.Type == db.InventoryStaticYaml {
			inventory += ".yml"
		}
	default:
		err = fmt.Errorf("invalid inventory type") // typo fix: was "invetory"
		return
	}
	args = []string{
		"-i", inventory,
	}
	if t.inventory.SSHKeyID != nil {
		switch t.inventory.SSHKey.Type {
		case db.AccessKeySSH:
			args = append(args, "--private-key="+t.inventory.SSHKey.GetPath())
			//args = append(args, "--extra-vars={\"ansible_ssh_private_key_file\": \""+t.inventory.SSHKey.GetPath()+"\"}")
			if t.inventory.SSHKey.SshKey.Login != "" {
				args = append(args, "--extra-vars={\"ansible_user\": \""+t.inventory.SSHKey.SshKey.Login+"\"}")
			}
		case db.AccessKeyLoginPassword:
			args = append(args, "--extra-vars=@"+t.inventory.SSHKey.GetPath())
		case db.AccessKeyNone:
		default:
			err = fmt.Errorf("access key does not suit inventory's user credentials")
			return
		}
	}
	if t.inventory.BecomeKeyID != nil {
		switch t.inventory.BecomeKey.Type {
		case db.AccessKeyLoginPassword:
			args = append(args, "--extra-vars=@"+t.inventory.BecomeKey.GetPath())
		case db.AccessKeyNone:
		default:
			err = fmt.Errorf("access key does not suit inventory's sudo user credentials")
			return
		}
	}
	if t.task.Debug {
		args = append(args, "-vvvv")
	}
	if t.task.Diff {
		args = append(args, "--diff")
	}
	if t.task.DryRun {
		args = append(args, "--check")
	}
	if t.template.VaultKeyID != nil {
		args = append(args, "--vault-password-file", t.template.VaultKey.GetPath())
	}
	// Bug fix: the extra-vars error is documented as non-fatal, but it was
	// assigned to the named return `err` and could leak out of the function,
	// failing the whole playbook run. Use a local variable instead.
	extraVars, evErr := t.getEnvironmentExtraVars()
	if evErr != nil {
		t.Log(evErr.Error())
		t.Log("Could not remove command environment, if existant it will be passed to --extra-vars. This is not fatal but be aware of side effects")
	} else if extraVars != "" {
		args = append(args, "--extra-vars", extraVars)
	}
	var templateExtraArgs []string
	if t.template.Arguments != nil {
		err = json.Unmarshal([]byte(*t.template.Arguments), &templateExtraArgs)
		if err != nil {
			t.Log("Invalid format of the template extra arguments, must be valid JSON")
			return
		}
	}
	var taskExtraArgs []string
	if t.template.AllowOverrideArgsInTask && t.task.Arguments != nil {
		err = json.Unmarshal([]byte(*t.task.Arguments), &taskExtraArgs)
		if err != nil {
			t.Log("Invalid format of the TaskRunner extra arguments, must be valid JSON")
			return
		}
	}
	if t.task.Limit != "" {
		t.Log("--limit=" + t.task.Limit)
		taskExtraArgs = append(taskExtraArgs, "--limit="+t.task.Limit)
	}
	args = append(args, templateExtraArgs...)
	args = append(args, taskExtraArgs...)
	args = append(args, playbookName)
	return
}
func hasRequirementsChanges(requirementsFilePath string, requirementsHashFilePath string) bool {
oldFileMD5HashBytes, err := ioutil.ReadFile(requirementsHashFilePath)
if err != nil {
return true
}
newFileMD5Hash, err := getMD5Hash(requirementsFilePath)
if err != nil {
return true
}
return string(oldFileMD5HashBytes) != newFileMD5Hash
}
// writeMD5Hash stores the MD5 of requirementsFile in requirementsHashFile
// so later runs can detect whether the requirements changed.
func writeMD5Hash(requirementsFile string, requirementsHashFile string) error {
	newFileMD5Hash, err := getMD5Hash(requirementsFile)
	if err != nil {
		return err
	}
	// os.WriteFile replaces the deprecated ioutil.WriteFile (Go 1.16+).
	return os.WriteFile(requirementsHashFile, []byte(newFileMD5Hash), 0644)
}
// checkTmpDir checks to see if the temporary directory exists
// and if it does not attempts to create it
func checkTmpDir(path string) error {
var err error
if _, err = os.Stat(path); err != nil {
if os.IsNotExist(err) {
return os.MkdirAll(path, 0700)
}
}
return err
}
|
package common
import (
	"encoding/base64"
	"errors"
	"strings"

	"github.com/ricallinson/forgery"
	"github.com/spacedock-io/index/models"
)
// BasicAuth is middleware that authenticates a request using HTTP Basic
// credentials from the Authorization header. On success the authenticated
// user is stored under req.Map["_user"]; on any failure a 401 is sent and
// "_user" stays nil.
func BasicAuth(req *f.Request, res *f.Response, next func()) {
	auth := req.Get("authorization")
	req.Map["_user"] = nil
	if len(auth) == 0 {
		res.Send("No authorization provided.", 401)
		return
	}
	creds, err := UnpackBasic(auth)
	// len(creds) < 2 guards against decoded credentials without a ":"
	// separator, which would otherwise panic on creds[1] below.
	if err != nil || len(creds) < 2 {
		res.Send("Unauthorized", 401)
		return
	}
	u, ok := models.AuthUser(creds[0], creds[1])
	if !ok {
		res.Send("Unauthorized", 401)
		// Bug fix: without this return the handler fell through and
		// stored a nil user in req.Map["_user"] after sending 401.
		return
	}
	req.Map["_user"] = u
}
// TokenAuth is middleware that validates the token in the Authorization
// header. A missing header or invalid token results in a 403; a token that
// is not found results in a 404.
func TokenAuth(req *f.Request, res *f.Response, next func()) {
	header := req.Get("authorization")
	if header == "" {
		res.Send("No authorization provided.", 403)
		return
	}
	if _, err := UnpackToken(header); err != nil {
		status := 403
		if err == models.TokenNotFound {
			status = 404
		}
		res.Send(err.Error(), status)
	}
}
func UnpackBasic(raw string) (creds []string, err error) {
auth := strings.Split(raw, " ")
decoded, err := base64.StdEncoding.DecodeString(auth[1])
if err != nil { return nil, err }
creds = strings.Split(string(decoded), ":")
return creds, nil
}
// UnpackToken extracts the token portion of an Authorization header of the
// form "<scheme> <token>" and resolves it via the models package. A header
// without a token part returns an error instead of panicking.
func UnpackToken(raw string) (models.Token, error) {
	parts := strings.Split(raw, " ")
	if len(parts) < 2 {
		var zero models.Token
		return zero, errors.New("malformed authorization header")
	}
	return models.GetTokenString(parts[1])
}
Change Unpack methods to Handle methods
package common
import (
	"encoding/base64"
	"errors"
	"strings"

	"github.com/ricallinson/forgery"
	"github.com/spacedock-io/index/models"
)
// BasicAuth is middleware that authenticates the request via HTTP Basic
// credentials and stores the resulting user under req.Map["_user"].
// Missing or invalid credentials produce a 401 and leave "_user" nil.
func BasicAuth(req *f.Request, res *f.Response, next func()) {
	req.Map["_user"] = nil
	header := req.Get("authorization")
	if header == "" {
		res.Send("No authorization provided.", 401)
		return
	}
	user, err := HandleBasic(header)
	if err != nil {
		res.Send("Unauthorized", 401)
		return
	}
	req.Map["_user"] = user
}
// TokenAuth is middleware that validates the Authorization token for the
// request: 403 for a missing header or invalid token, 404 when the token
// is not found.
func TokenAuth(req *f.Request, res *f.Response, next func()) {
	raw := req.Get("authorization")
	if raw == "" {
		res.Send("No authorization provided.", 403)
		return
	}
	_, err := HandleToken(raw)
	if err == nil {
		return
	}
	if err == models.TokenNotFound {
		res.Send(err.Error(), 404)
		return
	}
	res.Send(err.Error(), 403)
}
// HandleBasic decodes an HTTP Basic Authorization header of the form
// "Basic <base64(user:pass)>" and authenticates the embedded credentials.
// It returns the authenticated user, or an error for malformed headers or
// failed authentication.
func HandleBasic(raw string) (*models.User, error) {
	parts := strings.Split(raw, " ")
	if len(parts) < 2 {
		// Previously parts[1] paniced with index out of range here.
		return nil, errors.New("malformed authorization header")
	}
	decoded, err := base64.StdEncoding.DecodeString(parts[1])
	if err != nil {
		return nil, err
	}
	creds := strings.Split(string(decoded), ":")
	if len(creds) < 2 {
		// Guard against decoded credentials without a ":" separator.
		return nil, errors.New("malformed basic credentials")
	}
	u, ok := models.AuthUser(creds[0], creds[1])
	if !ok {
		return nil, models.AuthErr
	}
	return u, nil
}
// HandleToken extracts the token portion of an Authorization header of the
// form "<scheme> <token>" and consumes it via the models package. A header
// without a token part returns an error instead of panicking.
func HandleToken(raw string) (models.Token, error) {
	parts := strings.Split(raw, " ")
	if len(parts) < 2 {
		var zero models.Token
		return zero, errors.New("malformed authorization header")
	}
	return models.UseTokenString(parts[1])
}
|
package common
import (
"errors"
"io/ioutil"
"os"
"reflect"
"runtime"
)
var (
errInvalidConf = errors.New("invalid conf")
)
// ConfigLoader 配置内容加载器
type ConfigLoader interface {
Load(configPath string) (content []byte, err error)
Exist(configPath string) (exist bool, err error)
}
// ConfigFileLoader 从本地文件中加载配置
type ConfigFileLoader struct {
}
// Load impls ConfigLoader.Load
func (p *ConfigFileLoader) Load(configPath string) (content []byte, err error) {
content, err = ioutil.ReadFile(configPath)
return
}
// Exist impls ConfigLoader.Exist
func (p *ConfigFileLoader) Exist(configPath string) (exist bool, err error) {
info, err := os.Stat(configPath)
if os.IsNotExist(err) {
err = nil
return
}
if info != nil {
exist = !info.IsDir()
}
return
}
var (
//FileLoader 默认加载
FileLoader ConfigLoader = &ConfigFileLoader{}
)
// Configurer 配置器
type Configurer interface {
//解析配置
Parse() error
}
// LogConfig 日志配置
type LogConfig struct {
Env string `yaml:"env"`
FileName string `yaml:"file_name"`
MaxSize int `yaml:"max_size"`
MaxBackups int `yaml:"max_backups"`
MaxAge int `yaml:"max_age"`
}
// Parse implements Configurer for LogConfig: it initializes the package
// logger from this configuration by delegating to initLogger.
func (p *LogConfig) Parse() error {
	return initLogger(p)
}
// RuntimeConfig 运行期配置
type RuntimeConfig struct {
Maxprocs int //最大的PROCS个数
}
// Parse implements Configurer for RuntimeConfig. When Maxprocs is positive
// it applies runtime.GOMAXPROCS and logs the previous value; zero or
// negative values leave the runtime setting untouched.
func (p *RuntimeConfig) Parse() error {
	if p.Maxprocs > 0 {
		preProcs := runtime.GOMAXPROCS(p.Maxprocs)
		Infof("Set runtime.MAXPROCS to %v,old is %v", p.Maxprocs, preProcs)
	}
	return nil
}
// AppConfig 基础的应用配置
type AppConfig struct {
*LogConfig `yaml:"log"`
*RuntimeConfig `yaml:"runtime"`
*ValidateRuleConfig `yaml:"validates"`
}
// Parse implements Configurer for AppConfig by delegating to the package
// Parse helper, which invokes Parse on each embedded config field.
func (p *AppConfig) Parse() error {
	return Parse(p)
}

// GetValidateRuleConfig implements ValidateConfiguer by exposing the
// embedded validation rules (nil when absent from the config source).
func (p *AppConfig) GetValidateRuleConfig() *ValidateRuleConfig {
	return p.ValidateRuleConfig
}
// Parse 解析配置
func Parse(conf interface{}) error {
config := reflect.Indirect(reflect.ValueOf(conf))
fieldCount := config.NumField()
for i := 0; i < fieldCount; i++ {
val := reflect.Indirect(config.Field(i))
if !val.IsValid() {
continue
}
if configFieldValue, ok := val.Addr().Interface().(Configurer); ok {
if err := configFieldValue.Parse(); err != nil {
return err
}
}
}
return nil
}
add LogConfiger
package common
import (
"errors"
"io/ioutil"
"os"
"reflect"
"runtime"
)
var (
errInvalidConf = errors.New("invalid conf")
)
// ConfigLoader 配置内容加载器
type ConfigLoader interface {
Load(configPath string) (content []byte, err error)
Exist(configPath string) (exist bool, err error)
}
// ConfigFileLoader 从本地文件中加载配置
type ConfigFileLoader struct {
}
// Load impls ConfigLoader.Load
func (p *ConfigFileLoader) Load(configPath string) (content []byte, err error) {
content, err = ioutil.ReadFile(configPath)
return
}
// Exist impls ConfigLoader.Exist
func (p *ConfigFileLoader) Exist(configPath string) (exist bool, err error) {
info, err := os.Stat(configPath)
if os.IsNotExist(err) {
err = nil
return
}
if info != nil {
exist = !info.IsDir()
}
return
}
var (
//FileLoader 默认加载
FileLoader ConfigLoader = &ConfigFileLoader{}
)
// Configurer 配置器
type Configurer interface {
//解析配置
Parse() error
}
// LogConfig 日志配置
type LogConfig struct {
Env string `yaml:"env"`
FileName string `yaml:"file_name"`
MaxSize int `yaml:"max_size"`
MaxBackups int `yaml:"max_backups"`
MaxAge int `yaml:"max_age"`
}
// LogConfiger the log configer
type LogConfiger interface {
GetLogConfig() *LogConfig
}
// Parse implements Configurer for LogConfig: it initializes the package
// logger from this configuration by delegating to initLogger.
func (p *LogConfig) Parse() error {
	return initLogger(p)
}
// RuntimeConfig 运行期配置
type RuntimeConfig struct {
Maxprocs int //最大的PROCS个数
}
// Parse implements Configurer for RuntimeConfig. When Maxprocs is positive
// it applies runtime.GOMAXPROCS and logs the previous value; zero or
// negative values leave the runtime setting untouched.
func (p *RuntimeConfig) Parse() error {
	if p.Maxprocs > 0 {
		preProcs := runtime.GOMAXPROCS(p.Maxprocs)
		Infof("Set runtime.MAXPROCS to %v,old is %v", p.Maxprocs, preProcs)
	}
	return nil
}
// AppConfig 基础的应用配置
type AppConfig struct {
*LogConfig `yaml:"log"`
*RuntimeConfig `yaml:"runtime"`
*ValidateRuleConfig `yaml:"validates"`
}
// Parse implements Configurer for AppConfig by delegating to the package
// Parse helper, which invokes Parse on each embedded config field.
func (p *AppConfig) Parse() error {
	return Parse(p)
}

// GetValidateRuleConfig implements ValidateConfiguer by exposing the
// embedded validation rules (nil when absent from the config source).
func (p *AppConfig) GetValidateRuleConfig() *ValidateRuleConfig {
	return p.ValidateRuleConfig
}

// GetLogConfig impls LogConfiger by exposing the embedded log settings
// (nil when absent from the config source).
func (p *AppConfig) GetLogConfig() *LogConfig {
	return p.LogConfig
}
// Parse 解析配置
func Parse(conf interface{}) error {
config := reflect.Indirect(reflect.ValueOf(conf))
fieldCount := config.NumField()
for i := 0; i < fieldCount; i++ {
val := reflect.Indirect(config.Field(i))
if !val.IsValid() {
continue
}
if configFieldValue, ok := val.Addr().Interface().(Configurer); ok {
if err := configFieldValue.Parse(); err != nil {
return err
}
}
}
return nil
}
|
package common
import (
"github.com/EngoEngine/ecs"
"github.com/EngoEngine/engo"
"github.com/EngoEngine/gl"
"image/color"
"sort"
"sync"
"unsafe"
)
const (
// RenderSystemPriority is the priority of the RenderSystem
RenderSystemPriority = -1000
)
type renderChangeMessage struct{}
func (renderChangeMessage) Type() string {
return "renderChangeMessage"
}
// Drawable is that which can be rendered to OpenGL.
type Drawable interface {
Texture() *gl.Texture
Width() float32
Height() float32
View() (float32, float32, float32, float32)
Close()
}
// TextureRepeating is the method used to repeat a texture in OpenGL.
type TextureRepeating uint8
const (
// NoRepeat does not repeat the texture.
NoRepeat TextureRepeating = iota
// ClampToEdge stretches the texture to the edge of the viewport.
ClampToEdge
// ClampToBorder stretches the texture to the border of the viewpport.
ClampToBorder
// Repeat repeats the texture until the border of the viewport.
Repeat
// MirroredRepeat repeats a mirror image of the texture until the border of the viewport.
MirroredRepeat
)
// ZoomFilter is a filter used when zooming in or out of a texture.
type ZoomFilter uint8
const (
// FilterNearest is a simple nearest neighbor algorithm
FilterNearest ZoomFilter = iota
// FilterLinear is a bilinear interpolation algorithm
FilterLinear
)
// RenderComponent is the component needed to render an entity.
type RenderComponent struct {
// Hidden is used to prevent drawing by OpenGL
Hidden bool
// Scale is the scale at which to render, in the X and Y axis. Not defining Scale, will default to engo.Point{1, 1}
Scale engo.Point
// Color defines how much of the color-components of the texture get used
Color color.Color
// Drawable refers to the Texture that should be drawn
Drawable Drawable
// Repeat defines how to repeat the Texture if the SpaceComponent of the entity
// is larger than the texture itself, after applying scale. Defaults to NoRepeat
// which allows the texture to draw entirely without regard to th SpaceComponent
// Do not set to anything other than NoRepeat for textures in a sprite sheet.
// This does not yet work with sprite sheets.
Repeat TextureRepeating
// Buffer represents the buffer object itself
// Avoid using it unless your are writing a custom shader
Buffer *gl.Buffer
// BufferContent contains the buffer data
// Avoid using it unless your are writing a custom shader
BufferContent []float32
// StartZIndex defines the initial Z-Index. Z-Index defines the order which the content is drawn to the
// screen. Higher z-indices are drawn on top of lower ones. Beware that you must use `SetZIndex` function to change
// the Z-Index.
StartZIndex float32
magFilter, minFilter ZoomFilter
shader Shader
zIndex float32
}
// SetShader sets the shader used by the RenderComponent.
func (r *RenderComponent) SetShader(s Shader) {
r.shader = s
engo.Mailbox.Dispatch(&renderChangeMessage{})
}
// ensureShader lazily assigns a default shader chosen from the concrete
// Drawable type when none has been set via SetShader; an explicitly set
// shader is left untouched.
func (r *RenderComponent) ensureShader() {
	// Setting default shader
	if r.shader == nil {
		switch r.Drawable.(type) {
		case Triangle:
			r.shader = LegacyShader
		case Circle:
			r.shader = LegacyShader
		case Rectangle:
			r.shader = LegacyShader
		case ComplexTriangles:
			r.shader = LegacyShader
		case Text:
			r.shader = TextShader
		case Blendmap:
			r.shader = BlendmapShader
		default:
			r.shader = DefaultShader
		}
	}
}
// Shader returns the shader used by the RenderComponent, first assigning
// the type-appropriate default if none was set via SetShader.
func (r *RenderComponent) Shader() Shader {
	r.ensureShader()
	return r.shader
}
// SetZIndex sets the order that the RenderComponent is drawn to the screen. Higher z-indices are drawn on top of
// lower ones if they overlap.
func (r *RenderComponent) SetZIndex(index float32) {
r.zIndex = index
engo.Mailbox.Dispatch(&renderChangeMessage{})
}
// SetMinFilter sets the ZoomFilter used for minimizing the RenderComponent
func (r *RenderComponent) SetMinFilter(z ZoomFilter) {
	r.minFilter = z
	// Dispatch a pointer message for consistency with SetShader and
	// SetZIndex, which dispatch &renderChangeMessage{}.
	engo.Mailbox.Dispatch(&renderChangeMessage{})
}
// SetMagFilter sets the ZoomFilter used for magnifying the RenderComponent
func (r *RenderComponent) SetMagFilter(z ZoomFilter) {
	r.magFilter = z
	// Dispatch a pointer message for consistency with SetShader and
	// SetZIndex, which dispatch &renderChangeMessage{}.
	engo.Mailbox.Dispatch(&renderChangeMessage{})
}
type renderEntity struct {
*ecs.BasicEntity
*RenderComponent
*SpaceComponent
}
type renderEntityList []renderEntity
func (r renderEntityList) Len() int {
return len(r)
}
// Less orders entities for drawing: by zIndex first, then shader, then
// texture, then mag/min filters, and finally screen position — presumably
// so entities sharing GL state end up adjacent and state switches are
// minimized (TODO confirm against the Update draw loop).
func (r renderEntityList) Less(i, j int) bool {
	// Sort by shader-pointer if they have the same zIndex
	if r[i].RenderComponent.zIndex != r[j].RenderComponent.zIndex {
		return r[i].RenderComponent.zIndex < r[j].RenderComponent.zIndex
	}
	// TODO: optimize this for performance
	// NOTE(review): this reads the first word of the interface value via
	// unsafe, relying on the runtime's interface layout, which the spec
	// does not guarantee — reflect.Value.Pointer would be the safe form.
	p1, p2 := uintptr((*emptyInterface)(unsafe.Pointer(&r[i].RenderComponent.shader)).word), uintptr((*emptyInterface)(unsafe.Pointer(&r[j].RenderComponent.shader)).word)
	if p1 != p2 {
		return p1 < p2
	}
	switch r[i].RenderComponent.Drawable.(type) {
	// Tiles can either be as a spriteSheet or as separate image
	// if we sort them by texture and they're saved as separate images,
	// sorting by texture messes up rendering.
	case *Tile:
		// NO-OP
	default:
		t1, t2 := uintptr(unsafe.Pointer(r[i].RenderComponent.Drawable.Texture())), uintptr(unsafe.Pointer(r[j].RenderComponent.Drawable.Texture()))
		if t1 != t2 {
			return t1 < t2
		}
	}
	// Sort by minFilter if they're the same magFilter
	mag1, mag2 := r[i].RenderComponent.magFilter, r[j].RenderComponent.magFilter
	if mag1 != mag2 {
		return mag1 < mag2
	}
	// Sort by position if they're the same minFilter
	min1, min2 := r[i].RenderComponent.minFilter, r[j].RenderComponent.minFilter
	if min1 != min2 {
		return min1 < min2
	}
	if r[i].Position.Y != r[j].Position.Y {
		return r[i].Position.Y < r[j].Position.Y
	}
	return r[i].Position.X < r[j].Position.X
}
func (r renderEntityList) Swap(i, j int) {
r[i], r[j] = r[j], r[i]
}
type emptyInterface struct {
word unsafe.Pointer
}
// RenderSystem is the system that draws entities on the OpenGL surface. It requires
// a CameraSystem to work. If a CameraSystem is not in the World when you add RenderSystem
// one is automatically added to the world.
type RenderSystem struct {
entities renderEntityList
ids map[uint64]struct{}
world *ecs.World
sortingNeeded, newCamera bool
}
// Priority implements the ecs.Prioritizer interface.
func (*RenderSystem) Priority() int { return RenderSystemPriority }
// New initializes the RenderSystem
func (rs *RenderSystem) New(w *ecs.World) {
rs.world = w
rs.ids = make(map[uint64]struct{})
engo.Mailbox.Listen("NewCameraMessage", func(engo.Message) {
rs.newCamera = true
})
addCameraSystemOnce(w)
if !engo.Headless() {
if err := initShaders(w); err != nil {
panic(err)
}
engo.Gl.Enable(engo.Gl.MULTISAMPLE)
}
engo.Mailbox.Listen("renderChangeMessage", func(engo.Message) {
rs.sortingNeeded = true
})
}
var cameraInitMutex sync.Mutex
// addCameraSystemOnce adds a CameraSystem to the world unless one is
// already present; the mutex serializes concurrent scene setup.
func addCameraSystemOnce(w *ecs.World) {
	cameraInitMutex.Lock()
	defer cameraInitMutex.Unlock()

	for _, system := range w.Systems() {
		if _, ok := system.(*CameraSystem); ok {
			// Already present; nothing to do.
			return
		}
	}
	w.AddSystem(&CameraSystem{})
}
// Add adds an entity to the RenderSystem. The entity needs a basic, render, and space component to be added to the system.
func (rs *RenderSystem) Add(basic *ecs.BasicEntity, render *RenderComponent, space *SpaceComponent) {
// Do nothing if entity already exists
if _, ok := rs.ids[basic.ID()]; ok {
return
}
rs.ids[basic.ID()] = struct{}{}
render.ensureShader()
// This is to prevent users from using the wrong one
if render.shader == HUDShader {
switch render.Drawable.(type) {
case Triangle:
render.shader = LegacyHUDShader
case Circle:
render.shader = LegacyHUDShader
case Rectangle:
render.shader = LegacyHUDShader
case ComplexTriangles:
render.shader = LegacyHUDShader
case Text:
render.shader = TextHUDShader
default:
render.shader = HUDShader
}
}
// If the scale is zero, set it to one.
if render.Scale.X == 0 {
render.Scale.X = 1
}
if render.Scale.Y == 0 {
render.Scale.Y = 1
}
render.zIndex = render.StartZIndex
rs.entities = append(rs.entities, renderEntity{basic, render, space})
rs.sortingNeeded = true
}
// EntityExists returns the index of basic within rs.entities, or -1 when
// the entity is not tracked by this system.
func (rs *RenderSystem) EntityExists(basic *ecs.BasicEntity) int {
	for idx := range rs.entities {
		if rs.entities[idx].ID() == basic.ID() {
			return idx
		}
	}
	return -1
}
// AddByInterface adds any Renderable to the render system. Any Entity containing a BasicEntity,RenderComponent, and SpaceComponent anonymously does this automatically
func (rs *RenderSystem) AddByInterface(i ecs.Identifier) {
	// NOTE(review): the assertion failure is discarded; if i does not
	// implement Renderable, o is nil and the calls below panic — confirm
	// callers only ever pass Renderables.
	o, _ := i.(Renderable)
	rs.Add(o.GetBasicEntity(), o.GetRenderComponent(), o.GetSpaceComponent())
}
// Remove removes an entity from the RenderSystem and flags the draw list
// for re-sorting when an element was actually deleted.
func (rs *RenderSystem) Remove(basic ecs.BasicEntity) {
	idx := rs.EntityExists(&basic)
	if idx >= 0 {
		copy(rs.entities[idx:], rs.entities[idx+1:])
		rs.entities = rs.entities[:len(rs.entities)-1]
		rs.sortingNeeded = true
	}
	delete(rs.ids, basic.ID())
}
// Update draws the entities in the RenderSystem to the OpenGL Surface.
// Entities are drawn in sorted order (see renderEntityList.Less) so that
// shader Pre/Post transitions happen as rarely as possible.
func (rs *RenderSystem) Update(dt float32) {
	if engo.Headless() {
		return
	}
	// Re-sort lazily: only when z-index/shader/filter changes were flagged.
	if rs.sortingNeeded {
		sort.Sort(rs.entities)
		rs.sortingNeeded = false
	}
	if rs.newCamera {
		newCamera(rs.world)
		rs.newCamera = false
	}
	engo.Gl.Clear(engo.Gl.COLOR_BUFFER_BIT)
	// Track which culling shaders were prepared this frame so PrepareCulling
	// runs at most once per shader.
	preparedCullingShaders := make(map[CullingShader]struct{})
	var cullingShader CullingShader // current culling shader
	var prevShader Shader           // shader of the previous entity
	var currentShader Shader        // currently "active" shader
	// TODO: it's linear for now, but that might very well be a bad idea
	for _, e := range rs.entities {
		if e.RenderComponent.Hidden {
			continue // with other entities
		}
		// Retrieve a shader, may be the default one -- then use it if we aren't already using it
		shader := e.RenderComponent.shader
		if !compareShaders(shader, prevShader) {
			// to increase performance avoid the type assertions when possible
			prevShader = shader
			if cs, ok := shader.(CullingShader); ok {
				cullingShader = cs
				if _, isPrepared := preparedCullingShaders[cullingShader]; !isPrepared {
					cullingShader.PrepareCulling()
					preparedCullingShaders[cullingShader] = struct{}{}
				}
			} else {
				cullingShader = nil
			}
		}
		// Skip entities the culling shader deems off-screen.
		if cullingShader != nil && !cullingShader.ShouldDraw(e.RenderComponent, e.SpaceComponent) {
			continue
		}
		// Change Shader if we have to
		if !compareShaders(shader, currentShader) {
			if currentShader != nil {
				currentShader.Post()
			}
			shader.Pre()
			currentShader = shader
		}
		// Setting default scale to 1
		if e.RenderComponent.Scale.X == 0 && e.RenderComponent.Scale.Y == 0 {
			e.RenderComponent.Scale = engo.Point{X: 1, Y: 1}
		}
		// Setting default to white
		if e.RenderComponent.Color == nil {
			e.RenderComponent.Color = color.White
		}
		currentShader.Draw(e.RenderComponent, e.SpaceComponent)
	}
	// Close out the final shader batch.
	if currentShader != nil {
		currentShader.Post()
	}
}
// SetBackground sets the OpenGL ClearColor to the provided color; it is a
// no-op in headless mode.
func SetBackground(c color.Color) {
	if engo.Headless() {
		return
	}
	// RGBA returns 16-bit components, so normalize by 0xffff.
	r, g, b, a := c.RGBA()
	const max = float32(0xffff)
	engo.Gl.ClearColor(float32(r)/max, float32(g)/max, float32(b)/max, float32(a)/max)
}
fix unsafe
package common
import (
"github.com/EngoEngine/ecs"
"github.com/EngoEngine/engo"
"github.com/EngoEngine/gl"
"image/color"
"reflect"
"sort"
"sync"
"unsafe"
)
const (
// RenderSystemPriority is the priority of the RenderSystem
RenderSystemPriority = -1000
)
type renderChangeMessage struct{}
func (renderChangeMessage) Type() string {
return "renderChangeMessage"
}
// Drawable is that which can be rendered to OpenGL.
type Drawable interface {
Texture() *gl.Texture
Width() float32
Height() float32
View() (float32, float32, float32, float32)
Close()
}
// TextureRepeating is the method used to repeat a texture in OpenGL.
type TextureRepeating uint8
const (
// NoRepeat does not repeat the texture.
NoRepeat TextureRepeating = iota
// ClampToEdge stretches the texture to the edge of the viewport.
ClampToEdge
// ClampToBorder stretches the texture to the border of the viewpport.
ClampToBorder
// Repeat repeats the texture until the border of the viewport.
Repeat
// MirroredRepeat repeats a mirror image of the texture until the border of the viewport.
MirroredRepeat
)
// ZoomFilter is a filter used when zooming in or out of a texture.
type ZoomFilter uint8
const (
// FilterNearest is a simple nearest neighbor algorithm
FilterNearest ZoomFilter = iota
// FilterLinear is a bilinear interpolation algorithm
FilterLinear
)
// RenderComponent is the component needed to render an entity.
type RenderComponent struct {
// Hidden is used to prevent drawing by OpenGL
Hidden bool
// Scale is the scale at which to render, in the X and Y axis. Not defining Scale, will default to engo.Point{1, 1}
Scale engo.Point
// Color defines how much of the color-components of the texture get used
Color color.Color
// Drawable refers to the Texture that should be drawn
Drawable Drawable
// Repeat defines how to repeat the Texture if the SpaceComponent of the entity
// is larger than the texture itself, after applying scale. Defaults to NoRepeat
// which allows the texture to draw entirely without regard to th SpaceComponent
// Do not set to anything other than NoRepeat for textures in a sprite sheet.
// This does not yet work with sprite sheets.
Repeat TextureRepeating
// Buffer represents the buffer object itself
// Avoid using it unless your are writing a custom shader
Buffer *gl.Buffer
// BufferContent contains the buffer data
// Avoid using it unless your are writing a custom shader
BufferContent []float32
// StartZIndex defines the initial Z-Index. Z-Index defines the order which the content is drawn to the
// screen. Higher z-indices are drawn on top of lower ones. Beware that you must use `SetZIndex` function to change
// the Z-Index.
StartZIndex float32
magFilter, minFilter ZoomFilter
shader Shader
zIndex float32
}
// SetShader sets the shader used by the RenderComponent.
func (r *RenderComponent) SetShader(s Shader) {
r.shader = s
engo.Mailbox.Dispatch(&renderChangeMessage{})
}
// ensureShader lazily assigns a default shader chosen from the concrete
// Drawable type when none has been set via SetShader; an explicitly set
// shader is left untouched.
func (r *RenderComponent) ensureShader() {
	// Setting default shader
	if r.shader == nil {
		switch r.Drawable.(type) {
		case Triangle:
			r.shader = LegacyShader
		case Circle:
			r.shader = LegacyShader
		case Rectangle:
			r.shader = LegacyShader
		case ComplexTriangles:
			r.shader = LegacyShader
		case Text:
			r.shader = TextShader
		case Blendmap:
			r.shader = BlendmapShader
		default:
			r.shader = DefaultShader
		}
	}
}
// Shader returns the shader used by the RenderComponent, first assigning
// the type-appropriate default if none was set via SetShader.
func (r *RenderComponent) Shader() Shader {
	r.ensureShader()
	return r.shader
}
// SetZIndex sets the order that the RenderComponent is drawn to the screen. Higher z-indices are drawn on top of
// lower ones if they overlap.
func (r *RenderComponent) SetZIndex(index float32) {
r.zIndex = index
engo.Mailbox.Dispatch(&renderChangeMessage{})
}
// SetMinFilter sets the ZoomFilter used for minimizing the RenderComponent
func (r *RenderComponent) SetMinFilter(z ZoomFilter) {
	r.minFilter = z
	// Dispatch a pointer message for consistency with SetShader and
	// SetZIndex, which dispatch &renderChangeMessage{}.
	engo.Mailbox.Dispatch(&renderChangeMessage{})
}
// SetMagFilter sets the ZoomFilter used for magnifying the RenderComponent
func (r *RenderComponent) SetMagFilter(z ZoomFilter) {
	r.magFilter = z
	// Dispatch a pointer message for consistency with SetShader and
	// SetZIndex, which dispatch &renderChangeMessage{}.
	engo.Mailbox.Dispatch(&renderChangeMessage{})
}
type renderEntity struct {
*ecs.BasicEntity
*RenderComponent
*SpaceComponent
}
type renderEntityList []renderEntity
func (r renderEntityList) Len() int {
return len(r)
}
// Less orders entities for drawing: by zIndex first, then shader, then
// texture, then mag/min filters, and finally screen position — presumably
// so entities sharing GL state end up adjacent and state switches are
// minimized (TODO confirm against the Update draw loop).
func (r renderEntityList) Less(i, j int) bool {
	// Sort by shader-pointer if they have the same zIndex
	if r[i].RenderComponent.zIndex != r[j].RenderComponent.zIndex {
		return r[i].RenderComponent.zIndex < r[j].RenderComponent.zIndex
	}
	// TODO: optimize this for performance
	// reflect.Value.Pointer gives a stable identity for the shader value
	// without depending on the runtime's interface layout.
	p1, p2 := reflect.ValueOf(r[i].RenderComponent.shader).Pointer(), reflect.ValueOf(r[j].RenderComponent.shader).Pointer()
	if p1 != p2 {
		return p1 < p2
	}
	switch r[i].RenderComponent.Drawable.(type) {
	// Tiles can either be as a spriteSheet or as separate image
	// if we sort them by texture and they're saved as separate images,
	// sorting by texture messes up rendering.
	case *Tile:
		// NO-OP
	default:
		t1, t2 := uintptr(unsafe.Pointer(r[i].RenderComponent.Drawable.Texture())), uintptr(unsafe.Pointer(r[j].RenderComponent.Drawable.Texture()))
		if t1 != t2 {
			return t1 < t2
		}
	}
	// Sort by minFilter if they're the same magFilter
	mag1, mag2 := r[i].RenderComponent.magFilter, r[j].RenderComponent.magFilter
	if mag1 != mag2 {
		return mag1 < mag2
	}
	// Sort by position if they're the same minFilter
	min1, min2 := r[i].RenderComponent.minFilter, r[j].RenderComponent.minFilter
	if min1 != min2 {
		return min1 < min2
	}
	if r[i].Position.Y != r[j].Position.Y {
		return r[i].Position.Y < r[j].Position.Y
	}
	return r[i].Position.X < r[j].Position.X
}
func (r renderEntityList) Swap(i, j int) {
r[i], r[j] = r[j], r[i]
}
// RenderSystem is the system that draws entities on the OpenGL surface. It requires
// a CameraSystem to work. If a CameraSystem is not in the World when you add RenderSystem
// one is automatically added to the world.
type RenderSystem struct {
entities renderEntityList
ids map[uint64]struct{}
world *ecs.World
sortingNeeded, newCamera bool
}
// Priority implements the ecs.Prioritizer interface.
func (*RenderSystem) Priority() int { return RenderSystemPriority }
// New initializes the RenderSystem
func (rs *RenderSystem) New(w *ecs.World) {
rs.world = w
rs.ids = make(map[uint64]struct{})
engo.Mailbox.Listen("NewCameraMessage", func(engo.Message) {
rs.newCamera = true
})
addCameraSystemOnce(w)
if !engo.Headless() {
if err := initShaders(w); err != nil {
panic(err)
}
engo.Gl.Enable(engo.Gl.MULTISAMPLE)
}
engo.Mailbox.Listen("renderChangeMessage", func(engo.Message) {
rs.sortingNeeded = true
})
}
var cameraInitMutex sync.Mutex
// addCameraSystemOnce adds a CameraSystem to the world unless one is
// already present; the mutex serializes concurrent scene setup.
func addCameraSystemOnce(w *ecs.World) {
	cameraInitMutex.Lock()
	defer cameraInitMutex.Unlock()

	for _, system := range w.Systems() {
		if _, ok := system.(*CameraSystem); ok {
			// Already present; nothing to do.
			return
		}
	}
	w.AddSystem(&CameraSystem{})
}
// Add adds an entity to the RenderSystem. The entity needs a basic, render, and space component to be added to the system.
func (rs *RenderSystem) Add(basic *ecs.BasicEntity, render *RenderComponent, space *SpaceComponent) {
// Do nothing if entity already exists
if _, ok := rs.ids[basic.ID()]; ok {
return
}
rs.ids[basic.ID()] = struct{}{}
render.ensureShader()
// This is to prevent users from using the wrong one
if render.shader == HUDShader {
switch render.Drawable.(type) {
case Triangle:
render.shader = LegacyHUDShader
case Circle:
render.shader = LegacyHUDShader
case Rectangle:
render.shader = LegacyHUDShader
case ComplexTriangles:
render.shader = LegacyHUDShader
case Text:
render.shader = TextHUDShader
default:
render.shader = HUDShader
}
}
// If the scale is zero, set it to one.
if render.Scale.X == 0 {
render.Scale.X = 1
}
if render.Scale.Y == 0 {
render.Scale.Y = 1
}
render.zIndex = render.StartZIndex
rs.entities = append(rs.entities, renderEntity{basic, render, space})
rs.sortingNeeded = true
}
// EntityExists returns the index of basic within rs.entities, or -1 when
// the entity is not tracked by this system.
func (rs *RenderSystem) EntityExists(basic *ecs.BasicEntity) int {
	for idx := range rs.entities {
		if rs.entities[idx].ID() == basic.ID() {
			return idx
		}
	}
	return -1
}
// AddByInterface adds any Renderable to the render system. Any Entity containing a BasicEntity,RenderComponent, and SpaceComponent anonymously does this automatically
func (rs *RenderSystem) AddByInterface(i ecs.Identifier) {
	// NOTE(review): the assertion failure is discarded; if i does not
	// implement Renderable, o is nil and the calls below panic — confirm
	// callers only ever pass Renderables.
	o, _ := i.(Renderable)
	rs.Add(o.GetBasicEntity(), o.GetRenderComponent(), o.GetSpaceComponent())
}
// Remove removes an entity from the RenderSystem and flags the draw list
// for re-sorting when an element was actually deleted.
func (rs *RenderSystem) Remove(basic ecs.BasicEntity) {
	idx := rs.EntityExists(&basic)
	if idx >= 0 {
		copy(rs.entities[idx:], rs.entities[idx+1:])
		rs.entities = rs.entities[:len(rs.entities)-1]
		rs.sortingNeeded = true
	}
	delete(rs.ids, basic.ID())
}
// Update draws the entities in the RenderSystem to the OpenGL Surface.
// Entities are drawn in sorted order (see renderEntityList.Less) so that
// shader Pre/Post transitions happen as rarely as possible.
func (rs *RenderSystem) Update(dt float32) {
	if engo.Headless() {
		return
	}
	// Re-sort lazily: only when z-index/shader/filter changes were flagged.
	if rs.sortingNeeded {
		sort.Sort(rs.entities)
		rs.sortingNeeded = false
	}
	if rs.newCamera {
		newCamera(rs.world)
		rs.newCamera = false
	}
	engo.Gl.Clear(engo.Gl.COLOR_BUFFER_BIT)
	// Track which culling shaders were prepared this frame so PrepareCulling
	// runs at most once per shader.
	preparedCullingShaders := make(map[CullingShader]struct{})
	var cullingShader CullingShader // current culling shader
	var prevShader Shader           // shader of the previous entity
	var currentShader Shader        // currently "active" shader
	// TODO: it's linear for now, but that might very well be a bad idea
	for _, e := range rs.entities {
		if e.RenderComponent.Hidden {
			continue // with other entities
		}
		// Retrieve a shader, may be the default one -- then use it if we aren't already using it
		shader := e.RenderComponent.shader
		if !compareShaders(shader, prevShader) {
			// to increase performance avoid the type assertions when possible
			prevShader = shader
			if cs, ok := shader.(CullingShader); ok {
				cullingShader = cs
				if _, isPrepared := preparedCullingShaders[cullingShader]; !isPrepared {
					cullingShader.PrepareCulling()
					preparedCullingShaders[cullingShader] = struct{}{}
				}
			} else {
				cullingShader = nil
			}
		}
		// Skip entities the culling shader deems off-screen.
		if cullingShader != nil && !cullingShader.ShouldDraw(e.RenderComponent, e.SpaceComponent) {
			continue
		}
		// Change Shader if we have to
		if !compareShaders(shader, currentShader) {
			if currentShader != nil {
				currentShader.Post()
			}
			shader.Pre()
			currentShader = shader
		}
		// Setting default scale to 1
		if e.RenderComponent.Scale.X == 0 && e.RenderComponent.Scale.Y == 0 {
			e.RenderComponent.Scale = engo.Point{X: 1, Y: 1}
		}
		// Setting default to white
		if e.RenderComponent.Color == nil {
			e.RenderComponent.Color = color.White
		}
		currentShader.Draw(e.RenderComponent, e.SpaceComponent)
	}
	// Close out the final shader batch.
	if currentShader != nil {
		currentShader.Post()
	}
}
// SetBackground sets the OpenGL ClearColor to the provided color; it is a
// no-op in headless mode.
func SetBackground(c color.Color) {
	if engo.Headless() {
		return
	}
	// RGBA returns 16-bit components, so normalize by 0xffff.
	r, g, b, a := c.RGBA()
	const max = float32(0xffff)
	engo.Gl.ClearColor(float32(r)/max, float32(g)/max, float32(b)/max, float32(a)/max)
}
|
// Copyright © 2016 Jip J. Dekker <jip@dekker.li>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compiler
import (
"os"
"os/exec"
"path/filepath"
log "github.com/Sirupsen/logrus"
"github.com/alecthomas/template"
"github.com/jjdekker/ponder/helpers"
"github.com/jjdekker/ponder/settings"
)
// TODO: Add git version
// TODO: Support multiple authors
// TODO: Support categories
var bookTempl = `
\documentclass[11pt,fleqn]{book}
\usepackage[utf8]{inputenc}
\usepackage{pdfpages}
\usepackage[space]{grffile}
\usepackage[pdftex,pdfpagelabels,bookmarks,hyperindex,hyperfigures]{hyperref}
{{if ne .Settings.Name ""}}\\title{ {{.Settings.Name}} }{{end}}
{{if ne .Settings.Author ""}}\\author{ {{.Settings.Author}} }{{end}}
\date{\today}
\begin{document}
\maketitle
\newpage
{{range .Scores}}
\phantomsection
\addcontentsline{toc}{section}{{printf "{"}}{{ .Name }}{{printf "}"}}
\includepdf[pages=-]{{printf "{"}}{{.OutputPath}}{{printf "}"}}
{{end}}
\end{document}
`
// MakeBook will combine all scores into a single songbook
// generated using LaTeX. It compiles the score directory, renders the
// bookTempl template to <OutputDir>/songbook.tex, runs latexmk to build
// the PDF, then cleans up the auxiliary files and the .tex source.
// NOTE(review): helpers.Check appears to abort on error (it is used in
// place of explicit error returns) — confirm; if it does not, f would not
// be closed on a failed template execution.
func MakeBook(path string, opts *settings.Settings) {
	// Everything needs to be compiled
	CompileDir(path, opts)
	// Compile the book template
	var templ = template.Must(template.New("songBook").Parse(bookTempl))
	texPath := filepath.Join(opts.OutputDir, "songbook.tex")
	log.WithFields(log.Fields{
		"path": texPath,
	}).Info("compiling songbook template")
	f, err := os.Create(texPath)
	helpers.Check(err, "could not create songbook texfile")
	// The template receives pointers so the (package-level) scores slice
	// is not copied.
	err = templ.Execute(f, &struct {
		Scores   *[]settings.Score
		Settings *settings.Settings
	}{
		Scores:   &scores,
		Settings: opts,
	})
	helpers.Check(err, "error executing book template")
	f.Close()
	// Build the PDF; -cd makes latexmk run inside the output directory.
	cmd := exec.Command("latexmk", "-silent", "-pdf", "-cd", texPath)
	out, err := cmd.CombinedOutput()
	if err != nil {
		log.WithFields(log.Fields{
			"message": string(out),
			"error":   err,
		}).Fatal("songbook failed to compile")
	}
	// Remove latexmk's auxiliary files; failure here is non-fatal.
	cmd = exec.Command("latexmk", "-c", "-cd", texPath)
	out, err = cmd.CombinedOutput()
	if err != nil {
		log.WithFields(log.Fields{
			"message": string(out),
			"error":   err,
		}).Error("failed to clean songbook latex files")
	}
	err = os.Remove(texPath)
	helpers.Check(err, "could not remove songbook latex template")
}
Adds a function to find all categories
// Copyright © 2016 Jip J. Dekker <jip@dekker.li>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compiler
import (
"os"
"os/exec"
"path/filepath"
"fmt"
log "github.com/Sirupsen/logrus"
"github.com/alecthomas/template"
"github.com/jjdekker/ponder/helpers"
"github.com/jjdekker/ponder/settings"
)
// TODO: Add git version
// TODO: Support multiple authors
// TODO: Support categories
var bookTempl = `
\documentclass[11pt,fleqn]{book}
\usepackage[utf8]{inputenc}
\usepackage{pdfpages}
\usepackage[space]{grffile}
\usepackage[pdftex,pdfpagelabels,bookmarks,hyperindex,hyperfigures]{hyperref}
{{if ne .Settings.Name ""}}\\title{ {{.Settings.Name}} }{{end}}
{{if ne .Settings.Author ""}}\\author{ {{.Settings.Author}} }{{end}}
\date{\today}
\begin{document}
\maketitle
\newpage
{{range .Scores}}
\phantomsection
\addcontentsline{toc}{section}{{printf "{"}}{{ .Name }}{{printf "}"}}
\includepdf[pages=-]{{printf "{"}}{{.OutputPath}}{{printf "}"}}
{{end}}
\end{document}
`
// MakeBook will combine all scores into a single songbook
// generated using LaTeX.
//
// Steps: compile every score under path, render the LaTeX book
// template to <OutputDir>/songbook.tex, build the PDF with latexmk,
// then clean latexmk's auxiliary files and remove the template.
// Creation/compilation failures are fatal (helpers.Check / log.Fatal);
// cleanup failures are only logged as errors.
func MakeBook(path string, opts *settings.Settings) {
	// Everything needs to be compiled
	CompileDir(path, opts)
	// BUG FIX: this was a bare fmt.Println of the categories (leftover
	// debug output on stdout); route it through the logger instead.
	log.WithFields(log.Fields{
		"categories": fmt.Sprintf("%v", scoreCategories(&scores)),
	}).Debug("found score categories")
	// Compile the book template
	var templ = template.Must(template.New("songBook").Parse(bookTempl))
	texPath := filepath.Join(opts.OutputDir, "songbook.tex")
	log.WithFields(log.Fields{
		"path": texPath,
	}).Info("compiling songbook template")
	f, err := os.Create(texPath)
	helpers.Check(err, "could not create songbook texfile")
	err = templ.Execute(f, &struct {
		Scores   *[]settings.Score
		Settings *settings.Settings
	}{
		Scores:   &scores,
		Settings: opts,
	})
	helpers.Check(err, "error executing book template")
	f.Close()
	// Build the PDF; -cd makes latexmk run in the texfile's directory.
	cmd := exec.Command("latexmk", "-silent", "-pdf", "-cd", texPath)
	out, err := cmd.CombinedOutput()
	if err != nil {
		log.WithFields(log.Fields{
			"message": string(out),
			"error":   err,
		}).Fatal("songbook failed to compile")
	}
	// latexmk -c removes the auxiliary build files but keeps the PDF.
	cmd = exec.Command("latexmk", "-c", "-cd", texPath)
	out, err = cmd.CombinedOutput()
	if err != nil {
		log.WithFields(log.Fields{
			"message": string(out),
			"error":   err,
		}).Error("failed to clean songbook latex files")
	}
	err = os.Remove(texPath)
	helpers.Check(err, "could not remove songbook latex template")
}
// scoreCategories returns a slice of all distinct categories used in
// the given slice of scores. The order of the result is unspecified
// (map iteration order).
func scoreCategories(scores *[]settings.Score) []string {
	seen := make(map[string]struct{})
	for _, score := range *scores {
		for _, category := range score.Categories {
			seen[category] = struct{}{}
		}
	}
	result := make([]string, 0, len(seen))
	for category := range seen {
		result = append(result, category)
	}
	return result
}
|
// Package main is the entry point for the Blue Jay command-line tool called
// Jay.
package main
import (
"fmt"
"log"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/blue-jay/core/env"
"github.com/blue-jay/core/file"
"github.com/blue-jay/core/find"
"github.com/blue-jay/core/generate"
"github.com/blue-jay/core/jsonconfig"
"github.com/blue-jay/core/replace"
"github.com/blue-jay/core/storage"
"github.com/blue-jay/core/storage/migration/mysql"
"gopkg.in/alecthomas/kingpin.v2"
)
// Command-line definitions for the jay tool. Each command handler in
// this file matches on the parsed command and reads these values.
var (
	app = kingpin.New("jay", "A command-line application to build faster with Blue Jay.")

	// find: search files for matching text.
	cFind          = app.Command("find", "Search for files containing matching text.")
	cFindFolder    = cFind.Arg("folder", "Folder to search").Required().String()
	cFindText      = cFind.Arg("text", "Case-sensitive text to find.").Required().String()
	cFindExtension = cFind.Arg("extension", "File name or extension to search in. Use * as a wildcard. Directory names are not valid.").Default("*.go").String()
	cFindRecursive = cFind.Arg("recursive", "True to search in subfolders. Default: true").Default("true").Bool()
	cFindFilename  = cFind.Arg("filename", "True to include file path in results if matched. Default: false").Default("false").Bool()

	// replace: search and replace text in files.
	cReplace          = app.Command("replace", "Search for files containing matching text and then replace it with new text.")
	cReplaceFolder    = cReplace.Arg("folder", "Folder to search").Required().String()
	cReplaceFind      = cReplace.Arg("find", "Case-sensitive text to replace.").Required().String()
	cReplaceText      = cReplace.Arg("replace", "Text to replace with.").String()
	cReplaceExtension = cReplace.Arg("extension", "File name or extension to search in. Use * as a wildcard. Directory names are not valid.").Default("*.go").String()
	cReplaceRecursive = cReplace.Arg("recursive", "True to search in subfolders. Default: true").Default("true").Bool()
	cReplaceFilename  = cReplace.Arg("filename", "True to include file path in results if matched. Default: false").Default("false").Bool()
	// BUG FIX: help text said "True to makes the changes".
	cReplaceCommit = cReplace.Arg("commit", "True to make the changes instead of just displaying them. Default: true").Default("true").Bool()

	// env: manage the env.json config file.
	cEnv          = app.Command("env", "Manage the environment config file.")
	cEnvMake      = cEnv.Command("make", "Create a new env.json file.")
	cEnvKeyshow   = cEnv.Command("keyshow", "Show a new set of session keys.")
	cEnvKeyUpdate = cEnv.Command("keyupdate", "Update env.json with a new set of session keys.")

	// migrate:mysql: run database migrations.
	cMigrateMySQL     = app.Command("migrate:mysql", "Migrate MySQL to different states using 'up' and 'down' files.")
	cMigrateMySQLMake = cMigrateMySQL.Command("make", "Create a migration file.")
	// BUG FIX: help text said "will be make lowercase".
	cMigrateMySQLMakeDesc = cMigrateMySQLMake.Arg("description", "Description for the migration file. Spaces will be converted to underscores and all characters will be made lowercase.").Required().String()
	cMigrateMySQLAll      = cMigrateMySQL.Command("all", "Run all 'up' files to advance the database to the latest.")
	cMigrateMySQLReset    = cMigrateMySQL.Command("reset", "Run all 'down' files to rollback the database to empty.")
	cMigrateMySQLRefresh  = cMigrateMySQL.Command("refresh", "Run all 'down' files and then 'up' files so the database is fresh and updated.")
	cMigrateMySQLStatus   = cMigrateMySQL.Command("status", "View the last 'up' file performed on the database.")
	cMigrateMySQLUp       = cMigrateMySQL.Command("up", "Apply only the next 'up' file to the database to advance the database one iteration.")
	cMigrateMySQLDown     = cMigrateMySQL.Command("down", "Apply only the current 'down' file to the database to rollback the database one iteration.")

	// generate: render template pairs into the project.
	cGenerate     = app.Command("generate", "Generate files from template pairs.")
	cGenerateTmpl = cGenerate.Arg("folder/template", "Template pair name. Don't include an extension.").Required().String()
	cGenerateVars = stringList(cGenerate.Arg("key:value", "Key and value required for the template pair."))
)
// init sets runtime settings.
func init() {
	// Verbose logging with file name and line number
	log.SetFlags(log.Lshortfile)
	// Use all CPU cores
	// NOTE(review): GOMAXPROCS defaults to NumCPU since Go 1.5, so this
	// call is redundant on modern toolchains.
	runtime.GOMAXPROCS(runtime.NumCPU())
}
// main parses the command line and dispatches to every command
// handler; each handler ignores commands that are not its own.
func main() {
	app.Version("0.5-bravo")
	app.VersionFlag.Short('v')
	app.HelpFlag.Short('h')
	argList := os.Args[1:]
	// MustParse exits with a usage message if parsing fails.
	arg := kingpin.MustParse(app.Parse(argList))
	commandFind(arg)
	commandReplace(arg)
	commandEnv(arg)
	// These two also receive the raw arg list to detect their command group.
	commandMigrateMySQL(arg, argList)
	commandGenerate(arg, argList)
}
// commandFind handles the find command: it searches the requested
// folder for the given text and prints every matching line.
func commandFind(arg string) {
	if arg != cFind.FullCommand() {
		return
	}
	results, err := find.Run(cFindText,
		cFindFolder,
		cFindExtension,
		cFindRecursive,
		cFindFilename)
	if err != nil {
		app.Fatalf("%v", err)
	}
	for _, result := range results {
		fmt.Println(result)
	}
}
// commandReplace handles the replace command: it searches the
// requested folder for the given text, replaces it, and prints every
// affected line.
func commandReplace(arg string) {
	if arg != cReplace.FullCommand() {
		return
	}
	results, err := replace.Run(cReplaceFind,
		cReplaceFolder,
		cReplaceText,
		cReplaceExtension,
		cReplaceRecursive,
		cReplaceFilename,
		cReplaceCommit)
	if err != nil {
		app.Fatalf("%v", err)
	}
	for _, result := range results {
		fmt.Println(result)
	}
}
// commandEnv handles the env subcommands: creating env.json from the
// example file, printing a fresh set of session keys, or rewriting the
// keys inside an existing env.json.
func commandEnv(arg string) {
	switch arg {
	case cEnvMake.FullCommand():
		err := file.Copy("env.json.example", "env.json")
		if err != nil {
			app.Fatalf("%v", err)
		}
		err = env.UpdateFileKeys("env.json")
		if err != nil {
			app.Fatalf("%v", err)
		}
		p, err := filepath.Abs(".")
		if err != nil {
			app.Fatalf("%v", err)
		}
		config := filepath.Join(p, "env.json")
		if !file.Exists(config) {
			// BUG FIX: this previously printed the stale err value,
			// which is nil at this point.
			app.Fatalf("could not find the created config file: %v", config)
		}
		fmt.Println("File, env.json, created successfully with new session keys.")
		fmt.Println("Set your environment variable, JAYCONFIG, to:")
		fmt.Println(config)
	case cEnvKeyshow.FullCommand():
		fmt.Println("Paste these into your env.json file:")
		fmt.Printf(`    "AuthKey":"%v",`+"\n", env.EncodedKey(64))
		fmt.Printf(`    "EncryptKey":"%v",`+"\n", env.EncodedKey(32))
		fmt.Printf(`    "CSRFKey":"%v",`+"\n", env.EncodedKey(32))
	case cEnvKeyUpdate.FullCommand():
		err := env.UpdateFileKeys("env.json")
		if err != nil {
			app.Fatalf("%v", err)
		}
		fmt.Println("Session keys updated in env.json.")
	}
}
// commandMigrateMySQL handles the migrate:mysql command group. It
// loads the storage configuration from the JAYCONFIG env.json file and
// applies the requested migration action.
func commandMigrateMySQL(arg string, argList []string) {
	// Guard against an empty argument list (previously argList[0]
	// could panic) and ignore other command groups.
	if len(argList) == 0 || argList[0] != "migrate:mysql" {
		return
	}
	// Load the config
	info := &storage.Info{}
	err := jsonconfig.LoadFromEnv(info)
	if err != nil {
		app.Fatalf("%v", err)
	}
	// Configure MySQL
	mysql.SetConfig(info.MySQL)
	mig, err := mysql.Shared().New()
	if err != nil {
		app.Fatalf("%v", err)
	}
	switch arg {
	case cMigrateMySQLMake.FullCommand():
		err = mig.Create(*cMigrateMySQLMakeDesc)
	case cMigrateMySQLAll.FullCommand():
		err = mig.UpAll()
	case cMigrateMySQLReset.FullCommand():
		err = mig.DownAll()
	case cMigrateMySQLRefresh.FullCommand():
		if mig.Position() == 0 {
			err = mig.UpAll()
		} else {
			// BUG FIX: the DownAll error was previously overwritten
			// by the UpAll result; stop if the rollback fails.
			if err = mig.DownAll(); err == nil {
				err = mig.UpAll()
			}
		}
	case cMigrateMySQLStatus.FullCommand():
		fmt.Println("Last migration:", mig.Status())
	case cMigrateMySQLUp.FullCommand():
		err = mig.UpOne()
	case cMigrateMySQLDown.FullCommand():
		err = mig.DownOne()
	}
	if err != nil {
		app.Fatalf("%v", err)
	} else {
		fmt.Print(mig.Output())
	}
}
// commandGenerate handles the generate command: it loads the
// generation configuration from the JAYCONFIG env.json file and
// renders the requested template pair. The arg parameter is unused but
// kept for signature consistency with the other command handlers.
func commandGenerate(arg string, args []string) {
	// Guard against an empty argument list (previously args[0] could
	// panic) and ignore other commands.
	if len(args) == 0 || args[0] != "generate" {
		return
	}
	// Load the config
	info := &generate.Container{}
	err := jsonconfig.LoadFromEnv(info)
	if err != nil {
		app.Fatalf("%v", err)
	}
	// Get the config path
	jc := os.Getenv("JAYCONFIG")
	if len(jc) == 0 {
		log.Fatalln("Environment variable JAYCONFIG needs to be set to the env.json file location.")
	}
	// Get the folders
	projectFolder := filepath.Dir(jc)
	templateFolder := filepath.Join(projectFolder, info.Generation.TemplateFolder)
	// Generate the code; args[1:] carries the template name and the
	// key:value pairs.
	err = generate.Run(args[1:], projectFolder, templateFolder)
	if err != nil {
		app.Fatalf("%v", err)
	}
}
// *****************************************************************************
// Custom Arguments
// *****************************************************************************
// StringList collects repeated string arguments from the command line.
type StringList []string

// Set records one more value on the list. It never fails.
func (i *StringList) Set(value string) error {
	*i = append(*i, value)
	return nil
}

// String renders the collected values separated by single spaces.
func (i *StringList) String() string {
	return strings.Join([]string(*i), " ")
}

// IsCumulative reports that this argument may be passed more than once.
func (i *StringList) IsCumulative() bool {
	return true
}
// stringList accepts one or more strings as arguments.
// It registers a cumulative StringList value on the given kingpin
// argument/flag settings and returns it for later reading.
func stringList(s kingpin.Settings) (target *StringList) {
	target = new(StringList)
	s.SetValue((*StringList)(target))
	return
}
Add ability to specify config file in arguments
// Package main is the entry point for the Blue Jay command-line tool called
// Jay.
package main
import (
"fmt"
"log"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/blue-jay/core/env"
"github.com/blue-jay/core/file"
"github.com/blue-jay/core/find"
"github.com/blue-jay/core/generate"
"github.com/blue-jay/core/jsonconfig"
"github.com/blue-jay/core/replace"
"github.com/blue-jay/core/storage"
mysqlMigration "github.com/blue-jay/core/storage/migration/mysql"
"gopkg.in/alecthomas/kingpin.v2"
)
// Command-line definitions for the jay tool. Each command handler in
// this file matches on the parsed command and reads these values.
var (
	app = kingpin.New("jay", "A command-line application to build faster with Blue Jay.")

	// Optional override for the env.json location; handlers fall back
	// to the JAYCONFIG environment variable when it is empty.
	flagConfigFile = app.Flag("config", "Path to the env.json file.").Short('c').String()

	// find: search files for matching text.
	cFind          = app.Command("find", "Search for files containing matching text.")
	cFindFolder    = cFind.Arg("folder", "Folder to search").Required().String()
	cFindText      = cFind.Arg("text", "Case-sensitive text to find.").Required().String()
	cFindExtension = cFind.Arg("extension", "File name or extension to search in. Use * as a wildcard. Directory names are not valid.").Default("*.go").String()
	cFindRecursive = cFind.Arg("recursive", "True to search in subfolders. Default: true").Default("true").Bool()
	cFindFilename  = cFind.Arg("filename", "True to include file path in results if matched. Default: false").Default("false").Bool()

	// replace: search and replace text in files.
	cReplace          = app.Command("replace", "Search for files containing matching text and then replace it with new text.")
	cReplaceFolder    = cReplace.Arg("folder", "Folder to search").Required().String()
	cReplaceFind      = cReplace.Arg("find", "Case-sensitive text to replace.").Required().String()
	cReplaceText      = cReplace.Arg("replace", "Text to replace with.").String()
	cReplaceExtension = cReplace.Arg("extension", "File name or extension to search in. Use * as a wildcard. Directory names are not valid.").Default("*.go").String()
	cReplaceRecursive = cReplace.Arg("recursive", "True to search in subfolders. Default: true").Default("true").Bool()
	cReplaceFilename  = cReplace.Arg("filename", "True to include file path in results if matched. Default: false").Default("false").Bool()
	// BUG FIX: help text said "True to makes the changes".
	cReplaceCommit = cReplace.Arg("commit", "True to make the changes instead of just displaying them. Default: true").Default("true").Bool()

	// env: manage the env.json config file.
	cEnv          = app.Command("env", "Manage the environment config file.")
	cEnvMake      = cEnv.Command("make", "Create a new env.json file.")
	cEnvKeyshow   = cEnv.Command("keyshow", "Show a new set of session keys.")
	cEnvKeyUpdate = cEnv.Command("keyupdate", "Update env.json with a new set of session keys.")

	// migrate:mysql: run database migrations.
	cMigrateMySQL     = app.Command("migrate:mysql", "Migrate MySQL to different states using 'up' and 'down' files.")
	cMigrateMySQLMake = cMigrateMySQL.Command("make", "Create a migration file.")
	// BUG FIX: help text said "will be make lowercase".
	cMigrateMySQLMakeDesc = cMigrateMySQLMake.Arg("description", "Description for the migration file. Spaces will be converted to underscores and all characters will be made lowercase.").Required().String()
	cMigrateMySQLAll      = cMigrateMySQL.Command("all", "Run all 'up' files to advance the database to the latest.")
	cMigrateMySQLReset    = cMigrateMySQL.Command("reset", "Run all 'down' files to rollback the database to empty.")
	cMigrateMySQLRefresh  = cMigrateMySQL.Command("refresh", "Run all 'down' files and then 'up' files so the database is fresh and updated.")
	cMigrateMySQLStatus   = cMigrateMySQL.Command("status", "View the last 'up' file performed on the database.")
	cMigrateMySQLUp       = cMigrateMySQL.Command("up", "Apply only the next 'up' file to the database to advance the database one iteration.")
	cMigrateMySQLDown     = cMigrateMySQL.Command("down", "Apply only the current 'down' file to the database to rollback the database one iteration.")

	// generate: render template pairs into the project.
	cGenerate     = app.Command("generate", "Generate files from template pairs.")
	cGenerateTmpl = cGenerate.Arg("folder/template", "Template pair name. Don't include an extension.").Required().String()
	cGenerateVars = stringList(cGenerate.Arg("key:value", "Key and value required for the template pair."))
)
// init sets runtime settings.
func init() {
	// Verbose logging with file name and line number
	log.SetFlags(log.Lshortfile)
	// Use all CPU cores
	// NOTE(review): GOMAXPROCS defaults to NumCPU since Go 1.5, so this
	// call is redundant on modern toolchains.
	runtime.GOMAXPROCS(runtime.NumCPU())
}
// main parses the command line and dispatches to every command
// handler; each handler ignores commands that are not its own.
func main() {
	app.Version("0.5-bravo")
	app.VersionFlag.Short('v')
	app.HelpFlag.Short('h')
	argList := os.Args[1:]
	// MustParse exits with a usage message if parsing fails.
	arg := kingpin.MustParse(app.Parse(argList))
	commandFind(arg)
	commandReplace(arg)
	commandEnv(arg)
	// These two also receive the raw arg list to detect their command group.
	commandMigrateMySQL(arg, argList)
	commandGenerate(arg, argList)
}
// commandFind handles the find command: it searches the requested
// folder for the given text and prints every matching line.
func commandFind(arg string) {
	if arg != cFind.FullCommand() {
		return
	}
	results, err := find.Run(cFindText,
		cFindFolder,
		cFindExtension,
		cFindRecursive,
		cFindFilename)
	if err != nil {
		app.Fatalf("%v", err)
	}
	for _, result := range results {
		fmt.Println(result)
	}
}
// commandReplace handles the replace command: it searches the
// requested folder for the given text, replaces it, and prints every
// affected line.
func commandReplace(arg string) {
	if arg != cReplace.FullCommand() {
		return
	}
	results, err := replace.Run(cReplaceFind,
		cReplaceFolder,
		cReplaceText,
		cReplaceExtension,
		cReplaceRecursive,
		cReplaceFilename,
		cReplaceCommit)
	if err != nil {
		app.Fatalf("%v", err)
	}
	for _, result := range results {
		fmt.Println(result)
	}
}
// commandEnv handles the env subcommands: creating env.json from the
// example file, printing a fresh set of session keys, or rewriting the
// keys inside an existing env.json.
func commandEnv(arg string) {
	switch arg {
	case cEnvMake.FullCommand():
		err := file.Copy("env.json.example", "env.json")
		if err != nil {
			app.Fatalf("%v", err)
		}
		err = env.UpdateFileKeys("env.json")
		if err != nil {
			app.Fatalf("%v", err)
		}
		p, err := filepath.Abs(".")
		if err != nil {
			app.Fatalf("%v", err)
		}
		config := filepath.Join(p, "env.json")
		if !file.Exists(config) {
			// BUG FIX: this previously printed the stale err value,
			// which is nil at this point.
			app.Fatalf("could not find the created config file: %v", config)
		}
		fmt.Println("File, env.json, created successfully with new session keys.")
		fmt.Println("Set your environment variable, JAYCONFIG, to:")
		fmt.Println(config)
	case cEnvKeyshow.FullCommand():
		fmt.Println("Paste these into your env.json file:")
		fmt.Printf(`    "AuthKey":"%v",`+"\n", env.EncodedKey(64))
		fmt.Printf(`    "EncryptKey":"%v",`+"\n", env.EncodedKey(32))
		fmt.Printf(`    "CSRFKey":"%v",`+"\n", env.EncodedKey(32))
	case cEnvKeyUpdate.FullCommand():
		err := env.UpdateFileKeys("env.json")
		if err != nil {
			app.Fatalf("%v", err)
		}
		fmt.Println("Session keys updated in env.json.")
	}
}
// commandMigrateMySQL handles the migrate:mysql command group. The
// storage configuration is read from --config when given, otherwise
// from the JAYCONFIG env.json file, and the requested migration action
// is applied.
func commandMigrateMySQL(arg string, argList []string) {
	// Guard against an empty argument list (previously argList[0]
	// could panic) and ignore other command groups.
	if len(argList) == 0 || argList[0] != "migrate:mysql" {
		return
	}
	var err error
	// Config struct
	info := &storage.Info{}
	// Check if the config file path was passed
	if len(*flagConfigFile) > 0 {
		// Load the config from the passed file
		err = jsonconfig.Load(*flagConfigFile, info)
	} else {
		// Load the config from the environment variable
		err = jsonconfig.LoadFromEnv(info)
	}
	if err != nil {
		app.Fatalf("%v", err)
	}
	// NOTE(review): unkeyed composite literal — keying the field would
	// be more robust against upstream struct changes.
	mysqlConfig := &mysqlMigration.Configuration{
		info.MySQL,
	}
	// Create a new migration object
	mig, err := mysqlConfig.New()
	if err != nil {
		app.Fatalf("%v", err)
	}
	switch arg {
	case cMigrateMySQLMake.FullCommand():
		err = mig.Create(*cMigrateMySQLMakeDesc)
	case cMigrateMySQLAll.FullCommand():
		err = mig.UpAll()
	case cMigrateMySQLReset.FullCommand():
		err = mig.DownAll()
	case cMigrateMySQLRefresh.FullCommand():
		if mig.Position() == 0 {
			err = mig.UpAll()
		} else {
			// BUG FIX: the DownAll error was previously overwritten
			// by the UpAll result; stop if the rollback fails.
			if err = mig.DownAll(); err == nil {
				err = mig.UpAll()
			}
		}
	case cMigrateMySQLStatus.FullCommand():
		fmt.Println("Last migration:", mig.Status())
	case cMigrateMySQLUp.FullCommand():
		err = mig.UpOne()
	case cMigrateMySQLDown.FullCommand():
		err = mig.DownOne()
	}
	if err != nil {
		app.Fatalf("%v", err)
	} else {
		fmt.Print(mig.Output())
	}
}
// commandGenerate handles the generate command: it loads the
// generation configuration from --config when given, otherwise from
// the JAYCONFIG env.json file, and renders the requested template
// pair. The arg parameter is unused but kept for signature consistency
// with the other command handlers.
func commandGenerate(arg string, args []string) {
	// Guard against an empty argument list (previously args[0] could
	// panic) and ignore other commands.
	if len(args) == 0 || args[0] != "generate" {
		return
	}
	var err error
	// Load the config
	info := &generate.Container{}
	configFile := ""
	// Check if the config file path was passed
	if len(*flagConfigFile) > 0 {
		// Load the config from the passed file
		err = jsonconfig.Load(*flagConfigFile, info)
		configFile = *flagConfigFile
	} else {
		// Load the config from the environment variable
		err = jsonconfig.LoadFromEnv(info)
		// Get the config file path
		configFile = os.Getenv("JAYCONFIG")
	}
	if err != nil {
		app.Fatalf("%v", err)
	}
	// Get the folders
	projectFolder := filepath.Dir(configFile)
	templateFolder := filepath.Join(projectFolder, info.Generation.TemplateFolder)
	// Generate the code; args[1:] carries the template name and the
	// key:value pairs.
	err = generate.Run(args[1:], projectFolder, templateFolder)
	if err != nil {
		app.Fatalf("%v", err)
	}
}
// *****************************************************************************
// Custom Arguments
// *****************************************************************************
// StringList collects repeated string arguments from the command line.
type StringList []string

// Set records one more value on the list. It never fails.
func (i *StringList) Set(value string) error {
	*i = append(*i, value)
	return nil
}

// String renders the collected values separated by single spaces.
func (i *StringList) String() string {
	return strings.Join([]string(*i), " ")
}

// IsCumulative reports that this argument may be passed more than once.
func (i *StringList) IsCumulative() bool {
	return true
}
// stringList accepts one or more strings as arguments.
// It registers a cumulative StringList value on the given kingpin
// argument/flag settings and returns it for later reading.
func stringList(s kingpin.Settings) (target *StringList) {
	target = new(StringList)
	s.SetValue((*StringList)(target))
	return
}
|
// Copyright 2015 Vadim Kravcenko
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package gojenkins
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"net/url"
"path"
"strconv"
"strings"
)
// Job wraps a single Jenkins job. Base is the URL path of the job
// (e.g. "/job/name") relative to the Jenkins root.
type Job struct {
	Raw     *JobResponse
	Jenkins *Jenkins
	Base    string
}

// JobBuild identifies one build of a job by number and URL.
type JobBuild struct {
	Number int64
	URL    string
}

// InnerJob is the lightweight job summary Jenkins returns inside
// listings (sub/upstream/downstream jobs).
type InnerJob struct {
	Name  string `json:"name"`
	Url   string `json:"url"`
	Color string `json:"color"`
}

// ParameterDefinition describes one configurable build parameter of a
// parameterized job.
type ParameterDefinition struct {
	DefaultParameterValue struct {
		Name  string      `json:"name"`
		Value interface{} `json:"value"`
	} `json:"defaultParameterValue"`
	Description string `json:"description"`
	Name        string `json:"name"`
	Type        string `json:"type"`
}

// JobResponse mirrors the JSON document returned by the Jenkins job
// API endpoint.
type JobResponse struct {
	Actions            []generalObj
	Buildable          bool `json:"buildable"`
	Builds             []JobBuild
	Color              string      `json:"color"`
	ConcurrentBuild    bool        `json:"concurrentBuild"`
	Description        string      `json:"description"`
	DisplayName        string      `json:"displayName"`
	DisplayNameOrNull  interface{} `json:"displayNameOrNull"`
	DownstreamProjects []InnerJob  `json:"downstreamProjects"`
	FirstBuild         JobBuild
	HealthReport       []struct {
		Description   string `json:"description"`
		IconClassName string `json:"iconClassName"`
		IconUrl       string `json:"iconUrl"`
		Score         int64  `json:"score"`
	} `json:"healthReport"`
	InQueue               bool     `json:"inQueue"`
	KeepDependencies      bool     `json:"keepDependencies"`
	LastBuild             JobBuild `json:"lastBuild"`
	LastCompletedBuild    JobBuild `json:"lastCompletedBuild"`
	LastFailedBuild       JobBuild `json:"lastFailedBuild"`
	LastStableBuild       JobBuild `json:"lastStableBuild"`
	LastSuccessfulBuild   JobBuild `json:"lastSuccessfulBuild"`
	LastUnstableBuild     JobBuild `json:"lastUnstableBuild"`
	LastUnsuccessfulBuild JobBuild `json:"lastUnsuccessfulBuild"`
	Name                  string   `json:"name"`
	// NOTE(review): SubJobs and Jobs below both carry the json tag
	// "jobs". encoding/json ignores conflicting duplicate names at the
	// same level, so neither field may ever be populated on unmarshal —
	// confirm which one is intended and retag the other.
	SubJobs         []InnerJob `json:"jobs"`
	NextBuildNumber int64      `json:"nextBuildNumber"`
	Property        []struct {
		ParameterDefinitions []ParameterDefinition `json:"parameterDefinitions"`
	} `json:"property"`
	QueueItem        interface{} `json:"queueItem"`
	Scm              struct{}    `json:"scm"`
	UpstreamProjects []InnerJob  `json:"upstreamProjects"`
	URL              string      `json:"url"`
	Jobs             []InnerJob  `json:"jobs"`
	PrimaryView      *ViewData   `json:"primaryView"`
	Views            []ViewData  `json:"views"`
}
// parentBase returns the Base path of this job's parent by trimming
// the final "/job/<name>" segment. If Base contains no "/job/"
// segment, Base itself is returned.
// BUG FIX: previously a missing "/job/" made LastIndex return -1 and
// the slice expression panicked.
func (j *Job) parentBase() string {
	idx := strings.LastIndex(j.Base, "/job/")
	if idx < 0 {
		return j.Base
	}
	return j.Base[:idx]
}
// History is one entry of a job's build history as parsed from the
// buildHistory endpoint.
type History struct {
	BuildNumber    int
	BuildStatus    string
	BuildTimestamp int64
}

// GetName returns the job name from the cached job response.
func (j *Job) GetName() string {
	return j.Raw.Name
}

// GetDescription returns the job description from the cached job response.
func (j *Job) GetDescription() string {
	return j.Raw.Description
}

// GetDetails returns the raw cached job response.
func (j *Job) GetDetails() *JobResponse {
	return j.Raw
}
// GetBuild fetches the build with the given number for this job. A
// non-200 poll status is returned as an error whose text is the
// status code.
func (j *Job) GetBuild(id int64) (*Build, error) {
	build := Build{
		Jenkins: j.Jenkins,
		Job:     j,
		Raw:     new(BuildResponse),
		Depth:   1,
		Base:    "/job/" + j.GetName() + "/" + strconv.FormatInt(id, 10),
	}
	code, err := build.Poll()
	if err != nil {
		return nil, err
	}
	if code != 200 {
		return nil, errors.New(strconv.Itoa(code))
	}
	return &build, nil
}
// getBuildByType fetches the build referenced by one of the named
// build pointers of the cached job response (e.g. "lastStableBuild").
// BUG FIX: an unknown buildType previously panicked; library code
// should return an error instead. All in-file callers pass valid keys,
// so their behavior is unchanged.
func (j *Job) getBuildByType(buildType string) (*Build, error) {
	allowed := map[string]JobBuild{
		"lastStableBuild":     j.Raw.LastStableBuild,
		"lastSuccessfulBuild": j.Raw.LastSuccessfulBuild,
		"lastBuild":           j.Raw.LastBuild,
		"lastCompletedBuild":  j.Raw.LastCompletedBuild,
		"firstBuild":          j.Raw.FirstBuild,
		"lastFailedBuild":     j.Raw.LastFailedBuild,
	}
	val, ok := allowed[buildType]
	if !ok {
		return nil, errors.New("no such build type: " + buildType)
	}
	number := strconv.FormatInt(val.Number, 10)
	build := Build{
		Jenkins: j.Jenkins,
		Depth:   1,
		Job:     j,
		Raw:     new(BuildResponse),
		Base:    j.Base + "/" + number}
	status, err := build.Poll()
	if err != nil {
		return nil, err
	}
	if status == 200 {
		return &build, nil
	}
	return nil, errors.New(strconv.Itoa(status))
}
// GetLastSuccessfulBuild fetches the job's last successful build.
func (j *Job) GetLastSuccessfulBuild() (*Build, error) {
	return j.getBuildByType("lastSuccessfulBuild")
}

// GetFirstBuild fetches the job's first recorded build.
func (j *Job) GetFirstBuild() (*Build, error) {
	return j.getBuildByType("firstBuild")
}

// GetLastBuild fetches the job's most recent build.
func (j *Job) GetLastBuild() (*Build, error) {
	return j.getBuildByType("lastBuild")
}

// GetLastStableBuild fetches the job's last stable build.
func (j *Job) GetLastStableBuild() (*Build, error) {
	return j.getBuildByType("lastStableBuild")
}

// GetLastFailedBuild fetches the job's last failed build.
func (j *Job) GetLastFailedBuild() (*Build, error) {
	return j.getBuildByType("lastFailedBuild")
}

// GetLastCompletedBuild fetches the job's last completed build.
func (j *Job) GetLastCompletedBuild() (*Build, error) {
	return j.getBuildByType("lastCompletedBuild")
}
// Returns All Builds with Number and URL
// GetAllBuildIds queries the job with a tree filter so Jenkins returns
// the full build list (allBuilds) instead of the truncated default.
func (j *Job) GetAllBuildIds() ([]JobBuild, error) {
	var buildsResp struct {
		Builds []JobBuild `json:"allBuilds"`
	}
	_, err := j.Jenkins.Requester.GetJSON(j.Base, &buildsResp, map[string]string{"tree": "allBuilds[number,url]"})
	if err != nil {
		return nil, err
	}
	return buildsResp.Builds, nil
}

// GetSubJobsMetadata returns the cached sub-job summaries.
func (j *Job) GetSubJobsMetadata() []InnerJob {
	return j.Raw.SubJobs
}

// GetUpstreamJobsMetadata returns the cached upstream-project summaries.
func (j *Job) GetUpstreamJobsMetadata() []InnerJob {
	return j.Raw.UpstreamProjects
}

// GetDownstreamJobsMetadata returns the cached downstream-project summaries.
func (j *Job) GetDownstreamJobsMetadata() []InnerJob {
	return j.Raw.DownstreamProjects
}
// GetSubJobs resolves every cached sub-job summary into a full Job via
// the Jenkins API; the first failure aborts and is returned.
func (j *Job) GetSubJobs() ([]*Job, error) {
	jobs := make([]*Job, len(j.Raw.SubJobs))
	for i, job := range j.Raw.SubJobs {
		ji, err := j.Jenkins.GetSubJob(j.GetName(), job.Name)
		if err != nil {
			return nil, err
		}
		jobs[i] = ji
	}
	return jobs, nil
}

// GetInnerJobsMetadata returns the cached inner-job summaries.
func (j *Job) GetInnerJobsMetadata() []InnerJob {
	return j.Raw.Jobs
}

// GetUpstreamJobs resolves every cached upstream-project summary into
// a full Job; the first failure aborts and is returned.
func (j *Job) GetUpstreamJobs() ([]*Job, error) {
	jobs := make([]*Job, len(j.Raw.UpstreamProjects))
	for i, job := range j.Raw.UpstreamProjects {
		ji, err := j.Jenkins.GetJob(job.Name)
		if err != nil {
			return nil, err
		}
		jobs[i] = ji
	}
	return jobs, nil
}

// GetDownstreamJobs resolves every cached downstream-project summary
// into a full Job; the first failure aborts and is returned.
func (j *Job) GetDownstreamJobs() ([]*Job, error) {
	jobs := make([]*Job, len(j.Raw.DownstreamProjects))
	for i, job := range j.Raw.DownstreamProjects {
		ji, err := j.Jenkins.GetJob(job.Name)
		if err != nil {
			return nil, err
		}
		jobs[i] = ji
	}
	return jobs, nil
}
// GetInnerJob fetches the nested job with the given name, rooted at
// this job's Base. A non-200 poll status becomes an error whose text
// is the status code.
func (j *Job) GetInnerJob(id string) (*Job, error) {
	job := Job{Jenkins: j.Jenkins, Raw: new(JobResponse), Base: j.Base + "/job/" + id}
	status, err := job.Poll()
	if err != nil {
		return nil, err
	}
	if status == 200 {
		return &job, nil
	}
	return nil, errors.New(strconv.Itoa(status))
}

// GetInnerJobs resolves every cached inner-job summary into a full
// Job; the first failure aborts and is returned.
func (j *Job) GetInnerJobs() ([]*Job, error) {
	jobs := make([]*Job, len(j.Raw.Jobs))
	for i, job := range j.Raw.Jobs {
		ji, err := j.GetInnerJob(job.Name)
		if err != nil {
			return nil, err
		}
		jobs[i] = ji
	}
	return jobs, nil
}
// postSimpleAction POSTs an empty body to the given endpoint under the
// job's Base and reports success; a non-200 status becomes an error
// whose text is the status code. Shared by Enable, Disable and Delete,
// which previously triplicated this body.
func (j *Job) postSimpleAction(endpoint string) (bool, error) {
	resp, err := j.Jenkins.Requester.Post(j.Base+endpoint, nil, nil, nil)
	if err != nil {
		return false, err
	}
	if resp.StatusCode != 200 {
		return false, errors.New(strconv.Itoa(resp.StatusCode))
	}
	return true, nil
}

// Enable re-enables a disabled job.
func (j *Job) Enable() (bool, error) {
	return j.postSimpleAction("/enable")
}

// Disable disables the job so it cannot be built.
func (j *Job) Disable() (bool, error) {
	return j.postSimpleAction("/disable")
}

// Delete permanently removes the job from Jenkins.
func (j *Job) Delete() (bool, error) {
	return j.postSimpleAction("/doDelete")
}
// Rename renames the job on the server. Note that j.Base still refers
// to the old name afterwards.
// BUG FIX: unlike the sibling actions (Enable/Disable/Delete), the
// response status was previously ignored, so a failed rename reported
// success; a non-200 status is now returned as an error.
func (j *Job) Rename(name string) (bool, error) {
	data := url.Values{}
	data.Set("newName", name)
	resp, err := j.Jenkins.Requester.Post(j.Base+"/doRename", bytes.NewBufferString(data.Encode()), nil, nil)
	if err != nil {
		return false, err
	}
	if resp.StatusCode != 200 {
		return false, errors.New(strconv.Itoa(resp.StatusCode))
	}
	return true, nil
}
// Create posts the given XML config to the parent's createItem
// endpoint to create this job, then polls to refresh j.Raw. The
// optional qr argument is a map[string]string of query parameters
// (e.g. the job name). A non-200 status becomes an error whose text is
// the status code.
func (j *Job) Create(config string, qr ...interface{}) (*Job, error) {
	var querystring map[string]string
	if len(qr) > 0 {
		querystring = qr[0].(map[string]string)
	}
	resp, err := j.Jenkins.Requester.PostXML(j.parentBase()+"/createItem", config, j.Raw, querystring)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode == 200 {
		// Best-effort refresh of the cached response; its error is ignored.
		j.Poll()
		return j, nil
	}
	return nil, errors.New(strconv.Itoa(resp.StatusCode))
}

// Copy clones this job on the server under destinationName (Jenkins
// "copy" mode) and returns the new job, polled once. A non-200 status
// becomes an error whose text is the status code.
func (j *Job) Copy(destinationName string) (*Job, error) {
	qr := map[string]string{"name": destinationName, "from": j.GetName(), "mode": "copy"}
	resp, err := j.Jenkins.Requester.Post(j.parentBase()+"/createItem", nil, nil, qr)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode == 200 {
		newJob := &Job{Jenkins: j.Jenkins, Raw: new(JobResponse), Base: "/job/" + destinationName}
		_, err := newJob.Poll()
		if err != nil {
			return nil, err
		}
		return newJob, nil
	}
	return nil, errors.New(strconv.Itoa(resp.StatusCode))
}
// UpdateConfig replaces the job's config.xml with the given XML and
// refreshes the cached response on success. A non-200 status becomes
// an error whose text is the status code.
func (j *Job) UpdateConfig(config string) error {
	var querystring map[string]string
	resp, err := j.Jenkins.Requester.PostXML(j.Base+"/config.xml", config, nil, querystring)
	if err != nil {
		return err
	}
	if resp.StatusCode == 200 {
		// Best-effort refresh of the cached response; its error is ignored.
		j.Poll()
		return nil
	}
	return errors.New(strconv.Itoa(resp.StatusCode))
}

// GetConfig fetches the job's raw config.xml as a string.
func (j *Job) GetConfig() (string, error) {
	var data string
	_, err := j.Jenkins.Requester.GetXML(j.Base+"/config.xml", &data, nil)
	if err != nil {
		return "", err
	}
	return data, nil
}
// GetParameters refreshes the job and returns every parameter
// definition declared across all of its properties. Jobs without
// parameters yield a nil slice.
func (j *Job) GetParameters() ([]ParameterDefinition, error) {
	_, err := j.Poll()
	if err != nil {
		return nil, err
	}
	var parameters []ParameterDefinition
	for _, property := range j.Raw.Property {
		// staticcheck S1011: append the whole slice instead of
		// copying element by element.
		parameters = append(parameters, property.ParameterDefinitions...)
	}
	return parameters, nil
}
// IsQueued refreshes the job and reports whether it is waiting in the
// build queue.
func (j *Job) IsQueued() (bool, error) {
	if _, err := j.Poll(); err != nil {
		return false, err
	}
	return j.Raw.InQueue, nil
}

// IsRunning refreshes the job and reports whether its most recent
// build is currently running.
func (j *Job) IsRunning() (bool, error) {
	if _, err := j.Poll(); err != nil {
		return false, err
	}
	lastBuild, err := j.GetLastBuild()
	if err != nil {
		return false, err
	}
	return lastBuild.IsRunning(), nil
}

// IsEnabled refreshes the job and reports whether it is enabled
// (Jenkins marks disabled jobs with the color "disabled").
func (j *Job) IsEnabled() (bool, error) {
	if _, err := j.Poll(); err != nil {
		return false, err
	}
	return j.Raw.Color != "disabled", nil
}
// HasQueuedBuild is a placeholder that always panics.
// NOTE(review): library code should not panic — consider returning an
// error (or implementing via IsQueued) instead.
func (j *Job) HasQueuedBuild() {
	panic("Not Implemented yet")
}
// InvokeSimple triggers a build of the job with the given string
// parameters and returns the queue item number parsed from the final
// path segment of the response's Location header. If the job is
// already queued it returns (0, nil) without triggering anything.
func (j *Job) InvokeSimple(params map[string]string) (int64, error) {
	isQueued, err := j.IsQueued()
	if err != nil {
		return 0, err
	}
	if isQueued {
		Error.Printf("%s is already running", j.GetName())
		return 0, nil
	}
	// Parameterized jobs must be triggered via /buildWithParameters.
	endpoint := "/build"
	parameters, err := j.GetParameters()
	if err != nil {
		return 0, err
	}
	if len(parameters) > 0 {
		endpoint = "/buildWithParameters"
	}
	data := url.Values{}
	for k, v := range params {
		data.Set(k, v)
	}
	resp, err := j.Jenkins.Requester.Post(j.Base+endpoint, bytes.NewBufferString(data.Encode()), nil, nil)
	if err != nil {
		return 0, err
	}
	// Jenkins answers 201 (Created) on success; 200 is also accepted.
	if resp.StatusCode != 200 && resp.StatusCode != 201 {
		return 0, errors.New("Could not invoke job " + j.GetName())
	}
	// The Location header points at the queue item; its last path
	// segment is the numeric queue id.
	location := resp.Header.Get("Location")
	if location == "" {
		return 0, errors.New("Don't have key \"Location\" in response of header")
	}
	u, err := url.Parse(location)
	if err != nil {
		return 0, err
	}
	number, err := strconv.ParseInt(path.Base(u.Path), 10, 64)
	if err != nil {
		return 0, err
	}
	return number, nil
}
// Invoke triggers a build, optionally uploading files and passing
// parameters. It returns (false, nil) when the job is already queued,
// and an error when skipIfRunning is set and a build is running.
// NOTE(review): the skipIfRunning "skip" is reported as an error, not
// a silent skip — confirm callers expect that.
func (j *Job) Invoke(files []string, skipIfRunning bool, params map[string]string, cause string, securityToken string) (bool, error) {
	isQueued, err := j.IsQueued()
	if err != nil {
		return false, err
	}
	if isQueued {
		Error.Printf("%s is already running", j.GetName())
		return false, nil
	}
	isRunning, err := j.IsRunning()
	if err != nil {
		return false, err
	}
	if isRunning && skipIfRunning {
		return false, fmt.Errorf("Will not request new build because %s is already running", j.GetName())
	}
	base := "/build"
	// If parameters are specified - url is /builWithParameters
	if params != nil {
		base = "/buildWithParameters"
	} else {
		params = make(map[string]string)
	}
	// If files are specified - url is /build
	// (file uploads take precedence over the parameters endpoint)
	if files != nil {
		base = "/build"
	}
	reqParams := map[string]string{}
	buildParams := map[string]string{}
	if securityToken != "" {
		reqParams["token"] = securityToken
	}
	// Jenkins expects the parameters wrapped in a "json" form field.
	buildParams["json"] = string(makeJson(params))
	b, _ := json.Marshal(buildParams)
	resp, err := j.Jenkins.Requester.PostFiles(j.Base+base, bytes.NewBuffer(b), nil, reqParams, files)
	if err != nil {
		return false, err
	}
	if resp.StatusCode == 200 || resp.StatusCode == 201 {
		return true, nil
	}
	return false, errors.New(strconv.Itoa(resp.StatusCode))
}
// Poll fetches the job's JSON document into j.Raw and returns the HTTP
// status code of the request.
func (j *Job) Poll() (int, error) {
	response, err := j.Jenkins.Requester.GetJSON(j.Base, j.Raw, nil)
	if err != nil {
		return 0, err
	}
	return response.StatusCode, nil
}
// History fetches and parses the job's build history from the
// buildHistory endpoint.
// BUG FIX: the response body was never closed, leaking the connection;
// it is now closed after parsing (the deferred Close runs after the
// return value has been computed).
func (j *Job) History() ([]*History, error) {
	resp, err := j.Jenkins.Requester.Get(j.Base+"/buildHistory/ajax", nil, nil)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return parseBuildHistory(resp.Body), nil
}
simplify loop assignment
job.go:381:3: should replace loop with parameters = append(parameters, property.ParameterDefinitions...) (S1011)
// Copyright 2015 Vadim Kravcenko
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package gojenkins
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"net/url"
"path"
"strconv"
"strings"
)
// Job wraps one Jenkins job: Raw caches the most recently fetched API
// response, Jenkins is the owning client, and Base is the job's URL
// path (e.g. "/job/<name>").
type Job struct {
Raw *JobResponse
Jenkins *Jenkins
Base string
}
// JobBuild is a minimal build reference: build number plus URL.
type JobBuild struct {
Number int64
URL string
}
// InnerJob is the lightweight job summary Jenkins embeds in job lists
// (sub-, upstream- and downstream-job listings).
type InnerJob struct {
Name string `json:"name"`
Url string `json:"url"`
Color string `json:"color"`
}
// ParameterDefinition describes one build parameter as reported by the
// Jenkins API, including its default value.
type ParameterDefinition struct {
DefaultParameterValue struct {
Name string `json:"name"`
Value interface{} `json:"value"`
} `json:"defaultParameterValue"`
Description string `json:"description"`
Name string `json:"name"`
Type string `json:"type"`
}
// JobResponse mirrors the JSON document returned by the Jenkins job
// API endpoint.
type JobResponse struct {
Actions []generalObj
Buildable bool `json:"buildable"`
Builds []JobBuild
Color string `json:"color"`
ConcurrentBuild bool `json:"concurrentBuild"`
Description string `json:"description"`
DisplayName string `json:"displayName"`
DisplayNameOrNull interface{} `json:"displayNameOrNull"`
DownstreamProjects []InnerJob `json:"downstreamProjects"`
FirstBuild JobBuild
HealthReport []struct {
Description string `json:"description"`
IconClassName string `json:"iconClassName"`
IconUrl string `json:"iconUrl"`
Score int64 `json:"score"`
} `json:"healthReport"`
InQueue bool `json:"inQueue"`
KeepDependencies bool `json:"keepDependencies"`
LastBuild JobBuild `json:"lastBuild"`
LastCompletedBuild JobBuild `json:"lastCompletedBuild"`
LastFailedBuild JobBuild `json:"lastFailedBuild"`
LastStableBuild JobBuild `json:"lastStableBuild"`
LastSuccessfulBuild JobBuild `json:"lastSuccessfulBuild"`
LastUnstableBuild JobBuild `json:"lastUnstableBuild"`
LastUnsuccessfulBuild JobBuild `json:"lastUnsuccessfulBuild"`
Name string `json:"name"`
SubJobs []InnerJob `json:"jobs"`
NextBuildNumber int64 `json:"nextBuildNumber"`
Property []struct {
ParameterDefinitions []ParameterDefinition `json:"parameterDefinitions"`
} `json:"property"`
QueueItem interface{} `json:"queueItem"`
Scm struct{} `json:"scm"`
UpstreamProjects []InnerJob `json:"upstreamProjects"`
URL string `json:"url"`
Jobs []InnerJob `json:"jobs"`
PrimaryView *ViewData `json:"primaryView"`
Views []ViewData `json:"views"`
}
// parentBase strips the trailing "/job/<name>" segment from the job's
// base path, yielding the URL path of the folder/server that owns it.
func (j *Job) parentBase() string {
return j.Base[:strings.LastIndex(j.Base, "/job/")]
}
// History is one row of the job's build history.
type History struct {
BuildNumber int
BuildStatus string
BuildTimestamp int64
}
// GetName returns the job name from the cached API response.
func (j *Job) GetName() string {
return j.Raw.Name
}
// GetDescription returns the job description from the cached response.
func (j *Job) GetDescription() string {
return j.Raw.Description
}
// GetDetails exposes the raw cached API response.
func (j *Job) GetDetails() *JobResponse {
return j.Raw
}
// GetBuild fetches the build with the given number for this job,
// returning it on HTTP 200 and an error carrying the status code
// otherwise.
func (j *Job) GetBuild(id int64) (*Build, error) {
	buildBase := "/job/" + j.GetName() + "/" + strconv.FormatInt(id, 10)
	b := Build{
		Jenkins: j.Jenkins,
		Job:     j,
		Raw:     new(BuildResponse),
		Depth:   1,
		Base:    buildBase,
	}
	status, err := b.Poll()
	if err != nil {
		return nil, err
	}
	if status != 200 {
		return nil, errors.New(strconv.Itoa(status))
	}
	return &b, nil
}
// getBuildByType looks up one of the "special" build references Jenkins
// reports on the job (last stable, last successful, first, ...) and
// polls it.
//
// buildType must be one of the keys listed below. An unknown key now
// yields an error instead of a panic: this function already returns an
// error, and panicking in library code was hostile to callers.
// Returns the polled *Build on HTTP 200, otherwise an error carrying
// the status code, matching the other getters in this file.
func (j *Job) getBuildByType(buildType string) (*Build, error) {
	allowed := map[string]JobBuild{
		"lastStableBuild":     j.Raw.LastStableBuild,
		"lastSuccessfulBuild": j.Raw.LastSuccessfulBuild,
		"lastBuild":           j.Raw.LastBuild,
		"lastCompletedBuild":  j.Raw.LastCompletedBuild,
		"firstBuild":          j.Raw.FirstBuild,
		"lastFailedBuild":     j.Raw.LastFailedBuild,
	}
	val, ok := allowed[buildType]
	if !ok {
		return nil, errors.New("no such build: " + buildType)
	}
	build := Build{
		Jenkins: j.Jenkins,
		Depth:   1,
		Job:     j,
		Raw:     new(BuildResponse),
		Base:    j.Base + "/" + strconv.FormatInt(val.Number, 10),
	}
	status, err := build.Poll()
	if err != nil {
		return nil, err
	}
	if status == 200 {
		return &build, nil
	}
	return nil, errors.New(strconv.Itoa(status))
}
// GetLastSuccessfulBuild returns the job's last successful build.
func (j *Job) GetLastSuccessfulBuild() (*Build, error) {
return j.getBuildByType("lastSuccessfulBuild")
}
// GetFirstBuild returns the job's first recorded build.
func (j *Job) GetFirstBuild() (*Build, error) {
return j.getBuildByType("firstBuild")
}
// GetLastBuild returns the job's most recent build.
func (j *Job) GetLastBuild() (*Build, error) {
return j.getBuildByType("lastBuild")
}
// GetLastStableBuild returns the job's last stable build.
func (j *Job) GetLastStableBuild() (*Build, error) {
return j.getBuildByType("lastStableBuild")
}
// GetLastFailedBuild returns the job's last failed build.
func (j *Job) GetLastFailedBuild() (*Build, error) {
return j.getBuildByType("lastFailedBuild")
}
// GetLastCompletedBuild returns the job's last completed build.
func (j *Job) GetLastCompletedBuild() (*Build, error) {
return j.getBuildByType("lastCompletedBuild")
}
// GetAllBuildIds returns every build of the job as a (number, URL)
// pair, using the API's tree filter to request only those two fields.
func (j *Job) GetAllBuildIds() ([]JobBuild, error) {
	var payload struct {
		Builds []JobBuild `json:"allBuilds"`
	}
	tree := map[string]string{"tree": "allBuilds[number,url]"}
	if _, err := j.Jenkins.Requester.GetJSON(j.Base, &payload, tree); err != nil {
		return nil, err
	}
	return payload.Builds, nil
}
// GetSubJobsMetadata returns the cached lightweight summaries of the
// job's sub-jobs (no extra API calls).
func (j *Job) GetSubJobsMetadata() []InnerJob {
return j.Raw.SubJobs
}
// GetUpstreamJobsMetadata returns the cached summaries of upstream jobs.
func (j *Job) GetUpstreamJobsMetadata() []InnerJob {
return j.Raw.UpstreamProjects
}
// GetDownstreamJobsMetadata returns the cached summaries of downstream jobs.
func (j *Job) GetDownstreamJobsMetadata() []InnerJob {
return j.Raw.DownstreamProjects
}
// GetSubJobs resolves each cached sub-job summary into a full *Job,
// one API request per sub-job; the first failure aborts the whole call.
func (j *Job) GetSubJobs() ([]*Job, error) {
jobs := make([]*Job, len(j.Raw.SubJobs))
for i, job := range j.Raw.SubJobs {
ji, err := j.Jenkins.GetSubJob(j.GetName(), job.Name)
if err != nil {
return nil, err
}
jobs[i] = ji
}
return jobs, nil
}
// GetInnerJobsMetadata returns the cached summaries of nested jobs
// (folder-style children).
func (j *Job) GetInnerJobsMetadata() []InnerJob {
return j.Raw.Jobs
}
// GetUpstreamJobs resolves each upstream summary into a full *Job,
// one API request per job; the first failure aborts the whole call.
func (j *Job) GetUpstreamJobs() ([]*Job, error) {
jobs := make([]*Job, len(j.Raw.UpstreamProjects))
for i, job := range j.Raw.UpstreamProjects {
ji, err := j.Jenkins.GetJob(job.Name)
if err != nil {
return nil, err
}
jobs[i] = ji
}
return jobs, nil
}
// GetDownstreamJobs resolves each downstream summary into a full *Job,
// one API request per job; the first failure aborts the whole call.
func (j *Job) GetDownstreamJobs() ([]*Job, error) {
jobs := make([]*Job, len(j.Raw.DownstreamProjects))
for i, job := range j.Raw.DownstreamProjects {
ji, err := j.Jenkins.GetJob(job.Name)
if err != nil {
return nil, err
}
jobs[i] = ji
}
return jobs, nil
}
// GetInnerJob fetches the nested job with the given name under this
// job's URL; non-200 statuses become an error with the status code.
func (j *Job) GetInnerJob(id string) (*Job, error) {
job := Job{Jenkins: j.Jenkins, Raw: new(JobResponse), Base: j.Base + "/job/" + id}
status, err := job.Poll()
if err != nil {
return nil, err
}
if status == 200 {
return &job, nil
}
return nil, errors.New(strconv.Itoa(status))
}
// GetInnerJobs resolves every cached nested-job summary into a full
// *Job; the first failure aborts the whole call.
func (j *Job) GetInnerJobs() ([]*Job, error) {
jobs := make([]*Job, len(j.Raw.Jobs))
for i, job := range j.Raw.Jobs {
ji, err := j.GetInnerJob(job.Name)
if err != nil {
return nil, err
}
jobs[i] = ji
}
return jobs, nil
}
// postAction issues an empty POST to j.Base+suffix and reports whether
// Jenkins answered HTTP 200; any other status is returned as an error
// carrying the status code (the convention used throughout this file).
// Enable, Disable and Delete previously carried three byte-identical
// bodies; they now share this helper.
func (j *Job) postAction(suffix string) (bool, error) {
	resp, err := j.Jenkins.Requester.Post(j.Base+suffix, nil, nil, nil)
	if err != nil {
		return false, err
	}
	if resp.StatusCode != 200 {
		return false, errors.New(strconv.Itoa(resp.StatusCode))
	}
	return true, nil
}

// Enable re-enables a previously disabled job.
func (j *Job) Enable() (bool, error) {
	return j.postAction("/enable")
}

// Disable stops the job from being triggered until enabled again.
func (j *Job) Disable() (bool, error) {
	return j.postAction("/disable")
}

// Delete permanently removes the job from the Jenkins server.
func (j *Job) Delete() (bool, error) {
	return j.postAction("/doDelete")
}
// Rename renames the job on the server via the /doRename form endpoint.
//
// Unlike its siblings (Enable/Disable/Delete) the original discarded the
// HTTP response and reported success even on a non-200 status; it now
// checks the status code the same way the rest of this file does.
func (j *Job) Rename(name string) (bool, error) {
	data := url.Values{}
	data.Set("newName", name)
	resp, err := j.Jenkins.Requester.Post(j.Base+"/doRename", bytes.NewBufferString(data.Encode()), nil, nil)
	if err != nil {
		return false, err
	}
	if resp.StatusCode != 200 {
		return false, errors.New(strconv.Itoa(resp.StatusCode))
	}
	return true, nil
}
// Create creates the job on the server from an XML config. An optional
// first variadic argument may supply extra query parameters as a
// map[string]string.
//
// The original used an unchecked type assertion on qr[0], panicking on
// any other type; a wrong argument now produces a normal error.
func (j *Job) Create(config string, qr ...interface{}) (*Job, error) {
	var querystring map[string]string
	if len(qr) > 0 {
		qs, ok := qr[0].(map[string]string)
		if !ok {
			return nil, errors.New("Create: query parameters must be a map[string]string")
		}
		querystring = qs
	}
	resp, err := j.Jenkins.Requester.PostXML(j.parentBase()+"/createItem", config, j.Raw, querystring)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode == 200 {
		// Best-effort refresh of the cached job data, as before.
		j.Poll()
		return j, nil
	}
	return nil, errors.New(strconv.Itoa(resp.StatusCode))
}
// Copy clones this job into a new job named destinationName using the
// server-side "copy" mode of /createItem, then polls the new job and
// returns it. Non-200 statuses become an error with the status code.
func (j *Job) Copy(destinationName string) (*Job, error) {
	query := map[string]string{
		"name": destinationName,
		"from": j.GetName(),
		"mode": "copy",
	}
	resp, err := j.Jenkins.Requester.Post(j.parentBase()+"/createItem", nil, nil, query)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != 200 {
		return nil, errors.New(strconv.Itoa(resp.StatusCode))
	}
	clone := &Job{Jenkins: j.Jenkins, Raw: new(JobResponse), Base: "/job/" + destinationName}
	if _, err := clone.Poll(); err != nil {
		return nil, err
	}
	return clone, nil
}
// UpdateConfig replaces the job's XML configuration on the server and,
// on success, refreshes the cached job data (best-effort).
func (j *Job) UpdateConfig(config string) error {
	var querystring map[string]string
	resp, err := j.Jenkins.Requester.PostXML(j.Base+"/config.xml", config, nil, querystring)
	if err != nil {
		return err
	}
	if resp.StatusCode != 200 {
		return errors.New(strconv.Itoa(resp.StatusCode))
	}
	j.Poll()
	return nil
}
// GetConfig fetches the job's raw XML configuration from the server.
func (j *Job) GetConfig() (string, error) {
var data string
_, err := j.Jenkins.Requester.GetXML(j.Base+"/config.xml", &data, nil)
if err != nil {
return "", err
}
return data, nil
}
// GetParameters refreshes the job data and returns all build parameter
// definitions collected from every "property" entry.
func (j *Job) GetParameters() ([]ParameterDefinition, error) {
_, err := j.Poll()
if err != nil {
return nil, err
}
var parameters []ParameterDefinition
for _, property := range j.Raw.Property {
parameters = append(parameters, property.ParameterDefinitions...)
}
return parameters, nil
}
// IsQueued refreshes the job data and reports whether a build of this
// job is currently waiting in the queue.
func (j *Job) IsQueued() (bool, error) {
if _, err := j.Poll(); err != nil {
return false, err
}
return j.Raw.InQueue, nil
}
// IsRunning refreshes the job data and reports whether the job's most
// recent build is still executing.
func (j *Job) IsRunning() (bool, error) {
if _, err := j.Poll(); err != nil {
return false, err
}
lastBuild, err := j.GetLastBuild()
if err != nil {
return false, err
}
return lastBuild.IsRunning(), nil
}
// IsEnabled refreshes the job data and reports whether the job is
// enabled (Jenkins reports disabled jobs with color "disabled").
func (j *Job) IsEnabled() (bool, error) {
if _, err := j.Poll(); err != nil {
return false, err
}
return j.Raw.Color != "disabled", nil
}
// HasQueuedBuild is a placeholder; calling it always panics.
func (j *Job) HasQueuedBuild() {
panic("Not Implemented yet")
}
// InvokeSimple triggers a build of the job with the given parameters
// and returns the queue item id parsed from the "Location" response
// header. If the job is already queued it returns (0, nil) and only
// logs.
func (j *Job) InvokeSimple(params map[string]string) (int64, error) {
isQueued, err := j.IsQueued()
if err != nil {
return 0, err
}
if isQueued {
// Already waiting in the queue; do not enqueue a second build.
Error.Printf("%s is already running", j.GetName())
return 0, nil
}
endpoint := "/build"
parameters, err := j.GetParameters()
if err != nil {
return 0, err
}
if len(parameters) > 0 {
// Parameterized jobs must be triggered via /buildWithParameters.
endpoint = "/buildWithParameters"
}
data := url.Values{}
for k, v := range params {
data.Set(k, v)
}
resp, err := j.Jenkins.Requester.Post(j.Base+endpoint, bytes.NewBufferString(data.Encode()), nil, nil)
if err != nil {
return 0, err
}
if resp.StatusCode != 200 && resp.StatusCode != 201 {
return 0, errors.New("Could not invoke job " + j.GetName())
}
// Jenkins answers with a Location header pointing at the queue item,
// e.g. ".../queue/item/123/"; the last path element is the item id.
location := resp.Header.Get("Location")
if location == "" {
return 0, errors.New("Don't have key \"Location\" in response of header")
}
u, err := url.Parse(location)
if err != nil {
return 0, err
}
number, err := strconv.ParseInt(path.Base(u.Path), 10, 64)
if err != nil {
return 0, err
}
return number, nil
}
// Invoke triggers a build of the job, optionally uploading files and
// passing build parameters. It returns (true, nil) when Jenkins
// answers 200/201, (false, nil) when the job is already queued, and an
// error otherwise.
//
// NOTE(review): the cause parameter is accepted but never used in this
// body — confirm whether it should be forwarded to Jenkins.
func (j *Job) Invoke(files []string, skipIfRunning bool, params map[string]string, cause string, securityToken string) (bool, error) {
isQueued, err := j.IsQueued()
if err != nil {
return false, err
}
if isQueued {
// Already waiting in the queue; do not enqueue a second build.
Error.Printf("%s is already running", j.GetName())
return false, nil
}
isRunning, err := j.IsRunning()
if err != nil {
return false, err
}
if isRunning && skipIfRunning {
return false, fmt.Errorf("Will not request new build because %s is already running", j.GetName())
}
base := "/build"
// If parameters are specified - url is /buildWithParameters
if params != nil {
base = "/buildWithParameters"
} else {
params = make(map[string]string)
}
// If files are specified - url is /build
if files != nil {
base = "/build"
}
// reqParams go on the query string; buildParams travel in the body as
// the JSON form Jenkins expects.
reqParams := map[string]string{}
buildParams := map[string]string{}
if securityToken != "" {
reqParams["token"] = securityToken
}
buildParams["json"] = string(makeJson(params))
// NOTE(review): the json.Marshal error is deliberately discarded —
// buildParams is a map[string]string and cannot fail to marshal.
b, _ := json.Marshal(buildParams)
resp, err := j.Jenkins.Requester.PostFiles(j.Base+base, bytes.NewBuffer(b), nil, reqParams, files)
if err != nil {
return false, err
}
if resp.StatusCode == 200 || resp.StatusCode == 201 {
return true, nil
}
return false, errors.New(strconv.Itoa(resp.StatusCode))
}
// Poll refreshes the cached job data in j.Raw from the Jenkins API and
// returns the HTTP status code of the request.
func (j *Job) Poll() (int, error) {
response, err := j.Jenkins.Requester.GetJSON(j.Base, j.Raw, nil)
if err != nil {
return 0, err
}
return response.StatusCode, nil
}
// History fetches the job's build history from the non-API
// "/buildHistory/ajax" endpoint and parses the returned document.
func (j *Job) History() ([]*History, error) {
resp, err := j.Jenkins.Requester.Get(j.Base+"/buildHistory/ajax", nil, nil)
if err != nil {
return nil, err
}
// NOTE(review): resp.Body is handed to parseBuildHistory; presumably
// closed there — confirm to avoid leaking the connection.
return parseBuildHistory(resp.Body), nil
}
|
package main
import (
"bytes"
"encoding/gob"
"fmt"
"github.com/rdwilliamson/aws/glacier"
"io"
"log"
"os"
"strconv"
"time"
)
// retrievalData is the resumable state of an archive retrieval. It is
// persisted to "<output>.gob" after every downloaded part so an
// interrupted download can be continued with the "resume" command.
type retrievalData struct {
	Region       string
	Vault        string
	PartSize     uint64
	Job          string
	Downloaded   uint64
	Size         uint64
	FullTreeHash string
}

// Package-level state shared by the "run" and "resume" commands.
var (
	output string
	data   retrievalData
)

// saveState serializes the retrieval state to the named file with
// encoding/gob. Failures are logged rather than fatal: losing one
// checkpoint is recoverable, aborting a multi-hour download is not.
func (data *retrievalData) saveState(output string) {
	file, err := os.Create(output)
	if err != nil {
		log.Println("could not save state:", err)
		return
	}
	defer file.Close()
	if err := gob.NewEncoder(file).Encode(data); err != nil {
		log.Println("could not save state:", err)
	}
}
func job(args []string) {
if len(args) < 1 {
fmt.Println("no job command")
os.Exit(1)
}
command := args[0]
args = args[1:]
switch command {
case "inventory":
args = getConnection(args)
if len(args) < 1 {
fmt.Println("no vault")
os.Exit(1)
}
vault := args[0]
args = args[1:]
var description, topic string
if len(args) > 0 {
topic = args[0]
}
if len(args) > 1 {
description = args[1]
}
jobId, err := connection.InitiateInventoryJob(vault, topic, description)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
fmt.Println(jobId)
case "archive":
args = getConnection(args)
if len(args) < 2 {
fmt.Println("no vault")
os.Exit(1)
}
vault := args[0]
archive := args[1]
args = args[2:]
var description, topic string
if len(args) > 0 {
topic = args[0]
}
if len(args) > 1 {
description = args[1]
}
jobId, err := connection.InitiateRetrievalJob(vault, archive, topic, description)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
fmt.Println(jobId)
case "list":
args = getConnection(args)
if len(args) < 1 {
fmt.Println("no vault")
os.Exit(1)
}
vault := args[0]
jobs, _, err := connection.ListJobs(vault, "", "", "", 0)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
for _, v := range jobs {
fmt.Println("Action:", v.Action)
if v.Action == "ArchiveRetrieval" {
fmt.Println("Archive ID:", v.ArchiveId)
fmt.Println("Archive Size:", v.ArchiveSizeInBytes, prettySize(v.ArchiveSizeInBytes))
}
fmt.Println("Completed:", v.Completed)
if v.Completed {
fmt.Println("Completion Date:", v.CompletionDate)
}
fmt.Println("Creation Date:", v.CreationDate)
if v.Completed && v.Action == "InventoryRetrieval" {
fmt.Println("Invenotry Size:", v.InventorySizeInBytes, prettySize(uint64(v.InventorySizeInBytes)))
}
fmt.Println("Job Description:", v.JobDescription)
fmt.Println("Job ID:", v.JobId)
if v.Action == "ArchiveRetrieval" {
fmt.Println("SHA256 Tree Hash:", v.SHA256TreeHash)
}
fmt.Println("SNS Topic:", v.SNSTopic)
fmt.Println("Status Code:", v.StatusCode)
fmt.Println("Status Message:", v.StatusMessage)
fmt.Println("Vault ARN:", v.VaultARN)
fmt.Println()
}
case "describe":
args = getConnection(args)
if len(args) < 2 {
fmt.Println("no vault and/or job id")
os.Exit(1)
}
vault := args[0]
jobId := args[1]
job, err := connection.DescribeJob(vault, jobId)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
fmt.Println("Action:", job.Action)
if job.Action == "ArchiveRetrieval" {
fmt.Println("Archive ID:", job.ArchiveId)
fmt.Println("Archive Size:", job.ArchiveSizeInBytes, prettySize(job.ArchiveSizeInBytes))
}
fmt.Println("Completed:", job.Completed)
if job.Completed {
fmt.Println("Completion Date:", job.CompletionDate)
}
fmt.Println("Creation Date:", job.CreationDate)
if job.Completed && job.Action == "InventoryRetrieval" {
fmt.Println("Invenotry Size:", job.InventorySizeInBytes, prettySize(uint64(job.InventorySizeInBytes)))
}
fmt.Println("Job Description:", job.JobDescription)
fmt.Println("Job ID:", job.JobId)
if job.Action == "ArchiveRetrieval" {
fmt.Println("SHA256 Tree Hash:", job.SHA256TreeHash)
}
fmt.Println("SNS Topic:", job.SNSTopic)
fmt.Println("Status Code:", job.StatusCode)
fmt.Println("Status Message:", job.StatusMessage)
fmt.Println("Vault ARN:", job.VaultARN)
case "get":
if len(args) < 1 {
fmt.Println("no job sub command")
os.Exit(1)
}
subCommand := args[0]
args = args[1:]
switch subCommand {
case "inventory":
args = getConnection(args)
if len(args) < 2 {
fmt.Println("no vault and/or job id")
os.Exit(1)
}
vault := args[0]
job := args[1]
inventory, err := connection.GetInventoryJob(vault, job)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
fmt.Println("Vault ARN:", inventory.VaultARN)
fmt.Println("Inventory Date:", inventory.InventoryDate)
for _, v := range inventory.ArchiveList {
fmt.Println()
fmt.Println("Archive ID:", v.ArchiveId)
fmt.Println("Archive Description:", v.ArchiveDescription)
fmt.Println("Creation Date:", v.CreationDate)
fmt.Println("Size:", v.Size, prettySize(v.Size))
fmt.Println("SHA256 Tree Hash:", v.SHA256TreeHash)
}
case "archive":
args = getConnection(args)
if len(args) < 3 {
fmt.Println("no vault, job id, and/or output file")
os.Exit(1)
}
vault := args[0]
job := args[1]
fileName := args[2]
file, err := os.Create(fileName)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
defer file.Close()
archive, _, err := connection.GetRetrievalJob(vault, job, 0, 0)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
defer archive.Close()
_, err = io.Copy(file, archive)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
default:
fmt.Println("unknown job sub command:", subCommand)
os.Exit(1)
}
case "run":
args = getConnection(args)
if len(args) < 4 {
fmt.Println("no vault, archive, download size and/or output file")
os.Exit(1)
}
vault := args[0]
archive := args[1]
partSize, err := strconv.ParseUint(args[2], 10, 64)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
partSize *= 1024 * 1024
output = args[3]
args = args[4:]
var topic string
if len(args) > 0 {
topic = args[0]
args = args[1:]
}
var description string
if len(args) > 0 {
description = args[0]
args = args[1:]
}
// initiate retrieval job
job, err := connection.InitiateRetrievalJob(vault, archive, topic, description)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
log.Println("initiated retrieval job:", job)
// save state
data.Region = connection.Signature.Region.Name
data.Vault = vault
data.PartSize = partSize
data.Job = job
data.saveState(output + ".gob")
// wait for job to complete, using polling
time.Sleep(3 * time.Hour)
// check status sleeping 15m?
var try int
for {
job, err := connection.DescribeJob(vault, job)
if err != nil {
log.Println(err)
try++
if try > retries {
fmt.Println("too many retries")
os.Exit(1)
}
} else {
try = 0
if job.Completed {
data.Size = uint64(job.ArchiveSizeInBytes)
data.FullTreeHash = job.SHA256TreeHash
data.saveState(output + ".gob")
break
}
log.Println("retrieval job not yet completed")
time.Sleep(15 * time.Minute)
}
}
fallthrough
case "resume":
if command == "resume" {
if len(args) < 1 {
fmt.Println("no filename")
os.Exit(1)
}
output = args[0]
args = args[1:]
file, err := os.Open(output + ".gob")
if err != nil {
fmt.Println("could not resume:", err)
os.Exit(1)
}
dec := gob.NewDecoder(file)
err = dec.Decode(&data)
file.Close()
if err != nil {
fmt.Println("could not resume:", err)
os.Exit(1)
}
getConnection([]string{data.Region})
if len(args) > 0 {
data.PartSize, err = strconv.ParseUint(args[0], 10, 64)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
data.PartSize *= 1024 * 1024
data.saveState(output + ".gob")
}
}
file, err := os.OpenFile(output, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0666)
if err != nil {
log.Println(err)
os.Exit(1)
}
defer file.Close()
// loop getting parts, checking tree hash of each
buffer := bytes.NewBuffer(make([]byte, data.PartSize))
hasher := glacier.NewTreeHash()
var try int
if command == "resume" {
_, err = file.Seek(int64(data.Downloaded), 0)
if err != nil {
fmt.Println("could not resume:", err)
os.Exit(1)
}
}
for data.Downloaded < data.Size {
log.Println("downloading", data.Downloaded, "to", data.Downloaded+data.PartSize-1, "of", data.Size)
buffer.Reset()
hasher.Reset()
part, treeHash, err := connection.GetRetrievalJob(data.Vault, data.Job, data.Downloaded,
data.Downloaded+data.PartSize-1)
if err != nil {
log.Println("GetRetrievalJob:", err)
try++
if try > retries {
fmt.Println("too many retries")
os.Exit(1)
}
continue
}
// copy to temporary buffer
_, err = io.Copy(buffer, part)
if err != nil {
log.Println("io.Copy:", err)
try++
if try > retries {
fmt.Println("too many retries")
os.Exit(1)
}
continue
}
// check tree hash
// TODO only if size is MiB power of two and partial content aligns
// on a MiB
hasher.Write(buffer.Bytes())
hasher.Close()
if treeHash != hasher.TreeHash() {
log.Println("tree hash mismatch, want", treeHash, "got", hasher.TreeHash())
try++
if try > retries {
fmt.Println("too many retries")
os.Exit(1)
}
continue
}
log.Println("checked tree hash")
// copy to file
_, err = file.Write(buffer.Bytes())
if err != nil {
log.Println("copying buffer to file:", err)
try++
if try > retries {
fmt.Println("too many retries")
os.Exit(1)
}
}
log.Println("copied to file")
// save state
data.Downloaded += uint64(buffer.Len())
data.saveState(output + ".gob")
try = 0
}
// check tree hash of entire archive
log.Println("download complete, verifying")
_, err = file.Seek(0, 0)
if err != nil {
log.Println("seek:", err)
os.Exit(1)
}
hasher.Reset()
_, err = io.Copy(hasher, file)
if err != nil {
log.Println("hashing whole file:", err)
os.Exit(1)
}
hasher.Close()
if hasher.TreeHash() != data.FullTreeHash {
log.Println("entire file tree hash mismatch, want", data.FullTreeHash, "got", hasher.TreeHash())
os.Exit(1)
}
os.Remove(output + ".gob")
default:
fmt.Println("unknown job command:", command)
os.Exit(1)
}
}
Feed the tree hasher with io.CopyN instead of hasher.Write: with Write on the whole buffer the per-part hash came out wrong for parts larger than 2 MiB, while copying exactly PartSize bytes through io.CopyN yields the correct hash. Root cause not yet identified.
package main
import (
"bytes"
"encoding/gob"
"fmt"
"github.com/rdwilliamson/aws/glacier"
"io"
"log"
"os"
"strconv"
"time"
)
// retrievalData is the resumable state of an archive retrieval. It is
// persisted to "<output>.gob" after every downloaded part so an
// interrupted download can be continued with the "resume" command.
type retrievalData struct {
Region string
Vault string
PartSize uint64
Job string
Downloaded uint64
Size uint64
FullTreeHash string
}
// Package-level state shared by the "run" and "resume" commands.
var (
output string
data retrievalData
)
// saveState serializes the retrieval state to the named file with
// encoding/gob. Failures are logged rather than fatal: losing one
// checkpoint is recoverable, aborting a multi-hour download is not.
func (data *retrievalData) saveState(output string) {
file, err := os.Create(output)
if err != nil {
log.Println("could not save state:", err)
return
}
defer file.Close()
enc := gob.NewEncoder(file)
err = enc.Encode(data)
if err != nil {
log.Println("could not save state:", err)
return
}
}
// job dispatches the "job" CLI subcommands: initiate inventory or
// archive-retrieval jobs, list/describe jobs, fetch completed job
// output, and a resumable end-to-end archive download ("run", which
// falls through into "resume"). Errors print to stdout and exit(1).
func job(args []string) {
if len(args) < 1 {
fmt.Println("no job command")
os.Exit(1)
}
command := args[0]
args = args[1:]
switch command {
// "inventory <vault> [topic [description]]": start an inventory job.
case "inventory":
args = getConnection(args)
if len(args) < 1 {
fmt.Println("no vault")
os.Exit(1)
}
vault := args[0]
args = args[1:]
var description, topic string
if len(args) > 0 {
topic = args[0]
}
if len(args) > 1 {
description = args[1]
}
jobId, err := connection.InitiateInventoryJob(vault, topic, description)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
fmt.Println(jobId)
// "archive <vault> <archive> [topic [description]]": start a retrieval job.
case "archive":
args = getConnection(args)
if len(args) < 2 {
fmt.Println("no vault")
os.Exit(1)
}
vault := args[0]
archive := args[1]
args = args[2:]
var description, topic string
if len(args) > 0 {
topic = args[0]
}
if len(args) > 1 {
description = args[1]
}
jobId, err := connection.InitiateRetrievalJob(vault, archive, topic, description)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
fmt.Println(jobId)
// "list <vault>": print every job on the vault.
case "list":
args = getConnection(args)
if len(args) < 1 {
fmt.Println("no vault")
os.Exit(1)
}
vault := args[0]
jobs, _, err := connection.ListJobs(vault, "", "", "", 0)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
for _, v := range jobs {
fmt.Println("Action:", v.Action)
if v.Action == "ArchiveRetrieval" {
fmt.Println("Archive ID:", v.ArchiveId)
fmt.Println("Archive Size:", v.ArchiveSizeInBytes, prettySize(v.ArchiveSizeInBytes))
}
fmt.Println("Completed:", v.Completed)
if v.Completed {
fmt.Println("Completion Date:", v.CompletionDate)
}
fmt.Println("Creation Date:", v.CreationDate)
if v.Completed && v.Action == "InventoryRetrieval" {
fmt.Println("Invenotry Size:", v.InventorySizeInBytes, prettySize(uint64(v.InventorySizeInBytes)))
}
fmt.Println("Job Description:", v.JobDescription)
fmt.Println("Job ID:", v.JobId)
if v.Action == "ArchiveRetrieval" {
fmt.Println("SHA256 Tree Hash:", v.SHA256TreeHash)
}
fmt.Println("SNS Topic:", v.SNSTopic)
fmt.Println("Status Code:", v.StatusCode)
fmt.Println("Status Message:", v.StatusMessage)
fmt.Println("Vault ARN:", v.VaultARN)
fmt.Println()
}
// "describe <vault> <jobId>": print the details of one job.
case "describe":
args = getConnection(args)
if len(args) < 2 {
fmt.Println("no vault and/or job id")
os.Exit(1)
}
vault := args[0]
jobId := args[1]
job, err := connection.DescribeJob(vault, jobId)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
fmt.Println("Action:", job.Action)
if job.Action == "ArchiveRetrieval" {
fmt.Println("Archive ID:", job.ArchiveId)
fmt.Println("Archive Size:", job.ArchiveSizeInBytes, prettySize(job.ArchiveSizeInBytes))
}
fmt.Println("Completed:", job.Completed)
if job.Completed {
fmt.Println("Completion Date:", job.CompletionDate)
}
fmt.Println("Creation Date:", job.CreationDate)
if job.Completed && job.Action == "InventoryRetrieval" {
fmt.Println("Invenotry Size:", job.InventorySizeInBytes, prettySize(uint64(job.InventorySizeInBytes)))
}
fmt.Println("Job Description:", job.JobDescription)
fmt.Println("Job ID:", job.JobId)
if job.Action == "ArchiveRetrieval" {
fmt.Println("SHA256 Tree Hash:", job.SHA256TreeHash)
}
fmt.Println("SNS Topic:", job.SNSTopic)
fmt.Println("Status Code:", job.StatusCode)
fmt.Println("Status Message:", job.StatusMessage)
fmt.Println("Vault ARN:", job.VaultARN)
// "get inventory|archive ...": download the output of a completed job.
case "get":
if len(args) < 1 {
fmt.Println("no job sub command")
os.Exit(1)
}
subCommand := args[0]
args = args[1:]
switch subCommand {
case "inventory":
args = getConnection(args)
if len(args) < 2 {
fmt.Println("no vault and/or job id")
os.Exit(1)
}
vault := args[0]
job := args[1]
inventory, err := connection.GetInventoryJob(vault, job)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
fmt.Println("Vault ARN:", inventory.VaultARN)
fmt.Println("Inventory Date:", inventory.InventoryDate)
for _, v := range inventory.ArchiveList {
fmt.Println()
fmt.Println("Archive ID:", v.ArchiveId)
fmt.Println("Archive Description:", v.ArchiveDescription)
fmt.Println("Creation Date:", v.CreationDate)
fmt.Println("Size:", v.Size, prettySize(v.Size))
fmt.Println("SHA256 Tree Hash:", v.SHA256TreeHash)
}
case "archive":
args = getConnection(args)
if len(args) < 3 {
fmt.Println("no vault, job id, and/or output file")
os.Exit(1)
}
vault := args[0]
job := args[1]
fileName := args[2]
file, err := os.Create(fileName)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
defer file.Close()
// 0,0 requests the entire archive in a single download.
archive, _, err := connection.GetRetrievalJob(vault, job, 0, 0)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
defer archive.Close()
_, err = io.Copy(file, archive)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
default:
fmt.Println("unknown job sub command:", subCommand)
os.Exit(1)
}
// "run <vault> <archive> <partSizeMiB> <output> [topic [description]]":
// initiate a retrieval job, wait for completion, then fall through to
// "resume" to download it part-by-part with checkpointing.
case "run":
args = getConnection(args)
if len(args) < 4 {
fmt.Println("no vault, archive, download size and/or output file")
os.Exit(1)
}
vault := args[0]
archive := args[1]
// The part size argument is given in MiB.
partSize, err := strconv.ParseUint(args[2], 10, 64)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
partSize *= 1024 * 1024
output = args[3]
args = args[4:]
var topic string
if len(args) > 0 {
topic = args[0]
args = args[1:]
}
var description string
if len(args) > 0 {
description = args[0]
args = args[1:]
}
// initiate retrieval job
job, err := connection.InitiateRetrievalJob(vault, archive, topic, description)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
log.Println("initiated retrieval job:", job)
// save state
data.Region = connection.Signature.Region.Name
data.Vault = vault
data.PartSize = partSize
data.Job = job
data.saveState(output + ".gob")
// wait for job to complete, using polling
// Glacier retrievals typically take about four hours; sleep before
// the first poll to avoid pointless requests.
time.Sleep(3 * time.Hour)
// check status sleeping 15m?
var try int
for {
// NOTE(review): the inner "job" here shadows the outer job id
// string with a job-description struct; intentional but fragile.
job, err := connection.DescribeJob(vault, job)
if err != nil {
log.Println(err)
try++
if try > retries {
fmt.Println("too many retries")
os.Exit(1)
}
} else {
try = 0
if job.Completed {
data.Size = uint64(job.ArchiveSizeInBytes)
data.FullTreeHash = job.SHA256TreeHash
data.saveState(output + ".gob")
break
}
log.Println("retrieval job not yet completed")
time.Sleep(15 * time.Minute)
}
}
fallthrough
// "resume <output> [partSizeMiB]": reload state from <output>.gob and
// continue downloading. Also executed at the tail of "run".
case "resume":
if command == "resume" {
if len(args) < 1 {
fmt.Println("no filename")
os.Exit(1)
}
output = args[0]
args = args[1:]
file, err := os.Open(output + ".gob")
if err != nil {
fmt.Println("could not resume:", err)
os.Exit(1)
}
dec := gob.NewDecoder(file)
err = dec.Decode(&data)
file.Close()
if err != nil {
fmt.Println("could not resume:", err)
os.Exit(1)
}
getConnection([]string{data.Region})
// Optionally override the saved part size (argument in MiB).
if len(args) > 0 {
data.PartSize, err = strconv.ParseUint(args[0], 10, 64)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
data.PartSize *= 1024 * 1024
data.saveState(output + ".gob")
}
}
file, err := os.OpenFile(output, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0666)
if err != nil {
log.Println(err)
os.Exit(1)
}
defer file.Close()
// loop getting parts, checking tree hash of each
buffer := bytes.NewBuffer(make([]byte, data.PartSize))
hasher := glacier.NewTreeHash()
var try int
if command == "resume" {
// NOTE(review): the file is opened with O_APPEND, under which
// writes go to the end of the file regardless of this Seek —
// confirm the resume offset actually takes effect.
_, err = file.Seek(int64(data.Downloaded), 0)
if err != nil {
fmt.Println("could not resume:", err)
os.Exit(1)
}
}
for data.Downloaded < data.Size {
log.Println("downloading", data.Downloaded, "to", data.Downloaded+data.PartSize-1, "of", data.Size)
buffer.Reset()
hasher.Reset()
part, treeHash, err := connection.GetRetrievalJob(data.Vault, data.Job, data.Downloaded,
data.Downloaded+data.PartSize-1)
if err != nil {
log.Println("GetRetrievalJob:", err)
try++
if try > retries {
fmt.Println("too many retries")
os.Exit(1)
}
continue
}
// copy to temporary buffer
_, err = io.Copy(buffer, part)
if err != nil {
log.Println("io.Copy:", err)
try++
if try > retries {
fmt.Println("too many retries")
os.Exit(1)
}
continue
}
// check tree hash
// TODO only if size is MiB power of two and partial content aligns
// on a MiB
// NOTE(review): io.CopyN consumes the buffer's unread bytes, so the
// later buffer.Bytes()/buffer.Len() calls see only what is left
// unread; verify the part content is still written to the file.
// Also: a final part shorter than PartSize makes CopyN return EOF,
// which lands in the retry branch — confirm the last part completes.
_, err = io.CopyN(hasher, buffer, int64(data.PartSize))
if err != nil {
log.Println("hashing", err)
try++
if try > retries {
fmt.Println("too many retries")
os.Exit(1)
}
continue
}
hasher.Close()
if treeHash != hasher.TreeHash() {
log.Println("tree hash mismatch, want", treeHash, "got", hasher.TreeHash())
try++
if try > retries {
fmt.Println("too many retries")
os.Exit(1)
}
continue
}
log.Println("checked tree hash")
// copy to file
_, err = file.Write(buffer.Bytes())
if err != nil {
log.Println("copying buffer to file:", err)
try++
if try > retries {
fmt.Println("too many retries")
os.Exit(1)
}
}
log.Println("copied to file")
// save state
data.Downloaded += uint64(buffer.Len())
data.saveState(output + ".gob")
try = 0
}
// check tree hash of entire archive
log.Println("download complete, verifying")
_, err = file.Seek(0, 0)
if err != nil {
log.Println("seek:", err)
os.Exit(1)
}
hasher.Reset()
_, err = io.Copy(hasher, file)
if err != nil {
log.Println("hashing whole file:", err)
os.Exit(1)
}
hasher.Close()
if hasher.TreeHash() != data.FullTreeHash {
log.Println("entire file tree hash mismatch, want", data.FullTreeHash, "got", hasher.TreeHash())
os.Exit(1)
}
// Success: the checkpoint file is no longer needed.
os.Remove(output + ".gob")
default:
fmt.Println("unknown job command:", command)
os.Exit(1)
}
}
|
// Package kcp - A Fast and Reliable ARQ Protocol
package kcp
import (
"encoding/binary"
"sync/atomic"
)
// Protocol constants, mirroring the reference ikcp.c implementation.
const (
IKCP_RTO_NDL = 30 // no delay min rto
IKCP_RTO_MIN = 100 // normal min rto
IKCP_RTO_DEF = 200
IKCP_RTO_MAX = 60000
IKCP_CMD_PUSH = 81 // cmd: push data
IKCP_CMD_ACK = 82 // cmd: ack
IKCP_CMD_WASK = 83 // cmd: window probe (ask)
IKCP_CMD_WINS = 84 // cmd: window size (tell)
IKCP_ASK_SEND = 1 // need to send IKCP_CMD_WASK
IKCP_ASK_TELL = 2 // need to send IKCP_CMD_WINS
IKCP_WND_SND = 32
IKCP_WND_RCV = 32
IKCP_MTU_DEF = 1400
IKCP_ACK_FAST = 3
IKCP_INTERVAL = 100
IKCP_OVERHEAD = 24
IKCP_DEADLINK = 20
IKCP_THRESH_INIT = 2
IKCP_THRESH_MIN = 2
IKCP_PROBE_INIT = 7000 // 7 secs to probe window size
IKCP_PROBE_LIMIT = 120000 // up to 120 secs to probe window
)
// Output is a closure which captures conn and calls conn.Write; size
// is the number of valid bytes at the front of buf.
type Output func(buf []byte, size int)
// ikcp_encode8u writes an unsigned 8-bit value at the front of b and
// returns the remaining (unwritten) portion of b.
func ikcp_encode8u(b []byte, v byte) []byte {
	b[0] = v
	return b[1:]
}
// ikcp_decode8u reads an unsigned 8-bit value from the front of b into
// out and returns the remaining (unread) portion of b.
func ikcp_decode8u(b []byte, out *byte) []byte {
	*out = b[0]
	return b[1:]
}
// ikcp_encode16u writes w in little-endian order at the front of b and
// returns the remaining portion of b.
func ikcp_encode16u(b []byte, w uint16) []byte {
	binary.LittleEndian.PutUint16(b, w)
	return b[2:]
}
// ikcp_decode16u reads a little-endian 16-bit value from the front of b
// into out and returns the remaining portion of b.
func ikcp_decode16u(b []byte, out *uint16) []byte {
	*out = binary.LittleEndian.Uint16(b)
	return b[2:]
}
// ikcp_encode32u writes l in little-endian order at the front of b and
// returns the remaining portion of b.
func ikcp_encode32u(b []byte, l uint32) []byte {
	binary.LittleEndian.PutUint32(b, l)
	return b[4:]
}
// ikcp_decode32u reads a little-endian 32-bit value from the front of b
// into out and returns the remaining portion of b.
func ikcp_decode32u(b []byte, out *uint32) []byte {
	*out = binary.LittleEndian.Uint32(b)
	return b[4:]
}
// _imin_ returns the smaller of a and b.
func _imin_(a, b uint32) uint32 {
	if b < a {
		return b
	}
	return a
}
// _imax_ returns the larger of a and b.
func _imax_(a, b uint32) uint32 {
	if b > a {
		return b
	}
	return a
}
// _ibound_ clamps middle into the closed range [lower, upper]
// (assumes lower <= upper, as in the original C implementation).
func _ibound_(lower, middle, upper uint32) uint32 {
	v := middle
	if v < lower {
		v = lower
	}
	if v > upper {
		v = upper
	}
	return v
}
// _itimediff returns the signed difference later-earlier, treating the
// uint32 timestamps as a wrapping millisecond clock.
func _itimediff(later, earlier uint32) int32 {
	d := later - earlier
	return int32(d)
}
// Segment defines a KCP segment
type Segment struct {
	conv     uint32 // conversation id
	cmd      uint32 // command: PUSH/ACK/WASK/WINS
	frg      uint32 // remaining-fragment countdown (0 = last fragment)
	wnd      uint32 // sender's available receive window
	ts       uint32 // timestamp when sent
	sn       uint32 // sequence number
	una      uint32 // sender's first unacknowledged sn
	resendts uint32 // retransmission deadline
	rto      uint32 // retransmission timeout for this segment
	fastack  uint32 // times this segment was skipped by later acks
	xmit     uint32 // transmission count
	data     []byte // payload
}
// encode a segment into buffer: writes the 24-byte header
// (conv, cmd, frg, wnd, ts, sn, una, len) in little-endian order and
// returns the remainder of ptr. The payload is NOT copied here.
func (seg *Segment) encode(ptr []byte) []byte {
	ptr = ikcp_encode32u(ptr, seg.conv)
	ptr = ikcp_encode8u(ptr, uint8(seg.cmd))
	ptr = ikcp_encode8u(ptr, uint8(seg.frg))
	ptr = ikcp_encode16u(ptr, uint16(seg.wnd))
	ptr = ikcp_encode32u(ptr, seg.ts)
	ptr = ikcp_encode32u(ptr, seg.sn)
	ptr = ikcp_encode32u(ptr, seg.una)
	ptr = ikcp_encode32u(ptr, uint32(len(seg.data)))
	return ptr
}
// NewSegment creates a KCP segment with a zeroed payload buffer of the
// given size.
func NewSegment(size int) *Segment {
	return &Segment{data: make([]byte, size)}
}
// KCP defines a single KCP connection
type KCP struct {
	conv, mtu, mss, state                  uint32 // conversation id, MTU, max segment size, link state (0xFFFFFFFF = dead)
	snd_una, snd_nxt, rcv_nxt              uint32 // first unacked sn, next sn to assign, next sn expected
	ts_recent, ts_lastack, ssthresh        uint32 // slow-start threshold (first two fields appear unused here)
	rx_rttval, rx_srtt, rx_rto, rx_minrto  uint32 // RTT variance, smoothed RTT, current rto, rto lower bound
	snd_wnd, rcv_wnd, rmt_wnd, cwnd, probe uint32 // send/recv/remote windows, congestion window, probe flags
	current, interval, ts_flush, xmit      uint32 // clock (ms), flush interval, next flush time, total retransmits
	nodelay, updated                       uint32 // nodelay mode; whether Update has been called at least once
	ts_probe, probe_wait                   uint32 // window-probe schedule
	dead_link, incr                        uint32 // dead-link retransmit limit; cwnd byte accumulator
	snd_queue                              []Segment // segments waiting to enter the send window
	rcv_queue                              []Segment // contiguous segments ready for Recv
	snd_buf                                []Segment // sent but unacknowledged segments (sorted by sn)
	rcv_buf                                []Segment // received, possibly out-of-order segments (sorted by sn)
	acklist                                []uint32  // pending acks stored as flat (sn, ts) pairs
	buffer                                 []byte    // scratch packet buffer used by flush
	fastresend                             int32     // dup-ack count that triggers fast retransmit (<=0 disables)
	nocwnd, stream                         int32     // disable congestion control; stream (vs message) mode
	logmask                                int32
	output                                 Output // callback that transmits an assembled packet
}
// NewKCP create a new kcp control object, 'conv' must equal in two endpoint
// from the same connection.
func NewKCP(conv uint32, output Output) *KCP {
	kcp := &KCP{
		conv:      conv,
		snd_wnd:   IKCP_WND_SND,
		rcv_wnd:   IKCP_WND_RCV,
		rmt_wnd:   IKCP_WND_RCV,
		mtu:       IKCP_MTU_DEF,
		mss:       IKCP_MTU_DEF - IKCP_OVERHEAD,
		rx_rto:    IKCP_RTO_DEF,
		rx_minrto: IKCP_RTO_MIN,
		interval:  IKCP_INTERVAL,
		ts_flush:  IKCP_INTERVAL,
		ssthresh:  IKCP_THRESH_INIT,
		dead_link: IKCP_DEADLINK,
		output:    output,
	}
	// room for three full packets of scratch space
	kcp.buffer = make([]byte, (kcp.mtu+IKCP_OVERHEAD)*3)
	return kcp
}
// PeekSize checks the size of next message in the recv queue.
// Returns -1 when no complete message is available yet.
func (kcp *KCP) PeekSize() (length int) {
	if len(kcp.rcv_queue) == 0 {
		return -1
	}
	seg := &kcp.rcv_queue[0]
	if seg.frg == 0 {
		// single-fragment message
		return len(seg.data)
	}
	// frg is a countdown, so a complete message needs frg+1 queued segments
	if len(kcp.rcv_queue) < int(seg.frg+1) {
		return -1
	}
	// sum fragment sizes up to and including the final fragment (frg == 0)
	for k := range kcp.rcv_queue {
		seg := &kcp.rcv_queue[k]
		length += len(seg.data)
		if seg.frg == 0 {
			break
		}
	}
	return
}
// Recv is user/upper level recv: returns size, returns below zero for EAGAIN:
// -1 when the receive queue is empty, -2 when no complete message is
// ready, -3 when buffer is too small for the next message.
func (kcp *KCP) Recv(buffer []byte) (n int) {
	if len(kcp.rcv_queue) == 0 {
		return -1
	}
	peeksize := kcp.PeekSize()
	if peeksize < 0 {
		return -2
	}
	if peeksize > len(buffer) {
		return -3
	}
	// remember whether the receive window was full before draining, so we
	// can later tell the remote side it has reopened
	var fast_recover bool
	if len(kcp.rcv_queue) >= int(kcp.rcv_wnd) {
		fast_recover = true
	}
	// merge fragment: copy fragments into buffer through the final one (frg == 0)
	count := 0
	for k := range kcp.rcv_queue {
		seg := &kcp.rcv_queue[k]
		copy(buffer, seg.data)
		buffer = buffer[len(seg.data):]
		n += len(seg.data)
		count++
		if seg.frg == 0 {
			break
		}
	}
	kcp.rcv_queue = kcp.rcv_queue[count:]
	// move available data from rcv_buf -> rcv_queue while segments are
	// contiguous and the receive window has room
	count = 0
	for k := range kcp.rcv_buf {
		seg := &kcp.rcv_buf[k]
		if seg.sn == kcp.rcv_nxt && len(kcp.rcv_queue) < int(kcp.rcv_wnd) {
			kcp.rcv_queue = append(kcp.rcv_queue, *seg)
			kcp.rcv_nxt++
			count++
		} else {
			break
		}
	}
	kcp.rcv_buf = kcp.rcv_buf[count:]
	// fast recover
	if len(kcp.rcv_queue) < int(kcp.rcv_wnd) && fast_recover {
		// ready to send back IKCP_CMD_WINS in ikcp_flush
		// tell remote my window size
		kcp.probe |= IKCP_ASK_TELL
	}
	return
}
// Send is user/upper level send, returns below zero for error:
// -1 for an empty buffer, -2 when the payload would need more than 255
// fragments (frg travels in a single byte on the wire). The data is
// split into MSS-sized segments appended to the send queue; in message
// mode (stream == 0) each fragment carries a remaining-fragment
// countdown so the receiver can reassemble the original message.
func (kcp *KCP) Send(buffer []byte) int {
	if len(buffer) == 0 {
		return -1
	}
	// number of MSS-sized fragments needed; always >= 1 here because the
	// buffer is non-empty (the original `if count == 0` guard was dead code)
	var count int
	if len(buffer) < int(kcp.mss) {
		count = 1
	} else {
		count = (len(buffer) + int(kcp.mss) - 1) / int(kcp.mss)
	}
	if count > 255 {
		return -2
	}
	for i := 0; i < count; i++ {
		size := len(buffer)
		if size > int(kcp.mss) {
			size = int(kcp.mss)
		}
		seg := NewSegment(size)
		copy(seg.data, buffer[:size])
		if kcp.stream == 0 {
			// message mode: countdown of fragments still to come
			seg.frg = uint32(count - i - 1)
		} else {
			// stream mode: no message boundaries
			seg.frg = 0
		}
		kcp.snd_queue = append(kcp.snd_queue, *seg)
		buffer = buffer[size:]
	}
	return 0
}
// update_ack feeds one RTT sample into the RFC 6298 estimator and
// recomputes the retransmission timeout (rx_rto).
// https://tools.ietf.org/html/rfc6298
func (kcp *KCP) update_ack(rtt int32) {
	if kcp.rx_srtt == 0 {
		// first sample: seed the smoothed RTT and its variance
		kcp.rx_srtt = uint32(rtt)
		kcp.rx_rttval = uint32(rtt) / 2
	} else {
		delta := rtt - int32(kcp.rx_srtt)
		if delta < 0 {
			delta = -delta
		}
		// exponentially weighted moving averages (beta=1/4, alpha=1/8)
		kcp.rx_rttval = (3*kcp.rx_rttval + uint32(delta)) / 4
		kcp.rx_srtt = (7*kcp.rx_srtt + uint32(rtt)) / 8
		if kcp.rx_srtt < 1 {
			kcp.rx_srtt = 1
		}
	}
	rto := kcp.rx_srtt + _imax_(1, 4*kcp.rx_rttval)
	kcp.rx_rto = _ibound_(kcp.rx_minrto, rto, IKCP_RTO_MAX)
}
// shrink_buf recomputes snd_una: the sn of the oldest in-flight
// segment, or snd_nxt when nothing is in flight.
func (kcp *KCP) shrink_buf() {
	if len(kcp.snd_buf) == 0 {
		kcp.snd_una = kcp.snd_nxt
		return
	}
	kcp.snd_una = kcp.snd_buf[0].sn
}
// parse_ack removes the segment acknowledged by sn from snd_buf.
// snd_buf is kept sorted by sn, so the scan can stop early.
func (kcp *KCP) parse_ack(sn uint32) {
	// ignore acks outside the in-flight range [snd_una, snd_nxt)
	if _itimediff(sn, kcp.snd_una) < 0 || _itimediff(sn, kcp.snd_nxt) >= 0 {
		return
	}
	for k := range kcp.snd_buf {
		seg := &kcp.snd_buf[k]
		if sn == seg.sn {
			kcp.snd_buf = append(kcp.snd_buf[:k], kcp.snd_buf[k+1:]...)
			break
		}
		if _itimediff(sn, seg.sn) < 0 {
			// passed the position sn would occupy; it is no longer buffered
			break
		}
	}
}
// parse_fastack bumps the fastack (skipped-by-ack) counter of every
// in-flight segment older than sn; segments skipped often enough become
// fast-retransmit candidates in flush.
func (kcp *KCP) parse_fastack(sn uint32) {
	if _itimediff(sn, kcp.snd_una) < 0 || _itimediff(sn, kcp.snd_nxt) >= 0 {
		return
	}
	for k := range kcp.snd_buf {
		seg := &kcp.snd_buf[k]
		if _itimediff(sn, seg.sn) < 0 {
			break
		} else if sn != seg.sn {
			seg.fastack++
		}
	}
}
// parse_una drops every leading segment of snd_buf whose sn is below
// una (the peer has acknowledged everything before una).
func (kcp *KCP) parse_una(una uint32) {
	removed := 0
	for i := range kcp.snd_buf {
		if _itimediff(una, kcp.snd_buf[i].sn) <= 0 {
			break
		}
		removed++
	}
	kcp.snd_buf = kcp.snd_buf[removed:]
}
// ack_push queues an (sn, ts) pair to be acknowledged on the next flush.
func (kcp *KCP) ack_push(sn, ts uint32) {
	kcp.acklist = append(kcp.acklist, sn, ts)
}
// ack_get returns the p-th queued (sn, ts) acknowledgement pair.
func (kcp *KCP) ack_get(p int) (sn, ts uint32) {
	sn = kcp.acklist[2*p]
	ts = kcp.acklist[2*p+1]
	return
}
// parse_data inserts a received PUSH segment into rcv_buf in sn order
// (dropping duplicates and out-of-window segments), then moves any
// now-contiguous segments into rcv_queue.
func (kcp *KCP) parse_data(newseg *Segment) {
	sn := newseg.sn
	// outside the receive window: count it and drop it
	if _itimediff(sn, kcp.rcv_nxt+kcp.rcv_wnd) >= 0 ||
		_itimediff(sn, kcp.rcv_nxt) < 0 {
		atomic.AddUint64(&DefaultSnmp.RepeatSegs, 1)
		return
	}
	// scan backwards for the insertion point (rcv_buf is sorted by sn)
	n := len(kcp.rcv_buf) - 1
	insert_idx := 0
	repeat := false
	for i := n; i >= 0; i-- {
		seg := &kcp.rcv_buf[i]
		if seg.sn == sn {
			repeat = true
			atomic.AddUint64(&DefaultSnmp.RepeatSegs, 1)
			break
		}
		if _itimediff(sn, seg.sn) > 0 {
			insert_idx = i + 1
			break
		}
	}
	if !repeat {
		if insert_idx == n+1 {
			// append at the tail
			kcp.rcv_buf = append(kcp.rcv_buf, *newseg)
		} else {
			// grow by one, shift the tail right, insert in place
			kcp.rcv_buf = append(kcp.rcv_buf, Segment{})
			copy(kcp.rcv_buf[insert_idx+1:], kcp.rcv_buf[insert_idx:])
			kcp.rcv_buf[insert_idx] = *newseg
		}
	}
	// move available data from rcv_buf -> rcv_queue
	count := 0
	for k := range kcp.rcv_buf {
		seg := &kcp.rcv_buf[k]
		if seg.sn == kcp.rcv_nxt && len(kcp.rcv_queue) < int(kcp.rcv_wnd) {
			kcp.rcv_queue = append(kcp.rcv_queue, kcp.rcv_buf[k])
			kcp.rcv_nxt++
			count++
		} else {
			break
		}
	}
	kcp.rcv_buf = kcp.rcv_buf[count:]
}
// Input when you received a low level packet (eg. UDP packet), call it.
// Parses one or more KCP segments out of data. Returns 0 on success, or
// a negative value on malformed input: -1 for a short packet or wrong
// conversation id, -2 for a truncated payload, -3 for an unknown command.
func (kcp *KCP) Input(data []byte) int {
	// snapshot of snd_una, used at the bottom to detect whether this
	// packet acknowledged anything new (drives congestion window growth)
	una := kcp.snd_una
	if len(data) < IKCP_OVERHEAD {
		return -1
	}
	var maxack uint32
	var flag int
	for {
		// NOTE: this 'una' deliberately shadows the snapshot above; it
		// holds the una field decoded from the current segment header.
		var ts, sn, length, una, conv uint32
		var wnd uint16
		var cmd, frg uint8
		if len(data) < int(IKCP_OVERHEAD) {
			break
		}
		data = ikcp_decode32u(data, &conv)
		if conv != kcp.conv {
			return -1
		}
		data = ikcp_decode8u(data, &cmd)
		data = ikcp_decode8u(data, &frg)
		data = ikcp_decode16u(data, &wnd)
		data = ikcp_decode32u(data, &ts)
		data = ikcp_decode32u(data, &sn)
		data = ikcp_decode32u(data, &una)
		data = ikcp_decode32u(data, &length)
		if len(data) < int(length) {
			return -2
		}
		if cmd != IKCP_CMD_PUSH && cmd != IKCP_CMD_ACK &&
			cmd != IKCP_CMD_WASK && cmd != IKCP_CMD_WINS {
			return -3
		}
		kcp.rmt_wnd = uint32(wnd)
		// every segment carries the peer's una: drop acknowledged segments
		kcp.parse_una(una)
		kcp.shrink_buf()
		if cmd == IKCP_CMD_ACK {
			if _itimediff(kcp.current, ts) >= 0 {
				kcp.update_ack(_itimediff(kcp.current, ts))
			}
			kcp.parse_ack(sn)
			kcp.shrink_buf()
			// track the largest acked sn for fast-retransmit accounting
			if flag == 0 {
				flag = 1
				maxack = sn
			} else if _itimediff(sn, maxack) > 0 {
				maxack = sn
			}
		} else if cmd == IKCP_CMD_PUSH {
			if _itimediff(sn, kcp.rcv_nxt+kcp.rcv_wnd) < 0 {
				// in-window: always acknowledge, buffer if not stale
				kcp.ack_push(sn, ts)
				if _itimediff(sn, kcp.rcv_nxt) >= 0 {
					seg := NewSegment(int(length))
					seg.conv = conv
					seg.cmd = uint32(cmd)
					seg.frg = uint32(frg)
					seg.wnd = uint32(wnd)
					seg.ts = ts
					seg.sn = sn
					seg.una = una
					copy(seg.data, data[:length])
					kcp.parse_data(seg)
				}
			}
		} else if cmd == IKCP_CMD_WASK {
			// ready to send back IKCP_CMD_WINS in Ikcp_flush
			// tell remote my window size
			kcp.probe |= IKCP_ASK_TELL
		} else if cmd == IKCP_CMD_WINS {
			// do nothing
		} else {
			// unreachable: cmd was already validated above
			return -3
		}
		data = data[length:]
	}
	if flag != 0 {
		kcp.parse_fastack(maxack)
	}
	// congestion window growth when this packet acknowledged new data:
	// slow start below ssthresh, congestion avoidance above it
	if _itimediff(kcp.snd_una, una) > 0 {
		if kcp.cwnd < kcp.rmt_wnd {
			mss := kcp.mss
			if kcp.cwnd < kcp.ssthresh {
				// slow start: one segment per ack
				kcp.cwnd++
				kcp.incr += mss
			} else {
				// congestion avoidance: roughly one segment per RTT
				if kcp.incr < mss {
					kcp.incr = mss
				}
				kcp.incr += (mss*mss)/kcp.incr + (mss / 16)
				if (kcp.cwnd+1)*mss <= kcp.incr {
					kcp.cwnd++
				}
			}
			if kcp.cwnd > kcp.rmt_wnd {
				// never exceed the remote window
				kcp.cwnd = kcp.rmt_wnd
				kcp.incr = kcp.rmt_wnd * mss
			}
		}
	}
	return 0
}
// wnd_unused reports how many more segments the receive queue can
// accept (never negative).
func (kcp *KCP) wnd_unused() int32 {
	if free := int(kcp.rcv_wnd) - len(kcp.rcv_queue); free > 0 {
		return int32(free)
	}
	return 0
}
// flush pending data: acknowledgements, window probes and answers, then
// data segments (new transmissions and retransmissions), and finally
// updates the congestion window. No-op until Update has been called once.
func (kcp *KCP) flush() {
	current := kcp.current
	buffer := kcp.buffer
	change := 0   // counts fast/early retransmits (triggers rate halving)
	lost := false // set on timeout retransmit (triggers multiplicative decrease)
	if kcp.updated == 0 {
		return
	}
	// header template shared by ACK/WASK/WINS segments
	var seg Segment
	seg.conv = kcp.conv
	seg.cmd = IKCP_CMD_ACK
	seg.wnd = uint32(kcp.wnd_unused())
	seg.una = kcp.rcv_nxt
	// flush acknowledges; acklist holds flat (sn, ts) pairs
	count := len(kcp.acklist) / 2
	ptr := buffer
	for i := 0; i < count; i++ {
		size := len(buffer) - len(ptr)
		// emit the packet before the next header would overflow the MTU
		if size+IKCP_OVERHEAD > int(kcp.mtu) {
			kcp.output(buffer, size)
			ptr = buffer
		}
		seg.sn, seg.ts = kcp.ack_get(i)
		ptr = seg.encode(ptr)
	}
	kcp.acklist = nil
	// probe window size (if remote window size equals zero)
	if kcp.rmt_wnd == 0 {
		if kcp.probe_wait == 0 {
			kcp.probe_wait = IKCP_PROBE_INIT
			kcp.ts_probe = kcp.current + kcp.probe_wait
		} else {
			if _itimediff(kcp.current, kcp.ts_probe) >= 0 {
				if kcp.probe_wait < IKCP_PROBE_INIT {
					kcp.probe_wait = IKCP_PROBE_INIT
				}
				// back off by 1.5x, capped at IKCP_PROBE_LIMIT
				kcp.probe_wait += kcp.probe_wait / 2
				if kcp.probe_wait > IKCP_PROBE_LIMIT {
					kcp.probe_wait = IKCP_PROBE_LIMIT
				}
				kcp.ts_probe = kcp.current + kcp.probe_wait
				kcp.probe |= IKCP_ASK_SEND
			}
		}
	} else {
		kcp.ts_probe = 0
		kcp.probe_wait = 0
	}
	// flush window probing commands (ask)
	if (kcp.probe & IKCP_ASK_SEND) != 0 {
		seg.cmd = IKCP_CMD_WASK
		size := len(buffer) - len(ptr)
		if size+IKCP_OVERHEAD > int(kcp.mtu) {
			kcp.output(buffer, size)
			ptr = buffer
		}
		ptr = seg.encode(ptr)
	}
	// flush window size answers (tell)
	if (kcp.probe & IKCP_ASK_TELL) != 0 {
		seg.cmd = IKCP_CMD_WINS
		size := len(buffer) - len(ptr)
		if size+IKCP_OVERHEAD > int(kcp.mtu) {
			kcp.output(buffer, size)
			ptr = buffer
		}
		ptr = seg.encode(ptr)
	}
	kcp.probe = 0
	// calculate window size; congestion window applies unless nocwnd
	cwnd := _imin_(kcp.snd_wnd, kcp.rmt_wnd)
	if kcp.nocwnd == 0 {
		cwnd = _imin_(kcp.cwnd, cwnd)
	}
	// move segments from snd_queue into snd_buf while the window allows
	count = 0
	for k := range kcp.snd_queue {
		if _itimediff(kcp.snd_nxt, kcp.snd_una+cwnd) >= 0 {
			break
		}
		newseg := kcp.snd_queue[k]
		newseg.conv = kcp.conv
		newseg.cmd = IKCP_CMD_PUSH
		newseg.wnd = seg.wnd
		newseg.ts = current
		newseg.sn = kcp.snd_nxt
		newseg.una = kcp.rcv_nxt
		newseg.resendts = current
		newseg.rto = kcp.rx_rto
		newseg.fastack = 0
		newseg.xmit = 0
		kcp.snd_buf = append(kcp.snd_buf, newseg)
		kcp.snd_nxt++
		count++
	}
	kcp.snd_queue = kcp.snd_queue[count:]
	// calculate resent: dup-ack threshold for fast retransmit
	resent := uint32(kcp.fastresend)
	if kcp.fastresend <= 0 {
		resent = 0xffffffff // effectively disabled
	}
	rtomin := (kcp.rx_rto >> 3)
	if kcp.nodelay != 0 {
		rtomin = 0
	}
	// flush data segments
	for k := range kcp.snd_buf {
		segment := &kcp.snd_buf[k]
		needsend := false
		if segment.xmit == 0 {
			// first transmission
			needsend = true
			segment.xmit++
			segment.rto = kcp.rx_rto
			segment.resendts = current + segment.rto + rtomin
		} else if _itimediff(current, segment.resendts) >= 0 {
			// timeout retransmission with RTO backoff
			needsend = true
			segment.xmit++
			kcp.xmit++
			if kcp.nodelay == 0 {
				segment.rto += kcp.rx_rto
			} else {
				segment.rto += kcp.rx_rto / 2
			}
			segment.rto = _imin_(segment.rto, 8*kcp.rx_rto)
			segment.resendts = current + segment.rto
			lost = true
			atomic.AddUint64(&DefaultSnmp.RetransSegs, 1)
			atomic.AddUint64(&DefaultSnmp.LostSegs, 1)
		} else if segment.fastack >= resent {
			// fast retransmission: skipped by enough later acks
			needsend = true
			segment.xmit++
			segment.fastack = 0
			segment.resendts = current + segment.rto
			change++
			atomic.AddUint64(&DefaultSnmp.RetransSegs, 1)
			atomic.AddUint64(&DefaultSnmp.FastRetransSegs, 1)
		} else if segment.fastack > 0 && len(kcp.snd_queue) == 0 {
			// early retransmit
			needsend = true
			segment.xmit++
			segment.fastack = 0
			segment.resendts = current + segment.rto
			change++
			atomic.AddUint64(&DefaultSnmp.RetransSegs, 1)
			atomic.AddUint64(&DefaultSnmp.EarlyRetransSegs, 1)
		}
		if needsend {
			segment.ts = current
			segment.wnd = seg.wnd
			segment.una = kcp.rcv_nxt
			size := len(buffer) - len(ptr)
			need := IKCP_OVERHEAD + len(segment.data)
			if size+need >= int(kcp.mtu) {
				kcp.output(buffer, size)
				ptr = buffer
			}
			ptr = segment.encode(ptr)
			copy(ptr, segment.data)
			ptr = ptr[len(segment.data):]
			if segment.xmit >= kcp.dead_link {
				// too many retransmissions: mark the link dead
				kcp.state = 0xFFFFFFFF
			}
		}
	}
	// flush whatever remains buffered
	size := len(buffer) - len(ptr)
	if size > 0 {
		kcp.output(buffer, size)
	}
	// update ssthresh
	// rate halving, https://tools.ietf.org/html/rfc6937
	if change != 0 {
		inflight := kcp.snd_nxt - kcp.snd_una
		kcp.ssthresh = inflight / 2
		if kcp.ssthresh < IKCP_THRESH_MIN {
			kcp.ssthresh = IKCP_THRESH_MIN
		}
		kcp.cwnd = kcp.ssthresh + resent
		kcp.incr = kcp.cwnd * kcp.mss
	}
	// congestion control, https://tools.ietf.org/html/rfc5681
	if lost {
		kcp.ssthresh = cwnd / 2
		if kcp.ssthresh < IKCP_THRESH_MIN {
			kcp.ssthresh = IKCP_THRESH_MIN
		}
		kcp.cwnd = 1
		kcp.incr = kcp.mss
	}
	if kcp.cwnd < 1 {
		kcp.cwnd = 1
		kcp.incr = kcp.mss
	}
}
// Update updates state (call it repeatedly, every 10ms-100ms), or you can ask
// ikcp_check when to call it again (without ikcp_input/_send calling).
// 'current' - current timestamp in millisec.
func (kcp *KCP) Update(current uint32) {
	var slap int32
	kcp.current = current
	if kcp.updated == 0 {
		// first call: arm the flush clock
		kcp.updated = 1
		kcp.ts_flush = kcp.current
	}
	slap = _itimediff(kcp.current, kcp.ts_flush)
	// resynchronize after a large clock jump in either direction
	if slap >= 10000 || slap < -10000 {
		kcp.ts_flush = kcp.current
		slap = 0
	}
	if slap >= 0 {
		kcp.ts_flush += kcp.interval
		if _itimediff(kcp.current, kcp.ts_flush) >= 0 {
			// flush is overdue: schedule the next one relative to now
			kcp.ts_flush = kcp.current + kcp.interval
		}
		kcp.flush()
	}
}
// Check determines when should you invoke ikcp_update:
// returns when you should invoke ikcp_update in millisec, if there
// is no ikcp_input/_send calling. you can call ikcp_update in that
// time, instead of call update repeatly.
// Important to reduce unnacessary ikcp_update invoking. use it to
// schedule ikcp_update (eg. implementing an epoll-like mechanism,
// or optimize ikcp_update when handling massive kcp connections)
func (kcp *KCP) Check(current uint32) uint32 {
	ts_flush := kcp.ts_flush
	tm_flush := int32(0x7fffffff)
	tm_packet := int32(0x7fffffff)
	minimal := uint32(0)
	if kcp.updated == 0 {
		// Update has never run: call it immediately
		return current
	}
	// resynchronize after a large clock jump
	if _itimediff(current, ts_flush) >= 10000 ||
		_itimediff(current, ts_flush) < -10000 {
		ts_flush = current
	}
	if _itimediff(current, ts_flush) >= 0 {
		// a flush is already due
		return current
	}
	tm_flush = _itimediff(ts_flush, current)
	// find the earliest pending retransmission deadline
	for k := range kcp.snd_buf {
		seg := &kcp.snd_buf[k]
		diff := _itimediff(seg.resendts, current)
		if diff <= 0 {
			return current
		}
		if diff < tm_packet {
			tm_packet = diff
		}
	}
	// next wakeup: min(retransmit deadline, flush deadline, interval)
	minimal = uint32(tm_packet)
	if tm_packet >= tm_flush {
		minimal = uint32(tm_flush)
	}
	if minimal >= kcp.interval {
		minimal = kcp.interval
	}
	return current + minimal
}
// SetMtu changes MTU size, default is 1400.
// Returns 0 on success or -1 when mtu is below the minimum.
func (kcp *KCP) SetMtu(mtu int) int {
	if mtu < 50 || mtu < IKCP_OVERHEAD {
		return -1
	}
	// allocate the new scratch buffer before committing the new sizes.
	// (the original `if buffer == nil { return -2 }` check was dead code:
	// make never returns nil — it panics on failure)
	kcp.buffer = make([]byte, (mtu+IKCP_OVERHEAD)*3)
	kcp.mtu = uint32(mtu)
	kcp.mss = kcp.mtu - IKCP_OVERHEAD
	return 0
}
// NoDelay options
// fastest: ikcp_nodelay(kcp, 1, 20, 2, 1)
// nodelay: 0:disable(default), 1:enable
// interval: internal update timer interval in millisec, default is 100ms
// resend: 0:disable fast resend(default), 1:enable fast resend
// nc: 0:normal congestion control(default), 1:disable congestion control
// A negative argument leaves the corresponding setting unchanged.
func (kcp *KCP) NoDelay(nodelay, interval, resend, nc int) int {
	if nodelay >= 0 {
		kcp.nodelay = uint32(nodelay)
		if nodelay != 0 {
			// nodelay mode uses the aggressive minimum RTO
			kcp.rx_minrto = IKCP_RTO_NDL
		} else {
			kcp.rx_minrto = IKCP_RTO_MIN
		}
	}
	if interval >= 0 {
		// clamp the flush interval to [10, 5000] ms
		if interval > 5000 {
			interval = 5000
		} else if interval < 10 {
			interval = 10
		}
		kcp.interval = uint32(interval)
	}
	if resend >= 0 {
		kcp.fastresend = int32(resend)
	}
	if nc >= 0 {
		kcp.nocwnd = int32(nc)
	}
	return 0
}
// WndSize sets maximum window size: sndwnd=32, rcvwnd=32 by default.
// Non-positive arguments leave the corresponding window unchanged.
func (kcp *KCP) WndSize(sndwnd, rcvwnd int) int {
	if rcvwnd > 0 {
		kcp.rcv_wnd = uint32(rcvwnd)
	}
	if sndwnd > 0 {
		kcp.snd_wnd = uint32(sndwnd)
	}
	return 0
}
// WaitSnd gets how many packet is waiting to be sent: segments still in
// flight (snd_buf) plus segments not yet transmitted (snd_queue).
func (kcp *KCP) WaitSnd() int {
	return len(kcp.snd_buf) + len(kcp.snd_queue)
}
NOTE: the text below is a second copy of the kcp package above, identical except for added comments.
// Package kcp - A Fast and Reliable ARQ Protocol
package kcp
import (
"encoding/binary"
"sync/atomic"
)
const (
	IKCP_RTO_NDL     = 30     // no delay min rto
	IKCP_RTO_MIN     = 100    // normal min rto
	IKCP_RTO_DEF     = 200    // default rto
	IKCP_RTO_MAX     = 60000  // upper bound for rto
	IKCP_CMD_PUSH    = 81     // cmd: push data
	IKCP_CMD_ACK     = 82     // cmd: ack
	IKCP_CMD_WASK    = 83     // cmd: window probe (ask)
	IKCP_CMD_WINS    = 84     // cmd: window size (tell)
	IKCP_ASK_SEND    = 1      // need to send IKCP_CMD_WASK
	IKCP_ASK_TELL    = 2      // need to send IKCP_CMD_WINS
	IKCP_WND_SND     = 32     // default send window (segments)
	IKCP_WND_RCV     = 32     // default receive window (segments)
	IKCP_MTU_DEF     = 1400   // default MTU in bytes
	IKCP_ACK_FAST    = 3
	IKCP_INTERVAL    = 100    // default internal flush interval (ms)
	IKCP_OVERHEAD    = 24     // per-segment wire header size in bytes
	IKCP_DEADLINK    = 20     // retransmission count before the link is marked dead
	IKCP_THRESH_INIT = 2      // initial slow-start threshold
	IKCP_THRESH_MIN  = 2      // minimum slow-start threshold
	IKCP_PROBE_INIT  = 7000   // 7 secs to probe window size
	IKCP_PROBE_LIMIT = 120000 // up to 120 secs to probe window
)
// Output is a closure which captures conn and calls conn.Write.
// buf holds the packet bytes to transmit; size is the number of valid
// bytes at the start of buf.
type Output func(buf []byte, size int)
// ikcp_encode8u writes an unsigned 8-bit value at the front of b and
// returns the remaining (unwritten) portion of b.
func ikcp_encode8u(b []byte, v byte) []byte {
	b[0] = v
	return b[1:]
}
// ikcp_decode8u reads an unsigned 8-bit value from the front of b into
// out and returns the remaining (unread) portion of b.
func ikcp_decode8u(b []byte, out *byte) []byte {
	*out = b[0]
	return b[1:]
}
// ikcp_encode16u writes w in little-endian order at the front of b and
// returns the remaining portion of b.
func ikcp_encode16u(b []byte, w uint16) []byte {
	binary.LittleEndian.PutUint16(b, w)
	return b[2:]
}
// ikcp_decode16u reads a little-endian 16-bit value from the front of b
// into out and returns the remaining portion of b.
func ikcp_decode16u(b []byte, out *uint16) []byte {
	*out = binary.LittleEndian.Uint16(b)
	return b[2:]
}
// ikcp_encode32u writes l in little-endian order at the front of b and
// returns the remaining portion of b.
func ikcp_encode32u(b []byte, l uint32) []byte {
	binary.LittleEndian.PutUint32(b, l)
	return b[4:]
}
// ikcp_decode32u reads a little-endian 32-bit value from the front of b
// into out and returns the remaining portion of b.
func ikcp_decode32u(b []byte, out *uint32) []byte {
	*out = binary.LittleEndian.Uint32(b)
	return b[4:]
}
// _imin_ returns the smaller of a and b.
func _imin_(a, b uint32) uint32 {
	if b < a {
		return b
	}
	return a
}
// _imax_ returns the larger of a and b.
func _imax_(a, b uint32) uint32 {
	if b > a {
		return b
	}
	return a
}
// _ibound_ clamps middle into the closed range [lower, upper]
// (assumes lower <= upper, as in the original C implementation).
func _ibound_(lower, middle, upper uint32) uint32 {
	v := middle
	if v < lower {
		v = lower
	}
	if v > upper {
		v = upper
	}
	return v
}
// _itimediff returns the signed difference later-earlier, treating the
// uint32 timestamps as a wrapping millisecond clock.
func _itimediff(later, earlier uint32) int32 {
	d := later - earlier
	return int32(d)
}
// Segment defines a KCP segment
type Segment struct {
	conv     uint32 // conversation id
	cmd      uint32 // command: PUSH/ACK/WASK/WINS
	frg      uint32 // remaining-fragment countdown (0 = last fragment)
	wnd      uint32 // sender's available receive window
	ts       uint32 // timestamp when sent
	sn       uint32 // sequence number
	una      uint32 // sender's first unacknowledged sn
	resendts uint32 // retransmission deadline
	rto      uint32 // retransmission timeout for this segment
	fastack  uint32 // times this segment was skipped by later acks
	xmit     uint32 // transmission count
	data     []byte // payload
}
// encode a segment into buffer: writes the 24-byte header
// (conv, cmd, frg, wnd, ts, sn, una, len) in little-endian order and
// returns the remainder of ptr. The payload is NOT copied here.
func (seg *Segment) encode(ptr []byte) []byte {
	ptr = ikcp_encode32u(ptr, seg.conv)
	ptr = ikcp_encode8u(ptr, uint8(seg.cmd))
	ptr = ikcp_encode8u(ptr, uint8(seg.frg))
	ptr = ikcp_encode16u(ptr, uint16(seg.wnd))
	ptr = ikcp_encode32u(ptr, seg.ts)
	ptr = ikcp_encode32u(ptr, seg.sn)
	ptr = ikcp_encode32u(ptr, seg.una)
	ptr = ikcp_encode32u(ptr, uint32(len(seg.data)))
	return ptr
}
// NewSegment creates a KCP segment with a zeroed payload buffer of the
// given size.
func NewSegment(size int) *Segment {
	return &Segment{data: make([]byte, size)}
}
// KCP defines a single KCP connection
type KCP struct {
	conv, mtu, mss, state                  uint32 // conversation id, MTU, max segment size, link state (0xFFFFFFFF = dead)
	snd_una, snd_nxt, rcv_nxt              uint32 // first unacked sn, next sn to assign, next sn expected
	ts_recent, ts_lastack, ssthresh        uint32 // slow-start threshold (first two fields appear unused here)
	rx_rttval, rx_srtt, rx_rto, rx_minrto  uint32 // RTT variance, smoothed RTT, current rto, rto lower bound
	snd_wnd, rcv_wnd, rmt_wnd, cwnd, probe uint32 // send/recv/remote windows, congestion window, probe flags
	current, interval, ts_flush, xmit      uint32 // clock (ms), flush interval, next flush time, total retransmits
	nodelay, updated                       uint32 // nodelay mode; whether Update has been called at least once
	ts_probe, probe_wait                   uint32 // window-probe schedule
	dead_link, incr                        uint32 // dead-link retransmit limit; cwnd byte accumulator
	snd_queue                              []Segment // segments waiting to enter the send window
	rcv_queue                              []Segment // contiguous segments ready for Recv
	snd_buf                                []Segment // sent but unacknowledged segments (sorted by sn)
	rcv_buf                                []Segment // received, possibly out-of-order segments (sorted by sn)
	acklist                                []uint32  // pending acks stored as flat (sn, ts) pairs
	buffer                                 []byte    // scratch packet buffer used by flush
	fastresend                             int32     // dup-ack count that triggers fast retransmit (<=0 disables)
	nocwnd, stream                         int32     // disable congestion control; stream (vs message) mode
	logmask                                int32
	output                                 Output // callback that transmits an assembled packet
}
// NewKCP create a new kcp control object, 'conv' must equal in two endpoint
// from the same connection.
func NewKCP(conv uint32, output Output) *KCP {
	kcp := &KCP{
		conv:      conv,
		snd_wnd:   IKCP_WND_SND,
		rcv_wnd:   IKCP_WND_RCV,
		rmt_wnd:   IKCP_WND_RCV,
		mtu:       IKCP_MTU_DEF,
		mss:       IKCP_MTU_DEF - IKCP_OVERHEAD,
		rx_rto:    IKCP_RTO_DEF,
		rx_minrto: IKCP_RTO_MIN,
		interval:  IKCP_INTERVAL,
		ts_flush:  IKCP_INTERVAL,
		ssthresh:  IKCP_THRESH_INIT,
		dead_link: IKCP_DEADLINK,
		output:    output,
	}
	// room for three full packets of scratch space
	kcp.buffer = make([]byte, (kcp.mtu+IKCP_OVERHEAD)*3)
	return kcp
}
// PeekSize checks the size of next message in the recv queue.
// Returns -1 when no complete message is available yet.
func (kcp *KCP) PeekSize() (length int) {
	if len(kcp.rcv_queue) == 0 {
		return -1
	}
	seg := &kcp.rcv_queue[0]
	if seg.frg == 0 {
		// single-fragment message
		return len(seg.data)
	}
	// frg is a countdown, so a complete message needs frg+1 queued segments
	if len(kcp.rcv_queue) < int(seg.frg+1) {
		return -1
	}
	// sum fragment sizes up to and including the final fragment (frg == 0)
	for k := range kcp.rcv_queue {
		seg := &kcp.rcv_queue[k]
		length += len(seg.data)
		if seg.frg == 0 {
			break
		}
	}
	return
}
// Recv is user/upper level recv: returns size, returns below zero for EAGAIN:
// -1 when the receive queue is empty, -2 when no complete message is
// ready, -3 when buffer is too small for the next message.
func (kcp *KCP) Recv(buffer []byte) (n int) {
	if len(kcp.rcv_queue) == 0 {
		return -1
	}
	peeksize := kcp.PeekSize()
	if peeksize < 0 {
		return -2
	}
	if peeksize > len(buffer) {
		return -3
	}
	// remember whether the receive window was full before draining, so we
	// can later tell the remote side it has reopened
	var fast_recover bool
	if len(kcp.rcv_queue) >= int(kcp.rcv_wnd) {
		fast_recover = true
	}
	// merge fragment: copy fragments into buffer through the final one (frg == 0)
	count := 0
	for k := range kcp.rcv_queue {
		seg := &kcp.rcv_queue[k]
		copy(buffer, seg.data)
		buffer = buffer[len(seg.data):]
		n += len(seg.data)
		count++
		if seg.frg == 0 {
			break
		}
	}
	kcp.rcv_queue = kcp.rcv_queue[count:]
	// move available data from rcv_buf -> rcv_queue while segments are
	// contiguous and the receive window has room
	count = 0
	for k := range kcp.rcv_buf {
		seg := &kcp.rcv_buf[k]
		if seg.sn == kcp.rcv_nxt && len(kcp.rcv_queue) < int(kcp.rcv_wnd) {
			kcp.rcv_queue = append(kcp.rcv_queue, *seg)
			kcp.rcv_nxt++
			count++
		} else {
			break
		}
	}
	kcp.rcv_buf = kcp.rcv_buf[count:]
	// fast recover
	if len(kcp.rcv_queue) < int(kcp.rcv_wnd) && fast_recover {
		// ready to send back IKCP_CMD_WINS in ikcp_flush
		// tell remote my window size
		kcp.probe |= IKCP_ASK_TELL
	}
	return
}
// Send is user/upper level send, returns below zero for error:
// -1 for an empty buffer, -2 when the payload would need more than 255
// fragments (frg travels in a single byte on the wire). The data is
// split into MSS-sized segments appended to the send queue; in message
// mode (stream == 0) each fragment carries a remaining-fragment
// countdown so the receiver can reassemble the original message.
func (kcp *KCP) Send(buffer []byte) int {
	if len(buffer) == 0 {
		return -1
	}
	// number of MSS-sized fragments needed; always >= 1 here because the
	// buffer is non-empty (the original `if count == 0` guard was dead code)
	var count int
	if len(buffer) < int(kcp.mss) {
		count = 1
	} else {
		count = (len(buffer) + int(kcp.mss) - 1) / int(kcp.mss)
	}
	if count > 255 {
		return -2
	}
	for i := 0; i < count; i++ {
		size := len(buffer)
		if size > int(kcp.mss) {
			size = int(kcp.mss)
		}
		seg := NewSegment(size)
		copy(seg.data, buffer[:size])
		if kcp.stream == 0 { // message mode: countdown of fragments still to come
			seg.frg = uint32(count - i - 1)
		} else { // stream mode: no message boundaries
			seg.frg = 0
		}
		kcp.snd_queue = append(kcp.snd_queue, *seg)
		buffer = buffer[size:]
	}
	return 0
}
// update_ack feeds one RTT sample into the RFC 6298 estimator and
// recomputes the retransmission timeout (rx_rto).
// https://tools.ietf.org/html/rfc6298
func (kcp *KCP) update_ack(rtt int32) {
	if kcp.rx_srtt == 0 {
		// first sample: seed the smoothed RTT and its variance
		kcp.rx_srtt = uint32(rtt)
		kcp.rx_rttval = uint32(rtt) / 2
	} else {
		delta := rtt - int32(kcp.rx_srtt)
		if delta < 0 {
			delta = -delta
		}
		// exponentially weighted moving averages (beta=1/4, alpha=1/8)
		kcp.rx_rttval = (3*kcp.rx_rttval + uint32(delta)) / 4
		kcp.rx_srtt = (7*kcp.rx_srtt + uint32(rtt)) / 8
		if kcp.rx_srtt < 1 {
			kcp.rx_srtt = 1
		}
	}
	rto := kcp.rx_srtt + _imax_(1, 4*kcp.rx_rttval)
	kcp.rx_rto = _ibound_(kcp.rx_minrto, rto, IKCP_RTO_MAX)
}
// shrink_buf recomputes snd_una: the sn of the oldest in-flight
// segment, or snd_nxt when nothing is in flight.
func (kcp *KCP) shrink_buf() {
	if len(kcp.snd_buf) == 0 {
		kcp.snd_una = kcp.snd_nxt
		return
	}
	kcp.snd_una = kcp.snd_buf[0].sn
}
// parse_ack removes the segment acknowledged by sn from snd_buf.
// snd_buf is kept sorted by sn, so the scan can stop early.
func (kcp *KCP) parse_ack(sn uint32) {
	// ignore acks outside the in-flight range [snd_una, snd_nxt)
	if _itimediff(sn, kcp.snd_una) < 0 || _itimediff(sn, kcp.snd_nxt) >= 0 {
		return
	}
	for k := range kcp.snd_buf {
		seg := &kcp.snd_buf[k]
		if sn == seg.sn {
			kcp.snd_buf = append(kcp.snd_buf[:k], kcp.snd_buf[k+1:]...)
			break
		}
		if _itimediff(sn, seg.sn) < 0 {
			// passed the position sn would occupy; it is no longer buffered
			break
		}
	}
}
// parse_fastack bumps the fastack (skipped-by-ack) counter of every
// in-flight segment older than sn; segments skipped often enough become
// fast-retransmit candidates in flush.
func (kcp *KCP) parse_fastack(sn uint32) {
	if _itimediff(sn, kcp.snd_una) < 0 || _itimediff(sn, kcp.snd_nxt) >= 0 {
		return
	}
	for k := range kcp.snd_buf {
		seg := &kcp.snd_buf[k]
		if _itimediff(sn, seg.sn) < 0 {
			break
		} else if sn != seg.sn {
			seg.fastack++
		}
	}
}
// parse_una drops every leading segment of snd_buf whose sn is below
// una (the peer has acknowledged everything before una).
func (kcp *KCP) parse_una(una uint32) {
	removed := 0
	for i := range kcp.snd_buf {
		if _itimediff(una, kcp.snd_buf[i].sn) <= 0 {
			break
		}
		removed++
	}
	kcp.snd_buf = kcp.snd_buf[removed:]
}
// ack_push queues an (sn, ts) pair to be acknowledged on the next flush.
func (kcp *KCP) ack_push(sn, ts uint32) {
	kcp.acklist = append(kcp.acklist, sn, ts)
}
// ack_get returns the p-th queued (sn, ts) acknowledgement pair.
func (kcp *KCP) ack_get(p int) (sn, ts uint32) {
	sn = kcp.acklist[2*p]
	ts = kcp.acklist[2*p+1]
	return
}
// parse_data inserts a received PUSH segment into rcv_buf in sn order
// (dropping duplicates and out-of-window segments), then moves any
// now-contiguous segments into rcv_queue.
func (kcp *KCP) parse_data(newseg *Segment) {
	sn := newseg.sn
	// outside the receive window: count it and drop it
	if _itimediff(sn, kcp.rcv_nxt+kcp.rcv_wnd) >= 0 ||
		_itimediff(sn, kcp.rcv_nxt) < 0 {
		atomic.AddUint64(&DefaultSnmp.RepeatSegs, 1)
		return
	}
	// scan backwards for the insertion point (rcv_buf is sorted by sn)
	n := len(kcp.rcv_buf) - 1
	insert_idx := 0
	repeat := false
	for i := n; i >= 0; i-- {
		seg := &kcp.rcv_buf[i]
		if seg.sn == sn {
			repeat = true
			atomic.AddUint64(&DefaultSnmp.RepeatSegs, 1)
			break
		}
		if _itimediff(sn, seg.sn) > 0 {
			insert_idx = i + 1
			break
		}
	}
	if !repeat {
		if insert_idx == n+1 {
			// append at the tail
			kcp.rcv_buf = append(kcp.rcv_buf, *newseg)
		} else {
			// grow by one, shift the tail right, insert in place
			kcp.rcv_buf = append(kcp.rcv_buf, Segment{})
			copy(kcp.rcv_buf[insert_idx+1:], kcp.rcv_buf[insert_idx:])
			kcp.rcv_buf[insert_idx] = *newseg
		}
	}
	// move available data from rcv_buf -> rcv_queue
	count := 0
	for k := range kcp.rcv_buf {
		seg := &kcp.rcv_buf[k]
		if seg.sn == kcp.rcv_nxt && len(kcp.rcv_queue) < int(kcp.rcv_wnd) {
			kcp.rcv_queue = append(kcp.rcv_queue, kcp.rcv_buf[k])
			kcp.rcv_nxt++
			count++
		} else {
			break
		}
	}
	kcp.rcv_buf = kcp.rcv_buf[count:]
}
// Input when you received a low level packet (eg. UDP packet), call it.
// Parses one or more KCP segments out of data. Returns 0 on success, or
// a negative value on malformed input: -1 for a short packet or wrong
// conversation id, -2 for a truncated payload, -3 for an unknown command.
func (kcp *KCP) Input(data []byte) int {
	// snapshot of snd_una, used at the bottom to detect whether this
	// packet acknowledged anything new (drives congestion window growth)
	una := kcp.snd_una
	if len(data) < IKCP_OVERHEAD {
		return -1
	}
	var maxack uint32
	var flag int
	for {
		// NOTE: this 'una' deliberately shadows the snapshot above; it
		// holds the una field decoded from the current segment header.
		var ts, sn, length, una, conv uint32
		var wnd uint16
		var cmd, frg uint8
		if len(data) < int(IKCP_OVERHEAD) {
			break
		}
		data = ikcp_decode32u(data, &conv)
		if conv != kcp.conv {
			return -1
		}
		data = ikcp_decode8u(data, &cmd)
		data = ikcp_decode8u(data, &frg)
		data = ikcp_decode16u(data, &wnd)
		data = ikcp_decode32u(data, &ts)
		data = ikcp_decode32u(data, &sn)
		data = ikcp_decode32u(data, &una)
		data = ikcp_decode32u(data, &length)
		if len(data) < int(length) {
			return -2
		}
		if cmd != IKCP_CMD_PUSH && cmd != IKCP_CMD_ACK &&
			cmd != IKCP_CMD_WASK && cmd != IKCP_CMD_WINS {
			return -3
		}
		kcp.rmt_wnd = uint32(wnd)
		// every segment carries the peer's una: drop acknowledged segments
		kcp.parse_una(una)
		kcp.shrink_buf()
		if cmd == IKCP_CMD_ACK {
			if _itimediff(kcp.current, ts) >= 0 {
				kcp.update_ack(_itimediff(kcp.current, ts))
			}
			kcp.parse_ack(sn)
			kcp.shrink_buf()
			// track the largest acked sn for fast-retransmit accounting
			if flag == 0 {
				flag = 1
				maxack = sn
			} else if _itimediff(sn, maxack) > 0 {
				maxack = sn
			}
		} else if cmd == IKCP_CMD_PUSH {
			if _itimediff(sn, kcp.rcv_nxt+kcp.rcv_wnd) < 0 {
				// in-window: always acknowledge, buffer if not stale
				kcp.ack_push(sn, ts)
				if _itimediff(sn, kcp.rcv_nxt) >= 0 {
					seg := NewSegment(int(length))
					seg.conv = conv
					seg.cmd = uint32(cmd)
					seg.frg = uint32(frg)
					seg.wnd = uint32(wnd)
					seg.ts = ts
					seg.sn = sn
					seg.una = una
					copy(seg.data, data[:length])
					kcp.parse_data(seg)
				}
			}
		} else if cmd == IKCP_CMD_WASK {
			// ready to send back IKCP_CMD_WINS in Ikcp_flush
			// tell remote my window size
			kcp.probe |= IKCP_ASK_TELL
		} else if cmd == IKCP_CMD_WINS {
			// do nothing
		} else {
			// unreachable: cmd was already validated above
			return -3
		}
		data = data[length:]
	}
	if flag != 0 {
		kcp.parse_fastack(maxack)
	}
	// congestion window growth when this packet acknowledged new data:
	// slow start below ssthresh, congestion avoidance above it
	if _itimediff(kcp.snd_una, una) > 0 {
		if kcp.cwnd < kcp.rmt_wnd {
			mss := kcp.mss
			if kcp.cwnd < kcp.ssthresh {
				// slow start: one segment per ack
				kcp.cwnd++
				kcp.incr += mss
			} else {
				// congestion avoidance: roughly one segment per RTT
				if kcp.incr < mss {
					kcp.incr = mss
				}
				kcp.incr += (mss*mss)/kcp.incr + (mss / 16)
				if (kcp.cwnd+1)*mss <= kcp.incr {
					kcp.cwnd++
				}
			}
			if kcp.cwnd > kcp.rmt_wnd {
				// never exceed the remote window
				kcp.cwnd = kcp.rmt_wnd
				kcp.incr = kcp.rmt_wnd * mss
			}
		}
	}
	return 0
}
// wnd_unused reports how many more segments the receive queue can
// accept (never negative).
func (kcp *KCP) wnd_unused() int32 {
	if free := int(kcp.rcv_wnd) - len(kcp.rcv_queue); free > 0 {
		return int32(free)
	}
	return 0
}
// flush pending data: acknowledgements, window probes and answers, then
// data segments (new transmissions and retransmissions), and finally
// updates the congestion window. No-op until Update has been called once.
func (kcp *KCP) flush() {
	current := kcp.current
	buffer := kcp.buffer
	change := 0   // counts fast/early retransmits (triggers rate halving)
	lost := false // set on timeout retransmit (triggers multiplicative decrease)
	if kcp.updated == 0 {
		return
	}
	// header template shared by ACK/WASK/WINS segments
	var seg Segment
	seg.conv = kcp.conv
	seg.cmd = IKCP_CMD_ACK
	seg.wnd = uint32(kcp.wnd_unused())
	seg.una = kcp.rcv_nxt
	// flush acknowledges; acklist holds flat (sn, ts) pairs
	count := len(kcp.acklist) / 2
	ptr := buffer
	for i := 0; i < count; i++ {
		size := len(buffer) - len(ptr)
		// emit the packet before the next header would overflow the MTU
		if size+IKCP_OVERHEAD > int(kcp.mtu) {
			kcp.output(buffer, size)
			ptr = buffer
		}
		seg.sn, seg.ts = kcp.ack_get(i)
		ptr = seg.encode(ptr)
	}
	kcp.acklist = nil
	// probe window size (if remote window size equals zero)
	if kcp.rmt_wnd == 0 {
		if kcp.probe_wait == 0 {
			kcp.probe_wait = IKCP_PROBE_INIT
			kcp.ts_probe = kcp.current + kcp.probe_wait
		} else {
			if _itimediff(kcp.current, kcp.ts_probe) >= 0 {
				if kcp.probe_wait < IKCP_PROBE_INIT {
					kcp.probe_wait = IKCP_PROBE_INIT
				}
				// back off by 1.5x, capped at IKCP_PROBE_LIMIT
				kcp.probe_wait += kcp.probe_wait / 2
				if kcp.probe_wait > IKCP_PROBE_LIMIT {
					kcp.probe_wait = IKCP_PROBE_LIMIT
				}
				kcp.ts_probe = kcp.current + kcp.probe_wait
				kcp.probe |= IKCP_ASK_SEND
			}
		}
	} else {
		kcp.ts_probe = 0
		kcp.probe_wait = 0
	}
	// flush window probing commands (ask)
	if (kcp.probe & IKCP_ASK_SEND) != 0 {
		seg.cmd = IKCP_CMD_WASK
		size := len(buffer) - len(ptr)
		if size+IKCP_OVERHEAD > int(kcp.mtu) {
			kcp.output(buffer, size)
			ptr = buffer
		}
		ptr = seg.encode(ptr)
	}
	// flush window size answers (tell)
	if (kcp.probe & IKCP_ASK_TELL) != 0 {
		seg.cmd = IKCP_CMD_WINS
		size := len(buffer) - len(ptr)
		if size+IKCP_OVERHEAD > int(kcp.mtu) {
			kcp.output(buffer, size)
			ptr = buffer
		}
		ptr = seg.encode(ptr)
	}
	kcp.probe = 0
	// calculate window size; congestion window applies unless nocwnd
	cwnd := _imin_(kcp.snd_wnd, kcp.rmt_wnd)
	if kcp.nocwnd == 0 {
		cwnd = _imin_(kcp.cwnd, cwnd)
	}
	// move segments from snd_queue into snd_buf while the window allows
	count = 0
	for k := range kcp.snd_queue {
		if _itimediff(kcp.snd_nxt, kcp.snd_una+cwnd) >= 0 {
			break
		}
		newseg := kcp.snd_queue[k]
		newseg.conv = kcp.conv
		newseg.cmd = IKCP_CMD_PUSH
		newseg.wnd = seg.wnd
		newseg.ts = current
		newseg.sn = kcp.snd_nxt
		newseg.una = kcp.rcv_nxt
		newseg.resendts = current
		newseg.rto = kcp.rx_rto
		newseg.fastack = 0
		newseg.xmit = 0
		kcp.snd_buf = append(kcp.snd_buf, newseg)
		kcp.snd_nxt++
		count++
	}
	kcp.snd_queue = kcp.snd_queue[count:]
	// calculate resent: dup-ack threshold for fast retransmit
	resent := uint32(kcp.fastresend)
	if kcp.fastresend <= 0 {
		resent = 0xffffffff // effectively disabled
	}
	rtomin := (kcp.rx_rto >> 3)
	if kcp.nodelay != 0 {
		rtomin = 0
	}
	// flush data segments
	for k := range kcp.snd_buf {
		segment := &kcp.snd_buf[k]
		needsend := false
		if segment.xmit == 0 {
			// first transmission
			needsend = true
			segment.xmit++
			segment.rto = kcp.rx_rto
			segment.resendts = current + segment.rto + rtomin
		} else if _itimediff(current, segment.resendts) >= 0 {
			// timeout retransmission with RTO backoff
			needsend = true
			segment.xmit++
			kcp.xmit++
			if kcp.nodelay == 0 {
				segment.rto += kcp.rx_rto
			} else {
				segment.rto += kcp.rx_rto / 2
			}
			segment.rto = _imin_(segment.rto, 8*kcp.rx_rto)
			segment.resendts = current + segment.rto
			lost = true
			atomic.AddUint64(&DefaultSnmp.RetransSegs, 1)
			atomic.AddUint64(&DefaultSnmp.LostSegs, 1)
		} else if segment.fastack >= resent {
			// fast retransmission: skipped by enough later acks
			needsend = true
			segment.xmit++
			segment.fastack = 0
			segment.resendts = current + segment.rto
			change++
			atomic.AddUint64(&DefaultSnmp.RetransSegs, 1)
			atomic.AddUint64(&DefaultSnmp.FastRetransSegs, 1)
		} else if segment.fastack > 0 && len(kcp.snd_queue) == 0 {
			// early retransmit
			needsend = true
			segment.xmit++
			segment.fastack = 0
			segment.resendts = current + segment.rto
			change++
			atomic.AddUint64(&DefaultSnmp.RetransSegs, 1)
			atomic.AddUint64(&DefaultSnmp.EarlyRetransSegs, 1)
		}
		if needsend {
			segment.ts = current
			segment.wnd = seg.wnd
			segment.una = kcp.rcv_nxt
			size := len(buffer) - len(ptr)
			need := IKCP_OVERHEAD + len(segment.data)
			if size+need >= int(kcp.mtu) {
				kcp.output(buffer, size)
				ptr = buffer
			}
			ptr = segment.encode(ptr)
			copy(ptr, segment.data)
			ptr = ptr[len(segment.data):]
			if segment.xmit >= kcp.dead_link {
				// too many retransmissions: mark the link dead
				kcp.state = 0xFFFFFFFF
			}
		}
	}
	// flush whatever remains buffered
	size := len(buffer) - len(ptr)
	if size > 0 {
		kcp.output(buffer, size)
	}
	// update ssthresh
	// rate halving, https://tools.ietf.org/html/rfc6937
	if change != 0 {
		inflight := kcp.snd_nxt - kcp.snd_una
		kcp.ssthresh = inflight / 2
		if kcp.ssthresh < IKCP_THRESH_MIN {
			kcp.ssthresh = IKCP_THRESH_MIN
		}
		kcp.cwnd = kcp.ssthresh + resent
		kcp.incr = kcp.cwnd * kcp.mss
	}
	// congestion control, https://tools.ietf.org/html/rfc5681
	if lost {
		kcp.ssthresh = cwnd / 2
		if kcp.ssthresh < IKCP_THRESH_MIN {
			kcp.ssthresh = IKCP_THRESH_MIN
		}
		kcp.cwnd = 1
		kcp.incr = kcp.mss
	}
	if kcp.cwnd < 1 {
		kcp.cwnd = 1
		kcp.incr = kcp.mss
	}
}
// Update updates state (call it repeatedly, every 10ms-100ms), or you can ask
// ikcp_check when to call it again (without ikcp_input/_send calling).
// 'current' - current timestamp in millisec.
func (kcp *KCP) Update(current uint32) {
	kcp.current = current
	if kcp.updated == 0 {
		// first call: anchor the flush clock at "now"
		kcp.updated = 1
		kcp.ts_flush = kcp.current
	}
	slap := _itimediff(kcp.current, kcp.ts_flush)
	if slap >= 10000 || slap < -10000 {
		// schedule drifted too far from the clock; resynchronize
		kcp.ts_flush = kcp.current
		slap = 0
	}
	if slap < 0 {
		return // next flush not due yet
	}
	kcp.ts_flush += kcp.interval
	if _itimediff(kcp.current, kcp.ts_flush) >= 0 {
		kcp.ts_flush = kcp.current + kcp.interval
	}
	kcp.flush()
}
// Check determines when should you invoke ikcp_update:
// returns when you should invoke ikcp_update in millisec, if there
// is no ikcp_input/_send calling. you can call ikcp_update in that
// time, instead of call update repeatly.
// Important to reduce unnacessary ikcp_update invoking. use it to
// schedule ikcp_update (eg. implementing an epoll-like mechanism,
// or optimize ikcp_update when handling massive kcp connections)
func (kcp *KCP) Check(current uint32) uint32 {
	ts_flush := kcp.ts_flush
	tm_flush := int32(0x7fffffff)
	tm_packet := int32(0x7fffffff)
	minimal := uint32(0)
	if kcp.updated == 0 {
		// Update has never run; it must be called immediately
		return current
	}
	if _itimediff(current, ts_flush) >= 10000 ||
		_itimediff(current, ts_flush) < -10000 {
		// flush timestamp badly out of range; treat flush as due now
		ts_flush = current
	}
	if _itimediff(current, ts_flush) >= 0 {
		// periodic flush already due
		return current
	}
	tm_flush = _itimediff(ts_flush, current)
	// find the earliest retransmission deadline among in-flight segments
	for k := range kcp.snd_buf {
		seg := &kcp.snd_buf[k]
		diff := _itimediff(seg.resendts, current)
		if diff <= 0 {
			// a retransmission is already overdue
			return current
		}
		if diff < tm_packet {
			tm_packet = diff
		}
	}
	minimal = uint32(tm_packet)
	if tm_packet >= tm_flush {
		minimal = uint32(tm_flush)
	}
	// never wait longer than one flush interval
	if minimal >= kcp.interval {
		minimal = kcp.interval
	}
	return current + minimal
}
// SetMtu changes MTU size, default is 1400.
// Returns 0 on success or -1 when mtu is too small to carry the
// 24-byte (IKCP_OVERHEAD) header plus payload.
func (kcp *KCP) SetMtu(mtu int) int {
	if mtu < 50 || mtu < IKCP_OVERHEAD {
		return -1
	}
	// Triple-sized scratch buffer: flush may batch several segments
	// before handing them to output.
	// NOTE: make() never returns nil, so the old `if buffer == nil`
	// check (returning -2) was dead code and has been removed.
	buffer := make([]byte, (mtu+IKCP_OVERHEAD)*3)
	kcp.mtu = uint32(mtu)
	kcp.mss = kcp.mtu - IKCP_OVERHEAD
	kcp.buffer = buffer
	return 0
}
// NoDelay options
// fastest: ikcp_nodelay(kcp, 1, 20, 2, 1)
// nodelay: 0:disable(default), 1:enable
// interval: internal update timer interval in millisec, default is 100ms
// resend: 0:disable fast resend(default), 1:enable fast resend
// nc: 0:normal congestion control(default), 1:disable congestion control
// Negative arguments leave the corresponding setting untouched.
func (kcp *KCP) NoDelay(nodelay, interval, resend, nc int) int {
	if nodelay >= 0 {
		kcp.nodelay = uint32(nodelay)
		// nodelay mode lowers the minimum RTO floor
		if nodelay == 0 {
			kcp.rx_minrto = IKCP_RTO_MIN
		} else {
			kcp.rx_minrto = IKCP_RTO_NDL
		}
	}
	if interval >= 0 {
		// clamp the flush interval into [10ms, 5000ms]
		switch {
		case interval > 5000:
			interval = 5000
		case interval < 10:
			interval = 10
		}
		kcp.interval = uint32(interval)
	}
	if resend >= 0 {
		kcp.fastresend = int32(resend)
	}
	if nc >= 0 {
		kcp.nocwnd = int32(nc)
	}
	return 0
}
// WndSize sets maximum window size: sndwnd=32, rcvwnd=32 by default.
// Non-positive values leave the corresponding window unchanged.
// Always returns 0.
func (kcp *KCP) WndSize(sndwnd, rcvwnd int) int {
	if sndwnd > 0 {
		kcp.snd_wnd = uint32(sndwnd)
	}
	if rcvwnd > 0 {
		kcp.rcv_wnd = uint32(rcvwnd)
	}
	return 0
}
// WaitSnd gets how many packet is waiting to be sent:
// in-flight segments (snd_buf) plus queued-but-unsent segments (snd_queue).
func (kcp *KCP) WaitSnd() int {
	return len(kcp.snd_buf) + len(kcp.snd_queue)
}
// ---- (extraction artifact: boundary between concatenated files) ----
// Package kcp - A Fast and Reliable ARQ Protocol
package kcp
import (
"encoding/binary"
"sync/atomic"
)
const (
	IKCP_RTO_NDL     = 30     // no delay min rto
	IKCP_RTO_MIN     = 100    // normal min rto
	IKCP_RTO_DEF     = 200    // initial rto before any RTT sample (ms)
	IKCP_RTO_MAX     = 60000  // upper bound for rto (ms)
	IKCP_CMD_PUSH    = 81     // cmd: push data
	IKCP_CMD_ACK     = 82     // cmd: ack
	IKCP_CMD_WASK    = 83     // cmd: window probe (ask)
	IKCP_CMD_WINS    = 84     // cmd: window size (tell)
	IKCP_ASK_SEND    = 1      // need to send IKCP_CMD_WASK
	IKCP_ASK_TELL    = 2      // need to send IKCP_CMD_WINS
	IKCP_WND_SND     = 32     // default send window (segments)
	IKCP_WND_RCV     = 32     // default receive window (segments)
	IKCP_MTU_DEF     = 1400   // default MTU (bytes)
	IKCP_ACK_FAST    = 3
	IKCP_INTERVAL    = 100    // default internal flush interval (ms)
	IKCP_OVERHEAD    = 24     // per-segment wire header size (bytes)
	IKCP_DEADLINK    = 20     // xmit count after which the link is marked dead
	IKCP_THRESH_INIT = 2      // initial ssthresh (segments)
	IKCP_THRESH_MIN  = 2      // floor for ssthresh (segments)
	IKCP_PROBE_INIT  = 7000   // 7 secs to probe window size
	IKCP_PROBE_LIMIT = 120000 // up to 120 secs to probe window
)
// Output is a closure which captures conn and calls conn.Write.
// flush hands it the first `size` bytes of buf; buf is kcp.buffer and is
// reused for the next packet, so implementations must copy if they retain it.
type Output func(buf []byte, size int)
/* encode 8 bits unsigned int */
// Writes c at the head of p; returns the unwritten remainder of p.
func ikcp_encode8u(p []byte, c byte) []byte {
	p[0] = c
	rest := p[1:]
	return rest
}
/* decode 8 bits unsigned int */
// Reads one byte from p into *c; returns the unread remainder of p.
func ikcp_decode8u(p []byte, c *byte) []byte {
	*c = p[0]
	rest := p[1:]
	return rest
}
/* encode 16 bits unsigned int (lsb) */
// Little-endian encodes w into p[0:2]; returns the unwritten remainder.
func ikcp_encode16u(p []byte, w uint16) []byte {
	binary.LittleEndian.PutUint16(p, w)
	rest := p[2:]
	return rest
}
/* decode 16 bits unsigned int (lsb) */
// Little-endian decodes p[0:2] into *w; returns the unread remainder.
func ikcp_decode16u(p []byte, w *uint16) []byte {
	*w = binary.LittleEndian.Uint16(p)
	rest := p[2:]
	return rest
}
/* encode 32 bits unsigned int (lsb) */
// Little-endian encodes l into p[0:4]; returns the unwritten remainder.
func ikcp_encode32u(p []byte, l uint32) []byte {
	binary.LittleEndian.PutUint32(p, l)
	rest := p[4:]
	return rest
}
/* decode 32 bits unsigned int (lsb) */
// Little-endian decodes p[0:4] into *l; returns the unread remainder.
func ikcp_decode32u(p []byte, l *uint32) []byte {
	*l = binary.LittleEndian.Uint32(p)
	rest := p[4:]
	return rest
}
// _imin_ returns the smaller of a and b.
func _imin_(a, b uint32) uint32 {
	if b < a {
		return b
	}
	return a
}
// _imax_ returns the larger of a and b.
func _imax_(a, b uint32) uint32 {
	if b > a {
		return b
	}
	return a
}
// _ibound_ clamps middle into the range [lower, upper]
// (computed as min(max(lower, middle), upper)).
func _ibound_(lower, middle, upper uint32) uint32 {
	return _imin_(_imax_(lower, middle), upper)
}
// _itimediff returns later-earlier as a signed 32-bit difference,
// which stays correct across uint32 timestamp wraparound.
func _itimediff(later, earlier uint32) int32 {
	return int32(later - earlier)
}
// Segment defines a KCP segment
type Segment struct {
	conv     uint32 // conversation id; must match KCP.conv on input
	cmd      uint32 // command: IKCP_CMD_PUSH/ACK/WASK/WINS
	frg      uint32 // fragment count-down; 0 marks the last fragment of a message
	wnd      uint32 // advertised receive window of the sender
	ts       uint32 // timestamp (ms), echoed in acks for RTT measurement
	sn       uint32 // sequence number
	una      uint32 // sender's rcv_nxt at send time
	data     []byte // payload, backed by the shared xmitBuf pool
	resendts uint32 // next retransmission deadline (ms)
	rto      uint32 // current per-segment retransmission timeout (ms)
	fastack  uint32 // times this segment was skipped over by later acks
	xmit     uint32 // transmission attempts so far
}
// encode a segment into buffer: writes the 24-byte (IKCP_OVERHEAD) wire
// header and returns the remainder of ptr. Payload bytes are copied
// separately by the caller.
func (seg *Segment) encode(ptr []byte) []byte {
	ptr = ikcp_encode32u(ptr, seg.conv)
	ptr = ikcp_encode8u(ptr, uint8(seg.cmd))
	ptr = ikcp_encode8u(ptr, uint8(seg.frg))
	ptr = ikcp_encode16u(ptr, uint16(seg.wnd))
	ptr = ikcp_encode32u(ptr, seg.ts)
	ptr = ikcp_encode32u(ptr, seg.sn)
	ptr = ikcp_encode32u(ptr, seg.una)
	ptr = ikcp_encode32u(ptr, uint32(len(seg.data)))
	// every encoded header counts as one outgoing segment
	atomic.AddUint64(&DefaultSnmp.OutSegs, 1)
	return ptr
}
// KCP defines a single KCP connection
type KCP struct {
	conv, mtu, mss, state                  uint32 // conversation id, MTU, max segment size, link state (0xFFFFFFFF = dead)
	snd_una, snd_nxt, rcv_nxt              uint32 // first unacked sn, next sn to send, next sn expected
	ssthresh                               uint32 // slow-start threshold (segments)
	rx_rttvar, rx_srtt                     int32  // RTT variance and smoothed RTT (ms)
	rx_rto, rx_minrto                      uint32 // current and minimum retransmission timeout (ms)
	snd_wnd, rcv_wnd, rmt_wnd, cwnd, probe uint32 // send/recv/remote/congestion windows; pending probe flags
	interval, ts_flush, xmit               uint32 // flush interval, next flush timestamp, total retransmissions
	nodelay, updated                       uint32 // nodelay mode; whether Update has ever run
	ts_probe, probe_wait                   uint32 // window-probe schedule
	dead_link, incr                        uint32 // xmit limit before dead link; cwnd growth accumulator (bytes)
	fastresend                             int32  // duplicate-ack threshold for fast retransmit (<=0 disables)
	nocwnd, stream                         int32  // disable congestion window; stream (vs message) mode
	snd_queue                              []Segment // user data not yet assigned sequence numbers
	rcv_queue                              []Segment // in-order segments ready for Recv
	snd_buf                                []Segment // in-flight segments awaiting ack
	rcv_buf                                []Segment // out-of-order segments awaiting reassembly
	acklist                                []ackItem // pending acks to emit on the next flush
	buffer                                 []byte    // scratch buffer for building outgoing packets
	output                                 Output    // callback that transmits packed bytes
	datashard, parityshard                 int       // FEC parameters, used to size ack batches in flush
}
// ackItem is one pending acknowledgement: the sequence number to ack and
// the sender timestamp echoed back for RTT measurement.
type ackItem struct {
	sn uint32
	ts uint32
}
// NewKCP create a new kcp control object, 'conv' must equal in two endpoint
// from the same connection.
func NewKCP(conv uint32, output Output) *KCP {
	kcp := &KCP{
		conv:      conv,
		snd_wnd:   IKCP_WND_SND,
		rcv_wnd:   IKCP_WND_RCV,
		rmt_wnd:   IKCP_WND_RCV,
		mtu:       IKCP_MTU_DEF,
		mss:       IKCP_MTU_DEF - IKCP_OVERHEAD,
		rx_rto:    IKCP_RTO_DEF,
		rx_minrto: IKCP_RTO_MIN,
		interval:  IKCP_INTERVAL,
		ts_flush:  IKCP_INTERVAL,
		ssthresh:  IKCP_THRESH_INIT,
		dead_link: IKCP_DEADLINK,
		output:    output,
	}
	// triple-MTU scratch buffer used by flush to batch segments
	kcp.buffer = make([]byte, (kcp.mtu+IKCP_OVERHEAD)*3)
	return kcp
}
// newSegment creates a KCP segment whose payload slice is taken from the
// shared xmitBuf pool and truncated to size.
func (kcp *KCP) newSegment(size int) *Segment {
	seg := new(Segment)
	seg.data = xmitBuf.Get().([]byte)[:size]
	return seg
}
// delSegment recycles a KCP segment's payload back into the xmitBuf pool.
// The segment's data must not be used afterwards.
func (kcp *KCP) delSegment(seg *Segment) {
	xmitBuf.Put(seg.data)
}
// PeekSize checks the size of next message in the recv queue: the total
// byte length of the next complete message, or -1 when none is complete.
func (kcp *KCP) PeekSize() (length int) {
	if len(kcp.rcv_queue) == 0 {
		return -1
	}
	first := &kcp.rcv_queue[0]
	if first.frg == 0 {
		// single-fragment message
		return len(first.data)
	}
	if len(kcp.rcv_queue) < int(first.frg+1) {
		// not all fragments have arrived yet
		return -1
	}
	// sum fragment sizes up to and including the final fragment (frg == 0)
	total := 0
	for i := range kcp.rcv_queue {
		frag := &kcp.rcv_queue[i]
		total += len(frag.data)
		if frag.frg == 0 {
			break
		}
	}
	return total
}
// Recv is user/upper level recv: returns size, returns below zero for EAGAIN
// (-1: queue empty, -2: next message incomplete, -3: buffer too small).
func (kcp *KCP) Recv(buffer []byte) (n int) {
	if len(kcp.rcv_queue) == 0 {
		return -1
	}
	peeksize := kcp.PeekSize()
	if peeksize < 0 {
		return -2
	}
	if peeksize > len(buffer) {
		return -3
	}
	// remember whether the receive queue was full before draining; if so,
	// the peer should later be told that the window has reopened
	var fast_recover bool
	if len(kcp.rcv_queue) >= int(kcp.rcv_wnd) {
		fast_recover = true
	}
	// merge fragment
	count := 0
	for k := range kcp.rcv_queue {
		seg := &kcp.rcv_queue[k]
		copy(buffer, seg.data)
		buffer = buffer[len(seg.data):]
		n += len(seg.data)
		count++
		kcp.delSegment(seg)
		if seg.frg == 0 {
			// last fragment of the message
			break
		}
	}
	kcp.rcv_queue = kcp.rcv_queue[count:]
	// move available data from rcv_buf -> rcv_queue
	count = 0
	for k := range kcp.rcv_buf {
		seg := &kcp.rcv_buf[k]
		if seg.sn == kcp.rcv_nxt && len(kcp.rcv_queue) < int(kcp.rcv_wnd) {
			kcp.rcv_nxt++
			count++
		} else {
			break
		}
	}
	kcp.rcv_queue = append(kcp.rcv_queue, kcp.rcv_buf[:count]...)
	kcp.rcv_buf = kcp.rcv_buf[count:]
	// fast recover
	if len(kcp.rcv_queue) < int(kcp.rcv_wnd) && fast_recover {
		// ready to send back IKCP_CMD_WINS in ikcp_flush
		// tell remote my window size
		kcp.probe |= IKCP_ASK_TELL
	}
	return
}
// Send is user/upper level send, returns below zero for error
// (-1: empty buffer, -2: data would need more than 255 fragments).
func (kcp *KCP) Send(buffer []byte) int {
	var count int
	if len(buffer) == 0 {
		return -1
	}
	// append to previous segment in streaming mode (if possible)
	if kcp.stream != 0 {
		n := len(kcp.snd_queue)
		if n > 0 {
			old := &kcp.snd_queue[n-1]
			if len(old.data) < int(kcp.mss) {
				capacity := int(kcp.mss) - len(old.data)
				extend := capacity
				if len(buffer) < capacity {
					extend = len(buffer)
				}
				// build a merged segment from the old tail plus as much of
				// buffer as fits, then swap it in place of the tail
				seg := kcp.newSegment(len(old.data) + extend)
				seg.frg = 0
				copy(seg.data, old.data)
				copy(seg.data[len(old.data):], buffer)
				buffer = buffer[extend:]
				kcp.delSegment(old)
				kcp.snd_queue[n-1] = *seg
			}
		}
		if len(buffer) == 0 {
			return 0
		}
	}
	// number of mss-sized fragments needed for the remaining data
	if len(buffer) <= int(kcp.mss) {
		count = 1
	} else {
		count = (len(buffer) + int(kcp.mss) - 1) / int(kcp.mss)
	}
	if count > 255 {
		// frg is carried in a single byte on the wire
		return -2
	}
	if count == 0 {
		count = 1
	}
	for i := 0; i < count; i++ {
		var size int
		if len(buffer) > int(kcp.mss) {
			size = int(kcp.mss)
		} else {
			size = len(buffer)
		}
		seg := kcp.newSegment(size)
		copy(seg.data, buffer[:size])
		if kcp.stream == 0 { // message mode
			// fragments count down so the last one carries frg == 0
			seg.frg = uint32(count - i - 1)
		} else { // stream mode
			seg.frg = 0
		}
		kcp.snd_queue = append(kcp.snd_queue, *seg)
		buffer = buffer[size:]
	}
	return 0
}
// update_ack feeds one RTT sample (ms) into the smoothed RTT estimator
// and recomputes the retransmission timeout.
// https://tools.ietf.org/html/rfc6298
func (kcp *KCP) update_ack(rtt int32) {
	var rto uint32
	if kcp.rx_srtt == 0 {
		// first sample initializes the estimator
		kcp.rx_srtt = rtt
		kcp.rx_rttvar = rtt >> 1
	} else {
		delta := rtt - kcp.rx_srtt
		kcp.rx_srtt += delta >> 3
		if delta < 0 {
			delta = -delta
		}
		if rtt < kcp.rx_srtt-kcp.rx_rttvar {
			// if the new RTT sample is below the bottom of the range of
			// what an RTT measurement is expected to be.
			// give an 8x reduced weight versus its normal weighting
			kcp.rx_rttvar += (delta - kcp.rx_rttvar) >> 5
		} else {
			kcp.rx_rttvar += (delta - kcp.rx_rttvar) >> 2
		}
	}
	// rto = srtt + max(interval, 4*rttvar), clamped to [rx_minrto, IKCP_RTO_MAX]
	rto = uint32(kcp.rx_srtt) + _imax_(kcp.interval, uint32(kcp.rx_rttvar)<<2)
	kcp.rx_rto = _ibound_(kcp.rx_minrto, rto, IKCP_RTO_MAX)
}
// shrink_buf refreshes snd_una: the sequence number of the first in-flight
// segment, or snd_nxt when nothing is in flight.
func (kcp *KCP) shrink_buf() {
	if len(kcp.snd_buf) == 0 {
		kcp.snd_una = kcp.snd_nxt
		return
	}
	kcp.snd_una = kcp.snd_buf[0].sn
}
// parse_ack removes the segment acknowledged by sn from snd_buf,
// recycling its payload. snd_buf is kept ordered by sn.
func (kcp *KCP) parse_ack(sn uint32) {
	// ignore acks outside [snd_una, snd_nxt)
	if _itimediff(sn, kcp.snd_una) < 0 || _itimediff(sn, kcp.snd_nxt) >= 0 {
		return
	}
	for k := range kcp.snd_buf {
		seg := &kcp.snd_buf[k]
		if sn == seg.sn {
			kcp.delSegment(seg)
			// delete element k; zero the vacated tail slot so the pooled
			// payload slice is not retained by the backing array
			copy(kcp.snd_buf[k:], kcp.snd_buf[k+1:])
			kcp.snd_buf[len(kcp.snd_buf)-1] = Segment{}
			kcp.snd_buf = kcp.snd_buf[:len(kcp.snd_buf)-1]
			break
		}
		if _itimediff(sn, seg.sn) < 0 {
			// passed the insertion point: sn is not in flight
			break
		}
	}
}
// parse_fastack bumps the fastack counter of every in-flight segment sent
// before sn that is still unacknowledged; flush uses the counter to
// trigger fast retransmission.
func (kcp *KCP) parse_fastack(sn uint32) {
	// ignore sequence numbers outside the current send window
	if _itimediff(sn, kcp.snd_una) < 0 || _itimediff(sn, kcp.snd_nxt) >= 0 {
		return
	}
	for i := range kcp.snd_buf {
		seg := &kcp.snd_buf[i]
		if _itimediff(sn, seg.sn) < 0 {
			break
		}
		if sn != seg.sn {
			seg.fastack++
		}
	}
}
// parse_una drops every segment from snd_buf that the peer's cumulative
// acknowledgement (una) covers, recycling their payload buffers.
func (kcp *KCP) parse_una(una uint32) {
	acked := 0
	for i := range kcp.snd_buf {
		seg := &kcp.snd_buf[i]
		if _itimediff(una, seg.sn) <= 0 {
			break
		}
		kcp.delSegment(seg)
		acked++
	}
	kcp.snd_buf = kcp.snd_buf[acked:]
}
// ack append
// ack_push queues an (sn, ts) pair to be acknowledged on the next flush.
func (kcp *KCP) ack_push(sn, ts uint32) {
	item := ackItem{sn: sn, ts: ts}
	kcp.acklist = append(kcp.acklist, item)
}
// parse_data inserts a received PUSH segment into rcv_buf in sn order
// (dropping duplicates and out-of-window segments), then promotes any
// in-order prefix of rcv_buf into rcv_queue.
func (kcp *KCP) parse_data(newseg *Segment) {
	sn := newseg.sn
	if _itimediff(sn, kcp.rcv_nxt+kcp.rcv_wnd) >= 0 ||
		_itimediff(sn, kcp.rcv_nxt) < 0 {
		// outside the receive window: recycle and drop
		kcp.delSegment(newseg)
		return
	}
	// scan backwards for the insertion point (new data usually lands near
	// the tail)
	n := len(kcp.rcv_buf) - 1
	insert_idx := 0
	repeat := false
	for i := n; i >= 0; i-- {
		seg := &kcp.rcv_buf[i]
		if seg.sn == sn {
			repeat = true
			atomic.AddUint64(&DefaultSnmp.RepeatSegs, 1)
			break
		}
		if _itimediff(sn, seg.sn) > 0 {
			insert_idx = i + 1
			break
		}
	}
	if !repeat {
		if insert_idx == n+1 {
			// append at the tail
			kcp.rcv_buf = append(kcp.rcv_buf, *newseg)
		} else {
			// shift the tail right by one and insert in order
			kcp.rcv_buf = append(kcp.rcv_buf, Segment{})
			copy(kcp.rcv_buf[insert_idx+1:], kcp.rcv_buf[insert_idx:])
			kcp.rcv_buf[insert_idx] = *newseg
		}
	} else {
		kcp.delSegment(newseg)
	}
	// move available data from rcv_buf -> rcv_queue
	count := 0
	for k := range kcp.rcv_buf {
		seg := &kcp.rcv_buf[k]
		if seg.sn == kcp.rcv_nxt && len(kcp.rcv_queue) < int(kcp.rcv_wnd) {
			kcp.rcv_nxt++
			count++
		} else {
			break
		}
	}
	kcp.rcv_queue = append(kcp.rcv_queue, kcp.rcv_buf[:count]...)
	kcp.rcv_buf = kcp.rcv_buf[count:]
}
// Input when you received a low level packet (eg. UDP packet), call it
// regular indicates a regular packet has received(not from FEC).
// ackNoDelay requests an immediate ack flush instead of waiting for Update.
// Returns 0 on success; -1/-2/-3 for malformed or mismatched input.
func (kcp *KCP) Input(data []byte, regular, ackNoDelay bool) int {
	// snapshot snd_una so growth of the ack horizon can be detected below
	// (note: the loop deliberately shadows `una` with the header field)
	una := kcp.snd_una
	if len(data) < IKCP_OVERHEAD {
		return -1
	}
	var maxack uint32
	var flag int
	var inSegs uint64
	current := currentMs()
	// a single packet may carry several segments back to back
	for {
		var ts, sn, length, una, conv uint32
		var wnd uint16
		var cmd, frg uint8
		if len(data) < int(IKCP_OVERHEAD) {
			break
		}
		data = ikcp_decode32u(data, &conv)
		if conv != kcp.conv {
			return -1
		}
		data = ikcp_decode8u(data, &cmd)
		data = ikcp_decode8u(data, &frg)
		data = ikcp_decode16u(data, &wnd)
		data = ikcp_decode32u(data, &ts)
		data = ikcp_decode32u(data, &sn)
		data = ikcp_decode32u(data, &una)
		data = ikcp_decode32u(data, &length)
		if len(data) < int(length) {
			return -2
		}
		if cmd != IKCP_CMD_PUSH && cmd != IKCP_CMD_ACK &&
			cmd != IKCP_CMD_WASK && cmd != IKCP_CMD_WINS {
			return -3
		}
		// only trust window updates from regular packets. i.e: latest update
		if regular {
			kcp.rmt_wnd = uint32(wnd)
		}
		kcp.parse_una(una)
		kcp.shrink_buf()
		if cmd == IKCP_CMD_ACK {
			if _itimediff(current, ts) >= 0 {
				kcp.update_ack(_itimediff(current, ts))
			}
			kcp.parse_ack(sn)
			kcp.shrink_buf()
			// track the largest acked sn in this packet for fast-ack
			if flag == 0 {
				flag = 1
				maxack = sn
			} else if _itimediff(sn, maxack) > 0 {
				maxack = sn
			}
		} else if cmd == IKCP_CMD_PUSH {
			if _itimediff(sn, kcp.rcv_nxt+kcp.rcv_wnd) < 0 {
				kcp.ack_push(sn, ts)
				if _itimediff(sn, kcp.rcv_nxt) >= 0 {
					seg := kcp.newSegment(int(length))
					seg.conv = conv
					seg.cmd = uint32(cmd)
					seg.frg = uint32(frg)
					seg.wnd = uint32(wnd)
					seg.ts = ts
					seg.sn = sn
					seg.una = una
					copy(seg.data, data[:length])
					kcp.parse_data(seg)
				} else {
					// already delivered: duplicate
					atomic.AddUint64(&DefaultSnmp.RepeatSegs, 1)
				}
			} else {
				// beyond the receive window
				atomic.AddUint64(&DefaultSnmp.RepeatSegs, 1)
			}
		} else if cmd == IKCP_CMD_WASK {
			// ready to send back IKCP_CMD_WINS in Ikcp_flush
			// tell remote my window size
			kcp.probe |= IKCP_ASK_TELL
		} else if cmd == IKCP_CMD_WINS {
			// do nothing
		} else {
			return -3
		}
		inSegs++
		data = data[length:]
	}
	atomic.AddUint64(&DefaultSnmp.InSegs, inSegs)
	if flag != 0 && regular {
		kcp.parse_fastack(maxack)
	}
	// grow cwnd if this packet advanced the ack horizon
	// (slow start / congestion avoidance, https://tools.ietf.org/html/rfc5681)
	if _itimediff(kcp.snd_una, una) > 0 {
		if kcp.cwnd < kcp.rmt_wnd {
			mss := kcp.mss
			if kcp.cwnd < kcp.ssthresh {
				kcp.cwnd++
				kcp.incr += mss
			} else {
				if kcp.incr < mss {
					kcp.incr = mss
				}
				kcp.incr += (mss*mss)/kcp.incr + (mss / 16)
				if (kcp.cwnd+1)*mss <= kcp.incr {
					kcp.cwnd++
				}
			}
			if kcp.cwnd > kcp.rmt_wnd {
				kcp.cwnd = kcp.rmt_wnd
				kcp.incr = kcp.rmt_wnd * mss
			}
		}
	}
	if ackNoDelay && len(kcp.acklist) > 0 { // ack immediately
		kcp.flush(true)
	} else if kcp.rmt_wnd == 0 && len(kcp.acklist) > 0 { // window zero
		kcp.flush(true)
	}
	return 0
}
// wnd_unused reports how many more segments the receive queue can accept
// before the configured receive window is full.
func (kcp *KCP) wnd_unused() int32 {
	free := int(kcp.rcv_wnd) - len(kcp.rcv_queue)
	if free > 0 {
		return int32(free)
	}
	return 0
}
// flush pending data: acknowledgements, window probes, new segments and
// retransmissions. ackOnly limits the pass to queued acknowledgements.
func (kcp *KCP) flush(ackOnly bool) {
	buffer := kcp.buffer
	change := 0
	lost := false
	// template segment reused for acks and window commands
	var seg Segment
	seg.conv = kcp.conv
	seg.cmd = IKCP_CMD_ACK
	seg.wnd = uint32(kcp.wnd_unused())
	seg.una = kcp.rcv_nxt
	// flush acknowledges
	var required []ackItem
	for i, ack := range kcp.acklist {
		// filter necessary acks only: still >= rcv_nxt, or the very last one
		if ack.sn >= kcp.rcv_nxt || len(kcp.acklist)-1 == i {
			required = append(required, kcp.acklist[i])
		}
	}
	kcp.acklist = nil
	ptr := buffer
	maxBatchSize := kcp.mtu / IKCP_OVERHEAD
	for len(required) > 0 {
		var batchSize int
		if kcp.datashard > 0 && kcp.parityshard > 0 { // try triggering FEC
			batchSize = int(_ibound_(1, uint32(len(required)/kcp.datashard), maxBatchSize))
		} else {
			batchSize = int(_ibound_(1, uint32(len(required)), maxBatchSize))
		}
		for len(required) >= batchSize {
			for i := 0; i < batchSize; i++ {
				ack := required[i]
				seg.sn, seg.ts = ack.sn, ack.ts
				ptr = seg.encode(ptr)
			}
			size := len(buffer) - len(ptr)
			kcp.output(buffer, size)
			ptr = buffer
			required = required[batchSize:]
		}
	}
	if ackOnly { // flush acks only
		return
	}
	current := currentMs()
	// probe window size (if remote window size equals zero)
	if kcp.rmt_wnd == 0 {
		if kcp.probe_wait == 0 {
			kcp.probe_wait = IKCP_PROBE_INIT
			kcp.ts_probe = current + kcp.probe_wait
		} else {
			if _itimediff(current, kcp.ts_probe) >= 0 {
				if kcp.probe_wait < IKCP_PROBE_INIT {
					kcp.probe_wait = IKCP_PROBE_INIT
				}
				// back off by 50% each round, capped at IKCP_PROBE_LIMIT
				kcp.probe_wait += kcp.probe_wait / 2
				if kcp.probe_wait > IKCP_PROBE_LIMIT {
					kcp.probe_wait = IKCP_PROBE_LIMIT
				}
				kcp.ts_probe = current + kcp.probe_wait
				kcp.probe |= IKCP_ASK_SEND
			}
		}
	} else {
		kcp.ts_probe = 0
		kcp.probe_wait = 0
	}
	// flush window probing commands
	if (kcp.probe & IKCP_ASK_SEND) != 0 {
		seg.cmd = IKCP_CMD_WASK
		size := len(buffer) - len(ptr)
		if size+IKCP_OVERHEAD > int(kcp.mtu) {
			kcp.output(buffer, size)
			ptr = buffer
		}
		ptr = seg.encode(ptr)
	}
	// flush window probing commands
	if (kcp.probe & IKCP_ASK_TELL) != 0 {
		seg.cmd = IKCP_CMD_WINS
		size := len(buffer) - len(ptr)
		if size+IKCP_OVERHEAD > int(kcp.mtu) {
			kcp.output(buffer, size)
			ptr = buffer
		}
		ptr = seg.encode(ptr)
	}
	kcp.probe = 0
	// calculate window size
	cwnd := _imin_(kcp.snd_wnd, kcp.rmt_wnd)
	if kcp.nocwnd == 0 {
		cwnd = _imin_(kcp.cwnd, cwnd)
	}
	// sliding window, controlled by snd_nxt && sna_una+cwnd
	newSegsCount := 0
	for k := range kcp.snd_queue {
		if _itimediff(kcp.snd_nxt, kcp.snd_una+cwnd) >= 0 {
			break
		}
		newseg := kcp.snd_queue[k]
		newseg.conv = kcp.conv
		newseg.cmd = IKCP_CMD_PUSH
		newseg.sn = kcp.snd_nxt
		kcp.snd_buf = append(kcp.snd_buf, newseg)
		kcp.snd_nxt++
		newSegsCount++
		// drop the queue's reference so only snd_buf owns the payload
		kcp.snd_queue[k].data = nil
	}
	kcp.snd_queue = kcp.snd_queue[newSegsCount:]
	// calculate resent
	resent := uint32(kcp.fastresend)
	if kcp.fastresend <= 0 {
		resent = 0xffffffff // fast resend disabled
	}
	// counters
	var lostSegs, fastRetransSegs, earlyRetransSegs uint64
	// send new segments
	for k := len(kcp.snd_buf) - newSegsCount; k < len(kcp.snd_buf); k++ {
		segment := &kcp.snd_buf[k]
		segment.xmit++
		segment.rto = kcp.rx_rto
		segment.resendts = current + segment.rto
		segment.ts = current
		segment.wnd = seg.wnd
		segment.una = kcp.rcv_nxt
		size := len(buffer) - len(ptr)
		need := IKCP_OVERHEAD + len(segment.data)
		if size+need > int(kcp.mtu) {
			// packet would exceed the MTU: hand it off, start a fresh one
			kcp.output(buffer, size)
			ptr = buffer
		}
		ptr = segment.encode(ptr)
		copy(ptr, segment.data)
		ptr = ptr[len(segment.data):]
	}
	// check for retransmissions
	for k := 0; k < len(kcp.snd_buf)-newSegsCount; k++ {
		segment := &kcp.snd_buf[k]
		needsend := false
		if _itimediff(current, segment.resendts) >= 0 { // RTO
			needsend = true
			segment.xmit++
			kcp.xmit++
			if kcp.nodelay == 0 {
				segment.rto += kcp.rx_rto
			} else {
				// gentler backoff in nodelay mode
				segment.rto += kcp.rx_rto / 2
			}
			segment.resendts = current + segment.rto
			lost = true
			lostSegs++
		} else if segment.fastack >= resent { // fast retransmit
			needsend = true
			segment.xmit++
			segment.fastack = 0
			segment.rto = kcp.rx_rto
			segment.resendts = current + segment.rto
			change++
			fastRetransSegs++
		} else if segment.fastack > 0 && newSegsCount == 0 { // early retransmit
			needsend = true
			segment.xmit++
			segment.fastack = 0
			segment.rto = kcp.rx_rto
			segment.resendts = current + segment.rto
			change++
			earlyRetransSegs++
		}
		if needsend {
			segment.ts = current
			segment.wnd = seg.wnd
			segment.una = kcp.rcv_nxt
			size := len(buffer) - len(ptr)
			need := IKCP_OVERHEAD + len(segment.data)
			if size+need > int(kcp.mtu) {
				kcp.output(buffer, size)
				ptr = buffer
			}
			ptr = segment.encode(ptr)
			copy(ptr, segment.data)
			ptr = ptr[len(segment.data):]
			if segment.xmit >= kcp.dead_link {
				// too many retransmissions: mark the link dead
				kcp.state = 0xFFFFFFFF
			}
		}
	}
	// flash remain segments
	size := len(buffer) - len(ptr)
	if size > 0 {
		kcp.output(buffer, size)
	}
	// counter updates
	sum := lostSegs
	if lostSegs > 0 {
		atomic.AddUint64(&DefaultSnmp.LostSegs, lostSegs)
	}
	if fastRetransSegs > 0 {
		atomic.AddUint64(&DefaultSnmp.FastRetransSegs, fastRetransSegs)
		sum += fastRetransSegs
	}
	if earlyRetransSegs > 0 {
		atomic.AddUint64(&DefaultSnmp.EarlyRetransSegs, earlyRetransSegs)
		sum += earlyRetransSegs
	}
	if sum > 0 {
		atomic.AddUint64(&DefaultSnmp.RetransSegs, sum)
	}
	// update ssthresh
	// rate halving, https://tools.ietf.org/html/rfc6937
	if change != 0 {
		inflight := kcp.snd_nxt - kcp.snd_una
		kcp.ssthresh = inflight / 2
		if kcp.ssthresh < IKCP_THRESH_MIN {
			kcp.ssthresh = IKCP_THRESH_MIN
		}
		kcp.cwnd = kcp.ssthresh + resent
		kcp.incr = kcp.cwnd * kcp.mss
	}
	// congestion control, https://tools.ietf.org/html/rfc5681
	if lost {
		kcp.ssthresh = cwnd / 2
		if kcp.ssthresh < IKCP_THRESH_MIN {
			kcp.ssthresh = IKCP_THRESH_MIN
		}
		kcp.cwnd = 1
		kcp.incr = kcp.mss
	}
	if kcp.cwnd < 1 {
		kcp.cwnd = 1
		kcp.incr = kcp.mss
	}
}
// Update updates state (call it repeatedly, every 10ms-100ms), or you can ask
// ikcp_check when to call it again (without ikcp_input/_send calling).
// 'current' - current timestamp in millisec.
func (kcp *KCP) Update() {
	current := currentMs()
	if kcp.updated == 0 {
		// first call: anchor the flush clock at "now"
		kcp.updated = 1
		kcp.ts_flush = current
	}
	slap := _itimediff(current, kcp.ts_flush)
	if slap >= 10000 || slap < -10000 {
		// schedule drifted too far from the clock; resynchronize
		kcp.ts_flush = current
		slap = 0
	}
	if slap < 0 {
		return // next flush not due yet
	}
	kcp.ts_flush += kcp.interval
	if _itimediff(current, kcp.ts_flush) >= 0 {
		kcp.ts_flush = current + kcp.interval
	}
	kcp.flush(false)
}
// Check determines when should you invoke ikcp_update:
// returns when you should invoke ikcp_update in millisec, if there
// is no ikcp_input/_send calling. you can call ikcp_update in that
// time, instead of call update repeatly.
// Important to reduce unnacessary ikcp_update invoking. use it to
// schedule ikcp_update (eg. implementing an epoll-like mechanism,
// or optimize ikcp_update when handling massive kcp connections)
func (kcp *KCP) Check() uint32 {
	current := currentMs()
	ts_flush := kcp.ts_flush
	tm_flush := int32(0x7fffffff)
	tm_packet := int32(0x7fffffff)
	minimal := uint32(0)
	if kcp.updated == 0 {
		// Update has never run; it must be called immediately
		return current
	}
	if _itimediff(current, ts_flush) >= 10000 ||
		_itimediff(current, ts_flush) < -10000 {
		// flush timestamp badly out of range; treat flush as due now
		ts_flush = current
	}
	if _itimediff(current, ts_flush) >= 0 {
		// periodic flush already due
		return current
	}
	tm_flush = _itimediff(ts_flush, current)
	// find the earliest retransmission deadline among in-flight segments
	for k := range kcp.snd_buf {
		seg := &kcp.snd_buf[k]
		diff := _itimediff(seg.resendts, current)
		if diff <= 0 {
			// a retransmission is already overdue
			return current
		}
		if diff < tm_packet {
			tm_packet = diff
		}
	}
	minimal = uint32(tm_packet)
	if tm_packet >= tm_flush {
		minimal = uint32(tm_flush)
	}
	// never wait longer than one flush interval
	if minimal >= kcp.interval {
		minimal = kcp.interval
	}
	return current + minimal
}
// set datashard,parityshard info for some optimizations:
// flush uses these to size ack batches so that FEC encoding can trigger.
func (kcp *KCP) setFEC(datashard, parityshard int) {
	kcp.datashard = datashard
	kcp.parityshard = parityshard
}
// SetMtu changes MTU size, default is 1400.
// Returns 0 on success or -1 when mtu is too small to carry the
// 24-byte (IKCP_OVERHEAD) header plus payload.
func (kcp *KCP) SetMtu(mtu int) int {
	if mtu < 50 || mtu < IKCP_OVERHEAD {
		return -1
	}
	// Triple-sized scratch buffer: flush may batch several segments
	// before handing them to output.
	// NOTE: make() never returns nil, so the old `if buffer == nil`
	// check (returning -2) was dead code and has been removed.
	buffer := make([]byte, (mtu+IKCP_OVERHEAD)*3)
	kcp.mtu = uint32(mtu)
	kcp.mss = kcp.mtu - IKCP_OVERHEAD
	kcp.buffer = buffer
	return 0
}
// NoDelay options
// fastest: ikcp_nodelay(kcp, 1, 20, 2, 1)
// nodelay: 0:disable(default), 1:enable
// interval: internal update timer interval in millisec, default is 100ms
// resend: 0:disable fast resend(default), 1:enable fast resend
// nc: 0:normal congestion control(default), 1:disable congestion control
// Negative arguments leave the corresponding setting untouched.
func (kcp *KCP) NoDelay(nodelay, interval, resend, nc int) int {
	if nodelay >= 0 {
		kcp.nodelay = uint32(nodelay)
		// nodelay mode lowers the minimum RTO floor
		if nodelay == 0 {
			kcp.rx_minrto = IKCP_RTO_MIN
		} else {
			kcp.rx_minrto = IKCP_RTO_NDL
		}
	}
	if interval >= 0 {
		// clamp the flush interval into [10ms, 5000ms]
		switch {
		case interval > 5000:
			interval = 5000
		case interval < 10:
			interval = 10
		}
		kcp.interval = uint32(interval)
	}
	if resend >= 0 {
		kcp.fastresend = int32(resend)
	}
	if nc >= 0 {
		kcp.nocwnd = int32(nc)
	}
	return 0
}
// WndSize sets maximum window size: sndwnd=32, rcvwnd=32 by default.
// Non-positive values leave the corresponding window unchanged.
// Always returns 0.
func (kcp *KCP) WndSize(sndwnd, rcvwnd int) int {
	if sndwnd > 0 {
		kcp.snd_wnd = uint32(sndwnd)
	}
	if rcvwnd > 0 {
		kcp.rcv_wnd = uint32(rcvwnd)
	}
	return 0
}
// WaitSnd gets how many packet is waiting to be sent:
// in-flight segments (snd_buf) plus queued-but-unsent segments (snd_queue).
func (kcp *KCP) WaitSnd() int {
	return len(kcp.snd_buf) + len(kcp.snd_queue)
}
// Cwnd returns current congestion window size
func (kcp *KCP) Cwnd() uint32 {
	wnd := _imin_(kcp.snd_wnd, kcp.rmt_wnd)
	if kcp.nocwnd != 0 {
		// congestion control disabled: window limited only by snd/rmt
		return wnd
	}
	return _imin_(kcp.cwnd, wnd)
}
// recent ack ts (orphaned fragment from a truncated comment; extraction artifact)
// Package kcp - A Fast and Reliable ARQ Protocol
package kcp
import (
"encoding/binary"
"sync/atomic"
)
const (
	IKCP_RTO_NDL     = 30     // no delay min rto
	IKCP_RTO_MIN     = 100    // normal min rto
	IKCP_RTO_DEF     = 200    // initial rto before any RTT sample (ms)
	IKCP_RTO_MAX     = 60000  // upper bound for rto (ms)
	IKCP_CMD_PUSH    = 81     // cmd: push data
	IKCP_CMD_ACK     = 82     // cmd: ack
	IKCP_CMD_WASK    = 83     // cmd: window probe (ask)
	IKCP_CMD_WINS    = 84     // cmd: window size (tell)
	IKCP_ASK_SEND    = 1      // need to send IKCP_CMD_WASK
	IKCP_ASK_TELL    = 2      // need to send IKCP_CMD_WINS
	IKCP_WND_SND     = 32     // default send window (segments)
	IKCP_WND_RCV     = 32     // default receive window (segments)
	IKCP_MTU_DEF     = 1400   // default MTU (bytes)
	IKCP_ACK_FAST    = 3
	IKCP_INTERVAL    = 100    // default internal flush interval (ms)
	IKCP_OVERHEAD    = 24     // per-segment wire header size (bytes)
	IKCP_DEADLINK    = 20     // xmit count after which the link is marked dead
	IKCP_THRESH_INIT = 2      // initial ssthresh (segments)
	IKCP_THRESH_MIN  = 2      // floor for ssthresh (segments)
	IKCP_PROBE_INIT  = 7000   // 7 secs to probe window size
	IKCP_PROBE_LIMIT = 120000 // up to 120 secs to probe window
)
// Output is a closure which captures conn and calls conn.Write.
// The callee receives the first `size` bytes of buf; buf is reused for
// subsequent packets, so implementations must copy if they retain it.
type Output func(buf []byte, size int)
/* encode 8 bits unsigned int */
// Writes c at the head of p; returns the unwritten remainder of p.
func ikcp_encode8u(p []byte, c byte) []byte {
	p[0] = c
	rest := p[1:]
	return rest
}
/* decode 8 bits unsigned int */
// Reads one byte from p into *c; returns the unread remainder of p.
func ikcp_decode8u(p []byte, c *byte) []byte {
	*c = p[0]
	rest := p[1:]
	return rest
}
/* encode 16 bits unsigned int (lsb) */
// Little-endian encodes w into p[0:2]; returns the unwritten remainder.
func ikcp_encode16u(p []byte, w uint16) []byte {
	binary.LittleEndian.PutUint16(p, w)
	rest := p[2:]
	return rest
}
/* decode 16 bits unsigned int (lsb) */
// Little-endian decodes p[0:2] into *w; returns the unread remainder.
func ikcp_decode16u(p []byte, w *uint16) []byte {
	*w = binary.LittleEndian.Uint16(p)
	rest := p[2:]
	return rest
}
/* encode 32 bits unsigned int (lsb) */
// Little-endian encodes l into p[0:4]; returns the unwritten remainder.
func ikcp_encode32u(p []byte, l uint32) []byte {
	binary.LittleEndian.PutUint32(p, l)
	rest := p[4:]
	return rest
}
/* decode 32 bits unsigned int (lsb) */
// Little-endian decodes p[0:4] into *l; returns the unread remainder.
func ikcp_decode32u(p []byte, l *uint32) []byte {
	*l = binary.LittleEndian.Uint32(p)
	rest := p[4:]
	return rest
}
// _imin_ returns the smaller of a and b.
func _imin_(a, b uint32) uint32 {
	if b < a {
		return b
	}
	return a
}
// _imax_ returns the larger of a and b.
func _imax_(a, b uint32) uint32 {
	if b > a {
		return b
	}
	return a
}
// _ibound_ clamps middle into the range [lower, upper]
// (computed as min(max(lower, middle), upper)).
func _ibound_(lower, middle, upper uint32) uint32 {
	return _imin_(_imax_(lower, middle), upper)
}
// _itimediff returns later-earlier as a signed 32-bit difference,
// which stays correct across uint32 timestamp wraparound.
func _itimediff(later, earlier uint32) int32 {
	return int32(later - earlier)
}
// Segment defines a KCP segment
type Segment struct {
	conv     uint32 // conversation id; must match KCP.conv on input
	cmd      uint32 // command: IKCP_CMD_PUSH/ACK/WASK/WINS
	frg      uint32 // fragment count-down; 0 marks the last fragment of a message
	wnd      uint32 // advertised receive window of the sender
	ts       uint32 // timestamp (ms), echoed in acks for RTT measurement
	sn       uint32 // sequence number
	una      uint32 // sender's rcv_nxt at send time
	data     []byte // payload, backed by the shared xmitBuf pool
	resendts uint32 // next retransmission deadline (ms)
	rto      uint32 // current per-segment retransmission timeout (ms)
	fastack  uint32 // times this segment was skipped over by later acks
	xmit     uint32 // transmission attempts so far
}
// encode a segment into buffer: writes the 24-byte (IKCP_OVERHEAD) wire
// header and returns the remainder of ptr. Payload bytes are copied
// separately by the caller.
func (seg *Segment) encode(ptr []byte) []byte {
	ptr = ikcp_encode32u(ptr, seg.conv)
	ptr = ikcp_encode8u(ptr, uint8(seg.cmd))
	ptr = ikcp_encode8u(ptr, uint8(seg.frg))
	ptr = ikcp_encode16u(ptr, uint16(seg.wnd))
	ptr = ikcp_encode32u(ptr, seg.ts)
	ptr = ikcp_encode32u(ptr, seg.sn)
	ptr = ikcp_encode32u(ptr, seg.una)
	ptr = ikcp_encode32u(ptr, uint32(len(seg.data)))
	// every encoded header counts as one outgoing segment
	atomic.AddUint64(&DefaultSnmp.OutSegs, 1)
	return ptr
}
// KCP defines a single KCP connection
type KCP struct {
	conv, mtu, mss, state                  uint32 // conversation id, MTU, max segment size, link state (0xFFFFFFFF = dead)
	snd_una, snd_nxt, rcv_nxt              uint32 // first unacked sn, next sn to send, next sn expected
	ssthresh                               uint32 // slow-start threshold (segments)
	rx_rttvar, rx_srtt                     int32  // RTT variance and smoothed RTT (ms)
	rx_rto, rx_minrto                      uint32 // current and minimum retransmission timeout (ms)
	snd_wnd, rcv_wnd, rmt_wnd, cwnd, probe uint32 // send/recv/remote/congestion windows; pending probe flags
	interval, ts_flush, xmit               uint32 // flush interval, next flush timestamp, total retransmissions
	nodelay, updated                       uint32 // nodelay mode; whether Update has ever run
	ts_probe, probe_wait                   uint32 // window-probe schedule
	dead_link, incr                        uint32 // xmit limit before dead link; cwnd growth accumulator (bytes)
	fastresend                             int32  // duplicate-ack threshold for fast retransmit (<=0 disables)
	nocwnd, stream                         int32  // disable congestion window; stream (vs message) mode
	snd_queue                              []Segment // user data not yet assigned sequence numbers
	rcv_queue                              []Segment // in-order segments ready for Recv
	snd_buf                                []Segment // in-flight segments awaiting ack
	rcv_buf                                []Segment // out-of-order segments awaiting reassembly
	acklist                                []ackItem // pending acks to emit on the next flush
	buffer                                 []byte    // scratch buffer for building outgoing packets
	output                                 Output    // callback that transmits packed bytes
	datashard, parityshard                 int       // FEC parameters, used to size ack batches in flush
}
// ackItem is one pending acknowledgement: the sequence number to ack and
// the sender timestamp echoed back for RTT measurement.
type ackItem struct {
	sn uint32
	ts uint32
}
// NewKCP create a new kcp control object, 'conv' must equal in two endpoint
// from the same connection.
func NewKCP(conv uint32, output Output) *KCP {
	kcp := &KCP{
		conv:      conv,
		snd_wnd:   IKCP_WND_SND,
		rcv_wnd:   IKCP_WND_RCV,
		rmt_wnd:   IKCP_WND_RCV,
		mtu:       IKCP_MTU_DEF,
		mss:       IKCP_MTU_DEF - IKCP_OVERHEAD,
		rx_rto:    IKCP_RTO_DEF,
		rx_minrto: IKCP_RTO_MIN,
		interval:  IKCP_INTERVAL,
		ts_flush:  IKCP_INTERVAL,
		ssthresh:  IKCP_THRESH_INIT,
		dead_link: IKCP_DEADLINK,
		output:    output,
	}
	// triple-MTU scratch buffer used by flush to batch segments
	kcp.buffer = make([]byte, (kcp.mtu+IKCP_OVERHEAD)*3)
	return kcp
}
// newSegment creates a KCP segment whose payload slice is taken from the
// shared xmitBuf pool and truncated to size.
func (kcp *KCP) newSegment(size int) *Segment {
	seg := new(Segment)
	seg.data = xmitBuf.Get().([]byte)[:size]
	return seg
}
// delSegment recycles a KCP segment's payload back into the xmitBuf pool.
// The segment's data must not be used afterwards.
func (kcp *KCP) delSegment(seg *Segment) {
	xmitBuf.Put(seg.data)
}
// PeekSize checks the size of next message in the recv queue: the total
// byte length of the next complete message, or -1 when none is complete.
func (kcp *KCP) PeekSize() (length int) {
	if len(kcp.rcv_queue) == 0 {
		return -1
	}
	first := &kcp.rcv_queue[0]
	if first.frg == 0 {
		// single-fragment message
		return len(first.data)
	}
	if len(kcp.rcv_queue) < int(first.frg+1) {
		// not all fragments have arrived yet
		return -1
	}
	// sum fragment sizes up to and including the final fragment (frg == 0)
	total := 0
	for i := range kcp.rcv_queue {
		frag := &kcp.rcv_queue[i]
		total += len(frag.data)
		if frag.frg == 0 {
			break
		}
	}
	return total
}
// Recv is user/upper level recv: returns size, returns below zero for EAGAIN
// (-1: queue empty, -2: next message incomplete, -3: buffer too small).
func (kcp *KCP) Recv(buffer []byte) (n int) {
	if len(kcp.rcv_queue) == 0 {
		return -1
	}
	peeksize := kcp.PeekSize()
	if peeksize < 0 {
		return -2
	}
	if peeksize > len(buffer) {
		return -3
	}
	// remember whether the receive queue was full before draining; if so,
	// the peer should later be told that the window has reopened
	var fast_recover bool
	if len(kcp.rcv_queue) >= int(kcp.rcv_wnd) {
		fast_recover = true
	}
	// merge fragment
	count := 0
	for k := range kcp.rcv_queue {
		seg := &kcp.rcv_queue[k]
		copy(buffer, seg.data)
		buffer = buffer[len(seg.data):]
		n += len(seg.data)
		count++
		kcp.delSegment(seg)
		if seg.frg == 0 {
			// last fragment of the message
			break
		}
	}
	kcp.rcv_queue = kcp.rcv_queue[count:]
	// move available data from rcv_buf -> rcv_queue
	count = 0
	for k := range kcp.rcv_buf {
		seg := &kcp.rcv_buf[k]
		if seg.sn == kcp.rcv_nxt && len(kcp.rcv_queue) < int(kcp.rcv_wnd) {
			kcp.rcv_nxt++
			count++
		} else {
			break
		}
	}
	kcp.rcv_queue = append(kcp.rcv_queue, kcp.rcv_buf[:count]...)
	kcp.rcv_buf = kcp.rcv_buf[count:]
	// fast recover
	if len(kcp.rcv_queue) < int(kcp.rcv_wnd) && fast_recover {
		// ready to send back IKCP_CMD_WINS in ikcp_flush
		// tell remote my window size
		kcp.probe |= IKCP_ASK_TELL
	}
	return
}
// Send is user/upper level send, returns below zero for error
// (-1: empty buffer, -2: data would need more than 255 fragments).
func (kcp *KCP) Send(buffer []byte) int {
	var count int
	if len(buffer) == 0 {
		return -1
	}
	// append to previous segment in streaming mode (if possible)
	if kcp.stream != 0 {
		n := len(kcp.snd_queue)
		if n > 0 {
			old := &kcp.snd_queue[n-1]
			if len(old.data) < int(kcp.mss) {
				capacity := int(kcp.mss) - len(old.data)
				extend := capacity
				if len(buffer) < capacity {
					extend = len(buffer)
				}
				// build a merged segment from the old tail plus as much of
				// buffer as fits, then swap it in place of the tail
				seg := kcp.newSegment(len(old.data) + extend)
				seg.frg = 0
				copy(seg.data, old.data)
				copy(seg.data[len(old.data):], buffer)
				buffer = buffer[extend:]
				kcp.delSegment(old)
				kcp.snd_queue[n-1] = *seg
			}
		}
		if len(buffer) == 0 {
			return 0
		}
	}
	// number of mss-sized fragments needed for the remaining data
	if len(buffer) <= int(kcp.mss) {
		count = 1
	} else {
		count = (len(buffer) + int(kcp.mss) - 1) / int(kcp.mss)
	}
	if count > 255 {
		// frg is carried in a single byte on the wire
		return -2
	}
	if count == 0 {
		count = 1
	}
	for i := 0; i < count; i++ {
		var size int
		if len(buffer) > int(kcp.mss) {
			size = int(kcp.mss)
		} else {
			size = len(buffer)
		}
		seg := kcp.newSegment(size)
		copy(seg.data, buffer[:size])
		if kcp.stream == 0 { // message mode
			// fragments count down so the last one carries frg == 0
			seg.frg = uint32(count - i - 1)
		} else { // stream mode
			seg.frg = 0
		}
		kcp.snd_queue = append(kcp.snd_queue, *seg)
		buffer = buffer[size:]
	}
	return 0
}
// update_ack feeds one RTT sample (ms) into the smoothed RTT estimator
// and recomputes the retransmission timeout.
// https://tools.ietf.org/html/rfc6298
func (kcp *KCP) update_ack(rtt int32) {
	var rto uint32
	if kcp.rx_srtt == 0 {
		// first sample initializes the estimator
		kcp.rx_srtt = rtt
		kcp.rx_rttvar = rtt >> 1
	} else {
		delta := rtt - kcp.rx_srtt
		kcp.rx_srtt += delta >> 3
		if delta < 0 {
			delta = -delta
		}
		if rtt < kcp.rx_srtt-kcp.rx_rttvar {
			// if the new RTT sample is below the bottom of the range of
			// what an RTT measurement is expected to be.
			// give an 8x reduced weight versus its normal weighting
			kcp.rx_rttvar += (delta - kcp.rx_rttvar) >> 5
		} else {
			kcp.rx_rttvar += (delta - kcp.rx_rttvar) >> 2
		}
	}
	// rto = srtt + max(interval, 4*rttvar), clamped to [rx_minrto, IKCP_RTO_MAX]
	rto = uint32(kcp.rx_srtt) + _imax_(kcp.interval, uint32(kcp.rx_rttvar)<<2)
	kcp.rx_rto = _ibound_(kcp.rx_minrto, rto, IKCP_RTO_MAX)
}
// shrink_buf refreshes snd_una from the head of the send buffer; when
// nothing is in flight, snd_una catches up to snd_nxt.
func (kcp *KCP) shrink_buf() {
	if len(kcp.snd_buf) == 0 {
		kcp.snd_una = kcp.snd_nxt
		return
	}
	kcp.snd_una = kcp.snd_buf[0].sn
}
// parse_ack removes the acknowledged segment sn from snd_buf.
// snd_buf is kept ordered by sn, so the scan stops early once sn
// can no longer appear.
func (kcp *KCP) parse_ack(sn uint32) {
	// ignore acks outside the in-flight window [snd_una, snd_nxt)
	if _itimediff(sn, kcp.snd_una) < 0 || _itimediff(sn, kcp.snd_nxt) >= 0 {
		return
	}

	for k := range kcp.snd_buf {
		seg := &kcp.snd_buf[k]
		if sn == seg.sn {
			kcp.delSegment(seg)
			// delete element k in place, preserving order; zero the vacated
			// tail slot so the GC can reclaim its data
			copy(kcp.snd_buf[k:], kcp.snd_buf[k+1:])
			kcp.snd_buf[len(kcp.snd_buf)-1] = Segment{}
			kcp.snd_buf = kcp.snd_buf[:len(kcp.snd_buf)-1]
			break
		}
		if _itimediff(sn, seg.sn) < 0 {
			break
		}
	}
}
// parse_fastack bumps the fast-ack counter of every in-flight segment
// that was skipped over by ack sn; segments whose counter reaches the
// fast-resend threshold are retransmitted early by flush().
func (kcp *KCP) parse_fastack(sn uint32) {
	if _itimediff(sn, kcp.snd_una) < 0 || _itimediff(sn, kcp.snd_nxt) >= 0 {
		return
	}

	for k := range kcp.snd_buf {
		seg := &kcp.snd_buf[k]
		if _itimediff(sn, seg.sn) < 0 {
			// snd_buf is ordered; everything beyond is newer than sn
			break
		} else if sn != seg.sn {
			seg.fastack++
		}
	}
}
// parse_una drops every in-flight segment older than una — the remote
// peer has confirmed receipt of everything before that sequence number.
func (kcp *KCP) parse_una(una uint32) {
	removed := 0
	for i := range kcp.snd_buf {
		if _itimediff(una, kcp.snd_buf[i].sn) <= 0 {
			// snd_buf is ordered by sn; the rest is still unacknowledged
			break
		}
		kcp.delSegment(&kcp.snd_buf[i])
		removed++
	}
	kcp.snd_buf = kcp.snd_buf[removed:]
}
// ack_push queues an acknowledgement for (sn, ts); the pending list is
// drained and sent by the next flush.
func (kcp *KCP) ack_push(sn, ts uint32) {
	kcp.acklist = append(kcp.acklist, ackItem{sn, ts})
}
// parse_data inserts a received segment into rcv_buf at its ordered
// position (dropping duplicates and out-of-window segments), then moves
// any now-contiguous run of segments into rcv_queue for Recv.
func (kcp *KCP) parse_data(newseg *Segment) {
	sn := newseg.sn
	// discard segments outside the receive window
	if _itimediff(sn, kcp.rcv_nxt+kcp.rcv_wnd) >= 0 ||
		_itimediff(sn, kcp.rcv_nxt) < 0 {
		kcp.delSegment(newseg)
		return
	}

	// scan backwards for the insertion point; rcv_buf stays sorted by sn
	n := len(kcp.rcv_buf) - 1
	insert_idx := 0
	repeat := false
	for i := n; i >= 0; i-- {
		seg := &kcp.rcv_buf[i]
		if seg.sn == sn {
			repeat = true
			atomic.AddUint64(&DefaultSnmp.RepeatSegs, 1)
			break
		}
		if _itimediff(sn, seg.sn) > 0 {
			insert_idx = i + 1
			break
		}
	}

	if !repeat {
		if insert_idx == n+1 {
			// newest segment: append at the tail
			kcp.rcv_buf = append(kcp.rcv_buf, *newseg)
		} else {
			// shift the tail right by one slot and drop the segment in place
			kcp.rcv_buf = append(kcp.rcv_buf, Segment{})
			copy(kcp.rcv_buf[insert_idx+1:], kcp.rcv_buf[insert_idx:])
			kcp.rcv_buf[insert_idx] = *newseg
		}
	} else {
		kcp.delSegment(newseg)
	}

	// move available data from rcv_buf -> rcv_queue
	count := 0
	for k := range kcp.rcv_buf {
		seg := &kcp.rcv_buf[k]
		if seg.sn == kcp.rcv_nxt && len(kcp.rcv_queue) < int(kcp.rcv_wnd) {
			kcp.rcv_nxt++
			count++
		} else {
			break
		}
	}
	kcp.rcv_queue = append(kcp.rcv_queue, kcp.rcv_buf[:count]...)
	kcp.rcv_buf = kcp.rcv_buf[count:]
}
// Input when you received a low level packet (eg. UDP packet), call it
// regular indicates a regular packet has received(not from FEC).
// ackNoDelay forces an immediate flush of pending acks.
// Returns 0 on success; -1 for an undersized packet or conversation-id
// mismatch, -2 for a truncated payload, -3 for an unknown command.
func (kcp *KCP) Input(data []byte, regular, ackNoDelay bool) int {
	// snapshot snd_una so we can detect forward progress afterwards;
	// NOTE: intentionally shadowed by the per-segment `una` inside the loop
	una := kcp.snd_una
	if len(data) < IKCP_OVERHEAD {
		return -1
	}

	var maxack uint32
	var lastackts uint32
	var flag int
	var inSegs uint64

	for {
		var ts, sn, length, una, conv uint32
		var wnd uint16
		var cmd, frg uint8
		if len(data) < int(IKCP_OVERHEAD) {
			break
		}

		// decode the fixed segment header
		data = ikcp_decode32u(data, &conv)
		if conv != kcp.conv {
			return -1
		}

		data = ikcp_decode8u(data, &cmd)
		data = ikcp_decode8u(data, &frg)
		data = ikcp_decode16u(data, &wnd)
		data = ikcp_decode32u(data, &ts)
		data = ikcp_decode32u(data, &sn)
		data = ikcp_decode32u(data, &una)
		data = ikcp_decode32u(data, &length)
		if len(data) < int(length) {
			return -2
		}

		if cmd != IKCP_CMD_PUSH && cmd != IKCP_CMD_ACK &&
			cmd != IKCP_CMD_WASK && cmd != IKCP_CMD_WINS {
			return -3
		}

		// only trust window updates from regular packets. i.e: latest update
		if regular {
			kcp.rmt_wnd = uint32(wnd)
		}
		kcp.parse_una(una)
		kcp.shrink_buf()

		if cmd == IKCP_CMD_ACK {
			kcp.parse_ack(sn)
			kcp.shrink_buf()
			// track the largest acked sn in this batch for fast-ack accounting
			if flag == 0 {
				flag = 1
				maxack = sn
			} else if _itimediff(sn, maxack) > 0 {
				maxack = sn
			}
			lastackts = ts
		} else if cmd == IKCP_CMD_PUSH {
			if _itimediff(sn, kcp.rcv_nxt+kcp.rcv_wnd) < 0 {
				// always ack a push inside the window, even duplicates
				kcp.ack_push(sn, ts)
				if _itimediff(sn, kcp.rcv_nxt) >= 0 {
					seg := kcp.newSegment(int(length))
					seg.conv = conv
					seg.cmd = uint32(cmd)
					seg.frg = uint32(frg)
					seg.wnd = uint32(wnd)
					seg.ts = ts
					seg.sn = sn
					seg.una = una
					copy(seg.data, data[:length])
					kcp.parse_data(seg)
				} else {
					atomic.AddUint64(&DefaultSnmp.RepeatSegs, 1)
				}
			} else {
				atomic.AddUint64(&DefaultSnmp.RepeatSegs, 1)
			}
		} else if cmd == IKCP_CMD_WASK {
			// ready to send back IKCP_CMD_WINS in Ikcp_flush
			// tell remote my window size
			kcp.probe |= IKCP_ASK_TELL
		} else if cmd == IKCP_CMD_WINS {
			// do nothing
		} else {
			return -3
		}
		inSegs++
		data = data[length:]
	}
	atomic.AddUint64(&DefaultSnmp.InSegs, inSegs)

	// feed the newest ack timestamp into the RTT estimator
	if flag != 0 && regular {
		kcp.parse_fastack(maxack)
		current := currentMs()
		if _itimediff(current, lastackts) >= 0 {
			kcp.update_ack(_itimediff(current, lastackts))
		}
	}

	// grow the congestion window when new data was acknowledged
	if _itimediff(kcp.snd_una, una) > 0 {
		if kcp.cwnd < kcp.rmt_wnd {
			mss := kcp.mss
			if kcp.cwnd < kcp.ssthresh {
				// slow start: one MSS per ack
				kcp.cwnd++
				kcp.incr += mss
			} else {
				// congestion avoidance: roughly one MSS per RTT
				if kcp.incr < mss {
					kcp.incr = mss
				}
				kcp.incr += (mss*mss)/kcp.incr + (mss / 16)
				if (kcp.cwnd+1)*mss <= kcp.incr {
					kcp.cwnd++
				}
			}
			if kcp.cwnd > kcp.rmt_wnd {
				kcp.cwnd = kcp.rmt_wnd
				kcp.incr = kcp.rmt_wnd * mss
			}
		}
	}

	if ackNoDelay && len(kcp.acklist) > 0 { // ack immediately
		kcp.flush(true)
	} else if kcp.rmt_wnd == 0 && len(kcp.acklist) > 0 { // window zero
		kcp.flush(true)
	}
	return 0
}
// wnd_unused reports how many more segments the receive queue can
// accept; 0 when the receive window is full.
func (kcp *KCP) wnd_unused() int32 {
	if used := len(kcp.rcv_queue); used < int(kcp.rcv_wnd) {
		return int32(int(kcp.rcv_wnd) - used)
	}
	return 0
}
// flush pending data: sends queued acknowledgements, window probes,
// new data segments and retransmissions through kcp.output, then
// updates congestion-control state. When ackOnly is true only the
// acknowledgements are sent.
func (kcp *KCP) flush(ackOnly bool) {
	buffer := kcp.buffer
	change := 0
	lost := false

	// template segment carrying the header fields shared by every frame
	var seg Segment
	seg.conv = kcp.conv
	seg.cmd = IKCP_CMD_ACK
	seg.wnd = uint32(kcp.wnd_unused())
	seg.una = kcp.rcv_nxt

	// flush acknowledges
	var required []ackItem
	for i, ack := range kcp.acklist {
		// filter necessary acks only
		if ack.sn >= kcp.rcv_nxt || len(kcp.acklist)-1 == i {
			required = append(required, kcp.acklist[i])
		}
	}
	kcp.acklist = nil

	ptr := buffer
	// at most this many ack headers fit in one MTU-sized datagram
	maxBatchSize := kcp.mtu / IKCP_OVERHEAD
	for len(required) > 0 {
		var batchSize int
		if kcp.datashard > 0 && kcp.parityshard > 0 { // try triggering FEC
			batchSize = int(_ibound_(1, uint32(len(required)/kcp.datashard), maxBatchSize))
		} else {
			batchSize = int(_ibound_(1, uint32(len(required)), maxBatchSize))
		}
		for len(required) >= batchSize {
			for i := 0; i < batchSize; i++ {
				ack := required[i]
				seg.sn, seg.ts = ack.sn, ack.ts
				ptr = seg.encode(ptr)
			}
			size := len(buffer) - len(ptr)
			kcp.output(buffer, size)
			ptr = buffer
			required = required[batchSize:]
		}
	}

	if ackOnly { // flush acks only
		return
	}

	current := currentMs()
	// probe window size (if remote window size equals zero)
	if kcp.rmt_wnd == 0 {
		if kcp.probe_wait == 0 {
			kcp.probe_wait = IKCP_PROBE_INIT
			kcp.ts_probe = current + kcp.probe_wait
		} else {
			if _itimediff(current, kcp.ts_probe) >= 0 {
				// exponential-ish backoff on the probe interval, capped
				if kcp.probe_wait < IKCP_PROBE_INIT {
					kcp.probe_wait = IKCP_PROBE_INIT
				}
				kcp.probe_wait += kcp.probe_wait / 2
				if kcp.probe_wait > IKCP_PROBE_LIMIT {
					kcp.probe_wait = IKCP_PROBE_LIMIT
				}
				kcp.ts_probe = current + kcp.probe_wait
				kcp.probe |= IKCP_ASK_SEND
			}
		}
	} else {
		kcp.ts_probe = 0
		kcp.probe_wait = 0
	}

	// flush window probing commands
	if (kcp.probe & IKCP_ASK_SEND) != 0 {
		seg.cmd = IKCP_CMD_WASK
		size := len(buffer) - len(ptr)
		if size+IKCP_OVERHEAD > int(kcp.mtu) {
			kcp.output(buffer, size)
			ptr = buffer
		}
		ptr = seg.encode(ptr)
	}

	// flush window probing commands
	if (kcp.probe & IKCP_ASK_TELL) != 0 {
		seg.cmd = IKCP_CMD_WINS
		size := len(buffer) - len(ptr)
		if size+IKCP_OVERHEAD > int(kcp.mtu) {
			kcp.output(buffer, size)
			ptr = buffer
		}
		ptr = seg.encode(ptr)
	}

	kcp.probe = 0

	// calculate window size
	cwnd := _imin_(kcp.snd_wnd, kcp.rmt_wnd)
	if kcp.nocwnd == 0 {
		cwnd = _imin_(kcp.cwnd, cwnd)
	}

	// sliding window, controlled by snd_nxt && sna_una+cwnd
	newSegsCount := 0
	for k := range kcp.snd_queue {
		if _itimediff(kcp.snd_nxt, kcp.snd_una+cwnd) >= 0 {
			break
		}
		newseg := kcp.snd_queue[k]
		newseg.conv = kcp.conv
		newseg.cmd = IKCP_CMD_PUSH
		newseg.sn = kcp.snd_nxt
		kcp.snd_buf = append(kcp.snd_buf, newseg)
		kcp.snd_nxt++
		newSegsCount++
		// ownership of the data moved into snd_buf
		kcp.snd_queue[k].data = nil
	}
	kcp.snd_queue = kcp.snd_queue[newSegsCount:]

	// calculate resent
	resent := uint32(kcp.fastresend)
	if kcp.fastresend <= 0 {
		// fast retransmit disabled: threshold can never be reached
		resent = 0xffffffff
	}

	// counters
	var lostSegs, fastRetransSegs, earlyRetransSegs uint64

	// send new segments
	for k := len(kcp.snd_buf) - newSegsCount; k < len(kcp.snd_buf); k++ {
		segment := &kcp.snd_buf[k]
		segment.xmit++
		segment.rto = kcp.rx_rto
		segment.resendts = current + segment.rto
		segment.ts = current
		segment.wnd = seg.wnd
		segment.una = kcp.rcv_nxt

		size := len(buffer) - len(ptr)
		need := IKCP_OVERHEAD + len(segment.data)
		// flush the datagram first when this segment would overflow the MTU
		if size+need > int(kcp.mtu) {
			kcp.output(buffer, size)
			ptr = buffer
		}

		ptr = segment.encode(ptr)
		copy(ptr, segment.data)
		ptr = ptr[len(segment.data):]
	}

	// check for retransmissions
	for k := 0; k < len(kcp.snd_buf)-newSegsCount; k++ {
		segment := &kcp.snd_buf[k]
		needsend := false
		if _itimediff(current, segment.resendts) >= 0 { // RTO
			needsend = true
			segment.xmit++
			kcp.xmit++
			if kcp.nodelay == 0 {
				segment.rto += kcp.rx_rto
			} else {
				segment.rto += kcp.rx_rto / 2
			}
			segment.resendts = current + segment.rto
			lost = true
			lostSegs++
		} else if segment.fastack >= resent { // fast retransmit
			needsend = true
			segment.xmit++
			segment.fastack = 0
			segment.rto = kcp.rx_rto
			segment.resendts = current + segment.rto
			change++
			fastRetransSegs++
		} else if segment.fastack > 0 && newSegsCount == 0 { // early retransmit
			needsend = true
			segment.xmit++
			segment.fastack = 0
			segment.rto = kcp.rx_rto
			segment.resendts = current + segment.rto
			change++
			earlyRetransSegs++
		}

		if needsend {
			segment.ts = current
			segment.wnd = seg.wnd
			segment.una = kcp.rcv_nxt

			size := len(buffer) - len(ptr)
			need := IKCP_OVERHEAD + len(segment.data)
			if size+need > int(kcp.mtu) {
				kcp.output(buffer, size)
				ptr = buffer
			}

			ptr = segment.encode(ptr)
			copy(ptr, segment.data)
			ptr = ptr[len(segment.data):]

			if segment.xmit >= kcp.dead_link {
				// too many retransmissions: mark the link dead
				kcp.state = 0xFFFFFFFF
			}
		}
	}

	// flash remain segments
	size := len(buffer) - len(ptr)
	if size > 0 {
		kcp.output(buffer, size)
	}

	// counter updates
	sum := lostSegs
	if lostSegs > 0 {
		atomic.AddUint64(&DefaultSnmp.LostSegs, lostSegs)
	}
	if fastRetransSegs > 0 {
		atomic.AddUint64(&DefaultSnmp.FastRetransSegs, fastRetransSegs)
		sum += fastRetransSegs
	}
	if earlyRetransSegs > 0 {
		atomic.AddUint64(&DefaultSnmp.EarlyRetransSegs, earlyRetransSegs)
		sum += earlyRetransSegs
	}
	if sum > 0 {
		atomic.AddUint64(&DefaultSnmp.RetransSegs, sum)
	}

	// update ssthresh
	// rate halving, https://tools.ietf.org/html/rfc6937
	if change != 0 {
		inflight := kcp.snd_nxt - kcp.snd_una
		kcp.ssthresh = inflight / 2
		if kcp.ssthresh < IKCP_THRESH_MIN {
			kcp.ssthresh = IKCP_THRESH_MIN
		}
		kcp.cwnd = kcp.ssthresh + resent
		kcp.incr = kcp.cwnd * kcp.mss
	}

	// congestion control, https://tools.ietf.org/html/rfc5681
	if lost {
		kcp.ssthresh = cwnd / 2
		if kcp.ssthresh < IKCP_THRESH_MIN {
			kcp.ssthresh = IKCP_THRESH_MIN
		}
		kcp.cwnd = 1
		kcp.incr = kcp.mss
	}

	if kcp.cwnd < 1 {
		kcp.cwnd = 1
		kcp.incr = kcp.mss
	}
}
// Update updates state (call it repeatedly, every 10ms-100ms), or you can ask
// ikcp_check when to call it again (without ikcp_input/_send calling).
// 'current' - current timestamp in millisec.
func (kcp *KCP) Update() {
	var slap int32

	current := currentMs()
	if kcp.updated == 0 {
		kcp.updated = 1
		kcp.ts_flush = current
	}

	slap = _itimediff(current, kcp.ts_flush)
	if slap >= 10000 || slap < -10000 {
		// clock jumped more than 10s in either direction: resynchronize
		kcp.ts_flush = current
		slap = 0
	}

	if slap >= 0 {
		// schedule the next flush one interval ahead, never in the past
		kcp.ts_flush += kcp.interval
		if _itimediff(current, kcp.ts_flush) >= 0 {
			kcp.ts_flush = current + kcp.interval
		}
		kcp.flush(false)
	}
}
// Check determines when should you invoke ikcp_update:
// returns when you should invoke ikcp_update in millisec, if there
// is no ikcp_input/_send calling. you can call ikcp_update in that
// time, instead of calling update repeatedly.
// Important to reduce unnecessary ikcp_update invoking. use it to
// schedule ikcp_update (eg. implementing an epoll-like mechanism,
// or optimize ikcp_update when handling massive kcp connections)
func (kcp *KCP) Check() uint32 {
	current := currentMs()
	ts_flush := kcp.ts_flush
	tm_flush := int32(0x7fffffff)
	tm_packet := int32(0x7fffffff)
	minimal := uint32(0)
	if kcp.updated == 0 {
		// Update has never run: it is due immediately
		return current
	}

	if _itimediff(current, ts_flush) >= 10000 ||
		_itimediff(current, ts_flush) < -10000 {
		// clock drifted too far: treat the flush as due now
		ts_flush = current
	}

	if _itimediff(current, ts_flush) >= 0 {
		return current
	}

	tm_flush = _itimediff(ts_flush, current)

	// find the earliest pending retransmission deadline
	for k := range kcp.snd_buf {
		seg := &kcp.snd_buf[k]
		diff := _itimediff(seg.resendts, current)
		if diff <= 0 {
			return current
		}
		if diff < tm_packet {
			tm_packet = diff
		}
	}

	// wait until the nearest deadline, but never longer than one interval
	minimal = uint32(tm_packet)
	if tm_packet >= tm_flush {
		minimal = uint32(tm_flush)
	}
	if minimal >= kcp.interval {
		minimal = kcp.interval
	}

	return current + minimal
}
// setFEC records the datashard/parityshard configuration; flush uses it
// to size acknowledgement batches so they line up with FEC shards.
func (kcp *KCP) setFEC(datashard, parityshard int) {
	kcp.datashard = datashard
	kcp.parityshard = parityshard
}
// SetMtu changes MTU size, default is 1400.
// Returns 0 on success, -1 when mtu is too small to carry a segment
// header. The working buffer holds three MTU-sized frames.
func (kcp *KCP) SetMtu(mtu int) int {
	if mtu < 50 || mtu < IKCP_OVERHEAD {
		return -1
	}
	// make never returns nil (it panics on allocation failure), so the
	// previous `buffer == nil` check returning -2 was unreachable and
	// has been removed.
	buffer := make([]byte, (mtu+IKCP_OVERHEAD)*3)
	kcp.mtu = uint32(mtu)
	kcp.mss = kcp.mtu - IKCP_OVERHEAD
	kcp.buffer = buffer
	return 0
}
// NoDelay options
// fastest: ikcp_nodelay(kcp, 1, 20, 2, 1)
// nodelay: 0:disable(default), 1:enable
// interval: internal update timer interval in millisec, default is 100ms
// resend: 0:disable fast resend(default), 1:enable fast resend
// nc: 0:normal congestion control(default), 1:disable congestion control
// Negative arguments leave the corresponding setting untouched.
func (kcp *KCP) NoDelay(nodelay, interval, resend, nc int) int {
	if nodelay >= 0 {
		kcp.nodelay = uint32(nodelay)
		switch nodelay {
		case 0:
			kcp.rx_minrto = IKCP_RTO_MIN
		default:
			kcp.rx_minrto = IKCP_RTO_NDL
		}
	}
	if interval >= 0 {
		// clamp the update interval to [10, 5000] milliseconds
		switch {
		case interval > 5000:
			interval = 5000
		case interval < 10:
			interval = 10
		}
		kcp.interval = uint32(interval)
	}
	if resend >= 0 {
		kcp.fastresend = int32(resend)
	}
	if nc >= 0 {
		kcp.nocwnd = int32(nc)
	}
	return 0
}
// WndSize sets maximum window size: sndwnd=32, rcvwnd=32 by default.
// Non-positive arguments leave the corresponding window unchanged.
// Always returns 0.
func (kcp *KCP) WndSize(sndwnd, rcvwnd int) int {
	if sndwnd > 0 {
		kcp.snd_wnd = uint32(sndwnd)
	}
	if rcvwnd > 0 {
		kcp.rcv_wnd = uint32(rcvwnd)
	}
	return 0
}
// WaitSnd gets how many packets are waiting to be sent, counting both
// in-flight segments (snd_buf) and not-yet-transmitted ones (snd_queue).
func (kcp *KCP) WaitSnd() int {
	return len(kcp.snd_buf) + len(kcp.snd_queue)
}
// Cwnd returns the effective congestion window size: the smaller of the
// send and remote windows, further limited by cwnd unless congestion
// control is disabled (nocwnd != 0).
func (kcp *KCP) Cwnd() uint32 {
	wnd := _imin_(kcp.snd_wnd, kcp.rmt_wnd)
	if kcp.nocwnd != 0 {
		return wnd
	}
	return _imin_(kcp.cwnd, wnd)
}
|
package datastore
import (
"path"
"strings"
dsq "github.com/ipfs/go-datastore/query"
"github.com/satori/go.uuid"
base32 "github.com/whyrusleeping/base32"
)
/*
A Key represents the unique identifier of an object.
Our Key scheme is inspired by file systems and Google App Engine key model.
Keys are meant to be unique across a system. Keys are hierarchical,
incorporating more and more specific namespaces. Thus keys can be deemed
'children' or 'ancestors' of other keys::
Key("/Comedy")
Key("/Comedy/MontyPython")
Also, every namespace can be parametrized to embed relevant object
information. For example, the Key `name` (most specific namespace) could
include the object type::
Key("/Comedy/MontyPython/Actor:JohnCleese")
Key("/Comedy/MontyPython/Sketch:CheeseShop")
Key("/Comedy/MontyPython/Sketch:CheeseShop/Character:Mousebender")
*/
// Key wraps a single string in the path-like scheme described in the
// package comment, e.g. "/Comedy/MontyPython/Actor:JohnCleese".
type Key struct {
	string
}

// NewKey constructs a key from string. it will clean the value.
func NewKey(s string) Key {
	k := Key{s}
	k.Clean()
	return k
}
// RawKey creates a new Key without safety checking the input. Use with care.
// It panics when s is not already canonical: it must start with '/' and
// must not end with a trailing '/' (except for the root key "/").
func RawKey(s string) Key {
	// accept an empty string and fix it to avoid special cases
	// elsewhere
	if len(s) == 0 {
		return Key{"/"}
	}

	// perform a quick sanity check that the key is in the correct
	// format, if it is not then it is a programmer error and it is
	// okay to panic
	// (the former second `len(s) == 0` test was unreachable after the
	// early return above and has been dropped)
	if s[0] != '/' || (len(s) > 1 && s[len(s)-1] == '/') {
		panic("invalid datastore key: " + s)
	}

	return Key{s}
}
// NewSafeKey returns a Key whose value is the raw base32 encoding of s,
// making arbitrary bytes safe to embed in a key component.
// NOTE(review): the result does not begin with '/' and bypasses Clean —
// confirm callers expect a non-canonical key here.
func NewSafeKey(s string) Key {
	return Key{base32.RawStdEncoding.EncodeToString([]byte(s))}
}

// KeyWithNamespaces constructs a key out of a namespace slice.
func KeyWithNamespaces(ns []string) Key {
	return NewKey(strings.Join(ns, "/"))
}
// Clean up a Key, using path.Clean. Guarantees the key is rooted at '/'.
func (k *Key) Clean() {
	switch {
	case len(k.string) == 0:
		k.string = "/"
	case k.string[0] == '/':
		k.string = path.Clean(k.string)
	default:
		// relative input is rooted before cleaning
		k.string = path.Clean("/" + k.string)
	}
}

// String returns the string value of Key.
func (k Key) String() string {
	return k.string
}

// Bytes returns the string value of Key as a []byte
func (k Key) Bytes() []byte {
	return []byte(k.string)
}

// Equal checks equality of two keys
func (k Key) Equal(k2 Key) bool {
	return k.string == k2.string
}
// Less checks whether this key is sorted lower than another, comparing
// namespace components left to right; a strict prefix sorts first.
func (k Key) Less(k2 Key) bool {
	list1 := k.List()
	list2 := k2.List()
	for i, c1 := range list1 {
		if len(list2) < (i + 1) {
			// k2 is a strict prefix of k, so k sorts after it
			return false
		}

		c2 := list2[i]
		if c1 < c2 {
			return true
		} else if c1 > c2 {
			return false
		}
		// c1 == c2, continue
	}

	// list1 is shorter or exactly the same.
	return len(list1) < len(list2)
}

// List returns the `list` representation of this Key.
//   NewKey("/Comedy/MontyPython/Actor:JohnCleese").List()
//   ["Comedy", "MontyPython", "Actor:JohnCleese"]
func (k Key) List() []string {
	return strings.Split(k.string, "/")[1:]
}
// Reverse returns the reverse of this Key.
//   NewKey("/Comedy/MontyPython/Actor:JohnCleese").Reverse()
//   NewKey("/Actor:JohnCleese/MontyPython/Comedy")
func (k Key) Reverse() Key {
	l := k.List()
	r := make([]string, len(l), len(l))
	for i, e := range l {
		r[len(l)-i-1] = e
	}
	return KeyWithNamespaces(r)
}

// Namespaces returns the `namespaces` making up this Key.
//   NewKey("/Comedy/MontyPython/Actor:JohnCleese").Namespaces()
//   ["Comedy", "MontyPython", "Actor:JohnCleese"]
func (k Key) Namespaces() []string {
	return k.List()
}

// BaseNamespace returns the "base" namespace of this key
// (analogous to path.Base for file paths).
//   NewKey("/Comedy/MontyPython/Actor:JohnCleese").BaseNamespace()
//   "Actor:JohnCleese"
func (k Key) BaseNamespace() string {
	n := k.Namespaces()
	return n[len(n)-1]
}

// Type returns the "type" of this key (value of last namespace).
//   NewKey("/Comedy/MontyPython/Actor:JohnCleese").Type()
//   "Actor"
func (k Key) Type() string {
	return NamespaceType(k.BaseNamespace())
}
// Name returns the "name" of this key (field of last namespace).
//   NewKey("/Comedy/MontyPython/Actor:JohnCleese").Name()
//   "JohnCleese"
func (k Key) Name() string {
	return NamespaceValue(k.BaseNamespace())
}

// Instance returns an "instance" of this type key (appends value to namespace).
//   NewKey("/Comedy/MontyPython/Actor").Instance("JohnCleese")
//   NewKey("/Comedy/MontyPython/Actor:JohnCleese")
func (k Key) Instance(s string) Key {
	return NewKey(k.string + ":" + s)
}

// Path returns the "path" of this key (parent + type).
//   NewKey("/Comedy/MontyPython/Actor:JohnCleese").Path()
//   NewKey("/Comedy/MontyPython/Actor")
func (k Key) Path() Key {
	s := k.Parent().string + "/" + NamespaceType(k.BaseNamespace())
	return NewKey(s)
}

// Parent returns the `parent` Key of this Key.
//   NewKey("/Comedy/MontyPython/Actor:JohnCleese").Parent()
//   NewKey("/Comedy/MontyPython")
func (k Key) Parent() Key {
	n := k.List()
	if len(n) == 1 {
		// top-level keys have the root as their parent
		return RawKey("/")
	}
	return NewKey(strings.Join(n[:len(n)-1], "/"))
}
// Child returns the `child` Key of this Key.
//   NewKey("/Comedy/MontyPython").Child(NewKey("Actor:JohnCleese"))
//   NewKey("/Comedy/MontyPython/Actor:JohnCleese")
func (k Key) Child(k2 Key) Key {
	switch {
	case k.string == "/":
		return k2
	case k2.string == "/":
		return k
	default:
		// both sides are canonical ("/..."), so plain concatenation is valid
		return RawKey(k.string + k2.string)
	}
}

// ChildString returns the `child` Key of this Key -- string helper.
//   NewKey("/Comedy/MontyPython").ChildString("Actor:JohnCleese")
//   NewKey("/Comedy/MontyPython/Actor:JohnCleese")
func (k Key) ChildString(s string) Key {
	return NewKey(k.string + "/" + s)
}

// IsAncestorOf returns whether this key is a prefix of `other`.
// A key is never its own ancestor.
//   NewKey("/Comedy").IsAncestorOf("/Comedy/MontyPython")
//   true
func (k Key) IsAncestorOf(other Key) bool {
	if other.string == k.string {
		return false
	}
	return strings.HasPrefix(other.string, k.string)
}

// IsDescendantOf returns whether this key contains another as a prefix.
// A key is never its own descendant.
//   NewKey("/Comedy/MontyPython").IsDescendantOf("/Comedy")
//   true
func (k Key) IsDescendantOf(other Key) bool {
	if other.string == k.string {
		return false
	}
	return strings.HasPrefix(k.string, other.string)
}

// IsTopLevel returns whether this key has only one namespace.
func (k Key) IsTopLevel() bool {
	return len(k.List()) == 1
}
// RandomKey returns a randomly (uuid) generated key, with the dashes
// stripped from the uuid's text form.
//   RandomKey()
//   NewKey("/f98719ea086343f7b71f32ea9d9d521d")
func RandomKey() Key {
	return NewKey(strings.Replace(uuid.NewV4().String(), "-", "", -1))
}
/*
A Key Namespace is like a path element.
A namespace can optionally include a type (delimited by ':')
> NamespaceValue("Song:PhilosopherSong")
PhilosopherSong
> NamespaceType("Song:PhilosopherSong")
Song
> NamespaceType("Music:Song:PhilosopherSong")
Music:Song
*/
// NamespaceType is the portion of a namespace before the final ':'
// separator — `foo` in `foo:bar`, `Music:Song` in
// `Music:Song:PhilosopherSong` — or "" when there is no separator.
func NamespaceType(namespace string) string {
	if i := strings.LastIndex(namespace, ":"); i >= 0 {
		return namespace[:i]
	}
	return ""
}

// NamespaceValue returns the last component of a namespace: `baz` in
// `f:b:baz`, or the whole string when it has no ':' separator.
func NamespaceValue(namespace string) string {
	return namespace[strings.LastIndex(namespace, ":")+1:]
}
// KeySlice attaches the methods of sort.Interface to []Key,
// sorting in increasing order.
type KeySlice []Key

func (p KeySlice) Len() int           { return len(p) }
func (p KeySlice) Less(i, j int) bool { return p[i].Less(p[j]) }
func (p KeySlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

// EntryKeys converts a slice of query entries into their Keys.
func EntryKeys(e []dsq.Entry) []Key {
	ks := make([]Key, len(e))
	for i, e := range e {
		ks[i] = NewKey(e.Key)
	}
	return ks
}
Move NewSafeKey functionality into the dshelp package in go-ipfs.
package datastore
import (
"path"
"strings"
dsq "github.com/ipfs/go-datastore/query"
"github.com/satori/go.uuid"
)
/*
A Key represents the unique identifier of an object.
Our Key scheme is inspired by file systems and Google App Engine key model.
Keys are meant to be unique across a system. Keys are hierarchical,
incorporating more and more specific namespaces. Thus keys can be deemed
'children' or 'ancestors' of other keys::
Key("/Comedy")
Key("/Comedy/MontyPython")
Also, every namespace can be parametrized to embed relevant object
information. For example, the Key `name` (most specific namespace) could
include the object type::
Key("/Comedy/MontyPython/Actor:JohnCleese")
Key("/Comedy/MontyPython/Sketch:CheeseShop")
Key("/Comedy/MontyPython/Sketch:CheeseShop/Character:Mousebender")
*/
// Key wraps a single string in the path-like scheme described in the
// package comment, e.g. "/Comedy/MontyPython/Actor:JohnCleese".
type Key struct {
	string
}

// NewKey constructs a key from string. it will clean the value.
func NewKey(s string) Key {
	k := Key{s}
	k.Clean()
	return k
}
// RawKey creates a new Key without safety checking the input. Use with care.
// It panics when s is not already canonical: it must start with '/' and
// must not end with a trailing '/' (except for the root key "/").
func RawKey(s string) Key {
	// accept an empty string and fix it to avoid special cases
	// elsewhere
	if len(s) == 0 {
		return Key{"/"}
	}

	// perform a quick sanity check that the key is in the correct
	// format, if it is not then it is a programmer error and it is
	// okay to panic
	// (the former second `len(s) == 0` test was unreachable after the
	// early return above and has been dropped)
	if s[0] != '/' || (len(s) > 1 && s[len(s)-1] == '/') {
		panic("invalid datastore key: " + s)
	}

	return Key{s}
}
// KeyWithNamespaces constructs a key out of a namespace slice.
func KeyWithNamespaces(ns []string) Key {
	return NewKey(strings.Join(ns, "/"))
}

// Clean up a Key, using path.Clean. Guarantees the key is rooted at '/'.
func (k *Key) Clean() {
	switch {
	case len(k.string) == 0:
		k.string = "/"
	case k.string[0] == '/':
		k.string = path.Clean(k.string)
	default:
		// relative input is rooted before cleaning
		k.string = path.Clean("/" + k.string)
	}
}

// String returns the string value of Key.
func (k Key) String() string {
	return k.string
}

// Bytes returns the string value of Key as a []byte
func (k Key) Bytes() []byte {
	return []byte(k.string)
}

// Equal checks equality of two keys
func (k Key) Equal(k2 Key) bool {
	return k.string == k2.string
}
// Less checks whether this key is sorted lower than another, comparing
// namespace components left to right; a strict prefix sorts first.
func (k Key) Less(k2 Key) bool {
	list1 := k.List()
	list2 := k2.List()
	for i, c1 := range list1 {
		if len(list2) < (i + 1) {
			// k2 is a strict prefix of k, so k sorts after it
			return false
		}

		c2 := list2[i]
		if c1 < c2 {
			return true
		} else if c1 > c2 {
			return false
		}
		// c1 == c2, continue
	}

	// list1 is shorter or exactly the same.
	return len(list1) < len(list2)
}

// List returns the `list` representation of this Key.
//   NewKey("/Comedy/MontyPython/Actor:JohnCleese").List()
//   ["Comedy", "MontyPython", "Actor:JohnCleese"]
func (k Key) List() []string {
	return strings.Split(k.string, "/")[1:]
}
// Reverse returns the reverse of this Key.
//   NewKey("/Comedy/MontyPython/Actor:JohnCleese").Reverse()
//   NewKey("/Actor:JohnCleese/MontyPython/Comedy")
func (k Key) Reverse() Key {
	l := k.List()
	r := make([]string, len(l), len(l))
	for i, e := range l {
		r[len(l)-i-1] = e
	}
	return KeyWithNamespaces(r)
}

// Namespaces returns the `namespaces` making up this Key.
//   NewKey("/Comedy/MontyPython/Actor:JohnCleese").Namespaces()
//   ["Comedy", "MontyPython", "Actor:JohnCleese"]
func (k Key) Namespaces() []string {
	return k.List()
}

// BaseNamespace returns the "base" namespace of this key
// (analogous to path.Base for file paths).
//   NewKey("/Comedy/MontyPython/Actor:JohnCleese").BaseNamespace()
//   "Actor:JohnCleese"
func (k Key) BaseNamespace() string {
	n := k.Namespaces()
	return n[len(n)-1]
}

// Type returns the "type" of this key (value of last namespace).
//   NewKey("/Comedy/MontyPython/Actor:JohnCleese").Type()
//   "Actor"
func (k Key) Type() string {
	return NamespaceType(k.BaseNamespace())
}

// Name returns the "name" of this key (field of last namespace).
//   NewKey("/Comedy/MontyPython/Actor:JohnCleese").Name()
//   "JohnCleese"
func (k Key) Name() string {
	return NamespaceValue(k.BaseNamespace())
}

// Instance returns an "instance" of this type key (appends value to namespace).
//   NewKey("/Comedy/MontyPython/Actor").Instance("JohnCleese")
//   NewKey("/Comedy/MontyPython/Actor:JohnCleese")
func (k Key) Instance(s string) Key {
	return NewKey(k.string + ":" + s)
}

// Path returns the "path" of this key (parent + type).
//   NewKey("/Comedy/MontyPython/Actor:JohnCleese").Path()
//   NewKey("/Comedy/MontyPython/Actor")
func (k Key) Path() Key {
	s := k.Parent().string + "/" + NamespaceType(k.BaseNamespace())
	return NewKey(s)
}

// Parent returns the `parent` Key of this Key.
//   NewKey("/Comedy/MontyPython/Actor:JohnCleese").Parent()
//   NewKey("/Comedy/MontyPython")
func (k Key) Parent() Key {
	n := k.List()
	if len(n) == 1 {
		// top-level keys have the root as their parent
		return RawKey("/")
	}
	return NewKey(strings.Join(n[:len(n)-1], "/"))
}
// Child returns the `child` Key of this Key.
//   NewKey("/Comedy/MontyPython").Child(NewKey("Actor:JohnCleese"))
//   NewKey("/Comedy/MontyPython/Actor:JohnCleese")
func (k Key) Child(k2 Key) Key {
	switch {
	case k.string == "/":
		return k2
	case k2.string == "/":
		return k
	default:
		// both sides are canonical ("/..."), so plain concatenation is valid
		return RawKey(k.string + k2.string)
	}
}

// ChildString returns the `child` Key of this Key -- string helper.
//   NewKey("/Comedy/MontyPython").ChildString("Actor:JohnCleese")
//   NewKey("/Comedy/MontyPython/Actor:JohnCleese")
func (k Key) ChildString(s string) Key {
	return NewKey(k.string + "/" + s)
}

// IsAncestorOf returns whether this key is a prefix of `other`.
// A key is never its own ancestor.
//   NewKey("/Comedy").IsAncestorOf("/Comedy/MontyPython")
//   true
func (k Key) IsAncestorOf(other Key) bool {
	if other.string == k.string {
		return false
	}
	return strings.HasPrefix(other.string, k.string)
}

// IsDescendantOf returns whether this key contains another as a prefix.
// A key is never its own descendant.
//   NewKey("/Comedy/MontyPython").IsDescendantOf("/Comedy")
//   true
func (k Key) IsDescendantOf(other Key) bool {
	if other.string == k.string {
		return false
	}
	return strings.HasPrefix(k.string, other.string)
}

// IsTopLevel returns whether this key has only one namespace.
func (k Key) IsTopLevel() bool {
	return len(k.List()) == 1
}

// RandomKey returns a randomly (uuid) generated key, with the dashes
// stripped from the uuid's text form.
//   RandomKey()
//   NewKey("/f98719ea086343f7b71f32ea9d9d521d")
func RandomKey() Key {
	return NewKey(strings.Replace(uuid.NewV4().String(), "-", "", -1))
}
/*
A Key Namespace is like a path element.
A namespace can optionally include a type (delimited by ':')
> NamespaceValue("Song:PhilosopherSong")
PhilosopherSong
> NamespaceType("Song:PhilosopherSong")
Song
> NamespaceType("Music:Song:PhilosopherSong")
Music:Song
*/
// NamespaceType is the portion of a namespace before the final ':'
// separator — `foo` in `foo:bar`, `Music:Song` in
// `Music:Song:PhilosopherSong` — or "" when there is no separator.
func NamespaceType(namespace string) string {
	if i := strings.LastIndex(namespace, ":"); i >= 0 {
		return namespace[:i]
	}
	return ""
}

// NamespaceValue returns the last component of a namespace: `baz` in
// `f:b:baz`, or the whole string when it has no ':' separator.
func NamespaceValue(namespace string) string {
	return namespace[strings.LastIndex(namespace, ":")+1:]
}
// KeySlice attaches the methods of sort.Interface to []Key,
// sorting in increasing order.
type KeySlice []Key

func (p KeySlice) Len() int           { return len(p) }
func (p KeySlice) Less(i, j int) bool { return p[i].Less(p[j]) }
func (p KeySlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

// EntryKeys converts a slice of query entries into their Keys.
func EntryKeys(e []dsq.Entry) []Key {
	ks := make([]Key, len(e))
	for i, e := range e {
		ks[i] = NewKey(e.Key)
	}
	return ks
}
|
package main
import {
"fmt"
"github.com/stianeikeland/go-rpio"
"os"
"time"
}
var {
pin = rpio.Pin(4)
}
func main() {
if err := rpio.Open(): err != nil {
fmt.Println(err)
os.Exit(1)
}
defer rpio.Close()
pin.Output()
for i := 0; i < 20; i++ {
pin.Toggle()
time.Sleep(time.Second)
}
}
Fixed imports
package main
import "fmt"
import "github.com/stianeikeland/go-rpio"
import "os"
import "time"
var {
pin = rpio.Pin(4)
}
func main() {
if err := rpio.Open(): err != nil {
fmt.Println(err)
os.Exit(1)
}
defer rpio.Close()
pin.Output()
for i := 0; i < 20; i++ {
pin.Toggle()
time.Sleep(time.Second)
}
}
|
/*
SPDX-License-Identifier: MIT
Copyright (c) 2017 Thanh Ha
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
// lhc is a checker to find code files missing license headers.
package main
import (
"bufio"
"flag"
"fmt"
"os"
"path/filepath"
"strings"
"unicode"
"github.com/zxiiro/license-header-checker/licenses"
)
// LICENSE_HEADER_LINES_MAX caps how many leading lines of a file are
// scanned for a license header.
// NOTE(review): ALL_CAPS names are unconventional in Go (MixedCaps is
// idiomatic); kept as-is for compatibility with existing references.
var LICENSE_HEADER_LINES_MAX = 50

// VERSION is the tool's version string.
var VERSION = "0.1.0"

// License pairs a license's display name with its header text.
type License struct {
	Name string
	Text string
}
// accepted_license compares a license header against an approved list of
// license headers and returns the name of the first license whose text
// is contained in check, or "" when none matches.
func accepted_license(check string, approved []License) string {
	for _, candidate := range approved {
		if strings.Contains(check, candidate.Text) {
			return candidate.Name
		}
	}
	return ""
}
// check prints e and terminates the process with exit status 1 when e
// is non-nil; otherwise it does nothing.
func check(e error) {
	if e != nil {
		fmt.Println(e)
		os.Exit(1)
	}
}
// checkSPDX reports whether filename carries an "SPDX-License-Identifier:"
// tag matching license within its first LICENSE_HEADER_LINES_MAX lines.
// Matching is case-insensitive and ignores whitespace in the identifier.
func checkSPDX(license string, filename string) bool {
	file, err := os.Open(filename)
	check(err)
	defer file.Close()

	scanner := bufio.NewScanner(file)
	for lineNo := 0; scanner.Scan(); {
		// Read only the first few lines to not read entire code file
		lineNo++
		if lineNo > LICENSE_HEADER_LINES_MAX {
			break
		}

		line := strings.ToUpper(scanner.Text())
		if !strings.Contains(line, "SPDX-LICENSE-IDENTIFIER:") {
			continue
		}
		spdx := stripSpaces(strings.SplitN(line, ":", 2)[1])
		return spdx == license
	}
	return false
}
// exclude reports whether path contains any of the given exclusion
// patterns as a substring.
func exclude(path string, excludes []string) bool {
	for _, pattern := range excludes {
		if strings.Contains(path, pattern) {
			return true
		}
	}
	return false
}
func findFiles(directory string, patterns []string) []string {
var files []string
filepath.Walk(directory, func(path string, info os.FileInfo, err error) error {
if info.IsDir() {
for _, p := range patterns {
f, _ := filepath.Glob(filepath.Join(path, p))
files = append(files, f...)
}
}
return nil
})
return files
}
// fetchLicense returns the normalised (uppercased, whitespace-stripped)
// license text used for comparison. filename may be one of the
// built-in license names ("Apache-2.0", "Apache-2.0-ASF", "EPL-1.0",
// "MIT") or a path to a file; for files whose first two bytes look
// like a comment marker, only the leading comment block is collected.
// Only the first LICENSE_HEADER_LINES_MAX lines are read.
func fetchLicense(filename string) string {
	comment, multilineComment := false, false
	licenseText := ""
	var scanner *bufio.Scanner
	if filename == "Apache-2.0" {
		scanner = bufio.NewScanner(strings.NewReader(license.APACHE_20_LICENSE))
	} else if filename == "Apache-2.0-ASF" {
		scanner = bufio.NewScanner(strings.NewReader(license.APACHE_20_LICENSE_ASF))
	} else if filename == "EPL-1.0" {
		scanner = bufio.NewScanner(strings.NewReader(license.EPL_10_LICENSE))
	} else if filename == "MIT" {
		scanner = bufio.NewScanner(strings.NewReader(license.MIT_LICENSE))
	} else {
		file, err := os.Open(filename)
		check(err)
		defer file.Close()
		// Read the first 2 bytes to decide if it is a comment string
		b := make([]byte, 2)
		_, err = file.Read(b)
		check(err)
		if isComment(string(b)) {
			comment = true
		}
		file.Seek(0, 0) // Reset so we can read the full file next
		scanner = bufio.NewScanner(file)
	}
	i := 0
	for scanner.Scan() {
		// Read only the first few lines to not read entire code file
		i++
		if i > LICENSE_HEADER_LINES_MAX {
			break
		}
		// We do not care about case sensitivity
		s := strings.ToUpper(scanner.Text())
		// Some projects DO NOT explicitly print this statement so ignore.
		s = strings.Replace(s, "ALL RIGHTS RESERVED.", "", -1)
		if ignoreComment(s) {
			continue
		}
		if comment == true {
			// Track whether we are inside a /* ... */ block so its
			// interior lines are still collected.
			if strings.HasPrefix(s, "/*") {
				multilineComment = true
			} else if strings.Contains(s, "*/") {
				multilineComment = false
			}
			// Skip non-comment lines outside any multiline comment,
			// and contributor lists that are not license text.
			if !multilineComment && !isComment(s) ||
				// EPL headers can contain contributors list.
				strings.Contains(strings.ToUpper(s), " * CONTRIBUTORS:") {
				continue
			}
			s = trimComment(s)
		}
		licenseText += s
	}
	return stripSpaces(licenseText)
}
// isComment reports whether str starts with one of the recognised
// comment markers: "#", "//" or "/*".
func isComment(str string) bool {
	return strings.HasPrefix(str, "#") ||
		strings.HasPrefix(str, "//") ||
		strings.HasPrefix(str, "/*")
}
// ignoreComment reports whether a header line should be skipped when
// collecting license text: shebang lines, copyright notices, SPDX tags
// and the "MIT License" title line.
func ignoreComment(str string) bool {
	s := strings.ToUpper(trimComment(str))
	switch {
	case strings.HasPrefix(s, "#!"),
		strings.HasPrefix(s, "COPYRIGHT"),
		strings.HasPrefix(s, "SPDX-LICENSE-IDENTIFIER"),
		// License name in LICENSE file but not header
		strings.HasPrefix(s, "MIT LICENSE"):
		return true
	}
	return false
}
// stripSpaces removes every Unicode whitespace rune from str.
func stripSpaces(str string) string {
	out := make([]rune, 0, len(str))
	for _, r := range str {
		if !unicode.IsSpace(r) {
			out = append(out, r)
		}
	}
	return string(out)
}
// trimComment strips comment punctuation from a header line so that
// only the license text remains.
//
// NOTE(review): strings.TrimLeft takes a *set* of runes, not a literal
// prefix, so "//" trims any leading run of '/', "/*" any run of '/' or
// '*', and " *" any run of spaces or '*'. That is broader than
// trimming one marker, but the matching logic relies on this
// behaviour; switching to TrimPrefix would change results for lines
// such as "### foo".
func trimComment(str string) string {
	str = strings.TrimLeft(str, "#")
	str = strings.TrimLeft(str, "//")
	str = strings.TrimLeft(str, "/*")
	str = strings.TrimLeft(str, " *")
	// Drop a trailing close-comment marker and anything after it.
	str = strings.Split(str, "*/")[0]
	str = strings.TrimLeft(str, "*")
	return str
}
// usage prints a statement explaining how to use this command,
// followed by the registered flag defaults.
func usage() {
	fmt.Printf(
		"Usage: %s [OPTIONS] [FILE]...\nCompare FILE with an expected license header.\n\nOptions:\n",
		os.Args[0])
	flag.PrintDefaults()
}
// main parses the command line, loads the approved license texts and
// checks every matching file for a license header and (optionally) a
// matching SPDX identifier, printing a per-file pass/fail marker.
// The process exits with status 1 when any header or SPDX tag is
// missing, so CI systems can fail the build (this matches the later
// revision of this tool).
func main() {
	directoryPtr := flag.String("directory", ".",
		"Directory to search for files.")
	disableSPDXPtr := flag.Bool("disable-spdx", false,
		"Verify SPDX identifier matches license.")
	excludePtr := flag.String("exclude", "",
		"Comma-separated list of paths to exclude. The code will search for "+
			"paths containing this pattern. For example '/yang/gen/' is "+
			"'**/yang/gen/**'.")
	licensePtr := flag.String("license", "license.txt",
		"Comma-separated list of license files to compare against.")
	versionPtr := flag.Bool("version", false, "Print version")
	flag.Usage = usage
	flag.Parse()
	if *versionPtr {
		fmt.Println("License Checker version", VERSION)
		os.Exit(0)
	}
	fmt.Println("Search Patterns:", flag.Args())
	// Build the list of acceptable license texts. Apache-2.0 also
	// accepts the ASF variant of the header.
	var accepted_licenses []License
	for _, l := range strings.Split(*licensePtr, ",") {
		license := License{l, fetchLicense(l)}
		accepted_licenses = append(accepted_licenses, license)
		if l == "Apache-2.0" {
			license := License{l, fetchLicense("Apache-2.0-ASF")}
			accepted_licenses = append(accepted_licenses, license)
		}
	}
	checkFiles := findFiles(*directoryPtr, flag.Args())
	ignore, miss, pass, spdx_miss, spdx_pass := 0, 0, 0, 0, 0
	for _, file := range checkFiles {
		if *excludePtr != "" && exclude(file, strings.Split(*excludePtr, ",")) {
			ignore++
			continue
		}
		headerText := fetchLicense(file)
		license := accepted_license(headerText, accepted_licenses)
		result := ""
		if license != "" {
			result = result + "✔"
			pass++
		} else {
			result = result + "✘"
			miss++
		}
		if !*disableSPDXPtr {
			if checkSPDX(license, file) {
				result = result + "✔"
				spdx_pass++
			} else {
				result = result + "✘"
				spdx_miss++
			}
		}
		fmt.Println(result, file)
	}
	fmt.Printf("License Total: %d, Ignored: %d, Missing: %d, Passed: %d\n",
		len(checkFiles), ignore, miss, pass)
	if !*disableSPDXPtr {
		fmt.Printf("SPDX Total: %d, Missing: %d, Passed: %d\n",
			len(checkFiles), spdx_miss, spdx_pass)
	}
	// Signal failure to callers (e.g. CI) when any check failed.
	if miss != 0 || spdx_miss != 0 {
		os.Exit(1)
	}
}
Set exit status 1 if missing license header or SPDX
Signed-off-by: Thanh Ha <ddef40ffcf330e0c1e91b15bb1b0d4953f1680d8@linux.com>
/*
SPDX-License-Identifier: MIT
Copyright (c) 2017 Thanh Ha
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
// lhc is a checker to find code files missing license headers.
package main
import (
"bufio"
"flag"
"fmt"
"os"
"path/filepath"
"strings"
"unicode"
"github.com/zxiiro/license-header-checker/licenses"
)
var LICENSE_HEADER_LINES_MAX = 50
var VERSION = "0.1.0"
// License pairs a license's name with its normalised header text.
type License struct {
	Name string
	Text string
}

// Compare a license header with an approved list of license headers.
// Returns the name of the first approved license whose text is
// contained in check. Else "".
func accepted_license(check string, approved []License) string {
	for _, candidate := range approved {
		if strings.Contains(check, candidate.Text) {
			return candidate.Name
		}
	}
	return ""
}
// check prints e and terminates the process with exit status 1 when e
// is a real error; a nil error is a no-op.
func check(e error) {
	if e == nil {
		return
	}
	fmt.Println(e)
	os.Exit(1)
}
// checkSPDX reports whether filename carries an SPDX-License-Identifier
// tag whose value equals license. Only the first
// LICENSE_HEADER_LINES_MAX lines are scanned; the first tag found
// decides the result, and a file without a tag in that window returns
// false. The file side is uppercased before comparison, so license is
// expected to already be upper case (e.g. "MIT").
func checkSPDX(license string, filename string) bool {
	file, err := os.Open(filename)
	check(err)
	defer file.Close()
	scanner := bufio.NewScanner(file)
	i := 0
	for scanner.Scan() {
		// Read only the first few lines to not read entire code file
		i++
		if i > LICENSE_HEADER_LINES_MAX {
			break
		}
		s := strings.ToUpper(scanner.Text())
		if strings.Contains(s, "SPDX-LICENSE-IDENTIFIER:") {
			// Everything after the first ':' is the identifier;
			// strip whitespace before comparing.
			spdx := stripSpaces(strings.SplitN(s, ":", 2)[1])
			if spdx == license {
				return true
			} else {
				return false
			}
		}
	}
	// NOTE(review): scanner.Err() is ignored — a read error is treated
	// the same as a missing SPDX tag.
	return false
}
// exclude reports whether path contains any of the given exclusion
// patterns as a substring.
func exclude(path string, excludes []string) bool {
	for _, pattern := range excludes {
		if strings.Contains(path, pattern) {
			return true
		}
	}
	return false
}
func findFiles(directory string, patterns []string) []string {
var files []string
filepath.Walk(directory, func(path string, info os.FileInfo, err error) error {
if info.IsDir() {
for _, p := range patterns {
f, _ := filepath.Glob(filepath.Join(path, p))
files = append(files, f...)
}
}
return nil
})
return files
}
// fetchLicense returns the normalised (uppercased, whitespace-stripped)
// license text used for comparison. filename may be one of the
// built-in license names ("Apache-2.0", "Apache-2.0-ASF", "EPL-1.0",
// "MIT") or a path to a file; for files whose first two bytes look
// like a comment marker, only the leading comment block is collected.
// Only the first LICENSE_HEADER_LINES_MAX lines are read.
func fetchLicense(filename string) string {
	comment, multilineComment := false, false
	licenseText := ""
	var scanner *bufio.Scanner
	if filename == "Apache-2.0" {
		scanner = bufio.NewScanner(strings.NewReader(license.APACHE_20_LICENSE))
	} else if filename == "Apache-2.0-ASF" {
		scanner = bufio.NewScanner(strings.NewReader(license.APACHE_20_LICENSE_ASF))
	} else if filename == "EPL-1.0" {
		scanner = bufio.NewScanner(strings.NewReader(license.EPL_10_LICENSE))
	} else if filename == "MIT" {
		scanner = bufio.NewScanner(strings.NewReader(license.MIT_LICENSE))
	} else {
		file, err := os.Open(filename)
		check(err)
		defer file.Close()
		// Read the first 2 bytes to decide if it is a comment string
		b := make([]byte, 2)
		_, err = file.Read(b)
		check(err)
		if isComment(string(b)) {
			comment = true
		}
		file.Seek(0, 0) // Reset so we can read the full file next
		scanner = bufio.NewScanner(file)
	}
	i := 0
	for scanner.Scan() {
		// Read only the first few lines to not read entire code file
		i++
		if i > LICENSE_HEADER_LINES_MAX {
			break
		}
		// We do not care about case sensitivity
		s := strings.ToUpper(scanner.Text())
		// Some projects DO NOT explicitly print this statement so ignore.
		s = strings.Replace(s, "ALL RIGHTS RESERVED.", "", -1)
		if ignoreComment(s) {
			continue
		}
		if comment == true {
			// Track whether we are inside a /* ... */ block so its
			// interior lines are still collected.
			if strings.HasPrefix(s, "/*") {
				multilineComment = true
			} else if strings.Contains(s, "*/") {
				multilineComment = false
			}
			// Skip non-comment lines outside any multiline comment,
			// and contributor lists that are not license text.
			if !multilineComment && !isComment(s) ||
				// EPL headers can contain contributors list.
				strings.Contains(strings.ToUpper(s), " * CONTRIBUTORS:") {
				continue
			}
			s = trimComment(s)
		}
		licenseText += s
	}
	return stripSpaces(licenseText)
}
// isComment reports whether str starts with one of the recognised
// comment markers: "#", "//" or "/*".
func isComment(str string) bool {
	return strings.HasPrefix(str, "#") ||
		strings.HasPrefix(str, "//") ||
		strings.HasPrefix(str, "/*")
}
// ignoreComment reports whether a header line should be skipped when
// collecting license text: shebang lines, copyright notices, SPDX tags
// and the "MIT License" title line.
func ignoreComment(str string) bool {
	s := strings.ToUpper(trimComment(str))
	switch {
	case strings.HasPrefix(s, "#!"),
		strings.HasPrefix(s, "COPYRIGHT"),
		strings.HasPrefix(s, "SPDX-LICENSE-IDENTIFIER"),
		// License name in LICENSE file but not header
		strings.HasPrefix(s, "MIT LICENSE"):
		return true
	}
	return false
}
// stripSpaces removes every Unicode whitespace rune from str.
func stripSpaces(str string) string {
	out := make([]rune, 0, len(str))
	for _, r := range str {
		if !unicode.IsSpace(r) {
			out = append(out, r)
		}
	}
	return string(out)
}
// trimComment strips comment punctuation from a header line so that
// only the license text remains.
//
// NOTE(review): strings.TrimLeft takes a *set* of runes, not a literal
// prefix, so "//" trims any leading run of '/', "/*" any run of '/' or
// '*', and " *" any run of spaces or '*'. That is broader than
// trimming one marker, but the matching logic relies on this
// behaviour; switching to TrimPrefix would change results for lines
// such as "### foo".
func trimComment(str string) string {
	str = strings.TrimLeft(str, "#")
	str = strings.TrimLeft(str, "//")
	str = strings.TrimLeft(str, "/*")
	str = strings.TrimLeft(str, " *")
	// Drop a trailing close-comment marker and anything after it.
	str = strings.Split(str, "*/")[0]
	str = strings.TrimLeft(str, "*")
	return str
}
// usage prints a statement explaining how to use this command,
// followed by the registered flag defaults.
func usage() {
	fmt.Printf(
		"Usage: %s [OPTIONS] [FILE]...\nCompare FILE with an expected license header.\n\nOptions:\n",
		os.Args[0])
	flag.PrintDefaults()
}
// main parses the command line, loads the approved license texts and
// checks every matching file for a license header and (optionally) a
// matching SPDX identifier, printing a per-file pass/fail marker.
// The process exits with status 1 when any header or SPDX tag is
// missing, so CI systems can fail the build.
func main() {
	directoryPtr := flag.String("directory", ".",
		"Directory to search for files.")
	disableSPDXPtr := flag.Bool("disable-spdx", false,
		// Fixed "SDPX" typo in the user-visible help text.
		"Verify SPDX identifier matches license.")
	excludePtr := flag.String("exclude", "",
		"Comma-separated list of paths to exclude. The code will search for "+
			"paths containing this pattern. For example '/yang/gen/' is "+
			"'**/yang/gen/**'.")
	licensePtr := flag.String("license", "license.txt",
		"Comma-separated list of license files to compare against.")
	versionPtr := flag.Bool("version", false, "Print version")
	flag.Usage = usage
	flag.Parse()
	if *versionPtr {
		fmt.Println("License Checker version", VERSION)
		os.Exit(0)
	}
	fmt.Println("Search Patterns:", flag.Args())
	// Build the list of acceptable license texts. Apache-2.0 also
	// accepts the ASF variant of the header.
	var accepted_licenses []License
	for _, l := range strings.Split(*licensePtr, ",") {
		license := License{l, fetchLicense(l)}
		accepted_licenses = append(accepted_licenses, license)
		if l == "Apache-2.0" {
			license := License{l, fetchLicense("Apache-2.0-ASF")}
			accepted_licenses = append(accepted_licenses, license)
		}
	}
	checkFiles := findFiles(*directoryPtr, flag.Args())
	ignore, miss, pass, spdx_miss, spdx_pass := 0, 0, 0, 0, 0
	for _, file := range checkFiles {
		if *excludePtr != "" && exclude(file, strings.Split(*excludePtr, ",")) {
			ignore++
			continue
		}
		headerText := fetchLicense(file)
		license := accepted_license(headerText, accepted_licenses)
		result := ""
		if license != "" {
			result = result + "✔"
			pass++
		} else {
			result = result + "✘"
			miss++
		}
		if !*disableSPDXPtr {
			if checkSPDX(license, file) {
				result = result + "✔"
				spdx_pass++
			} else {
				result = result + "✘"
				spdx_miss++
			}
		}
		fmt.Println(result, file)
	}
	fmt.Printf("License Total: %d, Ignored: %d, Missing: %d, Passed: %d\n",
		len(checkFiles), ignore, miss, pass)
	if !*disableSPDXPtr {
		fmt.Printf("SPDX Total: %d, Missing: %d, Passed: %d\n",
			len(checkFiles), spdx_miss, spdx_pass)
	}
	// Signal failure to callers (e.g. CI) when any check failed.
	if miss != 0 || spdx_miss != 0 {
		os.Exit(1)
	}
}
|
package xlsx
import (
"archive/zip"
"encoding/xml"
"errors"
"fmt"
"io"
"path"
"strconv"
"strings"
)
// XLSXReaderError is the standard error type for otherwise undefined
// errors in the XLSX reading process.
type XLSXReaderError struct {
	// Err holds the human-readable error message.
	Err string
}

// Error returns a string value from an XLSXReaderError struct in order
// that it might comply with the builtin.error interface.
func (e *XLSXReaderError) Error() string {
	return e.Err
}
// getRangeFromString is an internal helper function that converts
// XLSX internal range syntax to a pair of integers. For example,
// the range string "1:3" yields the lower and upper integers 1 and 3.
// On malformed input both bounds are 0 and a non-nil error is returned.
func getRangeFromString(rangeString string) (lower int, upper int, err error) {
	parts := strings.SplitN(rangeString, ":", 2)
	// Guard against a missing ':' — the original indexed parts[1]
	// unconditionally and panicked on input such as "5".
	if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
		return 0, 0, fmt.Errorf("Invalid range '%s'\n", rangeString)
	}
	lower, err = strconv.Atoi(parts[0])
	if err != nil {
		// Return immediately: previously a failed lower-bound parse
		// was silently overwritten by a successful upper-bound parse,
		// swallowing the error.
		return 0, 0, fmt.Errorf("Invalid range (not integer in lower bound) %s\n", rangeString)
	}
	upper, err = strconv.Atoi(parts[1])
	if err != nil {
		return 0, 0, fmt.Errorf("Invalid range (not integer in upper bound) %s\n", rangeString)
	}
	return lower, upper, nil
}
// lettersToNumeric is used to convert a character based column
// reference to a zero based numeric column identifier.
// e.g. "A" -> 0, "Z" -> 25, "AA" -> 26 (bijective base-26).
func lettersToNumeric(letters string) int {
	sum := 0
	for i := 0; i < len(letters); i++ {
		c := letters[i]
		var digit int
		switch {
		case 'A' <= c && c <= 'Z':
			digit = int(c - 'A')
		case 'a' <= c && c <= 'z':
			digit = int(c - 'a')
		}
		if i < len(letters)-1 {
			// All positions except the least significant one are
			// 1-based in this scheme.
			digit++
		}
		sum = sum*26 + digit
	}
	return sum
}
// getLargestDenominator returns the largest denominator that is
// multiple scaled by a power of baseDenominator and still fits at
// least once into numerator, together with that power.
func getLargestDenominator(numerator, multiple, baseDenominator, power int) (int, int) {
	if numerator/multiple == 0 {
		return 1, power
	}
	// Iterative form of the original recursion: climb by factors of
	// baseDenominator while the next step still divides numerator.
	for numerator/(multiple*baseDenominator) != 0 {
		multiple *= baseDenominator
		power++
	}
	return multiple, power
}
// formatColumnName converts a list of numbers representing a column
// into the alphabetic representation used in the spreadsheet.
func formatColumnName(colId []int) string {
	lastPart := len(colId) - 1
	result := ""
	for n, part := range colId {
		if n == lastPart {
			// The least significant number is in the
			// range 0-25, all other numbers are 1-26,
			// hence we use a different offset for the
			// last part. (string(rune(...)) avoids the
			// vet-flagged string(int) conversion.)
			result += string(rune(part + 65))
		} else {
			// Don't output leading 0s, as there is no
			// representation of 0 in this format.
			if part > 0 {
				result += string(rune(part + 64))
			}
		}
	}
	return result
}
// smooshBase26Slice normalises a base-26 digit slice in place by
// borrowing from the next more significant digit whenever an interior
// digit is zero (there is no representation of 0 in this format),
// leaving the least significant digit untouched.
func smooshBase26Slice(b26 []int) []int {
	for i := len(b26) - 2; i > 0; i-- {
		if b26[i] != 0 {
			continue
		}
		if b26[i-1] > 0 {
			b26[i-1]--
			b26[i] = 26
		}
	}
	return b26
}
// intToBase26 decomposes x into base-26 digits, most significant
// first. Excel column codes are pure evil - in essence they're just
// base26, but they don't represent the number 0; the caller fixes
// that up with smooshBase26Slice.
func intToBase26(x int) (parts []int) {
	d, _ := getLargestDenominator(x, 1, 26, 0)
	// This loop terminates because integer division of 1 / 26
	// returns 0.
	for ; d > 0; d /= 26 {
		parts = append(parts, x/d)
		x %= d
	}
	return parts
}
// numericToLetters is used to convert a zero based, numeric column
// indentifier into a character code.
func numericToLetters(colRef int) string {
parts := intToBase26(colRef)
return formatColumnName(smooshBase26Slice(parts))
}
// letterOnlyMapF is used in conjunction with strings.Map to keep only
// the characters A-Z and a-z in a string, folding lower case to upper
// case; every other rune maps to -1 and is dropped by strings.Map.
func letterOnlyMapF(rune rune) rune {
	if 'A' <= rune && rune <= 'Z' {
		return rune
	}
	if 'a' <= rune && rune <= 'z' {
		return rune - 32
	}
	return -1
}
// intOnlyMapF is used in conjunction with strings.Map to return only
// the numeric portions of a string; non-digits map to -1 (dropped).
func intOnlyMapF(rune rune) rune {
	if '0' <= rune && rune <= '9' {
		return rune
	}
	return -1
}
// getCoordsFromCellIDString returns the zero based cartesian
// coordinates from a cell name in Excel format, e.g. the cellIDString
// "A1" returns 0, 0 and "B3" returns 1, 2.
func getCoordsFromCellIDString(cellIDString string) (x, y int, error error) {
	letterPart := strings.Map(letterOnlyMapF, cellIDString)
	y, error = strconv.Atoi(strings.Map(intOnlyMapF, cellIDString))
	if error != nil {
		return x, y, error
	}
	// Excel rows are 1-based; convert to zero-based.
	y--
	x = lettersToNumeric(letterPart)
	return x, y, error
}
// getCellIDStringFromCoords returns the Excel format cell name that
// represents a pair of zero based cartesian coordinates
// (e.g. 0, 0 -> "A1").
func getCellIDStringFromCoords(x, y int) string {
	return fmt.Sprintf("%s%d", numericToLetters(x), y+1)
}
// getMaxMinFromDimensionRef returns the zero based cartesian minimum
// and maximum coordinates from the dimension reference embedded in a
// XLSX worksheet. For example, the dimension reference "A1:B2"
// returns 0,0 and 1,1. On parse failure all four coordinates are -1.
func getMaxMinFromDimensionRef(ref string) (minx, miny, maxx, maxy int, err error) {
	parts := strings.Split(ref, ":")
	if minx, miny, err = getCoordsFromCellIDString(parts[0]); err != nil {
		return -1, -1, -1, -1, err
	}
	if len(parts) == 1 {
		// Single-cell dimension such as "A1": max equals min.
		return minx, miny, minx, miny, nil
	}
	if maxx, maxy, err = getCoordsFromCellIDString(parts[1]); err != nil {
		return -1, -1, -1, -1, err
	}
	return minx, miny, maxx, maxy, nil
}
// calculateMaxMinFromWorkSheet works out the dimensions of a spreadsheet
// that doesn't have a DimensionRef set. The only case currently
// known where this is true is with XLSX exported from Google Docs.
// It scans every cell reference and returns the zero-based bounding
// box (minx, miny, maxx, maxy); a sheet with no cells yields all
// zeros.
func calculateMaxMinFromWorksheet(worksheet *xlsxWorksheet) (minx, miny, maxx, maxy int, err error) {
	// Note, this method could be very slow for large spreadsheets.
	var x, y int
	var maxVal int
	// Seed the minima with the largest int so any real coordinate
	// replaces them.
	maxVal = int(^uint(0) >> 1)
	minx = maxVal
	miny = maxVal
	maxy = 0
	maxx = 0
	for _, row := range worksheet.SheetData.Row {
		for _, cell := range row.C {
			x, y, err = getCoordsFromCellIDString(cell.R)
			if err != nil {
				return -1, -1, -1, -1, err
			}
			if x < minx {
				minx = x
			}
			if x > maxx {
				maxx = x
			}
			if y < miny {
				miny = y
			}
			if y > maxy {
				maxy = y
			}
		}
	}
	// No cells at all: fall back to a zero origin.
	if minx == maxVal {
		minx = 0
	}
	if miny == maxVal {
		miny = 0
	}
	return
}
// makeRowFromSpan will, when given a span expressed as a string,
// return an empty Row large enough to encompass that span and
// populate it with empty cells. All rows start from cell 1 -
// regardless of the lower bound of the span.
// Panics when the span string cannot be parsed.
func makeRowFromSpan(spans string, sheet *Sheet) *Row {
	var error error
	var upper int
	var row *Row
	var cell *Cell
	row = new(Row)
	row.Sheet = sheet
	// Only the upper bound matters: cells are always allocated from
	// index 0 up to it.
	_, upper, error = getRangeFromString(spans)
	if error != nil {
		panic(error)
	}
	error = nil
	row.Cells = make([]*Cell, upper)
	for i := 0; i < upper; i++ {
		cell = new(Cell)
		cell.Value = ""
		row.Cells[i] = cell
	}
	return row
}
// makeRowFromRaw returns the Row representation of the xlsxRow, sized
// to one past the highest zero-based column index referenced by any of
// its cells (cells without an explicit reference are counted
// positionally) and filled with empty Cells.
// Panics when a cell reference cannot be parsed.
func makeRowFromRaw(rawrow xlsxRow, sheet *Sheet) *Row {
	var upper int
	var row *Row
	var cell *Cell
	row = new(Row)
	row.Sheet = sheet
	upper = -1
	// Find the highest column index used in this row.
	for _, rawcell := range rawrow.C {
		if rawcell.R != "" {
			x, _, error := getCoordsFromCellIDString(rawcell.R)
			if error != nil {
				panic(fmt.Sprintf("Invalid Cell Coord, %s\n", rawcell.R))
			}
			if x > upper {
				upper = x
			}
			continue
		}
		// No explicit reference: count the cell positionally.
		upper++
	}
	upper++
	row.Cells = make([]*Cell, upper)
	for i := 0; i < upper; i++ {
		cell = new(Cell)
		cell.Value = ""
		row.Cells[i] = cell
	}
	return row
}
// makeEmptyRow returns a Row with no cells, bound to the given sheet.
func makeEmptyRow(sheet *Sheet) *Row {
	return &Row{
		Cells: make([]*Cell, 0),
		Sheet: sheet,
	}
}
// sharedFormula records the anchor coordinates and text of a shared
// formula's master copy so that later cells referencing the same
// shared index can re-derive their own shifted copy from it.
type sharedFormula struct {
	x, y int
	formula string
}
// formulaForCell returns the formula text for rawcell, trimmed of
// surrounding whitespace. Plain formulas are returned as-is; "shared"
// formulas are either registered in sharedFormulas (when the cell
// carries the master copy, i.e. F.Ref is set) or reconstructed from
// the registered master by shifting every cell reference in it by
// this cell's offset from the master cell.
func formulaForCell(rawcell xlsxC, sharedFormulas map[int]sharedFormula) string {
	var res string
	f := rawcell.F
	if f == nil {
		return ""
	}
	if f.T == "shared" {
		x, y, err := getCoordsFromCellIDString(rawcell.R)
		if err != nil {
			// Can't locate this cell: fall back to the raw text.
			res = f.Content
		} else {
			if f.Ref != "" {
				// Master copy: remember it keyed by the shared index.
				res = f.Content
				sharedFormulas[f.Si] = sharedFormula{x, y, res}
			} else {
				sharedFormula := sharedFormulas[f.Si]
				dx := x - sharedFormula.x
				dy := y - sharedFormula.y
				orig := []byte(sharedFormula.formula)
				var start, end int
				// Scan the master formula for cell references
				// (letters/'$' followed by digits) and shift each
				// one by (dx, dy); all other text is copied through.
				for end = 0; end < len(orig); end++ {
					c := orig[end]
					if c >= 'A' && c <= 'Z' || c == '$' {
						res += string(orig[start:end])
						start = end
						end++
						foundNum := false
						for ; end < len(orig); end++ {
							idc := orig[end]
							if idc >= '0' && idc <= '9' || idc == '$' {
								foundNum = true
							} else if idc >= 'A' && idc <= 'Z' {
								if foundNum {
									break
								}
							} else {
								break
							}
						}
						if foundNum {
							// A digit was seen, so orig[start:end] is
							// a cell reference rather than a bare name.
							cellID := string(orig[start:end])
							res += shiftCell(cellID, dx, dy)
							start = end
						}
					}
				}
				if start < len(orig) {
					res += string(orig[start:end])
				}
			}
		}
	} else {
		res = f.Content
	}
	return strings.Trim(res, " \t\n\r")
}
// shiftCell returns the cell shifted according to dx and dy taking into consideration of absolute
// references with dollar sign ($).
// e.g. shifting "B2" by (1, 1) yields "C3", while "$B$2" is returned
// unchanged because both axes are anchored.
func shiftCell(cellID string, dx, dy int) string {
	fx, fy, _ := getCoordsFromCellIDString(cellID)
	// Is fixed column?  A leading '$' anchors the column.
	fixedCol := strings.Index(cellID, "$") == 0
	// Is fixed row?  A '$' after position 0 anchors the row.
	fixedRow := strings.LastIndex(cellID, "$") > 0
	if !fixedCol {
		// Shift column
		fx += dx
	}
	if !fixedRow {
		// Shift row
		fy += dy
	}
	// New shifted cell
	shiftedCellID := getCellIDStringFromCoords(fx, fy)
	// Need put the $ back if they have absolute references
	letterPart := strings.Map(letterOnlyMapF, shiftedCellID)
	numberPart := strings.Map(intOnlyMapF, shiftedCellID)
	result := ""
	if fixedCol {
		// Insert dollar sign ($) back into the formula
		result += "$"
	}
	result += letterPart
	if fixedRow {
		// Insert dollar sign ($) back into the formula
		result += "$"
	}
	result += numberPart
	return result
}
// fillCellData attempts to extract a valid value, usable in
// CSV form from the raw cell value. Note - this is not actually
// general enough - we should support retaining tabs and newlines.
// It populates cell.Value, cell.cellType and, for formula/error
// cells, cell.formula according to the raw cell's type attribute;
// cells with an empty raw value are left untouched. Panics when a
// shared-string index is not an integer.
func fillCellData(rawcell xlsxC, reftable *RefTable, sharedFormulas map[int]sharedFormula, cell *Cell) {
	var data string = rawcell.V
	if len(data) > 0 {
		vval := strings.Trim(data, " \t\n\r")
		switch rawcell.T {
		case "s": // Shared String
			// The value is an index into the shared-string table.
			ref, error := strconv.Atoi(vval)
			if error != nil {
				panic(error)
			}
			cell.Value = reftable.ResolveSharedString(ref)
			cell.cellType = CellTypeString
		case "b": // Boolean
			cell.Value = vval
			cell.cellType = CellTypeBool
		case "e": // Error
			cell.Value = vval
			cell.formula = formulaForCell(rawcell, sharedFormulas)
			cell.cellType = CellTypeError
		default:
			if rawcell.F == nil {
				// Numeric
				cell.Value = vval
				cell.cellType = CellTypeNumeric
			} else {
				// Formula
				cell.Value = vval
				cell.formula = formulaForCell(rawcell, sharedFormulas)
				cell.cellType = CellTypeFormula
			}
		}
	}
}
// readRowsFromSheet is an internal helper function that extracts the
// rows from a XSLXWorksheet, populates them with Cells and resolves
// the value references from the reference table and stores them in
// the rows and columns. It returns the rows, the expanded column
// definitions and the column and row counts; an empty sheet yields
// (nil, nil, 0, 0). Panics on unparseable dimension or merge data.
func readRowsFromSheet(Worksheet *xlsxWorksheet, file *File, sheet *Sheet) ([]*Row, []*Col, int, int) {
	var rows []*Row
	var cols []*Col
	var row *Row
	var minCol, maxCol, minRow, maxRow, colCount, rowCount int
	var reftable *RefTable
	var err error
	var insertRowIndex, insertColIndex int
	sharedFormulas := map[int]sharedFormula{}
	if len(Worksheet.SheetData.Row) == 0 {
		return nil, nil, 0, 0
	}
	reftable = file.referenceTable
	// Prefer the declared dimensions; fall back to scanning every
	// cell (e.g. Google Docs exports omit the dimension element).
	if len(Worksheet.Dimension.Ref) > 0 {
		minCol, minRow, maxCol, maxRow, err = getMaxMinFromDimensionRef(Worksheet.Dimension.Ref)
	} else {
		minCol, minRow, maxCol, maxRow, err = calculateMaxMinFromWorksheet(Worksheet)
	}
	if err != nil {
		panic(err.Error())
	}
	rowCount = maxRow + 1
	colCount = maxCol + 1
	rows = make([]*Row, rowCount)
	cols = make([]*Col, colCount)
	insertRowIndex = minRow
	for i := range cols {
		cols[i] = &Col{
			Hidden: false,
		}
	}
	if Worksheet.Cols != nil {
		// Columns can apply to a range, for convenience we expand the
		// ranges out into individual column definitions.
		for _, rawcol := range Worksheet.Cols.Col {
			// Note, below, that sometimes column definitions can
			// exist outside the defined dimensions of the
			// spreadsheet - we deliberately exclude these
			// columns.
			for i := rawcol.Min; i <= rawcol.Max && i <= colCount; i++ {
				col := &Col{
					Min: rawcol.Min,
					Max: rawcol.Max,
					Hidden: rawcol.Hidden,
					Width: rawcol.Width}
				cols[i-1] = col
				if file.styles != nil {
					col.style = file.styles.getStyle(rawcol.Style)
					col.numFmt = file.styles.getNumberFormat(rawcol.Style)
				}
			}
		}
	}
	// insert leading empty rows that is in front of minRow
	for rowIndex := 0; rowIndex < minRow; rowIndex++ {
		rows[rowIndex] = makeEmptyRow(sheet)
	}
	numRows := len(rows)
	for rowIndex := 0; rowIndex < len(Worksheet.SheetData.Row); rowIndex++ {
		rawrow := Worksheet.SheetData.Row[rowIndex]
		// Some spreadsheets will omit blank rows from the
		// stored data
		for rawrow.R > (insertRowIndex + 1) {
			// Put an empty Row into the array
			if insertRowIndex < numRows {
				rows[insertRowIndex] = makeEmptyRow(sheet)
			}
			insertRowIndex++
		}
		// range is not empty and only one range exist
		if len(rawrow.Spans) != 0 && strings.Count(rawrow.Spans, ":") == 1 {
			row = makeRowFromSpan(rawrow.Spans, sheet)
		} else {
			row = makeRowFromRaw(rawrow, sheet)
		}
		row.Hidden = rawrow.Hidden
		insertColIndex = minCol
		for _, rawcell := range rawrow.C {
			// Merge extent: how many cells this one spans
			// horizontally and vertically.
			h, v, err := Worksheet.MergeCells.getExtent(rawcell.R)
			if err != nil {
				panic(err.Error())
			}
			x, _, _ := getCoordsFromCellIDString(rawcell.R)
			// Some spreadsheets will omit blank cells
			// from the data.
			for x > insertColIndex {
				// Put an empty Cell into the array
				row.Cells[insertColIndex] = new(Cell)
				insertColIndex++
			}
			cellX := insertColIndex
			cell := row.Cells[cellX]
			cell.HMerge = h
			cell.VMerge = v
			fillCellData(rawcell, reftable, sharedFormulas, cell)
			if file.styles != nil {
				cell.style = file.styles.getStyle(rawcell.S)
				cell.NumFmt = file.styles.getNumberFormat(rawcell.S)
			}
			cell.date1904 = file.Date1904
			// Cell is considered hidden if the row or the column of this cell is hidden
			cell.Hidden = rawrow.Hidden || (len(cols) > cellX && cols[cellX].Hidden)
			insertColIndex++
		}
		if len(rows) > insertRowIndex {
			rows[insertRowIndex] = row
		}
		insertRowIndex++
	}
	return rows, cols, colCount, rowCount
}
// indexedSheet pairs a parsed Sheet with its position in the workbook
// so sheets read via a channel can be reassembled in workbook order;
// Error carries any failure encountered while reading that sheet.
type indexedSheet struct {
	Index int
	Sheet *Sheet
	Error error
}
// readSheetViews converts the raw xlsxSheetViews into SheetView
// values, copying pane information when present. Returns nil when
// there are no views.
func readSheetViews(xSheetViews xlsxSheetViews) []SheetView {
	// len() of a nil slice is 0, so the previous separate nil check
	// was redundant (staticcheck S1009).
	if len(xSheetViews.SheetView) == 0 {
		return nil
	}
	sheetViews := []SheetView{}
	for _, xSheetView := range xSheetViews.SheetView {
		sheetView := SheetView{}
		if xSheetView.Pane != nil {
			xlsxPane := xSheetView.Pane
			sheetView.Pane = &Pane{
				XSplit:      xlsxPane.XSplit,
				YSplit:      xlsxPane.YSplit,
				TopLeftCell: xlsxPane.TopLeftCell,
				ActivePane:  xlsxPane.ActivePane,
				State:       xlsxPane.State,
			}
		}
		sheetViews = append(sheetViews, sheetView)
	}
	return sheetViews
}
// readSheetFromFile is the logic of converting a xlsxSheet struct
// into a Sheet struct. This work can be done in parallel and so
// readSheetsFromZipFile will spawn an instance of this function per
// sheet and get the results back on the provided channel.
// Exactly one result is always sent on sc, whether parsing succeeds,
// fails, or panics.
func readSheetFromFile(sc chan *indexedSheet, index int, rsheet xlsxSheet, fi *File, sheetXMLMap map[string]string) {
	result := &indexedSheet{Index: index, Sheet: nil, Error: nil}
	defer func() {
		// Convert any panic raised below into an error result so the
		// reader always reports back on the channel.
		if e := recover(); e != nil {
			switch e.(type) {
			case error:
				result.Error = e.(error)
			default:
				result.Error = errors.New("unexpected error")
			}
			// The only thing here, is if one close the channel. but its not the case
			sc <- result
		}
	}()
	worksheet, error := getWorksheetFromSheet(rsheet, fi.worksheets, sheetXMLMap)
	if error != nil {
		result.Error = error
		sc <- result
		return
	}
	sheet := new(Sheet)
	sheet.File = fi
	sheet.Rows, sheet.Cols, sheet.MaxCol, sheet.MaxRow = readRowsFromSheet(worksheet, fi, sheet)
	sheet.Hidden = rsheet.State == sheetStateHidden || rsheet.State == sheetStateVeryHidden
	sheet.SheetViews = readSheetViews(worksheet.SheetViews)
	sheet.SheetFormat.DefaultColWidth = worksheet.SheetFormatPr.DefaultColWidth
	sheet.SheetFormat.DefaultRowHeight = worksheet.SheetFormatPr.DefaultRowHeight
	result.Sheet = sheet
	sc <- result
}
// readSheetsFromZipFile is an internal helper function that loops
// over the Worksheets defined in the XSLXWorkbook and loads them into
// Sheet objects stored in the Sheets slice of a xlsx.File struct.
// It returns the sheets both keyed by name and in workbook order.
func readSheetsFromZipFile(f *zip.File, file *File, sheetXMLMap map[string]string) (map[string]*Sheet, []*Sheet, error) {
	var workbook *xlsxWorkbook
	var err error
	var rc io.ReadCloser
	var decoder *xml.Decoder
	var sheetCount int
	workbook = new(xlsxWorkbook)
	rc, err = f.Open()
	if err != nil {
		return nil, nil, err
	}
	// Close the workbook stream when done (previously leaked).
	defer rc.Close()
	decoder = xml.NewDecoder(rc)
	err = decoder.Decode(workbook)
	if err != nil {
		return nil, nil, err
	}
	file.Date1904 = workbook.WorkbookPr.Date1904
	for _, entry := range workbook.DefinedNames.DefinedName {
		// Append the address of a per-iteration copy: taking &entry
		// directly would alias the single loop variable (pre-Go 1.22)
		// and leave every element of file.DefinedNames pointing at
		// the last defined name.
		entry := entry
		file.DefinedNames = append(file.DefinedNames, &entry)
	}
	// Only try and read sheets that have corresponding files.
	// Notably this excludes chartsheets don't right now
	var workbookSheets []xlsxSheet
	for _, sheet := range workbook.Sheets.Sheet {
		if f := worksheetFileForSheet(sheet, file.worksheets, sheetXMLMap); f != nil {
			workbookSheets = append(workbookSheets, sheet)
		}
	}
	sheetCount = len(workbookSheets)
	sheetsByName := make(map[string]*Sheet, sheetCount)
	sheets := make([]*Sheet, sheetCount)
	sheetChan := make(chan *indexedSheet, sheetCount)
	defer close(sheetChan)
	// Read the sheets sequentially in a separate goroutine; each one
	// reports back on sheetChan (buffered to sheetCount, so the
	// producer never blocks even if we return early on error).
	go func() {
		err = nil
		for i, rawsheet := range workbookSheets {
			readSheetFromFile(sheetChan, i, rawsheet, file, sheetXMLMap)
		}
	}()
	for j := 0; j < sheetCount; j++ {
		sheet := <-sheetChan
		if sheet.Error != nil {
			return nil, nil, sheet.Error
		}
		sheetName := workbookSheets[sheet.Index].Name
		sheetsByName[sheetName] = sheet.Sheet
		sheet.Sheet.Name = sheetName
		sheets[sheet.Index] = sheet.Sheet
	}
	return sheetsByName, sheets, nil
}
// readSharedStringsFromZipFile() is an internal helper function to
// extract a reference table from the sharedStrings.xml file within
// the XLSX zip file.
func readSharedStringsFromZipFile(f *zip.File) (*RefTable, error) {
	// In a file with no strings it's possible that
	// sharedStrings.xml doesn't exist. In this case the value
	// passed as f will be nil.
	if f == nil {
		return nil, nil
	}
	rc, err := f.Open()
	if err != nil {
		return nil, err
	}
	// Close the decompressing reader when done (previously leaked).
	defer rc.Close()
	sst := new(xlsxSST)
	if err := xml.NewDecoder(rc).Decode(sst); err != nil {
		return nil, err
	}
	return MakeSharedStringRefTable(sst), nil
}
// readStylesFromZipFile() is an internal helper function to
// extract a style table from the style.xml file within
// the XLSX zip file.
func readStylesFromZipFile(f *zip.File, theme *theme) (*xlsxStyleSheet, error) {
	rc, err := f.Open()
	if err != nil {
		return nil, err
	}
	// Close the decompressing reader when done (previously leaked).
	defer rc.Close()
	style := newXlsxStyleSheet(theme)
	if err := xml.NewDecoder(rc).Decode(style); err != nil {
		return nil, err
	}
	// Populate the number-format reference table as a side effect.
	buildNumFmtRefTable(style)
	return style, nil
}
// buildNumFmtRefTable registers every custom number format declared in
// the style sheet; addNumFmt is invoked purely for its side effect of
// populating the NumFmtRefTable.
func buildNumFmtRefTable(style *xlsxStyleSheet) {
	numFmts := style.NumFmts.NumFmt
	for i := range numFmts {
		style.addNumFmt(numFmts[i])
	}
}
// readThemeFromZipFile parses xl/theme/theme1.xml from the archive and
// returns the corresponding theme.
func readThemeFromZipFile(f *zip.File) (*theme, error) {
	rc, err := f.Open()
	if err != nil {
		return nil, err
	}
	// The previous implementation never closed this reader.
	defer rc.Close()
	var themeXml xlsxTheme
	if err := xml.NewDecoder(rc).Decode(&themeXml); err != nil {
		return nil, err
	}
	return newTheme(themeXml), nil
}
// WorkBookRels maps workbook relationship IDs (e.g. "rId1") to the
// target file names they refer to inside the XLSX archive.
type WorkBookRels map[string]string
// MakeXLSXWorkbookRels builds the serializable workbook relationships:
// one worksheet relation per entry in the map (placed at the slot its
// numeric rId dictates), followed by the sharedStrings, theme and
// styles relations with consecutive ids.
func (w *WorkBookRels) MakeXLSXWorkbookRels() xlsxWorkbookRels {
	count := len(*w)
	rels := xlsxWorkbookRels{
		Relationships: make([]xlsxWorkbookRelation, count+3),
	}
	for id, target := range *w {
		// Keys are of the form "rIdN"; N fixes the slice slot.
		n, err := strconv.Atoi(id[3:])
		if err != nil {
			panic(err.Error())
		}
		rels.Relationships[n-1] = xlsxWorkbookRelation{
			Id:     id,
			Target: target,
			Type:   "http://schemas.openxmlformats.org/officeDocument/2006/relationships/worksheet"}
	}
	trailing := []struct{ target, relType string }{
		{"sharedStrings.xml", "http://schemas.openxmlformats.org/officeDocument/2006/relationships/sharedStrings"},
		{"theme/theme1.xml", "http://schemas.openxmlformats.org/officeDocument/2006/relationships/theme"},
		{"styles.xml", "http://schemas.openxmlformats.org/officeDocument/2006/relationships/styles"},
	}
	for _, extra := range trailing {
		count++
		rels.Relationships[count-1] = xlsxWorkbookRelation{
			Id:     fmt.Sprintf("rId%d", count),
			Target: extra.target,
			Type:   extra.relType}
	}
	return rels
}
// readWorkbookRelationsFromZipFile is an internal helper function to
// extract a map of relationship ID strings to the name of the
// worksheet.xml file they refer to. The resulting map can be used to
// reliably dereference the worksheets in the XLSX file.
func readWorkbookRelationsFromZipFile(workbookRels *zip.File) (WorkBookRels, error) {
	rc, err := workbookRels.Open()
	if err != nil {
		return nil, err
	}
	// The previous implementation never closed this reader.
	defer rc.Close()
	wbRelationships := new(xlsxWorkbookRels)
	if err := xml.NewDecoder(rc).Decode(wbRelationships); err != nil {
		return nil, err
	}
	sheetXMLMap := make(WorkBookRels)
	const worksheetType = "http://schemas.openxmlformats.org/officeDocument/2006/relationships/worksheet"
	for _, rel := range wbRelationships.Relationships {
		// Only worksheet relations are recorded; the ".xml" suffix is
		// stripped so values can be used as base file names.
		if strings.HasSuffix(rel.Target, ".xml") && rel.Type == worksheetType {
			_, filename := path.Split(rel.Target)
			sheetXMLMap[rel.Id] = strings.Replace(filename, ".xml", "", 1)
		}
	}
	return sheetXMLMap, nil
}
// ReadZip() takes a pointer to a zip.ReadCloser and returns a
// xlsx.File struct populated with its contents, closing the archive
// when done. In most cases ReadZip is not used directly, but is called
// internally by OpenFile.
func ReadZip(f *zip.ReadCloser) (*File, error) {
	defer f.Close()
	return ReadZipReader(&f.Reader)
}
// ReadZipReader() can be used to read an XLSX in memory without
// touching the filesystem.
func ReadZipReader(r *zip.Reader) (*File, error) {
	var err error
	var file *File
	var reftable *RefTable
	var sharedStrings *zip.File
	var sheetXMLMap map[string]string
	var sheetsByName map[string]*Sheet
	var sheets []*Sheet
	var style *xlsxStyleSheet
	var styles *zip.File
	var themeFile *zip.File
	var v *zip.File
	var workbook *zip.File
	var workbookRels *zip.File
	var worksheets map[string]*zip.File
	file = NewFile()
	// file.numFmtRefTable = make(map[int]xlsxNumFmt, 1)
	worksheets = make(map[string]*zip.File, len(r.File))
	// First pass: sort the archive members into the well-known workbook
	// parts and the per-sheet worksheet files.
	for _, v = range r.File {
		switch v.Name {
		case "xl/sharedStrings.xml":
			sharedStrings = v
		case "xl/workbook.xml":
			workbook = v
		case "xl/_rels/workbook.xml.rels":
			workbookRels = v
		case "xl/styles.xml":
			styles = v
		case "xl/theme/theme1.xml":
			themeFile = v
		default:
			// Worksheet entries look like "xl/worksheets/<name>.xml";
			// the key stored is <name> (prefix and ".xml" stripped).
			if len(v.Name) > 14 {
				if v.Name[0:13] == "xl/worksheets" {
					worksheets[v.Name[14:len(v.Name)-4]] = v
				}
			}
		}
	}
	if workbookRels == nil {
		return nil, fmt.Errorf("xl/_rels/workbook.xml.rels not found in input xlsx.")
	}
	sheetXMLMap, err = readWorkbookRelationsFromZipFile(workbookRels)
	if err != nil {
		return nil, err
	}
	if len(worksheets) == 0 {
		return nil, fmt.Errorf("Input xlsx contains no worksheets.")
	}
	file.worksheets = worksheets
	// Shared strings may legitimately be absent; reftable is nil then.
	reftable, err = readSharedStringsFromZipFile(sharedStrings)
	if err != nil {
		return nil, err
	}
	file.referenceTable = reftable
	// Theme and styles are optional parts.
	if themeFile != nil {
		theme, err := readThemeFromZipFile(themeFile)
		if err != nil {
			return nil, err
		}
		file.theme = theme
	}
	if styles != nil {
		style, err = readStylesFromZipFile(styles, file.theme)
		if err != nil {
			return nil, err
		}
		file.styles = style
	}
	sheetsByName, sheets, err = readSheetsFromZipFile(workbook, file, sheetXMLMap)
	if err != nil {
		return nil, err
	}
	if sheets == nil {
		readerErr := new(XLSXReaderError)
		readerErr.Err = "No sheets found in XLSX File"
		return nil, readerErr
	}
	file.Sheet = sheetsByName
	file.Sheets = sheets
	return file, nil
}
No need to try to put the $ back into the formula where there are none to begin with.
package xlsx
import (
"archive/zip"
"encoding/xml"
"errors"
"fmt"
"io"
"path"
"strconv"
"strings"
)
// XLSXReaderError is the standard error type for otherwise undefined
// errors in the XSLX reading process.
type XLSXReaderError struct {
	Err string
}

// Error satisfies the builtin error interface by returning the wrapped
// message verbatim.
func (e *XLSXReaderError) Error() string {
	return e.Err
}
// getRangeFromString is an internal helper function that converts
// XLSX internal range syntax to a pair of integers. For example,
// the range string "1:3" yields the lower and upper integers 1 and 3.
// A missing separator, empty bound, or non-integer bound returns an
// error (the previous implementation panicked with an index-out-of-range
// when the input contained no ":").
func getRangeFromString(rangeString string) (lower int, upper int, err error) {
	parts := strings.SplitN(rangeString, ":", 2)
	if len(parts) < 2 || parts[0] == "" || parts[1] == "" {
		return 0, 0, fmt.Errorf("Invalid range '%s'\n", rangeString)
	}
	lower, err = strconv.Atoi(parts[0])
	if err != nil {
		return 0, 0, fmt.Errorf("Invalid range (not integer in lower bound) %s\n", rangeString)
	}
	upper, err = strconv.Atoi(parts[1])
	if err != nil {
		return 0, 0, fmt.Errorf("Invalid range (not integer in upper bound) %s\n", rangeString)
	}
	return lower, upper, nil
}
// lettersToNumeric is used to convert a character based column
// reference to a zero based numeric column identifier. The least
// significant letter is a 0-25 digit; every more significant letter is
// a 1-26 digit, because this base-26 scheme has no zero.
func lettersToNumeric(letters string) int {
	result := 0
	last := len(letters) - 1
	for idx := 0; idx <= last; idx++ {
		c := letters[idx]
		digit := 0
		switch {
		case c >= 'A' && c <= 'Z':
			digit = int(c - 'A')
		case c >= 'a' && c <= 'z':
			digit = int(c - 'a')
		}
		if idx != last {
			// Non-final letters carry the implicit +1 of the
			// zero-less encoding.
			digit++
		}
		result = result*26 + digit
	}
	return result
}
// Get the largestDenominator that is a multiple of a basedDenominator
// and fits at least once into a given numerator, together with the
// power of baseDenominator it represents. Implemented iteratively
// rather than recursively; behavior is unchanged.
func getLargestDenominator(numerator, multiple, baseDenominator, power int) (int, int) {
	if numerator/multiple == 0 {
		return 1, power
	}
	best, bestPower := multiple, power
	// Climb in powers of baseDenominator while the numerator still
	// contains the candidate at least once.
	for numerator/(best*baseDenominator) != 0 {
		best *= baseDenominator
		bestPower++
	}
	return best, bestPower
}
// formatColumnName converts a list of base-26 digits representing a
// column into the alphabetic representation used in the spreadsheet,
// e.g. []int{1, 0} -> "AA". Uses string(rune(...)) rather than the
// deprecated string(int) conversion flagged by go vet.
func formatColumnName(colId []int) string {
	lastPart := len(colId) - 1
	result := ""
	for n, part := range colId {
		if n == lastPart {
			// The least significant number is in the range 0-25, all
			// other numbers are 1-26, hence the different offset for
			// the last part.
			result += string(rune('A' + part))
		} else if part > 0 {
			// Don't output leading 0s, as there is no representation
			// of 0 in this format.
			result += string(rune('A' - 1 + part))
		}
	}
	return result
}
// smooshBase26Slice normalizes all but the least significant digit of a
// base-26 digit slice: since this encoding has no zero, an internal 0
// borrows from the next more significant digit, becoming 26.
func smooshBase26Slice(b26 []int) []int {
	for i := len(b26) - 2; i > 0; i-- {
		if b26[i] != 0 {
			continue
		}
		if b26[i-1] > 0 {
			b26[i-1]--
			b26[i] = 26
		}
	}
	return b26
}
// intToBase26 decomposes x into base-26 digits, most significant first.
// Excel column codes are pure evil - in essence they're just base26,
// but they don't represent the number 0 (see smooshBase26Slice).
func intToBase26(x int) []int {
	var digits []int
	// Largest power of 26 that fits into x at least once.
	d, _ := getLargestDenominator(x, 1, 26, 0)
	// Integer division of 1/26 is 0, so this loop terminates.
	for ; d > 0; d /= 26 {
		digits = append(digits, x/d)
		x %= d
	}
	return digits
}
// numericToLetters is used to convert a zero based, numeric column
// indentifier into a character code.
func numericToLetters(colRef int) string {
parts := intToBase26(colRef)
return formatColumnName(smooshBase26Slice(parts))
}
// letterOnlyMapF is used in conjunction with strings.Map: it keeps A-Z
// unchanged, upper-cases a-z, and maps every other rune to -1 (which
// strings.Map removes).
func letterOnlyMapF(r rune) rune {
	if r >= 'A' && r <= 'Z' {
		return r
	}
	if r >= 'a' && r <= 'z' {
		return r - ('a' - 'A')
	}
	return -1
}
// intOnlyMapF is used in conjunction with strings.Map: it keeps the
// digits 0-9 and maps every other rune to -1 (which strings.Map
// removes).
func intOnlyMapF(r rune) rune {
	if '0' <= r && r <= '9' {
		return r
	}
	return -1
}
// getCoordsFromCellIDString returns the zero based cartesian
// coordinates from a cell name in Excel format, e.g. the cellIDString
// "A1" returns 0, 0 and "B3" returns 1, 2.
func getCoordsFromCellIDString(cellIDString string) (x, y int, err error) {
	letterPart := strings.Map(letterOnlyMapF, cellIDString)
	digitPart := strings.Map(intOnlyMapF, cellIDString)
	y, err = strconv.Atoi(digitPart)
	if err != nil {
		return x, y, err
	}
	// Excel rows are 1-based; internal coordinates are 0-based.
	y--
	x = lettersToNumeric(letterPart)
	return x, y, nil
}
// getCellIDStringFromCoords returns the Excel format cell name that
// represents a pair of zero based cartesian coordinates,
// e.g. (1, 2) -> "B3".
func getCellIDStringFromCoords(x, y int) string {
	// Rows are 1-based in cell references.
	return fmt.Sprintf("%s%d", numericToLetters(x), y+1)
}
// getMaxMinFromDimensionRef returns the zero based cartesian maximum
// and minimum coordinates from the dimension reference embedded in a
// XLSX worksheet. For example, the dimension reference "A1:B2"
// returns 0,0,1,1.
func getMaxMinFromDimensionRef(ref string) (minx, miny, maxx, maxy int, err error) {
	parts := strings.Split(ref, ":")
	if minx, miny, err = getCoordsFromCellIDString(parts[0]); err != nil {
		return -1, -1, -1, -1, err
	}
	if len(parts) == 1 {
		// Single-cell dimension: the extent collapses to a point.
		return minx, miny, minx, miny, nil
	}
	if maxx, maxy, err = getCoordsFromCellIDString(parts[1]); err != nil {
		return -1, -1, -1, -1, err
	}
	return minx, miny, maxx, maxy, nil
}
// calculateMaxMinFromWorksheet works out the dimensions of a
// spreadsheet that doesn't have a DimensionRef set by scanning every
// cell. The only case currently known where this is needed is XLSX
// exported from Google Docs; note it can be slow on large sheets.
func calculateMaxMinFromWorksheet(worksheet *xlsxWorksheet) (minx, miny, maxx, maxy int, err error) {
	const maxInt = int(^uint(0) >> 1)
	// Start the minima at the largest int so any real cell lowers them.
	minx, miny = maxInt, maxInt
	maxx, maxy = 0, 0
	for _, row := range worksheet.SheetData.Row {
		for _, cell := range row.C {
			var cx, cy int
			cx, cy, err = getCoordsFromCellIDString(cell.R)
			if err != nil {
				return -1, -1, -1, -1, err
			}
			if cx < minx {
				minx = cx
			}
			if cx > maxx {
				maxx = cx
			}
			if cy < miny {
				miny = cy
			}
			if cy > maxy {
				maxy = cy
			}
		}
	}
	// An empty sheet leaves the minima untouched; clamp them to 0.
	if minx == maxInt {
		minx = 0
	}
	if miny == maxInt {
		miny = 0
	}
	return
}
// makeRowFromSpan will, when given a span expressed as a string,
// return an empty Row large enough to encompass that span and
// populate it with empty cells. All rows start from cell 1 -
// regardless of the lower bound of the span.
func makeRowFromSpan(spans string, sheet *Sheet) *Row {
	row := &Row{Sheet: sheet}
	_, upper, err := getRangeFromString(spans)
	if err != nil {
		panic(err)
	}
	row.Cells = make([]*Cell, upper)
	for i := range row.Cells {
		row.Cells[i] = &Cell{Value: ""}
	}
	return row
}
// makeRowFromRaw returns the Row representation of the xlsxRow, sized
// to the highest column index present in the raw data.
func makeRowFromRaw(rawrow xlsxRow, sheet *Sheet) *Row {
	row := &Row{Sheet: sheet}
	// Cells without an explicit reference are assumed to occupy
	// consecutive columns; referenced cells fix the width directly.
	maxIdx := -1
	for _, rawcell := range rawrow.C {
		if rawcell.R == "" {
			maxIdx++
			continue
		}
		x, _, err := getCoordsFromCellIDString(rawcell.R)
		if err != nil {
			panic(fmt.Sprintf("Invalid Cell Coord, %s\n", rawcell.R))
		}
		if x > maxIdx {
			maxIdx = x
		}
	}
	row.Cells = make([]*Cell, maxIdx+1)
	for i := range row.Cells {
		row.Cells[i] = &Cell{Value: ""}
	}
	return row
}
// makeEmptyRow returns a Row with no cells, attached to the given
// sheet.
func makeEmptyRow(sheet *Sheet) *Row {
	return &Row{
		Sheet: sheet,
		Cells: make([]*Cell, 0),
	}
}
// sharedFormula records the anchor coordinates and formula text of a
// shared formula group, so dependent cells can re-derive their own
// formula by offsetting cell references from the anchor.
type sharedFormula struct {
	x, y    int
	formula string
}
// formulaForCell returns the formula text for rawcell, expanding shared
// formulas: the first cell of a shared group (carrying a Ref) registers
// its formula under its Si index; later members of the group rebuild
// their own formula by shifting each relative cell reference in the
// anchor formula by their offset from the anchor cell.
func formulaForCell(rawcell xlsxC, sharedFormulas map[int]sharedFormula) string {
	var res string
	f := rawcell.F
	if f == nil {
		return ""
	}
	if f.T == "shared" {
		x, y, err := getCoordsFromCellIDString(rawcell.R)
		if err != nil {
			// Unparseable cell reference: fall back to the literal
			// formula content.
			res = f.Content
		} else {
			if f.Ref != "" {
				// Anchor cell of the shared group: remember its
				// formula for the other members.
				res = f.Content
				sharedFormulas[f.Si] = sharedFormula{x, y, res}
			} else {
				sharedFormula := sharedFormulas[f.Si]
				dx := x - sharedFormula.x
				dy := y - sharedFormula.y
				orig := []byte(sharedFormula.formula)
				var start, end int
				// Scan for cell-reference tokens: a run of uppercase
				// letters and '$' that ends in digits. Matched tokens
				// are shifted; all other text is copied verbatim.
				for end = 0; end < len(orig); end++ {
					c := orig[end]
					if c >= 'A' && c <= 'Z' || c == '$' {
						res += string(orig[start:end])
						start = end
						end++
						foundNum := false
						for ; end < len(orig); end++ {
							idc := orig[end]
							if idc >= '0' && idc <= '9' || idc == '$' {
								foundNum = true
							} else if idc >= 'A' && idc <= 'Z' {
								if foundNum {
									break
								}
							} else {
								break
							}
						}
						if foundNum {
							// Digits were seen, so this token is a
							// cell ID rather than a function name.
							cellID := string(orig[start:end])
							res += shiftCell(cellID, dx, dy)
							start = end
						}
					}
				}
				if start < len(orig) {
					res += string(orig[start:end])
				}
			}
		}
	} else {
		res = f.Content
	}
	return strings.Trim(res, " \t\n\r")
}
// shiftCell returns the cell shifted by dx and dy, honouring absolute
// references marked with a dollar sign: a leading "$" pins the column,
// a later "$" pins the row.
func shiftCell(cellID string, dx, dy int) string {
	col, row, _ := getCoordsFromCellIDString(cellID)
	colFixed := strings.Index(cellID, "$") == 0
	rowFixed := strings.LastIndex(cellID, "$") > 0
	if !colFixed {
		col += dx
	}
	if !rowFixed {
		row += dy
	}
	shifted := getCellIDStringFromCoords(col, row)
	if !colFixed && !rowFixed {
		// Fully relative reference: nothing to restore.
		return shifted
	}
	// Re-insert the "$" markers around the shifted column letters and
	// row digits.
	letters := strings.Map(letterOnlyMapF, shifted)
	digits := strings.Map(intOnlyMapF, shifted)
	out := ""
	if colFixed {
		out += "$"
	}
	out += letters
	if rowFixed {
		out += "$"
	}
	return out + digits
}
// fillCellData attempts to extract a valid value, usable in CSV form,
// from the raw cell value, and stores it (with its type and formula)
// on cell. Note - this is not actually general enough - we should
// support retaining tabs and newlines.
func fillCellData(rawcell xlsxC, reftable *RefTable, sharedFormulas map[int]sharedFormula, cell *Cell) {
	raw := rawcell.V
	if len(raw) == 0 {
		return
	}
	vval := strings.Trim(raw, " \t\n\r")
	switch rawcell.T {
	case "s": // shared string: V is an index into the reference table
		ref, err := strconv.Atoi(vval)
		if err != nil {
			panic(err)
		}
		cell.Value = reftable.ResolveSharedString(ref)
		cell.cellType = CellTypeString
	case "b": // boolean
		cell.Value = vval
		cell.cellType = CellTypeBool
	case "e": // error value; keep any attached formula
		cell.Value = vval
		cell.formula = formulaForCell(rawcell, sharedFormulas)
		cell.cellType = CellTypeError
	default:
		cell.Value = vval
		if rawcell.F == nil {
			// Plain numeric cell.
			cell.cellType = CellTypeNumeric
		} else {
			// Formula cell: V holds the cached result.
			cell.formula = formulaForCell(rawcell, sharedFormulas)
			cell.cellType = CellTypeFormula
		}
	}
}
// readRowsFromSheet is an internal helper function that extracts the
// rows from a XSLXWorksheet, populates them with Cells and resolves
// the value references from the reference table and stores them in
// the rows and columns. Returns the rows, the columns, and the column
// and row counts.
func readRowsFromSheet(Worksheet *xlsxWorksheet, file *File, sheet *Sheet) ([]*Row, []*Col, int, int) {
	var rows []*Row
	var cols []*Col
	var row *Row
	var minCol, maxCol, minRow, maxRow, colCount, rowCount int
	var reftable *RefTable
	var err error
	var insertRowIndex, insertColIndex int
	sharedFormulas := map[int]sharedFormula{}
	if len(Worksheet.SheetData.Row) == 0 {
		return nil, nil, 0, 0
	}
	reftable = file.referenceTable
	// Prefer the sheet's declared dimension; fall back to scanning every
	// cell when it is absent (e.g. Google Docs exports).
	if len(Worksheet.Dimension.Ref) > 0 {
		minCol, minRow, maxCol, maxRow, err = getMaxMinFromDimensionRef(Worksheet.Dimension.Ref)
	} else {
		minCol, minRow, maxCol, maxRow, err = calculateMaxMinFromWorksheet(Worksheet)
	}
	if err != nil {
		panic(err.Error())
	}
	rowCount = maxRow + 1
	colCount = maxCol + 1
	rows = make([]*Row, rowCount)
	cols = make([]*Col, colCount)
	insertRowIndex = minRow
	for i := range cols {
		cols[i] = &Col{
			Hidden: false,
		}
	}
	if Worksheet.Cols != nil {
		// Columns can apply to a range, for convenience we expand the
		// ranges out into individual column definitions.
		for _, rawcol := range Worksheet.Cols.Col {
			// Note, below, that sometimes column definitions can
			// exist outside the defined dimensions of the
			// spreadsheet - we deliberately exclude these
			// columns.
			for i := rawcol.Min; i <= rawcol.Max && i <= colCount; i++ {
				col := &Col{
					Min:    rawcol.Min,
					Max:    rawcol.Max,
					Hidden: rawcol.Hidden,
					Width:  rawcol.Width}
				cols[i-1] = col
				if file.styles != nil {
					col.style = file.styles.getStyle(rawcol.Style)
					col.numFmt = file.styles.getNumberFormat(rawcol.Style)
				}
			}
		}
	}
	// insert leading empty rows that is in front of minRow
	for rowIndex := 0; rowIndex < minRow; rowIndex++ {
		rows[rowIndex] = makeEmptyRow(sheet)
	}
	numRows := len(rows)
	for rowIndex := 0; rowIndex < len(Worksheet.SheetData.Row); rowIndex++ {
		rawrow := Worksheet.SheetData.Row[rowIndex]
		// Some spreadsheets will omit blank rows from the
		// stored data
		for rawrow.R > (insertRowIndex + 1) {
			// Put an empty Row into the array
			if insertRowIndex < numRows {
				rows[insertRowIndex] = makeEmptyRow(sheet)
			}
			insertRowIndex++
		}
		// range is not empty and only one range exist
		if len(rawrow.Spans) != 0 && strings.Count(rawrow.Spans, ":") == 1 {
			row = makeRowFromSpan(rawrow.Spans, sheet)
		} else {
			row = makeRowFromRaw(rawrow, sheet)
		}
		row.Hidden = rawrow.Hidden
		insertColIndex = minCol
		for _, rawcell := range rawrow.C {
			// Merge extents for this cell (0,0 when not merged).
			h, v, err := Worksheet.MergeCells.getExtent(rawcell.R)
			if err != nil {
				panic(err.Error())
			}
			x, _, _ := getCoordsFromCellIDString(rawcell.R)
			// Some spreadsheets will omit blank cells
			// from the data.
			for x > insertColIndex {
				// Put an empty Cell into the array
				row.Cells[insertColIndex] = new(Cell)
				insertColIndex++
			}
			cellX := insertColIndex
			cell := row.Cells[cellX]
			cell.HMerge = h
			cell.VMerge = v
			fillCellData(rawcell, reftable, sharedFormulas, cell)
			if file.styles != nil {
				cell.style = file.styles.getStyle(rawcell.S)
				cell.NumFmt = file.styles.getNumberFormat(rawcell.S)
			}
			cell.date1904 = file.Date1904
			// Cell is considered hidden if the row or the column of this cell is hidden
			cell.Hidden = rawrow.Hidden || (len(cols) > cellX && cols[cellX].Hidden)
			insertColIndex++
		}
		if len(rows) > insertRowIndex {
			rows[insertRowIndex] = row
		}
		insertRowIndex++
	}
	return rows, cols, colCount, rowCount
}
// indexedSheet pairs a parsed Sheet (or the error that prevented
// parsing it) with its position in the workbook, so results arriving
// out of order on a channel can be slotted back into place.
type indexedSheet struct {
	Index int
	Sheet *Sheet
	Error error
}
// readSheetViews converts the raw xlsxSheetViews into SheetView values,
// copying the pane settings when present. Returns nil when no views
// are defined.
func readSheetViews(xSheetViews xlsxSheetViews) []SheetView {
	if len(xSheetViews.SheetView) == 0 {
		return nil
	}
	views := []SheetView{}
	for _, xv := range xSheetViews.SheetView {
		var view SheetView
		if xv.Pane != nil {
			view.Pane = &Pane{
				XSplit:      xv.Pane.XSplit,
				YSplit:      xv.Pane.YSplit,
				TopLeftCell: xv.Pane.TopLeftCell,
				ActivePane:  xv.Pane.ActivePane,
				State:       xv.Pane.State,
			}
		}
		views = append(views, view)
	}
	return views
}
// readSheetFromFile is the logic of converting a xlsxSheet struct
// into a Sheet struct. This work can be done in parallel and so
// readSheetsFromZipFile will spawn an instance of this function per
// sheet and get the results back on the provided channel. Exactly one
// indexedSheet is always sent on sc, whether parsing succeeds, fails,
// or panics.
func readSheetFromFile(sc chan *indexedSheet, index int, rsheet xlsxSheet, fi *File, sheetXMLMap map[string]string) {
	result := &indexedSheet{Index: index, Sheet: nil, Error: nil}
	defer func() {
		// Convert any panic raised while reading (e.g. from
		// readRowsFromSheet) into an error result so the consumer
		// still receives a value for this index.
		if e := recover(); e != nil {
			switch e.(type) {
			case error:
				result.Error = e.(error)
			default:
				result.Error = errors.New("unexpected error")
			}
			// The only thing here, is if one close the channel. but its not the case
			sc <- result
		}
	}()
	worksheet, error := getWorksheetFromSheet(rsheet, fi.worksheets, sheetXMLMap)
	if error != nil {
		result.Error = error
		sc <- result
		return
	}
	sheet := new(Sheet)
	sheet.File = fi
	sheet.Rows, sheet.Cols, sheet.MaxCol, sheet.MaxRow = readRowsFromSheet(worksheet, fi, sheet)
	sheet.Hidden = rsheet.State == sheetStateHidden || rsheet.State == sheetStateVeryHidden
	sheet.SheetViews = readSheetViews(worksheet.SheetViews)
	sheet.SheetFormat.DefaultColWidth = worksheet.SheetFormatPr.DefaultColWidth
	sheet.SheetFormat.DefaultRowHeight = worksheet.SheetFormatPr.DefaultRowHeight
	result.Sheet = sheet
	sc <- result
}
// readSheetsFromZipFile is an internal helper function that loops
// over the Worksheets defined in the XSLXWorkbook and loads them into
// Sheet objects stored in the Sheets slice of a xlsx.File struct.
func readSheetsFromZipFile(f *zip.File, file *File, sheetXMLMap map[string]string) (map[string]*Sheet, []*Sheet, error) {
	workbook := new(xlsxWorkbook)
	rc, err := f.Open()
	if err != nil {
		return nil, nil, err
	}
	// The previous implementation never closed this reader.
	defer rc.Close()
	if err := xml.NewDecoder(rc).Decode(workbook); err != nil {
		return nil, nil, err
	}
	file.Date1904 = workbook.WorkbookPr.Date1904
	// Take the address of the slice element itself: the previous code
	// appended &entry of the range variable, which made every
	// DefinedNames pointer alias the same loop variable.
	for i := range workbook.DefinedNames.DefinedName {
		file.DefinedNames = append(file.DefinedNames, &workbook.DefinedNames.DefinedName[i])
	}
	// Only try and read sheets that have corresponding files.
	// Notably this excludes chartsheets don't right now
	var workbookSheets []xlsxSheet
	for _, sheet := range workbook.Sheets.Sheet {
		if f := worksheetFileForSheet(sheet, file.worksheets, sheetXMLMap); f != nil {
			workbookSheets = append(workbookSheets, sheet)
		}
	}
	sheetCount := len(workbookSheets)
	sheetsByName := make(map[string]*Sheet, sheetCount)
	sheets := make([]*Sheet, sheetCount)
	// Buffered to sheetCount so the producer goroutine can always
	// complete its sends even if we return early on an error. The
	// channel is deliberately never closed: the previous deferred
	// close could make a still-running producer panic with a send on
	// a closed channel after an early error return.
	sheetChan := make(chan *indexedSheet, sheetCount)
	go func() {
		for i, rawsheet := range workbookSheets {
			readSheetFromFile(sheetChan, i, rawsheet, file, sheetXMLMap)
		}
	}()
	for j := 0; j < sheetCount; j++ {
		sheet := <-sheetChan
		if sheet.Error != nil {
			return nil, nil, sheet.Error
		}
		sheetName := workbookSheets[sheet.Index].Name
		sheetsByName[sheetName] = sheet.Sheet
		sheet.Sheet.Name = sheetName
		sheets[sheet.Index] = sheet.Sheet
	}
	return sheetsByName, sheets, nil
}
// readSharedStringsFromZipFile() is an internal helper function to
// extract a reference table from the sharedStrings.xml file within
// the XLSX zip file.
func readSharedStringsFromZipFile(f *zip.File) (*RefTable, error) {
	// In a file with no strings it's possible that sharedStrings.xml
	// doesn't exist. In this case the value passed as f will be nil.
	if f == nil {
		return nil, nil
	}
	rc, err := f.Open()
	if err != nil {
		return nil, err
	}
	// The previous implementation never closed this reader.
	defer rc.Close()
	sst := new(xlsxSST)
	if err := xml.NewDecoder(rc).Decode(sst); err != nil {
		return nil, err
	}
	return MakeSharedStringRefTable(sst), nil
}
// readStylesFromZipFile() is an internal helper function to
// extract a style table from the style.xml file within
// the XLSX zip file.
func readStylesFromZipFile(f *zip.File, theme *theme) (*xlsxStyleSheet, error) {
	rc, err := f.Open()
	if err != nil {
		return nil, err
	}
	// The previous implementation never closed this reader.
	defer rc.Close()
	style := newXlsxStyleSheet(theme)
	if err := xml.NewDecoder(rc).Decode(style); err != nil {
		return nil, err
	}
	// Populate the number-format reference table as a side effect.
	buildNumFmtRefTable(style)
	return style, nil
}
// buildNumFmtRefTable registers every custom number format declared in
// the style sheet; addNumFmt is invoked purely for its side effect of
// populating the NumFmtRefTable.
func buildNumFmtRefTable(style *xlsxStyleSheet) {
	numFmts := style.NumFmts.NumFmt
	for i := range numFmts {
		style.addNumFmt(numFmts[i])
	}
}
// readThemeFromZipFile parses xl/theme/theme1.xml from the archive and
// returns the corresponding theme.
func readThemeFromZipFile(f *zip.File) (*theme, error) {
	rc, err := f.Open()
	if err != nil {
		return nil, err
	}
	// The previous implementation never closed this reader.
	defer rc.Close()
	var themeXml xlsxTheme
	if err := xml.NewDecoder(rc).Decode(&themeXml); err != nil {
		return nil, err
	}
	return newTheme(themeXml), nil
}
// WorkBookRels maps workbook relationship IDs (e.g. "rId1") to the
// target file names they refer to inside the XLSX archive.
type WorkBookRels map[string]string
// MakeXLSXWorkbookRels builds the serializable workbook relationships:
// one worksheet relation per entry in the map (placed at the slot its
// numeric rId dictates), followed by the sharedStrings, theme and
// styles relations with consecutive ids.
func (w *WorkBookRels) MakeXLSXWorkbookRels() xlsxWorkbookRels {
	count := len(*w)
	rels := xlsxWorkbookRels{
		Relationships: make([]xlsxWorkbookRelation, count+3),
	}
	for id, target := range *w {
		// Keys are of the form "rIdN"; N fixes the slice slot.
		n, err := strconv.Atoi(id[3:])
		if err != nil {
			panic(err.Error())
		}
		rels.Relationships[n-1] = xlsxWorkbookRelation{
			Id:     id,
			Target: target,
			Type:   "http://schemas.openxmlformats.org/officeDocument/2006/relationships/worksheet"}
	}
	trailing := []struct{ target, relType string }{
		{"sharedStrings.xml", "http://schemas.openxmlformats.org/officeDocument/2006/relationships/sharedStrings"},
		{"theme/theme1.xml", "http://schemas.openxmlformats.org/officeDocument/2006/relationships/theme"},
		{"styles.xml", "http://schemas.openxmlformats.org/officeDocument/2006/relationships/styles"},
	}
	for _, extra := range trailing {
		count++
		rels.Relationships[count-1] = xlsxWorkbookRelation{
			Id:     fmt.Sprintf("rId%d", count),
			Target: extra.target,
			Type:   extra.relType}
	}
	return rels
}
// readWorkbookRelationsFromZipFile is an internal helper function to
// extract a map of relationship ID strings to the name of the
// worksheet.xml file they refer to. The resulting map can be used to
// reliably dereference the worksheets in the XLSX file.
func readWorkbookRelationsFromZipFile(workbookRels *zip.File) (WorkBookRels, error) {
	rc, err := workbookRels.Open()
	if err != nil {
		return nil, err
	}
	// The previous implementation never closed this reader.
	defer rc.Close()
	wbRelationships := new(xlsxWorkbookRels)
	if err := xml.NewDecoder(rc).Decode(wbRelationships); err != nil {
		return nil, err
	}
	sheetXMLMap := make(WorkBookRels)
	const worksheetType = "http://schemas.openxmlformats.org/officeDocument/2006/relationships/worksheet"
	for _, rel := range wbRelationships.Relationships {
		// Only worksheet relations are recorded; the ".xml" suffix is
		// stripped so values can be used as base file names.
		if strings.HasSuffix(rel.Target, ".xml") && rel.Type == worksheetType {
			_, filename := path.Split(rel.Target)
			sheetXMLMap[rel.Id] = strings.Replace(filename, ".xml", "", 1)
		}
	}
	return sheetXMLMap, nil
}
// ReadZip() takes a pointer to a zip.ReadCloser and returns a
// xlsx.File struct populated with its contents, closing the archive
// when done. In most cases ReadZip is not used directly, but is called
// internally by OpenFile.
func ReadZip(f *zip.ReadCloser) (*File, error) {
	defer f.Close()
	return ReadZipReader(&f.Reader)
}
// ReadZipReader() can be used to read an XLSX in memory without
// touching the filesystem.
func ReadZipReader(r *zip.Reader) (*File, error) {
	file := NewFile()
	worksheets := make(map[string]*zip.File, len(r.File))
	var (
		sharedStrings *zip.File
		styles        *zip.File
		themeFile     *zip.File
		workbook      *zip.File
		workbookRels  *zip.File
	)
	// First pass: sort the archive members into the well-known workbook
	// parts and the per-sheet worksheet files.
	for _, zf := range r.File {
		switch zf.Name {
		case "xl/sharedStrings.xml":
			sharedStrings = zf
		case "xl/workbook.xml":
			workbook = zf
		case "xl/_rels/workbook.xml.rels":
			workbookRels = zf
		case "xl/styles.xml":
			styles = zf
		case "xl/theme/theme1.xml":
			themeFile = zf
		default:
			// Worksheet entries look like "xl/worksheets/<name>.xml";
			// the stored key is <name>.
			if len(zf.Name) > 14 && zf.Name[0:13] == "xl/worksheets" {
				worksheets[zf.Name[14:len(zf.Name)-4]] = zf
			}
		}
	}
	if workbookRels == nil {
		return nil, fmt.Errorf("xl/_rels/workbook.xml.rels not found in input xlsx.")
	}
	sheetXMLMap, err := readWorkbookRelationsFromZipFile(workbookRels)
	if err != nil {
		return nil, err
	}
	if len(worksheets) == 0 {
		return nil, fmt.Errorf("Input xlsx contains no worksheets.")
	}
	file.worksheets = worksheets
	// Shared strings may legitimately be absent; the table is nil then.
	reftable, err := readSharedStringsFromZipFile(sharedStrings)
	if err != nil {
		return nil, err
	}
	file.referenceTable = reftable
	// Theme and styles are optional parts.
	if themeFile != nil {
		theme, err := readThemeFromZipFile(themeFile)
		if err != nil {
			return nil, err
		}
		file.theme = theme
	}
	if styles != nil {
		style, err := readStylesFromZipFile(styles, file.theme)
		if err != nil {
			return nil, err
		}
		file.styles = style
	}
	sheetsByName, sheets, err := readSheetsFromZipFile(workbook, file, sheetXMLMap)
	if err != nil {
		return nil, err
	}
	if sheets == nil {
		readerErr := new(XLSXReaderError)
		readerErr.Err = "No sheets found in XLSX File"
		return nil, readerErr
	}
	file.Sheet = sheetsByName
	file.Sheets = sheets
	return file, nil
}
|
package fiftyonedegrees
/*
#cgo CFLAGS: -I . -Wimplicit-function-declaration
#cgo LDFLAGS: -lm
#include "51Degrees.h"
*/
import "C"
import (
"errors"
"fmt"
"unsafe"
)
// FiftyoneDegrees wraps the 51Degrees device-detection C data set so
// it can be initialized, queried, and destroyed from Go.
type FiftyoneDegrees struct {
	dataSet *C.fiftyoneDegreesDataSet
}
// NewFiftyoneDegrees loads the 51Degrees data file and restricts the
// detector to the given property list, returning an error when the C
// initializer reports a non-zero status.
// NOTE(review): the two C.CString allocations are never freed and leak
// on each call; fixing this needs C.free, i.e. <stdlib.h> in the cgo
// preamble — confirm before changing.
func NewFiftyoneDegrees(fileName, properties string) (*FiftyoneDegrees, error) {
	item := &FiftyoneDegrees{dataSet: new(C.fiftyoneDegreesDataSet)}
	status := C.fiftyoneDegreesInitWithPropertyString(C.CString(fileName), item.dataSet, C.CString(properties))
	// Zero status means success; anything else is an init failure.
	if status != 0 {
		return nil, errors.New(fmt.Sprintln("InitWithPropertyString Error,Status:", status))
	}
	return item, nil
}
// Close releases the native resources held by the underlying 51Degrees
// data set. The receiver is renamed from the non-idiomatic "this".
func (f *FiftyoneDegrees) Close() {
	C.fiftyoneDegreesDestroy(f.dataSet)
}
// Parse matches the given user-agent string against the data set and
// returns the device-detection result as a JSON string. The receiver
// is renamed from the non-idiomatic "this".
// NOTE(review): the C.CString allocation is never freed (needs C.free /
// <stdlib.h> in the cgo preamble) and the Match call's outcome is not
// checked — both pre-date this change.
func (f *FiftyoneDegrees) Parse(userAgent string) string {
	ws := C.fiftyoneDegreesCreateWorkset(f.dataSet)
	defer C.fiftyoneDegreesFreeWorkset(ws)
	C.fiftyoneDegreesMatch(ws, C.CString(userAgent))
	// Assumed large enough for any device JSON payload — TODO confirm
	// against the 51Degrees documentation.
	const resultLength = 50000
	buff := make([]byte, resultLength)
	length := int32(C.fiftyoneDegreesProcessDeviceJSON(ws, (*C.char)(unsafe.Pointer(&buff[0]))))
	return string(buff[:length])
}
Adding back -lrt because I wrote -rt the first time.
package fiftyonedegrees
/*
#cgo CFLAGS: -I . -Wimplicit-function-declaration
#cgo LDFLAGS: -lm -lrt
#include "51Degrees.h"
*/
import "C"
import (
"errors"
"fmt"
"unsafe"
)
// FiftyoneDegrees wraps the 51Degrees device-detection C data set so
// it can be initialized, queried, and destroyed from Go.
type FiftyoneDegrees struct {
	dataSet *C.fiftyoneDegreesDataSet
}
// NewFiftyoneDegrees loads the 51Degrees data file and restricts the
// detector to the given property list, returning an error when the C
// initializer reports a non-zero status.
// NOTE(review): the two C.CString allocations are never freed and leak
// on each call; fixing this needs C.free, i.e. <stdlib.h> in the cgo
// preamble — confirm before changing.
func NewFiftyoneDegrees(fileName, properties string) (*FiftyoneDegrees, error) {
	item := &FiftyoneDegrees{dataSet: new(C.fiftyoneDegreesDataSet)}
	status := C.fiftyoneDegreesInitWithPropertyString(C.CString(fileName), item.dataSet, C.CString(properties))
	// Zero status means success; anything else is an init failure.
	if status != 0 {
		return nil, errors.New(fmt.Sprintln("InitWithPropertyString Error,Status:", status))
	}
	return item, nil
}
// Close releases the native resources held by the underlying 51Degrees
// data set. The receiver is renamed from the non-idiomatic "this".
func (f *FiftyoneDegrees) Close() {
	C.fiftyoneDegreesDestroy(f.dataSet)
}
// Parse matches the given user-agent string against the data set and
// returns the device-detection result as a JSON string. The receiver
// is renamed from the non-idiomatic "this".
// NOTE(review): the C.CString allocation is never freed (needs C.free /
// <stdlib.h> in the cgo preamble) and the Match call's outcome is not
// checked — both pre-date this change.
func (f *FiftyoneDegrees) Parse(userAgent string) string {
	ws := C.fiftyoneDegreesCreateWorkset(f.dataSet)
	defer C.fiftyoneDegreesFreeWorkset(ws)
	C.fiftyoneDegreesMatch(ws, C.CString(userAgent))
	// Assumed large enough for any device JSON payload — TODO confirm
	// against the 51Degrees documentation.
	const resultLength = 50000
	buff := make([]byte, resultLength)
	length := int32(C.fiftyoneDegreesProcessDeviceJSON(ws, (*C.char)(unsafe.Pointer(&buff[0]))))
	return string(buff[:length])
}
|
package excelize
import (
"archive/zip"
"bytes"
"encoding/gob"
"io"
"log"
"math"
)
// ReadZipReader can be used to read an XLSX in memory without touching
// the filesystem. It returns the archive contents keyed by file name,
// together with the number of worksheet parts found.
func ReadZipReader(r *zip.Reader) (map[string]string, int, error) {
	const sheetPrefix = "xl/worksheets/sheet"
	files := make(map[string]string)
	sheetCount := 0
	for _, zf := range r.File {
		files[zf.Name] = readFile(zf)
		if len(zf.Name) > len(sheetPrefix)-1 && zf.Name[0:len(sheetPrefix)] == sheetPrefix {
			sheetCount++
		}
	}
	return files, sheetCount, nil
}
// readXML provides function to read XML content as string. A missing
// entry yields the empty string (the map's zero value).
func (f *File) readXML(name string) string {
	return f.XLSX[name]
}
// saveFileList provides function to update given file content in file list of
// XLSX. The stored value is prefixed with the standard XML declaration
// (XMLHeader).
func (f *File) saveFileList(name, content string) {
	f.XLSX[name] = XMLHeader + content
}
// Read file content as string in a archive file.
func readFile(file *zip.File) string {
rc, err := file.Open()
if err != nil {
log.Fatal(err)
}
buff := bytes.NewBuffer(nil)
io.Copy(buff, rc)
rc.Close()
return string(buff.Bytes())
}
// ToAlphaString provides function to convert integer to Excel sheet column
// title. For example convert 36 to column title AK:
//
//	excelize.ToAlphaString(36)
//
// Negative input yields the empty string.
func ToAlphaString(value int) string {
	if value < 0 {
		return ""
	}
	var ans string
	i := value + 1
	for i > 0 {
		// string(int) is deprecated and flagged by go vet; convert the
		// code point through rune explicitly (identical output)
		ans = string(rune((i-1)%26+'A')) + ans
		i = (i - 1) / 26
	}
	return ans
}
// TitleToNumber provides function to convert Excel sheet column title to a
// zero-based column index; it is case-insensitive, so "AK" and "ak" both
// yield 36. The input is not validated.
//
//	excelize.TitleToNumber("AK")
//	excelize.TitleToNumber("ak")
//
func TitleToNumber(s string) int {
	total := 0
	weight := 0.0
	for pos := len(s) - 1; pos >= 0; pos-- {
		c := s[pos]
		if c >= 'a' && c <= 'z' {
			c -= 32 // fold lowercase to uppercase
		}
		total += int(c-'A'+1) * int(math.Pow(26, weight))
		weight++
	}
	return total - 1
}
// letterOnlyMapF is used in conjunction with strings.Map to keep only the
// ASCII letters, folding a-z to their uppercase forms; any other rune maps
// to -1 and is therefore dropped by strings.Map.
// (The previous comment claimed lowercase letters were returned as-is;
// they are uppercased. The parameter also shadowed the builtin type rune.)
func letterOnlyMapF(r rune) rune {
	switch {
	case 'A' <= r && r <= 'Z':
		return r
	case 'a' <= r && r <= 'z':
		return r - 32
	}
	return -1
}
// intOnlyMapF is used in conjunction with strings.Map to keep only the
// ASCII digits of a string; any other rune maps to -1 and is dropped.
// (Replaces the magic constants 48/58 with character literals and renames
// the parameter, which shadowed the builtin type rune.)
func intOnlyMapF(r rune) rune {
	if r >= '0' && r <= '9' {
		return r
	}
	return -1
}
// deepCopy clones src into dst through a gob encode/decode round trip.
// dst must be a pointer; the caller asserts the result to the proper type.
func deepCopy(dst, src interface{}) error {
	buf := new(bytes.Buffer)
	if err := gob.NewEncoder(buf).Encode(src); err != nil {
		return err
	}
	return gob.NewDecoder(buf).Decode(dst)
}
Optimize code: simplify the lowercase-folding logic in TitleToNumber and clean up its doc comment.
package excelize
import (
"archive/zip"
"bytes"
"encoding/gob"
"io"
"log"
"math"
)
// ReadZipReader can be used to read an XLSX in memory without touching the
// filesystem. It returns the archive contents keyed by file name, plus a
// count of the worksheet parts found under xl/worksheets/.
func ReadZipReader(r *zip.Reader) (map[string]string, int, error) {
	contents := make(map[string]string)
	sheetCount := 0
	for _, entry := range r.File {
		contents[entry.Name] = readFile(entry)
		// worksheet parts are named xl/worksheets/sheetN.xml (19-byte prefix)
		if len(entry.Name) > 18 && entry.Name[0:19] == "xl/worksheets/sheet" {
			sheetCount++
		}
	}
	return contents, sheetCount, nil
}
// readXML provides function to read XML content as string. A missing
// entry yields the empty string (the map's zero value).
func (f *File) readXML(name string) string {
	return f.XLSX[name]
}
// saveFileList provides function to update given file content in file list of
// XLSX. The stored value is prefixed with the standard XML declaration
// (XMLHeader).
func (f *File) saveFileList(name, content string) {
	f.XLSX[name] = XMLHeader + content
}
// Read file content as string in a archive file.
func readFile(file *zip.File) string {
rc, err := file.Open()
if err != nil {
log.Fatal(err)
}
buff := bytes.NewBuffer(nil)
io.Copy(buff, rc)
rc.Close()
return string(buff.Bytes())
}
// ToAlphaString provides function to convert integer to Excel sheet column
// title. For example convert 36 to column title AK:
//
//	excelize.ToAlphaString(36)
//
// Negative input yields the empty string.
func ToAlphaString(value int) string {
	if value < 0 {
		return ""
	}
	var ans string
	i := value + 1
	for i > 0 {
		// string(int) is deprecated and flagged by go vet; convert the
		// code point through rune explicitly (identical output)
		ans = string(rune((i-1)%26+'A')) + ans
		i = (i - 1) / 26
	}
	return ans
}
// TitleToNumber provides function to convert Excel sheet column title to a
// zero-based column index; it is case-insensitive, so "AK" and "ak" both
// yield 36. The input is not validated.
//
//	excelize.TitleToNumber("AK")
//	excelize.TitleToNumber("ak")
//
func TitleToNumber(s string) int {
	total := 0
	weight := 0.0
	for pos := len(s) - 1; pos >= 0; pos-- {
		c := s[pos]
		if c >= 'a' && c <= 'z' {
			c -= 32 // fold lowercase to uppercase
		}
		total += int(c-'A'+1) * int(math.Pow(26, weight))
		weight++
	}
	return total - 1
}
// letterOnlyMapF is used in conjunction with strings.Map to keep only the
// ASCII letters, folding a-z to their uppercase forms; any other rune maps
// to -1 and is therefore dropped by strings.Map.
// (The previous comment claimed lowercase letters were returned as-is;
// they are uppercased. The parameter also shadowed the builtin type rune.)
func letterOnlyMapF(r rune) rune {
	switch {
	case 'A' <= r && r <= 'Z':
		return r
	case 'a' <= r && r <= 'z':
		return r - 32
	}
	return -1
}
// intOnlyMapF is used in conjunction with strings.Map to keep only the
// ASCII digits of a string; any other rune maps to -1 and is dropped.
// (Replaces the magic constants 48/58 with character literals and renames
// the parameter, which shadowed the builtin type rune.)
func intOnlyMapF(r rune) rune {
	if r >= '0' && r <= '9' {
		return r
	}
	return -1
}
// deepCopy clones src into dst through a gob encode/decode round trip.
// dst must be a pointer; the caller asserts the result to the proper type.
func deepCopy(dst, src interface{}) error {
	buf := new(bytes.Buffer)
	if err := gob.NewEncoder(buf).Encode(src); err != nil {
		return err
	}
	return gob.NewDecoder(buf).Decode(dst)
}
|
/*
Package dejavu offers quick detection of already witnessed data.
Limited memory of witnessed data, oldest are forgotten. Library is
thread safe. Offers deterministic and probabilistic (over an order of
magnitude less memory consuming) implementation.
*/
package dejavu
import (
"bufio"
"crypto/sha256"
"fmt"
"github.com/willf/bloom"
"io"
"os"
"sync"
)
// Version is the library version string.
const Version string = "0.1.0"
// DejaVu witnesses data and recalls if seen before.
type DejaVu interface {
	// Witness data and add to memory. Returns true if previously seen.
	Witness(data []byte) bool
	// WitnessDigest is equivalent to the Witness method but bypasses
	// hashing the data. Use this to improve performance if you already
	// happen to have the sha256 digest.
	WitnessDigest(digest [sha256.Size]byte) bool
}
// New creates a DejaVu memory with the given entry limit. fpRatio is the
// false positive ratio, consulted only by the probabilistic variant.
func New(probabilistic bool, limit uint32, fpRatio float64) DejaVu {
	if !probabilistic {
		return NewDeterministic(limit)
	}
	return NewProbabilistic(limit, fpRatio)
}
///////////////////////////////////
// PROCESS TEXT (for dejavu bin) //
///////////////////////////////////

// ProcessPaths is equivalent to Process, only that file paths are given.
// Use "-" among inputs to read stdin, and an empty out to write to stdout.
func ProcessPaths(d DejaVu, filter bool, out string, inputs ...string) error {
	// get output writer
	writer := os.Stdout
	if out != "" {
		// BUG FIX: the original used "writer, err := os.Create(out)" here,
		// which declared a new shadowed writer; the outer writer stayed nil
		// and all output was silently dropped.
		f, err := os.Create(out)
		if err != nil {
			return err
		}
		defer f.Close()
		writer = f
	}
	// get input readers
	readers := make([]io.Reader, len(inputs))
	for i, path := range inputs {
		if path == "-" { // read from stdin
			readers[i] = os.Stdin
		} else { // read from file path
			file, err := os.Open(path)
			if err != nil {
				return err
			}
			defer file.Close()
			readers[i] = file
		}
	}
	Process(d, filter, writer, readers...)
	return nil
}
// Process given inputs as text to output with dejavu instance.
// With filter true, duplicate lines are suppressed; with filter false,
// only duplicate lines are emitted.
func Process(d DejaVu, filter bool, out io.Writer, inputs ...io.Reader) {
	for _, in := range inputs {
		lines := bufio.NewScanner(in)
		for lines.Scan() {
			line := fmt.Sprintf("%s\n", lines.Text())
			// seen != filter covers both modes: duplicates when not
			// filtering, first sightings when filtering
			if seen := d.Witness([]byte(line)); seen != filter {
				out.Write([]byte(line))
			}
		}
	}
}
//////////////////////////////////
// DETERMINISTIC IMPLEMENTATION //
//////////////////////////////////

// deterministic remembers exactly the last `size` digests in a ring
// buffer, with a map for O(1) membership checks.
type deterministic struct {
	buffer [][sha256.Size]byte // ring buffer
	size   int                 // ring buffer size
	index  int                 // current ring buffer index
	lookup map[[sha256.Size]byte]int // digest -> newest index
	mutex  *sync.Mutex
}
// NewDeterministic creates a deterministic DejaVu memory. It remembers
// the most recent entries within the given entry limit, forgetting older
// entries as new data is witnessed.
func NewDeterministic(limit uint32) DejaVu {
	d := &deterministic{
		buffer: make([][sha256.Size]byte, limit),
		size:   int(limit),
		lookup: make(map[[sha256.Size]byte]int),
		mutex:  new(sync.Mutex),
	}
	return d
}
// WitnessDigest records digest in the ring buffer and reports whether it
// was already present in the remembered window. Thread safe.
func (d *deterministic) WitnessDigest(digest [sha256.Size]byte) bool {
	d.mutex.Lock()
	_, familiar := d.lookup[digest] // check if previously seen
	// rm oldest lookup key if no newer entry
	// NOTE(review): buffer is allocated with len == size in
	// NewDeterministic, so this comparison looks always-true — confirm
	// whether it was meant to detect "buffer not yet full".
	maxed := len(d.buffer) == d.size // overwriting oldest entry
	if maxed && (d.lookup[d.buffer[d.index]] == d.index) {
		delete(d.lookup, d.buffer[d.index]) // no newer entries
	}
	// add entry and update index/lookup
	d.buffer[d.index] = digest
	d.lookup[digest] = d.index
	d.index = (d.index + 1) % d.size
	d.mutex.Unlock()
	return familiar
}
// Witness hashes data with SHA-256 and defers to WitnessDigest.
func (d *deterministic) Witness(data []byte) bool {
	return d.WitnessDigest(sha256.Sum256(data))
}
//////////////////////////////////
// PROBABILISTIC IMPLEMENTATION //
//////////////////////////////////

// liveFilterCnt is the number of bloom filters that share the advertised
// entry limit; totalFilterCnt keeps one extra filter around, apparently so
// recently retired entries are still recalled — confirm intent.
const liveFilterCnt = 8
const totalFilterCnt = liveFilterCnt + 1
// probabilistic remembers digests in a rotating set of bloom filters,
// trading exactness for a much smaller memory footprint.
type probabilistic struct {
	filters            []*bloom.BloomFilter
	limit              uint32  // filter size
	falsePositiveRatio float64 // remember for buffer switch
	index              int     // current filter index
	entries            uint32  // entries in currently indexed filter
	mutex              *sync.Mutex
}
// NewProbabilistic creates a probabilistic DejaVu memory. It probably
// remembers the most recent entries within the given entry limit and
// false positive ratio; the ratio should be between 0.0 and 1.0.
func NewProbabilistic(limit uint32, falsePositiveRatio float64) DejaVu {
	perFilter := uint(limit / liveFilterCnt)
	filters := make([]*bloom.BloomFilter, totalFilterCnt)
	for i := range filters {
		filters[i] = bloom.NewWithEstimates(perFilter, falsePositiveRatio)
	}
	return &probabilistic{
		filters:            filters,
		limit:              limit,
		falsePositiveRatio: falsePositiveRatio,
		mutex:              new(sync.Mutex),
	}
}
// WitnessDigest records digest and reports whether it was (probably) seen
// within the remembered window; false positives are possible. Thread safe.
func (p *probabilistic) WitnessDigest(digest [sha256.Size]byte) bool {
	p.mutex.Lock()
	// check if exists
	d := digest[:]
	familiar := false
	for _, f := range p.filters {
		if f.Test(d) {
			familiar = true
			break
		}
	}
	// always add in case its from the old buffer
	p.filters[p.index].Add(d)
	p.entries++
	// switch buffers if current is maxed
	if p.entries >= (p.limit / liveFilterCnt) {
		p.entries = 0
		p.index = (p.index + 1) % len(p.filters)
		fl := uint(p.limit / liveFilterCnt)
		f := bloom.NewWithEstimates(fl, p.falsePositiveRatio)
		p.filters[p.index] = f // replace old filter
	}
	p.mutex.Unlock()
	return familiar
}
// Witness hashes data with SHA-256 and defers to WitnessDigest.
func (p *probabilistic) Witness(data []byte) bool {
	return p.WitnessDigest(sha256.Sum256(data))
}
v1.0.0
/*
Package dejavu offers quick detection of already witnessed data.
Limited memory of witnessed data, oldest are forgotten. Library is
thread safe. Offers deterministic and probabilistic (over an order of
magnitude less memory consuming) implementation.
*/
package dejavu
import (
"bufio"
"crypto/sha256"
"fmt"
"github.com/willf/bloom"
"io"
"os"
"sync"
)
// Version is the library version string.
const Version string = "1.0.0"
// DejaVu witnesses data and recalls if seen before.
type DejaVu interface {
	// Witness data and add to memory. Returns true if previously seen.
	Witness(data []byte) bool
	// WitnessDigest is equivalent to the Witness method but bypasses
	// hashing the data. Use this to improve performance if you already
	// happen to have the sha256 digest.
	WitnessDigest(digest [sha256.Size]byte) bool
}
// New creates a DejaVu memory with the given entry limit. fpRatio is the
// false positive ratio, consulted only by the probabilistic variant.
func New(probabilistic bool, limit uint32, fpRatio float64) DejaVu {
	if !probabilistic {
		return NewDeterministic(limit)
	}
	return NewProbabilistic(limit, fpRatio)
}
///////////////////////////////////
// PROCESS TEXT (for dejavu bin) //
///////////////////////////////////

// ProcessPaths is equivalent to Process, only that file paths are given.
// Use "-" among inputs to read stdin, and an empty out to write to stdout.
func ProcessPaths(d DejaVu, filter bool, out string, inputs ...string) error {
	// get output writer
	writer := os.Stdout
	if out != "" {
		// BUG FIX: the original used "writer, err := os.Create(out)" here,
		// which declared a new shadowed writer; the outer writer stayed nil
		// and all output was silently dropped.
		f, err := os.Create(out)
		if err != nil {
			return err
		}
		defer f.Close()
		writer = f
	}
	// get input readers
	readers := make([]io.Reader, len(inputs))
	for i, path := range inputs {
		if path == "-" { // read from stdin
			readers[i] = os.Stdin
		} else { // read from file path
			file, err := os.Open(path)
			if err != nil {
				return err
			}
			defer file.Close()
			readers[i] = file
		}
	}
	Process(d, filter, writer, readers...)
	return nil
}
// Process given inputs as text to output with dejavu instance.
// With filter true, duplicate lines are suppressed; with filter false,
// only duplicate lines are emitted.
func Process(d DejaVu, filter bool, out io.Writer, inputs ...io.Reader) {
	for _, in := range inputs {
		lines := bufio.NewScanner(in)
		for lines.Scan() {
			line := fmt.Sprintf("%s\n", lines.Text())
			// seen != filter covers both modes: duplicates when not
			// filtering, first sightings when filtering
			if seen := d.Witness([]byte(line)); seen != filter {
				out.Write([]byte(line))
			}
		}
	}
}
//////////////////////////////////
// DETERMINISTIC IMPLEMENTATION //
//////////////////////////////////

// deterministic remembers exactly the last `size` digests in a ring
// buffer, with a map for O(1) membership checks.
type deterministic struct {
	buffer [][sha256.Size]byte // ring buffer
	size   int                 // ring buffer size
	index  int                 // current ring buffer index
	lookup map[[sha256.Size]byte]int // digest -> newest index
	mutex  *sync.Mutex
}
// NewDeterministic creates a deterministic DejaVu memory. It remembers
// the most recent entries within the given entry limit, forgetting older
// entries as new data is witnessed.
func NewDeterministic(limit uint32) DejaVu {
	d := &deterministic{
		buffer: make([][sha256.Size]byte, limit),
		size:   int(limit),
		lookup: make(map[[sha256.Size]byte]int),
		mutex:  new(sync.Mutex),
	}
	return d
}
// WitnessDigest records digest in the ring buffer and reports whether it
// was already present in the remembered window. Thread safe.
func (d *deterministic) WitnessDigest(digest [sha256.Size]byte) bool {
	d.mutex.Lock()
	_, familiar := d.lookup[digest] // check if previously seen
	// rm oldest lookup key if no newer entry
	// NOTE(review): buffer is allocated with len == size in
	// NewDeterministic, so this comparison looks always-true — confirm
	// whether it was meant to detect "buffer not yet full".
	maxed := len(d.buffer) == d.size // overwriting oldest entry
	if maxed && (d.lookup[d.buffer[d.index]] == d.index) {
		delete(d.lookup, d.buffer[d.index]) // no newer entries
	}
	// add entry and update index/lookup
	d.buffer[d.index] = digest
	d.lookup[digest] = d.index
	d.index = (d.index + 1) % d.size
	d.mutex.Unlock()
	return familiar
}
// Witness hashes data with SHA-256 and defers to WitnessDigest.
func (d *deterministic) Witness(data []byte) bool {
	return d.WitnessDigest(sha256.Sum256(data))
}
//////////////////////////////////
// PROBABILISTIC IMPLEMENTATION //
//////////////////////////////////

// liveFilterCnt is the number of bloom filters that share the advertised
// entry limit; totalFilterCnt keeps one extra filter around, apparently so
// recently retired entries are still recalled — confirm intent.
const liveFilterCnt = 8
const totalFilterCnt = liveFilterCnt + 1
// probabilistic remembers digests in a rotating set of bloom filters,
// trading exactness for a much smaller memory footprint.
type probabilistic struct {
	filters            []*bloom.BloomFilter
	limit              uint32  // filter size
	falsePositiveRatio float64 // remember for buffer switch
	index              int     // current filter index
	entries            uint32  // entries in currently indexed filter
	mutex              *sync.Mutex
}
// NewProbabilistic creates a probabilistic DejaVu memory. It probably
// remembers the most recent entries within the given entry limit and
// false positive ratio; the ratio should be between 0.0 and 1.0.
func NewProbabilistic(limit uint32, falsePositiveRatio float64) DejaVu {
	perFilter := uint(limit / liveFilterCnt)
	filters := make([]*bloom.BloomFilter, totalFilterCnt)
	for i := range filters {
		filters[i] = bloom.NewWithEstimates(perFilter, falsePositiveRatio)
	}
	return &probabilistic{
		filters:            filters,
		limit:              limit,
		falsePositiveRatio: falsePositiveRatio,
		mutex:              new(sync.Mutex),
	}
}
// WitnessDigest records digest and reports whether it was (probably) seen
// within the remembered window; false positives are possible. Thread safe.
func (p *probabilistic) WitnessDigest(digest [sha256.Size]byte) bool {
	p.mutex.Lock()
	// check if exists
	d := digest[:]
	familiar := false
	for _, f := range p.filters {
		if f.Test(d) {
			familiar = true
			break
		}
	}
	// always add in case its from the old buffer
	p.filters[p.index].Add(d)
	p.entries++
	// switch buffers if current is maxed
	if p.entries >= (p.limit / liveFilterCnt) {
		p.entries = 0
		p.index = (p.index + 1) % len(p.filters)
		fl := uint(p.limit / liveFilterCnt)
		f := bloom.NewWithEstimates(fl, p.falsePositiveRatio)
		p.filters[p.index] = f // replace old filter
	}
	p.mutex.Unlock()
	return familiar
}
// Witness hashes data with SHA-256 and defers to WitnessDigest.
func (p *probabilistic) Witness(data []byte) bool {
	return p.WitnessDigest(sha256.Sum256(data))
}
|
package main
import (
"flag"
"fmt"
"io"
"log"
"os"
"path/filepath"
"strings"
"time"
"github.com/adiabat/btcd/chaincfg"
"github.com/mit-dci/lit/litbamf"
"github.com/mit-dci/lit/litrpc"
"github.com/mit-dci/lit/lnutil"
"github.com/mit-dci/lit/qln"
)
const (
	// litHomeDirName is the directory under $HOME holding lit's state.
	litHomeDirName = ".lit"
	// keyFileName is the root key file, stored as hex.
	keyFileName = "privkey.hex"
	// this is my local testnet node, replace it with your own close by.
	// Random internet testnet nodes usually work but sometimes don't, so
	// maybe I should test against different versions out there.
	// NOTE(review): the comment above refers to a node address, but no
	// address constant is defined here — looks stale; confirm.
	hardHeight = 1111111 // height to start at if not specified
)
// LitConfig holds runtime settings for a lit node and its lower layers,
// populated from command-line flags by setConfig.
type LitConfig struct {
	reSync, hard bool // flag to set networks
	// hostnames to connect to for different networks
	tn3host, bc2host, lt4host, reghost string
	verbose bool
	// birthblock is the height at which chain sync begins
	birthblock int32
	// rpcport is the TCP port the RPC server listens on
	rpcport uint16
	// litHomeDir is the directory holding keys, logs, and databases
	litHomeDir string
	Params *chaincfg.Params
}
// setConfig populates lc from the process's command-line flags.
func setConfig(lc *LitConfig) {
	birth := flag.Int("tip", hardHeight, "height to begin db sync")
	easy := flag.Bool("ez", false, "use easy mode (bloom filters)")
	verbose := flag.Bool("v", false, "verbose; print all logs to stdout")
	tn3 := flag.String("tn3", "", "testnet3 full node")
	reg := flag.String("reg", "", "regtest full node")
	bc2 := flag.String("bc2", "", "bc2 full node")
	lt4 := flag.String("lt4", "", "litecoin testnet4 full node")
	resync := flag.Bool("resync", false, "force resync from given tip")
	rpcport := flag.Int("rpcport", 8001, "port to listen for RPC")
	home := flag.String("dir",
		filepath.Join(os.Getenv("HOME"), litHomeDirName), "lit home directory")

	flag.Parse()

	lc.birthblock = int32(*birth)
	lc.tn3host = *tn3
	lc.bc2host = *bc2
	lc.lt4host = *lt4
	lc.reghost = *reg
	lc.reSync = *resync
	lc.hard = !*easy // easy mode disables hard mode
	lc.verbose = *verbose
	lc.rpcport = uint16(*rpcport)
	lc.litHomeDir = *home
}
// ensurePort returns host with defaultPort appended when host does not
// already carry an explicit ":port" suffix.
func ensurePort(host, defaultPort string) string {
	if strings.Contains(host, ":") {
		return host
	}
	return host + ":" + defaultPort
}

// linkWallets tries to link the wallets given in conf to the litNode.
// For now, wallets are linked to the litnode on startup and can't
// appear / disappear while it's running; later this could support
// dynamically adding / removing wallets. Order matters: the first
// registered wallet becomes the default.
func linkWallets(node *qln.LitNode, key *[32]byte, conf *LitConfig) error {
	var err error
	// try regtest
	if conf.reghost != "" {
		conf.reghost = ensurePort(conf.reghost, "18444")
		fmt.Printf("reg: %s\n", conf.reghost)
		err = node.LinkBaseWallet(
			key, 120, conf.reSync,
			conf.reghost, &chaincfg.RegressionNetParams)
		if err != nil {
			return err
		}
	}
	// try testnet3
	if conf.tn3host != "" {
		conf.tn3host = ensurePort(conf.tn3host, "18333")
		err = node.LinkBaseWallet(
			key, conf.birthblock, conf.reSync,
			conf.tn3host, &chaincfg.TestNet3Params)
		if err != nil {
			return err
		}
	}
	// try litecoin testnet4
	if conf.lt4host != "" {
		conf.lt4host = ensurePort(conf.lt4host, "19335")
		err = node.LinkBaseWallet(
			key, 47295, conf.reSync,
			conf.lt4host, &chaincfg.LiteCoinTestNet4Params)
		if err != nil {
			return err
		}
	}
	return nil
}
// main starts a lit node: parses config, sets up file logging, loads (or
// generates) the root key, links wallets, and serves RPC until a stop
// request arrives over the off button channel.
func main() {
	log.Printf("lit node v0.1\n")
	log.Printf("-h for list of options.\n")

	conf := new(LitConfig)
	setConfig(conf)

	// create lit home directory if the directory does not exist
	if _, err := os.Stat(conf.litHomeDir); os.IsNotExist(err) {
		os.Mkdir(conf.litHomeDir, 0700)
	}

	logFilePath := filepath.Join(conf.litHomeDir, "lit.log")
	logfile, err := os.OpenFile(logFilePath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		// BUG FIX: this error was previously ignored; a failed open left
		// logfile nil and silently discarded all log output.
		log.Fatal(err)
	}
	defer logfile.Close()
	if conf.verbose {
		// verbose mode mirrors logs to stdout as well as the log file
		logOutput := io.MultiWriter(os.Stdout, logfile)
		log.SetOutput(logOutput)
	} else {
		log.SetOutput(logfile)
	}

	// Allow node with no linked wallets, for testing.
	// TODO Should update tests and disallow nodes without wallets later.
	// if conf.tn3host == "" && conf.lt4host == "" && conf.reghost == "" {
	// 	log.Fatal("error: no network specified; use -tn3, -reg, -lt4")
	// }

	// Keys: the litNode and wallets all get 32 byte keys.
	// Right now though, they all get the *same* key. For lit as a single
	// binary now, all using the same key makes sense; could split up later.
	keyFilePath := filepath.Join(conf.litHomeDir, keyFileName)

	// read key file (generate if not found)
	key, err := lnutil.ReadKeyFile(keyFilePath)
	if err != nil {
		log.Fatal(err)
	}

	// Setup LN node. Activate Tower if in hard mode.
	// give node and below the file path of the lit home directory
	node, err := qln.NewLitNode(key, conf.litHomeDir, false)
	if err != nil {
		log.Fatal(err)
	}

	// node is up; link wallets based on args
	err = linkWallets(node, key, conf)
	if err != nil {
		log.Fatal(err)
	}

	rpcl := new(litrpc.LitRPC)
	rpcl.Node = node
	rpcl.OffButton = make(chan bool, 1)

	litrpc.RPCListen(rpcl, conf.rpcport)
	litbamf.BamfListen(conf.rpcport, conf.litHomeDir)

	// block until a stop request arrives over RPC
	<-rpcl.OffButton
	fmt.Printf("Got stop request\n")
	// give in-flight handlers a moment to finish before exiting
	time.Sleep(time.Second)
}
[logging] enable microsecond logging
package main
import (
"flag"
"fmt"
"io"
"log"
"os"
"path/filepath"
"strings"
"time"
"github.com/adiabat/btcd/chaincfg"
"github.com/mit-dci/lit/litbamf"
"github.com/mit-dci/lit/litrpc"
"github.com/mit-dci/lit/lnutil"
"github.com/mit-dci/lit/qln"
)
const (
	// litHomeDirName is the directory under $HOME holding lit's state.
	litHomeDirName = ".lit"
	// keyFileName is the root key file, stored as hex.
	keyFileName = "privkey.hex"
	// this is my local testnet node, replace it with your own close by.
	// Random internet testnet nodes usually work but sometimes don't, so
	// maybe I should test against different versions out there.
	// NOTE(review): the comment above refers to a node address, but no
	// address constant is defined here — looks stale; confirm.
	hardHeight = 1111111 // height to start at if not specified
)
// LitConfig holds runtime settings for a lit node and its lower layers,
// populated from command-line flags by setConfig.
type LitConfig struct {
	reSync, hard bool // flag to set networks
	// hostnames to connect to for different networks
	tn3host, bc2host, lt4host, reghost string
	verbose bool
	// birthblock is the height at which chain sync begins
	birthblock int32
	// rpcport is the TCP port the RPC server listens on
	rpcport uint16
	// litHomeDir is the directory holding keys, logs, and databases
	litHomeDir string
	Params *chaincfg.Params
}
// setConfig populates lc from the process's command-line flags.
func setConfig(lc *LitConfig) {
	birth := flag.Int("tip", hardHeight, "height to begin db sync")
	easy := flag.Bool("ez", false, "use easy mode (bloom filters)")
	verbose := flag.Bool("v", false, "verbose; print all logs to stdout")
	tn3 := flag.String("tn3", "", "testnet3 full node")
	reg := flag.String("reg", "", "regtest full node")
	bc2 := flag.String("bc2", "", "bc2 full node")
	lt4 := flag.String("lt4", "", "litecoin testnet4 full node")
	resync := flag.Bool("resync", false, "force resync from given tip")
	rpcport := flag.Int("rpcport", 8001, "port to listen for RPC")
	home := flag.String("dir",
		filepath.Join(os.Getenv("HOME"), litHomeDirName), "lit home directory")

	flag.Parse()

	lc.birthblock = int32(*birth)
	lc.tn3host = *tn3
	lc.bc2host = *bc2
	lc.lt4host = *lt4
	lc.reghost = *reg
	lc.reSync = *resync
	lc.hard = !*easy // easy mode disables hard mode
	lc.verbose = *verbose
	lc.rpcport = uint16(*rpcport)
	lc.litHomeDir = *home
}
// ensurePort returns host with defaultPort appended when host does not
// already carry an explicit ":port" suffix.
func ensurePort(host, defaultPort string) string {
	if strings.Contains(host, ":") {
		return host
	}
	return host + ":" + defaultPort
}

// linkWallets tries to link the wallets given in conf to the litNode.
// For now, wallets are linked to the litnode on startup and can't
// appear / disappear while it's running; later this could support
// dynamically adding / removing wallets. Order matters: the first
// registered wallet becomes the default.
func linkWallets(node *qln.LitNode, key *[32]byte, conf *LitConfig) error {
	var err error
	// try regtest
	if conf.reghost != "" {
		conf.reghost = ensurePort(conf.reghost, "18444")
		fmt.Printf("reg: %s\n", conf.reghost)
		err = node.LinkBaseWallet(
			key, 120, conf.reSync,
			conf.reghost, &chaincfg.RegressionNetParams)
		if err != nil {
			return err
		}
	}
	// try testnet3
	if conf.tn3host != "" {
		conf.tn3host = ensurePort(conf.tn3host, "18333")
		err = node.LinkBaseWallet(
			key, conf.birthblock, conf.reSync,
			conf.tn3host, &chaincfg.TestNet3Params)
		if err != nil {
			return err
		}
	}
	// try litecoin testnet4
	if conf.lt4host != "" {
		conf.lt4host = ensurePort(conf.lt4host, "19335")
		err = node.LinkBaseWallet(
			key, 47295, conf.reSync,
			conf.lt4host, &chaincfg.LiteCoinTestNet4Params)
		if err != nil {
			return err
		}
	}
	return nil
}
// main starts a lit node: parses config, sets up file logging, loads (or
// generates) the root key, links wallets, and serves RPC until a stop
// request arrives over the off button channel.
func main() {
	log.Printf("lit node v0.1\n")
	log.Printf("-h for list of options.\n")

	conf := new(LitConfig)
	setConfig(conf)

	// create lit home directory if the directory does not exist
	if _, err := os.Stat(conf.litHomeDir); os.IsNotExist(err) {
		os.Mkdir(conf.litHomeDir, 0700)
	}

	logFilePath := filepath.Join(conf.litHomeDir, "lit.log")
	logfile, err := os.OpenFile(logFilePath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		// BUG FIX: this error was previously ignored; a failed open left
		// logfile nil and silently discarded all log output.
		log.Fatal(err)
	}
	defer logfile.Close()
	// timestamp log lines down to microseconds
	log.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds)
	if conf.verbose {
		// verbose mode mirrors logs to stdout as well as the log file
		logOutput := io.MultiWriter(os.Stdout, logfile)
		log.SetOutput(logOutput)
	} else {
		log.SetOutput(logfile)
	}

	// Allow node with no linked wallets, for testing.
	// TODO Should update tests and disallow nodes without wallets later.
	// if conf.tn3host == "" && conf.lt4host == "" && conf.reghost == "" {
	// 	log.Fatal("error: no network specified; use -tn3, -reg, -lt4")
	// }

	// Keys: the litNode and wallets all get 32 byte keys.
	// Right now though, they all get the *same* key. For lit as a single
	// binary now, all using the same key makes sense; could split up later.
	keyFilePath := filepath.Join(conf.litHomeDir, keyFileName)

	// read key file (generate if not found)
	key, err := lnutil.ReadKeyFile(keyFilePath)
	if err != nil {
		log.Fatal(err)
	}

	// Setup LN node. Activate Tower if in hard mode.
	// give node and below the file path of the lit home directory
	node, err := qln.NewLitNode(key, conf.litHomeDir, false)
	if err != nil {
		log.Fatal(err)
	}

	// node is up; link wallets based on args
	err = linkWallets(node, key, conf)
	if err != nil {
		log.Fatal(err)
	}

	rpcl := new(litrpc.LitRPC)
	rpcl.Node = node
	rpcl.OffButton = make(chan bool, 1)

	litrpc.RPCListen(rpcl, conf.rpcport)
	litbamf.BamfListen(conf.rpcport, conf.litHomeDir)

	// block until a stop request arrives over RPC
	<-rpcl.OffButton
	fmt.Printf("Got stop request\n")
	// give in-flight handlers a moment to finish before exiting
	time.Sleep(time.Second)
}
|
package main
import (
"bufio"
"crypto/rand"
"encoding/base64"
"fmt"
"io"
"log"
"os"
"path/filepath"
"strings"
"time"
flags "github.com/jessevdk/go-flags"
"github.com/mit-dci/lit/coinparam"
"github.com/mit-dci/lit/litbamf"
"github.com/mit-dci/lit/litrpc"
"github.com/mit-dci/lit/lnutil"
"github.com/mit-dci/lit/qln"
)
// config defines lit's command-line / config-file options, parsed with
// go-flags.
// NOTE(review): several tags below run key pairs together without a space
// (e.g. `short:"r"long:"reSync"`); reflect's tag parser appears to
// tolerate this, but confirm go-flags sees both keys.
type config struct { // define a struct for usage with go-flags
	Tn3host     string `long:"tn3" description:"Connect to bitcoin testnet3."`
	Bc2host     string `long:"bc2" description:"bc2 full node."`
	Lt4host     string `long:"lt4" description:"Connect to litecoin testnet4."`
	Reghost     string `long:"reg" description:"Connect to bitcoin regtest."`
	Litereghost string `long:"litereg" description:"Connect to litecoin regtest."`
	Tvtchost    string `long:"tvtc" description:"Connect to Vertcoin test node."`
	Vtchost     string `long:"vtc" description:"Connect to Vertcoin."`
	LitHomeDir  string `long:"dir" description:"Specify Home Directory of lit as an absolute path."`
	// ConfigFile is resolved in main; it has no flag tag
	ConfigFile string
	ReSync  bool   `short:"r"long:"reSync" description:"Resync from the given tip."`
	Tower   bool   `long:"tower" description:"Watchtower: Run a watching node"`
	Hard    bool   `short:"t" long:"hard" description:"Flag to set networks."`
	Verbose bool   `short:"v" long:"verbose" description:"Set verbosity to true."`
	Rpcport uint16 `short:"p"long:"rpcport" description:"Set rpcport to connect to."`
	Params  *coinparam.Params
}
// Defaults for file locations and the RPC port; all paths anchor on $HOME.
var (
	defaultLitHomeDirName = os.Getenv("HOME") + "/.lit"
	defaultKeyFileName    = "privkey.hex"
	defaultConfigFilename = "lit.conf"
	defaultHomeDir        = os.Getenv("HOME")
	defaultConfigFile     = filepath.Join(os.Getenv("HOME"), "/.lit/lit.conf")
	defaultRpcport        = uint16(8001)
)
func fileExists(name string) bool {
if _, err := os.Stat(name); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}
// newConfigParser returns a new command line flags parser bound to conf.
func newConfigParser(conf *config, options flags.Options) *flags.Parser {
	return flags.NewParser(conf, options)
}
// ensurePort returns host with defaultPort appended when host does not
// already carry an explicit ":port" suffix.
func ensurePort(host, defaultPort string) string {
	if strings.Contains(host, ":") {
		return host
	}
	return host + ":" + defaultPort
}

// linkWallets tries to link the wallets given in conf to the litNode.
// For now, wallets are linked to the litnode on startup and can't
// appear / disappear while it's running; later this could support
// dynamically adding / removing wallets. Order matters: the first
// registered wallet becomes the default.
func linkWallets(node *qln.LitNode, key *[32]byte, conf *config) error {
	var err error
	// try regtest
	if conf.Reghost != "" {
		p := &coinparam.RegressionNetParams
		conf.Reghost = ensurePort(conf.Reghost, p.DefaultPort)
		fmt.Printf("reg: %s\n", conf.Reghost)
		err = node.LinkBaseWallet(key, 120, conf.ReSync, conf.Tower, conf.Reghost, p)
		if err != nil {
			return err
		}
	}
	// try testnet3
	if conf.Tn3host != "" {
		p := &coinparam.TestNet3Params
		conf.Tn3host = ensurePort(conf.Tn3host, p.DefaultPort)
		err = node.LinkBaseWallet(
			key, 1150000, conf.ReSync, conf.Tower,
			conf.Tn3host, p)
		if err != nil {
			return err
		}
	}
	// try litecoin regtest
	if conf.Litereghost != "" {
		p := &coinparam.LiteRegNetParams
		conf.Litereghost = ensurePort(conf.Litereghost, p.DefaultPort)
		err = node.LinkBaseWallet(key, 120, conf.ReSync, conf.Tower, conf.Litereghost, p)
		if err != nil {
			return err
		}
	}
	// try litecoin testnet4
	if conf.Lt4host != "" {
		p := &coinparam.LiteCoinTestNet4Params
		conf.Lt4host = ensurePort(conf.Lt4host, p.DefaultPort)
		err = node.LinkBaseWallet(
			key, p.StartHeight, conf.ReSync, conf.Tower,
			conf.Lt4host, p)
		if err != nil {
			return err
		}
	}
	// try vertcoin testnet
	if conf.Tvtchost != "" {
		p := &coinparam.VertcoinTestNetParams
		conf.Tvtchost = ensurePort(conf.Tvtchost, p.DefaultPort)
		err = node.LinkBaseWallet(
			key, 0, conf.ReSync, conf.Tower,
			conf.Tvtchost, p)
		if err != nil {
			return err
		}
	}
	// try vertcoin mainnet
	if conf.Vtchost != "" {
		p := &coinparam.VertcoinParams
		conf.Vtchost = ensurePort(conf.Vtchost, p.DefaultPort)
		err = node.LinkBaseWallet(
			key, p.StartHeight, conf.ReSync, conf.Tower,
			conf.Vtchost, p)
		if err != nil {
			return err
		}
	}
	return nil
}
// main is the lit entry point: it parses CLI flags and the config file
// (CLI flags take precedence), prepares the home directory, logging and
// key file, links the requested wallets and serves RPC until shutdown.
func main() {
	conf := config{
		LitHomeDir: defaultLitHomeDirName,
		ConfigFile: defaultConfigFile,
		Rpcport:    defaultRpcport,
	}
	// Pre-parse the command line options to see if an alternative config
	// file or the version flag was specified. Errors other than the help
	// message can be ignored here since they will be caught by the final
	// parse below.
	preconf := conf
	preParser := newConfigParser(&preconf, flags.HelpFlag)
	if _, err := preParser.Parse(); err != nil {
		if e, ok := err.(*flags.Error); ok && e.Type == flags.ErrHelp {
			fmt.Fprintln(os.Stderr, err)
			log.Fatal(err)
		}
	}
	// Load additional config from file.
	var configFileError error
	parser := newConfigParser(&conf, flags.Default)
	// Make sure the lit home directory exists before anything tries to
	// read from or write to it.
	if _, err := os.Stat(conf.LitHomeDir); os.IsNotExist(err) {
		if err := os.Mkdir(conf.LitHomeDir, 0700); err != nil {
			// was: checked a stale err, so Mkdir failures went unnoticed
			fmt.Println("Error while creating a directory")
			fmt.Println(err)
		}
	}
	if preconf.ConfigFile == defaultConfigFile {
		// The user has not provided a config file; make sure the default
		// one exists inside the lit home directory.
		ex := filepath.Join(conf.LitHomeDir)
		if _, err := os.Stat(filepath.Join(ex, "lit.conf")); os.IsNotExist(err) {
			fmt.Println("Creating a new config file")
			if err := createDefaultConfigFile(ex); err != nil {
				// was: printed the stale Stat error instead of the
				// createDefaultConfigFile error
				fmt.Fprintf(os.Stderr, "Error creating a "+
					"default config file: %v\n", err)
			}
		}
		preconf.ConfigFile = filepath.Join(ex, "lit.conf")
		// Parse the config file BEFORE the final CLI parse so that CLI
		// params take precedence over file values (was reversed, so the
		// config file silently overrode the command line).
		err := flags.NewIniParser(parser).ParseFile(preconf.ConfigFile)
		if err != nil {
			if _, ok := err.(*os.PathError); !ok {
				fmt.Fprintf(os.Stderr, "Error parsing config "+
					"file: %v\n", err)
				log.Fatal(err)
			}
			configFileError = err
		}
	}
	// Parse command line options again to ensure they take precedence.
	if _, err := parser.Parse(); err != nil {
		log.Fatal(err)
	}
	// Report config-file problems after they can actually have occurred
	// (was checked before the ini parse, so it always printed nothing).
	if configFileError != nil {
		fmt.Printf("%v", configFileError)
	}
	// Log to lit.log inside the home directory; with -v also mirror to
	// stdout.
	logFilePath := filepath.Join(conf.LitHomeDir, "lit.log")
	logfile, err := os.OpenFile(logFilePath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		// was: unchecked, logging would silently go nowhere
		log.Fatal(err)
	}
	defer logfile.Close()
	log.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds)
	if conf.Verbose {
		log.SetOutput(io.MultiWriter(os.Stdout, logfile))
	} else {
		log.SetOutput(logfile)
	}
	// Nodes with no linked wallets are allowed, for testing.
	// TODO Should update tests and disallow nodes without wallets later.
	// Keys: the litNode, and wallits, all get 32 byte keys.
	// Right now though, they all get the *same* key. For lit as a single
	// binary now, all using the same key makes sense; could split up later.
	keyFilePath := filepath.Join(conf.LitHomeDir, defaultKeyFileName)
	// read key file (generate if not found)
	key, err := lnutil.ReadKeyFile(keyFilePath)
	if err != nil {
		log.Fatal(err)
	}
	// Setup LN node. Activate Tower if in hard mode.
	// give node and below file path of lit home directory
	node, err := qln.NewLitNode(key, conf.LitHomeDir)
	if err != nil {
		log.Fatal(err)
	}
	// node is up; link wallets based on args
	if err = linkWallets(node, key, &conf); err != nil {
		log.Fatal(err)
	}
	rpcl := new(litrpc.LitRPC)
	rpcl.Node = node
	rpcl.OffButton = make(chan bool, 1)
	litrpc.RPCListen(rpcl, conf.Rpcport)
	litbamf.BamfListen(conf.Rpcport, conf.LitHomeDir)
	// Block until an RPC stop request arrives, then give listeners a
	// moment to wind down.
	<-rpcl.OffButton
	fmt.Printf("Got stop request\n")
	time.Sleep(time.Second)
}
// createDefaultConfigFile writes a lit.conf into destinationPath by
// copying the sample config shipped next to the binary, substituting
// freshly generated random rpcuser/rpcpass credentials.
func createDefaultConfigFile(destinationPath string) error {
	// Make sure the destination directory exists.
	if _, statErr := os.Stat(destinationPath); os.IsNotExist(statErr) {
		if mkErr := os.Mkdir(destinationPath, 0700); mkErr != nil {
			return mkErr
		}
	}
	// We assume sample config file path is same as binary
	// TODO: change to ~/.lit/config/
	binDir, err := filepath.Abs(filepath.Dir(os.Args[0]))
	if err != nil {
		return err
	}
	samplePath := filepath.Join(binDir, defaultConfigFilename)
	// Generate random credentials for the rpcuser and rpcpass lines.
	random := make([]byte, 20)
	if _, err = rand.Read(random); err != nil {
		return err
	}
	rpcUser := base64.StdEncoding.EncodeToString(random)
	if _, err = rand.Read(random); err != nil {
		return err
	}
	rpcPass := base64.StdEncoding.EncodeToString(random)
	src, err := os.Open(samplePath)
	if err != nil {
		return err
	}
	defer src.Close()
	dest, err := os.OpenFile(filepath.Join(destinationPath, defaultConfigFilename),
		os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		return err
	}
	defer dest.Close()
	// Copy the sample line by line, swapping in the generated
	// credentials on the rpcuser/rpcpass lines.
	reader := bufio.NewReader(src)
	for {
		line, readErr := reader.ReadString('\n')
		if readErr != nil && readErr != io.EOF {
			return readErr
		}
		switch {
		case strings.Contains(line, "rpcuser="):
			line = "rpcuser=" + rpcUser + "\n"
		case strings.Contains(line, "rpcpass="):
			line = "rpcpass=" + rpcPass + "\n"
		}
		if _, writeErr := dest.WriteString(line); writeErr != nil {
			return writeErr
		}
		if readErr == io.EOF {
			return nil
		}
	}
}
Give preference to CLI params
package main
import (
"bufio"
"crypto/rand"
"encoding/base64"
"fmt"
"io"
"log"
"os"
"path/filepath"
"strings"
"time"
flags "github.com/jessevdk/go-flags"
"github.com/mit-dci/lit/coinparam"
"github.com/mit-dci/lit/litbamf"
"github.com/mit-dci/lit/litrpc"
"github.com/mit-dci/lit/lnutil"
"github.com/mit-dci/lit/qln"
)
// config holds lit's command-line and config-file options; the struct
// tags below drive go-flags parsing.
type config struct { // define a struct for usage with go-flags
	Tn3host string `long:"tn3" description:"Connect to bitcoin testnet3."`
	Bc2host string `long:"bc2" description:"bc2 full node."`
	Lt4host string `long:"lt4" description:"Connect to litecoin testnet4."`
	Reghost string `long:"reg" description:"Connect to bitcoin regtest."`
	Litereghost string `long:"litereg" description:"Connect to litecoin regtest."`
	Tvtchost string `long:"tvtc" description:"Connect to Vertcoin test node."`
	Vtchost string `long:"vtc" description:"Connect to Vertcoin."`
	LitHomeDir string `long:"dir" description:"Specify Home Directory of lit as an absolute path."`
	// ConfigFile has no flag tag; it is resolved in main at startup.
	ConfigFile string
	ReSync bool `short:"r"long:"reSync" description:"Resync from the given tip."`
	Tower bool `long:"tower" description:"Watchtower: Run a watching node"`
	Hard bool `short:"t" long:"hard" description:"Flag to set networks."`
	Verbose bool `short:"v" long:"verbose" description:"Set verbosity to true."`
	Rpcport uint16 `short:"p"long:"rpcport" description:"Set rpcport to connect to."`
	// Params is a coin parameter set; not populated by flag parsing here.
	Params *coinparam.Params
}
// Defaults used when neither CLI flags nor the config file override them.
// Note: these read $HOME at package init, so an empty HOME yields paths
// rooted at "/".
var (
	defaultLitHomeDirName = os.Getenv("HOME") + "/.lit"
	defaultKeyFileName = "privkey.hex"
	defaultConfigFilename = "lit.conf"
	defaultHomeDir = os.Getenv("HOME")
	defaultConfigFile = filepath.Join(os.Getenv("HOME"), "/.lit/lit.conf")
	defaultRpcport = uint16(8001)
)
func fileExists(name string) bool {
if _, err := os.Stat(name); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}
// newConfigParser returns a go-flags parser bound to conf with the
// given option set.
func newConfigParser(conf *config, options flags.Options) *flags.Parser {
	return flags.NewParser(conf, options)
}
// linkWallets attaches one base wallet per configured network host to
// the node. Hosts without an explicit port get the network's default
// port appended, and conf is mutated to record the full host:port.
func linkWallets(node *qln.LitNode, key *[32]byte, conf *config) error {
	// for now, wallets are linked to the litnode on startup, and
	// can't appear / disappear while it's running. Later
	// could support dynamically adding / removing wallets
	// order matters; the first registered wallet becomes the default
	var err error
	// try regtest
	if conf.Reghost != "" {
		p := &coinparam.RegressionNetParams
		if !strings.Contains(conf.Reghost, ":") {
			conf.Reghost = conf.Reghost + ":" + p.DefaultPort
		}
		fmt.Printf("reg: %s\n", conf.Reghost)
		// birth height 120 — presumably past the coinbase maturity
		// window on regtest; TODO confirm
		err = node.LinkBaseWallet(key, 120, conf.ReSync, conf.Tower, conf.Reghost, p)
		if err != nil {
			return err
		}
	}
	// try testnet3
	if conf.Tn3host != "" {
		p := &coinparam.TestNet3Params
		if !strings.Contains(conf.Tn3host, ":") {
			conf.Tn3host = conf.Tn3host + ":" + p.DefaultPort
		}
		// hard-coded birth height for testnet3 sync start
		err = node.LinkBaseWallet(
			key, 1150000, conf.ReSync, conf.Tower,
			conf.Tn3host, p)
		if err != nil {
			return err
		}
	}
	// try litecoin regtest
	if conf.Litereghost != "" {
		p := &coinparam.LiteRegNetParams
		if !strings.Contains(conf.Litereghost, ":") {
			conf.Litereghost = conf.Litereghost + ":" + p.DefaultPort
		}
		err = node.LinkBaseWallet(key, 120, conf.ReSync, conf.Tower, conf.Litereghost, p)
		if err != nil {
			return err
		}
	}
	// try litecoin testnet4
	if conf.Lt4host != "" {
		p := &coinparam.LiteCoinTestNet4Params
		if !strings.Contains(conf.Lt4host, ":") {
			conf.Lt4host = conf.Lt4host + ":" + p.DefaultPort
		}
		// birth height comes from the coin parameters here
		err = node.LinkBaseWallet(
			key, p.StartHeight, conf.ReSync, conf.Tower,
			conf.Lt4host, p)
		if err != nil {
			return err
		}
	}
	// try vertcoin testnet
	if conf.Tvtchost != "" {
		p := &coinparam.VertcoinTestNetParams
		if !strings.Contains(conf.Tvtchost, ":") {
			conf.Tvtchost = conf.Tvtchost + ":" + p.DefaultPort
		}
		// birth height 0: sync from genesis
		err = node.LinkBaseWallet(
			key, 0, conf.ReSync, conf.Tower,
			conf.Tvtchost, p)
		if err != nil {
			return err
		}
	}
	// try vertcoin mainnet
	if conf.Vtchost != "" {
		p := &coinparam.VertcoinParams
		if !strings.Contains(conf.Vtchost, ":") {
			conf.Vtchost = conf.Vtchost + ":" + p.DefaultPort
		}
		err = node.LinkBaseWallet(
			key, p.StartHeight, conf.ReSync, conf.Tower,
			conf.Vtchost, p)
		if err != nil {
			return err
		}
	}
	return nil
}
// main is the lit entry point: it parses the config file and CLI flags
// (CLI flags take precedence), prepares the home directory, logging and
// key file, links the requested wallets and serves RPC until shutdown.
func main() {
	conf := config{
		LitHomeDir: defaultLitHomeDirName,
		ConfigFile: defaultConfigFile,
		Rpcport:    defaultRpcport,
	}
	// Pre-parse the command line options to see if an alternative config
	// file or the version flag was specified. Errors other than the help
	// message can be ignored here since they will be caught by the final
	// parse below.
	preconf := conf
	preParser := newConfigParser(&preconf, flags.HelpFlag)
	if _, err := preParser.Parse(); err != nil {
		if e, ok := err.(*flags.Error); ok && e.Type == flags.ErrHelp {
			fmt.Fprintln(os.Stderr, err)
			log.Fatal(err)
		}
	}
	// Load additional config from file.
	var configFileError error
	parser := newConfigParser(&conf, flags.Default)
	// First run: create the lit home directory and seed it with a
	// default config file.
	if _, err := os.Stat(preconf.LitHomeDir); os.IsNotExist(err) {
		if err := os.Mkdir(preconf.LitHomeDir, 0700); err != nil {
			// was: checked a stale err, so Mkdir failures went unnoticed
			fmt.Println("Error while creating a directory")
			fmt.Println(err)
		}
		fmt.Println("Creating a new config file")
		if err := createDefaultConfigFile(preconf.LitHomeDir); err != nil {
			// was: printed the stale Stat error instead of the
			// createDefaultConfigFile error
			fmt.Fprintf(os.Stderr, "Error creating a "+
				"default config file: %v\n", err)
		}
	}
	if preconf.ConfigFile == defaultConfigFile {
		// The user has not provided a config file; make sure the
		// default one exists, then parse it before the final CLI parse
		// so that CLI params take precedence.
		if _, err := os.Stat(filepath.Join(preconf.LitHomeDir, "lit.conf")); os.IsNotExist(err) {
			fmt.Println("Creating a new config file")
			if err := createDefaultConfigFile(preconf.LitHomeDir); err != nil {
				fmt.Fprintf(os.Stderr, "Error creating a "+
					"default config file: %v\n", err)
			}
		}
		preconf.ConfigFile = filepath.Join(preconf.LitHomeDir, "lit.conf")
		err := flags.NewIniParser(parser).ParseFile(preconf.ConfigFile)
		if err != nil {
			if _, ok := err.(*os.PathError); !ok {
				fmt.Fprintf(os.Stderr, "Error parsing config "+
					"file: %v\n", err)
				log.Fatal(err)
			}
			configFileError = err
		}
	}
	// Parse command line options again to ensure they take precedence.
	if _, err := parser.Parse(); err != nil {
		log.Fatal(err)
	}
	if configFileError != nil {
		fmt.Printf("%v", configFileError)
	}
	// Log to lit.log inside the home directory; with -v also mirror to
	// stdout.
	logFilePath := filepath.Join(conf.LitHomeDir, "lit.log")
	logfile, err := os.OpenFile(logFilePath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		// was: unchecked, logging would silently go nowhere
		log.Fatal(err)
	}
	defer logfile.Close()
	log.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds)
	if conf.Verbose {
		log.SetOutput(io.MultiWriter(os.Stdout, logfile))
	} else {
		log.SetOutput(logfile)
	}
	// Nodes with no linked wallets are allowed, for testing.
	// TODO Should update tests and disallow nodes without wallets later.
	// Keys: the litNode, and wallits, all get 32 byte keys.
	// Right now though, they all get the *same* key. For lit as a single
	// binary now, all using the same key makes sense; could split up later.
	keyFilePath := filepath.Join(conf.LitHomeDir, defaultKeyFileName)
	// read key file (generate if not found)
	key, err := lnutil.ReadKeyFile(keyFilePath)
	if err != nil {
		log.Fatal(err)
	}
	// Setup LN node. Activate Tower if in hard mode.
	// give node and below file path of lit home directory
	node, err := qln.NewLitNode(key, conf.LitHomeDir)
	if err != nil {
		log.Fatal(err)
	}
	// node is up; link wallets based on args
	if err = linkWallets(node, key, &conf); err != nil {
		log.Fatal(err)
	}
	rpcl := new(litrpc.LitRPC)
	rpcl.Node = node
	rpcl.OffButton = make(chan bool, 1)
	litrpc.RPCListen(rpcl, conf.Rpcport)
	litbamf.BamfListen(conf.Rpcport, conf.LitHomeDir)
	// Block until an RPC stop request arrives, then give listeners a
	// moment to wind down.
	<-rpcl.OffButton
	fmt.Printf("Got stop request\n")
	time.Sleep(time.Second)
}
// createDefaultConfigFile copies the sample lit.conf found next to the
// binary into destinationPath, replacing the rpcuser/rpcpass lines with
// freshly generated random credentials. The destination directory must
// already exist.
func createDefaultConfigFile(destinationPath string) error {
	// We assume sample config file path is same as binary
	// TODO: change to ~/.lit/config/
	binDir, err := filepath.Abs(filepath.Dir(os.Args[0]))
	if err != nil {
		return err
	}
	samplePath := filepath.Join(binDir, defaultConfigFilename)
	// Two independent 20-byte random values, base64 encoded.
	buf := make([]byte, 20)
	if _, err = rand.Read(buf); err != nil {
		return err
	}
	newUser := base64.StdEncoding.EncodeToString(buf)
	if _, err = rand.Read(buf); err != nil {
		return err
	}
	newPass := base64.StdEncoding.EncodeToString(buf)
	src, err := os.Open(samplePath)
	if err != nil {
		return err
	}
	defer src.Close()
	dest, err := os.OpenFile(filepath.Join(destinationPath, defaultConfigFilename),
		os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		return err
	}
	defer dest.Close()
	// Stream the sample file across, substituting the credential lines.
	in := bufio.NewReader(src)
	for {
		line, readErr := in.ReadString('\n')
		if readErr != nil && readErr != io.EOF {
			return readErr
		}
		if strings.Contains(line, "rpcuser=") {
			line = "rpcuser=" + newUser + "\n"
		} else if strings.Contains(line, "rpcpass=") {
			line = "rpcpass=" + newPass + "\n"
		}
		if _, writeErr := dest.WriteString(line); writeErr != nil {
			return writeErr
		}
		if readErr == io.EOF {
			return nil
		}
	}
}
|
package lm2
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"math/rand"
"os"
"sync"
"sync/atomic"
)
const sentinelMagic = 0xDEAD10CC
const (
maxLevels = 4
levelProb = 0.1
cacheProb = 0.1
)
var (
// ErrDoesNotExist is returned when a collection's data file
// doesn't exist.
ErrDoesNotExist = errors.New("lm2: does not exist")
// ErrInternal is returned when the internal state of the collection
// is invalid. The collection should be closed and reopened.
ErrInternal = errors.New("lm2: internal error")
// ErrKeyNotFound is returned when a Cursor.Get() doesn't find
// the requested key.
ErrKeyNotFound = errors.New("lm2: key not found")
fileVersion = [8]byte{'l', 'm', '2', '_', '0', '0', '1', '\n'}
)
// RollbackError is the error type returned after rollbacks.
type RollbackError struct {
DuplicateKey bool
ConflictedKey string
Err error
}
func (e RollbackError) Error() string {
if e.DuplicateKey {
return fmt.Sprintf("lm2: rolled back due to duplicate key (conflicted key: `%s`)",
e.ConflictedKey)
}
return fmt.Sprintf("lm2: rolled back (%s)", e.Err.Error())
}
// IsRollbackError returns true if err is a RollbackError.
func IsRollbackError(err error) bool {
_, ok := err.(RollbackError)
return ok
}
// Collection represents an ordered linked list map.
// On disk it is a file of records plus a write-ahead log; in memory it
// keeps a record cache and a map of uncommitted (dirty) records.
type Collection struct {
	fileHeader
	f     *os.File
	wal   *wal
	stats Stats
	// dirty maps record offsets to uncommitted records; guarded by dirtyLock.
	dirty     map[int64]*record
	cache     *recordCache
	dirtyLock sync.Mutex
	// internalState is 0 if OK, 1 if inconsistent.
	internalState uint32
	metaLock  sync.RWMutex
	writeLock sync.Mutex
	// readAt/writeAt are indirections over f.ReadAt/f.WriteAt
	// (set in NewCollection/OpenCollection).
	readAt  func(b []byte, off int64) (n int, err error)
	writeAt func(b []byte, off int64) (n int, err error)
}
// fileHeader is the on-disk header stored at offset 0 of the data file.
type fileHeader struct {
	Version [8]byte
	// Next holds head offsets, one per skip-list level.
	Next [maxLevels]int64
	// LastCommit is the file offset of the last committed data.
	LastCommit int64
}

// bytes returns the little-endian encoding of h. The binary.Write
// error is ignored: encoding fixed-size fields into a bytes.Buffer
// cannot fail.
func (h fileHeader) bytes() []byte {
	buf := bytes.NewBuffer(nil)
	binary.Write(buf, binary.LittleEndian, h)
	return buf.Bytes()
}
// recordHeader is the fixed-size on-disk prefix of every record.
type recordHeader struct {
	_ uint8 // reserved
	_ uint8 // reserved
	// Next holds successor offsets, one per skip-list level.
	Next    [maxLevels]int64
	Deleted int64
	KeyLen  uint16
	ValLen  uint32
}

// recordHeaderSize is the packed (binary.Write) size of recordHeader:
// 2 reserved bytes + maxLevels next pointers + Deleted + KeyLen + ValLen.
const recordHeaderSize = 2 + (maxLevels * 8) + 8 + 2 + 4

// bytes returns the little-endian encoding of h (see fileHeader.bytes
// for why the Write error is ignored).
func (h recordHeader) bytes() []byte {
	buf := bytes.NewBuffer(nil)
	binary.Write(buf, binary.LittleEndian, h)
	return buf.Bytes()
}
// sentinelRecord is a marker written to the data file.
type sentinelRecord struct {
	Magic  uint32 // some fixed pattern (sentinelMagic)
	Offset int64  // this record's offset
}

// record is the in-memory form of an on-disk record: its header plus
// decoded key/value and the offset it was read from.
type record struct {
	recordHeader
	Offset int64
	Key    string
	Value  string
	// lock guards concurrent access to this record's mutable fields.
	lock sync.RWMutex
}
// generateLevel draws a random skip-list level in [0, maxLevels-1]:
// each additional level is kept with probability levelProb.
func generateLevel() int {
	level := 0
	for level < maxLevels-1 && rand.Float32() <= levelProb {
		level++
	}
	return level
}
// getDirty returns the uncommitted record at offset, or nil if there is
// none (or no dirty map is active).
func (c *Collection) getDirty(offset int64) *record {
	c.dirtyLock.Lock()
	defer c.dirtyLock.Unlock()
	if c.dirty == nil {
		return nil
	}
	return c.dirty[offset]
}

// setDirty registers rec as the uncommitted version of the record at
// offset.
func (c *Collection) setDirty(offset int64, rec *record) {
	c.dirtyLock.Lock()
	defer c.dirtyLock.Unlock()
	if c.dirty == nil {
		// Lazily allocate, mirroring getDirty's nil check; writing to a
		// nil map would panic.
		c.dirty = map[int64]*record{}
	}
	c.dirty[offset] = rec
}
// readRecord reads the record stored at offset. If dirty is true, an
// uncommitted in-memory version of the record is preferred. Lookup
// order: dirty map, record cache, then the data file.
func (c *Collection) readRecord(offset int64, dirty bool) (*record, error) {
	if offset == 0 {
		// Offset 0 is the file header, never a record.
		return nil, errors.New("lm2: invalid record offset 0")
	}
	if dirty {
		if rec := c.getDirty(offset); rec != nil {
			return rec, nil
		}
	}
	c.cache.lock.RLock()
	if rec := c.cache.cache[offset]; rec != nil {
		c.cache.lock.RUnlock()
		c.stats.incRecordsRead(1)
		c.stats.incCacheHits(1)
		return rec, nil
	}
	c.cache.lock.RUnlock()
	// Cache miss: read the fixed-size header, then the key and value.
	recordHeaderBytes := [recordHeaderSize]byte{}
	n, err := c.readAt(recordHeaderBytes[:], offset)
	// A short read only matters when readAt also reports an error;
	// ReadAt returns err == nil only when n == len(b).
	if err != nil && n != recordHeaderSize {
		return nil, fmt.Errorf("lm2: partial read (%s)", err)
	}
	header := recordHeader{}
	err = binary.Read(bytes.NewReader(recordHeaderBytes[:]), binary.LittleEndian, &header)
	if err != nil {
		return nil, err
	}
	keyValBuf := make([]byte, int(header.KeyLen)+int(header.ValLen))
	n, err = c.readAt(keyValBuf, offset+recordHeaderSize)
	if err != nil && n != len(keyValBuf) {
		return nil, fmt.Errorf("lm2: partial read (%s)", err)
	}
	key := string(keyValBuf[:int(header.KeyLen)])
	value := string(keyValBuf[int(header.KeyLen):])
	rec := &record{
		recordHeader: header,
		Offset:       offset,
		Key:          key,
		Value:        value,
	}
	c.stats.incRecordsRead(1)
	c.stats.incCacheMisses(1)
	// NOTE(review): a miss is NOT inserted into the cache here; records
	// only enter the cache via nextRecord — confirm this is intended.
	return rec, nil
}
// nextRecord follows rec's next pointer at the given skip-list level
// and returns the successor record, or nil if there is none. dirty
// selects whether uncommitted records may be returned.
func (c *Collection) nextRecord(rec *record, level int, dirty bool) (*record, error) {
	if rec == nil {
		return nil, errors.New("lm2: invalid record")
	}
	if atomic.LoadInt64(&rec.Next[level]) == 0 {
		// There's no next record.
		return nil, nil
	}
	nextRec, err := c.readRecord(atomic.LoadInt64(&rec.Next[level]), dirty)
	if err != nil {
		return nil, err
	}
	if level >= 2 {
		// NOTE(review): this caches rec (the record we started from),
		// not nextRec, and only for upper levels — confirm intended.
		c.cache.push(rec)
	}
	return nextRec, nil
}
// NewCollection creates a new collection with a data file at file.
// cacheSize represents the size of the collection cache.
// Any existing contents of file are truncated away, and a fresh WAL is
// created at file + ".wal".
func NewCollection(file string, cacheSize int) (*Collection, error) {
	f, err := os.OpenFile(file, os.O_CREATE|os.O_RDWR, 0666)
	if err != nil {
		return nil, err
	}
	// Start from an empty data file.
	err = f.Truncate(0)
	if err != nil {
		f.Close()
		return nil, err
	}
	wal, err := newWAL(file + ".wal")
	if err != nil {
		f.Close()
		return nil, err
	}
	c := &Collection{
		f:       f,
		wal:     wal,
		cache:   newCache(cacheSize),
		readAt:  f.ReadAt,
		writeAt: f.WriteAt,
	}
	// write file header
	c.fileHeader.Version = fileVersion
	c.fileHeader.Next[0] = 0
	// LastCommit starts at 512 — presumably the first 512 bytes are
	// reserved for the header; confirm against the record allocator.
	c.fileHeader.LastCommit = int64(512)
	c.f.Seek(0, 0)
	err = binary.Write(c.f, binary.LittleEndian, c.fileHeader)
	if err != nil {
		c.f.Close()
		c.wal.Close()
		return nil, err
	}
	return c, nil
}
// OpenCollection opens a collection with a data file at file.
// cacheSize represents the size of the collection cache.
// ErrDoesNotExist is returned if file does not exist.
// Recovery on open: a leftover ".compact" file replaces a missing data
// file; the last WAL entry is re-applied (or the WAL truncated if it is
// unreadable); the data file is then truncated to the last commit.
func OpenCollection(file string, cacheSize int) (*Collection, error) {
	f, err := os.OpenFile(file, os.O_RDWR, 0666)
	if err != nil {
		if os.IsNotExist(err) {
			// Check if there's a compacted version.
			if _, err = os.Stat(file + ".compact"); err == nil {
				// There is. Compaction finished but the rename didn't:
				// promote the compacted file and retry the open.
				err = os.Rename(file+".compact", file)
				if err != nil {
					return nil, fmt.Errorf("lm2: error recovering compacted data file: %v", err)
				}
				return OpenCollection(file, cacheSize)
			}
			return nil, ErrDoesNotExist
		}
		return nil, fmt.Errorf("lm2: error opening data file: %v", err)
	}
	// Check if there's a compacted version.
	if _, err = os.Stat(file + ".compact"); err == nil {
		// There is, but the main data file also exists, so compaction
		// did not complete. Remove it and its wal.
		os.Remove(file + ".compact")
		os.Remove(file + ".compact.wal")
	}
	wal, err := openWAL(file + ".wal")
	if os.IsNotExist(err) {
		// No WAL yet; start a fresh one.
		wal, err = newWAL(file + ".wal")
	}
	if err != nil {
		f.Close()
		return nil, fmt.Errorf("lm2: error WAL: %v", err)
	}
	c := &Collection{
		f:       f,
		wal:     wal,
		cache:   newCache(cacheSize),
		readAt:  f.ReadAt,
		writeAt: f.WriteAt,
	}
	// Read file header.
	c.f.Seek(0, 0)
	err = binary.Read(c.f, binary.LittleEndian, &c.fileHeader)
	if err != nil {
		c.Close()
		return nil, fmt.Errorf("lm2: error reading file header: %v", err)
	}
	// Read last WAL entry.
	lastEntry, err := c.wal.ReadLastEntry()
	if err != nil {
		// Maybe latest WAL write didn't succeed.
		// Truncate.
		c.wal.Truncate()
	} else {
		// Apply last WAL entry again (replay is idempotent).
		for _, walRec := range lastEntry.records {
			_, err := c.writeAt(walRec.Data, walRec.Offset)
			if err != nil {
				c.Close()
				return nil, fmt.Errorf("lm2: partial write (%s)", err)
			}
		}
		// Reread file header because it could have been updated
		c.f.Seek(0, 0)
		err = binary.Read(c.f, binary.LittleEndian, &c.fileHeader)
		if err != nil {
			c.Close()
			return nil, fmt.Errorf("lm2: error reading file header: %v", err)
		}
	}
	// Drop anything written past the last committed offset.
	c.f.Truncate(c.LastCommit)
	err = c.sync()
	if err != nil {
		c.Close()
		return nil, err
	}
	return c, nil
}
// sync flushes the WAL and then the data file to stable storage.
// The WAL is synced first so a crash between the two syncs can be
// recovered from the log.
func (c *Collection) sync() error {
	if err := c.wal.f.Sync(); err != nil {
		// was: the underlying error was discarded
		return fmt.Errorf("lm2: error syncing WAL: %v", err)
	}
	if err := c.f.Sync(); err != nil {
		return fmt.Errorf("lm2: error syncing data file: %v", err)
	}
	return nil
}
// Close closes a collection and all of its resources.
// The WAL is deleted only when the internal state is still consistent;
// afterwards the collection is marked inconsistent so further use is
// rejected by OK().
func (c *Collection) Close() {
	c.metaLock.Lock()
	defer c.metaLock.Unlock()
	c.f.Close()
	c.wal.Close()
	if atomic.LoadUint32(&c.internalState) == 0 {
		// Internal state is OK. Safe to delete WAL.
		c.wal.Destroy()
	}
	atomic.StoreUint32(&c.internalState, 1)
}
// Version returns the last committed version (the LastCommit offset
// recorded in the file header).
func (c *Collection) Version() int64 {
	c.metaLock.RLock()
	defer c.metaLock.RUnlock()
	return c.LastCommit
}

// Stats returns a copy of the collection statistics.
func (c *Collection) Stats() Stats {
	return c.stats.clone()
}
// Destroy closes the collection and removes its associated data files.
// Close already removes the WAL when the internal state is clean, so
// only the data file remains to be deleted here.
func (c *Collection) Destroy() error {
	c.Close()
	return os.Remove(c.f.Name())
}
// Compact rewrites a collection to clean up deleted records and optimize
// data layout on disk. It is CompactFunc with an identity function that
// keeps every record unchanged.
// NOTE: The collection is closed after compaction, so you'll have to reopen it.
func (c *Collection) Compact() error {
	return c.CompactFunc(func(key, value string) (string, string, bool) {
		return key, value, true
	})
}
// CompactFunc compacts with a custom compaction function. f is called with
// each key-value pair, and it should return the new key and value for that record
// if they should be changed, and whether to keep the record.
// Returning false will skip the record.
// NOTE: The collection is closed after compaction, so you'll have to reopen it.
func (c *Collection) CompactFunc(f func(key, value string) (string, string, bool)) error {
	c.writeLock.Lock()
	defer c.writeLock.Unlock()
	newCollection, err := NewCollection(c.f.Name()+".compact", 10)
	if err != nil {
		return err
	}
	cur, err := c.NewCursor()
	if err != nil {
		// was: early returns leaked the .compact collection's handles
		newCollection.Close()
		return err
	}
	// Copy surviving records over in batches of up to batchSize.
	const batchSize = 1000
	remaining := batchSize
	wb := NewWriteBatch()
	for cur.Next() {
		key, val, keep := f(cur.Key(), cur.Value())
		if !keep {
			continue
		}
		wb.Set(key, val)
		remaining--
		if remaining == 0 {
			if _, err := newCollection.Update(wb); err != nil {
				newCollection.Close()
				return err
			}
			remaining = batchSize
			wb = NewWriteBatch()
		}
	}
	// Flush the final partial batch, if any.
	if remaining < batchSize {
		if _, err := newCollection.Update(wb); err != nil {
			newCollection.Close()
			return err
		}
	}
	// Remove the old collection, then move the compacted file into its
	// place. (OpenCollection recovers if we crash between these steps.)
	err = c.Destroy()
	if err != nil {
		newCollection.Close()
		return err
	}
	newCollection.Close()
	return os.Rename(newCollection.f.Name(), c.f.Name())
}
// OK returns true if the internal state of the collection is valid.
// If false is returned you should close and reopen the collection.
func (c *Collection) OK() bool {
	return atomic.LoadUint32(&c.internalState) == 0
}
fix cache (#48)
https://github.com/Preetam/lm2/issues/47
package lm2
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"math/rand"
"os"
"sync"
"sync/atomic"
)
const sentinelMagic = 0xDEAD10CC
const (
maxLevels = 4
levelProb = 0.1
cacheProb = 0.1
)
var (
// ErrDoesNotExist is returned when a collection's data file
// doesn't exist.
ErrDoesNotExist = errors.New("lm2: does not exist")
// ErrInternal is returned when the internal state of the collection
// is invalid. The collection should be closed and reopened.
ErrInternal = errors.New("lm2: internal error")
// ErrKeyNotFound is returned when a Cursor.Get() doesn't find
// the requested key.
ErrKeyNotFound = errors.New("lm2: key not found")
fileVersion = [8]byte{'l', 'm', '2', '_', '0', '0', '1', '\n'}
)
// RollbackError is the error type returned after rollbacks.
type RollbackError struct {
DuplicateKey bool
ConflictedKey string
Err error
}
func (e RollbackError) Error() string {
if e.DuplicateKey {
return fmt.Sprintf("lm2: rolled back due to duplicate key (conflicted key: `%s`)",
e.ConflictedKey)
}
return fmt.Sprintf("lm2: rolled back (%s)", e.Err.Error())
}
// IsRollbackError returns true if err is a RollbackError.
func IsRollbackError(err error) bool {
_, ok := err.(RollbackError)
return ok
}
// Collection represents an ordered linked list map.
// On disk it is a file of records plus a write-ahead log; in memory it
// keeps a record cache and a map of uncommitted (dirty) records.
type Collection struct {
	fileHeader
	f     *os.File
	wal   *wal
	stats Stats
	// dirty maps record offsets to uncommitted records; guarded by dirtyLock.
	dirty     map[int64]*record
	cache     *recordCache
	dirtyLock sync.Mutex
	// internalState is 0 if OK, 1 if inconsistent.
	internalState uint32
	metaLock  sync.RWMutex
	writeLock sync.Mutex
	// readAt/writeAt are indirections over f.ReadAt/f.WriteAt
	// (set in NewCollection/OpenCollection).
	readAt  func(b []byte, off int64) (n int, err error)
	writeAt func(b []byte, off int64) (n int, err error)
}
// fileHeader is the on-disk header stored at offset 0 of the data file.
type fileHeader struct {
	Version [8]byte
	// Next holds head offsets, one per skip-list level.
	Next [maxLevels]int64
	// LastCommit is the file offset of the last committed data.
	LastCommit int64
}

// bytes returns the little-endian encoding of h. The binary.Write
// error is ignored: encoding fixed-size fields into a bytes.Buffer
// cannot fail.
func (h fileHeader) bytes() []byte {
	buf := bytes.NewBuffer(nil)
	binary.Write(buf, binary.LittleEndian, h)
	return buf.Bytes()
}
// recordHeader is the fixed-size on-disk prefix of every record.
type recordHeader struct {
	_ uint8 // reserved
	_ uint8 // reserved
	// Next holds successor offsets, one per skip-list level.
	Next    [maxLevels]int64
	Deleted int64
	KeyLen  uint16
	ValLen  uint32
}

// recordHeaderSize is the packed (binary.Write) size of recordHeader:
// 2 reserved bytes + maxLevels next pointers + Deleted + KeyLen + ValLen.
const recordHeaderSize = 2 + (maxLevels * 8) + 8 + 2 + 4

// bytes returns the little-endian encoding of h (see fileHeader.bytes
// for why the Write error is ignored).
func (h recordHeader) bytes() []byte {
	buf := bytes.NewBuffer(nil)
	binary.Write(buf, binary.LittleEndian, h)
	return buf.Bytes()
}
// sentinelRecord is a marker written to the data file.
type sentinelRecord struct {
	Magic  uint32 // some fixed pattern (sentinelMagic)
	Offset int64  // this record's offset
}

// record is the in-memory form of an on-disk record: its header plus
// decoded key/value and the offset it was read from.
type record struct {
	recordHeader
	Offset int64
	Key    string
	Value  string
	// lock guards concurrent access to this record's mutable fields.
	lock sync.RWMutex
}
// generateLevel draws a random skip-list level in [0, maxLevels-1]:
// each additional level is kept with probability levelProb.
func generateLevel() int {
	level := 0
	for level < maxLevels-1 && rand.Float32() <= levelProb {
		level++
	}
	return level
}
// getDirty returns the uncommitted record at offset, or nil if there is
// none (or no dirty map is active).
func (c *Collection) getDirty(offset int64) *record {
	c.dirtyLock.Lock()
	defer c.dirtyLock.Unlock()
	if c.dirty == nil {
		return nil
	}
	return c.dirty[offset]
}

// setDirty registers rec as the uncommitted version of the record at
// offset.
func (c *Collection) setDirty(offset int64, rec *record) {
	c.dirtyLock.Lock()
	defer c.dirtyLock.Unlock()
	if c.dirty == nil {
		// Lazily allocate, mirroring getDirty's nil check; writing to a
		// nil map would panic.
		c.dirty = map[int64]*record{}
	}
	c.dirty[offset] = rec
}
// readRecord reads the record stored at offset. If dirty is true, an
// uncommitted in-memory version of the record is preferred. Lookup
// order: dirty map, record cache, then the data file (after which the
// record is inserted into the cache).
func (c *Collection) readRecord(offset int64, dirty bool) (*record, error) {
	if offset == 0 {
		// Offset 0 is the file header, never a record.
		return nil, errors.New("lm2: invalid record offset 0")
	}
	if dirty {
		if rec := c.getDirty(offset); rec != nil {
			return rec, nil
		}
	}
	c.cache.lock.RLock()
	if rec := c.cache.cache[offset]; rec != nil {
		c.cache.lock.RUnlock()
		c.stats.incRecordsRead(1)
		c.stats.incCacheHits(1)
		return rec, nil
	}
	c.cache.lock.RUnlock()
	// Cache miss: read the fixed-size header, then the key and value.
	recordHeaderBytes := [recordHeaderSize]byte{}
	n, err := c.readAt(recordHeaderBytes[:], offset)
	// A short read only matters when readAt also reports an error;
	// ReadAt returns err == nil only when n == len(b).
	if err != nil && n != recordHeaderSize {
		return nil, fmt.Errorf("lm2: partial read (%s)", err)
	}
	header := recordHeader{}
	err = binary.Read(bytes.NewReader(recordHeaderBytes[:]), binary.LittleEndian, &header)
	if err != nil {
		return nil, err
	}
	keyValBuf := make([]byte, int(header.KeyLen)+int(header.ValLen))
	n, err = c.readAt(keyValBuf, offset+recordHeaderSize)
	if err != nil && n != len(keyValBuf) {
		return nil, fmt.Errorf("lm2: partial read (%s)", err)
	}
	key := string(keyValBuf[:int(header.KeyLen)])
	value := string(keyValBuf[int(header.KeyLen):])
	rec := &record{
		recordHeader: header,
		Offset:       offset,
		Key:          key,
		Value:        value,
	}
	c.stats.incRecordsRead(1)
	c.stats.incCacheMisses(1)
	// Freshly read records are cached here, on the miss path.
	c.cache.push(rec)
	return rec, nil
}
// nextRecord follows rec's next pointer at the given skip-list level
// and returns the successor record, or nil if there is none. dirty
// selects whether uncommitted records may be returned.
func (c *Collection) nextRecord(rec *record, level int, dirty bool) (*record, error) {
	if rec == nil {
		return nil, errors.New("lm2: invalid record")
	}
	next := atomic.LoadInt64(&rec.Next[level])
	if next == 0 {
		// End of the list at this level.
		return nil, nil
	}
	return c.readRecord(next, dirty)
}
// NewCollection creates a new collection with a data file at file.
// cacheSize represents the size of the collection cache.
// Any existing contents of file are truncated away, and a fresh WAL is
// created at file + ".wal".
func NewCollection(file string, cacheSize int) (*Collection, error) {
	f, err := os.OpenFile(file, os.O_CREATE|os.O_RDWR, 0666)
	if err != nil {
		return nil, err
	}
	// Start from an empty data file.
	err = f.Truncate(0)
	if err != nil {
		f.Close()
		return nil, err
	}
	wal, err := newWAL(file + ".wal")
	if err != nil {
		f.Close()
		return nil, err
	}
	c := &Collection{
		f:       f,
		wal:     wal,
		cache:   newCache(cacheSize),
		readAt:  f.ReadAt,
		writeAt: f.WriteAt,
	}
	// write file header
	c.fileHeader.Version = fileVersion
	c.fileHeader.Next[0] = 0
	// LastCommit starts at 512 — presumably the first 512 bytes are
	// reserved for the header; confirm against the record allocator.
	c.fileHeader.LastCommit = int64(512)
	c.f.Seek(0, 0)
	err = binary.Write(c.f, binary.LittleEndian, c.fileHeader)
	if err != nil {
		c.f.Close()
		c.wal.Close()
		return nil, err
	}
	return c, nil
}
// OpenCollection opens a collection with a data file at file.
// cacheSize represents the size of the collection cache.
// ErrDoesNotExist is returned if file does not exist.
// Recovery on open: a leftover ".compact" file replaces a missing data
// file; the last WAL entry is re-applied (or the WAL truncated if it is
// unreadable); the data file is then truncated to the last commit.
func OpenCollection(file string, cacheSize int) (*Collection, error) {
	f, err := os.OpenFile(file, os.O_RDWR, 0666)
	if err != nil {
		if os.IsNotExist(err) {
			// Check if there's a compacted version.
			if _, err = os.Stat(file + ".compact"); err == nil {
				// There is. Compaction finished but the rename didn't:
				// promote the compacted file and retry the open.
				err = os.Rename(file+".compact", file)
				if err != nil {
					return nil, fmt.Errorf("lm2: error recovering compacted data file: %v", err)
				}
				return OpenCollection(file, cacheSize)
			}
			return nil, ErrDoesNotExist
		}
		return nil, fmt.Errorf("lm2: error opening data file: %v", err)
	}
	// Check if there's a compacted version.
	if _, err = os.Stat(file + ".compact"); err == nil {
		// There is, but the main data file also exists, so compaction
		// did not complete. Remove it and its wal.
		os.Remove(file + ".compact")
		os.Remove(file + ".compact.wal")
	}
	wal, err := openWAL(file + ".wal")
	if os.IsNotExist(err) {
		// No WAL yet; start a fresh one.
		wal, err = newWAL(file + ".wal")
	}
	if err != nil {
		f.Close()
		return nil, fmt.Errorf("lm2: error WAL: %v", err)
	}
	c := &Collection{
		f:       f,
		wal:     wal,
		cache:   newCache(cacheSize),
		readAt:  f.ReadAt,
		writeAt: f.WriteAt,
	}
	// Read file header.
	c.f.Seek(0, 0)
	err = binary.Read(c.f, binary.LittleEndian, &c.fileHeader)
	if err != nil {
		c.Close()
		return nil, fmt.Errorf("lm2: error reading file header: %v", err)
	}
	// Read last WAL entry.
	lastEntry, err := c.wal.ReadLastEntry()
	if err != nil {
		// Maybe latest WAL write didn't succeed.
		// Truncate.
		c.wal.Truncate()
	} else {
		// Apply last WAL entry again (replay is idempotent).
		for _, walRec := range lastEntry.records {
			_, err := c.writeAt(walRec.Data, walRec.Offset)
			if err != nil {
				c.Close()
				return nil, fmt.Errorf("lm2: partial write (%s)", err)
			}
		}
		// Reread file header because it could have been updated
		c.f.Seek(0, 0)
		err = binary.Read(c.f, binary.LittleEndian, &c.fileHeader)
		if err != nil {
			c.Close()
			return nil, fmt.Errorf("lm2: error reading file header: %v", err)
		}
	}
	// Drop anything written past the last committed offset.
	c.f.Truncate(c.LastCommit)
	err = c.sync()
	if err != nil {
		c.Close()
		return nil, err
	}
	return c, nil
}
// sync flushes the WAL and then the data file to stable storage.
// The WAL is synced first so a crash between the two syncs can be
// recovered from the log.
func (c *Collection) sync() error {
	if err := c.wal.f.Sync(); err != nil {
		// was: the underlying error was discarded
		return fmt.Errorf("lm2: error syncing WAL: %v", err)
	}
	if err := c.f.Sync(); err != nil {
		return fmt.Errorf("lm2: error syncing data file: %v", err)
	}
	return nil
}
// Close closes a collection and all of its resources.
// The WAL is deleted only when the internal state is still consistent;
// afterwards the collection is marked inconsistent so further use is
// rejected by OK().
func (c *Collection) Close() {
	c.metaLock.Lock()
	defer c.metaLock.Unlock()
	c.f.Close()
	c.wal.Close()
	if atomic.LoadUint32(&c.internalState) == 0 {
		// Internal state is OK. Safe to delete WAL.
		c.wal.Destroy()
	}
	atomic.StoreUint32(&c.internalState, 1)
}
// Version returns the last committed version (the LastCommit offset
// recorded in the file header).
func (c *Collection) Version() int64 {
	c.metaLock.RLock()
	defer c.metaLock.RUnlock()
	return c.LastCommit
}

// Stats returns a copy of the collection statistics.
func (c *Collection) Stats() Stats {
	return c.stats.clone()
}
// Destroy closes the collection and removes its associated data files.
// Close already removes the WAL when the internal state is clean, so
// only the data file remains to be deleted here.
func (c *Collection) Destroy() error {
	c.Close()
	return os.Remove(c.f.Name())
}
// Compact rewrites a collection to clean up deleted records and optimize
// data layout on disk.
// NOTE: The collection is closed after compaction, so you'll have to reopen it.
func (c *Collection) Compact() error {
	// Keep every record unchanged; compaction alone reclaims the space
	// held by deleted records.
	keepAll := func(key, value string) (string, string, bool) {
		return key, value, true
	}
	return c.CompactFunc(keepAll)
}
// CompactFunc compacts with a custom compaction function. f is called with
// each key-value pair, and it should return the new key and value for that record
// if they should be changed, and whether to keep the record.
// Returning false will skip the record.
// NOTE: The collection is closed after compaction, so you'll have to reopen it.
func (c *Collection) CompactFunc(f func(key, value string) (string, string, bool)) error {
	c.writeLock.Lock()
	defer c.writeLock.Unlock()

	newCollection, err := NewCollection(c.f.Name()+".compact", 10)
	if err != nil {
		return err
	}
	// If compaction fails part way through, destroy the temporary
	// collection so we don't leak its open file handles or leave a
	// stale ".compact" file on disk. (The original code returned early
	// on error and leaked newCollection.)
	compacted := false
	defer func() {
		if !compacted {
			newCollection.Destroy()
		}
	}()

	cur, err := c.NewCursor()
	if err != nil {
		return err
	}

	// Copy surviving records into the new collection in batches of up
	// to batchSize, bounding the size of each write batch.
	const batchSize = 1000
	remaining := batchSize
	wb := NewWriteBatch()
	for cur.Next() {
		key, val, keep := f(cur.Key(), cur.Value())
		if !keep {
			continue
		}
		wb.Set(key, val)
		remaining--
		if remaining == 0 {
			if _, err := newCollection.Update(wb); err != nil {
				return err
			}
			remaining = batchSize
			wb = NewWriteBatch()
		}
	}
	// Flush the final partial batch, if any records were added to it.
	if remaining < batchSize {
		if _, err := newCollection.Update(wb); err != nil {
			return err
		}
	}

	// Remove the old collection and move the compacted one into place.
	if err := c.Destroy(); err != nil {
		return err
	}
	// From here on the compacted file is the only copy of the data, so
	// the deferred cleanup must not destroy it even if the rename fails.
	compacted = true
	newCollection.Close()
	return os.Rename(newCollection.f.Name(), c.f.Name())
}
// OK returns true if the internal state of the collection is valid.
// If false is returned you should close and reopen the collection.
func (c *Collection) OK() bool {
	state := atomic.LoadUint32(&c.internalState)
	return state == 0
}
|
// Copyright (c) 2013-2017 The btcsuite developers
// Copyright (c) 2015-2016 The Decred developers
// Copyright (C) 2015-2017 The Lightning Network Developers
package main
import (
"bytes"
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"io/ioutil"
"math/big"
"net"
"net/http"
_ "net/http/pprof"
"os"
"runtime"
"runtime/pprof"
"strconv"
"time"
"gopkg.in/macaroon-bakery.v1/bakery"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
flags "github.com/btcsuite/go-flags"
proxy "github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/lightningnetwork/lnd/autopilot"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnwallet"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/macaroons"
"github.com/lightningnetwork/lnd/walletunlocker"
"github.com/roasbeef/btcd/btcec"
"github.com/roasbeef/btcutil"
)
const (
	// autogenCertValidity is how long the self-signed TLS certificate
	// generated at startup stays valid: roughly 14 months
	// (14 * 30 days), capped elsewhere at the ASN.1 end of time.
	autogenCertValidity = 14 /*months*/ * 30 /*days*/ * 24 * time.Hour
)
var (
	// cfg holds the active daemon configuration, assigned once in
	// lndMain after loadConfig succeeds.
	cfg *config

	// shutdownChannel is closed/signalled to trigger daemon shutdown.
	shutdownChannel = make(chan struct{})

	// registeredChains tracks the chain backends available to lnd.
	registeredChains = newChainRegistry()

	// macaroonDatabaseDir is the directory backing the macaroon
	// service's database.
	macaroonDatabaseDir string

	// End of ASN.1 time.
	endOfTime = time.Date(2049, 12, 31, 23, 59, 59, 0, time.UTC)

	// Max serial number.
	serialNumberLimit = new(big.Int).Lsh(big.NewInt(1), 128)

	/*
	 * These cipher suites fit the following criteria:
	 * - Don't use outdated algorithms like SHA-1 and 3DES
	 * - Don't use ECB mode or other insecure symmetric methods
	 * - Included in the TLS v1.2 suite
	 * - Are available in the Go 1.7.6 standard library (more are
	 *   available in 1.8.3 and will be added after lnd no longer
	 *   supports 1.7, including suites that support CBC mode)
	 *
	 * The cipher suites are ordered from strongest to weakest
	 * primitives, but the client's preference order has more
	 * effect during negotiation.
	 **/
	tlsCipherSuites = []uint16{
		tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
		tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
		tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
		tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
		tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
		tls.TLS_RSA_WITH_AES_128_CBC_SHA256,
		tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
		tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
		tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
		tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
		tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
	}
)
// lndMain is the true entry point for lnd. This function is required since
// defers created in the top-level scope of a main method aren't executed if
// os.Exit() is called.
func lndMain() error {
	// Load the configuration, and parse any command line options. This
	// function will also set up logging properly.
	loadedConfig, err := loadConfig()
	if err != nil {
		return err
	}
	cfg = loadedConfig
	defer func() {
		if logRotator != nil {
			logRotator.Close()
		}
	}()

	// Show version at startup.
	ltndLog.Infof("Version %s", version())

	// Enable http profiling server if requested.
	if cfg.Profile != "" {
		go func() {
			listenAddr := net.JoinHostPort("", cfg.Profile)
			profileRedirect := http.RedirectHandler("/debug/pprof",
				http.StatusSeeOther)
			http.Handle("/", profileRedirect)
			fmt.Println(http.ListenAndServe(listenAddr, nil))
		}()
	}

	// Write cpu profile if requested.
	if cfg.CPUProfile != "" {
		f, err := os.Create(cfg.CPUProfile)
		if err != nil {
			ltndLog.Errorf("Unable to create cpu profile: %v", err)
			return err
		}
		// BUGFIX: the error from StartCPUProfile was previously
		// ignored; a failed start would silently produce an empty
		// profile file.
		if err := pprof.StartCPUProfile(f); err != nil {
			ltndLog.Errorf("Unable to start cpu profile: %v", err)
			f.Close()
			return err
		}
		defer f.Close()
		defer pprof.StopCPUProfile()
	}

	// Open the channeldb, which is dedicated to storing channel, and
	// network related metadata.
	chanDB, err := channeldb.Open(cfg.DataDir)
	if err != nil {
		ltndLog.Errorf("unable to open channeldb: %v", err)
		return err
	}
	defer chanDB.Close()

	// Only process macaroons if --no-macaroons isn't set.
	var macaroonService *bakery.Service
	if !cfg.NoMacaroons {
		// Create the macaroon authentication/authorization service.
		macaroonService, err = macaroons.NewService(macaroonDatabaseDir)
		if err != nil {
			srvrLog.Errorf("unable to create macaroon service: %v", err)
			return err
		}

		// Create macaroon files for lncli to use if they don't exist.
		if !fileExists(cfg.AdminMacPath) && !fileExists(cfg.ReadMacPath) {
			err = genMacaroons(macaroonService, cfg.AdminMacPath,
				cfg.ReadMacPath)
			if err != nil {
				ltndLog.Errorf("unable to create macaroon "+
					"files: %v", err)
				return err
			}
		}
	}

	// Ensure we create TLS key and certificate if they don't exist
	if !fileExists(cfg.TLSCertPath) && !fileExists(cfg.TLSKeyPath) {
		if err := genCertPair(cfg.TLSCertPath, cfg.TLSKeyPath); err != nil {
			return err
		}
	}

	cert, err := tls.LoadX509KeyPair(cfg.TLSCertPath, cfg.TLSKeyPath)
	if err != nil {
		return err
	}
	tlsConf := &tls.Config{
		Certificates: []tls.Certificate{cert},
		CipherSuites: tlsCipherSuites,
		MinVersion:   tls.VersionTLS12,
	}
	sCreds := credentials.NewTLS(tlsConf)
	serverOpts := []grpc.ServerOption{grpc.Creds(sCreds)}
	grpcEndpoint := fmt.Sprintf("localhost:%d", loadedConfig.RPCPort)
	restEndpoint := fmt.Sprintf(":%d", loadedConfig.RESTPort)
	cCreds, err := credentials.NewClientTLSFromFile(cfg.TLSCertPath,
		"")
	if err != nil {
		return err
	}
	proxyOpts := []grpc.DialOption{grpc.WithTransportCredentials(cCreds)}

	// We wait until the user provides a password over RPC. In case lnd is
	// started with the --noencryptwallet flag, we use the default password
	// "hello" for wallet encryption.
	privateWalletPw := []byte("hello")
	publicWalletPw := []byte("public")
	if !cfg.NoEncryptWallet {
		privateWalletPw, publicWalletPw, err = waitForWalletPassword(
			grpcEndpoint, restEndpoint, serverOpts, proxyOpts,
			tlsConf, macaroonService,
		)
		if err != nil {
			return err
		}
	}

	// With the information parsed from the configuration, create valid
	// instances of the pertinent interfaces required to operate the
	// Lightning Network Daemon.
	activeChainControl, chainCleanUp, err := newChainControlFromConfig(cfg,
		chanDB, privateWalletPw, publicWalletPw)
	if err != nil {
		fmt.Printf("unable to create chain control: %v\n", err)
		return err
	}
	if chainCleanUp != nil {
		defer chainCleanUp()
	}

	// Finally before we start the server, we'll register the "holy
	// trinity" of interface for our current "home chain" with the active
	// chainRegistry interface.
	primaryChain := registeredChains.PrimaryChain()
	registeredChains.RegisterChain(primaryChain, activeChainControl)

	idPrivKey, err := activeChainControl.wallet.GetIdentitykey()
	if err != nil {
		return err
	}
	idPrivKey.Curve = btcec.S256()

	// Set up the core server which will listen for incoming peer
	// connections.
	defaultListenAddrs := []string{
		net.JoinHostPort("", strconv.Itoa(cfg.PeerPort)),
	}
	server, err := newServer(defaultListenAddrs, chanDB, activeChainControl,
		idPrivKey)
	if err != nil {
		srvrLog.Errorf("unable to create server: %v\n", err)
		return err
	}

	// Next, we'll initialize the funding manager itself so it can answer
	// queries while the wallet+chain are still syncing.
	nodeSigner := newNodeSigner(idPrivKey)
	var chanIDSeed [32]byte
	if _, err := rand.Read(chanIDSeed[:]); err != nil {
		return err
	}
	fundingMgr, err := newFundingManager(fundingConfig{
		IDKey:        idPrivKey.PubKey(),
		Wallet:       activeChainControl.wallet,
		Notifier:     activeChainControl.chainNotifier,
		FeeEstimator: activeChainControl.feeEstimator,
		SignMessage: func(pubKey *btcec.PublicKey,
			msg []byte) (*btcec.Signature, error) {

			if pubKey.IsEqual(idPrivKey.PubKey()) {
				return nodeSigner.SignMessage(pubKey, msg)
			}

			return activeChainControl.msgSigner.SignMessage(
				pubKey, msg,
			)
		},
		CurrentNodeAnnouncement: func() (lnwire.NodeAnnouncement, error) {
			return server.genNodeAnnouncement(true)
		},
		SendAnnouncement: func(msg lnwire.Message) error {
			errChan := server.authGossiper.ProcessLocalAnnouncement(msg,
				idPrivKey.PubKey())
			return <-errChan
		},
		ArbiterChan:      server.breachArbiter.newContracts,
		SendToPeer:       server.SendToPeer,
		NotifyWhenOnline: server.NotifyWhenOnline,
		FindPeer:         server.FindPeer,
		TempChanIDSeed:   chanIDSeed,
		FindChannel: func(chanID lnwire.ChannelID) (*lnwallet.LightningChannel, error) {
			dbChannels, err := chanDB.FetchAllChannels()
			if err != nil {
				return nil, err
			}

			for _, channel := range dbChannels {
				if chanID.IsChanPoint(&channel.FundingOutpoint) {
					return lnwallet.NewLightningChannel(
						activeChainControl.signer,
						activeChainControl.chainNotifier,
						activeChainControl.feeEstimator,
						channel)
				}
			}

			return nil, fmt.Errorf("unable to find channel")
		},
		DefaultRoutingPolicy: activeChainControl.routingPolicy,
		NumRequiredConfs: func(chanAmt btcutil.Amount, pushAmt lnwire.MilliSatoshi) uint16 {
			// TODO(roasbeef): add configurable mapping
			//  * simple switch initially
			//  * assign coefficient, etc
			return uint16(cfg.Bitcoin.DefaultNumChanConfs)
		},
		RequiredRemoteDelay: func(chanAmt btcutil.Amount) uint16 {
			// TODO(roasbeef): add additional hooks for
			// configuration
			return 4
		},
	})
	if err != nil {
		return err
	}
	if err := fundingMgr.Start(); err != nil {
		return err
	}
	server.fundingMgr = fundingMgr

	// Initialize, and register our implementation of the gRPC interface
	// exported by the rpcServer.
	rpcServer := newRPCServer(server, macaroonService)
	if err := rpcServer.Start(); err != nil {
		return err
	}
	grpcServer := grpc.NewServer(serverOpts...)
	lnrpc.RegisterLightningServer(grpcServer, rpcServer)

	// Next, Start the gRPC server listening for HTTP/2 connections.
	lis, err := net.Listen("tcp", grpcEndpoint)
	if err != nil {
		fmt.Printf("failed to listen: %v", err)
		return err
	}
	defer lis.Close()
	go func() {
		rpcsLog.Infof("RPC server listening on %s", lis.Addr())
		grpcServer.Serve(lis)
	}()

	// Finally, start the REST proxy for our gRPC server above.
	ctx := context.Background()
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	mux := proxy.NewServeMux()
	err = lnrpc.RegisterLightningHandlerFromEndpoint(ctx, mux, grpcEndpoint,
		proxyOpts)
	if err != nil {
		return err
	}
	go func() {
		listener, err := tls.Listen("tcp", restEndpoint, tlsConf)
		if err != nil {
			ltndLog.Errorf("gRPC proxy unable to listen on "+
				"localhost%s", restEndpoint)
			return
		}
		rpcsLog.Infof("gRPC proxy started at localhost%s", restEndpoint)
		http.Serve(listener, mux)
	}()

	// If we're not in simnet mode, We'll wait until we're fully synced to
	// continue the start up of the remainder of the daemon. This ensures
	// that we don't accept any possibly invalid state transitions, or
	// accept channels with spent funds.
	if !(cfg.Bitcoin.SimNet || cfg.Litecoin.SimNet) {
		_, bestHeight, err := activeChainControl.chainIO.GetBestBlock()
		if err != nil {
			return err
		}

		ltndLog.Infof("Waiting for chain backend to finish sync, "+
			"start_height=%v", bestHeight)

		for {
			synced, err := activeChainControl.wallet.IsSynced()
			if err != nil {
				return err
			}

			if synced {
				break
			}

			time.Sleep(time.Second * 1)
		}

		_, bestHeight, err = activeChainControl.chainIO.GetBestBlock()
		if err != nil {
			return err
		}

		ltndLog.Infof("Chain backend is fully synced (end_height=%v)!",
			bestHeight)
	}

	// With all the relevant chains initialized, we can finally start the
	// server itself.
	if err := server.Start(); err != nil {
		srvrLog.Errorf("unable to start server: %v\n", err)
		return err
	}

	// Now that the server has started, if the autopilot mode is currently
	// active, then we'll initialize a fresh instance of it and start it.
	var pilot *autopilot.Agent
	if cfg.Autopilot.Active {
		// BUGFIX: assign with "=" rather than ":=" — the original
		// ":=" declared a new pilot variable scoped to this if block,
		// shadowing the outer one. As a result the interrupt handler
		// below always saw pilot == nil and the autopilot agent was
		// never stopped on shutdown.
		pilot, err = initAutoPilot(server, cfg.Autopilot)
		if err != nil {
			ltndLog.Errorf("unable to create autopilot agent: %v",
				err)
			return err
		}
		if err := pilot.Start(); err != nil {
			ltndLog.Errorf("unable to start autopilot agent: %v",
				err)
			return err
		}
	}

	addInterruptHandler(func() {
		ltndLog.Infof("Gracefully shutting down the server...")
		rpcServer.Stop()
		fundingMgr.Stop()
		server.Stop()

		if pilot != nil {
			pilot.Stop()
		}

		server.WaitForShutdown()
	})

	// Wait for shutdown signal from either a graceful server stop or from
	// the interrupt handler.
	<-shutdownChannel
	ltndLog.Info("Shutdown complete")
	return nil
}
func main() {
	// Use all processor cores.
	// TODO(roasbeef): remove this if required version # is > 1.6?
	runtime.GOMAXPROCS(runtime.NumCPU())

	// Call the "real" main in a nested manner so the defers will properly
	// be executed in the case of a graceful shutdown.
	err := lndMain()
	if err == nil {
		return
	}

	// Help requests from the flag parser aren't real failures, so they
	// are suppressed; everything else is reported on stderr. Either way
	// the process exits with a non-zero status.
	if e, ok := err.(*flags.Error); !ok || e.Type != flags.ErrHelp {
		fmt.Fprintln(os.Stderr, err)
	}
	os.Exit(1)
}
// fileExists reports whether the named file or directory exists.
// This function is taken from https://github.com/btcsuite/btcd
func fileExists(name string) bool {
if _, err := os.Stat(name); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}
// genCertPair generates a key/cert pair to the paths provided. The
// auto-generated certificates should *not* be used in production for public
// access as they're self-signed and don't necessarily contain all of the
// desired hostnames for the service. For production/public use, consider a
// real PKI.
//
// This function is adapted from https://github.com/btcsuite/btcd and
// https://github.com/btcsuite/btcutil
func genCertPair(certFile, keyFile string) error {
	rpcsLog.Infof("Generating TLS certificates...")

	org := "lnd autogenerated cert"
	now := time.Now()
	validUntil := now.Add(autogenCertValidity)

	// Check that the certificate validity isn't past the ASN.1 end of time.
	if validUntil.After(endOfTime) {
		validUntil = endOfTime
	}

	// Generate a serial number that's below the serialNumberLimit.
	serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
	if err != nil {
		return fmt.Errorf("failed to generate serial number: %s", err)
	}

	// Collect the host's IP addresses, including loopback, in a slice.
	ipAddresses := []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("::1")}

	// addIP appends an IP address only if it isn't already in the slice.
	addIP := func(ipAddr net.IP) {
		for _, ip := range ipAddresses {
			if bytes.Equal(ip, ipAddr) {
				return
			}
		}
		ipAddresses = append(ipAddresses, ipAddr)
	}

	// Add all the interface IPs that aren't already in the slice.
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return err
	}
	for _, a := range addrs {
		// ParseCIDR failures are ignored on purpose: skip any
		// interface address that isn't in CIDR notation.
		ipAddr, _, err := net.ParseCIDR(a.String())
		if err == nil {
			addIP(ipAddr)
		}
	}

	// Collect the host's names into a slice.
	host, err := os.Hostname()
	if err != nil {
		return err
	}
	dnsNames := []string{host}
	if host != "localhost" {
		dnsNames = append(dnsNames, "localhost")
	}

	// Generate a private key for the certificate.
	priv, err := rsa.GenerateKey(rand.Reader, 4096)
	if err != nil {
		return err
	}

	// Construct the certificate template. NotBefore is backdated a day
	// to tolerate clock skew between this host and connecting clients.
	template := x509.Certificate{
		SerialNumber: serialNumber,
		Subject: pkix.Name{
			Organization: []string{org},
			CommonName:   host,
		},
		NotBefore: now.Add(-time.Hour * 24),
		NotAfter:  validUntil,

		KeyUsage: x509.KeyUsageKeyEncipherment |
			x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
		IsCA: true, // so can sign self.
		BasicConstraintsValid: true,

		DNSNames:    dnsNames,
		IPAddresses: ipAddresses,

		// This signature algorithm is most likely to be compatible
		// with clients using less-common TLS libraries like BoringSSL.
		SignatureAlgorithm: x509.SHA256WithRSA,
	}

	// Self-sign: the template is used as both subject and issuer.
	derBytes, err := x509.CreateCertificate(rand.Reader, &template,
		&template, &priv.PublicKey, priv)
	if err != nil {
		return fmt.Errorf("failed to create certificate: %v", err)
	}

	certBuf := &bytes.Buffer{}
	err = pem.Encode(certBuf, &pem.Block{Type: "CERTIFICATE",
		Bytes: derBytes})
	if err != nil {
		return fmt.Errorf("failed to encode certificate: %v", err)
	}

	keybytes := x509.MarshalPKCS1PrivateKey(priv)
	keyBuf := &bytes.Buffer{}
	err = pem.Encode(keyBuf, &pem.Block{Type: "RSA PRIVATE KEY",
		Bytes: keybytes})
	if err != nil {
		return fmt.Errorf("failed to encode private key: %v", err)
	}

	// Write cert and key files. The key is owner-readable only (0600);
	// the certificate is world-readable (0644).
	if err = ioutil.WriteFile(certFile, certBuf.Bytes(), 0644); err != nil {
		return err
	}
	if err = ioutil.WriteFile(keyFile, keyBuf.Bytes(), 0600); err != nil {
		// Don't leave a certificate on disk without its key.
		os.Remove(certFile)
		return err
	}

	rpcsLog.Infof("Done generating TLS certificates")

	return nil
}
// genMacaroons generates a pair of macaroon files; one admin-level and one
// read-only. These can also be used to generate more granular macaroons.
// On failure, no partial pair is left behind: if writing the read-only
// macaroon fails, the already-written admin macaroon file is removed.
func genMacaroons(svc *bakery.Service, admFile, roFile string) error {
	// Generate the admin macaroon and write it to a file.
	// NOTE(review): empty id and nil caveats presumably yield an
	// unrestricted macaroon — confirm against the bakery docs.
	admMacaroon, err := svc.NewMacaroon("", nil, nil)
	if err != nil {
		return err
	}
	admBytes, err := admMacaroon.MarshalBinary()
	if err != nil {
		return err
	}
	// Admin macaroon grants full access, so keep it owner-only (0600).
	if err = ioutil.WriteFile(admFile, admBytes, 0600); err != nil {
		return err
	}

	// Generate the read-only macaroon and write it to a file. It is
	// derived from the admin macaroon by adding read-only constraints.
	roMacaroon, err := macaroons.AddConstraints(admMacaroon,
		macaroons.AllowConstraint(roPermissions...))
	if err != nil {
		return err
	}
	roBytes, err := roMacaroon.MarshalBinary()
	if err != nil {
		return err
	}
	if err = ioutil.WriteFile(roFile, roBytes, 0644); err != nil {
		// Keep the pair consistent: either both files exist or
		// neither does.
		os.Remove(admFile)
		return err
	}

	return nil
}
// waitForWalletPassword will spin up gRPC and REST endpoints for the
// WalletUnlocker server, and block until a password is provided by
// the user to this RPC server. It returns the private and public wallet
// passwords (currently the same value is used for both), or an error if
// the servers could not be started or shutdown was requested.
func waitForWalletPassword(grpcEndpoint, restEndpoint string,
	serverOpts []grpc.ServerOption, proxyOpts []grpc.DialOption,
	tlsConf *tls.Config, macaroonService *bakery.Service) ([]byte, []byte, error) {

	// Set up a new PasswordService, which will listen
	// for passwords provided over RPC.
	grpcServer := grpc.NewServer(serverOpts...)

	chainConfig := cfg.Bitcoin
	if registeredChains.PrimaryChain() == litecoinChain {
		chainConfig = cfg.Litecoin
	}
	pwService := walletunlocker.New(macaroonService,
		chainConfig.ChainDir, activeNetParams.Params)
	lnrpc.RegisterWalletUnlockerServer(grpcServer, pwService)

	// Start a gRPC server listening for HTTP/2 connections, solely
	// used for getting the encryption password from the client.
	lis, err := net.Listen("tcp", grpcEndpoint)
	if err != nil {
		fmt.Printf("failed to listen: %v", err)
		return nil, nil, err
	}
	defer lis.Close()

	// Use a two channels to synchronize on, so we can be sure the
	// instructions on how to input the password is the last
	// thing to be printed to the console.
	grpcServing := make(chan struct{})
	restServing := make(chan struct{})

	// The channel is closed just before Serve so the main goroutine
	// knows the listener is about to accept connections.
	go func(c chan struct{}) {
		rpcsLog.Infof("password RPC server listening on %s",
			lis.Addr())
		close(c)
		grpcServer.Serve(lis)
	}(grpcServing)

	// Start a REST proxy for our gRPC server above.
	ctx := context.Background()
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	mux := proxy.NewServeMux()

	err = lnrpc.RegisterWalletUnlockerHandlerFromEndpoint(ctx, mux,
		grpcEndpoint, proxyOpts)
	if err != nil {
		return nil, nil, err
	}

	srv := &http.Server{Handler: mux}
	defer func() {
		// We must shut down this server, since we'll let
		// the regular rpcServer listen on the same address.
		if err := srv.Shutdown(ctx); err != nil {
			ltndLog.Errorf("unable to shutdown gPRC proxy: %v", err)
		}
	}()

	go func(c chan struct{}) {
		listener, err := tls.Listen("tcp", restEndpoint,
			tlsConf)
		if err != nil {
			// NOTE: the signal channel is never closed on this
			// path, so the receive below would block; the REST
			// proxy failing to listen keeps us waiting.
			ltndLog.Errorf("gRPC proxy unable to listen "+
				"on localhost%s", restEndpoint)
			return
		}
		rpcsLog.Infof("password gRPC proxy started at "+
			"localhost%s", restEndpoint)
		close(c)
		srv.Serve(listener)
	}(restServing)

	// Wait for gRPC and REST server to be up running.
	<-grpcServing
	<-restServing

	// Wait for user to provide the password.
	ltndLog.Infof("Waiting for wallet encryption password. " +
		"Use `lncli create` to create wallet, or " +
		"`lncli unlock` to unlock already created wallet.")

	// We currently don't distinguish between getting a password to
	// be used for creation or unlocking, as a new wallet db will be
	// created if none exists when creating the chain control.
	select {
	case walletPw := <-pwService.CreatePasswords:
		return walletPw, walletPw, nil
	case walletPw := <-pwService.UnlockPasswords:
		return walletPw, walletPw, nil
	case <-shutdownChannel:
		return nil, nil, fmt.Errorf("shutting down")
	}
}
lnd: make NumRequiredConfs and RequiredRemoteDelay scale with chanAmt
This commit makes the value returned from NumRequiredConfs
and RequiredRemoteDelay used during the funding process scale
linearly with the channel size. This is done to ensure that
in cases where more is at stake in a channel, we have more
time to react to reorgs, or unilateral closes.
If the user explicitly specified values for these two at
startup, we return those instead, without doing the scaling.
// Copyright (c) 2013-2017 The btcsuite developers
// Copyright (c) 2015-2016 The Decred developers
// Copyright (C) 2015-2017 The Lightning Network Developers
package main
import (
"bytes"
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"io/ioutil"
"math/big"
"net"
"net/http"
_ "net/http/pprof"
"os"
"runtime"
"runtime/pprof"
"strconv"
"time"
"gopkg.in/macaroon-bakery.v1/bakery"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
flags "github.com/btcsuite/go-flags"
proxy "github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/lightningnetwork/lnd/autopilot"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnwallet"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/macaroons"
"github.com/lightningnetwork/lnd/walletunlocker"
"github.com/roasbeef/btcd/btcec"
"github.com/roasbeef/btcutil"
)
const (
	// autogenCertValidity is how long the self-signed TLS certificate
	// generated at startup stays valid: roughly 14 months
	// (14 * 30 days), capped elsewhere at the ASN.1 end of time.
	autogenCertValidity = 14 /*months*/ * 30 /*days*/ * 24 * time.Hour
)
var (
	// cfg holds the active daemon configuration, assigned once in
	// lndMain after loadConfig succeeds.
	cfg *config

	// shutdownChannel is closed/signalled to trigger daemon shutdown.
	shutdownChannel = make(chan struct{})

	// registeredChains tracks the chain backends available to lnd.
	registeredChains = newChainRegistry()

	// macaroonDatabaseDir is the directory backing the macaroon
	// service's database.
	macaroonDatabaseDir string

	// End of ASN.1 time.
	endOfTime = time.Date(2049, 12, 31, 23, 59, 59, 0, time.UTC)

	// Max serial number.
	serialNumberLimit = new(big.Int).Lsh(big.NewInt(1), 128)

	/*
	 * These cipher suites fit the following criteria:
	 * - Don't use outdated algorithms like SHA-1 and 3DES
	 * - Don't use ECB mode or other insecure symmetric methods
	 * - Included in the TLS v1.2 suite
	 * - Are available in the Go 1.7.6 standard library (more are
	 *   available in 1.8.3 and will be added after lnd no longer
	 *   supports 1.7, including suites that support CBC mode)
	 *
	 * The cipher suites are ordered from strongest to weakest
	 * primitives, but the client's preference order has more
	 * effect during negotiation.
	 **/
	tlsCipherSuites = []uint16{
		tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
		tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
		tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
		tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
		tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
		tls.TLS_RSA_WITH_AES_128_CBC_SHA256,
		tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
		tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
		tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
		tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
		tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
	}
)
// lndMain is the true entry point for lnd. This function is required since
// defers created in the top-level scope of a main method aren't executed if
// os.Exit() is called.
func lndMain() error {
// Load the configuration, and parse any command line options. This
// function will also set up logging properly.
loadedConfig, err := loadConfig()
if err != nil {
return err
}
cfg = loadedConfig
defer func() {
if logRotator != nil {
logRotator.Close()
}
}()
// Show version at startup.
ltndLog.Infof("Version %s", version())
// Enable http profiling server if requested.
if cfg.Profile != "" {
go func() {
listenAddr := net.JoinHostPort("", cfg.Profile)
profileRedirect := http.RedirectHandler("/debug/pprof",
http.StatusSeeOther)
http.Handle("/", profileRedirect)
fmt.Println(http.ListenAndServe(listenAddr, nil))
}()
}
// Write cpu profile if requested.
if cfg.CPUProfile != "" {
f, err := os.Create(cfg.CPUProfile)
if err != nil {
ltndLog.Errorf("Unable to create cpu profile: %v", err)
return err
}
pprof.StartCPUProfile(f)
defer f.Close()
defer pprof.StopCPUProfile()
}
// Open the channeldb, which is dedicated to storing channel, and
// network related metadata.
chanDB, err := channeldb.Open(cfg.DataDir)
if err != nil {
ltndLog.Errorf("unable to open channeldb: %v", err)
return err
}
defer chanDB.Close()
// Only process macaroons if --no-macaroons isn't set.
var macaroonService *bakery.Service
if !cfg.NoMacaroons {
// Create the macaroon authentication/authorization service.
macaroonService, err = macaroons.NewService(macaroonDatabaseDir)
if err != nil {
srvrLog.Errorf("unable to create macaroon service: %v", err)
return err
}
// Create macaroon files for lncli to use if they don't exist.
if !fileExists(cfg.AdminMacPath) && !fileExists(cfg.ReadMacPath) {
err = genMacaroons(macaroonService, cfg.AdminMacPath,
cfg.ReadMacPath)
if err != nil {
ltndLog.Errorf("unable to create macaroon "+
"files: %v", err)
return err
}
}
}
// Ensure we create TLS key and certificate if they don't exist
if !fileExists(cfg.TLSCertPath) && !fileExists(cfg.TLSKeyPath) {
if err := genCertPair(cfg.TLSCertPath, cfg.TLSKeyPath); err != nil {
return err
}
}
cert, err := tls.LoadX509KeyPair(cfg.TLSCertPath, cfg.TLSKeyPath)
if err != nil {
return err
}
tlsConf := &tls.Config{
Certificates: []tls.Certificate{cert},
CipherSuites: tlsCipherSuites,
MinVersion: tls.VersionTLS12,
}
sCreds := credentials.NewTLS(tlsConf)
serverOpts := []grpc.ServerOption{grpc.Creds(sCreds)}
grpcEndpoint := fmt.Sprintf("localhost:%d", loadedConfig.RPCPort)
restEndpoint := fmt.Sprintf(":%d", loadedConfig.RESTPort)
cCreds, err := credentials.NewClientTLSFromFile(cfg.TLSCertPath,
"")
if err != nil {
return err
}
proxyOpts := []grpc.DialOption{grpc.WithTransportCredentials(cCreds)}
// We wait until the user provides a password over RPC. In case lnd is
// started with the --noencryptwallet flag, we use the default password
// "hello" for wallet encryption.
privateWalletPw := []byte("hello")
publicWalletPw := []byte("public")
if !cfg.NoEncryptWallet {
privateWalletPw, publicWalletPw, err = waitForWalletPassword(
grpcEndpoint, restEndpoint, serverOpts, proxyOpts,
tlsConf, macaroonService,
)
if err != nil {
return err
}
}
// With the information parsed from the configuration, create valid
// instances of the pertinent interfaces required to operate the
// Lightning Network Daemon.
activeChainControl, chainCleanUp, err := newChainControlFromConfig(cfg,
chanDB, privateWalletPw, publicWalletPw)
if err != nil {
fmt.Printf("unable to create chain control: %v\n", err)
return err
}
if chainCleanUp != nil {
defer chainCleanUp()
}
// Finally before we start the server, we'll register the "holy
// trinity" of interface for our current "home chain" with the active
// chainRegistry interface.
primaryChain := registeredChains.PrimaryChain()
registeredChains.RegisterChain(primaryChain, activeChainControl)
idPrivKey, err := activeChainControl.wallet.GetIdentitykey()
if err != nil {
return err
}
idPrivKey.Curve = btcec.S256()
// Set up the core server which will listen for incoming peer
// connections.
defaultListenAddrs := []string{
net.JoinHostPort("", strconv.Itoa(cfg.PeerPort)),
}
server, err := newServer(defaultListenAddrs, chanDB, activeChainControl,
idPrivKey)
if err != nil {
srvrLog.Errorf("unable to create server: %v\n", err)
return err
}
// Next, we'll initialize the funding manager itself so it can answer
// queries while the wallet+chain are still syncing.
nodeSigner := newNodeSigner(idPrivKey)
var chanIDSeed [32]byte
if _, err := rand.Read(chanIDSeed[:]); err != nil {
return err
}
fundingMgr, err := newFundingManager(fundingConfig{
IDKey: idPrivKey.PubKey(),
Wallet: activeChainControl.wallet,
Notifier: activeChainControl.chainNotifier,
FeeEstimator: activeChainControl.feeEstimator,
SignMessage: func(pubKey *btcec.PublicKey,
msg []byte) (*btcec.Signature, error) {
if pubKey.IsEqual(idPrivKey.PubKey()) {
return nodeSigner.SignMessage(pubKey, msg)
}
return activeChainControl.msgSigner.SignMessage(
pubKey, msg,
)
},
CurrentNodeAnnouncement: func() (lnwire.NodeAnnouncement, error) {
return server.genNodeAnnouncement(true)
},
SendAnnouncement: func(msg lnwire.Message) error {
errChan := server.authGossiper.ProcessLocalAnnouncement(msg,
idPrivKey.PubKey())
return <-errChan
},
ArbiterChan: server.breachArbiter.newContracts,
SendToPeer: server.SendToPeer,
NotifyWhenOnline: server.NotifyWhenOnline,
FindPeer: server.FindPeer,
TempChanIDSeed: chanIDSeed,
FindChannel: func(chanID lnwire.ChannelID) (*lnwallet.LightningChannel, error) {
dbChannels, err := chanDB.FetchAllChannels()
if err != nil {
return nil, err
}
for _, channel := range dbChannels {
if chanID.IsChanPoint(&channel.FundingOutpoint) {
return lnwallet.NewLightningChannel(
activeChainControl.signer,
activeChainControl.chainNotifier,
activeChainControl.feeEstimator,
channel)
}
}
return nil, fmt.Errorf("unable to find channel")
},
DefaultRoutingPolicy: activeChainControl.routingPolicy,
NumRequiredConfs: func(chanAmt btcutil.Amount,
pushAmt lnwire.MilliSatoshi) uint16 {
// For large channels we increase the number
// of confirmations we require for the
// channel to be considered open. As it is
// always the responder that gets to choose
// value, the pushAmt is value being pushed
// to us. This means we have more to lose
// in the case this gets re-orged out, and
// we will require more confirmations before
// we consider it open.
// TODO(halseth): Use Litecoin params in case
// of LTC channels.
// In case the user has explicitly specified
// a default value for the number of
// confirmations, we use it.
defaultConf := uint16(cfg.Bitcoin.DefaultNumChanConfs)
if defaultConf != 0 {
return defaultConf
}
// If not we return a value scaled linearly
// between 3 and 6, depending on channel size.
// TODO(halseth): Use 1 as minimum?
minConf := uint64(3)
maxConf := uint64(6)
maxChannelSize := uint64(
lnwire.NewMSatFromSatoshis(maxFundingAmount))
stake := lnwire.NewMSatFromSatoshis(chanAmt) + pushAmt
conf := maxConf * uint64(stake) / maxChannelSize
if conf < minConf {
conf = minConf
}
if conf > maxConf {
conf = maxConf
}
return uint16(conf)
},
RequiredRemoteDelay: func(chanAmt btcutil.Amount) uint16 {
// We scale the remote CSV delay (the time the
// remote have to claim funds in case of a unilateral
// close) linearly from minRemoteDelay blocks
// for small channels, to maxRemoteDelay blocks
// for channels of size maxFundingAmount.
// TODO(halseth): Litecoin parameter for LTC.
// In case the user has explicitly specified
// a default value for the remote delay, we
// use it.
defaultDelay := uint16(cfg.Bitcoin.DefaultRemoteDelay)
if defaultDelay > 0 {
return defaultDelay
}
// If not we scale according to channel size.
delay := uint16(maxRemoteDelay *
chanAmt / maxFundingAmount)
if delay < minRemoteDelay {
delay = minRemoteDelay
}
if delay > maxRemoteDelay {
delay = maxRemoteDelay
}
return delay
},
})
if err != nil {
return err
}
if err := fundingMgr.Start(); err != nil {
return err
}
server.fundingMgr = fundingMgr
// Initialize, and register our implementation of the gRPC interface
// exported by the rpcServer.
rpcServer := newRPCServer(server, macaroonService)
if err := rpcServer.Start(); err != nil {
return err
}
grpcServer := grpc.NewServer(serverOpts...)
lnrpc.RegisterLightningServer(grpcServer, rpcServer)
// Next, Start the gRPC server listening for HTTP/2 connections.
lis, err := net.Listen("tcp", grpcEndpoint)
if err != nil {
fmt.Printf("failed to listen: %v", err)
return err
}
defer lis.Close()
go func() {
rpcsLog.Infof("RPC server listening on %s", lis.Addr())
grpcServer.Serve(lis)
}()
// Finally, start the REST proxy for our gRPC server above.
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
defer cancel()
mux := proxy.NewServeMux()
err = lnrpc.RegisterLightningHandlerFromEndpoint(ctx, mux, grpcEndpoint,
proxyOpts)
if err != nil {
return err
}
go func() {
listener, err := tls.Listen("tcp", restEndpoint, tlsConf)
if err != nil {
ltndLog.Errorf("gRPC proxy unable to listen on "+
"localhost%s", restEndpoint)
return
}
rpcsLog.Infof("gRPC proxy started at localhost%s", restEndpoint)
http.Serve(listener, mux)
}()
// If we're not in simnet mode, We'll wait until we're fully synced to
// continue the start up of the remainder of the daemon. This ensures
// that we don't accept any possibly invalid state transitions, or
// accept channels with spent funds.
if !(cfg.Bitcoin.SimNet || cfg.Litecoin.SimNet) {
_, bestHeight, err := activeChainControl.chainIO.GetBestBlock()
if err != nil {
return err
}
ltndLog.Infof("Waiting for chain backend to finish sync, "+
"start_height=%v", bestHeight)
for {
synced, err := activeChainControl.wallet.IsSynced()
if err != nil {
return err
}
if synced {
break
}
time.Sleep(time.Second * 1)
}
_, bestHeight, err = activeChainControl.chainIO.GetBestBlock()
if err != nil {
return err
}
ltndLog.Infof("Chain backend is fully synced (end_height=%v)!",
bestHeight)
}
// With all the relevant chains initialized, we can finally start the
// server itself.
if err := server.Start(); err != nil {
srvrLog.Errorf("unable to start server: %v\n", err)
return err
}
// Now that the server has started, if the autopilot mode is currently
// active, then we'll initialize a fresh instance of it and start it.
var pilot *autopilot.Agent
if cfg.Autopilot.Active {
pilot, err := initAutoPilot(server, cfg.Autopilot)
if err != nil {
ltndLog.Errorf("unable to create autopilot agent: %v",
err)
return err
}
if err := pilot.Start(); err != nil {
ltndLog.Errorf("unable to start autopilot agent: %v",
err)
return err
}
}
addInterruptHandler(func() {
ltndLog.Infof("Gracefully shutting down the server...")
rpcServer.Stop()
fundingMgr.Stop()
server.Stop()
if pilot != nil {
pilot.Stop()
}
server.WaitForShutdown()
})
// Wait for shutdown signal from either a graceful server stop or from
// the interrupt handler.
<-shutdownChannel
ltndLog.Info("Shutdown complete")
return nil
}
// main is the true entry point for lnd. All real work is delegated to
// lndMain so that deferred cleanup inside lndMain still runs when the
// process exits through the error path here.
func main() {
	// Use all processor cores.
	// TODO(roasbeef): remove this if required version # is > 1.6?
	runtime.GOMAXPROCS(runtime.NumCPU())

	// Run the "real" main. A flags help request exits silently; every
	// other error is echoed to stderr. Both cases exit non-zero.
	err := lndMain()
	if err == nil {
		return
	}
	if e, ok := err.(*flags.Error); !ok || e.Type != flags.ErrHelp {
		fmt.Fprintln(os.Stderr, err)
	}
	os.Exit(1)
}
// fileExists reports whether the named file or directory exists.
// This function is taken from https://github.com/btcsuite/btcd
func fileExists(name string) bool {
	// Only a confirmed "does not exist" error counts as missing; any
	// other Stat error (e.g. permission denied) is treated as existing,
	// matching the original btcd behavior.
	_, err := os.Stat(name)
	if err != nil && os.IsNotExist(err) {
		return false
	}
	return true
}
// genCertPair generates a key/cert pair to the paths provided. The
// auto-generated certificates should *not* be used in production for public
// access as they're self-signed and don't necessarily contain all of the
// desired hostnames for the service. For production/public use, consider a
// real PKI.
//
// The certificate is written to certFile with mode 0644 and the PKCS#1
// RSA private key to keyFile with mode 0600; if the key write fails the
// certificate file is removed so the pair stays consistent.
//
// This function is adapted from https://github.com/btcsuite/btcd and
// https://github.com/btcsuite/btcutil
func genCertPair(certFile, keyFile string) error {
	rpcsLog.Infof("Generating TLS certificates...")
	org := "lnd autogenerated cert"
	now := time.Now()
	validUntil := now.Add(autogenCertValidity)
	// Check that the certificate validity isn't past the ASN.1 end of time.
	if validUntil.After(endOfTime) {
		validUntil = endOfTime
	}
	// Generate a serial number that's below the serialNumberLimit.
	serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
	if err != nil {
		return fmt.Errorf("failed to generate serial number: %s", err)
	}
	// Collect the host's IP addresses, including loopback, in a slice.
	ipAddresses := []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("::1")}
	// addIP appends an IP address only if it isn't already in the slice.
	addIP := func(ipAddr net.IP) {
		for _, ip := range ipAddresses {
			if bytes.Equal(ip, ipAddr) {
				return
			}
		}
		ipAddresses = append(ipAddresses, ipAddr)
	}
	// Add all the interface IPs that aren't already in the slice.
	// Addresses that fail CIDR parsing are skipped silently (best effort).
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return err
	}
	for _, a := range addrs {
		ipAddr, _, err := net.ParseCIDR(a.String())
		if err == nil {
			addIP(ipAddr)
		}
	}
	// Collect the host's names into a slice. "localhost" is always
	// included as a SAN alongside the machine hostname.
	host, err := os.Hostname()
	if err != nil {
		return err
	}
	dnsNames := []string{host}
	if host != "localhost" {
		dnsNames = append(dnsNames, "localhost")
	}
	// Generate a private key for the certificate.
	priv, err := rsa.GenerateKey(rand.Reader, 4096)
	if err != nil {
		return err
	}
	// Construct the certificate template.
	template := x509.Certificate{
		SerialNumber: serialNumber,
		Subject: pkix.Name{
			Organization: []string{org},
			CommonName:   host,
		},
		// NotBefore is backdated by 24h — presumably to tolerate
		// clock skew between peers; TODO confirm.
		NotBefore: now.Add(-time.Hour * 24),
		NotAfter:  validUntil,
		KeyUsage: x509.KeyUsageKeyEncipherment |
			x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
		IsCA: true, // so can sign self.
		BasicConstraintsValid: true,
		DNSNames:              dnsNames,
		IPAddresses:           ipAddresses,
		// This signature algorithm is most likely to be compatible
		// with clients using less-common TLS libraries like BoringSSL.
		SignatureAlgorithm: x509.SHA256WithRSA,
	}
	// Self-sign: the template serves as both subject and issuer.
	derBytes, err := x509.CreateCertificate(rand.Reader, &template,
		&template, &priv.PublicKey, priv)
	if err != nil {
		return fmt.Errorf("failed to create certificate: %v", err)
	}
	// PEM-encode the certificate and the PKCS#1 private key.
	certBuf := &bytes.Buffer{}
	err = pem.Encode(certBuf, &pem.Block{Type: "CERTIFICATE",
		Bytes: derBytes})
	if err != nil {
		return fmt.Errorf("failed to encode certificate: %v", err)
	}
	keybytes := x509.MarshalPKCS1PrivateKey(priv)
	keyBuf := &bytes.Buffer{}
	err = pem.Encode(keyBuf, &pem.Block{Type: "RSA PRIVATE KEY",
		Bytes: keybytes})
	if err != nil {
		return fmt.Errorf("failed to encode private key: %v", err)
	}
	// Write cert and key files. The key gets 0600 since it must stay
	// private; on key-write failure the cert is removed.
	if err = ioutil.WriteFile(certFile, certBuf.Bytes(), 0644); err != nil {
		return err
	}
	if err = ioutil.WriteFile(keyFile, keyBuf.Bytes(), 0600); err != nil {
		os.Remove(certFile)
		return err
	}
	rpcsLog.Infof("Done generating TLS certificates")
	return nil
}
// genMacaroons generates a pair of macaroon files; one admin-level and one
// read-only. These can also be used to generate more granular macaroons.
func genMacaroons(svc *bakery.Service, admFile, roFile string) error {
	// Bake the admin macaroon first and persist it owner-only (0600),
	// since it grants full access.
	adminMac, err := svc.NewMacaroon("", nil, nil)
	if err != nil {
		return err
	}
	adminBytes, err := adminMac.MarshalBinary()
	if err != nil {
		return err
	}
	if err = ioutil.WriteFile(admFile, adminBytes, 0600); err != nil {
		return err
	}

	// Derive the read-only macaroon from the admin one by attaching the
	// read-only permission constraints, then persist it world-readable.
	readOnlyMac, err := macaroons.AddConstraints(adminMac,
		macaroons.AllowConstraint(roPermissions...))
	if err != nil {
		return err
	}
	readOnlyBytes, err := readOnlyMac.MarshalBinary()
	if err != nil {
		return err
	}
	if err = ioutil.WriteFile(roFile, readOnlyBytes, 0644); err != nil {
		// Keep the pair consistent: if the read-only file cannot be
		// written, remove the admin file written above.
		os.Remove(admFile)
		return err
	}

	return nil
}
// waitForWalletPassword will spin up gRPC and REST endpoints for the
// WalletUnlocker server, and block until a password is provided by
// the user to this RPC server.
//
// The same password is returned as both byte slices: creation and unlocking
// are not currently distinguished (see the comment above the final select).
func waitForWalletPassword(grpcEndpoint, restEndpoint string,
	serverOpts []grpc.ServerOption, proxyOpts []grpc.DialOption,
	tlsConf *tls.Config, macaroonService *bakery.Service) ([]byte, []byte, error) {
	// Set up a new PasswordService, which will listen
	// for passwords provided over RPC.
	grpcServer := grpc.NewServer(serverOpts...)
	// Select the chain config matching the primary registered chain.
	chainConfig := cfg.Bitcoin
	if registeredChains.PrimaryChain() == litecoinChain {
		chainConfig = cfg.Litecoin
	}
	pwService := walletunlocker.New(macaroonService,
		chainConfig.ChainDir, activeNetParams.Params)
	lnrpc.RegisterWalletUnlockerServer(grpcServer, pwService)
	// Start a gRPC server listening for HTTP/2 connections, solely
	// used for getting the encryption password from the client.
	lis, err := net.Listen("tcp", grpcEndpoint)
	if err != nil {
		fmt.Printf("failed to listen: %v", err)
		return nil, nil, err
	}
	defer lis.Close()
	// Use a two channels to synchronize on, so we can be sure the
	// instructions on how to input the password is the last
	// thing to be printed to the console.
	grpcServing := make(chan struct{})
	restServing := make(chan struct{})
	go func(c chan struct{}) {
		rpcsLog.Infof("password RPC server listening on %s",
			lis.Addr())
		// Signal readiness before blocking in Serve.
		close(c)
		grpcServer.Serve(lis)
	}(grpcServing)
	// Start a REST proxy for our gRPC server above.
	ctx := context.Background()
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	mux := proxy.NewServeMux()
	err = lnrpc.RegisterWalletUnlockerHandlerFromEndpoint(ctx, mux,
		grpcEndpoint, proxyOpts)
	if err != nil {
		return nil, nil, err
	}
	srv := &http.Server{Handler: mux}
	defer func() {
		// We must shut down this server, since we'll let
		// the regular rpcServer listen on the same address.
		if err := srv.Shutdown(ctx); err != nil {
			ltndLog.Errorf("unable to shutdown gPRC proxy: %v", err)
		}
	}()
	go func(c chan struct{}) {
		listener, err := tls.Listen("tcp", restEndpoint,
			tlsConf)
		if err != nil {
			// Best effort: the REST proxy failing to listen does
			// not abort the password wait; the gRPC path remains.
			ltndLog.Errorf("gRPC proxy unable to listen "+
				"on localhost%s", restEndpoint)
			return
		}
		rpcsLog.Infof("password gRPC proxy started at "+
			"localhost%s", restEndpoint)
		close(c)
		srv.Serve(listener)
	}(restServing)
	// Wait for gRPC and REST server to be up running.
	<-grpcServing
	<-restServing
	// Wait for user to provide the password.
	ltndLog.Infof("Waiting for wallet encryption password. " +
		"Use `lncli create` to create wallet, or " +
		"`lncli unlock` to unlock already created wallet.")
	// We currently don't distinguish between getting a password to
	// be used for creation or unlocking, as a new wallet db will be
	// created if none exists when creating the chain control.
	select {
	case walletPw := <-pwService.CreatePasswords:
		return walletPw, walletPw, nil
	case walletPw := <-pwService.UnlockPasswords:
		return walletPw, walletPw, nil
	case <-shutdownChannel:
		return nil, nil, fmt.Errorf("shutting down")
	}
}
|
package gohm
import (
"fmt"
"io"
"net/http"
"strings"
"time"
)
// apacheLogFormat is the common-log-style line template: client IP,
// timestamp, quoted request line, status code, response bytes, duration.
const apacheLogFormat = "%s [%s] \"%s\" %d %d %f\n"

// timeFormat is the Apache common-log timestamp layout.
const timeFormat = "02/Jan/2006:15:04:05 MST"
// loggedResponseWriter wraps an http.ResponseWriter, recording the status
// code and the number of body bytes written so they can be logged later.
type loggedResponseWriter struct {
	http.ResponseWriter
	responseBytes int64 // total bytes successfully written to the body
	status        int   // status passed to WriteHeader (callers default it to 200)
}
// Write forwards p to the wrapped ResponseWriter and accumulates the count
// of bytes actually written for later logging.
func (r *loggedResponseWriter) Write(p []byte) (int, error) {
	n, err := r.ResponseWriter.Write(p)
	r.responseBytes += int64(n)
	return n, err
}
// WriteHeader records the status code before delegating to the wrapped
// ResponseWriter.
func (r *loggedResponseWriter) WriteHeader(code int) {
	r.status = code
	r.ResponseWriter.WriteHeader(code)
}
// LogAll returns a new http.Handler that logs HTTP requests and responses in common log format
// to the specified io.Writer.
//
//	mux := http.NewServeMux()
//	mux.Handle("/example/path", gohm.LogAll(os.Stderr, decodeURI(expand(querier))))
func LogAll(out io.Writer, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		recorder := &loggedResponseWriter{
			ResponseWriter: w,
			status:         http.StatusOK,
		}

		start := time.Now()
		next.ServeHTTP(recorder, r)
		finish := time.Now()

		// Strip a trailing ":port" from the remote address, if any.
		host := r.RemoteAddr
		if i := strings.LastIndex(host, ":"); i != -1 {
			host = host[:i]
		}

		requestLine := fmt.Sprintf("%s %s %s", r.Method, r.RequestURI, r.Proto)
		elapsed := finish.Sub(start).Seconds()
		when := finish.UTC().Format(timeFormat)
		fmt.Fprintf(out, apacheLogFormat, host, when, requestLine, recorder.status, recorder.responseBytes, elapsed)
	})
}
// LogErrors returns a new http.Handler that logs HTTP requests that result in response errors, or
// more specifically, HTTP status codes that are either 4xx or 5xx. The handler will output lines
// in common log format to the specified io.Writer.
//
//	mux := http.NewServeMux()
//	mux.Handle("/example/path", gohm.LogErrors(os.Stderr, decodeURI(expand(querier))))
func LogErrors(out io.Writer, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		lrw := &loggedResponseWriter{
			ResponseWriter: w,
			status:         http.StatusOK,
		}
		begin := time.Now()
		next.ServeHTTP(lrw, r)
		// Log only responses in the 4xx or 5xx families. The previous
		// condition excluded 1xx/2xx/3xx instead, which also matched
		// non-standard statuses outside 100-599, contradicting the
		// documented contract above.
		if member(lrw.status, 400) || member(lrw.status, 500) {
			end := time.Now()
			clientIP := r.RemoteAddr
			if colon := strings.LastIndex(clientIP, ":"); colon != -1 {
				clientIP = clientIP[:colon]
			}
			request := fmt.Sprintf("%s %s %s", r.Method, r.RequestURI, r.Proto)
			duration := end.Sub(begin).Seconds()
			formattedTime := end.UTC().Format(timeFormat)
			fmt.Fprintf(out, apacheLogFormat, clientIP, formattedTime, request, lrw.status, lrw.responseBytes, duration)
		}
	})
}
Refactor: the three negated membership checks (not 1xx, not 2xx, not 3xx) were reduced to two positive checks for the 4xx and 5xx families, which also tightens the match to the error statuses that LogErrors documents.
package gohm
import (
"fmt"
"io"
"net/http"
"strings"
"time"
)
// apacheLogFormat is the common-log-style line template: client IP,
// timestamp, quoted request line, status code, response bytes, duration.
const apacheLogFormat = "%s [%s] \"%s\" %d %d %f\n"

// timeFormat is the Apache common-log timestamp layout.
const timeFormat = "02/Jan/2006:15:04:05 MST"
// loggedResponseWriter wraps an http.ResponseWriter, recording the status
// code and the number of body bytes written so they can be logged later.
type loggedResponseWriter struct {
	http.ResponseWriter
	responseBytes int64 // total bytes successfully written to the body
	status        int   // status passed to WriteHeader (callers default it to 200)
}
// Write passes p through to the underlying writer, tracking how many bytes
// made it out so the eventual log line can report a response size.
func (r *loggedResponseWriter) Write(p []byte) (int, error) {
	count, err := r.ResponseWriter.Write(p)
	r.responseBytes += int64(count)
	return count, err
}
// WriteHeader remembers the status code and then hands it to the underlying
// ResponseWriter.
func (r *loggedResponseWriter) WriteHeader(statusCode int) {
	r.status = statusCode
	r.ResponseWriter.WriteHeader(statusCode)
}
// LogAll returns a new http.Handler that logs HTTP requests and responses in common log format
// to the specified io.Writer.
//
//	mux := http.NewServeMux()
//	mux.Handle("/example/path", gohm.LogAll(os.Stderr, decodeURI(expand(querier))))
func LogAll(out io.Writer, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		wrapped := &loggedResponseWriter{ResponseWriter: w, status: http.StatusOK}

		before := time.Now()
		next.ServeHTTP(wrapped, r)
		after := time.Now()

		// Trim a trailing ":port" from the remote address.
		addr := r.RemoteAddr
		if idx := strings.LastIndex(addr, ":"); idx >= 0 {
			addr = addr[:idx]
		}

		fmt.Fprintf(out, apacheLogFormat,
			addr,
			after.UTC().Format(timeFormat),
			fmt.Sprintf("%s %s %s", r.Method, r.RequestURI, r.Proto),
			wrapped.status,
			wrapped.responseBytes,
			after.Sub(before).Seconds())
	})
}
// LogErrors returns a new http.Handler that logs HTTP requests that result in response errors, or
// more specifically, HTTP status codes that are either 4xx or 5xx. The handler will output lines
// in common log format to the specified io.Writer.
//
//	mux := http.NewServeMux()
//	mux.Handle("/example/path", gohm.LogErrors(os.Stderr, decodeURI(expand(querier))))
func LogErrors(out io.Writer, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		wrapped := &loggedResponseWriter{ResponseWriter: w, status: http.StatusOK}

		before := time.Now()
		next.ServeHTTP(wrapped, r)

		// Emit a line only for responses in the 4xx or 5xx families.
		if !member(wrapped.status, 400) && !member(wrapped.status, 500) {
			return
		}

		after := time.Now()
		addr := r.RemoteAddr
		if idx := strings.LastIndex(addr, ":"); idx >= 0 {
			addr = addr[:idx]
		}
		fmt.Fprintf(out, apacheLogFormat,
			addr,
			after.UTC().Format(timeFormat),
			fmt.Sprintf("%s %s %s", r.Method, r.RequestURI, r.Proto),
			wrapped.status,
			wrapped.responseBytes,
			after.Sub(before).Seconds())
	})
}
|
package stripe
import (
"fmt"
"io"
"os"
)
//
// Public constants
//

const (
	// LevelNull sets a logger to show no messages at all.
	LevelNull Level = 0
	// LevelError sets a logger to show error messages only.
	LevelError Level = 1
	// LevelWarn sets a logger to show warning messages or anything more
	// severe.
	LevelWarn Level = 2
	// LevelInfo sets a logger to show informational messages or anything more
	// severe.
	LevelInfo Level = 3
	// LevelDebug sets a logger to show debug messages or anything more
	// severe (i.e. everything).
	LevelDebug Level = 4
)
//
// Public variables
//

// DefaultLeveledLogger is the default logger that the library will use to log
// errors, warnings, and informational messages.
//
// LeveledLoggerInterface is implemented by LeveledLogger, and one can be
// initialized at the desired level of logging. LeveledLoggerInterface also
// provides out-of-the-box compatibility with a Logrus Logger, but may require
// a thin shim for use with other logging libraries that use less standard
// conventions like Zap.
//
// This Logger will be inherited by any backends created by default, but will
// be overridden if a backend is created with GetBackendWithConfig with a
// custom LeveledLogger set.
var DefaultLeveledLogger LeveledLoggerInterface = &LeveledLogger{
	Level: LevelError, // errors only by default; raise for more verbosity
}
//
// Public types
//

// Level represents a logging level. Higher values are more verbose; see the
// Level* constants above for the supported values.
type Level uint32
// LeveledLogger is a leveled logger implementation.
//
// It prints warnings and errors to `os.Stderr` and other messages to
// `os.Stdout`.
type LeveledLogger struct {
	// Level is the minimum logging level that will be emitted by this logger.
	//
	// For example, a Level set to LevelWarn will emit warnings and errors, but
	// not informational or debug messages.
	//
	// Always set this with a constant like LevelWarn because the individual
	// values are not guaranteed to be stable.
	Level Level

	// Internal testing use only: when non-nil, these writers replace
	// os.Stderr/os.Stdout (see the stderr/stdout accessors below).
	stderrOverride io.Writer
	stdoutOverride io.Writer
}
// Debugf logs a debug message using Printf conventions.
func (l *LeveledLogger) Debugf(format string, v ...interface{}) {
	// Debug output goes to stdout, like informational messages.
	dest := l.stdout()
	l.printLog(format, "DEBUG", LevelDebug, dest, v...)
}
// Errorf logs an error message using Printf conventions.
func (l *LeveledLogger) Errorf(format string, v ...interface{}) {
	l.printLog(format, "ERROR", LevelError, l.stderr(), v...)
}
// Infof logs an informational message using Printf conventions.
func (l *LeveledLogger) Infof(format string, v ...interface{}) {
	// Informational output goes to stdout.
	dest := l.stdout()
	l.printLog(format, "INFO", LevelInfo, dest, v...)
}
// Warnf logs a warning message using Printf conventions.
func (l *LeveledLogger) Warnf(format string, v ...interface{}) {
	// Warnings go to stderr, like errors.
	dest := l.stderr()
	l.printLog(format, "WARN", LevelWarn, dest, v...)
}
// stderr returns the destination for warnings and errors: the test override
// when one is set, otherwise os.Stderr.
func (l *LeveledLogger) stderr() io.Writer {
	if w := l.stderrOverride; w != nil {
		return w
	}
	return os.Stderr
}
// stdout returns the destination for debug and informational messages: the
// test override when one is set, otherwise os.Stdout.
func (l *LeveledLogger) stdout() io.Writer {
	if w := l.stdoutOverride; w != nil {
		return w
	}
	return os.Stdout
}
// LeveledLoggerInterface provides a basic leveled logging interface for
// printing debug, informational, warning, and error messages.
//
// It's implemented by LeveledLogger and also provides out-of-the-box
// compatibility with a Logrus Logger, but may require a thin shim for use with
// other logging libraries that use less standard conventions like Zap.
type LeveledLoggerInterface interface {
	// Debugf logs a debug message using Printf conventions.
	Debugf(format string, v ...interface{})
	// Errorf logs an error message using Printf conventions.
	Errorf(format string, v ...interface{})
	// Infof logs an informational message using Printf conventions.
	Infof(format string, v ...interface{})
	// Warnf logs a warning message using Printf conventions.
	Warnf(format string, v ...interface{})
}
//
// Private functions
//

// printLog emits one tagged log line to output, but only when the logger's
// configured Level is at least the message's level.
func (l *LeveledLogger) printLog(format string, tag string, level Level, output io.Writer, v ...interface{}) {
	if l.Level < level {
		return
	}
	fmt.Fprintf(output, "[%s] %s\n", tag, fmt.Sprintf(format, v...))
}
Refactor: being less DRY is acceptable here — the shared printLog helper is inlined into each level method, trading a little duplication for one less layer of indirection and a simple guard clause per method.
package stripe
import (
"fmt"
"io"
"os"
)
//
// Public constants
//

const (
	// LevelNull sets a logger to show no messages at all.
	LevelNull Level = 0
	// LevelError sets a logger to show error messages only.
	LevelError Level = 1
	// LevelWarn sets a logger to show warning messages or anything more
	// severe.
	LevelWarn Level = 2
	// LevelInfo sets a logger to show informational messages or anything more
	// severe.
	LevelInfo Level = 3
	// LevelDebug sets a logger to show debug messages or anything more
	// severe (i.e. everything).
	LevelDebug Level = 4
)
//
// Public variables
//

// DefaultLeveledLogger is the default logger that the library will use to log
// errors, warnings, and informational messages.
//
// LeveledLoggerInterface is implemented by LeveledLogger, and one can be
// initialized at the desired level of logging. LeveledLoggerInterface also
// provides out-of-the-box compatibility with a Logrus Logger, but may require
// a thin shim for use with other logging libraries that use less standard
// conventions like Zap.
//
// This Logger will be inherited by any backends created by default, but will
// be overridden if a backend is created with GetBackendWithConfig with a
// custom LeveledLogger set.
var DefaultLeveledLogger LeveledLoggerInterface = &LeveledLogger{
	Level: LevelError, // errors only by default; raise for more verbosity
}
//
// Public types
//

// Level represents a logging level. Higher values are more verbose; see the
// Level* constants above for the supported values.
type Level uint32
// LeveledLogger is a leveled logger implementation.
//
// It prints warnings and errors to `os.Stderr` and other messages to
// `os.Stdout`.
type LeveledLogger struct {
	// Level is the minimum logging level that will be emitted by this logger.
	//
	// For example, a Level set to LevelWarn will emit warnings and errors, but
	// not informational or debug messages.
	//
	// Always set this with a constant like LevelWarn because the individual
	// values are not guaranteed to be stable.
	Level Level

	// Internal testing use only: when non-nil, these writers replace
	// os.Stderr/os.Stdout (see the stderr/stdout accessors below).
	stderrOverride io.Writer
	stdoutOverride io.Writer
}
// Debugf logs a debug message using Printf conventions.
func (l *LeveledLogger) Debugf(format string, v ...interface{}) {
	if l.Level < LevelDebug {
		return
	}
	fmt.Fprintf(l.stdout(), "[DEBUG] "+format+"\n", v...)
}
// Errorf logs an error message using Printf conventions.
func (l *LeveledLogger) Errorf(format string, v ...interface{}) {
	if l.Level >= LevelError {
		fmt.Fprintf(l.stderr(), "[ERROR] "+format+"\n", v...)
	}
}
// Infof logs an informational message using Printf conventions.
func (l *LeveledLogger) Infof(format string, v ...interface{}) {
	if l.Level < LevelInfo {
		return
	}
	fmt.Fprintf(l.stdout(), "[INFO] "+format+"\n", v...)
}
// Warnf logs a warning message using Printf conventions.
func (l *LeveledLogger) Warnf(format string, v ...interface{}) {
	if l.Level < LevelWarn {
		return
	}
	fmt.Fprintf(l.stderr(), "[WARN] "+format+"\n", v...)
}
// stderr returns the destination for warnings and errors: the test override
// when one is set, otherwise os.Stderr.
func (l *LeveledLogger) stderr() io.Writer {
	if w := l.stderrOverride; w != nil {
		return w
	}
	return os.Stderr
}
// stdout returns the destination for debug and informational messages: the
// test override when one is set, otherwise os.Stdout.
func (l *LeveledLogger) stdout() io.Writer {
	if w := l.stdoutOverride; w != nil {
		return w
	}
	return os.Stdout
}
// LeveledLoggerInterface provides a basic leveled logging interface for
// printing debug, informational, warning, and error messages.
//
// It's implemented by LeveledLogger and also provides out-of-the-box
// compatibility with a Logrus Logger, but may require a thin shim for use with
// other logging libraries that use less standard conventions like Zap.
type LeveledLoggerInterface interface {
	// Debugf logs a debug message using Printf conventions.
	Debugf(format string, v ...interface{})
	// Errorf logs an error message using Printf conventions.
	Errorf(format string, v ...interface{})
	// Infof logs an informational message using Printf conventions.
	Infof(format string, v ...interface{})
	// Warnf logs a warning message using Printf conventions.
	Warnf(format string, v ...interface{})
}
|
// Package ltsvlog is a minimalist logging library for writing logs in
// LTSV (Labeled Tab-separated Value) format.
// See http://ltsv.org/ for LTSV.
//
// This logging library has three log levels: Debug, Info and Error.
// The Info and Error levels are always enabled.
// You can disable the Debug level but only when you create a logger.
//
// Each log record is printed as one line. A line has multiple fields
// separated by a tab character. Each field has a label and a value
// which are separated by a colon ':' character.
//
// So you must not contain a colon character in labels.
// This is not checked in this library for performance reason,
// so it is your responsibility not to contain a colon character in labels.
//
// Newline and tab characters in values are escaped with "\\n" and "\\t"
// respectively.
package ltsvlog
import (
"fmt"
"io"
"os"
"strconv"
"strings"
"sync"
"time"
)
// Deprecated: kept for backward compatibility with the variadic logging
// methods; prefer the Event-based API.
//
// LV represents a label L and a value V.
type LV struct {
	L string      // label; must not contain ':' (not validated — see package doc)
	V interface{} // value; newline and tab are escaped when rendered
}
// LogWriter is a LTSV logger interface.
type LogWriter interface {
	// DebugEnabled reports whether debug-level logging is enabled.
	DebugEnabled() bool
	// Debug returns an Event for writing a debug-level record
	// (variadic lv form is deprecated).
	Debug(lv ...LV) *Event
	// Info returns an Event for writing an info-level record
	// (variadic lv form is deprecated).
	Info(lv ...LV) *Event
	// Err writes an error-level record for err.
	Err(err error)
	// Error writes an error-level record. Deprecated: use Err.
	Error(lv ...LV)
	// ErrorWithStack writes an error-level record plus a stack trace.
	// Deprecated: use Err.
	ErrorWithStack(lv ...LV)
}
// LTSVLogger is a LTSV logger.
type LTSVLogger struct {
	writer           io.Writer        // destination for log records
	debugEnabled     bool             // whether Debug records are emitted
	timeLabel        string           // label for the time field; empty omits it
	levelLabel       string           // label for the level field; empty omits it
	appendPrefixFunc AppendPrefixFunc // renders the per-record prefix
	appendValueFunc  AppendValueFunc  // renders a single value (deprecated hook)
	buf              []byte           // reusable record buffer; guarded by mu
	stackBuf         []byte           // scratch buffer for stack traces
	mu               sync.Mutex       // serializes the deprecated log() path and protects buf
}
// Option is the function type to set an option of LTSVLogger.
// Options are applied by NewLTSVLogger after the defaults are filled in.
type Option func(l *LTSVLogger)
// Deprecated: not needed if you use LTSVLogger.Err instead of the
// deprecated LTSVLogger.Error and LTSVLogger.ErrorWithStack.
//
// StackBufSize returns the option function to set the stack buffer size
// (the scratch buffer used when capturing stack traces).
func StackBufSize(size int) Option {
	return func(l *LTSVLogger) {
		l.stackBuf = make([]byte, size)
	}
}
// SetTimeLabel returns the option function to set the time label.
// If the label is empty, loggers do not print time values.
func SetTimeLabel(label string) Option {
	return func(l *LTSVLogger) { l.timeLabel = label }
}
// SetLevelLabel returns the option function to set the level label.
// If the label is empty, loggers do not print level values.
func SetLevelLabel(label string) Option {
	return func(l *LTSVLogger) { l.levelLabel = label }
}
// Deprecated: kept for backward compatibility.
//
// SetAppendValue returns the option function to set the function
// used to append a log value to the output buffer.
func SetAppendValue(f AppendValueFunc) Option {
	return func(l *LTSVLogger) {
		l.appendValueFunc = f
	}
}
// Deprecated: use SetTimeLabel or SetLevelLabel instead.
//
// AppendPrefixFunc is a function type for appending a prefix
// for a log record to a byte buffer and returns the result buffer.
type AppendPrefixFunc func(buf []byte, level string) []byte
// Deprecated: kept for backward compatibility.
//
// AppendValueFunc is a function type for appending a value to
// a byte buffer and returns the result buffer.
type AppendValueFunc func(buf []byte, v interface{}) []byte
const (
	// defaultTimeLabel is the label used for the timestamp field.
	defaultTimeLabel = "time"
	// defaultLevelLabel is the label used for the log level field.
	defaultLevelLabel = "level"
)
// defaultAppendPrefixFunc renders the standard "time:...\tlevel:...\t" prefix.
var defaultAppendPrefixFunc = appendPrefixFunc(defaultTimeLabel, defaultLevelLabel)
// NewLTSVLogger creates a LTSV logger with the default time and value format.
//
// The folloing two values are prepended to each log line.
//
// The first value is the current time, and has the default label "time".
// The time format is RFC3339 with microseconds in UTC timezone.
// This format is the same as "2006-01-02T15:04:05.000000Z" in the
// go time format https://golang.org/pkg/time/#Time.Format
//
// The second value is the log level with the default label "level".
func NewLTSVLogger(w io.Writer, debugEnabled bool, options ...Option) *LTSVLogger {
l := <SVLogger{
writer: w,
debugEnabled: debugEnabled,
timeLabel: defaultTimeLabel,
levelLabel: defaultLevelLabel,
appendPrefixFunc: defaultAppendPrefixFunc,
appendValueFunc: appendValue,
buf: make([]byte, 1024),
stackBuf: make([]byte, 8192),
}
for _, o := range options {
o(l)
}
if l.timeLabel != defaultTimeLabel || l.levelLabel != defaultLevelLabel {
l.appendPrefixFunc = appendPrefixFunc(l.timeLabel, l.levelLabel)
}
return l
}
// Deprecated. Use NewLTSVLogger with options instead.
//
// NewLTSVLoggerCustomFormat creates a LTSV logger with the buffer size for
// filling stack traces and user-supplied functions for appending a log
// record prefix and appending a log value.
func NewLTSVLoggerCustomFormat(w io.Writer, debugEnabled bool, stackBufSize int, appendPrefixFunc AppendPrefixFunc, appendValueFunc AppendValueFunc) *LTSVLogger {
if appendPrefixFunc == nil {
appendPrefixFunc = appendPrefix
}
if appendValueFunc == nil {
appendValueFunc = appendValue
}
return <SVLogger{
writer: w,
debugEnabled: debugEnabled,
appendPrefixFunc: appendPrefixFunc,
appendValueFunc: appendValueFunc,
stackBuf: make([]byte, stackBufSize),
}
}
// DebugEnabled returns whether or not the debug level is enabled.
// You can avoid the cost of evaluation of arguments passed to Debug like:
//
//	if ltsvlog.Logger.DebugEnabled() {
//		ltsvlog.Logger.Debug().String("label1", someSlowFunction()).Log()
//	}
//
// The flag is set at construction time (see NewLTSVLogger).
func (l *LTSVLogger) DebugEnabled() bool {
	return l.debugEnabled
}
// Debug returns a new Event for writing a Debug level log.
//
// Note there still exists the cost of evaluating argument values if the
// debug level is disabled, even though those arguments are not used.
// So guarding with if and DebugEnabled is recommended.
//
// Passing one or more lv is deprecated. This is left for backward
// compatibility for a while and will not be supported in a future version;
// the signature of this method will then become
// func (l *LTSVLogger) Debug() *Event
func (l *LTSVLogger) Debug(lv ...LV) *Event {
	if len(lv) > 0 {
		// Legacy variadic path, kept for backward compatibility.
		// TODO: Remove this code in a later version.
		if l.debugEnabled {
			l.mu.Lock()
			l.log("Debug", lv...)
			l.mu.Unlock()
		}
		return nil
	}
	ev := eventPool.Get().(*Event)
	ev.logger = l
	ev.enabled = l.debugEnabled
	ev.buf = ev.buf[:0]
	if ev.enabled {
		ev.buf = l.appendPrefixFunc(ev.buf, "Debug")
	}
	return ev
}
// Info returns a new Event for writing a Info level log.
//
// Passing one or more lv is deprecated. This is left for backward
// compatibility for a while and will not be supported in a future version;
// the signature of this method will then become
// func (l *LTSVLogger) Info() *Event
func (l *LTSVLogger) Info(lv ...LV) *Event {
	if len(lv) > 0 {
		// Legacy variadic path, kept for backward compatibility.
		l.mu.Lock()
		l.log("Info", lv...)
		l.mu.Unlock()
		return nil
	}
	ev := eventPool.Get().(*Event)
	ev.logger = l
	ev.enabled = true
	ev.buf = ev.buf[:0]
	ev.buf = l.appendPrefixFunc(ev.buf, "Info")
	return ev
}
// Deprecated: use Err instead.
//
// Error writes a log with the error level.
func (l *LTSVLogger) Error(lv ...LV) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.log("Error", lv...)
}
// Deprecated: use Err instead.
//
// ErrorWithStack writes a log and a stack with the error level.
func (l *LTSVLogger) ErrorWithStack(lv ...LV) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.log("Error", append(lv, LV{"stack", stack(2, l.stackBuf)})...)
}
// Err writes a log for an error with the error level.
// If err is a *ErrorEvent, this logs the error with labeled values.
// If err is not a *ErrorEvent, this logs the error with the label "err".
//
// Unlike the deprecated variadic methods, Err builds a fresh buffer per
// call rather than reusing the shared one.
func (l *LTSVLogger) Err(err error) {
	ee, ok := err.(*ErrorEvent)
	if !ok {
		ee = Err(err)
	}
	line := make([]byte, 0, 8192)
	line = l.appendPrefixFunc(line, "Error")
	line = ee.AppendErrorWithValues(line)
	line = append(line, '\n')
	_, _ = l.writer.Write(line)
}
// log formats one record (prefix followed by tab-separated label:value
// pairs) and writes it to the underlying writer. Callers must hold l.mu.
func (l *LTSVLogger) log(level string, lv ...LV) {
	// Note: To reuse the buffer, create an empty slice pointing to
	// the previously allocated buffer.
	buf := l.appendPrefixFunc(l.buf[:0], level)
	for _, labelAndVal := range lv {
		buf = append(buf, labelAndVal.L...)
		buf = append(buf, ':')
		buf = l.appendValueFunc(buf, labelAndVal.V)
		buf = append(buf, '\t')
	}
	// Replace the trailing tab with the record terminator. The previous
	// code indexed buf[len(buf)-1] unconditionally, which panics when lv
	// is empty and the prefix is empty too (both labels unset).
	if n := len(buf); n > 0 && buf[n-1] == '\t' {
		buf[n-1] = '\n'
	} else {
		buf = append(buf, '\n')
	}
	_, _ = l.writer.Write(buf)
	l.buf = buf
}
// appendPrefixFunc builds the prefix formatter for the configured time
// and level labels. An empty label omits the corresponding field.
func appendPrefixFunc(timeLabel, levelLabel string) AppendPrefixFunc {
    hasTime := timeLabel != ""
    hasLevel := levelLabel != ""
    switch {
    case hasTime && hasLevel:
        return func(buf []byte, level string) []byte {
            buf = append(buf, timeLabel...)
            buf = append(buf, ':')
            buf = appendUTCTime(buf, time.Now().UTC())
            buf = append(buf, '\t')
            buf = append(buf, levelLabel...)
            buf = append(buf, ':')
            buf = append(buf, level...)
            return append(buf, '\t')
        }
    case hasTime:
        return func(buf []byte, level string) []byte {
            buf = append(buf, timeLabel...)
            buf = append(buf, ':')
            buf = appendUTCTime(buf, time.Now().UTC())
            return append(buf, '\t')
        }
    case hasLevel:
        return func(buf []byte, level string) []byte {
            buf = append(buf, levelLabel...)
            buf = append(buf, ':')
            buf = append(buf, level...)
            return append(buf, '\t')
        }
    default:
        // Both labels empty: no prefix at all.
        return func(buf []byte, level string) []byte {
            return buf
        }
    }
}

// appendPrefix writes the default "time:...\tlevel:...\t" prefix.
func appendPrefix(buf []byte, level string) []byte {
    buf = append(buf, "time:"...)
    buf = appendUTCTime(buf, time.Now().UTC())
    buf = append(buf, "\tlevel:"...)
    buf = append(buf, level...)
    return append(buf, '\t')
}
func appendUTCTime(buf []byte, t time.Time) []byte {
t = t.UTC()
tmp := []byte("0000-00-00T00:00:00.000000Z")
year, month, day := t.Date()
hour, min, sec := t.Clock()
itoa(tmp[:4], year, 4)
itoa(tmp[5:7], int(month), 2)
itoa(tmp[8:10], day, 2)
itoa(tmp[11:13], hour, 2)
itoa(tmp[14:16], min, 2)
itoa(tmp[17:19], sec, 2)
itoa(tmp[20:26], t.Nanosecond()/1e3, 6)
return append(buf, tmp...)
}
// Cheap integer to fixed-width decimal ASCII. Give a negative width to avoid zero-padding.
// Copied from https://github.com/golang/go/blob/go1.8.1/src/log/log.go#L75-L90
// and modified for ltsvlog.
// It is user's responsibility to pass buf which len(buf) >= wid
func itoa(buf []byte, i int, wid int) {
// Assemble decimal in reverse order.
bp := wid - 1
for i >= 10 || wid > 1 {
wid--
q := i / 10
buf[bp] = byte('0' + i - q*10)
bp--
i = q
}
// i < 10
buf[bp] = byte('0' + i)
}
// escaper rewrites tab and newline characters so a value stays on a
// single LTSV line and inside a single field.
var escaper = strings.NewReplacer("\t", "\\t", "\n", "\\n")

// escape returns s with tab and newline characters replaced.
func escape(s string) string {
    return escaper.Replace(s)
}

// appendValue appends a text representation of v to buf and returns the
// extended buffer. Strings are escaped, numeric types are rendered with
// strconv, byte slices as hex, and anything else via fmt.
func appendValue(buf []byte, v interface{}) []byte {
    // NOTE: In a type switch, case byte and case uint8 cannot coexist,
    // and case rune and case uint cannot coexist.
    switch x := v.(type) {
    case nil:
        buf = append(buf, "<nil>"...)
    case string:
        buf = append(buf, escape(x)...)
    case int:
        buf = strconv.AppendInt(buf, int64(x), 10)
    case uint:
        buf = strconv.AppendUint(buf, uint64(x), 10)
    case int8:
        buf = strconv.AppendInt(buf, int64(x), 10)
    case int16:
        buf = strconv.AppendInt(buf, int64(x), 10)
    case int32:
        buf = strconv.AppendInt(buf, int64(x), 10)
    case int64:
        buf = strconv.AppendInt(buf, x, 10)
    case uint8:
        buf = strconv.AppendUint(buf, uint64(x), 10)
    case uint16:
        buf = strconv.AppendUint(buf, uint64(x), 10)
    case uint32:
        buf = strconv.AppendUint(buf, uint64(x), 10)
    case uint64:
        buf = strconv.AppendUint(buf, x, 10)
    case float32:
        buf = append(buf, strconv.FormatFloat(float64(x), 'g', -1, 32)...)
    case float64:
        buf = append(buf, strconv.FormatFloat(x, 'g', -1, 64)...)
    case bool:
        buf = strconv.AppendBool(buf, x)
    case uintptr:
        buf = strconv.AppendUint(buf, uint64(x), 10)
    case []byte:
        buf = appendHexBytes(buf, x)
    case fmt.Stringer:
        buf = append(buf, escape(x.String())...)
    default:
        buf = append(buf, escape(fmt.Sprintf("%+v", x))...)
    }
    return buf
}

// digits holds the lowercase hexadecimal digit characters.
var digits = []byte("0123456789abcdef")

// appendHexBytes appends v rendered as "0x"-prefixed lowercase hex.
func appendHexBytes(buf []byte, v []byte) []byte {
    buf = append(buf, "0x"...)
    for _, b := range v {
        buf = append(buf, digits[b>>4], digits[b&0x0f])
    }
    return buf
}

// appendHexByte appends b rendered as "0x"-prefixed lowercase hex.
func appendHexByte(buf []byte, b byte) []byte {
    buf = append(buf, "0x"...)
    return append(buf, digits[b>>4], digits[b&0x0f])
}
// Logger is the global logger.
// Replace it like:
// ltsvlog.Logger = ltsvlog.NewLTSVLogger(os.Stdout, false)
// Swapping the global logger is only safe before anything has been
// written through it; changing it while writing may misbehave.
var Logger = NewLTSVLogger(os.Stdout, true)

// Discard discards any logging outputs.
type Discard struct{}

// discardEvent returns a pooled event that is disabled, so all of its
// logging methods become no-ops.
func discardEvent() *Event {
    ev := eventPool.Get().(*Event)
    ev.logger = nil
    ev.enabled = false
    ev.buf = ev.buf[:0]
    return ev
}

// DebugEnabled always returns false.
func (*Discard) DebugEnabled() bool { return false }

// Debug prints nothing.
// Argument values are still evaluated even though they are unused,
// so guarding with if and DebugEnabled is recommended.
func (*Discard) Debug(lv ...LV) *Event {
    return discardEvent()
}

// Info prints nothing.
// Argument values are still evaluated even though they are unused.
func (*Discard) Info(lv ...LV) *Event {
    return discardEvent()
}

// Error prints nothing.
// Argument values are still evaluated even though they are unused.
func (*Discard) Error(lv ...LV) {}

// ErrorWithStack prints nothing.
// Argument values are still evaluated even though they are unused.
func (*Discard) ErrorWithStack(lv ...LV) {}

// Err prints nothing.
func (*Discard) Err(err error) {}
Escape backslashes in values
// Package ltsvlog is a minimalist logging library for writing logs in
// LTSV (Labeled Tab-separated Value) format.
// See http://ltsv.org/ for LTSV.
//
// This logging library has three log levels: Debug, Info and Error.
// The Info and Error levels are always enabled.
// You can disable the Debug level but only when you create a logger.
//
// Each log record is printed as one line. A line has multiple fields
// separated by a tab character. Each field has a label and a value
// which are separated by a colon ':' character.
//
// So you must not contain a colon character in labels.
// This is not checked in this library for performance reason,
// so it is your responsibility not to contain a colon character in labels.
//
// Newline and tab characters in values are escaped with "\\n" and "\\t"
// respectively.
package ltsvlog
import (
"fmt"
"io"
"os"
"strconv"
"strings"
"sync"
"time"
)
// Deprecated.
//
// LV represents a label L and a value V. A pair is rendered as one
// "L:V" field of an LTSV record.
type LV struct {
    L string
    V interface{}
}

// LogWriter is a LTSV logger interface
// (implemented by both *LTSVLogger and *Discard in this package).
type LogWriter interface {
    DebugEnabled() bool
    Debug(lv ...LV) *Event
    Info(lv ...LV) *Event
    Err(err error)
    Error(lv ...LV)
    ErrorWithStack(lv ...LV)
}

// LTSVLogger is a LTSV logger.
type LTSVLogger struct {
    writer io.Writer // destination for finished records
    debugEnabled bool // whether Debug level output is produced
    timeLabel string // label of the time field; empty omits it
    levelLabel string // label of the level field; empty omits it
    appendPrefixFunc AppendPrefixFunc // renders the time/level prefix
    appendValueFunc AppendValueFunc // formats a single value
    buf []byte // reusable record buffer; guarded by mu in log()
    stackBuf []byte // scratch buffer for deprecated ErrorWithStack
    mu sync.Mutex // serializes writes through the deprecated methods
}
// Option is the function type to set an option of LTSVLogger.
type Option func(l *LTSVLogger)

// Deprecated. This is not needed if you use LTSVLogger.Err instead of
// deprecated LTSVLogger.Error and LTSVLogger.ErrorWithStack.
//
// StackBufSize returns the option function to set the stack buffer size.
func StackBufSize(size int) Option {
    return func(lg *LTSVLogger) {
        lg.stackBuf = make([]byte, size)
    }
}

// SetTimeLabel returns the option function to set the time label.
// An empty label suppresses the time field entirely.
func SetTimeLabel(label string) Option {
    return func(lg *LTSVLogger) {
        lg.timeLabel = label
    }
}

// SetLevelLabel returns the option function to set the level label.
// An empty label suppresses the level field entirely.
func SetLevelLabel(label string) Option {
    return func(lg *LTSVLogger) {
        lg.levelLabel = label
    }
}

// Deprecated.
//
// SetAppendValue returns the option function to set the function
// used to append a value.
func SetAppendValue(f AppendValueFunc) Option {
    return func(lg *LTSVLogger) {
        lg.appendValueFunc = f
    }
}

// Deprecated. Use SetTimeLabel or SetLevelLabel instead.
//
// AppendPrefixFunc is a function type for appending a prefix
// for a log record to a byte buffer and returns the result buffer.
type AppendPrefixFunc func(buf []byte, level string) []byte

// Deprecated.
//
// AppendValueFunc is a function type for appending a value to
// a byte buffer and returns the result buffer.
type AppendValueFunc func(buf []byte, v interface{}) []byte

// Default labels used for the record prefix fields.
const (
    defaultTimeLabel  = "time"
    defaultLevelLabel = "level"
)

// defaultAppendPrefixFunc renders the standard "time:...\tlevel:...\t" prefix.
var defaultAppendPrefixFunc = appendPrefixFunc(defaultTimeLabel, defaultLevelLabel)
// NewLTSVLogger creates a LTSV logger with the default time and value format.
//
// The folloing two values are prepended to each log line.
//
// The first value is the current time, and has the default label "time".
// The time format is RFC3339 with microseconds in UTC timezone.
// This format is the same as "2006-01-02T15:04:05.000000Z" in the
// go time format https://golang.org/pkg/time/#Time.Format
//
// The second value is the log level with the default label "level".
func NewLTSVLogger(w io.Writer, debugEnabled bool, options ...Option) *LTSVLogger {
l := <SVLogger{
writer: w,
debugEnabled: debugEnabled,
timeLabel: defaultTimeLabel,
levelLabel: defaultLevelLabel,
appendPrefixFunc: defaultAppendPrefixFunc,
appendValueFunc: appendValue,
buf: make([]byte, 1024),
stackBuf: make([]byte, 8192),
}
for _, o := range options {
o(l)
}
if l.timeLabel != defaultTimeLabel || l.levelLabel != defaultLevelLabel {
l.appendPrefixFunc = appendPrefixFunc(l.timeLabel, l.levelLabel)
}
return l
}
// Deprecated. Use NewLTSVLogger with options instead.
//
// NewLTSVLoggerCustomFormat creates a LTSV logger with the buffer size for
// filling stack traces and user-supplied functions for appending a log
// record prefix and appending a log value.
func NewLTSVLoggerCustomFormat(w io.Writer, debugEnabled bool, stackBufSize int, appendPrefixFunc AppendPrefixFunc, appendValueFunc AppendValueFunc) *LTSVLogger {
if appendPrefixFunc == nil {
appendPrefixFunc = appendPrefix
}
if appendValueFunc == nil {
appendValueFunc = appendValue
}
return <SVLogger{
writer: w,
debugEnabled: debugEnabled,
appendPrefixFunc: appendPrefixFunc,
appendValueFunc: appendValueFunc,
stackBuf: make([]byte, stackBufSize),
}
}
// DebugEnabled reports whether the debug level is enabled.
// Use it to skip the cost of building Debug arguments:
//
// if ltsvlog.Logger.DebugEnabled() {
//     ltsvlog.Logger.Debug().String("label1", someSlowFunction()).Log()
// }
func (l *LTSVLogger) DebugEnabled() bool {
    return l.debugEnabled
}

// Debug returns a new Event for writing a Debug level log.
//
// Argument values are evaluated even when the debug level is disabled,
// so guard calls with if and DebugEnabled.
//
// Passing one or more lv arguments is deprecated. This form is kept for
// backward compatibility for a while and will not be supported in a
// future version; the signature will eventually become
// func (l *LTSVLogger) Debug() *Event
func (l *LTSVLogger) Debug(lv ...LV) *Event {
    if len(lv) > 0 {
        // Deprecated formatting path, retained for old callers.
        if l.debugEnabled {
            l.mu.Lock()
            l.log("Debug", lv...)
            l.mu.Unlock()
        }
        return nil
    }
    ev := eventPool.Get().(*Event)
    ev.logger = l
    ev.enabled = l.debugEnabled
    ev.buf = ev.buf[:0]
    if ev.enabled {
        ev.buf = l.appendPrefixFunc(ev.buf, "Debug")
    }
    return ev
}
// Info returns a new Event for writing an Info level log.
//
// Passing one or more lv arguments is deprecated. This form is kept for
// backward compatibility for a while and will not be supported in a
// future version; the signature will eventually become
// func (l *LTSVLogger) Info() *Event
func (l *LTSVLogger) Info(lv ...LV) *Event {
    if len(lv) > 0 {
        // Deprecated formatting path, retained for old callers.
        l.mu.Lock()
        l.log("Info", lv...)
        l.mu.Unlock()
        return nil
    }
    ev := eventPool.Get().(*Event)
    ev.logger = l
    ev.enabled = true
    ev.buf = l.appendPrefixFunc(ev.buf[:0], "Info")
    return ev
}
// Deprecated. Use Err instead.
//
// Error writes a log with the error level.
func (l *LTSVLogger) Error(lv ...LV) {
    l.mu.Lock()
    l.log("Error", lv...)
    l.mu.Unlock()
}

// Deprecated. Use Err instead.
//
// ErrorWithStack writes a log and a stack with the error level.
func (l *LTSVLogger) ErrorWithStack(lv ...LV) {
    l.mu.Lock()
    // Copy lv into a fresh slice before appending the stack entry.
    // Appending directly to lv could overwrite memory in the caller's
    // backing array whenever cap(lv) > len(lv).
    args := make([]LV, len(lv), len(lv)+1)
    copy(args, lv)
    args = append(args, LV{"stack", stack(2, l.stackBuf)})
    l.log("Error", args...)
    l.mu.Unlock()
}
// Err writes a log for an error with the error level.
// If err is a *ErrorEvent, this logs the error with labeled values.
// If err is not a *ErrorEvent, this logs the error with the label "err".
func (l *LTSVLogger) Err(err error) {
    ee, ok := err.(*ErrorEvent)
    if !ok {
        ee = Err(err)
    }
    // A fresh buffer per call keeps this method lock-free.
    line := make([]byte, 0, 8192)
    line = l.appendPrefixFunc(line, "Error")
    line = ee.AppendErrorWithValues(line)
    line = append(line, '\n')
    _, _ = l.writer.Write(line)
}
// log formats the label/value pairs as one LTSV record and writes it to
// the underlying writer. Callers must hold l.mu (see Error/Info/Debug).
func (l *LTSVLogger) log(level string, lv ...LV) {
    // Note: To reuse the buffer, create an empty slice pointing to
    // the previously allocated buffer.
    buf := l.appendPrefixFunc(l.buf[:0], level)
    for _, labelAndVal := range lv {
        buf = append(buf, labelAndVal.L...)
        buf = append(buf, ':')
        buf = l.appendValueFunc(buf, labelAndVal.V)
        buf = append(buf, '\t')
    }
    if len(buf) > 0 {
        // Replace the trailing tab with the record-ending newline.
        buf[len(buf)-1] = '\n'
    } else {
        // Guard against an index-out-of-range panic when both the time
        // and level labels are empty and no label/value pairs were given.
        buf = append(buf, '\n')
    }
    _, _ = l.writer.Write(buf)
    l.buf = buf
}
// appendPrefixFunc builds the prefix formatter for the configured time
// and level labels. An empty label omits the corresponding field.
func appendPrefixFunc(timeLabel, levelLabel string) AppendPrefixFunc {
    hasTime := timeLabel != ""
    hasLevel := levelLabel != ""
    switch {
    case hasTime && hasLevel:
        return func(buf []byte, level string) []byte {
            buf = append(buf, timeLabel...)
            buf = append(buf, ':')
            buf = appendUTCTime(buf, time.Now().UTC())
            buf = append(buf, '\t')
            buf = append(buf, levelLabel...)
            buf = append(buf, ':')
            buf = append(buf, level...)
            return append(buf, '\t')
        }
    case hasTime:
        return func(buf []byte, level string) []byte {
            buf = append(buf, timeLabel...)
            buf = append(buf, ':')
            buf = appendUTCTime(buf, time.Now().UTC())
            return append(buf, '\t')
        }
    case hasLevel:
        return func(buf []byte, level string) []byte {
            buf = append(buf, levelLabel...)
            buf = append(buf, ':')
            buf = append(buf, level...)
            return append(buf, '\t')
        }
    default:
        // Both labels empty: no prefix at all.
        return func(buf []byte, level string) []byte {
            return buf
        }
    }
}

// appendPrefix writes the default "time:...\tlevel:...\t" prefix.
func appendPrefix(buf []byte, level string) []byte {
    buf = append(buf, "time:"...)
    buf = appendUTCTime(buf, time.Now().UTC())
    buf = append(buf, "\tlevel:"...)
    buf = append(buf, level...)
    return append(buf, '\t')
}
func appendUTCTime(buf []byte, t time.Time) []byte {
t = t.UTC()
tmp := []byte("0000-00-00T00:00:00.000000Z")
year, month, day := t.Date()
hour, min, sec := t.Clock()
itoa(tmp[:4], year, 4)
itoa(tmp[5:7], int(month), 2)
itoa(tmp[8:10], day, 2)
itoa(tmp[11:13], hour, 2)
itoa(tmp[14:16], min, 2)
itoa(tmp[17:19], sec, 2)
itoa(tmp[20:26], t.Nanosecond()/1e3, 6)
return append(buf, tmp...)
}
// Cheap integer to fixed-width decimal ASCII. Give a negative width to avoid zero-padding.
// Copied from https://github.com/golang/go/blob/go1.8.1/src/log/log.go#L75-L90
// and modified for ltsvlog.
// It is user's responsibility to pass buf which len(buf) >= wid
func itoa(buf []byte, i int, wid int) {
// Assemble decimal in reverse order.
bp := wid - 1
for i >= 10 || wid > 1 {
wid--
q := i / 10
buf[bp] = byte('0' + i - q*10)
bp--
i = q
}
// i < 10
buf[bp] = byte('0' + i)
}
// escaper rewrites tab, newline and backslash characters so a value
// stays on a single LTSV line and round-trips unambiguously.
var escaper = strings.NewReplacer("\t", "\\t", "\n", "\\n", "\\", "\\\\")

// escape returns s with tab, newline and backslash characters replaced.
func escape(s string) string {
    return escaper.Replace(s)
}

// appendValue appends a text representation of v to buf and returns the
// extended buffer. Strings are escaped, numeric types are rendered with
// strconv, byte slices as hex, and anything else via fmt.
func appendValue(buf []byte, v interface{}) []byte {
    // NOTE: In a type switch, case byte and case uint8 cannot coexist,
    // and case rune and case uint cannot coexist.
    switch x := v.(type) {
    case nil:
        buf = append(buf, "<nil>"...)
    case string:
        buf = append(buf, escape(x)...)
    case int:
        buf = strconv.AppendInt(buf, int64(x), 10)
    case uint:
        buf = strconv.AppendUint(buf, uint64(x), 10)
    case int8:
        buf = strconv.AppendInt(buf, int64(x), 10)
    case int16:
        buf = strconv.AppendInt(buf, int64(x), 10)
    case int32:
        buf = strconv.AppendInt(buf, int64(x), 10)
    case int64:
        buf = strconv.AppendInt(buf, x, 10)
    case uint8:
        buf = strconv.AppendUint(buf, uint64(x), 10)
    case uint16:
        buf = strconv.AppendUint(buf, uint64(x), 10)
    case uint32:
        buf = strconv.AppendUint(buf, uint64(x), 10)
    case uint64:
        buf = strconv.AppendUint(buf, x, 10)
    case float32:
        buf = append(buf, strconv.FormatFloat(float64(x), 'g', -1, 32)...)
    case float64:
        buf = append(buf, strconv.FormatFloat(x, 'g', -1, 64)...)
    case bool:
        buf = strconv.AppendBool(buf, x)
    case uintptr:
        buf = strconv.AppendUint(buf, uint64(x), 10)
    case []byte:
        buf = appendHexBytes(buf, x)
    case fmt.Stringer:
        buf = append(buf, escape(x.String())...)
    default:
        buf = append(buf, escape(fmt.Sprintf("%+v", x))...)
    }
    return buf
}

// digits holds the lowercase hexadecimal digit characters.
var digits = []byte("0123456789abcdef")

// appendHexBytes appends v rendered as "0x"-prefixed lowercase hex.
func appendHexBytes(buf []byte, v []byte) []byte {
    buf = append(buf, "0x"...)
    for _, b := range v {
        buf = append(buf, digits[b>>4], digits[b&0x0f])
    }
    return buf
}

// appendHexByte appends b rendered as "0x"-prefixed lowercase hex.
func appendHexByte(buf []byte, b byte) []byte {
    buf = append(buf, "0x"...)
    return append(buf, digits[b>>4], digits[b&0x0f])
}
// Logger is the global logger.
// Replace it like:
// ltsvlog.Logger = ltsvlog.NewLTSVLogger(os.Stdout, false)
// Swapping the global logger is only safe before anything has been
// written through it; changing it while writing may misbehave.
var Logger = NewLTSVLogger(os.Stdout, true)

// Discard discards any logging outputs.
type Discard struct{}

// discardEvent returns a pooled event that is disabled, so all of its
// logging methods become no-ops.
func discardEvent() *Event {
    ev := eventPool.Get().(*Event)
    ev.logger = nil
    ev.enabled = false
    ev.buf = ev.buf[:0]
    return ev
}

// DebugEnabled always returns false.
func (*Discard) DebugEnabled() bool { return false }

// Debug prints nothing.
// Argument values are still evaluated even though they are unused,
// so guarding with if and DebugEnabled is recommended.
func (*Discard) Debug(lv ...LV) *Event {
    return discardEvent()
}

// Info prints nothing.
// Argument values are still evaluated even though they are unused.
func (*Discard) Info(lv ...LV) *Event {
    return discardEvent()
}

// Error prints nothing.
// Argument values are still evaluated even though they are unused.
func (*Discard) Error(lv ...LV) {}

// ErrorWithStack prints nothing.
// Argument values are still evaluated even though they are unused.
func (*Discard) ErrorWithStack(lv ...LV) {}

// Err prints nothing.
func (*Discard) Err(err error) {}
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logging
import (
"fmt"
"strconv"
"strings"
"sync"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
)
var _ = instrumentation.SIGDescribe("Logging soak [Performance] [Slow] [Disruptive]", func() {
    f := framework.NewDefaultFramework("logging-soak")
    // Not a global constant (irrelevant outside this test), also not a parameter (if you want more logs, use --scale=).
    kbRateInSeconds := 1 * time.Second
    totalLogTime := 2 * time.Minute
    // This test is designed to run and confirm that logs are being generated at a large scale, and that they can be grabbed by the kubelet.
    // By running it repeatedly in the background, you can simulate large collections of chatty containers.
    // This can expose problems in your docker configuration (logging), log searching infrastructure, to tune deployments to match high load
    // scenarios. TODO jayunit100 add this to the kube CI in a follow on infra patch.
    // Returns scale (how many waves of pods).
    // Returns wave interval (how many seconds to wait before dumping the next wave of pods).
    readConfig := func() (int, time.Duration) {
        // Read in configuration settings, reasonable defaults.
        scale := framework.TestContext.LoggingSoak.Scale
        if framework.TestContext.LoggingSoak.Scale == 0 {
            scale = 1
            framework.Logf("Overriding default scale value of zero to %d", scale)
        }
        milliSecondsBetweenWaves := framework.TestContext.LoggingSoak.MilliSecondsBetweenWaves
        if milliSecondsBetweenWaves == 0 {
            milliSecondsBetweenWaves = 5000
            framework.Logf("Overriding default milliseconds value of zero to %d", milliSecondsBetweenWaves)
        }
        return scale, time.Duration(milliSecondsBetweenWaves) * time.Millisecond
    }
    scale, millisecondsBetweenWaves := readConfig()
    It(fmt.Sprintf("should survive logging 1KB every %v seconds, for a duration of %v, scaling up to %v pods per node", kbRateInSeconds, totalLogTime, scale), func() {
        defer GinkgoRecover()
        var wg sync.WaitGroup
        wg.Add(scale)
        for i := 0; i < scale; i++ {
            // Bug fix: pass i as an argument. The goroutine outlives the
            // loop iteration, and (before Go 1.22) all iterations shared a
            // single loop variable, so reading i inside the closure raced
            // with the loop and could name/log the wrong wave.
            go func(waveIdx int) {
                defer wg.Done()
                wave := fmt.Sprintf("wave%v", strconv.Itoa(waveIdx))
                framework.Logf("Starting logging soak, wave = %v", wave)
                RunLogPodsWithSleepOf(f, kbRateInSeconds, wave, totalLogTime)
                framework.Logf("Completed logging soak, wave %v", waveIdx)
            }(i)
            // Niceness.
            time.Sleep(millisecondsBetweenWaves)
        }
        framework.Logf("Waiting on all %v logging soak waves to complete", scale)
        wg.Wait()
    })
})
// RunLogPodsWithSleepOf creates a pod on every node, logs continuously (with "sleep" pauses), and verifies that the log string
// was produced in each and every pod at least once. The final arg is the timeout for the test to verify all the pods got logs.
func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname string, timeout time.Duration) {
    // One logging pod is created per ready, schedulable node.
    nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
    totalPods := len(nodes.Items)
    Expect(totalPods).NotTo(Equal(0))
    kilobyte := strings.Repeat("logs-123", 128) // 8*128=1024 = 1KB of text.
    appName := "logging-soak" + podname
    // NOTE(review): "podlables" looks like a typo for "podLabels"; kept as-is.
    podlables := f.CreatePodsPerNodeForSimpleApp(
        appName,
        // Each pod runs busybox emitting the 1KB marker text, then
        // sleeping for the requested interval, forever.
        func(n v1.Node) v1.PodSpec {
            return v1.PodSpec{
                Containers: []v1.Container{{
                    Name: "logging-soak",
                    Image: "busybox",
                    Args: []string{
                        "/bin/sh",
                        "-c",
                        fmt.Sprintf("while true ; do echo %v ; sleep %v; done", kilobyte, sleep.Seconds()),
                    },
                }},
                NodeName: n.Name,
                RestartPolicy: v1.RestartPolicyAlways,
            }
        },
        totalPods,
    )
    // Verify each pod reaches a running/succeeded phase and has emitted
    // the marker string at least once to stdout.
    logSoakVerification := f.NewClusterVerification(
        f.Namespace,
        framework.PodStateVerification{
            Selectors: podlables,
            ValidPhases: []v1.PodPhase{v1.PodRunning, v1.PodSucceeded},
            // we don't validate total log data, since there is no guarantee all logs will be stored forever.
            // instead, we just validate that some logs are being created in std out.
            Verify: func(p v1.Pod) (bool, error) {
                s, err := framework.LookForStringInLog(f.Namespace.Name, p.Name, "logging-soak", "logs-123", 1*time.Second)
                return s != "", err
            },
        },
    )
    largeClusterForgiveness := time.Duration(len(nodes.Items)/5) * time.Second // i.e. a 100 node cluster gets an extra 20 seconds to complete.
    pods, err := logSoakVerification.WaitFor(totalPods, timeout+largeClusterForgiveness)
    if err != nil {
        framework.Failf("Error in wait... %v", err)
    } else if len(pods) < totalPods {
        framework.Failf("Only got %v out of %v", len(pods), totalPods)
    }
}
UPSTREAM: <drop>: Filter out a log message from output that blocks dry-run
This is fixed in 1.12 so the commit can safely be dropped.
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logging
import (
"fmt"
"strconv"
"strings"
"sync"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
)
var _ = instrumentation.SIGDescribe("Logging soak [Performance] [Slow] [Disruptive]", func() {
    f := framework.NewDefaultFramework("logging-soak")
    // Not a global constant (irrelevant outside this test), also not a parameter (if you want more logs, use --scale=).
    kbRateInSeconds := 1 * time.Second
    totalLogTime := 2 * time.Minute
    // This test is designed to run and confirm that logs are being generated at a large scale, and that they can be grabbed by the kubelet.
    // By running it repeatedly in the background, you can simulate large collections of chatty containers.
    // This can expose problems in your docker configuration (logging), log searching infrastructure, to tune deployments to match high load
    // scenarios. TODO jayunit100 add this to the kube CI in a follow on infra patch.
    // Returns scale (how many waves of pods).
    // Returns wave interval (how many seconds to wait before dumping the next wave of pods).
    readConfig := func() (int, time.Duration) {
        // Read in configuration settings, reasonable defaults.
        scale := framework.TestContext.LoggingSoak.Scale
        if framework.TestContext.LoggingSoak.Scale == 0 {
            scale = 1
        }
        milliSecondsBetweenWaves := framework.TestContext.LoggingSoak.MilliSecondsBetweenWaves
        if milliSecondsBetweenWaves == 0 {
            milliSecondsBetweenWaves = 5000
        }
        return scale, time.Duration(milliSecondsBetweenWaves) * time.Millisecond
    }
    scale, millisecondsBetweenWaves := readConfig()
    It(fmt.Sprintf("should survive logging 1KB every %v seconds, for a duration of %v, scaling up to %v pods per node", kbRateInSeconds, totalLogTime, scale), func() {
        defer GinkgoRecover()
        var wg sync.WaitGroup
        wg.Add(scale)
        for i := 0; i < scale; i++ {
            // Bug fix: pass i as an argument. The goroutine outlives the
            // loop iteration, and (before Go 1.22) all iterations shared a
            // single loop variable, so reading i inside the closure raced
            // with the loop and could name/log the wrong wave.
            go func(waveIdx int) {
                defer wg.Done()
                wave := fmt.Sprintf("wave%v", strconv.Itoa(waveIdx))
                framework.Logf("Starting logging soak, wave = %v", wave)
                RunLogPodsWithSleepOf(f, kbRateInSeconds, wave, totalLogTime)
                framework.Logf("Completed logging soak, wave %v", waveIdx)
            }(i)
            // Niceness.
            time.Sleep(millisecondsBetweenWaves)
        }
        framework.Logf("Waiting on all %v logging soak waves to complete", scale)
        wg.Wait()
    })
})
// RunLogPodsWithSleepOf creates a pod on every node, logs continuously (with "sleep" pauses), and verifies that the log string
// was produced in each and every pod at least once. The final arg is the timeout for the test to verify all the pods got logs.
func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname string, timeout time.Duration) {
    // One logging pod is created per ready, schedulable node.
    nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
    totalPods := len(nodes.Items)
    Expect(totalPods).NotTo(Equal(0))
    kilobyte := strings.Repeat("logs-123", 128) // 8*128=1024 = 1KB of text.
    appName := "logging-soak" + podname
    // NOTE(review): "podlables" looks like a typo for "podLabels"; kept as-is.
    podlables := f.CreatePodsPerNodeForSimpleApp(
        appName,
        // Each pod runs busybox emitting the 1KB marker text, then
        // sleeping for the requested interval, forever.
        func(n v1.Node) v1.PodSpec {
            return v1.PodSpec{
                Containers: []v1.Container{{
                    Name: "logging-soak",
                    Image: "busybox",
                    Args: []string{
                        "/bin/sh",
                        "-c",
                        fmt.Sprintf("while true ; do echo %v ; sleep %v; done", kilobyte, sleep.Seconds()),
                    },
                }},
                NodeName: n.Name,
                RestartPolicy: v1.RestartPolicyAlways,
            }
        },
        totalPods,
    )
    // Verify each pod reaches a running/succeeded phase and has emitted
    // the marker string at least once to stdout.
    logSoakVerification := f.NewClusterVerification(
        f.Namespace,
        framework.PodStateVerification{
            Selectors: podlables,
            ValidPhases: []v1.PodPhase{v1.PodRunning, v1.PodSucceeded},
            // we don't validate total log data, since there is no guarantee all logs will be stored forever.
            // instead, we just validate that some logs are being created in std out.
            Verify: func(p v1.Pod) (bool, error) {
                s, err := framework.LookForStringInLog(f.Namespace.Name, p.Name, "logging-soak", "logs-123", 1*time.Second)
                return s != "", err
            },
        },
    )
    largeClusterForgiveness := time.Duration(len(nodes.Items)/5) * time.Second // i.e. a 100 node cluster gets an extra 20 seconds to complete.
    pods, err := logSoakVerification.WaitFor(totalPods, timeout+largeClusterForgiveness)
    if err != nil {
        framework.Failf("Error in wait... %v", err)
    } else if len(pods) < totalPods {
        framework.Failf("Only got %v out of %v", len(pods), totalPods)
    }
}
|
package misc
import (
"net/http"
"os"
"strings"
)
type justFilesFilesystem struct {
Fs http.FileSystem
}
func (fs justFilesFilesystem) Open(name string) (http.File, error) {
f, err := fs.Fs.Open(name)
if err != nil {
return nil, err
}
stat, err := f.Stat()
if stat.IsDir() {
err = f.Close()
if err != nil {
return nil, err
}
return nil, os.ErrNotExist
}
return f, nil
}
// ServePrefixedFiles returns a handler that strips prefix from the
// request path and serves the remainder from root (files only).
// Requests whose path does not start with prefix get a 404.
func ServePrefixedFiles(prefix string, root http.FileSystem) http.HandlerFunc {
    // The file server is stateless, so it can be built once up front.
    server := http.FileServer(justFilesFilesystem{Fs: root})
    return func(w http.ResponseWriter, r *http.Request) {
        stripped := strings.TrimPrefix(r.URL.Path, prefix)
        if len(stripped) == len(r.URL.Path) {
            // Prefix was not present in the path.
            http.NotFound(w, r)
            return
        }
        r.URL.Path = stripped
        server.ServeHTTP(w, r)
    }
}
func ServeFile(path string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, path)
}
}
// ServeFolder returns a handler that serves files (never directory
// listings) from the given filesystem.
func ServeFolder(root http.FileSystem) http.HandlerFunc {
    server := http.FileServer(justFilesFilesystem{Fs: root})
    return func(w http.ResponseWriter, r *http.Request) {
        server.ServeHTTP(w, r)
    }
}
update
package misc
import (
"net/http"
"os"
"path"
"strings"
)
type fileFilesystem struct {
Fs http.FileSystem
}
func (fs fileFilesystem) Open(name string) (http.File, error) {
f, err := fs.Fs.Open(name)
if err != nil {
return nil, err
}
stat, err := f.Stat()
if stat.IsDir() {
err = f.Close()
if err != nil {
return nil, err
}
return fs.Open(path.Join(name, "index.html"))
}
return f, nil
}
// ServePrefixedFiles returns a handler that strips prefix from the
// request path and serves the remainder from root, substituting
// index.html for directories. Requests whose path does not start with
// prefix get a 404.
func ServePrefixedFiles(prefix string, root http.FileSystem) http.HandlerFunc {
    // The file server is stateless, so it can be built once up front.
    server := http.FileServer(fileFilesystem{Fs: root})
    return func(w http.ResponseWriter, r *http.Request) {
        stripped := strings.TrimPrefix(r.URL.Path, prefix)
        if len(stripped) == len(r.URL.Path) {
            // Prefix was not present in the path.
            http.NotFound(w, r)
            return
        }
        r.URL.Path = stripped
        server.ServeHTTP(w, r)
    }
}
func ServeFile(path string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, path)
}
}
func ServeFolder(root string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
file := path.Join(root, r.URL.Path)
fi, err := os.Stat(file)
if err == nil {
if fi.IsDir() {
ii, err := os.Stat(path.Join(file, "index.html"))
if err != nil || ii.IsDir() {
http.Error(w, http.StatusText(403), 403)
}
}
}
http.ServeFile(w, r, file)
}
}
|
package acceptance
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/DATA-DOG/godog"
"github.com/DATA-DOG/godog/gherkin"
"github.com/ironman-project/ironman/testutils"
"github.com/rendon/testcli"
)
// Paths produced by the generate steps, shared across step functions.
var (
    generatedPath    string
    generateFilePath string
)

// aTemplateToGenerateInstalledWithURL installs the template at URL into
// the test ironman home so that later generate steps can run against it.
func aTemplateToGenerateInstalledWithURL(URL string) error {
    testcli.Run(testutils.ExecutablePath(), "install", "--ironman-home="+ironmanTestDir, URL)
    if testcli.Success() {
        return nil
    }
    return fmt.Errorf("Failed to install test template %s", URL)
}
// generateRunsWithIDGeneratorIDAndFlags runs the generate command for
// templateID:generatorID into a fresh target path with the given --set flags.
// The exit state is checked by later steps.
func generateRunsWithIDGeneratorIDAndFlags(templateID, generatorID, flags string) error {
    generatedPath = filepath.Join(ironmanTestDir, "test")
    target := templateID + ":" + generatorID
    testcli.Run(testutils.ExecutablePath(), "generate", target, generatedPath, "--ironman-home="+ironmanTestDir, "--set", flags)
    return nil
}
// theGenerateProcessStateShouldBeSuccess asserts that the last generate
// command exited successfully, reporting its output otherwise.
func theGenerateProcessStateShouldBeSuccess() error {
    if !testcli.Success() {
        // Fixed grammar in the failure message ("did not succeeded").
        return fmt.Errorf("Generate command did not succeed %s\n %s %s", testcli.Stdout(), testcli.Error(), testcli.Stderr())
    }
    return nil
}
// theGenerateOutputShouldContainAnd asserts that stdout of the last run
// contains both expected fragments.
func theGenerateOutputShouldContainAnd(out1, out2 string) error {
	if testcli.StdoutContains(out1) && testcli.StdoutContains(out2) {
		return nil
	}
	return fmt.Errorf("output => %s", testcli.Stdout())
}
// aFileUnderTheGeneratedPathShouldContain verifies that file exists under the
// generated path and that its content matches contents exactly.
func aFileUnderTheGeneratedPathShouldContain(file, contents string) error {
	filePath := filepath.Join(generatedPath, file)
	if !testutils.FileExists(filePath) {
		// Message fixed: previously read "Expected file don't exists".
		return fmt.Errorf("expected file does not exist %s", filePath)
	}
	fileContent, err := ioutil.ReadFile(filePath)
	if err != nil {
		return fmt.Errorf("Failed to read file contents %s", err)
	}
	if string(fileContent) != contents {
		return fmt.Errorf("File content %s want %s", fileContent, contents)
	}
	return nil
}
// generateWithNonExistingIDRunsWithIDGeneratorID runs "generate" with a
// template/generator pair that is expected not to exist.
func generateWithNonExistingIDRunsWithIDGeneratorID(templateID, generatorID string) error {
	testcli.Run(
		testutils.ExecutablePath(),
		"generate", templateID+":"+generatorID, generatedPath,
		"--ironman-home="+ironmanTestDir,
	)
	return nil
}
// theGenerateWithNonExistingIDProcessStateShouldBeFailure verifies that the
// last generate run exited with a failure status.
func theGenerateWithNonExistingIDProcessStateShouldBeFailure() error {
	if !testcli.Failure() {
		// Message fixed: previously read "did not failed".
		return fmt.Errorf("generate command did not fail %s", testcli.Stdout())
	}
	return nil
}
// theGenerateWithNonExistingIDOutputShouldCointain asserts that stderr of the
// last run contains expectedOutput. NOTE(review): "Cointain" is a typo, but
// the name is referenced by the step registration, so it is kept.
func theGenerateWithNonExistingIDOutputShouldCointain(expectedOutput string) error {
	stderr := testcli.Stderr()
	if strings.Contains(stderr, expectedOutput) {
		return nil
	}
	return fmt.Errorf("output => %s", stderr)
}
func aTemplateToGenerateAFileInstalledWithURL(URL string) error {
testcli.Run(testutils.ExecutablePath(), "install", "--ironman-home="+ironmanTestDir, URL)
if !testcli.Success() {
return fmt.Errorf("Failed to install test template %s", URL)
}
return nil
}
// generateFileRunsWithIDGeneratorIDAndFileNameAndFlags runs "generate" in
// single-file mode, targeting <test-home>/testfile/<fileName>.
func generateFileRunsWithIDGeneratorIDAndFileNameAndFlags(templateID, generatorID, fileName, flags string) error {
	generateFilePath = filepath.Join(ironmanTestDir, "testfile", fileName)
	// Bug fix: the Mkdir error was silently discarded. MkdirAll also
	// tolerates an already-existing directory, which Mkdir does not.
	if err := os.MkdirAll(filepath.Dir(generateFilePath), os.ModePerm); err != nil {
		return fmt.Errorf("could not create directory for generated file: %s", err)
	}
	testcli.Run(testutils.ExecutablePath(), "generate", templateID+":"+generatorID, generateFilePath, "--ironman-home="+ironmanTestDir, "--set", flags)
	return nil
}
// theGenerateFileProcessStateShouldBeSuccess verifies that the last file
// generation run exited successfully, reporting outputs otherwise.
func theGenerateFileProcessStateShouldBeSuccess() error {
	if !testcli.Success() {
		// Message fixed: previously read "did not succeeded".
		return fmt.Errorf("generate command did not succeed %s\n %s %s", testcli.Stdout(), testcli.Error(), testcli.Stderr())
	}
	return nil
}
// theGenerateFileOutputShouldContainAnd asserts that stdout of the last file
// generation run contains both expected fragments.
func theGenerateFileOutputShouldContainAnd(out1, out2 string) error {
	if testcli.StdoutContains(out1) && testcli.StdoutContains(out2) {
		return nil
	}
	return fmt.Errorf("output => %s", testcli.Stdout())
}
// aFileFromAFileGeneratorUnderTheGeneratedPathShouldContain verifies that the
// generated file exists and matches the doc string from the feature exactly.
func aFileFromAFileGeneratorUnderTheGeneratedPathShouldContain(contents *gherkin.DocString) error {
	if !testutils.FileExists(generateFilePath) {
		// Message fixed: previously read "Expected file don't exists".
		return fmt.Errorf("expected file does not exist %s", generateFilePath)
	}
	fileContent, err := ioutil.ReadFile(generateFilePath)
	if err != nil {
		return fmt.Errorf("Failed to read file contents %s", err)
	}
	if string(fileContent) != contents.Content {
		return fmt.Errorf("File content \n%s\n want \n%s\n", fileContent, contents.Content)
	}
	return nil
}
//GenerateContext context for generate command
// It registers every step implementation in this file against its Gherkin
// pattern; quoted capture groups become the string arguments of each step.
func GenerateContext(s *godog.Suite) {
	s.Step(`^A template to generate installed with URL "([^"]*)"$`, aTemplateToGenerateInstalledWithURL)
	s.Step(`^Generate runs with ID "([^"]*)" generator ID "([^"]*)" and flags "([^"]*)"$`, generateRunsWithIDGeneratorIDAndFlags)
	s.Step(`^The generate process state should be success$`, theGenerateProcessStateShouldBeSuccess)
	s.Step(`^The generate output should contain "([^"]*)" and "([^"]*)"$`, theGenerateOutputShouldContainAnd)
	s.Step(`^A file "([^"]*)" under the generated path should contain "([^"]*)"$`, aFileUnderTheGeneratedPathShouldContain)
	s.Step(`^Generate with non existing ID runs with ID "([^"]*)" generator ID "([^"]*)"$`, generateWithNonExistingIDRunsWithIDGeneratorID)
	s.Step(`^The generate with non existing ID process state should be failure$`, theGenerateWithNonExistingIDProcessStateShouldBeFailure)
	s.Step(`^The generate with non existing ID output should cointain "([^"]*)"$`, theGenerateWithNonExistingIDOutputShouldCointain)
	s.Step(`^A template to generate a file installed with URL "([^"]*)"$`, aTemplateToGenerateAFileInstalledWithURL)
	s.Step(`^Generate file runs with ID "([^"]*)" generator ID "([^"]*)" and fileName "([^"]*)" and flags "([^"]*)"$`, generateFileRunsWithIDGeneratorIDAndFileNameAndFlags)
	s.Step(`^The generate file process state should be success$`, theGenerateFileProcessStateShouldBeSuccess)
	s.Step(`^The generate file output should contain "([^"]*)" and "([^"]*)"$`, theGenerateFileOutputShouldContainAnd)
	s.Step(`^A file under the generated path should contain$`, aFileFromAFileGeneratorUnderTheGeneratedPathShouldContain)
}
fix linting error
package acceptance
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/DATA-DOG/godog"
"github.com/DATA-DOG/godog/gherkin"
"github.com/ironman-project/ironman/testutils"
"github.com/rendon/testcli"
)
// generatedPath is the directory target of the most recent "generate" run;
// set by generateRunsWithIDGeneratorIDAndFlags and read by later steps.
var generatedPath string

// generateFilePath is the single-file target of the most recent file
// generation run; set by generateFileRunsWithIDGeneratorIDAndFileNameAndFlags.
var generateFilePath string
func aTemplateToGenerateInstalledWithURL(URL string) error {
testcli.Run(testutils.ExecutablePath(), "install", "--ironman-home="+ironmanTestDir, URL)
if !testcli.Success() {
return fmt.Errorf("Failed to install test template %s", URL)
}
return nil
}
// generateRunsWithIDGeneratorIDAndFlags runs "generate" for the given
// template/generator pair against <test-home>/test, passing flags via --set.
func generateRunsWithIDGeneratorIDAndFlags(templateID, generatorID, flags string) error {
	generatedPath = filepath.Join(ironmanTestDir, "test")
	target := templateID + ":" + generatorID
	testcli.Run(
		testutils.ExecutablePath(),
		"generate", target, generatedPath,
		"--ironman-home="+ironmanTestDir,
		"--set", flags,
	)
	return nil
}
// theGenerateProcessStateShouldBeSuccess verifies that the last generate run
// exited successfully, reporting stdout, error and stderr otherwise.
func theGenerateProcessStateShouldBeSuccess() error {
	if !testcli.Success() {
		// Message fixed: previously read "did not succeeded".
		return fmt.Errorf("generate command did not succeed %s\n %s %s", testcli.Stdout(), testcli.Error(), testcli.Stderr())
	}
	return nil
}
// theGenerateOutputShouldContainAnd asserts that stdout of the last run
// contains both expected fragments.
func theGenerateOutputShouldContainAnd(out1, out2 string) error {
	if testcli.StdoutContains(out1) && testcli.StdoutContains(out2) {
		return nil
	}
	return fmt.Errorf("output => %s", testcli.Stdout())
}
// aFileUnderTheGeneratedPathShouldContain verifies that file exists under the
// generated path and that its content matches contents exactly.
func aFileUnderTheGeneratedPathShouldContain(file, contents string) error {
	filePath := filepath.Join(generatedPath, file)
	if !testutils.FileExists(filePath) {
		// Message fixed: previously read "Expected file don't exists".
		return fmt.Errorf("expected file does not exist %s", filePath)
	}
	fileContent, err := ioutil.ReadFile(filePath)
	if err != nil {
		return fmt.Errorf("Failed to read file contents %s", err)
	}
	if string(fileContent) != contents {
		return fmt.Errorf("File content %s want %s", fileContent, contents)
	}
	return nil
}
// generateWithNonExistingIDRunsWithIDGeneratorID runs "generate" with a
// template/generator pair that is expected not to exist.
func generateWithNonExistingIDRunsWithIDGeneratorID(templateID, generatorID string) error {
	testcli.Run(
		testutils.ExecutablePath(),
		"generate", templateID+":"+generatorID, generatedPath,
		"--ironman-home="+ironmanTestDir,
	)
	return nil
}
// theGenerateWithNonExistingIDProcessStateShouldBeFailure verifies that the
// last generate run exited with a failure status.
func theGenerateWithNonExistingIDProcessStateShouldBeFailure() error {
	if !testcli.Failure() {
		// Message fixed: previously read "did not failed".
		return fmt.Errorf("generate command did not fail %s", testcli.Stdout())
	}
	return nil
}
// theGenerateWithNonExistingIDOutputShouldCointain asserts that stderr of the
// last run contains expectedOutput. NOTE(review): "Cointain" is a typo, but
// the name is referenced by the step registration, so it is kept.
func theGenerateWithNonExistingIDOutputShouldCointain(expectedOutput string) error {
	stderr := testcli.Stderr()
	if strings.Contains(stderr, expectedOutput) {
		return nil
	}
	return fmt.Errorf("output => %s", stderr)
}
func aTemplateToGenerateAFileInstalledWithURL(URL string) error {
testcli.Run(testutils.ExecutablePath(), "install", "--ironman-home="+ironmanTestDir, URL)
if !testcli.Success() {
return fmt.Errorf("Failed to install test template %s", URL)
}
return nil
}
// generateFileRunsWithIDGeneratorIDAndFileNameAndFlags runs "generate" in
// single-file mode, targeting <test-home>/testfile/<fileName>.
func generateFileRunsWithIDGeneratorIDAndFileNameAndFlags(templateID, generatorID, fileName, flags string) error {
	generateFilePath = filepath.Join(ironmanTestDir, "testfile", fileName)
	// Bug fix: the Mkdir error was silently discarded. MkdirAll also
	// tolerates an already-existing directory, which Mkdir does not.
	if err := os.MkdirAll(filepath.Dir(generateFilePath), os.ModePerm); err != nil {
		return fmt.Errorf("could not create directory for generated file: %s", err)
	}
	testcli.Run(testutils.ExecutablePath(), "generate", templateID+":"+generatorID, generateFilePath, "--ironman-home="+ironmanTestDir, "--set", flags)
	return nil
}
// theGenerateFileProcessStateShouldBeSuccess verifies that the last file
// generation run exited successfully, reporting outputs otherwise.
func theGenerateFileProcessStateShouldBeSuccess() error {
	if !testcli.Success() {
		// Message fixed: previously read "did not succeeded".
		return fmt.Errorf("generate command did not succeed %s\n %s %s", testcli.Stdout(), testcli.Error(), testcli.Stderr())
	}
	return nil
}
// theGenerateFileOutputShouldContainAnd asserts that stdout of the last file
// generation run contains both expected fragments.
func theGenerateFileOutputShouldContainAnd(out1, out2 string) error {
	if testcli.StdoutContains(out1) && testcli.StdoutContains(out2) {
		return nil
	}
	return fmt.Errorf("output => %s", testcli.Stdout())
}
// aFileFromAFileGeneratorUnderTheGeneratedPathShouldContain verifies that the
// generated file exists and matches the doc string from the feature exactly.
func aFileFromAFileGeneratorUnderTheGeneratedPathShouldContain(contents *gherkin.DocString) error {
	if !testutils.FileExists(generateFilePath) {
		// Message fixed: previously read "Expected file don't exists".
		return fmt.Errorf("expected file does not exist %s", generateFilePath)
	}
	fileContent, err := ioutil.ReadFile(generateFilePath)
	if err != nil {
		return fmt.Errorf("Failed to read file contents %s", err)
	}
	if string(fileContent) != contents.Content {
		return fmt.Errorf("file content \n%s\n want \n%s", fileContent, contents.Content)
	}
	return nil
}
//GenerateContext context for generate command
// It registers every step implementation in this file against its Gherkin
// pattern; quoted capture groups become the string arguments of each step.
func GenerateContext(s *godog.Suite) {
	s.Step(`^A template to generate installed with URL "([^"]*)"$`, aTemplateToGenerateInstalledWithURL)
	s.Step(`^Generate runs with ID "([^"]*)" generator ID "([^"]*)" and flags "([^"]*)"$`, generateRunsWithIDGeneratorIDAndFlags)
	s.Step(`^The generate process state should be success$`, theGenerateProcessStateShouldBeSuccess)
	s.Step(`^The generate output should contain "([^"]*)" and "([^"]*)"$`, theGenerateOutputShouldContainAnd)
	s.Step(`^A file "([^"]*)" under the generated path should contain "([^"]*)"$`, aFileUnderTheGeneratedPathShouldContain)
	s.Step(`^Generate with non existing ID runs with ID "([^"]*)" generator ID "([^"]*)"$`, generateWithNonExistingIDRunsWithIDGeneratorID)
	s.Step(`^The generate with non existing ID process state should be failure$`, theGenerateWithNonExistingIDProcessStateShouldBeFailure)
	s.Step(`^The generate with non existing ID output should cointain "([^"]*)"$`, theGenerateWithNonExistingIDOutputShouldCointain)
	s.Step(`^A template to generate a file installed with URL "([^"]*)"$`, aTemplateToGenerateAFileInstalledWithURL)
	s.Step(`^Generate file runs with ID "([^"]*)" generator ID "([^"]*)" and fileName "([^"]*)" and flags "([^"]*)"$`, generateFileRunsWithIDGeneratorIDAndFileNameAndFlags)
	s.Step(`^The generate file process state should be success$`, theGenerateFileProcessStateShouldBeSuccess)
	s.Step(`^The generate file output should contain "([^"]*)" and "([^"]*)"$`, theGenerateFileOutputShouldContainAnd)
	s.Step(`^A file under the generated path should contain$`, aFileFromAFileGeneratorUnderTheGeneratedPathShouldContain)
}
|
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tls
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/hmac"
"crypto/sha256"
"crypto/subtle"
"errors"
"io"
"golang.org/x/crypto/cryptobyte"
)
// sessionState contains the information that is serialized into a session
// ticket in order to later resume a connection.
// sessionState contains the information that is serialized into a session
// ticket in order to later resume a connection.
type sessionState struct {
	vers         uint16
	cipherSuite  uint16
	createdAt    uint64
	masterSecret []byte // opaque master_secret<1..2^16-1>;
	// struct { opaque certificate<1..2^24-1> } Certificate;
	certificates [][]byte // Certificate certificate_list<0..2^24-1>;
	// usedOldKey is true if the ticket from which this session came from
	// was encrypted with an older key and thus should be refreshed.
	usedOldKey bool
}

// marshal serializes the session state into the ticket wire format: version,
// cipher suite, creation time, a 16-bit length-prefixed master secret, and a
// 24-bit length-prefixed list of 24-bit length-prefixed certificates.
func (m *sessionState) marshal() []byte {
	var b cryptobyte.Builder
	b.AddUint16(m.vers)
	b.AddUint16(m.cipherSuite)
	addUint64(&b, m.createdAt)
	b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
		b.AddBytes(m.masterSecret)
	})
	b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
		for _, cert := range m.certificates {
			b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
				b.AddBytes(cert)
			})
		}
	})
	return b.BytesOrPanic()
}

// unmarshal parses a ticket produced by marshal, reporting whether the data
// was well formed. It resets every parsed field while preserving usedOldKey,
// which the caller sets from the ticket key lookup.
func (m *sessionState) unmarshal(data []byte) bool {
	*m = sessionState{usedOldKey: m.usedOldKey}
	s := cryptobyte.String(data)
	if ok := s.ReadUint16(&m.vers) &&
		// TLS 1.3 tickets use sessionStateTLS13 instead, so a ticket
		// claiming VersionTLS13 is rejected here.
		m.vers != VersionTLS13 &&
		s.ReadUint16(&m.cipherSuite) &&
		readUint64(&s, &m.createdAt) &&
		readUint16LengthPrefixed(&s, &m.masterSecret) &&
		len(m.masterSecret) != 0; !ok {
		return false
	}
	var certList cryptobyte.String
	if !s.ReadUint24LengthPrefixed(&certList) {
		return false
	}
	for !certList.Empty() {
		var cert []byte
		if !readUint24LengthPrefixed(&certList, &cert) {
			return false
		}
		m.certificates = append(m.certificates, cert)
	}
	// Any trailing bytes make the ticket invalid.
	return s.Empty()
}
// sessionStateTLS13 is the content of a TLS 1.3 session ticket. Its first
// version (revision = 0) doesn't carry any of the information needed for 0-RTT
// validation and the nonce is always empty.
// sessionStateTLS13 is the content of a TLS 1.3 session ticket. Its first
// version (revision = 0) doesn't carry any of the information needed for 0-RTT
// validation and the nonce is always empty.
type sessionStateTLS13 struct {
	// uint8 version = 0x0304;
	// uint8 revision = 0;
	cipherSuite      uint16
	createdAt        uint64
	resumptionSecret []byte      // opaque resumption_master_secret<1..2^8-1>;
	certificate      Certificate // CertificateEntry certificate_list<0..2^24-1>;
}

// marshal serializes the TLS 1.3 ticket: fixed version and revision bytes,
// cipher suite, creation time, an 8-bit length-prefixed resumption secret,
// and the certificate entry.
func (m *sessionStateTLS13) marshal() []byte {
	var b cryptobyte.Builder
	b.AddUint16(VersionTLS13)
	b.AddUint8(0) // revision
	b.AddUint16(m.cipherSuite)
	addUint64(&b, m.createdAt)
	b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
		b.AddBytes(m.resumptionSecret)
	})
	marshalCertificate(&b, m.certificate)
	return b.BytesOrPanic()
}

// unmarshal parses a ticket produced by marshal, reporting whether the data
// was well formed: version must be VersionTLS13, revision must be 0, the
// resumption secret must be non-empty, and no bytes may trail the certificate.
func (m *sessionStateTLS13) unmarshal(data []byte) bool {
	*m = sessionStateTLS13{}
	s := cryptobyte.String(data)
	var version uint16
	var revision uint8
	return s.ReadUint16(&version) &&
		version == VersionTLS13 &&
		s.ReadUint8(&revision) &&
		revision == 0 &&
		s.ReadUint16(&m.cipherSuite) &&
		readUint64(&s, &m.createdAt) &&
		readUint8LengthPrefixed(&s, &m.resumptionSecret) &&
		len(m.resumptionSecret) != 0 &&
		unmarshalCertificate(&s, &m.certificate) &&
		s.Empty()
}
// encryptTicket seals state into a session ticket using the first (current)
// ticket key. The ticket layout is:
//
//	key name (ticketKeyNameLen) | AES-CTR IV | ciphertext | HMAC-SHA256
//
// where the MAC covers everything that precedes it.
func (c *Conn) encryptTicket(state []byte) ([]byte, error) {
	if len(c.ticketKeys) == 0 {
		return nil, errors.New("tls: internal error: session ticket keys unavailable")
	}
	encrypted := make([]byte, ticketKeyNameLen+aes.BlockSize+len(state)+sha256.Size)
	keyName := encrypted[:ticketKeyNameLen]
	iv := encrypted[ticketKeyNameLen : ticketKeyNameLen+aes.BlockSize]
	macBytes := encrypted[len(encrypted)-sha256.Size:]
	// A fresh random IV per ticket.
	if _, err := io.ReadFull(c.config.rand(), iv); err != nil {
		return nil, err
	}
	key := c.ticketKeys[0]
	copy(keyName, key.keyName[:])
	block, err := aes.NewCipher(key.aesKey[:])
	if err != nil {
		return nil, errors.New("tls: failed to create cipher while encrypting ticket: " + err.Error())
	}
	cipher.NewCTR(block, iv).XORKeyStream(encrypted[ticketKeyNameLen+aes.BlockSize:], state)
	// MAC over key name, IV and ciphertext; written into the trailing bytes.
	mac := hmac.New(sha256.New, key.hmacKey[:])
	mac.Write(encrypted[:len(encrypted)-sha256.Size])
	mac.Sum(macBytes[:0])
	return encrypted, nil
}
// decryptTicket authenticates and decrypts a session ticket. It returns a nil
// plaintext if the ticket is too short, names an unknown key, or fails the
// MAC check. usedOldKey reports whether the ticket was sealed with a key
// other than the current (first) one, signaling that it should be refreshed.
func (c *Conn) decryptTicket(encrypted []byte) (plaintext []byte, usedOldKey bool) {
	if len(encrypted) < ticketKeyNameLen+aes.BlockSize+sha256.Size {
		return nil, false
	}
	// Slice the ticket into its fixed layout:
	// key name | IV | ciphertext | MAC.
	keyName := encrypted[:ticketKeyNameLen]
	iv := encrypted[ticketKeyNameLen : ticketKeyNameLen+aes.BlockSize]
	macBytes := encrypted[len(encrypted)-sha256.Size:]
	ciphertext := encrypted[ticketKeyNameLen+aes.BlockSize : len(encrypted)-sha256.Size]
	// Find which configured key sealed this ticket.
	keyIndex := -1
	for i, candidateKey := range c.ticketKeys {
		if bytes.Equal(keyName, candidateKey.keyName[:]) {
			keyIndex = i
			break
		}
	}
	if keyIndex == -1 {
		return nil, false
	}
	key := &c.ticketKeys[keyIndex]
	// Verify the MAC in constant time before decrypting.
	mac := hmac.New(sha256.New, key.hmacKey[:])
	mac.Write(encrypted[:len(encrypted)-sha256.Size])
	expected := mac.Sum(nil)
	if subtle.ConstantTimeCompare(macBytes, expected) != 1 {
		return nil, false
	}
	block, err := aes.NewCipher(key.aesKey[:])
	if err != nil {
		return nil, false
	}
	plaintext = make([]byte, len(ciphertext))
	cipher.NewCTR(block, iv).XORKeyStream(plaintext, ciphertext)
	// Index 0 is the current key; anything later is an older key.
	return plaintext, keyIndex > 0
}
crypto/tls: remove version check when unmarshaling sessionState
This was causing issues when fuzzing with
TestMarshalUnmarshal since the test would
occasionally set the version to VersionTLS13,
which would fail when unmarshaling. The check
doesn't add much in practice, and there is no
harm in removing it to de-flake the test.
Fixes #38902
Change-Id: I0906c570e9ed69c85fdd2c15f1b52f9e372c62e3
Reviewed-on: https://go-review.googlesource.com/c/go/+/234486
Run-TryBot: Katie Hockman <e321a8fd89c4908465ddaf8d5d0d62480efacb63@golang.org>
TryBot-Result: Gobot Gobot <66cb808b70d30c07676d5e946fee83fd561249e5@golang.org>
Reviewed-by: Filippo Valsorda <4f40cda291c5f9634e1affd3db44947af61f705c@golang.org>
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tls
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/hmac"
"crypto/sha256"
"crypto/subtle"
"errors"
"io"
"golang.org/x/crypto/cryptobyte"
)
// sessionState contains the information that is serialized into a session
// ticket in order to later resume a connection.
// sessionState contains the information that is serialized into a session
// ticket in order to later resume a connection.
type sessionState struct {
	vers         uint16
	cipherSuite  uint16
	createdAt    uint64
	masterSecret []byte // opaque master_secret<1..2^16-1>;
	// struct { opaque certificate<1..2^24-1> } Certificate;
	certificates [][]byte // Certificate certificate_list<0..2^24-1>;
	// usedOldKey is true if the ticket from which this session came from
	// was encrypted with an older key and thus should be refreshed.
	usedOldKey bool
}

// marshal serializes the session state into the ticket wire format: version,
// cipher suite, creation time, a 16-bit length-prefixed master secret, and a
// 24-bit length-prefixed list of 24-bit length-prefixed certificates.
func (m *sessionState) marshal() []byte {
	var b cryptobyte.Builder
	b.AddUint16(m.vers)
	b.AddUint16(m.cipherSuite)
	addUint64(&b, m.createdAt)
	b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) {
		b.AddBytes(m.masterSecret)
	})
	b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
		for _, cert := range m.certificates {
			b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) {
				b.AddBytes(cert)
			})
		}
	})
	return b.BytesOrPanic()
}

// unmarshal parses a ticket produced by marshal, reporting whether the data
// was well formed. It resets every parsed field while preserving usedOldKey,
// which the caller sets from the ticket key lookup.
func (m *sessionState) unmarshal(data []byte) bool {
	*m = sessionState{usedOldKey: m.usedOldKey}
	s := cryptobyte.String(data)
	if ok := s.ReadUint16(&m.vers) &&
		s.ReadUint16(&m.cipherSuite) &&
		readUint64(&s, &m.createdAt) &&
		readUint16LengthPrefixed(&s, &m.masterSecret) &&
		len(m.masterSecret) != 0; !ok {
		return false
	}
	var certList cryptobyte.String
	if !s.ReadUint24LengthPrefixed(&certList) {
		return false
	}
	for !certList.Empty() {
		var cert []byte
		if !readUint24LengthPrefixed(&certList, &cert) {
			return false
		}
		m.certificates = append(m.certificates, cert)
	}
	// Any trailing bytes make the ticket invalid.
	return s.Empty()
}
// sessionStateTLS13 is the content of a TLS 1.3 session ticket. Its first
// version (revision = 0) doesn't carry any of the information needed for 0-RTT
// validation and the nonce is always empty.
// sessionStateTLS13 is the content of a TLS 1.3 session ticket. Its first
// version (revision = 0) doesn't carry any of the information needed for 0-RTT
// validation and the nonce is always empty.
type sessionStateTLS13 struct {
	// uint8 version = 0x0304;
	// uint8 revision = 0;
	cipherSuite      uint16
	createdAt        uint64
	resumptionSecret []byte      // opaque resumption_master_secret<1..2^8-1>;
	certificate      Certificate // CertificateEntry certificate_list<0..2^24-1>;
}

// marshal serializes the TLS 1.3 ticket: fixed version and revision bytes,
// cipher suite, creation time, an 8-bit length-prefixed resumption secret,
// and the certificate entry.
func (m *sessionStateTLS13) marshal() []byte {
	var b cryptobyte.Builder
	b.AddUint16(VersionTLS13)
	b.AddUint8(0) // revision
	b.AddUint16(m.cipherSuite)
	addUint64(&b, m.createdAt)
	b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
		b.AddBytes(m.resumptionSecret)
	})
	marshalCertificate(&b, m.certificate)
	return b.BytesOrPanic()
}

// unmarshal parses a ticket produced by marshal, reporting whether the data
// was well formed: version must be VersionTLS13, revision must be 0, the
// resumption secret must be non-empty, and no bytes may trail the certificate.
func (m *sessionStateTLS13) unmarshal(data []byte) bool {
	*m = sessionStateTLS13{}
	s := cryptobyte.String(data)
	var version uint16
	var revision uint8
	return s.ReadUint16(&version) &&
		version == VersionTLS13 &&
		s.ReadUint8(&revision) &&
		revision == 0 &&
		s.ReadUint16(&m.cipherSuite) &&
		readUint64(&s, &m.createdAt) &&
		readUint8LengthPrefixed(&s, &m.resumptionSecret) &&
		len(m.resumptionSecret) != 0 &&
		unmarshalCertificate(&s, &m.certificate) &&
		s.Empty()
}
// encryptTicket seals state into a session ticket using the first (current)
// ticket key. The ticket layout is:
//
//	key name (ticketKeyNameLen) | AES-CTR IV | ciphertext | HMAC-SHA256
//
// where the MAC covers everything that precedes it.
func (c *Conn) encryptTicket(state []byte) ([]byte, error) {
	if len(c.ticketKeys) == 0 {
		return nil, errors.New("tls: internal error: session ticket keys unavailable")
	}
	encrypted := make([]byte, ticketKeyNameLen+aes.BlockSize+len(state)+sha256.Size)
	keyName := encrypted[:ticketKeyNameLen]
	iv := encrypted[ticketKeyNameLen : ticketKeyNameLen+aes.BlockSize]
	macBytes := encrypted[len(encrypted)-sha256.Size:]
	// A fresh random IV per ticket.
	if _, err := io.ReadFull(c.config.rand(), iv); err != nil {
		return nil, err
	}
	key := c.ticketKeys[0]
	copy(keyName, key.keyName[:])
	block, err := aes.NewCipher(key.aesKey[:])
	if err != nil {
		return nil, errors.New("tls: failed to create cipher while encrypting ticket: " + err.Error())
	}
	cipher.NewCTR(block, iv).XORKeyStream(encrypted[ticketKeyNameLen+aes.BlockSize:], state)
	// MAC over key name, IV and ciphertext; written into the trailing bytes.
	mac := hmac.New(sha256.New, key.hmacKey[:])
	mac.Write(encrypted[:len(encrypted)-sha256.Size])
	mac.Sum(macBytes[:0])
	return encrypted, nil
}
// decryptTicket authenticates and decrypts a session ticket. It returns a nil
// plaintext if the ticket is too short, names an unknown key, or fails the
// MAC check. usedOldKey reports whether the ticket was sealed with a key
// other than the current (first) one, signaling that it should be refreshed.
func (c *Conn) decryptTicket(encrypted []byte) (plaintext []byte, usedOldKey bool) {
	if len(encrypted) < ticketKeyNameLen+aes.BlockSize+sha256.Size {
		return nil, false
	}
	// Slice the ticket into its fixed layout:
	// key name | IV | ciphertext | MAC.
	keyName := encrypted[:ticketKeyNameLen]
	iv := encrypted[ticketKeyNameLen : ticketKeyNameLen+aes.BlockSize]
	macBytes := encrypted[len(encrypted)-sha256.Size:]
	ciphertext := encrypted[ticketKeyNameLen+aes.BlockSize : len(encrypted)-sha256.Size]
	// Find which configured key sealed this ticket.
	keyIndex := -1
	for i, candidateKey := range c.ticketKeys {
		if bytes.Equal(keyName, candidateKey.keyName[:]) {
			keyIndex = i
			break
		}
	}
	if keyIndex == -1 {
		return nil, false
	}
	key := &c.ticketKeys[keyIndex]
	// Verify the MAC in constant time before decrypting.
	mac := hmac.New(sha256.New, key.hmacKey[:])
	mac.Write(encrypted[:len(encrypted)-sha256.Size])
	expected := mac.Sum(nil)
	if subtle.ConstantTimeCompare(macBytes, expected) != 1 {
		return nil, false
	}
	block, err := aes.NewCipher(key.aesKey[:])
	if err != nil {
		return nil, false
	}
	plaintext = make([]byte, len(ciphertext))
	cipher.NewCTR(block, iv).XORKeyStream(plaintext, ciphertext)
	// Index 0 is the current key; anything later is an older key.
	return plaintext, keyIndex > 0
}
|
package hm_test
import (
"errors"
"time"
. "github.com/cloudfoundry/hm9000/hm"
"github.com/cloudfoundry/hm9000/testhelpers/fakelogger"
"github.com/tedsuo/ifrit"
"github.com/tedsuo/ifrit/ginkgomon"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Spec for the Component runner: it invokes the supplied action on each
// polling interval until signaled to stop, and returns an error if a single
// action run exceeds the timeout.
var _ = Describe("HM9000Component", func() {
	var (
		hmc             *Component
		pollingInterval time.Duration
		timeout         time.Duration
		logger          *fakelogger.FakeLogger
		action          func() error
		actionChan      chan time.Time
	)
	BeforeEach(func() {
		pollingInterval = 100 * time.Millisecond
		timeout = 1 * time.Second
		logger = fakelogger.NewFakeLogger()
		actionChan = make(chan time.Time, 10)
		// Shadow the Describe-scoped variable so the closure below captures
		// this specific channel value, even if actionChan is reassigned.
		actionChan := actionChan
		action = func() error {
			actionChan <- time.Now()
			return nil
		}
	})
	// The component is built after every BeforeEach so Contexts can override
	// timeout/action first.
	JustBeforeEach(func() {
		hmc = NewComponent(
			"component",
			nil,
			pollingInterval,
			timeout,
			logger,
			action,
		)
	})
	It("Exits on receiving the correct signal", func() {
		proc := ifrit.Background(hmc)
		ginkgomon.Kill(proc)
		Eventually(proc.Wait()).Should(Receive(BeNil()))
	})
	It("Exits on receiving the correct signal after the action executes", func() {
		proc := ifrit.Background(hmc)
		Eventually(actionChan).Should(Receive())
		ginkgomon.Kill(proc)
		Eventually(proc.Wait()).Should(Receive(BeNil()))
		// After the kill, no further action runs should happen.
		Consistently(actionChan).ShouldNot(Receive())
	})
	It("Executes the component action on each polling interval", func() {
		proc := ifrit.Background(hmc)
		var t1, t2, t3 time.Time
		Eventually(actionChan).Should(Receive(&t1))
		Eventually(actionChan).Should(Receive(&t2))
		Eventually(actionChan).Should(Receive(&t3))
		ginkgomon.Kill(proc)
		// Consecutive runs should be roughly one polling interval apart
		// (10ms tolerance).
		Expect(t2.Sub(t1)).To(BeNumerically("~", pollingInterval, 10*time.Millisecond))
		Expect(t3.Sub(t2)).To(BeNumerically("~", pollingInterval, 10*time.Millisecond))
	})
	Context("When the action returns an error", func() {
		BeforeEach(func() {
			action = func() error {
				actionChan <- time.Now()
				return errors.New("Action failed")
			}
		})
		It("Continues to execute", func() {
			proc := ifrit.Background(hmc)
			Eventually(actionChan).Should(Receive())
			Eventually(actionChan).Should(Receive())
			ginkgomon.Kill(proc)
		})
	})
	Context("when the timeout expires", func() {
		BeforeEach(func() {
			timeout = 10 * time.Millisecond
			// Sleep past the timeout to force the abort path.
			action = func() error {
				time.Sleep(2 * timeout)
				return nil
			}
		})
		It("Returns an error", func() {
			proc := ifrit.Background(hmc)
			Eventually(proc.Wait()).Should(Receive(MatchError(Equal("component timed out. Aborting!"))))
		})
	})
})
Increase polling interval diff threshold to half of the polling interval
[finishes #117439981]
Signed-off-by: Dan Lavine <5ca8f6c726ce2120d02818e3c4896f90201eef24@us.ibm.com>
package hm_test
import (
"errors"
"time"
. "github.com/cloudfoundry/hm9000/hm"
"github.com/cloudfoundry/hm9000/testhelpers/fakelogger"
"github.com/tedsuo/ifrit"
"github.com/tedsuo/ifrit/ginkgomon"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Spec for the Component runner: it invokes the supplied action on each
// polling interval until signaled to stop, and returns an error if a single
// action run exceeds the timeout.
var _ = Describe("HM9000Component", func() {
	var (
		hmc             *Component
		pollingInterval time.Duration
		timeout         time.Duration
		logger          *fakelogger.FakeLogger
		action          func() error
		actionChan      chan time.Time
	)
	BeforeEach(func() {
		pollingInterval = 100 * time.Millisecond
		timeout = 1 * time.Second
		logger = fakelogger.NewFakeLogger()
		actionChan = make(chan time.Time, 10)
		// Shadow the Describe-scoped variable so the closure below captures
		// this specific channel value, even if actionChan is reassigned.
		actionChan := actionChan
		action = func() error {
			actionChan <- time.Now()
			return nil
		}
	})
	// The component is built after every BeforeEach so Contexts can override
	// timeout/action first.
	JustBeforeEach(func() {
		hmc = NewComponent(
			"component",
			nil,
			pollingInterval,
			timeout,
			logger,
			action,
		)
	})
	It("Exits on receiving the correct signal", func() {
		proc := ifrit.Background(hmc)
		ginkgomon.Kill(proc)
		Eventually(proc.Wait()).Should(Receive(BeNil()))
	})
	It("Exits on receiving the correct signal after the action executes", func() {
		proc := ifrit.Background(hmc)
		Eventually(actionChan).Should(Receive())
		ginkgomon.Kill(proc)
		Eventually(proc.Wait()).Should(Receive(BeNil()))
		// After the kill, no further action runs should happen.
		Consistently(actionChan).ShouldNot(Receive())
	})
	It("Executes the component action on each polling interval", func() {
		proc := ifrit.Background(hmc)
		var t1, t2, t3 time.Time
		Eventually(actionChan).Should(Receive(&t1))
		Eventually(actionChan).Should(Receive(&t2))
		Eventually(actionChan).Should(Receive(&t3))
		ginkgomon.Kill(proc)
		// Consecutive runs should be roughly one polling interval apart;
		// the tolerance is half the interval to avoid timing flakes.
		Expect(t2.Sub(t1)).To(BeNumerically("~", pollingInterval, pollingInterval/2))
		Expect(t3.Sub(t2)).To(BeNumerically("~", pollingInterval, pollingInterval/2))
	})
	Context("When the action returns an error", func() {
		BeforeEach(func() {
			action = func() error {
				actionChan <- time.Now()
				return errors.New("Action failed")
			}
		})
		It("Continues to execute", func() {
			proc := ifrit.Background(hmc)
			Eventually(actionChan).Should(Receive())
			Eventually(actionChan).Should(Receive())
			ginkgomon.Kill(proc)
		})
	})
	Context("when the timeout expires", func() {
		BeforeEach(func() {
			timeout = 10 * time.Millisecond
			// Sleep past the timeout to force the abort path.
			action = func() error {
				time.Sleep(2 * timeout)
				return nil
			}
		})
		It("Returns an error", func() {
			proc := ifrit.Background(hmc)
			Eventually(proc.Wait()).Should(Receive(MatchError(Equal("component timed out. Aborting!"))))
		})
	})
})
|
package autocmd
import (
"nvim-go/commands"
"nvim-go/config"
"github.com/garyburd/neovim-go/vim"
"github.com/garyburd/neovim-go/vim/plugin"
)
// init registers autocmdBufWritePre on BufWritePre for Go buffers. The Eval
// expression is evaluated on the Vim side, so the handler receives the
// working directory and the full path of the file being written.
func init() {
	plugin.HandleAutocmd("BufWritePre",
		&plugin.AutocmdOptions{Pattern: "*.go", Group: "nvim-go", Eval: "[getcwd(), expand('%:p')]"}, autocmdBufWritePre)
}

// bufwritepreEval mirrors the Eval list above; the msgpack ",array" tag
// decodes the Vim list positionally into these fields.
type bufwritepreEval struct {
	Cwd  string `msgpack:",array"`
	File string
}
// autocmdBufWritePre runs the configured pre-save actions: iferr insertion
// and metalinter run asynchronously when enabled; Fmt runs either
// asynchronously or synchronously depending on config.FmtAsync.
func autocmdBufWritePre(v *vim.Vim, eval bufwritepreEval) error {
	if config.IferrAutosave {
		iferrEnv := commands.CmdIferrEval{
			Cwd:  eval.Cwd,
			File: eval.File,
		}
		go commands.Iferr(v, iferrEnv)
	}
	if config.MetalinterAutosave {
		go commands.Metalinter(v, eval.Cwd)
	}
	if !config.FmtAsync {
		// Synchronous formatting: the write waits for Fmt's result.
		return commands.Fmt(v, eval.Cwd)
	}
	go commands.Fmt(v, eval.Cwd)
	return nil
}
Fix Eval to use current buffer path for GoFmt
Signed-off-by: Koichi Shiraishi <13fbd79c3d390e5d6585a21e11ff5ec1970cff0c@zchee.io>
package autocmd
import (
"nvim-go/commands"
"nvim-go/config"
"github.com/garyburd/neovim-go/vim"
"github.com/garyburd/neovim-go/vim/plugin"
)
// init registers autocmdBufWritePre on BufWritePre for Go buffers. The Eval
// expression is evaluated on the Vim side, so the handler receives the
// working directory, the buffer's directory, and the full file path.
func init() {
	plugin.HandleAutocmd("BufWritePre",
		&plugin.AutocmdOptions{Pattern: "*.go", Group: "nvim-go", Eval: "[getcwd(), expand('%:p:h'), expand('%:p')]"}, autocmdBufWritePre)
}

// bufwritepreEval mirrors the Eval list above; the msgpack ",array" tag
// decodes the Vim list positionally into these fields.
type bufwritepreEval struct {
	Cwd  string `msgpack:",array"`
	Dir  string
	File string
}
// autocmdBufWritePre runs the configured pre-save actions: iferr insertion
// and metalinter run asynchronously when enabled; Fmt runs on the buffer's
// directory, either asynchronously or synchronously per config.FmtAsync.
func autocmdBufWritePre(v *vim.Vim, eval bufwritepreEval) error {
	if config.IferrAutosave {
		iferrEnv := commands.CmdIferrEval{
			Cwd:  eval.Cwd,
			File: eval.File,
		}
		go commands.Iferr(v, iferrEnv)
	}
	if config.MetalinterAutosave {
		go commands.Metalinter(v, eval.Cwd)
	}
	if !config.FmtAsync {
		// Synchronous formatting: the write waits for Fmt's result.
		return commands.Fmt(v, eval.Dir)
	}
	go commands.Fmt(v, eval.Dir)
	return nil
}
|
Return InvalidBody when failing to decode a new login
We always assume that a failure to decode indicates an invalid body, and
return an InvalidBody error instead of the error from the decoder.
|
package main
import (
"fmt"
"math/big"
"github.com/spf13/cobra"
"github.com/NebulousLabs/Sia/api"
)
var (
hostCmd = &cobra.Command{
Use: "host",
Short: "Perform host actions",
Long: "View or modify host settings. Modifying host settings also announces the host to the network.",
Run: wrap(hostcmd),
}
hostConfigCmd = &cobra.Command{
Use: "config [setting] [value]",
Short: "Modify host settings",
Long: `Modify host settings.
Available settings:
totalstorage
minduration
maxduration
windowsize
price (in SC per GB per month)`,
Run: wrap(hostconfigcmd),
}
hostAnnounceCmd = &cobra.Command{
Use: "announce",
Short: "Announce yourself as a host",
Long: `Announce yourself as a host on the network.
You may also supply a specific address to be announced, e.g.:
siac host announce my-host-domain.com:9001
Doing so will override the standard connectivity checks.`,
Run: hostannouncecmd,
}
)
// hostconfigcmd updates a single host setting via the /host API endpoint.
// Prices are entered in SC per GB per month and converted to
// hastings/byte/block; totalstorage accepts human-readable sizes (10GB, 1TiB).
func hostconfigcmd(param, value string) {
	// convert price to hastings/byte/block
	if param == "price" {
		p, ok := new(big.Rat).SetString(value)
		if !ok {
			fmt.Println("could not parse price")
			return
		}
		// 1e24/1e9 converts SC/GB to hastings/byte; 4320 is blocks per month.
		p.Mul(p, big.NewRat(1e24/1e9, 4320))
		value = new(big.Int).Div(p.Num(), p.Denom()).String()
	}
	// parse sizes of form 10GB, 10TB, 1TiB etc
	if param == "totalstorage" {
		var err error
		value, err = parseSize(value)
		if err != nil {
			fmt.Println("could not parse " + param)
			// Bug fix: previously there was no return here, so the
			// unparsed value was still posted to the API.
			return
		}
	}
	err := post("/host", param+"="+value)
	if err != nil {
		fmt.Println("Could not update host settings:", err)
		return
	}
	fmt.Println("Host settings updated.")
}
// hostannouncecmd announces the host to the network, optionally with an
// explicit network address supplied as the sole argument.
func hostannouncecmd(cmd *cobra.Command, args []string) {
	if len(args) > 1 {
		cmd.Usage()
		return
	}
	query := ""
	if len(args) == 1 {
		query = "netaddress=" + args[0]
	}
	if err := post("/host/announce", query); err != nil {
		fmt.Println("Could not announce host:", err)
		return
	}
	fmt.Println("Host announcement submitted to network.")
}
// hostcmd prints the host's settings and revenue statistics fetched from the
// /host API endpoint; with the verbose flag set it also prints the network
// address, unlock hash, and RPC call counters.
func hostcmd() {
	hg := new(api.HostGET)
	err := getAPI("/host", &hg)
	if err != nil {
		fmt.Println("Could not fetch host settings:", err)
		return
	}
	// convert price to SC/GB/mo
	price := new(big.Rat).SetInt(hg.Price.Big())
	price.Mul(price, big.NewRat(4320, 1e24/1e9))
	fmt.Printf(`Host info:
Storage: %v (%v used)
Price: %v SC per GB per month
Max Duration: %v Blocks
Contracts: %v
Anticipated Revenue: %v
Revenue: %v
Lost Revenue: %v
`, filesizeUnits(hg.TotalStorage), filesizeUnits(hg.TotalStorage-hg.StorageRemaining),
		price.FloatString(3), hg.MaxDuration, hg.NumContracts, hg.AnticipatedRevenue,
		hg.Revenue, hg.LostRevenue)
	// display more info if verbose flag is set
	if !hostVerbose {
		return
	}
	fmt.Printf(`
Net Address: %v
Unlock Hash: %v
(NOT a wallet address!)
RPC Stats:
Error Calls: %v
Unrecognized Calls: %v
Download Calls: %v
Renew Calls: %v
Revise Calls: %v
Settings Calls: %v
Upload Calls: %v
`, hg.NetAddress, hg.UnlockHash, hg.RPCErrorCalls, hg.RPCUnrecognizedCalls, hg.RPCDownloadCalls,
		hg.RPCRenewCalls, hg.RPCReviseCalls, hg.RPCSettingsCalls, hg.RPCUploadCalls)
}
add siac support for AcceptingContracts
package main
import (
"fmt"
"math/big"
"github.com/spf13/cobra"
"github.com/NebulousLabs/Sia/api"
)
var (
hostCmd = &cobra.Command{
Use: "host",
Short: "Perform host actions",
Long: "View or modify host settings.",
Run: wrap(hostcmd),
}
hostConfigCmd = &cobra.Command{
Use: "config [setting] [value]",
Short: "Modify host settings",
Long: `Modify host settings.
Available settings:
totalstorage
minduration
maxduration
windowsize
price (in SC per GB per month)
acceptingcontracts
To configure the host to not accept new contracts, set acceptingcontracts
to false, e.g.:
siac host config acceptingcontracts false
`,
Run: wrap(hostconfigcmd),
}
hostAnnounceCmd = &cobra.Command{
Use: "announce",
Short: "Announce yourself as a host",
Long: `Announce yourself as a host on the network.
You may also supply a specific address to be announced, e.g.:
siac host announce my-host-domain.com:9001
Doing so will override the standard connectivity checks.`,
Run: hostannouncecmd,
}
)
// hostconfigcmd updates a single host setting via the /host API endpoint.
// Prices are entered in SC per GB per month and converted to
// hastings/byte/block; totalstorage accepts human-readable sizes (10GB, 1TiB).
func hostconfigcmd(param, value string) {
	// convert price to hastings/byte/block
	if param == "price" {
		p, ok := new(big.Rat).SetString(value)
		if !ok {
			fmt.Println("could not parse price")
			return
		}
		// 1e24/1e9 converts SC/GB to hastings/byte; 4320 is blocks per month.
		p.Mul(p, big.NewRat(1e24/1e9, 4320))
		value = new(big.Int).Div(p.Num(), p.Denom()).String()
	}
	// parse sizes of form 10GB, 10TB, 1TiB etc
	if param == "totalstorage" {
		var err error
		value, err = parseSize(value)
		if err != nil {
			fmt.Println("could not parse " + param)
			// Bug fix: previously there was no return here, so the
			// unparsed value was still posted to the API.
			return
		}
	}
	err := post("/host", param+"="+value)
	if err != nil {
		fmt.Println("Could not update host settings:", err)
		return
	}
	fmt.Println("Host settings updated.")
}
// hostannouncecmd implements `siac host announce [netaddress]`. With no
// argument the daemon announces its own address; with one argument the given
// address is announced (bypassing the standard connectivity checks).
func hostannouncecmd(cmd *cobra.Command, args []string) {
    var err error
    if len(args) == 0 {
        err = post("/host/announce", "")
    } else if len(args) == 1 {
        err = post("/host/announce", "netaddress="+args[0])
    } else {
        cmd.Usage()
        return
    }
    if err != nil {
        fmt.Println("Could not announce host:", err)
        return
    }
    fmt.Println("Host announcement submitted to network.")
}
// hostcmd is the handler for `siac host`: it fetches the host's settings from
// the daemon API and prints a summary, plus RPC statistics in verbose mode.
func hostcmd() {
    hg := new(api.HostGET)
    // NOTE(review): &hg is a **api.HostGET; getAPI presumably JSON-decodes,
    // which tolerates pointer-to-pointer — confirm against getAPI.
    err := getAPI("/host", &hg)
    if err != nil {
        fmt.Println("Could not fetch host settings:", err)
        return
    }
    // convert accepting bool
    accept := "Yes"
    if !hg.AcceptingContracts {
        accept = "No"
    }
    // convert price to SC/GB/mo
    // (inverse of the conversion performed in hostconfigcmd:
    // hastings/byte/block -> SC/GB/month)
    price := new(big.Rat).SetInt(hg.Price.Big())
    price.Mul(price, big.NewRat(4320, 1e24/1e9))
    fmt.Printf(`Host info:
Storage: %v (%v used)
Price: %v SC per GB per month
Max Duration: %v Blocks
Contracts: %v
Accepting Contracts: %v
Anticipated Revenue: %v
Revenue: %v
Lost Revenue: %v
`, filesizeUnits(hg.TotalStorage), filesizeUnits(hg.TotalStorage-hg.StorageRemaining),
        price.FloatString(3), hg.MaxDuration, hg.NumContracts, accept,
        hg.AnticipatedRevenue, hg.Revenue, hg.LostRevenue)
    // display more info if verbose flag is set
    if !hostVerbose {
        return
    }
    fmt.Printf(`
Net Address: %v
Unlock Hash: %v
(NOT a wallet address!)
RPC Stats:
Error Calls: %v
Unrecognized Calls: %v
Download Calls: %v
Renew Calls: %v
Revise Calls: %v
Settings Calls: %v
Upload Calls: %v
`, hg.NetAddress, hg.UnlockHash, hg.RPCErrorCalls, hg.RPCUnrecognizedCalls, hg.RPCDownloadCalls,
        hg.RPCRenewCalls, hg.RPCReviseCalls, hg.RPCSettingsCalls, hg.RPCUploadCalls)
}
|
package sudoku
import (
"log"
"math"
"sort"
)
/*
This file is where the basic solve technique infrastructure is defined.
The techniques that Human Solve uses are initalized and stored into human_solves.go Techniques slice here.
Specific techniques are implemented in hst_*.go files (hst == human solve techniques), where there's
a separate file for each class of technique.
*/
//You shouldn't just create a technique by hand (the interior basicSolveTechnique needs to be initalized with the right values)
//So in the rare cases where you want to grab a technique by name, grab it from here.
//TODO: it feels like a pattern smell that there's only a singleton for each technique that you can't cons up on demand.
var techniquesByName map[string]SolveTechnique
//SolveTechnique is a logical technique that, when applied to a grid, returns potential SolveSteps
//that will move the grid closer to being solved, and are based on sound logical reasoning. A stable
//of SolveTechniques (stored in Techniques) are repeatedly applied to the Grid in HumanSolve.
type SolveTechnique interface {
//Name returns the human-readable shortname of the technique.
Name() string
//Description returns a human-readable phrase that describes the logical reasoning applied in the particular step; why
//it is valid.
Description(*SolveStep) string
//IMPORTANT: a step should return a step IFF that step is valid AND the step would cause useful work to be done if applied.
//Find returns as many steps as it can find in the grid for that technique, in a random order.
//HumanSolve repeatedly applies technique.Find() to identify candidates for the next step in the solution.
//A technique's Find method will send results as it finds them to results, and will periodically see if it
//can receive any value from done--if it can, it will stop searching. Find will block and not return if it can't send
//to results or receive from done; either use sufficiently buffered channels or run Find in a goroutine.
Find(grid *Grid, results chan *SolveStep, done chan bool)
//TODO: if we keep this signature, we should consider having each find method actually wrap its internals in a goRoutine
//to make it safer to use--although that would probably require a new signature.
//IsFill returns true if the techinque's action when applied to a grid is to fill a number (as opposed to culling possbilitie).
IsFill() bool
//HumanLikelihood is how likely a user would be to pick this technique when compared with other possible steps.
//Generally inversely related to difficulty (but not perfectly).
//This value will be used to pick which technique to apply when compared with other candidates.
HumanLikelihood() float64
}
type cellGroupType int
const (
_GROUP_NONE cellGroupType = iota
_GROUP_ROW
_GROUP_COL
_GROUP_BLOCK
)
type basicSolveTechnique struct {
name string
isFill bool
groupType cellGroupType
//Size of set in technique, e.g. single = 1, pair = 2, triple = 3
//Used for generating descriptions in some sub-structs.
k int
}
//Boilerplate to allow us to sort Techniques in weights.
//techniqueByLikelihood implements sort.Interface, ordering techniques by
//ascending HumanLikelihood; it is used once from init to sort Techniques.
type techniqueByLikelihood []SolveTechnique

func (t techniqueByLikelihood) Len() int {
    return len(t)
}

func (t techniqueByLikelihood) Swap(i, j int) {
    t[i], t[j] = t[j], t[i]
}

func (t techniqueByLikelihood) Less(i, j int) bool {
    return t[i].HumanLikelihood() < t[j].HumanLikelihood()
}
//init registers every human-solve technique into the package-level Techniques
//slice, registers the fallback GuessTechnique, sorts by HumanLikelihood, and
//builds the techniquesByName index. The literal order below is not
//significant: the slice is sorted just after.
func init() {
    //TODO: calculate more realistic weights.
    Techniques = []SolveTechnique{
        &hiddenSingleTechnique{
            &basicSolveTechnique{
                //TODO: shouldn't this be "Hidden Single Row" (and likewise for others)
                "Necessary In Row",
                true,
                _GROUP_ROW,
                1,
            },
        },
        &hiddenSingleTechnique{
            &basicSolveTechnique{
                "Necessary In Col",
                true,
                _GROUP_COL,
                1,
            },
        },
        &hiddenSingleTechnique{
            &basicSolveTechnique{
                "Necessary In Block",
                true,
                _GROUP_BLOCK,
                1,
            },
        },
        &nakedSingleTechnique{
            &basicSolveTechnique{
                //TODO: shouldn't this name be Naked Single for consistency?
                "Only Legal Number",
                true,
                _GROUP_NONE,
                1,
            },
        },
        &obviousInCollectionTechnique{
            &basicSolveTechnique{
                "Obvious In Row",
                true,
                _GROUP_ROW,
                1,
            },
        },
        &obviousInCollectionTechnique{
            &basicSolveTechnique{
                "Obvious In Col",
                true,
                _GROUP_COL,
                1,
            },
        },
        &obviousInCollectionTechnique{
            &basicSolveTechnique{
                "Obvious In Block",
                true,
                _GROUP_BLOCK,
                1,
            },
        },
        //The remaining techniques are cull techniques (isFill == false),
        //except as noted by their second field.
        &pointingPairTechnique{
            &basicSolveTechnique{
                "Pointing Pair Row",
                false,
                _GROUP_ROW,
                2,
            },
        },
        &pointingPairTechnique{
            &basicSolveTechnique{
                "Pointing Pair Col",
                false,
                _GROUP_COL,
                2,
            },
        },
        &blockBlockInteractionTechnique{
            &basicSolveTechnique{
                "Block Block Interactions",
                false,
                _GROUP_BLOCK,
                2,
            },
        },
        &nakedSubsetTechnique{
            &basicSolveTechnique{
                "Naked Pair Col",
                false,
                _GROUP_COL,
                2,
            },
        },
        &nakedSubsetTechnique{
            &basicSolveTechnique{
                "Naked Pair Row",
                false,
                _GROUP_ROW,
                2,
            },
        },
        &nakedSubsetTechnique{
            &basicSolveTechnique{
                "Naked Pair Block",
                false,
                _GROUP_BLOCK,
                2,
            },
        },
        &nakedSubsetTechnique{
            &basicSolveTechnique{
                "Naked Triple Col",
                false,
                _GROUP_COL,
                3,
            },
        },
        &nakedSubsetTechnique{
            &basicSolveTechnique{
                "Naked Triple Row",
                false,
                _GROUP_ROW,
                3,
            },
        },
        &nakedSubsetTechnique{
            &basicSolveTechnique{
                "Naked Triple Block",
                false,
                _GROUP_BLOCK,
                3,
            },
        },
        &nakedSubsetTechnique{
            &basicSolveTechnique{
                "Naked Quad Col",
                false,
                _GROUP_COL,
                4,
            },
        },
        &nakedSubsetTechnique{
            &basicSolveTechnique{
                "Naked Quad Row",
                false,
                _GROUP_ROW,
                4,
            },
        },
        &nakedSubsetTechnique{
            &basicSolveTechnique{
                "Naked Quad Block",
                false,
                _GROUP_BLOCK,
                4,
            },
        },
        &hiddenSubsetTechnique{
            &basicSolveTechnique{
                "Hidden Pair Row",
                false,
                _GROUP_ROW,
                2,
            },
        },
        &hiddenSubsetTechnique{
            &basicSolveTechnique{
                "Hidden Pair Col",
                false,
                _GROUP_COL,
                2,
            },
        },
        &hiddenSubsetTechnique{
            &basicSolveTechnique{
                "Hidden Pair Block",
                false,
                _GROUP_BLOCK,
                2,
            },
        },
        &xwingTechnique{
            &basicSolveTechnique{
                "XWing Row",
                false,
                _GROUP_ROW,
                2,
            },
        },
        &xwingTechnique{
            &basicSolveTechnique{
                "XWing Col",
                false,
                _GROUP_COL,
                2,
            },
        },
        &hiddenSubsetTechnique{
            &basicSolveTechnique{
                "Hidden Triple Row",
                false,
                _GROUP_ROW,
                3,
            },
        },
        &hiddenSubsetTechnique{
            &basicSolveTechnique{
                "Hidden Triple Col",
                false,
                _GROUP_COL,
                3,
            },
        },
        &hiddenSubsetTechnique{
            &basicSolveTechnique{
                "Hidden Triple Block",
                false,
                _GROUP_BLOCK,
                3,
            },
        },
        &hiddenSubsetTechnique{
            &basicSolveTechnique{
                "Hidden Quad Row",
                false,
                _GROUP_ROW,
                4,
            },
        },
        &hiddenSubsetTechnique{
            &basicSolveTechnique{
                "Hidden Quad Col",
                false,
                _GROUP_COL,
                4,
            },
        },
        &hiddenSubsetTechnique{
            &basicSolveTechnique{
                "Hidden Quad Block",
                false,
                _GROUP_BLOCK,
                4,
            },
        },
    }
    //Guess is kept out of Techniques and appended to AllTechniques below,
    //so ordinary technique selection does not reach for it.
    GuessTechnique = &guessTechnique{
        &basicSolveTechnique{
            "Guess",
            true,
            _GROUP_NONE,
            1,
        },
    }
    //Sort Techniques in order of humanLikelihood
    sort.Sort(techniqueByLikelihood(Techniques))
    //Guess is always the highest, so AllTechniques should already be sorted.
    AllTechniques = append(Techniques, GuessTechnique)
    //Build the by-name lookup used by techniquesByName consumers.
    techniquesByName = make(map[string]SolveTechnique)
    for _, technique := range AllTechniques {
        techniquesByName[technique.Name()] = technique
    }
}
//Name returns the human-readable shortname of this technique.
func (self *basicSolveTechnique) Name() string {
    return self.name
}

//IsFill returns whether applying this technique fills a number into a cell
//(true) as opposed to culling possibilities (false).
func (self *basicSolveTechnique) IsFill() bool {
    return self.isFill
}
//TOOD: this is now named incorrectly. (It should be likelihoodHelper)
//difficultyHelper combines a group-type multiplier, a fill/cull multiplier,
//and baseDifficulty raised to the subset size k. Embedding structs call into
//this to provide their own Difficulty.
func (self *basicSolveTechnique) difficultyHelper(baseDifficulty float64) float64 {
    //TODO: the default difficulties, as configured, will mean that SolveDirection's Difficulty() will almost always clamp to 1.0.
    //They're only useful in terms of a reasonable picking of techniques when multiple apply.
    var groupMultiplier float64
    switch self.groupType {
    case _GROUP_ROW:
        //Rows are easier to scan than columns because most humans are used to reading LTR.
        groupMultiplier = 1.25
    case _GROUP_COL:
        //Cols are easy to scan in one line, but the eye moves a long way in an unnatural direction.
        groupMultiplier = 1.3
    default:
        //Blocks (and techniques with no group) get the baseline multiplier.
        groupMultiplier = 1.0
    }
    //TODO: Arguably, the "fill-ness" of a technique should be encoded in the baseDifficulty, and this is a hack to quickly change it for all fill techniques.
    fillMultiplier := 5.0
    if self.IsFill() {
        fillMultiplier = 1.0
    }
    return groupMultiplier * fillMultiplier * math.Pow(baseDifficulty, float64(self.k))
}
//getter returns a function that fetches the i'th cell group of this
//technique's kind (row, col, or block) from the given grid.
func (self *basicSolveTechnique) getter(grid *Grid) func(int) CellSlice {
    var fetch func(int) CellSlice
    switch self.groupType {
    case _GROUP_ROW:
        fetch = grid.Row
    case _GROUP_COL:
        fetch = grid.Col
    case _GROUP_BLOCK:
        fetch = grid.Block
    default:
        //This should never happen in normal execution--the rare techniques where it doesn't work should never call getter.
        log.Println("Asked for a getter for a function with GROUP_NONE")
        //Return a shell of a function just to not trip up things downstream.
        fetch = func(int) CellSlice {
            return nil
        }
    }
    return fetch
}
//This is useful both for hidden and naked subset techniques.
//subsetIndexes returns all size-length combinations of indexes into a list of
//length n, in lexicographic order (e.g. n=3, size=2 -> {0,1},{0,2},{1,2}).
//Returns nil when size > n.
//(The first parameter was previously named `len`, shadowing the builtin.)
func subsetIndexes(n int, size int) [][]int {
    //Sanity check
    if size > n {
        return nil
    }
    result := make([][]int, 0)
    //counters holds the current combination, starting at {0, 1, ..., size-1}.
    counters := make([]int, size)
    for i := range counters {
        counters[i] = i
    }
    for {
        //Snapshot the current combination (counters mutates in place).
        innerResult := make([]int, size)
        copy(innerResult, counters)
        result = append(result, innerResult)
        //Now, increment: scan from the right for a counter that can advance.
        incremented := false
        for i := size - 1; i >= 0; i-- {
            if counters[i] < n-(size-i) {
                //Found one!
                counters[i]++
                incremented = true
                if i < size-1 {
                    //It was an inner counter; reset the counters to its right
                    //to consecutive values one above it.
                    base := counters[i] + 1
                    for j := i + 1; j < size; j++ {
                        counters[j] = base
                        base++
                    }
                }
                break
            }
        }
        //If we couldn't increment any, every combination has been emitted.
        if !incremented {
            break
        }
    }
    return result
}
Added ForcingChains to the list of techniques. Still not tested (although it does compile)
package sudoku
import (
"log"
"math"
"sort"
)
/*
This file is where the basic solve technique infrastructure is defined.
The techniques that Human Solve uses are initalized and stored into human_solves.go Techniques slice here.
Specific techniques are implemented in hst_*.go files (hst == human solve techniques), where there's
a separate file for each class of technique.
*/
//You shouldn't just create a technique by hand (the interior basicSolveTechnique needs to be initalized with the right values)
//So in the rare cases where you want to grab a technique by name, grab it from here.
//TODO: it feels like a pattern smell that there's only a singleton for each technique that you can't cons up on demand.
var techniquesByName map[string]SolveTechnique
//SolveTechnique is a logical technique that, when applied to a grid, returns potential SolveSteps
//that will move the grid closer to being solved, and are based on sound logical reasoning. A stable
//of SolveTechniques (stored in Techniques) are repeatedly applied to the Grid in HumanSolve.
type SolveTechnique interface {
//Name returns the human-readable shortname of the technique.
Name() string
//Description returns a human-readable phrase that describes the logical reasoning applied in the particular step; why
//it is valid.
Description(*SolveStep) string
//IMPORTANT: a step should return a step IFF that step is valid AND the step would cause useful work to be done if applied.
//Find returns as many steps as it can find in the grid for that technique, in a random order.
//HumanSolve repeatedly applies technique.Find() to identify candidates for the next step in the solution.
//A technique's Find method will send results as it finds them to results, and will periodically see if it
//can receive any value from done--if it can, it will stop searching. Find will block and not return if it can't send
//to results or receive from done; either use sufficiently buffered channels or run Find in a goroutine.
Find(grid *Grid, results chan *SolveStep, done chan bool)
//TODO: if we keep this signature, we should consider having each find method actually wrap its internals in a goRoutine
//to make it safer to use--although that would probably require a new signature.
//IsFill returns true if the techinque's action when applied to a grid is to fill a number (as opposed to culling possbilitie).
IsFill() bool
//HumanLikelihood is how likely a user would be to pick this technique when compared with other possible steps.
//Generally inversely related to difficulty (but not perfectly).
//This value will be used to pick which technique to apply when compared with other candidates.
HumanLikelihood() float64
}
type cellGroupType int
const (
_GROUP_NONE cellGroupType = iota
_GROUP_ROW
_GROUP_COL
_GROUP_BLOCK
)
type basicSolveTechnique struct {
name string
isFill bool
groupType cellGroupType
//Size of set in technique, e.g. single = 1, pair = 2, triple = 3
//Used for generating descriptions in some sub-structs.
k int
}
//Boilerplate to allow us to sort Techniques in weights.
//techniqueByLikelihood implements sort.Interface, ordering techniques by
//ascending HumanLikelihood; it is used once from init to sort Techniques.
type techniqueByLikelihood []SolveTechnique

func (t techniqueByLikelihood) Len() int {
    return len(t)
}

func (t techniqueByLikelihood) Swap(i, j int) {
    t[i], t[j] = t[j], t[i]
}

func (t techniqueByLikelihood) Less(i, j int) bool {
    return t[i].HumanLikelihood() < t[j].HumanLikelihood()
}
//init registers every human-solve technique into the package-level Techniques
//slice, registers the fallback GuessTechnique, sorts by HumanLikelihood, and
//builds the techniquesByName index. The literal order below is not
//significant: the slice is sorted just after.
func init() {
    //TODO: calculate more realistic weights.
    Techniques = []SolveTechnique{
        &hiddenSingleTechnique{
            &basicSolveTechnique{
                //TODO: shouldn't this be "Hidden Single Row" (and likewise for others)
                "Necessary In Row",
                true,
                _GROUP_ROW,
                1,
            },
        },
        &hiddenSingleTechnique{
            &basicSolveTechnique{
                "Necessary In Col",
                true,
                _GROUP_COL,
                1,
            },
        },
        &hiddenSingleTechnique{
            &basicSolveTechnique{
                "Necessary In Block",
                true,
                _GROUP_BLOCK,
                1,
            },
        },
        &nakedSingleTechnique{
            &basicSolveTechnique{
                //TODO: shouldn't this name be Naked Single for consistency?
                "Only Legal Number",
                true,
                _GROUP_NONE,
                1,
            },
        },
        &obviousInCollectionTechnique{
            &basicSolveTechnique{
                "Obvious In Row",
                true,
                _GROUP_ROW,
                1,
            },
        },
        &obviousInCollectionTechnique{
            &basicSolveTechnique{
                "Obvious In Col",
                true,
                _GROUP_COL,
                1,
            },
        },
        &obviousInCollectionTechnique{
            &basicSolveTechnique{
                "Obvious In Block",
                true,
                _GROUP_BLOCK,
                1,
            },
        },
        &pointingPairTechnique{
            &basicSolveTechnique{
                "Pointing Pair Row",
                false,
                _GROUP_ROW,
                2,
            },
        },
        &pointingPairTechnique{
            &basicSolveTechnique{
                "Pointing Pair Col",
                false,
                _GROUP_COL,
                2,
            },
        },
        &blockBlockInteractionTechnique{
            &basicSolveTechnique{
                "Block Block Interactions",
                false,
                _GROUP_BLOCK,
                2,
            },
        },
        &nakedSubsetTechnique{
            &basicSolveTechnique{
                "Naked Pair Col",
                false,
                _GROUP_COL,
                2,
            },
        },
        &nakedSubsetTechnique{
            &basicSolveTechnique{
                "Naked Pair Row",
                false,
                _GROUP_ROW,
                2,
            },
        },
        &nakedSubsetTechnique{
            &basicSolveTechnique{
                "Naked Pair Block",
                false,
                _GROUP_BLOCK,
                2,
            },
        },
        &nakedSubsetTechnique{
            &basicSolveTechnique{
                "Naked Triple Col",
                false,
                _GROUP_COL,
                3,
            },
        },
        &nakedSubsetTechnique{
            &basicSolveTechnique{
                "Naked Triple Row",
                false,
                _GROUP_ROW,
                3,
            },
        },
        &nakedSubsetTechnique{
            &basicSolveTechnique{
                "Naked Triple Block",
                false,
                _GROUP_BLOCK,
                3,
            },
        },
        &nakedSubsetTechnique{
            &basicSolveTechnique{
                "Naked Quad Col",
                false,
                _GROUP_COL,
                4,
            },
        },
        &nakedSubsetTechnique{
            &basicSolveTechnique{
                "Naked Quad Row",
                false,
                _GROUP_ROW,
                4,
            },
        },
        &nakedSubsetTechnique{
            &basicSolveTechnique{
                "Naked Quad Block",
                false,
                _GROUP_BLOCK,
                4,
            },
        },
        &hiddenSubsetTechnique{
            &basicSolveTechnique{
                "Hidden Pair Row",
                false,
                _GROUP_ROW,
                2,
            },
        },
        &hiddenSubsetTechnique{
            &basicSolveTechnique{
                "Hidden Pair Col",
                false,
                _GROUP_COL,
                2,
            },
        },
        &hiddenSubsetTechnique{
            &basicSolveTechnique{
                "Hidden Pair Block",
                false,
                _GROUP_BLOCK,
                2,
            },
        },
        &xwingTechnique{
            &basicSolveTechnique{
                "XWing Row",
                false,
                _GROUP_ROW,
                2,
            },
        },
        &xwingTechnique{
            &basicSolveTechnique{
                "XWing Col",
                false,
                _GROUP_COL,
                2,
            },
        },
        //Forcing Chain is a fill technique despite its high difficulty.
        &forcingChainsTechnique{
            &basicSolveTechnique{
                "Forcing Chain",
                true,
                _GROUP_NONE,
                2,
            },
        },
        &hiddenSubsetTechnique{
            &basicSolveTechnique{
                "Hidden Triple Row",
                false,
                _GROUP_ROW,
                3,
            },
        },
        &hiddenSubsetTechnique{
            &basicSolveTechnique{
                "Hidden Triple Col",
                false,
                _GROUP_COL,
                3,
            },
        },
        &hiddenSubsetTechnique{
            &basicSolveTechnique{
                "Hidden Triple Block",
                false,
                _GROUP_BLOCK,
                3,
            },
        },
        &hiddenSubsetTechnique{
            &basicSolveTechnique{
                "Hidden Quad Row",
                false,
                _GROUP_ROW,
                4,
            },
        },
        &hiddenSubsetTechnique{
            &basicSolveTechnique{
                "Hidden Quad Col",
                false,
                _GROUP_COL,
                4,
            },
        },
        &hiddenSubsetTechnique{
            &basicSolveTechnique{
                "Hidden Quad Block",
                false,
                _GROUP_BLOCK,
                4,
            },
        },
    }
    //Guess is kept out of Techniques and appended to AllTechniques below,
    //so ordinary technique selection does not reach for it.
    GuessTechnique = &guessTechnique{
        &basicSolveTechnique{
            "Guess",
            true,
            _GROUP_NONE,
            1,
        },
    }
    //Sort Techniques in order of humanLikelihood
    sort.Sort(techniqueByLikelihood(Techniques))
    //Guess is always the highest, so AllTechniques should already be sorted.
    AllTechniques = append(Techniques, GuessTechnique)
    //Build the by-name lookup used by techniquesByName consumers.
    techniquesByName = make(map[string]SolveTechnique)
    for _, technique := range AllTechniques {
        techniquesByName[technique.Name()] = technique
    }
}
//Name returns the human-readable shortname of this technique.
func (self *basicSolveTechnique) Name() string {
    return self.name
}

//IsFill returns whether applying this technique fills a number into a cell
//(true) as opposed to culling possibilities (false).
func (self *basicSolveTechnique) IsFill() bool {
    return self.isFill
}
//TOOD: this is now named incorrectly. (It should be likelihoodHelper)
//difficultyHelper combines a group-type multiplier, a fill/cull multiplier,
//and baseDifficulty raised to the subset size k. Embedding structs call into
//this to provide their own Difficulty.
func (self *basicSolveTechnique) difficultyHelper(baseDifficulty float64) float64 {
    //TODO: the default difficulties, as configured, will mean that SolveDirection's Difficulty() will almost always clamp to 1.0.
    //They're only useful in terms of a reasonable picking of techniques when multiple apply.
    var groupMultiplier float64
    switch self.groupType {
    case _GROUP_ROW:
        //Rows are easier to scan than columns because most humans are used to reading LTR.
        groupMultiplier = 1.25
    case _GROUP_COL:
        //Cols are easy to scan in one line, but the eye moves a long way in an unnatural direction.
        groupMultiplier = 1.3
    default:
        //Blocks (and techniques with no group) get the baseline multiplier.
        groupMultiplier = 1.0
    }
    //TODO: Arguably, the "fill-ness" of a technique should be encoded in the baseDifficulty, and this is a hack to quickly change it for all fill techniques.
    fillMultiplier := 5.0
    if self.IsFill() {
        fillMultiplier = 1.0
    }
    return groupMultiplier * fillMultiplier * math.Pow(baseDifficulty, float64(self.k))
}
//getter returns a function that fetches the i'th cell group of this
//technique's kind (row, col, or block) from the given grid.
func (self *basicSolveTechnique) getter(grid *Grid) func(int) CellSlice {
    var fetch func(int) CellSlice
    switch self.groupType {
    case _GROUP_ROW:
        fetch = grid.Row
    case _GROUP_COL:
        fetch = grid.Col
    case _GROUP_BLOCK:
        fetch = grid.Block
    default:
        //This should never happen in normal execution--the rare techniques where it doesn't work should never call getter.
        log.Println("Asked for a getter for a function with GROUP_NONE")
        //Return a shell of a function just to not trip up things downstream.
        fetch = func(int) CellSlice {
            return nil
        }
    }
    return fetch
}
//This is useful both for hidden and naked subset techniques.
//subsetIndexes returns all size-length combinations of indexes into a list of
//length n, in lexicographic order (e.g. n=3, size=2 -> {0,1},{0,2},{1,2}).
//Returns nil when size > n.
//(The first parameter was previously named `len`, shadowing the builtin.)
func subsetIndexes(n int, size int) [][]int {
    //Sanity check
    if size > n {
        return nil
    }
    result := make([][]int, 0)
    //counters holds the current combination, starting at {0, 1, ..., size-1}.
    counters := make([]int, size)
    for i := range counters {
        counters[i] = i
    }
    for {
        //Snapshot the current combination (counters mutates in place).
        innerResult := make([]int, size)
        copy(innerResult, counters)
        result = append(result, innerResult)
        //Now, increment: scan from the right for a counter that can advance.
        incremented := false
        for i := size - 1; i >= 0; i-- {
            if counters[i] < n-(size-i) {
                //Found one!
                counters[i]++
                incremented = true
                if i < size-1 {
                    //It was an inner counter; reset the counters to its right
                    //to consecutive values one above it.
                    base := counters[i] + 1
                    for j := i + 1; j < size; j++ {
                        counters[j] = base
                        base++
                    }
                }
                break
            }
        }
        //If we couldn't increment any, every combination has been emitted.
        if !incremented {
            break
        }
    }
    return result
}
|
package main
import (
"testing"
)
// TestValidateUser table-tests IsValidateUser with valid and invalid
// credential pairs.
func TestValidateUser(t *testing.T) {
    var tests = []struct {
        username string
        password string
        want     bool
    }{
        {"foo", "bar", true},
        {"user", "pass", false},
    }
    for _, test := range tests {
        if got := IsValidateUser(test.username, test.password); got != test.want {
            // Bug fix: t.Error does not interpret printf verbs — Errorf is
            // required. Also name the function actually under test and
            // report the expected value.
            t.Errorf("IsValidateUser(%q, %q) = %v, want %v", test.username, test.password, got, test.want)
        }
    }
}
moving stuff around
|
package ct
import (
"bytes"
"container/list"
"crypto"
"encoding/binary"
"errors"
"fmt"
"io"
)
// Variable size structure prefix-header byte lengths. The wire format uses
// TLS-style variable-length vectors (see RFC6962 section 3.2, referenced
// below): each opaque field is preceded by a big-endian length of this many
// bytes.
const (
    CertificateLengthBytes      = 3
    PreCertificateLengthBytes   = 3
    ExtensionsLengthBytes       = 2
    CertificateChainLengthBytes = 3
    SignatureLengthBytes        = 2
)

// Max lengths expressible by the corresponding length prefixes above.
const (
    // MaxCertificateLength is the largest value a 3-byte prefix can hold.
    MaxCertificateLength = (1 << 24) - 1
    // MaxExtensionsLength is the largest value a 2-byte prefix can hold.
    MaxExtensionsLength = (1 << 16) - 1
)
func writeUint(w io.Writer, value uint64, numBytes int) error {
buf := make([]uint8, numBytes)
for i := 0; i < numBytes; i++ {
buf[numBytes-i-1] = uint8(value & 0xff)
value >>= 8
}
if value != 0 {
return errors.New("numBytes was insufficiently large to represent value")
}
if _, err := w.Write(buf); err != nil {
return err
}
return nil
}
// writeVarBytes writes |value| to |w| preceded by a |numLenBytes|-byte
// big-endian length prefix.
func writeVarBytes(w io.Writer, value []byte, numLenBytes int) error {
    // Emit the length prefix first, then the raw payload.
    if err := writeUint(w, uint64(len(value)), numLenBytes); err != nil {
        return err
    }
    _, err := w.Write(value)
    return err
}
func readUint(r io.Reader, numBytes int) (uint64, error) {
var l uint64
for i := 0; i < numBytes; i++ {
l <<= 8
var t uint8
if err := binary.Read(r, binary.BigEndian, &t); err != nil {
return 0, err
}
l |= uint64(t)
}
return l, nil
}
// readVarBytes reads a variable length array of bytes from |r|. |numLenBytes|
// specifies the number of (BigEndian) prefix-bytes which contain the length
// of the actual array data bytes that follow.
// Allocates an array to hold the contents and returns a slice view into it if
// the read was successful, or an error otherwise.
func readVarBytes(r io.Reader, numLenBytes int) ([]byte, error) {
    if numLenBytes == 0 {
        return nil, errors.New("numLenBytes should be > 0")
    }
    if numLenBytes > 8 {
        return nil, fmt.Errorf("numLenBytes too large (%d)", numLenBytes)
    }
    length, err := readUint(r, numLenBytes)
    if err != nil {
        return nil, err
    }
    data := make([]byte, length)
    n, err := io.ReadFull(r, data)
    if err == io.EOF || err == io.ErrUnexpectedEOF {
        // Translate a truncated payload into a more descriptive error.
        return nil, fmt.Errorf("short read: expected %d but got %d", length, n)
    }
    if err != nil {
        return nil, err
    }
    return data, nil
}
// readASN1CertList reads a length-prefixed list of length-prefixed ASN1Cert
// entries from |r|. |totalLenBytes| is the width of the outer list-length
// prefix, |elementLenBytes| the width of each element's prefix.
func readASN1CertList(r io.Reader, totalLenBytes int, elementLenBytes int) ([]ASN1Cert, error) {
    listBytes, err := readVarBytes(r, totalLenBytes)
    if err != nil {
        return []ASN1Cert{}, err
    }
    entries := list.New()
    listReader := bytes.NewReader(listBytes)
    for {
        entry, err := readVarBytes(listReader, elementLenBytes)
        if err != nil {
            if err == io.EOF {
                // Clean end of the list.
                break
            }
            return []ASN1Cert{}, err
        }
        entries.PushBack(entry)
    }
    ret := make([]ASN1Cert, entries.Len())
    i := 0
    for e := entries.Front(); e != nil; e = e.Next() {
        ret[i] = e.Value.([]byte)
        i++
    }
    return ret, nil
}
// ReadTimestampedEntryInto parses the byte-stream representation of a
// TimestampedEntry from |r| and populates the struct |t| with the data. See
// RFC section 3.4 for details on the format.
// Returns a non-nil error if there was a problem.
func ReadTimestampedEntryInto(r io.Reader, t *TimestampedEntry) error {
    var err error
    if err = binary.Read(r, binary.BigEndian, &t.Timestamp); err != nil {
        return err
    }
    if err = binary.Read(r, binary.BigEndian, &t.EntryType); err != nil {
        return err
    }
    // The payload layout depends on the entry type.
    switch t.EntryType {
    case X509LogEntryType:
        if t.X509Entry, err = readVarBytes(r, CertificateLengthBytes); err != nil {
            return err
        }
    case PrecertLogEntryType:
        if err := binary.Read(r, binary.BigEndian, &t.PrecertEntry.IssuerKeyHash); err != nil {
            return err
        }
        if t.PrecertEntry.TBSCertificate, err = readVarBytes(r, PreCertificateLengthBytes); err != nil {
            return err
        }
    default:
        return fmt.Errorf("unknown EntryType: %d", t.EntryType)
    }
    t.Extensions, err = readVarBytes(r, ExtensionsLengthBytes)
    // Bug fix: previously this returned nil unconditionally, silently
    // swallowing a failure to read the extensions.
    return err
}
// ReadMerkleTreeLeaf parses the byte-stream representation of a MerkleTreeLeaf
// and returns a pointer to a new MerkleTreeLeaf structure containing the
// parsed data.
// See RFC section 3.4 for details on the format.
// Returns a pointer to a new MerkleTreeLeaf or non-nil error if there was a
// problem.
func ReadMerkleTreeLeaf(r io.Reader) (*MerkleTreeLeaf, error) {
    var leaf MerkleTreeLeaf
    // Version comes first and must be V1.
    if err := binary.Read(r, binary.BigEndian, &leaf.Version); err != nil {
        return nil, err
    }
    if leaf.Version != V1 {
        return nil, fmt.Errorf("unknown Version %d", leaf.Version)
    }
    // Only timestamped-entry leaves are defined.
    if err := binary.Read(r, binary.BigEndian, &leaf.LeafType); err != nil {
        return nil, err
    }
    if leaf.LeafType != TimestampedEntryLeafType {
        return nil, fmt.Errorf("unknown LeafType %d", leaf.LeafType)
    }
    if err := ReadTimestampedEntryInto(r, &leaf.TimestampedEntry); err != nil {
        return nil, err
    }
    return &leaf, nil
}
// UnmarshalX509ChainArray unmarshalls the contents of the "chain:" entry in a
// GetEntries response in the case where the entry refers to an X509 leaf.
// The chain is a 3-byte-length-prefixed list of 3-byte-length-prefixed certs.
func UnmarshalX509ChainArray(b []byte) ([]ASN1Cert, error) {
    return readASN1CertList(bytes.NewReader(b), CertificateChainLengthBytes, CertificateLengthBytes)
}
// UnmarshalPrecertChainArray unmarshalls the contents of the "chain:" entry in
// a GetEntries response in the case where the entry refers to a
// Precertificate leaf: one pre-cert entry followed by the issuing chain.
func UnmarshalPrecertChainArray(b []byte) ([]ASN1Cert, error) {
    var certs []ASN1Cert
    r := bytes.NewReader(b)
    // The first element is the pre-certificate itself.
    precert, err := readVarBytes(r, CertificateLengthBytes)
    if err != nil {
        return certs, err
    }
    certs = append(certs, precert)
    // The remainder is the chain up to the root.
    rest, err := readASN1CertList(r, CertificateChainLengthBytes, CertificateLengthBytes)
    if err != nil {
        return certs, err
    }
    certs = append(certs, rest...)
    return certs, nil
}
// UnmarshalDigitallySigned reconstructs a DigitallySigned structure from a
// Reader: one hash-algorithm byte, one signature-algorithm byte, then a
// 2-byte-length-prefixed signature.
func UnmarshalDigitallySigned(r io.Reader) (*DigitallySigned, error) {
    var hashAlg, sigAlg byte
    if err := binary.Read(r, binary.BigEndian, &hashAlg); err != nil {
        return nil, fmt.Errorf("failed to read HashAlgorithm: %v", err)
    }
    if err := binary.Read(r, binary.BigEndian, &sigAlg); err != nil {
        return nil, fmt.Errorf("failed to read SignatureAlgorithm: %v", err)
    }
    sig, err := readVarBytes(r, SignatureLengthBytes)
    if err != nil {
        return nil, fmt.Errorf("failed to read Signature bytes: %v", err)
    }
    return &DigitallySigned{
        HashAlgorithm:      HashAlgorithm(hashAlg),
        SignatureAlgorithm: SignatureAlgorithm(sigAlg),
        Signature:          sig,
    }, nil
}
// marshalDigitallySignedHere serializes |ds| into |here| (allocating a buffer
// when here is nil). Layout: 1 byte hash alg, 1 byte sig alg, 2-byte
// big-endian signature length, signature bytes. Returns ErrNotEnoughBuffer if
// a provided buffer is too small.
func marshalDigitallySignedHere(ds DigitallySigned, here []byte) ([]byte, error) {
    sigLen := len(ds.Signature)
    outLen := 2 + SignatureLengthBytes + sigLen
    if here == nil {
        here = make([]byte, outLen)
    } else if len(here) < outLen {
        return nil, ErrNotEnoughBuffer
    }
    here = here[:outLen]
    here[0] = byte(ds.HashAlgorithm)
    here[1] = byte(ds.SignatureAlgorithm)
    binary.BigEndian.PutUint16(here[2:4], uint16(sigLen))
    copy(here[4:], ds.Signature)
    return here, nil
}
// MarshalDigitallySigned marshalls a DigitallySigned structure into a byte array,
// allocating a fresh buffer (see marshalDigitallySignedHere for the layout).
func MarshalDigitallySigned(ds DigitallySigned) ([]byte, error) {
    return marshalDigitallySignedHere(ds, nil)
}
// checkCertificateFormat validates that a certificate is non-empty and fits
// within the 3-byte length prefix used on the wire.
func checkCertificateFormat(cert ASN1Cert) error {
    switch {
    case len(cert) == 0:
        return errors.New("certificate is zero length")
    case len(cert) > MaxCertificateLength:
        return errors.New("certificate too large")
    default:
        return nil
    }
}
// checkExtensionsFormat validates that extensions fit within the 2-byte
// length prefix used on the wire; empty extensions are fine.
func checkExtensionsFormat(ext CTExtensions) error {
    if len(ext) <= MaxExtensionsLength {
        return nil
    }
    return errors.New("extensions too large")
}
// serializeV1CertSCTSignatureInput builds the signed blob for an SCT over an
// X.509 leaf: version || signature_type || timestamp || entry_type ||
// length-prefixed cert || length-prefixed extensions, all big-endian.
func serializeV1CertSCTSignatureInput(timestamp uint64, cert ASN1Cert, ext CTExtensions) ([]byte, error) {
    // Validate inputs before serializing anything.
    if err := checkCertificateFormat(cert); err != nil {
        return nil, err
    }
    if err := checkExtensionsFormat(ext); err != nil {
        return nil, err
    }
    var buf bytes.Buffer
    if err := binary.Write(&buf, binary.BigEndian, V1); err != nil {
        return nil, err
    }
    if err := binary.Write(&buf, binary.BigEndian, CertificateTimestampSignatureType); err != nil {
        return nil, err
    }
    if err := binary.Write(&buf, binary.BigEndian, timestamp); err != nil {
        return nil, err
    }
    if err := binary.Write(&buf, binary.BigEndian, X509LogEntryType); err != nil {
        return nil, err
    }
    // The certificate and extensions carry their own length prefixes.
    if err := writeVarBytes(&buf, cert, CertificateLengthBytes); err != nil {
        return nil, err
    }
    if err := writeVarBytes(&buf, ext, ExtensionsLengthBytes); err != nil {
        return nil, err
    }
    return buf.Bytes(), nil
}
// serializeV1PrecertSCTSignatureInput builds the signed blob for an SCT over
// a precertificate leaf: version || signature_type || timestamp ||
// entry_type || issuer key hash (raw, fixed length) || length-prefixed TBS
// cert || length-prefixed extensions, all big-endian.
func serializeV1PrecertSCTSignatureInput(timestamp uint64, issuerKeyHash [issuerKeyHashLength]byte, tbs []byte, ext CTExtensions) ([]byte, error) {
    // Validate inputs before serializing anything.
    if err := checkCertificateFormat(tbs); err != nil {
        return nil, err
    }
    if err := checkExtensionsFormat(ext); err != nil {
        return nil, err
    }
    var buf bytes.Buffer
    if err := binary.Write(&buf, binary.BigEndian, V1); err != nil {
        return nil, err
    }
    if err := binary.Write(&buf, binary.BigEndian, CertificateTimestampSignatureType); err != nil {
        return nil, err
    }
    if err := binary.Write(&buf, binary.BigEndian, timestamp); err != nil {
        return nil, err
    }
    if err := binary.Write(&buf, binary.BigEndian, PrecertLogEntryType); err != nil {
        return nil, err
    }
    // The issuer key hash is fixed-width, so it is written without a prefix.
    if _, err := buf.Write(issuerKeyHash[:]); err != nil {
        return nil, err
    }
    if err := writeVarBytes(&buf, tbs, CertificateLengthBytes); err != nil {
        return nil, err
    }
    if err := writeVarBytes(&buf, ext, ExtensionsLengthBytes); err != nil {
        return nil, err
    }
    return buf.Bytes(), nil
}
// serializeV1SCTSignatureInput dispatches to the X.509 or precert serializer
// based on the entry type carried in the leaf.
func serializeV1SCTSignatureInput(sct SignedCertificateTimestamp, entry LogEntry) ([]byte, error) {
    if sct.SCTVersion != V1 {
        return nil, fmt.Errorf("unsupported SCT version, expected V1, but got %s", sct.SCTVersion)
    }
    if entry.Leaf.LeafType != TimestampedEntryLeafType {
        // Message lowercased for consistency with the other errors here.
        return nil, fmt.Errorf("unsupported leaf type %s", entry.Leaf.LeafType)
    }
    switch entry.Leaf.TimestampedEntry.EntryType {
    case X509LogEntryType:
        return serializeV1CertSCTSignatureInput(sct.Timestamp, entry.Leaf.TimestampedEntry.X509Entry, entry.Leaf.TimestampedEntry.Extensions)
    case PrecertLogEntryType:
        return serializeV1PrecertSCTSignatureInput(sct.Timestamp, entry.Leaf.TimestampedEntry.PrecertEntry.IssuerKeyHash,
            entry.Leaf.TimestampedEntry.PrecertEntry.TBSCertificate,
            entry.Leaf.TimestampedEntry.Extensions)
    default:
        // Bug fix: the old message said "TimestampedEntryLeafType" while
        // actually printing the EntryType field.
        return nil, fmt.Errorf("unknown EntryType %s", entry.Leaf.TimestampedEntry.EntryType)
    }
}
// SerializeSCTSignatureInput serializes the passed in sct and log entry into
// the correct format for signing.
func SerializeSCTSignatureInput(sct SignedCertificateTimestamp, entry LogEntry) ([]byte, error) {
	// Only V1 is currently supported; anything else is rejected.
	if sct.SCTVersion != V1 {
		return nil, fmt.Errorf("unknown SCT version %d", sct.SCTVersion)
	}
	return serializeV1SCTSignatureInput(sct, entry)
}
// SerializedLength will return the space (in bytes) needed to hold the
// serialized V1 SCT: 1 (version) + 32 (log ID) + 8 (timestamp) +
// 2 (extensions length prefix) + extensions + 2 (hash/signature algorithm
// bytes) + 2 (signature length prefix) + signature.
func (sct SignedCertificateTimestamp) SerializedLength() (int, error) {
	switch sct.SCTVersion {
	case V1:
		extLen := len(sct.Extensions)
		sigLen := len(sct.Signature.Signature)
		return 1 + 32 + 8 + 2 + extLen + 2 + 2 + sigLen, nil
	default:
		return 0, ErrInvalidVersion
	}
}
// serializeV1SCTHere writes the RFC6962 s3.2 encoding of a V1 SCT into
// here (allocating a buffer when here is nil) and returns the slice
// trimmed to the exact serialized length.
func serializeV1SCTHere(sct SignedCertificateTimestamp, here []byte) ([]byte, error) {
	if sct.SCTVersion != V1 {
		return nil, ErrInvalidVersion
	}
	sctLen, err := sct.SerializedLength()
	if err != nil {
		return nil, err
	}
	if here == nil {
		here = make([]byte, sctLen)
	}
	if len(here) < sctLen {
		return nil, ErrNotEnoughBuffer
	}
	if err := checkExtensionsFormat(sct.Extensions); err != nil {
		return nil, err
	}
	here = here[0:sctLen]
	// Write Version (1 byte)
	here[0] = byte(sct.SCTVersion)
	// Write LogID (32 bytes)
	copy(here[1:33], sct.LogID[:])
	// Write Timestamp (8 bytes, big-endian)
	binary.BigEndian.PutUint64(here[33:41], sct.Timestamp)
	// Write Extensions (2-byte length prefix, then the data)
	extLen := len(sct.Extensions)
	binary.BigEndian.PutUint16(here[41:43], uint16(extLen))
	n := 43 + extLen
	copy(here[43:n], sct.Extensions)
	// Write Signature into the remainder of the buffer
	_, err = marshalDigitallySignedHere(sct.Signature, here[n:])
	if err != nil {
		return nil, err
	}
	return here, nil
}
// SerializeSCTHere serializes sct into the wire format specified by
// RFC6962 section 3.2.
// When here is non-nil the encoding is written into it, and
// ErrNotEnoughBuffer is returned if it is too small; when here is nil a
// buffer of the required size is allocated.
// The returned slice is trimmed to the serialized length.
func SerializeSCTHere(sct SignedCertificateTimestamp, here []byte) ([]byte, error) {
	if sct.SCTVersion != V1 {
		return nil, fmt.Errorf("unknown SCT version %d", sct.SCTVersion)
	}
	return serializeV1SCTHere(sct, here)
}
// SerializeSCT serializes the passed in sct into the format specified
// by RFC6962 section 3.2.
// Equivalent to SerializeSCTHere(sct, nil): a fresh buffer is allocated.
func SerializeSCT(sct SignedCertificateTimestamp) ([]byte, error) {
	return SerializeSCTHere(sct, nil)
}
// deserializeSCTV1 reads the V1 SCT fields that follow the version byte
// (log ID, timestamp, length-prefixed extensions, signature) from r into sct.
func deserializeSCTV1(r io.Reader, sct *SignedCertificateTimestamp) error {
	if err := binary.Read(r, binary.BigEndian, &sct.LogID); err != nil {
		return err
	}
	if err := binary.Read(r, binary.BigEndian, &sct.Timestamp); err != nil {
		return err
	}
	ext, err := readVarBytes(r, ExtensionsLengthBytes)
	if err != nil {
		return err
	}
	sct.Extensions = ext
	ds, err := UnmarshalDigitallySigned(r)
	if err != nil {
		return err
	}
	sct.Signature = *ds
	return nil
}
// DeserializeSCT parses an SCT in the RFC6962 section 3.2 wire format
// from r. Note that on a V1 parse failure the partially populated SCT is
// returned alongside the error from deserializeSCTV1.
func DeserializeSCT(r io.Reader) (*SignedCertificateTimestamp, error) {
	var sct SignedCertificateTimestamp
	if err := binary.Read(r, binary.BigEndian, &sct.SCTVersion); err != nil {
		return nil, err
	}
	switch sct.SCTVersion {
	case V1:
		return &sct, deserializeSCTV1(r, &sct)
	default:
		return nil, fmt.Errorf("unknown SCT version %d", sct.SCTVersion)
	}
}
// serializeV1STHSignatureInput builds the V1 tree-head signature input:
// version, signature type, timestamp, tree size and SHA-256 root hash,
// all big-endian.
func serializeV1STHSignatureInput(sth SignedTreeHead) ([]byte, error) {
	if sth.Version != V1 {
		return nil, fmt.Errorf("invalid STH version %d", sth.Version)
	}
	// NOTE(review): if TreeSize is an unsigned type this check can never
	// fire — confirm against the SignedTreeHead declaration.
	if sth.TreeSize < 0 {
		return nil, fmt.Errorf("invalid tree size %d", sth.TreeSize)
	}
	if len(sth.SHA256RootHash) != crypto.SHA256.Size() {
		return nil, fmt.Errorf("invalid TreeHash length, got %d expected %d", len(sth.SHA256RootHash), crypto.SHA256.Size())
	}
	var buf bytes.Buffer
	if err := binary.Write(&buf, binary.BigEndian, V1); err != nil {
		return nil, err
	}
	if err := binary.Write(&buf, binary.BigEndian, TreeHashSignatureType); err != nil {
		return nil, err
	}
	if err := binary.Write(&buf, binary.BigEndian, sth.Timestamp); err != nil {
		return nil, err
	}
	if err := binary.Write(&buf, binary.BigEndian, sth.TreeSize); err != nil {
		return nil, err
	}
	if err := binary.Write(&buf, binary.BigEndian, sth.SHA256RootHash); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// SerializeSTHSignatureInput serializes the passed in sth into the correct
// format for signing.
func SerializeSTHSignatureInput(sth SignedTreeHead) ([]byte, error) {
	// Only V1 tree heads can currently be serialized.
	if sth.Version != V1 {
		return nil, fmt.Errorf("unsupported STH version %d", sth.Version)
	}
	return serializeV1STHSignatureInput(sth)
}
// SCTListSerializedLength determines the length of the required buffer should a SCT List need to be serialized:
// a 2-byte list-length prefix plus, for each SCT, a 2-byte length prefix
// and the serialized SCT itself. Empty lists and lists that overflow the
// 16-bit prefixes are rejected.
// NOTE(review): uses the literal 0xFFFF where named constants would be
// clearer.
func SCTListSerializedLength(scts []SignedCertificateTimestamp) (int, error) {
	sctListLen := 2
	for i, sct := range scts {
		n, err := sct.SerializedLength()
		if err != nil {
			return 0, fmt.Errorf("unable to determine length of SCT in position %d: %v", i, err)
		}
		// Each per-SCT length must fit its 2-byte prefix.
		if n > 0xFFFF {
			return 0, fmt.Errorf("SCT in position %d too large: %d", i, n)
		}
		sctListLen += n + 2
	}
	if sctListLen == 2 {
		return 0, fmt.Errorf("SCT List empty")
	}
	// The list-length prefix itself (2 bytes) is not counted by the prefix.
	if sctListLen > 0xFFFF+2 {
		return 0, fmt.Errorf("SCT List too large to serialize: %d", sctListLen)
	}
	return sctListLen, nil
}
// SerializeSCTListHere serializes the passed in slice of SignedCertificateTimestamp
// into here as an SCT List: a 2-byte total-length prefix followed by, for
// each SCT, a 2-byte length prefix and the serialized SCT.
// If here is nil a buffer of the required size is allocated; otherwise
// ErrNotEnoughBuffer is returned when it is too small.
// The returned slice is trimmed to the serialized length.
func SerializeSCTListHere(scts []SignedCertificateTimestamp, here []byte) ([]byte, error) {
	sctListOutLen, err := SCTListSerializedLength(scts)
	if err != nil {
		return nil, err
	}
	// Compare against nil (not len == 0) for consistency with the other
	// *Here helpers in this file.
	if here == nil {
		here = make([]byte, sctListOutLen)
	}
	if len(here) < sctListOutLen {
		return nil, ErrNotEnoughBuffer
	}
	here = here[0:sctListOutLen]
	// The list-length prefix excludes its own two bytes.
	binary.BigEndian.PutUint16(here[0:2], uint16(sctListOutLen-2))
	sctListPos := 2
	for i, sct := range scts {
		// Bug fix: the error was previously discarded with a blank
		// identifier; check it explicitly instead of relying on the
		// earlier SCTListSerializedLength pass.
		n, err := sct.SerializedLength()
		if err != nil {
			return nil, fmt.Errorf("unable to determine length of SCT in position %d: %v", i, err)
		}
		if n > 0xFFFF {
			return nil, fmt.Errorf("SCT in position %d too large: %d", i, n)
		}
		binary.BigEndian.PutUint16(here[sctListPos:sctListPos+2], uint16(n))
		sctListPos += 2
		if _, err := SerializeSCTHere(sct, here[sctListPos:sctListPos+n]); err != nil {
			return nil, fmt.Errorf("unable to serialize SCT in position %d: %v", i, err)
		}
		sctListPos += n
	}
	// Defensive consistency check between the two length computations.
	if sctListPos != sctListOutLen {
		return nil, fmt.Errorf("SCTList size expected %d got %d", sctListOutLen, sctListPos)
	}
	return here, nil
}
Rework patch:
- Add and use constants MaxSCTListLength and MaxSCTInListLength
- Reword function descriptions
- Add a wrapper for the Here function
- Compare the byte slice against nil
- Check errors returned by sct.SerializedLength
package ct
import (
"bytes"
"container/list"
"crypto"
"encoding/binary"
"errors"
"fmt"
"io"
)
// Variable size structure prefix-header byte lengths
const (
	CertificateLengthBytes      = 3 // 24-bit length prefix
	PreCertificateLengthBytes   = 3
	ExtensionsLengthBytes       = 2 // 16-bit length prefix
	CertificateChainLengthBytes = 3
	SignatureLengthBytes        = 2
)

// Max lengths: the largest value each corresponding prefix above can express.
const (
	MaxCertificateLength = (1 << 24) - 1
	MaxExtensionsLength  = (1 << 16) - 1
	MaxSCTInListLength   = (1 << 16) - 1
	MaxSCTListLength     = (1 << 16) - 1
)
func writeUint(w io.Writer, value uint64, numBytes int) error {
buf := make([]uint8, numBytes)
for i := 0; i < numBytes; i++ {
buf[numBytes-i-1] = uint8(value & 0xff)
value >>= 8
}
if value != 0 {
return errors.New("numBytes was insufficiently large to represent value")
}
if _, err := w.Write(buf); err != nil {
return err
}
return nil
}
// writeVarBytes writes value to w preceded by its length encoded as a
// numLenBytes big-endian prefix.
func writeVarBytes(w io.Writer, value []byte, numLenBytes int) error {
	if err := writeUint(w, uint64(len(value)), numLenBytes); err != nil {
		return err
	}
	_, err := w.Write(value)
	return err
}
func readUint(r io.Reader, numBytes int) (uint64, error) {
var l uint64
for i := 0; i < numBytes; i++ {
l <<= 8
var t uint8
if err := binary.Read(r, binary.BigEndian, &t); err != nil {
return 0, err
}
l |= uint64(t)
}
return l, nil
}
// Reads a variable length array of bytes from |r|. |numLenBytes| specifies the
// number of (BigEndian) prefix-bytes which contain the length of the actual
// array data bytes that follow.
// Allocates an array to hold the contents and returns a slice view into it if
// the read was successful, or an error otherwise.
func readVarBytes(r io.Reader, numLenBytes int) ([]byte, error) {
	switch {
	case numLenBytes > 8:
		// The length accumulator is a uint64, so at most 8 prefix bytes.
		return nil, fmt.Errorf("numLenBytes too large (%d)", numLenBytes)
	case numLenBytes <= 0:
		// Bug fix: previously only numLenBytes == 0 was rejected; a
		// negative count slipped through and silently returned an empty
		// slice.
		return nil, errors.New("numLenBytes should be > 0")
	}
	l, err := readUint(r, numLenBytes)
	if err != nil {
		return nil, err
	}
	data := make([]byte, l)
	if n, err := io.ReadFull(r, data); err != nil {
		// Surface short reads with the expected/actual byte counts.
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			return nil, fmt.Errorf("short read: expected %d but got %d", l, n)
		}
		return nil, err
	}
	return data, nil
}
// Reads a list of ASN1Cert types from |r|: a totalLenBytes length-prefixed
// blob containing consecutive elementLenBytes length-prefixed certificates.
// io.EOF from the inner reader marks the normal end of the list.
func readASN1CertList(r io.Reader, totalLenBytes int, elementLenBytes int) ([]ASN1Cert, error) {
	listBytes, err := readVarBytes(r, totalLenBytes)
	if err != nil {
		return []ASN1Cert{}, err
	}
	list := list.New()
	listReader := bytes.NewReader(listBytes)
	var entry []byte
	for err == nil {
		entry, err = readVarBytes(listReader, elementLenBytes)
		if err != nil {
			// EOF simply means the whole list has been consumed.
			if err != io.EOF {
				return []ASN1Cert{}, err
			}
		} else {
			list.PushBack(entry)
		}
	}
	// Copy the accumulated linked list into a typed slice.
	ret := make([]ASN1Cert, list.Len())
	i := 0
	for e := list.Front(); e != nil; e = e.Next() {
		ret[i] = e.Value.([]byte)
		i++
	}
	return ret, nil
}
// ReadTimestampedEntryInto parses the byte-stream representation of a
// TimestampedEntry from |r| and populates the struct |t| with the data. See
// RFC section 3.4 for details on the format.
// Returns a non-nil error if there was a problem.
func ReadTimestampedEntryInto(r io.Reader, t *TimestampedEntry) error {
	var err error
	if err = binary.Read(r, binary.BigEndian, &t.Timestamp); err != nil {
		return err
	}
	if err = binary.Read(r, binary.BigEndian, &t.EntryType); err != nil {
		return err
	}
	switch t.EntryType {
	case X509LogEntryType:
		if t.X509Entry, err = readVarBytes(r, CertificateLengthBytes); err != nil {
			return err
		}
	case PrecertLogEntryType:
		// The issuer key hash is fixed-size, so it is read directly.
		if err := binary.Read(r, binary.BigEndian, &t.PrecertEntry.IssuerKeyHash); err != nil {
			return err
		}
		if t.PrecertEntry.TBSCertificate, err = readVarBytes(r, PreCertificateLengthBytes); err != nil {
			return err
		}
	default:
		return fmt.Errorf("unknown EntryType: %d", t.EntryType)
	}
	// Bug fix: the error from reading the extensions was previously
	// assigned but discarded ("return nil"), so a truncated stream went
	// unreported.
	t.Extensions, err = readVarBytes(r, ExtensionsLengthBytes)
	return err
}
// ReadMerkleTreeLeaf parses the byte-stream representation of a MerkleTreeLeaf
// and returns a pointer to a new MerkleTreeLeaf structure containing the
// parsed data.
// See RFC section 3.4 for details on the format.
// Returns a pointer to a new MerkleTreeLeaf or non-nil error if there was a
// problem.
func ReadMerkleTreeLeaf(r io.Reader) (*MerkleTreeLeaf, error) {
	var m MerkleTreeLeaf
	if err := binary.Read(r, binary.BigEndian, &m.Version); err != nil {
		return nil, err
	}
	// Only V1 leaves are understood.
	if m.Version != V1 {
		return nil, fmt.Errorf("unknown Version %d", m.Version)
	}
	if err := binary.Read(r, binary.BigEndian, &m.LeafType); err != nil {
		return nil, err
	}
	// Only the timestamped-entry leaf type is understood.
	if m.LeafType != TimestampedEntryLeafType {
		return nil, fmt.Errorf("unknown LeafType %d", m.LeafType)
	}
	if err := ReadTimestampedEntryInto(r, &m.TimestampedEntry); err != nil {
		return nil, err
	}
	return &m, nil
}
// UnmarshalX509ChainArray unmarshalls the contents of the "chain:" entry in a
// GetEntries response in the case where the entry refers to an X509 leaf.
func UnmarshalX509ChainArray(b []byte) ([]ASN1Cert, error) {
	reader := bytes.NewReader(b)
	return readASN1CertList(reader, CertificateChainLengthBytes, CertificateLengthBytes)
}
// UnmarshalPrecertChainArray unmarshalls the contents of the "chain:" entry in
// a GetEntries response in the case where the entry refers to a Precertificate
// leaf. The first element of the returned chain is the precertificate
// itself; the remainder is the chain up to the root.
func UnmarshalPrecertChainArray(b []byte) ([]ASN1Cert, error) {
	var chain []ASN1Cert
	reader := bytes.NewReader(b)
	// read the pre-cert entry:
	precert, err := readVarBytes(reader, CertificateLengthBytes)
	if err != nil {
		return chain, err
	}
	chain = append(chain, precert)
	// and then read and return the chain up to the root:
	remainingChain, err := readASN1CertList(reader, CertificateChainLengthBytes, CertificateLengthBytes)
	if err != nil {
		return chain, err
	}
	chain = append(chain, remainingChain...)
	return chain, nil
}
// UnmarshalDigitallySigned reconstructs a DigitallySigned structure from a
// Reader: one byte of hash algorithm, one byte of signature algorithm,
// then a 2-byte length-prefixed signature.
func UnmarshalDigitallySigned(r io.Reader) (*DigitallySigned, error) {
	var h byte
	if err := binary.Read(r, binary.BigEndian, &h); err != nil {
		return nil, fmt.Errorf("failed to read HashAlgorithm: %v", err)
	}
	var s byte
	if err := binary.Read(r, binary.BigEndian, &s); err != nil {
		return nil, fmt.Errorf("failed to read SignatureAlgorithm: %v", err)
	}
	sig, err := readVarBytes(r, SignatureLengthBytes)
	if err != nil {
		return nil, fmt.Errorf("failed to read Signature bytes: %v", err)
	}
	return &DigitallySigned{
		HashAlgorithm:      HashAlgorithm(h),
		SignatureAlgorithm: SignatureAlgorithm(s),
		Signature:          sig,
	}, nil
}
// marshalDigitallySignedHere encodes ds as: 1 byte hash algorithm, 1 byte
// signature algorithm, 2-byte big-endian signature length, signature bytes.
// A buffer is allocated when here is nil; ErrNotEnoughBuffer is returned
// when the provided buffer is too small. The returned slice is trimmed to
// the encoded length.
func marshalDigitallySignedHere(ds DigitallySigned, here []byte) ([]byte, error) {
	sigLen := len(ds.Signature)
	// 2 algorithm bytes + 2-byte length prefix + the signature itself.
	dsOutLen := 2 + SignatureLengthBytes + sigLen
	if here == nil {
		here = make([]byte, dsOutLen)
	}
	if len(here) < dsOutLen {
		return nil, ErrNotEnoughBuffer
	}
	here = here[0:dsOutLen]
	here[0] = byte(ds.HashAlgorithm)
	here[1] = byte(ds.SignatureAlgorithm)
	binary.BigEndian.PutUint16(here[2:4], uint16(sigLen))
	copy(here[4:], ds.Signature)
	return here, nil
}
// MarshalDigitallySigned marshalls a DigitallySigned structure into a byte array.
// Equivalent to marshalDigitallySignedHere(ds, nil): a fresh buffer is allocated.
func MarshalDigitallySigned(ds DigitallySigned) ([]byte, error) {
	return marshalDigitallySignedHere(ds, nil)
}
// checkCertificateFormat rejects empty certificates and certificates
// whose length cannot be expressed by the serialized length prefix.
func checkCertificateFormat(cert ASN1Cert) error {
	switch {
	case len(cert) == 0:
		return errors.New("certificate is zero length")
	case len(cert) > MaxCertificateLength:
		return errors.New("certificate too large")
	}
	return nil
}
// checkExtensionsFormat rejects extensions whose length cannot be
// expressed by the serialized length prefix.
func checkExtensionsFormat(ext CTExtensions) error {
	if l := len(ext); l > MaxExtensionsLength {
		return errors.New("extensions too large")
	}
	return nil
}
// serializeV1CertSCTSignatureInput builds the V1 signature input for an
// X509 certificate SCT: version, signature type, timestamp, entry type,
// length-prefixed certificate, then length-prefixed extensions, all
// big-endian.
func serializeV1CertSCTSignatureInput(timestamp uint64, cert ASN1Cert, ext CTExtensions) ([]byte, error) {
	// Validate sizes before writing anything.
	if err := checkCertificateFormat(cert); err != nil {
		return nil, err
	}
	if err := checkExtensionsFormat(ext); err != nil {
		return nil, err
	}
	var buf bytes.Buffer
	if err := binary.Write(&buf, binary.BigEndian, V1); err != nil {
		return nil, err
	}
	if err := binary.Write(&buf, binary.BigEndian, CertificateTimestampSignatureType); err != nil {
		return nil, err
	}
	if err := binary.Write(&buf, binary.BigEndian, timestamp); err != nil {
		return nil, err
	}
	if err := binary.Write(&buf, binary.BigEndian, X509LogEntryType); err != nil {
		return nil, err
	}
	if err := writeVarBytes(&buf, cert, CertificateLengthBytes); err != nil {
		return nil, err
	}
	if err := writeVarBytes(&buf, ext, ExtensionsLengthBytes); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// serializeV1PrecertSCTSignatureInput builds the V1 signature input for a
// precertificate SCT: version, signature type, timestamp, entry type,
// raw issuer key hash, length-prefixed TBS certificate, then
// length-prefixed extensions, all big-endian.
func serializeV1PrecertSCTSignatureInput(timestamp uint64, issuerKeyHash [issuerKeyHashLength]byte, tbs []byte, ext CTExtensions) ([]byte, error) {
	// Validate sizes before writing anything.
	if err := checkCertificateFormat(tbs); err != nil {
		return nil, err
	}
	if err := checkExtensionsFormat(ext); err != nil {
		return nil, err
	}
	var buf bytes.Buffer
	if err := binary.Write(&buf, binary.BigEndian, V1); err != nil {
		return nil, err
	}
	if err := binary.Write(&buf, binary.BigEndian, CertificateTimestampSignatureType); err != nil {
		return nil, err
	}
	if err := binary.Write(&buf, binary.BigEndian, timestamp); err != nil {
		return nil, err
	}
	if err := binary.Write(&buf, binary.BigEndian, PrecertLogEntryType); err != nil {
		return nil, err
	}
	// The issuer key hash is fixed-length, so it is written with no prefix.
	if _, err := buf.Write(issuerKeyHash[:]); err != nil {
		return nil, err
	}
	if err := writeVarBytes(&buf, tbs, CertificateLengthBytes); err != nil {
		return nil, err
	}
	if err := writeVarBytes(&buf, ext, ExtensionsLengthBytes); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// serializeV1SCTSignatureInput dispatches on the log entry type and builds
// the V1 signature input for either an X509 or a precert entry.
func serializeV1SCTSignatureInput(sct SignedCertificateTimestamp, entry LogEntry) ([]byte, error) {
	if sct.SCTVersion != V1 {
		return nil, fmt.Errorf("unsupported SCT version, expected V1, but got %s", sct.SCTVersion)
	}
	if entry.Leaf.LeafType != TimestampedEntryLeafType {
		// Lowercased ("Unsupported" -> "unsupported") to follow Go
		// error-string convention, matching every other error in this file.
		return nil, fmt.Errorf("unsupported leaf type %s", entry.Leaf.LeafType)
	}
	switch entry.Leaf.TimestampedEntry.EntryType {
	case X509LogEntryType:
		return serializeV1CertSCTSignatureInput(sct.Timestamp, entry.Leaf.TimestampedEntry.X509Entry, entry.Leaf.TimestampedEntry.Extensions)
	case PrecertLogEntryType:
		return serializeV1PrecertSCTSignatureInput(sct.Timestamp, entry.Leaf.TimestampedEntry.PrecertEntry.IssuerKeyHash,
			entry.Leaf.TimestampedEntry.PrecertEntry.TBSCertificate,
			entry.Leaf.TimestampedEntry.Extensions)
	default:
		return nil, fmt.Errorf("unknown TimestampedEntryLeafType %s", entry.Leaf.TimestampedEntry.EntryType)
	}
}
// SerializeSCTSignatureInput serializes the passed in sct and log entry into
// the correct format for signing.
func SerializeSCTSignatureInput(sct SignedCertificateTimestamp, entry LogEntry) ([]byte, error) {
	// Only V1 is currently supported; anything else is rejected.
	if sct.SCTVersion != V1 {
		return nil, fmt.Errorf("unknown SCT version %d", sct.SCTVersion)
	}
	return serializeV1SCTSignatureInput(sct, entry)
}
// SerializedLength will return the space (in bytes) needed to hold the
// serialized V1 SCT: 1 (version) + 32 (log ID) + 8 (timestamp) +
// 2 (extensions length prefix) + extensions + 2 (hash/signature algorithm
// bytes) + 2 (signature length prefix) + signature.
func (sct SignedCertificateTimestamp) SerializedLength() (int, error) {
	switch sct.SCTVersion {
	case V1:
		extLen := len(sct.Extensions)
		sigLen := len(sct.Signature.Signature)
		return 1 + 32 + 8 + 2 + extLen + 2 + 2 + sigLen, nil
	default:
		return 0, ErrInvalidVersion
	}
}
// serializeV1SCTHere writes the RFC6962 s3.2 encoding of a V1 SCT into
// here (allocating a buffer when here is nil) and returns the slice
// trimmed to the exact serialized length.
func serializeV1SCTHere(sct SignedCertificateTimestamp, here []byte) ([]byte, error) {
	if sct.SCTVersion != V1 {
		return nil, ErrInvalidVersion
	}
	sctLen, err := sct.SerializedLength()
	if err != nil {
		return nil, err
	}
	if here == nil {
		here = make([]byte, sctLen)
	}
	if len(here) < sctLen {
		return nil, ErrNotEnoughBuffer
	}
	if err := checkExtensionsFormat(sct.Extensions); err != nil {
		return nil, err
	}
	here = here[0:sctLen]
	// Write Version (1 byte)
	here[0] = byte(sct.SCTVersion)
	// Write LogID (32 bytes)
	copy(here[1:33], sct.LogID[:])
	// Write Timestamp (8 bytes, big-endian)
	binary.BigEndian.PutUint64(here[33:41], sct.Timestamp)
	// Write Extensions (2-byte length prefix, then the data)
	extLen := len(sct.Extensions)
	binary.BigEndian.PutUint16(here[41:43], uint16(extLen))
	n := 43 + extLen
	copy(here[43:n], sct.Extensions)
	// Write Signature into the remainder of the buffer
	_, err = marshalDigitallySignedHere(sct.Signature, here[n:])
	if err != nil {
		return nil, err
	}
	return here, nil
}
// SerializeSCTHere serializes sct into the wire format specified by
// RFC6962 section 3.2.
// When here is non-nil the encoding is written into it, and
// ErrNotEnoughBuffer is returned if it is too small; when here is nil a
// buffer of the required size is allocated.
// The returned slice is trimmed to the serialized length.
func SerializeSCTHere(sct SignedCertificateTimestamp, here []byte) ([]byte, error) {
	if sct.SCTVersion != V1 {
		return nil, fmt.Errorf("unknown SCT version %d", sct.SCTVersion)
	}
	return serializeV1SCTHere(sct, here)
}
// SerializeSCT serializes the passed in sct into the format specified
// by RFC6962 section 3.2.
// Equivalent to SerializeSCTHere(sct, nil): a fresh buffer is allocated.
func SerializeSCT(sct SignedCertificateTimestamp) ([]byte, error) {
	return SerializeSCTHere(sct, nil)
}
// deserializeSCTV1 reads the V1 SCT fields that follow the version byte
// (log ID, timestamp, length-prefixed extensions, signature) from r into sct.
func deserializeSCTV1(r io.Reader, sct *SignedCertificateTimestamp) error {
	if err := binary.Read(r, binary.BigEndian, &sct.LogID); err != nil {
		return err
	}
	if err := binary.Read(r, binary.BigEndian, &sct.Timestamp); err != nil {
		return err
	}
	ext, err := readVarBytes(r, ExtensionsLengthBytes)
	if err != nil {
		return err
	}
	sct.Extensions = ext
	ds, err := UnmarshalDigitallySigned(r)
	if err != nil {
		return err
	}
	sct.Signature = *ds
	return nil
}
// DeserializeSCT parses an SCT in the RFC6962 section 3.2 wire format
// from r. Note that on a V1 parse failure the partially populated SCT is
// returned alongside the error from deserializeSCTV1.
func DeserializeSCT(r io.Reader) (*SignedCertificateTimestamp, error) {
	var sct SignedCertificateTimestamp
	if err := binary.Read(r, binary.BigEndian, &sct.SCTVersion); err != nil {
		return nil, err
	}
	switch sct.SCTVersion {
	case V1:
		return &sct, deserializeSCTV1(r, &sct)
	default:
		return nil, fmt.Errorf("unknown SCT version %d", sct.SCTVersion)
	}
}
// serializeV1STHSignatureInput builds the V1 tree-head signature input:
// version, signature type, timestamp, tree size and SHA-256 root hash,
// all big-endian.
func serializeV1STHSignatureInput(sth SignedTreeHead) ([]byte, error) {
	if sth.Version != V1 {
		return nil, fmt.Errorf("invalid STH version %d", sth.Version)
	}
	// NOTE(review): if TreeSize is an unsigned type this check can never
	// fire — confirm against the SignedTreeHead declaration.
	if sth.TreeSize < 0 {
		return nil, fmt.Errorf("invalid tree size %d", sth.TreeSize)
	}
	if len(sth.SHA256RootHash) != crypto.SHA256.Size() {
		return nil, fmt.Errorf("invalid TreeHash length, got %d expected %d", len(sth.SHA256RootHash), crypto.SHA256.Size())
	}
	var buf bytes.Buffer
	if err := binary.Write(&buf, binary.BigEndian, V1); err != nil {
		return nil, err
	}
	if err := binary.Write(&buf, binary.BigEndian, TreeHashSignatureType); err != nil {
		return nil, err
	}
	if err := binary.Write(&buf, binary.BigEndian, sth.Timestamp); err != nil {
		return nil, err
	}
	if err := binary.Write(&buf, binary.BigEndian, sth.TreeSize); err != nil {
		return nil, err
	}
	if err := binary.Write(&buf, binary.BigEndian, sth.SHA256RootHash); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// SerializeSTHSignatureInput serializes the passed in sth into the correct
// format for signing.
func SerializeSTHSignatureInput(sth SignedTreeHead) ([]byte, error) {
	// Only V1 tree heads can currently be serialized.
	if sth.Version != V1 {
		return nil, fmt.Errorf("unsupported STH version %d", sth.Version)
	}
	return serializeV1STHSignatureInput(sth)
}
// SCTListSerializedLength determines the length of the required buffer should a SCT List need to be serialized:
// a 2-byte list-length prefix plus, for each SCT, a 2-byte length prefix
// and the serialized SCT itself. Empty lists and lists that overflow the
// 16-bit prefixes are rejected.
func SCTListSerializedLength(scts []SignedCertificateTimestamp) (int, error) {
	sctListLen := 2
	for i, sct := range scts {
		n, err := sct.SerializedLength()
		if err != nil {
			return 0, fmt.Errorf("unable to determine length of SCT in position %d: %v", i, err)
		}
		// Each per-SCT length must fit its 2-byte prefix.
		if n > MaxSCTInListLength {
			return 0, fmt.Errorf("SCT in position %d too large: %d", i, n)
		}
		sctListLen += 2 + n
	}
	if sctListLen == 2 {
		return 0, fmt.Errorf("SCT List empty")
	}
	// The list-length prefix itself (2 bytes) is not counted by the prefix.
	if sctListLen > MaxSCTListLength+2 {
		return 0, fmt.Errorf("SCT List too large to serialize: %d", sctListLen)
	}
	return sctListLen, nil
}
// SerializeSCTListHere serializes the passed-in slice of SignedCertificateTimestamp into the
// here byte slice as a SignedCertificateTimestampList (see RFC6962 Section 3.3):
// a 2-byte total-length prefix followed by, for each SCT, a 2-byte length
// prefix and the serialized SCT. A buffer is allocated when here is nil;
// ErrNotEnoughBuffer is returned when the provided buffer is too small.
func SerializeSCTListHere(scts []SignedCertificateTimestamp, here []byte) ([]byte, error) {
	sctListOutLen, err := SCTListSerializedLength(scts)
	if err != nil {
		return nil, err
	}
	if here == nil {
		here = make([]byte, sctListOutLen)
	}
	if len(here) < sctListOutLen {
		return nil, ErrNotEnoughBuffer
	}
	here = here[0:sctListOutLen]
	// The list-length prefix excludes its own two bytes.
	binary.BigEndian.PutUint16(here[0:2], uint16(sctListOutLen-2))
	sctListPos := 2
	for i, sct := range scts {
		n, err := sct.SerializedLength()
		if err != nil {
			return nil, fmt.Errorf("unable to determine length of SCT in position %d: %v", i, err)
		}
		if n > MaxSCTInListLength {
			return nil, fmt.Errorf("SCT in position %d too large: %d", i, n)
		}
		binary.BigEndian.PutUint16(here[sctListPos:sctListPos+2], uint16(n))
		sctListPos += 2
		_, err = SerializeSCTHere(sct, here[sctListPos:sctListPos+n])
		if err != nil {
			return nil, fmt.Errorf("unable to serialize SCT in position %d: %v", i, err)
		}
		sctListPos += n
	}
	// Defensive consistency check between the two length computations.
	if sctListPos != sctListOutLen {
		return nil, fmt.Errorf("SCTList size expected %d got %d", sctListOutLen, sctListPos)
	}
	return here, nil
}
// SerializeSCTList serializes the passed-in slice of SignedCertificateTimestamp as a
// SignedCertificateTimestampList (see RFC6962 Section 3.3).
// Equivalent to SerializeSCTListHere(scts, nil).
// (The comment previously named SerializeSCTListHere — a copy-paste error.)
func SerializeSCTList(scts []SignedCertificateTimestamp) ([]byte, error) {
	return SerializeSCTListHere(scts, nil)
}
|
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"flag"
"fmt"
"os"
"time"
"github.com/golang/glog"
"github.com/google/cadvisor/manager"
"github.com/google/cadvisor/storage"
"github.com/google/cadvisor/storage/bigquery"
"github.com/google/cadvisor/storage/influxdb"
"github.com/google/cadvisor/storage/memory"
)
// NOTE(review): argSampleSize is not referenced anywhere in this file —
// candidate for removal (verify no other file in the package reads it).
var argSampleSize = flag.Int("samples", 1024, "number of samples we want to keep")

// Backend storage connection settings.
var argDbUsername = flag.String("storage_driver_user", "root", "database username")
var argDbPassword = flag.String("storage_driver_password", "root", "database password")
var argDbHost = flag.String("storage_driver_host", "localhost:8086", "database host:port")
var argDbName = flag.String("storage_driver_db", "cadvisor", "database name")
var argDbIsSecure = flag.Bool("storage_driver_secure", false, "use secure connection with database")
var argDbBufferDuration = flag.Duration("storage_driver_buffer_duration", 60*time.Second, "Writes in the storage driver will be buffered for this duration, and committed to the non memory backends as a single transaction")

// statsRequestedByUI is the number of recent stats the web UI fetches per request.
const statsRequestedByUI = 60
// NewStorageDriver creates the in-memory stats cache for driverName
// ("", "influxdb" or "bigquery"), optionally backed by the named backend.
// An empty driverName selects memory-only storage.
func NewStorageDriver(driverName string) (*memory.InMemoryStorage, error) {
	var backendStorage storage.StorageDriver
	var err error
	// TODO(vmarmol): We shouldn't need the housekeeping interval here and it shouldn't be public.
	statsToCache := int(*argDbBufferDuration / *manager.HousekeepingInterval)
	if statsToCache < statsRequestedByUI {
		// The UI requests the most recent 60 stats by default.
		statsToCache = statsRequestedByUI
	}
	switch driverName {
	case "":
		// No backend: stats are kept only in memory.
		backendStorage = nil
	case "influxdb":
		var hostname string
		hostname, err = os.Hostname()
		if err != nil {
			return nil, err
		}
		backendStorage, err = influxdb.New(
			hostname,
			"stats",
			*argDbName,
			*argDbUsername,
			*argDbPassword,
			*argDbHost,
			*argDbIsSecure,
			*argDbBufferDuration,
		)
	case "bigquery":
		var hostname string
		hostname, err = os.Hostname()
		if err != nil {
			return nil, err
		}
		backendStorage, err = bigquery.New(
			hostname,
			"cadvisor",
			*argDbName,
		)
	default:
		// Bug fix: previously referenced *argDbDriver, a flag not declared
		// in this file, instead of the driverName argument actually being
		// switched on.
		err = fmt.Errorf("unknown database driver: %v", driverName)
	}
	if err != nil {
		return nil, err
	}
	glog.Infof("Caching %d recent stats in memory; using \"%v\" storage driver\n", statsToCache, driverName)
	storageDriver := memory.New(statsToCache, backendStorage)
	return storageDriver, nil
}
Remove unused samples flag.
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"flag"
"fmt"
"os"
"time"
"github.com/golang/glog"
"github.com/google/cadvisor/manager"
"github.com/google/cadvisor/storage"
"github.com/google/cadvisor/storage/bigquery"
"github.com/google/cadvisor/storage/influxdb"
"github.com/google/cadvisor/storage/memory"
)
// Backend storage connection settings.
var argDbUsername = flag.String("storage_driver_user", "root", "database username")
var argDbPassword = flag.String("storage_driver_password", "root", "database password")
var argDbHost = flag.String("storage_driver_host", "localhost:8086", "database host:port")
var argDbName = flag.String("storage_driver_db", "cadvisor", "database name")
var argDbIsSecure = flag.Bool("storage_driver_secure", false, "use secure connection with database")
var argDbBufferDuration = flag.Duration("storage_driver_buffer_duration", 60*time.Second, "Writes in the storage driver will be buffered for this duration, and committed to the non memory backends as a single transaction")

// statsRequestedByUI is the number of recent stats the web UI fetches per request.
const statsRequestedByUI = 60
// NewStorageDriver creates the in-memory stats cache for driverName
// ("", "influxdb" or "bigquery"), optionally backed by the named backend.
// An empty driverName selects memory-only storage.
func NewStorageDriver(driverName string) (*memory.InMemoryStorage, error) {
	var backendStorage storage.StorageDriver
	var err error
	// TODO(vmarmol): We shouldn't need the housekeeping interval here and it shouldn't be public.
	statsToCache := int(*argDbBufferDuration / *manager.HousekeepingInterval)
	if statsToCache < statsRequestedByUI {
		// The UI requests the most recent 60 stats by default.
		statsToCache = statsRequestedByUI
	}
	switch driverName {
	case "":
		// No backend: stats are kept only in memory.
		backendStorage = nil
	case "influxdb":
		var hostname string
		hostname, err = os.Hostname()
		if err != nil {
			return nil, err
		}
		backendStorage, err = influxdb.New(
			hostname,
			"stats",
			*argDbName,
			*argDbUsername,
			*argDbPassword,
			*argDbHost,
			*argDbIsSecure,
			*argDbBufferDuration,
		)
	case "bigquery":
		var hostname string
		hostname, err = os.Hostname()
		if err != nil {
			return nil, err
		}
		backendStorage, err = bigquery.New(
			hostname,
			"cadvisor",
			*argDbName,
		)
	default:
		// Bug fix: previously referenced *argDbDriver, a flag not declared
		// in this file, instead of the driverName argument actually being
		// switched on.
		err = fmt.Errorf("unknown database driver: %v", driverName)
	}
	if err != nil {
		return nil, err
	}
	glog.Infof("Caching %d recent stats in memory; using \"%v\" storage driver\n", statsToCache, driverName)
	storageDriver := memory.New(statsToCache, backendStorage)
	return storageDriver, nil
}
|
package raft
import (
"reflect"
"testing"
"time"
"github.com/coreos/etcd/raft/raftpb"
"github.com/coreos/etcd/third_party/code.google.com/p/go.net/context"
)
// TestNodeStep ensures that node.Step sends msgProp to propc chan
// and other kinds of messages to recvc chan.
func TestNodeStep(t *testing.T) {
	for i := range mtmap {
		// Buffered channels so a successful Step never blocks this test.
		n := &Node{
			propc: make(chan raftpb.Message, 1),
			recvc: make(chan raftpb.Message, 1),
		}
		n.Step(context.TODO(), raftpb.Message{Type: int64(i)})
		// Proposal goes to propc chan. Others go to recvc chan.
		if int64(i) == msgProp {
			select {
			case <-n.propc:
			default:
				t.Errorf("%d: cannot receive %s on propc chan", i, mtmap[i])
			}
		} else {
			select {
			case <-n.recvc:
			default:
				t.Errorf("%d: cannot receive %s on recvc chan", i, mtmap[i])
			}
		}
	}
}
// Cancel and Stop should unblock Step()
func TestNodeStepUnblock(t *testing.T) {
	// a node without buffer to block step
	n := &Node{
		propc: make(chan raftpb.Message),
		done:  make(chan struct{}),
	}
	ctx, cancel := context.WithCancel(context.Background())
	stopFunc := func() { close(n.done) }
	tests := []struct {
		unblock func()
		werr    error
	}{
		{stopFunc, ErrStopped},
		{cancel, context.Canceled},
	}
	for i, tt := range tests {
		errc := make(chan error, 1)
		go func() {
			err := n.Step(ctx, raftpb.Message{Type: msgProp})
			errc <- err
		}()
		tt.unblock()
		select {
		case err := <-errc:
			if err != tt.werr {
				// Bug fix: the case index i was missing from the argument
				// list, so the format had three verbs but only two args
				// (go vet printf failure) and reported garbage.
				t.Errorf("#%d: err = %v, want %v", i, err, tt.werr)
			}
			// clean up side-effect
			if ctx.Err() != nil {
				ctx = context.TODO()
			}
			select {
			case <-n.done:
				n.done = make(chan struct{})
			default:
			}
		case <-time.After(time.Millisecond * 100):
			t.Errorf("#%d: failed to unblock step", i)
		}
	}
}
// TestNode drives a single-member cluster through Campaign and Propose,
// checks the Ready emitted for each step, then verifies no further Ready
// is pending.
func TestNode(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// Expected Ready values: one for the election entry, one for the
	// proposed "foo" entry.
	wants := []Ready{
		{
			State:            raftpb.State{Term: 1, Vote: -1, Commit: 1, LastIndex: 1},
			Entries:          []raftpb.Entry{{Term: 1, Index: 1}},
			CommittedEntries: []raftpb.Entry{{Term: 1, Index: 1}},
		},
		{
			State:            raftpb.State{Term: 1, Vote: -1, Commit: 2, LastIndex: 2},
			Entries:          []raftpb.Entry{{Term: 1, Index: 2, Data: []byte("foo")}},
			CommittedEntries: []raftpb.Entry{{Term: 1, Index: 2, Data: []byte("foo")}},
		},
	}
	n := Start(1, []int64{1}, 0, 0)
	n.Campaign(ctx)
	if g := <-n.Ready(); !reflect.DeepEqual(g, wants[0]) {
		t.Errorf("#%d: g = %+v,\n w %+v", 1, g, wants[0])
	}
	n.Propose(ctx, []byte("foo"))
	if g := <-n.Ready(); !reflect.DeepEqual(g, wants[1]) {
		t.Errorf("#%d: g = %+v,\n w %+v", 2, g, wants[1])
	}
	// No further Ready should be pending.
	select {
	case rd := <-n.Ready():
		t.Errorf("unexpected Ready: %+v", rd)
	default:
	}
}
// TestNodeRestart restarts a node from persisted state and log entries and
// expects a single Ready carrying the entries committed up to st.Commit.
func TestNodeRestart(t *testing.T) {
	entries := []raftpb.Entry{
		{Term: 1, Index: 1},
		{Term: 1, Index: 2, Data: []byte("foo")},
	}
	st := raftpb.State{Term: 1, Vote: -1, Commit: 1, LastIndex: 2}
	want := Ready{
		State: emptyState,
		// commit upto index commit index in st
		CommittedEntries: entries[:st.Commit],
	}
	n := Restart(1, []int64{1}, 0, 0, st, entries)
	if g := <-n.Ready(); !reflect.DeepEqual(g, want) {
		t.Errorf("g = %+v,\n w %+v", g, want)
	}
	// No further Ready should be pending after the restart replay.
	select {
	case rd := <-n.Ready():
		t.Errorf("unexpected Ready: %+v", rd)
	default:
	}
}
raft: test node block proposal
package raft
import (
"reflect"
"testing"
"time"
"github.com/coreos/etcd/raft/raftpb"
"github.com/coreos/etcd/third_party/code.google.com/p/go.net/context"
)
// TestNodeStep ensures that node.Step sends msgProp to propc chan
// and other kinds of messages to recvc chan.
func TestNodeStep(t *testing.T) {
	for i := range mtmap {
		// A fresh node per message type; channels are buffered so Step
		// never blocks in this test.
		n := &Node{
			propc: make(chan raftpb.Message, 1),
			recvc: make(chan raftpb.Message, 1),
		}
		n.Step(context.TODO(), raftpb.Message{Type: int64(i)})
		// Proposal goes to propc chan. Others go to recvc chan.
		if int64(i) == msgProp {
			select {
			case <-n.propc:
			default:
				t.Errorf("%d: cannot receive %s on propc chan", i, mtmap[i])
			}
		} else {
			select {
			case <-n.recvc:
			default:
				t.Errorf("%d: cannot receive %s on recvc chan", i, mtmap[i])
			}
		}
	}
}
// TestNodeStepUnblock ensures that a Step call blocked on an unbuffered
// propc is unblocked both by stopping the node (closing n.done) and by
// canceling the caller's context, and that the matching error is returned.
func TestNodeStepUnblock(t *testing.T) {
	// a node without buffer to block step
	n := &Node{
		propc: make(chan raftpb.Message),
		done:  make(chan struct{}),
	}

	ctx, cancel := context.WithCancel(context.Background())
	stopFunc := func() { close(n.done) }

	tests := []struct {
		unblock func() // action expected to unblock the pending Step
		werr    error  // error Step should return after unblocking
	}{
		{stopFunc, ErrStopped},
		{cancel, context.Canceled},
	}

	for i, tt := range tests {
		errc := make(chan error, 1)
		go func() {
			err := n.Step(ctx, raftpb.Message{Type: msgProp})
			errc <- err
		}()
		tt.unblock()
		select {
		case err := <-errc:
			if err != tt.werr {
				// Fix: the index argument `i` was missing, so the three
				// format verbs did not match the two supplied arguments.
				t.Errorf("#%d: err = %v, want %v", i, err, tt.werr)
			}
			// clean up side-effect: restore a live context and a fresh
			// done channel so the next case starts from a clean state.
			if ctx.Err() != nil {
				ctx = context.TODO()
			}
			select {
			case <-n.done:
				n.done = make(chan struct{})
			default:
			}
		case <-time.After(time.Millisecond * 100):
			t.Errorf("#%d: failed to unblock step", i)
		}
	}
}
// TestBlockProposal ensures that node will block proposal when it does not
// know who is the current leader; node will direct proposal when it knows
// who is the current leader.
func TestBlockProposal(t *testing.T) {
	proposed := false
	n := newNode()
	defer n.Stop()

	r := newRaft(1, []int64{1}, 10, 1)
	// Stub out the step function so the test can observe whether a
	// proposal ever reaches the raft state machine.
	// NOTE(review): proposed is written from the n.run goroutine and read
	// here without synchronization; run under -race to confirm this is safe.
	r.step = func(r *raft, m raftpb.Message) {
		if m.Type == msgProp {
			proposed = true
		}
	}
	go n.run(r)
	go n.Propose(context.TODO(), []byte("somedata"))

	// give the goroutines some time to be scheduled
	time.Sleep(time.Millisecond * 2)
	if proposed {
		t.Fatalf("proposal = %v, want %v", proposed, false)
	}

	// assign a lead to raft, then tick to update the node
	r.lead = 1
	n.Tick()

	// give the goroutines some time to be scheduled
	time.Sleep(time.Millisecond * 2)
	if !proposed {
		t.Fatalf("proposal = %v, want %v", proposed, true)
	}
}
// TestNode ensures that a single-node cluster emits the expected Ready
// values for a campaign followed by one proposal, and emits nothing more.
func TestNode(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// Expected Ready after the campaign (entry 1) and after the proposal
	// (entry 2 carrying "foo").
	wants := []Ready{
		{
			State:            raftpb.State{Term: 1, Vote: -1, Commit: 1, LastIndex: 1},
			Entries:          []raftpb.Entry{{Term: 1, Index: 1}},
			CommittedEntries: []raftpb.Entry{{Term: 1, Index: 1}},
		},
		{
			State:            raftpb.State{Term: 1, Vote: -1, Commit: 2, LastIndex: 2},
			Entries:          []raftpb.Entry{{Term: 1, Index: 2, Data: []byte("foo")}},
			CommittedEntries: []raftpb.Entry{{Term: 1, Index: 2, Data: []byte("foo")}},
		},
	}
	n := Start(1, []int64{1}, 0, 0)
	n.Campaign(ctx)
	if g := <-n.Ready(); !reflect.DeepEqual(g, wants[0]) {
		t.Errorf("#%d: g = %+v,\n w %+v", 1, g, wants[0])
	}
	n.Propose(ctx, []byte("foo"))
	if g := <-n.Ready(); !reflect.DeepEqual(g, wants[1]) {
		t.Errorf("#%d: g = %+v,\n w %+v", 2, g, wants[1])
	}
	// Nothing further should be ready.
	select {
	case rd := <-n.Ready():
		t.Errorf("unexpected Ready: %+v", rd)
	default:
	}
}
// TestNodeRestart ensures that a node restarted from a persisted state and
// log replays only the entries up to the recorded commit index.
func TestNodeRestart(t *testing.T) {
	entries := []raftpb.Entry{
		{Term: 1, Index: 1},
		{Term: 1, Index: 2, Data: []byte("foo")},
	}
	st := raftpb.State{Term: 1, Vote: -1, Commit: 1, LastIndex: 2}
	want := Ready{
		State: emptyState,
		// commit upto index commit index in st
		CommittedEntries: entries[:st.Commit],
	}
	n := Restart(1, []int64{1}, 0, 0, st, entries)
	if g := <-n.Ready(); !reflect.DeepEqual(g, want) {
		t.Errorf("g = %+v,\n w %+v", g, want)
	}
	// Nothing further should be ready.
	select {
	case rd := <-n.Ready():
		t.Errorf("unexpected Ready: %+v", rd)
	default:
	}
}
|
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package raft
import pb "go.etcd.io/etcd/v3/raft/raftpb"
// ReadState provides state for a read-only query.
// It is the caller's responsibility to call ReadIndex first before getting
// this state from Ready; it is also the caller's duty to differentiate
// whether this state is the one it requested through RequestCtx, e.g. by
// giving a unique id as RequestCtx.
type ReadState struct {
	Index      uint64 // commit index the read was served at
	RequestCtx []byte // opaque caller-supplied request identifier
}
// readIndexStatus tracks one in-flight read-only request: the original
// message, the commit index at the time it arrived, and which peers have
// acknowledged the associated heartbeat.
type readIndexStatus struct {
	req   pb.Message // the original read-only request message
	index uint64     // commit index when the request was received
	// NB: this never records 'false', but it's more convenient to use this
	// instead of a map[uint64]struct{} due to the API of quorum.VoteResult. If
	// this becomes performance sensitive enough (doubtful), quorum.VoteResult
	// can change to an API that is closer to that of CommittedIndex.
	acks map[uint64]bool
}
// readOnly manages the queue of pending read-only requests, keyed by the
// request context bytes; readIndexQueue preserves arrival order.
type readOnly struct {
	option           ReadOnlyOption
	pendingReadIndex map[string]*readIndexStatus
	readIndexQueue   []string
}
// newReadOnly creates a readOnly tracker configured with the given option.
func newReadOnly(option ReadOnlyOption) *readOnly {
	ro := readOnly{
		option:           option,
		pendingReadIndex: map[string]*readIndexStatus{},
	}
	return &ro
}
// addRequest adds a read only request into readonly struct.
// `index` is the commit index of the raft state machine when it received
// the read only request.
// `m` is the original read only request message from the local or remote node.
// Duplicate contexts are ignored so a request is only tracked once.
func (ro *readOnly) addRequest(index uint64, m pb.Message) {
	// The request context (first entry's data) is the map key.
	s := string(m.Entries[0].Data)
	if _, ok := ro.pendingReadIndex[s]; ok {
		return
	}
	ro.pendingReadIndex[s] = &readIndexStatus{index: index, req: m, acks: make(map[uint64]bool)}
	ro.readIndexQueue = append(ro.readIndexQueue, s)
}
// recvAck records that peer `id` acknowledged the heartbeat carrying the
// given read-only request context, and returns the ack set collected so
// far. It returns nil when no pending request matches the context.
func (ro *readOnly) recvAck(id uint64, context []byte) map[uint64]bool {
	if rs, ok := ro.pendingReadIndex[string(context)]; ok {
		rs.acks[id] = true
		return rs.acks
	}
	return nil
}
// advance advances the read-only request queue kept by the readonly struct.
// It dequeues requests up to and including the one whose context matches
// m.Context, removes them from the pending map, and returns their statuses
// in queue order. If no queued request matches, nothing is mutated and nil
// is returned.
func (ro *readOnly) advance(m pb.Message) []*readIndexStatus {
	ctx := string(m.Context)
	rss := []*readIndexStatus{}
	for n, okctx := range ro.readIndexQueue {
		rs, ok := ro.pendingReadIndex[okctx]
		if !ok {
			// Queue and pending map must stay in sync; a miss is a bug.
			panic("cannot find corresponding read state from pending map")
		}
		rss = append(rss, rs)
		if okctx != ctx {
			continue
		}
		// Found the matching request: drop everything up to and including
		// it from both the queue and the pending map.
		ro.readIndexQueue = ro.readIndexQueue[n+1:]
		for _, done := range rss {
			delete(ro.pendingReadIndex, string(done.req.Entries[0].Data))
		}
		return rss
	}
	return nil
}
// lastPendingRequestCtx returns the context of the most recently queued
// pending read-only request, or the empty string when none is pending.
func (ro *readOnly) lastPendingRequestCtx() string {
	if n := len(ro.readIndexQueue); n > 0 {
		return ro.readIndexQueue[n-1]
	}
	return ""
}
raft: fix typo
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package raft
import pb "go.etcd.io/etcd/v3/raft/raftpb"
// ReadState provides state for a read-only query.
// It is the caller's responsibility to call ReadIndex first before getting
// this state from Ready; it is also the caller's duty to differentiate
// whether this state is the one it requested through RequestCtx, e.g. by
// giving a unique id as RequestCtx.
type ReadState struct {
	Index      uint64 // commit index the read was served at
	RequestCtx []byte // opaque caller-supplied request identifier
}
// readIndexStatus tracks one in-flight read-only request: the original
// message, the commit index at the time it arrived, and which peers have
// acknowledged the associated heartbeat.
type readIndexStatus struct {
	req   pb.Message // the original read-only request message
	index uint64     // commit index when the request was received
	// NB: this never records 'false', but it's more convenient to use this
	// instead of a map[uint64]struct{} due to the API of quorum.VoteResult. If
	// this becomes performance sensitive enough (doubtful), quorum.VoteResult
	// can change to an API that is closer to that of CommittedIndex.
	acks map[uint64]bool
}
// readOnly manages the queue of pending read-only requests, keyed by the
// request context bytes; readIndexQueue preserves arrival order.
type readOnly struct {
	option           ReadOnlyOption
	pendingReadIndex map[string]*readIndexStatus
	readIndexQueue   []string
}
// newReadOnly creates a readOnly tracker configured with the given option.
func newReadOnly(option ReadOnlyOption) *readOnly {
	return &readOnly{
		option:           option,
		pendingReadIndex: make(map[string]*readIndexStatus),
	}
}
// addRequest adds a read only request into readonly struct.
// `index` is the commit index of the raft state machine when it received
// the read only request.
// `m` is the original read only request message from the local or remote node.
// Duplicate contexts are ignored so a request is only tracked once.
func (ro *readOnly) addRequest(index uint64, m pb.Message) {
	// The request context (first entry's data) is the map key.
	s := string(m.Entries[0].Data)
	if _, ok := ro.pendingReadIndex[s]; ok {
		return
	}
	ro.pendingReadIndex[s] = &readIndexStatus{index: index, req: m, acks: make(map[uint64]bool)}
	ro.readIndexQueue = append(ro.readIndexQueue, s)
}
// recvAck notifies the readonly struct that the raft state machine received
// an acknowledgment of the heartbeat that attached with the read only request
// context. It returns the ack set collected so far, or nil when no pending
// request matches the context.
func (ro *readOnly) recvAck(id uint64, context []byte) map[uint64]bool {
	rs, ok := ro.pendingReadIndex[string(context)]
	if !ok {
		return nil
	}
	rs.acks[id] = true
	return rs.acks
}
// advance advances the read only request queue kept by the readonly struct.
// It dequeues the requests until it finds the read only request that has
// the same context as the given `m`. Dequeued requests are removed from the
// pending map and returned in queue order; if no queued request matches,
// nothing is mutated and nil is returned.
func (ro *readOnly) advance(m pb.Message) []*readIndexStatus {
	var (
		i     int  // number of queue entries consumed, including the match
		found bool // whether a queued request matched m.Context
	)
	ctx := string(m.Context)
	rss := []*readIndexStatus{}
	for _, okctx := range ro.readIndexQueue {
		i++
		rs, ok := ro.pendingReadIndex[okctx]
		if !ok {
			// Queue and pending map must stay in sync; a miss is a bug.
			panic("cannot find corresponding read state from pending map")
		}
		rss = append(rss, rs)
		if okctx == ctx {
			found = true
			break
		}
	}
	if found {
		// Drop the dequeued requests from both the queue and the map.
		ro.readIndexQueue = ro.readIndexQueue[i:]
		for _, rs := range rss {
			delete(ro.pendingReadIndex, string(rs.req.Entries[0].Data))
		}
		return rss
	}
	return nil
}
// lastPendingRequestCtx returns the context of the last pending read only
// request in readonly struct, or the empty string when none is pending.
func (ro *readOnly) lastPendingRequestCtx() string {
	if len(ro.readIndexQueue) == 0 {
		return ""
	}
	return ro.readIndexQueue[len(ro.readIndexQueue)-1]
}
|
package gotldmap
// Generated; DO NOT EDIT
// Map - map of top level domains with mark key
var Map = map[string]int{
"abb": 1,
"abbott": 2,
"abogado": 3,
"ac": 4,
"academy": 5,
"accenture": 6,
"accountant": 7,
"accountants": 8,
"active": 9,
"actor": 10,
"ad": 11,
"ads": 12,
"adult": 13,
"ae": 14,
"aeg": 15,
"aero": 16,
"af": 17,
"afl": 18,
"ag": 19,
"agency": 20,
"ai": 21,
"aig": 22,
"airforce": 23,
"airtel": 24,
"al": 25,
"allfinanz": 26,
"alsace": 27,
"am": 28,
"amsterdam": 29,
"android": 30,
"ao": 31,
"apartments": 32,
"app": 33,
"aq": 34,
"aquarelle": 35,
"ar": 36,
"archi": 37,
"army": 38,
"arpa": 39,
"as": 40,
"asia": 41,
"associates": 42,
"at": 43,
"attorney": 44,
"au": 45,
"auction": 46,
"audio": 47,
"auto": 48,
"autos": 49,
"aw": 50,
"ax": 51,
"axa": 52,
"az": 53,
"azure": 54,
"ba": 55,
"band": 56,
"bank": 57,
"bar": 58,
"barcelona": 59,
"barclaycard": 60,
"barclays": 61,
"bargains": 62,
"bauhaus": 63,
"bayern": 64,
"bb": 65,
"bbc": 66,
"bbva": 67,
"bcn": 68,
"bd": 69,
"be": 70,
"beer": 71,
"bentley": 72,
"berlin": 73,
"best": 74,
"bet": 75,
"bf": 76,
"bg": 77,
"bh": 78,
"bharti": 79,
"bi": 80,
"bible": 81,
"bid": 82,
"bike": 83,
"bing": 84,
"bingo": 85,
"bio": 86,
"biz": 87,
"bj": 88,
"black": 89,
"blackfriday": 90,
"bloomberg": 91,
"blue": 92,
"bm": 93,
"bmw": 94,
"bn": 95,
"bnl": 96,
"bnpparibas": 97,
"bo": 98,
"boats": 99,
"bond": 100,
"boo": 101,
"boots": 102,
"boutique": 103,
"br": 104,
"bradesco": 105,
"bridgestone": 106,
"broker": 107,
"brother": 108,
"brussels": 109,
"bs": 110,
"bt": 111,
"budapest": 112,
"build": 113,
"builders": 114,
"business": 115,
"buzz": 116,
"bv": 117,
"bw": 118,
"by": 119,
"bz": 120,
"bzh": 121,
"ca": 122,
"cab": 123,
"cafe": 124,
"cal": 125,
"camera": 126,
"camp": 127,
"cancerresearch": 128,
"canon": 129,
"capetown": 130,
"capital": 131,
"caravan": 132,
"cards": 133,
"care": 134,
"career": 135,
"careers": 136,
"cars": 137,
"cartier": 138,
"casa": 139,
"cash": 140,
"casino": 141,
"cat": 142,
"catering": 143,
"cba": 144,
"cbn": 145,
"cc": 146,
"cd": 147,
"ceb": 1041,
"center": 148,
"ceo": 149,
"cern": 150,
"cf": 151,
"cfa": 152,
"cfd": 153,
"cg": 154,
"ch": 155,
"chanel": 156,
"channel": 157,
"chat": 158,
"cheap": 159,
"chloe": 160,
"christmas": 161,
"chrome": 162,
"church": 163,
"ci": 164,
"cisco": 165,
"citic": 166,
"city": 167,
"ck": 168,
"cl": 169,
"claims": 170,
"cleaning": 171,
"click": 172,
"clinic": 173,
"clothing": 174,
"cloud": 175,
"club": 176,
"cm": 177,
"cn": 178,
"co": 179,
"coach": 180,
"codes": 181,
"coffee": 182,
"college": 183,
"cologne": 184,
"com": 185,
"commbank": 186,
"community": 187,
"company": 188,
"computer": 189,
"condos": 190,
"construction": 191,
"consulting": 192,
"contractors": 193,
"cooking": 194,
"cool": 195,
"coop": 196,
"corsica": 197,
"country": 198,
"coupons": 199,
"courses": 200,
"cr": 201,
"credit": 202,
"creditcard": 203,
"cricket": 204,
"crown": 205,
"crs": 206,
"cruises": 207,
"cu": 208,
"cuisinella": 209,
"cv": 210,
"cw": 211,
"cx": 212,
"cy": 213,
"cymru": 214,
"cyou": 215,
"cz": 216,
"dabur": 217,
"dad": 218,
"dance": 219,
"date": 220,
"dating": 221,
"datsun": 222,
"day": 223,
"dclk": 224,
"de": 225,
"deals": 226,
"degree": 227,
"delivery": 228,
"delta": 229,
"democrat": 230,
"dental": 231,
"dentist": 232,
"desi": 233,
"design": 234,
"dev": 235,
"diamonds": 236,
"diet": 237,
"digital": 238,
"direct": 239,
"directory": 240,
"discount": 241,
"dj": 242,
"dk": 243,
"dm": 244,
"dnp": 245,
"do": 246,
"docs": 247,
"dog": 248,
"doha": 249,
"domains": 250,
"doosan": 251,
"download": 252,
"drive": 253,
"durban": 254,
"dvag": 255,
"dz": 256,
"earth": 257,
"eat": 258,
"ec": 259,
"edu": 260,
"education": 261,
"ee": 262,
"eg": 263,
"email": 264,
"emerck": 265,
"energy": 266,
"engineer": 267,
"engineering": 268,
"enterprises": 269,
"epson": 270,
"equipment": 271,
"er": 272,
"erni": 273,
"es": 274,
"esq": 275,
"estate": 276,
"et": 277,
"eu": 278,
"eurovision": 279,
"eus": 280,
"events": 281,
"everbank": 282,
"exchange": 283,
"expert": 284,
"exposed": 285,
"express": 286,
"fage": 1042,
"fail": 287,
"faith": 288,
"family": 1043,
"fan": 289,
"fans": 290,
"farm": 291,
"fashion": 292,
"feedback": 293,
"fi": 294,
"film": 295,
"finance": 296,
"financial": 297,
"firmdale": 298,
"fish": 299,
"fishing": 300,
"fit": 301,
"fitness": 302,
"fj": 303,
"fk": 304,
"flights": 305,
"florist": 306,
"flowers": 307,
"flsmidth": 308,
"fly": 309,
"fm": 310,
"fo": 311,
"foo": 312,
"football": 313,
"forex": 314,
"forsale": 315,
"forum": 316,
"foundation": 317,
"fr": 318,
"frl": 319,
"frogans": 320,
"fund": 321,
"furniture": 322,
"futbol": 323,
"fyi": 324,
"ga": 325,
"gal": 326,
"gallery": 327,
"game": 328,
"garden": 329,
"gb": 330,
"gbiz": 331,
"gd": 332,
"gdn": 333,
"ge": 334,
"gent": 335,
"genting": 336,
"gf": 337,
"gg": 338,
"ggee": 339,
"gh": 340,
"gi": 341,
"gift": 342,
"gifts": 343,
"gives": 344,
"giving": 345,
"gl": 346,
"glass": 347,
"gle": 348,
"global": 349,
"globo": 350,
"gm": 351,
"gmail": 352,
"gmo": 353,
"gmx": 354,
"gn": 355,
"gold": 356,
"goldpoint": 357,
"golf": 358,
"goo": 359,
"goog": 360,
"google": 361,
"gop": 362,
"gov": 363,
"gp": 364,
"gq": 365,
"gr": 366,
"graphics": 367,
"gratis": 368,
"green": 369,
"gripe": 370,
"group": 1044,
"gs": 371,
"gt": 372,
"gu": 373,
"guge": 374,
"guide": 375,
"guitars": 376,
"guru": 377,
"gw": 378,
"gy": 379,
"hamburg": 380,
"hangout": 381,
"haus": 382,
"healthcare": 383,
"help": 384,
"here": 385,
"hermes": 386,
"hiphop": 387,
"hitachi": 388,
"hiv": 389,
"hk": 390,
"hm": 391,
"hn": 392,
"hockey": 393,
"holdings": 394,
"holiday": 395,
"homedepot": 396,
"homes": 397,
"honda": 398,
"horse": 399,
"host": 400,
"hosting": 401,
"hoteles": 402,
"hotmail": 403,
"house": 404,
"how": 405,
"hr": 406,
"hsbc": 407,
"ht": 408,
"hu": 409,
"ibm": 410,
"icbc": 411,
"ice": 412,
"icu": 413,
"id": 414,
"ie": 415,
"ifm": 416,
"iinet": 417,
"il": 418,
"im": 419,
"immo": 420,
"immobilien": 421,
"in": 422,
"industries": 423,
"infiniti": 424,
"info": 425,
"ing": 426,
"ink": 427,
"institute": 428,
"insure": 429,
"int": 430,
"international": 431,
"investments": 432,
"io": 433,
"ipiranga": 434,
"iq": 435,
"ir": 436,
"irish": 437,
"is": 438,
"ist": 439,
"istanbul": 440,
"it": 441,
"itau": 442,
"iwc": 443,
"java": 444,
"jcb": 445,
"je": 446,
"jetzt": 447,
"jewelry": 448,
"jlc": 449,
"jll": 450,
"jm": 451,
"jo": 452,
"jobs": 453,
"joburg": 454,
"jp": 455,
"jprs": 456,
"juegos": 457,
"kaufen": 458,
"kddi": 459,
"ke": 460,
"kg": 461,
"kh": 462,
"ki": 463,
"kim": 464,
"kitchen": 465,
"kiwi": 466,
"km": 467,
"kn": 468,
"koeln": 469,
"komatsu": 470,
"kp": 471,
"kr": 472,
"krd": 473,
"kred": 474,
"kw": 475,
"ky": 476,
"kyoto": 477,
"kz": 478,
"la": 479,
"lacaixa": 480,
"lancaster": 481,
"land": 482,
"lasalle": 483,
"lat": 484,
"latrobe": 485,
"law": 486,
"lawyer": 487,
"lb": 488,
"lc": 489,
"lds": 490,
"lease": 491,
"leclerc": 492,
"legal": 493,
"lexus": 494,
"lgbt": 495,
"li": 496,
"liaison": 497,
"lidl": 498,
"life": 499,
"lighting": 500,
"limited": 501,
"limo": 502,
"link": 503,
"live": 504,
"lixil": 505,
"lk": 506,
"loan": 507,
"loans": 508,
"lol": 509,
"london": 510,
"lotte": 511,
"lotto": 512,
"love": 513,
"lr": 514,
"ls": 515,
"lt": 516,
"ltda": 517,
"lu": 518,
"lupin": 519,
"luxe": 520,
"luxury": 521,
"lv": 522,
"ly": 523,
"ma": 524,
"madrid": 525,
"maif": 526,
"maison": 527,
"man": 528,
"management": 529,
"mango": 530,
"market": 531,
"marketing": 532,
"markets": 533,
"marriott": 534,
"mba": 535,
"mc": 536,
"md": 537,
"me": 538,
"media": 539,
"meet": 540,
"melbourne": 541,
"meme": 542,
"memorial": 543,
"men": 544,
"menu": 545,
"mg": 546,
"mh": 547,
"miami": 548,
"microsoft": 549,
"mil": 550,
"mini": 551,
"mk": 552,
"ml": 553,
"mm": 554,
"mma": 555,
"mn": 556,
"mo": 557,
"mobi": 558,
"moda": 559,
"moe": 560,
"monash": 561,
"money": 562,
"montblanc": 563,
"mormon": 564,
"mortgage": 565,
"moscow": 566,
"motorcycles": 567,
"mov": 568,
"movie": 569,
"movistar": 570,
"mp": 571,
"mq": 572,
"mr": 573,
"ms": 574,
"mt": 575,
"mtn": 576,
"mtpc": 577,
"mu": 578,
"museum": 579,
"mv": 580,
"mw": 581,
"mx": 582,
"my": 583,
"mz": 584,
"na": 585,
"nadex": 586,
"nagoya": 587,
"name": 588,
"navy": 589,
"nc": 590,
"ne": 591,
"nec": 592,
"net": 593,
"netbank": 594,
"network": 595,
"neustar": 596,
"new": 597,
"news": 598,
"nexus": 599,
"nf": 600,
"ng": 601,
"ngo": 602,
"nhk": 603,
"ni": 604,
"nico": 605,
"ninja": 606,
"nissan": 607,
"nl": 608,
"no": 609,
"nokia": 610,
"np": 611,
"nr": 612,
"nra": 613,
"nrw": 614,
"ntt": 615,
"nu": 616,
"nyc": 617,
"nz": 618,
"office": 619,
"okinawa": 620,
"om": 621,
"omega": 622,
"one": 623,
"ong": 624,
"onl": 625,
"online": 626,
"ooo": 627,
"oracle": 628,
"orange": 629,
"org": 630,
"organic": 631,
"osaka": 632,
"otsuka": 633,
"ovh": 634,
"pa": 635,
"page": 636,
"panerai": 637,
"paris": 638,
"partners": 639,
"parts": 640,
"party": 641,
"pe": 642,
"pet": 643,
"pf": 644,
"pg": 645,
"ph": 646,
"pharmacy": 647,
"philips": 648,
"photo": 649,
"photography": 650,
"photos": 651,
"physio": 652,
"piaget": 653,
"pics": 654,
"pictet": 655,
"pictures": 656,
"pink": 657,
"pizza": 658,
"pk": 659,
"pl": 660,
"place": 661,
"play": 662,
"plumbing": 663,
"plus": 664,
"pm": 665,
"pn": 666,
"pohl": 667,
"poker": 668,
"porn": 669,
"post": 670,
"pr": 671,
"praxi": 672,
"press": 673,
"pro": 674,
"prod": 675,
"productions": 676,
"prof": 677,
"properties": 678,
"property": 679,
"ps": 680,
"pt": 681,
"pub": 682,
"pw": 683,
"py": 684,
"qa": 685,
"qpon": 686,
"quebec": 687,
"racing": 688,
"re": 689,
"realtor": 690,
"realty": 691,
"recipes": 692,
"red": 693,
"redstone": 694,
"rehab": 695,
"reise": 696,
"reisen": 697,
"reit": 698,
"ren": 699,
"rent": 700,
"rentals": 701,
"repair": 702,
"report": 703,
"republican": 704,
"rest": 705,
"restaurant": 706,
"review": 707,
"reviews": 708,
"rich": 709,
"ricoh": 710,
"rio": 711,
"rip": 712,
"ro": 713,
"rocks": 714,
"rodeo": 715,
"rs": 716,
"rsvp": 717,
"ru": 718,
"ruhr": 719,
"run": 720,
"rw": 721,
"ryukyu": 722,
"sa": 723,
"saarland": 724,
"sakura": 725,
"sale": 726,
"samsung": 727,
"sandvik": 728,
"sandvikcoromant": 729,
"sanofi": 730,
"sap": 731,
"sarl": 732,
"saxo": 733,
"sb": 734,
"sc": 735,
"sca": 736,
"scb": 737,
"schmidt": 738,
"scholarships": 739,
"school": 740,
"schule": 741,
"schwarz": 742,
"science": 743,
"scor": 744,
"scot": 745,
"sd": 746,
"se": 747,
"seat": 748,
"seek": 1045,
"sener": 749,
"services": 750,
"sew": 751,
"sex": 752,
"sexy": 753,
"sg": 754,
"sh": 755,
"shiksha": 756,
"shoes": 757,
"show": 758,
"shriram": 759,
"si": 760,
"singles": 761,
"site": 762,
"sj": 763,
"sk": 764,
"ski": 765,
"sky": 766,
"skype": 767,
"sl": 768,
"sm": 769,
"sn": 770,
"sncf": 771,
"so": 772,
"soccer": 773,
"social": 774,
"software": 775,
"sohu": 776,
"solar": 777,
"solutions": 778,
"sony": 779,
"soy": 780,
"space": 781,
"spiegel": 782,
"spreadbetting": 783,
"sr": 784,
"srl": 785,
"st": 786,
"starhub": 787,
"statoil": 788,
"studio": 789,
"study": 790,
"style": 791,
"su": 792,
"sucks": 793,
"supplies": 794,
"supply": 795,
"support": 796,
"surf": 797,
"surgery": 798,
"suzuki": 799,
"sv": 800,
"swatch": 801,
"swiss": 802,
"sx": 803,
"sy": 804,
"sydney": 805,
"systems": 806,
"sz": 807,
"taipei": 808,
"tatamotors": 809,
"tatar": 810,
"tattoo": 811,
"tax": 812,
"taxi": 813,
"tc": 814,
"td": 815,
"team": 816,
"tech": 817,
"technology": 818,
"tel": 819,
"telefonica": 820,
"temasek": 821,
"tennis": 822,
"tf": 823,
"tg": 824,
"th": 825,
"thd": 826,
"theater": 827,
"tickets": 828,
"tienda": 829,
"tips": 830,
"tires": 831,
"tirol": 832,
"tj": 833,
"tk": 834,
"tl": 835,
"tm": 836,
"tn": 837,
"to": 838,
"today": 839,
"tokyo": 840,
"tools": 841,
"top": 842,
"toray": 843,
"toshiba": 844,
"tours": 845,
"town": 846,
"toyota": 847,
"toys": 848,
"tr": 849,
"trade": 850,
"trading": 851,
"training": 852,
"travel": 853,
"trust": 854,
"tt": 855,
"tui": 856,
"tv": 857,
"tw": 858,
"tz": 859,
"ua": 860,
"ubs": 861,
"ug": 862,
"uk": 863,
"university": 864,
"uno": 865,
"uol": 866,
"us": 867,
"uy": 868,
"uz": 869,
"va": 870,
"vacations": 871,
"vc": 872,
"ve": 873,
"vegas": 874,
"ventures": 875,
"versicherung": 876,
"vet": 877,
"vg": 878,
"vi": 879,
"viajes": 880,
"video": 881,
"villas": 882,
"vin": 883,
"vision": 884,
"vista": 885,
"vistaprint": 886,
"vlaanderen": 887,
"vn": 888,
"vodka": 889,
"vote": 890,
"voting": 891,
"voto": 892,
"voyage": 893,
"vu": 894,
"wales": 895,
"walter": 896,
"wang": 897,
"watch": 898,
"webcam": 899,
"website": 900,
"wed": 901,
"wedding": 902,
"weir": 903,
"wf": 904,
"whoswho": 905,
"wien": 906,
"wiki": 907,
"williamhill": 908,
"win": 909,
"windows": 910,
"wine": 911,
"wme": 912,
"work": 913,
"works": 914,
"world": 915,
"ws": 916,
"wtc": 917,
"wtf": 918,
"xbox": 919,
"xerox": 920,
"xin": 921,
"xn--11b4c3d": 922,
"xn--1qqw23a": 923,
"xn--30rr7y": 924,
"xn--3bst00m": 925,
"xn--3ds443g": 926,
"xn--3e0b707e": 927,
"xn--3pxu8k": 928,
"xn--42c2d9a": 929,
"xn--45brj9c": 930,
"xn--45q11c": 931,
"xn--4gbrim": 932,
"xn--55qw42g": 933,
"xn--55qx5d": 934,
"xn--6frz82g": 935,
"xn--6qq986b3xl": 936,
"xn--80adxhks": 937,
"xn--80ao21a": 938,
"xn--80asehdb": 939,
"xn--80aswg": 940,
"xn--90a3ac": 941,
"xn--90ais": 942,
"xn--9dbq2a": 943,
"xn--9et52u": 944,
"xn--b4w605ferd": 945,
"xn--c1avg": 946,
"xn--c2br7g": 947,
"xn--cg4bki": 948,
"xn--clchc0ea0b2g2a9gcd": 949,
"xn--czr694b": 950,
"xn--czrs0t": 951,
"xn--czru2d": 952,
"xn--d1acj3b": 953,
"xn--d1alf": 954,
"xn--estv75g": 955,
"xn--fhbei": 956,
"xn--fiq228c5hs": 957,
"xn--fiq64b": 958,
"xn--fiqs8s": 959,
"xn--fiqz9s": 960,
"xn--fjq720a": 961,
"xn--flw351e": 962,
"xn--fpcrj9c3d": 963,
"xn--fzc2c9e2c": 964,
"xn--gecrj9c": 965,
"xn--h2brj9c": 966,
"xn--hxt814e": 967,
"xn--i1b6b1a6a2e": 968,
"xn--imr513n": 969,
"xn--io0a7i": 970,
"xn--j1aef": 971,
"xn--j1amh": 972,
"xn--j6w193g": 973,
"xn--kcrx77d1x4a": 974,
"xn--kprw13d": 975,
"xn--kpry57d": 976,
"xn--kput3i": 977,
"xn--l1acc": 978,
"xn--lgbbat1ad8j": 979,
"xn--mgb9awbf": 980,
"xn--mgba3a4f16a": 981,
"xn--mgbaam7a8h": 982,
"xn--mgbab2bd": 983,
"xn--mgbayh7gpa": 984,
"xn--mgbbh1a71e": 985,
"xn--mgbc0a9azcg": 986,
"xn--mgberp4a5d4ar": 987,
"xn--mgbpl2fh": 988,
"xn--mgbx4cd0ab": 989,
"xn--mk1bu44c": 990,
"xn--mxtq1m": 991,
"xn--ngbc5azd": 992,
"xn--node": 993,
"xn--nqv7f": 994,
"xn--nqv7fs00ema": 995,
"xn--nyqy26a": 996,
"xn--o3cw4h": 997,
"xn--ogbpf8fl": 998,
"xn--p1acf": 999,
"xn--p1ai": 1000,
"xn--pgbs0dh": 1001,
"xn--pssy2u": 1002,
"xn--q9jyb4c": 1003,
"xn--qcka1pmc": 1004,
"xn--rhqv96g": 1005,
"xn--s9brj9c": 1006,
"xn--ses554g": 1007,
"xn--t60b56a": 1008,
"xn--tckwe": 1009,
"xn--unup4y": 1010,
"xn--vermgensberater-ctb": 1011,
"xn--vermgensberatung-pwb": 1012,
"xn--vhquv": 1013,
"xn--vuq861b": 1014,
"xn--wgbh1c": 1015,
"xn--wgbl6a": 1016,
"xn--xhq521b": 1017,
"xn--xkc2al3hye2a": 1018,
"xn--xkc2dl3a5ee0h": 1019,
"xn--y9a3aq": 1020,
"xn--yfro4i67o": 1021,
"xn--ygbi2ammx": 1022,
"xn--zfr164b": 1023,
"xperia": 1024,
"xxx": 1025,
"xyz": 1026,
"yachts": 1027,
"yandex": 1028,
"ye": 1029,
"yodobashi": 1030,
"yoga": 1031,
"yokohama": 1032,
"youtube": 1033,
"yt": 1034,
"za": 1035,
"zip": 1036,
"zm": 1037,
"zone": 1038,
"zuerich": 1039,
"zw": 1040,
}
Version 2015082200, Last Updated Sat Aug 22 07:07:01 2015 UTC
package gotldmap
// Generated; DO NOT EDIT
// Map - map of top level domains with mark key
var Map = map[string]int{
"abb": 1,
"abbott": 2,
"abogado": 3,
"ac": 4,
"academy": 5,
"accenture": 6,
"accountant": 7,
"accountants": 8,
"active": 9,
"actor": 10,
"ad": 11,
"ads": 12,
"adult": 13,
"ae": 14,
"aeg": 15,
"aero": 16,
"af": 17,
"afl": 18,
"ag": 19,
"agency": 20,
"ai": 21,
"aig": 22,
"airforce": 23,
"airtel": 24,
"al": 25,
"allfinanz": 26,
"alsace": 27,
"am": 28,
"amsterdam": 29,
"android": 30,
"ao": 31,
"apartments": 32,
"app": 33,
"aq": 34,
"aquarelle": 35,
"ar": 36,
"archi": 37,
"army": 38,
"arpa": 39,
"as": 40,
"asia": 41,
"associates": 42,
"at": 43,
"attorney": 44,
"au": 45,
"auction": 46,
"audio": 47,
"auto": 48,
"autos": 49,
"aw": 50,
"ax": 51,
"axa": 52,
"az": 53,
"azure": 54,
"ba": 55,
"band": 56,
"bank": 57,
"bar": 58,
"barcelona": 59,
"barclaycard": 60,
"barclays": 61,
"bargains": 62,
"bauhaus": 63,
"bayern": 64,
"bb": 65,
"bbc": 66,
"bbva": 67,
"bcn": 68,
"bd": 69,
"be": 70,
"beer": 71,
"bentley": 72,
"berlin": 73,
"best": 74,
"bet": 75,
"bf": 76,
"bg": 77,
"bh": 78,
"bharti": 79,
"bi": 80,
"bible": 81,
"bid": 82,
"bike": 83,
"bing": 84,
"bingo": 85,
"bio": 86,
"biz": 87,
"bj": 88,
"black": 89,
"blackfriday": 90,
"bloomberg": 91,
"blue": 92,
"bm": 93,
"bmw": 94,
"bn": 95,
"bnl": 96,
"bnpparibas": 97,
"bo": 98,
"boats": 99,
"bond": 100,
"boo": 101,
"boots": 102,
"boutique": 103,
"br": 104,
"bradesco": 105,
"bridgestone": 106,
"broker": 107,
"brother": 108,
"brussels": 109,
"bs": 110,
"bt": 111,
"budapest": 112,
"build": 113,
"builders": 114,
"business": 115,
"buzz": 116,
"bv": 117,
"bw": 118,
"by": 119,
"bz": 120,
"bzh": 121,
"ca": 122,
"cab": 123,
"cafe": 124,
"cal": 125,
"camera": 126,
"camp": 127,
"cancerresearch": 128,
"canon": 129,
"capetown": 130,
"capital": 131,
"caravan": 132,
"cards": 133,
"care": 134,
"career": 135,
"careers": 136,
"cars": 137,
"cartier": 138,
"casa": 139,
"cash": 140,
"casino": 141,
"cat": 142,
"catering": 143,
"cba": 144,
"cbn": 145,
"cc": 146,
"cd": 147,
"ceb": 1041,
"center": 148,
"ceo": 149,
"cern": 150,
"cf": 151,
"cfa": 152,
"cfd": 153,
"cg": 154,
"ch": 155,
"chanel": 156,
"channel": 157,
"chat": 158,
"cheap": 159,
"chloe": 160,
"christmas": 161,
"chrome": 162,
"church": 163,
"ci": 164,
"cisco": 165,
"citic": 166,
"city": 167,
"ck": 168,
"cl": 169,
"claims": 170,
"cleaning": 171,
"click": 172,
"clinic": 173,
"clothing": 174,
"cloud": 175,
"club": 176,
"cm": 177,
"cn": 178,
"co": 179,
"coach": 180,
"codes": 181,
"coffee": 182,
"college": 183,
"cologne": 184,
"com": 185,
"commbank": 186,
"community": 187,
"company": 188,
"computer": 189,
"condos": 190,
"construction": 191,
"consulting": 192,
"contractors": 193,
"cooking": 194,
"cool": 195,
"coop": 196,
"corsica": 197,
"country": 198,
"coupons": 199,
"courses": 200,
"cr": 201,
"credit": 202,
"creditcard": 203,
"cricket": 204,
"crown": 205,
"crs": 206,
"cruises": 207,
"cu": 208,
"cuisinella": 209,
"cv": 210,
"cw": 211,
"cx": 212,
"cy": 213,
"cymru": 214,
"cyou": 215,
"cz": 216,
"dabur": 217,
"dad": 218,
"dance": 219,
"date": 220,
"dating": 221,
"datsun": 222,
"day": 223,
"dclk": 224,
"de": 225,
"deals": 226,
"degree": 227,
"delivery": 228,
"delta": 229,
"democrat": 230,
"dental": 231,
"dentist": 232,
"desi": 233,
"design": 234,
"dev": 235,
"diamonds": 236,
"diet": 237,
"digital": 238,
"direct": 239,
"directory": 240,
"discount": 241,
"dj": 242,
"dk": 243,
"dm": 244,
"dnp": 245,
"do": 246,
"docs": 247,
"dog": 248,
"doha": 249,
"domains": 250,
"doosan": 251,
"download": 252,
"drive": 253,
"durban": 254,
"dvag": 255,
"dz": 256,
"earth": 257,
"eat": 258,
"ec": 259,
"edu": 260,
"education": 261,
"ee": 262,
"eg": 263,
"email": 264,
"emerck": 265,
"energy": 266,
"engineer": 267,
"engineering": 268,
"enterprises": 269,
"epson": 270,
"equipment": 271,
"er": 272,
"erni": 273,
"es": 274,
"esq": 275,
"estate": 276,
"et": 277,
"eu": 278,
"eurovision": 279,
"eus": 280,
"events": 281,
"everbank": 282,
"exchange": 283,
"expert": 284,
"exposed": 285,
"express": 286,
"fage": 1042,
"fail": 287,
"faith": 288,
"family": 1043,
"fan": 289,
"fans": 290,
"farm": 291,
"fashion": 292,
"feedback": 293,
"fi": 294,
"film": 295,
"finance": 296,
"financial": 297,
"firmdale": 298,
"fish": 299,
"fishing": 300,
"fit": 301,
"fitness": 302,
"fj": 303,
"fk": 304,
"flights": 305,
"florist": 306,
"flowers": 307,
"flsmidth": 308,
"fly": 309,
"fm": 310,
"fo": 311,
"foo": 312,
"football": 313,
"forex": 314,
"forsale": 315,
"forum": 316,
"foundation": 317,
"fr": 318,
"frl": 319,
"frogans": 320,
"fund": 321,
"furniture": 322,
"futbol": 323,
"fyi": 324,
"ga": 325,
"gal": 326,
"gallery": 327,
"game": 328,
"garden": 329,
"gb": 330,
"gbiz": 331,
"gd": 332,
"gdn": 333,
"ge": 334,
"gent": 335,
"genting": 336,
"gf": 337,
"gg": 338,
"ggee": 339,
"gh": 340,
"gi": 341,
"gift": 342,
"gifts": 343,
"gives": 344,
"giving": 345,
"gl": 346,
"glass": 347,
"gle": 348,
"global": 349,
"globo": 350,
"gm": 351,
"gmail": 352,
"gmo": 353,
"gmx": 354,
"gn": 355,
"gold": 356,
"goldpoint": 357,
"golf": 358,
"goo": 359,
"goog": 360,
"google": 361,
"gop": 362,
"gov": 363,
"gp": 364,
"gq": 365,
"gr": 366,
"graphics": 367,
"gratis": 368,
"green": 369,
"gripe": 370,
"group": 1044,
"gs": 371,
"gt": 372,
"gu": 373,
"guge": 374,
"guide": 375,
"guitars": 376,
"guru": 377,
"gw": 378,
"gy": 379,
"hamburg": 380,
"hangout": 381,
"haus": 382,
"healthcare": 383,
"help": 384,
"here": 385,
"hermes": 386,
"hiphop": 387,
"hitachi": 388,
"hiv": 389,
"hk": 390,
"hm": 391,
"hn": 392,
"hockey": 393,
"holdings": 394,
"holiday": 395,
"homedepot": 396,
"homes": 397,
"honda": 398,
"horse": 399,
"host": 400,
"hosting": 401,
"hoteles": 402,
"hotmail": 403,
"house": 404,
"how": 405,
"hr": 406,
"hsbc": 407,
"ht": 408,
"hu": 409,
"ibm": 410,
"icbc": 411,
"ice": 412,
"icu": 413,
"id": 414,
"ie": 415,
"ifm": 416,
"iinet": 417,
"il": 418,
"im": 419,
"immo": 420,
"immobilien": 421,
"in": 422,
"industries": 423,
"infiniti": 424,
"info": 425,
"ing": 426,
"ink": 427,
"institute": 428,
"insure": 429,
"int": 430,
"international": 431,
"investments": 432,
"io": 433,
"ipiranga": 434,
"iq": 435,
"ir": 436,
"irish": 437,
"is": 438,
"ist": 439,
"istanbul": 440,
"it": 441,
"itau": 442,
"iwc": 443,
"java": 444,
"jcb": 445,
"je": 446,
"jetzt": 447,
"jewelry": 448,
"jlc": 449,
"jll": 450,
"jm": 451,
"jo": 452,
"jobs": 453,
"joburg": 454,
"jp": 455,
"jprs": 456,
"juegos": 457,
"kaufen": 458,
"kddi": 459,
"ke": 460,
"kg": 461,
"kh": 462,
"ki": 463,
"kim": 464,
"kitchen": 465,
"kiwi": 466,
"km": 467,
"kn": 468,
"koeln": 469,
"komatsu": 470,
"kp": 471,
"kr": 472,
"krd": 473,
"kred": 474,
"kw": 475,
"ky": 476,
"kyoto": 477,
"kz": 478,
"la": 479,
"lacaixa": 480,
"lancaster": 481,
"land": 482,
"lasalle": 483,
"lat": 484,
"latrobe": 485,
"law": 486,
"lawyer": 487,
"lb": 488,
"lc": 489,
"lds": 490,
"lease": 491,
"leclerc": 492,
"legal": 493,
"lexus": 494,
"lgbt": 495,
"li": 496,
"liaison": 497,
"lidl": 498,
"life": 499,
"lighting": 500,
"limited": 501,
"limo": 502,
"link": 503,
"live": 504,
"lixil": 505,
"lk": 506,
"loan": 507,
"loans": 508,
"lol": 509,
"london": 510,
"lotte": 511,
"lotto": 512,
"love": 513,
"lr": 514,
"ls": 515,
"lt": 516,
"ltda": 517,
"lu": 518,
"lupin": 519,
"luxe": 520,
"luxury": 521,
"lv": 522,
"ly": 523,
"ma": 524,
"madrid": 525,
"maif": 526,
"maison": 527,
"man": 528,
"management": 529,
"mango": 530,
"market": 531,
"marketing": 532,
"markets": 533,
"marriott": 534,
"mba": 535,
"mc": 536,
"md": 537,
"me": 538,
"media": 539,
"meet": 540,
"melbourne": 541,
"meme": 542,
"memorial": 543,
"men": 544,
"menu": 545,
"mg": 546,
"mh": 547,
"miami": 548,
"microsoft": 549,
"mil": 550,
"mini": 551,
"mk": 552,
"ml": 553,
"mm": 554,
"mma": 555,
"mn": 556,
"mo": 557,
"mobi": 558,
"moda": 559,
"moe": 560,
"mom": 1046,
"monash": 561,
"money": 562,
"montblanc": 563,
"mormon": 564,
"mortgage": 565,
"moscow": 566,
"motorcycles": 567,
"mov": 568,
"movie": 569,
"movistar": 570,
"mp": 571,
"mq": 572,
"mr": 573,
"ms": 574,
"mt": 575,
"mtn": 576,
"mtpc": 577,
"mu": 578,
"museum": 579,
"mv": 580,
"mw": 581,
"mx": 582,
"my": 583,
"mz": 584,
"na": 585,
"nadex": 586,
"nagoya": 587,
"name": 588,
"navy": 589,
"nc": 590,
"ne": 591,
"nec": 592,
"net": 593,
"netbank": 594,
"network": 595,
"neustar": 596,
"new": 597,
"news": 598,
"nexus": 599,
"nf": 600,
"ng": 601,
"ngo": 602,
"nhk": 603,
"ni": 604,
"nico": 605,
"ninja": 606,
"nissan": 607,
"nl": 608,
"no": 609,
"nokia": 610,
"np": 611,
"nr": 612,
"nra": 613,
"nrw": 614,
"ntt": 615,
"nu": 616,
"nyc": 617,
"nz": 618,
"office": 619,
"okinawa": 620,
"om": 621,
"omega": 622,
"one": 623,
"ong": 624,
"onl": 625,
"online": 626,
"ooo": 627,
"oracle": 628,
"orange": 629,
"org": 630,
"organic": 631,
"osaka": 632,
"otsuka": 633,
"ovh": 634,
"pa": 635,
"page": 636,
"panerai": 637,
"paris": 638,
"partners": 639,
"parts": 640,
"party": 641,
"pe": 642,
"pet": 643,
"pf": 644,
"pg": 645,
"ph": 646,
"pharmacy": 647,
"philips": 648,
"photo": 649,
"photography": 650,
"photos": 651,
"physio": 652,
"piaget": 653,
"pics": 654,
"pictet": 655,
"pictures": 656,
"pink": 657,
"pizza": 658,
"pk": 659,
"pl": 660,
"place": 661,
"play": 662,
"plumbing": 663,
"plus": 664,
"pm": 665,
"pn": 666,
"pohl": 667,
"poker": 668,
"porn": 669,
"post": 670,
"pr": 671,
"praxi": 672,
"press": 673,
"pro": 674,
"prod": 675,
"productions": 676,
"prof": 677,
"properties": 678,
"property": 679,
"ps": 680,
"pt": 681,
"pub": 682,
"pw": 683,
"py": 684,
"qa": 685,
"qpon": 686,
"quebec": 687,
"racing": 688,
"re": 689,
"realtor": 690,
"realty": 691,
"recipes": 692,
"red": 693,
"redstone": 694,
"rehab": 695,
"reise": 696,
"reisen": 697,
"reit": 698,
"ren": 699,
"rent": 700,
"rentals": 701,
"repair": 702,
"report": 703,
"republican": 704,
"rest": 705,
"restaurant": 706,
"review": 707,
"reviews": 708,
"rich": 709,
"ricoh": 710,
"rio": 711,
"rip": 712,
"ro": 713,
"rocks": 714,
"rodeo": 715,
"rs": 716,
"rsvp": 717,
"ru": 718,
"ruhr": 719,
"run": 720,
"rw": 721,
"ryukyu": 722,
"sa": 723,
"saarland": 724,
"sakura": 725,
"sale": 726,
"samsung": 727,
"sandvik": 728,
"sandvikcoromant": 729,
"sanofi": 730,
"sap": 731,
"sarl": 732,
"saxo": 733,
"sb": 734,
"sc": 735,
"sca": 736,
"scb": 737,
"schmidt": 738,
"scholarships": 739,
"school": 740,
"schule": 741,
"schwarz": 742,
"science": 743,
"scor": 744,
"scot": 745,
"sd": 746,
"se": 747,
"seat": 748,
"seek": 1045,
"sener": 749,
"services": 750,
"sew": 751,
"sex": 752,
"sexy": 753,
"sg": 754,
"sh": 755,
"shiksha": 756,
"shoes": 757,
"show": 758,
"shriram": 759,
"si": 760,
"singles": 761,
"site": 762,
"sj": 763,
"sk": 764,
"ski": 765,
"sky": 766,
"skype": 767,
"sl": 768,
"sm": 769,
"sn": 770,
"sncf": 771,
"so": 772,
"soccer": 773,
"social": 774,
"software": 775,
"sohu": 776,
"solar": 777,
"solutions": 778,
"sony": 779,
"soy": 780,
"space": 781,
"spiegel": 782,
"spreadbetting": 783,
"sr": 784,
"srl": 785,
"st": 786,
"starhub": 787,
"statoil": 788,
"studio": 789,
"study": 790,
"style": 791,
"su": 792,
"sucks": 793,
"supplies": 794,
"supply": 795,
"support": 796,
"surf": 797,
"surgery": 798,
"suzuki": 799,
"sv": 800,
"swatch": 801,
"swiss": 802,
"sx": 803,
"sy": 804,
"sydney": 805,
"systems": 806,
"sz": 807,
"taipei": 808,
"tatamotors": 809,
"tatar": 810,
"tattoo": 811,
"tax": 812,
"taxi": 813,
"tc": 814,
"td": 815,
"team": 816,
"tech": 817,
"technology": 818,
"tel": 819,
"telefonica": 820,
"temasek": 821,
"tennis": 822,
"tf": 823,
"tg": 824,
"th": 825,
"thd": 826,
"theater": 827,
"tickets": 828,
"tienda": 829,
"tips": 830,
"tires": 831,
"tirol": 832,
"tj": 833,
"tk": 834,
"tl": 835,
"tm": 836,
"tn": 837,
"to": 838,
"today": 839,
"tokyo": 840,
"tools": 841,
"top": 842,
"toray": 843,
"toshiba": 844,
"tours": 845,
"town": 846,
"toyota": 847,
"toys": 848,
"tr": 849,
"trade": 850,
"trading": 851,
"training": 852,
"travel": 853,
"trust": 854,
"tt": 855,
"tui": 856,
"tv": 857,
"tw": 858,
"tz": 859,
"ua": 860,
"ubs": 861,
"ug": 862,
"uk": 863,
"university": 864,
"uno": 865,
"uol": 866,
"us": 867,
"uy": 868,
"uz": 869,
"va": 870,
"vacations": 871,
"vc": 872,
"ve": 873,
"vegas": 874,
"ventures": 875,
"versicherung": 876,
"vet": 877,
"vg": 878,
"vi": 879,
"viajes": 880,
"video": 881,
"villas": 882,
"vin": 883,
"vision": 884,
"vista": 885,
"vistaprint": 886,
"vlaanderen": 887,
"vn": 888,
"vodka": 889,
"vote": 890,
"voting": 891,
"voto": 892,
"voyage": 893,
"vu": 894,
"wales": 895,
"walter": 896,
"wang": 897,
"watch": 898,
"webcam": 899,
"website": 900,
"wed": 901,
"wedding": 902,
"weir": 903,
"wf": 904,
"whoswho": 905,
"wien": 906,
"wiki": 907,
"williamhill": 908,
"win": 909,
"windows": 910,
"wine": 911,
"wme": 912,
"work": 913,
"works": 914,
"world": 915,
"ws": 916,
"wtc": 917,
"wtf": 918,
"xbox": 919,
"xerox": 920,
"xin": 921,
"xn--11b4c3d": 922,
"xn--1qqw23a": 923,
"xn--30rr7y": 924,
"xn--3bst00m": 925,
"xn--3ds443g": 926,
"xn--3e0b707e": 927,
"xn--3pxu8k": 928,
"xn--42c2d9a": 929,
"xn--45brj9c": 930,
"xn--45q11c": 931,
"xn--4gbrim": 932,
"xn--55qw42g": 933,
"xn--55qx5d": 934,
"xn--6frz82g": 935,
"xn--6qq986b3xl": 936,
"xn--80adxhks": 937,
"xn--80ao21a": 938,
"xn--80asehdb": 939,
"xn--80aswg": 940,
"xn--90a3ac": 941,
"xn--90ais": 942,
"xn--9dbq2a": 943,
"xn--9et52u": 944,
"xn--b4w605ferd": 945,
"xn--c1avg": 946,
"xn--c2br7g": 947,
"xn--cg4bki": 948,
"xn--clchc0ea0b2g2a9gcd": 949,
"xn--czr694b": 950,
"xn--czrs0t": 951,
"xn--czru2d": 952,
"xn--d1acj3b": 953,
"xn--d1alf": 954,
"xn--estv75g": 955,
"xn--fhbei": 956,
"xn--fiq228c5hs": 957,
"xn--fiq64b": 958,
"xn--fiqs8s": 959,
"xn--fiqz9s": 960,
"xn--fjq720a": 961,
"xn--flw351e": 962,
"xn--fpcrj9c3d": 963,
"xn--fzc2c9e2c": 964,
"xn--gecrj9c": 965,
"xn--h2brj9c": 966,
"xn--hxt814e": 967,
"xn--i1b6b1a6a2e": 968,
"xn--imr513n": 969,
"xn--io0a7i": 970,
"xn--j1aef": 971,
"xn--j1amh": 972,
"xn--j6w193g": 973,
"xn--kcrx77d1x4a": 974,
"xn--kprw13d": 975,
"xn--kpry57d": 976,
"xn--kput3i": 977,
"xn--l1acc": 978,
"xn--lgbbat1ad8j": 979,
"xn--mgb9awbf": 980,
"xn--mgba3a4f16a": 981,
"xn--mgbaam7a8h": 982,
"xn--mgbab2bd": 983,
"xn--mgbayh7gpa": 984,
"xn--mgbbh1a71e": 985,
"xn--mgbc0a9azcg": 986,
"xn--mgberp4a5d4ar": 987,
"xn--mgbpl2fh": 988,
"xn--mgbx4cd0ab": 989,
"xn--mk1bu44c": 990,
"xn--mxtq1m": 991,
"xn--ngbc5azd": 992,
"xn--node": 993,
"xn--nqv7f": 994,
"xn--nqv7fs00ema": 995,
"xn--nyqy26a": 996,
"xn--o3cw4h": 997,
"xn--ogbpf8fl": 998,
"xn--p1acf": 999,
"xn--p1ai": 1000,
"xn--pgbs0dh": 1001,
"xn--pssy2u": 1002,
"xn--q9jyb4c": 1003,
"xn--qcka1pmc": 1004,
"xn--rhqv96g": 1005,
"xn--s9brj9c": 1006,
"xn--ses554g": 1007,
"xn--t60b56a": 1008,
"xn--tckwe": 1009,
"xn--unup4y": 1010,
"xn--vermgensberater-ctb": 1011,
"xn--vermgensberatung-pwb": 1012,
"xn--vhquv": 1013,
"xn--vuq861b": 1014,
"xn--wgbh1c": 1015,
"xn--wgbl6a": 1016,
"xn--xhq521b": 1017,
"xn--xkc2al3hye2a": 1018,
"xn--xkc2dl3a5ee0h": 1019,
"xn--y9a3aq": 1020,
"xn--yfro4i67o": 1021,
"xn--ygbi2ammx": 1022,
"xn--zfr164b": 1023,
"xperia": 1024,
"xxx": 1025,
"xyz": 1026,
"yachts": 1027,
"yandex": 1028,
"ye": 1029,
"yodobashi": 1030,
"yoga": 1031,
"yokohama": 1032,
"youtube": 1033,
"yt": 1034,
"za": 1035,
"zip": 1036,
"zm": 1037,
"zone": 1038,
"zuerich": 1039,
"zw": 1040,
}
|
package styxauth
import (
"errors"
"io"
"aqwari.net/net/styx"
)
var (
errAuthFailure = errors.New("authentication failed")
)
type stackAll []styx.Auth
type stackAny []styx.Auth
// All combines multiple styx.Auth values into a single styx.Auth.
// When authenticating, the Auth method of each parameter is called
// in order. If all styx.Auth backends succeed, authentication is
// successful. Otherwise, authentication fails.
// All combines multiple styx.Auth values into a single styx.Auth.
// When authenticating, the Auth method of each parameter is called
// in order. If all styx.Auth backends succeed, authentication is
// successful. Otherwise, authentication fails.
func All(auth ...styx.Auth) styx.Auth {
	var combined stackAll = auth
	return combined
}
// Auth implements styx.Auth by invoking every stacked backend in
// order. It returns the first error encountered, or nil only when
// every backend authenticates the connection.
func (stack stackAll) Auth(rw io.ReadWriter, c *styx.Conn, user, group, access string) error {
	for _, auth := range stack {
		// auth is a styx.Auth value, not a func type; it must be
		// invoked through its Auth method.
		if err := auth.Auth(rw, c, user, group, access); err != nil {
			return err
		}
	}
	return nil
}
// Any combines multiple styx.Auth values into a single styx.Auth.
// When authenticating, the Auth method of each parameter is called
// in order. If any styx.Auth backend succeeds, authentication is
// successful. Otherwise, authentication fails.
// Any combines multiple styx.Auth values into a single styx.Auth.
// When authenticating, the Auth method of each parameter is called
// in order. If any styx.Auth backend succeeds, authentication is
// successful. Otherwise, authentication fails.
func Any(auth ...styx.Auth) styx.Auth {
	var combined stackAny = auth
	return combined
}
// Auth implements styx.Auth by trying each stacked backend in order.
// It returns nil as soon as one backend succeeds, and errAuthFailure
// when every backend rejects the connection.
func (stack stackAny) Auth(rw io.ReadWriter, c *styx.Conn, user, group, access string) error {
	for _, auth := range stack {
		// auth is a styx.Auth value, not a func type; it must be
		// invoked through its Auth method.
		if err := auth.Auth(rw, c, user, group, access); err == nil {
			return nil
		}
	}
	return errAuthFailure
}
// Whitelist returns a styx.Auth that authenticates a connection only
// when the tuple (user, group, access) maps to true in the rules map.
// Tuples absent from the map are rejected.
func Whitelist(rules map[[3]string]bool) styx.Auth {
	return allowMap(rules)
}
type allowMap map[[3]string]bool
// Auth implements styx.Auth using the whitelist table: the caller's
// (user, group, access) tuple must map to true, otherwise
// errAuthFailure is returned.
func (m allowMap) Auth(rw io.ReadWriter, c *styx.Conn, user, group, access string) error {
	// Look up the caller's actual credentials — not the literal
	// strings "user", "group" and "access", which would make every
	// lookup hit the same (almost certainly absent) key.
	q := [3]string{user, group, access}
	if m[q] {
		return nil
	}
	return errAuthFailure
}
Use auth values correctly in helper functions
package styxauth
import (
"errors"
"io"
"aqwari.net/net/styx"
)
var (
errAuthFailure = errors.New("authentication failed")
)
type stackAll []styx.Auth
type stackAny []styx.Auth
// All combines multiple styx.Auth values into a single styx.Auth.
// When authenticating, the Auth method of each parameter is called
// in order. Authentication is successful only if every backend
// succeeds; otherwise it fails with the first backend's error.
func All(auth ...styx.Auth) styx.Auth {
	stacked := stackAll(auth)
	return stacked
}
// Auth runs every stacked backend in sequence, stopping at the first
// failure. It reports nil only when all backends succeed.
func (stack stackAll) Auth(rw io.ReadWriter, c *styx.Conn, user, group, access string) error {
	var err error
	for _, backend := range stack {
		if err = backend.Auth(rw, c, user, group, access); err != nil {
			break
		}
	}
	return err
}
// Any combines multiple styx.Auth values into a single styx.Auth.
// When authenticating, the Auth method of each parameter is called
// in order. Authentication is successful if at least one backend
// succeeds; otherwise it fails.
func Any(auth ...styx.Auth) styx.Auth {
	stacked := stackAny(auth)
	return stacked
}
// Auth tries each stacked backend in sequence and reports success as
// soon as any backend accepts the connection. If none do, it returns
// errAuthFailure.
func (stack stackAny) Auth(rw io.ReadWriter, c *styx.Conn, user, group, access string) error {
	for _, backend := range stack {
		if backend.Auth(rw, c, user, group, access) == nil {
			return nil
		}
	}
	return errAuthFailure
}
// Whitelist returns a styx.Auth that authenticates a connection only
// when the tuple (user, group, access) maps to true in the rules map.
// Tuples absent from the map are rejected.
func Whitelist(rules map[[3]string]bool) styx.Auth {
	return allowMap(rules)
}
type allowMap map[[3]string]bool
// Auth implements styx.Auth using the whitelist table: the caller's
// (user, group, access) tuple must map to true, otherwise
// errAuthFailure is returned.
func (m allowMap) Auth(rw io.ReadWriter, c *styx.Conn, user, group, access string) error {
	// Look up the caller's actual credentials — not the literal
	// strings "user", "group" and "access", which would make every
	// lookup hit the same (almost certainly absent) key.
	q := [3]string{user, group, access}
	if m[q] {
		return nil
	}
	return errAuthFailure
}
|
package main
import (
"bytes"
"fmt"
"io"
"strings"
"time"
)
var (
syncProtocolPrefix = "ORGALORG"
syncProtocolHello = "HELLO"
syncProtocolNode = "NODE"
syncProtocolStart = "START"
syncProtocolSync = "SYNC"
)
// syncProtocol handles the out-of-band SYNC protocol spoken with
// remote nodes over their stdin streams.
type syncProtocol struct {
	// input is an inbound buffer. NOTE(review): it is allocated in
	// newSyncProtocol but never read or written in this file —
	// possibly vestigial; confirm before removing.
	input *bytes.Buffer
	// output should be connected to the stdins of all connected nodes.
	output io.WriteCloser
	// prefix is a unique string which prefixes every protocol message.
	prefix string
}
// newSyncProtocol returns a syncProtocol instantiated with a unique
// message prefix derived from the current time in nanoseconds.
func newSyncProtocol() *syncProtocol {
	return &syncProtocol{
		input: &bytes.Buffer{},
		prefix: fmt.Sprintf(
			"%s:%d",
			syncProtocolPrefix,
			time.Now().UnixNano(),
		),
	}
}
// Close is currently a no-op and always returns nil.
func (protocol *syncProtocol) Close() error {
	return nil
}
// Init starts the protocol and sends the HELLO message to the given
// writer. The writer is retained and used for all further messages.
func (protocol *syncProtocol) Init(output io.WriteCloser) error {
	protocol.output = output
	_, err := io.WriteString(
		protocol.output,
		protocol.prefix+" "+syncProtocolHello+"\n",
	)
	if err != nil {
		// EOF is tolerated: the peer may not speak the protocol.
		return protocolSuspendEOF(err)
	}
	return nil
}
// SendNode sends the serialized representation of the specified node
// as a NODE message.
func (protocol *syncProtocol) SendNode(node *remoteExecutionNode) error {
	_, err := io.WriteString(
		protocol.output,
		protocol.prefix+" "+syncProtocolNode+" "+node.String()+"\n",
	)
	if err != nil {
		// EOF is tolerated: the peer may not speak the protocol.
		return protocolSuspendEOF(err)
	}
	return nil
}
// SendStart sends the START message to the writer.
func (protocol *syncProtocol) SendStart() error {
	_, err := io.WriteString(
		protocol.output,
		protocol.prefix+" "+syncProtocolStart+"\n",
	)
	if err != nil {
		// EOF is tolerated: the peer may not speak the protocol.
		return protocolSuspendEOF(err)
	}
	return nil
}
// IsSyncCommand reports whether line looks like an incoming SYNC
// message from a remote node (it carries this session's prefix).
func (protocol *syncProtocol) IsSyncCommand(line string) bool {
	return strings.HasPrefix(line, protocol.prefix+" "+syncProtocolSync)
}
// SendSync broadcasts a SYNC message, tagging it with the node it
// originated from (source) and re-attaching any extra description
// extracted from the original SYNC line sent by that node.
func (protocol *syncProtocol) SendSync(
	source fmt.Stringer,
	sync string,
) error {
	// Strip this session's "<prefix> SYNC" marker to recover the
	// optional free-form payload of the incoming message.
	data := strings.TrimSpace(
		strings.TrimPrefix(sync, protocol.prefix+" "+syncProtocolSync),
	)
	_, err := io.WriteString(
		protocol.output,
		protocol.prefix+" "+syncProtocolSync+" "+source.String()+" "+data+"\n",
	)
	if err != nil {
		// EOF is tolerated: the peer may not speak the protocol.
		return protocolSuspendEOF(err)
	}
	return nil
}
// protocolSuspendEOF suppresses io.EOF so that plain commands, which do
// not speak the protocol and may close their stdin before the protocol
// is initiated, are not reported as failures.
func protocolSuspendEOF(err error) error {
if err == io.EOF {
return nil
}
return err
}
add commands for sync protocol code
package main
import (
"fmt"
"io"
"strings"
"time"
)
var (
syncProtocolPrefix = "ORGALORG"
syncProtocolHello = "HELLO"
syncProtocolNode = "NODE"
syncProtocolStart = "START"
syncProtocolSync = "SYNC"
)
// syncProtocol handles SYNC protocol described in the main.go.
//
// It will handle protocol over all connected nodes.
type syncProtocol struct {
// output represents writer, that should be connected to stdins of
// all connected nodes.
output io.WriteCloser
// prefix is a unique string which prefixes every protocol message.
prefix string
}
// newSyncProtocol returns a syncProtocol instantiated with a unique
// prefix derived from the current time in nanoseconds.
func newSyncProtocol() *syncProtocol {
	uniquePrefix := fmt.Sprintf(
		"%s:%d",
		syncProtocolPrefix,
		time.Now().UnixNano(),
	)
	return &syncProtocol{prefix: uniquePrefix}
}
// Close is currently a no-op and always returns nil.
func (protocol *syncProtocol) Close() error {
	return nil
}
// Init starts the protocol and sends the HELLO message to the given
// writer. The writer is retained and used for all further messages.
func (protocol *syncProtocol) Init(output io.WriteCloser) error {
	protocol.output = output
	_, err := fmt.Fprintf(output, "%s %s\n", protocol.prefix, syncProtocolHello)
	return protocolSuspendEOF(err)
}
// SendNode sends the serialized representation of the specified node
// as a NODE message.
func (protocol *syncProtocol) SendNode(node *remoteExecutionNode) error {
	_, err := fmt.Fprintf(
		protocol.output,
		"%s %s %s\n",
		protocol.prefix, syncProtocolNode, node.String(),
	)
	return protocolSuspendEOF(err)
}
// SendStart sends the START message to the writer.
func (protocol *syncProtocol) SendStart() error {
	_, err := fmt.Fprintf(
		protocol.output,
		"%s %s\n",
		protocol.prefix, syncProtocolStart,
	)
	return protocolSuspendEOF(err)
}
// IsSyncCommand reports whether line looks like an incoming SYNC
// message from a remote node (it carries this session's prefix).
func (protocol *syncProtocol) IsSyncCommand(line string) bool {
	marker := protocol.prefix + " " + syncProtocolSync
	return strings.HasPrefix(line, marker)
}
// SendSync broadcasts a SYNC message, tagging it with the node it
// originated from (source) and re-attaching any extra description
// extracted from the original SYNC line sent by that node.
func (protocol *syncProtocol) SendSync(
	source fmt.Stringer,
	sync string,
) error {
	marker := protocol.prefix + " " + syncProtocolSync
	payload := strings.TrimSpace(strings.TrimPrefix(sync, marker))
	_, err := fmt.Fprintf(
		protocol.output,
		"%s %s %s\n",
		marker, source.String(), payload,
	)
	return protocolSuspendEOF(err)
}
// protocolSuspendEOF suppresses io.EOF so that plain commands, which do
// not speak the protocol and may close their stdin before the protocol
// is initiated, are not reported as failures.
func protocolSuspendEOF(err error) error {
if err == io.EOF {
return nil
}
return err
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar
import (
"bytes";
"fmt";
"io";
"testing";
"testing/iotest";
)
type writerTestEntry struct {
header *Header;
contents string;
}
type writerTest struct {
file string; // filename of expected output
entries []*writerTestEntry;
}
var writerTests = []*writerTest{
&writerTest{
file: "testdata/writer.tar",
entries: []*writerTestEntry{
&writerTestEntry{
header: &Header{
Name: "small.txt",
Mode: 0640,
Uid: 73025,
Gid: 5000,
Size: 5,
Mtime: 1246508266,
Typeflag: '0',
Uname: "dsymonds",
Gname: "eng",
},
contents: "Kilts",
},
&writerTestEntry{
header: &Header{
Name: "small2.txt",
Mode: 0640,
Uid: 73025,
Gid: 5000,
Size: 11,
Mtime: 1245217492,
Typeflag: '0',
Uname: "dsymonds",
Gname: "eng",
},
contents: "Google.com\n",
},
}
},
// The truncated test file was produced using these commands:
// dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
// tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
&writerTest{
file: "testdata/writer-big.tar",
entries: []*writerTestEntry{
&writerTestEntry{
header: &Header{
Name: "tmp/16gig.txt",
Mode: 0640,
Uid: 73025,
Gid: 5000,
Size: 16 << 30,
Mtime: 1254699560,
Typeflag: '0',
Uname: "dsymonds",
Gname: "eng",
},
// no contents
},
},
},
}
// bytestr renders b as space-separated two-character hex bytes for
// visual inspection, printing ASCII letters and digits literally and
// prefixing the row with its four-digit hex offset.
func bytestr(offset int, b []byte) string {
	out := fmt.Sprintf("%04x ", offset)
	for _, ch := range b {
		isAlnum := ('0' <= ch && ch <= '9') ||
			('A' <= ch && ch <= 'Z') ||
			('a' <= ch && ch <= 'z')
		if isAlnum {
			out += fmt.Sprintf(" %c", ch)
		} else {
			out += fmt.Sprintf(" %02x", ch)
		}
	}
	return out
}
// bytediff renders a pseudo-diff between two byte blocks, emitting a
// "-expected / +actual" pair for every 32-byte row that differs.
func bytediff(a []byte, b []byte) string {
	const rowLen = 32
	out := fmt.Sprintf("(%d bytes vs. %d bytes)\n", len(a), len(b))
	for offset := 0; len(a)+len(b) > 0; offset += rowLen {
		na, nb := rowLen, rowLen
		if len(a) < na {
			na = len(a)
		}
		if len(b) < nb {
			nb = len(b)
		}
		rowA := bytestr(offset, a[:na])
		rowB := bytestr(offset, b[:nb])
		if rowA != rowB {
			out += fmt.Sprintf("-%v\n+%v\n", rowA, rowB)
		}
		a, b = a[na:], b[nb:]
	}
	return out
}
// TestWriter writes each test case's entries through a Writer and
// compares the archive produced against the golden file on disk.
func TestWriter(t *testing.T) {
testLoop:
	for i, test := range writerTests {
		expected, err := io.ReadFile(test.file)
		if err != nil {
			t.Errorf("test %d: Unexpected error: %v", i, err)
			continue
		}
		buf := new(bytes.Buffer)
		// Only capture the first 4 KB of output; the big-file case
		// would otherwise emit gigabytes of zeros.
		tw := NewWriter(iotest.TruncateWriter(buf, 4<<10))
		for j, entry := range test.entries {
			if err := tw.WriteHeader(entry.header); err != nil {
				t.Errorf("test %d, entry %d: Failed writing header: %v", i, j, err)
				continue testLoop
			}
			if _, err := io.WriteString(tw, entry.contents); err != nil {
				t.Errorf("test %d, entry %d: Failed writing contents: %v", i, j, err)
				continue testLoop
			}
		}
		if err := tw.Close(); err != nil {
			// Fixed: the format string has two verbs ("%d", "%v")
			// but was previously passed only err.
			t.Errorf("test %d: Failed closing archive: %v", i, err)
			continue testLoop
		}
		actual := buf.Bytes()
		if !bytes.Equal(expected, actual) {
			t.Errorf("test %d: Incorrect result: (-=expected, +=actual)\n%v",
				i, bytediff(expected, actual))
		}
	}
}
gofmt-ify src/pkg/archive
R=rsc
http://go/go-review/1018046
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tar
import (
"bytes";
"fmt";
"io";
"testing";
"testing/iotest";
)
type writerTestEntry struct {
header *Header;
contents string;
}
type writerTest struct {
file string; // filename of expected output
entries []*writerTestEntry;
}
// writerTests pairs golden tar files under testdata/ with the header
// and content entries that, when written through Writer, must
// reproduce those files byte-for-byte.
var writerTests = []*writerTest{
	&writerTest{
		file: "testdata/writer.tar",
		entries: []*writerTestEntry{
			&writerTestEntry{
				header: &Header{
					Name: "small.txt",
					Mode: 0640,
					Uid: 73025,
					Gid: 5000,
					Size: 5,
					Mtime: 1246508266,
					Typeflag: '0',
					Uname: "dsymonds",
					Gname: "eng",
				},
				contents: "Kilts",
			},
			&writerTestEntry{
				header: &Header{
					Name: "small2.txt",
					Mode: 0640,
					Uid: 73025,
					Gid: 5000,
					Size: 11,
					Mtime: 1245217492,
					Typeflag: '0',
					Uname: "dsymonds",
					Gname: "eng",
				},
				contents: "Google.com\n",
			},
		},
	},
	// The truncated test file was produced using these commands:
	//   dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
	//   tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
	&writerTest{
		file: "testdata/writer-big.tar",
		entries: []*writerTestEntry{
			&writerTestEntry{
				header: &Header{
					Name: "tmp/16gig.txt",
					Mode: 0640,
					Uid: 73025,
					Gid: 5000,
					Size: 16<<30,
					Mtime: 1254699560,
					Typeflag: '0',
					Uname: "dsymonds",
					Gname: "eng",
				},
				// no contents
			},
		},
	},
}
// bytestr renders b as space-separated two-character hex bytes for
// visual inspection, printing ASCII letters and digits literally and
// prefixing the row with its four-digit hex offset.
func bytestr(offset int, b []byte) string {
	out := fmt.Sprintf("%04x ", offset)
	for _, ch := range b {
		isAlnum := ('0' <= ch && ch <= '9') ||
			('A' <= ch && ch <= 'Z') ||
			('a' <= ch && ch <= 'z')
		if isAlnum {
			out += fmt.Sprintf(" %c", ch)
		} else {
			out += fmt.Sprintf(" %02x", ch)
		}
	}
	return out
}
// bytediff renders a pseudo-diff between two byte blocks, emitting a
// "-expected / +actual" pair for every 32-byte row that differs.
func bytediff(a []byte, b []byte) string {
	const rowLen = 32
	out := fmt.Sprintf("(%d bytes vs. %d bytes)\n", len(a), len(b))
	for offset := 0; len(a)+len(b) > 0; offset += rowLen {
		na, nb := rowLen, rowLen
		if len(a) < na {
			na = len(a)
		}
		if len(b) < nb {
			nb = len(b)
		}
		rowA := bytestr(offset, a[:na])
		rowB := bytestr(offset, b[:nb])
		if rowA != rowB {
			out += fmt.Sprintf("-%v\n+%v\n", rowA, rowB)
		}
		a, b = a[na:], b[nb:]
	}
	return out
}
// TestWriter writes each test case's entries through a Writer and
// compares the archive produced against the golden file on disk.
func TestWriter(t *testing.T) {
testLoop:
	for i, test := range writerTests {
		expected, err := io.ReadFile(test.file)
		if err != nil {
			t.Errorf("test %d: Unexpected error: %v", i, err)
			continue
		}
		buf := new(bytes.Buffer)
		// Only capture the first 4 KB of output; the big-file case
		// would otherwise emit gigabytes of zeros.
		tw := NewWriter(iotest.TruncateWriter(buf, 4<<10))
		for j, entry := range test.entries {
			if err := tw.WriteHeader(entry.header); err != nil {
				t.Errorf("test %d, entry %d: Failed writing header: %v", i, j, err)
				continue testLoop
			}
			if _, err := io.WriteString(tw, entry.contents); err != nil {
				t.Errorf("test %d, entry %d: Failed writing contents: %v", i, j, err)
				continue testLoop
			}
		}
		if err := tw.Close(); err != nil {
			// Fixed: the format string has two verbs ("%d", "%v")
			// but was previously passed only err.
			t.Errorf("test %d: Failed closing archive: %v", i, err)
			continue testLoop
		}
		actual := buf.Bytes()
		if !bytes.Equal(expected, actual) {
			t.Errorf("test %d: Incorrect result: (-=expected, +=actual)\n%v",
				i, bytediff(expected, actual))
		}
	}
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Windows system calls.
package syscall
import (
"unsafe"
"utf16"
)
const OS = "windows"
/*
small demo to detect version of windows you are running:
package main
import (
"syscall"
)
func abort(funcname string, err int) {
panic(funcname + " failed: " + syscall.Errstr(err))
}
func print_version(v uint32) {
major := byte(v)
minor := uint8(v >> 8)
build := uint16(v >> 16)
print("windows version ", major, ".", minor, " (Build ", build, ")\n")
}
func main() {
h, err := syscall.LoadLibrary("kernel32.dll")
if err != 0 {
abort("LoadLibrary", err)
}
defer syscall.FreeLibrary(h)
proc, err := syscall.GetProcAddress(h, "GetVersion")
if err != 0 {
abort("GetProcAddress", err)
}
r, _, _ := syscall.Syscall(uintptr(proc), 0, 0, 0)
print_version(uint32(r))
}
*/
// StringToUTF16 returns the UTF-16 encoding of the UTF-8 string s,
// with a terminating NUL added.
func StringToUTF16(s string) []uint16 { return utf16.Encode([]int(s + "\x00")) }
// UTF16ToString returns the UTF-8 encoding of the UTF-16 sequence s,
// with a terminating NUL removed. Anything after the first NUL, if
// present, is discarded.
func UTF16ToString(s []uint16) string {
	// Truncate at the first NUL terminator.
	for i, v := range s {
		if v == 0 {
			s = s[0:i]
			break
		}
	}
	return string(utf16.Decode(s))
}
// StringToUTF16Ptr returns pointer to the UTF-16 encoding of
// the UTF-8 string s, with a terminating NUL added.
func StringToUTF16Ptr(s string) *uint16 { return &StringToUTF16(s)[0] }
// NsecToTimeval converts a nanosecond count into a Timeval holding
// whole seconds plus the remainder expressed in microseconds.
func NsecToTimeval(nsec int64) (tv Timeval) {
	seconds := nsec / 1e9
	remainder := nsec % 1e9
	tv.Sec = int32(seconds)
	tv.Usec = int32(remainder / 1e3)
	return tv
}
// dll helpers
// implemented in ../pkg/runtime/windows/syscall.cgo
func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, lasterr uintptr)
func loadlibraryex(filename uintptr) (handle uint32)
func getprocaddress(handle uint32, procname uintptr) (proc uintptr)
// loadDll loads the named DLL via LoadLibraryEx and returns its module
// handle, panicking if the library cannot be loaded (a handle of 0).
func loadDll(fname string) uint32 {
	m := loadlibraryex(uintptr(unsafe.Pointer(StringBytePtr(fname))))
	if m == 0 {
		panic("syscall: could not LoadLibraryEx " + fname)
	}
	return m
}
// getSysProcAddr resolves the named procedure in module m via
// GetProcAddress, panicking if the symbol cannot be found.
func getSysProcAddr(m uint32, pname string) uintptr {
	p := getprocaddress(m, uintptr(unsafe.Pointer(StringBytePtr(pname))))
	if p == 0 {
		panic("syscall: could not GetProcAddress for " + pname)
	}
	return p
}
// windows api calls
//sys GetLastError() (lasterrno int)
//sys LoadLibrary(libname string) (handle uint32, errno int) = LoadLibraryW
//sys FreeLibrary(handle uint32) (ok bool, errno int)
//sys GetProcAddress(module uint32, procname string) (proc uint32, errno int)
//sys GetVersion() (ver uint32, errno int)
//sys FormatMessage(flags uint32, msgsrc uint32, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, errno int) = FormatMessageW
//sys ExitProcess(exitcode uint32)
//sys CreateFile(name *uint16, access uint32, mode uint32, sa *byte, createmode uint32, attrs uint32, templatefile int32) (handle int32, errno int) [failretval=-1] = CreateFileW
//sys ReadFile(handle int32, buf []byte, done *uint32, overlapped *Overlapped) (ok bool, errno int)
//sys WriteFile(handle int32, buf []byte, done *uint32, overlapped *Overlapped) (ok bool, errno int)
//sys SetFilePointer(handle int32, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, errno int) [failretval=0xffffffff]
//sys CloseHandle(handle int32) (ok bool, errno int)
//sys GetStdHandle(stdhandle int32) (handle int32, errno int) [failretval=-1]
//sys FindFirstFile(name *uint16, data *Win32finddata) (handle int32, errno int) [failretval=-1] = FindFirstFileW
//sys FindNextFile(handle int32, data *Win32finddata) (ok bool, errno int) = FindNextFileW
//sys FindClose(handle int32) (ok bool, errno int)
//sys GetFileInformationByHandle(handle int32, data *ByHandleFileInformation) (ok bool, errno int)
//sys GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, errno int) = GetCurrentDirectoryW
//sys SetCurrentDirectory(path *uint16) (ok bool, errno int) = SetCurrentDirectoryW
//sys CreateDirectory(path *uint16, sa *byte) (ok bool, errno int) = CreateDirectoryW
//sys RemoveDirectory(path *uint16) (ok bool, errno int) = RemoveDirectoryW
//sys DeleteFile(path *uint16) (ok bool, errno int) = DeleteFileW
//sys MoveFile(from *uint16, to *uint16) (ok bool, errno int) = MoveFileW
//sys GetComputerName(buf *uint16, n *uint32) (ok bool, errno int) = GetComputerNameW
//sys SetEndOfFile(handle int32) (ok bool, errno int)
//sys GetSystemTimeAsFileTime(time *Filetime)
//sys sleep(msec uint32) = Sleep
//sys GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, errno int) [failretval=0xffffffff]
//sys CreateIoCompletionPort(filehandle int32, cphandle int32, key uint32, threadcnt uint32) (handle int32, errno int)
//sys GetQueuedCompletionStatus(cphandle int32, qty *uint32, key *uint32, overlapped **Overlapped, timeout uint32) (ok bool, errno int)
//sys GetTempPath(buflen uint32, buf *uint16) (n uint32, errno int) = GetTempPathW
//sys CryptAcquireContext(provhandle *uint32, container *uint16, provider *uint16, provtype uint32, flags uint32) (ok bool, errno int) = advapi32.CryptAcquireContextW
//sys CryptReleaseContext(provhandle uint32, flags uint32) (ok bool, errno int) = advapi32.CryptReleaseContext
//sys CryptGenRandom(provhandle uint32, buflen uint32, buf *byte) (ok bool, errno int) = advapi32.CryptGenRandom
// syscall interface implementation for other packages
// Errstr converts a Windows error number into a human-readable message
// via FormatMessage, falling back to a numeric description when the
// lookup itself fails.
func Errstr(errno int) string {
	// EWINDOWS marks functionality this port does not implement.
	if errno == EWINDOWS {
		return "not supported by windows"
	}
	b := make([]uint16, 300)
	n, err := FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_ARGUMENT_ARRAY, 0, uint32(errno), 0, b, nil)
	if err != 0 {
		return "error " + str(errno) + " (FormatMessage failed with err=" + str(err) + ")"
	}
	// Drop the final character before decoding (message terminator).
	return string(utf16.Decode(b[0 : n-1]))
}
func Exit(code int) { ExitProcess(uint32(code)) }
// Open implements the Unix-style open(2) on top of CreateFileW,
// translating O_* mode bits into Windows access and creation flags.
// It returns the new handle as fd, or -1 and a Windows errno.
// NOTE(review): perm is currently ignored — confirm intended.
func Open(path string, mode int, perm int) (fd int, errno int) {
	if len(path) == 0 {
		return -1, ERROR_FILE_NOT_FOUND
	}
	// Map the read/write mode bits onto GENERIC_* access rights.
	var access uint32
	switch mode & (O_RDONLY | O_WRONLY | O_RDWR) {
	case O_RDONLY:
		access = GENERIC_READ
	case O_WRONLY:
		access = GENERIC_WRITE
	case O_RDWR:
		access = GENERIC_READ | GENERIC_WRITE
	}
	// Creating a file requires write access regardless of mode.
	if mode&O_CREAT != 0 {
		access |= GENERIC_WRITE
	}
	sharemode := uint32(FILE_SHARE_READ | FILE_SHARE_WRITE)
	// Map O_CREAT/O_EXCL/O_TRUNC combinations onto CreateFile
	// disposition values.
	var createmode uint32
	switch {
	case mode&O_CREAT != 0:
		if mode&O_EXCL != 0 {
			createmode = CREATE_NEW
		} else {
			createmode = CREATE_ALWAYS
		}
	case mode&O_TRUNC != 0:
		createmode = TRUNCATE_EXISTING
	default:
		createmode = OPEN_EXISTING
	}
	h, e := CreateFile(StringToUTF16Ptr(path), access, sharemode, nil, createmode, FILE_ATTRIBUTE_NORMAL, 0)
	return int(h), int(e)
}
// Read reads from fd into p via ReadFile, returning the number of
// bytes read and a Windows errno.
func Read(fd int, p []byte) (n int, errno int) {
	var done uint32
	if ok, e := ReadFile(int32(fd), p, &done, nil); !ok {
		if e == ERROR_BROKEN_PIPE {
			// BUG(brainman): work around ERROR_BROKEN_PIPE is returned on reading EOF from stdin
			return 0, 0
		}
		return 0, e
	}
	return int(done), 0
}
// TODO(brainman): ReadFile/WriteFile change file offset, therefore
// i use Seek here to preserve semantics of unix pread/pwrite,
// not sure if I should do that
func Pread(fd int, p []byte, offset int64) (n int, errno int) {
curoffset, e := Seek(fd, 0, 1)
if e != 0 {
return 0, e
}
defer Seek(fd, curoffset, 0)
var o Overlapped
o.OffsetHigh = uint32(offset >> 32)
o.Offset = uint32(offset)
var done uint32
if ok, e := ReadFile(int32(fd), p, &done, &o); !ok {
return 0, e
}
return int(done), 0
}
func Write(fd int, p []byte) (n int, errno int) {
var done uint32
if ok, e := WriteFile(int32(fd), p, &done, nil); !ok {
return 0, e
}
return int(done), 0
}
func Pwrite(fd int, p []byte, offset int64) (n int, errno int) {
curoffset, e := Seek(fd, 0, 1)
if e != 0 {
return 0, e
}
defer Seek(fd, curoffset, 0)
var o Overlapped
o.OffsetHigh = uint32(offset >> 32)
o.Offset = uint32(offset)
var done uint32
if ok, e := WriteFile(int32(fd), p, &done, &o); !ok {
return 0, e
}
return int(done), 0
}
// Seek implements lseek(2) on top of SetFilePointer, splitting the
// 64-bit offset into high/low 32-bit halves and recombining the result.
// whence follows the Unix convention: 0=begin, 1=current, 2=end.
func Seek(fd int, offset int64, whence int) (newoffset int64, errno int) {
	var w uint32
	switch whence {
	case 0:
		w = FILE_BEGIN
	case 1:
		w = FILE_CURRENT
	case 2:
		w = FILE_END
	}
	// SetFilePointer takes the offset as two 32-bit halves; hi is
	// updated in place with the high half of the new position.
	hi := int32(offset >> 32)
	lo := int32(offset)
	rlo, e := SetFilePointer(int32(fd), lo, &hi, w)
	if e != 0 {
		return 0, e
	}
	return int64(hi)<<32 + int64(rlo), 0
}
func Close(fd int) (errno int) {
if ok, e := CloseHandle(int32(fd)); !ok {
return e
}
return 0
}
// Standard file descriptors, resolved once from the process std handles.
var (
	Stdin  = getStdHandle(STD_INPUT_HANDLE)
	Stdout = getStdHandle(STD_OUTPUT_HANDLE)
	Stderr = getStdHandle(STD_ERROR_HANDLE)
)

// getStdHandle fetches a standard handle, ignoring any error.
func getStdHandle(h int32) (fd int) {
	r, _ := GetStdHandle(h)
	return int(r)
}

// Stat fills stat with directory-entry data for path via FindFirstFile;
// only the Windata field is populated meaningfully (Mode is zeroed).
func Stat(path string, stat *Stat_t) (errno int) {
	h, e := FindFirstFile(StringToUTF16Ptr(path), &stat.Windata)
	if e != 0 {
		return e
	}
	defer FindClose(h)
	stat.Mode = 0
	return 0
}

// Lstat is identical to Stat.
func Lstat(path string, stat *Stat_t) (errno int) {
	// no links on windows, just call Stat
	return Stat(path, stat)
}

const ImplementsGetwd = true

// Getwd returns the current directory via GetCurrentDirectory.
// NOTE(review): assumes the path fits in 300 UTF-16 units — confirm
// behavior for longer paths.
func Getwd() (wd string, errno int) {
	b := make([]uint16, 300)
	n, e := GetCurrentDirectory(uint32(len(b)), &b[0])
	if e != 0 {
		return "", e
	}
	return string(utf16.Decode(b[0:n])), 0
}
// Chdir changes the current directory to path.
func Chdir(path string) (errno int) {
	if ok, e := SetCurrentDirectory(&StringToUTF16(path)[0]); !ok {
		return e
	}
	return 0
}

// Mkdir creates the directory path; mode is accepted for unix
// compatibility but not used here.
func Mkdir(path string, mode int) (errno int) {
	if ok, e := CreateDirectory(&StringToUTF16(path)[0], nil); !ok {
		return e
	}
	return 0
}

// Rmdir removes the directory path.
func Rmdir(path string) (errno int) {
	if ok, e := RemoveDirectory(&StringToUTF16(path)[0]); !ok {
		return e
	}
	return 0
}

// Unlink deletes the file path.
func Unlink(path string) (errno int) {
	if ok, e := DeleteFile(&StringToUTF16(path)[0]); !ok {
		return e
	}
	return 0
}

// Rename moves oldpath to newpath via MoveFile.
func Rename(oldpath, newpath string) (errno int) {
	from := &StringToUTF16(oldpath)[0]
	to := &StringToUTF16(newpath)[0]
	if ok, e := MoveFile(from, to); !ok {
		return e
	}
	return 0
}

// ComputerName returns the name of this computer from GetComputerName.
func ComputerName() (name string, errno int) {
	var n uint32 = MAX_COMPUTERNAME_LENGTH + 1
	b := make([]uint16, n)
	if ok, e := GetComputerName(&b[0], &n); !ok {
		return "", e
	}
	return string(utf16.Decode(b[0:n])), 0
}

// Ftruncate sets the size of fd to length by seeking there and calling
// SetEndOfFile; the original file pointer is restored on return.
func Ftruncate(fd int, length int64) (errno int) {
	curoffset, e := Seek(fd, 0, 1)
	if e != 0 {
		return e
	}
	defer Seek(fd, curoffset, 0)
	if _, e := Seek(fd, length, 0); e != 0 {
		return e
	}
	if _, e := SetEndOfFile(int32(fd)); e != 0 {
		return e
	}
	return 0
}
// Gettimeofday stores the current wall-clock time in tv. Windows has
// no gettimeofday; it is emulated from the system FILETIME.
func Gettimeofday(tv *Timeval) (errno int) {
	var ft Filetime
	GetSystemTimeAsFileTime(&ft)
	ms := ft.Microseconds() // microseconds despite the name, per Filetime.Microseconds
	// split into sec / usec
	tv.Sec = int32(ms / 1e6)
	// Fix: the fractional part is ms modulo 1e6. The previous
	// `int32(ms) - tv.Sec` subtracted seconds from a truncated
	// microsecond count, mixing units and overflowing int32.
	tv.Usec = int32(ms % 1e6)
	return 0
}
// Sleep pauses the current thread for at least nsec nanoseconds.
func Sleep(nsec int64) (errno int) {
	sleep(uint32((nsec + 1e6 - 1) / 1e6)) // round up to milliseconds
	return 0
}

// TODO(brainman): implement Utimes, or rewrite os.file.Chtimes() instead

// Utimes is not implemented on Windows.
func Utimes(path string, tv []Timeval) (errno int) {
	return EWINDOWS
}
// net api calls
//sys WSAStartup(verreq uint32, data *WSAData) (sockerrno int) = wsock32.WSAStartup
//sys WSACleanup() (errno int) [failretval=-1] = wsock32.WSACleanup
//sys socket(af int32, typ int32, protocol int32) (handle int32, errno int) [failretval=-1] = wsock32.socket
//sys setsockopt(s int32, level int32, optname int32, optval *byte, optlen int32) (errno int) [failretval=-1] = wsock32.setsockopt
//sys bind(s int32, name uintptr, namelen int32) (errno int) [failretval=-1] = wsock32.bind
//sys connect(s int32, name uintptr, namelen int32) (errno int) [failretval=-1] = wsock32.connect
//sys getsockname(s int32, rsa *RawSockaddrAny, addrlen *int32) (errno int) [failretval=-1] = wsock32.getsockname
//sys getpeername(s int32, rsa *RawSockaddrAny, addrlen *int32) (errno int) [failretval=-1] = wsock32.getpeername
//sys listen(s int32, backlog int32) (errno int) [failretval=-1] = wsock32.listen
//sys shutdown(s int32, how int32) (errno int) [failretval=-1] = wsock32.shutdown
//sys AcceptEx(ls uint32, as uint32, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (ok bool, errno int) = wsock32.AcceptEx
//sys GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) = wsock32.GetAcceptExSockaddrs
//sys WSARecv(s uint32, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (errno int) [failretval=-1] = ws2_32.WSARecv
//sys WSASend(s uint32, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (errno int) [failretval=-1] = ws2_32.WSASend
// RawSockaddrInet4 mirrors the C sockaddr_in layout.
type RawSockaddrInet4 struct {
	Family uint16
	Port   uint16
	Addr   [4]byte /* in_addr */
	Zero   [8]uint8
}

// RawSockaddr mirrors the generic C sockaddr layout.
type RawSockaddr struct {
	Family uint16
	Data   [14]int8
}

// RawSockaddrAny is large enough to hold any raw socket address.
type RawSockaddrAny struct {
	Addr RawSockaddr
	Pad  [96]int8
}

// Sockaddr is implemented by the typed socket-address wrappers.
type Sockaddr interface {
	sockaddr() (ptr uintptr, len int32, errno int) // lowercase; only we can define Sockaddrs
}

// SockaddrInet4 is an IPv4 socket address.
type SockaddrInet4 struct {
	Port int
	Addr [4]byte
	raw  RawSockaddrInet4
}

// sockaddr encodes sa into its raw form and returns a pointer to it
// plus its length; Port is stored big-endian (network byte order).
func (sa *SockaddrInet4) sockaddr() (uintptr, int32, int) {
	if sa.Port < 0 || sa.Port > 0xFFFF {
		return 0, 0, EINVAL
	}
	sa.raw.Family = AF_INET
	p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
	p[0] = byte(sa.Port >> 8)
	p[1] = byte(sa.Port)
	for i := 0; i < len(sa.Addr); i++ {
		sa.raw.Addr[i] = sa.Addr[i]
	}
	return uintptr(unsafe.Pointer(&sa.raw)), int32(unsafe.Sizeof(sa.raw)), 0
}

// SockaddrInet6 is an IPv6 socket address (not yet supported here).
type SockaddrInet6 struct {
	Port int
	Addr [16]byte
}

func (sa *SockaddrInet6) sockaddr() (uintptr, int32, int) {
	// TODO(brainman): implement SockaddrInet6.sockaddr()
	return 0, 0, EWINDOWS
}

// SockaddrUnix is a Unix-domain address (unsupported on Windows).
type SockaddrUnix struct {
	Name string
}

func (sa *SockaddrUnix) sockaddr() (uintptr, int32, int) {
	// TODO(brainman): implement SockaddrUnix.sockaddr()
	return 0, 0, EWINDOWS
}

// Sockaddr decodes a raw socket address into its typed wrapper;
// only AF_INET is currently supported.
func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, int) {
	switch rsa.Addr.Family {
	case AF_UNIX:
		return nil, EWINDOWS
	case AF_INET:
		pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa))
		sa := new(SockaddrInet4)
		// Port is big-endian in the raw struct.
		p := (*[2]byte)(unsafe.Pointer(&pp.Port))
		sa.Port = int(p[0])<<8 + int(p[1])
		for i := 0; i < len(sa.Addr); i++ {
			sa.Addr[i] = pp.Addr[i]
		}
		return sa, 0
	case AF_INET6:
		return nil, EWINDOWS
	}
	return nil, EAFNOSUPPORT
}
// Socket creates a socket of the given domain, type and protocol.
func Socket(domain, typ, proto int) (fd, errno int) {
	h, e := socket(int32(domain), int32(typ), int32(proto))
	return int(h), int(e)
}

// SetsockoptInt sets an integer-valued socket option.
func SetsockoptInt(fd, level, opt int, value int) (errno int) {
	v := int32(value)
	return int(setsockopt(int32(fd), int32(level), int32(opt), (*byte)(unsafe.Pointer(&v)), int32(unsafe.Sizeof(v))))
}

// Bind binds fd to the address sa.
func Bind(fd int, sa Sockaddr) (errno int) {
	ptr, n, err := sa.sockaddr()
	if err != 0 {
		return err
	}
	return bind(int32(fd), ptr, n)
}

// Connect connects fd to the address sa.
func Connect(fd int, sa Sockaddr) (errno int) {
	ptr, n, err := sa.sockaddr()
	if err != 0 {
		return err
	}
	return connect(int32(fd), ptr, n)
}

// Getsockname returns the local address of fd.
func Getsockname(fd int) (sa Sockaddr, errno int) {
	var rsa RawSockaddrAny
	l := int32(unsafe.Sizeof(rsa))
	if errno = getsockname(int32(fd), &rsa, &l); errno != 0 {
		return
	}
	return rsa.Sockaddr()
}

// Getpeername returns the remote address of fd.
func Getpeername(fd int) (sa Sockaddr, errno int) {
	var rsa RawSockaddrAny
	l := int32(unsafe.Sizeof(rsa))
	if errno = getpeername(int32(fd), &rsa, &l); errno != 0 {
		return
	}
	return rsa.Sockaddr()
}

// Listen marks s as a listening socket with backlog n.
func Listen(s int, n int) (errno int) {
	return int(listen(int32(s), int32(n)))
}

// Shutdown shuts down part of a full-duplex connection.
func Shutdown(fd, how int) (errno int) {
	return int(shutdown(int32(fd), int32(how)))
}

// AcceptIOCP starts an overlapped AcceptEx on fd for use with a
// completion port; attrs is the buffer receiving the address data.
func AcceptIOCP(iocpfd, fd int, o *Overlapped) (attrs *byte, errno int) {
	// Will ask for local and remote address only.
	rsa := make([]RawSockaddrAny, 2)
	attrs = (*byte)(unsafe.Pointer(&rsa[0]))
	alen := uint32(unsafe.Sizeof(rsa[0]))
	var done uint32
	_, errno = AcceptEx(uint32(iocpfd), uint32(fd), attrs, 0, alen, alen, &done, o)
	return
}

// GetAcceptIOCPSockaddrs extracts the local and remote addresses from
// an attrs buffer produced by AcceptIOCP.
func GetAcceptIOCPSockaddrs(attrs *byte) (lsa, rsa Sockaddr) {
	var lrsa, rrsa *RawSockaddrAny
	var llen, rlen int32
	alen := uint32(unsafe.Sizeof(*lrsa))
	GetAcceptExSockaddrs(attrs, 0, alen, alen, &lrsa, &llen, &rrsa, &rlen)
	lsa, _ = lrsa.Sockaddr()
	rsa, _ = rrsa.Sockaddr()
	return
}

// TODO(brainman): fix all needed for net

// Unimplemented networking stubs; all report EWINDOWS.
func Accept(fd int) (nfd int, sa Sockaddr, errno int) { return 0, nil, EWINDOWS }

func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, errno int) {
	return 0, nil, EWINDOWS
}

func Sendto(fd int, p []byte, flags int, to Sockaddr) (errno int) { return EWINDOWS }

func SetsockoptTimeval(fd, level, opt int, tv *Timeval) (errno int) { return EWINDOWS }

// Linger mirrors the C struct linger used with SO_LINGER.
type Linger struct {
	Onoff  int32
	Linger int32
}

func SetsockoptLinger(fd, level, opt int, l *Linger) (errno int) { return EWINDOWS }

func BindToDevice(fd int, device string) (errno int) { return EWINDOWS }
// TODO(brainman): fix all needed for os

const (
	SIGTRAP = 5
)

// Process/identity stubs: not meaningful on Windows, so they return
// -1 or EWINDOWS.
func Getpid() (pid int) { return -1 }

func Getppid() (ppid int) { return -1 }

func Fchdir(fd int) (errno int) { return EWINDOWS }

func Link(oldpath, newpath string) (errno int) { return EWINDOWS }

func Symlink(path, link string) (errno int) { return EWINDOWS }

func Readlink(path string, buf []byte) (n int, errno int) { return 0, EWINDOWS }

func Chmod(path string, mode int) (errno int) { return EWINDOWS }

func Fchmod(fd int, mode int) (errno int) { return EWINDOWS }

func Chown(path string, uid int, gid int) (errno int) { return EWINDOWS }

func Lchown(path string, uid int, gid int) (errno int) { return EWINDOWS }

func Fchown(fd int, uid int, gid int) (errno int) { return EWINDOWS }

func Getuid() (uid int) { return -1 }

func Geteuid() (euid int) { return -1 }

func Getgid() (gid int) { return -1 }

func Getegid() (egid int) { return -1 }

func Getgroups() (gids []int, errno int) { return nil, EWINDOWS }

// TODO(brainman): fix all this meaningless code, it is here to compile exec.go

func Pipe(p []int) (errno int) { return EWINDOWS }

func read(fd int, buf *byte, nbuf int) (n int, errno int) {
	return 0, EWINDOWS
}

func fcntl(fd, cmd, arg int) (val int, errno int) {
	return 0, EWINDOWS
}

// Placeholder constants referenced by exec.go.
const (
	PTRACE_TRACEME = 1 + iota
	WNOHANG
	WSTOPPED
	SYS_CLOSE
	SYS_WRITE
	SYS_EXIT
	SYS_READ
)
// Rusage is a placeholder mirroring the unix rusage struct; it is
// never filled in on Windows.
type Rusage struct {
	Utime    Timeval
	Stime    Timeval
	Maxrss   int32
	Ixrss    int32
	Idrss    int32
	Isrss    int32
	Minflt   int32
	Majflt   int32
	Nswap    int32
	Inblock  int32
	Oublock  int32
	Msgsnd   int32
	Msgrcv   int32
	Nsignals int32
	Nvcsw    int32
	Nivcsw   int32
}

// Wait4 is not implemented on Windows.
func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, errno int) {
	return 0, EWINDOWS
}

// WaitStatus is a placeholder; every accessor reports "unknown".
type WaitStatus uint32

func (WaitStatus) Exited() bool { return false }

func (WaitStatus) ExitStatus() int { return -1 }

func (WaitStatus) Signal() int { return -1 }

func (WaitStatus) CoreDump() bool { return false }

func (WaitStatus) Stopped() bool { return false }

func (WaitStatus) Continued() bool { return false }

func (WaitStatus) StopSignal() int { return -1 }

func (WaitStatus) Signaled() bool { return false }

func (WaitStatus) TrapCause() int { return -1 }
syscall: on Windows, pass FORMAT_MESSAGE_IGNORE_INSERTS to FormatMessage in Errstr.
The Windows version of syscall's Errstr calls FormatMessage with no
insert values, so it must set the FORMAT_MESSAGE_IGNORE_INSERTS flag
in the Flags argument to keep insert sequences in the system message
from being interpreted.
R=rsc, brainman
CC=golang-dev
https://golang.org/cl/1868043
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Windows system calls.
package syscall
import (
"unsafe"
"utf16"
)
// OS identifies the target operating system for this syscall package.
const OS = "windows"

/*
small demo to detect version of windows you are running:

package main

import (
	"syscall"
)

func abort(funcname string, err int) {
	panic(funcname + " failed: " + syscall.Errstr(err))
}

func print_version(v uint32) {
	major := byte(v)
	minor := uint8(v >> 8)
	build := uint16(v >> 16)
	print("windows version ", major, ".", minor, " (Build ", build, ")\n")
}

func main() {
	h, err := syscall.LoadLibrary("kernel32.dll")
	if err != 0 {
		abort("LoadLibrary", err)
	}
	defer syscall.FreeLibrary(h)
	proc, err := syscall.GetProcAddress(h, "GetVersion")
	if err != 0 {
		abort("GetProcAddress", err)
	}
	r, _, _ := syscall.Syscall(uintptr(proc), 0, 0, 0)
	print_version(uint32(r))
}
*/

// StringToUTF16 returns the UTF-16 encoding of the UTF-8 string s,
// with a terminating NUL added.
func StringToUTF16(s string) []uint16 { return utf16.Encode([]int(s + "\x00")) }

// UTF16ToString returns the UTF-8 encoding of the UTF-16 sequence s,
// with a terminating NUL removed.
func UTF16ToString(s []uint16) string {
	// Truncate at the first NUL, if any.
	for i, v := range s {
		if v == 0 {
			s = s[0:i]
			break
		}
	}
	return string(utf16.Decode(s))
}

// StringToUTF16Ptr returns pointer to the UTF-16 encoding of
// the UTF-8 string s, with a terminating NUL added.
func StringToUTF16Ptr(s string) *uint16 { return &StringToUTF16(s)[0] }
// NsecToTimeval converts a nanosecond count into a Timeval holding
// whole seconds and the remaining microseconds.
func NsecToTimeval(nsec int64) (tv Timeval) {
	usec := nsec / 1e3 // drop sub-microsecond precision first
	tv.Sec = int32(usec / 1e6)
	tv.Usec = int32(usec % 1e6)
	return
}
// dll helpers

// implemented in ../pkg/runtime/windows/syscall.cgo
func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, lasterr uintptr)
func loadlibraryex(filename uintptr) (handle uint32)
func getprocaddress(handle uint32, procname uintptr) (proc uintptr)

// loadDll loads the named DLL, panicking if it cannot be loaded
// (used only during package initialization).
func loadDll(fname string) uint32 {
	m := loadlibraryex(uintptr(unsafe.Pointer(StringBytePtr(fname))))
	if m == 0 {
		panic("syscall: could not LoadLibraryEx " + fname)
	}
	return m
}

// getSysProcAddr resolves procedure pname in module m, panicking on failure.
func getSysProcAddr(m uint32, pname string) uintptr {
	p := getprocaddress(m, uintptr(unsafe.Pointer(StringBytePtr(pname))))
	if p == 0 {
		panic("syscall: could not GetProcAddress for " + pname)
	}
	return p
}
// windows api calls
//sys GetLastError() (lasterrno int)
//sys LoadLibrary(libname string) (handle uint32, errno int) = LoadLibraryW
//sys FreeLibrary(handle uint32) (ok bool, errno int)
//sys GetProcAddress(module uint32, procname string) (proc uint32, errno int)
//sys GetVersion() (ver uint32, errno int)
//sys FormatMessage(flags uint32, msgsrc uint32, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, errno int) = FormatMessageW
//sys ExitProcess(exitcode uint32)
//sys CreateFile(name *uint16, access uint32, mode uint32, sa *byte, createmode uint32, attrs uint32, templatefile int32) (handle int32, errno int) [failretval=-1] = CreateFileW
//sys ReadFile(handle int32, buf []byte, done *uint32, overlapped *Overlapped) (ok bool, errno int)
//sys WriteFile(handle int32, buf []byte, done *uint32, overlapped *Overlapped) (ok bool, errno int)
//sys SetFilePointer(handle int32, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, errno int) [failretval=0xffffffff]
//sys CloseHandle(handle int32) (ok bool, errno int)
//sys GetStdHandle(stdhandle int32) (handle int32, errno int) [failretval=-1]
//sys FindFirstFile(name *uint16, data *Win32finddata) (handle int32, errno int) [failretval=-1] = FindFirstFileW
//sys FindNextFile(handle int32, data *Win32finddata) (ok bool, errno int) = FindNextFileW
//sys FindClose(handle int32) (ok bool, errno int)
//sys GetFileInformationByHandle(handle int32, data *ByHandleFileInformation) (ok bool, errno int)
//sys GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, errno int) = GetCurrentDirectoryW
//sys SetCurrentDirectory(path *uint16) (ok bool, errno int) = SetCurrentDirectoryW
//sys CreateDirectory(path *uint16, sa *byte) (ok bool, errno int) = CreateDirectoryW
//sys RemoveDirectory(path *uint16) (ok bool, errno int) = RemoveDirectoryW
//sys DeleteFile(path *uint16) (ok bool, errno int) = DeleteFileW
//sys MoveFile(from *uint16, to *uint16) (ok bool, errno int) = MoveFileW
//sys GetComputerName(buf *uint16, n *uint32) (ok bool, errno int) = GetComputerNameW
//sys SetEndOfFile(handle int32) (ok bool, errno int)
//sys GetSystemTimeAsFileTime(time *Filetime)
//sys sleep(msec uint32) = Sleep
//sys GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, errno int) [failretval=0xffffffff]
//sys CreateIoCompletionPort(filehandle int32, cphandle int32, key uint32, threadcnt uint32) (handle int32, errno int)
//sys GetQueuedCompletionStatus(cphandle int32, qty *uint32, key *uint32, overlapped **Overlapped, timeout uint32) (ok bool, errno int)
//sys GetTempPath(buflen uint32, buf *uint16) (n uint32, errno int) = GetTempPathW
//sys CryptAcquireContext(provhandle *uint32, container *uint16, provider *uint16, provtype uint32, flags uint32) (ok bool, errno int) = advapi32.CryptAcquireContextW
//sys CryptReleaseContext(provhandle uint32, flags uint32) (ok bool, errno int) = advapi32.CryptReleaseContext
//sys CryptGenRandom(provhandle uint32, buflen uint32, buf *byte) (ok bool, errno int) = advapi32.CryptGenRandom
// syscall interface implementation for other packages

// Errstr returns a human-readable description of the Windows error
// number errno, obtained from FormatMessage.
func Errstr(errno int) string {
	if errno == EWINDOWS {
		return "not supported by windows"
	}
	// FORMAT_MESSAGE_IGNORE_INSERTS: no insert arguments are supplied,
	// so %1-style placeholders in the message must not be interpreted.
	var flags uint32 = FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ARGUMENT_ARRAY | FORMAT_MESSAGE_IGNORE_INSERTS
	b := make([]uint16, 300)
	n, err := FormatMessage(flags, 0, uint32(errno), 0, b, nil)
	if err != 0 {
		return "error " + str(errno) + " (FormatMessage failed with err=" + str(err) + ")"
	}
	// n-1 drops the trailing terminator included in the count.
	return string(utf16.Decode(b[0 : n-1]))
}

// Exit terminates the process with the given status code.
func Exit(code int) { ExitProcess(uint32(code)) }

// Open opens path with unix-style mode flags, mapping them onto
// CreateFile's access mask and creation disposition.
func Open(path string, mode int, perm int) (fd int, errno int) {
	if len(path) == 0 {
		return -1, ERROR_FILE_NOT_FOUND
	}
	var access uint32
	switch mode & (O_RDONLY | O_WRONLY | O_RDWR) {
	case O_RDONLY:
		access = GENERIC_READ
	case O_WRONLY:
		access = GENERIC_WRITE
	case O_RDWR:
		access = GENERIC_READ | GENERIC_WRITE
	}
	// O_CREAT implies write access regardless of the access flags.
	if mode&O_CREAT != 0 {
		access |= GENERIC_WRITE
	}
	sharemode := uint32(FILE_SHARE_READ | FILE_SHARE_WRITE)
	var createmode uint32
	switch {
	case mode&O_CREAT != 0:
		if mode&O_EXCL != 0 {
			createmode = CREATE_NEW
		} else {
			createmode = CREATE_ALWAYS
		}
	case mode&O_TRUNC != 0:
		createmode = TRUNCATE_EXISTING
	default:
		createmode = OPEN_EXISTING
	}
	h, e := CreateFile(StringToUTF16Ptr(path), access, sharemode, nil, createmode, FILE_ATTRIBUTE_NORMAL, 0)
	return int(h), int(e)
}
// Read reads up to len(p) bytes from fd into p using ReadFile.
func Read(fd int, p []byte) (n int, errno int) {
	var done uint32
	if ok, e := ReadFile(int32(fd), p, &done, nil); !ok {
		if e == ERROR_BROKEN_PIPE {
			// BUG(brainman): work around ERROR_BROKEN_PIPE is returned on reading EOF from stdin
			return 0, 0
		}
		return 0, e
	}
	return int(done), 0
}

// TODO(brainman): ReadFile/WriteFile change file offset, therefore
// i use Seek here to preserve semantics of unix pread/pwrite,
// not sure if I should do that

// Pread reads from fd at an absolute offset, saving and restoring the
// current file pointer so the call has no net effect on it.
func Pread(fd int, p []byte, offset int64) (n int, errno int) {
	curoffset, e := Seek(fd, 0, 1)
	if e != 0 {
		return 0, e
	}
	defer Seek(fd, curoffset, 0)
	var o Overlapped
	// The 64-bit offset is split across the two 32-bit Overlapped fields.
	o.OffsetHigh = uint32(offset >> 32)
	o.Offset = uint32(offset)
	var done uint32
	if ok, e := ReadFile(int32(fd), p, &done, &o); !ok {
		return 0, e
	}
	return int(done), 0
}

// Write writes p to fd using WriteFile.
func Write(fd int, p []byte) (n int, errno int) {
	var done uint32
	if ok, e := WriteFile(int32(fd), p, &done, nil); !ok {
		return 0, e
	}
	return int(done), 0
}

// Pwrite writes p to fd at an absolute offset, preserving the current
// file pointer across the call.
func Pwrite(fd int, p []byte, offset int64) (n int, errno int) {
	curoffset, e := Seek(fd, 0, 1)
	if e != 0 {
		return 0, e
	}
	defer Seek(fd, curoffset, 0)
	var o Overlapped
	o.OffsetHigh = uint32(offset >> 32)
	o.Offset = uint32(offset)
	var done uint32
	if ok, e := WriteFile(int32(fd), p, &done, &o); !ok {
		return 0, e
	}
	return int(done), 0
}

// Seek moves the file pointer of fd; whence follows unix conventions
// (0=begin, 1=current, 2=end) and is mapped to the Windows constants.
func Seek(fd int, offset int64, whence int) (newoffset int64, errno int) {
	var w uint32
	switch whence {
	case 0:
		w = FILE_BEGIN
	case 1:
		w = FILE_CURRENT
	case 2:
		w = FILE_END
	}
	// SetFilePointer takes the offset as separate high/low 32-bit halves.
	hi := int32(offset >> 32)
	lo := int32(offset)
	rlo, e := SetFilePointer(int32(fd), lo, &hi, w)
	if e != 0 {
		return 0, e
	}
	return int64(hi)<<32 + int64(rlo), 0
}

// Close closes the handle fd.
func Close(fd int) (errno int) {
	if ok, e := CloseHandle(int32(fd)); !ok {
		return e
	}
	return 0
}
// Standard file descriptors, resolved once from the process std handles.
var (
	Stdin  = getStdHandle(STD_INPUT_HANDLE)
	Stdout = getStdHandle(STD_OUTPUT_HANDLE)
	Stderr = getStdHandle(STD_ERROR_HANDLE)
)

// getStdHandle fetches a standard handle, ignoring any error.
func getStdHandle(h int32) (fd int) {
	r, _ := GetStdHandle(h)
	return int(r)
}

// Stat fills stat with directory-entry data for path via FindFirstFile;
// only the Windata field is populated meaningfully (Mode is zeroed).
func Stat(path string, stat *Stat_t) (errno int) {
	h, e := FindFirstFile(StringToUTF16Ptr(path), &stat.Windata)
	if e != 0 {
		return e
	}
	defer FindClose(h)
	stat.Mode = 0
	return 0
}

// Lstat is identical to Stat.
func Lstat(path string, stat *Stat_t) (errno int) {
	// no links on windows, just call Stat
	return Stat(path, stat)
}

const ImplementsGetwd = true

// Getwd returns the current directory via GetCurrentDirectory.
// NOTE(review): assumes the path fits in 300 UTF-16 units — confirm
// behavior for longer paths.
func Getwd() (wd string, errno int) {
	b := make([]uint16, 300)
	n, e := GetCurrentDirectory(uint32(len(b)), &b[0])
	if e != 0 {
		return "", e
	}
	return string(utf16.Decode(b[0:n])), 0
}
// Chdir changes the current directory to path.
func Chdir(path string) (errno int) {
	if ok, e := SetCurrentDirectory(&StringToUTF16(path)[0]); !ok {
		return e
	}
	return 0
}

// Mkdir creates the directory path; mode is accepted for unix
// compatibility but not used here.
func Mkdir(path string, mode int) (errno int) {
	if ok, e := CreateDirectory(&StringToUTF16(path)[0], nil); !ok {
		return e
	}
	return 0
}

// Rmdir removes the directory path.
func Rmdir(path string) (errno int) {
	if ok, e := RemoveDirectory(&StringToUTF16(path)[0]); !ok {
		return e
	}
	return 0
}

// Unlink deletes the file path.
func Unlink(path string) (errno int) {
	if ok, e := DeleteFile(&StringToUTF16(path)[0]); !ok {
		return e
	}
	return 0
}

// Rename moves oldpath to newpath via MoveFile.
func Rename(oldpath, newpath string) (errno int) {
	from := &StringToUTF16(oldpath)[0]
	to := &StringToUTF16(newpath)[0]
	if ok, e := MoveFile(from, to); !ok {
		return e
	}
	return 0
}

// ComputerName returns the name of this computer from GetComputerName.
func ComputerName() (name string, errno int) {
	var n uint32 = MAX_COMPUTERNAME_LENGTH + 1
	b := make([]uint16, n)
	if ok, e := GetComputerName(&b[0], &n); !ok {
		return "", e
	}
	return string(utf16.Decode(b[0:n])), 0
}

// Ftruncate sets the size of fd to length by seeking there and calling
// SetEndOfFile; the original file pointer is restored on return.
func Ftruncate(fd int, length int64) (errno int) {
	curoffset, e := Seek(fd, 0, 1)
	if e != 0 {
		return e
	}
	defer Seek(fd, curoffset, 0)
	if _, e := Seek(fd, length, 0); e != 0 {
		return e
	}
	if _, e := SetEndOfFile(int32(fd)); e != 0 {
		return e
	}
	return 0
}
// Gettimeofday stores the current wall-clock time in tv. Windows has
// no gettimeofday; it is emulated from the system FILETIME.
func Gettimeofday(tv *Timeval) (errno int) {
	var ft Filetime
	GetSystemTimeAsFileTime(&ft)
	ms := ft.Microseconds() // microseconds despite the name, per Filetime.Microseconds
	// split into sec / usec
	tv.Sec = int32(ms / 1e6)
	// Fix: the fractional part is ms modulo 1e6. The previous
	// `int32(ms) - tv.Sec` subtracted seconds from a truncated
	// microsecond count, mixing units and overflowing int32.
	tv.Usec = int32(ms % 1e6)
	return 0
}
// Sleep pauses the current thread for at least nsec nanoseconds.
func Sleep(nsec int64) (errno int) {
	sleep(uint32((nsec + 1e6 - 1) / 1e6)) // round up to milliseconds
	return 0
}

// TODO(brainman): implement Utimes, or rewrite os.file.Chtimes() instead

// Utimes is not implemented on Windows.
func Utimes(path string, tv []Timeval) (errno int) {
	return EWINDOWS
}
// net api calls
//sys WSAStartup(verreq uint32, data *WSAData) (sockerrno int) = wsock32.WSAStartup
//sys WSACleanup() (errno int) [failretval=-1] = wsock32.WSACleanup
//sys socket(af int32, typ int32, protocol int32) (handle int32, errno int) [failretval=-1] = wsock32.socket
//sys setsockopt(s int32, level int32, optname int32, optval *byte, optlen int32) (errno int) [failretval=-1] = wsock32.setsockopt
//sys bind(s int32, name uintptr, namelen int32) (errno int) [failretval=-1] = wsock32.bind
//sys connect(s int32, name uintptr, namelen int32) (errno int) [failretval=-1] = wsock32.connect
//sys getsockname(s int32, rsa *RawSockaddrAny, addrlen *int32) (errno int) [failretval=-1] = wsock32.getsockname
//sys getpeername(s int32, rsa *RawSockaddrAny, addrlen *int32) (errno int) [failretval=-1] = wsock32.getpeername
//sys listen(s int32, backlog int32) (errno int) [failretval=-1] = wsock32.listen
//sys shutdown(s int32, how int32) (errno int) [failretval=-1] = wsock32.shutdown
//sys AcceptEx(ls uint32, as uint32, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (ok bool, errno int) = wsock32.AcceptEx
//sys GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) = wsock32.GetAcceptExSockaddrs
//sys WSARecv(s uint32, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (errno int) [failretval=-1] = ws2_32.WSARecv
//sys WSASend(s uint32, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (errno int) [failretval=-1] = ws2_32.WSASend
// RawSockaddrInet4 mirrors the C sockaddr_in layout.
type RawSockaddrInet4 struct {
	Family uint16
	Port   uint16
	Addr   [4]byte /* in_addr */
	Zero   [8]uint8
}

// RawSockaddr mirrors the generic C sockaddr layout.
type RawSockaddr struct {
	Family uint16
	Data   [14]int8
}

// RawSockaddrAny is large enough to hold any raw socket address.
type RawSockaddrAny struct {
	Addr RawSockaddr
	Pad  [96]int8
}

// Sockaddr is implemented by the typed socket-address wrappers.
type Sockaddr interface {
	sockaddr() (ptr uintptr, len int32, errno int) // lowercase; only we can define Sockaddrs
}

// SockaddrInet4 is an IPv4 socket address.
type SockaddrInet4 struct {
	Port int
	Addr [4]byte
	raw  RawSockaddrInet4
}

// sockaddr encodes sa into its raw form and returns a pointer to it
// plus its length; Port is stored big-endian (network byte order).
func (sa *SockaddrInet4) sockaddr() (uintptr, int32, int) {
	if sa.Port < 0 || sa.Port > 0xFFFF {
		return 0, 0, EINVAL
	}
	sa.raw.Family = AF_INET
	p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
	p[0] = byte(sa.Port >> 8)
	p[1] = byte(sa.Port)
	for i := 0; i < len(sa.Addr); i++ {
		sa.raw.Addr[i] = sa.Addr[i]
	}
	return uintptr(unsafe.Pointer(&sa.raw)), int32(unsafe.Sizeof(sa.raw)), 0
}

// SockaddrInet6 is an IPv6 socket address (not yet supported here).
type SockaddrInet6 struct {
	Port int
	Addr [16]byte
}

func (sa *SockaddrInet6) sockaddr() (uintptr, int32, int) {
	// TODO(brainman): implement SockaddrInet6.sockaddr()
	return 0, 0, EWINDOWS
}

// SockaddrUnix is a Unix-domain address (unsupported on Windows).
type SockaddrUnix struct {
	Name string
}

func (sa *SockaddrUnix) sockaddr() (uintptr, int32, int) {
	// TODO(brainman): implement SockaddrUnix.sockaddr()
	return 0, 0, EWINDOWS
}

// Sockaddr decodes a raw socket address into its typed wrapper;
// only AF_INET is currently supported.
func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, int) {
	switch rsa.Addr.Family {
	case AF_UNIX:
		return nil, EWINDOWS
	case AF_INET:
		pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa))
		sa := new(SockaddrInet4)
		// Port is big-endian in the raw struct.
		p := (*[2]byte)(unsafe.Pointer(&pp.Port))
		sa.Port = int(p[0])<<8 + int(p[1])
		for i := 0; i < len(sa.Addr); i++ {
			sa.Addr[i] = pp.Addr[i]
		}
		return sa, 0
	case AF_INET6:
		return nil, EWINDOWS
	}
	return nil, EAFNOSUPPORT
}
// Socket creates a socket of the given domain, type and protocol.
func Socket(domain, typ, proto int) (fd, errno int) {
	h, e := socket(int32(domain), int32(typ), int32(proto))
	return int(h), int(e)
}

// SetsockoptInt sets an integer-valued socket option.
func SetsockoptInt(fd, level, opt int, value int) (errno int) {
	v := int32(value)
	return int(setsockopt(int32(fd), int32(level), int32(opt), (*byte)(unsafe.Pointer(&v)), int32(unsafe.Sizeof(v))))
}

// Bind binds fd to the address sa.
func Bind(fd int, sa Sockaddr) (errno int) {
	ptr, n, err := sa.sockaddr()
	if err != 0 {
		return err
	}
	return bind(int32(fd), ptr, n)
}

// Connect connects fd to the address sa.
func Connect(fd int, sa Sockaddr) (errno int) {
	ptr, n, err := sa.sockaddr()
	if err != 0 {
		return err
	}
	return connect(int32(fd), ptr, n)
}

// Getsockname returns the local address of fd.
func Getsockname(fd int) (sa Sockaddr, errno int) {
	var rsa RawSockaddrAny
	l := int32(unsafe.Sizeof(rsa))
	if errno = getsockname(int32(fd), &rsa, &l); errno != 0 {
		return
	}
	return rsa.Sockaddr()
}

// Getpeername returns the remote address of fd.
func Getpeername(fd int) (sa Sockaddr, errno int) {
	var rsa RawSockaddrAny
	l := int32(unsafe.Sizeof(rsa))
	if errno = getpeername(int32(fd), &rsa, &l); errno != 0 {
		return
	}
	return rsa.Sockaddr()
}

// Listen marks s as a listening socket with backlog n.
func Listen(s int, n int) (errno int) {
	return int(listen(int32(s), int32(n)))
}

// Shutdown shuts down part of a full-duplex connection.
func Shutdown(fd, how int) (errno int) {
	return int(shutdown(int32(fd), int32(how)))
}

// AcceptIOCP starts an overlapped AcceptEx on fd for use with a
// completion port; attrs is the buffer receiving the address data.
func AcceptIOCP(iocpfd, fd int, o *Overlapped) (attrs *byte, errno int) {
	// Will ask for local and remote address only.
	rsa := make([]RawSockaddrAny, 2)
	attrs = (*byte)(unsafe.Pointer(&rsa[0]))
	alen := uint32(unsafe.Sizeof(rsa[0]))
	var done uint32
	_, errno = AcceptEx(uint32(iocpfd), uint32(fd), attrs, 0, alen, alen, &done, o)
	return
}

// GetAcceptIOCPSockaddrs extracts the local and remote addresses from
// an attrs buffer produced by AcceptIOCP.
func GetAcceptIOCPSockaddrs(attrs *byte) (lsa, rsa Sockaddr) {
	var lrsa, rrsa *RawSockaddrAny
	var llen, rlen int32
	alen := uint32(unsafe.Sizeof(*lrsa))
	GetAcceptExSockaddrs(attrs, 0, alen, alen, &lrsa, &llen, &rrsa, &rlen)
	lsa, _ = lrsa.Sockaddr()
	rsa, _ = rrsa.Sockaddr()
	return
}

// TODO(brainman): fix all needed for net

// Unimplemented networking stubs; all report EWINDOWS.
func Accept(fd int) (nfd int, sa Sockaddr, errno int) { return 0, nil, EWINDOWS }

func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, errno int) {
	return 0, nil, EWINDOWS
}

func Sendto(fd int, p []byte, flags int, to Sockaddr) (errno int) { return EWINDOWS }

func SetsockoptTimeval(fd, level, opt int, tv *Timeval) (errno int) { return EWINDOWS }

// Linger mirrors the C struct linger used with SO_LINGER.
type Linger struct {
	Onoff  int32
	Linger int32
}

func SetsockoptLinger(fd, level, opt int, l *Linger) (errno int) { return EWINDOWS }

func BindToDevice(fd int, device string) (errno int) { return EWINDOWS }
// TODO(brainman): fix all needed for os

const (
	SIGTRAP = 5
)

// Process/identity stubs: not meaningful on Windows, so they return
// -1 or EWINDOWS.
func Getpid() (pid int) { return -1 }

func Getppid() (ppid int) { return -1 }

func Fchdir(fd int) (errno int) { return EWINDOWS }

func Link(oldpath, newpath string) (errno int) { return EWINDOWS }

func Symlink(path, link string) (errno int) { return EWINDOWS }

func Readlink(path string, buf []byte) (n int, errno int) { return 0, EWINDOWS }

func Chmod(path string, mode int) (errno int) { return EWINDOWS }

func Fchmod(fd int, mode int) (errno int) { return EWINDOWS }

func Chown(path string, uid int, gid int) (errno int) { return EWINDOWS }

func Lchown(path string, uid int, gid int) (errno int) { return EWINDOWS }

func Fchown(fd int, uid int, gid int) (errno int) { return EWINDOWS }

func Getuid() (uid int) { return -1 }

func Geteuid() (euid int) { return -1 }

func Getgid() (gid int) { return -1 }

func Getegid() (egid int) { return -1 }

func Getgroups() (gids []int, errno int) { return nil, EWINDOWS }

// TODO(brainman): fix all this meaningless code, it is here to compile exec.go

func Pipe(p []int) (errno int) { return EWINDOWS }

func read(fd int, buf *byte, nbuf int) (n int, errno int) {
	return 0, EWINDOWS
}

func fcntl(fd, cmd, arg int) (val int, errno int) {
	return 0, EWINDOWS
}
// Placeholder constants referenced by exec.go.
const (
	PTRACE_TRACEME = 1 + iota
	WNOHANG
	WSTOPPED
	SYS_CLOSE
	SYS_WRITE
	SYS_EXIT
	SYS_READ
)

// Rusage is a placeholder mirroring the unix rusage struct; it is
// never filled in on Windows.
type Rusage struct {
	Utime    Timeval
	Stime    Timeval
	Maxrss   int32
	Ixrss    int32
	Idrss    int32
	Isrss    int32
	Minflt   int32
	Majflt   int32
	Nswap    int32
	Inblock  int32
	Oublock  int32
	Msgsnd   int32
	Msgrcv   int32
	Nsignals int32
	Nvcsw    int32
	Nivcsw   int32
}

// Wait4 is not implemented on Windows.
func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, errno int) {
	return 0, EWINDOWS
}

// WaitStatus is a placeholder; every accessor reports "unknown".
type WaitStatus uint32

func (WaitStatus) Exited() bool { return false }

func (WaitStatus) ExitStatus() int { return -1 }

func (WaitStatus) Signal() int { return -1 }

func (WaitStatus) CoreDump() bool { return false }

func (WaitStatus) Stopped() bool { return false }

func (WaitStatus) Continued() bool { return false }

func (WaitStatus) StopSignal() int { return -1 }

func (WaitStatus) Signaled() bool { return false }

func (WaitStatus) TrapCause() int { return -1 }
|
package hypervisor
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"syscall"
"time"
"github.com/hyperhq/hypercontainer-utils/hlog"
"github.com/hyperhq/runv/api"
hyperstartapi "github.com/hyperhq/runv/hyperstart/api/json"
"github.com/hyperhq/runv/hyperstart/libhyperstart"
"github.com/hyperhq/runv/hypervisor/types"
"github.com/hyperhq/runv/lib/utils"
)
// Vm wraps a single hypervisor VM instance together with its runtime
// context and a fan-out of its response stream to multiple clients.
type Vm struct {
	Id string // unique VM identifier

	ctx *VmContext // live execution context; set by launch/AssociateVm

	Cpu  int
	Mem  int
	Lazy bool

	logPrefix string // prefix for this VM's log lines

	clients *Fanout // distributes VM responses to acquired channels
}
// LogLevel reports whether messages at the given level are emitted.
func (v *Vm) LogLevel(level hlog.LogLevel) bool {
	return hlog.IsLogLevel(level)
}

// LogPrefix returns the prefix prepended to this VM's log lines.
func (v *Vm) LogPrefix() string {
	return v.logPrefix
}

// Log emits a log message at the given level, attributed to this VM.
func (v *Vm) Log(level hlog.LogLevel, args ...interface{}) {
	hlog.HLog(level, v, 1, args...)
}

// GetResponseChan acquires a per-client response channel from the
// fanout; callers must return it with ReleaseResponseChan.
func (vm *Vm) GetResponseChan() (chan *types.VmResponse, error) {
	if vm.clients != nil {
		return vm.clients.Acquire()
	}
	return nil, errors.New("No channels available")
}

// ReleaseResponseChan returns a channel obtained from GetResponseChan.
func (vm *Vm) ReleaseResponseChan(ch chan *types.VmResponse) {
	if vm.clients != nil {
		vm.clients.Release(ch)
	}
}
// launch boots a new VM according to b, wiring up the event and
// response channels; on success vm.ctx and vm.clients are populated.
func (vm *Vm) launch(b *BootConfig) (err error) {
	var (
		vmEvent = make(chan VmEvent, 128)
		Status  = make(chan *types.VmResponse, 128)
		ctx     *VmContext
	)

	ctx, err = InitContext(vm.Id, vmEvent, Status, nil, b)
	if err != nil {
		// Surface the failure to any response listener before returning.
		Status <- &types.VmResponse{
			VmId:  vm.Id,
			Code:  types.E_BAD_REQUEST,
			Cause: err.Error(),
		}
		return err
	}
	ctx.Launch()
	vm.ctx = ctx

	vm.clients = CreateFanout(Status, 128, false)
	return nil
}
// AssociateVm re-attaches to an already-running VM using previously
// persisted state in data.
// This function will only be invoked during daemon start
func AssociateVm(vmId string, data []byte) (*Vm, error) {
	var (
		PodEvent = make(chan VmEvent, 128)
		Status   = make(chan *types.VmResponse, 128)
		err      error
	)

	vm := newVm(vmId, 0, 0)
	vm.ctx, err = VmAssociate(vm.Id, PodEvent, Status, data)
	if err != nil {
		vm.Log(ERROR, "cannot associate with vm: %v", err)
		return nil, err
	}

	vm.clients = CreateFanout(Status, 128, false)
	return vm, nil
}
// matchResponse inspects one VM response; it returns (result, true) to
// stop waiting, or (_, false) to keep waiting.
type matchResponse func(response *types.VmResponse) (error, bool)

// WaitResponse runs match against the VM response stream until it
// signals completion or the timeout (in seconds; negative waits
// forever) elapses, delivering the outcome on the returned channel.
func (vm *Vm) WaitResponse(match matchResponse, timeout int) chan error {
	result := make(chan error, 1)
	var timeoutChan <-chan time.Time
	if timeout >= 0 {
		timeoutChan = time.After(time.Duration(timeout) * time.Second)
	} else {
		// Never fires: a fresh channel nothing sends on.
		timeoutChan = make(chan time.Time, 1)
	}

	Status, err := vm.GetResponseChan()
	if err != nil {
		result <- err
		return result
	}

	go func() {
		defer vm.ReleaseResponseChan(Status)
		for {
			select {
			case response, ok := <-Status:
				if !ok {
					result <- fmt.Errorf("Response Chan is broken")
					return
				}
				if err, exit := match(response); exit {
					result <- err
					return
				}
			case <-timeoutChan:
				result <- fmt.Errorf("timeout for waiting response")
				return
			}
		}
	}()
	return result
}
// ReleaseVm detaches the daemon from the VM without destroying it,
// waiting until the context acknowledges (or the VM shuts down).
func (vm *Vm) ReleaseVm() error {
	if !vm.ctx.IsRunning() {
		return nil
	}

	result := vm.WaitResponse(func(response *types.VmResponse) (error, bool) {
		if response.Code == types.E_VM_SHUTDOWN || response.Code == types.E_OK {
			return nil, true
		}
		return nil, false
	}, -1)

	releasePodEvent := &ReleaseVMCommand{}
	if err := vm.ctx.SendVmEvent(releasePodEvent); err != nil {
		return err
	}
	return <-result
}

// WaitVm returns a channel that yields once the VM reports shutdown
// or the timeout (in seconds) elapses.
func (vm *Vm) WaitVm(timeout int) <-chan error {
	return vm.WaitResponse(func(response *types.VmResponse) (error, bool) {
		if response.Code == types.E_VM_SHUTDOWN {
			return nil, true
		}
		return nil, false
	}, timeout)
}
// WaitProcess returns a channel that delivers one api.ProcessExit per id
// in ids as the corresponding container (isContainer true) or exec process
// finishes. The channel is closed without draining if the VM shuts down or
// the timeout (seconds; negative means forever) expires first.
func (vm *Vm) WaitProcess(isContainer bool, ids []string, timeout int) <-chan *api.ProcessExit {
	var (
		waiting = make(map[string]struct{})
		result = make(chan *api.ProcessExit, len(ids))
		waitEvent = types.E_CONTAINER_FINISHED
	)
	if !isContainer {
		waitEvent = types.E_EXEC_FINISHED
	}
	for _, id := range ids {
		waiting[id] = struct{}{}
	}
	resChan := vm.WaitResponse(func(response *types.VmResponse) (error, bool) {
		if response.Code == types.E_VM_SHUTDOWN {
			return fmt.Errorf("get shutdown event"), true
		}
		if response.Code != waitEvent {
			return nil, false
		}
		// Guard against an unexpected payload type: the original ignored
		// the assertion result and would nil-panic on ps.Id below.
		ps, ok := response.Data.(*types.ProcessFinished)
		if !ok || ps == nil {
			return nil, false
		}
		if _, ok := waiting[ps.Id]; ok {
			result <- &api.ProcessExit{
				Id: ps.Id,
				Code: int(ps.Code),
				FinishedAt: time.Now().UTC(),
			}
			// Ack the event exactly once; someone else may already have.
			select {
			case ps.Ack <- true:
				vm.ctx.Log(TRACE, "got shut down msg, acked here")
			default:
				vm.ctx.Log(TRACE, "got shut down msg, acked somewhere")
			}
			delete(waiting, ps.Id)
			if len(waiting) == 0 {
				// got all of processexit event, exit
				return nil, true
			}
		}
		// continue to wait other processexit event
		return nil, false
	}, timeout)
	go func() {
		if err := <-resChan; err != nil {
			close(result)
		}
	}()
	return result
}
func (vm *Vm) InitSandbox(config *api.SandboxConfig) error {
vm.ctx.SetNetworkEnvironment(config)
return vm.ctx.startPod()
}
func (vm *Vm) WaitInit() api.Result {
if err := <-vm.WaitResponse(func(response *types.VmResponse) (error, bool) {
if response.Code == types.E_OK {
return nil, true
}
if response.Code == types.E_FAILED || response.Code == types.E_VM_SHUTDOWN {
return fmt.Errorf("got failed event when wait init message"), true
}
return nil, false
}, -1); err != nil {
return api.NewResultBase(vm.Id, false, err.Error())
}
return api.NewResultBase(vm.Id, true, "wait init message successfully")
}
func (vm *Vm) Shutdown() api.Result {
if !vm.ctx.IsRunning() {
return api.NewResultBase(vm.Id, false, "not in running state")
}
result := vm.WaitResponse(func(response *types.VmResponse) (error, bool) {
if response.Code == types.E_VM_SHUTDOWN {
return nil, true
}
return nil, false
}, -1)
if err := vm.ctx.SendVmEvent(&ShutdownCommand{}); err != nil {
return api.NewResultBase(vm.Id, false, "vm context already exited")
}
if err := <-result; err != nil {
return api.NewResultBase(vm.Id, false, err.Error())
}
return api.NewResultBase(vm.Id, true, "shutdown vm successfully")
}
// TODO: should we provide a method to force kill vm
func (vm *Vm) Kill() {
vm.ctx.poweroffVM(false, "vm.Kill()")
}
// WriteFile writes data to the file at target inside the given container
// via hyperstart.
func (vm *Vm) WriteFile(container, target string, data []byte) error {
	return vm.ctx.hyperstart.WriteFile(container, target, data)
}
// ReadFile reads the file at target inside the given container via
// hyperstart.
func (vm *Vm) ReadFile(container, target string) ([]byte, error) {
	return vm.ctx.hyperstart.ReadFile(container, target)
}
func (vm *Vm) SignalProcess(container, process string, signal syscall.Signal) error {
return vm.ctx.hyperstart.SignalProcess(container, process, signal)
}
func (vm *Vm) KillContainer(container string, signal syscall.Signal) error {
return vm.SignalProcess(container, "init", signal)
}
// Should only be called near after AssociateVm
func (vm *Vm) AssociateContainer(container string) (alive bool, err error) {
return vm.ctx.restoreContainer(container)
}
func (vm *Vm) AddRoute() error {
routes := vm.ctx.networks.getRoutes()
return vm.ctx.hyperstart.AddRoute(routes)
}
// AddNic hot-plugs the network interface described by info into the VM,
// waits for the host-side allocation to complete, then asks hyperstart to
// configure the interface inside the guest.
func (vm *Vm) AddNic(info *api.InterfaceDescription) error {
	client := make(chan api.Result, 1)
	vm.ctx.AddInterface(info, client)
	ev, ok := <-client
	if !ok {
		return fmt.Errorf("internal error")
	}
	if !ev.IsSuccess() {
		return fmt.Errorf("allocate device failed")
	}
	if vm.ctx.LogLevel(TRACE) {
		// Fix: "finial" -> "final" in the trace message.
		vm.Log(TRACE, "final vmSpec.Interface is %#v", vm.ctx.networks.getInterface(info.Id))
	}
	return vm.ctx.hyperstartAddInterface(info.Id)
}
func (vm *Vm) AllNics() []*InterfaceCreated {
return vm.ctx.AllInterfaces()
}
func (vm *Vm) DeleteNic(id string) error {
if err := vm.ctx.hyperstartDeleteInterface(id); err != nil {
return err
}
client := make(chan api.Result, 1)
vm.ctx.RemoveInterface(id, client)
ev, ok := <-client
if !ok {
return fmt.Errorf("internal error")
}
if !ev.IsSuccess() {
return fmt.Errorf("remove device failed")
}
return nil
}
func (vm *Vm) UpdateNic(inf *api.InterfaceDescription) error {
if err := vm.ctx.hyperstartUpdateInterface(inf.Id, inf.Ip, inf.Mtu); err != nil {
return err
}
return vm.ctx.UpdateInterface(inf)
}
func (vm *Vm) SetCpus(cpus int) error {
if vm.Cpu >= cpus {
return nil
}
if !vm.ctx.IsRunning() {
return NewNotReadyError(vm.Id)
}
err := vm.ctx.DCtx.SetCpus(vm.ctx, cpus)
if err == nil {
vm.Cpu = cpus
}
return err
}
func (vm *Vm) AddMem(totalMem int) error {
if vm.Mem >= totalMem {
return nil
}
size := totalMem - vm.Mem
if !vm.ctx.IsRunning() {
return NewNotReadyError(vm.Id)
}
err := vm.ctx.DCtx.AddMem(vm.ctx, 1, size)
if err == nil {
vm.Mem = totalMem
}
return err
}
func (vm *Vm) OnlineCpuMem() error {
return vm.ctx.hyperstart.OnlineCpuMem()
}
func (vm *Vm) HyperstartExecSync(cmd []string, stdin []byte) (stdout, stderr []byte, err error) {
if len(cmd) == 0 {
return nil, nil, fmt.Errorf("'hyperstart-exec' without command")
}
execId := fmt.Sprintf("hyperstart-exec-%s", utils.RandStr(10, "alpha"))
var stdoutBuf, stderrBuf bytes.Buffer
tty := &TtyIO{
Stdin: ioutil.NopCloser(bytes.NewReader(stdin)),
Stdout: &stdoutBuf,
Stderr: &stderrBuf,
}
result := vm.WaitProcess(false, []string{execId}, -1)
if result == nil {
err = fmt.Errorf("can not wait hyperstart-exec %q", execId)
vm.Log(ERROR, err)
return nil, nil, err
}
err = vm.AddProcess(&api.Process{
Container: hyperstartapi.HYPERSTART_EXEC_CONTAINER,
Id: execId,
Terminal: false,
Args: cmd,
Envs: []string{},
Workdir: "/"}, tty)
if err != nil {
return nil, nil, err
}
r, ok := <-result
if !ok {
err = fmt.Errorf("wait hyperstart-exec %q interrupted", execId)
vm.Log(ERROR, err)
return nil, nil, err
}
vm.Log(TRACE, "hyperstart-exec %q terminated at %v with code %d", execId, r.FinishedAt, r.Code)
if r.Code != 0 {
return stdoutBuf.Bytes(), stderrBuf.Bytes(), fmt.Errorf("exit with error code:%d", r.Code)
}
return stdoutBuf.Bytes(), stderrBuf.Bytes(), nil
}
// HyperstartExec runs a command (cmd is a JSON-encoded argv array) in the
// hyperstart exec container, wiring its stdio to tty, and blocks until it
// terminates, returning the process exit code. On failure the exit code
// is -1.
func (vm *Vm) HyperstartExec(cmd string, tty *TtyIO) (int, error) {
	if cmd == "" {
		return -1, fmt.Errorf("'hyperstart exec' without command")
	}
	var command []string
	if err := json.Unmarshal([]byte(cmd), &command); err != nil {
		// Fix: return -1 like every other failure path; the original
		// returned 0, which callers could mistake for a success exit code.
		return -1, err
	}
	execID := fmt.Sprintf("hyperstart-exec-%s", utils.RandStr(10, "alpha"))
	// Register the exit waiter before starting the process so the finish
	// event cannot be missed.
	result := vm.WaitProcess(false, []string{execID}, -1)
	if result == nil {
		err := fmt.Errorf("can not wait hyperstart-exec %q", execID)
		vm.Log(ERROR, err)
		return -1, err
	}
	err := vm.AddProcess(&api.Process{
		Container: hyperstartapi.HYPERSTART_EXEC_CONTAINER,
		Id: execID,
		Terminal: false,
		Args: command,
		Envs: []string{},
		Workdir: "/"}, tty)
	if err != nil {
		return -1, err
	}
	r, ok := <-result
	if !ok {
		err = fmt.Errorf("wait hyperstart-exec %q interrupted", execID)
		vm.Log(ERROR, err)
		return -1, err
	}
	vm.Log(TRACE, "hyperstart-exec %q terminated at %v with code %d", execID, r.FinishedAt, r.Code)
	return r.Code, nil
}
func (vm *Vm) Exec(container, execId, cmd string, terminal bool, tty *TtyIO) error {
var command []string
if cmd == "" {
return fmt.Errorf("'exec' without command")
}
if err := json.Unmarshal([]byte(cmd), &command); err != nil {
return err
}
return vm.AddProcess(&api.Process{
Container: container,
Id: execId,
Terminal: terminal,
Args: command,
Envs: []string{},
Workdir: "/"}, tty)
}
func (vm *Vm) AddProcess(process *api.Process, tty *TtyIO) error {
if !vm.ctx.IsRunning() {
return NewNotReadyError(vm.Id)
}
envs := []hyperstartapi.EnvironmentVar{}
for _, v := range process.Envs {
if eqlIndex := strings.Index(v, "="); eqlIndex > 0 {
envs = append(envs, hyperstartapi.EnvironmentVar{
Env: v[:eqlIndex],
Value: v[eqlIndex+1:],
})
}
}
err := vm.ctx.hyperstart.AddProcess(process.Container, &hyperstartapi.Process{
Id: process.Id,
Terminal: process.Terminal,
Args: process.Args,
Envs: envs,
Workdir: process.Workdir,
User: process.User,
Group: process.Group,
})
if err != nil {
return fmt.Errorf("exec command %v failed: %v", process.Args, err)
}
if tty == nil {
return nil
}
inPipe, outPipe, errPipe := libhyperstart.StdioPipe(vm.ctx.hyperstart, process.Container, process.Id)
go streamCopy(tty, inPipe, outPipe, errPipe)
go func() {
status := vm.ctx.hyperstart.WaitProcess(process.Container, process.Id)
vm.ctx.reportProcessFinished(types.E_EXEC_FINISHED, &types.ProcessFinished{
Id: process.Id, Code: uint8(status), Ack: make(chan bool, 1),
})
}()
return nil
}
func (vm *Vm) AddVolume(vol *api.VolumeDescription) api.Result {
result := make(chan api.Result, 1)
vm.ctx.AddVolume(vol, result)
return <-result
}
func (vm *Vm) AddContainer(c *api.ContainerDescription) api.Result {
result := make(chan api.Result, 1)
vm.ctx.AddContainer(c, result)
return <-result
}
func (vm *Vm) RemoveContainer(id string) api.Result {
result := make(chan api.Result, 1)
vm.ctx.RemoveContainer(id, result)
return <-result
}
func (vm *Vm) RemoveVolume(name string) api.Result {
result := make(chan api.Result, 1)
vm.ctx.RemoveVolume(name, result)
return <-result
}
func (vm *Vm) RemoveContainers(ids ...string) (bool, map[string]api.Result) {
return vm.batchWaitResult(ids, vm.ctx.RemoveContainer)
}
func (vm *Vm) RemoveVolumes(names ...string) (bool, map[string]api.Result) {
return vm.batchWaitResult(names, vm.ctx.RemoveVolume)
}
// waitResultOp is an asynchronous operation that reports its outcome for
// one named object on the supplied channel.
type waitResultOp func(string, chan<- api.Result)

// batchWaitResult runs op concurrently for every unique name in names and
// collects one result per name. It returns overall success (false if any
// op failed or the channel closed early) plus the per-name results
// gathered so far.
func (vm *Vm) batchWaitResult(names []string, op waitResultOp) (bool, map[string]api.Result) {
	var (
		success = true
		result = map[string]api.Result{}
		wl = map[string]struct{}{}
		r = make(chan api.Result, len(names))
	)
	for _, name := range names {
		if _, ok := wl[name]; !ok {
			wl[name] = struct{}{}
			go op(name, r)
		}
	}
	for len(wl) > 0 {
		rsp, ok := <-r
		if !ok {
			vm.ctx.Log(ERROR, "fail to wait channels for op %v on %v", op, names)
			return false, result
		}
		if !rsp.IsSuccess() {
			vm.ctx.Log(ERROR, "batch op %v on %s is not success: %s", op, rsp.ResultId(), rsp.Message())
			success = false
		}
		// Fix: the format string has three verbs but only two arguments
		// were passed, logging "%!s(MISSING)"; supply the result id.
		vm.ctx.Log(DEBUG, "batch op %v on %s returned: %s", op, rsp.ResultId(), rsp.Message())
		if _, ok := wl[rsp.ResultId()]; ok {
			delete(wl, rsp.ResultId())
			result[rsp.ResultId()] = rsp
		}
	}
	return success, result
}
func (vm *Vm) StartContainer(id string) error {
err := vm.ctx.newContainer(id)
if err != nil {
return fmt.Errorf("Create new container failed: %v", err)
}
vm.ctx.Log(TRACE, "container %s start: done.", id)
return nil
}
func (vm *Vm) Tty(containerId, execId string, row, column int) error {
if execId == "" {
execId = "init"
}
return vm.ctx.hyperstart.TtyWinResize(containerId, execId, uint16(row), uint16(column))
}
func (vm *Vm) Attach(tty *TtyIO, container string) error {
cmd := &AttachCommand{
Streams: tty,
Container: container,
}
return vm.ctx.attachCmd(cmd)
}
func (vm *Vm) Stats() *types.PodStats {
ctx := vm.ctx
if !vm.ctx.IsRunning() {
vm.ctx.Log(WARNING, "could not get stats from non-running pod")
return nil
}
stats, err := ctx.DCtx.Stats(ctx)
if err != nil {
vm.ctx.Log(WARNING, "failed to get stats: %v", err)
return nil
}
return stats
}
func (vm *Vm) ContainerList() []string {
if !vm.ctx.IsRunning() {
vm.ctx.Log(WARNING, "could not get container list from non-running pod")
return nil
}
return vm.ctx.containerList()
}
func (vm *Vm) Pause(pause bool) error {
ctx := vm.ctx
if !vm.ctx.IsRunning() {
return NewNotReadyError(vm.Id)
}
command := "Pause"
pauseState := PauseStatePaused
if !pause {
pauseState = PauseStateUnpaused
command = "Unpause"
}
var err error
ctx.pauseLock.Lock()
defer ctx.pauseLock.Unlock()
if ctx.PauseState != pauseState {
/* FIXME: only support pause whole vm now */
if pause {
ctx.cancelWatchHyperstart <- struct{}{}
err = ctx.hyperstart.PauseSync()
}
if err != nil {
vm.Log(ERROR, "%s sandbox failed: %v", command, err)
return err
}
// should not change pause state inside ctx.DCtx.Pause!
err = ctx.DCtx.Pause(ctx, pause)
if err != nil {
vm.Log(ERROR, "%s sandbox failed: %v", command, err)
return err
}
if !pause {
err = ctx.hyperstart.Unpause()
go ctx.watchHyperstart()
}
if err != nil {
vm.Log(ERROR, "%s sandbox failed: %v", command, err)
return err
}
vm.Log(TRACE, "sandbox state turn to %s now", command)
ctx.PauseState = pauseState // change the state.
}
return nil
}
func (vm *Vm) Save(path string) error {
ctx := vm.ctx
if !vm.ctx.IsRunning() {
return NewNotReadyError(vm.Id)
}
ctx.pauseLock.Lock()
defer ctx.pauseLock.Unlock()
if ctx.PauseState != PauseStatePaused {
return NewNotReadyError(vm.Id)
}
return ctx.DCtx.Save(ctx, path)
}
func (vm *Vm) GetIPAddrs() []string {
ips := []string{}
if !vm.ctx.IsRunning() {
vm.Log(ERROR, "get pod ip failed: %v", NewNotReadyError(vm.Id))
return ips
}
res := vm.ctx.networks.getIPAddrs()
ips = append(ips, res...)
return ips
}
func (vm *Vm) Dump() ([]byte, error) {
pinfo, err := vm.ctx.dump()
if err != nil {
return nil, err
}
return pinfo.serialize()
}
func errorResponse(cause string) *types.VmResponse {
return &types.VmResponse{
Code: -1,
Cause: cause,
Data: nil,
}
}
func newVm(vmId string, cpu, memory int) *Vm {
return &Vm{
Id: vmId,
Cpu: cpu,
Mem: memory,
logPrefix: fmt.Sprintf("VM[%s] ", vmId),
}
}
func GetVm(vmId string, b *BootConfig, waitStarted bool) (*Vm, error) {
id := vmId
if id == "" {
for {
id = fmt.Sprintf("vm-%s", utils.RandStr(10, "alpha"))
if _, err := os.Stat(filepath.Join(BaseDir, id)); os.IsNotExist(err) {
break
}
}
}
vm := newVm(id, b.CPU, b.Memory)
if err := vm.launch(b); err != nil {
return nil, err
}
if waitStarted {
vm.Log(TRACE, "waiting for vm to start")
if _, err := vm.ctx.hyperstart.APIVersion(); err != nil {
vm.Log(ERROR, "VM start failed: %v", err)
return nil, fmt.Errorf("VM start failed: %v", err)
}
vm.Log(TRACE, "VM started successfully")
}
vm.Log(TRACE, "GetVm succeeded")
return vm, nil
}
remove vm.WriteFile()/ReadFile()
Signed-off-by: Lai Jiangshan <b337eedf453972390e4695e362aeea2634ef4e0c@gmail.com>
package hypervisor
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"syscall"
"time"
"github.com/hyperhq/hypercontainer-utils/hlog"
"github.com/hyperhq/runv/api"
hyperstartapi "github.com/hyperhq/runv/hyperstart/api/json"
"github.com/hyperhq/runv/hyperstart/libhyperstart"
"github.com/hyperhq/runv/hypervisor/types"
"github.com/hyperhq/runv/lib/utils"
)
// Vm is the user-facing handle for a single hypervisor sandbox. It wraps
// the internal VmContext state machine and fans the VM response stream out
// to multiple subscribers via clients.
type Vm struct {
	Id string // sandbox identifier
	ctx *VmContext // internal context driving the VM lifecycle
	Cpu int // currently provisioned vCPU count (see SetCpus)
	Mem int // currently provisioned memory size (see AddMem; unit per BootConfig — TODO confirm)
	Lazy bool // lazy-boot flag; set by callers, not read in this file
	logPrefix string // prefix prepended to every log line for this VM
	clients *Fanout // fanout of the Status response channel to subscribers
}
// LogLevel reports whether messages at the given level are currently logged.
func (vm *Vm) LogLevel(level hlog.LogLevel) bool {
	return hlog.IsLogLevel(level)
}

// LogPrefix returns the per-VM prefix used by the hlog logger.
func (vm *Vm) LogPrefix() string {
	return vm.logPrefix
}

// Log emits a log message at the given level, tagged with this VM's prefix.
// Receiver renamed v -> vm for consistency with every other method of Vm.
func (vm *Vm) Log(level hlog.LogLevel, args ...interface{}) {
	hlog.HLog(level, vm, 1, args...)
}
// GetResponseChan hands out a subscriber channel from the response fanout.
// The caller must return it with ReleaseResponseChan when done.
func (vm *Vm) GetResponseChan() (chan *types.VmResponse, error) {
	if vm.clients == nil {
		return nil, errors.New("No channels available")
	}
	return vm.clients.Acquire()
}
// ReleaseResponseChan returns a channel obtained from GetResponseChan to
// the fanout. It is a no-op if the fanout was never created.
func (vm *Vm) ReleaseResponseChan(ch chan *types.VmResponse) {
	if vm.clients == nil {
		return
	}
	vm.clients.Release(ch)
}
// launch creates and starts the VmContext for this VM using BootConfig b,
// then wires the response stream into a fanout for subscribers.
// On InitContext failure a E_BAD_REQUEST response is pushed onto Status
// (buffered, so this does not block) and the error is returned.
func (vm *Vm) launch(b *BootConfig) (err error) {
	var (
		vmEvent = make(chan VmEvent, 128)
		Status = make(chan *types.VmResponse, 128)
		ctx *VmContext
	)
	ctx, err = InitContext(vm.Id, vmEvent, Status, nil, b)
	if err != nil {
		Status <- &types.VmResponse{
			VmId: vm.Id,
			Code: types.E_BAD_REQUEST,
			Cause: err.Error(),
		}
		return err
	}
	// Launch before publishing ctx/clients so subscribers only ever see a
	// started context.
	ctx.Launch()
	vm.ctx = ctx
	vm.clients = CreateFanout(Status, 128, false)
	return nil
}
// AssociateVm re-attaches to an already-running VM from serialized state
// (data, produced by Vm.Dump). This function will only be invoked during
// daemon start, to recover VMs that survived a daemon restart.
func AssociateVm(vmId string, data []byte) (*Vm, error) {
	var (
		PodEvent = make(chan VmEvent, 128)
		Status = make(chan *types.VmResponse, 128)
		err error
	)
	// cpu/mem are unknown at this point, hence 0/0.
	vm := newVm(vmId, 0, 0)
	vm.ctx, err = VmAssociate(vm.Id, PodEvent, Status, data)
	if err != nil {
		vm.Log(ERROR, "cannot associate with vm: %v", err)
		return nil, err
	}
	vm.clients = CreateFanout(Status, 128, false)
	return vm, nil
}
// matchResponse inspects one VM response. It returns (err, true) to stop
// waiting — err may be nil to signal success — or (nil, false) to keep
// waiting for further responses.
type matchResponse func(response *types.VmResponse) (error, bool)

// WaitResponse subscribes to the VM response stream and returns a channel
// that yields exactly one value: the error chosen by the first response
// that match accepts, or an error if the stream breaks or the timeout
// (in seconds) expires. A negative timeout waits forever.
func (vm *Vm) WaitResponse(match matchResponse, timeout int) chan error {
	result := make(chan error, 1)
	// A receive from a nil channel blocks forever, which implements
	// "no timeout" idiomatically — the original allocated a dummy channel
	// that was never written to.
	var timeoutChan <-chan time.Time
	if timeout >= 0 {
		timeoutChan = time.After(time.Duration(timeout) * time.Second)
	}
	Status, err := vm.GetResponseChan()
	if err != nil {
		result <- err
		return result
	}
	go func() {
		defer vm.ReleaseResponseChan(Status)
		for {
			select {
			case response, ok := <-Status:
				if !ok {
					result <- fmt.Errorf("Response Chan is broken")
					return
				}
				if err, exit := match(response); exit {
					result <- err
					return
				}
			case <-timeoutChan:
				result <- fmt.Errorf("timeout for waiting response")
				return
			}
		}
	}()
	return result
}
// ReleaseVm detaches the daemon from the VM without killing it: it sends a
// ReleaseVMCommand and waits for the context to acknowledge with E_OK (or
// E_VM_SHUTDOWN if the VM exited meanwhile). Returns nil for a VM that is
// not running.
func (vm *Vm) ReleaseVm() error {
	if !vm.ctx.IsRunning() {
		return nil
	}
	// Register the waiter before sending the command so the ack cannot be
	// missed.
	result := vm.WaitResponse(func(response *types.VmResponse) (error, bool) {
		if response.Code == types.E_VM_SHUTDOWN || response.Code == types.E_OK {
			return nil, true
		}
		return nil, false
	}, -1)
	releasePodEvent := &ReleaseVMCommand{}
	if err := vm.ctx.SendVmEvent(releasePodEvent); err != nil {
		return err
	}
	return <-result
}
// WaitVm returns a channel that yields once the VM reports shutdown, or a
// timeout error after timeout seconds (negative waits forever).
func (vm *Vm) WaitVm(timeout int) <-chan error {
	shutdownMatcher := func(response *types.VmResponse) (error, bool) {
		return nil, response.Code == types.E_VM_SHUTDOWN
	}
	return vm.WaitResponse(shutdownMatcher, timeout)
}
// WaitProcess returns a channel that delivers one api.ProcessExit per id
// in ids as the corresponding container (isContainer true) or exec process
// finishes. The channel is closed without draining if the VM shuts down or
// the timeout (seconds; negative means forever) expires first.
func (vm *Vm) WaitProcess(isContainer bool, ids []string, timeout int) <-chan *api.ProcessExit {
	var (
		waiting = make(map[string]struct{})
		result = make(chan *api.ProcessExit, len(ids))
		waitEvent = types.E_CONTAINER_FINISHED
	)
	if !isContainer {
		waitEvent = types.E_EXEC_FINISHED
	}
	for _, id := range ids {
		waiting[id] = struct{}{}
	}
	resChan := vm.WaitResponse(func(response *types.VmResponse) (error, bool) {
		if response.Code == types.E_VM_SHUTDOWN {
			return fmt.Errorf("get shutdown event"), true
		}
		if response.Code != waitEvent {
			return nil, false
		}
		// Guard against an unexpected payload type: the original ignored
		// the assertion result and would nil-panic on ps.Id below.
		ps, ok := response.Data.(*types.ProcessFinished)
		if !ok || ps == nil {
			return nil, false
		}
		if _, ok := waiting[ps.Id]; ok {
			result <- &api.ProcessExit{
				Id: ps.Id,
				Code: int(ps.Code),
				FinishedAt: time.Now().UTC(),
			}
			// Ack the event exactly once; someone else may already have.
			select {
			case ps.Ack <- true:
				vm.ctx.Log(TRACE, "got shut down msg, acked here")
			default:
				vm.ctx.Log(TRACE, "got shut down msg, acked somewhere")
			}
			delete(waiting, ps.Id)
			if len(waiting) == 0 {
				// got all of processexit event, exit
				return nil, true
			}
		}
		// continue to wait other processexit event
		return nil, false
	}, timeout)
	go func() {
		if err := <-resChan; err != nil {
			close(result)
		}
	}()
	return result
}
// InitSandbox pushes the sandbox network configuration into the context
// and asks the guest to start the pod.
func (vm *Vm) InitSandbox(config *api.SandboxConfig) error {
	vm.ctx.SetNetworkEnvironment(config)
	return vm.ctx.startPod()
}
// WaitInit blocks until the guest acknowledges pod initialization (E_OK)
// or fails (E_FAILED / E_VM_SHUTDOWN), and reports the outcome as an
// api.Result keyed by the VM id.
func (vm *Vm) WaitInit() api.Result {
	if err := <-vm.WaitResponse(func(response *types.VmResponse) (error, bool) {
		if response.Code == types.E_OK {
			return nil, true
		}
		if response.Code == types.E_FAILED || response.Code == types.E_VM_SHUTDOWN {
			return fmt.Errorf("got failed event when wait init message"), true
		}
		return nil, false
	}, -1); err != nil {
		return api.NewResultBase(vm.Id, false, err.Error())
	}
	return api.NewResultBase(vm.Id, true, "wait init message successfully")
}
// Shutdown asks the VM to power off gracefully and blocks until the
// shutdown event is observed, reporting the outcome as an api.Result.
func (vm *Vm) Shutdown() api.Result {
	if !vm.ctx.IsRunning() {
		return api.NewResultBase(vm.Id, false, "not in running state")
	}
	// Register the waiter before sending the command so the shutdown event
	// cannot be missed.
	result := vm.WaitResponse(func(response *types.VmResponse) (error, bool) {
		if response.Code == types.E_VM_SHUTDOWN {
			return nil, true
		}
		return nil, false
	}, -1)
	if err := vm.ctx.SendVmEvent(&ShutdownCommand{}); err != nil {
		return api.NewResultBase(vm.Id, false, "vm context already exited")
	}
	if err := <-result; err != nil {
		return api.NewResultBase(vm.Id, false, err.Error())
	}
	return api.NewResultBase(vm.Id, true, "shutdown vm successfully")
}
// Kill powers off the VM immediately without a graceful guest shutdown.
// TODO: should we provide a method to force kill vm
func (vm *Vm) Kill() {
	vm.ctx.poweroffVM(false, "vm.Kill()")
}
// SignalProcess delivers signal to the given process of a container via
// hyperstart.
func (vm *Vm) SignalProcess(container, process string, signal syscall.Signal) error {
	return vm.ctx.hyperstart.SignalProcess(container, process, signal)
}
// KillContainer signals the container's init process, i.e. the container
// itself.
func (vm *Vm) KillContainer(container string, signal syscall.Signal) error {
	return vm.SignalProcess(container, "init", signal)
}
// AssociateContainer re-attaches to a container of a recovered VM and
// reports whether it is still alive.
// Should only be called near after AssociateVm.
func (vm *Vm) AssociateContainer(container string) (alive bool, err error) {
	return vm.ctx.restoreContainer(container)
}
// AddRoute pushes the host-side route table for this VM's networks into
// the guest via hyperstart.
func (vm *Vm) AddRoute() error {
	routes := vm.ctx.networks.getRoutes()
	return vm.ctx.hyperstart.AddRoute(routes)
}
// AddNic hot-plugs the network interface described by info into the VM,
// waits for the host-side allocation to complete, then asks hyperstart to
// configure the interface inside the guest.
func (vm *Vm) AddNic(info *api.InterfaceDescription) error {
	client := make(chan api.Result, 1)
	vm.ctx.AddInterface(info, client)
	ev, ok := <-client
	if !ok {
		return fmt.Errorf("internal error")
	}
	if !ev.IsSuccess() {
		return fmt.Errorf("allocate device failed")
	}
	if vm.ctx.LogLevel(TRACE) {
		// Fix: "finial" -> "final" in the trace message.
		vm.Log(TRACE, "final vmSpec.Interface is %#v", vm.ctx.networks.getInterface(info.Id))
	}
	return vm.ctx.hyperstartAddInterface(info.Id)
}
// AllNics returns all interfaces currently attached to the VM.
func (vm *Vm) AllNics() []*InterfaceCreated {
	return vm.ctx.AllInterfaces()
}
// DeleteNic unconfigures the interface inside the guest first, then
// removes the host-side device and waits for the result.
func (vm *Vm) DeleteNic(id string) error {
	if err := vm.ctx.hyperstartDeleteInterface(id); err != nil {
		return err
	}
	done := make(chan api.Result, 1)
	vm.ctx.RemoveInterface(id, done)
	res, open := <-done
	switch {
	case !open:
		return fmt.Errorf("internal error")
	case !res.IsSuccess():
		return fmt.Errorf("remove device failed")
	}
	return nil
}
// UpdateNic updates an interface's IP/MTU inside the guest first, and on
// success updates the host-side bookkeeping to match.
func (vm *Vm) UpdateNic(inf *api.InterfaceDescription) error {
	if err := vm.ctx.hyperstartUpdateInterface(inf.Id, inf.Ip, inf.Mtu); err != nil {
		return err
	}
	return vm.ctx.UpdateInterface(inf)
}
// SetCpus grows the VM's vCPU count to cpus. Requests that do not grow the
// count are a no-op; shrinking is not supported.
func (vm *Vm) SetCpus(cpus int) error {
	if cpus <= vm.Cpu {
		return nil
	}
	if !vm.ctx.IsRunning() {
		return NewNotReadyError(vm.Id)
	}
	if err := vm.ctx.DCtx.SetCpus(vm.ctx, cpus); err != nil {
		return err
	}
	vm.Cpu = cpus
	return nil
}
// AddMem grows the VM's memory to totalMem by hot-adding the difference.
// Requests that do not grow memory are a no-op; shrinking is not supported.
func (vm *Vm) AddMem(totalMem int) error {
	if totalMem <= vm.Mem {
		return nil
	}
	if !vm.ctx.IsRunning() {
		return NewNotReadyError(vm.Id)
	}
	delta := totalMem - vm.Mem
	if err := vm.ctx.DCtx.AddMem(vm.ctx, 1, delta); err != nil {
		return err
	}
	vm.Mem = totalMem
	return nil
}
// OnlineCpuMem asks the guest to bring hot-added CPUs and memory online.
func (vm *Vm) OnlineCpuMem() error {
	return vm.ctx.hyperstart.OnlineCpuMem()
}
// HyperstartExecSync runs cmd synchronously in the hyperstart exec
// container, feeding stdin and capturing stdout/stderr. It returns the
// captured output; a non-zero exit code is reported as an error while
// still returning whatever output was produced.
func (vm *Vm) HyperstartExecSync(cmd []string, stdin []byte) (stdout, stderr []byte, err error) {
	if len(cmd) == 0 {
		return nil, nil, fmt.Errorf("'hyperstart-exec' without command")
	}
	execId := fmt.Sprintf("hyperstart-exec-%s", utils.RandStr(10, "alpha"))
	var stdoutBuf, stderrBuf bytes.Buffer
	tty := &TtyIO{
		Stdin: ioutil.NopCloser(bytes.NewReader(stdin)),
		Stdout: &stdoutBuf,
		Stderr: &stderrBuf,
	}
	// Register the exit waiter before starting the process so the finish
	// event cannot be missed.
	result := vm.WaitProcess(false, []string{execId}, -1)
	if result == nil {
		err = fmt.Errorf("can not wait hyperstart-exec %q", execId)
		vm.Log(ERROR, err)
		return nil, nil, err
	}
	err = vm.AddProcess(&api.Process{
		Container: hyperstartapi.HYPERSTART_EXEC_CONTAINER,
		Id: execId,
		Terminal: false,
		Args: cmd,
		Envs: []string{},
		Workdir: "/"}, tty)
	if err != nil {
		return nil, nil, err
	}
	// The channel is closed (ok == false) if the wait was interrupted,
	// e.g. by VM shutdown.
	r, ok := <-result
	if !ok {
		err = fmt.Errorf("wait hyperstart-exec %q interrupted", execId)
		vm.Log(ERROR, err)
		return nil, nil, err
	}
	vm.Log(TRACE, "hyperstart-exec %q terminated at %v with code %d", execId, r.FinishedAt, r.Code)
	if r.Code != 0 {
		return stdoutBuf.Bytes(), stderrBuf.Bytes(), fmt.Errorf("exit with error code:%d", r.Code)
	}
	return stdoutBuf.Bytes(), stderrBuf.Bytes(), nil
}
// HyperstartExec runs a command (cmd is a JSON-encoded argv array) in the
// hyperstart exec container, wiring its stdio to tty, and blocks until it
// terminates, returning the process exit code. On failure the exit code
// is -1.
func (vm *Vm) HyperstartExec(cmd string, tty *TtyIO) (int, error) {
	if cmd == "" {
		return -1, fmt.Errorf("'hyperstart exec' without command")
	}
	var command []string
	if err := json.Unmarshal([]byte(cmd), &command); err != nil {
		// Fix: return -1 like every other failure path; the original
		// returned 0, which callers could mistake for a success exit code.
		return -1, err
	}
	execID := fmt.Sprintf("hyperstart-exec-%s", utils.RandStr(10, "alpha"))
	// Register the exit waiter before starting the process so the finish
	// event cannot be missed.
	result := vm.WaitProcess(false, []string{execID}, -1)
	if result == nil {
		err := fmt.Errorf("can not wait hyperstart-exec %q", execID)
		vm.Log(ERROR, err)
		return -1, err
	}
	err := vm.AddProcess(&api.Process{
		Container: hyperstartapi.HYPERSTART_EXEC_CONTAINER,
		Id: execID,
		Terminal: false,
		Args: command,
		Envs: []string{},
		Workdir: "/"}, tty)
	if err != nil {
		return -1, err
	}
	r, ok := <-result
	if !ok {
		err = fmt.Errorf("wait hyperstart-exec %q interrupted", execID)
		vm.Log(ERROR, err)
		return -1, err
	}
	vm.Log(TRACE, "hyperstart-exec %q terminated at %v with code %d", execID, r.FinishedAt, r.Code)
	return r.Code, nil
}
// Exec starts a process (cmd is a JSON-encoded argv array) inside the
// given container under id execId, attached to tty.
func (vm *Vm) Exec(container, execId, cmd string, terminal bool, tty *TtyIO) error {
	if cmd == "" {
		return fmt.Errorf("'exec' without command")
	}
	var args []string
	if err := json.Unmarshal([]byte(cmd), &args); err != nil {
		return err
	}
	process := &api.Process{
		Container: container,
		Id:        execId,
		Terminal:  terminal,
		Args:      args,
		Envs:      []string{},
		Workdir:   "/",
	}
	return vm.AddProcess(process, tty)
}
// AddProcess starts process inside its container via hyperstart. If tty is
// non-nil the process stdio is bridged to it, and a goroutine is spawned
// that waits for the process and reports an E_EXEC_FINISHED event with its
// exit status.
func (vm *Vm) AddProcess(process *api.Process, tty *TtyIO) error {
	if !vm.ctx.IsRunning() {
		return NewNotReadyError(vm.Id)
	}
	// Split "KEY=VALUE" entries; entries without '=' (or starting with it)
	// are silently dropped.
	envs := []hyperstartapi.EnvironmentVar{}
	for _, v := range process.Envs {
		if eqlIndex := strings.Index(v, "="); eqlIndex > 0 {
			envs = append(envs, hyperstartapi.EnvironmentVar{
				Env: v[:eqlIndex],
				Value: v[eqlIndex+1:],
			})
		}
	}
	err := vm.ctx.hyperstart.AddProcess(process.Container, &hyperstartapi.Process{
		Id: process.Id,
		Terminal: process.Terminal,
		Args: process.Args,
		Envs: envs,
		Workdir: process.Workdir,
		User: process.User,
		Group: process.Group,
	})
	if err != nil {
		return fmt.Errorf("exec command %v failed: %v", process.Args, err)
	}
	// Without a tty the caller does not care about stdio or exit events.
	if tty == nil {
		return nil
	}
	inPipe, outPipe, errPipe := libhyperstart.StdioPipe(vm.ctx.hyperstart, process.Container, process.Id)
	go streamCopy(tty, inPipe, outPipe, errPipe)
	go func() {
		status := vm.ctx.hyperstart.WaitProcess(process.Container, process.Id)
		vm.ctx.reportProcessFinished(types.E_EXEC_FINISHED, &types.ProcessFinished{
			Id: process.Id, Code: uint8(status), Ack: make(chan bool, 1),
		})
	}()
	return nil
}
// AddVolume attaches the described volume to the VM and blocks for the
// outcome.
func (vm *Vm) AddVolume(vol *api.VolumeDescription) api.Result {
	done := make(chan api.Result, 1)
	vm.ctx.AddVolume(vol, done)
	res := <-done
	return res
}
// AddContainer adds the described container to the VM and blocks for the
// outcome.
func (vm *Vm) AddContainer(c *api.ContainerDescription) api.Result {
	done := make(chan api.Result, 1)
	vm.ctx.AddContainer(c, done)
	res := <-done
	return res
}
// RemoveContainer removes the container with the given id and blocks for
// the outcome.
func (vm *Vm) RemoveContainer(id string) api.Result {
	done := make(chan api.Result, 1)
	vm.ctx.RemoveContainer(id, done)
	res := <-done
	return res
}
// RemoveVolume detaches the named volume and blocks for the outcome.
func (vm *Vm) RemoveVolume(name string) api.Result {
	done := make(chan api.Result, 1)
	vm.ctx.RemoveVolume(name, done)
	res := <-done
	return res
}
// RemoveContainers removes several containers concurrently; see
// batchWaitResult for the aggregation semantics.
func (vm *Vm) RemoveContainers(ids ...string) (bool, map[string]api.Result) {
	return vm.batchWaitResult(ids, vm.ctx.RemoveContainer)
}
// RemoveVolumes removes several volumes concurrently; see batchWaitResult
// for the aggregation semantics.
func (vm *Vm) RemoveVolumes(names ...string) (bool, map[string]api.Result) {
	return vm.batchWaitResult(names, vm.ctx.RemoveVolume)
}
// waitResultOp is an asynchronous operation that reports its outcome for
// one named object on the supplied channel.
type waitResultOp func(string, chan<- api.Result)

// batchWaitResult runs op concurrently for every unique name in names and
// collects one result per name. It returns overall success (false if any
// op failed or the channel closed early) plus the per-name results
// gathered so far.
func (vm *Vm) batchWaitResult(names []string, op waitResultOp) (bool, map[string]api.Result) {
	var (
		success = true
		result = map[string]api.Result{}
		wl = map[string]struct{}{}
		r = make(chan api.Result, len(names))
	)
	for _, name := range names {
		if _, ok := wl[name]; !ok {
			wl[name] = struct{}{}
			go op(name, r)
		}
	}
	for len(wl) > 0 {
		rsp, ok := <-r
		if !ok {
			vm.ctx.Log(ERROR, "fail to wait channels for op %v on %v", op, names)
			return false, result
		}
		if !rsp.IsSuccess() {
			vm.ctx.Log(ERROR, "batch op %v on %s is not success: %s", op, rsp.ResultId(), rsp.Message())
			success = false
		}
		// Fix: the format string has three verbs but only two arguments
		// were passed, logging "%!s(MISSING)"; supply the result id.
		vm.ctx.Log(DEBUG, "batch op %v on %s returned: %s", op, rsp.ResultId(), rsp.Message())
		if _, ok := wl[rsp.ResultId()]; ok {
			delete(wl, rsp.ResultId())
			result[rsp.ResultId()] = rsp
		}
	}
	return success, result
}
// StartContainer launches a previously added container inside the VM.
func (vm *Vm) StartContainer(id string) error {
	if err := vm.ctx.newContainer(id); err != nil {
		return fmt.Errorf("Create new container failed: %v", err)
	}
	vm.ctx.Log(TRACE, "container %s start: done.", id)
	return nil
}
// Tty resizes the terminal of the given exec process (or of the container
// init process when execId is empty).
func (vm *Vm) Tty(containerId, execId string, row, column int) error {
	target := execId
	if target == "" {
		target = "init"
	}
	return vm.ctx.hyperstart.TtyWinResize(containerId, target, uint16(row), uint16(column))
}
// Attach connects tty to the stdio of the given container.
func (vm *Vm) Attach(tty *TtyIO, container string) error {
	return vm.ctx.attachCmd(&AttachCommand{
		Streams:   tty,
		Container: container,
	})
}
// Stats returns resource usage statistics for the pod, or nil if the VM is
// not running or the hypervisor driver fails to produce them.
func (vm *Vm) Stats() *types.PodStats {
	if !vm.ctx.IsRunning() {
		vm.ctx.Log(WARNING, "could not get stats from non-running pod")
		return nil
	}
	stats, err := vm.ctx.DCtx.Stats(vm.ctx)
	if err != nil {
		vm.ctx.Log(WARNING, "failed to get stats: %v", err)
		return nil
	}
	return stats
}
// ContainerList returns the ids of the containers in this pod, or nil when
// the VM is not running.
func (vm *Vm) ContainerList() []string {
	if vm.ctx.IsRunning() {
		return vm.ctx.containerList()
	}
	vm.ctx.Log(WARNING, "could not get container list from non-running pod")
	return nil
}
// Pause pauses (pause == true) or resumes (pause == false) the whole VM.
// The sequence is order-sensitive: on pause, hyperstart watching is
// cancelled and hyperstart is paused BEFORE the hypervisor; on resume, the
// hypervisor is resumed BEFORE hyperstart is unpaused and watching is
// restarted. PauseState is only updated after every step succeeded, under
// pauseLock.
func (vm *Vm) Pause(pause bool) error {
	ctx := vm.ctx
	if !vm.ctx.IsRunning() {
		return NewNotReadyError(vm.Id)
	}
	command := "Pause"
	pauseState := PauseStatePaused
	if !pause {
		pauseState = PauseStateUnpaused
		command = "Unpause"
	}
	var err error
	ctx.pauseLock.Lock()
	defer ctx.pauseLock.Unlock()
	// No-op when already in the requested state.
	if ctx.PauseState != pauseState {
		/* FIXME: only support pause whole vm now */
		if pause {
			ctx.cancelWatchHyperstart <- struct{}{}
			err = ctx.hyperstart.PauseSync()
		}
		if err != nil {
			vm.Log(ERROR, "%s sandbox failed: %v", command, err)
			return err
		}
		// should not change pause state inside ctx.DCtx.Pause!
		err = ctx.DCtx.Pause(ctx, pause)
		if err != nil {
			vm.Log(ERROR, "%s sandbox failed: %v", command, err)
			return err
		}
		if !pause {
			err = ctx.hyperstart.Unpause()
			go ctx.watchHyperstart()
		}
		if err != nil {
			vm.Log(ERROR, "%s sandbox failed: %v", command, err)
			return err
		}
		vm.Log(TRACE, "sandbox state turn to %s now", command)
		ctx.PauseState = pauseState // change the state.
	}
	return nil
}
// Save snapshots the VM state to path. The VM must be running AND already
// paused (see Pause); otherwise a not-ready error is returned.
func (vm *Vm) Save(path string) error {
	ctx := vm.ctx
	if !ctx.IsRunning() {
		return NewNotReadyError(vm.Id)
	}
	ctx.pauseLock.Lock()
	defer ctx.pauseLock.Unlock()
	if ctx.PauseState == PauseStatePaused {
		return ctx.DCtx.Save(ctx, path)
	}
	return NewNotReadyError(vm.Id)
}
// GetIPAddrs returns the pod's IP addresses; an empty (non-nil) slice is
// returned when the VM is not running.
func (vm *Vm) GetIPAddrs() []string {
	if !vm.ctx.IsRunning() {
		vm.Log(ERROR, "get pod ip failed: %v", NewNotReadyError(vm.Id))
		return []string{}
	}
	return append([]string{}, vm.ctx.networks.getIPAddrs()...)
}
// Dump serializes the VM context so it can later be re-attached with
// AssociateVm.
func (vm *Vm) Dump() ([]byte, error) {
	info, err := vm.ctx.dump()
	if err != nil {
		return nil, err
	}
	return info.serialize()
}
// errorResponse builds a generic failure response carrying cause.
func errorResponse(cause string) *types.VmResponse {
	resp := &types.VmResponse{Code: -1, Cause: cause, Data: nil}
	return resp
}
// newVm allocates a Vm handle with its logging prefix; the context is
// attached later by launch or AssociateVm.
func newVm(vmId string, cpu, memory int) *Vm {
	vm := &Vm{
		Id:  vmId,
		Cpu: cpu,
		Mem: memory,
	}
	vm.logPrefix = fmt.Sprintf("VM[%s] ", vmId)
	return vm
}
// GetVm creates and launches a new VM from BootConfig b. If vmId is empty
// a fresh "vm-XXXXXXXXXX" id is generated, retrying until no directory for
// it exists under BaseDir. When waitStarted is true, the call blocks until
// hyperstart answers an APIVersion probe, i.e. the guest is actually up.
func GetVm(vmId string, b *BootConfig, waitStarted bool) (*Vm, error) {
	id := vmId
	if id == "" {
		for {
			id = fmt.Sprintf("vm-%s", utils.RandStr(10, "alpha"))
			// Only accept an id whose state directory does not exist yet.
			if _, err := os.Stat(filepath.Join(BaseDir, id)); os.IsNotExist(err) {
				break
			}
		}
	}
	vm := newVm(id, b.CPU, b.Memory)
	if err := vm.launch(b); err != nil {
		return nil, err
	}
	if waitStarted {
		vm.Log(TRACE, "waiting for vm to start")
		// APIVersion blocks until hyperstart responds, acting as a
		// started-up barrier.
		if _, err := vm.ctx.hyperstart.APIVersion(); err != nil {
			vm.Log(ERROR, "VM start failed: %v", err)
			return nil, fmt.Errorf("VM start failed: %v", err)
		}
		vm.Log(TRACE, "VM started successfully")
	}
	vm.Log(TRACE, "GetVm succeeded")
	return vm, nil
}
|
package ec2
import (
"bytes"
"fmt"
"github.com/megamsys/megamd/iaas"
"github.com/megamsys/megamd/provisioner"
"github.com/tsuru/config"
"strings"
"encoding/json"
)
// Init registers the EC2 implementation with the IaaS provider registry
// under the name "ec2".
func Init() {
	iaas.RegisterIaasProvider("ec2", &EC2IaaS{})
}
// EC2IaaS implements the iaas provider interface for Amazon EC2.
type EC2IaaS struct{}
// DeleteMachine is a stub: machine deletion is not implemented for EC2 and
// always reports success.
func (i *EC2IaaS) DeleteMachine(string) error {
	return nil
}
func (i *EC2IaaS) CreateMachine(pdc *iaas.PredefClouds, assembly *provisioner.AssemblyResult) (string, error) {
keys, err_keys := iaas.GetAccessKeys(pdc)
if err_keys != nil {
return "", err_keys
}
str, err := buildCommand(iaas.GetPlugins("ec2"), pdc, "create")
if err != nil {
return "", err
}
str = str + " -N " + assembly.Name + "." + assembly.Components[0].Inputs.Domain
str = str + " -A " + keys.AccessKey
str = str + " -K " + keys.SecretKey
riak, err_riak := config.GetString("api:server")
if err_riak != nil {
return "", err_riak
}
recipe, err_recipe := config.GetString("knife:recipe")
if err_recipe != nil {
return "", err_recipe
}
str = str + " --run-list \"" + "recipe[" + recipe + "]" + "\""
attributes := &iaas.Attributes{RiakHost: riak, AccountID: pdc.Accounts_id, AssemblyID: assembly.Id}
b, aerr := json.Marshal(attributes)
if aerr != nil {
fmt.Println(aerr)
return "", aerr
}
str = str + " --json-attributes \'" + string(b) + "\'"
//--json-attributes {"riak_host":"api.megam.co","accounts_id":"ACT1135880650419732480","assembly_id":"ASM1138425212728115200"}
//strings.Replace(str,"-c","-c "+assembly.Name+"."+assembly.Components[0].Inputs.Domain,-1)
knifePath, kerr := config.GetString("knife:path")
if kerr != nil {
return "", kerr
}
str = strings.Replace(str, "-c", "-c "+knifePath, -1)
return str, nil
}
// buildCommand composes the base knife command from the plugin definition
// and the predefined cloud's spec/access sections. Every field is required;
// a missing field aborts with a descriptive error. The flag order in the
// resulting string is fixed: tool, create-subcommand, -G, -I, -f, -S, -x,
// --identity-file, --region.
func buildCommand(plugin *iaas.Plugins, pdc *iaas.PredefClouds, command string) (string, error) {
	var cmd bytes.Buffer
	if len(plugin.Tool) == 0 {
		return "", fmt.Errorf("Plugin tool doesn't loaded")
	}
	cmd.WriteString(plugin.Tool)
	if command == "create" {
		if len(plugin.Command.Create) == 0 {
			return "", fmt.Errorf("Plugin commands doesn't loaded")
		}
		cmd.WriteString(" " + plugin.Command.Create)
	}
	// Simple required flags, checked and appended in a fixed order.
	required := []struct {
		value string // field value from the predefined cloud
		flag  string // knife flag, including surrounding spaces
		miss  string // error message when the value is empty
	}{
		{pdc.Spec.Groups, " -G ", "Groups doesn't loaded"},
		{pdc.Spec.Image, " -I ", "Image doesn't loaded"},
		{pdc.Spec.Flavor, " -f ", "Flavor doesn't loaded"},
		{pdc.Access.Sshkey, " -S ", "Ssh key value doesn't loaded"},
		{pdc.Access.Sshuser, " -x ", "Ssh user value doesn't loaded"},
	}
	for _, r := range required {
		if len(r.value) == 0 {
			return "", fmt.Errorf(r.miss)
		}
		cmd.WriteString(r.flag + r.value)
	}
	// The identity file is resolved to an on-disk location and suffixed
	// with ".key"; both a missing value and a resolution failure yield the
	// same error message.
	if len(pdc.Access.IdentityFile) == 0 {
		return "", fmt.Errorf("Identity file doesn't loaded")
	}
	ifile, err := iaas.GetIdentityFileLocation(pdc.Access.IdentityFile)
	if err != nil {
		return "", fmt.Errorf("Identity file doesn't loaded")
	}
	cmd.WriteString(" --identity-file " + ifile + ".key")
	if len(pdc.Access.Region) == 0 {
		return "", fmt.Errorf("Zone doesn't loaded")
	}
	cmd.WriteString(" --region " + pdc.Access.Region)
	return cmd.String(), nil
}
Test ec2
package ec2
import (
"bytes"
"fmt"
"github.com/megamsys/megamd/iaas"
"github.com/megamsys/megamd/provisioner"
"github.com/tsuru/config"
"strings"
"encoding/json"
)
// Init registers this package's EC2 implementation with the iaas
// provider registry under the name "ec2".
func Init() {
	iaas.RegisterIaasProvider("ec2", &EC2IaaS{})
}

// EC2IaaS implements the iaas provider interface for Amazon EC2.
type EC2IaaS struct{}

// DeleteMachine is a no-op for EC2; it always reports success.
func (i *EC2IaaS) DeleteMachine(string) error {
	return nil
}
func (i *EC2IaaS) CreateMachine(pdc *iaas.PredefClouds, assembly *provisioner.AssemblyResult) (string, error) {
keys, err_keys := iaas.GetAccessKeys(pdc)
if err_keys != nil {
return "", err_keys
}
str, err := buildCommand(iaas.GetPlugins("ec2"), pdc, "create")
if err != nil {
return "", err
}
str = str + " -N " + assembly.Name + "." + assembly.Components[0].Inputs.Domain
str = str + " -A " + keys.AccessKey
str = str + " -K " + keys.SecretKey
riak, err_riak := config.GetString("api:server")
if err_riak != nil {
return "", err_riak
}
recipe, err_recipe := config.GetString("knife:recipe")
if err_recipe != nil {
return "", err_recipe
}
str = str + " --run-list \"" + "recipe[" + recipe + "]" + "\""
attributes := &iaas.Attributes{RiakHost: riak, AccountID: pdc.Accounts_id, AssemblyID: assembly.Id}
b, aerr := json.Marshal(attributes)
if aerr != nil {
fmt.Println(aerr)
return "", aerr
}
//str = str + " --json-attributes \'" + string(b) + "\'"
str = str + " --json-attributes '\(string(b))'"
//--json-attributes {"riak_host":"api.megam.co","accounts_id":"ACT1135880650419732480","assembly_id":"ASM1138425212728115200"}
//strings.Replace(str,"-c","-c "+assembly.Name+"."+assembly.Components[0].Inputs.Domain,-1)
knifePath, kerr := config.GetString("knife:path")
if kerr != nil {
return "", kerr
}
str = strings.Replace(str, "-c", "-c "+knifePath, -1)
return str, nil
}
// buildCommand composes the base knife command from the plugin definition
// and the predefined cloud's spec/access sections. Every field is required;
// a missing field aborts with a descriptive error.
func buildCommand(plugin *iaas.Plugins, pdc *iaas.PredefClouds, command string) (string, error) {
	var buffer bytes.Buffer
	if len(plugin.Tool) > 0 {
		buffer.WriteString(plugin.Tool)
	} else {
		return "", fmt.Errorf("Plugin tool doesn't loaded")
	}
	// Only the "create" subcommand is recognized; other values of command
	// silently produce a command without a subcommand.
	if command == "create" {
		if len(plugin.Command.Create) > 0 {
			buffer.WriteString(" " + plugin.Command.Create)
		} else {
			return "", fmt.Errorf("Plugin commands doesn't loaded")
		}
	}
	if len(pdc.Spec.Groups) > 0 {
		buffer.WriteString(" -G " + pdc.Spec.Groups)
	} else {
		return "", fmt.Errorf("Groups doesn't loaded")
	}
	if len(pdc.Spec.Image) > 0 {
		buffer.WriteString(" -I " + pdc.Spec.Image)
	} else {
		return "", fmt.Errorf("Image doesn't loaded")
	}
	if len(pdc.Spec.Flavor) > 0 {
		buffer.WriteString(" -f " + pdc.Spec.Flavor)
	} else {
		return "", fmt.Errorf("Flavor doesn't loaded")
	}
	if len(pdc.Access.Sshkey) > 0 {
		buffer.WriteString(" -S " + pdc.Access.Sshkey)
	} else {
		return "", fmt.Errorf("Ssh key value doesn't loaded")
	}
	if len(pdc.Access.Sshuser) > 0 {
		buffer.WriteString(" -x " + pdc.Access.Sshuser)
	} else {
		return "", fmt.Errorf("Ssh user value doesn't loaded")
	}
	// The identity file is resolved to an on-disk location and suffixed
	// with ".key"; a resolution failure reports the same message as a
	// missing value.
	if len(pdc.Access.IdentityFile) > 0 {
		ifile, err := iaas.GetIdentityFileLocation(pdc.Access.IdentityFile)
		if err != nil {
			return "", fmt.Errorf("Identity file doesn't loaded")
		}
		buffer.WriteString(" --identity-file " + ifile + ".key")
	} else {
		return "", fmt.Errorf("Identity file doesn't loaded")
	}
	if len(pdc.Access.Region) > 0 {
		buffer.WriteString(" --region " + pdc.Access.Region)
	} else {
		return "", fmt.Errorf("Zone doesn't loaded")
	}
	return buffer.String(), nil
}
|
package foo
import (
"io"
)
// Foo closes c, discarding any error returned by Close.
// NOTE(review): this looks like a lint-check fixture; the dropped error
// appears intentional — confirm before "fixing".
func Foo(c io.Closer) {
	c.Close()
}
// FooArgs issues a read into a nil buffer (a zero-length read) and then
// closes rc; both return values are deliberately ignored (lint fixture).
func FooArgs(rc io.ReadCloser) {
	var b []byte
	rc.Read(b)
	rc.Close()
}
Make sure that we also work with make
package foo
import (
"io"
)
// Foo closes c, discarding any error returned by Close.
// NOTE(review): this looks like a lint-check fixture; the dropped error
// appears intentional — confirm before "fixing".
func Foo(c io.Closer) {
	c.Close()
}
// FooArgs issues a read into a nil buffer (a zero-length read) and then
// closes rc; both return values are deliberately ignored (lint fixture).
func FooArgs(rc io.ReadCloser) {
	var b []byte
	rc.Read(b)
	rc.Close()
}
// FooArgsMake reads into a make-allocated buffer and closes rc, ignoring
// both return values — the make-allocated variant of the same lint fixture.
func FooArgsMake(rc io.ReadCloser) {
	b := make([]byte, 10)
	rc.Read(b)
	rc.Close()
}
|
package tester
import (
"fmt"
"hash"
"log"
"sync"
"github.com/facebookgo/ensure"
"github.com/golang/protobuf/proto"
"github.com/lovoo/goka/codec"
"github.com/lovoo/goka/kafka"
"github.com/lovoo/goka/storage"
)
// Codec decodes and encodes from and to []byte.
type Codec interface {
	Encode(value interface{}) (data []byte, err error)
	Decode(data []byte) (value interface{}, err error)
}

// EmitHandler abstracts a function that allows to overwrite kafkamock's Emit
// function to simulate producer errors.
type EmitHandler func(topic string, key string, value []byte) *kafka.Promise

// Tester allows interacting with a test processor.
type Tester struct {
	t            T
	consumerMock *consumerMock
	producerMock *producerMock
	topicMgrMock *topicMgrMock
	emitHandler  EmitHandler
	storage      storage.Storage
	codec        Codec // codec used for the group table values
	offset       int64 // offset assigned to the next simulated message
	tableOffset  int64 // offset used when replaying the group table

	// incomingEvents receives events from the test case (and emit
	// loop-backs); initProtocol forwards them to consumerEvents, which is
	// what the processor's consumer mock exposes.
	incomingEvents chan kafka.Event
	consumerEvents chan kafka.Event

	// Stores a map of all topics that are handled by the processor.
	// Every time an emit is called, those messages for handled topics are relayed
	// after the consume-function has finished.
	// All other messages are stored in the emitted-slice for further inspection.
	handledTopics map[string]bool
	groupTopic    string
	emitted       []*kafka.Message

	// groupTableCreator, if set, yields key/value pairs used to
	// pre-populate the group table during initProtocol.
	groupTableCreator func() (string, []byte)
	callQueue         []func()
	wg                sync.WaitGroup
}

// T abstracts the interface we assume from the test case.
// Will most likely be *testing.T.
type T interface {
	Errorf(format string, args ...interface{})
	Fatalf(format string, args ...interface{})
	Fatal(a ...interface{})
}

// New returns a new testprocessor mocking every external service.
// It should be passed as goka.WithTester to goka.NewProcessor. It essentially
// replaces the storage/consumer/producer/topicmanager with a mock.
// For example, a normal call to NewProcessor like this
//   goka.NewProcessor(brokers, group, subscriptions,
//     option_a,
//     option_b,
//     option_c,
//   )
// would become in the unit test:
//   tester := tester.New(t)
//   NewProcessor(brokers, group, subscriptions,
//     option_a,
//     option_b,
//     option_c,
//     WithTester(tester),
//   )
func New(t T) *Tester {
	tester := &Tester{
		storage:        storage.NewMemory(),
		t:              t,
		incomingEvents: make(chan kafka.Event),
		consumerEvents: make(chan kafka.Event),
		handledTopics:  make(map[string]bool),
		codec:          new(codec.Bytes),
	}
	tester.consumerMock = newConsumerMock(tester)
	tester.producerMock = newProducerMock(tester.handleEmit)
	tester.topicMgrMock = newTopicMgrMock(tester)
	return tester
}

// SetCodec sets the codec for the group table.
func (km *Tester) SetCodec(codec Codec) *Tester {
	km.codec = codec
	return km
}

// SetGroupTableCreator sets a creator for the group table.
func (km *Tester) SetGroupTableCreator(creator func() (string, []byte)) {
	km.groupTableCreator = creator
}

// TopicManagerBuilder returns a builder that always hands out the shared
// topic-manager mock.
func (km *Tester) TopicManagerBuilder() kafka.TopicManagerBuilder {
	return func(brokers []string) (kafka.TopicManager, error) {
		return km.topicMgrMock, nil
	}
}

// ConsumerBuilder returns a builder that hands out the shared consumer mock
// and, as a side effect, records the processor's group table topic
// ("<group>-table") so emits to it can be short-circuited later.
func (km *Tester) ConsumerBuilder() kafka.ConsumerBuilder {
	return func(b []string, group, clientID string) (kafka.Consumer, error) {
		if km.groupTopic == "" {
			km.groupTopic = fmt.Sprintf("%s-table", group)
		}
		return km.consumerMock, nil
	}
}

// ProducerBuilder returns a builder that hands out the shared producer mock.
func (km *Tester) ProducerBuilder() kafka.ProducerBuilder {
	return func(b []string, cid string, hasher func() hash.Hash32) (kafka.Producer, error) {
		return km.producerMock, nil
	}
}

// StorageBuilder returns a builder that hands out the single in-memory
// storage shared by all topics/partitions.
func (km *Tester) StorageBuilder() storage.Builder {
	return func(topic string, partition int32) (storage.Storage, error) {
		return km.storage, nil
	}
}
// initProtocol initiates the protocol with the client basically making the
// KafkaMock usable. Runs in its own goroutine (started by Subscribe): it
// assigns partition 0, optionally replays the group table, signals EOF and
// readiness, then forwards everything from incomingEvents to the processor.
func (km *Tester) initProtocol() {
	// Sends on channels closed by consumerMock.Close panic; recover turns
	// that into a log line so an already-finished test does not blow up.
	defer func() {
		if r := recover(); r != nil {
			log.Printf("tester: panic initProtocol: %+v", r)
		}
	}()
	// Assign partition 0 with offset -1.
	km.consumerEvents <- &kafka.Assignment{
		0: -1,
	}
	// Replay the group table until the creator signals exhaustion with an
	// empty key or nil value.
	for km.groupTableCreator != nil {
		key, value := km.groupTableCreator()
		if key == "" || value == nil {
			break
		}
		// NOTE(review): tableOffset is never advanced, so every replayed
		// message carries the same offset — confirm this is intended.
		km.consumerEvents <- &kafka.Message{
			Topic:     km.groupTopic,
			Partition: 0,
			Offset:    km.tableOffset,
			Key:       key,
			Value:     value,
		}
	}
	km.consumerEvents <- &kafka.EOF{Partition: 0}
	km.consumerEvents <- &kafka.NOP{Partition: -1}
	// From now on, relay everything the test case feeds in.
	for ev := range km.incomingEvents {
		km.consumerEvents <- ev
	}
}
// ConsumeProto simulates a message on kafka in a topic with a key. The
// proto message is marshaled, handed to the processor, and any queued
// emit callbacks are executed before returning.
func (km *Tester) ConsumeProto(topic string, key string, msg proto.Message) {
	data, err := proto.Marshal(msg)
	if err != nil && km.t != nil {
		km.t.Errorf("Error marshaling message for consume: %v", err)
	}
	// BUG FIX: this used to call the exported km.ConsumeData, which itself
	// calls makeCalls, so makeCalls ran twice per message (racing on the
	// call queue). Call the internal consumeData and drain the queue once.
	km.consumeData(topic, key, data)
	km.makeCalls()
}
// ConsumeString simulates a message with a string payload.
func (km *Tester) ConsumeString(topic string, key string, msg string) {
	// BUG FIX: was km.ConsumeData, which itself calls makeCalls, so the
	// queue was drained twice per message. Use the internal consumeData
	// and run makeCalls exactly once.
	km.consumeData(topic, key, []byte(msg))
	km.makeCalls()
}
// Consume simulates a message with a byte slice payload.
func (km *Tester) Consume(topic string, key string, msg []byte) {
	// BUG FIX: was km.ConsumeData, which itself calls makeCalls, so the
	// queue was drained twice per message. Use the internal consumeData
	// and run makeCalls exactly once.
	km.consumeData(topic, key, msg)
	km.makeCalls()
}
// ConsumeData simulates a message with a byte slice payload. This is the same
// as Consume.
// ConsumeData is a helper function consuming marshalled data. This function is
// used by ConsumeProto by the test case as well as any emit calls of the
// processor being tested.
func (km *Tester) ConsumeData(topic string, key string, data []byte) {
	km.consumeData(topic, key, data)
	km.makeCalls()
}
// consumeData injects one message into the processor and blocks until it
// has been handled, using NOP events as a synchronization barrier.
func (km *Tester) consumeData(topic string, key string, data []byte) {
	// Sends on channels closed after an error/Close panic; recover logs
	// instead of crashing the test.
	defer func() {
		if r := recover(); r != nil {
			log.Printf("tester: panic ConsumeData: %+v\n", r)
		}
	}()
	km.offset++
	kafkaMsg := &kafka.Message{
		Topic:     topic,
		Partition: 0,
		Offset:    km.offset,
		Key:       key,
		Value:     data,
	}
	// send message to processing goroutine
	km.incomingEvents <- kafkaMsg
	// wait until partition processing goroutine processes message by requiring it to read
	// the following NOP.
	km.incomingEvents <- &kafka.NOP{Partition: 0}
	km.incomingEvents <- &kafka.NOP{Partition: 0}
	// wait until processor goroutine is ready
	km.incomingEvents <- &kafka.NOP{Partition: -1}
	km.incomingEvents <- &kafka.NOP{Partition: -1}
}

// consumeError injects a consumer error into the processor.
func (km *Tester) consumeError(err error) {
	km.incomingEvents <- &kafka.Error{Err: err}
	// no need to send NOP (actually we can't, otherwise we might panic
	// as the channels are already closed due to the error first).
}
// ValueForKey attempts to get a value from KafkaMock's storage and decodes
// it with the configured codec; returns nil if the key is absent. Storage
// or codec errors fail the test via ensure.
func (km *Tester) ValueForKey(key string) interface{} {
	item, err := km.storage.Get(key)
	ensure.Nil(km.t, err)
	if item == nil {
		return nil
	}
	value, err := km.codec.Decode(item)
	ensure.Nil(km.t, err)
	return value
}

// SetValue encodes value with the configured codec and stores it under key,
// failing the test on any error.
func (km *Tester) SetValue(key string, value interface{}) {
	data, err := km.codec.Encode(value)
	ensure.Nil(km.t, err)
	err = km.storage.Set(key, data)
	ensure.Nil(km.t, err)
}

// ReplaceEmitHandler replaces the emitter of the producer mock.
func (km *Tester) ReplaceEmitHandler(emitter EmitHandler) {
	km.producerMock.emitter = emitter
}
// ExpectEmit ensures a message exists in passed topic and key. The message may be
// inspected/unmarshalled by a passed expecter function. The first matching
// message is consumed (removed from the emitted list); if none matches, the
// test fails.
func (km *Tester) ExpectEmit(topic string, key string, expecter func(value []byte)) {
	for i := 0; i < len(km.emitted); i++ {
		msg := km.emitted[i]
		if msg.Topic != topic || msg.Key != key {
			continue
		}
		if expecter != nil {
			expecter(msg.Value)
		}
		// remove element from slice
		// https://github.com/golang/go/wiki/SliceTricks
		km.emitted = append(km.emitted[:i], km.emitted[i+1:]...)
		return
	}
	km.t.Errorf("Expected emit for key %s in topic %s was not present.", key, topic)
}

// ExpectAllEmitted calls passed expected-emit-handler function for all emitted values and clears the
// emitted values.
func (km *Tester) ExpectAllEmitted(handler func(topic string, key string, value []byte)) {
	for _, emitted := range km.emitted {
		handler(emitted.Topic, emitted.Key, emitted.Value)
	}
	km.emitted = make([]*kafka.Message, 0)
}
// Finish marks the kafkamock that there is no emit to be expected.
// Set @param fail to true, if kafkamock is supposed to fail the test case in case
// of remaining emits.
// Clears the list of emits either case.
// This should always be called at the end of a test case to make sure
// no emits of prior test cases are stuck in the list and mess with the test results.
func (km *Tester) Finish(fail bool) {
	if len(km.emitted) > 0 {
		if fail {
			km.t.Errorf("The following emits are still in the list, although it's supposed to be empty:")
			for _, emitted := range km.emitted {
				km.t.Errorf("    topic: %s key: %s", emitted.Topic, emitted.Key)
			}
		}
	}
	// Always reset, even when not failing.
	km.emitted = make([]*kafka.Message, 0)
}
// handleEmit handles an Emit-call on the producerMock.
// This takes care of queueing calls
// to handled topics or putting the emitted messages in the emitted-messages-list.
// Emits to the group table topic are acknowledged immediately and dropped.
func (km *Tester) handleEmit(topic string, key string, value []byte) *kafka.Promise {
	promise := kafka.NewPromise()
	if topic == km.groupTopic {
		return promise.Finish(nil)
	}
	if _, hasTopic := km.handledTopics[topic]; hasTopic {
		// Topic is consumed by the processor itself: loop the message back
		// after the current consume call has finished.
		km.newCall(func() {
			km.consumeData(topic, key, value)
		})
	} else {
		// Foreign topic: record for inspection via ExpectEmit and friends.
		km.offset++
		km.emitted = append(km.emitted, &kafka.Message{
			Topic:  topic,
			Key:    key,
			Value:  value,
			Offset: km.offset,
		})
	}
	return promise.Finish(nil)
}

// creates a new call being executed after the consume function has run.
// Each queued call adds one unit to the WaitGroup; makeCalls Done()s it.
func (km *Tester) newCall(call func()) {
	km.wg.Add(1)
	km.callQueue = append(km.callQueue, call)
}
// executes all calls on the call queue.
// Executing calls may put new calls on the queue (if they emit something),
// so this function executes until no further calls are being made.
// The queue is drained in a separate goroutine while wg.Wait blocks until
// every queued call (including newly enqueued ones) has Done()d.
func (km *Tester) makeCalls() {
	go func() {
		for len(km.callQueue) > 0 {
			call := km.callQueue[0]
			call()
			km.callQueue = km.callQueue[1:]
			km.wg.Done()
		}
	}()
	km.wg.Wait()
}

// ClearValues resets everything that might be in the storage by deleting everything
// using the iterator. Iterator and delete errors are ignored (best effort).
func (km *Tester) ClearValues() {
	it, _ := km.storage.Iterator()
	for it.Next() {
		km.storage.Delete(string(it.Key()))
	}
}
// consumerMock implements the kafka consumer interface backed by the Tester.
type consumerMock struct {
	tester *Tester
}

// newConsumerMock creates a consumer mock bound to the given tester.
func newConsumerMock(tester *Tester) *consumerMock {
	return &consumerMock{
		tester: tester,
	}
}

// Events returns the event channel of the consumer mock.
func (km *consumerMock) Events() <-chan kafka.Event {
	return km.tester.consumerEvents
}

// Subscribe marks the consumer to subscribe to passed topics.
// The consumerMock simply marks the topics as handled to make sure to
// pass emitted messages back to the processor. It also starts the
// initProtocol goroutine that drives the event stream.
func (km *consumerMock) Subscribe(topics map[string]int64) error {
	for topic := range topics {
		km.tester.handledTopics[topic] = true
	}
	go km.tester.initProtocol()
	return nil
}

// AddGroupPartition adds a partition for group consumption.
// No action required in the mock.
func (km *consumerMock) AddGroupPartition(partition int32) {
}

// Commit commits an offset.
// No action required in the mock.
func (km *consumerMock) Commit(topic string, partition int32, offset int64) error {
	return nil
}

// AddPartition marks the topic as a table topic.
// The mock has to know the group table topic to ignore emit calls (which would never be consumed).
func (km *consumerMock) AddPartition(topic string, partition int32, initialOffset int64) error {
	return nil
}

// RemovePartition removes a partition from a topic.
// No action required in the mock.
func (km *consumerMock) RemovePartition(topic string, partition int32) error {
	return nil
}

// Close closes the consumer by closing both event channels, which also
// terminates initProtocol's forwarding loop.
func (km *consumerMock) Close() error {
	close(km.tester.incomingEvents)
	close(km.tester.consumerEvents)
	fmt.Println("closed consumer mock")
	return nil
}
// topicMgrMock is a no-op topic manager backed by the Tester.
type topicMgrMock struct {
	tester *Tester
}

// EnsureTableExists checks that a table (log-compacted topic) exists, or create one if possible.
// No action required in the mock.
func (tm *topicMgrMock) EnsureTableExists(topic string, npar int) error {
	return nil
}

// EnsureStreamExists checks that a stream topic exists, or create one if possible.
// No action required in the mock.
func (tm *topicMgrMock) EnsureStreamExists(topic string, npar int) error {
	return nil
}

// Partitions returns the number of partitions of a topic, that are assigned to the running
// instance, i.e. it doesn't represent all partitions of a topic.
// The mock always reports a single partition 0 and marks the topic handled.
func (tm *topicMgrMock) Partitions(topic string) ([]int32, error) {
	tm.tester.handledTopics[topic] = true
	return []int32{0}, nil
}

// Close closes the topic manager.
// No action required in the mock.
func (tm *topicMgrMock) Close() error {
	return nil
}

// newTopicMgrMock creates a topic-manager mock bound to the given tester.
func newTopicMgrMock(tester *Tester) *topicMgrMock {
	return &topicMgrMock{
		tester: tester,
	}
}
// producerMock routes all emits through a replaceable EmitHandler.
type producerMock struct {
	emitter EmitHandler
}

// newProducerMock creates a producer mock using the given emit handler.
func newProducerMock(emitter EmitHandler) *producerMock {
	return &producerMock{
		emitter: emitter,
	}
}

// Emit emits messages to arbitrary topics.
// The mock simply forwards the emit to the KafkaMock which takes care of queueing calls
// to handled topics or putting the emitted messages in the emitted-messages-list.
func (p *producerMock) Emit(topic string, key string, value []byte) *kafka.Promise {
	return p.emitter(topic, key, value)
}

// Close closes the producer mock.
// No action required in the mock.
func (p *producerMock) Close() error {
	fmt.Println("Closing producer mock")
	return nil
}
Typo fix in tester code
package tester
import (
"fmt"
"hash"
"log"
"sync"
"github.com/facebookgo/ensure"
"github.com/golang/protobuf/proto"
"github.com/lovoo/goka/codec"
"github.com/lovoo/goka/kafka"
"github.com/lovoo/goka/storage"
)
// Codec decodes and encodes from and to []byte.
type Codec interface {
	Encode(value interface{}) (data []byte, err error)
	Decode(data []byte) (value interface{}, err error)
}

// EmitHandler abstracts a function that allows to overwrite kafkamock's Emit
// function to simulate producer errors.
type EmitHandler func(topic string, key string, value []byte) *kafka.Promise

// Tester allows interacting with a test processor.
type Tester struct {
	t            T
	consumerMock *consumerMock
	producerMock *producerMock
	topicMgrMock *topicMgrMock
	emitHandler  EmitHandler
	storage      storage.Storage
	codec        Codec // codec used for the group table values
	offset       int64 // offset assigned to the next simulated message
	tableOffset  int64 // offset used when replaying the group table

	// incomingEvents is fed by the test case; initProtocol forwards it to
	// consumerEvents, which the consumer mock exposes to the processor.
	incomingEvents chan kafka.Event
	consumerEvents chan kafka.Event

	// Stores a map of all topics that are handled by the processor.
	// Every time an emit is called, those messages for handled topics are relayed
	// after the consume-function has finished.
	// All other messages are stored in the emitted-slice for further inspection
	handledTopics map[string]bool
	groupTopic    string
	emitted       []*kafka.Message

	// groupTableCreator, if set, yields key/value pairs used to
	// pre-populate the group table during initProtocol.
	groupTableCreator func() (string, []byte)
	callQueue         []func()
	wg                sync.WaitGroup
}

// T abstracts the interface we assume from the test case.
// Will most likely be *testing.T
type T interface {
	Errorf(format string, args ...interface{})
	Fatalf(format string, args ...interface{})
	Fatal(a ...interface{})
}

// New returns a new testprocessor mocking every external service
// It should be passed as goka.WithTester to goka.NewProcessor. It essentially
// replaces the storage/consumer/producer/topicmanager with a mock.
// For example, a normal call to NewProcessor like this
//   goka.NewProcessor(brokers, group, subscriptions,
//     option_a,
//     option_b,
//     option_c,
//   )
// would become in the unit test:
//   tester := tester.New(t)
//   NewProcessor(brokers, group, subscriptions,
//     option_a,
//     option_b,
//     option_c,
//     WithTester(tester),
//   )
func New(t T) *Tester {
	tester := &Tester{
		storage:        storage.NewMemory(),
		t:              t,
		incomingEvents: make(chan kafka.Event),
		consumerEvents: make(chan kafka.Event),
		handledTopics:  make(map[string]bool),
		codec:          new(codec.Bytes),
	}
	tester.consumerMock = newConsumerMock(tester)
	tester.producerMock = newProducerMock(tester.handleEmit)
	tester.topicMgrMock = newTopicMgrMock(tester)
	return tester
}

// SetCodec sets the codec for the group table.
func (km *Tester) SetCodec(codec Codec) *Tester {
	km.codec = codec
	return km
}

// SetGroupTableCreator sets a creator for the group table.
func (km *Tester) SetGroupTableCreator(creator func() (string, []byte)) {
	km.groupTableCreator = creator
}

// TopicManagerBuilder returns a builder that always hands out the shared
// topic-manager mock.
func (km *Tester) TopicManagerBuilder() kafka.TopicManagerBuilder {
	return func(brokers []string) (kafka.TopicManager, error) {
		return km.topicMgrMock, nil
	}
}

// ConsumerBuilder returns a builder that hands out the shared consumer mock
// and records the processor's group table topic ("<group>-table").
func (km *Tester) ConsumerBuilder() kafka.ConsumerBuilder {
	return func(b []string, group, clientID string) (kafka.Consumer, error) {
		if km.groupTopic == "" {
			km.groupTopic = fmt.Sprintf("%s-table", group)
		}
		return km.consumerMock, nil
	}
}

// ProducerBuilder returns a builder that hands out the shared producer mock.
func (km *Tester) ProducerBuilder() kafka.ProducerBuilder {
	return func(b []string, cid string, hasher func() hash.Hash32) (kafka.Producer, error) {
		return km.producerMock, nil
	}
}

// StorageBuilder returns a builder that hands out the single in-memory
// storage shared by all topics/partitions.
func (km *Tester) StorageBuilder() storage.Builder {
	return func(topic string, partition int32) (storage.Storage, error) {
		return km.storage, nil
	}
}
// initProtocol initiates the protocol with the client basically making the
// KafkaMock usable: it assigns partition 0, optionally replays the group
// table, signals EOF/readiness, then forwards incomingEvents.
func (km *Tester) initProtocol() {
	// recover: sends on channels closed by Close would otherwise panic.
	defer func() {
		if r := recover(); r != nil {
			log.Printf("tester: panic initProtocol: %+v", r)
		}
	}()
	km.consumerEvents <- &kafka.Assignment{
		0: -1,
	}
	// Replay the group table until the creator yields an empty key or nil value.
	for km.groupTableCreator != nil {
		key, value := km.groupTableCreator()
		if key == "" || value == nil {
			break
		}
		km.consumerEvents <- &kafka.Message{
			Topic:     km.groupTopic,
			Partition: 0,
			Offset:    km.tableOffset,
			Key:       key,
			Value:     value,
		}
	}
	km.consumerEvents <- &kafka.EOF{Partition: 0}
	km.consumerEvents <- &kafka.NOP{Partition: -1}
	// Relay everything the test case feeds in from now on.
	for ev := range km.incomingEvents {
		km.consumerEvents <- ev
	}
}
// ConsumeProto simulates a message on kafka in a topic with a key.
// The proto message is marshaled before being handed to the processor.
func (km *Tester) ConsumeProto(topic string, key string, msg proto.Message) {
	data, err := proto.Marshal(msg)
	if err != nil && km.t != nil {
		km.t.Errorf("Error marshaling message for consume: %v", err)
	}
	km.consumeData(topic, key, data)
	km.makeCalls()
}

// ConsumeString simulates a message with a string payload.
func (km *Tester) ConsumeString(topic string, key string, msg string) {
	km.consumeData(topic, key, []byte(msg))
	km.makeCalls()
}

// Consume simulates a message with a byte slice payload.
func (km *Tester) Consume(topic string, key string, msg []byte) {
	km.consumeData(topic, key, msg)
	km.makeCalls()
}

// ConsumeData simulates a message with a byte slice payload. This is the same
// as Consume.
// ConsumeData is a helper function consuming marshalled data. This function is
// used by ConsumeProto by the test case as well as any emit calls of the
// processor being tested.
func (km *Tester) ConsumeData(topic string, key string, data []byte) {
	km.consumeData(topic, key, data)
	km.makeCalls()
}
// consumeData injects one message into the processor and blocks until it
// has been handled, using NOP events as a synchronization barrier.
func (km *Tester) consumeData(topic string, key string, data []byte) {
	// recover: the channels may already be closed when a test has finished.
	defer func() {
		if r := recover(); r != nil {
			log.Printf("tester: panic ConsumeData: %+v\n", r)
		}
	}()
	km.offset++
	kafkaMsg := &kafka.Message{
		Topic:     topic,
		Partition: 0,
		Offset:    km.offset,
		Key:       key,
		Value:     data,
	}
	// send message to processing goroutine
	km.incomingEvents <- kafkaMsg
	// wait until partition processing goroutine processes message by requiring it to read
	// the following NOP.
	km.incomingEvents <- &kafka.NOP{Partition: 0}
	km.incomingEvents <- &kafka.NOP{Partition: 0}
	// wait until processor goroutine is ready
	km.incomingEvents <- &kafka.NOP{Partition: -1}
	km.incomingEvents <- &kafka.NOP{Partition: -1}
}

// consumeError injects a consumer error into the processor.
func (km *Tester) consumeError(err error) {
	km.incomingEvents <- &kafka.Error{Err: err}
	// no need to send NOP (actually we can't, otherwise we might panic
	// as the channels are already closed due to the error first).
}
// ValueForKey attempts to get a value from KafkaMock's storage, decoding it
// with the configured codec; returns nil when the key is absent.
func (km *Tester) ValueForKey(key string) interface{} {
	item, err := km.storage.Get(key)
	ensure.Nil(km.t, err)
	if item == nil {
		return nil
	}
	value, err := km.codec.Decode(item)
	ensure.Nil(km.t, err)
	return value
}

// SetValue encodes value with the configured codec and stores it under key.
func (km *Tester) SetValue(key string, value interface{}) {
	data, err := km.codec.Encode(value)
	ensure.Nil(km.t, err)
	err = km.storage.Set(key, data)
	ensure.Nil(km.t, err)
}

// ReplaceEmitHandler replaces the emitter of the producer mock.
func (km *Tester) ReplaceEmitHandler(emitter EmitHandler) {
	km.producerMock.emitter = emitter
}
// ExpectEmit ensures a message exists in passed topic and key. The message may be
// inspected/unmarshalled by a passed expecter function. The first matching
// message is removed from the emitted list; if none matches, the test fails.
func (km *Tester) ExpectEmit(topic string, key string, expecter func(value []byte)) {
	for i := 0; i < len(km.emitted); i++ {
		msg := km.emitted[i]
		if msg.Topic != topic || msg.Key != key {
			continue
		}
		if expecter != nil {
			expecter(msg.Value)
		}
		// remove element from slice
		// https://github.com/golang/go/wiki/SliceTricks
		km.emitted = append(km.emitted[:i], km.emitted[i+1:]...)
		return
	}
	km.t.Errorf("Expected emit for key %s in topic %s was not present.", key, topic)
}

// ExpectAllEmitted calls passed expected-emit-handler function for all emitted values and clears the
// emitted values.
func (km *Tester) ExpectAllEmitted(handler func(topic string, key string, value []byte)) {
	for _, emitted := range km.emitted {
		handler(emitted.Topic, emitted.Key, emitted.Value)
	}
	km.emitted = make([]*kafka.Message, 0)
}

// Finish marks the kafkamock that there is no emit to be expected.
// Set @param fail to true, if kafkamock is supposed to fail the test case in case
// of remaining emits.
// Clears the list of emits either case.
// This should always be called at the end of a test case to make sure
// no emits of prior test cases are stuck in the list and mess with the test results.
func (km *Tester) Finish(fail bool) {
	if len(km.emitted) > 0 {
		if fail {
			km.t.Errorf("The following emits are still in the list, although it's supposed to be empty:")
			for _, emitted := range km.emitted {
				km.t.Errorf("    topic: %s key: %s", emitted.Topic, emitted.Key)
			}
		}
	}
	// Always reset, even when not failing.
	km.emitted = make([]*kafka.Message, 0)
}
// handleEmit handles an Emit-call on the producerMock.
// This takes care of queueing calls
// to handled topics or putting the emitted messages in the emitted-messages-list.
// Emits to the group table topic are acknowledged immediately and dropped.
func (km *Tester) handleEmit(topic string, key string, value []byte) *kafka.Promise {
	promise := kafka.NewPromise()
	if topic == km.groupTopic {
		return promise.Finish(nil)
	}
	if _, hasTopic := km.handledTopics[topic]; hasTopic {
		// Loop the message back into the processor after the current
		// consume call has finished.
		km.newCall(func() {
			km.consumeData(topic, key, value)
		})
	} else {
		// Foreign topic: record for inspection via ExpectEmit and friends.
		km.offset++
		km.emitted = append(km.emitted, &kafka.Message{
			Topic:  topic,
			Key:    key,
			Value:  value,
			Offset: km.offset,
		})
	}
	return promise.Finish(nil)
}

// creates a new call being executed after the consume function has run.
// Each queued call adds one unit to the WaitGroup; makeCalls Done()s it.
func (km *Tester) newCall(call func()) {
	km.wg.Add(1)
	km.callQueue = append(km.callQueue, call)
}

// executes all calls on the call queue.
// Executing calls may put new calls on the queue (if they emit something),
// so this function executes until no further calls are being made.
// The queue is drained in a goroutine while wg.Wait blocks for completion.
func (km *Tester) makeCalls() {
	go func() {
		for len(km.callQueue) > 0 {
			call := km.callQueue[0]
			call()
			km.callQueue = km.callQueue[1:]
			km.wg.Done()
		}
	}()
	km.wg.Wait()
}
// ClearValues resets everything that might be in the storage by deleting everything
// using the iterator. Errors are ignored (best effort).
func (km *Tester) ClearValues() {
	it, _ := km.storage.Iterator()
	for it.Next() {
		km.storage.Delete(string(it.Key()))
	}
}

// consumerMock implements the kafka consumer interface backed by the Tester.
type consumerMock struct {
	tester *Tester
}

// newConsumerMock creates a consumer mock bound to the given tester.
func newConsumerMock(tester *Tester) *consumerMock {
	return &consumerMock{
		tester: tester,
	}
}

// Events returns the event channel of the consumer mock.
func (km *consumerMock) Events() <-chan kafka.Event {
	return km.tester.consumerEvents
}

// Subscribe marks the consumer to subscribe to passed topics.
// The consumerMock simply marks the topics as handled to make sure to
// pass emitted messages back to the processor. It also starts the
// initProtocol goroutine that drives the event stream.
func (km *consumerMock) Subscribe(topics map[string]int64) error {
	for topic := range topics {
		km.tester.handledTopics[topic] = true
	}
	go km.tester.initProtocol()
	return nil
}

// AddGroupPartition adds a partition for group consumption.
// No action required in the mock.
func (km *consumerMock) AddGroupPartition(partition int32) {
}

// Commit commits an offset.
// No action required in the mock.
func (km *consumerMock) Commit(topic string, partition int32, offset int64) error {
	return nil
}

// AddPartition marks the topic as a table topic.
// The mock has to know the group table topic to ignore emit calls (which would never be consumed)
func (km *consumerMock) AddPartition(topic string, partition int32, initialOffset int64) error {
	return nil
}

// RemovePartition removes a partition from a topic.
// No action required in the mock.
func (km *consumerMock) RemovePartition(topic string, partition int32) error {
	return nil
}

// Close closes the consumer by closing both event channels, which also
// terminates initProtocol's forwarding loop.
func (km *consumerMock) Close() error {
	close(km.tester.incomingEvents)
	close(km.tester.consumerEvents)
	fmt.Println("closed consumer mock")
	return nil
}
// topicMgrMock is a no-op topic manager backed by the Tester.
type topicMgrMock struct {
	tester *Tester
}

// EnsureTableExists checks that a table (log-compacted topic) exists, or create one if possible.
// No action required in the mock.
func (tm *topicMgrMock) EnsureTableExists(topic string, npar int) error {
	return nil
}

// EnsureStreamExists checks that a stream topic exists, or create one if possible.
// No action required in the mock.
func (tm *topicMgrMock) EnsureStreamExists(topic string, npar int) error {
	return nil
}

// Partitions returns the number of partitions of a topic, that are assigned to the running
// instance, i.e. it doesn't represent all partitions of a topic.
// The mock always reports a single partition 0 and marks the topic handled.
func (tm *topicMgrMock) Partitions(topic string) ([]int32, error) {
	tm.tester.handledTopics[topic] = true
	return []int32{0}, nil
}

// Close closes the topic manager.
// No action required in the mock.
func (tm *topicMgrMock) Close() error {
	return nil
}

// newTopicMgrMock creates a topic-manager mock bound to the given tester.
func newTopicMgrMock(tester *Tester) *topicMgrMock {
	return &topicMgrMock{
		tester: tester,
	}
}

// producerMock routes all emits through a replaceable EmitHandler.
type producerMock struct {
	emitter EmitHandler
}

// newProducerMock creates a producer mock using the given emit handler.
func newProducerMock(emitter EmitHandler) *producerMock {
	return &producerMock{
		emitter: emitter,
	}
}

// Emit emits messages to arbitrary topics.
// The mock simply forwards the emit to the KafkaMock which takes care of queueing calls
// to handled topics or putting the emitted messages in the emitted-messages-list
func (p *producerMock) Emit(topic string, key string, value []byte) *kafka.Promise {
	return p.emitter(topic, key, value)
}

// Close closes the producer mock.
// No action required in the mock.
func (p *producerMock) Close() error {
	fmt.Println("Closing producer mock")
	return nil
}
|
package tasks
import (
"bytes"
"context"
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"io"
"mime/multipart"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"time"
"github.com/golang/protobuf/ptypes"
any "github.com/golang/protobuf/ptypes/any"
"pixur.org/pixur/be/imaging"
"pixur.org/pixur/be/schema"
"pixur.org/pixur/be/schema/db"
tab "pixur.org/pixur/be/schema/tables"
"pixur.org/pixur/be/status"
)
// UpsertPicTask inserts or updates a pic with the provided information.
type UpsertPicTask struct {
	// Deps
	PixPath    string       // root directory where pic and thumbnail files live
	DB         db.DB        // database handle used to open the job
	HTTPClient *http.Client // used to fetch FileURL when no File is provided
	// os functions, injected so tests can intercept filesystem access
	TempFile func(dir, prefix string) (*os.File, error)
	Rename   func(oldpath, newpath string) error
	MkdirAll func(path string, perm os.FileMode) error
	Now      func() time.Time
	// Inputs
	FileURL string         // remote location of the pic; may be empty when File is set
	File    multipart.File // uploaded file contents; may be nil when FileURL is set
	Md5Hash []byte         // optional expected MD5 used for early dedup and download verification
	// Header is the name (and size) of the file. Currently only the Name is used. If the name is
	// absent, UpsertPicTask will try to derive a name automatically from the FileURL.
	Header   FileHeader
	TagNames []string
	// Ext is additional extra data associated with this pic. If a key is present in both the
	// new pic and the existing pic, Upsert will fail.
	Ext map[string]*any.Any
	// TODO: eventually take the Referer[sic]. This is to pass to HTTPClient when retrieving the
	// pic.
	// Results
	CreatedPic *schema.Pic // the inserted or merged pic; set only on success
}
// FileHeader describes client-supplied metadata about an uploaded file:
// its name and its size in bytes.
type FileHeader struct {
	Name string
	Size int64
}
// Run executes the upsert inside a single job/transaction. On commit failure
// the already-renamed pic and thumbnail files are removed so no orphan files
// are left behind, and CreatedPic is reset to nil.
func (t *UpsertPicTask) Run(ctx context.Context) (stsCap status.S) {
	j, err := tab.NewJob(ctx, t.DB)
	if err != nil {
		return status.InternalError(err, "can't create job")
	}
	// cleanUp presumably rolls back j and folds any rollback error into
	// stsCap via the pointer — defined elsewhere in the package; confirm.
	defer cleanUp(j, &stsCap)
	if sts := t.runInternal(ctx, j); sts != nil {
		return sts
	}
	// TODO: Check if this delete the original pic on a failed merge.
	if err := j.Commit(); err != nil {
		os.Remove(t.CreatedPic.Path(t.PixPath))
		os.Remove(t.CreatedPic.ThumbnailPath(t.PixPath))
		t.CreatedPic = nil
		return status.InternalError(err, "can't commit job")
	}
	return nil
}
// runInternal performs the whole upsert inside job j: capability check, input
// validation, hash-based dedup, image decoding, pic/tag/ident row creation,
// and finally moving the image and thumbnail files into place. On success
// t.CreatedPic is set; the caller (Run) is responsible for committing j.
func (t *UpsertPicTask) runInternal(ctx context.Context, j *tab.Job) status.S {
	u, sts := requireCapability(ctx, j, schema.User_PIC_CREATE)
	if sts != nil {
		return sts
	}
	// Exactly one source is required: a URL or an uploaded file.
	var furl *url.URL
	if t.FileURL != "" {
		fu, sts := validateURL(t.FileURL)
		if sts != nil {
			return sts
		}
		furl = fu
	} else if t.File == nil {
		return status.InvalidArgument(nil, "No pic specified")
	}
	now := t.Now()
	// TODO: test this
	if len(t.Header.Name) > 1024 {
		return status.InvalidArgument(nil, "filename is too long")
	}
	pfs := &schema.Pic_FileSource{
		CreatedTs: schema.ToTspb(now),
		UserId:    u.UserId,
		Name:      t.Header.Name,
	}
	if furl != nil {
		pfs.Url = furl.String()
	}
	// Fast path: if the caller supplied an MD5 and the pic already exists,
	// merge the new metadata instead of downloading/decoding again.
	if len(t.Md5Hash) != 0 {
		p, sts := findExistingPic(j, schema.PicIdent_MD5, t.Md5Hash)
		if sts != nil {
			return sts
		}
		if p != nil {
			if p.HardDeleted() {
				if !p.GetDeletionStatus().Temporary {
					return status.InvalidArgument(nil, "Can't upload deleted pic.")
				}
				// Fallthrough. We still need to download, and then remerge.
			} else {
				t.CreatedPic = p
				return mergePic(j, p, now, pfs, t.Ext, t.TagNames, u.UserId)
			}
		}
	}
	f, fh, sts := t.prepareFile(ctx, t.File, t.Header, furl)
	if sts != nil {
		return sts
	}
	// after preparing the f, fh, is authoritative.
	pfs.Name = fh.Name
	// on success, the name of f will change and it won't be removed.
	defer os.Remove(f.Name())
	defer f.Close()
	md5Hash, sha1Hash, sha256Hash, sts := generatePicHashes(io.NewSectionReader(f, 0, fh.Size))
	if sts != nil {
		// TODO: test this case
		return sts
	}
	// If the caller predicted an MD5, the downloaded/copied bytes must match.
	if len(t.Md5Hash) != 0 && !bytes.Equal(t.Md5Hash, md5Hash) {
		return status.InvalidArgumentf(nil, "Md5 hash mismatch %x != %x", t.Md5Hash, md5Hash)
	}
	im, sts := imaging.ReadImage(io.NewSectionReader(f, 0, fh.Size))
	if sts != nil {
		return sts
	}
	defer im.Close()
	// Map the decoded image format onto the pic mime enum.
	var immime schema.Pic_Mime
	imf := im.Format()
	switch {
	case imf.IsJpeg():
		immime = schema.Pic_JPEG
	case imf.IsGif():
		immime = schema.Pic_GIF
	case imf.IsPng():
		immime = schema.Pic_PNG
	case imf.IsWebm():
		immime = schema.Pic_WEBM
	default:
		return status.InvalidArgument(nil, "Unknown image type", imf)
	}
	// A non-nil duration means an animated image/video.
	var imanim *schema.AnimationInfo
	if dur, sts := im.Duration(); sts != nil {
		return sts
	} else if dur != nil {
		imanim = &schema.AnimationInfo{
			Duration: ptypes.DurationProto(*dur),
		}
	}
	// Still double check that the sha1 hash is not in use, even if the md5 one was
	// checked up at the beginning of the function.
	p, sts := findExistingPic(j, schema.PicIdent_SHA1, sha1Hash)
	if sts != nil {
		return sts
	}
	if p != nil {
		if p.HardDeleted() {
			if !p.GetDeletionStatus().Temporary {
				return status.InvalidArgument(nil, "Can't upload deleted pic.")
			}
			// fall through, picture needs to be undeleted.
		} else {
			t.CreatedPic = p
			return mergePic(j, p, now, pfs, t.Ext, t.TagNames, u.UserId)
		}
	} else {
		// Brand-new pic: insert the row plus all its identity hashes.
		picID, err := j.AllocID()
		if err != nil {
			return status.InternalError(err, "can't allocate id")
		}
		width, height := im.Dimensions()
		p = &schema.Pic{
			PicId:         picID,
			FileSize:      fh.Size,
			Mime:          immime,
			Width:         int64(width),
			Height:        int64(height),
			AnimationInfo: imanim,
			// ModifiedTime is set in mergePic
		}
		p.SetCreatedTime(now)
		if err := j.InsertPic(p); err != nil {
			return status.InternalError(err, "Can't insert")
		}
		if sts := insertPicHashes(j, p.PicId, md5Hash, sha1Hash, sha256Hash); sts != nil {
			return sts
		}
		if sts := insertPerceptualHash(j, p.PicId, im); sts != nil {
			return sts
		}
	}
	// Write the thumbnail to a temp file in the same directory tree so the
	// final rename stays on one partition.
	ft, err := t.TempFile(t.PixPath, "__")
	if err != nil {
		return status.InternalError(err, "Can't create tempfile")
	}
	defer os.Remove(ft.Name())
	defer ft.Close()
	thumb, sts := im.Thumbnail()
	if sts != nil {
		return sts
	}
	if sts := thumb.Write(ft); sts != nil {
		return sts
	}
	if err := mergePic(j, p, now, pfs, t.Ext, t.TagNames, u.UserId); err != nil {
		return err
	}
	if err := t.MkdirAll(filepath.Dir(p.Path(t.PixPath)), 0770); err != nil {
		return status.InternalError(err, "Can't prepare pic dir")
	}
	// Close before rename: required on Windows, harmless elsewhere.
	if err := f.Close(); err != nil {
		return status.InternalErrorf(err, "Can't close %v", f.Name())
	}
	if err := t.Rename(f.Name(), p.Path(t.PixPath)); err != nil {
		return status.InternalErrorf(err, "Can't rename %v to %v", f.Name(), p.Path(t.PixPath))
	}
	if err := ft.Close(); err != nil {
		return status.InternalErrorf(err, "Can't close %v", ft.Name())
	}
	// TODO: by luck the format created by imaging and the mime type decided by thumbnail are the
	// same. Thumbnails should be made into proper rows with their own mime type.
	if err := t.Rename(ft.Name(), p.ThumbnailPath(t.PixPath)); err != nil {
		// Undo the first rename so we don't leave a pic without a thumbnail.
		os.Remove(p.Path(t.PixPath))
		return status.InternalErrorf(err, "Can't rename %v to %v", ft.Name(), p.ThumbnailPath(t.PixPath))
	}
	t.CreatedPic = p
	return nil
}
// mergePic folds new upload metadata into an existing (or just-inserted) pic:
// it bumps the modified time, clears a temporary deletion, upserts tags, adds
// the file source (once per user), merges extension data, and persists the
// pic. Fails if an extension key already exists on the pic.
func mergePic(j *tab.Job, p *schema.Pic, now time.Time, pfs *schema.Pic_FileSource,
	ext map[string]*any.Any, tagNames []string, userID int64) status.S {
	p.SetModifiedTime(now)
	if ds := p.GetDeletionStatus(); ds != nil {
		if ds.Temporary {
			// If the pic was soft deleted, it stays deleted, unless it was temporary.
			p.DeletionStatus = nil
		}
	}
	if err := upsertTags(j, tagNames, p.PicId, now, userID); err != nil {
		return err
	}
	// ignore sources from the same user after the first one
	userFirstSource := true
	if userID != schema.AnonymousUserID {
		for _, s := range p.Source {
			if s.UserId == userID {
				userFirstSource = false
				break
			}
		}
	}
	if userFirstSource {
		// Okay, it's their first time uploading, let's consider adding it.
		if pfs.Url != "" || len(p.Source) == 0 {
			// Only accept the source if new information is being added, or there isn't any already.
			// Ignore pfs.Name and pfs.Referrer as those aren't sources.
			p.Source = append(p.Source, pfs)
		}
	}
	// Lazily create the map so an untouched pic keeps a nil Ext.
	if len(ext) != 0 && len(p.Ext) == 0 {
		p.Ext = make(map[string]*any.Any)
	}
	for k, v := range ext {
		// Duplicate keys are a caller error, not silently overwritten.
		if _, present := p.Ext[k]; present {
			return status.InvalidArgumentf(nil, "duplicate key %v in extension map", k)
		}
		p.Ext[k] = v
	}
	if err := j.UpdatePic(p); err != nil {
		return status.InternalError(err, "can't update pic")
	}
	return nil
}
// upsertTags attaches rawTags to the pic identified by picID. Names are
// cleaned first; tags already attached to the pic are skipped, existing Tag
// rows get their usage count bumped, unknown names become new Tag rows, and a
// PicTag row is created for every newly attached tag.
func upsertTags(j *tab.Job, rawTags []string, picID int64, now time.Time, userID int64) status.S {
	newTagNames, sts := cleanTagNames(rawTags)
	if sts != nil {
		return sts
	}
	attachedTags, _, sts := findAttachedPicTags(j, picID)
	if sts != nil {
		return sts
	}
	unattachedTagNames := findUnattachedTagNames(attachedTags, newTagNames)
	existingTags, unknownNames, sts := findExistingTagsByName(j, unattachedTagNames)
	if sts != nil {
		return sts
	}
	// BUG FIX: this previously tested `err != nil` — a different variable
	// that is always nil at this point — so failures from
	// updateExistingTags were silently dropped.
	if sts := updateExistingTags(j, existingTags, now); sts != nil {
		return sts
	}
	newTags, sts := createNewTags(j, unknownNames, now)
	if sts != nil {
		return sts
	}
	existingTags = append(existingTags, newTags...)
	if _, sts := createPicTags(j, existingTags, picID, now, userID); sts != nil {
		return sts
	}
	return nil
}
// findAttachedPicTags returns the Tag and PicTag rows currently attached to
// picID, all locked for update. The returned slices are parallel: tags[i]
// corresponds to pts[i].
func findAttachedPicTags(j *tab.Job, picID int64) ([]*schema.Tag, []*schema.PicTag, status.S) {
	pts, err := j.FindPicTags(db.Opts{
		Prefix: tab.PicTagsPrimary{PicId: &picID},
		Lock:   db.LockWrite,
	})
	if err != nil {
		// BUG FIX: message typo "cant't" corrected.
		return nil, nil, status.InternalError(err, "can't find pic tags")
	}
	var tags []*schema.Tag
	// TODO: maybe do something with lock ordering?
	for _, pt := range pts {
		ts, err := j.FindTags(db.Opts{
			Prefix: tab.TagsPrimary{&pt.TagId},
			Limit:  1,
			Lock:   db.LockWrite,
		})
		if err != nil {
			return nil, nil, status.InternalError(err, "can't find tags")
		}
		if len(ts) != 1 {
			// BUG FIX: err is always nil on this branch; wrapping it
			// mislabeled the cause. A PicTag without a matching Tag row is
			// an internal inconsistency, reported without a cause error.
			return nil, nil, status.InternalError(nil, "can't lookup tag")
		}
		tags = append(tags, ts[0])
	}
	return tags, pts, nil
}
// findUnattachedTagNames reports which of newTagNames are not yet attached to
// the pic. While pic tags are the source of truth for attachment, only the
// Tag row is the source of truth for the name, so attachment is decided by
// comparing against the Tag names.
func findUnattachedTagNames(attachedTags []*schema.Tag, newTagNames []string) []string {
	attached := make(map[string]struct{}, len(attachedTags))
	for _, t := range attachedTags {
		attached[t.Name] = struct{}{}
	}
	var missing []string
	for _, name := range newTagNames {
		if _, ok := attached[name]; ok {
			continue
		}
		missing = append(missing, name)
	}
	return missing
}
// findExistingTagsByName looks up each entry of names, returning the Tag rows
// that already exist (locked for update) and the names with no Tag yet. The
// two result slices partition the input and preserve its order.
func findExistingTagsByName(j *tab.Job, names []string) (
	tags []*schema.Tag, unknownNames []string, err status.S) {
	for _, name := range names {
		// Limit 1: tag names are presumably unique — confirm against schema.
		// LockWrite so a later UpdateTag in the same job doesn't race.
		ts, err := j.FindTags(db.Opts{
			Prefix: tab.TagsName{&name},
			Limit:  1,
			Lock:   db.LockWrite,
		})
		if err != nil {
			return nil, nil, status.InternalError(err, "can't find tags")
		}
		if len(ts) == 1 {
			tags = append(tags, ts[0])
		} else {
			unknownNames = append(unknownNames, name)
		}
	}
	// NOTE(review): the loop-local err shadows the named result, so this bare
	// return always yields a nil err — correct for the success path.
	return
}
// updateExistingTags records one more use of each tag: the usage count is
// incremented, the modified time refreshed, and the row persisted.
func updateExistingTags(j *tab.Job, tags []*schema.Tag, now time.Time) status.S {
	for i := range tags {
		t := tags[i]
		t.UsageCount++
		t.SetModifiedTime(now)
		if err := j.UpdateTag(t); err != nil {
			return status.InternalError(err, "can't update tag")
		}
	}
	return nil
}
// createNewTags allocates an id and inserts a Tag row for every name in
// tagNames, each starting with a usage count of one and created/modified
// times of now. Created tags are returned in input order.
func createNewTags(j *tab.Job, tagNames []string, now time.Time) ([]*schema.Tag, status.S) {
	var tags []*schema.Tag
	for _, name := range tagNames {
		tagID, err := j.AllocID()
		if err != nil {
			return nil, status.InternalError(err, "can't allocate id")
		}
		tag := &schema.Tag{
			TagId:      tagID,
			Name:       name,
			UsageCount: 1,
		}
		tag.SetCreatedTime(now)
		tag.SetModifiedTime(now)
		if err := j.InsertTag(tag); err != nil {
			return nil, status.InternalError(err, "can't create tag")
		}
		tags = append(tags, tag)
	}
	return tags, nil
}
// createPicTags inserts a PicTag row for each tag, linking it to picID on
// behalf of userID, and returns the inserted rows in input order.
func createPicTags(j *tab.Job, tags []*schema.Tag, picID int64, now time.Time, userID int64) (
	[]*schema.PicTag, status.S) {
	var picTags []*schema.PicTag
	for _, tag := range tags {
		pt := &schema.PicTag{
			PicId:  picID,
			TagId:  tag.TagId,
			Name:   tag.Name,
			UserId: userID,
		}
		pt.SetCreatedTime(now)
		pt.SetModifiedTime(now)
		if err := j.InsertPicTag(pt); err != nil {
			return nil, status.InternalError(err, "can't create pic tag")
		}
		picTags = append(picTags, pt)
	}
	return picTags, nil
}
// findExistingPic returns the pic whose identifier of the given type matches
// hash, or nil (with a nil status) when no such pic exists. Matching rows are
// locked for update.
func findExistingPic(j *tab.Job, typ schema.PicIdent_Type, hash []byte) (*schema.Pic, status.S) {
	pis, err := j.FindPicIdents(db.Opts{
		Prefix: tab.PicIdentsIdent{
			Type:  &typ,
			Value: &hash,
		},
		Lock:  db.LockWrite,
		Limit: 1,
	})
	if err != nil {
		return nil, status.InternalError(err, "can't find pic idents")
	}
	if len(pis) == 0 {
		// Absence is not an error; callers check for a nil pic.
		return nil, nil
	}
	pics, err := j.FindPics(db.Opts{
		Prefix: tab.PicsPrimary{&pis[0].PicId},
		Lock:   db.LockWrite,
		Limit:  1,
	})
	if err != nil {
		return nil, status.InternalError(err, "can't find pics")
	}
	if len(pics) != 1 {
		// BUG FIX (error-propagation typo): err is always nil here, so
		// wrapping it mislabeled the failure. An ident without a matching
		// pic row is an internal inconsistency, reported without a cause.
		return nil, status.InternalError(nil, "can't lookup pic")
	}
	return pics[0], nil
}
// insertPicHashes stores the three content hashes of a pic (MD5, SHA-1,
// SHA-256) as PicIdent rows keyed by picID.
func insertPicHashes(j *tab.Job, picID int64, md5Hash, sha1Hash, sha256Hash []byte) status.S {
	idents := []struct {
		typ  schema.PicIdent_Type
		val  []byte
		name string
	}{
		{schema.PicIdent_MD5, md5Hash, "md5"},
		{schema.PicIdent_SHA1, sha1Hash, "sha1"},
		{schema.PicIdent_SHA256, sha256Hash, "sha256"},
	}
	for _, id := range idents {
		ident := &schema.PicIdent{
			PicId: picID,
			Type:  id.typ,
			Value: id.val,
		}
		if err := j.InsertPicIdent(ident); err != nil {
			return status.InternalError(err, "can't create "+id.name)
		}
	}
	return nil
}
// insertPerceptualHash computes the DCT-based perceptual hash of im and
// stores it as a PicIdent row for picID, keeping the raw DCT input values
// alongside the hash so they can be recomputed or compared later.
func insertPerceptualHash(j *tab.Job, picID int64, im imaging.PixurImage) status.S {
	hash, inputs, sts := im.PerceptualHash0()
	if sts != nil {
		return sts
	}
	dct0Ident := &schema.PicIdent{
		PicId:      picID,
		Type:       schema.PicIdent_DCT_0,
		Value:      hash,
		Dct0Values: inputs,
	}
	if err := j.InsertPicIdent(dct0Ident); err != nil {
		return status.InternalError(err, "can't create dct0")
	}
	return nil
}
// prepareFile prepares the file for image processing. It materializes the pic
// into a temp file under t.PixPath — either by downloading u (when fd is nil)
// or by copying the uploaded fd — and returns the open file plus an
// authoritative header. On error the temp file is closed and removed.
func (t *UpsertPicTask) prepareFile(ctx context.Context, fd multipart.File, fh FileHeader, u *url.URL) (
	_ *os.File, _ *FileHeader, stsCap status.S) {
	f, err := t.TempFile(t.PixPath, "__")
	if err != nil {
		return nil, nil, status.InternalError(err, "Can't create tempfile")
	}
	// stsCap is the named error result: any failure below triggers cleanup.
	defer func() {
		if stsCap != nil {
			closeAndRemove(f)
		}
	}()
	var h *FileHeader
	if fd == nil {
		// No uploaded file: fetch from the URL instead.
		if header, sts := t.downloadFile(ctx, f, u); sts != nil {
			return nil, nil, sts
		} else {
			h = header
		}
	} else {
		// TODO: maybe extract the filename from the url, if not provided in FileHeader
		// Make sure to copy the file to pixPath, to make sure it's on the right partition.
		// Also get a copy of the size. We don't want to move the file if it is on the
		// same partition, because then we can't retry the task on failure.
		if n, err := io.Copy(f, fd); err != nil {
			return nil, nil, status.InternalError(err, "Can't save file")
		} else {
			h = &FileHeader{
				Size: n,
			}
		}
	}
	// Provided header name takes precedence
	if fh.Name != "" {
		h.Name = fh.Name
	}
	// The file is now local. Sync it, since external programs might read it.
	if err := f.Sync(); err != nil {
		return nil, nil, status.InternalError(err, "Can't sync file")
	}
	return f, h, nil
}
// closeAndRemove cleans up in the event of an error. Windows needs the file to
// be closed so it is important to do it in order.
func closeAndRemove(f *os.File) {
f.Close()
os.Remove(f.Name())
}
// TODO: add tests
// validateURL parses rawurl and enforces the upload policy: at most 1024
// characters, http/https scheme only, no embedded userinfo. The fragment is
// stripped from the returned URL since it is meaningless for fetching.
func validateURL(rawurl string) (*url.URL, status.S) {
	if len(rawurl) > 1024 {
		return nil, status.InvalidArgument(nil, "Can't use long URL")
	}
	u, err := url.Parse(rawurl)
	if err != nil {
		return nil, status.InvalidArgument(err, "Can't parse", rawurl)
	}
	switch u.Scheme {
	case "http", "https":
		// allowed
	default:
		return nil, status.InvalidArgument(nil, "Can't use non HTTP")
	}
	if u.User != nil {
		return nil, status.InvalidArgument(nil, "Can't provide userinfo")
	}
	u.Fragment = ""
	return u, nil
}
// downloadFile GETs u with the task's HTTP client, streams the body into f,
// and returns a header whose Size is the byte count and whose Name is derived
// from the URL path (left empty for directory-style URLs). Non-200 responses
// and transfer failures are reported as invalid-argument errors since the
// remote input, not our system, is presumed at fault.
func (t *UpsertPicTask) downloadFile(ctx context.Context, f *os.File, u *url.URL) (
	*FileHeader, status.S) {
	if u == nil {
		return nil, status.InvalidArgument(nil, "Missing URL")
	}
	// TODO: make sure this isn't reading from ourself
	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
	if err != nil {
		// if this fails, it's probably our fault
		return nil, status.InternalError(err, "Can't create request")
	}
	// Propagate ctx so the download is cancelable.
	req = req.WithContext(ctx)
	resp, err := t.HTTPClient.Do(req)
	if err != nil {
		return nil, status.InvalidArgument(err, "Can't download", u)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		// todo: log the response and headers
		return nil, status.InvalidArgumentf(nil, "Can't download %s [%d]", u, resp.StatusCode)
	}
	bytesRead, err := io.Copy(f, resp.Body)
	// This could either be because the remote hung up or a file error on our side. Assume that
	// our system is okay, making this an InvalidArgument
	if err != nil {
		return nil, status.InvalidArgumentf(err, "Can't copy downloaded file")
	}
	header := &FileHeader{
		Size: bytesRead,
	}
	// Can happen for a url that is a dir like http://foo.com/
	if base := path.Base(u.Path); base != "." {
		header.Name = base
	}
	// TODO: support Content-disposition
	return header, nil
}
// generatePicHashes reads all of f once and returns its MD5, SHA-1, and
// SHA-256 digests, computed in a single pass via a MultiWriter.
func generatePicHashes(f io.Reader) (md5Hash, sha1Hash, sha256Hash []byte, sts status.S) {
	md5w, sha1w, sha256w := md5.New(), sha1.New(), sha256.New()
	if _, err := io.Copy(io.MultiWriter(md5w, sha1w, sha256w), f); err != nil {
		return nil, nil, nil, status.InternalError(err, "Can't copy")
	}
	return md5w.Sum(nil), sha1w.Sum(nil), sha256w.Sum(nil), nil
}
be/tasks: fix typo for error propagation
package tasks
import (
"bytes"
"context"
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"io"
"mime/multipart"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"time"
"github.com/golang/protobuf/ptypes"
any "github.com/golang/protobuf/ptypes/any"
"pixur.org/pixur/be/imaging"
"pixur.org/pixur/be/schema"
"pixur.org/pixur/be/schema/db"
tab "pixur.org/pixur/be/schema/tables"
"pixur.org/pixur/be/status"
)
// UpsertPicTask inserts or updates a pic with the provided information.
type UpsertPicTask struct {
// Deps
PixPath string
DB db.DB
HTTPClient *http.Client
// os functions
TempFile func(dir, prefix string) (*os.File, error)
Rename func(oldpath, newpath string) error
MkdirAll func(path string, perm os.FileMode) error
Now func() time.Time
// Inputs
FileURL string
File multipart.File
Md5Hash []byte
// Header is the name (and size) of the file. Currently only the Name is used. If the name is
// absent, UpsertPicTask will try to derive a name automatically from the FileURL.
Header FileHeader
TagNames []string
// Ext is additional extra data associated with this pic. If a key is present in both the
// new pic and the existing pic, Upsert will fail.
Ext map[string]*any.Any
// TODO: eventually take the Referer[sic]. This is to pass to HTTPClient when retrieving the
// pic.
// Results
CreatedPic *schema.Pic
}
type FileHeader struct {
Name string
Size int64
}
func (t *UpsertPicTask) Run(ctx context.Context) (stsCap status.S) {
j, err := tab.NewJob(ctx, t.DB)
if err != nil {
return status.InternalError(err, "can't create job")
}
defer cleanUp(j, &stsCap)
if sts := t.runInternal(ctx, j); sts != nil {
return sts
}
// TODO: Check if this delete the original pic on a failed merge.
if err := j.Commit(); err != nil {
os.Remove(t.CreatedPic.Path(t.PixPath))
os.Remove(t.CreatedPic.ThumbnailPath(t.PixPath))
t.CreatedPic = nil
return status.InternalError(err, "can't commit job")
}
return nil
}
func (t *UpsertPicTask) runInternal(ctx context.Context, j *tab.Job) status.S {
u, sts := requireCapability(ctx, j, schema.User_PIC_CREATE)
if sts != nil {
return sts
}
var furl *url.URL
if t.FileURL != "" {
fu, sts := validateURL(t.FileURL)
if sts != nil {
return sts
}
furl = fu
} else if t.File == nil {
return status.InvalidArgument(nil, "No pic specified")
}
now := t.Now()
// TODO: test this
if len(t.Header.Name) > 1024 {
return status.InvalidArgument(nil, "filename is too long")
}
pfs := &schema.Pic_FileSource{
CreatedTs: schema.ToTspb(now),
UserId: u.UserId,
Name: t.Header.Name,
}
if furl != nil {
pfs.Url = furl.String()
}
if len(t.Md5Hash) != 0 {
p, sts := findExistingPic(j, schema.PicIdent_MD5, t.Md5Hash)
if sts != nil {
return sts
}
if p != nil {
if p.HardDeleted() {
if !p.GetDeletionStatus().Temporary {
return status.InvalidArgument(nil, "Can't upload deleted pic.")
}
// Fallthrough. We still need to download, and then remerge.
} else {
t.CreatedPic = p
return mergePic(j, p, now, pfs, t.Ext, t.TagNames, u.UserId)
}
}
}
f, fh, sts := t.prepareFile(ctx, t.File, t.Header, furl)
if sts != nil {
return sts
}
// after preparing the f, fh, is authoritative.
pfs.Name = fh.Name
// on success, the name of f will change and it won't be removed.
defer os.Remove(f.Name())
defer f.Close()
md5Hash, sha1Hash, sha256Hash, sts := generatePicHashes(io.NewSectionReader(f, 0, fh.Size))
if sts != nil {
// TODO: test this case
return sts
}
if len(t.Md5Hash) != 0 && !bytes.Equal(t.Md5Hash, md5Hash) {
return status.InvalidArgumentf(nil, "Md5 hash mismatch %x != %x", t.Md5Hash, md5Hash)
}
im, sts := imaging.ReadImage(io.NewSectionReader(f, 0, fh.Size))
if sts != nil {
return sts
}
defer im.Close()
var immime schema.Pic_Mime
imf := im.Format()
switch {
case imf.IsJpeg():
immime = schema.Pic_JPEG
case imf.IsGif():
immime = schema.Pic_GIF
case imf.IsPng():
immime = schema.Pic_PNG
case imf.IsWebm():
immime = schema.Pic_WEBM
default:
return status.InvalidArgument(nil, "Unknown image type", imf)
}
var imanim *schema.AnimationInfo
if dur, sts := im.Duration(); sts != nil {
return sts
} else if dur != nil {
imanim = &schema.AnimationInfo{
Duration: ptypes.DurationProto(*dur),
}
}
// Still double check that the sha1 hash is not in use, even if the md5 one was
// checked up at the beginning of the function.
p, sts := findExistingPic(j, schema.PicIdent_SHA1, sha1Hash)
if sts != nil {
return sts
}
if p != nil {
if p.HardDeleted() {
if !p.GetDeletionStatus().Temporary {
return status.InvalidArgument(nil, "Can't upload deleted pic.")
}
// fall through, picture needs to be undeleted.
} else {
t.CreatedPic = p
return mergePic(j, p, now, pfs, t.Ext, t.TagNames, u.UserId)
}
} else {
picID, err := j.AllocID()
if err != nil {
return status.InternalError(err, "can't allocate id")
}
width, height := im.Dimensions()
p = &schema.Pic{
PicId: picID,
FileSize: fh.Size,
Mime: immime,
Width: int64(width),
Height: int64(height),
AnimationInfo: imanim,
// ModifiedTime is set in mergePic
}
p.SetCreatedTime(now)
if err := j.InsertPic(p); err != nil {
return status.InternalError(err, "Can't insert")
}
if sts := insertPicHashes(j, p.PicId, md5Hash, sha1Hash, sha256Hash); sts != nil {
return sts
}
if sts := insertPerceptualHash(j, p.PicId, im); sts != nil {
return sts
}
}
ft, err := t.TempFile(t.PixPath, "__")
if err != nil {
return status.InternalError(err, "Can't create tempfile")
}
defer os.Remove(ft.Name())
defer ft.Close()
thumb, sts := im.Thumbnail()
if sts != nil {
return sts
}
if sts := thumb.Write(ft); sts != nil {
return sts
}
if err := mergePic(j, p, now, pfs, t.Ext, t.TagNames, u.UserId); err != nil {
return err
}
if err := t.MkdirAll(filepath.Dir(p.Path(t.PixPath)), 0770); err != nil {
return status.InternalError(err, "Can't prepare pic dir")
}
if err := f.Close(); err != nil {
return status.InternalErrorf(err, "Can't close %v", f.Name())
}
if err := t.Rename(f.Name(), p.Path(t.PixPath)); err != nil {
return status.InternalErrorf(err, "Can't rename %v to %v", f.Name(), p.Path(t.PixPath))
}
if err := ft.Close(); err != nil {
return status.InternalErrorf(err, "Can't close %v", ft.Name())
}
// TODO: by luck the format created by imaging and the mime type decided by thumbnail are the
// same. Thumbnails should be made into proper rows with their own mime type.
if err := t.Rename(ft.Name(), p.ThumbnailPath(t.PixPath)); err != nil {
os.Remove(p.Path(t.PixPath))
return status.InternalErrorf(err, "Can't rename %v to %v", ft.Name(), p.ThumbnailPath(t.PixPath))
}
t.CreatedPic = p
return nil
}
func mergePic(j *tab.Job, p *schema.Pic, now time.Time, pfs *schema.Pic_FileSource,
ext map[string]*any.Any, tagNames []string, userID int64) status.S {
p.SetModifiedTime(now)
if ds := p.GetDeletionStatus(); ds != nil {
if ds.Temporary {
// If the pic was soft deleted, it stays deleted, unless it was temporary.
p.DeletionStatus = nil
}
}
if err := upsertTags(j, tagNames, p.PicId, now, userID); err != nil {
return err
}
// ignore sources from the same user after the first one
userFirstSource := true
if userID != schema.AnonymousUserID {
for _, s := range p.Source {
if s.UserId == userID {
userFirstSource = false
break
}
}
}
if userFirstSource {
// Okay, it's their first time uploading, let's consider adding it.
if pfs.Url != "" || len(p.Source) == 0 {
// Only accept the source if new information is being added, or there isn't any already.
// Ignore pfs.Name and pfs.Referrer as those aren't sources.
p.Source = append(p.Source, pfs)
}
}
if len(ext) != 0 && len(p.Ext) == 0 {
p.Ext = make(map[string]*any.Any)
}
for k, v := range ext {
if _, present := p.Ext[k]; present {
return status.InvalidArgumentf(nil, "duplicate key %v in extension map", k)
}
p.Ext[k] = v
}
if err := j.UpdatePic(p); err != nil {
return status.InternalError(err, "can't update pic")
}
return nil
}
// upsertTags attaches rawTags to the pic identified by picID. Names are
// cleaned first; tags already attached to the pic are skipped, existing Tag
// rows get their usage count bumped, unknown names become new Tag rows, and a
// PicTag row is created for every newly attached tag.
func upsertTags(j *tab.Job, rawTags []string, picID int64, now time.Time, userID int64) status.S {
	newTagNames, sts := cleanTagNames(rawTags)
	if sts != nil {
		return sts
	}
	attachedTags, _, sts := findAttachedPicTags(j, picID)
	if sts != nil {
		return sts
	}
	unattachedTagNames := findUnattachedTagNames(attachedTags, newTagNames)
	existingTags, unknownNames, sts := findExistingTagsByName(j, unattachedTagNames)
	if sts != nil {
		return sts
	}
	// BUG FIX: this previously tested `err != nil` — a different variable
	// that is always nil at this point — so failures from
	// updateExistingTags were silently dropped.
	if sts := updateExistingTags(j, existingTags, now); sts != nil {
		return sts
	}
	newTags, sts := createNewTags(j, unknownNames, now)
	if sts != nil {
		return sts
	}
	existingTags = append(existingTags, newTags...)
	if _, sts := createPicTags(j, existingTags, picID, now, userID); sts != nil {
		return sts
	}
	return nil
}
// findAttachedPicTags returns the Tag and PicTag rows currently attached to
// picID, all locked for update. The returned slices are parallel: tags[i]
// corresponds to pts[i].
func findAttachedPicTags(j *tab.Job, picID int64) ([]*schema.Tag, []*schema.PicTag, status.S) {
	pts, err := j.FindPicTags(db.Opts{
		Prefix: tab.PicTagsPrimary{PicId: &picID},
		Lock:   db.LockWrite,
	})
	if err != nil {
		// BUG FIX: message typo "cant't" corrected.
		return nil, nil, status.InternalError(err, "can't find pic tags")
	}
	var tags []*schema.Tag
	// TODO: maybe do something with lock ordering?
	for _, pt := range pts {
		ts, err := j.FindTags(db.Opts{
			Prefix: tab.TagsPrimary{&pt.TagId},
			Limit:  1,
			Lock:   db.LockWrite,
		})
		if err != nil {
			return nil, nil, status.InternalError(err, "can't find tags")
		}
		if len(ts) != 1 {
			// BUG FIX: err is always nil on this branch; wrapping it
			// mislabeled the cause. A PicTag without a matching Tag row is
			// an internal inconsistency, reported without a cause error.
			return nil, nil, status.InternalError(nil, "can't lookup tag")
		}
		tags = append(tags, ts[0])
	}
	return tags, pts, nil
}
// findUnattachedTagNames reports which of newTagNames are not yet attached to
// the pic. While pic tags are the source of truth for attachment, only the
// Tag row is the source of truth for the name, so attachment is decided by
// comparing against the Tag names.
func findUnattachedTagNames(attachedTags []*schema.Tag, newTagNames []string) []string {
	attached := make(map[string]struct{}, len(attachedTags))
	for _, t := range attachedTags {
		attached[t.Name] = struct{}{}
	}
	var missing []string
	for _, name := range newTagNames {
		if _, ok := attached[name]; ok {
			continue
		}
		missing = append(missing, name)
	}
	return missing
}
// findExistingTagsByName looks up each entry of names, returning the Tag rows
// that already exist (locked for update) and the names with no Tag yet. The
// two result slices partition the input and preserve its order.
func findExistingTagsByName(j *tab.Job, names []string) (
	tags []*schema.Tag, unknownNames []string, err status.S) {
	for _, name := range names {
		// Limit 1: tag names are presumably unique — confirm against schema.
		// LockWrite so a later UpdateTag in the same job doesn't race.
		ts, err := j.FindTags(db.Opts{
			Prefix: tab.TagsName{&name},
			Limit:  1,
			Lock:   db.LockWrite,
		})
		if err != nil {
			return nil, nil, status.InternalError(err, "can't find tags")
		}
		if len(ts) == 1 {
			tags = append(tags, ts[0])
		} else {
			unknownNames = append(unknownNames, name)
		}
	}
	// NOTE(review): the loop-local err shadows the named result, so this bare
	// return always yields a nil err — correct for the success path.
	return
}
// updateExistingTags records one more use of each tag: the usage count is
// incremented, the modified time refreshed, and the row persisted.
func updateExistingTags(j *tab.Job, tags []*schema.Tag, now time.Time) status.S {
	for i := range tags {
		t := tags[i]
		t.UsageCount++
		t.SetModifiedTime(now)
		if err := j.UpdateTag(t); err != nil {
			return status.InternalError(err, "can't update tag")
		}
	}
	return nil
}
// createNewTags allocates an id and inserts a Tag row for every name in
// tagNames, each starting with a usage count of one and created/modified
// times of now. Created tags are returned in input order.
func createNewTags(j *tab.Job, tagNames []string, now time.Time) ([]*schema.Tag, status.S) {
	var created []*schema.Tag
	for _, tagName := range tagNames {
		id, err := j.AllocID()
		if err != nil {
			return nil, status.InternalError(err, "can't allocate id")
		}
		t := &schema.Tag{
			TagId:      id,
			Name:       tagName,
			UsageCount: 1,
		}
		t.SetCreatedTime(now)
		t.SetModifiedTime(now)
		if err := j.InsertTag(t); err != nil {
			return nil, status.InternalError(err, "can't create tag")
		}
		created = append(created, t)
	}
	return created, nil
}
// createPicTags inserts a PicTag row for each tag, linking it to picID on
// behalf of userID, and returns the inserted rows in input order.
func createPicTags(j *tab.Job, tags []*schema.Tag, picID int64, now time.Time, userID int64) (
	[]*schema.PicTag, status.S) {
	var inserted []*schema.PicTag
	for _, tag := range tags {
		picTag := &schema.PicTag{
			PicId:  picID,
			TagId:  tag.TagId,
			Name:   tag.Name,
			UserId: userID,
		}
		picTag.SetCreatedTime(now)
		picTag.SetModifiedTime(now)
		if err := j.InsertPicTag(picTag); err != nil {
			return nil, status.InternalError(err, "can't create pic tag")
		}
		inserted = append(inserted, picTag)
	}
	return inserted, nil
}
// findExistingPic returns the pic whose identifier of the given type matches
// hash, or nil (with a nil status) when no such pic exists. Matching rows are
// locked for update.
func findExistingPic(j *tab.Job, typ schema.PicIdent_Type, hash []byte) (*schema.Pic, status.S) {
	pis, err := j.FindPicIdents(db.Opts{
		Prefix: tab.PicIdentsIdent{
			Type:  &typ,
			Value: &hash,
		},
		Lock:  db.LockWrite,
		Limit: 1,
	})
	if err != nil {
		return nil, status.InternalError(err, "can't find pic idents")
	}
	if len(pis) == 0 {
		// Absence is not an error; callers check for a nil pic.
		return nil, nil
	}
	pics, err := j.FindPics(db.Opts{
		Prefix: tab.PicsPrimary{&pis[0].PicId},
		Lock:   db.LockWrite,
		Limit:  1,
	})
	if err != nil {
		return nil, status.InternalError(err, "can't find pics")
	}
	if len(pics) != 1 {
		// An ident without a matching pic row is an internal inconsistency;
		// err is nil here, so no cause is attached (fixed error-propagation typo).
		return nil, status.InternalError(nil, "can't lookup pic")
	}
	return pics[0], nil
}
// insertPicHashes stores the three content hashes of a pic (MD5, SHA-1,
// SHA-256) as PicIdent rows keyed by picID.
func insertPicHashes(j *tab.Job, picID int64, md5Hash, sha1Hash, sha256Hash []byte) status.S {
	idents := []struct {
		typ  schema.PicIdent_Type
		val  []byte
		name string
	}{
		{schema.PicIdent_MD5, md5Hash, "md5"},
		{schema.PicIdent_SHA1, sha1Hash, "sha1"},
		{schema.PicIdent_SHA256, sha256Hash, "sha256"},
	}
	for _, id := range idents {
		ident := &schema.PicIdent{
			PicId: picID,
			Type:  id.typ,
			Value: id.val,
		}
		if err := j.InsertPicIdent(ident); err != nil {
			return status.InternalError(err, "can't create "+id.name)
		}
	}
	return nil
}
// insertPerceptualHash computes the DCT-based perceptual hash of im and
// stores it as a PicIdent row for picID, keeping the raw DCT input values
// alongside the hash so they can be recomputed or compared later.
func insertPerceptualHash(j *tab.Job, picID int64, im imaging.PixurImage) status.S {
	hash, inputs, sts := im.PerceptualHash0()
	if sts != nil {
		return sts
	}
	ident := &schema.PicIdent{
		PicId:      picID,
		Type:       schema.PicIdent_DCT_0,
		Value:      hash,
		Dct0Values: inputs,
	}
	if err := j.InsertPicIdent(ident); err != nil {
		return status.InternalError(err, "can't create dct0")
	}
	return nil
}
// prepareFile prepares the file for image processing.
func (t *UpsertPicTask) prepareFile(ctx context.Context, fd multipart.File, fh FileHeader, u *url.URL) (
_ *os.File, _ *FileHeader, stsCap status.S) {
f, err := t.TempFile(t.PixPath, "__")
if err != nil {
return nil, nil, status.InternalError(err, "Can't create tempfile")
}
defer func() {
if stsCap != nil {
closeAndRemove(f)
}
}()
var h *FileHeader
if fd == nil {
if header, sts := t.downloadFile(ctx, f, u); sts != nil {
return nil, nil, sts
} else {
h = header
}
} else {
// TODO: maybe extract the filename from the url, if not provided in FileHeader
// Make sure to copy the file to pixPath, to make sure it's on the right partition.
// Also get a copy of the size. We don't want to move the file if it is on the
// same partition, because then we can't retry the task on failure.
if n, err := io.Copy(f, fd); err != nil {
return nil, nil, status.InternalError(err, "Can't save file")
} else {
h = &FileHeader{
Size: n,
}
}
}
// Provided header name takes precedence
if fh.Name != "" {
h.Name = fh.Name
}
// The file is now local. Sync it, since external programs might read it.
if err := f.Sync(); err != nil {
return nil, nil, status.InternalError(err, "Can't sync file")
}
return f, h, nil
}
// closeAndRemove cleans up in the event of an error. Windows needs the file to
// be closed so it is important to do it in order.
func closeAndRemove(f *os.File) {
f.Close()
os.Remove(f.Name())
}
// TODO: add tests
// validateURL parses rawurl and enforces the upload policy: at most 1024
// characters, http/https scheme only, no embedded userinfo. The fragment is
// stripped from the returned URL since it is meaningless for fetching.
func validateURL(rawurl string) (*url.URL, status.S) {
	if len(rawurl) > 1024 {
		return nil, status.InvalidArgument(nil, "Can't use long URL")
	}
	u, err := url.Parse(rawurl)
	if err != nil {
		return nil, status.InvalidArgument(err, "Can't parse", rawurl)
	}
	switch u.Scheme {
	case "http", "https":
		// allowed
	default:
		return nil, status.InvalidArgument(nil, "Can't use non HTTP")
	}
	if u.User != nil {
		return nil, status.InvalidArgument(nil, "Can't provide userinfo")
	}
	u.Fragment = ""
	return u, nil
}
// downloadFile fetches the contents of u over HTTP(S) into f and returns a
// FileHeader describing what was written. The request is bound to ctx so the
// download can be canceled. Failures likely caused by the remote side are
// reported as InvalidArgument; failures likely our fault as InternalError.
func (t *UpsertPicTask) downloadFile(ctx context.Context, f *os.File, u *url.URL) (
	*FileHeader, status.S) {
	if u == nil {
		return nil, status.InvalidArgument(nil, "Missing URL")
	}
	// TODO: make sure this isn't reading from ourself
	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
	if err != nil {
		// if this fails, it's probably our fault
		return nil, status.InternalError(err, "Can't create request")
	}
	req = req.WithContext(ctx)
	resp, err := t.HTTPClient.Do(req)
	if err != nil {
		return nil, status.InvalidArgument(err, "Can't download", u)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		// todo: log the response and headers
		return nil, status.InvalidArgumentf(nil, "Can't download %s [%d]", u, resp.StatusCode)
	}
	// Stream the body straight to the destination file; bytesRead becomes the
	// reported size.
	bytesRead, err := io.Copy(f, resp.Body)
	// This could either be because the remote hung up or a file error on our side. Assume that
	// our system is okay, making this an InvalidArgument
	if err != nil {
		return nil, status.InvalidArgumentf(err, "Can't copy downloaded file")
	}
	header := &FileHeader{
		Size: bytesRead,
	}
	// Can happen for a url that is a dir like http://foo.com/
	if base := path.Base(u.Path); base != "." {
		header.Name = base
	}
	// TODO: support Content-disposition
	return header, nil
}
// generatePicHashes reads f exactly once and returns its MD5, SHA-1, and
// SHA-256 digests. The reader is consumed; rewind it before reuse.
func generatePicHashes(f io.Reader) (md5Hash, sha1Hash, sha256Hash []byte, sts status.S) {
	md5w, sha1w, sha256w := md5.New(), sha1.New(), sha256.New()
	// Feed all three hashes in a single pass over the data.
	sink := io.MultiWriter(md5w, sha1w, sha256w)
	if _, err := io.Copy(sink, f); err != nil {
		return nil, nil, nil, status.InternalError(err, "Can't copy")
	}
	return md5w.Sum(nil), sha1w.Sum(nil), sha256w.Sum(nil), nil
}
|
package main
import (
"flag"
"fmt"
"github.com/die-net/fotomat/imager"
"io/ioutil"
"net/http"
"regexp"
"strconv"
)
var maxBufferDimension = flag.Uint("max_buffer_dimension", 2048, "Maximum width or height of an image buffer to allocate.")

// init registers the crop handler on the default HTTP mux.
func init() {
	http.HandleFunc("/albums/crop", imageCropHandler)
}

/*
Supported geometries:
WxH# - scale down so the shorter edge fits within this bounding box, crop to new aspect ratio
WxH or WxH> - scale down so the longer edge fits within this bounding box, no crop
*/
var (
	// matchGeometry captures width, height, and an optional trailing
	// modifier ('>' = shrink only, '#' = crop) from a WxH geometry string.
	matchGeometry = regexp.MustCompile(`^(\d{1,5})x(\d{1,5})([>#])?$`)
)

// maxDimension is the exclusive upper bound parseGeometry accepts for either dimension.
const maxDimension = 2048
// imageCropHandler serves GET/HEAD requests: it downloads the image named by
// the image_url form value, scales or crops it per the geometry form value,
// and writes the resulting thumbnail to the client.
func imageCropHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" && r.Method != "HEAD" {
		sendError(w, nil, http.StatusMethodNotAllowed)
		return // BUG FIX: previously fell through and kept processing the request
	}
	if err := r.ParseForm(); err != nil {
		sendError(w, err, 0)
		return // BUG FIX: previously continued with an unparsed form after the error
	}
	orig, err, status := fetchUrl(r.FormValue("image_url"))
	if err != nil || status != http.StatusOK {
		sendError(w, err, status)
		return
	}
	width, height, crop, ok := parseGeometry(r.FormValue("geometry"))
	if !ok {
		sendError(w, nil, 400)
		return
	}
	img, err := imager.New(orig, *maxBufferDimension)
	if err != nil {
		sendError(w, err, 0)
		return
	}
	defer img.Close()
	var thumb []byte
	if crop {
		thumb, err = img.Crop(width, height)
	} else {
		thumb, err = img.Thumbnail(width, height, true)
	}
	if err != nil {
		sendError(w, err, 0)
		return
	}
	w.Write(thumb)
}
func fetchUrl(url string) ([]byte, error, int) {
resp, err := http.Get(url)
if err != nil {
return nil, err, 0
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err, 0
}
switch resp.StatusCode {
case http.StatusOK, http.StatusNoContent, http.StatusBadRequest,
http.StatusUnauthorized, http.StatusForbidden, http.StatusNotFound,
http.StatusRequestTimeout, http.StatusGone:
return body, nil, resp.StatusCode
default:
err := fmt.Errorf("Proxy received %d %s", resp.StatusCode, http.StatusText(resp.StatusCode))
return nil, err, http.StatusBadGateway
}
}
// parseGeometry decodes a WxH[#>] geometry string into (width, height, crop,
// ok). ok is false when the string does not match the pattern or either
// dimension falls outside the open interval (0, maxDimension).
func parseGeometry(geometry string) (uint, uint, bool, bool) {
	parts := matchGeometry.FindStringSubmatch(geometry)
	if len(parts) != 4 {
		return 0, 0, false, false
	}
	w, werr := strconv.Atoi(parts[1])
	h, herr := strconv.Atoi(parts[2])
	if werr != nil || herr != nil {
		return 0, 0, false, false
	}
	if w <= 0 || w >= maxDimension || h <= 0 || h >= maxDimension {
		return 0, 0, false, false
	}
	// A '#' modifier requests a crop; '>' (or nothing) means scale only.
	return uint(w), uint(h), parts[3] == "#", true
}
// sendError writes err (or the standard text for the HTTP status, when err is
// nil) to the client. A zero status is derived from the error type: unknown
// image formats map to 415, oversized images to 413, anything else to 500.
func sendError(w http.ResponseWriter, err error, status int) {
	if status == 0 {
		switch err {
		case imager.UnknownFormat:
			status = http.StatusUnsupportedMediaType
		case imager.TooBig:
			status = http.StatusRequestEntityTooLarge
		default:
			status = http.StatusInternalServerError
		}
	}
	if err == nil {
		// BUG FIX: pass the status text through an explicit %s verb. Using it
		// directly as the format string is a go vet violation and would
		// misbehave if the text ever contained a '%'.
		err = fmt.Errorf("%s", http.StatusText(status))
	}
	http.Error(w, err.Error(), status)
}
Add watchdog that panic()s if imagemagick takes longer than a minute.
package main
import (
"flag"
"fmt"
"github.com/die-net/fotomat/imager"
"io/ioutil"
"net/http"
"regexp"
"strconv"
"time"
)
var maxBufferDimension = flag.Uint("max_buffer_dimension", 2048, "Maximum width or height of an image buffer to allocate.")

var maxProcessingDuration = flag.Duration("max_processing_duration", time.Minute, "Maximum duration we can be processing an image before assuming we crashed (0 = disable).")

// init registers the crop handler on the default HTTP mux.
func init() {
	http.HandleFunc("/albums/crop", imageCropHandler)
}

/*
Supported geometries:
WxH# - scale down so the shorter edge fits within this bounding box, crop to new aspect ratio
WxH or WxH> - scale down so the longer edge fits within this bounding box, no crop
*/
var (
	// matchGeometry captures width, height, and an optional trailing
	// modifier ('>' = shrink only, '#' = crop) from a WxH geometry string.
	matchGeometry = regexp.MustCompile(`^(\d{1,5})x(\d{1,5})([>#])?$`)
)

// maxDimension is the exclusive upper bound parseGeometry accepts for either dimension.
const maxDimension = 2048
// imageCropHandler serves GET/HEAD thumbnail requests: it validates the
// geometry first (cheap), then downloads image_url and hands the bytes to
// processImage, writing the resulting thumbnail to the client.
func imageCropHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" && r.Method != "HEAD" {
		sendError(w, nil, http.StatusMethodNotAllowed)
		return
	}
	if err := r.ParseForm(); err != nil {
		sendError(w, err, 0)
		return
	}
	// Validate geometry before fetching so bad requests don't cost a download.
	width, height, crop, ok := parseGeometry(r.FormValue("geometry"))
	if !ok {
		sendError(w, nil, 400)
		return
	}
	url := r.FormValue("image_url")
	orig, err, status := fetchUrl(url)
	if err != nil || status != http.StatusOK {
		sendError(w, err, status)
		return
	}
	thumb, err := processImage(url, orig, width, height, crop)
	orig = nil // Free up image memory ASAP.
	if err != nil {
		sendError(w, err, 0)
		return
	}
	w.Write(thumb)
}
// parseGeometry decodes a WxH[#>] geometry string into (width, height, crop,
// ok). ok is false when the string does not match the pattern or either
// dimension falls outside the open interval (0, maxDimension).
func parseGeometry(geometry string) (uint, uint, bool, bool) {
	g := matchGeometry.FindStringSubmatch(geometry)
	if len(g) != 4 {
		return 0, 0, false, false
	}
	width, err := strconv.Atoi(g[1])
	if err != nil || width <= 0 || width >= maxDimension {
		return 0, 0, false, false
	}
	height, err := strconv.Atoi(g[2])
	if err != nil || height <= 0 || height >= maxDimension {
		return 0, 0, false, false
	}
	// A '#' modifier requests a crop; '>' (or nothing) means scale only.
	crop := (g[3] == "#")
	return uint(width), uint(height), crop, true
}
// fetchUrl GETs url and returns the response body, an error, and an HTTP
// status for us to relay to our own client. A small allowlist of statuses is
// passed through unchanged; anything else becomes 502 Bad Gateway.
func fetchUrl(url string) ([]byte, error, int) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err, 0
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err, 0
	}
	switch resp.StatusCode {
	case http.StatusOK, http.StatusNoContent, http.StatusBadRequest,
		http.StatusUnauthorized, http.StatusForbidden, http.StatusNotFound,
		http.StatusRequestTimeout, http.StatusGone:
		// Relay these upstream statuses (and their bodies) verbatim.
		return body, nil, resp.StatusCode
	default:
		err := fmt.Errorf("Proxy received %d %s", resp.StatusCode, http.StatusText(resp.StatusCode))
		return nil, err, http.StatusBadGateway
	}
}
// processImage decodes orig and produces a width x height thumbnail (cropping
// when crop is true). If max_processing_duration is set, a watchdog timer
// panics the process when a single image takes too long — treated as a sign
// the underlying image library has wedged. url appears only in that panic
// message, for diagnostics.
func processImage(url string, orig []byte, width, height uint, crop bool) ([]byte, error) {
	if *maxProcessingDuration > 0 {
		timer := time.AfterFunc(*maxProcessingDuration, func() {
			panic(fmt.Sprintf("Processing %v longer than %v", url, *maxProcessingDuration))
		})
		// Disarm the watchdog once processing finishes in time.
		defer timer.Stop()
	}
	img, err := imager.New(orig, *maxBufferDimension)
	if err != nil {
		return nil, err
	}
	defer img.Close()
	var thumb []byte
	if crop {
		thumb, err = img.Crop(width, height)
	} else {
		thumb, err = img.Thumbnail(width, height, true)
	}
	return thumb, err
}
// sendError writes err (or the standard text for the HTTP status, when err is
// nil) to the client. A zero status is derived from the error type: unknown
// image formats map to 415, oversized images to 413, anything else to 500.
func sendError(w http.ResponseWriter, err error, status int) {
	if status == 0 {
		switch err {
		case imager.UnknownFormat:
			status = http.StatusUnsupportedMediaType
		case imager.TooBig:
			status = http.StatusRequestEntityTooLarge
		default:
			status = http.StatusInternalServerError
		}
	}
	if err == nil {
		// BUG FIX: pass the status text through an explicit %s verb. Using it
		// directly as the format string is a go vet violation and would
		// misbehave if the text ever contained a '%'.
		err = fmt.Errorf("%s", http.StatusText(status))
	}
	http.Error(w, err.Error(), status)
}
|
package images
import(
"io"
"encoding/json"
"github.com/ricallinson/forgery"
"github.com/spacedock-io/registry/models"
"github.com/spacedock-io/registry/cloudfiles"
)
// GetJson returns the stored JSON metadata for an image, with its size and
// checksum exposed as X-Docker-* headers. 404s when the image is unknown.
func GetJson(req *f.Request, res *f.Response) {
	image, err := models.GetImage(req.Params["id"])
	if err != nil {
		res.Send(404)
		return
	}
	// NOTE(review): if image.Size is an integer, string(image.Size) yields
	// the rune with that code point, not a decimal string — this likely wants
	// strconv/fmt. Confirm the type of Size and the expected header format.
	res.Set("X-Docker-Size", string(image.Size))
	res.Set("X-Docker-Checksum", image.Checksum)
	res.Send(image.Json)
}
// PutJson stores JSON metadata for an image, creating the image record if it
// does not exist yet. Any error other than not-found aborts with 500.
func PutJson(req *f.Request, res *f.Response) {
	uuid := req.Params["id"]
	image, err := models.GetImage(uuid)
	if err != nil {
		if err != models.NotFoundErr {
			res.Send(err.Error(), 500)
			return
		}
		// Not found: start a fresh record for this uuid.
		image = &models.Image{}
	}
	image.Uuid = uuid
	image.Json, err = json.Marshal(req.Map["json"])
	if err != nil {
		res.Send(500)
		return
	}
	err = image.Save()
	if err != nil {
		res.Send(err.Error(), 500)
		return
	}
	res.Send(200)
}
// GetLayer streams an image layer from cloud storage directly to the
// response writer.
func GetLayer(req *f.Request, res *f.Response) {
	// BUG FIX: read from the same "spacedock" container that PutLayer writes
	// to; "default" pointed at the wrong container, so freshly pushed layers
	// could never be fetched.
	_, err := cloudfiles.Cloudfiles.ObjectGet(
		"spacedock", req.Params["id"], res.Response.Writer, true, nil)
	if err != nil {
		res.Send(500)
		return
	}
	res.Send(200)
}
// PutLayer uploads an image layer from the request body into the "spacedock"
// container. The object must be closed to flush/complete the upload.
func PutLayer(req *f.Request, res *f.Response) {
	obj, err := cloudfiles.Cloudfiles.ObjectCreate(
		"spacedock", req.Params["id"], true, "", "", nil)
	if err != nil {
		res.Send(500)
		return
	}
	// BUG FIX: the io.Copy error was ignored and the object was never closed,
	// so truncated or failed uploads were reported as successful.
	_, copyErr := io.Copy(obj, req.Request.Request.Body)
	closeErr := obj.Close()
	if copyErr != nil || closeErr != nil {
		res.Send(500)
		return
	}
	res.Send(200)
}
// GetAncestry returns the ancestry chain of an image as a JSON array.
// 404s when the image is unknown.
func GetAncestry(req *f.Request, res *f.Response) {
	image, err := models.GetImage(req.Params["id"])
	if err != nil {
		res.Send(404)
		return
	}
	data, err := json.Marshal(image.Ancestry)
	if err == nil {
		res.Send(data)
	} else { res.Send(err.Error(), 500) }
}
// PutChecksum records the checksum for an image, taken from the
// X-Docker-Checksum request header. Responds 400 when the header is missing.
func PutChecksum(req *f.Request, res *f.Response) {
	uuid := req.Params["id"]
	/* *WTF* Docker?!
	HTTP API design 101: headers are *metadata*. The checksum should be passed
	as PUT body.
	*/
	header := req.Header["X-Docker-Checksum"]
	if header == nil {
		res.Send("X-Docker-Checksum header is required", 400)
		return // BUG FIX: previously fell through and panicked on header[0]
	}
	checksum := header[0]
	err := models.SetImageChecksum(uuid, checksum)
	if err != nil {
		res.Send(err.Error(), 500)
		return
	}
	res.Send(200)
}
fix: close the cloudfiles object after uploading a layer so the write is flushed and errors are reported
package images
import(
"io"
"encoding/json"
"github.com/ricallinson/forgery"
"github.com/spacedock-io/registry/models"
"github.com/spacedock-io/registry/cloudfiles"
)
// GetJson returns the stored JSON metadata for an image, with its size and
// checksum exposed as X-Docker-* headers. 404s when the image is unknown.
func GetJson(req *f.Request, res *f.Response) {
	image, err := models.GetImage(req.Params["id"])
	if err != nil {
		res.Send(404)
		return
	}
	// NOTE(review): if image.Size is an integer, string(image.Size) yields
	// the rune with that code point, not a decimal string — this likely wants
	// strconv/fmt. Confirm the type of Size and the expected header format.
	res.Set("X-Docker-Size", string(image.Size))
	res.Set("X-Docker-Checksum", image.Checksum)
	res.Send(image.Json)
}
// PutJson stores JSON metadata for an image, creating the image record if it
// does not exist yet. Any error other than not-found aborts with 500.
func PutJson(req *f.Request, res *f.Response) {
	uuid := req.Params["id"]
	image, err := models.GetImage(uuid)
	if err != nil {
		if err != models.NotFoundErr {
			res.Send(err.Error(), 500)
			return
		}
		// Not found: start a fresh record for this uuid.
		image = &models.Image{}
	}
	image.Uuid = uuid
	encoded, jsonErr := json.Marshal(req.Map["json"])
	if jsonErr != nil {
		res.Send(500)
		return
	}
	image.Json = encoded
	if saveErr := image.Save(); saveErr != nil {
		res.Send(saveErr.Error(), 500)
		return
	}
	res.Send(200)
}
// GetLayer streams an image layer from the "spacedock" container directly to
// the response writer.
func GetLayer(req *f.Request, res *f.Response) {
	_, err := cloudfiles.Cloudfiles.ObjectGet(
		"spacedock", req.Params["id"], res.Response.Writer, true, nil)
	if err != nil {
		res.Send(500)
		return
	}
	res.Send(200)
}
// PutLayer uploads an image layer from the request body into the "spacedock"
// container, closing the object to flush/complete the upload.
func PutLayer(req *f.Request, res *f.Response) {
	obj, err := cloudfiles.Cloudfiles.ObjectCreate(
		"spacedock", req.Params["id"], true, "", "", nil)
	if err != nil {
		res.Send(500)
		return
	}
	// BUG FIX: the io.Copy error was previously discarded; a truncated upload
	// could be reported as a success if Close happened to work.
	if _, err := io.Copy(obj, req.Request.Request.Body); err != nil {
		obj.Close()
		res.Send(500)
		return
	}
	if err := obj.Close(); err != nil {
		res.Send(500)
		return
	}
	res.Send(200)
}
// GetAncestry returns the ancestry chain of an image as a JSON array.
// 404s when the image is unknown.
func GetAncestry(req *f.Request, res *f.Response) {
	image, err := models.GetImage(req.Params["id"])
	if err != nil {
		res.Send(404)
		return
	}
	data, err := json.Marshal(image.Ancestry)
	if err == nil {
		res.Send(data)
	} else { res.Send(err.Error(), 500) }
}
// PutChecksum records the checksum for an image, taken from the
// X-Docker-Checksum request header. Responds 400 when the header is missing.
func PutChecksum(req *f.Request, res *f.Response) {
	uuid := req.Params["id"]
	/* *WTF* Docker?!
	HTTP API design 101: headers are *metadata*. The checksum should be passed
	as PUT body.
	*/
	header := req.Header["X-Docker-Checksum"]
	if header == nil {
		res.Send("X-Docker-Checksum header is required", 400)
		return // BUG FIX: previously fell through and panicked on header[0]
	}
	checksum := header[0]
	err := models.SetImageChecksum(uuid, checksum)
	if err != nil {
		res.Send(err.Error(), 500)
		return
	}
	res.Send(200)
}
|
package api
import (
"github.com/cloudfoundry/cli/cf/errors"
"github.com/cloudfoundry/cli/cf/models"
"time"
)
// FakeAppInstancesRepo is a test double for the app-instances repository.
// It records the last requested app guid and replays canned responses and
// error codes in order.
type FakeAppInstancesRepo struct {
	// GetInstancesAppGuid holds the appGuid from the most recent GetInstances call.
	GetInstancesAppGuid    string
	// GetInstancesResponses is a queue of instance lists, returned front-first.
	GetInstancesResponses  [][]models.AppInstanceFields
	// GetInstancesErrorCodes is a queue of error codes, returned front-first
	// ("" means no error for that call).
	GetInstancesErrorCodes []string
}
// GetInstances records appGuid, then pops one canned response and one canned
// error code from their queues. Once a queue is exhausted, subsequent calls
// return the zero value for that side.
func (repo *FakeAppInstancesRepo) GetInstances(appGuid string) (instances []models.AppInstanceFields, apiErr error) {
	repo.GetInstancesAppGuid = appGuid
	time.Sleep(1 * time.Millisecond) //needed for Windows only, otherwise it thinks error codes are not assigned
	if len(repo.GetInstancesResponses) > 0 {
		instances = repo.GetInstancesResponses[0]
		repo.GetInstancesResponses = repo.GetInstancesResponses[1:]
	}
	if len(repo.GetInstancesErrorCodes) > 0 {
		errorCode := repo.GetInstancesErrorCodes[0]
		repo.GetInstancesErrorCodes = repo.GetInstancesErrorCodes[1:]
		if errorCode != "" {
			apiErr = errors.NewHttpError(400, errorCode, "Error staging app")
		}
	}
	return
}
FakeAppInstancesRepo should be easier to use
The behavior before was that each time you called GetInstances, it would slice off
one of the responses and errors from its slice of mock responses. Eventually there
would be nothing left and it would just return you the zero-value for the
response and error, which made it fairly difficult to use in practice,
because we poll this repo over and over again.
package api
import (
"time"
"github.com/cloudfoundry/cli/cf/errors"
"github.com/cloudfoundry/cli/cf/models"
)
// FakeAppInstancesRepo is a test double for the app-instances repository.
// It records the last requested app guid and replays canned responses and
// error codes in order, repeating the final entry indefinitely so that
// polling callers keep getting a meaningful value.
type FakeAppInstancesRepo struct {
	// GetInstancesAppGuid holds the appGuid from the most recent GetInstances call.
	GetInstancesAppGuid    string
	// GetInstancesResponses is a queue of instance lists; the last entry repeats.
	GetInstancesResponses  [][]models.AppInstanceFields
	// GetInstancesErrorCodes is a queue of error codes; the last entry repeats
	// ("" means no error for that call).
	GetInstancesErrorCodes []string
}
// GetInstances records appGuid, then returns the front canned response and
// error code. Unlike a plain queue, the final element of each list is never
// sliced away, so repeated polling keeps yielding it instead of zero values.
func (repo *FakeAppInstancesRepo) GetInstances(appGuid string) (instances []models.AppInstanceFields, apiErr error) {
	repo.GetInstancesAppGuid = appGuid
	time.Sleep(1 * time.Millisecond) //needed for Windows only, otherwise it thinks error codes are not assigned
	if len(repo.GetInstancesResponses) > 0 {
		instances = repo.GetInstancesResponses[0]
		// keep the last response around for subsequent polls
		if len(repo.GetInstancesResponses) > 1 {
			repo.GetInstancesResponses = repo.GetInstancesResponses[1:]
		}
	}
	if len(repo.GetInstancesErrorCodes) > 0 {
		errorCode := repo.GetInstancesErrorCodes[0]
		// don't slice away the last one if this is all we have
		if len(repo.GetInstancesErrorCodes) > 1 {
			repo.GetInstancesErrorCodes = repo.GetInstancesErrorCodes[1:]
		}
		if errorCode != "" {
			apiErr = errors.NewHttpError(400, errorCode, "Error staging app")
		}
	}
	return
}
|
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package admission
import (
"context"
"fmt"
"reflect"
"sort"
"time"
"k8s.io/klog/v2"
admissionv1 "k8s.io/api/admission/v1"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/validation/field"
admissionapi "k8s.io/pod-security-admission/admission/api"
"k8s.io/pod-security-admission/admission/api/validation"
"k8s.io/pod-security-admission/api"
"k8s.io/pod-security-admission/metrics"
"k8s.io/pod-security-admission/policy"
)
const (
	// defaultNamespaceMaxPodsToCheck bounds how many existing pods are
	// re-evaluated when a namespace's enforce policy is tightened.
	defaultNamespaceMaxPodsToCheck = 3000
	// defaultNamespacePodCheckTimeout bounds how long that re-evaluation may run.
	defaultNamespacePodCheckTimeout = 1 * time.Second
)
// Admission implements the core admission logic for the Pod Security Admission controller.
// The admission logic can be wired up via CompleteConfiguration and checked
// with ValidateConfiguration, then invoked through Validate.
type Admission struct {
	Configuration *admissionapi.PodSecurityConfiguration

	// Getting policy checks per level/version
	Evaluator policy.Evaluator

	// Metrics
	Metrics metrics.Recorder

	// Arbitrary object --> PodSpec
	PodSpecExtractor PodSpecExtractor

	// API connections
	NamespaceGetter NamespaceGetter
	PodLister       PodLister

	// defaultPolicy is derived from Configuration.Defaults in CompleteConfiguration.
	defaultPolicy api.Policy

	// Limits applied when dry-running existing pods against a new enforce policy.
	namespaceMaxPodsToCheck  int
	namespacePodCheckTimeout time.Duration
}
// NamespaceGetter fetches a namespace by name so its pod-security labels can
// be inspected.
type NamespaceGetter interface {
	GetNamespace(ctx context.Context, name string) (*corev1.Namespace, error)
}

// PodLister lists the pods in a namespace, used when dry-running existing
// pods against a new enforce policy.
type PodLister interface {
	ListPods(ctx context.Context, namespace string) ([]*corev1.Pod, error)
}

// PodSpecExtractor extracts a PodSpec from pod-controller resources that embed a PodSpec.
// This interface can be extended to enforce policy on CRDs for custom pod-controllers.
type PodSpecExtractor interface {
	// HasPodSpec returns true if the given resource type MAY contain an extractable PodSpec.
	HasPodSpec(schema.GroupResource) bool
	// ExtractPodSpec returns a pod spec and metadata to evaluate from the object.
	// An error returned here does not block admission of the pod-spec-containing object and is not returned to the user.
	// If the object has no pod spec, return `nil, nil, nil`.
	ExtractPodSpec(runtime.Object) (*metav1.ObjectMeta, *corev1.PodSpec, error)
}
// defaultPodSpecResources is the set of built-in resource types known to
// embed a PodSpec.
var defaultPodSpecResources = map[schema.GroupResource]bool{
	corev1.Resource("pods"):                   true,
	corev1.Resource("replicationcontrollers"): true,
	corev1.Resource("podtemplates"):           true,
	appsv1.Resource("replicasets"):            true,
	appsv1.Resource("deployments"):            true,
	appsv1.Resource("statefulsets"):           true,
	appsv1.Resource("daemonsets"):             true,
	batchv1.Resource("jobs"):                  true,
	batchv1.Resource("cronjobs"):              true,
}

// DefaultPodSpecExtractor handles the built-in pod-spec-carrying resources
// listed in defaultPodSpecResources.
type DefaultPodSpecExtractor struct{}
// HasPodSpec reports whether gr is one of the built-in resource types that
// embed a PodSpec.
func (DefaultPodSpecExtractor) HasPodSpec(gr schema.GroupResource) bool {
	return defaultPodSpecResources[gr]
}
// ExtractPodSpec returns the pod metadata and spec embedded in a built-in
// workload object. Pods return their own metadata/spec; controller types
// return their pod template's. Unknown types yield an error (which, per the
// PodSpecExtractor contract, does not block admission).
func (DefaultPodSpecExtractor) ExtractPodSpec(obj runtime.Object) (*metav1.ObjectMeta, *corev1.PodSpec, error) {
	switch o := obj.(type) {
	case *corev1.Pod:
		return &o.ObjectMeta, &o.Spec, nil
	case *corev1.PodTemplate:
		return extractPodSpecFromTemplate(&o.Template)
	case *corev1.ReplicationController:
		// RC's template is already a pointer and may be nil.
		return extractPodSpecFromTemplate(o.Spec.Template)
	case *appsv1.ReplicaSet:
		return extractPodSpecFromTemplate(&o.Spec.Template)
	case *appsv1.Deployment:
		return extractPodSpecFromTemplate(&o.Spec.Template)
	case *appsv1.DaemonSet:
		return extractPodSpecFromTemplate(&o.Spec.Template)
	case *appsv1.StatefulSet:
		return extractPodSpecFromTemplate(&o.Spec.Template)
	case *batchv1.Job:
		return extractPodSpecFromTemplate(&o.Spec.Template)
	case *batchv1.CronJob:
		return extractPodSpecFromTemplate(&o.Spec.JobTemplate.Spec.Template)
	default:
		return nil, nil, fmt.Errorf("unexpected object type: %s", obj.GetObjectKind().GroupVersionKind().String())
	}
}
// PodSpecResources lists every GroupResource this extractor understands.
// Map iteration order is random, so the result is unordered.
func (DefaultPodSpecExtractor) PodSpecResources() []schema.GroupResource {
	out := make([]schema.GroupResource, 0, len(defaultPodSpecResources))
	for gr := range defaultPodSpecResources {
		out = append(out, gr)
	}
	return out
}
// extractPodSpecFromTemplate pulls the metadata and spec out of a pod
// template; a nil template yields nil, nil, nil (no pod spec to evaluate).
func extractPodSpecFromTemplate(template *corev1.PodTemplateSpec) (*metav1.ObjectMeta, *corev1.PodSpec, error) {
	if template != nil {
		return &template.ObjectMeta, &template.Spec, nil
	}
	return nil, nil, nil
}
// CompleteConfiguration sets up default or derived configuration.
// It derives defaultPolicy from Configuration.Defaults, applies the default
// namespace-check limits, and installs the DefaultPodSpecExtractor when none
// was provided. Must be called before ValidateConfiguration.
func (a *Admission) CompleteConfiguration() error {
	if a.Configuration != nil {
		if p, err := admissionapi.ToPolicy(a.Configuration.Defaults); err != nil {
			return err
		} else {
			a.defaultPolicy = p
		}
	}
	a.namespaceMaxPodsToCheck = defaultNamespaceMaxPodsToCheck
	a.namespacePodCheckTimeout = defaultNamespacePodCheckTimeout

	if a.PodSpecExtractor == nil {
		a.PodSpecExtractor = &DefaultPodSpecExtractor{}
	}

	return nil
}
// ValidateConfiguration ensures all required fields are set with valid values.
// It also cross-checks that CompleteConfiguration ran first by re-deriving the
// default policy and comparing it, and by checking the namespace-check limits.
func (a *Admission) ValidateConfiguration() error {
	if a.Configuration == nil {
		return fmt.Errorf("configuration required")
	} else if errs := validation.ValidatePodSecurityConfiguration(a.Configuration); len(errs) > 0 {
		return errs.ToAggregate()
	} else {
		if p, err := admissionapi.ToPolicy(a.Configuration.Defaults); err != nil {
			return err
		} else if !reflect.DeepEqual(p, a.defaultPolicy) {
			return fmt.Errorf("default policy does not match; CompleteConfiguration() was not called before ValidateConfiguration()")
		}
	}
	if a.namespaceMaxPodsToCheck == 0 || a.namespacePodCheckTimeout == 0 {
		return fmt.Errorf("namespace configuration not set; CompleteConfiguration() was not called before ValidateConfiguration()")
	}
	if a.Metrics == nil {
		return fmt.Errorf("Metrics recorder required")
	}
	if a.PodSpecExtractor == nil {
		return fmt.Errorf("PodSpecExtractor required")
	}
	if a.Evaluator == nil {
		return fmt.Errorf("Evaluator required")
	}
	if a.NamespaceGetter == nil {
		return fmt.Errorf("NamespaceGetter required")
	}
	if a.PodLister == nil {
		return fmt.Errorf("PodLister required")
	}
	return nil
}
// GroupResources that Validate dispatches on directly; any other resource is
// treated as a potential pod controller.
var (
	namespacesResource = corev1.Resource("namespaces")
	podsResource       = corev1.Resource("pods")
)
// Validate admits an API request.
// The objects in admission attributes are expected to be external v1 objects that we care about.
// The returned response may be shared and must not be mutated.
func (a *Admission) Validate(ctx context.Context, attrs api.Attributes) *admissionv1.AdmissionResponse {
	// Dispatch on the resource kind; everything that isn't a namespace or a
	// pod is handled as a potential pod controller.
	switch attrs.GetResource().GroupResource() {
	case namespacesResource:
		return a.ValidateNamespace(ctx, attrs)
	case podsResource:
		return a.ValidatePod(ctx, attrs)
	}
	return a.ValidatePodController(ctx, attrs)
}
// ValidateNamespace evaluates a namespace create or update request to ensure the pod security labels are valid,
// and checks existing pods in the namespace for violations of the new policy when updating the enforce level on a namespace.
// The returned response may be shared between evaluations and must not be mutated.
func (a *Admission) ValidateNamespace(ctx context.Context, attrs api.Attributes) *admissionv1.AdmissionResponse {
	// short-circuit on subresources
	if attrs.GetSubresource() != "" {
		return sharedAllowedResponse
	}
	obj, err := attrs.GetObject()
	if err != nil {
		klog.ErrorS(err, "failed to decode object")
		return errorResponse(err, &apierrors.NewBadRequest("failed to decode object").ErrStatus)
	}
	namespace, ok := obj.(*corev1.Namespace)
	if !ok {
		klog.InfoS("failed to assert namespace type", "type", reflect.TypeOf(obj))
		return errorResponse(nil, &apierrors.NewBadRequest("failed to decode namespace").ErrStatus)
	}
	// Derive the pod-security policy from the namespace's labels.
	newPolicy, newErrs := a.PolicyToEvaluate(namespace.Labels)

	switch attrs.GetOperation() {
	case admissionv1.Create:
		// require valid labels on create
		if len(newErrs) > 0 {
			return invalidResponse(attrs, newErrs)
		}
		// Exempt namespaces may still get an advisory warning about their
		// labels (see exemptNamespaceWarning).
		if a.exemptNamespace(attrs.GetNamespace()) {
			if warning := a.exemptNamespaceWarning(namespace.Name, newPolicy); warning != "" {
				response := allowedResponse()
				response.Warnings = append(response.Warnings, warning)
				return response
			}
		}
		return sharedAllowedResponse

	case admissionv1.Update:
		// if update, check if policy labels changed
		oldObj, err := attrs.GetOldObject()
		if err != nil {
			klog.ErrorS(err, "failed to decode old object")
			return errorResponse(err, &apierrors.NewBadRequest("failed to decode old object").ErrStatus)
		}
		oldNamespace, ok := oldObj.(*corev1.Namespace)
		if !ok {
			klog.InfoS("failed to assert old namespace type", "type", reflect.TypeOf(oldObj))
			return errorResponse(nil, &apierrors.NewBadRequest("failed to decode old namespace").ErrStatus)
		}
		oldPolicy, oldErrs := a.PolicyToEvaluate(oldNamespace.Labels)

		// require valid labels on update if they have changed
		if len(newErrs) > 0 && (len(oldErrs) == 0 || !reflect.DeepEqual(newErrs, oldErrs)) {
			return invalidResponse(attrs, newErrs)
		}

		// Skip dry-running pods:
		// * if the enforce policy is unchanged
		// * if the new enforce policy is privileged
		// * if the new enforce is the same version and level was relaxed
		// * for exempt namespaces
		if newPolicy.Enforce == oldPolicy.Enforce {
			return sharedAllowedResponse
		}
		if newPolicy.Enforce.Level == api.LevelPrivileged {
			return sharedAllowedResponse
		}
		if newPolicy.Enforce.Version == oldPolicy.Enforce.Version &&
			api.CompareLevels(newPolicy.Enforce.Level, oldPolicy.Enforce.Level) < 1 {
			return sharedAllowedResponse
		}
		if a.exemptNamespace(attrs.GetNamespace()) {
			if warning := a.exemptNamespaceWarning(namespace.Name, newPolicy); warning != "" {
				response := allowedResponse()
				response.Warnings = append(response.Warnings, warning)
				return response
			}
			return sharedAllowedResponse
		}
		// The enforce policy got stricter: dry-run the namespace's existing
		// pods against it and surface violations as warnings.
		response := allowedResponse()
		response.Warnings = a.EvaluatePodsInNamespace(ctx, namespace.Name, newPolicy.Enforce)
		return response

	default:
		return sharedAllowedResponse
	}
}
// ignoredPodSubresources is a set of ignored Pod subresources.
// Any other subresource is expected to be a *v1.Pod type and is evaluated.
// This ensures a version skewed webhook fails safe and denies an unknown pod subresource that allows modifying the pod spec.
// None of the entries below can change a pod's spec in a policy-relevant way.
var ignoredPodSubresources = map[string]bool{
	"exec":        true,
	"attach":      true,
	"binding":     true,
	"eviction":    true,
	"log":         true,
	"portforward": true,
	"proxy":       true,
	"status":      true,
}
// ValidatePod evaluates a pod create or update request against the effective policy for the namespace.
// The returned response may be shared between evaluations and must not be mutated.
func (a *Admission) ValidatePod(ctx context.Context, attrs api.Attributes) *admissionv1.AdmissionResponse {
	// short-circuit on ignored subresources
	if ignoredPodSubresources[attrs.GetSubresource()] {
		return sharedAllowedResponse
	}
	// short-circuit on exempt namespaces and users
	if a.exemptNamespace(attrs.GetNamespace()) {
		a.Metrics.RecordExemption(attrs)
		return sharedAllowedByNamespaceExemptionResponse
	}

	if a.exemptUser(attrs.GetUserName()) {
		a.Metrics.RecordExemption(attrs)
		return sharedAllowedByUserExemptionResponse
	}

	// short-circuit on privileged enforce+audit+warn namespaces
	namespace, err := a.NamespaceGetter.GetNamespace(ctx, attrs.GetNamespace())
	if err != nil {
		klog.ErrorS(err, "failed to fetch pod namespace", "namespace", attrs.GetNamespace())
		a.Metrics.RecordError(true, attrs)
		return errorResponse(err, &apierrors.NewInternalError(fmt.Errorf("failed to lookup namespace %q", attrs.GetNamespace())).ErrStatus)
	}
	nsPolicy, nsPolicyErrs := a.PolicyToEvaluate(namespace.Labels)
	if len(nsPolicyErrs) == 0 && nsPolicy.FullyPrivileged() {
		a.Metrics.RecordEvaluation(metrics.DecisionAllow, nsPolicy.Enforce, metrics.ModeEnforce, attrs)
		return sharedAllowedPrivilegedResponse
	}

	obj, err := attrs.GetObject()
	if err != nil {
		klog.ErrorS(err, "failed to decode object")
		a.Metrics.RecordError(true, attrs)
		return errorResponse(err, &apierrors.NewBadRequest("failed to decode object").ErrStatus)
	}
	pod, ok := obj.(*corev1.Pod)
	if !ok {
		klog.InfoS("failed to assert pod type", "type", reflect.TypeOf(obj))
		a.Metrics.RecordError(true, attrs)
		return errorResponse(nil, &apierrors.NewBadRequest("failed to decode pod").ErrStatus)
	}
	if attrs.GetOperation() == admissionv1.Update {
		oldObj, err := attrs.GetOldObject()
		if err != nil {
			klog.ErrorS(err, "failed to decode old object")
			a.Metrics.RecordError(true, attrs)
			return errorResponse(err, &apierrors.NewBadRequest("failed to decode old object").ErrStatus)
		}
		oldPod, ok := oldObj.(*corev1.Pod)
		if !ok {
			klog.InfoS("failed to assert old pod type", "type", reflect.TypeOf(oldObj))
			a.Metrics.RecordError(true, attrs)
			return errorResponse(nil, &apierrors.NewBadRequest("failed to decode old pod").ErrStatus)
		}
		if !isSignificantPodUpdate(pod, oldPod) {
			// Nothing we care about changed, so always allow the update.
			return sharedAllowedResponse
		}
	}
	// Full policy evaluation, with enforcement enabled (final bool arg).
	return a.EvaluatePod(ctx, nsPolicy, nsPolicyErrs.ToAggregate(), &pod.ObjectMeta, &pod.Spec, attrs, true)
}
// ValidatePodController evaluates a pod controller create or update request against the effective policy for the namespace.
// The returned response may be shared between evaluations and must not be mutated.
// Unlike ValidatePod, failures here never block admission: errors are surfaced
// as audit annotations on an allowed response, and EvaluatePod is called with
// enforce=false (warn/audit only).
func (a *Admission) ValidatePodController(ctx context.Context, attrs api.Attributes) *admissionv1.AdmissionResponse {
	// short-circuit on subresources
	if attrs.GetSubresource() != "" {
		return sharedAllowedResponse
	}
	// short-circuit on exempt namespaces and users
	if a.exemptNamespace(attrs.GetNamespace()) {
		a.Metrics.RecordExemption(attrs)
		return sharedAllowedByNamespaceExemptionResponse
	}

	if a.exemptUser(attrs.GetUserName()) {
		a.Metrics.RecordExemption(attrs)
		return sharedAllowedByUserExemptionResponse
	}

	// short-circuit on privileged audit+warn namespaces
	namespace, err := a.NamespaceGetter.GetNamespace(ctx, attrs.GetNamespace())
	if err != nil {
		klog.ErrorS(err, "failed to fetch pod namespace", "namespace", attrs.GetNamespace())
		a.Metrics.RecordError(true, attrs)
		response := allowedResponse()
		response.AuditAnnotations = map[string]string{
			"error": fmt.Sprintf("failed to lookup namespace %q: %v", attrs.GetNamespace(), err),
		}
		return response
	}
	nsPolicy, nsPolicyErrs := a.PolicyToEvaluate(namespace.Labels)
	if len(nsPolicyErrs) == 0 && nsPolicy.Warn.Level == api.LevelPrivileged && nsPolicy.Audit.Level == api.LevelPrivileged {
		return sharedAllowedResponse
	}

	obj, err := attrs.GetObject()
	if err != nil {
		klog.ErrorS(err, "failed to decode object")
		a.Metrics.RecordError(true, attrs)
		response := allowedResponse()
		response.AuditAnnotations = map[string]string{
			"error": fmt.Sprintf("failed to decode object: %v", err),
		}
		return response
	}
	podMetadata, podSpec, err := a.PodSpecExtractor.ExtractPodSpec(obj)
	if err != nil {
		klog.ErrorS(err, "failed to extract pod spec")
		a.Metrics.RecordError(true, attrs)
		response := allowedResponse()
		response.AuditAnnotations = map[string]string{
			"error": fmt.Sprintf("failed to extract pod template: %v", err),
		}
		return response
	}
	if podMetadata == nil && podSpec == nil {
		// if a controller with an optional pod spec does not contain a pod spec, skip validation
		return sharedAllowedResponse
	}
	return a.EvaluatePod(ctx, nsPolicy, nsPolicyErrs.ToAggregate(), podMetadata, podSpec, attrs, false)
}
// EvaluatePod evaluates the given policy against the given pod(-like) object.
// The enforce policy is only checked if enforce=true.
// The returned response may be shared between evaluations and must not be mutated.
// Audit and warn levels are always evaluated; identical level+version results
// are computed once and reused via cachedResults.
func (a *Admission) EvaluatePod(ctx context.Context, nsPolicy api.Policy, nsPolicyErr error, podMetadata *metav1.ObjectMeta, podSpec *corev1.PodSpec, attrs api.Attributes, enforce bool) *admissionv1.AdmissionResponse {
	// short-circuit on exempt runtimeclass
	if a.exemptRuntimeClass(podSpec.RuntimeClassName) {
		a.Metrics.RecordExemption(attrs)
		return sharedAllowedByRuntimeClassExemptionResponse
	}

	auditAnnotations := map[string]string{}
	if nsPolicyErr != nil {
		// Label-parse errors don't block; they are recorded as audit annotations.
		klog.V(2).InfoS("failed to parse PodSecurity namespace labels", "err", nsPolicyErr)
		auditAnnotations["error"] = fmt.Sprintf("Failed to parse policy: %v", nsPolicyErr)
		a.Metrics.RecordError(false, attrs)
	}

	klogV := klog.V(5)
	if klogV.Enabled() {
		klogV.InfoS("PodSecurity evaluation", "policy", fmt.Sprintf("%v", nsPolicy), "op", attrs.GetOperation(), "resource", attrs.GetResource(), "namespace", attrs.GetNamespace(), "name", attrs.GetName())
	}
	// cachedResults lets audit/warn reuse the enforce evaluation when their
	// level+version matches.
	cachedResults := make(map[api.LevelVersion]policy.AggregateCheckResult)
	response := allowedResponse()
	if enforce {
		auditAnnotations[api.EnforcedPolicyAnnotationKey] = nsPolicy.Enforce.String()

		result := policy.AggregateCheckResults(a.Evaluator.EvaluatePod(nsPolicy.Enforce, podMetadata, podSpec))
		if !result.Allowed {
			response = forbiddenResponse(attrs, fmt.Errorf(
				"violates PodSecurity %q: %s",
				nsPolicy.Enforce.String(),
				result.ForbiddenDetail(),
			))
			a.Metrics.RecordEvaluation(metrics.DecisionDeny, nsPolicy.Enforce, metrics.ModeEnforce, attrs)
		} else {
			a.Metrics.RecordEvaluation(metrics.DecisionAllow, nsPolicy.Enforce, metrics.ModeEnforce, attrs)
		}
		cachedResults[nsPolicy.Enforce] = result
	}

	// reuse previous evaluation if audit level+version is the same as enforce level+version
	auditResult, ok := cachedResults[nsPolicy.Audit]
	if !ok {
		auditResult = policy.AggregateCheckResults(a.Evaluator.EvaluatePod(nsPolicy.Audit, podMetadata, podSpec))
		cachedResults[nsPolicy.Audit] = auditResult
	}
	if !auditResult.Allowed {
		auditAnnotations[api.AuditViolationsAnnotationKey] = fmt.Sprintf(
			"would violate PodSecurity %q: %s",
			nsPolicy.Audit.String(),
			auditResult.ForbiddenDetail(),
		)
		a.Metrics.RecordEvaluation(metrics.DecisionDeny, nsPolicy.Audit, metrics.ModeAudit, attrs)
	}

	// avoid adding warnings to a request we're already going to reject with an error
	if response.Allowed {
		// reuse previous evaluation if warn level+version is the same as audit or enforce level+version
		warnResult, ok := cachedResults[nsPolicy.Warn]
		if !ok {
			// Not cached back: this is the last evaluation, so caching would be unused.
			warnResult = policy.AggregateCheckResults(a.Evaluator.EvaluatePod(nsPolicy.Warn, podMetadata, podSpec))
		}
		if !warnResult.Allowed {
			// TODO: Craft a better user-facing warning message
			response.Warnings = append(response.Warnings, fmt.Sprintf(
				"would violate PodSecurity %q: %s",
				nsPolicy.Warn.String(),
				warnResult.ForbiddenDetail(),
			))
			a.Metrics.RecordEvaluation(metrics.DecisionDeny, nsPolicy.Warn, metrics.ModeWarn, attrs)
		}
	}

	response.AuditAnnotations = auditAnnotations
	return response
}
// podCount is used to track the number of pods sharing identical warnings when validating a namespace.
// It lets EvaluatePodsInNamespace report one aggregated warning per unique violation instead of one per pod.
type podCount struct {
	// podName is the lexically first pod name for the given warning,
	// so the example pod shown to the user is deterministic across runs.
	podName string
	// podCount is the total number of pods with the same warnings
	podCount int
}
// EvaluatePodsInNamespace dry-runs the given enforce policy against existing pods in the
// namespace and returns user-facing warnings describing which pods would violate it.
// The check is bounded both by a.namespaceMaxPodsToCheck and by a deadline derived from
// a.namespacePodCheckTimeout (capped at half the remaining request deadline).
func (a *Admission) EvaluatePodsInNamespace(ctx context.Context, namespace string, enforce api.LevelVersion) []string {
	// start with the default timeout
	timeout := a.namespacePodCheckTimeout
	if deadline, ok := ctx.Deadline(); ok {
		timeRemaining := time.Until(deadline) / 2 // don't take more than half the remaining time
		if timeout > timeRemaining {
			timeout = timeRemaining
		}
	}
	deadline := time.Now().Add(timeout)
	ctx, cancel := context.WithDeadline(ctx, deadline)
	defer cancel()
	pods, err := a.PodLister.ListPods(ctx, namespace)
	if err != nil {
		// fail with a generic warning rather than blocking the namespace update
		klog.ErrorS(err, "failed to list pods", "namespace", namespace)
		return []string{"failed to list pods while checking new PodSecurity enforce level"}
	}
	var (
		warnings           []string
		podWarnings        []string
		podWarningsToCount = make(map[string]podCount)
		prioritizedPods    = a.prioritizePods(pods)
	)
	totalPods := len(prioritizedPods)
	if len(prioritizedPods) > a.namespaceMaxPodsToCheck {
		// bound the work: only evaluate the highest-priority pods
		prioritizedPods = prioritizedPods[0:a.namespaceMaxPodsToCheck]
	}
	checkedPods := len(prioritizedPods)
	for i, pod := range prioritizedPods {
		r := policy.AggregateCheckResults(a.Evaluator.EvaluatePod(enforce, &pod.ObjectMeta, &pod.Spec))
		if !r.Allowed {
			// aggregate identical warnings; remember the lexically first pod name for each
			warning := r.ForbiddenReason()
			c, seen := podWarningsToCount[warning]
			if !seen {
				c.podName = pod.Name
				podWarnings = append(podWarnings, warning)
			} else if pod.Name < c.podName {
				c.podName = pod.Name
			}
			c.podCount++
			podWarningsToCount[warning] = c
		}
		if err := ctx.Err(); err != nil { // deadline exceeded or context was cancelled
			checkedPods = i + 1
			break
		}
	}
	if checkedPods < totalPods {
		warnings = append(warnings, fmt.Sprintf("new PodSecurity enforce level only checked against the first %d of %d existing pods", checkedPods, totalPods))
	}
	if len(podWarnings) > 0 {
		warnings = append(warnings, fmt.Sprintf("existing pods in namespace %q violate the new PodSecurity enforce level %q", namespace, enforce.String()))
	}
	// prepend pod names to warnings
	decoratePodWarnings(podWarningsToCount, podWarnings)
	// put warnings in a deterministic order
	sort.Strings(podWarnings)
	return append(warnings, podWarnings...)
}
// decoratePodWarnings rewrites each warning in place, prefixing it with the
// representative pod name recorded in podWarningsToCount and, when more than one
// pod shares the warning, a count of the additional pods.
func decoratePodWarnings(podWarningsToCount map[string]podCount, warnings []string) {
	for i, warning := range warnings {
		c := podWarningsToCount[warning]
		if c.podCount == 0 {
			// no pods recorded for this warning (unexpected); leave it untouched
			continue
		}
		prefix := c.podName
		if c.podCount == 2 {
			prefix += " (and 1 other pod)"
		} else if c.podCount > 2 {
			prefix += fmt.Sprintf(" (and %d other pods)", c.podCount-1)
		}
		warnings[i] = prefix + ": " + warning
	}
}
// PolicyToEvaluate resolves the effective PodSecurity policy from the given namespace
// labels, using a.defaultPolicy as the baseline, and returns any label validation errors.
func (a *Admission) PolicyToEvaluate(labels map[string]string) (api.Policy, field.ErrorList) {
	return api.PolicyToEvaluate(labels, a.defaultPolicy)
}
// isSignificantPodUpdate determines whether a pod update should trigger a policy evaluation.
// Relevant mutable pod fields as of 1.21 are image and seccomp annotations:
// * https://github.com/kubernetes/kubernetes/blob/release-1.21/pkg/apis/core/validation/validation.go#L3947-L3949
func isSignificantPodUpdate(pod, oldPod *corev1.Pod) bool {
	// TODO: invert this logic to only allow specific update types.
	if pod.Annotations[corev1.SeccompPodAnnotationKey] != oldPod.Annotations[corev1.SeccompPodAnnotationKey] {
		return true
	}
	// a container added or removed is always significant
	if len(pod.Spec.Containers) != len(oldPod.Spec.Containers) {
		return true
	}
	if len(pod.Spec.InitContainers) != len(oldPod.Spec.InitContainers) {
		return true
	}
	for i := range pod.Spec.Containers {
		if isSignificantContainerUpdate(&pod.Spec.Containers[i], &oldPod.Spec.Containers[i], pod.Annotations, oldPod.Annotations) {
			return true
		}
	}
	for i := range pod.Spec.InitContainers {
		if isSignificantContainerUpdate(&pod.Spec.InitContainers[i], &oldPod.Spec.InitContainers[i], pod.Annotations, oldPod.Annotations) {
			return true
		}
	}
	for ci := range pod.Spec.EphemeralContainers {
		c := &pod.Spec.EphemeralContainers[ci]
		// find the matching old ephemeral container by name
		var oldC *corev1.Container
		for i := range oldPod.Spec.EphemeralContainers {
			if oldPod.Spec.EphemeralContainers[i].Name == c.Name {
				oldC = (*corev1.Container)(&oldPod.Spec.EphemeralContainers[i].EphemeralContainerCommon)
				break
			}
		}
		if oldC == nil {
			return true // EphemeralContainer added
		}
		if isSignificantContainerUpdate((*corev1.Container)(&c.EphemeralContainerCommon), oldC, pod.Annotations, oldPod.Annotations) {
			return true
		}
	}
	return false
}
// isSignificantContainerUpdate determines whether a container update should trigger a policy evaluation.
// A change is significant when the image changed or the container's seccomp annotation changed.
func isSignificantContainerUpdate(container, oldContainer *corev1.Container, annotations, oldAnnotations map[string]string) bool {
	if container.Image != oldContainer.Image {
		return true
	}
	key := corev1.SeccompContainerAnnotationKeyPrefix + container.Name
	return annotations[key] != oldAnnotations[key]
}
// exemptNamespace reports whether the namespace is in the configured exemption list.
func (a *Admission) exemptNamespace(namespace string) bool {
	if namespace == "" {
		return false
	}
	// TODO: consider optimizing to O(1) lookup
	return containsString(namespace, a.Configuration.Exemptions.Namespaces)
}
// exemptUser reports whether the username is in the configured exemption list.
func (a *Admission) exemptUser(username string) bool {
	if username == "" {
		return false
	}
	// TODO: consider optimizing to O(1) lookup
	return containsString(username, a.Configuration.Exemptions.Usernames)
}
// exemptRuntimeClass reports whether the (optional) runtime class name is in the
// configured exemption list; a nil or empty runtime class is never exempt.
func (a *Admission) exemptRuntimeClass(runtimeClass *string) bool {
	if runtimeClass == nil || *runtimeClass == "" {
		return false
	}
	// TODO: consider optimizing to O(1) lookup
	return containsString(*runtimeClass, a.Configuration.Exemptions.RuntimeClasses)
}
// Filter and prioritize pods based on runtimeclass and uniqueness of the controller respectively for evaluation.
// Pods with an exempt runtime class are dropped entirely; for pods sharing a controller, the first
// one encountered is kept ahead of its duplicates so unique workloads are evaluated first.
// The input slice is modified in place and should not be reused.
func (a *Admission) prioritizePods(pods []*corev1.Pod) []*corev1.Pod {
	// accumulate the list of prioritized pods in-place to avoid double-allocating
	// (prioritizedPods aliases pods' backing array, which is why the input must not be reused)
	prioritizedPods := pods[:0]
	// accumulate any additional replicated pods after the first one encountered for a given controller uid
	var duplicateReplicatedPods []*corev1.Pod
	evaluatedControllers := make(map[types.UID]bool)
	for _, pod := range pods {
		// short-circuit on exempt runtimeclass
		if a.exemptRuntimeClass(pod.Spec.RuntimeClassName) {
			continue
		}
		// short-circuit if pod from the same controller is evaluated
		podOwnerControllerRef := metav1.GetControllerOfNoCopy(pod)
		if podOwnerControllerRef == nil {
			// uncontrolled pods are always unique workloads
			prioritizedPods = append(prioritizedPods, pod)
			continue
		}
		if evaluatedControllers[podOwnerControllerRef.UID] {
			duplicateReplicatedPods = append(duplicateReplicatedPods, pod)
			continue
		}
		prioritizedPods = append(prioritizedPods, pod)
		evaluatedControllers[podOwnerControllerRef.UID] = true
	}
	// duplicates go last, so they are the first to be skipped when the check is truncated
	return append(prioritizedPods, duplicateReplicatedPods...)
}
// containsString reports whether needle occurs in haystack.
func containsString(needle string, haystack []string) bool {
	for i := range haystack {
		if haystack[i] == needle {
			return true
		}
	}
	return false
}
// exemptNamespaceWarning returns a non-empty warning message if the exempt namespace has a
// non-privileged policy and sets pod security labels.
func (a *Admission) exemptNamespaceWarning(exemptNamespace string, policy api.Policy) string {
	if !policy.FullyPrivileged() && !policy.Equivalent(&a.defaultPolicy) {
		return fmt.Sprintf("namespace %q is exempt from Pod Security, and the policy (%s) will be ignored",
			exemptNamespace, policy.CompactString())
	}
	// privileged or default-equivalent policies need no warning
	return ""
}
Partly remove support for seccomp annotations
We now partly drop the support for seccomp annotations which is planned
for v1.25 as part of the KEP:
https://github.com/kubernetes/enhancements/issues/135
Pod security policies are not touched by this change and therefore we
have to keep the annotation key constants.
This means the annotations are still accepted solely for backwards
compatibility, while synchronizing the field to the annotation is no
longer supported. Using the annotations for static pods is also no
longer supported.
Making the annotations fully non-functional will be deferred to a
future release.
Signed-off-by: Sascha Grunert <sgrunert@redhat.com>
Kubernetes-commit: 584783ee9f89fbff58bb69f6107db18f18ba8746
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package admission
import (
"context"
"fmt"
"reflect"
"sort"
"time"
"k8s.io/klog/v2"
admissionv1 "k8s.io/api/admission/v1"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/validation/field"
admissionapi "k8s.io/pod-security-admission/admission/api"
"k8s.io/pod-security-admission/admission/api/validation"
"k8s.io/pod-security-admission/api"
"k8s.io/pod-security-admission/metrics"
"k8s.io/pod-security-admission/policy"
)
const (
	// defaultNamespaceMaxPodsToCheck caps how many existing pods are re-evaluated
	// when a namespace's enforce level is tightened (see EvaluatePodsInNamespace).
	defaultNamespaceMaxPodsToCheck = 3000
	// defaultNamespacePodCheckTimeout bounds the time spent checking existing pods.
	defaultNamespacePodCheckTimeout = 1 * time.Second
)
// Admission implements the core admission logic for the Pod Security Admission controller.
// The admission logic is exercised through the Validate* entry points below, which evaluate
// namespaces, pods, and pod-controller resources against the effective PodSecurity policy.
type Admission struct {
	Configuration *admissionapi.PodSecurityConfiguration
	// Evaluator returns the policy checks for a given level/version.
	Evaluator policy.Evaluator
	// Metrics records evaluation decisions, exemptions, and errors.
	Metrics metrics.Recorder
	// PodSpecExtractor maps an arbitrary object to the PodSpec it embeds, if any.
	PodSpecExtractor PodSpecExtractor
	// API connections
	NamespaceGetter NamespaceGetter
	PodLister       PodLister
	// defaultPolicy is derived from Configuration.Defaults by CompleteConfiguration.
	defaultPolicy api.Policy
	// namespaceMaxPodsToCheck caps existing-pod evaluation in EvaluatePodsInNamespace.
	namespaceMaxPodsToCheck int
	// namespacePodCheckTimeout bounds the time spent in EvaluatePodsInNamespace.
	namespacePodCheckTimeout time.Duration
}
// NamespaceGetter fetches a namespace by name.
type NamespaceGetter interface {
	GetNamespace(ctx context.Context, name string) (*corev1.Namespace, error)
}
// PodLister lists the pods in a given namespace.
type PodLister interface {
	ListPods(ctx context.Context, namespace string) ([]*corev1.Pod, error)
}
// PodSpecExtractor extracts a PodSpec from pod-controller resources that embed a PodSpec.
// This interface can be extended to enforce policy on CRDs for custom pod-controllers.
type PodSpecExtractor interface {
	// HasPodSpec returns true if the given resource type MAY contain an extractable PodSpec.
	HasPodSpec(schema.GroupResource) bool
	// ExtractPodSpec returns a pod spec and metadata to evaluate from the object.
	// An error returned here does not block admission of the pod-spec-containing object and is not returned to the user.
	// If the object has no pod spec, return `nil, nil, nil`.
	ExtractPodSpec(runtime.Object) (*metav1.ObjectMeta, *corev1.PodSpec, error)
}
// defaultPodSpecResources is the set of built-in resource types known to embed a PodSpec,
// used by DefaultPodSpecExtractor.HasPodSpec.
var defaultPodSpecResources = map[schema.GroupResource]bool{
	corev1.Resource("pods"):                   true,
	corev1.Resource("replicationcontrollers"): true,
	corev1.Resource("podtemplates"):           true,
	appsv1.Resource("replicasets"):            true,
	appsv1.Resource("deployments"):            true,
	appsv1.Resource("statefulsets"):           true,
	appsv1.Resource("daemonsets"):             true,
	batchv1.Resource("jobs"):                  true,
	batchv1.Resource("cronjobs"):              true,
}
// DefaultPodSpecExtractor implements PodSpecExtractor for the built-in pod-controller resources.
type DefaultPodSpecExtractor struct{}

// HasPodSpec returns true if the given resource is one of the built-in types that embed a PodSpec.
func (DefaultPodSpecExtractor) HasPodSpec(gr schema.GroupResource) bool {
	return defaultPodSpecResources[gr]
}
// ExtractPodSpec returns the embedded pod metadata and spec for the built-in
// pod-controller types, or an error for unrecognized object types.
func (DefaultPodSpecExtractor) ExtractPodSpec(obj runtime.Object) (*metav1.ObjectMeta, *corev1.PodSpec, error) {
	switch o := obj.(type) {
	case *corev1.Pod:
		return &o.ObjectMeta, &o.Spec, nil
	case *corev1.PodTemplate:
		return extractPodSpecFromTemplate(&o.Template)
	case *corev1.ReplicationController:
		// ReplicationController's template is a pointer (may be nil, handled by the helper)
		return extractPodSpecFromTemplate(o.Spec.Template)
	case *appsv1.ReplicaSet:
		return extractPodSpecFromTemplate(&o.Spec.Template)
	case *appsv1.Deployment:
		return extractPodSpecFromTemplate(&o.Spec.Template)
	case *appsv1.DaemonSet:
		return extractPodSpecFromTemplate(&o.Spec.Template)
	case *appsv1.StatefulSet:
		return extractPodSpecFromTemplate(&o.Spec.Template)
	case *batchv1.Job:
		return extractPodSpecFromTemplate(&o.Spec.Template)
	case *batchv1.CronJob:
		return extractPodSpecFromTemplate(&o.Spec.JobTemplate.Spec.Template)
	default:
		return nil, nil, fmt.Errorf("unexpected object type: %s", obj.GetObjectKind().GroupVersionKind().String())
	}
}
// PodSpecResources returns the resource types this extractor knows how to handle,
// in no particular order.
func (DefaultPodSpecExtractor) PodSpecResources() []schema.GroupResource {
	out := make([]schema.GroupResource, 0, len(defaultPodSpecResources))
	for gr := range defaultPodSpecResources {
		out = append(out, gr)
	}
	return out
}
// extractPodSpecFromTemplate unwraps a pod template into its metadata and spec.
// A nil template yields nil, nil, nil (no pod spec to evaluate).
func extractPodSpecFromTemplate(template *corev1.PodTemplateSpec) (*metav1.ObjectMeta, *corev1.PodSpec, error) {
	if template != nil {
		return &template.ObjectMeta, &template.Spec, nil
	}
	return nil, nil, nil
}
// CompleteConfiguration sets up default or derived configuration:
// the default policy (from Configuration.Defaults), the existing-pod check limits,
// and a DefaultPodSpecExtractor when none was provided.
func (a *Admission) CompleteConfiguration() error {
	if a.Configuration != nil {
		p, err := admissionapi.ToPolicy(a.Configuration.Defaults)
		if err != nil {
			return err
		}
		a.defaultPolicy = p
	}
	a.namespaceMaxPodsToCheck = defaultNamespaceMaxPodsToCheck
	a.namespacePodCheckTimeout = defaultNamespacePodCheckTimeout
	if a.PodSpecExtractor == nil {
		a.PodSpecExtractor = &DefaultPodSpecExtractor{}
	}
	return nil
}
// ValidateConfiguration ensures all required fields are set with valid values.
// It also cross-checks that CompleteConfiguration was called first (derived fields populated).
func (a *Admission) ValidateConfiguration() error {
	if a.Configuration == nil {
		return fmt.Errorf("configuration required")
	}
	if errs := validation.ValidatePodSecurityConfiguration(a.Configuration); len(errs) > 0 {
		return errs.ToAggregate()
	}
	p, err := admissionapi.ToPolicy(a.Configuration.Defaults)
	if err != nil {
		return err
	}
	if !reflect.DeepEqual(p, a.defaultPolicy) {
		return fmt.Errorf("default policy does not match; CompleteConfiguration() was not called before ValidateConfiguration()")
	}
	if a.namespaceMaxPodsToCheck == 0 || a.namespacePodCheckTimeout == 0 {
		return fmt.Errorf("namespace configuration not set; CompleteConfiguration() was not called before ValidateConfiguration()")
	}
	if a.Metrics == nil {
		return fmt.Errorf("Metrics recorder required")
	}
	if a.PodSpecExtractor == nil {
		return fmt.Errorf("PodSpecExtractor required")
	}
	if a.Evaluator == nil {
		return fmt.Errorf("Evaluator required")
	}
	if a.NamespaceGetter == nil {
		return fmt.Errorf("NamespaceGetter required")
	}
	if a.PodLister == nil {
		return fmt.Errorf("PodLister required")
	}
	return nil
}
// GroupResources that Validate dispatches on specially; everything else is
// treated as a potential pod-controller resource.
var (
	namespacesResource = corev1.Resource("namespaces")
	podsResource       = corev1.Resource("pods")
)
// Validate admits an API request.
// The objects in admission attributes are expected to be external v1 objects that we care about.
// The returned response may be shared and must not be mutated.
func (a *Admission) Validate(ctx context.Context, attrs api.Attributes) *admissionv1.AdmissionResponse {
	// Dispatch on resource type: namespaces and pods have dedicated handlers,
	// everything else is treated as a potential pod-controller.
	switch attrs.GetResource().GroupResource() {
	case namespacesResource:
		return a.ValidateNamespace(ctx, attrs)
	case podsResource:
		return a.ValidatePod(ctx, attrs)
	default:
		return a.ValidatePodController(ctx, attrs)
	}
}
// ValidateNamespace evaluates a namespace create or update request to ensure the pod security labels are valid,
// and checks existing pods in the namespace for violations of the new policy when updating the enforce level on a namespace.
// The returned response may be shared between evaluations and must not be mutated.
func (a *Admission) ValidateNamespace(ctx context.Context, attrs api.Attributes) *admissionv1.AdmissionResponse {
	// short-circuit on subresources
	if attrs.GetSubresource() != "" {
		return sharedAllowedResponse
	}
	obj, err := attrs.GetObject()
	if err != nil {
		klog.ErrorS(err, "failed to decode object")
		return errorResponse(err, &apierrors.NewBadRequest("failed to decode object").ErrStatus)
	}
	namespace, ok := obj.(*corev1.Namespace)
	if !ok {
		klog.InfoS("failed to assert namespace type", "type", reflect.TypeOf(obj))
		return errorResponse(nil, &apierrors.NewBadRequest("failed to decode namespace").ErrStatus)
	}
	newPolicy, newErrs := a.PolicyToEvaluate(namespace.Labels)
	switch attrs.GetOperation() {
	case admissionv1.Create:
		// require valid labels on create
		if len(newErrs) > 0 {
			return invalidResponse(attrs, newErrs)
		}
		// warn when an exempt namespace sets a policy that will never be applied
		if a.exemptNamespace(attrs.GetNamespace()) {
			if warning := a.exemptNamespaceWarning(namespace.Name, newPolicy); warning != "" {
				response := allowedResponse()
				response.Warnings = append(response.Warnings, warning)
				return response
			}
		}
		return sharedAllowedResponse
	case admissionv1.Update:
		// if update, check if policy labels changed
		oldObj, err := attrs.GetOldObject()
		if err != nil {
			klog.ErrorS(err, "failed to decode old object")
			return errorResponse(err, &apierrors.NewBadRequest("failed to decode old object").ErrStatus)
		}
		oldNamespace, ok := oldObj.(*corev1.Namespace)
		if !ok {
			klog.InfoS("failed to assert old namespace type", "type", reflect.TypeOf(oldObj))
			return errorResponse(nil, &apierrors.NewBadRequest("failed to decode old namespace").ErrStatus)
		}
		oldPolicy, oldErrs := a.PolicyToEvaluate(oldNamespace.Labels)
		// require valid labels on update if they have changed
		if len(newErrs) > 0 && (len(oldErrs) == 0 || !reflect.DeepEqual(newErrs, oldErrs)) {
			return invalidResponse(attrs, newErrs)
		}
		// Skip dry-running pods:
		// * if the enforce policy is unchanged
		// * if the new enforce policy is privileged
		// * if the new enforce is the same version and level was relaxed
		// * for exempt namespaces
		if newPolicy.Enforce == oldPolicy.Enforce {
			return sharedAllowedResponse
		}
		if newPolicy.Enforce.Level == api.LevelPrivileged {
			return sharedAllowedResponse
		}
		if newPolicy.Enforce.Version == oldPolicy.Enforce.Version &&
			api.CompareLevels(newPolicy.Enforce.Level, oldPolicy.Enforce.Level) < 1 {
			return sharedAllowedResponse
		}
		if a.exemptNamespace(attrs.GetNamespace()) {
			if warning := a.exemptNamespaceWarning(namespace.Name, newPolicy); warning != "" {
				response := allowedResponse()
				response.Warnings = append(response.Warnings, warning)
				return response
			}
			return sharedAllowedResponse
		}
		// enforce level was tightened: dry-run existing pods and surface violations as warnings
		response := allowedResponse()
		response.Warnings = a.EvaluatePodsInNamespace(ctx, namespace.Name, newPolicy.Enforce)
		return response
	default:
		// other operations (delete, connect) are not evaluated
		return sharedAllowedResponse
	}
}
// ignoredPodSubresources is a set of ignored Pod subresources.
// Any other subresource is expected to be a *v1.Pod type and is evaluated.
// This ensures a version skewed webhook fails safe and denies an unknown pod subresource that allows modifying the pod spec.
var ignoredPodSubresources = map[string]bool{
	"exec":        true,
	"attach":      true,
	"binding":     true,
	"eviction":    true,
	"log":         true,
	"portforward": true,
	"proxy":       true,
	"status":      true,
}
// ValidatePod evaluates a pod create or update request against the effective policy for the namespace.
// The returned response may be shared between evaluations and must not be mutated.
func (a *Admission) ValidatePod(ctx context.Context, attrs api.Attributes) *admissionv1.AdmissionResponse {
	// short-circuit on ignored subresources
	if ignoredPodSubresources[attrs.GetSubresource()] {
		return sharedAllowedResponse
	}
	// short-circuit on exempt namespaces and users
	if a.exemptNamespace(attrs.GetNamespace()) {
		a.Metrics.RecordExemption(attrs)
		return sharedAllowedByNamespaceExemptionResponse
	}
	if a.exemptUser(attrs.GetUserName()) {
		a.Metrics.RecordExemption(attrs)
		return sharedAllowedByUserExemptionResponse
	}
	// short-circuit on privileged enforce+audit+warn namespaces
	namespace, err := a.NamespaceGetter.GetNamespace(ctx, attrs.GetNamespace())
	if err != nil {
		// fail closed: pods cannot be evaluated without the namespace's policy labels
		klog.ErrorS(err, "failed to fetch pod namespace", "namespace", attrs.GetNamespace())
		a.Metrics.RecordError(true, attrs)
		return errorResponse(err, &apierrors.NewInternalError(fmt.Errorf("failed to lookup namespace %q", attrs.GetNamespace())).ErrStatus)
	}
	nsPolicy, nsPolicyErrs := a.PolicyToEvaluate(namespace.Labels)
	if len(nsPolicyErrs) == 0 && nsPolicy.FullyPrivileged() {
		a.Metrics.RecordEvaluation(metrics.DecisionAllow, nsPolicy.Enforce, metrics.ModeEnforce, attrs)
		return sharedAllowedPrivilegedResponse
	}
	obj, err := attrs.GetObject()
	if err != nil {
		klog.ErrorS(err, "failed to decode object")
		a.Metrics.RecordError(true, attrs)
		return errorResponse(err, &apierrors.NewBadRequest("failed to decode object").ErrStatus)
	}
	pod, ok := obj.(*corev1.Pod)
	if !ok {
		klog.InfoS("failed to assert pod type", "type", reflect.TypeOf(obj))
		a.Metrics.RecordError(true, attrs)
		return errorResponse(nil, &apierrors.NewBadRequest("failed to decode pod").ErrStatus)
	}
	if attrs.GetOperation() == admissionv1.Update {
		oldObj, err := attrs.GetOldObject()
		if err != nil {
			klog.ErrorS(err, "failed to decode old object")
			a.Metrics.RecordError(true, attrs)
			return errorResponse(err, &apierrors.NewBadRequest("failed to decode old object").ErrStatus)
		}
		oldPod, ok := oldObj.(*corev1.Pod)
		if !ok {
			klog.InfoS("failed to assert old pod type", "type", reflect.TypeOf(oldObj))
			a.Metrics.RecordError(true, attrs)
			return errorResponse(nil, &apierrors.NewBadRequest("failed to decode old pod").ErrStatus)
		}
		if !isSignificantPodUpdate(pod, oldPod) {
			// Nothing we care about changed, so always allow the update.
			return sharedAllowedResponse
		}
	}
	// enforce=true: pod requests are rejected when they violate the enforce policy
	return a.EvaluatePod(ctx, nsPolicy, nsPolicyErrs.ToAggregate(), &pod.ObjectMeta, &pod.Spec, attrs, true)
}
// ValidatePodController evaluates a pod controller create or update request against the effective policy for the namespace.
// Pod controllers are never rejected: internal errors are recorded as audit annotations
// and the request is allowed (fail-open), in contrast to ValidatePod.
// The returned response may be shared between evaluations and must not be mutated.
func (a *Admission) ValidatePodController(ctx context.Context, attrs api.Attributes) *admissionv1.AdmissionResponse {
	// short-circuit on subresources
	if attrs.GetSubresource() != "" {
		return sharedAllowedResponse
	}
	// short-circuit on exempt namespaces and users
	if a.exemptNamespace(attrs.GetNamespace()) {
		a.Metrics.RecordExemption(attrs)
		return sharedAllowedByNamespaceExemptionResponse
	}
	if a.exemptUser(attrs.GetUserName()) {
		a.Metrics.RecordExemption(attrs)
		return sharedAllowedByUserExemptionResponse
	}
	// short-circuit on privileged audit+warn namespaces
	namespace, err := a.NamespaceGetter.GetNamespace(ctx, attrs.GetNamespace())
	if err != nil {
		// allow the request, but leave an audit trail of the lookup failure
		klog.ErrorS(err, "failed to fetch pod namespace", "namespace", attrs.GetNamespace())
		a.Metrics.RecordError(true, attrs)
		response := allowedResponse()
		response.AuditAnnotations = map[string]string{
			"error": fmt.Sprintf("failed to lookup namespace %q: %v", attrs.GetNamespace(), err),
		}
		return response
	}
	nsPolicy, nsPolicyErrs := a.PolicyToEvaluate(namespace.Labels)
	// only audit and warn modes apply to controllers, so privileged audit+warn means nothing to do
	if len(nsPolicyErrs) == 0 && nsPolicy.Warn.Level == api.LevelPrivileged && nsPolicy.Audit.Level == api.LevelPrivileged {
		return sharedAllowedResponse
	}
	obj, err := attrs.GetObject()
	if err != nil {
		klog.ErrorS(err, "failed to decode object")
		a.Metrics.RecordError(true, attrs)
		response := allowedResponse()
		response.AuditAnnotations = map[string]string{
			"error": fmt.Sprintf("failed to decode object: %v", err),
		}
		return response
	}
	podMetadata, podSpec, err := a.PodSpecExtractor.ExtractPodSpec(obj)
	if err != nil {
		klog.ErrorS(err, "failed to extract pod spec")
		a.Metrics.RecordError(true, attrs)
		response := allowedResponse()
		response.AuditAnnotations = map[string]string{
			"error": fmt.Sprintf("failed to extract pod template: %v", err),
		}
		return response
	}
	if podMetadata == nil && podSpec == nil {
		// if a controller with an optional pod spec does not contain a pod spec, skip validation
		return sharedAllowedResponse
	}
	// enforce=false: controllers only get audit annotations and warnings, never rejection
	return a.EvaluatePod(ctx, nsPolicy, nsPolicyErrs.ToAggregate(), podMetadata, podSpec, attrs, false)
}
// EvaluatePod evaluates the given policy against the given pod(-like) object.
// The enforce policy is only checked if enforce=true; audit and warn modes are always checked.
// Identical level+version combinations are evaluated once and reused via cachedResults.
// The returned response may be shared between evaluations and must not be mutated.
func (a *Admission) EvaluatePod(ctx context.Context, nsPolicy api.Policy, nsPolicyErr error, podMetadata *metav1.ObjectMeta, podSpec *corev1.PodSpec, attrs api.Attributes, enforce bool) *admissionv1.AdmissionResponse {
	// short-circuit on exempt runtimeclass
	if a.exemptRuntimeClass(podSpec.RuntimeClassName) {
		a.Metrics.RecordExemption(attrs)
		return sharedAllowedByRuntimeClassExemptionResponse
	}
	auditAnnotations := map[string]string{}
	if nsPolicyErr != nil {
		// label parse errors don't block admission; they are surfaced via audit annotations
		klog.V(2).InfoS("failed to parse PodSecurity namespace labels", "err", nsPolicyErr)
		auditAnnotations["error"] = fmt.Sprintf("Failed to parse policy: %v", nsPolicyErr)
		a.Metrics.RecordError(false, attrs)
	}
	// guard the verbose log behind the level check to avoid building the message unnecessarily
	klogV := klog.V(5)
	if klogV.Enabled() {
		klogV.InfoS("PodSecurity evaluation", "policy", fmt.Sprintf("%v", nsPolicy), "op", attrs.GetOperation(), "resource", attrs.GetResource(), "namespace", attrs.GetNamespace(), "name", attrs.GetName())
	}
	cachedResults := make(map[api.LevelVersion]policy.AggregateCheckResult)
	response := allowedResponse()
	if enforce {
		auditAnnotations[api.EnforcedPolicyAnnotationKey] = nsPolicy.Enforce.String()

		result := policy.AggregateCheckResults(a.Evaluator.EvaluatePod(nsPolicy.Enforce, podMetadata, podSpec))
		if !result.Allowed {
			response = forbiddenResponse(attrs, fmt.Errorf(
				"violates PodSecurity %q: %s",
				nsPolicy.Enforce.String(),
				result.ForbiddenDetail(),
			))
			a.Metrics.RecordEvaluation(metrics.DecisionDeny, nsPolicy.Enforce, metrics.ModeEnforce, attrs)
		} else {
			a.Metrics.RecordEvaluation(metrics.DecisionAllow, nsPolicy.Enforce, metrics.ModeEnforce, attrs)
		}
		cachedResults[nsPolicy.Enforce] = result
	}

	// reuse previous evaluation if audit level+version is the same as enforce level+version
	auditResult, ok := cachedResults[nsPolicy.Audit]
	if !ok {
		auditResult = policy.AggregateCheckResults(a.Evaluator.EvaluatePod(nsPolicy.Audit, podMetadata, podSpec))
		cachedResults[nsPolicy.Audit] = auditResult
	}
	if !auditResult.Allowed {
		auditAnnotations[api.AuditViolationsAnnotationKey] = fmt.Sprintf(
			"would violate PodSecurity %q: %s",
			nsPolicy.Audit.String(),
			auditResult.ForbiddenDetail(),
		)
		a.Metrics.RecordEvaluation(metrics.DecisionDeny, nsPolicy.Audit, metrics.ModeAudit, attrs)
	}

	// avoid adding warnings to a request we're already going to reject with an error
	if response.Allowed {
		// reuse previous evaluation if warn level+version is the same as audit or enforce level+version
		warnResult, ok := cachedResults[nsPolicy.Warn]
		if !ok {
			warnResult = policy.AggregateCheckResults(a.Evaluator.EvaluatePod(nsPolicy.Warn, podMetadata, podSpec))
		}
		if !warnResult.Allowed {
			// TODO: Craft a better user-facing warning message
			response.Warnings = append(response.Warnings, fmt.Sprintf(
				"would violate PodSecurity %q: %s",
				nsPolicy.Warn.String(),
				warnResult.ForbiddenDetail(),
			))
			a.Metrics.RecordEvaluation(metrics.DecisionDeny, nsPolicy.Warn, metrics.ModeWarn, attrs)
		}
	}
	response.AuditAnnotations = auditAnnotations
	return response
}
// podCount is used to track the number of pods sharing identical warnings when validating a namespace.
// It lets EvaluatePodsInNamespace report one aggregated warning per unique violation instead of one per pod.
type podCount struct {
	// podName is the lexically first pod name for the given warning,
	// so the example pod shown to the user is deterministic across runs.
	podName string
	// podCount is the total number of pods with the same warnings
	podCount int
}
// EvaluatePodsInNamespace dry-runs the given enforce policy against existing pods in the
// namespace and returns user-facing warnings describing which pods would violate it.
// The check is bounded both by a.namespaceMaxPodsToCheck and by a deadline derived from
// a.namespacePodCheckTimeout (capped at half the remaining request deadline).
func (a *Admission) EvaluatePodsInNamespace(ctx context.Context, namespace string, enforce api.LevelVersion) []string {
	// start with the default timeout
	timeout := a.namespacePodCheckTimeout
	if deadline, ok := ctx.Deadline(); ok {
		timeRemaining := time.Until(deadline) / 2 // don't take more than half the remaining time
		if timeout > timeRemaining {
			timeout = timeRemaining
		}
	}
	deadline := time.Now().Add(timeout)
	ctx, cancel := context.WithDeadline(ctx, deadline)
	defer cancel()
	pods, err := a.PodLister.ListPods(ctx, namespace)
	if err != nil {
		// fail with a generic warning rather than blocking the namespace update
		klog.ErrorS(err, "failed to list pods", "namespace", namespace)
		return []string{"failed to list pods while checking new PodSecurity enforce level"}
	}
	var (
		warnings           []string
		podWarnings        []string
		podWarningsToCount = make(map[string]podCount)
		prioritizedPods    = a.prioritizePods(pods)
	)
	totalPods := len(prioritizedPods)
	if len(prioritizedPods) > a.namespaceMaxPodsToCheck {
		// bound the work: only evaluate the highest-priority pods
		prioritizedPods = prioritizedPods[0:a.namespaceMaxPodsToCheck]
	}
	checkedPods := len(prioritizedPods)
	for i, pod := range prioritizedPods {
		r := policy.AggregateCheckResults(a.Evaluator.EvaluatePod(enforce, &pod.ObjectMeta, &pod.Spec))
		if !r.Allowed {
			// aggregate identical warnings; remember the lexically first pod name for each
			warning := r.ForbiddenReason()
			c, seen := podWarningsToCount[warning]
			if !seen {
				c.podName = pod.Name
				podWarnings = append(podWarnings, warning)
			} else if pod.Name < c.podName {
				c.podName = pod.Name
			}
			c.podCount++
			podWarningsToCount[warning] = c
		}
		if err := ctx.Err(); err != nil { // deadline exceeded or context was cancelled
			checkedPods = i + 1
			break
		}
	}
	if checkedPods < totalPods {
		warnings = append(warnings, fmt.Sprintf("new PodSecurity enforce level only checked against the first %d of %d existing pods", checkedPods, totalPods))
	}
	if len(podWarnings) > 0 {
		warnings = append(warnings, fmt.Sprintf("existing pods in namespace %q violate the new PodSecurity enforce level %q", namespace, enforce.String()))
	}
	// prepend pod names to warnings
	decoratePodWarnings(podWarningsToCount, podWarnings)
	// put warnings in a deterministic order
	sort.Strings(podWarnings)
	return append(warnings, podWarnings...)
}
// decoratePodWarnings rewrites each warning in place, prefixing it with the
// representative pod name recorded in podWarningsToCount and, when more than one
// pod shares the warning, a count of the additional pods.
func decoratePodWarnings(podWarningsToCount map[string]podCount, warnings []string) {
	for i, warning := range warnings {
		c := podWarningsToCount[warning]
		if c.podCount == 0 {
			// no pods recorded for this warning (unexpected); leave it untouched
			continue
		}
		prefix := c.podName
		if c.podCount == 2 {
			prefix += " (and 1 other pod)"
		} else if c.podCount > 2 {
			prefix += fmt.Sprintf(" (and %d other pods)", c.podCount-1)
		}
		warnings[i] = prefix + ": " + warning
	}
}
// PolicyToEvaluate resolves the effective PodSecurity policy from the given namespace
// labels, using a.defaultPolicy as the baseline, and returns any label validation errors.
func (a *Admission) PolicyToEvaluate(labels map[string]string) (api.Policy, field.ErrorList) {
	return api.PolicyToEvaluate(labels, a.defaultPolicy)
}
// isSignificantPodUpdate determines whether a pod update should trigger a policy evaluation.
// Relevant mutable pod fields as of 1.21 are container images and the per-container seccomp annotations:
// * https://github.com/kubernetes/kubernetes/blob/release-1.21/pkg/apis/core/validation/validation.go#L3947-L3949
func isSignificantPodUpdate(pod, oldPod *corev1.Pod) bool {
	// TODO: invert this logic to only allow specific update types.
	// a container added or removed is always significant
	if len(pod.Spec.Containers) != len(oldPod.Spec.Containers) {
		return true
	}
	if len(pod.Spec.InitContainers) != len(oldPod.Spec.InitContainers) {
		return true
	}
	for i := 0; i < len(pod.Spec.Containers); i++ {
		if isSignificantContainerUpdate(&pod.Spec.Containers[i], &oldPod.Spec.Containers[i], pod.Annotations, oldPod.Annotations) {
			return true
		}
	}
	for i := 0; i < len(pod.Spec.InitContainers); i++ {
		if isSignificantContainerUpdate(&pod.Spec.InitContainers[i], &oldPod.Spec.InitContainers[i], pod.Annotations, oldPod.Annotations) {
			return true
		}
	}
	for _, c := range pod.Spec.EphemeralContainers {
		// find the matching old ephemeral container by name
		var oldC *corev1.Container
		for i, oc := range oldPod.Spec.EphemeralContainers {
			if oc.Name == c.Name {
				oldC = (*corev1.Container)(&oldPod.Spec.EphemeralContainers[i].EphemeralContainerCommon)
				break
			}
		}
		if oldC == nil {
			return true // EphemeralContainer added
		}
		if isSignificantContainerUpdate((*corev1.Container)(&c.EphemeralContainerCommon), oldC, pod.Annotations, oldPod.Annotations) {
			return true
		}
	}
	return false
}
// isSignificantContainerUpdate determines whether a container update should trigger a policy evaluation.
// A change is significant when the image changed or the container's seccomp annotation changed.
func isSignificantContainerUpdate(container, oldContainer *corev1.Container, annotations, oldAnnotations map[string]string) bool {
	if container.Image != oldContainer.Image {
		return true
	}
	// TODO(saschagrunert): Remove this logic in 1.27.
	seccompKey := corev1.SeccompContainerAnnotationKeyPrefix + container.Name
	return annotations[seccompKey] != oldAnnotations[seccompKey]
}
// exemptNamespace reports whether the namespace is in the configured exemption list.
// The empty namespace is never exempt.
func (a *Admission) exemptNamespace(namespace string) bool {
	if namespace == "" {
		return false
	}
	// TODO: consider optimizing to O(1) lookup
	return containsString(namespace, a.Configuration.Exemptions.Namespaces)
}
// exemptUser reports whether the username is in the configured exemption list.
// The empty username is never exempt.
func (a *Admission) exemptUser(username string) bool {
	if username == "" {
		return false
	}
	// TODO: consider optimizing to O(1) lookup
	return containsString(username, a.Configuration.Exemptions.Usernames)
}
// exemptRuntimeClass reports whether the runtime class is in the configured
// exemption list. A nil or empty runtime class is never exempt.
func (a *Admission) exemptRuntimeClass(runtimeClass *string) bool {
	if runtimeClass == nil || *runtimeClass == "" {
		return false
	}
	// TODO: consider optimizing to O(1) lookup
	return containsString(*runtimeClass, a.Configuration.Exemptions.RuntimeClasses)
}
// Filter and prioritize pods based on runtimeclass and uniqueness of the controller respectively for evaluation.
// The input slice is modified in place and should not be reused.
func (a *Admission) prioritizePods(pods []*corev1.Pod) []*corev1.Pod {
	// accumulate the list of prioritized pods in-place to avoid double-allocating
	prioritizedPods := pods[:0]
	// accumulate any additional replicated pods after the first one encountered for a given controller uid
	var duplicateReplicatedPods []*corev1.Pod
	evaluatedControllers := make(map[types.UID]bool)
	for _, pod := range pods {
		// short-circuit on exempt runtimeclass
		if a.exemptRuntimeClass(pod.Spec.RuntimeClassName) {
			continue
		}
		// short-circuit if pod from the same controller is evaluated
		podOwnerControllerRef := metav1.GetControllerOfNoCopy(pod)
		if podOwnerControllerRef == nil {
			// Uncontrolled pods are always unique; keep them in the prioritized prefix.
			prioritizedPods = append(prioritizedPods, pod)
			continue
		}
		if evaluatedControllers[podOwnerControllerRef.UID] {
			// A sibling from this controller was already kept; demote this pod to the tail.
			duplicateReplicatedPods = append(duplicateReplicatedPods, pod)
			continue
		}
		prioritizedPods = append(prioritizedPods, pod)
		evaluatedControllers[podOwnerControllerRef.UID] = true
	}
	// Duplicates go last so one pod per controller is evaluated first.
	return append(prioritizedPods, duplicateReplicatedPods...)
}
// containsString reports whether needle occurs in haystack.
func containsString(needle string, haystack []string) bool {
	for i := range haystack {
		if haystack[i] == needle {
			return true
		}
	}
	return false
}
// exemptNamespaceWarning returns a non-empty warning message if the exempt namespace has a
// non-privileged policy and sets pod security labels.
func (a *Admission) exemptNamespaceWarning(exemptNamespace string, policy api.Policy) string {
	// Nothing to warn about when the labeled policy is fully privileged
	// or indistinguishable from the default.
	if policy.FullyPrivileged() || policy.Equivalent(&a.defaultPolicy) {
		return ""
	}
	msg := fmt.Sprintf("namespace %q is exempt from Pod Security, and the policy (%s) will be ignored",
		exemptNamespace, policy.CompactString())
	return msg
}
|
package welove
import (
"testing"
"io/ioutil"
"log"
"github.com/bitly/go-simplejson"
"encoding/json"
"fmt"
)
// TestTreePost verifies the tree post endpoint responds with an accepted result code.
func TestTreePost(t *testing.T) {
	res, err := TreePost("562949961343086-2ca7e299a09974dd0", "ac5f34563a4344c4", 2)
	if err != nil {
		// t.Fatal (not log.Fatal): log.Fatal calls os.Exit and aborts the whole test binary.
		t.Fatal(err)
	}
	defer res.Body.Close() // the original leaked the response body
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		t.Fatal(err)
	}
	js, err := simplejson.NewJson(body)
	if err != nil {
		// Previously ignored; a malformed body made the result check meaningless.
		t.Fatal(err)
	}
	result, _ := js.Get("result").Int()
	if result != 1 && result != 1001 && result != 1002 {
		t.Error("响应值result错误.")
	}
}
// TestHomePost verifies the home post endpoint responds with an accepted result code.
func TestHomePost(t *testing.T) {
	res, err := HomePost("562949961343086-2ca7e299a09974dd0", 7, "844424932415867")
	if err != nil {
		// t.Fatal (not log.Fatal): log.Fatal calls os.Exit and aborts the whole test binary.
		t.Fatal(err)
	}
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		t.Fatal(err)
	}
	j := make(map[string]interface{})
	// Previously ignored; an unmarshal failure made the result check meaningless.
	if err := json.Unmarshal(body, &j); err != nil {
		t.Fatal(err)
	}
	result := j["result"]
	if result != 1201.0 && result != 1.0 {
		t.Error("响应值result错误.")
	}
}
// TestRandomHouse verifies a random house id can be fetched.
func TestRandomHouse(t *testing.T) {
	if _, ok := RandomHouse("562949961343086-2ca7e299a09974dd0"); !ok {
		t.Error()
	}
}
// TestVisit verifies that visiting a randomly chosen house succeeds.
func TestVisit(t *testing.T) {
	id, ok := RandomHouse("562949961343086-2ca7e299a09974dd0")
	if !ok {
		// Fatal: without a valid id the Visit call below cannot succeed.
		t.Fatal("RandomHouse failed")
	}
	res, err := Visit("562949961343086-2ca7e299a09974dd0", id)
	if err != nil {
		// Fatal: the original used t.Error and then dereferenced a nil response.
		t.Fatal(err)
	}
	defer res.Body.Close() // the original leaked the response body
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		t.Fatal(err)
	}
	js, err := simplejson.NewJson(body)
	if err != nil {
		t.Fatal(err)
	}
	result, _ := js.Get("result").Int()
	if result != 1 && result != 1201 {
		t.Error("响应值result错误.")
	}
}
Remove unused "fmt" import.
package welove
import (
"testing"
"io/ioutil"
"log"
"github.com/bitly/go-simplejson"
"encoding/json"
)
// TestTreePost verifies the tree post endpoint responds with an accepted result code.
func TestTreePost(t *testing.T) {
	res, err := TreePost("562949961343086-2ca7e299a09974dd0", "ac5f34563a4344c4", 2)
	if err != nil {
		// t.Fatal (not log.Fatal): log.Fatal calls os.Exit and aborts the whole test binary.
		t.Fatal(err)
	}
	defer res.Body.Close() // the original leaked the response body
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		t.Fatal(err)
	}
	js, err := simplejson.NewJson(body)
	if err != nil {
		// Previously ignored; a malformed body made the result check meaningless.
		t.Fatal(err)
	}
	result, _ := js.Get("result").Int()
	if result != 1 && result != 1001 && result != 1002 {
		t.Error("响应值result错误.")
	}
}
// TestHomePost verifies the home post endpoint responds with an accepted result code.
func TestHomePost(t *testing.T) {
	res, err := HomePost("562949961343086-2ca7e299a09974dd0", 7, "844424932415867")
	if err != nil {
		// t.Fatal (not log.Fatal): log.Fatal calls os.Exit and aborts the whole test binary.
		t.Fatal(err)
	}
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		t.Fatal(err)
	}
	j := make(map[string]interface{})
	// Previously ignored; an unmarshal failure made the result check meaningless.
	if err := json.Unmarshal(body, &j); err != nil {
		t.Fatal(err)
	}
	result := j["result"]
	if result != 1201.0 && result != 1.0 {
		t.Error("响应值result错误.")
	}
}
// TestRandomHouse verifies a random house id can be fetched.
func TestRandomHouse(t *testing.T) {
	if _, ok := RandomHouse("562949961343086-2ca7e299a09974dd0"); !ok {
		t.Error()
	}
}
func TestVisit(t *testing.T) {
id, ok := RandomHouse("562949961343086-2ca7e299a09974dd0")
if !ok {
t.Error()
}
res, err := Visit("562949961343086-2ca7e299a09974dd0", id)
if err != nil {
t.Error(err)
}
bytes, _ := ioutil.ReadAll(res.Body)
js, err := simplejson.NewJson(bytes)
if err != nil {
t.Error(err)
}
result, _ := js.Get("result").Int()
if result != 1 && result != 1201 {
t.Error("响应值result错误.")
}
} |
package lockfile_test
import (
lockfile "."
"fmt"
"os"
"path/filepath"
)
// ExampleLockfile demonstrates acquiring and releasing a lock file.
func ExampleLockfile() {
	lock, err := lockfile.New(filepath.Join(os.TempDir(), "lock.me.now.lck"))
	if err != nil {
		fmt.Printf("Cannot init lock. reason: %v\n", err)
		panic(err)
	}
	// Error handling is essential, as we only try to get the lock.
	if err = lock.TryLock(); err != nil {
		fmt.Printf("Cannot lock \"%v\", reason: %v\n", lock, err)
		panic(err)
	}
	defer lock.Unlock()
	fmt.Println("Do stuff under lock")
	// Output: Do stuff under lock
}
Rewrote testing file to allow testing of basic features using 'go test'
package lockfile
import (
"fmt"
"io/ioutil"
"math/rand"
"path/filepath"
"strconv"
"testing"
)
// TestSimpleLock exercises the basic New/TryLock/Unlock cycle.
func TestSimpleLock(t *testing.T) {
	path, err := filepath.Abs("test_lockfile.pid")
	if err != nil {
		// The original panicked here; a test failure is more appropriate.
		t.Fatalf("Error resolving lockfile path: %v", err)
	}
	lf, err := New(path)
	if err != nil {
		// t.Fatalf replaces the non-idiomatic t.Fail()+fmt.Println+return triple.
		t.Fatalf("Error making lockfile: %v", err)
	}
	if err := lf.TryLock(); err != nil {
		t.Fatalf("Error locking lockfile: %v", err)
	}
	if err := lf.Unlock(); err != nil {
		t.Fatalf("Error unlocking lockfile: %v", err)
	}
}
// TestDeadPID writes the pid of a known-dead process into the lockfile on disk.
func TestDeadPID(t *testing.T) {
	path, err := filepath.Abs("test_lockfile.pid")
	if err != nil {
		t.Fatalf("Error resolving lockfile path: %v", err)
	}
	pid := GetDeadPID()
	// The original ignored this error, so a failed write silently passed the test.
	if err := ioutil.WriteFile(path, []byte(strconv.Itoa(pid)+"\n"), 0666); err != nil {
		t.Fatalf("Error writing pid file: %v", err)
	}
}
// GetDeadPID returns a pid that does not belong to any running process,
// looping until isRunning reports a free candidate.
func GetDeadPID() int {
	for {
		//I have no idea how windows handles large PIDs, or if they even exist. Limit it to 4096 to be safe.
		candidate := rand.Int() % 4096
		running, err := isRunning(candidate)
		if err != nil {
			fmt.Println("Error checking PID: ", err)
			continue
		}
		if !running {
			return candidate
		}
	}
}
|
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package log includes code that is specific to Trillian's log mode, particularly code
// for running sequencing operations.
package log
import (
"context"
"fmt"
"strconv"
"sync"
"time"
"github.com/golang/glog"
"github.com/google/trillian"
"github.com/google/trillian/crypto"
"github.com/google/trillian/crypto/sigpb"
"github.com/google/trillian/merkle"
"github.com/google/trillian/monitoring"
"github.com/google/trillian/quota"
"github.com/google/trillian/storage"
"github.com/google/trillian/util"
)
// logIDLabel is the metric label used to partition sequencer metrics by log ID.
const logIDLabel = "logid"

var (
	// once guards createMetrics so the package metrics are created exactly once.
	once                   sync.Once
	seqBatches             monitoring.Counter
	seqTreeSize            monitoring.Gauge
	seqLatency             monitoring.Histogram
	seqDequeueLatency      monitoring.Histogram
	seqGetRootLatency      monitoring.Histogram
	seqInitTreeLatency     monitoring.Histogram
	seqWriteTreeLatency    monitoring.Histogram
	seqUpdateLeavesLatency monitoring.Histogram
	seqSetNodesLatency     monitoring.Histogram
	seqStoreRootLatency    monitoring.Histogram
	seqCommitLatency       monitoring.Histogram
)
// createMetrics initializes the package-level sequencer metrics using mf.
// A nil factory falls back to inert (no-op) metrics.
func createMetrics(mf monitoring.MetricFactory) {
	if mf == nil {
		mf = monitoring.InertMetricFactory{}
	}
	seqBatches = mf.NewCounter("sequencer_batches", "Number of sequencer batch operations", logIDLabel)
	seqTreeSize = mf.NewGauge("sequencer_tree_size", "Size of Merkle tree", logIDLabel)
	seqLatency = mf.NewHistogram("sequencer_latency", "Latency of sequencer batch operation in ms", logIDLabel)
	seqDequeueLatency = mf.NewHistogram("sequencer_latency_dequeue", "Latency of dequeue-leaves part of sequencer batch operation in ms", logIDLabel)
	seqGetRootLatency = mf.NewHistogram("sequencer_latency_get_root", "Latency of get-root part of sequencer batch operation in ms", logIDLabel)
	seqInitTreeLatency = mf.NewHistogram("sequencer_latency_init_tree", "Latency of init-tree part of sequencer batch operation in ms", logIDLabel)
	seqWriteTreeLatency = mf.NewHistogram("sequencer_latency_write_tree", "Latency of write-tree part of sequencer batch operation in ms", logIDLabel)
	seqUpdateLeavesLatency = mf.NewHistogram("sequencer_latency_update_leaves", "Latency of update-leaves part of sequencer batch operation in ms", logIDLabel)
	seqSetNodesLatency = mf.NewHistogram("sequencer_latency_set_nodes", "Latency of set-nodes part of sequencer batch operation in ms", logIDLabel)
	seqStoreRootLatency = mf.NewHistogram("sequencer_latency_store_root", "Latency of store-root part of sequencer batch operation in ms", logIDLabel)
	seqCommitLatency = mf.NewHistogram("sequencer_latency_commit", "Latency of commit part of sequencer batch operation in ms", logIDLabel)
}
// TODO(Martin2112): Add admin support for safely changing params like guard window during operation
// TODO(Martin2112): Add support for enabling and controlling sequencing as part of admin API

// Sequencer instances are responsible for integrating new leaves into a single log.
// Leaves will be assigned unique sequence numbers when they are processed.
// There is no strong ordering guarantee but in general entries will be processed
// in order of submission to the log.
type Sequencer struct {
	hasher     merkle.TreeHasher  // hash function for rebuilding the compact Merkle tree
	timeSource util.TimeSource    // injectable clock, used for root timestamps and latency metrics
	logStorage storage.LogStorage // transactional storage backend for leaves, nodes and roots
	signer     *crypto.Signer     // signs new log roots
	qm         quota.Manager      // replenished with tokens after successful sequencing
}

// maxTreeDepth sets an upper limit on the size of Log trees.
// TODO(al): We actually can't go beyond 2^63 entries because we use int64s,
// but we need to calculate tree depths from a multiple of 8 due to
// the subtrees.
const maxTreeDepth = 64
// NewSequencer creates a new Sequencer instance for the specified inputs.
func NewSequencer(
	hasher merkle.TreeHasher,
	timeSource util.TimeSource,
	logStorage storage.LogStorage,
	signer *crypto.Signer,
	mf monitoring.MetricFactory,
	qm quota.Manager) *Sequencer {
	// Metrics are package-global, so only the first Sequencer's factory is used.
	once.Do(func() {
		createMetrics(mf)
	})
	return &Sequencer{
		hasher:     hasher,
		timeSource: timeSource,
		logStorage: logStorage,
		signer:     signer,
		qm:         qm,
	}
}
// TODO: This currently doesn't use the batch api for fetching the required nodes. This
// would be more efficient but requires refactoring.

// buildMerkleTreeFromStorageAtRoot reconstructs the in-memory compact Merkle
// tree whose state matches root, fetching each needed node from storage (one
// read per node) at the root's tree revision.
func (s Sequencer) buildMerkleTreeFromStorageAtRoot(ctx context.Context, root trillian.SignedLogRoot, tx storage.TreeTX) (*merkle.CompactMerkleTree, error) {
	mt, err := merkle.NewCompactMerkleTreeWithState(s.hasher, root.TreeSize, func(depth int, index int64) ([]byte, error) {
		// Callback invoked by the tree builder for each node it needs.
		nodeID, err := storage.NewNodeIDForTreeCoords(int64(depth), index, maxTreeDepth)
		if err != nil {
			glog.Warningf("%v: Failed to create nodeID: %v", root.LogId, err)
			return nil, err
		}
		nodes, err := tx.GetMerkleNodes(ctx, root.TreeRevision, []storage.NodeID{nodeID})
		if err != nil {
			glog.Warningf("%v: Failed to get Merkle nodes: %v", root.LogId, err)
			return nil, err
		}
		// We expect to get exactly one node here
		if nodes == nil || len(nodes) != 1 {
			return nil, fmt.Errorf("%v: Did not retrieve one node while loading CompactMerkleTree, got %#v for ID %v@%v", root.LogId, nodes, nodeID.String(), root.TreeRevision)
		}
		return nodes[0].Hash, nil
	}, root.RootHash)
	return mt, err
}
// buildNodesFromNodeMap flattens the node map into a slice, stamping each
// node with the new tree revision. The returned error is always nil.
func (s Sequencer) buildNodesFromNodeMap(nodeMap map[string]storage.Node, newVersion int64) ([]storage.Node, error) {
	targetNodes := make([]storage.Node, 0, len(nodeMap))
	for _, node := range nodeMap {
		node.NodeRevision = newVersion
		targetNodes = append(targetNodes, node)
	}
	return targetNodes, nil
}
// sequenceLeaves folds each leaf into the compact tree, assigns its sequence
// number (LeafIndex), and collects every updated node — internal nodes via
// the AddLeafHash callback plus the leaf node itself — into a map keyed by
// node ID for a later batched storage write.
func (s Sequencer) sequenceLeaves(mt *merkle.CompactMerkleTree, leaves []*trillian.LogLeaf) (map[string]storage.Node, []*trillian.LogLeaf, error) {
	nodeMap := make(map[string]storage.Node)
	// Update the tree state and sequence the leaves and assign sequence numbers to the new leaves
	for i, leaf := range leaves {
		seq, err := mt.AddLeafHash(leaf.MerkleLeafHash, func(depth int, index int64, hash []byte) error {
			nodeID, err := storage.NewNodeIDForTreeCoords(int64(depth), index, maxTreeDepth)
			if err != nil {
				return err
			}
			nodeMap[nodeID.String()] = storage.Node{
				NodeID: nodeID,
				Hash:   hash,
			}
			return nil
		})
		if err != nil {
			return nil, nil, err
		}
		// The leaf has now been sequenced.
		leaves[i].LeafIndex = seq
		// Store leaf hash in the Merkle tree too:
		leafNodeID, err := storage.NewNodeIDForTreeCoords(0, seq, maxTreeDepth)
		if err != nil {
			return nil, nil, err
		}
		nodeMap[leafNodeID.String()] = storage.Node{
			NodeID: leafNodeID,
			Hash:   leaf.MerkleLeafHash,
		}
	}
	return nodeMap, leaves, nil
}
// initMerkleTreeFromStorage returns a compact Merkle tree matching currentRoot:
// a fresh empty tree for a size-zero root, otherwise one rebuilt from storage.
func (s Sequencer) initMerkleTreeFromStorage(ctx context.Context, currentRoot trillian.SignedLogRoot, tx storage.LogTreeTX) (*merkle.CompactMerkleTree, error) {
	if currentRoot.TreeSize != 0 {
		// Initialize the compact tree state to match the latest root in the database
		return s.buildMerkleTreeFromStorageAtRoot(ctx, currentRoot, tx)
	}
	return merkle.NewCompactMerkleTree(s.hasher), nil
}
// createRootSignature hashes the given log root and signs it with the
// sequencer's signer, logging a warning on failure.
func (s Sequencer) createRootSignature(ctx context.Context, root trillian.SignedLogRoot) (*sigpb.DigitallySigned, error) {
	sig, err := s.signer.Sign(crypto.HashLogRoot(root))
	if err == nil {
		return sig, nil
	}
	glog.Warningf("%v: signer failed to sign root: %v", root.LogId, err)
	return nil, err
}
// SequenceBatch wraps up all the operations needed to take a batch of queued leaves
// and integrate them into the tree.
// TODO(Martin2112): Can possibly improve by deferring a function that attempts to rollback,
// which will fail if the tx was committed. Should only do this if we can hide the details of
// the underlying storage transactions and it doesn't create other problems.
//
// It dequeues up to limit leaves older than guardWindow, folds them into the
// compact tree, persists the updated leaves and nodes, then signs and stores a
// new root — all inside one storage transaction. Per-stage latencies are
// recorded between each step. Returns the number of leaves sequenced.
func (s Sequencer) SequenceBatch(ctx context.Context, logID int64, limit int, guardWindow, maxRootDurationInterval time.Duration) (int, error) {
	start := s.timeSource.Now()
	stageStart := start
	label := strconv.FormatInt(logID, 10)
	tx, err := s.logStorage.BeginForTree(ctx, logID)
	if err != nil {
		glog.Warningf("%v: Sequencer failed to start tx: %v", logID, err)
		return 0, err
	}
	// Close is a no-op after a successful Commit; otherwise it rolls back.
	defer tx.Close()
	defer seqBatches.Inc(label)
	defer func() { seqLatency.Observe(s.sinceMillis(start), label) }()
	// Very recent leaves inside the guard window will not be available for sequencing
	guardCutoffTime := s.timeSource.Now().Add(-guardWindow)
	leaves, err := tx.DequeueLeaves(ctx, limit, guardCutoffTime)
	if err != nil {
		glog.Warningf("%v: Sequencer failed to dequeue leaves: %v", logID, err)
		return 0, err
	}
	seqDequeueLatency.Observe(s.sinceMillis(stageStart), label)
	stageStart = s.timeSource.Now()
	// Get the latest known root from storage
	currentRoot, err := tx.LatestSignedLogRoot(ctx)
	if err != nil {
		glog.Warningf("%v: Sequencer failed to get latest root: %v", logID, err)
		return 0, err
	}
	seqGetRootLatency.Observe(s.sinceMillis(stageStart), label)
	stageStart = s.timeSource.Now()
	// TODO(al): Have a better detection mechanism for there being no stored root.
	// TODO(mhs): Might be better to create empty root in provisioning API when it exists
	if currentRoot.RootHash == nil {
		glog.Warningf("%v: Fresh log - no previous TreeHeads exist.", logID)
		// SignRoot starts a new transaction, and we've got one open here until
		// this function returns.
		// This explicit Close() is a work-around for the in-memory storage which
		// locks the tree for each TX.
		// TODO(al): Producing the first signed root for a new tree should be
		// handled by the provisioning, move it there.
		tx.Close()
		return 0, s.SignRoot(ctx, logID)
	}
	// There might be no work to be done. But we possibly still need to create an signed root if the
	// current one is too old. If there's work to be done then we'll be creating a root anyway.
	if len(leaves) == 0 {
		nowNanos := s.timeSource.Now().UnixNano()
		interval := time.Duration(nowNanos - currentRoot.TimestampNanos)
		if maxRootDurationInterval == 0 || interval < maxRootDurationInterval {
			// We have nothing to integrate into the tree
			glog.V(1).Infof("No leaves sequenced in this signing operation.")
			return 0, tx.Commit()
		}
		glog.Infof("Force new root generation as %v since last root", interval)
	}
	merkleTree, err := s.initMerkleTreeFromStorage(ctx, currentRoot, tx)
	if err != nil {
		return 0, err
	}
	seqInitTreeLatency.Observe(s.sinceMillis(stageStart), label)
	stageStart = s.timeSource.Now()
	// We've done all the reads, can now do the updates.
	// TODO: This relies on us being the only process updating the map, which isn't enforced yet
	// though the schema should now prevent multiple STHs being inserted with the same revision
	// number so it should not be possible for colliding updates to commit.
	newVersion := tx.WriteRevision()
	if got, want := newVersion, currentRoot.TreeRevision+int64(1); got != want {
		return 0, fmt.Errorf("%v: got writeRevision of %v, but expected %v", logID, got, want)
	}
	// Assign leaf sequence numbers and collate node updates
	nodeMap, sequencedLeaves, err := s.sequenceLeaves(merkleTree, leaves)
	if err != nil {
		return 0, err
	}
	seqWriteTreeLatency.Observe(s.sinceMillis(stageStart), label)
	stageStart = s.timeSource.Now()
	// We should still have the same number of leaves
	if want, got := len(leaves), len(sequencedLeaves); want != got {
		return 0, fmt.Errorf("%v: wanted: %v leaves after sequencing but we got: %v", logID, want, got)
	}
	// Write the new sequence numbers to the leaves in the DB
	if err := tx.UpdateSequencedLeaves(ctx, sequencedLeaves); err != nil {
		glog.Warningf("%v: Sequencer failed to update sequenced leaves: %v", logID, err)
		return 0, err
	}
	seqUpdateLeavesLatency.Observe(s.sinceMillis(stageStart), label)
	stageStart = s.timeSource.Now()
	// Build objects for the nodes to be updated. Because we deduped via the map each
	// node can only be created / updated once in each tree revision and they cannot
	// conflict when we do the storage update.
	targetNodes, err := s.buildNodesFromNodeMap(nodeMap, newVersion)
	if err != nil {
		// probably an internal error with map building, unexpected
		glog.Warningf("%v: Failed to build target nodes in sequencer: %v", logID, err)
		return 0, err
	}
	// Now insert or update the nodes affected by the above, at the new tree version
	if err := tx.SetMerkleNodes(ctx, targetNodes); err != nil {
		glog.Warningf("%v: Sequencer failed to set Merkle nodes: %v", logID, err)
		return 0, err
	}
	seqSetNodesLatency.Observe(s.sinceMillis(stageStart), label)
	stageStart = s.timeSource.Now()
	// Create the log root ready for signing
	newLogRoot := trillian.SignedLogRoot{
		RootHash:       merkleTree.CurrentRoot(),
		TimestampNanos: s.timeSource.Now().UnixNano(),
		TreeSize:       merkleTree.Size(),
		LogId:          currentRoot.LogId,
		TreeRevision:   newVersion,
	}
	seqTreeSize.Set(float64(merkleTree.Size()), label)
	// Hash and sign the root, update it with the signature
	signature, err := s.createRootSignature(ctx, newLogRoot)
	if err != nil {
		glog.Warningf("%v: signer failed to sign root: %v", logID, err)
		return 0, err
	}
	newLogRoot.Signature = signature
	if err := tx.StoreSignedLogRoot(ctx, newLogRoot); err != nil {
		glog.Warningf("%v: failed to write updated tree root: %v", logID, err)
		return 0, err
	}
	seqStoreRootLatency.Observe(s.sinceMillis(stageStart), label)
	stageStart = s.timeSource.Now()
	// The batch is now fully sequenced and we're done
	if err := tx.Commit(); err != nil {
		return 0, err
	}
	seqCommitLatency.Observe(s.sinceMillis(stageStart), label)
	// Let quota.Manager know about newly-sequenced entries.
	// All possibly influenced quotas are replenished: {Tree/Global, Read/Write}.
	// Implementations are tasked with filtering quotas that shouldn't be replenished.
	// TODO(codingllama): Consider adding a source-aware replenish method
	// (eg, qm.Replenish(ctx, tokens, specs, quota.SequencerSource)), so there's no ambiguity as to
	// where the tokens come from.
	if err := s.qm.PutTokens(ctx, len(leaves), []quota.Spec{
		{Group: quota.Tree, Kind: quota.Read, TreeID: logID},
		{Group: quota.Tree, Kind: quota.Write, TreeID: logID},
		{Group: quota.Global, Kind: quota.Read},
		{Group: quota.Global, Kind: quota.Write},
	}); err != nil {
		// Best-effort: quota replenishment failure does not fail the batch.
		glog.Warningf("Failed to replenish tokens for tree %v: %v", logID, err)
	}
	glog.Infof("%v: sequenced %v leaves, size %v, tree-revision %v", logID, len(leaves), newLogRoot.TreeSize, newLogRoot.TreeRevision)
	return len(leaves), nil
}
// SignRoot wraps up all the operations for creating a new log signed root.
// It reloads the tree state from storage, builds a root at the next tree
// revision with a fresh timestamp, signs it and stores it, all within one
// transaction.
func (s Sequencer) SignRoot(ctx context.Context, logID int64) error {
	tx, err := s.logStorage.BeginForTree(ctx, logID)
	if err != nil {
		glog.Warningf("%v: signer failed to start tx: %v", logID, err)
		return err
	}
	// Close is a no-op after a successful Commit; otherwise it rolls back.
	defer tx.Close()
	// Get the latest known root from storage
	currentRoot, err := tx.LatestSignedLogRoot(ctx)
	if err != nil {
		glog.Warningf("%v: signer failed to get latest root: %v", logID, err)
		return err
	}
	// Initialize a Merkle Tree from the state in storage. This should fail if the tree is
	// in a corrupt state.
	merkleTree, err := s.initMerkleTreeFromStorage(ctx, currentRoot, tx)
	if err != nil {
		return err
	}
	// Build the updated root, ready for signing
	newLogRoot := trillian.SignedLogRoot{
		RootHash:       merkleTree.CurrentRoot(),
		TimestampNanos: s.timeSource.Now().UnixNano(),
		TreeSize:       merkleTree.Size(),
		LogId:          currentRoot.LogId,
		TreeRevision:   currentRoot.TreeRevision + 1,
	}
	// Hash and sign the root
	signature, err := s.createRootSignature(ctx, newLogRoot)
	if err != nil {
		glog.Warningf("%v: signer failed to sign root: %v", logID, err)
		return err
	}
	newLogRoot.Signature = signature
	// Store the new root and we're done
	if err := tx.StoreSignedLogRoot(ctx, newLogRoot); err != nil {
		glog.Warningf("%v: signer failed to write updated root: %v", logID, err)
		return err
	}
	glog.V(2).Infof("%v: new signed root, size %v, tree-revision %v", logID, newLogRoot.TreeSize, newLogRoot.TreeRevision)
	return tx.Commit()
}
// sinceMillis() returns the time in milliseconds since a particular time, according to
// the TimeSource used by this sequencer.
func (s *Sequencer) sinceMillis(start time.Time) float64 {
	elapsed := s.timeSource.Now().Sub(start)
	// Integer division truncates to whole milliseconds before the float conversion.
	return float64(elapsed / time.Millisecond)
}
log: count leaves sequenced in a batch (#690)
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package log includes code that is specific to Trillian's log mode, particularly code
// for running sequencing operations.
package log
import (
"context"
"fmt"
"strconv"
"sync"
"time"
"github.com/golang/glog"
"github.com/google/trillian"
"github.com/google/trillian/crypto"
"github.com/google/trillian/crypto/sigpb"
"github.com/google/trillian/merkle"
"github.com/google/trillian/monitoring"
"github.com/google/trillian/quota"
"github.com/google/trillian/storage"
"github.com/google/trillian/util"
)
// logIDLabel is the metric label used to partition sequencer metrics by log ID.
const logIDLabel = "logid"

var (
	// once guards createMetrics so the package metrics are created exactly once.
	once                   sync.Once
	seqBatches             monitoring.Counter
	seqTreeSize            monitoring.Gauge
	seqLatency             monitoring.Histogram
	seqDequeueLatency      monitoring.Histogram
	seqGetRootLatency      monitoring.Histogram
	seqInitTreeLatency     monitoring.Histogram
	seqWriteTreeLatency    monitoring.Histogram
	seqUpdateLeavesLatency monitoring.Histogram
	seqSetNodesLatency     monitoring.Histogram
	seqStoreRootLatency    monitoring.Histogram
	seqCommitLatency       monitoring.Histogram
	// seqCounter counts leaves sequenced across all batches.
	seqCounter monitoring.Counter
)
// createMetrics initializes the package-level sequencer metrics using mf.
// A nil factory falls back to inert (no-op) metrics.
func createMetrics(mf monitoring.MetricFactory) {
	if mf == nil {
		mf = monitoring.InertMetricFactory{}
	}
	seqBatches = mf.NewCounter("sequencer_batches", "Number of sequencer batch operations", logIDLabel)
	seqTreeSize = mf.NewGauge("sequencer_tree_size", "Size of Merkle tree", logIDLabel)
	seqLatency = mf.NewHistogram("sequencer_latency", "Latency of sequencer batch operation in ms", logIDLabel)
	seqDequeueLatency = mf.NewHistogram("sequencer_latency_dequeue", "Latency of dequeue-leaves part of sequencer batch operation in ms", logIDLabel)
	seqGetRootLatency = mf.NewHistogram("sequencer_latency_get_root", "Latency of get-root part of sequencer batch operation in ms", logIDLabel)
	seqInitTreeLatency = mf.NewHistogram("sequencer_latency_init_tree", "Latency of init-tree part of sequencer batch operation in ms", logIDLabel)
	seqWriteTreeLatency = mf.NewHistogram("sequencer_latency_write_tree", "Latency of write-tree part of sequencer batch operation in ms", logIDLabel)
	seqUpdateLeavesLatency = mf.NewHistogram("sequencer_latency_update_leaves", "Latency of update-leaves part of sequencer batch operation in ms", logIDLabel)
	seqSetNodesLatency = mf.NewHistogram("sequencer_latency_set_nodes", "Latency of set-nodes part of sequencer batch operation in ms", logIDLabel)
	seqStoreRootLatency = mf.NewHistogram("sequencer_latency_store_root", "Latency of store-root part of sequencer batch operation in ms", logIDLabel)
	seqCommitLatency = mf.NewHistogram("sequencer_latency_commit", "Latency of commit part of sequencer batch operation in ms", logIDLabel)
	seqCounter = mf.NewCounter("sequencer_sequenced", "Number of leaves sequenced", logIDLabel)
}
// TODO(Martin2112): Add admin support for safely changing params like guard window during operation
// TODO(Martin2112): Add support for enabling and controlling sequencing as part of admin API

// Sequencer instances are responsible for integrating new leaves into a single log.
// Leaves will be assigned unique sequence numbers when they are processed.
// There is no strong ordering guarantee but in general entries will be processed
// in order of submission to the log.
type Sequencer struct {
	hasher     merkle.TreeHasher  // hash function for rebuilding the compact Merkle tree
	timeSource util.TimeSource    // injectable clock, used for root timestamps and latency metrics
	logStorage storage.LogStorage // transactional storage backend for leaves, nodes and roots
	signer     *crypto.Signer     // signs new log roots
	qm         quota.Manager      // replenished with tokens after successful sequencing
}

// maxTreeDepth sets an upper limit on the size of Log trees.
// TODO(al): We actually can't go beyond 2^63 entries because we use int64s,
// but we need to calculate tree depths from a multiple of 8 due to
// the subtrees.
const maxTreeDepth = 64
// NewSequencer creates a new Sequencer instance for the specified inputs.
func NewSequencer(
	hasher merkle.TreeHasher,
	timeSource util.TimeSource,
	logStorage storage.LogStorage,
	signer *crypto.Signer,
	mf monitoring.MetricFactory,
	qm quota.Manager) *Sequencer {
	// Metrics are package-global, so only the first Sequencer's factory is used.
	once.Do(func() {
		createMetrics(mf)
	})
	return &Sequencer{
		hasher:     hasher,
		timeSource: timeSource,
		logStorage: logStorage,
		signer:     signer,
		qm:         qm,
	}
}
// TODO: This currently doesn't use the batch api for fetching the required nodes. This
// would be more efficient but requires refactoring.

// buildMerkleTreeFromStorageAtRoot reconstructs the in-memory compact Merkle
// tree whose state matches root, fetching each needed node from storage (one
// read per node) at the root's tree revision.
func (s Sequencer) buildMerkleTreeFromStorageAtRoot(ctx context.Context, root trillian.SignedLogRoot, tx storage.TreeTX) (*merkle.CompactMerkleTree, error) {
	mt, err := merkle.NewCompactMerkleTreeWithState(s.hasher, root.TreeSize, func(depth int, index int64) ([]byte, error) {
		// Callback invoked by the tree builder for each node it needs.
		nodeID, err := storage.NewNodeIDForTreeCoords(int64(depth), index, maxTreeDepth)
		if err != nil {
			glog.Warningf("%v: Failed to create nodeID: %v", root.LogId, err)
			return nil, err
		}
		nodes, err := tx.GetMerkleNodes(ctx, root.TreeRevision, []storage.NodeID{nodeID})
		if err != nil {
			glog.Warningf("%v: Failed to get Merkle nodes: %v", root.LogId, err)
			return nil, err
		}
		// We expect to get exactly one node here
		if nodes == nil || len(nodes) != 1 {
			return nil, fmt.Errorf("%v: Did not retrieve one node while loading CompactMerkleTree, got %#v for ID %v@%v", root.LogId, nodes, nodeID.String(), root.TreeRevision)
		}
		return nodes[0].Hash, nil
	}, root.RootHash)
	return mt, err
}
// buildNodesFromNodeMap flattens the node map into a slice, stamping each
// node with the new tree revision. The returned error is always nil.
func (s Sequencer) buildNodesFromNodeMap(nodeMap map[string]storage.Node, newVersion int64) ([]storage.Node, error) {
	targetNodes := make([]storage.Node, 0, len(nodeMap))
	for _, node := range nodeMap {
		node.NodeRevision = newVersion
		targetNodes = append(targetNodes, node)
	}
	return targetNodes, nil
}
// sequenceLeaves folds each leaf into the compact tree, assigns its sequence
// number (LeafIndex), and collects every updated node — internal nodes via
// the AddLeafHash callback plus the leaf node itself — into a map keyed by
// node ID for a later batched storage write.
func (s Sequencer) sequenceLeaves(mt *merkle.CompactMerkleTree, leaves []*trillian.LogLeaf) (map[string]storage.Node, []*trillian.LogLeaf, error) {
	nodeMap := make(map[string]storage.Node)
	// Update the tree state and sequence the leaves and assign sequence numbers to the new leaves
	for i, leaf := range leaves {
		seq, err := mt.AddLeafHash(leaf.MerkleLeafHash, func(depth int, index int64, hash []byte) error {
			nodeID, err := storage.NewNodeIDForTreeCoords(int64(depth), index, maxTreeDepth)
			if err != nil {
				return err
			}
			nodeMap[nodeID.String()] = storage.Node{
				NodeID: nodeID,
				Hash:   hash,
			}
			return nil
		})
		if err != nil {
			return nil, nil, err
		}
		// The leaf has now been sequenced.
		leaves[i].LeafIndex = seq
		// Store leaf hash in the Merkle tree too:
		leafNodeID, err := storage.NewNodeIDForTreeCoords(0, seq, maxTreeDepth)
		if err != nil {
			return nil, nil, err
		}
		nodeMap[leafNodeID.String()] = storage.Node{
			NodeID: leafNodeID,
			Hash:   leaf.MerkleLeafHash,
		}
	}
	return nodeMap, leaves, nil
}
// initMerkleTreeFromStorage returns a compact Merkle tree matching currentRoot:
// a fresh empty tree for a size-zero root, otherwise one rebuilt from storage.
func (s Sequencer) initMerkleTreeFromStorage(ctx context.Context, currentRoot trillian.SignedLogRoot, tx storage.LogTreeTX) (*merkle.CompactMerkleTree, error) {
	if currentRoot.TreeSize != 0 {
		// Initialize the compact tree state to match the latest root in the database
		return s.buildMerkleTreeFromStorageAtRoot(ctx, currentRoot, tx)
	}
	return merkle.NewCompactMerkleTree(s.hasher), nil
}
// createRootSignature hashes the given log root and signs it with the
// sequencer's signer, logging a warning on failure.
func (s Sequencer) createRootSignature(ctx context.Context, root trillian.SignedLogRoot) (*sigpb.DigitallySigned, error) {
	sig, err := s.signer.Sign(crypto.HashLogRoot(root))
	if err == nil {
		return sig, nil
	}
	glog.Warningf("%v: signer failed to sign root: %v", root.LogId, err)
	return nil, err
}
// SequenceBatch wraps up all the operations needed to take a batch of queued leaves
// and integrate them into the tree.
//
// The sequence of operations is: dequeue up to limit leaves (excluding those
// inside guardWindow), rebuild the compact tree from the latest stored root,
// assign sequence numbers, write the updated leaves and Merkle nodes, then
// sign and store the new root — all inside one storage transaction. Returns
// the number of leaves sequenced.
//
// TODO(Martin2112): Can possibly improve by deferring a function that attempts to rollback,
// which will fail if the tx was committed. Should only do this if we can hide the details of
// the underlying storage transactions and it doesn't create other problems.
func (s Sequencer) SequenceBatch(ctx context.Context, logID int64, limit int, guardWindow, maxRootDurationInterval time.Duration) (int, error) {
	// Track the overall start and the start of the current stage so per-stage
	// latency metrics can be recorded incrementally below.
	start := s.timeSource.Now()
	stageStart := start
	// Metrics are labelled with the log ID.
	label := strconv.FormatInt(logID, 10)
	tx, err := s.logStorage.BeginForTree(ctx, logID)
	if err != nil {
		glog.Warningf("%v: Sequencer failed to start tx: %v", logID, err)
		return 0, err
	}
	// Close rolls back if the tx was not committed; it is a no-op otherwise.
	defer tx.Close()
	// Count this batch and observe total latency regardless of outcome.
	defer seqBatches.Inc(label)
	defer func() { seqLatency.Observe(s.sinceMillis(start), label) }()
	// Very recent leaves inside the guard window will not be available for sequencing
	guardCutoffTime := s.timeSource.Now().Add(-guardWindow)
	leaves, err := tx.DequeueLeaves(ctx, limit, guardCutoffTime)
	if err != nil {
		glog.Warningf("%v: Sequencer failed to dequeue leaves: %v", logID, err)
		return 0, err
	}
	seqDequeueLatency.Observe(s.sinceMillis(stageStart), label)
	stageStart = s.timeSource.Now()
	// Get the latest known root from storage
	currentRoot, err := tx.LatestSignedLogRoot(ctx)
	if err != nil {
		glog.Warningf("%v: Sequencer failed to get latest root: %v", logID, err)
		return 0, err
	}
	seqGetRootLatency.Observe(s.sinceMillis(stageStart), label)
	stageStart = s.timeSource.Now()
	// TODO(al): Have a better detection mechanism for there being no stored root.
	// TODO(mhs): Might be better to create empty root in provisioning API when it exists
	if currentRoot.RootHash == nil {
		glog.Warningf("%v: Fresh log - no previous TreeHeads exist.", logID)
		// SignRoot starts a new transaction, and we've got one open here until
		// this function returns.
		// This explicit Close() is a work-around for the in-memory storage which
		// locks the tree for each TX.
		// TODO(al): Producing the first signed root for a new tree should be
		// handled by the provisioning, move it there.
		tx.Close()
		return 0, s.SignRoot(ctx, logID)
	}
	// There might be no work to be done. But we possibly still need to create an signed root if the
	// current one is too old. If there's work to be done then we'll be creating a root anyway.
	if len(leaves) == 0 {
		nowNanos := s.timeSource.Now().UnixNano()
		interval := time.Duration(nowNanos - currentRoot.TimestampNanos)
		// maxRootDurationInterval == 0 disables forced root generation.
		if maxRootDurationInterval == 0 || interval < maxRootDurationInterval {
			// We have nothing to integrate into the tree
			glog.V(1).Infof("No leaves sequenced in this signing operation.")
			return 0, tx.Commit()
		}
		glog.Infof("Force new root generation as %v since last root", interval)
	}
	merkleTree, err := s.initMerkleTreeFromStorage(ctx, currentRoot, tx)
	if err != nil {
		return 0, err
	}
	seqInitTreeLatency.Observe(s.sinceMillis(stageStart), label)
	stageStart = s.timeSource.Now()
	// We've done all the reads, can now do the updates.
	// TODO: This relies on us being the only process updating the map, which isn't enforced yet
	// though the schema should now prevent multiple STHs being inserted with the same revision
	// number so it should not be possible for colliding updates to commit.
	newVersion := tx.WriteRevision()
	// Sanity check: the revision this tx will write must immediately follow
	// the revision of the root we just read.
	if got, want := newVersion, currentRoot.TreeRevision+int64(1); got != want {
		return 0, fmt.Errorf("%v: got writeRevision of %v, but expected %v", logID, got, want)
	}
	// Assign leaf sequence numbers and collate node updates
	nodeMap, sequencedLeaves, err := s.sequenceLeaves(merkleTree, leaves)
	if err != nil {
		return 0, err
	}
	seqWriteTreeLatency.Observe(s.sinceMillis(stageStart), label)
	stageStart = s.timeSource.Now()
	// We should still have the same number of leaves
	if want, got := len(leaves), len(sequencedLeaves); want != got {
		return 0, fmt.Errorf("%v: wanted: %v leaves after sequencing but we got: %v", logID, want, got)
	}
	// Write the new sequence numbers to the leaves in the DB
	if err := tx.UpdateSequencedLeaves(ctx, sequencedLeaves); err != nil {
		glog.Warningf("%v: Sequencer failed to update sequenced leaves: %v", logID, err)
		return 0, err
	}
	seqUpdateLeavesLatency.Observe(s.sinceMillis(stageStart), label)
	stageStart = s.timeSource.Now()
	// Build objects for the nodes to be updated. Because we deduped via the map each
	// node can only be created / updated once in each tree revision and they cannot
	// conflict when we do the storage update.
	targetNodes, err := s.buildNodesFromNodeMap(nodeMap, newVersion)
	if err != nil {
		// probably an internal error with map building, unexpected
		glog.Warningf("%v: Failed to build target nodes in sequencer: %v", logID, err)
		return 0, err
	}
	// Now insert or update the nodes affected by the above, at the new tree version
	if err := tx.SetMerkleNodes(ctx, targetNodes); err != nil {
		glog.Warningf("%v: Sequencer failed to set Merkle nodes: %v", logID, err)
		return 0, err
	}
	seqSetNodesLatency.Observe(s.sinceMillis(stageStart), label)
	stageStart = s.timeSource.Now()
	// Create the log root ready for signing
	newLogRoot := trillian.SignedLogRoot{
		RootHash:       merkleTree.CurrentRoot(),
		TimestampNanos: s.timeSource.Now().UnixNano(),
		TreeSize:       merkleTree.Size(),
		LogId:          currentRoot.LogId,
		TreeRevision:   newVersion,
	}
	seqTreeSize.Set(float64(merkleTree.Size()), label)
	// Hash and sign the root, update it with the signature
	signature, err := s.createRootSignature(ctx, newLogRoot)
	if err != nil {
		glog.Warningf("%v: signer failed to sign root: %v", logID, err)
		return 0, err
	}
	newLogRoot.Signature = signature
	if err := tx.StoreSignedLogRoot(ctx, newLogRoot); err != nil {
		glog.Warningf("%v: failed to write updated tree root: %v", logID, err)
		return 0, err
	}
	seqStoreRootLatency.Observe(s.sinceMillis(stageStart), label)
	stageStart = s.timeSource.Now()
	// The batch is now fully sequenced and we're done
	if err := tx.Commit(); err != nil {
		return 0, err
	}
	seqCommitLatency.Observe(s.sinceMillis(stageStart), label)
	// Let quota.Manager know about newly-sequenced entries.
	// All possibly influenced quotas are replenished: {Tree/Global, Read/Write}.
	// Implementations are tasked with filtering quotas that shouldn't be replenished.
	// TODO(codingllama): Consider adding a source-aware replenish method
	// (eg, qm.Replenish(ctx, tokens, specs, quota.SequencerSource)), so there's no ambiguity as to
	// where the tokens come from.
	if err := s.qm.PutTokens(ctx, len(leaves), []quota.Spec{
		{Group: quota.Tree, Kind: quota.Read, TreeID: logID},
		{Group: quota.Tree, Kind: quota.Write, TreeID: logID},
		{Group: quota.Global, Kind: quota.Read},
		{Group: quota.Global, Kind: quota.Write},
	}); err != nil {
		// Quota replenishment is best-effort: the batch is already committed.
		glog.Warningf("Failed to replenish tokens for tree %v: %v", logID, err)
	}
	seqCounter.Add(float64(len(leaves)), label)
	glog.Infof("%v: sequenced %v leaves, size %v, tree-revision %v", logID, len(leaves), newLogRoot.TreeSize, newLogRoot.TreeRevision)
	return len(leaves), nil
}
// SignRoot wraps up all the operations for creating a new log signed root.
// Unlike SequenceBatch it integrates no new leaves: it re-reads the current
// tree state, builds a root at the next revision with a fresh timestamp,
// signs it and stores it, all within its own transaction.
func (s Sequencer) SignRoot(ctx context.Context, logID int64) error {
	tx, err := s.logStorage.BeginForTree(ctx, logID)
	if err != nil {
		glog.Warningf("%v: signer failed to start tx: %v", logID, err)
		return err
	}
	// Close rolls back if the tx was not committed; it is a no-op otherwise.
	defer tx.Close()
	// Get the latest known root from storage
	currentRoot, err := tx.LatestSignedLogRoot(ctx)
	if err != nil {
		glog.Warningf("%v: signer failed to get latest root: %v", logID, err)
		return err
	}
	// Initialize a Merkle Tree from the state in storage. This should fail if the tree is
	// in a corrupt state.
	merkleTree, err := s.initMerkleTreeFromStorage(ctx, currentRoot, tx)
	if err != nil {
		return err
	}
	// Build the updated root, ready for signing
	newLogRoot := trillian.SignedLogRoot{
		RootHash:       merkleTree.CurrentRoot(),
		TimestampNanos: s.timeSource.Now().UnixNano(),
		TreeSize:       merkleTree.Size(),
		LogId:          currentRoot.LogId,
		TreeRevision:   currentRoot.TreeRevision + 1,
	}
	// Hash and sign the root
	signature, err := s.createRootSignature(ctx, newLogRoot)
	if err != nil {
		glog.Warningf("%v: signer failed to sign root: %v", logID, err)
		return err
	}
	newLogRoot.Signature = signature
	// Store the new root and we're done
	if err := tx.StoreSignedLogRoot(ctx, newLogRoot); err != nil {
		glog.Warningf("%v: signer failed to write updated root: %v", logID, err)
		return err
	}
	glog.V(2).Infof("%v: new signed root, size %v, tree-revision %v", logID, newLogRoot.TreeSize, newLogRoot.TreeRevision)
	return tx.Commit()
}
// sinceMillis() returns the time in milliseconds since a particular time, according to
// the TimeSource used by this sequencer. Sub-millisecond remainders are truncated.
func (s *Sequencer) sinceMillis(start time.Time) float64 {
	elapsed := s.timeSource.Now().Sub(start)
	return float64(elapsed / time.Millisecond)
}
|
package geolookup
import (
"fmt"
"math"
"math/rand"
"net/http"
"reflect"
"sync"
"sync/atomic"
"time"
"github.com/getlantern/geolookup"
"github.com/getlantern/golog"
"github.com/getlantern/flashlight/pubsub"
"github.com/getlantern/flashlight/ui"
)
const (
	// messageType identifies geolookup messages on the UI service channel.
	messageType = `GeoLookup`
	// basePublishSeconds is the nominal interval between location lookups.
	basePublishSeconds = 30
	// publishSecondsVariance is the random spread applied to that interval.
	publishSecondsVariance = basePublishSeconds - 10
	// retryWaitMillis is the base wait for exponential backoff after a failure.
	retryWaitMillis = 100
)

var (
	log = golog.LoggerFor("flashlight.geolookup")

	service  *ui.Service  // UI service used to publish location updates
	client   atomic.Value // holds the *http.Client used for lookups
	cfgMutex sync.Mutex   // guards one-time service registration in Configure
	location atomic.Value // holds the last known *geolookup.City, if any
)
// GetLocation returns the last known location, or nil when no lookup has
// succeeded yet.
func GetLocation() *geolookup.City {
	if loc, ok := location.Load().(*geolookup.City); ok {
		return loc
	}
	return nil
}
// GetCountry returns the ISO country code of the last known location, or the
// empty string when the location is not yet known.
func GetCountry() string {
	if loc := GetLocation(); loc != nil {
		return loc.Country.IsoCode
	}
	return ""
}
// Configure configures geolookup to use the given http.Client to perform
// lookups. geolookup runs in a continuous loop, periodically updating its
// location and publishing updates to any connected clients. We do this
// continually in order to detect when the computer's location has changed.
func Configure(newClient *http.Client) {
	cfgMutex.Lock()
	defer cfgMutex.Unlock()

	client.Store(newClient)

	// The service and its background loops are only started once; later calls
	// just swap in the new client.
	if service != nil {
		return
	}
	if err := registerService(); err != nil {
		log.Errorf("Unable to register service: %s", err)
		return
	}
	go write()
	go read()
	log.Debug("Running")
}
// registerService registers the geolookup service with the UI, greeting each
// newly connected client with the last known location when one exists.
func registerService() error {
	hello := func(write func(interface{}) error) error {
		loc := GetLocation()
		if loc == nil {
			log.Trace("No lastKnownLocation, not sending anything to client")
			return nil
		}
		log.Trace("Sending last known location to new client")
		return write(loc)
	}
	svc, err := ui.Register(messageType, nil, hello)
	service = svc
	return err
}
// write runs forever, periodically refreshing the current location and
// publishing it to connected UI clients. Lookup failures are retried with
// exponential backoff, capped by the normal publish interval.
func write() {
	consecutiveFailures := 0
	for {
		// Wait a random amount of time (to avoid looking too suspicious)
		// Note - rand was seeded with the startup time in flashlight.go
		n := rand.Intn(publishSecondsVariance)
		wait := time.Duration(basePublishSeconds-publishSecondsVariance/2+n) * time.Second
		oldLocation := GetLocation()
		newLocation, ip, err := geolookup.LookupIPWithClient("", client.Load().(*http.Client))
		if err == nil {
			consecutiveFailures = 0
			if !reflect.DeepEqual(newLocation, oldLocation) {
				log.Debugf("Location changed")
				location.Store(newLocation)
			}
			// Always publish location, even if unchanged
			pubsub.Pub(pubsub.IP, ip)
			// NOTE(review): this send blocks if nothing drains service.Out —
			// presumably the ui package consumes it; confirm.
			service.Out <- newLocation
		} else {
			msg := fmt.Sprintf("Unable to get current location: %s", err)
			// When retrying after a failure, wait a different amount of time
			// Exponential backoff: retryWaitMillis * 2^consecutiveFailures.
			retryWait := time.Duration(math.Pow(2, float64(consecutiveFailures))*float64(retryWaitMillis)) * time.Millisecond
			if retryWait < wait {
				// Still backing off quickly; log quietly.
				log.Debug(msg)
				wait = retryWait
			} else {
				// Backoff has grown past the normal interval; escalate.
				log.Error(msg)
			}
			log.Debugf("Waiting %v before retrying", wait)
			consecutiveFailures += 1
			// If available, publish last known location
			if oldLocation != nil {
				service.Out <- oldLocation
			}
		}
		time.Sleep(wait)
	}
}
// read drains incoming messages from the UI service channel so that senders
// never block; any messages sent to this service carry no meaning.
func read() {
	// `for range` is the idiomatic form; `for _ = range` is flagged by gofmt -s.
	for range service.In {
		// Discard message, just in case any message is sent to this service.
	}
}
use new ip and country headers closes #2943
package geolookup
import (
"fmt"
"io/ioutil"
"math"
"math/rand"
"net/http"
"sync"
"sync/atomic"
"time"
"github.com/getlantern/enproxy"
"github.com/getlantern/golog"
"github.com/getlantern/flashlight/pubsub"
"github.com/getlantern/flashlight/ui"
)
const (
	// messageType identifies geolookup messages on the UI service channel.
	messageType = `GeoLookup`
	// basePublishSeconds is the nominal interval between lookups.
	basePublishSeconds = 30
	// publishSecondsVariance is the random spread applied to that interval.
	publishSecondsVariance = basePublishSeconds - 10
	// retryWaitMillis is the base wait for exponential backoff on failure.
	retryWaitMillis = 100
)

var (
	log = golog.LoggerFor("flashlight.geolookup")

	service  *ui.Service  // UI service used to publish updates
	client   atomic.Value // holds the *http.Client used for lookups
	cfgMutex sync.Mutex   // guards one-time service registration in Configure
	country  atomic.Value // last known country ISO code (string)
	ip       atomic.Value // last known public IP address (string)
)
// GetIp returns the last known public IP address, or "" when it is unknown.
func GetIp() string {
	if v, ok := ip.Load().(string); ok {
		return v
	}
	return ""
}
// GetCountry returns the last known country ISO code, or "" when unknown.
func GetCountry() string {
	if v, ok := country.Load().(string); ok {
		return v
	}
	return ""
}
// Configure configures geolookup to use the given http.Client to perform
// lookups. geolookup runs in a continuous loop, periodically updating its
// location and publishing updates to any connected clients. We do this
// continually in order to detect when the computer's location has changed.
func Configure(newClient *http.Client) {
	cfgMutex.Lock()
	defer cfgMutex.Unlock()

	client.Store(newClient)

	// The service and its background loops are only started once; later calls
	// just swap in the new client.
	if service != nil {
		return
	}
	if err := registerService(); err != nil {
		log.Errorf("Unable to register service: %s", err)
		return
	}
	go write()
	go read()
	log.Debug("Running")
}
// registerService registers the geolookup service with the UI, greeting each
// newly connected client with the last known country when one exists.
func registerService() error {
	hello := func(write func(interface{}) error) error {
		c := GetCountry()
		if c == "" {
			log.Trace("No lastKnownCountry, not sending anything to client")
			return nil
		}
		log.Trace("Sending last known location to new client")
		return write(c)
	}
	svc, err := ui.Register(messageType, nil, hello)
	service = svc
	return err
}
// lookupIp determines this client's public IP address and country by making a
// HEAD request to the Lantern fallback pool and reading the Lantern-Ip and
// Lantern-Country response headers. It returns (country, ip, error).
//
// NOTE(review): setting Timeout mutates the shared client passed in, so the
// 60s timeout applies to every request made with that client — confirm this
// is intended.
func lookupIp(httpClient *http.Client) (string, string, error) {
	httpClient.Timeout = 60 * time.Second
	lookupURL := "http://nl.fallbacks.getiantem.org"
	req, err := http.NewRequest("HEAD", lookupURL, nil)
	if err != nil {
		return "", "", fmt.Errorf("Could not create request: %q", err)
	}
	// Enproxy returns an error if this isn't there.
	req.Header.Set(enproxy.X_ENPROXY_ID, "1")
	resp, err := httpClient.Do(req)
	if err != nil {
		return "", "", fmt.Errorf("Could not get response from server: %q", err)
	}
	defer func() {
		if err := resp.Body.Close(); err != nil {
			// Fixed typo: "reponse" -> "response".
			log.Debugf("Unable to close response body: %v", err)
		}
	}()
	if resp.StatusCode != http.StatusOK {
		// Include the body in the error where readable, to aid debugging.
		body := "body unreadable"
		if b, err := ioutil.ReadAll(resp.Body); err == nil {
			body = string(b)
		}
		return "", "", fmt.Errorf("Unexpected response status %d: %v", resp.StatusCode, body)
	}
	ip := resp.Header.Get("Lantern-Ip")
	country := resp.Header.Get("Lantern-Country")
	log.Debugf("Got IP and country: %v, %v", ip, country)
	return country, ip, nil
}
// write runs forever, periodically refreshing the current IP/country and
// publishing the country to connected UI clients. Lookup failures are retried
// with exponential backoff, capped by the normal publish interval.
func write() {
	consecutiveFailures := 0
	for {
		// Wait a random amount of time (to avoid looking too suspicious)
		// Note - rand was seeded with the startup time in flashlight.go
		n := rand.Intn(publishSecondsVariance)
		wait := time.Duration(basePublishSeconds-publishSecondsVariance/2+n) * time.Second
		oldIp := GetIp()
		oldCountry := GetCountry()
		newCountry, newIp, err := lookupIp(client.Load().(*http.Client))
		// Stale alternative kept for reference; the header-based lookupIp above replaced it.
		//newCountry, ip, err := geolookup.LookupIPWithClient("", client.Load().(*http.Client))
		if err == nil {
			consecutiveFailures = 0
			if newIp != oldIp {
				log.Debugf("IP changed")
				ip.Store(newIp)
			}
			// Always publish location, even if unchanged
			pubsub.Pub(pubsub.IP, newIp)
			// NOTE(review): the new country is published but never stored in
			// the `country` atomic here — confirm whether that is intended.
			service.Out <- newCountry
		} else {
			msg := fmt.Sprintf("Unable to get current location: %s", err)
			// When retrying after a failure, wait a different amount of time
			// Exponential backoff: retryWaitMillis * 2^consecutiveFailures.
			retryWait := time.Duration(math.Pow(2, float64(consecutiveFailures))*float64(retryWaitMillis)) * time.Millisecond
			if retryWait < wait {
				log.Debug(msg)
				wait = retryWait
			} else {
				// Backoff has grown past the normal interval; escalate.
				log.Error(msg)
			}
			log.Debugf("Waiting %v before retrying", wait)
			consecutiveFailures += 1
			// If available, publish last known location
			if oldCountry != "" {
				service.Out <- oldCountry
			}
		}
		time.Sleep(wait)
	}
}
// read drains incoming messages from the UI service channel so that senders
// never block; any messages sent to this service carry no meaning.
func read() {
	// `for range` is the idiomatic form; `for _ = range` is flagged by gofmt -s.
	for range service.In {
		// Discard message, just in case any message is sent to this service.
	}
}
|
package geolookup
import (
"fmt"
"math"
"math/rand"
"net/http"
"net/http/httputil"
"sync"
"sync/atomic"
"time"
"github.com/getlantern/golog"
"github.com/getlantern/flashlight/pubsub"
"github.com/getlantern/flashlight/ui"
)
const (
	// messageType identifies geolookup messages on the UI service channel.
	messageType = `GeoLookup`
	// basePublishSeconds is the nominal interval between lookups.
	basePublishSeconds = 30
	// publishSecondsVariance is the random spread applied to that interval.
	publishSecondsVariance = basePublishSeconds - 10
	// retryWaitMillis is the base wait for exponential backoff on failure.
	retryWaitMillis = 100
)

var (
	log = golog.LoggerFor("flashlight.geolookup")

	service  *ui.Service  // UI service used to publish updates
	client   atomic.Value // holds the *http.Client used for lookups
	cfgMutex sync.Mutex   // guards one-time service registration in Configure
	country  = atomicString() // last known country ISO code, "" until known
	ip       = atomicString() // last known public IP address, "" until known
)
// atomicString builds an atomic.Value pre-loaded with the empty string so
// callers can type-assert Load() to string without nil checks.
//
// NOTE(review): sync/atomic documents that a Value must not be copied after
// first use; returning it by value after Store relies on this happening only
// during package initialization, before any concurrent access — confirm.
func atomicString() atomic.Value {
	val := atomic.Value{}
	val.Store("")
	return val
}
// GetIp returns the last known public IP address ("" until a lookup succeeds).
func GetIp() string {
	v := ip.Load()
	return v.(string)
}
// GetCountry returns the last known country ISO code ("" until known).
func GetCountry() string {
	v := country.Load()
	return v.(string)
}
// Configure configures geolookup to use the given http.Client to perform
// lookups. geolookup runs in a continuous loop, periodically updating its
// location and publishing updates to any connected clients. We do this
// continually in order to detect when the computer's location has changed.
func Configure(newClient *http.Client) {
	cfgMutex.Lock()
	defer cfgMutex.Unlock()

	// Avoid annoying checks for nil later.
	ip.Store("")
	country.Store("")
	client.Store(newClient)

	// The service and its background loops are only started once; later calls
	// just swap in the new client.
	if service != nil {
		return
	}
	if err := registerService(); err != nil {
		log.Errorf("Unable to register service: %s", err)
		return
	}
	go write()
	go read()
	log.Debug("Running")
}
// registerService registers the geolookup service with the UI, greeting each
// newly connected client with the last known country when one exists.
func registerService() error {
	hello := func(write func(interface{}) error) error {
		c := GetCountry()
		if c == "" {
			log.Trace("No lastKnownCountry, not sending anything to client")
			return nil
		}
		log.Trace("Sending last known location to new client")
		return write(c)
	}
	svc, err := ui.Register(messageType, nil, hello)
	service = svc
	return err
}
// lookupIp determines this client's public IP address and country by making a
// HEAD request to the Lantern fallback pool and reading the Lantern-Ip and
// Lantern-Country response headers. It returns (country, ip, error).
//
// NOTE(review): setting Timeout mutates the shared client passed in, so the
// 60s timeout applies to every request made with that client — confirm this
// is intended.
func lookupIp(httpClient *http.Client) (string, string, error) {
	httpClient.Timeout = 60 * time.Second
	// Note this will typically be an HTTP client that uses direct domain fronting to
	// hit our server pool in the Netherlands.
	req, err := http.NewRequest("HEAD", "http://nl.fallbacks.getiantem.org", nil)
	if err != nil {
		return "", "", fmt.Errorf("Could not create request: %q", err)
	}
	resp, err := httpClient.Do(req)
	if err != nil {
		return "", "", fmt.Errorf("Could not get response from server: %q", err)
	}
	defer func() {
		if err := resp.Body.Close(); err != nil {
			// Fixed typo: "reponse" -> "response".
			log.Debugf("Unable to close response body: %v", err)
		}
	}()
	if resp.StatusCode != http.StatusOK {
		// Dump the full response to aid diagnosis of unexpected statuses.
		if full, err := httputil.DumpResponse(resp, true); err != nil {
			log.Errorf("Could not read full response %v", err)
		} else {
			log.Errorf("Unexpected response to geo IP lookup: %v", string(full))
		}
		return "", "", fmt.Errorf("Unexpected response status %d", resp.StatusCode)
	}
	ip := resp.Header.Get("Lantern-Ip")
	country := resp.Header.Get("Lantern-Country")
	log.Debugf("Got IP and country: %v, %v", ip, country)
	return country, ip, nil
}
// write runs forever, periodically refreshing the current IP/country and
// publishing the country to connected UI clients. Lookup failures are retried
// with exponential backoff, capped by the normal publish interval.
func write() {
	consecutiveFailures := 0
	for {
		// Wait a random amount of time (to avoid looking too suspicious)
		// Note - rand was seeded with the startup time in flashlight.go
		n := rand.Intn(publishSecondsVariance)
		wait := time.Duration(basePublishSeconds-publishSecondsVariance/2+n) * time.Second
		oldIp := GetIp()
		oldCountry := GetCountry()
		newCountry, newIp, err := lookupIp(client.Load().(*http.Client))
		if err == nil {
			consecutiveFailures = 0
			if newIp != oldIp {
				log.Debugf("IP changed")
				ip.Store(newIp)
			}
			// Always publish location, even if unchanged
			pubsub.Pub(pubsub.IP, newIp)
			// NOTE(review): the new country is published but never stored in
			// the `country` atomic here — confirm whether that is intended.
			service.Out <- newCountry
		} else {
			msg := fmt.Sprintf("Unable to get current location: %s", err)
			// When retrying after a failure, wait a different amount of time
			// Exponential backoff: retryWaitMillis * 2^consecutiveFailures.
			retryWait := time.Duration(math.Pow(2, float64(consecutiveFailures))*float64(retryWaitMillis)) * time.Millisecond
			if retryWait < wait {
				log.Debug(msg)
				wait = retryWait
			} else {
				// Backoff has grown past the normal interval; escalate.
				log.Error(msg)
			}
			log.Debugf("Waiting %v before retrying", wait)
			consecutiveFailures += 1
			// If available, publish last known location
			if oldCountry != "" {
				service.Out <- oldCountry
			}
		}
		time.Sleep(wait)
	}
}
// read drains incoming messages from the UI service channel so that senders
// never block; any messages sent to this service carry no meaning.
func read() {
	// `for range` is the idiomatic form; `for _ = range` is flagged by gofmt -s.
	for range service.In {
		// Discard message, just in case any message is sent to this service.
	}
}
fixed geolookup
package geolookup
import (
"fmt"
"math"
"math/rand"
"net/http"
"sync"
"sync/atomic"
"time"
geo "github.com/getlantern/geolookup"
"github.com/getlantern/golog"
"github.com/getlantern/flashlight/pubsub"
"github.com/getlantern/flashlight/ui"
)
const (
	// messageType identifies geolookup messages on the UI service channel.
	messageType = `GeoLookup`
	// basePublishSeconds is the nominal interval between lookups.
	basePublishSeconds = 30
	// publishSecondsVariance is the random spread applied to that interval.
	publishSecondsVariance = basePublishSeconds - 10
	// retryWaitMillis is the base wait for exponential backoff on failure.
	retryWaitMillis = 100
)

var (
	log = golog.LoggerFor("flashlight.geolookup")

	service  *ui.Service  // UI service used to publish updates
	client   atomic.Value // holds the *http.Client used for lookups
	cfgMutex sync.Mutex   // guards one-time service registration in Configure
	country  = atomicString() // last known country ISO code, "" until known
	ip       = atomicString() // last known public IP address, "" until known
)
// atomicString builds an atomic.Value pre-loaded with the empty string so
// callers can type-assert Load() to string without nil checks.
//
// NOTE(review): sync/atomic documents that a Value must not be copied after
// first use; returning it by value after Store relies on this happening only
// during package initialization, before any concurrent access — confirm.
func atomicString() atomic.Value {
	val := atomic.Value{}
	val.Store("")
	return val
}
// GetIp returns the last known public IP address ("" until a lookup succeeds).
func GetIp() string {
	v := ip.Load()
	return v.(string)
}
// GetCountry returns the last known country ISO code ("" until known).
func GetCountry() string {
	v := country.Load()
	return v.(string)
}
// Configure configures geolookup to use the given http.Client to perform
// lookups. geolookup runs in a continuous loop, periodically updating its
// location and publishing updates to any connected clients. We do this
// continually in order to detect when the computer's location has changed.
func Configure(newClient *http.Client) {
	cfgMutex.Lock()
	defer cfgMutex.Unlock()

	// Avoid annoying checks for nil later.
	ip.Store("")
	country.Store("")
	client.Store(newClient)

	// The service and its background loops are only started once; later calls
	// just swap in the new client.
	if service != nil {
		return
	}
	if err := registerService(); err != nil {
		log.Errorf("Unable to register service: %s", err)
		return
	}
	go write()
	go read()
	log.Debug("Running")
}
// registerService registers the geolookup service with the UI, greeting each
// newly connected client with the last known country when one exists.
func registerService() error {
	hello := func(write func(interface{}) error) error {
		c := GetCountry()
		if c == "" {
			log.Trace("No lastKnownCountry, not sending anything to client")
			return nil
		}
		log.Trace("Sending last known location to new client")
		return write(c)
	}
	svc, err := ui.Register(messageType, nil, hello)
	service = svc
	return err
}
// lookupIp determines this client's country and public IP address by running
// a geolocation lookup through the given http.Client. It returns
// (country ISO code, ip, error).
//
// The previous header-based HEAD-request implementation was left here as a
// large commented-out block; it referenced net/http/httputil (not imported by
// this file) and has been removed as dead code.
func lookupIp(httpClient *http.Client) (string, string, error) {
	city, ip, err := geo.LookupIPWithClient("", httpClient)
	if err != nil {
		return "", "", err
	}
	return city.Country.IsoCode, ip, nil
}
// write runs forever, periodically refreshing the current IP/country and
// publishing the country to connected UI clients. Lookup failures are retried
// with exponential backoff, capped by the normal publish interval.
func write() {
	consecutiveFailures := 0
	for {
		// Wait a random amount of time (to avoid looking too suspicious)
		// Note - rand was seeded with the startup time in flashlight.go
		n := rand.Intn(publishSecondsVariance)
		wait := time.Duration(basePublishSeconds-publishSecondsVariance/2+n) * time.Second
		oldIp := GetIp()
		oldCountry := GetCountry()
		newCountry, newIp, err := lookupIp(client.Load().(*http.Client))
		if err == nil {
			consecutiveFailures = 0
			if newIp != oldIp {
				log.Debugf("IP changed")
				ip.Store(newIp)
			}
			// Always publish location, even if unchanged
			pubsub.Pub(pubsub.IP, newIp)
			// NOTE(review): the new country is published but never stored in
			// the `country` atomic here — confirm whether that is intended.
			service.Out <- newCountry
		} else {
			msg := fmt.Sprintf("Unable to get current location: %s", err)
			// When retrying after a failure, wait a different amount of time
			// Exponential backoff: retryWaitMillis * 2^consecutiveFailures.
			retryWait := time.Duration(math.Pow(2, float64(consecutiveFailures))*float64(retryWaitMillis)) * time.Millisecond
			if retryWait < wait {
				log.Debug(msg)
				wait = retryWait
			} else {
				// Backoff has grown past the normal interval; escalate.
				log.Error(msg)
			}
			log.Debugf("Waiting %v before retrying", wait)
			consecutiveFailures += 1
			// If available, publish last known location
			if oldCountry != "" {
				service.Out <- oldCountry
			}
		}
		time.Sleep(wait)
	}
}
// read drains incoming messages from the UI service channel so that senders
// never block; any messages sent to this service carry no meaning.
func read() {
	// `for range` is the idiomatic form; `for _ = range` is flagged by gofmt -s.
	for range service.In {
		// Discard message, just in case any message is sent to this service.
	}
}
|
package logger
import (
"io"
"log"
"net/http"
"os"
"strconv"
"strings"
"time"
"github.com/satori/go.uuid"
)
// option mutates a handler; see the pattern described at
// http://commandcenter.blogspot.com/2014/01/self-referential-functions-and-design.html
type option func(*handler)

// Internal handler holding the logging configuration assembled from options.
type handler struct {
	name   string    // application name, expanded via the {appname} directive
	format string    // log line template; see Format for the directive list
	out    io.Writer // destination for log output
}
// AppName allows to set the application name to log.
func AppName(name string) option {
	return func(h *handler) {
		h.name = name
	}
}
// Format allows to set a custom log format. Although, the timestamp is always logged at the beginning.
// This handler is a bit opinionated.
//
// Directives:
//
//  {remote_user}       : Remote user if Basic Auth credentials were sent
//  {remote_ip}         : Remote IP address.
//  {latency}           : The time taken to serve the request, in microseconds.
//  {latency_human}     : The time taken to serve the request, human readable.
//  {id}                : The request ID.
//  {host}              : The Host header sent to the server
//  {method}            : The request method. Ex: GET, POST, DELETE, etc.
//  {url}               : The URL path requested.
//  {query}             : Request's query string
//  {rxbytes}           : Bytes received without headers
//  {txbytes}           : Bytes sent, excluding HTTP headers.
//  {status}            : Status sent to the client
//  {useragent}         : User Agent
//  {appname}           : The application name for this server
//  {referer}           : The site from where the request came from
//
func Format(format string) option {
	return func(h *handler) {
		h.format = format
	}
}
// Output allows setting an output writer for logging to be written to
func Output(out io.Writer) option {
	return func(h *handler) {
		h.out = out
	}
}
// Handler does HTTP request logging
// Each request produces two log lines: one before serving (latency shown as
// "...") and one after, carrying the final status, size and latency. A
// RequestID header is generated when the response doesn't already carry one.
func Handler(h http.Handler, opts ...option) http.Handler {
	// Default options
	handler := &handler{
		name:   "unknown",
		format: `{appname} {id} remote_ip={remote_ip} {method} "{host}{url}?{query}" rxbytes={rxbytes} status={status} latency_human={latency_human} latency={latency} txbytes={txbytes}`,
		out:    os.Stdout,
	}
	for _, opt := range opts {
		opt(handler)
	}
	// NOTE(review): this redirects the process-wide standard logger, which can
	// clash with the application's own use of package log — confirm intended.
	log.SetOutput(handler.out)
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		// If there is a request ID already, we use it to keep the transaction
		// traceable. If not, we generate a new request ID.
		reqID := w.Header().Get("RequestID")
		if reqID == "" {
			reqID = uuid.NewV4().String()
		}
		w.Header().Set("RequestID", reqID)
		// Pre-serve log line; latency -1 renders the latency fields as "...".
		log.Print(applyLogFormat(handler.format, handler.name, -1, w, r))
		// Wrap the writer so status and byte count can be reported afterwards.
		res := NewResponseWriter(w)
		h.ServeHTTP(res, r)
		latency := time.Since(start)
		log.Print(applyLogFormat(handler.format, handler.name, latency, res, r))
	})
}
// applyLogFormat expands the {directive} placeholders in format using data
// from the request, the response writer and the measured latency. A latency
// of -1 renders the latency directives as "..." (used for the pre-serve log
// line). The expanded string is returned.
//
// strings.Contains replaces the former `strings.Index(...) > -1` checks —
// same behavior, idiomatic form. The guards are kept because several
// replacement values (BasicAuth, Size, Status) are non-trivial to compute.
func applyLogFormat(format, appname string, latency time.Duration, w http.ResponseWriter, r *http.Request) string {
	reqID := w.Header().Get("RequestID")
	if strings.Contains(format, "{appname}") {
		format = strings.Replace(format, "{appname}", appname, -1)
	}
	if strings.Contains(format, "{remote_ip}") {
		format = strings.Replace(format, "{remote_ip}", strings.Split(r.RemoteAddr, ":")[0], -1)
	}
	if strings.Contains(format, "{remote_user}") {
		// Prefer Basic Auth credentials; fall back to user info in the URL.
		user, _, _ := r.BasicAuth()
		if user == "" {
			user = r.URL.User.Username()
		}
		format = strings.Replace(format, "{remote_user}", user, -1)
	}
	if strings.Contains(format, "{latency_human}") {
		l := "..."
		if latency > -1 {
			l = latency.String()
		}
		format = strings.Replace(format, "{latency_human}", l, -1)
	}
	if strings.Contains(format, "{latency}") {
		l := "..."
		if latency > -1 {
			l = strconv.FormatInt(latency.Nanoseconds(), 10)
		}
		format = strings.Replace(format, "{latency}", l, -1)
	}
	if strings.Contains(format, "{id}") {
		format = strings.Replace(format, "{id}", reqID, -1)
	}
	if strings.Contains(format, "{method}") {
		format = strings.Replace(format, "{method}", r.Method, -1)
	}
	if strings.Contains(format, "{url}") {
		format = strings.Replace(format, "{url}", r.URL.Path, -1)
	}
	if strings.Contains(format, "{query}") {
		format = strings.Replace(format, "{query}", r.URL.RawQuery, -1)
	}
	if strings.Contains(format, "{rxbytes}") {
		format = strings.Replace(format, "{rxbytes}", strconv.FormatInt(r.ContentLength, 10), -1)
	}
	if strings.Contains(format, "{txbytes}") {
		// Size is only known when w is our instrumented ResponseWriter.
		size := "..."
		if v, ok := w.(ResponseWriter); ok {
			size = strconv.Itoa(v.Size())
		}
		format = strings.Replace(format, "{txbytes}", size, -1)
	}
	if strings.Contains(format, "{status}") {
		status := "..."
		if v, ok := w.(ResponseWriter); ok {
			status = strconv.Itoa(v.Status())
		}
		format = strings.Replace(format, "{status}", status, -1)
	}
	if strings.Contains(format, "{useragent}") {
		format = strings.Replace(format, "{useragent}", r.UserAgent(), -1)
	}
	if strings.Contains(format, "{host}") {
		format = strings.Replace(format, "{host}", r.Host, -1)
	}
	if strings.Contains(format, "{referer}") {
		format = strings.Replace(format, "{referer}", r.Referer(), -1)
	}
	return format
}
Creates a new logger to avoid clashes with app's loggers
package logger
import (
"fmt"
"io"
"log"
"net/http"
"os"
"strconv"
"strings"
"time"
"github.com/satori/go.uuid"
)
// option mutates a handler; see the pattern described at
// http://commandcenter.blogspot.com/2014/01/self-referential-functions-and-design.html
type option func(*handler)

// Internal handler holding the logging configuration assembled from options.
type handler struct {
	name   string    // application name, used as the logger's prefix
	format string    // log line template; see Format for the directive list
	flags  int       // standard library log flags; see Flags
	out    io.Writer // destination for log output
}
// AppName allows to set the application name to log.
func AppName(name string) option {
	return func(h *handler) {
		h.name = name
	}
}
// Format allows to set a custom log format. Although, the timestamp is always logged at the beginning.
// This handler is a bit opinionated.
//
// Directives:
//
//  {remote_user}       : Remote user if Basic Auth credentials were sent
//  {remote_ip}         : Remote IP address.
//  {latency}           : The time taken to serve the request, in microseconds.
//  {latency_human}     : The time taken to serve the request, human readable.
//  {id}                : The request ID.
//  {host}              : The Host header sent to the server
//  {method}            : The request method. Ex: GET, POST, DELETE, etc.
//  {url}               : The URL path requested.
//  {query}             : Request's query string
//  {rxbytes}           : Bytes received without headers
//  {txbytes}           : Bytes sent, excluding HTTP headers.
//  {status}            : Status sent to the client
//  {useragent}         : User Agent
//  {referer}           : The site from where the request came from
//
func Format(format string) option {
	return func(h *handler) {
		h.format = format
	}
}
// Flags allows to set logging flags using Go's standard log flags.
//
// Example: log.LstdFlags | log.Lshortfile
// Keep in mind that log.Lshortfile and log.Llongfile are expensive flags
func Flags(flags int) option {
	return func(l *handler) {
		l.flags = flags
	}
}
// Output allows setting an output writer for logging to be written to
func Output(out io.Writer) option {
	return func(h *handler) {
		h.out = out
	}
}
// Handler does HTTP request logging
// A dedicated *log.Logger (prefixed with "[name] ") is created so this
// handler does not clash with the application's use of the global logger.
// Each request produces two log lines: one before serving (latency shown as
// "...") and one after, carrying the final status, size and latency.
func Handler(h http.Handler, opts ...option) http.Handler {
	// Default options
	handler := &handler{
		name:   "unknown",
		format: `{id} remote_ip={remote_ip} {method} "{host}{url}?{query}" rxbytes={rxbytes} status={status} latency_human={latency_human} latency={latency} txbytes={txbytes}`,
		out:    os.Stdout,
		flags:  log.LstdFlags,
	}
	for _, opt := range opts {
		opt(handler)
	}
	// All request lines share a "[appname] " prefix on this private logger.
	l := log.New(handler.out, fmt.Sprintf("[%s] ", handler.name), handler.flags)
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		// If there is a request ID already, we use it to keep the transaction
		// traceable. If not, we generate a new request ID.
		reqID := w.Header().Get("RequestID")
		if reqID == "" {
			reqID = uuid.NewV4().String()
		}
		w.Header().Set("RequestID", reqID)
		// Pre-serve log line; latency -1 renders the latency fields as "...".
		l.Print(applyLogFormat(handler.format, -1, w, r))
		// Wrap the writer so status and byte count can be reported afterwards.
		res := NewResponseWriter(w)
		h.ServeHTTP(res, r)
		latency := time.Since(start)
		l.Print(applyLogFormat(handler.format, latency, res, r))
	})
}
// applyLogFormat expands the {directive} placeholders in format using data
// from the request, the response writer and the measured latency. A latency
// of -1 renders the latency directives as "..." (used for the pre-serve log
// line). The expanded string is returned.
//
// strings.Contains replaces the former `strings.Index(...) > -1` checks —
// same behavior, idiomatic form. The guards are kept because several
// replacement values (BasicAuth, Size, Status) are non-trivial to compute.
func applyLogFormat(format string, latency time.Duration, w http.ResponseWriter, r *http.Request) string {
	reqID := w.Header().Get("RequestID")
	if strings.Contains(format, "{remote_ip}") {
		format = strings.Replace(format, "{remote_ip}", strings.Split(r.RemoteAddr, ":")[0], -1)
	}
	if strings.Contains(format, "{remote_user}") {
		// Prefer Basic Auth credentials; fall back to user info in the URL.
		user, _, _ := r.BasicAuth()
		if user == "" {
			user = r.URL.User.Username()
		}
		format = strings.Replace(format, "{remote_user}", user, -1)
	}
	if strings.Contains(format, "{latency_human}") {
		l := "..."
		if latency > -1 {
			l = latency.String()
		}
		format = strings.Replace(format, "{latency_human}", l, -1)
	}
	if strings.Contains(format, "{latency}") {
		l := "..."
		if latency > -1 {
			l = strconv.FormatInt(latency.Nanoseconds(), 10)
		}
		format = strings.Replace(format, "{latency}", l, -1)
	}
	if strings.Contains(format, "{id}") {
		format = strings.Replace(format, "{id}", reqID, -1)
	}
	if strings.Contains(format, "{method}") {
		format = strings.Replace(format, "{method}", r.Method, -1)
	}
	if strings.Contains(format, "{url}") {
		format = strings.Replace(format, "{url}", r.URL.Path, -1)
	}
	if strings.Contains(format, "{query}") {
		format = strings.Replace(format, "{query}", r.URL.RawQuery, -1)
	}
	if strings.Contains(format, "{rxbytes}") {
		format = strings.Replace(format, "{rxbytes}", strconv.FormatInt(r.ContentLength, 10), -1)
	}
	if strings.Contains(format, "{txbytes}") {
		// Size is only known when w is our instrumented ResponseWriter.
		size := "..."
		if v, ok := w.(ResponseWriter); ok {
			size = strconv.Itoa(v.Size())
		}
		format = strings.Replace(format, "{txbytes}", size, -1)
	}
	if strings.Contains(format, "{status}") {
		status := "..."
		if v, ok := w.(ResponseWriter); ok {
			status = strconv.Itoa(v.Status())
		}
		format = strings.Replace(format, "{status}", status, -1)
	}
	if strings.Contains(format, "{useragent}") {
		format = strings.Replace(format, "{useragent}", r.UserAgent(), -1)
	}
	if strings.Contains(format, "{host}") {
		format = strings.Replace(format, "{host}", r.Host, -1)
	}
	if strings.Contains(format, "{referer}") {
		format = strings.Replace(format, "{referer}", r.Referer(), -1)
	}
	return format
}
|
package logspoutloges
import (
"bytes"
"encoding/json"
"strings"
log "github.com/Sirupsen/logrus"
"github.com/gliderlabs/logspout/router"
"github.com/mattbaird/elastigo/lib"
)
// elastigoConn is the shared Elasticsearch connection; package-level so the
// adapter and its bulk-indexer Sender closure use the same connection.
var elastigoConn *elastigo.Conn

// init registers this adapter factory with logspout under "logspoutloges".
func init() {
	router.AdapterFactories.Register(NewLogesAdapter, "logspoutloges")
}
// LogesAdapter is an adapter that streams TCP JSON to Elasticsearch.
type LogesAdapter struct {
	conn    *elastigo.Conn        // connection to the Elasticsearch host
	route   *router.Route         // logspout route this adapter serves
	indexer *elastigo.BulkIndexer // batches documents for bulk indexing
}
// NewLogesAdapter creates a LogesAdapter with TCP Elastigo BulkIndexer as the default transport.
// route.Address names the Elasticsearch host; a trailing ":9200" is stripped
// for backward compatibility with older route configs.
func NewLogesAdapter(route *router.Route) (router.LogAdapter, error) {
	addr := route.Address
	elastigoConn = elastigo.NewConn()
	// The old standard for host was including :9200
	esHost := strings.Replace(addr, ":9200", "", -1)
	log.Infof("esHost variable: %s\n", esHost)
	elastigoConn.SetHosts([]string{esHost})
	// NOTE(review): assumed (10, 120) are elastigo's doc-count / flush-seconds
	// thresholds — confirm against the elastigo BulkIndexer API.
	indexer := elastigoConn.NewBulkIndexerErrors(10, 120)
	indexer.Sender = func(buf *bytes.Buffer) error {
		log.Infof("es writing: %d bytes", buf.Len())
		// NOTE(review): elastigo's Send dispatches to Sender when one is set;
		// calling indexer.Send from inside Sender looks like it could recurse
		// indefinitely — verify against the elastigo source.
		return indexer.Send(buf)
	}
	indexer.Start()
	return &LogesAdapter{
		route:   route,
		conn:    elastigoConn,
		indexer: indexer,
	}, nil
}
// Stream implements the router.LogAdapter interface.
// It drains logstream, wraps each message in a LogesMessage, and bulk-indexes
// the JSON document into a per-day "logstash-YYYY.MM.DD" index. Messages that
// fail to marshal are logged and skipped.
func (a *LogesAdapter) Stream(logstream chan *router.Message) {
	lid := 0 // per-adapter sequence number attached to each document
	for m := range logstream {
		lid++
		// Un-escape the newline characters so logs look nice
		m.Data = EncodeNewlines(m.Data)
		msg := LogesMessage{
			Message:  m.Data,
			Name:     m.Container.Name,
			ID:       m.Container.ID,
			Image:    m.Container.Config.Image,
			Hostname: m.Container.Config.Hostname,
			LID:      lid,
		}
		js, err := json.Marshal(msg)
		if err != nil {
			log.Println("loges:", err)
			continue
		}
		idx := "logstash-" + m.Time.Format("2006.01.02")
		//Index(index string, _type string, id, ttl string, date *time.Time, data interface{}, refresh bool)
		// NOTE(review): &m.Time points at the loop variable's field; confirm
		// elastigo does not retain it beyond this call.
		if err := a.indexer.Index(idx, "logs", "", "30d", &m.Time, js, false); err != nil {
			log.Errorf("Index(ing) error: %v\n", err)
		}
	}
}
// LogESMessage Encapsulates the log data for Elasticsearch
type LogesMessage struct {
Message string `json:"message"`
Name string `json:"docker.name"`
ID string `json:"docker.id"`
Image string `json:"docker.image"`
Hostname string `json:"docker.hostname"`
LID int `json:logspout.loges.lid"`
}
Update Message struct to match logtoes Event fields for Kibana
package logspoutloges
import (
"bytes"
"encoding/json"
"strings"
"time"
log "github.com/Sirupsen/logrus"
"github.com/gliderlabs/logspout/router"
"github.com/mattbaird/elastigo/lib"
)
// elastigoConn is the shared Elasticsearch connection; package-level so the
// adapter and its bulk-indexer Sender closure use the same connection.
var elastigoConn *elastigo.Conn

// init registers this adapter factory with logspout under "logspoutloges".
func init() {
	router.AdapterFactories.Register(NewLogesAdapter, "logspoutloges")
}
// LogesAdapter is an adapter that streams TCP JSON to Elasticsearch.
type LogesAdapter struct {
	conn    *elastigo.Conn        // connection to the Elasticsearch host
	route   *router.Route         // logspout route this adapter serves
	indexer *elastigo.BulkIndexer // batches documents for bulk indexing
}
// NewLogesAdapter creates a LogesAdapter with TCP Elastigo BulkIndexer as the default transport.
// route.Address names the Elasticsearch host; a trailing ":9200" is stripped
// for backward compatibility with older route configs.
func NewLogesAdapter(route *router.Route) (router.LogAdapter, error) {
	addr := route.Address
	elastigoConn = elastigo.NewConn()
	// The old standard for host was including :9200
	esHost := strings.Replace(addr, ":9200", "", -1)
	log.Infof("esHost variable: %s\n", esHost)
	elastigoConn.SetHosts([]string{esHost})
	// NOTE(review): assumed (10, 120) are elastigo's doc-count / flush-seconds
	// thresholds — confirm against the elastigo BulkIndexer API.
	indexer := elastigoConn.NewBulkIndexerErrors(10, 120)
	indexer.Sender = func(buf *bytes.Buffer) error {
		log.Infof("es writing: %d bytes", buf.Len())
		// NOTE(review): elastigo's Send dispatches to Sender when one is set;
		// calling indexer.Send from inside Sender looks like it could recurse
		// indefinitely — verify against the elastigo source.
		return indexer.Send(buf)
	}
	indexer.Start()
	return &LogesAdapter{
		route:   route,
		conn:    elastigoConn,
		indexer: indexer,
	}, nil
}
// Stream implements the router.LogAdapter interface.
// It drains logstream, wraps each message in a LogesMessage, and bulk-indexes
// the JSON document into a per-day "logstash-YYYY.MM.DD" index. Messages that
// fail to marshal are logged and skipped.
func (a *LogesAdapter) Stream(logstream chan *router.Message) {
	lid := 0 // per-adapter sequence number attached to each document
	for m := range logstream {
		lid++
		// Un-escape the newline characters so logs look nice
		m.Data = EncodeNewlines(m.Data)
		msg := LogesMessage{
			Source: m.Container.Config.Hostname,
			Type:   "logs",
			// NOTE(review): @timestamp is indexing time (time.Now()), while
			// the index name below uses the message time m.Time — confirm the
			// mismatch is intentional.
			Timestamp: time.Now(),
			Message:   m.Data,
			Name:      m.Container.Name,
			ID:        m.Container.ID,
			Image:     m.Container.Config.Image,
			Hostname:  m.Container.Config.Hostname,
			LID:       lid,
		}
		js, err := json.Marshal(msg)
		if err != nil {
			log.Println("loges:", err)
			continue
		}
		idx := "logstash-" + m.Time.Format("2006.01.02")
		//Index(index string, _type string, id, ttl string, date *time.Time, data interface{}, refresh bool)
		if err := a.indexer.Index(idx, "logs", "", "30d", &m.Time, js, false); err != nil {
			log.Errorf("Index(ing) error: %v\n", err)
		}
	}
}
// LogESMessage Encapsulates the log data for Elasticsearch
type LogesMessage struct {
Source string `json:"@source"`
Type string `json:"@type"`
Timestamp time.Time `json:"@timestamp"`
Message string `json:"@message"`
Tags []string `json:"@tags,omitempty"`
IndexFields map[string]interface{} `json:"@idx,omitempty"`
Fields map[string]interface{} `json:"@fields"`
Name string `json:"docker.name"`
ID string `json:"docker.id"`
Image string `json:"docker.image"`
Hostname string `json:"docker.hostname"`
LID int `json:logspout.loges.lid"`
}
|
// Package proctl provides functions for attaching to and manipulating
// a process during the debug session.
package proctl
import (
"bytes"
"debug/gosym"
"encoding/binary"
"fmt"
"os"
"strconv"
"strings"
"sync"
"syscall"
"unsafe"
"github.com/derekparker/dbg/dwarf/frame"
"github.com/derekparker/dbg/dwarf/line"
"github.com/derekparker/dbg/dwarf/op"
"github.com/derekparker/dbg/vendor/dwarf"
"github.com/derekparker/dbg/vendor/elf"
)
// Struct representing a debugged process. Holds onto pid, register values,
// process struct and process state.
type DebuggedProcess struct {
	Pid          int                            // pid of the traced process
	Regs         *syscall.PtraceRegs            // scratch register buffer refreshed by Registers()
	Process      *os.Process                    // handle from os.FindProcess
	ProcessState *syscall.WaitStatus            // most recent wait(2) status
	Executable   *elf.File                      // ELF parsed from /proc/<pid>/exe
	Symbols      []elf.Symbol                   // NOTE(review): never populated in this file — confirm use
	GoSymTable   *gosym.Table                   // Go symbol/line table
	FrameEntries *frame.FrameDescriptionEntries // DWARF .debug_frame entries
	DebugLine    *line.DebugLineInfo            // DWARF .debug_line info
	BreakPoints  map[uint64]*BreakPoint         // active breakpoints keyed by address
}
// Represents a single breakpoint. Stores information on the break
// point including the byte of data that originally was stored at that
// address.
type BreakPoint struct {
	FunctionName string // function containing Addr
	File         string // source file of the breakpoint
	Line         int    // source line of the breakpoint
	Addr         uint64 // address the INT 3 byte was written to
	OriginalData []byte // the single byte replaced by INT 3
}
// Variable is the printable result of evaluating a symbol: its name, its
// rendered value, and its DWARF type string.
type Variable struct {
	Name  string
	Value string
	Type  string
}
// BreakPointExistsError reports an attempt to set a breakpoint at an address
// that already holds one; it carries the source location and address.
type BreakPointExistsError struct {
	file string
	line int
	addr uintptr
}

// Error implements the error interface.
func (e BreakPointExistsError) Error() string {
	return fmt.Sprintf("Breakpoint exists at %s:%d at %x", e.file, e.line, e.addr)
}
// Returns a new DebuggedProcess struct with sensible defaults.
// Attaches to pid via ptrace, waits until the tracee stops, then loads the
// symbol, line and frame information from its binary.
func NewDebugProcess(pid int) (*DebuggedProcess, error) {
	proc, err := os.FindProcess(pid)
	if err != nil {
		return nil, err
	}
	err = syscall.PtraceAttach(pid)
	if err != nil {
		return nil, err
	}
	// Block until the attach stop is observed before touching the tracee.
	ps, err := wait(proc.Pid)
	if err != nil {
		return nil, err
	}
	debuggedProc := DebuggedProcess{
		Pid:          pid,
		Regs:         new(syscall.PtraceRegs),
		Process:      proc,
		ProcessState: ps,
		BreakPoints:  make(map[uint64]*BreakPoint),
	}
	err = debuggedProc.LoadInformation()
	if err != nil {
		return nil, err
	}
	return &debuggedProc, nil
}
// Finds the executable from /proc/<pid>/exe and then
// uses that to parse the following information:
// * Dwarf .debug_frame section
// * Dwarf .debug_line section
// * Go symbol table.
// The three parsers run concurrently; each calls os.Exit itself on failure,
// which is why this function only ever returns findExecutable's error.
func (dbp *DebuggedProcess) LoadInformation() error {
	var (
		wg  sync.WaitGroup
		err error
	)
	err = dbp.findExecutable()
	if err != nil {
		return err
	}
	wg.Add(3)
	go dbp.parseDebugFrame(&wg)
	go dbp.parseDebugLine(&wg)
	go dbp.obtainGoSymbols(&wg)
	wg.Wait()
	return nil
}
// Obtains register values from the debugged process.
// The values are read into the shared dbp.Regs buffer, which is returned.
func (dbp *DebuggedProcess) Registers() (*syscall.PtraceRegs, error) {
	err := syscall.PtraceGetRegs(dbp.Pid, dbp.Regs)
	if err != nil {
		// Fix: fmt.Errorf previously had no format verb (go vet printf),
		// so the underlying ptrace error text was dropped.
		return nil, fmt.Errorf("Registers(): %s", err)
	}
	return dbp.Regs, nil
}
// Sets a breakpoint in the running process.
// The single byte at addr is saved in the returned BreakPoint so Clear can
// restore it, then overwritten with the INT 3 trap opcode.
func (dbp *DebuggedProcess) Break(addr uintptr) (*BreakPoint, error) {
	var (
		int3         = []byte{0xCC} // x86 INT 3 opcode
		f, l, fn     = dbp.GoSymTable.PCToLine(uint64(addr))
		originalData = make([]byte, 1)
	)
	// No function at addr means the address is not in the symbol table.
	if fn == nil {
		return nil, fmt.Errorf("could not set breakpoint")
	}
	_, err := syscall.PtracePeekData(dbp.Pid, addr, originalData)
	if err != nil {
		return nil, err
	}
	// An INT 3 already present means a breakpoint is already set here.
	if bytes.Equal(originalData, int3) {
		return nil, BreakPointExistsError{f, l, addr}
	}
	_, err = syscall.PtracePokeData(dbp.Pid, addr, int3)
	if err != nil {
		return nil, err
	}
	breakpoint := &BreakPoint{
		FunctionName: fn.Name,
		File:         f,
		Line:         l,
		Addr:         uint64(addr),
		OriginalData: originalData,
	}
	dbp.BreakPoints[uint64(addr)] = breakpoint
	return breakpoint, nil
}
// Clear removes the breakpoint previously set at pc, writing the saved
// original instruction byte back into the tracee and forgetting the entry.
func (dbp *DebuggedProcess) Clear(pc uint64) (*BreakPoint, error) {
	breakpoint, found := dbp.BreakPoints[pc]
	if !found {
		return nil, fmt.Errorf("No breakpoint currently set for %#v", pc)
	}
	if _, err := syscall.PtracePokeData(dbp.Pid, uintptr(breakpoint.Addr), breakpoint.OriginalData); err != nil {
		return nil, err
	}
	delete(dbp.BreakPoints, pc)
	return breakpoint, nil
}
// Steps through process.
// Executes a single instruction. If we are stopped just past a breakpoint,
// the breakpoint is removed, the PC rewound onto the restored instruction,
// and the breakpoint re-armed (via defer) after the step completes.
func (dbp *DebuggedProcess) Step() (err error) {
	regs, err := dbp.Registers()
	if err != nil {
		return err
	}
	// After an INT 3 trap the PC is one byte past the breakpoint address.
	bp, ok := dbp.BreakPoints[regs.PC()-1]
	if ok {
		// Clear the breakpoint so that we can continue execution.
		_, err = dbp.Clear(bp.Addr)
		if err != nil {
			return err
		}
		// Reset program counter to our restored instruction.
		regs.SetPC(bp.Addr)
		err = syscall.PtraceSetRegs(dbp.Pid, regs)
		if err != nil {
			return err
		}
		// Restore breakpoint now that we have passed it. Assigning the named
		// return in the defer reports a failed re-arm to the caller.
		defer func() {
			_, err = dbp.Break(uintptr(bp.Addr))
		}()
	}
	err = dbp.handleResult(syscall.PtraceSingleStep(dbp.Pid))
	if err != nil {
		// Fix: fmt.Errorf previously had no format verb (go vet printf),
		// so the underlying error text was dropped.
		return fmt.Errorf("step failed: %s", err)
	}
	return nil
}
// Step over function calls.
// Next advances the tracee to the next source line of the current function,
// single-stepping and stepping back out of any call frames entered on the way.
func (dbp *DebuggedProcess) Next() error {
	pc, err := dbp.CurrentPC()
	if err != nil {
		return err
	}
	if _, ok := dbp.BreakPoints[pc-1]; ok {
		// Decrement the PC to be before
		// the breakpoint instruction.
		pc--
	}
	f, l, _ := dbp.GoSymTable.PCToLine(pc)
	fde, err := dbp.FrameEntries.FDEForPC(pc)
	if err != nil {
		return err
	}
	// step single-steps once and reports the resulting PC.
	step := func() (uint64, error) {
		err = dbp.Step()
		if err != nil {
			// Fix: fmt.Errorf previously had no format verb (go vet printf),
			// so the underlying error text was dropped.
			return 0, fmt.Errorf("next stepping failed: %s", err)
		}
		return dbp.CurrentPC()
	}
	loc := dbp.DebugLine.NextLocation(pc, f, l)
	if !fde.Cover(loc.Address) {
		// The next location is outside the current frame: step until we
		// leave this function, then walk back to the caller if we landed
		// somewhere else (e.g. inside a call).
		ret := dbp.ReturnAddressFromOffset(fde.ReturnAddressOffset(pc))
		// Attempt to step out of function.
		for fde.Cover(pc) {
			pc, err = step()
			if err != nil {
				return err
			}
		}
		if pc == ret {
			return nil
		}
		// We have stepped into another function, return from it
		// and continue single stepping through until we
		// reach our real destination.
		err = dbp.continueToReturnAddress(pc, fde)
		if err != nil {
			return err
		}
	}
	for {
		pc, err = step()
		if err != nil {
			return err
		}
		if !fde.Cover(pc) {
			err = dbp.continueToReturnAddress(pc, fde)
			if err != nil {
				return err
			}
			pc, _ = dbp.CurrentPC()
		}
		_, nl, _ := dbp.GoSymTable.PCToLine(pc)
		if nl != l {
			break
		}
	}
	return nil
}
// continueToReturnAddress sets a temporary breakpoint at the current
// function's return address and runs the tracee until it is back inside the
// frame described by fde.
func (dbp *DebuggedProcess) continueToReturnAddress(pc uint64, fde *frame.FrameDescriptionEntry) error {
	for !fde.Cover(pc) {
		// Our offset here is 0 because we have stepped into the first
		// instruction of this function, so it has not yet adjusted the
		// stack; the return address sits directly at RSP.
		addr := dbp.ReturnAddressFromOffset(0)
		// Fix: the returned *BreakPoint was previously dereferenced below
		// (bp.Addr) even when Break returned (nil, BreakPointExistsError),
		// which panicked. Use addr directly; it equals bp.Addr.
		if _, err := dbp.Break(uintptr(addr)); err != nil {
			// A pre-existing breakpoint at the return address is fine.
			if _, ok := err.(BreakPointExistsError); !ok {
				return err
			}
		}
		if err := dbp.Continue(); err != nil {
			return err
		}
		if err := dbp.clearTempBreakpoint(addr); err != nil {
			return err
		}
		pc, _ = dbp.CurrentPC()
	}
	return nil
}
// Continue resumes the tracee until it hits the next breakpoint or stops
// for another reason.
func (dbp *DebuggedProcess) Continue() error {
	// Single-step first so we can move past a breakpoint we may currently
	// be stopped on.
	if err := dbp.Step(); err != nil {
		return err
	}
	return dbp.handleResult(syscall.PtraceCont(dbp.Pid, 0))
}
// CurrentPC reports the tracee's current program counter (RIP).
func (dbp *DebuggedProcess) CurrentPC() (uint64, error) {
	registers, err := dbp.Registers()
	if err != nil {
		return 0, err
	}
	return registers.Rip, nil
}
// clearTempBreakpoint removes the temporary breakpoint at pc, if one is set,
// and rewinds the PC onto the restored instruction (mirroring the resume
// logic in Step). A pc without a breakpoint is a no-op.
func (dbp *DebuggedProcess) clearTempBreakpoint(pc uint64) error {
	if bp, ok := dbp.BreakPoints[pc]; ok {
		regs, err := dbp.Registers()
		if err != nil {
			return err
		}
		// Reset program counter to our restored instruction.
		bp, err = dbp.Clear(bp.Addr)
		if err != nil {
			return err
		}
		regs.SetPC(bp.Addr)
		return syscall.PtraceSetRegs(dbp.Pid, regs)
	}
	return nil
}
// Returns the value of the named symbol.
// Walks the binary's DWARF entries for a variable DIE whose name matches,
// then evaluates its location expression to read the value out of the
// tracee's memory.
func (dbp *DebuggedProcess) EvalSymbol(name string) (*Variable, error) {
	data, err := dbp.Executable.DWARF()
	if err != nil {
		return nil, err
	}
	reader := data.Reader()
	// NOTE(review): if reader.Next returns (nil, err) the loop exits and the
	// error surfaces only as "could not find symbol" — confirm intended.
	for entry, err := reader.Next(); entry != nil; entry, err = reader.Next() {
		if err != nil {
			return nil, err
		}
		if entry.Tag != dwarf.TagVariable {
			continue
		}
		n, ok := entry.Val(dwarf.AttrName).(string)
		if !ok || n != name {
			continue
		}
		offset, ok := entry.Val(dwarf.AttrType).(dwarf.Offset)
		if !ok {
			continue
		}
		t, err := data.Type(offset)
		if err != nil {
			return nil, err
		}
		instructions, ok := entry.Val(dwarf.AttrLocation).([]byte)
		if !ok {
			continue
		}
		// off == 0 tells extractValue to run the location program itself.
		val, err := dbp.extractValue(instructions, 0, t)
		if err != nil {
			return nil, err
		}
		return &Variable{Name: n, Type: t.String(), Value: val}, nil
	}
	return nil, fmt.Errorf("could not find symbol value for %s", name)
}
// Extracts the value from the instructions given in the DW_AT_location entry.
// We execute the stack program described in the DW_OP_* instruction stream, and
// then grab the value from the other processes memory.
// When off != 0 it is taken as an absolute address and the location program
// is skipped. Pointer and struct cases recurse.
func (dbp *DebuggedProcess) extractValue(instructions []byte, off int64, typ interface{}) (string, error) {
	regs, err := dbp.Registers()
	if err != nil {
		return "", err
	}
	fde, err := dbp.FrameEntries.FDEForPC(regs.PC())
	if err != nil {
		return "", err
	}
	fctx := fde.EstablishFrame(regs.PC())
	cfaOffset := fctx.CFAOffset()
	offset := off
	if off == 0 {
		// Run the DWARF location program to get an offset from RSP.
		offset, err = op.ExecuteStackProgram(cfaOffset, instructions)
		if err != nil {
			return "", err
		}
		offset = int64(regs.Rsp) + offset
	}
	// If we have a user defined type, find the
	// underlying concrete type and use that.
	if tt, ok := typ.(*dwarf.TypedefType); ok {
		typ = tt.Type
	}
	offaddr := uintptr(offset)
	switch t := typ.(type) {
	case *dwarf.PtrType:
		// Dereference: read the 8-byte pointer, then extract the pointee.
		addr, err := dbp.readMemory(offaddr, 8)
		if err != nil {
			return "", err
		}
		adr := binary.LittleEndian.Uint64(addr)
		val, err := dbp.extractValue(nil, int64(adr), t.Type)
		if err != nil {
			return "", err
		}
		retstr := fmt.Sprintf("*%s", val)
		return retstr, nil
	case *dwarf.StructType:
		switch t.StructName {
		case "string":
			return dbp.readString(offaddr)
		case "[]int":
			return dbp.readIntSlice(offaddr)
		default:
			// Here we could recursively call extractValue to grab
			// the value of all the members of the struct.
			fields := make([]string, 0, len(t.Field))
			for _, field := range t.Field {
				val, err := dbp.extractValue(nil, field.ByteOffset+offset, field.Type)
				if err != nil {
					return "", err
				}
				fields = append(fields, fmt.Sprintf("%s: %s", field.Name, val))
			}
			retstr := fmt.Sprintf("%s {%s}", t.StructName, strings.Join(fields, ", "))
			return retstr, nil
		}
	case *dwarf.ArrayType:
		return dbp.readIntArray(offaddr, t)
	case *dwarf.IntType:
		return dbp.readInt(offaddr)
	case *dwarf.FloatType:
		return dbp.readFloat64(offaddr)
	}
	return "", fmt.Errorf("could not find value for type %s", typ)
}
// readString reads a Go string whose header is at addr in the tracee:
// it dereferences the data pointer and renders up to 16 bytes, truncating
// at the first NUL byte.
func (dbp *DebuggedProcess) readString(addr uintptr) (string, error) {
	val, err := dbp.readMemory(addr, 8)
	if err != nil {
		return "", err
	}
	// deref the pointer to the string
	addr = uintptr(binary.LittleEndian.Uint64(val))
	val, err = dbp.readMemory(addr, 16)
	if err != nil {
		return "", err
	}
	i := bytes.IndexByte(val, 0x0)
	// Fix: IndexByte returns -1 when no NUL is present, and val[:-1]
	// panics; keep the whole buffer in that case.
	if i < 0 {
		i = len(val)
	}
	val = val[:i]
	str := *(*string)(unsafe.Pointer(&val))
	return str, nil
}
// readIntSlice reads a []int whose 24-byte header (data, len, cap) is at
// addr in the tracee and renders it as "len: L cap: C [elements]".
func (dbp *DebuggedProcess) readIntSlice(addr uintptr) (string, error) {
	var number uint64
	val, err := dbp.readMemory(addr, uintptr(24))
	if err != nil {
		return "", err
	}
	a := binary.LittleEndian.Uint64(val[:8])
	l := binary.LittleEndian.Uint64(val[8:16])
	c := binary.LittleEndian.Uint64(val[16:24])
	val, err = dbp.readMemory(uintptr(a), uintptr(8*l))
	if err != nil {
		return "", err
	}
	members := make([]uint64, 0, l)
	buf := bytes.NewBuffer(val)
	// Decode 8-byte little-endian elements until the buffer is exhausted
	// (binary.Read returns io.EOF).
	for {
		err := binary.Read(buf, binary.LittleEndian, &number)
		if err != nil {
			break
		}
		members = append(members, number)
	}
	str := fmt.Sprintf("len: %d cap: %d %d", l, c, members)
	// Fix: previously `return str, err` — that outer err is always nil here
	// (the loop error is a shadowed variable), so the success path is made
	// explicit instead of appearing to propagate the decode error.
	return str, nil
}
// readIntArray reads a fixed-size int array of t.ByteSize bytes at addr in
// the tracee and renders it as "[N]int [elements]".
func (dbp *DebuggedProcess) readIntArray(addr uintptr, t *dwarf.ArrayType) (string, error) {
	raw, err := dbp.readMemory(addr, uintptr(t.ByteSize))
	if err != nil {
		return "", err
	}
	elems := make([]uint64, 0, t.ByteSize)
	rdr := bytes.NewBuffer(raw)
	for {
		var n uint64
		if err := binary.Read(rdr, binary.LittleEndian, &n); err != nil {
			break
		}
		elems = append(elems, n)
	}
	return fmt.Sprintf("[%d]int %d", t.ByteSize/8, elems), nil
}
// readInt reads an 8-byte little-endian integer at addr in the tracee and
// renders it in decimal.
func (dbp *DebuggedProcess) readInt(addr uintptr) (string, error) {
	raw, err := dbp.readMemory(addr, 8)
	if err != nil {
		return "", err
	}
	return strconv.Itoa(int(binary.LittleEndian.Uint64(raw))), nil
}
// readFloat64 reads an 8-byte little-endian float64 at addr in the tracee
// and renders it with minimal digits.
func (dbp *DebuggedProcess) readFloat64(addr uintptr) (string, error) {
	var n float64
	val, err := dbp.readMemory(addr, 8)
	if err != nil {
		return "", err
	}
	buf := bytes.NewBuffer(val)
	// Fix: the decode error was previously discarded, which could silently
	// report a zero value as a successful read.
	if err := binary.Read(buf, binary.LittleEndian, &n); err != nil {
		return "", err
	}
	return strconv.FormatFloat(n, 'f', -1, 64), nil
}
// readMemory reads size bytes from the tracee at addr via ptrace.
func (dbp *DebuggedProcess) readMemory(addr uintptr, size uintptr) ([]byte, error) {
	out := make([]byte, size)
	if _, err := syscall.PtracePeekData(dbp.Pid, addr, out); err != nil {
		return nil, err
	}
	return out, nil
}
// handleResult checks the error from a ptrace request and, when the request
// succeeded, waits on the tracee and records its new wait status.
func (dbp *DebuggedProcess) handleResult(err error) error {
	if err != nil {
		return err
	}
	ps, err := wait(dbp.Process.Pid)
	// ECHILD means the tracee is already gone; treat as non-fatal.
	if err != nil && err != syscall.ECHILD {
		return err
	}
	if ps != nil {
		dbp.ProcessState = ps
		// A stop that is not a ptrace trap (and not an exit) means some
		// other signal stopped the tracee; report where it happened.
		if ps.TrapCause() == -1 && !ps.Exited() {
			regs, err := dbp.Registers()
			if err != nil {
				return err
			}
			fmt.Printf("traced program %s at: %#v\n", ps.StopSignal(), regs.PC())
		}
	}
	return nil
}
// findExecutable opens /proc/<pid>/exe and parses it as an ELF binary,
// storing the result in dbp.Executable.
func (dbp *DebuggedProcess) findExecutable() error {
	procpath := fmt.Sprintf("/proc/%d/exe", dbp.Pid)
	f, err := os.OpenFile(procpath, 0, os.ModePerm)
	if err != nil {
		return err
	}
	// NOTE(review): f is left open on purpose — elf section data is read
	// lazily via Section(...).Data() later, so closing here would break the
	// parsers. Confirm the handle is released when debugging ends.
	elffile, err := elf.NewFile(f)
	if err != nil {
		return err
	}
	dbp.Executable = elffile
	return nil
}
// parseDebugLine parses the DWARF .debug_line section into dbp.DebugLine.
// Fatal (os.Exit) on failure, matching the other section parsers.
func (dbp *DebuggedProcess) parseDebugLine(wg *sync.WaitGroup) {
	defer wg.Done()
	// Fix: Section returns nil for a missing section; the original chained
	// .Data() directly and would nil-pointer panic on a stripped binary.
	sec := dbp.Executable.Section(".debug_line")
	if sec == nil {
		fmt.Println("could not find .debug_line section")
		os.Exit(1)
	}
	debugLine, err := sec.Data()
	if err != nil {
		fmt.Println("could not get .debug_line section", err)
		os.Exit(1)
	}
	dbp.DebugLine = line.Parse(debugLine)
}
// parseDebugFrame parses the DWARF .debug_frame section into
// dbp.FrameEntries. Fatal (os.Exit) on failure, matching the other parsers.
func (dbp *DebuggedProcess) parseDebugFrame(wg *sync.WaitGroup) {
	defer wg.Done()
	// Fix: Section returns nil for a missing section; the original chained
	// .Data() directly and would nil-pointer panic on a stripped binary.
	sec := dbp.Executable.Section(".debug_frame")
	if sec == nil {
		fmt.Println("could not find .debug_frame section")
		os.Exit(1)
	}
	debugFrame, err := sec.Data()
	if err != nil {
		fmt.Println("could not get .debug_frame section", err)
		os.Exit(1)
	}
	dbp.FrameEntries = frame.Parse(debugFrame)
}
// obtainGoSymbols builds the Go symbol/line table from the .gosymtab and
// .gopclntab ELF sections into dbp.GoSymTable. Fatal (os.Exit) on failure,
// matching the other section parsers.
func (dbp *DebuggedProcess) obtainGoSymbols(wg *sync.WaitGroup) {
	defer wg.Done()
	var (
		symdat  []byte
		pclndat []byte
		err     error
	)
	if sec := dbp.Executable.Section(".gosymtab"); sec != nil {
		symdat, err = sec.Data()
		if err != nil {
			fmt.Println("could not get .gosymtab section", err)
			os.Exit(1)
		}
	}
	if sec := dbp.Executable.Section(".gopclntab"); sec != nil {
		pclndat, err = sec.Data()
		if err != nil {
			fmt.Println("could not get .gopclntab section", err)
			os.Exit(1)
		}
	}
	// NOTE(review): Section(".text") can be nil for unusual binaries, which
	// would panic here — consider guarding like the sections above.
	pcln := gosym.NewLineTable(pclndat, dbp.Executable.Section(".text").Addr)
	tab, err := gosym.NewTable(symdat, pcln)
	if err != nil {
		fmt.Println("could not get initialize line table", err)
		os.Exit(1)
	}
	dbp.GoSymTable = tab
}
// Takes an offset from RSP and returns the address of the
// instruction the current function is going to return to.
// NOTE(review): the PtracePeekText error is ignored — on failure the result
// decodes from zeroed bytes. Panics if registers cannot be read.
func (dbp *DebuggedProcess) ReturnAddressFromOffset(offset int64) uint64 {
	regs, err := dbp.Registers()
	if err != nil {
		panic("Could not obtain register values")
	}
	retaddr := int64(regs.Rsp) + offset
	data := make([]byte, 8)
	syscall.PtracePeekText(dbp.Pid, uintptr(retaddr), data)
	return binary.LittleEndian.Uint64(data)
}
// wait blocks in wait4(2) on pid and returns the resulting wait status.
func wait(pid int) (*syscall.WaitStatus, error) {
	var (
		status syscall.WaitStatus
		rusage syscall.Rusage
	)
	if _, err := syscall.Wait4(pid, &status, 0, &rusage); err != nil {
		return nil, err
	}
	return &status, nil
}
Refactor Next implementation
// Package proctl provides functions for attaching to and manipulating
// a process during the debug session.
package proctl
import (
"bytes"
"debug/gosym"
"encoding/binary"
"fmt"
"os"
"strconv"
"strings"
"sync"
"syscall"
"unsafe"
"github.com/derekparker/dbg/dwarf/frame"
"github.com/derekparker/dbg/dwarf/line"
"github.com/derekparker/dbg/dwarf/op"
"github.com/derekparker/dbg/vendor/dwarf"
"github.com/derekparker/dbg/vendor/elf"
)
// Struct representing a debugged process. Holds onto pid, register values,
// process struct and process state.
type DebuggedProcess struct {
	Pid          int                            // pid of the traced process
	Regs         *syscall.PtraceRegs            // scratch register buffer refreshed by Registers()
	Process      *os.Process                    // handle from os.FindProcess
	ProcessState *syscall.WaitStatus            // most recent wait(2) status
	Executable   *elf.File                      // ELF parsed from /proc/<pid>/exe
	Symbols      []elf.Symbol                   // NOTE(review): never populated in this file — confirm use
	GoSymTable   *gosym.Table                   // Go symbol/line table
	FrameEntries *frame.FrameDescriptionEntries // DWARF .debug_frame entries
	DebugLine    *line.DebugLineInfo            // DWARF .debug_line info
	BreakPoints  map[uint64]*BreakPoint         // active breakpoints keyed by address
}
// Represents a single breakpoint. Stores information on the break
// point including the byte of data that originally was stored at that
// address.
type BreakPoint struct {
	FunctionName string // function containing Addr
	File         string // source file of the breakpoint
	Line         int    // source line of the breakpoint
	Addr         uint64 // address the INT 3 byte was written to
	OriginalData []byte // the single byte replaced by INT 3
}
// Variable is the printable result of evaluating a symbol: its name, its
// rendered value, and its DWARF type string.
type Variable struct {
	Name  string
	Value string
	Type  string
}
// BreakPointExistsError reports an attempt to set a breakpoint at an address
// that already holds one; it carries the source location and address.
type BreakPointExistsError struct {
	file string
	line int
	addr uintptr
}

// Error implements the error interface.
func (e BreakPointExistsError) Error() string {
	return fmt.Sprintf("Breakpoint exists at %s:%d at %x", e.file, e.line, e.addr)
}
// Returns a new DebuggedProcess struct with sensible defaults.
// Attaches to pid via ptrace, waits until the tracee stops, then loads the
// symbol, line and frame information from its binary.
func NewDebugProcess(pid int) (*DebuggedProcess, error) {
	proc, err := os.FindProcess(pid)
	if err != nil {
		return nil, err
	}
	err = syscall.PtraceAttach(pid)
	if err != nil {
		return nil, err
	}
	// Block until the attach stop is observed before touching the tracee.
	ps, err := wait(proc.Pid)
	if err != nil {
		return nil, err
	}
	debuggedProc := DebuggedProcess{
		Pid:          pid,
		Regs:         new(syscall.PtraceRegs),
		Process:      proc,
		ProcessState: ps,
		BreakPoints:  make(map[uint64]*BreakPoint),
	}
	err = debuggedProc.LoadInformation()
	if err != nil {
		return nil, err
	}
	return &debuggedProc, nil
}
// Finds the executable from /proc/<pid>/exe and then
// uses that to parse the following information:
// * Dwarf .debug_frame section
// * Dwarf .debug_line section
// * Go symbol table.
// The three parsers run concurrently; each calls os.Exit itself on failure,
// which is why this function only ever returns findExecutable's error.
func (dbp *DebuggedProcess) LoadInformation() error {
	var (
		wg  sync.WaitGroup
		err error
	)
	err = dbp.findExecutable()
	if err != nil {
		return err
	}
	wg.Add(3)
	go dbp.parseDebugFrame(&wg)
	go dbp.parseDebugLine(&wg)
	go dbp.obtainGoSymbols(&wg)
	wg.Wait()
	return nil
}
// Obtains register values from the debugged process.
// The values are read into the shared dbp.Regs buffer, which is returned.
func (dbp *DebuggedProcess) Registers() (*syscall.PtraceRegs, error) {
	err := syscall.PtraceGetRegs(dbp.Pid, dbp.Regs)
	if err != nil {
		// Fix: fmt.Errorf previously had no format verb (go vet printf),
		// so the underlying ptrace error text was dropped.
		return nil, fmt.Errorf("Registers(): %s", err)
	}
	return dbp.Regs, nil
}
// InvalidAddressError reports a breakpoint request for an address that does
// not map to any known function.
type InvalidAddressError struct {
	address uintptr
}

// Error implements the error interface.
func (e InvalidAddressError) Error() string {
	return fmt.Sprintf("Invalid address %#v\n", e.address)
}
// Sets a breakpoint in the running process.
// The single byte at addr is saved in the returned BreakPoint so Clear can
// restore it, then overwritten with the INT 3 trap opcode.
func (dbp *DebuggedProcess) Break(addr uintptr) (*BreakPoint, error) {
	var (
		int3         = []byte{0xCC} // x86 INT 3 opcode
		f, l, fn     = dbp.GoSymTable.PCToLine(uint64(addr))
		originalData = make([]byte, 1)
	)
	// No function at addr means the address is not in the symbol table.
	if fn == nil {
		return nil, InvalidAddressError{address: addr}
	}
	_, err := syscall.PtracePeekData(dbp.Pid, addr, originalData)
	if err != nil {
		return nil, err
	}
	// An INT 3 already present means a breakpoint is already set here.
	if bytes.Equal(originalData, int3) {
		return nil, BreakPointExistsError{f, l, addr}
	}
	_, err = syscall.PtracePokeData(dbp.Pid, addr, int3)
	if err != nil {
		return nil, err
	}
	breakpoint := &BreakPoint{
		FunctionName: fn.Name,
		File:         f,
		Line:         l,
		Addr:         uint64(addr),
		OriginalData: originalData,
	}
	dbp.BreakPoints[uint64(addr)] = breakpoint
	return breakpoint, nil
}
// Clear removes the breakpoint previously set at pc, writing the saved
// original instruction byte back into the tracee and forgetting the entry.
func (dbp *DebuggedProcess) Clear(pc uint64) (*BreakPoint, error) {
	breakpoint, found := dbp.BreakPoints[pc]
	if !found {
		return nil, fmt.Errorf("No breakpoint currently set for %#v", pc)
	}
	if _, err := syscall.PtracePokeData(dbp.Pid, uintptr(breakpoint.Addr), breakpoint.OriginalData); err != nil {
		return nil, err
	}
	delete(dbp.BreakPoints, pc)
	return breakpoint, nil
}
// Steps through process.
// Executes a single instruction. If we are stopped just past a breakpoint,
// the breakpoint is removed, the PC rewound onto the restored instruction,
// and the breakpoint re-armed (via defer) after the step completes.
func (dbp *DebuggedProcess) Step() (err error) {
	regs, err := dbp.Registers()
	if err != nil {
		return err
	}
	// After an INT 3 trap the PC is one byte past the breakpoint address.
	bp, ok := dbp.BreakPoints[regs.PC()-1]
	if ok {
		// Clear the breakpoint so that we can continue execution.
		_, err = dbp.Clear(bp.Addr)
		if err != nil {
			return err
		}
		// Reset program counter to our restored instruction.
		regs.SetPC(bp.Addr)
		err = syscall.PtraceSetRegs(dbp.Pid, regs)
		if err != nil {
			return err
		}
		// Restore breakpoint now that we have passed it. Assigning the named
		// return in the defer reports a failed re-arm to the caller.
		defer func() {
			_, err = dbp.Break(uintptr(bp.Addr))
		}()
	}
	err = dbp.handleResult(syscall.PtraceSingleStep(dbp.Pid))
	if err != nil {
		// Fix: fmt.Errorf previously had no format verb (go vet printf),
		// so the underlying error text was dropped.
		return fmt.Errorf("step failed: %s", err)
	}
	return nil
}
// Step over function calls.
// Next advances the tracee to the next source line of the current function,
// single-stepping and stepping back out of any call frames entered on the way.
func (dbp *DebuggedProcess) Next() error {
	pc, err := dbp.CurrentPC()
	if err != nil {
		return err
	}
	if _, ok := dbp.BreakPoints[pc-1]; ok {
		// Decrement the PC to be before
		// the breakpoint instruction.
		pc--
	}
	_, l, _ := dbp.GoSymTable.PCToLine(pc)
	fde, err := dbp.FrameEntries.FDEForPC(pc)
	if err != nil {
		return err
	}
	// step single-steps once and reports the resulting PC.
	step := func() (uint64, error) {
		err = dbp.Step()
		if err != nil {
			// Fix: fmt.Errorf previously had no format verb (go vet printf),
			// so the underlying error text was dropped.
			return 0, fmt.Errorf("next stepping failed: %s", err)
		}
		return dbp.CurrentPC()
	}
	ret := dbp.ReturnAddressFromOffset(fde.ReturnAddressOffset(pc))
	for {
		pc, err = step()
		if err != nil {
			return err
		}
		if !fde.Cover(pc) && pc != ret {
			// Fix: the result of continueToReturnAddress was discarded and a
			// stale err checked instead; assign it before inspecting. Only an
			// InvalidAddressError is propagated, matching the original intent.
			err = dbp.continueToReturnAddress(pc, fde)
			if err != nil {
				if ierr, ok := err.(InvalidAddressError); ok {
					return ierr
				}
			}
			pc, _ = dbp.CurrentPC()
		}
		_, nl, _ := dbp.GoSymTable.PCToLine(pc)
		if nl != l {
			break
		}
	}
	return nil
}
// continueToReturnAddress sets a temporary breakpoint at the current
// function's return address and runs the tracee until it is back inside the
// frame described by fde.
func (dbp *DebuggedProcess) continueToReturnAddress(pc uint64, fde *frame.FrameDescriptionEntry) error {
	for !fde.Cover(pc) {
		// Our offset here is 0 because we have stepped into the first
		// instruction of this function, so it has not yet adjusted the
		// stack; the return address sits directly at RSP.
		addr := dbp.ReturnAddressFromOffset(0)
		// Fix: the returned *BreakPoint was previously dereferenced below
		// (bp.Addr) even when Break returned (nil, BreakPointExistsError),
		// which panicked. Use addr directly; it equals bp.Addr.
		if _, err := dbp.Break(uintptr(addr)); err != nil {
			// A pre-existing breakpoint at the return address is fine.
			if _, ok := err.(BreakPointExistsError); !ok {
				return err
			}
		}
		if err := dbp.Continue(); err != nil {
			return err
		}
		if err := dbp.clearTempBreakpoint(addr); err != nil {
			return err
		}
		pc, _ = dbp.CurrentPC()
	}
	return nil
}
// Continue resumes the tracee until it hits the next breakpoint or stops
// for another reason.
func (dbp *DebuggedProcess) Continue() error {
	// Single-step first so we can move past a breakpoint we may currently
	// be stopped on.
	if err := dbp.Step(); err != nil {
		return err
	}
	return dbp.handleResult(syscall.PtraceCont(dbp.Pid, 0))
}
// CurrentPC reports the tracee's current program counter (RIP).
func (dbp *DebuggedProcess) CurrentPC() (uint64, error) {
	registers, err := dbp.Registers()
	if err != nil {
		return 0, err
	}
	return registers.Rip, nil
}
// clearTempBreakpoint removes the temporary breakpoint at pc, if one is set,
// and rewinds the PC onto the restored instruction (mirroring the resume
// logic in Step). A pc without a breakpoint is a no-op.
func (dbp *DebuggedProcess) clearTempBreakpoint(pc uint64) error {
	if bp, ok := dbp.BreakPoints[pc]; ok {
		regs, err := dbp.Registers()
		if err != nil {
			return err
		}
		// Reset program counter to our restored instruction.
		bp, err = dbp.Clear(bp.Addr)
		if err != nil {
			return err
		}
		regs.SetPC(bp.Addr)
		return syscall.PtraceSetRegs(dbp.Pid, regs)
	}
	return nil
}
// Returns the value of the named symbol.
// Walks the binary's DWARF entries for a variable DIE whose name matches,
// then evaluates its location expression to read the value out of the
// tracee's memory.
func (dbp *DebuggedProcess) EvalSymbol(name string) (*Variable, error) {
	data, err := dbp.Executable.DWARF()
	if err != nil {
		return nil, err
	}
	reader := data.Reader()
	// NOTE(review): if reader.Next returns (nil, err) the loop exits and the
	// error surfaces only as "could not find symbol" — confirm intended.
	for entry, err := reader.Next(); entry != nil; entry, err = reader.Next() {
		if err != nil {
			return nil, err
		}
		if entry.Tag != dwarf.TagVariable {
			continue
		}
		n, ok := entry.Val(dwarf.AttrName).(string)
		if !ok || n != name {
			continue
		}
		offset, ok := entry.Val(dwarf.AttrType).(dwarf.Offset)
		if !ok {
			continue
		}
		t, err := data.Type(offset)
		if err != nil {
			return nil, err
		}
		instructions, ok := entry.Val(dwarf.AttrLocation).([]byte)
		if !ok {
			continue
		}
		// off == 0 tells extractValue to run the location program itself.
		val, err := dbp.extractValue(instructions, 0, t)
		if err != nil {
			return nil, err
		}
		return &Variable{Name: n, Type: t.String(), Value: val}, nil
	}
	return nil, fmt.Errorf("could not find symbol value for %s", name)
}
// Extracts the value from the instructions given in the DW_AT_location entry.
// We execute the stack program described in the DW_OP_* instruction stream, and
// then grab the value from the other processes memory.
// When off != 0 it is taken as an absolute address and the location program
// is skipped. Pointer and struct cases recurse.
func (dbp *DebuggedProcess) extractValue(instructions []byte, off int64, typ interface{}) (string, error) {
	regs, err := dbp.Registers()
	if err != nil {
		return "", err
	}
	fde, err := dbp.FrameEntries.FDEForPC(regs.PC())
	if err != nil {
		return "", err
	}
	fctx := fde.EstablishFrame(regs.PC())
	cfaOffset := fctx.CFAOffset()
	offset := off
	if off == 0 {
		// Run the DWARF location program to get an offset from RSP.
		offset, err = op.ExecuteStackProgram(cfaOffset, instructions)
		if err != nil {
			return "", err
		}
		offset = int64(regs.Rsp) + offset
	}
	// If we have a user defined type, find the
	// underlying concrete type and use that.
	if tt, ok := typ.(*dwarf.TypedefType); ok {
		typ = tt.Type
	}
	offaddr := uintptr(offset)
	switch t := typ.(type) {
	case *dwarf.PtrType:
		// Dereference: read the 8-byte pointer, then extract the pointee.
		addr, err := dbp.readMemory(offaddr, 8)
		if err != nil {
			return "", err
		}
		adr := binary.LittleEndian.Uint64(addr)
		val, err := dbp.extractValue(nil, int64(adr), t.Type)
		if err != nil {
			return "", err
		}
		retstr := fmt.Sprintf("*%s", val)
		return retstr, nil
	case *dwarf.StructType:
		switch t.StructName {
		case "string":
			return dbp.readString(offaddr)
		case "[]int":
			return dbp.readIntSlice(offaddr)
		default:
			// Here we could recursively call extractValue to grab
			// the value of all the members of the struct.
			fields := make([]string, 0, len(t.Field))
			for _, field := range t.Field {
				val, err := dbp.extractValue(nil, field.ByteOffset+offset, field.Type)
				if err != nil {
					return "", err
				}
				fields = append(fields, fmt.Sprintf("%s: %s", field.Name, val))
			}
			retstr := fmt.Sprintf("%s {%s}", t.StructName, strings.Join(fields, ", "))
			return retstr, nil
		}
	case *dwarf.ArrayType:
		return dbp.readIntArray(offaddr, t)
	case *dwarf.IntType:
		return dbp.readInt(offaddr)
	case *dwarf.FloatType:
		return dbp.readFloat64(offaddr)
	}
	return "", fmt.Errorf("could not find value for type %s", typ)
}
// readString reads a Go string whose header is at addr in the tracee:
// it dereferences the data pointer and renders up to 16 bytes, truncating
// at the first NUL byte.
func (dbp *DebuggedProcess) readString(addr uintptr) (string, error) {
	val, err := dbp.readMemory(addr, 8)
	if err != nil {
		return "", err
	}
	// deref the pointer to the string
	addr = uintptr(binary.LittleEndian.Uint64(val))
	val, err = dbp.readMemory(addr, 16)
	if err != nil {
		return "", err
	}
	i := bytes.IndexByte(val, 0x0)
	// Fix: IndexByte returns -1 when no NUL is present, and val[:-1]
	// panics; keep the whole buffer in that case.
	if i < 0 {
		i = len(val)
	}
	val = val[:i]
	str := *(*string)(unsafe.Pointer(&val))
	return str, nil
}
// readIntSlice reads a []int whose 24-byte header (data, len, cap) is at
// addr in the tracee and renders it as "len: L cap: C [elements]".
func (dbp *DebuggedProcess) readIntSlice(addr uintptr) (string, error) {
	var number uint64
	val, err := dbp.readMemory(addr, uintptr(24))
	if err != nil {
		return "", err
	}
	a := binary.LittleEndian.Uint64(val[:8])
	l := binary.LittleEndian.Uint64(val[8:16])
	c := binary.LittleEndian.Uint64(val[16:24])
	val, err = dbp.readMemory(uintptr(a), uintptr(8*l))
	if err != nil {
		return "", err
	}
	members := make([]uint64, 0, l)
	buf := bytes.NewBuffer(val)
	// Decode 8-byte little-endian elements until the buffer is exhausted
	// (binary.Read returns io.EOF).
	for {
		err := binary.Read(buf, binary.LittleEndian, &number)
		if err != nil {
			break
		}
		members = append(members, number)
	}
	str := fmt.Sprintf("len: %d cap: %d %d", l, c, members)
	// Fix: previously `return str, err` — that outer err is always nil here
	// (the loop error is a shadowed variable), so the success path is made
	// explicit instead of appearing to propagate the decode error.
	return str, nil
}
// readIntArray reads a fixed-size int array of t.ByteSize bytes at addr in
// the tracee and renders it as "[N]int [elements]".
func (dbp *DebuggedProcess) readIntArray(addr uintptr, t *dwarf.ArrayType) (string, error) {
	raw, err := dbp.readMemory(addr, uintptr(t.ByteSize))
	if err != nil {
		return "", err
	}
	elems := make([]uint64, 0, t.ByteSize)
	rdr := bytes.NewBuffer(raw)
	for {
		var n uint64
		if err := binary.Read(rdr, binary.LittleEndian, &n); err != nil {
			break
		}
		elems = append(elems, n)
	}
	return fmt.Sprintf("[%d]int %d", t.ByteSize/8, elems), nil
}
// readInt reads an 8-byte little-endian integer at addr in the tracee and
// renders it in decimal.
func (dbp *DebuggedProcess) readInt(addr uintptr) (string, error) {
	raw, err := dbp.readMemory(addr, 8)
	if err != nil {
		return "", err
	}
	return strconv.Itoa(int(binary.LittleEndian.Uint64(raw))), nil
}
// readFloat64 reads an 8-byte little-endian float64 at addr in the tracee
// and renders it with minimal digits.
func (dbp *DebuggedProcess) readFloat64(addr uintptr) (string, error) {
	var n float64
	val, err := dbp.readMemory(addr, 8)
	if err != nil {
		return "", err
	}
	buf := bytes.NewBuffer(val)
	// Fix: the decode error was previously discarded, which could silently
	// report a zero value as a successful read.
	if err := binary.Read(buf, binary.LittleEndian, &n); err != nil {
		return "", err
	}
	return strconv.FormatFloat(n, 'f', -1, 64), nil
}
// readMemory reads size bytes from the tracee at addr via ptrace.
func (dbp *DebuggedProcess) readMemory(addr uintptr, size uintptr) ([]byte, error) {
	out := make([]byte, size)
	if _, err := syscall.PtracePeekData(dbp.Pid, addr, out); err != nil {
		return nil, err
	}
	return out, nil
}
// handleResult checks the error from a ptrace request and, when the request
// succeeded, waits on the tracee and records its new wait status.
func (dbp *DebuggedProcess) handleResult(err error) error {
	if err != nil {
		return err
	}
	ps, err := wait(dbp.Process.Pid)
	// ECHILD means the tracee is already gone; treat as non-fatal.
	if err != nil && err != syscall.ECHILD {
		return err
	}
	if ps != nil {
		dbp.ProcessState = ps
		// A stop that is not a ptrace trap (and not an exit) means some
		// other signal stopped the tracee; report where it happened.
		if ps.TrapCause() == -1 && !ps.Exited() {
			regs, err := dbp.Registers()
			if err != nil {
				return err
			}
			fmt.Printf("traced program %s at: %#v\n", ps.StopSignal(), regs.PC())
		}
	}
	return nil
}
// findExecutable opens the debugged process's binary via /proc/<pid>/exe
// and parses it as an ELF file, storing the result on dbp.Executable.
// On success the file handle is deliberately left open: the *elf.File
// reads section data from it lazily (see the .debug_* parsers).
func (dbp *DebuggedProcess) findExecutable() error {
	procpath := fmt.Sprintf("/proc/%d/exe", dbp.Pid)
	// Idiom fix: os.Open replaces the equivalent but obscure
	// os.OpenFile(procpath, 0, os.ModePerm) read-only open.
	f, err := os.Open(procpath)
	if err != nil {
		return err
	}
	elffile, err := elf.NewFile(f)
	if err != nil {
		// BUG FIX: the file descriptor was leaked on this error path.
		f.Close()
		return err
	}
	dbp.Executable = elffile
	return nil
}
// parseDebugLine loads and parses the .debug_line section of the
// executable, storing the result on dbp.DebugLine. Run as one of the
// parallel section-parsing workers; a missing section is fatal.
func (dbp *DebuggedProcess) parseDebugLine(wg *sync.WaitGroup) {
	defer wg.Done()
	data, err := dbp.Executable.Section(".debug_line").Data()
	if err != nil {
		fmt.Println("could not get .debug_line section", err)
		os.Exit(1)
	}
	dbp.DebugLine = line.Parse(data)
}
// parseDebugFrame loads and parses the .debug_frame section of the
// executable, storing the result on dbp.FrameEntries. Run as one of the
// parallel section-parsing workers; a missing section is fatal.
func (dbp *DebuggedProcess) parseDebugFrame(wg *sync.WaitGroup) {
	defer wg.Done()
	data, err := dbp.Executable.Section(".debug_frame").Data()
	if err != nil {
		fmt.Println("could not get .debug_frame section", err)
		os.Exit(1)
	}
	dbp.FrameEntries = frame.Parse(data)
}
// obtainGoSymbols builds the Go symbol/line table from the .gosymtab and
// .gopclntab sections of the executable and stores it on dbp.GoSymTable.
// Run as one of the parallel section-parsing workers; failure is fatal.
func (dbp *DebuggedProcess) obtainGoSymbols(wg *sync.WaitGroup) {
	defer wg.Done()
	var (
		symdat  []byte
		pclndat []byte
		err     error
	)
	// Either section may be absent; nil data is passed through below.
	if sec := dbp.Executable.Section(".gosymtab"); sec != nil {
		symdat, err = sec.Data()
		if err != nil {
			fmt.Println("could not get .gosymtab section", err)
			os.Exit(1)
		}
	}
	if sec := dbp.Executable.Section(".gopclntab"); sec != nil {
		pclndat, err = sec.Data()
		if err != nil {
			fmt.Println("could not get .gopclntab section", err)
			os.Exit(1)
		}
	}
	// NOTE(review): a missing .text section would nil-pointer panic here —
	// TODO confirm whether that can happen for supported binaries.
	pcln := gosym.NewLineTable(pclndat, dbp.Executable.Section(".text").Addr)
	tab, err := gosym.NewTable(symdat, pcln)
	if err != nil {
		// BUG FIX: message previously read "could not get initialize line table".
		fmt.Println("could not initialize line table", err)
		os.Exit(1)
	}
	dbp.GoSymTable = tab
}
// ReturnAddressFromOffset takes an offset from RSP and returns the address
// of the instruction the current function is going to return to.
// Panics if registers or memory cannot be read, matching the existing
// error handling for Registers().
func (dbp *DebuggedProcess) ReturnAddressFromOffset(offset int64) uint64 {
	regs, err := dbp.Registers()
	if err != nil {
		panic("Could not obtain register values")
	}
	retaddr := int64(regs.Rsp) + offset
	data := make([]byte, 8)
	// BUG FIX: the PtracePeekText error was silently ignored, which would
	// return a garbage address decoded from zeroed bytes on failure.
	if _, err := syscall.PtracePeekText(dbp.Pid, uintptr(retaddr), data); err != nil {
		panic(fmt.Sprintf("Could not read return address: %v", err))
	}
	return binary.LittleEndian.Uint64(data)
}
// wait blocks until the process with the given pid changes state and
// returns its wait status; rusage is collected but discarded.
func wait(pid int) (*syscall.WaitStatus, error) {
	var (
		status syscall.WaitStatus
		rusage syscall.Rusage
	)
	if _, err := syscall.Wait4(pid, &status, 0, &rusage); err != nil {
		return nil, err
	}
	return &status, nil
}
|
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package profiler
import (
"bytes"
"context"
"fmt"
"os"
"os/exec"
"runtime"
"strings"
"testing"
"text/template"
"time"
"cloud.google.com/go/profiler/proftest"
"golang.org/x/oauth2/google"
compute "google.golang.org/api/compute/v1"
)
const (
	// cloudScope is the OAuth2 scope requested for the default client.
	cloudScope = "https://www.googleapis.com/auth/cloud-platform"
	// benchFinishString is printed by busybench on success and polled for
	// in the VM's serial output.
	benchFinishString = "busybench finished profiling"
	// errorString is echoed by the startup script's ERR trap and polled
	// for as the failure marker.
	errorString = "failed to set up or run the benchmark"
)
// startupTemplate is rendered by initializeStartupScript and executed as a
// bash startup script on the test VM: it installs git and the requested Go
// version (via gimme), checks out the agent at {{.Commit}}, runs busybench,
// and mirrors all output (timestamped) to serial port 2 for polling.
// The raw string below is run verbatim on the VM; do not reformat it.
const startupTemplate = `
#! /bin/bash
# Signal any unexpected error.
trap 'echo "{{.ErrorString}}"' ERR
(
# Shut down the VM in 5 minutes after this script exits
# to stop accounting the VM for billing and cores quota.
trap "sleep 300 && poweroff" EXIT
retry() {
for i in {1..3}; do
"${@}" && return 0
done
return 1
}
# Fail on any error.
set -eo pipefail
# Display commands being run.
set -x
# Install git
retry apt-get update >/dev/null
retry apt-get -y -q install git >/dev/null
# $GOCACHE is required from Go 1.12. See https://golang.org/doc/go1.11#gocache
# $GOCACHE is explicitly set becasue $HOME is not set when this code runs
mkdir -p /tmp/gocache
export GOCACHE=/tmp/gocache
# Install gcc, needed to install go master
if [ "{{.GoVersion}}" = "master" ]
then
retry apt-get -y -q install gcc >/dev/null
fi
# Install desired Go version
mkdir -p /tmp/bin
retry curl -sL -o /tmp/bin/gimme https://raw.githubusercontent.com/travis-ci/gimme/master/gimme
chmod +x /tmp/bin/gimme
export PATH=$PATH:/tmp/bin
retry eval "$(gimme {{.GoVersion}})"
# Set $GOPATH
export GOPATH="$HOME/go"
export GOCLOUD_HOME=$GOPATH/src/cloud.google.com/go
mkdir -p $GOCLOUD_HOME
# Install agent
retry git clone https://code.googlesource.com/gocloud $GOCLOUD_HOME >/dev/null
cd $GOCLOUD_HOME
retry git fetch origin {{.Commit}}
git reset --hard {{.Commit}}
cd $GOCLOUD_HOME/profiler/busybench
retry go get >/dev/null
# Run benchmark with agent
go run busybench.go --service="{{.Service}}" --mutex_profiling="{{.MutexProfiling}}"
# Write output to serial port 2 with timestamp.
) 2>&1 | while read line; do echo "$(date): ${line}"; done >/dev/ttyS1
`
// goGCETestCase describes one GCE-based integration run: the VM
// configuration plus the Go version and profiling options exercised.
type goGCETestCase struct {
	proftest.InstanceConfig
	name             string   // profiler service name passed to busybench
	goVersion        string   // Go version installed on the VM (or "master")
	mutexProfiling   bool     // whether --mutex_profiling is passed to busybench
	wantProfileTypes []string // profile types expected to be collected
}
// initializeStartupScript renders the startup-script template with this
// test case's settings and stores the result on the instance config.
func (tc *goGCETestCase) initializeStartupScript(template *template.Template, commit string) error {
	params := struct {
		Service        string
		GoVersion      string
		Commit         string
		ErrorString    string
		MutexProfiling bool
	}{
		Service:        tc.name,
		GoVersion:      tc.goVersion,
		Commit:         commit,
		ErrorString:    errorString,
		MutexProfiling: tc.mutexProfiling,
	}
	var rendered bytes.Buffer
	if err := template.Execute(&rendered, params); err != nil {
		return fmt.Errorf("failed to render startup script for %s: %v", tc.name, err)
	}
	tc.StartupScript = rendered.String()
	return nil
}
// TestAgentIntegration boots one GCE VM per test case, runs busybench under
// the profiler agent via a startup script, polls the VM's serial output
// until the benchmark finishes, then queries the profiler backend for each
// expected profile type.
func TestAgentIntegration(t *testing.T) {
	t.Skip("https://github.com/googleapis/google-cloud-go/issues/1366")
	// Testing against master requires building go code and may take up to 10 minutes.
	// Allow this test to run in parallel with other top level tests to avoid timeouts.
	t.Parallel()
	if testing.Short() {
		t.Skip("skipping profiler integration test in short mode")
	}
	projectID := os.Getenv("GCLOUD_TESTS_GOLANG_PROJECT_ID")
	if projectID == "" {
		t.Skip("skipping profiler integration test when GCLOUD_TESTS_GOLANG_PROJECT_ID variable is not set")
	}
	zone := os.Getenv("GCLOUD_TESTS_GOLANG_PROFILER_ZONE")
	if zone == "" {
		t.Fatalf("GCLOUD_TESTS_GOLANG_PROFILER_ZONE environment variable must be set when integration test is requested")
	}
	// Figure out the Git commit of the current directory. The source checkout in
	// the test VM will run in the same commit. Note that any local changes to
	// the profiler agent won't be tested in the integration test. This flow only
	// works with code that has been committed and pushed to the public repo
	// (either to master or to a branch).
	output, err := exec.Command("git", "rev-parse", "HEAD").CombinedOutput()
	if err != nil {
		t.Fatalf("failed to gather the Git revision of the current source: %v", err)
	}
	commit := strings.Trim(string(output), "\n")
	t.Logf("using Git commit %q for the profiler integration test", commit)
	pst, err := time.LoadLocation("America/Los_Angeles")
	if err != nil {
		t.Fatalf("failed to initiate PST location: %v", err)
	}
	// runID makes VM/service names unique across concurrent runs.
	runID := strings.Replace(time.Now().In(pst).Format("2006-01-02-15-04-05.000000-0700"), ".", "-", -1)
	ctx := context.Background()
	client, err := google.DefaultClient(ctx, cloudScope)
	if err != nil {
		t.Fatalf("failed to get default client: %v", err)
	}
	computeService, err := compute.New(client)
	if err != nil {
		t.Fatalf("failed to initialize compute service: %v", err)
	}
	template, err := template.New("startupScript").Parse(startupTemplate)
	if err != nil {
		t.Fatalf("failed to parse startup script template: %v", err)
	}
	tr := proftest.TestRunner{
		Client: client,
	}
	gceTr := proftest.GCETestRunner{
		TestRunner:     tr,
		ComputeService: computeService,
	}
	testcases := []goGCETestCase{
		{
			InstanceConfig: proftest.InstanceConfig{
				ProjectID:   projectID,
				Zone:        zone,
				Name:        fmt.Sprintf("profiler-test-gomaster-%s", runID),
				MachineType: "n1-standard-1",
			},
			name:             fmt.Sprintf("profiler-test-gomaster-%s-gce", runID),
			wantProfileTypes: []string{"CPU", "HEAP", "THREADS", "CONTENTION", "HEAP_ALLOC"},
			goVersion:        "master",
			mutexProfiling:   true,
		},
		{
			InstanceConfig: proftest.InstanceConfig{
				ProjectID:   projectID,
				Zone:        zone,
				Name:        fmt.Sprintf("profiler-test-go111-%s", runID),
				MachineType: "n1-standard-1",
			},
			name:             fmt.Sprintf("profiler-test-go111-%s-gce", runID),
			wantProfileTypes: []string{"CPU", "HEAP", "THREADS", "CONTENTION", "HEAP_ALLOC"},
			goVersion:        "1.11",
			mutexProfiling:   true,
		},
		{
			InstanceConfig: proftest.InstanceConfig{
				ProjectID:   projectID,
				Zone:        zone,
				Name:        fmt.Sprintf("profiler-test-go110-%s", runID),
				MachineType: "n1-standard-1",
			},
			name:             fmt.Sprintf("profiler-test-go110-%s-gce", runID),
			wantProfileTypes: []string{"CPU", "HEAP", "THREADS", "CONTENTION", "HEAP_ALLOC"},
			goVersion:        "1.10",
			mutexProfiling:   true,
		},
		{
			InstanceConfig: proftest.InstanceConfig{
				ProjectID:   projectID,
				Zone:        zone,
				Name:        fmt.Sprintf("profiler-test-go19-%s", runID),
				MachineType: "n1-standard-1",
			},
			name:             fmt.Sprintf("profiler-test-go19-%s-gce", runID),
			wantProfileTypes: []string{"CPU", "HEAP", "THREADS", "CONTENTION", "HEAP_ALLOC"},
			goVersion:        "1.9",
			mutexProfiling:   true,
		},
	}
	// The number of tests run in parallel is the current value of GOMAXPROCS.
	runtime.GOMAXPROCS(len(testcases))
	for _, tc := range testcases {
		tc := tc // capture range variable
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			if err := tc.initializeStartupScript(template, commit); err != nil {
				// Include the underlying error (previously dropped).
				t.Fatalf("failed to initialize startup script: %v", err)
			}
			if err := gceTr.StartInstance(ctx, &tc.InstanceConfig); err != nil {
				t.Fatal(err)
			}
			defer func() {
				// BUG FIX: the original was
				//   if gceTr.DeleteInstance(ctx, &tc.InstanceConfig); err != nil
				// which discarded DeleteInstance's return value and re-tested
				// the stale enclosing err variable instead.
				if err := gceTr.DeleteInstance(ctx, &tc.InstanceConfig); err != nil {
					t.Fatal(err)
				}
			}()
			timeoutCtx, cancel := context.WithTimeout(ctx, time.Minute*25)
			defer cancel()
			if err := gceTr.PollForSerialOutput(timeoutCtx, &tc.InstanceConfig, benchFinishString, errorString); err != nil {
				t.Fatalf("PollForSerialOutput() got error: %v", err)
			}
			// Query over the last hour, ending now.
			timeNow := time.Now()
			endTime := timeNow.Format(time.RFC3339)
			startTime := timeNow.Add(-1 * time.Hour).Format(time.RFC3339)
			for _, pType := range tc.wantProfileTypes {
				pr, err := tr.QueryProfiles(tc.ProjectID, tc.name, startTime, endTime, pType)
				if err != nil {
					t.Errorf("QueryProfiles(%s, %s, %s, %s, %s) got error: %v", tc.ProjectID, tc.name, startTime, endTime, pType, err)
					continue
				}
				if err := pr.HasFunction("busywork"); err != nil {
					t.Error(err)
				}
			}
		})
	}
}
profiler: fix gimme and reenable integration test
Change-Id: Id55c3d357b209fd54fd6115775463795d64de19c
Reviewed-on: https://code-review.googlesource.com/c/gocloud/+/39074
Reviewed-by: Alexey Alexandrov <707c85aa1ca9cc766e4957d3fb95a91b4e895454@google.com>
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package profiler
import (
"bytes"
"context"
"fmt"
"os"
"os/exec"
"runtime"
"strings"
"testing"
"text/template"
"time"
"cloud.google.com/go/profiler/proftest"
"golang.org/x/oauth2/google"
compute "google.golang.org/api/compute/v1"
)
const (
	// cloudScope is the OAuth2 scope requested for the default client.
	cloudScope = "https://www.googleapis.com/auth/cloud-platform"
	// benchFinishString is printed by busybench on success and polled for
	// in the VM's serial output.
	benchFinishString = "busybench finished profiling"
	// errorString is echoed by the startup script's ERR trap and polled
	// for as the failure marker.
	errorString = "failed to set up or run the benchmark"
)
// startupTemplate is rendered by initializeStartupScript and executed as a
// bash startup script on the test VM: it installs git and the requested Go
// version (via gimme, with output captured to out.gimme before eval),
// checks out the agent at {{.Commit}}, runs busybench, and mirrors all
// output (timestamped) to serial port 2 for polling.
// The raw string below is run verbatim on the VM; do not reformat it.
const startupTemplate = `
#! /bin/bash
# Signal any unexpected error.
trap 'echo "{{.ErrorString}}"' ERR
(
# Shut down the VM in 5 minutes after this script exits
# to stop accounting the VM for billing and cores quota.
trap "sleep 300 && poweroff" EXIT
retry() {
for i in {1..3}; do
"${@}" && return 0
done
return 1
}
# Fail on any error.
set -eo pipefail
# Display commands being run.
set -x
# Suppress debconf warnings to minimize noise in logs
export DEBIAN_FRONTEND="noninteractive"
# Building go from master will fail without $HOME set.
# Set $HOME becasue it is not automatically set when this script runs.
# If $HOME is unset, $GOCACHE must be set for Go 1.12+
cd /root
export HOME=$PWD
# Install git
retry apt-get update >/dev/null
retry apt-get -y -q install git >/dev/null
# Set $GOPATH
export GOPATH="$HOME/go"
export GOCLOUD_HOME=$GOPATH/src/cloud.google.com/go
mkdir -p $GOCLOUD_HOME
# Install gcc, needed to install go master
if [ "{{.GoVersion}}" = "master" ]
then
retry apt-get -y -q install gcc >/dev/null
fi
# Install desired Go version
mkdir -p /tmp/bin
retry curl -sL -o /tmp/bin/gimme https://raw.githubusercontent.com/travis-ci/gimme/master/gimme
chmod +x /tmp/bin/gimme
export PATH=$PATH:/tmp/bin
retry gimme {{.GoVersion}} > out.gimme
eval "$(cat out.gimme)"
# Install agent
retry git clone https://code.googlesource.com/gocloud $GOCLOUD_HOME >/dev/null
cd $GOCLOUD_HOME
retry git fetch origin {{.Commit}}
git reset --hard {{.Commit}}
cd $GOCLOUD_HOME/profiler/busybench
retry go get >/dev/null
# Run benchmark with agent
go run busybench.go --service="{{.Service}}" --mutex_profiling="{{.MutexProfiling}}"
# Write output to serial port 2 with timestamp.
) 2>&1 | while read line; do echo "$(date): ${line}"; done >/dev/ttyS1
`
// goGCETestCase describes one GCE-based integration run: the VM
// configuration plus the Go version and profiling options exercised.
type goGCETestCase struct {
	proftest.InstanceConfig
	name             string   // profiler service name passed to busybench
	goVersion        string   // Go version installed on the VM (or "master")
	mutexProfiling   bool     // whether --mutex_profiling is passed to busybench
	wantProfileTypes []string // profile types expected to be collected
}
// initializeStartupScript renders the startup-script template with this
// test case's settings and stores the result on the instance config.
func (tc *goGCETestCase) initializeStartupScript(template *template.Template, commit string) error {
	params := struct {
		Service        string
		GoVersion      string
		Commit         string
		ErrorString    string
		MutexProfiling bool
	}{
		Service:        tc.name,
		GoVersion:      tc.goVersion,
		Commit:         commit,
		ErrorString:    errorString,
		MutexProfiling: tc.mutexProfiling,
	}
	var rendered bytes.Buffer
	if err := template.Execute(&rendered, params); err != nil {
		return fmt.Errorf("failed to render startup script for %s: %v", tc.name, err)
	}
	tc.StartupScript = rendered.String()
	return nil
}
// TestAgentIntegration boots one GCE VM per test case, runs busybench under
// the profiler agent via a startup script, polls the VM's serial output
// until the benchmark finishes, then queries the profiler backend for each
// expected profile type.
func TestAgentIntegration(t *testing.T) {
	// Testing against master requires building go code and may take up to 10 minutes.
	// Allow this test to run in parallel with other top level tests to avoid timeouts.
	t.Parallel()
	if testing.Short() {
		t.Skip("skipping profiler integration test in short mode")
	}
	projectID := os.Getenv("GCLOUD_TESTS_GOLANG_PROJECT_ID")
	if projectID == "" {
		t.Skip("skipping profiler integration test when GCLOUD_TESTS_GOLANG_PROJECT_ID variable is not set")
	}
	zone := os.Getenv("GCLOUD_TESTS_GOLANG_PROFILER_ZONE")
	if zone == "" {
		t.Fatalf("GCLOUD_TESTS_GOLANG_PROFILER_ZONE environment variable must be set when integration test is requested")
	}
	// Figure out the Git commit of the current directory. The source checkout in
	// the test VM will run in the same commit. Note that any local changes to
	// the profiler agent won't be tested in the integration test. This flow only
	// works with code that has been committed and pushed to the public repo
	// (either to master or to a branch).
	output, err := exec.Command("git", "rev-parse", "HEAD").CombinedOutput()
	if err != nil {
		t.Fatalf("failed to gather the Git revision of the current source: %v", err)
	}
	commit := strings.Trim(string(output), "\n")
	t.Logf("using Git commit %q for the profiler integration test", commit)
	pst, err := time.LoadLocation("America/Los_Angeles")
	if err != nil {
		t.Fatalf("failed to initiate PST location: %v", err)
	}
	// runID makes VM/service names unique across concurrent runs.
	runID := strings.Replace(time.Now().In(pst).Format("2006-01-02-15-04-05.000000-0700"), ".", "-", -1)
	ctx := context.Background()
	client, err := google.DefaultClient(ctx, cloudScope)
	if err != nil {
		t.Fatalf("failed to get default client: %v", err)
	}
	computeService, err := compute.New(client)
	if err != nil {
		t.Fatalf("failed to initialize compute service: %v", err)
	}
	template, err := template.New("startupScript").Parse(startupTemplate)
	if err != nil {
		t.Fatalf("failed to parse startup script template: %v", err)
	}
	tr := proftest.TestRunner{
		Client: client,
	}
	gceTr := proftest.GCETestRunner{
		TestRunner:     tr,
		ComputeService: computeService,
	}
	testcases := []goGCETestCase{
		{
			InstanceConfig: proftest.InstanceConfig{
				ProjectID:   projectID,
				Zone:        zone,
				Name:        fmt.Sprintf("profiler-test-gomaster-%s", runID),
				MachineType: "n1-standard-1",
			},
			name:             fmt.Sprintf("profiler-test-gomaster-%s-gce", runID),
			wantProfileTypes: []string{"CPU", "HEAP", "THREADS", "CONTENTION", "HEAP_ALLOC"},
			goVersion:        "master",
			mutexProfiling:   true,
		},
		{
			InstanceConfig: proftest.InstanceConfig{
				ProjectID:   projectID,
				Zone:        zone,
				Name:        fmt.Sprintf("profiler-test-go111-%s", runID),
				MachineType: "n1-standard-1",
			},
			name:             fmt.Sprintf("profiler-test-go111-%s-gce", runID),
			wantProfileTypes: []string{"CPU", "HEAP", "THREADS", "CONTENTION", "HEAP_ALLOC"},
			goVersion:        "1.11",
			mutexProfiling:   true,
		},
		{
			InstanceConfig: proftest.InstanceConfig{
				ProjectID:   projectID,
				Zone:        zone,
				Name:        fmt.Sprintf("profiler-test-go110-%s", runID),
				MachineType: "n1-standard-1",
			},
			name:             fmt.Sprintf("profiler-test-go110-%s-gce", runID),
			wantProfileTypes: []string{"CPU", "HEAP", "THREADS", "CONTENTION", "HEAP_ALLOC"},
			goVersion:        "1.10",
			mutexProfiling:   true,
		},
		{
			InstanceConfig: proftest.InstanceConfig{
				ProjectID:   projectID,
				Zone:        zone,
				Name:        fmt.Sprintf("profiler-test-go19-%s", runID),
				MachineType: "n1-standard-1",
			},
			name:             fmt.Sprintf("profiler-test-go19-%s-gce", runID),
			wantProfileTypes: []string{"CPU", "HEAP", "THREADS", "CONTENTION", "HEAP_ALLOC"},
			goVersion:        "1.9",
			mutexProfiling:   true,
		},
	}
	// The number of tests run in parallel is the current value of GOMAXPROCS.
	runtime.GOMAXPROCS(len(testcases))
	for _, tc := range testcases {
		tc := tc // capture range variable
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			if err := tc.initializeStartupScript(template, commit); err != nil {
				// Include the underlying error (previously dropped).
				t.Fatalf("failed to initialize startup script: %v", err)
			}
			if err := gceTr.StartInstance(ctx, &tc.InstanceConfig); err != nil {
				t.Fatal(err)
			}
			defer func() {
				// BUG FIX: the original was
				//   if gceTr.DeleteInstance(ctx, &tc.InstanceConfig); err != nil
				// which discarded DeleteInstance's return value and re-tested
				// the stale enclosing err variable instead.
				if err := gceTr.DeleteInstance(ctx, &tc.InstanceConfig); err != nil {
					t.Fatal(err)
				}
			}()
			timeoutCtx, cancel := context.WithTimeout(ctx, time.Minute*25)
			defer cancel()
			if err := gceTr.PollForSerialOutput(timeoutCtx, &tc.InstanceConfig, benchFinishString, errorString); err != nil {
				t.Fatalf("PollForSerialOutput() got error: %v", err)
			}
			// Query over the last hour, ending now.
			timeNow := time.Now()
			endTime := timeNow.Format(time.RFC3339)
			startTime := timeNow.Add(-1 * time.Hour).Format(time.RFC3339)
			for _, pType := range tc.wantProfileTypes {
				pr, err := tr.QueryProfiles(tc.ProjectID, tc.name, startTime, endTime, pType)
				if err != nil {
					t.Errorf("QueryProfiles(%s, %s, %s, %s, %s) got error: %v", tc.ProjectID, tc.name, startTime, endTime, pType, err)
					continue
				}
				if err := pr.HasFunction("busywork"); err != nil {
					t.Error(err)
				}
			}
		})
	}
}
|
package main
import (
"archive/tar"
"bytes"
"crypto/rand"
"database/sql"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/big"
"net"
"os"
"os/exec"
"path"
"path/filepath"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"syscall"
"time"
"gopkg.in/flosch/pongo2.v3"
"gopkg.in/lxc/go-lxc.v2"
"gopkg.in/yaml.v2"
"github.com/lxc/lxd/shared"
log "gopkg.in/inconshreveable/log15.v2"
)
// extractInterfaceFromConfigName returns "eth0" from "volatile.eth0.hwaddr",
// or an error if the key does not match this pattern.
func extractInterfaceFromConfigName(k string) (string, error) {
	re := regexp.MustCompile(`volatile\.([^.]*)\.hwaddr`)
	if m := re.FindStringSubmatch(k); len(m) > 1 {
		return m[1], nil
	}
	return "", fmt.Errorf("%s did not match", k)
}
// validateRawLxc checks user-supplied raw.lxc config: every non-empty,
// non-comment line must have "key=value" form, and keys that would
// interfere with LXD's own container handling (lxc.logfile and any
// lxc.network.* key) are rejected.
func validateRawLxc(rawLxc string) error {
	for _, line := range strings.Split(rawLxc, "\n") {
		// Skip blank lines and comments.
		if len(line) == 0 || strings.HasPrefix(line, "#") {
			continue
		}
		// Ensure the format is valid.
		parts := strings.SplitN(line, "=", 2)
		if len(parts) != 2 {
			return fmt.Errorf("invalid raw.lxc line: %s", line)
		}
		// Blacklist some keys (case-insensitive, surrounding whitespace ignored).
		key := strings.ToLower(strings.Trim(parts[0], " \t"))
		if key == "lxc.logfile" {
			return fmt.Errorf("setting lxc.logfile is not allowed")
		}
		if strings.HasPrefix(key, "lxc.network.") {
			return fmt.Errorf("setting lxc.network keys is not allowed")
		}
	}
	return nil
}
// setConfigItem applies one LXC config key/value to the container's
// underlying lxc.Container handle, wrapping failures with context.
func setConfigItem(c *containerLXD, key string, value string) error {
	if err := c.c.SetConfigItem(key, value); err != nil {
		// BUG FIX: include the underlying error, which was previously dropped.
		return fmt.Errorf("Failed to set LXC config: %s=%s: %v", key, value, err)
	}
	return nil
}
// generateMacAddr generates a mac address from a string template:
// e.g. "00:11:22:xx:xx:xx" -> "00:11:22:af:3e:51". Each 'x' is replaced
// by one cryptographically random hex digit; all other runes are copied
// through unchanged.
func generateMacAddr(template string) (string, error) {
	var out bytes.Buffer
	for _, r := range template {
		if r != 'x' {
			out.WriteRune(r)
			continue
		}
		n, err := rand.Int(rand.Reader, big.NewInt(16))
		if err != nil {
			return "", err
		}
		out.WriteString(fmt.Sprintf("%x", n.Int64()))
	}
	return out.String(), nil
}
// containerPath returns the on-disk directory for a container (or, when
// isSnapshot is true, a snapshot) with the given name.
func containerPath(name string, isSnapshot bool) string {
	subdir := "containers"
	if isSnapshot {
		subdir = "snapshots"
	}
	return shared.VarPath(subdir, name)
}
// containerLXDArgs contains every argument needed to create an LXD Container
type containerLXDArgs struct {
	ID           int // Leave it empty when you create one.
	Ctype        containerType
	Config       map[string]string
	Profiles     []string // profile names; defaulted to ["default"] when nil at create time
	Ephemeral    bool
	BaseImage    string // image fingerprint; stored as volatile.base_image when non-empty
	Architecture int
	Devices      shared.Devices
}
// containerLXD pairs a go-lxc container handle with LXD's database-backed
// metadata and the storage backend it lives on.
type containerLXD struct {
	c            *lxc.Container // underlying LXC handle, created in init()
	daemon       *Daemon
	id           int // database row id
	name         string
	config       map[string]string
	profiles     []string
	devices      shared.Devices
	architecture int
	ephemeral    bool
	idmapset     *shared.IdmapSet
	cType        containerType
	baseConfig   map[string]string // container-local config, deep-copied before profile expansion
	baseDevices  shared.Devices    // container-local devices, deep-copied before profile expansion
	storage      storage
}
// container is the interface implemented by containerLXD. It groups the
// operations the daemon performs on containers: lifecycle control, state
// queries, configuration, filesystem paths, and snapshot/migration support.
type container interface {
	// Lifecycle and state transitions.
	RenderState() (*shared.ContainerState, error)
	Reboot() error
	Freeze() error
	Shutdown(timeout time.Duration) error
	Start() error
	Stop() error
	Unfreeze() error
	Delete() error
	Restore(sourceContainer container) error
	Rename(newName string) error
	ConfigReplace(newConfig containerLXDArgs) error
	// Storage backend access.
	StorageStart() error
	StorageStop() error
	Storage() storage
	// State predicates.
	IsPrivileged() bool
	IsRunning() bool
	IsFrozen() bool
	IsEphemeral() bool
	IsSnapshot() bool
	// Metadata accessors.
	ID() int
	Name() string
	Architecture() int
	Config() map[string]string
	ConfigKeySet(key string, value string) error
	Devices() shared.Devices
	Profiles() []string
	// Filesystem paths.
	Path(newName string) string
	RootfsPath() string
	TemplatesPath() string
	StateDir() string
	LogFilePath() string
	LogPath() string
	// Runtime information.
	InitPID() int
	State() string
	IdmapSet() *shared.IdmapSet
	LastIdmapSet() (*shared.IdmapSet, error)
	// Templates, export and checkpoint/restore.
	TemplateApply(trigger string) error
	ExportToTar(snap string, w io.Writer) error
	Checkpoint(opts lxc.CheckpointOptions) error
	StartFromMigration(imagesDir string) error
	// TODO: Remove every use of this and remove it.
	LXContainerGet() *lxc.Container
	// Device hot-plug/unplug.
	DetachMount(m shared.Device) error
	AttachMount(m shared.Device) error
	AttachUnixDev(dev shared.Device) error
	DetachUnixDev(dev shared.Device) error
}
// unmountTempBlocks detaches and removes any temporary block-device
// mountpoints (directory entries named "blk.*") under path. All failures
// are logged as warnings and skipped; the function is best effort.
func unmountTempBlocks(path string) {
	entries, err := ioutil.ReadDir(path)
	if err != nil {
		return
	}
	for _, entry := range entries {
		name := entry.Name()
		if !strings.HasPrefix(name, "blk.") {
			continue
		}
		mnt := filepath.Join(path, name)
		if err = syscall.Unmount(mnt, syscall.MNT_DETACH); err != nil {
			shared.Log.Warn("Failed to unmount block device", log.Ctx{"error": err, "path": mnt})
			continue
		}
		if err = os.Remove(mnt); err != nil {
			shared.Log.Warn("Failed to unlink block device mountpoint", log.Ctx{"error": err, "path": mnt})
			continue
		}
	}
}
// getMountOptions translates a device's "readonly" and "optional" config
// values ("1" or "true") into mount option strings plus two boolean flags.
func getMountOptions(m shared.Device) ([]string, bool, bool) {
	opts := []string{}
	readonly := m["readonly"] == "1" || m["readonly"] == "true"
	if readonly {
		opts = append(opts, "ro")
	}
	optional := m["optional"] == "1" || m["optional"] == "true"
	if optional {
		opts = append(opts, "optional")
	}
	return opts, readonly, optional
}
/*
 * mountTmpBlockdev is called at container startup to mount any block
 * devices (since a container with idmap cannot do so). It stages the
 * device under $cntPath/blk.<mangled-source> and returns an
 * "lxc.mount.entry" key/value pair that bind-mounts the staging dir into
 * the container. Devices marked "optional" degrade to a warning and an
 * empty result on failure; others return an error.
 */
func mountTmpBlockdev(cntPath string, d shared.Device) ([]string, error) {
	source := d["source"]
	fstype, err := shared.BlockFsDetect(source)
	if err != nil {
		return []string{}, fmt.Errorf("Error setting up %s: %s", source, err)
	}
	opts, readonly, optional := getMountOptions(d)
	// Mount blockdev into $containerdir/blk.$(mktemp)
	fnam := fmt.Sprintf("blk.%s", strings.Replace(source, "/", "-", -1))
	blkmnt := filepath.Join(cntPath, fnam)
	// Best-effort cleanup of any stale mount/dir left by a previous run;
	// errors are deliberately ignored here.
	syscall.Unmount(blkmnt, syscall.MNT_DETACH)
	os.Remove(blkmnt)
	if err = os.Mkdir(blkmnt, 0660); err != nil {
		if optional {
			shared.Log.Warn("Failed to create block device mount directory",
				log.Ctx{"error": err, "source": source})
			return []string{}, nil
		}
		return []string{}, fmt.Errorf("Unable to create mountpoint for blockdev %s: %s", source, err)
	}
	flags := 0
	if readonly {
		flags |= syscall.MS_RDONLY
	}
	if err = syscall.Mount(source, blkmnt, fstype, uintptr(flags), ""); err != nil {
		if optional {
			shared.Log.Warn("Failed to mount block device", log.Ctx{"error": err, "source": source})
			return []string{}, nil
		}
		return []string{}, fmt.Errorf("Unable to prepare blockdev mount for %s: %s", source, err)
	}
	// The container-side entry is a bind mount of the staging directory.
	opts = append(opts, "bind")
	sb, err := os.Stat(source)
	if err == nil {
		if sb.IsDir() {
			opts = append(opts, "create=dir")
		} else {
			opts = append(opts, "create=file")
		}
	}
	// The in-container target must be relative: strip leading slashes.
	tgtpath := d["path"]
	for len(tgtpath) > 0 && filepath.IsAbs(tgtpath) {
		tgtpath = tgtpath[1:]
	}
	if len(tgtpath) == 0 {
		if optional {
			shared.Log.Warn("Invalid mount target", log.Ctx{"target": d["path"]})
			return []string{}, nil
		}
		return []string{}, fmt.Errorf("Invalid mount target %s", d["path"])
	}
	mtab := fmt.Sprintf("%s %s %s %s 0 0", blkmnt, tgtpath, "none", strings.Join(opts, ","))
	shared.Debugf("adding mount entry for %s: .%s.\n", d["source"], mtab)
	return []string{"lxc.mount.entry", mtab}, nil
}
// containerLXDCreateAsEmpty creates a new container record plus an empty
// storage volume; the record is removed again if storage creation fails.
func containerLXDCreateAsEmpty(d *Daemon, name string,
	args containerLXDArgs) (container, error) {
	c, err := containerLXDCreateInternal(d, name, args)
	if err != nil {
		return nil, err
	}
	// Back the new record with empty storage, rolling back on failure.
	if err = c.storage.ContainerCreate(c); err != nil {
		c.Delete()
		return nil, err
	}
	return c, nil
}
// containerLXDCreateFromImage creates a new container backed by the image
// with the given fingerprint hash, bumping the image's last-use timestamp.
// On any failure the just-created container record is deleted again.
func containerLXDCreateFromImage(d *Daemon, name string,
	args containerLXDArgs, hash string) (container, error) {
	// Create the container
	c, err := containerLXDCreateInternal(d, name, args)
	if err != nil {
		return nil, err
	}
	if err := dbImageLastAccessUpdate(d.db, hash); err != nil {
		// BUG FIX: previously the freshly created container record was
		// leaked on this error path; delete it like the other paths do.
		c.Delete()
		return nil, fmt.Errorf("Error updating image last use date: %s", err)
	}
	// Now create the storage from an image
	if err := c.storage.ContainerCreateFromImage(c, hash); err != nil {
		c.Delete()
		return nil, err
	}
	return c, nil
}
// containerLXDCreateAsCopy creates a new container whose config and
// storage are copied from sourceContainer; partial results are deleted
// on any failure.
func containerLXDCreateAsCopy(d *Daemon, name string,
	args containerLXDArgs, sourceContainer container) (container, error) {
	c, err := containerLXDCreateInternal(d, name, args)
	if err != nil {
		return nil, err
	}
	// Apply the requested config, then duplicate the source's storage.
	if err = c.ConfigReplace(args); err != nil {
		c.Delete()
		return nil, err
	}
	if err = c.storage.ContainerCopy(c, sourceContainer); err != nil {
		c.Delete()
		return nil, err
	}
	return c, nil
}
// containerLXDCreateAsSnapshot creates a snapshot record for
// sourceContainer and snapshots its rootfs on the source's storage
// backend. When stateful is true the source must be running and its
// runtime state is dumped with CRIU into the snapshot's state directory.
// Partial results are deleted on any failure.
func containerLXDCreateAsSnapshot(d *Daemon, name string,
	args containerLXDArgs, sourceContainer container,
	stateful bool) (container, error) {
	c, err := containerLXDCreateInternal(d, name, args)
	if err != nil {
		return nil, err
	}
	// Snapshots live on the same storage backend as their source.
	c.storage = sourceContainer.Storage()
	if err := c.storage.ContainerSnapshotCreate(c, sourceContainer); err != nil {
		c.Delete()
		return nil, err
	}
	if stateful {
		stateDir := c.StateDir()
		err = os.MkdirAll(stateDir, 0700)
		if err != nil {
			c.Delete()
			return nil, err
		}
		// TODO - shouldn't we freeze for the duration of rootfs snapshot below?
		if !sourceContainer.IsRunning() {
			c.Delete()
			return nil, fmt.Errorf("Container not running")
		}
		// Stop: true presumably stops the source after dumping — confirm
		// against go-lxc's CheckpointOptions documentation.
		opts := lxc.CheckpointOptions{Directory: stateDir, Stop: true, Verbose: true}
		err = sourceContainer.Checkpoint(opts)
		// Collect the CRIU log regardless of checkpoint success; a failure
		// to collect it is only a warning.
		err2 := CollectCRIULogFile(sourceContainer, stateDir, "snapshot", "dump")
		if err2 != nil {
			shared.Log.Warn("failed to collect criu log file", log.Ctx{"error": err2})
		}
		if err != nil {
			c.Delete()
			return nil, err
		}
	}
	return c, nil
}
// validContainerName rejects names containing the snapshot delimiter,
// which is reserved for snapshot paths.
func validContainerName(name string) error {
	if !strings.Contains(name, shared.SnapshotDelimiter) {
		return nil
	}
	return fmt.Errorf(
		"The character '%s' is reserved for snapshots.",
		shared.SnapshotDelimiter)
}
// containerLXDCreateInternal performs the work shared by all of the
// containerLXDCreate* helpers: validation, database record creation, and
// construction/initialization of the in-memory containerLXD. It does NOT
// create any storage; callers do that afterwards. On init failure the DB
// record is deleted again.
func containerLXDCreateInternal(
	d *Daemon, name string, args containerLXDArgs) (*containerLXD, error) {
	shared.Log.Info(
		"Container create",
		log.Ctx{
			"container":  name,
			"isSnapshot": args.Ctype == cTypeSnapshot})
	// Snapshot names legitimately contain the reserved delimiter, so only
	// plain container names are validated.
	if args.Ctype != cTypeSnapshot {
		if err := validContainerName(name); err != nil {
			return nil, err
		}
	}
	path := containerPath(name, args.Ctype == cTypeSnapshot)
	if shared.PathExists(path) {
		return nil, fmt.Errorf(
			"The container already exists on disk, container: '%s', path: '%s'",
			name,
			path)
	}
	// Fill in defaults for the optional arguments.
	if args.Profiles == nil {
		args.Profiles = []string{"default"}
	}
	if args.Config == nil {
		args.Config = map[string]string{}
	}
	if args.BaseImage != "" {
		args.Config["volatile.base_image"] = args.BaseImage
	}
	if args.Devices == nil {
		args.Devices = shared.Devices{}
	}
	// Every requested profile must already exist in the database.
	profiles, err := dbProfiles(d.db)
	if err != nil {
		return nil, err
	}
	for _, profile := range args.Profiles {
		if !shared.StringInSlice(profile, profiles) {
			return nil, fmt.Errorf("Requested profile '%s' doesn't exist", profile)
		}
	}
	id, err := dbContainerCreate(d.db, name, args)
	if err != nil {
		return nil, err
	}
	shared.Log.Debug(
		"Container created in the DB",
		log.Ctx{"container": name, "id": id})
	// Keep deep copies of the raw config/devices so later mutation of
	// args.Config/args.Devices can't affect the base versions.
	baseConfig := map[string]string{}
	if err := shared.DeepCopy(&args.Config, &baseConfig); err != nil {
		return nil, err
	}
	baseDevices := shared.Devices{}
	if err := shared.DeepCopy(&args.Devices, &baseDevices); err != nil {
		return nil, err
	}
	c := &containerLXD{
		daemon:       d,
		id:           id,
		name:         name,
		ephemeral:    args.Ephemeral,
		architecture: args.Architecture,
		config:       args.Config,
		profiles:     args.Profiles,
		devices:      args.Devices,
		cType:        args.Ctype,
		baseConfig:   baseConfig,
		baseDevices:  baseDevices}
	// No need to detect storage here, its a new container.
	c.storage = d.Storage
	if err := c.init(); err != nil {
		c.Delete() // Delete the container from the DB.
		return nil, err
	}
	// Persist the container's idmap as JSON under
	// volatile.last_state.idmap ("[]" when there is no idmap).
	idmap := c.IdmapSet()
	var jsonIdmap string
	if idmap != nil {
		idmapBytes, err := json.Marshal(idmap.Idmap)
		if err != nil {
			c.Delete()
			return nil, err
		}
		jsonIdmap = string(idmapBytes)
	} else {
		jsonIdmap = "[]"
	}
	err = c.ConfigKeySet("volatile.last_state.idmap", jsonIdmap)
	if err != nil {
		c.Delete()
		return nil, err
	}
	return c, nil
}
// containerLXDLoad instantiates an existing container from its database
// record. The storage backend is detected from the container's on-disk
// path; if detection fails the daemon's default storage is used with a
// warning. Returns the initialized container.
func containerLXDLoad(d *Daemon, name string) (container, error) {
	shared.Log.Debug("Container load", log.Ctx{"container": name})
	args, err := dbContainerGet(d.db, name)
	if err != nil {
		return nil, err
	}
	// Keep deep copies of the raw config/devices, mirroring what
	// containerLXDCreateInternal does at creation time.
	baseConfig := map[string]string{}
	if err := shared.DeepCopy(&args.Config, &baseConfig); err != nil {
		return nil, err
	}
	baseDevices := shared.Devices{}
	if err := shared.DeepCopy(&args.Devices, &baseDevices); err != nil {
		return nil, err
	}
	c := &containerLXD{
		daemon:       d,
		id:           args.ID,
		name:         name,
		ephemeral:    args.Ephemeral,
		architecture: args.Architecture,
		config:       args.Config,
		profiles:     args.Profiles,
		devices:      args.Devices,
		cType:        args.Ctype,
		baseConfig:   baseConfig,
		baseDevices:  baseDevices}
	// For snapshots ("parent/snap") detect storage from the parent's path.
	s, err := storageForFilename(d, shared.VarPath("containers", strings.Split(name, "/")[0]))
	if err != nil {
		shared.Log.Warn("Couldn't detect storage.", log.Ctx{"container": c.Name()})
		c.storage = d.Storage
	} else {
		c.storage = s
	}
	if err := c.init(); err != nil {
		return nil, err
	}
	return c, nil
}
// init prepares the LXContainer for this LXD Container
// TODO: This gets called on each load of the container,
// we might be able to split this is up into c.Start().
//
// The order below matters: profile config is applied before the
// per-container config, and raw.lxc is applied last (via
// applyPostDeviceConfig) so it can override everything else.
func (c *containerLXD) init() error {
	// Base LXC distro config to include; the userns variant is added
	// below for unprivileged containers.
	templateConfBase := "ubuntu"
	templateConfDir := os.Getenv("LXD_LXC_TEMPLATE_CONFIG")
	if templateConfDir == "" {
		templateConfDir = "/usr/share/lxc/config"
	}

	cc, err := lxc.NewContainer(c.Name(), c.daemon.lxcpath)
	if err != nil {
		return err
	}
	c.c = cc

	logfile := c.LogFilePath()
	if err := os.MkdirAll(filepath.Dir(logfile), 0700); err != nil {
		return err
	}

	if err = c.c.SetLogFile(logfile); err != nil {
		return err
	}

	// Only set lxc.arch when the architecture maps to a personality; an
	// error from ArchitecturePersonality means "none", not a failure.
	personality, err := shared.ArchitecturePersonality(c.architecture)
	if err == nil {
		if err := setConfigItem(c, "lxc.arch", personality); err != nil {
			return err
		}
	}

	err = setConfigItem(c, "lxc.include", fmt.Sprintf("%s/%s.common.conf", templateConfDir, templateConfBase))
	if err != nil {
		return err
	}

	if err := setConfigItem(c, "lxc.rootfs", c.RootfsPath()); err != nil {
		return err
	}
	if err := setConfigItem(c, "lxc.loglevel", "0"); err != nil {
		return err
	}
	if err := setConfigItem(c, "lxc.utsname", c.Name()); err != nil {
		return err
	}
	if err := setConfigItem(c, "lxc.tty", "0"); err != nil {
		return err
	}
	if err := setupDevLxdMount(c); err != nil {
		return err
	}

	// Apply profile config first ...
	for _, p := range c.profiles {
		if err := c.applyProfile(p); err != nil {
			return err
		}
	}

	// base per-container config should override profile config, so we apply it second
	if err := c.applyConfig(c.baseConfig); err != nil {
		return err
	}

	if !c.IsPrivileged() || runningInUserns {
		err = setConfigItem(c, "lxc.include", fmt.Sprintf("%s/%s.userns.conf", templateConfDir, templateConfBase))
		if err != nil {
			return err
		}
	}

	if c.IsNesting() {
		shared.Debugf("Setting up %s for nesting", c.name)
		// Append cgroup:mixed to whatever lxc.mount.auto already holds.
		orig := c.c.ConfigItem("lxc.mount.auto")
		auto := ""
		if len(orig) == 1 {
			auto = orig[0]
		}
		if !strings.Contains(auto, "cgroup") {
			auto = fmt.Sprintf("%s %s", auto, "cgroup:mixed")
			err = setConfigItem(c, "lxc.mount.auto", auto)
			if err != nil {
				return err
			}
		}
		/*
		 * mount extra /proc and /sys to work around kernel
		 * restrictions on remounting them when covered
		 */
		err = setConfigItem(c, "lxc.mount.entry", "proc dev/.lxc/proc proc create=dir,optional")
		if err != nil {
			return err
		}
		err = setConfigItem(c, "lxc.mount.entry", "sys dev/.lxc/sys sysfs create=dir,optional")
		if err != nil {
			return err
		}
	}

	/*
	 * Until stacked apparmor profiles are possible, we have to run nested
	 * containers unconfined
	 */
	if aaEnabled {
		if aaConfined() {
			// LXD itself is confined: reuse the current profile.
			curProfile := aaProfile()
			shared.Debugf("Running %s in current profile %s (nested container)", c.name, curProfile)
			curProfile = strings.TrimSuffix(curProfile, " (enforce)")
			if err := setConfigItem(c, "lxc.aa_profile", curProfile); err != nil {
				return err
			}
		} else if err := setConfigItem(c, "lxc.aa_profile", AAProfileFull(c)); err != nil {
			return err
		}
	}

	if err := setConfigItem(c, "lxc.seccomp", SeccompProfilePath(c)); err != nil {
		return err
	}

	if err := c.setupMacAddresses(); err != nil {
		return err
	}

	// Allow overwrites of devices
	for k, v := range c.baseDevices {
		c.devices[k] = v
	}

	if !c.IsPrivileged() {
		if c.daemon.IdmapSet == nil {
			return fmt.Errorf("LXD doesn't have a uid/gid allocation. In this mode, only privileged containers are supported.")
		}
		c.idmapset = c.daemon.IdmapSet // TODO - per-tenant idmaps
	}

	if err := c.applyIdmapSet(); err != nil {
		return err
	}

	if err := c.applyPostDeviceConfig(); err != nil {
		return err
	}

	return nil
}
// RenderState builds the API representation of this container: its status,
// the base and profile-expanded config/devices and, when running, the init
// PID and network addresses.
func (c *containerLXD) RenderState() (*shared.ContainerState, error) {
	code := shared.FromLXCState(int(c.c.State()))

	status := shared.ContainerStatus{
		Status:     code.String(),
		StatusCode: code,
	}
	if c.IsRunning() {
		status.Init = c.InitPID()
		status.Ips = c.iPsGet()
	}

	state := shared.ContainerState{
		Name:            c.name,
		Profiles:        c.profiles,
		Config:          c.baseConfig,
		ExpandedConfig:  c.config,
		Status:          status,
		Devices:         c.baseDevices,
		ExpandedDevices: c.devices,
		Ephemeral:       c.ephemeral,
	}

	return &state, nil
}
// insertMount live-mounts source at target inside a running container.
// It stages the mount on the host under the shared "shmounts" directory
// (visible in the container as /dev/.lxd-mounts) and then has the
// "forkmount" helper move-mount it into the container's namespace.
// It is a no-op when the container isn't running.
//
// NOTE(review): the options argument is currently unused by the initial
// host-side mount (an empty data string is passed) — confirm whether that
// is intentional.
func (c *containerLXD) insertMount(source, target, fstype string, flags int, options string) error {
	pid := c.c.InitPid()
	if pid == -1 { // container not running - we're done
		return nil
	}

	// Create a staging mountpoint: a directory for directory sources, a
	// plain file for everything else (bind-mounting files needs a file).
	var tmpMount string
	if shared.IsDir(source) {
		var err error
		tmpMount, err = ioutil.TempDir(shared.VarPath("shmounts", c.name), "lxdmount_")
		if err != nil {
			return err
		}
	} else {
		f, err := ioutil.TempFile(shared.VarPath("shmounts", c.name), "lxdmount_")
		if err != nil {
			// BUG FIX: this error was previously assigned to a shadowed
			// err and silently dropped, causing a confusing mount("")
			// failure below instead.
			return err
		}
		tmpMount = f.Name()
		f.Close()
	}

	// now live-mount
	if err := syscall.Mount(source, tmpMount, fstype, uintptr(flags), ""); err != nil {
		return err
	}

	mntsrc := filepath.Join("/dev/.lxd-mounts", filepath.Base(tmpMount))

	// finally we need to move-mount this in the container
	pidstr := fmt.Sprintf("%d", pid)
	err := exec.Command(os.Args[0], "forkmount", pidstr, mntsrc, target).Run()

	// Clean up the staging mount whether or not forkmount succeeded.
	syscall.Unmount(tmpMount, syscall.MNT_DETACH) // in case forkmount failed
	os.Remove(tmpMount)

	return err
}
// createUnixDevice creates the device node for a unix-char/unix-block
// device entry m inside the container's directory and returns the host
// source path and the container-relative target path to bind-mount it at.
func (c *containerLXD) createUnixDevice(m shared.Device) (string, string, error) {
	devname := m["path"]
	if !filepath.IsAbs(devname) {
		devname = filepath.Join("/", devname)
	}

	// target must be a relative path, so that lxc will DTRT
	tgtname := m["path"]
	for len(tgtname) > 0 && filepath.IsAbs(tgtname) {
		tgtname = tgtname[1:]
	}
	if len(tgtname) == 0 {
		return "", "", fmt.Errorf("Failed to interpret path: %s", devname)
	}

	// Determine major/minor: either both come from the device entry, or
	// both are read from the existing host device node.
	var err error
	var major, minor int
	if m["major"] == "" && m["minor"] == "" {
		major, minor, err = getDev(devname)
		if err != nil {
			return "", "", fmt.Errorf("Device does not exist: %s", devname)
		}
	} else if m["major"] == "" || m["minor"] == "" {
		return "", "", fmt.Errorf("Both major and minor must be supplied for devices")
	} else {
		/* ok we have a major:minor and need to create it */
		major, err = strconv.Atoi(m["major"])
		if err != nil {
			return "", "", fmt.Errorf("Bad major %s in device %s", m["major"], m["path"])
		}
		minor, err = strconv.Atoi(m["minor"])
		if err != nil {
			return "", "", fmt.Errorf("Bad minor %s in device %s", m["minor"], m["path"])
		}
	}

	// The node lives in the container dir under a flattened name
	// (slashes replaced by dashes).
	name := strings.Replace(m["path"], "/", "-", -1)
	devpath := path.Join(c.Path(""), name)
	mode := os.FileMode(0660)
	if m["type"] == "unix-block" {
		mode |= syscall.S_IFBLK
	} else {
		mode |= syscall.S_IFCHR
	}
	if m["mode"] != "" {
		tmp, err := devModeOct(m["mode"])
		if err != nil {
			return "", "", fmt.Errorf("Bad mode %s in device %s", m["mode"], m["path"])
		}
		mode = os.FileMode(tmp)
	}

	// Recreate the node from scratch each time.
	// NOTE(review): minor|(major<<8) only encodes majors/minors < 256 —
	// confirm larger device numbers are not needed here.
	os.Remove(devpath)
	if err := syscall.Mknod(devpath, uint32(mode), minor|(major<<8)); err != nil {
		// Fall back to bind-mounting the host's node if it exists.
		if shared.PathExists(devname) {
			return devname, tgtname, nil
		}
		return "", "", fmt.Errorf("Failed to create device %s for %s: %s", devpath, m["path"], err)
	}

	if err := c.idmapset.ShiftFile(devpath); err != nil {
		// uidshift failing is weird, but not a big problem. Log and proceed
		shared.Debugf("Failed to uidshift device %s: %s\n", m["path"], err)
	}

	return devpath, tgtname, nil
}
// setupUnixDev creates the node for a unix device entry m, and arranges
// for it to appear in the container: live-mounted plus a cgroup allow rule
// when running, and always recorded as an lxc.mount.entry for next boot.
func (c *containerLXD) setupUnixDev(m shared.Device) error {
	source, target, err := c.createUnixDevice(m)
	if err != nil {
		return fmt.Errorf("Failed to setup device %s: %s", m["path"], err)
	}

	options, err := devGetOptions(m)
	if err != nil {
		return err
	}

	if c.c.Running() {
		// insert mount from 'source' to 'target'
		err := c.insertMount(source, target, "none", syscall.MS_BIND, options)
		if err != nil {
			return fmt.Errorf("Failed to add mount for device %s: %s", m["path"], err)
		}

		// add the new device cgroup rule
		entry, err := deviceCgroupInfo(m)
		if err != nil {
			return fmt.Errorf("Failed to add cgroup rule for device %s: %s", m["path"], err)
		}
		if err := c.c.SetCgroupItem("devices.allow", entry); err != nil {
			return fmt.Errorf("Failed to add cgroup rule %s for device %s: %s", entry, m["path"], err)
		}
	}

	// Persist the bind mount so it survives a restart.
	entry := fmt.Sprintf("%s %s none %s", source, target, options)
	return c.c.SetConfigItem("lxc.mount.entry", entry)
}
// Start boots the container: it activates storage, loads the security
// profiles, applies devices and idmap shifts, saves the effective LXC
// config to a temp file and launches the container through the
// "lxd forkstart" helper. On any failure the storage (and any temporary
// block mounts) are released again.
func (c *containerLXD) Start() error {
	if c.IsRunning() {
		return fmt.Errorf("the container is already running")
	}

	// Start the storage for this container
	if err := c.StorageStart(); err != nil {
		return err
	}

	/* (Re)Load the AA profile; we set it in the container's config above
	 * in init()
	 */
	if err := AALoadProfile(c); err != nil {
		c.StorageStop()
		return err
	}

	if err := SeccompCreateProfile(c); err != nil {
		c.StorageStop()
		return err
	}

	if err := c.mountShared(); err != nil {
		// BUG FIX: release the storage on this error path too, matching
		// every other early failure in this function.
		c.StorageStop()
		return err
	}

	/*
	 * add the lxc.* entries for the configured devices,
	 * and create if necessary
	 */
	if err := c.applyDevices(); err != nil {
		// BUG FIX: devices may already have created temporary block
		// mounts; clean those up and release the storage.
		unmountTempBlocks(c.Path(""))
		c.StorageStop()
		return err
	}

	// Snapshot the full LXC config into a file for forkstart to consume.
	f, err := ioutil.TempFile("", "lxd_lxc_startconfig_")
	if err != nil {
		unmountTempBlocks(c.Path(""))
		c.StorageStop()
		return err
	}
	configPath := f.Name()
	if err = f.Chmod(0600); err != nil {
		f.Close()
		os.Remove(configPath)
		unmountTempBlocks(c.Path(""))
		c.StorageStop()
		return err
	}
	f.Close()

	err = c.c.SaveConfigFile(configPath)
	if err != nil {
		unmountTempBlocks(c.Path(""))
		c.StorageStop()
		return err
	}

	err = c.TemplateApply("start")
	if err != nil {
		unmountTempBlocks(c.Path(""))
		c.StorageStop()
		return err
	}

	/* Deal with idmap changes */
	idmap := c.IdmapSet()

	lastIdmap, err := c.LastIdmapSet()
	if err != nil {
		unmountTempBlocks(c.Path(""))
		c.StorageStop()
		return err
	}

	var jsonIdmap string
	if idmap != nil {
		idmapBytes, err := json.Marshal(idmap.Idmap)
		if err != nil {
			unmountTempBlocks(c.Path(""))
			c.StorageStop()
			return err
		}
		jsonIdmap = string(idmapBytes)
	} else {
		jsonIdmap = "[]"
	}

	// If the idmap changed since the last run, unshift with the old map
	// and reshift the rootfs with the new one.
	if !reflect.DeepEqual(idmap, lastIdmap) {
		shared.Debugf("Container idmap changed, remapping")

		if lastIdmap != nil {
			if err := lastIdmap.UnshiftRootfs(c.RootfsPath()); err != nil {
				unmountTempBlocks(c.Path(""))
				c.StorageStop()
				return err
			}
		}

		if idmap != nil {
			if err := idmap.ShiftRootfs(c.RootfsPath()); err != nil {
				unmountTempBlocks(c.Path(""))
				c.StorageStop()
				return err
			}
		}
	}

	err = c.ConfigKeySet("volatile.last_state.idmap", jsonIdmap)
	if err != nil {
		unmountTempBlocks(c.Path(""))
		c.StorageStop()
		return err
	}

	/* Actually start the container */
	out, err := exec.Command(
		os.Args[0],
		"forkstart",
		c.name,
		c.daemon.lxcpath,
		configPath).CombinedOutput()

	if string(out) != "" {
		for _, line := range strings.Split(strings.TrimRight(string(out), "\n"), "\n") {
			shared.Debugf("forkstart: %s", line)
		}
	}

	if err != nil {
		unmountTempBlocks(c.Path(""))
		c.StorageStop()
		err = fmt.Errorf(
			"Error calling 'lxd forkstart %s %s %s': err='%v'",
			c.name,
			c.daemon.lxcpath,
			path.Join(c.LogPath(), "lxc.conf"),
			err)
	}

	// Ephemeral containers are watched so they get deleted on shutdown.
	if err == nil && c.ephemeral == true {
		containerWatchEphemeral(c.daemon, c)
	}

	if err != nil {
		unmountTempBlocks(c.Path(""))
		c.StorageStop()
	}

	return err
}
// Reboot requests a reboot of the container via liblxc.
func (c *containerLXD) Reboot() error {
	return c.c.Reboot()
}

// Freeze pauses (freezes) all of the container's processes.
func (c *containerLXD) Freeze() error {
	return c.c.Freeze()
}
// IsNesting reports whether the "security.nesting" config key holds a
// truthy value ("1" or "true", case-insensitive).
func (c *containerLXD) IsNesting() bool {
	switch strings.ToLower(c.config["security.nesting"]) {
	case "1", "true":
		return true
	default:
		return false
	}
}
// IsPrivileged reports whether the "security.privileged" config key holds
// a truthy value ("1" or "true", case-insensitive).
func (c *containerLXD) IsPrivileged() bool {
	switch strings.ToLower(c.config["security.privileged"]) {
	case "1", "true":
		return true
	default:
		return false
	}
}
// IsRunning reports whether the underlying LXC container is running.
func (c *containerLXD) IsRunning() bool {
	return c.c.Running()
}

// IsFrozen reports whether the container is currently in the FROZEN state.
func (c *containerLXD) IsFrozen() bool {
	return c.State() == "FROZEN"
}
// Shutdown asks the container to stop cleanly within the given timeout,
// then releases its storage, temporary block mounts and AppArmor profile.
func (c *containerLXD) Shutdown(timeout time.Duration) error {
	if err := c.c.Shutdown(timeout); err != nil {
		// Best effort: still try to unload the storage.
		c.StorageStop()
		return err
	}

	// Stop the storage for this container
	if err := c.StorageStop(); err != nil {
		return err
	}

	unmountTempBlocks(c.Path(""))

	return AAUnloadProfile(c)
}
// Stop forcibly stops the container, then releases its storage, any
// temporary block mounts from previous runs and the AppArmor profile.
func (c *containerLXD) Stop() error {
	if err := c.c.Stop(); err != nil {
		// Best effort: still try to unload the storage.
		c.StorageStop()
		return err
	}

	// Stop the storage for this container
	if err := c.StorageStop(); err != nil {
		return err
	}

	// Clean up any mounts from previous runs
	unmountTempBlocks(c.Path(""))

	return AAUnloadProfile(c)
}
// Unfreeze resumes a frozen container.
func (c *containerLXD) Unfreeze() error {
	return c.c.Unfreeze()
}

// StorageFromImage provisions the container's rootfs from the image with
// the given fingerprint hash.
func (c *containerLXD) StorageFromImage(hash string) error {
	return c.storage.ContainerCreateFromImage(c, hash)
}

// StorageFromNone provisions an empty rootfs for the container.
func (c *containerLXD) StorageFromNone() error {
	return c.storage.ContainerCreate(c)
}

// StorageStart activates (mounts) the container's storage.
func (c *containerLXD) StorageStart() error {
	return c.storage.ContainerStart(c)
}

// StorageStop deactivates (unmounts) the container's storage.
func (c *containerLXD) StorageStop() error {
	return c.storage.ContainerStop(c)
}

// Storage returns the storage backend used by this container.
func (c *containerLXD) Storage() storage {
	return c.storage
}
// Restore rolls the container back to the given snapshot: filesystem first,
// then configuration. A running container is stopped for the duration and
// restarted afterwards.
func (c *containerLXD) Restore(sourceContainer container) error {
	/*
	 * restore steps:
	 * 1. stop container if already running
	 * 2. copy snapshot rootfs to container
	 * 3. overwrite existing config with snapshot config
	 */

	// Stop the container
	// TODO: stateful restore ?
	wasRunning := false
	if c.IsRunning() {
		wasRunning = true
		if err := c.Stop(); err != nil {
			shared.Log.Error(
				"RESTORE => could not stop container",
				log.Ctx{
					"container": c.Name(),
					"err":       err})
			return err
		}
		shared.Log.Debug(
			"RESTORE => Stopped container",
			log.Ctx{"container": c.Name()})
	}

	// Restore the FS.
	// TODO: I switched the FS and config restore, think thats the correct way
	// (pcdummy)
	err := c.storage.ContainerRestore(c, sourceContainer)
	if err != nil {
		shared.Log.Error("RESTORE => Restoring the filesystem failed",
			log.Ctx{
				"source":      sourceContainer.Name(),
				"destination": c.Name()})
		return err
	}

	args := containerLXDArgs{
		Ctype:        cTypeRegular,
		Config:       sourceContainer.Config(),
		Profiles:     sourceContainer.Profiles(),
		Ephemeral:    sourceContainer.IsEphemeral(),
		Architecture: sourceContainer.Architecture(),
		Devices:      sourceContainer.Devices(),
	}
	err = c.ConfigReplace(args)
	if err != nil {
		shared.Log.Error("RESTORE => Restore of the configuration failed",
			log.Ctx{
				"source":      sourceContainer.Name(),
				"destination": c.Name()})
		return err
	}

	if wasRunning {
		// BUG FIX: the restart error was previously discarded, so a
		// "successful" restore could leave the container down silently.
		if err := c.Start(); err != nil {
			shared.Log.Error(
				"RESTORE => Failed to restart container",
				log.Ctx{
					"container": c.Name(),
					"err":       err})
			return err
		}
	}

	return nil
}
// Delete removes the container (or snapshot) from disk and the database.
// Regular containers also drag their snapshots, temporary block mounts and
// AppArmor/Seccomp profiles along with them.
func (c *containerLXD) Delete() error {
	shared.Log.Debug("containerLXD.Delete", log.Ctx{"c.name": c.Name(), "type": c.cType})

	switch c.cType {
	case cTypeRegular:
		// Snapshots first, then the container's own storage.
		if err := containerDeleteSnapshots(c.daemon, c.Name()); err != nil {
			return err
		}

		if err := c.storage.ContainerDelete(c); err != nil {
			return err
		}
		unmountTempBlocks(c.Path(""))
	case cTypeSnapshot:
		if err := c.storage.ContainerSnapshotDelete(c); err != nil {
			return err
		}
	default:
		return fmt.Errorf("Unknown cType: %d", c.cType)
	}

	if err := dbContainerRemove(c.daemon.db, c.Name()); err != nil {
		return err
	}

	// Security profile cleanup; return values are intentionally ignored.
	AADeleteProfile(c)
	SeccompDeleteProfile(c)

	return nil
}
// Rename renames the container (or snapshot) on storage and in the
// database, renames all of its snapshots to follow, and reinitializes the
// LXC handle under the new name. Running containers cannot be renamed.
func (c *containerLXD) Rename(newName string) error {
	oldName := c.Name()

	// Snapshot names contain the "/" delimiter so they skip the
	// hostname validity check.
	if !c.IsSnapshot() && !shared.ValidHostname(newName) {
		return fmt.Errorf("Invalid container name")
	}

	if c.IsRunning() {
		return fmt.Errorf("renaming of running container not allowed")
	}

	if c.IsSnapshot() {
		if err := c.storage.ContainerSnapshotRename(c, newName); err != nil {
			return err
		}
	} else {
		if err := c.storage.ContainerRename(c, newName); err != nil {
			return err
		}
	}

	if err := dbContainerRename(c.daemon.db, oldName, newName); err != nil {
		return err
	}

	// Rename every snapshot to live under the new container name;
	// individual failures are logged but don't abort the rename.
	results, err := dbContainerGetSnapshots(c.daemon.db, oldName)
	if err != nil {
		return err
	}

	for _, sname := range results {
		sc, err := containerLXDLoad(c.daemon, sname)
		if err != nil {
			shared.Log.Error(
				"containerDeleteSnapshots: Failed to load the snapshotcontainer",
				log.Ctx{"container": oldName, "snapshot": sname})
			continue
		}
		baseSnapName := filepath.Base(sname)
		newSnapshotName := newName + shared.SnapshotDelimiter + baseSnapName
		if err := sc.Rename(newSnapshotName); err != nil {
			shared.Log.Error(
				"containerDeleteSnapshots: Failed to rename a snapshotcontainer",
				log.Ctx{"container": oldName, "snapshot": sname, "err": err})
		}
	}

	c.name = newName

	// Recreate the LX Container
	// NOTE(review): the error from re-init is ignored here — confirm
	// whether a failed reinit should be surfaced to the caller.
	c.c = nil
	c.init()

	return nil
}
// IsEphemeral reports whether the container is flagged for deletion on
// shutdown.
func (c *containerLXD) IsEphemeral() bool {
	return c.ephemeral
}

// IsSnapshot reports whether this object represents a snapshot.
func (c *containerLXD) IsSnapshot() bool {
	return c.cType == cTypeSnapshot
}

// ID returns the container's database id.
func (c *containerLXD) ID() int {
	return c.id
}

// Name returns the container's name.
func (c *containerLXD) Name() string {
	return c.name
}

// Architecture returns the container's architecture id.
func (c *containerLXD) Architecture() int {
	return c.architecture
}
// Path returns the container's on-disk directory. When newName is
// non-empty, the path for that (future) name is returned instead, which is
// useful while renaming.
func (c *containerLXD) Path(newName string) string {
	name := newName
	if name == "" {
		name = c.Name()
	}
	return containerPath(name, c.IsSnapshot())
}
// RootfsPath returns the path of the container's root filesystem.
func (c *containerLXD) RootfsPath() string {
	return path.Join(c.Path(""), "rootfs")
}

// TemplatesPath returns the directory holding the container's templates.
func (c *containerLXD) TemplatesPath() string {
	return path.Join(c.Path(""), "templates")
}

// StateDir returns the container's state directory.
func (c *containerLXD) StateDir() string {
	return path.Join(c.Path(""), "state")
}

// LogPath returns the directory holding this container's log files.
func (c *containerLXD) LogPath() string {
	return shared.LogPath(c.Name())
}

// LogFilePath returns the path of the LXC log file.
func (c *containerLXD) LogFilePath() string {
	return filepath.Join(c.LogPath(), "lxc.log")
}

// InitPID returns the PID of the container's init process (-1 when the
// container isn't running).
func (c *containerLXD) InitPID() int {
	return c.c.InitPid()
}

// State returns the LXC state name (e.g. "RUNNING", "FROZEN").
func (c *containerLXD) State() string {
	return c.c.State().String()
}

// IdmapSet returns the uid/gid mapping used by this container (nil for
// privileged containers).
func (c *containerLXD) IdmapSet() *shared.IdmapSet {
	return c.idmapset
}
// LastIdmapSet returns the idmap the container's rootfs was last shifted
// with, as recorded in the "volatile.last_state.idmap" config key. When no
// value was recorded the current idmap is assumed; an explicit empty list
// ("[]") means the rootfs is unshifted, in which case (nil, nil) is
// returned.
func (c *containerLXD) LastIdmapSet() (*shared.IdmapSet, error) {
	lastJSON := c.Config()["volatile.last_state.idmap"]
	if lastJSON == "" {
		return c.IdmapSet(), nil
	}

	set := new(shared.IdmapSet)
	if err := json.Unmarshal([]byte(lastJSON), &set.Idmap); err != nil {
		return nil, err
	}

	if len(set.Idmap) == 0 {
		return nil, nil
	}

	return set, nil
}
// ConfigKeySet sets a single key in the container's base (unexpanded)
// configuration and persists the change through a full ConfigReplace.
func (c *containerLXD) ConfigKeySet(key string, value string) error {
	c.baseConfig[key] = value

	return c.ConfigReplace(containerLXDArgs{
		Ctype:        c.cType,
		Config:       c.baseConfig,
		Profiles:     c.profiles,
		Ephemeral:    c.ephemeral,
		Architecture: c.architecture,
		Devices:      c.baseDevices,
	})
}
// LXContainerGet exposes the underlying go-lxc container handle.
func (c *containerLXD) LXContainerGet() *lxc.Container {
	return c.c
}
// ConfigReplace replaces the config of container and tries to live apply
// the new configuration. The new config/profiles/devices are validated
// against the LXC handle, written to the database transactionally, and —
// for a running container — the AppArmor profile is reloaded and device
// changes are applied live.
func (c *containerLXD) ConfigReplace(newContainerArgs containerLXDArgs) error {
	/* check to see that the config actually applies to the container
	 * successfully before saving it. in particular, raw.lxc and
	 * raw.apparmor need to be parsed once to make sure they make sense.
	 */
	preDevList := c.devices

	if err := c.applyConfig(newContainerArgs.Config); err != nil {
		return err
	}

	tx, err := dbBegin(c.daemon.db)
	if err != nil {
		return err
	}

	/* Update config or profiles */
	if err = dbContainerConfigClear(tx, c.id); err != nil {
		shared.Log.Debug(
			"Error clearing configuration for container",
			log.Ctx{"name": c.Name()})
		tx.Rollback()
		return err
	}

	if err = dbContainerConfigInsert(tx, c.id, newContainerArgs.Config); err != nil {
		shared.Debugf("Error inserting configuration for container %s: %s", c.Name(), err)
		tx.Rollback()
		return err
	}

	/* handle profiles */
	if emptyProfile(newContainerArgs.Profiles) {
		_, err := tx.Exec("DELETE from containers_profiles where container_id=?", c.id)
		if err != nil {
			tx.Rollback()
			return err
		}
	} else {
		if err := dbContainerProfilesInsert(tx, c.id, newContainerArgs.Profiles); err != nil {
			tx.Rollback()
			return err
		}
	}

	err = dbDevicesAdd(tx, "container", int64(c.id), newContainerArgs.Devices)
	if err != nil {
		tx.Rollback()
		return err
	}

	if err := c.applyPostDeviceConfig(); err != nil {
		// BUG FIX: this error path previously leaked the open transaction.
		tx.Rollback()
		return err
	}

	c.baseConfig = newContainerArgs.Config
	c.baseDevices = newContainerArgs.Devices

	/* Let's try to load the apparmor profile again, in case the
	 * raw.apparmor config was changed (or deleted). Make sure we do this
	 * before commit, in case it fails because the user screwed something
	 * up so we can roll back and not hose their container.
	 *
	 * For containers that aren't running, we just want to parse the new
	 * profile; this is because this code is called during the start
	 * process after the profile is loaded but before the container starts,
	 * which will cause a container start to fail. If the container is
	 * running, we /do/ want to reload the profile, because we want the
	 * changes to take effect immediately.
	 */
	if !c.IsRunning() {
		AAParseProfile(c)
		return txCommit(tx)
	}

	if err := AALoadProfile(c); err != nil {
		tx.Rollback()
		return err
	}

	if err := txCommit(tx); err != nil {
		return err
	}

	// add devices from new profile list to the desired goal set
	for _, p := range c.profiles {
		profileDevs, err := dbDevices(c.daemon.db, p, true)
		if err != nil {
			return fmt.Errorf("Error reading devices from profile '%s': %v", p, err)
		}

		newContainerArgs.Devices.ExtendFromProfile(preDevList, profileDevs)
	}

	// Apply the device delta to the running container in a second
	// transaction.
	tx, err = dbBegin(c.daemon.db)
	if err != nil {
		return err
	}

	if err := devicesApplyDeltaLive(tx, c, preDevList, newContainerArgs.Devices); err != nil {
		// BUG FIX: roll back instead of leaking the second transaction.
		tx.Rollback()
		return err
	}

	return txCommit(tx)
}
// Config returns the expanded (profiles + container) configuration.
func (c *containerLXD) Config() map[string]string {
	return c.config
}

// Devices returns the expanded (profiles + container) device map.
func (c *containerLXD) Devices() shared.Devices {
	return c.devices
}

// Profiles returns the list of profiles applied to the container.
func (c *containerLXD) Profiles() []string {
	return c.profiles
}
/*
 * Export the container to a unshifted tarfile containing:
 * dir/
 *     metadata.yaml
 *     rootfs/
 */
func (c *containerLXD) ExportToTar(snap string, w io.Writer) error {
	if snap == "" && c.IsRunning() {
		return fmt.Errorf("Cannot export a running container as image")
	}

	if err := c.StorageStart(); err != nil {
		return err
	}
	defer c.StorageStop()

	// Unshift the rootfs for the duration of the export, then shift it
	// back.
	idmap, err := c.LastIdmapSet()
	if err != nil {
		return err
	}

	if idmap != nil {
		if err := idmap.UnshiftRootfs(c.RootfsPath()); err != nil {
			return err
		}

		defer idmap.ShiftRootfs(c.RootfsPath())
	}

	tw := tar.NewWriter(w)

	// keep track of the first path we saw for each path with nlink>1
	linkmap := map[uint64]string{}

	cDir := c.Path("")

	// Path inside the tar image is the pathname starting after cDir
	offset := len(cDir) + 1

	writeToTar := func(path string, fi os.FileInfo, err error) error {
		if err := c.tarStoreFile(linkmap, offset, tw, path, fi); err != nil {
			shared.Debugf("Error tarring up %s: %s", path, err)
			return err
		}
		return nil
	}

	fnam := filepath.Join(cDir, "metadata.yaml")
	if shared.PathExists(fnam) {
		fi, err := os.Lstat(fnam)
		if err != nil {
			shared.Debugf("Error statting %s during exportToTar", fnam)
			tw.Close()
			return err
		}
		if err := c.tarStoreFile(linkmap, offset, tw, fnam, fi); err != nil {
			shared.Debugf("Error writing to tarfile: %s", err)
			tw.Close()
			return err
		}
	}

	// BUG FIX: walk errors were previously discarded, silently producing
	// a truncated image tarball.
	fnam = filepath.Join(cDir, "rootfs")
	if err := filepath.Walk(fnam, writeToTar); err != nil {
		shared.Debugf("Error walking rootfs during exportToTar: %s", err)
		tw.Close()
		return err
	}

	fnam = filepath.Join(cDir, "templates")
	if shared.PathExists(fnam) {
		if err := filepath.Walk(fnam, writeToTar); err != nil {
			shared.Debugf("Error walking templates during exportToTar: %s", err)
			tw.Close()
			return err
		}
	}

	return tw.Close()
}
// TemplateApply renders the templates declared in the container's
// metadata.yaml whose "when" list contains the given trigger (e.g.
// "start"), writing the output into the container's rootfs. Containers
// without a metadata.yaml are skipped silently.
func (c *containerLXD) TemplateApply(trigger string) error {
	fname := path.Join(c.Path(""), "metadata.yaml")

	if !shared.PathExists(fname) {
		return nil
	}

	content, err := ioutil.ReadFile(fname)
	if err != nil {
		return err
	}

	metadata := new(imageMetadata)
	err = yaml.Unmarshal(content, &metadata)
	if err != nil {
		return fmt.Errorf("Could not parse %s: %v", fname, err)
	}

	// NOTE: the loop variable used to be named "filepath", shadowing the
	// path/filepath package; renamed to tplPath.
	for tplPath, template := range metadata.Templates {
		var w *os.File

		// Only apply templates registered for this trigger.
		found := false
		for _, tplTrigger := range template.When {
			if tplTrigger == trigger {
				found = true
				break
			}
		}

		if !found {
			continue
		}

		fullpath := shared.VarPath("containers", c.name, "rootfs", strings.TrimLeft(tplPath, "/"))

		if shared.PathExists(fullpath) {
			w, err = os.Create(fullpath)
			if err != nil {
				return err
			}
		} else {
			// The target doesn't exist yet: create it (and its parent
			// directories) owned by the container's root.
			uid := 0
			gid := 0
			if !c.IsPrivileged() {
				uid, gid = c.idmapset.ShiftIntoNs(0, 0)
			}
			shared.MkdirAllOwner(path.Dir(fullpath), 0755, uid, gid)

			w, err = os.Create(fullpath)
			if err != nil {
				return err
			}

			if !c.IsPrivileged() {
				w.Chown(uid, gid)
			}
			w.Chmod(0644)
		}

		tplString, err := ioutil.ReadFile(shared.VarPath("containers", c.name, "templates", template.Template))
		if err != nil {
			// BUG FIX: close the output file on the error paths and after
			// rendering; it was previously leaked on every iteration.
			w.Close()
			return err
		}

		tpl, err := pongo2.FromString("{% autoescape off %}" + string(tplString) + "{% endautoescape %}")
		if err != nil {
			w.Close()
			return err
		}

		containerMeta := make(map[string]string)
		containerMeta["name"] = c.name
		containerMeta["architecture"], _ = shared.ArchitectureName(c.architecture)

		if c.ephemeral {
			containerMeta["ephemeral"] = "true"
		} else {
			containerMeta["ephemeral"] = "false"
		}

		if c.IsPrivileged() {
			containerMeta["privileged"] = "true"
		} else {
			containerMeta["privileged"] = "false"
		}

		// config_get(key, default) helper exposed to the template.
		configGet := func(confKey, confDefault *pongo2.Value) *pongo2.Value {
			val, ok := c.config[confKey.String()]
			if !ok {
				return confDefault
			}

			return pongo2.AsValue(strings.TrimRight(val, "\r\n"))
		}

		tpl.ExecuteWriter(pongo2.Context{"trigger": trigger,
			"path":       tplPath,
			"container":  containerMeta,
			"config":     c.config,
			"devices":    c.devices,
			"properties": template.Properties,
			"config_get": configGet}, w)

		w.Close()
	}

	return nil
}
// DetachMount removes the live mount for device m from a running
// container via the "forkumount" helper. The persistent lxc.mount.entry
// is not removed yet (see TODO).
func (c *containerLXD) DetachMount(m shared.Device) error {
	// TODO - in case of reboot, we should remove the lxc.mount.entry. Trick
	// is, we can't d.c.ClearConfigItem bc that will clear all the keys. So
	// we should get the full list, clear, then reinsert all but the one we're
	// removing
	shared.Debugf("Mounts detach not yet implemented")

	pid := c.c.InitPid()
	if pid == -1 {
		// Container isn't running; nothing to unmount live.
		return nil
	}

	return exec.Command(os.Args[0], "forkumount", fmt.Sprintf("%d", pid), m["path"]).Run()
}
/* This is called when adding a mount to a *running* container */
// AttachMount bind-mounts the device's source into the running container
// at m["path"], preparing a host-side mount first when the source is a
// block device. Failures on devices marked optional are logged and
// swallowed.
func (c *containerLXD) AttachMount(m shared.Device) error {
	dest := m["path"]
	source := m["source"]
	flags := 0

	sb, err := os.Stat(source)
	if err != nil {
		return err
	}

	opts, readonly, optional := getMountOptions(m)
	if readonly {
		flags |= syscall.MS_RDONLY
	}

	if shared.IsBlockdev(sb.Mode()) {
		fstype, err := shared.BlockFsDetect(source)
		if err != nil {
			if optional {
				shared.Log.Warn("Failed to detect fstype for block device",
					log.Ctx{"error": err, "source": source})
				return nil
			}
			return fmt.Errorf("Unable to detect fstype for %s: %s", source, err)
		}

		// Mount blockdev into $containerdir/blk.$(mktemp)
		fnam := fmt.Sprintf("blk.%s", strings.Replace(source, "/", "-", -1))
		blkmnt := filepath.Join(c.Path(""), fnam)
		syscall.Unmount(blkmnt, syscall.MNT_DETACH)
		os.Remove(blkmnt)
		if err = os.Mkdir(blkmnt, 0660); err != nil {
			if optional {
				return nil
			}
			return fmt.Errorf("Unable to create mountpoint for blockdev %s: %s", source, err)
		}
		if err = syscall.Mount(source, blkmnt, fstype, uintptr(flags), ""); err != nil {
			if optional {
				return nil
			}
			return fmt.Errorf("Unable to prepare blockdev mount for %s: %s", source, err)
		}

		// From here on bind-mount the staged filesystem, not the raw
		// block device.
		source = blkmnt
		opts = append(opts, "create=dir")
	} else if sb.IsDir() {
		opts = append(opts, "create=dir")
	} else {
		opts = append(opts, "create=file")
	}

	opts = append(opts, "bind")
	flags |= syscall.MS_BIND
	optstr := strings.Join(opts, ",")

	// Persist the mount for the next container start.
	entry := fmt.Sprintf("%s %s %s %s 0 0", source, dest, "none", optstr)
	if err := setConfigItem(c, "lxc.mount.entry", entry); err != nil {
		if optional {
			// BUG FIX: optional devices previously fell through to the
			// error return below, unlike every other optional path here.
			shared.Log.Warn("Failed to setup lxc mount for block device",
				log.Ctx{"error": err, "source": source})
			return nil
		}
		return fmt.Errorf("Failed to set up lxc mount entry for %s: %s", m["source"], err)
	}

	// And mount it into the running container right away.
	err = c.insertMount(source, dest, "none", flags, optstr)
	if err != nil {
		if optional {
			shared.Log.Warn("Failed to insert mount for block device",
				log.Ctx{"error": err, "source": m["source"]})
			return nil
		}
		return fmt.Errorf("Failed to insert mount for %s: %s", m["source"], err)
	}

	return nil
}
// applyConfig translates LXD config keys into LXC config items on the
// container handle, and mirrors every key into c.config (the expanded
// config) so keys like security.privileged propagate.
func (c *containerLXD) applyConfig(config map[string]string) error {
	for k, v := range config {
		var err error
		switch k {
		case "limits.cpus":
			// TODO - Come up with a way to choose cpus for multiple
			// containers
			var vint int
			count, scanErr := fmt.Sscanf(v, "%d", &vint)
			if scanErr != nil {
				return scanErr
			}
			if count != 1 || vint < 0 || vint > 65000 {
				return fmt.Errorf("Bad cpu limit: %s", v)
			}
			cpuset := fmt.Sprintf("0-%d", vint-1)
			// BUG FIX: this error was previously assigned to a shadowed
			// err inside the case and never checked below.
			err = setConfigItem(c, "lxc.cgroup.cpuset.cpus", cpuset)
		case "limits.memory":
			err = setConfigItem(c, "lxc.cgroup.memory.limit_in_bytes", v)
		default:
			if strings.HasPrefix(k, "environment.") {
				// BUG FIX: this error was previously ignored.
				err = setConfigItem(c, "lxc.environment", fmt.Sprintf("%s=%s", strings.TrimPrefix(k, "environment."), v))
			}

			/* Things like security.privileged need to be propagated */
			c.config[k] = v
		}
		if err != nil {
			shared.Debugf("Error setting %s: %q", k, err)
			return err
		}
	}

	return nil
}
// applyPostDeviceConfig loads the user's raw.lxc config into the LXC
// handle. It must run after devices are instantiated so raw.lxc can
// override device-generated entries.
func (c *containerLXD) applyPostDeviceConfig() error {
	// applies config that must be delayed until after devices are
	// instantiated, see bug #588 and fix #635
	if lxcConfig, ok := c.config["raw.lxc"]; ok {
		if err := validateRawLxc(lxcConfig); err != nil {
			return err
		}

		// LoadConfigFile only takes a path, so round-trip the raw config
		// through a temp file.
		f, err := ioutil.TempFile("", "lxd_config_")
		if err != nil {
			return err
		}

		err = shared.WriteAll(f, []byte(lxcConfig))
		f.Close()
		defer os.Remove(f.Name())
		if err != nil {
			return err
		}

		if err := c.c.LoadConfigFile(f.Name()); err != nil {
			return fmt.Errorf("problem applying raw.lxc, perhaps there is a syntax error?")
		}
	}

	return nil
}
// applyProfile merges profile p into the container: its devices are added
// to the expanded device map and its config keys are applied via
// applyConfig.
func (c *containerLXD) applyProfile(p string) error {
	q := `SELECT key, value FROM profiles_config
		JOIN profiles ON profiles.id=profiles_config.profile_id
		WHERE profiles.name=?`
	var k, v string
	inargs := []interface{}{p}
	// outfmt only conveys the column types to dbQueryScan; the actual
	// values come back in result.
	outfmt := []interface{}{k, v}
	result, err := dbQueryScan(c.daemon.db, q, inargs, outfmt)

	if err != nil {
		return err
	}

	config := map[string]string{}
	for _, r := range result {
		k = r[0].(string)
		v = r[1].(string)

		config[k] = v
	}

	newdevs, err := dbDevices(c.daemon.db, p, true)
	if err != nil {
		return err
	}

	for k, v := range newdevs {
		c.devices[k] = v
	}

	return c.applyConfig(config)
}
// updateContainerHWAddr records MAC address v on the first nic device
// whose volatile hwaddr key appears in the expanded config, updating both
// the device entry and the config. Used to adopt a racing writer's value.
func (c *containerLXD) updateContainerHWAddr(k, v string) {
	for name, d := range c.devices {
		if d["type"] != "nic" {
			continue
		}

		for key := range c.config {
			device, err := extractInterfaceFromConfigName(key)
			if err != nil || device != name {
				continue
			}

			d["hwaddr"] = v
			c.config[key] = v
			return
		}
	}
}
// setupMacAddresses makes sure every nic device has a concrete MAC
// address: existing volatile.<nic>.hwaddr config keys are adopted, MAC
// templates (with 'x' placeholders) are expanded, and newly generated
// addresses are persisted to the database with race detection against
// concurrent writers.
func (c *containerLXD) setupMacAddresses() error {
	newConfigEntries := map[string]string{}

	for name, d := range c.devices {
		if d["type"] != "nic" {
			continue
		}

		// Reuse a MAC previously recorded under volatile.<name>.hwaddr.
		found := false

		for key, val := range c.config {
			device, err := extractInterfaceFromConfigName(key)
			if err == nil && device == name {
				found = true
				d["hwaddr"] = val
			}
		}

		if !found {
			var hwaddr string
			var err error
			if d["hwaddr"] != "" {
				// Expand any 'x' placeholders in the configured MAC.
				hwaddr, err = generateMacAddr(d["hwaddr"])
				if err != nil {
					return err
				}
			} else {
				// No MAC configured at all: generate one in LXC's OUI.
				hwaddr, err = generateMacAddr("00:16:3e:xx:xx:xx")
				if err != nil {
					return err
				}
			}

			if hwaddr != d["hwaddr"] {
				d["hwaddr"] = hwaddr
				key := fmt.Sprintf("volatile.%s.hwaddr", name)
				c.config[key] = hwaddr
				c.baseConfig[key] = hwaddr
				newConfigEntries[key] = hwaddr
			}
		}
	}

	if len(newConfigEntries) > 0 {

		tx, err := dbBegin(c.daemon.db)
		if err != nil {
			return err
		}

		/*
		 * My logic may be flawed here, but it seems to me that one of
		 * the following must be true:
		 * 1. The current database entry equals what we had stored.
		 *    Our update akes precedence
		 * 2. The current database entry is different from what we had
		 *    stored.  Someone updated it since we last grabbed the
		 *    container configuration.  So either
		 *    a.  it contains 'x' and is a template.  We have generated
		 *        a real mac, so our update takes precedence
		 *    b.  it contains no 'x' and is an hwaddr, not template.  We
		 *        defer to the racer's update since it may be actually
		 *        starting the container.
		 */
		str := "INSERT INTO containers_config (container_id, key, value) values (?, ?, ?)"
		stmt, err := tx.Prepare(str)
		if err != nil {
			tx.Rollback()
			return err
		}
		defer stmt.Close()

		ustr := "UPDATE containers_config SET value=? WHERE container_id=? AND key=?"
		ustmt, err := tx.Prepare(ustr)
		if err != nil {
			tx.Rollback()
			return err
		}
		defer ustmt.Close()

		qstr := "SELECT value FROM containers_config WHERE container_id=? AND key=?"
		qstmt, err := tx.Prepare(qstr)
		if err != nil {
			tx.Rollback()
			return err
		}
		defer qstmt.Close()

		for k, v := range newConfigEntries {
			var racer string
			err := qstmt.QueryRow(c.id, k).Scan(&racer)
			if err == sql.ErrNoRows {
				// No existing row: insert our freshly generated MAC.
				_, err = stmt.Exec(c.id, k, v)
				if err != nil {
					shared.Debugf("Error adding mac address to container")
					tx.Rollback()
					return err
				}
			} else if err != nil {
				tx.Rollback()
				return err
			} else if strings.Contains(racer, "x") {
				// Existing row is still a template: our concrete MAC wins.
				_, err = ustmt.Exec(v, c.id, k)
				if err != nil {
					shared.Debugf("Error updating mac address to container")
					tx.Rollback()
					return err
				}
			} else {
				// we accept the racing task's update
				c.updateContainerHWAddr(k, v)
			}
		}

		err = txCommit(tx)
		if err != nil {
			fmt.Printf("setupMacAddresses: (TxCommit) error %s\n", err)
		}
		return err
	}

	return nil
}
// applyIdmapSet writes the container's idmap (if any) into the LXC config
// as lxc.id_map lines.
func (c *containerLXD) applyIdmapSet() error {
	if c.idmapset == nil {
		return nil
	}

	for _, line := range c.idmapset.ToLxcString() {
		if err := setConfigItem(c, "lxc.id_map", line+"\n"); err != nil {
			return err
		}
	}

	return nil
}
// applyDevices converts every configured device into its LXC config lines
// and creates unix device nodes where needed. Devices are processed in
// sorted name order so the resulting config is deterministic.
func (c *containerLXD) applyDevices() error {
	var keys []string
	for k := range c.devices {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, name := range keys {
		d := c.devices[name]
		// "type" is a reserved key, not a device name.
		if name == "type" {
			continue
		}

		configs, err := deviceToLxc(c.Path(""), d)
		if err != nil {
			return fmt.Errorf("Failed configuring device %s: %s", name, err)
		}
		for _, line := range configs {
			err := setConfigItem(c, line[0], line[1])
			if err != nil {
				return fmt.Errorf("Failed configuring device %s: %s", name, err)
			}
		}
		// Unix devices additionally need their node created (and, when
		// running, live-mounted).
		if d["type"] == "unix-block" || d["type"] == "unix-char" {
			if err := c.setupUnixDev(d); err != nil {
				return fmt.Errorf("Failed creating device %s: %s", d["name"], err)
			}
		}
	}
	return nil
}
// iPsGet returns the IP addresses currently assigned inside the container,
// one shared.Ip per address, annotated with the host-side veth device name
// when the interface is a veth. All errors are swallowed: on failure an
// empty (or partial) list is returned.
func (c *containerLXD) iPsGet() []shared.Ip {
	ips := []shared.Ip{}
	names, err := c.c.Interfaces()
	if err != nil {
		return ips
	}
	for _, n := range names {
		addresses, err := c.c.IPAddress(n)
		if err != nil {
			continue
		}
		// Find the host-side veth pair for this interface (if any) by
		// scanning the running lxc.network.* config entries.
		veth := ""
		for i := 0; i < len(c.c.ConfigItem("lxc.network")); i++ {
			nicName := c.c.RunningConfigItem(fmt.Sprintf("lxc.network.%d.name", i))[0]
			if nicName != n {
				continue
			}
			interfaceType := c.c.RunningConfigItem(fmt.Sprintf("lxc.network.%d.type", i))
			if interfaceType[0] != "veth" {
				continue
			}
			veth = c.c.RunningConfigItem(fmt.Sprintf("lxc.network.%d.veth.pair", i))[0]
			break
		}
		for _, a := range addresses {
			ip := shared.Ip{Interface: n, Address: a, HostVeth: veth}
			// To4() is nil for anything that isn't an IPv4 address.
			if net.ParseIP(a).To4() == nil {
				ip.Protocol = "IPV6"
			} else {
				ip.Protocol = "IPV4"
			}
			ips = append(ips, ip)
		}
	}
	return ips
}
// tarStoreFile writes one filesystem entry (path/fi) into the tar stream tw.
// offset strips the leading part of path so archive names are
// container-relative; linkmap records inodes already stored so subsequent
// hardlinks are emitted as tar link entries instead of duplicate content.
func (c *containerLXD) tarStoreFile(linkmap map[uint64]string, offset int, tw *tar.Writer, path string, fi os.FileInfo) error {
	var err error
	var major, minor, nlink int
	var ino uint64
	link := ""
	if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
		link, err = os.Readlink(path)
		if err != nil {
			return err
		}
	}
	hdr, err := tar.FileInfoHeader(fi, link)
	if err != nil {
		return err
	}
	hdr.Name = path[offset:]
	if fi.IsDir() || fi.Mode()&os.ModeSymlink == os.ModeSymlink {
		hdr.Size = 0
	} else {
		hdr.Size = fi.Size()
	}
	hdr.Uid, hdr.Gid, major, minor, ino, nlink, err = shared.GetFileStat(path)
	if err != nil {
		return fmt.Errorf("error getting file info: %s", err)
	}
	// unshift the id under /rootfs/ for unpriv containers
	if !c.IsPrivileged() && strings.HasPrefix(hdr.Name, "/rootfs") {
		hdr.Uid, hdr.Gid = c.idmapset.ShiftFromNs(hdr.Uid, hdr.Gid)
		// Entries owned by ids with no host mapping are silently skipped.
		if hdr.Uid == -1 || hdr.Gid == -1 {
			return nil
		}
	}
	if major != -1 {
		hdr.Devmajor = int64(major)
		hdr.Devminor = int64(minor)
	}
	// If it's a hardlink we've already seen use the old name
	if fi.Mode().IsRegular() && nlink > 1 {
		if firstpath, found := linkmap[ino]; found {
			hdr.Typeflag = tar.TypeLink
			hdr.Linkname = firstpath
			hdr.Size = 0
		} else {
			linkmap[ino] = hdr.Name
		}
	}
	// todo - handle xattrs
	if err := tw.WriteHeader(hdr); err != nil {
		return fmt.Errorf("error writing header: %s", err)
	}
	// Only regular entries (not hardlinks turned into TypeLink) carry data.
	if hdr.Typeflag == tar.TypeReg {
		f, err := os.Open(path)
		if err != nil {
			return fmt.Errorf("tarStoreFile: error opening file: %s", err)
		}
		defer f.Close()
		if _, err := io.Copy(tw, f); err != nil {
			return fmt.Errorf("error copying file %s", err)
		}
	}
	return nil
}
// mkdirAllContainerRoot creates path (and any missing parents) owned by the
// container's root: uid/gid 0 for privileged containers, the shifted root
// ids for unprivileged ones (falling back to 0 when unmapped).
func (c *containerLXD) mkdirAllContainerRoot(path string, perm os.FileMode) error {
	uid, gid := 0, 0
	if !c.IsPrivileged() {
		uid, gid = c.idmapset.ShiftIntoNs(0, 0)
		if uid == -1 {
			uid = 0
		}
		if gid == -1 {
			gid = 0
		}
	}
	return shared.MkdirAllOwner(path, perm, uid, gid)
}
// mountShared ensures the per-container shmounts directory exists on the
// host and registers a bind-mount of it onto dev/.lxd-mounts inside the
// container.
func (c *containerLXD) mountShared() error {
	source := shared.VarPath("shmounts", c.Name())
	if !shared.PathExists(source) {
		if err := c.mkdirAllContainerRoot(source, 0755); err != nil {
			return err
		}
	}
	entry := fmt.Sprintf("%s dev/.lxd-mounts none bind,create=dir 0 0", source)
	return setConfigItem(c, "lxc.mount.entry", entry)
}
// Checkpoint dumps the container's runtime state using the given go-lxc
// checkpoint options.
func (c *containerLXD) Checkpoint(opts lxc.CheckpointOptions) error {
	return c.c.Checkpoint(opts)
}
// StartFromMigration starts the container from a migration image directory:
// the current LXC config is saved to a temp file and handed, together with
// imagesDir, to the "forkmigrate" helper subcommand.
func (c *containerLXD) StartFromMigration(imagesDir string) error {
	// Temp file is created only to reserve a unique name; it is removed
	// below and re-created by SaveConfigFile.
	f, err := ioutil.TempFile("", "lxd_lxc_migrateconfig_")
	if err != nil {
		return err
	}
	if err = f.Chmod(0600); err != nil {
		f.Close()
		os.Remove(f.Name())
		return err
	}
	// NOTE(review): the file is removed here and re-created by
	// SaveConfigFile, so the 0600 chmod above does not apply to the final
	// file and the unique name is briefly racy — confirm this is intended.
	f.Close()
	os.Remove(f.Name())
	if err := c.c.SaveConfigFile(f.Name()); err != nil {
		return err
	}
	/* (Re)Load the AA profile; we set it in the container's config above
	 * in init()
	 */
	if err := AALoadProfile(c); err != nil {
		c.StorageStop()
		return err
	}
	if err := SeccompCreateProfile(c); err != nil {
		c.StorageStop()
		return err
	}
	cmd := exec.Command(
		os.Args[0],
		"forkmigrate",
		c.name,
		c.c.ConfigPath(),
		f.Name(),
		imagesDir,
	)
	return cmd.Run()
}
Freeze the container before killing it
This makes it possible to kill containers which are constantly spawning
and replacing processes (flooding their init process) such as when
running a fork bomb.
Signed-off-by: Stéphane Graber <089afc6d81f66f1168a9849e15660feae286e024@ubuntu.com>
package main
import (
"archive/tar"
"bytes"
"crypto/rand"
"database/sql"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/big"
"net"
"os"
"os/exec"
"path"
"path/filepath"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"syscall"
"time"
"gopkg.in/flosch/pongo2.v3"
"gopkg.in/lxc/go-lxc.v2"
"gopkg.in/yaml.v2"
"github.com/lxc/lxd/shared"
log "gopkg.in/inconshreveable/log15.v2"
)
// hwaddrConfigNameRE matches per-interface volatile hwaddr keys such as
// "volatile.eth0.hwaddr". Compiled once at package scope instead of on
// every call.
var hwaddrConfigNameRE = regexp.MustCompile(`volatile\.([^.]*)\.hwaddr`)

// extractInterfaceFromConfigName returns "eth0" from "volatile.eth0.hwaddr",
// or an error if the key does not match this pattern.
// (The previous doc comment named the function with the wrong case.)
func extractInterfaceFromConfigName(k string) (string, error) {
	m := hwaddrConfigNameRE.FindStringSubmatch(k)
	if m != nil && len(m) > 1 {
		return m[1], nil
	}
	return "", fmt.Errorf("%s did not match", k)
}
// validateRawLxc checks user-supplied raw.lxc content: every non-empty,
// non-comment line must have the form "key = value", and keys that would
// let the user escape LXD's control (lxc.logfile and any lxc.network.*)
// are rejected.
func validateRawLxc(rawLxc string) error {
	for _, line := range strings.Split(rawLxc, "\n") {
		// Skip blank lines and comments.
		if len(line) == 0 || strings.HasPrefix(line, "#") {
			continue
		}
		parts := strings.SplitN(line, "=", 2)
		if len(parts) != 2 {
			return fmt.Errorf("invalid raw.lxc line: %s", line)
		}
		key := strings.ToLower(strings.Trim(parts[0], " \t"))
		if key == "lxc.logfile" {
			return fmt.Errorf("setting lxc.logfile is not allowed")
		}
		if strings.HasPrefix(key, "lxc.network.") {
			return fmt.Errorf("setting lxc.network keys is not allowed")
		}
	}
	return nil
}
// setConfigItem sets a single LXC config key on the container, wrapping
// failures with the key/value for context.
func setConfigItem(c *containerLXD, key string, value string) error {
	err := c.c.SetConfigItem(key, value)
	if err != nil {
		// Include the underlying LXC error; the previous message dropped
		// it, which made failures here impossible to diagnose.
		return fmt.Errorf("Failed to set LXC config: %s=%s (%s)", key, value, err)
	}
	return nil
}
// generateMacAddr fills in a MAC address template, replacing every 'x' with
// a random hex digit drawn from crypto/rand:
// e.g. "00:11:22:xx:xx:xx" -> "00:11:22:af:3e:51".
func generateMacAddr(template string) (string, error) {
	var out bytes.Buffer
	for _, ch := range template {
		if ch != 'x' {
			out.WriteRune(ch)
			continue
		}
		nibble, err := rand.Int(rand.Reader, big.NewInt(16))
		if err != nil {
			return "", err
		}
		fmt.Fprintf(&out, "%x", nibble.Int64())
	}
	return out.String(), nil
}
// containerPath returns the on-disk path for a container or, when
// isSnapshot is set, for a snapshot of that name.
func containerPath(name string, isSnapshot bool) string {
	subdir := "containers"
	if isSnapshot {
		subdir = "snapshots"
	}
	return shared.VarPath(subdir, name)
}
// containerLXDArgs contains every argument needed to create an LXD Container
type containerLXDArgs struct {
	ID           int // Leave it empty when you create one.
	Ctype        containerType
	Config       map[string]string
	Profiles     []string // Profile names to apply, in order; nil means ["default"].
	Ephemeral    bool
	BaseImage    string // Image hash; recorded as volatile.base_image when set.
	Architecture int
	Devices      shared.Devices
}
// containerLXD is the concrete container implementation backed by go-lxc.
type containerLXD struct {
	c            *lxc.Container // Underlying go-lxc handle; rebuilt by init().
	daemon       *Daemon
	id           int
	name         string
	config       map[string]string // Expanded config (see RenderState's ExpandedConfig).
	profiles     []string
	devices      shared.Devices // Expanded devices (see RenderState's ExpandedDevices).
	architecture int
	ephemeral    bool
	idmapset     *shared.IdmapSet // nil for privileged containers.
	cType        containerType
	baseConfig   map[string]string // Per-container config as stored in the DB.
	baseDevices  shared.Devices    // Per-container devices as stored in the DB.
	storage      storage
}
// container is the interface implemented by containerLXD; it covers
// lifecycle operations, configuration, storage management and
// filesystem-path accessors used by the rest of the daemon.
type container interface {
	// State and lifecycle.
	RenderState() (*shared.ContainerState, error)
	Reboot() error
	Freeze() error
	Shutdown(timeout time.Duration) error
	Start() error
	Stop() error
	Unfreeze() error
	Delete() error
	Restore(sourceContainer container) error
	Rename(newName string) error
	ConfigReplace(newConfig containerLXDArgs) error
	// Storage management.
	StorageStart() error
	StorageStop() error
	Storage() storage
	// Boolean status accessors.
	IsPrivileged() bool
	IsRunning() bool
	IsFrozen() bool
	IsEphemeral() bool
	IsSnapshot() bool
	// Metadata accessors.
	ID() int
	Name() string
	Architecture() int
	Config() map[string]string
	ConfigKeySet(key string, value string) error
	Devices() shared.Devices
	Profiles() []string
	// Filesystem paths.
	Path(newName string) string
	RootfsPath() string
	TemplatesPath() string
	StateDir() string
	LogFilePath() string
	LogPath() string
	// Runtime details.
	InitPID() int
	State() string
	IdmapSet() *shared.IdmapSet
	LastIdmapSet() (*shared.IdmapSet, error)
	TemplateApply(trigger string) error
	ExportToTar(snap string, w io.Writer) error
	Checkpoint(opts lxc.CheckpointOptions) error
	StartFromMigration(imagesDir string) error
	// TODO: Remove every use of this and remove it.
	LXContainerGet() *lxc.Container
	DetachMount(m shared.Device) error
	AttachMount(m shared.Device) error
	AttachUnixDev(dev shared.Device) error
	DetachUnixDev(dev shared.Device) error
}
// unmountTempBlocks lazily unmounts and removes every temporary "blk.*"
// block-device mountpoint found under path. Failures are logged as
// warnings and never abort the sweep.
func unmountTempBlocks(path string) {
	dents, err := ioutil.ReadDir(path)
	if err != nil {
		return
	}
	for _, ent := range dents {
		name := ent.Name()
		if !strings.HasPrefix(name, "blk.") {
			continue
		}
		full := filepath.Join(path, name)
		if err := syscall.Unmount(full, syscall.MNT_DETACH); err != nil {
			shared.Log.Warn("Failed to unmount block device", log.Ctx{"error": err, "path": full})
			continue
		}
		if err := os.Remove(full); err != nil {
			shared.Log.Warn("Failed to unlink block device mountpoint", log.Ctx{"error": err, "path": full})
			continue
		}
	}
}
// getMountOptions derives the mount-option list from a device entry and
// also returns the readonly and optional flags individually.
func getMountOptions(m shared.Device) ([]string, bool, bool) {
	isSet := func(v string) bool { return v == "1" || v == "true" }
	opts := []string{}
	readonly := isSet(m["readonly"])
	if readonly {
		opts = append(opts, "ro")
	}
	optional := isSet(m["optional"])
	if optional {
		opts = append(opts, "optional")
	}
	return opts, readonly, optional
}
/*
 * This is called at container startup to mount any block
 * devices (since a container with idmap cannot do so)
 */
// mountTmpBlockdev mounts the device's source block device on a temporary
// "blk.*" directory under cntPath and returns an ("lxc.mount.entry", entry)
// pair that bind-mounts it onto the device's target path in the container.
// When the device is marked optional, failures log a warning and return an
// empty slice with a nil error instead.
func mountTmpBlockdev(cntPath string, d shared.Device) ([]string, error) {
	source := d["source"]
	fstype, err := shared.BlockFsDetect(source)
	if err != nil {
		return []string{}, fmt.Errorf("Error setting up %s: %s", source, err)
	}
	opts, readonly, optional := getMountOptions(d)
	// Mount blockdev into $containerdir/blk.$(mktemp)
	fnam := fmt.Sprintf("blk.%s", strings.Replace(source, "/", "-", -1))
	blkmnt := filepath.Join(cntPath, fnam)
	// Clear any stale mount/directory left over from a previous run.
	syscall.Unmount(blkmnt, syscall.MNT_DETACH)
	os.Remove(blkmnt)
	if err = os.Mkdir(blkmnt, 0660); err != nil {
		if optional {
			shared.Log.Warn("Failed to create block device mount directory",
				log.Ctx{"error": err, "source": source})
			return []string{}, nil
		}
		return []string{}, fmt.Errorf("Unable to create mountpoint for blockdev %s: %s", source, err)
	}
	flags := 0
	if readonly {
		flags |= syscall.MS_RDONLY
	}
	if err = syscall.Mount(source, blkmnt, fstype, uintptr(flags), ""); err != nil {
		if optional {
			shared.Log.Warn("Failed to mount block device", log.Ctx{"error": err, "source": source})
			return []string{}, nil
		}
		return []string{}, fmt.Errorf("Unable to prepare blockdev mount for %s: %s", source, err)
	}
	opts = append(opts, "bind")
	// Tell LXC to create the in-container target as a dir or file to
	// match what the source is.
	sb, err := os.Stat(source)
	if err == nil {
		if sb.IsDir() {
			opts = append(opts, "create=dir")
		} else {
			opts = append(opts, "create=file")
		}
	}
	// The in-container target must be a relative path for LXC.
	tgtpath := d["path"]
	for len(tgtpath) > 0 && filepath.IsAbs(tgtpath) {
		tgtpath = tgtpath[1:]
	}
	if len(tgtpath) == 0 {
		if optional {
			shared.Log.Warn("Invalid mount target", log.Ctx{"target": d["path"]})
			return []string{}, nil
		}
		return []string{}, fmt.Errorf("Invalid mount target %s", d["path"])
	}
	mtab := fmt.Sprintf("%s %s %s %s 0 0", blkmnt, tgtpath, "none", strings.Join(opts, ","))
	shared.Debugf("adding mount entry for %s: .%s.\n", d["source"], mtab)
	return []string{"lxc.mount.entry", mtab}, nil
}
// containerLXDCreateAsEmpty creates a new container record and provisions
// empty backing storage; the record is deleted again on storage failure.
func containerLXDCreateAsEmpty(d *Daemon, name string,
	args containerLXDArgs) (container, error) {
	// Create the container
	c, err := containerLXDCreateInternal(d, name, args)
	if err != nil {
		return nil, err
	}
	// Now create the empty storage
	if err := c.storage.ContainerCreate(c); err != nil {
		c.Delete()
		return nil, err
	}
	return c, nil
}
// containerLXDCreateFromImage creates a container record, refreshes the
// source image's last-use timestamp and provisions the storage from that
// image. Any failure after the record exists deletes the container again
// so no orphaned DB entry is left behind.
func containerLXDCreateFromImage(d *Daemon, name string,
	args containerLXDArgs, hash string) (container, error) {
	// Create the container
	c, err := containerLXDCreateInternal(d, name, args)
	if err != nil {
		return nil, err
	}
	if err := dbImageLastAccessUpdate(d.db, hash); err != nil {
		// Bug fix: this error path previously leaked the freshly-created
		// container record; delete it like the other error paths do.
		c.Delete()
		return nil, fmt.Errorf("Error updating image last use date: %s", err)
	}
	// Now create the storage from an image
	if err := c.storage.ContainerCreateFromImage(c, hash); err != nil {
		c.Delete()
		return nil, err
	}
	return c, nil
}
// containerLXDCreateAsCopy creates a new container record, applies the
// given config and copies the source container's storage into it; the
// record is deleted again on any failure.
func containerLXDCreateAsCopy(d *Daemon, name string,
	args containerLXDArgs, sourceContainer container) (container, error) {
	c, err := containerLXDCreateInternal(d, name, args)
	if err != nil {
		return nil, err
	}
	if err := c.ConfigReplace(args); err != nil {
		c.Delete()
		return nil, err
	}
	if err := c.storage.ContainerCopy(c, sourceContainer); err != nil {
		c.Delete()
		return nil, err
	}
	return c, nil
}
// containerLXDCreateAsSnapshot creates a snapshot record backed by the
// source container's storage. When stateful, the source must be running
// and its runtime state is checkpointed into the snapshot's state dir
// (stopping the source, per the Stop: true option).
func containerLXDCreateAsSnapshot(d *Daemon, name string,
	args containerLXDArgs, sourceContainer container,
	stateful bool) (container, error) {
	c, err := containerLXDCreateInternal(d, name, args)
	if err != nil {
		return nil, err
	}
	// Snapshots share the source container's storage backend.
	c.storage = sourceContainer.Storage()
	if err := c.storage.ContainerSnapshotCreate(c, sourceContainer); err != nil {
		c.Delete()
		return nil, err
	}
	if stateful {
		stateDir := c.StateDir()
		err = os.MkdirAll(stateDir, 0700)
		if err != nil {
			c.Delete()
			return nil, err
		}
		// TODO - shouldn't we freeze for the duration of rootfs snapshot below?
		if !sourceContainer.IsRunning() {
			c.Delete()
			return nil, fmt.Errorf("Container not running")
		}
		opts := lxc.CheckpointOptions{Directory: stateDir, Stop: true, Verbose: true}
		err = sourceContainer.Checkpoint(opts)
		// Collect the CRIU log regardless of checkpoint success/failure.
		err2 := CollectCRIULogFile(sourceContainer, stateDir, "snapshot", "dump")
		if err2 != nil {
			shared.Log.Warn("failed to collect criu log file", log.Ctx{"error": err2})
		}
		if err != nil {
			c.Delete()
			return nil, err
		}
	}
	return c, nil
}
// validContainerName rejects names containing the snapshot delimiter,
// which is reserved for separating container and snapshot names.
func validContainerName(name string) error {
	if !strings.Contains(name, shared.SnapshotDelimiter) {
		return nil
	}
	return fmt.Errorf(
		"The character '%s' is reserved for snapshots.",
		shared.SnapshotDelimiter)
}
// containerLXDCreateInternal validates the request, inserts the container
// into the database and builds the in-memory containerLXD (including its
// LXC config via init()). It does NOT provision storage; callers do that.
// On failure after DB insertion the container record is deleted again.
func containerLXDCreateInternal(
	d *Daemon, name string, args containerLXDArgs) (*containerLXD, error) {
	shared.Log.Info(
		"Container create",
		log.Ctx{
			"container":  name,
			"isSnapshot": args.Ctype == cTypeSnapshot})
	// Snapshot names legitimately contain the snapshot delimiter.
	if args.Ctype != cTypeSnapshot {
		if err := validContainerName(name); err != nil {
			return nil, err
		}
	}
	path := containerPath(name, args.Ctype == cTypeSnapshot)
	if shared.PathExists(path) {
		return nil, fmt.Errorf(
			"The container already exists on disk, container: '%s', path: '%s'",
			name,
			path)
	}
	// Fill in defaults for the optional arguments.
	if args.Profiles == nil {
		args.Profiles = []string{"default"}
	}
	if args.Config == nil {
		args.Config = map[string]string{}
	}
	if args.BaseImage != "" {
		args.Config["volatile.base_image"] = args.BaseImage
	}
	if args.Devices == nil {
		args.Devices = shared.Devices{}
	}
	// Every requested profile must already exist.
	profiles, err := dbProfiles(d.db)
	if err != nil {
		return nil, err
	}
	for _, profile := range args.Profiles {
		if !shared.StringInSlice(profile, profiles) {
			return nil, fmt.Errorf("Requested profile '%s' doesn't exist", profile)
		}
	}
	id, err := dbContainerCreate(d.db, name, args)
	if err != nil {
		return nil, err
	}
	shared.Log.Debug(
		"Container created in the DB",
		log.Ctx{"container": name, "id": id})
	// Keep pristine copies of config/devices before any expansion.
	baseConfig := map[string]string{}
	if err := shared.DeepCopy(&args.Config, &baseConfig); err != nil {
		return nil, err
	}
	baseDevices := shared.Devices{}
	if err := shared.DeepCopy(&args.Devices, &baseDevices); err != nil {
		return nil, err
	}
	c := &containerLXD{
		daemon:       d,
		id:           id,
		name:         name,
		ephemeral:    args.Ephemeral,
		architecture: args.Architecture,
		config:       args.Config,
		profiles:     args.Profiles,
		devices:      args.Devices,
		cType:        args.Ctype,
		baseConfig:   baseConfig,
		baseDevices:  baseDevices}
	// No need to detect storage here, its a new container.
	c.storage = d.Storage
	if err := c.init(); err != nil {
		c.Delete() // Delete the container from the DB.
		return nil, err
	}
	// Persist the idmap in use so the next start can detect changes.
	idmap := c.IdmapSet()
	var jsonIdmap string
	if idmap != nil {
		idmapBytes, err := json.Marshal(idmap.Idmap)
		if err != nil {
			c.Delete()
			return nil, err
		}
		jsonIdmap = string(idmapBytes)
	} else {
		jsonIdmap = "[]"
	}
	err = c.ConfigKeySet("volatile.last_state.idmap", jsonIdmap)
	if err != nil {
		c.Delete()
		return nil, err
	}
	return c, nil
}
// containerLXDLoad instantiates an existing container from its database
// record, detecting the storage backend from the on-disk path (falling
// back to the daemon default) and re-initializing the LXC handle.
func containerLXDLoad(d *Daemon, name string) (container, error) {
	shared.Log.Debug("Container load", log.Ctx{"container": name})
	args, err := dbContainerGet(d.db, name)
	if err != nil {
		return nil, err
	}
	// Keep pristine copies of the DB config/devices before expansion.
	baseConfig := map[string]string{}
	if err := shared.DeepCopy(&args.Config, &baseConfig); err != nil {
		return nil, err
	}
	baseDevices := shared.Devices{}
	if err := shared.DeepCopy(&args.Devices, &baseDevices); err != nil {
		return nil, err
	}
	c := &containerLXD{
		daemon:       d,
		id:           args.ID,
		name:         name,
		ephemeral:    args.Ephemeral,
		architecture: args.Architecture,
		config:       args.Config,
		profiles:     args.Profiles,
		devices:      args.Devices,
		cType:        args.Ctype,
		baseConfig:   baseConfig,
		baseDevices:  baseDevices}
	// For snapshots ("cname/snap"), detect storage from the parent's path.
	s, err := storageForFilename(d, shared.VarPath("containers", strings.Split(name, "/")[0]))
	if err != nil {
		shared.Log.Warn("Couldn't detect storage.", log.Ctx{"container": c.Name()})
		c.storage = d.Storage
	} else {
		c.storage = s
	}
	if err := c.init(); err != nil {
		return nil, err
	}
	return c, nil
}
// init prepares the LXContainer for this LXD Container
// TODO: This gets called on each load of the container,
// we might be able to split this is up into c.Start().
//
// It builds the full LXC configuration: template includes, rootfs/logging,
// profile and per-container config, nesting tweaks, AppArmor/seccomp
// profiles, MAC addresses, device overrides and the idmap.
func (c *containerLXD) init() error {
	templateConfBase := "ubuntu"
	templateConfDir := os.Getenv("LXD_LXC_TEMPLATE_CONFIG")
	if templateConfDir == "" {
		templateConfDir = "/usr/share/lxc/config"
	}
	cc, err := lxc.NewContainer(c.Name(), c.daemon.lxcpath)
	if err != nil {
		return err
	}
	c.c = cc
	logfile := c.LogFilePath()
	if err := os.MkdirAll(filepath.Dir(logfile), 0700); err != nil {
		return err
	}
	if err = c.c.SetLogFile(logfile); err != nil {
		return err
	}
	// Architectures without a known personality simply skip lxc.arch.
	personality, err := shared.ArchitecturePersonality(c.architecture)
	if err == nil {
		if err := setConfigItem(c, "lxc.arch", personality); err != nil {
			return err
		}
	}
	err = setConfigItem(c, "lxc.include", fmt.Sprintf("%s/%s.common.conf", templateConfDir, templateConfBase))
	if err != nil {
		return err
	}
	if err := setConfigItem(c, "lxc.rootfs", c.RootfsPath()); err != nil {
		return err
	}
	if err := setConfigItem(c, "lxc.loglevel", "0"); err != nil {
		return err
	}
	if err := setConfigItem(c, "lxc.utsname", c.Name()); err != nil {
		return err
	}
	if err := setConfigItem(c, "lxc.tty", "0"); err != nil {
		return err
	}
	if err := setupDevLxdMount(c); err != nil {
		return err
	}
	for _, p := range c.profiles {
		if err := c.applyProfile(p); err != nil {
			return err
		}
	}
	// base per-container config should override profile config, so we apply it second
	if err := c.applyConfig(c.baseConfig); err != nil {
		return err
	}
	if !c.IsPrivileged() || runningInUserns {
		err = setConfigItem(c, "lxc.include", fmt.Sprintf("%s/%s.userns.conf", templateConfDir, templateConfBase))
		if err != nil {
			return err
		}
	}
	if c.IsNesting() {
		shared.Debugf("Setting up %s for nesting", c.name)
		orig := c.c.ConfigItem("lxc.mount.auto")
		auto := ""
		if len(orig) == 1 {
			auto = orig[0]
		}
		// Ensure cgroups get auto-mounted for the nested LXC.
		if !strings.Contains(auto, "cgroup") {
			auto = fmt.Sprintf("%s %s", auto, "cgroup:mixed")
			err = setConfigItem(c, "lxc.mount.auto", auto)
			if err != nil {
				return err
			}
		}
		/*
		 * mount extra /proc and /sys to work around kernel
		 * restrictions on remounting them when covered
		 */
		err = setConfigItem(c, "lxc.mount.entry", "proc dev/.lxc/proc proc create=dir,optional")
		if err != nil {
			return err
		}
		err = setConfigItem(c, "lxc.mount.entry", "sys dev/.lxc/sys sysfs create=dir,optional")
		if err != nil {
			return err
		}
	}
	/*
	 * Until stacked apparmor profiles are possible, we have to run nested
	 * containers unconfined
	 */
	if aaEnabled {
		if aaConfined() {
			curProfile := aaProfile()
			shared.Debugf("Running %s in current profile %s (nested container)", c.name, curProfile)
			curProfile = strings.TrimSuffix(curProfile, " (enforce)")
			if err := setConfigItem(c, "lxc.aa_profile", curProfile); err != nil {
				return err
			}
		} else if err := setConfigItem(c, "lxc.aa_profile", AAProfileFull(c)); err != nil {
			return err
		}
	}
	if err := setConfigItem(c, "lxc.seccomp", SeccompProfilePath(c)); err != nil {
		return err
	}
	if err := c.setupMacAddresses(); err != nil {
		return err
	}
	// Allow overwrites of devices
	for k, v := range c.baseDevices {
		c.devices[k] = v
	}
	if !c.IsPrivileged() {
		if c.daemon.IdmapSet == nil {
			return fmt.Errorf("LXD doesn't have a uid/gid allocation. In this mode, only privileged containers are supported.")
		}
		c.idmapset = c.daemon.IdmapSet // TODO - per-tenant idmaps
	}
	if err := c.applyIdmapSet(); err != nil {
		return err
	}
	if err := c.applyPostDeviceConfig(); err != nil {
		return err
	}
	return nil
}
// RenderState builds the API representation of the container, including
// runtime details (init PID and IP addresses) when it is running.
func (c *containerLXD) RenderState() (*shared.ContainerState, error) {
	statusCode := shared.FromLXCState(int(c.c.State()))
	status := shared.ContainerStatus{
		Status:     statusCode.String(),
		StatusCode: statusCode,
	}
	if c.IsRunning() {
		status.Init = c.InitPID()
		status.Ips = c.iPsGet()
	}
	state := &shared.ContainerState{
		Name:            c.name,
		Profiles:        c.profiles,
		Config:          c.baseConfig,
		ExpandedConfig:  c.config,
		Status:          status,
		Devices:         c.baseDevices,
		ExpandedDevices: c.devices,
		Ephemeral:       c.ephemeral,
	}
	return state, nil
}
// insertMount live-mounts source into the running container at target by
// staging the mount under the shared shmounts directory and asking the
// "forkmount" helper to move it into the container's namespace. A no-op
// when the container is not running.
func (c *containerLXD) insertMount(source, target, fstype string, flags int, options string) error {
	pid := c.c.InitPid()
	if pid == -1 { // container not running - we're done
		return nil
	}
	// Stage a mountpoint matching the source's kind (dir vs file).
	var tmpMount string
	var err error
	if shared.IsDir(source) {
		tmpMount, err = ioutil.TempDir(shared.VarPath("shmounts", c.name), "lxdmount_")
	} else {
		var f *os.File
		// Bug fix: this previously used := which shadowed err, so TempFile
		// failures were silently dropped and the later Mount on an empty
		// path failed with a misleading error instead.
		f, err = ioutil.TempFile(shared.VarPath("shmounts", c.name), "lxdmount_")
		if err == nil {
			tmpMount = f.Name()
			f.Close()
		}
	}
	if err != nil {
		return err
	}
	err = syscall.Mount(source, tmpMount, fstype, uintptr(flags), "")
	if err != nil {
		return err
	}
	mntsrc := filepath.Join("/dev/.lxd-mounts", filepath.Base(tmpMount))
	// finally we need to move-mount this in the container
	pidstr := fmt.Sprintf("%d", pid)
	err = exec.Command(os.Args[0], "forkmount", pidstr, mntsrc, target).Run()
	syscall.Unmount(tmpMount, syscall.MNT_DETACH) // in case forkmount failed
	os.Remove(tmpMount)
	return err
}
// createUnixDevice creates the device node for a unix-char/unix-block
// device under the container directory and returns (host node path,
// in-container relative target path). major/minor are taken from the host
// device when not given explicitly; the created node is uid-shifted into
// the container's idmap.
func (c *containerLXD) createUnixDevice(m shared.Device) (string, string, error) {
	devname := m["path"]
	if !filepath.IsAbs(devname) {
		devname = filepath.Join("/", devname)
	}
	// target must be a relative path, so that lxc will DTRT
	tgtname := m["path"]
	for len(tgtname) > 0 && filepath.IsAbs(tgtname) {
		tgtname = tgtname[1:]
	}
	if len(tgtname) == 0 {
		return "", "", fmt.Errorf("Failed to interpret path: %s", devname)
	}
	var err error
	var major, minor int
	if m["major"] == "" && m["minor"] == "" {
		// No explicit numbers: mirror the existing host device.
		major, minor, err = getDev(devname)
		if err != nil {
			return "", "", fmt.Errorf("Device does not exist: %s", devname)
		}
	} else if m["major"] == "" || m["minor"] == "" {
		return "", "", fmt.Errorf("Both major and minor must be supplied for devices")
	} else {
		/* ok we have a major:minor and need to create it */
		major, err = strconv.Atoi(m["major"])
		if err != nil {
			return "", "", fmt.Errorf("Bad major %s in device %s", m["major"], m["path"])
		}
		minor, err = strconv.Atoi(m["minor"])
		if err != nil {
			return "", "", fmt.Errorf("Bad minor %s in device %s", m["minor"], m["path"])
		}
	}
	name := strings.Replace(m["path"], "/", "-", -1)
	devpath := path.Join(c.Path(""), name)
	mode := os.FileMode(0660)
	if m["type"] == "unix-block" {
		mode |= syscall.S_IFBLK
	} else {
		mode |= syscall.S_IFCHR
	}
	if m["mode"] != "" {
		tmp, err := devModeOct(m["mode"])
		if err != nil {
			return "", "", fmt.Errorf("Bad mode %s in device %s", m["mode"], m["path"])
		}
		mode = os.FileMode(tmp)
	}
	os.Remove(devpath)
	// minor|(major<<8) is the classic dev_t encoding expected by mknod.
	if err := syscall.Mknod(devpath, uint32(mode), minor|(major<<8)); err != nil {
		// If mknod failed but the host device exists, fall back to
		// handing back the host node itself.
		if shared.PathExists(devname) {
			return devname, tgtname, nil
		}
		return "", "", fmt.Errorf("Failed to create device %s for %s: %s", devpath, m["path"], err)
	}
	if err := c.idmapset.ShiftFile(devpath); err != nil {
		// uidshift failing is weird, but not a big problem. Log and proceed
		shared.Debugf("Failed to uidshift device %s: %s\n", m["path"], err)
	}
	return devpath, tgtname, nil
}
// setupUnixDev creates the device node for m and wires it into the
// container: when the container is running, the bind-mount and the cgroup
// device rule are applied live; in all cases an lxc.mount.entry is recorded
// for the next start.
func (c *containerLXD) setupUnixDev(m shared.Device) error {
	source, target, err := c.createUnixDevice(m)
	if err != nil {
		return fmt.Errorf("Failed to setup device %s: %s", m["path"], err)
	}
	options, err := devGetOptions(m)
	if err != nil {
		return err
	}
	if c.c.Running() {
		// insert mount from 'source' to 'target'
		err := c.insertMount(source, target, "none", syscall.MS_BIND, options)
		if err != nil {
			return fmt.Errorf("Failed to add mount for device %s: %s", m["path"], err)
		}
		// add the new device cgroup rule
		entry, err := deviceCgroupInfo(m)
		if err != nil {
			return fmt.Errorf("Failed to add cgroup rule for device %s: %s", m["path"], err)
		}
		if err := c.c.SetCgroupItem("devices.allow", entry); err != nil {
			return fmt.Errorf("Failed to add cgroup rule %s for device %s: %s", entry, m["path"], err)
		}
	}
	entry := fmt.Sprintf("%s %s none %s", source, target, options)
	// Consistency fix: go through setConfigItem like the rest of this file
	// so failures carry key/value context (was a bare c.c.SetConfigItem).
	return setConfigItem(c, "lxc.mount.entry", entry)
}
// Start boots the container: it starts the storage, loads security
// profiles, prepares shared mounts and devices, saves the LXC config to a
// temp file and then spawns the container through the "forkstart" helper
// subcommand. On failure after storage has been started, storage is
// stopped and temporary block-device mounts are cleaned up again.
func (c *containerLXD) Start() error {
	if c.IsRunning() {
		return fmt.Errorf("the container is already running")
	}
	// Start the storage for this container
	if err := c.StorageStart(); err != nil {
		return err
	}
	/* (Re)Load the AA profile; we set it in the container's config above
	 * in init()
	 */
	if err := AALoadProfile(c); err != nil {
		c.StorageStop()
		return err
	}
	if err := SeccompCreateProfile(c); err != nil {
		c.StorageStop()
		return err
	}
	if err := c.mountShared(); err != nil {
		// Bug fix: this error path previously leaked the started storage.
		c.StorageStop()
		return err
	}
	/*
	 * add the lxc.* entries for the configured devices,
	 * and create if necessary
	 */
	if err := c.applyDevices(); err != nil {
		// Bug fix: this error path previously leaked the started storage
		// and any temporary block-device mounts.
		unmountTempBlocks(c.Path(""))
		c.StorageStop()
		return err
	}
	f, err := ioutil.TempFile("", "lxd_lxc_startconfig_")
	if err != nil {
		unmountTempBlocks(c.Path(""))
		c.StorageStop()
		return err
	}
	configPath := f.Name()
	if err = f.Chmod(0600); err != nil {
		f.Close()
		os.Remove(configPath)
		unmountTempBlocks(c.Path(""))
		c.StorageStop()
		return err
	}
	f.Close()
	err = c.c.SaveConfigFile(configPath)
	if err != nil {
		unmountTempBlocks(c.Path(""))
		c.StorageStop()
		return err
	}
	err = c.TemplateApply("start")
	if err != nil {
		unmountTempBlocks(c.Path(""))
		c.StorageStop()
		return err
	}
	/* Deal with idmap changes */
	idmap := c.IdmapSet()
	lastIdmap, err := c.LastIdmapSet()
	if err != nil {
		unmountTempBlocks(c.Path(""))
		c.StorageStop()
		return err
	}
	var jsonIdmap string
	if idmap != nil {
		idmapBytes, err := json.Marshal(idmap.Idmap)
		if err != nil {
			unmountTempBlocks(c.Path(""))
			c.StorageStop()
			return err
		}
		jsonIdmap = string(idmapBytes)
	} else {
		jsonIdmap = "[]"
	}
	// If the idmap changed since the last start, unshift the rootfs from
	// the old map and reshift it with the new one.
	if !reflect.DeepEqual(idmap, lastIdmap) {
		shared.Debugf("Container idmap changed, remapping")
		if lastIdmap != nil {
			if err := lastIdmap.UnshiftRootfs(c.RootfsPath()); err != nil {
				unmountTempBlocks(c.Path(""))
				c.StorageStop()
				return err
			}
		}
		if idmap != nil {
			if err := idmap.ShiftRootfs(c.RootfsPath()); err != nil {
				unmountTempBlocks(c.Path(""))
				c.StorageStop()
				return err
			}
		}
	}
	err = c.ConfigKeySet("volatile.last_state.idmap", jsonIdmap)
	if err != nil {
		unmountTempBlocks(c.Path(""))
		c.StorageStop()
		return err
	}
	/* Actually start the container */
	out, err := exec.Command(
		os.Args[0],
		"forkstart",
		c.name,
		c.daemon.lxcpath,
		configPath).CombinedOutput()
	if string(out) != "" {
		for _, line := range strings.Split(strings.TrimRight(string(out), "\n"), "\n") {
			shared.Debugf("forkstart: %s", line)
		}
	}
	if err != nil {
		unmountTempBlocks(c.Path(""))
		c.StorageStop()
		err = fmt.Errorf(
			"Error calling 'lxd forkstart %s %s %s': err='%v'",
			c.name,
			c.daemon.lxcpath,
			path.Join(c.LogPath(), "lxc.conf"),
			err)
	}
	if err == nil && c.ephemeral == true {
		containerWatchEphemeral(c.daemon, c)
	}
	if err != nil {
		unmountTempBlocks(c.Path(""))
		c.StorageStop()
	}
	return err
}
// Reboot requests a reboot of the container via go-lxc.
func (c *containerLXD) Reboot() error {
	return c.c.Reboot()
}

// Freeze suspends the container's processes via go-lxc.
func (c *containerLXD) Freeze() error {
	return c.c.Freeze()
}
// IsNesting reports whether security.nesting is enabled ("1" or "true").
func (c *containerLXD) IsNesting() bool {
	v := strings.ToLower(c.config["security.nesting"])
	return v == "1" || v == "true"
}
// IsPrivileged reports whether security.privileged is enabled ("1" or "true").
func (c *containerLXD) IsPrivileged() bool {
	v := strings.ToLower(c.config["security.privileged"])
	return v == "1" || v == "true"
}
// IsRunning reports whether the underlying LXC container is running.
func (c *containerLXD) IsRunning() bool {
	return c.c.Running()
}

// IsFrozen reports whether the container state is FROZEN.
func (c *containerLXD) IsFrozen() bool {
	return c.State() == "FROZEN"
}
// Shutdown requests a clean shutdown (waiting up to timeout), then stops
// the container's storage, clears temporary block mounts and unloads the
// AppArmor profile.
func (c *containerLXD) Shutdown(timeout time.Duration) error {
	if err := c.c.Shutdown(timeout); err != nil {
		// Still try to unload the storage.
		c.StorageStop()
		return err
	}
	// Stop the storage for this container
	if err := c.StorageStop(); err != nil {
		return err
	}
	unmountTempBlocks(c.Path(""))
	if err := AAUnloadProfile(c); err != nil {
		return err
	}
	return nil
}
// Stop force-stops the container. It freezes the container first, which
// makes it possible to kill containers that constantly spawn replacement
// processes (e.g. a fork bomb flooding their init). Storage is then
// stopped and temporary mounts cleaned up.
func (c *containerLXD) Stop() error {
	// Attempt to freeze the container first, helps massively with fork bombs
	c.c.Freeze()
	if err := c.c.Stop(); err != nil {
		// Still try to unload the storage.
		c.StorageStop()
		return err
	}
	// Stop the storage for this container
	if err := c.StorageStop(); err != nil {
		return err
	}
	// Clean up any mounts from previous runs
	unmountTempBlocks(c.Path(""))
	if err := AAUnloadProfile(c); err != nil {
		return err
	}
	return nil
}
// Unfreeze resumes a frozen container via go-lxc.
func (c *containerLXD) Unfreeze() error {
	return c.c.Unfreeze()
}

// StorageFromImage provisions the container's storage from an image hash.
func (c *containerLXD) StorageFromImage(hash string) error {
	return c.storage.ContainerCreateFromImage(c, hash)
}

// StorageFromNone provisions empty storage for the container.
func (c *containerLXD) StorageFromNone() error {
	return c.storage.ContainerCreate(c)
}

// StorageStart activates the container's backing storage.
func (c *containerLXD) StorageStart() error {
	return c.storage.ContainerStart(c)
}

// StorageStop deactivates the container's backing storage.
func (c *containerLXD) StorageStop() error {
	return c.storage.ContainerStop(c)
}

// Storage returns the storage backend used by this container.
func (c *containerLXD) Storage() storage {
	return c.storage
}
// Restore rolls the container back to the given snapshot: the container is
// stopped if running, the snapshot's filesystem is restored over it, its
// config replaces the current one, and the container is restarted if it
// had been running.
func (c *containerLXD) Restore(sourceContainer container) error {
	/*
	 * restore steps:
	 * 1. stop container if already running
	 * 2. copy snapshot rootfs to container
	 * 3. overwrite existing config with snapshot config
	 */
	// Stop the container
	// TODO: stateful restore ?
	wasRunning := false
	if c.IsRunning() {
		wasRunning = true
		if err := c.Stop(); err != nil {
			shared.Log.Error(
				"RESTORE => could not stop container",
				log.Ctx{
					"container": c.Name(),
					"err":       err})
			return err
		}
		shared.Log.Debug(
			"RESTORE => Stopped container",
			log.Ctx{"container": c.Name()})
	}
	// Restore the FS.
	// TODO: I switched the FS and config restore, think thats the correct way
	// (pcdummy)
	err := c.storage.ContainerRestore(c, sourceContainer)
	if err != nil {
		shared.Log.Error("RESTORE => Restoring the filesystem failed",
			log.Ctx{
				"source":      sourceContainer.Name(),
				"destination": c.Name()})
		return err
	}
	args := containerLXDArgs{
		Ctype:        cTypeRegular,
		Config:       sourceContainer.Config(),
		Profiles:     sourceContainer.Profiles(),
		Ephemeral:    sourceContainer.IsEphemeral(),
		Architecture: sourceContainer.Architecture(),
		Devices:      sourceContainer.Devices(),
	}
	err = c.ConfigReplace(args)
	if err != nil {
		shared.Log.Error("RESTORE => Restore of the configuration failed",
			log.Ctx{
				"source":      sourceContainer.Name(),
				"destination": c.Name()})
		return err
	}
	// Best-effort restart; a failure to start is not reported here.
	if wasRunning {
		c.Start()
	}
	return nil
}
// Delete removes the container (or snapshot) from storage and the
// database, along with its snapshots, temporary mounts and security
// profiles.
func (c *containerLXD) Delete() error {
	shared.Log.Debug("containerLXD.Delete", log.Ctx{"c.name": c.Name(), "type": c.cType})
	switch c.cType {
	case cTypeRegular:
		if err := containerDeleteSnapshots(c.daemon, c.Name()); err != nil {
			return err
		}
		if err := c.storage.ContainerDelete(c); err != nil {
			return err
		}
		unmountTempBlocks(c.Path(""))
	case cTypeSnapshot:
		if err := c.storage.ContainerSnapshotDelete(c); err != nil {
			return err
		}
	default:
		return fmt.Errorf("Unknown cType: %d", c.cType)
	}
	if err := dbContainerRemove(c.daemon.db, c.Name()); err != nil {
		return err
	}
	// Best-effort security-profile cleanup; errors are ignored.
	AADeleteProfile(c)
	SeccompDeleteProfile(c)
	return nil
}
// Rename renames a stopped container (cascading to its snapshots),
// updating storage, the database and the in-memory LXC handle.
func (c *containerLXD) Rename(newName string) error {
	oldName := c.Name()
	if !c.IsSnapshot() && !shared.ValidHostname(newName) {
		return fmt.Errorf("Invalid container name")
	}
	if c.IsRunning() {
		return fmt.Errorf("renaming of running container not allowed")
	}
	if c.IsSnapshot() {
		if err := c.storage.ContainerSnapshotRename(c, newName); err != nil {
			return err
		}
	} else {
		if err := c.storage.ContainerRename(c, newName); err != nil {
			return err
		}
	}
	if err := dbContainerRename(c.daemon.db, oldName, newName); err != nil {
		return err
	}
	results, err := dbContainerGetSnapshots(c.daemon.db, oldName)
	if err != nil {
		return err
	}
	// Move every snapshot under the new container name; failures are
	// logged but do not abort the rename.
	for _, sname := range results {
		sc, err := containerLXDLoad(c.daemon, sname)
		if err != nil {
			shared.Log.Error(
				"containerDeleteSnapshots: Failed to load the snapshotcontainer",
				log.Ctx{"container": oldName, "snapshot": sname})
			continue
		}
		baseSnapName := filepath.Base(sname)
		newSnapshotName := newName + shared.SnapshotDelimiter + baseSnapName
		if err := sc.Rename(newSnapshotName); err != nil {
			shared.Log.Error(
				"containerDeleteSnapshots: Failed to rename a snapshotcontainer",
				log.Ctx{"container": oldName, "snapshot": sname, "err": err})
		}
	}
	c.name = newName
	// Recreate the LX Container handle under the new name.
	c.c = nil
	// Bug fix: the error from init() was previously discarded, hiding
	// failures to rebuild the LXC handle after a rename.
	return c.init()
}
// IsEphemeral reports whether the container is flagged as ephemeral.
func (c *containerLXD) IsEphemeral() bool {
	return c.ephemeral
}

// IsSnapshot reports whether this containerLXD represents a snapshot
// rather than a regular container.
func (c *containerLXD) IsSnapshot() bool {
	return c.cType == cTypeSnapshot
}

// ID returns the container's database id.
func (c *containerLXD) ID() int {
	return c.id
}

// Name returns the container's name.
func (c *containerLXD) Name() string {
	return c.name
}

// Architecture returns the container's architecture id.
func (c *containerLXD) Architecture() int {
	return c.architecture
}

// Path returns the on-disk directory of the container. When newName is
// non-empty the path is computed for that name instead (used while
// renaming).
func (c *containerLXD) Path(newName string) string {
	if newName != "" {
		return containerPath(newName, c.IsSnapshot())
	}

	return containerPath(c.Name(), c.IsSnapshot())
}

// RootfsPath returns the path of the container's root filesystem.
func (c *containerLXD) RootfsPath() string {
	return path.Join(c.Path(""), "rootfs")
}

// TemplatesPath returns the directory holding the container's templates.
func (c *containerLXD) TemplatesPath() string {
	return path.Join(c.Path(""), "templates")
}

// StateDir returns the container's "state" subdirectory.
func (c *containerLXD) StateDir() string {
	return path.Join(c.Path(""), "state")
}

// LogPath returns the container's log directory (delegates to shared.LogPath).
func (c *containerLXD) LogPath() string {
	return shared.LogPath(c.Name())
}

// LogFilePath returns the path of the container's LXC log file.
func (c *containerLXD) LogFilePath() string {
	return filepath.Join(c.LogPath(), "lxc.log")
}

// InitPID returns the pid of the container's init process as reported by LXC.
func (c *containerLXD) InitPID() int {
	return c.c.InitPid()
}

// State returns the string form of the current LXC state.
func (c *containerLXD) State() string {
	return c.c.State().String()
}

// IdmapSet returns the uid/gid mapping currently associated with the
// container.
func (c *containerLXD) IdmapSet() *shared.IdmapSet {
	return c.idmapset
}
// LastIdmapSet returns the idmap recorded under volatile.last_state.idmap,
// falling back to the current idmap when nothing was recorded. A recorded
// but empty map yields (nil, nil).
func (c *containerLXD) LastIdmapSet() (*shared.IdmapSet, error) {
	lastJSON := c.Config()["volatile.last_state.idmap"]
	if lastJSON == "" {
		return c.IdmapSet(), nil
	}

	last := &shared.IdmapSet{}
	if err := json.Unmarshal([]byte(lastJSON), &last.Idmap); err != nil {
		return nil, err
	}

	if len(last.Idmap) == 0 {
		return nil, nil
	}

	return last, nil
}
// ConfigKeySet sets a single key on the container's base config and persists
// it by replacing the whole configuration via ConfigReplace.
func (c *containerLXD) ConfigKeySet(key string, value string) error {
	c.baseConfig[key] = value

	// Rebuild the full argument set from the container's current state so
	// ConfigReplace sees everything, not just the changed key.
	args := containerLXDArgs{
		Ctype:        c.cType,
		Config:       c.baseConfig,
		Profiles:     c.profiles,
		Ephemeral:    c.ephemeral,
		Architecture: c.architecture,
		Devices:      c.baseDevices,
	}

	return c.ConfigReplace(args)
}

// LXContainerGet exposes the underlying go-lxc container handle.
func (c *containerLXD) LXContainerGet() *lxc.Container {
	return c.c
}
// ConfigReplace replaces the config of container and tries to live apply
// the new configuration.
//
// BUG FIX: two error paths (applyPostDeviceConfig and devicesApplyDeltaLive)
// previously returned without rolling back the open transaction, leaking it.
func (c *containerLXD) ConfigReplace(newContainerArgs containerLXDArgs) error {
	/* check to see that the config actually applies to the container
	 * successfully before saving it. in particular, raw.lxc and
	 * raw.apparmor need to be parsed once to make sure they make sense.
	 */
	preDevList := c.devices
	if err := c.applyConfig(newContainerArgs.Config); err != nil {
		return err
	}

	tx, err := dbBegin(c.daemon.db)
	if err != nil {
		return err
	}

	/* Update config or profiles */
	if err = dbContainerConfigClear(tx, c.id); err != nil {
		shared.Log.Debug(
			"Error clearing configuration for container",
			log.Ctx{"name": c.Name()})
		tx.Rollback()
		return err
	}

	if err = dbContainerConfigInsert(tx, c.id, newContainerArgs.Config); err != nil {
		shared.Debugf("Error inserting configuration for container %s: %s", c.Name(), err)
		tx.Rollback()
		return err
	}

	/* handle profiles */
	if emptyProfile(newContainerArgs.Profiles) {
		_, err := tx.Exec("DELETE from containers_profiles where container_id=?", c.id)
		if err != nil {
			tx.Rollback()
			return err
		}
	} else {
		if err := dbContainerProfilesInsert(tx, c.id, newContainerArgs.Profiles); err != nil {
			tx.Rollback()
			return err
		}
	}

	err = dbDevicesAdd(tx, "container", int64(c.id), newContainerArgs.Devices)
	if err != nil {
		tx.Rollback()
		return err
	}

	if err := c.applyPostDeviceConfig(); err != nil {
		// BUG FIX: this path used to leak the open transaction.
		tx.Rollback()
		return err
	}

	c.baseConfig = newContainerArgs.Config
	c.baseDevices = newContainerArgs.Devices

	/* Let's try to load the apparmor profile again, in case the
	 * raw.apparmor config was changed (or deleted). Make sure we do this
	 * before commit, in case it fails because the user screwed something
	 * up so we can roll back and not hose their container.
	 *
	 * For containers that aren't running, we just want to parse the new
	 * profile; this is because this code is called during the start
	 * process after the profile is loaded but before the container starts,
	 * which will cause a container start to fail. If the container is
	 * running, we /do/ want to reload the profile, because we want the
	 * changes to take effect immediately.
	 */
	if !c.IsRunning() {
		AAParseProfile(c)
		return txCommit(tx)
	}

	if err := AALoadProfile(c); err != nil {
		tx.Rollback()
		return err
	}

	if err := txCommit(tx); err != nil {
		return err
	}

	// add devices from new profile list to the desired goal set
	for _, p := range c.profiles {
		profileDevs, err := dbDevices(c.daemon.db, p, true)
		if err != nil {
			return fmt.Errorf("Error reading devices from profile '%s': %v", p, err)
		}

		newContainerArgs.Devices.ExtendFromProfile(preDevList, profileDevs)
	}

	tx, err = dbBegin(c.daemon.db)
	if err != nil {
		return err
	}

	if err := devicesApplyDeltaLive(tx, c, preDevList, newContainerArgs.Devices); err != nil {
		// BUG FIX: roll back the second transaction instead of leaking it.
		tx.Rollback()
		return err
	}

	if err := txCommit(tx); err != nil {
		return err
	}

	return nil
}
// Config returns the container's config map (includes keys merged in from
// profiles; see applyProfile).
func (c *containerLXD) Config() map[string]string {
	return c.config
}

// Devices returns the container's device map (includes devices merged in
// from profiles; see applyProfile).
func (c *containerLXD) Devices() shared.Devices {
	return c.devices
}

// Profiles returns the names of the profiles applied to the container.
func (c *containerLXD) Profiles() []string {
	return c.profiles
}
/*
 * Export the container to a unshifted tarfile containing:
 * dir/
 *     metadata.yaml
 *     rootfs/
 */
// ExportToTar streams the container (metadata.yaml, rootfs/ and templates/)
// as an uid/gid-unshifted tarball into w. A running container can only be
// exported via one of its snapshots (snap != "").
//
// BUG FIX: the filepath.Walk return values were previously discarded, so a
// failed walk produced a silently truncated image.
func (c *containerLXD) ExportToTar(snap string, w io.Writer) error {
	if snap == "" && c.IsRunning() {
		return fmt.Errorf("Cannot export a running container as image")
	}

	if err := c.StorageStart(); err != nil {
		return err
	}
	defer c.StorageStop()

	// Unshift the rootfs so the archive carries unmapped ids; shift it
	// back once the export is done.
	idmap, err := c.LastIdmapSet()
	if err != nil {
		return err
	}

	if idmap != nil {
		if err := idmap.UnshiftRootfs(c.RootfsPath()); err != nil {
			return err
		}

		defer idmap.ShiftRootfs(c.RootfsPath())
	}

	tw := tar.NewWriter(w)

	// keep track of the first path we saw for each path with nlink>1
	linkmap := map[uint64]string{}

	cDir := c.Path("")

	// Path inside the tar image is the pathname starting after cDir
	offset := len(cDir) + 1

	writeToTar := func(path string, fi os.FileInfo, err error) error {
		if err := c.tarStoreFile(linkmap, offset, tw, path, fi); err != nil {
			shared.Debugf("Error tarring up %s: %s", path, err)
			return err
		}
		return nil
	}

	fnam := filepath.Join(cDir, "metadata.yaml")
	if shared.PathExists(fnam) {
		fi, err := os.Lstat(fnam)
		if err != nil {
			shared.Debugf("Error statting %s during exportToTar", fnam)
			tw.Close()
			return err
		}

		if err := c.tarStoreFile(linkmap, offset, tw, fnam, fi); err != nil {
			shared.Debugf("Error writing to tarfile: %s", err)
			tw.Close()
			return err
		}
	}

	fnam = filepath.Join(cDir, "rootfs")
	if err := filepath.Walk(fnam, writeToTar); err != nil {
		shared.Debugf("Error walking container rootfs %s: %s", fnam, err)
		tw.Close()
		return err
	}

	fnam = filepath.Join(cDir, "templates")
	if shared.PathExists(fnam) {
		if err := filepath.Walk(fnam, writeToTar); err != nil {
			shared.Debugf("Error walking container templates %s: %s", fnam, err)
			tw.Close()
			return err
		}
	}

	return tw.Close()
}
// TemplateApply renders every image template whose "when" list contains
// trigger into the container's rootfs.
//
// BUG FIXES: the destination file handle is now closed on every path (it
// used to leak one fd per rendered template) and template rendering errors
// are surfaced instead of being silently dropped. The loop variable no
// longer shadows the filepath package name.
func (c *containerLXD) TemplateApply(trigger string) error {
	fname := path.Join(c.Path(""), "metadata.yaml")

	// No metadata means no templates; nothing to do.
	if !shared.PathExists(fname) {
		return nil
	}

	content, err := ioutil.ReadFile(fname)
	if err != nil {
		return err
	}

	metadata := new(imageMetadata)
	err = yaml.Unmarshal(content, &metadata)
	if err != nil {
		return fmt.Errorf("Could not parse %s: %v", fname, err)
	}

	for tplPath, template := range metadata.Templates {
		// Only render templates registered for this trigger.
		found := false
		for _, tplTrigger := range template.When {
			if tplTrigger == trigger {
				found = true
				break
			}
		}

		if !found {
			continue
		}

		// Open (or create) the target file inside the rootfs.
		var w *os.File
		fullpath := shared.VarPath("containers", c.name, "rootfs", strings.TrimLeft(tplPath, "/"))
		if shared.PathExists(fullpath) {
			w, err = os.Create(fullpath)
			if err != nil {
				return err
			}
		} else {
			// New file: create missing parent directories owned by the
			// container's root and give the file sane ownership/mode.
			uid := 0
			gid := 0
			if !c.IsPrivileged() {
				uid, gid = c.idmapset.ShiftIntoNs(0, 0)
			}
			shared.MkdirAllOwner(path.Dir(fullpath), 0755, uid, gid)

			w, err = os.Create(fullpath)
			if err != nil {
				return err
			}

			if !c.IsPrivileged() {
				w.Chown(uid, gid)
			}
			w.Chmod(0644)
		}

		tplString, err := ioutil.ReadFile(shared.VarPath("containers", c.name, "templates", template.Template))
		if err != nil {
			w.Close()
			return err
		}

		tpl, err := pongo2.FromString("{% autoescape off %}" + string(tplString) + "{% endautoescape %}")
		if err != nil {
			w.Close()
			return err
		}

		containerMeta := make(map[string]string)
		containerMeta["name"] = c.name
		containerMeta["architecture"], _ = shared.ArchitectureName(c.architecture)

		if c.ephemeral {
			containerMeta["ephemeral"] = "true"
		} else {
			containerMeta["ephemeral"] = "false"
		}

		if c.IsPrivileged() {
			containerMeta["privileged"] = "true"
		} else {
			containerMeta["privileged"] = "false"
		}

		// config_get template helper: look up a container config key,
		// returning the given default when absent and trimming trailing
		// newlines otherwise.
		configGet := func(confKey, confDefault *pongo2.Value) *pongo2.Value {
			val, ok := c.config[confKey.String()]
			if !ok {
				return confDefault
			}

			return pongo2.AsValue(strings.TrimRight(val, "\r\n"))
		}

		err = tpl.ExecuteWriter(pongo2.Context{"trigger": trigger,
			"path":       tplPath,
			"container":  containerMeta,
			"config":     c.config,
			"devices":    c.devices,
			"properties": template.Properties,
			"config_get": configGet}, w)
		w.Close()
		if err != nil {
			return err
		}
	}

	return nil
}
// DetachMount unmounts device m from a running container by invoking the
// forkumount subcommand inside the container's namespace. For a stopped
// container there is nothing to do.
func (c *containerLXD) DetachMount(m shared.Device) error {
	// TODO - in case of reboot, we should remove the lxc.mount.entry. Trick
	// is, we can't d.c.ClearConfigItem bc that will clear all the keys. So
	// we should get the full list, clear, then reinsert all but the one we're
	// removing
	shared.Debugf("Mounts detach not yet implemented")

	pid := c.c.InitPid()
	if pid == -1 {
		// Container isn't running, so nothing is mounted.
		return nil
	}

	return exec.Command(os.Args[0], "forkumount", fmt.Sprintf("%d", pid), m["path"]).Run()
}
/* This is called when adding a mount to a *running* container */
// AttachMount bind-mounts device m into the running container. A block
// device is first mounted on the host under the container directory and the
// resulting mountpoint is bind-mounted in. Failures of devices marked
// "optional" are logged (or silently ignored) instead of aborting.
//
// BUG FIX: when setConfigItem failed for an optional device, the warning was
// logged but the error was still returned; the branch now returns nil like
// every other optional path in this function.
func (c *containerLXD) AttachMount(m shared.Device) error {
	dest := m["path"]
	source := m["source"]
	flags := 0

	sb, err := os.Stat(source)
	if err != nil {
		return err
	}

	opts, readonly, optional := getMountOptions(m)
	if readonly {
		flags |= syscall.MS_RDONLY
	}

	if shared.IsBlockdev(sb.Mode()) {
		fstype, err := shared.BlockFsDetect(source)
		if err != nil {
			if optional {
				shared.Log.Warn("Failed to detect fstype for block device",
					log.Ctx{"error": err, "source": source})
				return nil
			}
			return fmt.Errorf("Unable to detect fstype for %s: %s", source, err)
		}

		// Mount blockdev into $containerdir/blk.$(mktemp)
		fnam := fmt.Sprintf("blk.%s", strings.Replace(source, "/", "-", -1))
		blkmnt := filepath.Join(c.Path(""), fnam)
		// Clear out any stale mountpoint left by a previous attach.
		syscall.Unmount(blkmnt, syscall.MNT_DETACH)
		os.Remove(blkmnt)
		if err = os.Mkdir(blkmnt, 0660); err != nil {
			if optional {
				return nil
			}
			return fmt.Errorf("Unable to create mountpoint for blockdev %s: %s", source, err)
		}

		if err = syscall.Mount(source, blkmnt, fstype, uintptr(flags), ""); err != nil {
			if optional {
				return nil
			}
			return fmt.Errorf("Unable to prepare blockdev mount for %s: %s", source, err)
		}

		// From here on we bind-mount the host-side mountpoint.
		source = blkmnt
		opts = append(opts, "create=dir")
	} else if sb.IsDir() {
		opts = append(opts, "create=dir")
	} else {
		opts = append(opts, "create=file")
	}

	opts = append(opts, "bind")
	flags |= syscall.MS_BIND
	optstr := strings.Join(opts, ",")
	entry := fmt.Sprintf("%s %s %s %s 0 0", source, dest, "none", optstr)

	// Record the mount in the LXC config so it survives a container reboot.
	if err := setConfigItem(c, "lxc.mount.entry", entry); err != nil {
		if optional {
			shared.Log.Warn("Failed to setup lxc mount for block device",
				log.Ctx{"error": err, "source": source})
			return nil
		}
		return fmt.Errorf("Failed to set up lxc mount entry for %s: %s", m["source"], err)
	}

	// And inject it into the running container right now.
	err = c.insertMount(source, dest, "none", flags, optstr)
	if err != nil {
		if optional {
			shared.Log.Warn("Failed to insert mount for block device",
				log.Ctx{"error": err, "source": m["source"]})
			return nil
		}
		return fmt.Errorf("Failed to insert mount for %s: %s", m["source"], err)
	}

	return nil
}
// applyConfig translates container config keys into LXC config items:
// limits.* keys become cgroup settings, environment.* keys become container
// environment variables, and everything else is stored on c.config so other
// code (e.g. security.privileged handling) can see it.
//
// BUG FIX: `count, err := fmt.Sscanf(...)` shadowed the outer err, so the
// error from setConfigItem for the cpuset was assigned to the shadowed
// variable and silently dropped by the check at the bottom of the loop.
func (c *containerLXD) applyConfig(config map[string]string) error {
	var err error
	for k, v := range config {
		switch k {
		case "limits.cpus":
			// TODO - Come up with a way to choose cpus for multiple
			// containers
			var vint int
			var count int
			count, err = fmt.Sscanf(v, "%d", &vint)
			if err != nil {
				return err
			}

			if count != 1 || vint < 0 || vint > 65000 {
				return fmt.Errorf("Bad cpu limit: %s", v)
			}

			cpuset := fmt.Sprintf("0-%d", vint-1)
			err = setConfigItem(c, "lxc.cgroup.cpuset.cpus", cpuset)
		case "limits.memory":
			err = setConfigItem(c, "lxc.cgroup.memory.limit_in_bytes", v)
		default:
			if strings.HasPrefix(k, "environment.") {
				setConfigItem(c, "lxc.environment", fmt.Sprintf("%s=%s", strings.TrimPrefix(k, "environment."), v))
			}

			/* Things like security.privileged need to be propagated */
			c.config[k] = v
		}
		if err != nil {
			shared.Debugf("Error setting %s: %q", k, err)
			return err
		}
	}
	return nil
}
// applyPostDeviceConfig loads raw.lxc into the LXC config. This must run
// after devices have been instantiated (see bug #588 and fix #635), hence
// the separate pass.
func (c *containerLXD) applyPostDeviceConfig() error {
	// applies config that must be delayed until after devices are
	// instantiated, see bug #588 and fix #635
	if lxcConfig, ok := c.config["raw.lxc"]; ok {
		if err := validateRawLxc(lxcConfig); err != nil {
			return err
		}

		// LoadConfigFile only reads from a path, so write the raw config
		// to a scratch file first.
		f, err := ioutil.TempFile("", "lxd_config_")
		if err != nil {
			return err
		}

		err = shared.WriteAll(f, []byte(lxcConfig))
		// Close before checking the write error so the fd is released on
		// both paths; the file itself is removed when we return.
		f.Close()
		defer os.Remove(f.Name())
		if err != nil {
			return err
		}

		if err := c.c.LoadConfigFile(f.Name()); err != nil {
			return fmt.Errorf("problem applying raw.lxc, perhaps there is a syntax error?")
		}
	}

	return nil
}
// applyProfile reads profile p's config keys and devices from the database,
// merges the devices into c.devices and applies the config via applyConfig.
func (c *containerLXD) applyProfile(p string) error {
	q := `SELECT key, value FROM profiles_config
		JOIN profiles ON profiles.id=profiles_config.profile_id
		WHERE profiles.name=?`
	var k, v string
	// k and v only describe the column types here; the actual row values
	// come back in result from dbQueryScan.
	inargs := []interface{}{p}
	outfmt := []interface{}{k, v}
	result, err := dbQueryScan(c.daemon.db, q, inargs, outfmt)
	if err != nil {
		return err
	}

	// Collect the profile's config keys into a plain map.
	config := map[string]string{}
	for _, r := range result {
		k = r[0].(string)
		v = r[1].(string)
		config[k] = v
	}

	newdevs, err := dbDevices(c.daemon.db, p, true)
	if err != nil {
		return err
	}

	// Profile devices override/extend the container's device map.
	for k, v := range newdevs {
		c.devices[k] = v
	}

	return c.applyConfig(config)
}
// updateContainerHWAddr records a MAC address update made by a racing task:
// it locates the nic device whose volatile hwaddr config key is present and
// stores v both on the device and in the config map.
func (c *containerLXD) updateContainerHWAddr(k, v string) {
	for name, d := range c.devices {
		if d["type"] != "nic" {
			continue
		}

		for key := range c.config {
			device, err := extractInterfaceFromConfigName(key)
			if err != nil || device != name {
				continue
			}

			d["hwaddr"] = v
			c.config[key] = v
			return
		}
	}
}
// setupMacAddresses makes sure every nic device has a MAC address: a MAC
// already recorded in the config is reused; otherwise one is generated
// (expanding 'x' placeholders in templates like 00:16:3e:xx:xx:xx) and
// persisted to the database, deferring politely to any concurrent writer.
func (c *containerLXD) setupMacAddresses() error {
	newConfigEntries := map[string]string{}

	for name, d := range c.devices {
		if d["type"] != "nic" {
			continue
		}

		// Look for an existing volatile hwaddr config entry for this nic.
		found := false

		for key, val := range c.config {
			device, err := extractInterfaceFromConfigName(key)
			if err == nil && device == name {
				found = true
				d["hwaddr"] = val
			}
		}

		if !found {
			var hwaddr string
			var err error
			if d["hwaddr"] != "" {
				// Expand any template characters in the configured MAC.
				hwaddr, err = generateMacAddr(d["hwaddr"])
				if err != nil {
					return err
				}
			} else {
				// No MAC configured at all: generate a fresh one.
				hwaddr, err = generateMacAddr("00:16:3e:xx:xx:xx")
				if err != nil {
					return err
				}
			}

			// Only persist when generation actually changed the value.
			if hwaddr != d["hwaddr"] {
				d["hwaddr"] = hwaddr
				key := fmt.Sprintf("volatile.%s.hwaddr", name)
				c.config[key] = hwaddr
				c.baseConfig[key] = hwaddr
				newConfigEntries[key] = hwaddr
			}
		}
	}

	if len(newConfigEntries) > 0 {
		tx, err := dbBegin(c.daemon.db)
		if err != nil {
			return err
		}

		/*
		 * My logic may be flawed here, but it seems to me that one of
		 * the following must be true:
		 * 1. The current database entry equals what we had stored.
		 *    Our update akes precedence
		 * 2. The current database entry is different from what we had
		 *    stored. Someone updated it since we last grabbed the
		 *    container configuration. So either
		 *       a. it contains 'x' and is a template. We have generated
		 *          a real mac, so our update takes precedence
		 *       b. it contains no 'x' and is an hwaddr, not template. We
		 *          defer to the racer's update since it may be actually
		 *          starting the container.
		 */
		str := "INSERT INTO containers_config (container_id, key, value) values (?, ?, ?)"
		stmt, err := tx.Prepare(str)
		if err != nil {
			tx.Rollback()
			return err
		}
		defer stmt.Close()

		ustr := "UPDATE containers_config SET value=? WHERE container_id=? AND key=?"
		ustmt, err := tx.Prepare(ustr)
		if err != nil {
			tx.Rollback()
			return err
		}
		defer ustmt.Close()

		qstr := "SELECT value FROM containers_config WHERE container_id=? AND key=?"
		qstmt, err := tx.Prepare(qstr)
		if err != nil {
			tx.Rollback()
			return err
		}
		defer qstmt.Close()

		for k, v := range newConfigEntries {
			var racer string
			err := qstmt.QueryRow(c.id, k).Scan(&racer)
			if err == sql.ErrNoRows {
				// No existing row: insert ours (case 1 above).
				_, err = stmt.Exec(c.id, k, v)
				if err != nil {
					shared.Debugf("Error adding mac address to container")
					tx.Rollback()
					return err
				}
			} else if err != nil {
				tx.Rollback()
				return err
			} else if strings.Contains(racer, "x") {
				// Existing row is still a template (case 2a): our
				// concrete MAC wins.
				_, err = ustmt.Exec(v, c.id, k)
				if err != nil {
					shared.Debugf("Error updating mac address to container")
					tx.Rollback()
					return err
				}
			} else {
				// we accept the racing task's update
				c.updateContainerHWAddr(k, v)
			}
		}

		err = txCommit(tx)
		if err != nil {
			fmt.Printf("setupMacAddresses: (TxCommit) error %s\n", err)
		}

		return err
	}

	return nil
}
// applyIdmapSet writes the container's idmap (when one is set) into the LXC
// config as lxc.id_map lines.
func (c *containerLXD) applyIdmapSet() error {
	if c.idmapset == nil {
		return nil
	}

	for _, line := range c.idmapset.ToLxcString() {
		if err := setConfigItem(c, "lxc.id_map", line+"\n"); err != nil {
			return err
		}
	}

	return nil
}
// applyDevices translates every configured device into LXC config entries,
// iterating in sorted device-name order so the resulting config is
// deterministic, and creates device nodes for unix-block/unix-char devices.
func (c *containerLXD) applyDevices() error {
	var keys []string
	for k := range c.devices {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, name := range keys {
		d := c.devices[name]
		// "type" isn't a real device name; skip it.
		if name == "type" {
			continue
		}

		configs, err := deviceToLxc(c.Path(""), d)
		if err != nil {
			return fmt.Errorf("Failed configuring device %s: %s", name, err)
		}
		// Each config entry is a (key, value) pair for the LXC config.
		for _, line := range configs {
			err := setConfigItem(c, line[0], line[1])
			if err != nil {
				return fmt.Errorf("Failed configuring device %s: %s", name, err)
			}
		}
		// Unix devices also need their node created on disk now.
		if d["type"] == "unix-block" || d["type"] == "unix-char" {
			if err := c.setupUnixDev(d); err != nil {
				return fmt.Errorf("Failed creating device %s: %s", d["name"], err)
			}
		}
	}
	return nil
}
// iPsGet queries the running container for its network interfaces and
// returns their addresses as shared.Ip entries, including the host-side
// veth name when one can be matched. On any lookup failure it degrades to
// returning whatever was collected so far (possibly an empty slice).
func (c *containerLXD) iPsGet() []shared.Ip {
	ips := []shared.Ip{}
	names, err := c.c.Interfaces()
	if err != nil {
		// Interface listing failed (e.g. container not running).
		return ips
	}
	for _, n := range names {
		addresses, err := c.c.IPAddress(n)
		if err != nil {
			continue
		}

		// Find the host-side veth for this interface by scanning the
		// running lxc.network.* config entries.
		veth := ""

		for i := 0; i < len(c.c.ConfigItem("lxc.network")); i++ {
			nicName := c.c.RunningConfigItem(fmt.Sprintf("lxc.network.%d.name", i))[0]
			if nicName != n {
				continue
			}

			interfaceType := c.c.RunningConfigItem(fmt.Sprintf("lxc.network.%d.type", i))
			if interfaceType[0] != "veth" {
				continue
			}

			veth = c.c.RunningConfigItem(fmt.Sprintf("lxc.network.%d.veth.pair", i))[0]
			break
		}

		for _, a := range addresses {
			ip := shared.Ip{Interface: n, Address: a, HostVeth: veth}
			// A nil To4() means the address isn't representable as IPv4.
			if net.ParseIP(a).To4() == nil {
				ip.Protocol = "IPV6"
			} else {
				ip.Protocol = "IPV4"
			}
			ips = append(ips, ip)
		}
	}
	return ips
}
// tarStoreFile writes one filesystem entry (fi at path) into the tar stream
// tw. offset strips the container directory prefix from archived names,
// linkmap remembers the first name seen for each inode with nlink>1 so
// later hardlinks reference it, and uids/gids under /rootfs are unshifted
// for unprivileged containers.
func (c *containerLXD) tarStoreFile(linkmap map[uint64]string, offset int, tw *tar.Writer, path string, fi os.FileInfo) error {
	var err error
	var major, minor, nlink int
	var ino uint64

	link := ""
	if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
		link, err = os.Readlink(path)
		if err != nil {
			return err
		}
	}
	hdr, err := tar.FileInfoHeader(fi, link)
	if err != nil {
		return err
	}
	// Archive entries are named relative to the container directory.
	hdr.Name = path[offset:]
	if fi.IsDir() || fi.Mode()&os.ModeSymlink == os.ModeSymlink {
		hdr.Size = 0
	} else {
		hdr.Size = fi.Size()
	}

	hdr.Uid, hdr.Gid, major, minor, ino, nlink, err = shared.GetFileStat(path)
	if err != nil {
		return fmt.Errorf("error getting file info: %s", err)
	}

	// unshift the id under /rootfs/ for unpriv containers
	if !c.IsPrivileged() && strings.HasPrefix(hdr.Name, "/rootfs") {
		hdr.Uid, hdr.Gid = c.idmapset.ShiftFromNs(hdr.Uid, hdr.Gid)
		// Skip entries whose ids can't be mapped back out of the namespace.
		if hdr.Uid == -1 || hdr.Gid == -1 {
			return nil
		}
	}
	if major != -1 {
		hdr.Devmajor = int64(major)
		hdr.Devminor = int64(minor)
	}

	// If it's a hardlink we've already seen use the old name
	if fi.Mode().IsRegular() && nlink > 1 {
		if firstpath, found := linkmap[ino]; found {
			hdr.Typeflag = tar.TypeLink
			hdr.Linkname = firstpath
			hdr.Size = 0
		} else {
			linkmap[ino] = hdr.Name
		}
	}

	// todo - handle xattrs
	if err := tw.WriteHeader(hdr); err != nil {
		return fmt.Errorf("error writing header: %s", err)
	}

	// Only regular entries (not the hardlinks converted above) carry data.
	if hdr.Typeflag == tar.TypeReg {
		f, err := os.Open(path)
		if err != nil {
			return fmt.Errorf("tarStoreFile: error opening file: %s", err)
		}
		defer f.Close()
		if _, err := io.Copy(tw, f); err != nil {
			return fmt.Errorf("error copying file %s", err)
		}
	}
	return nil
}
// mkdirAllContainerRoot creates path (and any missing parents) owned by the
// container's root: uid/gid 0 shifted into the namespace for unprivileged
// containers, host root otherwise.
func (c *containerLXD) mkdirAllContainerRoot(path string, perm os.FileMode) error {
	uid := 0
	gid := 0
	if !c.IsPrivileged() {
		uid, gid = c.idmapset.ShiftIntoNs(0, 0)
		// ShiftIntoNs yields -1 for unmappable ids; fall back to host root.
		if uid == -1 {
			uid = 0
		}
		if gid == -1 {
			gid = 0
		}
	}

	return shared.MkdirAllOwner(path, perm, uid, gid)
}
// mountShared ensures the per-container shmounts directory exists on the
// host and adds an lxc.mount.entry bind-mounting it to dev/.lxd-mounts
// inside the container.
func (c *containerLXD) mountShared() error {
	source := shared.VarPath("shmounts", c.Name())
	entry := fmt.Sprintf("%s dev/.lxd-mounts none bind,create=dir 0 0", source)
	if !shared.PathExists(source) {
		// Create the directory owned by the container's root user.
		if err := c.mkdirAllContainerRoot(source, 0755); err != nil {
			return err
		}
	}

	return setConfigItem(c, "lxc.mount.entry", entry)
}

// Checkpoint delegates to the go-lxc checkpoint implementation.
func (c *containerLXD) Checkpoint(opts lxc.CheckpointOptions) error {
	return c.c.Checkpoint(opts)
}
// StartFromMigration starts the container from a migration image directory
// by dumping the current LXC config to a scratch file and handing both to
// the forkmigrate subcommand.
//
// BUG FIX: the config file written by SaveConfigFile was never removed once
// forkmigrate finished, leaking one temp file per migration; it is now
// cleaned up via defer (cmd.Run below is synchronous).
func (c *containerLXD) StartFromMigration(imagesDir string) error {
	f, err := ioutil.TempFile("", "lxd_lxc_migrateconfig_")
	if err != nil {
		return err
	}

	if err = f.Chmod(0600); err != nil {
		f.Close()
		os.Remove(f.Name())
		return err
	}

	f.Close()
	os.Remove(f.Name())

	// Clean up the file SaveConfigFile recreates at this path, but only
	// after forkmigrate has finished with it.
	defer os.Remove(f.Name())

	if err := c.c.SaveConfigFile(f.Name()); err != nil {
		return err
	}

	/* (Re)Load the AA profile; we set it in the container's config above
	 * in init()
	 */
	if err := AALoadProfile(c); err != nil {
		c.StorageStop()
		return err
	}

	if err := SeccompCreateProfile(c); err != nil {
		c.StorageStop()
		return err
	}

	cmd := exec.Command(
		os.Args[0],
		"forkmigrate",
		c.name,
		c.c.ConfigPath(),
		f.Name(),
		imagesDir,
	)

	return cmd.Run()
}
|
package main
import (
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
"golang.org/x/net/context"
"gopkg.in/lxc/go-lxc.v2"
"gopkg.in/robfig/cron.v2"
"github.com/flosch/pongo2"
"github.com/lxc/lxd/lxd/cluster"
"github.com/lxc/lxd/lxd/db"
"github.com/lxc/lxd/lxd/state"
"github.com/lxc/lxd/lxd/sys"
"github.com/lxc/lxd/lxd/task"
"github.com/lxc/lxd/lxd/types"
"github.com/lxc/lxd/lxd/util"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/idmap"
"github.com/lxc/lxd/shared/ioprogress"
log "github.com/lxc/lxd/shared/log15"
"github.com/lxc/lxd/shared/logger"
"github.com/lxc/lxd/shared/osarch"
"github.com/pkg/errors"
)
// Helper functions
// Returns the parent container name, snapshot name, and whether it actually was
// a snapshot name.
func containerGetParentAndSnapshotName(name string) (string, string, bool) {
	idx := strings.Index(name, shared.SnapshotDelimiter)
	if idx < 0 {
		// No delimiter: plain container name.
		return name, "", false
	}

	return name[:idx], name[idx+len(shared.SnapshotDelimiter):], true
}
// containerPath returns the on-disk path for a container or snapshot name.
func containerPath(name string, isSnapshot bool) string {
	dir := "containers"
	if isSnapshot {
		dir = "snapshots"
	}

	return shared.VarPath(dir, name)
}
// containerValidName checks that name is usable as a container name: it must
// not contain the snapshot delimiter and must be a valid hostname.
func containerValidName(name string) error {
	switch {
	case strings.Contains(name, shared.SnapshotDelimiter):
		return fmt.Errorf(
			"The character '%s' is reserved for snapshots.",
			shared.SnapshotDelimiter)
	case !shared.ValidHostname(name):
		return fmt.Errorf("Container name isn't a valid hostname")
	default:
		return nil
	}
}
// containerValidConfigKey checks that key is a known container config key
// and that value is acceptable for it. raw.lxc additionally gets full LXC
// config validation, and security.syscalls.blacklist_compat is only allowed
// on architectures listed below.
func containerValidConfigKey(os *sys.OS, key string, value string) error {
	f, err := shared.ConfigKeyChecker(key)
	if err != nil {
		return err
	}

	if err = f(value); err != nil {
		return err
	}

	if key == "raw.lxc" {
		return lxcValidConfig(value)
	}

	if key == "security.syscalls.blacklist_compat" {
		// Only permitted when the host supports one of these 64bit
		// architectures.
		for _, arch := range os.Architectures {
			if arch == osarch.ARCH_64BIT_INTEL_X86 ||
				arch == osarch.ARCH_64BIT_ARMV8_LITTLE_ENDIAN ||
				arch == osarch.ARCH_64BIT_POWERPC_BIG_ENDIAN {
				return nil
			}
		}
		return fmt.Errorf("security.syscalls.blacklist_compat isn't supported on this architecture")
	}

	return nil
}
// containerNetworkLimitKeys lists the nic device config keys that configure
// network I/O limits.
var containerNetworkLimitKeys = []string{"limits.max", "limits.ingress", "limits.egress"}
// containerValidDeviceConfigKey reports whether k is a valid configuration
// key for a device of type t. The "type" key itself is accepted for every
// device type; unknown device types (and "none") accept nothing else.
func containerValidDeviceConfigKey(t, k string) bool {
	if k == "type" {
		return true
	}

	unixKeys := []string{"gid", "major", "minor", "mode", "source", "path", "required", "uid"}

	// Per-type whitelists of accepted configuration keys.
	validKeys := map[string][]string{
		"unix-char":  unixKeys,
		"unix-block": unixKeys,
		"nic": {
			"limits.max", "limits.ingress", "limits.egress", "host_name",
			"hwaddr", "mtu", "name", "nictype", "parent", "vlan",
			"ipv4.address", "ipv6.address", "security.mac_filtering",
			"maas.subnet.ipv4", "maas.subnet.ipv6",
		},
		"disk": {
			"limits.max", "limits.read", "limits.write", "optional",
			"path", "readonly", "size", "source", "recursive", "pool",
			"propagation",
		},
		"usb":        {"vendorid", "productid", "mode", "gid", "uid", "required"},
		"gpu":        {"vendorid", "productid", "id", "pci", "mode", "gid", "uid"},
		"infiniband": {"hwaddr", "mtu", "name", "nictype", "parent"},
		"proxy": {
			"bind", "connect", "gid", "listen", "mode", "proxy_protocol",
			"nat", "security.gid", "security.uid", "uid",
		},
	}

	// A missing type yields a nil slice, so unknown types reject all keys.
	for _, valid := range validKeys[t] {
		if k == valid {
			return true
		}
	}

	return false
}
// allowedUnprivilegedOnlyMap rejects any raw.idmap entry that maps host
// uid/gid 0 into the container, used when LXD is restricted to unprivileged
// containers.
func allowedUnprivilegedOnlyMap(rawIdmap string) error {
	rawMaps, err := parseRawIdmap(rawIdmap)
	if err != nil {
		return err
	}

	for _, ent := range rawMaps {
		if ent.Hostid != 0 {
			continue
		}

		return fmt.Errorf("Cannot map root user into container as LXD was configured to only allow unprivileged containers")
	}

	return nil
}
// containerValidConfig validates a whole container (or profile) config map.
// profile forbids container-only key families (volatile.*, image.*);
// expanded indicates profiles have already been merged in, enabling checks
// that only make sense on the final configuration.
func containerValidConfig(sysOS *sys.OS, config map[string]string, profile bool, expanded bool) error {
	if config == nil {
		return nil
	}

	for k, v := range config {
		if profile && strings.HasPrefix(k, "volatile.") {
			return fmt.Errorf("Volatile keys can only be set on containers")
		}

		if profile && strings.HasPrefix(k, "image.") {
			return fmt.Errorf("Image keys can only be set on containers")
		}

		err := containerValidConfigKey(sysOS, k, v)
		if err != nil {
			return err
		}
	}

	// raw.seccomp and the security.syscalls.* family are mutually
	// exclusive, as are whitelist and the blacklist options.
	_, rawSeccomp := config["raw.seccomp"]
	_, whitelist := config["security.syscalls.whitelist"]
	_, blacklist := config["security.syscalls.blacklist"]
	blacklistDefault := shared.IsTrue(config["security.syscalls.blacklist_default"])
	blacklistCompat := shared.IsTrue(config["security.syscalls.blacklist_compat"])

	if rawSeccomp && (whitelist || blacklist || blacklistDefault || blacklistCompat) {
		return fmt.Errorf("raw.seccomp is mutually exclusive with security.syscalls*")
	}

	if whitelist && (blacklist || blacklistDefault || blacklistCompat) {
		return fmt.Errorf("security.syscalls.whitelist is mutually exclusive with security.syscalls.blacklist*")
	}

	// Unprivileged containers require a uid/gid allocation on the host.
	if expanded && (config["security.privileged"] == "" || !shared.IsTrue(config["security.privileged"])) && sysOS.IdmapSet == nil {
		return fmt.Errorf("LXD doesn't have a uid/gid allocation. In this mode, only privileged containers are supported")
	}

	// Honour the LXD_UNPRIVILEGED_ONLY environment restriction.
	unprivOnly := os.Getenv("LXD_UNPRIVILEGED_ONLY")
	if shared.IsTrue(unprivOnly) {
		if config["raw.idmap"] != "" {
			err := allowedUnprivilegedOnlyMap(config["raw.idmap"])
			if err != nil {
				return err
			}
		}

		if shared.IsTrue(config["security.privileged"]) {
			return fmt.Errorf("LXD was configured to only allow unprivileged containers")
		}
	}

	return nil
}
func containerValidDevices(cluster *db.Cluster, devices types.Devices, profile bool, expanded bool) error {
// Empty device list
if devices == nil {
return nil
}
var diskDevicePaths []string
// Check each device individually
for name, m := range devices {
if m["type"] == "" {
return fmt.Errorf("Missing device type for device '%s'", name)
}
if !shared.StringInSlice(m["type"], []string{"disk", "gpu", "infiniband", "nic", "none", "proxy", "unix-block", "unix-char", "usb"}) {
return fmt.Errorf("Invalid device type for device '%s'", name)
}
for k := range m {
if !containerValidDeviceConfigKey(m["type"], k) {
return fmt.Errorf("Invalid device configuration key for %s: %s", m["type"], k)
}
}
if m["type"] == "nic" {
if m["nictype"] == "" {
return fmt.Errorf("Missing nic type")
}
if !shared.StringInSlice(m["nictype"], []string{"bridged", "macvlan", "p2p", "physical", "sriov"}) {
return fmt.Errorf("Bad nic type: %s", m["nictype"])
}
if shared.StringInSlice(m["nictype"], []string{"bridged", "macvlan", "physical", "sriov"}) && m["parent"] == "" {
return fmt.Errorf("Missing parent for %s type nic", m["nictype"])
}
if m["ipv4.address"] != "" {
err := networkValidAddressV4(m["ipv4.address"])
if err != nil {
return err
}
}
if m["ipv6.address"] != "" {
err := networkValidAddressV6(m["ipv6.address"])
if err != nil {
return err
}
}
} else if m["type"] == "infiniband" {
if m["nictype"] == "" {
return fmt.Errorf("Missing nic type")
}
if !shared.StringInSlice(m["nictype"], []string{"physical", "sriov"}) {
return fmt.Errorf("Bad nic type: %s", m["nictype"])
}
if m["parent"] == "" {
return fmt.Errorf("Missing parent for %s type nic", m["nictype"])
}
} else if m["type"] == "disk" {
if !expanded && !shared.StringInSlice(m["path"], diskDevicePaths) {
diskDevicePaths = append(diskDevicePaths, m["path"])
} else if !expanded {
return fmt.Errorf("More than one disk device uses the same path: %s", m["path"])
}
if m["path"] == "" {
return fmt.Errorf("Disk entry is missing the required \"path\" property")
}
if m["source"] == "" && m["path"] != "/" {
return fmt.Errorf("Disk entry is missing the required \"source\" property")
}
if m["path"] == "/" && m["source"] != "" {
return fmt.Errorf("Root disk entry may not have a \"source\" property set")
}
if m["size"] != "" && m["path"] != "/" {
return fmt.Errorf("Only the root disk may have a size quota")
}
if (m["path"] == "/" || !shared.IsDir(shared.HostPath(m["source"]))) && m["recursive"] != "" {
return fmt.Errorf("The recursive option is only supported for additional bind-mounted paths")
}
if m["pool"] != "" {
if filepath.IsAbs(m["source"]) {
return fmt.Errorf("Storage volumes cannot be specified as absolute paths")
}
_, err := cluster.StoragePoolGetID(m["pool"])
if err != nil {
return fmt.Errorf("The \"%s\" storage pool doesn't exist", m["pool"])
}
if !profile && expanded && m["source"] != "" && m["path"] != "/" {
isAvailable, err := cluster.StorageVolumeIsAvailable(
m["pool"], m["source"])
if err != nil {
return errors.Wrap(err, "Check if volume is available")
}
if !isAvailable {
return fmt.Errorf(
"Storage volume %q is already attached to a container "+
"on a different node", m["source"])
}
}
}
if m["propagation"] != "" {
if !util.RuntimeLiblxcVersionAtLeast(3, 0, 0) {
return fmt.Errorf("liblxc 3.0 is required for mount propagation configuration")
}
if !shared.StringInSlice(m["propagation"], []string{"private", "shared", "slave", "unbindable", "rprivate", "rshared", "rslave", "runbindable"}) {
return fmt.Errorf("Invalid propagation mode '%s'", m["propagation"])
}
}
} else if shared.StringInSlice(m["type"], []string{"unix-char", "unix-block"}) {
if m["source"] == "" && m["path"] == "" {
return fmt.Errorf("Unix device entry is missing the required \"source\" or \"path\" property")
}
if (m["required"] == "" || shared.IsTrue(m["required"])) && (m["major"] == "" || m["minor"] == "") {
srcPath, exist := m["source"]
if !exist {
srcPath = m["path"]
}
if !shared.PathExists(srcPath) {
return fmt.Errorf("The device path doesn't exist on the host and major/minor wasn't specified")
}
dType, _, _, err := deviceGetAttributes(srcPath)
if err != nil {
return err
}
if m["type"] == "unix-char" && dType != "c" {
return fmt.Errorf("Path specified for unix-char device is a block device")
}
if m["type"] == "unix-block" && dType != "b" {
return fmt.Errorf("Path specified for unix-block device is a character device")
}
}
} else if m["type"] == "usb" {
// Nothing needed for usb.
} else if m["type"] == "gpu" {
if m["pci"] != "" && !shared.PathExists(fmt.Sprintf("/sys/bus/pci/devices/%s", m["pci"])) {
return fmt.Errorf("Invalid PCI address (no device found): %s", m["pci"])
}
if m["pci"] != "" && (m["id"] != "" || m["productid"] != "" || m["vendorid"] != "") {
return fmt.Errorf("Cannot use id, productid or vendorid when pci is set")
}
if m["id"] != "" && (m["pci"] != "" || m["productid"] != "" || m["vendorid"] != "") {
return fmt.Errorf("Cannot use pci, productid or vendorid when id is set")
}
} else if m["type"] == "proxy" {
if m["listen"] == "" {
return fmt.Errorf("Proxy device entry is missing the required \"listen\" property")
}
if m["connect"] == "" {
return fmt.Errorf("Proxy device entry is missing the required \"connect\" property")
}
listenAddr, err := parseAddr(m["listen"])
if err != nil {
return err
}
connectAddr, err := parseAddr(m["connect"])
if err != nil {
return err
}
if len(connectAddr.addr) > len(listenAddr.addr) {
// Cannot support single port -> multiple port
return fmt.Errorf("Cannot map a single port to multiple ports")
}
if shared.IsTrue(m["proxy_protocol"]) && !strings.HasPrefix(m["connect"], "tcp") {
return fmt.Errorf("The PROXY header can only be sent to tcp servers")
}
if (!strings.HasPrefix(m["listen"], "unix:") || strings.HasPrefix(m["listen"], "unix:@")) &&
(m["uid"] != "" || m["gid"] != "" || m["mode"] != "") {
return fmt.Errorf("Only proxy devices for non-abstract unix sockets can carry uid, gid, or mode properties")
}
if shared.IsTrue(m["nat"]) {
if m["bind"] != "" && m["bind"] != "host" {
return fmt.Errorf("Only host-bound proxies can use NAT")
}
// Support TCP <-> TCP and UDP <-> UDP
if listenAddr.connType == "unix" || connectAddr.connType == "unix" ||
listenAddr.connType != connectAddr.connType {
return fmt.Errorf("Proxying %s <-> %s is not supported when using NAT",
listenAddr.connType, connectAddr.connType)
}
}
} else if m["type"] == "none" {
continue
} else {
return fmt.Errorf("Invalid device type: %s", m["type"])
}
}
// Checks on the expanded config
if expanded {
_, _, err := shared.GetRootDiskDevice(devices)
if err != nil {
return errors.Wrap(err, "Detect root disk device")
}
}
return nil
}
// The container interface
//
// container is the abstraction implemented by LXC-backed containers. It
// covers lifecycle actions, snapshot/migration/backup handling, config and
// file management, console/exec access, status rendering, and accessors for
// database-backed properties and on-disk paths.
type container interface {
	// Container actions
	Freeze() error
	Shutdown(timeout time.Duration) error
	Start(stateful bool) error
	Stop(stateful bool) error
	Unfreeze() error

	// Snapshots & migration & backups
	Restore(sourceContainer container, stateful bool) error
	/* actionScript here is a script called action.sh in the stateDir, to
	 * be passed to CRIU as --action-script
	 */
	Migrate(args *CriuMigrationArgs) error
	Snapshots() ([]container, error)
	Backups() ([]backup, error)

	// Config handling
	Rename(newName string) error
	Update(newConfig db.ContainerArgs, userRequested bool) error
	Delete() error
	Export(w io.Writer, properties map[string]string) error

	// Live configuration
	CGroupGet(key string) (string, error)
	CGroupSet(key string, value string) error
	ConfigKeySet(key string, value string) error

	// File handling
	FileExists(path string) error
	FilePull(srcpath string, dstpath string) (int64, int64, os.FileMode, string, []string, error)
	FilePush(type_ string, srcpath string, dstpath string, uid int64, gid int64, mode int, write string) error
	FileRemove(path string) error

	// Console - Allocate and run a console tty.
	//
	// terminal - Bidirectional file descriptor.
	//
	// This function will not return until the console has been exited by
	// the user.
	Console(terminal *os.File) *exec.Cmd
	ConsoleLog(opts lxc.ConsoleLogOptions) (string, error)
	/* Command execution:
	 * 1. passing in false for wait
	 *    - equivalent to calling cmd.Run()
	 * 2. passing in true for wait
	 *    - start the command and return its PID in the first return
	 *      argument and the PID of the attached process in the second
	 *      argument. It's the callers responsibility to wait on the
	 *      command. (Note. The returned PID of the attached process can not
	 *      be waited upon since it's a child of the lxd forkexec command
	 *      (the PID returned in the first return argument). It can however
	 *      be used to e.g. forward signals.)
	 */
	Exec(command []string, env map[string]string, stdin *os.File, stdout *os.File, stderr *os.File, wait bool) (*exec.Cmd, int, int, error)

	// Status
	Render() (interface{}, interface{}, error)
	RenderFull() (*api.ContainerFull, interface{}, error)
	RenderState() (*api.ContainerState, error)
	IsPrivileged() bool
	IsRunning() bool
	IsFrozen() bool
	IsEphemeral() bool
	IsSnapshot() bool
	IsStateful() bool
	IsNesting() bool

	// Hooks
	OnStart() error
	OnStop(target string) error

	// Properties (database-backed metadata)
	Id() int
	Project() string
	Name() string
	Description() string
	Architecture() int
	CreationDate() time.Time
	LastUsedDate() time.Time
	ExpandedConfig() map[string]string
	ExpandedDevices() types.Devices
	LocalConfig() map[string]string
	LocalDevices() types.Devices
	Profiles() []string
	InitPID() int
	State() string
	ExpiryDate() time.Time

	// Paths (on-disk locations)
	Path() string
	RootfsPath() string
	TemplatesPath() string
	StatePath() string
	LogFilePath() string
	ConsoleBufferLogPath() string
	LogPath() string

	// Storage
	StoragePool() (string, error)

	// Progress reporting
	SetOperation(op *operation)

	// FIXME: Those should be internal functions
	// Needed for migration for now.
	StorageStart() (bool, error)
	StorageStop() (bool, error)
	Storage() storage
	IdmapSet() (*idmap.IdmapSet, error)
	LastIdmapSet() (*idmap.IdmapSet, error)
	TemplateApply(trigger string) error
	DaemonState() *state.State
}
// Loader functions

// containerCreateAsEmpty creates a brand-new container with an empty rootfs:
// database entry first, then the storage volume, then any post-storage
// configuration. On any failure after the DB entry exists, the container is
// deleted again before returning.
func containerCreateAsEmpty(d *Daemon, args db.ContainerArgs) (container, error) {
	// Create the database entry and the in-memory struct.
	c, err := containerCreateInternal(d.State(), args)
	if err != nil {
		return nil, err
	}

	// Create the (empty) storage volume, undoing creation on failure.
	if err := c.Storage().ContainerCreate(c); err != nil {
		c.Delete()
		return nil, err
	}

	// Apply any configuration that requires the storage to exist.
	if err := containerConfigureInternal(c); err != nil {
		c.Delete()
		return nil, err
	}

	return c, nil
}
// containerCreateFromBackup restores a container from a backup tarball. The
// storage pool referenced by the backup's index.yaml is used when it exists;
// otherwise the default profile's root pool is used and the backup file is
// rewritten afterwards to point at it. Restoring into a missing pool fails
// when the backup is in binary format, since that format cannot be rewritten.
func containerCreateFromBackup(s *state.State, info backupInfo, data io.ReadSeeker,
	customPool bool) error {
	var pool storage
	var fixBackupFile = false
	// Get storage pool from index.yaml
	pool, storageErr := storagePoolInit(s, info.Pool)
	if storageErr != nil && errors.Cause(storageErr) != db.ErrNoSuchObject {
		// Unexpected error
		return storageErr
	}
	if errors.Cause(storageErr) == db.ErrNoSuchObject {
		// The pool doesn't exist, and the backup is in binary format so we
		// cannot alter the backup.yaml.
		if info.HasBinaryFormat {
			return storageErr
		}
		// Get the default profile
		_, profile, err := s.Cluster.ProfileGet(info.Project, "default")
		if err != nil {
			return errors.Wrap(err, "Failed to get default profile")
		}
		_, v, err := shared.GetRootDiskDevice(profile.Devices)
		if err != nil {
			return errors.Wrap(err, "Failed to get root disk device")
		}
		// Use the default-profile's root pool
		pool, err = storagePoolInit(s, v["pool"])
		if err != nil {
			return errors.Wrap(err, "Failed to initialize storage pool")
		}
		fixBackupFile = true
	}
	// Find the compression algorithm
	tarArgs, _, _, err := shared.DetectCompressionFile(data)
	if err != nil {
		return err
	}
	// Rewind the stream before handing it to the storage backend. The
	// original code discarded a possible seek failure here, which would
	// surface later as a confusing unpack error.
	_, err = data.Seek(0, io.SeekStart)
	if err != nil {
		return err
	}
	// Unpack tarball
	err = pool.ContainerBackupLoad(info, data, tarArgs)
	if err != nil {
		return err
	}
	if fixBackupFile || customPool {
		// Update the pool recorded in backup.yaml
		err = backupFixStoragePool(s.Cluster, info, !customPool)
		if err != nil {
			return err
		}
	}
	return nil
}
// containerCreateEmptySnapshot creates a snapshot database record plus the
// matching empty snapshot on the storage backend, rolling the record back if
// the storage step fails.
func containerCreateEmptySnapshot(s *state.State, args db.ContainerArgs) (container, error) {
	// Create the database entry for the snapshot.
	c, err := containerCreateInternal(s, args)
	if err != nil {
		return nil, err
	}

	// Create the empty snapshot on the storage backend; undo the database
	// entry if that fails.
	if err := c.Storage().ContainerSnapshotCreateEmpty(c); err != nil {
		c.Delete()
		return nil, err
	}

	return c, nil
}
// containerCreateFromImage creates a new container from the image with the
// given fingerprint hash. If the image only exists on another cluster node it
// is transferred locally first. Image properties are copied into the
// container config as "image.*" keys and the image's last-use timestamp is
// refreshed.
func containerCreateFromImage(d *Daemon, args db.ContainerArgs, hash string, tracker *ioprogress.ProgressTracker) (container, error) {
	s := d.State()
	// Get the image properties
	_, img, err := s.Cluster.ImageGet(args.Project, hash, false, false)
	if err != nil {
		return nil, errors.Wrapf(err, "Fetch image %s from database", hash)
	}
	// Check if the image is available locally or it's on another node.
	nodeAddress, err := s.Cluster.ImageLocate(hash)
	if err != nil {
		return nil, errors.Wrapf(err, "Locate image %s in the cluster", hash)
	}
	if nodeAddress != "" {
		// The image is available from another node, let's try to
		// import it.
		logger.Debugf("Transferring image %s from node %s", hash, nodeAddress)
		client, err := cluster.Connect(nodeAddress, d.endpoints.NetworkCert(), false)
		if err != nil {
			return nil, err
		}
		client = client.UseProject(args.Project)
		err = imageImportFromNode(filepath.Join(d.os.VarDir, "images"), client, hash)
		if err != nil {
			return nil, err
		}
		// Record in the database that the image is now on this node too.
		err = d.cluster.ImageAssociateNode(args.Project, hash)
		if err != nil {
			return nil, err
		}
	}
	// Set the "image.*" keys
	if img.Properties != nil {
		for k, v := range img.Properties {
			args.Config[fmt.Sprintf("image.%s", k)] = v
		}
	}
	// Set the BaseImage field (regardless of previous value)
	args.BaseImage = hash
	// Create the container
	c, err := containerCreateInternal(s, args)
	if err != nil {
		return nil, errors.Wrap(err, "Create container")
	}
	err = s.Cluster.ImageLastAccessUpdate(hash, time.Now().UTC())
	if err != nil {
		c.Delete()
		return nil, fmt.Errorf("Error updating image last use date: %s", err)
	}
	// Now create the storage from an image
	err = c.Storage().ContainerCreateFromImage(c, hash, tracker)
	if err != nil {
		c.Delete()
		return nil, errors.Wrap(err, "Create container from image")
	}
	// Apply any post-storage configuration
	err = containerConfigureInternal(c)
	if err != nil {
		c.Delete()
		return nil, errors.Wrap(err, "Configure container")
	}
	return c, nil
}
// containerCreateAsCopy creates a copy of sourceContainer, optionally
// including its snapshots. When refresh is true an existing target container
// is updated in place (syncing only the snapshots that differ) instead of
// being created from scratch. Note that on failure the new container is only
// deleted in the non-refresh case, to avoid destroying a pre-existing target.
func containerCreateAsCopy(s *state.State, args db.ContainerArgs, sourceContainer container, containerOnly bool, refresh bool) (container, error) {
	var ct container
	var err error
	if refresh {
		// Load the target container
		ct, err = containerLoadByProjectAndName(s, args.Project, args.Name)
		if err != nil {
			// Target doesn't exist yet; fall back to a full copy.
			refresh = false
		}
	}
	if !refresh {
		// Create the container.
		ct, err = containerCreateInternal(s, args)
		if err != nil {
			return nil, err
		}
	}
	if refresh && ct.IsRunning() {
		return nil, fmt.Errorf("Cannot refresh a running container")
	}
	// At this point we have already figured out the parent
	// container's root disk device so we can simply
	// retrieve it from the expanded devices.
	parentStoragePool := ""
	parentExpandedDevices := ct.ExpandedDevices()
	parentLocalRootDiskDeviceKey, parentLocalRootDiskDevice, _ := shared.GetRootDiskDevice(parentExpandedDevices)
	if parentLocalRootDiskDeviceKey != "" {
		parentStoragePool = parentLocalRootDiskDevice["pool"]
	}
	csList := []*container{}
	var snapshots []container
	if !containerOnly {
		if refresh {
			// Compare snapshots
			syncSnapshots, deleteSnapshots, err := containerCompareSnapshots(sourceContainer, ct)
			if err != nil {
				return nil, err
			}
			// Delete extra snapshots
			for _, snap := range deleteSnapshots {
				err := snap.Delete()
				if err != nil {
					return nil, err
				}
			}
			// Only care about the snapshots that need updating
			snapshots = syncSnapshots
		} else {
			// Get snapshots of source container
			snapshots, err = sourceContainer.Snapshots()
			if err != nil {
				ct.Delete()
				return nil, err
			}
		}
		for _, snap := range snapshots {
			fields := strings.SplitN(snap.Name(), shared.SnapshotDelimiter, 2)
			// Ensure that snapshot and parent container have the
			// same storage pool in their local root disk device.
			// If the root disk device for the snapshot comes from a
			// profile on the new instance as well we don't need to
			// do anything.
			snapDevices := snap.LocalDevices()
			if snapDevices != nil {
				snapLocalRootDiskDeviceKey, _, _ := shared.GetRootDiskDevice(snapDevices)
				if snapLocalRootDiskDeviceKey != "" {
					snapDevices[snapLocalRootDiskDeviceKey]["pool"] = parentStoragePool
				} else {
					snapDevices["root"] = map[string]string{
						"type": "disk",
						"path": "/",
						"pool": parentStoragePool,
					}
				}
			}
			// Re-parent the snapshot name onto the new container.
			newSnapName := fmt.Sprintf("%s/%s", ct.Name(), fields[1])
			csArgs := db.ContainerArgs{
				Architecture: snap.Architecture(),
				Config:       snap.LocalConfig(),
				Ctype:        db.CTypeSnapshot,
				Devices:      snapDevices,
				Description:  snap.Description(),
				Ephemeral:    snap.IsEphemeral(),
				Name:         newSnapName,
				Profiles:     snap.Profiles(),
				Project:      args.Project,
			}
			// Create the snapshots.
			cs, err := containerCreateInternal(s, csArgs)
			if err != nil {
				if !refresh {
					ct.Delete()
				}
				return nil, err
			}
			// Restore snapshot creation date
			err = s.Cluster.ContainerCreationUpdate(cs.Id(), snap.CreationDate())
			if err != nil {
				if !refresh {
					ct.Delete()
				}
				return nil, err
			}
			csList = append(csList, &cs)
		}
	}
	// Now clone or refresh the storage
	if refresh {
		err = ct.Storage().ContainerRefresh(ct, sourceContainer, snapshots)
		if err != nil {
			return nil, err
		}
	} else {
		err = ct.Storage().ContainerCopy(ct, sourceContainer, containerOnly)
		if err != nil {
			if !refresh {
				ct.Delete()
			}
			return nil, err
		}
	}
	// Apply any post-storage configuration.
	err = containerConfigureInternal(ct)
	if err != nil {
		if !refresh {
			ct.Delete()
		}
		return nil, err
	}
	if !containerOnly {
		// Apply any post-storage configuration to the snapshots too.
		for _, cs := range csList {
			// Apply any post-storage configuration.
			err = containerConfigureInternal(*cs)
			if err != nil {
				if !refresh {
					ct.Delete()
				}
				return nil, err
			}
		}
	}
	return ct, nil
}
// containerCreateAsSnapshot creates a snapshot of sourceContainer. For
// stateful snapshots the container's runtime state is first dumped via CRIU
// into the state directory, which is removed again once the snapshot exists.
func containerCreateAsSnapshot(s *state.State, args db.ContainerArgs, sourceContainer container) (container, error) {
	// Deal with state
	if args.Stateful {
		// A stateful snapshot requires a running container and CRIU.
		if !sourceContainer.IsRunning() {
			return nil, fmt.Errorf("Unable to create a stateful snapshot. The container isn't running")
		}
		_, err := exec.LookPath("criu")
		if err != nil {
			return nil, fmt.Errorf("Unable to create a stateful snapshot. CRIU isn't installed")
		}
		stateDir := sourceContainer.StatePath()
		err = os.MkdirAll(stateDir, 0700)
		if err != nil {
			return nil, err
		}
		/* TODO: ideally we would freeze here and unfreeze below after
		 * we've copied the filesystem, to make sure there are no
		 * changes by the container while snapshotting. Unfortunately
		 * there is a bug in CRIU where it doesn't leave the container
		 * in the same state it found it w.r.t. freezing, i.e. CRIU
		 * freezes too, and then /always/ thaws, even if the container
		 * was frozen. Until that's fixed, all calls to Unfreeze()
		 * after snapshotting will fail.
		 */
		criuMigrationArgs := CriuMigrationArgs{
			cmd:          lxc.MIGRATE_DUMP,
			stateDir:     stateDir,
			function:     "snapshot",
			stop:         false,
			actionScript: false,
			dumpDir:      "",
			preDumpDir:   "",
		}
		err = sourceContainer.Migrate(&criuMigrationArgs)
		if err != nil {
			os.RemoveAll(sourceContainer.StatePath())
			return nil, err
		}
	}
	// Create the snapshot
	c, err := containerCreateInternal(s, args)
	if err != nil {
		return nil, err
	}
	// Clone the container
	err = sourceContainer.Storage().ContainerSnapshotCreate(c, sourceContainer)
	if err != nil {
		c.Delete()
		return nil, err
	}
	ourStart, err := c.StorageStart()
	if err != nil {
		c.Delete()
		return nil, err
	}
	if ourStart {
		defer c.StorageStop()
	}
	// Refresh the source container's backup.yaml to record the new snapshot.
	err = writeBackupFile(sourceContainer)
	if err != nil {
		c.Delete()
		return nil, err
	}
	// Once we're done, remove the state directory
	if args.Stateful {
		os.RemoveAll(sourceContainer.StatePath())
	}
	eventSendLifecycle(sourceContainer.Project(), "container-snapshot-created",
		fmt.Sprintf("/1.0/containers/%s", sourceContainer.Name()),
		map[string]interface{}{
			"snapshot_name": args.Name,
		})
	return c, nil
}
// containerCreateInternal validates the requested container configuration,
// creates its database entry, and instantiates the LXC container struct. It
// is the common core used by all the containerCreate* helpers. The database
// entry is removed again if the final LXC setup step fails.
func containerCreateInternal(s *state.State, args db.ContainerArgs) (container, error) {
	// Set default values
	if args.Project == "" {
		args.Project = "default"
	}
	if args.Profiles == nil {
		args.Profiles = []string{"default"}
	}
	if args.Config == nil {
		args.Config = map[string]string{}
	}
	if args.BaseImage != "" {
		args.Config["volatile.base_image"] = args.BaseImage
	}
	if args.Devices == nil {
		args.Devices = types.Devices{}
	}
	if args.Architecture == 0 {
		args.Architecture = s.OS.Architectures[0]
	}
	// Validate container name
	if args.Ctype == db.CTypeRegular {
		err := containerValidName(args.Name)
		if err != nil {
			return nil, err
		}
		// Unset expiry date since containers don't expire
		args.ExpiryDate = time.Time{}
	}
	// Validate container config
	err := containerValidConfig(s.OS, args.Config, false, false)
	if err != nil {
		return nil, err
	}
	// Validate container devices
	err = containerValidDevices(s.Cluster, args.Devices, false, false)
	if err != nil {
		return nil, errors.Wrap(err, "Invalid devices")
	}
	// Validate architecture
	_, err = osarch.ArchitectureName(args.Architecture)
	if err != nil {
		return nil, err
	}
	if !shared.IntInSlice(args.Architecture, s.OS.Architectures) {
		return nil, fmt.Errorf("Requested architecture isn't supported by this host")
	}
	// Validate profiles
	profiles, err := s.Cluster.Profiles(args.Project)
	if err != nil {
		return nil, err
	}
	checkedProfiles := []string{}
	for _, profile := range args.Profiles {
		if !shared.StringInSlice(profile, profiles) {
			return nil, fmt.Errorf("Requested profile '%s' doesn't exist", profile)
		}
		if shared.StringInSlice(profile, checkedProfiles) {
			return nil, fmt.Errorf("Duplicate profile found in request")
		}
		checkedProfiles = append(checkedProfiles, profile)
	}
	if args.CreationDate.IsZero() {
		args.CreationDate = time.Now().UTC()
	}
	if args.LastUsedDate.IsZero() {
		args.LastUsedDate = time.Unix(0, 0).UTC()
	}
	var container db.Container
	err = s.Cluster.Transaction(func(tx *db.ClusterTx) error {
		node, err := tx.NodeName()
		if err != nil {
			return err
		}
		// TODO: this check should probably be performed by the db
		// package itself.
		exists, err := tx.ProjectExists(args.Project)
		if err != nil {
			return errors.Wrapf(err, "Check if project %q exists", args.Project)
		}
		if !exists {
			return fmt.Errorf("Project %q does not exist", args.Project)
		}
		// Create the container entry
		container = db.Container{
			Project:      args.Project,
			Name:         args.Name,
			Node:         node,
			Type:         int(args.Ctype),
			Architecture: args.Architecture,
			Ephemeral:    args.Ephemeral,
			CreationDate: args.CreationDate,
			Stateful:     args.Stateful,
			LastUseDate:  args.LastUsedDate,
			Description:  args.Description,
			Config:       args.Config,
			Devices:      args.Devices,
			Profiles:     args.Profiles,
			ExpiryDate:   args.ExpiryDate,
		}
		_, err = tx.ContainerCreate(container)
		if err != nil {
			return errors.Wrap(err, "Add container info to the database")
		}
		// Read back the container, to get ID and creation time.
		c, err := tx.ContainerGet(args.Project, args.Name)
		if err != nil {
			return errors.Wrap(err, "Fetch created container from the database")
		}
		container = *c
		if container.ID < 1 {
			// Use fmt.Errorf here: the original code wrapped err,
			// which is nil at this point, so errors.Wrapf returned
			// nil and the sanity check never fired.
			return fmt.Errorf("Unexpected container database ID %d", container.ID)
		}
		return nil
	})
	if err != nil {
		if err == db.ErrAlreadyDefined {
			thing := "Container"
			if shared.IsSnapshot(args.Name) {
				thing = "Snapshot"
			}
			return nil, fmt.Errorf("%s '%s' already exists", thing, args.Name)
		}
		return nil, err
	}
	// Wipe any existing log for this container name
	os.RemoveAll(shared.LogPath(args.Name))
	args = db.ContainerToArgs(&container)
	// Setup the container struct and finish creation (storage and idmap)
	c, err := containerLXCCreate(s, args)
	if err != nil {
		// Unwind the database entry created above.
		s.Cluster.ContainerRemove(args.Project, args.Name)
		return nil, errors.Wrap(err, "Create LXC container")
	}
	return c, nil
}
// containerConfigureInternal applies post-creation configuration that needs
// the container's storage to be mounted: the root disk size quota and the
// on-disk backup.yaml file.
func containerConfigureInternal(c container) error {
	// Find the root device
	_, rootDiskDevice, err := shared.GetRootDiskDevice(c.ExpandedDevices())
	if err != nil {
		return err
	}
	ourStart, err := c.StorageStart()
	if err != nil {
		return err
	}
	// Register the unmount immediately so the mount isn't leaked when one
	// of the quota operations below returns early with an error (the
	// original code only set up this defer after the quota handling).
	if ourStart {
		defer c.StorageStop()
	}
	// handle quota: at this point, storage is guaranteed to be ready
	storage := c.Storage()
	if rootDiskDevice["size"] != "" {
		storageTypeName := storage.GetStorageTypeName()
		if (storageTypeName == "lvm" || storageTypeName == "ceph") && c.IsRunning() {
			// LVM/Ceph volumes can't be resized while in use; record
			// the quota in a volatile key to apply on next start.
			err = c.ConfigKeySet("volatile.apply_quota", rootDiskDevice["size"])
			if err != nil {
				return err
			}
		} else {
			size, err := shared.ParseByteSizeString(rootDiskDevice["size"])
			if err != nil {
				return err
			}
			err = storage.StorageEntitySetQuota(storagePoolVolumeTypeContainer, size, c)
			if err != nil {
				return err
			}
		}
	}
	err = writeBackupFile(c)
	if err != nil {
		return err
	}
	return nil
}
// containerLoadById resolves a container database ID to its project and name,
// then loads the container through the regular by-name path.
func containerLoadById(s *state.State, id int) (container, error) {
	// Resolve the ID to (project, name).
	proj, ctName, err := s.Cluster.ContainerProjectAndName(id)
	if err != nil {
		return nil, err
	}

	return containerLoadByProjectAndName(s, proj, ctName)
}
// containerLoadByProjectAndName fetches the database record for the named
// container in the given project and instantiates the corresponding
// in-memory container struct.
func containerLoadByProjectAndName(s *state.State, project, name string) (container, error) {
	// Fetch the database record.
	var record *db.Container
	err := s.Cluster.Transaction(func(tx *db.ClusterTx) error {
		got, err := tx.ContainerGet(project, name)
		if err != nil {
			return errors.Wrapf(err, "Failed to fetch container %q in project %q", name, project)
		}
		record = got
		return nil
	})
	if err != nil {
		return nil, err
	}

	// Build the container struct from the database args.
	c, err := containerLXCLoad(s, db.ContainerToArgs(record), nil)
	if err != nil {
		return nil, errors.Wrap(err, "Failed to load container")
	}

	return c, nil
}
// containerLoadByProject loads every regular (non-snapshot) container that
// belongs to the given project.
func containerLoadByProject(s *state.State, project string) ([]container, error) {
	// Fetch the matching database records.
	var cts []db.Container
	err := s.Cluster.Transaction(func(tx *db.ClusterTx) error {
		var err error
		cts, err = tx.ContainerList(db.ContainerFilter{
			Project: project,
			Type:    int(db.CTypeRegular),
		})
		return err
	})
	if err != nil {
		return nil, err
	}

	return containerLoadAllInternal(cts, s)
}
// Load all containers across all projects.
//
// containerLoadFromAllProjects enumerates every project and concatenates the
// containers loaded from each.
func containerLoadFromAllProjects(s *state.State) ([]container, error) {
	// Get the list of project names.
	var projects []string
	err := s.Cluster.Transaction(func(tx *db.ClusterTx) error {
		var err error
		projects, err = tx.ProjectNames()
		return err
	})
	if err != nil {
		return nil, err
	}
	containers := []container{}
	for _, project := range projects {
		projectContainers, err := containerLoadByProject(s, project)
		if err != nil {
			// Wrap the actual error. The original code passed nil to
			// errors.Wrapf, which returns nil and silently swallowed
			// the failure.
			return nil, errors.Wrapf(err, "Load containers in project %s", project)
		}
		containers = append(containers, projectContainers...)
	}
	return containers, nil
}
// Legacy interface.
//
// containerLoadAll returns all regular containers in the "default" project.
func containerLoadAll(s *state.State) ([]container, error) {
	return containerLoadByProject(s, "default")
}
// Load all containers of this nodes.
//
// containerLoadNodeAll loads every container hosted on this cluster node,
// regardless of project.
func containerLoadNodeAll(s *state.State) ([]container, error) {
	// Fetch the database records for this node's containers.
	var cts []db.Container
	err := s.Cluster.Transaction(func(tx *db.ClusterTx) error {
		var err error
		cts, err = tx.ContainerNodeList()
		return err
	})
	if err != nil {
		return nil, err
	}

	return containerLoadAllInternal(cts, s)
}
// Load all containers of this nodes under the given project.
//
// containerLoadNodeProjectAll loads the containers hosted on this cluster
// node that belong to the given project.
func containerLoadNodeProjectAll(s *state.State, project string) ([]container, error) {
	// Fetch the database records for this node's containers in the project.
	var cts []db.Container
	err := s.Cluster.Transaction(func(tx *db.ClusterTx) error {
		var err error
		cts, err = tx.ContainerNodeProjectList(project)
		return err
	})
	if err != nil {
		return nil, err
	}

	return containerLoadAllInternal(cts, s)
}
// containerLoadAllInternal turns a list of container database records into
// in-memory container structs, pre-fetching every referenced profile once per
// (project, profile) pair instead of once per container.
func containerLoadAllInternal(cts []db.Container, s *state.State) ([]container, error) {
	// Collect the set of profiles referenced in each project.
	profiles := map[string]map[string]api.Profile{}
	for _, rec := range cts {
		byName, ok := profiles[rec.Project]
		if !ok {
			byName = map[string]api.Profile{}
			profiles[rec.Project] = byName
		}
		for _, p := range rec.Profiles {
			if _, ok := byName[p]; !ok {
				byName[p] = api.Profile{}
			}
		}
	}

	// Fetch the data for every referenced profile.
	for project, byName := range profiles {
		for name := range byName {
			_, profile, err := s.Cluster.ProfileGet(project, name)
			if err != nil {
				return nil, err
			}
			byName[name] = *profile
		}
	}

	// Instantiate a container struct for each database record.
	containers := []container{}
	for i := range cts {
		rec := cts[i]
		cProfiles := []api.Profile{}
		for _, name := range rec.Profiles {
			cProfiles = append(cProfiles, profiles[rec.Project][name])
		}
		ct, err := containerLXCLoad(s, db.ContainerToArgs(&rec), cProfiles)
		if err != nil {
			return nil, err
		}
		containers = append(containers, ct)
	}

	return containers, nil
}
// containerCompareSnapshots diffs the snapshots of source and target by short
// name and creation date. It returns the source snapshots that must be synced
// to the target and the target snapshots that must be deleted (missing from
// the source, or present with a different creation date).
func containerCompareSnapshots(source container, target container) ([]container, []container, error) {
	// List snapshots on both sides.
	sourceSnapshots, err := source.Snapshots()
	if err != nil {
		return nil, nil, err
	}
	targetSnapshots, err := target.Snapshots()
	if err != nil {
		return nil, nil, err
	}

	// Index source snapshots by their short name.
	sourceTimes := map[string]time.Time{}
	for _, snap := range sourceSnapshots {
		_, shortName, _ := containerGetParentAndSnapshotName(snap.Name())
		sourceTimes[shortName] = snap.CreationDate()
	}

	// Target snapshots missing from the source, or whose creation date
	// differs, are stale and must be deleted.
	targetTimes := map[string]time.Time{}
	toDelete := []container{}
	for _, snap := range targetSnapshots {
		_, shortName, _ := containerGetParentAndSnapshotName(snap.Name())
		targetTimes[shortName] = snap.CreationDate()

		when, ok := sourceTimes[shortName]
		if !ok || when != snap.CreationDate() {
			toDelete = append(toDelete, snap)
		}
	}

	// Source snapshots missing from the target, or whose creation date
	// differs, must be synced over.
	toSync := []container{}
	for _, snap := range sourceSnapshots {
		_, shortName, _ := containerGetParentAndSnapshotName(snap.Name())

		when, ok := targetTimes[shortName]
		if !ok || when != snap.CreationDate() {
			toSync = append(toSync, snap)
		}
	}

	return toSync, toDelete, nil
}
// autoCreateContainerSnapshotsTask returns the task function and schedule for
// the scheduled-snapshots background job. The task runs every minute (the
// very first tick after startup is skipped), finds local containers whose
// "snapshots.schedule" cron expression is due within the next minute, and
// snapshots them through a single task operation.
func autoCreateContainerSnapshotsTask(d *Daemon) (task.Func, task.Schedule) {
	f := func(ctx context.Context) {
		// Load all local containers
		allContainers, err := containerLoadNodeAll(d.State())
		if err != nil {
			logger.Error("Failed to load containers for scheduled snapshots", log.Ctx{"err": err})
			return
		}
		// Figure out which need snapshotting (if any)
		containers := []container{}
		for _, c := range allContainers {
			schedule := c.LocalConfig()["snapshots.schedule"]
			if schedule == "" {
				continue
			}
			// Extend our schedule to one that is accepted by the used cron parser
			sched, err := cron.Parse(fmt.Sprintf("* %s", schedule))
			if err != nil {
				// Unparsable expression; skip this container.
				continue
			}
			// Check if it's time to snapshot
			now := time.Now()
			next := sched.Next(now)
			if now.Add(time.Minute).Before(next) {
				continue
			}
			// Check if the container is running
			if !shared.IsTrue(c.LocalConfig()["snapshots.schedule.stopped"]) && !c.IsRunning() {
				continue
			}
			containers = append(containers, c)
		}
		if len(containers) == 0 {
			return
		}
		// Run the snapshot creation inside a task operation.
		opRun := func(op *operation) error {
			return autoCreateContainerSnapshots(ctx, d, containers)
		}
		op, err := operationCreate(d.cluster, "", operationClassTask, db.OperationSnapshotCreate, nil, nil, opRun, nil, nil)
		if err != nil {
			logger.Error("Failed to start create snapshot operation", log.Ctx{"err": err})
			return
		}
		logger.Info("Creating scheduled container snapshots")
		_, err = op.Run()
		if err != nil {
			logger.Error("Failed to create scheduled container snapshots", log.Ctx{"err": err})
		}
		logger.Info("Done creating scheduled container snapshots")
	}
	// Run once a minute; skip the first tick right after daemon startup.
	first := true
	schedule := func() (time.Duration, error) {
		interval := time.Minute
		if first {
			first = false
			return interval, task.ErrSkip
		}
		return interval, nil
	}
	return f, schedule
}
// autoCreateContainerSnapshots snapshots each of the given containers in
// turn. Per-container failures are logged but don't abort the run. The
// function returns early (without error) when ctx is cancelled.
func autoCreateContainerSnapshots(ctx context.Context, d *Daemon, containers []container) error {
	// Make the snapshots, one container at a time.
	for _, c := range containers {
		c := c // Pin the loop variable for the goroutine below.

		// Buffered so the goroutine can always complete its send and
		// exit even if we stop waiting due to context cancellation; an
		// unbuffered channel would leak the goroutine in that case.
		ch := make(chan error, 1)
		go func() {
			snapshotName, err := containerDetermineNextSnapshotName(d, c, "snap%d")
			if err != nil {
				logger.Error("Error retrieving next snapshot name", log.Ctx{"err": err, "container": c})
				ch <- nil
				return
			}
			snapshotName = fmt.Sprintf("%s%s%s", c.Name(), shared.SnapshotDelimiter, snapshotName)
			expiry, err := shared.GetSnapshotExpiry(time.Now(), c.LocalConfig()["snapshots.expiry"])
			if err != nil {
				logger.Error("Error getting expiry date", log.Ctx{"err": err, "container": c})
				ch <- nil
				return
			}
			args := db.ContainerArgs{
				Architecture: c.Architecture(),
				Config:       c.LocalConfig(),
				Ctype:        db.CTypeSnapshot,
				Devices:      c.LocalDevices(),
				Ephemeral:    c.IsEphemeral(),
				Name:         snapshotName,
				Profiles:     c.Profiles(),
				Project:      c.Project(),
				Stateful:     false,
				ExpiryDate:   expiry,
			}
			_, err = containerCreateAsSnapshot(d.State(), args, c)
			if err != nil {
				logger.Error("Error creating snapshots", log.Ctx{"err": err, "container": c})
			}
			ch <- nil
		}()
		select {
		case <-ctx.Done():
			return nil
		case <-ch:
		}
	}
	return nil
}
// pruneExpiredContainerSnapshotsTask returns the task function and schedule
// for the snapshot-expiry background job. Every minute (skipping the first
// tick after startup) it collects the local snapshots whose expiry date has
// passed and deletes them inside a task operation.
func pruneExpiredContainerSnapshotsTask(d *Daemon) (task.Func, task.Schedule) {
	f := func(ctx context.Context) {
		// Load every container hosted on this node.
		allContainers, err := containerLoadNodeAll(d.State())
		if err != nil {
			logger.Error("Failed to load containers for snapshot expiry", log.Ctx{"err": err})
			return
		}

		// Collect every snapshot whose expiry date has passed.
		expiredSnapshots := []container{}
		for _, c := range allContainers {
			snapshots, err := c.Snapshots()
			if err != nil {
				logger.Error("Failed to list snapshots", log.Ctx{"err": err, "container": c.Name(), "project": c.Project()})
				continue
			}

			for _, snapshot := range snapshots {
				expiry := snapshot.ExpiryDate()
				if expiry.IsZero() {
					// No expiry configured; keep forever.
					continue
				}
				if time.Now().Unix() >= expiry.Unix() {
					expiredSnapshots = append(expiredSnapshots, snapshot)
				}
			}
		}

		if len(expiredSnapshots) == 0 {
			return
		}

		// Run the deletion inside a task operation.
		opRun := func(op *operation) error {
			return pruneExpiredContainerSnapshots(ctx, d, expiredSnapshots)
		}
		op, err := operationCreate(d.cluster, "", operationClassTask, db.OperationSnapshotsExpire, nil, nil, opRun, nil, nil)
		if err != nil {
			logger.Error("Failed to start expired snapshots operation", log.Ctx{"err": err})
			return
		}

		logger.Info("Pruning expired container snapshots")
		_, err = op.Run()
		if err != nil {
			logger.Error("Failed to remove expired container snapshots", log.Ctx{"err": err})
		}
		logger.Info("Done pruning expired container snapshots")
	}

	// Run once a minute; skip the first tick right after daemon startup.
	first := true
	schedule := func() (time.Duration, error) {
		interval := time.Minute
		if first {
			first = false
			return interval, task.ErrSkip
		}
		return interval, nil
	}

	return f, schedule
}
// pruneExpiredContainerSnapshots deletes each of the given expired snapshots,
// stopping at the first failure.
func pruneExpiredContainerSnapshots(ctx context.Context, d *Daemon, snapshots []container) error {
	for _, snapshot := range snapshots {
		if err := snapshot.Delete(); err != nil {
			return errors.Wrapf(err, "Failed to delete expired snapshot '%s' in project '%s'", snapshot.Name(), snapshot.Project())
		}
	}

	return nil
}
// containerDetermineNextSnapshotName computes the name for the container's
// next snapshot. It uses the container's "snapshots.pattern" config key
// (falling back to defaultPattern), renders template placeholders, fills a
// single '%d' with the next free index, and appends '-<index>' when the
// rendered name is already taken.
func containerDetermineNextSnapshotName(d *Daemon, c container, defaultPattern string) (string, error) {
	// Use the configured pattern, falling back to the supplied default.
	pattern := c.LocalConfig()["snapshots.pattern"]
	if pattern == "" {
		pattern = defaultPattern
	}

	// Render template placeholders such as creation_date.
	rendered, err := shared.RenderTemplate(pattern, pongo2.Context{
		"creation_date": time.Now(),
	})
	if err != nil {
		return "", err
	}
	pattern = rendered

	// A single '%d' is replaced with the next free snapshot index.
	switch strings.Count(pattern, "%d") {
	case 0:
		// No placeholder; handle possible collisions below.
	case 1:
		idx := d.cluster.ContainerNextSnapshot(c.Project(), c.Name(), pattern)
		return strings.Replace(pattern, "%d", strconv.Itoa(idx), 1), nil
	default:
		return "", fmt.Errorf("Snapshot pattern may contain '%%d' only once")
	}

	// Check whether the rendered name is already taken.
	snapshots, err := c.Snapshots()
	if err != nil {
		return "", err
	}
	taken := false
	for _, snap := range snapshots {
		_, snapOnlyName, _ := containerGetParentAndSnapshotName(snap.Name())
		if snapOnlyName == pattern {
			taken = true
			break
		}
	}

	// On collision, append '-<index>' using the next free index.
	if taken {
		pattern = fmt.Sprintf("%s-%%d", pattern)
		idx := d.cluster.ContainerNextSnapshot(c.Project(), c.Name(), pattern)
		return strings.Replace(pattern, "%d", strconv.Itoa(idx), 1), nil
	}

	return pattern, nil
}
lxd: Fix duplicate scheduled snapshots
This fixes scheduled snapshots being created twice in a row.
This fixes #5504.
Signed-off-by: Thomas Hipp <5f82c492b3b00e427412d216ce820707a10c51ce@canonical.com>
package main
import (
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
"golang.org/x/net/context"
"gopkg.in/lxc/go-lxc.v2"
"gopkg.in/robfig/cron.v2"
"github.com/flosch/pongo2"
"github.com/lxc/lxd/lxd/cluster"
"github.com/lxc/lxd/lxd/db"
"github.com/lxc/lxd/lxd/state"
"github.com/lxc/lxd/lxd/sys"
"github.com/lxc/lxd/lxd/task"
"github.com/lxc/lxd/lxd/types"
"github.com/lxc/lxd/lxd/util"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/idmap"
"github.com/lxc/lxd/shared/ioprogress"
log "github.com/lxc/lxd/shared/log15"
"github.com/lxc/lxd/shared/logger"
"github.com/lxc/lxd/shared/osarch"
"github.com/pkg/errors"
)
// Helper functions

// containerGetParentAndSnapshotName splits a name of the form
// "parent<delimiter>snapshot" and returns the parent container name, the
// snapshot name, and whether the input actually contained a snapshot part.
func containerGetParentAndSnapshotName(name string) (string, string, bool) {
	parts := strings.SplitN(name, shared.SnapshotDelimiter, 2)
	if len(parts) < 2 {
		// Plain container name, no snapshot part.
		return name, "", false
	}

	return parts[0], parts[1], true
}
// containerPath returns the on-disk path for a container (or snapshot) of
// the given name under the LXD variable directory.
func containerPath(name string, isSnapshot bool) string {
	// Snapshots and containers live in sibling directories.
	dir := "containers"
	if isSnapshot {
		dir = "snapshots"
	}

	return shared.VarPath(dir, name)
}
// containerValidName checks that name can be used for a regular container:
// it must not contain the snapshot delimiter and must be a valid hostname.
func containerValidName(name string) error {
	switch {
	case strings.Contains(name, shared.SnapshotDelimiter):
		return fmt.Errorf(
			"The character '%s' is reserved for snapshots.",
			shared.SnapshotDelimiter)
	case !shared.ValidHostname(name):
		return fmt.Errorf("Container name isn't a valid hostname")
	default:
		return nil
	}
}
// containerValidConfigKey validates a single config key/value pair against
// the shared schema, then applies the extra per-key checks that depend on
// runtime state (raw.lxc syntax, host architecture support).
func containerValidConfigKey(os *sys.OS, key string, value string) error {
	checker, err := shared.ConfigKeyChecker(key)
	if err != nil {
		return err
	}

	err = checker(value)
	if err != nil {
		return err
	}

	switch key {
	case "raw.lxc":
		// raw.lxc must itself be parseable LXC configuration.
		return lxcValidConfig(value)
	case "security.syscalls.blacklist_compat":
		// Only meaningful on hosts with a 64bit personality that also
		// supports a 32bit compat one.
		for _, arch := range os.Architectures {
			switch arch {
			case osarch.ARCH_64BIT_INTEL_X86,
				osarch.ARCH_64BIT_ARMV8_LITTLE_ENDIAN,
				osarch.ARCH_64BIT_POWERPC_BIG_ENDIAN:
				return nil
			}
		}

		return fmt.Errorf("security.syscalls.blacklist_compat isn't supported on this architecture")
	}

	return nil
}
// containerNetworkLimitKeys lists the nic device config keys that carry
// network traffic limits.
var containerNetworkLimitKeys = []string{"limits.max", "limits.ingress", "limits.egress"}
// containerValidDeviceConfigKey reports whether config key k is valid for a
// device of type t. The "type" key itself is accepted for every device type.
func containerValidDeviceConfigKey(t, k string) bool {
	if k == "type" {
		return true
	}

	// Allowed keys per device type. "unix-char" and "unix-block" share the
	// same set; "none" devices accept no key besides "type"; unknown types
	// accept nothing.
	unixKeys := []string{
		"gid", "major", "minor", "mode", "source", "path", "required", "uid",
	}
	validKeys := map[string][]string{
		"unix-char":  unixKeys,
		"unix-block": unixKeys,
		"nic": {
			"limits.max", "limits.ingress", "limits.egress",
			"host_name", "hwaddr", "mtu", "name", "nictype", "parent", "vlan",
			"ipv4.address", "ipv6.address", "security.mac_filtering",
			"maas.subnet.ipv4", "maas.subnet.ipv6",
		},
		"disk": {
			"limits.max", "limits.read", "limits.write",
			"optional", "path", "readonly", "size", "source", "recursive",
			"pool", "propagation",
		},
		"usb": {
			"vendorid", "productid", "mode", "gid", "uid", "required",
		},
		"gpu": {
			"vendorid", "productid", "id", "pci", "mode", "gid", "uid",
		},
		"infiniband": {
			"hwaddr", "mtu", "name", "nictype", "parent",
		},
		"proxy": {
			"bind", "connect", "gid", "listen", "mode", "proxy_protocol",
			"nat", "security.gid", "security.uid", "uid",
		},
	}

	allowed, ok := validKeys[t]
	if !ok {
		return false
	}

	for _, valid := range allowed {
		if k == valid {
			return true
		}
	}

	return false
}
// allowedUnprivilegedOnlyMap checks a raw.idmap string against the
// unprivileged-only policy: no entry may map host uid/gid 0 (root) into
// the container.
func allowedUnprivilegedOnlyMap(rawIdmap string) error {
	entries, err := parseRawIdmap(rawIdmap)
	if err != nil {
		return err
	}

	for _, entry := range entries {
		if entry.Hostid != 0 {
			continue
		}

		return fmt.Errorf("Cannot map root user into container as LXD was configured to only allow unprivileged containers")
	}

	return nil
}
// containerValidConfig validates a full container (or profile) config map.
// profile selects profile-mode validation (volatile.* and image.* keys are
// rejected); expanded indicates the map already has profiles merged in, which
// enables the uid/gid allocation check for unprivileged containers.
func containerValidConfig(sysOS *sys.OS, config map[string]string, profile bool, expanded bool) error {
	// A nil config has nothing to validate.
	if config == nil {
		return nil
	}

	// Per-key validation.
	for k, v := range config {
		// volatile.* and image.* keys are container-local state and may
		// not be set on profiles.
		if profile && strings.HasPrefix(k, "volatile.") {
			return fmt.Errorf("Volatile keys can only be set on containers")
		}

		if profile && strings.HasPrefix(k, "image.") {
			return fmt.Errorf("Image keys can only be set on containers")
		}

		err := containerValidConfigKey(sysOS, k, v)
		if err != nil {
			return err
		}
	}

	// Cross-key validation: raw.seccomp supplies a complete seccomp policy
	// and therefore conflicts with every security.syscalls.* knob; a
	// whitelist likewise conflicts with any blacklist setting.
	_, rawSeccomp := config["raw.seccomp"]
	_, whitelist := config["security.syscalls.whitelist"]
	_, blacklist := config["security.syscalls.blacklist"]
	blacklistDefault := shared.IsTrue(config["security.syscalls.blacklist_default"])
	blacklistCompat := shared.IsTrue(config["security.syscalls.blacklist_compat"])

	if rawSeccomp && (whitelist || blacklist || blacklistDefault || blacklistCompat) {
		return fmt.Errorf("raw.seccomp is mutually exclusive with security.syscalls*")
	}

	if whitelist && (blacklist || blacklistDefault || blacklistCompat) {
		return fmt.Errorf("security.syscalls.whitelist is mutually exclusive with security.syscalls.blacklist*")
	}

	// Unprivileged containers need a host uid/gid allocation to run.
	if expanded && (config["security.privileged"] == "" || !shared.IsTrue(config["security.privileged"])) && sysOS.IdmapSet == nil {
		return fmt.Errorf("LXD doesn't have a uid/gid allocation. In this mode, only privileged containers are supported")
	}

	// Honour the LXD_UNPRIVILEGED_ONLY environment override: no privileged
	// containers and no raw.idmap entries that map host root.
	unprivOnly := os.Getenv("LXD_UNPRIVILEGED_ONLY")
	if shared.IsTrue(unprivOnly) {
		if config["raw.idmap"] != "" {
			err := allowedUnprivilegedOnlyMap(config["raw.idmap"])
			if err != nil {
				return err
			}
		}

		if shared.IsTrue(config["security.privileged"]) {
			return fmt.Errorf("LXD was configured to only allow unprivileged containers")
		}
	}

	return nil
}
// containerValidDevices validates a container (or profile) device map.
// profile is true when validating a profile; expanded means the devices come
// from the profile-merged view (enables root-disk presence and storage
// volume availability checks, and skips the duplicate disk path check).
func containerValidDevices(cluster *db.Cluster, devices types.Devices, profile bool, expanded bool) error {
	// Empty device list
	if devices == nil {
		return nil
	}

	var diskDevicePaths []string
	// Check each device individually
	for name, m := range devices {
		if m["type"] == "" {
			return fmt.Errorf("Missing device type for device '%s'", name)
		}

		if !shared.StringInSlice(m["type"], []string{"disk", "gpu", "infiniband", "nic", "none", "proxy", "unix-block", "unix-char", "usb"}) {
			return fmt.Errorf("Invalid device type for device '%s'", name)
		}

		// Reject config keys that don't apply to this device type.
		for k := range m {
			if !containerValidDeviceConfigKey(m["type"], k) {
				return fmt.Errorf("Invalid device configuration key for %s: %s", m["type"], k)
			}
		}

		if m["type"] == "nic" {
			// nics need a nictype; all but p2p also need a parent interface.
			if m["nictype"] == "" {
				return fmt.Errorf("Missing nic type")
			}

			if !shared.StringInSlice(m["nictype"], []string{"bridged", "macvlan", "p2p", "physical", "sriov"}) {
				return fmt.Errorf("Bad nic type: %s", m["nictype"])
			}

			if shared.StringInSlice(m["nictype"], []string{"bridged", "macvlan", "physical", "sriov"}) && m["parent"] == "" {
				return fmt.Errorf("Missing parent for %s type nic", m["nictype"])
			}

			// Static addresses must parse as valid IPv4/IPv6.
			if m["ipv4.address"] != "" {
				err := networkValidAddressV4(m["ipv4.address"])
				if err != nil {
					return err
				}
			}

			if m["ipv6.address"] != "" {
				err := networkValidAddressV6(m["ipv6.address"])
				if err != nil {
					return err
				}
			}
		} else if m["type"] == "infiniband" {
			if m["nictype"] == "" {
				return fmt.Errorf("Missing nic type")
			}

			if !shared.StringInSlice(m["nictype"], []string{"physical", "sriov"}) {
				return fmt.Errorf("Bad nic type: %s", m["nictype"])
			}

			if m["parent"] == "" {
				return fmt.Errorf("Missing parent for %s type nic", m["nictype"])
			}
		} else if m["type"] == "disk" {
			// On the non-expanded view, reject two disk devices sharing the
			// same mount path (the expanded view may legitimately override).
			if !expanded && !shared.StringInSlice(m["path"], diskDevicePaths) {
				diskDevicePaths = append(diskDevicePaths, m["path"])
			} else if !expanded {
				return fmt.Errorf("More than one disk device uses the same path: %s", m["path"])
			}

			if m["path"] == "" {
				return fmt.Errorf("Disk entry is missing the required \"path\" property")
			}

			// "/" is the root disk: it takes no source but is the only disk
			// allowed a size quota.
			if m["source"] == "" && m["path"] != "/" {
				return fmt.Errorf("Disk entry is missing the required \"source\" property")
			}

			if m["path"] == "/" && m["source"] != "" {
				return fmt.Errorf("Root disk entry may not have a \"source\" property set")
			}

			if m["size"] != "" && m["path"] != "/" {
				return fmt.Errorf("Only the root disk may have a size quota")
			}

			// recursive only makes sense for bind-mounted host directories.
			if (m["path"] == "/" || !shared.IsDir(shared.HostPath(m["source"]))) && m["recursive"] != "" {
				return fmt.Errorf("The recursive option is only supported for additional bind-mounted paths")
			}

			if m["pool"] != "" {
				// With a pool set, source names a storage volume, not a path.
				if filepath.IsAbs(m["source"]) {
					return fmt.Errorf("Storage volumes cannot be specified as absolute paths")
				}

				_, err := cluster.StoragePoolGetID(m["pool"])
				if err != nil {
					return fmt.Errorf("The \"%s\" storage pool doesn't exist", m["pool"])
				}

				// In a cluster, a custom volume may only be attached on one
				// node at a time.
				if !profile && expanded && m["source"] != "" && m["path"] != "/" {
					isAvailable, err := cluster.StorageVolumeIsAvailable(
						m["pool"], m["source"])
					if err != nil {
						return errors.Wrap(err, "Check if volume is available")
					}
					if !isAvailable {
						return fmt.Errorf(
							"Storage volume %q is already attached to a container "+
								"on a different node", m["source"])
					}
				}
			}

			if m["propagation"] != "" {
				// Mount propagation needs liblxc >= 3.0.
				if !util.RuntimeLiblxcVersionAtLeast(3, 0, 0) {
					return fmt.Errorf("liblxc 3.0 is required for mount propagation configuration")
				}

				if !shared.StringInSlice(m["propagation"], []string{"private", "shared", "slave", "unbindable", "rprivate", "rshared", "rslave", "runbindable"}) {
					return fmt.Errorf("Invalid propagation mode '%s'", m["propagation"])
				}
			}
		} else if shared.StringInSlice(m["type"], []string{"unix-char", "unix-block"}) {
			if m["source"] == "" && m["path"] == "" {
				return fmt.Errorf("Unix device entry is missing the required \"source\" or \"path\" property")
			}

			// Required devices without explicit major/minor must exist on the
			// host so the numbers can be read from the actual device node.
			if (m["required"] == "" || shared.IsTrue(m["required"])) && (m["major"] == "" || m["minor"] == "") {
				srcPath, exist := m["source"]
				if !exist {
					srcPath = m["path"]
				}
				if !shared.PathExists(srcPath) {
					return fmt.Errorf("The device path doesn't exist on the host and major/minor wasn't specified")
				}

				dType, _, _, err := deviceGetAttributes(srcPath)
				if err != nil {
					return err
				}

				// The host node's type must match the declared device type.
				if m["type"] == "unix-char" && dType != "c" {
					return fmt.Errorf("Path specified for unix-char device is a block device")
				}

				if m["type"] == "unix-block" && dType != "b" {
					return fmt.Errorf("Path specified for unix-block device is a character device")
				}
			}
		} else if m["type"] == "usb" {
			// Nothing needed for usb.
		} else if m["type"] == "gpu" {
			if m["pci"] != "" && !shared.PathExists(fmt.Sprintf("/sys/bus/pci/devices/%s", m["pci"])) {
				return fmt.Errorf("Invalid PCI address (no device found): %s", m["pci"])
			}

			// pci and id are alternative selectors; each excludes the others.
			if m["pci"] != "" && (m["id"] != "" || m["productid"] != "" || m["vendorid"] != "") {
				return fmt.Errorf("Cannot use id, productid or vendorid when pci is set")
			}

			if m["id"] != "" && (m["pci"] != "" || m["productid"] != "" || m["vendorid"] != "") {
				return fmt.Errorf("Cannot use pci, productid or vendorid when id is set")
			}
		} else if m["type"] == "proxy" {
			if m["listen"] == "" {
				return fmt.Errorf("Proxy device entry is missing the required \"listen\" property")
			}

			if m["connect"] == "" {
				return fmt.Errorf("Proxy device entry is missing the required \"connect\" property")
			}

			listenAddr, err := parseAddr(m["listen"])
			if err != nil {
				return err
			}

			connectAddr, err := parseAddr(m["connect"])
			if err != nil {
				return err
			}

			if len(connectAddr.addr) > len(listenAddr.addr) {
				// Cannot support single port -> multiple port
				return fmt.Errorf("Cannot map a single port to multiple ports")
			}

			if shared.IsTrue(m["proxy_protocol"]) && !strings.HasPrefix(m["connect"], "tcp") {
				return fmt.Errorf("The PROXY header can only be sent to tcp servers")
			}

			// uid/gid/mode only apply to filesystem-backed unix sockets
			// (not abstract ones, which have no inode).
			if (!strings.HasPrefix(m["listen"], "unix:") || strings.HasPrefix(m["listen"], "unix:@")) &&
				(m["uid"] != "" || m["gid"] != "" || m["mode"] != "") {
				return fmt.Errorf("Only proxy devices for non-abstract unix sockets can carry uid, gid, or mode properties")
			}

			if shared.IsTrue(m["nat"]) {
				if m["bind"] != "" && m["bind"] != "host" {
					return fmt.Errorf("Only host-bound proxies can use NAT")
				}

				// Support TCP <-> TCP and UDP <-> UDP
				if listenAddr.connType == "unix" || connectAddr.connType == "unix" ||
					listenAddr.connType != connectAddr.connType {
					return fmt.Errorf("Proxying %s <-> %s is not supported when using NAT",
						listenAddr.connType, connectAddr.connType)
				}
			}
		} else if m["type"] == "none" {
			continue
		} else {
			return fmt.Errorf("Invalid device type: %s", m["type"])
		}
	}

	// Checks on the expanded config: the merged view must contain a root disk.
	if expanded {
		_, _, err := shared.GetRootDiskDevice(devices)
		if err != nil {
			return errors.Wrap(err, "Detect root disk device")
		}
	}

	return nil
}
// container is the interface implemented by all container backends. It covers
// lifecycle actions, snapshot/migration/backup handling, configuration, file
// access, command execution, status reporting and storage management.
type container interface {
	// Container actions
	Freeze() error
	Shutdown(timeout time.Duration) error
	Start(stateful bool) error
	Stop(stateful bool) error
	Unfreeze() error

	// Snapshots & migration & backups
	Restore(sourceContainer container, stateful bool) error
	/* actionScript here is a script called action.sh in the stateDir, to
	 * be passed to CRIU as --action-script
	 */
	Migrate(args *CriuMigrationArgs) error
	Snapshots() ([]container, error)
	Backups() ([]backup, error)

	// Config handling
	Rename(newName string) error
	Update(newConfig db.ContainerArgs, userRequested bool) error
	Delete() error
	Export(w io.Writer, properties map[string]string) error

	// Live configuration
	CGroupGet(key string) (string, error)
	CGroupSet(key string, value string) error
	ConfigKeySet(key string, value string) error

	// File handling
	FileExists(path string) error
	FilePull(srcpath string, dstpath string) (int64, int64, os.FileMode, string, []string, error)
	FilePush(type_ string, srcpath string, dstpath string, uid int64, gid int64, mode int, write string) error
	FileRemove(path string) error

	// Console - Allocate and run a console tty.
	//
	// terminal - Bidirectional file descriptor.
	//
	// This function will not return until the console has been exited by
	// the user.
	Console(terminal *os.File) *exec.Cmd
	ConsoleLog(opts lxc.ConsoleLogOptions) (string, error)
	/* Command execution:
	 * 1. passing in false for wait
	 *    - equivalent to calling cmd.Run()
	 * 2. passing in true for wait
	 *    - start the command and return its PID in the first return
	 *      argument and the PID of the attached process in the second
	 *      argument. It's the callers responsibility to wait on the
	 *      command. (Note. The returned PID of the attached process can not
	 *      be waited upon since it's a child of the lxd forkexec command
	 *      (the PID returned in the first return argument). It can however
	 *      be used to e.g. forward signals.)
	 */
	Exec(command []string, env map[string]string, stdin *os.File, stdout *os.File, stderr *os.File, wait bool) (*exec.Cmd, int, int, error)

	// Status
	Render() (interface{}, interface{}, error)
	RenderFull() (*api.ContainerFull, interface{}, error)
	RenderState() (*api.ContainerState, error)
	IsPrivileged() bool
	IsRunning() bool
	IsFrozen() bool
	IsEphemeral() bool
	IsSnapshot() bool
	IsStateful() bool
	IsNesting() bool

	// Hooks fired on container state transitions.
	OnStart() error
	OnStop(target string) error

	// Properties
	Id() int
	Project() string
	Name() string
	Description() string
	Architecture() int
	CreationDate() time.Time
	LastUsedDate() time.Time
	ExpandedConfig() map[string]string
	ExpandedDevices() types.Devices
	LocalConfig() map[string]string
	LocalDevices() types.Devices
	Profiles() []string
	InitPID() int
	State() string
	ExpiryDate() time.Time

	// Paths
	Path() string
	RootfsPath() string
	TemplatesPath() string
	StatePath() string
	LogFilePath() string
	ConsoleBufferLogPath() string
	LogPath() string

	// Storage
	StoragePool() (string, error)

	// Progress reporting
	SetOperation(op *operation)

	// FIXME: Those should be internal functions
	// Needed for migration for now.
	StorageStart() (bool, error)
	StorageStop() (bool, error)
	Storage() storage
	IdmapSet() (*idmap.IdmapSet, error)
	LastIdmapSet() (*idmap.IdmapSet, error)
	TemplateApply(trigger string) error
	DaemonState() *state.State
}
// Loader functions
// containerCreateAsEmpty creates a brand new container with empty storage.
// On any failure after the database record exists, the record is deleted
// again so no half-created container is left behind.
func containerCreateAsEmpty(d *Daemon, args db.ContainerArgs) (container, error) {
	// Create the database record and container struct.
	c, err := containerCreateInternal(d.State(), args)
	if err != nil {
		return nil, err
	}

	// Provision empty storage for the new container.
	if err := c.Storage().ContainerCreate(c); err != nil {
		c.Delete()
		return nil, err
	}

	// Apply any post-storage configuration (quota, backup file).
	if err := containerConfigureInternal(c); err != nil {
		c.Delete()
		return nil, err
	}

	return c, nil
}
func containerCreateFromBackup(s *state.State, info backupInfo, data io.ReadSeeker,
customPool bool) error {
var pool storage
var fixBackupFile = false
// Get storage pool from index.yaml
pool, storageErr := storagePoolInit(s, info.Pool)
if storageErr != nil && errors.Cause(storageErr) != db.ErrNoSuchObject {
// Unexpected error
return storageErr
}
if errors.Cause(storageErr) == db.ErrNoSuchObject {
// The pool doesn't exist, and the backup is in binary format so we
// cannot alter the backup.yaml.
if info.HasBinaryFormat {
return storageErr
}
// Get the default profile
_, profile, err := s.Cluster.ProfileGet(info.Project, "default")
if err != nil {
return errors.Wrap(err, "Failed to get default profile")
}
_, v, err := shared.GetRootDiskDevice(profile.Devices)
if err != nil {
return errors.Wrap(err, "Failed to get root disk device")
}
// Use the default-profile's root pool
pool, err = storagePoolInit(s, v["pool"])
if err != nil {
return errors.Wrap(err, "Failed to initialize storage pool")
}
fixBackupFile = true
}
// Find the compression algorithm
tarArgs, _, _, err := shared.DetectCompressionFile(data)
if err != nil {
return err
}
data.Seek(0, 0)
// Unpack tarball
err = pool.ContainerBackupLoad(info, data, tarArgs)
if err != nil {
return err
}
if fixBackupFile || customPool {
// Update the pool
err = backupFixStoragePool(s.Cluster, info, !customPool)
if err != nil {
return err
}
}
return nil
}
// containerCreateEmptySnapshot creates a snapshot database record backed by
// an empty snapshot on the storage backend. The record is rolled back if the
// storage operation fails.
func containerCreateEmptySnapshot(s *state.State, args db.ContainerArgs) (container, error) {
	// Create the snapshot record.
	snap, err := containerCreateInternal(s, args)
	if err != nil {
		return nil, err
	}

	// Back it with an empty snapshot on the storage layer.
	if err := snap.Storage().ContainerSnapshotCreateEmpty(snap); err != nil {
		snap.Delete()
		return nil, err
	}

	return snap, nil
}
// containerCreateFromImage creates a new container from the image with the
// given fingerprint, transferring the image from another cluster node first
// when it isn't available locally. tracker, if non-nil, receives unpack
// progress updates.
func containerCreateFromImage(d *Daemon, args db.ContainerArgs, hash string, tracker *ioprogress.ProgressTracker) (container, error) {
	s := d.State()

	// Get the image properties
	_, img, err := s.Cluster.ImageGet(args.Project, hash, false, false)
	if err != nil {
		return nil, errors.Wrapf(err, "Fetch image %s from database", hash)
	}

	// Check if the image is available locally or it's on another node.
	nodeAddress, err := s.Cluster.ImageLocate(hash)
	if err != nil {
		return nil, errors.Wrapf(err, "Locate image %s in the cluster", hash)
	}
	if nodeAddress != "" {
		// The image is available from another node, let's try to
		// import it.
		logger.Debugf("Transferring image %s from node %s", hash, nodeAddress)
		client, err := cluster.Connect(nodeAddress, d.endpoints.NetworkCert(), false)
		if err != nil {
			return nil, err
		}
		client = client.UseProject(args.Project)

		err = imageImportFromNode(filepath.Join(d.os.VarDir, "images"), client, hash)
		if err != nil {
			return nil, err
		}

		err = d.cluster.ImageAssociateNode(args.Project, hash)
		if err != nil {
			return nil, err
		}
	}

	// Set the "image.*" keys
	if len(img.Properties) > 0 {
		// Writing to a nil map panics; callers aren't required to have
		// initialized args.Config (containerCreateInternal only applies
		// the default later), so guard here.
		if args.Config == nil {
			args.Config = map[string]string{}
		}

		for k, v := range img.Properties {
			args.Config[fmt.Sprintf("image.%s", k)] = v
		}
	}

	// Set the BaseImage field (regardless of previous value)
	args.BaseImage = hash

	// Create the container
	c, err := containerCreateInternal(s, args)
	if err != nil {
		return nil, errors.Wrap(err, "Create container")
	}

	err = s.Cluster.ImageLastAccessUpdate(hash, time.Now().UTC())
	if err != nil {
		c.Delete()
		return nil, fmt.Errorf("Error updating image last use date: %s", err)
	}

	// Now create the storage from an image
	err = c.Storage().ContainerCreateFromImage(c, hash, tracker)
	if err != nil {
		c.Delete()
		return nil, errors.Wrap(err, "Create container from image")
	}

	// Apply any post-storage configuration
	err = containerConfigureInternal(c)
	if err != nil {
		c.Delete()
		return nil, errors.Wrap(err, "Configure container")
	}

	return c, nil
}
func containerCreateAsCopy(s *state.State, args db.ContainerArgs, sourceContainer container, containerOnly bool, refresh bool) (container, error) {
var ct container
var err error
if refresh {
// Load the target container
ct, err = containerLoadByProjectAndName(s, args.Project, args.Name)
if err != nil {
refresh = false
}
}
if !refresh {
// Create the container.
ct, err = containerCreateInternal(s, args)
if err != nil {
return nil, err
}
}
if refresh && ct.IsRunning() {
return nil, fmt.Errorf("Cannot refresh a running container")
}
// At this point we have already figured out the parent
// container's root disk device so we can simply
// retrieve it from the expanded devices.
parentStoragePool := ""
parentExpandedDevices := ct.ExpandedDevices()
parentLocalRootDiskDeviceKey, parentLocalRootDiskDevice, _ := shared.GetRootDiskDevice(parentExpandedDevices)
if parentLocalRootDiskDeviceKey != "" {
parentStoragePool = parentLocalRootDiskDevice["pool"]
}
csList := []*container{}
var snapshots []container
if !containerOnly {
if refresh {
// Compare snapshots
syncSnapshots, deleteSnapshots, err := containerCompareSnapshots(sourceContainer, ct)
if err != nil {
return nil, err
}
// Delete extra snapshots
for _, snap := range deleteSnapshots {
err := snap.Delete()
if err != nil {
return nil, err
}
}
// Only care about the snapshots that need updating
snapshots = syncSnapshots
} else {
// Get snapshots of source container
snapshots, err = sourceContainer.Snapshots()
if err != nil {
ct.Delete()
return nil, err
}
}
for _, snap := range snapshots {
fields := strings.SplitN(snap.Name(), shared.SnapshotDelimiter, 2)
// Ensure that snapshot and parent container have the
// same storage pool in their local root disk device.
// If the root disk device for the snapshot comes from a
// profile on the new instance as well we don't need to
// do anything.
snapDevices := snap.LocalDevices()
if snapDevices != nil {
snapLocalRootDiskDeviceKey, _, _ := shared.GetRootDiskDevice(snapDevices)
if snapLocalRootDiskDeviceKey != "" {
snapDevices[snapLocalRootDiskDeviceKey]["pool"] = parentStoragePool
} else {
snapDevices["root"] = map[string]string{
"type": "disk",
"path": "/",
"pool": parentStoragePool,
}
}
}
newSnapName := fmt.Sprintf("%s/%s", ct.Name(), fields[1])
csArgs := db.ContainerArgs{
Architecture: snap.Architecture(),
Config: snap.LocalConfig(),
Ctype: db.CTypeSnapshot,
Devices: snapDevices,
Description: snap.Description(),
Ephemeral: snap.IsEphemeral(),
Name: newSnapName,
Profiles: snap.Profiles(),
Project: args.Project,
}
// Create the snapshots.
cs, err := containerCreateInternal(s, csArgs)
if err != nil {
if !refresh {
ct.Delete()
}
return nil, err
}
// Restore snapshot creation date
err = s.Cluster.ContainerCreationUpdate(cs.Id(), snap.CreationDate())
if err != nil {
if !refresh {
ct.Delete()
}
return nil, err
}
csList = append(csList, &cs)
}
}
// Now clone or refresh the storage
if refresh {
err = ct.Storage().ContainerRefresh(ct, sourceContainer, snapshots)
if err != nil {
return nil, err
}
} else {
err = ct.Storage().ContainerCopy(ct, sourceContainer, containerOnly)
if err != nil {
if !refresh {
ct.Delete()
}
return nil, err
}
}
// Apply any post-storage configuration.
err = containerConfigureInternal(ct)
if err != nil {
if !refresh {
ct.Delete()
}
return nil, err
}
if !containerOnly {
for _, cs := range csList {
// Apply any post-storage configuration.
err = containerConfigureInternal(*cs)
if err != nil {
if !refresh {
ct.Delete()
}
return nil, err
}
}
}
return ct, nil
}
// containerCreateAsSnapshot creates a snapshot of sourceContainer. For
// stateful snapshots the container must be running and CRIU available; the
// process state is dumped into the container's state directory before the
// filesystem snapshot is taken.
func containerCreateAsSnapshot(s *state.State, args db.ContainerArgs, sourceContainer container) (container, error) {
	// Deal with state
	if args.Stateful {
		if !sourceContainer.IsRunning() {
			return nil, fmt.Errorf("Unable to create a stateful snapshot. The container isn't running")
		}

		_, err := exec.LookPath("criu")
		if err != nil {
			return nil, fmt.Errorf("Unable to create a stateful snapshot. CRIU isn't installed")
		}

		stateDir := sourceContainer.StatePath()
		err = os.MkdirAll(stateDir, 0700)
		if err != nil {
			return nil, err
		}

		/* TODO: ideally we would freeze here and unfreeze below after
		 * we've copied the filesystem, to make sure there are no
		 * changes by the container while snapshotting. Unfortunately
		 * there is a bug in CRIU where it doesn't leave the container
		 * in the same state it found it w.r.t. freezing, i.e. CRIU
		 * freezes too, and then /always/ thaws, even if the container
		 * was frozen. Until that's fixed, all calls to Unfreeze()
		 * after snapshotting will fail.
		 */
		criuMigrationArgs := CriuMigrationArgs{
			cmd:          lxc.MIGRATE_DUMP,
			stateDir:     stateDir,
			function:     "snapshot",
			stop:         false,
			actionScript: false,
			dumpDir:      "",
			preDumpDir:   "",
		}

		// Dump the process state; clean the state dir on failure.
		err = sourceContainer.Migrate(&criuMigrationArgs)
		if err != nil {
			os.RemoveAll(sourceContainer.StatePath())
			return nil, err
		}
	}

	// Create the snapshot database record.
	c, err := containerCreateInternal(s, args)
	if err != nil {
		return nil, err
	}

	// Clone the container's filesystem into the snapshot; roll back the
	// record on failure.
	err = sourceContainer.Storage().ContainerSnapshotCreate(c, sourceContainer)
	if err != nil {
		c.Delete()
		return nil, err
	}

	// Mount the snapshot storage (if needed) to write the backup file.
	ourStart, err := c.StorageStart()
	if err != nil {
		c.Delete()
		return nil, err
	}
	if ourStart {
		defer c.StorageStop()
	}

	err = writeBackupFile(sourceContainer)
	if err != nil {
		c.Delete()
		return nil, err
	}

	// Once we're done, remove the state directory
	if args.Stateful {
		os.RemoveAll(sourceContainer.StatePath())
	}

	eventSendLifecycle(sourceContainer.Project(), "container-snapshot-created",
		fmt.Sprintf("/1.0/containers/%s", sourceContainer.Name()),
		map[string]interface{}{
			"snapshot_name": args.Name,
		})

	return c, nil
}
// containerCreateInternal applies defaults, validates the request, creates
// the database record and returns the instantiated (but storage-less)
// container struct. On failure after the DB record exists, the record is
// removed again.
func containerCreateInternal(s *state.State, args db.ContainerArgs) (container, error) {
	// Set default values
	if args.Project == "" {
		args.Project = "default"
	}

	if args.Profiles == nil {
		args.Profiles = []string{"default"}
	}

	if args.Config == nil {
		args.Config = map[string]string{}
	}

	if args.BaseImage != "" {
		args.Config["volatile.base_image"] = args.BaseImage
	}

	if args.Devices == nil {
		args.Devices = types.Devices{}
	}

	if args.Architecture == 0 {
		args.Architecture = s.OS.Architectures[0]
	}

	// Validate container name (snapshot names contain the delimiter and are
	// validated through their parent instead).
	if args.Ctype == db.CTypeRegular {
		err := containerValidName(args.Name)
		if err != nil {
			return nil, err
		}

		// Unset expiry date since containers don't expire
		args.ExpiryDate = time.Time{}
	}

	// Validate container config
	err := containerValidConfig(s.OS, args.Config, false, false)
	if err != nil {
		return nil, err
	}

	// Validate container devices
	err = containerValidDevices(s.Cluster, args.Devices, false, false)
	if err != nil {
		return nil, errors.Wrap(err, "Invalid devices")
	}

	// Validate architecture
	_, err = osarch.ArchitectureName(args.Architecture)
	if err != nil {
		return nil, err
	}

	if !shared.IntInSlice(args.Architecture, s.OS.Architectures) {
		return nil, fmt.Errorf("Requested architecture isn't supported by this host")
	}

	// Validate profiles: all must exist and none may be listed twice.
	profiles, err := s.Cluster.Profiles(args.Project)
	if err != nil {
		return nil, err
	}

	checkedProfiles := []string{}
	for _, profile := range args.Profiles {
		if !shared.StringInSlice(profile, profiles) {
			return nil, fmt.Errorf("Requested profile '%s' doesn't exist", profile)
		}

		if shared.StringInSlice(profile, checkedProfiles) {
			return nil, fmt.Errorf("Duplicate profile found in request")
		}

		checkedProfiles = append(checkedProfiles, profile)
	}

	if args.CreationDate.IsZero() {
		args.CreationDate = time.Now().UTC()
	}

	if args.LastUsedDate.IsZero() {
		args.LastUsedDate = time.Unix(0, 0).UTC()
	}

	var container db.Container
	err = s.Cluster.Transaction(func(tx *db.ClusterTx) error {
		node, err := tx.NodeName()
		if err != nil {
			return err
		}

		// TODO: this check should probably be performed by the db
		// package itself.
		exists, err := tx.ProjectExists(args.Project)
		if err != nil {
			return errors.Wrapf(err, "Check if project %q exists", args.Project)
		}

		if !exists {
			return fmt.Errorf("Project %q does not exist", args.Project)
		}

		// Create the container entry
		container = db.Container{
			Project:      args.Project,
			Name:         args.Name,
			Node:         node,
			Type:         int(args.Ctype),
			Architecture: args.Architecture,
			Ephemeral:    args.Ephemeral,
			CreationDate: args.CreationDate,
			Stateful:     args.Stateful,
			LastUseDate:  args.LastUsedDate,
			Description:  args.Description,
			Config:       args.Config,
			Devices:      args.Devices,
			Profiles:     args.Profiles,
			ExpiryDate:   args.ExpiryDate,
		}

		_, err = tx.ContainerCreate(container)
		if err != nil {
			return errors.Wrap(err, "Add container info to the database")
		}

		// Read back the container, to get ID and creation time.
		c, err := tx.ContainerGet(args.Project, args.Name)
		if err != nil {
			return errors.Wrap(err, "Fetch created container from the database")
		}

		container = *c

		if container.ID < 1 {
			// Bug fix: the previous code used errors.Wrapf(err, ...)
			// here, but err is nil at this point and pkg/errors
			// returns nil when wrapping a nil error, so the invalid
			// ID was silently ignored. Build a real error instead.
			return fmt.Errorf("Unexpected container database ID %d", container.ID)
		}

		return nil
	})
	if err != nil {
		if err == db.ErrAlreadyDefined {
			thing := "Container"
			if shared.IsSnapshot(args.Name) {
				thing = "Snapshot"
			}
			return nil, fmt.Errorf("%s '%s' already exists", thing, args.Name)
		}
		return nil, err
	}

	// Wipe any existing log for this container name
	os.RemoveAll(shared.LogPath(args.Name))

	args = db.ContainerToArgs(&container)

	// Setup the container struct and finish creation (storage and idmap)
	c, err := containerLXCCreate(s, args)
	if err != nil {
		s.Cluster.ContainerRemove(args.Project, args.Name)
		return nil, errors.Wrap(err, "Create LXC container")
	}

	return c, nil
}
// containerConfigureInternal applies post-creation configuration that needs
// the container's storage mounted: the root disk size quota and the
// backup.yaml file.
func containerConfigureInternal(c container) error {
	// Find the root device
	_, rootDiskDevice, err := shared.GetRootDiskDevice(c.ExpandedDevices())
	if err != nil {
		return err
	}

	ourStart, err := c.StorageStart()
	if err != nil {
		return err
	}

	// Bug fix: register the matching StorageStop immediately so it also runs
	// on the early error returns below; previously it was only deferred
	// after the quota handling, leaking a started storage on failure.
	if ourStart {
		defer c.StorageStop()
	}

	// handle quota: at this point, storage is guaranteed to be ready
	storage := c.Storage()
	if rootDiskDevice["size"] != "" {
		storageTypeName := storage.GetStorageTypeName()
		if (storageTypeName == "lvm" || storageTypeName == "ceph") && c.IsRunning() {
			// lvm/ceph can't resize a mounted volume; record the quota to
			// be applied on next start instead.
			err = c.ConfigKeySet("volatile.apply_quota", rootDiskDevice["size"])
			if err != nil {
				return err
			}
		} else {
			size, err := shared.ParseByteSizeString(rootDiskDevice["size"])
			if err != nil {
				return err
			}

			err = storage.StorageEntitySetQuota(storagePoolVolumeTypeContainer, size, c)
			if err != nil {
				return err
			}
		}
	}

	err = writeBackupFile(c)
	if err != nil {
		return err
	}

	return nil
}
// containerLoadById instantiates the container with the given database ID.
func containerLoadById(s *state.State, id int) (container, error) {
	// Resolve the ID to its (project, name) pair first.
	proj, ctName, err := s.Cluster.ContainerProjectAndName(id)
	if err != nil {
		return nil, err
	}

	return containerLoadByProjectAndName(s, proj, ctName)
}
// containerLoadByProjectAndName fetches the database record for the named
// container in the given project and instantiates its container struct.
func containerLoadByProjectAndName(s *state.State, project, name string) (container, error) {
	var record *db.Container

	err := s.Cluster.Transaction(func(tx *db.ClusterTx) error {
		rec, err := tx.ContainerGet(project, name)
		if err != nil {
			return errors.Wrapf(err, "Failed to fetch container %q in project %q", name, project)
		}

		record = rec
		return nil
	})
	if err != nil {
		return nil, err
	}

	c, err := containerLXCLoad(s, db.ContainerToArgs(record), nil)
	if err != nil {
		return nil, errors.Wrap(err, "Failed to load container")
	}

	return c, nil
}
// containerLoadByProject instantiates all regular containers belonging to
// the given project.
func containerLoadByProject(s *state.State, project string) ([]container, error) {
	var records []db.Container

	err := s.Cluster.Transaction(func(tx *db.ClusterTx) error {
		var err error
		records, err = tx.ContainerList(db.ContainerFilter{
			Project: project,
			Type:    int(db.CTypeRegular),
		})
		return err
	})
	if err != nil {
		return nil, err
	}

	return containerLoadAllInternal(records, s)
}
// containerLoadFromAllProjects loads all containers across all projects.
func containerLoadFromAllProjects(s *state.State) ([]container, error) {
	var projects []string

	err := s.Cluster.Transaction(func(tx *db.ClusterTx) error {
		var err error
		projects, err = tx.ProjectNames()
		return err
	})
	if err != nil {
		return nil, err
	}

	containers := []container{}
	for _, project := range projects {
		projectContainers, err := containerLoadByProject(s, project)
		if err != nil {
			// Bug fix: this previously wrapped nil instead of err;
			// pkg/errors returns nil when wrapping a nil error, so
			// the failure was silently swallowed and a nil error
			// returned to the caller.
			return nil, errors.Wrapf(err, "Load containers in project %s", project)
		}
		containers = append(containers, projectContainers...)
	}

	return containers, nil
}
// Legacy interface.
// containerLoadAll returns all regular containers of the "default" project,
// for callers that predate multi-project support.
func containerLoadAll(s *state.State) ([]container, error) {
	return containerLoadByProject(s, "default")
}
// containerLoadNodeAll instantiates every container hosted on this cluster
// node, across all projects.
func containerLoadNodeAll(s *state.State) ([]container, error) {
	var records []db.Container

	err := s.Cluster.Transaction(func(tx *db.ClusterTx) error {
		var err error
		records, err = tx.ContainerNodeList()
		return err
	})
	if err != nil {
		return nil, err
	}

	return containerLoadAllInternal(records, s)
}
// containerLoadNodeProjectAll returns every container hosted on this cluster
// node that belongs to the given project.
func containerLoadNodeProjectAll(s *state.State, project string) ([]container, error) {
	// Fetch the records of this node's containers for the project.
	var records []db.Container
	err := s.Cluster.Transaction(func(tx *db.ClusterTx) error {
		list, err := tx.ContainerNodeProjectList(project)
		if err != nil {
			return err
		}

		records = list
		return nil
	})
	if err != nil {
		return nil, err
	}

	return containerLoadAllInternal(records, s)
}
// containerLoadAllInternal converts a set of database container records into
// runtime container structs, resolving the profiles each container uses.
func containerLoadAllInternal(cts []db.Container, s *state.State) ([]container, error) {
	// Collect the set of profile names referenced, grouped by project.
	profiles := map[string]map[string]api.Profile{}
	for _, rec := range cts {
		if _, ok := profiles[rec.Project]; !ok {
			profiles[rec.Project] = map[string]api.Profile{}
		}

		for _, name := range rec.Profiles {
			if _, ok := profiles[rec.Project][name]; !ok {
				profiles[rec.Project][name] = api.Profile{}
			}
		}
	}

	// Fetch the data of every referenced profile.
	for project, names := range profiles {
		for name := range names {
			_, profile, err := s.Cluster.ProfileGet(project, name)
			if err != nil {
				return nil, err
			}

			names[name] = *profile
		}
	}

	// Instantiate the container structs.
	containers := []container{}
	for i := range cts {
		rec := &cts[i]

		// Resolve this container's profile list, preserving order.
		cProfiles := []api.Profile{}
		for _, name := range rec.Profiles {
			cProfiles = append(cProfiles, profiles[rec.Project][name])
		}

		ct, err := containerLXCLoad(s, db.ContainerToArgs(rec), cProfiles)
		if err != nil {
			return nil, err
		}

		containers = append(containers, ct)
	}

	return containers, nil
}
// containerCompareSnapshots diffs the snapshots of source and target.
// It returns the source snapshots missing or mismatched on the target
// (to sync) and the target snapshots absent or mismatched on the source
// (to delete).
func containerCompareSnapshots(source container, target container) ([]container, []container, error) {
	// Fetch both snapshot lists.
	sourceSnapshots, err := source.Snapshots()
	if err != nil {
		return nil, nil, err
	}

	targetSnapshots, err := target.Snapshots()
	if err != nil {
		return nil, nil, err
	}

	// Index the source snapshots by short name and creation time.
	sourceTimes := map[string]time.Time{}
	for _, snap := range sourceSnapshots {
		_, name, _ := containerGetParentAndSnapshotName(snap.Name())
		sourceTimes[name] = snap.CreationDate()
	}

	// Target snapshots that the source doesn't have, or whose creation
	// time differs, need to be deleted.
	toDelete := []container{}
	targetTimes := map[string]time.Time{}
	for _, snap := range targetSnapshots {
		_, name, _ := containerGetParentAndSnapshotName(snap.Name())
		targetTimes[name] = snap.CreationDate()

		when, exists := sourceTimes[name]
		if !exists || when != snap.CreationDate() {
			toDelete = append(toDelete, snap)
		}
	}

	// Source snapshots absent from the target, or with a different
	// creation time, need to be synced over.
	toSync := []container{}
	for _, snap := range sourceSnapshots {
		_, name, _ := containerGetParentAndSnapshotName(snap.Name())

		when, exists := targetTimes[name]
		if !exists || when != snap.CreationDate() {
			toSync = append(toSync, snap)
		}
	}

	return toSync, toDelete, nil
}
// autoCreateContainerSnapshotsTask returns the task function and schedule
// used to create scheduled container snapshots on this node.
// The task runs once a minute (skipping the very first tick via
// task.ErrSkip) and snapshots every local container whose
// "snapshots.schedule" cron expression matches the current minute.
func autoCreateContainerSnapshotsTask(d *Daemon) (task.Func, task.Schedule) {
	f := func(ctx context.Context) {
		// Load all local containers
		allContainers, err := containerLoadNodeAll(d.State())
		if err != nil {
			logger.Error("Failed to load containers for scheduled snapshots", log.Ctx{"err": err})
			return
		}

		// Figure out which need snapshotting (if any)
		containers := []container{}
		for _, c := range allContainers {
			schedule := c.LocalConfig()["snapshots.schedule"]
			if schedule == "" {
				continue
			}

			// Extend our schedule to one that is accepted by the used cron parser
			// (a leading "*" field is prepended; invalid expressions are skipped).
			sched, err := cron.Parse(fmt.Sprintf("* %s", schedule))
			if err != nil {
				continue
			}

			// Check if it's time to snapshot
			now := time.Now()
			next := sched.Next(now)

			// Ignore everything that is more precise than minutes.
			now = now.Truncate(time.Minute)
			next = next.Truncate(time.Minute)

			if !now.Equal(next) {
				continue
			}

			// Check if the container is running. Stopped containers are
			// only snapshotted when snapshots.schedule.stopped is true.
			if !shared.IsTrue(c.LocalConfig()["snapshots.schedule.stopped"]) && !c.IsRunning() {
				continue
			}

			containers = append(containers, c)
		}

		if len(containers) == 0 {
			return
		}

		// Run the snapshot creation inside a background operation so that
		// it is visible and trackable through the API.
		opRun := func(op *operation) error {
			return autoCreateContainerSnapshots(ctx, d, containers)
		}

		op, err := operationCreate(d.cluster, "", operationClassTask, db.OperationSnapshotCreate, nil, nil, opRun, nil, nil)
		if err != nil {
			logger.Error("Failed to start create snapshot operation", log.Ctx{"err": err})
			return
		}

		logger.Info("Creating scheduled container snapshots")

		_, err = op.Run()
		if err != nil {
			logger.Error("Failed to create scheduled container snapshots", log.Ctx{"err": err})
		}

		logger.Info("Done creating scheduled container snapshots")
	}

	// Run once a minute; ErrSkip on the first call avoids snapshotting
	// work right at daemon startup.
	first := true
	schedule := func() (time.Duration, error) {
		interval := time.Minute

		if first {
			first = false
			return interval, task.ErrSkip
		}

		return interval, nil
	}

	return f, schedule
}
// autoCreateContainerSnapshots creates a scheduled snapshot for each of the
// given containers, one at a time, stopping early (without error) if the
// context is cancelled. Per-container failures are logged but do not abort
// the remaining containers.
func autoCreateContainerSnapshots(ctx context.Context, d *Daemon, containers []container) error {
	// Make the snapshots
	for _, c := range containers {
		// Shadow the loop variable so the goroutine below owns a stable
		// copy even after this function returns on cancellation.
		c := c

		// Buffered so the worker's final send never blocks: if we bail
		// out on ctx.Done below, an unbuffered channel would leak the
		// goroutine forever on `ch <- nil`.
		ch := make(chan error, 1)
		go func() {
			snapshotName, err := containerDetermineNextSnapshotName(d, c, "snap%d")
			if err != nil {
				logger.Error("Error retrieving next snapshot name", log.Ctx{"err": err, "container": c})
				ch <- nil
				return
			}

			snapshotName = fmt.Sprintf("%s%s%s", c.Name(), shared.SnapshotDelimiter, snapshotName)

			expiry, err := shared.GetSnapshotExpiry(time.Now(), c.LocalConfig()["snapshots.expiry"])
			if err != nil {
				logger.Error("Error getting expiry date", log.Ctx{"err": err, "container": c})
				ch <- nil
				return
			}

			args := db.ContainerArgs{
				Architecture: c.Architecture(),
				Config:       c.LocalConfig(),
				Ctype:        db.CTypeSnapshot,
				Devices:      c.LocalDevices(),
				Ephemeral:    c.IsEphemeral(),
				Name:         snapshotName,
				Profiles:     c.Profiles(),
				Project:      c.Project(),
				Stateful:     false,
				ExpiryDate:   expiry,
			}

			_, err = containerCreateAsSnapshot(d.State(), args, c)
			if err != nil {
				logger.Error("Error creating snapshots", log.Ctx{"err": err, "container": c})
			}

			ch <- nil
		}()

		// Wait for this snapshot to complete or for cancellation.
		select {
		case <-ctx.Done():
			return nil
		case <-ch:
		}
	}

	return nil
}
// pruneExpiredContainerSnapshotsTask returns the task function and schedule
// used to delete expired container snapshots on this node.
// The task runs once a minute (skipping the very first tick via
// task.ErrSkip) and removes any local snapshot whose expiry date has passed.
func pruneExpiredContainerSnapshotsTask(d *Daemon) (task.Func, task.Schedule) {
	f := func(ctx context.Context) {
		// Load all local containers
		allContainers, err := containerLoadNodeAll(d.State())
		if err != nil {
			logger.Error("Failed to load containers for snapshot expiry", log.Ctx{"err": err})
			return
		}

		// Figure out which snapshots have expired (if any)
		expiredSnapshots := []container{}
		for _, c := range allContainers {
			snapshots, err := c.Snapshots()
			if err != nil {
				logger.Error("Failed to list snapshots", log.Ctx{"err": err, "container": c.Name(), "project": c.Project()})
				continue
			}

			for _, snapshot := range snapshots {
				if snapshot.ExpiryDate().IsZero() {
					// Snapshot doesn't expire
					continue
				}

				// Expired when the expiry time is now or in the past.
				if time.Now().Unix()-snapshot.ExpiryDate().Unix() >= 0 {
					expiredSnapshots = append(expiredSnapshots, snapshot)
				}
			}
		}

		if len(expiredSnapshots) == 0 {
			return
		}

		// Run the pruning inside a background operation so that it is
		// visible and trackable through the API.
		opRun := func(op *operation) error {
			return pruneExpiredContainerSnapshots(ctx, d, expiredSnapshots)
		}

		op, err := operationCreate(d.cluster, "", operationClassTask, db.OperationSnapshotsExpire, nil, nil, opRun, nil, nil)
		if err != nil {
			logger.Error("Failed to start expired snapshots operation", log.Ctx{"err": err})
			return
		}

		logger.Info("Pruning expired container snapshots")

		_, err = op.Run()
		if err != nil {
			logger.Error("Failed to remove expired container snapshots", log.Ctx{"err": err})
		}

		logger.Info("Done pruning expired container snapshots")
	}

	// Run once a minute; ErrSkip on the first call avoids pruning work
	// right at daemon startup.
	first := true
	schedule := func() (time.Duration, error) {
		interval := time.Minute

		if first {
			first = false
			return interval, task.ErrSkip
		}

		return interval, nil
	}

	return f, schedule
}
// pruneExpiredContainerSnapshots deletes each of the given snapshots,
// aborting on the first failure.
func pruneExpiredContainerSnapshots(ctx context.Context, d *Daemon, snapshots []container) error {
	for _, snap := range snapshots {
		if err := snap.Delete(); err != nil {
			return errors.Wrapf(err, "Failed to delete expired snapshot '%s' in project '%s'", snap.Name(), snap.Project())
		}
	}

	return nil
}
// containerDetermineNextSnapshotName computes the name of the next snapshot
// of a container. It honours the "snapshots.pattern" config key (rendered as
// a template, with an optional "%d" index placeholder) and falls back to
// defaultPattern when unset.
func containerDetermineNextSnapshotName(d *Daemon, c container, defaultPattern string) (string, error) {
	pattern := c.LocalConfig()["snapshots.pattern"]
	if pattern == "" {
		pattern = defaultPattern
	}

	// Expand template variables (e.g. creation_date).
	rendered, err := shared.RenderTemplate(pattern, pongo2.Context{
		"creation_date": time.Now(),
	})
	if err != nil {
		return "", err
	}
	pattern = rendered

	// A single "%d" placeholder gets replaced with the next free index.
	switch strings.Count(pattern, "%d") {
	case 0:
		// No placeholder; handled below.
	case 1:
		i := d.cluster.ContainerNextSnapshot(c.Project(), c.Name(), pattern)
		return strings.Replace(pattern, "%d", strconv.Itoa(i), 1), nil
	default:
		return "", fmt.Errorf("Snapshot pattern may contain '%%d' only once")
	}

	// Check whether the literal name is already taken.
	snapshots, err := c.Snapshots()
	if err != nil {
		return "", err
	}

	taken := false
	for _, snap := range snapshots {
		_, snapOnlyName, _ := containerGetParentAndSnapshotName(snap.Name())
		if snapOnlyName == pattern {
			taken = true
			break
		}
	}

	// Append '-0', '-1', etc. if the actual pattern/snapshot name already exists
	if taken {
		pattern = fmt.Sprintf("%s-%%d", pattern)
		i := d.cluster.ContainerNextSnapshot(c.Project(), c.Name(), pattern)
		return strings.Replace(pattern, "%d", strconv.Itoa(i), 1), nil
	}

	return pattern, nil
}
|
package main
import (
"fmt"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/lxc/lxd/client"
"github.com/lxc/lxd/lxd/cluster"
"github.com/lxc/lxd/lxd/util"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
)
// initData represents the "lxd init" preseed payload: daemon configuration
// (inlined api.ServerPut) plus any cluster, network, storage pool and
// profile definitions to create or update.
type initData struct {
	api.ServerPut `yaml:",inline"`
	Cluster       *initDataCluster       `json:"cluster" yaml:"cluster"`
	Networks      []api.NetworksPost     `json:"networks" yaml:"networks"`
	StoragePools  []api.StoragePoolsPost `json:"storage_pools" yaml:"storage_pools"`
	Profiles      []api.ProfilesPost     `json:"profiles" yaml:"profiles"`
}
// initDataCluster extends api.ClusterPut with the trust password used when
// joining an existing cluster.
type initDataCluster struct {
	api.ClusterPut `yaml:",inline"`

	// Password used to authenticate against the target cluster member.
	ClusterPassword string `json:"cluster_password" yaml:"cluster_password"`
}
// cmdInit implements the "lxd init" command.
type cmdInit struct {
	cmd    *cobra.Command
	global *cmdGlobal

	// Mode selection (mutually exclusive).
	flagAuto    bool // Non-interactive mode driven by the flags below.
	flagPreseed bool // Read a YAML preseed document from stdin.

	// Configuration flags, only valid together with --auto.
	flagNetworkAddress  string
	flagNetworkPort     int
	flagStorageBackend  string
	flagStorageDevice   string
	flagStorageLoopSize int
	flagStoragePool     string
	flagTrustPassword   string
}
// Command returns the cobra command definition for "lxd init", wiring up its
// usage text, examples and command-line flags.
func (c *cmdInit) Command() *cobra.Command {
	cmd := &cobra.Command{}
	cmd.Use = "init"
	cmd.Short = "Configure the LXD daemon"
	cmd.Long = `Description:
  Configure the LXD daemon
`
	cmd.Example = `  init --preseed
  init --auto [--network-address=IP] [--network-port=8443] [--storage-backend=dir]
              [--storage-create-device=DEVICE] [--storage-create-loop=SIZE]
              [--storage-pool=POOL] [--trust-password=PASSWORD]
`
	cmd.RunE = c.Run

	// Mode flags.
	cmd.Flags().BoolVar(&c.flagAuto, "auto", false, "Automatic (non-interactive) mode")
	cmd.Flags().BoolVar(&c.flagPreseed, "preseed", false, "Pre-seed mode, expects YAML config from stdin")

	// --auto configuration flags (the trailing "``" suppresses cobra's
	// auto-generated type placeholder in help output).
	cmd.Flags().StringVar(&c.flagNetworkAddress, "network-address", "", "Address to bind LXD to (default: none)"+"``")
	cmd.Flags().IntVar(&c.flagNetworkPort, "network-port", -1, "Port to bind LXD to (default: 8443)"+"``")
	cmd.Flags().StringVar(&c.flagStorageBackend, "storage-backend", "", "Storage backend to use (btrfs, dir, lvm or zfs, default: dir)"+"``")
	cmd.Flags().StringVar(&c.flagStorageDevice, "storage-create-device", "", "Setup device based storage using DEVICE"+"``")
	cmd.Flags().IntVar(&c.flagStorageLoopSize, "storage-create-loop", -1, "Setup loop based storage with SIZE in GB"+"``")
	cmd.Flags().StringVar(&c.flagStoragePool, "storage-pool", "", "Storage pool to use or create"+"``")
	cmd.Flags().StringVar(&c.flagTrustPassword, "trust-password", "", "Password required to add new clients"+"``")

	c.cmd = cmd
	return cmd
}
// Run implements "lxd init": it validates the flag combination, gathers the
// desired configuration (preseed, auto or interactive mode) and applies it
// to the local daemon.
func (c *cmdInit) Run(cmd *cobra.Command, args []string) error {
	// Sanity checks
	if c.flagAuto && c.flagPreseed {
		return fmt.Errorf("Can't use --auto and --preseed together")
	}

	// The configuration flags are only meaningful in --auto mode.
	configFlagUsed := c.flagNetworkAddress != "" || c.flagNetworkPort != -1 ||
		c.flagStorageBackend != "" || c.flagStorageDevice != "" ||
		c.flagStorageLoopSize != -1 || c.flagStoragePool != "" ||
		c.flagTrustPassword != ""
	if !c.flagAuto && configFlagUsed {
		return fmt.Errorf("Configuration flags require --auto")
	}

	// Connect to LXD
	d, err := lxd.ConnectLXDUnix("", nil)
	if err != nil {
		return errors.Wrap(err, "Failed to connect to local LXD")
	}

	// Gather the target configuration from the selected mode.
	var config *initData
	if c.flagPreseed {
		config, err = c.RunPreseed(cmd, args, d)
	} else if c.flagAuto {
		config, err = c.RunAuto(cmd, args, d)
	} else {
		config, err = c.RunInteractive(cmd, args, d)
	}
	if err != nil {
		return err
	}

	return c.ApplyConfig(cmd, args, d, *config)
}
// availableStorageDrivers returns the names of the storage drivers usable on
// this system; "dir" is always included.
func (c *cmdInit) availableStorageDrivers() []string {
	// Detect the filesystem backing the LXD directory; fall back to "dir"
	// semantics if detection fails.
	backingFs, err := util.FilesystemDetect(shared.VarPath())
	if err != nil {
		backingFs = "dir"
	}

	drivers := []string{"dir"}
	for _, driver := range supportedStoragePoolDrivers {
		if driver == "dir" {
			continue
		}

		// Inside a user namespace only btrfs-on-btrfs can work
		// (i.e. when source=/some/path/on/btrfs is used).
		if shared.RunningInUserNS() && (backingFs != "btrfs" || driver != "btrfs") {
			continue
		}

		// Skip drivers whose core storage interface fails to initialize.
		if _, err := storageCoreInit(driver); err != nil {
			continue
		}

		drivers = append(drivers, driver)
	}

	return drivers
}
// ApplyConfig applies the gathered init configuration to the LXD daemon:
// server config keys, networks, storage pools, profiles and (optionally)
// clustering. If any step fails, every change already made is reverted in
// reverse order before the error is returned.
func (c *cmdInit) ApplyConfig(cmd *cobra.Command, args []string, d lxd.ContainerServer, config initData) error {
	// Handle reverts
	revert := true
	reverts := []func(){}
	defer func() {
		if !revert {
			return
		}

		// Lets undo things in reverse order
		for i := len(reverts) - 1; i >= 0; i-- {
			reverts[i]()
		}
	}()

	// Apply server configuration
	if config.Config != nil && len(config.Config) > 0 {
		// Get current config
		currentServer, etag, err := d.GetServer()
		if err != nil {
			return errors.Wrap(err, "Failed to retrieve current server configuration")
		}

		// Setup reverter
		reverts = append(reverts, func() {
			d.UpdateServer(currentServer.Writable(), "")
		})

		// Prepare the update
		newServer := api.ServerPut{}
		err = shared.DeepCopy(currentServer.Writable(), &newServer)
		if err != nil {
			return errors.Wrap(err, "Failed to copy server configuration")
		}

		for k, v := range config.Config {
			newServer.Config[k] = fmt.Sprintf("%v", v)
		}

		// Apply it
		err = d.UpdateServer(newServer, etag)
		if err != nil {
			return errors.Wrap(err, "Failed to update server configuration")
		}
	}

	// Apply network configuration
	if config.Networks != nil && len(config.Networks) > 0 {
		// Get the list of networks
		networkNames, err := d.GetNetworkNames()
		if err != nil {
			return errors.Wrap(err, "Failed to retrieve list of networks")
		}

		// Network creator
		createNetwork := func(network api.NetworksPost) error {
			// Create the network if doesn't exist
			err := d.CreateNetwork(network)
			if err != nil {
				return errors.Wrapf(err, "Failed to create network '%s'", network.Name)
			}

			// Setup reverter
			reverts = append(reverts, func() {
				d.DeleteNetwork(network.Name)
			})

			return nil
		}

		// Network updater
		updateNetwork := func(network api.NetworksPost) error {
			// Get the current network
			currentNetwork, etag, err := d.GetNetwork(network.Name)
			if err != nil {
				return errors.Wrapf(err, "Failed to retrieve current network '%s'", network.Name)
			}

			// Setup reverter
			reverts = append(reverts, func() {
				d.UpdateNetwork(currentNetwork.Name, currentNetwork.Writable(), "")
			})

			// Prepare the update
			newNetwork := api.NetworkPut{}
			err = shared.DeepCopy(currentNetwork.Writable(), &newNetwork)
			if err != nil {
				return errors.Wrapf(err, "Failed to copy configuration of network '%s'", network.Name)
			}

			// Description override
			if network.Description != "" {
				newNetwork.Description = network.Description
			}

			// Config overrides
			for k, v := range network.Config {
				newNetwork.Config[k] = fmt.Sprintf("%v", v)
			}

			// Apply it
			err = d.UpdateNetwork(currentNetwork.Name, newNetwork, etag)
			if err != nil {
				return errors.Wrapf(err, "Failed to update network '%s'", network.Name)
			}

			return nil
		}

		for _, network := range config.Networks {
			// New network
			if !shared.StringInSlice(network.Name, networkNames) {
				err := createNetwork(network)
				if err != nil {
					return err
				}

				continue
			}

			// Existing network
			err := updateNetwork(network)
			if err != nil {
				return err
			}
		}
	}

	// Apply storage configuration
	if config.StoragePools != nil && len(config.StoragePools) > 0 {
		// Get the list of storagePools
		storagePoolNames, err := d.GetStoragePoolNames()
		if err != nil {
			return errors.Wrap(err, "Failed to retrieve list of storage pools")
		}

		// StoragePool creator
		createStoragePool := func(storagePool api.StoragePoolsPost) error {
			// Create the storagePool if doesn't exist
			err := d.CreateStoragePool(storagePool)
			if err != nil {
				return errors.Wrapf(err, "Failed to create storage pool '%s'", storagePool.Name)
			}

			// Setup reverter
			reverts = append(reverts, func() {
				d.DeleteStoragePool(storagePool.Name)
			})

			return nil
		}

		// StoragePool updater
		updateStoragePool := func(storagePool api.StoragePoolsPost) error {
			// Get the current storagePool
			currentStoragePool, etag, err := d.GetStoragePool(storagePool.Name)
			if err != nil {
				return errors.Wrapf(err, "Failed to retrieve current storage pool '%s'", storagePool.Name)
			}

			// Sanity check: the driver of an existing pool can't change.
			if currentStoragePool.Driver != storagePool.Driver {
				return fmt.Errorf("Storage pool '%s' is of type '%s' instead of '%s'", currentStoragePool.Name, currentStoragePool.Driver, storagePool.Driver)
			}

			// Setup reverter
			reverts = append(reverts, func() {
				d.UpdateStoragePool(currentStoragePool.Name, currentStoragePool.Writable(), "")
			})

			// Prepare the update
			newStoragePool := api.StoragePoolPut{}
			err = shared.DeepCopy(currentStoragePool.Writable(), &newStoragePool)
			if err != nil {
				return errors.Wrapf(err, "Failed to copy configuration of storage pool '%s'", storagePool.Name)
			}

			// Description override
			if storagePool.Description != "" {
				newStoragePool.Description = storagePool.Description
			}

			// Config overrides
			for k, v := range storagePool.Config {
				newStoragePool.Config[k] = fmt.Sprintf("%v", v)
			}

			// Apply it
			err = d.UpdateStoragePool(currentStoragePool.Name, newStoragePool, etag)
			if err != nil {
				return errors.Wrapf(err, "Failed to update storage pool '%s'", storagePool.Name)
			}

			return nil
		}

		for _, storagePool := range config.StoragePools {
			// New storagePool
			if !shared.StringInSlice(storagePool.Name, storagePoolNames) {
				err := createStoragePool(storagePool)
				if err != nil {
					return err
				}

				continue
			}

			// Existing storagePool
			err := updateStoragePool(storagePool)
			if err != nil {
				return err
			}
		}
	}

	// Apply profile configuration
	if config.Profiles != nil && len(config.Profiles) > 0 {
		// Get the list of profiles
		profileNames, err := d.GetProfileNames()
		if err != nil {
			return errors.Wrap(err, "Failed to retrieve list of profiles")
		}

		// Profile creator
		createProfile := func(profile api.ProfilesPost) error {
			// Create the profile if doesn't exist
			err := d.CreateProfile(profile)
			if err != nil {
				return errors.Wrapf(err, "Failed to create profile '%s'", profile.Name)
			}

			// Setup reverter
			reverts = append(reverts, func() {
				d.DeleteProfile(profile.Name)
			})

			return nil
		}

		// Profile updater
		updateProfile := func(profile api.ProfilesPost) error {
			// Get the current profile
			currentProfile, etag, err := d.GetProfile(profile.Name)
			if err != nil {
				return errors.Wrapf(err, "Failed to retrieve current profile '%s'", profile.Name)
			}

			// Setup reverter
			reverts = append(reverts, func() {
				d.UpdateProfile(currentProfile.Name, currentProfile.Writable(), "")
			})

			// Prepare the update
			newProfile := api.ProfilePut{}
			err = shared.DeepCopy(currentProfile.Writable(), &newProfile)
			if err != nil {
				return errors.Wrapf(err, "Failed to copy configuration of profile '%s'", profile.Name)
			}

			// Description override
			if profile.Description != "" {
				newProfile.Description = profile.Description
			}

			// Config overrides
			for k, v := range profile.Config {
				newProfile.Config[k] = fmt.Sprintf("%v", v)
			}

			// Device overrides
			for k, v := range profile.Devices {
				// New device
				_, ok := newProfile.Devices[k]
				if !ok {
					newProfile.Devices[k] = v
					continue
				}

				// Existing device: merge config keys.
				for configKey, configValue := range v {
					newProfile.Devices[k][configKey] = fmt.Sprintf("%v", configValue)
				}
			}

			// Apply it
			err = d.UpdateProfile(currentProfile.Name, newProfile, etag)
			if err != nil {
				return errors.Wrapf(err, "Failed to update profile '%s'", profile.Name)
			}

			return nil
		}

		for _, profile := range config.Profiles {
			// New profile
			if !shared.StringInSlice(profile.Name, profileNames) {
				err := createProfile(profile)
				if err != nil {
					return err
				}

				continue
			}

			// Existing profile
			err := updateProfile(profile)
			if err != nil {
				return err
			}
		}
	}

	// Apply clustering configuration
	if config.Cluster != nil && config.Cluster.Enabled {
		// Get the current cluster configuration
		currentCluster, etag, err := d.GetCluster()
		if err != nil {
			return errors.Wrap(err, "Failed to retrieve current cluster config")
		}

		// Check if already enabled
		if !currentCluster.Enabled {
			// Setup trust relationship.
			// Bug fix: also require a cluster address — without one
			// there is no remote member to establish trust with, so
			// SetupTrust must not be attempted on a password alone.
			if config.Cluster.ClusterAddress != "" && config.Cluster.ClusterPassword != "" {
				// Get our certificate
				serverConfig, _, err := d.GetServer()
				if err != nil {
					return errors.Wrap(err, "Failed to retrieve server configuration")
				}

				// Try to setup trust
				err = cluster.SetupTrust(serverConfig.Environment.Certificate, config.Cluster.ClusterAddress,
					config.Cluster.ClusterCertificate, config.Cluster.ClusterPassword)
				if err != nil {
					return errors.Wrap(err, "Failed to setup cluster trust")
				}
			}

			// Configure the cluster
			op, err := d.UpdateCluster(config.Cluster.ClusterPut, etag)
			if err != nil {
				return errors.Wrap(err, "Failed to configure cluster")
			}

			err = op.Wait()
			if err != nil {
				return errors.Wrap(err, "Failed to configure cluster")
			}
		}
	}

	// Success: disarm the revert handler.
	revert = false
	return nil
}
lxd/init: Require cluster address for trust
Signed-off-by: Stéphane Graber <089afc6d81f66f1168a9849e15660feae286e024@ubuntu.com>
package main
import (
"fmt"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/lxc/lxd/client"
"github.com/lxc/lxd/lxd/cluster"
"github.com/lxc/lxd/lxd/util"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
)
// initData represents the "lxd init" preseed payload: daemon configuration
// (inlined api.ServerPut) plus any cluster, network, storage pool and
// profile definitions to create or update.
type initData struct {
	api.ServerPut `yaml:",inline"`
	Cluster       *initDataCluster       `json:"cluster" yaml:"cluster"`
	Networks      []api.NetworksPost     `json:"networks" yaml:"networks"`
	StoragePools  []api.StoragePoolsPost `json:"storage_pools" yaml:"storage_pools"`
	Profiles      []api.ProfilesPost     `json:"profiles" yaml:"profiles"`
}
// initDataCluster extends api.ClusterPut with the trust password used when
// joining an existing cluster.
type initDataCluster struct {
	api.ClusterPut `yaml:",inline"`

	// Password used to authenticate against the target cluster member.
	ClusterPassword string `json:"cluster_password" yaml:"cluster_password"`
}
// cmdInit implements the "lxd init" command.
type cmdInit struct {
	cmd    *cobra.Command
	global *cmdGlobal

	// Mode selection (mutually exclusive).
	flagAuto    bool // Non-interactive mode driven by the flags below.
	flagPreseed bool // Read a YAML preseed document from stdin.

	// Configuration flags, only valid together with --auto.
	flagNetworkAddress  string
	flagNetworkPort     int
	flagStorageBackend  string
	flagStorageDevice   string
	flagStorageLoopSize int
	flagStoragePool     string
	flagTrustPassword   string
}
// Command returns the cobra command definition for "lxd init", wiring up its
// usage text, examples and command-line flags.
func (c *cmdInit) Command() *cobra.Command {
	cmd := &cobra.Command{}
	cmd.Use = "init"
	cmd.Short = "Configure the LXD daemon"
	cmd.Long = `Description:
  Configure the LXD daemon
`
	cmd.Example = `  init --preseed
  init --auto [--network-address=IP] [--network-port=8443] [--storage-backend=dir]
              [--storage-create-device=DEVICE] [--storage-create-loop=SIZE]
              [--storage-pool=POOL] [--trust-password=PASSWORD]
`
	cmd.RunE = c.Run

	// Mode flags.
	cmd.Flags().BoolVar(&c.flagAuto, "auto", false, "Automatic (non-interactive) mode")
	cmd.Flags().BoolVar(&c.flagPreseed, "preseed", false, "Pre-seed mode, expects YAML config from stdin")

	// --auto configuration flags (the trailing "``" suppresses cobra's
	// auto-generated type placeholder in help output).
	cmd.Flags().StringVar(&c.flagNetworkAddress, "network-address", "", "Address to bind LXD to (default: none)"+"``")
	cmd.Flags().IntVar(&c.flagNetworkPort, "network-port", -1, "Port to bind LXD to (default: 8443)"+"``")
	cmd.Flags().StringVar(&c.flagStorageBackend, "storage-backend", "", "Storage backend to use (btrfs, dir, lvm or zfs, default: dir)"+"``")
	cmd.Flags().StringVar(&c.flagStorageDevice, "storage-create-device", "", "Setup device based storage using DEVICE"+"``")
	cmd.Flags().IntVar(&c.flagStorageLoopSize, "storage-create-loop", -1, "Setup loop based storage with SIZE in GB"+"``")
	cmd.Flags().StringVar(&c.flagStoragePool, "storage-pool", "", "Storage pool to use or create"+"``")
	cmd.Flags().StringVar(&c.flagTrustPassword, "trust-password", "", "Password required to add new clients"+"``")

	c.cmd = cmd
	return cmd
}
// Run implements "lxd init": it validates the flag combination, gathers the
// desired configuration (preseed, auto or interactive mode) and applies it
// to the local daemon.
func (c *cmdInit) Run(cmd *cobra.Command, args []string) error {
	// Sanity checks
	if c.flagAuto && c.flagPreseed {
		return fmt.Errorf("Can't use --auto and --preseed together")
	}

	// The configuration flags are only meaningful in --auto mode.
	configFlagUsed := c.flagNetworkAddress != "" || c.flagNetworkPort != -1 ||
		c.flagStorageBackend != "" || c.flagStorageDevice != "" ||
		c.flagStorageLoopSize != -1 || c.flagStoragePool != "" ||
		c.flagTrustPassword != ""
	if !c.flagAuto && configFlagUsed {
		return fmt.Errorf("Configuration flags require --auto")
	}

	// Connect to LXD
	d, err := lxd.ConnectLXDUnix("", nil)
	if err != nil {
		return errors.Wrap(err, "Failed to connect to local LXD")
	}

	// Gather the target configuration from the selected mode.
	var config *initData
	if c.flagPreseed {
		config, err = c.RunPreseed(cmd, args, d)
	} else if c.flagAuto {
		config, err = c.RunAuto(cmd, args, d)
	} else {
		config, err = c.RunInteractive(cmd, args, d)
	}
	if err != nil {
		return err
	}

	return c.ApplyConfig(cmd, args, d, *config)
}
// availableStorageDrivers returns the names of the storage drivers usable on
// this system; "dir" is always included.
func (c *cmdInit) availableStorageDrivers() []string {
	// Detect the filesystem backing the LXD directory; fall back to "dir"
	// semantics if detection fails.
	backingFs, err := util.FilesystemDetect(shared.VarPath())
	if err != nil {
		backingFs = "dir"
	}

	drivers := []string{"dir"}
	for _, driver := range supportedStoragePoolDrivers {
		if driver == "dir" {
			continue
		}

		// Inside a user namespace only btrfs-on-btrfs can work
		// (i.e. when source=/some/path/on/btrfs is used).
		if shared.RunningInUserNS() && (backingFs != "btrfs" || driver != "btrfs") {
			continue
		}

		// Skip drivers whose core storage interface fails to initialize.
		if _, err := storageCoreInit(driver); err != nil {
			continue
		}

		drivers = append(drivers, driver)
	}

	return drivers
}
func (c *cmdInit) ApplyConfig(cmd *cobra.Command, args []string, d lxd.ContainerServer, config initData) error {
// Handle reverts
revert := true
reverts := []func(){}
defer func() {
if !revert {
return
}
// Lets undo things in reverse order
for i := len(reverts) - 1; i >= 0; i-- {
reverts[i]()
}
}()
// Apply server configuration
if config.Config != nil && len(config.Config) > 0 {
// Get current config
currentServer, etag, err := d.GetServer()
if err != nil {
return errors.Wrap(err, "Failed to retrieve current server configuration")
}
// Setup reverter
reverts = append(reverts, func() {
d.UpdateServer(currentServer.Writable(), "")
})
// Prepare the update
newServer := api.ServerPut{}
err = shared.DeepCopy(currentServer.Writable(), &newServer)
if err != nil {
return errors.Wrap(err, "Failed to copy server configuration")
}
for k, v := range config.Config {
newServer.Config[k] = fmt.Sprintf("%v", v)
}
// Apply it
err = d.UpdateServer(newServer, etag)
if err != nil {
return errors.Wrap(err, "Failed to update server configuration")
}
}
// Apply network configuration
if config.Networks != nil && len(config.Networks) > 0 {
// Get the list of networks
networkNames, err := d.GetNetworkNames()
if err != nil {
return errors.Wrap(err, "Failed to retrieve list of networks")
}
// Network creator
createNetwork := func(network api.NetworksPost) error {
// Create the network if doesn't exist
err := d.CreateNetwork(network)
if err != nil {
return errors.Wrapf(err, "Failed to create network '%s'", network.Name)
}
// Setup reverter
reverts = append(reverts, func() {
d.DeleteNetwork(network.Name)
})
return nil
}
// Network updater
updateNetwork := func(network api.NetworksPost) error {
// Get the current network
currentNetwork, etag, err := d.GetNetwork(network.Name)
if err != nil {
return errors.Wrapf(err, "Failed to retrieve current network '%s'", network.Name)
}
// Setup reverter
reverts = append(reverts, func() {
d.UpdateNetwork(currentNetwork.Name, currentNetwork.Writable(), "")
})
// Prepare the update
newNetwork := api.NetworkPut{}
err = shared.DeepCopy(currentNetwork.Writable(), &newNetwork)
if err != nil {
return errors.Wrapf(err, "Failed to copy configuration of network '%s'", network.Name)
}
// Description override
if network.Description != "" {
newNetwork.Description = network.Description
}
// Config overrides
for k, v := range network.Config {
newNetwork.Config[k] = fmt.Sprintf("%v", v)
}
// Apply it
err = d.UpdateNetwork(currentNetwork.Name, newNetwork, etag)
if err != nil {
return errors.Wrapf(err, "Failed to update network '%s'", network.Name)
}
return nil
}
for _, network := range config.Networks {
// New network
if !shared.StringInSlice(network.Name, networkNames) {
err := createNetwork(network)
if err != nil {
return err
}
continue
}
// Existing network
err := updateNetwork(network)
if err != nil {
return err
}
}
}
// Apply storage configuration
if config.StoragePools != nil && len(config.StoragePools) > 0 {
// Get the list of storagePools
storagePoolNames, err := d.GetStoragePoolNames()
if err != nil {
return errors.Wrap(err, "Failed to retrieve list of storage pools")
}
// StoragePool creator
createStoragePool := func(storagePool api.StoragePoolsPost) error {
// Create the storagePool if doesn't exist
err := d.CreateStoragePool(storagePool)
if err != nil {
return errors.Wrapf(err, "Failed to create storage pool '%s'", storagePool.Name)
}
// Setup reverter
reverts = append(reverts, func() {
d.DeleteStoragePool(storagePool.Name)
})
return nil
}
// StoragePool updater
updateStoragePool := func(storagePool api.StoragePoolsPost) error {
// Get the current storagePool
currentStoragePool, etag, err := d.GetStoragePool(storagePool.Name)
if err != nil {
return errors.Wrapf(err, "Failed to retrieve current storage pool '%s'", storagePool.Name)
}
// Sanity check
if currentStoragePool.Driver != storagePool.Driver {
return fmt.Errorf("Storage pool '%s' is of type '%s' instead of '%s'", currentStoragePool.Name, currentStoragePool.Driver, storagePool.Driver)
}
// Setup reverter
reverts = append(reverts, func() {
d.UpdateStoragePool(currentStoragePool.Name, currentStoragePool.Writable(), "")
})
// Prepare the update
newStoragePool := api.StoragePoolPut{}
err = shared.DeepCopy(currentStoragePool.Writable(), &newStoragePool)
if err != nil {
return errors.Wrapf(err, "Failed to copy configuration of storage pool '%s'", storagePool.Name)
}
// Description override
if storagePool.Description != "" {
newStoragePool.Description = storagePool.Description
}
// Config overrides
for k, v := range storagePool.Config {
newStoragePool.Config[k] = fmt.Sprintf("%v", v)
}
// Apply it
err = d.UpdateStoragePool(currentStoragePool.Name, newStoragePool, etag)
if err != nil {
return errors.Wrapf(err, "Failed to update storage pool '%s'", storagePool.Name)
}
return nil
}
for _, storagePool := range config.StoragePools {
// New storagePool
if !shared.StringInSlice(storagePool.Name, storagePoolNames) {
err := createStoragePool(storagePool)
if err != nil {
return err
}
continue
}
// Existing storagePool
err := updateStoragePool(storagePool)
if err != nil {
return err
}
}
}
// Apply profile configuration
if config.Profiles != nil && len(config.Profiles) > 0 {
// Get the list of profiles
profileNames, err := d.GetProfileNames()
if err != nil {
return errors.Wrap(err, "Failed to retrieve list of profiles")
}
// Profile creator
createProfile := func(profile api.ProfilesPost) error {
// Create the profile if doesn't exist
err := d.CreateProfile(profile)
if err != nil {
return errors.Wrapf(err, "Failed to create profile '%s'", profile.Name)
}
// Setup reverter
reverts = append(reverts, func() {
d.DeleteProfile(profile.Name)
})
return nil
}
// Profile updater
updateProfile := func(profile api.ProfilesPost) error {
// Get the current profile
currentProfile, etag, err := d.GetProfile(profile.Name)
if err != nil {
return errors.Wrapf(err, "Failed to retrieve current profile '%s'", profile.Name)
}
// Setup reverter
reverts = append(reverts, func() {
d.UpdateProfile(currentProfile.Name, currentProfile.Writable(), "")
})
// Prepare the update
newProfile := api.ProfilePut{}
err = shared.DeepCopy(currentProfile.Writable(), &newProfile)
if err != nil {
return errors.Wrapf(err, "Failed to copy configuration of profile '%s'", profile.Name)
}
// Description override
if profile.Description != "" {
newProfile.Description = profile.Description
}
// Config overrides
for k, v := range profile.Config {
newProfile.Config[k] = fmt.Sprintf("%v", v)
}
// Device overrides
for k, v := range profile.Devices {
// New device
_, ok := newProfile.Devices[k]
if !ok {
newProfile.Devices[k] = v
continue
}
// Existing device
for configKey, configValue := range v {
newProfile.Devices[k][configKey] = fmt.Sprintf("%v", configValue)
}
}
// Apply it
err = d.UpdateProfile(currentProfile.Name, newProfile, etag)
if err != nil {
return errors.Wrapf(err, "Failed to update profile '%s'", profile.Name)
}
return nil
}
for _, profile := range config.Profiles {
// New profile
if !shared.StringInSlice(profile.Name, profileNames) {
err := createProfile(profile)
if err != nil {
return err
}
continue
}
// Existing profile
err := updateProfile(profile)
if err != nil {
return err
}
}
}
// Apply clustering configuration
if config.Cluster != nil && config.Cluster.Enabled {
// Get the current cluster configuration
currentCluster, etag, err := d.GetCluster()
if err != nil {
return errors.Wrap(err, "Failed to retrieve current cluster config")
}
// Check if already enabled
if !currentCluster.Enabled {
// Setup trust relationship
if config.Cluster.ClusterAddress != "" && config.Cluster.ClusterPassword != "" {
// Get our certificate
serverConfig, _, err := d.GetServer()
if err != nil {
return errors.Wrap(err, "Failed to retrieve server configuration")
}
// Try to setup trust
err = cluster.SetupTrust(serverConfig.Environment.Certificate, config.Cluster.ClusterAddress,
config.Cluster.ClusterCertificate, config.Cluster.ClusterPassword)
if err != nil {
return errors.Wrap(err, "Failed to setup cluster trust")
}
}
// Configure the cluster
op, err := d.UpdateCluster(config.Cluster.ClusterPut, etag)
if err != nil {
return errors.Wrap(err, "Failed to configure cluster")
}
err = op.Wait()
if err != nil {
return errors.Wrap(err, "Failed to configure cluster")
}
}
}
revert = false
return nil
}
|
package main
import (
"fmt"
"net"
"os/exec"
"strconv"
"strings"
"syscall"
"golang.org/x/crypto/ssh/terminal"
"github.com/lxc/lxd/client"
"github.com/lxc/lxd/lxd/util"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/cmd"
"github.com/lxc/lxd/shared/idmap"
"github.com/lxc/lxd/shared/logger"
)
// CmdInit implements the "lxd init" command line.
type CmdInit struct {
	Context         *cmd.Context              // Terminal I/O context used to ask interactive questions
	Args            *Args                     // Parsed command line arguments
	RunningInUserns bool                      // True when LXD itself runs inside a user namespace
	SocketPath      string                    // Path to the LXD unix socket ("" selects the default)
	PasswordReader  func(int) ([]byte, error) // Reads a password from the given file descriptor
}
// Run triggers the execution of the init command. It validates the
// command line arguments, connects to the local LXD daemon, collects the
// desired configuration (from preseed, --auto flags or interactive
// questions) and then applies it.
func (cmd *CmdInit) Run() error {
	// Check that command line arguments don't conflict with each other.
	err := cmd.validateArgs()
	if err != nil {
		return err
	}

	// Connect to LXD over the local unix socket.
	client, err := lxd.ConnectLXDUnix(cmd.SocketPath, nil)
	if err != nil {
		return fmt.Errorf("Unable to talk to LXD: %s", err)
	}

	existingPools, err := client.GetStoragePoolNames()
	if err != nil {
		// We should consider this fatal since this means
		// something's wrong with the daemon.
		return err
	}

	data := &cmdInitData{}

	// Kick off the appropriate way to fill the data (either
	// preseed, auto or interactive).
	if cmd.Args.Preseed {
		err = cmd.fillDataPreseed(data, client)
	} else {
		// Copy the data from the current server config.
		// NOTE(review): the returned error is ignored here, presumably
		// as best-effort seeding — worth confirming.
		cmd.fillDataWithCurrentServerConfig(data, client)

		// Copy the data from the current default profile, if it exists.
		cmd.fillDataWithCurrentDefaultProfile(data, client)

		// Figure what storage drivers among the supported ones are actually
		// available on this system.
		backendsAvailable := cmd.availableStoragePoolsDrivers()

		if cmd.Args.Auto {
			err = cmd.fillDataAuto(data, client, backendsAvailable, existingPools)
		} else {
			err = cmd.fillDataInteractive(data, client, backendsAvailable, existingPools)
		}
	}
	if err != nil {
		return err
	}

	// Apply the desired configuration.
	err = cmd.apply(client, data)
	if err != nil {
		return err
	}

	cmd.Context.Output("LXD has been successfully configured.\n")
	return nil
}
// Fill the given configuration data with parameters collected from
// the --auto command line.
func (cmd *CmdInit) fillDataAuto(data *cmdInitData, client lxd.ContainerServer, backendsAvailable []string, existingPools []string) error {
	// Default to the "dir" backend when none was requested.
	if cmd.Args.StorageBackend == "" {
		cmd.Args.StorageBackend = "dir"
	}

	// Validate the --auto flag combination. This must run after the
	// backend default above has been applied.
	err := cmd.validateArgsAuto(backendsAvailable)
	if err != nil {
		return err
	}

	if cmd.Args.NetworkAddress != "" {
		// If no port was provided, use the default one.
		if cmd.Args.NetworkPort == -1 {
			cmd.Args.NetworkPort = 8443
		}
		networking := &cmdInitNetworkingParams{
			Address:       cmd.Args.NetworkAddress,
			Port:          cmd.Args.NetworkPort,
			TrustPassword: cmd.Args.TrustPassword,
		}
		cmd.fillDataWithNetworking(data, networking)
	}

	// Only create a storage pool when none exists yet.
	if len(existingPools) == 0 {
		storage := &cmdInitStorageParams{
			Backend:  cmd.Args.StorageBackend,
			LoopSize: cmd.Args.StorageCreateLoop,
			Device:   cmd.Args.StorageCreateDevice,
			Dataset:  cmd.Args.StorageDataset,
			Pool:     "default",
		}
		err = cmd.fillDataWithStorage(data, storage, existingPools)
		if err != nil {
			return err
		}
	}
	return nil
}
// Fill the given configuration data with parameters collected with
// interactive questions.
func (cmd *CmdInit) fillDataInteractive(data *cmdInitData, client lxd.ContainerServer, backendsAvailable []string, existingPools []string) error {
	// Ask all questions first, so nothing is recorded in data until the
	// whole dialog has completed.
	storage, err := cmd.askStorage(client, existingPools, backendsAvailable)
	if err != nil {
		return err
	}
	defaultPrivileged := cmd.askDefaultPrivileged()
	networking := cmd.askNetworking()
	imagesAutoUpdate := cmd.askImages()
	bridge := cmd.askBridge(client)

	// A managed bridge requires dnsmasq; bail out before applying anything.
	_, err = exec.LookPath("dnsmasq")
	if err != nil && bridge != nil {
		return fmt.Errorf("LXD managed bridges require \"dnsmasq\". Install it and try again.")
	}

	// Now record the collected answers into the init data.
	err = cmd.fillDataWithStorage(data, storage, existingPools)
	if err != nil {
		return err
	}
	err = cmd.fillDataWithDefaultPrivileged(data, defaultPrivileged)
	if err != nil {
		return err
	}
	cmd.fillDataWithNetworking(data, networking)
	cmd.fillDataWithImages(data, imagesAutoUpdate)
	err = cmd.fillDataWithBridge(data, bridge)
	if err != nil {
		return err
	}
	return nil
}
// Fill the given configuration data from the preseed YAML text stream
// read via the command's context.
func (cmd *CmdInit) fillDataPreseed(data *cmdInitData, client lxd.ContainerServer) error {
	err := cmd.Context.InputYAML(data)
	if err != nil {
		// Include the underlying parse error so the user can locate
		// the broken line instead of just being told "invalid".
		return fmt.Errorf("Invalid preseed YAML content: %v", err)
	}
	return nil
}
// Fill the given data with the current server configuration.
func (cmd *CmdInit) fillDataWithCurrentServerConfig(data *cmdInitData, client lxd.ContainerServer) error {
	current, _, err := client.GetServer()
	if err != nil {
		return err
	}

	// Seed the init data with the writable part of the live config.
	data.ServerPut = current.Writable()

	return nil
}
// Fill the given data with the current default profile, if it exists.
func (cmd *CmdInit) fillDataWithCurrentDefaultProfile(data *cmdInitData, client lxd.ContainerServer) {
	profile, _, err := client.GetProfile("default")
	if err != nil {
		// No "default" profile available: leave the data untouched.
		return
	}

	// Seed the data with a copy of the existing "default" profile.
	entry := api.ProfilesPost{Name: "default"}
	entry.ProfilePut = profile.ProfilePut
	data.Profiles = []api.ProfilesPost{entry}
}
// Fill the given init data with a new storage pool structure matching the
// given storage parameters. When this is the first pool on the system, it
// also wires the pool into the default profile's root disk device.
func (cmd *CmdInit) fillDataWithStorage(data *cmdInitData, storage *cmdInitStorageParams, existingPools []string) error {
	if storage == nil {
		return nil
	}

	// Pool configuration.
	storagePoolConfig := map[string]string{}
	if storage.Config != nil {
		storagePoolConfig = storage.Config
	}

	if storage.Device != "" {
		storagePoolConfig["source"] = storage.Device
		if storage.Dataset != "" {
			storage.Pool = storage.Dataset
		}
	} else if storage.LoopSize != -1 {
		if storage.Dataset != "" {
			storage.Pool = storage.Dataset
		}
	} else {
		storagePoolConfig["source"] = storage.Dataset
	}

	if storage.LoopSize > 0 {
		storagePoolConfig["size"] = strconv.FormatInt(storage.LoopSize, 10) + "GB"
	}

	// Create the requested storage pool.
	storageStruct := api.StoragePoolsPost{
		Name:   storage.Pool,
		Driver: storage.Backend,
	}
	storageStruct.Config = storagePoolConfig

	data.Pools = []api.StoragePoolsPost{storageStruct}

	// When lxd init is rerun and there are already storage pools
	// configured, do not try to set a root disk device in the
	// default profile again. Let the user figure this out.
	if len(existingPools) == 0 {
		if len(data.Profiles) != 0 {
			defaultProfile := data.Profiles[0]
			foundRootDiskDevice := false
			for k, v := range defaultProfile.Devices {
				if v["path"] == "/" && v["source"] == "" {
					foundRootDiskDevice = true

					// Unconditionally overwrite because if the user ends up
					// with a clean LXD but with a pool property key existing in
					// the default profile it must be empty otherwise it would
					// not have been possible to delete the storage pool in
					// the first place.
					defaultProfile.Devices[k]["pool"] = storage.Pool

					// BUG FIX: the format string has two %s verbs but only
					// storage.Pool was passed; the device name k was missing.
					logger.Debugf("Set pool property of existing root disk device \"%s\" in profile \"default\" to \"%s\".", k, storage.Pool)
					break
				}
			}

			if !foundRootDiskDevice {
				err := cmd.profileDeviceAlreadyExists(&defaultProfile, "root")
				if err != nil {
					return err
				}

				defaultProfile.Devices["root"] = map[string]string{
					"type": "disk",
					"path": "/",
					"pool": storage.Pool,
				}
			}
		} else {
			logger.Warnf("Did not find profile \"default\" so no default storage pool will be set. Manual intervention needed.")
		}
	}

	return nil
}
// Fill the default profile in the given init data with options about whether
// to run in privileged mode.
func (cmd *CmdInit) fillDataWithDefaultPrivileged(data *cmdInitData, defaultPrivileged int) error {
	// -1 means the user was never asked: leave the profile untouched.
	if defaultPrivileged == -1 {
		return nil
	}
	if len(data.Profiles) == 0 {
		return fmt.Errorf("error: profile 'default' profile not found")
	}

	profile := data.Profiles[0]
	switch defaultPrivileged {
	case 0:
		profile.Config["security.privileged"] = ""
	case 1:
		profile.Config["security.privileged"] = "true"
	}
	return nil
}
// Fill the given init data with server config details matching the
// given networking parameters.
func (cmd *CmdInit) fillDataWithNetworking(data *cmdInitData, networking *cmdInitNetworkingParams) {
	if networking == nil {
		return
	}

	listen := fmt.Sprintf("%s:%d", networking.Address, networking.Port)
	data.Config["core.https_address"] = listen

	if networking.TrustPassword != "" {
		data.Config["core.trust_password"] = networking.TrustPassword
	}
}
// Fill the given init data with server config details matching the
// given images auto update choice.
func (cmd *CmdInit) fillDataWithImages(data *cmdInitData, imagesAutoUpdate bool) {
	if !imagesAutoUpdate {
		// Explicitly disable automatic updates.
		data.Config["images.auto_update_interval"] = "0"
		return
	}

	// Auto-update requested: clear a previously recorded "disabled" value.
	if val, ok := data.Config["images.auto_update_interval"]; ok && val == "0" {
		data.Config["images.auto_update_interval"] = ""
	}
}
// Fill the given init data with a new bridge network device structure
// matching the given storage parameters.
func (cmd *CmdInit) fillDataWithBridge(data *cmdInitData, bridge *cmdInitBridgeParams) error {
	if bridge == nil {
		return nil
	}

	config := map[string]string{
		"ipv4.address": bridge.IPv4,
		"ipv6.address": bridge.IPv6,
	}
	if bridge.IPv4Nat {
		config["ipv4.nat"] = "true"
	}
	if bridge.IPv6Nat {
		config["ipv6.nat"] = "true"
	}

	network := api.NetworksPost{
		Name: bridge.Name}
	network.Config = config
	data.Networks = []api.NetworksPost{network}

	if len(data.Profiles) == 0 {
		return fmt.Errorf("error: profile 'default' profile not found")
	}

	// Attach the bridge as the eth0 device of the default profile,
	// unless such a device already exists.
	profile := data.Profiles[0]
	if err := cmd.profileDeviceAlreadyExists(&profile, "eth0"); err != nil {
		return err
	}
	profile.Devices["eth0"] = map[string]string{
		"type":    "nic",
		"nictype": "bridged",
		"parent":  bridge.Name,
	}
	return nil
}
// Apply the configuration specified in the given init data. Changes are
// applied in order (server config, pools, networks, profiles); if any
// step fails, all previously applied steps are rolled back.
func (cmd *CmdInit) apply(client lxd.ContainerServer, data *cmdInitData) error {
	// Functions that should be invoked to revert back to initial
	// state any change that was successfully applied, in case
	// anything goes wrong after that change.
	reverters := make([]reverter, 0)

	// Functions to apply the desired changes.
	changers := make([](func() (reverter, error)), 0)

	// Server config changer.
	changers = append(changers, func() (reverter, error) {
		return cmd.initConfig(client, data.Config)
	})

	// Storage pool changers.
	for i := range data.Pools {
		pool := data.Pools[i] // Local variable for the closure
		changers = append(changers, func() (reverter, error) {
			return cmd.initPool(client, pool)
		})
	}

	// Network changers.
	for i := range data.Networks {
		network := data.Networks[i] // Local variable for the closure
		changers = append(changers, func() (reverter, error) {
			return cmd.initNetwork(client, network)
		})
	}

	// Profile changers.
	for i := range data.Profiles {
		profile := data.Profiles[i] // Local variable for the closure
		changers = append(changers, func() (reverter, error) {
			return cmd.initProfile(client, profile)
		})
	}

	// Apply all changes. If anything goes wrong at any iteration
	// of the loop, we'll try to revert any change performed in
	// earlier iterations.
	for _, changer := range changers {
		reverter, err := changer()
		if err != nil {
			cmd.revert(reverters)
			return err
		}
		// Save the revert function for later.
		reverters = append(reverters, reverter)
	}
	return nil
}
// Try to revert the state to what it was before running the "lxd init" command.
func (cmd *CmdInit) revert(reverters []reverter) {
	for _, undo := range reverters {
		if err := undo(); err != nil {
			// Give up on the first failed rollback step.
			logger.Warnf("Reverting to pre-init state failed: %s", err)
			break
		}
	}
}
// Apply the server-level configuration in the given map and return a
// function that restores the previous configuration.
func (cmd *CmdInit) initConfig(client lxd.ContainerServer, config map[string]interface{}) (reverter, error) {
	server, etag, err := client.GetServer()
	if err != nil {
		return nil, err
	}

	// Build a function that can be used to revert the config to
	// its original values.
	undo := func() error {
		return client.UpdateServer(server.Writable(), "")
	}

	// The underlying code expects all values to be string, even if when
	// using preseed the yaml.v2 package unmarshals them as integers.
	for key, value := range config {
		if number, ok := value.(int); ok {
			config[key] = strconv.Itoa(number)
		} else {
			config[key] = value
		}
	}

	err = client.UpdateServer(api.ServerPut{Config: config}, etag)
	if err != nil {
		return nil, err
	}

	// Updating the server was successful, so return the reverter function
	// in case it's needed later.
	return undo, nil
}
// Create or update a single pool, and return a revert function in case of success.
func (cmd *CmdInit) initPool(client lxd.ContainerServer, pool api.StoragePoolsPost) (reverter, error) {
	current, _, err := client.GetStoragePool(pool.Name)

	var undo reverter
	if err != nil {
		// The pool doesn't exist yet: create it from scratch.
		undo, err = cmd.initPoolCreate(client, pool)
	} else {
		// The pool already exists: update it in place.
		undo, err = cmd.initPoolUpdate(client, pool, current.Writable())
	}
	if err != nil {
		return nil, err
	}
	return undo, nil
}
// Create a single new pool, and return a revert function to delete it.
func (cmd *CmdInit) initPoolCreate(client lxd.ContainerServer, pool api.StoragePoolsPost) (reverter, error) {
	err := client.CreateStoragePool(pool)
	undo := func() error {
		return client.DeleteStoragePool(pool.Name)
	}
	return undo, err
}
// Update a single pool, and return a function that can be used to
// revert it to its original state.
func (cmd *CmdInit) initPoolUpdate(client lxd.ContainerServer, pool api.StoragePoolsPost, currentPool api.StoragePoolPut) (reverter, error) {
	undo := func() error {
		return client.UpdateStoragePool(pool.Name, currentPool, "")
	}
	update := api.StoragePoolPut{Config: pool.Config}
	return undo, client.UpdateStoragePool(pool.Name, update, "")
}
// Create or update a single network, and return a revert function in case of success.
func (cmd *CmdInit) initNetwork(client lxd.ContainerServer, network api.NetworksPost) (reverter, error) {
	current, _, err := client.GetNetwork(network.Name)

	var undo reverter
	if err != nil {
		// The network doesn't exist yet: create it from scratch.
		undo, err = cmd.initNetworkCreate(client, network)
	} else {
		// Sanity check, make sure the network type being updated
		// is still "bridge", which is the only type the existing
		// network can have.
		if network.Type != "" && network.Type != "bridge" {
			return nil, fmt.Errorf("Only 'bridge' type networks are supported")
		}
		undo, err = cmd.initNetworkUpdate(client, network, current.Writable())
	}
	if err != nil {
		return nil, err
	}
	return undo, nil
}
// Create a single new network, and return a revert function to delete it.
func (cmd *CmdInit) initNetworkCreate(client lxd.ContainerServer, network api.NetworksPost) (reverter, error) {
	err := client.CreateNetwork(network)
	undo := func() error {
		return client.DeleteNetwork(network.Name)
	}
	return undo, err
}
// Update a single network, and return a function that can be used to
// revert it to its original state.
func (cmd *CmdInit) initNetworkUpdate(client lxd.ContainerServer, network api.NetworksPost, currentNetwork api.NetworkPut) (reverter, error) {
	undo := func() error {
		return client.UpdateNetwork(network.Name, currentNetwork, "")
	}
	update := api.NetworkPut{Config: network.Config}
	return undo, client.UpdateNetwork(network.Name, update, "")
}
// Create or update a single profile, and return a revert function in case of success.
func (cmd *CmdInit) initProfile(client lxd.ContainerServer, profile api.ProfilesPost) (reverter, error) {
	current, _, err := client.GetProfile(profile.Name)

	var undo reverter
	if err != nil {
		// The profile doesn't exist yet: create it from scratch.
		undo, err = cmd.initProfileCreate(client, profile)
	} else {
		// The profile already exists: update it in place.
		undo, err = cmd.initProfileUpdate(client, profile, current.Writable())
	}
	if err != nil {
		return nil, err
	}
	return undo, nil
}
// Create a single new profile, and return a revert function to delete it.
func (cmd *CmdInit) initProfileCreate(client lxd.ContainerServer, profile api.ProfilesPost) (reverter, error) {
	err := client.CreateProfile(profile)
	undo := func() error {
		return client.DeleteProfile(profile.Name)
	}
	return undo, err
}
// Update a single profile, and return a function that can be used to
// revert it to its original state.
func (cmd *CmdInit) initProfileUpdate(client lxd.ContainerServer, profile api.ProfilesPost, currentProfile api.ProfilePut) (reverter, error) {
	undo := func() error {
		return client.UpdateProfile(profile.Name, currentProfile, "")
	}
	update := api.ProfilePut{
		Config:      profile.Config,
		Description: profile.Description,
		Devices:     profile.Devices,
	}
	return undo, client.UpdateProfile(profile.Name, update, "")
}
// Check that the arguments passed via command line are consistent,
// and no invalid combination is provided.
func (cmd *CmdInit) validateArgs() error {
	args := cmd.Args
	if args.Auto && args.Preseed {
		return fmt.Errorf("Non-interactive mode supported by only one of --auto or --preseed")
	}
	if args.Auto {
		return nil
	}

	// Without --auto, none of the auto-specific options may be set.
	autoOptionsUsed := args.StorageBackend != "" ||
		args.StorageCreateDevice != "" ||
		args.StorageCreateLoop != -1 ||
		args.StorageDataset != "" ||
		args.NetworkAddress != "" ||
		args.NetworkPort != -1 ||
		args.TrustPassword != ""
	if autoOptionsUsed {
		return fmt.Errorf("Init configuration is only valid with --auto")
	}
	return nil
}
// Check that the arguments passed along with --auto are valid and consistent.
// and no invalid combination is provided.
func (cmd *CmdInit) validateArgsAuto(availableStoragePoolsDrivers []string) error {
	backend := cmd.Args.StorageBackend
	if !shared.StringInSlice(backend, supportedStoragePoolDrivers) {
		return fmt.Errorf("The requested backend '%s' isn't supported by lxd init.", backend)
	}
	if !shared.StringInSlice(backend, availableStoragePoolsDrivers) {
		return fmt.Errorf("The requested backend '%s' isn't available on your system (missing tools).", backend)
	}

	switch backend {
	case "dir":
		// The "dir" backend takes no device/loop/dataset options.
		if cmd.Args.StorageCreateLoop != -1 || cmd.Args.StorageCreateDevice != "" || cmd.Args.StorageDataset != "" {
			return fmt.Errorf("None of --storage-pool, --storage-create-device or --storage-create-loop may be used with the 'dir' backend.")
		}
	default:
		// A device and a loop file are mutually exclusive.
		if cmd.Args.StorageCreateLoop != -1 && cmd.Args.StorageCreateDevice != "" {
			return fmt.Errorf("Only one of --storage-create-device or --storage-create-loop can be specified.")
		}
	}

	if cmd.Args.NetworkAddress == "" {
		if cmd.Args.NetworkPort != -1 {
			return fmt.Errorf("--network-port cannot be used without --network-address.")
		}
		if cmd.Args.TrustPassword != "" {
			return fmt.Errorf("--trust-password cannot be used without --network-address.")
		}
	}
	return nil
}
// Return the available storage pools drivers (depending on installed tools).
func (cmd *CmdInit) availableStoragePoolsDrivers() []string {
	// "dir" is always available.
	drivers := []string{"dir"}

	for _, driver := range supportedStoragePoolDrivers {
		if driver == "dir" {
			continue
		}

		// btrfs can work in user namespaces too. (If
		// source=/some/path/on/btrfs is used.)
		if cmd.RunningInUserns && driver != "btrfs" {
			continue
		}

		// Probe the driver by initializing a core storage interface;
		// skip it if the required tools are missing.
		if _, err := storageCoreInit(driver); err != nil {
			continue
		}
		drivers = append(drivers, driver)
	}

	return drivers
}
// Return an error if the given profile has already a device with the
// given name.
func (cmd *CmdInit) profileDeviceAlreadyExists(profile *api.ProfilesPost, deviceName string) error {
	if _, found := profile.Devices[deviceName]; found {
		return fmt.Errorf("Device already exists: %s", deviceName)
	}
	return nil
}
// Ask if the user wants to create a new storage pool, and return
// the relevant parameters if so.
func (cmd *CmdInit) askStorage(client lxd.ContainerServer, existingPools []string, availableBackends []string) (*cmdInitStorageParams, error) {
	if !cmd.Context.AskBool("Do you want to configure a new storage pool (yes/no) [default=yes]? ", "yes") {
		return nil, nil
	}
	storage := &cmdInitStorageParams{
		Config: map[string]string{},
	}

	// Prefer zfs as the default backend when it's available.
	defaultStorage := "dir"
	if shared.StringInSlice("zfs", availableBackends) {
		defaultStorage = "zfs"
	}
	for {
		storage.Pool = cmd.Context.AskString("Name of the new storage pool [default=default]: ", "default", nil)
		if shared.StringInSlice(storage.Pool, existingPools) {
			fmt.Printf("The requested storage pool \"%s\" already exists. Please choose another name.\n", storage.Pool)
			// Ask the user again if he wants to create a
			// storage pool.
			continue
		}
		storage.Backend = cmd.Context.AskChoice(fmt.Sprintf("Name of the storage backend to use (%s) [default=%s]: ", strings.Join(availableBackends, ", "), defaultStorage), supportedStoragePoolDrivers, defaultStorage)

		// XXX The following two checks don't make much sense, since
		// AskChoice will always re-ask the question if the answer
		// is not among supportedStoragePoolDrivers. It seems legacy
		// code that we should drop?
		if !shared.StringInSlice(storage.Backend, supportedStoragePoolDrivers) {
			return nil, fmt.Errorf("The requested backend '%s' isn't supported by lxd init.", storage.Backend)
		}

		// XXX Instead of manually checking if the provided choice is
		// among availableBackends, we could just pass to askChoice the
		// availableBackends list instead of supportedStoragePoolDrivers.
		if !shared.StringInSlice(storage.Backend, availableBackends) {
			return nil, fmt.Errorf("The requested backend '%s' isn't available on your system (missing tools).", storage.Backend)
		}

		// The "dir" backend needs no further questions.
		if storage.Backend == "dir" {
			break
		}

		storage.LoopSize = -1
		question := fmt.Sprintf("Create a new %s pool (yes/no) [default=yes]? ", strings.ToUpper(storage.Backend))
		if cmd.Context.AskBool(question, "yes") {
			if storage.Backend == "ceph" {
				// Pool configuration.
				// BUG FIX: the condition used to be "!= nil", which reset a
				// populated map instead of initializing a nil one.
				if storage.Config == nil {
					storage.Config = map[string]string{}
				}

				// ask for the name of the cluster
				storage.Config["ceph.cluster_name"] = cmd.Context.AskString("Name of the existing CEPH cluster [default=ceph]: ", "ceph", nil)

				// ask for the name of the osd pool
				storage.Config["ceph.osd.pool_name"] = cmd.Context.AskString("Name of the OSD storage pool [default=lxd]: ", "lxd", nil)

				// ask for the number of placement groups
				storage.Config["ceph.osd.pg_num"] = cmd.Context.AskString("Number of placement groups [default=32]: ", "32", nil)
			} else if cmd.Context.AskBool("Would you like to use an existing block device (yes/no) [default=no]? ", "no") {
				// Validator: the given path must be a block device.
				deviceExists := func(path string) error {
					if !shared.IsBlockdevPath(path) {
						return fmt.Errorf("'%s' is not a block device", path)
					}
					return nil
				}
				storage.Device = cmd.Context.AskString("Path to the existing block device: ", "", deviceExists)
			} else {
				backingFs, err := util.FilesystemDetect(shared.VarPath())
				if err == nil && storage.Backend == "btrfs" && backingFs == "btrfs" {
					// On a btrfs filesystem a subvolume can be used
					// instead of a loop file.
					if cmd.Context.AskBool("Would you like to create a new subvolume for the BTRFS storage pool (yes/no) [default=yes]: ", "yes") {
						storage.Dataset = shared.VarPath("storage-pools", storage.Pool)
					}
				} else {
					st := syscall.Statfs_t{}
					err := syscall.Statfs(shared.VarPath(), &st)
					if err != nil {
						return nil, fmt.Errorf("couldn't statfs %s: %s", shared.VarPath(), err)
					}

					/* choose 15 GB < x < 100GB, where x is 20% of the disk size */
					defaultSize := uint64(st.Frsize) * st.Blocks / (1024 * 1024 * 1024) / 5
					if defaultSize > 100 {
						defaultSize = 100
					}
					if defaultSize < 15 {
						defaultSize = 15
					}

					question := fmt.Sprintf("Size in GB of the new loop device (1GB minimum) [default=%dGB]: ", defaultSize)
					storage.LoopSize = cmd.Context.AskInt(question, 1, -1, fmt.Sprintf("%d", defaultSize))
				}
			}
		} else {
			if storage.Backend == "ceph" {
				// Pool configuration.
				// BUG FIX: same inverted nil check as above.
				if storage.Config == nil {
					storage.Config = map[string]string{}
				}

				// ask for the name of the cluster
				storage.Config["ceph.cluster_name"] = cmd.Context.AskString("Name of the existing CEPH cluster [default=ceph]: ", "ceph", nil)

				// ask for the name of the existing pool
				storage.Config["source"] = cmd.Context.AskString("Name of the existing OSD storage pool [default=lxd]: ", "lxd", nil)
				storage.Config["ceph.osd.pool_name"] = storage.Config["source"]
			} else {
				question := fmt.Sprintf("Name of the existing %s pool or dataset: ", strings.ToUpper(storage.Backend))
				storage.Dataset = cmd.Context.AskString(question, "", nil)
			}
		}

		if storage.Backend == "lvm" {
			_, err := exec.LookPath("thin_check")
			if err != nil {
				fmt.Printf(`
The LVM thin provisioning tools couldn't be found. LVM can still be used
without thin provisioning but this will disable over-provisioning,
increase the space requirements and creation time of images, containers
and snapshots.
If you wish to use thin provisioning, abort now, install the tools from
your Linux distribution and run "lxd init" again afterwards.
`)
				if !cmd.Context.AskBool("Do you want to continue without thin provisioning? (yes/no) [default=yes]: ", "yes") {
					return nil, fmt.Errorf("The LVM thin provisioning tools couldn't be found on the system.")
				}

				storage.Config["lvm.use_thinpool"] = "false"
			}
		}
		break
	}
	return storage, nil
}
// If we detect that we are running inside an unprivileged container,
// ask if the user wants to the default profile to be a privileged
// one. Returns -1 (leave untouched), 0 (unprivileged) or 1 (privileged).
func (cmd *CmdInit) askDefaultPrivileged() int {
	// -1 means "don't touch the profile".
	defaultPrivileged := -1

	// Detect lack of uid/gid allocations usable for containers.
	needPrivileged := false
	idmapset, err := idmap.DefaultIdmapSet()
	if err != nil || len(idmapset.Idmap) == 0 || idmapset.Usable() != nil {
		needPrivileged = true
	}

	// Only ask when we're nested AND have no usable id map.
	if cmd.RunningInUserns && needPrivileged {
		fmt.Printf(`
We detected that you are running inside an unprivileged container.
This means that unless you manually configured your host otherwise,
you will not have enough uid and gid to allocate to your containers.
LXD can re-use your container's own allocation to avoid the problem.
Doing so makes your nested containers slightly less safe as they could
in theory attack their parent container and gain more privileges than
they otherwise would.
`)
		if cmd.Context.AskBool("Would you like to have your containers share their parent's allocation (yes/no) [default=yes]? ", "yes") {
			defaultPrivileged = 1
		} else {
			defaultPrivileged = 0
		}
	}
	return defaultPrivileged
}
// Ask if the user wants to expose LXD over the network, and collect
// the relevant parameters if so. Returns nil when networking was declined.
func (cmd *CmdInit) askNetworking() *cmdInitNetworkingParams {
	if !cmd.Context.AskBool("Would you like LXD to be available over the network (yes/no) [default=no]? ", "no") {
		return nil
	}
	networking := &cmdInitNetworkingParams{}

	// Validator: accept "all" or any parseable IP address.
	isIPAddress := func(s string) error {
		if s != "all" && net.ParseIP(s) == nil {
			return fmt.Errorf("'%s' is not an IP address", s)
		}
		return nil
	}

	networking.Address = cmd.Context.AskString("Address to bind LXD to (not including port) [default=all]: ", "all", isIPAddress)
	// "all" maps to the IPv6 wildcard address.
	if networking.Address == "all" {
		networking.Address = "::"
	}

	// IPv6 literals must be bracketed before a port can be appended.
	if net.ParseIP(networking.Address).To4() == nil {
		networking.Address = fmt.Sprintf("[%s]", networking.Address)
	}
	networking.Port = cmd.Context.AskInt("Port to bind LXD to [default=8443]: ", 1, 65535, "8443")
	networking.TrustPassword = cmd.Context.AskPassword("Trust password for new clients: ", cmd.PasswordReader)

	return networking
}
// Ask if the user wants images to be automatically refreshed.
func (cmd *CmdInit) askImages() bool {
	question := "Would you like stale cached images to be updated automatically (yes/no) [default=yes]? "
	return cmd.Context.AskBool(question, "yes")
}
// Ask if the user wants to create a new network bridge, and return
// the relevant parameters if so. Returns nil when the bridge was declined.
func (cmd *CmdInit) askBridge(client lxd.ContainerServer) *cmdInitBridgeParams {
	if !cmd.Context.AskBool("Would you like to create a new network bridge (yes/no) [default=yes]? ", "yes") {
		return nil
	}
	bridge := &cmdInitBridgeParams{}
	for {
		bridge.Name = cmd.Context.AskString("What should the new bridge be called [default=lxdbr0]? ", "lxdbr0", networkValidName)
		_, _, err := client.GetNetwork(bridge.Name)
		if err == nil {
			fmt.Printf("The requested network bridge \"%s\" already exists. Please choose another name.\n", bridge.Name)
			// Ask the user again if he wants to create a
			// network bridge.
			continue
		}
		// IPv4 subnet: "auto" and "none" are accepted as-is, anything
		// else must be a valid CIDR subnet.
		bridge.IPv4 = cmd.Context.AskString("What IPv4 address should be used (CIDR subnet notation, “auto” or “none”) [default=auto]? ", "auto", func(value string) error {
			if shared.StringInSlice(value, []string{"auto", "none"}) {
				return nil
			}
			return networkValidAddressCIDRV4(value)
		})
		// NAT only makes sense for an explicit subnet.
		if !shared.StringInSlice(bridge.IPv4, []string{"auto", "none"}) {
			bridge.IPv4Nat = cmd.Context.AskBool("Would you like LXD to NAT IPv4 traffic on your bridge? [default=yes]? ", "yes")
		}
		// Same questions for IPv6.
		bridge.IPv6 = cmd.Context.AskString("What IPv6 address should be used (CIDR subnet notation, “auto” or “none”) [default=auto]? ", "auto", func(value string) error {
			if shared.StringInSlice(value, []string{"auto", "none"}) {
				return nil
			}
			return networkValidAddressCIDRV6(value)
		})
		if !shared.StringInSlice(bridge.IPv6, []string{"auto", "none"}) {
			bridge.IPv6Nat = cmd.Context.AskBool("Would you like LXD to NAT IPv6 traffic on your bridge? [default=yes]? ", "yes")
		}
		break
	}
	return bridge
}
// Defines the schema for all possible configuration knobs supported by the
// lxd init command, either directly fed via --preseed or populated by
// the auto/interactive modes.
type cmdInitData struct {
	api.ServerPut `yaml:",inline"` // Server-level config, inlined at the YAML top level

	Pools    []api.StoragePoolsPost `yaml:"storage_pools"` // Storage pools to create or update
	Networks []api.NetworksPost     // Networks to create or update
	Profiles []api.ProfilesPost     // Profiles to create or update
}
// Parameters needed when creating a storage pool in interactive or auto
// mode.
type cmdInitStorageParams struct {
	Backend  string            // One of supportedStoragePoolDrivers
	LoopSize int64             // Size in GB of the loop device (-1 when unused)
	Device   string            // Path to an existing block device
	Pool     string            // Name of the pool to create
	Dataset  string            // Name of an existing dataset/pool to reuse (e.g. ZFS)
	Config   map[string]string // Additional pool configuration
}
// Parameters needed when configuring the LXD server networking options in interactive
// mode or auto mode.
type cmdInitNetworkingParams struct {
	Address       string // Address to bind to (IPv6 literals already bracketed)
	Port          int64  // TCP port to listen on
	TrustPassword string // Trust password for new clients
}
// Parameters needed when creating a bridge network device in interactive
// mode.
type cmdInitBridgeParams struct {
	Name    string // Bridge name
	IPv4    string // IPv4 address (CIDR subnet, "auto" or "none")
	IPv4Nat bool   // Whether to NAT IPv4 traffic on the bridge
	IPv6    string // IPv6 address (CIDR subnet, "auto" or "none")
	IPv6Nat bool   // Whether to NAT IPv6 traffic on the bridge
}
// reverter is a shortcut for closure/anonymous functions that are meant to revert
// some change, and that are passed around as parameters.
type reverter func() error
// cmdInit is the entry point of the "lxd init" subcommand: it wires up a
// CmdInit with the default console context, the parsed command line
// arguments and the real terminal password reader, then runs it.
func cmdInit(args *Args) error {
	command := &CmdInit{
		Context:         cmd.DefaultContext(),
		Args:            args,
		RunningInUserns: shared.RunningInUserNS(),
		SocketPath:      "", // Empty path: let the client pick the default LXD unix socket.
		PasswordReader:  terminal.ReadPassword,
	}
	return command.Run()
}
init: Re-order btrfs questions
Signed-off-by: Stéphane Graber <089afc6d81f66f1168a9849e15660feae286e024@ubuntu.com>
package main
import (
"fmt"
"net"
"os/exec"
"strconv"
"strings"
"syscall"
"golang.org/x/crypto/ssh/terminal"
"github.com/lxc/lxd/client"
"github.com/lxc/lxd/lxd/util"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/cmd"
"github.com/lxc/lxd/shared/idmap"
"github.com/lxc/lxd/shared/logger"
)
// CmdInit implements the "lxd init" command line.
type CmdInit struct {
	Context         *cmd.Context              // Console I/O abstraction used to ask questions and print output.
	Args            *Args                     // Parsed command line arguments.
	RunningInUserns bool                      // True when LXD itself runs inside a user namespace.
	SocketPath      string                    // Path of the LXD unix socket to connect to ("" for the default).
	PasswordReader  func(int) ([]byte, error) // Reads a password from the given file descriptor.
}
// Run triggers the execution of the init command.
//
// It validates the command line flags, connects to the local LXD daemon,
// collects the desired configuration (from preseed YAML, --auto flags or
// interactive questions) and finally applies it to the daemon.
func (cmd *CmdInit) Run() error {
	// Check that command line arguments don't conflict with each other
	err := cmd.validateArgs()
	if err != nil {
		return err
	}

	// Connect to LXD
	client, err := lxd.ConnectLXDUnix(cmd.SocketPath, nil)
	if err != nil {
		return fmt.Errorf("Unable to talk to LXD: %s", err)
	}

	existingPools, err := client.GetStoragePoolNames()
	if err != nil {
		// We should consider this fatal since this means
		// something's wrong with the daemon.
		return err
	}

	data := &cmdInitData{}

	// Kick off the appropriate way to fill the data (either
	// preseed, auto or interactive).
	if cmd.Args.Preseed {
		err = cmd.fillDataPreseed(data, client)
	} else {
		// Copy the data from the current server config.
		// NOTE(review): the returned error is ignored here; presumably
		// intentional (best effort), but worth confirming.
		cmd.fillDataWithCurrentServerConfig(data, client)

		// Copy the data from the current default profile, if it exists.
		cmd.fillDataWithCurrentDefaultProfile(data, client)

		// Figure what storage drivers among the supported ones are actually
		// available on this system.
		backendsAvailable := cmd.availableStoragePoolsDrivers()

		if cmd.Args.Auto {
			err = cmd.fillDataAuto(data, client, backendsAvailable, existingPools)
		} else {
			err = cmd.fillDataInteractive(data, client, backendsAvailable, existingPools)
		}
	}
	if err != nil {
		return err
	}

	// Apply the desired configuration.
	err = cmd.apply(client, data)
	if err != nil {
		return err
	}

	cmd.Context.Output("LXD has been successfully configured.\n")
	return nil
}
// Fill the given configuration data with parameters collected from
// the --auto command line.
//
// Networking is configured only when --network-address was provided, and
// storage only when the daemon has no pools yet.
func (cmd *CmdInit) fillDataAuto(data *cmdInitData, client lxd.ContainerServer, backendsAvailable []string, existingPools []string) error {
	if cmd.Args.StorageBackend == "" {
		// Default to the plain directory backend.
		cmd.Args.StorageBackend = "dir"
	}
	err := cmd.validateArgsAuto(backendsAvailable)
	if err != nil {
		return err
	}

	if cmd.Args.NetworkAddress != "" {
		// If no port was provided, use the default one
		if cmd.Args.NetworkPort == -1 {
			cmd.Args.NetworkPort = 8443
		}
		networking := &cmdInitNetworkingParams{
			Address:       cmd.Args.NetworkAddress,
			Port:          cmd.Args.NetworkPort,
			TrustPassword: cmd.Args.TrustPassword,
		}
		cmd.fillDataWithNetworking(data, networking)
	}

	if len(existingPools) == 0 {
		storage := &cmdInitStorageParams{
			Backend:  cmd.Args.StorageBackend,
			LoopSize: cmd.Args.StorageCreateLoop,
			Device:   cmd.Args.StorageCreateDevice,
			Dataset:  cmd.Args.StorageDataset,
			Pool:     "default",
		}
		err = cmd.fillDataWithStorage(data, storage, existingPools)
		if err != nil {
			return err
		}
	}
	return nil
}
// Fill the given configuration data with parameters collected with
// interactive questions.
//
// All questions are asked first; the answers are then translated into the
// init data structures.
func (cmd *CmdInit) fillDataInteractive(data *cmdInitData, client lxd.ContainerServer, backendsAvailable []string, existingPools []string) error {
	storage, err := cmd.askStorage(client, existingPools, backendsAvailable)
	if err != nil {
		return err
	}
	defaultPrivileged := cmd.askDefaultPrivileged()
	networking := cmd.askNetworking()
	imagesAutoUpdate := cmd.askImages()
	bridge := cmd.askBridge(client)

	// Managed bridges need dnsmasq; fail early when it's not installed.
	_, err = exec.LookPath("dnsmasq")
	if err != nil && bridge != nil {
		return fmt.Errorf("LXD managed bridges require \"dnsmasq\". Install it and try again.")
	}

	err = cmd.fillDataWithStorage(data, storage, existingPools)
	if err != nil {
		return err
	}
	err = cmd.fillDataWithDefaultPrivileged(data, defaultPrivileged)
	if err != nil {
		return err
	}
	cmd.fillDataWithNetworking(data, networking)
	cmd.fillDataWithImages(data, imagesAutoUpdate)
	err = cmd.fillDataWithBridge(data, bridge)
	if err != nil {
		return err
	}
	return nil
}
// Fill the given configuration data from the preseed YAML text stream.
//
// The client parameter is unused but kept for signature symmetry with the
// other fillData* helpers.
func (cmd *CmdInit) fillDataPreseed(data *cmdInitData, client lxd.ContainerServer) error {
	err := cmd.Context.InputYAML(data)
	if err != nil {
		// Include the underlying parse error: a bare "invalid content"
		// message makes a broken preseed file very hard to debug.
		return fmt.Errorf("Invalid preseed YAML content: %v", err)
	}
	return nil
}
// Fill the given data with the current server configuration.
//
// The writable subset of the server config becomes the baseline that the
// auto/interactive answers are later merged into.
func (cmd *CmdInit) fillDataWithCurrentServerConfig(data *cmdInitData, client lxd.ContainerServer) error {
	server, _, err := client.GetServer()
	if err != nil {
		return err
	}
	data.ServerPut = server.Writable()
	return nil
}
// Fill the given data with the current default profile, if it exists.
//
// A lookup failure is deliberately ignored: a missing "default" profile
// simply leaves data.Profiles empty.
func (cmd *CmdInit) fillDataWithCurrentDefaultProfile(data *cmdInitData, client lxd.ContainerServer) {
	defaultProfile, _, err := client.GetProfile("default")
	if err == nil {
		// Copy the default profile configuration (that we have
		// possibly modified above).
		data.Profiles = []api.ProfilesPost{{Name: "default"}}
		data.Profiles[0].ProfilePut = defaultProfile.ProfilePut
	}
}
// Fill the given init data with a new storage pool structure matching the
// given storage parameters.
//
// Besides appending the pool to data.Pools, on first-time setup (no
// pre-existing pools) this also wires the new pool into the default
// profile's root disk device.
func (cmd *CmdInit) fillDataWithStorage(data *cmdInitData, storage *cmdInitStorageParams, existingPools []string) error {
	if storage == nil {
		// No storage pool was requested.
		return nil
	}

	// Pool configuration
	storagePoolConfig := map[string]string{}
	if storage.Config != nil {
		storagePoolConfig = storage.Config
	}

	if storage.Device != "" {
		// An existing block device becomes the pool source; an explicit
		// dataset name overrides the pool name.
		storagePoolConfig["source"] = storage.Device
		if storage.Dataset != "" {
			storage.Pool = storage.Dataset
		}
	} else if storage.LoopSize != -1 {
		// Loop-device backed pool.
		if storage.Dataset != "" {
			storage.Pool = storage.Dataset
		}
	} else {
		// Reuse an existing dataset as the pool source.
		storagePoolConfig["source"] = storage.Dataset
	}

	if storage.LoopSize > 0 {
		storagePoolConfig["size"] = strconv.FormatInt(storage.LoopSize, 10) + "GB"
	}

	// Create the requested storage pool.
	storageStruct := api.StoragePoolsPost{
		Name:   storage.Pool,
		Driver: storage.Backend,
	}
	storageStruct.Config = storagePoolConfig
	data.Pools = []api.StoragePoolsPost{storageStruct}

	// When lxd init is rerun and there are already storage pools
	// configured, do not try to set a root disk device in the
	// default profile again. Let the user figure this out.
	if len(existingPools) == 0 {
		if len(data.Profiles) != 0 {
			// Note: defaultProfile is a struct copy, but its Devices field
			// is a map, so writes below are visible via data.Profiles[0].
			defaultProfile := data.Profiles[0]
			foundRootDiskDevice := false
			for k, v := range defaultProfile.Devices {
				if v["path"] == "/" && v["source"] == "" {
					foundRootDiskDevice = true
					// Unconditionally overwrite because if the user ends up
					// with a clean LXD but with a pool property key existing in
					// the default profile it must be empty otherwise it would
					// not have been possible to delete the storage pool in
					// the first place.
					defaultProfile.Devices[k]["pool"] = storage.Pool
					// BUG FIX: the format string has two %s verbs but only one
					// argument was passed, which printed %!s(MISSING); pass the
					// device name as the first argument.
					logger.Debugf("Set pool property of existing root disk device \"%s\" in profile \"default\" to \"%s\".", k, storage.Pool)
					break
				}
			}

			if !foundRootDiskDevice {
				err := cmd.profileDeviceAlreadyExists(&defaultProfile, "root")
				if err != nil {
					return err
				}

				defaultProfile.Devices["root"] = map[string]string{
					"type": "disk",
					"path": "/",
					"pool": storage.Pool,
				}
			}
		} else {
			logger.Warnf("Did not find profile \"default\" so no default storage pool will be set. Manual intervention needed.")
		}
	}

	return nil
}
// Fill the default profile in the given init data with options about whether
// to run in privileged mode.
//
// defaultPrivileged is tri-state: -1 leaves the profile untouched, 0 sets
// security.privileged to the empty string, 1 sets it to "true".
func (cmd *CmdInit) fillDataWithDefaultPrivileged(data *cmdInitData, defaultPrivileged int) error {
	if defaultPrivileged == -1 {
		return nil
	}
	if len(data.Profiles) == 0 {
		return fmt.Errorf("error: profile 'default' profile not found")
	}
	// Note: defaultProfile is a struct copy, but its Config field is a map,
	// so the writes below are visible through data.Profiles[0] as well.
	defaultProfile := data.Profiles[0]
	if defaultPrivileged == 0 {
		defaultProfile.Config["security.privileged"] = ""
	} else if defaultPrivileged == 1 {
		defaultProfile.Config["security.privileged"] = "true"
	}
	return nil
}
// Fill the given init data with server config details matching the
// given networking parameters.
//
// A nil networking means "don't expose LXD over the network" and leaves the
// config untouched. An empty trust password is not written to the config.
func (cmd *CmdInit) fillDataWithNetworking(data *cmdInitData, networking *cmdInitNetworkingParams) {
	if networking == nil {
		return
	}
	data.Config["core.https_address"] = fmt.Sprintf("%s:%d", networking.Address, networking.Port)
	if networking.TrustPassword != "" {
		data.Config["core.trust_password"] = networking.TrustPassword
	}
}
// Fill the given init data with server config details matching the
// given images auto update choice. Disabling auto update pins the
// interval to "0"; enabling it clears a previously-pinned "0".
func (cmd *CmdInit) fillDataWithImages(data *cmdInitData, imagesAutoUpdate bool) {
	if !imagesAutoUpdate {
		data.Config["images.auto_update_interval"] = "0"
		return
	}
	current, present := data.Config["images.auto_update_interval"]
	if present && current == "0" {
		data.Config["images.auto_update_interval"] = ""
	}
}
// Fill the given init data with a new bridge network device structure
// matching the given storage parameters.
//
// A nil bridge means the user declined to create one. The bridge is also
// attached as the "eth0" NIC of the default profile, which must already
// be present in data.Profiles.
func (cmd *CmdInit) fillDataWithBridge(data *cmdInitData, bridge *cmdInitBridgeParams) error {
	if bridge == nil {
		return nil
	}

	bridgeConfig := map[string]string{}
	bridgeConfig["ipv4.address"] = bridge.IPv4
	bridgeConfig["ipv6.address"] = bridge.IPv6

	// Only set the NAT keys when NAT was requested.
	if bridge.IPv4Nat {
		bridgeConfig["ipv4.nat"] = "true"
	}
	if bridge.IPv6Nat {
		bridgeConfig["ipv6.nat"] = "true"
	}

	network := api.NetworksPost{
		Name: bridge.Name}
	network.Config = bridgeConfig
	data.Networks = []api.NetworksPost{network}

	if len(data.Profiles) == 0 {
		return fmt.Errorf("error: profile 'default' profile not found")
	}

	// Attach the bridge as eth0 device of the default profile, if such
	// device doesn't exists yet.
	defaultProfile := data.Profiles[0]
	err := cmd.profileDeviceAlreadyExists(&defaultProfile, "eth0")
	if err != nil {
		return err
	}
	defaultProfile.Devices["eth0"] = map[string]string{
		"type":    "nic",
		"nictype": "bridged",
		"parent":  bridge.Name,
	}
	return nil
}
// Apply the configuration specified in the given init data.
//
// Changes are applied in order (server config, pools, networks, profiles).
// If any step fails, the steps already applied are rolled back via their
// reverter functions before the error is returned.
func (cmd *CmdInit) apply(client lxd.ContainerServer, data *cmdInitData) error {
	// Functions that should be invoked to revert back to initial
	// state any change that was successfully applied, in case
	// anything goes wrong after that change.
	reverters := make([]reverter, 0)

	// Functions to apply the desired changes.
	changers := make([](func() (reverter, error)), 0)

	// Server config changer
	changers = append(changers, func() (reverter, error) {
		return cmd.initConfig(client, data.Config)
	})

	// Storage pool changers
	for i := range data.Pools {
		pool := data.Pools[i] // Local variable for the closure
		changers = append(changers, func() (reverter, error) {
			return cmd.initPool(client, pool)
		})
	}

	// Network changers
	for i := range data.Networks {
		network := data.Networks[i] // Local variable for the closure
		changers = append(changers, func() (reverter, error) {
			return cmd.initNetwork(client, network)
		})
	}

	// Profile changers
	for i := range data.Profiles {
		profile := data.Profiles[i] // Local variable for the closure
		changers = append(changers, func() (reverter, error) {
			return cmd.initProfile(client, profile)
		})
	}

	// Apply all changes. If anything goes wrong at any iteration
	// of the loop, we'll try to revert any change performed in
	// earlier iterations.
	for _, changer := range changers {
		reverter, err := changer()
		if err != nil {
			cmd.revert(reverters)
			return err
		}
		// Save the revert function for later.
		reverters = append(reverters, reverter)
	}

	return nil
}
// Try to revert the state to what it was before running the "lxd init"
// command, by invoking each rollback function in order. Processing stops
// at the first rollback that fails, after logging a warning.
func (cmd *CmdInit) revert(reverters []reverter) {
	for _, undo := range reverters {
		if err := undo(); err != nil {
			logger.Warnf("Reverting to pre-init state failed: %s", err)
			break
		}
	}
}
// Apply the server-level configuration in the given map.
//
// On success, returns a reverter that restores the server's previous
// writable configuration.
func (cmd *CmdInit) initConfig(client lxd.ContainerServer, config map[string]interface{}) (reverter, error) {
	server, etag, err := client.GetServer()
	if err != nil {
		return nil, err
	}

	// Build a function that can be used to revert the config to
	// its original values.
	reverter := func() error {
		return client.UpdateServer(server.Writable(), "")
	}

	// The underlying code expects all values to be string, even if when
	// using preseed the yaml.v2 package unmarshals them as integers.
	for key, value := range config {
		if number, ok := value.(int); ok {
			value = strconv.Itoa(number)
		}
		config[key] = value
	}

	err = client.UpdateServer(api.ServerPut{Config: config}, etag)
	if err != nil {
		return nil, err
	}

	// Updating the server was successful, so return the reverter function
	// in case it's needed later.
	return reverter, nil
}
// Create or update a single pool, and return a revert function in case of success.
func (cmd *CmdInit) initPool(client lxd.ContainerServer, pool api.StoragePoolsPost) (reverter, error) {
	var undo reverter
	var err error
	if currentPool, _, getErr := client.GetStoragePool(pool.Name); getErr == nil {
		// The pool already exists: update it in place.
		undo, err = cmd.initPoolUpdate(client, pool, currentPool.Writable())
	} else {
		// No such pool yet: create it from scratch.
		undo, err = cmd.initPoolCreate(client, pool)
	}
	if err != nil {
		return nil, err
	}
	return undo, nil
}
// Create a single new pool, and return a revert function to delete it.
func (cmd *CmdInit) initPoolCreate(client lxd.ContainerServer, pool api.StoragePoolsPost) (reverter, error) {
	undo := func() error {
		return client.DeleteStoragePool(pool.Name)
	}
	return undo, client.CreateStoragePool(pool)
}
// Update a single pool, and return a function that can be used to
// revert it to its original state.
//
// Both the update and the reverter pass an empty ETag, so they apply
// unconditionally.
func (cmd *CmdInit) initPoolUpdate(client lxd.ContainerServer, pool api.StoragePoolsPost, currentPool api.StoragePoolPut) (reverter, error) {
	reverter := func() error {
		return client.UpdateStoragePool(pool.Name, currentPool, "")
	}
	err := client.UpdateStoragePool(pool.Name, api.StoragePoolPut{
		Config: pool.Config,
	}, "")
	return reverter, err
}
// Create or update a single network, and return a revert function in case of success.
func (cmd *CmdInit) initNetwork(client lxd.ContainerServer, network api.NetworksPost) (reverter, error) {
	var revert func() error
	currentNetwork, _, err := client.GetNetwork(network.Name)
	if err == nil {
		// Sanity check, make sure the network type being updated
		// is still "bridge", which is the only type the existing
		// network can have.
		if network.Type != "" && network.Type != "bridge" {
			return nil, fmt.Errorf("Only 'bridge' type networks are supported")
		}
		revert, err = cmd.initNetworkUpdate(client, network, currentNetwork.Writable())
	} else {
		// The network doesn't exist yet: create it.
		revert, err = cmd.initNetworkCreate(client, network)
	}
	if err != nil {
		return nil, err
	}
	return revert, nil
}
// Create a single new network, and return a revert function to delete it.
// The reverter is returned alongside any creation error.
func (cmd *CmdInit) initNetworkCreate(client lxd.ContainerServer, network api.NetworksPost) (reverter, error) {
	reverter := func() error {
		return client.DeleteNetwork(network.Name)
	}
	err := client.CreateNetwork(network)
	return reverter, err
}
// Update a single network, and return a function that can be used to
// revert it to its original state.
//
// Both the update and the reverter pass an empty ETag, so they apply
// unconditionally.
func (cmd *CmdInit) initNetworkUpdate(client lxd.ContainerServer, network api.NetworksPost, currentNetwork api.NetworkPut) (reverter, error) {
	reverter := func() error {
		return client.UpdateNetwork(network.Name, currentNetwork, "")
	}
	err := client.UpdateNetwork(network.Name, api.NetworkPut{
		Config: network.Config,
	}, "")
	return reverter, err
}
// Create or update a single profile, and return a revert function in case of success.
func (cmd *CmdInit) initProfile(client lxd.ContainerServer, profile api.ProfilesPost) (reverter, error) {
	var reverter func() error
	currentProfile, _, err := client.GetProfile(profile.Name)
	if err == nil {
		// The profile exists: update it in place.
		reverter, err = cmd.initProfileUpdate(client, profile, currentProfile.Writable())
	} else {
		// The profile doesn't exist yet: create it.
		reverter, err = cmd.initProfileCreate(client, profile)
	}
	if err != nil {
		return nil, err
	}
	return reverter, nil
}
// Create a single new profile, and return a revert function to delete it.
// The reverter is returned alongside any creation error.
func (cmd *CmdInit) initProfileCreate(client lxd.ContainerServer, profile api.ProfilesPost) (reverter, error) {
	reverter := func() error {
		return client.DeleteProfile(profile.Name)
	}
	err := client.CreateProfile(profile)
	return reverter, err
}
// Update a single profile, and return a function that can be used to
// revert it to its original state.
//
// Both the update and the reverter pass an empty ETag, so they apply
// unconditionally.
func (cmd *CmdInit) initProfileUpdate(client lxd.ContainerServer, profile api.ProfilesPost, currentProfile api.ProfilePut) (reverter, error) {
	reverter := func() error {
		return client.UpdateProfile(profile.Name, currentProfile, "")
	}
	err := client.UpdateProfile(profile.Name, api.ProfilePut{
		Config:      profile.Config,
		Description: profile.Description,
		Devices:     profile.Devices,
	}, "")
	return reverter, err
}
// Check that the arguments passed via command line are consistent,
// and no invalid combination is provided.
func (cmd *CmdInit) validateArgs() error {
	if cmd.Args.Auto && cmd.Args.Preseed {
		return fmt.Errorf("Non-interactive mode supported by only one of --auto or --preseed")
	}
	// The storage and networking flags only make sense together with --auto.
	if !cmd.Args.Auto {
		if cmd.Args.StorageBackend != "" || cmd.Args.StorageCreateDevice != "" || cmd.Args.StorageCreateLoop != -1 || cmd.Args.StorageDataset != "" || cmd.Args.NetworkAddress != "" || cmd.Args.NetworkPort != -1 || cmd.Args.TrustPassword != "" {
			return fmt.Errorf("Init configuration is only valid with --auto")
		}
	}
	return nil
}
// Check that the arguments passed along with --auto are valid and consistent.
// and no invalid combination is provided.
func (cmd *CmdInit) validateArgsAuto(availableStoragePoolsDrivers []string) error {
	if !shared.StringInSlice(cmd.Args.StorageBackend, supportedStoragePoolDrivers) {
		return fmt.Errorf("The requested backend '%s' isn't supported by lxd init.", cmd.Args.StorageBackend)
	}
	if !shared.StringInSlice(cmd.Args.StorageBackend, availableStoragePoolsDrivers) {
		return fmt.Errorf("The requested backend '%s' isn't available on your system (missing tools).", cmd.Args.StorageBackend)
	}

	if cmd.Args.StorageBackend == "dir" {
		// The dir backend takes no loop device, block device or dataset.
		// NOTE(review): the message names --storage-pool while the check
		// covers StorageDataset — confirm the flag names line up.
		if cmd.Args.StorageCreateLoop != -1 || cmd.Args.StorageCreateDevice != "" || cmd.Args.StorageDataset != "" {
			return fmt.Errorf("None of --storage-pool, --storage-create-device or --storage-create-loop may be used with the 'dir' backend.")
		}
	} else {
		// Loop device and block device are mutually exclusive sources.
		if cmd.Args.StorageCreateLoop != -1 && cmd.Args.StorageCreateDevice != "" {
			return fmt.Errorf("Only one of --storage-create-device or --storage-create-loop can be specified.")
		}
	}

	// Network port and trust password both require an address.
	if cmd.Args.NetworkAddress == "" {
		if cmd.Args.NetworkPort != -1 {
			return fmt.Errorf("--network-port cannot be used without --network-address.")
		}
		if cmd.Args.TrustPassword != "" {
			return fmt.Errorf("--trust-password cannot be used without --network-address.")
		}
	}
	return nil
}
// Return the available storage pools drivers (depending on installed tools).
//
// "dir" is always available; every other supported driver is probed by
// initializing its core storage interface.
func (cmd *CmdInit) availableStoragePoolsDrivers() []string {
	drivers := []string{"dir"}

	// Check available backends
	for _, driver := range supportedStoragePoolDrivers {
		if driver == "dir" {
			continue
		}

		// btrfs can work in user namespaces too. (If
		// source=/some/path/on/btrfs is used.)
		if cmd.RunningInUserns && driver != "btrfs" {
			continue
		}

		// Initialize a core storage interface for the given driver.
		_, err := storageCoreInit(driver)
		if err != nil {
			// The driver's tools are missing or unusable; skip it.
			continue
		}

		drivers = append(drivers, driver)
	}
	return drivers
}
// profileDeviceAlreadyExists returns an error when the given profile already
// contains a device with the given name, and nil otherwise.
func (cmd *CmdInit) profileDeviceAlreadyExists(profile *api.ProfilesPost, deviceName string) error {
	if _, exists := profile.Devices[deviceName]; exists {
		return fmt.Errorf("Device already exists: %s", deviceName)
	}
	return nil
}
// Ask if the user wants to create a new storage pool, and return
// the relevant parameters if so. Returns (nil, nil) when the user
// declines to configure a pool.
func (cmd *CmdInit) askStorage(client lxd.ContainerServer, existingPools []string, availableBackends []string) (*cmdInitStorageParams, error) {
	if !cmd.Context.AskBool("Do you want to configure a new storage pool (yes/no) [default=yes]? ", "yes") {
		return nil, nil
	}
	storage := &cmdInitStorageParams{
		Config: map[string]string{},
	}
	// Prefer ZFS when available, plain directory otherwise.
	defaultStorage := "dir"
	if shared.StringInSlice("zfs", availableBackends) {
		defaultStorage = "zfs"
	}
	for {
		storage.Pool = cmd.Context.AskString("Name of the new storage pool [default=default]: ", "default", nil)
		if shared.StringInSlice(storage.Pool, existingPools) {
			fmt.Printf("The requested storage pool \"%s\" already exists. Please choose another name.\n", storage.Pool)
			// Ask the user again if he wants to create a
			// storage pool.
			continue
		}
		storage.Backend = cmd.Context.AskChoice(fmt.Sprintf("Name of the storage backend to use (%s) [default=%s]: ", strings.Join(availableBackends, ", "), defaultStorage), supportedStoragePoolDrivers, defaultStorage)
		// XXX The following two checks don't make much sense, since
		// AskChoice will always re-ask the question if the answer
		// is not among supportedStoragePoolDrivers. It seems legacy
		// code that we should drop?
		if !shared.StringInSlice(storage.Backend, supportedStoragePoolDrivers) {
			return nil, fmt.Errorf("The requested backend '%s' isn't supported by lxd init.", storage.Backend)
		}
		// XXX Instead of manually checking if the provided choice is
		// among availableBackends, we could just pass to askChoice the
		// availableBackends list instead of supportedStoragePoolDrivers.
		if !shared.StringInSlice(storage.Backend, availableBackends) {
			return nil, fmt.Errorf("The requested backend '%s' isn't available on your system (missing tools).", storage.Backend)
		}
		if storage.Backend == "dir" {
			// The directory backend needs no further configuration.
			break
		}

		// Optimization for btrfs on btrfs
		backingFs, err := util.FilesystemDetect(shared.VarPath())
		if err == nil && storage.Backend == "btrfs" && backingFs == "btrfs" {
			if cmd.Context.AskBool(fmt.Sprintf("Would you like to create a new btrfs subvolume under %s (yes/no) [default=yes]: ", shared.VarPath("")), "yes") {
				storage.Dataset = shared.VarPath("storage-pools", storage.Pool)
				break
			}
		}

		storage.LoopSize = -1
		question := fmt.Sprintf("Create a new %s pool (yes/no) [default=yes]? ", strings.ToUpper(storage.Backend))
		if cmd.Context.AskBool(question, "yes") {
			if storage.Backend == "ceph" {
				// Pool configuration
				// BUG FIX: the nil check was inverted ("!= nil"), which
				// replaced any existing configuration with an empty map
				// instead of initializing a missing one.
				if storage.Config == nil {
					storage.Config = map[string]string{}
				}

				// ask for the name of the cluster
				storage.Config["ceph.cluster_name"] = cmd.Context.AskString("Name of the existing CEPH cluster [default=ceph]: ", "ceph", nil)

				// ask for the name of the osd pool
				storage.Config["ceph.osd.pool_name"] = cmd.Context.AskString("Name of the OSD storage pool [default=lxd]: ", "lxd", nil)

				// ask for the number of placement groups
				storage.Config["ceph.osd.pg_num"] = cmd.Context.AskString("Number of placement groups [default=32]: ", "32", nil)
			} else if cmd.Context.AskBool("Would you like to use an existing block device (yes/no) [default=no]? ", "no") {
				deviceExists := func(path string) error {
					if !shared.IsBlockdevPath(path) {
						return fmt.Errorf("'%s' is not a block device", path)
					}
					return nil
				}
				storage.Device = cmd.Context.AskString("Path to the existing block device: ", "", deviceExists)
			} else {
				st := syscall.Statfs_t{}
				err := syscall.Statfs(shared.VarPath(), &st)
				if err != nil {
					return nil, fmt.Errorf("couldn't statfs %s: %s", shared.VarPath(), err)
				}

				/* choose 15 GB < x < 100GB, where x is 20% of the disk size */
				defaultSize := uint64(st.Frsize) * st.Blocks / (1024 * 1024 * 1024) / 5
				if defaultSize > 100 {
					defaultSize = 100
				}
				if defaultSize < 15 {
					defaultSize = 15
				}

				question := fmt.Sprintf("Size in GB of the new loop device (1GB minimum) [default=%dGB]: ", defaultSize)
				storage.LoopSize = cmd.Context.AskInt(question, 1, -1, fmt.Sprintf("%d", defaultSize))
			}
		} else {
			if storage.Backend == "ceph" {
				// Pool configuration
				// BUG FIX: same inverted nil check as above.
				if storage.Config == nil {
					storage.Config = map[string]string{}
				}

				// ask for the name of the cluster
				storage.Config["ceph.cluster_name"] = cmd.Context.AskString("Name of the existing CEPH cluster [default=ceph]: ", "ceph", nil)

				// ask for the name of the existing pool
				storage.Config["source"] = cmd.Context.AskString("Name of the existing OSD storage pool [default=lxd]: ", "lxd", nil)
				storage.Config["ceph.osd.pool_name"] = storage.Config["source"]
			} else {
				question := fmt.Sprintf("Name of the existing %s pool or dataset: ", strings.ToUpper(storage.Backend))
				storage.Dataset = cmd.Context.AskString(question, "", nil)
			}
		}

		if storage.Backend == "lvm" {
			// Warn about missing thin provisioning tools and offer to
			// continue without thin provisioning.
			_, err := exec.LookPath("thin_check")
			if err != nil {
				fmt.Printf(`
The LVM thin provisioning tools couldn't be found. LVM can still be used
without thin provisioning but this will disable over-provisioning,
increase the space requirements and creation time of images, containers
and snapshots.
If you wish to use thin provisioning, abort now, install the tools from
your Linux distribution and run "lxd init" again afterwards.
`)
				if !cmd.Context.AskBool("Do you want to continue without thin provisioning? (yes/no) [default=yes]: ", "yes") {
					return nil, fmt.Errorf("The LVM thin provisioning tools couldn't be found on the system.")
				}

				storage.Config["lvm.use_thinpool"] = "false"
			}
		}
		break
	}
	return storage, nil
}
// If we detect that we are running inside an unprivileged container,
// ask if the user wants the default profile to be a privileged
// one.
//
// Returns 1 (make privileged), 0 (make unprivileged) or -1 (leave the
// default profile untouched).
func (cmd *CmdInit) askDefaultPrivileged() int {
	// Detect lack of uid/gid
	defaultPrivileged := -1
	needPrivileged := false
	idmapset, err := idmap.DefaultIdmapSet()
	if err != nil || len(idmapset.Idmap) == 0 || idmapset.Usable() != nil {
		needPrivileged = true
	}

	if cmd.RunningInUserns && needPrivileged {
		fmt.Printf(`
We detected that you are running inside an unprivileged container.
This means that unless you manually configured your host otherwise,
you will not have enough uid and gid to allocate to your containers.
LXD can re-use your container's own allocation to avoid the problem.
Doing so makes your nested containers slightly less safe as they could
in theory attack their parent container and gain more privileges than
they otherwise would.
`)
		if cmd.Context.AskBool("Would you like to have your containers share their parent's allocation (yes/no) [default=yes]? ", "yes") {
			defaultPrivileged = 1
		} else {
			defaultPrivileged = 0
		}
	}
	return defaultPrivileged
}
// Ask if the user wants to expose LXD over the network, and collect
// the relevant parameters if so. Returns nil when the user declines.
func (cmd *CmdInit) askNetworking() *cmdInitNetworkingParams {
	if !cmd.Context.AskBool("Would you like LXD to be available over the network (yes/no) [default=no]? ", "no") {
		return nil
	}
	networking := &cmdInitNetworkingParams{}

	// Validator: accept the special keyword "all" or any literal IP.
	isIPAddress := func(s string) error {
		if s != "all" && net.ParseIP(s) == nil {
			return fmt.Errorf("'%s' is not an IP address", s)
		}
		return nil
	}
	networking.Address = cmd.Context.AskString("Address to bind LXD to (not including port) [default=all]: ", "all", isIPAddress)
	if networking.Address == "all" {
		// "::" is the IPv6 wildcard (typically covering IPv4 as well
		// via dual-stack sockets).
		networking.Address = "::"
	}

	// IPv6 literals are bracketed so the later host:port formatting
	// produces a valid address.
	if net.ParseIP(networking.Address).To4() == nil {
		networking.Address = fmt.Sprintf("[%s]", networking.Address)
	}
	networking.Port = cmd.Context.AskInt("Port to bind LXD to [default=8443]: ", 1, 65535, "8443")
	networking.TrustPassword = cmd.Context.AskPassword("Trust password for new clients: ", cmd.PasswordReader)
	return networking
}
// Ask if the user wants images to be automatically refreshed.
// Returns true when stale cached images should be auto-updated.
func (cmd *CmdInit) askImages() bool {
	return cmd.Context.AskBool("Would you like stale cached images to be updated automatically (yes/no) [default=yes]? ", "yes")
}
// Ask if the user wants to create a new network bridge, and return
// the relevant parameters if so. Returns nil when the user declines.
func (cmd *CmdInit) askBridge(client lxd.ContainerServer) *cmdInitBridgeParams {
	if !cmd.Context.AskBool("Would you like to create a new network bridge (yes/no) [default=yes]? ", "yes") {
		return nil
	}
	bridge := &cmdInitBridgeParams{}
	for {
		bridge.Name = cmd.Context.AskString("What should the new bridge be called [default=lxdbr0]? ", "lxdbr0", networkValidName)
		_, _, err := client.GetNetwork(bridge.Name)
		if err == nil {
			fmt.Printf("The requested network bridge \"%s\" already exists. Please choose another name.\n", bridge.Name)
			// Ask the user again if he wants to create a
			// bridge with a different name.
			continue
		}

		// Accept "auto"/"none" or a literal CIDR subnet for each family.
		bridge.IPv4 = cmd.Context.AskString("What IPv4 address should be used (CIDR subnet notation, “auto” or “none”) [default=auto]? ", "auto", func(value string) error {
			if shared.StringInSlice(value, []string{"auto", "none"}) {
				return nil
			}
			return networkValidAddressCIDRV4(value)
		})

		if !shared.StringInSlice(bridge.IPv4, []string{"auto", "none"}) {
			bridge.IPv4Nat = cmd.Context.AskBool("Would you like LXD to NAT IPv4 traffic on your bridge? [default=yes]? ", "yes")
		}

		bridge.IPv6 = cmd.Context.AskString("What IPv6 address should be used (CIDR subnet notation, “auto” or “none”) [default=auto]? ", "auto", func(value string) error {
			if shared.StringInSlice(value, []string{"auto", "none"}) {
				return nil
			}
			return networkValidAddressCIDRV6(value)
		})

		if !shared.StringInSlice(bridge.IPv6, []string{"auto", "none"}) {
			bridge.IPv6Nat = cmd.Context.AskBool("Would you like LXD to NAT IPv6 traffic on your bridge? [default=yes]? ", "yes")
		}
		break
	}
	return bridge
}
// Defines the schema for all possible configuration knobs supported by the
// lxd init command, either directly fed via --preseed or populated by
// the auto/interactive modes.
type cmdInitData struct {
	api.ServerPut `yaml:",inline"`                         // Server-level config keys, inlined at the YAML top level.
	Pools         []api.StoragePoolsPost `yaml:"storage_pools"` // Storage pools to create or update.
	Networks      []api.NetworksPost                       // Networks (bridges) to create or update.
	Profiles      []api.ProfilesPost                       // Profiles to create or update.
}
// Parameters needed when creating a storage pool in interactive or auto
// mode.
type cmdInitStorageParams struct {
	Backend  string            // Storage driver; one of supportedStoragePoolDrivers.
	LoopSize int64             // Size in GB of the loop device (-1 when unset).
	Device   string            // Path to an existing block device to use as source.
	Pool     string            // Name of the pool to create.
	Dataset  string            // Name of an existing ZFS pool/dataset (or btrfs subvolume path) to reuse.
	Config   map[string]string // Additional pool configuration (e.g. ceph.* or lvm.* keys).
}
// Parameters needed when configuring the LXD server networking options in interactive
// mode or auto mode.
type cmdInitNetworkingParams struct {
	Address       string // Address to bind the LXD API to ("::" when the user asked for "all").
	Port          int64  // TCP port to bind to (8443 by default).
	TrustPassword string // Trust password for new clients; may be empty.
}
// Parameters needed when creating a bridge network device in interactive
// mode.
type cmdInitBridgeParams struct {
	Name    string // Bridge name
	IPv4    string // IPv4 address in CIDR notation, "auto" or "none"
	IPv4Nat bool   // Whether to NAT IPv4 traffic on the bridge
	IPv6    string // IPv6 address in CIDR notation, "auto" or "none"
	IPv6Nat bool   // Whether to NAT IPv6 traffic on the bridge
}
// Shortcut for closure/anonymous functions that are meant to revert
// some change, and that are passed around as parameters. A reverter
// returns an error when the rollback itself fails.
type reverter func() error
// cmdInit is the entry point of the "lxd init" subcommand: it wires up a
// CmdInit with the default console context, the parsed command line
// arguments and the real terminal password reader, then runs it.
func cmdInit(args *Args) error {
	command := &CmdInit{
		Context:         cmd.DefaultContext(),
		Args:            args,
		RunningInUserns: shared.RunningInUserNS(),
		SocketPath:      "", // Empty path: let the client pick the default LXD unix socket.
		PasswordReader:  terminal.ReadPassword,
	}
	return command.Run()
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package httputil
import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"strings"
"time"
)
// One of the copies, say from b to r2, could be avoided by using a more
// elaborate trick where the other copy is made during Request/Response.Write.
// This would complicate things too much, given that these functions are for
// debugging only.
func drainBody(b io.ReadCloser) (r1, r2 io.ReadCloser, err error) {
var buf bytes.Buffer
if _, err = buf.ReadFrom(b); err != nil {
return nil, nil, err
}
if err = b.Close(); err != nil {
return nil, nil, err
}
return ioutil.NopCloser(&buf), ioutil.NopCloser(bytes.NewBuffer(buf.Bytes())), nil
}
// dumpConn is a net.Conn which writes to Writer and reads from Reader
type dumpConn struct {
	io.Writer
	io.Reader
}

// The connection-management methods are all no-ops: dumpConn only exists so
// the HTTP transport can be pointed at an in-memory writer/reader pair.
func (c *dumpConn) Close() error                       { return nil }
func (c *dumpConn) LocalAddr() net.Addr                { return nil }
func (c *dumpConn) RemoteAddr() net.Addr               { return nil }
func (c *dumpConn) SetDeadline(t time.Time) error      { return nil }
func (c *dumpConn) SetReadDeadline(t time.Time) error  { return nil }
func (c *dumpConn) SetWriteDeadline(t time.Time) error { return nil }
// DumpRequestOut is like DumpRequest but includes
// headers that the standard http.Transport adds,
// such as User-Agent.
//
// When body is true, the request body is buffered and replaced so that req
// remains usable by the caller after this function returns.
func DumpRequestOut(req *http.Request, body bool) ([]byte, error) {
	save := req.Body
	if !body || req.Body == nil {
		req.Body = nil
	} else {
		var err error
		save, req.Body, err = drainBody(req.Body)
		if err != nil {
			return nil, err
		}
	}

	// Since we're using the actual Transport code to write the request,
	// switch to http so the Transport doesn't try to do an SSL
	// negotiation with our dumpConn and its bytes.Buffer & pipe.
	// The wire format for https and http are the same, anyway.
	reqSend := req
	if req.URL.Scheme == "https" {
		// Shallow-copy the request (and its URL) so the caller's request
		// is not mutated by the scheme switch.
		reqSend = new(http.Request)
		*reqSend = *req
		reqSend.URL = new(url.URL)
		*reqSend.URL = *req.URL
		reqSend.URL.Scheme = "http"
	}

	// Use the actual Transport code to record what we would send
	// on the wire, but not using TCP. Use a Transport with a
	// custom dialer that returns a fake net.Conn that waits
	// for the full input (and recording it), and then responds
	// with a dummy response.
	var buf bytes.Buffer // records the output
	pr, pw := io.Pipe()
	dr := &delegateReader{c: make(chan io.Reader)}
	// Wait for the request before replying with a dummy response:
	go func() {
		http.ReadRequest(bufio.NewReader(pr))
		dr.c <- strings.NewReader("HTTP/1.1 204 No Content\r\n\r\n")
	}()

	t := &http.Transport{
		// Note: the parameter named "net" shadows the net package inside
		// the closure body (the net.Conn in the signature resolves to the
		// package, since parameter scope is the function body only).
		Dial: func(net, addr string) (net.Conn, error) {
			return &dumpConn{io.MultiWriter(pw, &buf), dr}, nil
		},
	}

	_, err := t.RoundTrip(reqSend)

	req.Body = save // restore the caller's original body
	if err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// delegateReader is a reader that delegates to another reader,
// once it arrives on a channel.
type delegateReader struct {
	c chan io.Reader
	r io.Reader // nil until received from c
}
// Read blocks until the delegate reader has been delivered on c (first
// call only), then forwards the read to it.
func (r *delegateReader) Read(p []byte) (int, error) {
	if r.r == nil {
		r.r = <-r.c
	}
	return r.r.Read(p)
}
// valueOrDefault returns value when it is nonempty, def otherwise.
func valueOrDefault(value, def string) string {
	if value == "" {
		return def
	}
	return value
}
// reqWriteExcludeHeaderDump lists headers that DumpRequest emits itself
// (or that never live in the Header map) and must therefore be excluded
// when the remaining headers are written via Header.WriteSubset.
var reqWriteExcludeHeaderDump = map[string]bool{
	"Host":              true, // not in Header map anyway
	"Content-Length":    true,
	"Transfer-Encoding": true,
	"Trailer":           true,
}
// dumpAsReceived writes req to w in the form as it was received, or
// at least as accurately as possible from the information retained in
// the request.
//
// NOTE(review): currently unimplemented -- it writes nothing and always
// returns nil.
func dumpAsReceived(req *http.Request, w io.Writer) error {
	return nil
}
// DumpRequest returns the as-received wire representation of req,
// optionally including the request body, for debugging.
// DumpRequest is semantically a no-op, but in order to
// dump the body, it reads the body data into memory and
// changes req.Body to refer to the in-memory copy.
// The documentation for http.Request.Write details which fields
// of req are used.
func DumpRequest(req *http.Request, body bool) (dump []byte, err error) {
	// Remember the original body so it can be restored before returning.
	save := req.Body
	if !body || req.Body == nil {
		req.Body = nil
	} else {
		// Replace req.Body with an in-memory copy so it can be both
		// dumped here and re-read afterwards by the caller.
		save, req.Body, err = drainBody(req.Body)
		if err != nil {
			return
		}
	}
	var b bytes.Buffer
	// Request line; an empty method defaults to GET.
	fmt.Fprintf(&b, "%s %s HTTP/%d.%d\r\n", valueOrDefault(req.Method, "GET"),
		req.URL.RequestURI(), req.ProtoMajor, req.ProtoMinor)
	// Host header comes from req.Host, falling back to the URL's host.
	host := req.Host
	if host == "" && req.URL != nil {
		host = req.URL.Host
	}
	if host != "" {
		fmt.Fprintf(&b, "Host: %s\r\n", host)
	}
	// The body is chunk-encoded only when "chunked" is the first listed
	// transfer encoding.
	chunked := len(req.TransferEncoding) > 0 && req.TransferEncoding[0] == "chunked"
	if len(req.TransferEncoding) > 0 {
		fmt.Fprintf(&b, "Transfer-Encoding: %s\r\n", strings.Join(req.TransferEncoding, ","))
	}
	if req.Close {
		fmt.Fprintf(&b, "Connection: close\r\n")
	}
	// Remaining headers, excluding the ones already written above.
	err = req.Header.WriteSubset(&b, reqWriteExcludeHeaderDump)
	if err != nil {
		return
	}
	io.WriteString(&b, "\r\n")
	if req.Body != nil {
		var dest io.Writer = &b
		if chunked {
			dest = NewChunkedWriter(dest)
		}
		_, err = io.Copy(dest, req.Body)
		if chunked {
			// Close emits the terminating zero-length chunk.
			dest.(io.Closer).Close()
			io.WriteString(&b, "\r\n")
		}
	}
	// Restore the (possibly replaced) body before returning.
	req.Body = save
	if err != nil {
		return
	}
	dump = b.Bytes()
	return
}
// DumpResponse is like DumpRequest but dumps a response.
func DumpResponse(resp *http.Response, body bool) (dump []byte, err error) {
	var b bytes.Buffer
	savedBody := resp.Body
	savedLen := resp.ContentLength
	switch {
	case !body || resp.Body == nil:
		resp.Body = nil
		resp.ContentLength = 0
	default:
		savedBody, resp.Body, err = drainBody(resp.Body)
		if err != nil {
			return
		}
	}
	err = resp.Write(&b)
	resp.Body = savedBody
	resp.ContentLength = savedLen
	if err != nil {
		return
	}
	dump = b.Bytes()
	return
}
net/http/httputil: fix race in DumpRequestOut
Fixes issue 3892.
Swapping the order of the writers inside the MultiWriter ensures
the request will be written to buf before http.ReadRequest completes.
The fencedBuffer is not required to make the test pass on
any machine that I have access to, but as the buf is shared
across goroutines, I think it is necessary for correctness.
R=bradfitz, fullung, franciscossouza
CC=golang-dev
http://codereview.appspot.com/6483061
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package httputil
import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"strings"
"time"
)
// One of the copies, say from b to r2, could be avoided by using a more
// elaborate trick where the other copy is made during Request/Response.Write.
// This would complicate things too much, given that these functions are for
// debugging only.
func drainBody(b io.ReadCloser) (r1, r2 io.ReadCloser, err error) {
var buf bytes.Buffer
if _, err = buf.ReadFrom(b); err != nil {
return nil, nil, err
}
if err = b.Close(); err != nil {
return nil, nil, err
}
return ioutil.NopCloser(&buf), ioutil.NopCloser(bytes.NewBuffer(buf.Bytes())), nil
}
// dumpConn is a net.Conn which writes to Writer and reads from Reader.
// It is the fake connection handed to the Transport so the bytes it
// "sends" are captured instead of going over TCP.
type dumpConn struct {
	io.Writer
	io.Reader
}
// The remaining net.Conn methods are no-op stubs: dumpConn has no real
// network connection to close, address, or apply deadlines to.
func (c *dumpConn) Close() error                       { return nil }
func (c *dumpConn) LocalAddr() net.Addr                { return nil }
func (c *dumpConn) RemoteAddr() net.Addr               { return nil }
func (c *dumpConn) SetDeadline(t time.Time) error      { return nil }
func (c *dumpConn) SetReadDeadline(t time.Time) error  { return nil }
func (c *dumpConn) SetWriteDeadline(t time.Time) error { return nil }
// DumpRequestOut is like DumpRequest but includes
// headers that the standard http.Transport adds,
// such as User-Agent.
func DumpRequestOut(req *http.Request, body bool) ([]byte, error) {
	// Remember the original body so it can be restored before returning.
	save := req.Body
	if !body || req.Body == nil {
		req.Body = nil
	} else {
		var err error
		save, req.Body, err = drainBody(req.Body)
		if err != nil {
			return nil, err
		}
	}
	// Since we're using the actual Transport code to write the request,
	// switch to http so the Transport doesn't try to do an SSL
	// negotiation with our dumpConn and its bytes.Buffer & pipe.
	// The wire format for https and http are the same, anyway.
	reqSend := req
	if req.URL.Scheme == "https" {
		reqSend = new(http.Request)
		*reqSend = *req
		reqSend.URL = new(url.URL)
		*reqSend.URL = *req.URL
		reqSend.URL.Scheme = "http"
	}
	// Use the actual Transport code to record what we would send
	// on the wire, but not using TCP. Use a Transport with a
	// custom dialer that returns a fake net.Conn that waits
	// for the full input (and recording it), and then responds
	// with a dummy response.
	var buf bytes.Buffer // records the output
	pr, pw := io.Pipe()
	dr := &delegateReader{c: make(chan io.Reader)}
	// Wait for the request before replying with a dummy response:
	go func() {
		http.ReadRequest(bufio.NewReader(pr))
		dr.c <- strings.NewReader("HTTP/1.1 204 No Content\r\n\r\n")
	}()
	t := &http.Transport{
		Dial: func(net, addr string) (net.Conn, error) {
			// buf is listed before pw on purpose: io.MultiWriter writes
			// to its writers in order, so the request bytes land in buf
			// before the reading goroutine (fed via pw) can observe the
			// end of the request and let RoundTrip return. Reversing the
			// order would race buf's writes against buf.Bytes() below.
			return &dumpConn{io.MultiWriter(&buf, pw), dr}, nil
		},
	}
	_, err := t.RoundTrip(reqSend)
	req.Body = save
	if err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// delegateReader is a reader that delegates to another reader,
// once it arrives on a channel.
type delegateReader struct {
	c chan io.Reader
	r io.Reader // nil until received from c
}
// Read forwards to the delegate reader, first blocking on the channel if
// the delegate has not yet been delivered.
func (r *delegateReader) Read(p []byte) (int, error) {
	if r.r != nil {
		return r.r.Read(p)
	}
	r.r = <-r.c
	return r.r.Read(p)
}
// valueOrDefault picks value unless it is the empty string, in which
// case def is returned instead.
func valueOrDefault(value, def string) string {
	switch value {
	case "":
		return def
	default:
		return value
	}
}
// reqWriteExcludeHeaderDump names headers DumpRequest writes itself (or
// that never appear in the Header map) so Header.WriteSubset can skip
// them when dumping the rest.
var reqWriteExcludeHeaderDump = map[string]bool{
	"Host":              true, // not in Header map anyway
	"Content-Length":    true,
	"Transfer-Encoding": true,
	"Trailer":           true,
}
// dumpAsReceived writes req to w in the form as it was received, or
// at least as accurately as possible from the information retained in
// the request.
//
// NOTE(review): this is a stub -- it writes nothing and always returns
// nil.
func dumpAsReceived(req *http.Request, w io.Writer) error {
	return nil
}
// DumpRequest returns the as-received wire representation of req,
// optionally including the request body, for debugging.
// DumpRequest is semantically a no-op, but in order to
// dump the body, it reads the body data into memory and
// changes req.Body to refer to the in-memory copy.
// The documentation for http.Request.Write details which fields
// of req are used.
func DumpRequest(req *http.Request, body bool) (dump []byte, err error) {
	// Save the original body so it can be restored before returning.
	save := req.Body
	if !body || req.Body == nil {
		req.Body = nil
	} else {
		// Swap in an in-memory copy so the body can be dumped here and
		// still re-read later by the caller.
		save, req.Body, err = drainBody(req.Body)
		if err != nil {
			return
		}
	}
	var b bytes.Buffer
	// Request line; an empty method defaults to GET.
	fmt.Fprintf(&b, "%s %s HTTP/%d.%d\r\n", valueOrDefault(req.Method, "GET"),
		req.URL.RequestURI(), req.ProtoMajor, req.ProtoMinor)
	// Host header comes from req.Host, falling back to the URL's host.
	host := req.Host
	if host == "" && req.URL != nil {
		host = req.URL.Host
	}
	if host != "" {
		fmt.Fprintf(&b, "Host: %s\r\n", host)
	}
	// Chunked only when "chunked" is the first listed transfer encoding.
	chunked := len(req.TransferEncoding) > 0 && req.TransferEncoding[0] == "chunked"
	if len(req.TransferEncoding) > 0 {
		fmt.Fprintf(&b, "Transfer-Encoding: %s\r\n", strings.Join(req.TransferEncoding, ","))
	}
	if req.Close {
		fmt.Fprintf(&b, "Connection: close\r\n")
	}
	// Remaining headers, minus the ones written above.
	err = req.Header.WriteSubset(&b, reqWriteExcludeHeaderDump)
	if err != nil {
		return
	}
	io.WriteString(&b, "\r\n")
	if req.Body != nil {
		var dest io.Writer = &b
		if chunked {
			dest = NewChunkedWriter(dest)
		}
		_, err = io.Copy(dest, req.Body)
		if chunked {
			// Close emits the terminating zero-length chunk.
			dest.(io.Closer).Close()
			io.WriteString(&b, "\r\n")
		}
	}
	// Restore the (possibly replaced) body before returning.
	req.Body = save
	if err != nil {
		return
	}
	dump = b.Bytes()
	return
}
// DumpResponse is like DumpRequest but dumps a response.
func DumpResponse(resp *http.Response, body bool) (dump []byte, err error) {
	var b bytes.Buffer
	// Save body and ContentLength so they can be restored after the dump.
	save := resp.Body
	savecl := resp.ContentLength
	if !body || resp.Body == nil {
		resp.Body = nil
		resp.ContentLength = 0
	} else {
		// Swap in an in-memory copy so the body stays readable afterwards.
		save, resp.Body, err = drainBody(resp.Body)
		if err != nil {
			return
		}
	}
	err = resp.Write(&b)
	resp.Body = save
	resp.ContentLength = savecl
	if err != nil {
		return
	}
	dump = b.Bytes()
	return
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// DNS packet assembly, see RFC 1035. Converting from - Unpack() -
// and to - Pack() - wire format.
// All the packers and unpackers take a (msg []byte, off int)
// and return (off1 int, ok bool). If they return ok==false, they
// also return off1==len(msg), so that the next unpacker will
// also fail. This lets us avoid checks of ok until the end of a
// packing sequence.
package dns
import (
"encoding/base32"
"encoding/base64"
"encoding/hex"
"fmt"
"math/rand"
"net"
"reflect"
"strconv"
"time"
)
// maxCompressionOffset is the largest message offset that can be encoded
// in a compression pointer: we have 14 bits for the compression pointer.
const maxCompressionOffset = 2 << 13

// Sentinel errors returned by the packing/unpacking, transport and
// crypto code in this package.
var (
	ErrFqdn      error = &Error{Err: "domain name must be fully qualified"}
	ErrDomain    error = &Error{Err: "domain name must be 255 bytes or less long"}
	ErrId        error = &Error{Err: "id mismatch"}
	ErrRdata     error = &Error{Err: "bad rdata"}
	ErrBuf       error = &Error{Err: "buffer size too small"}
	ErrShortRead error = &Error{Err: "short read"}
	ErrConn      error = &Error{Err: "conn holds both UDP and TCP connection"}
	ErrConnEmpty error = &Error{Err: "conn has no connection"}
	ErrServ      error = &Error{Err: "no servers could be reached"}
	ErrKey       error = &Error{Err: "bad key"}
	ErrPrivKey   error = &Error{Err: "bad private key"}
	ErrKeySize   error = &Error{Err: "bad key size"}
	ErrKeyAlg    error = &Error{Err: "bad key algorithm"}
	ErrAlg       error = &Error{Err: "bad algorithm"}
	ErrTime      error = &Error{Err: "bad time"}
	ErrNoSig     error = &Error{Err: "no signature found"}
	ErrSig       error = &Error{Err: "bad signature"}
	ErrSecret    error = &Error{Err: "no secrets defined"}
	ErrSigGen    error = &Error{Err: "bad signature generation"}
	ErrAuth      error = &Error{Err: "bad authentication"}
	ErrSoa       error = &Error{Err: "no SOA"}
	ErrRRset     error = &Error{Err: "bad rrset"}
)
// A manually-unpacked version of (id, bits).
// This is in its own struct for easy printing. The booleans correspond
// to individual flag bits of the DNS header.
type MsgHdr struct {
	Id                 uint16
	Response           bool
	Opcode             int
	Authoritative      bool
	Truncated          bool
	RecursionDesired   bool
	RecursionAvailable bool
	Zero               bool
	AuthenticatedData  bool
	CheckingDisabled   bool
	Rcode              int
}
// The layout of a DNS message.
type Msg struct {
	MsgHdr
	Compress bool       // If true, the message will be compressed when converted to wire format.
	Size     int        // Number of octets in the message received from the wire.
	Question []Question // Holds the RR(s) of the question section.
	Answer   []RR       // Holds the RR(s) of the answer section.
	Ns       []RR       // Holds the RR(s) of the authority section.
	Extra    []RR       // Holds the RR(s) of the additional section.
}
// Map of strings for each RR wire type: the presentation-format mnemonic
// for every supported type code.
// (Fixed: the mnemonic for TypeTLSA is "TLSA" per RFC 6698; it was
// previously misspelled "TSLA".)
var TypeToString = map[uint16]string{
	TypeCNAME:      "CNAME",
	TypeHINFO:      "HINFO",
	TypeTLSA:       "TLSA",
	TypeMB:         "MB",
	TypeMG:         "MG",
	TypeRP:         "RP",
	TypeMD:         "MD",
	TypeMF:         "MF",
	TypeMINFO:      "MINFO",
	TypeMR:         "MR",
	TypeMX:         "MX",
	TypeWKS:        "WKS",
	TypeNS:         "NS",
	TypeNULL:       "NULL",
	TypeAFSDB:      "AFSDB",
	TypeX25:        "X25",
	TypeISDN:       "ISDN",
	TypePTR:        "PTR",
	TypeRT:         "RT",
	TypeSOA:        "SOA",
	TypeTXT:        "TXT",
	TypeSRV:        "SRV",
	TypeATMA:       "ATMA",
	TypeNAPTR:      "NAPTR",
	TypeKX:         "KX",
	TypeCERT:       "CERT",
	TypeDNAME:      "DNAME",
	TypeA:          "A",
	TypeAAAA:       "AAAA",
	TypeLOC:        "LOC",
	TypeOPT:        "OPT",
	TypeDS:         "DS",
	TypeDHCID:      "DHCID",
	TypeHIP:        "HIP",
	TypeNINFO:      "NINFO",
	TypeRKEY:       "RKEY",
	TypeCDS:        "CDS",
	TypeCAA:        "CAA",
	TypeIPSECKEY:   "IPSECKEY",
	TypeSSHFP:      "SSHFP",
	TypeRRSIG:      "RRSIG",
	TypeNSEC:       "NSEC",
	TypeDNSKEY:     "DNSKEY",
	TypeNSEC3:      "NSEC3",
	TypeNSEC3PARAM: "NSEC3PARAM",
	TypeTALINK:     "TALINK",
	TypeSPF:        "SPF",
	TypeNID:        "NID",
	TypeL32:        "L32",
	TypeL64:        "L64",
	TypeLP:         "LP",
	TypeTKEY:       "TKEY", // Meta RR
	TypeTSIG:       "TSIG", // Meta RR
	TypeAXFR:       "AXFR", // Meta RR
	TypeIXFR:       "IXFR", // Meta RR
	TypeANY:        "ANY",  // Meta RR
	TypeURI:        "URI",
	TypeTA:         "TA",
	TypeDLV:        "DLV",
}
// Reverse of TypeToString and ClassToString, needed for string parsing.
var StringToType = reverseInt16(TypeToString)
var StringToClass = reverseInt16(ClassToString)

// Map of opcode strings back to their numeric values.
var StringToOpcode = reverseInt(OpcodeToString)

// Map of rcode strings back to their numeric values.
var StringToRcode = reverseInt(RcodeToString)
// Map of strings for each CLASS wire type.
var ClassToString = map[uint16]string{
	ClassINET:   "IN",
	ClassCSNET:  "CS",
	ClassCHAOS:  "CH",
	ClassHESIOD: "HS",
	ClassNONE:   "NONE",
	ClassANY:    "ANY",
}
// Map of strings for opcodes.
var OpcodeToString = map[int]string{
	OpcodeQuery:  "QUERY",
	OpcodeIQuery: "IQUERY",
	OpcodeStatus: "STATUS",
	OpcodeNotify: "NOTIFY",
	OpcodeUpdate: "UPDATE",
}
// Map of strings for rcodes (response codes), including the extended
// codes used by dynamic update (RFC 2136) and TSIG.
var RcodeToString = map[int]string{
	RcodeSuccess:        "NOERROR",
	RcodeFormatError:    "FORMERR",
	RcodeServerFailure:  "SERVFAIL",
	RcodeNameError:      "NXDOMAIN",
	RcodeNotImplemented: "NOTIMPL",
	RcodeRefused:        "REFUSED",
	RcodeYXDomain:       "YXDOMAIN", // From RFC 2136
	RcodeYXRrset:        "YXRRSET",
	RcodeNXRrset:        "NXRRSET",
	RcodeNotAuth:        "NOTAUTH",
	RcodeNotZone:        "NOTZONE",
	RcodeBadSig:         "BADSIG",
	RcodeBadKey:         "BADKEY",
	RcodeBadTime:        "BADTIME",
	RcodeBadMode:        "BADMODE",
	RcodeBadName:        "BADNAME",
	RcodeBadAlg:         "BADALG",
	RcodeBadTrunc:       "BADTRUNC",
}
// Rather than write the usual handful of routines to pack and
// unpack every message that can appear on the wire, we use
// reflection to write a generic pack/unpack for structs and then
// use it. Thus, if in the future we need to define new message
// structs, no new pack/unpack/printing code needs to be written.
// Domain names are a sequence of counted strings
// split at the dots. They end with a zero-length string.

// PackDomainName packs a domain name s into msg[off:].
// If compression is wanted compress must be true and the compression
// map needs to hold a mapping between domain names and offsets
// pointing into msg[].
func PackDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
	lenmsg := len(msg)
	ls := len(s)
	offstart := off // remembered so the total encoded length can be checked at the end
	// If not fully qualified, error out
	if ls == 0 || s[ls-1] != '.' {
		return lenmsg, ErrFqdn
	}
	// Each dot ends a segment of the name.
	// We trade each dot byte for a length byte.
	// Except for escaped dots (\.), which are normal dots.
	// There is also a trailing zero.
	// Compression bookkeeping:
	nameoffset := -1 // where the 2-byte pointer will be written (point from)
	pointer := -1    // offset of the earlier occurrence (point to)
	// Emit sequence of counted strings, chopping at dots.
	begin := 0
	bs := []byte(s)
	for i := 0; i < ls; i++ {
		if bs[i] == '\\' {
			// Unescape: shift everything after the backslash left one byte.
			for j := i; j < ls-1; j++ {
				bs[j] = bs[j+1]
			}
			ls--
			// check for \DDD
			if off+1 > lenmsg {
				return lenmsg, ErrBuf
			}
			if i+2 < ls && bs[i] >= '0' && bs[i] <= '9' &&
				bs[i+1] >= '0' && bs[i+1] <= '9' &&
				bs[i+2] >= '0' && bs[i+2] <= '9' {
				// \DDD escape: fold the three digits into one byte value.
				bs[i] = byte((bs[i]-'0')*100 + (bs[i+1]-'0')*10 + (bs[i+2] - '0'))
				for j := i + 1; j < ls-2; j++ {
					bs[j] = bs[j+2]
				}
				ls -= 2
			}
			continue
		}
		if bs[i] == '.' {
			if i-begin >= 1<<6 { // top two bits of length must be clear
				return lenmsg, ErrRdata
			}
			// off can already (we're in a loop) be bigger than len(msg)
			// this happens when a name isn't fully qualified
			if off+1 > lenmsg {
				return lenmsg, ErrBuf
			}
			msg[off] = byte(i - begin)
			offset := off
			off++
			// Copy the label bytes themselves.
			for j := begin; j < i; j++ {
				if off+1 > lenmsg {
					return lenmsg, ErrBuf
				}
				msg[off] = bs[j]
				off++
			}
			// Dont try to compress '.'
			if compression != nil && string(bs[begin:]) != "." {
				if p, ok := compression[string(bs[begin:])]; !ok {
					// Only offsets smaller than this can be used.
					if offset < maxCompressionOffset {
						compression[string(bs[begin:])] = offset
					}
				} else {
					// The first hit is the longest matching dname
					// keep the pointer offset we get back and store
					// the offset of the current name, because that's
					// where we need to insert the pointer later
					// If compress is true, we're allowed to compress this dname
					if pointer == -1 && compress {
						pointer = p         // Where to point to
						nameoffset = offset // Where to point from
						break
					}
				}
			}
			begin = i + 1
		}
	}
	// Root label is special
	if string(bs) == "." {
		return off, nil
	}
	// If we did compression and we find something at the pointer here
	if pointer != -1 {
		// We have two bytes (14 bits) to put the pointer in
		// NOTE(review): no explicit bounds check before these two writes;
		// verify nameoffset+1 < len(msg) is guaranteed by the checks above.
		msg[nameoffset], msg[nameoffset+1] = packUint16(uint16(pointer ^ 0xC000))
		off = nameoffset + 1
		goto End
	}
	msg[off] = 0
End:
	off++
	if off-offstart > 255 {
		return lenmsg, ErrDomain
	}
	return off, nil
}
// Unpack a domain name.
// In addition to the simple sequences of counted strings above,
// domain names are allowed to refer to strings elsewhere in the
// packet, to avoid repeating common suffixes when returning
// many entries in a single domain. The pointers are marked
// by a length byte with the top two bits set. Ignoring those
// two bits, that byte and the next give a 14 bit offset from msg[0]
// where we should pick up the trail.
// Note that if we jump elsewhere in the packet,
// we return off1 == the offset after the first pointer we found,
// which is where the next record will start.
// In theory, the pointers are only allowed to jump backward.
// We let them jump anywhere and stop jumping after a while.

// UnpackDomainName unpacks a domain name into a string.
func UnpackDomainName(msg []byte, off int) (s string, off1 int, err error) {
	s = ""
	lenmsg := len(msg)
	ptr := 0 // number of pointers followed
Loop:
	for {
		if off >= lenmsg {
			return "", lenmsg, ErrBuf
		}
		c := int(msg[off])
		off++
		switch c & 0xC0 {
		case 0x00:
			// Plain label; c is the label length.
			if c == 0x00 {
				// end of name
				if s == "" {
					return ".", off, nil
				}
				break Loop
			}
			// literal string
			if off+c > lenmsg {
				return "", lenmsg, ErrBuf
			}
			for j := off; j < off+c; j++ {
				switch {
				case msg[j] == '.': // literal dots
					s += "\\."
				case msg[j] < 32: // unprintable use \DDD
					fallthrough
				case msg[j] >= 127:
					s += fmt.Sprintf("\\%03d", msg[j])
				default:
					s += string(msg[j])
				}
			}
			s += "."
			off += c
		case 0xC0:
			// pointer to somewhere else in msg.
			// remember location after first ptr,
			// since that's how many bytes we consumed.
			// also, don't follow too many pointers --
			// maybe there's a loop.
			if off >= lenmsg {
				return "", lenmsg, ErrBuf
			}
			c1 := msg[off]
			off++
			if ptr == 0 {
				off1 = off
			}
			if ptr++; ptr > 10 {
				return "", lenmsg, &Error{Err: "too many compression pointers"}
			}
			// Jump to the 14-bit offset encoded in (c, c1).
			off = (c^0xC0)<<8 | int(c1)
		default:
			// 0x80 and 0x40 are reserved
			return "", lenmsg, ErrRdata
		}
	}
	if ptr == 0 {
		off1 = off
	}
	return s, off1, nil
}
// Pack a reflect.StructValue into msg. Struct members can only be uint8, uint16, uint32, string,
// slices and other (often anonymous) structs.
//
// The struct field tag (e.g. `dns:"domain-name"`) selects the wire
// encoding for slice and string fields. On success the new offset is
// returned; on failure len(msg) and an error are returned.
func packStructValue(val reflect.Value, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
	lenmsg := len(msg)
	for i := 0; i < val.NumField(); i++ {
		switch fv := val.Field(i); fv.Kind() {
		default:
			return lenmsg, &Error{Err: "bad kind packing"}
		case reflect.Slice:
			switch val.Type().Field(i).Tag {
			default:
				return lenmsg, &Error{Name: val.Type().Field(i).Tag.Get("dns"), Err: "bad tag packing slice"}
			case `dns:"domain-name"`:
				for j := 0; j < val.Field(i).Len(); j++ {
					element := val.Field(i).Index(j).String()
					// Names in rdata slices are packed without compression.
					off, err = PackDomainName(element, msg, off, compression, false && compress)
					if err != nil {
						return lenmsg, err
					}
				}
			case `dns:"txt"`:
				for j := 0; j < val.Field(i).Len(); j++ {
					element := val.Field(i).Index(j).String()
					// Counted string: 1 byte length.
					if len(element) > 255 || off+1+len(element) > lenmsg {
						return lenmsg, &Error{Err: "overflow packing txt"}
					}
					msg[off] = byte(len(element))
					off++
					for i := 0; i < len(element); i++ {
						msg[off+i] = element[i]
					}
					off += len(element)
				}
			case `dns:"opt"`: // edns
				for j := 0; j < val.Field(i).Len(); j++ {
					element := val.Field(i).Index(j).Interface()
					b, e := element.(EDNS0).pack()
					if e != nil {
						return lenmsg, &Error{Err: "overflow packing opt"}
					}
					// Option code
					msg[off], msg[off+1] = packUint16(element.(EDNS0).Option())
					// Length
					msg[off+2], msg[off+3] = packUint16(uint16(len(b)))
					off += 4
					// Actual data
					copy(msg[off:off+len(b)], b)
					off += len(b)
				}
			case `dns:"a"`:
				// It must be a slice of 4, even if it is 16, we encode
				// only the first 4
				if off+net.IPv4len > lenmsg {
					return lenmsg, &Error{Err: "overflow packing a"}
				}
				switch fv.Len() {
				case net.IPv6len:
					// 16-byte form: the IPv4 address lives in the last 4 bytes.
					msg[off] = byte(fv.Index(12).Uint())
					msg[off+1] = byte(fv.Index(13).Uint())
					msg[off+2] = byte(fv.Index(14).Uint())
					msg[off+3] = byte(fv.Index(15).Uint())
					off += net.IPv4len
				case net.IPv4len:
					msg[off] = byte(fv.Index(0).Uint())
					msg[off+1] = byte(fv.Index(1).Uint())
					msg[off+2] = byte(fv.Index(2).Uint())
					msg[off+3] = byte(fv.Index(3).Uint())
					off += net.IPv4len
				case 0:
					// Allowed, for dynamic updates
				default:
					return lenmsg, &Error{Err: "overflow packing a"}
				}
			case `dns:"aaaa"`:
				if fv.Len() > net.IPv6len || off+fv.Len() > lenmsg {
					return lenmsg, &Error{Err: "overflow packing aaaa"}
				}
				for j := 0; j < net.IPv6len; j++ {
					msg[off] = byte(fv.Index(j).Uint())
					off++
				}
			case `dns:"wks"`:
				if val.Field(i).Len() == 0 {
					break
				}
				var bitmapbyte uint16
				for j := 0; j < val.Field(i).Len(); j++ {
					serv := uint16((fv.Index(j).Uint()))
					bitmapbyte = uint16(serv / 8)
					if int(bitmapbyte) > lenmsg {
						return lenmsg, &Error{Err: "overflow packing wks"}
					}
					bit := uint16(serv) - bitmapbyte*8
					// NOTE(review): plain '=' overwrites bits previously set
					// in this octet; '|=' looks intended -- verify against
					// the WKS bitmap format and the unpacking code below.
					msg[bitmapbyte] = byte(1 << (7 - bit))
				}
				off += int(bitmapbyte)
			case `dns:"nsec"`: // NSEC/NSEC3
				// This is the uint16 type bitmap
				if val.Field(i).Len() == 0 {
					// Do absolutely nothing
					break
				}
				lastwindow := uint16(0)
				length := uint16(0)
				if off+2 > lenmsg {
					return lenmsg, &Error{Err: "overflow packing nsecx"}
				}
				for j := 0; j < val.Field(i).Len(); j++ {
					t := uint16((fv.Index(j).Uint()))
					window := uint16(t / 256)
					if lastwindow != window {
						// New window, jump to the new offset
						off += int(length) + 3
						if off > lenmsg {
							return lenmsg, &Error{Err: "overflow packing nsecx bitmap"}
						}
					}
					length = (t - window*256) / 8
					bit := t - (window * 256) - (length * 8)
					if off+2+int(length) > lenmsg {
						return lenmsg, &Error{Err: "overflow packing nsecx bitmap"}
					}
					// Setting the window #
					msg[off] = byte(window)
					// Setting the octets length
					msg[off+1] = byte(length + 1)
					// Setting the bit value for the type in the right octet
					msg[off+2+int(length)] |= byte(1 << (7 - bit))
					lastwindow = window
				}
				off += 2 + int(length)
				off++
				if off > lenmsg {
					return lenmsg, &Error{Err: "overflow packing nsecx bitmap"}
				}
			}
		case reflect.Struct:
			// Recurse into embedded/anonymous structs (e.g. the RR header).
			off, err = packStructValue(fv, msg, off, compression, compress)
		case reflect.Uint8:
			if off+1 > lenmsg {
				return lenmsg, &Error{Err: "overflow packing uint8"}
			}
			msg[off] = byte(fv.Uint())
			off++
		case reflect.Uint16:
			if off+2 > lenmsg {
				return lenmsg, &Error{Err: "overflow packing uint16"}
			}
			i := fv.Uint()
			msg[off] = byte(i >> 8)
			msg[off+1] = byte(i)
			off += 2
		case reflect.Uint32:
			if off+4 > lenmsg {
				return lenmsg, &Error{Err: "overflow packing uint32"}
			}
			i := fv.Uint()
			msg[off] = byte(i >> 24)
			msg[off+1] = byte(i >> 16)
			msg[off+2] = byte(i >> 8)
			msg[off+3] = byte(i)
			off += 4
		case reflect.Uint64:
			switch val.Type().Field(i).Tag {
			default:
				if off+8 > lenmsg {
					return lenmsg, &Error{Err: "overflow packing uint64"}
				}
				i := fv.Uint()
				msg[off] = byte(i >> 56)
				msg[off+1] = byte(i >> 48)
				msg[off+2] = byte(i >> 40)
				msg[off+3] = byte(i >> 32)
				msg[off+4] = byte(i >> 24)
				msg[off+5] = byte(i >> 16)
				msg[off+6] = byte(i >> 8)
				msg[off+7] = byte(i)
				off += 8
			case `dns:"uint48"`:
				// Used in TSIG, where it stops at 48 bits, so we discard the upper 16
				if off+6 > lenmsg {
					return lenmsg, &Error{Err: "overflow packing uint64 as uint48"}
				}
				i := fv.Uint()
				msg[off] = byte(i >> 40)
				msg[off+1] = byte(i >> 32)
				msg[off+2] = byte(i >> 24)
				msg[off+3] = byte(i >> 16)
				msg[off+4] = byte(i >> 8)
				msg[off+5] = byte(i)
				off += 6
			}
		case reflect.String:
			// There are multiple string encodings.
			// The tag distinguishes ordinary strings from domain names.
			s := fv.String()
			switch val.Type().Field(i).Tag {
			default:
				return lenmsg, &Error{Name: val.Type().Field(i).Tag.Get("dns"), Err: "bad tag packing string"}
			case `dns:"base64"`:
				b64, err := packBase64([]byte(s))
				if err != nil {
					return lenmsg, &Error{Err: "overflow packing base64"}
				}
				copy(msg[off:off+len(b64)], b64)
				off += len(b64)
			case `dns:"domain-name"`:
				if off, err = PackDomainName(s, msg, off, compression, false && compress); err != nil {
					return lenmsg, err
				}
			case `dns:"cdomain-name"`:
				// Compressible domain name (compression still gated on compress).
				if off, err = PackDomainName(s, msg, off, compression, true && compress); err != nil {
					return lenmsg, err
				}
			case `dns:"size-base32"`:
				// This is purely for NSEC3 atm, the previous byte must
				// holds the length of the encoded string. As NSEC3
				// is only defined to SHA1, the hashlength is 20 (160 bits)
				msg[off-1] = 20
				fallthrough
			case `dns:"base32"`:
				b32, err := packBase32([]byte(s))
				if err != nil {
					return lenmsg, &Error{Err: "overflow packing base32"}
				}
				copy(msg[off:off+len(b32)], b32)
				off += len(b32)
			case `dns:"size-hex"`:
				fallthrough
			case `dns:"hex"`:
				// There is no length encoded here
				h, e := hex.DecodeString(s)
				if e != nil {
					return lenmsg, &Error{Err: "overflow packing hex"}
				}
				if off+hex.DecodedLen(len(s)) > lenmsg {
					return lenmsg, &Error{Err: "overflow packing hex"}
				}
				copy(msg[off:off+hex.DecodedLen(len(s))], h)
				off += hex.DecodedLen(len(s))
			case `dns:"size"`:
				// the size is already encoded in the RR, we can safely use the
				// length of string. String is RAW (not encoded in hex, nor base64)
				copy(msg[off:off+len(s)], s)
				off += len(s)
			case `dns:"txt"`:
				fallthrough
			case "":
				// Counted string: 1 byte length.
				if len(s) > 255 || off+1+len(s) > lenmsg {
					return lenmsg, &Error{Err: "overflow packing string"}
				}
				msg[off] = byte(len(s))
				off++
				for i := 0; i < len(s); i++ {
					msg[off+i] = s[i]
				}
				off += len(s)
			}
		}
	}
	return off, nil
}
// structValue returns the struct value that the given pointer interface
// points at, ready for reflective field access.
func structValue(any interface{}) reflect.Value {
	v := reflect.ValueOf(any)
	return v.Elem()
}
// PackStruct packs any (a pointer to an RR-like struct) into msg at
// offset off, without name compression. It returns the new offset, or
// len(msg) and an error on failure.
func PackStruct(any interface{}, msg []byte, off int) (off1 int, err error) {
	return packStructValue(structValue(any), msg, off, nil, false)
}
// packStructCompress is like PackStruct but threads a compression map
// (and the compress flag) through to the packer.
func packStructCompress(any interface{}, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
	return packStructValue(structValue(any), msg, off, compression, compress)
}
// Unpack a reflect.StructValue from msg.
// Same restrictions as packStructValue.
func unpackStructValue(val reflect.Value, msg []byte, off int) (off1 int, err error) {
var rdstart int
lenmsg := len(msg)
for i := 0; i < val.NumField(); i++ {
switch fv := val.Field(i); fv.Kind() {
default:
return lenmsg, &Error{Err: "bad kind unpacking"}
case reflect.Slice:
switch val.Type().Field(i).Tag {
default:
return lenmsg, &Error{Name: val.Type().Field(i).Tag.Get("dns"), Err: "bad tag unpacking slice"}
case `dns:"domain-name"`:
// HIP record slice of name (or none)
servers := make([]string, 0)
var s string
for off < lenmsg {
s, off, err = UnpackDomainName(msg, off)
if err != nil {
return lenmsg, err
}
servers = append(servers, s)
}
fv.Set(reflect.ValueOf(servers))
case `dns:"txt"`:
txt := make([]string, 0)
rdlength := off + int(val.FieldByName("Hdr").FieldByName("Rdlength").Uint())
Txts:
l := int(msg[off])
if off+l+1 > lenmsg {
return lenmsg, &Error{Err: "overflow unpacking txt"}
}
txt = append(txt, string(msg[off+1:off+l+1]))
off += l + 1
if off < rdlength {
// More
goto Txts
}
fv.Set(reflect.ValueOf(txt))
case `dns:"opt"`: // edns0
// TODO: multiple EDNS0 options
rdlength := int(val.FieldByName("Hdr").FieldByName("Rdlength").Uint())
if rdlength == 0 {
// This is an EDNS0 (OPT Record) with no rdata
// We can savely return here.
break
}
edns := make([]EDNS0, 0)
code := uint16(0)
if off+2 > lenmsg {
return lenmsg, &Error{Err: "overflow unpacking opt"}
}
code, off = unpackUint16(msg, off)
optlen, off1 := unpackUint16(msg, off)
if off1+int(optlen) > off+rdlength {
return lenmsg, &Error{Err: "overflow unpacking opt"}
}
switch code {
case EDNS0NSID:
e := new(EDNS0_NSID)
e.unpack(msg[off1 : off1+int(optlen)])
edns = append(edns, e)
off = off1 + int(optlen)
case EDNS0SUBNET:
e := new(EDNS0_SUBNET)
e.unpack(msg[off1 : off1+int(optlen)])
edns = append(edns, e)
off = off1 + int(optlen)
case EDNS0UPDATELEASE:
e := new(EDNS0_UPDATE_LEASE)
e.unpack(msg[off1 : off1+int(optlen)])
edns = append(edns, e)
off = off1 + int(optlen)
}
fv.Set(reflect.ValueOf(edns))
// multiple EDNS codes?
case `dns:"a"`:
if off+net.IPv4len > lenmsg {
return lenmsg, &Error{Err: "overflow unpacking a"}
}
fv.Set(reflect.ValueOf(net.IPv4(msg[off], msg[off+1], msg[off+2], msg[off+3])))
off += net.IPv4len
case `dns:"aaaa"`:
if off+net.IPv6len > lenmsg {
return lenmsg, &Error{Err: "overflow unpacking aaaa"}
}
fv.Set(reflect.ValueOf(net.IP{msg[off], msg[off+1], msg[off+2], msg[off+3], msg[off+4],
msg[off+5], msg[off+6], msg[off+7], msg[off+8], msg[off+9], msg[off+10],
msg[off+11], msg[off+12], msg[off+13], msg[off+14], msg[off+15]}))
off += net.IPv6len
case `dns:"wks"`:
// Rest of the record is the bitmap
rdlength := int(val.FieldByName("Hdr").FieldByName("Rdlength").Uint())
endrr := rdstart + rdlength
serv := make([]uint16, 0)
j := 0
for off < endrr {
b := msg[off]
// Check the bits one by one, and set the type
if b&0x80 == 0x80 {
serv = append(serv, uint16(j*8+0))
}
if b&0x40 == 0x40 {
serv = append(serv, uint16(j*8+1))
}
if b&0x20 == 0x20 {
serv = append(serv, uint16(j*8+2))
}
if b&0x10 == 0x10 {
serv = append(serv, uint16(j*8+3))
}
if b&0x8 == 0x8 {
serv = append(serv, uint16(j*8+4))
}
if b&0x4 == 0x4 {
serv = append(serv, uint16(j*8+5))
}
if b&0x2 == 0x2 {
serv = append(serv, uint16(j*8+6))
}
if b&0x1 == 0x1 {
serv = append(serv, uint16(j*8+7))
}
j++
off++
}
fv.Set(reflect.ValueOf(serv))
case `dns:"nsec"`: // NSEC/NSEC3
// Rest of the record is the type bitmap
rdlength := int(val.FieldByName("Hdr").FieldByName("Rdlength").Uint())
endrr := rdstart + rdlength
if off+2 > lenmsg {
return lenmsg, &Error{Err: "overflow unpacking nsecx"}
}
nsec := make([]uint16, 0)
length := 0
window := 0
for off+2 < endrr {
window = int(msg[off])
length = int(msg[off+1])
//println("off, windows, length, end", off, window, length, endrr)
if length == 0 {
// A length window of zero is strange. If there
// the window should not have been specified. Bail out
// println("dns: length == 0 when unpacking NSEC")
return lenmsg, ErrRdata
}
if length > 32 {
return lenmsg, ErrRdata
}
// Walk the bytes in the window - and check the bit settings...
off += 2
for j := 0; j < length; j++ {
b := msg[off+j]
// Check the bits one by one, and set the type
if b&0x80 == 0x80 {
nsec = append(nsec, uint16(window*256+j*8+0))
}
if b&0x40 == 0x40 {
nsec = append(nsec, uint16(window*256+j*8+1))
}
if b&0x20 == 0x20 {
nsec = append(nsec, uint16(window*256+j*8+2))
}
if b&0x10 == 0x10 {
nsec = append(nsec, uint16(window*256+j*8+3))
}
if b&0x8 == 0x8 {
nsec = append(nsec, uint16(window*256+j*8+4))
}
if b&0x4 == 0x4 {
nsec = append(nsec, uint16(window*256+j*8+5))
}
if b&0x2 == 0x2 {
nsec = append(nsec, uint16(window*256+j*8+6))
}
if b&0x1 == 0x1 {
nsec = append(nsec, uint16(window*256+j*8+7))
}
}
off += length
}
fv.Set(reflect.ValueOf(nsec))
}
case reflect.Struct:
off, err = unpackStructValue(fv, msg, off)
if val.Type().Field(i).Name == "Hdr" {
rdstart = off
}
case reflect.Uint8:
if off+1 > lenmsg {
return lenmsg, &Error{Err: "overflow unpacking uint8"}
}
fv.SetUint(uint64(uint8(msg[off])))
off++
case reflect.Uint16:
var i uint16
if off+2 > lenmsg {
return lenmsg, &Error{Err: "overflow unpacking uint16"}
}
i, off = unpackUint16(msg, off)
fv.SetUint(uint64(i))
case reflect.Uint32:
if off+4 > lenmsg {
return lenmsg, &Error{Err: "overflow unpacking uint32"}
}
fv.SetUint(uint64(uint32(msg[off])<<24 | uint32(msg[off+1])<<16 | uint32(msg[off+2])<<8 | uint32(msg[off+3])))
off += 4
case reflect.Uint64:
switch val.Type().Field(i).Tag {
default:
if off+8 > lenmsg {
return lenmsg, &Error{Err: "overflow unpacking uint64"}
}
fv.SetUint(uint64(uint64(msg[off])<<56 | uint64(msg[off+1])<<48 | uint64(msg[off+2])<<40 |
uint64(msg[off+3])<<32 | uint64(msg[off+4])<<24 | uint64(msg[off+5])<<16 | uint64(msg[off+6])<<8 | uint64(msg[off+7])))
off += 8
case `dns:"uint48"`:
// Used in TSIG where the last 48 bits are occupied, so for now, assume a uint48 (6 bytes)
if off+6 > lenmsg {
return lenmsg, &Error{Err: "overflow unpacking uint64 as uint48"}
}
fv.SetUint(uint64(uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 |
uint64(msg[off+4])<<8 | uint64(msg[off+5])))
off += 6
}
case reflect.String:
var s string
switch val.Type().Field(i).Tag {
default:
return lenmsg, &Error{Name: val.Type().Field(i).Tag.Get("dns"), Err: "bad tag unpacking string"}
case `dns:"hex"`:
// Rest of the RR is hex encoded, network order an issue here?
rdlength := int(val.FieldByName("Hdr").FieldByName("Rdlength").Uint())
endrr := rdstart + rdlength
if endrr > lenmsg {
return lenmsg, &Error{Err: "overflow unpacking hex"}
}
s = hex.EncodeToString(msg[off:endrr])
off = endrr
case `dns:"base64"`:
// Rest of the RR is base64 encoded value
rdlength := int(val.FieldByName("Hdr").FieldByName("Rdlength").Uint())
endrr := rdstart + rdlength
if endrr > lenmsg {
return lenmsg, &Error{Err: "overflow unpacking base64"}
}
s = unpackBase64(msg[off:endrr])
off = endrr
case `dns:"cdomain-name"`:
fallthrough
case `dns:"domain-name"`:
s, off, err = UnpackDomainName(msg, off)
if err != nil {
return lenmsg, err
}
case `dns:"size-base32"`:
var size int
switch val.Type().Name() {
case "RR_NSEC3":
switch val.Type().Field(i).Name {
case "NextDomain":
name := val.FieldByName("HashLength")
size = int(name.Uint())
}
}
if off+size > lenmsg {
return lenmsg, &Error{Err: "overflow unpacking base32"}
}
s = unpackBase32(msg[off : off+size])
off += size
case `dns:"size-hex"`:
// a "size" string, but it must be encoded in hex in the string
var size int
switch val.Type().Name() {
case "RR_NSEC3":
switch val.Type().Field(i).Name {
case "Salt":
name := val.FieldByName("SaltLength")
size = int(name.Uint())
case "NextDomain":
name := val.FieldByName("HashLength")
size = int(name.Uint())
}
case "RR_TSIG":
switch val.Type().Field(i).Name {
case "MAC":
name := val.FieldByName("MACSize")
size = int(name.Uint())
case "OtherData":
name := val.FieldByName("OtherLen")
size = int(name.Uint())
}
}
if off+size > lenmsg {
return lenmsg, &Error{Err: "overflow unpacking hex"}
}
s = hex.EncodeToString(msg[off : off+size])
off += size
case `dns:"txt"`:
// 1 txt piece
rdlength := int(val.FieldByName("Hdr").FieldByName("Rdlength").Uint())
Txt:
if off >= lenmsg || off+1+int(msg[off]) > lenmsg {
return lenmsg, &Error{Err: "overflow unpacking txt"}
}
n := int(msg[off])
off++
for i := 0; i < n; i++ {
s += string(msg[off+i])
}
off += n
if off < rdlength {
// More to
goto Txt
}
case "":
if off >= lenmsg || off+1+int(msg[off]) > lenmsg {
return lenmsg, &Error{Err: "overflow unpacking string"}
}
n := int(msg[off])
off++
for i := 0; i < n; i++ {
s += string(msg[off+i])
}
off += n
}
fv.SetString(s)
}
}
return off, nil
}
// unpackUint16 reads a big-endian uint16 from msg at off and returns the
// value together with the offset just past the two consumed bytes.
func unpackUint16(msg []byte, off int) (v uint16, off1 int) {
	return uint16(msg[off])<<8 | uint16(msg[off+1]), off + 2
}
// UnpackStruct unpacks msg[off:] into the struct pointed to by any and
// returns the offset just past the consumed bytes.
func UnpackStruct(any interface{}, msg []byte, off int) (off1 int, err error) {
	return unpackStructValue(structValue(any), msg, off)
}
// unpackBase32 returns b encoded with the base32 "extended hex" alphabet.
func unpackBase32(b []byte) string {
	return base32.HexEncoding.EncodeToString(b)
}
// unpackBase64 returns b encoded with the standard base64 alphabet.
func unpackBase64(b []byte) string {
	return base64.StdEncoding.EncodeToString(b)
}
// packUint16 splits i into its big-endian high and low bytes.
func packUint16(i uint16) (byte, byte) {
	hi, lo := i>>8, i&0x00FF
	return byte(hi), byte(lo)
}
// packBase64 decodes the standard-base64 text in s and returns the raw bytes.
func packBase64(s []byte) ([]byte, error) {
	decoded, err := base64.StdEncoding.DecodeString(string(s))
	if err != nil {
		return nil, err
	}
	return decoded, nil
}
// packBase32 decodes the base32 "extended hex" text in s and returns the
// raw bytes; mostly used in dnssec.go.
func packBase32(s []byte) ([]byte, error) {
	decoded, err := base32.HexEncoding.DecodeString(string(s))
	if err != nil {
		return nil, err
	}
	return decoded, nil
}
// PackRR packs the resource record rr into msg[off:]. See PackDomainName
// for documentation about the compression.
func PackRR(rr RR, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
	if rr == nil {
		return len(msg), &Error{Err: "nil rr"}
	}
	if off1, err = packStructCompress(rr, msg, off, compression, compress); err != nil {
		return len(msg), err
	}
	// The rdlength is only known after packing; patch it into the header.
	rawSetRdlength(msg, off, off1)
	return off1, nil
}
// Resource record unpacker, unpack msg[off:] into an RR.
func UnpackRR(msg []byte, off int) (rr RR, off1 int, err error) {
	// unpack just the header, to find the rr type and length
	var h RR_Header
	off0 := off // remember the record start so it can be re-unpacked in full
	if off, err = UnpackStruct(&h, msg, off); err != nil {
		return nil, len(msg), err
	}
	end := off + int(h.Rdlength) // offset where the rdata must end
	// make an rr of that type and re-unpack.
	mk, known := rr_mk[h.Rrtype]
	if !known {
		// Unknown rr type: fall back to the generic RFC 3597 representation.
		rr = new(RR_RFC3597)
	} else {
		rr = mk()
	}
	off, err = UnpackStruct(rr, msg, off0)
	if off != end {
		// The rdata did not consume exactly Rdlength bytes: return only the
		// header and skip to the end of the record.
		// NOTE(review): any unpack error is discarded on this path and nil
		// is returned instead — confirm this is intentional.
		return &h, end, nil
	}
	return rr, off, err
}
// reverseInt8 inverts m, mapping each string value back to its uint8 key.
func reverseInt8(m map[uint8]string) map[string]uint8 {
	rev := make(map[string]uint8, len(m))
	for key, value := range m {
		rev[value] = key
	}
	return rev
}
// reverseInt16 inverts m, mapping each string value back to its uint16 key.
func reverseInt16(m map[uint16]string) map[string]uint16 {
	rev := make(map[string]uint16, len(m))
	for key, value := range m {
		rev[value] = key
	}
	return rev
}
// reverseInt inverts m, mapping each string value back to its int key.
func reverseInt(m map[int]string) map[string]int {
	rev := make(map[string]int, len(m))
	for key, value := range m {
		rev[value] = key
	}
	return rev
}
// Convert a MsgHdr to a string, with dig-like headers:
//
//;; opcode: QUERY, status: NOERROR, id: 48404
//
//;; flags: qr aa rd ra;
func (h *MsgHdr) String() string {
	if h == nil {
		return "<nil> MsgHdr"
	}
	s := ";; opcode: " + OpcodeToString[h.Opcode]
	s += ", status: " + RcodeToString[h.Rcode]
	s += ", id: " + strconv.Itoa(int(h.Id)) + "\n"
	s += ";; flags:"
	// Emit each set flag, in the conventional dig order.
	flags := []struct {
		set  bool
		name string
	}{
		{h.Response, "qr"},
		{h.Authoritative, "aa"},
		{h.Truncated, "tc"},
		{h.RecursionDesired, "rd"},
		{h.RecursionAvailable, "ra"},
		{h.Zero, "z"}, // Hmm
		{h.AuthenticatedData, "ad"},
		{h.CheckingDisabled, "cd"},
	}
	for _, f := range flags {
		if f.set {
			s += " " + f.name
		}
	}
	s += ";"
	return s
}
// Pack packs a Msg: it is converted to wire format.
// If dns.Compress is true the message will be in compressed wire format.
func (dns *Msg) Pack() (msg []byte, err error) {
	var dh Header
	// Only allocate the compression map when compression is requested.
	var compression map[string]int
	if dns.Compress {
		compression = make(map[string]int) // Compression pointer mappings
	}
	// Convert convenient Msg into wire-like Header.
	dh.Id = dns.Id
	dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode)
	if dns.Response {
		dh.Bits |= _QR
	}
	if dns.Authoritative {
		dh.Bits |= _AA
	}
	if dns.Truncated {
		dh.Bits |= _TC
	}
	if dns.RecursionDesired {
		dh.Bits |= _RD
	}
	if dns.RecursionAvailable {
		dh.Bits |= _RA
	}
	if dns.Zero {
		dh.Bits |= _Z
	}
	if dns.AuthenticatedData {
		dh.Bits |= _AD
	}
	if dns.CheckingDisabled {
		dh.Bits |= _CD
	}
	// Prepare variable sized arrays.
	question := dns.Question
	answer := dns.Answer
	ns := dns.Ns
	extra := dns.Extra
	dh.Qdcount = uint16(len(question))
	dh.Ancount = uint16(len(answer))
	dh.Nscount = uint16(len(ns))
	dh.Arcount = uint16(len(extra))
	// TODO(mg): still a little too much, but better than 64K...
	msg = make([]byte, dns.Len()+10)
	// Pack it in: header and then the pieces.
	off := 0
	off, err = packStructCompress(&dh, msg, off, compression, dns.Compress)
	if err != nil {
		// BUG FIX: this error was previously ignored, so the section loops
		// below continued packing from a bogus offset.
		return nil, err
	}
	for i := 0; i < len(question); i++ {
		off, err = packStructCompress(&question[i], msg, off, compression, dns.Compress)
		if err != nil {
			return nil, err
		}
	}
	for i := 0; i < len(answer); i++ {
		off, err = PackRR(answer[i], msg, off, compression, dns.Compress)
		if err != nil {
			return nil, err
		}
	}
	for i := 0; i < len(ns); i++ {
		off, err = PackRR(ns[i], msg, off, compression, dns.Compress)
		if err != nil {
			return nil, err
		}
	}
	for i := 0; i < len(extra); i++ {
		off, err = PackRR(extra[i], msg, off, compression, dns.Compress)
		if err != nil {
			return nil, err
		}
	}
	return msg[:off], nil
}
// Unpack unpacks a binary message to a Msg structure.
func (dns *Msg) Unpack(msg []byte) (err error) {
	// Header.
	var dh Header
	off := 0
	if off, err = UnpackStruct(&dh, msg, off); err != nil {
		return err
	}
	// Spread the packed header bits over the boolean convenience fields.
	dns.Id = dh.Id
	dns.Response = (dh.Bits & _QR) != 0
	dns.Opcode = int(dh.Bits>>11) & 0xF
	dns.Authoritative = (dh.Bits & _AA) != 0
	dns.Truncated = (dh.Bits & _TC) != 0
	dns.RecursionDesired = (dh.Bits & _RD) != 0
	dns.RecursionAvailable = (dh.Bits & _RA) != 0
	dns.Zero = (dh.Bits & _Z) != 0
	dns.AuthenticatedData = (dh.Bits & _AD) != 0
	dns.CheckingDisabled = (dh.Bits & _CD) != 0
	dns.Rcode = int(dh.Bits & 0xF)
	// Arrays.
	// NOTE(review): these counts come straight from the wire; a hostile
	// message with large counts triggers large allocations here — confirm
	// callers bound the message size.
	dns.Question = make([]Question, dh.Qdcount)
	dns.Answer = make([]RR, dh.Ancount)
	dns.Ns = make([]RR, dh.Nscount)
	dns.Extra = make([]RR, dh.Arcount)
	for i := 0; i < len(dns.Question); i++ {
		off, err = UnpackStruct(&dns.Question[i], msg, off)
		if err != nil {
			return err
		}
	}
	for i := 0; i < len(dns.Answer); i++ {
		dns.Answer[i], off, err = UnpackRR(msg, off)
		if err != nil {
			return err
		}
	}
	for i := 0; i < len(dns.Ns); i++ {
		dns.Ns[i], off, err = UnpackRR(msg, off)
		if err != nil {
			return err
		}
	}
	for i := 0; i < len(dns.Extra); i++ {
		dns.Extra[i], off, err = UnpackRR(msg, off)
		if err != nil {
			return err
		}
	}
	if off != len(msg) {
		// Trailing bytes after the last record are currently ignored.
		// TODO(mg) remove eventually
		// println("extra bytes in dns packet", off, "<", len(msg))
	}
	return nil
}
// String converts a complete message to a string with dig-like output.
func (dns *Msg) String() string {
	if dns == nil {
		return "<nil> MsgHdr"
	}
	s := dns.MsgHdr.String() + " "
	s += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", "
	s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", "
	s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", "
	s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n"
	if len(dns.Question) > 0 {
		s += "\n;; QUESTION SECTION:\n"
		for _, q := range dns.Question {
			s += q.String() + "\n"
		}
	}
	if len(dns.Answer) > 0 {
		s += "\n;; ANSWER SECTION:\n"
		for _, rr := range dns.Answer {
			if rr != nil {
				s += rr.String() + "\n"
			}
		}
	}
	if len(dns.Ns) > 0 {
		s += "\n;; AUTHORITY SECTION:\n"
		for _, rr := range dns.Ns {
			if rr != nil {
				s += rr.String() + "\n"
			}
		}
	}
	if len(dns.Extra) > 0 {
		s += "\n;; ADDITIONAL SECTION:\n"
		for _, rr := range dns.Extra {
			if rr != nil {
				s += rr.String() + "\n"
			}
		}
	}
	return s
}
// Len return the message length when in (un)compressed wire format.
// If dns.Compress is true compression is taken into account, currently
// this only counts owner name compression. There is no check for
// nil valued sections (allocated, but contains no RRs).
func (dns *Msg) Len() int {
	// Message header is always 12 bytes
	l := 12
	var compression map[string]int
	if dns.Compress {
		compression = make(map[string]int)
	}
	// Question owner names are recorded so later sections can subtract
	// suffixes already seen (see compressionHelper).
	for i := 0; i < len(dns.Question); i++ {
		l += dns.Question[i].Len()
		if dns.Compress {
			compressionHelper(compression, dns.Question[i].Name)
		}
	}
	for i := 0; i < len(dns.Answer); i++ {
		if dns.Compress {
			if v, ok := compression[dns.Answer[i].Header().Name]; ok {
				// Owner name seen before: subtract its stored length.
				// NOTE(review): this does not add back the 2 bytes a
				// compression pointer occupies on the wire, so the
				// estimate looks slightly low — confirm intended.
				l += dns.Answer[i].Len() - v
				continue
			}
			compressionHelper(compression, dns.Answer[i].Header().Name)
		}
		l += dns.Answer[i].Len()
	}
	for i := 0; i < len(dns.Ns); i++ {
		if dns.Compress {
			if v, ok := compression[dns.Ns[i].Header().Name]; ok {
				l += dns.Ns[i].Len() - v
				continue
			}
			compressionHelper(compression, dns.Ns[i].Header().Name)
		}
		l += dns.Ns[i].Len()
	}
	for i := 0; i < len(dns.Extra); i++ {
		if dns.Compress {
			if v, ok := compression[dns.Extra[i].Header().Name]; ok {
				l += dns.Extra[i].Len() - v
				continue
			}
			compressionHelper(compression, dns.Extra[i].Header().Name)
		}
		l += dns.Extra[i].Len()
	}
	return l
}
// compressionHelper records, for every suffix of the domain name s, the
// wire length of that suffix in the compression map c.
func compressionHelper(c map[string]int, s string) {
	labels := SplitLabels(s)
	suffix := ""
	for idx := len(labels) - 1; idx >= 0; idx-- {
		suffix = labels[idx] + "." + suffix
		c[suffix] = len(suffix)
	}
}
// Id returns a 16 bit random number to be used as a message id. The
// randomness provided should be good enough.
func Id() uint16 {
	id := uint16(rand.Int())
	id ^= uint16(time.Now().Nanosecond())
	return id
}
// TODO: also check, when unpacking, that domain names are not longer than 255 octets.
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// DNS packet assembly, see RFC 1035. Converting from - Unpack() -
// and to - Pack() - wire format.
// All the packers and unpackers take a (msg []byte, off int)
// and return (off1 int, err error). If they return err != nil, they
// also return off1 == len(msg), so that the next unpacker will
// also fail. This lets us avoid checks of err until the end of a
// packing sequence.
package dns
import (
"encoding/base32"
"encoding/base64"
"encoding/hex"
"fmt"
"math/rand"
"net"
"reflect"
"strconv"
"time"
)
// A compression pointer is 2 flag bits plus a 14-bit offset, so only
// offsets below 2^14 can be the target of a pointer.
const maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer
// Sentinel errors returned by the packing, unpacking, resolver and
// DNSSEC/TSIG code in this package.
var (
	ErrFqdn      error = &Error{Err: "domain name must be fully qualified"}
	ErrDomain    error = &Error{Err: "domain name must be 255 bytes or less long"}
	ErrId        error = &Error{Err: "id mismatch"}
	ErrRdata     error = &Error{Err: "bad rdata"}
	ErrBuf       error = &Error{Err: "buffer size too small"}
	ErrShortRead error = &Error{Err: "short read"}
	ErrConn      error = &Error{Err: "conn holds both UDP and TCP connection"}
	ErrConnEmpty error = &Error{Err: "conn has no connection"}
	ErrServ      error = &Error{Err: "no servers could be reached"}
	ErrKey       error = &Error{Err: "bad key"}
	ErrPrivKey   error = &Error{Err: "bad private key"}
	ErrKeySize   error = &Error{Err: "bad key size"}
	ErrKeyAlg    error = &Error{Err: "bad key algorithm"}
	ErrAlg       error = &Error{Err: "bad algorithm"}
	ErrTime      error = &Error{Err: "bad time"}
	ErrNoSig     error = &Error{Err: "no signature found"}
	ErrSig       error = &Error{Err: "bad signature"}
	ErrSecret    error = &Error{Err: "no secrets defined"}
	ErrSigGen    error = &Error{Err: "bad signature generation"}
	ErrAuth      error = &Error{Err: "bad authentication"}
	ErrSoa       error = &Error{Err: "no SOA"}
	ErrRRset     error = &Error{Err: "bad rrset"}
)
// A manually-unpacked version of (id, bits).
// This is in its own struct for easy printing.
// The boolean fields correspond to the header flag bits used by
// Msg.Pack and Msg.Unpack (_QR, _AA, _TC, _RD, _RA, _Z, _AD, _CD).
type MsgHdr struct {
	Id                 uint16 // Message identifier.
	Response           bool   // QR flag (_QR).
	Opcode             int    // Kind of message, see OpcodeToString.
	Authoritative      bool   // AA flag (_AA).
	Truncated          bool   // TC flag (_TC).
	RecursionDesired   bool   // RD flag (_RD).
	RecursionAvailable bool   // RA flag (_RA).
	Zero               bool   // Z flag (_Z).
	AuthenticatedData  bool   // AD flag (_AD).
	CheckingDisabled   bool   // CD flag (_CD).
	Rcode              int    // Response code, see RcodeToString.
}
// The layout of a DNS message.
type Msg struct {
	MsgHdr                // Header section; see MsgHdr.
	Compress   bool       // If true, the message will be compressed when converted to wire format.
	Size       int        // Number of octets in the message received from the wire.
	Question   []Question // Holds the RR(s) of the question section.
	Answer     []RR       // Holds the RR(s) of the answer section.
	Ns         []RR       // Holds the RR(s) of the authority section.
	Extra      []RR       // Holds the RR(s) of the additional section.
}
// Map of strings for each RR wire type.
// Each mnemonic must match the official type name exactly, because the
// derived StringToType map (used for parsing) is built by reversing this one.
var TypeToString = map[uint16]string{
	TypeCNAME: "CNAME",
	TypeHINFO: "HINFO",
	// BUG FIX: this entry previously read "TSLA" (transposition typo),
	// which broke string lookups of the TLSA type via StringToType.
	TypeTLSA:       "TLSA",
	TypeMB:         "MB",
	TypeMG:         "MG",
	TypeRP:         "RP",
	TypeMD:         "MD",
	TypeMF:         "MF",
	TypeMINFO:      "MINFO",
	TypeMR:         "MR",
	TypeMX:         "MX",
	TypeWKS:        "WKS",
	TypeNS:         "NS",
	TypeNULL:       "NULL",
	TypeAFSDB:      "AFSDB",
	TypeX25:        "X25",
	TypeISDN:       "ISDN",
	TypePTR:        "PTR",
	TypeRT:         "RT",
	TypeSOA:        "SOA",
	TypeTXT:        "TXT",
	TypeSRV:        "SRV",
	TypeATMA:       "ATMA",
	TypeNAPTR:      "NAPTR",
	TypeKX:         "KX",
	TypeCERT:       "CERT",
	TypeDNAME:      "DNAME",
	TypeA:          "A",
	TypeAAAA:       "AAAA",
	TypeLOC:        "LOC",
	TypeOPT:        "OPT",
	TypeDS:         "DS",
	TypeDHCID:      "DHCID",
	TypeHIP:        "HIP",
	TypeNINFO:      "NINFO",
	TypeRKEY:       "RKEY",
	TypeCDS:        "CDS",
	TypeCAA:        "CAA",
	TypeIPSECKEY:   "IPSECKEY",
	TypeSSHFP:      "SSHFP",
	TypeRRSIG:      "RRSIG",
	TypeNSEC:       "NSEC",
	TypeDNSKEY:     "DNSKEY",
	TypeNSEC3:      "NSEC3",
	TypeNSEC3PARAM: "NSEC3PARAM",
	TypeTALINK:     "TALINK",
	TypeSPF:        "SPF",
	TypeNID:        "NID",
	TypeL32:        "L32",
	TypeL64:        "L64",
	TypeLP:         "LP",
	TypeTKEY:       "TKEY", // Meta RR
	TypeTSIG:       "TSIG", // Meta RR
	TypeAXFR:       "AXFR", // Meta RR
	TypeIXFR:       "IXFR", // Meta RR
	TypeANY:        "ANY",  // Meta RR
	TypeURI:        "URI",
	TypeTA:         "TA",
	TypeDLV:        "DLV",
}
// Reverse, needed for string parsing.
var StringToType = reverseInt16(TypeToString)

// StringToClass is the reverse of ClassToString, needed for string parsing.
var StringToClass = reverseInt16(ClassToString)

// Map of opcodes strings.
var StringToOpcode = reverseInt(OpcodeToString)

// Map of rcodes strings.
var StringToRcode = reverseInt(RcodeToString)
// Map of strings for each CLASS wire type.
// The reverse mapping (string to class) lives in StringToClass.
var ClassToString = map[uint16]string{
	ClassINET:   "IN",
	ClassCSNET:  "CS",
	ClassCHAOS:  "CH",
	ClassHESIOD: "HS",
	ClassNONE:   "NONE",
	ClassANY:    "ANY",
}
// Map of strings for opcodes.
// The reverse mapping (string to opcode) lives in StringToOpcode.
var OpcodeToString = map[int]string{
	OpcodeQuery:  "QUERY",
	OpcodeIQuery: "IQUERY",
	OpcodeStatus: "STATUS",
	OpcodeNotify: "NOTIFY",
	OpcodeUpdate: "UPDATE",
}
// Map of strings for rcodes.
// The reverse mapping (string to rcode) lives in StringToRcode.
var RcodeToString = map[int]string{
	RcodeSuccess:        "NOERROR",
	RcodeFormatError:    "FORMERR",
	RcodeServerFailure:  "SERVFAIL",
	RcodeNameError:      "NXDOMAIN",
	RcodeNotImplemented: "NOTIMPL",
	RcodeRefused:        "REFUSED",
	RcodeYXDomain:       "YXDOMAIN", // From RFC 2136
	RcodeYXRrset:        "YXRRSET",
	RcodeNXRrset:        "NXRRSET",
	RcodeNotAuth:        "NOTAUTH",
	RcodeNotZone:        "NOTZONE",
	// NOTE(review): the rcodes below do not fit in the 4-bit header field
	// that Unpack reads (dh.Bits & 0xF); presumably they are carried via
	// TSIG/EDNS0 — confirm.
	RcodeBadSig:   "BADSIG",
	RcodeBadKey:   "BADKEY",
	RcodeBadTime:  "BADTIME",
	RcodeBadMode:  "BADMODE",
	RcodeBadName:  "BADNAME",
	RcodeBadAlg:   "BADALG",
	RcodeBadTrunc: "BADTRUNC",
}
// Rather than write the usual handful of routines to pack and
// unpack every message that can appear on the wire, we use
// reflection to write a generic pack/unpack for structs and then
// use it. Thus, if in the future we need to define new message
// structs, no new pack/unpack/printing code needs to be written.
// Domain names are a sequence of counted strings
// split at the dots. They end with a zero-length string.
// PackDomainName packs a domain name s into msg[off:].
// If compression is wanted compress must be true and the compression
// map needs to hold a mapping between domain names and offsets
// pointing into msg[].
// PackDomainName packs a domain name s into msg[off:].
// If compression is wanted compress must be true and the compression
// map needs to hold a mapping between domain names and offsets
// pointing into msg[].
func PackDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
	lenmsg := len(msg)
	ls := len(s)
	offstart := off // remember where the name starts for the 255-octet check
	// If not fully qualified, error out
	if ls == 0 || s[ls-1] != '.' {
		return lenmsg, ErrFqdn
	}
	// Each dot ends a segment of the name.
	// We trade each dot byte for a length byte.
	// Except for escaped dots (\.), which are normal dots.
	// There is also a trailing zero.
	// Compression
	nameoffset := -1 // where a compression pointer will be written
	pointer := -1    // target offset the pointer refers to
	// Emit sequence of counted strings, chopping at dots.
	begin := 0
	bs := []byte(s)
	for i := 0; i < ls; i++ {
		if bs[i] == '\\' {
			// Drop the backslash; the following byte is taken literally.
			for j := i; j < ls-1; j++ {
				bs[j] = bs[j+1]
			}
			ls--
			// check for \DDD
			if off+1 > lenmsg {
				return lenmsg, ErrBuf
			}
			if i+2 < ls && bs[i] >= '0' && bs[i] <= '9' &&
				bs[i+1] >= '0' && bs[i+1] <= '9' &&
				bs[i+2] >= '0' && bs[i+2] <= '9' {
				// \DDD escape: collapse the three digits into one byte value.
				bs[i] = byte((bs[i]-'0')*100 + (bs[i+1]-'0')*10 + (bs[i+2] - '0'))
				for j := i + 1; j < ls-2; j++ {
					bs[j] = bs[j+2]
				}
				ls -= 2
			}
			continue
		}
		if bs[i] == '.' {
			if i-begin >= 1<<6 { // top two bits of length must be clear
				return lenmsg, ErrRdata
			}
			// off can already (we're in a loop) be bigger than len(msg)
			// this happens when a name isn't fully qualified
			if off+1 > lenmsg {
				return lenmsg, ErrBuf
			}
			msg[off] = byte(i - begin)
			offset := off
			off++
			for j := begin; j < i; j++ {
				if off+1 > lenmsg {
					return lenmsg, ErrBuf
				}
				msg[off] = bs[j]
				off++
			}
			// Dont try to compress '.'
			if compression != nil && string(bs[begin:]) != "." {
				if p, ok := compression[string(bs[begin:])]; !ok {
					// Only offsets smaller than this can be used.
					if offset < maxCompressionOffset {
						compression[string(bs[begin:])] = offset
					}
				} else {
					// The first hit is the longest matching dname
					// keep the pointer offset we get back and store
					// the offset of the current name, because that's
					// where we need to insert the pointer later
					// If compress is true, we're allowed to compress this dname
					if pointer == -1 && compress {
						pointer = p         // Where to point to
						nameoffset = offset // Where to point from
						break
					}
				}
			}
			begin = i + 1
		}
	}
	// Root label is special
	if string(bs) == "." {
		return off, nil
	}
	// If we did compression and we find something at the pointer here
	if pointer != -1 {
		// We have two bytes (14 bits) to put the pointer in
		msg[nameoffset], msg[nameoffset+1] = packUint16(uint16(pointer ^ 0xC000))
		off = nameoffset + 1
		goto End
	}
	// NOTE(review): no bounds check before writing the terminating zero —
	// confirm callers always leave room for it.
	msg[off] = 0
End:
	off++
	// Wire-format names are limited to 255 octets.
	if off-offstart > 255 {
		return lenmsg, ErrDomain
	}
	return off, nil
}
// Unpack a domain name.
// In addition to the simple sequences of counted strings above,
// domain names are allowed to refer to strings elsewhere in the
// packet, to avoid repeating common suffixes when returning
// many entries in a single domain. The pointers are marked
// by a length byte with the top two bits set. Ignoring those
// two bits, that byte and the next give a 14 bit offset from msg[0]
// where we should pick up the trail.
// Note that if we jump elsewhere in the packet,
// we return off1 == the offset after the first pointer we found,
// which is where the next record will start.
// In theory, the pointers are only allowed to jump backward.
// We let them jump anywhere and stop jumping after a while.
// UnpackDomainName unpacks a domain name into a string.
// UnpackDomainName unpacks a domain name into a string.
func UnpackDomainName(msg []byte, off int) (s string, off1 int, err error) {
	s = ""
	lenmsg := len(msg)
	ptr := 0 // number of pointers followed
	offstart := off
Loop:
	for {
		if off >= lenmsg {
			return "", lenmsg, ErrBuf
		}
		c := int(msg[off])
		off++
		// The top two bits of a label length byte select: plain label (00)
		// or compression pointer (11); 01 and 10 are reserved.
		switch c & 0xC0 {
		case 0x00:
			if c == 0x00 {
				// end of name
				if s == "" {
					return ".", off, nil
				}
				break Loop
			}
			// literal string
			if off+c > lenmsg {
				return "", lenmsg, ErrBuf
			}
			for j := off; j < off+c; j++ {
				switch {
				case msg[j] == '.': // literal dots
					s += "\\."
				case msg[j] < 32: // unprintable use \DDD
					fallthrough
				case msg[j] >= 127:
					s += fmt.Sprintf("\\%03d", msg[j])
				default:
					s += string(msg[j])
				}
			}
			s += "."
			off += c
		case 0xC0:
			// pointer to somewhere else in msg.
			// remember location after first ptr,
			// since that's how many bytes we consumed.
			// also, don't follow too many pointers --
			// maybe there's a loop.
			if off >= lenmsg {
				return "", lenmsg, ErrBuf
			}
			c1 := msg[off]
			off++
			if ptr == 0 {
				off1 = off
			}
			if ptr++; ptr > 10 {
				return "", lenmsg, &Error{Err: "too many compression pointers"}
			}
			// Jump to the 14-bit offset encoded in (c, c1).
			off = (c^0xC0)<<8 | int(c1)
		default:
			// 0x80 and 0x40 are reserved
			return "", lenmsg, ErrRdata
		}
	}
	if ptr == 0 {
		off1 = off
	}
	// NOTE(review): this measures bytes consumed at the call site, not the
	// expanded name length — confirm this matches the intended 255-octet
	// limit when compression pointers were followed.
	if off1-offstart > 255 {
		return "", lenmsg, ErrDomain
	}
	return s, off1, nil
}
// Pack a reflect.StructValue into msg. Struct members can only be uint8, uint16, uint32, string,
// slices and other (often anonymous) structs.
// The `dns:"..."` struct tag selects the wire encoding of each field.
func packStructValue(val reflect.Value, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
	lenmsg := len(msg)
	for i := 0; i < val.NumField(); i++ {
		switch fv := val.Field(i); fv.Kind() {
		default:
			return lenmsg, &Error{Err: "bad kind packing"}
		case reflect.Slice:
			switch val.Type().Field(i).Tag {
			default:
				return lenmsg, &Error{Name: val.Type().Field(i).Tag.Get("dns"), Err: "bad tag packing slice"}
			case `dns:"domain-name"`:
				for j := 0; j < val.Field(i).Len(); j++ {
					element := val.Field(i).Index(j).String()
					// NOTE(review): `false && compress` always disables
					// compression for these names — confirm intentional.
					off, err = PackDomainName(element, msg, off, compression, false && compress)
					if err != nil {
						return lenmsg, err
					}
				}
			case `dns:"txt"`:
				for j := 0; j < val.Field(i).Len(); j++ {
					element := val.Field(i).Index(j).String()
					// Counted string: 1 byte length.
					if len(element) > 255 || off+1+len(element) > lenmsg {
						return lenmsg, &Error{Err: "overflow packing txt"}
					}
					msg[off] = byte(len(element))
					off++
					for i := 0; i < len(element); i++ {
						msg[off+i] = element[i]
					}
					off += len(element)
				}
			case `dns:"opt"`: // edns
				for j := 0; j < val.Field(i).Len(); j++ {
					element := val.Field(i).Index(j).Interface()
					b, e := element.(EDNS0).pack()
					if e != nil {
						return lenmsg, &Error{Err: "overflow packing opt"}
					}
					// Option code
					msg[off], msg[off+1] = packUint16(element.(EDNS0).Option())
					// Length
					msg[off+2], msg[off+3] = packUint16(uint16(len(b)))
					off += 4
					// Actual data
					copy(msg[off:off+len(b)], b)
					off += len(b)
				}
			case `dns:"a"`:
				// It must be a slice of 4, even if it is 16, we encode
				// only the first 4
				if off+net.IPv4len > lenmsg {
					return lenmsg, &Error{Err: "overflow packing a"}
				}
				switch fv.Len() {
				case net.IPv6len:
					// 16-byte form: the IPv4 address lives in the last 4 bytes.
					msg[off] = byte(fv.Index(12).Uint())
					msg[off+1] = byte(fv.Index(13).Uint())
					msg[off+2] = byte(fv.Index(14).Uint())
					msg[off+3] = byte(fv.Index(15).Uint())
					off += net.IPv4len
				case net.IPv4len:
					msg[off] = byte(fv.Index(0).Uint())
					msg[off+1] = byte(fv.Index(1).Uint())
					msg[off+2] = byte(fv.Index(2).Uint())
					msg[off+3] = byte(fv.Index(3).Uint())
					off += net.IPv4len
				case 0:
					// Allowed, for dynamic updates
				default:
					return lenmsg, &Error{Err: "overflow packing a"}
				}
			case `dns:"aaaa"`:
				if fv.Len() > net.IPv6len || off+fv.Len() > lenmsg {
					return lenmsg, &Error{Err: "overflow packing aaaa"}
				}
				for j := 0; j < net.IPv6len; j++ {
					msg[off] = byte(fv.Index(j).Uint())
					off++
				}
			case `dns:"wks"`:
				if val.Field(i).Len() == 0 {
					break
				}
				var bitmapbyte uint16
				for j := 0; j < val.Field(i).Len(); j++ {
					serv := uint16((fv.Index(j).Uint()))
					bitmapbyte = uint16(serv / 8)
					if int(bitmapbyte) > lenmsg {
						return lenmsg, &Error{Err: "overflow packing wks"}
					}
					bit := uint16(serv) - bitmapbyte*8
					// NOTE(review): this writes at absolute index bitmapbyte
					// (not off+bitmapbyte) and assigns instead of OR-ing,
					// clobbering earlier bits in the same octet — looks
					// wrong; confirm.
					msg[bitmapbyte] = byte(1 << (7 - bit))
				}
				off += int(bitmapbyte)
			case `dns:"nsec"`: // NSEC/NSEC3
				// This is the uint16 type bitmap
				if val.Field(i).Len() == 0 {
					// Do absolutely nothing
					break
				}
				lastwindow := uint16(0)
				length := uint16(0)
				if off+2 > lenmsg {
					return lenmsg, &Error{Err: "overflow packing nsecx"}
				}
				for j := 0; j < val.Field(i).Len(); j++ {
					t := uint16((fv.Index(j).Uint()))
					window := uint16(t / 256)
					if lastwindow != window {
						// New window, jump to the new offset
						off += int(length) + 3
						if off > lenmsg {
							return lenmsg, &Error{Err: "overflow packing nsecx bitmap"}
						}
					}
					length = (t - window*256) / 8
					bit := t - (window * 256) - (length * 8)
					if off+2+int(length) > lenmsg {
						return lenmsg, &Error{Err: "overflow packing nsecx bitmap"}
					}
					// Setting the window #
					msg[off] = byte(window)
					// Setting the octets length
					msg[off+1] = byte(length + 1)
					// Setting the bit value for the type in the right octet
					msg[off+2+int(length)] |= byte(1 << (7 - bit))
					lastwindow = window
				}
				off += 2 + int(length)
				off++
				if off > lenmsg {
					return lenmsg, &Error{Err: "overflow packing nsecx bitmap"}
				}
			}
		case reflect.Struct:
			// Recurse into embedded/nested structs (e.g. the RR header).
			off, err = packStructValue(fv, msg, off, compression, compress)
		case reflect.Uint8:
			if off+1 > lenmsg {
				return lenmsg, &Error{Err: "overflow packing uint8"}
			}
			msg[off] = byte(fv.Uint())
			off++
		case reflect.Uint16:
			if off+2 > lenmsg {
				return lenmsg, &Error{Err: "overflow packing uint16"}
			}
			i := fv.Uint()
			msg[off] = byte(i >> 8)
			msg[off+1] = byte(i)
			off += 2
		case reflect.Uint32:
			if off+4 > lenmsg {
				return lenmsg, &Error{Err: "overflow packing uint32"}
			}
			i := fv.Uint()
			msg[off] = byte(i >> 24)
			msg[off+1] = byte(i >> 16)
			msg[off+2] = byte(i >> 8)
			msg[off+3] = byte(i)
			off += 4
		case reflect.Uint64:
			switch val.Type().Field(i).Tag {
			default:
				if off+8 > lenmsg {
					return lenmsg, &Error{Err: "overflow packing uint64"}
				}
				i := fv.Uint()
				msg[off] = byte(i >> 56)
				msg[off+1] = byte(i >> 48)
				msg[off+2] = byte(i >> 40)
				msg[off+3] = byte(i >> 32)
				msg[off+4] = byte(i >> 24)
				msg[off+5] = byte(i >> 16)
				msg[off+6] = byte(i >> 8)
				msg[off+7] = byte(i)
				off += 8
			case `dns:"uint48"`:
				// Used in TSIG, where it stops at 48 bits, so we discard the upper 16
				if off+6 > lenmsg {
					return lenmsg, &Error{Err: "overflow packing uint64 as uint48"}
				}
				i := fv.Uint()
				msg[off] = byte(i >> 40)
				msg[off+1] = byte(i >> 32)
				msg[off+2] = byte(i >> 24)
				msg[off+3] = byte(i >> 16)
				msg[off+4] = byte(i >> 8)
				msg[off+5] = byte(i)
				off += 6
			}
		case reflect.String:
			// There are multiple string encodings.
			// The tag distinguishes ordinary strings from domain names.
			s := fv.String()
			switch val.Type().Field(i).Tag {
			default:
				return lenmsg, &Error{Name: val.Type().Field(i).Tag.Get("dns"), Err: "bad tag packing string"}
			case `dns:"base64"`:
				b64, err := packBase64([]byte(s))
				if err != nil {
					return lenmsg, &Error{Err: "overflow packing base64"}
				}
				// NOTE(review): no bounds check before this copy — confirm
				// msg is always large enough here.
				copy(msg[off:off+len(b64)], b64)
				off += len(b64)
			case `dns:"domain-name"`:
				if off, err = PackDomainName(s, msg, off, compression, false && compress); err != nil {
					return lenmsg, err
				}
			case `dns:"cdomain-name"`:
				if off, err = PackDomainName(s, msg, off, compression, true && compress); err != nil {
					return lenmsg, err
				}
			case `dns:"size-base32"`:
				// This is purely for NSEC3 atm, the previous byte must
				// holds the length of the encoded string. As NSEC3
				// is only defined to SHA1, the hashlength is 20 (160 bits)
				msg[off-1] = 20
				fallthrough
			case `dns:"base32"`:
				b32, err := packBase32([]byte(s))
				if err != nil {
					return lenmsg, &Error{Err: "overflow packing base32"}
				}
				copy(msg[off:off+len(b32)], b32)
				off += len(b32)
			case `dns:"size-hex"`:
				fallthrough
			case `dns:"hex"`:
				// There is no length encoded here
				h, e := hex.DecodeString(s)
				if e != nil {
					return lenmsg, &Error{Err: "overflow packing hex"}
				}
				if off+hex.DecodedLen(len(s)) > lenmsg {
					return lenmsg, &Error{Err: "overflow packing hex"}
				}
				copy(msg[off:off+hex.DecodedLen(len(s))], h)
				off += hex.DecodedLen(len(s))
			case `dns:"size"`:
				// the size is already encoded in the RR, we can safely use the
				// length of string. String is RAW (not encoded in hex, nor base64)
				copy(msg[off:off+len(s)], s)
				off += len(s)
			case `dns:"txt"`:
				fallthrough
			case "":
				// Counted string: 1 byte length.
				if len(s) > 255 || off+1+len(s) > lenmsg {
					return lenmsg, &Error{Err: "overflow packing string"}
				}
				msg[off] = byte(len(s))
				off++
				for i := 0; i < len(s); i++ {
					msg[off+i] = s[i]
				}
				off += len(s)
			}
		}
	}
	return off, nil
}
// structValue returns the struct value that any, a pointer to a struct,
// points at.
func structValue(any interface{}) reflect.Value {
	v := reflect.ValueOf(any)
	return v.Elem()
}
// PackStruct packs the struct pointed to by any into msg[off:], without
// using name compression.
func PackStruct(any interface{}, msg []byte, off int) (off1 int, err error) {
	return packStructValue(structValue(any), msg, off, nil, false)
}
// packStructCompress packs the struct pointed to by any into msg[off:],
// using the supplied compression map when compress is true.
func packStructCompress(any interface{}, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
	return packStructValue(structValue(any), msg, off, compression, compress)
}
// Unpack a reflect.StructValue from msg.
// Same restrictions as packStructValue.
//
// unpackStructValue walks the fields of val in declaration order and
// decodes each one from msg[off:], dispatching on the field's kind and
// its `dns:"..."` struct tag. It returns the offset of the first byte
// after the consumed data; on any error it returns len(msg) so that a
// following unpacker fails as well.
func unpackStructValue(val reflect.Value, msg []byte, off int) (off1 int, err error) {
	// rdstart is set to the offset right after the RR header is
	// unpacked; tag handlers that consume "the rest of the rdata"
	// (wks, nsec, hex, base64) use it to compute the end offset.
	var rdstart int
	lenmsg := len(msg)
	for i := 0; i < val.NumField(); i++ {
		switch fv := val.Field(i); fv.Kind() {
		default:
			return lenmsg, &Error{Err: "bad kind unpacking"}
		case reflect.Slice:
			switch val.Type().Field(i).Tag {
			default:
				return lenmsg, &Error{Name: val.Type().Field(i).Tag.Get("dns"), Err: "bad tag unpacking slice"}
			case `dns:"domain-name"`:
				// HIP record slice of name (or none)
				servers := make([]string, 0)
				var s string
				for off < lenmsg {
					s, off, err = UnpackDomainName(msg, off)
					if err != nil {
						return lenmsg, err
					}
					servers = append(servers, s)
				}
				fv.Set(reflect.ValueOf(servers))
			case `dns:"txt"`:
				txt := make([]string, 0)
				// rdlength is really the END offset of the rdata
				// (current offset plus the header's Rdlength).
				rdlength := off + int(val.FieldByName("Hdr").FieldByName("Rdlength").Uint())
			Txts:
				// Each txt piece is a 1-byte length followed by that
				// many bytes.
				l := int(msg[off])
				if off+l+1 > lenmsg {
					return lenmsg, &Error{Err: "overflow unpacking txt"}
				}
				txt = append(txt, string(msg[off+1:off+l+1]))
				off += l + 1
				if off < rdlength {
					// More
					goto Txts
				}
				fv.Set(reflect.ValueOf(txt))
			case `dns:"opt"`: // edns0
				// TODO: multiple EDNS0 options
				rdlength := int(val.FieldByName("Hdr").FieldByName("Rdlength").Uint())
				if rdlength == 0 {
					// This is an EDNS0 (OPT Record) with no rdata
					// We can safely return here.
					break
				}
				edns := make([]EDNS0, 0)
				code := uint16(0)
				if off+2 > lenmsg {
					return lenmsg, &Error{Err: "overflow unpacking opt"}
				}
				// NOTE(review): off1 here shadows the named return value;
				// it is only used locally to find the option payload.
				code, off = unpackUint16(msg, off)
				optlen, off1 := unpackUint16(msg, off)
				if off1+int(optlen) > off+rdlength {
					return lenmsg, &Error{Err: "overflow unpacking opt"}
				}
				// NOTE(review): errors from e.unpack are discarded in all
				// three cases below — confirm that is intentional.
				switch code {
				case EDNS0NSID:
					e := new(EDNS0_NSID)
					e.unpack(msg[off1 : off1+int(optlen)])
					edns = append(edns, e)
					off = off1 + int(optlen)
				case EDNS0SUBNET:
					e := new(EDNS0_SUBNET)
					e.unpack(msg[off1 : off1+int(optlen)])
					edns = append(edns, e)
					off = off1 + int(optlen)
				case EDNS0UPDATELEASE:
					e := new(EDNS0_UPDATE_LEASE)
					e.unpack(msg[off1 : off1+int(optlen)])
					edns = append(edns, e)
					off = off1 + int(optlen)
				}
				fv.Set(reflect.ValueOf(edns))
				// multiple EDNS codes?
			case `dns:"a"`:
				if off+net.IPv4len > lenmsg {
					return lenmsg, &Error{Err: "overflow unpacking a"}
				}
				fv.Set(reflect.ValueOf(net.IPv4(msg[off], msg[off+1], msg[off+2], msg[off+3])))
				off += net.IPv4len
			case `dns:"aaaa"`:
				if off+net.IPv6len > lenmsg {
					return lenmsg, &Error{Err: "overflow unpacking aaaa"}
				}
				fv.Set(reflect.ValueOf(net.IP{msg[off], msg[off+1], msg[off+2], msg[off+3], msg[off+4],
					msg[off+5], msg[off+6], msg[off+7], msg[off+8], msg[off+9], msg[off+10],
					msg[off+11], msg[off+12], msg[off+13], msg[off+14], msg[off+15]}))
				off += net.IPv6len
			case `dns:"wks"`:
				// Rest of the record is the bitmap: bit n set means
				// port n is listed; see RFC 1035 WKS.
				rdlength := int(val.FieldByName("Hdr").FieldByName("Rdlength").Uint())
				endrr := rdstart + rdlength
				serv := make([]uint16, 0)
				j := 0
				for off < endrr {
					b := msg[off]
					// Check the bits one by one, and set the type
					if b&0x80 == 0x80 {
						serv = append(serv, uint16(j*8+0))
					}
					if b&0x40 == 0x40 {
						serv = append(serv, uint16(j*8+1))
					}
					if b&0x20 == 0x20 {
						serv = append(serv, uint16(j*8+2))
					}
					if b&0x10 == 0x10 {
						serv = append(serv, uint16(j*8+3))
					}
					if b&0x8 == 0x8 {
						serv = append(serv, uint16(j*8+4))
					}
					if b&0x4 == 0x4 {
						serv = append(serv, uint16(j*8+5))
					}
					if b&0x2 == 0x2 {
						serv = append(serv, uint16(j*8+6))
					}
					if b&0x1 == 0x1 {
						serv = append(serv, uint16(j*8+7))
					}
					j++
					off++
				}
				fv.Set(reflect.ValueOf(serv))
			case `dns:"nsec"`: // NSEC/NSEC3
				// Rest of the record is the type bitmap, encoded as
				// (window, length, bitmap) triples per RFC 4034 §4.1.2.
				rdlength := int(val.FieldByName("Hdr").FieldByName("Rdlength").Uint())
				endrr := rdstart + rdlength
				if off+2 > lenmsg {
					return lenmsg, &Error{Err: "overflow unpacking nsecx"}
				}
				nsec := make([]uint16, 0)
				length := 0
				window := 0
				for off+2 < endrr {
					window = int(msg[off])
					length = int(msg[off+1])
					//println("off, windows, length, end", off, window, length, endrr)
					if length == 0 {
						// A length window of zero is strange. If there
						// the window should not have been specified. Bail out
						// println("dns: length == 0 when unpacking NSEC")
						return lenmsg, ErrRdata
					}
					if length > 32 {
						return lenmsg, ErrRdata
					}
					// Walk the bytes in the window - and check the bit settings...
					off += 2
					for j := 0; j < length; j++ {
						b := msg[off+j]
						// Check the bits one by one, and set the type
						if b&0x80 == 0x80 {
							nsec = append(nsec, uint16(window*256+j*8+0))
						}
						if b&0x40 == 0x40 {
							nsec = append(nsec, uint16(window*256+j*8+1))
						}
						if b&0x20 == 0x20 {
							nsec = append(nsec, uint16(window*256+j*8+2))
						}
						if b&0x10 == 0x10 {
							nsec = append(nsec, uint16(window*256+j*8+3))
						}
						if b&0x8 == 0x8 {
							nsec = append(nsec, uint16(window*256+j*8+4))
						}
						if b&0x4 == 0x4 {
							nsec = append(nsec, uint16(window*256+j*8+5))
						}
						if b&0x2 == 0x2 {
							nsec = append(nsec, uint16(window*256+j*8+6))
						}
						if b&0x1 == 0x1 {
							nsec = append(nsec, uint16(window*256+j*8+7))
						}
					}
					off += length
				}
				fv.Set(reflect.ValueOf(nsec))
			}
		case reflect.Struct:
			// NOTE(review): err from the recursive call is not checked
			// here; on failure off becomes len(msg) so later fields
			// overflow, but an error in the LAST field is silently
			// dropped by the final `return off, nil` — confirm.
			off, err = unpackStructValue(fv, msg, off)
			if val.Type().Field(i).Name == "Hdr" {
				// Remember where the rdata starts for the tag handlers
				// that need the rdata end offset.
				rdstart = off
			}
		case reflect.Uint8:
			if off+1 > lenmsg {
				return lenmsg, &Error{Err: "overflow unpacking uint8"}
			}
			fv.SetUint(uint64(uint8(msg[off])))
			off++
		case reflect.Uint16:
			var i uint16
			if off+2 > lenmsg {
				return lenmsg, &Error{Err: "overflow unpacking uint16"}
			}
			i, off = unpackUint16(msg, off)
			fv.SetUint(uint64(i))
		case reflect.Uint32:
			if off+4 > lenmsg {
				return lenmsg, &Error{Err: "overflow unpacking uint32"}
			}
			fv.SetUint(uint64(uint32(msg[off])<<24 | uint32(msg[off+1])<<16 | uint32(msg[off+2])<<8 | uint32(msg[off+3])))
			off += 4
		case reflect.Uint64:
			switch val.Type().Field(i).Tag {
			default:
				if off+8 > lenmsg {
					return lenmsg, &Error{Err: "overflow unpacking uint64"}
				}
				fv.SetUint(uint64(uint64(msg[off])<<56 | uint64(msg[off+1])<<48 | uint64(msg[off+2])<<40 |
					uint64(msg[off+3])<<32 | uint64(msg[off+4])<<24 | uint64(msg[off+5])<<16 | uint64(msg[off+6])<<8 | uint64(msg[off+7])))
				off += 8
			case `dns:"uint48"`:
				// Used in TSIG where the last 48 bits are occupied, so for now, assume a uint48 (6 bytes)
				if off+6 > lenmsg {
					return lenmsg, &Error{Err: "overflow unpacking uint64 as uint48"}
				}
				fv.SetUint(uint64(uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 |
					uint64(msg[off+4])<<8 | uint64(msg[off+5])))
				off += 6
			}
		case reflect.String:
			var s string
			switch val.Type().Field(i).Tag {
			default:
				return lenmsg, &Error{Name: val.Type().Field(i).Tag.Get("dns"), Err: "bad tag unpacking string"}
			case `dns:"hex"`:
				// Rest of the RR is hex encoded, network order an issue here?
				rdlength := int(val.FieldByName("Hdr").FieldByName("Rdlength").Uint())
				endrr := rdstart + rdlength
				if endrr > lenmsg {
					return lenmsg, &Error{Err: "overflow unpacking hex"}
				}
				s = hex.EncodeToString(msg[off:endrr])
				off = endrr
			case `dns:"base64"`:
				// Rest of the RR is base64 encoded value
				rdlength := int(val.FieldByName("Hdr").FieldByName("Rdlength").Uint())
				endrr := rdstart + rdlength
				if endrr > lenmsg {
					return lenmsg, &Error{Err: "overflow unpacking base64"}
				}
				s = unpackBase64(msg[off:endrr])
				off = endrr
			case `dns:"cdomain-name"`:
				fallthrough
			case `dns:"domain-name"`:
				s, off, err = UnpackDomainName(msg, off)
				if err != nil {
					return lenmsg, err
				}
			case `dns:"size-base32"`:
				// The length of the encoded field is stored in another
				// (already unpacked) field of the same struct.
				var size int
				switch val.Type().Name() {
				case "RR_NSEC3":
					switch val.Type().Field(i).Name {
					case "NextDomain":
						name := val.FieldByName("HashLength")
						size = int(name.Uint())
					}
				}
				if off+size > lenmsg {
					return lenmsg, &Error{Err: "overflow unpacking base32"}
				}
				s = unpackBase32(msg[off : off+size])
				off += size
			case `dns:"size-hex"`:
				// a "size" string, but it must be encoded in hex in the string
				var size int
				switch val.Type().Name() {
				case "RR_NSEC3":
					switch val.Type().Field(i).Name {
					case "Salt":
						name := val.FieldByName("SaltLength")
						size = int(name.Uint())
					case "NextDomain":
						name := val.FieldByName("HashLength")
						size = int(name.Uint())
					}
				case "RR_TSIG":
					switch val.Type().Field(i).Name {
					case "MAC":
						name := val.FieldByName("MACSize")
						size = int(name.Uint())
					case "OtherData":
						name := val.FieldByName("OtherLen")
						size = int(name.Uint())
					}
				}
				if off+size > lenmsg {
					return lenmsg, &Error{Err: "overflow unpacking hex"}
				}
				s = hex.EncodeToString(msg[off : off+size])
				off += size
			case `dns:"txt"`:
				// 1 txt piece
				rdlength := int(val.FieldByName("Hdr").FieldByName("Rdlength").Uint())
			Txt:
				if off >= lenmsg || off+1+int(msg[off]) > lenmsg {
					return lenmsg, &Error{Err: "overflow unpacking txt"}
				}
				n := int(msg[off])
				off++
				for i := 0; i < n; i++ {
					s += string(msg[off+i])
				}
				off += n
				if off < rdlength {
					// More to
					goto Txt
				}
			case "":
				// Plain counted string: 1 length byte then the data.
				if off >= lenmsg || off+1+int(msg[off]) > lenmsg {
					return lenmsg, &Error{Err: "overflow unpacking string"}
				}
				n := int(msg[off])
				off++
				for i := 0; i < n; i++ {
					s += string(msg[off+i])
				}
				off += n
			}
			fv.SetString(s)
		}
	}
	return off, nil
}
// unpackUint16 reads a big-endian uint16 from msg at off and returns
// the value together with the offset just past it. Callers are
// expected to have bounds-checked msg.
func unpackUint16(msg []byte, off int) (v uint16, off1 int) {
	hi, lo := msg[off], msg[off+1]
	return uint16(hi)<<8 | uint16(lo), off + 2
}
// UnpackStruct unpacks msg starting at off into the struct pointed to
// by any and returns the first offset after the consumed data.
func UnpackStruct(any interface{}, msg []byte, off int) (off1 int, err error) {
	return unpackStructValue(structValue(any), msg, off)
}
// unpackBase32 returns b encoded as base32 text using the extended
// hex alphabet (RFC 4648 "base32hex").
func unpackBase32(b []byte) string {
	return base32.HexEncoding.EncodeToString(b)
}
// unpackBase64 returns b encoded as standard (RFC 4648) base64 text.
func unpackBase64(b []byte) string {
	return base64.StdEncoding.EncodeToString(b)
}
// packUint16 splits i into its big-endian high and low bytes.
func packUint16(i uint16) (byte, byte) {
	hi := byte(i >> 8)
	lo := byte(i & 0xFF)
	return hi, lo
}
// packBase64 decodes the base64 text in s and returns the raw bytes,
// or the decoding error.
func packBase64(s []byte) ([]byte, error) {
	out := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
	n, err := base64.StdEncoding.Decode(out, s)
	if err != nil {
		return nil, err
	}
	return out[:n], nil
}
// packBase32 decodes the base32hex text in s (mostly used in
// dnssec.go) and returns the raw bytes, or the decoding error.
func packBase32(s []byte) ([]byte, error) {
	out := make([]byte, base32.HexEncoding.DecodedLen(len(s)))
	n, err := base32.HexEncoding.Decode(out, s)
	if err != nil {
		return nil, err
	}
	return out[:n], nil
}
// Resource record packer, pack rr into msg[off:]. See PackDomainName for documentation
// about the compression. Returns the first offset after the packed
// record; on error it returns len(msg).
func PackRR(rr RR, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
	if rr == nil {
		return len(msg), &Error{Err: "nil rr"}
	}
	// Pack the header plus rdata; off1 is the first byte past the record.
	off1, err = packStructCompress(rr, msg, off, compression, compress)
	if err != nil {
		return len(msg), err
	}
	// Patch the Rdlength field in the already-written header now that
	// the actual rdata size is known.
	rawSetRdlength(msg, off, off1)
	return off1, nil
}
// Resource record unpacker, unpack msg[off:] into an RR.
// The header is unpacked first to learn the record type, then the
// whole record (header included) is re-unpacked into a value of the
// concrete RR type; unknown types fall back to RR_RFC3597.
func UnpackRR(msg []byte, off int) (rr RR, off1 int, err error) {
	// unpack just the header, to find the rr type and length
	var h RR_Header
	off0 := off
	if off, err = UnpackStruct(&h, msg, off); err != nil {
		return nil, len(msg), err
	}
	end := off + int(h.Rdlength)
	// make an rr of that type and re-unpack.
	mk, known := rr_mk[h.Rrtype]
	if !known {
		rr = new(RR_RFC3597)
	} else {
		rr = mk()
	}
	off, err = UnpackStruct(rr, msg, off0)
	if off != end {
		// Consumed size disagrees with Rdlength: return just the
		// header positioned at the declared end.
		// NOTE(review): err from UnpackStruct above is discarded here
		// and nil is returned — confirm this lenient behavior is
		// intentional.
		return &h, end, nil
	}
	return rr, off, err
}
// reverseInt8 returns a new map with the keys and values of m swapped.
func reverseInt8(m map[uint8]string) map[string]uint8 {
	out := make(map[string]uint8, len(m))
	for k, v := range m {
		out[v] = k
	}
	return out
}
// reverseInt16 returns a new map with the keys and values of m swapped.
func reverseInt16(m map[uint16]string) map[string]uint16 {
	out := make(map[string]uint16, len(m))
	for k, v := range m {
		out[v] = k
	}
	return out
}
// reverseInt returns a new map with the keys and values of m swapped.
func reverseInt(m map[int]string) map[string]int {
	out := make(map[string]int, len(m))
	for k, v := range m {
		out[v] = k
	}
	return out
}
// Convert a MsgHdr to a string, with dig-like headers:
//
//;; opcode: QUERY, status: NOERROR, id: 48404
//
//;; flags: qr aa rd ra;
func (h *MsgHdr) String() string {
	if h == nil {
		return "<nil> MsgHdr"
	}
	s := ";; opcode: " + OpcodeToString[h.Opcode]
	s += ", status: " + RcodeToString[h.Rcode]
	s += ", id: " + strconv.Itoa(int(h.Id)) + "\n"
	// Emit each set flag in the fixed dig order.
	s += ";; flags:"
	if h.Response {
		s += " qr"
	}
	if h.Authoritative {
		s += " aa"
	}
	if h.Truncated {
		s += " tc"
	}
	if h.RecursionDesired {
		s += " rd"
	}
	if h.RecursionAvailable {
		s += " ra"
	}
	if h.Zero { // Hmm
		s += " z"
	}
	if h.AuthenticatedData {
		s += " ad"
	}
	if h.CheckingDisabled {
		s += " cd"
	}
	s += ";"
	return s
}
// Pack packs a Msg: it is converted to wire format.
// If dns.Compress is true the message will be in compressed wire format.
func (dns *Msg) Pack() (msg []byte, err error) {
	var dh Header
	// Compression pointer mappings; a nil map disables compression
	// tracking entirely.
	var compression map[string]int
	if dns.Compress {
		compression = make(map[string]int)
	}
	// Convert the convenient Msg into a wire-like Header.
	dh.Id = dns.Id
	dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode)
	if dns.Response {
		dh.Bits |= _QR
	}
	if dns.Authoritative {
		dh.Bits |= _AA
	}
	if dns.Truncated {
		dh.Bits |= _TC
	}
	if dns.RecursionDesired {
		dh.Bits |= _RD
	}
	if dns.RecursionAvailable {
		dh.Bits |= _RA
	}
	if dns.Zero {
		dh.Bits |= _Z
	}
	if dns.AuthenticatedData {
		dh.Bits |= _AD
	}
	if dns.CheckingDisabled {
		dh.Bits |= _CD
	}
	// Prepare variable sized arrays.
	question := dns.Question
	answer := dns.Answer
	ns := dns.Ns
	extra := dns.Extra
	dh.Qdcount = uint16(len(question))
	dh.Ancount = uint16(len(answer))
	dh.Nscount = uint16(len(ns))
	dh.Arcount = uint16(len(extra))
	// TODO(mg): still a little too much, but better than 64K...
	msg = make([]byte, dns.Len()+10)
	// Pack it in: header and then the pieces.
	off := 0
	off, err = packStructCompress(&dh, msg, off, compression, dns.Compress)
	if err != nil {
		// Fix: this error was previously ignored, letting a failed
		// header pack continue with a bogus offset.
		return nil, err
	}
	for i := 0; i < len(question); i++ {
		off, err = packStructCompress(&question[i], msg, off, compression, dns.Compress)
		if err != nil {
			return nil, err
		}
	}
	for i := 0; i < len(answer); i++ {
		off, err = PackRR(answer[i], msg, off, compression, dns.Compress)
		if err != nil {
			return nil, err
		}
	}
	for i := 0; i < len(ns); i++ {
		off, err = PackRR(ns[i], msg, off, compression, dns.Compress)
		if err != nil {
			return nil, err
		}
	}
	for i := 0; i < len(extra); i++ {
		off, err = PackRR(extra[i], msg, off, compression, dns.Compress)
		if err != nil {
			return nil, err
		}
	}
	// Trim to the bytes actually written.
	return msg[:off], nil
}
// Unpack unpacks a binary message to a Msg structure.
func (dns *Msg) Unpack(msg []byte) (err error) {
	// Header.
	var dh Header
	off := 0
	if off, err = UnpackStruct(&dh, msg, off); err != nil {
		return err
	}
	// Explode the packed bit field into the individual MsgHdr flags.
	dns.Id = dh.Id
	dns.Response = (dh.Bits & _QR) != 0
	dns.Opcode = int(dh.Bits>>11) & 0xF
	dns.Authoritative = (dh.Bits & _AA) != 0
	dns.Truncated = (dh.Bits & _TC) != 0
	dns.RecursionDesired = (dh.Bits & _RD) != 0
	dns.RecursionAvailable = (dh.Bits & _RA) != 0
	dns.Zero = (dh.Bits & _Z) != 0
	dns.AuthenticatedData = (dh.Bits & _AD) != 0
	dns.CheckingDisabled = (dh.Bits & _CD) != 0
	dns.Rcode = int(dh.Bits & 0xF)
	// Arrays.
	// NOTE(review): the counts come straight off the wire; a hostile
	// message can make these allocations large — confirm acceptable.
	dns.Question = make([]Question, dh.Qdcount)
	dns.Answer = make([]RR, dh.Ancount)
	dns.Ns = make([]RR, dh.Nscount)
	dns.Extra = make([]RR, dh.Arcount)
	for i := 0; i < len(dns.Question); i++ {
		off, err = UnpackStruct(&dns.Question[i], msg, off)
		if err != nil {
			return err
		}
	}
	for i := 0; i < len(dns.Answer); i++ {
		dns.Answer[i], off, err = UnpackRR(msg, off)
		if err != nil {
			return err
		}
	}
	for i := 0; i < len(dns.Ns); i++ {
		dns.Ns[i], off, err = UnpackRR(msg, off)
		if err != nil {
			return err
		}
	}
	for i := 0; i < len(dns.Extra); i++ {
		dns.Extra[i], off, err = UnpackRR(msg, off)
		if err != nil {
			return err
		}
	}
	// Trailing bytes after the last record are deliberately ignored.
	if off != len(msg) {
		// TODO(mg) remove eventually
		// println("extra bytes in dns packet", off, "<", len(msg))
	}
	return nil
}
// Convert a complete message to a string with dig-like output:
// the header line, per-section counts, then each non-nil RR of the
// question, answer, authority and additional sections.
func (dns *Msg) String() string {
	if dns == nil {
		return "<nil> MsgHdr"
	}
	s := dns.MsgHdr.String() + " "
	s += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", "
	s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", "
	s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", "
	s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n"
	if len(dns.Question) > 0 {
		s += "\n;; QUESTION SECTION:\n"
		for i := 0; i < len(dns.Question); i++ {
			s += dns.Question[i].String() + "\n"
		}
	}
	if len(dns.Answer) > 0 {
		s += "\n;; ANSWER SECTION:\n"
		for i := 0; i < len(dns.Answer); i++ {
			// Sections may be allocated but sparsely filled; skip nils.
			if dns.Answer[i] != nil {
				s += dns.Answer[i].String() + "\n"
			}
		}
	}
	if len(dns.Ns) > 0 {
		s += "\n;; AUTHORITY SECTION:\n"
		for i := 0; i < len(dns.Ns); i++ {
			if dns.Ns[i] != nil {
				s += dns.Ns[i].String() + "\n"
			}
		}
	}
	if len(dns.Extra) > 0 {
		s += "\n;; ADDITIONAL SECTION:\n"
		for i := 0; i < len(dns.Extra); i++ {
			if dns.Extra[i] != nil {
				s += dns.Extra[i].String() + "\n"
			}
		}
	}
	return s
}
// Len return the message length when in (un)compressed wire format.
// If dns.Compress is true compression is taken into account, currently
// this only counts owner name compression. There is no check for
// nil valued sections (allocated, but contains no RRs).
func (dns *Msg) Len() int {
	// Message header is always 12 bytes
	l := 12
	// Maps each seen owner name (and suffixes, via compressionHelper)
	// to the byte savings a later reference to it would give.
	var compression map[string]int
	if dns.Compress {
		compression = make(map[string]int)
	}
	for i := 0; i < len(dns.Question); i++ {
		l += dns.Question[i].Len()
		if dns.Compress {
			compressionHelper(compression, dns.Question[i].Name)
		}
	}
	for i := 0; i < len(dns.Answer); i++ {
		if dns.Compress {
			// A previously seen name compresses to a 2-byte pointer;
			// the stored value is the bytes saved (see compressionHelper).
			if v, ok := compression[dns.Answer[i].Header().Name]; ok {
				l += dns.Answer[i].Len() - v
				continue
			}
			compressionHelper(compression, dns.Answer[i].Header().Name)
		}
		l += dns.Answer[i].Len()
	}
	for i := 0; i < len(dns.Ns); i++ {
		if dns.Compress {
			if v, ok := compression[dns.Ns[i].Header().Name]; ok {
				l += dns.Ns[i].Len() - v
				continue
			}
			compressionHelper(compression, dns.Ns[i].Header().Name)
		}
		l += dns.Ns[i].Len()
	}
	for i := 0; i < len(dns.Extra); i++ {
		if dns.Compress {
			if v, ok := compression[dns.Extra[i].Header().Name]; ok {
				l += dns.Extra[i].Len() - v
				continue
			}
			compressionHelper(compression, dns.Extra[i].Header().Name)
		}
		l += dns.Extra[i].Len()
	}
	return l
}
// compressionHelper records s and every parent suffix of s in the
// compression map c. The stored value is the wire length of that
// suffix (one length byte plus the label text per label), which
// Msg.Len uses as the byte savings when a later owner name matches.
func compressionHelper(c map[string]int, s string) {
	pref := ""
	lbs := SplitLabels(s)
	// Walk labels right to left so each longer suffix builds on the
	// shorter one already accumulated in pref.
	for j := len(lbs) - 1; j >= 0; j-- {
		c[lbs[j]+"."+pref] = 1 + len(pref) + len(lbs[j])
		pref = lbs[j] + "." + pref
	}
}
// Id return a 16 bits random number to be used as a
// message id. The random provided should be good enough.
func Id() uint16 {
	// XOR the PRNG output with the clock's nanoseconds for extra
	// variation; this is NOT cryptographically secure.
	return uint16(rand.Int()) ^ uint16(time.Now().Nanosecond())
}
|
// DNS packet assembly, see RFC 1035. Converting from - Unpack() -
// and to - Pack() - wire format.
// All the packers and unpackers take a (msg []byte, off int)
// and return (off1 int, err error). If they return a non-nil err, they
// also return off1 == len(msg), so that the next unpacker will
// also fail. This lets us avoid checks of err until the end of a
// packing sequence.
package dns
//go:generate go run msg_generate.go
//go:generate go run compress_generate.go
import (
crand "crypto/rand"
"encoding/binary"
"fmt"
"math/big"
"math/rand"
"strconv"
"sync"
)
// Wire-format size limits.
const (
	maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer
	maxDomainNameWireOctets = 255 // See RFC 1035 section 2.3.4
)
// Errors defined in this package.
var (
	ErrAlg error = &Error{err: "bad algorithm"} // ErrAlg indicates an error with the (DNSSEC) algorithm.
	ErrAuth error = &Error{err: "bad authentication"} // ErrAuth indicates an error in the TSIG authentication.
	ErrBuf error = &Error{err: "buffer size too small"} // ErrBuf indicates that the buffer used is too small for the message.
	ErrConnEmpty error = &Error{err: "conn has no connection"} // ErrConnEmpty indicates a connection is being used before it is initialized.
	ErrExtendedRcode error = &Error{err: "bad extended rcode"} // ErrExtendedRcode indicates an invalid extended rcode.
	ErrFqdn error = &Error{err: "domain must be fully qualified"} // ErrFqdn indicates that a domain name does not have a closing dot.
	ErrId error = &Error{err: "id mismatch"} // ErrId indicates there is a mismatch with the message's ID.
	ErrKeyAlg error = &Error{err: "bad key algorithm"} // ErrKeyAlg indicates that the algorithm in the key is not valid.
	ErrKey error = &Error{err: "bad key"} // ErrKey indicates a bad key.
	ErrKeySize error = &Error{err: "bad key size"} // ErrKeySize indicates a bad key size.
	ErrLongDomain error = &Error{err: fmt.Sprintf("domain name exceeded %d wire-format octets", maxDomainNameWireOctets)} // ErrLongDomain indicates a domain name longer than the wire-format limit.
	ErrNoSig error = &Error{err: "no signature found"} // ErrNoSig indicates no signature was found.
	ErrPrivKey error = &Error{err: "bad private key"} // ErrPrivKey indicates a bad private key.
	ErrRcode error = &Error{err: "bad rcode"} // ErrRcode indicates a bad rcode.
	ErrRdata error = &Error{err: "bad rdata"} // ErrRdata indicates bad rdata.
	ErrRRset error = &Error{err: "bad rrset"} // ErrRRset indicates a bad rrset.
	ErrSecret error = &Error{err: "no secrets defined"} // ErrSecret indicates no (TSIG) secrets are defined.
	ErrShortRead error = &Error{err: "short read"} // ErrShortRead indicates a short read.
	ErrSig error = &Error{err: "bad signature"} // ErrSig indicates that a signature can not be cryptographically validated.
	ErrSoa error = &Error{err: "no SOA"} // ErrSOA indicates that no SOA RR was seen when doing zone transfers.
	ErrTime error = &Error{err: "bad time"} // ErrTime indicates a timing error in TSIG authentication.
	ErrTruncated error = &Error{err: "failed to unpack truncated message"} // ErrTruncated indicates that we failed to unpack a truncated message. We unpacked as much as we had so Msg can still be used, if desired.
)
// Id by default, returns a 16 bits random number to be used as a
// message id. The random provided should be good enough. This being a
// variable the function can be reassigned to a custom function.
// For instance, to make it return a static value:
//
//	dns.Id = func() uint16 { return 3 }
var Id = id
// idLock guards the lazily-seeded idRand generator used by id().
var (
	idLock sync.Mutex
	idRand *rand.Rand
)
// id returns a 16 bits random number to be used as a
// message id. The random provided should be good enough.
func id() uint16 {
	idLock.Lock()
	if idRand == nil {
		// This (partially) works around
		// https://github.com/golang/go/issues/11833 by only
		// seeding idRand upon the first call to id.
		// Seed from crypto/rand when possible; fall back to the
		// global math/rand source otherwise.
		var seed int64
		var buf [8]byte
		if _, err := crand.Read(buf[:]); err == nil {
			seed = int64(binary.LittleEndian.Uint64(buf[:]))
		} else {
			seed = rand.Int63()
		}
		idRand = rand.New(rand.NewSource(seed))
	}
	// The call to idRand.Uint32 must be within the
	// mutex lock because *rand.Rand is not safe for
	// concurrent use.
	//
	// There is no added performance overhead to calling
	// idRand.Uint32 inside a mutex lock over just
	// calling rand.Uint32 as the global math/rand rng
	// is internally protected by a sync.Mutex.
	id := uint16(idRand.Uint32())
	idLock.Unlock()
	return id
}
// MsgHdr is a manually-unpacked version of (id, bits): the DNS
// message header with the flag bits broken out into booleans.
type MsgHdr struct {
	Id uint16 // Message identifier.
	Response bool // QR: true for responses, false for queries.
	Opcode int // Kind of query (QUERY, NOTIFY, UPDATE, ...).
	Authoritative bool // AA flag.
	Truncated bool // TC flag.
	RecursionDesired bool // RD flag.
	RecursionAvailable bool // RA flag.
	Zero bool // Z: reserved flag, normally zero.
	AuthenticatedData bool // AD flag.
	CheckingDisabled bool // CD flag.
	Rcode int // Response code.
}
// Msg contains the layout of a DNS message.
type Msg struct {
	MsgHdr // Embedded header (id, flags, rcode).
	Compress bool `json:"-"` // If true, the message will be compressed when converted to wire format.
	Question []Question // Holds the RR(s) of the question section.
	Answer []RR // Holds the RR(s) of the answer section.
	Ns []RR // Holds the RR(s) of the authority section.
	Extra []RR // Holds the RR(s) of the additional section.
}
// ClassToString maps Classes to strings for each CLASS wire type.
var ClassToString = map[uint16]string{
	ClassINET: "IN",
	ClassCSNET: "CS",
	ClassCHAOS: "CH",
	ClassHESIOD: "HS",
	ClassNONE: "NONE",
	ClassANY: "ANY",
}
// OpcodeToString maps Opcodes to strings.
var OpcodeToString = map[int]string{
	OpcodeQuery: "QUERY",
	OpcodeIQuery: "IQUERY",
	OpcodeStatus: "STATUS",
	OpcodeNotify: "NOTIFY",
	OpcodeUpdate: "UPDATE",
}
// RcodeToString maps Rcodes to strings.
var RcodeToString = map[int]string{
	RcodeSuccess: "NOERROR",
	RcodeFormatError: "FORMERR",
	RcodeServerFailure: "SERVFAIL",
	RcodeNameError: "NXDOMAIN",
	RcodeNotImplemented: "NOTIMPL",
	RcodeRefused: "REFUSED",
	RcodeYXDomain: "YXDOMAIN", // See RFC 2136
	RcodeYXRrset: "YXRRSET",
	RcodeNXRrset: "NXRRSET",
	RcodeNotAuth: "NOTAUTH",
	RcodeNotZone: "NOTZONE",
	RcodeBadSig: "BADSIG", // Also known as RcodeBadVers, see RFC 6891
	// RcodeBadVers: "BADVERS",
	RcodeBadKey: "BADKEY",
	RcodeBadTime: "BADTIME",
	RcodeBadMode: "BADMODE",
	RcodeBadName: "BADNAME",
	RcodeBadAlg: "BADALG",
	RcodeBadTrunc: "BADTRUNC",
	RcodeBadCookie: "BADCOOKIE",
}
// Domain names are a sequence of counted strings
// split at the dots. They end with a zero-length string.

// PackDomainName packs a domain name s into msg[off:].
// If compression is wanted compress must be true and the compression
// map needs to hold a mapping between domain names and offsets
// pointing into msg.
func PackDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
	// The label count reported by packDomainName is not needed here.
	next, _, perr := packDomainName(s, msg, off, compression, compress)
	return next, perr
}
// packDomainName packs the presentation-format name s into msg[off:]
// as a sequence of length-prefixed labels, handling \X and \DDD
// escapes and (optionally) emitting a 14-bit compression pointer.
// It returns the offset past the packed name and the number of labels
// written. With msg == nil it only computes sizes (no bytes written,
// no compression).
func packDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, labels int, err error) {
	// special case if msg == nil
	lenmsg := 256
	if msg != nil {
		lenmsg = len(msg)
	}
	ls := len(s)
	if ls == 0 { // Ok, for instance when dealing with update RR without any rdata.
		return off, 0, nil
	}
	// If not fully qualified, error out, but only if msg != nil #ugly
	if s[ls-1] != '.' {
		if msg != nil {
			return lenmsg, 0, ErrFqdn
		}
		s += "."
		ls++
	}
	// Each dot ends a segment of the name.
	// We trade each dot byte for a length byte.
	// Except for escaped dots (\.), which are normal dots.
	// There is also a trailing zero.
	// Compression
	pointer := -1
	// Emit sequence of counted strings, chopping at dots.
	// bs is a lazily-made mutable copy of s used only once an escape
	// is seen; roBs mirrors the current (possibly unescaped) text for
	// compression-map lookups.
	begin := 0
	var bs []byte
	roBs, bsFresh, wasDot := s, true, false
loop:
	for i := 0; i < ls; i++ {
		var c byte
		if bs == nil {
			c = s[i]
		} else {
			c = bs[i]
		}
		switch c {
		case '\\':
			if bs == nil {
				bs = []byte(s)
			}
			// Drop the backslash, shifting the rest left.
			copy(bs[i:ls-1], bs[i+1:])
			ls--
			if off+1 > lenmsg {
				return lenmsg, labels, ErrBuf
			}
			// check for \DDD
			if i+2 < ls && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) {
				bs[i] = dddToByte(bs[i:])
				copy(bs[i+1:ls-2], bs[i+3:])
				ls -= 2
			}
			bsFresh = false
			wasDot = false
		case '.':
			if wasDot {
				// two dots back to back is not legal
				return lenmsg, labels, ErrRdata
			}
			wasDot = true
			if i-begin >= 1<<6 { // top two bits of length must be clear
				return lenmsg, labels, ErrRdata
			}
			// off can already (we're in a loop) be bigger than len(msg)
			// this happens when a name isn't fully qualified
			if off+1+(i-begin) > lenmsg {
				return lenmsg, labels, ErrBuf
			}
			if compress && !bsFresh {
				roBs = string(bs)
				bsFresh = true
			}
			// Don't try to compress '.'
			// We should only compress when compress is true, but we should also still pick
			// up names that can be used for *future* compression(s).
			if compression != nil && roBs[begin:] != "." {
				if p, ok := compression[roBs[begin:]]; ok {
					// The first hit is the longest matching dname
					// keep the pointer offset we get back and store
					// the offset of the current name, because that's
					// where we need to insert the pointer later
					// If compress is true, we're allowed to compress this dname
					if compress {
						pointer = p // Where to point to
						break loop
					}
				} else if off < maxCompressionOffset {
					// Only offsets smaller than maxCompressionOffset can be used.
					compression[roBs[begin:]] = off
				}
			}
			// The following is covered by the length check above.
			if msg != nil {
				msg[off] = byte(i - begin)
				if bs == nil {
					copy(msg[off+1:], s[begin:i])
				} else {
					copy(msg[off+1:], bs[begin:i])
				}
			}
			off += 1 + i - begin
			labels++
			begin = i + 1
		default:
			wasDot = false
		}
	}
	// Root label is special
	if bs == nil && len(s) == 1 && s[0] == '.' {
		return off, labels, nil
	}
	if bs != nil && len(bs) == 1 && bs[0] == '.' {
		return off, labels, nil
	}
	// If we did compression and we find something add the pointer here
	if pointer != -1 {
		// We have two bytes (14 bits) to put the pointer in
		// if msg == nil, we will never do compression
		binary.BigEndian.PutUint16(msg[off:], uint16(pointer^0xC000))
		return off + 2, labels, nil
	}
	// Terminating zero-length label.
	if msg != nil && off < lenmsg {
		msg[off] = 0
	}
	return off + 1, labels, nil
}
// Unpack a domain name.
// In addition to the simple sequences of counted strings above,
// domain names are allowed to refer to strings elsewhere in the
// packet, to avoid repeating common suffixes when returning
// many entries in a single domain. The pointers are marked
// by a length byte with the top two bits set. Ignoring those
// two bits, that byte and the next give a 14 bit offset from msg[0]
// where we should pick up the trail.
// Note that if we jump elsewhere in the packet,
// we return off1 == the offset after the first pointer we found,
// which is where the next record will start.
// In theory, the pointers are only allowed to jump backward.
// We let them jump anywhere and stop jumping after a while.

// UnpackDomainName unpacks a domain name into a string.
func UnpackDomainName(msg []byte, off int) (string, int, error) {
	s := make([]byte, 0, 64)
	off1 := 0
	lenmsg := len(msg)
	// maxLen bounds the presentation form; it grows as escapes are
	// emitted since each escape inflates the text beyond wire length.
	maxLen := maxDomainNameWireOctets
	ptr := 0 // number of pointers followed
Loop:
	for {
		if off >= lenmsg {
			return "", lenmsg, ErrBuf
		}
		c := int(msg[off])
		off++
		switch c & 0xC0 {
		case 0x00:
			if c == 0x00 {
				// end of name
				break Loop
			}
			// literal string
			if off+c > lenmsg {
				return "", lenmsg, ErrBuf
			}
			for j := off; j < off+c; j++ {
				switch b := msg[j]; b {
				case '.', '(', ')', ';', ' ', '@':
					fallthrough
				case '"', '\\':
					s = append(s, '\\', b)
					// presentation-format \X escapes add an extra byte
					maxLen++
				default:
					if b < 32 || b >= 127 { // unprintable, use \DDD
						var buf [3]byte
						bufs := strconv.AppendInt(buf[:0], int64(b), 10)
						s = append(s, '\\')
						// Zero-pad to exactly three digits.
						for i := len(bufs); i < 3; i++ {
							s = append(s, '0')
						}
						s = append(s, bufs...)
						// presentation-format \DDD escapes add 3 extra bytes
						maxLen += 3
					} else {
						s = append(s, b)
					}
				}
			}
			s = append(s, '.')
			off += c
		case 0xC0:
			// pointer to somewhere else in msg.
			// remember location after first ptr,
			// since that's how many bytes we consumed.
			// also, don't follow too many pointers --
			// maybe there's a loop.
			if off >= lenmsg {
				return "", lenmsg, ErrBuf
			}
			c1 := msg[off]
			off++
			if ptr == 0 {
				off1 = off
			}
			if ptr++; ptr > 10 {
				return "", lenmsg, &Error{err: "too many compression pointers"}
			}
			// pointer should guarantee that it advances and points forwards at least
			// but the condition on previous three lines guarantees that it's
			// at least loop-free
			off = (c^0xC0)<<8 | int(c1)
		default:
			// 0x80 and 0x40 are reserved
			return "", lenmsg, ErrRdata
		}
	}
	if ptr == 0 {
		off1 = off
	}
	if len(s) == 0 {
		s = []byte(".")
	} else if len(s) >= maxLen {
		// error if the name is too long, but don't throw it away
		return string(s), lenmsg, ErrLongDomain
	}
	return string(s), off1, nil
}
// packTxt packs the strings in txt into msg[offset:] as a sequence of
// counted strings, using tmp as scratch space for escape processing.
// It returns the offset past the packed data.
func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) {
	if len(txt) == 0 {
		if offset >= len(msg) {
			return offset, ErrBuf
		}
		// Empty TXT: a single zero-length string.
		// NOTE(review): offset is returned unchanged even though a
		// byte was written — confirm callers rely on this.
		msg[offset] = 0
		return offset, nil
	}
	var err error
	for i := range txt {
		if len(txt[i]) > len(tmp) {
			return offset, ErrBuf
		}
		offset, err = packTxtString(txt[i], msg, offset, tmp)
		if err != nil {
			return offset, err
		}
	}
	return offset, nil
}
// packTxtString packs a single counted string s into msg[offset:],
// resolving \X and \DDD escapes, and writes the length byte once the
// final unescaped size is known. tmp is scratch space at least len(s)
// bytes long. Returns the offset past the string.
func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) {
	// Reserve the length byte; it is filled in at the end.
	lenByteOffset := offset
	if offset >= len(msg) || len(s) > len(tmp) {
		return offset, ErrBuf
	}
	offset++
	bs := tmp[:len(s)]
	copy(bs, s)
	for i := 0; i < len(bs); i++ {
		if len(msg) <= offset {
			return offset, ErrBuf
		}
		if bs[i] == '\\' {
			i++
			if i == len(bs) {
				break
			}
			// check for \DDD
			if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) {
				msg[offset] = dddToByte(bs[i:])
				i += 2
			} else {
				msg[offset] = bs[i]
			}
		} else {
			msg[offset] = bs[i]
		}
		offset++
	}
	l := offset - lenByteOffset - 1
	if l > 255 {
		return offset, &Error{err: "string exceeded 255 bytes in txt"}
	}
	msg[lenByteOffset] = byte(l)
	return offset, nil
}
// packOctetString packs s into msg[offset:] with \X and \DDD escapes
// resolved but WITHOUT a leading length byte (unlike packTxtString).
// tmp is scratch space at least len(s) bytes long. Returns the offset
// past the packed data.
func packOctetString(s string, msg []byte, offset int, tmp []byte) (int, error) {
	if offset >= len(msg) || len(s) > len(tmp) {
		return offset, ErrBuf
	}
	bs := tmp[:len(s)]
	copy(bs, s)
	for i := 0; i < len(bs); i++ {
		if len(msg) <= offset {
			return offset, ErrBuf
		}
		if bs[i] == '\\' {
			i++
			if i == len(bs) {
				break
			}
			// check for \DDD
			if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) {
				msg[offset] = dddToByte(bs[i:])
				i += 2
			} else {
				msg[offset] = bs[i]
			}
		} else {
			msg[offset] = bs[i]
		}
		offset++
	}
	return offset, nil
}
// unpackTxt unpacks a sequence of counted strings from msg starting at
// off0, until the end of msg or the first error. It returns the
// collected strings and the final offset.
func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) {
	off = off0
	var s string
	for off < len(msg) && err == nil {
		s, off, err = unpackString(msg, off)
		if err == nil {
			ss = append(ss, s)
		}
	}
	return
}
// Helpers for dealing with escaped bytes

// isDigit reports whether b is an ASCII decimal digit.
func isDigit(b byte) bool {
	return '0' <= b && b <= '9'
}
// dddToByte converts the three ASCII digits at the start of s (a \DDD
// escape) into the byte value they denote.
func dddToByte(s []byte) byte {
	_ = s[2] // eliminate later bounds checks
	hundreds := s[0] - '0'
	tens := s[1] - '0'
	ones := s[2] - '0'
	return hundreds*100 + tens*10 + ones
}
// dddStringToByte is the string variant of dddToByte: it converts the
// three ASCII digits at the start of s into the byte they denote.
func dddStringToByte(s string) byte {
	_ = s[2] // eliminate later bounds checks
	hundreds := s[0] - '0'
	tens := s[1] - '0'
	ones := s[2] - '0'
	return hundreds*100 + tens*10 + ones
}
// Helper function for packing and unpacking
func intToBytes(i *big.Int, length int) []byte {
buf := i.Bytes()
if len(buf) < length {
b := make([]byte, length)
copy(b[length-len(buf):], buf)
return b
}
return buf
}
// PackRR packs a resource record rr into msg[off:].
// See PackDomainName for documentation about the compression.
// On success it returns the first offset past the record; on failure
// it returns len(msg) (pack error) or the original off (bad rdlength).
func PackRR(rr RR, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
	if rr == nil {
		return len(msg), &Error{err: "nil rr"}
	}
	off1, err = rr.pack(msg, off, compression, compress)
	if err != nil {
		return len(msg), err
	}
	// TODO(miek): Not sure if this is needed? If removed we can remove rawmsg.go as well.
	// Patch the Rdlength field now that the rdata size is known.
	if rawSetRdlength(msg, off, off1) {
		return off1, nil
	}
	return off, ErrRdata
}
// UnpackRR unpacks msg[off:] into an RR.
func UnpackRR(msg []byte, off int) (rr RR, off1 int, err error) {
h, off, msg, err := unpackHeader(msg, off)
if err != nil {
return nil, len(msg), err
}
return UnpackRRWithHeader(h, msg, off)
}
// UnpackRRWithHeader unpacks the record type specific payload given an existing
// RR_Header.
func UnpackRRWithHeader(h RR_Header, msg []byte, off int) (rr RR, off1 int, err error) {
end := off + int(h.Rdlength)
if fn, known := typeToUnpack[h.Rrtype]; !known {
rr, off, err = unpackRFC3597(h, msg, off)
} else {
rr, off, err = fn(h, msg, off)
}
if off != end {
return &h, end, &Error{err: "bad rdlength"}
}
return rr, off, err
}
// unpackRRslice unpacks msg[off:] into an []RR.
// If we cannot unpack the whole array, then it will return nil
func unpackRRslice(l int, msg []byte, off int) (dst1 []RR, off1 int, err error) {
	var r RR
	// Don't pre-allocate, l may be under attacker control
	var dst []RR
	for i := 0; i < l; i++ {
		off1 := off
		r, off, err = UnpackRR(msg, off)
		if err != nil {
			// Jump to the end of the buffer so follow-up unpacking fails fast.
			off = len(msg)
			break
		}
		// If offset does not increase anymore, l is a lie
		if off1 == off {
			l = i
			break
		}
		dst = append(dst, r)
	}
	if err != nil && off == len(msg) {
		// Partial failure: drop everything rather than return half a section.
		dst = nil
	}
	return dst, off, err
}
// Convert a MsgHdr to a string, with dig-like headers:
//
//;; opcode: QUERY, status: NOERROR, id: 48404
//
//;; flags: qr aa rd ra;
func (h *MsgHdr) String() string {
	if h == nil {
		return "<nil> MsgHdr"
	}

	s := ";; opcode: " + OpcodeToString[h.Opcode] +
		", status: " + RcodeToString[h.Rcode] +
		", id: " + strconv.Itoa(int(h.Id)) + "\n"

	// Emit each set header flag in canonical order.
	s += ";; flags:"
	flags := []struct {
		set  bool
		name string
	}{
		{h.Response, " qr"},
		{h.Authoritative, " aa"},
		{h.Truncated, " tc"},
		{h.RecursionDesired, " rd"},
		{h.RecursionAvailable, " ra"},
		{h.Zero, " z"}, // Hmm
		{h.AuthenticatedData, " ad"},
		{h.CheckingDisabled, " cd"},
	}
	for _, f := range flags {
		if f.set {
			s += f.name
		}
	}
	return s + ";"
}
// Pack packs a Msg: it is converted to wire format.
// If the dns.Compress is true the message will be in compressed wire format.
func (dns *Msg) Pack() (msg []byte, err error) {
	return dns.PackBuffer(nil)
}

// PackBuffer packs a Msg, using the given buffer buf. If buf is too small a new buffer is allocated.
func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) {
	var compression map[string]int
	if dns.Compress {
		compression = make(map[string]int) // Compression pointer mappings.
	}
	return dns.packBufferWithCompressionMap(buf, compression)
}
// packBufferWithCompressionMap packs a Msg, using the given buffer buf.
// A non-nil compression map enables name compression; the map collects
// name -> offset entries as the message is written.
func (dns *Msg) packBufferWithCompressionMap(buf []byte, compression map[string]int) (msg []byte, err error) {
	// We use a similar function in tsig.go's stripTsig.
	var dh Header
	if dns.Rcode < 0 || dns.Rcode > 0xFFF {
		return nil, ErrRcode
	}
	// Set extended rcode unconditionally if we have an opt, this will allow
	// resetting the extended rcode bits if they need to.
	if opt := dns.IsEdns0(); opt != nil {
		opt.SetExtendedRcode(uint16(dns.Rcode))
	} else if dns.Rcode > 0xF {
		// If Rcode is an extended one and opt is nil, error out.
		return nil, ErrExtendedRcode
	}

	// Convert convenient Msg into wire-like Header: the opcode and the low
	// 4 rcode bits share the Bits word with the boolean flags below.
	dh.Id = dns.Id
	dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode&0xF)
	if dns.Response {
		dh.Bits |= _QR
	}
	if dns.Authoritative {
		dh.Bits |= _AA
	}
	if dns.Truncated {
		dh.Bits |= _TC
	}
	if dns.RecursionDesired {
		dh.Bits |= _RD
	}
	if dns.RecursionAvailable {
		dh.Bits |= _RA
	}
	if dns.Zero {
		dh.Bits |= _Z
	}
	if dns.AuthenticatedData {
		dh.Bits |= _AD
	}
	if dns.CheckingDisabled {
		dh.Bits |= _CD
	}

	// Prepare variable sized arrays.
	question := dns.Question
	answer := dns.Answer
	ns := dns.Ns
	extra := dns.Extra

	dh.Qdcount = uint16(len(question))
	dh.Ancount = uint16(len(answer))
	dh.Nscount = uint16(len(ns))
	dh.Arcount = uint16(len(extra))

	// We need the uncompressed length here, because we first pack it and then compress it.
	msg = buf
	uncompressedLen := compressedLen(dns, false)
	// The uncompressed size is an upper bound for the packed size, so a
	// buffer of that size (plus one) always fits the final message.
	if packLen := uncompressedLen + 1; len(msg) < packLen {
		msg = make([]byte, packLen)
	}

	// Pack it in: header and then the pieces.
	off := 0
	off, err = dh.pack(msg, off, compression, dns.Compress)
	if err != nil {
		return nil, err
	}
	for i := 0; i < len(question); i++ {
		off, err = question[i].pack(msg, off, compression, dns.Compress)
		if err != nil {
			return nil, err
		}
	}
	for i := 0; i < len(answer); i++ {
		off, err = PackRR(answer[i], msg, off, compression, dns.Compress)
		if err != nil {
			return nil, err
		}
	}
	for i := 0; i < len(ns); i++ {
		off, err = PackRR(ns[i], msg, off, compression, dns.Compress)
		if err != nil {
			return nil, err
		}
	}
	for i := 0; i < len(extra); i++ {
		off, err = PackRR(extra[i], msg, off, compression, dns.Compress)
		if err != nil {
			return nil, err
		}
	}
	return msg[:off], nil
}
// Unpack unpacks a binary message to a Msg structure.
func (dns *Msg) Unpack(msg []byte) (err error) {
	var (
		dh  Header
		off int
	)
	if dh, off, err = unpackMsgHdr(msg, off); err != nil {
		return err
	}

	// Split the packed Bits word back out into the individual header flags.
	dns.Id = dh.Id
	dns.Response = dh.Bits&_QR != 0
	dns.Opcode = int(dh.Bits>>11) & 0xF
	dns.Authoritative = dh.Bits&_AA != 0
	dns.Truncated = dh.Bits&_TC != 0
	dns.RecursionDesired = dh.Bits&_RD != 0
	dns.RecursionAvailable = dh.Bits&_RA != 0
	dns.Zero = dh.Bits&_Z != 0
	dns.AuthenticatedData = dh.Bits&_AD != 0
	dns.CheckingDisabled = dh.Bits&_CD != 0
	dns.Rcode = int(dh.Bits & 0xF)

	// If we are at the end of the message we should return *just* the
	// header. This can still be useful to the caller. 9.9.9.9 sends these
	// when responding with REFUSED for instance.
	if off == len(msg) {
		// reset sections before returning
		dns.Question, dns.Answer, dns.Ns, dns.Extra = nil, nil, nil, nil
		return nil
	}

	// Qdcount, Ancount, Nscount, Arcount can't be trusted, as they are
	// attacker controlled. This means we can't use them to pre-allocate
	// slices.
	dns.Question = nil
	for i := 0; i < int(dh.Qdcount); i++ {
		off1 := off
		var q Question
		q, off, err = unpackQuestion(msg, off)
		if err != nil {
			// Even if Truncated is set, we only will set ErrTruncated if we
			// actually got the questions
			return err
		}
		if off1 == off { // Offset does not increase anymore, dh.Qdcount is a lie!
			dh.Qdcount = uint16(i)
			break
		}
		dns.Question = append(dns.Question, q)
	}

	dns.Answer, off, err = unpackRRslice(int(dh.Ancount), msg, off)
	// The header counts might have been wrong so we need to update it
	dh.Ancount = uint16(len(dns.Answer))
	if err == nil {
		dns.Ns, off, err = unpackRRslice(int(dh.Nscount), msg, off)
	}
	// The header counts might have been wrong so we need to update it
	dh.Nscount = uint16(len(dns.Ns))
	if err == nil {
		dns.Extra, off, err = unpackRRslice(int(dh.Arcount), msg, off)
	}
	// The header counts might have been wrong so we need to update it
	dh.Arcount = uint16(len(dns.Extra))

	// Set extended Rcode
	if opt := dns.IsEdns0(); opt != nil {
		dns.Rcode |= opt.ExtendedRcode()
	}

	if off != len(msg) {
		// TODO(miek) make this an error?
		// use PackOpt to let people tell how detailed the error reporting should be?
		// println("dns: extra bytes in dns packet", off, "<", len(msg))
	} else if dns.Truncated {
		// Whether we ran into an error or not, we want to return that it
		// was truncated
		err = ErrTruncated
	}
	return err
}
// Convert a complete message to a string with dig-like output.
func (dns *Msg) String() string {
	if dns == nil {
		return "<nil> MsgHdr"
	}
	s := dns.MsgHdr.String() + " "
	s += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", "
	s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", "
	s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", "
	s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n"

	if len(dns.Question) > 0 {
		s += "\n;; QUESTION SECTION:\n"
		for _, q := range dns.Question {
			s += q.String() + "\n"
		}
	}

	// section renders one RR section under the given banner, skipping nil
	// records (which can appear after a partial unpack).
	section := func(banner string, rrs []RR) {
		if len(rrs) == 0 {
			return
		}
		s += "\n;; " + banner + " SECTION:\n"
		for _, rr := range rrs {
			if rr != nil {
				s += rr.String() + "\n"
			}
		}
	}
	section("ANSWER", dns.Answer)
	section("AUTHORITY", dns.Ns)
	section("ADDITIONAL", dns.Extra)
	return s
}
// Len returns the message length when in (un)compressed wire format.
// If dns.Compress is true compression it is taken into account. Len()
// is provided to be a faster way to get the size of the resulting packet,
// than packing it, measuring the size and discarding the buffer.
func (dns *Msg) Len() int { return compressedLen(dns, dns.Compress) }

// compressedLenWithCompressionMap computes the compressed message length
// while recording compressible name offsets in compression.
func compressedLenWithCompressionMap(dns *Msg, compression map[string]int) int {
	l := 12 // Message header is always 12 bytes
	for _, r := range dns.Question {
		// Seed the map with question names so later sections can point at them.
		compressionLenHelper(compression, r.Name, l)
		l += r.len()
	}
	l += compressionLenSlice(l, compression, dns.Answer)
	l += compressionLenSlice(l, compression, dns.Ns)
	l += compressionLenSlice(l, compression, dns.Extra)
	return l
}
// compressedLen returns the message length when in compressed wire format
// when compress is true, otherwise the uncompressed length is returned.
func compressedLen(dns *Msg, compress bool) int {
	// We always return one more than needed.
	if compress {
		return compressedLenWithCompressionMap(dns, map[string]int{})
	}

	l := 12 // Message header is always 12 bytes
	for _, r := range dns.Question {
		l += r.len()
	}
	// sectionLen sums the wire lengths of one RR section, skipping nils.
	sectionLen := func(rrs []RR) int {
		n := 0
		for _, r := range rrs {
			if r != nil {
				n += r.len()
			}
		}
		return n
	}
	return l + sectionLen(dns.Answer) + sectionLen(dns.Ns) + sectionLen(dns.Extra)
}
// compressionLenSlice returns the wire-size contribution of rs when the
// section starts at message offset lenp, updating the compression map c
// with any newly seen names along the way.
func compressionLenSlice(lenp int, c map[string]int, rs []RR) int {
	initLen := lenp
	for _, r := range rs {
		if r == nil {
			continue
		}
		// TmpLen is to track len of record at 14bits boundaries
		tmpLen := lenp

		x := r.len()
		// track this length, and the global length in len, while taking compression into account for both.
		k, ok, _ := compressionLenSearch(c, r.Header().Name)
		if ok {
			// Size of x is reduced by k, but we add 1 since k includes the '.' and label descriptor take 2 bytes
			// so, basically x:= x - k - 1 + 2
			x += 1 - k
		}

		tmpLen += compressionLenHelper(c, r.Header().Name, tmpLen)
		k, ok, _ = compressionLenSearchType(c, r)
		if ok {
			x += 1 - k
		}
		lenp += x
		tmpLen = lenp

		tmpLen += compressionLenHelperType(c, r, tmpLen)
	}
	return lenp - initLen
}
// Put the parts of the name in the compression map, return the size in bytes added in payload
func compressionLenHelper(c map[string]int, s string, currentLen int) int {
	if currentLen > maxCompressionOffset {
		// We won't be able to add any label that could be re-used later anyway
		return 0
	}
	if _, ok := c[s]; ok {
		// Whole name already recorded: nothing new to add.
		return 0
	}
	initLen := currentLen
	prev := s
	// Walk the name suffix by suffix (dropping one label each step),
	// recording each previously unseen suffix at its payload offset.
	for off, end := 0, false; !end; off, end = NextLabel(s, off) {
		pref := s[off:]
		currentLen += len(prev) - len(pref)
		prev = pref
		if _, ok := c[pref]; !ok {
			// If first byte label is within the first 14bits, it might be re-used later
			if currentLen < maxCompressionOffset {
				c[pref] = currentLen
			}
		} else {
			added := currentLen - initLen
			if off > 0 {
				// We added a new PTR
				added += 2
			}
			return added
		}
	}
	return currentLen - initLen
}
// Look for each part in the compression map and returns its length,
// keep on searching so we get the longest match.
// Will return the size of compression found, whether a match has been
// found and the size of record if added in payload
func compressionLenSearch(c map[string]int, s string) (int, bool, int) {
	off := 0
	end := false
	if s == "" { // don't bork on bogus data
		return 0, false, 0
	}
	fullSize := 0
	// Drop one leading label per iteration until a recorded suffix matches
	// or the name is exhausted.
	for {
		if _, ok := c[s[off:]]; ok {
			return len(s[off:]), true, fullSize + off
		}
		if end {
			break
		}
		// Each label descriptor takes 2 bytes, add it
		fullSize += 2
		off, end = NextLabel(s, off)
	}
	return 0, false, fullSize + len(s)
}
// Copy returns a new RR which is a deep-copy of r.
func Copy(r RR) RR { return r.copy() }

// Len returns the length (in octets) of the uncompressed RR in wire format.
func Len(r RR) int { return r.len() }

// Copy returns a new *Msg which is a deep-copy of dns.
func (dns *Msg) Copy() *Msg { return dns.CopyTo(new(Msg)) }
// CopyTo copies the contents to the provided message using a deep-copy and returns the copy.
func (dns *Msg) CopyTo(r1 *Msg) *Msg {
	r1.MsgHdr = dns.MsgHdr
	r1.Compress = dns.Compress

	if len(dns.Question) > 0 {
		r1.Question = make([]Question, len(dns.Question))
		copy(r1.Question, dns.Question) // TODO(miek): Question is an immutable value, ok to do a shallow-copy
	}

	// One backing array holds the deep copies of all three RR sections;
	// the full slice expressions below cap each section's capacity so a
	// later append on one section cannot clobber a sibling.
	rrArr := make([]RR, len(dns.Answer)+len(dns.Ns)+len(dns.Extra))
	rri := 0
	cloneSection := func(src []RR) []RR {
		if len(src) == 0 {
			return nil
		}
		begin := rri
		for _, r := range src {
			rrArr[rri] = r.copy()
			rri++
		}
		return rrArr[begin:rri:rri]
	}

	if s := cloneSection(dns.Answer); s != nil {
		r1.Answer = s
	}
	if s := cloneSection(dns.Ns); s != nil {
		r1.Ns = s
	}
	if s := cloneSection(dns.Extra); s != nil {
		r1.Extra = s
	}
	return r1
}
// pack writes q (name, qtype, qclass) into msg[off:] and returns the
// offset just past the packed question.
func (q *Question) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
	off, err := PackDomainName(q.Name, msg, off, compression, compress)
	if err != nil {
		return off, err
	}
	off, err = packUint16(q.Qtype, msg, off)
	if err != nil {
		return off, err
	}
	off, err = packUint16(q.Qclass, msg, off)
	if err != nil {
		return off, err
	}
	return off, nil
}
// unpackQuestion decodes one question-section entry from msg[off:].
// A message that ends cleanly after a field yields a partially-filled
// Question with a nil error, matching the lenient behavior of the rest
// of the unpacking code.
func unpackQuestion(msg []byte, off int) (Question, int, error) {
	var (
		q   Question
		err error
	)
	q.Name, off, err = UnpackDomainName(msg, off)
	if err != nil {
		return q, off, err
	}
	if off == len(msg) {
		return q, off, nil
	}
	q.Qtype, off, err = unpackUint16(msg, off)
	if err != nil {
		return q, off, err
	}
	if off == len(msg) {
		return q, off, nil
	}
	q.Qclass, off, err = unpackUint16(msg, off)
	// Check err before the end-of-message test: unpackUint16 reports
	// failure with off == len(msg), so testing the offset first would
	// silently swallow a buffer error (the bug in the previous version).
	if err != nil {
		return q, off, err
	}
	return q, off, nil
}
// pack writes the six fixed uint16 header fields into msg[off:] and
// returns the new offset. compression and compress are accepted only to
// match the common pack signature; a header is never compressed.
func (dh *Header) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
	var err error
	// The wire header is six uint16 fields in this fixed order.
	for _, field := range []uint16{dh.Id, dh.Bits, dh.Qdcount, dh.Ancount, dh.Nscount, dh.Arcount} {
		if off, err = packUint16(field, msg, off); err != nil {
			return off, err
		}
	}
	return off, nil
}
// unpackMsgHdr reads the six fixed uint16 header fields from msg[off:]
// and returns the decoded Header plus the new offset.
func unpackMsgHdr(msg []byte, off int) (Header, int, error) {
	var (
		dh  Header
		err error
	)
	// Fill the fields in wire order; stop at the first short read.
	for _, field := range []*uint16{&dh.Id, &dh.Bits, &dh.Qdcount, &dh.Ancount, &dh.Nscount, &dh.Arcount} {
		if *field, off, err = unpackUint16(msg, off); err != nil {
			return dh, off, err
		}
	}
	return dh, off, nil
}
// Only compute i-begin once in packDomainName
// DNS packet assembly, see RFC 1035. Converting from - Unpack() -
// and to - Pack() - wire format.
// All the packers and unpackers take a (msg []byte, off int)
// and return (off1 int, ok bool). If they return ok==false, they
// also return off1==len(msg), so that the next unpacker will
// also fail. This lets us avoid checks of ok until the end of a
// packing sequence.
package dns
//go:generate go run msg_generate.go
//go:generate go run compress_generate.go
import (
crand "crypto/rand"
"encoding/binary"
"fmt"
"math/big"
"math/rand"
"strconv"
"sync"
)
const (
	// maxCompressionOffset is one past the largest offset a 14-bit
	// compression pointer can reference (2^14).
	maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer
	// maxDomainNameWireOctets caps the wire-format size of a domain name.
	maxDomainNameWireOctets = 255 // See RFC 1035 section 2.3.4
)

// Errors defined in this package.
var (
	ErrAlg           error = &Error{err: "bad algorithm"}                                  // ErrAlg indicates an error with the (DNSSEC) algorithm.
	ErrAuth          error = &Error{err: "bad authentication"}                             // ErrAuth indicates an error in the TSIG authentication.
	ErrBuf           error = &Error{err: "buffer size too small"}                          // ErrBuf indicates that the buffer used is too small for the message.
	ErrConnEmpty     error = &Error{err: "conn has no connection"}                         // ErrConnEmpty indicates a connection is being used before it is initialized.
	ErrExtendedRcode error = &Error{err: "bad extended rcode"}                             // ErrExtendedRcode indicates an extended rcode without an OPT record to carry it.
	ErrFqdn          error = &Error{err: "domain must be fully qualified"}                 // ErrFqdn indicates that a domain name does not have a closing dot.
	ErrId            error = &Error{err: "id mismatch"}                                    // ErrId indicates there is a mismatch with the message's ID.
	ErrKeyAlg        error = &Error{err: "bad key algorithm"}                              // ErrKeyAlg indicates that the algorithm in the key is not valid.
	ErrKey           error = &Error{err: "bad key"}                                        // ErrKey indicates an invalid key.
	ErrKeySize       error = &Error{err: "bad key size"}                                   // ErrKeySize indicates an invalid key size.
	ErrLongDomain    error = &Error{err: fmt.Sprintf("domain name exceeded %d wire-format octets", maxDomainNameWireOctets)} // ErrLongDomain indicates a name over the wire-format length limit.
	ErrNoSig         error = &Error{err: "no signature found"}                             // ErrNoSig indicates that no signature was found.
	ErrPrivKey       error = &Error{err: "bad private key"}                                // ErrPrivKey indicates an invalid private key.
	ErrRcode         error = &Error{err: "bad rcode"}                                      // ErrRcode indicates an invalid rcode.
	ErrRdata         error = &Error{err: "bad rdata"}                                      // ErrRdata indicates invalid rdata.
	ErrRRset         error = &Error{err: "bad rrset"}                                      // ErrRRset indicates an invalid rrset.
	ErrSecret        error = &Error{err: "no secrets defined"}                             // ErrSecret indicates no TSIG secrets are defined.
	ErrShortRead     error = &Error{err: "short read"}                                     // ErrShortRead indicates a read that ended early.
	ErrSig           error = &Error{err: "bad signature"}                                  // ErrSig indicates that a signature can not be cryptographically validated.
	ErrSoa           error = &Error{err: "no SOA"}                                         // ErrSOA indicates that no SOA RR was seen when doing zone transfers.
	ErrTime          error = &Error{err: "bad time"}                                      // ErrTime indicates a timing error in TSIG authentication.
	ErrTruncated     error = &Error{err: "failed to unpack truncated message"}             // ErrTruncated indicates that we failed to unpack a truncated message. We unpacked as much as we had so Msg can still be used, if desired.
)

// Id by default, returns a 16 bits random number to be used as a
// message id. The random provided should be good enough. This being a
// variable the function can be reassigned to a custom function.
// For instance, to make it return a static value:
//
//	dns.Id = func() uint16 { return 3 }
var Id = id

// idLock guards idRand, which is lazily seeded on first use by id().
var (
	idLock sync.Mutex
	idRand *rand.Rand
)
// id returns a 16 bits random number to be used as a
// message id. The random provided should be good enough.
func id() uint16 {
	idLock.Lock()
	defer idLock.Unlock()

	if idRand == nil {
		// Lazily seed the generator on first use; this (partially) works
		// around https://github.com/golang/go/issues/11833. Prefer a
		// crypto-quality seed, falling back to math/rand on failure.
		seed := rand.Int63()
		var buf [8]byte
		if _, err := crand.Read(buf[:]); err == nil {
			seed = int64(binary.LittleEndian.Uint64(buf[:]))
		}
		idRand = rand.New(rand.NewSource(seed))
	}

	// idRand (*rand.Rand) is not safe for concurrent use, so the Uint32
	// call stays inside the critical section. The global math/rand rng is
	// internally mutex-protected anyway, so this costs nothing extra.
	return uint16(idRand.Uint32())
}
// MsgHdr is a manually-unpacked version of (id, bits).
type MsgHdr struct {
	Id                 uint16 // message identifier
	Response           bool   // QR bit
	Opcode             int    // packed into bits 11-14 of the header word
	Authoritative      bool   // AA bit
	Truncated          bool   // TC bit
	RecursionDesired   bool   // RD bit
	RecursionAvailable bool   // RA bit
	Zero               bool   // Z bit (must be zero on the wire)
	AuthenticatedData  bool   // AD bit
	CheckingDisabled   bool   // CD bit
	Rcode              int    // low 4 bits on the wire; may carry EDNS0 extended bits
}

// Msg contains the layout of a DNS message.
type Msg struct {
	MsgHdr
	Compress bool `json:"-"` // If true, the message will be compressed when converted to wire format.
	Question []Question      // Holds the RR(s) of the question section.
	Answer   []RR            // Holds the RR(s) of the answer section.
	Ns       []RR            // Holds the RR(s) of the authority section.
	Extra    []RR            // Holds the RR(s) of the additional section.
}
// ClassToString maps Classes to strings for each CLASS wire type.
var ClassToString = map[uint16]string{
	ClassINET:   "IN",
	ClassCSNET:  "CS",
	ClassCHAOS:  "CH",
	ClassHESIOD: "HS",
	ClassNONE:   "NONE",
	ClassANY:    "ANY",
}

// OpcodeToString maps Opcodes to strings.
var OpcodeToString = map[int]string{
	OpcodeQuery:  "QUERY",
	OpcodeIQuery: "IQUERY",
	OpcodeStatus: "STATUS",
	OpcodeNotify: "NOTIFY",
	OpcodeUpdate: "UPDATE",
}

// RcodeToString maps Rcodes to strings.
var RcodeToString = map[int]string{
	RcodeSuccess:        "NOERROR",
	RcodeFormatError:    "FORMERR",
	RcodeServerFailure:  "SERVFAIL",
	RcodeNameError:      "NXDOMAIN",
	RcodeNotImplemented: "NOTIMPL",
	RcodeRefused:        "REFUSED",
	RcodeYXDomain:       "YXDOMAIN", // See RFC 2136
	RcodeYXRrset:        "YXRRSET",
	RcodeNXRrset:        "NXRRSET",
	RcodeNotAuth:        "NOTAUTH",
	RcodeNotZone:        "NOTZONE",
	RcodeBadSig:         "BADSIG", // Also known as RcodeBadVers, see RFC 6891
	// RcodeBadVers:        "BADVERS",
	RcodeBadKey:    "BADKEY",
	RcodeBadTime:   "BADTIME",
	RcodeBadMode:   "BADMODE",
	RcodeBadName:   "BADNAME",
	RcodeBadAlg:    "BADALG",
	RcodeBadTrunc:  "BADTRUNC",
	RcodeBadCookie: "BADCOOKIE",
}
// Domain names are a sequence of counted strings
// split at the dots. They end with a zero-length string.

// PackDomainName packs a domain name s into msg[off:].
// If compression is wanted compress must be true and the compression
// map needs to hold a mapping between domain names and offsets
// pointing into msg.
func PackDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
	// The label count from packDomainName is only needed internally.
	off1, _, err = packDomainName(s, msg, off, compression, compress)
	return
}
// packDomainName packs s into msg[off:], decoding presentation-format
// escapes (\X and \DDD), emitting one length byte per label and, when a
// compression map is supplied, either recording or using compression
// pointers. It also returns the number of labels written. A nil msg
// performs a dry run (length/label counting) with a 256-byte limit.
func packDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, labels int, err error) {
	// special case if msg == nil
	lenmsg := 256
	if msg != nil {
		lenmsg = len(msg)
	}
	ls := len(s)
	if ls == 0 { // Ok, for instance when dealing with update RR without any rdata.
		return off, 0, nil
	}
	// If not fully qualified, error out, but only if msg != nil #ugly
	if s[ls-1] != '.' {
		if msg != nil {
			return lenmsg, 0, ErrFqdn
		}
		s += "."
		ls++
	}

	// Each dot ends a segment of the name.
	// We trade each dot byte for a length byte.
	// Except for escaped dots (\.), which are normal dots.
	// There is also a trailing zero.

	// Compression
	pointer := -1

	// Emit sequence of counted strings, chopping at dots.
	begin := 0
	var bs []byte
	roBs, bsFresh, wasDot := s, true, false
loop:
	for i := 0; i < ls; i++ {
		var c byte
		if bs == nil {
			c = s[i]
		} else {
			c = bs[i]
		}

		switch c {
		case '\\':
			if bs == nil {
				// Lazily copy s into a mutable buffer only once an escape
				// actually requires rewriting the name.
				bs = []byte(s)
			}
			// Drop the backslash by shifting the remainder left one byte.
			copy(bs[i:ls-1], bs[i+1:])
			ls--

			if off+1 > lenmsg {
				return lenmsg, labels, ErrBuf
			}

			// check for \DDD
			if i+2 < ls && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) {
				bs[i] = dddToByte(bs[i:])
				copy(bs[i+1:ls-2], bs[i+3:])
				ls -= 2
			}

			bsFresh = false
			wasDot = false
		case '.':
			if wasDot {
				// two dots back to back is not legal
				return lenmsg, labels, ErrRdata
			}
			wasDot = true

			labelLen := i - begin
			if labelLen >= 1<<6 { // top two bits of length must be clear
				return lenmsg, labels, ErrRdata
			}

			// off can already (we're in a loop) be bigger than len(msg)
			// this happens when a name isn't fully qualified
			if off+1+labelLen > lenmsg {
				return lenmsg, labels, ErrBuf
			}

			// Compression lookups use the read-only copy roBs; refresh it
			// if escape processing rewrote bs since the last label.
			if compress && !bsFresh {
				roBs = string(bs)
				bsFresh = true
			}

			// Don't try to compress '.'
			// We should only compress when compress is true, but we should also still pick
			// up names that can be used for *future* compression(s).
			if compression != nil && roBs[begin:] != "." {
				if p, ok := compression[roBs[begin:]]; ok {
					// The first hit is the longest matching dname
					// keep the pointer offset we get back and store
					// the offset of the current name, because that's
					// where we need to insert the pointer later

					// If compress is true, we're allowed to compress this dname
					if compress {
						pointer = p // Where to point to
						break loop
					}
				} else if off < maxCompressionOffset {
					// Only offsets smaller than maxCompressionOffset can be used.
					compression[roBs[begin:]] = off
				}
			}

			// The following is covered by the length check above.
			if msg != nil {
				msg[off] = byte(labelLen)

				if bs == nil {
					copy(msg[off+1:], s[begin:i])
				} else {
					copy(msg[off+1:], bs[begin:i])
				}
			}
			off += 1 + labelLen
			labels++

			begin = i + 1
		default:
			wasDot = false
		}
	}

	// Root label is special
	if bs == nil && len(s) == 1 && s[0] == '.' {
		return off, labels, nil
	}
	if bs != nil && len(bs) == 1 && bs[0] == '.' {
		return off, labels, nil
	}

	// If we did compression and we find something add the pointer here
	if pointer != -1 {
		// We have two bytes (14 bits) to put the pointer in
		// if msg == nil, we will never do compression
		binary.BigEndian.PutUint16(msg[off:], uint16(pointer^0xC000))
		return off + 2, labels, nil
	}
	if msg != nil && off < lenmsg {
		// Terminating zero-length root label.
		msg[off] = 0
	}
	return off + 1, labels, nil
}
// Unpack a domain name.
// In addition to the simple sequences of counted strings above,
// domain names are allowed to refer to strings elsewhere in the
// packet, to avoid repeating common suffixes when returning
// many entries in a single domain. The pointers are marked
// by a length byte with the top two bits set. Ignoring those
// two bits, that byte and the next give a 14 bit offset from msg[0]
// where we should pick up the trail.
// Note that if we jump elsewhere in the packet,
// we return off1 == the offset after the first pointer we found,
// which is where the next record will start.
// In theory, the pointers are only allowed to jump backward.
// We let them jump anywhere and stop jumping after a while.

// UnpackDomainName unpacks a domain name into a string.
func UnpackDomainName(msg []byte, off int) (string, int, error) {
	s := make([]byte, 0, 64)
	off1 := 0
	lenmsg := len(msg)
	// maxLen bounds the *wire* length; it grows as escapes are emitted
	// because s holds the (longer) presentation form.
	maxLen := maxDomainNameWireOctets
	ptr := 0 // number of pointers followed
Loop:
	for {
		if off >= lenmsg {
			return "", lenmsg, ErrBuf
		}
		c := int(msg[off])
		off++
		switch c & 0xC0 {
		case 0x00:
			if c == 0x00 {
				// end of name
				break Loop
			}
			// literal string
			if off+c > lenmsg {
				return "", lenmsg, ErrBuf
			}
			for j := off; j < off+c; j++ {
				switch b := msg[j]; b {
				case '.', '(', ')', ';', ' ', '@':
					fallthrough
				case '"', '\\':
					s = append(s, '\\', b)
					// presentation-format \X escapes add an extra byte
					maxLen++
				default:
					if b < 32 || b >= 127 { // unprintable, use \DDD
						var buf [3]byte
						bufs := strconv.AppendInt(buf[:0], int64(b), 10)
						s = append(s, '\\')
						// zero-pad to exactly three digits
						for i := len(bufs); i < 3; i++ {
							s = append(s, '0')
						}
						s = append(s, bufs...)
						// presentation-format \DDD escapes add 3 extra bytes
						maxLen += 3
					} else {
						s = append(s, b)
					}
				}
			}
			s = append(s, '.')
			off += c
		case 0xC0:
			// pointer to somewhere else in msg.
			// remember location after first ptr,
			// since that's how many bytes we consumed.
			// also, don't follow too many pointers --
			// maybe there's a loop.
			if off >= lenmsg {
				return "", lenmsg, ErrBuf
			}
			c1 := msg[off]
			off++
			if ptr == 0 {
				off1 = off
			}
			if ptr++; ptr > 10 {
				return "", lenmsg, &Error{err: "too many compression pointers"}
			}
			// pointer should guarantee that it advances and points forwards at least
			// but the condition on previous three lines guarantees that it's
			// at least loop-free
			off = (c^0xC0)<<8 | int(c1)
		default:
			// 0x80 and 0x40 are reserved
			return "", lenmsg, ErrRdata
		}
	}
	if ptr == 0 {
		off1 = off
	}
	if len(s) == 0 {
		s = []byte(".")
	} else if len(s) >= maxLen {
		// error if the name is too long, but don't throw it away
		return string(s), lenmsg, ErrLongDomain
	}
	return string(s), off1, nil
}
// packTxt packs the strings of a TXT rdata into msg starting at offset,
// using tmp as scratch space for escape processing. An empty txt slice
// still yields a single zero-length character-string.
func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) {
	if len(txt) == 0 {
		if offset >= len(msg) {
			return offset, ErrBuf
		}
		msg[offset] = 0
		return offset, nil
	}
	for _, s := range txt {
		if len(s) > len(tmp) {
			return offset, ErrBuf
		}
		var err error
		if offset, err = packTxtString(s, msg, offset, tmp); err != nil {
			return offset, err
		}
	}
	return offset, nil
}
// packTxtString writes one character-string: a length byte (back-filled
// at the end) followed by the bytes of s with the \X and \DDD escapes
// decoded. It returns the offset just past the written data.
func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) {
	lenByteOffset := offset // position of the length byte, filled in last
	if offset >= len(msg) || len(s) > len(tmp) {
		return offset, ErrBuf
	}
	offset++
	work := tmp[:len(s)]
	copy(work, s)
	for i := 0; i < len(work); i++ {
		if offset >= len(msg) {
			return offset, ErrBuf
		}
		b := work[i]
		if b != '\\' {
			msg[offset] = b
			offset++
			continue
		}
		// Escape sequence: inspect the byte(s) after the backslash.
		i++
		if i == len(work) {
			break
		}
		if i+2 < len(work) && isDigit(work[i]) && isDigit(work[i+1]) && isDigit(work[i+2]) {
			// \DDD decimal escape names one byte value.
			msg[offset] = dddToByte(work[i:])
			i += 2
		} else {
			msg[offset] = work[i]
		}
		offset++
	}
	l := offset - lenByteOffset - 1
	if l > 255 {
		return offset, &Error{err: "string exceeded 255 bytes in txt"}
	}
	msg[lenByteOffset] = byte(l)
	return offset, nil
}
// packOctetString copies s into msg starting at offset, decoding the
// textual escapes \X and \DDD into their raw byte values. Unlike
// packTxtString it emits no leading length byte. It returns the offset
// just past the written data, or ErrBuf when msg or tmp is too small.
func packOctetString(s string, msg []byte, offset int, tmp []byte) (int, error) {
	if offset >= len(msg) || len(s) > len(tmp) {
		return offset, ErrBuf
	}
	work := tmp[:len(s)]
	copy(work, s)
	for i := 0; i < len(work); i++ {
		if offset >= len(msg) {
			return offset, ErrBuf
		}
		b := work[i]
		if b != '\\' {
			msg[offset] = b
			offset++
			continue
		}
		// Escape sequence: inspect the byte(s) after the backslash.
		i++
		if i == len(work) {
			break
		}
		if i+2 < len(work) && isDigit(work[i]) && isDigit(work[i+1]) && isDigit(work[i+2]) {
			// \DDD: three decimal digits naming one byte value.
			msg[offset] = dddToByte(work[i:])
			i += 2
		} else {
			// \X: the escaped character itself.
			msg[offset] = work[i]
		}
		offset++
	}
	return offset, nil
}
// unpackTxt reads consecutive character-strings from msg starting at
// off0 until the buffer is exhausted or unpackString reports an error.
func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) {
	for off = off0; off < len(msg); {
		var s string
		if s, off, err = unpackString(msg, off); err != nil {
			return ss, off, err
		}
		ss = append(ss, s)
	}
	return ss, off, nil
}
// Helpers for dealing with escaped bytes

// isDigit reports whether b is an ASCII decimal digit.
func isDigit(b byte) bool { return '0' <= b && b <= '9' }

// dddToByte converts the three ASCII digits at the start of s into the
// byte value they name (e.g. "065" -> 'A'). s must have len(s) >= 3.
func dddToByte(s []byte) byte {
	_ = s[2] // single bounds check
	return (s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0')
}

// dddStringToByte is dddToByte for string input.
func dddStringToByte(s string) byte {
	_ = s[2] // single bounds check
	return (s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0')
}
// Helper function for packing and unpacking
func intToBytes(i *big.Int, length int) []byte {
buf := i.Bytes()
if len(buf) < length {
b := make([]byte, length)
copy(b[length-len(buf):], buf)
return b
}
return buf
}
// PackRR packs a resource record rr into msg[off:].
// See PackDomainName for documentation about the compression.
// On success it returns the offset just past the packed record; on any
// failure it returns len(msg) so that subsequent packers also fail.
func PackRR(rr RR, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
	if rr == nil {
		return len(msg), &Error{err: "nil rr"}
	}
	off1, err = rr.pack(msg, off, compression, compress)
	if err != nil {
		return len(msg), err
	}
	// Back-fill the rdlength field now that the actual rdata size is known.
	// TODO(miek): Not sure if this is needed? If removed we can remove rawmsg.go as well.
	if rawSetRdlength(msg, off, off1) {
		return off1, nil
	}
	return off, ErrRdata
}

// UnpackRR unpacks msg[off:] into an RR.
func UnpackRR(msg []byte, off int) (rr RR, off1 int, err error) {
	// The header's Rrtype selects the type-specific rdata unpacker below.
	h, off, msg, err := unpackHeader(msg, off)
	if err != nil {
		return nil, len(msg), err
	}
	return UnpackRRWithHeader(h, msg, off)
}

// UnpackRRWithHeader unpacks the record type specific payload given an existing
// RR_Header.
func UnpackRRWithHeader(h RR_Header, msg []byte, off int) (rr RR, off1 int, err error) {
	end := off + int(h.Rdlength)
	if fn, known := typeToUnpack[h.Rrtype]; !known {
		// Unknown RR type: fall back to the RFC 3597 opaque representation.
		rr, off, err = unpackRFC3597(h, msg, off)
	} else {
		rr, off, err = fn(h, msg, off)
	}
	if off != end {
		// The rdata did not consume exactly Rdlength bytes; return the
		// header so the caller can still see what was being parsed.
		return &h, end, &Error{err: "bad rdlength"}
	}
	return rr, off, err
}
// unpackRRslice unpacks msg[off:] into an []RR.
// If we cannot unpack the whole array, then it will return nil
func unpackRRslice(l int, msg []byte, off int) (dst1 []RR, off1 int, err error) {
	var r RR
	// Don't pre-allocate, l may be under attacker control
	var dst []RR
	for i := 0; i < l; i++ {
		off1 := off
		r, off, err = UnpackRR(msg, off)
		if err != nil {
			// Jump to the end of the buffer so follow-up unpacking fails fast.
			off = len(msg)
			break
		}
		// If offset does not increase anymore, l is a lie
		if off1 == off {
			l = i
			break
		}
		dst = append(dst, r)
	}
	if err != nil && off == len(msg) {
		// Partial failure: drop everything rather than return half a section.
		dst = nil
	}
	return dst, off, err
}
// Convert a MsgHdr to a string, with dig-like headers:
//
//;; opcode: QUERY, status: NOERROR, id: 48404
//
//;; flags: qr aa rd ra;
func (h *MsgHdr) String() string {
	if h == nil {
		return "<nil> MsgHdr"
	}

	s := ";; opcode: " + OpcodeToString[h.Opcode] +
		", status: " + RcodeToString[h.Rcode] +
		", id: " + strconv.Itoa(int(h.Id)) + "\n"

	// Emit each set header flag in canonical order.
	s += ";; flags:"
	flags := []struct {
		set  bool
		name string
	}{
		{h.Response, " qr"},
		{h.Authoritative, " aa"},
		{h.Truncated, " tc"},
		{h.RecursionDesired, " rd"},
		{h.RecursionAvailable, " ra"},
		{h.Zero, " z"}, // Hmm
		{h.AuthenticatedData, " ad"},
		{h.CheckingDisabled, " cd"},
	}
	for _, f := range flags {
		if f.set {
			s += f.name
		}
	}
	return s + ";"
}
// Pack packs a Msg: it is converted to wire format.
// If the dns.Compress is true the message will be in compressed wire format.
func (dns *Msg) Pack() (msg []byte, err error) {
	return dns.PackBuffer(nil)
}

// PackBuffer packs a Msg, using the given buffer buf. If buf is too small a new buffer is allocated.
func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) {
	var compression map[string]int
	if dns.Compress {
		compression = make(map[string]int) // Compression pointer mappings.
	}
	return dns.packBufferWithCompressionMap(buf, compression)
}
// packBufferWithCompressionMap packs a Msg, using the given buffer buf.
// A nil compression map disables compression.
func (dns *Msg) packBufferWithCompressionMap(buf []byte, compression map[string]int) (msg []byte, err error) {
	// We use a similar function in tsig.go's stripTsig.
	if dns.Rcode < 0 || dns.Rcode > 0xFFF {
		return nil, ErrRcode
	}

	// Unconditionally (re)set the extended rcode bits when an OPT RR is
	// present so stale bits get cleared as well; an extended rcode without
	// an OPT RR cannot be represented on the wire.
	if opt := dns.IsEdns0(); opt != nil {
		opt.SetExtendedRcode(uint16(dns.Rcode))
	} else if dns.Rcode > 0xF {
		return nil, ErrExtendedRcode
	}

	// Convert the convenient Msg into a wire-like Header.
	var dh Header
	dh.Id = dns.Id
	dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode&0xF)
	for _, f := range []struct {
		set bool
		bit uint16
	}{
		{dns.Response, _QR},
		{dns.Authoritative, _AA},
		{dns.Truncated, _TC},
		{dns.RecursionDesired, _RD},
		{dns.RecursionAvailable, _RA},
		{dns.Zero, _Z},
		{dns.AuthenticatedData, _AD},
		{dns.CheckingDisabled, _CD},
	} {
		if f.set {
			dh.Bits |= f.bit
		}
	}

	// Section counts come straight from the slice lengths.
	dh.Qdcount = uint16(len(dns.Question))
	dh.Ancount = uint16(len(dns.Answer))
	dh.Nscount = uint16(len(dns.Ns))
	dh.Arcount = uint16(len(dns.Extra))

	// We need the uncompressed length here, because we first pack it and
	// then compress it.
	msg = buf
	uncompressedLen := compressedLen(dns, false)
	if packLen := uncompressedLen + 1; len(msg) < packLen {
		msg = make([]byte, packLen)
	}

	// Pack it in: header first, then the four sections in wire order.
	off := 0
	if off, err = dh.pack(msg, off, compression, dns.Compress); err != nil {
		return nil, err
	}
	for _, q := range dns.Question {
		if off, err = q.pack(msg, off, compression, dns.Compress); err != nil {
			return nil, err
		}
	}
	for _, section := range [][]RR{dns.Answer, dns.Ns, dns.Extra} {
		for _, r := range section {
			if off, err = PackRR(r, msg, off, compression, dns.Compress); err != nil {
				return nil, err
			}
		}
	}
	return msg[:off], nil
}
// Unpack unpacks a binary message to a Msg structure.
// All sections are (re)populated from msg; section counts in the wire
// header are treated as untrusted hints only.
func (dns *Msg) Unpack(msg []byte) (err error) {
	var (
		dh Header
		off int
	)
	if dh, off, err = unpackMsgHdr(msg, off); err != nil {
		return err
	}

	// Mirror the wire header bit field into the convenience booleans.
	dns.Id = dh.Id
	dns.Response = dh.Bits&_QR != 0
	dns.Opcode = int(dh.Bits>>11) & 0xF
	dns.Authoritative = dh.Bits&_AA != 0
	dns.Truncated = dh.Bits&_TC != 0
	dns.RecursionDesired = dh.Bits&_RD != 0
	dns.RecursionAvailable = dh.Bits&_RA != 0
	dns.Zero = dh.Bits&_Z != 0
	dns.AuthenticatedData = dh.Bits&_AD != 0
	dns.CheckingDisabled = dh.Bits&_CD != 0
	dns.Rcode = int(dh.Bits & 0xF)

	// If we are at the end of the message we should return *just* the
	// header. This can still be useful to the caller. 9.9.9.9 sends these
	// when responding with REFUSED for instance.
	if off == len(msg) {
		// reset sections before returning
		dns.Question, dns.Answer, dns.Ns, dns.Extra = nil, nil, nil, nil
		return nil
	}

	// Qdcount, Ancount, Nscount, Arcount can't be trusted, as they are
	// attacker controlled. This means we can't use them to pre-allocate
	// slices.
	dns.Question = nil
	for i := 0; i < int(dh.Qdcount); i++ {
		off1 := off
		var q Question
		q, off, err = unpackQuestion(msg, off)
		if err != nil {
			// Even if Truncated is set, we only will set ErrTruncated if we
			// actually got the questions
			return err
		}
		if off1 == off { // Offset does not increase anymore, dh.Qdcount is a lie!
			dh.Qdcount = uint16(i)
			break
		}
		dns.Question = append(dns.Question, q)
	}

	// Unpack the three RR sections; later sections are skipped once an
	// error occurs, but the counts are resynced to what was actually read.
	dns.Answer, off, err = unpackRRslice(int(dh.Ancount), msg, off)
	// The header counts might have been wrong so we need to update it
	dh.Ancount = uint16(len(dns.Answer))
	if err == nil {
		dns.Ns, off, err = unpackRRslice(int(dh.Nscount), msg, off)
	}
	// The header counts might have been wrong so we need to update it
	dh.Nscount = uint16(len(dns.Ns))
	if err == nil {
		dns.Extra, off, err = unpackRRslice(int(dh.Arcount), msg, off)
	}
	// The header counts might have been wrong so we need to update it
	dh.Arcount = uint16(len(dns.Extra))

	// Set extended Rcode
	if opt := dns.IsEdns0(); opt != nil {
		dns.Rcode |= opt.ExtendedRcode()
	}

	if off != len(msg) {
		// TODO(miek) make this an error?
		// use PackOpt to let people tell how detailed the error reporting should be?
		// println("dns: extra bytes in dns packet", off, "<", len(msg))
	} else if dns.Truncated {
		// Whether we ran into a an error or not, we want to return that it
		// was truncated
		err = ErrTruncated
	}
	return err
}
// String converts a complete message to a string with dig-like output.
func (dns *Msg) String() string {
	if dns == nil {
		return "<nil> MsgHdr"
	}

	// Header line plus per-section counts.
	out := dns.MsgHdr.String() + " "
	out += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", "
	out += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", "
	out += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", "
	out += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n"

	if len(dns.Question) > 0 {
		out += "\n;; QUESTION SECTION:\n"
		for _, q := range dns.Question {
			out += q.String() + "\n"
		}
	}

	// The three RR sections render identically apart from their title.
	for _, sec := range []struct {
		name string
		rrs  []RR
	}{
		{"ANSWER", dns.Answer},
		{"AUTHORITY", dns.Ns},
		{"ADDITIONAL", dns.Extra},
	} {
		if len(sec.rrs) == 0 {
			continue
		}
		out += "\n;; " + sec.name + " SECTION:\n"
		for _, rr := range sec.rrs {
			if rr != nil {
				out += rr.String() + "\n"
			}
		}
	}
	return out
}
// Len returns the message length when in (un)compressed wire format.
// If dns.Compress is true, compression is taken into account. Len()
// is provided to be a faster way to get the size of the resulting packet,
// than packing it, measuring the size and discarding the buffer.
func (dns *Msg) Len() int { return compressedLen(dns, dns.Compress) }
// compressedLenWithCompressionMap computes the compressed wire size of dns,
// threading the running offset through the compression map so later names
// can point back at earlier ones.
func compressedLenWithCompressionMap(dns *Msg, compression map[string]int) int {
	size := 12 // the fixed message header is always 12 bytes
	for _, q := range dns.Question {
		// Seed the map with the question name before counting it.
		compressionLenHelper(compression, q.Name, size)
		size += q.len()
	}
	// Each section sees the offsets established by the ones before it.
	size += compressionLenSlice(size, compression, dns.Answer)
	size += compressionLenSlice(size, compression, dns.Ns)
	size += compressionLenSlice(size, compression, dns.Extra)
	return size
}
// compressedLen returns the message length when in compressed wire format
// when compress is true, otherwise the uncompressed length is returned.
func compressedLen(dns *Msg, compress bool) int {
	// We always return one more than needed.
	if compress {
		return compressedLenWithCompressionMap(dns, map[string]int{})
	}

	size := 12 // the fixed message header is always 12 bytes
	for _, q := range dns.Question {
		size += q.len()
	}
	// The three RR sections are summed identically; nil entries are skipped.
	for _, section := range [][]RR{dns.Answer, dns.Ns, dns.Extra} {
		for _, r := range section {
			if r != nil {
				size += r.len()
			}
		}
	}
	return size
}
// compressionLenSlice estimates how many bytes the records in rs add to a
// message that is already lenp bytes long, updating the compression map c
// with any owner/rdata names that become referable along the way.
// It returns only the delta (bytes added), not the new total.
func compressionLenSlice(lenp int, c map[string]int, rs []RR) int {
	initLen := lenp
	for _, r := range rs {
		if r == nil {
			continue
		}
		// tmpLen tracks the record's own offset so labels are registered at
		// positions that stay within the 14-bit compression-pointer range.
		tmpLen := lenp
		x := r.len()
		// track this length, and the global length in len, while taking compression into account for both.
		k, ok, _ := compressionLenSearch(c, r.Header().Name)
		if ok {
			// Size of x is reduced by k, but we add 1 since k includes the '.' and label descriptor take 2 bytes
			// so, basically x := x - k - 1 + 2
			x += 1 - k
		}
		tmpLen += compressionLenHelper(c, r.Header().Name, tmpLen)
		// Also account for compressible names inside the rdata (type-specific).
		k, ok, _ = compressionLenSearchType(c, r)
		if ok {
			x += 1 - k
		}
		lenp += x
		tmpLen = lenp
		// NOTE(review): tmpLen's final value is never read after this call;
		// the helper is invoked only for its side effect on c.
		tmpLen += compressionLenHelperType(c, r, tmpLen)
	}
	return lenp - initLen
}
// compressionLenHelper puts the parts (label suffixes) of the name s into the
// compression map c, assuming the name starts at offset currentLen in the
// payload. It returns the number of bytes the name adds to the payload once
// compression against existing entries is taken into account.
func compressionLenHelper(c map[string]int, s string, currentLen int) int {
	if currentLen > maxCompressionOffset {
		// We won't be able to add any label that could be re-used later anyway
		return 0
	}
	if _, ok := c[s]; ok {
		// The full name is already registered; it contributes nothing new.
		return 0
	}
	initLen := currentLen
	prev := s
	for off, end := 0, false; !end; off, end = NextLabel(s, off) {
		pref := s[off:]
		// Advance by the length of the label just stepped past.
		currentLen += len(prev) - len(pref)
		prev = pref
		if _, ok := c[pref]; !ok {
			// If first byte label is within the first 14bits, it might be re-used later
			if currentLen < maxCompressionOffset {
				c[pref] = currentLen
			}
		} else {
			// Found a suffix already in the map: the remainder of the name
			// compresses down to a 2-byte pointer.
			added := currentLen - initLen
			if off > 0 {
				// We added a new PTR
				added += 2
			}
			return added
		}
	}
	return currentLen - initLen
}
// compressionLenSearch looks for each part (label suffix) of s in the
// compression map and keeps searching so the longest match wins.
// It returns the size of the compressible suffix found, whether a match was
// found at all, and the size the name would occupy in the payload if added.
func compressionLenSearch(c map[string]int, s string) (int, bool, int) {
	off := 0
	end := false
	if s == "" { // don't bork on bogus data
		return 0, false, 0
	}
	fullSize := 0
	for {
		// Check progressively shorter suffixes of s against the map.
		if _, ok := c[s[off:]]; ok {
			return len(s[off:]), true, fullSize + off
		}
		if end {
			break
		}
		// Each label descriptor takes 2 bytes, add it
		fullSize += 2
		off, end = NextLabel(s, off)
	}
	// No suffix matched: the whole name goes into the payload uncompressed.
	return 0, false, fullSize + len(s)
}
// Copy returns a new RR which is a deep-copy of r.
func Copy(r RR) RR { return r.copy() }
// Len returns the length (in octets) of the uncompressed RR in wire format.
// It is the exported counterpart of the unexported len method on RR.
func Len(r RR) int { return r.len() }
// Copy returns a new *Msg which is a deep-copy of dns.
// It is a convenience wrapper around CopyTo with a freshly allocated target.
func (dns *Msg) Copy() *Msg { return dns.CopyTo(new(Msg)) }
// CopyTo copies the contents to the provided message using a deep-copy and returns the copy.
func (dns *Msg) CopyTo(r1 *Msg) *Msg {
	r1.MsgHdr = dns.MsgHdr
	r1.Compress = dns.Compress

	if len(dns.Question) > 0 {
		r1.Question = make([]Question, len(dns.Question))
		copy(r1.Question, dns.Question) // Question is treated as immutable, so a shallow copy suffices
	}

	// One backing array holds the deep copies for all three RR sections;
	// each section becomes a full-capacity-limited sub-slice of it.
	rrArr := make([]RR, len(dns.Answer)+len(dns.Ns)+len(dns.Extra))
	idx := 0
	dup := func(src []RR) []RR {
		begin := idx
		for _, r := range src {
			rrArr[idx] = r.copy()
			idx++
		}
		// Three-index slicing caps capacity so appends don't cross sections.
		return rrArr[begin:idx:idx]
	}
	if len(dns.Answer) > 0 {
		r1.Answer = dup(dns.Answer)
	}
	if len(dns.Ns) > 0 {
		r1.Ns = dup(dns.Ns)
	}
	if len(dns.Extra) > 0 {
		r1.Extra = dup(dns.Extra)
	}
	return r1
}
// pack writes the question (name, type, class) into msg at off in wire order
// and returns the new offset.
func (q *Question) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
	off, err := PackDomainName(q.Name, msg, off, compression, compress)
	if err != nil {
		return off, err
	}
	if off, err = packUint16(q.Qtype, msg, off); err != nil {
		return off, err
	}
	if off, err = packUint16(q.Qclass, msg, off); err != nil {
		return off, err
	}
	return off, nil
}
// unpackQuestion unpacks one question-section entry starting at msg[off].
// When the message ends early, the partially filled Question is returned
// without error; callers detect that via the returned offset.
func unpackQuestion(msg []byte, off int) (Question, int, error) {
	var (
		q Question
		err error
	)
	q.Name, off, err = UnpackDomainName(msg, off)
	if err != nil {
		return q, off, err
	}
	if off == len(msg) {
		// Name only; Qtype and Qclass are absent in a truncated question.
		return q, off, nil
	}
	q.Qtype, off, err = unpackUint16(msg, off)
	if err != nil {
		return q, off, err
	}
	if off == len(msg) {
		return q, off, nil
	}
	q.Qclass, off, err = unpackUint16(msg, off)
	if off == len(msg) {
		// NOTE(review): an error from the Qclass unpack is dropped when the
		// message ends exactly here — presumably intentional, worth confirming.
		return q, off, nil
	}
	return q, off, err
}
// pack writes the six fixed header fields into msg at off in wire order and
// returns the new offset. The compression arguments are accepted for
// signature symmetry with the other pack methods and are unused.
func (dh *Header) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
	var err error
	for _, field := range []uint16{dh.Id, dh.Bits, dh.Qdcount, dh.Ancount, dh.Nscount, dh.Arcount} {
		if off, err = packUint16(field, msg, off); err != nil {
			return off, err
		}
	}
	return off, nil
}
// unpackMsgHdr reads the six fixed header fields from msg at off in wire
// order, returning the header and the new offset.
func unpackMsgHdr(msg []byte, off int) (Header, int, error) {
	var (
		dh  Header
		err error
	)
	for _, field := range []*uint16{&dh.Id, &dh.Bits, &dh.Qdcount, &dh.Ancount, &dh.Nscount, &dh.Arcount} {
		if *field, off, err = unpackUint16(msg, off); err != nil {
			return dh, off, err
		}
	}
	return dh, off, nil
}
|
package search
import (
"encoding/json"
"github.com/jinzhu/gorm"
dbModule "github.com/notegio/openrelay/db"
"github.com/notegio/openrelay/types"
"net/http"
"fmt"
"strings"
)
// FormatResponse serialises orders either as concatenated binary order
// records (for "application/octet-stream") or as a JSON array (any other
// format). It returns the payload, the content type actually used, and any
// serialisation error.
func FormatResponse(orders []dbModule.Order, format string) ([]byte, string, error) {
	if format == "application/octet-stream" {
		result := []byte{}
		for _, order := range orders {
			b := order.Bytes()
			result = append(result, b[:]...)
		}
		return result, "application/octet-stream", nil
	}
	// Everything else (including unknown formats) falls back to JSON.
	orderList := make([]types.Order, 0, len(orders))
	for _, order := range orders {
		orderList = append(orderList, order.Order)
	}
	payload, err := json.Marshal(orderList)
	return payload, "application/json", err
}
// Handler returns an http handler that loads all orders from db and writes
// them back in the format requested by the Accept header.
func Handler(db *gorm.DB) func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		// writeError emits a 500 with a JSON body; the message is marshalled
		// so quotes/backslashes in err cannot break the JSON.
		writeError := func(err error) {
			w.Header().Set("Content-Type", "application/json")
			w.WriteHeader(500)
			body, encErr := json.Marshal(map[string]string{"error": err.Error()})
			if encErr != nil {
				body = []byte(`{"error": "internal error"}`)
			}
			w.Write(body)
		}
		orders := []dbModule.Order{}
		query := db.Model(&dbModule.Order{})
		// Filter Stuff
		// gorm needs a pointer to the slice to populate it; passing the
		// slice by value left orders permanently empty.
		if err := query.Find(&orders).Error; err != nil {
			writeError(err)
			return // previously fell through and wrote a second response
		}
		acceptHeader := "unknown"
		if acceptVal, ok := r.Header["Accept"]; ok {
			// Strip any media-type parameters, e.g. "application/json; q=0.9".
			acceptHeader = strings.Split(acceptVal[0], ";")[0]
		}
		response, contentType, err := FormatResponse(orders, acceptHeader)
		if err != nil {
			writeError(err)
			return
		}
		// Headers must be set before WriteHeader; setting them afterwards
		// has no effect (the old code sent the default content type).
		w.Header().Set("Content-Type", contentType)
		w.WriteHeader(200)
		w.Write(response)
	}
}
Add TODO comments to the search handler noting the unimplemented block-number requirement and query filters.
package search
import (
"encoding/json"
"github.com/jinzhu/gorm"
dbModule "github.com/notegio/openrelay/db"
"github.com/notegio/openrelay/types"
"net/http"
"fmt"
"strings"
)
// FormatResponse serialises orders either as concatenated binary order
// records (for "application/octet-stream") or as a JSON array (any other
// format). It returns the payload, the content type actually used, and any
// serialisation error.
func FormatResponse(orders []dbModule.Order, format string) ([]byte, string, error) {
	if format == "application/octet-stream" {
		result := []byte{}
		for _, order := range orders {
			b := order.Bytes()
			result = append(result, b[:]...)
		}
		return result, "application/octet-stream", nil
	}
	// Everything else (including unknown formats) falls back to JSON.
	orderList := make([]types.Order, 0, len(orders))
	for _, order := range orders {
		orderList = append(orderList, order.Order)
	}
	payload, err := json.Marshal(orderList)
	return payload, "application/json", err
}
// Handler returns an http handler that loads all orders from db and writes
// them back in the format requested by the Accept header.
func Handler(db *gorm.DB) func(http.ResponseWriter, *http.Request) {
	// TODO: Require blocknumber
	// TODO: Filters
	return func(w http.ResponseWriter, r *http.Request) {
		// writeError emits a 500 with a JSON body; the message is marshalled
		// so quotes/backslashes in err cannot break the JSON.
		writeError := func(err error) {
			w.Header().Set("Content-Type", "application/json")
			w.WriteHeader(500)
			body, encErr := json.Marshal(map[string]string{"error": err.Error()})
			if encErr != nil {
				body = []byte(`{"error": "internal error"}`)
			}
			w.Write(body)
		}
		orders := []dbModule.Order{}
		query := db.Model(&dbModule.Order{})
		// Filter Stuff
		// gorm needs a pointer to the slice to populate it; passing the
		// slice by value left orders permanently empty.
		if err := query.Find(&orders).Error; err != nil {
			writeError(err)
			return // previously fell through and wrote a second response
		}
		acceptHeader := "unknown"
		if acceptVal, ok := r.Header["Accept"]; ok {
			// Strip any media-type parameters, e.g. "application/json; q=0.9".
			acceptHeader = strings.Split(acceptVal[0], ";")[0]
		}
		response, contentType, err := FormatResponse(orders, acceptHeader)
		if err != nil {
			writeError(err)
			return
		}
		// Headers must be set before WriteHeader; setting them afterwards
		// has no effect (the old code sent the default content type).
		w.Header().Set("Content-Type", contentType)
		w.WriteHeader(200)
		w.Write(response)
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.