CombinedText stringlengths 4 3.42M |
|---|
// Package qq provides quick and dirty debugging output for tired programmers.
// The output is formatted and colorized to enhance readability. The predefined
// "standard" qq logger can be used without initialization.
package qq
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
)
// color is an ANSI terminal escape sequence used to style log output.
type color string

// ANSI color escape codes
const (
	bold     color = "\033[1m"
	yellow   color = "\033[33m"
	cyan     color = "\033[36m"
	endColor color = "\033[0m" // "reset everything"
)
// These flags control what's printed in the header line. See
// https://golang.org/pkg/log/#pkg-constants for an explanation of how they
// work.
const (
	Ldate         = 1 << iota // the date: 2009/01/23
	Ltime                     // the time: 01:23:23
	Lmicroseconds             // microsecond resolution: 01:23:23.123123 (overrides Ltime)
	Llongfile                 // full file path as reported by runtime.Caller
	Lshortfile                // final file name element only (overrides Llongfile)
	LUTC                      // use UTC rather than the local time zone
	Lfuncname                 // the calling function's name, e.g. main.main
	LstdFlags     = Ltime | Lshortfile | Lfuncname // defaults used by the standard qq logger
)

const (
	// noName marks an argument whose name could not be determined —
	// presumably consumed by argNames/prependArgName; TODO confirm.
	noName = ""
	// maxLineWidth is the column at which output wraps log lines.
	maxLineWidth = 80
)
// A Logger writes pretty log messages to a file. Loggers write to files only,
// not io.Writers. The upside of this restriction is you don't have to open
// and close log files yourself. Loggers are safe for concurrent use.
type Logger struct {
	mu       sync.Mutex  // protects all the other fields
	path     string      // full path to log file
	prefix   string      // prefix to write at beginning of each line
	flag     int         // determines what's printed in header line
	start    time.Time   // time of first write in the current log group
	timer    *time.Timer // when it gets to 0, start a new log group
	lastFile string      // last file to call Log(). determines when to print header
	lastFunc string      // last function to call Log()
}
// New creates a Logger that writes to the file at the given path. The prefix
// appears before each log line. The flag determines what is printed in the
// header line, e.g. "[15:21:27 main.go:107 main.main]"
func New(path, prefix string, flag int) *Logger {
	// The group timer starts out stopped; the first Log call arms it.
	timer := time.NewTimer(0)
	timer.Stop()
	logger := &Logger{
		path:   path,
		prefix: prefix,
		flag:   flag,
		timer:  timer,
	}
	return logger
}
// Flags returns the output header flags for the logger.
func (l *Logger) Flags() int {
	l.mu.Lock()
	flag := l.flag
	l.mu.Unlock()
	return flag
}
// formatHeader creates the header based on which flags are set in the logger.
func (l *Logger) formatHeader(t time.Time, filename, funcName string, line int) string {
	if l.flag&LUTC != 0 {
		t = t.UTC()
	}
	// At most four parts: [date time filename funcname]
	parts := make([]string, 0, 4)
	if l.flag&Ldate != 0 {
		parts = append(parts, t.Format("2006/01/02"))
	}
	switch {
	case l.flag&Lmicroseconds != 0:
		parts = append(parts, t.Format("15:04:05.000000"))
	case l.flag&Ltime != 0:
		parts = append(parts, t.Format("15:04:05"))
	}
	if l.flag&(Llongfile|Lshortfile) != 0 {
		// if Llongfile and Lshortfile are both present, Lshortfile wins
		if l.flag&Lshortfile != 0 {
			filename = filepath.Base(filename)
		}
		// append line number to filename
		parts = append(parts, fmt.Sprintf("%s:%d", filename, line))
	}
	if l.flag&Lfuncname != 0 {
		parts = append(parts, funcName)
	}
	return "[" + strings.Join(parts, " ") + "]"
}
// Log pretty-prints the given arguments to the log file.
func (l *Logger) Log(a ...interface{}) {
	l.mu.Lock()
	defer l.mu.Unlock()
	// will print line break if more than 2s since last write (groups logs
	// together)
	// NOTE(review): Timer.Reset's return value is documented as unreliable
	// for detecting expiry; tracking the last-write time directly would be
	// safer — confirm before changing.
	timerExpired := !l.timer.Reset(2 * time.Second)
	if timerExpired {
		l.start = time.Now()
	}
	// get info about func calling qq.Log()
	var callDepth int
	if l == std {
		callDepth = 2 // user is calling qq.Log() via the package-level wrapper
	} else {
		callDepth = 1 // user is calling myCustomQQLogger.Log()
	}
	pc, filename, line, ok := runtime.Caller(callDepth)
	args := formatArgs(a)
	if !ok {
		l.output(args...) // no name=value printing
		return
	}
	// print header if necessary, e.g. [14:00:36 main.go main.main]
	funcName := runtime.FuncForPC(pc).Name()
	if timerExpired || funcName != l.lastFunc || filename != l.lastFile {
		l.lastFunc = funcName
		l.lastFile = filename
		header := l.formatHeader(time.Now(), filename, funcName, line)
		l.printHeader(header)
	}
	// extract arg names from source text between parens in qq.Log()
	names, err := argNames(filename, line)
	if err != nil {
		l.output(args...) // no name=value printing
		return
	}
	// convert args to name=value strings
	args = prependArgName(names, args)
	l.output(args...)
}
// open returns a file descriptor for the open log file. If the file doesn't
// exist, it is created. open will panic if it can't open the log file.
func (l *Logger) open() *os.File {
	const flags = os.O_CREATE | os.O_APPEND | os.O_WRONLY
	f, err := os.OpenFile(l.path, flags, 0600)
	if err != nil {
		panic(err)
	}
	return f
}
// output writes to the log file. Each log message is prepended with a
// timestamp. If the prefix has been set, it will be prepended as well. If there
// is more than one message printed on a line and the line exceeds 80
// characters, the line will be broken up.
func (l *Logger) output(a ...string) {
	// seconds elapsed since the start of the current log group
	timestamp := fmt.Sprintf("%.3fs", time.Since(l.start).Seconds())
	timestamp = colorize(timestamp, yellow) + " " // pad one space
	prefix := ""
	if l.prefix != "" {
		prefix = l.prefix + " " // pad one space
	}
	f := l.open()
	defer f.Close()
	fmt.Fprintf(f, "%s%s", timestamp, prefix)
	// preWidth is length of everything before log message. The invisible
	// ANSI escape codes (presumably added around the text by colorize —
	// confirm) are subtracted from the visible width.
	preWidth := len(timestamp) - len(yellow) - len(endColor) + len(prefix)
	preSpaces := strings.Repeat(" ", preWidth)
	padding := ""
	lineArgs := 0 // number of args printed on current log line
	lineWidth := preWidth
	for _, arg := range a {
		argWidth := argWidth(arg)
		lineWidth += argWidth + len(padding)
		// some names in name=value strings contain newlines. insert indentation
		// after each newline so they line up
		arg = strings.Replace(arg, "\n", "\n"+preSpaces, -1)
		// break up long lines. if this is first arg printed on the line
		// (lineArgs == 0), makes no sense to break up the line
		if lineWidth > maxLineWidth && lineArgs != 0 {
			fmt.Fprint(f, "\n", preSpaces)
			lineArgs = 0
			lineWidth = preWidth + argWidth
			padding = ""
		}
		fmt.Fprint(f, padding, arg)
		lineArgs++
		padding = " " // every subsequent arg on this line gets one space before it
	}
	fmt.Fprint(f, "\n")
}
// Path returns the full path to the log file.
func (l *Logger) Path() string {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.path
}

// Prefix returns the output prefix for the logger.
func (l *Logger) Prefix() string {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.prefix
}
// printHeader prints a header of the form [16:11:18 main.go main.main],
// preceded by a blank line that visually separates the new log group.
func (l *Logger) printHeader(header string) {
	f := l.open()
	defer f.Close()
	fmt.Fprintf(f, "\n%s\n", header)
}
// SetFlags sets the header flags for the logger.
func (l *Logger) SetFlags(flag int) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.flag = flag
}

// SetPath sets the destination log file for the logger.
func (l *Logger) SetPath(path string) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.path = path
}

// SetPrefix sets the output prefix that's printed at the start of each log line.
func (l *Logger) SetPrefix(prefix string) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.prefix = prefix
}
// std is the predefined "standard" qq logger: it writes to qq.log in the
// system temp directory using the default flags.
var std = New(filepath.Join(os.TempDir(), "qq.log"), "", LstdFlags)
// Flags returns the output flags for the standard qq logger.
func Flags() int {
	return std.Flags()
}

// Log writes a log message through the standard qq logger. The extra call
// frame this wrapper adds is accounted for by Logger.Log's callDepth.
func Log(a ...interface{}) {
	std.Log(a...)
}

// Path returns the full path to the qq.log file.
func Path() string {
	return std.Path()
}

// Prefix returns the output prefix for the standard qq logger.
func Prefix() string {
	return std.Prefix()
}

// SetFlags sets the header flags for the standard qq logger.
func SetFlags(flag int) {
	std.SetFlags(flag)
}

// SetPath sets the output destination for the standard qq logger. If the given
// path is invalid, the next Log() call will panic.
func SetPath(path string) {
	std.SetPath(path)
}

// SetPrefix sets the output prefix for the standard qq logger.
func SetPrefix(prefix string) {
	std.SetPrefix(prefix)
}
improve package comment
// Package qq provides quick and dirty debugging output for tired programmers.
// It's used to quickly print variables. With one call you can see the variable
// name, value, and the filename, function, and line number. The output goes to
// a special qq.log file, away from all the noise of your running program. The
// output is easy on the eyes, with pretty colors and nice formatting.
// The API is almost identical to the standard log package. You can set a prefix
// with SetPrefix(). You can set output flags with SetFlags(). Like the log
// package, you can just start calling qq.Log() without initializing anything.
package qq
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
)
// color is an ANSI terminal escape sequence used to style log output.
type color string

// ANSI color escape codes
const (
	bold     color = "\033[1m"
	yellow   color = "\033[33m"
	cyan     color = "\033[36m"
	endColor color = "\033[0m" // "reset everything"
)
// These flags control what's printed in the header line. See
// https://golang.org/pkg/log/#pkg-constants for an explanation of how they
// work.
const (
	Ldate         = 1 << iota // the date: 2009/01/23
	Ltime                     // the time: 01:23:23
	Lmicroseconds             // microsecond resolution: 01:23:23.123123 (overrides Ltime)
	Llongfile                 // full file path as reported by runtime.Caller
	Lshortfile                // final file name element only (overrides Llongfile)
	LUTC                      // use UTC rather than the local time zone
	Lfuncname                 // the calling function's name, e.g. main.main
	LstdFlags     = Ltime | Lshortfile | Lfuncname // defaults used by the standard qq logger
)

const (
	// noName marks an argument whose name could not be determined —
	// presumably consumed by argNames/prependArgName; TODO confirm.
	noName = ""
	// maxLineWidth is the column at which output wraps log lines.
	maxLineWidth = 80
)
// A Logger writes pretty log messages to a file. Loggers write to files only,
// not io.Writers. The upside of this restriction is you don't have to open
// and close log files yourself. Loggers are safe for concurrent use.
type Logger struct {
	mu       sync.Mutex  // protects all the other fields
	path     string      // full path to log file
	prefix   string      // prefix to write at beginning of each line
	flag     int         // determines what's printed in header line
	start    time.Time   // time of first write in the current log group
	timer    *time.Timer // when it gets to 0, start a new log group
	lastFile string      // last file to call Log(). determines when to print header
	lastFunc string      // last function to call Log()
}
// New creates a Logger that writes to the file at the given path. The prefix
// appears before each log line. The flag determines what is printed in the
// header line, e.g. "[15:21:27 main.go:107 main.main]"
func New(path, prefix string, flag int) *Logger {
	// The group timer starts out stopped; the first Log call arms it.
	timer := time.NewTimer(0)
	timer.Stop()
	logger := &Logger{
		path:   path,
		prefix: prefix,
		flag:   flag,
		timer:  timer,
	}
	return logger
}
// Flags returns the output header flags for the logger.
func (l *Logger) Flags() int {
	l.mu.Lock()
	flag := l.flag
	l.mu.Unlock()
	return flag
}
// formatHeader creates the header based on which flags are set in the logger.
func (l *Logger) formatHeader(t time.Time, filename, funcName string, line int) string {
	if l.flag&LUTC != 0 {
		t = t.UTC()
	}
	// At most four parts: [date time filename funcname]
	parts := make([]string, 0, 4)
	if l.flag&Ldate != 0 {
		parts = append(parts, t.Format("2006/01/02"))
	}
	switch {
	case l.flag&Lmicroseconds != 0:
		parts = append(parts, t.Format("15:04:05.000000"))
	case l.flag&Ltime != 0:
		parts = append(parts, t.Format("15:04:05"))
	}
	if l.flag&(Llongfile|Lshortfile) != 0 {
		// if Llongfile and Lshortfile are both present, Lshortfile wins
		if l.flag&Lshortfile != 0 {
			filename = filepath.Base(filename)
		}
		// append line number to filename
		parts = append(parts, fmt.Sprintf("%s:%d", filename, line))
	}
	if l.flag&Lfuncname != 0 {
		parts = append(parts, funcName)
	}
	return "[" + strings.Join(parts, " ") + "]"
}
// Log pretty-prints the given arguments to the log file.
func (l *Logger) Log(a ...interface{}) {
	l.mu.Lock()
	defer l.mu.Unlock()
	// will print line break if more than 2s since last write (groups logs
	// together)
	// NOTE(review): Timer.Reset's return value is documented as unreliable
	// for detecting expiry; tracking the last-write time directly would be
	// safer — confirm before changing.
	timerExpired := !l.timer.Reset(2 * time.Second)
	if timerExpired {
		l.start = time.Now()
	}
	// get info about func calling qq.Log()
	var callDepth int
	if l == std {
		callDepth = 2 // user is calling qq.Log() via the package-level wrapper
	} else {
		callDepth = 1 // user is calling myCustomQQLogger.Log()
	}
	pc, filename, line, ok := runtime.Caller(callDepth)
	args := formatArgs(a)
	if !ok {
		l.output(args...) // no name=value printing
		return
	}
	// print header if necessary, e.g. [14:00:36 main.go main.main]
	funcName := runtime.FuncForPC(pc).Name()
	if timerExpired || funcName != l.lastFunc || filename != l.lastFile {
		l.lastFunc = funcName
		l.lastFile = filename
		header := l.formatHeader(time.Now(), filename, funcName, line)
		l.printHeader(header)
	}
	// extract arg names from source text between parens in qq.Log()
	names, err := argNames(filename, line)
	if err != nil {
		l.output(args...) // no name=value printing
		return
	}
	// convert args to name=value strings
	args = prependArgName(names, args)
	l.output(args...)
}
// open returns a file descriptor for the open log file. If the file doesn't
// exist, it is created. open will panic if it can't open the log file.
func (l *Logger) open() *os.File {
	const flags = os.O_CREATE | os.O_APPEND | os.O_WRONLY
	f, err := os.OpenFile(l.path, flags, 0600)
	if err != nil {
		panic(err)
	}
	return f
}
// output writes to the log file. Each log message is prepended with a
// timestamp. If the prefix has been set, it will be prepended as well. If there
// is more than one message printed on a line and the line exceeds 80
// characters, the line will be broken up.
func (l *Logger) output(a ...string) {
	// seconds elapsed since the start of the current log group
	timestamp := fmt.Sprintf("%.3fs", time.Since(l.start).Seconds())
	timestamp = colorize(timestamp, yellow) + " " // pad one space
	prefix := ""
	if l.prefix != "" {
		prefix = l.prefix + " " // pad one space
	}
	f := l.open()
	defer f.Close()
	fmt.Fprintf(f, "%s%s", timestamp, prefix)
	// preWidth is length of everything before log message. The invisible
	// ANSI escape codes (presumably added around the text by colorize —
	// confirm) are subtracted from the visible width.
	preWidth := len(timestamp) - len(yellow) - len(endColor) + len(prefix)
	preSpaces := strings.Repeat(" ", preWidth)
	padding := ""
	lineArgs := 0 // number of args printed on current log line
	lineWidth := preWidth
	for _, arg := range a {
		argWidth := argWidth(arg)
		lineWidth += argWidth + len(padding)
		// some names in name=value strings contain newlines. insert indentation
		// after each newline so they line up
		arg = strings.Replace(arg, "\n", "\n"+preSpaces, -1)
		// break up long lines. if this is first arg printed on the line
		// (lineArgs == 0), makes no sense to break up the line
		if lineWidth > maxLineWidth && lineArgs != 0 {
			fmt.Fprint(f, "\n", preSpaces)
			lineArgs = 0
			lineWidth = preWidth + argWidth
			padding = ""
		}
		fmt.Fprint(f, padding, arg)
		lineArgs++
		padding = " " // every subsequent arg on this line gets one space before it
	}
	fmt.Fprint(f, "\n")
}
// Path returns the full path to the log file.
func (l *Logger) Path() string {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.path
}

// Prefix returns the output prefix for the logger.
func (l *Logger) Prefix() string {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.prefix
}
// printHeader prints a header of the form [16:11:18 main.go main.main],
// preceded by a blank line that visually separates the new log group.
func (l *Logger) printHeader(header string) {
	f := l.open()
	defer f.Close()
	fmt.Fprintf(f, "\n%s\n", header)
}
// SetFlags sets the header flags for the logger.
func (l *Logger) SetFlags(flag int) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.flag = flag
}

// SetPath sets the destination log file for the logger.
func (l *Logger) SetPath(path string) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.path = path
}

// SetPrefix sets the output prefix that's printed at the start of each log line.
func (l *Logger) SetPrefix(prefix string) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.prefix = prefix
}
// std is the predefined "standard" qq logger: it writes to qq.log in the
// system temp directory using the default flags.
var std = New(filepath.Join(os.TempDir(), "qq.log"), "", LstdFlags)
// Flags returns the output flags for the standard qq logger.
func Flags() int {
	return std.Flags()
}

// Log writes a log message through the standard qq logger. The extra call
// frame this wrapper adds is accounted for by Logger.Log's callDepth.
func Log(a ...interface{}) {
	std.Log(a...)
}

// Path returns the full path to the qq.log file.
func Path() string {
	return std.Path()
}

// Prefix returns the output prefix for the standard qq logger.
func Prefix() string {
	return std.Prefix()
}

// SetFlags sets the header flags for the standard qq logger.
func SetFlags(flag int) {
	std.SetFlags(flag)
}

// SetPath sets the output destination for the standard qq logger. If the given
// path is invalid, the next Log() call will panic.
func SetPath(path string) {
	std.SetPath(path)
}

// SetPrefix sets the output prefix for the standard qq logger.
func SetPrefix(prefix string) {
	std.SetPrefix(prefix)
}
|
package profile
import (
"bytes"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"testing"
)
// profileTests drives TestProfile: each entry is a named, self-contained Go
// program (compiled and run via runTest) plus the checks applied to its
// stdout, stderr, and exit error.
var profileTests = []struct {
	name   string
	code   string
	checks []func(t *testing.T, stdout, stderr []byte, err error)
}{{
	name: "default profile",
	code: `
package main
import "github.com/pkg/profile"
func main() {
defer profile.Start().Stop()
}
`,
	checks: f(NoStdout, NoErr),
}, {
	name: "profile quiet",
	code: `
package main
import "github.com/pkg/profile"
func main() {
defer profile.Start(profile.Quiet).Stop()
}
`,
	checks: f(NoStdout, NoStderr, NoErr),
}}
// TestProfile runs each program in profileTests and applies its checks.
func TestProfile(t *testing.T) {
	for _, tc := range profileTests {
		t.Log(tc.name)
		stdout, stderr, err := runTest(t, tc.code)
		for _, check := range tc.checks {
			check(t, stdout, stderr, err)
		}
	}
}
// f collects check functions into a slice; it exists only to keep the
// profileTests literal readable.
func f(funcs ...func(*testing.T, []byte, []byte, error)) []func(*testing.T, []byte, []byte, error) {
	return funcs
}
// NoStdout checks that stdout was blank.
func NoStdout(t *testing.T, stdout, _ []byte, _ error) {
	// n, not len: don't shadow the builtin len.
	if n := len(stdout); n > 0 {
		t.Errorf("stdout: wanted 0 bytes, got %d", n)
	}
}
// NoStderr checks that stderr was blank.
func NoStderr(t *testing.T, _, stderr []byte, _ error) {
	// n, not len: don't shadow the builtin len.
	if n := len(stderr); n > 0 {
		t.Errorf("stderr: wanted 0 bytes, got %d", n)
	}
}
// NoErr checks that err was nil.
func NoErr(t *testing.T, _, _ []byte, err error) {
	if err == nil {
		return
	}
	t.Errorf("error: expected nil, got %v", err)
}
// runTest executes the go program supplied and returns the contents of stdout,
// stderr, and an error which may contain status information about the result
// of the program.
func runTest(t *testing.T, code string) ([]byte, []byte, error) {
	// chk fails the test immediately on any setup error.
	chk := func(err error) {
		if err != nil {
			t.Fatal(err)
		}
	}
	// Build a throwaway GOPATH-shaped workspace holding a single main.go.
	gopath, err := ioutil.TempDir("", "profile-gopath")
	chk(err)
	defer os.RemoveAll(gopath)
	srcdir := filepath.Join(gopath, "src")
	err = os.Mkdir(srcdir, 0755)
	chk(err)
	src := filepath.Join(srcdir, "main.go")
	err = ioutil.WriteFile(src, []byte(code), 0644)
	chk(err)
	// NOTE(review): GOPATH is not set on the command's environment, so the
	// github.com/pkg/profile import resolves via the parent process's
	// environment — confirm this is intended.
	cmd := exec.Command("go", "run", src)
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	err = cmd.Run()
	return stdout.Bytes(), stderr.Bytes(), err
}
integrated review feedback
package profile
import (
"bytes"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"testing"
)
// checkFn is a single assertion over a test program's stdout, stderr, and
// exit error.
type checkFn func(t *testing.T, stdout, stderr []byte, err error)

// profileTests drives TestProfile: each entry is a named, self-contained Go
// program (compiled and run via runTest) plus the checks applied to its
// output.
var profileTests = []struct {
	name   string
	code   string
	checks []checkFn
}{{
	name: "default profile",
	code: `
package main
import "github.com/pkg/profile"
func main() {
defer profile.Start().Stop()
}
`,
	checks: []checkFn{NoStdout, NoErr},
}, {
	name: "profile quiet",
	code: `
package main
import "github.com/pkg/profile"
func main() {
defer profile.Start(profile.Quiet).Stop()
}
`,
	checks: []checkFn{NoStdout, NoStderr, NoErr},
}}
// TestProfile runs each program in profileTests and applies its checks.
func TestProfile(t *testing.T) {
	for _, tc := range profileTests {
		t.Log(tc.name)
		stdout, stderr, err := runTest(t, tc.code)
		for _, check := range tc.checks {
			check(t, stdout, stderr, err)
		}
	}
}
// NoStdout checks that stdout was blank.
func NoStdout(t *testing.T, stdout, _ []byte, _ error) {
	// n, not len: don't shadow the builtin len.
	if n := len(stdout); n > 0 {
		t.Errorf("stdout: wanted 0 bytes, got %d", n)
	}
}
// NoStderr checks that stderr was blank.
func NoStderr(t *testing.T, _, stderr []byte, _ error) {
	// n, not len: don't shadow the builtin len.
	if n := len(stderr); n > 0 {
		t.Errorf("stderr: wanted 0 bytes, got %d", n)
	}
}
// NoErr checks that err was nil.
func NoErr(t *testing.T, _, _ []byte, err error) {
	if err == nil {
		return
	}
	t.Errorf("error: expected nil, got %v", err)
}
// runTest executes the go program supplied and returns the contents of stdout,
// stderr, and an error which may contain status information about the result
// of the program.
func runTest(t *testing.T, code string) ([]byte, []byte, error) {
	// chk fails the test immediately on any setup error.
	chk := func(err error) {
		if err != nil {
			t.Fatal(err)
		}
	}
	// Build a throwaway GOPATH-shaped workspace holding a single main.go.
	gopath, err := ioutil.TempDir("", "profile-gopath")
	chk(err)
	defer os.RemoveAll(gopath)
	srcdir := filepath.Join(gopath, "src")
	err = os.Mkdir(srcdir, 0755)
	chk(err)
	src := filepath.Join(srcdir, "main.go")
	err = ioutil.WriteFile(src, []byte(code), 0644)
	chk(err)
	// NOTE(review): GOPATH is not set on the command's environment, so the
	// github.com/pkg/profile import resolves via the parent process's
	// environment — confirm this is intended.
	cmd := exec.Command("go", "run", src)
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	err = cmd.Run()
	return stdout.Bytes(), stderr.Bytes(), err
}
|
package gitgo
import (
"bufio"
"bytes"
"compress/zlib"
"fmt"
"io"
"log"
"os"
"path"
"path/filepath"
"reflect"
)
// packObject is a single object parsed out of a git packfile, identified by
// its SHA and its byte offset within the pack.
type packObject struct {
	Name   SHA
	Offset int
	Data   []byte
	Type   packObjectType
}

//go:generate stringer -type=packObjectType
type packObjectType uint8

// Object types as encoded in a packfile object header. The first four are
// complete objects; the two delta types reference a base object.
const (
	_ packObjectType = iota
	OBJ_COMMIT
	OBJ_TREE
	OBJ_BLOB
	OBJ_TAG
	OBJ_OFS_DELTA
	OBJ_REF_DELTA
)
// errReadSeeker wraps an io.ReadSeeker with a "sticky" error: once a read
// fails, all subsequent reads are no-ops and the first error is kept.
type errReadSeeker struct {
	r   io.ReadSeeker
	err error
}

// Read, but only if no errors have been encountered
// in a previous read (including io.EOF)
func (er *errReadSeeker) read(buf []byte) {
	if er.err != nil {
		return
	}
	_, er.err = er.r.Read(buf)
}

// Seek delegates to the underlying ReadSeeker; seek errors are not made
// sticky.
func (er *errReadSeeker) Seek(offset int64, whence int) (int64, error) {
	return er.r.Seek(offset, whence)
}
// GetIdxPath returns the path of the first pack index (.idx) file found in
// the repository's objects/pack directory. It returns an error if the glob
// fails or if no idx file exists (the original indexed files[0] unchecked,
// which panicked on an empty result).
func GetIdxPath(dotGitRootPath string) (idxFilePath string, err error) {
	files, err := filepath.Glob(path.Join(dotGitRootPath, "objects/pack", "*.idx"))
	if err != nil {
		return "", err
	}
	if len(files) == 0 {
		return "", fmt.Errorf("no idx files found in %s", dotGitRootPath)
	}
	return files[0], nil
}
// VerifyPack parses the given packfile together with its idx file, logging
// each non-delta object found, and returns any parse error.
func VerifyPack(pack io.ReadSeeker, idx io.Reader) error {
	objects, err := parsePack(errReadSeeker{pack, nil}, idx)
	for _, object := range objects {
		// Types 1-4 are commit, tree, blob, and tag; delta objects are
		// not logged.
		if object.Type < 5 {
			log.Printf("%s %s", object.Name, object.Type)
		}
	}
	return err
}
// parsePack parses a packfile, using the accompanying idx file to locate the
// objects within it. Only version 2 packfiles are supported.
func parsePack(pack errReadSeeker, idx io.Reader) (objects []*packObject, err error) {
	signature := make([]byte, 4)
	pack.read(signature)
	// BUG FIX: the original tested the named return err here, which was
	// still nil — read failures accumulate on pack.err instead.
	if pack.err != nil {
		return nil, pack.err
	}
	if string(signature) != "PACK" {
		return nil, fmt.Errorf("Received invalid signature: %s", string(signature))
	}
	log.Printf("signature %+v", signature)
	version := make([]byte, 4)
	pack.read(version)
	// TODO use encoding/binary here
	log.Printf("version is %+v", version)
	v := version[3]
	switch v {
	case 2:
		// Parse version 2 packfile: the idx file yields the object list
		// (names and offsets), then the pack itself supplies the data.
		objects, err = parseIdx(idx, 2)
		if err != nil {
			return
		}
		objects, err = parsePackV2(pack, objects)
		return
	default:
		return nil, fmt.Errorf("cannot parse packfile with version %d", v)
	}
}
func Clone(r io.Reader) (*bufio.Reader, *bufio.Reader) {
var b1 bytes.Buffer
var b2 bytes.Buffer
w := io.MultiWriter(&b1, &b2)
io.Copy(w, r)
return bufio.NewReader(&b1), bufio.NewReader(&b2)
}
// bytesToNum interprets b as a big-endian unsigned integer.
func bytesToNum(b []byte) uint {
	var n uint
	for _, c := range b {
		n = n<<8 | uint(c)
	}
	return n
}
// parsePackV2 parses a packfile that uses
// version 2 of the format. objects comes from the idx file and carries each
// object's offset; this function fills in Type and, for non-delta objects,
// Data. Fixes: removed a dead, empty `if object.Type < 5 {}` statement, and
// the object-count mismatch error now prints the decoded count instead of
// the raw byte slice.
func parsePackV2(r errReadSeeker, objects []*packObject) ([]*packObject, error) {
	numObjectsBts := make([]byte, 4)
	r.read(numObjectsBts)
	if int(bytesToNum(numObjectsBts)) != len(objects) {
		return nil, fmt.Errorf("Expected %d objects and found %d", len(objects), int(bytesToNum(numObjectsBts)))
	}
	for _, object := range objects {
		r.Seek(int64(object.Offset), os.SEEK_SET)
		r.Seek(0, os.SEEK_CUR)
		_bytes := make([]byte, 1)
		r.read(_bytes)
		_byte := _bytes[0]
		// This will extract the last three bits of
		// the first nibble in the byte
		// which tells us the object type
		object.Type = packObjectType(((_byte >> 4) & 7))
		switch {
		case object.Type < 5:
			// the object is a commit, tree, blob, or tag
			log.Printf("Object type %d", object.Type)
			// determine the (decompressed) object size
			// and then deflate the following bytes
			// The most-significant byte (MSB)
			// tells us whether we need to read more bytes
			// to get the encoded object size
			MSB := (_byte & 128) // will be either 128 or 0
			// This will extract the last four bits of the byte
			var objectSize int = int((uint(_byte) & 15))
			// shift the first size by 0
			// and the rest by 4 + (i-1) * 7
			var shift uint = 4
			// If the most-significant bit is 0, this is the last byte
			// for the object size
			for MSB > 0 {
				// Keep reading the size until the MSB is 0
				_bytes := make([]byte, 1)
				r.read(_bytes)
				_byte := _bytes[0]
				MSB = (_byte & 128)
				objectSize += int((uint(_byte) & 127) << shift)
				shift += 7
			}
			// (objectSize) is the size, in bytes, of this object *when expanded*
			// the IDX file tells us how many *compressed* bytes the object will take
			// (in other words, how much space to allocate for the result)
			object.Data = make([]byte, objectSize)
			zr, err := zlib.NewReader(r.r)
			if err != nil {
				return nil, err
			}
			n, err := zr.Read(object.Data)
			if err != nil {
				if err == io.EOF {
					err = nil
				} else {
					return nil, err
				}
			}
			zr.Close()
			if n != objectSize {
				return nil, fmt.Errorf("expected to read %d bytes, read %d", objectSize, n)
			}
		case object.Type == OBJ_OFS_DELTA:
			// read the n-byte offset
			// from the git docs:
			// "n bytes with MSB set in all but the last one.
			// The offset is then the number constructed by
			// concatenating the lower 7 bit of each byte, and
			// for n >= 2 adding 2^7 + 2^14 + ... + 2^(7*(n-1))
			// to the result."
			log.Printf("encountered ofs delta")
			var offset int
			MSB := (_byte & 128) // will be either 128 or 0
			var shift uint = 0
			for MSB > 0 {
				// Keep reading the size until the MSB is 0
				_bytes := make([]byte, 1)
				r.read(_bytes)
				_byte := _bytes[0]
				MSB = (_byte & 128)
				offset += int((uint(_byte) & 127) << shift)
				shift += 7
			}
		case object.Type == OBJ_REF_DELTA:
			// Read the 20-byte base object name
			log.Printf("encountered ref delta")
			baseObjName := make([]byte, 20)
			r.read(baseObjName)
			fmt.Printf("Obj name %x\n", baseObjName)
			fmt.Printf("err %+v", r.err)
		}
	}
	return objects, r.err
}
// parseIdx parses a version 2 pack index (idx) file and returns a packObject
// for every entry, with Name and Offset populated. Fix: the version error's
// format string previously had a %d verb with no argument (a go vet error).
func parseIdx(idx io.Reader, version int) (objects []*packObject, err error) {
	if version != 2 {
		return nil, fmt.Errorf("cannot parse IDX with version %d", version)
	}
	// parse version 2 idxfile
	// Version 2 starts with a 4-byte magic number
	header := make([]byte, 4)
	n, err := idx.Read(header)
	if err != nil {
		return nil, err
	}
	if !reflect.DeepEqual([]byte{255, 116, 79, 99}, header) {
		return nil, fmt.Errorf("invalid IDX header: %q", string(header))
	}
	// Then the version number in four bytes
	versionBts := make([]byte, 4)
	_, err = idx.Read(versionBts)
	if err != nil {
		return nil, err
	}
	// We already know the version, so we can ignore it
	// Then the fanout table
	// The fanout table has 256 entries, each 4 bytes long
	fanoutTableFlat := make([]byte, 256*4)
	n, err = idx.Read(fanoutTableFlat)
	if err == nil && n != len(fanoutTableFlat) {
		err = fmt.Errorf("read incomplete fanout table: %d", n)
	}
	if err != nil {
		return nil, err
	}
	// Unflatten the fanout table into 256 4-byte entries
	fanoutTable := make([][]byte, 256)
	for i := 0; i < len(fanoutTableFlat); i += 4 {
		entry := fanoutTableFlat[i : i+4]
		// i is always a multiple of 4, so i/4 walks indices 0..255
		// (equivalent to the original (i+1)/4, but clearer)
		fanoutTable[i/4] = entry
	}
	for i, row := range fanoutTable {
		log.Printf("row %d: %+v", i, row)
	}
	// The final fanout entry is the total number of objects in the pack
	numObjects := int(bytesToNum(fanoutTable[len(fanoutTable)-1]))
	objects = make([]*packObject, numObjects)
	objectNames := make([]SHA, numObjects)
	// Next: numObjects 20-byte object names
	for i := 0; i < numObjects; i++ {
		sha := make([]byte, 20)
		n, err = idx.Read(sha)
		if err != nil {
			return nil, err
		}
		log.Printf("%x", sha[:n])
		objectNames[i] = SHA(fmt.Sprintf("%x", sha[:n]))
		objects[i] = &packObject{Name: SHA(fmt.Sprintf("%x", sha[:n]))}
	}
	// Then come 4-byte CRC32 values
	crc32Table := make([]byte, numObjects*4)
	_, err = idx.Read(crc32Table)
	if err != nil {
		return nil, err
	}
	// Next come 4-byte offset values
	// If the MSB is set, there is an index into the next table
	// otherwise, these are 31 bits each
	offsetsFlat := make([]byte, numObjects*4)
	_, err = idx.Read(offsetsFlat)
	if err != nil {
		return nil, err
	}
	offsets := make([]int, numObjects)
	for i := 0; i < len(offsets); i++ {
		offset := int(bytesToNum(offsetsFlat[i*4 : (i+1)*4]))
		// check if the MSB is 1 (the 8-byte offset table is unimplemented)
		if offset&2147483648 > 0 {
			return nil, fmt.Errorf("packfile is too large to parse")
		}
		offsets[i] = offset
		objects[i].Offset = offset
	}
	// If the pack file is more than 2 GB, there will be a table of 8-byte offset entries here
	// TODO implement this
	// This is the same as the checksum at the end of the corresponding packfile
	packfileChecksum := make([]byte, 20)
	_, err = idx.Read(packfileChecksum)
	if err != nil {
		return
	}
	// This is the checksum of all of the above data
	// We're not checking it now, but if we can't read it properly
	// that means an error has occurred earlier in parsing
	idxChecksum := make([]byte, 20)
	_, err = idx.Read(idxChecksum)
	if err != nil {
		return
	}
	// TODO check that there isn't any data left
	return objects, err
}
Clean functions up
package gitgo
import (
"bufio"
"bytes"
"compress/zlib"
"fmt"
"io"
"log"
"os"
"path"
"path/filepath"
"reflect"
)
// packObject is a single object parsed out of a git packfile, identified by
// its SHA and its byte offset within the pack.
type packObject struct {
	Name   SHA
	Offset int
	Data   []byte
	Type   packObjectType
	err    error // was an error encountered while processing this object?
}

//go:generate stringer -type=packObjectType
type packObjectType uint8

// Object types as encoded in a packfile object header. The first four are
// complete objects; the two delta types reference a base object.
const (
	_ packObjectType = iota
	OBJ_COMMIT
	OBJ_TREE
	OBJ_BLOB
	OBJ_TAG
	OBJ_OFS_DELTA
	OBJ_REF_DELTA
)
// errReadSeeker wraps an io.ReadSeeker with a "sticky" error: once a read
// fails, all subsequent reads are no-ops and the first error is kept.
type errReadSeeker struct {
	r   io.ReadSeeker
	err error
}

// Read, but only if no errors have been encountered
// in a previous read (including io.EOF)
func (er *errReadSeeker) read(buf []byte) {
	if er.err != nil {
		return
	}
	_, er.err = er.r.Read(buf)
}

// Seek delegates to the underlying ReadSeeker; seek errors are not made
// sticky.
func (er *errReadSeeker) Seek(offset int64, whence int) (int64, error) {
	return er.r.Seek(offset, whence)
}
// GetIdxPath returns the path of the first pack index (.idx) file found in
// the repository's objects/pack directory. It returns an error if the glob
// fails or if no idx file exists (the original indexed files[0] unchecked,
// which panicked on an empty result).
func GetIdxPath(dotGitRootPath string) (idxFilePath string, err error) {
	files, err := filepath.Glob(path.Join(dotGitRootPath, "objects/pack", "*.idx"))
	if err != nil {
		return "", err
	}
	if len(files) == 0 {
		return "", fmt.Errorf("no idx files found in %s", dotGitRootPath)
	}
	return files[0], nil
}
// VerifyPack parses the given packfile together with its idx file, logging
// every object found (including the per-object error for objects that failed
// to process), and returns any overall parse error.
func VerifyPack(pack io.ReadSeeker, idx io.Reader) error {
	objects, err := parsePack(errReadSeeker{pack, nil}, idx)
	for _, object := range objects {
		if object.err == nil {
			log.Printf("Found %s %s", object.Name, object.Type)
		} else {
			log.Printf("Found %s %s %s", object.Name, object.Type, object.err)
		}
	}
	return err
}
// parsePack parses a packfile, using the accompanying idx file to locate the
// objects within it. Only version 2 packfiles are supported.
func parsePack(pack errReadSeeker, idx io.Reader) (objects []*packObject, err error) {
	signature := make([]byte, 4)
	pack.read(signature)
	// BUG FIX: the original tested the named return err here, which was
	// still nil — read failures accumulate on pack.err instead.
	if pack.err != nil {
		return nil, pack.err
	}
	if string(signature) != "PACK" {
		return nil, fmt.Errorf("Received invalid signature: %s", string(signature))
	}
	log.Printf("signature %+v", signature)
	version := make([]byte, 4)
	pack.read(version)
	// TODO use encoding/binary here
	log.Printf("version is %+v", version)
	v := version[3]
	switch v {
	case 2:
		// Parse version 2 packfile: the idx file yields the object list
		// (names and offsets), then the pack itself supplies the data.
		objects, err = parseIdx(idx, 2)
		if err != nil {
			return
		}
		objects, err = parsePackV2(pack, objects)
		return
	default:
		return nil, fmt.Errorf("cannot parse packfile with version %d", v)
	}
}
func Clone(r io.Reader) (*bufio.Reader, *bufio.Reader) {
var b1 bytes.Buffer
var b2 bytes.Buffer
w := io.MultiWriter(&b1, &b2)
io.Copy(w, r)
return bufio.NewReader(&b1), bufio.NewReader(&b2)
}
// bytesToNum interprets b as a big-endian unsigned integer.
func bytesToNum(b []byte) uint {
	var n uint
	for _, c := range b {
		n = n<<8 | uint(c)
	}
	return n
}
// parsePackV2 parses a packfile that uses
// version 2 of the format. objects comes from the idx file and carries each
// object's offset; this function fills in Type and (for full objects) Data.
func parsePackV2(r errReadSeeker, objects []*packObject) ([]*packObject, error) {
	numObjectsBts := make([]byte, 4)
	r.read(numObjectsBts)
	// NOTE(review): the %d verb below is applied to the raw byte slice, not
	// the decoded count — should be bytesToNum(numObjectsBts).
	if int(bytesToNum(numObjectsBts)) != len(objects) {
		return nil, fmt.Errorf("Expected %d objects and found %d", len(objects), numObjectsBts)
	}
	for _, object := range objects {
		r.Seek(int64(object.Offset), os.SEEK_SET)
		r.Seek(0, os.SEEK_CUR)
		_bytes := make([]byte, 1)
		r.read(_bytes)
		_byte := _bytes[0]
		// This will extract the last three bits of
		// the first nibble in the byte
		// which tells us the object type
		object.Type = packObjectType(((_byte >> 4) & 7))
		// determine the (decompressed) object size
		// and then deflate the following bytes
		// The most-significant byte (MSB)
		// tells us whether we need to read more bytes
		// to get the encoded object size
		MSB := (_byte & 128) // will be either 128 or 0
		// This will extract the last four bits of the byte
		var objectSize int = int((uint(_byte) & 15))
		// shift the first size by 0
		// and the rest by 4 + (i-1) * 7
		var shift uint = 4
		// If the most-significant bit is 0, this is the last byte
		// for the object size
		for MSB > 0 {
			// Keep reading the size until the MSB is 0
			_bytes := make([]byte, 1)
			r.read(_bytes)
			_byte := _bytes[0]
			MSB = (_byte & 128)
			objectSize += int((uint(_byte) & 127) << shift)
			shift += 7
		}
		switch {
		case object.Type < 5:
			// the object is a commit, tree, blob, or tag
			// (objectSize) is the size, in bytes, of this object *when expanded*
			// the IDX file tells us how many *compressed* bytes the object will take
			// (in other words, how much space to allocate for the result)
			object.Data = make([]byte, objectSize)
			zr, err := zlib.NewReader(r.r)
			if err != nil {
				return nil, err
			}
			n, err := zr.Read(object.Data)
			if err != nil {
				if err == io.EOF {
					err = nil
				} else {
					return nil, err
				}
			}
			zr.Close()
			if n != objectSize {
				return nil, fmt.Errorf("expected to read %d bytes, read %d", objectSize, n)
			}
		case object.Type == OBJ_OFS_DELTA:
			// read the n-byte offset
			// from the git docs:
			// "n bytes with MSB set in all but the last one.
			// The offset is then the number constructed by
			// concatenating the lower 7 bit of each byte, and
			// for n >= 2 adding 2^7 + 2^14 + ... + 2^(7*(n-1))
			// to the result."
			log.Printf("encountered ofs delta")
			var offset int
			MSB := (_byte & 128) // will be either 128 or 0
			var shift uint = 0
			for MSB > 0 {
				// Keep reading the size until the MSB is 0
				_bytes := make([]byte, 1)
				r.read(_bytes)
				_byte := _bytes[0]
				MSB = (_byte & 128)
				offset += int((uint(_byte) & 127) << shift)
				shift += 7
			}
		case object.Type == OBJ_REF_DELTA:
			// Read the 20-byte base object name
			log.Printf("encountered ref delta")
			baseObjName := make([]byte, 20)
			r.read(baseObjName)
			fmt.Printf("Obj name %x\n", baseObjName)
			fmt.Printf("size %d\n", objectSize)
			zr, err := zlib.NewReader(r.r)
			if err != nil {
				object.err = err
				continue
			}
			// NOTE(review): object.Data is never allocated in this branch,
			// so this Read is into a nil slice and reads 0 bytes — confirm
			// whether it should be make([]byte, objectSize) first.
			_, err = zr.Read(object.Data)
			if err != nil && err != io.EOF {
				object.err = err
				continue
			}
			zr.Close()
		}
	}
	return objects, r.err
}
// parseIdx parses a version-2 pack index (IDX) stream and returns one
// packObject per entry, populated with its SHA-1 Name and its byte
// Offset into the corresponding packfile. Only version 2 is supported;
// packs larger than 2 GB (large-offset table) are rejected.
func parseIdx(idx io.Reader, version int) (objects []*packObject, err error) {
	if version != 2 {
		// BUG FIX: the original format string had no argument for %d.
		return nil, fmt.Errorf("cannot parse IDX with version %d", version)
	}
	// Version 2 starts with the 4-byte magic number "\377tOc".
	// io.ReadFull is used for every fixed-size field below: a bare Read
	// may legally return fewer bytes without signalling an error.
	header := make([]byte, 4)
	if _, err = io.ReadFull(idx, header); err != nil {
		return nil, err
	}
	if !reflect.DeepEqual([]byte{255, 116, 79, 99}, header) {
		return nil, fmt.Errorf("invalid IDX header: %q", string(header))
	}
	// Then the version number in four bytes. We already know the
	// version, so the value is discarded.
	versionBts := make([]byte, 4)
	if _, err = io.ReadFull(idx, versionBts); err != nil {
		return nil, err
	}
	// The fanout table: 256 entries, each a 4-byte big-endian count.
	fanoutTableFlat := make([]byte, 256*4)
	if _, err = io.ReadFull(idx, fanoutTableFlat); err != nil {
		return nil, err
	}
	// Re-slice the flat buffer into its 256 4-byte entries.
	fanoutTable := make([][]byte, 256)
	for i := 0; i < len(fanoutTableFlat); i += 4 {
		fanoutTable[i/4] = fanoutTableFlat[i : i+4]
	}
	// The final fanout entry is the total object count.
	numObjects := int(bytesToNum(fanoutTable[len(fanoutTable)-1]))
	objects = make([]*packObject, numObjects)
	objectNames := make([]SHA, numObjects)
	log.Print("Object names from IDX:")
	// Next: numObjects sorted 20-byte SHA-1 object names.
	for i := 0; i < numObjects; i++ {
		sha := make([]byte, 20)
		if _, err = io.ReadFull(idx, sha); err != nil {
			return nil, err
		}
		log.Printf("%x", sha)
		name := SHA(fmt.Sprintf("%x", sha))
		objectNames[i] = name
		objects[i] = &packObject{Name: name}
	}
	// Then 4-byte CRC32 values, one per object (read but not verified).
	crc32Table := make([]byte, numObjects*4)
	if _, err = io.ReadFull(idx, crc32Table); err != nil {
		return nil, err
	}
	// Next come 4-byte offset values. If the MSB is set, the entry is an
	// index into the 8-byte large-offset table; otherwise it is a plain
	// 31-bit offset.
	offsetsFlat := make([]byte, numObjects*4)
	if _, err = io.ReadFull(idx, offsetsFlat); err != nil {
		return nil, err
	}
	offsets := make([]int, numObjects)
	for i := 0; i < len(offsets); i++ {
		offset := int(bytesToNum(offsetsFlat[i*4 : (i+1)*4]))
		// MSB set means a > 2 GB pack; the large-offset table is
		// unimplemented (TODO), so bail out explicitly.
		if offset&2147483648 > 0 {
			return nil, fmt.Errorf("packfile is too large to parse")
		}
		offsets[i] = offset
		objects[i].Offset = offset
	}
	// Trailer: the packfile checksum (same bytes as at the end of the
	// corresponding packfile), then the checksum of all preceding IDX
	// data. Neither is verified, but a short read here means earlier
	// parsing consumed the wrong number of bytes.
	packfileChecksum := make([]byte, 20)
	if _, err = io.ReadFull(idx, packfileChecksum); err != nil {
		return
	}
	idxChecksum := make([]byte, 20)
	if _, err = io.ReadFull(idx, idxChecksum); err != nil {
		return
	}
	// TODO check that there isn't any data left
	return objects, err
}
|
package otto
import (
"bytes"
"io"
"testing"
"github.com/robertkrimen/otto/parser"
)
func TestOtto(t *testing.T) {
tt(t, func() {
test, _ := test()
test("xyzzy = 2", 2)
test("xyzzy + 2", 4)
test("xyzzy += 16", 18)
test("xyzzy", 18)
test(`
(function(){
return 1
})()
`, 1)
test(`
(function(){
return 1
}).call(this)
`, 1)
test(`
(function(){
var result
(function(){
result = -1
})()
return result
})()
`, -1)
test(`
var abc = 1
abc || (abc = -1)
abc
`, 1)
test(`
var abc = (function(){ 1 === 1 })();
abc;
`, "undefined")
})
}
// TestFunction__ verifies that a named function declaration can be
// defined and then called, returning its value to the caller.
func TestFunction__(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
function abc() {
return 1;
};
abc();
`, 1)
})
}
func TestIf(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
abc = undefined;
def = undefined;
if (true) abc = 1
else abc = 2;
if (false) {
def = 3;
}
else def = 4;
[ abc, def ];
`, "1,4")
test(`
if (1) {
abc = 1;
}
else {
abc = 0;
}
abc;
`, 1)
test(`
if (0) {
abc = 1;
}
else {
abc = 0;
}
abc;
`, 0)
test(`
abc = 0;
if (0) {
abc = 1;
}
abc;
`, 0)
test(`
abc = 0;
if (abc) {
abc = 1;
}
abc;
`, 0)
})
}
// TestSequence verifies that the comma (sequence) operator evaluates
// its operands and yields the value of the last one.
func TestSequence(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
1, 2, 3;
`, 3)
})
}
// TestCall verifies a call through a member expression on a built-in
// object (Math.pow) returns the expected result.
func TestCall(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
Math.pow(3, 2);
`, 9)
})
}
// TestRunFunctionWithSetArguments checks that a Go map installed via
// vm.Set can be passed to a script function as a JavaScript object,
// and that the script's mutations are visible in the original Go map.
func TestRunFunctionWithSetArguments(t *testing.T) {
tt(t, func() {
vm := New()
vm.Run(`var sillyFunction = function(record){record.silly = true; record.answer *= -1};`)
record := map[string]interface{}{"foo": "bar", "answer": 42}
// Set performs a conversion that allows the map to be addressed as a Javascript object
vm.Set("argument", record)
_, err := vm.Run("sillyFunction(argument)")
is(err, nil)
// Numeric values written by the script come back as float64.
is(record["answer"].(float64), -42)
is(record["silly"].(bool), true)
})
}
// TestRunFunctionWithArgumentsPassedToCall is the vm.Call counterpart
// of TestRunFunctionWithSetArguments: the Go map is passed directly as
// a Call argument (no vm.Set), and script mutations must still be
// reflected in the original map.
func TestRunFunctionWithArgumentsPassedToCall(t *testing.T) {
tt(t, func() {
vm := New()
vm.Run(`var sillyFunction = function(record){record.silly = true; record.answer *= -1};`)
record := map[string]interface{}{"foo": "bar", "answer": 42}
_, err := vm.Call("sillyFunction", nil, record)
is(err, nil)
// Numeric values written by the script come back as float64.
is(record["answer"].(float64), -42)
is(record["silly"].(bool), true)
})
}
func TestMember(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
abc = [ 0, 1, 2 ];
def = {
"abc": 0,
"def": 1,
"ghi": 2,
};
[ abc[2], def.abc, abc[1], def.def ];
`, "2,0,1,1")
})
}
func Test_this(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
typeof this;
`, "object")
})
}
func TestWhile(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
limit = 4
abc = 0
while (limit) {
abc = abc + 1
limit = limit - 1
}
abc;
`, 4)
})
}
func TestSwitch_break(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
var abc = true;
var ghi = "Xyzzy";
while (abc) {
switch ('def') {
case 'def':
break;
}
ghi = "Nothing happens.";
abc = false;
}
ghi;
`, "Nothing happens.")
test(`
var abc = true;
var ghi = "Xyzzy";
WHILE:
while (abc) {
switch ('def') {
case 'def':
break WHILE;
}
ghi = "Nothing happens."
abc = false
}
ghi;
`, "Xyzzy")
test(`
var ghi = "Xyzzy";
FOR:
for (;;) {
switch ('def') {
case 'def':
break FOR;
ghi = "";
}
ghi = "Nothing happens.";
}
ghi;
`, "Xyzzy")
test(`
var ghi = "Xyzzy";
FOR:
for (var jkl in {}) {
switch ('def') {
case 'def':
break FOR;
ghi = "Something happens.";
}
ghi = "Nothing happens.";
}
ghi;
`, "Xyzzy")
test(`
var ghi = "Xyzzy";
function jkl() {
switch ('def') {
case 'def':
break;
ghi = "";
}
ghi = "Nothing happens.";
}
while (abc) {
jkl();
abc = false;
ghi = "Something happens.";
}
ghi;
`, "Something happens.")
})
}
func TestTryFinally(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
var abc;
try {
abc = 1;
}
finally {
abc = 2;
}
abc;
`, 2)
test(`
var abc = false, def = 0;
do {
def += 1;
if (def > 100) {
break;
}
try {
continue;
}
finally {
abc = true;
}
}
while(!abc && def < 10)
def;
`, 1)
test(`
var abc = false, def = 0, ghi = 0;
do {
def += 1;
if (def > 100) {
break;
}
try {
throw 0;
}
catch (jkl) {
continue;
}
finally {
abc = true;
ghi = 11;
}
ghi -= 1;
}
while(!abc && def < 10)
ghi;
`, 11)
test(`
var abc = 0, def = 0;
do {
try {
abc += 1;
throw "ghi";
}
finally {
def = 1;
continue;
}
def -= 1;
}
while (abc < 2)
[ abc, def ];
`, "2,1")
})
}
func TestTryCatch(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
var abc = 1;
try {
throw 4;
abc = -1;
}
catch (xyzzy) {
abc += xyzzy + 1;
}
abc;
`, 6)
test(`
abc = 1;
var def;
try {
try {
throw 4;
abc = -1;
}
catch (xyzzy) {
abc += xyzzy + 1;
throw 64;
}
}
catch (xyzzy) {
def = xyzzy;
abc = -2;
}
[ def, abc ];
`, "64,-2")
})
}
func TestWith(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
var def;
with({ abc: 9 }) {
def = abc;
}
def;
`, 9)
test(`
var def;
with({ abc: function(){
return 11;
} }) {
def = abc();
}
def;
`, 11)
})
}
func TestSwitch(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
var abc = 0;
switch (0) {
default:
abc += 1;
case 1:
abc += 2;
case 2:
abc += 4;
case 3:
abc += 8;
}
abc;
`, 15)
test(`
abc = 0;
switch (3) {
default:
abc += 1;
case 1:
abc += 2;
case 2:
abc += 4;
case 3:
abc += 8;
}
abc;
`, 8)
test(`
abc = 0;
switch (60) {
case 1:
abc += 2;
case 2:
abc += 4;
case 3:
abc += 8;
}
abc;
`, 0)
})
}
func TestForIn(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
var abc;
for (property in { a: 1 }) {
abc = property;
}
abc;
`, "a")
test(`
var ghi;
for (property in new String("xyzzy")) {
ghi = property;
}
ghi;
`, "4")
})
}
func TestFor(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
var abc = 7;
for (i = 0; i < 3; i += 1) {
abc += 1;
}
abc;
`, 10)
test(`
abc = 7;
for (i = 0; i < 3; i += 1) {
abc += 1;
if (i == 1) {
break;
}
}
abc;
`, 9)
test(`
abc = 7;
for (i = 0; i < 3; i += 1) {
if (i == 2) {
continue;
}
abc += 1;
}
abc;
`, 9)
test(`
abc = 0;
for (;;) {
abc += 1;
if (abc == 3)
break;
}
abc;
`, 3)
test(`
for (abc = 0; ;) {
abc += 1;
if (abc == 3)
break;
}
abc;
`, 3)
test(`
for (abc = 0; ; abc += 1) {
abc += 1;
if (abc == 3)
break;
}
abc;
`, 3)
})
}
func TestLabelled(t *testing.T) {
tt(t, func() {
test, _ := test()
// TODO Add emergency break
test(`
xyzzy: for (var abc = 0; abc <= 0; abc++) {
for (var def = 0; def <= 1; def++) {
if (def === 0) {
continue xyzzy;
} else {
}
}
}
`)
test(`
abc = 0
def:
while (true) {
while (true) {
abc = abc + 1
if (abc > 11) {
break def;
}
}
}
abc;
`, 12)
test(`
abc = 0
def:
do {
do {
abc = abc + 1
if (abc > 11) {
break def;
}
} while (true)
} while (true)
abc;
`, 12)
})
}
func TestConditional(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
[ true ? false : true, true ? 1 : 0, false ? 3.14159 : "abc" ];
`, "false,1,abc")
})
}
func TestArrayLiteral(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
[ 1, , 3.14159 ];
`, "1,,3.14159")
})
}
func TestAssignment(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
var abc = 1;
abc;
`, 1)
test(`
abc += 2;
abc;
`, 3)
})
}
func TestBinaryOperation(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`0 == 1`, false)
test(`1 == "1"`, true)
test(`0 === 1`, false)
test(`1 === "1"`, false)
test(`"1" === "1"`, true)
})
}
func Test_typeof(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`typeof abc`, "undefined")
test(`typeof abc === 'undefined'`, true)
test(`typeof {}`, "object")
test(`typeof null`, "object")
})
}
func Test_PrimitiveValueObjectValue(t *testing.T) {
tt(t, func() {
test, _ := test()
Number11 := test(`new Number(11)`)
is(Number11.float64(), 11)
})
}
func Test_eval(t *testing.T) {
tt(t, func() {
test, _ := test()
// FIXME terst, Is this correct?
test(`
var abc = 1;
`, "undefined")
test(`
eval("abc += 1");
`, 2)
test(`
(function(){
var abc = 11;
eval("abc += 1");
return abc;
})();
`, 12)
test(`abc`, 2)
test(`
(function(){
try {
eval("var prop = \\u2029;");
return false;
} catch (abc) {
return [ abc instanceof SyntaxError, abc.toString() ];
}
})();
`, "true,SyntaxError: Unexpected token ILLEGAL")
test(`
function abc(){
this.THIS = eval("this");
}
var def = new abc();
def === def.THIS;
`, true)
})
}
func Test_evalDirectIndirect(t *testing.T) {
tt(t, func() {
test, _ := test()
// (function () {return this;}()).abc = "global";
test(`
var abc = "global";
(function(){
try {
var _eval = eval;
var abc = "function";
return [
_eval("\'global\' === abc"), // eval (Indirect)
eval("\'function\' === abc"), // eval (Direct)
];
} finally {
delete this.abc;
}
})();
`, "true,true")
})
}
func TestError_URIError(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`new URIError() instanceof URIError`, true)
test(`
var abc
try {
decodeURI("http://example.com/ _^#%")
}
catch (def) {
abc = def instanceof URIError
}
abc
`, true)
})
}
func TestTo(t *testing.T) {
tt(t, func() {
test, _ := test()
{
value, _ := test(`"11"`).ToFloat()
is(value, float64(11))
}
{
value, _ := test(`"11"`).ToInteger()
is(value, int64(11))
value, _ = test(`1.1`).ToInteger()
is(value, int64(1))
}
})
}
func TestShouldError(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`raise:
xyzzy
throw new TypeError("Nothing happens.")
`, "ReferenceError: 'xyzzy' is not defined")
})
}
func TestAPI(t *testing.T) {
tt(t, func() {
test, vm := test()
test(`
String.prototype.xyzzy = function(){
return this.length + 11 + (arguments[0] || 0)
}
abc = new String("xyzzy")
def = "Nothing happens."
abc.xyzzy()
`, 16)
abc, _ := vm.Get("abc")
def, _ := vm.Get("def")
object := abc.Object()
result, _ := object.Call("xyzzy")
is(result, 16)
result, _ = object.Call("xyzzy", 1)
is(result, 17)
value, _ := object.Get("xyzzy")
result, _ = value.Call(def)
is(result, 27)
result, _ = value.Call(def, 3)
is(result, 30)
object = value.Object() // Object xyzzy
result, _ = object.Value().Call(def, 3)
is(result, 30)
test(`
abc = {
'abc': 1,
'def': false,
3.14159: NaN,
};
abc['abc'];
`, 1)
abc, err := vm.Get("abc")
is(err, nil)
object = abc.Object() // Object abc
value, err = object.Get("abc")
is(err, nil)
is(value, 1)
is(object.Keys(), []string{"abc", "def", "3.14159"})
test(`
abc = [ 0, 1, 2, 3.14159, "abc", , ];
abc.def = true;
`)
abc, err = vm.Get("abc")
is(err, nil)
object = abc.Object() // Object abc
is(object.Keys(), []string{"0", "1", "2", "3", "4", "def"})
})
}
func TestUnicode(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`var abc = eval("\"a\uFFFFa\"");`, "undefined")
test(`abc.length`, 3)
test(`abc != "aa"`, true)
test("abc[1] === \"\uFFFF\"", true)
})
}
func TestDotMember(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
abc = {
ghi: 11,
}
abc.def = "Xyzzy"
abc.null = "Nothing happens."
`)
test(`abc.def`, "Xyzzy")
test(`abc.null`, "Nothing happens.")
test(`abc.ghi`, 11)
test(`
abc = {
null: 11,
}
`)
test(`abc.def`, "undefined")
test(`abc.null`, 11)
test(`abc.ghi`, "undefined")
})
}
// Test_stringToFloat checks parseNumber on extreme and malformed
// numeric literals: an overflowing exponent yields Infinity, and a
// literal with trailing junk yields NaN.
func Test_stringToFloat(t *testing.T) {
tt(t, func() {
is(parseNumber("10e10000"), _Infinity)
is(parseNumber("10e10_."), _NaN)
})
}
func Test_delete(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
delete 42;
`, true)
test(`
var abc = delete $_undefined_$;
abc = abc && delete ($_undefined_$);
abc;
`, true)
// delete should not trigger get()
test(`
var abc = {
get def() {
throw "Test_delete: delete should not trigger get()"
}
};
delete abc.def
`, true)
})
}
func TestObject_defineOwnProperty(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
var object = {};
var descriptor = new Boolean(false);
descriptor.configurable = true;
Object.defineProperties(object, {
property: descriptor
});
var abc = object.hasOwnProperty("property");
delete object.property;
var def = object.hasOwnProperty("property");
[ abc, def ];
`, "true,false")
test(`
var object = [0, 1, 2];
Object.defineProperty(object, "0", {
value: 42,
writable: false,
enumerable: false,
configurable: false
});
var abc = Object.getOwnPropertyDescriptor(object, "0");
[ abc.value, abc.writable, abc.enumerable, abc.configurable ];
`, "42,false,false,false")
test(`
var abc = { "xyzzy": 42 };
var def = Object.defineProperties(abc, "");
abc === def;
`, true)
})
}
// Test_assignmentEvaluationOrder checks that the operands of a binary
// expression are evaluated strictly left to right when one operand is
// an assignment: the assignment's effect is visible to the right-hand
// operand but not to an already-evaluated left-hand operand.
func Test_assignmentEvaluationOrder(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
var abc = 0;
((abc = 1) & abc);
`, 1)
test(`
var abc = 0;
(abc & (abc = 1));
`, 0)
})
}
func TestOttoCall(t *testing.T) {
tt(t, func() {
vm := New()
_, err := vm.Run(`
var abc = {
ghi: 1,
def: function(def){
var ghi = 0;
if (this.ghi) {
ghi = this.ghi;
}
return "def: " + (def + 3.14159 + ghi);
}
};
function structFunc(s) {
return s.Val;
}
`)
is(err, nil)
value, err := vm.Call(`abc.def`, nil, 2)
is(err, nil)
is(value, "def: 6.14159")
value, err = vm.Call(`abc.def`, "", 2)
is(err, nil)
is(value, "def: 5.14159")
// Do not attempt to do a ToValue on a this of nil
value, err = vm.Call(`jkl.def`, nil, 1, 2, 3)
is(err, "!=", nil)
is(value, "undefined")
value, err = vm.Call(`[ 1, 2, 3, undefined, 4 ].concat`, nil, 5, 6, 7, "abc")
is(err, nil)
is(value, "1,2,3,,4,5,6,7,abc")
s := struct{ Val int }{Val: 10}
value, err = vm.Call("structFunc", nil, s)
is(err, nil)
is(value, 10)
})
}
func TestOttoCall_new(t *testing.T) {
tt(t, func() {
test, vm := test()
vm.Set("abc", func(call FunctionCall) Value {
value, err := call.Otto.Call(`new Object`, nil, "Nothing happens.")
is(err, nil)
return value
})
test(`
def = abc();
[ def, def instanceof String ];
`, "Nothing happens.,true")
})
}
// TestOttoCall_newWithBrackets verifies that `new` works when the
// constructor is reached through a bracket member expression — here
// with the reserved word "default" as the property name.
func TestOttoCall_newWithBrackets(t *testing.T) {
tt(t, func() {
test, vm := test()
_, err := vm.Run(`var a = {default: function B(x) { this.x = x; } }`)
is(err, nil)
test(`(new a['default'](1)).x`, 1)
})
}
func TestOttoCall_throw(t *testing.T) {
// FIXME? (Been broken for a while)
// Looks like this has been broken for a while... what
// behavior do we want here?
if true {
return
}
tt(t, func() {
test, vm := test()
vm.Set("abc", func(call FunctionCall) Value {
if false {
call.Otto.Call(`throw eval`, nil, "({ def: 3.14159 })")
}
call.Otto.Call(`throw Error`, nil, "abcdef")
return Value{}
})
// TODO try { abc(); } catch (err) { error = err }
// Possible unrelated error case:
// If error is not declared beforehand, is later referencing it a ReferenceError?
// Should the catch { } declare error in the outer scope?
test(`
var error;
try {
abc();
}
catch (err) {
error = err;
}
[ error instanceof Error, error.message, error.def ];
`, "true,abcdef,")
vm.Set("def", func(call FunctionCall) Value {
call.Otto.Call(`throw new Object`, nil, 3.14159)
return UndefinedValue()
})
test(`
try {
def();
}
catch (err) {
error = err;
}
[ error instanceof Error, error.message, error.def, typeof error, error, error instanceof Number ];
`, "false,,,object,3.14159,true")
})
}
func TestOttoCopy(t *testing.T) {
tt(t, func() {
vm0 := New()
vm0.Run(`
var abc = function() {
return "Xyzzy";
};
function def() {
return abc() + (0 + {});
}
`)
value, err := vm0.Run(`
def();
`)
is(err, nil)
is(value, "Xyzzy0[object Object]")
vm1 := vm0.Copy()
value, err = vm1.Run(`
def();
`)
is(err, nil)
is(value, "Xyzzy0[object Object]")
vm1.Run(`
abc = function() {
return 3.14159;
};
`)
value, err = vm1.Run(`
def();
`)
is(err, nil)
is(value, "3.141590[object Object]")
value, err = vm0.Run(`
def();
`)
is(err, nil)
is(value, "Xyzzy0[object Object]")
{
vm0 := New()
vm0.Run(`
var global = (function () {return this;}())
var abc = 0;
var vm = "vm0";
var def = (function(){
var jkl = 0;
var abc = function() {
global.abc += 1;
jkl += 1;
return 1;
};
return function() {
return [ vm, global.abc, jkl, abc() ];
};
})();
`)
value, err := vm0.Run(`
def();
`)
is(err, nil)
is(value, "vm0,0,0,1")
vm1 := vm0.Copy()
vm1.Set("vm", "vm1")
value, err = vm1.Run(`
def();
`)
is(err, nil)
is(value, "vm1,1,1,1")
value, err = vm0.Run(`
def();
`)
is(err, nil)
is(value, "vm0,1,1,1")
value, err = vm1.Run(`
def();
`)
is(err, nil)
is(value, "vm1,2,2,1")
}
})
}
func TestOttoCall_clone(t *testing.T) {
tt(t, func() {
vm := New().clone()
rt := vm.runtime
{
// FIXME terst, Check how this comparison is done
is(rt.global.Array.prototype, rt.global.FunctionPrototype)
is(rt.global.ArrayPrototype, "!=", nil)
is(rt.global.Array.runtime, rt)
is(rt.global.Array.prototype.runtime, rt)
is(rt.global.Array.get("prototype")._object().runtime, rt)
}
{
value, err := vm.Run(`[ 1, 2, 3 ].toString()`)
is(err, nil)
is(value, "1,2,3")
}
{
value, err := vm.Run(`[ 1, 2, 3 ]`)
is(err, nil)
is(value, "1,2,3")
object := value._object()
is(object, "!=", nil)
is(object.prototype, rt.global.ArrayPrototype)
value, err = vm.Run(`Array.prototype`)
is(err, nil)
object = value._object()
is(object.runtime, rt)
is(object, "!=", nil)
is(object, rt.global.ArrayPrototype)
}
{
otto1 := New()
_, err := otto1.Run(`
var abc = 1;
var def = 2;
`)
is(err, nil)
otto2 := otto1.clone()
value, err := otto2.Run(`abc += 1; abc;`)
is(err, nil)
is(value, 2)
value, err = otto1.Run(`abc += 4; abc;`)
is(err, nil)
is(value, 5)
}
{
vm1 := New()
_, err := vm1.Run(`
var abc = 1;
var def = function(value) {
abc += value;
return abc;
}
`)
is(err, nil)
vm2 := vm1.clone()
value, err := vm2.Run(`def(1)`)
is(err, nil)
is(value, 2)
value, err = vm1.Run(`def(4)`)
is(err, nil)
is(value, 5)
}
{
vm1 := New()
_, err := vm1.Run(`
var abc = {
ghi: 1,
jkl: function(value) {
this.ghi += value;
return this.ghi;
}
};
var def = {
abc: abc
};
`)
is(err, nil)
otto2 := vm1.clone()
value, err := otto2.Run(`def.abc.jkl(1)`)
is(err, nil)
is(value, 2)
value, err = vm1.Run(`def.abc.jkl(4)`)
is(err, nil)
is(value, 5)
}
{
vm1 := New()
_, err := vm1.Run(`
var abc = function() { return "abc"; };
var def = function() { return "def"; };
`)
is(err, nil)
vm2 := vm1.clone()
value, err := vm2.Run(`
[ abc.toString(), def.toString() ];
`)
is(value, `function() { return "abc"; },function() { return "def"; }`)
_, err = vm2.Run(`
var def = function() { return "ghi"; };
`)
is(err, nil)
value, err = vm1.Run(`
[ abc.toString(), def.toString() ];
`)
is(value, `function() { return "abc"; },function() { return "def"; }`)
value, err = vm2.Run(`
[ abc.toString(), def.toString() ];
`)
is(value, `function() { return "abc"; },function() { return "ghi"; }`)
}
})
}
// TestOttoRun exercises vm.Run with each supported source form — an
// *ast.Program from parser.ParseFile, []byte / *bytes.Buffer /
// io.Reader source, and a compiled *Script — and checks that
// re-running the same program re-executes it against the VM's live
// global state (abc keeps incrementing by 2 across all forms).
func TestOttoRun(t *testing.T) {
	tt(t, func() {
		vm := New()
		// An empty program runs cleanly and yields undefined.
		program, err := parser.ParseFile(nil, "", "", 0)
		is(err, nil)
		value, err := vm.Run(program)
		is(err, nil)
		is(value, UndefinedValue())
		// A constant expression gives the same result on every re-run.
		program, err = parser.ParseFile(nil, "", "2 + 2", 0)
		is(err, nil)
		value, err = vm.Run(program)
		is(err, nil)
		is(value, 4)
		value, err = vm.Run(program)
		is(err, nil)
		is(value, 4)
		// A stateful program accumulates in the VM's globals on re-run.
		program, err = parser.ParseFile(nil, "", "var abc; if (!abc) abc = 0; abc += 2; abc;", 0)
		// BUG FIX: this parse error was previously dropped unchecked.
		is(err, nil)
		value, err = vm.Run(program)
		is(err, nil)
		is(value, 2)
		value, err = vm.Run(program)
		is(err, nil)
		is(value, 4)
		value, err = vm.Run(program)
		is(err, nil)
		is(value, 6)
		{
			// The same source as []byte, *bytes.Buffer, and io.Reader
			// keeps incrementing the shared global.
			src := []byte("var abc; if (!abc) abc = 0; abc += 2; abc;")
			value, err = vm.Run(src)
			is(err, nil)
			is(value, 8)
			value, err = vm.Run(bytes.NewBuffer(src))
			is(err, nil)
			is(value, 10)
			value, err = vm.Run(io.Reader(bytes.NewBuffer(src)))
			is(err, nil)
			is(value, 12)
		}
		{
			// A precompiled script runs and re-runs like a parsed program,
			// and String() reproduces the original source.
			script, err := vm.Compile("", `var abc; if (!abc) abc = 0; abc += 2; abc;`)
			is(err, nil)
			value, err = vm.Run(script)
			is(err, nil)
			is(value, 14)
			value, err = vm.Run(script)
			is(err, nil)
			is(value, 16)
			is(script.String(), "// \nvar abc; if (!abc) abc = 0; abc += 2; abc;")
		}
	})
}
// makeTestOttoEvalFunction returns a native Go function that, when
// invoked from a script, evaluates src in the caller's scope via
// c.Otto.Eval, exports the result, and asserts it equals expected.
// It panics on any error so a failure aborts the surrounding run.
func makeTestOttoEvalFunction(src, expected interface{}) func(c FunctionCall) Value {
return func(c FunctionCall) Value {
v, err := c.Otto.Eval(src)
is(err, nil)
if err != nil {
panic(err)
}
i, err := v.Export()
is(err, nil)
if err != nil {
panic(err)
}
is(i, expected)
return v
}
}
func TestOttoEval(t *testing.T) {
tt(t, func() {
vm := New()
vm.Set("x1", makeTestOttoEvalFunction(`a`, 1))
vm.Set("y1", makeTestOttoEvalFunction(`b`, "hello"))
vm.Set("z1", makeTestOttoEvalFunction(`c`, true))
vm.Set("w", makeTestOttoEvalFunction(`a = 2; b = 'what'; c = false; null`, nil))
vm.Set("x2", makeTestOttoEvalFunction(`a`, 2))
vm.Set("y2", makeTestOttoEvalFunction(`b`, "what"))
vm.Set("z2", makeTestOttoEvalFunction(`c`, false))
_, err := vm.Run(`(function t() {
var a = 1;
var b = 'hello';
var c = true;
x1();
y1();
z1();
w();
x2();
y2();
z2();
}())`)
is(err, nil)
})
tt(t, func() {
vm := New()
_, err := vm.Eval("null")
is(err, nil)
vm.Set("a", 1)
vm.Set("b", 2)
v, err := vm.Eval("a + b")
is(err, nil)
r, err := v.Export()
is(err, nil)
is(r, 3)
})
}
// Test_objectLength checks objectLength for values that carry a
// length: a Go slice installed via vm.Set, a JS array literal, and a
// String object. A primitive string is not an object, so it reports 0.
func Test_objectLength(t *testing.T) {
tt(t, func() {
_, vm := test()
value := vm.Set("abc", []string{"jkl", "mno"})
is(objectLength(value._object()), 2)
value, _ = vm.Run(`[1, 2, 3]`)
is(objectLength(value._object()), 3)
value, _ = vm.Run(`new String("abcdefghi")`)
is(objectLength(value._object()), 9)
value, _ = vm.Run(`"abcdefghi"`)
is(objectLength(value._object()), 0)
})
}
// BenchmarkNew measures the cost of constructing a fresh runtime.
func BenchmarkNew(b *testing.B) {
for i := 0; i < b.N; i++ {
New()
}
}
// BenchmarkClone measures vm.clone() on an already-constructed
// runtime; b.ResetTimer excludes the one-time New() setup cost.
func BenchmarkClone(b *testing.B) {
vm := New()
b.ResetTimer()
for i := 0; i < b.N; i++ {
vm.clone()
}
}
Comment out the flaky test case for Eval
package otto
import (
"bytes"
"io"
"testing"
"github.com/robertkrimen/otto/parser"
)
func TestOtto(t *testing.T) {
tt(t, func() {
test, _ := test()
test("xyzzy = 2", 2)
test("xyzzy + 2", 4)
test("xyzzy += 16", 18)
test("xyzzy", 18)
test(`
(function(){
return 1
})()
`, 1)
test(`
(function(){
return 1
}).call(this)
`, 1)
test(`
(function(){
var result
(function(){
result = -1
})()
return result
})()
`, -1)
test(`
var abc = 1
abc || (abc = -1)
abc
`, 1)
test(`
var abc = (function(){ 1 === 1 })();
abc;
`, "undefined")
})
}
func TestFunction__(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
function abc() {
return 1;
};
abc();
`, 1)
})
}
func TestIf(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
abc = undefined;
def = undefined;
if (true) abc = 1
else abc = 2;
if (false) {
def = 3;
}
else def = 4;
[ abc, def ];
`, "1,4")
test(`
if (1) {
abc = 1;
}
else {
abc = 0;
}
abc;
`, 1)
test(`
if (0) {
abc = 1;
}
else {
abc = 0;
}
abc;
`, 0)
test(`
abc = 0;
if (0) {
abc = 1;
}
abc;
`, 0)
test(`
abc = 0;
if (abc) {
abc = 1;
}
abc;
`, 0)
})
}
func TestSequence(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
1, 2, 3;
`, 3)
})
}
func TestCall(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
Math.pow(3, 2);
`, 9)
})
}
func TestRunFunctionWithSetArguments(t *testing.T) {
tt(t, func() {
vm := New()
vm.Run(`var sillyFunction = function(record){record.silly = true; record.answer *= -1};`)
record := map[string]interface{}{"foo": "bar", "answer": 42}
// Set performs a conversion that allows the map to be addressed as a Javascript object
vm.Set("argument", record)
_, err := vm.Run("sillyFunction(argument)")
is(err, nil)
is(record["answer"].(float64), -42)
is(record["silly"].(bool), true)
})
}
func TestRunFunctionWithArgumentsPassedToCall(t *testing.T) {
tt(t, func() {
vm := New()
vm.Run(`var sillyFunction = function(record){record.silly = true; record.answer *= -1};`)
record := map[string]interface{}{"foo": "bar", "answer": 42}
_, err := vm.Call("sillyFunction", nil, record)
is(err, nil)
is(record["answer"].(float64), -42)
is(record["silly"].(bool), true)
})
}
func TestMember(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
abc = [ 0, 1, 2 ];
def = {
"abc": 0,
"def": 1,
"ghi": 2,
};
[ abc[2], def.abc, abc[1], def.def ];
`, "2,0,1,1")
})
}
func Test_this(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
typeof this;
`, "object")
})
}
func TestWhile(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
limit = 4
abc = 0
while (limit) {
abc = abc + 1
limit = limit - 1
}
abc;
`, 4)
})
}
func TestSwitch_break(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
var abc = true;
var ghi = "Xyzzy";
while (abc) {
switch ('def') {
case 'def':
break;
}
ghi = "Nothing happens.";
abc = false;
}
ghi;
`, "Nothing happens.")
test(`
var abc = true;
var ghi = "Xyzzy";
WHILE:
while (abc) {
switch ('def') {
case 'def':
break WHILE;
}
ghi = "Nothing happens."
abc = false
}
ghi;
`, "Xyzzy")
test(`
var ghi = "Xyzzy";
FOR:
for (;;) {
switch ('def') {
case 'def':
break FOR;
ghi = "";
}
ghi = "Nothing happens.";
}
ghi;
`, "Xyzzy")
test(`
var ghi = "Xyzzy";
FOR:
for (var jkl in {}) {
switch ('def') {
case 'def':
break FOR;
ghi = "Something happens.";
}
ghi = "Nothing happens.";
}
ghi;
`, "Xyzzy")
test(`
var ghi = "Xyzzy";
function jkl() {
switch ('def') {
case 'def':
break;
ghi = "";
}
ghi = "Nothing happens.";
}
while (abc) {
jkl();
abc = false;
ghi = "Something happens.";
}
ghi;
`, "Something happens.")
})
}
func TestTryFinally(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
var abc;
try {
abc = 1;
}
finally {
abc = 2;
}
abc;
`, 2)
test(`
var abc = false, def = 0;
do {
def += 1;
if (def > 100) {
break;
}
try {
continue;
}
finally {
abc = true;
}
}
while(!abc && def < 10)
def;
`, 1)
test(`
var abc = false, def = 0, ghi = 0;
do {
def += 1;
if (def > 100) {
break;
}
try {
throw 0;
}
catch (jkl) {
continue;
}
finally {
abc = true;
ghi = 11;
}
ghi -= 1;
}
while(!abc && def < 10)
ghi;
`, 11)
test(`
var abc = 0, def = 0;
do {
try {
abc += 1;
throw "ghi";
}
finally {
def = 1;
continue;
}
def -= 1;
}
while (abc < 2)
[ abc, def ];
`, "2,1")
})
}
func TestTryCatch(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
var abc = 1;
try {
throw 4;
abc = -1;
}
catch (xyzzy) {
abc += xyzzy + 1;
}
abc;
`, 6)
test(`
abc = 1;
var def;
try {
try {
throw 4;
abc = -1;
}
catch (xyzzy) {
abc += xyzzy + 1;
throw 64;
}
}
catch (xyzzy) {
def = xyzzy;
abc = -2;
}
[ def, abc ];
`, "64,-2")
})
}
func TestWith(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
var def;
with({ abc: 9 }) {
def = abc;
}
def;
`, 9)
test(`
var def;
with({ abc: function(){
return 11;
} }) {
def = abc();
}
def;
`, 11)
})
}
func TestSwitch(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
var abc = 0;
switch (0) {
default:
abc += 1;
case 1:
abc += 2;
case 2:
abc += 4;
case 3:
abc += 8;
}
abc;
`, 15)
test(`
abc = 0;
switch (3) {
default:
abc += 1;
case 1:
abc += 2;
case 2:
abc += 4;
case 3:
abc += 8;
}
abc;
`, 8)
test(`
abc = 0;
switch (60) {
case 1:
abc += 2;
case 2:
abc += 4;
case 3:
abc += 8;
}
abc;
`, 0)
})
}
func TestForIn(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
var abc;
for (property in { a: 1 }) {
abc = property;
}
abc;
`, "a")
test(`
var ghi;
for (property in new String("xyzzy")) {
ghi = property;
}
ghi;
`, "4")
})
}
func TestFor(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
var abc = 7;
for (i = 0; i < 3; i += 1) {
abc += 1;
}
abc;
`, 10)
test(`
abc = 7;
for (i = 0; i < 3; i += 1) {
abc += 1;
if (i == 1) {
break;
}
}
abc;
`, 9)
test(`
abc = 7;
for (i = 0; i < 3; i += 1) {
if (i == 2) {
continue;
}
abc += 1;
}
abc;
`, 9)
test(`
abc = 0;
for (;;) {
abc += 1;
if (abc == 3)
break;
}
abc;
`, 3)
test(`
for (abc = 0; ;) {
abc += 1;
if (abc == 3)
break;
}
abc;
`, 3)
test(`
for (abc = 0; ; abc += 1) {
abc += 1;
if (abc == 3)
break;
}
abc;
`, 3)
})
}
func TestLabelled(t *testing.T) {
tt(t, func() {
test, _ := test()
// TODO Add emergency break
test(`
xyzzy: for (var abc = 0; abc <= 0; abc++) {
for (var def = 0; def <= 1; def++) {
if (def === 0) {
continue xyzzy;
} else {
}
}
}
`)
test(`
abc = 0
def:
while (true) {
while (true) {
abc = abc + 1
if (abc > 11) {
break def;
}
}
}
abc;
`, 12)
test(`
abc = 0
def:
do {
do {
abc = abc + 1
if (abc > 11) {
break def;
}
} while (true)
} while (true)
abc;
`, 12)
})
}
func TestConditional(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
[ true ? false : true, true ? 1 : 0, false ? 3.14159 : "abc" ];
`, "false,1,abc")
})
}
func TestArrayLiteral(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
[ 1, , 3.14159 ];
`, "1,,3.14159")
})
}
func TestAssignment(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`
var abc = 1;
abc;
`, 1)
test(`
abc += 2;
abc;
`, 3)
})
}
func TestBinaryOperation(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`0 == 1`, false)
test(`1 == "1"`, true)
test(`0 === 1`, false)
test(`1 === "1"`, false)
test(`"1" === "1"`, true)
})
}
func Test_typeof(t *testing.T) {
tt(t, func() {
test, _ := test()
test(`typeof abc`, "undefined")
test(`typeof abc === 'undefined'`, true)
test(`typeof {}`, "object")
test(`typeof null`, "object")
})
}
func Test_PrimitiveValueObjectValue(t *testing.T) {
tt(t, func() {
test, _ := test()
Number11 := test(`new Number(11)`)
is(Number11.float64(), 11)
})
}
// Test_eval covers direct eval: writes into the calling scope, a
// SyntaxError surfaced from evaluated source, and eval("this") inside a
// constructor. Later scripts read state set by earlier ones; do not reorder.
func Test_eval(t *testing.T) {
	tt(t, func() {
		test, _ := test()
		// FIXME terst, Is this correct?
		test(`
var abc = 1;
`, "undefined")
		test(`
eval("abc += 1");
`, 2)
		// A direct eval inside a function sees that function's own abc.
		test(`
(function(){
var abc = 11;
eval("abc += 1");
return abc;
})();
`, 12)
		test(`abc`, 2)
		// Invalid source handed to eval raises a SyntaxError.
		test(`
(function(){
try {
eval("var prop = \\u2029;");
return false;
} catch (abc) {
return [ abc instanceof SyntaxError, abc.toString() ];
}
})();
`, "true,SyntaxError: Unexpected token ILLEGAL")
		// Direct eval of "this" inside a constructor is the new instance.
		test(`
function abc(){
this.THIS = eval("this");
}
var def = new abc();
def === def.THIS;
`, true)
	})
}
// Test_evalDirectIndirect checks that an indirect eval (called through a
// variable) evaluates in the global scope while a direct eval sees the
// enclosing function's scope.
func Test_evalDirectIndirect(t *testing.T) {
	tt(t, func() {
		test, _ := test()
		// (function () {return this;}()).abc = "global";
		test(`
var abc = "global";
(function(){
try {
var _eval = eval;
var abc = "function";
return [
_eval("\'global\' === abc"), // eval (Indirect)
eval("\'function\' === abc"), // eval (Direct)
];
} finally {
delete this.abc;
}
})();
`, "true,true")
	})
}
// TestError_URIError checks URIError construction and that decodeURI
// throws a URIError on malformed input.
func TestError_URIError(t *testing.T) {
	tt(t, func() {
		test, _ := test()
		test(`new URIError() instanceof URIError`, true)
		test(`
var abc
try {
decodeURI("http://example.com/ _^#%")
}
catch (def) {
abc = def instanceof URIError
}
abc
`, true)
	})
}
// TestTo checks the Go-side ToFloat and ToInteger conversions, including
// truncation of a fractional value by ToInteger.
func TestTo(t *testing.T) {
	tt(t, func() {
		test, _ := test()
		{
			value, _ := test(`"11"`).ToFloat()
			is(value, float64(11))
		}
		{
			value, _ := test(`"11"`).ToInteger()
			is(value, int64(11))
			value, _ = test(`1.1`).ToInteger()
			is(value, int64(1)) // fractional part is dropped
		}
	})
}
// TestShouldError checks that referencing an undeclared identifier raises
// a ReferenceError before the subsequent throw statement is reached.
func TestShouldError(t *testing.T) {
	tt(t, func() {
		test, _ := test()
		test(`raise:
xyzzy
throw new TypeError("Nothing happens.")
`, "ReferenceError: 'xyzzy' is not defined")
	})
}
// TestAPI drives the public Go API: vm.Get, Value.Object, Object.Call,
// Object.Get, Value.Call with an explicit this, and Object.Keys for plain
// objects and arrays. Later sections reuse VM state created by earlier
// scripts; do not reorder.
func TestAPI(t *testing.T) {
	tt(t, func() {
		test, vm := test()
		test(`
String.prototype.xyzzy = function(){
return this.length + 11 + (arguments[0] || 0)
}
abc = new String("xyzzy")
def = "Nothing happens."
abc.xyzzy()
`, 16)
		abc, _ := vm.Get("abc")
		def, _ := vm.Get("def")
		object := abc.Object()
		result, _ := object.Call("xyzzy")
		is(result, 16)
		result, _ = object.Call("xyzzy", 1)
		is(result, 17)
		value, _ := object.Get("xyzzy")
		// Calling the method value with def ("Nothing happens.", length 16)
		// as its this.
		result, _ = value.Call(def)
		is(result, 27)
		result, _ = value.Call(def, 3)
		is(result, 30)
		object = value.Object() // Object xyzzy
		result, _ = object.Value().Call(def, 3)
		is(result, 30)
		test(`
abc = {
'abc': 1,
'def': false,
3.14159: NaN,
};
abc['abc'];
`, 1)
		abc, err := vm.Get("abc")
		is(err, nil)
		object = abc.Object() // Object abc
		value, err = object.Get("abc")
		is(err, nil)
		is(value, 1)
		is(object.Keys(), []string{"abc", "def", "3.14159"})
		test(`
abc = [ 0, 1, 2, 3.14159, "abc", , ];
abc.def = true;
`)
		abc, err = vm.Get("abc")
		is(err, nil)
		object = abc.Object() // Object abc
		// Keys are the populated indices plus the expando property; the
		// trailing elision does not create an element.
		is(object.Keys(), []string{"0", "1", "2", "3", "4", "def"})
	})
}
// TestUnicode checks that a \uFFFF escape survives eval, length, and
// indexing as a single UTF-16 code unit.
func TestUnicode(t *testing.T) {
	tt(t, func() {
		test, _ := test()
		test(`var abc = eval("\"a\uFFFFa\"");`, "undefined")
		test(`abc.length`, 3)
		test(`abc != "aa"`, true)
		test("abc[1] === \"\uFFFF\"", true)
	})
}
// TestDotMember checks dot access on object properties, including reserved
// words (null) used as property names.
func TestDotMember(t *testing.T) {
	tt(t, func() {
		test, _ := test()
		test(`
abc = {
ghi: 11,
}
abc.def = "Xyzzy"
abc.null = "Nothing happens."
`)
		test(`abc.def`, "Xyzzy")
		test(`abc.null`, "Nothing happens.")
		test(`abc.ghi`, 11)
		// Reassigning abc replaces the whole object, dropping old properties.
		test(`
abc = {
null: 11,
}
`)
		test(`abc.def`, "undefined")
		test(`abc.null`, 11)
		test(`abc.ghi`, "undefined")
	})
}
// Test_stringToFloat checks parseNumber on an overflowing exponent
// (yields Infinity) and on malformed input (yields NaN).
func Test_stringToFloat(t *testing.T) {
	tt(t, func() {
		is(parseNumber("10e10000"), _Infinity)
		is(parseNumber("10e10_."), _NaN)
	})
}
// Test_delete checks the delete operator on a literal, on an undeclared
// global (with and without parentheses), and that deleting an accessor
// property never invokes its getter.
func Test_delete(t *testing.T) {
	tt(t, func() {
		test, _ := test()
		test(`
delete 42;
`, true)
		test(`
var abc = delete $_undefined_$;
abc = abc && delete ($_undefined_$);
abc;
`, true)
		// delete should not trigger get()
		test(`
var abc = {
get def() {
throw "Test_delete: delete should not trigger get()"
}
};
delete abc.def
`, true)
	})
}
// TestObject_defineOwnProperty checks defineProperties with a Boolean
// wrapper object acting as a descriptor, a full descriptor applied to an
// array index, and that an empty properties argument returns the target.
func TestObject_defineOwnProperty(t *testing.T) {
	tt(t, func() {
		test, _ := test()
		// A Boolean object carrying a configurable field is accepted as a
		// property descriptor; the property can then be deleted.
		test(`
var object = {};
var descriptor = new Boolean(false);
descriptor.configurable = true;
Object.defineProperties(object, {
property: descriptor
});
var abc = object.hasOwnProperty("property");
delete object.property;
var def = object.hasOwnProperty("property");
[ abc, def ];
`, "true,false")
		test(`
var object = [0, 1, 2];
Object.defineProperty(object, "0", {
value: 42,
writable: false,
enumerable: false,
configurable: false
});
var abc = Object.getOwnPropertyDescriptor(object, "0");
[ abc.value, abc.writable, abc.enumerable, abc.configurable ];
`, "42,false,false,false")
		test(`
var abc = { "xyzzy": 42 };
var def = Object.defineProperties(abc, "");
abc === def;
`, true)
	})
}
// Test_assignmentEvaluationOrder checks left-to-right operand evaluation:
// an embedded assignment is visible to the right operand but not vice versa.
func Test_assignmentEvaluationOrder(t *testing.T) {
	tt(t, func() {
		test, _ := test()
		test(`
var abc = 0;
((abc = 1) & abc);
`, 1)
		test(`
var abc = 0;
(abc & (abc = 1));
`, 0)
	})
}
// TestOttoCall checks vm.Call with various this values, a missing call
// target (expected error), a method expression on an array literal, and a
// Go struct passed as an argument.
func TestOttoCall(t *testing.T) {
	tt(t, func() {
		vm := New()
		_, err := vm.Run(`
var abc = {
ghi: 1,
def: function(def){
var ghi = 0;
if (this.ghi) {
ghi = this.ghi;
}
return "def: " + (def + 3.14159 + ghi);
}
};
function structFunc(s) {
return s.Val;
}
`)
		is(err, nil)
		// nil this: abc.def still resolves abc as its receiver, so ghi is 1.
		value, err := vm.Call(`abc.def`, nil, 2)
		is(err, nil)
		is(value, "def: 6.14159")
		// An explicit non-abc this has no ghi, so ghi stays 0.
		value, err = vm.Call(`abc.def`, "", 2)
		is(err, nil)
		is(value, "def: 5.14159")
		// Do not attempt to do a ToValue on a this of nil
		value, err = vm.Call(`jkl.def`, nil, 1, 2, 3)
		is(err, "!=", nil)
		is(value, "undefined")
		value, err = vm.Call(`[ 1, 2, 3, undefined, 4 ].concat`, nil, 5, 6, 7, "abc")
		is(err, nil)
		is(value, "1,2,3,,4,5,6,7,abc")
		s := struct{ Val int }{Val: 10}
		value, err = vm.Call("structFunc", nil, s)
		is(err, nil)
		is(value, 10)
	})
}
// TestOttoCall_new checks constructing an object via Otto.Call with the
// "new" prefix from inside a Go-defined function.
func TestOttoCall_new(t *testing.T) {
	tt(t, func() {
		test, vm := test()
		vm.Set("abc", func(call FunctionCall) Value {
			value, err := call.Otto.Call(`new Object`, nil, "Nothing happens.")
			is(err, nil)
			return value
		})
		test(`
def = abc();
[ def, def instanceof String ];
`, "Nothing happens.,true")
	})
}
// TestOttoCall_newWithBrackets checks new applied to a bracket-accessed
// constructor whose property name (default) is a reserved word.
func TestOttoCall_newWithBrackets(t *testing.T) {
	tt(t, func() {
		test, vm := test()
		_, err := vm.Run(`var a = {default: function B(x) { this.x = x; } }`)
		is(err, nil)
		test(`(new a['default'](1)).x`, 1)
	})
}
// TestOttoCall_throw checks that a throw issued via Otto.Call from inside
// a Go-defined function propagates to the surrounding JavaScript try/catch.
func TestOttoCall_throw(t *testing.T) {
	// FIXME? (Been broken for a while)
	// Looks like this has been broken for a while... what
	// behavior do we want here?
	//
	// BUG FIX: this previously used `if true { return }`, which makes the
	// broken test report as PASSED. t.Skip records it as skipped instead,
	// so `go test -v` shows the suppression honestly.
	t.Skip("broken for a while; see FIXME above for the open behavioral question")
	tt(t, func() {
		test, vm := test()
		vm.Set("abc", func(call FunctionCall) Value {
			if false {
				call.Otto.Call(`throw eval`, nil, "({ def: 3.14159 })")
			}
			call.Otto.Call(`throw Error`, nil, "abcdef")
			return Value{}
		})
		// TODO try { abc(); } catch (err) { error = err }
		// Possible unrelated error case:
		// If error is not declared beforehand, is later referencing it a ReferenceError?
		// Should the catch { } declare error in the outer scope?
		test(`
var error;
try {
abc();
}
catch (err) {
error = err;
}
[ error instanceof Error, error.message, error.def ];
`, "true,abcdef,")
		vm.Set("def", func(call FunctionCall) Value {
			call.Otto.Call(`throw new Object`, nil, 3.14159)
			return UndefinedValue()
		})
		test(`
try {
def();
}
catch (err) {
error = err;
}
[ error instanceof Error, error.message, error.def, typeof error, error, error instanceof Number ];
`, "false,,,object,3.14159,true")
	})
}
// TestOttoCopy checks that Copy produces an independent runtime: both VMs
// run the shared definitions, and later mutations in the copy (including
// closure-captured state) do not leak back into the original, or vice versa.
func TestOttoCopy(t *testing.T) {
	tt(t, func() {
		vm0 := New()
		vm0.Run(`
var abc = function() {
return "Xyzzy";
};
function def() {
return abc() + (0 + {});
}
`)
		value, err := vm0.Run(`
def();
`)
		is(err, nil)
		is(value, "Xyzzy0[object Object]")
		vm1 := vm0.Copy()
		value, err = vm1.Run(`
def();
`)
		is(err, nil)
		is(value, "Xyzzy0[object Object]")
		// Redefine abc in the copy only; the original must be unaffected.
		vm1.Run(`
abc = function() {
return 3.14159;
};
`)
		value, err = vm1.Run(`
def();
`)
		is(err, nil)
		is(value, "3.141590[object Object]")
		value, err = vm0.Run(`
def();
`)
		is(err, nil)
		is(value, "Xyzzy0[object Object]")
		{
			// Closure-captured state (jkl) and globals diverge independently
			// in each VM after the copy.
			vm0 := New()
			vm0.Run(`
var global = (function () {return this;}())
var abc = 0;
var vm = "vm0";
var def = (function(){
var jkl = 0;
var abc = function() {
global.abc += 1;
jkl += 1;
return 1;
};
return function() {
return [ vm, global.abc, jkl, abc() ];
};
})();
`)
			value, err := vm0.Run(`
def();
`)
			is(err, nil)
			is(value, "vm0,0,0,1")
			vm1 := vm0.Copy()
			vm1.Set("vm", "vm1")
			value, err = vm1.Run(`
def();
`)
			is(err, nil)
			is(value, "vm1,1,1,1")
			value, err = vm0.Run(`
def();
`)
			is(err, nil)
			is(value, "vm0,1,1,1")
			value, err = vm1.Run(`
def();
`)
			is(err, nil)
			is(value, "vm1,2,2,1")
		}
	})
}
// TestOttoCall_clone checks that clone() keeps per-runtime bookkeeping
// pointers consistent, shares prototypes correctly, and gives each clone an
// independent variable environment (globals, closures, and methods).
//
// BUG FIX: the toString section previously asserted on values while
// silently dropping the accompanying err from Run; failed runs would have
// surfaced as confusing value mismatches. Each Run is now checked.
func TestOttoCall_clone(t *testing.T) {
	tt(t, func() {
		vm := New().clone()
		rt := vm.runtime
		{
			// FIXME terst, Check how this comparison is done
			is(rt.global.Array.prototype, rt.global.FunctionPrototype)
			is(rt.global.ArrayPrototype, "!=", nil)
			is(rt.global.Array.runtime, rt)
			is(rt.global.Array.prototype.runtime, rt)
			is(rt.global.Array.get("prototype")._object().runtime, rt)
		}
		{
			value, err := vm.Run(`[ 1, 2, 3 ].toString()`)
			is(err, nil)
			is(value, "1,2,3")
		}
		{
			value, err := vm.Run(`[ 1, 2, 3 ]`)
			is(err, nil)
			is(value, "1,2,3")
			object := value._object()
			is(object, "!=", nil)
			is(object.prototype, rt.global.ArrayPrototype)
			value, err = vm.Run(`Array.prototype`)
			is(err, nil)
			object = value._object()
			is(object.runtime, rt)
			is(object, "!=", nil)
			is(object, rt.global.ArrayPrototype)
		}
		{
			// A clone's globals diverge from its parent after cloning.
			otto1 := New()
			_, err := otto1.Run(`
var abc = 1;
var def = 2;
`)
			is(err, nil)
			otto2 := otto1.clone()
			value, err := otto2.Run(`abc += 1; abc;`)
			is(err, nil)
			is(value, 2)
			value, err = otto1.Run(`abc += 4; abc;`)
			is(err, nil)
			is(value, 5)
		}
		{
			// Closure state is likewise independent per clone.
			vm1 := New()
			_, err := vm1.Run(`
var abc = 1;
var def = function(value) {
abc += value;
return abc;
}
`)
			is(err, nil)
			vm2 := vm1.clone()
			value, err := vm2.Run(`def(1)`)
			is(err, nil)
			is(value, 2)
			value, err = vm1.Run(`def(4)`)
			is(err, nil)
			is(value, 5)
		}
		{
			// Method calls through a shared object graph stay independent.
			vm1 := New()
			_, err := vm1.Run(`
var abc = {
ghi: 1,
jkl: function(value) {
this.ghi += value;
return this.ghi;
}
};
var def = {
abc: abc
};
`)
			is(err, nil)
			otto2 := vm1.clone()
			value, err := otto2.Run(`def.abc.jkl(1)`)
			is(err, nil)
			is(value, 2)
			value, err = vm1.Run(`def.abc.jkl(4)`)
			is(err, nil)
			is(value, 5)
		}
		{
			// Function sources survive cloning; redefinition in the clone
			// does not leak back to the parent.
			vm1 := New()
			_, err := vm1.Run(`
var abc = function() { return "abc"; };
var def = function() { return "def"; };
`)
			is(err, nil)
			vm2 := vm1.clone()
			value, err := vm2.Run(`
[ abc.toString(), def.toString() ];
`)
			is(err, nil)
			is(value, `function() { return "abc"; },function() { return "def"; }`)
			_, err = vm2.Run(`
var def = function() { return "ghi"; };
`)
			is(err, nil)
			value, err = vm1.Run(`
[ abc.toString(), def.toString() ];
`)
			is(err, nil)
			is(value, `function() { return "abc"; },function() { return "def"; }`)
			value, err = vm2.Run(`
[ abc.toString(), def.toString() ];
`)
			is(err, nil)
			is(value, `function() { return "abc"; },function() { return "ghi"; }`)
		}
	})
}
// TestOttoRun checks vm.Run with every accepted input kind: a pre-parsed
// *ast.Program (re-runnable, with VM state accumulating across runs),
// source as a string, []byte, and io.Reader, and a compiled *Script.
func TestOttoRun(t *testing.T) {
	tt(t, func() {
		vm := New()
		program, err := parser.ParseFile(nil, "", "", 0)
		is(err, nil)
		value, err := vm.Run(program)
		is(err, nil)
		is(value, UndefinedValue())
		program, err = parser.ParseFile(nil, "", "2 + 2", 0)
		is(err, nil)
		value, err = vm.Run(program)
		is(err, nil)
		is(value, 4)
		value, err = vm.Run(program)
		is(err, nil)
		is(value, 4)
		program, err = parser.ParseFile(nil, "", "var abc; if (!abc) abc = 0; abc += 2; abc;", 0)
		// BUG FIX: this parse error was previously unchecked; a parse
		// failure would have surfaced later as a confusing Run failure.
		is(err, nil)
		value, err = vm.Run(program)
		is(err, nil)
		is(value, 2)
		value, err = vm.Run(program)
		is(err, nil)
		is(value, 4)
		value, err = vm.Run(program)
		is(err, nil)
		is(value, 6)
		{
			// The same source keeps incrementing abc regardless of input kind.
			src := []byte("var abc; if (!abc) abc = 0; abc += 2; abc;")
			value, err = vm.Run(src)
			is(err, nil)
			is(value, 8)
			value, err = vm.Run(bytes.NewBuffer(src))
			is(err, nil)
			is(value, 10)
			value, err = vm.Run(io.Reader(bytes.NewBuffer(src)))
			is(err, nil)
			is(value, 12)
		}
		{
			script, err := vm.Compile("", `var abc; if (!abc) abc = 0; abc += 2; abc;`)
			is(err, nil)
			value, err = vm.Run(script)
			is(err, nil)
			is(value, 14)
			value, err = vm.Run(script)
			is(err, nil)
			is(value, 16)
			is(script.String(), "// \nvar abc; if (!abc) abc = 0; abc += 2; abc;")
		}
	})
}
// This generates functions to be used by the test below. The arguments are
// `src`, which is something that otto can execute, and `expected`, which is
// what the result of executing `src` should be.
func makeTestOttoEvalFunction(src, expected interface{}) func(c FunctionCall) Value {
	return func(c FunctionCall) Value {
		result, err := c.Otto.Eval(src)
		is(err, nil)
		if err != nil {
			// A failed Eval leaves nothing meaningful to export; abort hard.
			panic(err)
		}
		exported, exportErr := result.Export()
		is(exportErr, nil)
		if exportErr != nil {
			panic(exportErr)
		}
		is(exported, expected)
		return result
	}
}
// TestOttoEval checks Otto.Eval called from native functions: it reads and
// writes variables in the *caller's* JavaScript stack frame (the locals of
// function t below), and also that Eval works with only the global scope.
func TestOttoEval(t *testing.T) {
	tt(t, func() {
		vm := New()
		vm.Set("x1", makeTestOttoEvalFunction(`a`, 1))
		vm.Set("y1", makeTestOttoEvalFunction(`b`, "hello"))
		vm.Set("z1", makeTestOttoEvalFunction(`c`, true))
		vm.Set("w", makeTestOttoEvalFunction(`a = 2; b = 'what'; c = false; null`, nil))
		vm.Set("x2", makeTestOttoEvalFunction(`a`, 2))
		vm.Set("y2", makeTestOttoEvalFunction(`b`, "what"))
		vm.Set("z2", makeTestOttoEvalFunction(`c`, false))
		// note that these variables are defined in the scope of function `t`,
		// so would not usually be available to the functions called below.
		//
		// this is _not_ the recommended use case for `Eval` - instead it's
		// intended to be used in `debugger` handlers. this code here is the
		// equivalent of reading behind the current stack frame in C...
		// technically valid, but completely insane.
		//
		// makes for a good test case though.
		_, err := vm.Run(`(function t() {
var a = 1;
var b = 'hello';
var c = true;
x1();
y1();
z1();
w();
x2();
y2();
z2();
}())`)
		is(err, nil)
	})
	// this test makes sure that `Eval` doesn't explode if the VM doesn't have
	// a scope other than global defined.
	tt(t, func() {
		vm := New()
		_, err := vm.Eval("null")
		is(err, nil)
		vm.Set("a", 1)
		vm.Set("b", 2)
		v, err := vm.Eval("a + b")
		is(err, nil)
		r, err := v.Export()
		is(err, nil)
		is(r, 3)
	})
}
// Test_objectLength checks objectLength against a Go slice, a JS array, a
// String wrapper object, and a primitive string (no backing object, so 0).
func Test_objectLength(t *testing.T) {
	tt(t, func() {
		_, vm := test()
		value := vm.Set("abc", []string{"jkl", "mno"})
		is(objectLength(value._object()), 2)
		value, _ = vm.Run(`[1, 2, 3]`)
		is(objectLength(value._object()), 3)
		value, _ = vm.Run(`new String("abcdefghi")`)
		is(objectLength(value._object()), 9)
		value, _ = vm.Run(`"abcdefghi"`)
		is(objectLength(value._object()), 0)
	})
}
// BenchmarkNew measures the cost of constructing a fresh Otto runtime.
func BenchmarkNew(b *testing.B) {
	for i := 0; i < b.N; i++ {
		New()
	}
}
// BenchmarkClone measures cloning an existing runtime; one-time setup is
// excluded from the measurement via ResetTimer.
func BenchmarkClone(b *testing.B) {
	vm := New()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		vm.clone()
	}
}
|
package gc
import (
"sync"
"time"
"github.com/coreos/agro"
)
// blocksByINode is a collector that garbage-collects blocks by INode.
type blocksByINode struct {
	mut       sync.Mutex           // NOTE(review): declared but never taken in this file — confirm which fields it is meant to guard (presumably last)
	mds       agro.MetadataService // source of volume metadata
	blocks    agro.BlockStore      // block store the collector operates on
	stopChan  chan bool            // closed by Stop to end the main loop
	forceChan chan bool            // signalled by Force to trigger an immediate pass
	last      time.Time            // returned by LastComplete; initialized to the Unix epoch and not updated in this revision
}
// NewBlocksByINodeGC constructs a blocksByINode collector over the given
// metadata service and block store. last starts at the Unix epoch,
// meaning "no pass has ever completed". Call Start to begin collecting.
func NewBlocksByINodeGC(mds agro.MetadataService, blocks agro.BlockStore) GC {
	return &blocksByINode{
		mds:    mds,
		blocks: blocks,
		last:   time.Unix(0, 0),
	}
}
// Start wires up fresh stop/force channels and launches the collector's
// main loop in its own goroutine.
func (b *blocksByINode) Start() {
	b.stopChan = make(chan bool)
	b.forceChan = make(chan bool)
	go blocksByINodeMain(b, b.stopChan, b.forceChan)
}
// Stop ends the main loop by closing the stop channel. Call exactly once
// after Start; a second Stop panics on the double close.
func (b *blocksByINode) Stop() {
	close(b.stopChan)
}
// Force requests an immediate GC pass. The channel is unbuffered, so this
// blocks until the main loop reaches the top of its select.
func (b *blocksByINode) Force() {
	b.forceChan <- true
}
// LastComplete reports when the collector last finished a full pass
// (the Unix epoch until one completes).
//
// BUG FIX: b.last was read without taking b.mut, racing with any future
// writer on the GC goroutine; the struct declares the mutex for exactly
// this cross-goroutine access, so take it here (writers must do the same).
func (b *blocksByINode) LastComplete() time.Time {
	b.mut.Lock()
	defer b.mut.Unlock()
	return b.last
}
// gc collects a single volume. Not yet implemented in this revision; the
// main loop calls it as a no-op placeholder.
func (b *blocksByINode) gc(volume string) {
}
// blocksByINodeMain is the collector's main loop. It wakes every
// DefaultGCWait (or immediately when forced), then sweeps every volume,
// pausing between volumes unless the pass was forced. Closing stop exits
// from either select via the labelled break.
func blocksByINodeMain(b *blocksByINode, stop chan bool, force chan bool) {
	clog.Debug("bbi: starting blocksByInode")
all:
	for {
		forced := false
		select {
		case <-time.After(DefaultGCWait):
			clog.Trace("bbi: top of gc")
		case <-stop:
			break all
		case <-force:
			forced = true
			clog.Debug("bbi: forcing")
		}
		volumes, err := b.mds.GetVolumes()
		if err != nil {
			// Best-effort: log and continue with whatever list we received.
			clog.Error("couldn't get volumes", err)
		}
		for _, v := range volumes {
			b.gc(v)
			if forced {
				// A forced pass sweeps all volumes back-to-back.
				continue
			}
			select {
			case <-time.After(DefaultGCWait):
			case <-stop:
				break all
			}
		}
	}
	clog.Debug("bbi: ending blocksByInode")
}
implement the blocks-by-inode gc algorithm
package gc
import (
"sync"
"time"
"golang.org/x/net/context"
"github.com/coreos/agro"
)
// blocksByINode is a collector that garbage-collects blocks belonging to
// dead INodes. It implements the GC interface.
type blocksByINode struct {
	mut       sync.Mutex           // NOTE(review): declared but never taken in this file — confirm which fields it guards (presumably last)
	mds       agro.MetadataService // source of volume IDs and liveness maps
	blocks    agro.BlockStore      // store whose INode blocks are deleted
	stopChan  chan bool            // closed by Stop to end the main loop
	forceChan chan bool            // signalled by Force to trigger an immediate pass
	last      time.Time            // returned by LastComplete; initialized to the Unix epoch and not updated in this excerpt
}
// NewBlocksByINodeGC constructs a blocksByINode collector over the given
// metadata service and block store. last starts at the Unix epoch,
// meaning "no pass has ever completed". Call Start to begin collecting.
func NewBlocksByINodeGC(mds agro.MetadataService, blocks agro.BlockStore) GC {
	return &blocksByINode{
		mds:    mds,
		blocks: blocks,
		last:   time.Unix(0, 0),
	}
}
// Start creates fresh stop/force channels and launches the collector's
// main loop in its own goroutine.
func (b *blocksByINode) Start() {
	sc := make(chan bool)
	fc := make(chan bool)
	b.stopChan = sc
	b.forceChan = fc
	go blocksByINodeMain(b, sc, fc)
}
// Stop ends the main loop by closing the stop channel. Call exactly once
// after Start; a second Stop panics on the double close.
func (b *blocksByINode) Stop() {
	close(b.stopChan)
}
// Force requests an immediate GC pass. The channel is unbuffered, so this
// blocks until the main loop reaches the top of its select.
func (b *blocksByINode) Force() {
	b.forceChan <- true
}
// LastComplete reports when the collector last finished a full pass
// (the Unix epoch until one completes).
//
// BUG FIX: b.last was read without taking b.mut, racing with any future
// writer on the GC goroutine; the struct declares the mutex for exactly
// this cross-goroutine access, so take it here (writers must do the same).
func (b *blocksByINode) LastComplete() time.Time {
	b.mut.Lock()
	defer b.mut.Unlock()
	return b.last
}
// gc runs one collection pass over a single volume: it takes the volume's
// liveness map, masks out every INode still referenced by a held set, and
// deletes the blocks of each remaining (dead) INode.
//
// BUG FIX: errors from GetVolumeID and GetVolumeLiveness were logged but
// execution continued with a zero vid and a likely-nil deadmap — risking a
// nil-pointer panic on deadmap.AndNot/Iterator or deletions against the
// wrong volume. Each error now aborts the pass for this volume.
func (b *blocksByINode) gc(volume string) {
	vid, err := b.mds.GetVolumeID(volume)
	if err != nil {
		clog.Errorf("bbi: got error getting volume ID for %s %v", volume, err)
		return
	}
	deadmap, held, err := b.mds.GetVolumeLiveness(volume)
	if err != nil {
		clog.Errorf("bbi: got error gcing volume %s %v", volume, err)
		return
	}
	// Anything referenced by a held set is still live; mask it out.
	for _, x := range held {
		deadmap.AndNot(x)
	}
	it := deadmap.Iterator()
	for it.HasNext() {
		i := it.Next()
		ref := agro.INodeRef{
			Volume: vid,
			INode:  agro.INodeID(i),
		}
		b.blocks.DeleteINodeBlocks(context.TODO(), ref)
	}
}
// blocksByINodeMain is the collector's main loop. It wakes every
// DefaultGCWait (or immediately when forced), then sweeps every volume via
// b.gc, pausing between volumes unless the pass was forced. Closing stop
// exits from either select via the labelled break.
func blocksByINodeMain(b *blocksByINode, stop chan bool, force chan bool) {
	clog.Debug("bbi: starting blocksByInode")
all:
	for {
		forced := false
		select {
		case <-time.After(DefaultGCWait):
			clog.Trace("bbi: top of gc")
		case <-stop:
			break all
		case <-force:
			forced = true
			clog.Debug("bbi: forcing")
		}
		volumes, err := b.mds.GetVolumes()
		if err != nil {
			// Best-effort: log and continue with whatever list we received.
			clog.Error("couldn't get volumes", err)
		}
		for _, v := range volumes {
			b.gc(v)
			if forced {
				// A forced pass sweeps all volumes back-to-back.
				continue
			}
			select {
			case <-time.After(DefaultGCWait):
			case <-stop:
				break all
			}
		}
	}
	clog.Debug("bbi: ending blocksByInode")
}
|
// Copyright 2014 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License. See the AUTHORS file
// for names of contributors.
//
// Author: Andrew Bonventre (andybons@gmail.com)
package encoding
import (
"bytes"
"fmt"
"testing"
)
// TestEncodeDecodeString checks that EncodeString produces the expected
// total encoded length (prefix and terminator included), ends with the
// terminator byte, and round-trips through DecodeString.
func TestEncodeDecodeString(t *testing.T) {
	testCases := []struct {
		text   string
		length int
	}{
		{"foo", 5},
		{"baaaar", 8},
		{"bazz", 6},
		{"Hello, 世界", 15},
		{"", 2},
		{"abcd", 6},
		{"☺☻☹", 11},
		{"日a本b語ç日ð本Ê語þ日¥本¼語i日©", 49},
		{"日a本b語ç日ð本Ê語þ日¥本¼語i日©日a本b語ç日ð本Ê語þ日¥本¼語i日©日a本b語ç日ð本Ê語þ日¥本¼語i日©", 143},
	}
	for _, c := range testCases {
		buf := EncodeString(nil, c.text)
		n := len(buf)
		if n != c.length {
			t.Errorf("short write for %q: %d bytes written; %d expected", c.text, n, c.length)
		}
		if buf[n-1] != orderedEncodingTerminator {
			t.Errorf("expected terminating byte (%#x), got %#x", orderedEncodingTerminator, buf[n-1])
		}
		s := DecodeString(buf)
		if s != c.text {
			t.Errorf("error decoding string: expected %q, got %q", c.text, s)
		}
	}
}
// TestInvalidUTF8String checks that EncodeString panics when given a
// string that is not valid UTF-8.
func TestInvalidUTF8String(t *testing.T) {
	defer func() {
		if r := recover(); r == nil {
			t.Error("expected panic due to invalid utf-8 string")
		}
	}()
	EncodeString(nil, "\x80\x80\x80\x80")
}
// TestStringNullBytePanic checks that EncodeString panics on an embedded
// 0x00 byte, which would collide with the encoding's framing.
func TestStringNullBytePanic(t *testing.T) {
	defer func() {
		if r := recover(); r == nil {
			t.Error("expected panic due to intervening 0x00 byte in string")
		}
	}()
	EncodeString(nil, string([]byte{0x00, 0x01, 0x02}))
}
// TestStringNoTerminatorPanic checks that DecodeString panics on encoded
// input missing the trailing terminator byte.
func TestStringNoTerminatorPanic(t *testing.T) {
	defer func() {
		if r := recover(); r == nil {
			t.Error("expected panic due to absence of terminator byte in encoded string")
		}
	}()
	DecodeString([]byte{orderedEncodingText, byte('a')})
}
// TestEncodeBinary checks EncodeBinary against fixed expected encodings,
// including the empty blob (prefix plus terminator only).
func TestEncodeBinary(t *testing.T) {
	// TODO(andybons): Write more of these.
	testCases := []struct{ blob, encoded []byte }{
		{[]byte{}, []byte{orderedEncodingBinary, orderedEncodingTerminator}},
		{[]byte{0xff}, []byte{orderedEncodingBinary, 0xff, 0x40}},
		{[]byte("Hello, 世界"), []byte{orderedEncodingBinary, 0xa4, 0x99, 0xad, 0xc6, 0xe3, 0xbc, 0xd8, 0xa0, 0xf2, 0xae, 0x92, 0xee, 0xbc, 0xd6, 0x18}},
	}
	for _, c := range testCases {
		b := EncodeBinary(c.blob)
		if !bytes.Equal(b, c.encoded) {
			t.Errorf("unexpected mismatch of encoded value: expected %s, got %s", prettyBytes(c.encoded), prettyBytes(b))
		}
	}
}
// prettyBytes renders b as a bracketed, space-separated list of hex bytes,
// e.g. []byte{0x1, 0xff} -> "[0x1 0xff]".
func prettyBytes(b []byte) string {
	out := "["
	for i, v := range b {
		if i > 0 {
			out += " "
		}
		out += fmt.Sprintf("%#x", v)
	}
	return out + "]"
}
// TestIntMandE checks the mantissa/exponent decomposition of integers used
// by the ordered encoding; note the magnitude (not the sign) determines
// E and M, so 1 and -1 decompose identically.
func TestIntMandE(t *testing.T) {
	testCases := []struct {
		Value int64
		E     int
		M     []byte
	}{
		{1, 1, []byte{0x02}},
		{-1, 1, []byte{0x02}},
		{10, 1, []byte{0x14}},
		{99, 1, []byte{0xc6}},
		{-99, 1, []byte{0xc6}},
		{100, 2, []byte{0x03, 0x00}},
		{110, 2, []byte{0x03, 0x14}},
		{999, 2, []byte{0x13, 0xc6}},
		{1234, 2, []byte{0x19, 0x44}},
		{9999, 2, []byte{0xc7, 0xc6}},
		{10000, 3, []byte{0x03, 0x01, 0x00}},
		{10001, 3, []byte{0x03, 0x01, 0x02}},
		{12345, 3, []byte{0x03, 0x2f, 0x5a}},
		{123450, 3, []byte{0x19, 0x45, 0x64}},
		{9223372036854775807, 10, []byte{0x13, 0x2d, 0x43, 0x91, 0x07, 0x89, 0x6d, 0x9b, 0x75, 0x0e}},
	}
	for _, c := range testCases {
		if e, m := intMandE(c.Value); e != c.E || !bytes.Equal(m, c.M) {
			t.Errorf("unexpected mismatch in E/M for %v. expected E=%v | M=%+v, got E=%v | M=%+v", c.Value, c.E, c.M, e, m)
		}
	}
}
// TestEncodeInt checks fixed encodings for integers across the full int64
// range and, because the cases are listed in increasing value order, also
// verifies that the encoding preserves lexicographic byte ordering.
func TestEncodeInt(t *testing.T) {
	testCases := []struct {
		Value    int64
		Encoding []byte
	}{
		{-9223372036854775808, []byte{0x09, 0xec, 0xd2, 0xbc, 0x6e, 0xf8, 0x76, 0x92, 0x64, 0x8a, 0xef}},
		{-9223372036854775807, []byte{0x09, 0xec, 0xd2, 0xbc, 0x6e, 0xf8, 0x76, 0x92, 0x64, 0x8a, 0xf1}},
		{-10000, []byte{0x10, 0xfc, 0xfe, 0xff}},
		{-9999, []byte{0x11, 0x38, 0x39}},
		{-100, []byte{0x11, 0xfc, 0xff}},
		{-99, []byte{0x12, 0x39}},
		{-1, []byte{0x12, 0xfd}},
		{1, []byte{0x18, 0x02}},
		{10, []byte{0x18, 0x14}},
		{99, []byte{0x18, 0xc6}},
		{100, []byte{0x19, 0x03, 0x00}},
		{110, []byte{0x19, 0x03, 0x14}},
		{999, []byte{0x19, 0x13, 0xc6}},
		{1234, []byte{0x19, 0x19, 0x44}},
		{9999, []byte{0x19, 0xc7, 0xc6}},
		{10000, []byte{0x1a, 0x03, 0x01, 0x00}},
		{10001, []byte{0x1a, 0x03, 0x01, 0x02}},
		{12345, []byte{0x1a, 0x03, 0x2f, 0x5a}},
		{123450, []byte{0x1a, 0x19, 0x45, 0x64}},
		{9223372036854775807, []byte{0x21, 0x13, 0x2d, 0x43, 0x91, 0x07, 0x89, 0x6d, 0x9b, 0x75, 0x0e}},
	}
	for i, c := range testCases {
		enc := EncodeInt(c.Value)
		if !bytes.Equal(enc, c.Encoding) {
			t.Errorf("unexpected mismatch for %v. expected %v, got %v", c.Value, c.Encoding, enc)
		}
		if i > 0 {
			// Encodings must sort in the same order as the values.
			if bytes.Compare(testCases[i-1].Encoding, enc) >= 0 {
				t.Errorf("expected %v to be less than %v", testCases[i-1].Encoding, enc)
			}
		}
	}
}
// disabledTestFloatMandE checks the mantissa/exponent decomposition of
// floats. It is disabled by its non-Test name (the framework will not run
// it) pending resolution of float-precision issues.
func disabledTestFloatMandE(t *testing.T) {
	testCases := []struct {
		Value float64
		E     int
		M     []byte
	}{
		{1.0, 1, []byte{0x02}},
		{10.0, 1, []byte{0x14}},
		{99.0, 1, []byte{0xc6}},
		{99.01, 1, []byte{0xc7, 0x02}},
		{99.0001, 1, []byte{0xc7, 0x01, 0x02}},
		{100.0, 2, []byte{0x02}},
		{100.01, 2, []byte{0x03, 0x01, 0x02}},
		{100.1, 2, []byte{0x03, 0x01, 0x14}},
		{1234, 2, []byte{0x19, 0x44}},
		{9999, 2, []byte{0xc7, 0xc6}},
		{9999.000001, 2, []byte{0xc7, 0xc7, 0x01, 0x01, 0x02}},
		{9999.000009, 2, []byte{0xc7, 0xc7, 0x01, 0x01, 0x12}},
		{9999.00001, 2, []byte{0xc7, 0xc7, 0x01, 0x01, 0x14}},
		{9999.00009, 2, []byte{0xc7, 0xc7, 0x01, 0x01, 0xb4}},
		{9999.000099, 2, []byte{0xc7, 0xc7, 0x01, 0x01, 0xc6}},
		{9999.0001, 2, []byte{0xc7, 0xc7, 0x01, 0x02}},
		{9999.001, 2, []byte{0xc7, 0xc7, 0x01, 0x14}},
		{9999.01, 2, []byte{0xc7, 0xc7, 0x02}},
		{9999.1, 2, []byte{0xc7, 0xc7, 0x14}},
		{10000, 3, []byte{0x02}},
		{10001, 3, []byte{0x03, 0x01, 0x02}},
		{12345, 3, []byte{0x03, 0x2f, 0x5a}},
		{123450, 3, []byte{0x19, 0x45, 0x64}},
		{1234.5, 2, []byte{0x19, 0x45, 0x64}},
		{12.345, 1, []byte{0x19, 0x45, 0x64}},
		{0.123, 0, []byte{0x19, 0x3c}},
		{0.0123, 0, []byte{0x03, 0x2e}},
		{0.00123, -1, []byte{0x19, 0x3c}},
		{9223372036854775807, 10, []byte{0x13, 0x2d, 0x43, 0x91, 0x07, 0x89, 0x6d, 0x9b, 0x75, 0x0e}},
	}
	for _, c := range testCases {
		if e, m := floatMandE(c.Value); e != c.E || !bytes.Equal(m, c.M) {
			t.Errorf("unexpected mismatch in E/M for %v. expected E=%v | M=%+v, got E=%v | M=%+v", c.Value, c.E, c.M, e, m)
		}
	}
}
round out lexicographical ordering tests in key encodings
Summary:
All that's left is working out the float-precision issues and testing ordering for those as well.
Towards a fix for #31.
Test Plan: unit tests
Reviewers: petermattis, spencerkimball
Reviewed By: petermattis, spencerkimball
Subscribers: team
Differential Revision: http://phabricator.cockroachdb.org/D133
// Copyright 2014 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License. See the AUTHORS file
// for names of contributors.
//
// Author: Andrew Bonventre (andybons@gmail.com)
package encoding
import (
"bytes"
"fmt"
"sort"
"testing"
)
// TestEncodeDecodeString checks that EncodeString produces the expected
// total encoded length (prefix and terminator included), ends with the
// terminator byte, and round-trips through DecodeString.
func TestEncodeDecodeString(t *testing.T) {
	testCases := []struct {
		text   string
		length int
	}{
		{"foo", 5},
		{"baaaar", 8},
		{"bazz", 6},
		{"Hello, 世界", 15},
		{"", 2},
		{"abcd", 6},
		{"☺☻☹", 11},
		{"日a本b語ç日ð本Ê語þ日¥本¼語i日©", 49},
		{"日a本b語ç日ð本Ê語þ日¥本¼語i日©日a本b語ç日ð本Ê語þ日¥本¼語i日©日a本b語ç日ð本Ê語þ日¥本¼語i日©", 143},
	}
	for _, c := range testCases {
		buf := EncodeString(nil, c.text)
		n := len(buf)
		if n != c.length {
			t.Errorf("short write for %q: %d bytes written; %d expected", c.text, n, c.length)
		}
		if buf[n-1] != orderedEncodingTerminator {
			t.Errorf("expected terminating byte (%#x), got %#x", orderedEncodingTerminator, buf[n-1])
		}
		s := DecodeString(buf)
		if s != c.text {
			t.Errorf("error decoding string: expected %q, got %q", c.text, s)
		}
	}
}
// TestStringOrdering checks that the string encoding is order-preserving:
// sorting the encoded byte slices must yield the same sequence as sorting
// the plain strings, verified by decoding position-by-position.
func TestStringOrdering(t *testing.T) {
	strs := []string{
		"foo",
		"baaaar",
		"bazz",
		"Hello, 世界",
		"",
		"abcd",
		"☺☻☹",
		"日a本b語ç日ð本Ê語þ日¥本¼語i日©",
		"日a本b語ç日ð本Ê語þ日¥本¼語i日©日a本b語ç日ð本Ê語þ日¥本¼語i日©日a本b語ç日ð本Ê語þ日¥本¼語i日©",
	}
	encodedStrs := make(byteSlice, len(strs))
	for i := range strs {
		encodedStrs[i] = EncodeString(nil, strs[i])
	}
	// Sort the plain strings and the encoded forms independently; if the
	// encoding is order-preserving they end up in matching order.
	sort.Strings(strs)
	sort.Sort(encodedStrs)
	for i := range strs {
		decoded := DecodeString(encodedStrs[i])
		if decoded != strs[i] {
			t.Errorf("mismatched ordering at index %d: expected: %s, got %s", i, strs[i], decoded)
		}
	}
}
// TestInvalidUTF8String checks that EncodeString panics when given a
// string that is not valid UTF-8.
func TestInvalidUTF8String(t *testing.T) {
	defer func() {
		if r := recover(); r == nil {
			t.Error("expected panic due to invalid utf-8 string")
		}
	}()
	EncodeString(nil, "\x80\x80\x80\x80")
}
// TestStringNullBytePanic checks that EncodeString panics on an embedded
// 0x00 byte, which would collide with the encoding's framing.
func TestStringNullBytePanic(t *testing.T) {
	defer func() {
		if r := recover(); r == nil {
			t.Error("expected panic due to intervening 0x00 byte in string")
		}
	}()
	EncodeString(nil, string([]byte{0x00, 0x01, 0x02}))
}
// TestStringNoTerminatorPanic checks that DecodeString panics on encoded
// input missing the trailing terminator byte.
func TestStringNoTerminatorPanic(t *testing.T) {
	defer func() {
		if r := recover(); r == nil {
			t.Error("expected panic due to absence of terminator byte in encoded string")
		}
	}()
	DecodeString([]byte{orderedEncodingText, byte('a')})
}
// TestEncodeBinary checks EncodeBinary against fixed expected encodings of
// blobs with lengths 0 through 8 plus multi-byte UTF-8 content, then
// verifies the encoding preserves lexicographic ordering by sorting blobs
// and encodings independently and matching them up.
func TestEncodeBinary(t *testing.T) {
	testCases := []struct{ blob, encoded []byte }{
		{[]byte{}, []byte{orderedEncodingBinary, orderedEncodingTerminator}},
		{[]byte{0xff}, []byte{orderedEncodingBinary, 0xff, 0x40}},
		{[]byte("1"), []byte{orderedEncodingBinary, 0x98, 0x40}},
		{[]byte("22"), []byte{orderedEncodingBinary, 0x99, 0x8c, 0x40}},
		{[]byte("333"), []byte{orderedEncodingBinary, 0x99, 0xcc, 0xe6, 0x30}},
		{[]byte("4444"), []byte{orderedEncodingBinary, 0x9a, 0x8d, 0x86, 0xc3, 0x20}},
		{[]byte("55555"), []byte{orderedEncodingBinary, 0x9a, 0xcd, 0xa6, 0xd3, 0xa9, 0x54}},
		{[]byte("666666"), []byte{orderedEncodingBinary, 0x9b, 0x8d, 0xc6, 0xe3, 0xb1, 0xd8, 0x6c}},
		{[]byte("7777777"), []byte{orderedEncodingBinary, 0x9b, 0xcd, 0xe6, 0xf3, 0xb9, 0xdc, 0xee, 0x37}},
		{[]byte("88888888"), []byte{orderedEncodingBinary, 0x9c, 0x8e, 0x87, 0x83, 0xc1, 0xe0, 0xf0, 0xb8, 0x9c, 0x0}},
		{[]byte("Carl"), []byte{orderedEncodingBinary, 0xa1, 0xd8, 0xae, 0xa6, 0x60}},
		{[]byte("Hello, 世界"), []byte{orderedEncodingBinary, 0xa4, 0x99, 0xad, 0xc6, 0xe3, 0xbc, 0xd8, 0xa0, 0xf2, 0xae, 0x92, 0xee, 0xbc, 0xd6, 0x18}},
	}
	for _, c := range testCases {
		b := EncodeBinary(c.blob)
		if !bytes.Equal(b, c.encoded) {
			t.Errorf("unexpected mismatch of encoded value: expected %s, got %s", prettyBytes(c.encoded), prettyBytes(b))
		}
	}
	blobs := make(byteSlice, len(testCases))
	encodedBlobs := make(byteSlice, len(testCases))
	for i, c := range testCases {
		blobs[i] = c.blob
		encodedBlobs[i] = c.encoded
	}
	sort.Sort(blobs)
	sort.Sort(encodedBlobs)
	for i := range encodedBlobs {
		// TODO(andybons): Use DecodeBinary once that's landed.
		// Until then, reverse-map each encoded value to its blob by linear
		// search through the fixture table.
		var decoded []byte
		for _, c := range testCases {
			if bytes.Equal(encodedBlobs[i], c.encoded) {
				decoded = c.blob
			}
		}
		if !bytes.Equal(decoded, blobs[i]) {
			t.Errorf("mismatched ordering at index %d: expected: %s, got %s", i, prettyBytes(blobs[i]), prettyBytes(decoded))
		}
	}
}
// prettyBytes renders b as a bracketed, comma-separated list of hex bytes,
// e.g. []byte{0x1, 0xff} -> "[0x1, 0xff]".
func prettyBytes(b []byte) string {
	out := "["
	for i, v := range b {
		if i > 0 {
			out += ", "
		}
		out += fmt.Sprintf("%#x", v)
	}
	return out + "]"
}
// TestIntMandE checks the mantissa/exponent decomposition of integers used
// by the ordered encoding; note the magnitude (not the sign) determines
// E and M, so 1 and -1 decompose identically.
func TestIntMandE(t *testing.T) {
	testCases := []struct {
		Value int64
		E     int
		M     []byte
	}{
		{1, 1, []byte{0x02}},
		{-1, 1, []byte{0x02}},
		{10, 1, []byte{0x14}},
		{99, 1, []byte{0xc6}},
		{-99, 1, []byte{0xc6}},
		{100, 2, []byte{0x03, 0x00}},
		{110, 2, []byte{0x03, 0x14}},
		{999, 2, []byte{0x13, 0xc6}},
		{1234, 2, []byte{0x19, 0x44}},
		{9999, 2, []byte{0xc7, 0xc6}},
		{10000, 3, []byte{0x03, 0x01, 0x00}},
		{10001, 3, []byte{0x03, 0x01, 0x02}},
		{12345, 3, []byte{0x03, 0x2f, 0x5a}},
		{123450, 3, []byte{0x19, 0x45, 0x64}},
		{9223372036854775807, 10, []byte{0x13, 0x2d, 0x43, 0x91, 0x07, 0x89, 0x6d, 0x9b, 0x75, 0x0e}},
	}
	for _, c := range testCases {
		if e, m := intMandE(c.Value); e != c.E || !bytes.Equal(m, c.M) {
			t.Errorf("unexpected mismatch in E/M for %v. expected E=%v | M=%+v, got E=%v | M=%+v", c.Value, c.E, c.M, e, m)
		}
	}
}
// TestEncodeInt checks fixed encodings for integers across the full int64
// range and, because the cases are listed in increasing value order, also
// verifies that the encoding preserves lexicographic byte ordering.
func TestEncodeInt(t *testing.T) {
	testCases := []struct {
		Value    int64
		Encoding []byte
	}{
		{-9223372036854775808, []byte{0x09, 0xec, 0xd2, 0xbc, 0x6e, 0xf8, 0x76, 0x92, 0x64, 0x8a, 0xef}},
		{-9223372036854775807, []byte{0x09, 0xec, 0xd2, 0xbc, 0x6e, 0xf8, 0x76, 0x92, 0x64, 0x8a, 0xf1}},
		{-10000, []byte{0x10, 0xfc, 0xfe, 0xff}},
		{-9999, []byte{0x11, 0x38, 0x39}},
		{-100, []byte{0x11, 0xfc, 0xff}},
		{-99, []byte{0x12, 0x39}},
		{-1, []byte{0x12, 0xfd}},
		{1, []byte{0x18, 0x02}},
		{10, []byte{0x18, 0x14}},
		{99, []byte{0x18, 0xc6}},
		{100, []byte{0x19, 0x03, 0x00}},
		{110, []byte{0x19, 0x03, 0x14}},
		{999, []byte{0x19, 0x13, 0xc6}},
		{1234, []byte{0x19, 0x19, 0x44}},
		{9999, []byte{0x19, 0xc7, 0xc6}},
		{10000, []byte{0x1a, 0x03, 0x01, 0x00}},
		{10001, []byte{0x1a, 0x03, 0x01, 0x02}},
		{12345, []byte{0x1a, 0x03, 0x2f, 0x5a}},
		{123450, []byte{0x1a, 0x19, 0x45, 0x64}},
		{9223372036854775807, []byte{0x21, 0x13, 0x2d, 0x43, 0x91, 0x07, 0x89, 0x6d, 0x9b, 0x75, 0x0e}},
	}
	for i, c := range testCases {
		enc := EncodeInt(c.Value)
		if !bytes.Equal(enc, c.Encoding) {
			t.Errorf("unexpected mismatch for %v. expected %v, got %v", c.Value, c.Encoding, enc)
		}
		if i > 0 {
			// Encodings must sort in the same order as the values.
			if bytes.Compare(testCases[i-1].Encoding, enc) >= 0 {
				t.Errorf("expected %v to be less than %v", testCases[i-1].Encoding, enc)
			}
		}
	}
}
// disabledTestFloatMandE checks the mantissa/exponent decomposition of
// floats. It is disabled by its non-Test name (the framework will not run
// it) pending resolution of float-precision issues.
func disabledTestFloatMandE(t *testing.T) {
	testCases := []struct {
		Value float64
		E     int
		M     []byte
	}{
		{1.0, 1, []byte{0x02}},
		{10.0, 1, []byte{0x14}},
		{99.0, 1, []byte{0xc6}},
		{99.01, 1, []byte{0xc7, 0x02}},
		{99.0001, 1, []byte{0xc7, 0x01, 0x02}},
		{100.0, 2, []byte{0x02}},
		{100.01, 2, []byte{0x03, 0x01, 0x02}},
		{100.1, 2, []byte{0x03, 0x01, 0x14}},
		{1234, 2, []byte{0x19, 0x44}},
		{9999, 2, []byte{0xc7, 0xc6}},
		{9999.000001, 2, []byte{0xc7, 0xc7, 0x01, 0x01, 0x02}},
		{9999.000009, 2, []byte{0xc7, 0xc7, 0x01, 0x01, 0x12}},
		{9999.00001, 2, []byte{0xc7, 0xc7, 0x01, 0x01, 0x14}},
		{9999.00009, 2, []byte{0xc7, 0xc7, 0x01, 0x01, 0xb4}},
		{9999.000099, 2, []byte{0xc7, 0xc7, 0x01, 0x01, 0xc6}},
		{9999.0001, 2, []byte{0xc7, 0xc7, 0x01, 0x02}},
		{9999.001, 2, []byte{0xc7, 0xc7, 0x01, 0x14}},
		{9999.01, 2, []byte{0xc7, 0xc7, 0x02}},
		{9999.1, 2, []byte{0xc7, 0xc7, 0x14}},
		{10000, 3, []byte{0x02}},
		{10001, 3, []byte{0x03, 0x01, 0x02}},
		{12345, 3, []byte{0x03, 0x2f, 0x5a}},
		{123450, 3, []byte{0x19, 0x45, 0x64}},
		{1234.5, 2, []byte{0x19, 0x45, 0x64}},
		{12.345, 1, []byte{0x19, 0x45, 0x64}},
		{0.123, 0, []byte{0x19, 0x3c}},
		{0.0123, 0, []byte{0x03, 0x2e}},
		{0.00123, -1, []byte{0x19, 0x3c}},
		{9223372036854775807, 10, []byte{0x13, 0x2d, 0x43, 0x91, 0x07, 0x89, 0x6d, 0x9b, 0x75, 0x0e}},
	}
	for _, c := range testCases {
		if e, m := floatMandE(c.Value); e != c.E || !bytes.Equal(m, c.M) {
			t.Errorf("unexpected mismatch in E/M for %v. expected E=%v | M=%+v, got E=%v | M=%+v", c.Value, c.E, c.M, e, m)
		}
	}
}
|
//: ----------------------------------------------------------------------------
//: Copyright (C) 2017 Verizon. All Rights Reserved.
//: All Rights Reserved
//:
//: file: ipfix.go
//: details: ipfix decoders handler
//: author: Mehrdad Arshad Rad
//: date: 02/01/2017
//:
//: Licensed under the Apache License, Version 2.0 (the "License");
//: you may not use this file except in compliance with the License.
//: You may obtain a copy of the License at
//:
//: http://www.apache.org/licenses/LICENSE-2.0
//:
//: Unless required by applicable law or agreed to in writing, software
//: distributed under the License is distributed on an "AS IS" BASIS,
//: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//: See the License for the specific language governing permissions and
//: limitations under the License.
//: ----------------------------------------------------------------------------
package main
import (
"bytes"
"net"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/VerizonDigital/vflow/ipfix"
"github.com/VerizonDigital/vflow/mirror"
"github.com/VerizonDigital/vflow/producer"
)
// IPFIX represents IPFIX collector
type IPFIX struct {
	port    int                // UDP port to listen on (opts.IPFIXPort)
	addr    string             // listen address; empty means all interfaces
	workers int                // number of decoder workers started at boot
	stop    bool               // set by shutdown() to end the read loop
	stats   IPFIXStats         // runtime counters, updated atomically
	pool    chan chan struct{} // per-worker quit channels, used for scaling down
}

// IPFIXUDPMsg represents IPFIX UDP data
type IPFIXUDPMsg struct {
	raddr *net.UDPAddr // source (exporter) address of the datagram
	body  []byte       // raw payload, backed by the ipfixBuffer pool
}

// IPFIXStats represents IPFIX stats
type IPFIXStats struct {
	UDPQueue       int    // pending messages in ipfixUDPCh
	UDPMirrorQueue int    // pending messages in ipfixMCh
	MessageQueue   int    // pending messages in ipfixMQCh
	UDPCount       uint64 // total datagrams received
	DecodedCount   uint64 // total messages decoded successfully
	MQErrorCount   uint64 // total MQ publish errors
	Workers        int32  // current number of decoder workers
}
var (
	ipfixUDPCh = make(chan IPFIXUDPMsg, 1000) // raw datagrams: run() -> workers
	ipfixMCh   = make(chan IPFIXUDPMsg, 1000) // payload copies for the mirror dispatcher
	ipfixMQCh  = make(chan []byte, 1000)      // JSON-encoded messages -> MQ producer

	// ipfixMirrorEnabled is set once mirrorIPFIXDispatcher is running.
	ipfixMirrorEnabled bool

	// templates memory cache
	mCache ipfix.MemCache

	// ipfix udp payload pool; buffers are handed out at full capacity
	// (opts.IPFIXUDPSize) and must be returned at full capacity.
	ipfixBuffer = &sync.Pool{
		New: func() interface{} {
			return make([]byte, opts.IPFIXUDPSize)
		},
	}
)
// NewIPFIX constructs IPFIX
func NewIPFIX() *IPFIX {
	collector := &IPFIX{
		port:    opts.IPFIXPort,
		workers: opts.IPFIXWorkers,
	}
	collector.pool = make(chan chan struct{}, maxWorkers)
	return collector
}
// run starts the IPFIX collector: it binds the UDP listener, launches the
// decoder workers, the template RPC service, the mirror dispatcher and the
// MQ producer, then reads datagrams until shutdown() sets i.stop.
func (i *IPFIX) run() {
	// exit if the ipfix is disabled
	if !opts.IPFIXEnabled {
		logger.Println("ipfix has been disabled")
		return
	}

	hostPort := net.JoinHostPort(i.addr, strconv.Itoa(i.port))
	udpAddr, err := net.ResolveUDPAddr("udp", hostPort)
	if err != nil {
		// previously this error was discarded; with a nil address
		// ListenUDP silently binds an arbitrary local port
		logger.Fatal(err)
	}

	conn, err := net.ListenUDP("udp", udpAddr)
	if err != nil {
		logger.Fatal(err)
	}

	atomic.AddInt32(&i.stats.Workers, int32(i.workers))
	for n := 0; n < i.workers; n++ {
		go func() {
			wQuit := make(chan struct{})
			i.pool <- wQuit
			i.ipfixWorker(wQuit)
		}()
	}

	logger.Printf("ipfix is running (workers#: %d)", i.workers)

	mCache = ipfix.GetCache(opts.IPFIXTplCacheFile)
	go ipfix.RPC(mCache, &ipfix.RPCConfig{
		Enabled: opts.IPFIXRPCEnabled,
		Logger:  logger,
	})

	go mirrorIPFIXDispatcher(ipfixMCh)

	go func() {
		p := producer.NewProducer(opts.MQName)

		p.MQConfigFile = opts.MQConfigFile
		p.MQErrorCount = &i.stats.MQErrorCount
		p.Logger = logger
		p.Chan = ipfixMQCh
		p.Topic = "ipfix"

		if err := p.Run(); err != nil {
			logger.Fatal(err)
		}
	}()

	go func() {
		if !opts.DynWorkers {
			logger.Println("IPFIX dynamic worker disabled")
			return
		}

		i.dynWorkers()
	}()

	for !i.stop {
		b := ipfixBuffer.Get().([]byte)
		// short read deadline so the loop can observe i.stop promptly
		conn.SetReadDeadline(time.Now().Add(time.Second))
		n, raddr, err := conn.ReadFromUDP(b)
		if err != nil {
			continue
		}
		atomic.AddUint64(&i.stats.UDPCount, 1)
		ipfixUDPCh <- IPFIXUDPMsg{raddr, b[:n]}
	}
}
// shutdown stops the IPFIX collector gracefully: it halts the UDP read
// loop, gives in-flight datagrams a moment to drain, persists the template
// cache, and closes the worker channel.
func (i *IPFIX) shutdown() {
	// exit if the ipfix is disabled
	if !opts.IPFIXEnabled {
		logger.Println("ipfix disabled")
		return
	}

	// stop reading from UDP listener
	i.stop = true
	logger.Println("stopping ipfix service gracefully ...")
	time.Sleep(1 * time.Second)

	// dump the templates to storage
	if err := mCache.Dump(opts.IPFIXTplCacheFile); err != nil {
		// message fixed: was the double negative "couldn't not dump template"
		logger.Println("couldn't dump template", err)
	}

	// logging and close UDP channel
	logger.Println("ipfix has been shutdown")
	close(ipfixUDPCh)
}
// ipfixWorker decodes IPFIX datagrams from ipfixUDPCh until wQuit is closed
// or the channel is closed. Decoded messages carrying data sets are JSON
// encoded and handed to the MQ producer; when mirroring is enabled, a copy
// of the raw payload is pushed to the mirror dispatcher as well.
func (i *IPFIX) ipfixWorker(wQuit chan struct{}) {
	var (
		decodedMsg *ipfix.Message
		mirror     IPFIXUDPMsg
		msg        = IPFIXUDPMsg{body: ipfixBuffer.Get().([]byte)}
		buf        = new(bytes.Buffer)
		err        error
		ok         bool
		b          []byte
	)

LOOP:
	for {
		// return the previous payload buffer to the pool at full capacity
		ipfixBuffer.Put(msg.body[:opts.IPFIXUDPSize])
		buf.Reset()

		select {
		case <-wQuit:
			break LOOP
		case msg, ok = <-ipfixUDPCh:
			if !ok {
				break LOOP
			}
		}

		if opts.Verbose {
			logger.Printf("rcvd ipfix data from: %s, size: %d bytes",
				msg.raddr, len(msg.body))
		}

		if ipfixMirrorEnabled {
			// copy the payload into a separate pooled buffer, since the
			// original buffer is reused by this worker on the next iteration
			mirror.body = ipfixBuffer.Get().([]byte)
			mirror.raddr = msg.raddr
			mirror.body = append(mirror.body[:0], msg.body...)

			select {
			case ipfixMCh <- mirror:
			default:
				// mirror queue full - drop rather than block decoding
			}
		}

		d := ipfix.NewDecoder(msg.raddr.IP, msg.body)
		if decodedMsg, err = d.Decode(mCache); err != nil {
			logger.Println(err)
			continue
		}

		atomic.AddUint64(&i.stats.DecodedCount, 1)

		if decodedMsg.DataSets != nil {
			b, err = decodedMsg.JSONMarshal(buf)
			if err != nil {
				logger.Println(err)
				continue
			}

			select {
			case ipfixMQCh <- b:
			default:
				// MQ queue full - drop the message
			}
		}

		if opts.Verbose {
			logger.Println(string(b))
		}
	}
}
// status returns a snapshot of the IPFIX collector counters and queue sizes.
func (i *IPFIX) status() *IPFIXStats {
	var snapshot IPFIXStats
	snapshot.UDPQueue = len(ipfixUDPCh)
	snapshot.UDPMirrorQueue = len(ipfixMCh)
	snapshot.MessageQueue = len(ipfixMQCh)
	snapshot.UDPCount = atomic.LoadUint64(&i.stats.UDPCount)
	snapshot.DecodedCount = atomic.LoadUint64(&i.stats.DecodedCount)
	snapshot.MQErrorCount = atomic.LoadUint64(&i.stats.MQErrorCount)
	snapshot.Workers = atomic.LoadInt32(&i.stats.Workers)
	return &snapshot
}
// mirrorIPFIX forwards received IPFIX payloads to dst:port over a raw
// socket, spoofing the original exporter address as the IP source. It
// blocks reading from ch and only returns on a connection/send error.
func mirrorIPFIX(dst net.IP, port int, ch chan IPFIXUDPMsg) error {
	var (
		// The packet buffer must fit the largest possible IP header, the
		// UDP header and a full-size payload. The previous hard-coded 1500
		// bytes caused a slice-bounds panic below once len(msg.body)
		// approached opts.IPFIXUDPSize.
		packet = make([]byte, opts.IPFIXUDPSize+mirror.IPv6HLen+mirror.UDPHLen)
		msg    IPFIXUDPMsg
		pLen   int
		err    error
		ipHdr  []byte
		ipHLen int
		ipv4   bool
		ip     mirror.IP
	)

	conn, err := mirror.NewRawConn(dst)
	if err != nil {
		return err
	}

	udp := mirror.UDP{55117, port, 0, 0}
	udpHdr := udp.Marshal()

	if dst.To4() != nil {
		ipv4 = true
	}

	if ipv4 {
		ip = mirror.NewIPv4HeaderTpl(mirror.UDPProto)
		ipHdr = ip.Marshal()
		ipHLen = mirror.IPv4HLen
	} else {
		ip = mirror.NewIPv6HeaderTpl(mirror.UDPProto)
		ipHdr = ip.Marshal()
		ipHLen = mirror.IPv6HLen
	}

	for {
		msg = <-ch
		pLen = len(msg.body)

		ip.SetAddrs(ipHdr, msg.raddr.IP, dst)
		ip.SetLen(ipHdr, pLen+mirror.UDPHLen)

		udp.SetLen(udpHdr, pLen)
		// IPv6 checksum mandatory
		if !ipv4 {
			udp.SetChecksum()
		}

		copy(packet[0:ipHLen], ipHdr)
		copy(packet[ipHLen:ipHLen+8], udpHdr)
		copy(packet[ipHLen+8:], msg.body)

		// return the payload buffer to the pool at full capacity
		ipfixBuffer.Put(msg.body[:opts.IPFIXUDPSize])

		if err = conn.Send(packet[0 : ipHLen+8+pLen]); err != nil {
			return err
		}
	}
}
// mirrorIPFIXDispatcher fans incoming mirror messages out to per-family
// (IPv4 / IPv6) mirror workers. It is a no-op when no mirror address is
// configured.
func mirrorIPFIXDispatcher(ch chan IPFIXUDPMsg) {
	var (
		ch4 = make(chan IPFIXUDPMsg, 1000)
		ch6 = make(chan IPFIXUDPMsg, 1000)
		msg IPFIXUDPMsg
	)

	if opts.IPFIXMirrorAddr == "" {
		return
	}

	// The mirror destination is fixed for the process lifetime; parse it
	// once instead of on every loop iteration.
	dst := net.ParseIP(opts.IPFIXMirrorAddr)

	for w := 0; w < opts.IPFIXMirrorWorkers; w++ {
		if dst.To4() != nil {
			go mirrorIPFIX(dst, opts.IPFIXMirrorPort, ch4)
		} else {
			go mirrorIPFIX(dst, opts.IPFIXMirrorPort, ch6)
		}
	}

	ipfixMirrorEnabled = true
	logger.Printf("ipfix mirror service is running (workers#: %d) ...", opts.IPFIXMirrorWorkers)

	for {
		msg = <-ch
		if msg.raddr.IP.To4() != nil {
			ch4 <- msg
		} else {
			ch6 <- msg
		}
	}
}
// dynWorkers adjusts the number of IPFIX decoder workers based on the
// observed backlog of the IPFIX UDP channel: it scales up under sustained
// load and retires extra workers after a long idle period, never dropping
// below the configured base worker count.
func (i *IPFIX) dynWorkers() {
	var load, nSeq, newWorkers, workers, n int

	tick := time.Tick(120 * time.Second)

	for {
		<-tick
		load = 0

		// Sample the queue depth once a second for 30 seconds. The
		// original sampled sFlowUDPCh here, which made IPFIX scaling
		// react to sFlow traffic instead of its own backlog.
		for n = 0; n < 30; n++ {
			time.Sleep(1 * time.Second)
			load += len(ipfixUDPCh)
		}

		if load > 15 {
			switch {
			case load > 300:
				newWorkers = 100
			case load > 200:
				newWorkers = 60
			case load > 100:
				newWorkers = 40
			default:
				newWorkers = 30
			}

			workers = int(atomic.LoadInt32(&i.stats.Workers))
			if workers+newWorkers > maxWorkers {
				logger.Println("ipfix :: max out workers")
				continue
			}

			for n = 0; n < newWorkers; n++ {
				go func() {
					atomic.AddInt32(&i.stats.Workers, 1)
					wQuit := make(chan struct{})
					i.pool <- wQuit
					i.ipfixWorker(wQuit)
				}()
			}
		}

		if load == 0 {
			nSeq++
		} else {
			nSeq = 0
			continue
		}

		// Scale down after 16 consecutive idle windows, retiring at most
		// ten workers at a time and keeping at least i.workers alive.
		if nSeq > 15 {
			for n = 0; n < 10; n++ {
				if len(i.pool) > i.workers {
					atomic.AddInt32(&i.stats.Workers, -1)
					wQuit := <-i.pool
					close(wQuit)
				}
			}
			nSeq = 0
		}
	}
}
Fix hard-coded IPFIX UDP max size
//: ----------------------------------------------------------------------------
//: Copyright (C) 2017 Verizon. All Rights Reserved.
//: All Rights Reserved
//:
//: file: ipfix.go
//: details: ipfix decoders handler
//: author: Mehrdad Arshad Rad
//: date: 02/01/2017
//:
//: Licensed under the Apache License, Version 2.0 (the "License");
//: you may not use this file except in compliance with the License.
//: You may obtain a copy of the License at
//:
//: http://www.apache.org/licenses/LICENSE-2.0
//:
//: Unless required by applicable law or agreed to in writing, software
//: distributed under the License is distributed on an "AS IS" BASIS,
//: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//: See the License for the specific language governing permissions and
//: limitations under the License.
//: ----------------------------------------------------------------------------
package main
import (
"bytes"
"net"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/VerizonDigital/vflow/ipfix"
"github.com/VerizonDigital/vflow/mirror"
"github.com/VerizonDigital/vflow/producer"
)
// IPFIX represents IPFIX collector
type IPFIX struct {
	port    int                // UDP port to listen on (opts.IPFIXPort)
	addr    string             // listen address; empty means all interfaces
	workers int                // number of decoder workers started at boot
	stop    bool               // set by shutdown() to end the read loop
	stats   IPFIXStats         // runtime counters, updated atomically
	pool    chan chan struct{} // per-worker quit channels, used for scaling down
}

// IPFIXUDPMsg represents IPFIX UDP data
type IPFIXUDPMsg struct {
	raddr *net.UDPAddr // source (exporter) address of the datagram
	body  []byte       // raw payload, backed by the ipfixBuffer pool
}

// IPFIXStats represents IPFIX stats
type IPFIXStats struct {
	UDPQueue       int    // pending messages in ipfixUDPCh
	UDPMirrorQueue int    // pending messages in ipfixMCh
	MessageQueue   int    // pending messages in ipfixMQCh
	UDPCount       uint64 // total datagrams received
	DecodedCount   uint64 // total messages decoded successfully
	MQErrorCount   uint64 // total MQ publish errors
	Workers        int32  // current number of decoder workers
}
var (
	ipfixUDPCh = make(chan IPFIXUDPMsg, 1000) // raw datagrams: run() -> workers
	ipfixMCh   = make(chan IPFIXUDPMsg, 1000) // payload copies for the mirror dispatcher
	ipfixMQCh  = make(chan []byte, 1000)      // JSON-encoded messages -> MQ producer

	// ipfixMirrorEnabled is set once mirrorIPFIXDispatcher is running.
	ipfixMirrorEnabled bool

	// templates memory cache
	mCache ipfix.MemCache

	// ipfix udp payload pool; buffers are handed out at full capacity
	// (opts.IPFIXUDPSize) and must be returned at full capacity.
	ipfixBuffer = &sync.Pool{
		New: func() interface{} {
			return make([]byte, opts.IPFIXUDPSize)
		},
	}
)
// NewIPFIX constructs IPFIX
func NewIPFIX() *IPFIX {
	collector := &IPFIX{
		port:    opts.IPFIXPort,
		workers: opts.IPFIXWorkers,
	}
	collector.pool = make(chan chan struct{}, maxWorkers)
	return collector
}
// run starts the IPFIX collector: it binds the UDP listener, launches the
// decoder workers, the template RPC service, the mirror dispatcher and the
// MQ producer, then reads datagrams until shutdown() sets i.stop.
func (i *IPFIX) run() {
	// exit if the ipfix is disabled
	if !opts.IPFIXEnabled {
		logger.Println("ipfix has been disabled")
		return
	}

	hostPort := net.JoinHostPort(i.addr, strconv.Itoa(i.port))
	udpAddr, err := net.ResolveUDPAddr("udp", hostPort)
	if err != nil {
		// previously this error was discarded; with a nil address
		// ListenUDP silently binds an arbitrary local port
		logger.Fatal(err)
	}

	conn, err := net.ListenUDP("udp", udpAddr)
	if err != nil {
		logger.Fatal(err)
	}

	atomic.AddInt32(&i.stats.Workers, int32(i.workers))
	for n := 0; n < i.workers; n++ {
		go func() {
			wQuit := make(chan struct{})
			i.pool <- wQuit
			i.ipfixWorker(wQuit)
		}()
	}

	logger.Printf("ipfix is running (workers#: %d)", i.workers)

	mCache = ipfix.GetCache(opts.IPFIXTplCacheFile)
	go ipfix.RPC(mCache, &ipfix.RPCConfig{
		Enabled: opts.IPFIXRPCEnabled,
		Logger:  logger,
	})

	go mirrorIPFIXDispatcher(ipfixMCh)

	go func() {
		p := producer.NewProducer(opts.MQName)

		p.MQConfigFile = opts.MQConfigFile
		p.MQErrorCount = &i.stats.MQErrorCount
		p.Logger = logger
		p.Chan = ipfixMQCh
		p.Topic = "ipfix"

		if err := p.Run(); err != nil {
			logger.Fatal(err)
		}
	}()

	go func() {
		if !opts.DynWorkers {
			logger.Println("IPFIX dynamic worker disabled")
			return
		}

		i.dynWorkers()
	}()

	for !i.stop {
		b := ipfixBuffer.Get().([]byte)
		// short read deadline so the loop can observe i.stop promptly
		conn.SetReadDeadline(time.Now().Add(time.Second))
		n, raddr, err := conn.ReadFromUDP(b)
		if err != nil {
			continue
		}
		atomic.AddUint64(&i.stats.UDPCount, 1)
		ipfixUDPCh <- IPFIXUDPMsg{raddr, b[:n]}
	}
}
// shutdown stops the IPFIX collector gracefully: it halts the UDP read
// loop, gives in-flight datagrams a moment to drain, persists the template
// cache, and closes the worker channel.
func (i *IPFIX) shutdown() {
	// exit if the ipfix is disabled
	if !opts.IPFIXEnabled {
		logger.Println("ipfix disabled")
		return
	}

	// stop reading from UDP listener
	i.stop = true
	logger.Println("stopping ipfix service gracefully ...")
	time.Sleep(1 * time.Second)

	// dump the templates to storage
	if err := mCache.Dump(opts.IPFIXTplCacheFile); err != nil {
		// message fixed: was the double negative "couldn't not dump template"
		logger.Println("couldn't dump template", err)
	}

	// logging and close UDP channel
	logger.Println("ipfix has been shutdown")
	close(ipfixUDPCh)
}
// ipfixWorker decodes IPFIX datagrams from ipfixUDPCh until wQuit is closed
// or the channel is closed. Decoded messages carrying data sets are JSON
// encoded and handed to the MQ producer; when mirroring is enabled, a copy
// of the raw payload is pushed to the mirror dispatcher as well.
func (i *IPFIX) ipfixWorker(wQuit chan struct{}) {
	var (
		decodedMsg *ipfix.Message
		mirror     IPFIXUDPMsg
		msg        = IPFIXUDPMsg{body: ipfixBuffer.Get().([]byte)}
		buf        = new(bytes.Buffer)
		err        error
		ok         bool
		b          []byte
	)

LOOP:
	for {
		// return the previous payload buffer to the pool at full capacity
		ipfixBuffer.Put(msg.body[:opts.IPFIXUDPSize])
		buf.Reset()

		select {
		case <-wQuit:
			break LOOP
		case msg, ok = <-ipfixUDPCh:
			if !ok {
				break LOOP
			}
		}

		if opts.Verbose {
			logger.Printf("rcvd ipfix data from: %s, size: %d bytes",
				msg.raddr, len(msg.body))
		}

		if ipfixMirrorEnabled {
			// copy the payload into a separate pooled buffer, since the
			// original buffer is reused by this worker on the next iteration
			mirror.body = ipfixBuffer.Get().([]byte)
			mirror.raddr = msg.raddr
			mirror.body = append(mirror.body[:0], msg.body...)

			select {
			case ipfixMCh <- mirror:
			default:
				// mirror queue full - drop rather than block decoding
			}
		}

		d := ipfix.NewDecoder(msg.raddr.IP, msg.body)
		if decodedMsg, err = d.Decode(mCache); err != nil {
			logger.Println(err)
			continue
		}

		atomic.AddUint64(&i.stats.DecodedCount, 1)

		if decodedMsg.DataSets != nil {
			b, err = decodedMsg.JSONMarshal(buf)
			if err != nil {
				logger.Println(err)
				continue
			}

			select {
			case ipfixMQCh <- b:
			default:
				// MQ queue full - drop the message
			}
		}

		if opts.Verbose {
			logger.Println(string(b))
		}
	}
}
// status returns a snapshot of the IPFIX collector counters and queue sizes.
func (i *IPFIX) status() *IPFIXStats {
	var snapshot IPFIXStats
	snapshot.UDPQueue = len(ipfixUDPCh)
	snapshot.UDPMirrorQueue = len(ipfixMCh)
	snapshot.MessageQueue = len(ipfixMQCh)
	snapshot.UDPCount = atomic.LoadUint64(&i.stats.UDPCount)
	snapshot.DecodedCount = atomic.LoadUint64(&i.stats.DecodedCount)
	snapshot.MQErrorCount = atomic.LoadUint64(&i.stats.MQErrorCount)
	snapshot.Workers = atomic.LoadInt32(&i.stats.Workers)
	return &snapshot
}
// mirrorIPFIX forwards received IPFIX payloads to dst:port over a raw
// socket, spoofing the original exporter address as the IP source. It
// blocks reading from ch and only returns on a connection/send error.
func mirrorIPFIX(dst net.IP, port int, ch chan IPFIXUDPMsg) error {
	var (
		// The packet buffer must fit the largest possible IP header, the
		// UDP header and a full-size payload. Sizing it to only
		// opts.IPFIXUDPSize left no room for the headers, so the slice
		// expression below could panic for near-maximum payloads.
		packet = make([]byte, opts.IPFIXUDPSize+mirror.IPv6HLen+mirror.UDPHLen)
		msg    IPFIXUDPMsg
		pLen   int
		err    error
		ipHdr  []byte
		ipHLen int
		ipv4   bool
		ip     mirror.IP
	)

	conn, err := mirror.NewRawConn(dst)
	if err != nil {
		return err
	}

	udp := mirror.UDP{55117, port, 0, 0}
	udpHdr := udp.Marshal()

	if dst.To4() != nil {
		ipv4 = true
	}

	if ipv4 {
		ip = mirror.NewIPv4HeaderTpl(mirror.UDPProto)
		ipHdr = ip.Marshal()
		ipHLen = mirror.IPv4HLen
	} else {
		ip = mirror.NewIPv6HeaderTpl(mirror.UDPProto)
		ipHdr = ip.Marshal()
		ipHLen = mirror.IPv6HLen
	}

	for {
		msg = <-ch
		pLen = len(msg.body)

		ip.SetAddrs(ipHdr, msg.raddr.IP, dst)
		ip.SetLen(ipHdr, pLen+mirror.UDPHLen)

		udp.SetLen(udpHdr, pLen)
		// IPv6 checksum mandatory
		if !ipv4 {
			udp.SetChecksum()
		}

		copy(packet[0:ipHLen], ipHdr)
		copy(packet[ipHLen:ipHLen+8], udpHdr)
		copy(packet[ipHLen+8:], msg.body)

		// return the payload buffer to the pool at full capacity
		ipfixBuffer.Put(msg.body[:opts.IPFIXUDPSize])

		if err = conn.Send(packet[0 : ipHLen+8+pLen]); err != nil {
			return err
		}
	}
}
// mirrorIPFIXDispatcher fans incoming mirror messages out to per-family
// (IPv4 / IPv6) mirror workers. It is a no-op when no mirror address is
// configured.
func mirrorIPFIXDispatcher(ch chan IPFIXUDPMsg) {
	var (
		ch4 = make(chan IPFIXUDPMsg, 1000)
		ch6 = make(chan IPFIXUDPMsg, 1000)
		msg IPFIXUDPMsg
	)

	if opts.IPFIXMirrorAddr == "" {
		return
	}

	// The mirror destination is fixed for the process lifetime; parse it
	// once instead of on every loop iteration.
	dst := net.ParseIP(opts.IPFIXMirrorAddr)

	for w := 0; w < opts.IPFIXMirrorWorkers; w++ {
		if dst.To4() != nil {
			go mirrorIPFIX(dst, opts.IPFIXMirrorPort, ch4)
		} else {
			go mirrorIPFIX(dst, opts.IPFIXMirrorPort, ch6)
		}
	}

	ipfixMirrorEnabled = true
	logger.Printf("ipfix mirror service is running (workers#: %d) ...", opts.IPFIXMirrorWorkers)

	for {
		msg = <-ch
		if msg.raddr.IP.To4() != nil {
			ch4 <- msg
		} else {
			ch6 <- msg
		}
	}
}
// dynWorkers adjusts the number of IPFIX decoder workers based on the
// observed backlog of the IPFIX UDP channel: it scales up under sustained
// load and retires extra workers after a long idle period, never dropping
// below the configured base worker count.
func (i *IPFIX) dynWorkers() {
	var load, nSeq, newWorkers, workers, n int

	tick := time.Tick(120 * time.Second)

	for {
		<-tick
		load = 0

		// Sample the queue depth once a second for 30 seconds. The
		// original sampled sFlowUDPCh here, which made IPFIX scaling
		// react to sFlow traffic instead of its own backlog.
		for n = 0; n < 30; n++ {
			time.Sleep(1 * time.Second)
			load += len(ipfixUDPCh)
		}

		if load > 15 {
			switch {
			case load > 300:
				newWorkers = 100
			case load > 200:
				newWorkers = 60
			case load > 100:
				newWorkers = 40
			default:
				newWorkers = 30
			}

			workers = int(atomic.LoadInt32(&i.stats.Workers))
			if workers+newWorkers > maxWorkers {
				logger.Println("ipfix :: max out workers")
				continue
			}

			for n = 0; n < newWorkers; n++ {
				go func() {
					atomic.AddInt32(&i.stats.Workers, 1)
					wQuit := make(chan struct{})
					i.pool <- wQuit
					i.ipfixWorker(wQuit)
				}()
			}
		}

		if load == 0 {
			nSeq++
		} else {
			nSeq = 0
			continue
		}

		// Scale down after 16 consecutive idle windows, retiring at most
		// ten workers at a time and keeping at least i.workers alive.
		if nSeq > 15 {
			for n = 0; n < 10; n++ {
				if len(i.pool) > i.workers {
					atomic.AddInt32(&i.stats.Workers, -1)
					wQuit := <-i.pool
					close(wQuit)
				}
			}
			nSeq = 0
		}
	}
}
|
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"context"
"crypto/sha256"
"encoding/base64"
"fmt"
"math/rand"
"net/http"
"strconv"
"sync"
"time"
"github.com/golang/glog"
"github.com/google/certificate-transparency-go/client"
"github.com/google/certificate-transparency-go/schedule"
"github.com/google/certificate-transparency-go/tls"
"github.com/google/certificate-transparency-go/trillian/ctfe"
"github.com/google/certificate-transparency-go/trillian/ctfe/configpb"
"github.com/google/certificate-transparency-go/x509"
"github.com/google/trillian/monitoring"
"github.com/transparency-dev/merkle"
"github.com/transparency-dev/merkle/proof"
"github.com/transparency-dev/merkle/rfc6962"
ct "github.com/google/certificate-transparency-go"
)
const (
	// How many STHs and SCTs to hold on to.
	sthCount = 10
	sctCount = 10

	// How far beyond current tree size to request for invalid requests.
	invalidStretch = int64(1000000000)
)

var (
	// Metrics are all per-log (label "logid"), but may also be
	// per-entrypoint (label "ep") or per-return-code (label "rc").
	once        sync.Once            // guards one-time metric registration
	reqs        monitoring.Counter   // logid, ep => value
	errs        monitoring.Counter   // logid, ep => value
	rsps        monitoring.Counter   // logid, ep, rc => value
	rspLatency  monitoring.Histogram // logid, ep, rc => values
	invalidReqs monitoring.Counter   // logid, ep => value
)
// setupMetrics initializes all the exported metrics.
// It is invoked via once.Do (see newHammerState) so registration happens
// exactly once per process.
func setupMetrics(mf monitoring.MetricFactory) {
	reqs = mf.NewCounter("reqs", "Number of valid requests sent", "logid", "ep")
	errs = mf.NewCounter("errs", "Number of error responses received for valid requests", "logid", "ep")
	rsps = mf.NewCounter("rsps", "Number of responses received for valid requests", "logid", "ep", "rc")
	rspLatency = mf.NewHistogram("rsp_latency", "Latency of valid responses in seconds", "logid", "ep", "rc")
	invalidReqs = mf.NewCounter("invalid_reqs", "Number of deliberately-invalid requests sent", "logid", "ep")
}
// errSkip indicates that a test operation should be skipped.
type errSkip struct{}

// Error implements the error interface for errSkip.
func (e errSkip) Error() string {
	const skipMsg = "test operation skipped"
	return skipMsg
}
// Choice represents a random decision about a hammer operation.
type Choice string

// Constants for per-operation choices.
const (
	ParamTooBig    = Choice("ParamTooBig")
	Param2TooBig   = Choice("Param2TooBig")
	ParamNegative  = Choice("ParamNegative")
	ParamInvalid   = Choice("ParamInvalid")
	ParamsInverted = Choice("ParamsInverted")
	InvalidBase64  = Choice("InvalidBase64")
	// Malformed-chain variants used by add[Pre]ChainInvalid.
	EmptyChain     = Choice("EmptyChain")
	CertNotPrecert = Choice("CertNotPrecert")
	PrecertNotCert = Choice("PrecertNotCert")
	NoChainToRoot  = Choice("NoChainToRoot")
	UnparsableCert = Choice("UnparsableCert")
	// Which certificate to submit on valid add-[pre-]chain calls.
	NewCert   = Choice("NewCert")
	LastCert  = Choice("LastCert")
	FirstCert = Choice("FirstCert")
)

// Limiter is an interface to allow different rate limiters to be used with the
// hammer.
type Limiter interface {
	Wait()
}

// unLimited is a Limiter that never blocks.
type unLimited struct{}

// Wait returns immediately, imposing no rate limit.
func (u unLimited) Wait() {
}
// HammerConfig provides configuration for a stress/load test.
type HammerConfig struct {
	// Configuration for the log.
	LogCfg *configpb.LogConfig
	// How to create process-wide metrics.
	MetricFactory monitoring.MetricFactory
	// Maximum merge delay.
	MMD time.Duration
	// Certificate chain generator.
	ChainGenerator ChainGenerator
	// ClientPool provides the clients used to make requests.
	ClientPool ClientPool
	// Bias values to favor particular log operations.
	EPBias HammerBias
	// Range of how many entries to get.
	MinGetEntries, MaxGetEntries int
	// OversizedGetEntries governs whether get-entries requests that go beyond the
	// current tree size are allowed (with a truncated response expected).
	OversizedGetEntries bool
	// Number of operations to perform.
	Operations uint64
	// Limiter rate-limits the hammer's operations.
	Limiter Limiter
	// MaxParallelChains sets the upper limit for the number of parallel
	// add-*-chain requests to make when the biasing model says to perform an add.
	MaxParallelChains int
	// EmitInterval defines how frequently stats are logged.
	EmitInterval time.Duration
	// IgnoreErrors controls whether a hammer run fails immediately on any error.
	IgnoreErrors bool
	// MaxRetryDuration governs how long to keep retrying when IgnoreErrors is true.
	MaxRetryDuration time.Duration
	// RequestDeadline indicates the deadline to set on each request to the log.
	RequestDeadline time.Duration
	// DuplicateChance sets the probability of attempting to add a duplicate when
	// calling add[-pre]-chain (as the N in 1-in-N). Set to 0 to disable sending
	// duplicates.
	DuplicateChance int
	// StrictSTHConsistencySize if set to true will cause Hammer to only request
	// STH consistency proofs between tree sizes for which it's seen valid STHs.
	// If set to false, Hammer will request a consistency proof between the
	// current tree size, and a random smaller size greater than zero.
	StrictSTHConsistencySize bool
}
// HammerBias indicates the bias for selecting different log operations.
type HammerBias struct {
	Bias  map[ctfe.EntrypointName]int
	total int // sum of Bias values; recomputed by Choose on each call
	// InvalidChance gives the odds of performing an invalid operation, as the N in 1-in-N.
	InvalidChance map[ctfe.EntrypointName]int
}
// Choose randomly picks an operation to perform according to the biases.
// NOTE: hb is a value receiver, so the total computed below does not
// persist between calls; it is recomputed on every invocation. If all
// bias values are zero, rand.Intn(0) below panics.
func (hb HammerBias) Choose() ctfe.EntrypointName {
	if hb.total == 0 {
		for _, ep := range ctfe.Entrypoints {
			hb.total += hb.Bias[ep]
		}
	}
	which := rand.Intn(hb.total)
	for _, ep := range ctfe.Entrypoints {
		which -= hb.Bias[ep]
		if which < 0 {
			return ep
		}
	}
	panic("random choice out of range")
}
// Invalid randomly chooses whether an operation should be invalid, with
// 1-in-InvalidChance[ep] odds; a non-positive chance always returns false.
func (hb HammerBias) Invalid(ep ctfe.EntrypointName) bool {
	n := hb.InvalidChance[ep]
	return n > 0 && rand.Intn(n) == 0
}
// submittedCert records a [pre-]cert submission for later inclusion checking.
type submittedCert struct {
	leafData    []byte                         // tls-encoded MerkleTreeLeaf
	leafHash    [sha256.Size]byte              // SHA256(TreeLeafPrefix | leafData)
	sct         *ct.SignedCertificateTimestamp // SCT returned by the log
	integrateBy time.Time                      // deadline for integration (SCT time + MMD)
	precert     bool                           // true when the submission was a pre-cert
}

// pendingCerts holds certificates that have been submitted that we want
// to check inclusion proofs for. The array is ordered from oldest to
// most recent, but new entries are only appended when enough time has
// passed since the last append, so the SCTs that get checked are spread
// out across the MMD period.
type pendingCerts struct {
	mu    sync.Mutex
	certs [sctCount]*submittedCert
}
// empty reports whether no submitted certificates are pending; slot 0 is
// always the oldest occupied slot, so a nil there means nothing is stored.
func (pc *pendingCerts) empty() bool {
	pc.mu.Lock()
	defer pc.mu.Unlock()
	noneStored := pc.certs[0] == nil
	return noneStored
}
// tryAppendCert locks mu, checks whether it's possible to append the cert, and
// appends it if so.
func (pc *pendingCerts) tryAppendCert(now time.Time, mmd time.Duration, submitted *submittedCert) {
	pc.mu.Lock()
	defer pc.mu.Unlock()
	if !pc.canAppend(now, mmd) {
		return
	}
	// Find the first free slot; canAppend guarantees one exists.
	slot := 0
	for slot < sctCount && pc.certs[slot] != nil {
		slot++
	}
	pc.certs[slot] = submitted
}
// canAppend checks whether a pending cert can be appended.
// It must be called with mu locked.
func (pc *pendingCerts) canAppend(now time.Time, mmd time.Duration) bool {
	if pc.certs[sctCount-1] != nil {
		return false // full already
	}
	if pc.certs[0] == nil {
		return true // nothing yet
	}
	// Only allow append if enough time has passed, namely MMD/#savedSCTs.
	// Scan back from the end to find the most recent occupied slot.
	last := sctCount - 1
	for ; last >= 0; last-- {
		if pc.certs[last] != nil {
			break
		}
	}
	// Spreading appends over the MMD period staggers inclusion checks.
	lastTime := timeFromMS(pc.certs[last].sct.Timestamp)
	nextTime := lastTime.Add(mmd / sctCount)
	return now.After(nextTime)
}
// oldestIfMMDPassed returns the oldest submitted certificate if the maximum
// merge delay has passed, i.e. it is expected to be integrated as of now. This
// function locks mu.
func (pc *pendingCerts) oldestIfMMDPassed(now time.Time) *submittedCert {
	pc.mu.Lock()
	defer pc.mu.Unlock()
	if pc.certs[0] == nil {
		return nil // nothing pending
	}
	submitted := pc.certs[0]
	if !now.After(submitted.integrateBy) {
		// Oldest cert not due to be integrated yet, so neither will any others.
		return nil
	}
	return submitted
}
// dropOldest removes the oldest submitted certificate, shifting the
// remaining entries down one slot and clearing the last one, which makes
// room for another cert to be stored. It locks mu.
func (pc *pendingCerts) dropOldest() {
	pc.mu.Lock()
	defer pc.mu.Unlock()
	// Use the builtin copy instead of a manual element-by-element shift.
	copy(pc.certs[:], pc.certs[1:])
	pc.certs[sctCount-1] = nil
}
// hammerState tracks the operations that have been performed during a test run, including
// earlier SCTs/STHs for later checking.
type hammerState struct {
	cfg *HammerConfig
	// chainMu protects the first/last chain and TBS fields below.
	// Store the first submitted and the most recently submitted [pre-]chain,
	// to allow submission of both old and new duplicates.
	chainMu                     sync.Mutex
	firstChain, lastChain       []ct.ASN1Cert
	firstChainIntegrated        time.Time
	firstPreChain, lastPreChain []ct.ASN1Cert
	firstPreChainIntegrated     time.Time
	firstTBS, lastTBS           []byte

	// mu guards the fields below; presumably reads take the R-lock — verify at callers.
	mu sync.RWMutex
	// STHs are arranged from later to earlier (so [0] is the most recent), and the
	// discovery of new STHs will push older ones off the end.
	sth [sthCount]*ct.SignedTreeHead
	// Submitted certs also run from later to earlier, but the discovery of new SCTs
	// does not affect the existing contents of the array, so if the array is full it
	// keeps the same elements. Instead, the oldest entry is removed (and a space
	// created) when we are able to get an inclusion proof for it.
	pending pendingCerts
	// Operations that are required to fix dependencies.
	nextOp []ctfe.EntrypointName

	hasher merkle.LogHasher
}
// newHammerState creates a hammerState from cfg, applying defaults for any
// unset limits and disabling the submission entrypoints for mirror logs.
func newHammerState(cfg *HammerConfig) (*hammerState, error) {
	mf := cfg.MetricFactory
	if mf == nil {
		mf = monitoring.InertMetricFactory{}
	}
	once.Do(func() { setupMetrics(mf) })

	// Defaults for unset configuration values.
	if cfg.MinGetEntries <= 0 {
		cfg.MinGetEntries = 1
	}
	if cfg.MaxGetEntries <= cfg.MinGetEntries {
		cfg.MaxGetEntries = cfg.MinGetEntries + 300
	}
	if cfg.EmitInterval <= 0 {
		cfg.EmitInterval = 10 * time.Second
	}
	if cfg.Limiter == nil {
		cfg.Limiter = unLimited{}
	}
	if cfg.MaxRetryDuration <= 0 {
		cfg.MaxRetryDuration = 60 * time.Second
	}

	if cfg.LogCfg.IsMirror {
		glog.Warningf("%v: disabling add-[pre-]chain for mirror log", cfg.LogCfg.Prefix)
		// Writing to a nil map panics; ensure Bias exists before zeroing
		// the add-chain entrypoints.
		if cfg.EPBias.Bias == nil {
			cfg.EPBias.Bias = make(map[ctfe.EntrypointName]int)
		}
		cfg.EPBias.Bias[ctfe.AddChainName] = 0
		cfg.EPBias.Bias[ctfe.AddPreChainName] = 0
	}

	state := hammerState{
		cfg:    cfg,
		nextOp: make([]ctfe.EntrypointName, 0),
		hasher: rfc6962.DefaultHasher,
	}
	return &state, nil
}
// client returns the next log client from the pool, spreading requests
// across the configured clients.
func (s *hammerState) client() *client.LogClient {
	return s.cfg.ClientPool.Next()
}

// lastTreeSize returns the tree size of the most recently seen STH, or
// zero when no STH has been observed yet.
func (s *hammerState) lastTreeSize() uint64 {
	if s.sth[0] == nil {
		return 0
	}
	return s.sth[0].TreeSize
}

// needOps queues operations that must be performed to satisfy the
// dependencies of a later operation.
func (s *hammerState) needOps(ops ...ctfe.EntrypointName) {
	glog.V(2).Infof("need operations %+v to satisfy dependencies", ops)
	s.nextOp = append(s.nextOp, ops...)
}
// addMultiple calls the passed in function a random number
// (1 <= n <= MaxParallelChains) of times in parallel.
// The first of any errors returned by calls to addOne will be returned by this function.
func (s *hammerState) addMultiple(ctx context.Context, addOne func(context.Context) error) error {
	var wg sync.WaitGroup
	numAdds := rand.Intn(s.cfg.MaxParallelChains) + 1
	glog.V(2).Infof("%s: do %d parallel add operations...", s.cfg.LogCfg.Prefix, numAdds)
	// Buffer one slot per goroutine so failed sends never block.
	errs := make(chan error, numAdds)
	for i := 0; i < numAdds; i++ {
		wg.Add(1)
		go func() {
			if err := addOne(ctx); err != nil {
				errs <- err
			}
			wg.Done()
		}()
	}
	wg.Wait()
	glog.V(2).Infof("%s: do %d parallel add operations...done", s.cfg.LogCfg.Prefix, numAdds)
	// Report the first error, if any; remaining errors are discarded.
	select {
	case err := <-errs:
		return err
	default:
	}
	return nil
}
// getChain returns a certificate chain to submit: either a freshly
// generated one, or (for duplicate testing) the first chain submitted
// earlier. It locks chainMu.
func (s *hammerState) getChain() (Choice, []ct.ASN1Cert, error) {
	s.chainMu.Lock()
	defer s.chainMu.Unlock()

	choice := s.chooseCertToAdd()
	// Override choice if necessary: use a fresh cert when nothing has been
	// submitted yet, or when the first chain may not be integrated yet.
	if s.lastChain == nil {
		choice = NewCert
	}
	if choice == FirstCert && time.Now().Before(s.firstChainIntegrated) {
		choice = NewCert
	}

	switch choice {
	case NewCert:
		chain, err := s.cfg.ChainGenerator.CertChain()
		if err != nil {
			return choice, nil, fmt.Errorf("failed to make fresh cert: %v", err)
		}
		if s.firstChain == nil {
			s.firstChain = chain
			s.firstChainIntegrated = time.Now().Add(s.cfg.MMD)
		}
		s.lastChain = chain
		return choice, chain, nil
	case FirstCert:
		return choice, s.firstChain, nil
	case LastCert:
		return choice, s.lastChain, nil
	}
	return choice, nil, fmt.Errorf("unhandled choice %s", choice)
}
// addChain submits a certificate chain via add-chain, then records the
// returned SCT and the computed leaf hash for later inclusion checking.
func (s *hammerState) addChain(ctx context.Context) error {
	choice, chain, err := s.getChain()
	if err != nil {
		return fmt.Errorf("failed to make chain (%s): %v", choice, err)
	}

	sct, err := s.client().AddChain(ctx, chain)
	if err != nil {
		if err, ok := err.(client.RspError); ok {
			glog.Errorf("%s: add-chain(%s): error %v HTTP status %d body %s", s.cfg.LogCfg.Prefix, choice, err.Error(), err.StatusCode, err.Body)
		}
		return fmt.Errorf("failed to add-chain(%s): %v", choice, err)
	}
	glog.V(2).Infof("%s: Uploaded %s cert, got SCT(time=%q)", s.cfg.LogCfg.Prefix, choice, timeFromMS(sct.Timestamp))

	// Calculate leaf hash = SHA256(0x00 | tls-encode(MerkleTreeLeaf))
	submitted := submittedCert{precert: false, sct: sct}
	leaf := ct.MerkleTreeLeaf{
		Version:  ct.V1,
		LeafType: ct.TimestampedEntryLeafType,
		TimestampedEntry: &ct.TimestampedEntry{
			Timestamp:  sct.Timestamp,
			EntryType:  ct.X509LogEntryType,
			X509Entry:  &(chain[0]),
			Extensions: sct.Extensions,
		},
	}
	// The cert should be integrated within one MMD of the SCT timestamp.
	submitted.integrateBy = timeFromMS(sct.Timestamp).Add(s.cfg.MMD)
	submitted.leafData, err = tls.Marshal(leaf)
	if err != nil {
		return fmt.Errorf("failed to tls.Marshal leaf cert: %v", err)
	}
	submitted.leafHash = sha256.Sum256(append([]byte{ct.TreeLeafPrefix}, submitted.leafData...))
	s.pending.tryAppendCert(time.Now(), s.cfg.MMD, &submitted)
	glog.V(3).Infof("%s: Uploaded %s cert has leaf-hash %x", s.cfg.LogCfg.Prefix, choice, submitted.leafHash)
	return nil
}
// addChainInvalid submits a deliberately-malformed add-chain request and
// verifies that the log rejects it; a success response is an error.
func (s *hammerState) addChainInvalid(ctx context.Context) error {
	choices := []Choice{EmptyChain, PrecertNotCert, NoChainToRoot, UnparsableCert}
	choice := choices[rand.Intn(len(choices))]

	var err error
	var chain []ct.ASN1Cert

	switch choice {
	case EmptyChain:
		// leave chain nil: submit no certificates at all
	case PrecertNotCert:
		chain, _, err = s.cfg.ChainGenerator.PreCertChain()
		if err != nil {
			return fmt.Errorf("failed to make chain(%s): %v", choice, err)
		}
	case NoChainToRoot:
		chain, err = s.cfg.ChainGenerator.CertChain()
		if err != nil {
			return fmt.Errorf("failed to make chain(%s): %v", choice, err)
		}
		// Drop the intermediate (chain[1]).
		chain = append(chain[:1], chain[2:]...)
	case UnparsableCert:
		chain, err = s.cfg.ChainGenerator.CertChain()
		if err != nil {
			return fmt.Errorf("failed to make chain(%s): %v", choice, err)
		}
		// Remove the initial ASN.1 SEQUENCE type byte (0x30) to make an unparsable cert.
		chain[0].Data[0] = 0x00
	default:
		glog.Exitf("Unhandled choice %s", choice)
	}

	sct, err := s.client().AddChain(ctx, chain)
	glog.V(3).Infof("invalid add-chain(%s) => error %v", choice, err)
	if err, ok := err.(client.RspError); ok {
		glog.V(3).Infof("   HTTP status %d body %s", err.StatusCode, err.Body)
	}
	if err == nil {
		return fmt.Errorf("unexpected success: add-chain(%s): %+v", choice, sct)
	}
	return nil
}
// chooseCertToAdd determines whether to add a new or pre-existing cert.
func (s *hammerState) chooseCertToAdd() Choice {
	dup := s.cfg.DuplicateChance
	if dup <= 0 || rand.Intn(dup) != 0 {
		return NewCert
	}
	// TODO(drysdale): restore LastCert as an option
	return FirstCert
}
// getPreChain returns a pre-cert chain and its TBS certificate to submit:
// either freshly generated, or (for duplicate testing) the first pre-chain
// submitted earlier. It locks chainMu.
func (s *hammerState) getPreChain() (Choice, []ct.ASN1Cert, []byte, error) {
	s.chainMu.Lock()
	defer s.chainMu.Unlock()

	choice := s.chooseCertToAdd()
	// Override choice if necessary: use a fresh pre-cert when nothing has
	// been submitted yet, or when the first one may not be integrated yet.
	if s.lastPreChain == nil {
		choice = NewCert
	}
	if choice == FirstCert && time.Now().Before(s.firstPreChainIntegrated) {
		choice = NewCert
	}

	switch choice {
	case NewCert:
		prechain, tbs, err := s.cfg.ChainGenerator.PreCertChain()
		if err != nil {
			return choice, nil, nil, fmt.Errorf("failed to make fresh pre-cert: %v", err)
		}
		if s.firstPreChain == nil {
			s.firstPreChain = prechain
			s.firstPreChainIntegrated = time.Now().Add(s.cfg.MMD)
			s.firstTBS = tbs
		}
		s.lastPreChain = prechain
		s.lastTBS = tbs
		return choice, prechain, tbs, nil
	case FirstCert:
		return choice, s.firstPreChain, s.firstTBS, nil
	case LastCert:
		return choice, s.lastPreChain, s.lastTBS, nil
	}
	return choice, nil, nil, fmt.Errorf("unhandled choice %s", choice)
}
// addPreChain submits a pre-certificate chain via add-pre-chain, then
// records the returned SCT and computed leaf hash for later inclusion
// checking.
func (s *hammerState) addPreChain(ctx context.Context) error {
	choice, prechain, tbs, err := s.getPreChain()
	if err != nil {
		return fmt.Errorf("failed to make pre-cert chain (%s): %v", choice, err)
	}
	// The issuer key hash in the leaf is taken from prechain[1].
	issuer, err := x509.ParseCertificate(prechain[1].Data)
	if err != nil {
		return fmt.Errorf("failed to parse pre-cert issuer: %v", err)
	}

	sct, err := s.client().AddPreChain(ctx, prechain)
	if err != nil {
		if err, ok := err.(client.RspError); ok {
			glog.Errorf("%s: add-pre-chain(%s): error %v HTTP status %d body %s", s.cfg.LogCfg.Prefix, choice, err.Error(), err.StatusCode, err.Body)
		}
		return fmt.Errorf("failed to add-pre-chain: %v", err)
	}
	glog.V(2).Infof("%s: Uploaded %s pre-cert, got SCT(time=%q)", s.cfg.LogCfg.Prefix, choice, timeFromMS(sct.Timestamp))

	// Calculate leaf hash = SHA256(0x00 | tls-encode(MerkleTreeLeaf))
	submitted := submittedCert{precert: true, sct: sct}
	leaf := ct.MerkleTreeLeaf{
		Version:  ct.V1,
		LeafType: ct.TimestampedEntryLeafType,
		TimestampedEntry: &ct.TimestampedEntry{
			Timestamp: sct.Timestamp,
			EntryType: ct.PrecertLogEntryType,
			PrecertEntry: &ct.PreCert{
				IssuerKeyHash:  sha256.Sum256(issuer.RawSubjectPublicKeyInfo),
				TBSCertificate: tbs,
			},
			Extensions: sct.Extensions,
		},
	}
	// The pre-cert should be integrated within one MMD of the SCT timestamp.
	submitted.integrateBy = timeFromMS(sct.Timestamp).Add(s.cfg.MMD)
	submitted.leafData, err = tls.Marshal(leaf)
	if err != nil {
		return fmt.Errorf("tls.Marshal(precertLeaf)=(nil,%v); want (_,nil)", err)
	}
	submitted.leafHash = sha256.Sum256(append([]byte{ct.TreeLeafPrefix}, submitted.leafData...))
	s.pending.tryAppendCert(time.Now(), s.cfg.MMD, &submitted)
	glog.V(3).Infof("%s: Uploaded %s pre-cert has leaf-hash %x", s.cfg.LogCfg.Prefix, choice, submitted.leafHash)
	return nil
}
// addPreChainInvalid performs a deliberately-invalid add-pre-chain request
// (picked at random from several kinds of invalidity) and checks that the
// log rejects it.
func (s *hammerState) addPreChainInvalid(ctx context.Context) error {
	choices := []Choice{EmptyChain, CertNotPrecert, NoChainToRoot, UnparsableCert}
	choice := choices[rand.Intn(len(choices))]
	var err error
	var prechain []ct.ASN1Cert
	switch choice {
	case EmptyChain:
		// Submit an empty (nil) chain.
	case CertNotPrecert:
		// Submit a final cert chain to the pre-cert endpoint.
		prechain, err = s.cfg.ChainGenerator.CertChain()
		if err != nil {
			return fmt.Errorf("failed to make pre-chain(%s): %v", choice, err)
		}
	case NoChainToRoot:
		prechain, _, err = s.cfg.ChainGenerator.PreCertChain()
		if err != nil {
			return fmt.Errorf("failed to make pre-chain(%s): %v", choice, err)
		}
		// Drop the intermediate (prechain[1]).
		prechain = append(prechain[:1], prechain[2:]...)
	case UnparsableCert:
		prechain, _, err = s.cfg.ChainGenerator.PreCertChain()
		if err != nil {
			return fmt.Errorf("failed to make pre-chain(%s): %v", choice, err)
		}
		// Remove the initial ASN.1 SEQUENCE type byte (0x30) to make an unparsable cert.
		prechain[0].Data[0] = 0x00
	default:
		glog.Exitf("Unhandled choice %s", choice)
	}
	sct, err := s.client().AddPreChain(ctx, prechain)
	glog.V(3).Infof("invalid add-pre-chain(%s) => error %v", choice, err)
	if err, ok := err.(client.RspError); ok {
		glog.V(3).Infof(" HTTP status %d body %s", err.StatusCode, err.Body)
	}
	// The submission is expected to fail; success means the log accepted a bogus chain.
	if err == nil {
		return fmt.Errorf("unexpected success: add-pre-chain: %+v", sct)
	}
	return nil
}
// getSTH fetches the log's current STH and stores it at the head of the
// history buffer, pushing older STHs towards the end.
func (s *hammerState) getSTH(ctx context.Context) error {
	// Make room at s.sth[0]; copy() handles the overlap like memmove.
	copy(s.sth[1:], s.sth[:])
	var err error
	s.sth[0], err = s.client().GetSTH(ctx)
	if err != nil {
		return fmt.Errorf("failed to get-sth: %v", err)
	}
	glog.V(2).Infof("%s: Got STH(time=%q, size=%d)", s.cfg.LogCfg.Prefix, timeFromMS(s.sth[0].Timestamp), s.sth[0].TreeSize)
	return nil
}
// chooseSTHs gets the current STH, and also picks an earlier STH at random
// from those previously observed. If no suitable earlier STH exists it
// returns errSkip (with the current STH still valid), after queueing the
// operations needed to fix the dependency.
func (s *hammerState) chooseSTHs(ctx context.Context) (*ct.SignedTreeHead, *ct.SignedTreeHead, error) {
	// Get current size, and pick an earlier size
	sthNow, err := s.client().GetSTH(ctx)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to get-sth for current tree: %v", err)
	}
	which := rand.Intn(sthCount)
	if s.sth[which] == nil {
		glog.V(3).Infof("%s: skipping get-sth-consistency as no earlier STH", s.cfg.LogCfg.Prefix)
		s.needOps(ctfe.GetSTHName)
		return nil, sthNow, errSkip{}
	}
	if s.sth[which].TreeSize == 0 {
		glog.V(3).Infof("%s: skipping get-sth-consistency as no earlier STH", s.cfg.LogCfg.Prefix)
		s.needOps(ctfe.AddChainName, ctfe.GetSTHName)
		return nil, sthNow, errSkip{}
	}
	// A consistency proof between two equal sizes is trivial, so skip it.
	if s.sth[which].TreeSize == sthNow.TreeSize {
		glog.V(3).Infof("%s: skipping get-sth-consistency as same size (%d)", s.cfg.LogCfg.Prefix, sthNow.TreeSize)
		s.needOps(ctfe.AddChainName, ctfe.GetSTHName)
		return nil, sthNow, errSkip{}
	}
	return s.sth[which], sthNow, nil
}
// getSTHConsistency fetches a consistency proof between an earlier STH and
// the current STH and verifies it, unless the earlier size had to be
// invented (in which case there is no known root hash to verify against).
func (s *hammerState) getSTHConsistency(ctx context.Context) error {
	sthOld, sthNow, err := s.chooseSTHs(ctx)
	if err != nil {
		// bail on actual errors
		if _, ok := err.(errSkip); !ok {
			return err
		}
		// If we're being asked to skip, it's because we don't have an earlier STH,
		// if the config says we must only use "known" STHs then we'll have to wait
		// until we get a larger STH.
		if s.cfg.StrictSTHConsistencySize {
			return err
		}
		// Otherwise, let's use our imagination and make one up, if possible...
		if sthNow.TreeSize < 2 {
			glog.V(3).Infof("%s: current STH size too small to invent a smaller STH for consistency proof (%d)", s.cfg.LogCfg.Prefix, sthNow.TreeSize)
			return errSkip{}
		}
		// Invented size is in [1, TreeSize]; Timestamp is left zero to mark
		// that the proof cannot be verified below.
		sthOld = &ct.SignedTreeHead{TreeSize: uint64(1 + rand.Int63n(int64(sthNow.TreeSize)))}
		glog.V(3).Infof("%s: Inventing a smaller STH size for consistency proof (%d)", s.cfg.LogCfg.Prefix, sthOld.TreeSize)
	}
	proof, err := s.client().GetSTHConsistency(ctx, sthOld.TreeSize, sthNow.TreeSize)
	if err != nil {
		return fmt.Errorf("failed to get-sth-consistency(%d, %d): %v", sthOld.TreeSize, sthNow.TreeSize, err)
	}
	// A zero timestamp marks an invented STH with no known root hash.
	if sthOld.Timestamp == 0 {
		glog.V(3).Infof("%s: Skipping consistency proof verification for invented STH", s.cfg.LogCfg.Prefix)
		return nil
	}
	if err := s.checkCTConsistencyProof(sthOld, sthNow, proof); err != nil {
		return fmt.Errorf("get-sth-consistency(%d, %d) proof check failed: %v", sthOld.TreeSize, sthNow.TreeSize, err)
	}
	glog.V(2).Infof("%s: Got STH consistency proof (size=%d => %d) len %d",
		s.cfg.LogCfg.Prefix, sthOld.TreeSize, sthNow.TreeSize, len(proof))
	return nil
}
// getSTHConsistencyInvalid performs a deliberately-invalid get-sth-consistency
// request (picked at random from several kinds of invalidity) and checks that
// the log rejects it.
func (s *hammerState) getSTHConsistencyInvalid(ctx context.Context) error {
	lastSize := s.lastTreeSize()
	if lastSize == 0 {
		return errSkip{}
	}
	// Param2TooBig is included so that its switch arm below is reachable; it
	// was previously missing from this list, leaving that case dead code.
	choices := []Choice{ParamTooBig, Param2TooBig, ParamsInverted, ParamNegative, ParamInvalid}
	choice := choices[rand.Intn(len(choices))]
	var err error
	var proof [][]byte
	switch choice {
	case ParamTooBig:
		// Both sizes beyond the current tree size.
		first := lastSize + uint64(invalidStretch)
		second := first + 100
		proof, err = s.client().GetSTHConsistency(ctx, first, second)
	case Param2TooBig:
		// Only the second size beyond the current tree size.
		first := lastSize
		second := lastSize + uint64(invalidStretch)
		proof, err = s.client().GetSTHConsistency(ctx, first, second)
	case ParamsInverted:
		var sthOld, sthNow *ct.SignedTreeHead
		sthOld, sthNow, err = s.chooseSTHs(ctx)
		if err != nil {
			return err
		}
		proof, err = s.client().GetSTHConsistency(ctx, sthNow.TreeSize, sthOld.TreeSize)
	case ParamNegative, ParamInvalid:
		params := make(map[string]string)
		switch choice {
		case ParamNegative:
			params["first"] = "-3"
			params["second"] = "-1"
		case ParamInvalid:
			params["first"] = "foo"
			params["second"] = "bar"
		}
		// Need to use lower-level API to be able to use invalid parameters
		var resp ct.GetSTHConsistencyResponse
		var httpRsp *http.Response
		var body []byte
		httpRsp, body, err = s.client().GetAndParse(ctx, ct.GetSTHConsistencyPath, params, &resp)
		if err != nil && httpRsp != nil {
			err = client.RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
		}
		proof = resp.Consistency
	default:
		glog.Exitf("Unhandled choice %s", choice)
	}
	glog.V(3).Infof("invalid get-sth-consistency(%s) => error %v", choice, err)
	if err, ok := err.(client.RspError); ok {
		glog.V(3).Infof(" HTTP status %d body %s", err.StatusCode, err.Body)
	}
	// The request is expected to fail; success means the log accepted bogus parameters.
	if err == nil {
		return fmt.Errorf("unexpected success: get-sth-consistency(%s): %+v", choice, proof)
	}
	return nil
}
// getProofByHash fetches and verifies an inclusion proof for the oldest
// submitted cert whose MMD has passed; on success the cert is removed from
// the pending queue.
func (s *hammerState) getProofByHash(ctx context.Context) error {
	submitted := s.pending.oldestIfMMDPassed(time.Now())
	if submitted == nil {
		// No SCT that is guaranteed to be integrated, so move on.
		return errSkip{}
	}
	// Get an STH that should include this submitted [pre-]cert.
	sth, err := s.client().GetSTH(ctx)
	if err != nil {
		return fmt.Errorf("failed to get-sth for proof: %v", err)
	}
	// Get and check an inclusion proof.
	rsp, err := s.client().GetProofByHash(ctx, submitted.leafHash[:], sth.TreeSize)
	if err != nil {
		return fmt.Errorf("failed to get-proof-by-hash(size=%d) on cert with SCT @ %v: %v, %+v", sth.TreeSize, timeFromMS(submitted.sct.Timestamp), err, rsp)
	}
	if err := proof.VerifyInclusion(s.hasher, uint64(rsp.LeafIndex), sth.TreeSize, submitted.leafHash[:], rsp.AuditPath, sth.SHA256RootHash[:]); err != nil {
		return fmt.Errorf("failed to VerifyInclusion(%d, %d)=%v", rsp.LeafIndex, sth.TreeSize, err)
	}
	// Proof checked out, so this cert no longer needs tracking.
	s.pending.dropOldest()
	return nil
}
// getProofByHashInvalid performs a deliberately-invalid get-proof-by-hash
// request (picked at random from several kinds of invalidity) and checks
// that the log rejects it.
func (s *hammerState) getProofByHashInvalid(ctx context.Context) error {
	lastSize := s.lastTreeSize()
	if lastSize == 0 {
		return errSkip{}
	}
	// May be nil if nothing is known to be integrated yet; the choices that
	// need a real leaf hash skip in that case.
	submitted := s.pending.oldestIfMMDPassed(time.Now())
	choices := []Choice{ParamInvalid, ParamTooBig, ParamNegative, InvalidBase64}
	choice := choices[rand.Intn(len(choices))]
	var err error
	var rsp *ct.GetProofByHashResponse
	switch choice {
	case ParamInvalid:
		rsp, err = s.client().GetProofByHash(ctx, []byte{0x01, 0x02}, 1) // Hash too short
	case ParamTooBig:
		if submitted == nil {
			return errSkip{}
		}
		rsp, err = s.client().GetProofByHash(ctx, submitted.leafHash[:], lastSize+uint64(invalidStretch))
	case ParamNegative, InvalidBase64:
		params := make(map[string]string)
		switch choice {
		case ParamNegative:
			if submitted == nil {
				return errSkip{}
			}
			params["tree_size"] = "-1"
			params["hash"] = base64.StdEncoding.EncodeToString(submitted.leafHash[:])
		case InvalidBase64:
			params["tree_size"] = "1"
			params["hash"] = "@^()"
		}
		// Need to use the lower-level API to be able to send invalid parameters.
		var r ct.GetProofByHashResponse
		rsp = &r
		var httpRsp *http.Response
		var body []byte
		httpRsp, body, err = s.client().GetAndParse(ctx, ct.GetProofByHashPath, params, &r)
		if err != nil && httpRsp != nil {
			err = client.RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
		}
	default:
		glog.Exitf("Unhandled choice %s", choice)
	}
	glog.V(3).Infof("invalid get-proof-by-hash(%s) => error %v", choice, err)
	if err, ok := err.(client.RspError); ok {
		glog.V(3).Infof(" HTTP status %d body %s", err.StatusCode, err.Body)
	}
	// The request is expected to fail; success means the log accepted bogus parameters.
	if err == nil {
		return fmt.Errorf("unexpected success: get-proof-by-hash(%s): %+v", choice, rsp)
	}
	return nil
}
// getEntries fetches a random range of entries from the log and
// sanity-checks the returned leaves (index, version, leaf type, entry type).
func (s *hammerState) getEntries(ctx context.Context) error {
	if s.sth[0] == nil {
		glog.V(3).Infof("%s: skipping get-entries as no earlier STH", s.cfg.LogCfg.Prefix)
		s.needOps(ctfe.GetSTHName)
		return errSkip{}
	}
	lastSize := s.lastTreeSize()
	if lastSize == 0 {
		if s.pending.empty() {
			glog.V(3).Infof("%s: skipping get-entries as tree size 0", s.cfg.LogCfg.Prefix)
			s.needOps(ctfe.AddChainName, ctfe.GetSTHName)
			return errSkip{}
		}
		glog.V(3).Infof("%s: skipping get-entries as STH stale", s.cfg.LogCfg.Prefix)
		s.needOps(ctfe.GetSTHName)
		return errSkip{}
	}
	// Entry indices are zero-based, and may or may not be allowed to extend
	// beyond current tree size (RFC 6962 s4.6).
	first := rand.Intn(int(lastSize))
	span := s.cfg.MaxGetEntries - s.cfg.MinGetEntries
	count := s.cfg.MinGetEntries + rand.Intn(int(span))
	last := first + count
	// Clamp to the tree unless oversized requests are deliberately allowed.
	if !s.cfg.OversizedGetEntries && last >= int(lastSize) {
		last = int(lastSize) - 1
	}
	entries, err := s.client().GetEntries(ctx, int64(first), int64(last))
	if err != nil {
		return fmt.Errorf("failed to get-entries(%d,%d): %v", first, last, err)
	}
	for i, entry := range entries {
		if want := int64(first + i); entry.Index != want {
			return fmt.Errorf("leaf[%d].LeafIndex=%d; want %d", i, entry.Index, want)
		}
		leaf := entry.Leaf
		if leaf.Version != 0 {
			return fmt.Errorf("leaf[%d].Version=%v; want V1(0)", i, leaf.Version)
		}
		// Fix: this error previously mislabelled the LeafType field as "Version".
		if leaf.LeafType != ct.TimestampedEntryLeafType {
			return fmt.Errorf("leaf[%d].LeafType=%v; want TimestampedEntryLeafType", i, leaf.LeafType)
		}
		ts := leaf.TimestampedEntry
		if ts.EntryType != ct.X509LogEntryType && ts.EntryType != ct.PrecertLogEntryType {
			return fmt.Errorf("leaf[%d].ts.EntryType=%v; want {X509,Precert}LogEntryType", i, ts.EntryType)
		}
	}
	glog.V(2).Infof("%s: Got entries [%d:%d)\n", s.cfg.LogCfg.Prefix, first, first+len(entries))
	return nil
}
// getEntriesInvalid performs a deliberately-invalid get-entries request and
// checks that the log rejects it.
func (s *hammerState) getEntriesInvalid(ctx context.Context) error {
	lastSize := s.lastTreeSize()
	if lastSize == 0 {
		return errSkip{}
	}
	options := []Choice{ParamTooBig, ParamNegative, ParamsInverted}
	mode := options[rand.Intn(len(options))]
	var begin, end int64
	switch mode {
	case ParamTooBig:
		end = int64(lastSize) + invalidStretch
		begin = end - 4
	case ParamNegative:
		begin, end = -2, 10
	case ParamsInverted:
		begin, end = 10, 5
	default:
		glog.Exitf("Unhandled choice %s", mode)
	}
	entries, err := s.client().GetEntries(ctx, begin, end)
	glog.V(3).Infof("invalid get-entries(%s) => error %v", mode, err)
	if rspErr, ok := err.(client.RspError); ok {
		glog.V(3).Infof(" HTTP status %d body %s", rspErr.StatusCode, rspErr.Body)
	}
	if err == nil {
		return fmt.Errorf("unexpected success: get-entries(%d,%d): %d entries", begin, end, len(entries))
	}
	return nil
}
// getRoots fetches the log's set of accepted root certificates.
func (s *hammerState) getRoots(ctx context.Context) error {
	accepted, err := s.client().GetAcceptedRoots(ctx)
	if err != nil {
		return fmt.Errorf("failed to get-roots: %v", err)
	}
	glog.V(2).Infof("%s: Got roots (len=%d)", s.cfg.LogCfg.Prefix, len(accepted))
	return nil
}
// sthSize renders an STH's tree size for logging, or "n/a" for a nil STH.
func sthSize(sth *ct.SignedTreeHead) string {
	if sth != nil {
		return strconv.FormatUint(sth.TreeSize, 10)
	}
	return "n/a"
}
// label returns the metric label for this log: its log ID as a decimal string.
func (s *hammerState) label() string {
	return strconv.FormatInt(s.cfg.LogCfg.LogId, 10)
}
// String returns a single-line summary of request/response/error counts for
// this log, suitable for periodic progress logging.
func (s *hammerState) String() string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	details := ""
	statusOK := strconv.Itoa(http.StatusOK)
	totalReqs := 0
	totalInvalidReqs := 0
	totalErrs := 0
	for _, ep := range ctfe.Entrypoints {
		reqCount := int(reqs.Value(s.label(), string(ep)))
		totalReqs += reqCount
		// Only show per-entrypoint detail for entrypoints that can be chosen.
		if s.cfg.EPBias.Bias[ep] > 0 {
			details += fmt.Sprintf(" %s=%d/%d", ep, int(rsps.Value(s.label(), string(ep), statusOK)), reqCount)
		}
		totalInvalidReqs += int(invalidReqs.Value(s.label(), string(ep)))
		totalErrs += int(errs.Value(s.label(), string(ep)))
	}
	return fmt.Sprintf("%10s: lastSTH.size=%s ops: total=%d invalid=%d errs=%v%s", s.cfg.LogCfg.Prefix, sthSize(s.sth[0]), totalReqs, totalInvalidReqs, totalErrs, details)
}
// performOp dispatches a single valid operation against the log, returning
// the HTTP status to record and any error. It holds s.mu for the duration
// and applies the configured per-request deadline, if any.
func (s *hammerState) performOp(ctx context.Context, ep ctfe.EntrypointName) (int, error) {
	s.cfg.Limiter.Wait()
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.cfg.RequestDeadline > 0 {
		cctx, cancel := context.WithTimeout(ctx, s.cfg.RequestDeadline)
		defer cancel()
		ctx = cctx
	}
	status := http.StatusOK
	var err error
	switch ep {
	case ctfe.AddChainName:
		err = s.addMultiple(ctx, s.addChain)
	case ctfe.AddPreChainName:
		err = s.addMultiple(ctx, s.addPreChain)
	case ctfe.GetSTHName:
		err = s.getSTH(ctx)
	case ctfe.GetSTHConsistencyName:
		err = s.getSTHConsistency(ctx)
	case ctfe.GetProofByHashName:
		err = s.getProofByHash(ctx)
	case ctfe.GetEntriesName:
		err = s.getEntries(ctx)
	case ctfe.GetRootsName:
		err = s.getRoots(ctx)
	case ctfe.GetEntryAndProofName:
		status = http.StatusNotImplemented
		glog.V(2).Infof("%s: hammering entrypoint %s not yet implemented", s.cfg.LogCfg.Prefix, ep)
	default:
		err = fmt.Errorf("internal error: unknown entrypoint %s selected", ep)
	}
	return status, err
}
// performInvalidOp dispatches a single deliberately-invalid operation for the
// given entrypoint. Entrypoints that take no parameters cannot be made
// invalid and produce an error.
func (s *hammerState) performInvalidOp(ctx context.Context, ep ctfe.EntrypointName) error {
	s.cfg.Limiter.Wait()
	switch ep {
	case ctfe.AddChainName:
		return s.addChainInvalid(ctx)
	case ctfe.AddPreChainName:
		return s.addPreChainInvalid(ctx)
	case ctfe.GetSTHConsistencyName:
		return s.getSTHConsistencyInvalid(ctx)
	case ctfe.GetProofByHashName:
		return s.getProofByHashInvalid(ctx)
	case ctfe.GetEntriesName:
		return s.getEntriesInvalid(ctx)
	case ctfe.GetSTHName, ctfe.GetRootsName:
		return fmt.Errorf("no invalid request possible for entrypoint %s", ep)
	case ctfe.GetEntryAndProofName:
		return fmt.Errorf("hammering entrypoint %s not yet implemented", ep)
	}
	return fmt.Errorf("internal error: unknown entrypoint %s", ep)
}
// chooseOp picks the next operation: a queued dependency-fixing operation if
// one is pending (and enabled by the bias config), otherwise one chosen at
// random per the biases. The boolean result reports whether the operation
// should be deliberately invalid.
func (s *hammerState) chooseOp() (ctfe.EntrypointName, bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if len(s.nextOp) > 0 {
		ep := s.nextOp[0]
		s.nextOp = s.nextOp[1:]
		// Queued ops are only performed if enabled; disabled ones are dropped.
		if s.cfg.EPBias.Bias[ep] > 0 {
			return ep, false
		}
	}
	ep := s.cfg.EPBias.Choose()
	return ep, s.cfg.EPBias.Invalid(ep)
}
// retryOneOp performs a random operation on the log, retrying on failure when
// IgnoreErrors is set (up to MaxRetryDuration). Retries are abandoned as soon
// as the context is cancelled or times out, since further attempts cannot
// succeed. Skipped operations (errSkip) are treated as successes.
func (s *hammerState) retryOneOp(ctx context.Context) error {
	ep, invalid := s.chooseOp()
	if invalid {
		glog.V(3).Infof("perform invalid %s operation", ep)
		invalidReqs.Inc(s.label(), string(ep))
		err := s.performInvalidOp(ctx, ep)
		if _, ok := err.(errSkip); ok {
			glog.V(2).Infof("invalid operation %s was skipped", ep)
			return nil
		}
		return err
	}
	glog.V(3).Infof("perform %s operation", ep)
	deadline := time.Now().Add(s.cfg.MaxRetryDuration)
	for {
		start := time.Now()
		reqs.Inc(s.label(), string(ep))
		status, err := s.performOp(ctx, ep)
		period := time.Since(start)
		rspLatency.Observe(period.Seconds(), s.label(), string(ep), strconv.Itoa(status))
		switch err.(type) {
		case nil:
			rsps.Inc(s.label(), string(ep), strconv.Itoa(status))
			return nil
		case errSkip:
			glog.V(2).Infof("operation %s was skipped", ep)
			return nil
		default:
			errs.Inc(s.label(), string(ep))
			if !s.cfg.IgnoreErrors {
				return err
			}
			// Do not retry operations where the context was cancelled (or
			// timed out) -- retrying cannot help.
			if ctx.Err() != nil {
				return err
			}
			left := time.Until(deadline)
			if left < 0 {
				glog.Warningf("%s: gave up retrying failed op %v after %v, returning last err: %v", s.cfg.LogCfg.Prefix, ep, s.cfg.MaxRetryDuration, err)
				return err
			}
			glog.Warningf("%s: op %v failed after %v (will retry for %v more): %v", s.cfg.LogCfg.Prefix, ep, period, left, err)
		}
	}
}
// checkCTConsistencyProof checks the given consistency proof between the two
// given STHs, using the hammer's configured hasher.
func (s *hammerState) checkCTConsistencyProof(sth1, sth2 *ct.SignedTreeHead, pf [][]byte) error {
	return proof.VerifyConsistency(s.hasher, sth1.TreeSize, sth2.TreeSize, pf, sth1.SHA256RootHash[:], sth2.SHA256RootHash[:])
}
// HammerCTLog performs load/stress operations according to given config.
// It returns on the first unrecoverable operation error or once the context
// is cancelled.
func HammerCTLog(ctx context.Context, cfg HammerConfig) error {
	s, err := newHammerState(&cfg)
	if err != nil {
		return err
	}
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	// Emit progress stats periodically until the run finishes.
	go schedule.Every(ctx, cfg.EmitInterval, func(ctx context.Context) {
		glog.Info(s.String())
	})
	// NOTE(review): count starts at 1 and the loop runs while count < Operations,
	// so Operations-1 ops are performed -- confirm whether <= was intended.
	for count := uint64(1); count < cfg.Operations; count++ {
		if err := s.retryOneOp(ctx); err != nil {
			return err
		}
		// Terminate from the loop if the context is cancelled.
		if err := ctx.Err(); err != nil {
			return err
		}
	}
	glog.Infof("%s: completed %d operations on log", cfg.LogCfg.Prefix, cfg.Operations)
	return nil
}
Do not print context canceled errors. (#928)
* Do not print context canceled errors.
* Do not retry operations where the context was canceled.
* Remove old comment.
* Check for context error rather than specifically context canceled.
Co-authored-by: Jay Hou <c77a5be5c3c62b45a3bd0620f51a63a0fc6540a5@google.com>
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"context"
"crypto/sha256"
"encoding/base64"
"fmt"
"math/rand"
"net/http"
"strconv"
"sync"
"time"
"github.com/golang/glog"
"github.com/google/certificate-transparency-go/client"
"github.com/google/certificate-transparency-go/schedule"
"github.com/google/certificate-transparency-go/tls"
"github.com/google/certificate-transparency-go/trillian/ctfe"
"github.com/google/certificate-transparency-go/trillian/ctfe/configpb"
"github.com/google/certificate-transparency-go/x509"
"github.com/google/trillian/monitoring"
"github.com/transparency-dev/merkle"
"github.com/transparency-dev/merkle/proof"
"github.com/transparency-dev/merkle/rfc6962"
ct "github.com/google/certificate-transparency-go"
)
const (
	// How many STHs and SCTs to hold on to.
	sthCount = 10
	sctCount = 10
	// How far beyond current tree size to request for invalid requests;
	// deliberately huge so such requests are guaranteed to be out of range.
	invalidStretch = int64(1000000000)
)
var (
	// Metrics are all per-log (label "logid"), but may also be
	// per-entrypoint (label "ep") or per-return-code (label "rc").
	once        sync.Once            // guards the one-time setupMetrics call
	reqs        monitoring.Counter   // logid, ep => value
	errs        monitoring.Counter   // logid, ep => value
	rsps        monitoring.Counter   // logid, ep, rc => value
	rspLatency  monitoring.Histogram // logid, ep, rc => values
	invalidReqs monitoring.Counter   // logid, ep => value
)
// setupMetrics initializes all the exported metrics. It is run exactly once
// per process, via once.Do in newHammerState.
func setupMetrics(mf monitoring.MetricFactory) {
	reqs = mf.NewCounter("reqs", "Number of valid requests sent", "logid", "ep")
	errs = mf.NewCounter("errs", "Number of error responses received for valid requests", "logid", "ep")
	rsps = mf.NewCounter("rsps", "Number of responses received for valid requests", "logid", "ep", "rc")
	rspLatency = mf.NewHistogram("rsp_latency", "Latency of valid responses in seconds", "logid", "ep", "rc")
	invalidReqs = mf.NewCounter("invalid_reqs", "Number of deliberately-invalid requests sent", "logid", "ep")
}
// errSkip indicates that a test operation should be skipped.
type errSkip struct{}

// Error implements the error interface.
func (e errSkip) Error() string {
	return "test operation skipped"
}
// Choice represents a random decision about a hammer operation.
type Choice string

// Constants for per-operation choices.
const (
	// Deliberately-invalid request parameter variants.
	ParamTooBig    = Choice("ParamTooBig")
	Param2TooBig   = Choice("Param2TooBig")
	ParamNegative  = Choice("ParamNegative")
	ParamInvalid   = Choice("ParamInvalid")
	ParamsInverted = Choice("ParamsInverted")
	InvalidBase64  = Choice("InvalidBase64")
	// Deliberately-invalid chain variants.
	EmptyChain     = Choice("EmptyChain")
	CertNotPrecert = Choice("CertNotPrecert")
	PrecertNotCert = Choice("PrecertNotCert")
	NoChainToRoot  = Choice("NoChainToRoot")
	UnparsableCert = Choice("UnparsableCert")
	// Which cert to submit on add-[pre-]chain.
	NewCert   = Choice("NewCert")
	LastCert  = Choice("LastCert")
	FirstCert = Choice("FirstCert")
)
// Limiter is an interface to allow different rate limiters to be used with the
// hammer.
type Limiter interface {
	// Wait blocks until another operation is allowed to proceed.
	Wait()
}

// unLimited is a no-op Limiter that never blocks.
type unLimited struct{}

// Wait implements Limiter; it returns immediately.
func (u unLimited) Wait() {
}
// HammerConfig provides configuration for a stress/load test.
type HammerConfig struct {
	// Configuration for the log.
	LogCfg *configpb.LogConfig
	// How to create process-wide metrics.
	MetricFactory monitoring.MetricFactory
	// Maximum merge delay.
	MMD time.Duration
	// Certificate chain generator.
	ChainGenerator ChainGenerator
	// ClientPool provides the clients used to make requests.
	ClientPool ClientPool
	// Bias values to favor particular log operations.
	EPBias HammerBias
	// Range of how many entries to get.
	MinGetEntries, MaxGetEntries int
	// OversizedGetEntries governs whether get-entries requests that go beyond the
	// current tree size are allowed (with a truncated response expected).
	OversizedGetEntries bool
	// Number of operations to perform.
	Operations uint64
	// Limiter rate-limits operations against the log.
	Limiter Limiter
	// MaxParallelChains sets the upper limit for the number of parallel
	// add-*-chain requests to make when the biasing model says to perform an add.
	MaxParallelChains int
	// EmitInterval defines how frequently stats are logged.
	EmitInterval time.Duration
	// IgnoreErrors controls whether a hammer run fails immediately on any error.
	IgnoreErrors bool
	// MaxRetryDuration governs how long to keep retrying when IgnoreErrors is true.
	MaxRetryDuration time.Duration
	// RequestDeadline indicates the deadline to set on each request to the log.
	RequestDeadline time.Duration
	// DuplicateChance sets the probability of attempting to add a duplicate when
	// calling add[-pre]-chain (as the N in 1-in-N). Set to 0 to disable sending
	// duplicates.
	DuplicateChance int
	// StrictSTHConsistencySize if set to true will cause Hammer to only request
	// STH consistency proofs between tree sizes for which it's seen valid STHs.
	// If set to false, Hammer will request a consistency proof between the
	// current tree size, and a random smaller size greater than zero.
	StrictSTHConsistencySize bool
}
// HammerBias indicates the bias for selecting different log operations.
type HammerBias struct {
	// Bias maps each entrypoint to its relative selection weight; entrypoints
	// with zero weight are never chosen.
	Bias map[ctfe.EntrypointName]int
	// total holds the sum of the Bias weights, computed on demand in Choose.
	// Note: Choose has a value receiver, so a value written here inside Choose
	// does not persist across calls.
	total int
	// InvalidChance gives the odds of performing an invalid operation, as the N in 1-in-N.
	InvalidChance map[ctfe.EntrypointName]int
}
// Choose randomly picks an operation to perform according to the biases,
// treating each entrypoint's Bias value as a relative weight.
// The weight total is computed into a local variable: the previous code
// assigned it to hb.total, but hb is a value receiver so that write never
// persisted across calls and merely looked like caching.
func (hb HammerBias) Choose() ctfe.EntrypointName {
	total := hb.total
	if total == 0 {
		for _, ep := range ctfe.Entrypoints {
			total += hb.Bias[ep]
		}
	}
	// Pick a point in [0, total) and walk the weights until it is passed.
	which := rand.Intn(total)
	for _, ep := range ctfe.Entrypoints {
		which -= hb.Bias[ep]
		if which < 0 {
			return ep
		}
	}
	panic("random choice out of range")
}
// Invalid randomly chooses whether an operation should be invalid, with the
// per-entrypoint configured 1-in-N odds.
func (hb HammerBias) Invalid(ep ctfe.EntrypointName) bool {
	if chance := hb.InvalidChance[ep]; chance > 0 {
		return rand.Intn(chance) == 0
	}
	return false
}
// submittedCert records a [pre-]certificate that has been submitted to the
// log, so its inclusion can be checked later.
type submittedCert struct {
	leafData    []byte                         // TLS-encoded MerkleTreeLeaf
	leafHash    [sha256.Size]byte              // SHA256(0x00 | leafData)
	sct         *ct.SignedCertificateTimestamp // SCT returned on submission
	integrateBy time.Time                      // deadline for the log to integrate this cert
	precert     bool                           // true if this was an add-pre-chain submission
}
// pendingCerts holds certificates that have been submitted that we want
// to check inclusion proofs for. The array is ordered from oldest to
// most recent, but new entries are only appended when enough time has
// passed since the last append, so the SCTs that get checked are spread
// out across the MMD period.
type pendingCerts struct {
	mu    sync.Mutex               // protects certs
	certs [sctCount]*submittedCert // nil entries are free slots at the tail
}
// empty reports whether there are no pending certificates. It locks mu.
func (pc *pendingCerts) empty() bool {
	pc.mu.Lock()
	defer pc.mu.Unlock()
	// Entries are packed from the front, so slot 0 being nil means none at all.
	return pc.certs[0] == nil
}
// tryAppendCert locks mu, checks whether it's possible to append the cert, and
// appends it (in the first free slot) if so.
func (pc *pendingCerts) tryAppendCert(now time.Time, mmd time.Duration, submitted *submittedCert) {
	pc.mu.Lock()
	defer pc.mu.Unlock()
	if pc.canAppend(now, mmd) {
		// Find the first free slot; canAppend guarantees one exists.
		which := 0
		for ; which < sctCount; which++ {
			if pc.certs[which] == nil {
				break
			}
		}
		pc.certs[which] = submitted
	}
}
// canAppend checks whether a pending cert can be appended: there must be a
// free slot, and enough time must have elapsed since the previous append so
// that checks are spread across the MMD period.
// It must be called with mu locked.
func (pc *pendingCerts) canAppend(now time.Time, mmd time.Duration) bool {
	if pc.certs[sctCount-1] != nil {
		return false // full already
	}
	if pc.certs[0] == nil {
		return true // nothing yet
	}
	// Only allow append if enough time has passed, namely MMD/#savedSCTs.
	last := sctCount - 1
	for ; last >= 0; last-- {
		if pc.certs[last] != nil {
			break
		}
	}
	lastTime := timeFromMS(pc.certs[last].sct.Timestamp)
	nextTime := lastTime.Add(mmd / sctCount)
	return now.After(nextTime)
}
// oldestIfMMDPassed returns the oldest submitted certificate if the maximum
// merge delay has passed, i.e. it is expected to be integrated as of now, or
// nil otherwise. This function locks mu.
func (pc *pendingCerts) oldestIfMMDPassed(now time.Time) *submittedCert {
	pc.mu.Lock()
	defer pc.mu.Unlock()
	if pc.certs[0] == nil {
		return nil
	}
	submitted := pc.certs[0]
	if !now.After(submitted.integrateBy) {
		// Oldest cert not due to be integrated yet, so neither will any others.
		return nil
	}
	return submitted
}
// dropOldest removes the oldest submitted certificate, making room for
// another cert to be stored. It locks mu.
func (pc *pendingCerts) dropOldest() {
	pc.mu.Lock()
	defer pc.mu.Unlock()
	// Shift the remaining certs down one slot; copy() handles the overlap.
	copy(pc.certs[:], pc.certs[1:])
	pc.certs[sctCount-1] = nil
}
// hammerState tracks the operations that have been performed during a test run, including
// earlier SCTs/STHs for later checking.
type hammerState struct {
	cfg *HammerConfig
	// Store the first submitted and the most recently submitted [pre-]chain,
	// to allow submission of both old and new duplicates.
	// chainMu guards the chain-related fields below it (see getChain/getPreChain).
	chainMu                     sync.Mutex
	firstChain, lastChain       []ct.ASN1Cert
	firstChainIntegrated        time.Time
	firstPreChain, lastPreChain []ct.ASN1Cert
	firstPreChainIntegrated     time.Time
	firstTBS, lastTBS           []byte
	// mu guards the fields below it (write-locked in performOp/chooseOp,
	// read-locked in String).
	mu sync.RWMutex
	// STHs are arranged from later to earlier (so [0] is the most recent), and the
	// discovery of new STHs will push older ones off the end.
	sth [sthCount]*ct.SignedTreeHead
	// Submitted certs also run from later to earlier, but the discovery of new SCTs
	// does not affect the existing contents of the array, so if the array is full it
	// keeps the same elements. Instead, the oldest entry is removed (and a space
	// created) when we are able to get an inclusion proof for it.
	pending pendingCerts
	// Operations that are required to fix dependencies.
	nextOp []ctfe.EntrypointName
	hasher merkle.LogHasher
}
// newHammerState creates a hammerState from the given config, applying
// defaults for unset fields and initializing process-wide metrics (once).
func newHammerState(cfg *HammerConfig) (*hammerState, error) {
	mf := cfg.MetricFactory
	if mf == nil {
		mf = monitoring.InertMetricFactory{}
	}
	// Metrics are process-wide, so only set them up once.
	once.Do(func() { setupMetrics(mf) })
	if cfg.MinGetEntries <= 0 {
		cfg.MinGetEntries = 1
	}
	if cfg.MaxGetEntries <= cfg.MinGetEntries {
		cfg.MaxGetEntries = cfg.MinGetEntries + 300
	}
	if cfg.EmitInterval <= 0 {
		cfg.EmitInterval = 10 * time.Second
	}
	if cfg.Limiter == nil {
		cfg.Limiter = unLimited{}
	}
	if cfg.MaxRetryDuration <= 0 {
		cfg.MaxRetryDuration = 60 * time.Second
	}
	// Mirror logs do not accept submissions, so disable the add operations.
	if cfg.LogCfg.IsMirror {
		glog.Warningf("%v: disabling add-[pre-]chain for mirror log", cfg.LogCfg.Prefix)
		cfg.EPBias.Bias[ctfe.AddChainName] = 0
		cfg.EPBias.Bias[ctfe.AddPreChainName] = 0
	}
	state := hammerState{
		cfg:    cfg,
		nextOp: make([]ctfe.EntrypointName, 0),
		hasher: rfc6962.DefaultHasher,
	}
	return &state, nil
}
// client returns the next log client from the configured pool.
func (s *hammerState) client() *client.LogClient {
	return s.cfg.ClientPool.Next()
}
// lastTreeSize returns the tree size from the most recently seen STH, or 0
// if no STH has been observed yet.
func (s *hammerState) lastTreeSize() uint64 {
	if sth := s.sth[0]; sth != nil {
		return sth.TreeSize
	}
	return 0
}
// needOps queues up operations that are required to satisfy dependencies of
// other operations (e.g. get-sth before get-sth-consistency).
func (s *hammerState) needOps(ops ...ctfe.EntrypointName) {
	glog.V(2).Infof("need operations %+v to satisfy dependencies", ops)
	s.nextOp = append(s.nextOp, ops...)
}
// addMultiple calls the passed in function a random number
// (1 <= n <= MaxParallelChains) of times in parallel.
// The first of any errors returned by calls to addOne will be returned by this function.
func (s *hammerState) addMultiple(ctx context.Context, addOne func(context.Context) error) error {
	var wg sync.WaitGroup
	numAdds := rand.Intn(s.cfg.MaxParallelChains) + 1
	glog.V(2).Infof("%s: do %d parallel add operations...", s.cfg.LogCfg.Prefix, numAdds)
	// Buffered so the goroutines never block on send. Named errCh to avoid
	// shadowing the package-level errs metric counter.
	errCh := make(chan error, numAdds)
	for i := 0; i < numAdds; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if err := addOne(ctx); err != nil {
				errCh <- err
			}
		}()
	}
	wg.Wait()
	glog.V(2).Infof("%s: do %d parallel add operations...done", s.cfg.LogCfg.Prefix, numAdds)
	// Report the first error, if any occurred.
	select {
	case err := <-errCh:
		return err
	default:
	}
	return nil
}
// getChain returns a certificate chain to submit, choosing randomly between a
// fresh chain and a previously-submitted one (to exercise duplicate handling).
// It returns the choice made, the chain, and any error.
func (s *hammerState) getChain() (Choice, []ct.ASN1Cert, error) {
	s.chainMu.Lock()
	defer s.chainMu.Unlock()
	choice := s.chooseCertToAdd()
	// Override choice if necessary: an earlier chain can only be re-submitted
	// if one exists, and only after it is expected to have been integrated.
	if s.lastChain == nil {
		choice = NewCert
	}
	if choice == FirstCert && time.Now().Before(s.firstChainIntegrated) {
		choice = NewCert
	}
	switch choice {
	case NewCert:
		chain, err := s.cfg.ChainGenerator.CertChain()
		if err != nil {
			return choice, nil, fmt.Errorf("failed to make fresh cert: %v", err)
		}
		// Remember the first chain (and when it should be integrated by) so it
		// can be re-submitted later as a duplicate.
		if s.firstChain == nil {
			s.firstChain = chain
			s.firstChainIntegrated = time.Now().Add(s.cfg.MMD)
		}
		s.lastChain = chain
		return choice, chain, nil
	case FirstCert:
		return choice, s.firstChain, nil
	case LastCert:
		return choice, s.lastChain, nil
	}
	return choice, nil, fmt.Errorf("unhandled choice %s", choice)
}
// addChain submits a certificate chain (either fresh or a duplicate, per
// getChain), checks the returned SCT, and queues the submission for a later
// inclusion-proof check.
func (s *hammerState) addChain(ctx context.Context) error {
	choice, chain, err := s.getChain()
	if err != nil {
		return fmt.Errorf("failed to make chain (%s): %v", choice, err)
	}
	sct, err := s.client().AddChain(ctx, chain)
	if err != nil {
		if err, ok := err.(client.RspError); ok {
			glog.Errorf("%s: add-chain(%s): error %v HTTP status %d body %s", s.cfg.LogCfg.Prefix, choice, err.Error(), err.StatusCode, err.Body)
		}
		return fmt.Errorf("failed to add-chain(%s): %v", choice, err)
	}
	glog.V(2).Infof("%s: Uploaded %s cert, got SCT(time=%q)", s.cfg.LogCfg.Prefix, choice, timeFromMS(sct.Timestamp))
	// Calculate leaf hash = SHA256(0x00 | tls-encode(MerkleTreeLeaf))
	submitted := submittedCert{precert: false, sct: sct}
	leaf := ct.MerkleTreeLeaf{
		Version:  ct.V1,
		LeafType: ct.TimestampedEntryLeafType,
		TimestampedEntry: &ct.TimestampedEntry{
			Timestamp:  sct.Timestamp,
			EntryType:  ct.X509LogEntryType,
			X509Entry:  &(chain[0]),
			Extensions: sct.Extensions,
		},
	}
	// The cert should be integrated within the MMD of the SCT timestamp.
	submitted.integrateBy = timeFromMS(sct.Timestamp).Add(s.cfg.MMD)
	submitted.leafData, err = tls.Marshal(leaf)
	if err != nil {
		return fmt.Errorf("failed to tls.Marshal leaf cert: %v", err)
	}
	submitted.leafHash = sha256.Sum256(append([]byte{ct.TreeLeafPrefix}, submitted.leafData...))
	s.pending.tryAppendCert(time.Now(), s.cfg.MMD, &submitted)
	glog.V(3).Infof("%s: Uploaded %s cert has leaf-hash %x", s.cfg.LogCfg.Prefix, choice, submitted.leafHash)
	return nil
}
// addChainInvalid performs a deliberately-invalid add-chain request (picked
// at random from several kinds of invalidity) and checks that the log
// rejects it.
func (s *hammerState) addChainInvalid(ctx context.Context) error {
	choices := []Choice{EmptyChain, PrecertNotCert, NoChainToRoot, UnparsableCert}
	choice := choices[rand.Intn(len(choices))]
	var err error
	var chain []ct.ASN1Cert
	switch choice {
	case EmptyChain:
		// Submit an empty (nil) chain.
	case PrecertNotCert:
		// Submit a pre-cert chain to the final-cert endpoint.
		chain, _, err = s.cfg.ChainGenerator.PreCertChain()
		if err != nil {
			return fmt.Errorf("failed to make chain(%s): %v", choice, err)
		}
	case NoChainToRoot:
		chain, err = s.cfg.ChainGenerator.CertChain()
		if err != nil {
			return fmt.Errorf("failed to make chain(%s): %v", choice, err)
		}
		// Drop the intermediate (chain[1]).
		chain = append(chain[:1], chain[2:]...)
	case UnparsableCert:
		chain, err = s.cfg.ChainGenerator.CertChain()
		if err != nil {
			return fmt.Errorf("failed to make chain(%s): %v", choice, err)
		}
		// Remove the initial ASN.1 SEQUENCE type byte (0x30) to make an unparsable cert.
		chain[0].Data[0] = 0x00
	default:
		glog.Exitf("Unhandled choice %s", choice)
	}
	sct, err := s.client().AddChain(ctx, chain)
	glog.V(3).Infof("invalid add-chain(%s) => error %v", choice, err)
	if err, ok := err.(client.RspError); ok {
		glog.V(3).Infof(" HTTP status %d body %s", err.StatusCode, err.Body)
	}
	// The submission is expected to fail; success means the log accepted a bogus chain.
	if err == nil {
		return fmt.Errorf("unexpected success: add-chain(%s): %+v", choice, sct)
	}
	return nil
}
// chooseCertToAdd determines whether to add a new or pre-existing cert.
// A duplicate of the first cert is chosen with probability
// 1/DuplicateChance (when DuplicateChance > 0); otherwise a fresh cert.
func (s *hammerState) chooseCertToAdd() Choice {
	wantDup := s.cfg.DuplicateChance > 0 && rand.Intn(s.cfg.DuplicateChance) == 0
	if !wantDup {
		return NewCert
	}
	// TODO(drysdale): restore LastCert as an option
	return FirstCert
}
// getPreChain returns a pre-cert chain and its TBS data to submit: either a
// freshly generated one, or occasionally a duplicate of the first chain
// submitted. First/last submissions are tracked under chainMu.
func (s *hammerState) getPreChain() (Choice, []ct.ASN1Cert, []byte, error) {
	s.chainMu.Lock()
	defer s.chainMu.Unlock()
	choice := s.chooseCertToAdd()
	// Override choice if necessary
	if s.lastPreChain == nil {
		// Nothing submitted yet, so a duplicate is impossible.
		choice = NewCert
	}
	if choice == FirstCert && time.Now().Before(s.firstPreChainIntegrated) {
		// Don't re-submit the first chain before it should be integrated.
		choice = NewCert
	}
	switch choice {
	case NewCert:
		prechain, tbs, err := s.cfg.ChainGenerator.PreCertChain()
		if err != nil {
			return choice, nil, nil, fmt.Errorf("failed to make fresh pre-cert: %v", err)
		}
		if s.firstPreChain == nil {
			// Remember the very first submission for later duplicate tests.
			s.firstPreChain = prechain
			s.firstPreChainIntegrated = time.Now().Add(s.cfg.MMD)
			s.firstTBS = tbs
		}
		s.lastPreChain = prechain
		s.lastTBS = tbs
		return choice, prechain, tbs, nil
	case FirstCert:
		return choice, s.firstPreChain, s.firstTBS, nil
	case LastCert:
		return choice, s.lastPreChain, s.lastTBS, nil
	}
	return choice, nil, nil, fmt.Errorf("unhandled choice %s", choice)
}
// addPreChain submits a (new or repeated) pre-certificate chain via
// add-pre-chain, then records the SCT and leaf hash for later inclusion
// checking once the log's MMD has passed.
func (s *hammerState) addPreChain(ctx context.Context) error {
	choice, prechain, tbs, err := s.getPreChain()
	if err != nil {
		return fmt.Errorf("failed to make pre-cert chain (%s): %v", choice, err)
	}
	// prechain[1] is the issuing certificate; its key hash goes in the leaf.
	issuer, err := x509.ParseCertificate(prechain[1].Data)
	if err != nil {
		return fmt.Errorf("failed to parse pre-cert issuer: %v", err)
	}
	sct, err := s.client().AddPreChain(ctx, prechain)
	if err != nil {
		// Surface HTTP-level details when the client exposes them.
		if err, ok := err.(client.RspError); ok {
			glog.Errorf("%s: add-pre-chain(%s): error %v HTTP status %d body %s", s.cfg.LogCfg.Prefix, choice, err.Error(), err.StatusCode, err.Body)
		}
		return fmt.Errorf("failed to add-pre-chain: %v", err)
	}
	glog.V(2).Infof("%s: Uploaded %s pre-cert, got SCT(time=%q)", s.cfg.LogCfg.Prefix, choice, timeFromMS(sct.Timestamp))
	// Calculate leaf hash = SHA256(0x00 | tls-encode(MerkleTreeLeaf))
	submitted := submittedCert{precert: true, sct: sct}
	leaf := ct.MerkleTreeLeaf{
		Version:  ct.V1,
		LeafType: ct.TimestampedEntryLeafType,
		TimestampedEntry: &ct.TimestampedEntry{
			Timestamp: sct.Timestamp,
			EntryType: ct.PrecertLogEntryType,
			PrecertEntry: &ct.PreCert{
				IssuerKeyHash:  sha256.Sum256(issuer.RawSubjectPublicKeyInfo),
				TBSCertificate: tbs,
			},
			Extensions: sct.Extensions,
		},
	}
	// The log must integrate the leaf within the MMD of the SCT timestamp.
	submitted.integrateBy = timeFromMS(sct.Timestamp).Add(s.cfg.MMD)
	submitted.leafData, err = tls.Marshal(leaf)
	if err != nil {
		return fmt.Errorf("tls.Marshal(precertLeaf)=(nil,%v); want (_,nil)", err)
	}
	submitted.leafHash = sha256.Sum256(append([]byte{ct.TreeLeafPrefix}, submitted.leafData...))
	s.pending.tryAppendCert(time.Now(), s.cfg.MMD, &submitted)
	glog.V(3).Infof("%s: Uploaded %s pre-cert has leaf-hash %x", s.cfg.LogCfg.Prefix, choice, submitted.leafHash)
	return nil
}
// addPreChainInvalid sends one of several deliberately malformed
// add-pre-chain requests; it is an error if the log accepts the submission.
func (s *hammerState) addPreChainInvalid(ctx context.Context) error {
	choices := []Choice{EmptyChain, CertNotPrecert, NoChainToRoot, UnparsableCert}
	choice := choices[rand.Intn(len(choices))]
	var err error
	var prechain []ct.ASN1Cert
	switch choice {
	case EmptyChain:
		// Leave prechain nil: submit an empty chain.
	case CertNotPrecert:
		// Submit an ordinary cert chain to the pre-cert-only entrypoint.
		prechain, err = s.cfg.ChainGenerator.CertChain()
		if err != nil {
			return fmt.Errorf("failed to make pre-chain(%s): %v", choice, err)
		}
	case NoChainToRoot:
		prechain, _, err = s.cfg.ChainGenerator.PreCertChain()
		if err != nil {
			return fmt.Errorf("failed to make pre-chain(%s): %v", choice, err)
		}
		// Drop the intermediate (prechain[1]).
		prechain = append(prechain[:1], prechain[2:]...)
	case UnparsableCert:
		prechain, _, err = s.cfg.ChainGenerator.PreCertChain()
		if err != nil {
			return fmt.Errorf("failed to make pre-chain(%s): %v", choice, err)
		}
		// Remove the initial ASN.1 SEQUENCE type byte (0x30) to make an unparsable cert.
		prechain[0].Data[0] = 0x00
	default:
		glog.Exitf("Unhandled choice %s", choice)
	}
	sct, err := s.client().AddPreChain(ctx, prechain)
	glog.V(3).Infof("invalid add-pre-chain(%s) => error %v", choice, err)
	if err, ok := err.(client.RspError); ok {
		glog.V(3).Infof(" HTTP status %d body %s", err.StatusCode, err.Body)
	}
	// Success here means the log failed to reject a bad submission.
	if err == nil {
		return fmt.Errorf("unexpected success: add-pre-chain: %+v", sct)
	}
	return nil
}
// getSTH fetches the log's current STH and pushes it onto the front of the
// rolling window of recently seen STHs.
func (s *hammerState) getSTH(ctx context.Context) error {
	// Shuffle earlier STHs along; copy is memmove-like, so the overlapping
	// right-shift is safe and equivalent to the element-by-element loop.
	copy(s.sth[1:], s.sth[:sthCount-1])
	sth, err := s.client().GetSTH(ctx)
	s.sth[0] = sth
	if err != nil {
		return fmt.Errorf("failed to get-sth: %v", err)
	}
	glog.V(2).Infof("%s: Got STH(time=%q, size=%d)", s.cfg.LogCfg.Prefix, timeFromMS(s.sth[0].Timestamp), s.sth[0].TreeSize)
	return nil
}
// chooseSTHs gets the current STH, and also picks an earlier STH from the
// rolling window. It returns errSkip (along with the current STH) when no
// usable earlier STH exists, queueing via needOps the operations that would
// make one available.
func (s *hammerState) chooseSTHs(ctx context.Context) (*ct.SignedTreeHead, *ct.SignedTreeHead, error) {
	// Get current size, and pick an earlier size
	sthNow, err := s.client().GetSTH(ctx)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to get-sth for current tree: %v", err)
	}
	which := rand.Intn(sthCount)
	if s.sth[which] == nil {
		glog.V(3).Infof("%s: skipping get-sth-consistency as no earlier STH", s.cfg.LogCfg.Prefix)
		s.needOps(ctfe.GetSTHName)
		return nil, sthNow, errSkip{}
	}
	if s.sth[which].TreeSize == 0 {
		// An empty-tree STH is useless as the older end of a proof.
		glog.V(3).Infof("%s: skipping get-sth-consistency as no earlier STH", s.cfg.LogCfg.Prefix)
		s.needOps(ctfe.AddChainName, ctfe.GetSTHName)
		return nil, sthNow, errSkip{}
	}
	if s.sth[which].TreeSize == sthNow.TreeSize {
		// Identical sizes give a trivial proof; wait for the tree to grow.
		glog.V(3).Infof("%s: skipping get-sth-consistency as same size (%d)", s.cfg.LogCfg.Prefix, sthNow.TreeSize)
		s.needOps(ctfe.AddChainName, ctfe.GetSTHName)
		return nil, sthNow, errSkip{}
	}
	return s.sth[which], sthNow, nil
}
// getSTHConsistency requests a consistency proof between an earlier STH and
// the current one, and verifies the proof. If no suitable earlier STH is
// known (and strict mode is off), a plausible smaller size is invented; in
// that case the proof is fetched but not verified.
func (s *hammerState) getSTHConsistency(ctx context.Context) error {
	sthOld, sthNow, err := s.chooseSTHs(ctx)
	if err != nil {
		// bail on actual errors
		if _, ok := err.(errSkip); !ok {
			return err
		}
		// If we're being asked to skip, it's because we don't have an earlier STH,
		// if the config says we must only use "known" STHs then we'll have to wait
		// until we get a larger STH.
		if s.cfg.StrictSTHConsistencySize {
			return err
		}
		// Otherwise, let's use our imagination and make one up, if possible...
		if sthNow.TreeSize < 2 {
			glog.V(3).Infof("%s: current STH size too small to invent a smaller STH for consistency proof (%d)", s.cfg.LogCfg.Prefix, sthNow.TreeSize)
			return errSkip{}
		}
		// Invented STH has Timestamp == 0, which flags it as unverifiable below.
		sthOld = &ct.SignedTreeHead{TreeSize: uint64(1 + rand.Int63n(int64(sthNow.TreeSize)))}
		glog.V(3).Infof("%s: Inventing a smaller STH size for consistency proof (%d)", s.cfg.LogCfg.Prefix, sthOld.TreeSize)
	}
	proof, err := s.client().GetSTHConsistency(ctx, sthOld.TreeSize, sthNow.TreeSize)
	if err != nil {
		return fmt.Errorf("failed to get-sth-consistency(%d, %d): %v", sthOld.TreeSize, sthNow.TreeSize, err)
	}
	if sthOld.Timestamp == 0 {
		// No root hash for an invented STH, so the proof can't be checked.
		glog.V(3).Infof("%s: Skipping consistency proof verification for invented STH", s.cfg.LogCfg.Prefix)
		return nil
	}
	if err := s.checkCTConsistencyProof(sthOld, sthNow, proof); err != nil {
		return fmt.Errorf("get-sth-consistency(%d, %d) proof check failed: %v", sthOld.TreeSize, sthNow.TreeSize, err)
	}
	glog.V(2).Infof("%s: Got STH consistency proof (size=%d => %d) len %d",
		s.cfg.LogCfg.Prefix, sthOld.TreeSize, sthNow.TreeSize, len(proof))
	return nil
}
// getSTHConsistencyInvalid issues a deliberately malformed
// get-sth-consistency request and checks that the log rejects it.
func (s *hammerState) getSTHConsistencyInvalid(ctx context.Context) error {
	lastSize := s.lastTreeSize()
	if lastSize == 0 {
		return errSkip{}
	}
	// Param2TooBig is included here so its switch arm below is reachable;
	// previously it was missing from this list, making that case dead code.
	choices := []Choice{ParamTooBig, Param2TooBig, ParamsInverted, ParamNegative, ParamInvalid}
	choice := choices[rand.Intn(len(choices))]
	var err error
	var proof [][]byte
	switch choice {
	case ParamTooBig:
		// Both sizes far beyond the current tree size.
		first := lastSize + uint64(invalidStretch)
		second := first + 100
		proof, err = s.client().GetSTHConsistency(ctx, first, second)
	case Param2TooBig:
		// Only the second size beyond the current tree size.
		first := lastSize
		second := lastSize + uint64(invalidStretch)
		proof, err = s.client().GetSTHConsistency(ctx, first, second)
	case ParamsInverted:
		// Swap old/new tree sizes in the request.
		var sthOld, sthNow *ct.SignedTreeHead
		sthOld, sthNow, err = s.chooseSTHs(ctx)
		if err != nil {
			return err
		}
		proof, err = s.client().GetSTHConsistency(ctx, sthNow.TreeSize, sthOld.TreeSize)
	case ParamNegative, ParamInvalid:
		params := make(map[string]string)
		switch choice {
		case ParamNegative:
			params["first"] = "-3"
			params["second"] = "-1"
		case ParamInvalid:
			params["first"] = "foo"
			params["second"] = "bar"
		}
		// Need to use lower-level API to be able to use invalid parameters
		var resp ct.GetSTHConsistencyResponse
		var httpRsp *http.Response
		var body []byte
		httpRsp, body, err = s.client().GetAndParse(ctx, ct.GetSTHConsistencyPath, params, &resp)
		if err != nil && httpRsp != nil {
			err = client.RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
		}
		proof = resp.Consistency
	default:
		glog.Exitf("Unhandled choice %s", choice)
	}
	glog.V(3).Infof("invalid get-sth-consistency(%s) => error %v", choice, err)
	if err, ok := err.(client.RspError); ok {
		glog.V(3).Infof(" HTTP status %d body %s", err.StatusCode, err.Body)
	}
	// Success here means the log failed to reject a bad request.
	if err == nil {
		return fmt.Errorf("unexpected success: get-sth-consistency(%s): %+v", choice, proof)
	}
	return nil
}
// getProofByHash picks the oldest submitted cert whose MMD has passed,
// fetches an inclusion proof for its leaf hash against a fresh STH, and
// verifies the proof. On success the cert is removed from the pending set.
func (s *hammerState) getProofByHash(ctx context.Context) error {
	submitted := s.pending.oldestIfMMDPassed(time.Now())
	if submitted == nil {
		// No SCT that is guaranteed to be integrated, so move on.
		return errSkip{}
	}
	// Get an STH that should include this submitted [pre-]cert.
	sth, err := s.client().GetSTH(ctx)
	if err != nil {
		return fmt.Errorf("failed to get-sth for proof: %v", err)
	}
	// Get and check an inclusion proof.
	rsp, err := s.client().GetProofByHash(ctx, submitted.leafHash[:], sth.TreeSize)
	if err != nil {
		return fmt.Errorf("failed to get-proof-by-hash(size=%d) on cert with SCT @ %v: %v, %+v", sth.TreeSize, timeFromMS(submitted.sct.Timestamp), err, rsp)
	}
	if err := proof.VerifyInclusion(s.hasher, uint64(rsp.LeafIndex), sth.TreeSize, submitted.leafHash[:], rsp.AuditPath, sth.SHA256RootHash[:]); err != nil {
		return fmt.Errorf("failed to VerifyInclusion(%d, %d)=%v", rsp.LeafIndex, sth.TreeSize, err)
	}
	// Proof verified: this cert no longer needs tracking.
	s.pending.dropOldest()
	return nil
}
// getProofByHashInvalid issues a deliberately malformed get-proof-by-hash
// request and checks that the log rejects it.
func (s *hammerState) getProofByHashInvalid(ctx context.Context) error {
	lastSize := s.lastTreeSize()
	if lastSize == 0 {
		return errSkip{}
	}
	// May be nil; choices that need a real leaf hash skip in that case.
	submitted := s.pending.oldestIfMMDPassed(time.Now())
	choices := []Choice{ParamInvalid, ParamTooBig, ParamNegative, InvalidBase64}
	choice := choices[rand.Intn(len(choices))]
	var err error
	var rsp *ct.GetProofByHashResponse
	switch choice {
	case ParamInvalid:
		rsp, err = s.client().GetProofByHash(ctx, []byte{0x01, 0x02}, 1) // Hash too short
	case ParamTooBig:
		if submitted == nil {
			return errSkip{}
		}
		// Valid hash, but tree size far beyond the current tree.
		rsp, err = s.client().GetProofByHash(ctx, submitted.leafHash[:], lastSize+uint64(invalidStretch))
	case ParamNegative, InvalidBase64:
		params := make(map[string]string)
		switch choice {
		case ParamNegative:
			if submitted == nil {
				return errSkip{}
			}
			params["tree_size"] = "-1"
			params["hash"] = base64.StdEncoding.EncodeToString(submitted.leafHash[:])
		case InvalidBase64:
			params["tree_size"] = "1"
			params["hash"] = "@^()"
		}
		// Need the lower-level API to be able to send invalid parameters.
		var r ct.GetProofByHashResponse
		rsp = &r
		var httpRsp *http.Response
		var body []byte
		httpRsp, body, err = s.client().GetAndParse(ctx, ct.GetProofByHashPath, params, &r)
		if err != nil && httpRsp != nil {
			err = client.RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
		}
	default:
		glog.Exitf("Unhandled choice %s", choice)
	}
	glog.V(3).Infof("invalid get-proof-by-hash(%s) => error %v", choice, err)
	if err, ok := err.(client.RspError); ok {
		glog.V(3).Infof(" HTTP status %d body %s", err.StatusCode, err.Body)
	}
	// Success here means the log failed to reject a bad request.
	if err == nil {
		return fmt.Errorf("unexpected success: get-proof-by-hash(%s): %+v", choice, rsp)
	}
	return nil
}
// getEntries retrieves a random range of entries from the log and performs
// basic structural checks (index, version, leaf type, entry type) on each
// returned leaf.
func (s *hammerState) getEntries(ctx context.Context) error {
	if s.sth[0] == nil {
		glog.V(3).Infof("%s: skipping get-entries as no earlier STH", s.cfg.LogCfg.Prefix)
		s.needOps(ctfe.GetSTHName)
		return errSkip{}
	}
	lastSize := s.lastTreeSize()
	if lastSize == 0 {
		if s.pending.empty() {
			glog.V(3).Infof("%s: skipping get-entries as tree size 0", s.cfg.LogCfg.Prefix)
			s.needOps(ctfe.AddChainName, ctfe.GetSTHName)
			return errSkip{}
		}
		glog.V(3).Infof("%s: skipping get-entries as STH stale", s.cfg.LogCfg.Prefix)
		s.needOps(ctfe.GetSTHName)
		return errSkip{}
	}
	// Entry indices are zero-based, and may or may not be allowed to extend
	// beyond current tree size (RFC 6962 s4.6).
	first := rand.Intn(int(lastSize))
	span := s.cfg.MaxGetEntries - s.cfg.MinGetEntries
	count := s.cfg.MinGetEntries
	if span > 0 {
		// Guard: rand.Intn panics for n <= 0, which previously happened
		// whenever MinGetEntries == MaxGetEntries.
		count += rand.Intn(int(span))
	}
	last := first + count
	if !s.cfg.OversizedGetEntries && last >= int(lastSize) {
		last = int(lastSize) - 1
	}
	entries, err := s.client().GetEntries(ctx, int64(first), int64(last))
	if err != nil {
		return fmt.Errorf("failed to get-entries(%d,%d): %v", first, last, err)
	}
	for i, entry := range entries {
		if want := int64(first + i); entry.Index != want {
			return fmt.Errorf("leaf[%d].LeafIndex=%d; want %d", i, entry.Index, want)
		}
		leaf := entry.Leaf
		if leaf.Version != 0 {
			return fmt.Errorf("leaf[%d].Version=%v; want V1(0)", i, leaf.Version)
		}
		if leaf.LeafType != ct.TimestampedEntryLeafType {
			// Message fixed: it previously named the wrong field (Version).
			return fmt.Errorf("leaf[%d].LeafType=%v; want TimestampedEntryLeafType", i, leaf.LeafType)
		}
		ts := leaf.TimestampedEntry
		if ts.EntryType != ct.X509LogEntryType && ts.EntryType != ct.PrecertLogEntryType {
			return fmt.Errorf("leaf[%d].ts.EntryType=%v; want {X509,Precert}LogEntryType", i, ts.EntryType)
		}
	}
	glog.V(2).Infof("%s: Got entries [%d:%d)\n", s.cfg.LogCfg.Prefix, first, first+len(entries))
	return nil
}
// getEntriesInvalid issues a deliberately malformed get-entries request and
// checks that the log rejects it.
func (s *hammerState) getEntriesInvalid(ctx context.Context) error {
	treeSize := s.lastTreeSize()
	if treeSize == 0 {
		return errSkip{}
	}
	choices := []Choice{ParamTooBig, ParamNegative, ParamsInverted}
	choice := choices[rand.Intn(len(choices))]
	var start, end int64
	switch choice {
	case ParamTooBig:
		// A range far beyond the current tree size.
		end = int64(treeSize) + invalidStretch
		start = end - 4
	case ParamNegative:
		start, end = -2, 10
	case ParamsInverted:
		start, end = 10, 5
	default:
		glog.Exitf("Unhandled choice %s", choice)
	}
	entries, err := s.client().GetEntries(ctx, start, end)
	glog.V(3).Infof("invalid get-entries(%s) => error %v", choice, err)
	if rspErr, ok := err.(client.RspError); ok {
		glog.V(3).Infof(" HTTP status %d body %s", rspErr.StatusCode, rspErr.Body)
	}
	if err == nil {
		return fmt.Errorf("unexpected success: get-entries(%d,%d): %d entries", start, end, len(entries))
	}
	return nil
}
// getRoots fetches the set of root certificates the log accepts and logs
// how many were returned.
func (s *hammerState) getRoots(ctx context.Context) error {
	acceptedRoots, rootsErr := s.client().GetAcceptedRoots(ctx)
	if rootsErr != nil {
		return fmt.Errorf("failed to get-roots: %v", rootsErr)
	}
	glog.V(2).Infof("%s: Got roots (len=%d)", s.cfg.LogCfg.Prefix, len(acceptedRoots))
	return nil
}
// sthSize renders the tree size of an STH for logging, tolerating nil.
func sthSize(sth *ct.SignedTreeHead) string {
	if sth != nil {
		return fmt.Sprintf("%d", sth.TreeSize)
	}
	return "n/a"
}
// label returns the log's ID in decimal form, used as a metric label.
func (s *hammerState) label() string {
	return fmt.Sprintf("%d", s.cfg.LogCfg.LogId)
}
// String summarizes request/response/error counters for this log, for
// periodic progress reporting. It takes the read lock, so it is safe to
// call concurrently with operations.
func (s *hammerState) String() string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	details := ""
	statusOK := strconv.Itoa(http.StatusOK)
	totalReqs := 0
	totalInvalidReqs := 0
	totalErrs := 0
	for _, ep := range ctfe.Entrypoints {
		reqCount := int(reqs.Value(s.label(), string(ep)))
		totalReqs += reqCount
		// Only show per-entrypoint detail for entrypoints actually in use.
		if s.cfg.EPBias.Bias[ep] > 0 {
			details += fmt.Sprintf(" %s=%d/%d", ep, int(rsps.Value(s.label(), string(ep), statusOK)), reqCount)
		}
		totalInvalidReqs += int(invalidReqs.Value(s.label(), string(ep)))
		totalErrs += int(errs.Value(s.label(), string(ep)))
	}
	return fmt.Sprintf("%10s: lastSTH.size=%s ops: total=%d invalid=%d errs=%v%s", s.cfg.LogCfg.Prefix, sthSize(s.sth[0]), totalReqs, totalInvalidReqs, totalErrs, details)
}
// performOp dispatches a single valid operation against the given
// entrypoint, returning the HTTP status to record along with any error.
// It holds s.mu for the duration of the operation and honors the
// configured per-request rate limit and deadline.
func (s *hammerState) performOp(ctx context.Context, ep ctfe.EntrypointName) (int, error) {
	s.cfg.Limiter.Wait()
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.cfg.RequestDeadline > 0 {
		// Bound each request by the configured deadline.
		cctx, cancel := context.WithTimeout(ctx, s.cfg.RequestDeadline)
		defer cancel()
		ctx = cctx
	}
	status := http.StatusOK
	var err error
	switch ep {
	case ctfe.AddChainName:
		err = s.addMultiple(ctx, s.addChain)
	case ctfe.AddPreChainName:
		err = s.addMultiple(ctx, s.addPreChain)
	case ctfe.GetSTHName:
		err = s.getSTH(ctx)
	case ctfe.GetSTHConsistencyName:
		err = s.getSTHConsistency(ctx)
	case ctfe.GetProofByHashName:
		err = s.getProofByHash(ctx)
	case ctfe.GetEntriesName:
		err = s.getEntries(ctx)
	case ctfe.GetRootsName:
		err = s.getRoots(ctx)
	case ctfe.GetEntryAndProofName:
		status = http.StatusNotImplemented
		glog.V(2).Infof("%s: hammering entrypoint %s not yet implemented", s.cfg.LogCfg.Prefix, ep)
	default:
		err = fmt.Errorf("internal error: unknown entrypoint %s selected", ep)
	}
	return status, err
}
// performInvalidOp sends a deliberately invalid request to the given
// entrypoint; it is an error to pick an entrypoint that has no invalid
// variant, or one that is not yet implemented.
func (s *hammerState) performInvalidOp(ctx context.Context, ep ctfe.EntrypointName) error {
	s.cfg.Limiter.Wait()
	handlers := map[ctfe.EntrypointName]func(context.Context) error{
		ctfe.AddChainName:          s.addChainInvalid,
		ctfe.AddPreChainName:       s.addPreChainInvalid,
		ctfe.GetSTHConsistencyName: s.getSTHConsistencyInvalid,
		ctfe.GetProofByHashName:    s.getProofByHashInvalid,
		ctfe.GetEntriesName:        s.getEntriesInvalid,
	}
	if handler, ok := handlers[ep]; ok {
		return handler(ctx)
	}
	switch ep {
	case ctfe.GetSTHName, ctfe.GetRootsName:
		return fmt.Errorf("no invalid request possible for entrypoint %s", ep)
	case ctfe.GetEntryAndProofName:
		return fmt.Errorf("hammering entrypoint %s not yet implemented", ep)
	}
	return fmt.Errorf("internal error: unknown entrypoint %s", ep)
}
// chooseOp selects the next operation: a queued must-run op if one exists
// (and is enabled by the bias config), otherwise a biased random choice.
// The second return value reports whether the op should be invalid.
func (s *hammerState) chooseOp() (ctfe.EntrypointName, bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if len(s.nextOp) != 0 {
		// Pop the head of the queue; use it only if its bias is non-zero.
		queued := s.nextOp[0]
		s.nextOp = s.nextOp[1:]
		if s.cfg.EPBias.Bias[queued] > 0 {
			return queued, false
		}
	}
	chosen := s.cfg.EPBias.Choose()
	return chosen, s.cfg.EPBias.Invalid(chosen)
}
// retryOneOp performs one randomly chosen operation on the log. An invalid
// operation is attempted exactly once; a valid operation is retried (when
// IgnoreErrors is configured) until it succeeds, the context is cancelled,
// or MaxRetryDuration elapses.
func (s *hammerState) retryOneOp(ctx context.Context) error {
	ep, invalid := s.chooseOp()
	if invalid {
		glog.V(3).Infof("perform invalid %s operation", ep)
		invalidReqs.Inc(s.label(), string(ep))
		err := s.performInvalidOp(ctx, ep)
		if _, ok := err.(errSkip); ok {
			// Preconditions not met; not a failure.
			glog.V(2).Infof("invalid operation %s was skipped", ep)
			return nil
		}
		return err
	}
	glog.V(3).Infof("perform %s operation", ep)
	deadline := time.Now().Add(s.cfg.MaxRetryDuration)
	for {
		if err := ctx.Err(); err != nil {
			return err
		}
		start := time.Now()
		reqs.Inc(s.label(), string(ep))
		status, err := s.performOp(ctx, ep)
		period := time.Since(start)
		rspLatency.Observe(period.Seconds(), s.label(), string(ep), strconv.Itoa(status))
		switch err.(type) {
		case nil:
			rsps.Inc(s.label(), string(ep), strconv.Itoa(status))
			return nil
		case errSkip:
			// Preconditions not met; not a failure.
			glog.V(2).Infof("operation %s was skipped", ep)
			return nil
		default:
			errs.Inc(s.label(), string(ep))
			if s.cfg.IgnoreErrors {
				// Retry until the overall deadline expires.
				left := time.Until(deadline)
				if left < 0 {
					glog.Warningf("%s: gave up retrying failed op %v after %v, returning last err: %v", s.cfg.LogCfg.Prefix, ep, s.cfg.MaxRetryDuration, err)
					return err
				}
				glog.Warningf("%s: op %v failed after %v (will retry for %v more): %v", s.cfg.LogCfg.Prefix, ep, period, left, err)
			} else {
				return err
			}
		}
	}
}
// checkCTConsistencyProof verifies that pf is a valid Merkle consistency
// proof between the trees described by the two STHs.
func (s *hammerState) checkCTConsistencyProof(sth1, sth2 *ct.SignedTreeHead, pf [][]byte) error {
	oldRoot := sth1.SHA256RootHash[:]
	newRoot := sth2.SHA256RootHash[:]
	return proof.VerifyConsistency(s.hasher, sth1.TreeSize, sth2.TreeSize, pf, oldRoot, newRoot)
}
// HammerCTLog performs load/stress operations against the configured log,
// emitting a progress summary every EmitInterval, until cfg.Operations
// operations have completed, an operation fails, or ctx is cancelled.
func HammerCTLog(ctx context.Context, cfg HammerConfig) error {
	s, err := newHammerState(&cfg)
	if err != nil {
		return err
	}
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	go schedule.Every(ctx, cfg.EmitInterval, func(ctx context.Context) {
		glog.Info(s.String())
	})
	// Count from 0 so that exactly cfg.Operations operations are performed;
	// counting from 1 with a strict < bound ran one too few, contradicting
	// the completion message below.
	for count := uint64(0); count < cfg.Operations; count++ {
		if err := s.retryOneOp(ctx); err != nil {
			return err
		}
		// Terminate from the loop if the context is cancelled.
		if err := ctx.Err(); err != nil {
			return err
		}
	}
	glog.Infof("%s: completed %d operations on log", cfg.LogCfg.Prefix, cfg.Operations)
	return nil
}
|
//: ----------------------------------------------------------------------------
//: Copyright (C) 2017 Verizon. All Rights Reserved.
//: All Rights Reserved
//:
//: file: ipfix.go
//: details: TODO
//: author: Mehrdad Arshad Rad
//: date: 02/01/2017
//:
//: Licensed under the Apache License, Version 2.0 (the "License");
//: you may not use this file except in compliance with the License.
//: You may obtain a copy of the License at
//:
//: http://www.apache.org/licenses/LICENSE-2.0
//:
//: Unless required by applicable law or agreed to in writing, software
//: distributed under the License is distributed on an "AS IS" BASIS,
//: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//: See the License for the specific language governing permissions and
//: limitations under the License.
//: ----------------------------------------------------------------------------
package main
import (
	"net"
	"strconv"
	"strings"
	"sync"
	"time"

	"git.edgecastcdn.net/vflow/ipfix"
	"git.edgecastcdn.net/vflow/mirror"
)
// IPFIX holds the configuration and run state of the IPFIX collector
// service.
type IPFIX struct {
	port    int    // UDP port to listen on
	addr    string // listen address
	udpSize int    // receive buffer size per datagram
	workers int    // number of decoder goroutines
	stop    bool   // set by shutdown() to end the receive loop
}
// IPFIXUDPMsg is a single received UDP datagram together with the address
// of the exporter that sent it.
type IPFIXUDPMsg struct {
	raddr *net.UDPAddr // source address of the datagram
	body  []byte       // raw IPFIX payload
}
var (
	// ipfixUdpCh carries received datagrams from the listener to workers.
	ipfixUdpCh = make(chan IPFIXUDPMsg, 1000)
	// ipfixMCh carries copies of datagrams to the mirror dispatcher.
	ipfixMCh = make(chan IPFIXUDPMsg, 1000)
	// ipfixMirrorEnabled is set once the mirror dispatcher is running.
	ipfixMirrorEnabled bool
	// templates memory cache
	mCache ipfix.MemCache
)
// NewIPFIX constructs an IPFIX service from the global command-line
// options.
func NewIPFIX() *IPFIX {
	srv := IPFIX{
		port:    opts.IPFIXPort,
		udpSize: opts.IPFIXUDPSize,
		workers: opts.IPFIXWorkers,
	}
	return &srv
}
// run binds a UDP socket on the configured host:port, starts the worker
// pool and the mirror dispatcher, then feeds each received datagram into
// ipfixUdpCh until shutdown() sets i.stop.
func (i *IPFIX) run() {
	var wg sync.WaitGroup
	hostPort := net.JoinHostPort(i.addr, strconv.Itoa(i.port))
	udpAddr, err := net.ResolveUDPAddr("udp", hostPort)
	if err != nil {
		// Fixed: this error was previously discarded.
		logger.Fatalln(err)
	}
	conn, err := net.ListenUDP("udp", udpAddr)
	if err != nil {
		// Fixed: the error body was previously empty, so a failed listen
		// fell through to a nil-pointer dereference on conn below.
		logger.Fatalln(err)
	}
	for n := 0; n < i.workers; n++ {
		// Fixed: Add must be called before the goroutine starts; calling it
		// inside the goroutine races wg.Wait() below.
		wg.Add(1)
		go func() {
			defer wg.Done()
			ipfixWorker()
		}()
	}
	logger.Printf("ipfix is running (workers#: %d)", i.workers)
	mCache = ipfix.NewCache()
	go func() {
		mirrorIPFIXDispatcher(ipfixMCh)
	}()
	for !i.stop {
		b := make([]byte, i.udpSize)
		// 1e9 ns = 1s deadline, so the stop flag is re-checked periodically.
		conn.SetReadDeadline(time.Now().Add(1e9))
		n, raddr, err := conn.ReadFromUDP(b)
		if err != nil {
			// Deadline expiry or transient read error: just try again.
			continue
		}
		ipfixUdpCh <- IPFIXUDPMsg{raddr, b[:n]}
	}
	wg.Wait()
}
// shutdown asks the receive loop to stop and then closes the worker
// channel so the workers drain and exit.
//
// NOTE(review): i.stop is written here and read by run() without any
// synchronization — presumably tolerated because of the 1s read deadline,
// but it is formally a data race; confirm or switch to an atomic/channel.
func (i *IPFIX) shutdown() {
	i.stop = true
	logger.Println("stopped ipfix service gracefully ...")
	// Give the receive loop (1s read deadline) time to observe the flag.
	time.Sleep(1 * time.Second)
	logger.Println("ipfix has been shutdown")
	close(ipfixUdpCh)
}
func ipfixWorker() {
var (
msg IPFIXUDPMsg
ok bool
)
for {
if msg, ok = <-ipfixUdpCh; !ok {
break
}
if verbose {
logger.Printf("rcvd ipfix data from: %s, size: %d bytes",
msg.raddr, len(msg.body))
}
if ipfixMirrorEnabled {
ipfixMCh <- IPFIXUDPMsg{msg.raddr, append([]byte{}, msg.body...)}
}
d := ipfix.NewDecoder(msg.raddr.IP, msg.body)
if _, err := d.Decode(mCache); err != nil {
logger.Println(err)
}
}
}
// mirrorIPFIX forwards IPFIX payloads read from ch to dst:port over a raw
// socket, rewriting the IP header so the packet appears to come from the
// original exporter. It loops forever, returning only if a send fails.
//
// NOTE(review): payloads longer than len(packet)-ipHLen-8 are silently
// truncated by the copy below — confirm upstream datagram size limits.
func mirrorIPFIX(dst net.IP, port int, ch chan IPFIXUDPMsg) error {
	var (
		packet = make([]byte, 1500)
		msg    IPFIXUDPMsg
		pLen   int
		err    error
		ipHdr  []byte
		ipHLen int
		ipv4   bool
		ip     mirror.IP
	)
	conn, err := mirror.NewRawConn(dst)
	if err != nil {
		return err
	}
	// 55117 is the fixed source port used for mirrored datagrams.
	udp := mirror.UDP{55117, port, 0, 0}
	udpHdr := udp.Marshal()
	if dst.To4() != nil {
		ipv4 = true
	}
	// Build the appropriate IP header template once, up front.
	if ipv4 {
		ip = mirror.NewIPv4HeaderTpl(mirror.UDPProto)
		ipHdr = ip.Marshal()
		ipHLen = mirror.IPv4HLen
	} else {
		ip = mirror.NewIPv6HeaderTpl(mirror.UDPProto)
		ipHdr = ip.Marshal()
		ipHLen = mirror.IPv6HLen
	}
	for {
		msg = <-ch
		pLen = len(msg.body)
		// Patch per-packet fields: addresses and lengths.
		ip.SetAddrs(ipHdr, msg.raddr.IP, dst)
		ip.SetLen(ipHdr, pLen+mirror.UDPHLen)
		udp.SetLen(udpHdr, pLen)
		// IPv6 checksum mandatory
		if !ipv4 {
			udp.SetChecksum()
		}
		// Assemble: IP header | UDP header (8 bytes) | payload.
		copy(packet[0:ipHLen], ipHdr)
		copy(packet[ipHLen:ipHLen+8], udpHdr)
		copy(packet[ipHLen+8:], msg.body)
		if err = conn.Send(packet[0 : ipHLen+8+pLen]); err != nil {
			return err
		}
	}
}
// mirrorIPFIXDispatcher starts one mirrorIPFIX worker per configured
// "host:port" target (';'-separated in opts.IPFIXMirror), then routes each
// message from ch to the IPv4 or IPv6 worker channel depending on the
// exporter's address family. Returns immediately if mirroring is not
// configured.
func mirrorIPFIXDispatcher(ch chan IPFIXUDPMsg) {
	var (
		ch4 = make(chan IPFIXUDPMsg, 1000)
		ch6 = make(chan IPFIXUDPMsg, 1000)
		msg IPFIXUDPMsg
	)
	if opts.IPFIXMirror == "" {
		return
	}
	for _, mirrorHostPort := range strings.Split(opts.IPFIXMirror, ";") {
		host, port, err := net.SplitHostPort(mirrorHostPort)
		if err != nil {
			logger.Fatalf("wrong ipfix mirror address %s", opts.IPFIXMirror)
		}
		portNo, _ := strconv.Atoi(port)
		dst := net.ParseIP(host)
		// IPv4 and IPv6 targets are served by separate worker pools.
		if dst.To4() != nil {
			go mirrorIPFIX(dst, portNo, ch4)
		} else {
			go mirrorIPFIX(dst, portNo, ch6)
		}
	}
	// Workers are up; let ipfixWorker start feeding ipfixMCh.
	ipfixMirrorEnabled = true
	logger.Println("ipfix mirror service is running ...")
	for {
		msg = <-ch
		if msg.raddr.IP.To4() != nil {
			ch4 <- msg
		} else {
			ch6 <- msg
		}
	}
}
// Revision note: the copy of ipfix.go below adds the previously missing "strings" import.
//: ----------------------------------------------------------------------------
//: Copyright (C) 2017 Verizon. All Rights Reserved.
//: All Rights Reserved
//:
//: file: ipfix.go
//: details: TODO
//: author: Mehrdad Arshad Rad
//: date: 02/01/2017
//:
//: Licensed under the Apache License, Version 2.0 (the "License");
//: you may not use this file except in compliance with the License.
//: You may obtain a copy of the License at
//:
//: http://www.apache.org/licenses/LICENSE-2.0
//:
//: Unless required by applicable law or agreed to in writing, software
//: distributed under the License is distributed on an "AS IS" BASIS,
//: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//: See the License for the specific language governing permissions and
//: limitations under the License.
//: ----------------------------------------------------------------------------
package main
import (
"net"
"strconv"
"strings"
"sync"
"time"
"git.edgecastcdn.net/vflow/ipfix"
"git.edgecastcdn.net/vflow/mirror"
)
// IPFIX holds the configuration and run state of the IPFIX collector
// service.
type IPFIX struct {
	port    int    // UDP port to listen on
	addr    string // listen address
	udpSize int    // receive buffer size per datagram
	workers int    // number of decoder goroutines
	stop    bool   // set by shutdown() to end the receive loop
}
// IPFIXUDPMsg is a single received UDP datagram together with the address
// of the exporter that sent it.
type IPFIXUDPMsg struct {
	raddr *net.UDPAddr // source address of the datagram
	body  []byte       // raw IPFIX payload
}
var (
	// ipfixUdpCh carries received datagrams from the listener to workers.
	ipfixUdpCh = make(chan IPFIXUDPMsg, 1000)
	// ipfixMCh carries copies of datagrams to the mirror dispatcher.
	ipfixMCh = make(chan IPFIXUDPMsg, 1000)
	// ipfixMirrorEnabled is set once the mirror dispatcher is running.
	ipfixMirrorEnabled bool
	// templates memory cache
	mCache ipfix.MemCache
)
// NewIPFIX constructs an IPFIX service from the global command-line
// options.
func NewIPFIX() *IPFIX {
	srv := IPFIX{
		port:    opts.IPFIXPort,
		udpSize: opts.IPFIXUDPSize,
		workers: opts.IPFIXWorkers,
	}
	return &srv
}
// run binds a UDP socket on the configured host:port, starts the worker
// pool and the mirror dispatcher, then feeds each received datagram into
// ipfixUdpCh until shutdown() sets i.stop.
func (i *IPFIX) run() {
	var wg sync.WaitGroup
	hostPort := net.JoinHostPort(i.addr, strconv.Itoa(i.port))
	udpAddr, err := net.ResolveUDPAddr("udp", hostPort)
	if err != nil {
		// Fixed: this error was previously discarded.
		logger.Fatalln(err)
	}
	conn, err := net.ListenUDP("udp", udpAddr)
	if err != nil {
		// Fixed: the error body was previously empty, so a failed listen
		// fell through to a nil-pointer dereference on conn below.
		logger.Fatalln(err)
	}
	for n := 0; n < i.workers; n++ {
		// Fixed: Add must be called before the goroutine starts; calling it
		// inside the goroutine races wg.Wait() below.
		wg.Add(1)
		go func() {
			defer wg.Done()
			ipfixWorker()
		}()
	}
	logger.Printf("ipfix is running (workers#: %d)", i.workers)
	mCache = ipfix.NewCache()
	go func() {
		mirrorIPFIXDispatcher(ipfixMCh)
	}()
	for !i.stop {
		b := make([]byte, i.udpSize)
		// 1e9 ns = 1s deadline, so the stop flag is re-checked periodically.
		conn.SetReadDeadline(time.Now().Add(1e9))
		n, raddr, err := conn.ReadFromUDP(b)
		if err != nil {
			// Deadline expiry or transient read error: just try again.
			continue
		}
		ipfixUdpCh <- IPFIXUDPMsg{raddr, b[:n]}
	}
	wg.Wait()
}
// shutdown asks the receive loop to stop and then closes the worker
// channel so the workers drain and exit.
//
// NOTE(review): i.stop is written here and read by run() without any
// synchronization — presumably tolerated because of the 1s read deadline,
// but it is formally a data race; confirm or switch to an atomic/channel.
func (i *IPFIX) shutdown() {
	i.stop = true
	logger.Println("stopped ipfix service gracefully ...")
	// Give the receive loop (1s read deadline) time to observe the flag.
	time.Sleep(1 * time.Second)
	logger.Println("ipfix has been shutdown")
	close(ipfixUdpCh)
}
func ipfixWorker() {
var (
msg IPFIXUDPMsg
ok bool
)
for {
if msg, ok = <-ipfixUdpCh; !ok {
break
}
if verbose {
logger.Printf("rcvd ipfix data from: %s, size: %d bytes",
msg.raddr, len(msg.body))
}
if ipfixMirrorEnabled {
ipfixMCh <- IPFIXUDPMsg{msg.raddr, append([]byte{}, msg.body...)}
}
d := ipfix.NewDecoder(msg.raddr.IP, msg.body)
if _, err := d.Decode(mCache); err != nil {
logger.Println(err)
}
}
}
// mirrorIPFIX forwards IPFIX payloads read from ch to dst:port over a raw
// socket, rewriting the IP header so the packet appears to come from the
// original exporter. It loops forever, returning only if a send fails.
//
// NOTE(review): payloads longer than len(packet)-ipHLen-8 are silently
// truncated by the copy below — confirm upstream datagram size limits.
func mirrorIPFIX(dst net.IP, port int, ch chan IPFIXUDPMsg) error {
	var (
		packet = make([]byte, 1500)
		msg    IPFIXUDPMsg
		pLen   int
		err    error
		ipHdr  []byte
		ipHLen int
		ipv4   bool
		ip     mirror.IP
	)
	conn, err := mirror.NewRawConn(dst)
	if err != nil {
		return err
	}
	// 55117 is the fixed source port used for mirrored datagrams.
	udp := mirror.UDP{55117, port, 0, 0}
	udpHdr := udp.Marshal()
	if dst.To4() != nil {
		ipv4 = true
	}
	// Build the appropriate IP header template once, up front.
	if ipv4 {
		ip = mirror.NewIPv4HeaderTpl(mirror.UDPProto)
		ipHdr = ip.Marshal()
		ipHLen = mirror.IPv4HLen
	} else {
		ip = mirror.NewIPv6HeaderTpl(mirror.UDPProto)
		ipHdr = ip.Marshal()
		ipHLen = mirror.IPv6HLen
	}
	for {
		msg = <-ch
		pLen = len(msg.body)
		// Patch per-packet fields: addresses and lengths.
		ip.SetAddrs(ipHdr, msg.raddr.IP, dst)
		ip.SetLen(ipHdr, pLen+mirror.UDPHLen)
		udp.SetLen(udpHdr, pLen)
		// IPv6 checksum mandatory
		if !ipv4 {
			udp.SetChecksum()
		}
		// Assemble: IP header | UDP header (8 bytes) | payload.
		copy(packet[0:ipHLen], ipHdr)
		copy(packet[ipHLen:ipHLen+8], udpHdr)
		copy(packet[ipHLen+8:], msg.body)
		if err = conn.Send(packet[0 : ipHLen+8+pLen]); err != nil {
			return err
		}
	}
}
// mirrorIPFIXDispatcher starts one mirrorIPFIX worker per configured
// "host:port" target (';'-separated in opts.IPFIXMirror), then routes each
// message from ch to the IPv4 or IPv6 worker channel depending on the
// exporter's address family. Returns immediately if mirroring is not
// configured.
func mirrorIPFIXDispatcher(ch chan IPFIXUDPMsg) {
	var (
		ch4 = make(chan IPFIXUDPMsg, 1000)
		ch6 = make(chan IPFIXUDPMsg, 1000)
		msg IPFIXUDPMsg
	)
	if opts.IPFIXMirror == "" {
		return
	}
	for _, mirrorHostPort := range strings.Split(opts.IPFIXMirror, ";") {
		host, port, err := net.SplitHostPort(mirrorHostPort)
		if err != nil {
			logger.Fatalf("wrong ipfix mirror address %s", opts.IPFIXMirror)
		}
		portNo, _ := strconv.Atoi(port)
		dst := net.ParseIP(host)
		// IPv4 and IPv6 targets are served by separate worker pools.
		if dst.To4() != nil {
			go mirrorIPFIX(dst, portNo, ch4)
		} else {
			go mirrorIPFIX(dst, portNo, ch6)
		}
	}
	// Workers are up; let ipfixWorker start feeding ipfixMCh.
	ipfixMirrorEnabled = true
	logger.Println("ipfix mirror service is running ...")
	for {
		msg = <-ch
		if msg.raddr.IP.To4() != nil {
			ch4 <- msg
		} else {
			ch6 <- msg
		}
	}
}
|
package replication
import (
"bytes"
"encoding/binary"
"encoding/hex"
"fmt"
. "github.com/siddontang/go-mysql/mysql"
"github.com/siddontang/go/hack"
"io"
"strconv"
"time"
)
// TableMapEvent represents a MySQL binlog TABLE_MAP_EVENT, which maps a
// numeric table ID to a schema/table name and describes the columns of
// subsequent row events.
type TableMapEvent struct {
	tableIDSize int // 4 or 6 bytes, per the format description event
	TableID     uint64
	Flags       uint16
	Schema      []byte // database name
	Table       []byte // table name
	ColumnCount uint64
	ColumnType  []byte   // one type byte per column
	ColumnMeta  []uint16 // per-column metadata, meaning depends on type
	//len = (ColumnCount + 7) / 8
	NullBitmap []byte
}
// Decode parses a TABLE_MAP_EVENT body: table ID, flags, schema and table
// names, column count/types, per-column metadata, and the NULL bitmap.
// Returns io.EOF if the trailing bitmap is not exactly the expected size.
func (e *TableMapEvent) Decode(data []byte) error {
	pos := 0
	e.TableID = FixedLengthInt(data[0:e.tableIDSize])
	pos += e.tableIDSize
	e.Flags = binary.LittleEndian.Uint16(data[pos:])
	pos += 2
	// Schema name: 1-byte length, then the name itself.
	schemaLength := data[pos]
	pos++
	e.Schema = data[pos : pos+int(schemaLength)]
	pos += int(schemaLength)
	//skip 0x00
	pos++
	// Table name: 1-byte length, then the name itself.
	tableLength := data[pos]
	pos++
	e.Table = data[pos : pos+int(tableLength)]
	pos += int(tableLength)
	//skip 0x00
	pos++
	var n int
	// Column count is a length-encoded integer.
	e.ColumnCount, _, n = LengthEncodedInt(data[pos:])
	pos += n
	e.ColumnType = data[pos : pos+int(e.ColumnCount)]
	pos += int(e.ColumnCount)
	var err error
	var metaData []byte
	// Metadata block is a length-encoded string; decode it per column type.
	if metaData, _, n, err = LengthEnodedString(data[pos:]); err != nil {
		return err
	}
	if err = e.decodeMeta(metaData); err != nil {
		return err
	}
	pos += n
	// The remainder must be exactly the NULL bitmap (one bit per column).
	if len(data[pos:]) != nullBitmapSize(int(e.ColumnCount)) {
		return io.EOF
	}
	e.NullBitmap = data[pos:]
	return nil
}
// isNullSet reports whether bit i is set in the NULL bitmap; bits are
// packed LSB-first within each byte.
func isNullSet(nullBitmap []byte, i int) bool {
	byteIdx := i / 8
	mask := byte(1) << (uint(i) % 8)
	return nullBitmap[byteIdx]&mask != 0
}
// nullBitmapSize returns the number of bytes needed to hold one bit per
// column, i.e. ceil(columnCount/8) for non-negative counts.
func nullBitmapSize(columnCount int) int {
	padded := columnCount + 7
	return padded / 8
}
// see mysql sql/log_event.h
/*
0 byte
MYSQL_TYPE_DECIMAL
MYSQL_TYPE_TINY
MYSQL_TYPE_SHORT
MYSQL_TYPE_LONG
MYSQL_TYPE_NULL
MYSQL_TYPE_TIMESTAMP
MYSQL_TYPE_LONGLONG
MYSQL_TYPE_INT24
MYSQL_TYPE_DATE
MYSQL_TYPE_TIME
MYSQL_TYPE_DATETIME
MYSQL_TYPE_YEAR
1 byte
MYSQL_TYPE_FLOAT
MYSQL_TYPE_DOUBLE
MYSQL_TYPE_BLOB
MYSQL_TYPE_GEOMETRY
//maybe
MYSQL_TYPE_TIME2
MYSQL_TYPE_DATETIME2
MYSQL_TYPE_TIMESTAMP2
2 byte
MYSQL_TYPE_VARCHAR
MYSQL_TYPE_BIT
MYSQL_TYPE_NEWDECIMAL
MYSQL_TYPE_VAR_STRING
MYSQL_TYPE_STRING
This enumeration value is only used internally and cannot exist in a binlog.
MYSQL_TYPE_NEWDATE
MYSQL_TYPE_ENUM
MYSQL_TYPE_SET
MYSQL_TYPE_TINY_BLOB
MYSQL_TYPE_MEDIUM_BLOB
MYSQL_TYPE_LONG_BLOB
*/
// decodeMeta parses the per-column metadata block. The number of metadata
// bytes consumed per column depends on its type (see the table in the
// comment above); types with no metadata get 0.
func (e *TableMapEvent) decodeMeta(data []byte) error {
	pos := 0
	e.ColumnMeta = make([]uint16, e.ColumnCount)
	for i, t := range e.ColumnType {
		switch t {
		case MYSQL_TYPE_STRING:
			// Big-endian pair: real type in the high byte, length in the low.
			var x uint16 = uint16(data[pos]) << 8 //real type
			x += uint16(data[pos+1])              //pack or field length
			e.ColumnMeta[i] = x
			pos += 2
		case MYSQL_TYPE_NEWDECIMAL:
			// Big-endian pair: precision in the high byte, scale in the low.
			var x uint16 = uint16(data[pos]) << 8 //precision
			x += uint16(data[pos+1])              //decimals
			e.ColumnMeta[i] = x
			pos += 2
		case MYSQL_TYPE_VAR_STRING,
			MYSQL_TYPE_VARCHAR,
			MYSQL_TYPE_BIT:
			// Two metadata bytes, little-endian.
			e.ColumnMeta[i] = binary.LittleEndian.Uint16(data[pos:])
			pos += 2
		case MYSQL_TYPE_BLOB,
			MYSQL_TYPE_DOUBLE,
			MYSQL_TYPE_FLOAT,
			MYSQL_TYPE_GEOMETRY:
			// Single metadata byte.
			e.ColumnMeta[i] = uint16(data[pos])
			pos++
		case MYSQL_TYPE_TIME2,
			MYSQL_TYPE_DATETIME2,
			MYSQL_TYPE_TIMESTAMP2:
			// Single metadata byte (fractional-seconds precision).
			e.ColumnMeta[i] = uint16(data[pos])
			pos++
		case MYSQL_TYPE_NEWDATE,
			MYSQL_TYPE_ENUM,
			MYSQL_TYPE_SET,
			MYSQL_TYPE_TINY_BLOB,
			MYSQL_TYPE_MEDIUM_BLOB,
			MYSQL_TYPE_LONG_BLOB:
			// These values are internal-only and never appear in a binlog.
			return fmt.Errorf("unsupport type in binlog %d", t)
		default:
			// No metadata for this type.
			e.ColumnMeta[i] = 0
		}
	}
	return nil
}
// Dump writes a human-readable description of the table map event to w.
func (e *TableMapEvent) Dump(w io.Writer) {
	fmt.Fprintf(w, "TableID: %d\nFlags: %d\nSchema: %s\nTable: %s\nColumn count: %d\n",
		e.TableID, e.Flags, e.Schema, e.Table, e.ColumnCount)
	fmt.Fprintf(w, "Column type: \n%s", hex.Dump(e.ColumnType))
	fmt.Fprintf(w, "NULL bitmap: \n%s", hex.Dump(e.NullBitmap))
	fmt.Fprintln(w)
}
// RowsEvent represents a WRITE/UPDATE/DELETE_ROWS binlog event carrying the
// images of changed rows.
type RowsEvent struct {
	// Version of the rows event: 0, 1 or 2.
	Version int

	// tableIDSize is the byte width of TableID, set by the caller.
	tableIDSize int
	// tables maps table IDs to their most recent TABLE_MAP event, used to
	// look up column types when decoding rows.
	tables      map[uint64]*TableMapEvent
	// needBitmap2 is true for UPDATE_ROWS_EVENTv1/v2, which carry a second
	// column bitmap.
	needBitmap2 bool

	TableID uint64

	Flags uint16

	// ExtraData is only present if Version == 2.
	ExtraData []byte

	// ColumnCount is stored as a length-encoded integer (lenenc_int).
	ColumnCount uint64

	// ColumnBitmap1 has (ColumnCount + 7) / 8 bytes.
	ColumnBitmap1 []byte

	// ColumnBitmap2 is present only for UPDATE_ROWS_EVENTv1 or v2;
	// len = (ColumnCount + 7) / 8.
	ColumnBitmap2 []byte

	// Rows holds decoded row values; each value is one of:
	// int64, float64, bool, []byte, string.
	Rows [][]interface{}
}
// Decode parses a rows event body: table ID, flags, optional v2 extra data,
// column count, column bitmap(s), and then the row images, which are decoded
// using the previously-seen table map event for this table ID.
func (e *RowsEvent) Decode(data []byte) error {
	pos := 0
	e.TableID = FixedLengthInt(data[0:e.tableIDSize])
	pos += e.tableIDSize

	e.Flags = binary.LittleEndian.Uint16(data[pos:])
	pos += 2

	if e.Version == 2 {
		// dataLen counts its own two length bytes.
		// NOTE(review): a corrupt dataLen < 2 would underflow the uint16
		// subtraction below and panic on the slice bound — confirm inputs
		// are validated upstream.
		dataLen := binary.LittleEndian.Uint16(data[pos:])
		pos += 2

		e.ExtraData = data[pos : pos+int(dataLen-2)]
		pos += int(dataLen - 2)
	}

	var n int
	e.ColumnCount, _, n = LengthEncodedInt(data[pos:])
	pos += n

	bitCount := nullBitmapSize(int(e.ColumnCount))
	e.ColumnBitmap1 = data[pos : pos+bitCount]
	pos += bitCount

	if e.needBitmap2 {
		e.ColumnBitmap2 = data[pos : pos+bitCount]
		pos += bitCount
	}

	tableEvent, ok := e.tables[e.TableID]
	if !ok {
		return fmt.Errorf("invalid table id %d, no correspond table map event", e.TableID)
	}

	var err error
	// Decode row images until the body is exhausted.
	// NOTE(review): if decodeRows returns n == 0 with a nil error this loop
	// makes no progress — confirm decodeRows always consumes bytes or errors.
	for len(data[pos:]) > 0 {
		if n, err = e.decodeRows(data[pos:], tableEvent); err != nil {
			return err
		}
		pos += n
	}

	return nil
}
// decodeRows decodes one row image from data using the column types and
// metadata of the given table map event, appends the decoded row to e.Rows,
// and returns the number of bytes consumed.
func (e *RowsEvent) decodeRows(data []byte, table *TableMapEvent) (int, error) {
	row := make([]interface{}, e.ColumnCount)

	pos := 0

	// Each row image is prefixed by a null bitmap, one bit per column.
	bitCount := nullBitmapSize(int(e.ColumnCount))
	nullBitmap := data[pos : pos+bitCount]
	pos += bitCount

	var n int
	var err error
	for i := 0; i < int(e.ColumnCount); i++ {
		if isNullSet(nullBitmap, i) {
			row[i] = nil
			continue
		}

		row[i], n, err = e.decodeValue(data[pos:], table.ColumnType[i], table.ColumnMeta[i])
		if err != nil {
			// BUG FIX: previously returned (0, nil), swallowing the error
			// and making the caller's loop spin forever on pos += 0.
			return 0, err
		}
		pos += n
	}

	// BUG FIX: the decoded row used to be discarded, leaving e.Rows forever
	// empty even though it is the event's documented payload.
	e.Rows = append(e.Rows, row)

	return pos, nil
}
// decodeValue decodes one column value of type tp from the head of data,
// using the table-map metadata meta. It returns the decoded value and the
// number of bytes consumed.
//
// see mysql sql/log_event.cc log_event_print_value
func (e *RowsEvent) decodeValue(data []byte, tp byte, meta uint16) (v interface{}, n int, err error) {
	var length uint16 = 0

	if tp == MYSQL_TYPE_STRING {
		if meta >= 256 {
			// The real type and length are packed into the two meta bytes.
			b0 := uint8(meta >> 8)
			b1 := uint8(meta & 0xFF)

			if b0&0x30 != 0x30 {
				length = uint16(b1) | (uint16((b0&0x30)^0x30) << 4)
				tp = byte(b0 | 0x30)
			} else {
				length = meta & 0xFF
			}
		} else {
			length = meta
		}
	}

	switch tp {
	case MYSQL_TYPE_NULL:
		return nil, 0, nil
	case MYSQL_TYPE_LONG:
		n = 4
		v = int64(binary.LittleEndian.Uint32(data))
	case MYSQL_TYPE_TINY:
		n = 1
		v = int64(data[0])
	case MYSQL_TYPE_SHORT:
		n = 2
		v = int64(binary.LittleEndian.Uint16(data))
	case MYSQL_TYPE_INT24:
		n = 3
		v = int64(FixedLengthInt(data[0:3]))
	case MYSQL_TYPE_LONGLONG:
		//em, maybe overflow for int64......
		n = 8
		v = int64(binary.LittleEndian.Uint64(data))
	case MYSQL_TYPE_NEWDECIMAL:
		prec := uint8(meta >> 8)
		scale := uint8(meta & 0xFF)

		var f string
		//return string first
		f, n, err = decodeDecimal(data, int(prec), int(scale))
		v = f
	case MYSQL_TYPE_FLOAT:
		// NOTE(review): this returns the raw IEEE-754 bit pattern as an
		// int64, not a float value — confirm whether math.Float32frombits
		// is wanted here (the Rows contract mentions float64).
		n = 4
		v = int64(binary.LittleEndian.Uint32(data))
	case MYSQL_TYPE_DOUBLE:
		// NOTE(review): raw bits; same concern as MYSQL_TYPE_FLOAT above.
		n = 8
		v = int64(binary.LittleEndian.Uint64(data))
	case MYSQL_TYPE_BIT:
		nbits := ((meta >> 8) * 8) + (meta & 0xFF)
		n = int(nbits+7) / 8

		//use int64 for bit
		v, err = decodeBit(data, int(nbits), int(n))
	case MYSQL_TYPE_TIMESTAMP:
		n = 4
		t := binary.LittleEndian.Uint32(data)
		v = time.Unix(int64(t), 0)
	case MYSQL_TYPE_TIMESTAMP2,
		MYSQL_TYPE_DATETIME2,
		MYSQL_TYPE_TIME2:
		// Fractional-second temporal types are not decoded yet (see
		// my_timestamp_from_binary / my_datetime_packed_from_binary /
		// my_time_packed_from_binary in sql/log_event.cc).
		// BUG FIX: these cases previously fell through with n == 0 and a
		// nil error, silently desynchronizing the row decoder; fail loudly.
		err = fmt.Errorf("unsupport type %d in binlog and don't know how to handle", tp)
	case MYSQL_TYPE_DATETIME:
		n = 8
		i64 := binary.LittleEndian.Uint64(data)
		// The packed value is DDDDDDDD * 1e6 + HHMMSS.
		d := i64 / 1000000
		t := i64 % 1000000
		v = time.Date(int(d/10000),
			time.Month((d%10000)/100),
			int(d%100),
			int(t/10000),
			int((t%10000)/100),
			int(t%100),
			0,
			time.UTC).Format(TimeFormat)
	case MYSQL_TYPE_TIME:
		n = 3
		i32 := uint32(FixedLengthInt(data[0:3]))
		if i32 == 0 {
			v = "00:00:00"
		} else {
			// NOTE(review): i32 is unsigned, so this sign check can never
			// trigger — negative TIME values are not handled. Confirm.
			sign := ""
			if i32 < 0 {
				sign = "-"
			}
			v = fmt.Sprintf("%s%02d:%02d:%02d", sign, i32/10000, (i32%10000)/100, i32%100)
		}
	case MYSQL_TYPE_YEAR:
		n = 1
		// NOTE(review): day 0 is normalized by time.Date to the last day of
		// the previous month (Dec 31 of the prior year) — confirm intent.
		v = time.Date(int(data[0])+1900,
			time.January, 0, 0, 0, 0, 0,
			time.UTC).Format(TimeFormat)
	case MYSQL_TYPE_ENUM:
		// BUG FIX: this case was previously empty (n == 0, silent desync).
		// Per log_event_print_value, an ENUM is packed in one or two bytes
		// depending on the member count; uint2korr is little-endian.
		l := meta & 0xFF
		switch l {
		case 1:
			v = int64(data[0])
			n = 1
		case 2:
			v = int64(binary.LittleEndian.Uint16(data))
			n = 2
		default:
			err = fmt.Errorf("unknown ENUM packlen=%d", l)
		}
	case MYSQL_TYPE_SET:
		// BUG FIX: previously empty. Per log_event_print_value, a SET is a
		// bit field stored in (meta & 0xFF) bytes.
		n = int(meta & 0xFF)
		v, err = decodeBit(data, n*8, n)
	case MYSQL_TYPE_BLOB:
		// BUG FIX: the pack-length cases were previously empty, leaving
		// n == 0 and v == nil for every BLOB; decode per the uintNkorr
		// (little-endian) reference in log_event_print_value.
		switch meta {
		case 1:
			blobLen := int(data[0])
			n = blobLen + 1
			v = data[1:n]
		case 2:
			blobLen := int(binary.LittleEndian.Uint16(data))
			n = blobLen + 2
			v = data[2:n]
		case 3:
			blobLen := int(FixedLengthInt(data[0:3]))
			n = blobLen + 3
			v = data[3:n]
		case 4:
			blobLen := int(binary.LittleEndian.Uint32(data))
			n = blobLen + 4
			v = data[4:n]
		default:
			err = fmt.Errorf("invalid blob packlen = %d", meta)
		}
	case MYSQL_TYPE_VARCHAR,
		MYSQL_TYPE_VAR_STRING:
		length = meta
		v, n = decodeString(data, length)
	case MYSQL_TYPE_STRING:
		v, n = decodeString(data, length)
	default:
		err = fmt.Errorf("unsupport type %d in binlog and don't know how to handle", tp)
	}

	return
}
// decodeString reads a length-prefixed byte string: columns declared shorter
// than 256 bytes use a 1-byte length prefix, longer ones a 2-byte
// little-endian prefix. It returns the string bytes and the total bytes
// consumed (prefix + payload).
func decodeString(data []byte, length uint16) (v []byte, n int) {
	if length >= 256 {
		strLen := int(binary.LittleEndian.Uint16(data[0:]))
		n = strLen + 2
		v = data[2:n]
		return
	}

	strLen := int(data[0])
	n = strLen + 1
	v = data[1:n]
	return
}
// digitsPerInteger is the number of decimal digits packed into each 4-byte
// group of MySQL's binary DECIMAL format.
const digitsPerInteger int = 9

// compressedBytes[d] is the number of bytes used to store d leftover decimal
// digits (0 <= d <= 9).
var compressedBytes = []int{0, 1, 1, 2, 2, 3, 3, 4, 4, 4}
// decodeDecimal decodes a MySQL binary DECIMAL(precision, decimals) value and
// returns it formatted as a string, together with the number of bytes read.
func decodeDecimal(data []byte, precision int, decimals int) (string, int, error) {
	//see python mysql replication and https://github.com/jeremycole/mysql_binlog
	pos := 0

	// Both the integral and fractional parts are split into full 9-digit
	// groups of 4 bytes each, plus one compressed group for leftover digits.
	integral := (precision - decimals)
	uncompIntegral := int(integral / digitsPerInteger)
	uncompFractional := int(decimals / digitsPerInteger)
	compIntegral := integral - (uncompIntegral * digitsPerInteger)
	compFractional := decimals - (uncompFractional * digitsPerInteger)

	binSize := uncompIntegral*4 + compressedBytes[compIntegral] +
		uncompFractional*4 + compressedBytes[compFractional]

	buf := make([]byte, binSize)
	copy(buf, data[:binSize])

	//must copy the data for later change (the sign bit below is mutated)
	data = buf

	// Support negative
	// The sign is encoded in the high bit of the first byte
	// But this bit can also be used in the value
	value := int64(data[pos])
	var res bytes.Buffer
	var mask int64 = 0
	if value&0x80 == 0 {
		// Negative numbers are stored with all bits inverted; XOR-ing each
		// group with mask (-1) below undoes that.
		mask = -1
		res.WriteString("-")
	}

	//clear sign
	data[0] ^= 0x80

	size := compressedBytes[compIntegral]
	if size > 0 {
		value = int64(FixedLengthInt(data[pos:pos+size])) ^ mask
		res.WriteString(strconv.FormatInt(value, 10))
		pos += size
	}

	for i := 0; i < uncompIntegral; i++ {
		value = int64(binary.BigEndian.Uint32(data[pos:])) ^ mask
		pos += 4
		res.WriteString(fmt.Sprintf("%09d", value))
	}

	res.WriteString(".")

	for i := 0; i < uncompFractional; i++ {
		value = int64(binary.BigEndian.Uint32(data[pos:])) ^ mask
		pos += 4
		res.WriteString(fmt.Sprintf("%09d", value))
	}

	size = compressedBytes[compFractional]
	if size > 0 {
		value = int64(FixedLengthInt(data[pos:pos+size])) ^ mask
		pos += size
		// Pad the leftover fractional digits with leading zeros.
		res.WriteString(fmt.Sprintf("%0*d", compFractional, value))
	}

	return hack.String(res.Bytes()), pos, nil
}
// decodeBit decodes a BIT column of nbits bits stored in length bytes and
// returns it as an int64.
func decodeBit(data []byte, nbits int, length int) (value int64, err error) {
	if nbits <= 1 {
		// A single bit is always stored in exactly one byte.
		if length != 1 {
			return 0, fmt.Errorf("invalid bit length %d", length)
		}
		return int64(data[0]), nil
	}

	switch length {
	case 1:
		value = int64(data[0])
	case 2:
		value = int64(binary.BigEndian.Uint16(data))
	case 3:
		value = int64(FixedLengthInt(data[0:3]))
	case 4:
		value = int64(binary.BigEndian.Uint32(data))
	case 5:
		value = int64(FixedLengthInt(data[0:5]))
	case 6:
		value = int64(FixedLengthInt(data[0:6]))
	case 7:
		value = int64(FixedLengthInt(data[0:7]))
	case 8:
		value = int64(binary.BigEndian.Uint64(data))
	default:
		err = fmt.Errorf("invalid bit length %d", length)
	}
	return
}
// Dump currently writes nothing for rows events.
func (e *RowsEvent) Dump(w io.Writer) {
}
// RowsQueryEvent carries the original SQL statement that produced the
// following rows events.
type RowsQueryEvent struct {
	Query []byte
}
// Decode parses a ROWS_QUERY event body. The first byte is a length field
// that is ignored here; the query text simply runs to the end of the body.
func (e *RowsQueryEvent) Decode(data []byte) error {
	//ignore length byte 1
	e.Query = data[1:]
	return nil
}
// Dump writes the original query text to w.
func (e *RowsQueryEvent) Dump(w io.Writer) {
	fmt.Fprintf(w, "Query: %s\n", e.Query)
	fmt.Fprintln(w)
}
Use int64 for ENUM values.
package replication
import (
	"bytes"
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"io"
	"math"
	"strconv"
	"time"

	. "github.com/siddontang/go-mysql/mysql"
	"github.com/siddontang/go/hack"
)
// TableMapEvent maps a table ID to its schema/table names and column type
// information; subsequent rows events reference it to decode row images.
type TableMapEvent struct {
	// tableIDSize is the byte width of TableID, set by the caller.
	tableIDSize int

	TableID uint64

	Flags uint16

	Schema []byte
	Table  []byte

	ColumnCount uint64
	ColumnType  []byte
	ColumnMeta  []uint16

	// NullBitmap has (ColumnCount + 7) / 8 bytes, one bit per column.
	NullBitmap []byte
}
// Decode parses the body of a TABLE_MAP event: table ID, flags,
// length-prefixed schema and table names, column count, per-column types and
// metadata, and the trailing NULL bitmap.
func (e *TableMapEvent) Decode(data []byte) error {
	pos := 0
	e.TableID = FixedLengthInt(data[0:e.tableIDSize])
	pos += e.tableIDSize

	e.Flags = binary.LittleEndian.Uint16(data[pos:])
	pos += 2

	schemaLength := data[pos]
	pos++

	e.Schema = data[pos : pos+int(schemaLength)]
	pos += int(schemaLength)

	//skip 0x00 terminator after the schema name
	pos++

	tableLength := data[pos]
	pos++

	e.Table = data[pos : pos+int(tableLength)]
	pos += int(tableLength)

	//skip 0x00 terminator after the table name
	pos++

	var n int
	e.ColumnCount, _, n = LengthEncodedInt(data[pos:])
	pos += n

	e.ColumnType = data[pos : pos+int(e.ColumnCount)]
	pos += int(e.ColumnCount)

	var err error
	var metaData []byte
	// Column metadata is stored as a length-encoded string.
	if metaData, _, n, err = LengthEnodedString(data[pos:]); err != nil {
		return err
	}

	if err = e.decodeMeta(metaData); err != nil {
		return err
	}

	pos += n

	// Exactly the NULL bitmap must remain; anything else means a short or
	// overlong event body.
	if len(data[pos:]) != nullBitmapSize(int(e.ColumnCount)) {
		return io.EOF
	}

	e.NullBitmap = data[pos:]

	return nil
}
// isNullSet reports whether column i is marked NULL in the bitmap. Bits are
// packed LSB-first within each byte.
func isNullSet(nullBitmap []byte, i int) bool {
	mask := byte(1 << (uint(i) % 8))
	return (nullBitmap[i/8] & mask) > 0
}
// nullBitmapSize returns the number of bytes needed to hold one bit per
// column, i.e. ceil(columnCount / 8).
func nullBitmapSize(columnCount int) int {
	// columnCount is already an int; the int() conversion in the original
	// was redundant.
	return (columnCount + 7) / 8
}
// see mysql sql/log_event.h
/*
0 byte
MYSQL_TYPE_DECIMAL
MYSQL_TYPE_TINY
MYSQL_TYPE_SHORT
MYSQL_TYPE_LONG
MYSQL_TYPE_NULL
MYSQL_TYPE_TIMESTAMP
MYSQL_TYPE_LONGLONG
MYSQL_TYPE_INT24
MYSQL_TYPE_DATE
MYSQL_TYPE_TIME
MYSQL_TYPE_DATETIME
MYSQL_TYPE_YEAR
1 byte
MYSQL_TYPE_FLOAT
MYSQL_TYPE_DOUBLE
MYSQL_TYPE_BLOB
MYSQL_TYPE_GEOMETRY
//maybe
MYSQL_TYPE_TIME2
MYSQL_TYPE_DATETIME2
MYSQL_TYPE_TIMESTAMP2
2 byte
MYSQL_TYPE_VARCHAR
MYSQL_TYPE_BIT
MYSQL_TYPE_NEWDECIMAL
MYSQL_TYPE_VAR_STRING
MYSQL_TYPE_STRING
This enumeration value is only used internally and cannot exist in a binlog.
MYSQL_TYPE_NEWDATE
MYSQL_TYPE_ENUM
MYSQL_TYPE_SET
MYSQL_TYPE_TINY_BLOB
MYSQL_TYPE_MEDIUM_BLOB
MYSQL_TYPE_LONG_BLOB
*/
// decodeMeta unpacks the per-column metadata block of a TABLE_MAP event into
// e.ColumnMeta (one uint16 per column). How many metadata bytes each column
// consumes depends on its type; see the size table in the comment above
// (mysql sql/log_event.h).
func (e *TableMapEvent) decodeMeta(data []byte) error {
	pos := 0
	e.ColumnMeta = make([]uint16, e.ColumnCount)
	for i, t := range e.ColumnType {
		switch t {
		case MYSQL_TYPE_STRING:
			var x uint16 = uint16(data[pos]) << 8 //real type
			x += uint16(data[pos+1])              //pack or field length
			e.ColumnMeta[i] = x
			pos += 2
		case MYSQL_TYPE_NEWDECIMAL:
			var x uint16 = uint16(data[pos]) << 8 //precision
			x += uint16(data[pos+1])              //decimals
			e.ColumnMeta[i] = x
			pos += 2
		case MYSQL_TYPE_VAR_STRING,
			MYSQL_TYPE_VARCHAR,
			MYSQL_TYPE_BIT:
			// Two little-endian metadata bytes.
			e.ColumnMeta[i] = binary.LittleEndian.Uint16(data[pos:])
			pos += 2
		case MYSQL_TYPE_BLOB,
			MYSQL_TYPE_DOUBLE,
			MYSQL_TYPE_FLOAT,
			MYSQL_TYPE_GEOMETRY:
			// One metadata byte (pack length / field size).
			e.ColumnMeta[i] = uint16(data[pos])
			pos++
		case MYSQL_TYPE_TIME2,
			MYSQL_TYPE_DATETIME2,
			MYSQL_TYPE_TIMESTAMP2:
			// One metadata byte.
			e.ColumnMeta[i] = uint16(data[pos])
			pos++
		case MYSQL_TYPE_NEWDATE,
			MYSQL_TYPE_ENUM,
			MYSQL_TYPE_SET,
			MYSQL_TYPE_TINY_BLOB,
			MYSQL_TYPE_MEDIUM_BLOB,
			MYSQL_TYPE_LONG_BLOB:
			// Per the comment above, these values are internal to the server
			// and cannot appear in a binlog.
			return fmt.Errorf("unsupport type in binlog %d", t)
		default:
			// Types with no metadata.
			e.ColumnMeta[i] = 0
		}
	}

	return nil
}
// Dump writes a human-readable description of the table map event to w.
func (e *TableMapEvent) Dump(w io.Writer) {
	header := fmt.Sprintf("TableID: %d\nFlags: %d\nSchema: %s\nTable: %s\nColumn count: %d\n",
		e.TableID, e.Flags, e.Schema, e.Table, e.ColumnCount)
	fmt.Fprintf(w, "%s", header)
	fmt.Fprintf(w, "Column type: \n%s", hex.Dump(e.ColumnType))
	fmt.Fprintf(w, "NULL bitmap: \n%s", hex.Dump(e.NullBitmap))
	fmt.Fprintln(w)
}
// RowsEvent represents a WRITE/UPDATE/DELETE_ROWS binlog event carrying the
// images of changed rows.
type RowsEvent struct {
	// Version of the rows event: 0, 1 or 2.
	Version int

	// tableIDSize is the byte width of TableID, set by the caller.
	tableIDSize int
	// tables maps table IDs to their most recent TABLE_MAP event, used to
	// look up column types when decoding rows.
	tables      map[uint64]*TableMapEvent
	// needBitmap2 is true for UPDATE_ROWS_EVENTv1/v2, which carry a second
	// column bitmap.
	needBitmap2 bool

	TableID uint64

	Flags uint16

	// ExtraData is only present if Version == 2.
	ExtraData []byte

	// ColumnCount is stored as a length-encoded integer (lenenc_int).
	ColumnCount uint64

	// ColumnBitmap1 has (ColumnCount + 7) / 8 bytes.
	ColumnBitmap1 []byte

	// ColumnBitmap2 is present only for UPDATE_ROWS_EVENTv1 or v2;
	// len = (ColumnCount + 7) / 8.
	ColumnBitmap2 []byte

	// Rows holds decoded row values; each value is one of:
	// int64, float64, bool, []byte, string.
	Rows [][]interface{}
}
// Decode parses a rows event body: table ID, flags, optional v2 extra data,
// column count, column bitmap(s), and then the row images, which are decoded
// using the previously-seen table map event for this table ID.
func (e *RowsEvent) Decode(data []byte) error {
	pos := 0
	e.TableID = FixedLengthInt(data[0:e.tableIDSize])
	pos += e.tableIDSize

	e.Flags = binary.LittleEndian.Uint16(data[pos:])
	pos += 2

	if e.Version == 2 {
		// dataLen counts its own two length bytes.
		// NOTE(review): a corrupt dataLen < 2 would underflow the uint16
		// subtraction below and panic on the slice bound — confirm inputs
		// are validated upstream.
		dataLen := binary.LittleEndian.Uint16(data[pos:])
		pos += 2

		e.ExtraData = data[pos : pos+int(dataLen-2)]
		pos += int(dataLen - 2)
	}

	var n int
	e.ColumnCount, _, n = LengthEncodedInt(data[pos:])
	pos += n

	bitCount := nullBitmapSize(int(e.ColumnCount))
	e.ColumnBitmap1 = data[pos : pos+bitCount]
	pos += bitCount

	if e.needBitmap2 {
		e.ColumnBitmap2 = data[pos : pos+bitCount]
		pos += bitCount
	}

	tableEvent, ok := e.tables[e.TableID]
	if !ok {
		return fmt.Errorf("invalid table id %d, no correspond table map event", e.TableID)
	}

	var err error
	// Decode row images until the body is exhausted.
	// NOTE(review): if decodeRows returns n == 0 with a nil error this loop
	// makes no progress — confirm decodeRows always consumes bytes or errors.
	for len(data[pos:]) > 0 {
		if n, err = e.decodeRows(data[pos:], tableEvent); err != nil {
			return err
		}
		pos += n
	}

	return nil
}
// decodeRows decodes one row image from data using the column types and
// metadata of the given table map event, appends the decoded row to e.Rows,
// and returns the number of bytes consumed.
func (e *RowsEvent) decodeRows(data []byte, table *TableMapEvent) (int, error) {
	row := make([]interface{}, e.ColumnCount)

	pos := 0

	// Each row image is prefixed by a null bitmap, one bit per column.
	bitCount := nullBitmapSize(int(e.ColumnCount))
	nullBitmap := data[pos : pos+bitCount]
	pos += bitCount

	var n int
	var err error
	for i := 0; i < int(e.ColumnCount); i++ {
		if isNullSet(nullBitmap, i) {
			row[i] = nil
			continue
		}

		row[i], n, err = e.decodeValue(data[pos:], table.ColumnType[i], table.ColumnMeta[i])
		if err != nil {
			// BUG FIX: previously returned (0, nil), swallowing the error
			// and making the caller's loop spin forever on pos += 0.
			return 0, err
		}
		pos += n
	}

	// BUG FIX: the decoded row used to be discarded, leaving e.Rows forever
	// empty even though it is the event's documented payload.
	e.Rows = append(e.Rows, row)

	return pos, nil
}
// decodeValue decodes one column value of type tp from the head of data,
// using the table-map metadata meta. It returns the decoded value and the
// number of bytes consumed.
//
// see mysql sql/log_event.cc log_event_print_value
func (e *RowsEvent) decodeValue(data []byte, tp byte, meta uint16) (v interface{}, n int, err error) {
	var length uint16 = 0

	if tp == MYSQL_TYPE_STRING {
		if meta >= 256 {
			// The real type and length are packed into the two meta bytes.
			b0 := uint8(meta >> 8)
			b1 := uint8(meta & 0xFF)

			if b0&0x30 != 0x30 {
				length = uint16(b1) | (uint16((b0&0x30)^0x30) << 4)
				tp = byte(b0 | 0x30)
			} else {
				length = meta & 0xFF
			}
		} else {
			length = meta
		}
	}

	switch tp {
	case MYSQL_TYPE_NULL:
		return nil, 0, nil
	case MYSQL_TYPE_LONG:
		n = 4
		v = int64(binary.LittleEndian.Uint32(data))
	case MYSQL_TYPE_TINY:
		n = 1
		v = int64(data[0])
	case MYSQL_TYPE_SHORT:
		n = 2
		v = int64(binary.LittleEndian.Uint16(data))
	case MYSQL_TYPE_INT24:
		n = 3
		v = int64(FixedLengthInt(data[0:3]))
	case MYSQL_TYPE_LONGLONG:
		//em, maybe overflow for int64......
		n = 8
		v = int64(binary.LittleEndian.Uint64(data))
	case MYSQL_TYPE_NEWDECIMAL:
		prec := uint8(meta >> 8)
		scale := uint8(meta & 0xFF)

		var f string
		//return string first
		f, n, err = decodeDecimal(data, int(prec), int(scale))
		v = f
	case MYSQL_TYPE_FLOAT:
		n = 4
		// BUG FIX: previously returned the raw IEEE-754 bit pattern as an
		// int64; decode it as a float so Rows actually carries float64
		// values as documented.
		v = float64(math.Float32frombits(binary.LittleEndian.Uint32(data)))
	case MYSQL_TYPE_DOUBLE:
		n = 8
		// BUG FIX: raw bits -> real float64, same as MYSQL_TYPE_FLOAT.
		v = math.Float64frombits(binary.LittleEndian.Uint64(data))
	case MYSQL_TYPE_BIT:
		nbits := ((meta >> 8) * 8) + (meta & 0xFF)
		n = int(nbits+7) / 8

		//use int64 for bit
		v, err = decodeBit(data, int(nbits), int(n))
	case MYSQL_TYPE_TIMESTAMP:
		n = 4
		t := binary.LittleEndian.Uint32(data)
		v = time.Unix(int64(t), 0)
	case MYSQL_TYPE_TIMESTAMP2,
		MYSQL_TYPE_DATETIME2,
		MYSQL_TYPE_TIME2:
		// Fractional-second temporal types are not decoded yet (see
		// my_timestamp_from_binary / my_datetime_packed_from_binary /
		// my_time_packed_from_binary in sql/log_event.cc).
		// BUG FIX: these cases previously fell through with n == 0 and a
		// nil error, silently desynchronizing the row decoder; fail loudly.
		err = fmt.Errorf("unsupport type %d in binlog and don't know how to handle", tp)
	case MYSQL_TYPE_DATETIME:
		n = 8
		i64 := binary.LittleEndian.Uint64(data)
		// The packed value is DDDDDDDD * 1e6 + HHMMSS.
		d := i64 / 1000000
		t := i64 % 1000000
		v = time.Date(int(d/10000),
			time.Month((d%10000)/100),
			int(d%100),
			int(t/10000),
			int((t%10000)/100),
			int(t%100),
			0,
			time.UTC).Format(TimeFormat)
	case MYSQL_TYPE_TIME:
		n = 3
		i32 := uint32(FixedLengthInt(data[0:3]))
		if i32 == 0 {
			v = "00:00:00"
		} else {
			// NOTE(review): i32 is unsigned, so this sign check can never
			// trigger — negative TIME values are not handled. Confirm.
			sign := ""
			if i32 < 0 {
				sign = "-"
			}
			v = fmt.Sprintf("%s%02d:%02d:%02d", sign, i32/10000, (i32%10000)/100, i32%100)
		}
	case MYSQL_TYPE_YEAR:
		n = 1
		// NOTE(review): day 0 is normalized by time.Date to the last day of
		// the previous month (Dec 31 of the prior year) — confirm intent.
		v = time.Date(int(data[0])+1900,
			time.January, 0, 0, 0, 0, 0,
			time.UTC).Format(TimeFormat)
	case MYSQL_TYPE_ENUM:
		// Per log_event_print_value, an ENUM is packed in one or two bytes
		// depending on the member count.
		l := meta & 0xFF
		switch l {
		case 1:
			v = int64(data[0])
			n = 1
		case 2:
			// BUG FIX: uint2korr in the reference C code is little-endian;
			// this previously used binary.BigEndian. Error string also
			// lowercased per Go convention.
			v = int64(binary.LittleEndian.Uint16(data))
			n = 2
		default:
			err = fmt.Errorf("unknown ENUM packlen=%d", l)
		}
	case MYSQL_TYPE_SET:
		// BUG FIX: a SET occupies (meta & 0xFF) bytes, i.e. that many * 8
		// bits (my_b_write_bit(file, ptr, (meta & 0xFF) * 8)); the byte
		// count was previously computed as ceil((meta & 0xFF) / 8).
		n = int(meta & 0xFF)
		v, err = decodeBit(data, n*8, n)
	case MYSQL_TYPE_BLOB:
		// BUG FIX: the pack-length cases were previously empty, leaving
		// n == 0 and v == nil for every BLOB; decode per the uintNkorr
		// (little-endian) reference in log_event_print_value.
		switch meta {
		case 1:
			blobLen := int(data[0])
			n = blobLen + 1
			v = data[1:n]
		case 2:
			blobLen := int(binary.LittleEndian.Uint16(data))
			n = blobLen + 2
			v = data[2:n]
		case 3:
			blobLen := int(FixedLengthInt(data[0:3]))
			n = blobLen + 3
			v = data[3:n]
		case 4:
			blobLen := int(binary.LittleEndian.Uint32(data))
			n = blobLen + 4
			v = data[4:n]
		default:
			err = fmt.Errorf("invalid blob packlen = %d", meta)
		}
	case MYSQL_TYPE_VARCHAR,
		MYSQL_TYPE_VAR_STRING:
		length = meta
		v, n = decodeString(data, length)
	case MYSQL_TYPE_STRING:
		v, n = decodeString(data, length)
	default:
		err = fmt.Errorf("unsupport type %d in binlog and don't know how to handle", tp)
	}

	return
}
// decodeString extracts a length-prefixed byte string from data. Columns
// declared shorter than 256 bytes use a 1-byte length prefix, longer ones a
// 2-byte little-endian prefix. n is the total bytes consumed.
func decodeString(data []byte, length uint16) (v []byte, n int) {
	if length < 256 {
		sz := int(data[0])
		return data[1 : 1+sz], sz + 1
	}

	sz := int(binary.LittleEndian.Uint16(data[0:]))
	return data[2 : 2+sz], sz + 2
}
// digitsPerInteger is the number of decimal digits packed into each 4-byte
// group of MySQL's binary DECIMAL format.
const digitsPerInteger int = 9

// compressedBytes[d] is the number of bytes used to store d leftover decimal
// digits (0 <= d <= 9).
var compressedBytes = []int{0, 1, 1, 2, 2, 3, 3, 4, 4, 4}
// decodeDecimal decodes a MySQL binary DECIMAL(precision, decimals) value and
// returns it formatted as a string, together with the number of bytes read.
func decodeDecimal(data []byte, precision int, decimals int) (string, int, error) {
	//see python mysql replication and https://github.com/jeremycole/mysql_binlog
	pos := 0

	// Both the integral and fractional parts are split into full 9-digit
	// groups of 4 bytes each, plus one compressed group for leftover digits.
	integral := (precision - decimals)
	uncompIntegral := int(integral / digitsPerInteger)
	uncompFractional := int(decimals / digitsPerInteger)
	compIntegral := integral - (uncompIntegral * digitsPerInteger)
	compFractional := decimals - (uncompFractional * digitsPerInteger)

	binSize := uncompIntegral*4 + compressedBytes[compIntegral] +
		uncompFractional*4 + compressedBytes[compFractional]

	buf := make([]byte, binSize)
	copy(buf, data[:binSize])

	//must copy the data for later change (the sign bit below is mutated)
	data = buf

	// Support negative
	// The sign is encoded in the high bit of the first byte
	// But this bit can also be used in the value
	value := int64(data[pos])
	var res bytes.Buffer
	var mask int64 = 0
	if value&0x80 == 0 {
		// Negative numbers are stored with all bits inverted; XOR-ing each
		// group with mask (-1) below undoes that.
		mask = -1
		res.WriteString("-")
	}

	//clear sign
	data[0] ^= 0x80

	size := compressedBytes[compIntegral]
	if size > 0 {
		value = int64(FixedLengthInt(data[pos:pos+size])) ^ mask
		res.WriteString(strconv.FormatInt(value, 10))
		pos += size
	}

	for i := 0; i < uncompIntegral; i++ {
		value = int64(binary.BigEndian.Uint32(data[pos:])) ^ mask
		pos += 4
		res.WriteString(fmt.Sprintf("%09d", value))
	}

	res.WriteString(".")

	for i := 0; i < uncompFractional; i++ {
		value = int64(binary.BigEndian.Uint32(data[pos:])) ^ mask
		pos += 4
		res.WriteString(fmt.Sprintf("%09d", value))
	}

	size = compressedBytes[compFractional]
	if size > 0 {
		value = int64(FixedLengthInt(data[pos:pos+size])) ^ mask
		pos += size
		// Pad the leftover fractional digits with leading zeros.
		res.WriteString(fmt.Sprintf("%0*d", compFractional, value))
	}

	return hack.String(res.Bytes()), pos, nil
}
// decodeBit decodes a BIT column of nbits bits stored in length bytes and
// returns it as an int64.
func decodeBit(data []byte, nbits int, length int) (value int64, err error) {
	// A single bit is always stored in exactly one byte.
	if nbits <= 1 {
		if length != 1 {
			return 0, fmt.Errorf("invalid bit length %d", length)
		}
		return int64(data[0]), nil
	}

	switch length {
	case 1:
		value = int64(data[0])
	case 2:
		value = int64(binary.BigEndian.Uint16(data))
	case 3:
		value = int64(FixedLengthInt(data[0:3]))
	case 4:
		value = int64(binary.BigEndian.Uint32(data))
	case 5:
		value = int64(FixedLengthInt(data[0:5]))
	case 6:
		value = int64(FixedLengthInt(data[0:6]))
	case 7:
		value = int64(FixedLengthInt(data[0:7]))
	case 8:
		value = int64(binary.BigEndian.Uint64(data))
	default:
		err = fmt.Errorf("invalid bit length %d", length)
	}
	return
}
// Dump currently writes nothing for rows events.
func (e *RowsEvent) Dump(w io.Writer) {
}
// RowsQueryEvent carries the original SQL statement that produced the
// following rows events.
type RowsQueryEvent struct {
	Query []byte
}
// Decode parses a ROWS_QUERY event body. The first byte is a length field
// that is ignored here; the query text simply runs to the end of the body.
func (e *RowsQueryEvent) Decode(data []byte) error {
	//ignore length byte 1
	e.Query = data[1:]
	return nil
}
// Dump writes the original query text to w.
func (e *RowsQueryEvent) Dump(w io.Writer) {
	fmt.Fprintf(w, "Query: %s\n", e.Query)
	fmt.Fprintln(w)
}
|
package upcloud
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
"log"
"time"
"github.com/hashicorp/go-cty/cty"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/UpCloudLtd/upcloud-go-api/upcloud"
"github.com/UpCloudLtd/upcloud-go-api/upcloud/request"
"github.com/UpCloudLtd/upcloud-go-api/upcloud/service"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
// resourceUpCloudServer defines the upcloud_server resource: its CRUD
// entry points and the full Terraform schema for a server, its network
// interfaces, storage devices, template and login credentials.
//
// FIX: corrected user-facing typos in three schema descriptions
// ("to be create" -> "to be created" twice, "`true` is a floating IP"
// -> "`true` if a floating IP").
func resourceUpCloudServer() *schema.Resource {
	return &schema.Resource{
		CreateContext: resourceUpCloudServerCreate,
		ReadContext:   resourceUpCloudServerRead,
		UpdateContext: resourceUpCloudServerUpdate,
		DeleteContext: resourceUpCloudServerDelete,
		Importer: &schema.ResourceImporter{
			StateContext: schema.ImportStatePassthroughContext,
		},
		Schema: map[string]*schema.Schema{
			"hostname": {
				Description:  "A valid domain name",
				Type:         schema.TypeString,
				Required:     true,
				ValidateFunc: validation.StringLenBetween(1, 128),
			},
			"title": {
				Description: "A short, informational description",
				Type:        schema.TypeString,
				Computed:    true,
			},
			"zone": {
				Description: "The zone in which the server will be hosted",
				Type:        schema.TypeString,
				Required:    true,
				ForceNew:    true,
			},
			"firewall": {
				Description: "Are firewall rules active for the server",
				Type:        schema.TypeBool,
				Optional:    true,
			},
			"metadata": {
				Description: "Is the metadata service active for the server",
				Type:        schema.TypeBool,
				Optional:    true,
			},
			"cpu": {
				Description:   "The number of CPU for the server",
				Type:          schema.TypeInt,
				Computed:      true,
				Optional:      true,
				ConflictsWith: []string{"plan"},
			},
			"mem": {
				Description:   "The size of memory for the server",
				Type:          schema.TypeInt,
				Optional:      true,
				Computed:      true,
				ConflictsWith: []string{"plan"},
			},
			"network_interface": {
				Type:        schema.TypeList,
				Description: "One or more blocks describing the network interfaces of the server.",
				Required:    true,
				ForceNew:    true,
				MinItems:    1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"ip_address_family": {
							Type:        schema.TypeString,
							Description: "The IP address type of this interface (one of `IPv4` or `IPv6`).",
							Optional:    true,
							ForceNew:    true,
							Default:     upcloud.IPAddressFamilyIPv4,
							ValidateDiagFunc: func(v interface{}, _ cty.Path) diag.Diagnostics {
								switch v.(string) {
								case upcloud.IPAddressFamilyIPv4, upcloud.IPAddressFamilyIPv6:
									return nil
								default:
									return diag.Diagnostics{diag.Diagnostic{
										Severity: diag.Error,
										Summary:  "'ip_address_family' has incorrect value",
										Detail: fmt.Sprintf(
											"'ip_address_family' must be one of %s or %s",
											upcloud.IPAddressFamilyIPv4,
											upcloud.IPAddressFamilyIPv6),
									}}
								}
							},
						},
						"ip_address": {
							Type:        schema.TypeString,
							Description: "The assigned IP address.",
							Computed:    true,
						},
						"ip_address_floating": {
							Type:        schema.TypeBool,
							Description: "`true` if a floating IP address is attached.",
							Computed:    true,
						},
						"mac_address": {
							Type:        schema.TypeString,
							Description: "The assigned MAC address.",
							Computed:    true,
						},
						"type": {
							Type:        schema.TypeString,
							Description: "Network interface type. For private network interfaces, a network must be specified with an existing network id.",
							Required:    true,
							ForceNew:    true,
							ValidateDiagFunc: func(v interface{}, _ cty.Path) diag.Diagnostics {
								switch v.(string) {
								case upcloud.NetworkTypePrivate, upcloud.NetworkTypeUtility, upcloud.NetworkTypePublic:
									return nil
								default:
									return diag.Diagnostics{diag.Diagnostic{
										Severity: diag.Error,
										Summary:  "'type' has incorrect value",
										Detail: fmt.Sprintf(
											"'type' must be one of %s, %s or %s",
											upcloud.NetworkTypePrivate,
											upcloud.NetworkTypePublic,
											upcloud.NetworkTypeUtility),
									}}
								}
							},
						},
						"network": {
							Type:        schema.TypeString,
							Description: "The unique ID of a network to attach this network to.",
							ForceNew:    true,
							Optional:    true,
							Computed:    true,
						},
						"source_ip_filtering": {
							Type:        schema.TypeBool,
							Description: "`true` if source IP should be filtered.",
							ForceNew:    true,
							Optional:    true,
							Default:     true,
						},
						"bootable": {
							Type:        schema.TypeBool,
							Description: "`true` if this interface should be used for network booting.",
							ForceNew:    true,
							Optional:    true,
							Default:     false,
						},
					},
				},
			},
			"user_data": {
				Description: "Defines URL for a server setup script, or the script body itself",
				Type:        schema.TypeString,
				Optional:    true,
				ForceNew:    true,
			},
			"plan": {
				Description: "The pricing plan used for the server",
				Type:        schema.TypeString,
				Optional:    true,
			},
			"storage_devices": {
				Description: "A list of storage devices associated with the server",
				Type:        schema.TypeSet,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"storage": {
							Description: "A valid storage UUID",
							Type:        schema.TypeString,
							Required:    true,
						},
						"address": {
							Description: "The device address the storage will be attached to. Specify only the bus name (ide/scsi/virtio) to auto-select next available address from that bus.",
							Type:        schema.TypeString,
							Computed:    true,
							Optional:    true,
						},
						"type": {
							Description:  "The device type the storage will be attached as",
							Type:         schema.TypeString,
							Computed:     true,
							Optional:     true,
							ValidateFunc: validation.StringInSlice([]string{"disk", "cdrom"}, false),
						},
					},
				},
			},
			"template": {
				Description: "",
				Type:        schema.TypeSet,
				// NOTE: might want to make this optional
				Required: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"address": {
							Description: "The device address the storage will be attached to. Specify only the bus name (ide/scsi/virtio) to auto-select next available address from that bus.",
							Type:        schema.TypeString,
							Computed:    true,
							ForceNew:    true,
							Optional:    true,
						},
						"size": {
							Description:  "The size of the storage in gigabytes",
							Type:         schema.TypeInt,
							Optional:     true,
							ValidateFunc: validation.IntBetween(10, 2048),
						},
						// will be set to value matching the plan
						"tier": {
							Description: "The storage tier to use",
							Type:        schema.TypeString,
							Computed:    true,
						},
						"title": {
							Description:  "A short, informative description",
							Type:         schema.TypeString,
							Optional:     true,
							Computed:     true,
							ValidateFunc: validation.StringLenBetween(0, 64),
						},
						"storage": {
							Description: "A valid storage UUID or template name",
							Type:        schema.TypeString,
							ForceNew:    true,
							Optional:    true,
						},
						"backup_rule": backupRuleSchema(),
					},
				},
			},
			"login": {
				Description: "Configure access credentials to the server",
				Type:        schema.TypeSet,
				ForceNew:    true,
				MaxItems:    1,
				Optional:    true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"user": {
							Description: "Username to be created to access the server",
							Type:        schema.TypeString,
							Required:    true,
						},
						"keys": {
							Description: "A list of ssh keys to access the server",
							Type:        schema.TypeList,
							Optional:    true,
							Elem:        &schema.Schema{Type: schema.TypeString},
						},
						"create_password": {
							Description: "Indicates a password should be created to allow access",
							Type:        schema.TypeBool,
							Optional:    true,
							Default:     false,
						},
						"password_delivery": {
							Description:  "The delivery method for the server’s root password",
							Type:         schema.TypeString,
							Optional:     true,
							Default:      "none",
							ValidateFunc: validation.StringInSlice([]string{"none", "email", "sms"}, false),
						},
					},
				},
			},
		},
	}
}
// resourceUpCloudServerCreate builds the server creation request from the
// resource data, creates the server, waits for it to reach the started
// state, and finally refreshes the Terraform state from the API.
func resourceUpCloudServerCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	client := meta.(*service.Service)

	createReq, err := buildServerOpts(d, meta)
	if err != nil {
		return diag.FromErr(err)
	}

	srv, err := client.CreateServer(createReq)
	if err != nil {
		return diag.FromErr(err)
	}

	d.SetId(srv.UUID)
	log.Printf("[INFO] Server %s with UUID %s created", srv.Title, srv.UUID)

	// Block until the server is actually running; provisioning can be slow.
	waitReq := &request.WaitForServerStateRequest{
		UUID:         srv.UUID,
		DesiredState: upcloud.ServerStateStarted,
		Timeout:      time.Minute * 25,
	}
	if _, err = client.WaitForServerState(waitReq); err != nil {
		return diag.FromErr(err)
	}

	return resourceUpCloudServerRead(ctx, d, meta)
}
// resourceUpCloudServerRead refreshes Terraform state from the UpCloud API:
// basic server attributes, network interfaces, storage devices and the
// provisioner connection info.
func resourceUpCloudServerRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	client := meta.(*service.Service)
	var diags diag.Diagnostics
	r := &request.GetServerDetailsRequest{
		UUID: d.Id(),
	}
	server, err := client.GetServerDetails(r)
	if err != nil {
		return diag.FromErr(err)
	}
	d.Set("hostname", server.Hostname)
	d.Set("title", server.Title)
	d.Set("zone", server.Zone)
	d.Set("cpu", server.CoreNumber)
	d.Set("mem", server.MemoryAmount)
	networkInterfaces := []map[string]interface{}{}
	// Public IPv4 address, if any, used below for the provisioner host.
	var connIP string
	for _, iface := range server.Networking.Interfaces {
		ni := make(map[string]interface{}, 0)
		// NOTE(review): only the first IP address of each interface is
		// mirrored into state — confirm interfaces never carry more.
		ni["ip_address_family"] = iface.IPAddresses[0].Family
		ni["ip_address"] = iface.IPAddresses[0].Address
		if !iface.IPAddresses[0].Floating.Empty() {
			ni["ip_address_floating"] = iface.IPAddresses[0].Floating.Bool()
		}
		ni["mac_address"] = iface.MAC
		ni["network"] = iface.Network
		ni["type"] = iface.Type
		// Boolean fields use the SDK's tri-state type; only copy them into
		// state when the API actually returned a value.
		if !iface.Bootable.Empty() {
			ni["bootable"] = iface.Bootable.Bool()
		}
		if !iface.SourceIPFiltering.Empty() {
			ni["source_ip_filtering"] = iface.SourceIPFiltering.Bool()
		}
		networkInterfaces = append(networkInterfaces, ni)
		if iface.Type == upcloud.NetworkTypePublic &&
			iface.IPAddresses[0].Family == upcloud.IPAddressFamilyIPv4 {
			connIP = iface.IPAddresses[0].Address
		}
	}
	if len(networkInterfaces) > 0 {
		d.Set("network_interface", networkInterfaces)
	}
	storageDevices := []map[string]interface{}{}
	log.Printf("[DEBUG] Configured storage devices in state: %+v", d.Get("storage_devices"))
	log.Printf("[DEBUG] Actual storage devices on server: %v", server.StorageDevices)
	for _, serverStorage := range server.StorageDevices {
		// the template is managed within the server
		// NOTE(review): the template schema does not declare an "id"
		// attribute, so "template.0.id" may always be empty and this branch
		// may never match — confirm against the schema definition.
		if serverStorage.UUID == d.Get("template.0.id") {
			d.Set("template", []map[string]interface{}{{
				"address": serverStorage.Address,
				"id":      serverStorage.UUID,
				"size":    serverStorage.Size,
				"title":   serverStorage.Title,
				"storage": d.Get("template.0.storage"),
				// FIXME: backupRule cannot be derived from server.storageDevices payload, will not sync if changed elsewhere
				"backup_rule": d.Get("template.0.backup_rule"),
				// TODO: add when go-api updated ... "tier": serverStorage.Tier,
			}})
		} else {
			storageDevices = append(storageDevices, map[string]interface{}{
				"address": serverStorage.Address,
				"storage": serverStorage.UUID,
				"type":    serverStorage.Type,
			})
		}
	}
	d.Set("storage_devices", storageDevices)
	// Initialize the connection information.
	d.SetConnInfo(map[string]string{
		"host":     connIP,
		"password": "",
		"type":     "ssh",
		"user":     "root",
	})
	return diags
}
// resourceUpCloudServerUpdate applies configuration changes that require the
// server to be stopped, restarts it, and re-reads the resulting state.
func resourceUpCloudServerUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	svc := meta.(*service.Service)

	// The modifications below require the server to be powered off.
	if err := verifyServerStopped(d.Id(), meta); err != nil {
		return diag.FromErr(err)
	}

	modifyReq := &request.ModifyServerRequest{
		UUID:     d.Id(),
		Hostname: d.Get("hostname").(string),
	}
	modifyReq.Firewall = "off"
	if d.Get("firewall").(bool) {
		modifyReq.Firewall = "on"
	}
	// A preset plan takes precedence over individual CPU/memory settings.
	if plan, ok := d.GetOk("plan"); ok {
		modifyReq.Plan = plan.(string)
	} else {
		modifyReq.CoreNumber = d.Get("cpu").(int)
		modifyReq.MemoryAmount = d.Get("mem").(int)
	}

	if _, err := svc.ModifyServer(modifyReq); err != nil {
		return diag.FromErr(err)
	}
	if err := verifyServerStarted(d.Id(), meta); err != nil {
		return diag.FromErr(err)
	}
	return resourceUpCloudServerRead(ctx, d, meta)
}
// resourceUpCloudServerDelete stops and deletes the server and, when a
// template block is present in state, also deletes the cloned root disk.
func resourceUpCloudServerDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	client := meta.(*service.Service)
	var diags diag.Diagnostics
	// Verify server is stopped before deletion
	if err := verifyServerStopped(d.Id(), meta); err != nil {
		return diag.FromErr(err)
	}
	// Delete server
	deleteServerRequest := &request.DeleteServerRequest{
		UUID: d.Id(),
	}
	log.Printf("[INFO] Deleting server (server UUID: %s)", d.Id())
	if err := client.DeleteServer(deleteServerRequest); err != nil {
		return diag.FromErr(err)
	}
	// BUG FIX: d.Get on a TypeSet attribute returns *schema.Set, never
	// []map[string]interface{}; the old assertion panicked whenever a
	// template block existed in state.
	if tmpl, ok := d.GetOk("template"); ok {
		if list := tmpl.(*schema.Set).List(); len(list) > 0 {
			m := list[0].(map[string]interface{})
			// NOTE(review): "id" is written by the read function but is not
			// declared in the template schema — confirm it is populated.
			if id, ok := m["id"].(string); ok && id != "" {
				// Delete server root disk
				deleteStorageRequest := &request.DeleteStorageRequest{
					UUID: id,
				}
				log.Printf("[INFO] Deleting server storage (storage UUID: %s)", deleteStorageRequest.UUID)
				if err := client.DeleteStorage(deleteStorageRequest); err != nil {
					return diag.FromErr(err)
				}
			}
		}
	}
	return diags
}
// buildServerOpts assembles a CreateServerRequest from the resource data:
// basic attributes, optional login credentials, the cloned template disk,
// any extra storage devices and the network interfaces.
func buildServerOpts(d *schema.ResourceData, meta interface{}) (*request.CreateServerRequest, error) {
	r := &request.CreateServerRequest{
		Zone:     d.Get("zone").(string),
		Hostname: d.Get("hostname").(string),
		Title:    fmt.Sprintf("%s (managed by terraform)", d.Get("hostname").(string)),
	}
	if attr, ok := d.GetOk("firewall"); ok {
		if attr.(bool) {
			r.Firewall = "on"
		} else {
			r.Firewall = "off"
		}
	}
	if attr, ok := d.GetOk("metadata"); ok {
		if attr.(bool) {
			r.Metadata = upcloud.True
		} else {
			r.Metadata = upcloud.False
		}
	}
	if attr, ok := d.GetOk("cpu"); ok {
		r.CoreNumber = attr.(int)
	}
	if attr, ok := d.GetOk("mem"); ok {
		r.MemoryAmount = attr.(int)
	}
	if attr, ok := d.GetOk("user_data"); ok {
		r.UserData = attr.(string)
	}
	if attr, ok := d.GetOk("plan"); ok {
		r.Plan = attr.(string)
	}
	if login, ok := d.GetOk("login"); ok {
		loginOpts, deliveryMethod, err := buildLoginOpts(login, meta)
		if err != nil {
			return nil, err
		}
		r.LoginUser = loginOpts
		r.PasswordDelivery = deliveryMethod
	}
	if template, ok := d.GetOk("template"); ok {
		// BUG FIX: a TypeSet value is a *schema.Set, not a plain map; the
		// previous assertion panicked on every create with a template block.
		if list := template.(*schema.Set).List(); len(list) > 0 {
			tmpl := list[0].(map[string]interface{})
			r.StorageDevices = append(
				r.StorageDevices,
				request.CreateServerStorageDevice{
					Action:  "clone",
					Address: tmpl["address"].(string),
					Size:    tmpl["size"].(int),
					Storage: tmpl["storage"].(string),
					Title:   tmpl["title"].(string),
				},
			)
		}
		// TODO: handle backup_rule
	}
	if storageDevices, ok := d.GetOk("storage_devices"); ok {
		// BUG FIX: same *schema.Set shape applies to this TypeSet attribute.
		for _, sd := range storageDevices.(*schema.Set).List() {
			device := sd.(map[string]interface{})
			r.StorageDevices = append(r.StorageDevices, request.CreateServerStorageDevice{
				Address: device["address"].(string),
				Type:    device["type"].(string),
				Storage: device["storage"].(string),
			})
		}
	}
	networking, err := buildNetworkOpts(d, meta)
	if err != nil {
		return nil, err
	}
	r.Networking = &request.CreateServerNetworking{
		Interfaces: networking,
	}
	return r, nil
}
// buildNetworkOpts translates every configured network_interface block into
// a CreateServerInterface entry, preserving the configured order.
func buildNetworkOpts(d *schema.ResourceData, meta interface{}) ([]request.CreateServerInterface, error) {
	count := d.Get("network_interface.#").(int)
	interfaces := []request.CreateServerInterface{}
	for idx := 0; idx < count; idx++ {
		prefix := fmt.Sprintf("network_interface.%d.", idx)
		entry := request.CreateServerInterface{
			IPAddresses: []request.CreateServerIPAddress{
				{Family: d.Get(prefix + "ip_address_family").(string)},
			},
			Type:              d.Get(prefix + "type").(string),
			SourceIPFiltering: upcloud.FromBool(d.Get(prefix + "source_ip_filtering").(bool)),
			Bootable:          upcloud.FromBool(d.Get(prefix + "bootable").(bool)),
		}
		// Attach to an explicit network only when one was configured.
		if network, ok := d.GetOk(prefix + "network"); ok {
			entry.Network = network.(string)
		}
		interfaces = append(interfaces, entry)
	}
	return interfaces, nil
}
// buildLoginOpts converts the single "login" block into a LoginUser request
// plus the chosen password delivery method ("none", "email" or "sms").
func buildLoginOpts(v interface{}, meta interface{}) (*request.LoginUser, string, error) {
	block := v.(*schema.Set).List()[0].(map[string]interface{})

	login := &request.LoginUser{
		Username: block["user"].(string),
	}

	// The API expects "yes"/"no" instead of a standard boolean.
	if block["create_password"].(bool) {
		login.CreatePassword = "yes"
	} else {
		login.CreatePassword = "no"
	}

	// Collect the configured SSH keys.
	sshKeys := make([]string, 0)
	for _, raw := range block["keys"].([]interface{}) {
		sshKeys = append(sshKeys, raw.(string))
	}
	login.SSHKeys = sshKeys

	return login, block["password_delivery"].(string), nil
}
// verifyServerStopped ensures the server identified by id is powered off,
// issuing a soft stop (hard stop after the timeout) and waiting as needed.
func verifyServerStopped(id string, meta interface{}) error {
	svc := meta.(*service.Service)

	details, err := svc.GetServerDetails(&request.GetServerDetailsRequest{UUID: id})
	if err != nil {
		return err
	}
	if details.State == upcloud.ServerStateStopped {
		return nil
	}

	// Soft stop with 2 minute timeout, after which hard stop occurs
	log.Printf("[INFO] Stopping server (server UUID: %s)", id)
	if _, err := svc.StopServer(&request.StopServerRequest{
		UUID:     id,
		StopType: "soft",
		Timeout:  time.Minute * 2,
	}); err != nil {
		return err
	}
	_, err = svc.WaitForServerState(&request.WaitForServerStateRequest{
		UUID:         id,
		DesiredState: upcloud.ServerStateStopped,
		Timeout:      time.Minute * 5,
	})
	return err
}
// verifyServerStarted ensures the server identified by id is running,
// starting it and waiting for the state transition when necessary.
func verifyServerStarted(id string, meta interface{}) error {
	svc := meta.(*service.Service)

	details, err := svc.GetServerDetails(&request.GetServerDetailsRequest{UUID: id})
	if err != nil {
		return err
	}
	if details.State == upcloud.ServerStateStarted {
		return nil
	}

	log.Printf("[INFO] Starting server (server UUID: %s)", id)
	if _, err := svc.StartServer(&request.StartServerRequest{
		UUID:    id,
		Timeout: time.Minute * 2,
	}); err != nil {
		return err
	}
	_, err = svc.WaitForServerState(&request.WaitForServerStateRequest{
		UUID:         id,
		DesiredState: upcloud.ServerStateStarted,
		Timeout:      time.Minute * 5,
	})
	return err
}
fix: should pass `storage` when creating a template
package upcloud
import (
"context"
"fmt"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
"log"
"time"
"github.com/hashicorp/go-cty/cty"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/UpCloudLtd/upcloud-go-api/upcloud"
"github.com/UpCloudLtd/upcloud-go-api/upcloud/request"
"github.com/UpCloudLtd/upcloud-go-api/upcloud/service"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
// resourceUpCloudServer defines the upcloud_server resource: its CRUD entry
// points, importer and configuration schema.
func resourceUpCloudServer() *schema.Resource {
	return &schema.Resource{
		CreateContext: resourceUpCloudServerCreate,
		ReadContext:   resourceUpCloudServerRead,
		UpdateContext: resourceUpCloudServerUpdate,
		DeleteContext: resourceUpCloudServerDelete,
		Importer: &schema.ResourceImporter{
			StateContext: schema.ImportStatePassthroughContext,
		},
		Schema: map[string]*schema.Schema{
			"hostname": {
				Description:  "A valid domain name",
				Type:         schema.TypeString,
				Required:     true,
				ValidateFunc: validation.StringLenBetween(1, 128),
			},
			"title": {
				Description: "A short, informational description",
				Type:        schema.TypeString,
				Computed:    true,
			},
			"zone": {
				Description: "The zone in which the server will be hosted",
				Type:        schema.TypeString,
				Required:    true,
				ForceNew:    true,
			},
			"firewall": {
				Description: "Are firewall rules active for the server",
				Type:        schema.TypeBool,
				Optional:    true,
			},
			"metadata": {
				Description: "Is the metadata service active for the server",
				Type:        schema.TypeBool,
				Optional:    true,
			},
			"cpu": {
				Description:   "The number of CPU for the server",
				Type:          schema.TypeInt,
				Computed:      true,
				Optional:      true,
				ConflictsWith: []string{"plan"},
			},
			"mem": {
				Description:   "The size of memory for the server",
				Type:          schema.TypeInt,
				Optional:      true,
				Computed:      true,
				ConflictsWith: []string{"plan"},
			},
			"network_interface": {
				Type:        schema.TypeList,
				Description: "One or more blocks describing the network interfaces of the server.",
				Required:    true,
				ForceNew:    true,
				MinItems:    1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"ip_address_family": {
							Type:        schema.TypeString,
							Description: "The IP address type of this interface (one of `IPv4` or `IPv6`).",
							Optional:    true,
							ForceNew:    true,
							Default:     upcloud.IPAddressFamilyIPv4,
							ValidateDiagFunc: func(v interface{}, _ cty.Path) diag.Diagnostics {
								switch v.(string) {
								case upcloud.IPAddressFamilyIPv4, upcloud.IPAddressFamilyIPv6:
									return nil
								default:
									return diag.Diagnostics{diag.Diagnostic{
										Severity: diag.Error,
										Summary:  "'ip_address_family' has incorrect value",
										Detail: fmt.Sprintf(
											"'ip_address_family' must be one of %s or %s",
											upcloud.IPAddressFamilyIPv4,
											upcloud.IPAddressFamilyIPv6),
									}}
								}
							},
						},
						"ip_address": {
							Type:        schema.TypeString,
							Description: "The assigned IP address.",
							Computed:    true,
						},
						"ip_address_floating": {
							Type:        schema.TypeBool,
							Description: "`true` is a floating IP address is attached.",
							Computed:    true,
						},
						"mac_address": {
							Type:        schema.TypeString,
							Description: "The assigned MAC address.",
							Computed:    true,
						},
						"type": {
							Type:        schema.TypeString,
							Description: "Network interface type. For private network interfaces, a network must be specified with an existing network id.",
							Required:    true,
							ForceNew:    true,
							ValidateDiagFunc: func(v interface{}, _ cty.Path) diag.Diagnostics {
								switch v.(string) {
								case upcloud.NetworkTypePrivate, upcloud.NetworkTypeUtility, upcloud.NetworkTypePublic:
									return nil
								default:
									return diag.Diagnostics{diag.Diagnostic{
										Severity: diag.Error,
										Summary:  "'type' has incorrect value",
										Detail: fmt.Sprintf(
											"'type' must be one of %s, %s or %s",
											upcloud.NetworkTypePrivate,
											upcloud.NetworkTypePublic,
											upcloud.NetworkTypeUtility),
									}}
								}
							},
						},
						"network": {
							Type:        schema.TypeString,
							Description: "The unique ID of a network to attach this network to.",
							ForceNew:    true,
							Optional:    true,
							Computed:    true,
						},
						"source_ip_filtering": {
							Type:        schema.TypeBool,
							Description: "`true` if source IP should be filtered.",
							ForceNew:    true,
							Optional:    true,
							Default:     true,
						},
						"bootable": {
							Type:        schema.TypeBool,
							Description: "`true` if this interface should be used for network booting.",
							ForceNew:    true,
							Optional:    true,
							Default:     false,
						},
					},
				},
			},
			"user_data": {
				Description: "Defines URL for a server setup script, or the script body itself",
				Type:        schema.TypeString,
				Optional:    true,
				ForceNew:    true,
			},
			"plan": {
				Description: "The pricing plan used for the server",
				Type:        schema.TypeString,
				Optional:    true,
			},
			"storage_devices": {
				Description: "A list of storage devices associated with the server",
				Type:        schema.TypeSet,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"storage": {
							Description: "A valid storage UUID",
							Type:        schema.TypeString,
							Required:    true,
						},
						"address": {
							Description: "The device address the storage will be attached to. Specify only the bus name (ide/scsi/virtio) to auto-select next available address from that bus.",
							Type:        schema.TypeString,
							Computed:    true,
							Optional:    true,
						},
						"type": {
							Description:  "The device type the storage will be attached as",
							Type:         schema.TypeString,
							Computed:     true,
							Optional:     true,
							ValidateFunc: validation.StringInSlice([]string{"disk", "cdrom"}, false),
						},
					},
				},
			},
			"template": {
				// BUG FIX: the description was empty, producing blank docs.
				Description: "Block describing the template (root disk) the server is cloned from",
				Type:        schema.TypeSet,
				// NOTE: might want to make this optional
				Required: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"address": {
							Description: "The device address the storage will be attached to. Specify only the bus name (ide/scsi/virtio) to auto-select next available address from that bus.",
							Type:        schema.TypeString,
							Computed:    true,
							ForceNew:    true,
							Optional:    true,
						},
						"size": {
							Description:  "The size of the storage in gigabytes",
							Type:         schema.TypeInt,
							Optional:     true,
							ValidateFunc: validation.IntBetween(10, 2048),
						},
						// will be set to value matching the plan
						"tier": {
							Description: "The storage tier to use",
							Type:        schema.TypeString,
							Computed:    true,
						},
						"title": {
							Description:  "A short, informative description",
							Type:         schema.TypeString,
							Optional:     true,
							Computed:     true,
							ValidateFunc: validation.StringLenBetween(0, 64),
						},
						"storage": {
							Description: "A valid storage UUID or template name",
							Type:        schema.TypeString,
							ForceNew:    true,
							// BUG FIX: this attribute declared both Optional
							// and Required, which is an invalid schema. The
							// clone source is mandatory, so keep Required.
							Required: true,
						},
						"backup_rule": backupRuleSchema(),
					},
				},
			},
			"login": {
				Description: "Configure access credentials to the server",
				Type:        schema.TypeSet,
				ForceNew:    true,
				MaxItems:    1,
				Optional:    true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"user": {
							Description: "Username to be create to access the server",
							Type:        schema.TypeString,
							Required:    true,
						},
						"keys": {
							Description: "A list of ssh keys to access the server",
							Type:        schema.TypeList,
							Optional:    true,
							Elem:        &schema.Schema{Type: schema.TypeString},
						},
						"create_password": {
							Description: "Indicates a password should be create to allow access",
							Type:        schema.TypeBool,
							Optional:    true,
							Default:     false,
						},
						"password_delivery": {
							Description:  "The delivery method for the server’s root password",
							Type:         schema.TypeString,
							Optional:     true,
							Default:      "none",
							ValidateFunc: validation.StringInSlice([]string{"none", "email", "sms"}, false),
						},
					},
				},
			},
		},
	}
}
// resourceUpCloudServerCreate provisions a new server, waits until it has
// reached the "started" state, and then refreshes the Terraform state.
func resourceUpCloudServerCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	svc := meta.(*service.Service)

	createReq, err := buildServerOpts(d, meta)
	if err != nil {
		return diag.FromErr(err)
	}

	created, err := svc.CreateServer(createReq)
	if err != nil {
		return diag.FromErr(err)
	}
	d.SetId(created.UUID)
	log.Printf("[INFO] Server %s with UUID %s created", created.Title, created.UUID)

	// Creation is asynchronous; block until the server is actually running.
	waitReq := &request.WaitForServerStateRequest{
		UUID:         created.UUID,
		DesiredState: upcloud.ServerStateStarted,
		Timeout:      time.Minute * 25,
	}
	if _, err = svc.WaitForServerState(waitReq); err != nil {
		return diag.FromErr(err)
	}

	return resourceUpCloudServerRead(ctx, d, meta)
}
// resourceUpCloudServerRead refreshes Terraform state from the UpCloud API:
// basic server attributes, network interfaces, storage devices and the
// provisioner connection info.
func resourceUpCloudServerRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	client := meta.(*service.Service)
	var diags diag.Diagnostics
	r := &request.GetServerDetailsRequest{
		UUID: d.Id(),
	}
	server, err := client.GetServerDetails(r)
	if err != nil {
		return diag.FromErr(err)
	}
	d.Set("hostname", server.Hostname)
	d.Set("title", server.Title)
	d.Set("zone", server.Zone)
	d.Set("cpu", server.CoreNumber)
	d.Set("mem", server.MemoryAmount)
	networkInterfaces := []map[string]interface{}{}
	// Public IPv4 address, if any, used below for the provisioner host.
	var connIP string
	for _, iface := range server.Networking.Interfaces {
		ni := make(map[string]interface{}, 0)
		// NOTE(review): only the first IP address of each interface is
		// mirrored into state — confirm interfaces never carry more.
		ni["ip_address_family"] = iface.IPAddresses[0].Family
		ni["ip_address"] = iface.IPAddresses[0].Address
		if !iface.IPAddresses[0].Floating.Empty() {
			ni["ip_address_floating"] = iface.IPAddresses[0].Floating.Bool()
		}
		ni["mac_address"] = iface.MAC
		ni["network"] = iface.Network
		ni["type"] = iface.Type
		// Boolean fields use the SDK's tri-state type; only copy them into
		// state when the API actually returned a value.
		if !iface.Bootable.Empty() {
			ni["bootable"] = iface.Bootable.Bool()
		}
		if !iface.SourceIPFiltering.Empty() {
			ni["source_ip_filtering"] = iface.SourceIPFiltering.Bool()
		}
		networkInterfaces = append(networkInterfaces, ni)
		if iface.Type == upcloud.NetworkTypePublic &&
			iface.IPAddresses[0].Family == upcloud.IPAddressFamilyIPv4 {
			connIP = iface.IPAddresses[0].Address
		}
	}
	if len(networkInterfaces) > 0 {
		d.Set("network_interface", networkInterfaces)
	}
	storageDevices := []map[string]interface{}{}
	log.Printf("[DEBUG] Configured storage devices in state: %+v", d.Get("storage_devices"))
	log.Printf("[DEBUG] Actual storage devices on server: %v", server.StorageDevices)
	for _, serverStorage := range server.StorageDevices {
		// the template is managed within the server
		// NOTE(review): the template schema does not declare an "id"
		// attribute, so "template.0.id" may always be empty and this branch
		// may never match — confirm against the schema definition.
		if serverStorage.UUID == d.Get("template.0.id") {
			d.Set("template", []map[string]interface{}{{
				"address": serverStorage.Address,
				"id":      serverStorage.UUID,
				"size":    serverStorage.Size,
				"title":   serverStorage.Title,
				"storage": d.Get("template.0.storage"),
				// FIXME: backupRule cannot be derived from server.storageDevices payload, will not sync if changed elsewhere
				"backup_rule": d.Get("template.0.backup_rule"),
				// TODO: add when go-api updated ... "tier": serverStorage.Tier,
			}})
		} else {
			storageDevices = append(storageDevices, map[string]interface{}{
				"address": serverStorage.Address,
				"storage": serverStorage.UUID,
				"type":    serverStorage.Type,
			})
		}
	}
	d.Set("storage_devices", storageDevices)
	// Initialize the connection information.
	d.SetConnInfo(map[string]string{
		"host":     connIP,
		"password": "",
		"type":     "ssh",
		"user":     "root",
	})
	return diags
}
// resourceUpCloudServerUpdate applies configuration changes that require the
// server to be stopped, restarts it, and re-reads the resulting state.
func resourceUpCloudServerUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	svc := meta.(*service.Service)

	// The modifications below require the server to be powered off.
	if err := verifyServerStopped(d.Id(), meta); err != nil {
		return diag.FromErr(err)
	}

	modifyReq := &request.ModifyServerRequest{
		UUID:     d.Id(),
		Hostname: d.Get("hostname").(string),
	}
	modifyReq.Firewall = "off"
	if d.Get("firewall").(bool) {
		modifyReq.Firewall = "on"
	}
	// A preset plan takes precedence over individual CPU/memory settings.
	if plan, ok := d.GetOk("plan"); ok {
		modifyReq.Plan = plan.(string)
	} else {
		modifyReq.CoreNumber = d.Get("cpu").(int)
		modifyReq.MemoryAmount = d.Get("mem").(int)
	}

	if _, err := svc.ModifyServer(modifyReq); err != nil {
		return diag.FromErr(err)
	}
	if err := verifyServerStarted(d.Id(), meta); err != nil {
		return diag.FromErr(err)
	}
	return resourceUpCloudServerRead(ctx, d, meta)
}
// resourceUpCloudServerDelete stops and deletes the server and, when a
// template block is present in state, also deletes the cloned root disk.
func resourceUpCloudServerDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	client := meta.(*service.Service)
	var diags diag.Diagnostics
	// Verify server is stopped before deletion
	if err := verifyServerStopped(d.Id(), meta); err != nil {
		return diag.FromErr(err)
	}
	// Delete server
	deleteServerRequest := &request.DeleteServerRequest{
		UUID: d.Id(),
	}
	log.Printf("[INFO] Deleting server (server UUID: %s)", d.Id())
	if err := client.DeleteServer(deleteServerRequest); err != nil {
		return diag.FromErr(err)
	}
	// BUG FIX: d.Get on a TypeSet attribute returns *schema.Set, never
	// []map[string]interface{}; the old assertion panicked whenever a
	// template block existed in state.
	if tmpl, ok := d.GetOk("template"); ok {
		if list := tmpl.(*schema.Set).List(); len(list) > 0 {
			m := list[0].(map[string]interface{})
			// NOTE(review): "id" is written by the read function but is not
			// declared in the template schema — confirm it is populated.
			if id, ok := m["id"].(string); ok && id != "" {
				// Delete server root disk
				deleteStorageRequest := &request.DeleteStorageRequest{
					UUID: id,
				}
				log.Printf("[INFO] Deleting server storage (storage UUID: %s)", deleteStorageRequest.UUID)
				if err := client.DeleteStorage(deleteStorageRequest); err != nil {
					return diag.FromErr(err)
				}
			}
		}
	}
	return diags
}
// buildServerOpts assembles a CreateServerRequest from the resource data:
// basic attributes, optional login credentials, the cloned template disk,
// any extra storage devices and the network interfaces.
func buildServerOpts(d *schema.ResourceData, meta interface{}) (*request.CreateServerRequest, error) {
	r := &request.CreateServerRequest{
		Zone:     d.Get("zone").(string),
		Hostname: d.Get("hostname").(string),
		Title:    fmt.Sprintf("%s (managed by terraform)", d.Get("hostname").(string)),
	}
	if attr, ok := d.GetOk("firewall"); ok {
		if attr.(bool) {
			r.Firewall = "on"
		} else {
			r.Firewall = "off"
		}
	}
	if attr, ok := d.GetOk("metadata"); ok {
		if attr.(bool) {
			r.Metadata = upcloud.True
		} else {
			r.Metadata = upcloud.False
		}
	}
	if attr, ok := d.GetOk("cpu"); ok {
		r.CoreNumber = attr.(int)
	}
	if attr, ok := d.GetOk("mem"); ok {
		r.MemoryAmount = attr.(int)
	}
	if attr, ok := d.GetOk("user_data"); ok {
		r.UserData = attr.(string)
	}
	if attr, ok := d.GetOk("plan"); ok {
		r.Plan = attr.(string)
	}
	if login, ok := d.GetOk("login"); ok {
		loginOpts, deliveryMethod, err := buildLoginOpts(login, meta)
		if err != nil {
			return nil, err
		}
		r.LoginUser = loginOpts
		r.PasswordDelivery = deliveryMethod
	}
	if template, ok := d.GetOk("template"); ok {
		// BUG FIX: a TypeSet value is a *schema.Set, not a plain map; the
		// previous assertion panicked on every create with a template block.
		if list := template.(*schema.Set).List(); len(list) > 0 {
			tmpl := list[0].(map[string]interface{})
			r.StorageDevices = append(
				r.StorageDevices,
				request.CreateServerStorageDevice{
					Action:  "clone",
					Address: tmpl["address"].(string),
					Size:    tmpl["size"].(int),
					Storage: tmpl["storage"].(string),
					Title:   tmpl["title"].(string),
				},
			)
		}
		// TODO: handle backup_rule
	}
	if storageDevices, ok := d.GetOk("storage_devices"); ok {
		// BUG FIX: same *schema.Set shape applies to this TypeSet attribute.
		for _, sd := range storageDevices.(*schema.Set).List() {
			device := sd.(map[string]interface{})
			r.StorageDevices = append(r.StorageDevices, request.CreateServerStorageDevice{
				Address: device["address"].(string),
				Type:    device["type"].(string),
				Storage: device["storage"].(string),
			})
		}
	}
	networking, err := buildNetworkOpts(d, meta)
	if err != nil {
		return nil, err
	}
	r.Networking = &request.CreateServerNetworking{
		Interfaces: networking,
	}
	return r, nil
}
// buildNetworkOpts translates every configured network_interface block into
// a CreateServerInterface entry, preserving the configured order.
func buildNetworkOpts(d *schema.ResourceData, meta interface{}) ([]request.CreateServerInterface, error) {
	count := d.Get("network_interface.#").(int)
	interfaces := []request.CreateServerInterface{}
	for idx := 0; idx < count; idx++ {
		prefix := fmt.Sprintf("network_interface.%d.", idx)
		entry := request.CreateServerInterface{
			IPAddresses: []request.CreateServerIPAddress{
				{Family: d.Get(prefix + "ip_address_family").(string)},
			},
			Type:              d.Get(prefix + "type").(string),
			SourceIPFiltering: upcloud.FromBool(d.Get(prefix + "source_ip_filtering").(bool)),
			Bootable:          upcloud.FromBool(d.Get(prefix + "bootable").(bool)),
		}
		// Attach to an explicit network only when one was configured.
		if network, ok := d.GetOk(prefix + "network"); ok {
			entry.Network = network.(string)
		}
		interfaces = append(interfaces, entry)
	}
	return interfaces, nil
}
// buildLoginOpts converts the single "login" block into a LoginUser request
// plus the chosen password delivery method ("none", "email" or "sms").
func buildLoginOpts(v interface{}, meta interface{}) (*request.LoginUser, string, error) {
	block := v.(*schema.Set).List()[0].(map[string]interface{})

	login := &request.LoginUser{
		Username: block["user"].(string),
	}

	// The API expects "yes"/"no" instead of a standard boolean.
	if block["create_password"].(bool) {
		login.CreatePassword = "yes"
	} else {
		login.CreatePassword = "no"
	}

	// Collect the configured SSH keys.
	sshKeys := make([]string, 0)
	for _, raw := range block["keys"].([]interface{}) {
		sshKeys = append(sshKeys, raw.(string))
	}
	login.SSHKeys = sshKeys

	return login, block["password_delivery"].(string), nil
}
// verifyServerStopped ensures the server identified by id is powered off,
// issuing a soft stop (hard stop after the timeout) and waiting as needed.
func verifyServerStopped(id string, meta interface{}) error {
	svc := meta.(*service.Service)

	details, err := svc.GetServerDetails(&request.GetServerDetailsRequest{UUID: id})
	if err != nil {
		return err
	}
	if details.State == upcloud.ServerStateStopped {
		return nil
	}

	// Soft stop with 2 minute timeout, after which hard stop occurs
	log.Printf("[INFO] Stopping server (server UUID: %s)", id)
	if _, err := svc.StopServer(&request.StopServerRequest{
		UUID:     id,
		StopType: "soft",
		Timeout:  time.Minute * 2,
	}); err != nil {
		return err
	}
	_, err = svc.WaitForServerState(&request.WaitForServerStateRequest{
		UUID:         id,
		DesiredState: upcloud.ServerStateStopped,
		Timeout:      time.Minute * 5,
	})
	return err
}
// verifyServerStarted ensures the server identified by id is running,
// starting it and waiting for the state transition when necessary.
func verifyServerStarted(id string, meta interface{}) error {
	svc := meta.(*service.Service)

	details, err := svc.GetServerDetails(&request.GetServerDetailsRequest{UUID: id})
	if err != nil {
		return err
	}
	if details.State == upcloud.ServerStateStarted {
		return nil
	}

	log.Printf("[INFO] Starting server (server UUID: %s)", id)
	if _, err := svc.StartServer(&request.StartServerRequest{
		UUID:    id,
		Timeout: time.Minute * 2,
	}); err != nil {
		return err
	}
	_, err = svc.WaitForServerState(&request.WaitForServerStateRequest{
		UUID:         id,
		DesiredState: upcloud.ServerStateStarted,
		Timeout:      time.Minute * 5,
	})
	return err
}
|
package tests
import (
"github.com/phzfi/RIC/server/images"
"github.com/phzfi/RIC/server/testutils"
"testing"
)
// TestImageWatermark overlays a watermark at three different positions and
// checks each result against a reference image within a small distortion
// tolerance.
func TestImageWatermark(t *testing.T) {
	testfolder := "../../testimages/watermark/"
	testimage := testfolder + "towatermark.jpg"
	resfolder := "../../testresults/images/"
	tolerance := 0.002

	wmimage := images.NewImage()
	defer wmimage.Destroy()
	err := wmimage.FromFile(testfolder + "watermark.png")
	if err != nil {
		t.Fatal(err)
	}

	horizontal := 0.0
	vertical := 0.0
	cases := []testutils.TestCase{
		{testimage, testfolder + "marked1.jpg", resfolder + "marked1.jpg"},
		{testimage, testfolder + "marked2.jpg", resfolder + "marked2.jpg"},
		{testimage, testfolder + "marked3.jpg", resfolder + "marked3.jpg"},
	}
	for _, c := range cases {
		// BUG FIX: the loop used "defer img.Destroy()", which kept every
		// image alive until the whole test returned. Running each case in a
		// closure releases the image as soon as the case finishes.
		func() {
			img := images.NewImage()
			defer img.Destroy()
			if err := img.FromFile(c.Testfn); err != nil {
				t.Fatal(err)
			}
			if err := img.Watermark(wmimage, horizontal, vertical); err != nil {
				t.Fatal(err)
			}
			blob := img.Blob()
			// Shift the watermark position for the next case.
			horizontal += 0.5
			vertical += 0.5
			if err := testutils.CheckDistortion(blob, c.Reffn, tolerance, c.Resfn); err != nil {
				t.Fatal(err)
			}
		}()
	}
}
Renamed image_test.go to watermark_test.go and moved it to the server folder.
|
package main
import (
"bufio"
"flag"
"io"
"log"
"os"
"os/exec"
"path"
)
// main loads the rule configuration named on the command line, starts
// watching the configured directories and dispatches each file event to the
// matching command until the event queue is closed.
func main() {
	flag.Parse()
	configPath := flag.Arg(0)

	_, err := os.Stat(configPath)
	if err != nil && os.IsNotExist(err) {
		log.Fatal("ERROR: Could not find configuration file")
	}

	rules, err := ParseConfig(configPath)
	if err != nil {
		log.Fatalf("ERROR: Could not parse config file: %s", err)
	}
	log.Printf("Successfully loaded configuration file. Number of rules: %d", len(rules))

	// Each rule contributes one directory to watch.
	dirs := make([]string, 0)
	for _, rule := range rules {
		dirs = append(dirs, rule.Path)
	}

	watcher, err := WatchDirs(dirs)
	if err != nil {
		log.Println("ERROR: Could not start watching directories")
		log.Fatal(err)
	}
	defer func() {
		err = watcher.Stop()
		if err != nil {
			log.Fatal("ERROR: Did not shut down cleanly")
		}
	}()

	queue := Manage(watcher.Events, rules)

	// BUG FIX: the message previously read "ready process file events".
	log.Println("Watchgopher is now ready to process file events")
	for payload := range queue {
		workOff(payload)
	}
}
// workOff runs one queued command to completion, optionally streaming its
// stdout and stderr into the process log, tagged with the executable name.
func workOff(pl CmdPayload) {
	if pl.LogOutput {
		// Pipe creation failures are logged but do not abort the run; the
		// nil checks below simply skip the corresponding reader goroutine.
		outp, err := pl.Cmd.StdoutPipe()
		if err != nil {
			logCmdErr(pl.Cmd, err)
		}
		errp, err := pl.Cmd.StderrPipe()
		if err != nil {
			logCmdErr(pl.Cmd, err)
		}
		// Tag log lines with the bare executable name, not the full path.
		_, filename := path.Split(pl.Cmd.Path)
		if outp != nil {
			go pipeToLog(filename, "STDOUT", outp)
		}
		if errp != nil {
			go pipeToLog(filename, "STDERR", errp)
		}
	}
	log.Printf("%s, ARGS: %s -- START\n", pl.Cmd.Path, pl.Cmd.Args[1:])
	if err := pl.Cmd.Start(); err != nil {
		logCmdErr(pl.Cmd, err)
		return
	}
	// NOTE(review): os/exec documents that Wait must not be called before
	// reads from StdoutPipe/StderrPipe complete; the reader goroutines above
	// may lose trailing output when the command exits — confirm.
	err := pl.Cmd.Wait()
	if err != nil {
		logCmdErr(pl.Cmd, err)
		return
	}
	log.Printf("%s, ARGS: %s -- SUCCESS\n", pl.Cmd.Path, pl.Cmd.Args[1:])
}
func pipeToLog(filename, prefix string, pipe io.ReadCloser) {
reader := bufio.NewReader(pipe)
for {
line, err := reader.ReadBytes('\n')
if err == io.EOF {
break
}
if err != nil {
log.Printf("[%s %s] Reading Error: %s", filename, prefix, err)
break
}
log.Printf("[%s %s] %s", filename, prefix, line)
}
}
func logCmdErr(cmd *exec.Cmd, err error) {
log.Printf("%s, ARGS: %s -- ERROR: %s\n", cmd.Path, cmd.Args[1:], err)
}
Add VERSION
package main
import (
"bufio"
"flag"
"io"
"log"
"os"
"os/exec"
"path"
)
const VERSION = "0.1"
// main loads the rule configuration named on the command line, starts
// watching the configured directories and dispatches each file event to the
// matching command until the event queue is closed.
func main() {
	log.Printf("Starting watchgopher %s ...\n", VERSION)
	flag.Parse()
	configPath := flag.Arg(0)

	_, err := os.Stat(configPath)
	if err != nil && os.IsNotExist(err) {
		log.Fatal("ERROR: Could not find configuration file")
	}

	rules, err := ParseConfig(configPath)
	if err != nil {
		log.Fatalf("ERROR: Could not parse config file: %s", err)
	}
	log.Printf("Successfully loaded configuration file. Number of rules: %d", len(rules))

	// Each rule contributes one directory to watch.
	dirs := make([]string, 0)
	for _, rule := range rules {
		dirs = append(dirs, rule.Path)
	}

	watcher, err := WatchDirs(dirs)
	if err != nil {
		log.Println("ERROR: Could not start watching directories")
		log.Fatal(err)
	}
	defer func() {
		err = watcher.Stop()
		if err != nil {
			log.Fatal("ERROR: Did not shut down cleanly")
		}
	}()

	queue := Manage(watcher.Events, rules)

	// BUG FIX: the message previously read "ready process file events".
	log.Println("Watchgopher is now ready to process file events")
	for payload := range queue {
		workOff(payload)
	}
}
// workOff runs one queued command to completion, optionally streaming its
// stdout and stderr into the process log, tagged with the executable name.
func workOff(pl CmdPayload) {
	if pl.LogOutput {
		// Pipe creation failures are logged but do not abort the run; the
		// nil checks below simply skip the corresponding reader goroutine.
		outp, err := pl.Cmd.StdoutPipe()
		if err != nil {
			logCmdErr(pl.Cmd, err)
		}
		errp, err := pl.Cmd.StderrPipe()
		if err != nil {
			logCmdErr(pl.Cmd, err)
		}
		// Tag log lines with the bare executable name, not the full path.
		_, filename := path.Split(pl.Cmd.Path)
		if outp != nil {
			go pipeToLog(filename, "STDOUT", outp)
		}
		if errp != nil {
			go pipeToLog(filename, "STDERR", errp)
		}
	}
	log.Printf("%s, ARGS: %s -- START\n", pl.Cmd.Path, pl.Cmd.Args[1:])
	if err := pl.Cmd.Start(); err != nil {
		logCmdErr(pl.Cmd, err)
		return
	}
	// NOTE(review): os/exec documents that Wait must not be called before
	// reads from StdoutPipe/StderrPipe complete; the reader goroutines above
	// may lose trailing output when the command exits — confirm.
	err := pl.Cmd.Wait()
	if err != nil {
		logCmdErr(pl.Cmd, err)
		return
	}
	log.Printf("%s, ARGS: %s -- SUCCESS\n", pl.Cmd.Path, pl.Cmd.Args[1:])
}
func pipeToLog(filename, prefix string, pipe io.ReadCloser) {
reader := bufio.NewReader(pipe)
for {
line, err := reader.ReadBytes('\n')
if err == io.EOF {
break
}
if err != nil {
log.Printf("[%s %s] Reading Error: %s", filename, prefix, err)
break
}
log.Printf("[%s %s] %s", filename, prefix, line)
}
}
func logCmdErr(cmd *exec.Cmd, err error) {
log.Printf("%s, ARGS: %s -- ERROR: %s\n", cmd.Path, cmd.Args[1:], err)
}
|
package mark
import (
"io/ioutil"
"regexp"
"strings"
"testing"
)
func TestRender(t *testing.T) {
cases := map[string]string{
"foobar": "<p>foobar</p>",
" foo bar": "<p>foo bar</p>",
"|foo|bar": "<p>|foo|bar</p>",
"foo \nbar": "<p>foo<br>bar</p>",
"__bar__ foo": "<p><strong>bar</strong> foo</p>",
"**bar** foo __bar__": "<p><strong>bar</strong> foo <strong>bar</strong></p>",
"**bar**__baz__": "<p><strong>bar</strong><strong>baz</strong></p>",
"**bar**foo__bar__": "<p><strong>bar</strong>foo<strong>bar</strong></p>",
"_bar_baz": "<p><em>bar</em>baz</p>",
"_foo_~~bar~~ baz": "<p><em>foo</em><del>bar</del> baz</p>",
"~~baz~~ _baz_": "<p><del>baz</del> <em>baz</em></p>",
"`bool` and thats it.": "<p><code>bool</code> and thats it.</p>",
// Html
"<!--hello-->": "<!--hello-->",
// Emphasis mixim
"___foo___": "<p><strong><em>foo</em></strong></p>",
"__foo _bar___": "<p><strong>foo <em>bar</em></strong></p>",
"__*foo*__": "<p><strong><em>foo</em></strong></p>",
"_**mixim**_": "<p><em><strong>mixim</strong></em></p>",
"~~__*mixim*__~~": "<p><del><strong><em>mixim</em></strong></del></p>",
"~~*mixim*~~": "<p><del><em>mixim</em></del></p>",
// Paragraph
"1 \n2 \n3": "<p>1<br>2<br>3</p>",
"1\n\n2": "<p>1</p>\n<p>2</p>",
"1\n\n\n2": "<p>1</p>\n<p>2</p>",
"1\n\n\n\n\n\n\n\n2": "<p>1</p>\n<p>2</p>",
// Heading
"# 1\n## 2": "<h1 id=\"1\">1</h1>\n<h2 id=\"2\">2</h2>",
"# 1\np\n## 2\n### 3\n4\n===": "<h1 id=\"1\">1</h1>\n<p>p</p>\n<h2 id=\"2\">2</h2>\n<h3 id=\"3\">3</h3>\n<h1 id=\"4\">4</h1>",
"Hello\n===": "<h1 id=\"hello\">Hello</h1>",
// Links
"[text](link \"title\")": "<p><a href=\"link\" title=\"title\">text</a></p>",
"[text](link)": "<p><a href=\"link\">text</a></p>",
"[](link)": "<p><a href=\"link\"></a></p>",
"Link: [example](#)": "<p>Link: <a href=\"#\">example</a></p>",
"Link: [not really": "<p>Link: [not really</p>",
"http://localhost:3000": "<p><a href=\"http://localhost:3000\">http://localhost:3000</a></p>",
"Link: http://yeah.com": "<p>Link: <a href=\"http://yeah.com\">http://yeah.com</a></p>",
"<http://foo.com>": "<p><a href=\"http://foo.com\">http://foo.com</a></p>",
"Link: <http://l.co>": "<p>Link: <a href=\"http://l.co\">http://l.co</a></p>",
"Link: <not really": "<p>Link: <not really</p>",
// CodeBlock
"\tfoo\n\tbar": "<pre><code>foo\nbar</code></pre>",
"\tfoo\nbar": "<pre><code>foo\n</code></pre>\n<p>bar</p>",
// GfmCodeBlock
"```js\nvar a;\n```": "<pre><code class=\"lang-js\">\nvar a;\n</code></pre>",
"~~~\nvar b;~~let d = 1~~~~": "<pre><code>\nvar b;~~let d = 1~~~~</code></pre>",
"~~~js\n": "<pre><code class=\"lang-js\">\n</code></pre>",
// Hr
"foo\n****\nbar": "<p>foo</p>\n<hr>\n<p>bar</p>",
"foo\n___": "<p>foo</p>\n<hr>",
// Images
"": "<p><img src=\"url\" alt=\"name\"></p>",
"": "<p><img src=\"url\" alt=\"name\" title=\"title\"></p>",
"img: ![name]()": "<p>img: <img src=\"\" alt=\"name\"></p>",
// Lists
"- foo\n- bar": "<ul>\n<li>foo</li>\n<li>bar</li>\n</ul>",
"* foo\n* bar": "<ul>\n<li>foo</li>\n<li>bar</li>\n</ul>",
"+ foo\n+ bar": "<ul>\n<li>foo</li>\n<li>bar</li>\n</ul>",
// // Ordered Lists
"1. one\n2. two\n3. three": "<ol>\n<li>one</li>\n<li>two</li>\n<li>three</li>\n</ol>",
"1. one\n 1. one of one": "<ol>\n<li>one<ol>\n<li>one of one</li>\n</ol></li>\n</ol>",
"2. two\n 3. three": "<ol>\n<li>two<ol>\n<li>three</li>\n</ol></li>\n</ol>",
// Special characters escaping
"< hello": "<p>< hello</p>",
"hello >": "<p>hello ></p>",
"foo & bar": "<p>foo & bar</p>",
"'foo'": "<p>'foo'</p>",
"\"foo\"": "<p>"foo"</p>",
"©": "<p>©</p>",
// Backslash escaping
"\\**foo\\**": "<p>*<em>foo*</em></p>",
"\\*foo\\*": "<p>*foo*</p>",
"\\_underscores\\_": "<p>_underscores_</p>",
"\\## header": "<p>## header</p>",
"header\n\\===": "<p>header\n\\===</p>",
}
for input, expected := range cases {
if actual := Render(input); actual != expected {
t.Errorf("%s: got\n%+v\nexpected\n%+v", input, actual, expected)
}
}
}
// TestData renders every test/*.text fixture and compares the result,
// with newlines stripped, against the matching test/*.html golden file.
// Fixtures whose name contains "smartypants" or "smartyfractions" are
// re-rendered with the corresponding option enabled.
func TestData(t *testing.T) {
	files, err := ioutil.ReadDir("test")
	if err != nil {
		// Without the fixture directory nothing below can run.
		t.Fatalf("Couldn't open 'test' directory: %s", err)
	}
	var testFiles []string
	for _, file := range files {
		if name := file.Name(); strings.HasSuffix(name, ".text") {
			testFiles = append(testFiles, "test/"+strings.TrimSuffix(name, ".text"))
		}
	}
	re := regexp.MustCompile(`\n`)
	for _, file := range testFiles {
		html, err := ioutil.ReadFile(file + ".html")
		if err != nil {
			t.Errorf("Error to read html file: %s", file)
			continue // comparing against a missing golden file is meaningless
		}
		text, err := ioutil.ReadFile(file + ".text")
		if err != nil {
			t.Errorf("Error to read text file: %s", file)
			continue // nothing to render
		}
		// Remove '\n' so the comparison ignores formatting-only newlines.
		sHTML := re.ReplaceAllLiteralString(string(html), "")
		output := Render(string(text))
		opts := DefaultOptions()
		if strings.Contains(file, "smartypants") {
			opts.Smartypants = true
			output = New(string(text), opts).Render()
		}
		if strings.Contains(file, "smartyfractions") {
			opts.Fractions = true
			output = New(string(text), opts).Render()
		}
		sText := re.ReplaceAllLiteralString(output, "")
		if sHTML != sText {
			t.Errorf("%s: got\n\t%+v\nexpected\n\t%+v", file, sText, sHTML)
		}
	}
}
// TODO: Add more tests for it.
// TestRenderFn verifies that a custom render function registered for
// paragraph nodes replaces the default paragraph rendering.
func TestRenderFn(t *testing.T) {
	mark := New("hello world", nil)
	mark.AddRenderFn(NodeParagraph, func(n Node) string {
		p, ok := n.(*ParagraphNode)
		if !ok {
			return ""
		}
		out := "<p class=\"mv-msg\">"
		for _, child := range p.Nodes {
			out += child.Render()
		}
		return out + "</p>"
	})
	want := "<p class=\"mv-msg\">hello world</p>"
	if got := mark.Render(); got != want {
		t.Errorf("RenderFn: got\n\t%+v\nexpected\n\t%+v", got, want)
	}
}
// CommonMarkSpec describes one example from the CommonMark specification:
// its example number, markdown input, and expected HTML output.
type CommonMarkSpec struct {
	name     string // example number in the spec (used to build the failure link)
	input    string // raw markdown source
	expected string // expected HTML output
}
var CMCases = []CommonMarkSpec{
{"6", "- `one\n- two`", "<ul><li>`one</li><li>two`</li></ul>"},
{"7", "***\n---\n___", "<hr><hr><hr>"},
{"8", "+++", "<p>+++</p>"},
{"9", "===", "<p>===</p>"},
{"10", "--\n**\n__", "<p>--**__</p>"},
{"11", " ***\n ***\n ***", "<hr><hr><hr>"},
{"12", " ***", "<pre><code>***</code></pre>"},
{"14", "_____________________________________", "<hr>"},
{"15", " - - -", "<hr>"},
{"16", " ** * ** * ** * **", "<hr>"},
{"17", "- - - -", "<hr>"},
{"18", "- - - - ", "<hr>"},
{"20", " *-*", "<p><em>-</em></p>"},
{"21", "- foo\n***\n- bar", "<ul>\n<li>foo</li>\n</ul>\n<hr>\n<ul>\n<li>bar</li>\n</ul>"},
{"22", "Foo\n***\nbar", "<p>Foo</p><hr><p>bar</p>"},
{"23", "Foo\n---\nbar", "<h2>Foo</h2><p>bar</p>"},
{"24", "* Foo\n* * *\n* Bar", "<ul>\n<li>Foo</li>\n</ul>\n<hr>\n<ul>\n<li>Bar</li>\n</ul>"},
{"25", "- Foo\n- * * *", "<ul>\n<li>Foo</li>\n<li>\n<hr>\n</li>\n</ul>"},
{"26", `# foo
## foo
### foo
#### foo
##### foo
###### foo`, `<h1>foo</h1>
<h2>foo</h2>
<h3>foo</h3>
<h4>foo</h4>
<h5>foo</h5>
<h6>foo</h6>`},
{"27", "####### foo", "<p>####### foo</p>"},
{"28", "#5 bolt\n\n#foobar", "<p>#5 bolt</p>\n<p>#foobar</p>"},
{"29", "\\## foo", "<p>## foo</p>"},
{"30", "# foo *bar* \\*baz\\*", "<h1>foo <em>bar</em> *baz*</h1>"},
{"31", "# foo ", "<h1>foo</h1>"},
{"32", ` ### foo
## foo
# foo`, `<h3>foo</h3>
<h2>foo</h2>
<h1>foo</h1>`},
{"33", " # foo", "<pre><code># foo</code></pre>"},
{"34", `
foo
# bar`, `
<p>foo
# bar</p>`},
{"35", `## foo ##
### bar ###`, `<h2>foo</h2>
<h3>bar</h3>`},
{"36", `# foo ##################################
##### foo ##`, `<h1>foo</h1>
<h5>foo</h5>`},
{"37", "### foo ### ", "<h3>foo</h3>"},
{"38", "### foo ### b", "<h3>foo ### b</h3>"},
{"39", "# foo#", "<h1>foo#</h1>"},
{"40", `
### foo \###
## foo #\##
# foo \#`, `
<h3>foo ###</h3>
<h2>foo ###</h2>
<h1>foo #</h1>`},
{"41", `****
## foo
****`, `<hr>
<h2>foo</h2>
<hr>`},
{"42", `Foo bar
# baz
Bar foo`, `<p>Foo bar</p>
<h1>baz</h1>
<p>Bar foo</p>`},
{"43", `
##
#
### ###`, `
<h2></h2>
<h1></h1>
<h3></h3>`},
{"44", `
Foo *bar*
=========
Foo *bar*
---------`, `
<h1>Foo <em>bar</em></h1>
<h2>Foo <em>bar</em></h2>`},
{"45", `Foo
-------------------------
Foo
=`, `<h2>Foo</h2>
<h1>Foo</h1>`},
{"46", ` Foo
---
Foo
-----
Foo
===`, `<h2>Foo</h2>
<h2>Foo</h2>
<h1>Foo</h1>`},
{"47", ` Foo
---
Foo
---`, `<pre><code>Foo
---
Foo
</code></pre>
<hr>`},
{"48", `Foo
---- `, "<h2>Foo</h2>"},
{"49", `
Foo
---`, `
<p>Foo
---</p>`},
{"50", `Foo
= =
Foo
--- -`, `<p>Foo
= =</p>
<p>Foo</p>
<hr>`},
{"51", `Foo
-----`, "<h2>Foo</h2>"},
{"52", `Foo\
----`, "<h2>Foo\\</h2>"},
{"53", "`Foo\n----\n`\n\n<a title=\"a lot\n---\nof dashes\"/>", "<h2>`Foo</h2>\n<p>`</p>\n<h2><a title="a lot</h2>\n<p>of dashes"/></p>"},
{"54", `
> Foo
---`, `
<blockquote>
<p>Foo</p>
</blockquote>
<hr>`},
{"55", `- Foo
---`, `<ul>
<li>Foo</li>
</ul>
<hr>`},
{"57", `---
Foo
---
Bar
---
Baz`, `<hr>
<h2>Foo</h2>
<h2>Bar</h2>
<p>Baz</p>`},
{"58", "====", "<p>====</p>"},
{"59", `---
---`, "<hr><hr>"},
{"60", `- foo
-----`, `<ul>
<li>foo</li>
</ul>
<hr>`},
{"61", ` foo
---`, `<pre><code>foo
</code></pre>
<hr>`},
{"62", `
> foo
-----`, `
<blockquote>
<p>foo</p>
</blockquote>
<hr>`},
{"63", `
\> foo
------`, `
<h2>> foo</h2>`},
{"64", ` a simple
indented code block`, `<pre><code>a simple
indented code block
</code></pre>`},
{"65", `
- foo
bar`, `
<ul>
<li>
<p>foo</p>
<p>bar</p>
</li>
</ul>`},
{"66", `1. foo
- bar`, `<ol>
<li>
<p>foo</p>
<ul>
<li>bar</li>
</ul>
</li>
</ol>`},
{"67", ` <a/>
*hi*
- one`, `<pre><code><a/>
*hi*
- one
</code></pre>`},
{"68", `
chunk1
chunk2
chunk3`, `
<pre><code>chunk1
chunk2
chunk3
</code></pre>`},
{"69", `
chunk1
chunk2`, `
<pre><code>chunk1
chunk2
</code></pre>`},
{"70", `
Foo
bar`, `
<p>Foo
bar</p>`},
{"71", ` foo
bar`, `<pre><code>foo
</code></pre>
<p>bar</p>`},
{"72", `# Header
foo
Header
------
foo
----`, `<h1>Header</h1>
<pre><code>foo
</code></pre>
<h2>Header</h2>
<pre><code>foo
</code></pre>
<hr>`},
{"73", ` foo
bar`, `<pre><code> foo
bar
</code></pre>`},
{"74", `
foo
`, `<pre><code>foo
</code></pre>`},
{"75", " foo ", `<pre><code>foo
</code></pre>`},
{"76", "```\n< \n>\n```", `<pre><code><
>
</code></pre>`},
{"77", `~~~
<
>
~~~`, `<pre><code><
>
</code></pre>`},
{"78", "```\naaa\n~~~\n```", `<pre><code>aaa
~~~
</code></pre>`},
{"79", "~~~\naaa\n```\n~~~", "<pre><code>aaa\n```\n</code></pre>"},
{"80", "````\naaa\n```\n``````", "<pre><code>aaa\n```\n</code></pre>"},
{"81", `
~~~~
aaa
~~~
~~~~`, `
<pre><code>aaa
~~~
</code></pre>`},
{"82", "```", "<pre><code></code></pre>"},
{"83", "`````\n\n```\naaa", "<pre><code>\n```\naaa\n</code></pre>"},
{"84", "> ```\n> aaa\n\nbbb", `
<blockquote>
<pre><code>aaa
</code></pre>
</blockquote>
<p>bbb</p>`},
{"85", "```\n\n \n```", "<pre><code>\n \n</code></pre>"},
{"86", "```\n```", `<pre><code></code></pre>`},
{"87", " ```\n aaa\naaa\n```", `
<pre><code>aaa
aaa
</code></pre>`},
{"88", " ```\naaa\n aaa\naaa\n ```", `
<pre><code>aaa
aaa
aaa
</code></pre>`},
{"89", " ```\n aaa\n aaa\n aaa\n ```", `
<pre><code>aaa
aaa
aaa
</code></pre>`},
{"90", " ```\n aaa\n ```", "<pre><code>```\naaa\n```\n</code></pre>"},
{"91", "```\naaa\n ```", `<pre><code>aaa
</code></pre>`},
{"92", " ```\naaa\n ```", `<pre><code>aaa
</code></pre>`},
{"93", "```\naaa\n ```", "<pre><code>aaa\n ```\n</code></pre>"},
{"95", `
~~~~~~
aaa
~~~ ~~`, `
<pre><code>aaa
~~~ ~~
</code></pre>`},
{"96", "foo\n```\nbar\n```\nbaz", `<p>foo</p>
<pre><code>bar
</code></pre>
<p>baz</p>`},
{"97", `foo
---
~~~
bar
~~~
# baz`, `<h2>foo</h2>
<pre><code>bar
</code></pre>
<h1>baz</h1>`},
{"102", "```\n``` aaa\n```", "<pre><code>``` aaa\n</code></pre>"},
{"103", `
<table>
<tr>
<td>
hi
</td>
</tr>
</table>
okay.`, `
<table>
<tr>
<td>
hi
</td>
</tr>
</table>
<p>okay.</p>`},
// Move out the id, beacuse the regexp below
{"107", `
<div
class="bar">
</div>`, `
<div
class="bar">
</div>`},
{"108", `
<div class="bar
baz">
</div>`, `
<div class="bar
baz">
</div>`},
{"113", `<div><a href="bar">*foo*</a></div>`, `<div><a href="bar">*foo*</a></div>`},
{"114", `
<table><tr><td>
foo
</td></tr></table>`, `
<table><tr><td>
foo
</td></tr></table>`},
{"117", `
<Warning>
*bar*
</Warning>`, `
<Warning>
*bar*
</Warning>`},
{"121", "<del>*foo*</del>", "<p><del><em>foo</em></del></p>"},
{"122", `
<pre language="haskell"><code>
import Text.HTML.TagSoup
main :: IO ()
main = print $ parseTags tags
</code></pre>`, `
<pre language="haskell"><code>
import Text.HTML.TagSoup
main :: IO ()
main = print $ parseTags tags
</code></pre>`},
{"123", `
<script type="text/javascript">
// JavaScript example
document.getElementById("demo").innerHTML = "Hello JavaScript!";
</script>`, `
<script type="text/javascript">
// JavaScript example
document.getElementById("demo").innerHTML = "Hello JavaScript!";
</script>`},
{"124", `
<style
type="text/css">
h1 {color:red;}
p {color:blue;}
</style>`, `
<style
type="text/css">
h1 {color:red;}
p {color:blue;}
</style>`},
{"127", `
- <div>
- foo`, `
<ul>
<li>
<div>
</li>
<li>foo</li>
</ul>`},
{"137", `
Foo
<div>
bar
</div>`, `
<p>Foo</p>
<div>
bar
</div>`},
{"139", `
Foo
<a href="bar">
baz`, `
<p>Foo
<a href="bar">
baz</p>`},
{"141", `
<div>
*Emphasized* text.
</div>`, `
<div>
*Emphasized* text.
</div>
`},
{"142", `
<table>
<tr>
<td>
Hi
</td>
</tr>
</table>`, `
<table>
<tr>
<td>
Hi
</td>
</tr>
</table>
`},
{"144", `
[foo]: /url "title"
[foo]`, `<p><a href="/url" title="title">foo</a></p>`},
{"145", `
[foo]:
/url
'the title'
[foo]`, `<p><a href="/url" title="the title">foo</a></p>`},
{"148", `
[foo]: /url '
title
line1
line2
'
[foo]`, `
<p><a href="/url" title="
title
line1
line2
">foo</a></p>`},
{"150", `
[foo]:
/url
[foo]`, `<p><a href="/url">foo</a></p>`},
{"151", `
[foo]:
[foo]`, `
<p>[foo]:</p>
<p>[foo]</p>`},
{"153", `
[foo]
[foo]: url`, `<p><a href="url">foo</a></p>`},
{"154", `
[foo]
[foo]: first
[foo]: second`, `<p><a href="first">foo</a></p>`},
{"155", `
[FOO]: /url
[Foo]`, `<p><a href="/url">Foo</a></p>`},
{"157", "[foo]: /url", ""},
{"158", `
[
foo
]: /url
bar`, "<p>bar</p>"},
{"159", `[foo]: /url "title" ok`, "<p>[foo]: /url "title" ok</p>"},
{"160", `
[foo]: /url
"title" ok`, "<p>"title" ok</p>"},
{"161", `
[foo]: /url "title"
[foo]`, `
<pre><code>[foo]: /url "title"
</code></pre>
<p>[foo]</p>`},
{"162", "```\n[foo]: /url\n```\n\n[foo]", `
<pre><code>[foo]: /url
</code></pre>
<p>[foo]</p>`},
{"166", `
[foo]
> [foo]: /url`, `
<p><a href="/url">foo</a></p>
<blockquote>
</blockquote>`},
{"167", `
aaa
bbb`, `
<p>aaa</p>
<p>bbb</p>`},
{"168", `
aaa
bbb
ccc
ddd`, `
<p>aaa
bbb</p>
<p>ccc
ddd</p>`},
{"169", `
aaa
bbb`, `
<p>aaa</p>
<p>bbb</p>`},
{"170", `
aaa
bbb`, `
<p>aaa
bbb</p>`},
{"171", `
aaa
bbb
ccc`, `
<p>aaa
bbb
ccc</p>`},
{"172", `
aaa
bbb`, `
<p>aaa
bbb</p>`},
{"173", `
aaa
bbb`, `
<pre><code>aaa
</code></pre>
<p>bbb</p>`},
{"174", `
aaa
bbb `, `
<p>aaa<br>
bbb</p>`},
{"175", `
aaa
# aaa
`, `
<p>aaa</p>
<h1>aaa</h1>`},
{"176", `
> # Foo
> bar
> baz`, `
<blockquote>
<h1>Foo</h1>
<p>bar
baz</p>
</blockquote>`},
{"177", `
># Foo
>bar
> baz`, `
<blockquote>
<h1>Foo</h1>
<p>bar
baz</p>
</blockquote>`},
{"178", `
> # Foo
> bar
> baz`, `
<blockquote>
<h1>Foo</h1>
<p>bar
baz</p>
</blockquote>`},
{"179", `
> # Foo
> bar
> baz`, `
<pre><code>> # Foo
> bar
> baz
</code></pre>`},
{"180", `
> # Foo
> bar
baz`, `
<blockquote>
<h1>Foo</h1>
<p>bar
baz</p>
</blockquote>`},
{"181", `
> bar
baz
> foo`, `
<blockquote>
<p>bar
baz
foo</p>
</blockquote>`},
{"182", `
> foo
---`, `
<blockquote>
<p>foo</p>
</blockquote>
<hr>`},
{"186", `
>`, `
<blockquote>
</blockquote>`},
{"187", `
>
>
> `, `
<blockquote>
</blockquote>`},
{"188", `
>
> foo
> `, `
<blockquote>
<p>foo</p>
</blockquote>`},
{"189", `
> foo
> bar`, `
<blockquote>
<p>foo</p>
</blockquote>
<blockquote>
<p>bar</p>
</blockquote>`},
{"190", `
> foo
> bar`, `
<blockquote>
<p>foo
bar</p>
</blockquote>`},
{"191", `
> foo
>
> bar`, `
<blockquote>
<p>foo</p>
<p>bar</p>
</blockquote>`},
{"192", `
foo
> bar`, `
<p>foo</p>
<blockquote>
<p>bar</p>
</blockquote>`},
{"193", `
> aaa
***
> bbb`, `
<blockquote>
<p>aaa</p>
</blockquote>
<hr>
<blockquote>
<p>bbb</p>
</blockquote>`},
{"194", `
> bar
baz`, `
<blockquote>
<p>bar
baz</p>
</blockquote>`},
{"195", `
> bar
baz`, `
<blockquote>
<p>bar</p>
</blockquote>
<p>baz</p>`},
{"197", `
> > > foo
bar`, `
<blockquote>
<blockquote>
<blockquote>
<p>foo
bar</p>
</blockquote>
</blockquote>
</blockquote>`},
{"198", `
>>> foo
> bar
>>baz`, `
<blockquote>
<blockquote>
<blockquote>
<p>foo
bar
baz</p>
</blockquote>
</blockquote>
</blockquote>`},
{"199", `
> code
> not code`, `
<blockquote>
<pre><code>code
</code></pre>
</blockquote>
<blockquote>
<p>not code</p>
</blockquote>`},
{"200", `
A paragraph
with two lines.
indented code
> A block quote.`, `
<p>A paragraph
with two lines.</p>
<pre><code>indented code
</code></pre>
<blockquote>
<p>A block quote.</p>
</blockquote>`},
{"201", `
1. A paragraph
with two lines.
indented code
> A block quote.`, `
<ol>
<li>
<p>A paragraph
with two lines.</p>
<pre><code>indented code
</code></pre>
<blockquote>
<p>A block quote.</p>
</blockquote>
</li>
</ol>`},
{"203", `
- one
two`, `
<ul>
<li>
<p>one</p>
<p>two</p>
</li>
</ul>`},
{"205", `
- one
two`, `
<ul>
<li>
<p>one</p>
<p>two</p>
</li>
</ul>`},
{"206", `
> > 1. one
>>
>> two`, `
<blockquote>
<blockquote>
<ol>
<li>
<p>one</p>
<p>two</p>
</li>
</ol>
</blockquote>
</blockquote>`},
{"207", `
>>- one
>>
> > two`, `
<blockquote>
<blockquote>
<ul>
<li>one</li>
</ul>
<p>two</p>
</blockquote>
</blockquote>`},
{"208", `-one
2.two`, `
<p>-one</p>
<p>2.two</p>`},
{"210", `
1. foo
~~~
bar
~~~
baz
> bam`, `
<ol>
<li>
<p>foo</p>
<pre><code>bar
</code></pre>
<p>baz</p>
<blockquote>
<p>bam</p>
</blockquote>
</li>
</ol>`},
{"212", `1234567890. not ok`, `<p>1234567890. not ok</p>`},
{"215", `-1. not ok`, `<p>-1. not ok</p>`},
{"216", `
- foo
bar`, `
<ul>
<li>
<p>foo</p>
<pre><code>bar
</code></pre>
</li>
</ul>`},
{"218", `
indented code
paragraph
more code`, `
<pre><code>indented code
</code></pre>
<p>paragraph</p>
<pre><code>more code
</code></pre>`},
{"221", `
foo
bar`, `
<p>foo</p>
<p>bar</p>`},
{"223", `
- foo
bar`, `
<ul>
<li>
<p>foo</p>
<p>bar</p>
</li>
</ul>`},
{"226", `
- foo
-
- bar`, `
<ul>
<li>foo</li>
<li></li>
<li>bar</li>
</ul>`},
{"232", `
1. A paragraph
with two lines.
indented code
> A block quote.`, `
<pre><code>1. A paragraph
with two lines.
indented code
> A block quote.
</code></pre>`},
{"234", `
1. A paragraph
with two lines.`, `
<ol>
<li>A paragraph
with two lines.</li>
</ol>`},
{"235", `
> 1. > Blockquote
continued here.`, `
<blockquote>
<ol>
<li>
<blockquote>
<p>Blockquote
continued here.</p>
</blockquote>
</li>
</ol>
</blockquote>`},
{"236", `
> 1. > Blockquote
continued here.`, `
<blockquote>
<ol>
<li>
<blockquote>
<p>Blockquote
continued here.</p>
</blockquote>
</li>
</ol>
</blockquote>`},
{"237", `
- foo
- bar
- baz`, `
<ul>
<li>foo
<ul>
<li>bar
<ul>
<li>baz</li>
</ul>
</li>
</ul>
</li>
</ul>`},
{"241", "- - foo", `
<ul>
<li>
<ul>
<li>foo</li>
</ul>
</li>
</ul>`},
{"243", `
- # Foo
- Bar
---
baz`, `
<ul>
<li>
<h1>Foo</h1>
</li>
<li>
<h2>Bar</h2>
baz</li>
</ul>`},
{"246", `
Foo
- bar
- baz`, `
<p>Foo</p>
<ul>
<li>bar</li>
<li>baz</li>
</ul>`},
{"248", `
- foo
- bar
- baz`, `
<ul>
<li>
<p>foo</p>
</li>
<li>
<p>bar</p>
</li>
</ul>
<ul>
<li>baz</li>
</ul>`},
{"249", `
- foo
bar
- baz`, `
<ul>
<li>foo</li>
</ul>
<p>bar</p>
<ul>
<li>baz</li>
</ul>`},
{"250", `
- foo
- bar
- baz
bim`, `
<ul>
<li>foo
<ul>
<li>bar
<ul>
<li>baz</li>
</ul>
</li>
</ul>
</li>
</ul>
<pre><code> bim
</code></pre>`},
{"251", `
- foo
- bar
- baz
- bim`, `
<ul>
<li>foo</li>
<li>bar</li>
</ul>
<ul>
<li>baz</li>
<li>bim</li>
</ul>`},
{"252", `
- foo
notcode
- foo
code`, `
<ul>
<li>
<p>foo</p>
<p>notcode</p>
</li>
<li>
<p>foo</p>
</li>
</ul>
<pre><code>code
</code></pre>`},
{"261", `
* a
> b
>
* c`, `
<ul>
<li>a
<blockquote>
<p>b</p>
</blockquote>
</li>
<li>c</li>
</ul>`},
{"263", "- a", `
<ul>
<li>a</li>
</ul>`},
{"264", `
- a
- b`, `
<ul>
<li>a
<ul>
<li>b</li>
</ul>
</li>
</ul>`},
{"265", "\n1. ```\n foo\n ```\n\n bar", `
<ol>
<li>
<pre><code>foo
</code></pre>
<p>bar</p>
</li>
</ol>`},
{"266", `
* foo
* bar
baz`, `
<ul>
<li>
<p>foo</p>
<ul>
<li>bar</li>
</ul>
<p>baz</p>
</li>
</ul>`},
{"267", `
- a
- b
- c
- d
- e
- f`, `
<ul>
<li>
<p>a</p>
<ul>
<li>b</li>
<li>c</li>
</ul>
</li>
<li>
<p>d</p>
<ul>
<li>e</li>
<li>f</li>
</ul>
</li>
</ul>`},
{"268", "`hi`lo`", "<p><code>hi</code>lo`</p>"},
{"273", `
foo\
bar
`, `
<p>foo<br>
bar</p>`},
{"275", ` \[\]`, `<pre><code>\[\]
</code></pre>`},
{"276", `
~~~
\[\]
~~~`, `
<pre><code>\[\]
</code></pre>`},
{"294", "`foo`", `<p><code>foo</code></p>`},
{"300", "`foo\\`bar`", "<p><code>foo\\</code>bar`</p>"},
{"303", "`<a href=\"`\">`", "<p><code><a href="</code>">`</p>"},
{"308", "`foo", "<p>`foo</p>"},
{"309", "*foo bar*", "<p><em>foo bar</em></p>"},
{"310", "a * foo bar*", "<p>a * foo bar*</p>"},
{"313", "foo*bar*", "<p>foo<em>bar</em></p>"},
{"314", "5*6*78", "<p>5<em>6</em>78</p>"},
{"315", "_foo bar_", "<p><em>foo bar</em></p>"},
{"322", "foo-_(bar)_", "<p>foo-<em>(bar)</em></p>"},
{"323", "_foo*", "<p>_foo*</p>"},
{"328", "*foo*bar", "<p><em>foo</em>bar</p>"},
{"335", "_(bar)_.", "<p><em>(bar)</em>.</p>"},
{"336", "**foo bar**", "<p><strong>foo bar</strong></p>"},
{"339", "foo**bar**", "<p>foo<strong>bar</strong></p>"},
{"340", "__foo bar__", "<p><strong>foo bar</strong></p>"},
{"348", "foo-__(bar)__", "<p>foo-<strong>(bar)</strong></p>"},
{"352", "**Gomphocarpus (*Gomphocarpus physocarpus*, syn.*Asclepias physocarpa*)**",
"<p><strong>Gomphocarpus (<em>Gomphocarpus physocarpus</em>, syn.<em>Asclepias physocarpa</em>)</strong></p>"},
{"353", "**foo \"*bar*\" foo**", "<p><strong>foo "<em>bar</em>" foo</strong></p>"},
{"354", "**foo**bar", "<p><strong>foo</strong>bar</p>"},
{"361", "__(bar)__.", "<p><strong>(bar)</strong>.</p>"},
{"362", "*foo [bar](/url)*", "<p><em>foo <a href=\"/url\">bar</a></em></p>"},
{"363", "*foo\nbar*", "<p><em>foo\nbar</em></p>"},
{"375", "** is not an empty emphasis", "<p>** is not an empty emphasis</p>"},
{"377", "**foo [bar](/url)**", "<p><strong>foo <a href=\"/url\">bar</a></strong></p>"},
{"378", "**foo\nbar**", "<p><strong>foo\nbar</strong></p>"},
{"379", "__foo _bar_ baz__", "<p><strong>foo <em>bar</em> baz</strong></p>"},
{"383", "**foo *bar* baz**", "<p><strong>foo <em>bar</em> baz</strong></p>"},
{"385", "***foo* bar**", "<p><strong><em>foo</em> bar</strong></p>"},
{"386", "**foo *bar***", "<p><strong>foo <em>bar</em></strong></p>"},
{"389", "__ is not an empty emphasis", "<p>__ is not an empty emphasis</p>"},
{"392", "foo *\\**", "<p>foo <em>*</em></p>"},
{"393", "foo *_*", "<p>foo <em>_</em></p>"},
{"395", "foo **\\***", "<p>foo <strong>*</strong></p>"},
{"396", "foo **_**", "<p>foo <strong>_</strong></p>"},
{"404", "foo _\\__", "<p>foo <em>_</em></p>"},
{"405", "foo _*_", "<p>foo <em>*</em></p>"},
{"407", "foo __\\___", "<p>foo <strong>_</strong></p>"},
{"408", "foo __*__", "<p>foo <strong>*</strong></p>"},
{"415", "**foo**", "<p><strong>foo</strong></p>"},
{"416", "*_foo_*", "<p><em><em>foo</em></em></p>"},
{"417", "__foo__", "<p><strong>foo</strong></p>"},
{"418", "_*foo*_", "<p><em><em>foo</em></em></p>"},
{"419", "****foo****", "<p><strong><strong>foo</strong></strong></p>"},
{"420", "____foo____", "<p><strong><strong>foo</strong></strong></p>"},
{"422", "***foo***", "<p><strong><em>foo</em></strong></p>"},
{"424", "*foo _bar* baz_", "<p><em>foo _bar</em> baz_</p>"},
{"438", "[link](/uri \"title\")", "<p><a href=\"/uri\" title=\"title\">link</a></p>"},
{"439", "[link](/uri)", "<p><a href=\"/uri\">link</a></p>"},
{"440", "[link]()", "<p><a href=\"\">link</a></p>"},
{"441", "[link](<>)", "<p><a href=\"\">link</a></p>"},
{"451", `
[link](#fragment)
[link](http://example.com#fragment)
[link](http://example.com?foo=bar&baz#fragment)`, `
<p><a href="#fragment">link</a></p>
<p><a href="http://example.com#fragment">link</a></p>
<p><a href="http://example.com?foo=bar&baz#fragment">link</a></p>`},
{"455", `
[link](/url "title")
[link](/url 'title')
[link](/url (title))`, `
<p><a href="/url" title="title">link</a>
<a href="/url" title="title">link</a>
<a href="/url" title="title">link</a></p>`},
{"458", `[link](/url 'title "and" title')`, `<p><a href="/url" title="title "and" title">link</a></p>`},
{"460", "[link] (/uri)", "<p>[link] (/uri)</p>"},
{"461", "[link [foo [bar]]](/uri)", `<p><a href="/uri">link [foo [bar]]</a></p>`},
{"463", "[link [bar](/uri)", `<p>[link <a href="/uri">bar</a></p>`},
{"471", "[foo *bar](baz*)", `<p><a href="baz*">foo *bar</a></p>`},
{"472", "*foo [bar* baz]", "<p><em>foo [bar</em> baz]</p>"},
{"476", `
[foo][bar]
[bar]: /url "title"`, `<p><a href="/url" title="title">foo</a></p>`},
{"477", `
[link [foo [bar]]][ref]
[ref]: /uri`, `<p><a href="/uri">link [foo [bar]]</a></p>`},
{"484", `
[foo *bar][ref]
[ref]: /uri`, `<p><a href="/uri">foo *bar</a></p>`},
{"488", `
[foo][BaR]
[bar]: /url "title"`, `<p><a href="/url" title="title">foo</a></p>`},
{"489", `
[Толпой][Толпой] is a Russian word.
[ТОЛПОЙ]: /url`, `<p><a href="/url">Толпой</a> is a Russian word.</p>`},
{"491", `
[foo] [bar]
[bar]: /url "title"`, `<p><a href="/url" title="title">foo</a></p>`},
{"492", `
[foo]
[bar]
[bar]: /url "title"`, `<p><a href="/url" title="title">foo</a></p>`},
{"493", `
[foo]: /url1
[foo]: /url2
[bar][foo]`, `<p><a href="/url1">bar</a></p>`},
{"496", `
[foo][ref[bar]]
[ref[bar]]: /uri`, `
<p>[foo][ref[bar]]</p>
<p>[ref[bar]]: /uri</p>`},
{"497", `
[[[foo]]]
[[[foo]]]: /url`, `
<p>[[[foo]]]</p>
<p>[[[foo]]]: /url</p>`},
{"498", `
[foo][ref\[]
[ref\[]: /uri`, `<p><a href="/uri">foo</a></p>`},
{"499", `
[]
[]: /uri`, `
<p>[]</p>
<p>[]: /uri</p>`},
{"501", `
[foo][]
[foo]: /url "title"`, `<p><a href="/url" title="title">foo</a></p>`},
{"502", `
[*foo* bar][]
[*foo* bar]: /url "title"`, `
<p><a href="/url" title="title"><em>foo</em> bar</a></p>`},
{"503", `
[Foo][]
[foo]: /url "title"`, `<p><a href="/url" title="title">Foo</a></p>`},
{"504", `
[foo]
[]
[foo]: /url "title"`, `<p><a href="/url" title="title">foo</a></p>`},
{"505", `
[foo]
[foo]: /url "title"`, `<p><a href="/url" title="title">foo</a></p>`},
{"506", `
[*foo* bar]
[*foo* bar]: /url "title"`, `
<p><a href="/url" title="title"><em>foo</em> bar</a></p>`},
{"508", `
[[bar [foo]
[foo]: /url`, `<p>[[bar <a href="/url">foo</a></p>`},
{"509", `
[Foo]
[foo]: /url "title"`, `<p><a href="/url" title="title">Foo</a></p>`},
{"510", `
[foo] bar
[foo]: /url`, `<p><a href="/url">foo</a> bar</p>`},
{"511", `
\[foo]
[foo]: /url "title"`, `<p>[foo]</p>`},
{"513", `
[foo][bar]
[foo]: /url1
[bar]: /url2`, `<p><a href="/url2">foo</a></p>`},
{"515", `
[foo][bar][baz]
[baz]: /url1
[bar]: /url2`, `<p><a href="/url2">foo</a><a href="/url1">baz</a></p>`},
{"517", ``, `<p><img src="/url" alt="foo" title="title"></p>`},
{"523", ``, `<p><img src="train.jpg" alt="foo"></p>`},
{"524", `My `,
`<p>My <img src="/path/to/train.jpg" alt="foo bar" title="title"></p>`},
{"525", ``, `<p><img src="url" alt="foo"></p>`},
{"526", ``, `<p><img src="/url" alt=""></p>`},
{"527", `
![foo] [bar]
[bar]: /url`, `<p><img src="/url" alt="foo"></p>`},
{"528", `
![foo] [bar]
[BAR]: /url`, `<p><img src="/url" alt="foo"></p>`},
{"529", `
![foo][]
[foo]: /url "title"`, `<p><img src="/url" alt="foo" title="title"></p>`},
{"531", `
![Foo][]
[foo]: /url "title"`, `<p><img src="/url" alt="Foo" title="title"></p>`},
{"532", `
![foo]
[]
[foo]: /url "title"`, `<p><img src="/url" alt="foo" title="title"></p>`},
{"533", `
![foo]
[foo]: /url "title"`, `<p><img src="/url" alt="foo" title="title"></p>`},
{"535", `
![[foo]]
[[foo]]: /url "title"`, `
<p>![[foo]]</p>
<p>[[foo]]: /url "title"</p>`},
{"536", `
![Foo]
[foo]: /url "title"`, `<p><img src="/url" alt="Foo" title="title"></p>`},
{"537", `
\!\[foo]
[foo]: /url "title"`, `<p>![foo]</p>`},
{"538", `
\![foo]
[foo]: /url "title"`, `<p>!<a href="/url" title="title">foo</a></p>`},
{"539", `<http://foo.bar.baz>`, `<p><a href="http://foo.bar.baz">http://foo.bar.baz</a></p>`},
{"540", `<http://foo.bar.baz/test?q=hello&id=22&boolean>`,
`<p><a href="http://foo.bar.baz/test?q=hello&id=22&boolean">http://foo.bar.baz/test?q=hello&id=22&boolean</a></p>`},
{"541", `<irc://foo.bar:2233/baz>`, `<p><a href="irc://foo.bar:2233/baz">irc://foo.bar:2233/baz</a></p>`},
{"542", `<MAILTO:FOO@BAR.BAZ>`, `<p><a href="MAILTO:FOO@BAR.BAZ">MAILTO:FOO@BAR.BAZ</a></p>`},
{"548", "<>", "<p><></p>"},
{"554", `foo@bar.example.com`, `<p>foo@bar.example.com</p>`},
{"555", "<a><bab><c2c>", "<p><a><bab><c2c></p>"},
{"556", "<a/><b2/>", "<p><a/><b2/></p>"},
{"557", `
<a /><b2
data="foo" >`, `
<p><a /><b2
data="foo" ></p>`},
{"558", `
<a foo="bar" bam = 'baz <em>"</em>'
_boolean zoop:33=zoop:33 />`, `
<p><a foo="bar" bam = 'baz <em>"</em>'
_boolean zoop:33=zoop:33 /></p>`},
{"572", "foo <![CDATA[>&<]]>", "<p>foo <![CDATA[>&<]]></p>"},
{"576", `
foo
baz`, `
<p>foo<br>
baz</p>`},
{"577", `
foo\
baz`, `
<p>foo<br>
baz</p>`},
{"578", `
foo
baz`, `<p>foo<br>baz</p>`},
{"581", `
*foo
bar*`, `
<p><em>foo<br>
bar</em></p>`},
{"582", `
*foo\
bar*`, `
<p><em>foo<br>
bar</em></p>`},
{"587", `foo\`, `<p>foo\</p>`},
{"588", `foo `, `<p>foo</p>`},
{"589", `### foo\`, `<h3>foo\</h3>`},
{"590", `### foo `, `<h3>foo</h3>`},
{"591", `
foo
baz`, `
<p>foo
baz</p>`},
{"592", `
foo
baz`, `
<p>foo
baz</p>`},
{"594", `Foo χρῆν`, `<p>Foo χρῆν</p>`},
{"595", `Multiple spaces`, `<p>Multiple spaces</p>`},
}
func TestCommonMark(t *testing.T) {
reID := regexp.MustCompile(` +?id=".*"`)
for _, c := range CMCases {
// Remove the auto-hashing until it'll be in the configuration
actual := reID.ReplaceAllString(Render(c.input), "")
if strings.Replace(actual, "\n", "", -1) != strings.Replace(c.expected, "\n", "", -1) {
t.Errorf("\ninput:%s\ngot:\n%s\nexpected:\n%s\nlink: http://spec.commonmark.org/0.21/#example-%s\n",
c.input, actual, c.expected, c.name)
}
}
}
test(mark): commonmark-316
package mark
import (
"io/ioutil"
"regexp"
"strings"
"testing"
)
func TestRender(t *testing.T) {
cases := map[string]string{
"foobar": "<p>foobar</p>",
" foo bar": "<p>foo bar</p>",
"|foo|bar": "<p>|foo|bar</p>",
"foo \nbar": "<p>foo<br>bar</p>",
"__bar__ foo": "<p><strong>bar</strong> foo</p>",
"**bar** foo __bar__": "<p><strong>bar</strong> foo <strong>bar</strong></p>",
"**bar**__baz__": "<p><strong>bar</strong><strong>baz</strong></p>",
"**bar**foo__bar__": "<p><strong>bar</strong>foo<strong>bar</strong></p>",
"_bar_baz": "<p><em>bar</em>baz</p>",
"_foo_~~bar~~ baz": "<p><em>foo</em><del>bar</del> baz</p>",
"~~baz~~ _baz_": "<p><del>baz</del> <em>baz</em></p>",
"`bool` and thats it.": "<p><code>bool</code> and thats it.</p>",
// Html
"<!--hello-->": "<!--hello-->",
// Emphasis mixim
"___foo___": "<p><strong><em>foo</em></strong></p>",
"__foo _bar___": "<p><strong>foo <em>bar</em></strong></p>",
"__*foo*__": "<p><strong><em>foo</em></strong></p>",
"_**mixim**_": "<p><em><strong>mixim</strong></em></p>",
"~~__*mixim*__~~": "<p><del><strong><em>mixim</em></strong></del></p>",
"~~*mixim*~~": "<p><del><em>mixim</em></del></p>",
// Paragraph
"1 \n2 \n3": "<p>1<br>2<br>3</p>",
"1\n\n2": "<p>1</p>\n<p>2</p>",
"1\n\n\n2": "<p>1</p>\n<p>2</p>",
"1\n\n\n\n\n\n\n\n2": "<p>1</p>\n<p>2</p>",
// Heading
"# 1\n## 2": "<h1 id=\"1\">1</h1>\n<h2 id=\"2\">2</h2>",
"# 1\np\n## 2\n### 3\n4\n===": "<h1 id=\"1\">1</h1>\n<p>p</p>\n<h2 id=\"2\">2</h2>\n<h3 id=\"3\">3</h3>\n<h1 id=\"4\">4</h1>",
"Hello\n===": "<h1 id=\"hello\">Hello</h1>",
// Links
"[text](link \"title\")": "<p><a href=\"link\" title=\"title\">text</a></p>",
"[text](link)": "<p><a href=\"link\">text</a></p>",
"[](link)": "<p><a href=\"link\"></a></p>",
"Link: [example](#)": "<p>Link: <a href=\"#\">example</a></p>",
"Link: [not really": "<p>Link: [not really</p>",
"http://localhost:3000": "<p><a href=\"http://localhost:3000\">http://localhost:3000</a></p>",
"Link: http://yeah.com": "<p>Link: <a href=\"http://yeah.com\">http://yeah.com</a></p>",
"<http://foo.com>": "<p><a href=\"http://foo.com\">http://foo.com</a></p>",
"Link: <http://l.co>": "<p>Link: <a href=\"http://l.co\">http://l.co</a></p>",
"Link: <not really": "<p>Link: <not really</p>",
// CodeBlock
"\tfoo\n\tbar": "<pre><code>foo\nbar</code></pre>",
"\tfoo\nbar": "<pre><code>foo\n</code></pre>\n<p>bar</p>",
// GfmCodeBlock
"```js\nvar a;\n```": "<pre><code class=\"lang-js\">\nvar a;\n</code></pre>",
"~~~\nvar b;~~let d = 1~~~~": "<pre><code>\nvar b;~~let d = 1~~~~</code></pre>",
"~~~js\n": "<pre><code class=\"lang-js\">\n</code></pre>",
// Hr
"foo\n****\nbar": "<p>foo</p>\n<hr>\n<p>bar</p>",
"foo\n___": "<p>foo</p>\n<hr>",
// Images
"": "<p><img src=\"url\" alt=\"name\"></p>",
"": "<p><img src=\"url\" alt=\"name\" title=\"title\"></p>",
"img: ![name]()": "<p>img: <img src=\"\" alt=\"name\"></p>",
// Lists
"- foo\n- bar": "<ul>\n<li>foo</li>\n<li>bar</li>\n</ul>",
"* foo\n* bar": "<ul>\n<li>foo</li>\n<li>bar</li>\n</ul>",
"+ foo\n+ bar": "<ul>\n<li>foo</li>\n<li>bar</li>\n</ul>",
// // Ordered Lists
"1. one\n2. two\n3. three": "<ol>\n<li>one</li>\n<li>two</li>\n<li>three</li>\n</ol>",
"1. one\n 1. one of one": "<ol>\n<li>one<ol>\n<li>one of one</li>\n</ol></li>\n</ol>",
"2. two\n 3. three": "<ol>\n<li>two<ol>\n<li>three</li>\n</ol></li>\n</ol>",
// Special characters escaping
"< hello": "<p>< hello</p>",
"hello >": "<p>hello ></p>",
"foo & bar": "<p>foo & bar</p>",
"'foo'": "<p>'foo'</p>",
"\"foo\"": "<p>"foo"</p>",
"©": "<p>©</p>",
// Backslash escaping
"\\**foo\\**": "<p>*<em>foo*</em></p>",
"\\*foo\\*": "<p>*foo*</p>",
"\\_underscores\\_": "<p>_underscores_</p>",
"\\## header": "<p>## header</p>",
"header\n\\===": "<p>header\n\\===</p>",
}
for input, expected := range cases {
if actual := Render(input); actual != expected {
t.Errorf("%s: got\n%+v\nexpected\n%+v", input, actual, expected)
}
}
}
// TestData renders every test/*.text fixture and compares the result,
// with newlines stripped, against the matching test/*.html golden file.
// Fixtures whose name contains "smartypants" or "smartyfractions" are
// re-rendered with the corresponding option enabled.
func TestData(t *testing.T) {
	files, err := ioutil.ReadDir("test")
	if err != nil {
		// Without the fixture directory nothing below can run.
		t.Fatalf("Couldn't open 'test' directory: %s", err)
	}
	var testFiles []string
	for _, file := range files {
		if name := file.Name(); strings.HasSuffix(name, ".text") {
			testFiles = append(testFiles, "test/"+strings.TrimSuffix(name, ".text"))
		}
	}
	re := regexp.MustCompile(`\n`)
	for _, file := range testFiles {
		html, err := ioutil.ReadFile(file + ".html")
		if err != nil {
			t.Errorf("Error to read html file: %s", file)
			continue // comparing against a missing golden file is meaningless
		}
		text, err := ioutil.ReadFile(file + ".text")
		if err != nil {
			t.Errorf("Error to read text file: %s", file)
			continue // nothing to render
		}
		// Remove '\n' so the comparison ignores formatting-only newlines.
		sHTML := re.ReplaceAllLiteralString(string(html), "")
		output := Render(string(text))
		opts := DefaultOptions()
		if strings.Contains(file, "smartypants") {
			opts.Smartypants = true
			output = New(string(text), opts).Render()
		}
		if strings.Contains(file, "smartyfractions") {
			opts.Fractions = true
			output = New(string(text), opts).Render()
		}
		sText := re.ReplaceAllLiteralString(output, "")
		if sHTML != sText {
			t.Errorf("%s: got\n\t%+v\nexpected\n\t%+v", file, sText, sHTML)
		}
	}
}
// TestRenderFn checks that a custom render function registered for paragraph
// nodes replaces the default paragraph rendering.
// TODO: Add more tests for custom render functions.
func TestRenderFn(t *testing.T) {
	md := New("hello world", nil)
	md.AddRenderFn(NodeParagraph, func(n Node) string {
		p, ok := n.(*ParagraphNode)
		if !ok {
			// Not a paragraph: render nothing, as the default did.
			return ""
		}
		out := "<p class=\"mv-msg\">"
		for _, child := range p.Nodes {
			out += child.Render()
		}
		return out + "</p>"
	})
	want := "<p class=\"mv-msg\">hello world</p>"
	if got := md.Render(); got != want {
		t.Errorf("RenderFn: got\n\t%+v\nexpected\n\t%+v", got, want)
	}
}
// A CommonMarkSpec holds one example from the CommonMark specification:
// the spec example number, the markdown input, and the expected HTML.
type CommonMarkSpec struct {
	name     string // CommonMark spec example number, e.g. "6"
	input    string // markdown source
	expected string // expected HTML output
}
var CMCases = []CommonMarkSpec{
{"6", "- `one\n- two`", "<ul><li>`one</li><li>two`</li></ul>"},
{"7", "***\n---\n___", "<hr><hr><hr>"},
{"8", "+++", "<p>+++</p>"},
{"9", "===", "<p>===</p>"},
{"10", "--\n**\n__", "<p>--**__</p>"},
{"11", " ***\n ***\n ***", "<hr><hr><hr>"},
{"12", " ***", "<pre><code>***</code></pre>"},
{"14", "_____________________________________", "<hr>"},
{"15", " - - -", "<hr>"},
{"16", " ** * ** * ** * **", "<hr>"},
{"17", "- - - -", "<hr>"},
{"18", "- - - - ", "<hr>"},
{"20", " *-*", "<p><em>-</em></p>"},
{"21", "- foo\n***\n- bar", "<ul>\n<li>foo</li>\n</ul>\n<hr>\n<ul>\n<li>bar</li>\n</ul>"},
{"22", "Foo\n***\nbar", "<p>Foo</p><hr><p>bar</p>"},
{"23", "Foo\n---\nbar", "<h2>Foo</h2><p>bar</p>"},
{"24", "* Foo\n* * *\n* Bar", "<ul>\n<li>Foo</li>\n</ul>\n<hr>\n<ul>\n<li>Bar</li>\n</ul>"},
{"25", "- Foo\n- * * *", "<ul>\n<li>Foo</li>\n<li>\n<hr>\n</li>\n</ul>"},
{"26", `# foo
## foo
### foo
#### foo
##### foo
###### foo`, `<h1>foo</h1>
<h2>foo</h2>
<h3>foo</h3>
<h4>foo</h4>
<h5>foo</h5>
<h6>foo</h6>`},
{"27", "####### foo", "<p>####### foo</p>"},
{"28", "#5 bolt\n\n#foobar", "<p>#5 bolt</p>\n<p>#foobar</p>"},
{"29", "\\## foo", "<p>## foo</p>"},
{"30", "# foo *bar* \\*baz\\*", "<h1>foo <em>bar</em> *baz*</h1>"},
{"31", "# foo ", "<h1>foo</h1>"},
{"32", ` ### foo
## foo
# foo`, `<h3>foo</h3>
<h2>foo</h2>
<h1>foo</h1>`},
{"33", " # foo", "<pre><code># foo</code></pre>"},
{"34", `
foo
# bar`, `
<p>foo
# bar</p>`},
{"35", `## foo ##
### bar ###`, `<h2>foo</h2>
<h3>bar</h3>`},
{"36", `# foo ##################################
##### foo ##`, `<h1>foo</h1>
<h5>foo</h5>`},
{"37", "### foo ### ", "<h3>foo</h3>"},
{"38", "### foo ### b", "<h3>foo ### b</h3>"},
{"39", "# foo#", "<h1>foo#</h1>"},
{"40", `
### foo \###
## foo #\##
# foo \#`, `
<h3>foo ###</h3>
<h2>foo ###</h2>
<h1>foo #</h1>`},
{"41", `****
## foo
****`, `<hr>
<h2>foo</h2>
<hr>`},
{"42", `Foo bar
# baz
Bar foo`, `<p>Foo bar</p>
<h1>baz</h1>
<p>Bar foo</p>`},
{"43", `
##
#
### ###`, `
<h2></h2>
<h1></h1>
<h3></h3>`},
{"44", `
Foo *bar*
=========
Foo *bar*
---------`, `
<h1>Foo <em>bar</em></h1>
<h2>Foo <em>bar</em></h2>`},
{"45", `Foo
-------------------------
Foo
=`, `<h2>Foo</h2>
<h1>Foo</h1>`},
{"46", ` Foo
---
Foo
-----
Foo
===`, `<h2>Foo</h2>
<h2>Foo</h2>
<h1>Foo</h1>`},
{"47", ` Foo
---
Foo
---`, `<pre><code>Foo
---
Foo
</code></pre>
<hr>`},
{"48", `Foo
---- `, "<h2>Foo</h2>"},
{"49", `
Foo
---`, `
<p>Foo
---</p>`},
{"50", `Foo
= =
Foo
--- -`, `<p>Foo
= =</p>
<p>Foo</p>
<hr>`},
{"51", `Foo
-----`, "<h2>Foo</h2>"},
{"52", `Foo\
----`, "<h2>Foo\\</h2>"},
{"53", "`Foo\n----\n`\n\n<a title=\"a lot\n---\nof dashes\"/>", "<h2>`Foo</h2>\n<p>`</p>\n<h2><a title="a lot</h2>\n<p>of dashes"/></p>"},
{"54", `
> Foo
---`, `
<blockquote>
<p>Foo</p>
</blockquote>
<hr>`},
{"55", `- Foo
---`, `<ul>
<li>Foo</li>
</ul>
<hr>`},
{"57", `---
Foo
---
Bar
---
Baz`, `<hr>
<h2>Foo</h2>
<h2>Bar</h2>
<p>Baz</p>`},
{"58", "====", "<p>====</p>"},
{"59", `---
---`, "<hr><hr>"},
{"60", `- foo
-----`, `<ul>
<li>foo</li>
</ul>
<hr>`},
{"61", ` foo
---`, `<pre><code>foo
</code></pre>
<hr>`},
{"62", `
> foo
-----`, `
<blockquote>
<p>foo</p>
</blockquote>
<hr>`},
{"63", `
\> foo
------`, `
<h2>> foo</h2>`},
{"64", ` a simple
indented code block`, `<pre><code>a simple
indented code block
</code></pre>`},
{"65", `
- foo
bar`, `
<ul>
<li>
<p>foo</p>
<p>bar</p>
</li>
</ul>`},
{"66", `1. foo
- bar`, `<ol>
<li>
<p>foo</p>
<ul>
<li>bar</li>
</ul>
</li>
</ol>`},
{"67", ` <a/>
*hi*
- one`, `<pre><code><a/>
*hi*
- one
</code></pre>`},
{"68", `
chunk1
chunk2
chunk3`, `
<pre><code>chunk1
chunk2
chunk3
</code></pre>`},
{"69", `
chunk1
chunk2`, `
<pre><code>chunk1
chunk2
</code></pre>`},
{"70", `
Foo
bar`, `
<p>Foo
bar</p>`},
{"71", ` foo
bar`, `<pre><code>foo
</code></pre>
<p>bar</p>`},
{"72", `# Header
foo
Header
------
foo
----`, `<h1>Header</h1>
<pre><code>foo
</code></pre>
<h2>Header</h2>
<pre><code>foo
</code></pre>
<hr>`},
{"73", ` foo
bar`, `<pre><code> foo
bar
</code></pre>`},
{"74", `
foo
`, `<pre><code>foo
</code></pre>`},
{"75", " foo ", `<pre><code>foo
</code></pre>`},
{"76", "```\n< \n>\n```", `<pre><code><
>
</code></pre>`},
{"77", `~~~
<
>
~~~`, `<pre><code><
>
</code></pre>`},
{"78", "```\naaa\n~~~\n```", `<pre><code>aaa
~~~
</code></pre>`},
{"79", "~~~\naaa\n```\n~~~", "<pre><code>aaa\n```\n</code></pre>"},
{"80", "````\naaa\n```\n``````", "<pre><code>aaa\n```\n</code></pre>"},
{"81", `
~~~~
aaa
~~~
~~~~`, `
<pre><code>aaa
~~~
</code></pre>`},
{"82", "```", "<pre><code></code></pre>"},
{"83", "`````\n\n```\naaa", "<pre><code>\n```\naaa\n</code></pre>"},
{"84", "> ```\n> aaa\n\nbbb", `
<blockquote>
<pre><code>aaa
</code></pre>
</blockquote>
<p>bbb</p>`},
{"85", "```\n\n \n```", "<pre><code>\n \n</code></pre>"},
{"86", "```\n```", `<pre><code></code></pre>`},
{"87", " ```\n aaa\naaa\n```", `
<pre><code>aaa
aaa
</code></pre>`},
{"88", " ```\naaa\n aaa\naaa\n ```", `
<pre><code>aaa
aaa
aaa
</code></pre>`},
{"89", " ```\n aaa\n aaa\n aaa\n ```", `
<pre><code>aaa
aaa
aaa
</code></pre>`},
{"90", " ```\n aaa\n ```", "<pre><code>```\naaa\n```\n</code></pre>"},
{"91", "```\naaa\n ```", `<pre><code>aaa
</code></pre>`},
{"92", " ```\naaa\n ```", `<pre><code>aaa
</code></pre>`},
{"93", "```\naaa\n ```", "<pre><code>aaa\n ```\n</code></pre>"},
{"95", `
~~~~~~
aaa
~~~ ~~`, `
<pre><code>aaa
~~~ ~~
</code></pre>`},
{"96", "foo\n```\nbar\n```\nbaz", `<p>foo</p>
<pre><code>bar
</code></pre>
<p>baz</p>`},
{"97", `foo
---
~~~
bar
~~~
# baz`, `<h2>foo</h2>
<pre><code>bar
</code></pre>
<h1>baz</h1>`},
{"102", "```\n``` aaa\n```", "<pre><code>``` aaa\n</code></pre>"},
{"103", `
<table>
<tr>
<td>
hi
</td>
</tr>
</table>
okay.`, `
<table>
<tr>
<td>
hi
</td>
</tr>
</table>
<p>okay.</p>`},
// Move out the id, because the regexp below (in TestCommonMark) strips id attributes
{"107", `
<div
class="bar">
</div>`, `
<div
class="bar">
</div>`},
{"108", `
<div class="bar
baz">
</div>`, `
<div class="bar
baz">
</div>`},
{"113", `<div><a href="bar">*foo*</a></div>`, `<div><a href="bar">*foo*</a></div>`},
{"114", `
<table><tr><td>
foo
</td></tr></table>`, `
<table><tr><td>
foo
</td></tr></table>`},
{"117", `
<Warning>
*bar*
</Warning>`, `
<Warning>
*bar*
</Warning>`},
{"121", "<del>*foo*</del>", "<p><del><em>foo</em></del></p>"},
{"122", `
<pre language="haskell"><code>
import Text.HTML.TagSoup
main :: IO ()
main = print $ parseTags tags
</code></pre>`, `
<pre language="haskell"><code>
import Text.HTML.TagSoup
main :: IO ()
main = print $ parseTags tags
</code></pre>`},
{"123", `
<script type="text/javascript">
// JavaScript example
document.getElementById("demo").innerHTML = "Hello JavaScript!";
</script>`, `
<script type="text/javascript">
// JavaScript example
document.getElementById("demo").innerHTML = "Hello JavaScript!";
</script>`},
{"124", `
<style
type="text/css">
h1 {color:red;}
p {color:blue;}
</style>`, `
<style
type="text/css">
h1 {color:red;}
p {color:blue;}
</style>`},
{"127", `
- <div>
- foo`, `
<ul>
<li>
<div>
</li>
<li>foo</li>
</ul>`},
{"137", `
Foo
<div>
bar
</div>`, `
<p>Foo</p>
<div>
bar
</div>`},
{"139", `
Foo
<a href="bar">
baz`, `
<p>Foo
<a href="bar">
baz</p>`},
{"141", `
<div>
*Emphasized* text.
</div>`, `
<div>
*Emphasized* text.
</div>
`},
{"142", `
<table>
<tr>
<td>
Hi
</td>
</tr>
</table>`, `
<table>
<tr>
<td>
Hi
</td>
</tr>
</table>
`},
{"144", `
[foo]: /url "title"
[foo]`, `<p><a href="/url" title="title">foo</a></p>`},
{"145", `
[foo]:
/url
'the title'
[foo]`, `<p><a href="/url" title="the title">foo</a></p>`},
{"148", `
[foo]: /url '
title
line1
line2
'
[foo]`, `
<p><a href="/url" title="
title
line1
line2
">foo</a></p>`},
{"150", `
[foo]:
/url
[foo]`, `<p><a href="/url">foo</a></p>`},
{"151", `
[foo]:
[foo]`, `
<p>[foo]:</p>
<p>[foo]</p>`},
{"153", `
[foo]
[foo]: url`, `<p><a href="url">foo</a></p>`},
{"154", `
[foo]
[foo]: first
[foo]: second`, `<p><a href="first">foo</a></p>`},
{"155", `
[FOO]: /url
[Foo]`, `<p><a href="/url">Foo</a></p>`},
{"157", "[foo]: /url", ""},
{"158", `
[
foo
]: /url
bar`, "<p>bar</p>"},
{"159", `[foo]: /url "title" ok`, "<p>[foo]: /url "title" ok</p>"},
{"160", `
[foo]: /url
"title" ok`, "<p>"title" ok</p>"},
{"161", `
[foo]: /url "title"
[foo]`, `
<pre><code>[foo]: /url "title"
</code></pre>
<p>[foo]</p>`},
{"162", "```\n[foo]: /url\n```\n\n[foo]", `
<pre><code>[foo]: /url
</code></pre>
<p>[foo]</p>`},
{"166", `
[foo]
> [foo]: /url`, `
<p><a href="/url">foo</a></p>
<blockquote>
</blockquote>`},
{"167", `
aaa
bbb`, `
<p>aaa</p>
<p>bbb</p>`},
{"168", `
aaa
bbb
ccc
ddd`, `
<p>aaa
bbb</p>
<p>ccc
ddd</p>`},
{"169", `
aaa
bbb`, `
<p>aaa</p>
<p>bbb</p>`},
{"170", `
aaa
bbb`, `
<p>aaa
bbb</p>`},
{"171", `
aaa
bbb
ccc`, `
<p>aaa
bbb
ccc</p>`},
{"172", `
aaa
bbb`, `
<p>aaa
bbb</p>`},
{"173", `
aaa
bbb`, `
<pre><code>aaa
</code></pre>
<p>bbb</p>`},
{"174", `
aaa
bbb `, `
<p>aaa<br>
bbb</p>`},
{"175", `
aaa
# aaa
`, `
<p>aaa</p>
<h1>aaa</h1>`},
{"176", `
> # Foo
> bar
> baz`, `
<blockquote>
<h1>Foo</h1>
<p>bar
baz</p>
</blockquote>`},
{"177", `
># Foo
>bar
> baz`, `
<blockquote>
<h1>Foo</h1>
<p>bar
baz</p>
</blockquote>`},
{"178", `
> # Foo
> bar
> baz`, `
<blockquote>
<h1>Foo</h1>
<p>bar
baz</p>
</blockquote>`},
{"179", `
> # Foo
> bar
> baz`, `
<pre><code>> # Foo
> bar
> baz
</code></pre>`},
{"180", `
> # Foo
> bar
baz`, `
<blockquote>
<h1>Foo</h1>
<p>bar
baz</p>
</blockquote>`},
{"181", `
> bar
baz
> foo`, `
<blockquote>
<p>bar
baz
foo</p>
</blockquote>`},
{"182", `
> foo
---`, `
<blockquote>
<p>foo</p>
</blockquote>
<hr>`},
{"186", `
>`, `
<blockquote>
</blockquote>`},
{"187", `
>
>
> `, `
<blockquote>
</blockquote>`},
{"188", `
>
> foo
> `, `
<blockquote>
<p>foo</p>
</blockquote>`},
{"189", `
> foo
> bar`, `
<blockquote>
<p>foo</p>
</blockquote>
<blockquote>
<p>bar</p>
</blockquote>`},
{"190", `
> foo
> bar`, `
<blockquote>
<p>foo
bar</p>
</blockquote>`},
{"191", `
> foo
>
> bar`, `
<blockquote>
<p>foo</p>
<p>bar</p>
</blockquote>`},
{"192", `
foo
> bar`, `
<p>foo</p>
<blockquote>
<p>bar</p>
</blockquote>`},
{"193", `
> aaa
***
> bbb`, `
<blockquote>
<p>aaa</p>
</blockquote>
<hr>
<blockquote>
<p>bbb</p>
</blockquote>`},
{"194", `
> bar
baz`, `
<blockquote>
<p>bar
baz</p>
</blockquote>`},
{"195", `
> bar
baz`, `
<blockquote>
<p>bar</p>
</blockquote>
<p>baz</p>`},
{"197", `
> > > foo
bar`, `
<blockquote>
<blockquote>
<blockquote>
<p>foo
bar</p>
</blockquote>
</blockquote>
</blockquote>`},
{"198", `
>>> foo
> bar
>>baz`, `
<blockquote>
<blockquote>
<blockquote>
<p>foo
bar
baz</p>
</blockquote>
</blockquote>
</blockquote>`},
{"199", `
> code
> not code`, `
<blockquote>
<pre><code>code
</code></pre>
</blockquote>
<blockquote>
<p>not code</p>
</blockquote>`},
{"200", `
A paragraph
with two lines.
indented code
> A block quote.`, `
<p>A paragraph
with two lines.</p>
<pre><code>indented code
</code></pre>
<blockquote>
<p>A block quote.</p>
</blockquote>`},
{"201", `
1. A paragraph
with two lines.
indented code
> A block quote.`, `
<ol>
<li>
<p>A paragraph
with two lines.</p>
<pre><code>indented code
</code></pre>
<blockquote>
<p>A block quote.</p>
</blockquote>
</li>
</ol>`},
{"203", `
- one
two`, `
<ul>
<li>
<p>one</p>
<p>two</p>
</li>
</ul>`},
{"205", `
- one
two`, `
<ul>
<li>
<p>one</p>
<p>two</p>
</li>
</ul>`},
{"206", `
> > 1. one
>>
>> two`, `
<blockquote>
<blockquote>
<ol>
<li>
<p>one</p>
<p>two</p>
</li>
</ol>
</blockquote>
</blockquote>`},
{"207", `
>>- one
>>
> > two`, `
<blockquote>
<blockquote>
<ul>
<li>one</li>
</ul>
<p>two</p>
</blockquote>
</blockquote>`},
{"208", `-one
2.two`, `
<p>-one</p>
<p>2.two</p>`},
{"210", `
1. foo
~~~
bar
~~~
baz
> bam`, `
<ol>
<li>
<p>foo</p>
<pre><code>bar
</code></pre>
<p>baz</p>
<blockquote>
<p>bam</p>
</blockquote>
</li>
</ol>`},
{"212", `1234567890. not ok`, `<p>1234567890. not ok</p>`},
{"215", `-1. not ok`, `<p>-1. not ok</p>`},
{"216", `
- foo
bar`, `
<ul>
<li>
<p>foo</p>
<pre><code>bar
</code></pre>
</li>
</ul>`},
{"218", `
indented code
paragraph
more code`, `
<pre><code>indented code
</code></pre>
<p>paragraph</p>
<pre><code>more code
</code></pre>`},
{"221", `
foo
bar`, `
<p>foo</p>
<p>bar</p>`},
{"223", `
- foo
bar`, `
<ul>
<li>
<p>foo</p>
<p>bar</p>
</li>
</ul>`},
{"226", `
- foo
-
- bar`, `
<ul>
<li>foo</li>
<li></li>
<li>bar</li>
</ul>`},
{"232", `
1. A paragraph
with two lines.
indented code
> A block quote.`, `
<pre><code>1. A paragraph
with two lines.
indented code
> A block quote.
</code></pre>`},
{"234", `
1. A paragraph
with two lines.`, `
<ol>
<li>A paragraph
with two lines.</li>
</ol>`},
{"235", `
> 1. > Blockquote
continued here.`, `
<blockquote>
<ol>
<li>
<blockquote>
<p>Blockquote
continued here.</p>
</blockquote>
</li>
</ol>
</blockquote>`},
{"236", `
> 1. > Blockquote
continued here.`, `
<blockquote>
<ol>
<li>
<blockquote>
<p>Blockquote
continued here.</p>
</blockquote>
</li>
</ol>
</blockquote>`},
{"237", `
- foo
- bar
- baz`, `
<ul>
<li>foo
<ul>
<li>bar
<ul>
<li>baz</li>
</ul>
</li>
</ul>
</li>
</ul>`},
{"241", "- - foo", `
<ul>
<li>
<ul>
<li>foo</li>
</ul>
</li>
</ul>`},
{"243", `
- # Foo
- Bar
---
baz`, `
<ul>
<li>
<h1>Foo</h1>
</li>
<li>
<h2>Bar</h2>
baz</li>
</ul>`},
{"246", `
Foo
- bar
- baz`, `
<p>Foo</p>
<ul>
<li>bar</li>
<li>baz</li>
</ul>`},
{"248", `
- foo
- bar
- baz`, `
<ul>
<li>
<p>foo</p>
</li>
<li>
<p>bar</p>
</li>
</ul>
<ul>
<li>baz</li>
</ul>`},
{"249", `
- foo
bar
- baz`, `
<ul>
<li>foo</li>
</ul>
<p>bar</p>
<ul>
<li>baz</li>
</ul>`},
{"250", `
- foo
- bar
- baz
bim`, `
<ul>
<li>foo
<ul>
<li>bar
<ul>
<li>baz</li>
</ul>
</li>
</ul>
</li>
</ul>
<pre><code> bim
</code></pre>`},
{"251", `
- foo
- bar
- baz
- bim`, `
<ul>
<li>foo</li>
<li>bar</li>
</ul>
<ul>
<li>baz</li>
<li>bim</li>
</ul>`},
{"252", `
- foo
notcode
- foo
code`, `
<ul>
<li>
<p>foo</p>
<p>notcode</p>
</li>
<li>
<p>foo</p>
</li>
</ul>
<pre><code>code
</code></pre>`},
{"261", `
* a
> b
>
* c`, `
<ul>
<li>a
<blockquote>
<p>b</p>
</blockquote>
</li>
<li>c</li>
</ul>`},
{"263", "- a", `
<ul>
<li>a</li>
</ul>`},
{"264", `
- a
- b`, `
<ul>
<li>a
<ul>
<li>b</li>
</ul>
</li>
</ul>`},
{"265", "\n1. ```\n foo\n ```\n\n bar", `
<ol>
<li>
<pre><code>foo
</code></pre>
<p>bar</p>
</li>
</ol>`},
{"266", `
* foo
* bar
baz`, `
<ul>
<li>
<p>foo</p>
<ul>
<li>bar</li>
</ul>
<p>baz</p>
</li>
</ul>`},
{"267", `
- a
- b
- c
- d
- e
- f`, `
<ul>
<li>
<p>a</p>
<ul>
<li>b</li>
<li>c</li>
</ul>
</li>
<li>
<p>d</p>
<ul>
<li>e</li>
<li>f</li>
</ul>
</li>
</ul>`},
{"268", "`hi`lo`", "<p><code>hi</code>lo`</p>"},
{"273", `
foo\
bar
`, `
<p>foo<br>
bar</p>`},
{"275", ` \[\]`, `<pre><code>\[\]
</code></pre>`},
{"276", `
~~~
\[\]
~~~`, `
<pre><code>\[\]
</code></pre>`},
{"294", "`foo`", `<p><code>foo</code></p>`},
{"300", "`foo\\`bar`", "<p><code>foo\\</code>bar`</p>"},
{"303", "`<a href=\"`\">`", "<p><code><a href="</code>">`</p>"},
{"308", "`foo", "<p>`foo</p>"},
{"309", "*foo bar*", "<p><em>foo bar</em></p>"},
{"310", "a * foo bar*", "<p>a * foo bar*</p>"},
{"313", "foo*bar*", "<p>foo<em>bar</em></p>"},
{"314", "5*6*78", "<p>5<em>6</em>78</p>"},
{"315", "_foo bar_", "<p><em>foo bar</em></p>"},
{"316", "_ foo bar_", "<p>_ foo bar_</p>"},
{"322", "foo-_(bar)_", "<p>foo-<em>(bar)</em></p>"},
{"323", "_foo*", "<p>_foo*</p>"},
{"328", "*foo*bar", "<p><em>foo</em>bar</p>"},
{"335", "_(bar)_.", "<p><em>(bar)</em>.</p>"},
{"336", "**foo bar**", "<p><strong>foo bar</strong></p>"},
{"339", "foo**bar**", "<p>foo<strong>bar</strong></p>"},
{"340", "__foo bar__", "<p><strong>foo bar</strong></p>"},
{"348", "foo-__(bar)__", "<p>foo-<strong>(bar)</strong></p>"},
{"352", "**Gomphocarpus (*Gomphocarpus physocarpus*, syn.*Asclepias physocarpa*)**",
"<p><strong>Gomphocarpus (<em>Gomphocarpus physocarpus</em>, syn.<em>Asclepias physocarpa</em>)</strong></p>"},
{"353", "**foo \"*bar*\" foo**", "<p><strong>foo "<em>bar</em>" foo</strong></p>"},
{"354", "**foo**bar", "<p><strong>foo</strong>bar</p>"},
{"361", "__(bar)__.", "<p><strong>(bar)</strong>.</p>"},
{"362", "*foo [bar](/url)*", "<p><em>foo <a href=\"/url\">bar</a></em></p>"},
{"363", "*foo\nbar*", "<p><em>foo\nbar</em></p>"},
{"375", "** is not an empty emphasis", "<p>** is not an empty emphasis</p>"},
{"377", "**foo [bar](/url)**", "<p><strong>foo <a href=\"/url\">bar</a></strong></p>"},
{"378", "**foo\nbar**", "<p><strong>foo\nbar</strong></p>"},
{"379", "__foo _bar_ baz__", "<p><strong>foo <em>bar</em> baz</strong></p>"},
{"383", "**foo *bar* baz**", "<p><strong>foo <em>bar</em> baz</strong></p>"},
{"385", "***foo* bar**", "<p><strong><em>foo</em> bar</strong></p>"},
{"386", "**foo *bar***", "<p><strong>foo <em>bar</em></strong></p>"},
{"389", "__ is not an empty emphasis", "<p>__ is not an empty emphasis</p>"},
{"392", "foo *\\**", "<p>foo <em>*</em></p>"},
{"393", "foo *_*", "<p>foo <em>_</em></p>"},
{"395", "foo **\\***", "<p>foo <strong>*</strong></p>"},
{"396", "foo **_**", "<p>foo <strong>_</strong></p>"},
{"404", "foo _\\__", "<p>foo <em>_</em></p>"},
{"405", "foo _*_", "<p>foo <em>*</em></p>"},
{"407", "foo __\\___", "<p>foo <strong>_</strong></p>"},
{"408", "foo __*__", "<p>foo <strong>*</strong></p>"},
{"415", "**foo**", "<p><strong>foo</strong></p>"},
{"416", "*_foo_*", "<p><em><em>foo</em></em></p>"},
{"417", "__foo__", "<p><strong>foo</strong></p>"},
{"418", "_*foo*_", "<p><em><em>foo</em></em></p>"},
{"419", "****foo****", "<p><strong><strong>foo</strong></strong></p>"},
{"420", "____foo____", "<p><strong><strong>foo</strong></strong></p>"},
{"422", "***foo***", "<p><strong><em>foo</em></strong></p>"},
{"424", "*foo _bar* baz_", "<p><em>foo _bar</em> baz_</p>"},
{"438", "[link](/uri \"title\")", "<p><a href=\"/uri\" title=\"title\">link</a></p>"},
{"439", "[link](/uri)", "<p><a href=\"/uri\">link</a></p>"},
{"440", "[link]()", "<p><a href=\"\">link</a></p>"},
{"441", "[link](<>)", "<p><a href=\"\">link</a></p>"},
{"451", `
[link](#fragment)
[link](http://example.com#fragment)
[link](http://example.com?foo=bar&baz#fragment)`, `
<p><a href="#fragment">link</a></p>
<p><a href="http://example.com#fragment">link</a></p>
<p><a href="http://example.com?foo=bar&baz#fragment">link</a></p>`},
{"455", `
[link](/url "title")
[link](/url 'title')
[link](/url (title))`, `
<p><a href="/url" title="title">link</a>
<a href="/url" title="title">link</a>
<a href="/url" title="title">link</a></p>`},
{"458", `[link](/url 'title "and" title')`, `<p><a href="/url" title="title "and" title">link</a></p>`},
{"460", "[link] (/uri)", "<p>[link] (/uri)</p>"},
{"461", "[link [foo [bar]]](/uri)", `<p><a href="/uri">link [foo [bar]]</a></p>`},
{"463", "[link [bar](/uri)", `<p>[link <a href="/uri">bar</a></p>`},
{"471", "[foo *bar](baz*)", `<p><a href="baz*">foo *bar</a></p>`},
{"472", "*foo [bar* baz]", "<p><em>foo [bar</em> baz]</p>"},
{"476", `
[foo][bar]
[bar]: /url "title"`, `<p><a href="/url" title="title">foo</a></p>`},
{"477", `
[link [foo [bar]]][ref]
[ref]: /uri`, `<p><a href="/uri">link [foo [bar]]</a></p>`},
{"484", `
[foo *bar][ref]
[ref]: /uri`, `<p><a href="/uri">foo *bar</a></p>`},
{"488", `
[foo][BaR]
[bar]: /url "title"`, `<p><a href="/url" title="title">foo</a></p>`},
{"489", `
[Толпой][Толпой] is a Russian word.
[ТОЛПОЙ]: /url`, `<p><a href="/url">Толпой</a> is a Russian word.</p>`},
{"491", `
[foo] [bar]
[bar]: /url "title"`, `<p><a href="/url" title="title">foo</a></p>`},
{"492", `
[foo]
[bar]
[bar]: /url "title"`, `<p><a href="/url" title="title">foo</a></p>`},
{"493", `
[foo]: /url1
[foo]: /url2
[bar][foo]`, `<p><a href="/url1">bar</a></p>`},
{"496", `
[foo][ref[bar]]
[ref[bar]]: /uri`, `
<p>[foo][ref[bar]]</p>
<p>[ref[bar]]: /uri</p>`},
{"497", `
[[[foo]]]
[[[foo]]]: /url`, `
<p>[[[foo]]]</p>
<p>[[[foo]]]: /url</p>`},
{"498", `
[foo][ref\[]
[ref\[]: /uri`, `<p><a href="/uri">foo</a></p>`},
{"499", `
[]
[]: /uri`, `
<p>[]</p>
<p>[]: /uri</p>`},
{"501", `
[foo][]
[foo]: /url "title"`, `<p><a href="/url" title="title">foo</a></p>`},
{"502", `
[*foo* bar][]
[*foo* bar]: /url "title"`, `
<p><a href="/url" title="title"><em>foo</em> bar</a></p>`},
{"503", `
[Foo][]
[foo]: /url "title"`, `<p><a href="/url" title="title">Foo</a></p>`},
{"504", `
[foo]
[]
[foo]: /url "title"`, `<p><a href="/url" title="title">foo</a></p>`},
{"505", `
[foo]
[foo]: /url "title"`, `<p><a href="/url" title="title">foo</a></p>`},
{"506", `
[*foo* bar]
[*foo* bar]: /url "title"`, `
<p><a href="/url" title="title"><em>foo</em> bar</a></p>`},
{"508", `
[[bar [foo]
[foo]: /url`, `<p>[[bar <a href="/url">foo</a></p>`},
{"509", `
[Foo]
[foo]: /url "title"`, `<p><a href="/url" title="title">Foo</a></p>`},
{"510", `
[foo] bar
[foo]: /url`, `<p><a href="/url">foo</a> bar</p>`},
{"511", `
\[foo]
[foo]: /url "title"`, `<p>[foo]</p>`},
{"513", `
[foo][bar]
[foo]: /url1
[bar]: /url2`, `<p><a href="/url2">foo</a></p>`},
{"515", `
[foo][bar][baz]
[baz]: /url1
[bar]: /url2`, `<p><a href="/url2">foo</a><a href="/url1">baz</a></p>`},
{"517", ``, `<p><img src="/url" alt="foo" title="title"></p>`},
{"523", ``, `<p><img src="train.jpg" alt="foo"></p>`},
{"524", `My `,
`<p>My <img src="/path/to/train.jpg" alt="foo bar" title="title"></p>`},
{"525", ``, `<p><img src="url" alt="foo"></p>`},
{"526", ``, `<p><img src="/url" alt=""></p>`},
{"527", `
![foo] [bar]
[bar]: /url`, `<p><img src="/url" alt="foo"></p>`},
{"528", `
![foo] [bar]
[BAR]: /url`, `<p><img src="/url" alt="foo"></p>`},
{"529", `
![foo][]
[foo]: /url "title"`, `<p><img src="/url" alt="foo" title="title"></p>`},
{"531", `
![Foo][]
[foo]: /url "title"`, `<p><img src="/url" alt="Foo" title="title"></p>`},
{"532", `
![foo]
[]
[foo]: /url "title"`, `<p><img src="/url" alt="foo" title="title"></p>`},
{"533", `
![foo]
[foo]: /url "title"`, `<p><img src="/url" alt="foo" title="title"></p>`},
{"535", `
![[foo]]
[[foo]]: /url "title"`, `
<p>![[foo]]</p>
<p>[[foo]]: /url "title"</p>`},
{"536", `
![Foo]
[foo]: /url "title"`, `<p><img src="/url" alt="Foo" title="title"></p>`},
{"537", `
\!\[foo]
[foo]: /url "title"`, `<p>![foo]</p>`},
{"538", `
\![foo]
[foo]: /url "title"`, `<p>!<a href="/url" title="title">foo</a></p>`},
{"539", `<http://foo.bar.baz>`, `<p><a href="http://foo.bar.baz">http://foo.bar.baz</a></p>`},
{"540", `<http://foo.bar.baz/test?q=hello&id=22&boolean>`,
`<p><a href="http://foo.bar.baz/test?q=hello&id=22&boolean">http://foo.bar.baz/test?q=hello&id=22&boolean</a></p>`},
{"541", `<irc://foo.bar:2233/baz>`, `<p><a href="irc://foo.bar:2233/baz">irc://foo.bar:2233/baz</a></p>`},
{"542", `<MAILTO:FOO@BAR.BAZ>`, `<p><a href="MAILTO:FOO@BAR.BAZ">MAILTO:FOO@BAR.BAZ</a></p>`},
{"548", "<>", "<p><></p>"},
{"554", `foo@bar.example.com`, `<p>foo@bar.example.com</p>`},
{"555", "<a><bab><c2c>", "<p><a><bab><c2c></p>"},
{"556", "<a/><b2/>", "<p><a/><b2/></p>"},
{"557", `
<a /><b2
data="foo" >`, `
<p><a /><b2
data="foo" ></p>`},
{"558", `
<a foo="bar" bam = 'baz <em>"</em>'
_boolean zoop:33=zoop:33 />`, `
<p><a foo="bar" bam = 'baz <em>"</em>'
_boolean zoop:33=zoop:33 /></p>`},
{"572", "foo <![CDATA[>&<]]>", "<p>foo <![CDATA[>&<]]></p>"},
{"576", `
foo
baz`, `
<p>foo<br>
baz</p>`},
{"577", `
foo\
baz`, `
<p>foo<br>
baz</p>`},
{"578", `
foo
baz`, `<p>foo<br>baz</p>`},
{"581", `
*foo
bar*`, `
<p><em>foo<br>
bar</em></p>`},
{"582", `
*foo\
bar*`, `
<p><em>foo<br>
bar</em></p>`},
{"587", `foo\`, `<p>foo\</p>`},
{"588", `foo `, `<p>foo</p>`},
{"589", `### foo\`, `<h3>foo\</h3>`},
{"590", `### foo `, `<h3>foo</h3>`},
{"591", `
foo
baz`, `
<p>foo
baz</p>`},
{"592", `
foo
baz`, `
<p>foo
baz</p>`},
{"594", `Foo χρῆν`, `<p>Foo χρῆν</p>`},
{"595", `Multiple spaces`, `<p>Multiple spaces</p>`},
}
// TestCommonMark renders each CommonMark spec example and compares the
// output to the expected HTML, ignoring newlines and the auto-generated
// id attributes.
func TestCommonMark(t *testing.T) {
	reID := regexp.MustCompile(` +?id=".*"`)
	stripNewlines := func(s string) string {
		return strings.Replace(s, "\n", "", -1)
	}
	for _, c := range CMCases {
		// Drop the auto-hashed id attributes until they become configurable.
		got := reID.ReplaceAllString(Render(c.input), "")
		if stripNewlines(got) != stripNewlines(c.expected) {
			t.Errorf("\ninput:%s\ngot:\n%s\nexpected:\n%s\nlink: http://spec.commonmark.org/0.21/#example-%s\n",
				c.input, got, c.expected, c.name)
		}
	}
}
|
package junos
import (
"encoding/xml"
"errors"
"fmt"
"log"
"net/url"
"regexp"
"strconv"
"strings"
)
// Addresses contains a list of address objects.
type Addresses struct {
	Addresses []Address `xml:"address"`
}

// An Address contains information about each individual address object.
type Address struct {
	ID          int    `xml:"id"` // Space-assigned object ID
	Name        string `xml:"name"`
	AddressType string `xml:"address-type"`
	Description string `xml:"description"`
	IPAddress   string `xml:"ip-address"`
}
// Services contains a list of service objects.
type Services struct {
	Services []Service `xml:"service"`
}

// A Service contains information about each individual service object.
type Service struct {
	ID          int    `xml:"id"` // Space-assigned object ID
	Name        string `xml:"name"`
	IsGroup     bool   `xml:"is-group"` // true when the object is a service group
	Description string `xml:"description"`
}
// A Policy contains information about each individual firewall policy.
type Policy struct {
	ID          int    `xml:"id"` // Space-assigned policy ID
	Name        string `xml:"name"`
	Description string `xml:"description"`
}

// Policies contains a list of firewall policies.
type Policies struct {
	Policies []Policy `xml:"firewall-policy"`
}
// SecurityDevices contains a list of security devices.
type SecurityDevices struct {
	XMLName xml.Name         `xml:"devices"`
	Devices []SecurityDevice `xml:"device"`
}

// A SecurityDevice contains information about each individual security device.
type SecurityDevice struct {
	ID        int    `xml:"id"` // Space-assigned device ID
	Family    string `xml:"device-family"`
	Platform  string `xml:"platform"`
	IPAddress string `xml:"device-ip"`
	Name      string `xml:"name"`
}
// Variables contains a list of all polymorphic (variable) objects.
type Variables struct {
	Variables []Variable `xml:"variable-definition"`
}

// A Variable contains information about each individual polymorphic (variable) object.
type Variable struct {
	ID          int    `xml:"id"` // Space-assigned variable ID
	Name        string `xml:"name"`
	Description string `xml:"description"`
}
// existingVariable mirrors the XML that Space returns for a single
// variable-definition, including its current per-device values. It is used
// when modifying an existing variable object.
type existingVariable struct {
	XMLName            xml.Name         `xml:"variable-definition"`
	Name               string           `xml:"name"`
	Description        string           `xml:"description"`
	Type               string           `xml:"type"`
	Version            int              `xml:"edit-version"` // must be echoed back on modification
	DefaultName        string           `xml:"default-name"`
	DefaultValue       string           `xml:"default-value-detail>default-value"`
	VariableValuesList []variableValues `xml:"variable-values-list>variable-values"`
}

// variableValues is one device/value pair inside a variable-definition.
type variableValues struct {
	XMLName       xml.Name `xml:"variable-values"`
	DeviceMOID    string   `xml:"device>moid"`
	DeviceName    string   `xml:"device>name"`
	VariableValue string   `xml:"variable-value-detail>variable-value"`
	VariableName  string   `xml:"variable-value-detail>name"`
}
// addressesXML is XML we send (POST) for creating an address object.
var addressesXML = `
<address>
<name>%s</name>
<address-type>%s</address-type>
<host-name/>
<edit-version/>
<members/>
<address-version>IPV4</address-version>
<definition-type>CUSTOM</definition-type>
<ip-address>%s</ip-address>
<description>%s</description>
</address>
`

// serviceXML is XML we send (POST) for creating a service object.
var serviceXML = `
<service>
<name>%s</name>
<description>%s</description>
<is-group>false</is-group>
<protocols>
<protocol>
<name>%s</name>
<dst-port>%s</dst-port>
<sunrpc-protocol-type>%s</sunrpc-protocol-type>
<msrpc-protocol-type>%s</msrpc-protocol-type>
<protocol-number>%d</protocol-number>
<protocol-type>%s</protocol-type>
<disable-timeout>%s</disable-timeout>
%s
</protocol>
</protocols>
</service>
`

// addressGroupXML is XML we send (POST) for adding an address group.
var addressGroupXML = `
<address>
<name>%s</name>
<address-type>GROUP</address-type>
<host-name/>
<edit-version/>
<address-version>IPV4</address-version>
<definition-type>CUSTOM</definition-type>
<description>%s</description>
</address>
`

// serviceGroupXML is XML we send (POST) for adding a service group.
var serviceGroupXML = `
<service>
<name>%s</name>
<is-group>true</is-group>
<description>%s</description>
</service>
`

// removeXML is XML we send (POST) for removing an address or service from a group.
var removeXML = `
<diff>
<remove sel="%s/members/member[name='%s']"/>
</diff>
`

// addGroupMemberXML is XML we send (POST) for adding addresses or services to a group.
var addGroupMemberXML = `
<diff>
<add sel="%s/members">
<member>
<name>%s</name>
</member>
</add>
</diff>
`

// renameXML is XML we send (POST) for renaming an address or service object.
var renameXML = `
<diff>
<replace sel="%s/name">
<name>%s</name>
</replace>
</diff>
`

// updateDeviceXML is XML we send (POST) for updating a security device.
var updateDeviceXML = `
<update-devices>
<sd-ids>
<id>%d</id>
</sd-ids>
<service-types>
<service-type>POLICY</service-type>
</service-types>
<update-options>
<enable-policy-rematch-srx-only>false</enable-policy-rematch-srx-only>
</update-options>
</update-devices>
`

// publishPolicyXML is XML we send (POST) for publishing a changed policy.
var publishPolicyXML = `
<publish>
<policy-ids>
<policy-id>%d</policy-id>
</policy-ids>
</publish>
`

// createVariableXML is the XML we send (POST) for adding a new variable object.
var createVariableXML = `
<variable-definition>
<name>%s</name>
<type>%s</type>
<description>%s</description>
<context>DEVICE</context>
<default-name>%s</default-name>
<default-value-detail>
<default-value>%d</default-value>
</default-value-detail>
</variable-definition>
`

// modifyVariableXML is the XML we send (POST) for modifying an existing
// variable object; the edit-version and variable-values-list come from the
// current state of the variable.
var modifyVariableXML = `
<variable-definition>
<name>%s</name>
<type>%s</type>
<description>%s</description>
<edit-version>%d</edit-version>
<context>DEVICE</context>
<default-name>%s</default-name>
<default-value-detail>
<default-value>%s</default-value>
</default-value-detail>
<variable-values-list>
%s
</variable-values-list>
</variable-definition>
`
// getObjectID returns the ID of an address or service object. The object
// argument is either an int (treated as the ID itself) or a string holding a
// name or an IPv4/CIDR address. When otype is false, service objects are
// searched in addition to address objects. An unmatched string returns
// (0, nil).
func (s *JunosSpace) getObjectID(object interface{}, otype bool) (int, error) {
	// An int already is the ID; return it before the string assertions below,
	// which would otherwise panic on non-string input.
	if id, ok := object.(int); ok {
		return id, nil
	}
	name, ok := object.(string)
	if !ok {
		return 0, fmt.Errorf("unsupported object type %T", object)
	}
	var objectID int
	// Matches IPv4 addresses in CIDR notation, e.g. "10.0.0.0/24".
	ipRegex := regexp.MustCompile(`(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\/\d+)`)
	if !otype {
		services, err := s.Services(name)
		if err != nil {
			// Previously this error was silently discarded.
			return 0, err
		}
		for _, o := range services.Services {
			if o.Name == name {
				objectID = o.ID
			}
		}
	}
	objects, err := s.Addresses(name)
	if err != nil {
		return 0, err
	}
	if ipRegex.MatchString(name) {
		for _, o := range objects.Addresses {
			if o.IPAddress == name {
				objectID = o.ID
			}
		}
	}
	// A name match takes precedence over an IP match (same lookup order as
	// before).
	for _, o := range objects.Addresses {
		if o.Name == name {
			objectID = o.ID
		}
	}
	return objectID, nil
}
// getPolicyID returns the ID of the firewall policy with the given name.
// An unknown name yields (0, nil).
func (s *JunosSpace) getPolicyID(object string) (int, error) {
	policies, err := s.Policies()
	if err != nil {
		return 0, err
	}
	id := 0
	// Last match wins, matching the previous behavior.
	for i := range policies.Policies {
		if policies.Policies[i].Name == object {
			id = policies.Policies[i].ID
		}
	}
	return id, nil
}
// getVariableID returns the ID of the polymorphic (variable) object with
// the given name, or 0 when no variable matches.
func (s *JunosSpace) getVariableID(variable string) (int, error) {
	vars, err := s.Variables()
	if err != nil {
		return 0, err
	}
	id := 0
	for _, v := range vars.Variables {
		if v.Name == variable {
			id = v.ID
		}
	}
	return id, nil
}
// modifyVariableContent rebuilds the <variable-values> entries of an existing
// variable definition and appends one new device/value pair for the given
// firewall (moid) and address object (obj, entity ID vid).
func (s *JunosSpace) modifyVariableContent(data *existingVariable, moid, firewall, obj string, vid int) string {
	var b strings.Builder
	// Re-emit every existing device/value pair so the PUT keeps them.
	for _, d := range data.VariableValuesList {
		fmt.Fprintf(&b, "<variable-values><device><moid>%s</moid><name>%s</name></device>", d.DeviceMOID, d.DeviceName)
		fmt.Fprintf(&b, "<variable-value-detail><variable-value>%s</variable-value><name>%s</name></variable-value-detail></variable-values>", d.VariableValue, d.VariableName)
	}
	// Append the new device together with its address entity reference.
	fmt.Fprintf(&b, "<variable-values><device><moid>%s</moid><name>%s</name></device>", moid, firewall)
	fmt.Fprintf(&b, "<variable-value-detail><variable-value>net.juniper.jnap.sm.om.jpa.AddressEntity:%d</variable-value><name>%s</name></variable-value-detail></variable-values>", vid, obj)
	return b.String()
}
// Addresses queries the Junos Space server and returns all of the information
// about each address that is managed by Space. filter of "all" matches every
// address; any other value restricts the query to that name.
func (s *JunosSpace) Addresses(filter string) (*Addresses, error) {
	query := url.Values{}
	if filter == "all" {
		query.Set("filter", "(global eq '')")
	} else {
		query.Set("filter", fmt.Sprintf("(global eq '%s')", filter))
	}
	resp, err := s.APICall(&APIRequest{
		Method: "get",
		URL:    fmt.Sprintf("/api/juniper/sd/address-management/addresses?%s", query.Encode()),
	})
	if err != nil {
		return nil, err
	}
	var addresses Addresses
	if err := xml.Unmarshal(resp, &addresses); err != nil {
		return nil, err
	}
	return &addresses, nil
}
// AddAddress adds a new address object to Junos Space, and returns the Job ID.
func (s *JunosSpace) AddAddress(name, ip, desc string) (int, error) {
	// A slash means a CIDR block rather than a single host address.
	addrType := "IPADDRESS"
	if strings.Contains(ip, "/") {
		addrType = "NETWORK"
	}
	resp, err := s.APICall(&APIRequest{
		Method:      "post",
		URL:         "/api/juniper/sd/address-management/addresses",
		Body:        fmt.Sprintf(addressesXML, name, addrType, ip, desc),
		ContentType: contentAddress,
	})
	if err != nil {
		return 0, err
	}
	var job jobID
	if err := xml.Unmarshal(resp, &job); err != nil {
		return 0, err
	}
	return job.ID, nil
}
// AddService adds a new service object to Junos Space, and returns the Job ID. If adding just
// a single port, then enter in the same number in both the "low" and "high" parameters. For a
// range of ports, enter the starting port in "low" and the upper limit in "high."
func (s *JunosSpace) AddService(proto, name string, low, high int, desc string, timeout int) (int, error) {
	protocol := strings.ToUpper(proto)
	ptype := "PROTOCOL_" + protocol
	// IANA protocol numbers: TCP is 6, UDP is 17. Anything other than
	// "udp" falls back to 6, as before.
	protoNumber := 6
	if proto == "udp" {
		protoNumber = 17
	}
	// A single port unless low < high, in which case a "low-high" range.
	port := strconv.Itoa(low)
	if low < high {
		port = fmt.Sprintf("%d-%d", low, high)
	}
	// A zero timeout disables the inactivity timer entirely.
	inactivity, secs := "false", fmt.Sprintf("<inactivity-timeout>%d</inactivity-timeout>", timeout)
	if timeout == 0 {
		inactivity, secs = "true", "<inactivity-timeout/>"
	}
	resp, err := s.APICall(&APIRequest{
		Method:      "post",
		URL:         "/api/juniper/sd/service-management/services",
		Body:        fmt.Sprintf(serviceXML, name, desc, name, port, protocol, protocol, protoNumber, ptype, inactivity, secs),
		ContentType: contentService,
	})
	if err != nil {
		return 0, err
	}
	var job jobID
	if err := xml.Unmarshal(resp, &job); err != nil {
		return 0, err
	}
	return job.ID, nil
}
// AddGroup adds a new address group (otype true) or service group (otype
// false) to Junos Space. Unlike the Add* job-based calls, it only reports
// success or failure.
func (s *JunosSpace) AddGroup(otype bool, name, desc string) error {
	uri, template, content := "/api/juniper/sd/address-management/addresses", addressGroupXML, contentAddress
	if !otype {
		uri, template, content = "/api/juniper/sd/service-management/services", serviceGroupXML, contentService
	}
	_, err := s.APICall(&APIRequest{
		Method:      "post",
		URL:         uri,
		Body:        fmt.Sprintf(template, name, desc),
		ContentType: content,
	})
	return err
}
// ModifyObject modifies an existing address, service or group. The actions are as follows:
//
// "otype" is either "true" (for address) or "false" (for service)
//
// ModifyObject(otype, "add", "existing-group", "member-to-add")
// ModifyObject(otype, "remove", "existing-group", "member-to-remove")
// ModifyObject(otype, "rename", "old-name", "new-name")
// ModifyObject(otype, "delete", "object-to-delete")
func (s *JunosSpace) ModifyObject(otype bool, actions ...interface{}) error {
	objectID, err := s.getObjectID(actions[1], otype)
	if err != nil {
		return err
	}
	// An unknown object resolves to ID 0; silently do nothing, as before.
	if objectID == 0 {
		return nil
	}
	uri := fmt.Sprintf("/api/juniper/sd/address-management/addresses/%d", objectID)
	content := contentAddressPatch
	rel := "address"
	if !otype {
		uri = fmt.Sprintf("/api/juniper/sd/service-management/services/%d", objectID)
		content = contentServicePatch
		rel = "service"
	}
	var req *APIRequest
	switch actions[0] {
	case "add":
		req = &APIRequest{
			Method:      "patch",
			URL:         uri,
			Body:        fmt.Sprintf(addGroupMemberXML, rel, actions[2]),
			ContentType: content,
		}
	case "remove":
		req = &APIRequest{
			Method:      "patch",
			URL:         uri,
			Body:        fmt.Sprintf(removeXML, rel, actions[2]),
			ContentType: content,
		}
	case "rename":
		req = &APIRequest{
			Method:      "patch",
			URL:         uri,
			Body:        fmt.Sprintf(renameXML, rel, actions[2]),
			ContentType: content,
		}
	case "delete":
		req = &APIRequest{
			Method: "delete",
			URL:    uri,
		}
	default:
		// The original fell through here with req == nil and handed a
		// nil request to APICall; fail fast instead.
		return fmt.Errorf("unknown action %q", actions[0])
	}
	_, err = s.APICall(req)
	return err
}
// Services queries the Junos Space server and returns all of the information
// about each service that is managed by Space. filter of "all" matches every
// service; any other value restricts the query to that name.
func (s *JunosSpace) Services(filter string) (*Services, error) {
	query := url.Values{}
	if filter == "all" {
		query.Set("filter", "(global eq '')")
	} else {
		query.Set("filter", fmt.Sprintf("(global eq '%s')", filter))
	}
	resp, err := s.APICall(&APIRequest{
		Method: "get",
		URL:    fmt.Sprintf("/api/juniper/sd/service-management/services?%s", query.Encode()),
	})
	if err != nil {
		return nil, err
	}
	var services Services
	if err := xml.Unmarshal(resp, &services); err != nil {
		return nil, err
	}
	return &services, nil
}
// SecurityDevices queries the Junos Space server and returns all of the information
// about each security device that is managed by Space.
func (s *JunosSpace) SecurityDevices() (*SecurityDevices, error) {
	resp, err := s.APICall(&APIRequest{
		Method: "get",
		URL:    "/api/juniper/sd/device-management/devices",
	})
	if err != nil {
		return nil, err
	}
	var devices SecurityDevices
	if err := xml.Unmarshal(resp, &devices); err != nil {
		return nil, err
	}
	return &devices, nil
}
// Policies returns a list of all firewall policies managed by Junos Space.
func (s *JunosSpace) Policies() (*Policies, error) {
	resp, err := s.APICall(&APIRequest{
		Method: "get",
		URL:    "/api/juniper/sd/fwpolicy-management/firewall-policies",
	})
	if err != nil {
		return nil, err
	}
	var policies Policies
	if err := xml.Unmarshal(resp, &policies); err != nil {
		return nil, err
	}
	return &policies, nil
}
// PublishPolicy publishes a changed firewall policy. If "true" is specified for
// <update>, then Junos Space will also update the device. object may be the
// numeric policy ID or the policy name.
func (s *JunosSpace) PublishPolicy(object interface{}, update bool) (int, error) {
	var id int
	switch v := object.(type) {
	case int:
		id = v
	case string:
		policyID, err := s.getPolicyID(v)
		if err != nil {
			return 0, err
		}
		if policyID == 0 {
			return 0, errors.New("no policy found")
		}
		id = policyID
	}
	uri := "/api/juniper/sd/fwpolicy-management/publish"
	if update {
		uri += "?update=true"
	}
	resp, err := s.APICall(&APIRequest{
		Method:      "post",
		URL:         uri,
		Body:        fmt.Sprintf(publishPolicyXML, id),
		ContentType: contentPublish,
	})
	if err != nil {
		return 0, err
	}
	var job jobID
	if err := xml.Unmarshal(resp, &job); err != nil {
		// A reply that doesn't parse as a job means Space had nothing to push.
		return 0, errors.New("no policy changes to publish")
	}
	return job.ID, nil
}
// UpdateDevice will update a changed security device, synchronizing it with
// Junos Space, and returns the Job ID.
func (s *JunosSpace) UpdateDevice(device interface{}) (int, error) {
	deviceID, err := s.getDeviceID(device, true)
	if err != nil {
		return 0, err
	}
	resp, err := s.APICall(&APIRequest{
		Method:      "post",
		URL:         "/api/juniper/sd/device-management/update-devices",
		Body:        fmt.Sprintf(updateDeviceXML, deviceID),
		ContentType: contentUpdateDevices,
	})
	if err != nil {
		return 0, err
	}
	var job jobID
	if err := xml.Unmarshal(resp, &job); err != nil {
		return 0, err
	}
	return job.ID, nil
}
// Variables returns a listing of all polymorphic (variable) objects.
func (s *JunosSpace) Variables() (*Variables, error) {
	resp, err := s.APICall(&APIRequest{
		Method: "get",
		URL:    "/api/juniper/sd/variable-management/variable-definitions",
	})
	if err != nil {
		return nil, err
	}
	var vars Variables
	if err := xml.Unmarshal(resp, &vars); err != nil {
		return nil, err
	}
	return &vars, nil
}
// AddVariable creates a new polymorphic object (variable) on the Junos Space
// server, using obj as its default address object.
func (s *JunosSpace) AddVariable(name, vtype, desc, obj string) error {
	objID, err := s.getObjectID(obj, true)
	if err != nil {
		return err
	}
	_, err = s.APICall(&APIRequest{
		Method:      "post",
		URL:         "/api/juniper/sd/variable-management/variable-definitions",
		Body:        fmt.Sprintf(createVariableXML, name, strings.ToUpper(vtype), desc, obj, objID),
		ContentType: contentVariable,
	})
	return err
}
// ModifyVariable adds or deletes entries to the polymorphic (variable) object:
//
// ModifyVariable("add", "variable-name", "firewall", "address-object")
// ModifyVariable("delete", "variable-name", "firewall", "address-object")
func (s *JunosSpace) ModifyVariable(actions ...interface{}) error {
	// The original assigned actions[n] (interface{}) straight into string
	// parameters (getVariableID, modifyVariableContent), which does not
	// compile; assert the concrete types up front instead.
	variable, ok := actions[1].(string)
	if !ok {
		return errors.New("variable name must be a string")
	}
	firewall, ok := actions[2].(string)
	if !ok {
		return errors.New("firewall name must be a string")
	}
	address, ok := actions[3].(string)
	if !ok {
		return errors.New("address object must be a string")
	}
	deviceID, err := s.getDeviceID(firewall, true)
	if err != nil {
		return err
	}
	log.Println(deviceID) // debug output retained from the original
	moid := fmt.Sprintf("net.juniper.jnap.sm.om.jpa.SecurityDeviceEntity:%d", deviceID)
	varID, err := s.getVariableID(variable)
	if err != nil {
		return err
	}
	if varID == 0 {
		// The original fell through and eventually handed a nil request
		// to APICall; report the missing variable instead.
		return errors.New("no variable found")
	}
	vid, err := s.getObjectID(address, true)
	if err != nil {
		return err
	}
	// Fetch the current definition so the PUT round-trips every field.
	data, err := s.APICall(&APIRequest{
		Method: "get",
		URL:    fmt.Sprintf("/api/juniper/sd/variable-management/variable-definitions/%d", varID),
	})
	if err != nil {
		return err
	}
	var varData existingVariable
	if err := xml.Unmarshal(data, &varData); err != nil {
		return err
	}
	varContent := s.modifyVariableContent(&varData, moid, firewall, address, vid)
	modifyVariable := fmt.Sprintf(modifyVariableXML, varData.Name, varData.Type, varData.Description, varData.Version, varData.DefaultName, varData.DefaultValue, varContent)
	log.Println(modifyVariable) // debug output retained from the original
	var req *APIRequest
	switch actions[0] {
	case "delete":
		req = &APIRequest{
			Method:      "delete",
			URL:         fmt.Sprintf("/api/juniper/sd/variable-management/variable-definitions/%d", varID),
			ContentType: contentVariable,
		}
	case "add":
		req = &APIRequest{
			Method:      "put",
			URL:         fmt.Sprintf("/api/juniper/sd/variable-management/variable-definitions/%d", varID),
			Body:        modifyVariable,
			ContentType: contentVariable,
		}
	default:
		return fmt.Errorf("unknown action %q", actions[0])
	}
	_, err = s.APICall(req)
	return err
}
Testing device ID return
package junos
import (
"encoding/xml"
"errors"
"fmt"
"log"
"net/url"
"regexp"
"strconv"
"strings"
)
// Addresses contains a list of address objects.
type Addresses struct {
	Addresses []Address `xml:"address"`
}

// An Address contains information about each individual address object.
type Address struct {
	ID          int    `xml:"id"`
	Name        string `xml:"name"`
	AddressType string `xml:"address-type"` // e.g. "IPADDRESS", "NETWORK" or "GROUP"
	Description string `xml:"description"`
	IPAddress   string `xml:"ip-address"`
}

// Services contains a list of service objects.
type Services struct {
	Services []Service `xml:"service"`
}

// A Service contains information about each individual service object.
type Service struct {
	ID          int    `xml:"id"`
	Name        string `xml:"name"`
	IsGroup     bool   `xml:"is-group"` // true for service groups
	Description string `xml:"description"`
}

// A Policy contains information about each individual firewall policy.
type Policy struct {
	ID          int    `xml:"id"`
	Name        string `xml:"name"`
	Description string `xml:"description"`
}

// Policies contains a list of firewall policies.
type Policies struct {
	Policies []Policy `xml:"firewall-policy"`
}

// SecurityDevices contains a list of security devices.
type SecurityDevices struct {
	XMLName xml.Name         `xml:"devices"`
	Devices []SecurityDevice `xml:"device"`
}

// A SecurityDevice contains information about each individual security device.
type SecurityDevice struct {
	ID        int    `xml:"id"`
	Family    string `xml:"device-family"`
	Platform  string `xml:"platform"`
	IPAddress string `xml:"device-ip"`
	Name      string `xml:"name"`
}

// Variables contains a list of all polymorphic (variable) objects.
type Variables struct {
	Variables []Variable `xml:"variable-definition"`
}

// A Variable contains information about each individual polymorphic (variable) object.
type Variable struct {
	ID          int    `xml:"id"`
	Name        string `xml:"name"`
	Description string `xml:"description"`
}

// existingVariable holds a single variable definition as returned by the
// Space API, including its current device/value entries; ModifyVariable
// round-trips these fields when rebuilding the definition.
type existingVariable struct {
	XMLName            xml.Name         `xml:"variable-definition"`
	Name               string           `xml:"name"`
	Description        string           `xml:"description"`
	Type               string           `xml:"type"`
	Version            int              `xml:"edit-version"` // required by the PUT in ModifyVariable
	DefaultName        string           `xml:"default-name"`
	DefaultValue       string           `xml:"default-value-detail>default-value"`
	VariableValuesList []variableValues `xml:"variable-values-list>variable-values"`
}

// variableValues is one device/value pair inside a variable definition.
type variableValues struct {
	XMLName       xml.Name `xml:"variable-values"`
	DeviceMOID    string   `xml:"device>moid"`
	DeviceName    string   `xml:"device>name"`
	VariableValue string   `xml:"variable-value-detail>variable-value"`
	VariableName  string   `xml:"variable-value-detail>name"`
}
// addressesXML is XML we send (POST) for creating an address object.
// Verbs: name, address-type, ip-address, description (all %s).
var addressesXML = `
<address>
<name>%s</name>
<address-type>%s</address-type>
<host-name/>
<edit-version/>
<members/>
<address-version>IPV4</address-version>
<definition-type>CUSTOM</definition-type>
<ip-address>%s</ip-address>
<description>%s</description>
</address>
`

// serviceXML is XML we send (POST) for creating a service object.
// Verbs: service name, description, protocol name, dst-port (may be a
// "low-high" range), sunrpc and msrpc protocol types, protocol number (%d),
// protocol type, disable-timeout flag, and the inactivity-timeout element.
var serviceXML = `
<service>
<name>%s</name>
<description>%s</description>
<is-group>false</is-group>
<protocols>
<protocol>
<name>%s</name>
<dst-port>%s</dst-port>
<sunrpc-protocol-type>%s</sunrpc-protocol-type>
<msrpc-protocol-type>%s</msrpc-protocol-type>
<protocol-number>%d</protocol-number>
<protocol-type>%s</protocol-type>
<disable-timeout>%s</disable-timeout>
%s
</protocol>
</protocols>
</service>
`

// addressGroupXML is XML we send (POST) for adding an address group.
// Verbs: group name, description.
var addressGroupXML = `
<address>
<name>%s</name>
<address-type>GROUP</address-type>
<host-name/>
<edit-version/>
<address-version>IPV4</address-version>
<definition-type>CUSTOM</definition-type>
<description>%s</description>
</address>
`

// serviceGroupXML is XML we send (POST) for adding a service group.
// Verbs: group name, description.
var serviceGroupXML = `
<service>
<name>%s</name>
<is-group>true</is-group>
<description>%s</description>
</service>
`

// removeXML is the XML-patch body we send (PATCH, see ModifyObject) for
// removing an address or service from a group.
// Verbs: relation ("address" or "service"), member name.
var removeXML = `
<diff>
<remove sel="%s/members/member[name='%s']"/>
</diff>
`

// addGroupMemberXML is the XML-patch body we send (PATCH, see ModifyObject)
// for adding addresses or services to a group.
// Verbs: relation ("address" or "service"), member name.
var addGroupMemberXML = `
<diff>
<add sel="%s/members">
<member>
<name>%s</name>
</member>
</add>
</diff>
`

// renameXML is the XML-patch body we send (PATCH, see ModifyObject) for
// renaming an address or service object.
// Verbs: relation ("address" or "service"), new name.
var renameXML = `
<diff>
<replace sel="%s/name">
<name>%s</name>
</replace>
</diff>
`

// updateDeviceXML is XML we send (POST) for updating a security device.
// Verb: the numeric security-device ID (%d).
var updateDeviceXML = `
<update-devices>
<sd-ids>
<id>%d</id>
</sd-ids>
<service-types>
<service-type>POLICY</service-type>
</service-types>
<update-options>
<enable-policy-rematch-srx-only>false</enable-policy-rematch-srx-only>
</update-options>
</update-devices>
`
// publishPolicyXML is XML we send (POST) for publishing a changed policy.
// Verb: the numeric policy ID (%d).
var publishPolicyXML = `
<publish>
<policy-ids>
<policy-id>%d</policy-id>
</policy-ids>
</publish>
`

// createVariableXML is the XML we send (POST) for adding a new variable object.
// Verbs: name, type, description, default object name (all %s) and the
// default object's numeric ID (%d) — see AddVariable.
var createVariableXML = `
<variable-definition>
<name>%s</name>
<type>%s</type>
<description>%s</description>
<context>DEVICE</context>
<default-name>%s</default-name>
<default-value-detail>
<default-value>%d</default-value>
</default-value-detail>
</variable-definition>
`

// modifyVariableXML is the XML we send (PUT) for updating an existing
// variable object. Verbs: name, type, description (%s), edit version (%d),
// default name and default value (%s), and the <variable-values> entries
// built by modifyVariableContent (%s) — see ModifyVariable.
var modifyVariableXML = `
<variable-definition>
<name>%s</name>
<type>%s</type>
<description>%s</description>
<edit-version>%d</edit-version>
<context>DEVICE</context>
<default-name>%s</default-name>
<default-value-detail>
<default-value>%s</default-value>
</default-value-detail>
<variable-values-list>
%s
</variable-values-list>
</variable-definition>
`
// getObjectID returns the ID of the address or service object. object may be
// an int (already an ID, returned as-is) or a string holding an object name
// or an IPv4 CIDR. otype selects address (true) or service (false) lookup;
// string lookups always fall through to the address tables last, matching
// the original precedence. Returns 0 when nothing matches.
func (s *JunosSpace) getObjectID(object interface{}, otype bool) (int, error) {
	// An int is already an ID. The original fell through to
	// s.Addresses(object.(string)), which panics on a failed assertion
	// when object is an int.
	if id, ok := object.(int); ok {
		return id, nil
	}
	name, ok := object.(string)
	if !ok {
		// Unsupported type: preserve the original "not found" result.
		return 0, nil
	}
	var objectID int
	// Compiled per call to keep the block self-contained; could be hoisted
	// to a package-level var.
	ipRegex := regexp.MustCompile(`(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\/\d+)`)
	if !otype {
		// Service lookup. The original discarded this error and then
		// dereferenced a nil *Services on failure.
		services, err := s.Services(name)
		if err != nil {
			return 0, err
		}
		for _, o := range services.Services {
			if o.Name == name {
				objectID = o.ID
			}
		}
	}
	objects, err := s.Addresses(name)
	if err != nil {
		return 0, err
	}
	if ipRegex.MatchString(name) {
		for _, o := range objects.Addresses {
			if o.IPAddress == name {
				objectID = o.ID
			}
		}
	}
	for _, o := range objects.Addresses {
		if o.Name == name {
			objectID = o.ID
		}
	}
	return objectID, nil
}
// getPolicyID returns the ID of the firewall policy with the given name,
// or 0 when no policy matches.
func (s *JunosSpace) getPolicyID(object string) (int, error) {
	policies, err := s.Policies()
	if err != nil {
		return 0, err
	}
	id := 0
	for _, p := range policies.Policies {
		if p.Name == object {
			id = p.ID
		}
	}
	return id, nil
}
// getVariableID returns the ID of the polymorphic (variable) object with
// the given name, or 0 when no variable matches.
func (s *JunosSpace) getVariableID(variable string) (int, error) {
	vars, err := s.Variables()
	if err != nil {
		return 0, err
	}
	id := 0
	for _, v := range vars.Variables {
		if v.Name == variable {
			id = v.ID
		}
	}
	return id, nil
}
// modifyVariableContent rebuilds the <variable-values> entries of an existing
// variable definition and appends one new device/value pair for the given
// firewall (moid) and address object (address, entity ID vid).
func (s *JunosSpace) modifyVariableContent(data *existingVariable, moid, firewall, address string, vid int) string {
	var b strings.Builder
	// Re-emit every existing device/value pair so the PUT keeps them.
	for _, d := range data.VariableValuesList {
		fmt.Fprintf(&b, "<variable-values><device><moid>%s</moid><name>%s</name></device>", d.DeviceMOID, d.DeviceName)
		fmt.Fprintf(&b, "<variable-value-detail><variable-value>%s</variable-value><name>%s</name></variable-value-detail></variable-values>", d.VariableValue, d.VariableName)
	}
	// Append the new device together with its address entity reference.
	fmt.Fprintf(&b, "<variable-values><device><moid>%s</moid><name>%s</name></device>", moid, firewall)
	fmt.Fprintf(&b, "<variable-value-detail><variable-value>net.juniper.jnap.sm.om.jpa.AddressEntity:%d</variable-value><name>%s</name></variable-value-detail></variable-values>", vid, address)
	return b.String()
}
// Addresses queries the Junos Space server and returns all of the information
// about each address that is managed by Space. filter of "all" matches every
// address; any other value restricts the query to that name.
func (s *JunosSpace) Addresses(filter string) (*Addresses, error) {
	query := url.Values{}
	if filter == "all" {
		query.Set("filter", "(global eq '')")
	} else {
		query.Set("filter", fmt.Sprintf("(global eq '%s')", filter))
	}
	resp, err := s.APICall(&APIRequest{
		Method: "get",
		URL:    fmt.Sprintf("/api/juniper/sd/address-management/addresses?%s", query.Encode()),
	})
	if err != nil {
		return nil, err
	}
	var addresses Addresses
	if err := xml.Unmarshal(resp, &addresses); err != nil {
		return nil, err
	}
	return &addresses, nil
}
// AddAddress adds a new address object to Junos Space, and returns the Job ID.
func (s *JunosSpace) AddAddress(name, ip, desc string) (int, error) {
	// A slash means a CIDR block rather than a single host address.
	addrType := "IPADDRESS"
	if strings.Contains(ip, "/") {
		addrType = "NETWORK"
	}
	resp, err := s.APICall(&APIRequest{
		Method:      "post",
		URL:         "/api/juniper/sd/address-management/addresses",
		Body:        fmt.Sprintf(addressesXML, name, addrType, ip, desc),
		ContentType: contentAddress,
	})
	if err != nil {
		return 0, err
	}
	var job jobID
	if err := xml.Unmarshal(resp, &job); err != nil {
		return 0, err
	}
	return job.ID, nil
}
// AddService adds a new service object to Junos Space, and returns the Job ID. If adding just
// a single port, then enter in the same number in both the "low" and "high" parameters. For a
// range of ports, enter the starting port in "low" and the upper limit in "high."
func (s *JunosSpace) AddService(proto, name string, low, high int, desc string, timeout int) (int, error) {
	protocol := strings.ToUpper(proto)
	ptype := "PROTOCOL_" + protocol
	// IANA protocol numbers: TCP is 6, UDP is 17. Anything other than
	// "udp" falls back to 6, as before.
	protoNumber := 6
	if proto == "udp" {
		protoNumber = 17
	}
	// A single port unless low < high, in which case a "low-high" range.
	port := strconv.Itoa(low)
	if low < high {
		port = fmt.Sprintf("%d-%d", low, high)
	}
	// A zero timeout disables the inactivity timer entirely.
	inactivity, secs := "false", fmt.Sprintf("<inactivity-timeout>%d</inactivity-timeout>", timeout)
	if timeout == 0 {
		inactivity, secs = "true", "<inactivity-timeout/>"
	}
	resp, err := s.APICall(&APIRequest{
		Method:      "post",
		URL:         "/api/juniper/sd/service-management/services",
		Body:        fmt.Sprintf(serviceXML, name, desc, name, port, protocol, protocol, protoNumber, ptype, inactivity, secs),
		ContentType: contentService,
	})
	if err != nil {
		return 0, err
	}
	var job jobID
	if err := xml.Unmarshal(resp, &job); err != nil {
		return 0, err
	}
	return job.ID, nil
}
// AddGroup adds a new address group (otype true) or service group (otype
// false) to Junos Space. Unlike the Add* job-based calls, it only reports
// success or failure.
func (s *JunosSpace) AddGroup(otype bool, name, desc string) error {
	uri, template, content := "/api/juniper/sd/address-management/addresses", addressGroupXML, contentAddress
	if !otype {
		uri, template, content = "/api/juniper/sd/service-management/services", serviceGroupXML, contentService
	}
	_, err := s.APICall(&APIRequest{
		Method:      "post",
		URL:         uri,
		Body:        fmt.Sprintf(template, name, desc),
		ContentType: content,
	})
	return err
}
// ModifyObject modifies an existing address, service or group. The actions are as follows:
//
// "otype" is either "true" (for address) or "false" (for service)
//
// ModifyObject(otype, "add", "existing-group", "member-to-add")
// ModifyObject(otype, "remove", "existing-group", "member-to-remove")
// ModifyObject(otype, "rename", "old-name", "new-name")
// ModifyObject(otype, "delete", "object-to-delete")
func (s *JunosSpace) ModifyObject(otype bool, actions ...interface{}) error {
	objectID, err := s.getObjectID(actions[1], otype)
	if err != nil {
		return err
	}
	// An unknown object resolves to ID 0; silently do nothing, as before.
	if objectID == 0 {
		return nil
	}
	uri := fmt.Sprintf("/api/juniper/sd/address-management/addresses/%d", objectID)
	content := contentAddressPatch
	rel := "address"
	if !otype {
		uri = fmt.Sprintf("/api/juniper/sd/service-management/services/%d", objectID)
		content = contentServicePatch
		rel = "service"
	}
	var req *APIRequest
	switch actions[0] {
	case "add":
		req = &APIRequest{
			Method:      "patch",
			URL:         uri,
			Body:        fmt.Sprintf(addGroupMemberXML, rel, actions[2]),
			ContentType: content,
		}
	case "remove":
		req = &APIRequest{
			Method:      "patch",
			URL:         uri,
			Body:        fmt.Sprintf(removeXML, rel, actions[2]),
			ContentType: content,
		}
	case "rename":
		req = &APIRequest{
			Method:      "patch",
			URL:         uri,
			Body:        fmt.Sprintf(renameXML, rel, actions[2]),
			ContentType: content,
		}
	case "delete":
		req = &APIRequest{
			Method: "delete",
			URL:    uri,
		}
	default:
		// The original fell through here with req == nil and handed a
		// nil request to APICall; fail fast instead.
		return fmt.Errorf("unknown action %q", actions[0])
	}
	_, err = s.APICall(req)
	return err
}
// Services queries the Junos Space server and returns all of the information
// about each service that is managed by Space. filter of "all" matches every
// service; any other value restricts the query to that name.
func (s *JunosSpace) Services(filter string) (*Services, error) {
	query := url.Values{}
	if filter == "all" {
		query.Set("filter", "(global eq '')")
	} else {
		query.Set("filter", fmt.Sprintf("(global eq '%s')", filter))
	}
	resp, err := s.APICall(&APIRequest{
		Method: "get",
		URL:    fmt.Sprintf("/api/juniper/sd/service-management/services?%s", query.Encode()),
	})
	if err != nil {
		return nil, err
	}
	var services Services
	if err := xml.Unmarshal(resp, &services); err != nil {
		return nil, err
	}
	return &services, nil
}
// SecurityDevices queries the Junos Space server and returns all of the information
// about each security device that is managed by Space.
func (s *JunosSpace) SecurityDevices() (*SecurityDevices, error) {
	resp, err := s.APICall(&APIRequest{
		Method: "get",
		URL:    "/api/juniper/sd/device-management/devices",
	})
	if err != nil {
		return nil, err
	}
	var devices SecurityDevices
	if err := xml.Unmarshal(resp, &devices); err != nil {
		return nil, err
	}
	return &devices, nil
}
// Policies returns a list of all firewall policies managed by Junos Space.
func (s *JunosSpace) Policies() (*Policies, error) {
	resp, err := s.APICall(&APIRequest{
		Method: "get",
		URL:    "/api/juniper/sd/fwpolicy-management/firewall-policies",
	})
	if err != nil {
		return nil, err
	}
	var policies Policies
	if err := xml.Unmarshal(resp, &policies); err != nil {
		return nil, err
	}
	return &policies, nil
}
// PublishPolicy publishes a changed firewall policy. If "true" is specified for
// <update>, then Junos Space will also update the device. object may be the
// numeric policy ID or the policy name.
func (s *JunosSpace) PublishPolicy(object interface{}, update bool) (int, error) {
	var id int
	switch v := object.(type) {
	case int:
		id = v
	case string:
		policyID, err := s.getPolicyID(v)
		if err != nil {
			return 0, err
		}
		if policyID == 0 {
			return 0, errors.New("no policy found")
		}
		id = policyID
	}
	uri := "/api/juniper/sd/fwpolicy-management/publish"
	if update {
		uri += "?update=true"
	}
	resp, err := s.APICall(&APIRequest{
		Method:      "post",
		URL:         uri,
		Body:        fmt.Sprintf(publishPolicyXML, id),
		ContentType: contentPublish,
	})
	if err != nil {
		return 0, err
	}
	var job jobID
	if err := xml.Unmarshal(resp, &job); err != nil {
		// A reply that doesn't parse as a job means Space had nothing to push.
		return 0, errors.New("no policy changes to publish")
	}
	return job.ID, nil
}
// UpdateDevice will update a changed security device, synchronizing it with
// Junos Space, and returns the Job ID.
func (s *JunosSpace) UpdateDevice(device interface{}) (int, error) {
	deviceID, err := s.getDeviceID(device, true)
	if err != nil {
		return 0, err
	}
	resp, err := s.APICall(&APIRequest{
		Method:      "post",
		URL:         "/api/juniper/sd/device-management/update-devices",
		Body:        fmt.Sprintf(updateDeviceXML, deviceID),
		ContentType: contentUpdateDevices,
	})
	if err != nil {
		return 0, err
	}
	var job jobID
	if err := xml.Unmarshal(resp, &job); err != nil {
		return 0, err
	}
	return job.ID, nil
}
// Variables returns a listing of all polymorphic (variable) objects.
func (s *JunosSpace) Variables() (*Variables, error) {
	resp, err := s.APICall(&APIRequest{
		Method: "get",
		URL:    "/api/juniper/sd/variable-management/variable-definitions",
	})
	if err != nil {
		return nil, err
	}
	var vars Variables
	if err := xml.Unmarshal(resp, &vars); err != nil {
		return nil, err
	}
	return &vars, nil
}
// AddVariable creates a new polymorphic object (variable) on the Junos Space
// server, using obj as its default address object.
func (s *JunosSpace) AddVariable(name, vtype, desc, obj string) error {
	objID, err := s.getObjectID(obj, true)
	if err != nil {
		return err
	}
	_, err = s.APICall(&APIRequest{
		Method:      "post",
		URL:         "/api/juniper/sd/variable-management/variable-definitions",
		Body:        fmt.Sprintf(createVariableXML, name, strings.ToUpper(vtype), desc, obj, objID),
		ContentType: contentVariable,
	})
	return err
}
// ModifyVariable adds or deletes entries to the polymorphic (variable) object:
//
// ModifyVariable("add", "variable-name", "firewall", "address-object")
// ModifyVariable("delete", "variable-name", "firewall", "address-object")
func (s *JunosSpace) ModifyVariable(actions ...interface{}) error {
	// The original passed actions[1] (interface{}) straight into
	// getVariableID's string parameter, which does not compile; assert the
	// concrete types up front instead.
	variable, ok := actions[1].(string)
	if !ok {
		return errors.New("variable name must be a string")
	}
	firewall, ok := actions[2].(string)
	if !ok {
		return errors.New("firewall name must be a string")
	}
	address, ok := actions[3].(string)
	if !ok {
		return errors.New("address object must be a string")
	}
	deviceID, err := s.getDeviceID(firewall, true)
	if err != nil {
		return err
	}
	log.Println(deviceID) // debug output retained from the original
	moid := fmt.Sprintf("net.juniper.jnap.sm.om.jpa.SecurityDeviceEntity:%d", deviceID)
	varID, err := s.getVariableID(variable)
	if err != nil {
		return err
	}
	if varID == 0 {
		// The original fell through and eventually handed a nil request
		// to APICall; report the missing variable instead.
		return errors.New("no variable found")
	}
	vid, err := s.getObjectID(address, true)
	if err != nil {
		return err
	}
	// Fetch the current definition so the PUT round-trips every field.
	data, err := s.APICall(&APIRequest{
		Method: "get",
		URL:    fmt.Sprintf("/api/juniper/sd/variable-management/variable-definitions/%d", varID),
	})
	if err != nil {
		return err
	}
	var varData existingVariable
	if err := xml.Unmarshal(data, &varData); err != nil {
		return err
	}
	varContent := s.modifyVariableContent(&varData, moid, firewall, address, vid)
	modifyVariable := fmt.Sprintf(modifyVariableXML, varData.Name, varData.Type, varData.Description, varData.Version, varData.DefaultName, varData.DefaultValue, varContent)
	log.Println(modifyVariable) // debug output retained from the original
	var req *APIRequest
	switch actions[0] {
	case "delete":
		req = &APIRequest{
			Method:      "delete",
			URL:         fmt.Sprintf("/api/juniper/sd/variable-management/variable-definitions/%d", varID),
			ContentType: contentVariable,
		}
	case "add":
		req = &APIRequest{
			Method:      "put",
			URL:         fmt.Sprintf("/api/juniper/sd/variable-management/variable-definitions/%d", varID),
			Body:        modifyVariable,
			ContentType: contentVariable,
		}
	default:
		return fmt.Errorf("unknown action %q", actions[0])
	}
	_, err = s.APICall(req)
	return err
}
|
// Copyright ©2014 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mat
import (
"gonum.org/v1/gonum/blas"
"gonum.org/v1/gonum/blas/blas64"
"gonum.org/v1/gonum/internal/asm/f64"
)
// Inner computes the generalized inner product
//  x^T A y
// between column vectors x and y with matrix A. This is only a true inner product if
// A is symmetric positive definite, though the operation works for any matrix A.
//
// Inner panics if x.Len != m or y.Len != n when A is an m x n matrix.
func Inner(x Vector, A Matrix, y Vector) float64 {
	m, n := A.Dims()
	if x.Len() != m {
		panic(ErrShape)
	}
	if y.Len() != n {
		panic(ErrShape)
	}
	if m == 0 || n == 0 {
		// An empty dimension makes the product an empty sum.
		return 0
	}
	var sum float64
	// Fast paths: when the operands expose their raw representations, call
	// straight into the f64 dot kernels. A break from either case drops to
	// the generic At/AtVec fallback loop at the bottom.
	switch b := A.(type) {
	case RawSymmetricer:
		bmat := b.RawSymmetric()
		if bmat.Uplo != blas.Upper {
			// Panic as a string not a mat.Error.
			panic(badSymTriangle)
		}
		var xmat, ymat blas64.Vector
		if xrv, ok := x.(RawVectorer); ok {
			xmat = xrv.RawVector()
		} else {
			break
		}
		if yrv, ok := y.(RawVectorer); ok {
			ymat = yrv.RawVector()
		} else {
			break
		}
		for i := 0; i < x.Len(); i++ {
			xi := x.AtVec(i)
			// Row i of the stored upper triangle starts at the diagonal:
			// accumulate x_i * A[i, i:n] . y[i:n].
			if xi != 0 {
				if ymat.Inc == 1 {
					sum += xi * f64.DotUnitary(
						bmat.Data[i*bmat.Stride+i:i*bmat.Stride+n],
						ymat.Data[i:],
					)
				} else {
					sum += xi * f64.DotInc(
						bmat.Data[i*bmat.Stride+i:i*bmat.Stride+n],
						ymat.Data[i*ymat.Inc:], uintptr(n-i),
						1, uintptr(ymat.Inc),
						0, 0,
					)
				}
			}
			yi := y.AtVec(i)
			// The strictly-upper part A[i, i+1:n] also stands in for the
			// symmetric lower triangle: accumulate
			// y_i * A[i, i+1:n] . x[i+1:n]. Skipped on the last row,
			// where that slice is empty.
			if i != n-1 && yi != 0 {
				if xmat.Inc == 1 {
					sum += yi * f64.DotUnitary(
						bmat.Data[i*bmat.Stride+i+1:i*bmat.Stride+n],
						xmat.Data[i+1:],
					)
				} else {
					sum += yi * f64.DotInc(
						bmat.Data[i*bmat.Stride+i+1:i*bmat.Stride+n],
						xmat.Data[(i+1)*xmat.Inc:], uintptr(n-i-1),
						1, uintptr(xmat.Inc),
						0, 0,
					)
				}
			}
		}
		return sum
	case RawMatrixer:
		bmat := b.RawMatrix()
		var ymat blas64.Vector
		if yrv, ok := y.(RawVectorer); ok {
			ymat = yrv.RawVector()
		} else {
			break
		}
		// Dense general matrix: sum of x_i * (row i of A) . y, skipping
		// rows with a zero x coefficient.
		for i := 0; i < x.Len(); i++ {
			xi := x.AtVec(i)
			if xi != 0 {
				if ymat.Inc == 1 {
					sum += xi * f64.DotUnitary(
						bmat.Data[i*bmat.Stride:i*bmat.Stride+n],
						ymat.Data,
					)
				} else {
					sum += xi * f64.DotInc(
						bmat.Data[i*bmat.Stride:i*bmat.Stride+n],
						ymat.Data, uintptr(n),
						1, uintptr(ymat.Inc),
						0, 0,
					)
				}
			}
		}
		return sum
	}
	// Generic fallback using only the Matrix/Vector interfaces.
	for i := 0; i < x.Len(); i++ {
		xi := x.AtVec(i)
		for j := 0; j < y.Len(); j++ {
			sum += xi * A.At(i, j) * y.AtVec(j)
		}
	}
	return sum
}
mat: clean up Inner parameter naming
// Copyright ©2014 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mat
import (
"gonum.org/v1/gonum/blas"
"gonum.org/v1/gonum/blas/blas64"
"gonum.org/v1/gonum/internal/asm/f64"
)
// Inner computes the generalized inner product
//  x^T A y
// between column vectors x and y with matrix A. This is only a true inner product if
// A is symmetric positive definite, though the operation works for any matrix A.
//
// Inner panics if x.Len != m or y.Len != n when A is an m x n matrix.
func Inner(x Vector, a Matrix, y Vector) float64 {
	m, n := a.Dims()
	// x must match the row count and y the column count of a.
	if x.Len() != m {
		panic(ErrShape)
	}
	if y.Len() != n {
		panic(ErrShape)
	}
	// An empty matrix contributes nothing to the product.
	if m == 0 || n == 0 {
		return 0
	}
	var sum float64
	switch a := a.(type) {
	case RawSymmetricer:
		// Fast path for symmetric matrices with raw access: only the upper
		// triangle is stored, so each strictly-upper element a_ij also stands
		// in for the mirrored a_ji term below the diagonal.
		amat := a.RawSymmetric()
		if amat.Uplo != blas.Upper {
			// Panic as a string not a mat.Error.
			panic(badSymTriangle)
		}
		var xmat, ymat blas64.Vector
		if xrv, ok := x.(RawVectorer); ok {
			xmat = xrv.RawVector()
		} else {
			// No raw access to x; fall through to the generic loop below.
			break
		}
		if yrv, ok := y.(RawVectorer); ok {
			ymat = yrv.RawVector()
		} else {
			break
		}
		for i := 0; i < x.Len(); i++ {
			xi := x.AtVec(i)
			// Stored row i (diagonal onward) dotted with y[i:], scaled by x_i.
			if xi != 0 {
				if ymat.Inc == 1 {
					sum += xi * f64.DotUnitary(
						amat.Data[i*amat.Stride+i:i*amat.Stride+n],
						ymat.Data[i:],
					)
				} else {
					sum += xi * f64.DotInc(
						amat.Data[i*amat.Stride+i:i*amat.Stride+n],
						ymat.Data[i*ymat.Inc:], uintptr(n-i),
						1, uintptr(ymat.Inc),
						0, 0,
					)
				}
			}
			yi := y.AtVec(i)
			// The strictly-upper part of row i mirrors column i below the
			// diagonal; dot it with x[i+1:] to cover a's unstored lower half.
			if i != n-1 && yi != 0 {
				if xmat.Inc == 1 {
					sum += yi * f64.DotUnitary(
						amat.Data[i*amat.Stride+i+1:i*amat.Stride+n],
						xmat.Data[i+1:],
					)
				} else {
					sum += yi * f64.DotInc(
						amat.Data[i*amat.Stride+i+1:i*amat.Stride+n],
						xmat.Data[(i+1)*xmat.Inc:], uintptr(n-i-1),
						1, uintptr(xmat.Inc),
						0, 0,
					)
				}
			}
		}
		return sum
	case RawMatrixer:
		// Fast path for general dense matrices: dot each row with y, scaled by x_i.
		amat := a.RawMatrix()
		var ymat blas64.Vector
		if yrv, ok := y.(RawVectorer); ok {
			ymat = yrv.RawVector()
		} else {
			break
		}
		for i := 0; i < x.Len(); i++ {
			xi := x.AtVec(i)
			if xi != 0 {
				if ymat.Inc == 1 {
					sum += xi * f64.DotUnitary(
						amat.Data[i*amat.Stride:i*amat.Stride+n],
						ymat.Data,
					)
				} else {
					sum += xi * f64.DotInc(
						amat.Data[i*amat.Stride:i*amat.Stride+n],
						ymat.Data, uintptr(n),
						1, uintptr(ymat.Inc),
						0, 0,
					)
				}
			}
		}
		return sum
	}
	// Generic fallback using element-wise access.
	for i := 0; i < x.Len(); i++ {
		xi := x.AtVec(i)
		for j := 0; j < y.Len(); j++ {
			sum += xi * a.At(i, j) * y.AtVec(j)
		}
	}
	return sum
}
|
package sp
import (
"encoding/json"
"io/ioutil"
"log"
"net/http"
"net/url"
"strconv"
)
const (
apiURL = "http://ws.spotify.com"
)
// Spotify is a stateless client for the legacy ws.spotify.com metadata API.
type Spotify struct{}
// The API returns strings for some things that one would expect
// to be numbers (for example, popularity).
//
// FloatString is a string-encoded float64. It implements json.Unmarshaler
// so that decoding validates the value; the original method had the
// signature UnmarshalJSON(interface{}) float64, which does not satisfy
// json.Unmarshaler (so it was never called) and terminated the whole
// process via log.Fatal on bad input.
type FloatString string

// UnmarshalJSON implements json.Unmarshaler. It accepts either a JSON
// string ("0.89") or a bare JSON number (0.89), validates that the text
// parses as a float64, and stores the textual form.
func (f *FloatString) UnmarshalJSON(b []byte) error {
	s := string(b)
	// Strip surrounding quotes when the value arrived as a JSON string.
	if len(s) >= 2 && s[0] == '"' && s[len(s)-1] == '"' {
		s = s[1 : len(s)-1]
	}
	if _, err := strconv.ParseFloat(s, 64); err != nil {
		return err
	}
	*f = FloatString(s)
	return nil
}

// Float64 returns the parsed numeric value.
func (f FloatString) Float64() (float64, error) {
	return strconv.ParseFloat(string(f), 64)
}
// IntString is a string-encoded int64 — same idea as FloatString, but for
// integers. It implements json.Unmarshaler; the original method's
// UnmarshalJSON(interface{}) int64 signature did not satisfy
// json.Unmarshaler and called log.Fatal from library code.
type IntString string

// UnmarshalJSON implements json.Unmarshaler. It accepts a JSON string
// ("42") or a bare JSON number (42), validates it parses as an int64, and
// stores the textual form.
func (f *IntString) UnmarshalJSON(b []byte) error {
	s := string(b)
	// Strip surrounding quotes when the value arrived as a JSON string.
	if len(s) >= 2 && s[0] == '"' && s[len(s)-1] == '"' {
		s = s[1 : len(s)-1]
	}
	if _, err := strconv.ParseInt(s, 10, 64); err != nil {
		return err
	}
	*f = IntString(s)
	return nil
}

// Int64 returns the parsed numeric value.
func (f IntString) Int64() (int64, error) {
	return strconv.ParseInt(string(f), 10, 64)
}
// Album describes one album entry in an album-search response, including
// its artists and the territories it is available in.
type Album struct {
	Name string
	// Popularity arrives as a string from the API; see FloatString.
	Popularity FloatString
	ExternalIds []struct {
		Type string
		Id IntString
	} `json:"external-ids"`
	Href string
	Artists []struct {
		Href string
		Name string
	}
	Availability struct {
		Territories string
	}
}

// Info carries the paging metadata the API returns alongside search results.
type Info struct {
	NumResults int
	Limit int
	Offset int
	Query string
	Type string
	Page int
}

// SearchAlbumsResponse is the decoded payload of the album search endpoint.
type SearchAlbumsResponse struct {
	Info Info
	Albums []Album
}

// Artist describes one artist entry in an artist-search response.
type Artist struct {
	Href string
	Name string
	Popularity FloatString
}

// SearchArtistsResponse is the decoded payload of the artist search endpoint.
type SearchArtistsResponse struct {
	Info Info
	Artists []Artist
}
// getRequest performs a GET against the given API endpoint with the supplied
// query parameters and returns the raw response body.
func (r *Spotify) getRequest(params map[string]string, endpoint string) ([]byte, error) {
	query := url.Values{}
	for name, value := range params {
		query.Set(name, value)
	}
	resp, err := http.Get(apiURL + endpoint + "?" + query.Encode())
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, readErr := ioutil.ReadAll(resp.Body)
	if readErr != nil {
		return nil, readErr
	}
	return body, nil
}
// SearchAlbums queries the album search endpoint for q and decodes the result.
func (r *Spotify) SearchAlbums(q string) (SearchAlbumsResponse, error) {
	var result SearchAlbumsResponse
	body, err := r.getRequest(map[string]string{"q": q}, "/search/1/album.json")
	if err != nil {
		return SearchAlbumsResponse{}, err
	}
	if err := json.Unmarshal(body, &result); err != nil {
		return SearchAlbumsResponse{}, err
	}
	return result, nil
}
// SearchArtists queries the artist search endpoint for q and decodes the result.
func (r *Spotify) SearchArtists(q string) (SearchArtistsResponse, error) {
	var result SearchArtistsResponse
	body, err := r.getRequest(map[string]string{"q": q}, "/search/1/artist.json")
	if err != nil {
		return SearchArtistsResponse{}, err
	}
	if err := json.Unmarshal(body, &result); err != nil {
		return SearchArtistsResponse{}, err
	}
	return result, nil
}
add SearchTracks
package sp
import (
"encoding/json"
"io/ioutil"
"log"
"net/http"
"net/url"
"strconv"
)
const (
apiURL = "http://ws.spotify.com"
)
// Spotify is a stateless client for the legacy ws.spotify.com metadata API.
type Spotify struct{}
// The API returns strings for some things that one would expect
// to be numbers (for example, popularity).
//
// FloatString is a string-encoded float64. It implements json.Unmarshaler
// so that decoding validates the value; the original method had the
// signature UnmarshalJSON(interface{}) float64, which does not satisfy
// json.Unmarshaler (so it was never called) and terminated the whole
// process via log.Fatal on bad input.
type FloatString string

// UnmarshalJSON implements json.Unmarshaler. It accepts either a JSON
// string ("0.89") or a bare JSON number (0.89), validates that the text
// parses as a float64, and stores the textual form.
func (f *FloatString) UnmarshalJSON(b []byte) error {
	s := string(b)
	// Strip surrounding quotes when the value arrived as a JSON string.
	if len(s) >= 2 && s[0] == '"' && s[len(s)-1] == '"' {
		s = s[1 : len(s)-1]
	}
	if _, err := strconv.ParseFloat(s, 64); err != nil {
		return err
	}
	*f = FloatString(s)
	return nil
}

// Float64 returns the parsed numeric value.
func (f FloatString) Float64() (float64, error) {
	return strconv.ParseFloat(string(f), 64)
}
// IntString is a string-encoded int64 — same idea as FloatString, but for
// integers. It implements json.Unmarshaler; the original method's
// UnmarshalJSON(interface{}) int64 signature did not satisfy
// json.Unmarshaler and called log.Fatal from library code.
type IntString string

// UnmarshalJSON implements json.Unmarshaler. It accepts a JSON string
// ("42") or a bare JSON number (42), validates it parses as an int64, and
// stores the textual form.
func (f *IntString) UnmarshalJSON(b []byte) error {
	s := string(b)
	// Strip surrounding quotes when the value arrived as a JSON string.
	if len(s) >= 2 && s[0] == '"' && s[len(s)-1] == '"' {
		s = s[1 : len(s)-1]
	}
	if _, err := strconv.ParseInt(s, 10, 64); err != nil {
		return err
	}
	*f = IntString(s)
	return nil
}

// Int64 returns the parsed numeric value.
func (f IntString) Int64() (int64, error) {
	return strconv.ParseInt(string(f), 10, 64)
}
// ExternalId is an external identifier attached to an album or track,
// e.g. a UPC or ISRC, tagged with its type.
type ExternalId struct {
	Type string
	Id IntString
}
// Artist describes one artist entry in search responses.
type Artist struct {
	Href string
	Name string
	// ",omitempty" (leading comma) keeps the default field name; the
	// previous bare `json:"omitempty"` tag renamed the JSON key to
	// "omitempty", breaking decoding of "popularity".
	Popularity FloatString `json:",omitempty"`
}
// Album describes one album entry in search responses, including its
// artists and the territories it is available in.
//
// The optional fields use ",omitempty" (note the leading comma): the
// previous bare `json:"omitempty"` tags renamed each JSON key to
// "omitempty" instead of marking the field optional.
type Album struct {
	Name string
	Released string `json:",omitempty"`
	Popularity FloatString `json:",omitempty"`
	ExternalIds []ExternalId `json:"external-ids"`
	Length float64 `json:",omitempty"`
	Href string
	Artists []Artist `json:",omitempty"`
	Availability struct {
		Territories string
	}
}
// Info carries the paging metadata the API returns alongside search results.
type Info struct {
	NumResults int `json:"num_results"`
	Limit int
	Offset int
	Query string
	Type string
	Page int
}

// SearchAlbumsResponse is the decoded payload of the album search endpoint.
type SearchAlbumsResponse struct {
	Info Info
	Albums []Album
}

// SearchArtistsResponse is the decoded payload of the artist search endpoint.
type SearchArtistsResponse struct {
	Info Info
	Artists []Artist
}

// Track describes one track entry in a track-search response, including
// its album and artists.
type Track struct {
	Album Album
	Name string
	ExternalIds []ExternalId `json:"external-ids"`
	// Popularity arrives as a string from the API; see FloatString.
	Popularity FloatString
	Explicit bool
	Length float64
	Href string
	Artists []Artist
	TrackNumber IntString `json:"track-number"`
}

// SearchTracksResponse is the decoded payload of the track search endpoint.
type SearchTracksResponse struct {
	Info Info
	Tracks []Track
}
// getRequest performs a GET against the given API endpoint with the supplied
// query parameters and returns the raw response body.
func (r *Spotify) getRequest(params map[string]string, endpoint string) ([]byte, error) {
	query := url.Values{}
	for name, value := range params {
		query.Set(name, value)
	}
	resp, err := http.Get(apiURL + endpoint + "?" + query.Encode())
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, readErr := ioutil.ReadAll(resp.Body)
	if readErr != nil {
		return nil, readErr
	}
	return body, nil
}
// SearchAlbums queries the album search endpoint for q and decodes the result.
func (r *Spotify) SearchAlbums(q string) (SearchAlbumsResponse, error) {
	var result SearchAlbumsResponse
	body, err := r.getRequest(map[string]string{"q": q}, "/search/1/album.json")
	if err != nil {
		return SearchAlbumsResponse{}, err
	}
	if err := json.Unmarshal(body, &result); err != nil {
		return SearchAlbumsResponse{}, err
	}
	return result, nil
}
// SearchArtists queries the artist search endpoint for q and decodes the result.
func (r *Spotify) SearchArtists(q string) (SearchArtistsResponse, error) {
	var result SearchArtistsResponse
	body, err := r.getRequest(map[string]string{"q": q}, "/search/1/artist.json")
	if err != nil {
		return SearchArtistsResponse{}, err
	}
	if err := json.Unmarshal(body, &result); err != nil {
		return SearchArtistsResponse{}, err
	}
	return result, nil
}
// SearchTracks queries the track search endpoint for q and decodes the result.
func (r *Spotify) SearchTracks(q string) (SearchTracksResponse, error) {
	var result SearchTracksResponse
	body, err := r.getRequest(map[string]string{"q": q}, "/search/1/track.json")
	if err != nil {
		return SearchTracksResponse{}, err
	}
	if err := json.Unmarshal(body, &result); err != nil {
		return SearchTracksResponse{}, err
	}
	return result, nil
}
|
// Copyright (c) 2017 Cisco and/or its affiliates.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package logging
import (
"fmt"
"strings"
"github.com/sirupsen/logrus"
)
var (
// DefaultLogger is the default logger
DefaultLogger Logger
// DefaultRegistry is the default logging registry
DefaultRegistry Registry
)
// LogLevel represents severity of log record
type LogLevel uint32

const (
	// PanicLevel - highest level of severity. Logs and then calls panic with the message passed in.
	PanicLevel LogLevel = iota
	// FatalLevel - logs and then calls `os.Exit(1)`.
	FatalLevel
	// ErrorLevel - used for errors that should definitely be noted.
	ErrorLevel
	// WarnLevel - non-critical entries that deserve eyes.
	WarnLevel
	// InfoLevel - general operational entries about what's going on inside the application.
	InfoLevel
	// DebugLevel - enabled for debugging, very verbose logging.
	DebugLevel
)

// String converts the LogLevel to a string. E.g. PanicLevel becomes "panic".
func (level LogLevel) String() string {
	// The levels are contiguous starting at PanicLevel, so a lookup table
	// indexed by level covers every defined value.
	names := [...]string{
		PanicLevel: "panic",
		FatalLevel: "fatal",
		ErrorLevel: "error",
		WarnLevel:  "warn",
		InfoLevel:  "info",
		DebugLevel: "debug",
	}
	if int(level) < len(names) {
		return names[level]
	}
	return fmt.Sprintf("unknown(%d)", level)
}

// ParseLogLevel parses string representation of LogLevel. Unrecognized
// names fall back to InfoLevel.
func ParseLogLevel(level string) LogLevel {
	byName := map[string]LogLevel{
		"debug":   DebugLevel,
		"info":    InfoLevel,
		"warn":    WarnLevel,
		"warning": WarnLevel,
		"error":   ErrorLevel,
		"fatal":   FatalLevel,
		"panic":   PanicLevel,
	}
	if parsed, ok := byName[strings.ToLower(level)]; ok {
		return parsed
	}
	return InfoLevel
}
// Fields is a type accepted by the WithFields method. It can be used to
// instantiate the field map using shorter notation.
type Fields map[string]interface{}

// LogWithLevel groups the leveled logging methods (Debug through Panic)
// together with the standard-library-style Print helpers.
type LogWithLevel interface {
	Debug(args ...interface{})
	Debugf(format string, args ...interface{})
	Info(args ...interface{})
	Infof(format string, args ...interface{})
	Warn(args ...interface{})
	Warnf(format string, args ...interface{})
	Error(args ...interface{})
	Errorf(format string, args ...interface{})
	Fatal(args ...interface{})
	Fatalf(format string, args ...interface{})
	Fatalln(args ...interface{})
	Panic(args ...interface{})
	Panicf(format string, args ...interface{})
	Print(v ...interface{})
	Printf(format string, v ...interface{})
	Println(v ...interface{})
}

// Logger provides logging capabilities
type Logger interface {
	// GetName return the logger name
	GetName() string
	// SetLevel modifies the LogLevel
	SetLevel(level LogLevel)
	// GetLevel returns currently set logLevel
	GetLevel() LogLevel
	// WithField creates one structured field
	WithField(key string, value interface{}) LogWithLevel
	// WithFields creates multiple structured fields
	WithFields(fields Fields) LogWithLevel
	// AddHook registers a hook, e.g. to send log records to an external address
	AddHook(hook logrus.Hook)
	LogWithLevel
}

// LoggerFactory is API for the plugins that want to create their own loggers.
type LoggerFactory interface {
	NewLogger(name string) Logger
}

// Registry groups multiple Logger instances and allows managing their log levels.
type Registry interface {
	// LoggerFactory allows the registry to create new loggers
	LoggerFactory
	// ListLoggers returns a map (loggerName => log level)
	ListLoggers() map[string]string
	// SetLevel modifies log level of selected logger in the registry
	SetLevel(logger, level string) error
	// GetLevel returns the currently set log level of the logger from registry
	GetLevel(logger string) (string, error)
	// Lookup returns a logger instance identified by name from registry
	Lookup(loggerName string) (logger Logger, found bool)
	// ClearRegistry removes all loggers except the default one from registry
	ClearRegistry()
	// AddHook stores a hook from the log manager to be used for new loggers
	AddHook(hook logrus.Hook)
}

// PluginLogger is intended for:
// 1. small plugins (that just need one logger; name corresponds to plugin name)
// 2. large plugins that need multiple loggers (all loggers share same name prefix)
type PluginLogger interface {
	// Plugin has by default possibility to log
	// Logger name is initialized with plugin name
	Logger
	// LoggerFactory can be optionally used by large plugins
	// to create child loggers (their names are prefixed by plugin logger name)
	LoggerFactory
}
// ForPlugin is used to initialize a plugin logger by name
// and optionally create children (their names prefixed by the plugin logger name).
func ForPlugin(name string) PluginLogger {
	// Reuse an already-registered logger with the same name instead of
	// creating a duplicate.
	if logger, found := DefaultRegistry.Lookup(name); found {
		DefaultLogger.Debugf("using plugin logger for %q that was already initialized", name)
		return &pluginLogger{
			Logger: logger,
			LoggerFactory: &prefixedLoggerFactory{name, DefaultRegistry},
		}
	}
	return NewPluginLogger(name, DefaultRegistry)
}

// NewPluginLogger creates new logger with given LoggerFactory.
func NewPluginLogger(name string, factory LoggerFactory) PluginLogger {
	return &pluginLogger{
		Logger: factory.NewLogger(name),
		LoggerFactory: &prefixedLoggerFactory{name, factory},
	}
}

// pluginLogger satisfies PluginLogger by embedding a Logger for the plugin
// itself plus a factory for its child loggers.
type pluginLogger struct {
	Logger
	LoggerFactory
}

// prefixedLoggerFactory creates loggers whose names carry a fixed prefix.
type prefixedLoggerFactory struct {
	prefix string
	factory LoggerFactory
}

// NewLogger creates a child logger. Note the prefix and name are
// concatenated directly, with no separator between them.
func (p *prefixedLoggerFactory) NewLogger(name string) Logger {
	return p.factory.NewLogger(p.prefix + name)
}
Introduce ParentLogger and include SetOutput, SetFormatter in Logger API
Signed-off-by: Ondrej Fabry <237151fbd58e417e6d72d788598ce96c14d7b9e5@cisco.com>
// Copyright (c) 2017 Cisco and/or its affiliates.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package logging
import (
"fmt"
"io"
"strings"
"github.com/sirupsen/logrus"
)
var (
// DefaultLogger is the default logger
DefaultLogger Logger
// DefaultRegistry is the default logging registry
DefaultRegistry Registry
)
// LogLevel represents severity of log record
type LogLevel uint32

const (
	// PanicLevel - highest level of severity. Logs and then calls panic with the message passed in.
	PanicLevel LogLevel = iota
	// FatalLevel - logs and then calls `os.Exit(1)`.
	FatalLevel
	// ErrorLevel - used for errors that should definitely be noted.
	ErrorLevel
	// WarnLevel - non-critical entries that deserve eyes.
	WarnLevel
	// InfoLevel - general operational entries about what's going on inside the application.
	InfoLevel
	// DebugLevel - enabled for debugging, very verbose logging.
	DebugLevel
)

// String converts the LogLevel to a string. E.g. PanicLevel becomes "panic".
func (level LogLevel) String() string {
	// The levels are contiguous starting at PanicLevel, so a lookup table
	// indexed by level covers every defined value.
	names := [...]string{
		PanicLevel: "panic",
		FatalLevel: "fatal",
		ErrorLevel: "error",
		WarnLevel:  "warn",
		InfoLevel:  "info",
		DebugLevel: "debug",
	}
	if int(level) < len(names) {
		return names[level]
	}
	return fmt.Sprintf("unknown(%d)", level)
}

// ParseLogLevel parses string representation of LogLevel. Unrecognized
// names fall back to InfoLevel.
func ParseLogLevel(level string) LogLevel {
	byName := map[string]LogLevel{
		"debug":   DebugLevel,
		"info":    InfoLevel,
		"warn":    WarnLevel,
		"warning": WarnLevel,
		"error":   ErrorLevel,
		"fatal":   FatalLevel,
		"panic":   PanicLevel,
	}
	if parsed, ok := byName[strings.ToLower(level)]; ok {
		return parsed
	}
	return InfoLevel
}
// Fields is a type accepted by the WithFields method. It can be used to
// instantiate the field map using shorter notation.
type Fields map[string]interface{}

// LogWithLevel groups the leveled logging methods (Debug through Panic)
// together with the standard-library-style Print helpers.
type LogWithLevel interface {
	Debug(args ...interface{})
	Debugf(format string, args ...interface{})
	Info(args ...interface{})
	Infof(format string, args ...interface{})
	Warn(args ...interface{})
	Warnf(format string, args ...interface{})
	Error(args ...interface{})
	Errorf(format string, args ...interface{})
	Fatal(args ...interface{})
	Fatalf(format string, args ...interface{})
	Fatalln(args ...interface{})
	Panic(args ...interface{})
	Panicf(format string, args ...interface{})
	Print(v ...interface{})
	Printf(format string, v ...interface{})
	Println(v ...interface{})
}

// Logger provides logging capabilities
type Logger interface {
	// GetName returns the logger name
	GetName() string
	// SetLevel modifies the log level
	SetLevel(level LogLevel)
	// GetLevel returns currently set log level
	GetLevel() LogLevel
	// WithField creates one structured field
	WithField(key string, value interface{}) LogWithLevel
	// WithFields creates multiple structured fields
	WithFields(fields Fields) LogWithLevel
	// AddHook registers a hook, e.g. to send log records to an external address
	AddHook(hook logrus.Hook)
	// SetOutput sets output writer
	SetOutput(out io.Writer)
	// SetFormatter sets custom formatter
	SetFormatter(formatter logrus.Formatter)
	LogWithLevel
}

// LoggerFactory is API for the plugins that want to create their own loggers.
type LoggerFactory interface {
	NewLogger(name string) Logger
}

// Registry groups multiple Logger instances and allows managing their log levels.
type Registry interface {
	// LoggerFactory allows the registry to create new loggers
	LoggerFactory
	// ListLoggers returns a map (loggerName => log level)
	ListLoggers() map[string]string
	// SetLevel modifies log level of selected logger in the registry
	SetLevel(logger, level string) error
	// GetLevel returns the currently set log level of the logger from registry
	GetLevel(logger string) (string, error)
	// Lookup returns a logger instance identified by name from registry
	Lookup(loggerName string) (logger Logger, found bool)
	// ClearRegistry removes all loggers except the default one from registry
	ClearRegistry()
	// AddHook stores a hook from the log manager to be used for new loggers
	AddHook(hook logrus.Hook)
}

// PluginLogger is intended for:
// 1. small plugins (that just need one logger; name corresponds to plugin name)
// 2. large plugins that need multiple loggers (all loggers share same name prefix)
type PluginLogger interface {
	// Plugin has by default possibility to log
	// Logger name is initialized with plugin name
	Logger
	// LoggerFactory can be optionally used by large plugins
	// to create child loggers (their names are prefixed by plugin logger name)
	LoggerFactory
}
// ForPlugin is used to initialize a plugin logger by name
// and optionally create children (their names prefixed by the plugin logger name).
func ForPlugin(name string) PluginLogger {
	// Reuse an already-registered logger with the same name instead of
	// creating a duplicate.
	if logger, found := DefaultRegistry.Lookup(name); found {
		DefaultLogger.Debugf("using plugin logger for %q that was already initialized", name)
		return &ParentLogger{
			Logger: logger,
			Prefix: name,
			Factory: DefaultRegistry,
		}
	}
	return NewParentLogger(name, DefaultRegistry)
}

// NewParentLogger creates new parent logger with given LoggerFactory and name as prefix.
func NewParentLogger(name string, factory LoggerFactory) *ParentLogger {
	return &ParentLogger{
		Logger: factory.NewLogger(name),
		Prefix: name,
		Factory: factory,
	}
}

// ParentLogger is a logger that can also create child loggers whose names
// are prefixed with its own Prefix.
type ParentLogger struct {
	Logger
	Prefix string
	Factory LoggerFactory
}

// NewLogger creates a child logger named "<Prefix>/<name>".
func (p *ParentLogger) NewLogger(name string) Logger {
	return p.Factory.NewLogger(fmt.Sprintf("%s/%s", p.Prefix, name))
}
|
package logging
import (
"encoding/json"
"io/ioutil"
"log/syslog"
"github.com/Shopify/sarama"
"github.com/mailgun/kafka-pixy/config"
"github.com/mailgun/logrus-hooks/kafkahook"
"github.com/mailgun/logrus-hooks/levelfilter"
"github.com/pkg/errors"
"github.com/samuel/go-zookeeper/zk"
log "github.com/sirupsen/logrus"
"github.com/sirupsen/logrus/hooks/syslog"
)
// Init initializes sirupsen/logrus hooks from the JSON config string. It also
// sets the sirupsen/logrus as a logger for 3rd party libraries.
//
// jsonCfg is a JSON array of loggerCfg objects; cfg supplies Kafka proxy
// settings and is required only when a "udplog" logger is configured.
func Init(jsonCfg string, cfg *config.App) error {
	var loggingCfg []loggerCfg
	if err := json.Unmarshal([]byte(jsonCfg), &loggingCfg); err != nil {
		return errors.Wrap(err, "failed to parse logger config")
	}
	formatter := &textFormatter{}
	log.SetFormatter(formatter)
	stdoutEnabled := false
	nonStdoutEnabled := false
	for _, loggerCfg := range loggingCfg {
		switch loggerCfg.Name {
		case "console":
			stdoutEnabled = true
		case "syslog":
			// Hook creation failures are deliberately skipped (best effort).
			h, err := logrus_syslog.NewSyslogHook("udp", "127.0.0.1:514", syslog.LOG_INFO|syslog.LOG_MAIL, "kafka-pixy")
			if err != nil {
				continue
			}
			log.AddHook(levelfilter.New(h, loggerCfg.level()))
			nonStdoutEnabled = true
		case "udplog":
			if cfg == nil {
				return errors.Errorf("App config must be provided")
			}
			// If a Kafka cluster is not specified in logging config or does
			// not exist in the Kafka-Pixy config, then the default cluster is
			// used.
			cluster := loggerCfg.Params["cluster"]
			proxyCfg := cfg.Proxies[cluster]
			if proxyCfg == nil {
				proxyCfg = cfg.Proxies[cfg.DefaultCluster]
			}
			// If the log topic is not specified then "udplog" is assumed.
			topic := loggerCfg.Params["topic"]
			if topic == "" {
				topic = "udplog"
			}
			h, err := kafkahook.New(kafkahook.Config{
				Endpoints: proxyCfg.Kafka.SeedPeers,
				Topic: topic,
			})
			if err != nil {
				continue
			}
			log.AddHook(levelfilter.New(h, loggerCfg.level()))
			nonStdoutEnabled = true
		}
	}
	// NOTE(review): this silences stdout when console was not configured OR
	// when any non-stdout logger exists — i.e. configuring both "console"
	// and "syslog" still discards stdout. Confirm this is intended.
	if !stdoutEnabled || nonStdoutEnabled {
		log.SetOutput(ioutil.Discard)
	}
	// NOTE(review): saramaLogger keeps its default output even when stdout
	// logging is discarded above — confirm sarama output should stay enabled.
	saramaLogger := log.New()
	saramaLogger.Formatter = &saramaFormatter{formatter}
	sarama.Logger = saramaLogger
	zk.DefaultLogger = log.StandardLogger()
	return nil
}
// loggerCfg represents a configuration of an individual logger.
type loggerCfg struct {
	// Name defines a logger to be used. It can be one of: console, syslog, or
	// udplog.
	Name string `json:"name"`
	// Severity indicates the minimum severity a logger will be logging messages at.
	Severity string `json:"severity"`
	// Params holds logger-specific parameters (e.g. "cluster" and "topic"
	// for udplog).
	Params map[string]string `json:"params"`
}
// level converts the configured severity string to a logrus level,
// defaulting to warning when the severity is missing or invalid.
func (lc *loggerCfg) level() log.Level {
	if parsed, err := log.ParseLevel(lc.Severity); err == nil {
		return parsed
	}
	return log.WarnLevel
}
// saramaFormatter is a sirupsen/logrus formatter that strips trailing new
// lines from the log lines before delegating to the wrapped formatter.
type saramaFormatter struct {
	// parentFormatter performs the actual formatting after cleanup.
	parentFormatter log.Formatter
}
// Format strips a single trailing newline from the entry message and then
// delegates formatting to the parent formatter.
func (f *saramaFormatter) Format(entry *log.Entry) ([]byte, error) {
	if n := len(entry.Message); n > 0 && entry.Message[n-1] == '\n' {
		entry.Message = entry.Message[:n-1]
	}
	return f.parentFormatter.Format(entry)
}
Fix Shopify/sarama log output
Output to console should only be enabled if it is
explicitly configured.
package logging
import (
"encoding/json"
"io/ioutil"
"log/syslog"
"github.com/Shopify/sarama"
"github.com/mailgun/kafka-pixy/config"
"github.com/mailgun/logrus-hooks/kafkahook"
"github.com/mailgun/logrus-hooks/levelfilter"
"github.com/pkg/errors"
"github.com/samuel/go-zookeeper/zk"
log "github.com/sirupsen/logrus"
"github.com/sirupsen/logrus/hooks/syslog"
)
// Init initializes sirupsen/logrus hooks from the JSON config string. It also
// sets the sirupsen/logrus as a logger for 3rd party libraries.
//
// jsonCfg is a JSON array of loggerCfg objects; cfg supplies Kafka proxy
// settings and is required only when a "udplog" logger is configured.
func Init(jsonCfg string, cfg *config.App) error {
	var loggingCfg []loggerCfg
	if err := json.Unmarshal([]byte(jsonCfg), &loggingCfg); err != nil {
		return errors.Wrap(err, "failed to parse logger config")
	}
	formatter := &textFormatter{}
	log.SetFormatter(formatter)
	stdoutEnabled := false
	nonStdoutEnabled := false
	for _, loggerCfg := range loggingCfg {
		switch loggerCfg.Name {
		case "console":
			stdoutEnabled = true
		case "syslog":
			// Hook creation failures are deliberately skipped (best effort).
			h, err := logrus_syslog.NewSyslogHook("udp", "127.0.0.1:514", syslog.LOG_INFO|syslog.LOG_MAIL, "kafka-pixy")
			if err != nil {
				continue
			}
			log.AddHook(levelfilter.New(h, loggerCfg.level()))
			nonStdoutEnabled = true
		case "udplog":
			if cfg == nil {
				return errors.Errorf("App config must be provided")
			}
			// If a Kafka cluster is not specified in logging config or does
			// not exist in the Kafka-Pixy config, then the default cluster is
			// used.
			cluster := loggerCfg.Params["cluster"]
			proxyCfg := cfg.Proxies[cluster]
			if proxyCfg == nil {
				proxyCfg = cfg.Proxies[cfg.DefaultCluster]
			}
			// If the log topic is not specified then "udplog" is assumed.
			topic := loggerCfg.Params["topic"]
			if topic == "" {
				topic = "udplog"
			}
			h, err := kafkahook.New(kafkahook.Config{
				Endpoints: proxyCfg.Kafka.SeedPeers,
				Topic: topic,
			})
			if err != nil {
				continue
			}
			log.AddHook(levelfilter.New(h, loggerCfg.level()))
			nonStdoutEnabled = true
		}
	}
	// Route sarama's logging through our formatter and use the standard
	// logger for the zookeeper client.
	saramaLogger := log.New()
	saramaLogger.Formatter = &saramaFormatter{formatter}
	sarama.Logger = saramaLogger
	zk.DefaultLogger = log.StandardLogger()
	// Console output (including sarama's) is discarded unless "console" was
	// explicitly configured and no non-stdout logger took over.
	if !stdoutEnabled || nonStdoutEnabled {
		log.SetOutput(ioutil.Discard)
		saramaLogger.Out = ioutil.Discard
	}
	return nil
}
// loggerCfg represents a configuration of an individual logger.
type loggerCfg struct {
	// Name defines a logger to be used. It can be one of: console, syslog, or
	// udplog.
	Name string `json:"name"`
	// Severity indicates the minimum severity a logger will be logging messages at.
	Severity string `json:"severity"`
	// Params holds logger-specific parameters (e.g. "cluster" and "topic"
	// for udplog).
	Params map[string]string `json:"params"`
}
// level converts the configured severity string to a logrus level,
// defaulting to warning when the severity is missing or invalid.
func (lc *loggerCfg) level() log.Level {
	if parsed, err := log.ParseLevel(lc.Severity); err == nil {
		return parsed
	}
	return log.WarnLevel
}
// saramaFormatter is a sirupsen/logrus formatter that strips trailing new
// lines from the log lines before delegating to the wrapped formatter.
type saramaFormatter struct {
	// parentFormatter performs the actual formatting after cleanup.
	parentFormatter log.Formatter
}
// Format strips a single trailing newline from the entry message and then
// delegates formatting to the parent formatter.
func (f *saramaFormatter) Format(entry *log.Entry) ([]byte, error) {
	if n := len(entry.Message); n > 0 && entry.Message[n-1] == '\n' {
		entry.Message = entry.Message[:n-1]
	}
	return f.parentFormatter.Format(entry)
}
|
// Copyright 2017 Zack Guo <zack.y.guo@gmail.com>. All rights reserved.
// Use of this source code is governed by a MIT license that can
// be found in the LICENSE file.
package termui
import (
"fmt"
)
// This is the implementation of multi-colored or stacked bar graph. This is different from default barGraph which is implemented in bar.go
// Multi-Colored-BarChart creates multiple bars in a widget:
/*
bc := termui.NewMBarChart()
data := make([][]int, 2)
data[0] := []int{3, 2, 5, 7, 9, 4}
data[1] := []int{7, 8, 5, 3, 1, 6}
bclabels := []string{"S0", "S1", "S2", "S3", "S4", "S5"}
bc.BorderLabel = "Bar Chart"
bc.Data = data
bc.Width = 26
bc.Height = 10
bc.DataLabels = bclabels
bc.TextColor = termui.ColorGreen
bc.BarColor = termui.ColorRed
bc.NumColor = termui.ColorYellow
*/
// MBarChart renders a stacked (multi-colored) bar chart widget. Data[i] is
// the i-th stack; all stacks share the same x positions and labels.
type MBarChart struct {
	Block
	BarColor [NumberofColors]Attribute // per-stack bar color
	TextColor Attribute // label color
	NumColor [NumberofColors]Attribute // per-stack value color
	Data [NumberofColors][]int // one slice per stack; nil terminates the stack list
	DataLabels []string // x-axis labels, one per bar
	BarWidth int // width of each bar in cells
	BarGap int // gap between adjacent bars in cells
	labels [][]rune // labels trimmed to BarWidth
	dataNum [NumberofColors][][]rune // rendered value strings per stack/bar
	numBar int // number of bars that fit in the inner area
	scale float64 // data units per vertical cell
	max int // maximum stacked sum (or value forced via SetMax)
	minDataLen int // shortest stack length; bounds how many bars are drawn
	numStack int // number of non-nil stacks in Data
	ShowScale bool // when true, draw 0 and max on the left edge
	maxScale []rune // rendered max value for the scale
}
// NewMBarChart returns a new *MBarChart with the current theme applied and
// default bar geometry (width 3, gap 1).
func NewMBarChart() *MBarChart {
	bc := &MBarChart{Block: *NewBlock()}
	bc.BarColor[0] = ThemeAttr("mbarchart.bar.bg")
	bc.NumColor[0] = ThemeAttr("mbarchart.num.fg")
	bc.TextColor = ThemeAttr("mbarchart.text.fg")
	bc.BarGap = 1
	bc.BarWidth = 3
	return bc
}
// layout precomputes everything Buffer needs: how many bars fit, trimmed
// labels and value strings, per-stack colors, the maximum stacked sum, and
// the vertical scale.
func (bc *MBarChart) layout() {
	bc.numBar = bc.innerArea.Dx() / (bc.BarGap + bc.BarWidth)
	bc.labels = make([][]rune, bc.numBar)
	DataLen := 0
	LabelLen := len(bc.DataLabels)
	bc.minDataLen = 9999 //Set this to some very hight value so that we find the minimum one We want to know which array among data[][] has got the least length
	// We need to know how many stack/data array data[0] , data[1] are there
	for i := 0; i < len(bc.Data); i++ {
		if bc.Data[i] == nil {
			break
		}
		DataLen++
	}
	bc.numStack = DataLen
	//We need to know what is the minimum size of data array data[0] could have 10 elements data[1] could have only 5, so we plot only 5 bar graphs
	for i := 0; i < DataLen; i++ {
		if bc.minDataLen > len(bc.Data[i]) {
			bc.minDataLen = len(bc.Data[i])
		}
	}
	if LabelLen > bc.minDataLen {
		LabelLen = bc.minDataLen
	}
	for i := 0; i < LabelLen && i < bc.numBar; i++ {
		bc.labels[i] = trimStr2Runes(bc.DataLabels[i], bc.BarWidth)
	}
	for i := 0; i < bc.numStack; i++ {
		bc.dataNum[i] = make([][]rune, len(bc.Data[i]))
		//For each stack of bar calculate the rune
		// NOTE(review): the "i < bc.numBar" guard below looks like it was
		// meant to be "j < bc.numBar" (j indexes bars here) — confirm.
		for j := 0; j < LabelLen && i < bc.numBar; j++ {
			n := bc.Data[i][j]
			s := fmt.Sprint(n)
			bc.dataNum[i][j] = trimStr2Runes(s, bc.BarWidth)
		}
		//If color is not defined by default then populate a color that is different from the previous bar
		if bc.BarColor[i] == ColorDefault && bc.NumColor[i] == ColorDefault {
			if i == 0 {
				bc.BarColor[i] = ColorBlack
			} else {
				bc.BarColor[i] = bc.BarColor[i-1] + 1
				if bc.BarColor[i] > NumberofColors {
					bc.BarColor[i] = ColorBlack
				}
			}
			bc.NumColor[i] = (NumberofColors + 1) - bc.BarColor[i] //Make NumColor opposite of barColor for visibility
		}
	}
	//If Max value is not set then we have to populate, this time the max value will be max(sum(d1[0],d2[0],d3[0]) .... sum(d1[n], d2[n], d3[n]))
	if bc.max == 0 {
		bc.max = -1
	}
	for i := 0; i < bc.minDataLen && i < LabelLen; i++ {
		var dsum int
		for j := 0; j < bc.numStack; j++ {
			dsum += bc.Data[j][i]
		}
		if dsum > bc.max {
			bc.max = dsum
		}
	}
	//Finally Calculate max scale: reserve one extra row for the scale labels when shown.
	if bc.ShowScale {
		s := fmt.Sprintf("%d", bc.max)
		bc.maxScale = trimStr2Runes(s, len(s))
		bc.scale = float64(bc.max) / float64(bc.innerArea.Dy()-2)
	} else {
		bc.scale = float64(bc.max) / float64(bc.innerArea.Dy()-1)
	}
}
// SetMax overrides the computed chart maximum. Non-positive values are ignored.
func (bc *MBarChart) SetMax(max int) {
	if max <= 0 {
		return
	}
	bc.max = max
}
// Buffer implements Bufferer interface. It draws, for each bar: the stacked
// segments bottom-up, the x-axis label, and the per-segment value, then the
// optional 0/max scale on the left edge.
func (bc *MBarChart) Buffer() Buffer {
	buf := bc.Block.Buffer()
	bc.layout()
	var oftX int
	for i := 0; i < bc.numBar && i < bc.minDataLen && i < len(bc.DataLabels); i++ {
		ph := 0 //Previous Height to stack up
		oftX = i * (bc.BarWidth + bc.BarGap)
		for i1 := 0; i1 < bc.numStack; i1++ {
			h := int(float64(bc.Data[i1][i]) / bc.scale)
			// plot bars
			for j := 0; j < bc.BarWidth; j++ {
				for k := 0; k < h; k++ {
					c := Cell{
						Ch: ' ',
						Bg: bc.BarColor[i1],
					}
					if bc.BarColor[i1] == ColorDefault { // when color is default, space char treated as transparent!
						c.Bg |= AttrReverse
					}
					x := bc.innerArea.Min.X + i*(bc.BarWidth+bc.BarGap) + j
					y := bc.innerArea.Min.Y + bc.innerArea.Dy() - 2 - k - ph
					buf.Set(x, y, c)
				}
			}
			ph += h
		}
		// plot text
		for j, k := 0, 0; j < len(bc.labels[i]); j++ {
			w := charWidth(bc.labels[i][j])
			c := Cell{
				Ch: bc.labels[i][j],
				Bg: bc.Bg,
				Fg: bc.TextColor,
			}
			y := bc.innerArea.Min.Y + bc.innerArea.Dy() - 1
			// NOTE(review): uses innerArea.Max.X here while the bars above use
			// Min.X — this looks like it misplaces labels; confirm.
			x := bc.innerArea.Max.X + oftX + ((bc.BarWidth - len(bc.labels[i])) / 2) + k
			buf.Set(x, y, c)
			k += w
		}
		// plot num: values are drawn at the bottom cell of each stacked segment
		ph = 0 //re-initialize previous height
		for i1 := 0; i1 < bc.numStack; i1++ {
			h := int(float64(bc.Data[i1][i]) / bc.scale)
			for j := 0; j < len(bc.dataNum[i1][i]) && h > 0; j++ {
				c := Cell{
					Ch: bc.dataNum[i1][i][j],
					Fg: bc.NumColor[i1],
					Bg: bc.BarColor[i1],
				}
				if bc.BarColor[i1] == ColorDefault { // the same as above
					c.Bg |= AttrReverse
				}
				if h == 0 {
					c.Bg = bc.Bg
				}
				x := bc.innerArea.Min.X + oftX + (bc.BarWidth-len(bc.dataNum[i1][i]))/2 + j
				y := bc.innerArea.Min.Y + bc.innerArea.Dy() - 2 - ph
				buf.Set(x, y, c)
			}
			ph += h
		}
	}
	if bc.ShowScale {
		//Currently bar graph only supports data range from 0 to MAX
		//Plot 0
		c := Cell{
			Ch: '0',
			Bg: bc.Bg,
			Fg: bc.TextColor,
		}
		y := bc.innerArea.Min.Y + bc.innerArea.Dy() - 2
		x := bc.X
		buf.Set(x, y, c)
		//Plot the maximum scale value
		for i := 0; i < len(bc.maxScale); i++ {
			c := Cell{
				Ch: bc.maxScale[i],
				Bg: bc.Bg,
				Fg: bc.TextColor,
			}
			y := bc.innerArea.Min.Y
			x := bc.X + i
			buf.Set(x, y, c)
		}
	}
	return buf
}
add custom number format func to mbarchart
// Copyright 2017 Zack Guo <zack.y.guo@gmail.com>. All rights reserved.
// Use of this source code is governed by a MIT license that can
// be found in the LICENSE file.
package termui
import (
"fmt"
)
// This is the implementation of multi-colored or stacked bar graph. This is different from default barGraph which is implemented in bar.go
// Multi-Colored-BarChart creates multiple bars in a widget:
/*
bc := termui.NewMBarChart()
data := make([][]int, 2)
data[0] := []int{3, 2, 5, 7, 9, 4}
data[1] := []int{7, 8, 5, 3, 1, 6}
bclabels := []string{"S0", "S1", "S2", "S3", "S4", "S5"}
bc.BorderLabel = "Bar Chart"
bc.Data = data
bc.Width = 26
bc.Height = 10
bc.DataLabels = bclabels
bc.TextColor = termui.ColorGreen
bc.BarColor = termui.ColorRed
bc.NumColor = termui.ColorYellow
*/
// MBarChart is a multi-colored / stacked bar chart widget. It differs from
// the default BarChart in bar.go in that each bar is a vertical stack of up
// to NumberofColors segments, one per data series.
type MBarChart struct {
	Block
	BarColor   [NumberofColors]Attribute // background color of each stack segment
	TextColor  Attribute                 // color of the labels drawn under the bars
	NumColor   [NumberofColors]Attribute // foreground color of the numbers drawn on segments
	NumFmt     func(int) string          // formats a data value for on-segment display
	Data       [NumberofColors][]int     // one slice per stack; the first nil entry ends the series
	DataLabels []string                  // per-bar labels
	BarWidth   int                       // width of each bar, in cells
	BarGap     int                       // gap between adjacent bars, in cells
	labels     [][]rune                  // DataLabels trimmed to BarWidth (computed by layout)
	dataNum    [NumberofColors][][]rune  // formatted values trimmed to BarWidth (computed by layout)
	numBar     int                       // number of bars that fit in the inner area
	scale      float64                   // data units represented by one vertical cell
	max        int                       // data maximum (derived by layout, or forced via SetMax)
	minDataLen int                       // length of the shortest populated data series
	numStack   int                       // number of populated data series
	ShowScale  bool                      // when true, draw '0' and the max value along the left edge
	maxScale   []rune                    // rendering of max for the scale ruler
}
// NewBarChart returns a new *BarChart with current theme.
// NewMBarChart returns a new *MBarChart initialized from the current theme,
// with a one-cell gap between bars, three-cell-wide bars, and a plain
// decimal number formatter.
func NewMBarChart() *MBarChart {
	chart := MBarChart{Block: *NewBlock()}
	chart.BarColor[0] = ThemeAttr("mbarchart.bar.bg")
	chart.NumColor[0] = ThemeAttr("mbarchart.num.fg")
	chart.TextColor = ThemeAttr("mbarchart.text.fg")
	chart.NumFmt = func(n int) string { return fmt.Sprint(n) }
	chart.BarGap = 1
	chart.BarWidth = 3
	return &chart
}
// layout recomputes the chart's derived state (bar count, trimmed labels,
// per-stack number runes, fallback colors, the data maximum and the vertical
// scale) from the current Data/DataLabels and the inner drawing area. It is
// invoked by Buffer before every render.
func (bc *MBarChart) layout() {
	// How many bars fit horizontally inside the inner area.
	bc.numBar = bc.innerArea.Dx() / (bc.BarGap + bc.BarWidth)
	bc.labels = make([][]rune, bc.numBar)
	DataLen := 0
	LabelLen := len(bc.DataLabels)
	bc.minDataLen = 9999 //Set this to some very high value so that we find the minimum one. We want to know which array among data[][] has got the least length

	// Count populated stacks (data[0], data[1], ...); the first nil entry
	// terminates the scan.
	for i := 0; i < len(bc.Data); i++ {
		if bc.Data[i] == nil {
			break
		}
		DataLen++
	}
	bc.numStack = DataLen

	// Find the shortest stack: if data[0] has 10 elements and data[1] only
	// has 5, then only 5 bars can be plotted.
	for i := 0; i < DataLen; i++ {
		if bc.minDataLen > len(bc.Data[i]) {
			bc.minDataLen = len(bc.Data[i])
		}
	}

	// Never use more labels than there are plottable bars.
	if LabelLen > bc.minDataLen {
		LabelLen = bc.minDataLen
	}

	// Trim each visible label to the bar width.
	for i := 0; i < LabelLen && i < bc.numBar; i++ {
		bc.labels[i] = trimStr2Runes(bc.DataLabels[i], bc.BarWidth)
	}

	for i := 0; i < bc.numStack; i++ {
		bc.dataNum[i] = make([][]rune, len(bc.Data[i]))
		// For each stack of a bar, pre-render its value through NumFmt and
		// trim the result to the bar width.
		// NOTE(review): "i < bc.numBar" is invariant inside this j-loop and
		// looks like it was meant to be "j < bc.numBar" — confirm against
		// upstream before changing.
		for j := 0; j < LabelLen && i < bc.numBar; j++ {
			n := bc.Data[i][j]
			s := bc.NumFmt(n)
			bc.dataNum[i][j] = trimStr2Runes(s, bc.BarWidth)
		}
		// If colors were not set explicitly, pick a bar color different from
		// the previous stack's, and a contrasting number color.
		if bc.BarColor[i] == ColorDefault && bc.NumColor[i] == ColorDefault {
			if i == 0 {
				bc.BarColor[i] = ColorBlack
			} else {
				bc.BarColor[i] = bc.BarColor[i-1] + 1
				// NOTE(review): ">" allows the value NumberofColors itself
				// before wrapping — confirm whether ">=" was intended.
				if bc.BarColor[i] > NumberofColors {
					bc.BarColor[i] = ColorBlack
				}
			}
			bc.NumColor[i] = (NumberofColors + 1) - bc.BarColor[i] //Make NumColor opposite of barColor for visibility
		}
	}

	// If Max was not set via SetMax, derive it as the largest per-bar stack
	// sum: max over i of sum(data[0][i], data[1][i], ...).
	if bc.max == 0 {
		bc.max = -1
	}
	for i := 0; i < bc.minDataLen && i < LabelLen; i++ {
		var dsum int
		for j := 0; j < bc.numStack; j++ {
			dsum += bc.Data[j][i]
		}
		if dsum > bc.max {
			bc.max = dsum
		}
	}

	// Finally calculate the vertical scale (data units per cell); when the
	// scale ruler is shown an extra row is reserved for it.
	if bc.ShowScale {
		s := bc.NumFmt(bc.max)
		bc.maxScale = trimStr2Runes(s, len(s))
		bc.scale = float64(bc.max) / float64(bc.innerArea.Dy()-2)
	} else {
		bc.scale = float64(bc.max) / float64(bc.innerArea.Dy()-1)
	}
}
// SetMax overrides the chart's computed data maximum. Non-positive values
// are ignored so the chart keeps auto-scaling from its data.
func (bc *MBarChart) SetMax(max int) {
	if max <= 0 {
		return
	}
	bc.max = max
}
// Buffer implements Bufferer interface.
func (bc *MBarChart) Buffer() Buffer {
buf := bc.Block.Buffer()
bc.layout()
var oftX int
for i := 0; i < bc.numBar && i < bc.minDataLen && i < len(bc.DataLabels); i++ {
ph := 0 //Previous Height to stack up
oftX = i * (bc.BarWidth + bc.BarGap)
for i1 := 0; i1 < bc.numStack; i1++ {
h := int(float64(bc.Data[i1][i]) / bc.scale)
// plot bars
for j := 0; j < bc.BarWidth; j++ {
for k := 0; k < h; k++ {
c := Cell{
Ch: ' ',
Bg: bc.BarColor[i1],
}
if bc.BarColor[i1] == ColorDefault { // when color is default, space char treated as transparent!
c.Bg |= AttrReverse
}
x := bc.innerArea.Min.X + i*(bc.BarWidth+bc.BarGap) + j
y := bc.innerArea.Min.Y + bc.innerArea.Dy() - 2 - k - ph
buf.Set(x, y, c)
}
}
ph += h
}
// plot text
for j, k := 0, 0; j < len(bc.labels[i]); j++ {
w := charWidth(bc.labels[i][j])
c := Cell{
Ch: bc.labels[i][j],
Bg: bc.Bg,
Fg: bc.TextColor,
}
y := bc.innerArea.Min.Y + bc.innerArea.Dy() - 1
x := bc.innerArea.Max.X + oftX + ((bc.BarWidth - len(bc.labels[i])) / 2) + k
buf.Set(x, y, c)
k += w
}
// plot num
ph = 0 //re-initialize previous height
for i1 := 0; i1 < bc.numStack; i1++ {
h := int(float64(bc.Data[i1][i]) / bc.scale)
for j := 0; j < len(bc.dataNum[i1][i]) && h > 0; j++ {
c := Cell{
Ch: bc.dataNum[i1][i][j],
Fg: bc.NumColor[i1],
Bg: bc.BarColor[i1],
}
if bc.BarColor[i1] == ColorDefault { // the same as above
c.Bg |= AttrReverse
}
if h == 0 {
c.Bg = bc.Bg
}
x := bc.innerArea.Min.X + oftX + (bc.BarWidth-len(bc.dataNum[i1][i]))/2 + j
y := bc.innerArea.Min.Y + bc.innerArea.Dy() - 2 - ph
buf.Set(x, y, c)
}
ph += h
}
}
if bc.ShowScale {
//Currently bar graph only supprts data range from 0 to MAX
//Plot 0
c := Cell{
Ch: '0',
Bg: bc.Bg,
Fg: bc.TextColor,
}
y := bc.innerArea.Min.Y + bc.innerArea.Dy() - 2
x := bc.X
buf.Set(x, y, c)
//Plot the maximum sacle value
for i := 0; i < len(bc.maxScale); i++ {
c := Cell{
Ch: bc.maxScale[i],
Bg: bc.Bg,
Fg: bc.TextColor,
}
y := bc.innerArea.Min.Y
x := bc.X + i
buf.Set(x, y, c)
}
}
return buf
}
|
package rock7
import (
"bytes"
"encoding/hex"
"fmt"
"github.com/kellydunn/golang-geo"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"time"
)
func fakeRequest(handler http.Handler, req *http.Request) (code int, returnBody string) {
w := httptest.NewRecorder()
handler.ServeHTTP(w, req)
return w.Code, w.Body.String()
}
func constructRequest(method, msg string, params ...url.Values) *http.Request {
var param url.Values
if len(params) == 0 {
param = url.Values{}
param.Add("imei", "123456789")
param.Add("momsn", "12345")
param.Add("transmit_time", time.Now().UTC().Format("06-02-01 15:04:05"))
param.Add("iridium_latitude", "54.123")
param.Add("iridium_longitude", "23.987")
param.Add("iridium_cep", "2")
param.Add("data", hex.EncodeToString([]byte(msg)))
} else {
param = params[0]
}
req, _ := http.NewRequest(method, "http://localhost/recieve", bytes.NewBufferString(param.Encode()))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
return req
}
// compareMessage reports whether msg matches the canonical test fixture:
// body as payload, MOMSN 12345, IMEI "123456789", CEP 2, the fixed iridium
// position, and a transmit time within one second of now (UTC).
func compareMessage(msg Message, body string) bool {
	age := time.Now().UTC().Sub(msg.TransmitTime)
	if msg.Data != body || msg.MOMSN != 12345 || msg.IMEI != "123456789" {
		return false
	}
	if msg.IridiumCep != 2 || *msg.IridumPos != *geo.NewPoint(54.123, 23.987) {
		return false
	}
	return age.Seconds() < 1
}
// TestInterface verifies at compile/registration time that the value
// returned by NewEndpoint satisfies http.Handler.
func TestInterface(t *testing.T) {
	var handler http.Handler = NewEndpoint()
	http.Handle("/recieve", handler)
}
// TestSimpleMessage posts five well-formed messages and checks that each is
// delivered on the endpoint's channel in order.
//
// The requests are issued from a separate goroutine because the channel is
// consumed on the test goroutine. Per the testing package, t.Fatalf must
// not be called from a non-test goroutine (it calls runtime.Goexit and only
// stops that goroutine), so the sender reports failures with t.Errorf.
func TestSimpleMessage(t *testing.T) {
	endpoint := NewEndpoint()
	go func() {
		for i := 0; i < 5; i++ {
			code, _ := fakeRequest(endpoint, constructRequest("POST", fmt.Sprintf("Request %v", i)))
			if code != 200 {
				t.Errorf("Recieved non-OK status %v", code)
				return
			}
		}
	}()
	for i := 0; i < 5; i++ {
		message := <-endpoint.GetChannel()
		if !compareMessage(message, fmt.Sprintf("Request %v", i)) {
			t.Fatalf("Failed")
		}
	}
}
// TestWrongMethod sends a GET request and expects a non-200 status and no
// message on the endpoint's channel.
//
// The watcher goroutine uses t.Error rather than t.Fatal: Fatal from a
// non-test goroutine does not stop the test (it only exits that goroutine).
// When, as expected, nothing arrives, the goroutine stays blocked on the
// channel for the remainder of the test binary's lifetime.
func TestWrongMethod(t *testing.T) {
	endpoint := NewEndpoint()
	go func() {
		<-endpoint.GetChannel()
		t.Error("Recieved a message, even though none should have been sent.")
	}()
	if code, _ := fakeRequest(endpoint, constructRequest("GET", "RequestData")); code == 200 {
		t.Fatalf("Non-OK code expected, got %v", code)
	}
}
// TestWrongMomsn sends a form whose "momsn" field is not numeric and
// expects the handler to panic while parsing it. The deferred recover turns
// the expected panic into a pass; reaching the end of the function means no
// panic occurred and the test fails.
func TestWrongMomsn(t *testing.T) {
	defer func() {
		// recover() is non-nil only if the handler panicked as expected.
		if r := recover(); r == nil {
			t.Fatalf("Call did not fail\n")
		}
	}()
	endpoint := NewEndpoint()
	param := url.Values{}
	param.Add("imei", "123456789")
	param.Add("momsn", "abc") // non-numeric momsn should trigger the panic
	req := constructRequest("POST", "RequestData", param)
	fakeRequest(endpoint, req)
	t.Fatal("This point should not have been reached.")
}
test for malformed requests
package rock7
import (
"bytes"
"encoding/hex"
"fmt"
"github.com/kellydunn/golang-geo"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"time"
)
func fakeRequest(handler http.Handler, req *http.Request) (code int, returnBody string) {
w := httptest.NewRecorder()
handler.ServeHTTP(w, req)
return w.Code, w.Body.String()
}
// constructRequest builds a form-encoded request to the receive endpoint.
// When no params are given, a complete valid RockBLOCK-style parameter set
// is generated and msg is hex-encoded into "data"; otherwise the first
// supplied url.Values is used as-is and msg is ignored.
func constructRequest(method, msg string, params ...url.Values) *http.Request {
	var param url.Values
	if len(params) == 0 {
		param = url.Values{}
		param.Add("imei", "123456789")
		param.Add("momsn", "12345")
		// Layout is Go's reference time (Jan 2 15:04:05 2006), rendered
		// here as yy-dd-mm hh:mm:ss.
		param.Add("transmit_time", time.Now().UTC().Format("06-02-01 15:04:05"))
		param.Add("iridium_latitude", "54.123")
		param.Add("iridium_longitude", "23.987")
		param.Add("iridium_cep", "2")
		param.Add("data", hex.EncodeToString([]byte(msg)))
	} else {
		param = params[0]
	}
	// NOTE: "recieve" is spelled to match the handler's registered path.
	req, _ := http.NewRequest(method, "http://localhost/recieve", bytes.NewBufferString(param.Encode()))
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	return req
}
// compareMessage reports whether msg matches the canonical fixture produced
// by constructRequest: body as payload, MOMSN 12345, IMEI "123456789",
// CEP 2, the fixed iridium position, and a transmit time within one second
// of now (UTC).
func compareMessage(msg Message, body string) bool {
	diff := time.Now().UTC().Sub(msg.TransmitTime)
	return msg.Data == body &&
		msg.MOMSN == 12345 &&
		msg.IMEI == "123456789" &&
		msg.IridiumCep == 2 &&
		*msg.IridumPos == *geo.NewPoint(54.123, 23.987) &&
		diff.Seconds() < 1
}
// TestInterface checks, via registration on the default mux, that the value
// returned by NewEndpoint satisfies http.Handler.
func TestInterface(t *testing.T) {
	http.Handle("/recieve", NewEndpoint())
}
// TestSimpleMessage posts five well-formed messages and checks that each is
// delivered on the endpoint's channel in order.
//
// The requests are sent from a separate goroutine because the channel is
// consumed on the test goroutine. Per the testing package, t.Fatalf must
// not be called from a non-test goroutine (it calls runtime.Goexit and only
// stops that goroutine), so the sender reports failures with t.Errorf.
func TestSimpleMessage(t *testing.T) {
	endpoint := NewEndpoint()
	go func() {
		for i := 0; i < 5; i++ {
			code, _ := fakeRequest(endpoint, constructRequest("POST", fmt.Sprintf("Request %v", i)))
			if code != 200 {
				t.Errorf("Recieved non-OK status %v", code)
				return
			}
		}
	}()
	for i := 0; i < 5; i++ {
		message := <-endpoint.GetChannel()
		if !compareMessage(message, fmt.Sprintf("Request %v", i)) {
			t.Fatalf("Failed")
		}
	}
}
// TestWrongMethod sends a GET request and expects a non-200 status and no
// message on the endpoint's channel.
//
// The watcher goroutine uses t.Error rather than t.Fatal: Fatal from a
// non-test goroutine does not stop the test. When, as expected, nothing
// arrives, the goroutine stays blocked on the channel for the remainder of
// the test binary's lifetime.
func TestWrongMethod(t *testing.T) {
	endpoint := NewEndpoint()
	go func() {
		<-endpoint.GetChannel()
		t.Error("Recieved a message, even though none should have been sent.")
	}()
	if code, _ := fakeRequest(endpoint, constructRequest("GET", "RequestData")); code == 200 {
		t.Fatalf("Non-OK code expected, got %v", code)
	}
}
// TestWrongMomsn sends a non-numeric "momsn" field and expects the handler
// to panic while parsing it; the deferred recover converts the expected
// panic into a pass, and reaching the end of the function fails the test.
func TestWrongMomsn(t *testing.T) {
	defer func() {
		// recover() is non-nil only if the handler panicked as expected.
		if r := recover(); r == nil {
			t.Fatalf("Call did not fail\n")
		}
	}()
	endpoint := NewEndpoint()
	param := url.Values{}
	param.Add("imei", "123456789")
	param.Add("momsn", "abc") // non-numeric momsn should trigger the panic
	req := constructRequest("POST", "RequestData", param)
	fakeRequest(endpoint, req)
	t.Fatal("This point should not have been reached.")
}
// TestWrongTime sends a transmit_time that does not match the format the
// endpoint expects and expects the handler to panic while parsing it; the
// deferred recover converts the expected panic into a pass.
func TestWrongTime(t *testing.T) {
	defer func() {
		// A nil recover means no panic occurred, so the test fails.
		if r := recover(); r == nil {
			t.Fatalf("Call did not fail\n")
		}
	}()
	endpoint := NewEndpoint()
	param := url.Values{}
	param.Add("imei", "123456789")
	param.Add("momsn", "12")
	param.Add("transmit_time", "06/02/01") // malformed timestamp
	req := constructRequest("POST", "RequestData", param)
	fakeRequest(endpoint, req)
	t.Fatal("This point should not have been reached.")
}
// TestWrongGeoPos sends a non-numeric iridium_latitude and expects the
// handler to panic while parsing it; the deferred recover converts the
// expected panic into a pass.
func TestWrongGeoPos(t *testing.T) {
	defer func() {
		// A nil recover means no panic occurred, so the test fails.
		if r := recover(); r == nil {
			t.Fatalf("Call did not fail\n")
		}
	}()
	endpoint := NewEndpoint()
	param := url.Values{}
	param.Add("imei", "123456789")
	param.Add("momsn", "12")
	param.Add("transmit_time", "06-02-01 15:04:05")
	param.Add("iridium_latitude", "ab25ad8g.12") // not a parseable float
	req := constructRequest("POST", "RequestData", param)
	fakeRequest(endpoint, req)
	t.Fatal("This point should not have been reached.")
}
// TestWrongHex sends a "data" field that is not valid hexadecimal (odd
// length, non-hex characters) and expects the handler to panic while
// decoding it; the deferred recover converts the expected panic into a pass.
func TestWrongHex(t *testing.T) {
	defer func() {
		// A nil recover means no panic occurred, so the test fails.
		if r := recover(); r == nil {
			t.Fatalf("Call did not fail\n")
		}
	}()
	endpoint := NewEndpoint()
	param := url.Values{}
	param.Add("imei", "123456789")
	param.Add("momsn", "12345")
	param.Add("transmit_time", time.Now().UTC().Format("06-02-01 15:04:05"))
	param.Add("iridium_latitude", "54.123")
	param.Add("iridium_longitude", "23.987")
	param.Add("iridium_cep", "2")
	param.Add("data", "afadfpoi3a5oudf") // not valid hex
	req := constructRequest("POST", "RequestData", param)
	fakeRequest(endpoint, req)
	t.Fatal("This point should not have been reached.")
}
|
package rock7
import (
"bytes"
"encoding/hex"
"fmt"
"github.com/kellydunn/golang-geo"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"time"
)
// fakeRequest builds the canned POST request carrying msg, serves it with
// handler via an in-memory recorder, and returns the recorded status code
// and response body.
func fakeRequest(handler http.Handler, msg string) (code int, returnBody string) {
	recorder := httptest.NewRecorder()
	handler.ServeHTTP(recorder, constructRequest(msg))
	return recorder.Code, recorder.Body.String()
}
func constructRequest(msg string) *http.Request {
param := url.Values{}
param.Add("imei", "123456789")
param.Add("momsn", "12345")
param.Add("transmit_time", time.Now().Format("12-10-10 10:41:50"))
param.Add("iridium_latitude", "54.123")
param.Add("iridium_longitude", "23.987")
param.Add("iridium_cep", "2")
param.Add("data", hex.EncodeToString([]byte(msg)))
req, _ := http.NewRequest("POST", "http://localhost/recieve", bytes.NewBufferString(param.Encode()))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
return req
}
// compareMessage reports whether msg matches the canonical fixture: body as
// payload, MOMSN 12345, IMEI "123456789", CEP 2 and the fixed iridium
// position.
func compareMessage(msg Message, body string) bool {
	return msg.Data == body && msg.MOMSN == 12345 && msg.IMEI == "123456789" && msg.IridiumCep == 2 && *msg.IridumPos == *geo.NewPoint(54.123, 23.987)
}
// TestInterface checks, via registration on the default mux, that the value
// returned by NewEndpoint satisfies http.Handler.
func TestInterface(t *testing.T) {
	http.Handle("/recieve", NewEndpoint())
}
// TestSimpleMessage pushes five messages through the endpoint from a sender
// goroutine and verifies each one arrives on the channel in order.
func TestSimpleMessage(t *testing.T) {
	endpoint := NewEndpoint()
	go func() {
		for i := 0; i < 5; i++ {
			fakeRequest(endpoint, fmt.Sprintf("Request %v", i))
		}
	}()
	for i := 0; i < 5; i++ {
		if msg := <-endpoint.GetChannel(); !compareMessage(msg, fmt.Sprintf("Request %v", i)) {
			t.Fatalf("Failed")
		}
	}
}
Add test
package rock7
import (
"bytes"
"encoding/hex"
"fmt"
"github.com/kellydunn/golang-geo"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"time"
)
// fakeRequest builds the canned request with the given HTTP method carrying
// msg, serves it with handler, and returns the recorded status code and
// response body.
func fakeRequest(handler http.Handler, method, msg string) (code int, returnBody string) {
	req := constructRequest(method, msg)
	w := httptest.NewRecorder()
	handler.ServeHTTP(w, req)
	return w.Code, w.Body.String()
}
func constructRequest(method, msg string) *http.Request {
param := url.Values{}
param.Add("imei", "123456789")
param.Add("momsn", "12345")
param.Add("transmit_time", time.Now().Format("12-10-10 10:41:50"))
param.Add("iridium_latitude", "54.123")
param.Add("iridium_longitude", "23.987")
param.Add("iridium_cep", "2")
param.Add("data", hex.EncodeToString([]byte(msg)))
req, _ := http.NewRequest(method, "http://localhost/recieve", bytes.NewBufferString(param.Encode()))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
return req
}
// compareMessage reports whether msg carries the canonical fixture: body as
// payload, MOMSN 12345, IMEI "123456789", CEP 2 and the fixed iridium
// position.
func compareMessage(msg Message, body string) bool {
	if msg.Data != body || msg.MOMSN != 12345 {
		return false
	}
	if msg.IMEI != "123456789" || msg.IridiumCep != 2 {
		return false
	}
	return *msg.IridumPos == *geo.NewPoint(54.123, 23.987)
}
// TestInterface checks, via registration on the default mux, that the value
// returned by NewEndpoint satisfies http.Handler.
func TestInterface(t *testing.T) {
	http.Handle("/recieve", NewEndpoint())
}
// TestSimpleMessage posts five messages from a sender goroutine and checks
// that each one arrives on the endpoint's channel in order.
func TestSimpleMessage(t *testing.T) {
	endpoint := NewEndpoint()
	go func() {
		for i := 0; i < 5; i++ {
			fakeRequest(endpoint, "POST", fmt.Sprintf("Request %v", i))
		}
	}()
	for i := 0; i < 5; i++ {
		if msg := <-endpoint.GetChannel(); !compareMessage(msg, fmt.Sprintf("Request %v", i)) {
			t.Fatalf("Failed")
		}
	}
}
// TestWrongMethod sends a GET request and expects a non-200 status and no
// delivered message.
//
// The watcher goroutine uses t.Error instead of t.Fatal: Fatal must only be
// called from the test goroutine (it runs runtime.Goexit and would not stop
// the test). The goroutine stays blocked on the channel when, as expected,
// nothing arrives.
func TestWrongMethod(t *testing.T) {
	endpoint := NewEndpoint()
	go func() {
		<-endpoint.GetChannel()
		t.Error("Recieved a message, even though none should have been sent.")
	}()
	if code, _ := fakeRequest(endpoint, "GET", "RequestData"); code == 200 {
		t.Fatalf("Non-OK code expected, got %v", code)
	}
}
|
package engine
import (
"fmt"
"log"
sm "github.com/Ariemeth/frame-assault-2/engine/shaderManager"
tm "github.com/Ariemeth/frame-assault-2/engine/textureManager"
"github.com/go-gl/gl/v4.1-core/gl"
"github.com/go-gl/glfw/v3.1/glfw"
"github.com/go-gl/mathgl/mgl32"
)
const windowWidth = 800
const windowHeight = 600
//Engine constitutes the rendering engine. It owns the GLFW window, the
//shader and texture managers, and the single scene that is updated and
//rendered each frame.
type Engine struct {
	window   *glfw.Window
	shaders  sm.ShaderManager
	textures tm.TextureManager
	scene    Scene // active scene; created in Init
}
//Init is called to initialize glfw and opengl. It creates the window, the
//shader and texture managers and an empty scene, terminating the process
//if glfw cannot be initialized.
func (e *Engine) Init() {
	err := glfw.Init()
	if err != nil {
		log.Fatalln("failed to initialize glfw:", err)
	}
	e.window = createWindow()
	initGL()
	e.shaders = sm.NewShaderManager()
	e.textures = tm.NewTextureManager()
	e.scene = NewScene()
}
//Run runs the main engine loop: it sets up the default shader's uniforms,
//uploads the demo cube geometry, then updates and renders until the window
//closes. glfw is terminated when the loop exits.
func (e *Engine) Run() {
	defer glfw.Terminate()

	program, isLoaded := e.shaders.GetShader(e.shaders.GetDefaultShader())
	if isLoaded {
		gl.UseProgram(program)
	} else {
		fmt.Println("Unable to load default shader")
	}

	//////////////////////////////
	// Static projection/camera uniforms for the demo cube.
	projection := mgl32.Perspective(mgl32.DegToRad(45.0), float32(windowWidth)/windowHeight, 0.1, 10.0)
	projectionUniform := gl.GetUniformLocation(program, gl.Str("projection\x00"))
	gl.UniformMatrix4fv(projectionUniform, 1, false, &projection[0])

	camera := mgl32.LookAtV(mgl32.Vec3{3, 3, 3}, mgl32.Vec3{0, 0, 0}, mgl32.Vec3{0, 1, 0})
	cameraUniform := gl.GetUniformLocation(program, gl.Str("camera\x00"))
	gl.UniformMatrix4fv(cameraUniform, 1, false, &camera[0])

	model := mgl32.Ident4()
	modelUniform := gl.GetUniformLocation(program, gl.Str("model\x00"))
	gl.UniformMatrix4fv(modelUniform, 1, false, &model[0])

	textureUniform := gl.GetUniformLocation(program, gl.Str("tex\x00"))
	gl.Uniform1i(textureUniform, 0)

	gl.BindFragDataLocation(program, 0, gl.Str("outputColor\x00"))

	// Load the texture
	e.textures.LoadTexture("assets/textures/square.png", "square")

	// Configure the vertex data: interleaved position (3 floats) and
	// texture coordinates (2 floats), stride 5*4 bytes per vertex.
	var vao uint32
	gl.GenVertexArrays(1, &vao)
	gl.BindVertexArray(vao)

	var vbo uint32
	gl.GenBuffers(1, &vbo)
	gl.BindBuffer(gl.ARRAY_BUFFER, vbo)
	gl.BufferData(gl.ARRAY_BUFFER, len(cubeVertices)*4, gl.Ptr(cubeVertices), gl.STATIC_DRAW)

	vertAttrib := uint32(gl.GetAttribLocation(program, gl.Str("vert\x00")))
	gl.EnableVertexAttribArray(vertAttrib)
	gl.VertexAttribPointer(vertAttrib, 3, gl.FLOAT, false, 5*4, gl.PtrOffset(0))

	texCoordAttrib := uint32(gl.GetAttribLocation(program, gl.Str("vertTexCoord\x00")))
	gl.EnableVertexAttribArray(texCoordAttrib)
	gl.VertexAttribPointer(texCoordAttrib, 2, gl.FLOAT, false, 5*4, gl.PtrOffset(3*4))

	angle := 0.0
	previousTime := glfw.GetTime()

	///////////////////////////////
	for !e.window.ShouldClose() {
		gl.Clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT)

		//Update: advance the cube rotation by the wall-clock time elapsed
		//since the previous frame, and let the scene update itself.
		time := glfw.GetTime()
		elapsed := time - previousTime
		previousTime = time

		e.scene.Update()

		angle += elapsed
		model = mgl32.HomogRotate3D(float32(angle), mgl32.Vec3{0, 1, 0})

		// Render
		e.scene.Render()

		gl.UseProgram(program)
		gl.UniformMatrix4fv(modelUniform, 1, false, &model[0])
		gl.BindVertexArray(vao)
		gl.ActiveTexture(gl.TEXTURE0)
		texture, isLoaded := e.textures.GetTexture("square")
		if isLoaded {
			gl.BindTexture(gl.TEXTURE_2D, texture)
		} else {
			fmt.Println("Unable to load texture")
		}

		// 6 faces x 2 triangles x 3 vertices.
		gl.DrawArrays(gl.TRIANGLES, 0, 6*2*3)

		// Maintenance
		e.window.SwapBuffers()
		glfw.PollEvents()
	}
}
//LoadShaders loads a vertex and fragment shader as a shader program by
//delegating to the shader manager; shouldBeDefault presumably marks the
//program as the engine's default shader — confirm in shaderManager.
func (e *Engine) LoadShaders(shader sm.Shader, shouldBeDefault bool) {
	e.shaders.LoadProgram(shader, shouldBeDefault)
}
// createWindow creates the fixed-size GLFW window with an OpenGL 4.1 core
// profile context, makes its context current, and installs the input
// callbacks. Panics if window creation fails.
func createWindow() *glfw.Window {
	glfw.WindowHint(glfw.Resizable, glfw.False)
	glfw.WindowHint(glfw.ContextVersionMajor, 4)
	glfw.WindowHint(glfw.ContextVersionMinor, 1)
	glfw.WindowHint(glfw.OpenGLProfile, glfw.OpenGLCoreProfile)
	glfw.WindowHint(glfw.OpenGLForwardCompatible, glfw.True)
	window, err := glfw.CreateWindow(windowWidth, windowHeight, "Frame Assault", nil, nil)
	if err != nil {
		panic(err)
	}
	window.MakeContextCurrent()
	window.SetKeyCallback(onKey)
	window.SetMouseButtonCallback(onMouseButton)
	window.SetCloseCallback(onClose)
	window.SetScrollCallback(onScroll)
	window.SetCursorPosCallback(onCursorPos)
	return window
}
// initGL initializes the OpenGL bindings (Glow), prints version/renderer
// info, enables depth testing, and sets a white clear color. It returns the
// OpenGL version string and panics if the bindings fail to initialize.
func initGL() string {
	// Initialize Glow
	if err := gl.Init(); err != nil {
		panic(err)
	}
	version := gl.GoStr(gl.GetString(gl.VERSION))
	fmt.Println("OpenGL version", version)
	fmt.Println("OpenGl shading version", gl.GoStr(gl.GetString(gl.SHADING_LANGUAGE_VERSION)))
	fmt.Println("OpenGl renderer", gl.GoStr(gl.GetString(gl.RENDERER)))

	// Configure global settings
	gl.Enable(gl.DEPTH_TEST)
	gl.DepthFunc(gl.LESS)
	gl.ClearColor(1.0, 1.0, 1.0, 1.0)
	return version
}
// onKey is the GLFW key callback. On key press, Escape requests window
// close; all other keys and all non-press actions are ignored.
func onKey(window *glfw.Window, k glfw.Key, s int, action glfw.Action, mods glfw.ModifierKey) {
	if action != glfw.Press {
		return
	}
	// k is already a glfw.Key, so the previous glfw.Key(k) conversion was a
	// redundant no-op; a plain comparison replaces the switch.
	if k == glfw.KeyEscape {
		window.SetShouldClose(true)
	}
}
// onMouseButton is the GLFW mouse-button callback. It is currently a no-op
// beyond filtering out non-press events: every branch of the original
// switch (left, right, default) returned immediately, and b is already a
// glfw.MouseButton so the glfw.MouseButton(b) conversion was redundant.
// The press filter is kept as the hook point for future click handling.
func onMouseButton(window *glfw.Window, b glfw.MouseButton, action glfw.Action, mods glfw.ModifierKey) {
	if action != glfw.Press {
		return
	}
	// Intentionally no action yet for any button.
}
// onClose is the GLFW window-close callback; it confirms the close request
// so the main loop exits.
func onClose(window *glfw.Window) {
	window.SetShouldClose(true)
}

// onScroll is the GLFW scroll callback; currently a no-op stub.
func onScroll(window *glfw.Window, xoff float64, yoff float64) {
}

// onCursorPos is the GLFW cursor-position callback; currently a no-op stub.
func onCursorPos(window *glfw.Window, xpos float64, ypos float64) {
}
// cubeVertices is the demo cube geometry as interleaved vertex data:
// 6 faces x 2 triangles x 3 vertices, each vertex being X, Y, Z position
// followed by U, V texture coordinates (5 float32 values per vertex).
var cubeVertices = []float32{
	// X, Y, Z, U, V
	// Bottom
	-1.0, -1.0, -1.0, 0.0, 0.0,
	1.0, -1.0, -1.0, 1.0, 0.0,
	-1.0, -1.0, 1.0, 0.0, 1.0,
	1.0, -1.0, -1.0, 1.0, 0.0,
	1.0, -1.0, 1.0, 1.0, 1.0,
	-1.0, -1.0, 1.0, 0.0, 1.0,
	// Top
	-1.0, 1.0, -1.0, 0.0, 0.0,
	-1.0, 1.0, 1.0, 0.0, 1.0,
	1.0, 1.0, -1.0, 1.0, 0.0,
	1.0, 1.0, -1.0, 1.0, 0.0,
	-1.0, 1.0, 1.0, 0.0, 1.0,
	1.0, 1.0, 1.0, 1.0, 1.0,
	// Front
	-1.0, -1.0, 1.0, 1.0, 0.0,
	1.0, -1.0, 1.0, 0.0, 0.0,
	-1.0, 1.0, 1.0, 1.0, 1.0,
	1.0, -1.0, 1.0, 0.0, 0.0,
	1.0, 1.0, 1.0, 0.0, 1.0,
	-1.0, 1.0, 1.0, 1.0, 1.0,
	// Back
	-1.0, -1.0, -1.0, 0.0, 0.0,
	-1.0, 1.0, -1.0, 0.0, 1.0,
	1.0, -1.0, -1.0, 1.0, 0.0,
	1.0, -1.0, -1.0, 1.0, 0.0,
	-1.0, 1.0, -1.0, 0.0, 1.0,
	1.0, 1.0, -1.0, 1.0, 1.0,
	// Left
	-1.0, -1.0, 1.0, 0.0, 1.0,
	-1.0, 1.0, -1.0, 1.0, 0.0,
	-1.0, -1.0, -1.0, 0.0, 0.0,
	-1.0, -1.0, 1.0, 0.0, 1.0,
	-1.0, 1.0, 1.0, 1.0, 1.0,
	-1.0, 1.0, -1.0, 1.0, 0.0,
	// Right
	1.0, -1.0, 1.0, 1.0, 1.0,
	1.0, -1.0, -1.0, 1.0, 0.0,
	1.0, 1.0, -1.0, 0.0, 0.0,
	1.0, -1.0, 1.0, 1.0, 1.0,
	1.0, 1.0, -1.0, 0.0, 0.0,
	1.0, 1.0, 1.0, 0.0, 1.0,
}
Added ability to load and set the current scene
package engine
import (
"fmt"
"log"
sm "github.com/Ariemeth/frame-assault-2/engine/shaderManager"
tm "github.com/Ariemeth/frame-assault-2/engine/textureManager"
"github.com/go-gl/gl/v4.1-core/gl"
"github.com/go-gl/glfw/v3.1/glfw"
"github.com/go-gl/mathgl/mgl32"
)
const windowWidth = 800
const windowHeight = 600
//Engine constitutes the rendering engine. It owns the GLFW window, the
//shader and texture managers, a registry of named scenes, and the scene
//currently being updated/rendered (nil until LoadScene succeeds).
type Engine struct {
	window       *glfw.Window
	shaders      sm.ShaderManager
	textures     tm.TextureManager
	scenes       map[string]Scene // registered scenes, keyed by name (see AddScene)
	currentScene Scene            // active scene; may be nil
}
//Init is called to initialize glfw and opengl. It creates the window, the
//shader and texture managers, and an empty scene registry; no scene is
//current until LoadScene is called. Exits the process if glfw fails.
func (e *Engine) Init() {
	if err := glfw.Init(); err != nil {
		log.Fatalln("failed to initialize glfw:", err)
	}
	e.window = createWindow()
	initGL()
	e.shaders = sm.NewShaderManager()
	e.textures = tm.NewTextureManager()
	e.scenes = make(map[string]Scene)
}
//Run runs the main engine loop: it sets up the default shader's uniforms,
//uploads the demo cube geometry, then updates and renders the current
//scene (when one is loaded) until the window closes. glfw is terminated
//when the loop exits.
func (e *Engine) Run() {
	defer glfw.Terminate()

	program, isLoaded := e.shaders.GetShader(e.shaders.GetDefaultShader())
	if isLoaded {
		gl.UseProgram(program)
	} else {
		fmt.Println("Unable to load default shader")
	}

	//////////////////////////////
	// Static projection/camera uniforms for the demo cube.
	projection := mgl32.Perspective(mgl32.DegToRad(45.0), float32(windowWidth)/windowHeight, 0.1, 10.0)
	projectionUniform := gl.GetUniformLocation(program, gl.Str("projection\x00"))
	gl.UniformMatrix4fv(projectionUniform, 1, false, &projection[0])

	camera := mgl32.LookAtV(mgl32.Vec3{3, 3, 3}, mgl32.Vec3{0, 0, 0}, mgl32.Vec3{0, 1, 0})
	cameraUniform := gl.GetUniformLocation(program, gl.Str("camera\x00"))
	gl.UniformMatrix4fv(cameraUniform, 1, false, &camera[0])

	model := mgl32.Ident4()
	modelUniform := gl.GetUniformLocation(program, gl.Str("model\x00"))
	gl.UniformMatrix4fv(modelUniform, 1, false, &model[0])

	textureUniform := gl.GetUniformLocation(program, gl.Str("tex\x00"))
	gl.Uniform1i(textureUniform, 0)

	gl.BindFragDataLocation(program, 0, gl.Str("outputColor\x00"))

	// Load the texture
	e.textures.LoadTexture("assets/textures/square.png", "square")

	// Configure the vertex data: interleaved position (3 floats) and
	// texture coordinates (2 floats), stride 5*4 bytes per vertex.
	var vao uint32
	gl.GenVertexArrays(1, &vao)
	gl.BindVertexArray(vao)

	var vbo uint32
	gl.GenBuffers(1, &vbo)
	gl.BindBuffer(gl.ARRAY_BUFFER, vbo)
	gl.BufferData(gl.ARRAY_BUFFER, len(cubeVertices)*4, gl.Ptr(cubeVertices), gl.STATIC_DRAW)

	vertAttrib := uint32(gl.GetAttribLocation(program, gl.Str("vert\x00")))
	gl.EnableVertexAttribArray(vertAttrib)
	gl.VertexAttribPointer(vertAttrib, 3, gl.FLOAT, false, 5*4, gl.PtrOffset(0))

	texCoordAttrib := uint32(gl.GetAttribLocation(program, gl.Str("vertTexCoord\x00")))
	gl.EnableVertexAttribArray(texCoordAttrib)
	gl.VertexAttribPointer(texCoordAttrib, 2, gl.FLOAT, false, 5*4, gl.PtrOffset(3*4))

	angle := 0.0
	previousTime := glfw.GetTime()

	///////////////////////////////
	for !e.window.ShouldClose() {
		gl.Clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT)

		//Update: advance the cube rotation by the wall-clock time elapsed
		//since the previous frame; the current scene (if any) updates too.
		time := glfw.GetTime()
		elapsed := time - previousTime
		previousTime = time

		if e.currentScene != nil {
			e.currentScene.Update()
		}

		angle += elapsed
		model = mgl32.HomogRotate3D(float32(angle), mgl32.Vec3{0, 1, 0})

		// Render the current scene (if any), then the demo cube.
		if e.currentScene != nil {
			e.currentScene.Render()
		}

		gl.UseProgram(program)
		gl.UniformMatrix4fv(modelUniform, 1, false, &model[0])
		gl.BindVertexArray(vao)
		gl.ActiveTexture(gl.TEXTURE0)
		texture, isLoaded := e.textures.GetTexture("square")
		if isLoaded {
			gl.BindTexture(gl.TEXTURE_2D, texture)
		} else {
			fmt.Println("Unable to load texture")
		}

		// 6 faces x 2 triangles x 3 vertices.
		gl.DrawArrays(gl.TRIANGLES, 0, 6*2*3)

		// Maintenance
		e.window.SwapBuffers()
		glfw.PollEvents()
	}
}
//LoadShaders loads a vertex and fragment shader as a shader program by
//delegating to the shader manager; shouldBeDefault presumably marks the
//program as the engine's default shader — confirm in shaderManager.
func (e *Engine) LoadShaders(shader sm.Shader, shouldBeDefault bool) {
	e.shaders.LoadProgram(shader, shouldBeDefault)
}
// createWindow creates the fixed-size GLFW window with an OpenGL 4.1 core
// profile context, makes its context current, and installs the input
// callbacks. Panics if window creation fails.
func createWindow() *glfw.Window {
	glfw.WindowHint(glfw.Resizable, glfw.False)
	glfw.WindowHint(glfw.ContextVersionMajor, 4)
	glfw.WindowHint(glfw.ContextVersionMinor, 1)
	glfw.WindowHint(glfw.OpenGLProfile, glfw.OpenGLCoreProfile)
	glfw.WindowHint(glfw.OpenGLForwardCompatible, glfw.True)
	window, err := glfw.CreateWindow(windowWidth, windowHeight, "Frame Assault", nil, nil)
	if err != nil {
		panic(err)
	}
	window.MakeContextCurrent()
	window.SetKeyCallback(onKey)
	window.SetMouseButtonCallback(onMouseButton)
	window.SetCloseCallback(onClose)
	window.SetScrollCallback(onScroll)
	window.SetCursorPosCallback(onCursorPos)
	return window
}
//AddScene adds a scene into the engine's registry under name so it can
//later be activated with LoadScene. A scene already registered under the
//same name is replaced.
func (e *Engine) AddScene(scene Scene, name string) {
	e.scenes[name] = scene
}
//LoadScene makes the scene registered under name the current scene. An
//unknown name leaves the current scene unchanged.
func (e *Engine) LoadScene(name string) {
	if scene, ok := e.scenes[name]; ok {
		e.currentScene = scene
	}
}
// initGL initializes the OpenGL bindings (Glow), prints version/renderer
// info, enables depth testing, and sets a white clear color. It returns the
// OpenGL version string and panics if the bindings fail to initialize.
func initGL() string {
	// Initialize Glow
	if err := gl.Init(); err != nil {
		panic(err)
	}
	version := gl.GoStr(gl.GetString(gl.VERSION))
	fmt.Println("OpenGL version", version)
	fmt.Println("OpenGl shading version", gl.GoStr(gl.GetString(gl.SHADING_LANGUAGE_VERSION)))
	fmt.Println("OpenGl renderer", gl.GoStr(gl.GetString(gl.RENDERER)))

	// Configure global settings
	gl.Enable(gl.DEPTH_TEST)
	gl.DepthFunc(gl.LESS)
	gl.ClearColor(1.0, 1.0, 1.0, 1.0)
	return version
}
// onKey is the GLFW key callback. On key press, Escape requests window
// close; all other keys and all non-press actions are ignored.
func onKey(window *glfw.Window, k glfw.Key, s int, action glfw.Action, mods glfw.ModifierKey) {
	if action != glfw.Press {
		return
	}
	// k is already a glfw.Key, so the previous glfw.Key(k) conversion was a
	// redundant no-op; a plain comparison replaces the switch.
	if k == glfw.KeyEscape {
		window.SetShouldClose(true)
	}
}
// onMouseButton is the GLFW mouse-button callback. It is currently a no-op
// beyond filtering out non-press events: every branch of the original
// switch (left, right, default) returned immediately, and b is already a
// glfw.MouseButton so the glfw.MouseButton(b) conversion was redundant.
// The press filter is kept as the hook point for future click handling.
func onMouseButton(window *glfw.Window, b glfw.MouseButton, action glfw.Action, mods glfw.ModifierKey) {
	if action != glfw.Press {
		return
	}
	// Intentionally no action yet for any button.
}
// onClose is the GLFW window-close callback; it confirms the close request
// so the main loop exits.
func onClose(window *glfw.Window) {
	window.SetShouldClose(true)
}

// onScroll is the GLFW scroll callback; currently a no-op stub.
func onScroll(window *glfw.Window, xoff float64, yoff float64) {
}

// onCursorPos is the GLFW cursor-position callback; currently a no-op stub.
func onCursorPos(window *glfw.Window, xpos float64, ypos float64) {
}
// cubeVertices is the demo cube geometry as interleaved vertex data:
// 6 faces x 2 triangles x 3 vertices, each vertex being X, Y, Z position
// followed by U, V texture coordinates (5 float32 values per vertex).
var cubeVertices = []float32{
	// X, Y, Z, U, V
	// Bottom
	-1.0, -1.0, -1.0, 0.0, 0.0,
	1.0, -1.0, -1.0, 1.0, 0.0,
	-1.0, -1.0, 1.0, 0.0, 1.0,
	1.0, -1.0, -1.0, 1.0, 0.0,
	1.0, -1.0, 1.0, 1.0, 1.0,
	-1.0, -1.0, 1.0, 0.0, 1.0,
	// Top
	-1.0, 1.0, -1.0, 0.0, 0.0,
	-1.0, 1.0, 1.0, 0.0, 1.0,
	1.0, 1.0, -1.0, 1.0, 0.0,
	1.0, 1.0, -1.0, 1.0, 0.0,
	-1.0, 1.0, 1.0, 0.0, 1.0,
	1.0, 1.0, 1.0, 1.0, 1.0,
	// Front
	-1.0, -1.0, 1.0, 1.0, 0.0,
	1.0, -1.0, 1.0, 0.0, 0.0,
	-1.0, 1.0, 1.0, 1.0, 1.0,
	1.0, -1.0, 1.0, 0.0, 0.0,
	1.0, 1.0, 1.0, 0.0, 1.0,
	-1.0, 1.0, 1.0, 1.0, 1.0,
	// Back
	-1.0, -1.0, -1.0, 0.0, 0.0,
	-1.0, 1.0, -1.0, 0.0, 1.0,
	1.0, -1.0, -1.0, 1.0, 0.0,
	1.0, -1.0, -1.0, 1.0, 0.0,
	-1.0, 1.0, -1.0, 0.0, 1.0,
	1.0, 1.0, -1.0, 1.0, 1.0,
	// Left
	-1.0, -1.0, 1.0, 0.0, 1.0,
	-1.0, 1.0, -1.0, 1.0, 0.0,
	-1.0, -1.0, -1.0, 0.0, 0.0,
	-1.0, -1.0, 1.0, 0.0, 1.0,
	-1.0, 1.0, 1.0, 1.0, 1.0,
	-1.0, 1.0, -1.0, 1.0, 0.0,
	// Right
	1.0, -1.0, 1.0, 1.0, 1.0,
	1.0, -1.0, -1.0, 1.0, 0.0,
	1.0, 1.0, -1.0, 0.0, 0.0,
	1.0, -1.0, 1.0, 1.0, 1.0,
	1.0, 1.0, -1.0, 0.0, 0.0,
	1.0, 1.0, 1.0, 0.0, 1.0,
}
|
package worker
import (
"fmt"
"github.com/go-redis/redis"
"github.com/stitchfix/flotilla-os/config"
"github.com/stitchfix/flotilla-os/execution/engine"
flotillaLog "github.com/stitchfix/flotilla-os/log"
"github.com/stitchfix/flotilla-os/queue"
"github.com/stitchfix/flotilla-os/state"
"gopkg.in/tomb.v2"
"time"
)
// submitWorker drains queued runs and submits them to an execution engine.
type submitWorker struct {
	sm           state.Manager      // state store for runs/definitions/templates
	eksEngine    engine.Engine      // engine used by runOnce to poll and execute runs
	emrEngine    engine.Engine      // assigned in Initialize but not referenced elsewhere in this file
	conf         config.Config
	log          flotillaLog.Logger
	pollInterval time.Duration // pause between polling passes in Run
	t            tomb.Tomb     // lifecycle control for the Run loop
	redisClient  *redis.Client
}
// Initialize wires the worker's dependencies and creates its redis client.
// The qm queue-manager parameter is accepted to satisfy the worker
// interface but is unused here. Always returns nil.
func (sw *submitWorker) Initialize(conf config.Config, sm state.Manager, eksEngine engine.Engine, emrEngine engine.Engine, log flotillaLog.Logger, pollInterval time.Duration, qm queue.Manager) error {
	sw.pollInterval = pollInterval
	sw.conf = conf
	sw.sm = sm
	sw.eksEngine = eksEngine
	sw.emrEngine = emrEngine
	sw.log = log
	// NOTE(review): the client is never closed in this file — confirm
	// shutdown handling elsewhere.
	sw.redisClient = redis.NewClient(&redis.Options{Addr: conf.GetString("redis_address"), DB: conf.GetInt("redis_db")})
	_ = sw.log.Log("message", "initialized a submit worker")
	return nil
}
// GetTomb exposes the worker's tomb so callers can stop and wait on it.
func (sw *submitWorker) GetTomb() *tomb.Tomb {
	return &sw.t
}
//
// Run lists queues, consumes runs from them, and executes them using the execution engine
//
func (sw *submitWorker) Run() error {
	for {
		// Exit promptly once the tomb starts dying; otherwise perform one
		// polling pass and sleep for the configured interval.
		select {
		case <-sw.t.Dying():
			sw.log.Log("message", "A submit worker was terminated")
			return nil
		default:
		}
		sw.runOnce()
		time.Sleep(sw.pollInterval)
	}
}
// runOnce performs a single polling pass: it receives run receipts from the
// EKS engine, validates each run against the state store, submits queued
// runs to the engine, records the outcome, and acks the receipt.
func (sw *submitWorker) runOnce() {
	var receipts []engine.RunReceipt
	var run state.Run
	var err error
	receipts, err = sw.eksEngine.PollRuns()
	if err != nil {
		sw.log.Log("message", "Error receiving runs", "error", fmt.Sprintf("%+v", err))
	}
	for _, runReceipt := range receipts {
		if runReceipt.Run == nil {
			continue
		}

		//
		// Fetch run from state manager to ensure its existence
		//
		run, err = sw.sm.GetRun(runReceipt.Run.RunID)
		if err != nil {
			sw.log.Log("message", "Error fetching run from state, acking", "run_id", runReceipt.Run.RunID, "error", fmt.Sprintf("%+v", err))
			if err = runReceipt.Done(); err != nil {
				sw.log.Log("message", "Acking run failed", "run_id", run.RunID, "error", fmt.Sprintf("%+v", err))
			}
			continue
		}

		//
		// Only valid to process if it's in the StatusQueued state
		//
		if run.Status == state.StatusQueued {
			var (
				launched  state.Run
				retryable bool
			)

			// 1. Check for existence of run.ExecutableType; set to `task_definition`
			// if not set.
			if run.ExecutableType == nil {
				defaultExecutableType := state.ExecutableTypeDefinition
				run.ExecutableType = &defaultExecutableType
			}

			// 2. Check for existence of run.ExecutableID; set to run.DefinitionID if
			// not set.
			if run.ExecutableID == nil {
				defID := run.DefinitionID
				run.ExecutableID = &defID
			}

			// 3. Switch by executable type. (Go switch cases do not fall
			// through, so the trailing `break` statements the original had
			// here were redundant and have been removed.)
			switch *run.ExecutableType {
			case state.ExecutableTypeDefinition:
				var d state.Definition
				d, err = sw.sm.GetDefinition(*run.ExecutableID)
				if err != nil {
					sw.logFailedToGetExecutableMessage(run, err)
					if err = runReceipt.Done(); err != nil {
						sw.log.Log("message", "Acking run failed", "run_id", run.RunID, "error", fmt.Sprintf("%+v", err))
					}
					continue
				}
				// Execute the run using the execution engine.
				launched, retryable, err = sw.eksEngine.Execute(d, run, sw.sm)
			case state.ExecutableTypeTemplate:
				var tpl state.Template
				tpl, err = sw.sm.GetTemplateByID(*run.ExecutableID)
				if err != nil {
					sw.logFailedToGetExecutableMessage(run, err)
					if err = runReceipt.Done(); err != nil {
						sw.log.Log("message", "Acking run failed", "run_id", run.RunID, "error", fmt.Sprintf("%+v", err))
					}
					continue
				}
				// Execute the run using the execution engine.
				sw.log.Log("message", "Submitting", "run_id", run.RunID)
				launched, retryable, err = sw.eksEngine.Execute(tpl, run, sw.sm)
			default:
				// If executable type is invalid; log message and continue
				// processing other runs. NOTE(review): this skips the final
				// Done() below, so the receipt is not acked — confirm that
				// redelivery is intended for this case.
				sw.log.Log("message", "submit worker failed", "run_id", run.RunID, "error", "invalid executable type")
				continue
			}

			if err != nil {
				sw.log.Log("message", "Error executing run", "run_id", run.RunID, "error", fmt.Sprintf("%+v", err), "retryable", retryable)
				if !retryable {
					// Set status to StatusStopped, and ack
					launched.Status = state.StatusStopped
				} else {
					// Don't change status, don't ack
					continue
				}
			}

			//
			// Emit event with current definition
			//
			err = sw.log.Event("eventClassName", "FlotillaSubmitTask", "executable_id", *run.ExecutableID, "run_id", run.RunID)
			if err != nil {
				sw.log.Log("message", "Failed to emit event", "run_id", run.RunID, "error", fmt.Sprintf("%+v", err))
			}

			//
			// UpdateStatus the status and information of the run;
			// either the run submitted successfully -or- it did not and is not retryable
			//
			if _, err = sw.sm.UpdateRun(run.RunID, launched); err != nil {
				sw.log.Log("message", "Failed to update run status", "run_id", run.RunID, "status", launched.Status, "error", fmt.Sprintf("%+v", err))
			}
		} else {
			sw.log.Log("message", "Received run that is not runnable", "run_id", run.RunID, "status", run.Status)
		}

		if err = runReceipt.Done(); err != nil {
			sw.log.Log("message", "Acking run failed", "run_id", run.RunID, "error", fmt.Sprintf("%+v", err))
		}
	}
}
// logFailedToGetExecutableMessage writes a structured error entry when the
// executable (definition or template) backing a run could not be fetched
// from the state manager.
// NOTE(review): ExecutableID and ExecutableType are pointer fields; the
// logger may render their addresses rather than their values — confirm the
// logger's formatting before relying on these entries.
func (sw *submitWorker) logFailedToGetExecutableMessage(run state.Run, err error) {
sw.log.Log(
"message", "Error fetching executable for run",
"run_id", run.RunID,
"executable_id", run.ExecutableID,
"executable_type", run.ExecutableType,
"error", err.Error())
}
update submit worker
package worker
import (
"fmt"
"github.com/go-redis/redis"
"github.com/stitchfix/flotilla-os/config"
"github.com/stitchfix/flotilla-os/execution/engine"
flotillaLog "github.com/stitchfix/flotilla-os/log"
"github.com/stitchfix/flotilla-os/queue"
"github.com/stitchfix/flotilla-os/state"
"gopkg.in/tomb.v2"
"time"
)
// submitWorker consumes queued runs and submits them to an execution engine.
type submitWorker struct {
	sm           state.Manager      // state store for runs, definitions and templates
	eksEngine    engine.Engine      // engine used for EKS-backed runs (and all template runs)
	emrEngine    engine.Engine      // engine used for EMR-backed runs
	conf         config.Config      // application configuration
	log          flotillaLog.Logger // structured logger / event emitter
	pollInterval time.Duration      // delay between polling passes in Run
	t            tomb.Tomb          // lifecycle control for the worker goroutine
	redisClient  *redis.Client      // NOTE(review): initialized in Initialize but not used in this file — confirm callers
}
// Initialize wires the submit worker's dependencies and builds its Redis
// client from configuration. The queue.Manager argument satisfies the
// worker interface but is not retained here. Always returns nil.
func (sw *submitWorker) Initialize(conf config.Config, sm state.Manager, eksEngine engine.Engine, emrEngine engine.Engine, log flotillaLog.Logger, pollInterval time.Duration, qm queue.Manager) error {
	sw.conf = conf
	sw.sm = sm
	sw.eksEngine = eksEngine
	sw.emrEngine = emrEngine
	sw.log = log
	sw.pollInterval = pollInterval
	sw.redisClient = redis.NewClient(&redis.Options{
		Addr: conf.GetString("redis_address"),
		DB:   conf.GetInt("redis_db"),
	})
	// Best-effort startup log; the logger's error is deliberately ignored.
	_ = sw.log.Log("message", "initialized a submit worker")
	return nil
}
// GetTomb exposes the worker's tomb so callers can stop it and wait on it.
func (sw *submitWorker) GetTomb() *tomb.Tomb {
	return &sw.t
}
//
// Run lists queues, consumes runs from them, and executes them using the execution engine
//
// The first poll fires immediately; subsequent polls are spaced by
// pollInterval. Waiting inside the select (instead of time.Sleep in the
// default branch) lets the worker observe Dying and shut down promptly
// even in the middle of a wait.
func (sw *submitWorker) Run() error {
	timer := time.NewTimer(0) // fire at once so the first poll is immediate
	defer timer.Stop()
	for {
		select {
		case <-sw.t.Dying():
			sw.log.Log("message", "A submit worker was terminated")
			return nil
		case <-timer.C:
			sw.runOnce()
			timer.Reset(sw.pollInterval)
		}
	}
}
// runOnce performs a single polling pass: it pulls queued run receipts from
// the EKS engine, validates each run against the state store, submits
// runnable runs to the appropriate execution engine, updates run state, and
// acks receipts. Retryable failures are left un-acked so they redeliver.
func (sw *submitWorker) runOnce() {
	var receipts []engine.RunReceipt
	var run state.Run
	var err error
	receipts, err = sw.eksEngine.PollRuns()
	if err != nil {
		sw.log.Log("message", "Error receiving runs", "error", fmt.Sprintf("%+v", err))
	}
	for _, runReceipt := range receipts {
		if runReceipt.Run == nil {
			continue
		}
		//
		// Fetch run from state manager to ensure its existence
		//
		run, err = sw.sm.GetRun(runReceipt.Run.RunID)
		if err != nil {
			sw.log.Log("message", "Error fetching run from state, acking", "run_id", runReceipt.Run.RunID, "error", fmt.Sprintf("%+v", err))
			if err = runReceipt.Done(); err != nil {
				// BUG FIX: use the receipt's run id here — `run` was not
				// populated for this iteration because GetRun failed above.
				sw.log.Log("message", "Acking run failed", "run_id", runReceipt.Run.RunID, "error", fmt.Sprintf("%+v", err))
			}
			continue
		}
		//
		// Only valid to process if it's in the StatusQueued state
		//
		if run.Status == state.StatusQueued {
			var (
				launched  state.Run
				retryable bool
			)
			// 1. Default the executable type to `task_definition` when unset.
			if run.ExecutableType == nil {
				defaultExecutableType := state.ExecutableTypeDefinition
				run.ExecutableType = &defaultExecutableType
			}
			// 2. Default the executable id to the run's definition id when unset.
			if run.ExecutableID == nil {
				defID := run.DefinitionID
				run.ExecutableID = &defID
			}
			// 3. Dispatch on executable type. Go switch cases do not fall
			// through, so the redundant `break` statements were removed.
			switch *run.ExecutableType {
			case state.ExecutableTypeDefinition:
				var d state.Definition
				d, err = sw.sm.GetDefinition(*run.ExecutableID)
				if err != nil {
					sw.logFailedToGetExecutableMessage(run, err)
					if err = runReceipt.Done(); err != nil {
						sw.log.Log("message", "Acking run failed", "run_id", run.RunID, "error", fmt.Sprintf("%+v", err))
					}
					continue
				}
				// Route to the engine named on the run; default to EKS.
				if run.Engine == nil || *run.Engine == state.EKSEngine {
					launched, retryable, err = sw.eksEngine.Execute(d, run, sw.sm)
				} else {
					launched, retryable, err = sw.emrEngine.Execute(d, run, sw.sm)
				}
			case state.ExecutableTypeTemplate:
				var tpl state.Template
				tpl, err = sw.sm.GetTemplateByID(*run.ExecutableID)
				if err != nil {
					sw.logFailedToGetExecutableMessage(run, err)
					if err = runReceipt.Done(); err != nil {
						sw.log.Log("message", "Acking run failed", "run_id", run.RunID, "error", fmt.Sprintf("%+v", err))
					}
					continue
				}
				// NOTE(review): template runs always go to the EKS engine,
				// ignoring run.Engine — confirm this asymmetry is intended.
				sw.log.Log("message", "Submitting", "run_id", run.RunID)
				launched, retryable, err = sw.eksEngine.Execute(tpl, run, sw.sm)
			default:
				// Invalid executable type; log and continue with other runs.
				sw.log.Log("message", "submit worker failed", "run_id", run.RunID, "error", "invalid executable type")
				continue
			}
			if err != nil {
				sw.log.Log("message", "Error executing run", "run_id", run.RunID, "error", fmt.Sprintf("%+v", err), "retryable", retryable)
				if !retryable {
					// Not retryable: mark stopped, then fall through to
					// update state and ack below.
					launched.Status = state.StatusStopped
				} else {
					// Retryable: leave status unchanged and do not ack, so
					// the receipt is redelivered on a later pass.
					continue
				}
			}
			//
			// Emit event with current definition
			//
			err = sw.log.Event("eventClassName", "FlotillaSubmitTask", "executable_id", *run.ExecutableID, "run_id", run.RunID)
			if err != nil {
				sw.log.Log("message", "Failed to emit event", "run_id", run.RunID, "error", fmt.Sprintf("%+v", err))
			}
			//
			// Update the status and information of the run; either the run
			// submitted successfully -or- it did not and is not retryable.
			//
			if _, err = sw.sm.UpdateRun(run.RunID, launched); err != nil {
				sw.log.Log("message", "Failed to update run status", "run_id", run.RunID, "status", launched.Status, "error", fmt.Sprintf("%+v", err))
			}
		} else {
			sw.log.Log("message", "Received run that is not runnable", "run_id", run.RunID, "status", run.Status)
		}
		if err = runReceipt.Done(); err != nil {
			sw.log.Log("message", "Acking run failed", "run_id", run.RunID, "error", fmt.Sprintf("%+v", err))
		}
	}
}
// logFailedToGetExecutableMessage writes a structured error entry when the
// executable (definition or template) backing a run could not be fetched.
// ExecutableID and ExecutableType are dereferenced (with nil guards) so the
// log line carries their values rather than pointer addresses, which is
// what logging the raw pointer fields would produce.
func (sw *submitWorker) logFailedToGetExecutableMessage(run state.Run, err error) {
	var executableID interface{} = "<nil>"
	if run.ExecutableID != nil {
		executableID = *run.ExecutableID
	}
	var executableType interface{} = "<nil>"
	if run.ExecutableType != nil {
		executableType = *run.ExecutableType
	}
	sw.log.Log(
		"message", "Error fetching executable for run",
		"run_id", run.RunID,
		"executable_id", executableID,
		"executable_type", executableType,
		"error", err.Error())
}
|
package workers
import (
"fmt"
"github.com/APTrust/exchange/constants"
"github.com/APTrust/exchange/context"
"github.com/APTrust/exchange/models"
"github.com/APTrust/exchange/network"
"github.com/nsqio/go-nsq"
"sync"
"time"
)
const (
	// GENERIC_FILE_BATCH_SIZE is the number of GenericFiles accumulated
	// before saveGenericFiles flushes a batch to Pharos.
	GENERIC_FILE_BATCH_SIZE = 50
)
// Records ingest data (objects, files and events) in Pharos
type APTRecorder struct {
	Context        *context.Context        // shared app context: config, logging, Pharos client
	RecordChannel  chan *models.IngestState // step 1: items waiting to be recorded in Pharos
	CleanupChannel chan *models.IngestState // step 2: items waiting for receiving/staging cleanup
	WaitGroup      sync.WaitGroup           // NOTE(review): appears unused in this file — confirm before removing
}
// NewAPTRecorder builds an APTRecorder around the given context, sizes its
// record and cleanup channels from the RecordWorker configuration (ten
// slots per worker), and starts the configured number of goroutines for
// each pipeline stage.
func NewAPTRecorder(_context *context.Context) *APTRecorder {
	bufferSize := _context.Config.RecordWorker.Workers * 10
	recorder := &APTRecorder{
		Context:        _context,
		RecordChannel:  make(chan *models.IngestState, bufferSize),
		CleanupChannel: make(chan *models.IngestState, bufferSize),
	}
	// Launch a bounded pool of goroutines for each stage.
	for worker := 0; worker < _context.Config.RecordWorker.Workers; worker++ {
		go recorder.record()
		go recorder.cleanup()
	}
	return recorder
}
// This is the callback that NSQ workers use to handle messages from NSQ.
// It loads the WorkItem state for the message, skips items another process
// is already working on, marks the item started in Pharos, and queues it
// on the record channel. Returns an error only when state cannot be loaded
// or the start cannot be recorded, so NSQ will redeliver.
func (recorder *APTRecorder) HandleMessage(message *nsq.Message) error {
	ingestState, err := GetIngestState(message, recorder.Context, false)
	if err != nil {
		recorder.Context.MessageLog.Error(err.Error())
		return err
	}
	// If this item was queued more than once, and this process or any
	// other is currently working on it, just finish the message and
	// assume that the in-progress worker will take care of the original.
	if ingestState.WorkItem.Node != "" && ingestState.WorkItem.Pid != 0 {
		// BUG FIX: Pid is an int, so format it with %d (the original %s
		// rendered as %!s(int=...) in the log).
		recorder.Context.MessageLog.Info("Marking WorkItem %d (%s/%s) as finished "+
			"without doing any work, because this item is currently in process by "+
			"node %s, pid %d. WorkItem was last updated at %s.",
			ingestState.WorkItem.Id, ingestState.WorkItem.Bucket,
			ingestState.WorkItem.Name, ingestState.WorkItem.Node,
			ingestState.WorkItem.Pid, ingestState.WorkItem.UpdatedAt)
		message.Finish()
		return nil
	}
	// Disable auto response, so we can tell NSQ when we need to
	// that we're still working on this item.
	message.DisableAutoResponse()
	// Clear out any old errors, because we're going to retry
	// whatever may have failed on the last run.
	ingestState.IngestManifest.RecordResult.ClearErrors()
	// Tell Pharos that we've started to record this item.
	err = MarkWorkItemStarted(ingestState, recorder.Context,
		constants.StageRecord, "Recording object, file and event metadata in Pharos.")
	if err != nil {
		recorder.Context.MessageLog.Error(err.Error())
		return err
	}
	recorder.Context.MessageLog.Info("Putting %s/%s into record channel",
		ingestState.IngestManifest.S3Bucket, ingestState.IngestManifest.S3Key)
	recorder.RecordChannel <- ingestState
	// Return no error, so NSQ knows we're OK.
	return nil
}
// Step 1: Record data in Pharos
// record drains the record channel: it marks each attempt on the item's
// RecordResult, builds events and checksums, saves to Pharos when that
// succeeded, and hands the item to the cleanup stage either way.
func (recorder *APTRecorder) record() {
	for item := range recorder.RecordChannel {
		item.IngestManifest.RecordResult.Start()
		item.IngestManifest.RecordResult.Attempted = true
		item.IngestManifest.RecordResult.AttemptNumber += 1
		recorder.buildEventsAndChecksums(item)
		if !item.IngestManifest.RecordResult.HasErrors() {
			recorder.saveAllPharosData(item)
		}
		// Always forward to cleanup; it decides fail/requeue/succeed.
		recorder.CleanupChannel <- item
	}
}
// Step 2: Delete tar file from staging area and from receiving bucket.
// cleanup decides the fate of each item after the record step — fail it,
// requeue it, or clean up and mark it succeeded — then persists the
// WorkItemState.
func (recorder *APTRecorder) cleanup () {
	for ingestState := range recorder.CleanupChannel {
		// See if we have fatal errors, or too many recurring transient errors
		attemptNumber := ingestState.IngestManifest.RecordResult.AttemptNumber
		// BUG FIX: read the retry limit from RecordWorker — this worker's own
		// config — rather than StoreWorker, which belongs to a different stage.
		maxAttempts := int(recorder.Context.Config.RecordWorker.MaxAttempts)
		itsTimeToGiveUp := (ingestState.IngestManifest.HasFatalErrors() ||
			(ingestState.IngestManifest.HasErrors() && attemptNumber >= maxAttempts))
		if itsTimeToGiveUp {
			recorder.Context.MessageLog.Error("Failed to record %s/%s. Errors: %s.",
				ingestState.WorkItem.Bucket, ingestState.WorkItem.Name,
				ingestState.IngestManifest.AllErrorsAsString())
			ingestState.FinishNSQ()
			MarkWorkItemFailed(ingestState, recorder.Context)
		} else if ingestState.IngestManifest.RecordResult.HasErrors() {
			// Transient errors with attempts remaining: requeue for retry.
			recorder.Context.MessageLog.Info("Requeueing WorkItem %d (%s/%s) due to transient errors. %s",
				ingestState.WorkItem.Id, ingestState.WorkItem.Bucket,
				ingestState.WorkItem.Name,
				ingestState.IngestManifest.AllErrorsAsString())
			ingestState.RequeueNSQ(1000)
			MarkWorkItemRequeued(ingestState, recorder.Context)
		} else {
			// Success: delete the tar from staging and receiving, then finish.
			MarkWorkItemStarted(ingestState, recorder.Context, constants.StageCleanup,
				"Bag has been stored and recorded. Deleting files from receiving bucket "+
					"and staging area.")
			DeleteBagFromStaging(ingestState, recorder.Context, ingestState.IngestManifest.RecordResult)
			recorder.deleteBagFromReceivingBucket(ingestState)
			MarkWorkItemSucceeded(ingestState, recorder.Context, constants.StageCleanup)
			ingestState.FinishNSQ()
		}
		// Save our WorkItemState
		ingestState.IngestManifest.RecordResult.Finish()
		RecordWorkItemState(ingestState, recorder.Context, ingestState.IngestManifest.RecordResult)
	}
}
// Make sure the IntellectualObject and its component files have all of the
// checksums and PREMIS events we'll need to save. We build these now so
// that the PREMIS events will have UUIDs, and if we ever have to re-record
// this IntellectualObject after a partial save, we'll know which events are
// already recorded in Pharos and which were not. This was a problem in the
// old system, where record failures were common, and PREMIS events often
// wound up being recorded twice.
func (recorder *APTRecorder) buildEventsAndChecksums(ingestState *models.IngestState) {
	obj := ingestState.IngestManifest.Object
	// Accumulate build failures on RecordResult instead of aborting;
	// the caller checks HasErrors() before saving to Pharos.
	if err := obj.BuildIngestEvents(); err != nil {
		ingestState.IngestManifest.RecordResult.AddError(err.Error())
	}
	if err := obj.BuildIngestChecksums(); err != nil {
		ingestState.IngestManifest.RecordResult.AddError(err.Error())
	}
}
// saveAllPharosData saves the IntellectualObject (if it has no Pharos id
// yet) and then its GenericFiles, accumulating any failures on the item's
// RecordResult and stopping at the first failing phase.
func (recorder *APTRecorder) saveAllPharosData(ingestState *models.IngestState) {
	workItem := ingestState.WorkItem
	result := ingestState.IngestManifest.RecordResult
	if ingestState.IngestManifest.Object.Id == 0 {
		recorder.saveIntellectualObject(ingestState)
		if result.HasErrors() {
			recorder.Context.MessageLog.Error("Error saving IntellectualObject %s/%s: %v",
				workItem.Bucket, workItem.Name,
				result.AllErrorsAsString())
			return
		}
		recorder.Context.MessageLog.Info("Saved %s/%s with id %d",
			workItem.Bucket, workItem.Name,
			ingestState.IngestManifest.Object.Id)
	} else {
		recorder.Context.MessageLog.Info(
			"No need to save %s/%s already has id %d",
			workItem.Bucket, workItem.Name,
			ingestState.IngestManifest.Object.Id)
	}
	recorder.saveGenericFiles(ingestState)
	if result.HasErrors() {
		recorder.Context.MessageLog.Error("Error saving one or more GenericFiles for "+
			"IntellectualObject %s/%s: %v",
			workItem.Bucket, workItem.Name,
			result.AllErrorsAsString())
		return
	}
}
// saveIntellectualObject creates the IntellectualObject in Pharos, copies
// the server-assigned Id and timestamps back onto the local object, and
// propagates the new Id down to the object's children. Failures are
// recorded on RecordResult.
func (recorder *APTRecorder) saveIntellectualObject(ingestState *models.IngestState) {
	obj := ingestState.IngestManifest.Object
	resp := recorder.Context.PharosClient.IntellectualObjectSave(obj)
	if resp.Error != nil {
		ingestState.IngestManifest.RecordResult.AddError(resp.Error.Error())
		return
	}
	saved := resp.IntellectualObject()
	if saved == nil {
		ingestState.IngestManifest.RecordResult.AddError(
			"Pharos returned nil IntellectualObject after save.")
		return
	}
	// Adopt the identifiers and timestamps Pharos assigned.
	obj.Id = saved.Id
	obj.CreatedAt = saved.CreatedAt
	obj.UpdatedAt = saved.UpdatedAt
	obj.PropagateIdsToChildren()
}
// saveGenericFiles partitions the object's GenericFiles into create and
// update lists and flushes them to Pharos every GENERIC_FILE_BATCH_SIZE
// files, stopping early if any batch reports errors.
func (recorder *APTRecorder) saveGenericFiles(ingestState *models.IngestState) {
	filesToCreate := make([]*models.GenericFile, 0)
	filesToUpdate := make([]*models.GenericFile, 0)
	for i, gf := range ingestState.IngestManifest.Object.GenericFiles {
		// Flush accumulated batches every GENERIC_FILE_BATCH_SIZE files.
		// (At i == 0 both lists are empty, so these calls are no-ops.)
		if i%GENERIC_FILE_BATCH_SIZE == 0 {
			recorder.createGenericFiles(ingestState, filesToCreate)
			if ingestState.IngestManifest.RecordResult.HasErrors() {
				break
			}
			recorder.updateGenericFiles(ingestState, filesToUpdate)
			if ingestState.IngestManifest.RecordResult.HasErrors() {
				break
			}
			filesToCreate = make([]*models.GenericFile, 0)
			filesToUpdate = make([]*models.GenericFile, 0)
		}
		if gf.IngestNeedsSave {
			if gf.IngestPreviousVersionExists {
				if gf.Id > 0 {
					filesToUpdate = append(filesToUpdate, gf)
				} else {
					msg := fmt.Sprintf("GenericFile %s has a previous version, but its Id is missing.",
						gf.Identifier)
					recorder.Context.MessageLog.Error(msg)
					ingestState.IngestManifest.RecordResult.AddError(msg)
				}
			} else if gf.Id == 0 {
				// FIX: dropped the redundant re-check of gf.IngestNeedsSave
				// here; it is already guaranteed by the enclosing if.
				// NOTE(review): a file with no previous version but a
				// non-zero Id is silently skipped — confirm that's intended.
				filesToCreate = append(filesToCreate, gf)
			}
		}
	}
	// Flush whatever remains after the loop.
	if !ingestState.IngestManifest.RecordResult.HasErrors() {
		recorder.createGenericFiles(ingestState, filesToCreate)
		recorder.updateGenericFiles(ingestState, filesToUpdate)
	}
}
// createGenericFiles saves a batch of brand-new GenericFiles to Pharos in a
// single batch call, then merges the server-assigned attributes back into
// the matching local records. Failures are accumulated on RecordResult.
func (recorder *APTRecorder) createGenericFiles (ingestState *models.IngestState, files []*models.GenericFile) {
	if len(files) == 0 {
		return
	}
	resp := recorder.Context.PharosClient.GenericFileSaveBatch(files)
	if resp.Error != nil {
		// Log the raw response body to aid debugging of batch failures.
		body, _ := resp.RawResponseData()
		recorder.Context.MessageLog.Error(
			"Pharos returned this after attempt to save batch of GenericFiles:\n%s",
			string(body))
		ingestState.IngestManifest.RecordResult.AddError(resp.Error.Error())
	}
	// We may have managed to save some files despite the error.
	// If so, record what was saved.
	for _, savedFile := range resp.GenericFiles() {
		gf := ingestState.IngestManifest.Object.FindGenericFile(savedFile.OriginalPath())
		if gf == nil {
			ingestState.IngestManifest.RecordResult.AddError("After save, could not find file '%s' " +
				"in IntellectualObject.", savedFile.OriginalPath())
			continue
		}
		// Merge attributes set by Pharos into our GenericFile record.
		// Attributes include Id, CreatedAt, UpdatedAt on GenericFile
		// and all of its Checksums and PremisEvents. This also
		// propagates the new GenericFile.Id down to the PremisEvents
		// and Checksums.
		errors := gf.MergeAttributes(savedFile)
		for _, err := range errors {
			ingestState.IngestManifest.RecordResult.AddError(err.Error())
		}
	}
}
// updateGenericFiles saves each previously-existing GenericFile to Pharos
// one at a time (no batch endpoint is used for updates). Failures are
// accumulated on RecordResult; remaining files are still attempted.
func (recorder *APTRecorder) updateGenericFiles (ingestState *models.IngestState, files []*models.GenericFile) {
	if len(files) == 0 {
		return
	}
	for _, gf := range files {
		resp := recorder.Context.PharosClient.GenericFileSave(gf)
		if resp.Error != nil {
			ingestState.IngestManifest.RecordResult.AddError(
				"Error updating '%s': %v", gf.Identifier, resp.Error)
			continue
		}
		// Shouldn't need to call this. Should already have Id?
		gf.PropagateIdsToChildren()
		// ----------------------------------------------------------------------
		// TODO: Make sure events and checksums are correct, then delete these calls
		// ----------------------------------------------------------------------
		//recorder.savePremisEvents(ingestState, gf)
		//recorder.saveChecksums(ingestState, gf)
	}
}
// savePremisEvents records a file's PREMIS events in Pharos one by one.
// Call this only for files that need update: the batch create call already
// creates PremisEvents and checksums. The only events present here are the
// ones created during this ingest — not ones that already exist in Pharos —
// so no new identifier-assignment event is saved (there isn't one).
func (recorder *APTRecorder) savePremisEvents(ingestState *models.IngestState, gf *models.GenericFile) {
	for _, premisEvent := range gf.PremisEvents {
		if resp := recorder.Context.PharosClient.PremisEventSave(premisEvent); resp.Error != nil {
			ingestState.IngestManifest.RecordResult.AddError(
				"While updating '%s', error adding PremisEvent '%s': %v",
				gf.Identifier, premisEvent.EventType, resp.Error)
		}
	}
}
// saveChecksums records a file's checksums in Pharos one by one.
// Call this only for files that need update. The only checksums present are
// the ones created during this ingest — not those already in Pharos. Note
// that apt_storer.saveFile() determines whether the file already exists and
// whether its checksum changed; if we're updating here, both are true and
// these are checksums for the new version of the file.
func (recorder *APTRecorder) saveChecksums(ingestState *models.IngestState, gf *models.GenericFile) {
	for _, checksum := range gf.Checksums {
		if resp := recorder.Context.PharosClient.ChecksumSave(checksum, gf.Identifier); resp.Error != nil {
			ingestState.IngestManifest.RecordResult.AddError(
				"While updating '%s', error adding '%s' checksum: %v",
				gf.Identifier, checksum.Algorithm, resp.Error)
		}
	}
}
// deleteBagFromReceivingBucket removes the original tar file from the S3
// receiving bucket after a successful ingest and stamps the deletion time
// on the object. When DeleteOnSuccess is off (dev/test/integration), it
// only stamps the timestamp and skips the actual delete.
func (recorder *APTRecorder) deleteBagFromReceivingBucket (ingestState *models.IngestState) {
	// Remove the bag from the receiving bucket, if ingest succeeded
	if !recorder.Context.Config.DeleteOnSuccess {
		// We don't actually delete files if config is dev, test, or integration.
		recorder.Context.MessageLog.Info("Skipping deletion step because config.DeleteOnSuccess == false")
		// Set deletion timestamp, so we know this method was called.
		ingestState.IngestManifest.Object.IngestDeletedFromReceivingAt = time.Now().UTC()
		return
	}
	deleter := network.NewS3ObjectDelete(
		constants.AWSVirginia,
		ingestState.IngestManifest.S3Bucket,
		[]string{ingestState.IngestManifest.S3Key})
	deleter.DeleteList()
	if deleter.ErrorMessage != "" {
		// Deletion failures go on CleanupResult, not RecordResult.
		message := fmt.Sprintf("In cleanup, error deleting S3 item %s/%s: %s",
			ingestState.IngestManifest.S3Bucket, ingestState.IngestManifest.S3Key,
			deleter.ErrorMessage)
		recorder.Context.MessageLog.Warning(message)
		ingestState.IngestManifest.CleanupResult.AddError(message)
	} else {
		// FIX: corrected log-message typo "Deled" -> "Deleted".
		message := fmt.Sprintf("Deleted S3 item %s/%s",
			ingestState.IngestManifest.S3Bucket, ingestState.IngestManifest.S3Key)
		recorder.Context.MessageLog.Info(message)
		ingestState.IngestManifest.Object.IngestDeletedFromReceivingAt = time.Now().UTC()
	}
}
Use RecordWorker.MaxAttempts
package workers
import (
"fmt"
"github.com/APTrust/exchange/constants"
"github.com/APTrust/exchange/context"
"github.com/APTrust/exchange/models"
"github.com/APTrust/exchange/network"
"github.com/nsqio/go-nsq"
"sync"
"time"
)
const (
	// GENERIC_FILE_BATCH_SIZE is the number of GenericFiles accumulated
	// before saveGenericFiles flushes a batch to Pharos.
	GENERIC_FILE_BATCH_SIZE = 50
)
// Records ingest data (objects, files and events) in Pharos
type APTRecorder struct {
	Context        *context.Context        // shared app context: config, logging, Pharos client
	RecordChannel  chan *models.IngestState // step 1: items waiting to be recorded in Pharos
	CleanupChannel chan *models.IngestState // step 2: items waiting for receiving/staging cleanup
	WaitGroup      sync.WaitGroup           // NOTE(review): appears unused in this file — confirm before removing
}
// NewAPTRecorder builds an APTRecorder around the given context, sizes its
// record and cleanup channels from the RecordWorker configuration (ten
// slots per worker), and starts the configured number of goroutines for
// each pipeline stage.
func NewAPTRecorder(_context *context.Context) *APTRecorder {
	bufferSize := _context.Config.RecordWorker.Workers * 10
	recorder := &APTRecorder{
		Context:        _context,
		RecordChannel:  make(chan *models.IngestState, bufferSize),
		CleanupChannel: make(chan *models.IngestState, bufferSize),
	}
	// Launch a bounded pool of goroutines for each stage.
	for worker := 0; worker < _context.Config.RecordWorker.Workers; worker++ {
		go recorder.record()
		go recorder.cleanup()
	}
	return recorder
}
// This is the callback that NSQ workers use to handle messages from NSQ.
// It loads the WorkItem state for the message, skips items another process
// is already working on, marks the item started in Pharos, and queues it
// on the record channel. Returns an error only when state cannot be loaded
// or the start cannot be recorded, so NSQ will redeliver.
func (recorder *APTRecorder) HandleMessage(message *nsq.Message) error {
	ingestState, err := GetIngestState(message, recorder.Context, false)
	if err != nil {
		recorder.Context.MessageLog.Error(err.Error())
		return err
	}
	// If this item was queued more than once, and this process or any
	// other is currently working on it, just finish the message and
	// assume that the in-progress worker will take care of the original.
	if ingestState.WorkItem.Node != "" && ingestState.WorkItem.Pid != 0 {
		// BUG FIX: Pid is an int, so format it with %d (the original %s
		// rendered as %!s(int=...) in the log).
		recorder.Context.MessageLog.Info("Marking WorkItem %d (%s/%s) as finished "+
			"without doing any work, because this item is currently in process by "+
			"node %s, pid %d. WorkItem was last updated at %s.",
			ingestState.WorkItem.Id, ingestState.WorkItem.Bucket,
			ingestState.WorkItem.Name, ingestState.WorkItem.Node,
			ingestState.WorkItem.Pid, ingestState.WorkItem.UpdatedAt)
		message.Finish()
		return nil
	}
	// Disable auto response, so we can tell NSQ when we need to
	// that we're still working on this item.
	message.DisableAutoResponse()
	// Clear out any old errors, because we're going to retry
	// whatever may have failed on the last run.
	ingestState.IngestManifest.RecordResult.ClearErrors()
	// Tell Pharos that we've started to record this item.
	err = MarkWorkItemStarted(ingestState, recorder.Context,
		constants.StageRecord, "Recording object, file and event metadata in Pharos.")
	if err != nil {
		recorder.Context.MessageLog.Error(err.Error())
		return err
	}
	recorder.Context.MessageLog.Info("Putting %s/%s into record channel",
		ingestState.IngestManifest.S3Bucket, ingestState.IngestManifest.S3Key)
	recorder.RecordChannel <- ingestState
	// Return no error, so NSQ knows we're OK.
	return nil
}
// Step 1: Record data in Pharos
// record drains the record channel: it marks each attempt on the item's
// RecordResult, builds events and checksums, saves to Pharos when that
// succeeded, and hands the item to the cleanup stage either way.
func (recorder *APTRecorder) record() {
	for item := range recorder.RecordChannel {
		item.IngestManifest.RecordResult.Start()
		item.IngestManifest.RecordResult.Attempted = true
		item.IngestManifest.RecordResult.AttemptNumber += 1
		recorder.buildEventsAndChecksums(item)
		if !item.IngestManifest.RecordResult.HasErrors() {
			recorder.saveAllPharosData(item)
		}
		// Always forward to cleanup; it decides fail/requeue/succeed.
		recorder.CleanupChannel <- item
	}
}
// Step 2: Delete tar file from staging area and from receiving bucket.
// cleanup decides the fate of each item after the record step — fail it,
// requeue it, or clean up and mark it succeeded — then persists the
// WorkItemState.
func (recorder *APTRecorder) cleanup() {
	for ingestState := range recorder.CleanupChannel {
		// Give up when errors are fatal, or when transient errors have
		// persisted past the configured number of attempts.
		attempts := ingestState.IngestManifest.RecordResult.AttemptNumber
		limit := int(recorder.Context.Config.RecordWorker.MaxAttempts)
		giveUp := ingestState.IngestManifest.HasFatalErrors() ||
			(ingestState.IngestManifest.HasErrors() && attempts >= limit)
		switch {
		case giveUp:
			recorder.Context.MessageLog.Error("Failed to record %s/%s. Errors: %s.",
				ingestState.WorkItem.Bucket, ingestState.WorkItem.Name,
				ingestState.IngestManifest.AllErrorsAsString())
			ingestState.FinishNSQ()
			MarkWorkItemFailed(ingestState, recorder.Context)
		case ingestState.IngestManifest.RecordResult.HasErrors():
			// Transient errors with attempts remaining: requeue for retry.
			recorder.Context.MessageLog.Info("Requeueing WorkItem %d (%s/%s) due to transient errors. %s",
				ingestState.WorkItem.Id, ingestState.WorkItem.Bucket,
				ingestState.WorkItem.Name,
				ingestState.IngestManifest.AllErrorsAsString())
			ingestState.RequeueNSQ(1000)
			MarkWorkItemRequeued(ingestState, recorder.Context)
		default:
			// Success: delete the tar from staging and receiving, then finish.
			MarkWorkItemStarted(ingestState, recorder.Context, constants.StageCleanup,
				"Bag has been stored and recorded. Deleting files from receiving bucket "+
					"and staging area.")
			DeleteBagFromStaging(ingestState, recorder.Context, ingestState.IngestManifest.RecordResult)
			recorder.deleteBagFromReceivingBucket(ingestState)
			MarkWorkItemSucceeded(ingestState, recorder.Context, constants.StageCleanup)
			ingestState.FinishNSQ()
		}
		// Save our WorkItemState
		ingestState.IngestManifest.RecordResult.Finish()
		RecordWorkItemState(ingestState, recorder.Context, ingestState.IngestManifest.RecordResult)
	}
}
// Make sure the IntellectualObject and its component files have all of the
// checksums and PREMIS events we'll need to save. We build these now so
// that the PREMIS events will have UUIDs, and if we ever have to re-record
// this IntellectualObject after a partial save, we'll know which events are
// already recorded in Pharos and which were not. This was a problem in the
// old system, where record failures were common, and PREMIS events often
// wound up being recorded twice.
func (recorder *APTRecorder) buildEventsAndChecksums(ingestState *models.IngestState) {
	obj := ingestState.IngestManifest.Object
	// Accumulate build failures on RecordResult instead of aborting;
	// the caller checks HasErrors() before saving to Pharos.
	if err := obj.BuildIngestEvents(); err != nil {
		ingestState.IngestManifest.RecordResult.AddError(err.Error())
	}
	if err := obj.BuildIngestChecksums(); err != nil {
		ingestState.IngestManifest.RecordResult.AddError(err.Error())
	}
}
// saveAllPharosData saves the IntellectualObject (if it has no Pharos id
// yet) and then its GenericFiles, accumulating any failures on the item's
// RecordResult and stopping at the first failing phase.
func (recorder *APTRecorder) saveAllPharosData(ingestState *models.IngestState) {
	workItem := ingestState.WorkItem
	result := ingestState.IngestManifest.RecordResult
	if ingestState.IngestManifest.Object.Id == 0 {
		recorder.saveIntellectualObject(ingestState)
		if result.HasErrors() {
			recorder.Context.MessageLog.Error("Error saving IntellectualObject %s/%s: %v",
				workItem.Bucket, workItem.Name,
				result.AllErrorsAsString())
			return
		}
		recorder.Context.MessageLog.Info("Saved %s/%s with id %d",
			workItem.Bucket, workItem.Name,
			ingestState.IngestManifest.Object.Id)
	} else {
		recorder.Context.MessageLog.Info(
			"No need to save %s/%s already has id %d",
			workItem.Bucket, workItem.Name,
			ingestState.IngestManifest.Object.Id)
	}
	recorder.saveGenericFiles(ingestState)
	if result.HasErrors() {
		recorder.Context.MessageLog.Error("Error saving one or more GenericFiles for "+
			"IntellectualObject %s/%s: %v",
			workItem.Bucket, workItem.Name,
			result.AllErrorsAsString())
		return
	}
}
// saveIntellectualObject creates the IntellectualObject in Pharos, copies
// the server-assigned Id and timestamps back onto the local object, and
// propagates the new Id down to the object's children. Failures are
// recorded on RecordResult.
func (recorder *APTRecorder) saveIntellectualObject(ingestState *models.IngestState) {
	obj := ingestState.IngestManifest.Object
	resp := recorder.Context.PharosClient.IntellectualObjectSave(obj)
	if resp.Error != nil {
		ingestState.IngestManifest.RecordResult.AddError(resp.Error.Error())
		return
	}
	saved := resp.IntellectualObject()
	if saved == nil {
		ingestState.IngestManifest.RecordResult.AddError(
			"Pharos returned nil IntellectualObject after save.")
		return
	}
	// Adopt the identifiers and timestamps Pharos assigned.
	obj.Id = saved.Id
	obj.CreatedAt = saved.CreatedAt
	obj.UpdatedAt = saved.UpdatedAt
	obj.PropagateIdsToChildren()
}
// saveGenericFiles partitions the object's GenericFiles into create and
// update lists and flushes them to Pharos every GENERIC_FILE_BATCH_SIZE
// files, stopping early if any batch reports errors.
func (recorder *APTRecorder) saveGenericFiles(ingestState *models.IngestState) {
	filesToCreate := make([]*models.GenericFile, 0)
	filesToUpdate := make([]*models.GenericFile, 0)
	for i, gf := range ingestState.IngestManifest.Object.GenericFiles {
		// Flush accumulated batches every GENERIC_FILE_BATCH_SIZE files.
		// (At i == 0 both lists are empty, so these calls are no-ops.)
		if i%GENERIC_FILE_BATCH_SIZE == 0 {
			recorder.createGenericFiles(ingestState, filesToCreate)
			if ingestState.IngestManifest.RecordResult.HasErrors() {
				break
			}
			recorder.updateGenericFiles(ingestState, filesToUpdate)
			if ingestState.IngestManifest.RecordResult.HasErrors() {
				break
			}
			filesToCreate = make([]*models.GenericFile, 0)
			filesToUpdate = make([]*models.GenericFile, 0)
		}
		if gf.IngestNeedsSave {
			if gf.IngestPreviousVersionExists {
				if gf.Id > 0 {
					filesToUpdate = append(filesToUpdate, gf)
				} else {
					msg := fmt.Sprintf("GenericFile %s has a previous version, but its Id is missing.",
						gf.Identifier)
					recorder.Context.MessageLog.Error(msg)
					ingestState.IngestManifest.RecordResult.AddError(msg)
				}
			} else if gf.Id == 0 {
				// FIX: dropped the redundant re-check of gf.IngestNeedsSave
				// here; it is already guaranteed by the enclosing if.
				// NOTE(review): a file with no previous version but a
				// non-zero Id is silently skipped — confirm that's intended.
				filesToCreate = append(filesToCreate, gf)
			}
		}
	}
	// Flush whatever remains after the loop.
	if !ingestState.IngestManifest.RecordResult.HasErrors() {
		recorder.createGenericFiles(ingestState, filesToCreate)
		recorder.updateGenericFiles(ingestState, filesToUpdate)
	}
}
// createGenericFiles saves a batch of brand-new GenericFiles to Pharos in a
// single batch call, then merges the server-assigned attributes back into
// the matching local records. Failures are accumulated on RecordResult.
func (recorder *APTRecorder) createGenericFiles (ingestState *models.IngestState, files []*models.GenericFile) {
	if len(files) == 0 {
		return
	}
	resp := recorder.Context.PharosClient.GenericFileSaveBatch(files)
	if resp.Error != nil {
		// Log the raw response body to aid debugging of batch failures.
		body, _ := resp.RawResponseData()
		recorder.Context.MessageLog.Error(
			"Pharos returned this after attempt to save batch of GenericFiles:\n%s",
			string(body))
		ingestState.IngestManifest.RecordResult.AddError(resp.Error.Error())
	}
	// We may have managed to save some files despite the error.
	// If so, record what was saved.
	for _, savedFile := range resp.GenericFiles() {
		gf := ingestState.IngestManifest.Object.FindGenericFile(savedFile.OriginalPath())
		if gf == nil {
			ingestState.IngestManifest.RecordResult.AddError("After save, could not find file '%s' " +
				"in IntellectualObject.", savedFile.OriginalPath())
			continue
		}
		// Merge attributes set by Pharos into our GenericFile record.
		// Attributes include Id, CreatedAt, UpdatedAt on GenericFile
		// and all of its Checksums and PremisEvents. This also
		// propagates the new GenericFile.Id down to the PremisEvents
		// and Checksums.
		errors := gf.MergeAttributes(savedFile)
		for _, err := range errors {
			ingestState.IngestManifest.RecordResult.AddError(err.Error())
		}
	}
}
// updateGenericFiles saves each previously-existing GenericFile to Pharos
// one at a time (no batch endpoint is used for updates). Failures are
// accumulated on RecordResult; remaining files are still attempted.
func (recorder *APTRecorder) updateGenericFiles (ingestState *models.IngestState, files []*models.GenericFile) {
	if len(files) == 0 {
		return
	}
	for _, gf := range files {
		resp := recorder.Context.PharosClient.GenericFileSave(gf)
		if resp.Error != nil {
			ingestState.IngestManifest.RecordResult.AddError(
				"Error updating '%s': %v", gf.Identifier, resp.Error)
			continue
		}
		// Shouldn't need to call this. Should already have Id?
		gf.PropagateIdsToChildren()
		// ----------------------------------------------------------------------
		// TODO: Make sure events and checksums are correct, then delete these calls
		// ----------------------------------------------------------------------
		//recorder.savePremisEvents(ingestState, gf)
		//recorder.saveChecksums(ingestState, gf)
	}
}
// savePremisEvents records a file's PREMIS events in Pharos one by one.
// Call this only for files that need update: the batch create call already
// creates PremisEvents and checksums. The only events present here are the
// ones created during this ingest — not ones that already exist in Pharos —
// so no new identifier-assignment event is saved (there isn't one).
func (recorder *APTRecorder) savePremisEvents(ingestState *models.IngestState, gf *models.GenericFile) {
	for _, premisEvent := range gf.PremisEvents {
		if resp := recorder.Context.PharosClient.PremisEventSave(premisEvent); resp.Error != nil {
			ingestState.IngestManifest.RecordResult.AddError(
				"While updating '%s', error adding PremisEvent '%s': %v",
				gf.Identifier, premisEvent.EventType, resp.Error)
		}
	}
}
// saveChecksums records a file's checksums in Pharos one by one.
// Call this only for files that need update. The only checksums present are
// the ones created during this ingest — not those already in Pharos. Note
// that apt_storer.saveFile() determines whether the file already exists and
// whether its checksum changed; if we're updating here, both are true and
// these are checksums for the new version of the file.
func (recorder *APTRecorder) saveChecksums(ingestState *models.IngestState, gf *models.GenericFile) {
	for _, checksum := range gf.Checksums {
		if resp := recorder.Context.PharosClient.ChecksumSave(checksum, gf.Identifier); resp.Error != nil {
			ingestState.IngestManifest.RecordResult.AddError(
				"While updating '%s', error adding '%s' checksum: %v",
				gf.Identifier, checksum.Algorithm, resp.Error)
		}
	}
}
// deleteBagFromReceivingBucket removes the bag's tar file from the S3
// receiving bucket after a successful ingest. When DeleteOnSuccess is off
// (dev, test, and integration configs), the deletion is skipped but the
// deletion timestamp is still set so callers can tell this step ran.
// Deletion errors are logged and recorded on the CleanupResult; on success
// the object's IngestDeletedFromReceivingAt timestamp is set.
func (recorder *APTRecorder) deleteBagFromReceivingBucket (ingestState *models.IngestState) {
	// Remove the bag from the receiving bucket, if ingest succeeded
	if !recorder.Context.Config.DeleteOnSuccess {
		// We don't actually delete files if config is dev, test, or integration.
		recorder.Context.MessageLog.Info("Skipping deletion step because config.DeleteOnSuccess == false")
		// Set deletion timestamp, so we know this method was called.
		ingestState.IngestManifest.Object.IngestDeletedFromReceivingAt = time.Now().UTC()
		return
	}
	deleter := network.NewS3ObjectDelete(
		constants.AWSVirginia,
		ingestState.IngestManifest.S3Bucket,
		[]string{ ingestState.IngestManifest.S3Key })
	deleter.DeleteList()
	if deleter.ErrorMessage != "" {
		message := fmt.Sprintf("In cleanup, error deleting S3 item %s/%s: %s",
			ingestState.IngestManifest.S3Bucket, ingestState.IngestManifest.S3Key,
			deleter.ErrorMessage)
		recorder.Context.MessageLog.Warning(message)
		ingestState.IngestManifest.CleanupResult.AddError(message)
	} else {
		// Fixed log message typo: was "Deled S3 item".
		message := fmt.Sprintf("Deleted S3 item %s/%s",
			ingestState.IngestManifest.S3Bucket, ingestState.IngestManifest.S3Key)
		recorder.Context.MessageLog.Info(message)
		ingestState.IngestManifest.Object.IngestDeletedFromReceivingAt = time.Now().UTC()
	}
}
|
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cluster
import (
"context"
"math/rand"
"sync"
"testing"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/failpoint"
"github.com/pingcap/kvproto/pkg/eraftpb"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/kvproto/pkg/pdpb"
"github.com/pingcap/pd/v4/pkg/mock/mockhbstream"
"github.com/pingcap/pd/v4/pkg/testutil"
"github.com/pingcap/pd/v4/server/config"
"github.com/pingcap/pd/v4/server/core"
"github.com/pingcap/pd/v4/server/kv"
"github.com/pingcap/pd/v4/server/schedule"
"github.com/pingcap/pd/v4/server/schedule/operator"
"github.com/pingcap/pd/v4/server/schedule/opt"
"github.com/pingcap/pd/v4/server/schedulers"
"github.com/pingcap/pd/v4/server/statistics"
)
// newTestOperator builds an operator named "test" for the given region,
// epoch, kind, and steps.
func newTestOperator(regionID uint64, regionEpoch *metapb.RegionEpoch, kind operator.OpKind, steps ...operator.OpStep) *operator.Operator {
	op := operator.NewOperator("test", "test", regionID, regionEpoch, kind, steps...)
	return op
}
// AllocPeer allocates a fresh peer ID from the cluster and returns a peer
// placed on the given store.
func (c *testCluster) AllocPeer(storeID uint64) (*metapb.Peer, error) {
	peerID, err := c.AllocID()
	if err != nil {
		return nil, err
	}
	peer := &metapb.Peer{Id: peerID, StoreId: storeID}
	return peer, nil
}
// addRegionStore puts a store with the given region count into the cluster.
// When no explicit size is supplied, the store's region size defaults to
// regionCount * 10; otherwise the first supplied size is used.
func (c *testCluster) addRegionStore(storeID uint64, regionCount int, regionSizes ...uint64) error {
	regionSize := uint64(regionCount) * 10
	if len(regionSizes) > 0 {
		regionSize = regionSizes[0]
	}
	stats := &pdpb.StoreStats{}
	stats.Capacity = 1000 * (1 << 20)
	stats.Available = stats.Capacity - regionSize
	store := core.NewStoreInfo(&metapb.Store{Id: storeID},
		core.SetStoreStats(stats),
		core.SetRegionCount(regionCount),
		core.SetRegionSize(int64(regionSize)),
		core.SetLastHeartbeatTS(time.Now()),
	)
	c.Lock()
	defer c.Unlock()
	return c.putStoreLocked(store)
}
// addLeaderRegion creates a region whose leader lives on leaderStoreID and
// whose followers live on the remaining stores, then writes it into the
// cluster with a small approximate size and key count.
func (c *testCluster) addLeaderRegion(regionID uint64, leaderStoreID uint64, followerStoreIDs ...uint64) error {
	meta := newTestRegionMeta(regionID)
	leader, _ := c.AllocPeer(leaderStoreID)
	peers := []*metapb.Peer{leader}
	for _, storeID := range followerStoreIDs {
		follower, _ := c.AllocPeer(storeID)
		peers = append(peers, follower)
	}
	meta.Peers = peers
	return c.putRegion(core.NewRegionInfo(meta, leader, core.SetApproximateSize(10), core.SetApproximateKeys(10)))
}
// updateLeaderCount resets a store's leader count and its derived leader
// size (leaderCount * 10).
func (c *testCluster) updateLeaderCount(storeID uint64, leaderCount int) error {
	updated := c.GetStore(storeID).Clone(
		core.SetLeaderCount(leaderCount),
		core.SetLeaderSize(int64(leaderCount)*10),
	)
	c.Lock()
	defer c.Unlock()
	return c.putStoreLocked(updated)
}
// addLeaderStore puts a store with the given leader count (and derived
// leader size) into the cluster.
func (c *testCluster) addLeaderStore(storeID uint64, leaderCount int) error {
	store := core.NewStoreInfo(&metapb.Store{Id: storeID},
		core.SetStoreStats(&pdpb.StoreStats{}),
		core.SetLeaderCount(leaderCount),
		core.SetLeaderSize(int64(leaderCount)*10),
		core.SetLastHeartbeatTS(time.Now()),
	)
	c.Lock()
	defer c.Unlock()
	return c.putStoreLocked(store)
}
// setStoreDown simulates an unreachable store: the state stays Up but the
// last heartbeat timestamp is zeroed out.
func (c *testCluster) setStoreDown(storeID uint64) error {
	down := c.GetStore(storeID).Clone(
		core.SetStoreState(metapb.StoreState_Up),
		core.SetLastHeartbeatTS(time.Time{}),
	)
	c.Lock()
	defer c.Unlock()
	return c.putStoreLocked(down)
}
// setStoreOffline transitions a store into the Offline state.
func (c *testCluster) setStoreOffline(storeID uint64) error {
	offline := c.GetStore(storeID).Clone(core.SetStoreState(metapb.StoreState_Offline))
	c.Lock()
	defer c.Unlock()
	return c.putStoreLocked(offline)
}
// LoadRegion simulates a region loaded from etcd: it has peers on the given
// stores but no leader.
func (c *testCluster) LoadRegion(regionID uint64, followerStoreIDs ...uint64) error {
	meta := newTestRegionMeta(regionID)
	meta.Peers = []*metapb.Peer{}
	for _, storeID := range followerStoreIDs {
		peer, _ := c.AllocPeer(storeID)
		meta.Peers = append(meta.Peers, peer)
	}
	return c.putRegion(core.NewRegionInfo(meta, nil))
}
var _ = Suite(&testCoordinatorSuite{})

// testCoordinatorSuite exercises the coordinator's scheduling, checker, and
// scheduler-management behavior. ctx/cancel bound the lifetime of all
// coordinators created by the suite.
type testCoordinatorSuite struct {
	ctx    context.Context
	cancel context.CancelFunc
}
// SetUpSuite creates the suite-wide context and enables the
// unexpectedOperator failpoint so operator anomalies surface in tests.
func (s *testCoordinatorSuite) SetUpSuite(c *C) {
	s.ctx, s.cancel = context.WithCancel(context.Background())
	c.Assert(failpoint.Enable("github.com/pingcap/pd/v4/server/schedule/unexpectedOperator", "return(true)"), IsNil)
}
// TearDownSuite cancels the suite-wide context.
func (s *testCoordinatorSuite) TearDownSuite(c *C) {
	s.cancel()
}
// TestBasic verifies that a region holds at most one operator at a time:
// adding a second operator for the same region is rejected until the first
// one is removed.
func (s *testCoordinatorSuite) TestBasic(c *C) {
	tc, co, cleanup := prepare(nil, nil, nil, c)
	defer cleanup()
	oc := co.opController
	c.Assert(tc.addLeaderRegion(1, 1), IsNil)
	op1 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpLeader)
	oc.AddWaitingOperator(op1)
	c.Assert(oc.OperatorCount(op1.Kind()), Equals, uint64(1))
	c.Assert(oc.GetOperator(1).RegionID(), Equals, op1.RegionID())
	// Region 1 already has an operator, cannot add another one.
	op2 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpRegion)
	oc.AddWaitingOperator(op2)
	c.Assert(oc.OperatorCount(op2.Kind()), Equals, uint64(0))
	// Remove the operator manually, then we can add a new operator.
	c.Assert(oc.RemoveOperator(op1), IsTrue)
	op3 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpRegion)
	oc.AddWaitingOperator(op3)
	c.Assert(oc.OperatorCount(op3.Kind()), Equals, uint64(1))
	c.Assert(oc.GetOperator(1).RegionID(), Equals, op3.RegionID())
}
// TestDispatch verifies that the balance schedulers produce the expected
// transfer-peer and transfer-leader operators and that the coordinator
// drives each operator to completion through simulated heartbeats.
// The heartbeat/wait sequence below mirrors the operator's step order, so
// statement order is significant.
func (s *testCoordinatorSuite) TestDispatch(c *C) {
	tc, co, cleanup := prepare(nil, nil, func(co *coordinator) { co.run() }, c)
	defer cleanup()
	// Transfer peer from store 4 to store 1.
	c.Assert(tc.addRegionStore(4, 40), IsNil)
	c.Assert(tc.addRegionStore(3, 30), IsNil)
	c.Assert(tc.addRegionStore(2, 20), IsNil)
	c.Assert(tc.addRegionStore(1, 10), IsNil)
	c.Assert(tc.addLeaderRegion(1, 2, 3, 4), IsNil)
	// Transfer leader from store 4 to store 2.
	c.Assert(tc.updateLeaderCount(4, 50), IsNil)
	c.Assert(tc.updateLeaderCount(3, 50), IsNil)
	c.Assert(tc.updateLeaderCount(2, 20), IsNil)
	c.Assert(tc.updateLeaderCount(1, 10), IsNil)
	c.Assert(tc.addLeaderRegion(2, 4, 3, 2), IsNil)
	// Wait for schedule and turn off balance.
	waitOperator(c, co, 1)
	testutil.CheckTransferPeer(c, co.opController.GetOperator(1), operator.OpBalance, 4, 1)
	c.Assert(co.removeScheduler(schedulers.BalanceRegionName), IsNil)
	waitOperator(c, co, 2)
	testutil.CheckTransferLeader(c, co.opController.GetOperator(2), operator.OpBalance, 4, 2)
	c.Assert(co.removeScheduler(schedulers.BalanceLeaderName), IsNil)
	stream := mockhbstream.NewHeartbeatStream()
	// Transfer peer: add learner, promote it, then remove the old peer.
	region := tc.GetRegion(1).Clone()
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	region = waitAddLearner(c, stream, region, 1)
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	region = waitPromoteLearner(c, stream, region, 1)
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	region = waitRemovePeer(c, stream, region, 4)
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	waitNoResponse(c, stream)
	// Transfer leader.
	region = tc.GetRegion(2).Clone()
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	waitTransferLeader(c, stream, region, 2)
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	waitNoResponse(c, stream)
}
// dispatchHeartbeat simulates a region heartbeat: it binds the given stream
// to the region leader's store, writes a clone of the region into the
// cluster, and asks the operator controller to dispatch against it.
func dispatchHeartbeat(co *coordinator, region *core.RegionInfo, stream opt.HeartbeatStream) error {
	co.hbStreams.BindStream(region.GetLeader().GetStoreId(), stream)
	err := co.cluster.putRegion(region.Clone())
	if err != nil {
		return err
	}
	co.opController.Dispatch(region, schedule.DispatchFromHeartBeat)
	return nil
}
// TestCollectMetrics checks that metrics collection and reset are safe
// while stores are being written concurrently (run with -race to get the
// full benefit of this test).
func (s *testCoordinatorSuite) TestCollectMetrics(c *C) {
	tc, co, cleanup := prepare(nil, func(tc *testCluster) {
		tc.regionStats = statistics.NewRegionStatistics(tc.GetOpt())
	}, func(co *coordinator) { co.run() }, c)
	defer cleanup()
	// Make sure there are no problem when concurrent write and read
	var wg sync.WaitGroup
	count := 10
	// The loop below runs i = 0..count inclusive, i.e. count+1 goroutines.
	wg.Add(count + 1)
	for i := 0; i <= count; i++ {
		go func(i int) {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				c.Assert(tc.addRegionStore(uint64(i%5), rand.Intn(200)), IsNil)
			}
		}(i)
	}
	for i := 0; i < 1000; i++ {
		co.collectHotSpotMetrics()
		co.collectSchedulerMetrics()
		co.cluster.collectClusterMetrics()
	}
	co.resetHotSpotMetrics()
	co.resetSchedulerMetrics()
	co.cluster.resetClusterMetrics()
	wg.Wait()
}
// MaxUint64 returns the largest of the given uint64 values, or 0 when
// called with no arguments.
func MaxUint64(nums ...uint64) uint64 {
	var largest uint64
	for _, n := range nums {
		if n > largest {
			largest = n
		}
	}
	return largest
}
// prepare builds a test cluster plus coordinator for a single test case.
// setCfg (optional) tweaks the schedule config before the cluster is built,
// setTc (optional) adjusts the freshly created cluster, and run (optional)
// is invoked with the new coordinator — typically to start it. The returned
// cleanup function stops the coordinator, waits for its goroutines, closes
// the heartbeat streams, and cancels the context; callers must defer it.
func prepare(setCfg func(*config.ScheduleConfig), setTc func(*testCluster), run func(*coordinator), c *C) (*testCluster, *coordinator, func()) {
	ctx, cancel := context.WithCancel(context.Background())
	cfg, opt, err := newTestScheduleConfig()
	c.Assert(err, IsNil)
	if setCfg != nil {
		setCfg(cfg)
	}
	tc := newTestCluster(opt)
	hbStreams := mockhbstream.NewHeartbeatStreams(tc.getClusterID(), false /* need to run */)
	if setTc != nil {
		setTc(tc)
	}
	tc.RaftCluster.configCheck = true
	co := newCoordinator(ctx, tc.RaftCluster, hbStreams)
	if run != nil {
		run(co)
	}
	return tc, co, func() {
		co.stop()
		co.wg.Wait()
		hbStreams.Close()
		cancel()
	}
}
// checkRegion runs the checkers against the given region and verifies both
// the reported busy flag and the number of operators actually queued.
func (s *testCoordinatorSuite) checkRegion(c *C, tc *testCluster, co *coordinator, regionID uint64, expectCheckerIsBusy bool, expectAddOperator int) {
	busy, ops := co.checkers.CheckRegion(tc.GetRegion(regionID))
	c.Assert(busy, Equals, expectCheckerIsBusy)
	if ops == nil {
		c.Assert(expectAddOperator, Equals, 0)
		return
	}
	c.Assert(co.opController.AddWaitingOperator(ops...), Equals, expectAddOperator)
}
// TestCheckRegion verifies the replica checker's behavior around pending
// learners: a region short one replica gets an add-peer operator; while the
// added learner is pending no further operator is produced; and after a
// restart (fresh cluster + coordinator) a non-pending learner yields a
// single promote-learner operator.
func (s *testCoordinatorSuite) TestCheckRegion(c *C) {
	tc, co, cleanup := prepare(nil, nil, func(co *coordinator) { co.run() }, c)
	hbStreams, opt := co.hbStreams, tc.opt
	defer cleanup()
	c.Assert(tc.addRegionStore(4, 4), IsNil)
	c.Assert(tc.addRegionStore(3, 3), IsNil)
	c.Assert(tc.addRegionStore(2, 2), IsNil)
	c.Assert(tc.addRegionStore(1, 1), IsNil)
	c.Assert(tc.addLeaderRegion(1, 2, 3), IsNil)
	s.checkRegion(c, tc, co, 1, false, 1)
	waitOperator(c, co, 1)
	testutil.CheckAddPeer(c, co.opController.GetOperator(1), operator.OpReplica, 1)
	s.checkRegion(c, tc, co, 1, false, 0)
	r := tc.GetRegion(1)
	p := &metapb.Peer{Id: 1, StoreId: 1, IsLearner: true}
	r = r.Clone(
		core.WithAddPeer(p),
		core.WithPendingPeers(append(r.GetPendingPeers(), p)),
	)
	c.Assert(tc.putRegion(r), IsNil)
	s.checkRegion(c, tc, co, 1, false, 0)
	// Simulate a restart with a brand-new cluster and coordinator.
	co.stop()
	co.wg.Wait()
	tc = newTestCluster(opt)
	tc.configCheck = true
	co = newCoordinator(s.ctx, tc.RaftCluster, hbStreams)
	co.run()
	c.Assert(tc.addRegionStore(4, 4), IsNil)
	c.Assert(tc.addRegionStore(3, 3), IsNil)
	c.Assert(tc.addRegionStore(2, 2), IsNil)
	c.Assert(tc.addRegionStore(1, 1), IsNil)
	c.Assert(tc.putRegion(r), IsNil)
	s.checkRegion(c, tc, co, 1, false, 0)
	// Once the learner is no longer pending, it should be promoted.
	r = r.Clone(core.WithPendingPeers(nil))
	c.Assert(tc.putRegion(r), IsNil)
	s.checkRegion(c, tc, co, 1, false, 1)
	waitOperator(c, co, 1)
	op := co.opController.GetOperator(1)
	c.Assert(op.Len(), Equals, 1)
	c.Assert(op.Step(0).(operator.PromoteLearner).ToStore, Equals, uint64(1))
	s.checkRegion(c, tc, co, 1, false, 0)
}
// TestCheckerIsBusy fills the operator controller past both the replica and
// merge schedule limits and then asserts that CheckRegion reports the
// checker as busy and adds no operator.
func (s *testCoordinatorSuite) TestCheckerIsBusy(c *C) {
	tc, co, cleanup := prepare(func(cfg *config.ScheduleConfig) {
		cfg.ReplicaScheduleLimit = 0 // ensure replica checker is busy
		cfg.MergeScheduleLimit = 10
	}, nil, func(co *coordinator) { co.run() }, c)
	defer cleanup()
	c.Assert(tc.addRegionStore(1, 0), IsNil)
	num := 1 + MaxUint64(co.cluster.GetReplicaScheduleLimit(), co.cluster.GetMergeScheduleLimit())
	var operatorKinds = []operator.OpKind{
		operator.OpReplica, operator.OpRegion | operator.OpMerge,
	}
	for i, operatorKind := range operatorKinds {
		for j := uint64(0); j < num; j++ {
			regionID := j + uint64(i+1)*num
			c.Assert(tc.addLeaderRegion(regionID, 1), IsNil)
			switch operatorKind {
			case operator.OpReplica:
				op := newTestOperator(regionID, tc.GetRegion(regionID).GetRegionEpoch(), operatorKind)
				c.Assert(co.opController.AddWaitingOperator(op), Equals, 1)
			case operator.OpRegion | operator.OpMerge:
				// Merge operators come in pairs, so only create one per
				// odd/even region couple.
				if regionID%2 == 1 {
					ops, err := operator.CreateMergeRegionOperator("merge-region", co.cluster, tc.GetRegion(regionID), tc.GetRegion(regionID-1), operator.OpMerge)
					c.Assert(err, IsNil)
					c.Assert(co.opController.AddWaitingOperator(ops...), Equals, len(ops))
				}
			}
		}
	}
	s.checkRegion(c, tc, co, num, true, 0)
}
// TestReplica verifies replica-checker repairs with balance disabled:
// adding a missing peer, replacing a down peer, removing a surplus peer,
// and removing an offline peer directly while it is pending. The heartbeat
// sequences mirror the operator steps, so statement order matters.
func (s *testCoordinatorSuite) TestReplica(c *C) {
	tc, co, cleanup := prepare(func(cfg *config.ScheduleConfig) {
		// Turn off balance.
		cfg.LeaderScheduleLimit = 0
		cfg.RegionScheduleLimit = 0
	}, nil, func(co *coordinator) { co.run() }, c)
	defer cleanup()
	c.Assert(tc.addRegionStore(1, 1), IsNil)
	c.Assert(tc.addRegionStore(2, 2), IsNil)
	c.Assert(tc.addRegionStore(3, 3), IsNil)
	c.Assert(tc.addRegionStore(4, 4), IsNil)
	stream := mockhbstream.NewHeartbeatStream()
	// Add peer to store 1.
	c.Assert(tc.addLeaderRegion(1, 2, 3), IsNil)
	region := tc.GetRegion(1)
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	region = waitAddLearner(c, stream, region, 1)
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	region = waitPromoteLearner(c, stream, region, 1)
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	waitNoResponse(c, stream)
	// Peer in store 3 is down, remove peer in store 3 and add peer to store 4.
	c.Assert(tc.setStoreDown(3), IsNil)
	downPeer := &pdpb.PeerStats{
		Peer:        region.GetStorePeer(3),
		DownSeconds: 24 * 60 * 60,
	}
	region = region.Clone(
		core.WithDownPeers(append(region.GetDownPeers(), downPeer)),
	)
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	region = waitAddLearner(c, stream, region, 4)
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	region = waitPromoteLearner(c, stream, region, 4)
	region = region.Clone(core.WithDownPeers(nil))
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	waitNoResponse(c, stream)
	// Remove peer from store 4.
	c.Assert(tc.addLeaderRegion(2, 1, 2, 3, 4), IsNil)
	region = tc.GetRegion(2)
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	region = waitRemovePeer(c, stream, region, 4)
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	waitNoResponse(c, stream)
	// Remove offline peer directly when it's pending.
	c.Assert(tc.addLeaderRegion(3, 1, 2, 3), IsNil)
	c.Assert(tc.setStoreOffline(3), IsNil)
	region = tc.GetRegion(3)
	region = region.Clone(core.WithPendingPeers([]*metapb.Peer{region.GetStorePeer(3)}))
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	waitNoResponse(c, stream)
}
// TestPeerState verifies that a transfer-peer operator stalls while the
// newly added peer is pending and resumes (removing the old peer) once the
// pending state clears.
func (s *testCoordinatorSuite) TestPeerState(c *C) {
	tc, co, cleanup := prepare(nil, nil, func(co *coordinator) { co.run() }, c)
	defer cleanup()
	// Transfer peer from store 4 to store 1.
	c.Assert(tc.addRegionStore(1, 10), IsNil)
	c.Assert(tc.addRegionStore(2, 10), IsNil)
	c.Assert(tc.addRegionStore(3, 10), IsNil)
	c.Assert(tc.addRegionStore(4, 40), IsNil)
	c.Assert(tc.addLeaderRegion(1, 2, 3, 4), IsNil)
	stream := mockhbstream.NewHeartbeatStream()
	// Wait for schedule.
	waitOperator(c, co, 1)
	testutil.CheckTransferPeer(c, co.opController.GetOperator(1), operator.OpBalance, 4, 1)
	region := tc.GetRegion(1).Clone()
	// Add new peer.
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	region = waitAddLearner(c, stream, region, 1)
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	region = waitPromoteLearner(c, stream, region, 1)
	// If the new peer is pending, the operator will not finish.
	region = region.Clone(core.WithPendingPeers(append(region.GetPendingPeers(), region.GetStorePeer(1))))
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	waitNoResponse(c, stream)
	c.Assert(co.opController.GetOperator(region.GetID()), NotNil)
	// The new peer is not pending now, the operator will finish.
	// And we will proceed to remove peer in store 4.
	region = region.Clone(core.WithPendingPeers(nil))
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	waitRemovePeer(c, stream, region, 4)
	c.Assert(tc.addLeaderRegion(1, 1, 2, 3), IsNil)
	region = tc.GetRegion(1).Clone()
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	waitNoResponse(c, stream)
}
// TestShouldRun verifies the coordinator's prepare checker: scheduling only
// starts once enough regions have reported a leader via heartbeat, and the
// prepare checker's collected-region sum matches.
func (s *testCoordinatorSuite) TestShouldRun(c *C) {
	tc, co, cleanup := prepare(nil, nil, nil, c)
	defer cleanup()
	c.Assert(tc.addLeaderStore(1, 5), IsNil)
	c.Assert(tc.addLeaderStore(2, 2), IsNil)
	c.Assert(tc.addLeaderStore(3, 0), IsNil)
	c.Assert(tc.addLeaderStore(4, 0), IsNil)
	c.Assert(tc.LoadRegion(1, 1, 2, 3), IsNil)
	c.Assert(tc.LoadRegion(2, 1, 2, 3), IsNil)
	c.Assert(tc.LoadRegion(3, 1, 2, 3), IsNil)
	c.Assert(tc.LoadRegion(4, 1, 2, 3), IsNil)
	c.Assert(tc.LoadRegion(5, 1, 2, 3), IsNil)
	c.Assert(tc.LoadRegion(6, 2, 1, 4), IsNil)
	c.Assert(tc.LoadRegion(7, 2, 1, 4), IsNil)
	c.Assert(co.shouldRun(), IsFalse)
	c.Assert(tc.core.Regions.GetStoreRegionCount(4), Equals, 2)
	tbl := []struct {
		regionID  uint64
		shouldRun bool
	}{
		{1, false},
		{2, false},
		{3, false},
		{4, false},
		{5, false},
		// store4 needs collect two region
		{6, false},
		{7, true},
	}
	for _, t := range tbl {
		r := tc.GetRegion(t.regionID)
		nr := r.Clone(core.WithLeader(r.GetPeers()[0]))
		c.Assert(tc.processRegionHeartbeat(nr), IsNil)
		c.Assert(co.shouldRun(), Equals, t.shouldRun)
	}
	// A region with no peers must be rejected by the heartbeat handler.
	nr := &metapb.Region{Id: 6, Peers: []*metapb.Peer{}}
	newRegion := core.NewRegionInfo(nr, nil)
	c.Assert(tc.processRegionHeartbeat(newRegion), NotNil)
	c.Assert(co.cluster.prepareChecker.sum, Equals, 7)
}
// TestShouldRunWithNonLeaderRegions verifies that the prepare checker can
// be satisfied even when some loaded regions never report a leader: after
// enough leader heartbeats the coordinator runs, while the leaderless
// regions remain with store ID 0.
func (s *testCoordinatorSuite) TestShouldRunWithNonLeaderRegions(c *C) {
	tc, co, cleanup := prepare(nil, nil, nil, c)
	defer cleanup()
	c.Assert(tc.addLeaderStore(1, 10), IsNil)
	c.Assert(tc.addLeaderStore(2, 0), IsNil)
	c.Assert(tc.addLeaderStore(3, 0), IsNil)
	for i := 0; i < 10; i++ {
		c.Assert(tc.LoadRegion(uint64(i+1), 1, 2, 3), IsNil)
	}
	c.Assert(co.shouldRun(), IsFalse)
	c.Assert(tc.core.Regions.GetStoreRegionCount(1), Equals, 10)
	tbl := []struct {
		regionID  uint64
		shouldRun bool
	}{
		{1, false},
		{2, false},
		{3, false},
		{4, false},
		{5, false},
		{6, false},
		{7, false},
		{8, true},
	}
	for _, t := range tbl {
		r := tc.GetRegion(t.regionID)
		nr := r.Clone(core.WithLeader(r.GetPeers()[0]))
		c.Assert(tc.processRegionHeartbeat(nr), IsNil)
		c.Assert(co.shouldRun(), Equals, t.shouldRun)
	}
	// A region with no peers must be rejected by the heartbeat handler.
	nr := &metapb.Region{Id: 8, Peers: []*metapb.Peer{}}
	newRegion := core.NewRegionInfo(nr, nil)
	c.Assert(tc.processRegionHeartbeat(newRegion), NotNil)
	c.Assert(co.cluster.prepareChecker.sum, Equals, 8)
	// Now, after server is prepared, there exist some regions with no leader.
	c.Assert(tc.GetRegion(9).GetLeader().GetStoreId(), Equals, uint64(0))
	c.Assert(tc.GetRegion(10).GetLeader().GetStoreId(), Equals, uint64(0))
}
// TestAddScheduler verifies dynamic scheduler management: removing all
// default schedulers, rejecting a grant-leader scheduler targeting a
// nonexistent store ("0"), accepting one targeting store 1, and then
// observing the resulting leader transfers via heartbeats.
func (s *testCoordinatorSuite) TestAddScheduler(c *C) {
	tc, co, cleanup := prepare(nil, nil, func(co *coordinator) { co.run() }, c)
	defer cleanup()
	c.Assert(co.schedulers, HasLen, 4)
	c.Assert(co.removeScheduler(schedulers.BalanceLeaderName), IsNil)
	c.Assert(co.removeScheduler(schedulers.BalanceRegionName), IsNil)
	c.Assert(co.removeScheduler(schedulers.HotRegionName), IsNil)
	c.Assert(co.removeScheduler(schedulers.LabelName), IsNil)
	c.Assert(co.schedulers, HasLen, 0)
	stream := mockhbstream.NewHeartbeatStream()
	// Add stores 1,2,3
	c.Assert(tc.addLeaderStore(1, 1), IsNil)
	c.Assert(tc.addLeaderStore(2, 1), IsNil)
	c.Assert(tc.addLeaderStore(3, 1), IsNil)
	// Add regions 1 with leader in store 1 and followers in stores 2,3
	c.Assert(tc.addLeaderRegion(1, 1, 2, 3), IsNil)
	// Add regions 2 with leader in store 2 and followers in stores 1,3
	c.Assert(tc.addLeaderRegion(2, 2, 1, 3), IsNil)
	// Add regions 3 with leader in store 3 and followers in stores 1,2
	c.Assert(tc.addLeaderRegion(3, 3, 1, 2), IsNil)
	oc := co.opController
	// Store "0" does not exist, so adding/removing this scheduler must fail.
	gls, err := schedule.CreateScheduler(schedulers.GrantLeaderType, oc, core.NewStorage(kv.NewMemoryKV()), schedule.ConfigSliceDecoder(schedulers.GrantLeaderType, []string{"0"}))
	c.Assert(err, IsNil)
	c.Assert(co.addScheduler(gls), NotNil)
	c.Assert(co.removeScheduler(gls.GetName()), NotNil)
	gls, err = schedule.CreateScheduler(schedulers.GrantLeaderType, oc, core.NewStorage(kv.NewMemoryKV()), schedule.ConfigSliceDecoder(schedulers.GrantLeaderType, []string{"1"}))
	c.Assert(err, IsNil)
	c.Assert(co.addScheduler(gls), IsNil)
	// Transfer all leaders to store 1.
	waitOperator(c, co, 2)
	region2 := tc.GetRegion(2)
	c.Assert(dispatchHeartbeat(co, region2, stream), IsNil)
	region2 = waitTransferLeader(c, stream, region2, 1)
	c.Assert(dispatchHeartbeat(co, region2, stream), IsNil)
	waitNoResponse(c, stream)
	waitOperator(c, co, 3)
	region3 := tc.GetRegion(3)
	c.Assert(dispatchHeartbeat(co, region3, stream), IsNil)
	region3 = waitTransferLeader(c, stream, region3, 1)
	c.Assert(dispatchHeartbeat(co, region3, stream), IsNil)
	waitNoResponse(c, stream)
}
// TestPersistScheduler verifies that dynamically added/removed schedulers
// are persisted to storage and correctly restored across several simulated
// PD restarts (each restart rebuilds the coordinator from reloaded options).
func (s *testCoordinatorSuite) TestPersistScheduler(c *C) {
	tc, co, cleanup := prepare(nil, nil, func(co *coordinator) { co.run() }, c)
	hbStreams := co.hbStreams
	defer cleanup()
	// Add stores 1,2
	c.Assert(tc.addLeaderStore(1, 1), IsNil)
	c.Assert(tc.addLeaderStore(2, 1), IsNil)
	c.Assert(co.schedulers, HasLen, 4)
	oc := co.opController
	storage := tc.RaftCluster.storage
	gls1, err := schedule.CreateScheduler(schedulers.GrantLeaderType, oc, storage, schedule.ConfigSliceDecoder(schedulers.GrantLeaderType, []string{"1"}))
	c.Assert(err, IsNil)
	c.Assert(co.addScheduler(gls1, "1"), IsNil)
	evict, err := schedule.CreateScheduler(schedulers.EvictLeaderType, oc, storage, schedule.ConfigSliceDecoder(schedulers.EvictLeaderType, []string{"2"}))
	c.Assert(err, IsNil)
	c.Assert(co.addScheduler(evict, "2"), IsNil)
	c.Assert(co.schedulers, HasLen, 6)
	sches, _, err := storage.LoadAllScheduleConfig()
	c.Assert(err, IsNil)
	c.Assert(sches, HasLen, 6)
	c.Assert(co.removeScheduler(schedulers.BalanceLeaderName), IsNil)
	c.Assert(co.removeScheduler(schedulers.BalanceRegionName), IsNil)
	c.Assert(co.removeScheduler(schedulers.HotRegionName), IsNil)
	c.Assert(co.removeScheduler(schedulers.LabelName), IsNil)
	c.Assert(co.schedulers, HasLen, 2)
	c.Assert(co.cluster.opt.Persist(storage), IsNil)
	co.stop()
	co.wg.Wait()
	// make a new coordinator for testing
	// whether the schedulers added or removed in dynamic way are recorded in opt
	_, newOpt, err := newTestScheduleConfig()
	c.Assert(err, IsNil)
	_, err = schedule.CreateScheduler(schedulers.AdjacentRegionType, oc, storage, schedule.ConfigJSONDecoder([]byte("null")))
	c.Assert(err, IsNil)
	// suppose we add a new default enable scheduler
	newOpt.AddSchedulerCfg(schedulers.AdjacentRegionType, []string{})
	c.Assert(newOpt.GetSchedulers(), HasLen, 5)
	c.Assert(newOpt.Reload(storage), IsNil)
	// only remains 3 items with independent config.
	sches, _, err = storage.LoadAllScheduleConfig()
	c.Assert(err, IsNil)
	c.Assert(sches, HasLen, 3)
	// option have 7 items because the default scheduler do not remove.
	c.Assert(newOpt.GetSchedulers(), HasLen, 7)
	c.Assert(newOpt.Persist(storage), IsNil)
	tc.RaftCluster.opt = newOpt
	co = newCoordinator(s.ctx, tc.RaftCluster, hbStreams)
	co.run()
	c.Assert(co.schedulers, HasLen, 3)
	co.stop()
	co.wg.Wait()
	// suppose restart PD again
	_, newOpt, err = newTestScheduleConfig()
	c.Assert(err, IsNil)
	c.Assert(newOpt.Reload(storage), IsNil)
	tc.RaftCluster.opt = newOpt
	co = newCoordinator(s.ctx, tc.RaftCluster, hbStreams)
	co.run()
	storage = tc.RaftCluster.storage
	c.Assert(co.schedulers, HasLen, 3)
	bls, err := schedule.CreateScheduler(schedulers.BalanceLeaderType, oc, storage, schedule.ConfigSliceDecoder(schedulers.BalanceLeaderType, []string{"", ""}))
	c.Assert(err, IsNil)
	c.Assert(co.addScheduler(bls), IsNil)
	brs, err := schedule.CreateScheduler(schedulers.BalanceRegionType, oc, storage, schedule.ConfigSliceDecoder(schedulers.BalanceRegionType, []string{"", ""}))
	c.Assert(err, IsNil)
	c.Assert(co.addScheduler(brs), IsNil)
	c.Assert(co.schedulers, HasLen, 5)
	// the scheduler option should contain 7 items
	// the `hot scheduler` and `label scheduler` are disabled
	c.Assert(co.cluster.opt.GetSchedulers(), HasLen, 7)
	c.Assert(co.removeScheduler(schedulers.GrantLeaderName), IsNil)
	// the scheduler that is not enable by default will be completely deleted
	c.Assert(co.cluster.opt.GetSchedulers(), HasLen, 6)
	c.Assert(co.schedulers, HasLen, 4)
	c.Assert(co.cluster.opt.Persist(co.cluster.storage), IsNil)
	co.stop()
	co.wg.Wait()
	_, newOpt, err = newTestScheduleConfig()
	c.Assert(err, IsNil)
	c.Assert(newOpt.Reload(co.cluster.storage), IsNil)
	tc.RaftCluster.opt = newOpt
	co = newCoordinator(s.ctx, tc.RaftCluster, hbStreams)
	co.run()
	c.Assert(co.schedulers, HasLen, 4)
	c.Assert(co.removeScheduler(schedulers.EvictLeaderName), IsNil)
	c.Assert(co.schedulers, HasLen, 3)
}
// TestRemoveScheduler verifies that removing every scheduler clears all
// persisted scheduler configs, and that after a simulated restart the
// coordinator starts with no schedulers while the options still carry the
// default scheduler entries.
func (s *testCoordinatorSuite) TestRemoveScheduler(c *C) {
	tc, co, cleanup := prepare(func(cfg *config.ScheduleConfig) {
		cfg.ReplicaScheduleLimit = 0
	}, nil, func(co *coordinator) { co.run() }, c)
	hbStreams := co.hbStreams
	defer cleanup()
	// Add stores 1,2
	c.Assert(tc.addLeaderStore(1, 1), IsNil)
	c.Assert(tc.addLeaderStore(2, 1), IsNil)
	c.Assert(co.schedulers, HasLen, 4)
	oc := co.opController
	storage := tc.RaftCluster.storage
	gls1, err := schedule.CreateScheduler(schedulers.GrantLeaderType, oc, storage, schedule.ConfigSliceDecoder(schedulers.GrantLeaderType, []string{"1"}))
	c.Assert(err, IsNil)
	c.Assert(co.addScheduler(gls1, "1"), IsNil)
	c.Assert(co.schedulers, HasLen, 5)
	sches, _, err := storage.LoadAllScheduleConfig()
	c.Assert(err, IsNil)
	c.Assert(sches, HasLen, 5)
	// remove all schedulers
	c.Assert(co.removeScheduler(schedulers.BalanceLeaderName), IsNil)
	c.Assert(co.removeScheduler(schedulers.BalanceRegionName), IsNil)
	c.Assert(co.removeScheduler(schedulers.HotRegionName), IsNil)
	c.Assert(co.removeScheduler(schedulers.LabelName), IsNil)
	c.Assert(co.removeScheduler(schedulers.GrantLeaderName), IsNil)
	// all removed
	sches, _, err = storage.LoadAllScheduleConfig()
	c.Assert(err, IsNil)
	c.Assert(sches, HasLen, 0)
	c.Assert(co.schedulers, HasLen, 0)
	c.Assert(co.cluster.opt.Persist(co.cluster.storage), IsNil)
	co.stop()
	co.wg.Wait()
	// suppose restart PD again
	_, newOpt, err := newTestScheduleConfig()
	c.Assert(err, IsNil)
	c.Assert(newOpt.Reload(tc.storage), IsNil)
	tc.RaftCluster.opt = newOpt
	co = newCoordinator(s.ctx, tc.RaftCluster, hbStreams)
	co.run()
	c.Assert(co.schedulers, HasLen, 0)
	// the option remains default scheduler
	c.Assert(co.cluster.opt.GetSchedulers(), HasLen, 4)
	co.stop()
	co.wg.Wait()
}
// TestRestart verifies that a recreated coordinator resumes replica repair:
// one replica is added before the restart and another one after it, with
// balance disabled so only the replica checker is active.
func (s *testCoordinatorSuite) TestRestart(c *C) {
	tc, co, cleanup := prepare(func(cfg *config.ScheduleConfig) {
		// Turn off balance, we test add replica only.
		cfg.LeaderScheduleLimit = 0
		cfg.RegionScheduleLimit = 0
	}, nil, func(co *coordinator) { co.run() }, c)
	hbStreams := co.hbStreams
	defer cleanup()
	// Add 3 stores (1, 2, 3) and a region with 1 replica on store 1.
	c.Assert(tc.addRegionStore(1, 1), IsNil)
	c.Assert(tc.addRegionStore(2, 2), IsNil)
	c.Assert(tc.addRegionStore(3, 3), IsNil)
	c.Assert(tc.addLeaderRegion(1, 1), IsNil)
	region := tc.GetRegion(1)
	tc.prepareChecker.collect(region)
	// Add 1 replica on store 2.
	co = newCoordinator(s.ctx, tc.RaftCluster, hbStreams)
	co.run()
	stream := mockhbstream.NewHeartbeatStream()
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	region = waitAddLearner(c, stream, region, 2)
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	region = waitPromoteLearner(c, stream, region, 2)
	co.stop()
	co.wg.Wait()
	// Recreate coodinator then add another replica on store 3.
	co = newCoordinator(s.ctx, tc.RaftCluster, hbStreams)
	co.run()
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	region = waitAddLearner(c, stream, region, 3)
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	waitPromoteLearner(c, stream, region, 3)
}
// BenchmarkPatrolRegion measures a single patrolRegions pass over 10000
// regions with a high merge limit. A watcher goroutine busy-polls the
// operator count and cancels the coordinator once the merge limit is
// reached, so patrolRegions returns; co.wg.Add(1) keeps the later cleanup's
// wg.Wait() balanced after the cancel.
// NOTE(review): the watcher spins without yielding — presumably acceptable
// for a benchmark, but a tight loop like this can starve a single-core run.
func BenchmarkPatrolRegion(b *testing.B) {
	mergeLimit := uint64(4100)
	regionNum := 10000
	tc, co, cleanup := prepare(func(cfg *config.ScheduleConfig) {
		cfg.MergeScheduleLimit = mergeLimit
	}, nil, nil, &C{})
	defer cleanup()
	tc.opt.SetSplitMergeInterval(time.Duration(0))
	for i := 1; i < 4; i++ {
		if err := tc.addRegionStore(uint64(i), regionNum, 96); err != nil {
			return
		}
	}
	for i := 0; i < regionNum; i++ {
		if err := tc.addLeaderRegion(uint64(i), 1, 2, 3); err != nil {
			return
		}
	}
	listen := make(chan int)
	go func() {
		oc := co.opController
		listen <- 0
		for {
			if oc.OperatorCount(operator.OpMerge) == mergeLimit {
				co.cancel()
				co.wg.Add(1)
				return
			}
		}
	}()
	<-listen
	b.ResetTimer()
	co.patrolRegions()
}
// waitOperator blocks until an operator for the given region appears in the
// operator controller (or the wait helper fails the test on timeout).
func waitOperator(c *C, co *coordinator, regionID uint64) {
	testutil.WaitUntil(c, func(c *C) bool {
		op := co.opController.GetOperator(regionID)
		return op != nil
	})
}
var _ = Suite(&testOperatorControllerSuite{})

// testOperatorControllerSuite exercises the operator controller's counting
// and store-limit behavior. ctx/cancel bound the suite's lifetime.
type testOperatorControllerSuite struct {
	ctx    context.Context
	cancel context.CancelFunc
}
// SetUpSuite creates the suite-wide context and enables the
// unexpectedOperator failpoint so operator anomalies surface in tests.
func (s *testOperatorControllerSuite) SetUpSuite(c *C) {
	s.ctx, s.cancel = context.WithCancel(context.Background())
	c.Assert(failpoint.Enable("github.com/pingcap/pd/v4/server/schedule/unexpectedOperator", "return(true)"), IsNil)
}
// TearDownSuite cancels the suite-wide context.
func (s *testOperatorControllerSuite) TearDownSuite(c *C) {
	s.cancel()
}
// TestOperatorCount verifies per-kind operator counting in the controller,
// including the case where a high-priority region operator replaces an
// existing leader operator on the same region.
func (s *testOperatorControllerSuite) TestOperatorCount(c *C) {
	tc, co, cleanup := prepare(nil, nil, nil, c)
	defer cleanup()
	oc := co.opController
	c.Assert(oc.OperatorCount(operator.OpLeader), Equals, uint64(0))
	c.Assert(oc.OperatorCount(operator.OpRegion), Equals, uint64(0))
	c.Assert(tc.addLeaderRegion(1, 1), IsNil)
	c.Assert(tc.addLeaderRegion(2, 2), IsNil)
	{
		op1 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpLeader)
		oc.AddWaitingOperator(op1)
		c.Assert(oc.OperatorCount(operator.OpLeader), Equals, uint64(1)) // 1:leader
		op2 := newTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpLeader)
		oc.AddWaitingOperator(op2)
		c.Assert(oc.OperatorCount(operator.OpLeader), Equals, uint64(2)) // 1:leader, 2:leader
		c.Assert(oc.RemoveOperator(op1), IsTrue)
		c.Assert(oc.OperatorCount(operator.OpLeader), Equals, uint64(1)) // 2:leader
	}
	{
		op1 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpRegion)
		oc.AddWaitingOperator(op1)
		c.Assert(oc.OperatorCount(operator.OpRegion), Equals, uint64(1)) // 1:region 2:leader
		c.Assert(oc.OperatorCount(operator.OpLeader), Equals, uint64(1))
		op2 := newTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpRegion)
		op2.SetPriorityLevel(core.HighPriority)
		oc.AddWaitingOperator(op2)
		c.Assert(oc.OperatorCount(operator.OpRegion), Equals, uint64(2)) // 1:region 2:region
		c.Assert(oc.OperatorCount(operator.OpLeader), Equals, uint64(0))
	}
}
// TestStoreOverloaded verifies store-limit throttling: with a balance rate
// of 1 the balance-region scheduler produces exactly one operator before
// the store is considered overloaded; raising the limit to 10 allows ten
// more, and after a one-second refill scheduling succeeds again.
func (s *testOperatorControllerSuite) TestStoreOverloaded(c *C) {
	tc, co, cleanup := prepare(func(cfg *config.ScheduleConfig) {
		// scheduling one time needs 60 seconds
		// and thus it's large enough to make sure that only schedule one time
		cfg.StoreBalanceRate = 1
	}, nil, nil, c)
	defer cleanup()
	oc := co.opController
	lb, err := schedule.CreateScheduler(schedulers.BalanceRegionType, oc, tc.storage, schedule.ConfigSliceDecoder(schedulers.BalanceRegionType, []string{"", ""}))
	c.Assert(err, IsNil)
	c.Assert(tc.addRegionStore(4, 100), IsNil)
	c.Assert(tc.addRegionStore(3, 100), IsNil)
	c.Assert(tc.addRegionStore(2, 100), IsNil)
	c.Assert(tc.addRegionStore(1, 10), IsNil)
	c.Assert(tc.addLeaderRegion(1, 2, 3, 4), IsNil)
	region := tc.GetRegion(1).Clone(core.SetApproximateSize(60))
	tc.putRegion(region)
	{
		op1 := lb.Schedule(tc)[0]
		c.Assert(op1, NotNil)
		c.Assert(oc.AddOperator(op1), IsTrue)
		c.Assert(oc.RemoveOperator(op1), IsTrue)
	}
	// The store's token bucket is exhausted, so scheduling yields nothing.
	for i := 0; i < 10; i++ {
		c.Assert(lb.Schedule(tc), IsNil)
	}
	// reset all stores' limit
	// scheduling one time needs 1/10 seconds
	oc.SetAllStoresLimit(10, schedule.StoreLimitManual)
	for i := 0; i < 10; i++ {
		op1 := lb.Schedule(tc)[0]
		c.Assert(op1, NotNil)
		c.Assert(oc.AddOperator(op1), IsTrue)
		c.Assert(oc.RemoveOperator(op1), IsTrue)
	}
	// sleep 1 seconds to make sure that the token is filled up
	time.Sleep(1 * time.Second)
	for i := 0; i < 100; i++ {
		c.Assert(lb.Schedule(tc), NotNil)
	}
}
// TestStoreOverloadedWithReplace verifies the interaction between the store
// limit and operator replacement: a high-priority operator can still be added
// for a region that already has one, while a plain operator on an exhausted
// store is rejected until the token refills.
func (s *testOperatorControllerSuite) TestStoreOverloadedWithReplace(c *C) {
	tc, co, cleanup := prepare(func(cfg *config.ScheduleConfig) {
		// scheduling one time needs 2 seconds
		cfg.StoreBalanceRate = 30
	}, nil, nil, c)
	defer cleanup()
	oc := co.opController
	lb, err := schedule.CreateScheduler(schedulers.BalanceRegionType, oc, tc.storage, schedule.ConfigSliceDecoder(schedulers.BalanceRegionType, []string{"", ""}))
	c.Assert(err, IsNil)
	c.Assert(tc.addRegionStore(4, 100), IsNil)
	c.Assert(tc.addRegionStore(3, 100), IsNil)
	c.Assert(tc.addRegionStore(2, 100), IsNil)
	c.Assert(tc.addRegionStore(1, 10), IsNil)
	c.Assert(tc.addLeaderRegion(1, 2, 3, 4), IsNil)
	c.Assert(tc.addLeaderRegion(2, 1, 3, 4), IsNil)
	region := tc.GetRegion(1).Clone(core.SetApproximateSize(60))
	tc.putRegion(region)
	region = tc.GetRegion(2).Clone(core.SetApproximateSize(60))
	tc.putRegion(region)
	op1 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpRegion, operator.AddPeer{ToStore: 1, PeerID: 1})
	c.Assert(oc.AddOperator(op1), IsTrue)
	// op2 targets the same region as op1; high priority lets it replace op1.
	op2 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpRegion, operator.AddPeer{ToStore: 2, PeerID: 2})
	op2.SetPriorityLevel(core.HighPriority)
	c.Assert(oc.AddOperator(op2), IsTrue)
	// Store 1's token is exhausted, so op3 is rejected.
	op3 := newTestOperator(1, tc.GetRegion(2).GetRegionEpoch(), operator.OpRegion, operator.AddPeer{ToStore: 1, PeerID: 3})
	c.Assert(oc.AddOperator(op3), IsFalse)
	c.Assert(lb.Schedule(tc), IsNil)
	// sleep 2 seconds to make sure that token is filled up
	time.Sleep(2 * time.Second)
	c.Assert(lb.Schedule(tc), NotNil)
}
// Register the suite with gocheck.
var _ = Suite(&testScheduleControllerSuite{})

// testScheduleControllerSuite exercises the per-scheduler controller:
// AllowSchedule gating and the adaptive schedule interval.
type testScheduleControllerSuite struct {
	ctx    context.Context
	cancel context.CancelFunc
}
// SetUpSuite creates the suite-wide context and enables the
// unexpectedOperator failpoint for the duration of the suite.
func (s *testScheduleControllerSuite) SetUpSuite(c *C) {
	s.ctx, s.cancel = context.WithCancel(context.Background())
	c.Assert(failpoint.Enable("github.com/pingcap/pd/v4/server/schedule/unexpectedOperator", "return(true)"), IsNil)
}
// TearDownSuite cancels the suite context and disables the failpoint enabled
// in SetUpSuite so it does not leak into other test suites in this package.
func (s *testScheduleControllerSuite) TearDownSuite(c *C) {
	s.cancel()
	c.Assert(failpoint.Disable("github.com/pingcap/pd/v4/server/schedule/unexpectedOperator"), IsNil)
}
// FIXME: remove after move into schedulers package
// mockLimitScheduler wraps a real scheduler but only allows scheduling while
// the controller's operator count of `kind` is below `limit`.
type mockLimitScheduler struct {
	schedule.Scheduler
	limit   uint64                       // max concurrent operators of `kind`
	counter *schedule.OperatorController // source of the live operator count
	kind    operator.OpKind
}
// IsScheduleAllowed reports whether the number of pending operators of the
// configured kind is still below this scheduler's limit.
func (s *mockLimitScheduler) IsScheduleAllowed(cluster opt.Cluster) bool {
	pending := s.counter.OperatorCount(s.kind)
	return pending < s.limit
}
// TestController verifies scheduleController behavior end to end: interval
// backoff while nothing schedules, AllowSchedule gating against the operator
// limit, replacement of a normal operator by high-priority/admin operators,
// and rejection of operators with unknown regions or stale epochs.
func (s *testScheduleControllerSuite) TestController(c *C) {
	tc, co, cleanup := prepare(nil, nil, nil, c)
	defer cleanup()
	oc := co.opController
	c.Assert(tc.addLeaderRegion(1, 1), IsNil)
	c.Assert(tc.addLeaderRegion(2, 2), IsNil)
	scheduler, err := schedule.CreateScheduler(schedulers.BalanceLeaderType, oc, core.NewStorage(kv.NewMemoryKV()), schedule.ConfigSliceDecoder(schedulers.BalanceLeaderType, []string{"", ""}))
	c.Assert(err, IsNil)
	// limit starts at 0, so nothing can be scheduled yet.
	lb := &mockLimitScheduler{
		Scheduler: scheduler,
		counter:   oc,
		kind:      operator.OpLeader,
	}
	sc := newScheduleController(co, lb)
	// While Schedule returns nil, the interval grows until it hits the max.
	for i := schedulers.MinScheduleInterval; sc.GetInterval() != schedulers.MaxScheduleInterval; i = sc.GetNextInterval(i) {
		c.Assert(sc.GetInterval(), Equals, i)
		c.Assert(sc.Schedule(), IsNil)
	}
	// limit = 2
	lb.limit = 2
	// count = 0
	{
		c.Assert(sc.AllowSchedule(), IsTrue)
		op1 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpLeader)
		c.Assert(oc.AddWaitingOperator(op1), Equals, 1)
		// count = 1
		c.Assert(sc.AllowSchedule(), IsTrue)
		op2 := newTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpLeader)
		c.Assert(oc.AddWaitingOperator(op2), Equals, 1)
		// count = 2
		c.Assert(sc.AllowSchedule(), IsFalse)
		c.Assert(oc.RemoveOperator(op1), IsTrue)
		// count = 1
		c.Assert(sc.AllowSchedule(), IsTrue)
	}
	op11 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpLeader)
	// add a PriorityKind operator will remove old operator
	{
		op3 := newTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpHotRegion)
		op3.SetPriorityLevel(core.HighPriority)
		c.Assert(oc.AddWaitingOperator(op11), Equals, 1)
		c.Assert(sc.AllowSchedule(), IsFalse)
		c.Assert(oc.AddWaitingOperator(op3), Equals, 1)
		c.Assert(sc.AllowSchedule(), IsTrue)
		c.Assert(oc.RemoveOperator(op3), IsTrue)
	}
	// add a admin operator will remove old operator
	{
		op2 := newTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpLeader)
		c.Assert(oc.AddWaitingOperator(op2), Equals, 1)
		c.Assert(sc.AllowSchedule(), IsFalse)
		op4 := newTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpAdmin)
		op4.SetPriorityLevel(core.HighPriority)
		c.Assert(oc.AddWaitingOperator(op4), Equals, 1)
		c.Assert(sc.AllowSchedule(), IsTrue)
		c.Assert(oc.RemoveOperator(op4), IsTrue)
	}
	// test wrong region id.
	{
		op5 := newTestOperator(3, &metapb.RegionEpoch{}, operator.OpHotRegion)
		c.Assert(oc.AddWaitingOperator(op5), Equals, 0)
	}
	// test wrong region epoch.
	c.Assert(oc.RemoveOperator(op11), IsTrue)
	epoch := &metapb.RegionEpoch{
		Version: tc.GetRegion(1).GetRegionEpoch().GetVersion() + 1,
		ConfVer: tc.GetRegion(1).GetRegionEpoch().GetConfVer(),
	}
	{
		op6 := newTestOperator(1, epoch, operator.OpLeader)
		c.Assert(oc.AddWaitingOperator(op6), Equals, 0)
	}
	epoch.Version--
	{
		op6 := newTestOperator(1, epoch, operator.OpLeader)
		c.Assert(oc.AddWaitingOperator(op6), Equals, 1)
		c.Assert(oc.RemoveOperator(op6), IsTrue)
	}
}
// TestInterval verifies the adaptive schedule interval: while the scheduler
// keeps producing no operators, the interval backs off, so after an idle
// stretch of n seconds the next interval is still below n/2 seconds.
func (s *testScheduleControllerSuite) TestInterval(c *C) {
	_, co, cleanup := prepare(nil, nil, nil, c)
	defer cleanup()
	lb, err := schedule.CreateScheduler(schedulers.BalanceLeaderType, co.opController, core.NewStorage(kv.NewMemoryKV()), schedule.ConfigSliceDecoder(schedulers.BalanceLeaderType, []string{"", ""}))
	c.Assert(err, IsNil)
	sc := newScheduleController(co, lb)
	// If no operator for x seconds, the next check should be in x/2 seconds.
	idleSeconds := []int{5, 10, 20, 30, 60}
	for _, n := range idleSeconds {
		sc.nextInterval = schedulers.MinScheduleInterval
		// Simulate the passage of n idle seconds without actually sleeping.
		for totalSleep := time.Duration(0); totalSleep <= time.Second*time.Duration(n); totalSleep += sc.GetInterval() {
			c.Assert(sc.Schedule(), IsNil)
		}
		c.Assert(sc.GetInterval(), Less, time.Second*time.Duration(n/2))
	}
}
// waitAddLearner blocks until the stream delivers an AddLearnerNode response
// for the given region targeting storeID, then returns the region with that
// learner peer added and the conf version bumped.
func waitAddLearner(c *C, stream mockhbstream.HeartbeatStream, region *core.RegionInfo, storeID uint64) *core.RegionInfo {
	var resp *pdpb.RegionHeartbeatResponse
	testutil.WaitUntil(c, func(c *C) bool {
		resp = stream.Recv()
		if resp == nil || resp.GetRegionId() != region.GetID() {
			return false
		}
		cp := resp.GetChangePeer()
		return cp.GetChangeType() == eraftpb.ConfChangeType_AddLearnerNode &&
			cp.GetPeer().GetStoreId() == storeID
	})
	return region.Clone(
		core.WithAddPeer(resp.GetChangePeer().GetPeer()),
		core.WithIncConfVer(),
	)
}
// waitPromoteLearner blocks until the stream delivers an AddNode response for
// the given region targeting storeID, then returns the region with the
// learner on that store replaced by the promoted voter peer.
func waitPromoteLearner(c *C, stream mockhbstream.HeartbeatStream, region *core.RegionInfo, storeID uint64) *core.RegionInfo {
	var resp *pdpb.RegionHeartbeatResponse
	testutil.WaitUntil(c, func(c *C) bool {
		resp = stream.Recv()
		if resp == nil || resp.GetRegionId() != region.GetID() {
			return false
		}
		cp := resp.GetChangePeer()
		return cp.GetChangeType() == eraftpb.ConfChangeType_AddNode &&
			cp.GetPeer().GetStoreId() == storeID
	})
	// Remove learner than add voter.
	return region.Clone(
		core.WithRemoveStorePeer(storeID),
		core.WithAddPeer(resp.GetChangePeer().GetPeer()),
	)
}
// waitRemovePeer blocks until the stream delivers a RemoveNode response for
// the given region targeting storeID, then returns the region with that
// store's peer removed and the conf version bumped.
func waitRemovePeer(c *C, stream mockhbstream.HeartbeatStream, region *core.RegionInfo, storeID uint64) *core.RegionInfo {
	var resp *pdpb.RegionHeartbeatResponse
	testutil.WaitUntil(c, func(c *C) bool {
		resp = stream.Recv()
		if resp == nil || resp.GetRegionId() != region.GetID() {
			return false
		}
		cp := resp.GetChangePeer()
		return cp.GetChangeType() == eraftpb.ConfChangeType_RemoveNode &&
			cp.GetPeer().GetStoreId() == storeID
	})
	return region.Clone(
		core.WithRemoveStorePeer(storeID),
		core.WithIncConfVer(),
	)
}
// waitTransferLeader blocks until the stream delivers a transfer-leader
// response for the given region targeting storeID, then returns the region
// with its leader switched to that peer.
func waitTransferLeader(c *C, stream mockhbstream.HeartbeatStream, region *core.RegionInfo, storeID uint64) *core.RegionInfo {
	var resp *pdpb.RegionHeartbeatResponse
	testutil.WaitUntil(c, func(c *C) bool {
		resp = stream.Recv()
		if resp == nil {
			return false
		}
		return resp.GetRegionId() == region.GetID() &&
			resp.GetTransferLeader().GetPeer().GetStoreId() == storeID
	})
	return region.Clone(
		core.WithLeader(resp.GetTransferLeader().GetPeer()),
	)
}
// waitNoResponse waits until the stream yields no pending response,
// i.e. the coordinator has nothing more to dispatch for the region.
func waitNoResponse(c *C, stream mockhbstream.HeartbeatStream) {
	testutil.WaitUntil(c, func(c *C) bool {
		return stream.Recv() == nil
	})
}
cluster: fix TestDispatch (#2315)
Signed-off-by: disksing <042dc4512fa3d391c5170cf3aa61e6a638f84342@disksing.com>
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cluster
import (
"context"
"math/rand"
"sync"
"testing"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/failpoint"
"github.com/pingcap/kvproto/pkg/eraftpb"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/kvproto/pkg/pdpb"
"github.com/pingcap/pd/v4/pkg/mock/mockhbstream"
"github.com/pingcap/pd/v4/pkg/testutil"
"github.com/pingcap/pd/v4/server/config"
"github.com/pingcap/pd/v4/server/core"
"github.com/pingcap/pd/v4/server/kv"
"github.com/pingcap/pd/v4/server/schedule"
"github.com/pingcap/pd/v4/server/schedule/operator"
"github.com/pingcap/pd/v4/server/schedule/opt"
"github.com/pingcap/pd/v4/server/schedulers"
"github.com/pingcap/pd/v4/server/statistics"
)
// newTestOperator builds an operator with fixed "test" descriptor and brief
// names for use throughout these tests.
func newTestOperator(regionID uint64, regionEpoch *metapb.RegionEpoch, kind operator.OpKind, steps ...operator.OpStep) *operator.Operator {
	const name = "test"
	return operator.NewOperator(name, name, regionID, regionEpoch, kind, steps...)
}
// AllocPeer allocates a fresh peer ID and returns a peer placed on the given
// store.
func (c *testCluster) AllocPeer(storeID uint64) (*metapb.Peer, error) {
	peerID, err := c.AllocID()
	if err != nil {
		return nil, err
	}
	peer := &metapb.Peer{Id: peerID, StoreId: storeID}
	return peer, nil
}
// addRegionStore adds a store carrying regionCount regions. The total region
// size defaults to 10 per region unless an explicit size is supplied.
func (c *testCluster) addRegionStore(storeID uint64, regionCount int, regionSizes ...uint64) error {
	regionSize := uint64(regionCount) * 10
	if len(regionSizes) > 0 {
		regionSize = regionSizes[0]
	}
	stats := &pdpb.StoreStats{}
	stats.Capacity = 1000 * (1 << 20)
	stats.Available = stats.Capacity - regionSize
	store := core.NewStoreInfo(&metapb.Store{Id: storeID},
		core.SetStoreStats(stats),
		core.SetRegionCount(regionCount),
		core.SetRegionSize(int64(regionSize)),
		core.SetLastHeartbeatTS(time.Now()),
	)
	c.Lock()
	defer c.Unlock()
	return c.putStoreLocked(store)
}
// addLeaderRegion adds a region whose leader lives on leaderStoreID, with one
// follower peer allocated on each of the remaining stores.
func (c *testCluster) addLeaderRegion(regionID uint64, leaderStoreID uint64, followerStoreIDs ...uint64) error {
	region := newTestRegionMeta(regionID)
	leader, _ := c.AllocPeer(leaderStoreID)
	peers := []*metapb.Peer{leader}
	for _, storeID := range followerStoreIDs {
		follower, _ := c.AllocPeer(storeID)
		peers = append(peers, follower)
	}
	region.Peers = peers
	regionInfo := core.NewRegionInfo(region, leader, core.SetApproximateSize(10), core.SetApproximateKeys(10))
	return c.putRegion(regionInfo)
}
// updateLeaderCount rewrites the leader count (and the derived leader size)
// of an existing store.
func (c *testCluster) updateLeaderCount(storeID uint64, leaderCount int) error {
	updated := c.GetStore(storeID).Clone(
		core.SetLeaderCount(leaderCount),
		core.SetLeaderSize(int64(leaderCount)*10),
	)
	c.Lock()
	defer c.Unlock()
	return c.putStoreLocked(updated)
}
// addLeaderStore adds a store that carries the given number of leaders.
func (c *testCluster) addLeaderStore(storeID uint64, leaderCount int) error {
	store := core.NewStoreInfo(&metapb.Store{Id: storeID},
		core.SetStoreStats(&pdpb.StoreStats{}),
		core.SetLeaderCount(leaderCount),
		core.SetLeaderSize(int64(leaderCount)*10),
		core.SetLastHeartbeatTS(time.Now()),
	)
	c.Lock()
	defer c.Unlock()
	return c.putStoreLocked(store)
}
// setStoreDown simulates a down store: the state stays Up but the last
// heartbeat is reset to the zero time (presumably "down" is detected via a
// stale heartbeat rather than an explicit state — matches the caller's use).
func (c *testCluster) setStoreDown(storeID uint64) error {
	updated := c.GetStore(storeID).Clone(
		core.SetStoreState(metapb.StoreState_Up),
		core.SetLastHeartbeatTS(time.Time{}),
	)
	c.Lock()
	defer c.Unlock()
	return c.putStoreLocked(updated)
}
// setStoreOffline switches a store's state to Offline.
func (c *testCluster) setStoreOffline(storeID uint64) error {
	updated := c.GetStore(storeID).Clone(core.SetStoreState(metapb.StoreState_Offline))
	c.Lock()
	defer c.Unlock()
	return c.putStoreLocked(updated)
}
// LoadRegion adds a region as if it were loaded from etcd: peers are
// allocated on the given stores, but the region has no leader.
func (c *testCluster) LoadRegion(regionID uint64, followerStoreIDs ...uint64) error {
	// regions load from etcd will have no leader
	region := newTestRegionMeta(regionID)
	peers := make([]*metapb.Peer, 0, len(followerStoreIDs))
	for _, storeID := range followerStoreIDs {
		peer, _ := c.AllocPeer(storeID)
		peers = append(peers, peer)
	}
	region.Peers = peers
	return c.putRegion(core.NewRegionInfo(region, nil))
}
// Register the suite with gocheck.
var _ = Suite(&testCoordinatorSuite{})

// testCoordinatorSuite exercises the coordinator: dispatching, checkers,
// scheduler add/remove/persist, and the shouldRun preparation logic.
type testCoordinatorSuite struct {
	ctx    context.Context
	cancel context.CancelFunc
}
// SetUpSuite creates the suite-wide context and enables the
// unexpectedOperator failpoint for the duration of the suite.
func (s *testCoordinatorSuite) SetUpSuite(c *C) {
	s.ctx, s.cancel = context.WithCancel(context.Background())
	c.Assert(failpoint.Enable("github.com/pingcap/pd/v4/server/schedule/unexpectedOperator", "return(true)"), IsNil)
}
// TearDownSuite cancels the suite context and disables the failpoint enabled
// in SetUpSuite so it does not leak into other test suites in this package.
func (s *testCoordinatorSuite) TearDownSuite(c *C) {
	s.cancel()
	c.Assert(failpoint.Disable("github.com/pingcap/pd/v4/server/schedule/unexpectedOperator"), IsNil)
}
// TestBasic verifies that a region can hold at most one operator at a time:
// a second operator for the same region is rejected until the first one is
// removed.
func (s *testCoordinatorSuite) TestBasic(c *C) {
	tc, co, cleanup := prepare(nil, nil, nil, c)
	defer cleanup()
	oc := co.opController
	c.Assert(tc.addLeaderRegion(1, 1), IsNil)
	op1 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpLeader)
	oc.AddWaitingOperator(op1)
	c.Assert(oc.OperatorCount(op1.Kind()), Equals, uint64(1))
	c.Assert(oc.GetOperator(1).RegionID(), Equals, op1.RegionID())
	// Region 1 already has an operator, cannot add another one.
	op2 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpRegion)
	oc.AddWaitingOperator(op2)
	c.Assert(oc.OperatorCount(op2.Kind()), Equals, uint64(0))
	// Remove the operator manually, then we can add a new operator.
	c.Assert(oc.RemoveOperator(op1), IsTrue)
	op3 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpRegion)
	oc.AddWaitingOperator(op3)
	c.Assert(oc.OperatorCount(op3.Kind()), Equals, uint64(1))
	c.Assert(oc.GetOperator(1).RegionID(), Equals, op3.RegionID())
}
// TestDispatch drives a full schedule-and-dispatch cycle: balance-region
// moves a peer from store 4 to store 1 and balance-leader transfers a leader
// from store 4 to store 2, with simulated heartbeats acknowledging each step.
func (s *testCoordinatorSuite) TestDispatch(c *C) {
	tc, co, cleanup := prepare(nil, func(tc *testCluster) { tc.prepareChecker.isPrepared = true }, nil, c)
	defer cleanup()
	// Transfer peer from store 4 to store 1.
	c.Assert(tc.addRegionStore(4, 40), IsNil)
	c.Assert(tc.addRegionStore(3, 30), IsNil)
	c.Assert(tc.addRegionStore(2, 20), IsNil)
	c.Assert(tc.addRegionStore(1, 10), IsNil)
	c.Assert(tc.addLeaderRegion(1, 2, 3, 4), IsNil)
	// Transfer leader from store 4 to store 2.
	c.Assert(tc.updateLeaderCount(4, 50), IsNil)
	c.Assert(tc.updateLeaderCount(3, 50), IsNil)
	c.Assert(tc.updateLeaderCount(2, 20), IsNil)
	c.Assert(tc.updateLeaderCount(1, 10), IsNil)
	c.Assert(tc.addLeaderRegion(2, 4, 3, 2), IsNil)
	co.run()
	// Wait for schedule and turn off balance.
	waitOperator(c, co, 1)
	testutil.CheckTransferPeer(c, co.opController.GetOperator(1), operator.OpBalance, 4, 1)
	c.Assert(co.removeScheduler(schedulers.BalanceRegionName), IsNil)
	waitOperator(c, co, 2)
	testutil.CheckTransferLeader(c, co.opController.GetOperator(2), operator.OpBalance, 4, 2)
	c.Assert(co.removeScheduler(schedulers.BalanceLeaderName), IsNil)
	stream := mockhbstream.NewHeartbeatStream()
	// Transfer peer.
	region := tc.GetRegion(1).Clone()
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	region = waitAddLearner(c, stream, region, 1)
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	region = waitPromoteLearner(c, stream, region, 1)
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	region = waitRemovePeer(c, stream, region, 4)
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	waitNoResponse(c, stream)
	// Transfer leader.
	region = tc.GetRegion(2).Clone()
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	waitTransferLeader(c, stream, region, 2)
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	waitNoResponse(c, stream)
}
// dispatchHeartbeat simulates a region heartbeat: it binds the stream to the
// region leader's store, records the region in the cluster, and hands the
// region to the operator controller for dispatching.
func dispatchHeartbeat(co *coordinator, region *core.RegionInfo, stream opt.HeartbeatStream) error {
	leaderStoreID := region.GetLeader().GetStoreId()
	co.hbStreams.BindStream(leaderStoreID, stream)
	if err := co.cluster.putRegion(region.Clone()); err != nil {
		return err
	}
	co.opController.Dispatch(region, schedule.DispatchFromHeartBeat)
	return nil
}
// TestCollectMetrics checks that collecting and resetting coordinator/cluster
// metrics is safe while stores are being concurrently updated (race test).
func (s *testCoordinatorSuite) TestCollectMetrics(c *C) {
	tc, co, cleanup := prepare(nil, func(tc *testCluster) {
		tc.regionStats = statistics.NewRegionStatistics(tc.GetOpt())
	}, func(co *coordinator) { co.run() }, c)
	defer cleanup()
	// Make sure there are no problem when concurrent write and read
	var wg sync.WaitGroup
	count := 10
	// The loop below runs i = 0..count inclusive, i.e. count+1 goroutines.
	wg.Add(count + 1)
	for i := 0; i <= count; i++ {
		go func(i int) {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				c.Assert(tc.addRegionStore(uint64(i%5), rand.Intn(200)), IsNil)
			}
		}(i)
	}
	// Read the metrics concurrently with the writers above.
	for i := 0; i < 1000; i++ {
		co.collectHotSpotMetrics()
		co.collectSchedulerMetrics()
		co.cluster.collectClusterMetrics()
	}
	co.resetHotSpotMetrics()
	co.resetSchedulerMetrics()
	co.cluster.resetClusterMetrics()
	wg.Wait()
}
// MaxUint64 returns the largest of the given values, or 0 when called with
// no arguments.
func MaxUint64(nums ...uint64) uint64 {
	var best uint64
	for _, n := range nums {
		if n > best {
			best = n
		}
	}
	return best
}
// prepare builds a test cluster and coordinator for a test case. The optional
// hooks run in order: setCfg mutates the schedule config, setTc mutates the
// cluster, and run starts (or otherwise drives) the coordinator. The returned
// func tears everything down and must be deferred by the caller.
func prepare(setCfg func(*config.ScheduleConfig), setTc func(*testCluster), run func(*coordinator), c *C) (*testCluster, *coordinator, func()) {
	ctx, cancel := context.WithCancel(context.Background())
	cfg, opt, err := newTestScheduleConfig()
	c.Assert(err, IsNil)
	if setCfg != nil {
		setCfg(cfg)
	}
	tc := newTestCluster(opt)
	hbStreams := mockhbstream.NewHeartbeatStreams(tc.getClusterID(), false /* need to run */)
	if setTc != nil {
		setTc(tc)
	}
	tc.RaftCluster.configCheck = true
	co := newCoordinator(ctx, tc.RaftCluster, hbStreams)
	if run != nil {
		run(co)
	}
	return tc, co, func() {
		co.stop()
		co.wg.Wait()
		hbStreams.Close()
		cancel()
	}
}
// checkRegion runs the checkers on the given region and asserts both the
// checker-busy flag and how many of the produced operators the controller
// accepted.
func (s *testCoordinatorSuite) checkRegion(c *C, tc *testCluster, co *coordinator, regionID uint64, expectCheckerIsBusy bool, expectAddOperator int) {
	checkerIsBusy, ops := co.checkers.CheckRegion(tc.GetRegion(regionID))
	c.Assert(checkerIsBusy, Equals, expectCheckerIsBusy)
	if ops == nil {
		c.Assert(expectAddOperator, Equals, 0)
		return
	}
	c.Assert(co.opController.AddWaitingOperator(ops...), Equals, expectAddOperator)
}
// TestCheckRegion verifies checker behavior for an under-replicated region:
// an add-peer operator is produced once, no new operator is produced while a
// learner peer is pending, and after a restart a promote-learner operator is
// produced once the peer is no longer pending.
func (s *testCoordinatorSuite) TestCheckRegion(c *C) {
	tc, co, cleanup := prepare(nil, nil, func(co *coordinator) { co.run() }, c)
	hbStreams, opt := co.hbStreams, tc.opt
	defer cleanup()
	c.Assert(tc.addRegionStore(4, 4), IsNil)
	c.Assert(tc.addRegionStore(3, 3), IsNil)
	c.Assert(tc.addRegionStore(2, 2), IsNil)
	c.Assert(tc.addRegionStore(1, 1), IsNil)
	c.Assert(tc.addLeaderRegion(1, 2, 3), IsNil)
	s.checkRegion(c, tc, co, 1, false, 1)
	waitOperator(c, co, 1)
	testutil.CheckAddPeer(c, co.opController.GetOperator(1), operator.OpReplica, 1)
	s.checkRegion(c, tc, co, 1, false, 0)
	r := tc.GetRegion(1)
	// Add a pending learner on store 1; no operator should be produced.
	p := &metapb.Peer{Id: 1, StoreId: 1, IsLearner: true}
	r = r.Clone(
		core.WithAddPeer(p),
		core.WithPendingPeers(append(r.GetPendingPeers(), p)),
	)
	c.Assert(tc.putRegion(r), IsNil)
	s.checkRegion(c, tc, co, 1, false, 0)
	// Simulate a coordinator restart with a fresh cluster.
	co.stop()
	co.wg.Wait()
	tc = newTestCluster(opt)
	tc.configCheck = true
	co = newCoordinator(s.ctx, tc.RaftCluster, hbStreams)
	co.run()
	c.Assert(tc.addRegionStore(4, 4), IsNil)
	c.Assert(tc.addRegionStore(3, 3), IsNil)
	c.Assert(tc.addRegionStore(2, 2), IsNil)
	c.Assert(tc.addRegionStore(1, 1), IsNil)
	c.Assert(tc.putRegion(r), IsNil)
	s.checkRegion(c, tc, co, 1, false, 0)
	// Once the learner is no longer pending, it should be promoted.
	r = r.Clone(core.WithPendingPeers(nil))
	c.Assert(tc.putRegion(r), IsNil)
	s.checkRegion(c, tc, co, 1, false, 1)
	waitOperator(c, co, 1)
	op := co.opController.GetOperator(1)
	c.Assert(op.Len(), Equals, 1)
	c.Assert(op.Step(0).(operator.PromoteLearner).ToStore, Equals, uint64(1))
	s.checkRegion(c, tc, co, 1, false, 0)
}
// TestCheckerIsBusy fills the operator controller past the replica/merge
// schedule limits and verifies that CheckRegion then reports the checker as
// busy without adding more operators.
func (s *testCoordinatorSuite) TestCheckerIsBusy(c *C) {
	tc, co, cleanup := prepare(func(cfg *config.ScheduleConfig) {
		cfg.ReplicaScheduleLimit = 0 // ensure replica checker is busy
		cfg.MergeScheduleLimit = 10
	}, nil, func(co *coordinator) { co.run() }, c)
	defer cleanup()
	c.Assert(tc.addRegionStore(1, 0), IsNil)
	num := 1 + MaxUint64(co.cluster.GetReplicaScheduleLimit(), co.cluster.GetMergeScheduleLimit())
	var operatorKinds = []operator.OpKind{
		operator.OpReplica, operator.OpRegion | operator.OpMerge,
	}
	for i, operatorKind := range operatorKinds {
		// Use disjoint region-ID ranges per operator kind.
		for j := uint64(0); j < num; j++ {
			regionID := j + uint64(i+1)*num
			c.Assert(tc.addLeaderRegion(regionID, 1), IsNil)
			switch operatorKind {
			case operator.OpReplica:
				op := newTestOperator(regionID, tc.GetRegion(regionID).GetRegionEpoch(), operatorKind)
				c.Assert(co.opController.AddWaitingOperator(op), Equals, 1)
			case operator.OpRegion | operator.OpMerge:
				if regionID%2 == 1 {
					ops, err := operator.CreateMergeRegionOperator("merge-region", co.cluster, tc.GetRegion(regionID), tc.GetRegion(regionID-1), operator.OpMerge)
					c.Assert(err, IsNil)
					c.Assert(co.opController.AddWaitingOperator(ops...), Equals, len(ops))
				}
			}
		}
	}
	s.checkRegion(c, tc, co, num, true, 0)
}
// TestReplica verifies the replica checker with balance disabled: adding a
// missing peer, replacing a down peer, removing a surplus peer, and removing
// an offline peer directly while it is pending.
func (s *testCoordinatorSuite) TestReplica(c *C) {
	tc, co, cleanup := prepare(func(cfg *config.ScheduleConfig) {
		// Turn off balance.
		cfg.LeaderScheduleLimit = 0
		cfg.RegionScheduleLimit = 0
	}, nil, func(co *coordinator) { co.run() }, c)
	defer cleanup()
	c.Assert(tc.addRegionStore(1, 1), IsNil)
	c.Assert(tc.addRegionStore(2, 2), IsNil)
	c.Assert(tc.addRegionStore(3, 3), IsNil)
	c.Assert(tc.addRegionStore(4, 4), IsNil)
	stream := mockhbstream.NewHeartbeatStream()
	// Add peer to store 1.
	c.Assert(tc.addLeaderRegion(1, 2, 3), IsNil)
	region := tc.GetRegion(1)
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	region = waitAddLearner(c, stream, region, 1)
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	region = waitPromoteLearner(c, stream, region, 1)
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	waitNoResponse(c, stream)
	// Peer in store 3 is down, remove peer in store 3 and add peer to store 4.
	c.Assert(tc.setStoreDown(3), IsNil)
	downPeer := &pdpb.PeerStats{
		Peer:        region.GetStorePeer(3),
		DownSeconds: 24 * 60 * 60,
	}
	region = region.Clone(
		core.WithDownPeers(append(region.GetDownPeers(), downPeer)),
	)
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	region = waitAddLearner(c, stream, region, 4)
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	region = waitPromoteLearner(c, stream, region, 4)
	region = region.Clone(core.WithDownPeers(nil))
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	waitNoResponse(c, stream)
	// Remove peer from store 4.
	c.Assert(tc.addLeaderRegion(2, 1, 2, 3, 4), IsNil)
	region = tc.GetRegion(2)
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	region = waitRemovePeer(c, stream, region, 4)
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	waitNoResponse(c, stream)
	// Remove offline peer directly when it's pending.
	c.Assert(tc.addLeaderRegion(3, 1, 2, 3), IsNil)
	c.Assert(tc.setStoreOffline(3), IsNil)
	region = tc.GetRegion(3)
	region = region.Clone(core.WithPendingPeers([]*metapb.Peer{region.GetStorePeer(3)}))
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	waitNoResponse(c, stream)
}
// TestPeerState verifies that an operator stalls while the newly added peer
// is pending and resumes (proceeding to remove the old peer) once the peer is
// no longer pending.
func (s *testCoordinatorSuite) TestPeerState(c *C) {
	tc, co, cleanup := prepare(nil, nil, func(co *coordinator) { co.run() }, c)
	defer cleanup()
	// Transfer peer from store 4 to store 1.
	c.Assert(tc.addRegionStore(1, 10), IsNil)
	c.Assert(tc.addRegionStore(2, 10), IsNil)
	c.Assert(tc.addRegionStore(3, 10), IsNil)
	c.Assert(tc.addRegionStore(4, 40), IsNil)
	c.Assert(tc.addLeaderRegion(1, 2, 3, 4), IsNil)
	stream := mockhbstream.NewHeartbeatStream()
	// Wait for schedule.
	waitOperator(c, co, 1)
	testutil.CheckTransferPeer(c, co.opController.GetOperator(1), operator.OpBalance, 4, 1)
	region := tc.GetRegion(1).Clone()
	// Add new peer.
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	region = waitAddLearner(c, stream, region, 1)
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	region = waitPromoteLearner(c, stream, region, 1)
	// If the new peer is pending, the operator will not finish.
	region = region.Clone(core.WithPendingPeers(append(region.GetPendingPeers(), region.GetStorePeer(1))))
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	waitNoResponse(c, stream)
	c.Assert(co.opController.GetOperator(region.GetID()), NotNil)
	// The new peer is not pending now, the operator will finish.
	// And we will proceed to remove peer in store 4.
	region = region.Clone(core.WithPendingPeers(nil))
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	waitRemovePeer(c, stream, region, 4)
	c.Assert(tc.addLeaderRegion(1, 1, 2, 3), IsNil)
	region = tc.GetRegion(1).Clone()
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	waitNoResponse(c, stream)
}
// TestShouldRun verifies the prepare checker: the coordinator only starts
// running once enough regions have reported a heartbeat with a leader.
func (s *testCoordinatorSuite) TestShouldRun(c *C) {
	tc, co, cleanup := prepare(nil, nil, nil, c)
	defer cleanup()
	c.Assert(tc.addLeaderStore(1, 5), IsNil)
	c.Assert(tc.addLeaderStore(2, 2), IsNil)
	c.Assert(tc.addLeaderStore(3, 0), IsNil)
	c.Assert(tc.addLeaderStore(4, 0), IsNil)
	c.Assert(tc.LoadRegion(1, 1, 2, 3), IsNil)
	c.Assert(tc.LoadRegion(2, 1, 2, 3), IsNil)
	c.Assert(tc.LoadRegion(3, 1, 2, 3), IsNil)
	c.Assert(tc.LoadRegion(4, 1, 2, 3), IsNil)
	c.Assert(tc.LoadRegion(5, 1, 2, 3), IsNil)
	c.Assert(tc.LoadRegion(6, 2, 1, 4), IsNil)
	c.Assert(tc.LoadRegion(7, 2, 1, 4), IsNil)
	c.Assert(co.shouldRun(), IsFalse)
	c.Assert(tc.core.Regions.GetStoreRegionCount(4), Equals, 2)
	// shouldRun flips to true only after the last needed heartbeat arrives.
	tbl := []struct {
		regionID  uint64
		shouldRun bool
	}{
		{1, false},
		{2, false},
		{3, false},
		{4, false},
		{5, false},
		// store4 needs collect two region
		{6, false},
		{7, true},
	}
	for _, t := range tbl {
		r := tc.GetRegion(t.regionID)
		nr := r.Clone(core.WithLeader(r.GetPeers()[0]))
		c.Assert(tc.processRegionHeartbeat(nr), IsNil)
		c.Assert(co.shouldRun(), Equals, t.shouldRun)
	}
	// A heartbeat without peers is rejected and does not change the sum.
	nr := &metapb.Region{Id: 6, Peers: []*metapb.Peer{}}
	newRegion := core.NewRegionInfo(nr, nil)
	c.Assert(tc.processRegionHeartbeat(newRegion), NotNil)
	c.Assert(co.cluster.prepareChecker.sum, Equals, 7)
}
// TestShouldRunWithNonLeaderRegions verifies that the prepare checker can be
// satisfied even when some loaded regions never report a leader: shouldRun
// flips once a sufficient fraction of regions has been collected.
func (s *testCoordinatorSuite) TestShouldRunWithNonLeaderRegions(c *C) {
	tc, co, cleanup := prepare(nil, nil, nil, c)
	defer cleanup()
	c.Assert(tc.addLeaderStore(1, 10), IsNil)
	c.Assert(tc.addLeaderStore(2, 0), IsNil)
	c.Assert(tc.addLeaderStore(3, 0), IsNil)
	for i := 0; i < 10; i++ {
		c.Assert(tc.LoadRegion(uint64(i+1), 1, 2, 3), IsNil)
	}
	c.Assert(co.shouldRun(), IsFalse)
	c.Assert(tc.core.Regions.GetStoreRegionCount(1), Equals, 10)
	tbl := []struct {
		regionID  uint64
		shouldRun bool
	}{
		{1, false},
		{2, false},
		{3, false},
		{4, false},
		{5, false},
		{6, false},
		{7, false},
		{8, true},
	}
	for _, t := range tbl {
		r := tc.GetRegion(t.regionID)
		nr := r.Clone(core.WithLeader(r.GetPeers()[0]))
		c.Assert(tc.processRegionHeartbeat(nr), IsNil)
		c.Assert(co.shouldRun(), Equals, t.shouldRun)
	}
	// A heartbeat without peers is rejected and does not change the sum.
	nr := &metapb.Region{Id: 8, Peers: []*metapb.Peer{}}
	newRegion := core.NewRegionInfo(nr, nil)
	c.Assert(tc.processRegionHeartbeat(newRegion), NotNil)
	c.Assert(co.cluster.prepareChecker.sum, Equals, 8)
	// Now, after server is prepared, there exist some regions with no leader.
	c.Assert(tc.GetRegion(9).GetLeader().GetStoreId(), Equals, uint64(0))
	c.Assert(tc.GetRegion(10).GetLeader().GetStoreId(), Equals, uint64(0))
}
// TestAddScheduler verifies adding/removing schedulers at runtime: removing
// all defaults, rejecting a grant-leader scheduler for a nonexistent store,
// then adding a valid one and watching it transfer all leaders to store 1.
func (s *testCoordinatorSuite) TestAddScheduler(c *C) {
	tc, co, cleanup := prepare(nil, nil, func(co *coordinator) { co.run() }, c)
	defer cleanup()
	c.Assert(co.schedulers, HasLen, 4)
	c.Assert(co.removeScheduler(schedulers.BalanceLeaderName), IsNil)
	c.Assert(co.removeScheduler(schedulers.BalanceRegionName), IsNil)
	c.Assert(co.removeScheduler(schedulers.HotRegionName), IsNil)
	c.Assert(co.removeScheduler(schedulers.LabelName), IsNil)
	c.Assert(co.schedulers, HasLen, 0)
	stream := mockhbstream.NewHeartbeatStream()
	// Add stores 1,2,3
	c.Assert(tc.addLeaderStore(1, 1), IsNil)
	c.Assert(tc.addLeaderStore(2, 1), IsNil)
	c.Assert(tc.addLeaderStore(3, 1), IsNil)
	// Add regions 1 with leader in store 1 and followers in stores 2,3
	c.Assert(tc.addLeaderRegion(1, 1, 2, 3), IsNil)
	// Add regions 2 with leader in store 2 and followers in stores 1,3
	c.Assert(tc.addLeaderRegion(2, 2, 1, 3), IsNil)
	// Add regions 3 with leader in store 3 and followers in stores 1,2
	c.Assert(tc.addLeaderRegion(3, 3, 1, 2), IsNil)
	oc := co.opController
	// Store 0 does not exist, so adding/removing this scheduler must fail.
	gls, err := schedule.CreateScheduler(schedulers.GrantLeaderType, oc, core.NewStorage(kv.NewMemoryKV()), schedule.ConfigSliceDecoder(schedulers.GrantLeaderType, []string{"0"}))
	c.Assert(err, IsNil)
	c.Assert(co.addScheduler(gls), NotNil)
	c.Assert(co.removeScheduler(gls.GetName()), NotNil)
	gls, err = schedule.CreateScheduler(schedulers.GrantLeaderType, oc, core.NewStorage(kv.NewMemoryKV()), schedule.ConfigSliceDecoder(schedulers.GrantLeaderType, []string{"1"}))
	c.Assert(err, IsNil)
	c.Assert(co.addScheduler(gls), IsNil)
	// Transfer all leaders to store 1.
	waitOperator(c, co, 2)
	region2 := tc.GetRegion(2)
	c.Assert(dispatchHeartbeat(co, region2, stream), IsNil)
	region2 = waitTransferLeader(c, stream, region2, 1)
	c.Assert(dispatchHeartbeat(co, region2, stream), IsNil)
	waitNoResponse(c, stream)
	waitOperator(c, co, 3)
	region3 := tc.GetRegion(3)
	c.Assert(dispatchHeartbeat(co, region3, stream), IsNil)
	region3 = waitTransferLeader(c, stream, region3, 1)
	c.Assert(dispatchHeartbeat(co, region3, stream), IsNil)
	waitNoResponse(c, stream)
}
// TestPersistScheduler verifies that dynamically added/removed schedulers and
// their configs survive coordinator restarts: the scheduler list and the
// persisted schedule options must stay consistent across several simulated
// PD restarts.
func (s *testCoordinatorSuite) TestPersistScheduler(c *C) {
	tc, co, cleanup := prepare(nil, nil, func(co *coordinator) { co.run() }, c)
	hbStreams := co.hbStreams
	defer cleanup()
	// Add stores 1,2
	c.Assert(tc.addLeaderStore(1, 1), IsNil)
	c.Assert(tc.addLeaderStore(2, 1), IsNil)
	c.Assert(co.schedulers, HasLen, 4)
	oc := co.opController
	storage := tc.RaftCluster.storage
	// Add two extra schedulers on top of the 4 defaults.
	gls1, err := schedule.CreateScheduler(schedulers.GrantLeaderType, oc, storage, schedule.ConfigSliceDecoder(schedulers.GrantLeaderType, []string{"1"}))
	c.Assert(err, IsNil)
	c.Assert(co.addScheduler(gls1, "1"), IsNil)
	evict, err := schedule.CreateScheduler(schedulers.EvictLeaderType, oc, storage, schedule.ConfigSliceDecoder(schedulers.EvictLeaderType, []string{"2"}))
	c.Assert(err, IsNil)
	c.Assert(co.addScheduler(evict, "2"), IsNil)
	c.Assert(co.schedulers, HasLen, 6)
	sches, _, err := storage.LoadAllScheduleConfig()
	c.Assert(err, IsNil)
	c.Assert(sches, HasLen, 6)
	// Remove the 4 defaults; only the two dynamic schedulers remain.
	c.Assert(co.removeScheduler(schedulers.BalanceLeaderName), IsNil)
	c.Assert(co.removeScheduler(schedulers.BalanceRegionName), IsNil)
	c.Assert(co.removeScheduler(schedulers.HotRegionName), IsNil)
	c.Assert(co.removeScheduler(schedulers.LabelName), IsNil)
	c.Assert(co.schedulers, HasLen, 2)
	c.Assert(co.cluster.opt.Persist(storage), IsNil)
	co.stop()
	co.wg.Wait()
	// make a new coordinator for testing
	// whether the schedulers added or removed in dynamic way are recorded in opt
	_, newOpt, err := newTestScheduleConfig()
	c.Assert(err, IsNil)
	_, err = schedule.CreateScheduler(schedulers.AdjacentRegionType, oc, storage, schedule.ConfigJSONDecoder([]byte("null")))
	c.Assert(err, IsNil)
	// suppose we add a new default enable scheduler
	newOpt.AddSchedulerCfg(schedulers.AdjacentRegionType, []string{})
	c.Assert(newOpt.GetSchedulers(), HasLen, 5)
	c.Assert(newOpt.Reload(storage), IsNil)
	// only remains 3 items with independent config.
	sches, _, err = storage.LoadAllScheduleConfig()
	c.Assert(err, IsNil)
	c.Assert(sches, HasLen, 3)
	// option have 7 items because the default scheduler do not remove.
	c.Assert(newOpt.GetSchedulers(), HasLen, 7)
	c.Assert(newOpt.Persist(storage), IsNil)
	tc.RaftCluster.opt = newOpt
	co = newCoordinator(s.ctx, tc.RaftCluster, hbStreams)
	co.run()
	c.Assert(co.schedulers, HasLen, 3)
	co.stop()
	co.wg.Wait()
	// suppose restart PD again
	_, newOpt, err = newTestScheduleConfig()
	c.Assert(err, IsNil)
	c.Assert(newOpt.Reload(storage), IsNil)
	tc.RaftCluster.opt = newOpt
	co = newCoordinator(s.ctx, tc.RaftCluster, hbStreams)
	co.run()
	storage = tc.RaftCluster.storage
	c.Assert(co.schedulers, HasLen, 3)
	// Re-add two of the default schedulers.
	bls, err := schedule.CreateScheduler(schedulers.BalanceLeaderType, oc, storage, schedule.ConfigSliceDecoder(schedulers.BalanceLeaderType, []string{"", ""}))
	c.Assert(err, IsNil)
	c.Assert(co.addScheduler(bls), IsNil)
	brs, err := schedule.CreateScheduler(schedulers.BalanceRegionType, oc, storage, schedule.ConfigSliceDecoder(schedulers.BalanceRegionType, []string{"", ""}))
	c.Assert(err, IsNil)
	c.Assert(co.addScheduler(brs), IsNil)
	c.Assert(co.schedulers, HasLen, 5)
	// the scheduler option should contain 7 items
	// the `hot scheduler` and `label scheduler` are disabled
	c.Assert(co.cluster.opt.GetSchedulers(), HasLen, 7)
	c.Assert(co.removeScheduler(schedulers.GrantLeaderName), IsNil)
	// the scheduler that is not enable by default will be completely deleted
	c.Assert(co.cluster.opt.GetSchedulers(), HasLen, 6)
	c.Assert(co.schedulers, HasLen, 4)
	c.Assert(co.cluster.opt.Persist(co.cluster.storage), IsNil)
	co.stop()
	co.wg.Wait()
	_, newOpt, err = newTestScheduleConfig()
	c.Assert(err, IsNil)
	c.Assert(newOpt.Reload(co.cluster.storage), IsNil)
	tc.RaftCluster.opt = newOpt
	co = newCoordinator(s.ctx, tc.RaftCluster, hbStreams)
	co.run()
	c.Assert(co.schedulers, HasLen, 4)
	c.Assert(co.removeScheduler(schedulers.EvictLeaderName), IsNil)
	c.Assert(co.schedulers, HasLen, 3)
}
// TestRemoveScheduler verifies that removing every scheduler clears both the
// in-memory scheduler set and the per-scheduler configs in storage, while the
// default-scheduler list in the options is retained across a restart.
func (s *testCoordinatorSuite) TestRemoveScheduler(c *C) {
	tc, co, cleanup := prepare(func(cfg *config.ScheduleConfig) {
		cfg.ReplicaScheduleLimit = 0
	}, nil, func(co *coordinator) { co.run() }, c)
	hbStreams := co.hbStreams
	defer cleanup()
	// Add stores 1,2
	c.Assert(tc.addLeaderStore(1, 1), IsNil)
	c.Assert(tc.addLeaderStore(2, 1), IsNil)
	c.Assert(co.schedulers, HasLen, 4)
	oc := co.opController
	storage := tc.RaftCluster.storage
	// Add one dynamic scheduler on top of the 4 defaults.
	gls1, err := schedule.CreateScheduler(schedulers.GrantLeaderType, oc, storage, schedule.ConfigSliceDecoder(schedulers.GrantLeaderType, []string{"1"}))
	c.Assert(err, IsNil)
	c.Assert(co.addScheduler(gls1, "1"), IsNil)
	c.Assert(co.schedulers, HasLen, 5)
	sches, _, err := storage.LoadAllScheduleConfig()
	c.Assert(err, IsNil)
	c.Assert(sches, HasLen, 5)
	// remove all schedulers
	c.Assert(co.removeScheduler(schedulers.BalanceLeaderName), IsNil)
	c.Assert(co.removeScheduler(schedulers.BalanceRegionName), IsNil)
	c.Assert(co.removeScheduler(schedulers.HotRegionName), IsNil)
	c.Assert(co.removeScheduler(schedulers.LabelName), IsNil)
	c.Assert(co.removeScheduler(schedulers.GrantLeaderName), IsNil)
	// all removed
	sches, _, err = storage.LoadAllScheduleConfig()
	c.Assert(err, IsNil)
	c.Assert(sches, HasLen, 0)
	c.Assert(co.schedulers, HasLen, 0)
	c.Assert(co.cluster.opt.Persist(co.cluster.storage), IsNil)
	co.stop()
	co.wg.Wait()
	// suppose restart PD again
	_, newOpt, err := newTestScheduleConfig()
	c.Assert(err, IsNil)
	c.Assert(newOpt.Reload(tc.storage), IsNil)
	tc.RaftCluster.opt = newOpt
	co = newCoordinator(s.ctx, tc.RaftCluster, hbStreams)
	co.run()
	// No scheduler runs, but the default entries stay in the options.
	c.Assert(co.schedulers, HasLen, 0)
	// the option remains default scheduler
	c.Assert(co.cluster.opt.GetSchedulers(), HasLen, 4)
	co.stop()
	co.wg.Wait()
}
// TestRestart verifies that replica scheduling continues correctly after the
// coordinator is stopped and recreated: a learner added before the restart is
// promoted, and a further replica is added by the new coordinator.
func (s *testCoordinatorSuite) TestRestart(c *C) {
	tc, co, cleanup := prepare(func(cfg *config.ScheduleConfig) {
		// Turn off balance, we test add replica only.
		cfg.LeaderScheduleLimit = 0
		cfg.RegionScheduleLimit = 0
	}, nil, func(co *coordinator) { co.run() }, c)
	hbStreams := co.hbStreams
	defer cleanup()
	// Add 3 stores (1, 2, 3) and a region with 1 replica on store 1.
	c.Assert(tc.addRegionStore(1, 1), IsNil)
	c.Assert(tc.addRegionStore(2, 2), IsNil)
	c.Assert(tc.addRegionStore(3, 3), IsNil)
	c.Assert(tc.addLeaderRegion(1, 1), IsNil)
	region := tc.GetRegion(1)
	tc.prepareChecker.collect(region)
	// Add 1 replica on store 2.
	co = newCoordinator(s.ctx, tc.RaftCluster, hbStreams)
	co.run()
	stream := mockhbstream.NewHeartbeatStream()
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	region = waitAddLearner(c, stream, region, 2)
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	region = waitPromoteLearner(c, stream, region, 2)
	co.stop()
	co.wg.Wait()
	// Recreate coordinator then add another replica on store 3.
	co = newCoordinator(s.ctx, tc.RaftCluster, hbStreams)
	co.run()
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	region = waitAddLearner(c, stream, region, 3)
	c.Assert(dispatchHeartbeat(co, region, stream), IsNil)
	waitPromoteLearner(c, stream, region, 3)
}
// BenchmarkPatrolRegion measures one pass of coordinator.patrolRegions over a
// cluster pre-filled with regions. A watcher goroutine cancels the
// coordinator once the merge-operator count reaches the configured limit so
// that patrolRegions terminates.
func BenchmarkPatrolRegion(b *testing.B) {
	mergeLimit := uint64(4100)
	regionNum := 10000
	tc, co, cleanup := prepare(func(cfg *config.ScheduleConfig) {
		cfg.MergeScheduleLimit = mergeLimit
	}, nil, nil, &C{})
	defer cleanup()
	tc.opt.SetSplitMergeInterval(time.Duration(0))
	// Populate 3 stores, each holding every region.
	for i := 1; i < 4; i++ {
		if err := tc.addRegionStore(uint64(i), regionNum, 96); err != nil {
			return
		}
	}
	for i := 0; i < regionNum; i++ {
		if err := tc.addLeaderRegion(uint64(i), 1, 2, 3); err != nil {
			return
		}
	}
	listen := make(chan int)
	go func() {
		oc := co.opController
		// Signal that the watcher is running before the timer starts.
		listen <- 0
		// Busy-wait until enough merge operators exist, then cancel the
		// coordinator so patrolRegions returns.
		for {
			if oc.OperatorCount(operator.OpMerge) == mergeLimit {
				co.cancel()
				// NOTE(review): wg.Add(1) presumably compensates waitgroup
				// bookkeeping during this abnormal shutdown path — confirm
				// against coordinator.stop/run before changing.
				co.wg.Add(1)
				return
			}
		}
	}()
	<-listen
	b.ResetTimer()
	co.patrolRegions()
}
// waitOperator blocks until the operator controller reports an operator for
// the given region (or the wait helper's internal deadline fails the test).
func waitOperator(c *C, co *coordinator, regionID uint64) {
	hasOperator := func(c *C) bool {
		op := co.opController.GetOperator(regionID)
		return op != nil
	}
	testutil.WaitUntil(c, hasOperator)
}
// Register the suite with gocheck.
var _ = Suite(&testOperatorControllerSuite{})

// testOperatorControllerSuite exercises the coordinator's operator controller.
type testOperatorControllerSuite struct {
	ctx    context.Context // suite-wide context, cancelled in TearDownSuite
	cancel context.CancelFunc
}

// SetUpSuite creates the suite context and enables the unexpectedOperator
// failpoint so invalid operators are surfaced as test failures.
func (s *testOperatorControllerSuite) SetUpSuite(c *C) {
	s.ctx, s.cancel = context.WithCancel(context.Background())
	c.Assert(failpoint.Enable("github.com/pingcap/pd/v4/server/schedule/unexpectedOperator", "return(true)"), IsNil)
}

// TearDownSuite cancels the suite context.
func (s *testOperatorControllerSuite) TearDownSuite(c *C) {
	s.cancel()
}
// TestOperatorCount verifies OperatorCount bookkeeping as operators are added
// and removed, including that a high-priority operator of a different kind
// replaces an existing operator on the same region.
func (s *testOperatorControllerSuite) TestOperatorCount(c *C) {
	tc, co, cleanup := prepare(nil, nil, nil, c)
	defer cleanup()
	oc := co.opController
	c.Assert(oc.OperatorCount(operator.OpLeader), Equals, uint64(0))
	c.Assert(oc.OperatorCount(operator.OpRegion), Equals, uint64(0))
	c.Assert(tc.addLeaderRegion(1, 1), IsNil)
	c.Assert(tc.addLeaderRegion(2, 2), IsNil)
	{
		op1 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpLeader)
		oc.AddWaitingOperator(op1)
		c.Assert(oc.OperatorCount(operator.OpLeader), Equals, uint64(1)) // 1:leader
		op2 := newTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpLeader)
		oc.AddWaitingOperator(op2)
		c.Assert(oc.OperatorCount(operator.OpLeader), Equals, uint64(2)) // 1:leader, 2:leader
		c.Assert(oc.RemoveOperator(op1), IsTrue)
		c.Assert(oc.OperatorCount(operator.OpLeader), Equals, uint64(1)) // 2:leader
	}
	{
		op1 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpRegion)
		oc.AddWaitingOperator(op1)
		c.Assert(oc.OperatorCount(operator.OpRegion), Equals, uint64(1)) // 1:region 2:leader
		c.Assert(oc.OperatorCount(operator.OpLeader), Equals, uint64(1))
		// A high-priority region operator on region 2 displaces its leader
		// operator, so the leader count drops to zero.
		op2 := newTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpRegion)
		op2.SetPriorityLevel(core.HighPriority)
		oc.AddWaitingOperator(op2)
		c.Assert(oc.OperatorCount(operator.OpRegion), Equals, uint64(2)) // 1:region 2:region
		c.Assert(oc.OperatorCount(operator.OpLeader), Equals, uint64(0))
	}
}
// TestStoreOverloaded verifies that the store-limit token bucket throttles
// the balance-region scheduler: once a store's tokens are spent, Schedule
// returns nil until the limit is raised or the bucket refills over time.
func (s *testOperatorControllerSuite) TestStoreOverloaded(c *C) {
	tc, co, cleanup := prepare(func(cfg *config.ScheduleConfig) {
		// scheduling one time needs 60 seconds
		// and thus it's large enough to make sure that only schedule one time
		cfg.StoreBalanceRate = 1
	}, nil, nil, c)
	defer cleanup()
	oc := co.opController
	lb, err := schedule.CreateScheduler(schedulers.BalanceRegionType, oc, tc.storage, schedule.ConfigSliceDecoder(schedulers.BalanceRegionType, []string{"", ""}))
	c.Assert(err, IsNil)
	// Store 1 is underloaded so balance moves regions toward it.
	c.Assert(tc.addRegionStore(4, 100), IsNil)
	c.Assert(tc.addRegionStore(3, 100), IsNil)
	c.Assert(tc.addRegionStore(2, 100), IsNil)
	c.Assert(tc.addRegionStore(1, 10), IsNil)
	c.Assert(tc.addLeaderRegion(1, 2, 3, 4), IsNil)
	region := tc.GetRegion(1).Clone(core.SetApproximateSize(60))
	tc.putRegion(region)
	{
		// The first schedule succeeds and consumes the store's tokens.
		op1 := lb.Schedule(tc)[0]
		c.Assert(op1, NotNil)
		c.Assert(oc.AddOperator(op1), IsTrue)
		c.Assert(oc.RemoveOperator(op1), IsTrue)
	}
	// With the bucket empty, scheduling is rejected.
	for i := 0; i < 10; i++ {
		c.Assert(lb.Schedule(tc), IsNil)
	}
	// reset all stores' limit
	// scheduling one time needs 1/10 seconds
	oc.SetAllStoresLimit(10, schedule.StoreLimitManual)
	for i := 0; i < 10; i++ {
		op1 := lb.Schedule(tc)[0]
		c.Assert(op1, NotNil)
		c.Assert(oc.AddOperator(op1), IsTrue)
		c.Assert(oc.RemoveOperator(op1), IsTrue)
	}
	// sleep 1 seconds to make sure that the token is filled up
	time.Sleep(1 * time.Second)
	for i := 0; i < 100; i++ {
		c.Assert(lb.Schedule(tc), NotNil)
	}
}
// TestStoreOverloadedWithReplace verifies that a high-priority operator can
// replace an existing one on the same region even when the store is
// throttled, while a normal-priority operator on another region is rejected
// until tokens refill.
func (s *testOperatorControllerSuite) TestStoreOverloadedWithReplace(c *C) {
	tc, co, cleanup := prepare(func(cfg *config.ScheduleConfig) {
		// scheduling one time needs 2 seconds
		cfg.StoreBalanceRate = 30
	}, nil, nil, c)
	defer cleanup()
	oc := co.opController
	lb, err := schedule.CreateScheduler(schedulers.BalanceRegionType, oc, tc.storage, schedule.ConfigSliceDecoder(schedulers.BalanceRegionType, []string{"", ""}))
	c.Assert(err, IsNil)
	c.Assert(tc.addRegionStore(4, 100), IsNil)
	c.Assert(tc.addRegionStore(3, 100), IsNil)
	c.Assert(tc.addRegionStore(2, 100), IsNil)
	c.Assert(tc.addRegionStore(1, 10), IsNil)
	c.Assert(tc.addLeaderRegion(1, 2, 3, 4), IsNil)
	c.Assert(tc.addLeaderRegion(2, 1, 3, 4), IsNil)
	region := tc.GetRegion(1).Clone(core.SetApproximateSize(60))
	tc.putRegion(region)
	region = tc.GetRegion(2).Clone(core.SetApproximateSize(60))
	tc.putRegion(region)
	// op1 consumes store 1's tokens.
	op1 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpRegion, operator.AddPeer{ToStore: 1, PeerID: 1})
	c.Assert(oc.AddOperator(op1), IsTrue)
	// High priority allows op2 to replace op1 on region 1.
	op2 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpRegion, operator.AddPeer{ToStore: 2, PeerID: 2})
	op2.SetPriorityLevel(core.HighPriority)
	c.Assert(oc.AddOperator(op2), IsTrue)
	// Normal priority on a different region is rejected while throttled.
	op3 := newTestOperator(1, tc.GetRegion(2).GetRegionEpoch(), operator.OpRegion, operator.AddPeer{ToStore: 1, PeerID: 3})
	c.Assert(oc.AddOperator(op3), IsFalse)
	c.Assert(lb.Schedule(tc), IsNil)
	// sleep 2 seconds to make sure that token is filled up
	time.Sleep(2 * time.Second)
	c.Assert(lb.Schedule(tc), NotNil)
}
// Register the suite with gocheck.
var _ = Suite(&testScheduleControllerSuite{})

// testScheduleControllerSuite exercises the per-scheduler scheduleController.
type testScheduleControllerSuite struct {
	ctx    context.Context // suite-wide context, cancelled in TearDownSuite
	cancel context.CancelFunc
}

// SetUpSuite creates the suite context and enables the unexpectedOperator
// failpoint so invalid operators are surfaced as test failures.
func (s *testScheduleControllerSuite) SetUpSuite(c *C) {
	s.ctx, s.cancel = context.WithCancel(context.Background())
	c.Assert(failpoint.Enable("github.com/pingcap/pd/v4/server/schedule/unexpectedOperator", "return(true)"), IsNil)
}

// TearDownSuite cancels the suite context.
func (s *testScheduleControllerSuite) TearDownSuite(c *C) {
	s.cancel()
}
// FIXME: remove after move into schedulers package

// mockLimitScheduler wraps a real scheduler but gates IsScheduleAllowed on a
// configurable operator-count limit, letting tests control admission exactly.
type mockLimitScheduler struct {
	schedule.Scheduler                              // embedded real scheduler
	limit              uint64                       // max concurrent operators of `kind`
	counter            *schedule.OperatorController // source of the live operator count
	kind               operator.OpKind
}
// IsScheduleAllowed reports whether another operator of the configured kind
// may be scheduled, i.e. the live count is still below the limit.
func (s *mockLimitScheduler) IsScheduleAllowed(cluster opt.Cluster) bool {
	current := s.counter.OperatorCount(s.kind)
	return current < s.limit
}
// TestController verifies scheduleController behavior: interval backoff up to
// the maximum, admission against the scheduler's limit, replacement of
// waiting operators by high-priority/admin operators, and rejection of
// operators with stale region IDs or epochs.
func (s *testScheduleControllerSuite) TestController(c *C) {
	tc, co, cleanup := prepare(nil, nil, nil, c)
	defer cleanup()
	oc := co.opController
	c.Assert(tc.addLeaderRegion(1, 1), IsNil)
	c.Assert(tc.addLeaderRegion(2, 2), IsNil)
	scheduler, err := schedule.CreateScheduler(schedulers.BalanceLeaderType, oc, core.NewStorage(kv.NewMemoryKV()), schedule.ConfigSliceDecoder(schedulers.BalanceLeaderType, []string{"", ""}))
	c.Assert(err, IsNil)
	lb := &mockLimitScheduler{
		Scheduler: scheduler,
		counter:   oc,
		kind:      operator.OpLeader,
	}
	sc := newScheduleController(co, lb)
	// With limit 0 nothing schedules, so the interval backs off step by step
	// until it reaches the maximum.
	for i := schedulers.MinScheduleInterval; sc.GetInterval() != schedulers.MaxScheduleInterval; i = sc.GetNextInterval(i) {
		c.Assert(sc.GetInterval(), Equals, i)
		c.Assert(sc.Schedule(), IsNil)
	}
	// limit = 2
	lb.limit = 2
	// count = 0
	{
		c.Assert(sc.AllowSchedule(), IsTrue)
		op1 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpLeader)
		c.Assert(oc.AddWaitingOperator(op1), Equals, 1)
		// count = 1
		c.Assert(sc.AllowSchedule(), IsTrue)
		op2 := newTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpLeader)
		c.Assert(oc.AddWaitingOperator(op2), Equals, 1)
		// count = 2
		c.Assert(sc.AllowSchedule(), IsFalse)
		c.Assert(oc.RemoveOperator(op1), IsTrue)
		// count = 1
		c.Assert(sc.AllowSchedule(), IsTrue)
	}
	op11 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpLeader)
	// add a PriorityKind operator will remove old operator
	{
		op3 := newTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpHotRegion)
		op3.SetPriorityLevel(core.HighPriority)
		c.Assert(oc.AddWaitingOperator(op11), Equals, 1)
		c.Assert(sc.AllowSchedule(), IsFalse)
		c.Assert(oc.AddWaitingOperator(op3), Equals, 1)
		c.Assert(sc.AllowSchedule(), IsTrue)
		c.Assert(oc.RemoveOperator(op3), IsTrue)
	}
	// add a admin operator will remove old operator
	{
		op2 := newTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpLeader)
		c.Assert(oc.AddWaitingOperator(op2), Equals, 1)
		c.Assert(sc.AllowSchedule(), IsFalse)
		op4 := newTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpAdmin)
		op4.SetPriorityLevel(core.HighPriority)
		c.Assert(oc.AddWaitingOperator(op4), Equals, 1)
		c.Assert(sc.AllowSchedule(), IsTrue)
		c.Assert(oc.RemoveOperator(op4), IsTrue)
	}
	// test wrong region id.
	{
		op5 := newTestOperator(3, &metapb.RegionEpoch{}, operator.OpHotRegion)
		c.Assert(oc.AddWaitingOperator(op5), Equals, 0)
	}
	// test wrong region epoch.
	c.Assert(oc.RemoveOperator(op11), IsTrue)
	epoch := &metapb.RegionEpoch{
		Version: tc.GetRegion(1).GetRegionEpoch().GetVersion() + 1,
		ConfVer: tc.GetRegion(1).GetRegionEpoch().GetConfVer(),
	}
	{
		op6 := newTestOperator(1, epoch, operator.OpLeader)
		c.Assert(oc.AddWaitingOperator(op6), Equals, 0)
	}
	// With the matching version the operator is accepted again.
	epoch.Version--
	{
		op6 := newTestOperator(1, epoch, operator.OpLeader)
		c.Assert(oc.AddWaitingOperator(op6), Equals, 1)
		c.Assert(oc.RemoveOperator(op6), IsTrue)
	}
}
// TestInterval verifies the interval backoff heuristic: after roughly x
// seconds without producing an operator, the controller's next-check interval
// stays below x/2 seconds.
func (s *testScheduleControllerSuite) TestInterval(c *C) {
	_, co, cleanup := prepare(nil, nil, nil, c)
	defer cleanup()
	lb, err := schedule.CreateScheduler(schedulers.BalanceLeaderType, co.opController, core.NewStorage(kv.NewMemoryKV()), schedule.ConfigSliceDecoder(schedulers.BalanceLeaderType, []string{"", ""}))
	c.Assert(err, IsNil)
	sc := newScheduleController(co, lb)
	// If no operator for x seconds, the next check should be in x/2 seconds.
	idleSeconds := []int{5, 10, 20, 30, 60}
	for _, n := range idleSeconds {
		sc.nextInterval = schedulers.MinScheduleInterval
		// Simulate idling for n seconds of (virtual) schedule attempts.
		for totalSleep := time.Duration(0); totalSleep <= time.Second*time.Duration(n); totalSleep += sc.GetInterval() {
			c.Assert(sc.Schedule(), IsNil)
		}
		c.Assert(sc.GetInterval(), Less, time.Second*time.Duration(n/2))
	}
}
// waitAddLearner blocks until the stream delivers an AddLearnerNode change
// for the given region targeting storeID, then returns the region updated
// with the new peer and a bumped conf version.
func waitAddLearner(c *C, stream mockhbstream.HeartbeatStream, region *core.RegionInfo, storeID uint64) *core.RegionInfo {
	var res *pdpb.RegionHeartbeatResponse
	testutil.WaitUntil(c, func(c *C) bool {
		res = stream.Recv()
		if res == nil {
			return false
		}
		cp := res.GetChangePeer()
		return res.GetRegionId() == region.GetID() &&
			cp.GetChangeType() == eraftpb.ConfChangeType_AddLearnerNode &&
			cp.GetPeer().GetStoreId() == storeID
	})
	return region.Clone(
		core.WithAddPeer(res.GetChangePeer().GetPeer()),
		core.WithIncConfVer(),
	)
}
// waitPromoteLearner blocks until the stream delivers an AddNode change for
// the given region targeting storeID, then returns the region with the
// learner on that store replaced by the voter peer from the response.
func waitPromoteLearner(c *C, stream mockhbstream.HeartbeatStream, region *core.RegionInfo, storeID uint64) *core.RegionInfo {
	var res *pdpb.RegionHeartbeatResponse
	testutil.WaitUntil(c, func(c *C) bool {
		res = stream.Recv()
		if res == nil {
			return false
		}
		cp := res.GetChangePeer()
		return res.GetRegionId() == region.GetID() &&
			cp.GetChangeType() == eraftpb.ConfChangeType_AddNode &&
			cp.GetPeer().GetStoreId() == storeID
	})
	// Remove learner than add voter.
	return region.Clone(
		core.WithRemoveStorePeer(storeID),
		core.WithAddPeer(res.GetChangePeer().GetPeer()),
	)
}
// waitRemovePeer blocks until the stream delivers a RemoveNode change for the
// given region targeting storeID, then returns the region with that store's
// peer removed and the conf version bumped.
func waitRemovePeer(c *C, stream mockhbstream.HeartbeatStream, region *core.RegionInfo, storeID uint64) *core.RegionInfo {
	var res *pdpb.RegionHeartbeatResponse
	testutil.WaitUntil(c, func(c *C) bool {
		res = stream.Recv()
		if res == nil {
			return false
		}
		cp := res.GetChangePeer()
		return res.GetRegionId() == region.GetID() &&
			cp.GetChangeType() == eraftpb.ConfChangeType_RemoveNode &&
			cp.GetPeer().GetStoreId() == storeID
	})
	return region.Clone(
		core.WithRemoveStorePeer(storeID),
		core.WithIncConfVer(),
	)
}
// waitTransferLeader blocks until the stream delivers a transfer-leader
// response for the given region targeting storeID, then returns the region
// with its leader set to that peer.
func waitTransferLeader(c *C, stream mockhbstream.HeartbeatStream, region *core.RegionInfo, storeID uint64) *core.RegionInfo {
	var res *pdpb.RegionHeartbeatResponse
	testutil.WaitUntil(c, func(c *C) bool {
		res = stream.Recv()
		if res == nil {
			return false
		}
		return res.GetRegionId() == region.GetID() && res.GetTransferLeader().GetPeer().GetStoreId() == storeID
	})
	return region.Clone(
		core.WithLeader(res.GetTransferLeader().GetPeer()),
	)
}
// waitNoResponse asserts that the stream eventually yields no response,
// i.e. the coordinator sends nothing further for the dispatched heartbeat.
func waitNoResponse(c *C, stream mockhbstream.HeartbeatStream) {
	testutil.WaitUntil(c, func(c *C) bool {
		return stream.Recv() == nil
	})
}
|
package meta
import (
"sort"
"time"
"github.com/gogo/protobuf/proto"
"github.com/influxdb/influxdb/influxql"
"github.com/influxdb/influxdb/meta/internal"
)
//go:generate protoc --gogo_out=. internal/meta.proto

// Retention policy defaults and bounds enforced by this package.
const (
	// DefaultRetentionPolicyReplicaN is the default value of RetentionPolicyInfo.ReplicaN.
	DefaultRetentionPolicyReplicaN = 1

	// DefaultRetentionPolicyDuration is the default value of RetentionPolicyInfo.Duration.
	DefaultRetentionPolicyDuration = 7 * (24 * time.Hour)

	// MinRetentionPolicyDuration represents the minimum duration for a policy.
	// UpdateRetentionPolicy rejects shorter non-zero durations.
	MinRetentionPolicyDuration = time.Hour
)
// Data represents the top level collection of all metadata.
type Data struct {
	Term      uint64 // associated raft term
	Index     uint64 // associated raft index
	ClusterID uint64

	Nodes     []NodeInfo
	Databases []DatabaseInfo
	Users     []UserInfo

	// High-water marks used to allocate new IDs; they only ever increase
	// (see CreateNode and CreateShardGroup).
	MaxNodeID       uint64
	MaxShardGroupID uint64
	MaxShardID      uint64
}
// Node returns the node with the given id, or nil if no such node exists.
// The returned pointer aliases the entry in data.Nodes.
func (data *Data) Node(id uint64) *NodeInfo {
	for i := range data.Nodes {
		n := &data.Nodes[i]
		if n.ID == id {
			return n
		}
	}
	return nil
}
// NodeByHost returns the node with the given hostname, or nil if none
// matches. The returned pointer aliases the entry in data.Nodes.
func (data *Data) NodeByHost(host string) *NodeInfo {
	for i := range data.Nodes {
		n := &data.Nodes[i]
		if n.Host == host {
			return n
		}
	}
	return nil
}
// CreateNode adds a node to the metadata, assigning it the next node ID.
// It returns ErrNodeExists if a node with the same host is already present.
func (data *Data) CreateNode(host string) error {
	// Reject duplicate hostnames.
	if existing := data.NodeByHost(host); existing != nil {
		return ErrNodeExists
	}
	// Allocate a fresh ID and register the node.
	data.MaxNodeID++
	node := NodeInfo{
		ID:   data.MaxNodeID,
		Host: host,
	}
	data.Nodes = append(data.Nodes, node)
	return nil
}
// DeleteNode removes the node with the given id from the metadata.
// It returns ErrNodeNotFound if no node has that id.
func (data *Data) DeleteNode(id uint64) error {
	for i, n := range data.Nodes {
		if n.ID != id {
			continue
		}
		data.Nodes = append(data.Nodes[:i], data.Nodes[i+1:]...)
		return nil
	}
	return ErrNodeNotFound
}
// Database returns the database with the given name, or nil if it does not
// exist. The returned pointer aliases the entry in data.Databases.
func (data *Data) Database(name string) *DatabaseInfo {
	for i := range data.Databases {
		if db := &data.Databases[i]; db.Name == name {
			return db
		}
	}
	return nil
}
// CreateDatabase creates a new database.
// Returns ErrDatabaseNameRequired if name is blank and ErrDatabaseExists if a
// database with the same name already exists.
func (data *Data) CreateDatabase(name string) error {
	switch {
	case name == "":
		return ErrDatabaseNameRequired
	case data.Database(name) != nil:
		return ErrDatabaseExists
	}

	// Register the new database.
	data.Databases = append(data.Databases, DatabaseInfo{Name: name})
	return nil
}
// DropDatabase removes the named database from the metadata.
// It returns ErrDatabaseNotFound if no database has that name.
func (data *Data) DropDatabase(name string) error {
	for i := range data.Databases {
		if data.Databases[i].Name != name {
			continue
		}
		data.Databases = append(data.Databases[:i], data.Databases[i+1:]...)
		return nil
	}
	return ErrDatabaseNotFound
}
// RetentionPolicy returns the named retention policy of a database.
// It returns ErrDatabaseNotFound if the database does not exist and
// ErrRetentionPolicyNotFound if the policy does not exist on it.
func (data *Data) RetentionPolicy(database, name string) (*RetentionPolicyInfo, error) {
	di := data.Database(database)
	if di == nil {
		return nil, ErrDatabaseNotFound
	}
	for i := range di.RetentionPolicies {
		if rp := &di.RetentionPolicies[i]; rp.Name == name {
			return rp, nil
		}
	}
	return nil, ErrRetentionPolicyNotFound
}
// CreateRetentionPolicy creates a new retention policy on a database.
// It validates the policy (non-empty name, replication factor >= 1), requires
// the database to exist, and rejects duplicate policy names.
func (data *Data) CreateRetentionPolicy(database string, rpi *RetentionPolicyInfo) error {
	// Validate the incoming policy before touching any state.
	switch {
	case rpi.Name == "":
		return ErrRetentionPolicyNameRequired
	case rpi.ReplicaN < 1:
		return ErrReplicationFactorTooLow
	}

	// The database must exist and must not already have this policy.
	di := data.Database(database)
	switch {
	case di == nil:
		return ErrDatabaseNotFound
	case di.RetentionPolicy(rpi.Name) != nil:
		return ErrRetentionPolicyExists
	}

	// Store a normalized copy; the shard group duration is derived from
	// the policy duration rather than taken from the caller.
	policy := RetentionPolicyInfo{
		Name:               rpi.Name,
		Duration:           rpi.Duration,
		ShardGroupDuration: shardGroupDuration(rpi.Duration),
		ReplicaN:           rpi.ReplicaN,
	}
	di.RetentionPolicies = append(di.RetentionPolicies, policy)
	return nil
}
// DropRetentionPolicy removes the named retention policy from a database.
// It returns ErrDatabaseNotFound if the database does not exist and
// ErrRetentionPolicyNotFound if the policy does not exist on it.
func (data *Data) DropRetentionPolicy(database, name string) error {
	di := data.Database(database)
	if di == nil {
		return ErrDatabaseNotFound
	}
	for i := range di.RetentionPolicies {
		if di.RetentionPolicies[i].Name != name {
			continue
		}
		di.RetentionPolicies = append(di.RetentionPolicies[:i], di.RetentionPolicies[i+1:]...)
		return nil
	}
	return ErrRetentionPolicyNotFound
}
// UpdateRetentionPolicy updates an existing retention policy.
//
// Only the non-nil fields of rpu are applied. All validation happens before
// any mutation so a failed update leaves the policy unchanged. A new name may
// not collide with another policy on the same database, and a non-zero
// duration must be at least MinRetentionPolicyDuration (zero means "infinite"
// here, since it is explicitly exempted from the minimum check).
func (data *Data) UpdateRetentionPolicy(database, name string, rpu *RetentionPolicyUpdate) error {
	// Find database.
	di := data.Database(database)
	if di == nil {
		return ErrDatabaseNotFound
	}

	// Find policy.
	rpi := di.RetentionPolicy(name)
	if rpi == nil {
		return ErrRetentionPolicyNotFound
	}

	// Ensure new policy doesn't match an existing policy.
	if rpu.Name != nil && *rpu.Name != name && di.RetentionPolicy(*rpu.Name) != nil {
		return ErrRetentionPolicyNameExists
	}

	// Enforce duration of at least MinRetentionPolicyDuration
	if rpu.Duration != nil && *rpu.Duration < MinRetentionPolicyDuration && *rpu.Duration != 0 {
		return ErrRetentionPolicyDurationTooLow
	}

	// Update fields.
	if rpu.Name != nil {
		rpi.Name = *rpu.Name
	}
	if rpu.Duration != nil {
		rpi.Duration = *rpu.Duration
		// Changing the duration re-derives the shard group duration.
		rpi.ShardGroupDuration = shardGroupDuration(rpi.Duration)
	}
	if rpu.ReplicaN != nil {
		rpi.ReplicaN = *rpu.ReplicaN
	}
	return nil
}
// SetDefaultRetentionPolicy sets the default retention policy for a database.
// Both the database and the named policy must already exist.
func (data *Data) SetDefaultRetentionPolicy(database, name string) error {
	di := data.Database(database)
	switch {
	case di == nil:
		return ErrDatabaseNotFound
	case di.RetentionPolicy(name) == nil:
		return ErrRetentionPolicyNotFound
	}

	di.DefaultRetentionPolicy = name
	return nil
}
// ShardGroups returns all shard groups on a database and retention policy,
// excluding groups that have been marked deleted.
func (data *Data) ShardGroups(database, policy string) ([]ShardGroupInfo, error) {
	rpi, err := data.RetentionPolicy(database, policy)
	if err != nil {
		return nil, err
	}
	if rpi == nil {
		return nil, ErrRetentionPolicyNotFound
	}
	groups := make([]ShardGroupInfo, 0, len(rpi.ShardGroups))
	for _, sg := range rpi.ShardGroups {
		if !sg.Deleted() {
			groups = append(groups, sg)
		}
	}
	return groups, nil
}
// ShardGroupsByTimeRange returns all live shard groups on a database and
// policy that may contain data for the time range [tmin, tmax]. The result
// preserves the policy's ordering (shard groups are kept sorted by start
// time when created).
func (data *Data) ShardGroupsByTimeRange(database, policy string, tmin, tmax time.Time) ([]ShardGroupInfo, error) {
	rpi, err := data.RetentionPolicy(database, policy)
	if err != nil {
		return nil, err
	}
	if rpi == nil {
		return nil, ErrRetentionPolicyNotFound
	}
	groups := make([]ShardGroupInfo, 0, len(rpi.ShardGroups))
	for _, sg := range rpi.ShardGroups {
		// Skip soft-deleted groups and groups outside the window.
		if sg.Deleted() || !sg.Overlaps(tmin, tmax) {
			continue
		}
		groups = append(groups, sg)
	}
	return groups, nil
}
// ShardGroupByTimestamp returns the shard group on a database and policy that
// covers the given timestamp, delegating the lookup to the policy itself.
func (data *Data) ShardGroupByTimestamp(database, policy string, timestamp time.Time) (*ShardGroupInfo, error) {
	rpi, err := data.RetentionPolicy(database, policy)
	if err != nil {
		return nil, err
	}
	if rpi == nil {
		return nil, ErrRetentionPolicyNotFound
	}
	return rpi.ShardGroupByTimestamp(timestamp), nil
}
// CreateShardGroup creates a shard group on a database and policy for a given
// timestamp.
//
// The group's time window is the policy's ShardGroupDuration, aligned by
// truncating the timestamp. Shard and shard-group IDs are allocated from the
// Data high-water marks, so the increment order below is significant. Shard
// owners are assigned round-robin over the node list, starting at an offset
// derived from the raft index so repeated application is deterministic.
func (data *Data) CreateShardGroup(database, policy string, timestamp time.Time) error {
	// Ensure there are nodes in the metadata.
	if len(data.Nodes) == 0 {
		return ErrNodesRequired
	}

	// Find retention policy.
	rpi, err := data.RetentionPolicy(database, policy)
	if err != nil {
		return err
	} else if rpi == nil {
		return ErrRetentionPolicyNotFound
	}

	// Verify that shard group doesn't already exist for this timestamp.
	if rpi.ShardGroupByTimestamp(timestamp) != nil {
		return ErrShardGroupExists
	}

	// Require at least one replica but no more replicas than nodes.
	replicaN := rpi.ReplicaN
	if replicaN == 0 {
		replicaN = 1
	} else if replicaN > len(data.Nodes) {
		replicaN = len(data.Nodes)
	}

	// Determine shard count by node count divided by replication factor.
	// This will ensure nodes will get distributed across nodes evenly and
	// replicated the correct number of times.
	shardN := len(data.Nodes) / replicaN

	// Create the shard group.
	data.MaxShardGroupID++
	sgi := ShardGroupInfo{}
	sgi.ID = data.MaxShardGroupID
	sgi.StartTime = timestamp.Truncate(rpi.ShardGroupDuration).UTC()
	sgi.EndTime = sgi.StartTime.Add(rpi.ShardGroupDuration).UTC()

	// Create shards on the group.
	sgi.Shards = make([]ShardInfo, shardN)
	for i := range sgi.Shards {
		data.MaxShardID++
		sgi.Shards[i] = ShardInfo{ID: data.MaxShardID}
	}

	// Assign data nodes to shards via round robin.
	// Start from a repeatably "random" place in the node list.
	nodeIndex := int(data.Index % uint64(len(data.Nodes)))
	for i := range sgi.Shards {
		si := &sgi.Shards[i]
		for j := 0; j < replicaN; j++ {
			nodeID := data.Nodes[nodeIndex%len(data.Nodes)].ID
			si.Owners = append(si.Owners, ShardOwner{NodeID: nodeID})
			nodeIndex++
		}
	}

	// Retention policy has a new shard group, so update the policy. Shard
	// Groups must be stored in sorted order, as other parts of the system
	// assume this to be the case.
	rpi.ShardGroups = append(rpi.ShardGroups, sgi)
	sort.Sort(ShardGroupInfos(rpi.ShardGroups))

	return nil
}
// DeleteShardGroup soft-deletes a shard group on a database and retention
// policy by stamping it with a deletion time; it is not removed from the
// slice (callers filter via Deleted()).
func (data *Data) DeleteShardGroup(database, policy string, id uint64) error {
	rpi, err := data.RetentionPolicy(database, policy)
	if err != nil {
		return err
	}
	if rpi == nil {
		return ErrRetentionPolicyNotFound
	}
	for i := range rpi.ShardGroups {
		if rpi.ShardGroups[i].ID != id {
			continue
		}
		rpi.ShardGroups[i].DeletedAt = time.Now().UTC()
		return nil
	}
	return ErrShardGroupNotFound
}
// CreateContinuousQuery adds a named continuous query to a database.
// It returns ErrDatabaseNotFound if the database does not exist and
// ErrContinuousQueryExists if the name is already taken.
func (data *Data) CreateContinuousQuery(database, name, query string) error {
	di := data.Database(database)
	if di == nil {
		return ErrDatabaseNotFound
	}

	// Continuous query names must be unique within the database.
	for _, cq := range di.ContinuousQueries {
		if cq.Name == name {
			return ErrContinuousQueryExists
		}
	}

	cqi := ContinuousQueryInfo{
		Name:  name,
		Query: query,
	}
	di.ContinuousQueries = append(di.ContinuousQueries, cqi)
	return nil
}
// DropContinuousQuery removes the named continuous query from a database.
// It returns ErrDatabaseNotFound if the database does not exist and
// ErrContinuousQueryNotFound if the query does not exist on it.
func (data *Data) DropContinuousQuery(database, name string) error {
	di := data.Database(database)
	if di == nil {
		return ErrDatabaseNotFound
	}
	for i := range di.ContinuousQueries {
		if di.ContinuousQueries[i].Name != name {
			continue
		}
		di.ContinuousQueries = append(di.ContinuousQueries[:i], di.ContinuousQueries[i+1:]...)
		return nil
	}
	return ErrContinuousQueryNotFound
}
// User returns the user with the given username, or nil if no such user
// exists. The returned pointer aliases the entry in data.Users.
func (data *Data) User(username string) *UserInfo {
	for i := range data.Users {
		if u := &data.Users[i]; u.Name == username {
			return u
		}
	}
	return nil
}
// CreateUser creates a new user with the given password hash and admin flag.
// It returns ErrUsernameRequired for a blank name and ErrUserExists for a
// duplicate one.
func (data *Data) CreateUser(name, hash string, admin bool) error {
	switch {
	case name == "":
		return ErrUsernameRequired
	case data.User(name) != nil:
		return ErrUserExists
	}

	user := UserInfo{
		Name:  name,
		Hash:  hash,
		Admin: admin,
	}
	data.Users = append(data.Users, user)
	return nil
}
// DropUser removes an existing user by name.
// It returns ErrUserNotFound if no user has that name.
func (data *Data) DropUser(name string) error {
	for i := range data.Users {
		if data.Users[i].Name != name {
			continue
		}
		data.Users = append(data.Users[:i], data.Users[i+1:]...)
		return nil
	}
	return ErrUserNotFound
}
// UpdateUser updates the password hash of an existing user.
// It returns ErrUserNotFound if no user has that name.
func (data *Data) UpdateUser(name, hash string) error {
	for i := range data.Users {
		u := &data.Users[i]
		if u.Name == name {
			u.Hash = hash
			return nil
		}
	}
	return ErrUserNotFound
}
// SetPrivilege grants privilege p to the named user on the given database.
// It returns ErrUserNotFound if the user does not exist.
func (data *Data) SetPrivilege(name, database string, p influxql.Privilege) error {
	ui := data.User(name)
	if ui == nil {
		return ErrUserNotFound
	}

	// Lazily allocate the privilege map on the first grant.
	if ui.Privileges == nil {
		ui.Privileges = map[string]influxql.Privilege{}
	}
	ui.Privileges[database] = p
	return nil
}
// SetAdminPrivilege sets the admin flag for the named user.
// It returns ErrUserNotFound if the user does not exist.
func (data *Data) SetAdminPrivilege(name string, admin bool) error {
	u := data.User(name)
	if u == nil {
		return ErrUserNotFound
	}
	u.Admin = admin
	return nil
}
// UserPrivileges returns the per-database privilege map for the named user
// (possibly nil if nothing was ever granted). It returns ErrUserNotFound if
// the user does not exist.
func (data *Data) UserPrivileges(name string) (map[string]influxql.Privilege, error) {
	u := data.User(name)
	if u == nil {
		return nil, ErrUserNotFound
	}
	return u.Privileges, nil
}
// UserPrivilege gets the privilege for a user on a database.
//
// It returns ErrUserNotFound if the user does not exist. If the user has no
// explicit grant for the database, a NoPrivileges value is returned rather
// than an error.
func (data *Data) UserPrivilege(name, database string) (*influxql.Privilege, error) {
	ui := data.User(name)
	if ui == nil {
		return nil, ErrUserNotFound
	}

	// Direct map lookup instead of iterating every entry; a lookup on a nil
	// map safely reports !ok, matching the old range-over-nil behavior.
	if p, ok := ui.Privileges[database]; ok {
		return &p, nil
	}
	return influxql.NewPrivilege(influxql.NoPrivileges), nil
}
// Clone returns a deep copy of data. Scalar fields are copied by the struct
// assignment; the three slices are rebuilt element by element via each
// element's clone method. Nil slices stay nil in the copy.
func (data *Data) Clone() *Data {
	other := *data

	if data.Nodes != nil {
		other.Nodes = make([]NodeInfo, len(data.Nodes))
		for i, n := range data.Nodes {
			other.Nodes[i] = n.clone()
		}
	}

	if data.Databases != nil {
		other.Databases = make([]DatabaseInfo, len(data.Databases))
		for i, db := range data.Databases {
			other.Databases[i] = db.clone()
		}
	}

	if data.Users != nil {
		other.Users = make([]UserInfo, len(data.Users))
		for i, u := range data.Users {
			other.Users[i] = u.clone()
		}
	}

	return &other
}
// marshal serializes the metadata to its protobuf representation.
func (data *Data) marshal() *internal.Data {
	pb := &internal.Data{
		Term:      proto.Uint64(data.Term),
		Index:     proto.Uint64(data.Index),
		ClusterID: proto.Uint64(data.ClusterID),

		MaxNodeID:       proto.Uint64(data.MaxNodeID),
		MaxShardGroupID: proto.Uint64(data.MaxShardGroupID),
		MaxShardID:      proto.Uint64(data.MaxShardID),
	}

	pb.Nodes = make([]*internal.NodeInfo, 0, len(data.Nodes))
	for _, n := range data.Nodes {
		pb.Nodes = append(pb.Nodes, n.marshal())
	}

	pb.Databases = make([]*internal.DatabaseInfo, 0, len(data.Databases))
	for i := range data.Databases {
		pb.Databases = append(pb.Databases, data.Databases[i].marshal())
	}

	pb.Users = make([]*internal.UserInfo, 0, len(data.Users))
	for i := range data.Users {
		pb.Users = append(pb.Users, data.Users[i].marshal())
	}

	return pb
}
// unmarshal populates the metadata from its protobuf representation,
// replacing all existing contents.
func (data *Data) unmarshal(pb *internal.Data) {
	data.Term = pb.GetTerm()
	data.Index = pb.GetIndex()
	data.ClusterID = pb.GetClusterID()

	data.MaxNodeID = pb.GetMaxNodeID()
	data.MaxShardGroupID = pb.GetMaxShardGroupID()
	data.MaxShardID = pb.GetMaxShardID()

	nodes := pb.GetNodes()
	data.Nodes = make([]NodeInfo, len(nodes))
	for i := range nodes {
		data.Nodes[i].unmarshal(nodes[i])
	}

	dbs := pb.GetDatabases()
	data.Databases = make([]DatabaseInfo, len(dbs))
	for i := range dbs {
		data.Databases[i].unmarshal(dbs[i])
	}

	users := pb.GetUsers()
	data.Users = make([]UserInfo, len(users))
	for i := range users {
		data.Users[i].unmarshal(users[i])
	}
}
// MarshalBinary encodes the metadata to a binary format.
func (data *Data) MarshalBinary() ([]byte, error) {
return proto.Marshal(data.marshal())
}
// UnmarshalBinary decodes the object from a binary format.
func (data *Data) UnmarshalBinary(buf []byte) error {
var pb internal.Data
if err := proto.Unmarshal(buf, &pb); err != nil {
return err
}
data.unmarshal(&pb)
return nil
}
// NodeInfo represents information about a single node in the cluster.
type NodeInfo struct {
	ID   uint64 // unique node ID, allocated from Data.MaxNodeID
	Host string // network host/address of the node
}

// clone returns a deep copy of ni. NodeInfo holds no reference types,
// so a plain value copy is already deep.
func (ni NodeInfo) clone() NodeInfo { return ni }

// marshal serializes to a protobuf representation.
func (ni NodeInfo) marshal() *internal.NodeInfo {
	pb := &internal.NodeInfo{}
	pb.ID = proto.Uint64(ni.ID)
	pb.Host = proto.String(ni.Host)
	return pb
}

// unmarshal deserializes from a protobuf representation.
func (ni *NodeInfo) unmarshal(pb *internal.NodeInfo) {
	ni.ID = pb.GetID()
	ni.Host = pb.GetHost()
}

// DatabaseInfo represents information about a database in the system.
type DatabaseInfo struct {
	Name                   string
	DefaultRetentionPolicy string
	RetentionPolicies      []RetentionPolicyInfo
	ContinuousQueries      []ContinuousQueryInfo
}

// RetentionPolicy returns a retention policy by name, or nil if no policy
// with that name exists on the database.
func (di DatabaseInfo) RetentionPolicy(name string) *RetentionPolicyInfo {
	for i := range di.RetentionPolicies {
		if di.RetentionPolicies[i].Name == name {
			return &di.RetentionPolicies[i]
		}
	}
	return nil
}

// ShardInfos returns a list of all shards' info for the database.
// Shards are deduplicated by ID across retention policies and shard groups
// marked deleted are skipped. Result order is unspecified (map iteration).
func (di DatabaseInfo) ShardInfos() []ShardInfo {
	shards := map[uint64]*ShardInfo{}
	for i := range di.RetentionPolicies {
		for j := range di.RetentionPolicies[i].ShardGroups {
			sg := di.RetentionPolicies[i].ShardGroups[j]
			// Skip deleted shard groups
			if sg.Deleted() {
				continue
			}
			for k := range sg.Shards {
				si := &di.RetentionPolicies[i].ShardGroups[j].Shards[k]
				shards[si.ID] = si
			}
		}
	}
	infos := make([]ShardInfo, 0, len(shards))
	for _, info := range shards {
		infos = append(infos, *info)
	}
	return infos
}
// clone returns a deep copy of di, duplicating the retention-policy and
// continuous-query slices so the copy shares no backing storage with di.
func (di DatabaseInfo) clone() DatabaseInfo {
	other := di

	// Deep copy retention policies.
	if di.RetentionPolicies != nil {
		other.RetentionPolicies = make([]RetentionPolicyInfo, 0, len(di.RetentionPolicies))
		for _, rp := range di.RetentionPolicies {
			other.RetentionPolicies = append(other.RetentionPolicies, rp.clone())
		}
	}

	// Deep copy continuous queries.
	if di.ContinuousQueries != nil {
		other.ContinuousQueries = make([]ContinuousQueryInfo, 0, len(di.ContinuousQueries))
		for _, cq := range di.ContinuousQueries {
			other.ContinuousQueries = append(other.ContinuousQueries, cq.clone())
		}
	}

	return other
}
// marshal serializes to a protobuf representation.
func (di DatabaseInfo) marshal() *internal.DatabaseInfo {
	pb := &internal.DatabaseInfo{}
	pb.Name = proto.String(di.Name)
	pb.DefaultRetentionPolicy = proto.String(di.DefaultRetentionPolicy)
	pb.RetentionPolicies = make([]*internal.RetentionPolicyInfo, len(di.RetentionPolicies))
	for i := range di.RetentionPolicies {
		pb.RetentionPolicies[i] = di.RetentionPolicies[i].marshal()
	}
	pb.ContinuousQueries = make([]*internal.ContinuousQueryInfo, len(di.ContinuousQueries))
	for i := range di.ContinuousQueries {
		pb.ContinuousQueries[i] = di.ContinuousQueries[i].marshal()
	}
	return pb
}

// unmarshal deserializes from a protobuf representation.
// Note: empty protobuf slices leave any existing slice fields untouched
// (they are only re-allocated when the protobuf has entries).
func (di *DatabaseInfo) unmarshal(pb *internal.DatabaseInfo) {
	di.Name = pb.GetName()
	di.DefaultRetentionPolicy = pb.GetDefaultRetentionPolicy()
	if len(pb.GetRetentionPolicies()) > 0 {
		di.RetentionPolicies = make([]RetentionPolicyInfo, len(pb.GetRetentionPolicies()))
		for i, x := range pb.GetRetentionPolicies() {
			di.RetentionPolicies[i].unmarshal(x)
		}
	}
	if len(pb.GetContinuousQueries()) > 0 {
		di.ContinuousQueries = make([]ContinuousQueryInfo, len(pb.GetContinuousQueries()))
		for i, x := range pb.GetContinuousQueries() {
			di.ContinuousQueries[i].unmarshal(x)
		}
	}
}

// RetentionPolicyInfo represents metadata about a retention policy.
type RetentionPolicyInfo struct {
	Name               string
	ReplicaN           int           // number of replicas per shard
	Duration           time.Duration // how long data is retained; 0 means infinite
	ShardGroupDuration time.Duration // time span covered by each shard group
	ShardGroups        []ShardGroupInfo
}

// NewRetentionPolicyInfo returns a new instance of RetentionPolicyInfo with defaults set.
func NewRetentionPolicyInfo(name string) *RetentionPolicyInfo {
	return &RetentionPolicyInfo{
		Name:     name,
		ReplicaN: DefaultRetentionPolicyReplicaN,
		Duration: DefaultRetentionPolicyDuration,
	}
}
// ShardGroupByTimestamp returns the first live (non-deleted) shard group
// in the policy whose time window contains the timestamp, or nil.
func (rpi *RetentionPolicyInfo) ShardGroupByTimestamp(timestamp time.Time) *ShardGroupInfo {
	for i := range rpi.ShardGroups {
		sg := &rpi.ShardGroups[i]
		if sg.Deleted() || !sg.Contains(timestamp) {
			continue
		}
		return sg
	}
	return nil
}
// ExpiredShardGroups returns the shard groups whose data has outlived the
// policy duration as of time t. Groups already marked deleted are skipped,
// and an infinite policy (Duration == 0) never expires anything.
func (rpi *RetentionPolicyInfo) ExpiredShardGroups(t time.Time) []*ShardGroupInfo {
	groups := make([]*ShardGroupInfo, 0)
	for i := range rpi.ShardGroups {
		sg := &rpi.ShardGroups[i]
		if sg.Deleted() || rpi.Duration == 0 {
			continue
		}
		if sg.EndTime.Add(rpi.Duration).Before(t) {
			groups = append(groups, sg)
		}
	}
	return groups
}
// DeletedShardGroups returns pointers to every shard group in the policy
// that carries a deletion marker.
func (rpi *RetentionPolicyInfo) DeletedShardGroups() []*ShardGroupInfo {
	groups := make([]*ShardGroupInfo, 0)
	for i := range rpi.ShardGroups {
		if !rpi.ShardGroups[i].Deleted() {
			continue
		}
		groups = append(groups, &rpi.ShardGroups[i])
	}
	return groups
}
// marshal serializes to a protobuf representation.
func (rpi *RetentionPolicyInfo) marshal() *internal.RetentionPolicyInfo {
	pb := &internal.RetentionPolicyInfo{
		Name:               proto.String(rpi.Name),
		ReplicaN:           proto.Uint32(uint32(rpi.ReplicaN)),
		Duration:           proto.Int64(int64(rpi.Duration)),
		ShardGroupDuration: proto.Int64(int64(rpi.ShardGroupDuration)),
	}
	pb.ShardGroups = make([]*internal.ShardGroupInfo, len(rpi.ShardGroups))
	for i, sgi := range rpi.ShardGroups {
		pb.ShardGroups[i] = sgi.marshal()
	}
	return pb
}

// unmarshal deserializes from a protobuf representation.
// ShardGroups is only re-allocated when the protobuf carries entries.
func (rpi *RetentionPolicyInfo) unmarshal(pb *internal.RetentionPolicyInfo) {
	rpi.Name = pb.GetName()
	rpi.ReplicaN = int(pb.GetReplicaN())
	rpi.Duration = time.Duration(pb.GetDuration())
	rpi.ShardGroupDuration = time.Duration(pb.GetShardGroupDuration())
	if len(pb.GetShardGroups()) > 0 {
		rpi.ShardGroups = make([]ShardGroupInfo, len(pb.GetShardGroups()))
		for i, x := range pb.GetShardGroups() {
			rpi.ShardGroups[i].unmarshal(x)
		}
	}
}
// clone returns a deep copy of rpi; the shard-group slice is duplicated
// element by element so the copy shares no storage with the original.
func (rpi RetentionPolicyInfo) clone() RetentionPolicyInfo {
	other := rpi
	if rpi.ShardGroups != nil {
		other.ShardGroups = make([]ShardGroupInfo, 0, len(rpi.ShardGroups))
		for _, sg := range rpi.ShardGroups {
			other.ShardGroups = append(other.ShardGroups, sg.clone())
		}
	}
	return other
}
// shardGroupDuration returns the duration for a shard group based on a policy duration.
func shardGroupDuration(d time.Duration) time.Duration {
if d >= 180*24*time.Hour || d == 0 { // 6 months or 0
return 7 * 24 * time.Hour
} else if d >= 2*24*time.Hour { // 2 days
return 1 * 24 * time.Hour
}
return 1 * time.Hour
}
// ShardGroupInfo represents metadata about a shard group. The DeletedAt field is important
// because it makes it clear that a ShardGroup has been marked as deleted, and allow the system
// to be sure that a ShardGroup is not simply missing. If the DeletedAt is set, the system can
// safely delete any associated shards.
type ShardGroupInfo struct {
	ID        uint64
	StartTime time.Time
	EndTime   time.Time
	DeletedAt time.Time // zero value means live; see Deleted()
	Shards    []ShardInfo
}

// ShardGroupInfos implements sort.Interface, ordering groups by StartTime.
type ShardGroupInfos []ShardGroupInfo

func (a ShardGroupInfos) Len() int           { return len(a) }
func (a ShardGroupInfos) Less(i, j int) bool { return a[i].StartTime.Before(a[j].StartTime) }
func (a ShardGroupInfos) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
// Contains reports whether the shard group's half-open window
// [StartTime, EndTime) covers the given timestamp.
func (sgi *ShardGroupInfo) Contains(timestamp time.Time) bool {
	if sgi.StartTime.After(timestamp) {
		return false
	}
	return sgi.EndTime.After(timestamp)
}

// Overlaps reports whether the shard group's window intersects the time
// range from min to max.
func (sgi *ShardGroupInfo) Overlaps(min, max time.Time) bool {
	if sgi.StartTime.After(max) {
		return false
	}
	return sgi.EndTime.After(min)
}
// Deleted returns whether this ShardGroup has been deleted.
// A non-zero DeletedAt timestamp is the deletion marker.
func (sgi *ShardGroupInfo) Deleted() bool {
	return !sgi.DeletedAt.IsZero()
}
// clone returns a deep copy of sgi; the shard slice is duplicated element
// by element so the copy shares no storage with the original.
func (sgi ShardGroupInfo) clone() ShardGroupInfo {
	other := sgi
	if sgi.Shards != nil {
		other.Shards = make([]ShardInfo, 0, len(sgi.Shards))
		for _, sh := range sgi.Shards {
			other.Shards = append(other.Shards, sh.clone())
		}
	}
	return other
}
// ShardFor returns the shard that owns the given point hash, chosen by
// modulo distribution across the group's shards.
// Receiver renamed from "s" to "sgi" for consistency with every other
// ShardGroupInfo method.
func (sgi *ShardGroupInfo) ShardFor(hash uint64) ShardInfo {
	return sgi.Shards[hash%uint64(len(sgi.Shards))]
}
// marshal serializes to a protobuf representation.
// Times are encoded as nanoseconds since epoch via MarshalTime (zero
// time maps to 0).
func (sgi *ShardGroupInfo) marshal() *internal.ShardGroupInfo {
	pb := &internal.ShardGroupInfo{
		ID:        proto.Uint64(sgi.ID),
		StartTime: proto.Int64(MarshalTime(sgi.StartTime)),
		EndTime:   proto.Int64(MarshalTime(sgi.EndTime)),
		DeletedAt: proto.Int64(MarshalTime(sgi.DeletedAt)),
	}
	pb.Shards = make([]*internal.ShardInfo, len(sgi.Shards))
	for i := range sgi.Shards {
		pb.Shards[i] = sgi.Shards[i].marshal()
	}
	return pb
}

// unmarshal deserializes from a protobuf representation.
// Shards is only re-allocated when the protobuf carries entries.
func (sgi *ShardGroupInfo) unmarshal(pb *internal.ShardGroupInfo) {
	sgi.ID = pb.GetID()
	sgi.StartTime = UnmarshalTime(pb.GetStartTime())
	sgi.EndTime = UnmarshalTime(pb.GetEndTime())
	sgi.DeletedAt = UnmarshalTime(pb.GetDeletedAt())
	if len(pb.GetShards()) > 0 {
		sgi.Shards = make([]ShardInfo, len(pb.GetShards()))
		for i, x := range pb.GetShards() {
			sgi.Shards[i].unmarshal(x)
		}
	}
}
// ShardInfo represents metadata about a shard.
type ShardInfo struct {
	ID     uint64       // unique shard ID, allocated from Data.MaxShardID
	Owners []ShardOwner // nodes holding a replica of this shard
}
// OwnedBy reports whether nodeID appears among the shard's owners.
func (si ShardInfo) OwnedBy(nodeID uint64) bool {
	for i := range si.Owners {
		if si.Owners[i].NodeID == nodeID {
			return true
		}
	}
	return false
}
// clone returns a deep copy of si; the owners slice is duplicated so the
// copy shares no storage with the original.
func (si ShardInfo) clone() ShardInfo {
	other := si
	if si.Owners != nil {
		other.Owners = make([]ShardOwner, 0, len(si.Owners))
		for _, o := range si.Owners {
			other.Owners = append(other.Owners, o.clone())
		}
	}
	return other
}
// marshal serializes to a protobuf representation.
func (si ShardInfo) marshal() *internal.ShardInfo {
	pb := &internal.ShardInfo{
		ID: proto.Uint64(si.ID),
	}
	pb.Owners = make([]*internal.ShardOwner, len(si.Owners))
	for i := range si.Owners {
		pb.Owners[i] = si.Owners[i].marshal()
	}
	return pb
}

// UnmarshalBinary decodes the object from a binary format.
func (si *ShardInfo) UnmarshalBinary(buf []byte) error {
	var pb internal.ShardInfo
	if err := proto.Unmarshal(buf, &pb); err != nil {
		return err
	}
	si.unmarshal(&pb)
	return nil
}

// unmarshal deserializes from a protobuf representation.
// The deprecated "OwnerIDs" field takes precedence over "Owners" when both
// are present, preserving backward compatibility with older metadata.
func (si *ShardInfo) unmarshal(pb *internal.ShardInfo) {
	si.ID = pb.GetID()
	// If deprecated "OwnerIDs" exists then convert it to "Owners" format.
	if len(pb.GetOwnerIDs()) > 0 {
		si.Owners = make([]ShardOwner, len(pb.GetOwnerIDs()))
		for i, x := range pb.GetOwnerIDs() {
			si.Owners[i].unmarshal(&internal.ShardOwner{
				NodeID: proto.Uint64(x),
			})
		}
	} else if len(pb.GetOwners()) > 0 {
		si.Owners = make([]ShardOwner, len(pb.GetOwners()))
		for i, x := range pb.GetOwners() {
			si.Owners[i].unmarshal(x)
		}
	}
}

// ShardOwner represents a node that owns a shard.
type ShardOwner struct {
	NodeID uint64
}

// clone returns a deep copy of so. ShardOwner holds no reference types,
// so a value copy is already deep.
func (so ShardOwner) clone() ShardOwner {
	return so
}

// marshal serializes to a protobuf representation.
func (so ShardOwner) marshal() *internal.ShardOwner {
	return &internal.ShardOwner{
		NodeID: proto.Uint64(so.NodeID),
	}
}

// unmarshal deserializes from a protobuf representation.
func (so *ShardOwner) unmarshal(pb *internal.ShardOwner) {
	so.NodeID = pb.GetNodeID()
}

// ContinuousQueryInfo represents metadata about a continuous query.
type ContinuousQueryInfo struct {
	Name  string
	Query string
}

// clone returns a deep copy of cqi. Strings are immutable in Go, so a
// value copy is already deep.
func (cqi ContinuousQueryInfo) clone() ContinuousQueryInfo { return cqi }

// marshal serializes to a protobuf representation.
func (cqi ContinuousQueryInfo) marshal() *internal.ContinuousQueryInfo {
	return &internal.ContinuousQueryInfo{
		Name:  proto.String(cqi.Name),
		Query: proto.String(cqi.Query),
	}
}

// unmarshal deserializes from a protobuf representation.
func (cqi *ContinuousQueryInfo) unmarshal(pb *internal.ContinuousQueryInfo) {
	cqi.Name = pb.GetName()
	cqi.Query = pb.GetQuery()
}

// UserInfo represents metadata about a user in the system.
type UserInfo struct {
	Name  string
	Hash  string // password hash
	Admin bool   // admins bypass per-database privilege checks (see Authorize)
	Privileges map[string]influxql.Privilege // privileges keyed by database name
}
// Authorize reports whether the user may exercise the given privilege on
// the database. Admins are always authorized; otherwise the user must
// hold either the exact privilege or AllPrivileges on that database.
func (ui *UserInfo) Authorize(privilege influxql.Privilege, database string) bool {
	if ui.Admin {
		return true
	}
	p, ok := ui.Privileges[database]
	if !ok {
		return false
	}
	return p == privilege || p == influxql.AllPrivileges
}
// clone returns a deep copy of ui; the privilege map is duplicated so the
// copy shares no storage with the original.
func (ui UserInfo) clone() UserInfo {
	other := ui
	if ui.Privileges != nil {
		other.Privileges = make(map[string]influxql.Privilege)
		for k, v := range ui.Privileges {
			other.Privileges[k] = v
		}
	}
	return other
}

// marshal serializes to a protobuf representation.
// Privileges come from map iteration, so their order in the protobuf is
// unspecified.
func (ui UserInfo) marshal() *internal.UserInfo {
	pb := &internal.UserInfo{
		Name:  proto.String(ui.Name),
		Hash:  proto.String(ui.Hash),
		Admin: proto.Bool(ui.Admin),
	}
	for database, privilege := range ui.Privileges {
		pb.Privileges = append(pb.Privileges, &internal.UserPrivilege{
			Database:  proto.String(database),
			Privilege: proto.Int32(int32(privilege)),
		})
	}
	return pb
}

// unmarshal deserializes from a protobuf representation.
// The privilege map is always re-allocated, replacing prior contents.
func (ui *UserInfo) unmarshal(pb *internal.UserInfo) {
	ui.Name = pb.GetName()
	ui.Hash = pb.GetHash()
	ui.Admin = pb.GetAdmin()
	ui.Privileges = make(map[string]influxql.Privilege)
	for _, p := range pb.GetPrivileges() {
		ui.Privileges[p.GetDatabase()] = influxql.Privilege(p.GetPrivilege())
	}
}
// MarshalTime converts t to nanoseconds since epoch. A zero time returns 0.
func MarshalTime(t time.Time) int64 {
if t.IsZero() {
return 0
}
return t.UnixNano()
}
// UnmarshalTime converts nanoseconds since epoch to time.
// A zero value returns a zero time.
func UnmarshalTime(v int64) time.Time {
if v == 0 {
return time.Time{}
}
return time.Unix(0, v).UTC()
}
No error is required if the policy does not exist.
This is the same way Database() works, and it allows the caller to know
that it should access the Raft leader.
package meta
import (
"sort"
"time"
"github.com/gogo/protobuf/proto"
"github.com/influxdb/influxdb/influxql"
"github.com/influxdb/influxdb/meta/internal"
)
//go:generate protoc --gogo_out=. internal/meta.proto
const (
	// DefaultRetentionPolicyReplicaN is the default value of RetentionPolicyInfo.ReplicaN.
	DefaultRetentionPolicyReplicaN = 1
	// DefaultRetentionPolicyDuration is the default value of RetentionPolicyInfo.Duration.
	DefaultRetentionPolicyDuration = 7 * (24 * time.Hour)
	// MinRetentionPolicyDuration represents the minimum duration for a policy.
	MinRetentionPolicyDuration = time.Hour
)

// Data represents the top level collection of all metadata.
type Data struct {
	Term      uint64 // associated raft term
	Index     uint64 // associated raft index
	ClusterID uint64
	Nodes     []NodeInfo
	Databases []DatabaseInfo
	Users     []UserInfo

	// Monotonic ID counters used to allocate new node/shard-group/shard IDs.
	MaxNodeID       uint64
	MaxShardGroupID uint64
	MaxShardID      uint64
}
// Node returns the node with the given ID, or nil if none matches.
func (data *Data) Node(id uint64) *NodeInfo {
	for i, n := range data.Nodes {
		if n.ID == id {
			return &data.Nodes[i]
		}
	}
	return nil
}

// NodeByHost returns the node with the given hostname, or nil if none matches.
func (data *Data) NodeByHost(host string) *NodeInfo {
	for i, n := range data.Nodes {
		if n.Host == host {
			return &data.Nodes[i]
		}
	}
	return nil
}
// CreateNode adds a node with the given hostname to the metadata.
// Returns ErrNodeExists if the host is already registered.
func (data *Data) CreateNode(host string) error {
	// Ensure a node with the same host doesn't already exist.
	if data.NodeByHost(host) != nil {
		return ErrNodeExists
	}
	// Allocate the next node ID and register the node.
	data.MaxNodeID++
	newNode := NodeInfo{ID: data.MaxNodeID, Host: host}
	data.Nodes = append(data.Nodes, newNode)
	return nil
}
// DeleteNode removes the node with the given ID from the metadata.
// Returns ErrNodeNotFound if no such node exists.
func (data *Data) DeleteNode(id uint64) error {
	for i, n := range data.Nodes {
		if n.ID != id {
			continue
		}
		data.Nodes = append(data.Nodes[:i], data.Nodes[i+1:]...)
		return nil
	}
	return ErrNodeNotFound
}
// Database returns the database with the given name, or nil if none matches.
func (data *Data) Database(name string) *DatabaseInfo {
	for i, db := range data.Databases {
		if db.Name == name {
			return &data.Databases[i]
		}
	}
	return nil
}
// CreateDatabase creates a new database.
// Returns an error if name is blank or if a database with the same name already exists.
func (data *Data) CreateDatabase(name string) error {
	if name == "" {
		return ErrDatabaseNameRequired
	} else if data.Database(name) != nil {
		return ErrDatabaseExists
	}
	// Append new database.
	data.Databases = append(data.Databases, DatabaseInfo{Name: name})
	return nil
}
// DropDatabase removes the database with the given name.
// Returns ErrDatabaseNotFound if no such database exists.
func (data *Data) DropDatabase(name string) error {
	for i := range data.Databases {
		if data.Databases[i].Name != name {
			continue
		}
		data.Databases = append(data.Databases[:i], data.Databases[i+1:]...)
		return nil
	}
	return ErrDatabaseNotFound
}
// RetentionPolicy returns a retention policy for a database by name.
// Returns ErrDatabaseNotFound if the database does not exist. A missing
// policy is deliberately NOT an error: (nil, nil) is returned, the same
// way Database() works, so the caller knows it should consult the Raft
// leader instead.
func (data *Data) RetentionPolicy(database, name string) (*RetentionPolicyInfo, error) {
	di := data.Database(database)
	if di == nil {
		return nil, ErrDatabaseNotFound
	}
	for i := range di.RetentionPolicies {
		if di.RetentionPolicies[i].Name == name {
			return &di.RetentionPolicies[i], nil
		}
	}
	return nil, nil
}

// CreateRetentionPolicy creates a new retention policy on a database.
// Returns an error if name is blank or if a database does not exist.
func (data *Data) CreateRetentionPolicy(database string, rpi *RetentionPolicyInfo) error {
	// Validate retention policy.
	if rpi.Name == "" {
		return ErrRetentionPolicyNameRequired
	} else if rpi.ReplicaN < 1 {
		return ErrReplicationFactorTooLow
	}
	// Find database.
	di := data.Database(database)
	if di == nil {
		return ErrDatabaseNotFound
	} else if di.RetentionPolicy(rpi.Name) != nil {
		return ErrRetentionPolicyExists
	}
	// Append new policy. The shard group duration is always derived from
	// the policy duration; any value in rpi.ShardGroupDuration is ignored.
	di.RetentionPolicies = append(di.RetentionPolicies, RetentionPolicyInfo{
		Name:               rpi.Name,
		Duration:           rpi.Duration,
		ShardGroupDuration: shardGroupDuration(rpi.Duration),
		ReplicaN:           rpi.ReplicaN,
	})
	return nil
}
// DropRetentionPolicy removes the named retention policy from a database.
// Returns ErrDatabaseNotFound or ErrRetentionPolicyNotFound as appropriate.
func (data *Data) DropRetentionPolicy(database, name string) error {
	di := data.Database(database)
	if di == nil {
		return ErrDatabaseNotFound
	}
	for i := range di.RetentionPolicies {
		if di.RetentionPolicies[i].Name != name {
			continue
		}
		di.RetentionPolicies = append(di.RetentionPolicies[:i], di.RetentionPolicies[i+1:]...)
		return nil
	}
	return ErrRetentionPolicyNotFound
}
// UpdateRetentionPolicy updates an existing retention policy.
// All validation happens before any field is mutated, so a failed update
// leaves the policy unchanged. Changing Duration also re-derives
// ShardGroupDuration.
func (data *Data) UpdateRetentionPolicy(database, name string, rpu *RetentionPolicyUpdate) error {
	// Find database.
	di := data.Database(database)
	if di == nil {
		return ErrDatabaseNotFound
	}
	// Find policy.
	rpi := di.RetentionPolicy(name)
	if rpi == nil {
		return ErrRetentionPolicyNotFound
	}
	// Ensure new policy doesn't match an existing policy.
	if rpu.Name != nil && *rpu.Name != name && di.RetentionPolicy(*rpu.Name) != nil {
		return ErrRetentionPolicyNameExists
	}
	// Enforce duration of at least MinRetentionPolicyDuration
	// (0 is allowed and means infinite retention).
	if rpu.Duration != nil && *rpu.Duration < MinRetentionPolicyDuration && *rpu.Duration != 0 {
		return ErrRetentionPolicyDurationTooLow
	}
	// Update fields. Only non-nil update fields are applied.
	if rpu.Name != nil {
		rpi.Name = *rpu.Name
	}
	if rpu.Duration != nil {
		rpi.Duration = *rpu.Duration
		rpi.ShardGroupDuration = shardGroupDuration(rpi.Duration)
	}
	if rpu.ReplicaN != nil {
		rpi.ReplicaN = *rpu.ReplicaN
	}
	return nil
}
// SetDefaultRetentionPolicy sets the default retention policy for a database.
// The policy must already exist on the database.
func (data *Data) SetDefaultRetentionPolicy(database, name string) error {
	di := data.Database(database)
	if di == nil {
		return ErrDatabaseNotFound
	}
	if di.RetentionPolicy(name) == nil {
		return ErrRetentionPolicyNotFound
	}
	di.DefaultRetentionPolicy = name
	return nil
}
// ShardGroups returns a list of all shard groups on a database and policy.
// Shard groups marked deleted are filtered out.
func (data *Data) ShardGroups(database, policy string) ([]ShardGroupInfo, error) {
	// Find retention policy.
	rpi, err := data.RetentionPolicy(database, policy)
	if err != nil {
		return nil, err
	} else if rpi == nil {
		return nil, ErrRetentionPolicyNotFound
	}
	groups := make([]ShardGroupInfo, 0, len(rpi.ShardGroups))
	for _, g := range rpi.ShardGroups {
		if g.Deleted() {
			continue
		}
		groups = append(groups, g)
	}
	return groups, nil
}
// ShardGroupsByTimeRange returns all shard groups on a database and policy
// that may contain data for the specified time range. Groups arrive in the
// policy's stored order, which is sorted by start time.
func (data *Data) ShardGroupsByTimeRange(database, policy string, tmin, tmax time.Time) ([]ShardGroupInfo, error) {
	rpi, err := data.RetentionPolicy(database, policy)
	if err != nil {
		return nil, err
	}
	if rpi == nil {
		return nil, ErrRetentionPolicyNotFound
	}
	groups := make([]ShardGroupInfo, 0, len(rpi.ShardGroups))
	for _, g := range rpi.ShardGroups {
		if !g.Deleted() && g.Overlaps(tmin, tmax) {
			groups = append(groups, g)
		}
	}
	return groups, nil
}
// ShardGroupByTimestamp returns the shard group on a database and policy
// containing the given timestamp, or nil if no group covers it.
func (data *Data) ShardGroupByTimestamp(database, policy string, timestamp time.Time) (*ShardGroupInfo, error) {
	rpi, err := data.RetentionPolicy(database, policy)
	if err != nil {
		return nil, err
	}
	if rpi == nil {
		return nil, ErrRetentionPolicyNotFound
	}
	return rpi.ShardGroupByTimestamp(timestamp), nil
}
// CreateShardGroup creates a shard group on a database and policy for a given timestamp.
// The group's time window is the ShardGroupDuration-aligned interval
// containing timestamp; shards are created and their replicas are assigned
// to nodes round-robin.
func (data *Data) CreateShardGroup(database, policy string, timestamp time.Time) error {
	// Ensure there are nodes in the metadata.
	if len(data.Nodes) == 0 {
		return ErrNodesRequired
	}
	// Find retention policy.
	rpi, err := data.RetentionPolicy(database, policy)
	if err != nil {
		return err
	} else if rpi == nil {
		return ErrRetentionPolicyNotFound
	}
	// Verify that shard group doesn't already exist for this timestamp.
	if rpi.ShardGroupByTimestamp(timestamp) != nil {
		return ErrShardGroupExists
	}
	// Require at least one replica but no more replicas than nodes.
	replicaN := rpi.ReplicaN
	if replicaN == 0 {
		replicaN = 1
	} else if replicaN > len(data.Nodes) {
		replicaN = len(data.Nodes)
	}
	// Determine shard count by node count divided by replication factor.
	// This will ensure nodes will get distributed across nodes evenly and
	// replicated the correct number of times.
	shardN := len(data.Nodes) / replicaN
	// Create the shard group.
	data.MaxShardGroupID++
	sgi := ShardGroupInfo{}
	sgi.ID = data.MaxShardGroupID
	sgi.StartTime = timestamp.Truncate(rpi.ShardGroupDuration).UTC()
	sgi.EndTime = sgi.StartTime.Add(rpi.ShardGroupDuration).UTC()
	// Create shards on the group, allocating a fresh ID for each.
	sgi.Shards = make([]ShardInfo, shardN)
	for i := range sgi.Shards {
		data.MaxShardID++
		sgi.Shards[i] = ShardInfo{ID: data.MaxShardID}
	}
	// Assign data nodes to shards via round robin.
	// Start from a repeatably "random" place in the node list.
	nodeIndex := int(data.Index % uint64(len(data.Nodes)))
	for i := range sgi.Shards {
		si := &sgi.Shards[i]
		for j := 0; j < replicaN; j++ {
			nodeID := data.Nodes[nodeIndex%len(data.Nodes)].ID
			si.Owners = append(si.Owners, ShardOwner{NodeID: nodeID})
			nodeIndex++
		}
	}
	// Retention policy has a new shard group, so update the policy. Shard
	// Groups must be stored in sorted order, as other parts of the system
	// assume this to be the case.
	rpi.ShardGroups = append(rpi.ShardGroups, sgi)
	sort.Sort(ShardGroupInfos(rpi.ShardGroups))
	return nil
}
// DeleteShardGroup soft-deletes a shard group from a database and
// retention policy by ID: the group is marked with a deletion timestamp
// rather than removed from the slice.
func (data *Data) DeleteShardGroup(database, policy string, id uint64) error {
	rpi, err := data.RetentionPolicy(database, policy)
	if err != nil {
		return err
	}
	if rpi == nil {
		return ErrRetentionPolicyNotFound
	}
	for i := range rpi.ShardGroups {
		if rpi.ShardGroups[i].ID != id {
			continue
		}
		rpi.ShardGroups[i].DeletedAt = time.Now().UTC()
		return nil
	}
	return ErrShardGroupNotFound
}
// CreateContinuousQuery adds a named continuous query to a database.
// Returns ErrContinuousQueryExists if the name is already taken.
func (data *Data) CreateContinuousQuery(database, name, query string) error {
	di := data.Database(database)
	if di == nil {
		return ErrDatabaseNotFound
	}
	// Reject duplicate names.
	for _, cq := range di.ContinuousQueries {
		if cq.Name == name {
			return ErrContinuousQueryExists
		}
	}
	cqi := ContinuousQueryInfo{Name: name, Query: query}
	di.ContinuousQueries = append(di.ContinuousQueries, cqi)
	return nil
}
// DropContinuousQuery removes the named continuous query from a database.
func (data *Data) DropContinuousQuery(database, name string) error {
	di := data.Database(database)
	if di == nil {
		return ErrDatabaseNotFound
	}
	for i := range di.ContinuousQueries {
		if di.ContinuousQueries[i].Name != name {
			continue
		}
		di.ContinuousQueries = append(di.ContinuousQueries[:i], di.ContinuousQueries[i+1:]...)
		return nil
	}
	return ErrContinuousQueryNotFound
}
// User returns the user with the given username, or nil if none matches.
func (data *Data) User(username string) *UserInfo {
	for i, u := range data.Users {
		if u.Name == username {
			return &data.Users[i]
		}
	}
	return nil
}
// CreateUser creates a new user with the given password hash and admin
// flag. Returns ErrUsernameRequired for a blank name and ErrUserExists
// for a duplicate.
func (data *Data) CreateUser(name, hash string, admin bool) error {
	if name == "" {
		return ErrUsernameRequired
	}
	if data.User(name) != nil {
		return ErrUserExists
	}
	data.Users = append(data.Users, UserInfo{Name: name, Hash: hash, Admin: admin})
	return nil
}
// DropUser removes an existing user by name.
func (data *Data) DropUser(name string) error {
	for i := range data.Users {
		if data.Users[i].Name != name {
			continue
		}
		data.Users = append(data.Users[:i], data.Users[i+1:]...)
		return nil
	}
	return ErrUserNotFound
}

// UpdateUser replaces the password hash of an existing user.
func (data *Data) UpdateUser(name, hash string) error {
	for i := range data.Users {
		if data.Users[i].Name != name {
			continue
		}
		data.Users[i].Hash = hash
		return nil
	}
	return ErrUserNotFound
}
// SetPrivilege grants privilege p to the named user on a database.
func (data *Data) SetPrivilege(name, database string, p influxql.Privilege) error {
	ui := data.User(name)
	if ui == nil {
		return ErrUserNotFound
	}
	// Lazily allocate the privilege map on first grant.
	if ui.Privileges == nil {
		ui.Privileges = map[string]influxql.Privilege{}
	}
	ui.Privileges[database] = p
	return nil
}

// SetAdminPrivilege sets or clears the admin flag for a user.
func (data *Data) SetAdminPrivilege(name string, admin bool) error {
	ui := data.User(name)
	if ui == nil {
		return ErrUserNotFound
	}
	ui.Admin = admin
	return nil
}
// UserPrivileges gets the privileges for a user.
// The returned map is the user's live privilege map, not a copy.
// Returns ErrUserNotFound if the user does not exist.
func (data *Data) UserPrivileges(name string) (map[string]influxql.Privilege, error) {
	ui := data.User(name)
	if ui == nil {
		return nil, ErrUserNotFound
	}
	return ui.Privileges, nil
}
// UserPrivilege returns the privilege the named user holds on database.
// If the user exists but has no explicit grant for the database,
// NoPrivileges is returned rather than an error.
// Returns ErrUserNotFound if the user does not exist.
func (data *Data) UserPrivilege(name, database string) (*influxql.Privilege, error) {
	ui := data.User(name)
	if ui == nil {
		return nil, ErrUserNotFound
	}
	// Direct map lookup instead of iterating every entry; behavior is
	// identical but O(1) instead of O(n).
	if p, ok := ui.Privileges[database]; ok {
		return &p, nil
	}
	return influxql.NewPrivilege(influxql.NoPrivileges), nil
}
// Clone returns a deep copy of data; nested slices are cloned element by
// element so mutations of the copy never alias the original.
func (data *Data) Clone() *Data {
	other := *data
	// Copy nodes.
	if data.Nodes != nil {
		other.Nodes = make([]NodeInfo, len(data.Nodes))
		for i := range data.Nodes {
			other.Nodes[i] = data.Nodes[i].clone()
		}
	}
	// Deep copy databases.
	if data.Databases != nil {
		other.Databases = make([]DatabaseInfo, len(data.Databases))
		for i := range data.Databases {
			other.Databases[i] = data.Databases[i].clone()
		}
	}
	// Copy users.
	if data.Users != nil {
		other.Users = make([]UserInfo, len(data.Users))
		for i := range data.Users {
			other.Users[i] = data.Users[i].clone()
		}
	}
	return &other
}

// marshal serializes to a protobuf representation.
func (data *Data) marshal() *internal.Data {
	pb := &internal.Data{
		Term:            proto.Uint64(data.Term),
		Index:           proto.Uint64(data.Index),
		ClusterID:       proto.Uint64(data.ClusterID),
		MaxNodeID:       proto.Uint64(data.MaxNodeID),
		MaxShardGroupID: proto.Uint64(data.MaxShardGroupID),
		MaxShardID:      proto.Uint64(data.MaxShardID),
	}
	// Marshal nested nodes, databases and users element by element.
	pb.Nodes = make([]*internal.NodeInfo, len(data.Nodes))
	for i := range data.Nodes {
		pb.Nodes[i] = data.Nodes[i].marshal()
	}
	pb.Databases = make([]*internal.DatabaseInfo, len(data.Databases))
	for i := range data.Databases {
		pb.Databases[i] = data.Databases[i].marshal()
	}
	pb.Users = make([]*internal.UserInfo, len(data.Users))
	for i := range data.Users {
		pb.Users[i] = data.Users[i].marshal()
	}
	return pb
}

// unmarshal deserializes from a protobuf representation.
// Nested slices are always re-allocated, replacing prior contents.
func (data *Data) unmarshal(pb *internal.Data) {
	data.Term = pb.GetTerm()
	data.Index = pb.GetIndex()
	data.ClusterID = pb.GetClusterID()
	data.MaxNodeID = pb.GetMaxNodeID()
	data.MaxShardGroupID = pb.GetMaxShardGroupID()
	data.MaxShardID = pb.GetMaxShardID()
	data.Nodes = make([]NodeInfo, len(pb.GetNodes()))
	for i, x := range pb.GetNodes() {
		data.Nodes[i].unmarshal(x)
	}
	data.Databases = make([]DatabaseInfo, len(pb.GetDatabases()))
	for i, x := range pb.GetDatabases() {
		data.Databases[i].unmarshal(x)
	}
	data.Users = make([]UserInfo, len(pb.GetUsers()))
	for i, x := range pb.GetUsers() {
		data.Users[i].unmarshal(x)
	}
}

// MarshalBinary encodes the metadata to a binary format.
func (data *Data) MarshalBinary() ([]byte, error) {
	return proto.Marshal(data.marshal())
}

// UnmarshalBinary decodes the object from a binary format.
func (data *Data) UnmarshalBinary(buf []byte) error {
	var pb internal.Data
	if err := proto.Unmarshal(buf, &pb); err != nil {
		return err
	}
	data.unmarshal(&pb)
	return nil
}

// NodeInfo represents information about a single node in the cluster.
type NodeInfo struct {
	ID   uint64 // unique node ID, allocated from Data.MaxNodeID
	Host string // network host/address of the node
}

// clone returns a deep copy of ni. NodeInfo holds no reference types,
// so a value copy is already deep.
func (ni NodeInfo) clone() NodeInfo { return ni }

// marshal serializes to a protobuf representation.
func (ni NodeInfo) marshal() *internal.NodeInfo {
	pb := &internal.NodeInfo{}
	pb.ID = proto.Uint64(ni.ID)
	pb.Host = proto.String(ni.Host)
	return pb
}

// unmarshal deserializes from a protobuf representation.
func (ni *NodeInfo) unmarshal(pb *internal.NodeInfo) {
	ni.ID = pb.GetID()
	ni.Host = pb.GetHost()
}

// DatabaseInfo represents information about a database in the system.
type DatabaseInfo struct {
	Name                   string
	DefaultRetentionPolicy string
	RetentionPolicies      []RetentionPolicyInfo
	ContinuousQueries      []ContinuousQueryInfo
}

// RetentionPolicy returns a retention policy by name, or nil if absent.
func (di DatabaseInfo) RetentionPolicy(name string) *RetentionPolicyInfo {
	for i := range di.RetentionPolicies {
		if di.RetentionPolicies[i].Name == name {
			return &di.RetentionPolicies[i]
		}
	}
	return nil
}

// ShardInfos returns a list of all shards' info for the database.
// Shards are deduplicated by ID across retention policies and deleted
// shard groups are skipped. Result order is unspecified (map iteration).
func (di DatabaseInfo) ShardInfos() []ShardInfo {
	shards := map[uint64]*ShardInfo{}
	for i := range di.RetentionPolicies {
		for j := range di.RetentionPolicies[i].ShardGroups {
			sg := di.RetentionPolicies[i].ShardGroups[j]
			// Skip deleted shard groups
			if sg.Deleted() {
				continue
			}
			for k := range sg.Shards {
				si := &di.RetentionPolicies[i].ShardGroups[j].Shards[k]
				shards[si.ID] = si
			}
		}
	}
	infos := make([]ShardInfo, 0, len(shards))
	for _, info := range shards {
		infos = append(infos, *info)
	}
	return infos
}

// clone returns a deep copy of di.
func (di DatabaseInfo) clone() DatabaseInfo {
	other := di
	if di.RetentionPolicies != nil {
		other.RetentionPolicies = make([]RetentionPolicyInfo, len(di.RetentionPolicies))
		for i := range di.RetentionPolicies {
			other.RetentionPolicies[i] = di.RetentionPolicies[i].clone()
		}
	}
	// Copy continuous queries.
	if di.ContinuousQueries != nil {
		other.ContinuousQueries = make([]ContinuousQueryInfo, len(di.ContinuousQueries))
		for i := range di.ContinuousQueries {
			other.ContinuousQueries[i] = di.ContinuousQueries[i].clone()
		}
	}
	return other
}

// marshal serializes to a protobuf representation.
func (di DatabaseInfo) marshal() *internal.DatabaseInfo {
	pb := &internal.DatabaseInfo{}
	pb.Name = proto.String(di.Name)
	pb.DefaultRetentionPolicy = proto.String(di.DefaultRetentionPolicy)
	pb.RetentionPolicies = make([]*internal.RetentionPolicyInfo, len(di.RetentionPolicies))
	for i := range di.RetentionPolicies {
		pb.RetentionPolicies[i] = di.RetentionPolicies[i].marshal()
	}
	pb.ContinuousQueries = make([]*internal.ContinuousQueryInfo, len(di.ContinuousQueries))
	for i := range di.ContinuousQueries {
		pb.ContinuousQueries[i] = di.ContinuousQueries[i].marshal()
	}
	return pb
}

// unmarshal deserializes from a protobuf representation.
// Slice fields are only re-allocated when the protobuf carries entries.
func (di *DatabaseInfo) unmarshal(pb *internal.DatabaseInfo) {
	di.Name = pb.GetName()
	di.DefaultRetentionPolicy = pb.GetDefaultRetentionPolicy()
	if len(pb.GetRetentionPolicies()) > 0 {
		di.RetentionPolicies = make([]RetentionPolicyInfo, len(pb.GetRetentionPolicies()))
		for i, x := range pb.GetRetentionPolicies() {
			di.RetentionPolicies[i].unmarshal(x)
		}
	}
	if len(pb.GetContinuousQueries()) > 0 {
		di.ContinuousQueries = make([]ContinuousQueryInfo, len(pb.GetContinuousQueries()))
		for i, x := range pb.GetContinuousQueries() {
			di.ContinuousQueries[i].unmarshal(x)
		}
	}
}
// RetentionPolicyInfo represents metadata about a retention policy.
type RetentionPolicyInfo struct {
	Name               string           // policy name
	ReplicaN           int              // replica count for each shard
	Duration           time.Duration    // retention period; 0 is treated as "never expire" (see ExpiredShardGroups)
	ShardGroupDuration time.Duration    // time span covered by each shard group
	ShardGroups        []ShardGroupInfo // shard groups belonging to this policy
}

// NewRetentionPolicyInfo returns a new instance of RetentionPolicyInfo with defaults set.
func NewRetentionPolicyInfo(name string) *RetentionPolicyInfo {
	return &RetentionPolicyInfo{
		Name:     name,
		ReplicaN: DefaultRetentionPolicyReplicaN,
		Duration: DefaultRetentionPolicyDuration,
	}
}

// ShardGroupByTimestamp returns the shard group in the policy that contains the timestamp.
// Deleted shard groups are ignored; nil is returned when no group matches.
func (rpi *RetentionPolicyInfo) ShardGroupByTimestamp(timestamp time.Time) *ShardGroupInfo {
	for i := range rpi.ShardGroups {
		if rpi.ShardGroups[i].Contains(timestamp) && !rpi.ShardGroups[i].Deleted() {
			return &rpi.ShardGroups[i]
		}
	}
	return nil
}

// ExpiredShardGroups returns the Shard Groups which are considered expired, for the given time.
// A group is expired when the policy has a non-zero Duration and the group's
// EndTime plus that duration falls before t. Deleted groups are skipped.
func (rpi *RetentionPolicyInfo) ExpiredShardGroups(t time.Time) []*ShardGroupInfo {
	groups := make([]*ShardGroupInfo, 0)
	for i := range rpi.ShardGroups {
		if rpi.ShardGroups[i].Deleted() {
			continue
		}
		if rpi.Duration != 0 && rpi.ShardGroups[i].EndTime.Add(rpi.Duration).Before(t) {
			groups = append(groups, &rpi.ShardGroups[i])
		}
	}
	return groups
}

// DeletedShardGroups returns the Shard Groups which are marked as deleted.
func (rpi *RetentionPolicyInfo) DeletedShardGroups() []*ShardGroupInfo {
	groups := make([]*ShardGroupInfo, 0)
	for i := range rpi.ShardGroups {
		if rpi.ShardGroups[i].Deleted() {
			groups = append(groups, &rpi.ShardGroups[i])
		}
	}
	return groups
}

// marshal serializes to a protobuf representation.
func (rpi *RetentionPolicyInfo) marshal() *internal.RetentionPolicyInfo {
	pb := &internal.RetentionPolicyInfo{
		Name:               proto.String(rpi.Name),
		ReplicaN:           proto.Uint32(uint32(rpi.ReplicaN)),
		Duration:           proto.Int64(int64(rpi.Duration)),
		ShardGroupDuration: proto.Int64(int64(rpi.ShardGroupDuration)),
	}

	pb.ShardGroups = make([]*internal.ShardGroupInfo, len(rpi.ShardGroups))
	for i, sgi := range rpi.ShardGroups {
		pb.ShardGroups[i] = sgi.marshal()
	}

	return pb
}

// unmarshal deserializes from a protobuf representation.
func (rpi *RetentionPolicyInfo) unmarshal(pb *internal.RetentionPolicyInfo) {
	rpi.Name = pb.GetName()
	rpi.ReplicaN = int(pb.GetReplicaN())
	rpi.Duration = time.Duration(pb.GetDuration())
	rpi.ShardGroupDuration = time.Duration(pb.GetShardGroupDuration())

	if len(pb.GetShardGroups()) > 0 {
		rpi.ShardGroups = make([]ShardGroupInfo, len(pb.GetShardGroups()))
		for i, x := range pb.GetShardGroups() {
			rpi.ShardGroups[i].unmarshal(x)
		}
	}
}

// clone returns a deep copy of rpi.
func (rpi RetentionPolicyInfo) clone() RetentionPolicyInfo {
	other := rpi

	if rpi.ShardGroups != nil {
		other.ShardGroups = make([]ShardGroupInfo, len(rpi.ShardGroups))
		for i := range rpi.ShardGroups {
			other.ShardGroups[i] = rpi.ShardGroups[i].clone()
		}
	}

	return other
}
// shardGroupDuration returns the duration for a shard group based on a policy duration.
//
// Long (>= 6 months) or infinite (0) retention policies get week-long shard
// groups, policies of at least two days get day-long groups, and anything
// shorter falls back to one-hour groups.
func shardGroupDuration(d time.Duration) time.Duration {
	const (
		sixMonths = 180 * 24 * time.Hour
		twoDays   = 2 * 24 * time.Hour
	)

	switch {
	case d == 0, d >= sixMonths:
		return 7 * 24 * time.Hour
	case d >= twoDays:
		return 24 * time.Hour
	default:
		return time.Hour
	}
}
// ShardGroupInfo represents metadata about a shard group. The DeletedAt field is important
// because it makes it clear that a ShardGroup has been marked as deleted, and allow the system
// to be sure that a ShardGroup is not simply missing. If the DeletedAt is set, the system can
// safely delete any associated shards.
type ShardGroupInfo struct {
	ID        uint64      // unique shard group identifier
	StartTime time.Time   // inclusive start of the covered time range
	EndTime   time.Time   // exclusive end of the covered time range (see Contains)
	DeletedAt time.Time   // zero until the group is marked deleted
	Shards    []ShardInfo // shards that make up this group
}

// ShardGroupInfos is a sortable list of ShardGroupInfo, ordered by StartTime.
type ShardGroupInfos []ShardGroupInfo

// Len implements sort.Interface.
func (a ShardGroupInfos) Len() int { return len(a) }

// Less implements sort.Interface, ordering by ascending StartTime.
func (a ShardGroupInfos) Less(i, j int) bool { return a[i].StartTime.Before(a[j].StartTime) }

// Swap implements sort.Interface.
func (a ShardGroupInfos) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// Contains return true if the shard group contains data for the timestamp.
// The covered range is [StartTime, EndTime).
func (sgi *ShardGroupInfo) Contains(timestamp time.Time) bool {
	return !sgi.StartTime.After(timestamp) && sgi.EndTime.After(timestamp)
}

// Overlaps return whether the shard group contains data for the time range between min and max
func (sgi *ShardGroupInfo) Overlaps(min, max time.Time) bool {
	return !sgi.StartTime.After(max) && sgi.EndTime.After(min)
}

// Deleted returns whether this ShardGroup has been deleted.
func (sgi *ShardGroupInfo) Deleted() bool {
	return !sgi.DeletedAt.IsZero()
}

// clone returns a deep copy of sgi.
func (sgi ShardGroupInfo) clone() ShardGroupInfo {
	other := sgi

	if sgi.Shards != nil {
		other.Shards = make([]ShardInfo, len(sgi.Shards))
		for i := range sgi.Shards {
			other.Shards[i] = sgi.Shards[i].clone()
		}
	}

	return other
}

// ShardFor returns the ShardInfo for a Point hash.
// NOTE(review): divides by len(s.Shards), so an empty Shards slice panics —
// callers appear to guarantee at least one shard; confirm before relying on it.
func (s *ShardGroupInfo) ShardFor(hash uint64) ShardInfo {
	return s.Shards[hash%uint64(len(s.Shards))]
}

// marshal serializes to a protobuf representation.
// Times are encoded as nanoseconds since epoch via MarshalTime (zero time -> 0).
func (sgi *ShardGroupInfo) marshal() *internal.ShardGroupInfo {
	pb := &internal.ShardGroupInfo{
		ID:        proto.Uint64(sgi.ID),
		StartTime: proto.Int64(MarshalTime(sgi.StartTime)),
		EndTime:   proto.Int64(MarshalTime(sgi.EndTime)),
		DeletedAt: proto.Int64(MarshalTime(sgi.DeletedAt)),
	}

	pb.Shards = make([]*internal.ShardInfo, len(sgi.Shards))
	for i := range sgi.Shards {
		pb.Shards[i] = sgi.Shards[i].marshal()
	}

	return pb
}

// unmarshal deserializes from a protobuf representation.
func (sgi *ShardGroupInfo) unmarshal(pb *internal.ShardGroupInfo) {
	sgi.ID = pb.GetID()
	sgi.StartTime = UnmarshalTime(pb.GetStartTime())
	sgi.EndTime = UnmarshalTime(pb.GetEndTime())
	sgi.DeletedAt = UnmarshalTime(pb.GetDeletedAt())

	if len(pb.GetShards()) > 0 {
		sgi.Shards = make([]ShardInfo, len(pb.GetShards()))
		for i, x := range pb.GetShards() {
			sgi.Shards[i].unmarshal(x)
		}
	}
}
// ShardInfo represents metadata about a shard.
type ShardInfo struct {
	ID     uint64       // unique shard identifier
	Owners []ShardOwner // nodes that own a copy of this shard
}

// OwnedBy returns whether the shard's owner IDs includes nodeID.
func (si ShardInfo) OwnedBy(nodeID uint64) bool {
	for _, so := range si.Owners {
		if so.NodeID == nodeID {
			return true
		}
	}
	return false
}

// clone returns a deep copy of si.
func (si ShardInfo) clone() ShardInfo {
	other := si

	if si.Owners != nil {
		other.Owners = make([]ShardOwner, len(si.Owners))
		for i := range si.Owners {
			other.Owners[i] = si.Owners[i].clone()
		}
	}

	return other
}

// marshal serializes to a protobuf representation.
func (si ShardInfo) marshal() *internal.ShardInfo {
	pb := &internal.ShardInfo{
		ID: proto.Uint64(si.ID),
	}

	pb.Owners = make([]*internal.ShardOwner, len(si.Owners))
	for i := range si.Owners {
		pb.Owners[i] = si.Owners[i].marshal()
	}

	return pb
}

// UnmarshalBinary decodes the object from a binary format.
func (si *ShardInfo) UnmarshalBinary(buf []byte) error {
	var pb internal.ShardInfo
	if err := proto.Unmarshal(buf, &pb); err != nil {
		return err
	}
	si.unmarshal(&pb)
	return nil
}

// unmarshal deserializes from a protobuf representation.
// The deprecated OwnerIDs field takes precedence over Owners when both are
// present, preserving compatibility with older encodings.
func (si *ShardInfo) unmarshal(pb *internal.ShardInfo) {
	si.ID = pb.GetID()

	// If deprecated "OwnerIDs" exists then convert it to "Owners" format.
	if len(pb.GetOwnerIDs()) > 0 {
		si.Owners = make([]ShardOwner, len(pb.GetOwnerIDs()))
		for i, x := range pb.GetOwnerIDs() {
			si.Owners[i].unmarshal(&internal.ShardOwner{
				NodeID: proto.Uint64(x),
			})
		}
	} else if len(pb.GetOwners()) > 0 {
		si.Owners = make([]ShardOwner, len(pb.GetOwners()))
		for i, x := range pb.GetOwners() {
			si.Owners[i].unmarshal(x)
		}
	}
}
// ShardOwner represents a node that owns a shard.
type ShardOwner struct {
	NodeID uint64 // identifier of the owning node
}

// clone returns a deep copy of so.
// ShardOwner has no reference fields, so a value copy suffices.
func (so ShardOwner) clone() ShardOwner {
	return so
}

// marshal serializes to a protobuf representation.
func (so ShardOwner) marshal() *internal.ShardOwner {
	return &internal.ShardOwner{
		NodeID: proto.Uint64(so.NodeID),
	}
}

// unmarshal deserializes from a protobuf representation.
func (so *ShardOwner) unmarshal(pb *internal.ShardOwner) {
	so.NodeID = pb.GetNodeID()
}
// ContinuousQueryInfo represents metadata about a continuous query.
type ContinuousQueryInfo struct {
	Name  string // continuous query name
	Query string // raw query text
}

// clone returns a deep copy of cqi.
// Both fields are strings, so a value copy is already deep.
func (cqi ContinuousQueryInfo) clone() ContinuousQueryInfo { return cqi }

// marshal serializes to a protobuf representation.
func (cqi ContinuousQueryInfo) marshal() *internal.ContinuousQueryInfo {
	return &internal.ContinuousQueryInfo{
		Name:  proto.String(cqi.Name),
		Query: proto.String(cqi.Query),
	}
}

// unmarshal deserializes from a protobuf representation.
func (cqi *ContinuousQueryInfo) unmarshal(pb *internal.ContinuousQueryInfo) {
	cqi.Name = pb.GetName()
	cqi.Query = pb.GetQuery()
}
// UserInfo represents metadata about a user in the system.
type UserInfo struct {
	Name       string                        // user name
	Hash       string                        // hashed password
	Admin      bool                          // admins are authorized for everything
	Privileges map[string]influxql.Privilege // per-database privileges, keyed by database name
}

// Authorize returns true if the user is authorized and false if not.
// Admins are always authorized; otherwise the user's privilege on the given
// database must equal the requested privilege or be AllPrivileges.
func (ui *UserInfo) Authorize(privilege influxql.Privilege, database string) bool {
	if ui.Admin {
		return true
	}
	p, ok := ui.Privileges[database]
	return ok && (p == privilege || p == influxql.AllPrivileges)
}

// clone returns a deep copy of ui.
func (ui UserInfo) clone() UserInfo {
	other := ui

	if ui.Privileges != nil {
		other.Privileges = make(map[string]influxql.Privilege)
		for k, v := range ui.Privileges {
			other.Privileges[k] = v
		}
	}

	return other
}

// marshal serializes to a protobuf representation.
// Privileges are appended in map-iteration order, so the slice order is
// unspecified across calls.
func (ui UserInfo) marshal() *internal.UserInfo {
	pb := &internal.UserInfo{
		Name:  proto.String(ui.Name),
		Hash:  proto.String(ui.Hash),
		Admin: proto.Bool(ui.Admin),
	}

	for database, privilege := range ui.Privileges {
		pb.Privileges = append(pb.Privileges, &internal.UserPrivilege{
			Database:  proto.String(database),
			Privilege: proto.Int32(int32(privilege)),
		})
	}

	return pb
}

// unmarshal deserializes from a protobuf representation.
// Privileges is always allocated, even when the protobuf carries none.
func (ui *UserInfo) unmarshal(pb *internal.UserInfo) {
	ui.Name = pb.GetName()
	ui.Hash = pb.GetHash()
	ui.Admin = pb.GetAdmin()

	ui.Privileges = make(map[string]influxql.Privilege)
	for _, p := range pb.GetPrivileges() {
		ui.Privileges[p.GetDatabase()] = influxql.Privilege(p.GetPrivilege())
	}
}
// MarshalTime converts t to nanoseconds since epoch. A zero time returns 0.
func MarshalTime(t time.Time) int64 {
	var ns int64
	if !t.IsZero() {
		ns = t.UnixNano()
	}
	return ns
}

// UnmarshalTime converts nanoseconds since epoch to time.
// A zero value returns a zero time.
func UnmarshalTime(v int64) time.Time {
	var t time.Time
	if v != 0 {
		t = time.Unix(0, v).UTC()
	}
	return t
}
|
// Copyright 2019 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package main_test
import (
"strings"
"testing"
kusttest_test "sigs.k8s.io/kustomize/api/testutils/kusttest"
)
const (
	// target is the multi-document YAML input shared by every test in this
	// file: two Deployments (myDeploy, yourDeploy) and one custom MyKind
	// resource, used to exercise patch target selection.
	target = `
apiVersion: apps/v1
metadata:
name: myDeploy
labels:
old-label: old-value
kind: Deployment
spec:
replica: 2
template:
metadata:
labels:
old-label: old-value
spec:
containers:
- name: nginx
image: nginx
---
apiVersion: apps/v1
metadata:
name: yourDeploy
labels:
new-label: new-value
kind: Deployment
spec:
replica: 1
template:
metadata:
labels:
new-label: new-value
spec:
containers:
- name: nginx
image: nginx:1.7.9
---
apiVersion: apps/v1
metadata:
name: myDeploy
label:
old-label: old-value
kind: MyKind
spec:
template:
metadata:
labels:
old-label: old-value
spec:
containers:
- name: nginx
image: nginx
`
)
// TestPatchTransformerMissingFile verifies that configuring a path to a
// patch file that was never written to the test filesystem is an error.
func TestPatchTransformerMissingFile(t *testing.T) {
	th := kusttest_test.MakeEnhancedHarness(t).
		PrepBuiltin("PatchTransformer")
	defer th.Reset()

	_, err := th.RunTransformer(`
apiVersion: builtin
kind: PatchTransformer
metadata:
name: notImportantHere
path: patch.yaml
`, target)
	if err == nil {
		t.Fatalf("expected error")
	}
	if !strings.Contains(err.Error(),
		"'/patch.yaml' doesn't exist") {
		t.Fatalf("unexpected err: %v", err)
	}
}
// TestPatchTransformerBadPatch verifies that inline patch content that is
// neither a strategic merge patch nor a JSON 6902 patch is rejected.
func TestPatchTransformerBadPatch(t *testing.T) {
	th := kusttest_test.MakeEnhancedHarness(t).
		PrepBuiltin("PatchTransformer")
	defer th.Reset()

	_, err := th.RunTransformer(`
apiVersion: builtin
kind: PatchTransformer
metadata:
name: notImportantHere
patch: "thisIsNotAPatch"
`, target)
	if err == nil {
		t.Fatalf("expected error")
	}
	if !strings.Contains(err.Error(),
		"unable to get either a Strategic Merge Patch or JSON patch 6902 from") {
		t.Fatalf("unexpected err: %v", err)
	}
}
// TestPatchTransformerMissingSelector verifies that a JSON 6902 patch
// without a target selector is rejected.
func TestPatchTransformerMissingSelector(t *testing.T) {
	th := kusttest_test.MakeEnhancedHarness(t).
		PrepBuiltin("PatchTransformer")
	defer th.Reset()

	_, err := th.RunTransformer(`
apiVersion: builtin
kind: PatchTransformer
metadata:
name: notImportantHere
patch: '[{"op": "add", "path": "/spec/template/spec/dnsPolicy", "value": "ClusterFirst"}]'
`, target)
	if err == nil {
		t.Fatalf("expected error")
	}
	if !strings.Contains(err.Error(),
		"must specify a target for patch") {
		t.Fatalf("unexpected err: %v", err)
	}
}
// TestPatchTransformerBothEmptyPathAndPatch verifies that omitting both the
// patch and path fields is an error.
func TestPatchTransformerBothEmptyPathAndPatch(t *testing.T) {
	th := kusttest_test.MakeEnhancedHarness(t).
		PrepBuiltin("PatchTransformer")
	defer th.Reset()

	_, err := th.RunTransformer(`
apiVersion: builtin
kind: PatchTransformer
metadata:
name: notImportantHere
`, target)
	if err == nil {
		t.Fatalf("expected error")
	}
	if !strings.Contains(err.Error(), "must specify one of patch and path in") {
		t.Fatalf("unexpected err: %v", err)
	}
}
// TestPatchTransformerBothNonEmptyPathAndPatch verifies that supplying both
// the patch and path fields at once is an error.
func TestPatchTransformerBothNonEmptyPathAndPatch(t *testing.T) {
	th := kusttest_test.MakeEnhancedHarness(t).
		PrepBuiltin("PatchTransformer")
	defer th.Reset()

	_, err := th.RunTransformer(`
apiVersion: builtin
kind: PatchTransformer
metadata:
name: notImportantHere
Path: patch.yaml
Patch: "something"
`, target)
	if err == nil {
		t.Fatalf("expected error")
	}
	if !strings.Contains(err.Error(), "patch and path can't be set at the same time") {
		t.Fatalf("unexpected err: %v", err)
	}
}
// TestPatchTransformerFromFiles applies a patch loaded from a file to every
// resource whose name matches the .*Deploy regex, expecting replica to be
// set to 3 on all three resources.
func TestPatchTransformerFromFiles(t *testing.T) {
	th := kusttest_test.MakeEnhancedHarness(t).
		PrepBuiltin("PatchTransformer")
	defer th.Reset()

	th.WriteF("patch.yaml", `
apiVersion: apps/v1
kind: Deployment
metadata:
name: myDeploy
spec:
replica: 3
`)

	rm := th.LoadAndRunTransformer(`
apiVersion: builtin
kind: PatchTransformer
metadata:
name: notImportantHere
path: patch.yaml
target:
name: .*Deploy
`, target)

	th.AssertActualEqualsExpected(rm, `
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
old-label: old-value
name: myDeploy
spec:
replica: 3
template:
metadata:
labels:
old-label: old-value
spec:
containers:
- image: nginx
name: nginx
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
new-label: new-value
name: yourDeploy
spec:
replica: 3
template:
metadata:
labels:
new-label: new-value
spec:
containers:
- image: nginx:1.7.9
name: nginx
---
apiVersion: apps/v1
kind: MyKind
metadata:
label:
old-label: old-value
name: myDeploy
spec:
replica: 3
template:
metadata:
labels:
old-label: old-value
spec:
containers:
- image: nginx
name: nginx
`)
}
// TestPatchTransformerWithInline applies an inline JSON 6902 patch to
// Deployments matching .*Deploy, expecting the container image to change
// only on the two Deployment resources, not on MyKind.
func TestPatchTransformerWithInline(t *testing.T) {
	th := kusttest_test.MakeEnhancedHarness(t).
		PrepBuiltin("PatchTransformer")
	defer th.Reset()

	rm := th.LoadAndRunTransformer(`
apiVersion: builtin
kind: PatchTransformer
metadata:
name: notImportantHere
patch: '[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "nginx:latest"}]'
target:
name: .*Deploy
kind: Deployment
`, target)

	th.AssertActualEqualsExpected(rm, `
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
old-label: old-value
name: myDeploy
spec:
replica: 2
template:
metadata:
labels:
old-label: old-value
spec:
containers:
- image: nginx:latest
name: nginx
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
new-label: new-value
name: yourDeploy
spec:
replica: 1
template:
metadata:
labels:
new-label: new-value
spec:
containers:
- image: nginx:latest
name: nginx
---
apiVersion: apps/v1
kind: MyKind
metadata:
label:
old-label: old-value
name: myDeploy
spec:
template:
metadata:
labels:
old-label: old-value
spec:
containers:
- image: nginx
name: nginx
`)
}
Update tests for PatchTransformer
// Copyright 2019 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package main_test
import (
"strings"
"testing"
kusttest_test "sigs.k8s.io/kustomize/api/testutils/kusttest"
)
const (
	// target is the multi-document YAML input shared by every test in this
	// file: two Deployments (myDeploy, yourDeploy) and one custom MyKind
	// resource, used to exercise patch target selection.
	target = `
apiVersion: apps/v1
metadata:
name: myDeploy
labels:
old-label: old-value
kind: Deployment
spec:
replica: 2
template:
metadata:
labels:
old-label: old-value
spec:
containers:
- name: nginx
image: nginx
---
apiVersion: apps/v1
metadata:
name: yourDeploy
labels:
new-label: new-value
kind: Deployment
spec:
replica: 1
template:
metadata:
labels:
new-label: new-value
spec:
containers:
- name: nginx
image: nginx:1.7.9
---
apiVersion: apps/v1
metadata:
name: myDeploy
label:
old-label: old-value
kind: MyKind
spec:
template:
metadata:
labels:
old-label: old-value
spec:
containers:
- name: nginx
image: nginx
`
)
// TestPatchTransformerMissingFile verifies that configuring a path to a
// patch file that was never written to the test filesystem is an error.
func TestPatchTransformerMissingFile(t *testing.T) {
	th := kusttest_test.MakeEnhancedHarness(t).
		PrepBuiltin("PatchTransformer")
	defer th.Reset()

	th.RunTransformerAndCheckError(`
apiVersion: builtin
kind: PatchTransformer
metadata:
name: notImportantHere
path: patch.yaml
`, target, func(t *testing.T, err error) {
		if err == nil {
			t.Fatalf("expected error")
		}
		if !strings.Contains(err.Error(),
			"'/patch.yaml' doesn't exist") {
			t.Fatalf("unexpected err: %v", err)
		}
	})
}
// TestPatchTransformerBadPatch verifies that inline patch content that is
// neither a strategic merge patch nor a JSON 6902 patch is rejected.
func TestPatchTransformerBadPatch(t *testing.T) {
	th := kusttest_test.MakeEnhancedHarness(t).
		PrepBuiltin("PatchTransformer")
	defer th.Reset()

	th.RunTransformerAndCheckError(`
apiVersion: builtin
kind: PatchTransformer
metadata:
name: notImportantHere
patch: "thisIsNotAPatch"
`, target, func(t *testing.T, err error) {
		if err == nil {
			t.Fatalf("expected error")
		}
		if !strings.Contains(err.Error(),
			"unable to get either a Strategic Merge Patch or JSON patch 6902 from") {
			t.Fatalf("unexpected err: %v", err)
		}
	})
}
// TestPatchTransformerMissingSelector verifies that a JSON 6902 patch
// without a target selector is rejected.
func TestPatchTransformerMissingSelector(t *testing.T) {
	th := kusttest_test.MakeEnhancedHarness(t).
		PrepBuiltin("PatchTransformer")
	defer th.Reset()

	th.RunTransformerAndCheckError(`
apiVersion: builtin
kind: PatchTransformer
metadata:
name: notImportantHere
patch: '[{"op": "add", "path": "/spec/template/spec/dnsPolicy", "value": "ClusterFirst"}]'
`, target, func(t *testing.T, err error) {
		if err == nil {
			t.Fatalf("expected error")
		}
		if !strings.Contains(err.Error(),
			"must specify a target for patch") {
			t.Fatalf("unexpected err: %v", err)
		}
	})
}
// TestPatchTransformerBothEmptyPathAndPatch verifies that omitting both the
// patch and path fields is an error.
func TestPatchTransformerBothEmptyPathAndPatch(t *testing.T) {
	th := kusttest_test.MakeEnhancedHarness(t).
		PrepBuiltin("PatchTransformer")
	defer th.Reset()

	th.RunTransformerAndCheckError(`
apiVersion: builtin
kind: PatchTransformer
metadata:
name: notImportantHere
`, target, func(t *testing.T, err error) {
		if err == nil {
			t.Fatalf("expected error")
		}
		if !strings.Contains(err.Error(), "must specify one of patch and path in") {
			t.Fatalf("unexpected err: %v", err)
		}
	})
}
// TestPatchTransformerBothNonEmptyPathAndPatch verifies that supplying both
// the patch and path fields at once is an error.
func TestPatchTransformerBothNonEmptyPathAndPatch(t *testing.T) {
	th := kusttest_test.MakeEnhancedHarness(t).
		PrepBuiltin("PatchTransformer")
	defer th.Reset()

	th.RunTransformerAndCheckError(`
apiVersion: builtin
kind: PatchTransformer
metadata:
name: notImportantHere
Path: patch.yaml
Patch: "something"
`, target, func(t *testing.T, err error) {
		if err == nil {
			t.Fatalf("expected error")
		}
		if !strings.Contains(err.Error(), "patch and path can't be set at the same time") {
			t.Fatalf("unexpected err: %v", err)
		}
	})
}
// TestPatchTransformerFromFiles applies a patch loaded from a file to every
// resource whose name matches the .*Deploy regex, expecting replica to be
// set to 3 on all three resources.
func TestPatchTransformerFromFiles(t *testing.T) {
	th := kusttest_test.MakeEnhancedHarness(t).
		PrepBuiltin("PatchTransformer")
	defer th.Reset()

	th.WriteF("patch.yaml", `
apiVersion: apps/v1
kind: Deployment
metadata:
name: myDeploy
spec:
replica: 3
`)

	th.RunTransformerAndCheckResult(`
apiVersion: builtin
kind: PatchTransformer
metadata:
name: notImportantHere
path: patch.yaml
target:
name: .*Deploy
`,
		target,
		`
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
old-label: old-value
name: myDeploy
spec:
replica: 3
template:
metadata:
labels:
old-label: old-value
spec:
containers:
- image: nginx
name: nginx
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
new-label: new-value
name: yourDeploy
spec:
replica: 3
template:
metadata:
labels:
new-label: new-value
spec:
containers:
- image: nginx:1.7.9
name: nginx
---
apiVersion: apps/v1
kind: MyKind
metadata:
label:
old-label: old-value
name: myDeploy
spec:
replica: 3
template:
metadata:
labels:
old-label: old-value
spec:
containers:
- image: nginx
name: nginx
`)
}
// TestPatchTransformerWithInline applies an inline JSON 6902 patch to
// Deployments matching .*Deploy, expecting the container image to change
// only on the two Deployment resources, not on MyKind.
func TestPatchTransformerWithInline(t *testing.T) {
	th := kusttest_test.MakeEnhancedHarness(t).
		PrepBuiltin("PatchTransformer")
	defer th.Reset()

	th.RunTransformerAndCheckResult(`
apiVersion: builtin
kind: PatchTransformer
metadata:
name: notImportantHere
patch: '[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "nginx:latest"}]'
target:
name: .*Deploy
kind: Deployment
`, target,
		`
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
old-label: old-value
name: myDeploy
spec:
replica: 2
template:
metadata:
labels:
old-label: old-value
spec:
containers:
- image: nginx:latest
name: nginx
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
new-label: new-value
name: yourDeploy
spec:
replica: 1
template:
metadata:
labels:
new-label: new-value
spec:
containers:
- image: nginx:latest
name: nginx
---
apiVersion: apps/v1
kind: MyKind
metadata:
label:
old-label: old-value
name: myDeploy
spec:
template:
metadata:
labels:
old-label: old-value
spec:
containers:
- image: nginx
name: nginx
`)
}
|
// Copyright (c) 2017 Cisco and/or its affiliates.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errors
// SwIndexNotFound is specific error type used to differentiate state when software index associated with name
// wasn't found in register.
type SwIndexNotFound struct {
	error
	// OriginalError is the underlying lookup error being wrapped.
	OriginalError error
}

// Error implements the error interface by delegating to the wrapped
// OriginalError. A nil OriginalError yields a generic message instead of
// panicking with a nil dereference.
func (swIndexNotFound SwIndexNotFound) Error() string {
	if swIndexNotFound.OriginalError == nil {
		return "sw index not found"
	}
	return swIndexNotFound.OriginalError.Error()
}
Remove unused package
|
package photo
import (
"time"
)
// Photo represents a single photo record belonging to an album.
type Photo struct {
	Id          int64     // unique photo identifier
	AlbumId     int64     // identifier of the containing album
	Name        string    // display name of the photo
	Uploaded    time.Time // when the photo was uploaded
	Status      int       // status code; exact semantics defined by callers — TODO confirm
	OriginalId  int64     // presumably the record id of the full-size image; verify against callers
	ThumbnailId int64     // presumably the record id of the thumbnail image; verify against callers
}
Removed the OriginalId and ThumbnailId fields from the photo struct
package photo
import (
"time"
)
// Photo represents a single photo record belonging to an album.
type Photo struct {
	Id       int64     // unique photo identifier
	AlbumId  int64     // identifier of the containing album
	Name     string    // display name of the photo
	Uploaded time.Time // when the photo was uploaded
	Status   int       // status code; exact semantics defined by callers — TODO confirm
}
|
package app
import (
"errors"
"net/http"
"os"
"os/signal"
"strings"
"time"
"github.com/ardanlabs/kit/cfg"
"github.com/ardanlabs/kit/db"
"github.com/ardanlabs/kit/db/mongo"
"github.com/ardanlabs/kit/log"
"github.com/dimfeld/httptreemux"
"github.com/pborman/uuid"
)
// Sentinel errors returned by handlers so middleware and callers can react
// to well-known failure modes.
var (
	// ErrNotAuthorized occurs when the call is not authorized.
	ErrNotAuthorized = errors.New("Not authorized")

	// ErrNotFound is abstracting the mgo not found error.
	ErrNotFound = errors.New("Entity Not found")

	// ErrInvalidID occurs when an ID is not in a valid form.
	ErrInvalidID = errors.New("ID is not in it's proper form")

	// ErrValidation occurs when there are validation errors.
	ErrValidation = errors.New("Validation errors occurred")
)
type (
	// A Handler is a type that handles an http request within our own little mini
	// framework. The fun part is that our context is fully controlled and
	// configured by us so we can extend the functionality of the Context whenever
	// we want. A returned error is reported via Context.Error.
	Handler func(*Context) error

	// A Middleware is a type that wraps a handler to remove boilerplate or other
	// concerns not direct to any given Handler.
	Middleware func(Handler) Handler
)
// Settings represents things required to initialize the app.
// It is passed to Init before the service starts handling requests.
type Settings struct {
	ConfigKey string // The based environment variable key for all variables.
	UseMongo  bool   // If MongoDB should be initialized and used.
}
// app maintains some framework state.
var app struct {
	Settings

	// userHeaders holds extra headers for each response. It starts nil and
	// must be allocated before any write (writing to a nil map panics);
	// see Init.
	userHeaders map[string]string // Extra headers for each response.
}
//==============================================================================

// App is the entrypoint into our application and what configures our context
// object for each of our http handlers. Feel free to add any configuration
// data/logic on this App struct
type App struct {
	*httptreemux.TreeMux // embedded router; App serves HTTP via its mux.

	// mw is the application-wide middleware applied to every mounted Handler.
	mw []Middleware
}
// New create an App value that handle a set of routes for the application.
// You can provide any number of middleware and they'll be used to wrap every
// request handler.
func New(mw ...Middleware) *App {
	a := App{
		TreeMux: httptreemux.New(),
		mw:      mw,
	}
	return &a
}
// Handle is our mechanism for mounting Handlers for a given HTTP verb and path
// pair, this makes for really easy, convenient routing.
func (a *App) Handle(verb, path string, handler Handler, mw ...Middleware) {

	// The function to execute for each request.
	h := func(w http.ResponseWriter, r *http.Request, p map[string]string) {
		// Capture the start time so the completion log can report duration.
		start := time.Now()

		// Open a MongoDB session for this request only when configured to.
		var dbConn *db.DB
		if app.UseMongo {
			dbConn = db.NewMGO()
		}

		// Build the per-request context handed to every Handler/Middleware.
		c := Context{
			DB:             dbConn,
			ResponseWriter: w,
			Request:        r,
			Params:         p,
			SessionID:      uuid.New(),
		}

		// Release the session when the request completes.
		if app.UseMongo {
			defer c.DB.CloseMGO()
		}

		log.User(c.SessionID, "Request", "Started : Method[%s] URL[%s] RADDR[%s]", c.Request.Method, c.Request.URL.Path, c.Request.RemoteAddr)

		// Wrap the handler in all associated middleware.
		wrap := func(h Handler) Handler {

			// Wrap up the application-wide first...
			// (applied innermost-out, so application middleware runs outermost
			// after the route-specific wrapping below).
			for i := len(a.mw) - 1; i >= 0; i-- {
				h = a.mw[i](h)
			}

			// Then wrap with our route specific ones.
			for i := len(mw) - 1; i >= 0; i-- {
				h = mw[i](h)
			}

			return h
		}

		// Call the wrapped handler and handle any possible error.
		if err := wrap(handler)(&c); err != nil {
			c.Error(err)
		}

		log.User(c.SessionID, "Request", "Completed : Status[%d] Duration[%s]", c.Status, time.Since(start))
	}

	// Add this handler for the specified verb and route.
	a.TreeMux.Handle(verb, path, h)
}
//==============================================================================

// Init is called to initialize the application: logging, configuration,
// optional MongoDB support and any extra response headers. It must be called
// before Run. Fatal initialization errors terminate the process.
func Init(settings Settings) {
	app.Settings = settings

	// Resolve the logging level lazily from config, defaulting to USER.
	logLevel := func() int {
		ll, err := cfg.Int("LOGGING_LEVEL")
		if err != nil {
			return log.USER
		}
		return ll
	}

	log.Init(os.Stderr, logLevel)

	if err := cfg.Init(settings.ConfigKey); err != nil {
		log.Error("startup", "Init", err, "Initializing config")
		os.Exit(1)
	}

	if settings.UseMongo {
		err := mongo.Init()
		if err != nil {
			log.Error("startup", "Init", err, "Initializing MongoDB")
			os.Exit(1)
		}
	}

	// HEADERS should be key:value,key:value
	if hs, err := cfg.String("HEADERS"); err == nil {
		// Allocate the map before writing to it: app.userHeaders starts out
		// nil and assigning into a nil map panics at runtime.
		app.userHeaders = make(map[string]string)
		hdrs := strings.Split(hs, ",")
		for _, hdr := range hdrs {
			if kv := strings.Split(hdr, ":"); len(kv) == 2 {
				app.userHeaders[kv[0]] = kv[1]
			}
		}
	}
}
// Run is called to start the web service. It listens on the host named by
// the cfgHost config key (falling back to defaultHost) and blocks until the
// process receives an OS interrupt signal.
func Run(cfgHost string, defaultHost string, routes http.Handler) {
	log.Dev("startup", "Run", "Start : cfgHost[%s] defaultHost[%s]", cfgHost, defaultHost)

	// Check for a configured host value.
	host, err := cfg.String(cfgHost)
	if err != nil {
		host = defaultHost
	}

	// Create this goroutine to run the web server.
	go func() {
		log.Dev("listener", "Run", "Listening on: %s", host)

		// ListenAndServe always returns a non-nil error when it stops
		// (e.g. the port is already bound); surface it instead of
		// silently dropping it.
		if err := http.ListenAndServe(host, routes); err != nil {
			log.Error("listener", "Run", err, "Listener closed")
		}
	}()

	// Listen for an interrupt signal from the OS.
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, os.Interrupt)
	<-sigChan

	log.Dev("shutdown", "Run", "Complete")
}
Bug not setting map
package app
import (
"errors"
"net/http"
"os"
"os/signal"
"strings"
"time"
"github.com/ardanlabs/kit/cfg"
"github.com/ardanlabs/kit/db"
"github.com/ardanlabs/kit/db/mongo"
"github.com/ardanlabs/kit/log"
"github.com/dimfeld/httptreemux"
"github.com/pborman/uuid"
)
// Sentinel errors returned by handlers so middleware and callers can react
// to well-known failure modes.
var (
	// ErrNotAuthorized occurs when the call is not authorized.
	ErrNotAuthorized = errors.New("Not authorized")

	// ErrNotFound is abstracting the mgo not found error.
	ErrNotFound = errors.New("Entity Not found")

	// ErrInvalidID occurs when an ID is not in a valid form.
	ErrInvalidID = errors.New("ID is not in it's proper form")

	// ErrValidation occurs when there are validation errors.
	ErrValidation = errors.New("Validation errors occurred")
)
type (
	// A Handler is a type that handles an http request within our own little mini
	// framework. The fun part is that our context is fully controlled and
	// configured by us so we can extend the functionality of the Context whenever
	// we want. A returned error is reported via Context.Error.
	Handler func(*Context) error

	// A Middleware is a type that wraps a handler to remove boilerplate or other
	// concerns not direct to any given Handler.
	Middleware func(Handler) Handler
)
// Settings represents things required to initialize the app.
// It is passed to Init before the service starts handling requests.
type Settings struct {
	ConfigKey string // The based environment variable key for all variables.
	UseMongo  bool   // If MongoDB should be initialized and used.
}
// app maintains some framework state.
var app struct {
	Settings

	// userHeaders holds extra headers for each response. It is allocated in
	// Init just before being populated from the HEADERS config value.
	userHeaders map[string]string // Extra headers for each response.
}
//==============================================================================

// App is the entrypoint into our application and what configures our context
// object for each of our http handlers. Feel free to add any configuration
// data/logic on this App struct
type App struct {
	*httptreemux.TreeMux // embedded router; App serves HTTP via its mux.

	// mw is the application-wide middleware applied to every mounted Handler.
	mw []Middleware
}
// New create an App value that handle a set of routes for the application.
// You can provide any number of middleware and they'll be used to wrap every
// request handler.
func New(mw ...Middleware) *App {
	a := App{
		TreeMux: httptreemux.New(),
		mw:      mw,
	}
	return &a
}
// Handle is our mechanism for mounting Handlers for a given HTTP verb and path
// pair, this makes for really easy, convenient routing.
func (a *App) Handle(verb, path string, handler Handler, mw ...Middleware) {

	// The function to execute for each request.
	h := func(w http.ResponseWriter, r *http.Request, p map[string]string) {
		// Capture the start time so the completion log can report duration.
		start := time.Now()

		// Open a MongoDB session for this request only when configured to.
		var dbConn *db.DB
		if app.UseMongo {
			dbConn = db.NewMGO()
		}

		// Build the per-request context handed to every Handler/Middleware.
		c := Context{
			DB:             dbConn,
			ResponseWriter: w,
			Request:        r,
			Params:         p,
			SessionID:      uuid.New(),
		}

		// Release the session when the request completes.
		if app.UseMongo {
			defer c.DB.CloseMGO()
		}

		log.User(c.SessionID, "Request", "Started : Method[%s] URL[%s] RADDR[%s]", c.Request.Method, c.Request.URL.Path, c.Request.RemoteAddr)

		// Wrap the handler in all associated middleware.
		wrap := func(h Handler) Handler {

			// Wrap up the application-wide first...
			// (applied innermost-out, so application middleware runs outermost
			// after the route-specific wrapping below).
			for i := len(a.mw) - 1; i >= 0; i-- {
				h = a.mw[i](h)
			}

			// Then wrap with our route specific ones.
			for i := len(mw) - 1; i >= 0; i-- {
				h = mw[i](h)
			}

			return h
		}

		// Call the wrapped handler and handle any possible error.
		if err := wrap(handler)(&c); err != nil {
			c.Error(err)
		}

		log.User(c.SessionID, "Request", "Completed : Status[%d] Duration[%s]", c.Status, time.Since(start))
	}

	// Add this handler for the specified verb and route.
	a.TreeMux.Handle(verb, path, h)
}
//==============================================================================

// Init is called to initialize the application: it stores the settings,
// initializes logging and configuration, optionally MongoDB, and loads any
// user-defined response headers. Failures during startup are fatal.
func Init(settings Settings) {
	app.Settings = settings

	// Resolve the logging level; defaults to USER when LOGGING_LEVEL is
	// missing or malformed.
	logLevel := func() int {
		ll, err := cfg.Int("LOGGING_LEVEL")
		if err != nil {
			return log.USER
		}
		return ll
	}

	// NOTE(review): log.Init runs before cfg.Init below, so if the logLevel
	// func is evaluated immediately, cfg.Int cannot see the configuration yet
	// and the level silently falls back to USER — confirm the intended order.
	log.Init(os.Stderr, logLevel)

	// Initialize the configuration system from environment variables with
	// the given key prefix. Fatal on failure.
	if err := cfg.Init(settings.ConfigKey); err != nil {
		log.Error("startup", "Init", err, "Initializing config")
		os.Exit(1)
	}

	// Initialize MongoDB only when requested. Fatal on failure.
	if settings.UseMongo {
		err := mongo.Init()
		if err != nil {
			log.Error("startup", "Init", err, "Initializing MongoDB")
			os.Exit(1)
		}
	}

	// HEADERS should be key:value,key:value. Malformed pairs are skipped.
	if hs, err := cfg.String("HEADERS"); err == nil {
		app.userHeaders = make(map[string]string)
		hdrs := strings.Split(hs, ",")
		for _, hdr := range hdrs {
			if kv := strings.Split(hdr, ":"); len(kv) == 2 {
				app.userHeaders[kv[0]] = kv[1]
			}
		}
	}
}
// Run is called to start the web service. It binds the listener in a
// goroutine and blocks until the process receives an OS interrupt signal.
// The host is read from the cfgHost config variable, falling back to
// defaultHost when not configured.
func Run(cfgHost string, defaultHost string, routes http.Handler) {
	log.Dev("startup", "Run", "Start : cfgHost[%s] defaultHost[%s]", cfgHost, defaultHost)

	// Check for a configured host value.
	host, err := cfg.String(cfgHost)
	if err != nil {
		host = defaultHost
	}

	// Create this goroutine to run the web server.
	go func() {
		log.Dev("listener", "Run", "Listening on: %s", host)

		// ListenAndServe always returns a non-nil error; report it instead
		// of dropping a failed bind on the floor.
		if err := http.ListenAndServe(host, routes); err != nil {
			log.Error("listener", "Run", err, "Listening on: %s", host)
		}
	}()

	// Listen for an interrupt signal from the OS.
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, os.Interrupt)
	<-sigChan

	log.Dev("shutdown", "Run", "Complete")
}
|
// Package app provides a thin layer of support for writing web services. It
// integrates with the ardanlabs kit repo to provide support for logging,
// configuration, database, routing and application context. The base things
// you need to write a web service is provided.
package app
import (
"errors"
"fmt"
"net/http"
"os"
"os/signal"
"strings"
"time"
"github.com/ardanlabs/kit/cfg"
"github.com/ardanlabs/kit/db"
"github.com/ardanlabs/kit/db/mongo"
"github.com/ardanlabs/kit/log"
"github.com/dimfeld/httptreemux"
"github.com/pborman/uuid"
)
// Web config environmental variables.
const (
cfgLoggingLevel = "LOGGING_LEVEL"
cfgHost = "HOST"
)
// Mongo config environmental variables.
const (
cfgMongoHost = "MONGO_HOST"
cfgMongoAuthDB = "MONGO_AUTHDB"
cfgMongoDB = "MONGO_DB"
cfgMongoUser = "MONGO_USER"
cfgMongoPassword = "MONGO_PASS"
)
var (
// ErrNotAuthorized occurs when the call is not authorized.
ErrNotAuthorized = errors.New("Not authorized")
// ErrNotFound is abstracting the mgo not found error.
ErrNotFound = errors.New("Entity Not found")
// ErrInvalidID occurs when an ID is not in a valid form.
ErrInvalidID = errors.New("ID is not in it's proper form")
// ErrValidation occurs when there are validation errors.
ErrValidation = errors.New("Validation errors occurred")
)
type (
// A Handler is a type that handles an http request within our own little mini
// framework. The fun part is that our context is fully controlled and
// configured by us so we can extend the functionality of the Context whenever
// we want.
Handler func(*Context) error
// A Middleware is a type that wraps a handler to remove boilerplate or other
// concerns not direct to any given Handler.
Middleware func(Handler) Handler
)
// app maintains some framework state.
var app struct {
useMongo bool
userHeaders map[string]string // Extra headers for each response.
}
//==============================================================================
// App is the entrypoint into our application and what configures our context
// object for each of our http handlers. Feel free to add any configuration
// data/logic on this App struct
type App struct {
*httptreemux.TreeMux
mw []Middleware
}
// New create an App value that handle a set of routes for the application.
// You can provide any number of middleware and they'll be used to wrap every
// request handler.
func New(mw ...Middleware) *App {
	a := App{
		TreeMux: httptreemux.New(),
		mw:      mw,
	}
	return &a
}
// Handle is our mechanism for mounting Handlers for a given HTTP verb and path
// pair, this makes for really easy, convenient routing.
//
// The middleware chain is constructed once at mount time instead of being
// rebuilt on every request. Application-wide middleware is applied first and
// route-specific middleware second, so the route-specific middleware runs
// outermost — the same order the previous per-request wrapping produced.
func (a *App) Handle(verb, path string, handler Handler, mw ...Middleware) {
	// Wrap up the application-wide middleware first...
	for i := len(a.mw) - 1; i >= 0; i-- {
		handler = a.mw[i](handler)
	}

	// Then wrap with our route specific ones.
	for i := len(mw) - 1; i >= 0; i-- {
		handler = mw[i](handler)
	}

	// The function to execute for each request.
	h := func(w http.ResponseWriter, r *http.Request, p map[string]string) {
		start := time.Now()

		// Only create a MongoDB session when MongoDB is configured.
		var dbConn *db.DB
		if app.useMongo {
			dbConn = db.NewMGO()
		}

		c := Context{
			DB:             dbConn,
			ResponseWriter: w,
			Request:        r,
			Params:         p,
			SessionID:      uuid.New(),
		}

		if app.useMongo {
			defer c.DB.CloseMGO()
		}

		log.User(c.SessionID, "Request", "Started : Method[%s] URL[%s] RADDR[%s]", c.Request.Method, c.Request.URL.Path, c.Request.RemoteAddr)

		// Call the wrapped handler and handle any possible error.
		if err := handler(&c); err != nil {
			c.Error(err)
		}

		log.User(c.SessionID, "Request", "Completed : Status[%d] Duration[%s]", c.Status, time.Since(start))
	}

	// Add this handler for the specified verb and route.
	a.TreeMux.Handle(verb, path, h)
}
// CORS providing support for Cross-Origin Resource Sharing.
// https://metajack.im/2010/01/19/crossdomain-ajax-for-xmpp-http-binding-made-easy/
//
// It installs an OPTIONS handler that answers preflight requests with
// permissive CORS headers and a small informational HTML body.
func (a *App) CORS() {
	h := func(w http.ResponseWriter, r *http.Request, p map[string]string) {
		w.Header().Set("Access-Control-Allow-Origin", "*")
		w.Header().Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS")
		w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
		w.Header().Set("Access-Control-Max-Age", "86400")
		w.Header().Set("Content-Type", "text/html")
		w.WriteHeader(http.StatusOK)

		const resp = `<html>
<body>
<a href='http://www.xmpp.org/extensions/xep-0124.html'>XEP-0124</a> - BOSH
</body>
</html>`

		// Use Fprint, not Fprintf: resp is not a format string and must not
		// be interpreted for formatting verbs.
		fmt.Fprint(w, resp)
	}

	a.TreeMux.OptionsHandler = h
}
//==============================================================================

// Init is called to initialize the application: configuration first (logging
// needs it for the level), then logging, then MongoDB when configured, and
// finally any user-defined response headers. Startup failures are fatal.
func Init(configKey string) {
	// Init the configuration system.
	if err := cfg.Init(configKey); err != nil {
		fmt.Println("Error initializing configuration system", err)
		os.Exit(1)
	}

	// Init the log system. The level defaults to USER when LOGGING_LEVEL is
	// missing or malformed.
	logLevel := func() int {
		ll, err := cfg.Int(cfgLoggingLevel)
		if err != nil {
			return log.USER
		}
		return ll
	}
	log.Init(os.Stderr, logLevel)

	// Log all the configuration options.
	log.User("startup", "Init", "\n\nConfig Settings: %s\n%s\n", configKey, cfg.Log())

	// Init MongoDB if configured. The Mongo config gets a distinct name so
	// it does not shadow the cfg package inside this block.
	if _, err := cfg.String(cfgMongoHost); err == nil {
		app.useMongo = true

		mongoCfg := mongo.Config{
			Host:     cfg.MustString(cfgMongoHost),
			AuthDB:   cfg.MustString(cfgMongoAuthDB),
			DB:       cfg.MustString(cfgMongoDB),
			User:     cfg.MustString(cfgMongoUser),
			Password: cfg.MustString(cfgMongoPassword),
		}

		if err := mongo.Init(mongoCfg); err != nil {
			log.Error("startup", "Init", err, "Initializing MongoDB")
			os.Exit(1)
		}
	}

	// Load user defined custom headers. HEADERS should be key:value,key:value.
	// Malformed pairs are skipped.
	if hs, err := cfg.String("HEADERS"); err == nil {
		app.userHeaders = make(map[string]string)
		hdrs := strings.Split(hs, ",")
		for _, hdr := range hdrs {
			if kv := strings.Split(hdr, ":"); len(kv) == 2 {
				log.User("startup", "Init", "User Headers : %s:%s", kv[0], kv[1])
				app.userHeaders[kv[0]] = kv[1]
			}
		}
	}
}
// Run is called to start the web service. It binds the listener in a
// goroutine and blocks until the process receives an OS interrupt signal.
// The host comes from the HOST config variable, falling back to defaultHost.
func Run(defaultHost string, routes http.Handler) {
	log.Dev("startup", "Run", "Start : defaultHost[%s]", defaultHost)

	// Check for a configured host value.
	host, err := cfg.String(cfgHost)
	if err != nil {
		host = defaultHost
	}

	// Create this goroutine to run the web server.
	go func() {
		log.Dev("listener", "Run", "Listening on: %s", host)

		// ListenAndServe always returns a non-nil error; report it instead
		// of dropping a failed bind on the floor.
		if err := http.ListenAndServe(host, routes); err != nil {
			log.Error("listener", "Run", err, "Listening on: %s", host)
		}
	}()

	// Listen for an interrupt signal from the OS.
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, os.Interrupt)
	<-sigChan

	log.Dev("shutdown", "Run", "Complete")
}
Fix support for CORS
// Package app provides a thin layer of support for writing web services. It
// integrates with the ardanlabs kit repo to provide support for logging,
// configuration, database, routing and application context. The base things
// you need to write a web service is provided.
package app
import (
"errors"
"fmt"
"net/http"
"os"
"os/signal"
"strings"
"time"
"github.com/ardanlabs/kit/cfg"
"github.com/ardanlabs/kit/db"
"github.com/ardanlabs/kit/db/mongo"
"github.com/ardanlabs/kit/log"
"github.com/dimfeld/httptreemux"
"github.com/pborman/uuid"
)
// Web config environmental variables.
const (
cfgLoggingLevel = "LOGGING_LEVEL"
cfgHost = "HOST"
)
// Mongo config environmental variables.
const (
cfgMongoHost = "MONGO_HOST"
cfgMongoAuthDB = "MONGO_AUTHDB"
cfgMongoDB = "MONGO_DB"
cfgMongoUser = "MONGO_USER"
cfgMongoPassword = "MONGO_PASS"
)
var (
// ErrNotAuthorized occurs when the call is not authorized.
ErrNotAuthorized = errors.New("Not authorized")
// ErrNotFound is abstracting the mgo not found error.
ErrNotFound = errors.New("Entity Not found")
// ErrInvalidID occurs when an ID is not in a valid form.
ErrInvalidID = errors.New("ID is not in it's proper form")
// ErrValidation occurs when there are validation errors.
ErrValidation = errors.New("Validation errors occurred")
)
type (
// A Handler is a type that handles an http request within our own little mini
// framework. The fun part is that our context is fully controlled and
// configured by us so we can extend the functionality of the Context whenever
// we want.
Handler func(*Context) error
// A Middleware is a type that wraps a handler to remove boilerplate or other
// concerns not direct to any given Handler.
Middleware func(Handler) Handler
)
// app maintains some framework state.
var app struct {
useMongo bool
userHeaders map[string]string // Extra headers for each response.
}
//==============================================================================
// App is the entrypoint into our application and what configures our context
// object for each of our http handlers. Feel free to add any configuration
// data/logic on this App struct
type App struct {
*httptreemux.TreeMux
mw []Middleware
}
// New create an App value that handle a set of routes for the application.
// You can provide any number of middleware and they'll be used to wrap every
// request handler.
func New(mw ...Middleware) *App {
	a := App{
		TreeMux: httptreemux.New(),
		mw:      mw,
	}
	return &a
}
// Handle is our mechanism for mounting Handlers for a given HTTP verb and path
// pair, this makes for really easy, convenient routing.
//
// The middleware chain is constructed once at mount time instead of being
// rebuilt on every request. Application-wide middleware is applied first and
// route-specific middleware second, so the route-specific middleware runs
// outermost — the same order the previous per-request wrapping produced.
func (a *App) Handle(verb, path string, handler Handler, mw ...Middleware) {
	// Wrap up the application-wide middleware first...
	for i := len(a.mw) - 1; i >= 0; i-- {
		handler = a.mw[i](handler)
	}

	// Then wrap with our route specific ones.
	for i := len(mw) - 1; i >= 0; i-- {
		handler = mw[i](handler)
	}

	// The function to execute for each request.
	h := func(w http.ResponseWriter, r *http.Request, p map[string]string) {
		start := time.Now()

		// Only create a MongoDB session when MongoDB is configured.
		var dbConn *db.DB
		if app.useMongo {
			dbConn = db.NewMGO()
		}

		c := Context{
			DB:             dbConn,
			ResponseWriter: w,
			Request:        r,
			Params:         p,
			SessionID:      uuid.New(),
		}

		if app.useMongo {
			defer c.DB.CloseMGO()
		}

		log.User(c.SessionID, "Request", "Started : Method[%s] URL[%s] RADDR[%s]", c.Request.Method, c.Request.URL.Path, c.Request.RemoteAddr)

		// Call the wrapped handler and handle any possible error.
		if err := handler(&c); err != nil {
			c.Error(err)
		}

		log.User(c.SessionID, "Request", "Completed : Status[%d] Duration[%s]", c.Status, time.Since(start))
	}

	// Add this handler for the specified verb and route.
	a.TreeMux.Handle(verb, path, h)
}
// CORS providing support for Cross-Origin Resource Sharing.
// https://metajack.im/2010/01/19/crossdomain-ajax-for-xmpp-http-binding-made-easy/
//
// It installs an OPTIONS handler answering preflight requests and registers
// the Access-Control-Allow-Origin header for every response.
func (a *App) CORS() {
	h := func(w http.ResponseWriter, r *http.Request, p map[string]string) {
		w.Header().Set("Access-Control-Allow-Origin", "*")
		w.Header().Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS")
		w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
		w.Header().Set("Access-Control-Max-Age", "86400")
		w.Header().Set("Content-Type", "text/html")
		w.WriteHeader(http.StatusOK)

		const resp = `<html>
<body>
<a href='http://www.xmpp.org/extensions/xep-0124.html'>XEP-0124</a> - BOSH
</body>
</html>`

		// Use Fprint, not Fprintf: resp is not a format string and must not
		// be interpreted for formatting verbs.
		fmt.Fprint(w, resp)
	}

	a.TreeMux.OptionsHandler = h

	// This header is now required for all requests. userHeaders is nil when
	// Init found no HEADERS config, so allocate it first to avoid a panic on
	// assignment to a nil map.
	if app.userHeaders == nil {
		app.userHeaders = make(map[string]string)
	}
	app.userHeaders["Access-Control-Allow-Origin"] = "*"
}
//==============================================================================

// Init is called to initialize the application: configuration first (logging
// needs it for the level), then logging, then MongoDB when configured, and
// finally any user-defined response headers. Startup failures are fatal.
func Init(configKey string) {
	// Init the configuration system.
	if err := cfg.Init(configKey); err != nil {
		fmt.Println("Error initializing configuration system", err)
		os.Exit(1)
	}

	// Init the log system. The level defaults to USER when LOGGING_LEVEL is
	// missing or malformed.
	logLevel := func() int {
		ll, err := cfg.Int(cfgLoggingLevel)
		if err != nil {
			return log.USER
		}
		return ll
	}
	log.Init(os.Stderr, logLevel)

	// Log all the configuration options.
	log.User("startup", "Init", "\n\nConfig Settings: %s\n%s\n", configKey, cfg.Log())

	// Init MongoDB if configured. The Mongo config gets a distinct name so
	// it does not shadow the cfg package inside this block.
	if _, err := cfg.String(cfgMongoHost); err == nil {
		app.useMongo = true

		mongoCfg := mongo.Config{
			Host:     cfg.MustString(cfgMongoHost),
			AuthDB:   cfg.MustString(cfgMongoAuthDB),
			DB:       cfg.MustString(cfgMongoDB),
			User:     cfg.MustString(cfgMongoUser),
			Password: cfg.MustString(cfgMongoPassword),
		}

		if err := mongo.Init(mongoCfg); err != nil {
			log.Error("startup", "Init", err, "Initializing MongoDB")
			os.Exit(1)
		}
	}

	// Load user defined custom headers. HEADERS should be key:value,key:value.
	// Malformed pairs are skipped.
	if hs, err := cfg.String("HEADERS"); err == nil {
		app.userHeaders = make(map[string]string)
		hdrs := strings.Split(hs, ",")
		for _, hdr := range hdrs {
			if kv := strings.Split(hdr, ":"); len(kv) == 2 {
				log.User("startup", "Init", "User Headers : %s:%s", kv[0], kv[1])
				app.userHeaders[kv[0]] = kv[1]
			}
		}
	}
}
// Run is called to start the web service. It binds the listener in a
// goroutine and blocks until the process receives an OS interrupt signal.
// The host comes from the HOST config variable, falling back to defaultHost.
func Run(defaultHost string, routes http.Handler) {
	log.Dev("startup", "Run", "Start : defaultHost[%s]", defaultHost)

	// Check for a configured host value.
	host, err := cfg.String(cfgHost)
	if err != nil {
		host = defaultHost
	}

	// Create this goroutine to run the web server.
	go func() {
		log.Dev("listener", "Run", "Listening on: %s", host)

		// ListenAndServe always returns a non-nil error; report it instead
		// of dropping a failed bind on the floor.
		if err := http.ListenAndServe(host, routes); err != nil {
			log.Error("listener", "Run", err, "Listening on: %s", host)
		}
	}()

	// Listen for an interrupt signal from the OS.
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, os.Interrupt)
	<-sigChan

	log.Dev("shutdown", "Run", "Complete")
}
|
package influxdb
import (
"fmt"
"sync"
"time"
"github.com/boltdb/bolt"
"github.com/influxdb/influxdb/influxql"
)
// tx represents a transaction that spans multiple shard data stores.
// This transaction will open and close all data stores atomically.
type tx struct {
	mu     sync.Mutex       // guards Open/Close against concurrent use
	server *Server          // server whose databases/shards are queried
	opened bool             // set by Open, cleared by close
	now    time.Time        // default upper time bound for unbounded queries
	itrs   []*shardIterator // shard iterators
}
// newTx return a new initialized Tx.
func newTx(server *Server) *tx {
	t := tx{
		server: server,
		now:    time.Now(),
	}
	return &t
}
// SetNow sets the current time for the transaction. It overrides the
// wall-clock time captured by newTx and is used as the upper time bound
// when a query has no explicit end time.
func (tx *tx) SetNow(now time.Time) { tx.now = now }
// Open opens a read-only transaction on all data stores atomically. If any
// iterator fails to open, every iterator opened so far is released and the
// error is returned.
func (tx *tx) Open() error {
	tx.mu.Lock()
	defer tx.mu.Unlock()

	// Mark transaction as open.
	tx.opened = true

	// Open each iterator individually; on the first failure close the
	// whole transaction and report the error.
	for _, itr := range tx.itrs {
		err := itr.open()
		if err == nil {
			continue
		}
		_ = tx.close()
		return err
	}

	return nil
}
// Close closes all data store transactions atomically. It is safe to call
// concurrently with Open; the mutex serializes them.
func (tx *tx) Close() error {
	tx.mu.Lock()
	defer tx.mu.Unlock()
	return tx.close()
}
// close marks the transaction closed and releases every iterator.
// Callers must already hold tx.mu.
func (tx *tx) close() error {
	// Mark transaction as closed.
	tx.opened = false

	// Release every shard iterator; their close never reports an error.
	for _, itr := range tx.itrs {
		_ = itr.close()
	}

	return nil
}
// CreateIterators returns an iterator for a simple select statement. It
// resolves the statement's database, retention policy, measurement, and
// field, then builds one shard iterator per (tag set, shard) pair. It
// returns (nil, nil) when no shard group overlaps the query's time range.
func (tx *tx) CreateIterators(stmt *influxql.SelectStatement) ([]influxql.Iterator, error) {
	// Parse the source segments.
	database, policyName, measurement, err := splitIdent(stmt.Source.(*influxql.Measurement).Name)
	if err != nil {
		return nil, err
	}

	// Grab time range from statement.
	tmin, tmax := influxql.TimeRange(stmt.Condition)
	if tmin.IsZero() {
		tmin = time.Unix(0, 1)
	}
	if tmax.IsZero() {
		tmax = tx.now
	}

	// Find database and retention policy.
	db := tx.server.databases[database]
	if db == nil {
		return nil, ErrDatabaseNotFound
	}
	rp := db.policies[policyName]
	if rp == nil {
		return nil, ErrRetentionPolicyNotFound
	}

	// Find shard groups within time range.
	var shardGroups []*ShardGroup
	for _, group := range rp.shardGroups {
		if timeBetweenInclusive(group.StartTime, tmin, tmax) || timeBetweenInclusive(group.EndTime, tmin, tmax) {
			shardGroups = append(shardGroups, group)
		}
	}
	if len(shardGroups) == 0 {
		return nil, nil
	}

	// Normalize dimensions to extract the interval.
	_, dimensions, err := stmt.Dimensions.Normalize()
	if err != nil {
		return nil, err
	}

	// Find measurement. The lookup can yield a nil measurement with a nil
	// error when it was never written, so guard before dereferencing it.
	m, err := tx.server.measurement(database, measurement)
	if err != nil {
		return nil, err
	}
	if m == nil {
		return nil, ErrMeasurementNotFound
	}

	// Find field.
	fieldName := stmt.Fields[0].Expr.(*influxql.VarRef).Val
	f := m.FieldByName(fieldName)
	if f == nil {
		return nil, fmt.Errorf("field not found: %s", fieldName)
	}
	tagSets := m.tagSets(stmt, dimensions)

	// Create an iterator for every shard.
	var itrs []influxql.Iterator
	for tag, set := range tagSets {
		for _, group := range shardGroups {
			// TODO: only create iterators for the shards we actually have to hit in a group
			for _, sh := range group.Shards {
				// create a series cursor for each unique series id
				cursors := make([]*seriesCursor, 0, len(set))
				for id, cond := range set {
					cursors = append(cursors, &seriesCursor{id: id, condition: cond})
				}

				// create the shard iterator that will map over all series for the shard
				itr := &shardIterator{
					fieldName: f.Name,
					fieldID:   f.ID,
					tags:      tag,
					db:        sh.store,
					cursors:   cursors,
					tmin:      tmin.UnixNano(),
					tmax:      tmax.UnixNano(),
				}

				// Add to tx so the bolt transaction can be opened/closed.
				tx.itrs = append(tx.itrs, itr)
				itrs = append(itrs, itr)
			}
		}
	}

	return itrs, nil
}
// splitIdent splits an identifier into it's database, policy, and measurement parts.
func splitIdent(s string) (db, rp, m string, err error) {
	segments, err := influxql.SplitIdent(s)
	if err != nil {
		return "", "", "", err
	}
	if len(segments) != 3 {
		return "", "", "", fmt.Errorf("invalid ident, expected 3 segments: %q", s)
	}
	return segments[0], segments[1], segments[2], nil
}
// shardIterator represents an iterator for traversing over a single series.
type shardIterator struct {
	fieldName  string
	fieldID    uint8
	tags       string // encoded dimensional tag values
	cursors    []*seriesCursor
	keyValues  []keyValue // buffered head key/value for each cursor
	db         *bolt.DB   // data stores by shard id
	txn        *bolt.Tx   // read transactions by shard id
	tmin, tmax int64      // time bounds in unix nanoseconds
}
// open begins a read-only bolt transaction on the shard and primes every
// series cursor with its first key/value pair inside [tmin, tmax].
func (i *shardIterator) open() error {
	// Open the data store
	txn, err := i.db.Begin(false)
	if err != nil {
		return err
	}
	i.txn = txn

	// Open cursors for each series id. A series may not exist in this
	// shard; its bucket is then missing and the cursor stays nil.
	for _, c := range i.cursors {
		b := i.txn.Bucket(u32tob(c.id))
		if b == nil {
			continue
		}
		c.cur = b.Cursor()
	}

	// Buffer each cursor's first element so Next can merge across cursors.
	i.keyValues = make([]keyValue, len(i.cursors))
	for j, cur := range i.cursors {
		i.keyValues[j].key, i.keyValues[j].value = cur.Next(i.fieldName, i.fieldID, i.tmin, i.tmax)
	}

	return nil
}
// close releases the shard's bolt transaction. Read-only transactions are
// rolled back rather than committed; the rollback error is deliberately
// ignored since there is nothing to recover.
func (i *shardIterator) close() error {
	_ = i.txn.Rollback()
	return nil
}
// Tags returns the encoded dimensional tag values this iterator maps over.
func (i *shardIterator) Tags() string { return i.tags }
// Next returns the next key/value pair merged across all series cursors,
// picking the cursor holding the smallest (earliest) key so results come
// back in ascending time order. A zero key marks an exhausted cursor; the
// pair (0, nil) is returned once every cursor is exhausted.
func (i *shardIterator) Next() (key int64, value interface{}) {
	// Find the cursor with the minimum non-zero key below tmax. The
	// previous code kept the *last* qualifying index, which returned
	// points out of time order when multiple cursors had data buffered.
	min := -1
	for ind, kv := range i.keyValues {
		if kv.key != 0 && kv.key < i.tmax {
			if min == -1 || kv.key < i.keyValues[min].key {
				min = ind
			}
		}
	}

	// if min is -1 we've exhausted all cursors for the given time range
	if min == -1 {
		return 0, nil
	}

	kv := i.keyValues[min]
	key = kv.key
	value = kv.value

	// Advance the cursor whose element we just consumed.
	i.keyValues[min].key, i.keyValues[min].value = i.cursors[min].Next(i.fieldName, i.fieldID, i.tmin, i.tmax)

	return key, value
}
// keyValue is the buffered head element of a series cursor: a timestamp key
// (unix nanoseconds; 0 means exhausted) and its decoded field value.
type keyValue struct {
	key   int64
	value interface{}
}
// seriesCursor iterates the points of a single series within one shard.
type seriesCursor struct {
	id          uint32        // series id; also the bolt bucket key
	condition   influxql.Expr // optional filter evaluated against each value
	cur         *bolt.Cursor  // nil when the series has no bucket in this shard
	initialized bool          // true once the initial Seek has been performed
}
// Next returns the series' next key/value whose timestamp lies within
// [tmin, tmax] and whose value satisfies the cursor's condition. It returns
// (0, nil) when the series is exhausted, out of range, or absent from the
// shard.
func (c *seriesCursor) Next(fieldName string, fieldID uint8, tmin, tmax int64) (key int64, value interface{}) {
	// TODO: clean this up when we make it so series ids are only queried against the shards they exist in.
	// Right now we query for all series ids on a query against each shard, even if that shard may not have the
	// data, so cur could be nil.
	if c.cur == nil {
		return 0, nil
	}

	for {
		var k, v []byte
		if !c.initialized {
			// First call: position the cursor at the start of the range.
			k, v = c.cur.Seek(u64tob(uint64(tmin)))
			c.initialized = true
		} else {
			k, v = c.cur.Next()
		}

		// Exit if there is no more data.
		if k == nil {
			return 0, nil
		}

		// Marshal key & value.
		key, value = int64(btou64(k)), unmarshalValue(v, fieldID)
		if key > tmax {
			return 0, nil
		}

		// Evaluate condition. Move to next key/value if non-true.
		if c.condition != nil {
			if ok, _ := influxql.Eval(c.condition, map[string]interface{}{fieldName: value}).(bool); !ok {
				continue
			}
		}

		return key, value
	}
}
Ensure measurement exists when querying
package influxdb
import (
"fmt"
"sync"
"time"
"github.com/boltdb/bolt"
"github.com/influxdb/influxdb/influxql"
)
// tx represents a transaction that spans multiple shard data stores.
// This transaction will open and close all data stores atomically.
type tx struct {
mu sync.Mutex
server *Server
opened bool
now time.Time
itrs []*shardIterator // shard iterators
}
// newTx return a new initialized Tx.
func newTx(server *Server) *tx {
return &tx{
server: server,
now: time.Now(),
}
}
// SetNow sets the current time for the transaction.
func (tx *tx) SetNow(now time.Time) { tx.now = now }
// Open opens a read-only transaction on all data stores atomically.
func (tx *tx) Open() error {
tx.mu.Lock()
defer tx.mu.Unlock()
// Mark transaction as open.
tx.opened = true
// Open each iterator individually. If any fail close the transaction and error out
for _, itr := range tx.itrs {
if err := itr.open(); err != nil {
_ = tx.close()
return err
}
}
return nil
}
// Close closes all data store transactions atomically.
func (tx *tx) Close() error {
tx.mu.Lock()
defer tx.mu.Unlock()
return tx.close()
}
func (tx *tx) close() error {
// Mark transaction as closed.
tx.opened = false
for _, itr := range tx.itrs {
_ = itr.close()
}
return nil
}
// CreateIterators returns an iterator for a simple select statement. It
// resolves the statement's database, retention policy, measurement, and
// field, then builds one shard iterator per (tag set, shard) pair. It
// returns (nil, nil) when no shard group overlaps the query's time range.
func (tx *tx) CreateIterators(stmt *influxql.SelectStatement) ([]influxql.Iterator, error) {
	// Parse the source segments.
	database, policyName, measurement, err := splitIdent(stmt.Source.(*influxql.Measurement).Name)
	if err != nil {
		return nil, err
	}

	// Grab time range from statement. An unbounded lower bound starts just
	// after the epoch; an unbounded upper bound ends at the tx's "now".
	tmin, tmax := influxql.TimeRange(stmt.Condition)
	if tmin.IsZero() {
		tmin = time.Unix(0, 1)
	}
	if tmax.IsZero() {
		tmax = tx.now
	}

	// Find database and retention policy.
	db := tx.server.databases[database]
	if db == nil {
		return nil, ErrDatabaseNotFound
	}
	rp := db.policies[policyName]
	if rp == nil {
		return nil, ErrRetentionPolicyNotFound
	}

	// Find shard groups within time range.
	var shardGroups []*ShardGroup
	for _, group := range rp.shardGroups {
		if timeBetweenInclusive(group.StartTime, tmin, tmax) || timeBetweenInclusive(group.EndTime, tmin, tmax) {
			shardGroups = append(shardGroups, group)
		}
	}
	if len(shardGroups) == 0 {
		return nil, nil
	}

	// Normalize dimensions to extract the interval.
	_, dimensions, err := stmt.Dimensions.Normalize()
	if err != nil {
		return nil, err
	}

	// Find measurement. The lookup can yield a nil measurement with a nil
	// error, so guard before dereferencing it below.
	m, err := tx.server.measurement(database, measurement)
	if err != nil {
		return nil, err
	}
	if m == nil {
		return nil, ErrMeasurementNotFound
	}

	// Find field.
	fieldName := stmt.Fields[0].Expr.(*influxql.VarRef).Val
	f := m.FieldByName(fieldName)
	if f == nil {
		return nil, fmt.Errorf("field not found: %s", fieldName)
	}
	tagSets := m.tagSets(stmt, dimensions)

	// Create an iterator for every shard.
	var itrs []influxql.Iterator
	for tag, set := range tagSets {
		for _, group := range shardGroups {
			// TODO: only create iterators for the shards we actually have to hit in a group
			for _, sh := range group.Shards {
				// create a series cursor for each unique series id
				cursors := make([]*seriesCursor, 0, len(set))
				for id, cond := range set {
					cursors = append(cursors, &seriesCursor{id: id, condition: cond})
				}

				// create the shard iterator that will map over all series for the shard
				itr := &shardIterator{
					fieldName: f.Name,
					fieldID:   f.ID,
					tags:      tag,
					db:        sh.store,
					cursors:   cursors,
					tmin:      tmin.UnixNano(),
					tmax:      tmax.UnixNano(),
				}

				// Add to tx so the bolt transaction can be opened/closed.
				tx.itrs = append(tx.itrs, itr)
				itrs = append(itrs, itr)
			}
		}
	}

	return itrs, nil
}
// splitIdent splits an identifier into it's database, policy, and measurement parts.
func splitIdent(s string) (db, rp, m string, err error) {
a, err := influxql.SplitIdent(s)
if err != nil {
return "", "", "", err
} else if len(a) != 3 {
return "", "", "", fmt.Errorf("invalid ident, expected 3 segments: %q", s)
}
return a[0], a[1], a[2], nil
}
// shardIterator represents an iterator for traversing over a single series.
type shardIterator struct {
fieldName string
fieldID uint8
tags string // encoded dimensional tag values
cursors []*seriesCursor
keyValues []keyValue
db *bolt.DB // data stores by shard id
txn *bolt.Tx // read transactions by shard id
tmin, tmax int64
}
func (i *shardIterator) open() error {
// Open the data store
txn, err := i.db.Begin(false)
if err != nil {
return err
}
i.txn = txn
// Open cursors for each series id
for _, c := range i.cursors {
b := i.txn.Bucket(u32tob(c.id))
if b == nil {
continue
}
c.cur = b.Cursor()
}
i.keyValues = make([]keyValue, len(i.cursors))
for j, cur := range i.cursors {
i.keyValues[j].key, i.keyValues[j].value = cur.Next(i.fieldName, i.fieldID, i.tmin, i.tmax)
}
return nil
}
func (i *shardIterator) close() error {
_ = i.txn.Rollback()
return nil
}
func (i *shardIterator) Tags() string { return i.tags }
// Next returns the next key/value pair merged across all series cursors,
// picking the cursor holding the smallest (earliest) key so results come
// back in ascending time order. A zero key marks an exhausted cursor; the
// pair (0, nil) is returned once every cursor is exhausted.
func (i *shardIterator) Next() (key int64, value interface{}) {
	// Find the cursor with the minimum non-zero key below tmax. The
	// previous code kept the *last* qualifying index, which returned
	// points out of time order when multiple cursors had data buffered.
	min := -1
	for ind, kv := range i.keyValues {
		if kv.key != 0 && kv.key < i.tmax {
			if min == -1 || kv.key < i.keyValues[min].key {
				min = ind
			}
		}
	}

	// if min is -1 we've exhausted all cursors for the given time range
	if min == -1 {
		return 0, nil
	}

	kv := i.keyValues[min]
	key = kv.key
	value = kv.value

	// Advance the cursor whose element we just consumed.
	i.keyValues[min].key, i.keyValues[min].value = i.cursors[min].Next(i.fieldName, i.fieldID, i.tmin, i.tmax)

	return key, value
}
type keyValue struct {
key int64
value interface{}
}
type seriesCursor struct {
id uint32
condition influxql.Expr
cur *bolt.Cursor
initialized bool
}
func (c *seriesCursor) Next(fieldName string, fieldID uint8, tmin, tmax int64) (key int64, value interface{}) {
// TODO: clean this up when we make it so series ids are only queried against the shards they exist in.
// Right now we query for all series ids on a query against each shard, even if that shard may not have the
// data, so cur could be nil.
if c.cur == nil {
return 0, nil
}
for {
var k, v []byte
if !c.initialized {
k, v = c.cur.Seek(u64tob(uint64(tmin)))
c.initialized = true
} else {
k, v = c.cur.Next()
}
// Exit if there is no more data.
if k == nil {
return 0, nil
}
// Marshal key & value.
key, value = int64(btou64(k)), unmarshalValue(v, fieldID)
if key > tmax {
return 0, nil
}
// Evaluate condition. Move to next key/value if non-true.
if c.condition != nil {
if ok, _ := influxql.Eval(c.condition, map[string]interface{}{fieldName: value}).(bool); !ok {
continue
}
}
return key, value
}
}
|
package main
import (
"bufio"
"bytes"
"fmt"
"log"
"os"
"os/exec"
"path"
"sort"
"strconv"
"strings"
"text/tabwriter"
"time"
"unicode"
"unicode/utf8"
"github.com/nsf/termbox-go"
)
const EscapeCode = 27
// Win represents a rectangular region of the terminal: w and h are its
// dimensions in cells, x and y its top-left corner in screen coordinates.
type Win struct {
	w int // width in cells
	h int // height in cells
	x int // leftmost screen column
	y int // topmost screen row
}
// newWin returns a window with the given size and top-left position.
func newWin(w, h, x, y int) *Win {
	return &Win{w: w, h: h, x: x, y: y}
}
// renew updates the window's size and position in place.
func (win *Win) renew(w, h, x, y int) {
	win.w, win.h = w, h
	win.x, win.y = x, y
}
// print draws s into the window at cell (x, y), interpreting a subset of
// ANSI SGR escape sequences (ESC [ ... m) to update the foreground and
// background attributes as it goes. Output is clipped at the window's right
// edge, and tab characters advance to the next tabstop.
func (win *Win) print(x, y int, fg, bg termbox.Attribute, s string) {
	off := x // starting column, needed for tabstop arithmetic
	for i := 0; i < len(s); i++ {
		r, w := utf8.DecodeRuneInString(s[i:])
		if r == EscapeCode {
			i++
			if s[i] == '[' {
				// Parse the bytes between '[' and 'm' as a
				// semicolon-separated list of SGR parameter codes.
				j := strings.IndexByte(s[i:], 'm')
				toks := strings.Split(s[i+1:i+j], ";")
				var nums []int
				for _, t := range toks {
					if t == "" {
						// "ESC[m" (empty parameter) resets everything.
						fg = termbox.ColorDefault
						bg = termbox.ColorDefault
						break
					}
					// NOTE: this inner i shadows the loop index on purpose
					// for the duration of the conversion.
					i, err := strconv.Atoi(t)
					if err != nil {
						log.Printf("converting escape code: %s", err)
						continue
					}
					nums = append(nums, i)
				}
				// A color code replaces any previous color, so first clear
				// the corresponding attribute before OR-ing colors below.
				for _, n := range nums {
					if 30 <= n && n <= 37 {
						fg = termbox.ColorDefault
					}
					if 40 <= n && n <= 47 {
						bg = termbox.ColorDefault
					}
				}
				// Apply each code: 1/4/7 are attributes, 30-37 foreground
				// colors, 40-47 background colors.
				for _, n := range nums {
					switch n {
					case 1:
						fg = fg | termbox.AttrBold
					case 4:
						fg = fg | termbox.AttrUnderline
					case 7:
						fg = fg | termbox.AttrReverse
					case 30:
						fg = fg | termbox.ColorBlack
					case 31:
						fg = fg | termbox.ColorRed
					case 32:
						fg = fg | termbox.ColorGreen
					case 33:
						fg = fg | termbox.ColorYellow
					case 34:
						fg = fg | termbox.ColorBlue
					case 35:
						fg = fg | termbox.ColorMagenta
					case 36:
						fg = fg | termbox.ColorCyan
					case 37:
						fg = fg | termbox.ColorWhite
					case 40:
						bg = bg | termbox.ColorBlack
					case 41:
						bg = bg | termbox.ColorRed
					case 42:
						bg = bg | termbox.ColorGreen
					case 43:
						bg = bg | termbox.ColorYellow
					case 44:
						bg = bg | termbox.ColorBlue
					case 45:
						bg = bg | termbox.ColorMagenta
					case 46:
						bg = bg | termbox.ColorCyan
					case 47:
						bg = bg | termbox.ColorWhite
					}
				}
				// Skip past the consumed escape sequence.
				i = i + j
				continue
			}
		}
		// Clip at the window's right edge.
		if x >= win.w {
			break
		}
		termbox.SetCell(win.x+x, win.y+y, r, fg, bg)
		i += w - 1 // skip the remaining bytes of a multibyte rune
		if r == '\t' {
			x += gOpts.tabstop - (x-off)%gOpts.tabstop
		} else {
			x++
		}
	}
}
// printf formats its arguments and prints the result at cell (x, y).
func (win *Win) printf(x, y int, fg, bg termbox.Attribute, format string, a ...interface{}) {
	s := fmt.Sprintf(format, a...)
	win.print(x, y, fg, bg, s)
}
// printl prints s at (x, y) and pads the rest of the line with spaces so
// any previous content on that line is overwritten.
// NOTE(review): the pad width uses len(s) (bytes, not display cells), so
// lines containing multibyte runes are over-padded — confirm acceptable.
func (win *Win) printl(x, y int, fg, bg termbox.Attribute, s string) {
	win.printf(x, y, fg, bg, "%s%*s", s, win.w-len(s), "")
}
// printd renders the visible slice of a directory listing into the window,
// coloring entries by file type, highlighting marks and the current
// position, and optionally appending size or mtime info on the right.
func (win *Win) printd(dir *Dir, marks map[string]bool) {
	// Too narrow to draw anything useful.
	if win.w < 3 {
		return
	}

	fg, bg := termbox.ColorDefault, termbox.ColorDefault

	if len(dir.fi) == 0 {
		fg = termbox.AttrBold
		win.print(0, 0, fg, bg, "empty")
		return
	}

	// Visible window into the listing: beg is chosen so the cursor stays at
	// row dir.pos within the window.
	maxind := len(dir.fi) - 1
	beg := max(dir.ind-dir.pos, 0)
	end := min(beg+win.h, maxind+1)

	for i, f := range dir.fi[beg:end] {
		// Pick a foreground color from the file type/mode. Executables are
		// bold green; other special types get their own colors.
		switch {
		case f.Mode().IsRegular():
			if f.Mode()&0111 != 0 {
				fg = termbox.AttrBold | termbox.ColorGreen
			} else {
				fg = termbox.ColorDefault
			}
		case f.Mode().IsDir():
			fg = termbox.AttrBold | termbox.ColorBlue
		case f.Mode()&os.ModeSymlink != 0:
			fg = termbox.ColorCyan
		case f.Mode()&os.ModeNamedPipe != 0:
			fg = termbox.ColorRed
		case f.Mode()&os.ModeSocket != 0:
			fg = termbox.ColorYellow
		case f.Mode()&os.ModeDevice != 0:
			fg = termbox.ColorWhite
		}

		// Marked files get a magenta cell in the leftmost column.
		path := path.Join(dir.path, f.Name())
		if marks[path] {
			win.print(0, i, fg, termbox.ColorMagenta, " ")
		}

		// Reverse video for the cursor line.
		if i == dir.pos {
			fg = fg | termbox.AttrReverse
		}

		// Build the display line: leading space, name, then truncate or
		// pad with NUL bytes to the available width.
		var s []byte
		s = append(s, ' ')
		s = append(s, f.Name()...)

		if len(s) > win.w-2 {
			s = s[:win.w-2]
		} else {
			s = append(s, make([]byte, win.w-2-len(s))...)
		}

		// Optionally overwrite the right edge with size or mtime info.
		// NOTE(review): single-argument append here only truncates s; the
		// slice index win.w-3-len(h) can go negative on narrow windows
		// with long info strings — confirm the width guards cover this.
		switch gOpts.showinfo {
		case "none":
			break
		case "size":
			if win.w > 8 {
				h := humanize(f.Size())
				s = append(s[:win.w-3-len(h)])
				s = append(s, ' ')
				s = append(s, h...)
			}
		case "time":
			if win.w > 24 {
				t := f.ModTime().Format("Jan _2 15:04")
				s = append(s[:win.w-3-len(t)])
				s = append(s, ' ')
				s = append(s, t...)
			}
		default:
			log.Printf("unknown showinfo type: %s", gOpts.showinfo)
		}

		// TODO: add a trailing '~' to the name if cut
		win.print(1, i, fg, bg, string(s))
	}
}
// printr previews a regular file in the window. It first scans up to win.h
// lines for non-printable runes and shows "binary" if any are found; it
// then either pipes the file through the configured external previewer or
// rewinds and prints the first lines directly.
func (win *Win) printr(reg *os.File) error {
	fg, bg := termbox.ColorDefault, termbox.ColorDefault

	// Binary sniff pass over the first win.h lines. Whitespace and ANSI
	// escape bytes are allowed; any other non-printable rune aborts.
	buf := bufio.NewScanner(reg)

	for i := 0; i < win.h && buf.Scan(); i++ {
		for _, r := range buf.Text() {
			if unicode.IsSpace(r) {
				continue
			}
			if !unicode.IsPrint(r) && r != EscapeCode {
				fg = termbox.AttrBold
				win.print(0, 0, fg, bg, "binary")
				return nil
			}
		}
	}

	if buf.Err() != nil {
		return fmt.Errorf("printing regular file: %s", buf.Err())
	}

	// Second pass: run the previewer command if configured (its failure is
	// only logged), otherwise rewind the file and read it again directly.
	if len(gOpts.previewer) != 0 {
		cmd := exec.Command(gOpts.previewer, reg.Name(), strconv.Itoa(win.w), strconv.Itoa(win.h))
		out, err := cmd.Output()
		if err != nil {
			log.Printf("previewing file: %s", err)
		}
		buf = bufio.NewScanner(bytes.NewReader(out))
	} else {
		reg.Seek(0, 0)
		buf = bufio.NewScanner(reg)
	}

	for i := 0; i < win.h && buf.Scan(); i++ {
		win.print(2, i, fg, bg, buf.Text())
	}

	if buf.Err() != nil {
		return fmt.Errorf("printing regular file: %s", buf.Err())
	}

	return nil
}
// UI holds the terminal windows: one pane per directory level plus the
// pwd line on top, the message line at the bottom and the menu overlay.
type UI struct {
	wins    []*Win
	pwdwin  *Win
	msgwin  *Win
	menuwin *Win
	message string // status text shown in msgwin on the next draw
}
// getWidths splits the total width wtot between the panes according to
// the configured ratios (gOpts.ratios). Every pane except the last gets
// ratio*(wtot/sum) columns; the last pane absorbs the remainder so the
// widths always add up to wtot exactly.
func getWidths(wtot int) []int {
	ratios := gOpts.ratios
	total := 0
	for _, r := range ratios {
		total += r
	}
	unit := wtot / total
	widths := make([]int, len(ratios))
	rem := wtot
	for i := 0; i < len(ratios)-1; i++ {
		w := ratios[i] * unit
		widths[i] = w
		rem -= w
	}
	widths[len(ratios)-1] = rem
	return widths
}
// newUI builds the initial window layout from the terminal size:
// directory panes in the middle rows, pwd line on top, message line at
// the bottom and the menu window just above it.
func newUI() *UI {
	wtot, htot := termbox.Size()
	var wins []*Win
	widths := getWidths(wtot)
	wacc := 0
	wlen := len(widths)
	for i := 0; i < wlen; i++ {
		wins = append(wins, newWin(widths[i], htot-2, wacc, 1))
		wacc += widths[i]
	}
	return &UI{
		wins:    wins,
		pwdwin:  newWin(wtot, 1, 0, 0),
		msgwin:  newWin(wtot, 1, 0, htot-1),
		menuwin: newWin(wtot, 1, 0, htot-2),
	}
}
// renew recomputes the pane layout after a terminal resize.
// NOTE(review): pwdwin and menuwin keep their old width here — confirm
// whether they should be resized to the new wtot as well.
func (ui *UI) renew() {
	termbox.Flush()
	wtot, htot := termbox.Size()
	widths := getWidths(wtot)
	wacc := 0
	wlen := len(widths)
	for i := 0; i < wlen; i++ {
		ui.wins[i].renew(widths[i], htot-2, wacc, 1)
		wacc += widths[i]
	}
	ui.msgwin.renew(wtot, 1, 0, htot-1)
}
// echoFileInfo sets the status message to the current file's mode,
// human-readable size and modification time. No-op in an empty directory.
func (ui *UI) echoFileInfo(nav *Nav) {
	dir := nav.currDir()
	if len(dir.fi) == 0 {
		return
	}
	curr := nav.currFile()
	ui.message = fmt.Sprintf("%v %v %v", curr.Mode(), humanize(curr.Size()), curr.ModTime().Format(time.ANSIC))
}
// clearMsg blanks the message line and parks the cursor at its origin.
func (ui *UI) clearMsg() {
	fg, bg := termbox.ColorDefault, termbox.ColorDefault
	win := ui.msgwin
	win.printl(0, 0, fg, bg, "")
	termbox.SetCursor(win.x, win.y)
	termbox.Flush()
}
// draw renders the whole UI: the pwd line at the top, one pane per
// directory level, the message line at the bottom and, when enabled,
// a preview of the current file in the rightmost pane.
func (ui *UI) draw(nav *Nav) {
	fg, bg := termbox.ColorDefault, termbox.ColorDefault
	termbox.Clear(fg, bg)
	defer termbox.Flush()
	dir := nav.currDir()
	path := strings.Replace(dir.path, envHome, "~", -1)
	ui.pwdwin.printf(0, 0, termbox.AttrBold|termbox.ColorGreen, bg, "%s@%s", envUser, envHost)
	ui.pwdwin.printf(len(envUser)+len(envHost)+1, 0, fg, bg, ":")
	ui.pwdwin.printf(len(envUser)+len(envHost)+2, 0, termbox.AttrBold|termbox.ColorBlue, bg, "%s", path)
	// Map the tail of nav.dirs onto the available panes; with preview
	// enabled the rightmost pane is reserved for the preview.
	length := min(len(ui.wins), len(nav.dirs))
	woff := len(ui.wins) - length
	if gOpts.preview {
		length = min(len(ui.wins)-1, len(nav.dirs))
		woff = len(ui.wins) - 1 - length
	}
	doff := len(nav.dirs) - length
	for i := 0; i < length; i++ {
		ui.wins[woff+i].printd(nav.dirs[doff+i], nav.marks)
	}
	defer ui.msgwin.print(0, 0, fg, bg, ui.message)
	if gOpts.preview {
		if len(dir.fi) == 0 {
			return
		}
		preview := ui.wins[len(ui.wins)-1]
		path := nav.currPath()
		f, err := os.Stat(path)
		if err != nil {
			msg := fmt.Sprintf("getting file information: %s", err)
			ui.message = msg
			log.Print(msg)
			return
		}
		if f.IsDir() {
			dir := newDir(path)
			dir.load(nav.inds[path], nav.poss[path], nav.height, nav.names[path])
			preview.printd(dir, nav.marks)
		} else if f.Mode().IsRegular() {
			file, err := os.Open(path)
			if err != nil {
				// Bail out instead of handing a nil *os.File to printr.
				msg := fmt.Sprintf("opening file: %s", err)
				ui.message = msg
				log.Print(msg)
				return
			}
			// Close the file so repeated draws do not leak descriptors.
			defer file.Close()
			if err := preview.printr(file); err != nil {
				ui.message = err.Error()
				log.Print(err)
			}
		}
	}
}
// findBinds collects every binding whose key sequence starts with
// prefix. ok reports whether prefix itself is bound to an expression.
func findBinds(keys map[string]Expr, prefix string) (binds map[string]Expr, ok bool) {
	binds = make(map[string]Expr)
	for k, e := range keys {
		if !strings.HasPrefix(k, prefix) {
			continue
		}
		binds[k] = e
		ok = ok || k == prefix
	}
	return binds, ok
}
// getExpr reads key events and accumulates a key sequence until it
// matches a binding in gOpts.keys, then returns the bound expression.
// Partial (prefix) matches redraw the screen and list the candidate
// bindings; unknown sequences set an error message. Escape cancels the
// pending sequence and resize events abort; both return a "renew" call.
func (ui *UI) getExpr(nav *Nav) Expr {
	renew := &CallExpr{"renew", nil}

	// Names of special keys as they appear in key bindings. Keys that
	// share a termbox constant with another key (e.g. tab and ctrl-i)
	// are listed once under their canonical name.
	names := map[termbox.Key]string{
		termbox.KeyF1:             "<f-1>",
		termbox.KeyF2:             "<f-2>",
		termbox.KeyF3:             "<f-3>",
		termbox.KeyF4:             "<f-4>",
		termbox.KeyF5:             "<f-5>",
		termbox.KeyF6:             "<f-6>",
		termbox.KeyF7:             "<f-7>",
		termbox.KeyF8:             "<f-8>",
		termbox.KeyF9:             "<f-9>",
		termbox.KeyF10:            "<f-10>",
		termbox.KeyF11:            "<f-11>",
		termbox.KeyF12:            "<f-12>",
		termbox.KeyInsert:         "<insert>",
		termbox.KeyDelete:         "<delete>",
		termbox.KeyHome:           "<home>",
		termbox.KeyEnd:            "<end>",
		termbox.KeyPgup:           "<pgup>",
		termbox.KeyPgdn:           "<pgdn>",
		termbox.KeyArrowUp:        "<up>",
		termbox.KeyArrowDown:      "<down>",
		termbox.KeyArrowLeft:      "<left>",
		termbox.KeyArrowRight:     "<right>",
		termbox.KeyCtrlSpace:      "<c-space>", // also KeyCtrlTilde and KeyCtrl2
		termbox.KeyCtrlA:          "<c-a>",
		termbox.KeyCtrlB:          "<c-b>",
		termbox.KeyCtrlC:          "<c-c>",
		termbox.KeyCtrlD:          "<c-d>",
		termbox.KeyCtrlE:          "<c-e>",
		termbox.KeyCtrlF:          "<c-f>",
		termbox.KeyCtrlG:          "<c-g>",
		termbox.KeyBackspace:      "<bs>",    // also KeyCtrlH
		termbox.KeyTab:            "<tab>",   // also KeyCtrlI
		termbox.KeyCtrlJ:          "<c-j>",
		termbox.KeyCtrlK:          "<c-k>",
		termbox.KeyCtrlL:          "<c-l>",
		termbox.KeyEnter:          "<enter>", // also KeyCtrlM
		termbox.KeyCtrlN:          "<c-n>",
		termbox.KeyCtrlO:          "<c-o>",
		termbox.KeyCtrlP:          "<c-p>",
		termbox.KeyCtrlQ:          "<c-q>",
		termbox.KeyCtrlR:          "<c-r>",
		termbox.KeyCtrlS:          "<c-s>",
		termbox.KeyCtrlT:          "<c-t>",
		termbox.KeyCtrlU:          "<c-u>",
		termbox.KeyCtrlV:          "<c-v>",
		termbox.KeyCtrlW:          "<c-w>",
		termbox.KeyCtrlX:          "<c-x>",
		termbox.KeyCtrlY:          "<c-y>",
		termbox.KeyCtrlZ:          "<c-z>",
		termbox.KeyCtrlBackslash:  "<c-\\>", // also KeyCtrl4
		termbox.KeyCtrlRsqBracket: "<c-]>",  // also KeyCtrl5
		termbox.KeyCtrl6:          "<c-6>",
		termbox.KeyCtrlSlash:      "<c-/>", // also KeyCtrlUnderscore and KeyCtrl7
		termbox.KeySpace:          "<space>",
		termbox.KeyBackspace2:     "<bs2>", // also KeyCtrl8
	}

	var acc []rune
	for {
		switch ev := termbox.PollEvent(); ev.Type {
		case termbox.EventKey:
			switch {
			case ev.Ch == '<':
				// '<' and '>' are escaped since they delimit key names.
				acc = append(acc, []rune("<lt>")...)
			case ev.Ch == '>':
				acc = append(acc, []rune("<gt>")...)
			case ev.Ch != 0:
				acc = append(acc, ev.Ch)
			case ev.Key == termbox.KeyEsc: // also KeyCtrlLsqBracket and KeyCtrl3
				acc = nil
				return renew
			default:
				// Unlisted keys map to the empty string and add nothing.
				acc = append(acc, []rune(names[ev.Key])...)
			}
			binds, ok := findBinds(gOpts.keys, string(acc))
			if len(binds) == 0 {
				ui.message = fmt.Sprintf("unknown mapping: %s", string(acc))
				acc = nil
				return renew
			}
			if ok {
				// TODO: use a delay when the sequence is also a prefix
				// of other bindings
				return gOpts.keys[string(acc)]
			}
			ui.draw(nav)
			ui.listBinds(binds)
		case termbox.EventResize:
			return renew
		default:
			// TODO: handle other events
		}
	}
}
// prompt reads a line of input in the message window with the given
// prompt prefix (e.g. ":" for commands). Tab triggers completion,
// enter submits the input, and escape cancels returning "".
func (ui *UI) prompt(nav *Nav, pref string) string {
	fg, bg := termbox.ColorDefault, termbox.ColorDefault
	win := ui.msgwin
	win.printl(0, 0, fg, bg, pref)
	termbox.SetCursor(win.x+len(pref), win.y)
	defer termbox.HideCursor()
	termbox.Flush()
	var acc []rune
	for {
		switch ev := termbox.PollEvent(); ev.Type {
		case termbox.EventKey:
			if ev.Ch != 0 {
				acc = append(acc, ev.Ch)
			} else {
				// TODO: rest of the keys
				switch ev.Key {
				case termbox.KeySpace:
					acc = append(acc, ' ')
				case termbox.KeyBackspace2:
					if len(acc) > 0 {
						acc = acc[:len(acc)-1]
					}
				case termbox.KeyEnter:
					win.printl(0, 0, fg, bg, "")
					termbox.SetCursor(win.x, win.y)
					termbox.Flush()
					return string(acc)
				case termbox.KeyTab:
					// Complete command names or shell commands depending
					// on the prompt prefix.
					var matches []string
					if pref == ":" {
						matches, acc = compCmd(acc)
					} else {
						matches, acc = compShell(acc)
					}
					ui.draw(nav)
					if len(matches) > 1 {
						ui.listMatches(matches)
					}
				case termbox.KeyEsc:
					return ""
				}
			}
			// Redraw the prompt line with the current input.
			// NOTE(review): cursor position mixes len(pref) in bytes and
			// len(acc) in runes; assumes single-width ASCII — confirm.
			win.printl(0, 0, fg, bg, pref)
			win.print(len(pref), 0, fg, bg, string(acc))
			termbox.SetCursor(win.x+len(pref)+len(acc), win.y)
			termbox.Flush()
		default:
			// TODO: handle other events
		}
	}
}
// pause shuts down termbox, e.g. before running an external command.
func (ui *UI) pause() {
	termbox.Close()
}
// resume reinitializes termbox after a pause.
func (ui *UI) resume() {
	if err := termbox.Init(); err != nil {
		log.Fatalf("initializing termbox: %s", err)
	}
}
// sync force-refreshes the terminal state.
func (ui *UI) sync() {
	if err := termbox.Sync(); err != nil {
		log.Printf("syncing termbox: %s", err)
	}
}
// showMenu sizes the menu window to fit the buffer's lines, anchors it
// just above the message line, and renders the first line in bold as a
// header.
func (ui *UI) showMenu(b *bytes.Buffer) {
	lines := strings.Split(b.String(), "\n")
	lines = lines[:len(lines)-1] // drop the empty string after the final newline
	ui.menuwin.h = len(lines) - 1
	ui.menuwin.y = ui.wins[0].h - ui.menuwin.h
	ui.menuwin.printl(0, 0, termbox.AttrBold, termbox.AttrBold, lines[0])
	for i, line := range lines[1:] {
		ui.menuwin.printl(0, i+1, termbox.ColorDefault, termbox.ColorDefault, "")
		ui.menuwin.print(0, i+1, termbox.ColorDefault, termbox.ColorDefault, line)
	}
	termbox.Flush()
}
// listBinds shows the given bindings in the menu window as a sorted,
// tab-aligned "keys\tcommand" table.
func (ui *UI) listBinds(binds map[string]Expr) {
	t := new(tabwriter.Writer)
	b := new(bytes.Buffer)
	var keys []string
	for k := range binds {
		keys = append(keys, k)
	}
	// Sort for deterministic display; map iteration order is random.
	sort.Strings(keys)
	t.Init(b, 0, gOpts.tabstop, 2, '\t', 0)
	fmt.Fprintln(t, "keys\tcommand")
	for _, k := range keys {
		fmt.Fprintf(t, "%s\t%v\n", k, binds[k])
	}
	t.Flush()
	ui.showMenu(b)
}
// listMatches shows completion candidates in the menu window, laid out
// row by row in as many tabstop-aligned columns as fit the terminal.
func (ui *UI) listMatches(matches []string) {
	b := new(bytes.Buffer)
	wtot, _ := termbox.Size()
	// Column width: the longest match rounded up to the next tab stop.
	wcol := 0
	for _, m := range matches {
		wcol = max(wcol, len(m))
	}
	wcol += gOpts.tabstop - wcol%gOpts.tabstop
	ncol := wtot / wcol
	if ncol < 1 {
		// Guard against a zero column count (terminal narrower than a
		// match) which would otherwise stall the loop below.
		ncol = 1
	}
	b.WriteString("possible matches\n")
	// The inner loop advances i across a row; the outer loop must not
	// increment i again, or one match is skipped after every full row.
	for i := 0; i < len(matches); {
		for j := 0; j < ncol && i < len(matches); i, j = i+1, j+1 {
			fmt.Fprintf(b, "%s%*s", matches[i], wcol-len(matches[i]), "")
		}
		b.WriteByte('\n')
	}
	ui.showMenu(b)
}
Do binary check after calling previewer
Many file types that are valid input for the previewer, such as archives, were
treated as binary and thus were not passed to the previewer.
Also sanitizes the previewer output, to some extent.
package main
import (
"bufio"
"bytes"
"fmt"
"io"
"log"
"os"
"os/exec"
"path"
"sort"
"strconv"
"strings"
"text/tabwriter"
"time"
"unicode"
"unicode/utf8"
"github.com/nsf/termbox-go"
)
const EscapeCode = 27 // ASCII ESC; marks ANSI escape sequences in previews
// Win is a rectangular region of the terminal, w columns by h rows,
// with its top-left corner at cell (x, y).
type Win struct {
	w int
	h int
	x int
	y int
}
// newWin returns a Win with the given dimensions and position.
func newWin(w, h, x, y int) *Win {
	win := Win{w: w, h: h, x: x, y: y}
	return &win
}
// renew updates the window's size and position in place.
func (win *Win) renew(w, h, x, y int) {
	win.w = w
	win.h = h
	win.x = x
	win.y = y
}
// print draws string s into the window starting at cell (x, y) with the
// given default attributes. A subset of ANSI SGR escape sequences
// embedded in s (colors, bold, underline, reverse) updates the
// attributes on the fly; tabs advance to the next tab stop, and output
// is clipped at the window's right edge.
// NOTE(review): assumes escape sequences are well-formed ("ESC[...m");
// a truncated sequence at the end of s could index past the string —
// confirm inputs are sanitized upstream.
func (win *Win) print(x, y int, fg, bg termbox.Attribute, s string) {
	off := x
	for i := 0; i < len(s); i++ {
		r, w := utf8.DecodeRuneInString(s[i:])
		if r == EscapeCode {
			i++
			if s[i] == '[' {
				// Parse "ESC [ n ; n ... m" into a list of numbers.
				j := strings.IndexByte(s[i:], 'm')
				toks := strings.Split(s[i+1:i+j], ";")
				var nums []int
				for _, t := range toks {
					if t == "" {
						// "ESC[m" resets both attributes.
						fg = termbox.ColorDefault
						bg = termbox.ColorDefault
						break
					}
					i, err := strconv.Atoi(t)
					if err != nil {
						log.Printf("converting escape code: %s", err)
						continue
					}
					nums = append(nums, i)
				}
				// A new color code clears the previous color first so
				// its bits can simply be OR'ed in below.
				for _, n := range nums {
					if 30 <= n && n <= 37 {
						fg = termbox.ColorDefault
					}
					if 40 <= n && n <= 47 {
						bg = termbox.ColorDefault
					}
				}
				for _, n := range nums {
					switch n {
					case 1:
						fg = fg | termbox.AttrBold
					case 4:
						fg = fg | termbox.AttrUnderline
					case 7:
						fg = fg | termbox.AttrReverse
					case 30:
						fg = fg | termbox.ColorBlack
					case 31:
						fg = fg | termbox.ColorRed
					case 32:
						fg = fg | termbox.ColorGreen
					case 33:
						fg = fg | termbox.ColorYellow
					case 34:
						fg = fg | termbox.ColorBlue
					case 35:
						fg = fg | termbox.ColorMagenta
					case 36:
						fg = fg | termbox.ColorCyan
					case 37:
						fg = fg | termbox.ColorWhite
					case 40:
						bg = bg | termbox.ColorBlack
					case 41:
						bg = bg | termbox.ColorRed
					case 42:
						bg = bg | termbox.ColorGreen
					case 43:
						bg = bg | termbox.ColorYellow
					case 44:
						bg = bg | termbox.ColorBlue
					case 45:
						bg = bg | termbox.ColorMagenta
					case 46:
						bg = bg | termbox.ColorCyan
					case 47:
						bg = bg | termbox.ColorWhite
					}
				}
				// Skip past the consumed escape sequence.
				i = i + j
				continue
			}
		}
		if x >= win.w {
			break
		}
		termbox.SetCell(win.x+x, win.y+y, r, fg, bg)
		i += w - 1 // advance by the rune's byte width
		if r == '\t' {
			// Jump to the next tab stop relative to the start column.
			x += gOpts.tabstop - (x-off)%gOpts.tabstop
		} else {
			x++
		}
	}
}
// printf formats its arguments and prints them at (x, y); see print for
// escape-sequence and clipping behavior.
func (win *Win) printf(x, y int, fg, bg termbox.Attribute, format string, a ...interface{}) {
	win.print(x, y, fg, bg, fmt.Sprintf(format, a...))
}
// printl prints s padded with spaces to the full window width, clearing
// the rest of the line.
// NOTE(review): padding uses len(s) in bytes — multi-byte runes would
// under-pad; confirm inputs are ASCII.
func (win *Win) printl(x, y int, fg, bg termbox.Attribute, s string) {
	win.printf(x, y, fg, bg, "%s%*s", s, win.w-len(s), "")
}
// printd draws the listing of dir into the window: one file per row,
// colored by file type, with a mark indicator in the first column, the
// cursor line shown reversed, and an optional size/time column
// according to gOpts.showinfo.
func (win *Win) printd(dir *Dir, marks map[string]bool) {
	if win.w < 3 {
		return
	}
	fg, bg := termbox.ColorDefault, termbox.ColorDefault
	if len(dir.fi) == 0 {
		fg = termbox.AttrBold
		win.print(0, 0, fg, bg, "empty")
		return
	}
	maxind := len(dir.fi) - 1
	// Visible slice of the listing: keeps the cursor at row dir.pos.
	beg := max(dir.ind-dir.pos, 0)
	end := min(beg+win.h, maxind+1)
	for i, f := range dir.fi[beg:end] {
		switch {
		case f.Mode().IsRegular():
			if f.Mode()&0111 != 0 {
				fg = termbox.AttrBold | termbox.ColorGreen
			} else {
				fg = termbox.ColorDefault
			}
		case f.Mode().IsDir():
			fg = termbox.AttrBold | termbox.ColorBlue
		case f.Mode()&os.ModeSymlink != 0:
			fg = termbox.ColorCyan
		case f.Mode()&os.ModeNamedPipe != 0:
			fg = termbox.ColorRed
		case f.Mode()&os.ModeSocket != 0:
			fg = termbox.ColorYellow
		case f.Mode()&os.ModeDevice != 0:
			fg = termbox.ColorWhite
		}
		path := path.Join(dir.path, f.Name())
		if marks[path] {
			win.print(0, i, fg, termbox.ColorMagenta, " ")
		}
		if i == dir.pos {
			fg = fg | termbox.AttrReverse
		}
		// Build the row: leading space plus name, padded or truncated
		// to the window width.
		var s []byte
		s = append(s, ' ')
		s = append(s, f.Name()...)
		if len(s) > win.w-2 {
			s = s[:win.w-2]
		} else {
			s = append(s, make([]byte, win.w-2-len(s))...)
		}
		switch gOpts.showinfo {
		case "none":
		case "size":
			if win.w > 8 {
				h := humanize(f.Size())
				// Plain reslice; the previous `append(s[:...])` with no
				// values was equivalent but flagged by go vet.
				s = s[:win.w-3-len(h)]
				s = append(s, ' ')
				s = append(s, h...)
			}
		case "time":
			if win.w > 24 {
				t := f.ModTime().Format("Jan _2 15:04")
				s = s[:win.w-3-len(t)]
				s = append(s, ' ')
				s = append(s, t...)
			}
		default:
			log.Printf("unknown showinfo type: %s", gOpts.showinfo)
		}
		// TODO: add a trailing '~' to the name if cut
		win.print(1, i, fg, bg, string(s))
	}
}
// printr renders a preview of the regular file reg into the window.
// When a previewer command is configured (gOpts.previewer) the file is
// piped through it and its output is shown instead; the binary check
// therefore runs on the previewer output, so file types the previewer
// understands (e.g. archives) are not rejected as binary.
func (win *Win) printr(reg *os.File) error {
	var reader io.ReadSeeker
	if len(gOpts.previewer) != 0 {
		cmd := exec.Command(gOpts.previewer, reg.Name(), strconv.Itoa(win.w), strconv.Itoa(win.h))
		out, err := cmd.Output()
		if err != nil {
			// Best effort: log and continue with whatever output we got.
			log.Printf("previewing file: %s", err)
		}
		reader = bytes.NewReader(out)
	} else {
		reader = reg
	}
	fg, bg := termbox.ColorDefault, termbox.ColorDefault
	// First pass: content with non-printable characters is shown as "binary".
	buf := bufio.NewScanner(reader)
	for i := 0; i < win.h && buf.Scan(); i++ {
		for _, r := range buf.Text() {
			if unicode.IsSpace(r) {
				continue
			}
			if !unicode.IsPrint(r) && r != EscapeCode {
				fg = termbox.AttrBold
				win.print(0, 0, fg, bg, "binary")
				return nil
			}
		}
	}
	if buf.Err() != nil {
		return fmt.Errorf("printing regular file: %s", buf.Err())
	}
	// Second pass: rewind and print the text content.
	reader.Seek(0, 0)
	buf = bufio.NewScanner(reader)
	for i := 0; i < win.h && buf.Scan(); i++ {
		win.print(2, i, fg, bg, buf.Text())
	}
	if buf.Err() != nil {
		return fmt.Errorf("printing regular file: %s", buf.Err())
	}
	return nil
}
// UI holds the terminal windows: one pane per directory level plus the
// pwd line on top, the message line at the bottom and the menu overlay.
type UI struct {
	wins    []*Win
	pwdwin  *Win
	msgwin  *Win
	menuwin *Win
	message string // status text shown in msgwin on the next draw
}
// getWidths splits the total width wtot between the panes according to
// the configured ratios (gOpts.ratios). Every pane except the last gets
// ratio*(wtot/sum) columns; the last pane absorbs the remainder so the
// widths always add up to wtot exactly.
func getWidths(wtot int) []int {
	ratios := gOpts.ratios
	total := 0
	for _, r := range ratios {
		total += r
	}
	unit := wtot / total
	widths := make([]int, len(ratios))
	rem := wtot
	for i := 0; i < len(ratios)-1; i++ {
		w := ratios[i] * unit
		widths[i] = w
		rem -= w
	}
	widths[len(ratios)-1] = rem
	return widths
}
// newUI builds the initial window layout from the terminal size:
// directory panes in the middle rows, pwd line on top, message line at
// the bottom and the menu window just above it.
func newUI() *UI {
	wtot, htot := termbox.Size()
	var wins []*Win
	widths := getWidths(wtot)
	wacc := 0
	wlen := len(widths)
	for i := 0; i < wlen; i++ {
		wins = append(wins, newWin(widths[i], htot-2, wacc, 1))
		wacc += widths[i]
	}
	return &UI{
		wins:    wins,
		pwdwin:  newWin(wtot, 1, 0, 0),
		msgwin:  newWin(wtot, 1, 0, htot-1),
		menuwin: newWin(wtot, 1, 0, htot-2),
	}
}
// renew recomputes the pane layout after a terminal resize.
// NOTE(review): pwdwin and menuwin keep their old width here — confirm
// whether they should be resized to the new wtot as well.
func (ui *UI) renew() {
	termbox.Flush()
	wtot, htot := termbox.Size()
	widths := getWidths(wtot)
	wacc := 0
	wlen := len(widths)
	for i := 0; i < wlen; i++ {
		ui.wins[i].renew(widths[i], htot-2, wacc, 1)
		wacc += widths[i]
	}
	ui.msgwin.renew(wtot, 1, 0, htot-1)
}
// echoFileInfo sets the status message to the current file's mode,
// human-readable size and modification time. No-op in an empty directory.
func (ui *UI) echoFileInfo(nav *Nav) {
	dir := nav.currDir()
	if len(dir.fi) == 0 {
		return
	}
	curr := nav.currFile()
	ui.message = fmt.Sprintf("%v %v %v", curr.Mode(), humanize(curr.Size()), curr.ModTime().Format(time.ANSIC))
}
// clearMsg blanks the message line and parks the cursor at its origin.
func (ui *UI) clearMsg() {
	fg, bg := termbox.ColorDefault, termbox.ColorDefault
	win := ui.msgwin
	win.printl(0, 0, fg, bg, "")
	termbox.SetCursor(win.x, win.y)
	termbox.Flush()
}
// draw renders the whole UI: the pwd line at the top, one pane per
// directory level, the message line at the bottom and, when enabled,
// a preview of the current file in the rightmost pane.
func (ui *UI) draw(nav *Nav) {
	fg, bg := termbox.ColorDefault, termbox.ColorDefault
	termbox.Clear(fg, bg)
	defer termbox.Flush()
	dir := nav.currDir()
	path := strings.Replace(dir.path, envHome, "~", -1)
	ui.pwdwin.printf(0, 0, termbox.AttrBold|termbox.ColorGreen, bg, "%s@%s", envUser, envHost)
	ui.pwdwin.printf(len(envUser)+len(envHost)+1, 0, fg, bg, ":")
	ui.pwdwin.printf(len(envUser)+len(envHost)+2, 0, termbox.AttrBold|termbox.ColorBlue, bg, "%s", path)
	// Map the tail of nav.dirs onto the available panes; with preview
	// enabled the rightmost pane is reserved for the preview.
	length := min(len(ui.wins), len(nav.dirs))
	woff := len(ui.wins) - length
	if gOpts.preview {
		length = min(len(ui.wins)-1, len(nav.dirs))
		woff = len(ui.wins) - 1 - length
	}
	doff := len(nav.dirs) - length
	for i := 0; i < length; i++ {
		ui.wins[woff+i].printd(nav.dirs[doff+i], nav.marks)
	}
	defer ui.msgwin.print(0, 0, fg, bg, ui.message)
	if gOpts.preview {
		if len(dir.fi) == 0 {
			return
		}
		preview := ui.wins[len(ui.wins)-1]
		path := nav.currPath()
		f, err := os.Stat(path)
		if err != nil {
			msg := fmt.Sprintf("getting file information: %s", err)
			ui.message = msg
			log.Print(msg)
			return
		}
		if f.IsDir() {
			dir := newDir(path)
			dir.load(nav.inds[path], nav.poss[path], nav.height, nav.names[path])
			preview.printd(dir, nav.marks)
		} else if f.Mode().IsRegular() {
			file, err := os.Open(path)
			if err != nil {
				// Bail out instead of handing a nil *os.File to printr.
				msg := fmt.Sprintf("opening file: %s", err)
				ui.message = msg
				log.Print(msg)
				return
			}
			// Close the file so repeated draws do not leak descriptors.
			defer file.Close()
			if err := preview.printr(file); err != nil {
				ui.message = err.Error()
				log.Print(err)
			}
		}
	}
}
// findBinds collects every binding whose key sequence starts with
// prefix. ok reports whether prefix itself is bound to an expression.
func findBinds(keys map[string]Expr, prefix string) (binds map[string]Expr, ok bool) {
	binds = make(map[string]Expr)
	for k, e := range keys {
		if !strings.HasPrefix(k, prefix) {
			continue
		}
		binds[k] = e
		ok = ok || k == prefix
	}
	return binds, ok
}
// getExpr reads key events and accumulates a key sequence until it
// matches a binding in gOpts.keys, then returns the bound expression.
// Partial (prefix) matches redraw the screen and list the candidate
// bindings; unknown sequences set an error message. Escape cancels the
// pending sequence and resize events abort; both return a "renew" call.
func (ui *UI) getExpr(nav *Nav) Expr {
	renew := &CallExpr{"renew", nil}

	// Names of special keys as they appear in key bindings. Keys that
	// share a termbox constant with another key (e.g. tab and ctrl-i)
	// are listed once under their canonical name.
	names := map[termbox.Key]string{
		termbox.KeyF1:             "<f-1>",
		termbox.KeyF2:             "<f-2>",
		termbox.KeyF3:             "<f-3>",
		termbox.KeyF4:             "<f-4>",
		termbox.KeyF5:             "<f-5>",
		termbox.KeyF6:             "<f-6>",
		termbox.KeyF7:             "<f-7>",
		termbox.KeyF8:             "<f-8>",
		termbox.KeyF9:             "<f-9>",
		termbox.KeyF10:            "<f-10>",
		termbox.KeyF11:            "<f-11>",
		termbox.KeyF12:            "<f-12>",
		termbox.KeyInsert:         "<insert>",
		termbox.KeyDelete:         "<delete>",
		termbox.KeyHome:           "<home>",
		termbox.KeyEnd:            "<end>",
		termbox.KeyPgup:           "<pgup>",
		termbox.KeyPgdn:           "<pgdn>",
		termbox.KeyArrowUp:        "<up>",
		termbox.KeyArrowDown:      "<down>",
		termbox.KeyArrowLeft:      "<left>",
		termbox.KeyArrowRight:     "<right>",
		termbox.KeyCtrlSpace:      "<c-space>", // also KeyCtrlTilde and KeyCtrl2
		termbox.KeyCtrlA:          "<c-a>",
		termbox.KeyCtrlB:          "<c-b>",
		termbox.KeyCtrlC:          "<c-c>",
		termbox.KeyCtrlD:          "<c-d>",
		termbox.KeyCtrlE:          "<c-e>",
		termbox.KeyCtrlF:          "<c-f>",
		termbox.KeyCtrlG:          "<c-g>",
		termbox.KeyBackspace:      "<bs>",    // also KeyCtrlH
		termbox.KeyTab:            "<tab>",   // also KeyCtrlI
		termbox.KeyCtrlJ:          "<c-j>",
		termbox.KeyCtrlK:          "<c-k>",
		termbox.KeyCtrlL:          "<c-l>",
		termbox.KeyEnter:          "<enter>", // also KeyCtrlM
		termbox.KeyCtrlN:          "<c-n>",
		termbox.KeyCtrlO:          "<c-o>",
		termbox.KeyCtrlP:          "<c-p>",
		termbox.KeyCtrlQ:          "<c-q>",
		termbox.KeyCtrlR:          "<c-r>",
		termbox.KeyCtrlS:          "<c-s>",
		termbox.KeyCtrlT:          "<c-t>",
		termbox.KeyCtrlU:          "<c-u>",
		termbox.KeyCtrlV:          "<c-v>",
		termbox.KeyCtrlW:          "<c-w>",
		termbox.KeyCtrlX:          "<c-x>",
		termbox.KeyCtrlY:          "<c-y>",
		termbox.KeyCtrlZ:          "<c-z>",
		termbox.KeyCtrlBackslash:  "<c-\\>", // also KeyCtrl4
		termbox.KeyCtrlRsqBracket: "<c-]>",  // also KeyCtrl5
		termbox.KeyCtrl6:          "<c-6>",
		termbox.KeyCtrlSlash:      "<c-/>", // also KeyCtrlUnderscore and KeyCtrl7
		termbox.KeySpace:          "<space>",
		termbox.KeyBackspace2:     "<bs2>", // also KeyCtrl8
	}

	var acc []rune
	for {
		switch ev := termbox.PollEvent(); ev.Type {
		case termbox.EventKey:
			switch {
			case ev.Ch == '<':
				// '<' and '>' are escaped since they delimit key names.
				acc = append(acc, []rune("<lt>")...)
			case ev.Ch == '>':
				acc = append(acc, []rune("<gt>")...)
			case ev.Ch != 0:
				acc = append(acc, ev.Ch)
			case ev.Key == termbox.KeyEsc: // also KeyCtrlLsqBracket and KeyCtrl3
				acc = nil
				return renew
			default:
				// Unlisted keys map to the empty string and add nothing.
				acc = append(acc, []rune(names[ev.Key])...)
			}
			binds, ok := findBinds(gOpts.keys, string(acc))
			if len(binds) == 0 {
				ui.message = fmt.Sprintf("unknown mapping: %s", string(acc))
				acc = nil
				return renew
			}
			if ok {
				// TODO: use a delay when the sequence is also a prefix
				// of other bindings
				return gOpts.keys[string(acc)]
			}
			ui.draw(nav)
			ui.listBinds(binds)
		case termbox.EventResize:
			return renew
		default:
			// TODO: handle other events
		}
	}
}
// prompt reads a line of input in the message window with the given
// prompt prefix (e.g. ":" for commands). Tab triggers completion,
// enter submits the input, and escape cancels returning "".
func (ui *UI) prompt(nav *Nav, pref string) string {
	fg, bg := termbox.ColorDefault, termbox.ColorDefault
	win := ui.msgwin
	win.printl(0, 0, fg, bg, pref)
	termbox.SetCursor(win.x+len(pref), win.y)
	defer termbox.HideCursor()
	termbox.Flush()
	var acc []rune
	for {
		switch ev := termbox.PollEvent(); ev.Type {
		case termbox.EventKey:
			if ev.Ch != 0 {
				acc = append(acc, ev.Ch)
			} else {
				// TODO: rest of the keys
				switch ev.Key {
				case termbox.KeySpace:
					acc = append(acc, ' ')
				case termbox.KeyBackspace2:
					if len(acc) > 0 {
						acc = acc[:len(acc)-1]
					}
				case termbox.KeyEnter:
					win.printl(0, 0, fg, bg, "")
					termbox.SetCursor(win.x, win.y)
					termbox.Flush()
					return string(acc)
				case termbox.KeyTab:
					// Complete command names or shell commands depending
					// on the prompt prefix.
					var matches []string
					if pref == ":" {
						matches, acc = compCmd(acc)
					} else {
						matches, acc = compShell(acc)
					}
					ui.draw(nav)
					if len(matches) > 1 {
						ui.listMatches(matches)
					}
				case termbox.KeyEsc:
					return ""
				}
			}
			// Redraw the prompt line with the current input.
			// NOTE(review): cursor position mixes len(pref) in bytes and
			// len(acc) in runes; assumes single-width ASCII — confirm.
			win.printl(0, 0, fg, bg, pref)
			win.print(len(pref), 0, fg, bg, string(acc))
			termbox.SetCursor(win.x+len(pref)+len(acc), win.y)
			termbox.Flush()
		default:
			// TODO: handle other events
		}
	}
}
// pause shuts down termbox, e.g. before running an external command.
func (ui *UI) pause() {
	termbox.Close()
}
// resume reinitializes termbox after a pause.
func (ui *UI) resume() {
	if err := termbox.Init(); err != nil {
		log.Fatalf("initializing termbox: %s", err)
	}
}
// sync force-refreshes the terminal state.
func (ui *UI) sync() {
	if err := termbox.Sync(); err != nil {
		log.Printf("syncing termbox: %s", err)
	}
}
// showMenu sizes the menu window to fit the buffer's lines, anchors it
// just above the message line, and renders the first line in bold as a
// header.
func (ui *UI) showMenu(b *bytes.Buffer) {
	lines := strings.Split(b.String(), "\n")
	lines = lines[:len(lines)-1] // drop the empty string after the final newline
	ui.menuwin.h = len(lines) - 1
	ui.menuwin.y = ui.wins[0].h - ui.menuwin.h
	ui.menuwin.printl(0, 0, termbox.AttrBold, termbox.AttrBold, lines[0])
	for i, line := range lines[1:] {
		ui.menuwin.printl(0, i+1, termbox.ColorDefault, termbox.ColorDefault, "")
		ui.menuwin.print(0, i+1, termbox.ColorDefault, termbox.ColorDefault, line)
	}
	termbox.Flush()
}
// listBinds shows the given bindings in the menu window as a sorted,
// tab-aligned "keys\tcommand" table.
func (ui *UI) listBinds(binds map[string]Expr) {
	t := new(tabwriter.Writer)
	b := new(bytes.Buffer)
	var keys []string
	for k := range binds {
		keys = append(keys, k)
	}
	// Sort for deterministic display; map iteration order is random.
	sort.Strings(keys)
	t.Init(b, 0, gOpts.tabstop, 2, '\t', 0)
	fmt.Fprintln(t, "keys\tcommand")
	for _, k := range keys {
		fmt.Fprintf(t, "%s\t%v\n", k, binds[k])
	}
	t.Flush()
	ui.showMenu(b)
}
// listMatches shows completion candidates in the menu window, laid out
// row by row in as many tabstop-aligned columns as fit the terminal.
func (ui *UI) listMatches(matches []string) {
	b := new(bytes.Buffer)
	wtot, _ := termbox.Size()
	// Column width: the longest match rounded up to the next tab stop.
	wcol := 0
	for _, m := range matches {
		wcol = max(wcol, len(m))
	}
	wcol += gOpts.tabstop - wcol%gOpts.tabstop
	ncol := wtot / wcol
	if ncol < 1 {
		// Guard against a zero column count (terminal narrower than a
		// match) which would otherwise stall the loop below.
		ncol = 1
	}
	b.WriteString("possible matches\n")
	// The inner loop advances i across a row; the outer loop must not
	// increment i again, or one match is skipped after every full row.
	for i := 0; i < len(matches); {
		for j := 0; j < ncol && i < len(matches); i, j = i+1, j+1 {
			fmt.Fprintf(b, "%s%*s", matches[i], wcol-len(matches[i]), "")
		}
		b.WriteByte('\n')
	}
	ui.showMenu(b)
}
|
// Copyright 2011 Kevin Bulusek. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gameboy
import (
"⚛sdl"
"time"
)
// LCD controller modes, as exposed in the low two bits of the STAT
// register.
const (
	modeHBlank = byte(iota)
	modeVBlank
	modeOAM
	modeVRAM
)
// Display geometry and timing. The tick constants are the raw cycle
// counts divided by four (the emulator's machine-cycle unit).
const (
	tilesX = 20
	tilesY = 18
	tileW  = 8
	tileH  = 8
	mapW   = 32
	mapH   = 32

	displayW = tileW * tilesX
	displayH = tileH * tilesY

	hblankTicks = 204 / 4
	vblankTicks = 4560 / 4
	oamTicks    = 80 / 4
	vramTicks   = 172 / 4

	scanlineTicks = oamTicks + vramTicks + hblankTicks
	refreshTicks  = scanlineTicks*displayH + vblankTicks
)
// display emulates the LCD controller and renders frames to an SDL
// surface.
type display struct {
	*memory
	*sdl.Surface
	pal       [4]uint32 // screen colors for the four shades
	frameTime int64     // wall-clock time of the last frame, in ns
	screenW   int
	screenH   int
	clock     int // ticks into the current refresh cycle
	// LCDC flags
	enable       bool
	windowMap    bool
	windowEnable bool
	tileData     bool
	bgMap        bool
	spriteSize   bool
	spriteEnable bool
	bgEnable     bool
	// STAT flags
	lycInterrupt    bool
	oamInterrupt    bool
	vblankInterrupt bool
	hblankInterrupt bool
	mode            byte
	// LCD registers
	ly       byte
	scy, scx byte
	wy, wx   byte
	bgp      [4]byte
	obp      [2][4]byte
	// Scanlines are rendered to here first, and then drawn to the
	// display - rather than each layer accessing the display
	// separately (which is slow).
	lineBuf [displayW]byte
	// When rendering a scanline this is zeroed out, then
	// bitwise-ORed with the pixels from the BG and window. This
	// is then used to lookup which pixels can be painted in
	// sprites that are to be obscured by the background layers.
	oamLineMask [displayW]byte
}
// newDisplay sets up the SDL video surface (scaled by config.Scale),
// installs the classic four-shade green palette and clears the screen.
func newDisplay(m *memory) *display {
	sdl.WM_SetCaption(m.rom.title(), "")
	flags := uint32(sdl.DOUBLEBUF)
	if m.config.Fullscreen {
		flags |= sdl.FULLSCREEN
	}
	lcd := display{memory: m}
	lcd.screenW = displayW * m.config.Scale
	lcd.screenH = displayH * m.config.Scale
	lcd.Surface = sdl.SetVideoMode(lcd.screenW, lcd.screenH, 0, flags)
	lcd.pal[0] = sdl.MapRGBA(lcd.Format, 0x9B, 0xBC, 0x0F, 0)
	lcd.pal[1] = sdl.MapRGBA(lcd.Format, 0x8B, 0xAC, 0x0F, 0)
	lcd.pal[2] = sdl.MapRGBA(lcd.Format, 0x30, 0x62, 0x30, 0)
	lcd.pal[3] = sdl.MapRGBA(lcd.Format, 0x0F, 0x38, 0x0F, 0)
	sdl.ShowCursor(sdl.DISABLE)
	lcd.FillRect(nil, lcd.pal[0])
	lcd.Flip()
	lcd.frameTime = time.Nanoseconds()
	return &lcd
}
// toggleFullScreen switches the SDL surface between windowed and
// fullscreen mode.
func (lcd *display) toggleFullScreen() {
	sdl.WM_ToggleFullScreen(lcd.Surface)
}
// step advances the LCD state machine by t ticks: it updates LY and the
// STAT mode bits, raises STAT/VBlank interrupt requests on mode
// transitions, renders a scanline on entry to the VRAM mode, and flips
// the frame (and optionally throttles) on entry to vblank.
func (lcd *display) step(t int) {
	lcd.clock += t
	if lcd.clock >= refreshTicks {
		lcd.clock -= refreshTicks
	}
	lcd.ly = byte(lcd.clock / scanlineTicks)
	lcd.hram[portLY-0xFF00] = lcd.ly
	mode := calcMode(lcd.clock, lcd.ly)
	if mode == lcd.mode {
		return
	}
	lcd.mode = mode
	stat := lcd.readPort(portSTAT)&^3 | mode
	irq := lcd.readPort(portIF)
	switch mode {
	case modeOAM:
		if lcd.oamInterrupt {
			lcd.writePort(portIF, irq|0x02)
		}
		// NOTE(review): LYC is compared against ly-1, presumably to
		// account for when the coincidence fires relative to the OAM
		// mode change — confirm against hardware timing references.
		if lyc := lcd.readPort(portLYC); lcd.ly-1 == lyc {
			stat |= 0x04
			if lcd.lycInterrupt {
				lcd.writePort(portIF, irq|0x02)
			}
		} else {
			stat &^= 0x04
		}
	case modeVRAM:
		if lcd.enable {
			lcd.scanline()
		}
	case modeHBlank:
		if lcd.hblankInterrupt {
			lcd.writePort(portIF, irq|0x02)
		}
	case modeVBlank:
		if lcd.vblankInterrupt {
			irq |= 0x02
		}
		lcd.writePort(portIF, irq|0x01)
		lcd.Flip()
		// while audio is playing, we let it control the
		// emulation speed
		if !lcd.audio.enable {
			lcd.delay()
		}
	}
	lcd.writePort(portSTAT, stat)
}
// delay sleeps out the remainder of the 16742706ns frame budget
// (~16.74ms, i.e. roughly 59.7 frames per second) and records the
// start time of the new frame.
func (lcd *display) delay() {
	now := time.Nanoseconds()
	delta := now - lcd.frameTime
	target := 16742706 - delta
	if target > 0 {
		time.Sleep(target)
	}
	lcd.frameTime = time.Nanoseconds()
}
// scanline renders the current line into lineBuf: background map, then
// the window overlay, then sprites, before flushing the composited line
// to the surface in one pass.
func (lcd *display) scanline() {
	for i := 0; i < displayW; i++ {
		lcd.oamLineMask[i] = 0
		lcd.lineBuf[i] = 0
	}
	if lcd.bgEnable {
		lcd.mapline(lcd.bgMap, byte(0), lcd.scx, lcd.scy)
	}
	if lcd.windowEnable {
		// Window is drawn when WX < 167, WY < 144 and the current line
		// has reached WY.
		if lcd.wx < 167 && lcd.wy < 144 && lcd.ly >= lcd.wy {
			x := int(lcd.wx) - 7 // WX is offset by 7 pixels
			xoff := -x
			if x < 0 {
				x = 0
			}
			lcd.mapline(lcd.windowMap, byte(x),
				byte(xoff), byte(-lcd.wy))
		}
	}
	if lcd.spriteEnable {
		lcd.oamline()
	}
	lcd.flushline()
}
// mapline draws one scanline of a tile map (BG or window) into lineBuf
// starting at column x, sampling the map at (x+xoff, ly+yoff). The raw
// 2-bit pixel values are OR'ed into oamLineMask for sprite masking.
func (lcd *display) mapline(map1 bool, x, xoff, yoff byte) {
	y := lcd.ly + yoff
	for ; x < displayW; x++ {
		b := lcd.mapAt(map1, int(x+xoff), int(y))
		lcd.oamLineMask[x] |= b
		lcd.lineBuf[x] = lcd.bgp[b]
	}
}
// flushline blits lineBuf for the current scanline to the SDL surface,
// scaled by the configured factor.
func (lcd *display) flushline() {
	// Do some simple run-length counting to reduce the number of
	// FillRect calls we need to make.
	scale := uint16(lcd.config.Scale)
	r := &sdl.Rect{0, int16(lcd.ly) * int16(scale), scale, scale}
	cur := lcd.lineBuf[0]
	for x := 1; x < displayW; x++ {
		b := lcd.lineBuf[x]
		if b != cur {
			lcd.FillRect(r, lcd.pal[cur])
			cur = b
			r.X = int16(x) * int16(scale)
			r.W = scale
		} else {
			r.W += scale
		}
	}
	// Flush the final run.
	lcd.FillRect(r, lcd.pal[cur])
}
// oamline draws up to 10 sprites on the current scanline, scanning OAM
// in order (4 bytes per sprite: y, x, tile index, flags).
func (lcd *display) oamline() {
	// TODO sprite priorities for overlapping sprites at
	// different x-coordinates (lower x-coordinate wins)
	count := 0
	for idx := 0; idx < 0xA0 && count < 10; idx += 4 {
		y := int(lcd.oam[idx]) - 16  // OAM y is offset by 16
		x := int(lcd.oam[idx+1]) - 8 // OAM x is offset by 8
		tile := int(lcd.oam[idx+2])
		info := lcd.oam[idx+3]
		h := 8
		if lcd.spriteSize {
			// 8x16 sprites use an even/odd tile pair.
			h = 16
			tile &= 0xFE
		}
		if int(lcd.ly) < y || int(lcd.ly) >= y+h {
			continue
		}
		// Sprites on this line count toward the 10-sprite limit even
		// when horizontally off-screen.
		count++
		if x == -8 || x >= 168 {
			continue
		}
		lcd.spriteLine(tile, x, y, h, info)
	}
}
// spriteLine draws one 8-pixel row of a sprite into lineBuf, honoring
// x/y flip (bits 5/6), the OBP palette selected by bit 4, and
// background masking (bit 7: the sprite only shows where the BG/window
// pixel value is 0).
func (lcd *display) spriteLine(tile, x, y, h int, info byte) {
	masked := info&0x80 == 0x80
	yflip := info&0x40 == 0x40
	xflip := info&0x20 == 0x20
	palidx := (info >> 4) & 1
	tiley := int(lcd.ly) - y
	if yflip {
		tiley = h - 1 - tiley
	}
	// Byte offset of this row's two bitplanes in VRAM.
	tile = tile*16 + tiley*2
	for i := 0; i < 8; i++ {
		xi := byte(x + i)
		if xi >= displayW {
			if xi > 248 { // i.e., xi < 0
				continue
			}
			return
		}
		if masked && lcd.oamLineMask[xi] != 0 {
			continue
		}
		bit := uint(i)
		if !xflip {
			bit = uint(7 - i)
		}
		px := (lcd.vram[tile] >> bit) & 1
		px |= ((lcd.vram[tile+1] >> bit) & 1) << 1
		if px != 0 { // pixel value 0 is transparent for sprites
			lcd.lineBuf[xi] = lcd.obp[palidx][px]
		}
	}
}
// mapAt returns the 2-bit pixel value at map coordinate (x, y), reading
// tile indices from map 1 (VRAM 0x1C00) or map 0 (0x1800) and tile data
// from the unsigned (0x0000) or signed (0x1000-based) region depending
// on the tileData flag.
func (lcd *display) mapAt(map1 bool, x, y int) byte {
	idx := (y/tileH)*mapW + x/tileW
	if map1 {
		idx += 0x1C00
	} else {
		idx += 0x1800
	}
	tile := lcd.vram[idx]
	if lcd.tileData {
		idx = int(tile) * 16
	} else {
		// Signed tile index relative to 0x1000.
		idx = 0x1000 + int(int8(tile))*16
	}
	idx += (y % tileH) * 2
	bit := uint(tileW - 1 - x%tileW)
	px := (lcd.vram[idx] >> bit) & 1
	px |= ((lcd.vram[idx+1] >> bit) & 1) << 1
	return px
}
// calcMode returns the LCD mode for clock tick t on scanline ly:
// vblank below the visible area, otherwise OAM search, VRAM transfer
// and hblank in order within each scanline.
func calcMode(t int, ly byte) byte {
	if ly >= displayH {
		return modeVBlank
	}
	switch phase := t % scanlineTicks; {
	case phase < oamTicks:
		return modeOAM
	case phase < oamTicks+vramTicks:
		return modeVRAM
	default:
		return modeHBlank
	}
}
Always update display frame time
// Copyright 2011 Kevin Bulusek. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gameboy
import (
"⚛sdl"
"time"
)
// LCD controller modes as reported in the low two bits of STAT.
const (
	modeHBlank = byte(iota)
	modeVBlank
	modeOAM
	modeVRAM
)

// Screen geometry and timing constants. Tick counts are the raw
// cycle counts divided by 4.
const (
	tilesX = 20 // visible tiles per row
	tilesY = 18 // visible tile rows
	tileW  = 8  // tile width in pixels
	tileH  = 8  // tile height in pixels
	mapW   = 32 // tile map width in tiles
	mapH   = 32 // tile map height in tiles

	displayW = tileW * tilesX // 160 visible pixels per line
	displayH = tileH * tilesY // 144 visible scanlines

	hblankTicks = 204 / 4
	vblankTicks = 4560 / 4
	oamTicks    = 80 / 4
	vramTicks   = 172 / 4

	scanlineTicks = oamTicks + vramTicks + hblankTicks
	refreshTicks  = scanlineTicks*displayH + vblankTicks
)
// display implements the Game Boy LCD: it tracks controller timing,
// renders scanlines from VRAM/OAM and blits them to an SDL surface.
type display struct {
	*memory
	*sdl.Surface

	pal       [4]uint32 // shades mapped to the surface pixel format
	frameTime int64     // timestamp (ns) of the last presented frame
	screenW   int
	screenH   int
	clock     int // ticks into the current refresh cycle

	// LCDC flags
	enable       bool
	windowMap    bool
	windowEnable bool
	tileData     bool
	bgMap        bool
	spriteSize   bool
	spriteEnable bool
	bgEnable     bool

	// STAT flags
	lycInterrupt    bool
	oamInterrupt    bool
	vblankInterrupt bool
	hblankInterrupt bool
	mode            byte

	// LCD registers
	ly       byte
	scy, scx byte
	wy, wx   byte
	bgp      [4]byte
	obp      [2][4]byte

	// Scanlines are rendered to here first, and then drawn to the
	// display - rather than each layer accessing the display
	// separately (which is slow).
	lineBuf [displayW]byte

	// When rendering a scanline this is zeroed out, then
	// bitwise-ORed with the pixels from the BG and window. This
	// is then used to lookup which pixels can be painted in
	// sprites that are to be obscured by the background layers.
	oamLineMask [displayW]byte
}
// newDisplay initializes SDL video at the configured scale, builds
// the four-shade palette and returns a display bound to m.
func newDisplay(m *memory) *display {
	sdl.WM_SetCaption(m.rom.title(), "")
	flags := uint32(sdl.DOUBLEBUF)
	if m.config.Fullscreen {
		flags |= sdl.FULLSCREEN
	}
	lcd := display{memory: m}
	lcd.screenW = displayW * m.config.Scale
	lcd.screenH = displayH * m.config.Scale
	lcd.Surface = sdl.SetVideoMode(lcd.screenW, lcd.screenH, 0, flags)
	// The classic DMG green shades, lightest to darkest.
	lcd.pal[0] = sdl.MapRGBA(lcd.Format, 0x9B, 0xBC, 0x0F, 0)
	lcd.pal[1] = sdl.MapRGBA(lcd.Format, 0x8B, 0xAC, 0x0F, 0)
	lcd.pal[2] = sdl.MapRGBA(lcd.Format, 0x30, 0x62, 0x30, 0)
	lcd.pal[3] = sdl.MapRGBA(lcd.Format, 0x0F, 0x38, 0x0F, 0)
	sdl.ShowCursor(sdl.DISABLE)
	// Clear to the background shade before presenting the first frame.
	lcd.FillRect(nil, lcd.pal[0])
	lcd.Flip()
	lcd.frameTime = time.Nanoseconds()
	return &lcd
}
// toggleFullScreen switches the SDL surface between windowed and
// fullscreen mode.
func (lcd *display) toggleFullScreen() {
	sdl.WM_ToggleFullScreen(lcd.Surface)
}
// step advances the LCD clock by t ticks, updates LY and STAT, raises
// the relevant interrupts on mode transitions, renders scanlines
// during the VRAM phase and presents/paces the frame at vblank.
func (lcd *display) step(t int) {
	lcd.clock += t
	if lcd.clock >= refreshTicks {
		lcd.clock -= refreshTicks
	}
	lcd.ly = byte(lcd.clock / scanlineTicks)
	lcd.hram[portLY-0xFF00] = lcd.ly
	mode := calcMode(lcd.clock, lcd.ly)
	if mode == lcd.mode {
		return // nothing to do until the mode changes
	}
	lcd.mode = mode
	stat := lcd.readPort(portSTAT)&^3 | mode
	irq := lcd.readPort(portIF)
	switch mode {
	case modeOAM:
		if lcd.oamInterrupt {
			lcd.writePort(portIF, irq|0x02)
		}
		// LY==LYC coincidence check. NOTE(review): this compares
		// lyc against lcd.ly-1 - presumably compensating for when
		// LY is latched relative to the OAM transition; confirm
		// against hardware timing documentation.
		if lyc := lcd.readPort(portLYC); lcd.ly-1 == lyc {
			stat |= 0x04
			if lcd.lycInterrupt {
				lcd.writePort(portIF, irq|0x02)
			}
		} else {
			stat &^= 0x04
		}
	case modeVRAM:
		if lcd.enable {
			lcd.scanline()
		}
	case modeHBlank:
		if lcd.hblankInterrupt {
			lcd.writePort(portIF, irq|0x02)
		}
	case modeVBlank:
		if lcd.vblankInterrupt {
			irq |= 0x02 // STAT interrupt in addition to VBlank below
		}
		lcd.writePort(portIF, irq|0x01)
		lcd.Flip()
		lcd.delay()
	}
	lcd.writePort(portSTAT, stat)
}
// delay paces emulation to the hardware frame rate by sleeping off
// the remainder of the 16742706ns (~59.7Hz) frame budget.
func (lcd *display) delay() {
	// while audio is playing, we let it control the
	// emulation speed
	if !lcd.audio.enable {
		now := time.Nanoseconds()
		delta := now - lcd.frameTime
		target := 16742706 - delta
		if target > 0 {
			time.Sleep(target)
		}
	}
	// The frame time is always updated, even when no sleep happened.
	lcd.frameTime = time.Nanoseconds()
}
// scanline renders the current line (lcd.ly): background first, then
// the window, then sprites, and finally flushes it to the surface.
func (lcd *display) scanline() {
	// Reset the per-line pixel buffer and sprite mask.
	for i := 0; i < displayW; i++ {
		lcd.oamLineMask[i] = 0
		lcd.lineBuf[i] = 0
	}
	if lcd.bgEnable {
		lcd.mapline(lcd.bgMap, byte(0), lcd.scx, lcd.scy)
	}
	if lcd.windowEnable {
		// Draw the window only when its origin is on screen and the
		// current scanline is at or below its top edge.
		if lcd.wx < 167 && lcd.wy < 144 && lcd.ly >= lcd.wy {
			x := int(lcd.wx) - 7 // WX is offset by 7 pixels
			xoff := -x
			if x < 0 {
				x = 0
			}
			lcd.mapline(lcd.windowMap, byte(x),
				byte(xoff), byte(-lcd.wy))
		}
	}
	if lcd.spriteEnable {
		lcd.oamline()
	}
	lcd.flushline()
}
// mapline paints one scanline of a tile map (background or window)
// into lineBuf starting at screen column x, and records non-zero
// pixels in oamLineMask. xoff/yoff translate screen coordinates into
// map coordinates; the byte arithmetic wraps just like the hardware's
// 256-pixel map space.
func (lcd *display) mapline(map1 bool, x, xoff, yoff byte) {
	mapY := lcd.ly + yoff
	for col := x; col < displayW; col++ {
		px := lcd.mapAt(map1, int(col+xoff), int(mapY))
		lcd.oamLineMask[col] |= px
		lcd.lineBuf[col] = lcd.bgp[px]
	}
}
// flushline draws the rendered scanline to the SDL surface, scaled by
// the configured integer factor.
func (lcd *display) flushline() {
	// Do some simple run-length counting to reduce the number of
	// FillRect calls we need to make.
	scale := uint16(lcd.config.Scale)
	r := &sdl.Rect{0, int16(lcd.ly) * int16(scale), scale, scale}
	cur := lcd.lineBuf[0]
	for x := 1; x < displayW; x++ {
		b := lcd.lineBuf[x]
		if b != cur {
			// Color changed: flush the run and start a new one.
			lcd.FillRect(r, lcd.pal[cur])
			cur = b
			r.X = int16(x) * int16(scale)
			r.W = scale
		} else {
			r.W += scale
		}
	}
	// Flush the final run.
	lcd.FillRect(r, lcd.pal[cur])
}
// oamline draws the sprite (OBJ) layer for the current scanline.
// Matching hardware behavior, only the first 10 sprites whose
// vertical range covers this line are drawn.
func (lcd *display) oamline() {
	// TODO sprite priorities for overlapping sprites at
	// different x-coordinates (lower x-coordinate wins)
	count := 0
	// OAM holds 40 entries of 4 bytes each (0xA0 bytes total):
	// y, x, tile index, attribute flags.
	for idx := 0; idx < 0xA0 && count < 10; idx += 4 {
		y := int(lcd.oam[idx]) - 16  // stored y is offset by 16
		x := int(lcd.oam[idx+1]) - 8 // stored x is offset by 8
		tile := int(lcd.oam[idx+2])
		info := lcd.oam[idx+3]
		h := 8
		if lcd.spriteSize {
			// 8x16 sprites ignore the low bit of the tile index.
			h = 16
			tile &= 0xFE
		}
		// Skip sprites that do not cover the current scanline.
		if int(lcd.ly) < y || int(lcd.ly) >= y+h {
			continue
		}
		// A sprite counts toward the 10-per-line limit even when it
		// is horizontally off-screen and nothing gets drawn.
		count++
		if x == -8 || x >= 168 {
			continue
		}
		lcd.spriteLine(tile, x, y, h, info)
	}
}
// spriteLine renders one 8-pixel-wide sprite row into lineBuf.
// tile is the tile index, (x, y) the sprite's top-left screen
// position, h its height (8 or 16) and info the OAM attribute byte.
func (lcd *display) spriteLine(tile, x, y, h int, info byte) {
	masked := info&0x80 == 0x80 // hidden behind non-zero BG pixels
	yflip := info&0x40 == 0x40
	xflip := info&0x20 == 0x20
	palidx := (info >> 4) & 1 // which of the two object palettes
	tiley := int(lcd.ly) - y  // row within the sprite
	if yflip {
		tiley = h - 1 - tiley
	}
	// Each tile takes 16 bytes of VRAM; each row is 2 bytes
	// (two bitplanes).
	tile = tile*16 + tiley*2
	for i := 0; i < 8; i++ {
		xi := byte(x + i)
		if xi >= displayW {
			if xi > 248 { // i.e., xi < 0
				continue
			}
			return
		}
		// Masked sprites lose to any non-zero BG/window pixel.
		if masked && lcd.oamLineMask[xi] != 0 {
			continue
		}
		bit := uint(i)
		if !xflip {
			bit = uint(7 - i)
		}
		// Combine the two bitplanes into a 2-bit color index.
		px := (lcd.vram[tile] >> bit) & 1
		px |= ((lcd.vram[tile+1] >> bit) & 1) << 1
		// Color 0 is transparent for sprites.
		if px != 0 {
			lcd.lineBuf[xi] = lcd.obp[palidx][px]
		}
	}
}
// mapAt returns the 2-bit color index of the background/window pixel
// at map-space coordinates (x, y). map1 selects the tile map at VRAM
// offset 0x1C00 instead of 0x1800.
func (lcd *display) mapAt(map1 bool, x, y int) byte {
	idx := (y/tileH)*mapW + x/tileW
	if map1 {
		idx += 0x1C00
	} else {
		idx += 0x1800
	}
	tile := lcd.vram[idx]
	if lcd.tileData {
		// Unsigned tile index addressing.
		idx = int(tile) * 16
	} else {
		// Signed tile index addressing relative to offset 0x1000.
		idx = 0x1000 + int(int8(tile))*16
	}
	idx += (y % tileH) * 2 // 2 bytes (bitplanes) per tile row
	bit := uint(tileW - 1 - x%tileW)
	px := (lcd.vram[idx] >> bit) & 1
	px |= ((lcd.vram[idx+1] >> bit) & 1) << 1
	return px
}
// calcMode reports the LCD controller mode for clock tick t on
// scanline ly. Lines below the visible area are in vblank; within a
// visible line the controller cycles through OAM search, VRAM
// transfer and hblank.
func calcMode(t int, ly byte) byte {
	if ly >= displayH {
		return modeVBlank
	}
	t %= scanlineTicks
	if t < oamTicks {
		return modeOAM
	}
	if t < oamTicks+vramTicks {
		return modeVRAM
	}
	return modeHBlank
}
|
package main
import (
"bytes"
"fmt"
"io"
"log"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"text/tabwriter"
"time"
"unicode"
"unicode/utf8"
"github.com/doronbehar/termbox-go"
"github.com/mattn/go-runewidth"
)
// gEscapeCode is the ASCII ESC byte that introduces ANSI escape
// sequences in strings handled by print/printLength.
const gEscapeCode = 27

// gKeyVal maps termbox key codes to the "<name>" spellings used in
// key mappings and configuration.
var gKeyVal = map[termbox.Key]string{
	termbox.KeyF1:             "<f-1>",
	termbox.KeyF2:             "<f-2>",
	termbox.KeyF3:             "<f-3>",
	termbox.KeyF4:             "<f-4>",
	termbox.KeyF5:             "<f-5>",
	termbox.KeyF6:             "<f-6>",
	termbox.KeyF7:             "<f-7>",
	termbox.KeyF8:             "<f-8>",
	termbox.KeyF9:             "<f-9>",
	termbox.KeyF10:            "<f-10>",
	termbox.KeyF11:            "<f-11>",
	termbox.KeyF12:            "<f-12>",
	termbox.KeyInsert:         "<insert>",
	termbox.KeyDelete:         "<delete>",
	termbox.KeyHome:           "<home>",
	termbox.KeyEnd:            "<end>",
	termbox.KeyPgup:           "<pgup>",
	termbox.KeyPgdn:           "<pgdn>",
	termbox.KeyArrowUp:        "<up>",
	termbox.KeyArrowDown:      "<down>",
	termbox.KeyArrowLeft:      "<left>",
	termbox.KeyArrowRight:     "<right>",
	termbox.KeyCtrlSpace:      "<c-space>",
	termbox.KeyCtrlA:          "<c-a>",
	termbox.KeyCtrlB:          "<c-b>",
	termbox.KeyCtrlC:          "<c-c>",
	termbox.KeyCtrlD:          "<c-d>",
	termbox.KeyCtrlE:          "<c-e>",
	termbox.KeyCtrlF:          "<c-f>",
	termbox.KeyCtrlG:          "<c-g>",
	termbox.KeyBackspace:      "<bs>",
	termbox.KeyTab:            "<tab>",
	termbox.KeyCtrlJ:          "<c-j>",
	termbox.KeyCtrlK:          "<c-k>",
	termbox.KeyCtrlL:          "<c-l>",
	termbox.KeyEnter:          "<enter>",
	termbox.KeyCtrlN:          "<c-n>",
	termbox.KeyCtrlO:          "<c-o>",
	termbox.KeyCtrlP:          "<c-p>",
	termbox.KeyCtrlQ:          "<c-q>",
	termbox.KeyCtrlR:          "<c-r>",
	termbox.KeyCtrlS:          "<c-s>",
	termbox.KeyCtrlT:          "<c-t>",
	termbox.KeyCtrlU:          "<c-u>",
	termbox.KeyCtrlV:          "<c-v>",
	termbox.KeyCtrlW:          "<c-w>",
	termbox.KeyCtrlX:          "<c-x>",
	termbox.KeyCtrlY:          "<c-y>",
	termbox.KeyCtrlZ:          "<c-z>",
	termbox.KeyEsc:            "<esc>",
	termbox.KeyCtrlBackslash:  "<c-\\>",
	termbox.KeyCtrlRsqBracket: "<c-]>",
	termbox.KeyCtrl6:          "<c-6>",
	termbox.KeyCtrlSlash:      "<c-/>",
	termbox.KeySpace:          "<space>",
	termbox.KeyBackspace2:     "<bs2>",
}

// gValKey is the inverse of gKeyVal, built once at startup.
var gValKey map[string]termbox.Key

func init() {
	gValKey = make(map[string]termbox.Key)
	for k, v := range gKeyVal {
		gValKey[v] = k
	}
}
// win is a rectangular region of the terminal: w/h are its size and
// x/y the position of its top-left corner in screen cells.
type win struct {
	w, h, x, y int
}

// newWin returns a window with the given size and position.
func newWin(w, h, x, y int) *win {
	return &win{w, h, x, y}
}

// renew resizes and repositions the window in place.
func (win *win) renew(w, h, x, y int) {
	win.w, win.h, win.x, win.y = w, h, x, y
}
// printLength returns the on-screen width of s in terminal cells,
// skipping ANSI SGR escape sequences, expanding tabs to the
// configured tabstop and counting wide runes as two cells.
func printLength(s string) int {
	ind := 0
	off := 0
	for i := 0; i < len(s); i++ {
		r, w := utf8.DecodeRuneInString(s[i:])
		if r == gEscapeCode && i+1 < len(s) && s[i+1] == '[' {
			// Skip over "\033[...m", looking at most 32 bytes ahead
			// for the terminating 'm'.
			j := strings.IndexByte(s[i:min(len(s), i+32)], 'm')
			if j == -1 {
				continue
			}
			i += j
			continue
		}
		i += w - 1 // skip the remaining bytes of a multi-byte rune
		if r == '\t' {
			ind += gOpts.tabstop - (ind-off)%gOpts.tabstop
		} else {
			ind += runewidth.RuneWidth(r)
		}
	}
	return ind
}
// print draws s into the window at cell (x, y), interpreting embedded
// ANSI SGR sequences to update fg/bg and expanding tabs. It returns
// the attributes in effect at the end so multi-line output can carry
// colors across calls.
func (win *win) print(x, y int, fg, bg termbox.Attribute, s string) (termbox.Attribute, termbox.Attribute) {
	off := x
	for i := 0; i < len(s); i++ {
		r, w := utf8.DecodeRuneInString(s[i:])
		if r == gEscapeCode && i+1 < len(s) && s[i+1] == '[' {
			// Parse "\033[...m" and apply its SGR codes to fg/bg.
			j := strings.IndexByte(s[i:min(len(s), i+32)], 'm')
			if j == -1 {
				continue
			}
			fg, bg = applyAnsiCodes(s[i+2:i+j], fg, bg)
			i += j
			continue
		}
		// Clip to the window's right edge.
		if x < win.w {
			termbox.SetCell(win.x+x, win.y+y, r, fg, bg)
		}
		i += w - 1 // skip the remaining bytes of a multi-byte rune
		if r == '\t' {
			// Fill the tab with spaces up to the next tabstop.
			s := gOpts.tabstop - (x-off)%gOpts.tabstop
			for i := 0; i < s && x+i < win.w; i++ {
				termbox.SetCell(win.x+x+i, win.y+y, ' ', fg, bg)
			}
			x += s
		} else {
			x += runewidth.RuneWidth(r)
		}
	}
	return fg, bg
}
// printf formats its arguments and prints them at (x, y).
func (win *win) printf(x, y int, fg, bg termbox.Attribute, format string, a ...interface{}) {
	win.print(x, y, fg, bg, fmt.Sprintf(format, a...))
}

// printLine prints s and pads the rest of the line with spaces.
// NOTE(review): the padding uses len(s), the byte length, so strings
// with multi-byte or wide runes pad short - presumably callers only
// pass ASCII here; confirm.
func (win *win) printLine(x, y int, fg, bg termbox.Attribute, s string) {
	win.printf(x, y, fg, bg, "%s%*s", s, win.w-len(s), "")
}

// printRight prints s against the window's right edge (positioned by
// byte length; see the note on printLine).
func (win *win) printRight(y int, fg, bg termbox.Attribute, s string) {
	win.print(win.w-len(s), y, fg, bg, s)
}
// printReg draws a file-preview register into the window, carrying
// ANSI colors across lines. A nil register draws nothing; a register
// still loading shows a placeholder.
func (win *win) printReg(reg *reg) {
	if reg == nil {
		return
	}
	fg, bg := termbox.ColorDefault, termbox.ColorDefault
	if reg.loading {
		fg = termbox.AttrReverse
		win.print(2, 0, fg, bg, "loading...")
		return
	}
	for i, l := range reg.lines {
		fg, bg = win.print(2, i, fg, bg, l)
	}
}
// fileInfo builds the right-aligned info string for a file according
// to the 'info' option: size (or directory entry count when
// 'dircounts' is set), mtime, atime and/or ctime.
func fileInfo(f *file, d *dir) string {
	var info string
	path := filepath.Join(d.path, f.Name())
	for _, s := range gOpts.info {
		switch s {
		case "size":
			if !(gOpts.dircounts && f.IsDir()) {
				info = fmt.Sprintf("%s %4s", info, humanize(f.Size()))
				continue
			}
			if f.dirCount == -1 {
				// Lazily count directory entries, capped at 1000;
				// -2 marks an unreadable directory.
				//
				// Fix: the previous code fell through after a failed
				// Open, calling Readdirnames/Close on the unusable
				// handle and clobbering the -2 marker.
				d, err := os.Open(path)
				if err != nil {
					f.dirCount = -2
				} else {
					names, err := d.Readdirnames(1000)
					d.Close()
					if names == nil && err != io.EOF {
						f.dirCount = -2
					} else {
						f.dirCount = len(names)
					}
				}
			}
			switch {
			case f.dirCount < 0:
				info = fmt.Sprintf("%s ?", info)
			case f.dirCount < 1000:
				info = fmt.Sprintf("%s %4d", info, f.dirCount)
			default:
				info = fmt.Sprintf("%s 999+", info)
			}
		case "time":
			info = fmt.Sprintf("%s %12s", info, f.ModTime().Format("Jan _2 15:04"))
		case "atime":
			info = fmt.Sprintf("%s %12s", info, f.accessTime.Format("Jan _2 15:04"))
		case "ctime":
			info = fmt.Sprintf("%s %12s", info, f.changeTime.Format("Jan _2 15:04"))
		default:
			log.Printf("unknown info type: %s", s)
		}
	}
	return info
}
// printDir renders a directory listing into the window: optional line
// numbers, selection/copy/cut markers, icons, names truncated or
// padded to fit, and a right-aligned info column.
func (win *win) printDir(dir *dir, selections map[string]int, saves map[string]bool, colors colorMap, icons iconMap) {
	if win.w < 5 || dir == nil {
		return
	}
	if dir.loading {
		win.print(2, 0, termbox.AttrReverse, termbox.ColorDefault, "loading...")
		return
	}
	if dir.noPerm {
		win.print(2, 0, termbox.AttrReverse, termbox.ColorDefault, "permission denied")
		return
	}
	if len(dir.files) == 0 {
		win.print(2, 0, termbox.AttrReverse, termbox.ColorDefault, "empty")
		return
	}
	// Visible slice of the listing: the cursor (dir.ind) stays at
	// row dir.pos within the window.
	beg := max(dir.ind-dir.pos, 0)
	end := min(beg+win.h, len(dir.files))
	if beg > end {
		return
	}
	// Width and format of the optional line-number column.
	var lnwidth int
	var lnformat string
	if gOpts.number || gOpts.relativenumber {
		lnwidth = 1
		if gOpts.number && gOpts.relativenumber {
			lnwidth++
		}
		for j := 10; j < len(dir.files); j *= 10 {
			lnwidth++
		}
		lnformat = fmt.Sprintf("%%%d.d ", lnwidth)
	}
	for i, f := range dir.files[beg:end] {
		fg, bg := colors.get(f)
		if lnwidth > 0 {
			var ln string
			if gOpts.number && (!gOpts.relativenumber) {
				ln = fmt.Sprintf(lnformat, i+1+beg)
			} else if gOpts.relativenumber {
				switch {
				case i < dir.pos:
					ln = fmt.Sprintf(lnformat, dir.pos-i)
				case i > dir.pos:
					ln = fmt.Sprintf(lnformat, i-dir.pos)
				case gOpts.number:
					// Cursor line shows the absolute number when
					// both options are enabled.
					ln = fmt.Sprintf(fmt.Sprintf("%%%d.d ", lnwidth-1), i+1+beg)
				default:
					ln = ""
				}
			}
			win.print(0, i, termbox.ColorYellow, bg, ln)
		}
		// Marker column: magenta for selections, yellow/red for
		// files saved for copy/move.
		path := filepath.Join(dir.path, f.Name())
		if _, ok := selections[path]; ok {
			win.print(lnwidth, i, fg, termbox.ColorMagenta, " ")
		} else if cp, ok := saves[path]; ok {
			if cp {
				win.print(lnwidth, i, fg, termbox.ColorYellow, " ")
			} else {
				win.print(lnwidth, i, fg, termbox.ColorRed, " ")
			}
		}
		if i == dir.pos {
			fg |= termbox.AttrReverse // highlight the cursor line
		}
		// Assemble the visible row: space, optional icon, file name.
		var s []rune
		s = append(s, ' ')
		if gOpts.icons {
			s = append(s, []rune(icons.get(f))...)
			s = append(s, ' ')
		}
		for _, r := range f.Name() {
			s = append(s, r)
		}
		// Truncate or pad the name to the window width.
		w := runeSliceWidth(s)
		if w > win.w-3 {
			s = runeSliceWidthRange(s, 0, win.w-4)
			s = append(s, []rune(gOpts.truncatechar)...)
		} else {
			for i := 0; i < win.w-3-w; i++ {
				s = append(s, ' ')
			}
		}
		// Append the info column when it fits in under half the
		// window width.
		info := fileInfo(f, dir)
		if len(info) > 0 && win.w-2 > 2*len(info) {
			if win.w-2 > w+len(info) {
				s = runeSliceWidthRange(s, 0, win.w-3-len(info)-lnwidth)
			} else {
				s = runeSliceWidthRange(s, 0, win.w-4-len(info)-lnwidth)
				s = append(s, []rune(gOpts.truncatechar)...)
			}
			for _, r := range info {
				s = append(s, r)
			}
		}
		s = append(s, ' ')
		win.print(lnwidth+1, i, fg, bg, string(s))
	}
}
// ui holds all terminal state: the directory column windows, the
// prompt/status/menu lines, preview caches, input channels and the
// command-line editing buffers.
type ui struct {
	wins        []*win      // one window per directory column
	promptWin   *win        // top prompt line
	msgWin      *win        // bottom status/command line
	menuWin     *win        // binding/completion menu
	msg         string      // current status message
	regPrev     *reg        // file preview cache
	dirPrev     *dir        // directory preview cache
	exprChan    chan expr   // evaluated input expressions
	keyChan     chan string // synthesized keys (e.g. from 'push')
	evChan      chan termbox.Event
	menuBuf     *bytes.Buffer // menu contents; nil when hidden
	cmdPrefix   string        // command-line prompt; "" in normal mode
	cmdAccLeft  []rune        // command line left of the cursor
	cmdAccRight []rune        // command line right of the cursor
	cmdYankBuf  []rune        // last killed command-line text
	keyAcc      []rune        // pending (possibly partial) key sequence
	keyCount    []rune        // leading digits forming the count
	colors      colorMap
	icons       iconMap
}
// getWidths splits the terminal width wtot into column widths
// proportional to the 'ratios' option. The last column absorbs the
// rounding remainder, minus one cell for the border when 'drawbox'
// is set.
func getWidths(wtot int) []int {
	rsum := 0
	for _, r := range gOpts.ratios {
		rsum += r
	}
	wlen := len(gOpts.ratios)
	widths := make([]int, wlen)
	wsum := 0
	for i := 0; i < wlen-1; i++ {
		widths[i] = gOpts.ratios[i] * (wtot / rsum)
		wsum += widths[i]
	}
	widths[wlen-1] = wtot - wsum
	if gOpts.drawbox {
		widths[wlen-1]--
	}
	return widths
}
// getWins creates the directory column windows, leaving room for the
// prompt and status lines (and for the border rows when 'drawbox'
// is set).
func getWins() []*win {
	wtot, htot := termbox.Size()
	var wins []*win
	widths := getWidths(wtot)
	wacc := 0
	wlen := len(widths)
	for i := 0; i < wlen; i++ {
		if gOpts.drawbox {
			wins = append(wins, newWin(widths[i], htot-4, wacc+1, 2))
		} else {
			wins = append(wins, newWin(widths[i], htot-2, wacc, 1))
		}
		wacc += widths[i]
	}
	return wins
}
// newUI builds the UI. It starts one goroutine polling termbox events
// and a second that disambiguates a bare escape key from an
// alt-modified key: an escape followed within 100ms by another key is
// reported as that key with ModAlt set.
func newUI() *ui {
	wtot, htot := termbox.Size()
	evQueue := make(chan termbox.Event)
	go func() {
		for {
			evQueue <- termbox.PollEvent()
		}
	}()
	evChan := make(chan termbox.Event)
	go func() {
		for {
			ev := <-evQueue
			if ev.Type == termbox.EventKey && ev.Key == termbox.KeyEsc {
				select {
				case ev2 := <-evQueue:
					// Escape immediately followed by a key: alt.
					ev2.Mod = termbox.ModAlt
					evChan <- ev2
					continue
				case <-time.After(100 * time.Millisecond):
				}
			}
			evChan <- ev
		}
	}()
	return &ui{
		wins:      getWins(),
		promptWin: newWin(wtot, 1, 0, 0),
		msgWin:    newWin(wtot, 1, 0, htot-1),
		menuWin:   newWin(wtot, 1, 0, htot-2),
		keyChan:   make(chan string, 1000),
		evChan:    evChan,
		colors:    parseColors(),
		icons:     parseIcons(),
	}
}
// renew recomputes every window's geometry after a terminal resize.
func (ui *ui) renew() {
	wtot, htot := termbox.Size()
	widths := getWidths(wtot)
	wacc := 0
	wlen := len(widths)
	for i := 0; i < wlen; i++ {
		if gOpts.drawbox {
			ui.wins[i].renew(widths[i], htot-4, wacc+1, 2)
		} else {
			ui.wins[i].renew(widths[i], htot-2, wacc, 1)
		}
		wacc += widths[i]
	}
	ui.promptWin.renew(wtot, 1, 0, 0)
	ui.msgWin.renew(wtot, 1, 0, htot-1)
	ui.menuWin.renew(wtot, 1, 0, htot-2)
}
// sort re-sorts the directory preview while keeping the cursor on the
// same file.
func (ui *ui) sort() {
	if ui.dirPrev == nil {
		return
	}
	name := ui.dirPrev.name()
	ui.dirPrev.sort()
	ui.dirPrev.sel(name, ui.wins[0].h)
}
// echo sets the status-line message without logging it.
func (ui *ui) echo(msg string) {
	ui.msg = msg
}

// echof is echo with Printf-style formatting.
func (ui *ui) echof(format string, a ...interface{}) {
	ui.echo(fmt.Sprintf(format, a...))
}

// echomsg sets the status-line message and also writes it to the log.
func (ui *ui) echomsg(msg string) {
	ui.msg = msg
	log.Print(msg)
}

// echomsgf is echomsg with Printf-style formatting.
func (ui *ui) echomsgf(format string, a ...interface{}) {
	ui.echomsg(fmt.Sprintf(format, a...))
}

// echoerr shows msg through the 'errorfmt' option and logs it.
func (ui *ui) echoerr(msg string) {
	ui.msg = fmt.Sprintf(gOpts.errorfmt, msg)
	log.Printf("error: %s", msg)
}

// echoerrf is echoerr with Printf-style formatting.
func (ui *ui) echoerrf(format string, a ...interface{}) {
	ui.echoerr(fmt.Sprintf(format, a...))
}
// reg is a file-preview register: the cached preview lines for path,
// with loading set while the preview is still being generated.
type reg struct {
	loading  bool
	loadTime time.Time
	path     string
	lines    []string
}
// loadFile refreshes the preview of the current file: a directory
// listing for directories, file contents for regular files. It does
// nothing when previews are disabled.
func (ui *ui) loadFile(nav *nav) {
	curr, err := nav.currFile()
	if err != nil {
		return
	}
	if !gOpts.preview {
		return
	}
	if curr.IsDir() {
		ui.dirPrev = nav.loadDir(curr.path)
	} else if curr.Mode().IsRegular() {
		ui.regPrev = nav.loadReg(curr.path)
	}
}

// loadFileInfo shows mode, size, mtime and any symlink target of the
// current file on the status line.
func (ui *ui) loadFileInfo(nav *nav) {
	curr, err := nav.currFile()
	if err != nil {
		return
	}
	var linkTarget string
	if curr.linkTarget != "" {
		linkTarget = " -> " + curr.linkTarget
	}
	ui.echof("%v %4s %v%s", curr.Mode(), humanize(curr.Size()), curr.ModTime().Format(gOpts.timefmt), linkTarget)
}
// drawPromptLine renders the top prompt from the 'promptfmt' option,
// expanding %u (user), %h (host), %f (current file) and %w (working
// directory, with the home directory shortened to ~). If the result
// is too wide, path components are abbreviated to their first rune
// one at a time until it fits.
func (ui *ui) drawPromptLine(nav *nav) {
	fg, bg := termbox.ColorDefault, termbox.ColorDefault
	pwd := nav.currDir().path
	if strings.HasPrefix(pwd, gUser.HomeDir) {
		pwd = filepath.Join("~", strings.TrimPrefix(pwd, gUser.HomeDir))
	}
	pwd = filepath.Clean(pwd)
	sep := string(filepath.Separator)
	if !strings.HasSuffix(pwd, sep) {
		pwd += sep
	}
	var fname string
	curr, err := nav.currFile()
	if err == nil {
		fname = filepath.Base(curr.path)
	}
	var prompt string
	prompt = strings.Replace(gOpts.promptfmt, "%u", gUser.Username, -1)
	prompt = strings.Replace(prompt, "%h", gHostname, -1)
	prompt = strings.Replace(prompt, "%f", fname, -1)
	if printLength(strings.Replace(prompt, "%w", pwd, -1)) > ui.promptWin.w {
		// Abbreviate leading path components until the prompt fits.
		names := strings.Split(pwd, sep)
		for i := range names {
			if names[i] == "" {
				continue
			}
			r, _ := utf8.DecodeRuneInString(names[i])
			names[i] = string(r)
			if printLength(strings.Replace(prompt, "%w", strings.Join(names, sep), -1)) <= ui.promptWin.w {
				break
			}
		}
		pwd = strings.Join(names, sep)
	}
	prompt = strings.Replace(prompt, "%w", pwd, -1)
	ui.promptWin.print(0, 0, fg, bg, prompt)
}
// drawStatLine renders the bottom status line: the current message on
// the left and, right-aligned, any pending key input, copy/move/
// delete progress and the "index/total" position indicator.
func (ui *ui) drawStatLine(nav *nav) {
	fg, bg := termbox.ColorDefault, termbox.ColorDefault
	currDir := nav.currDir()
	ui.msgWin.print(0, 0, fg, bg, ui.msg)
	tot := len(currDir.files)
	ind := min(currDir.ind+1, tot)
	acc := string(ui.keyCount) + string(ui.keyAcc)
	var progress string
	if nav.copyTotal > 0 {
		percentage := int((100 * float64(nav.copyBytes)) / float64(nav.copyTotal))
		progress += fmt.Sprintf(" [%d%%]", percentage)
	}
	if nav.moveTotal > 0 {
		progress += fmt.Sprintf(" [%d/%d]", nav.moveCount, nav.moveTotal)
	}
	if nav.deleteTotal > 0 {
		progress += fmt.Sprintf(" [%d/%d]", nav.deleteCount, nav.deleteTotal)
	}
	ruler := fmt.Sprintf("%s%s %d/%d", acc, progress, ind, tot)
	ui.msgWin.printRight(0, fg, bg, ruler)
}
// drawBox draws the border around the UI and the vertical separators
// between columns using box-drawing characters.
func (ui *ui) drawBox() {
	fg, bg := termbox.ColorDefault, termbox.ColorDefault
	w, h := termbox.Size()
	// Horizontal edges: row 1 sits below the prompt, row h-2 above
	// the status line.
	for i := 1; i < w-1; i++ {
		termbox.SetCell(i, 1, '─', fg, bg)
		termbox.SetCell(i, h-2, '─', fg, bg)
	}
	// Vertical edges.
	for i := 2; i < h-2; i++ {
		termbox.SetCell(0, i, '│', fg, bg)
		termbox.SetCell(w-1, i, '│', fg, bg)
	}
	// Corners.
	termbox.SetCell(0, 1, '┌', fg, bg)
	termbox.SetCell(w-1, 1, '┐', fg, bg)
	termbox.SetCell(0, h-2, '└', fg, bg)
	termbox.SetCell(w-1, h-2, '┘', fg, bg)
	// Column separators, joined to the horizontal edges.
	wacc := 0
	for wind := 0; wind < len(ui.wins)-1; wind++ {
		wacc += ui.wins[wind].w
		termbox.SetCell(wacc, 1, '┬', fg, bg)
		for i := 2; i < h-2; i++ {
			termbox.SetCell(wacc, i, '│', fg, bg)
		}
		termbox.SetCell(wacc, h-2, '┴', fg, bg)
	}
}
// draw repaints the whole UI: prompt line, directory columns, the
// optional preview column, the status/command line, box borders and
// any open menu, then flushes the terminal.
func (ui *ui) draw(nav *nav) {
	fg, bg := termbox.ColorDefault, termbox.ColorDefault
	termbox.Clear(fg, bg)
	ui.drawPromptLine(nav)
	// Right-align the directory trail over the columns, reserving
	// the last column for the preview when enabled.
	length := min(len(ui.wins), len(nav.dirs))
	woff := len(ui.wins) - length
	if gOpts.preview {
		length = min(len(ui.wins)-1, len(nav.dirs))
		woff = len(ui.wins) - 1 - length
	}
	doff := len(nav.dirs) - length
	for i := 0; i < length; i++ {
		ui.wins[woff+i].printDir(nav.dirs[doff+i], nav.selections, nav.saves, ui.colors, ui.icons)
	}
	switch ui.cmdPrefix {
	case "":
		// Normal mode: status line, no visible cursor.
		ui.drawStatLine(nav)
		termbox.HideCursor()
	case ">":
		// Prompt mode: a message precedes the editable input.
		ui.msgWin.printLine(0, 0, fg, bg, ui.cmdPrefix)
		ui.msgWin.print(len(ui.cmdPrefix), 0, fg, bg, ui.msg)
		ui.msgWin.print(len(ui.cmdPrefix)+len(ui.msg), 0, fg, bg, string(ui.cmdAccLeft))
		ui.msgWin.print(len(ui.cmdPrefix)+len(ui.msg)+runeSliceWidth(ui.cmdAccLeft), 0, fg, bg, string(ui.cmdAccRight))
		termbox.SetCursor(ui.msgWin.x+len(ui.cmdPrefix)+len(ui.msg)+runeSliceWidth(ui.cmdAccLeft), ui.msgWin.y)
	default:
		// Command-line mode: prefix, then the text on either side
		// of the cursor.
		ui.msgWin.printLine(0, 0, fg, bg, ui.cmdPrefix)
		ui.msgWin.print(len(ui.cmdPrefix), 0, fg, bg, string(ui.cmdAccLeft))
		ui.msgWin.print(len(ui.cmdPrefix)+runeSliceWidth(ui.cmdAccLeft), 0, fg, bg, string(ui.cmdAccRight))
		termbox.SetCursor(ui.msgWin.x+len(ui.cmdPrefix)+runeSliceWidth(ui.cmdAccLeft), ui.msgWin.y)
	}
	if gOpts.preview {
		f, err := nav.currFile()
		if err == nil {
			preview := ui.wins[len(ui.wins)-1]
			if f.IsDir() {
				preview.printDir(ui.dirPrev, nav.selections, nav.saves, ui.colors, ui.icons)
			} else if f.Mode().IsRegular() {
				preview.printReg(ui.regPrev)
			}
		}
	}
	if gOpts.drawbox {
		ui.drawBox()
	}
	if ui.menuBuf != nil {
		// Anchor the menu to the bottom, just above the status line;
		// the first line is the bold header.
		lines := strings.Split(ui.menuBuf.String(), "\n")
		lines = lines[:len(lines)-1]
		ui.menuWin.h = len(lines) - 1
		ui.menuWin.y = ui.wins[0].h - ui.menuWin.h
		if gOpts.drawbox {
			ui.menuWin.y += 2
		}
		ui.menuWin.printLine(0, 0, termbox.AttrBold, termbox.AttrBold, lines[0])
		for i, line := range lines[1:] {
			ui.menuWin.printLine(0, i+1, fg, bg, "")
			ui.menuWin.print(0, i+1, fg, bg, line)
		}
	}
	termbox.Flush()
}
// findBinds collects every binding whose key sequence starts with
// prefix. The second result reports whether prefix itself is bound
// exactly.
func findBinds(keys map[string]expr, prefix string) (map[string]expr, bool) {
	matched := make(map[string]expr)
	exact := false
	for key, e := range keys {
		if strings.HasPrefix(key, prefix) {
			matched[key] = e
			exact = exact || key == prefix
		}
	}
	return matched, exact
}
// listBinds renders a sorted "keys\tcommand" table of the given
// bindings into a buffer for display in the menu window.
func listBinds(binds map[string]expr) *bytes.Buffer {
	t := new(tabwriter.Writer)
	b := new(bytes.Buffer)
	var keys []string
	for k := range binds {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	t.Init(b, 0, gOpts.tabstop, 2, '\t', 0)
	fmt.Fprintln(t, "keys\tcommand")
	for _, k := range keys {
		fmt.Fprintf(t, "%s\t%v\n", k, binds[k])
	}
	t.Flush()
	return b
}
// listMarks renders a "mark\tpath" table, sorted by mark, into a
// buffer for display in the menu window.
func listMarks(marks map[string]string) *bytes.Buffer {
	keys := make([]string, 0, len(marks))
	for k := range marks {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	b := new(bytes.Buffer)
	t := new(tabwriter.Writer)
	t.Init(b, 0, gOpts.tabstop, 2, '\t', 0)
	fmt.Fprintln(t, "mark\tpath")
	for _, k := range keys {
		fmt.Fprintf(t, "%s\t%s\n", k, marks[k])
	}
	t.Flush()
	return b
}
// pollEvent returns the next input event, preferring keys synthesized
// by commands such as 'push' (keyChan) over terminal events (evChan).
// Key names are translated back into termbox key events.
func (ui *ui) pollEvent() termbox.Event {
	select {
	case key := <-ui.keyChan:
		ev := termbox.Event{Type: termbox.EventKey}
		// A single rune - possibly multi-byte, e.g. "ç" - is a plain
		// character key. The previous check compared len(key) == 1
		// (bytes), which misclassified non-ASCII runes pushed by
		// commands as unknown special keys.
		if utf8.RuneCountInString(key) == 1 {
			ev.Ch, _ = utf8.DecodeRuneInString(key)
		} else {
			switch {
			case key == "<lt>":
				ev.Ch = '<'
			case key == "<gt>":
				ev.Ch = '>'
			case reAltKey.MatchString(key):
				// "<a-x>" style alt-modified keys.
				match := reAltKey.FindStringSubmatch(key)[1]
				ev.Ch, _ = utf8.DecodeRuneInString(match)
				ev.Mod = termbox.ModAlt
			default:
				if val, ok := gValKey[key]; ok {
					ev.Key = val
				} else {
					ev.Key = termbox.KeyEsc
					ui.echoerrf("unknown key: %s", key)
				}
			}
		}
		return ev
	case ev := <-ui.evChan:
		return ev
	}
}
// This function is used to read a normal event on the client side. For keys,
// digits are interpreted as command counts but this is only done for digits
// preceding any non-digit characters (e.g. "42y2k" as 42 times "y2k").
func (ui *ui) readEvent(ch chan<- expr, ev termbox.Event) {
	draw := &callExpr{"draw", nil, 1}
	count := 1
	switch ev.Type {
	case termbox.EventKey:
		if ev.Ch != 0 {
			// Character keys accumulate into keyAcc; '<' and '>' are
			// stored in their escaped spellings.
			switch {
			case ev.Ch == '<':
				ui.keyAcc = append(ui.keyAcc, []rune("<lt>")...)
			case ev.Ch == '>':
				ui.keyAcc = append(ui.keyAcc, []rune("<gt>")...)
			case ev.Mod == termbox.ModAlt:
				ui.keyAcc = append(ui.keyAcc, '<', 'a', '-', ev.Ch, '>')
			case unicode.IsDigit(ev.Ch) && len(ui.keyAcc) == 0:
				// Leading digits form the command count.
				ui.keyCount = append(ui.keyCount, ev.Ch)
			default:
				ui.keyAcc = append(ui.keyAcc, ev.Ch)
			}
		} else {
			// Special keys are appended by their "<name>" spelling;
			// escape first cancels any pending input.
			val := gKeyVal[ev.Key]
			if val == "<esc>" {
				ch <- draw
				ui.keyAcc = nil
				ui.keyCount = nil
			}
			ui.keyAcc = append(ui.keyAcc, []rune(val)...)
		}
		if len(ui.keyAcc) == 0 {
			ch <- draw
			break
		}
		// Match the accumulated sequence against configured bindings.
		binds, ok := findBinds(gOpts.keys, string(ui.keyAcc))
		switch len(binds) {
		case 0:
			// No binding starts with this sequence: report and reset.
			ui.echoerrf("unknown mapping: %s", string(ui.keyAcc))
			ch <- draw
			ui.keyAcc = nil
			ui.keyCount = nil
			ui.menuBuf = nil
		default:
			if ok {
				// Exact match: attach the count and dispatch.
				if len(ui.keyCount) > 0 {
					c, err := strconv.Atoi(string(ui.keyCount))
					if err != nil {
						log.Printf("converting command count: %s", err)
					}
					count = c
				}
				expr := gOpts.keys[string(ui.keyAcc)]
				if e, ok := expr.(*callExpr); ok {
					e.count = count
				} else if e, ok := expr.(*listExpr); ok {
					e.count = count
				}
				ch <- expr
				ui.keyAcc = nil
				ui.keyCount = nil
				ui.menuBuf = nil
			} else {
				// Prefix of several bindings: show them in the menu.
				ui.menuBuf = listBinds(binds)
				ch <- draw
			}
		}
	case termbox.EventResize:
		ch <- &callExpr{"redraw", nil, 1}
	}
}
// readCmdEvent translates a key event received while the command line
// is active into either a 'cmdkeys' binding or a literal cmd-insert.
func readCmdEvent(ch chan<- expr, ev termbox.Event) {
	if ev.Ch != 0 {
		if ev.Mod == termbox.ModAlt {
			// Alt-modified characters are looked up as "<a-x>".
			val := string([]rune{'<', 'a', '-', ev.Ch, '>'})
			if expr, ok := gOpts.cmdkeys[val]; ok {
				ch <- expr
			}
		} else {
			ch <- &callExpr{"cmd-insert", []string{string(ev.Ch)}, 1}
		}
	} else {
		val := gKeyVal[ev.Key]
		if expr, ok := gOpts.cmdkeys[val]; ok {
			ch <- expr
		}
	}
}
// readExpr starts the event pump: a goroutine polls input events and
// evaluates them in either command-line or normal mode, sending the
// resulting expressions on the returned channel.
func (ui *ui) readExpr() <-chan expr {
	ch := make(chan expr)
	ui.exprChan = ch
	go func() {
		// Trigger an initial draw.
		ch <- &callExpr{"draw", nil, 1}
		for {
			ev := ui.pollEvent()
			if ui.cmdPrefix != "" && ev.Type == termbox.EventKey {
				readCmdEvent(ch, ev)
				continue
			}
			ui.readEvent(ch, ev)
		}
	}()
	return ch
}
// setColorMode selects 256-color or normal termbox output according
// to the 'color256' option.
func setColorMode() {
	if gOpts.color256 {
		termbox.SetOutputMode(termbox.Output256)
	} else {
		termbox.SetOutputMode(termbox.OutputNormal)
	}
}

// pause shuts termbox down, e.g. before running an external command.
func (ui *ui) pause() {
	termbox.Close()
}

// resume re-initializes termbox after a pause.
func (ui *ui) resume() {
	if err := termbox.Init(); err != nil {
		log.Fatalf("initializing termbox: %s", err)
	}
}

// sync forces a full terminal resynchronization.
func (ui *ui) sync() {
	if err := termbox.Sync(); err != nil {
		log.Printf("syncing termbox: %s", err)
	}
}
// listMatches renders completion candidates into as many
// tabstop-aligned columns as fit the terminal width, for display in
// the menu window.
func listMatches(matches []string) *bytes.Buffer {
	b := new(bytes.Buffer)
	wtot, _ := termbox.Size()
	// Column width: the longest match, rounded up to the next tabstop.
	wcol := 0
	for _, m := range matches {
		wcol = max(wcol, len(m))
	}
	wcol += gOpts.tabstop - wcol%gOpts.tabstop
	ncol := wtot / wcol
	b.WriteString("possible matches\n")
	for i := 0; i < len(matches); i++ {
		for j := 0; j < ncol && i < len(matches); i, j = i+1, j+1 {
			b.WriteString(fmt.Sprintf("%s%*s", matches[i], wcol-len(matches[i]), ""))
		}
		b.WriteByte('\n')
	}
	return b
}
Handle unicode characters in the 'push' command
Related: #250
package main
import (
"bytes"
"fmt"
"io"
"log"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"text/tabwriter"
"time"
"unicode"
"unicode/utf8"
"github.com/doronbehar/termbox-go"
"github.com/mattn/go-runewidth"
)
// gEscapeCode is the ASCII ESC byte that introduces ANSI escape
// sequences in strings handled by print/printLength.
const gEscapeCode = 27

// gKeyVal maps termbox key codes to the "<name>" spellings used in
// key mappings and configuration.
var gKeyVal = map[termbox.Key]string{
	termbox.KeyF1:             "<f-1>",
	termbox.KeyF2:             "<f-2>",
	termbox.KeyF3:             "<f-3>",
	termbox.KeyF4:             "<f-4>",
	termbox.KeyF5:             "<f-5>",
	termbox.KeyF6:             "<f-6>",
	termbox.KeyF7:             "<f-7>",
	termbox.KeyF8:             "<f-8>",
	termbox.KeyF9:             "<f-9>",
	termbox.KeyF10:            "<f-10>",
	termbox.KeyF11:            "<f-11>",
	termbox.KeyF12:            "<f-12>",
	termbox.KeyInsert:         "<insert>",
	termbox.KeyDelete:         "<delete>",
	termbox.KeyHome:           "<home>",
	termbox.KeyEnd:            "<end>",
	termbox.KeyPgup:           "<pgup>",
	termbox.KeyPgdn:           "<pgdn>",
	termbox.KeyArrowUp:        "<up>",
	termbox.KeyArrowDown:      "<down>",
	termbox.KeyArrowLeft:      "<left>",
	termbox.KeyArrowRight:     "<right>",
	termbox.KeyCtrlSpace:      "<c-space>",
	termbox.KeyCtrlA:          "<c-a>",
	termbox.KeyCtrlB:          "<c-b>",
	termbox.KeyCtrlC:          "<c-c>",
	termbox.KeyCtrlD:          "<c-d>",
	termbox.KeyCtrlE:          "<c-e>",
	termbox.KeyCtrlF:          "<c-f>",
	termbox.KeyCtrlG:          "<c-g>",
	termbox.KeyBackspace:      "<bs>",
	termbox.KeyTab:            "<tab>",
	termbox.KeyCtrlJ:          "<c-j>",
	termbox.KeyCtrlK:          "<c-k>",
	termbox.KeyCtrlL:          "<c-l>",
	termbox.KeyEnter:          "<enter>",
	termbox.KeyCtrlN:          "<c-n>",
	termbox.KeyCtrlO:          "<c-o>",
	termbox.KeyCtrlP:          "<c-p>",
	termbox.KeyCtrlQ:          "<c-q>",
	termbox.KeyCtrlR:          "<c-r>",
	termbox.KeyCtrlS:          "<c-s>",
	termbox.KeyCtrlT:          "<c-t>",
	termbox.KeyCtrlU:          "<c-u>",
	termbox.KeyCtrlV:          "<c-v>",
	termbox.KeyCtrlW:          "<c-w>",
	termbox.KeyCtrlX:          "<c-x>",
	termbox.KeyCtrlY:          "<c-y>",
	termbox.KeyCtrlZ:          "<c-z>",
	termbox.KeyEsc:            "<esc>",
	termbox.KeyCtrlBackslash:  "<c-\\>",
	termbox.KeyCtrlRsqBracket: "<c-]>",
	termbox.KeyCtrl6:          "<c-6>",
	termbox.KeyCtrlSlash:      "<c-/>",
	termbox.KeySpace:          "<space>",
	termbox.KeyBackspace2:     "<bs2>",
}

// gValKey is the inverse of gKeyVal, built once at startup.
var gValKey map[string]termbox.Key

func init() {
	gValKey = make(map[string]termbox.Key)
	for k, v := range gKeyVal {
		gValKey[v] = k
	}
}
// win is a rectangular region of the terminal: w/h are its size and
// x/y the position of its top-left corner in screen cells.
type win struct {
	w, h, x, y int
}

// newWin returns a window with the given size and position.
func newWin(w, h, x, y int) *win {
	return &win{w, h, x, y}
}

// renew resizes and repositions the window in place.
func (win *win) renew(w, h, x, y int) {
	win.w, win.h, win.x, win.y = w, h, x, y
}
// printLength returns the on-screen width of s in terminal cells,
// skipping ANSI SGR escape sequences, expanding tabs to the
// configured tabstop and counting wide runes as two cells.
func printLength(s string) int {
	ind := 0
	off := 0
	for i := 0; i < len(s); i++ {
		r, w := utf8.DecodeRuneInString(s[i:])
		if r == gEscapeCode && i+1 < len(s) && s[i+1] == '[' {
			// Skip over "\033[...m", looking at most 32 bytes ahead
			// for the terminating 'm'.
			j := strings.IndexByte(s[i:min(len(s), i+32)], 'm')
			if j == -1 {
				continue
			}
			i += j
			continue
		}
		i += w - 1 // skip the remaining bytes of a multi-byte rune
		if r == '\t' {
			ind += gOpts.tabstop - (ind-off)%gOpts.tabstop
		} else {
			ind += runewidth.RuneWidth(r)
		}
	}
	return ind
}
// print draws s into the window at cell (x, y), interpreting embedded
// ANSI SGR sequences to update fg/bg and expanding tabs. It returns
// the attributes in effect at the end so multi-line output can carry
// colors across calls.
func (win *win) print(x, y int, fg, bg termbox.Attribute, s string) (termbox.Attribute, termbox.Attribute) {
	off := x
	for i := 0; i < len(s); i++ {
		r, w := utf8.DecodeRuneInString(s[i:])
		if r == gEscapeCode && i+1 < len(s) && s[i+1] == '[' {
			// Parse "\033[...m" and apply its SGR codes to fg/bg.
			j := strings.IndexByte(s[i:min(len(s), i+32)], 'm')
			if j == -1 {
				continue
			}
			fg, bg = applyAnsiCodes(s[i+2:i+j], fg, bg)
			i += j
			continue
		}
		// Clip to the window's right edge.
		if x < win.w {
			termbox.SetCell(win.x+x, win.y+y, r, fg, bg)
		}
		i += w - 1 // skip the remaining bytes of a multi-byte rune
		if r == '\t' {
			// Fill the tab with spaces up to the next tabstop.
			s := gOpts.tabstop - (x-off)%gOpts.tabstop
			for i := 0; i < s && x+i < win.w; i++ {
				termbox.SetCell(win.x+x+i, win.y+y, ' ', fg, bg)
			}
			x += s
		} else {
			x += runewidth.RuneWidth(r)
		}
	}
	return fg, bg
}
// printf formats its arguments and prints them at (x, y).
func (win *win) printf(x, y int, fg, bg termbox.Attribute, format string, a ...interface{}) {
	win.print(x, y, fg, bg, fmt.Sprintf(format, a...))
}

// printLine prints s and pads the rest of the line with spaces.
// NOTE(review): the padding uses len(s), the byte length, so strings
// with multi-byte or wide runes pad short - presumably callers only
// pass ASCII here; confirm.
func (win *win) printLine(x, y int, fg, bg termbox.Attribute, s string) {
	win.printf(x, y, fg, bg, "%s%*s", s, win.w-len(s), "")
}

// printRight prints s against the window's right edge (positioned by
// byte length; see the note on printLine).
func (win *win) printRight(y int, fg, bg termbox.Attribute, s string) {
	win.print(win.w-len(s), y, fg, bg, s)
}
// printReg draws a file-preview register into the window, carrying
// ANSI colors across lines. A nil register draws nothing; a register
// still loading shows a placeholder.
func (win *win) printReg(reg *reg) {
	if reg == nil {
		return
	}
	fg, bg := termbox.ColorDefault, termbox.ColorDefault
	if reg.loading {
		fg = termbox.AttrReverse
		win.print(2, 0, fg, bg, "loading...")
		return
	}
	for i, l := range reg.lines {
		fg, bg = win.print(2, i, fg, bg, l)
	}
}
// fileInfo builds the right-aligned info string for a file according
// to the 'info' option: size (or directory entry count when
// 'dircounts' is set), mtime, atime and/or ctime.
func fileInfo(f *file, d *dir) string {
	var info string
	path := filepath.Join(d.path, f.Name())
	for _, s := range gOpts.info {
		switch s {
		case "size":
			if !(gOpts.dircounts && f.IsDir()) {
				info = fmt.Sprintf("%s %4s", info, humanize(f.Size()))
				continue
			}
			if f.dirCount == -1 {
				// Lazily count directory entries, capped at 1000;
				// -2 marks an unreadable directory.
				//
				// Fix: the previous code fell through after a failed
				// Open, calling Readdirnames/Close on the unusable
				// handle and clobbering the -2 marker.
				d, err := os.Open(path)
				if err != nil {
					f.dirCount = -2
				} else {
					names, err := d.Readdirnames(1000)
					d.Close()
					if names == nil && err != io.EOF {
						f.dirCount = -2
					} else {
						f.dirCount = len(names)
					}
				}
			}
			switch {
			case f.dirCount < 0:
				info = fmt.Sprintf("%s ?", info)
			case f.dirCount < 1000:
				info = fmt.Sprintf("%s %4d", info, f.dirCount)
			default:
				info = fmt.Sprintf("%s 999+", info)
			}
		case "time":
			info = fmt.Sprintf("%s %12s", info, f.ModTime().Format("Jan _2 15:04"))
		case "atime":
			info = fmt.Sprintf("%s %12s", info, f.accessTime.Format("Jan _2 15:04"))
		case "ctime":
			info = fmt.Sprintf("%s %12s", info, f.changeTime.Format("Jan _2 15:04"))
		default:
			log.Printf("unknown info type: %s", s)
		}
	}
	return info
}
// printDir renders a directory listing into the window: optional line
// numbers, selection/copy/cut markers, icons, names truncated or
// padded to fit, and a right-aligned info column.
func (win *win) printDir(dir *dir, selections map[string]int, saves map[string]bool, colors colorMap, icons iconMap) {
	if win.w < 5 || dir == nil {
		return
	}
	if dir.loading {
		win.print(2, 0, termbox.AttrReverse, termbox.ColorDefault, "loading...")
		return
	}
	if dir.noPerm {
		win.print(2, 0, termbox.AttrReverse, termbox.ColorDefault, "permission denied")
		return
	}
	if len(dir.files) == 0 {
		win.print(2, 0, termbox.AttrReverse, termbox.ColorDefault, "empty")
		return
	}
	// Visible slice of the listing: the cursor (dir.ind) stays at
	// row dir.pos within the window.
	beg := max(dir.ind-dir.pos, 0)
	end := min(beg+win.h, len(dir.files))
	if beg > end {
		return
	}
	// Width and format of the optional line-number column.
	var lnwidth int
	var lnformat string
	if gOpts.number || gOpts.relativenumber {
		lnwidth = 1
		if gOpts.number && gOpts.relativenumber {
			lnwidth++
		}
		for j := 10; j < len(dir.files); j *= 10 {
			lnwidth++
		}
		lnformat = fmt.Sprintf("%%%d.d ", lnwidth)
	}
	for i, f := range dir.files[beg:end] {
		fg, bg := colors.get(f)
		if lnwidth > 0 {
			var ln string
			if gOpts.number && (!gOpts.relativenumber) {
				ln = fmt.Sprintf(lnformat, i+1+beg)
			} else if gOpts.relativenumber {
				switch {
				case i < dir.pos:
					ln = fmt.Sprintf(lnformat, dir.pos-i)
				case i > dir.pos:
					ln = fmt.Sprintf(lnformat, i-dir.pos)
				case gOpts.number:
					// Cursor line shows the absolute number when
					// both options are enabled.
					ln = fmt.Sprintf(fmt.Sprintf("%%%d.d ", lnwidth-1), i+1+beg)
				default:
					ln = ""
				}
			}
			win.print(0, i, termbox.ColorYellow, bg, ln)
		}
		// Marker column: magenta for selections, yellow/red for
		// files saved for copy/move.
		path := filepath.Join(dir.path, f.Name())
		if _, ok := selections[path]; ok {
			win.print(lnwidth, i, fg, termbox.ColorMagenta, " ")
		} else if cp, ok := saves[path]; ok {
			if cp {
				win.print(lnwidth, i, fg, termbox.ColorYellow, " ")
			} else {
				win.print(lnwidth, i, fg, termbox.ColorRed, " ")
			}
		}
		if i == dir.pos {
			fg |= termbox.AttrReverse // highlight the cursor line
		}
		// Assemble the visible row: space, optional icon, file name.
		var s []rune
		s = append(s, ' ')
		if gOpts.icons {
			s = append(s, []rune(icons.get(f))...)
			s = append(s, ' ')
		}
		for _, r := range f.Name() {
			s = append(s, r)
		}
		// Truncate or pad the name to the window width.
		w := runeSliceWidth(s)
		if w > win.w-3 {
			s = runeSliceWidthRange(s, 0, win.w-4)
			s = append(s, []rune(gOpts.truncatechar)...)
		} else {
			for i := 0; i < win.w-3-w; i++ {
				s = append(s, ' ')
			}
		}
		// Append the info column when it fits in under half the
		// window width.
		info := fileInfo(f, dir)
		if len(info) > 0 && win.w-2 > 2*len(info) {
			if win.w-2 > w+len(info) {
				s = runeSliceWidthRange(s, 0, win.w-3-len(info)-lnwidth)
			} else {
				s = runeSliceWidthRange(s, 0, win.w-4-len(info)-lnwidth)
				s = append(s, []rune(gOpts.truncatechar)...)
			}
			for _, r := range info {
				s = append(s, r)
			}
		}
		s = append(s, ' ')
		win.print(lnwidth+1, i, fg, bg, string(s))
	}
}
// ui holds all terminal UI state: the pane windows, the prompt/status/menu
// lines, preview caches, and input accumulation buffers.
type ui struct {
	wins        []*win             // one window per directory pane
	promptWin   *win               // top line showing the prompt
	msgWin      *win               // bottom line: messages and command line
	menuWin     *win               // overlay listing candidate key bindings
	msg         string             // current status message
	regPrev     *reg               // cached preview of the current regular file
	dirPrev     *dir               // cached preview of the current directory
	exprChan    chan expr          // expressions produced from input events
	keyChan     chan string        // synthetic key names consumed by pollEvent
	evChan      chan termbox.Event // terminal events (after Esc->Alt folding)
	menuBuf     *bytes.Buffer      // menu contents; nil when the menu is hidden
	cmdPrefix   string             // command-line prompt; empty when inactive
	cmdAccLeft  []rune             // command-line text left of the cursor
	cmdAccRight []rune             // command-line text right of the cursor
	cmdYankBuf  []rune             // command-line yank buffer — TODO confirm use
	keyAcc      []rune             // pending (non-count) key presses
	keyCount    []rune             // pending count-prefix digits
	colors      colorMap           // file coloring configuration
	icons       iconMap            // file icon configuration
}
// getWidths splits the total terminal width wtot among the panes according
// to gOpts.ratios. The last pane absorbs the integer-division remainder,
// minus one column for the right border when drawbox is set.
func getWidths(wtot int) []int {
	ratios := gOpts.ratios
	total := 0
	for _, ratio := range ratios {
		total += ratio
	}
	widths := make([]int, len(ratios))
	unit := wtot / total
	used := 0
	for i := 0; i < len(ratios)-1; i++ {
		widths[i] = ratios[i] * unit
		used += widths[i]
	}
	last := len(ratios) - 1
	widths[last] = wtot - used
	if gOpts.drawbox {
		widths[last]--
	}
	return widths
}
// getWins creates the directory pane windows sized for the current terminal,
// leaving room for the prompt and status lines (and for the border when
// drawbox is set).
func getWins() []*win {
	wtot, htot := termbox.Size()
	var wins []*win
	widths := getWidths(wtot)
	wacc := 0
	wlen := len(widths)
	for i := 0; i < wlen; i++ {
		if gOpts.drawbox {
			// Inside the box: one column in from the left, two rows down.
			wins = append(wins, newWin(widths[i], htot-4, wacc+1, 2))
		} else {
			wins = append(wins, newWin(widths[i], htot-2, wacc, 1))
		}
		wacc += widths[i]
	}
	return wins
}
// newUI initializes the UI and starts the event pump goroutines. A filter
// goroutine translates an Esc immediately followed (within 100ms) by another
// key into an Alt-modified key event, since terminals encode Alt as an Esc
// prefix; a lone Esc is delivered unchanged after the timeout.
func newUI() *ui {
	wtot, htot := termbox.Size()
	evQueue := make(chan termbox.Event)
	go func() {
		for {
			evQueue <- termbox.PollEvent()
		}
	}()
	evChan := make(chan termbox.Event)
	go func() {
		for {
			ev := <-evQueue
			if ev.Type == termbox.EventKey && ev.Key == termbox.KeyEsc {
				select {
				case ev2 := <-evQueue:
					// Key arrived right after Esc: treat it as Alt+key.
					ev2.Mod = termbox.ModAlt
					evChan <- ev2
					continue
				case <-time.After(100 * time.Millisecond):
					// Lone Esc: fall through and deliver it as-is.
				}
			}
			evChan <- ev
		}
	}()
	return &ui{
		wins:      getWins(),
		promptWin: newWin(wtot, 1, 0, 0),
		msgWin:    newWin(wtot, 1, 0, htot-1),
		menuWin:   newWin(wtot, 1, 0, htot-2),
		keyChan:   make(chan string, 1000),
		evChan:    evChan,
		colors:    parseColors(),
		icons:     parseIcons(),
	}
}
// renew resizes every window to match the current terminal dimensions,
// mirroring the layout logic of getWins.
func (ui *ui) renew() {
	wtot, htot := termbox.Size()
	wacc := 0
	for i, w := range getWidths(wtot) {
		if gOpts.drawbox {
			ui.wins[i].renew(w, htot-4, wacc+1, 2)
		} else {
			ui.wins[i].renew(w, htot-2, wacc, 1)
		}
		wacc += w
	}
	ui.promptWin.renew(wtot, 1, 0, 0)
	ui.msgWin.renew(wtot, 1, 0, htot-1)
	ui.menuWin.renew(wtot, 1, 0, htot-2)
}
// sort re-sorts the directory preview (if any) and restores the selection on
// the file that was previously selected.
func (ui *ui) sort() {
	if ui.dirPrev == nil {
		return
	}
	name := ui.dirPrev.name()
	ui.dirPrev.sort()
	ui.dirPrev.sel(name, ui.wins[0].h)
}
// echo sets the status-line message without logging it.
func (ui *ui) echo(msg string) {
	ui.msg = msg
}
// echof is echo with Printf-style formatting.
func (ui *ui) echof(format string, a ...interface{}) {
	ui.echo(fmt.Sprintf(format, a...))
}
// echomsg shows msg in the status line and also records it in the log.
func (ui *ui) echomsg(msg string) {
	log.Print(msg)
	ui.msg = msg
}
// echomsgf is echomsg with Printf-style formatting.
func (ui *ui) echomsgf(format string, a ...interface{}) {
	ui.echomsg(fmt.Sprintf(format, a...))
}
// echoerr shows msg formatted via the errorfmt option and logs it with an
// "error:" prefix.
func (ui *ui) echoerr(msg string) {
	log.Printf("error: %s", msg)
	ui.msg = fmt.Sprintf(gOpts.errorfmt, msg)
}
// echoerrf is echoerr with Printf-style formatting.
func (ui *ui) echoerrf(format string, a ...interface{}) {
	ui.echoerr(fmt.Sprintf(format, a...))
}
// reg caches the preview contents of a regular file.
type reg struct {
	loading  bool      // the preview is still being generated
	loadTime time.Time // when loading started — TODO confirm exact semantics
	path     string    // file this preview belongs to
	lines    []string  // preview text, one entry per line
}
// loadFile loads the preview pane content for the currently selected file: a
// directory listing for directories, file contents for regular files. No-op
// when previews are disabled or there is no current file.
func (ui *ui) loadFile(nav *nav) {
	curr, err := nav.currFile()
	if err != nil {
		return
	}
	if !gOpts.preview {
		return
	}
	if curr.IsDir() {
		ui.dirPrev = nav.loadDir(curr.path)
	} else if curr.Mode().IsRegular() {
		ui.regPrev = nav.loadReg(curr.path)
	}
}
// loadFileInfo shows the current file's mode, size, modification time, and
// symlink target (if any) in the status line.
func (ui *ui) loadFileInfo(nav *nav) {
	curr, err := nav.currFile()
	if err != nil {
		return
	}
	var linkTarget string
	if curr.linkTarget != "" {
		linkTarget = " -> " + curr.linkTarget
	}
	ui.echof("%v %4s %v%s", curr.Mode(), humanize(curr.Size()), curr.ModTime().Format(gOpts.timefmt), linkTarget)
}
// drawPromptLine renders the top prompt from promptfmt, substituting %u
// (user), %h (host), %f (current file name), and %w (working directory, with
// the home prefix shown as ~). When the result is too wide, directory
// components are shortened to their first rune, left to right, until the
// prompt fits.
func (ui *ui) drawPromptLine(nav *nav) {
	fg, bg := termbox.ColorDefault, termbox.ColorDefault
	pwd := nav.currDir().path
	if strings.HasPrefix(pwd, gUser.HomeDir) {
		pwd = filepath.Join("~", strings.TrimPrefix(pwd, gUser.HomeDir))
	}
	pwd = filepath.Clean(pwd)
	// Ensure a trailing separator so splitting/joining below is uniform.
	sep := string(filepath.Separator)
	if !strings.HasSuffix(pwd, sep) {
		pwd += sep
	}
	var fname string
	curr, err := nav.currFile()
	if err == nil {
		fname = filepath.Base(curr.path)
	}
	var prompt string
	prompt = strings.Replace(gOpts.promptfmt, "%u", gUser.Username, -1)
	prompt = strings.Replace(prompt, "%h", gHostname, -1)
	prompt = strings.Replace(prompt, "%f", fname, -1)
	if printLength(strings.Replace(prompt, "%w", pwd, -1)) > ui.promptWin.w {
		names := strings.Split(pwd, sep)
		for i := range names {
			if names[i] == "" {
				continue
			}
			// Shorten this component to its first rune, then stop as soon
			// as the prompt fits.
			r, _ := utf8.DecodeRuneInString(names[i])
			names[i] = string(r)
			if printLength(strings.Replace(prompt, "%w", strings.Join(names, sep), -1)) <= ui.promptWin.w {
				break
			}
		}
		pwd = strings.Join(names, sep)
	}
	prompt = strings.Replace(prompt, "%w", pwd, -1)
	ui.promptWin.print(0, 0, fg, bg, prompt)
}
// drawStatLine renders the bottom status line: the current message on the
// left and, right-aligned, pending key input plus copy/move/delete progress
// and the "index/total" position indicator.
func (ui *ui) drawStatLine(nav *nav) {
	fg, bg := termbox.ColorDefault, termbox.ColorDefault
	currDir := nav.currDir()
	ui.msgWin.print(0, 0, fg, bg, ui.msg)
	tot := len(currDir.files)
	ind := min(currDir.ind+1, tot)
	// Pending count digits followed by pending keys, e.g. "12g".
	acc := string(ui.keyCount) + string(ui.keyAcc)
	var progress string
	if nav.copyTotal > 0 {
		percentage := int((100 * float64(nav.copyBytes)) / float64(nav.copyTotal))
		progress += fmt.Sprintf(" [%d%%]", percentage)
	}
	if nav.moveTotal > 0 {
		progress += fmt.Sprintf(" [%d/%d]", nav.moveCount, nav.moveTotal)
	}
	if nav.deleteTotal > 0 {
		progress += fmt.Sprintf(" [%d/%d]", nav.deleteCount, nav.deleteTotal)
	}
	ruler := fmt.Sprintf("%s%s %d/%d", acc, progress, ind, tot)
	ui.msgWin.printRight(0, fg, bg, ruler)
}
// drawBox draws the outer border and the vertical separators between panes
// with box-drawing characters (used when the drawbox option is set).
func (ui *ui) drawBox() {
	fg, bg := termbox.ColorDefault, termbox.ColorDefault
	w, h := termbox.Size()
	// Horizontal edges (row 1 below the prompt, row h-2 above the status).
	for i := 1; i < w-1; i++ {
		termbox.SetCell(i, 1, '─', fg, bg)
		termbox.SetCell(i, h-2, '─', fg, bg)
	}
	// Vertical edges.
	for i := 2; i < h-2; i++ {
		termbox.SetCell(0, i, '│', fg, bg)
		termbox.SetCell(w-1, i, '│', fg, bg)
	}
	// Corners.
	termbox.SetCell(0, 1, '┌', fg, bg)
	termbox.SetCell(w-1, 1, '┐', fg, bg)
	termbox.SetCell(0, h-2, '└', fg, bg)
	termbox.SetCell(w-1, h-2, '┘', fg, bg)
	// Separators between adjacent panes, with T-junctions at the edges.
	wacc := 0
	for wind := 0; wind < len(ui.wins)-1; wind++ {
		wacc += ui.wins[wind].w
		termbox.SetCell(wacc, 1, '┬', fg, bg)
		for i := 2; i < h-2; i++ {
			termbox.SetCell(wacc, i, '│', fg, bg)
		}
		termbox.SetCell(wacc, h-2, '┴', fg, bg)
	}
}
// draw repaints the whole screen: prompt line, directory panes (reserving
// the rightmost pane for the preview when enabled), the status or command
// line depending on cmdPrefix, the preview pane, the optional box border,
// and the binding menu overlay.
func (ui *ui) draw(nav *nav) {
	fg, bg := termbox.ColorDefault, termbox.ColorDefault
	termbox.Clear(fg, bg)
	ui.drawPromptLine(nav)
	// Map the tail of nav.dirs onto the available panes; with preview on,
	// the last pane is excluded from directory display.
	length := min(len(ui.wins), len(nav.dirs))
	woff := len(ui.wins) - length
	if gOpts.preview {
		length = min(len(ui.wins)-1, len(nav.dirs))
		woff = len(ui.wins) - 1 - length
	}
	doff := len(nav.dirs) - length
	for i := 0; i < length; i++ {
		ui.wins[woff+i].printDir(nav.dirs[doff+i], nav.selections, nav.saves, ui.colors, ui.icons)
	}
	switch ui.cmdPrefix {
	case "":
		// No command line active: show the status line.
		ui.drawStatLine(nav)
		termbox.HideCursor()
	case ">":
		// Prompt-with-message form (e.g. confirmation prompts).
		ui.msgWin.printLine(0, 0, fg, bg, ui.cmdPrefix)
		ui.msgWin.print(len(ui.cmdPrefix), 0, fg, bg, ui.msg)
		ui.msgWin.print(len(ui.cmdPrefix)+len(ui.msg), 0, fg, bg, string(ui.cmdAccLeft))
		ui.msgWin.print(len(ui.cmdPrefix)+len(ui.msg)+runeSliceWidth(ui.cmdAccLeft), 0, fg, bg, string(ui.cmdAccRight))
		termbox.SetCursor(ui.msgWin.x+len(ui.cmdPrefix)+len(ui.msg)+runeSliceWidth(ui.cmdAccLeft), ui.msgWin.y)
	default:
		// Regular command line; cursor sits between the two halves.
		ui.msgWin.printLine(0, 0, fg, bg, ui.cmdPrefix)
		ui.msgWin.print(len(ui.cmdPrefix), 0, fg, bg, string(ui.cmdAccLeft))
		ui.msgWin.print(len(ui.cmdPrefix)+runeSliceWidth(ui.cmdAccLeft), 0, fg, bg, string(ui.cmdAccRight))
		termbox.SetCursor(ui.msgWin.x+len(ui.cmdPrefix)+runeSliceWidth(ui.cmdAccLeft), ui.msgWin.y)
	}
	if gOpts.preview {
		f, err := nav.currFile()
		if err == nil {
			preview := ui.wins[len(ui.wins)-1]
			if f.IsDir() {
				preview.printDir(ui.dirPrev, nav.selections, nav.saves, ui.colors, ui.icons)
			} else if f.Mode().IsRegular() {
				preview.printReg(ui.regPrev)
			}
		}
	}
	if gOpts.drawbox {
		ui.drawBox()
	}
	if ui.menuBuf != nil {
		// Anchor the menu to the bottom of the pane area; first line is the
		// bold header.
		lines := strings.Split(ui.menuBuf.String(), "\n")
		lines = lines[:len(lines)-1]
		ui.menuWin.h = len(lines) - 1
		ui.menuWin.y = ui.wins[0].h - ui.menuWin.h
		if gOpts.drawbox {
			ui.menuWin.y += 2
		}
		ui.menuWin.printLine(0, 0, termbox.AttrBold, termbox.AttrBold, lines[0])
		for i, line := range lines[1:] {
			ui.menuWin.printLine(0, i+1, fg, bg, "")
			ui.menuWin.print(0, i+1, fg, bg, line)
		}
	}
	termbox.Flush()
}
// findBinds returns every binding whose key sequence starts with prefix.
// ok reports whether prefix itself is bound exactly.
func findBinds(keys map[string]expr, prefix string) (binds map[string]expr, ok bool) {
	binds = make(map[string]expr)
	for key, e := range keys {
		if strings.HasPrefix(key, prefix) {
			binds[key] = e
			if key == prefix {
				ok = true
			}
		}
	}
	return
}
// listBinds renders a sorted "keys\tcommand" table of the given bindings
// into a buffer, tab-aligned for display in the menu window.
func listBinds(binds map[string]expr) *bytes.Buffer {
	t := new(tabwriter.Writer)
	b := new(bytes.Buffer)
	var keys []string
	for k := range binds {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	t.Init(b, 0, gOpts.tabstop, 2, '\t', 0)
	fmt.Fprintln(t, "keys\tcommand")
	for _, k := range keys {
		fmt.Fprintf(t, "%s\t%v\n", k, binds[k])
	}
	t.Flush()
	return b
}
// listMarks renders a sorted "mark\tpath" table of the given marks into a
// buffer, tab-aligned for display in the menu window.
func listMarks(marks map[string]string) *bytes.Buffer {
	keys := make([]string, 0, len(marks))
	for k := range marks {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	b := new(bytes.Buffer)
	t := new(tabwriter.Writer)
	t.Init(b, 0, gOpts.tabstop, 2, '\t', 0)
	fmt.Fprintln(t, "mark\tpath")
	for _, k := range keys {
		fmt.Fprintf(t, "%s\t%s\n", k, marks[k])
	}
	t.Flush()
	return b
}
// pollEvent returns the next input event, preferring synthetic key names
// queued on keyChan (converted back to termbox events here) over real
// terminal events. An unknown key name degrades to Esc with an error shown.
func (ui *ui) pollEvent() termbox.Event {
	select {
	case key := <-ui.keyChan:
		ev := termbox.Event{Type: termbox.EventKey}
		if utf8.RuneCountInString(key) == 1 {
			// Single rune: a literal character key.
			ev.Ch, _ = utf8.DecodeRuneInString(key)
		} else {
			switch {
			case key == "<lt>":
				ev.Ch = '<'
			case key == "<gt>":
				ev.Ch = '>'
			case reAltKey.MatchString(key):
				// "<a-x>"-style names become Alt-modified runes.
				match := reAltKey.FindStringSubmatch(key)[1]
				ev.Ch, _ = utf8.DecodeRuneInString(match)
				ev.Mod = termbox.ModAlt
			default:
				if val, ok := gValKey[key]; ok {
					ev.Key = val
				} else {
					ev.Key = termbox.KeyEsc
					ui.echoerrf("unknown key: %s", key)
				}
			}
		}
		return ev
	case ev := <-ui.evChan:
		return ev
	}
}
// This function is used to read a normal event on the client side. For keys,
// digits are interpreted as command counts but this is only done for digits
// preceding any non-digit characters (e.g. "42y2k" as 42 times "y2k").
func (ui *ui) readEvent(ch chan<- expr, ev termbox.Event) {
	draw := &callExpr{"draw", nil, 1}
	count := 1
	switch ev.Type {
	case termbox.EventKey:
		if ev.Ch != 0 {
			switch {
			case ev.Ch == '<':
				ui.keyAcc = append(ui.keyAcc, []rune("<lt>")...)
			case ev.Ch == '>':
				ui.keyAcc = append(ui.keyAcc, []rune("<gt>")...)
			case ev.Mod == termbox.ModAlt:
				// Alt-modified runes accumulate as "<a-x>".
				ui.keyAcc = append(ui.keyAcc, '<', 'a', '-', ev.Ch, '>')
			case unicode.IsDigit(ev.Ch) && len(ui.keyAcc) == 0:
				// Leading digits build the count prefix.
				ui.keyCount = append(ui.keyCount, ev.Ch)
			default:
				ui.keyAcc = append(ui.keyAcc, ev.Ch)
			}
		} else {
			val := gKeyVal[ev.Key]
			if val == "<esc>" {
				// Esc cancels pending keys and count before accumulating.
				ch <- draw
				ui.keyAcc = nil
				ui.keyCount = nil
			}
			ui.keyAcc = append(ui.keyAcc, []rune(val)...)
		}
		if len(ui.keyAcc) == 0 {
			ch <- draw
			break
		}
		binds, ok := findBinds(gOpts.keys, string(ui.keyAcc))
		switch len(binds) {
		case 0:
			// No binding starts with the accumulated keys.
			ui.echoerrf("unknown mapping: %s", string(ui.keyAcc))
			ch <- draw
			ui.keyAcc = nil
			ui.keyCount = nil
			ui.menuBuf = nil
		default:
			if ok {
				// Exact match: dispatch the bound expression with the count.
				if len(ui.keyCount) > 0 {
					c, err := strconv.Atoi(string(ui.keyCount))
					if err != nil {
						log.Printf("converting command count: %s", err)
					}
					// NOTE(review): on Atoi failure c is 0 but is still
					// assigned, so count becomes 0 instead of staying 1 —
					// confirm intended.
					count = c
				}
				expr := gOpts.keys[string(ui.keyAcc)]
				if e, ok := expr.(*callExpr); ok {
					e.count = count
				} else if e, ok := expr.(*listExpr); ok {
					e.count = count
				}
				ch <- expr
				ui.keyAcc = nil
				ui.keyCount = nil
				ui.menuBuf = nil
			} else {
				// Proper prefix of several bindings: show the candidates.
				ui.menuBuf = listBinds(binds)
				ch <- draw
			}
		}
	case termbox.EventResize:
		ch <- &callExpr{"redraw", nil, 1}
	}
}
// readCmdEvent translates a key event received while the command line is
// active into an editing expression sent on ch. Unbound special keys are
// ignored; plain runes become cmd-insert calls.
func readCmdEvent(ch chan<- expr, ev termbox.Event) {
	if ev.Ch == 0 {
		// Special key: dispatch its named binding, if any.
		if expr, ok := gOpts.cmdkeys[gKeyVal[ev.Key]]; ok {
			ch <- expr
		}
		return
	}
	if ev.Mod == termbox.ModAlt {
		val := string([]rune{'<', 'a', '-', ev.Ch, '>'})
		if expr, ok := gOpts.cmdkeys[val]; ok {
			ch <- expr
		}
		return
	}
	ch <- &callExpr{"cmd-insert", []string{string(ev.Ch)}, 1}
}
// readExpr starts the input goroutine that turns events into expressions on
// the returned channel. While a command line is active (cmdPrefix set), key
// events are routed to readCmdEvent instead of normal key handling.
func (ui *ui) readExpr() <-chan expr {
	ch := make(chan expr)
	ui.exprChan = ch
	go func() {
		// Trigger an initial draw.
		ch <- &callExpr{"draw", nil, 1}
		for {
			ev := ui.pollEvent()
			if ui.cmdPrefix != "" && ev.Type == termbox.EventKey {
				readCmdEvent(ch, ev)
				continue
			}
			ui.readEvent(ch, ev)
		}
	}()
	return ch
}
func setColorMode() {
if gOpts.color256 {
termbox.SetOutputMode(termbox.Output256)
} else {
termbox.SetOutputMode(termbox.OutputNormal)
}
}
// pause closes termbox, releasing the terminal (see resume).
func (ui *ui) pause() {
	termbox.Close()
}
// resume re-initializes termbox after a pause; failure is fatal.
func (ui *ui) resume() {
	if err := termbox.Init(); err != nil {
		log.Fatalf("initializing termbox: %s", err)
	}
}
// sync forces a full terminal resynchronization, logging any failure.
func (ui *ui) sync() {
	if err := termbox.Sync(); err != nil {
		log.Printf("syncing termbox: %s", err)
	}
}
// listMatches lays out completion matches into tabstop-aligned columns that
// fit the terminal width and returns the result in a buffer.
func listMatches(matches []string) *bytes.Buffer {
	b := new(bytes.Buffer)
	wtot, _ := termbox.Size()
	// Column width: longest match rounded up to the next tabstop multiple.
	wcol := 0
	for _, m := range matches {
		wcol = max(wcol, len(m))
	}
	wcol += gOpts.tabstop - wcol%gOpts.tabstop
	// NOTE(review): ncol is 0 when the terminal is narrower than wcol; the
	// inner loop then emits empty lines — confirm intended.
	ncol := wtot / wcol
	b.WriteString("possible matches\n")
	for i := 0; i < len(matches); i++ {
		for j := 0; j < ncol && i < len(matches); i, j = i+1, j+1 {
			b.WriteString(fmt.Sprintf("%s%*s", matches[i], wcol-len(matches[i]), ""))
		}
		b.WriteByte('\n')
	}
	return b
}
|
package pkg
import (
"fmt"
"io/ioutil"
"path"
"github.com/ghodss/yaml"
"github.com/howardjohn/istio-release/pkg/model"
"github.com/howardjohn/istio-release/util"
)
// Package assembles the release artifacts enabled in the manifest into the
// out/ directory: docker images, helm charts, the sidecar debian package
// (with sha file), per-platform istioctl archives (with sha files), a source
// tarball, and the manifest itself.
func Package(manifest model.Manifest) error {
	out := path.Join(manifest.WorkingDirectory, "out")
	// Build outputs land under work/out for the linux_amd64 release.
	istioOut := path.Join(manifest.WorkingDirectory, "work", "out", "linux_amd64", "release")
	if manifest.ShouldBuild(model.Docker) {
		if err := util.CopyDir(path.Join(istioOut, "docker"), path.Join(out, "docker")); err != nil {
			return fmt.Errorf("failed to package docker images: %v", err)
		}
	}
	if manifest.ShouldBuild(model.Helm) {
		if err := util.CopyDir(path.Join(manifest.WorkingDirectory, "work", "helm", "packages"), path.Join(out, "charts")); err != nil {
			return fmt.Errorf("failed to package helm chart: %v", err)
		}
	}
	if manifest.ShouldBuild(model.Debian) {
		if err := util.CopyFile(path.Join(istioOut, "istio-sidecar.deb"), path.Join(out, "deb", "istio-sidecar.deb")); err != nil {
			return fmt.Errorf("failed to package istio-sidecar.deb: %v", err)
		}
		if err := util.CreateSha(path.Join(out, "deb", "istio-sidecar.deb")); err != nil {
			return fmt.Errorf("failed to package istio-sidecar.deb: %v", err)
		}
	}
	if manifest.ShouldBuild(model.Istioctl) {
		for _, arch := range []string{"linux", "osx", "win"} {
			archive := fmt.Sprintf("istio-%s-%s.tar.gz", manifest.Version, arch)
			if arch == "win" {
				// Windows archives are zipped rather than tarred.
				archive = fmt.Sprintf("istio-%s-%s.zip", manifest.Version, arch)
			}
			archivePath := path.Join(manifest.WorkingDirectory, "work", "archive", arch, archive)
			dest := path.Join(out, archive)
			if err := util.CopyFile(archivePath, dest); err != nil {
				return fmt.Errorf("failed to package %v release archive: %v", arch, err)
			}
			if err := util.CreateSha(dest); err != nil {
				return fmt.Errorf("failed to package %v: %v", dest, err)
			}
		}
	}
	// Bundle sources
	cmd := util.VerboseCommand("tar", "-czf", "out/sources.tar.gz", "sources")
	cmd.Dir = path.Join(manifest.WorkingDirectory)
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("failed to bundle sources: %v", err)
	}
	// Manifest
	if err := writeManifest(manifest); err != nil {
		return fmt.Errorf("failed to write manifest: %v", err)
	}
	return nil
}
// writeManifest serializes the manifest as YAML to out/manifest.yaml in the
// working directory.
func writeManifest(manifest model.Manifest) error {
	// TODO we should replace indirect refs with SHA (in other part of code)
	yml, err := yaml.Marshal(manifest)
	if err != nil {
		return fmt.Errorf("failed to marshal manifest: %v", err)
	}
	if err := ioutil.WriteFile(path.Join(manifest.WorkingDirectory, "out", "manifest.yaml"), yml, 0640); err != nil {
		return fmt.Errorf("failed to write manifest: %v", err)
	}
	return nil
}
Package the full license
package pkg
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"github.com/ghodss/yaml"
"github.com/howardjohn/istio-release/pkg/model"
"github.com/howardjohn/istio-release/util"
)
// Package assembles the release artifacts enabled in the manifest into the
// out/ directory: docker images, helm charts, the sidecar debian package
// (with sha file), per-platform istioctl archives (with sha files), a source
// tarball, the manifest, and the aggregate LICENSES file.
func Package(manifest model.Manifest) error {
	out := path.Join(manifest.WorkingDirectory, "out")
	// Build outputs land under work/out for the linux_amd64 release.
	istioOut := path.Join(manifest.WorkingDirectory, "work", "out", "linux_amd64", "release")
	if manifest.ShouldBuild(model.Docker) {
		if err := util.CopyDir(path.Join(istioOut, "docker"), path.Join(out, "docker")); err != nil {
			return fmt.Errorf("failed to package docker images: %v", err)
		}
	}
	if manifest.ShouldBuild(model.Helm) {
		if err := util.CopyDir(path.Join(manifest.WorkingDirectory, "work", "helm", "packages"), path.Join(out, "charts")); err != nil {
			return fmt.Errorf("failed to package helm chart: %v", err)
		}
	}
	if manifest.ShouldBuild(model.Debian) {
		if err := util.CopyFile(path.Join(istioOut, "istio-sidecar.deb"), path.Join(out, "deb", "istio-sidecar.deb")); err != nil {
			return fmt.Errorf("failed to package istio-sidecar.deb: %v", err)
		}
		if err := util.CreateSha(path.Join(out, "deb", "istio-sidecar.deb")); err != nil {
			return fmt.Errorf("failed to package istio-sidecar.deb: %v", err)
		}
	}
	if manifest.ShouldBuild(model.Istioctl) {
		for _, arch := range []string{"linux", "osx", "win"} {
			archive := fmt.Sprintf("istio-%s-%s.tar.gz", manifest.Version, arch)
			if arch == "win" {
				// Windows archives are zipped rather than tarred.
				archive = fmt.Sprintf("istio-%s-%s.zip", manifest.Version, arch)
			}
			archivePath := path.Join(manifest.WorkingDirectory, "work", "archive", arch, archive)
			dest := path.Join(out, archive)
			if err := util.CopyFile(archivePath, dest); err != nil {
				return fmt.Errorf("failed to package %v release archive: %v", arch, err)
			}
			if err := util.CreateSha(dest); err != nil {
				return fmt.Errorf("failed to package %v: %v", dest, err)
			}
		}
	}
	// Bundle sources
	cmd := util.VerboseCommand("tar", "-czf", "out/sources.tar.gz", "sources")
	cmd.Dir = path.Join(manifest.WorkingDirectory)
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("failed to bundle sources: %v", err)
	}
	// Manifest
	if err := writeManifest(manifest); err != nil {
		return fmt.Errorf("failed to write manifest: %v", err)
	}
	// Full license
	if err := packageLicense(manifest); err != nil {
		return fmt.Errorf("failed to package license file: %v", err)
	}
	return nil
}
// packageLicense generates the aggregate LICENSES file for the release by
// running the get_dep_licenses.go tool inside the istio source tree and
// capturing its stdout into out/LICENSES.
//
// Fixes: the return type was interface{} instead of error (callers only
// nil-check it, so error is compatible), and the created output file was
// never closed, leaking the descriptor.
func packageLicense(manifest model.Manifest) error {
	cmd := exec.Command("go", "run", "tools/license/get_dep_licenses.go")
	cmd.Dir = path.Join(manifest.WorkingDirectory, "work", "src", "istio.io", "istio")
	o, err := os.Create(path.Join(manifest.WorkingDirectory, "out", "LICENSES"))
	if err != nil {
		return err
	}
	// Close the output file on every path so the descriptor doesn't leak
	// and buffered data reaches disk even when the command fails.
	defer o.Close()
	cmd.Stdout = o
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return err
	}
	return nil
}
// writeManifest serializes the manifest as YAML to out/manifest.yaml in the
// working directory.
func writeManifest(manifest model.Manifest) error {
	// TODO we should replace indirect refs with SHA (in other part of code)
	yml, err := yaml.Marshal(manifest)
	if err != nil {
		return fmt.Errorf("failed to marshal manifest: %v", err)
	}
	dest := path.Join(manifest.WorkingDirectory, "out", "manifest.yaml")
	if err := ioutil.WriteFile(dest, yml, 0640); err != nil {
		return fmt.Errorf("failed to write manifest: %v", err)
	}
	return nil
}
|
package ui
import (
"image"
"image/color"
"image/draw"
)
// UI represents an instance of the UI
type UI struct {
	Width, Height int                  // scene dimensions in pixels
	WindowTitle   string               // title for the application window
	Scene         *image.RGBA          // most recently rendered frame
	components    []Component          // drawable components, rendered in order
	Input         Input                // mouse/keyboard state
	keyFuncs      map[Key]func() error // callbacks run while a key is pressed
}
// New creates a new UI instance with the given scene dimensions and a
// default window title.
func New(width, height int) *UI {
	return &UI{
		Width:       width,
		Height:      height,
		WindowTitle: "ui window",
		Scene:       image.NewRGBA(image.Rect(0, 0, width, height)),
		keyFuncs:    make(map[Key]func() error),
	}
}
// AddKeyFunc registers a function to run on key press, replacing any
// function previously registered for the same key.
func (ui *UI) AddKeyFunc(key Key, fnc func() error) {
	ui.keyFuncs[key] = fnc
}
// Update is called on every frame from the ebiten.Run update callback.
// It refreshes mouse state, dispatches click handling, then runs key
// callbacks; a callback error aborts the frame.
func (ui *UI) Update() error {
	ui.Input.updateMouse()
	ui.handleClick()
	if err := ui.handleKeypress(); err != nil {
		return err
	}
	return nil
}
// handleKeypress polls the keyboard and invokes the function registered for
// each currently pressed key, stopping at the first error.
func (ui *UI) handleKeypress() error {
	ui.Input.updateKeyboard()
	for key, fnc := range ui.keyFuncs {
		if !ui.Input.StateForKey(key) {
			continue
		}
		if err := fnc(); err != nil {
			return err
		}
	}
	return nil
}
// SetWindowTitle sets the title of the application window.
func (ui *UI) SetWindowTitle(s string) {
	ui.WindowTitle = s
}
// AddComponent adds a component to the ui; components render in the order
// they were added.
func (ui *UI) AddComponent(o Component) {
	ui.components = append(ui.components, o)
}
// Render returns a fresh frame of the GUI. When every component reports
// clean, the previously rendered scene is returned untouched; otherwise the
// scene is cleared and all components are redrawn. mx, my is the mouse
// position, used to update each component's hover state.
func (ui *UI) Render(mx, my int) *image.RGBA {
	if ui.isAllClean() {
		return ui.Scene
	}
	// clear scene
	whole := image.Rect(0, 0, ui.Width, ui.Height)
	draw.Draw(ui.Scene, whole, &image.Uniform{color.Transparent}, image.ZP, draw.Src)
	for _, c := range ui.components {
		img := c.Draw(mx, my)
		if img == nil {
			continue
		}
		r := c.GetBounds()
		// Hover is true when the mouse lies within the component's bounds.
		c.Hover(mx >= r.Min.X && mx <= r.Max.X && my >= r.Min.Y && my <= r.Max.Y)
		draw.Draw(ui.Scene, r, img, image.ZP, draw.Over)
	}
	return ui.Scene
}
// isAllClean reports whether every component is unchanged since the last
// render.
func (ui *UI) isAllClean() bool {
	for _, c := range ui.components {
		if c.IsClean() {
			continue
		}
		return false
	}
	return true
}
refactor
package ui
import (
"image"
"image/color"
"image/draw"
)
// UI represents an instance of the UI
type UI struct {
	Width, Height int                  // scene dimensions in pixels
	WindowTitle   string               // title for the application window
	Scene         *image.RGBA          // most recently rendered frame
	components    []Component          // drawable components, rendered in order
	Input         Input                // mouse/keyboard state
	keyFuncs      map[Key]func() error // callbacks run while a key is pressed
}
// New creates a new UI instance with the given scene dimensions and a
// default window title.
func New(width, height int) *UI {
	return &UI{
		Width:       width,
		Height:      height,
		WindowTitle: "ui window",
		Scene:       image.NewRGBA(image.Rect(0, 0, width, height)),
		keyFuncs:    make(map[Key]func() error),
	}
}
// AddKeyFunc registers a function to run on key press, replacing any
// function previously registered for the same key.
func (ui *UI) AddKeyFunc(key Key, fnc func() error) {
	ui.keyFuncs[key] = fnc
}
// Update is called on every frame from the ebiten.Run update callback.
// It refreshes mouse state, dispatches click handling, then runs key
// callbacks; a callback error aborts the frame.
func (ui *UI) Update() error {
	ui.Input.updateMouse()
	ui.handleClick()
	if err := ui.handleKeypress(); err != nil {
		return err
	}
	return nil
}
// SetWindowTitle sets the title of the application window.
func (ui *UI) SetWindowTitle(s string) {
	ui.WindowTitle = s
}
// AddComponent adds a component to the ui; components render in the order
// they were added.
func (ui *UI) AddComponent(o Component) {
	ui.components = append(ui.components, o)
}
// Render returns a fresh frame of the GUI. When every component reports
// clean, the previously rendered scene is returned untouched; otherwise the
// scene is cleared and all components are redrawn. mx, my is the mouse
// position, used to update each component's hover state.
func (ui *UI) Render(mx, my int) *image.RGBA {
	if ui.isAllClean() {
		return ui.Scene
	}
	// clear scene
	whole := image.Rect(0, 0, ui.Width, ui.Height)
	draw.Draw(ui.Scene, whole, &image.Uniform{color.Transparent}, image.ZP, draw.Src)
	for _, c := range ui.components {
		img := c.Draw(mx, my)
		if img == nil {
			continue
		}
		r := c.GetBounds()
		// Hover is true when the mouse lies within the component's bounds.
		c.Hover(mx >= r.Min.X && mx <= r.Max.X && my >= r.Min.Y && my <= r.Max.Y)
		draw.Draw(ui.Scene, r, img, image.ZP, draw.Over)
	}
	return ui.Scene
}
// isAllClean reports whether every component is unchanged since the last
// render.
func (ui *UI) isAllClean() bool {
	for _, c := range ui.components {
		if c.IsClean() {
			continue
		}
		return false
	}
	return true
}
// handleKeypress polls the keyboard and invokes the function registered for
// each currently pressed key, stopping at the first error.
func (ui *UI) handleKeypress() error {
	ui.Input.updateKeyboard()
	for key, fnc := range ui.keyFuncs {
		if !ui.Input.StateForKey(key) {
			continue
		}
		if err := fnc(); err != nil {
			return err
		}
	}
	return nil
}
|
package main
import "fmt"
import "strings"
import "github.com/nsf/termbox-go"
import "strconv"
// Maximum rendered width of a cell that is neither expanded nor collapsed.
const MAX_CELL_WIDTH = 20

// Attributes used to highlight the currently selected column.
const HILITE_FG = termbox.ColorBlack | termbox.AttrBold
const HILITE_BG = termbox.ColorWhite

// inputMode enumerates the modal input states of the viewer.
type inputMode int

const (
	ModeDefault = iota // plain navigation
	ModeFilter         // editing the row filter string
	ModeColumnSelect   // manipulating column display options
	ModeRowSelect      // NOTE(review): unused in the visible handlers
)
// clamp limits val to the inclusive range [lo, hi].
func clamp(val, lo, hi int) int {
	switch {
	case val <= lo:
		return lo
	case val >= hi:
		return hi
	default:
		return val
	}
}
// pinnedBounds is the screen column where the pinned columns end;
// writeString skips cells to its left so scrolled content never overdraws
// the pinned region.
var pinnedBounds = 0

// writeString draws msg at (x, y) and returns the x position just past the
// last character written.
func writeString(x, y int, fg, bg termbox.Attribute, msg string) int {
	for _, c := range msg {
		if x >= pinnedBounds {
			termbox.SetCell(x, y, c, fg, bg)
		}
		x += 1
	}
	return x
}
// writeLine draws line starting at (x, y) and pads the remainder of the
// terminal row with spaces so stale cells from a previous frame are cleared.
func writeLine(x, y int, fg, bg termbox.Attribute, line string) {
	width, _ := termbox.Size()
	for _, c := range line {
		termbox.SetCell(x, y, c, fg, bg)
		x += 1
	}
	// Bug fix: the padding loop previously wrote to column x+i instead of i,
	// placing the blank cells past the right edge of the screen so the tail
	// of the row was never actually cleared.
	for i := x; i < width; i += 1 {
		termbox.SetCell(i, y, ' ', fg, bg)
	}
}
// cellFmtString right-aligns a cell within a MAX_CELL_WIDTH-wide field.
var cellFmtString = "%" + strconv.Itoa(MAX_CELL_WIDTH) + "s"

// writeCell draws one cell at (x, y) according to its column options:
// collapsed columns render as a single ellipsis, short unexpanded cells are
// padded to MAX_CELL_WIDTH, long unexpanded cells are truncated with an
// ellipsis, and expanded (or last-column) cells render in full. Returns the
// x position after the cell and its separator.
func (ui *UI) writeCell(cell string, x, y, index int, fg, bg termbox.Attribute) int {
	colOpts := ui.columnOpts[index]
	lastCol := index == len(ui.columnOpts)-1
	// Highlight the selected column while in column-select mode.
	if index == ui.colIdx && ui.mode == ModeColumnSelect {
		fg = HILITE_FG
		bg = HILITE_BG
	}
	if colOpts.collapsed {
		x = writeString(x, y, fg, bg, "…")
	} else if !colOpts.expanded && len(cell) < MAX_CELL_WIDTH {
		padded := fmt.Sprintf(cellFmtString, cell)
		x = writeString(x, y, fg, bg, padded)
	} else if !colOpts.expanded && !lastCol {
		// NOTE(review): byte-based slicing; may split a multi-byte rune.
		width := clamp(len(cell)-1, 0, MAX_CELL_WIDTH-1)
		x = writeString(x, y, fg, bg, cell[:width])
		x = writeString(x, y, fg, bg, "…")
	} else {
		writeString(x, y, fg, bg, cell)
		x += colOpts.width
	}
	// Draw separator if this isn't the last element
	if index != len(ui.columns)-1 {
		x = writeString(x, y, termbox.ColorRed, termbox.ColorDefault, " │ ")
	}
	return x
}
// writePinned draws all pinned cells of row at the left screen edge,
// ignoring the horizontal scroll offset, and returns the column where they
// end (also recorded in pinnedBounds so writeString won't overdraw them).
func (ui *UI) writePinned(y int, fg, bg termbox.Attribute, row []string) int {
	// ignore our view offsets
	pinnedBounds = 0
	for i, cell := range row {
		colOpts := ui.columnOpts[i]
		if colOpts.pinned {
			pinnedBounds = ui.writeCell(cell, pinnedBounds, y, i, fg, bg)
		}
	}
	return pinnedBounds
}
// writeColumns renders the header row: pinned columns first at the left
// edge, then the remaining column names in header colors.
func (ui *UI) writeColumns(x, y int) {
	x += ui.writePinned(y, termbox.ColorWhite, termbox.ColorDefault, ui.columns)
	headerFg := termbox.ColorBlack | termbox.AttrBold
	headerBg := termbox.ColorWhite
	for i, col := range ui.columns {
		if ui.columnOpts[i].pinned {
			continue
		}
		x = ui.writeCell(col, x, y, i, headerFg, headerBg)
	}
}
// writeRow renders one data row: pinned cells first at the left edge, then
// the remaining cells in default colors.
func (ui *UI) writeRow(x, y int, row []string) {
	x += ui.writePinned(y, termbox.ColorCyan, termbox.ColorBlack, row)
	for i := range ui.columns {
		if ui.columnOpts[i].pinned {
			continue
		}
		x = ui.writeCell(row[i], x, y, i, termbox.ColorDefault, termbox.ColorDefault)
	}
}
// columnOptions holds per-column display state.
type columnOptions struct {
	expanded  bool // render full cell contents without truncation
	collapsed bool // render the column as a single ellipsis
	pinned    bool // keep the column fixed at the left edge while scrolling
	width     int  // column width taken from the input data
}
// UI holds all state for the interactive table view.
type UI struct {
	mode             inputMode // current modal input state
	rowIdx, colIdx   int       // Selection control
	offsetX, offsetY int       // Pan control
	filterString     string    // substring filter applied to rows
	columnOpts       []columnOptions
	columns          []string   // column names
	rows             [][]string // table data
	width            int        // overall table width (from input data)
}
// NewUi builds the UI state for a parsed table. Columns narrower than
// MAX_CELL_WIDTH start out expanded (they fit without truncation); nothing
// is collapsed or pinned initially and no column is selected (colIdx == -1).
func NewUi(data TabularData) UI {
	colOpts := make([]columnOptions, len(data.Columns))
	columns := make([]string, len(data.Columns))
	for i, col := range data.Columns {
		columns[i] = col.Name
		colOpts[i] = columnOptions{
			expanded:  col.Width < MAX_CELL_WIDTH,
			collapsed: false,
			pinned:    false,
			width:     col.Width,
		}
	}
	return UI{
		offsetX:    0,
		offsetY:    0,
		mode:       ModeDefault,
		colIdx:     -1,
		columnOpts: colOpts,
		rows:       data.Rows,
		columns:    columns,
		width:      data.Width,
	}
}
// Init initializes termbox with Esc and mouse input modes enabled.
func (ui *UI) Init() error {
	if err := termbox.Init(); err != nil {
		return err
	}
	termbox.SetInputMode(termbox.InputEsc | termbox.InputMouse)
	return nil
}
// Loop runs the main event loop until the user exits with Ctrl-C,
// dispatching key events to the handler for the current input mode and
// repainting after every event. termbox is closed on return.
func (ui *UI) Loop() {
	defer termbox.Close()
	ui.repaint()
eventloop:
	for {
		switch ev := termbox.PollEvent(); ev.Type {
		case termbox.EventKey:
			if ev.Key == termbox.KeyCtrlC {
				break eventloop
			}
			switch ui.mode {
			case ModeFilter:
				ui.handleKeyFilter(ev)
			case ModeColumnSelect:
				ui.handleKeyColumnSelect(ev)
			default:
				ui.handleKeyDefault(ev)
			}
		}
		ui.repaint()
	}
}
// filterRows returns the indices of up to num rows, starting at the current
// vertical offset, that contain the filter string in any column (all rows
// when the filter is empty).
func (ui *UI) filterRows(num int) []int {
	rows := make([]int, 0, num)
	for i := 0; i < num; i += 1 {
		idx := i + ui.offsetY
		if idx >= len(ui.rows) {
			break
		}
		if ui.filterString == "" {
			// Fast path: no filter, every row matches.
			rows = append(rows, idx)
			continue
		}
		for _, col := range ui.rows[idx] {
			if strings.Contains(col, ui.filterString) {
				rows = append(rows, idx)
				break
			}
		}
	}
	return rows
}
// repaint redraws the whole screen: the header row, the filtered data rows
// (with "~" markers past the end of data), and a mode-specific status line
// at the bottom.
func (ui *UI) repaint() {
	termbox.Clear(termbox.ColorDefault, termbox.ColorDefault)
	_, height := termbox.Size()
	const coldef = termbox.ColorDefault
	ui.writeColumns(ui.offsetX+0, 0)
	// Two rows are reserved: the header at the top, the status line at the
	// bottom.
	rowIdx := ui.filterRows(height - 2)
	for i := 0; i < height-2; i += 1 {
		if i < len(rowIdx) {
			ui.writeRow(ui.offsetX+0, i+1, ui.rows[rowIdx[i]])
		} else {
			writeLine(0, i+1, termbox.ColorWhite|termbox.AttrBold, termbox.ColorBlack, "~")
		}
	}
	switch ui.mode {
	case ModeFilter:
		// "+" indicates a full screen of matches: there may be more.
		ext := ""
		if len(rowIdx) == height-2 {
			ext = "+"
		}
		line := fmt.Sprintf("FILTER [%d%s matches]: %s", len(rowIdx), ext, ui.filterString)
		writeLine(0, height-1, termbox.ColorWhite|termbox.AttrBold, termbox.ColorDefault, line)
	case ModeColumnSelect:
		line := "COLUMN SELECT (^g quit) [" + ui.columns[ui.colIdx] + "]"
		writeLine(0, height-1, termbox.ColorWhite|termbox.AttrBold, termbox.ColorDefault, line)
	default:
		first := 0
		last := 0
		total := len(ui.rows) - 1
		filter := ""
		if len(rowIdx) >= 2 {
			first = rowIdx[0]
			last = rowIdx[len(rowIdx)-1]
		}
		if ui.filterString != "" {
			filter = fmt.Sprintf("[filter: \"%s\"] ", ui.filterString)
		}
		line := fmt.Sprintf("%s[rows %d-%d of %d] :", filter, first, last, total)
		writeLine(0, height-1, termbox.ColorDefault, termbox.ColorDefault, line)
	}
	termbox.Flush()
}
// handleKeyFilter edits the filter string while in filter mode: Esc, ^G and
// Enter leave the mode; backspace/delete remove the last character; space
// and printable runes are appended (resetting the vertical offset); other
// special keys fall through to the default handler.
func (ui *UI) handleKeyFilter(ev termbox.Event) {
	// Ch == 0 implies this was a special key
	if ev.Ch == 0 && ev.Key != termbox.KeySpace {
		if ev.Key == termbox.KeyEsc || ev.Key == termbox.KeyCtrlG || ev.Key == termbox.KeyEnter {
			ui.mode = ModeDefault
		} else if ev.Key == termbox.KeyDelete || ev.Key == termbox.KeyBackspace ||
			ev.Key == termbox.KeyBackspace2 {
			if sz := len(ui.filterString); sz > 0 {
				ui.filterString = ui.filterString[:sz-1]
			}
		} else {
			// Fallback to default handling for arrows etc
			ui.handleKeyDefault(ev)
		}
		return
	}
	if ev.Key == termbox.KeySpace {
		ui.filterString += " "
	} else {
		ui.filterString += string(ev.Ch)
	}
	ui.offsetY = 0
}
// globalExpanded tracks the toggle state of the expand-all ('X') command.
var globalExpanded = false

// handleKeyColumnSelect processes keys in column-select mode: arrows move
// the selection, 'w' toggles collapse, 'x' toggles expand, '.' toggles pin,
// and Ctrl-G/Esc return to default mode. Afterwards the horizontal offset is
// adjusted to try to keep the selected column on screen.
func (ui *UI) handleKeyColumnSelect(ev termbox.Event) {
	switch {
	case ev.Key == termbox.KeyArrowRight:
		ui.colIdx = clamp(ui.colIdx+1, 0, len(ui.columns)-1)
	case ev.Key == termbox.KeyArrowLeft:
		ui.colIdx = clamp(ui.colIdx-1, 0, len(ui.columns)-1)
	case ev.Ch == 'w':
		ui.columnOpts[ui.colIdx].collapsed = !ui.columnOpts[ui.colIdx].collapsed
	case ev.Ch == 'x':
		// Expanding a column implicitly un-collapses it.
		ui.columnOpts[ui.colIdx].expanded = !ui.columnOpts[ui.colIdx].expanded
		if ui.columnOpts[ui.colIdx].expanded {
			ui.columnOpts[ui.colIdx].collapsed = false
		}
	case ev.Ch == '.':
		// Pinning a column implicitly un-collapses it.
		ui.columnOpts[ui.colIdx].pinned = !ui.columnOpts[ui.colIdx].pinned
		if ui.columnOpts[ui.colIdx].pinned {
			ui.columnOpts[ui.colIdx].collapsed = false
		}
	case ev.Key == termbox.KeyCtrlG, ev.Key == termbox.KeyEsc:
		ui.mode = ModeDefault
	default:
		ui.handleKeyDefault(ev)
	}
	// find if we've gone off screen and readjust
	// TODO: this bit is buggy
	cursorPosition := 0
	for i, _ := range ui.columns {
		colOpts := ui.columnOpts[i]
		if i == ui.colIdx {
			break
		}
		//cursorPosition += 3
		if !colOpts.collapsed {
			cursorPosition += colOpts.width
		}
	}
	width, _ := termbox.Size()
	if cursorPosition > width-ui.offsetX || cursorPosition < -ui.offsetX {
		ui.offsetX = -cursorPosition
	}
}
// handleKeyDefault handles navigation keys shared by all modes: horizontal
// panning (arrows, ^A home, ^E end), vertical panning (arrows, 'g'/'G' for
// top/bottom), entering filter ('/' or ^R) and column-select ('C') modes,
// toggling expansion of all columns ('X'), and quitting ('q').
func (ui *UI) handleKeyDefault(ev termbox.Event) {
	switch {
	case ev.Key == termbox.KeyCtrlA:
		ui.offsetX = 0
	case ev.Key == termbox.KeyCtrlE:
		// FIXME: this is buggy
		w, _ := termbox.Size()
		ui.offsetX = -ui.width + w*2
	case ev.Key == termbox.KeyArrowRight:
		ui.offsetX = clamp(ui.offsetX-5, -ui.width, 0)
	case ev.Key == termbox.KeyArrowLeft:
		ui.offsetX = clamp(ui.offsetX+5, -ui.width, 0)
	case ev.Key == termbox.KeyArrowUp:
		ui.offsetY = clamp(ui.offsetY-1, 0, len(ui.rows))
	case ev.Key == termbox.KeyArrowDown:
		ui.offsetY = clamp(ui.offsetY+1, 0, len(ui.rows))
	case ev.Ch == '/', ev.Key == termbox.KeyCtrlR:
		// Start a fresh filter from the top of the data.
		ui.mode = ModeFilter
		ui.filterString = ""
		ui.offsetY = 0
	case ev.Ch == 'C':
		ui.mode = ModeColumnSelect
		ui.offsetX = 0
		ui.colIdx = 0
	case ev.Ch == 'G':
		_, height := termbox.Size()
		ui.offsetY = len(ui.rows) - (height - 3)
	case ev.Ch == 'g':
		ui.offsetY = 0
	case ev.Ch == 'X':
		// Toggle expansion of every column at once.
		for i, _ := range ui.columnOpts {
			ui.columnOpts[i].expanded = !globalExpanded
			// FIXME: Possibly not the best behavior
			ui.columnOpts[i].collapsed = false
		}
		globalExpanded = !globalExpanded
	case ui.mode == ModeDefault && ev.Ch == 'q':
		panic("TODO: real exit")
	}
}
Zebra striping
package main
import "fmt"
import "strings"
import "github.com/nsf/termbox-go"
import "strconv"
// Maximum rendered width of a cell that is neither expanded nor collapsed.
const MAX_CELL_WIDTH = 20

// Attributes used to highlight the currently selected column.
const HILITE_FG = termbox.ColorBlack | termbox.AttrBold
const HILITE_BG = termbox.ColorWhite

// inputMode enumerates the modal input states of the viewer.
type inputMode int

const (
	ModeDefault = iota // plain navigation
	ModeFilter         // editing the row filter string
	ModeColumnSelect   // manipulating column display options
	ModeRowSelect      // NOTE(review): unused in the visible handlers
)
// clamp limits val to the inclusive range [lo, hi].
func clamp(val, lo, hi int) int {
	switch {
	case val <= lo:
		return lo
	case val >= hi:
		return hi
	default:
		return val
	}
}
// pinnedBounds is the screen column where the pinned-column area ends.
// writeString skips cells to the left of it so scrolled content never
// overdraws pinned columns.
var pinnedBounds = 0

// writeString draws msg at (x, y), clipping any cell left of
// pinnedBounds, and returns the x position one past the last rune.
func writeString(x, y int, fg, bg termbox.Attribute, msg string) int {
	for _, c := range msg {
		if x >= pinnedBounds {
			termbox.SetCell(x, y, c, fg, bg)
		}
		x += 1
	}
	return x
}
// writeLine draws line starting at (x, y) and then pads the remainder
// of the terminal row with spaces in the same colors.
func writeLine(x, y int, fg, bg termbox.Attribute, line string) {
	width, _ := termbox.Size()
	for _, c := range line {
		termbox.SetCell(x, y, c, fg, bg)
		x++
	}
	// Pad up to the right edge. The previous code wrote to column x+i
	// with i starting at x (a double offset), so columns [x, 2x) were
	// never cleared and writes past the width were clipped.
	for i := x; i < width; i++ {
		termbox.SetCell(i, y, ' ', fg, bg)
	}
}
// cellFmtString left-justifies and space-pads a cell to MAX_CELL_WIDTH.
var cellFmtString = "%-" + strconv.Itoa(MAX_CELL_WIDTH) + "s"

// writeCell draws one cell of column `index` at (x, y), applying the
// column's collapse/expand options and the column-select highlight,
// and returns the x position following the cell (and its separator).
func (ui *UI) writeCell(cell string, x, y, index int, fg, bg termbox.Attribute) int {
	colOpts := ui.columnOpts[index]
	lastCol := index == len(ui.columnOpts)-1
	// Highlight the selected column while in column-select mode.
	if index == ui.colIdx && ui.mode == ModeColumnSelect {
		fg = HILITE_FG
		bg = HILITE_BG
	}
	if colOpts.collapsed {
		// Collapsed columns render as a single ellipsis.
		x = writeString(x, y, fg, bg, "…")
	} else if !colOpts.expanded && len(cell) < MAX_CELL_WIDTH {
		// Short cell: space-pad to the fixed cell width.
		padded := fmt.Sprintf(cellFmtString, cell)
		x = writeString(x, y, fg, bg, padded)
	} else if !colOpts.expanded && !lastCol {
		// Long cell in a fixed-width column: truncate and mark with "…".
		// NOTE(review): cell[:width] slices bytes, not runes, and could
		// split a multi-byte character — confirm inputs are ASCII.
		width := clamp(len(cell)-1, 0, MAX_CELL_WIDTH-1)
		x = writeString(x, y, fg, bg, cell[:width])
		x = writeString(x, y, fg, bg, "…")
	} else {
		// Expanded (or last) column: draw in full and advance by the
		// column's natural width.
		writeString(x, y, fg, bg, cell)
		x += colOpts.width
	}
	// Draw separator if this isn't the last element
	if index != len(ui.columns)-1 {
		x = writeString(x, y, termbox.ColorRed, termbox.ColorDefault, " │ ")
	}
	return x
}
// writePinned draws every pinned column of row at the left edge of
// screen row y, advancing the package-level pinnedBounds clip margin
// as it goes. Returns the total width consumed by pinned columns.
func (ui *UI) writePinned(y int, fg, bg termbox.Attribute, row []string) int {
	// ignore our view offsets
	pinnedBounds = 0
	for i, cell := range row {
		colOpts := ui.columnOpts[i]
		if colOpts.pinned {
			pinnedBounds = ui.writeCell(cell, pinnedBounds, y, i, fg, bg)
		}
	}
	return pinnedBounds
}
// writeColumns renders the header row: pinned columns first at the
// left edge, then the remaining column titles starting at x.
func (ui *UI) writeColumns(x, y int) {
	// Header cells are always drawn highlighted; the colors are
	// loop-invariant, so compute them once (previously reassigned on
	// every iteration).
	fg := termbox.ColorBlack | termbox.AttrBold
	bg := termbox.ColorWhite
	x += ui.writePinned(y, termbox.ColorWhite, termbox.ColorDefault, ui.columns)
	for i, col := range ui.columns {
		if !ui.columnOpts[i].pinned {
			x = ui.writeCell(col, x, y, i, fg, bg)
		}
	}
}
// writeRow renders one data row at screen row y: pinned columns at the
// left edge, then the remaining cells starting at x.
func (ui *UI) writeRow(x, y int, row []string) {
	fg := termbox.ColorDefault
	// Alternate foreground color on even rows when zebra striping is on.
	if ui.zebraStripe && y%2 == 0 {
		fg = termbox.ColorMagenta
	}
	x += ui.writePinned(y, termbox.ColorCyan, termbox.ColorBlack, row)
	// was: for i, _ := range — gofmt/vet prefer the single-variable form.
	for i := range ui.columns {
		if !ui.columnOpts[i].pinned {
			x = ui.writeCell(row[i], x, y, i, fg, termbox.ColorDefault)
		}
	}
}
// columnOptions holds per-column display state.
type columnOptions struct {
	expanded  bool // draw full contents even past MAX_CELL_WIDTH
	collapsed bool // draw the column as a single "…" cell
	pinned    bool // keep the column at the left edge while panning
	width     int  // natural (maximum) width of the column's data
}

// UI holds all state for the terminal table viewer.
type UI struct {
	mode             inputMode // which key handler is active
	rowIdx, colIdx   int       // Selection control
	offsetX, offsetY int       // Pan control
	filterString     string    // substring filter applied to rows
	zebraStripe      bool      // alternate row colors when true
	columnOpts       []columnOptions
	columns          []string   // column titles
	rows             [][]string // cell data, row-major
	width            int        // total width of all columns
}
// NewUi builds a UI over the given tabular data with default display
// options: nothing pinned or collapsed, and narrow columns expanded.
func NewUi(data TabularData) UI {
	names := make([]string, len(data.Columns))
	opts := make([]columnOptions, len(data.Columns))
	for i, col := range data.Columns {
		names[i] = col.Name
		opts[i] = columnOptions{
			expanded:  col.Width < MAX_CELL_WIDTH,
			collapsed: false,
			pinned:    false,
			width:     col.Width,
		}
	}
	return UI{
		mode:        ModeDefault,
		colIdx:      -1,
		offsetX:     0,
		offsetY:     0,
		zebraStripe: false,
		columnOpts:  opts,
		columns:     names,
		rows:        data.Rows,
		width:       data.Width,
	}
}
// Init initializes termbox and enables escape-sequence and mouse input.
func (ui *UI) Init() error {
	err := termbox.Init()
	if err != nil {
		return err
	}
	termbox.SetInputMode(termbox.InputEsc | termbox.InputMouse)
	return nil
}
// Loop runs the event loop: poll a termbox event, dispatch it to the
// handler for the current input mode, repaint, repeat. Ctrl-C exits.
// Closes termbox on return.
func (ui *UI) Loop() {
	defer termbox.Close()
	ui.repaint()
eventloop:
	for {
		switch ev := termbox.PollEvent(); ev.Type {
		case termbox.EventKey:
			// Ctrl-C always exits, regardless of mode.
			if ev.Key == termbox.KeyCtrlC {
				break eventloop
			}
			switch ui.mode {
			case ModeFilter:
				ui.handleKeyFilter(ev)
			case ModeColumnSelect:
				ui.handleKeyColumnSelect(ev)
			default:
				ui.handleKeyDefault(ev)
			}
		}
		ui.repaint()
	}
}
// filterRows returns the indices (into ui.rows) of up to num rows to
// display, starting at the current vertical pan offset. When a filter
// string is set, only rows with a cell containing it are kept — note
// that only num candidate rows are examined, not scanned until num
// matches are found.
func (ui *UI) filterRows(num int) []int {
	matches := make([]int, 0, num)
	if ui.filterString == "" {
		// fast pass: no filter, take the next num rows verbatim.
		for i := 0; i < num; i++ {
			idx := i + ui.offsetY
			if idx >= len(ui.rows) {
				break
			}
			matches = append(matches, idx)
		}
		return matches
	}
	for i := 0; i < num; i++ {
		idx := i + ui.offsetY
		if idx >= len(ui.rows) {
			break
		}
		for _, cell := range ui.rows[idx] {
			if strings.Contains(cell, ui.filterString) {
				matches = append(matches, idx)
				break
			}
		}
	}
	return matches
}
// repaint redraws the whole screen: header row, visible data rows
// (with "~" markers past the end, vi-style), and a mode-specific
// status line at the bottom. (Removed an unused `const coldef`.)
func (ui *UI) repaint() {
	termbox.Clear(termbox.ColorDefault, termbox.ColorDefault)
	_, height := termbox.Size()
	ui.writeColumns(ui.offsetX+0, 0)
	// height-2 leaves one row for the header and one for the status line.
	rowIdx := ui.filterRows(height - 2)
	for i := 0; i < height-2; i++ {
		if i < len(rowIdx) {
			ui.writeRow(ui.offsetX+0, i+1, ui.rows[rowIdx[i]])
		} else {
			writeLine(0, i+1, termbox.ColorWhite|termbox.AttrBold, termbox.ColorBlack, "~")
		}
	}
	switch ui.mode {
	case ModeFilter:
		// A full screen of matches may mean more beyond; mark with "+".
		ext := ""
		if len(rowIdx) == height-2 {
			ext = "+"
		}
		line := fmt.Sprintf("FILTER [%d%s matches]: %s", len(rowIdx), ext, ui.filterString)
		writeLine(0, height-1, termbox.ColorWhite|termbox.AttrBold, termbox.ColorDefault, line)
	case ModeColumnSelect:
		line := "COLUMN SELECT (^g quit) [" + ui.columns[ui.colIdx] + "]"
		writeLine(0, height-1, termbox.ColorWhite|termbox.AttrBold, termbox.ColorDefault, line)
	default:
		// Default status: visible row span and total row count.
		first := 0
		last := 0
		total := len(ui.rows) - 1
		filter := ""
		if len(rowIdx) >= 2 {
			first = rowIdx[0]
			last = rowIdx[len(rowIdx)-1]
		}
		if ui.filterString != "" {
			filter = fmt.Sprintf("[filter: \"%s\"] ", ui.filterString)
		}
		line := fmt.Sprintf("%s[rows %d-%d of %d] :", filter, first, last, total)
		writeLine(0, height-1, termbox.ColorDefault, termbox.ColorDefault, line)
	}
	termbox.Flush()
}
// handleKeyFilter processes key events while the filter string is
// being typed: Esc/^G/Enter leave filter mode (keeping the filter),
// backspace variants trim it, other special keys fall through to the
// default handler, and printable characters are appended.
func (ui *UI) handleKeyFilter(ev termbox.Event) {
	// Ch == 0 implies this was a special key.
	if ev.Ch == 0 && ev.Key != termbox.KeySpace {
		switch ev.Key {
		case termbox.KeyEsc, termbox.KeyCtrlG, termbox.KeyEnter:
			ui.mode = ModeDefault
		case termbox.KeyDelete, termbox.KeyBackspace, termbox.KeyBackspace2:
			if sz := len(ui.filterString); sz > 0 {
				ui.filterString = ui.filterString[:sz-1]
			}
		default:
			// Fallback to default handling for arrows etc.
			ui.handleKeyDefault(ev)
		}
		return
	}
	ch := string(ev.Ch)
	if ev.Key == termbox.KeySpace {
		ch = " "
	}
	ui.filterString += ch
	// Any edit to the filter resets the vertical pan.
	ui.offsetY = 0
}
// globalExpanded mirrors the last global 'X' toggle so repeated
// presses flip all columns back and forth.
var globalExpanded = false

// handleKeyColumnSelect processes key events while in column-select
// mode, then re-pans the view so the selected column stays visible.
func (ui *UI) handleKeyColumnSelect(ev termbox.Event) {
	switch {
	case ev.Key == termbox.KeyArrowRight:
		ui.colIdx = clamp(ui.colIdx+1, 0, len(ui.columns)-1)
	case ev.Key == termbox.KeyArrowLeft:
		ui.colIdx = clamp(ui.colIdx-1, 0, len(ui.columns)-1)
	case ev.Ch == 'w':
		// Toggle collapse of the selected column.
		ui.columnOpts[ui.colIdx].collapsed = !ui.columnOpts[ui.colIdx].collapsed
	case ev.Ch == 'x':
		// Toggle expansion; an expanded column cannot stay collapsed.
		ui.columnOpts[ui.colIdx].expanded = !ui.columnOpts[ui.colIdx].expanded
		if ui.columnOpts[ui.colIdx].expanded {
			ui.columnOpts[ui.colIdx].collapsed = false
		}
	case ev.Ch == '.':
		// Toggle pinning; a pinned column cannot stay collapsed.
		ui.columnOpts[ui.colIdx].pinned = !ui.columnOpts[ui.colIdx].pinned
		if ui.columnOpts[ui.colIdx].pinned {
			ui.columnOpts[ui.colIdx].collapsed = false
		}
	case ev.Key == termbox.KeyCtrlG, ev.Key == termbox.KeyEsc:
		ui.mode = ModeDefault
	default:
		ui.handleKeyDefault(ev)
	}
	// find if we've gone off screen and readjust
	// TODO: this bit is buggy
	cursorPosition := 0
	// was: for i, _ := range — gofmt/vet prefer the single-variable form.
	for i := range ui.columns {
		colOpts := ui.columnOpts[i]
		if i == ui.colIdx {
			break
		}
		//cursorPosition += 3
		if !colOpts.collapsed {
			cursorPosition += colOpts.width
		}
	}
	width, _ := termbox.Size()
	if cursorPosition > width-ui.offsetX || cursorPosition < -ui.offsetX {
		ui.offsetX = -cursorPosition
	}
}
// handleKeyDefault handles key events in the default mode; the
// mode-specific handlers (filter, column-select) also fall back to it
// for navigation keys they don't consume.
func (ui *UI) handleKeyDefault(ev termbox.Event) {
	switch {
	case ev.Key == termbox.KeyCtrlA:
		// Jump to the leftmost column.
		ui.offsetX = 0
	case ev.Key == termbox.KeyCtrlE:
		// Jump toward the rightmost column.
		// FIXME: this is buggy
		w, _ := termbox.Size()
		ui.offsetX = -ui.width + w*2
	case ev.Key == termbox.KeyArrowRight:
		ui.offsetX = clamp(ui.offsetX-5, -ui.width, 0)
	case ev.Key == termbox.KeyArrowLeft:
		ui.offsetX = clamp(ui.offsetX+5, -ui.width, 0)
	case ev.Key == termbox.KeyArrowUp:
		ui.offsetY = clamp(ui.offsetY-1, 0, len(ui.rows))
	case ev.Key == termbox.KeyArrowDown:
		ui.offsetY = clamp(ui.offsetY+1, 0, len(ui.rows))
	case ev.Ch == '/', ev.Key == termbox.KeyCtrlR:
		// Enter filter mode with a fresh filter string.
		ui.mode = ModeFilter
		ui.filterString = ""
		ui.offsetY = 0
	case ev.Ch == 'C':
		// Enter column-select mode, starting at the first column.
		ui.mode = ModeColumnSelect
		ui.offsetX = 0
		ui.colIdx = 0
	case ev.Ch == 'G':
		// Jump to the last page of rows.
		_, height := termbox.Size()
		ui.offsetY = len(ui.rows) - (height - 3)
	case ev.Ch == 'g':
		// Jump back to the first row.
		ui.offsetY = 0
	case ev.Ch == 'Z':
		// Toggle zebra striping of rows.
		ui.zebraStripe = !ui.zebraStripe
	case ev.Ch == 'X':
		// Toggle expansion of all columns at once.
		// was: for i, _ := range — gofmt/vet prefer the single-variable form.
		for i := range ui.columnOpts {
			ui.columnOpts[i].expanded = !globalExpanded
			// FIXME: Possibly not the best behavior
			ui.columnOpts[i].collapsed = false
		}
		globalExpanded = !globalExpanded
	case ui.mode == ModeDefault && ev.Ch == 'q':
		panic("TODO: real exit")
	}
}
|
// Copyright 2015 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Author: Spencer Kimball (spencer.kimball@gmail.com)
package kv_test
import (
"errors"
"reflect"
"sync/atomic"
"testing"
"github.com/cockroachdb/cockroach/client"
"github.com/cockroachdb/cockroach/keys"
"github.com/cockroachdb/cockroach/kv"
"github.com/cockroachdb/cockroach/roachpb"
"github.com/cockroachdb/cockroach/server"
"github.com/cockroachdb/cockroach/storage/engine"
"github.com/cockroachdb/cockroach/testutils"
"github.com/cockroachdb/cockroach/util"
"github.com/cockroachdb/cockroach/util/hlc"
"github.com/cockroachdb/cockroach/util/leaktest"
"github.com/cockroachdb/cockroach/util/log"
"github.com/cockroachdb/cockroach/util/uuid"
)
// NOTE: these tests are in package kv_test to avoid a circular
// dependency between the server and kv packages. These tests rely on
// starting a TestServer, which creates a "real" node and employs a
// distributed sender server-side.
// TestRangeLookupWithOpenTransaction verifies that range lookups are
// done in such a way (e.g. using inconsistent reads) that they
// proceed in the event that a write intent is extant at the meta
// index record being read.
func TestRangeLookupWithOpenTransaction(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := server.StartTestServer(t)
	defer s.Stop()
	db := createTestClient(t, s.Stopper(), s.ServingAddr())
	// Create an intent on the meta1 record by writing directly to the
	// engine, bypassing the usual transaction machinery (the txn is
	// never committed or aborted, so the intent stays open).
	key := testutils.MakeKey(keys.Meta1Prefix, roachpb.KeyMax)
	now := s.Clock().Now()
	txn := roachpb.NewTransaction("txn", roachpb.Key("foobar"), 0, roachpb.SERIALIZABLE, now, 0)
	if err := engine.MVCCPutProto(s.Ctx.Engines[0], nil, key, now, txn, &roachpb.RangeDescriptor{}); err != nil {
		t.Fatal(err)
	}
	// Now, with an intent pending, attempt (asynchronously) to read
	// from an arbitrary key. This will cause the distributed sender to
	// do a range lookup, which will encounter the intent. We're
	// verifying here that the range lookup doesn't fail with a write
	// intent error. If it did, it would go into a deadloop attempting
	// to push the transaction, which in turn requires another range
	// lookup, etc, ad nauseam.
	if _, err := db.Get("a"); err != nil {
		t.Fatal(err)
	}
}
// setupMultipleRanges creates a client against the given test server
// and splits the keyspace at each of the given keys. The caller is
// responsible for stopping the server and closing the client.
func setupMultipleRanges(t *testing.T, ts *server.TestServer, splitAt ...string) *client.DB {
	db := createTestClient(t, ts.Stopper(), ts.ServingAddr())
	for _, splitKey := range splitAt {
		// Fail fast on split errors so we don't leak server goroutines.
		if err := db.AdminSplit(splitKey); err != nil {
			t.Fatal(err)
		}
	}
	return db
}
// TestMultiRangeBatchBounded verifies that scans with row bounds
// (MaxRows) inside a single batch each return the right rows when the
// spans cross range boundaries.
func TestMultiRangeBatchBounded(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := server.StartTestServer(t)
	defer s.Stop()
	db := setupMultipleRanges(t, s, "a", "b", "c", "d", "e", "f")
	for _, key := range []string{"a", "aa", "aaa", "b", "bb", "cc", "d", "dd", "ff"} {
		if err := db.Put(key, "value"); err != nil {
			t.Fatal(err)
		}
	}
	// Expected rows per scan, honoring each scan's bound (3, 2, 3).
	expResults := [][]string{
		{"aaa", "b", "bb"},
		{"a", "aa"},
		{"cc", "d", "dd"},
	}
	b := db.NewBatch()
	b.Scan("aaa", "dd", 3)
	b.Scan("a", "z", 2)
	b.Scan("cc", "ff", 3)
	if err := db.Run(b); err != nil {
		t.Fatal(err)
	}
	if len(expResults) != len(b.Results) {
		t.Fatalf("only got %d results, wanted %d", len(expResults), len(b.Results))
	}
	for i, res := range b.Results {
		expRes := expResults[i]
		var actRes []string
		for _, k := range res.Rows {
			actRes = append(actRes, string(k.Key))
		}
		if !reflect.DeepEqual(actRes, expRes) {
			t.Errorf("%d: got %v, wanted %v", i, actRes, expRes)
		}
	}
}
// TestMultiRangeEmptyAfterTruncate exercises a code path in which a
// multi-range request deals with a range without any active requests after
// truncation. In that case, the request is skipped.
func TestMultiRangeEmptyAfterTruncate(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := server.StartTestServer(t)
	defer s.Stop()
	db := setupMultipleRanges(t, s, "c", "d")
	// Delete the keys within a transaction. The range [c,d) doesn't have
	// any active requests: both DelRange spans fall entirely outside it.
	if pErr := db.Txn(func(txn *client.Txn) *roachpb.Error {
		b := txn.NewBatch()
		b.DelRange("a", "b", false)
		b.DelRange("e", "f", false)
		return txn.CommitInBatch(b)
	}); pErr != nil {
		t.Fatalf("unexpected error on transactional DeleteRange: %s", pErr)
	}
}
// TestMultiRangeScanReverseScanDeleteResolve verifies that Scan, ReverseScan,
// DeleteRange and ResolveIntentRange work across ranges.
func TestMultiRangeScanReverseScanDeleteResolve(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := server.StartTestServer(t)
	defer s.Stop()
	db := setupMultipleRanges(t, s, "b")
	// Write keys before, at, and after the split key.
	for _, key := range []string{"a", "b", "c"} {
		if pErr := db.Put(key, "value"); pErr != nil {
			t.Fatal(pErr)
		}
	}
	// Scan to retrieve the keys just written.
	if rows, pErr := db.Scan("a", "q", 0); pErr != nil {
		t.Fatalf("unexpected pError on Scan: %s", pErr)
	} else if l := len(rows); l != 3 {
		t.Errorf("expected 3 rows; got %d", l)
	}
	// Scan in reverse order to retrieve the keys just written.
	if rows, pErr := db.ReverseScan("a", "q", 0); pErr != nil {
		t.Fatalf("unexpected error on ReverseScan: %s", pErr)
	} else if l := len(rows); l != 3 {
		t.Errorf("expected 3 rows; got %d", l)
	}
	// Delete the keys within a transaction. Implicitly, the intents are
	// resolved via ResolveIntentRange upon completion.
	if pErr := db.Txn(func(txn *client.Txn) *roachpb.Error {
		b := txn.NewBatch()
		b.DelRange("a", "d", false)
		return txn.CommitInBatch(b)
	}); pErr != nil {
		t.Fatalf("unexpected error on transactional DeleteRange: %s", pErr)
	}
	// Scan consistently to make sure the intents are gone.
	if rows, pErr := db.Scan("a", "q", 0); pErr != nil {
		t.Fatalf("unexpected error on Scan: %s", pErr)
	} else if l := len(rows); l != 0 {
		t.Errorf("expected 0 rows; got %d", l)
	}
	// ReverseScan consistently to make sure the intents are gone.
	if rows, pErr := db.ReverseScan("a", "q", 0); pErr != nil {
		t.Fatalf("unexpected error on ReverseScan: %s", pErr)
	} else if l := len(rows); l != 0 {
		t.Errorf("expected 0 rows; got %d", l)
	}
}
// TestMultiRangeScanReverseScanInconsistent verifies that a Scan/ReverseScan
// across ranges that doesn't require read consistency will set a timestamp
// using the clock local to the distributed sender.
func TestMultiRangeScanReverseScanInconsistent(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := server.StartTestServer(t)
	defer s.Stop()
	db := setupMultipleRanges(t, s, "b")
	// Write keys "a" and "b", the latter of which is the first key in the
	// second range.
	keys := [2]string{"a", "b"}
	ts := [2]roachpb.Timestamp{}
	for i, key := range keys {
		b := &client.Batch{}
		b.Put(key, "value")
		if pErr := db.Run(b); pErr != nil {
			t.Fatal(pErr)
		}
		ts[i] = s.Clock().Now()
		log.Infof("%d: %d", i, ts[i])
		if i == 0 {
			util.SucceedsSoon(t, func() error {
				// Enforce that when we write the second key, it's written
				// with a strictly higher timestamp. We're dropping logical
				// ticks and the clock may just have been pushed into the
				// future, so that's necessary. See #3122.
				if !ts[0].Less(s.Clock().Now()) {
					return errors.New("time stands still")
				}
				return nil
			})
		}
	}
	// Do an inconsistent Scan/ReverseScan from a new DistSender and verify
	// it does the read at its local clock and doesn't receive an
	// OpRequiresTxnError. We set the local clock to the timestamp of
	// just above the first key to verify it's used to read only key "a".
	manual := hlc.NewManualClock(ts[0].WallTime + 1)
	clock := hlc.NewClock(manual.UnixNano)
	ds := kv.NewDistSender(&kv.DistSenderContext{Clock: clock, RPCContext: s.RPCContext()}, s.Gossip())
	// Scan. Expect exactly one row ("a"): "b" was written after the
	// manual clock's timestamp.
	sa := roachpb.NewScan(roachpb.Key("a"), roachpb.Key("c"), 0).(*roachpb.ScanRequest)
	reply, pErr := client.SendWrappedWith(ds, nil, roachpb.Header{
		ReadConsistency: roachpb.INCONSISTENT,
	}, sa)
	if pErr != nil {
		t.Fatal(pErr)
	}
	sr := reply.(*roachpb.ScanResponse)
	if l := len(sr.Rows); l != 1 {
		t.Fatalf("expected 1 row; got %d", l)
	}
	if key := string(sr.Rows[0].Key); keys[0] != key {
		t.Errorf("expected key %q; got %q", keys[0], key)
	}
	// ReverseScan. Same expectation as the forward scan.
	rsa := roachpb.NewReverseScan(roachpb.Key("a"), roachpb.Key("c"), 0).(*roachpb.ReverseScanRequest)
	reply, pErr = client.SendWrappedWith(ds, nil, roachpb.Header{
		ReadConsistency: roachpb.INCONSISTENT,
	}, rsa)
	if pErr != nil {
		t.Fatal(pErr)
	}
	rsr := reply.(*roachpb.ReverseScanResponse)
	if l := len(rsr.Rows); l != 1 {
		t.Fatalf("expected 1 row; got %d", l)
	}
	if key := string(rsr.Rows[0].Key); keys[0] != key {
		t.Errorf("expected key %q; got %q", keys[0], key)
	}
}
// initReverseScanTestEnv splits the keyspace into the ranges
// ["", "b"), ["b", "e"), ["e", "g") and ["g", "\xff\xff"), then writes
// one value at each of the keys "a" through "h".
func initReverseScanTestEnv(s *server.TestServer, t *testing.T) *client.DB {
	db := createTestClient(t, s.Stopper(), s.ServingAddr())
	for _, splitKey := range []string{"b", "e", "g"} {
		// Split the keyspace at the given key.
		if pErr := db.AdminSplit(splitKey); pErr != nil {
			t.Fatal(pErr)
		}
	}
	// Write keys before, at, and after the split keys.
	for _, k := range []string{"a", "b", "c", "d", "e", "f", "g", "h"} {
		if pErr := db.Put(k, "value"); pErr != nil {
			t.Fatal(pErr)
		}
	}
	return db
}
// TestSingleRangeReverseScan verifies that ReverseScan gets the right results
// on a single range.
func TestSingleRangeReverseScan(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := server.StartTestServer(t)
	defer s.Stop()
	db := initReverseScanTestEnv(s, t)
	// Case 1: Request.EndKey is in the middle of the range.
	if rows, pErr := db.ReverseScan("b", "d", 0); pErr != nil {
		t.Fatalf("unexpected pError on ReverseScan: %s", pErr)
	} else if l := len(rows); l != 2 {
		t.Errorf("expected 2 rows; got %d", l)
	}
	// Case 2: Request.EndKey is equal to the EndKey of the range.
	if rows, pErr := db.ReverseScan("e", "g", 0); pErr != nil {
		t.Fatalf("unexpected error on ReverseScan: %s", pErr)
	} else if l := len(rows); l != 2 {
		t.Errorf("expected 2 rows; got %d", l)
	}
	// Case 3: Test roachpb.TableDataMin. Expected to return "g" and "h".
	wanted := 2
	if rows, pErr := db.ReverseScan("g", keys.TableDataMin, 0); pErr != nil {
		t.Fatalf("unexpected error on ReverseScan: %s", pErr)
	} else if l := len(rows); l != wanted {
		t.Errorf("expected %d rows; got %d", wanted, l)
	}
	// Case 4: Test keys.SystemMax
	// This span covers the system DB keys. Note sql.GetInitialSystemValues
	// returns one key before keys.SystemMax, but our scan is including one key
	// (\xffa) created for the test.
	if rows, pErr := db.ReverseScan(keys.SystemMax, "b", 0); pErr != nil {
		t.Fatalf("unexpected error on ReverseScan: %s", pErr)
	} else if l := len(rows); l != 1 {
		t.Errorf("expected 1 row; got %d", l)
	}
}
// TestMultiRangeReverseScan verifies that ReverseScan gets the right results
// across multiple ranges.
func TestMultiRangeReverseScan(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := server.StartTestServer(t)
	defer s.Stop()
	db := initReverseScanTestEnv(s, t)
	// Case 1: Request.EndKey is in the middle of the range.
	// Spans ["", "b") and ["b", "e"); expects "a", "b", "c".
	if rows, pErr := db.ReverseScan("a", "d", 0); pErr != nil {
		t.Fatalf("unexpected error on ReverseScan: %s", pErr)
	} else if l := len(rows); l != 3 {
		t.Errorf("expected 3 rows; got %d", l)
	}
	// Case 2: Request.EndKey is equal to the EndKey of the range.
	// Spans ["b", "e") and ["e", "g"); expects "d", "e", "f".
	if rows, pErr := db.ReverseScan("d", "g", 0); pErr != nil {
		t.Fatalf("unexpected error on ReverseScan: %s", pErr)
	} else if l := len(rows); l != 3 {
		t.Errorf("expected 3 rows; got %d", l)
	}
}
// TestReverseScanWithSplitAndMerge verifies that ReverseScan gets the right results
// across multiple ranges while range splits and merges happen.
func TestReverseScanWithSplitAndMerge(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := server.StartTestServer(t)
	defer s.Stop()
	db := initReverseScanTestEnv(s, t)
	// Case 1: An encounter with a range split.
	// Split the range ["b", "e") at "c".
	if pErr := db.AdminSplit("c"); pErr != nil {
		t.Fatal(pErr)
	}
	// The ReverseScan will run into a stale descriptor; it must still
	// return the correct rows.
	if rows, pErr := db.ReverseScan("a", "d", 0); pErr != nil {
		t.Fatalf("unexpected error on ReverseScan: %s", pErr)
	} else if l := len(rows); l != 3 {
		t.Errorf("expected 3 rows; got %d", l)
	}
	// Case 2: encounter with range merge .
	// Merge the range ["e", "g") and ["g", "\xff\xff") .
	if pErr := db.AdminMerge("e"); pErr != nil {
		t.Fatal(pErr)
	}
	if rows, pErr := db.ReverseScan("d", "g", 0); pErr != nil {
		t.Fatalf("unexpected error on ReverseScan: %s", pErr)
	} else if l := len(rows); l != 3 {
		t.Errorf("expected 3 rows; got %d", l)
	}
}
// TestBadRequest verifies that degenerate scan and delete-range spans
// are rejected with the expected errors.
func TestBadRequest(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := server.StartTestServer(t)
	// Register Stop before creating the client (consistent with the
	// other tests in this file): if createTestClient fails via t.Fatal,
	// the deferred Stop would otherwise never be registered and the
	// server would leak.
	defer s.Stop()
	db := createTestClient(t, s.Stopper(), s.ServingAddr())
	// Write key "a".
	if pErr := db.Put("a", "value"); pErr != nil {
		t.Fatal(pErr)
	}
	// Empty spans (startkey == endkey) must be rejected.
	if _, pErr := db.Scan("a", "a", 0); !testutils.IsPError(pErr, "truncation resulted in empty batch") {
		t.Fatalf("unexpected error on scan with startkey == endkey: %v", pErr)
	}
	if _, pErr := db.ReverseScan("a", "a", 0); !testutils.IsPError(pErr, "truncation resulted in empty batch") {
		t.Fatalf("unexpected pError on reverse scan with startkey == endkey: %v", pErr)
	}
	// Inverted span (start > end).
	if pErr := db.DelRange("x", "a"); !testutils.IsPError(pErr, "truncation resulted in empty batch") {
		t.Fatalf("unexpected error on deletion on [x, a): %v", pErr)
	}
	// Span starting at KeyMin.
	if pErr := db.DelRange("", "z"); !testutils.IsPError(pErr, "must be greater than LocalMax") {
		t.Fatalf("unexpected error on deletion on [KeyMin, z): %v", pErr)
	}
}
// TestNoSequenceCachePutOnRangeMismatchError verifies that the
// sequence cache is not updated with RangeKeyMismatchError. This is a
// higher-level version of TestSequenceCacheShouldCache.
func TestNoSequenceCachePutOnRangeMismatchError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := server.StartTestServer(t)
	defer s.Stop()
	db := setupMultipleRanges(t, s, "b", "c")
	// The requests in the transaction below will be chunked and
	// sent to replicas in the following way:
	// 1) A batch request containing a BeginTransaction and a
	//    put on "a" are sent to a replica owning range ["a","b").
	// 2) A next batch request containing a put on "b" and a put
	//    on "c" are sent to a replica owning range ["b","c").
	//    (The range cache has a stale range descriptor.)
	// 3) The put request on "c" causes a RangeKeyMismatchError.
	// 4) The dist sender re-sends a request to the same replica.
	//    This time the request contains only the put on "b" to the
	//    same replica.
	// 5) The command succeeds since the sequence cache has not yet been updated.
	// epoch counts how many times the txn closure runs; it must not be retried.
	epoch := 0
	if pErr := db.Txn(func(txn *client.Txn) *roachpb.Error {
		epoch++
		b := txn.NewBatch()
		b.Put("a", "val")
		b.Put("b", "val")
		b.Put("c", "val")
		return txn.CommitInBatch(b)
	}); pErr != nil {
		t.Errorf("unexpected error on transactional Puts: %s", pErr)
	}
	if epoch != 1 {
		t.Errorf("unexpected epoch; the txn must not be retried, but got %d retries", epoch)
	}
}
// TestPropagateTxnOnError verifies that DistSender.sendChunk properly
// propagates the txn data to a next iteration. Use txn.Writing field to
// verify that.
func TestPropagateTxnOnError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Set up a filter so that the first CPut operation (and only the
	// first — numGets gates it) will get a
	// ReadWithinUncertaintyIntervalError.
	targetKey := roachpb.Key("b")
	var numGets int32
	ctx := server.NewTestContext()
	ctx.TestingMocker.StoreTestingMocker.TestingCommandFilter =
		func(_ roachpb.StoreID, args roachpb.Request, h roachpb.Header) error {
			if _, ok := args.(*roachpb.ConditionalPutRequest); ok && args.Header().Key.Equal(targetKey) {
				if atomic.AddInt32(&numGets, 1) == 1 {
					return roachpb.NewReadWithinUncertaintyIntervalError(
						roachpb.ZeroTimestamp, roachpb.ZeroTimestamp)
				}
			}
			return nil
		}
	s := server.StartTestServerWithContext(t, ctx)
	defer s.Stop()
	db := setupMultipleRanges(t, s, "b")
	// Set the initial value on the target key "b".
	origVal := "val"
	if pErr := db.Put(targetKey, origVal); pErr != nil {
		t.Fatal(pErr)
	}
	// The following txn creates a batch request that is split
	// into two requests: Put and CPut. The CPut operation will
	// get a ReadWithinUncertaintyIntervalError and the txn will be
	// retried.
	epoch := 0
	if pErr := db.Txn(func(txn *client.Txn) *roachpb.Error {
		epoch++
		if epoch >= 2 {
			// Writing must be true since we ran the BeginTransaction command.
			if !txn.Proto.Writing {
				t.Errorf("unexpected non-writing txn")
			}
		} else {
			// Writing must be false since we haven't run any write command.
			if txn.Proto.Writing {
				t.Errorf("unexpected writing txn")
			}
		}
		b := txn.NewBatch()
		b.Put("a", "val")
		b.CPut(targetKey, "new_val", origVal)
		pErr := txn.CommitInBatch(b)
		if epoch == 1 {
			if _, ok := pErr.GetDetail().(*roachpb.ReadWithinUncertaintyIntervalError); ok {
				// The txn attached to the error must carry Writing=true
				// forward into the retry.
				if !pErr.GetTxn().Writing {
					t.Errorf("unexpected non-writing txn on error")
				}
			} else {
				t.Errorf("expected ReadWithinUncertaintyIntervalError, but got: %s", pErr)
			}
		}
		return pErr
	}); pErr != nil {
		t.Errorf("unexpected error on transactional Puts: %s", pErr)
	}
	if epoch != 2 {
		t.Errorf("unexpected epoch; the txn must be retried exactly once, but got %d", epoch)
	}
}
// TestPropagateTxnOnPushError is similar to TestPropagateTxnOnError,
// but verifies that txn data are propagated to the next iteration on
// TransactionPushError.
func TestPropagateTxnOnPushError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := server.StartTestServer(t)
	defer s.Stop()
	db := setupMultipleRanges(t, s, "b")
	waitForWriteIntent := make(chan struct{})
	waitForTxnRestart := make(chan struct{})
	waitForTxnCommit := make(chan struct{})
	// Create a goroutine that creates a write intent and waits until
	// another txn created in this test is restarted.
	go func() {
		if pErr := db.Txn(func(txn *client.Txn) *roachpb.Error {
			if pErr := txn.Put("b", "val"); pErr != nil {
				return pErr
			}
			close(waitForWriteIntent)
			// Wait until another txn in this test is
			// restarted by a push txn error.
			<-waitForTxnRestart
			return txn.CommitInBatch(txn.NewBatch())
		}); pErr != nil {
			t.Errorf("unexpected error on transactional Puts: %s", pErr)
		}
		close(waitForTxnCommit)
	}()
	// Wait until a write intent is created by the above goroutine.
	<-waitForWriteIntent
	// The transaction below is restarted multiple times.
	// - The first retry is caused by the write intent created on key "b" by the above goroutine.
	// - The subsequent retries are caused by the write conflict on key "a". Since the txn
	//   ID is not propagated, a txn of a new epoch always has a new txn ID different
	//   from the previous txn's. So, the write intent made by the txn of the previous epoch
	//   is treated as a write made by some different txn.
	epoch := 0
	var txnID *uuid.UUID
	if pErr := db.Txn(func(txn *client.Txn) *roachpb.Error {
		// Set low priority so that the intent will not be pushed.
		txn.InternalSetPriority(1)
		epoch++
		if epoch == 2 {
			close(waitForTxnRestart)
			// Wait until the txn created by the goroutine is committed.
			<-waitForTxnCommit
			if !roachpb.TxnIDEqual(txn.Proto.ID, txnID) {
				t.Errorf("txn ID is not propagated; got %s", txn.Proto.ID)
			}
		}
		b := txn.NewBatch()
		b.Put("a", "val")
		b.Put("b", "val")
		// The commit returns an error, but it will not be
		// passed to the next iteration. txnSender.Send() does
		// not update the txn data since
		// TransactionPushError.Transaction() returns nil.
		pErr := txn.CommitInBatch(b)
		if epoch == 1 {
			if tErr, ok := pErr.GetDetail().(*roachpb.TransactionPushError); ok {
				if pErr.GetTxn().ID == nil {
					// Message fixed: the ID being nil is the unexpected condition.
					t.Errorf("txn ID is unexpectedly not set: %s", tErr)
				}
				txnID = pErr.GetTxn().ID
			} else {
				// Message fixed: the asserted type is TransactionPushError,
				// not TransactionRetryError.
				t.Errorf("expected TransactionPushError, but got: %s", pErr)
			}
		}
		return pErr
	}); pErr != nil {
		t.Errorf("unexpected error on transactional Puts: %s", pErr)
	}
	if e := 2; epoch != e {
		t.Errorf("unexpected epoch; the txn must be attempted %d times, but got %d attempts", e, epoch)
	}
}
Explicitly set the txn priority in TestPropagateTxnOnPushError
I couldn't reproduce a failure locally with "make stress PKG=./kv", so
this will probably not fix the stress-test failure we saw (#4906), but I
wanted to give it a shot.
Also fix comments, clean up the test, and add some logging. The test
was updated with 2a858b06, but most of the comments were not updated.
// Copyright 2015 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Author: Spencer Kimball (spencer.kimball@gmail.com)
package kv_test
import (
"errors"
"reflect"
"sync/atomic"
"testing"
"github.com/cockroachdb/cockroach/client"
"github.com/cockroachdb/cockroach/keys"
"github.com/cockroachdb/cockroach/kv"
"github.com/cockroachdb/cockroach/roachpb"
"github.com/cockroachdb/cockroach/server"
"github.com/cockroachdb/cockroach/storage/engine"
"github.com/cockroachdb/cockroach/testutils"
"github.com/cockroachdb/cockroach/util"
"github.com/cockroachdb/cockroach/util/hlc"
"github.com/cockroachdb/cockroach/util/leaktest"
"github.com/cockroachdb/cockroach/util/log"
"github.com/cockroachdb/cockroach/util/uuid"
)
// NOTE: these tests are in package kv_test to avoid a circular
// dependency between the server and kv packages. These tests rely on
// starting a TestServer, which creates a "real" node and employs a
// distributed sender server-side.
// TestRangeLookupWithOpenTransaction verifies that range lookups are
// done in such a way (e.g. using inconsistent reads) that they
// proceed in the event that a write intent is extant at the meta
// index record being read.
func TestRangeLookupWithOpenTransaction(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := server.StartTestServer(t)
	defer s.Stop()
	db := createTestClient(t, s.Stopper(), s.ServingAddr())
	// Plant an intent directly on the meta1 record by writing to the
	// engine, bypassing the transaction machinery; the txn is never
	// resolved, so the intent stays open.
	metaKey := testutils.MakeKey(keys.Meta1Prefix, roachpb.KeyMax)
	writeTS := s.Clock().Now()
	openTxn := roachpb.NewTransaction("txn", roachpb.Key("foobar"), 0, roachpb.SERIALIZABLE, writeTS, 0)
	if err := engine.MVCCPutProto(s.Ctx.Engines[0], nil, metaKey, writeTS, openTxn, &roachpb.RangeDescriptor{}); err != nil {
		t.Fatal(err)
	}
	// With the intent pending, read an arbitrary key. The distributed
	// sender performs a range lookup which encounters the intent; the
	// lookup must not fail with a write intent error, since that would
	// loop forever pushing the transaction (each push itself requiring
	// another range lookup).
	if _, err := db.Get("a"); err != nil {
		t.Fatal(err)
	}
}
// setupMultipleRanges creates a client against the given test server
// and splits the keyspace at each of the given keys. The caller is
// responsible for stopping the server and closing the client.
func setupMultipleRanges(t *testing.T, ts *server.TestServer, splitAt ...string) *client.DB {
	db := createTestClient(t, ts.Stopper(), ts.ServingAddr())
	for _, splitKey := range splitAt {
		// Fail fast on split errors so we don't leak server goroutines.
		if err := db.AdminSplit(splitKey); err != nil {
			t.Fatal(err)
		}
	}
	return db
}
// TestMultiRangeBatchBounded verifies that bounded scans (scans with a
// non-zero max-results argument) issued together in one batch each
// return their own correctly-truncated result set when the scanned
// spans straddle multiple ranges.
func TestMultiRangeBatchBounded(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := server.StartTestServer(t)
	defer s.Stop()
	db := setupMultipleRanges(t, s, "a", "b", "c", "d", "e", "f")
	// Populate keys spread across several of the ranges created above.
	for _, key := range []string{"a", "aa", "aaa", "b", "bb", "cc", "d", "dd", "ff"} {
		if err := db.Put(key, "value"); err != nil {
			t.Fatal(err)
		}
	}
	// One expected key slice per scan below; each scan is bounded
	// independently of the others in the batch.
	expResults := [][]string{
		{"aaa", "b", "bb"},
		{"a", "aa"},
		{"cc", "d", "dd"},
	}
	b := db.NewBatch()
	b.Scan("aaa", "dd", 3)
	b.Scan("a", "z", 2)
	b.Scan("cc", "ff", 3)
	if err := db.Run(b); err != nil {
		t.Fatal(err)
	}
	if len(expResults) != len(b.Results) {
		t.Fatalf("only got %d results, wanted %d", len(expResults), len(b.Results))
	}
	// Compare the returned keys of each result against the expectations.
	for i, res := range b.Results {
		expRes := expResults[i]
		var actRes []string
		for _, k := range res.Rows {
			actRes = append(actRes, string(k.Key))
		}
		if !reflect.DeepEqual(actRes, expRes) {
			t.Errorf("%d: got %v, wanted %v", i, actRes, expRes)
		}
	}
}
// TestMultiRangeEmptyAfterTruncate exercises a code path in which a
// multi-range request deals with a range without any active requests after
// truncation. In that case, the request is skipped.
func TestMultiRangeEmptyAfterTruncate(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := server.StartTestServer(t)
	defer s.Stop()
	db := setupMultipleRanges(t, s, "c", "d")
	// Delete two spans in a single transaction; neither span touches the
	// middle range [c,d), which therefore ends up with no active requests.
	pErr := db.Txn(func(txn *client.Txn) *roachpb.Error {
		b := txn.NewBatch()
		for _, span := range [][2]string{{"a", "b"}, {"e", "f"}} {
			b.DelRange(span[0], span[1], false)
		}
		return txn.CommitInBatch(b)
	})
	if pErr != nil {
		t.Fatalf("unexpected error on transactional DeleteRange: %s", pErr)
	}
}
// TestMultiRangeScanReverseScanDeleteResolve verifies that Scan, ReverseScan,
// DeleteRange and ResolveIntentRange work across ranges.
func TestMultiRangeScanReverseScanDeleteResolve(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := server.StartTestServer(t)
	defer s.Stop()
	db := setupMultipleRanges(t, s, "b")
	// Write keys before, at, and after the split key.
	for _, key := range []string{"a", "b", "c"} {
		if pErr := db.Put(key, "value"); pErr != nil {
			t.Fatal(pErr)
		}
	}
	// Scan to retrieve the keys just written.
	if rows, pErr := db.Scan("a", "q", 0); pErr != nil {
		t.Fatalf("unexpected pError on Scan: %s", pErr)
	} else if l := len(rows); l != 3 {
		t.Errorf("expected 3 rows; got %d", l)
	}
	// Scan in reverse order to retrieve the keys just written.
	if rows, pErr := db.ReverseScan("a", "q", 0); pErr != nil {
		t.Fatalf("unexpected error on ReverseScan: %s", pErr)
	} else if l := len(rows); l != 3 {
		t.Errorf("expected 3 rows; got %d", l)
	}
	// Delete the keys within a transaction. Implicitly, the intents are
	// resolved via ResolveIntentRange upon completion.
	if pErr := db.Txn(func(txn *client.Txn) *roachpb.Error {
		b := txn.NewBatch()
		b.DelRange("a", "d", false)
		return txn.CommitInBatch(b)
	}); pErr != nil {
		t.Fatalf("unexpected error on transactional DeleteRange: %s", pErr)
	}
	// Scan consistently to make sure the intents are gone. A consistent
	// read would block or error if intents were left unresolved.
	if rows, pErr := db.Scan("a", "q", 0); pErr != nil {
		t.Fatalf("unexpected error on Scan: %s", pErr)
	} else if l := len(rows); l != 0 {
		t.Errorf("expected 0 rows; got %d", l)
	}
	// ReverseScan consistently to make sure the intents are gone.
	if rows, pErr := db.ReverseScan("a", "q", 0); pErr != nil {
		t.Fatalf("unexpected error on ReverseScan: %s", pErr)
	} else if l := len(rows); l != 0 {
		t.Errorf("expected 0 rows; got %d", l)
	}
}
// TestMultiRangeScanReverseScanInconsistent verifies that a Scan/ReverseScan
// across ranges that doesn't require read consistency will set a timestamp
// using the clock local to the distributed sender.
func TestMultiRangeScanReverseScanInconsistent(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := server.StartTestServer(t)
	defer s.Stop()
	db := setupMultipleRanges(t, s, "b")
	// Write keys "a" and "b", the latter of which is the first key in the
	// second range.
	keys := [2]string{"a", "b"}
	ts := [2]roachpb.Timestamp{}
	for i, key := range keys {
		b := &client.Batch{}
		b.Put(key, "value")
		if pErr := db.Run(b); pErr != nil {
			t.Fatal(pErr)
		}
		// Record the server clock reading taken just after each write.
		ts[i] = s.Clock().Now()
		log.Infof("%d: %d", i, ts[i])
		if i == 0 {
			util.SucceedsSoon(t, func() error {
				// Enforce that when we write the second key, it's written
				// with a strictly higher timestamp. We're dropping logical
				// ticks and the clock may just have been pushed into the
				// future, so that's necessary. See #3122.
				if !ts[0].Less(s.Clock().Now()) {
					return errors.New("time stands still")
				}
				return nil
			})
		}
	}
	// Do an inconsistent Scan/ReverseScan from a new DistSender and verify
	// it does the read at its local clock and doesn't receive an
	// OpRequiresTxnError. We set the local clock to the timestamp of
	// just above the first key to verify it's used to read only key "a".
	manual := hlc.NewManualClock(ts[0].WallTime + 1)
	clock := hlc.NewClock(manual.UnixNano)
	ds := kv.NewDistSender(&kv.DistSenderContext{Clock: clock, RPCContext: s.RPCContext()}, s.Gossip())
	// Scan. Since the sender's clock sits just above the write of "a" but
	// below the write of "b", only "a" should come back.
	sa := roachpb.NewScan(roachpb.Key("a"), roachpb.Key("c"), 0).(*roachpb.ScanRequest)
	reply, pErr := client.SendWrappedWith(ds, nil, roachpb.Header{
		ReadConsistency: roachpb.INCONSISTENT,
	}, sa)
	if pErr != nil {
		t.Fatal(pErr)
	}
	sr := reply.(*roachpb.ScanResponse)
	if l := len(sr.Rows); l != 1 {
		t.Fatalf("expected 1 row; got %d", l)
	}
	if key := string(sr.Rows[0].Key); keys[0] != key {
		t.Errorf("expected key %q; got %q", keys[0], key)
	}
	// ReverseScan. Same expectation as the forward scan above.
	rsa := roachpb.NewReverseScan(roachpb.Key("a"), roachpb.Key("c"), 0).(*roachpb.ReverseScanRequest)
	reply, pErr = client.SendWrappedWith(ds, nil, roachpb.Header{
		ReadConsistency: roachpb.INCONSISTENT,
	}, rsa)
	if pErr != nil {
		t.Fatal(pErr)
	}
	rsr := reply.(*roachpb.ReverseScanResponse)
	if l := len(rsr.Rows); l != 1 {
		t.Fatalf("expected 1 row; got %d", l)
	}
	if key := string(rsr.Rows[0].Key); keys[0] != key {
		t.Errorf("expected key %q; got %q", keys[0], key)
	}
}
// initReverseScanTestEnv creates a test client, splits the keyspace into
// the ranges ["", "b"), ["b", "e"), ["e", "g") and ["g", "\xff\xff"),
// and writes one value at each of the keys "a" through "h". It returns
// the client ready for reverse-scan testing.
func initReverseScanTestEnv(s *server.TestServer, t *testing.T) *client.DB {
	db := createTestClient(t, s.Stopper(), s.ServingAddr())
	splitKeys := []string{"b", "e", "g"}
	writeKeys := []string{"a", "b", "c", "d", "e", "f", "g", "h"}
	// Carve the keyspace up first, then populate it so the writes land
	// before, at, and after each split boundary.
	for _, k := range splitKeys {
		if pErr := db.AdminSplit(k); pErr != nil {
			t.Fatal(pErr)
		}
	}
	for _, k := range writeKeys {
		if pErr := db.Put(k, "value"); pErr != nil {
			t.Fatal(pErr)
		}
	}
	return db
}
// TestSingleRangeReverseScan verifies that ReverseScan gets the right results
// on a single range.
func TestSingleRangeReverseScan(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := server.StartTestServer(t)
	defer s.Stop()
	db := initReverseScanTestEnv(s, t)
	// Case 1: Request.EndKey is in the middle of the range.
	if rows, pErr := db.ReverseScan("b", "d", 0); pErr != nil {
		t.Fatalf("unexpected pError on ReverseScan: %s", pErr)
	} else if l := len(rows); l != 2 {
		t.Errorf("expected 2 rows; got %d", l)
	}
	// Case 2: Request.EndKey is equal to the EndKey of the range.
	if rows, pErr := db.ReverseScan("e", "g", 0); pErr != nil {
		t.Fatalf("unexpected error on ReverseScan: %s", pErr)
	} else if l := len(rows); l != 2 {
		t.Errorf("expected 2 rows; got %d", l)
	}
	// Case 3: Test keys.TableDataMin. Expected to return "g" and "h".
	wanted := 2
	if rows, pErr := db.ReverseScan("g", keys.TableDataMin, 0); pErr != nil {
		t.Fatalf("unexpected error on ReverseScan: %s", pErr)
	} else if l := len(rows); l != wanted {
		t.Errorf("expected %d rows; got %d", wanted, l)
	}
	// Case 4: Test keys.SystemMax
	// This span covers the system DB keys. Note sql.GetInitialSystemValues
	// returns one key before keys.SystemMax, but our scan is including one key
	// (\xffa) created for the test.
	if rows, pErr := db.ReverseScan(keys.SystemMax, "b", 0); pErr != nil {
		t.Fatalf("unexpected error on ReverseScan: %s", pErr)
	} else if l := len(rows); l != 1 {
		t.Errorf("expected 1 row; got %d", l)
	}
}
// TestMultiRangeReverseScan verifies that ReverseScan gets the right results
// across multiple ranges.
func TestMultiRangeReverseScan(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := server.StartTestServer(t)
	defer s.Stop()
	db := initReverseScanTestEnv(s, t)
	// Case 1 ends in the middle of a range; case 2 ends exactly on a
	// range's EndKey. Both spans cross a split and expect three rows.
	spans := []struct {
		start, end string
	}{
		{"a", "d"},
		{"d", "g"},
	}
	for _, sp := range spans {
		rows, pErr := db.ReverseScan(sp.start, sp.end, 0)
		if pErr != nil {
			t.Fatalf("unexpected error on ReverseScan: %s", pErr)
		}
		if l := len(rows); l != 3 {
			t.Errorf("expected 3 rows; got %d", l)
		}
	}
}
// TestReverseScanWithSplitAndMerge verifies that ReverseScan gets the right results
// across multiple ranges while range splits and merges happen.
func TestReverseScanWithSplitAndMerge(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := server.StartTestServer(t)
	defer s.Stop()
	db := initReverseScanTestEnv(s, t)
	// Case 1: splitting ["b", "e") at "c" leaves the sender with a stale
	// range descriptor that the ReverseScan must recover from.
	if pErr := db.AdminSplit("c"); pErr != nil {
		t.Fatal(pErr)
	}
	rows, pErr := db.ReverseScan("a", "d", 0)
	if pErr != nil {
		t.Fatalf("unexpected error on ReverseScan: %s", pErr)
	}
	if l := len(rows); l != 3 {
		t.Errorf("expected 3 rows; got %d", l)
	}
	// Case 2: merge the ranges ["e", "g") and ["g", "\xff\xff"), then
	// reverse-scan across the merged span.
	if pErr := db.AdminMerge("e"); pErr != nil {
		t.Fatal(pErr)
	}
	rows, pErr = db.ReverseScan("d", "g", 0)
	if pErr != nil {
		t.Fatalf("unexpected error on ReverseScan: %s", pErr)
	}
	if l := len(rows); l != 3 {
		t.Errorf("expected 3 rows; got %d", l)
	}
}
// TestBadRequest verifies that malformed requests — zero-length spans,
// inverted spans, and spans reaching below the permitted key space —
// are rejected with the expected errors.
func TestBadRequest(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := server.StartTestServer(t)
	// Register Stop before creating the client so the server is not
	// leaked if client creation or a later assertion aborts the test.
	defer s.Stop()
	db := createTestClient(t, s.Stopper(), s.ServingAddr())
	// Write key "a".
	if pErr := db.Put("a", "value"); pErr != nil {
		t.Fatal(pErr)
	}
	// A scan with startkey == endkey is empty after truncation.
	if _, pErr := db.Scan("a", "a", 0); !testutils.IsPError(pErr, "truncation resulted in empty batch") {
		t.Fatalf("unexpected error on scan with startkey == endkey: %v", pErr)
	}
	if _, pErr := db.ReverseScan("a", "a", 0); !testutils.IsPError(pErr, "truncation resulted in empty batch") {
		t.Fatalf("unexpected pError on reverse scan with startkey == endkey: %v", pErr)
	}
	// An inverted span ["x", "a") also truncates to nothing.
	if pErr := db.DelRange("x", "a"); !testutils.IsPError(pErr, "truncation resulted in empty batch") {
		t.Fatalf("unexpected error on deletion on [x, a): %v", pErr)
	}
	// A span starting at KeyMin reaches below the allowed key space.
	if pErr := db.DelRange("", "z"); !testutils.IsPError(pErr, "must be greater than LocalMax") {
		t.Fatalf("unexpected error on deletion on [KeyMin, z): %v", pErr)
	}
}
// TestNoSequenceCachePutOnRangeMismatchError verifies that the
// sequence cache is not updated with RangeKeyMismatchError. This is a
// higher-level version of TestSequenceCacheShouldCache.
func TestNoSequenceCachePutOnRangeMismatchError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := server.StartTestServer(t)
	defer s.Stop()
	db := setupMultipleRanges(t, s, "b", "c")
	// The requests in the transaction below will be chunked and
	// sent to replicas in the following way:
	// 1) A batch request containing a BeginTransaction and a
	//    put on "a" are sent to a replica owning range ["a","b").
	// 2) A next batch request containing a put on "b" and a put
	//    on "c" are sent to a replica owning range ["b","c").
	//    (The range cache has a stale range descriptor.)
	// 3) The put request on "c" causes a RangeKeyMismatchError.
	// 4) The dist sender re-sends a request to the same replica.
	//    This time the request contains only the put on "b" to the
	//    same replica.
	// 5) The command succeeds since the sequence cache has not yet been updated.
	epoch := 0
	if pErr := db.Txn(func(txn *client.Txn) *roachpb.Error {
		epoch++
		b := txn.NewBatch()
		b.Put("a", "val")
		b.Put("b", "val")
		b.Put("c", "val")
		return txn.CommitInBatch(b)
	}); pErr != nil {
		t.Errorf("unexpected error on transactional Puts: %s", pErr)
	}
	// A retry would indicate the sequence cache was (wrongly) poisoned
	// by the RangeKeyMismatchError.
	if epoch != 1 {
		t.Errorf("unexpected epoch; the txn must not be retried, but got %d retries", epoch)
	}
}
// TestPropagateTxnOnError verifies that DistSender.sendChunk properly
// propagates the txn data to a next iteration. Use txn.Writing field to
// verify that.
func TestPropagateTxnOnError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Set up a filter to so that the first CPut operation will
	// get a ReadWithinUncertaintyIntervalError.
	targetKey := roachpb.Key("b")
	var numGets int32
	ctx := server.NewTestContext()
	// The filter fires exactly once — on the first CPut touching
	// targetKey — forcing a single retry of the transaction below.
	ctx.TestingMocker.StoreTestingMocker.TestingCommandFilter =
		func(_ roachpb.StoreID, args roachpb.Request, h roachpb.Header) error {
			if _, ok := args.(*roachpb.ConditionalPutRequest); ok && args.Header().Key.Equal(targetKey) {
				if atomic.AddInt32(&numGets, 1) == 1 {
					return roachpb.NewReadWithinUncertaintyIntervalError(
						roachpb.ZeroTimestamp, roachpb.ZeroTimestamp)
				}
			}
			return nil
		}
	s := server.StartTestServerWithContext(t, ctx)
	defer s.Stop()
	db := setupMultipleRanges(t, s, "b")
	// Set the initial value on the target key "b".
	origVal := "val"
	if pErr := db.Put(targetKey, origVal); pErr != nil {
		t.Fatal(pErr)
	}
	// The following txn creates a batch request that is split
	// into two requests: Put and CPut. The CPut operation will
	// get a ReadWithinUncertaintyIntervalError and the txn will be
	// retried.
	epoch := 0
	if pErr := db.Txn(func(txn *client.Txn) *roachpb.Error {
		epoch++
		if epoch >= 2 {
			// Writing must be true since we ran the BeginTransaction command.
			if !txn.Proto.Writing {
				t.Errorf("unexpected non-writing txn")
			}
		} else {
			// Writing must be false since we haven't run any write command.
			if txn.Proto.Writing {
				t.Errorf("unexpected writing txn")
			}
		}
		b := txn.NewBatch()
		b.Put("a", "val")
		b.CPut(targetKey, "new_val", origVal)
		pErr := txn.CommitInBatch(b)
		if epoch == 1 {
			// The injected error on the first attempt must carry a txn
			// already marked as writing, proving the txn data was
			// propagated into the error.
			if _, ok := pErr.GetDetail().(*roachpb.ReadWithinUncertaintyIntervalError); ok {
				if !pErr.GetTxn().Writing {
					t.Errorf("unexpected non-writing txn on error")
				}
			} else {
				t.Errorf("expected ReadWithinUncertaintyIntervalError, but got: %s", pErr)
			}
		}
		return pErr
	}); pErr != nil {
		t.Errorf("unexpected error on transactional Puts: %s", pErr)
	}
	if epoch != 2 {
		t.Errorf("unexpected epoch; the txn must be retried exactly once, but got %d", epoch)
	}
}
// TestPropagateTxnOnPushError is similar to TestPropagateTxnOnError,
// but verifies that txn data are propagated to the next iteration on
// TransactionPushError.
func TestPropagateTxnOnPushError(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := server.StartTestServer(t)
	defer s.Stop()
	db := setupMultipleRanges(t, s, "b")
	// Channels used to interleave the two transactions deterministically.
	waitForWriteIntent := make(chan struct{})
	waitForTxnRestart := make(chan struct{})
	waitForTxnCommit := make(chan struct{})
	lowPriority := int32(1)
	highPriority := int32(10)
	key := "a"
	// Create a goroutine that creates a write intent and waits until
	// another txn created in this test is restarted.
	go func() {
		if pErr := db.Txn(func(txn *client.Txn) *roachpb.Error {
			// Set high priority so that the intent will not be pushed.
			txn.InternalSetPriority(highPriority)
			log.Infof("Creating a write intent with high priority")
			if pErr := txn.Put(key, "val"); pErr != nil {
				return pErr
			}
			close(waitForWriteIntent)
			// Wait until another txn in this test is
			// restarted by a push txn error.
			log.Infof("Waiting for the txn restart")
			<-waitForTxnRestart
			return txn.CommitInBatch(txn.NewBatch())
		}); pErr != nil {
			t.Errorf("unexpected error on transactional Puts: %s", pErr)
		}
		close(waitForTxnCommit)
	}()
	// Wait until a write intent is created by the above goroutine.
	log.Infof("Waiting for the write intent creation")
	<-waitForWriteIntent
	// The transaction below is restarted exactly once. The restart is
	// caused by the write intent created on key "a" by the above goroutine.
	// When the txn is retried, the error propagates the txn ID to the next
	// iteration.
	epoch := 0
	var txnID *uuid.UUID
	if pErr := db.Txn(func(txn *client.Txn) *roachpb.Error {
		// Set low priority so that a write from this txn will not push others.
		txn.InternalSetPriority(lowPriority)
		epoch++
		if epoch == 2 {
			close(waitForTxnRestart)
			// Wait until the txn created by the goroutine is committed.
			log.Infof("Waiting for the txn commit")
			<-waitForTxnCommit
			// The retried attempt must carry the txn ID recorded from the
			// first attempt's push error.
			if !roachpb.TxnIDEqual(txn.Proto.ID, txnID) {
				t.Errorf("txn ID is not propagated; got %s", txn.Proto.ID)
			}
		}
		// The commit returns an error, and it will pass
		// the txn data to the next iteration.
		pErr := txn.Put(key, "val")
		if epoch == 1 {
			if tErr, ok := pErr.GetDetail().(*roachpb.TransactionPushError); ok {
				if pErr.GetTxn().ID == nil {
					t.Errorf("txn ID is not set unexpectedly: %s", tErr)
				}
				txnID = pErr.GetTxn().ID
			} else {
				t.Errorf("expected TransactionRetryError, but got: %s", pErr)
			}
		}
		return pErr
	}); pErr != nil {
		t.Errorf("unexpected error on transactional Puts: %s", pErr)
	}
	if e := 2; epoch != e {
		t.Errorf("unexpected epoch; the txn must be attempted %d times, but got %d attempts", e, epoch)
		if epoch == 1 {
			// Wait for the completion of the goroutine to see if it successfully
			// commits the txn; unblocking it here also prevents a goroutine leak
			// when the expected restart never happened.
			close(waitForTxnRestart)
			log.Infof("Waiting for the txn commit")
			<-waitForTxnCommit
		}
	}
}
|
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package assettasks
import (
"fmt"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/assets"
"k8s.io/kops/upup/pkg/fi"
)
// copyAssetsTarget is a stateless target passed to fi.NewContext when
// running asset-copy tasks; presumably it satisfies the target interface
// fi.NewContext expects — confirm against the fi package.
type copyAssetsTarget struct {
}

// Finish implements the target's post-run hook; there is no cleanup to do.
func (c copyAssetsTarget) Finish(taskMap map[string]fi.Task) error {
	return nil
}

// ProcessDeletions reports that asset-copy runs never process deletions.
func (c copyAssetsTarget) ProcessDeletions() bool {
	return false
}
func Copy(imageAssets []*assets.ImageAsset, fileAssets []*assets.FileAsset, cluster *kops.Cluster) error {
tasks := map[string]fi.Task{}
for _, imageAsset := range imageAssets {
if imageAsset.DownloadLocation != imageAsset.CanonicalLocation {
ctx := &fi.ModelBuilderContext{
Tasks: tasks,
}
copyImageTask := &CopyImage{
Name: fi.String(imageAsset.DownloadLocation),
SourceImage: fi.String(imageAsset.CanonicalLocation),
TargetImage: fi.String(imageAsset.DownloadLocation),
Lifecycle: fi.LifecycleSync,
}
if err := ctx.EnsureTask(copyImageTask); err != nil {
return fmt.Errorf("error adding image-copy task: %v", err)
}
tasks = ctx.Tasks
}
}
for _, fileAsset := range fileAssets {
// test if the asset needs to be copied
if fileAsset.DownloadURL.String() != fileAsset.CanonicalURL.String() {
ctx := &fi.ModelBuilderContext{
Tasks: tasks,
}
copyFileTask := &CopyFile{
Name: fi.String(fileAsset.CanonicalURL.String()),
TargetFile: fi.String(fileAsset.DownloadURL.String()),
SourceFile: fi.String(fileAsset.CanonicalURL.String()),
SHA: fi.String(fileAsset.SHAValue),
Lifecycle: fi.LifecycleSync,
}
if err := ctx.EnsureTask(copyFileTask); err != nil {
return fmt.Errorf("error adding file-copy task: %v", err)
}
tasks = ctx.Tasks
}
}
var options fi.RunTasksOptions
options.InitDefaults()
context, err := fi.NewContext(©AssetsTarget{}, cluster, nil, nil, nil, nil, true, tasks)
if err != nil {
return fmt.Errorf("error building context: %v", err)
}
defer context.Close()
err = context.RunTasks(options)
if err != nil {
return fmt.Errorf("error running tasks: %v", err)
}
return nil
}
Limit concurrency of asset copy tasks
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package assettasks
import (
"fmt"
"sort"
"k8s.io/klog/v2"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/assets"
"k8s.io/kops/upup/pkg/fi"
)
// copyAssetsTarget is a stateless target handed to fi.NewContext for
// asset-copy runs; presumably it satisfies the target interface that
// fi.NewContext expects — confirm against the fi package.
type copyAssetsTarget struct {
}

// Finish implements the target's post-run hook; nothing to clean up.
func (c copyAssetsTarget) Finish(taskMap map[string]fi.Task) error {
	return nil
}

// ProcessDeletions reports that asset-copy runs never process deletions.
func (c copyAssetsTarget) ProcessDeletions() bool {
	return false
}
func Copy(imageAssets []*assets.ImageAsset, fileAssets []*assets.FileAsset, cluster *kops.Cluster) error {
tasks := map[string]fi.Task{}
for _, imageAsset := range imageAssets {
if imageAsset.DownloadLocation != imageAsset.CanonicalLocation {
ctx := &fi.ModelBuilderContext{
Tasks: tasks,
}
copyImageTask := &CopyImage{
Name: fi.String(imageAsset.DownloadLocation),
SourceImage: fi.String(imageAsset.CanonicalLocation),
TargetImage: fi.String(imageAsset.DownloadLocation),
Lifecycle: fi.LifecycleSync,
}
if err := ctx.EnsureTask(copyImageTask); err != nil {
return fmt.Errorf("error adding image-copy task: %v", err)
}
tasks = ctx.Tasks
}
}
for _, fileAsset := range fileAssets {
// test if the asset needs to be copied
if fileAsset.DownloadURL.String() != fileAsset.CanonicalURL.String() {
ctx := &fi.ModelBuilderContext{
Tasks: tasks,
}
copyFileTask := &CopyFile{
Name: fi.String(fileAsset.CanonicalURL.String()),
TargetFile: fi.String(fileAsset.DownloadURL.String()),
SourceFile: fi.String(fileAsset.CanonicalURL.String()),
SHA: fi.String(fileAsset.SHAValue),
Lifecycle: fi.LifecycleSync,
}
if err := ctx.EnsureTask(copyFileTask); err != nil {
return fmt.Errorf("error adding file-copy task: %v", err)
}
tasks = ctx.Tasks
}
}
var options fi.RunTasksOptions
options.InitDefaults()
context, err := fi.NewContext(©AssetsTarget{}, cluster, nil, nil, nil, nil, true, tasks)
if err != nil {
return fmt.Errorf("error building context: %v", err)
}
defer context.Close()
ch := make(chan error, 5)
for i := 0; i < cap(ch); i++ {
ch <- nil
}
gotError := false
names := make([]string, 0, len(tasks))
for name := range tasks {
names = append(names, name)
}
sort.Strings(names)
for _, name := range names {
task := tasks[name]
err := <-ch
if err != nil {
klog.Warning(err)
gotError = true
}
go func(n string, t fi.Task) {
err := t.Run(context)
if err != nil {
err = fmt.Errorf("%s: %v", n, err)
}
ch <- err
}(name, task)
}
for i := 0; i < cap(ch); i++ {
err = <-ch
if err != nil {
klog.Warning(err)
gotError = true
}
}
close(ch)
if gotError {
return fmt.Errorf("not all assets copied successfully")
}
return nil
}
|
// Copyright 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package local
import (
gc "launchpad.net/gocheck"
"launchpad.net/juju-core/environs/config"
)
var Provider = provider
// SetDefaultRootDir overrides the default root directory for testing
// purposes. It returns the previous value so callers can restore it
// when the test finishes.
func SetDefaultRootDir(rootdir string) (old string) {
	old, defaultRootDir = defaultRootDir, rootdir
	return
}
// SetRootCheckFunction allows tests to override the check for a root user.
// The previous function is returned so it can be restored afterwards.
func SetRootCheckFunction(f func() bool) (old func() bool) {
	old, rootCheckFunction = rootCheckFunction, f
	return
}
// ConfigNamespace returns the result of the namespace call on the
// localConfig.
func ConfigNamespace(cfg *config.Config) string {
	// NOTE(review): the error from newConfig is discarded; an invalid cfg
	// would likely leave localConfig nil and panic on the next line —
	// confirm callers always pass a valid config.
	localConfig, _ := provider.newConfig(cfg)
	return localConfig.namespace()
}
// CreateDirs calls createDirs on the localEnviron. A config-building
// error aborts the test immediately via the gocheck assertion.
func CreateDirs(c *gc.C, cfg *config.Config) error {
	localConfig, err := provider.newConfig(cfg)
	c.Assert(err, gc.IsNil)
	return localConfig.createDirs()
}
// CheckDirs returns the list of directories to check for permissions in the test.
func CheckDirs(c *gc.C, cfg *config.Config) []string {
	localConfig, err := provider.newConfig(cfg)
	c.Assert(err, gc.IsNil)
	// The four directories managed by the local provider's config.
	return []string{
		localConfig.rootDir(),
		localConfig.sharedStorageDir(),
		localConfig.storageDir(),
		localConfig.mongoDir(),
	}
}
// GetSudoCallerIds exposes the internal getSudoCallerIds helper to tests.
// Presumably it resolves the uid/gid of the user who invoked sudo —
// confirm against the helper's implementation.
func GetSudoCallerIds() (uid, gid int, err error) {
	return getSudoCallerIds()
}
Remove default root dir.
// Copyright 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package local
import (
gc "launchpad.net/gocheck"
"launchpad.net/juju-core/environs/config"
)
var Provider = provider
// SetRootCheckFunction allows tests to override the check for a root user.
// It returns the previous function so tests can restore it on teardown.
func SetRootCheckFunction(f func() bool) (old func() bool) {
	old, rootCheckFunction = rootCheckFunction, f
	return
}
// ConfigNamespace returns the result of the namespace call on the
// localConfig.
func ConfigNamespace(cfg *config.Config) string {
	// NOTE(review): newConfig's error is ignored; with an invalid cfg this
	// would likely panic on a nil localConfig — confirm callers pass a
	// valid config.
	localConfig, _ := provider.newConfig(cfg)
	return localConfig.namespace()
}
// CreateDirs calls createDirs on the localEnviron. Config-building
// failures abort the test via the gocheck assertion.
func CreateDirs(c *gc.C, cfg *config.Config) error {
	localConfig, err := provider.newConfig(cfg)
	c.Assert(err, gc.IsNil)
	return localConfig.createDirs()
}
// CheckDirs returns the list of directories to check for permissions in the test.
func CheckDirs(c *gc.C, cfg *config.Config) []string {
	localConfig, err := provider.newConfig(cfg)
	c.Assert(err, gc.IsNil)
	// The four directories the local provider's config manages.
	return []string{
		localConfig.rootDir(),
		localConfig.sharedStorageDir(),
		localConfig.storageDir(),
		localConfig.mongoDir(),
	}
}
// GetSudoCallerIds exposes the internal getSudoCallerIds helper to tests.
// Presumably it resolves the uid/gid of the sudo-invoking user — confirm
// against the helper's implementation.
func GetSudoCallerIds() (uid, gid int, err error) {
	return getSudoCallerIds()
}
|
package ethchain
import (
"math/big"
)
// Fee and reward schedule for the chain. The per-operation fees below are
// relative values that InitFees scales to absolute amounts using TxFeeRat.
var (
	// TxFeeRat is the base unit each relative fee is multiplied by in
	// InitFees.
	TxFeeRat *big.Int = big.NewInt(100000000000000)

	TxFee       *big.Int = big.NewInt(100)
	StepFee     *big.Int = big.NewInt(1)
	StoreFee    *big.Int = big.NewInt(5)
	DataFee     *big.Int = big.NewInt(20)
	ExtroFee    *big.Int = big.NewInt(40)
	CryptoFee   *big.Int = big.NewInt(20)
	ContractFee *big.Int = big.NewInt(100)

	BlockReward          *big.Int = big.NewInt(1.5e+18)
	UncleReward          *big.Int = big.NewInt(1.125e+18)
	UncleInclusionReward *big.Int = big.NewInt(1.875e+17)

	// The period rewards are zero-valued placeholders; nothing in this
	// file assigns them a value.
	Period1Reward *big.Int = new(big.Int)
	Period2Reward *big.Int = new(big.Int)
	Period3Reward *big.Int = new(big.Int)
	Period4Reward *big.Int = new(big.Int)
)

// InitFees scales the per-operation fees from their relative values to
// absolute amounts by multiplying each by TxFeeRat. It mutates the
// package-level fee variables in place and must be called exactly once:
// a second call would scale the fees again.
func InitFees() {
	StepFee.Mul(StepFee, TxFeeRat)
	StoreFee.Mul(StoreFee, TxFeeRat)
	DataFee.Mul(DataFee, TxFeeRat)
	ExtroFee.Mul(ExtroFee, TxFeeRat)
	CryptoFee.Mul(CryptoFee, TxFeeRat)
	ContractFee.Mul(ContractFee, TxFeeRat)
}
Removed comments
package ethchain
import (
"math/big"
)
// Chain fee and reward parameters. The fee values start out as small
// relative amounts and are scaled to absolute amounts by InitFees.
var (
	TxFeeRat *big.Int = big.NewInt(100000000000000)

	TxFee       *big.Int = big.NewInt(100)
	StepFee     *big.Int = big.NewInt(1)
	StoreFee    *big.Int = big.NewInt(5)
	DataFee     *big.Int = big.NewInt(20)
	ExtroFee    *big.Int = big.NewInt(40)
	CryptoFee   *big.Int = big.NewInt(20)
	ContractFee *big.Int = big.NewInt(100)

	BlockReward          *big.Int = big.NewInt(1.5e+18)
	UncleReward          *big.Int = big.NewInt(1.125e+18)
	UncleInclusionReward *big.Int = big.NewInt(1.875e+17)

	Period1Reward *big.Int = new(big.Int)
	Period2Reward *big.Int = new(big.Int)
	Period3Reward *big.Int = new(big.Int)
	Period4Reward *big.Int = new(big.Int)
)

// InitFees converts the relative fee settings into absolute amounts by
// scaling each one by TxFeeRat. The fee variables are modified in place.
func InitFees() {
	scaled := []*big.Int{StepFee, StoreFee, DataFee, ExtroFee, CryptoFee, ContractFee}
	for _, fee := range scaled {
		fee.Mul(fee, TxFeeRat)
	}
}
|
package main
import (
"encoding/csv"
"flag"
"fmt"
"math"
"os"
"strconv"
"strings"
"github.com/kzahedi/goent"
)
// check panics on any non-nil error; this tool uses fail-fast error
// handling throughout.
func check(e error) {
	if e != nil {
		panic(e)
	}
}

// f2s formats a float64 with the minimum number of digits required to
// round-trip the value exactly.
func f2s(v float64) string {
	return strconv.FormatFloat(v, 'f', -1, 64)
}

// getvalues parses a parameter specification into a list of values.
// Three forms are accepted, matching the flag help text:
//
//	"1"       -> [1]
//	"1,2,3"   -> [1, 2, 3]           (comma-separated list)
//	"0:0.5:1" -> [0, 0.5, 1]         (start:delta:end range)
//
// The comma-list form was advertised in the flag help but previously
// unimplemented (it would panic). Malformed numbers panic via check.
func getvalues(str string) []float64 {
	// Explicit comma-separated list.
	if strings.Contains(str, ",") {
		parts := strings.Split(str, ",")
		r := make([]float64, 0, len(parts))
		for _, p := range parts {
			v, err := strconv.ParseFloat(p, 64)
			check(err)
			r = append(r, v)
		}
		return r
	}
	var r []float64
	values := strings.Split(str, ":")
	start, err := strconv.ParseFloat(values[0], 64)
	check(err)
	// With a single value, end == start and delta == 1, so the loop
	// below emits exactly one entry.
	end := start
	delta := 1.0
	if len(values) == 3 {
		delta, err = strconv.ParseFloat(values[1], 64)
		check(err)
		end, err = strconv.ParseFloat(values[2], 64)
		check(err)
	}
	for v := start; v <= end; v += delta {
		r = append(r, v)
	}
	return r
}
// bin maps the binary state {0, 1} onto the spin values {-1, +1}.
func bin(a int) float64 {
	if a != 0 {
		return 1.0
	}
	return -1.0
}

// pw2_c_w1_a1 returns the conditional probability p(w2 | w1, a1) of the
// next world state given the current world state and action, using the
// couplings phi (w -> w'), psi (a -> w') and chi (a,w -> w').
func pw2_c_w1_a1(w2, w1, a1 int, phi, psi, chi float64) float64 {
	numerator := math.Exp(phi*bin(w2)*bin(w1) + psi*bin(w2)*bin(a1) + chi*bin(w2)*bin(w1)*bin(a1))
	normalizer := math.Exp(-phi*bin(w1)-psi*bin(a1)-chi*bin(w1)*bin(a1)) +
		math.Exp(phi*bin(w1)+psi*bin(a1)+chi*bin(w1)*bin(a1))
	return numerator / normalizer
}

// pa1_c_s1 returns p(a1 | s1), the action given the sensor state, with
// coupling mu (s -> a).
func pa1_c_s1(a1, s1 int, mu float64) float64 {
	numerator := math.Exp(mu * bin(a1) * bin(s1))
	normalizer := math.Exp(mu*bin(s1)) + math.Exp(-mu*bin(s1))
	return numerator / normalizer
}

// ps1_c_w1 returns p(s1 | w1), the sensor state given the world state,
// with coupling zeta (w -> s).
func ps1_c_w1(s1, w1 int, zeta float64) float64 {
	numerator := math.Exp(zeta * bin(w1) * bin(s1))
	normalizer := math.Exp(zeta*bin(w1)) + math.Exp(-zeta*bin(w1))
	return numerator / normalizer
}

// pw1 returns the marginal p(w1) of the world state, with bias tau.
func pw1(w1 int, tau float64) float64 {
	numerator := math.Exp(tau * bin(w1))
	normalizer := math.Exp(tau) + math.Exp(-tau)
	return numerator / normalizer
}
// calculate_MC_W builds the joint distribution p(w2, w1, a1) for the
// binary world/sensor/action model defined by the coupling parameters
// and evaluates goent's MC_W quantification on it. The joint is obtained
// by marginalizing p(w2, w1, a1, s1) over the sensor state s1.
func calculate_MC_W(mu, phi, psi, chi, zeta, tau float64) float64 {
	// make zero-initializes every entry, which the += accumulation relies on.
	pw2w1a1 := make([][][]float64, 2)
	for w2 := 0; w2 < 2; w2++ {
		pw2w1a1[w2] = make([][]float64, 2)
		for w1 := 0; w1 < 2; w1++ {
			pw2w1a1[w2][w1] = make([]float64, 2)
		}
	}
	for w2 := 0; w2 < 2; w2++ {
		for w1 := 0; w1 < 2; w1++ {
			for a1 := 0; a1 < 2; a1++ {
				for s1 := 0; s1 < 2; s1++ {
					// Sum over s1. The previous code assigned with "=",
					// which overwrote the s1=0 term and produced an
					// improper (non-normalized) marginal.
					pw2w1a1[w2][w1][a1] +=
						pw2_c_w1_a1(w2, w1, a1, phi, psi, chi) *
							pa1_c_s1(a1, s1, mu) *
							ps1_c_w1(s1, w1, zeta) *
							pw1(w1, tau)
				}
			}
		}
	}
	return goent.MC_W(pw2w1a1)
}
// calculate_MC_A builds the joint distribution p(w2, a1, w1) for the
// binary world/sensor/action model defined by the coupling parameters
// and evaluates goent's MC_A quantification on it. The joint is obtained
// by marginalizing p(w2, w1, a1, s1) over the sensor state s1; note the
// index order (w2, a1, w1) expected by MC_A.
func calculate_MC_A(mu, phi, psi, chi, zeta, tau float64) float64 {
	// make zero-initializes every entry, which the += accumulation relies on.
	pw2a1w1 := make([][][]float64, 2)
	for w2 := 0; w2 < 2; w2++ {
		pw2a1w1[w2] = make([][]float64, 2)
		for a1 := 0; a1 < 2; a1++ {
			pw2a1w1[w2][a1] = make([]float64, 2)
		}
	}
	for w2 := 0; w2 < 2; w2++ {
		for w1 := 0; w1 < 2; w1++ {
			for a1 := 0; a1 < 2; a1++ {
				for s1 := 0; s1 < 2; s1++ {
					// Sum over s1. The previous "=" assignment overwrote
					// the s1=0 term, yielding an improper marginal.
					pw2a1w1[w2][a1][w1] +=
						pw2_c_w1_a1(w2, w1, a1, phi, psi, chi) *
							pa1_c_s1(a1, s1, mu) *
							ps1_c_w1(s1, w1, zeta) *
							pw1(w1, tau)
				}
			}
		}
	}
	return goent.MC_A(pw2a1w1)
}
// main sweeps the model parameters over the ranges given on the command
// line, computes the requested morphological-computation measure for each
// parameter combination, and writes one CSV row per combination.
//
// Bug fix: chi is part of the parameter sweep but was missing from the CSV
// header and rows, so rows differing only in chi were indistinguishable.
// The chi column is now recorded.
func main() {
	muStr := flag.String("mu", "0", "mu values (s -> a). can take list (1,2,3) to range with delta (0:0.1:1.0)")
	phiStr := flag.String("phi", "0:0.1:5", "phi values (w -> w'). can take list (1,2,3) to range with delta (0:0.1:1.0)")
	psiStr := flag.String("psi", "0:0.1:5", "psi values (a -> w'). can take list (1,2,3) to range with delta (0:0.1:1.0)")
	chiStr := flag.String("chi", "0:0.1:5", "chi values (a,w -> w'). can take list (1,2,3) to range with delta (0:0.1:1.0)")
	zetaStr := flag.String("zeta", "0:0.1:5", "zeta values (w -> s). can take list (1,2,3) to range with delta (0:0.1:1.0)")
	tauStr := flag.String("tau", "0", "tau values (p(w)). can take list (1,2,3) to range with delta (0:0.1:1.0)")
	mc := flag.String("mc", "MC_W", "quantification to use: MC_W (soon: MC_A, MC_MI, MC_SY, MC_SY_NIS, MC_SY_GIS, MC_SY_SCGIS)")
	verbose := flag.Bool("v", false, "verbose")
	output := flag.String("o", "out.csv", "output file. default out.csv")
	flag.Parse()

	mu := getvalues(*muStr)
	phi := getvalues(*phiStr)
	psi := getvalues(*psiStr)
	chi := getvalues(*chiStr)
	zeta := getvalues(*zetaStr)
	tau := getvalues(*tauStr)

	if *verbose {
		fmt.Println("mu:", *muStr)
		fmt.Println("mu:", mu)
		fmt.Println("phi:", *phiStr)
		fmt.Println("phi:", phi)
		fmt.Println("psi:", *psiStr)
		fmt.Println("psi:", psi)
		fmt.Println("chi:", *chiStr)
		fmt.Println("chi:", chi)
		fmt.Println("zeta:", *zetaStr)
		fmt.Println("zeta:", zeta)
		fmt.Println("tau:", *tauStr)
		fmt.Println("tau:", tau)
		fmt.Println("mc:", *mc)
		fmt.Println("out:", *output)
	}

	f, err := os.Create(*output)
	check(err)
	defer f.Close()
	writer := csv.NewWriter(f)
	defer writer.Flush()

	check(writer.Write([]string{"mu", "phi", "psi", "chi", "zeta", "tau", "r"}))

	for _, vmu := range mu {
		for _, vphi := range phi {
			for _, vpsi := range psi {
				for _, vchi := range chi {
					for _, vzeta := range zeta {
						for _, vtau := range tau {
							var r float64
							switch *mc {
							case "MC_W":
								r = calculate_MC_W(vmu, vphi, vpsi, vchi, vzeta, vtau)
							case "MC_A":
								r = calculate_MC_A(vmu, vphi, vpsi, vchi, vzeta, vtau)
							default:
								// Unknown measure: write nothing (matches previous behaviour).
								continue
							}
							row := []string{f2s(vmu), f2s(vphi), f2s(vpsi), f2s(vchi), f2s(vzeta), f2s(vtau), f2s(r)}
							check(writer.Write(row))
						}
					}
				}
			}
		}
	}
}
Bug fix
pw2w1a1 was not the sum over s1: each s1 term overwrote the previous
value instead of being accumulated with +=.
package main
import (
"encoding/csv"
"flag"
"fmt"
"math"
"os"
"strconv"
"strings"
"github.com/kzahedi/goent"
)
// check3DProbabilityDistribution panics if the entries of p do not sum to
// one within a small numerical tolerance.
func check3DProbabilityDistribution(p [][][]float64) {
	total := 0.0
	for _, plane := range p {
		for _, row := range plane {
			for _, v := range row {
				total += v
			}
		}
	}
	if math.Abs(total-1.0) > 0.0000001 {
		panic(fmt.Sprintf("Does not sum up to one %f", total))
	}
}
func check(e error) {
if e != nil {
panic(e)
}
}
// f2s renders a float64 with the shortest decimal representation that
// round-trips exactly (strconv precision -1).
func f2s(v float64) string {
	const shortest = -1
	return strconv.FormatFloat(v, 'f', shortest, 64)
}
// getvalues parses a parameter specification into a slice of values.
// Supported forms (matching the flag help texts):
//
//	"2"        -> [2]
//	"1,2,3"    -> [1 2 3]          (explicit comma-separated list)
//	"0:0.5:1"  -> [0 0.5 1]        (start:delta:end, end inclusive)
//
// Bug fix: the comma-separated list form advertised in every flag's help
// text ("can take list (1,2,3)") was not actually parsed; it is now
// supported. Malformed numbers cause a panic, as before.
func getvalues(str string) []float64 {
	parse := func(s string) float64 {
		v, err := strconv.ParseFloat(strings.TrimSpace(s), 64)
		if err != nil {
			panic(err)
		}
		return v
	}
	// Explicit list form.
	if strings.Contains(str, ",") {
		parts := strings.Split(str, ",")
		r := make([]float64, 0, len(parts))
		for _, p := range parts {
			r = append(r, parse(p))
		}
		return r
	}
	// Range form "start:delta:end", or a single value.
	values := strings.Split(str, ":")
	start := parse(values[0])
	end := start
	delta := 1.0 // with end == start this yields exactly one value
	if len(values) == 3 {
		delta = parse(values[1])
		end = parse(values[2])
	}
	var r []float64
	for v := start; v <= end; v += delta {
		r = append(r, v)
	}
	return r
}
// bin maps a binary state to its spin representation: 0 -> -1.0, any
// nonzero state -> +1.0.
func bin(a int) float64 {
	if a != 0 {
		return 1.0
	}
	return -1.0
}
// pw2_c_w1_a1 returns the conditional probability p(w2 | w1, a1): a
// Boltzmann-style distribution over the two candidate next world states,
// with couplings phi (w->w'), psi (a->w') and chi (a,w->w').
func pw2_c_w1_a1(w2, w1, a1 int, phi, psi, chi float64) float64 {
	energy := func(w int) float64 {
		return phi*bin(w)*bin(w1) + psi*bin(w)*bin(a1) + chi*bin(w)*bin(w1)*bin(a1)
	}
	return math.Exp(energy(w2)) / (math.Exp(energy(0)) + math.Exp(energy(1)))
}
// pa1_c_s1 returns the conditional probability p(a1 | s1) of the actuator
// state given the sensor state, normalised over both actuator states.
func pa1_c_s1(a1, s1 int, mu float64) float64 {
	e := func(a int) float64 { return mu * bin(a) * bin(s1) }
	return math.Exp(e(a1)) / (math.Exp(e(0)) + math.Exp(e(1)))
}
// ps1_c_w1 returns the conditional probability p(s1 | w1) of the sensor
// state given the world state, normalised over both sensor states.
func ps1_c_w1(s1, w1 int, zeta float64) float64 {
	e := func(s int) float64 { return zeta * bin(s) * bin(w1) }
	return math.Exp(e(s1)) / (math.Exp(e(0)) + math.Exp(e(1)))
}
// pw1 returns the marginal probability p(w1) of a binary world state with
// bias tau, normalised over both states.
func pw1(w1 int, tau float64) float64 {
	e := func(w int) float64 { return tau * bin(w) }
	return math.Exp(e(w1)) / (math.Exp(e(0)) + math.Exp(e(1)))
}
// calculate_MC_W builds the joint distribution p(w2, w1, a1) of the binary
// world/actuator model, marginalising over the hidden sensor state s1, and
// evaluates goent's MC_W measure on it.
func calculate_MC_W(mu, phi, psi, chi, zeta, tau float64) float64 {
	joint := make([][][]float64, 2)
	for i := range joint {
		joint[i] = make([][]float64, 2)
		for j := range joint[i] {
			joint[i][j] = make([]float64, 2)
		}
	}
	for w2 := 0; w2 < 2; w2++ {
		for w1 := 0; w1 < 2; w1++ {
			for a1 := 0; a1 < 2; a1++ {
				for s1 := 0; s1 < 2; s1++ {
					// p(w2,w1,a1) = sum_s1 p(w2|w1,a1) p(a1|s1) p(s1|w1) p(w1)
					joint[w2][w1][a1] +=
						pw2_c_w1_a1(w2, w1, a1, phi, psi, chi) *
							pa1_c_s1(a1, s1, mu) *
							ps1_c_w1(s1, w1, zeta) *
							pw1(w1, tau)
				}
			}
		}
	}
	// check3DProbabilityDistribution(joint)
	return goent.MC_W(joint)
}
// calculate_MC_A builds the joint distribution p(w2, a1, w1) of the binary
// world/actuator model -- marginalising over the hidden sensor state s1 --
// and evaluates goent's MC_A measure on it.
//
// Bug fix: the s1 accumulation fix ("+=" instead of "=") applied to
// calculate_MC_W was missing here; each s1 term still overwrote the
// previous one, so the result was not a probability distribution.
func calculate_MC_A(mu, phi, psi, chi, zeta, tau float64) float64 {
	pw2a1w1 := make([][][]float64, 2)
	for w2 := 0; w2 < 2; w2++ {
		pw2a1w1[w2] = make([][]float64, 2)
		for a1 := 0; a1 < 2; a1++ {
			pw2a1w1[w2][a1] = make([]float64, 2)
		}
	}
	for w2 := 0; w2 < 2; w2++ {
		for w1 := 0; w1 < 2; w1++ {
			for a1 := 0; a1 < 2; a1++ {
				for s1 := 0; s1 < 2; s1++ {
					// p(w2,a1,w1) = sum_s1 p(w2|w1,a1) p(a1|s1) p(s1|w1) p(w1)
					pw2a1w1[w2][a1][w1] +=
						pw2_c_w1_a1(w2, w1, a1, phi, psi, chi) *
							pa1_c_s1(a1, s1, mu) *
							ps1_c_w1(s1, w1, zeta) *
							pw1(w1, tau)
				}
			}
		}
	}
	return goent.MC_A(pw2a1w1)
}
// main sweeps the model parameters over the ranges given on the command
// line, computes the requested morphological-computation measure for each
// parameter combination, and writes one CSV row per combination.
//
// Bug fix: chi is part of the parameter sweep but was missing from the CSV
// header and rows, so rows differing only in chi were indistinguishable.
// The chi column is now recorded.
func main() {
	muStr := flag.String("mu", "0", "mu values (s -> a). can take list (1,2,3) to range with delta (0:0.1:1.0)")
	phiStr := flag.String("phi", "0:0.1:5", "phi values (w -> w'). can take list (1,2,3) to range with delta (0:0.1:1.0)")
	psiStr := flag.String("psi", "0:0.1:5", "psi values (a -> w'). can take list (1,2,3) to range with delta (0:0.1:1.0)")
	chiStr := flag.String("chi", "0:0.1:5", "chi values (a,w -> w'). can take list (1,2,3) to range with delta (0:0.1:1.0)")
	zetaStr := flag.String("zeta", "0:0.1:5", "zeta values (w -> s). can take list (1,2,3) to range with delta (0:0.1:1.0)")
	tauStr := flag.String("tau", "0", "tau values (p(w)). can take list (1,2,3) to range with delta (0:0.1:1.0)")
	mc := flag.String("mc", "MC_W", "quantification to use: MC_W (soon: MC_A, MC_MI, MC_SY, MC_SY_NIS, MC_SY_GIS, MC_SY_SCGIS)")
	verbose := flag.Bool("v", false, "verbose")
	output := flag.String("o", "out.csv", "output file. default out.csv")
	flag.Parse()

	mu := getvalues(*muStr)
	phi := getvalues(*phiStr)
	psi := getvalues(*psiStr)
	chi := getvalues(*chiStr)
	zeta := getvalues(*zetaStr)
	tau := getvalues(*tauStr)

	if *verbose {
		fmt.Println("mu:", *muStr)
		fmt.Println("mu:", mu)
		fmt.Println("phi:", *phiStr)
		fmt.Println("phi:", phi)
		fmt.Println("psi:", *psiStr)
		fmt.Println("psi:", psi)
		fmt.Println("chi:", *chiStr)
		fmt.Println("chi:", chi)
		fmt.Println("zeta:", *zetaStr)
		fmt.Println("zeta:", zeta)
		fmt.Println("tau:", *tauStr)
		fmt.Println("tau:", tau)
		fmt.Println("mc:", *mc)
		fmt.Println("out:", *output)
	}

	f, err := os.Create(*output)
	check(err)
	defer f.Close()
	writer := csv.NewWriter(f)
	defer writer.Flush()

	check(writer.Write([]string{"mu", "phi", "psi", "chi", "zeta", "tau", "r"}))

	for _, vmu := range mu {
		for _, vphi := range phi {
			for _, vpsi := range psi {
				for _, vchi := range chi {
					for _, vzeta := range zeta {
						for _, vtau := range tau {
							var r float64
							switch *mc {
							case "MC_W":
								r = calculate_MC_W(vmu, vphi, vpsi, vchi, vzeta, vtau)
							case "MC_A":
								r = calculate_MC_A(vmu, vphi, vpsi, vchi, vzeta, vtau)
							default:
								// Unknown measure: write nothing (matches previous behaviour).
								continue
							}
							row := []string{f2s(vmu), f2s(vphi), f2s(vpsi), f2s(vchi), f2s(vzeta), f2s(vtau), f2s(r)}
							check(writer.Write(row))
						}
					}
				}
			}
		}
	}
}
|
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime_test
import (
"math"
"net"
"runtime"
"runtime/debug"
"strings"
"sync"
"sync/atomic"
"syscall"
"testing"
"time"
)
var stop = make(chan bool, 1)
func perpetuumMobile() {
select {
case <-stop:
default:
go perpetuumMobile()
}
}
func TestStopTheWorldDeadlock(t *testing.T) {
if testing.Short() {
t.Skip("skipping during short test")
}
maxprocs := runtime.GOMAXPROCS(3)
compl := make(chan bool, 2)
go func() {
for i := 0; i != 1000; i += 1 {
runtime.GC()
}
compl <- true
}()
go func() {
for i := 0; i != 1000; i += 1 {
runtime.GOMAXPROCS(3)
}
compl <- true
}()
go perpetuumMobile()
<-compl
<-compl
stop <- true
runtime.GOMAXPROCS(maxprocs)
}
func TestYieldProgress(t *testing.T) {
testYieldProgress(t, false)
}
func TestYieldLockedProgress(t *testing.T) {
testYieldProgress(t, true)
}
func testYieldProgress(t *testing.T, locked bool) {
c := make(chan bool)
cack := make(chan bool)
go func() {
if locked {
runtime.LockOSThread()
}
for {
select {
case <-c:
cack <- true
return
default:
runtime.Gosched()
}
}
}()
time.Sleep(10 * time.Millisecond)
c <- true
<-cack
}
func TestYieldLocked(t *testing.T) {
const N = 10
c := make(chan bool)
go func() {
runtime.LockOSThread()
for i := 0; i < N; i++ {
runtime.Gosched()
time.Sleep(time.Millisecond)
}
c <- true
// runtime.UnlockOSThread() is deliberately omitted
}()
<-c
}
func TestGoroutineParallelism(t *testing.T) {
if runtime.NumCPU() == 1 {
// Takes too long, too easy to deadlock, etc.
t.Skip("skipping on uniprocessor")
}
P := 4
N := 10
if testing.Short() {
P = 3
N = 3
}
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
// If runtime triggers a forced GC during this test then it will deadlock,
// since the goroutines can't be stopped/preempted.
// Disable GC for this test (see issue #10958).
defer debug.SetGCPercent(debug.SetGCPercent(-1))
for try := 0; try < N; try++ {
done := make(chan bool)
x := uint32(0)
for p := 0; p < P; p++ {
// Test that all P goroutines are scheduled at the same time
go func(p int) {
for i := 0; i < 3; i++ {
expected := uint32(P*i + p)
for atomic.LoadUint32(&x) != expected {
}
atomic.StoreUint32(&x, expected+1)
}
done <- true
}(p)
}
for p := 0; p < P; p++ {
<-done
}
}
}
// Test that all runnable goroutines are scheduled at the same time.
func TestGoroutineParallelism2(t *testing.T) {
//testGoroutineParallelism2(t, false, false)
testGoroutineParallelism2(t, true, false)
testGoroutineParallelism2(t, false, true)
testGoroutineParallelism2(t, true, true)
}
// testGoroutineParallelism2 checks that all runnable goroutines are
// scheduled at the same time. load pre-spawns goroutines so worker threads
// start parking before the real test; netpoll brings up a listener so the
// network poller influences scheduler behavior.
func testGoroutineParallelism2(t *testing.T, load, netpoll bool) {
	if runtime.NumCPU() == 1 {
		// Takes too long, too easy to deadlock, etc.
		t.Skip("skipping on uniprocessor")
	}
	P := 4
	N := 10
	if testing.Short() {
		N = 3
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted.
	// Disable GC for this test (see issue #10958).
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	for try := 0; try < N; try++ {
		if load {
			// Create P goroutines and wait until they all run.
			// When we run the actual test below, worker threads
			// running the goroutines will start parking.
			done := make(chan bool)
			x := uint32(0)
			for p := 0; p < P; p++ {
				go func() {
					if atomic.AddUint32(&x, 1) == uint32(P) {
						done <- true
						return
					}
					for atomic.LoadUint32(&x) != uint32(P) {
					}
				}()
			}
			<-done
		}
		if netpoll {
			// Enable netpoller, affects scheduler behavior.
			// Bug fix: the listener must be closed when Listen SUCCEEDS.
			// The original condition (err != nil) deferred Close on a nil
			// listener (panic on error) and leaked the listener on success.
			ln, err := net.Listen("tcp", "localhost:0")
			if err == nil {
				defer ln.Close() // yup, defer in a loop
			}
		}
		done := make(chan bool)
		x := uint32(0)
		// Spawn P goroutines in a nested fashion just to differ from TestGoroutineParallelism.
		for p := 0; p < P/2; p++ {
			go func(p int) {
				for p2 := 0; p2 < 2; p2++ {
					go func(p2 int) {
						for i := 0; i < 3; i++ {
							expected := uint32(P*i + p*2 + p2)
							for atomic.LoadUint32(&x) != expected {
							}
							atomic.StoreUint32(&x, expected+1)
						}
						done <- true
					}(p2)
				}
			}(p)
		}
		for p := 0; p < P; p++ {
			<-done
		}
	}
}
func TestBlockLocked(t *testing.T) {
const N = 10
c := make(chan bool)
go func() {
runtime.LockOSThread()
for i := 0; i < N; i++ {
c <- true
}
runtime.UnlockOSThread()
}()
for i := 0; i < N; i++ {
<-c
}
}
func TestTimerFairness(t *testing.T) {
done := make(chan bool)
c := make(chan bool)
for i := 0; i < 2; i++ {
go func() {
for {
select {
case c <- true:
case <-done:
return
}
}
}()
}
timer := time.After(20 * time.Millisecond)
for {
select {
case <-c:
case <-timer:
close(done)
return
}
}
}
func TestTimerFairness2(t *testing.T) {
done := make(chan bool)
c := make(chan bool)
for i := 0; i < 2; i++ {
go func() {
timer := time.After(20 * time.Millisecond)
var buf [1]byte
for {
syscall.Read(0, buf[0:0])
select {
case c <- true:
case <-c:
case <-timer:
done <- true
return
}
}
}()
}
<-done
<-done
}
// The function is used to test preemption at split stack checks.
// Declaring a var avoids inlining at the call site.
var preempt = func() int {
var a [128]int
sum := 0
for _, v := range a {
sum += v
}
return sum
}
func TestPreemption(t *testing.T) {
// Test that goroutines are preempted at function calls.
N := 5
if testing.Short() {
N = 2
}
c := make(chan bool)
var x uint32
for g := 0; g < 2; g++ {
go func(g int) {
for i := 0; i < N; i++ {
for atomic.LoadUint32(&x) != uint32(g) {
preempt()
}
atomic.StoreUint32(&x, uint32(1-g))
}
c <- true
}(g)
}
<-c
<-c
}
func TestPreemptionGC(t *testing.T) {
// Test that pending GC preempts running goroutines.
P := 5
N := 10
if testing.Short() {
P = 3
N = 2
}
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P + 1))
var stop uint32
for i := 0; i < P; i++ {
go func() {
for atomic.LoadUint32(&stop) == 0 {
preempt()
}
}()
}
for i := 0; i < N; i++ {
runtime.Gosched()
runtime.GC()
}
atomic.StoreUint32(&stop, 1)
}
func TestGCFairness(t *testing.T) {
output := runTestProg(t, "testprog", "GCFairness")
want := "OK\n"
if output != want {
t.Fatalf("want %s, got %s\n", want, output)
}
}
// TestNumGoroutine verifies that runtime.NumGoroutine agrees with the
// number of goroutines reported by runtime.Stack.
//
// Deflaked: a single-shot comparison races with goroutines that are in the
// middle of exiting, so NumGoroutine and Stack can legitimately disagree
// for a moment. Retry a few times and only fail if they never match.
func TestNumGoroutine(t *testing.T) {
	output := runTestProg(t, "testprog", "NumGoroutine")
	want := "1\n"
	if output != want {
		t.Fatalf("want %q, got %q", want, output)
	}
	buf := make([]byte, 1<<20)
	// Try up to 10 times for a match before giving up.
	// This is a fundamentally racy check but it's important
	// to notice if NumGoroutine and Stack are _always_ out of sync.
	for i := 0; ; i++ {
		// Give goroutines about to exit a chance to exit.
		// The NumGoroutine and Stack below need to see
		// the same state of the world, so anything we can do
		// to keep it quiet is good.
		runtime.Gosched()
		n := runtime.NumGoroutine()
		buf = buf[:runtime.Stack(buf, true)]
		nstk := strings.Count(string(buf), "goroutine ")
		if n == nstk {
			break
		}
		if i >= 10 {
			t.Fatalf("NumGoroutine=%d, but found %d goroutines in stack dump: %s", n, nstk, buf)
		}
	}
}
func TestPingPongHog(t *testing.T) {
if testing.Short() {
t.Skip("skipping in -short mode")
}
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
done := make(chan bool)
hogChan, lightChan := make(chan bool), make(chan bool)
hogCount, lightCount := 0, 0
run := func(limit int, counter *int, wake chan bool) {
for {
select {
case <-done:
return
case <-wake:
for i := 0; i < limit; i++ {
*counter++
}
wake <- true
}
}
}
// Start two co-scheduled hog goroutines.
for i := 0; i < 2; i++ {
go run(1e6, &hogCount, hogChan)
}
// Start two co-scheduled light goroutines.
for i := 0; i < 2; i++ {
go run(1e3, &lightCount, lightChan)
}
// Start goroutine pairs and wait for a few preemption rounds.
hogChan <- true
lightChan <- true
time.Sleep(100 * time.Millisecond)
close(done)
<-hogChan
<-lightChan
// Check that hogCount and lightCount are within a factor of
// 2, which indicates that both pairs of goroutines handed off
// the P within a time-slice to their buddy.
if hogCount > lightCount*2 || lightCount > hogCount*2 {
t.Fatalf("want hogCount/lightCount in [0.5, 2]; got %d/%d = %g", hogCount, lightCount, float64(hogCount)/float64(lightCount))
}
}
func BenchmarkPingPongHog(b *testing.B) {
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
// Create a CPU hog
stop, done := make(chan bool), make(chan bool)
go func() {
for {
select {
case <-stop:
done <- true
return
default:
}
}
}()
// Ping-pong b.N times
ping, pong := make(chan bool), make(chan bool)
go func() {
for j := 0; j < b.N; j++ {
pong <- <-ping
}
close(stop)
done <- true
}()
go func() {
for i := 0; i < b.N; i++ {
ping <- <-pong
}
done <- true
}()
b.ResetTimer()
ping <- true // Start ping-pong
<-stop
b.StopTimer()
<-ping // Let last ponger exit
<-done // Make sure goroutines exit
<-done
<-done
}
func stackGrowthRecursive(i int) {
var pad [128]uint64
if i != 0 && pad[0] == 0 {
stackGrowthRecursive(i - 1)
}
}
func TestPreemptSplitBig(t *testing.T) {
if testing.Short() {
t.Skip("skipping in -short mode")
}
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
stop := make(chan int)
go big(stop)
for i := 0; i < 3; i++ {
time.Sleep(10 * time.Microsecond) // let big start running
runtime.GC()
}
close(stop)
}
// big spins in a long loop (giving the GC ample opportunity to request a
// preemption), calls bigframe each iteration, and keeps looping until the
// stop channel is closed.
//
// Bug fix: the stop check must be non-blocking. Without the default case
// the select parks on the first iteration, so the function never loops and
// the test no longer exercises repeated preemption.
func big(stop chan int) int {
	n := 0
	for {
		// delay so that gc is sure to have asked for a preemption
		for i := 0; i < 1e9; i++ {
			n++
		}
		// call bigframe, which used to miss the preemption in its prologue.
		bigframe(stop)
		// check if we've been asked to stop (non-blocking).
		select {
		case <-stop:
			return n
		default:
		}
	}
}
func bigframe(stop chan int) int {
// not splitting the stack will overflow.
// small will notice that it needs a stack split and will
// catch the overflow.
var x [8192]byte
return small(stop, &x)
}
func small(stop chan int, x *[8192]byte) int {
for i := range x {
x[i] = byte(i)
}
sum := 0
for i := range x {
sum += int(x[i])
}
// keep small from being a leaf function, which might
// make it not do any stack check at all.
nonleaf(stop)
return sum
}
func nonleaf(stop chan int) bool {
// do something that won't be inlined:
select {
case <-stop:
return true
default:
return false
}
}
func TestSchedLocalQueue(t *testing.T) {
runtime.RunSchedLocalQueueTest()
}
func TestSchedLocalQueueSteal(t *testing.T) {
runtime.RunSchedLocalQueueStealTest()
}
func benchmarkStackGrowth(b *testing.B, rec int) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
stackGrowthRecursive(rec)
}
})
}
func BenchmarkStackGrowth(b *testing.B) {
benchmarkStackGrowth(b, 10)
}
func BenchmarkStackGrowthDeep(b *testing.B) {
benchmarkStackGrowth(b, 1024)
}
func BenchmarkCreateGoroutines(b *testing.B) {
benchmarkCreateGoroutines(b, 1)
}
func BenchmarkCreateGoroutinesParallel(b *testing.B) {
benchmarkCreateGoroutines(b, runtime.GOMAXPROCS(-1))
}
func benchmarkCreateGoroutines(b *testing.B, procs int) {
c := make(chan bool)
var f func(n int)
f = func(n int) {
if n == 0 {
c <- true
return
}
go f(n - 1)
}
for i := 0; i < procs; i++ {
go f(b.N / procs)
}
for i := 0; i < procs; i++ {
<-c
}
}
func BenchmarkCreateGoroutinesCapture(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
const N = 4
var wg sync.WaitGroup
wg.Add(N)
for i := 0; i < N; i++ {
i := i
go func() {
if i >= N {
b.Logf("bad") // just to capture b
}
wg.Done()
}()
}
wg.Wait()
}
}
func BenchmarkClosureCall(b *testing.B) {
sum := 0
off1 := 1
for i := 0; i < b.N; i++ {
off2 := 2
func() {
sum += i + off1 + off2
}()
}
_ = sum
}
type Matrix [][]float64
func BenchmarkMatmult(b *testing.B) {
b.StopTimer()
// matmult is O(N**3) but testing expects O(b.N),
// so we need to take cube root of b.N
n := int(math.Cbrt(float64(b.N))) + 1
A := makeMatrix(n)
B := makeMatrix(n)
C := makeMatrix(n)
b.StartTimer()
matmult(nil, A, B, C, 0, n, 0, n, 0, n, 8)
}
func makeMatrix(n int) Matrix {
m := make(Matrix, n)
for i := 0; i < n; i++ {
m[i] = make([]float64, n)
for j := 0; j < n; j++ {
m[i][j] = float64(i*n + j)
}
}
return m
}
func matmult(done chan<- struct{}, A, B, C Matrix, i0, i1, j0, j1, k0, k1, threshold int) {
di := i1 - i0
dj := j1 - j0
dk := k1 - k0
if di >= dj && di >= dk && di >= threshold {
// divide in two by y axis
mi := i0 + di/2
done1 := make(chan struct{}, 1)
go matmult(done1, A, B, C, i0, mi, j0, j1, k0, k1, threshold)
matmult(nil, A, B, C, mi, i1, j0, j1, k0, k1, threshold)
<-done1
} else if dj >= dk && dj >= threshold {
// divide in two by x axis
mj := j0 + dj/2
done1 := make(chan struct{}, 1)
go matmult(done1, A, B, C, i0, i1, j0, mj, k0, k1, threshold)
matmult(nil, A, B, C, i0, i1, mj, j1, k0, k1, threshold)
<-done1
} else if dk >= threshold {
// divide in two by "k" axis
// deliberately not parallel because of data races
mk := k0 + dk/2
matmult(nil, A, B, C, i0, i1, j0, j1, k0, mk, threshold)
matmult(nil, A, B, C, i0, i1, j0, j1, mk, k1, threshold)
} else {
// the matrices are small enough, compute directly
for i := i0; i < i1; i++ {
for j := j0; j < j1; j++ {
for k := k0; k < k1; k++ {
C[i][j] += A[i][k] * B[k][j]
}
}
}
}
if done != nil {
done <- struct{}{}
}
}
runtime: deflake TestNumGoroutine
Fixes #14107.
Change-Id: Icd9463b1a77b139c7ebc2d8732482d704ea332d0
Reviewed-on: https://go-review.googlesource.com/19002
Reviewed-by: Brad Fitzpatrick <ae9783c0b0efc69cd85ab025ddd17aa44cdc4aa5@golang.org>
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime_test
import (
"math"
"net"
"runtime"
"runtime/debug"
"strings"
"sync"
"sync/atomic"
"syscall"
"testing"
"time"
)
var stop = make(chan bool, 1)
func perpetuumMobile() {
select {
case <-stop:
default:
go perpetuumMobile()
}
}
func TestStopTheWorldDeadlock(t *testing.T) {
if testing.Short() {
t.Skip("skipping during short test")
}
maxprocs := runtime.GOMAXPROCS(3)
compl := make(chan bool, 2)
go func() {
for i := 0; i != 1000; i += 1 {
runtime.GC()
}
compl <- true
}()
go func() {
for i := 0; i != 1000; i += 1 {
runtime.GOMAXPROCS(3)
}
compl <- true
}()
go perpetuumMobile()
<-compl
<-compl
stop <- true
runtime.GOMAXPROCS(maxprocs)
}
func TestYieldProgress(t *testing.T) {
testYieldProgress(t, false)
}
func TestYieldLockedProgress(t *testing.T) {
testYieldProgress(t, true)
}
func testYieldProgress(t *testing.T, locked bool) {
c := make(chan bool)
cack := make(chan bool)
go func() {
if locked {
runtime.LockOSThread()
}
for {
select {
case <-c:
cack <- true
return
default:
runtime.Gosched()
}
}
}()
time.Sleep(10 * time.Millisecond)
c <- true
<-cack
}
func TestYieldLocked(t *testing.T) {
const N = 10
c := make(chan bool)
go func() {
runtime.LockOSThread()
for i := 0; i < N; i++ {
runtime.Gosched()
time.Sleep(time.Millisecond)
}
c <- true
// runtime.UnlockOSThread() is deliberately omitted
}()
<-c
}
func TestGoroutineParallelism(t *testing.T) {
if runtime.NumCPU() == 1 {
// Takes too long, too easy to deadlock, etc.
t.Skip("skipping on uniprocessor")
}
P := 4
N := 10
if testing.Short() {
P = 3
N = 3
}
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
// If runtime triggers a forced GC during this test then it will deadlock,
// since the goroutines can't be stopped/preempted.
// Disable GC for this test (see issue #10958).
defer debug.SetGCPercent(debug.SetGCPercent(-1))
for try := 0; try < N; try++ {
done := make(chan bool)
x := uint32(0)
for p := 0; p < P; p++ {
// Test that all P goroutines are scheduled at the same time
go func(p int) {
for i := 0; i < 3; i++ {
expected := uint32(P*i + p)
for atomic.LoadUint32(&x) != expected {
}
atomic.StoreUint32(&x, expected+1)
}
done <- true
}(p)
}
for p := 0; p < P; p++ {
<-done
}
}
}
// Test that all runnable goroutines are scheduled at the same time.
func TestGoroutineParallelism2(t *testing.T) {
//testGoroutineParallelism2(t, false, false)
testGoroutineParallelism2(t, true, false)
testGoroutineParallelism2(t, false, true)
testGoroutineParallelism2(t, true, true)
}
// testGoroutineParallelism2 checks that all runnable goroutines are
// scheduled at the same time. load pre-spawns goroutines so worker threads
// start parking before the real test; netpoll brings up a listener so the
// network poller influences scheduler behavior.
func testGoroutineParallelism2(t *testing.T, load, netpoll bool) {
	if runtime.NumCPU() == 1 {
		// Takes too long, too easy to deadlock, etc.
		t.Skip("skipping on uniprocessor")
	}
	P := 4
	N := 10
	if testing.Short() {
		N = 3
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted.
	// Disable GC for this test (see issue #10958).
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	for try := 0; try < N; try++ {
		if load {
			// Create P goroutines and wait until they all run.
			// When we run the actual test below, worker threads
			// running the goroutines will start parking.
			done := make(chan bool)
			x := uint32(0)
			for p := 0; p < P; p++ {
				go func() {
					if atomic.AddUint32(&x, 1) == uint32(P) {
						done <- true
						return
					}
					for atomic.LoadUint32(&x) != uint32(P) {
					}
				}()
			}
			<-done
		}
		if netpoll {
			// Enable netpoller, affects scheduler behavior.
			// Bug fix: the listener must be closed when Listen SUCCEEDS.
			// The original condition (err != nil) deferred Close on a nil
			// listener (panic on error) and leaked the listener on success.
			ln, err := net.Listen("tcp", "localhost:0")
			if err == nil {
				defer ln.Close() // yup, defer in a loop
			}
		}
		done := make(chan bool)
		x := uint32(0)
		// Spawn P goroutines in a nested fashion just to differ from TestGoroutineParallelism.
		for p := 0; p < P/2; p++ {
			go func(p int) {
				for p2 := 0; p2 < 2; p2++ {
					go func(p2 int) {
						for i := 0; i < 3; i++ {
							expected := uint32(P*i + p*2 + p2)
							for atomic.LoadUint32(&x) != expected {
							}
							atomic.StoreUint32(&x, expected+1)
						}
						done <- true
					}(p2)
				}
			}(p)
		}
		for p := 0; p < P; p++ {
			<-done
		}
	}
}
func TestBlockLocked(t *testing.T) {
const N = 10
c := make(chan bool)
go func() {
runtime.LockOSThread()
for i := 0; i < N; i++ {
c <- true
}
runtime.UnlockOSThread()
}()
for i := 0; i < N; i++ {
<-c
}
}
func TestTimerFairness(t *testing.T) {
done := make(chan bool)
c := make(chan bool)
for i := 0; i < 2; i++ {
go func() {
for {
select {
case c <- true:
case <-done:
return
}
}
}()
}
timer := time.After(20 * time.Millisecond)
for {
select {
case <-c:
case <-timer:
close(done)
return
}
}
}
func TestTimerFairness2(t *testing.T) {
done := make(chan bool)
c := make(chan bool)
for i := 0; i < 2; i++ {
go func() {
timer := time.After(20 * time.Millisecond)
var buf [1]byte
for {
syscall.Read(0, buf[0:0])
select {
case c <- true:
case <-c:
case <-timer:
done <- true
return
}
}
}()
}
<-done
<-done
}
// The function is used to test preemption at split stack checks.
// Declaring a var avoids inlining at the call site.
var preempt = func() int {
var a [128]int
sum := 0
for _, v := range a {
sum += v
}
return sum
}
func TestPreemption(t *testing.T) {
// Test that goroutines are preempted at function calls.
N := 5
if testing.Short() {
N = 2
}
c := make(chan bool)
var x uint32
for g := 0; g < 2; g++ {
go func(g int) {
for i := 0; i < N; i++ {
for atomic.LoadUint32(&x) != uint32(g) {
preempt()
}
atomic.StoreUint32(&x, uint32(1-g))
}
c <- true
}(g)
}
<-c
<-c
}
func TestPreemptionGC(t *testing.T) {
// Test that pending GC preempts running goroutines.
P := 5
N := 10
if testing.Short() {
P = 3
N = 2
}
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P + 1))
var stop uint32
for i := 0; i < P; i++ {
go func() {
for atomic.LoadUint32(&stop) == 0 {
preempt()
}
}()
}
for i := 0; i < N; i++ {
runtime.Gosched()
runtime.GC()
}
atomic.StoreUint32(&stop, 1)
}
func TestGCFairness(t *testing.T) {
output := runTestProg(t, "testprog", "GCFairness")
want := "OK\n"
if output != want {
t.Fatalf("want %s, got %s\n", want, output)
}
}
func TestNumGoroutine(t *testing.T) {
output := runTestProg(t, "testprog", "NumGoroutine")
want := "1\n"
if output != want {
t.Fatalf("want %q, got %q", want, output)
}
buf := make([]byte, 1<<20)
// Try up to 10 times for a match before giving up.
// This is a fundamentally racy check but it's important
// to notice if NumGoroutine and Stack are _always_ out of sync.
for i := 0; ; i++ {
// Give goroutines about to exit a chance to exit.
// The NumGoroutine and Stack below need to see
// the same state of the world, so anything we can do
// to keep it quiet is good.
runtime.Gosched()
n := runtime.NumGoroutine()
buf = buf[:runtime.Stack(buf, true)]
nstk := strings.Count(string(buf), "goroutine ")
if n == nstk {
break
}
if i >= 10 {
t.Fatalf("NumGoroutine=%d, but found %d goroutines in stack dump: %s", n, nstk, buf)
}
}
}
func TestPingPongHog(t *testing.T) {
if testing.Short() {
t.Skip("skipping in -short mode")
}
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
done := make(chan bool)
hogChan, lightChan := make(chan bool), make(chan bool)
hogCount, lightCount := 0, 0
run := func(limit int, counter *int, wake chan bool) {
for {
select {
case <-done:
return
case <-wake:
for i := 0; i < limit; i++ {
*counter++
}
wake <- true
}
}
}
// Start two co-scheduled hog goroutines.
for i := 0; i < 2; i++ {
go run(1e6, &hogCount, hogChan)
}
// Start two co-scheduled light goroutines.
for i := 0; i < 2; i++ {
go run(1e3, &lightCount, lightChan)
}
// Start goroutine pairs and wait for a few preemption rounds.
hogChan <- true
lightChan <- true
time.Sleep(100 * time.Millisecond)
close(done)
<-hogChan
<-lightChan
// Check that hogCount and lightCount are within a factor of
// 2, which indicates that both pairs of goroutines handed off
// the P within a time-slice to their buddy.
if hogCount > lightCount*2 || lightCount > hogCount*2 {
t.Fatalf("want hogCount/lightCount in [0.5, 2]; got %d/%d = %g", hogCount, lightCount, float64(hogCount)/float64(lightCount))
}
}
func BenchmarkPingPongHog(b *testing.B) {
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
// Create a CPU hog
stop, done := make(chan bool), make(chan bool)
go func() {
for {
select {
case <-stop:
done <- true
return
default:
}
}
}()
// Ping-pong b.N times
ping, pong := make(chan bool), make(chan bool)
go func() {
for j := 0; j < b.N; j++ {
pong <- <-ping
}
close(stop)
done <- true
}()
go func() {
for i := 0; i < b.N; i++ {
ping <- <-pong
}
done <- true
}()
b.ResetTimer()
ping <- true // Start ping-pong
<-stop
b.StopTimer()
<-ping // Let last ponger exit
<-done // Make sure goroutines exit
<-done
<-done
}
func stackGrowthRecursive(i int) {
var pad [128]uint64
if i != 0 && pad[0] == 0 {
stackGrowthRecursive(i - 1)
}
}
func TestPreemptSplitBig(t *testing.T) {
if testing.Short() {
t.Skip("skipping in -short mode")
}
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
stop := make(chan int)
go big(stop)
for i := 0; i < 3; i++ {
time.Sleep(10 * time.Microsecond) // let big start running
runtime.GC()
}
close(stop)
}
// big spins in a long loop (giving the GC ample opportunity to request a
// preemption), calls bigframe each iteration, and keeps looping until the
// stop channel is closed.
//
// Bug fix: the stop check must be non-blocking. Without the default case
// the select parks on the first iteration, so the function never loops and
// the test no longer exercises repeated preemption.
func big(stop chan int) int {
	n := 0
	for {
		// delay so that gc is sure to have asked for a preemption
		for i := 0; i < 1e9; i++ {
			n++
		}
		// call bigframe, which used to miss the preemption in its prologue.
		bigframe(stop)
		// check if we've been asked to stop (non-blocking).
		select {
		case <-stop:
			return n
		default:
		}
	}
}
func bigframe(stop chan int) int {
// not splitting the stack will overflow.
// small will notice that it needs a stack split and will
// catch the overflow.
var x [8192]byte
return small(stop, &x)
}
func small(stop chan int, x *[8192]byte) int {
for i := range x {
x[i] = byte(i)
}
sum := 0
for i := range x {
sum += int(x[i])
}
// keep small from being a leaf function, which might
// make it not do any stack check at all.
nonleaf(stop)
return sum
}
func nonleaf(stop chan int) bool {
// do something that won't be inlined:
select {
case <-stop:
return true
default:
return false
}
}
func TestSchedLocalQueue(t *testing.T) {
runtime.RunSchedLocalQueueTest()
}
func TestSchedLocalQueueSteal(t *testing.T) {
runtime.RunSchedLocalQueueStealTest()
}
func benchmarkStackGrowth(b *testing.B, rec int) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
stackGrowthRecursive(rec)
}
})
}
func BenchmarkStackGrowth(b *testing.B) {
benchmarkStackGrowth(b, 10)
}
func BenchmarkStackGrowthDeep(b *testing.B) {
benchmarkStackGrowth(b, 1024)
}
func BenchmarkCreateGoroutines(b *testing.B) {
benchmarkCreateGoroutines(b, 1)
}
func BenchmarkCreateGoroutinesParallel(b *testing.B) {
benchmarkCreateGoroutines(b, runtime.GOMAXPROCS(-1))
}
func benchmarkCreateGoroutines(b *testing.B, procs int) {
c := make(chan bool)
var f func(n int)
f = func(n int) {
if n == 0 {
c <- true
return
}
go f(n - 1)
}
for i := 0; i < procs; i++ {
go f(b.N / procs)
}
for i := 0; i < procs; i++ {
<-c
}
}
func BenchmarkCreateGoroutinesCapture(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
const N = 4
var wg sync.WaitGroup
wg.Add(N)
for i := 0; i < N; i++ {
i := i
go func() {
if i >= N {
b.Logf("bad") // just to capture b
}
wg.Done()
}()
}
wg.Wait()
}
}
func BenchmarkClosureCall(b *testing.B) {
sum := 0
off1 := 1
for i := 0; i < b.N; i++ {
off2 := 2
func() {
sum += i + off1 + off2
}()
}
_ = sum
}
// Matrix is a square matrix stored as a slice of rows.
type Matrix [][]float64

// BenchmarkMatmult benchmarks the divide-and-conquer matrix multiply.
func BenchmarkMatmult(b *testing.B) {
	b.StopTimer()
	// matmult is O(N**3) but testing expects O(b.N),
	// so we need to take cube root of b.N
	n := int(math.Cbrt(float64(b.N))) + 1
	A := makeMatrix(n)
	B := makeMatrix(n)
	C := makeMatrix(n)
	b.StartTimer()
	matmult(nil, A, B, C, 0, n, 0, n, 0, n, 8)
}

// makeMatrix returns an n x n matrix whose entry (row, col) is row*n+col.
func makeMatrix(n int) Matrix {
	m := make(Matrix, n)
	for row := range m {
		m[row] = make([]float64, n)
		for col := range m[row] {
			m[row][col] = float64(row*n + col)
		}
	}
	return m
}

// matmult accumulates A*B into C over the index ranges [i0,i1) x [j0,j1)
// x [k0,k1), recursively halving the largest dimension until it drops
// below threshold. Splits along i and j run one half concurrently; when
// done is non-nil a single value is sent on it at completion.
func matmult(done chan<- struct{}, A, B, C Matrix, i0, i1, j0, j1, k0, k1, threshold int) {
	di, dj, dk := i1-i0, j1-j0, k1-k0
	switch {
	case di >= dj && di >= dk && di >= threshold:
		// split the row range; run one half concurrently
		mid := i0 + di/2
		sub := make(chan struct{}, 1)
		go matmult(sub, A, B, C, i0, mid, j0, j1, k0, k1, threshold)
		matmult(nil, A, B, C, mid, i1, j0, j1, k0, k1, threshold)
		<-sub
	case dj >= dk && dj >= threshold:
		// split the column range; run one half concurrently
		mid := j0 + dj/2
		sub := make(chan struct{}, 1)
		go matmult(sub, A, B, C, i0, i1, j0, mid, k0, k1, threshold)
		matmult(nil, A, B, C, i0, i1, mid, j1, k0, k1, threshold)
		<-sub
	case dk >= threshold:
		// split the k range sequentially: both halves write the same
		// C[i][j] cells, so running them concurrently would race
		mid := k0 + dk/2
		matmult(nil, A, B, C, i0, i1, j0, j1, k0, mid, threshold)
		matmult(nil, A, B, C, i0, i1, j0, j1, mid, k1, threshold)
	default:
		// small enough: multiply directly
		for i := i0; i < i1; i++ {
			for j := j0; j < j1; j++ {
				for k := k0; k < k1; k++ {
					C[i][j] += A[i][k] * B[k][j]
				}
			}
		}
	}
	if done != nil {
		done <- struct{}{}
	}
}
|
package bip38
import (
"bytes"
"golang.org/x/crypto/scrypt"
"crypto/aes"
"crypto/sha256"
"github.com/cculianu/gocoin/btc"
"log"
"math/big"
// "encoding/hex"
)
// sha256Twice returns SHA256(SHA256(b)), the double hash used throughout
// the Bitcoin protocol for checksums and key derivation.
func sha256Twice(b []byte) []byte {
	first := sha256.Sum256(b)
	second := sha256.Sum256(first[:])
	return second[:]
}
// Pk2Wif encodes a raw private key as a mainnet WIF string: 0x80 prefix,
// optional 0x01 compression marker, then a 4-byte double-SHA256 checksum,
// all base58-encoded.
func Pk2Wif(pk []byte, compressed bool) string {
	payload := append([]byte{0x80}, pk...) // 0x80 = mainnet prefix
	if compressed {
		payload = append(payload, 0x01)
	}
	checksum := sha256Twice(payload)[:4]
	return btc.Encodeb58(append(payload, checksum...))
}
// DecryptWithPassphraseNoEC decrypts a non-EC-multiplied BIP38 payload
// (prefix 0x0142) with the supplied passphrase. It returns the private
// key encoded as WIF, or "" when the passphrase is wrong (the embedded
// address-hash check fails). Fatal conditions abort the process, matching
// the error style of the rest of this package.
func DecryptWithPassphraseNoEC(dec []byte, passphrase string) string {
	flagByte := dec[2]
	compressed := (flagByte & 0x20) == 0x20
	if !compressed && flagByte != 0xc0 {
		log.Fatal("Invalid BIP38 compression flag")
	}
	// dec[3:7] holds the first 4 bytes of SHA256(SHA256(address)); it is
	// both the scrypt salt and the passphrase-verification value.
	salt := dec[3:7]
	scryptBuf, err := scrypt.Key([]byte(passphrase), salt, 16384, 8, 8, 64)
	if err != nil {
		// This error was previously ignored entirely.
		log.Fatal(err)
	}
	derivedHalf1 := scryptBuf[0:32]
	derivedHalf2 := scryptBuf[32:64]
	encryptedHalf1 := dec[7:23]
	encryptedHalf2 := dec[23:39]
	h, err := aes.NewCipher(derivedHalf2)
	if err != nil {
		log.Fatal(err)
	}
	k1 := make([]byte, 16)
	k2 := make([]byte, 16)
	// An AES block cipher is stateless, so one cipher decrypts both
	// halves; the original rebuilt the cipher needlessly for the second.
	h.Decrypt(k1, encryptedHalf1)
	h.Decrypt(k2, encryptedHalf2)
	keyBytes := make([]byte, 32)
	for i := 0; i < 16; i++ {
		keyBytes[i] = k1[i] ^ derivedHalf1[i]
		keyBytes[i+16] = k2[i] ^ derivedHalf1[i+16]
	}
	// Use the fixed-width 32-byte key directly: round-tripping through
	// big.Int.Bytes() (as before) drops leading zero bytes and corrupts
	// roughly 1 in 256 keys.
	pubKey, err := btc.PublicFromPrivate(keyBytes, compressed)
	if pubKey == nil {
		log.Fatal(err)
	}
	addr := btc.NewAddrFromPubkey(pubKey, 0).String()
	addrHashed := sha256Twice([]byte(addr))[0:4]
	if !bytes.Equal(addrHashed, salt) {
		return "" // wrong passphrase
	}
	return Pk2Wif(keyBytes, compressed)
}
// DecryptWithPassphrase decrypts a 39-byte BIP38 payload with the given
// passphrase, dispatching on the two-byte prefix: 0x0142 is the
// non-EC-multiplied scheme, 0x0143 the EC-multiplied scheme. It returns
// the decrypted private key as WIF, or "" when the passphrase is wrong.
func DecryptWithPassphrase(dec []byte, passphrase string) string {
	if len(dec) != 39 {
		log.Fatal("Provided encrypted key data is of the wrong length")
	}
	if dec[0] == 0x01 && dec[1] == 0x42 {
		// A stray debug log.Fatal("ha") used to sit here, aborting the
		// process before this branch could ever return.
		return DecryptWithPassphraseNoEC(dec, passphrase)
	} else if dec[0] == 0x01 && dec[1] == 0x43 {
		compress := dec[2]&0x20 == 0x20
		hasLotSequence := dec[2]&0x04 == 0x04
		var ownerSalt, ownerEntropy []byte
		if hasLotSequence {
			ownerSalt = dec[7:11]
			ownerEntropy = dec[7:15]
		} else {
			ownerSalt = dec[7:15]
			ownerEntropy = ownerSalt
		}
		prefactorA, err := scrypt.Key([]byte(passphrase), ownerSalt, 16384, 8, 8, 32)
		if err != nil {
			// Previously only the nil result was checked; check the error.
			log.Fatal(err)
		}
		var passFactor []byte
		if hasLotSequence {
			prefactorB := bytes.Join([][]byte{prefactorA, ownerEntropy}, nil)
			passFactor = sha256Twice(prefactorB)
		} else {
			passFactor = prefactorA
		}
		passpoint, err := btc.PublicFromPrivate(passFactor, true)
		if passpoint == nil {
			log.Fatal(err)
		}
		encryptedpart1 := dec[15:23]
		encryptedpart2 := dec[23:39]
		// Second derivation uses the cheap (1024,1,1) scrypt parameters
		// with the address hash + owner entropy as salt.
		derived, err := scrypt.Key(passpoint, bytes.Join([][]byte{dec[3:7], ownerEntropy}, nil), 1024, 1, 1, 64)
		if err != nil {
			log.Fatal(err)
		}
		h, err := aes.NewCipher(derived[32:])
		if err != nil {
			log.Fatal(err)
		}
		unencryptedpart2 := make([]byte, 16)
		h.Decrypt(unencryptedpart2, encryptedpart2)
		for i := range unencryptedpart2 {
			unencryptedpart2[i] ^= derived[i+16]
		}
		encryptedpart1 = bytes.Join([][]byte{encryptedpart1, unencryptedpart2[:8]}, nil)
		unencryptedpart1 := make([]byte, 16)
		h.Decrypt(unencryptedpart1, encryptedpart1)
		for i := range unencryptedpart1 {
			unencryptedpart1[i] ^= derived[i]
		}
		seeddb := bytes.Join([][]byte{unencryptedpart1[:16], unencryptedpart2[8:]}, nil)
		factorb := sha256Twice(seeddb)
		// N is the group order of the secp256k1 curve.
		bigN, success := new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)
		if !success {
			log.Fatal("Failed to create Int for N")
		}
		passFactorBig := new(big.Int).SetBytes(passFactor)
		factorbBig := new(big.Int).SetBytes(factorb)
		privKey := new(big.Int)
		privKey.Mul(passFactorBig, factorbBig)
		privKey.Mod(privKey, bigN)
		// Left-pad to 32 bytes: big.Int.Bytes() drops leading zero bytes,
		// which would corrupt keys whose first byte is 0x00.
		keyBytes := privKey.Bytes()
		if len(keyBytes) < 32 {
			keyBytes = append(make([]byte, 32-len(keyBytes)), keyBytes...)
		}
		pubKey, err := btc.PublicFromPrivate(keyBytes, compress)
		if pubKey == nil {
			log.Fatal(err)
		}
		addr := btc.NewAddrFromPubkey(pubKey, 0).String()
		addrHashed := sha256Twice([]byte(addr))
		if !bytes.Equal(addrHashed[:4], dec[3:7]) {
			return "" // wrong passphrase
		}
		return Pk2Wif(keyBytes, compress)
		//return hex.EncodeToString(privKey.Bytes())
	}
	log.Fatal("Malformed byte slice")
	return ""
}
Got rid of the debug output (a stray log.Fatal) that broke decryption.
package bip38
import (
"bytes"
"golang.org/x/crypto/scrypt"
"crypto/aes"
"crypto/sha256"
"github.com/cculianu/gocoin/btc"
"log"
"math/big"
// "encoding/hex"
)
// sha256Twice applies SHA-256 two times to b and returns the 32-byte
// result, as required by the Bitcoin checksum and BIP38 derivations.
func sha256Twice(b []byte) []byte {
	inner := sha256.Sum256(b)
	outer := sha256.Sum256(inner[:])
	return outer[:]
}
// Pk2Wif converts a raw private key to mainnet WIF: prefix 0x80, an
// optional 0x01 marker for compressed keys, a 4-byte double-SHA256
// checksum, then base58 encoding.
func Pk2Wif(pk []byte, compressed bool) string {
	wifBytes := append([]byte{0x80}, pk...) // prepend 0x80 for mainnet
	if compressed {
		wifBytes = append(wifBytes, 0x01)
	}
	digest := sha256Twice(wifBytes)
	return btc.Encodeb58(append(wifBytes, digest[0:4]...))
}
// DecryptWithPassphraseNoEC decrypts a non-EC-multiplied BIP38 payload
// (prefix 0x0142) with the supplied passphrase. It returns the private
// key encoded as WIF, or "" when the passphrase is wrong (the embedded
// address-hash check fails). Fatal conditions abort the process, matching
// the error style of the rest of this package.
func DecryptWithPassphraseNoEC(dec []byte, passphrase string) string {
	flagByte := dec[2]
	compressed := (flagByte & 0x20) == 0x20
	if !compressed && flagByte != 0xc0 {
		log.Fatal("Invalid BIP38 compression flag")
	}
	// dec[3:7] holds the first 4 bytes of SHA256(SHA256(address)); it is
	// both the scrypt salt and the passphrase-verification value.
	salt := dec[3:7]
	scryptBuf, err := scrypt.Key([]byte(passphrase), salt, 16384, 8, 8, 64)
	if err != nil {
		// This error was previously ignored entirely.
		log.Fatal(err)
	}
	derivedHalf1 := scryptBuf[0:32]
	derivedHalf2 := scryptBuf[32:64]
	encryptedHalf1 := dec[7:23]
	encryptedHalf2 := dec[23:39]
	h, err := aes.NewCipher(derivedHalf2)
	if err != nil {
		log.Fatal(err)
	}
	k1 := make([]byte, 16)
	k2 := make([]byte, 16)
	// An AES block cipher is stateless, so one cipher decrypts both
	// halves; the original rebuilt the cipher needlessly for the second.
	h.Decrypt(k1, encryptedHalf1)
	h.Decrypt(k2, encryptedHalf2)
	keyBytes := make([]byte, 32)
	for i := 0; i < 16; i++ {
		keyBytes[i] = k1[i] ^ derivedHalf1[i]
		keyBytes[i+16] = k2[i] ^ derivedHalf1[i+16]
	}
	// Use the fixed-width 32-byte key directly: round-tripping through
	// big.Int.Bytes() (as before) drops leading zero bytes and corrupts
	// roughly 1 in 256 keys.
	pubKey, err := btc.PublicFromPrivate(keyBytes, compressed)
	if pubKey == nil {
		log.Fatal(err)
	}
	addr := btc.NewAddrFromPubkey(pubKey, 0).String()
	addrHashed := sha256Twice([]byte(addr))[0:4]
	if !bytes.Equal(addrHashed, salt) {
		return "" // wrong passphrase
	}
	return Pk2Wif(keyBytes, compressed)
}
// DecryptWithPassphrase decrypts a 39-byte BIP38 payload with the given
// passphrase, dispatching on the two-byte prefix: 0x0142 is the
// non-EC-multiplied scheme, 0x0143 the EC-multiplied scheme. It returns
// the decrypted private key as WIF, or "" when the passphrase is wrong.
func DecryptWithPassphrase(dec []byte, passphrase string) string {
	if len(dec) != 39 {
		log.Fatal("Provided encrypted key data is of the wrong length")
	}
	if dec[0] == 0x01 && dec[1] == 0x42 {
		return DecryptWithPassphraseNoEC(dec, passphrase)
	} else if dec[0] == 0x01 && dec[1] == 0x43 {
		compress := dec[2]&0x20 == 0x20
		hasLotSequence := dec[2]&0x04 == 0x04
		var ownerSalt, ownerEntropy []byte
		if hasLotSequence {
			ownerSalt = dec[7:11]
			ownerEntropy = dec[7:15]
		} else {
			ownerSalt = dec[7:15]
			ownerEntropy = ownerSalt
		}
		prefactorA, err := scrypt.Key([]byte(passphrase), ownerSalt, 16384, 8, 8, 32)
		if err != nil {
			// Previously only the nil result was checked; check the error.
			log.Fatal(err)
		}
		var passFactor []byte
		if hasLotSequence {
			prefactorB := bytes.Join([][]byte{prefactorA, ownerEntropy}, nil)
			passFactor = sha256Twice(prefactorB)
		} else {
			passFactor = prefactorA
		}
		passpoint, err := btc.PublicFromPrivate(passFactor, true)
		if passpoint == nil {
			log.Fatal(err)
		}
		encryptedpart1 := dec[15:23]
		encryptedpart2 := dec[23:39]
		// Second derivation uses the cheap (1024,1,1) scrypt parameters
		// with the address hash + owner entropy as salt.
		derived, err := scrypt.Key(passpoint, bytes.Join([][]byte{dec[3:7], ownerEntropy}, nil), 1024, 1, 1, 64)
		if err != nil {
			log.Fatal(err)
		}
		h, err := aes.NewCipher(derived[32:])
		if err != nil {
			log.Fatal(err)
		}
		unencryptedpart2 := make([]byte, 16)
		h.Decrypt(unencryptedpart2, encryptedpart2)
		for i := range unencryptedpart2 {
			unencryptedpart2[i] ^= derived[i+16]
		}
		encryptedpart1 = bytes.Join([][]byte{encryptedpart1, unencryptedpart2[:8]}, nil)
		unencryptedpart1 := make([]byte, 16)
		h.Decrypt(unencryptedpart1, encryptedpart1)
		for i := range unencryptedpart1 {
			unencryptedpart1[i] ^= derived[i]
		}
		seeddb := bytes.Join([][]byte{unencryptedpart1[:16], unencryptedpart2[8:]}, nil)
		factorb := sha256Twice(seeddb)
		// N is the group order of the secp256k1 curve.
		bigN, success := new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)
		if !success {
			log.Fatal("Failed to create Int for N")
		}
		passFactorBig := new(big.Int).SetBytes(passFactor)
		factorbBig := new(big.Int).SetBytes(factorb)
		privKey := new(big.Int)
		privKey.Mul(passFactorBig, factorbBig)
		privKey.Mod(privKey, bigN)
		// Left-pad to 32 bytes: big.Int.Bytes() drops leading zero bytes,
		// which would corrupt keys whose first byte is 0x00.
		keyBytes := privKey.Bytes()
		if len(keyBytes) < 32 {
			keyBytes = append(make([]byte, 32-len(keyBytes)), keyBytes...)
		}
		pubKey, err := btc.PublicFromPrivate(keyBytes, compress)
		if pubKey == nil {
			log.Fatal(err)
		}
		addr := btc.NewAddrFromPubkey(pubKey, 0).String()
		addrHashed := sha256Twice([]byte(addr))
		if !bytes.Equal(addrHashed[:4], dec[3:7]) {
			return "" // wrong passphrase
		}
		return Pk2Wif(keyBytes, compress)
		//return hex.EncodeToString(privKey.Bytes())
	}
	log.Fatal("Malformed byte slice")
	return ""
}
|
//Copyright 2014 Rana Ian. All rights reserved.
//Use of this source code is governed by The MIT License
//found in the accompanying LICENSE file.
package ora_test
import (
"fmt"
"strings"
"testing"
"gopkg.in/rana/ora.v4"
)
//// bytes
//longRaw oracleColumnType = "long raw not null"
//longRawNull oracleColumnType = "long raw null"
//raw2000 oracleColumnType = "raw(2000) not null"
//raw2000Null oracleColumnType = "raw(2000) null"
//blob oracleColumnType = "blob not null"
//blobNull oracleColumnType = "blob null"
// _T_bytesCols lists the names of the Oracle bytes-backed column types
// under test; each name keys into _T_colType.
var _T_bytesCols = []string{
	"longRaw", "longRawNull",
	"raw2000", "raw2000Null",
	"blob", "blobNull",
}
// TestBindDefine_bytes runs bind/define round-trips for every bytes
// column type crossed with every single-value generator, one parallel
// subtest per combination.
func TestBindDefine_bytes(t *testing.T) {
	sc := ora.NewStmtCfg()
	// testCase pairs a value generator with the Oracle column type and
	// the Go column type used for the define.
	type testCase struct {
		gen func() interface{}
		ct  oracleColumnType
		gct ora.GoColumnType
	}
	testCases := make(map[string]testCase)
	for _, ctName := range _T_bytesCols {
		for _, typName := range []string{
			"bytes", "OraBytes", "OraBytesLob",
			"bytes2000", "OraBytes2000",
		} {
			// Nullable columns are only paired with Ora* wrapper types,
			// which can represent NULL.
			if strings.HasSuffix(ctName, "Null") && !strings.Contains(typName, "Ora") {
				continue
			}
			gct := ora.Bin
			if strings.Contains(typName, "Ora") {
				gct = ora.OraBin
			}
			testCases[typName+"_"+ctName] = testCase{
				gen: _T_bytesGen[typName],
				ct:  _T_colType[ctName],
				gct: gct,
			}
		}
	}
	for name, tc := range testCases {
		tc := tc // capture: the subtest body runs after t.Parallel releases
		if tc.gen == nil {
			continue
		}
		t.Run(name, func(t *testing.T) {
			t.Parallel()
			testBindDefine(tc.gen(), tc.ct, t, sc, tc.gct)
		})
	}
}
// TestBindSlice_bytes runs bind round-trips for slice-valued generators
// against every bytes column type, one parallel subtest per combination.
func TestBindSlice_bytes(t *testing.T) {
	sc := ora.NewStmtCfg()
	type testCase struct {
		ct  oracleColumnType
		gen func() interface{}
	}
	testCases := make(map[string]testCase)
	for _, typName := range []string{
		"bytesSlice", "OraBytesSlice",
		"bytesSlice2000", "OraBytesSlice2000",
	} {
		for _, ctName := range _T_bytesCols {
			// Nullable columns use the "_null" generator variant.
			typName := typName
			if strings.HasSuffix(ctName, "Null") {
				typName += "_null"
			}
			testCases[typName+"_"+ctName] = testCase{
				ct:  _T_colType[ctName],
				gen: _T_bytesGen[typName],
			}
		}
	}
	for name, tc := range testCases {
		tc := tc // capture: the subtest body runs after t.Parallel releases
		if tc.gen == nil {
			continue
		}
		t.Run(name, func(t *testing.T) {
			t.Parallel()
			testBindDefine(tc.gen(), tc.ct, t, sc)
		})
	}
}
// TestMultiDefine_bytes checks multi-define behaviour for every bytes
// column type, one parallel subtest per column.
func TestMultiDefine_bytes(t *testing.T) {
	for _, ctName := range _T_bytesCols {
		// Capture the loop variable: the subtest body runs after
		// t.Parallel() releases, by which time the shared loop variable
		// may have advanced (the sibling tests use the same tc := tc idiom).
		ctName := ctName
		t.Run(ctName, func(t *testing.T) {
			t.Parallel()
			//enableLogging(t)
			testMultiDefine(gen_bytes(9), _T_colType[ctName], t)
		})
	}
}
// TestWorkload_bytes runs the generic workload test for each raw/blob
// column type. LOB (blob) subtests must NOT run in parallel — parallel
// LOB access breaks the workload — so t.Parallel is only enabled for the
// raw columns. "blob" and "blobNull" both contain "lob" and therefore
// run serially.
func TestWorkload_bytes(t *testing.T) {
	for _, ctName := range []string{"raw2000", "raw2000Null", "blob", "blobNull"} {
		ctName := ctName // capture for use inside the subtest closure
		ct := _T_colType[ctName]
		t.Run(ctName, func(t *testing.T) {
			if !strings.Contains(ctName, "lob") {
				t.Parallel()
			}
			//if strings.Contains(ctName, "blob") {
			//enableLogging(t)
			//}
			testWorkload(ct, t)
		})
	}
}
// TestBindDefine_bytes_nil verifies that a nil value round-trips through
// each nullable bytes column type, one parallel subtest per column.
func TestBindDefine_bytes_nil(t *testing.T) {
	cfg := ora.NewStmtCfg()
	for _, colName := range []string{"longRawNull", "raw2000Null", "blobNull"} {
		colType := _T_colType[colName]
		t.Run(colName, func(t *testing.T) {
			t.Parallel()
			testBindDefine(nil, colType, t, cfg)
		})
	}
}
// _T_bytesGen maps a Go type name to a generator that produces a fresh
// test value of that type; "_null" variants produce NULL-capable values
// and the numeric suffix is the generated payload length.
var _T_bytesGen = map[string](func() interface{}){
	"bytes":                  func() interface{} { return gen_bytes(9) },
	"bytes2000":              func() interface{} { return gen_bytes(2000) },
	"OraBytes":               func() interface{} { return gen_OraBytes(9, false) },
	"OraBytes2000":           func() interface{} { return gen_OraBytes(2000, false) },
	"OraBytes_null":          func() interface{} { return gen_OraBytes(9, true) },
	"OraBytes2000_null":      func() interface{} { return gen_OraBytes(2000, true) },
	"OraBytesLob":            func() interface{} { return gen_OraBytesLob(9, false) },
	"bytesSlice":             func() interface{} { return gen_bytesSlice(9) },
	"bytesSlice2000":         func() interface{} { return gen_bytesSlice(2000) },
	"OraBytesSlice":          func() interface{} { return gen_OraBytesSlice(9, false) },
	"OraBytesSlice2000":      func() interface{} { return gen_OraBytesSlice(2000, false) },
	"OraBytesSlice_null":     func() interface{} { return gen_OraBytesSlice(9, true) },
	"OraBytesSlice2000_null": func() interface{} { return gen_OraBytesSlice(2000, true) },
}
//// Do not test workload of multiple Oracle LONG RAW types within the same table because
//// ORA-01754: a table may contain only one column of type LONG
//func TestWorkload_longRaw_session(t *testing.T) {
// testWorkload(testWorkloadColumnCount, t, longRaw)
//}
//// Do not test workload of multiple Oracle LONG RAW types within the same table because
//// ORA-01754: a table may contain only one column of type LONG
//func TestWorkload_longRawNull_session(t *testing.T) {
// testWorkload(testWorkloadColumnCount, t, longRawNull)
//}
// TestBindDefine_bytes_blob_size exercises LOB round-trips at sizes just
// below, at, and above the LOB buffer size (and three times it) to hit
// the chunking boundaries. The subtests deliberately do not call
// t.Parallel — LOB tests appear not to tolerate parallel execution
// (NOTE(review): inferred from the sibling revision; confirm).
func TestBindDefine_bytes_blob_size(t *testing.T) {
	sc := ora.NewStmtCfg()
	cfg := ora.Cfg()
	defer ora.SetCfg(cfg) // restore the global config when done
	ora.SetCfg(cfg.SetLobBufferSize(1024))
	lbs := ora.Cfg().LobBufferSize()
	for _, size := range []int{
		lbs - 1,
		lbs,
		lbs + 1,
		lbs*3 - 1,
		lbs * 3,
		lbs*3 + 1,
	} {
		t.Run(fmt.Sprintf("%d", size), func(t *testing.T) {
			testBindDefine(gen_bytes(size), blob, t, sc, ora.Bin)
			testBindDefine(gen_OraBytesLob(size, false), blob, t, sc, ora.Bin)
			lob := gen_OraBytesLob(size, false)
			testBindDefine(&lob, blob, t, sc, ora.Bin)
		})
	}
}
LOB tests cannot be run with t.Parallel().
//Copyright 2014 Rana Ian. All rights reserved.
//Use of this source code is governed by The MIT License
//found in the accompanying LICENSE file.
package ora_test
import (
"fmt"
"strings"
"testing"
"gopkg.in/rana/ora.v4"
)
//// bytes
//longRaw oracleColumnType = "long raw not null"
//longRawNull oracleColumnType = "long raw null"
//raw2000 oracleColumnType = "raw(2000) not null"
//raw2000Null oracleColumnType = "raw(2000) null"
//blob oracleColumnType = "blob not null"
//blobNull oracleColumnType = "blob null"
// _T_bytesCols lists the names of the Oracle bytes-backed column types
// under test; each name keys into _T_colType.
var _T_bytesCols = []string{
	"longRaw", "longRawNull",
	"raw2000", "raw2000Null",
	"blob", "blobNull",
}
// TestBindDefine_bytes runs bind/define round-trips for every bytes
// column type crossed with every single-value generator, one parallel
// subtest per combination.
func TestBindDefine_bytes(t *testing.T) {
	sc := ora.NewStmtCfg()
	// testCase pairs a value generator with the Oracle column type and
	// the Go column type used for the define.
	type testCase struct {
		gen func() interface{}
		ct  oracleColumnType
		gct ora.GoColumnType
	}
	testCases := make(map[string]testCase)
	for _, ctName := range _T_bytesCols {
		for _, typName := range []string{
			"bytes", "OraBytes", "OraBytesLob",
			"bytes2000", "OraBytes2000",
		} {
			// Nullable columns are only paired with Ora* wrapper types,
			// which can represent NULL.
			if strings.HasSuffix(ctName, "Null") && !strings.Contains(typName, "Ora") {
				continue
			}
			gct := ora.Bin
			if strings.Contains(typName, "Ora") {
				gct = ora.OraBin
			}
			testCases[typName+"_"+ctName] = testCase{
				gen: _T_bytesGen[typName],
				ct:  _T_colType[ctName],
				gct: gct,
			}
		}
	}
	for name, tc := range testCases {
		tc := tc // capture: the subtest body runs after t.Parallel releases
		if tc.gen == nil {
			continue
		}
		t.Run(name, func(t *testing.T) {
			t.Parallel()
			testBindDefine(tc.gen(), tc.ct, t, sc, tc.gct)
		})
	}
}
// TestBindSlice_bytes runs bind round-trips for slice-valued generators
// against every bytes column type, one parallel subtest per combination.
func TestBindSlice_bytes(t *testing.T) {
	sc := ora.NewStmtCfg()
	type testCase struct {
		ct  oracleColumnType
		gen func() interface{}
	}
	testCases := make(map[string]testCase)
	for _, typName := range []string{
		"bytesSlice", "OraBytesSlice",
		"bytesSlice2000", "OraBytesSlice2000",
	} {
		for _, ctName := range _T_bytesCols {
			// Nullable columns use the "_null" generator variant.
			typName := typName
			if strings.HasSuffix(ctName, "Null") {
				typName += "_null"
			}
			testCases[typName+"_"+ctName] = testCase{
				ct:  _T_colType[ctName],
				gen: _T_bytesGen[typName],
			}
		}
	}
	for name, tc := range testCases {
		tc := tc // capture: the subtest body runs after t.Parallel releases
		if tc.gen == nil {
			continue
		}
		t.Run(name, func(t *testing.T) {
			t.Parallel()
			testBindDefine(tc.gen(), tc.ct, t, sc)
		})
	}
}
// TestMultiDefine_bytes checks multi-define behaviour for every bytes
// column type, one parallel subtest per column.
func TestMultiDefine_bytes(t *testing.T) {
	for _, ctName := range _T_bytesCols {
		// Capture the loop variable: the subtest body runs after
		// t.Parallel() releases, by which time the shared loop variable
		// may have advanced (the sibling tests use the same tc := tc idiom).
		ctName := ctName
		t.Run(ctName, func(t *testing.T) {
			t.Parallel()
			//enableLogging(t)
			testMultiDefine(gen_bytes(9), _T_colType[ctName], t)
		})
	}
}
// TestWorkload_bytes runs the workload test per raw/blob column type.
// LOB tests cannot run with t.Parallel(), so parallelism is only enabled
// for non-LOB columns; note that "blob" and "blobNull" both contain
// "lob", so exactly the blob-backed subtests run serially.
func TestWorkload_bytes(t *testing.T) {
	for _, ctName := range []string{"raw2000", "raw2000Null", "blob", "blobNull"} {
		ct := _T_colType[ctName]
		t.Run(ctName, func(t *testing.T) {
			// This check runs before t.Parallel, i.e. synchronously within
			// the loop iteration, so ctName still holds the current value.
			if !strings.Contains(ctName, "lob") {
				t.Parallel()
			}
			//if strings.Contains(ctName, "blob") {
			//enableLogging(t)
			//}
			testWorkload(ct, t)
		})
	}
}
// TestBindDefine_bytes_nil verifies that a nil value round-trips through
// each nullable bytes column type, one parallel subtest per column.
func TestBindDefine_bytes_nil(t *testing.T) {
	cfg := ora.NewStmtCfg()
	for _, colName := range []string{"longRawNull", "raw2000Null", "blobNull"} {
		colType := _T_colType[colName]
		t.Run(colName, func(t *testing.T) {
			t.Parallel()
			testBindDefine(nil, colType, t, cfg)
		})
	}
}
// _T_bytesGen maps a Go type name to a generator that produces a fresh
// test value of that type; "_null" variants produce NULL-capable values
// and the numeric suffix is the generated payload length.
var _T_bytesGen = map[string](func() interface{}){
	"bytes":                  func() interface{} { return gen_bytes(9) },
	"bytes2000":              func() interface{} { return gen_bytes(2000) },
	"OraBytes":               func() interface{} { return gen_OraBytes(9, false) },
	"OraBytes2000":           func() interface{} { return gen_OraBytes(2000, false) },
	"OraBytes_null":          func() interface{} { return gen_OraBytes(9, true) },
	"OraBytes2000_null":      func() interface{} { return gen_OraBytes(2000, true) },
	"OraBytesLob":            func() interface{} { return gen_OraBytesLob(9, false) },
	"bytesSlice":             func() interface{} { return gen_bytesSlice(9) },
	"bytesSlice2000":         func() interface{} { return gen_bytesSlice(2000) },
	"OraBytesSlice":          func() interface{} { return gen_OraBytesSlice(9, false) },
	"OraBytesSlice2000":      func() interface{} { return gen_OraBytesSlice(2000, false) },
	"OraBytesSlice_null":     func() interface{} { return gen_OraBytesSlice(9, true) },
	"OraBytesSlice2000_null": func() interface{} { return gen_OraBytesSlice(2000, true) },
}
//// Do not test workload of multiple Oracle LONG RAW types within the same table because
//// ORA-01754: a table may contain only one column of type LONG
//func TestWorkload_longRaw_session(t *testing.T) {
// testWorkload(testWorkloadColumnCount, t, longRaw)
//}
//// Do not test workload of multiple Oracle LONG RAW types within the same table because
//// ORA-01754: a table may contain only one column of type LONG
//func TestWorkload_longRawNull_session(t *testing.T) {
// testWorkload(testWorkloadColumnCount, t, longRawNull)
//}
// TestBindDefine_bytes_blob_size exercises LOB round-trips at sizes just
// below, at, and above the LOB buffer size (and three times it) to hit
// the chunking boundaries. The subtests deliberately do not call
// t.Parallel: LOB tests cannot be run in parallel.
func TestBindDefine_bytes_blob_size(t *testing.T) {
	sc := ora.NewStmtCfg()
	cfg := ora.Cfg()
	defer ora.SetCfg(cfg) // restore the global config when done
	ora.SetCfg(cfg.SetLobBufferSize(1024))
	lbs := ora.Cfg().LobBufferSize()
	for _, size := range []int{
		lbs - 1,
		lbs,
		lbs + 1,
		lbs*3 - 1,
		lbs * 3,
		lbs*3 + 1,
	} {
		t.Run(fmt.Sprintf("%d", size), func(t *testing.T) {
			testBindDefine(gen_bytes(size), blob, t, sc, ora.Bin)
			testBindDefine(gen_OraBytesLob(size, false), blob, t, sc, ora.Bin)
			lob := gen_OraBytesLob(size, false)
			testBindDefine(&lob, blob, t, sc, ora.Bin)
		})
	}
}
|
// Copyright 2015 SeukWon Kang (kasworld@gmail.com)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"runtime"
"time"
"github.com/kasworld/actionstat"
"github.com/kasworld/go-sdl2/sdl"
"github.com/kasworld/go-sdlgui"
"github.com/kasworld/go-sdlgui/analogueclock"
"github.com/kasworld/log"
)
// main creates the clock application and runs its event loop.
func main() {
	NewApp().Run()
}
// App bundles the state of the SDL clock example application.
type App struct {
	Quit     bool                   // set true to leave the Run loop
	SdlCh    chan interface{}       // SDL events forwarded by sdlgui
	Keys     sdlgui.KeyState        // current keyboard state
	Win      *sdlgui.Window         // main window
	Controls sdlgui.ControlIList    // all registered controls
	cl       *analogueclock.Clock   // the clock control
	Stat     *actionstat.ActionStat // action counter for the info log
}
// Window and clock geometry, plus the redraw rate in frames per second.
const (
	WinW    = 512
	WinH    = 512
	ClockW  = 512
	ClockH  = 512
	DrawFPS = 30
)
// NewApp builds the application: SDL event channel, key state, main
// window, action counter, and the clock control.
func NewApp() *App {
	app := App{
		SdlCh: make(chan interface{}, 1),
		Keys:  make(map[sdl.Scancode]bool),
		Win:   sdlgui.NewWindow("SDL GUI Clock Example", WinW, WinH, true),
		Stat:  actionstat.New(),
	}
	app.addControls()
	app.Win.UpdateAll()
	return &app
}
// AddControl registers c with both the app's control list and the window.
func (app *App) AddControl(c sdlgui.ControlI) {
	app.Controls = append(app.Controls, c)
	app.Win.AddControl(c)
}
// addControls creates the clock control and registers it with the window.
// Change as the app's needs evolve.
// The receiver is renamed from g to app for consistency with every other
// method on App.
func (app *App) addControls() {
	app.cl = analogueclock.New(0, 0, 0, ClockW, ClockH)
	app.AddControl(app.cl)
}
// Run drives the application: it pins the OS thread for SDL, pumps SDL
// events, redraws at DrawFPS, and logs the action counter once per
// second, until Quit becomes true.
func (app *App) Run() {
	// need to co-exist sdl lib
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	// start sdl event loop
	sdlgui.SDLEvent2Ch(app.SdlCh)
	// NOTE(review): time.Tick tickers are never stopped; acceptable here
	// since Run lives for the whole process.
	timerInfoCh := time.Tick(time.Duration(1000) * time.Millisecond)
	timerDrawCh := time.Tick(time.Second / DrawFPS)
	for !app.Quit {
		select {
		case data := <-app.SdlCh:
			// presumably the handlers return true when the event requests
			// shutdown (window close / quit key) — TODO confirm
			if app.Win.ProcessSDLMouseEvent(data) ||
				app.Keys.ProcessSDLKeyEvent(data) {
				app.Quit = true
			}
			app.Stat.Inc() // count processed SDL events
		case <-timerDrawCh:
			// redraw every control and present the frame
			app.cl.SetTime(time.Now())
			for _, v := range app.Controls {
				v.DrawSurface()
			}
			app.Win.Update()
		case <-timerInfoCh:
			log.Info("stat %v", app.Stat)
			app.Stat.UpdateLap()
		}
	}
}
Change the Stat display: count drawn frames (FPS) instead of SDL events.
// Copyright 2015 SeukWon Kang (kasworld@gmail.com)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"runtime"
"time"
"github.com/kasworld/actionstat"
"github.com/kasworld/go-sdl2/sdl"
"github.com/kasworld/go-sdlgui"
"github.com/kasworld/go-sdlgui/analogueclock"
"github.com/kasworld/log"
)
// main creates the clock application and runs its event loop.
func main() {
	NewApp().Run()
}
// App bundles the state of the SDL clock example application.
type App struct {
	Quit     bool                   // set true to leave the Run loop
	SdlCh    chan interface{}       // SDL events forwarded by sdlgui
	Keys     sdlgui.KeyState        // current keyboard state
	Win      *sdlgui.Window         // main window
	Controls sdlgui.ControlIList    // all registered controls
	cl       *analogueclock.Clock   // the clock control
	Stat     *actionstat.ActionStat // frame counter for the FPS log
}
// Window and clock geometry, plus the redraw rate in frames per second.
const (
	WinW    = 512
	WinH    = 512
	ClockW  = 512
	ClockH  = 512
	DrawFPS = 30
)
// NewApp builds the application: SDL event channel, key state, main
// window, action counter, and the clock control.
func NewApp() *App {
	app := App{
		SdlCh: make(chan interface{}, 1),
		Keys:  make(map[sdl.Scancode]bool),
		Win:   sdlgui.NewWindow("SDL GUI Clock Example", WinW, WinH, true),
		Stat:  actionstat.New(),
	}
	app.addControls()
	app.Win.UpdateAll()
	return &app
}
// AddControl registers c with both the app's control list and the window.
func (app *App) AddControl(c sdlgui.ControlI) {
	app.Controls = append(app.Controls, c)
	app.Win.AddControl(c)
}
// addControls creates the clock control and registers it with the window.
// Change as the app's needs evolve.
// The receiver is renamed from g to app for consistency with every other
// method on App.
func (app *App) addControls() {
	app.cl = analogueclock.New(0, 0, 0, ClockW, ClockH)
	app.AddControl(app.cl)
}
// Run drives the application: it pins the OS thread for SDL, pumps SDL
// events, redraws at DrawFPS, and logs the drawn-frame count (FPS) once
// per second, until Quit becomes true.
func (app *App) Run() {
	// need to co-exist sdl lib
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	// start sdl event loop
	sdlgui.SDLEvent2Ch(app.SdlCh)
	// NOTE(review): time.Tick tickers are never stopped; acceptable here
	// since Run lives for the whole process.
	timerInfoCh := time.Tick(time.Duration(1000) * time.Millisecond)
	timerDrawCh := time.Tick(time.Second / DrawFPS)
	for !app.Quit {
		select {
		case data := <-app.SdlCh:
			// presumably the handlers return true when the event requests
			// shutdown (window close / quit key) — TODO confirm
			if app.Win.ProcessSDLMouseEvent(data) ||
				app.Keys.ProcessSDLKeyEvent(data) {
				app.Quit = true
			}
		case <-timerDrawCh:
			// redraw every control and present the frame
			app.cl.SetTime(time.Now())
			for _, v := range app.Controls {
				v.DrawSurface()
			}
			app.Win.Update()
			app.Stat.Inc() // count drawn frames, so the info log shows FPS
		case <-timerInfoCh:
			log.Info("FPS %v", app.Stat)
			app.Stat.UpdateLap()
		}
	}
}
|
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package snowman
import (
"fmt"
"time"
"github.com/ava-labs/avalanchego/ids"
"github.com/ava-labs/avalanchego/network"
"github.com/ava-labs/avalanchego/snow/choices"
"github.com/ava-labs/avalanchego/snow/consensus/snowball"
"github.com/ava-labs/avalanchego/snow/consensus/snowman"
"github.com/ava-labs/avalanchego/snow/consensus/snowman/poll"
"github.com/ava-labs/avalanchego/snow/engine/common"
"github.com/ava-labs/avalanchego/snow/engine/snowman/bootstrap"
"github.com/ava-labs/avalanchego/snow/events"
"github.com/ava-labs/avalanchego/utils/constants"
"github.com/ava-labs/avalanchego/utils/formatting"
"github.com/ava-labs/avalanchego/utils/wrappers"
)
const (
	// TODO define this constant in one place rather than here and in snowman
	// Max containers size in a MultiPut message: 4/5 of the max message
	// size, leaving the remaining fifth as headroom for message overhead.
	maxContainersLen = int(4 * network.DefaultMaxMessageSize / 5)
)
// Compile-time check that Transitive satisfies the Engine interface.
var _ Engine = &Transitive{}

// Transitive implements the Engine interface by attempting to fetch all
// transitive dependencies.
type Transitive struct {
	bootstrap.Bootstrapper
	metrics
	Params    snowball.Parameters
	Consensus snowman.Consensus
	// track outstanding preference requests
	polls poll.Set
	// blocks that we have sent get requests for but haven't yet received
	blkReqs common.Requests
	// blocks that are queued to be issued to consensus once missing dependencies are fetched
	// Block ID --> Block
	pending map[ids.ID]snowman.Block
	// operations that are blocked on a block being issued. This could be
	// issuing another block, responding to a query, or applying votes to consensus
	blocked events.Blocker
	// number of times build block needs to be called once the number of
	// processing blocks has gone below the optimal number.
	pendingBuildBlocks int
	// errs tracks if an error has occurred in a callback
	errs wrappers.Errs
}
// Initialize implements the Engine interface. It wires consensus, the
// poll set, and metrics from config, then initializes the bootstrapper,
// which invokes finishBootstrapping once bootstrapping completes.
func (t *Transitive) Initialize(config Config) error {
	config.Ctx.Log.Info("initializing consensus engine")
	t.Params = config.Params
	t.Consensus = config.Consensus
	t.pending = make(map[ids.ID]snowman.Block)
	factory := poll.NewEarlyTermNoTraversalFactory(config.Params.Alpha)
	t.polls = poll.NewSet(factory,
		config.Ctx.Log,
		config.Params.Namespace,
		config.Params.Metrics,
	)
	if err := t.metrics.Initialize(config.Params.Namespace, config.Params.Metrics); err != nil {
		return err
	}
	// The bootstrapper registers its own metrics under a "_bs" suffix of
	// the engine's namespace.
	return t.Bootstrapper.Initialize(
		config.Config,
		t.finishBootstrapping,
		fmt.Sprintf("%s_bs", config.Params.Namespace),
		config.Params.Metrics,
	)
}
// When bootstrapping is finished, this will be called.
// This initializes the consensus engine with the last accepted block and
// sets the VM's preference, handling the special case where the last
// accepted block is an oracle block whose options must be issued first.
func (t *Transitive) finishBootstrapping() error {
	lastAcceptedID, err := t.VM.LastAccepted()
	if err != nil {
		return err
	}
	lastAccepted, err := t.GetBlock(lastAcceptedID)
	if err != nil {
		t.Ctx.Log.Error("failed to get last accepted block due to: %s", err)
		return err
	}
	// initialize consensus to the last accepted blockID
	if err := t.Consensus.Initialize(t.Ctx, t.Params, lastAcceptedID, lastAccepted.Height()); err != nil {
		return err
	}
	// to maintain the invariant that oracle blocks are issued in the correct
	// preferences, we need to handle the case that we are bootstrapping into an oracle block
	switch blk := lastAccepted.(type) {
	case OracleBlock:
		options, err := blk.Options()
		if err != nil {
			return err
		}
		for _, blk := range options {
			// note that deliver will set the VM's preference
			if err := t.deliver(blk); err != nil {
				return err
			}
		}
	default:
		// if there aren't blocks we need to deliver on startup, we need to set
		// the preference to the last accepted block
		if err := t.VM.SetPreference(lastAcceptedID); err != nil {
			return err
		}
	}
	t.Ctx.Log.Info("bootstrapping finished with %s as the last accepted block", lastAcceptedID)
	return nil
}
// Gossip implements the Engine interface. It advertises the last accepted
// block to the network; a failure to load that block is logged and
// swallowed rather than propagated.
func (t *Transitive) Gossip() error {
	blkID, err := t.VM.LastAccepted()
	if err != nil {
		return err
	}
	blk, err := t.GetBlock(blkID)
	if err != nil {
		t.Ctx.Log.Warn("dropping gossip request as %s couldn't be loaded due to %s", blkID, err)
		return nil
	}
	t.Ctx.Log.Verbo("gossiping %s as accepted to the network", blkID)
	t.Sender.Gossip(blkID, blk.Bytes())
	return nil
}
// Shutdown implements the Engine interface. It shuts down the underlying
// VM and returns its error, if any.
func (t *Transitive) Shutdown() error {
	t.Ctx.Log.Info("shutting down consensus engine")
	return t.VM.Shutdown()
}
// Get implements the Engine interface. It answers a validator's request
// for block [blkID] with a Put message carrying the block's bytes; if the
// block can't be fetched the request is silently dropped (only logged).
func (t *Transitive) Get(vdr ids.ShortID, requestID uint32, blkID ids.ID) error {
	blk, err := t.GetBlock(blkID)
	if err != nil {
		// If we failed to get the block, that means either an unexpected error
		// has occurred, [vdr] is not following the protocol, or the
		// block has been pruned.
		t.Ctx.Log.Debug("Get(%s, %d, %s) failed with: %s", vdr, requestID, blkID, err)
		return nil
	}
	// Respond to the validator with the fetched block and the same requestID.
	t.Sender.Put(vdr, requestID, blkID, blk.Bytes())
	return nil
}
// GetAncestors implements the Engine interface. It replies with a
// MultiPut containing [blkID] and as many of its ancestors as fit within
// the container-count, byte-size, and elapsed-time limits.
func (t *Transitive) GetAncestors(vdr ids.ShortID, requestID uint32, blkID ids.ID) error {
	startTime := time.Now()
	blk, err := t.GetBlock(blkID)
	if err != nil { // Don't have the block. Drop this request.
		t.Ctx.Log.Verbo("couldn't get block %s. dropping GetAncestors(%s, %d, %s)", blkID, vdr, requestID, blkID)
		return nil
	}
	// First elt is byte repr. of [blk], then its parent, then grandparent, etc.
	ancestorsBytes := make([][]byte, 1, t.Config.MultiputMaxContainersSent)
	ancestorsBytes[0] = blk.Bytes()
	ancestorsBytesLen := len(blk.Bytes()) + wrappers.IntLen // length, in bytes, of all elements of ancestors
	for numFetched := 1; numFetched < t.Config.MultiputMaxContainersSent && time.Since(startTime) < t.Config.MaxTimeGetAncestors; numFetched++ {
		// Walk up the chain; stop at the oldest ancestor we have locally.
		if blk, err = t.GetBlock(blk.Parent()); err != nil {
			break
		}
		blkBytes := blk.Bytes()
		// Ensure response size isn't too large. Include wrappers.IntLen because the size of the message
		// is included with each container, and the size is repr. by an int.
		if newLen := wrappers.IntLen + ancestorsBytesLen + len(blkBytes); newLen < maxContainersLen {
			ancestorsBytes = append(ancestorsBytes, blkBytes)
			ancestorsBytesLen = newLen
		} else { // reached maximum response size
			break
		}
	}
	t.metrics.getAncestorsBlks.Observe(float64(len(ancestorsBytes)))
	t.Sender.MultiPut(vdr, requestID, ancestorsBytes)
	return nil
}
// Put implements the Engine interface. It parses the delivered block
// bytes, attempts to issue the block into consensus, and then tries to
// build any pending blocks.
func (t *Transitive) Put(vdr ids.ShortID, requestID uint32, blkID ids.ID, blkBytes []byte) error {
	// bootstrapping isn't done --> we didn't send any gets --> this put is invalid
	if !t.IsBootstrapped() {
		if requestID == constants.GossipMsgRequestID {
			t.Ctx.Log.Verbo("dropping gossip Put(%s, %d, %s) due to bootstrapping",
				vdr, requestID, blkID)
		} else {
			t.Ctx.Log.Debug("dropping Put(%s, %d, %s) due to bootstrapping", vdr, requestID, blkID)
		}
		return nil
	}
	blk, err := t.VM.ParseBlock(blkBytes)
	if err != nil {
		t.Ctx.Log.Debug("failed to parse block %s: %s", blkID, err)
		t.Ctx.Log.Verbo("block:\n%s", formatting.DumpBytes{Bytes: blkBytes})
		// because GetFailed doesn't utilize the assumption that we actually
		// sent a Get message, we can safely call GetFailed here to potentially
		// abandon the request.
		return t.GetFailed(vdr, requestID)
	}
	// issue the block into consensus. If the block has already been issued,
	// this will be a noop. If this block has missing dependencies, vdr will
	// receive requests to fill the ancestry. dependencies that have already
	// been fetched, but with missing dependencies themselves won't be requested
	// from the vdr.
	if _, err := t.issueFrom(vdr, blk); err != nil {
		return err
	}
	return t.buildBlocks()
}
// GetFailed implements the Engine interface. It is called when a Get sent
// to [vdr] with [requestID] has failed; the corresponding outstanding
// block request, if any, is abandoned so dependents stop waiting on it.
func (t *Transitive) GetFailed(vdr ids.ShortID, requestID uint32) error {
	// not done bootstrapping --> didn't send a get --> this message is invalid
	if !t.Ctx.IsBootstrapped() {
		// Fix: the format string had %s/%d verbs but no arguments, which
		// logged "%!s(MISSING)"-style garbage; pass vdr and requestID.
		t.Ctx.Log.Debug("dropping GetFailed(%s, %d) due to bootstrapping", vdr, requestID)
		return nil
	}
	// We don't assume that this function is called after a failed Get message.
	// Check to see if we have an outstanding request and also get what the request was for if it exists.
	blkID, ok := t.blkReqs.Remove(vdr, requestID)
	if !ok {
		t.Ctx.Log.Debug("getFailed(%s, %d) called without having sent corresponding Get", vdr, requestID)
		return nil
	}
	// Because the get request was dropped, we no longer expect blkID to be issued.
	t.blocked.Abandon(blkID)
	t.metrics.numBlockers.Set(float64(t.blocked.Len()))
	return t.buildBlocks()
}
// PullQuery implements the Engine interface. It registers a convincer
// that will send chits for [blkID] to [vdr] once the block has been
// issued to consensus (immediately if it is already issued).
func (t *Transitive) PullQuery(vdr ids.ShortID, requestID uint32, blkID ids.ID) error {
	// If the engine hasn't been bootstrapped, we aren't ready to respond to queries
	if !t.Ctx.IsBootstrapped() {
		t.Ctx.Log.Debug("dropping PullQuery(%s, %d, %s) due to bootstrapping", vdr, requestID, blkID)
		return nil
	}
	// Will send chits once we've issued block [blkID] into consensus
	c := &convincer{
		consensus: t.Consensus,
		sender:    t.Sender,
		vdr:       vdr,
		requestID: requestID,
		errs:      &t.errs,
	}
	// Try to issue [blkID] to consensus.
	// If we're missing an ancestor, request it from [vdr]
	added, err := t.issueFromByID(vdr, blkID)
	if err != nil {
		return err
	}
	// Wait until we've issued block [blkID] before sending chits.
	if !added {
		c.deps.Add(blkID)
	}
	t.blocked.Register(c)
	t.metrics.numBlockers.Set(float64(t.blocked.Len()))
	return t.buildBlocks()
}
// PushQuery implements the Engine interface.
//
// Like PullQuery, but [vdr] also pushes the queried block's bytes so we can
// issue it without fetching it first.
func (t *Transitive) PushQuery(vdr ids.ShortID, requestID uint32, blkID ids.ID, blkBytes []byte) error {
	// if the engine hasn't been bootstrapped, we aren't ready to respond to queries
	if !t.Ctx.IsBootstrapped() {
		t.Ctx.Log.Debug("dropping PushQuery(%s, %d, %s) due to bootstrapping", vdr, requestID, blkID)
		return nil
	}
	blk, err := t.VM.ParseBlock(blkBytes)
	// If parsing fails, we just drop the request, as we didn't ask for it
	if err != nil {
		t.Ctx.Log.Debug("failed to parse block %s: %s", blkID, err)
		t.Ctx.Log.Verbo("block:\n%s", formatting.DumpBytes{Bytes: blkBytes})
		return nil
	}
	// issue the block into consensus. If the block has already been issued,
	// this will be a noop. If this block has missing dependencies, vdr will
	// receive requests to fill the ancestry. dependencies that have already
	// been fetched, but with missing dependencies themselves won't be requested
	// from the vdr.
	if _, err := t.issueFrom(vdr, blk); err != nil {
		return err
	}
	// register the chit request; note we use blk.ID() (the parsed block's
	// actual ID) rather than the claimed [blkID].
	return t.PullQuery(vdr, requestID, blk.ID())
}
// Chits implements the Engine interface.
//
// [vdr] responded to our query [requestID] with the single preferred block ID
// in [votes]. The vote is applied once that block has been issued.
func (t *Transitive) Chits(vdr ids.ShortID, requestID uint32, votes []ids.ID) error {
	// if the engine hasn't been bootstrapped, we shouldn't be receiving chits
	if !t.Ctx.IsBootstrapped() {
		t.Ctx.Log.Debug("dropping Chits(%s, %d) due to bootstrapping", vdr, requestID)
		return nil
	}
	// Since this is a linear chain, there should only be one ID in the vote set
	if len(votes) != 1 {
		t.Ctx.Log.Debug("Chits(%s, %d) was called with %d votes (expected 1)", vdr, requestID, len(votes))
		// because QueryFailed doesn't utilize the assumption that we actually
		// sent a Query message, we can safely call QueryFailed here to
		// potentially abandon the request.
		return t.QueryFailed(vdr, requestID)
	}
	blkID := votes[0]
	t.Ctx.Log.Verbo("Chits(%s, %d) contains vote for %s", vdr, requestID, blkID)
	// Will record chits once [blkID] has been issued into consensus
	v := &voter{
		t:         t,
		vdr:       vdr,
		requestID: requestID,
		response:  blkID,
	}
	added, err := t.issueFromByID(vdr, blkID)
	if err != nil {
		return err
	}
	// Wait until [blkID] has been issued to consensus before applying this chit.
	if !added {
		v.deps.Add(blkID)
	}
	t.blocked.Register(v)
	t.metrics.numBlockers.Set(float64(t.blocked.Len()))
	return t.buildBlocks()
}
// QueryFailed implements the Engine interface.
//
// Called when a query sent to [vdr] with [requestID] failed or timed out.
// Registers a responseless voter so the poll completes.
func (t *Transitive) QueryFailed(vdr ids.ShortID, requestID uint32) error {
	// If the engine hasn't been bootstrapped, we didn't issue a query
	if !t.Ctx.IsBootstrapped() {
		// Consistency fix: all sibling handlers (Put, PullQuery, PushQuery,
		// Chits) log bootstrapping drops at Debug; Warn here was noise for an
		// expected condition.
		t.Ctx.Log.Debug("dropping QueryFailed(%s, %d) due to bootstrapping", vdr, requestID)
		return nil
	}
	// A voter with no [response] records a failed vote for [requestID] once
	// any blocking issuances have resolved.
	t.blocked.Register(&voter{
		t:         t,
		vdr:       vdr,
		requestID: requestID,
	})
	t.metrics.numBlockers.Set(float64(t.blocked.Len()))
	return t.buildBlocks()
}
// Notify implements the Engine interface.
//
// Handles messages pushed by the VM; currently only PendingTxs, which asks
// the engine to attempt to build a block.
func (t *Transitive) Notify(msg common.Message) error {
	// if the engine hasn't been bootstrapped, we shouldn't build/issue blocks from the VM
	if !t.Ctx.IsBootstrapped() {
		t.Ctx.Log.Debug("dropping Notify due to bootstrapping")
		return nil
	}
	t.Ctx.Log.Verbo("snowman engine notified of %s from the vm", msg)
	switch msg {
	case common.PendingTxs:
		// the pending txs message means we should attempt to build a block.
		t.pendingBuildBlocks++
		return t.buildBlocks()
	default:
		t.Ctx.Log.Warn("unexpected message from the VM: %s", msg)
	}
	return nil
}
// Build blocks if they have been requested and the number of processing blocks
// is less than optimal.
//
// Drains [pendingBuildBlocks] while consensus has capacity; each built block
// is issued (with its ancestors) into consensus. A BuildBlock error is logged
// and treated as "nothing to build", not as a fatal error.
func (t *Transitive) buildBlocks() error {
	if err := t.errs.Err; err != nil {
		return err
	}
	for t.pendingBuildBlocks > 0 && t.Consensus.NumProcessing() < t.Params.OptimalProcessing {
		t.pendingBuildBlocks--
		blk, err := t.VM.BuildBlock()
		if err != nil {
			t.Ctx.Log.Debug("VM.BuildBlock errored with: %s", err)
			return nil
		}
		// a newly created block is expected to be processing. If this check
		// fails, there is potentially an error in the VM this engine is running
		if status := blk.Status(); status != choices.Processing {
			t.Ctx.Log.Warn("attempting to issue a block with status: %s, expected Processing", status)
		}
		// The newly created block should be built on top of the preferred block.
		// Otherwise, the new block doesn't have the best chance of being confirmed.
		parentID := blk.Parent()
		if pref := t.Consensus.Preference(); parentID != pref {
			t.Ctx.Log.Warn("built block with parent: %s, expected %s", parentID, pref)
		}
		added, err := t.issueWithAncestors(blk)
		if err != nil {
			return err
		}
		// issuing the block shouldn't have any missing dependencies
		if added {
			t.Ctx.Log.Verbo("successfully issued new block from the VM")
		} else {
			t.Ctx.Log.Warn("VM.BuildBlock returned a block with unissued ancestors")
		}
	}
	return nil
}
// repoll issues additional polls to the network, asking what it prefers given
// the block we prefer. Helps move consensus along by keeping the number of
// outstanding polls topped up to [ConcurrentRepolls].
func (t *Transitive) repoll() {
	// Gossip our current preference so the most likely branch propagates as
	// quickly as possible.
	preferred := t.Consensus.Preference()

	// Issue exactly enough pull queries to reach the configured poll limit.
	outstanding := t.polls.Len()
	for extra := t.Params.ConcurrentRepolls - outstanding; extra > 0; extra-- {
		t.pullQuery(preferred)
	}
}
// issueFromByID attempts to issue the branch ending with block [blkID] into
// consensus. If we do not have [blkID] locally, it is requested from [vdr].
// Returns true if the block is processing in consensus or is decided.
func (t *Transitive) issueFromByID(vdr ids.ShortID, blkID ids.ID) (bool, error) {
	if blk, err := t.GetBlock(blkID); err == nil {
		return t.issueFrom(vdr, blk)
	}
	// We don't have [blkID]; ask [vdr] to send it to us.
	t.sendRequest(vdr, blkID)
	return false, nil
}
// issueFrom attempts to issue the branch ending with block [blkID] to consensus.
// Returns true if the block is processing in consensus or is decided.
// If a dependency is missing, request it from [vdr].
func (t *Transitive) issueFrom(vdr ids.ShortID, blk snowman.Block) (bool, error) {
	blkID := blk.ID()
	// issue [blk] and its ancestors to consensus.
	// If the block has been decided, we don't need to issue it.
	// If the block is processing, we don't need to issue it.
	// If the block is queued to be issued, we don't need to issue it.
	for !t.Consensus.DecidedOrProcessing(blk) && !t.pendingContains(blkID) {
		if err := t.issue(blk); err != nil {
			return false, err
		}
		// Walk up the ancestry toward the last accepted block.
		blkID = blk.Parent()
		var err error
		blk, err = t.GetBlock(blkID)
		// If we don't have this ancestor, request it from [vdr]
		if err != nil || !blk.Status().Fetched() {
			t.sendRequest(vdr, blkID)
			return false, nil
		}
	}
	// Remove any outstanding requests for this block
	t.blkReqs.RemoveAny(blkID)
	issued := t.Consensus.DecidedOrProcessing(blk)
	if issued {
		// A dependency should never be waiting on a decided or processing
		// block. However, if the block was marked as rejected by the VM, the
		// dependencies may still be waiting. Therefore, they should be
		// abandoned.
		t.blocked.Abandon(blkID)
	}
	// Tracks performance statistics
	t.metrics.numRequests.Set(float64(t.blkReqs.Len()))
	t.metrics.numBlockers.Set(float64(t.blocked.Len()))
	return issued, t.errs.Err
}
// issueWithAncestors attempts to issue the branch ending with [blk] to consensus.
// Returns true if the block is processing in consensus or is decided.
// If a dependency is missing and the dependency hasn't been requested, the issuance will be abandoned.
func (t *Transitive) issueWithAncestors(blk snowman.Block) (bool, error) {
	blkID := blk.ID()
	// issue [blk] and its ancestors into consensus
	status := blk.Status()
	for status.Fetched() && !t.Consensus.DecidedOrProcessing(blk) && !t.pendingContains(blkID) {
		if err := t.issue(blk); err != nil {
			return false, err
		}
		blkID = blk.Parent()
		var err error
		if blk, err = t.GetBlock(blkID); err != nil {
			// The ancestor isn't available locally; fall through with an
			// Unknown status so the post-loop handling below runs.
			status = choices.Unknown
			break
		}
		status = blk.Status()
	}
	// The block was issued into consensus. This is the happy path.
	if status != choices.Unknown && t.Consensus.DecidedOrProcessing(blk) {
		return true, nil
	}
	// There's an outstanding request for this block.
	// We can just wait for that request to succeed or fail.
	if t.blkReqs.Contains(blkID) {
		return false, nil
	}
	// We don't have this block and have no reason to expect that we will get it.
	// Abandon the block to avoid a memory leak.
	t.blocked.Abandon(blkID)
	t.metrics.numBlockers.Set(float64(t.blocked.Len()))
	return false, t.errs.Err
}
// Issue [blk] to consensus once its ancestors have been issued.
//
// Queues [blk] in [pending], registers an issuer that will deliver it, and
// blocks the issuer on the parent when the parent isn't decided/processing.
func (t *Transitive) issue(blk snowman.Block) error {
	blkID := blk.ID()
	// mark that the block is queued to be added to consensus once its ancestors have been
	t.pending[blkID] = blk
	// Remove any outstanding requests for this block
	t.blkReqs.RemoveAny(blkID)
	// Will add [blk] to consensus once its ancestors have been
	i := &issuer{
		t:   t,
		blk: blk,
	}
	// block on the parent if needed
	parentID := blk.Parent()
	if parent, err := t.GetBlock(parentID); err != nil || !t.Consensus.DecidedOrProcessing(parent) {
		t.Ctx.Log.Verbo("block %s waiting for parent %s to be issued", blkID, parentID)
		i.deps.Add(parentID)
	}
	// If [i] has no dependencies, registering it delivers [blk] immediately.
	t.blocked.Register(i)
	// Tracks performance statistics
	t.metrics.numRequests.Set(float64(t.blkReqs.Len()))
	t.metrics.numBlocked.Set(float64(len(t.pending)))
	t.metrics.numBlockers.Set(float64(t.blocked.Len()))
	return t.errs.Err
}
// Request that [vdr] send us block [blkID].
//
// No-op if a request for [blkID] is already outstanding (regardless of which
// validator it was sent to).
func (t *Transitive) sendRequest(vdr ids.ShortID, blkID ids.ID) {
	// There is already an outstanding request for this block
	if t.blkReqs.Contains(blkID) {
		return
	}
	t.RequestID++
	t.blkReqs.Add(vdr, t.RequestID, blkID)
	t.Ctx.Log.Verbo("sending Get(%s, %d, %s)", vdr, t.RequestID, blkID)
	t.Sender.Get(vdr, t.RequestID, blkID)
	// Tracks performance statistics
	t.metrics.numRequests.Set(float64(t.blkReqs.Len()))
}
// send a pull query for this block ID
func (t *Transitive) pullQuery(blkID ids.ID) {
	t.Ctx.Log.Verbo("about to sample from: %s", t.Validators)
	// The validators we will query
	vdrs, err := t.Validators.Sample(t.Params.K)
	vdrBag := ids.ShortBag{}
	for _, vdr := range vdrs {
		vdrBag.Add(vdr.ID())
	}
	// NOTE(review): RequestID is consumed even when sampling failed or the
	// poll wasn't added — presumably intentional to keep IDs unique; confirm.
	t.RequestID++
	if err == nil && t.polls.Add(t.RequestID, vdrBag) {
		vdrList := vdrBag.List()
		vdrSet := ids.NewShortSet(len(vdrList))
		vdrSet.Add(vdrList...)
		t.Sender.PullQuery(vdrSet, t.RequestID, blkID)
	} else if err != nil {
		t.Ctx.Log.Error("query for %s was dropped due to an insufficient number of validators", blkID)
	}
}
// send a push query for this block
//
// Same flow as pullQuery, but the block's bytes are included so peers can
// issue it without a fetch round trip.
func (t *Transitive) pushQuery(blk snowman.Block) {
	t.Ctx.Log.Verbo("about to sample from: %s", t.Validators)
	vdrs, err := t.Validators.Sample(t.Params.K)
	vdrBag := ids.ShortBag{}
	for _, vdr := range vdrs {
		vdrBag.Add(vdr.ID())
	}
	t.RequestID++
	if err == nil && t.polls.Add(t.RequestID, vdrBag) {
		vdrList := vdrBag.List()
		vdrSet := ids.NewShortSet(len(vdrList))
		vdrSet.Add(vdrList...)
		t.Sender.PushQuery(vdrSet, t.RequestID, blk.ID(), blk.Bytes())
	} else if err != nil {
		t.Ctx.Log.Error("query for %s was dropped due to an insufficient number of validators", blk.ID())
	}
}
// issue [blk] to consensus
//
// Called once all of [blk]'s dependencies have been resolved. Verifies the
// block, adds it (and any oracle options) to consensus, updates the VM's
// preference, queries the network, and fulfills/abandons waiting operations.
func (t *Transitive) deliver(blk snowman.Block) error {
	if t.Consensus.DecidedOrProcessing(blk) {
		return nil
	}
	// we are no longer waiting on adding the block to consensus, so it is no
	// longer pending
	blkID := blk.ID()
	delete(t.pending, blkID)
	parentID := blk.Parent()
	parent, err := t.GetBlock(parentID)
	// Because the dependency must have been fulfilled by the time this function
	// is called - we don't expect [err] to be non-nil. But it is handled for
	// completeness and future proofing.
	if err != nil || !t.Consensus.AcceptedOrProcessing(parent) {
		// if the parent isn't processing or the last accepted block, then this
		// block is effectively rejected
		t.blocked.Abandon(blkID)
		t.metrics.numBlocked.Set(float64(len(t.pending))) // Tracks performance statistics
		t.metrics.numBlockers.Set(float64(t.blocked.Len()))
		return t.errs.Err
	}
	// By ensuring that the parent is either processing or accepted, it is
	// guaranteed that the parent was successfully verified. This means that
	// calling Verify on this block is allowed.
	// make sure this block is valid
	if err := blk.Verify(); err != nil {
		t.Ctx.Log.Debug("block failed verification due to %s, dropping block", err)
		// if verify fails, then all descendants are also invalid
		t.blocked.Abandon(blkID)
		t.metrics.numBlocked.Set(float64(len(t.pending))) // Tracks performance statistics
		t.metrics.numBlockers.Set(float64(t.blocked.Len()))
		return t.errs.Err
	}
	t.Ctx.Log.Verbo("adding block to consensus: %s", blkID)
	if err := t.Consensus.Add(blk); err != nil {
		return err
	}
	// Add all the oracle blocks if they exist. We call verify on all the blocks
	// and add them to consensus before marking anything as fulfilled to avoid
	// any potential reentrant bugs.
	added := []snowman.Block{}
	dropped := []snowman.Block{}
	if blk, ok := blk.(OracleBlock); ok {
		options, err := blk.Options()
		if err != nil {
			return err
		}
		for _, blk := range options {
			if err := blk.Verify(); err != nil {
				t.Ctx.Log.Debug("block failed verification due to %s, dropping block", err)
				dropped = append(dropped, blk)
			} else {
				if err := t.Consensus.Add(blk); err != nil {
					return err
				}
				added = append(added, blk)
			}
		}
	}
	if err := t.VM.SetPreference(t.Consensus.Preference()); err != nil {
		return err
	}
	// If the block is now preferred, query the network for its preferences
	// with this new block.
	if t.Consensus.IsPreferred(blk) {
		t.pushQuery(blk)
	}
	// Fulfill [blk] only after all Adds above, to avoid reentrancy (see note
	// above the [added]/[dropped] slices).
	t.blocked.Fulfill(blkID)
	for _, blk := range added {
		if t.Consensus.IsPreferred(blk) {
			t.pushQuery(blk)
		}
		blkID := blk.ID()
		delete(t.pending, blkID)
		t.blocked.Fulfill(blkID)
		t.blkReqs.RemoveAny(blkID)
	}
	for _, blk := range dropped {
		blkID := blk.ID()
		delete(t.pending, blkID)
		t.blocked.Abandon(blkID)
		t.blkReqs.RemoveAny(blkID)
	}
	// If we should issue multiple queries at the same time, we need to repoll
	t.repoll()
	// Tracks performance statistics
	t.metrics.numRequests.Set(float64(t.blkReqs.Len()))
	t.metrics.numBlocked.Set(float64(len(t.pending)))
	t.metrics.numBlockers.Set(float64(t.blocked.Len()))
	return t.errs.Err
}
// IsBootstrapped returns true iff this chain is done bootstrapping.
// Thin delegate to the chain context's bootstrap flag.
func (t *Transitive) IsBootstrapped() bool {
	return t.Ctx.IsBootstrapped()
}
// HealthCheck implements the common.Engine interface.
//
// Reports the health of the VM and, once bootstrapping has finished, of
// consensus as well. The returned map always carries both entries; the error
// combines whichever of the two checks failed.
func (t *Transitive) HealthCheck() (interface{}, error) {
	var (
		consensusIntf interface{} = struct{}{}
		consensusErr  error
	)
	// Consensus isn't initialized until bootstrapping completes, so only
	// query it afterwards.
	if t.Ctx.IsBootstrapped() {
		consensusIntf, consensusErr = t.Consensus.HealthCheck()
	}
	vmIntf, vmErr := t.VM.HealthCheck()
	details := map[string]interface{}{
		"consensus": consensusIntf,
		"vm":        vmIntf,
	}
	switch {
	case consensusErr == nil:
		return details, vmErr
	case vmErr == nil:
		return details, consensusErr
	default:
		return details, fmt.Errorf("vm: %s ; consensus: %s", vmErr, consensusErr)
	}
}
// GetBlock implements the snowman.Engine interface.
//
// Blocks queued for issuance shadow the VM's view: a pending block is
// returned even if the VM doesn't know about it yet.
func (t *Transitive) GetBlock(blkID ids.ID) (snowman.Block, error) {
	if pendingBlk, ok := t.pending[blkID]; ok {
		return pendingBlk, nil
	}
	return t.VM.GetBlock(blkID)
}
// GetVM implements the snowman.Engine interface.
// Returns the VM this engine is running on top of.
func (t *Transitive) GetVM() common.VM {
	return t.VM
}
// pendingContains returns true iff [blkID] is queued to be issued to
// consensus once its dependencies are fetched.
func (t *Transitive) pendingContains(blkID ids.ID) bool {
	_, ok := t.pending[blkID]
	return ok
}
// Update snow/engine/snowman/transitive.go
// Co-authored-by: danlaine <9a51e3962f111c68c0b75e58de0d829a7291e088@avalabs.org>
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package snowman
import (
"fmt"
"time"
"github.com/ava-labs/avalanchego/ids"
"github.com/ava-labs/avalanchego/network"
"github.com/ava-labs/avalanchego/snow/choices"
"github.com/ava-labs/avalanchego/snow/consensus/snowball"
"github.com/ava-labs/avalanchego/snow/consensus/snowman"
"github.com/ava-labs/avalanchego/snow/consensus/snowman/poll"
"github.com/ava-labs/avalanchego/snow/engine/common"
"github.com/ava-labs/avalanchego/snow/engine/snowman/bootstrap"
"github.com/ava-labs/avalanchego/snow/events"
"github.com/ava-labs/avalanchego/utils/constants"
"github.com/ava-labs/avalanchego/utils/formatting"
"github.com/ava-labs/avalanchego/utils/wrappers"
)
const (
	// TODO define this constant in one place rather than here and in snowman
	// Max containers size in a MultiPut message
	maxContainersLen = int(4 * network.DefaultMaxMessageSize / 5)
)

// Compile-time assertion that Transitive implements the Engine interface.
var _ Engine = &Transitive{}
// Transitive implements the Engine interface by attempting to fetch all
// transitive dependencies.
type Transitive struct {
	bootstrap.Bootstrapper
	metrics

	Params    snowball.Parameters
	Consensus snowman.Consensus

	// track outstanding preference requests
	polls poll.Set

	// blocks that we have sent get requests for but haven't yet received
	blkReqs common.Requests

	// blocks that are queued to be issued to consensus once missing dependencies are fetched
	// Block ID --> Block
	pending map[ids.ID]snowman.Block

	// operations that are blocked on a block being issued. This could be
	// issuing another block, responding to a query, or applying votes to consensus
	blocked events.Blocker

	// number of times build block needs to be called once the number of
	// processing blocks has gone below the optimal number.
	pendingBuildBlocks int

	// errs tracks if an error has occurred in a callback
	errs wrappers.Errs
}
// Initialize implements the Engine interface.
//
// Wires up consensus parameters, the poll set, and metrics, then initializes
// the embedded Bootstrapper, which calls finishBootstrapping when done.
func (t *Transitive) Initialize(config Config) error {
	config.Ctx.Log.Info("initializing consensus engine")
	t.Params = config.Params
	t.Consensus = config.Consensus
	t.pending = make(map[ids.ID]snowman.Block)
	factory := poll.NewEarlyTermNoTraversalFactory(config.Params.Alpha)
	t.polls = poll.NewSet(factory,
		config.Ctx.Log,
		config.Params.Namespace,
		config.Params.Metrics,
	)
	if err := t.metrics.Initialize(config.Params.Namespace, config.Params.Metrics); err != nil {
		return err
	}
	return t.Bootstrapper.Initialize(
		config.Config,
		t.finishBootstrapping,
		fmt.Sprintf("%s_bs", config.Params.Namespace),
		config.Params.Metrics,
	)
}
// When bootstrapping is finished, this will be called.
// This initializes the consensus engine with the last accepted block.
func (t *Transitive) finishBootstrapping() error {
	lastAcceptedID, err := t.VM.LastAccepted()
	if err != nil {
		return err
	}
	lastAccepted, err := t.GetBlock(lastAcceptedID)
	if err != nil {
		t.Ctx.Log.Error("failed to get last accepted block due to: %s", err)
		return err
	}
	// initialize consensus to the last accepted blockID
	if err := t.Consensus.Initialize(t.Ctx, t.Params, lastAcceptedID, lastAccepted.Height()); err != nil {
		return err
	}
	// to maintain the invariant that oracle blocks are issued in the correct
	// preferences, we need to handle the case that we are bootstrapping into an oracle block
	switch blk := lastAccepted.(type) {
	case OracleBlock:
		options, err := blk.Options()
		if err != nil {
			return err
		}
		for _, blk := range options {
			// note that deliver will set the VM's preference
			if err := t.deliver(blk); err != nil {
				return err
			}
		}
	default:
		// if there aren't blocks we need to deliver on startup, we need to set
		// the preference to the last accepted block
		if err := t.VM.SetPreference(lastAcceptedID); err != nil {
			return err
		}
	}
	t.Ctx.Log.Info("bootstrapping finished with %s as the last accepted block", lastAcceptedID)
	return nil
}
// Gossip implements the Engine interface.
//
// Broadcasts the last accepted block to the network so stragglers can catch
// up. Failure to load the block is logged, not returned.
func (t *Transitive) Gossip() error {
	blkID, err := t.VM.LastAccepted()
	if err != nil {
		return err
	}
	blk, err := t.GetBlock(blkID)
	if err != nil {
		t.Ctx.Log.Warn("dropping gossip request as %s couldn't be loaded due to %s", blkID, err)
		return nil
	}
	t.Ctx.Log.Verbo("gossiping %s as accepted to the network", blkID)
	t.Sender.Gossip(blkID, blk.Bytes())
	return nil
}
// Shutdown implements the Engine interface.
// Shuts down the underlying VM; the engine itself holds no other resources.
func (t *Transitive) Shutdown() error {
	t.Ctx.Log.Info("shutting down consensus engine")
	return t.VM.Shutdown()
}
// Get implements the Engine interface.
//
// [vdr] is requesting block [blkID] from us; respond with a Put containing
// the block's bytes if we have it, otherwise silently drop the request.
func (t *Transitive) Get(vdr ids.ShortID, requestID uint32, blkID ids.ID) error {
	blk, err := t.GetBlock(blkID)
	if err != nil {
		// If we failed to get the block, that means either an unexpected error
		// has occurred, [vdr] is not following the protocol, or the
		// block has been pruned.
		t.Ctx.Log.Debug("Get(%s, %d, %s) failed with: %s", vdr, requestID, blkID, err)
		return nil
	}
	// Respond to the validator with the fetched block and the same requestID.
	t.Sender.Put(vdr, requestID, blkID, blk.Bytes())
	return nil
}
// GetAncestors implements the Engine interface.
//
// Responds to [vdr] with a MultiPut containing [blkID] and as many of its
// ancestors as fit within the container count, byte-size, and time limits.
func (t *Transitive) GetAncestors(vdr ids.ShortID, requestID uint32, blkID ids.ID) error {
	startTime := time.Now()
	blk, err := t.GetBlock(blkID)
	if err != nil { // Don't have the block. Drop this request.
		t.Ctx.Log.Verbo("couldn't get block %s. dropping GetAncestors(%s, %d, %s)", blkID, vdr, requestID, blkID)
		return nil
	}
	// First elt is byte repr. of [blk], then its parent, then grandparent, etc.
	ancestorsBytes := make([][]byte, 1, t.Config.MultiputMaxContainersSent)
	ancestorsBytes[0] = blk.Bytes()
	ancestorsBytesLen := len(blk.Bytes()) + wrappers.IntLen // length, in bytes, of all elements of ancestors
	for numFetched := 1; numFetched < t.Config.MultiputMaxContainersSent && time.Since(startTime) < t.Config.MaxTimeGetAncestors; numFetched++ {
		if blk, err = t.GetBlock(blk.Parent()); err != nil {
			break
		}
		blkBytes := blk.Bytes()
		// Ensure response size isn't too large. Include wrappers.IntLen because the size of the message
		// is included with each container, and the size is repr. by an int.
		if newLen := wrappers.IntLen + ancestorsBytesLen + len(blkBytes); newLen < maxContainersLen {
			ancestorsBytes = append(ancestorsBytes, blkBytes)
			ancestorsBytesLen = newLen
		} else { // reached maximum response size
			break
		}
	}
	t.metrics.getAncestorsBlks.Observe(float64(len(ancestorsBytes)))
	t.Sender.MultiPut(vdr, requestID, ancestorsBytes)
	return nil
}
// Put implements the Engine interface.
//
// [vdr] sent us the bytes of block [blkID], either in response to a Get we
// issued or as gossip. Parses and issues the block into consensus.
func (t *Transitive) Put(vdr ids.ShortID, requestID uint32, blkID ids.ID, blkBytes []byte) error {
	// bootstrapping isn't done --> we didn't send any gets --> this put is invalid
	if !t.IsBootstrapped() {
		// Gossiped blocks arrive unsolicited, so dropping them during
		// bootstrap is routine; log at the quieter level.
		if requestID == constants.GossipMsgRequestID {
			t.Ctx.Log.Verbo("dropping gossip Put(%s, %d, %s) due to bootstrapping",
				vdr, requestID, blkID)
		} else {
			t.Ctx.Log.Debug("dropping Put(%s, %d, %s) due to bootstrapping", vdr, requestID, blkID)
		}
		return nil
	}
	blk, err := t.VM.ParseBlock(blkBytes)
	if err != nil {
		t.Ctx.Log.Debug("failed to parse block %s: %s", blkID, err)
		t.Ctx.Log.Verbo("block:\n%s", formatting.DumpBytes{Bytes: blkBytes})
		// because GetFailed doesn't utilize the assumption that we actually
		// sent a Get message, we can safely call GetFailed here to potentially
		// abandon the request.
		return t.GetFailed(vdr, requestID)
	}
	// issue the block into consensus. If the block has already been issued,
	// this will be a noop. If this block has missing dependencies, vdr will
	// receive requests to fill the ancestry. dependencies that have already
	// been fetched, but with missing dependencies themselves won't be requested
	// from the vdr.
	if _, err := t.issueFrom(vdr, blk); err != nil {
		return err
	}
	return t.buildBlocks()
}
// GetFailed implements the Engine interface.
//
// Called when a Get request previously sent to [vdr] with [requestID] has
// failed or timed out. Abandons the issuance that was waiting on the block.
func (t *Transitive) GetFailed(vdr ids.ShortID, requestID uint32) error {
	// not done bootstrapping --> didn't send a get --> this message is invalid
	if !t.Ctx.IsBootstrapped() {
		// Fix: the original call passed no arguments for the %s/%d verbs,
		// producing a malformed log line (and a go vet printf failure).
		t.Ctx.Log.Debug("dropping GetFailed(%s, %d) due to bootstrapping", vdr, requestID)
		return nil
	}
	// We don't assume that this function is called after a failed Get message.
	// Check to see if we have an outstanding request and also get what the request was for if it exists.
	blkID, ok := t.blkReqs.Remove(vdr, requestID)
	if !ok {
		t.Ctx.Log.Debug("getFailed(%s, %d) called without having sent corresponding Get", vdr, requestID)
		return nil
	}
	// Because the get request was dropped, we no longer expect blkID to be issued.
	t.blocked.Abandon(blkID)
	t.metrics.numBlockers.Set(float64(t.blocked.Len()))
	return t.buildBlocks()
}
// PullQuery implements the Engine interface.
//
// [vdr] is asking us which block we prefer, given [blkID]. We respond with
// chits once [blkID] has been issued into consensus (or immediately if it
// already is).
func (t *Transitive) PullQuery(vdr ids.ShortID, requestID uint32, blkID ids.ID) error {
	// If the engine hasn't been bootstrapped, we aren't ready to respond to queries
	if !t.Ctx.IsBootstrapped() {
		t.Ctx.Log.Debug("dropping PullQuery(%s, %d, %s) due to bootstrapping", vdr, requestID, blkID)
		return nil
	}
	// Will send chits once we've issued block [blkID] into consensus
	c := &convincer{
		consensus: t.Consensus,
		sender:    t.Sender,
		vdr:       vdr,
		requestID: requestID,
		errs:      &t.errs,
	}
	// Try to issue [blkID] to consensus.
	// If we're missing an ancestor, request it from [vdr]
	added, err := t.issueFromByID(vdr, blkID)
	if err != nil {
		return err
	}
	// Wait until we've issued block [blkID] before sending chits.
	if !added {
		c.deps.Add(blkID)
	}
	// Register after setting deps: if [blkID] was already issued, the
	// convincer has no dependencies and fires immediately on registration.
	t.blocked.Register(c)
	t.metrics.numBlockers.Set(float64(t.blocked.Len()))
	return t.buildBlocks()
}
// PushQuery implements the Engine interface.
//
// Like PullQuery, but [vdr] also pushes the queried block's bytes so we can
// issue it without fetching it first.
func (t *Transitive) PushQuery(vdr ids.ShortID, requestID uint32, blkID ids.ID, blkBytes []byte) error {
	// if the engine hasn't been bootstrapped, we aren't ready to respond to queries
	if !t.Ctx.IsBootstrapped() {
		t.Ctx.Log.Debug("dropping PushQuery(%s, %d, %s) due to bootstrapping", vdr, requestID, blkID)
		return nil
	}
	blk, err := t.VM.ParseBlock(blkBytes)
	// If parsing fails, we just drop the request, as we didn't ask for it
	if err != nil {
		t.Ctx.Log.Debug("failed to parse block %s: %s", blkID, err)
		t.Ctx.Log.Verbo("block:\n%s", formatting.DumpBytes{Bytes: blkBytes})
		return nil
	}
	// issue the block into consensus. If the block has already been issued,
	// this will be a noop. If this block has missing dependencies, vdr will
	// receive requests to fill the ancestry. dependencies that have already
	// been fetched, but with missing dependencies themselves won't be requested
	// from the vdr.
	if _, err := t.issueFrom(vdr, blk); err != nil {
		return err
	}
	// register the chit request; note we use blk.ID() (the parsed block's
	// actual ID) rather than the claimed [blkID].
	return t.PullQuery(vdr, requestID, blk.ID())
}
// Chits implements the Engine interface.
//
// [vdr] responded to our query [requestID] with the single preferred block ID
// in [votes]. The vote is applied once that block has been issued.
func (t *Transitive) Chits(vdr ids.ShortID, requestID uint32, votes []ids.ID) error {
	// if the engine hasn't been bootstrapped, we shouldn't be receiving chits
	if !t.Ctx.IsBootstrapped() {
		t.Ctx.Log.Debug("dropping Chits(%s, %d) due to bootstrapping", vdr, requestID)
		return nil
	}
	// Since this is a linear chain, there should only be one ID in the vote set
	if len(votes) != 1 {
		t.Ctx.Log.Debug("Chits(%s, %d) was called with %d votes (expected 1)", vdr, requestID, len(votes))
		// because QueryFailed doesn't utilize the assumption that we actually
		// sent a Query message, we can safely call QueryFailed here to
		// potentially abandon the request.
		return t.QueryFailed(vdr, requestID)
	}
	blkID := votes[0]
	t.Ctx.Log.Verbo("Chits(%s, %d) contains vote for %s", vdr, requestID, blkID)
	// Will record chits once [blkID] has been issued into consensus
	v := &voter{
		t:         t,
		vdr:       vdr,
		requestID: requestID,
		response:  blkID,
	}
	added, err := t.issueFromByID(vdr, blkID)
	if err != nil {
		return err
	}
	// Wait until [blkID] has been issued to consensus before applying this chit.
	if !added {
		v.deps.Add(blkID)
	}
	t.blocked.Register(v)
	t.metrics.numBlockers.Set(float64(t.blocked.Len()))
	return t.buildBlocks()
}
// QueryFailed implements the Engine interface.
//
// Called when a query sent to [vdr] with [requestID] failed or timed out.
// Registers a responseless voter so the poll completes.
func (t *Transitive) QueryFailed(vdr ids.ShortID, requestID uint32) error {
	// If the engine hasn't been bootstrapped, we didn't issue a query
	if !t.Ctx.IsBootstrapped() {
		// Consistency fix: all sibling handlers (Put, PullQuery, PushQuery,
		// Chits) log bootstrapping drops at Debug; Warn here was noise for an
		// expected condition.
		t.Ctx.Log.Debug("dropping QueryFailed(%s, %d) due to bootstrapping", vdr, requestID)
		return nil
	}
	// A voter with no [response] records a failed vote for [requestID] once
	// any blocking issuances have resolved.
	t.blocked.Register(&voter{
		t:         t,
		vdr:       vdr,
		requestID: requestID,
	})
	t.metrics.numBlockers.Set(float64(t.blocked.Len()))
	return t.buildBlocks()
}
// Notify implements the Engine interface.
//
// Handles messages pushed by the VM; currently only PendingTxs, which asks
// the engine to attempt to build a block.
func (t *Transitive) Notify(msg common.Message) error {
	// if the engine hasn't been bootstrapped, we shouldn't build/issue blocks from the VM
	if !t.Ctx.IsBootstrapped() {
		t.Ctx.Log.Debug("dropping Notify due to bootstrapping")
		return nil
	}
	t.Ctx.Log.Verbo("snowman engine notified of %s from the vm", msg)
	switch msg {
	case common.PendingTxs:
		// the pending txs message means we should attempt to build a block.
		t.pendingBuildBlocks++
		return t.buildBlocks()
	default:
		t.Ctx.Log.Warn("unexpected message from the VM: %s", msg)
	}
	return nil
}
// Build blocks if they have been requested and the number of processing blocks
// is less than optimal.
//
// Drains [pendingBuildBlocks] while consensus has capacity; each built block
// is issued (with its ancestors) into consensus. A BuildBlock error is logged
// and treated as "nothing to build", not as a fatal error.
func (t *Transitive) buildBlocks() error {
	if err := t.errs.Err; err != nil {
		return err
	}
	for t.pendingBuildBlocks > 0 && t.Consensus.NumProcessing() < t.Params.OptimalProcessing {
		t.pendingBuildBlocks--
		blk, err := t.VM.BuildBlock()
		if err != nil {
			t.Ctx.Log.Debug("VM.BuildBlock errored with: %s", err)
			return nil
		}
		// a newly created block is expected to be processing. If this check
		// fails, there is potentially an error in the VM this engine is running
		if status := blk.Status(); status != choices.Processing {
			t.Ctx.Log.Warn("attempting to issue a block with status: %s, expected Processing", status)
		}
		// The newly created block should be built on top of the preferred block.
		// Otherwise, the new block doesn't have the best chance of being confirmed.
		parentID := blk.Parent()
		if pref := t.Consensus.Preference(); parentID != pref {
			t.Ctx.Log.Warn("built block with parent: %s, expected %s", parentID, pref)
		}
		added, err := t.issueWithAncestors(blk)
		if err != nil {
			return err
		}
		// issuing the block shouldn't have any missing dependencies
		if added {
			t.Ctx.Log.Verbo("successfully issued new block from the VM")
		} else {
			t.Ctx.Log.Warn("VM.BuildBlock returned a block with unissued ancestors")
		}
	}
	return nil
}
// repoll issues additional polls to the network, asking what it prefers given
// the block we prefer. Helps move consensus along by keeping the number of
// outstanding polls topped up to [ConcurrentRepolls].
func (t *Transitive) repoll() {
	// Gossip our current preference so the most likely branch propagates as
	// quickly as possible.
	preferred := t.Consensus.Preference()

	// Issue exactly enough pull queries to reach the configured poll limit.
	outstanding := t.polls.Len()
	for extra := t.Params.ConcurrentRepolls - outstanding; extra > 0; extra-- {
		t.pullQuery(preferred)
	}
}
// issueFromByID attempts to issue the branch ending with block [blkID] into
// consensus. If we do not have [blkID] locally, it is requested from [vdr].
// Returns true if the block is processing in consensus or is decided.
func (t *Transitive) issueFromByID(vdr ids.ShortID, blkID ids.ID) (bool, error) {
	if blk, err := t.GetBlock(blkID); err == nil {
		return t.issueFrom(vdr, blk)
	}
	// We don't have [blkID]; ask [vdr] to send it to us.
	t.sendRequest(vdr, blkID)
	return false, nil
}
// issueFrom attempts to issue the branch ending with block [blkID] to consensus.
// Returns true if the block is processing in consensus or is decided.
// If a dependency is missing, request it from [vdr].
func (t *Transitive) issueFrom(vdr ids.ShortID, blk snowman.Block) (bool, error) {
	blkID := blk.ID()
	// issue [blk] and its ancestors to consensus.
	// If the block has been decided, we don't need to issue it.
	// If the block is processing, we don't need to issue it.
	// If the block is queued to be issued, we don't need to issue it.
	for !t.Consensus.DecidedOrProcessing(blk) && !t.pendingContains(blkID) {
		if err := t.issue(blk); err != nil {
			return false, err
		}
		// Walk up the ancestry toward the last accepted block.
		blkID = blk.Parent()
		var err error
		blk, err = t.GetBlock(blkID)
		// If we don't have this ancestor, request it from [vdr]
		if err != nil || !blk.Status().Fetched() {
			t.sendRequest(vdr, blkID)
			return false, nil
		}
	}
	// Remove any outstanding requests for this block
	t.blkReqs.RemoveAny(blkID)
	issued := t.Consensus.DecidedOrProcessing(blk)
	if issued {
		// A dependency should never be waiting on a decided or processing
		// block. However, if the block was marked as rejected by the VM, the
		// dependencies may still be waiting. Therefore, they should be
		// abandoned.
		t.blocked.Abandon(blkID)
	}
	// Tracks performance statistics
	t.metrics.numRequests.Set(float64(t.blkReqs.Len()))
	t.metrics.numBlockers.Set(float64(t.blocked.Len()))
	return issued, t.errs.Err
}
// issueWithAncestors attempts to issue the branch ending with [blk] to consensus.
// Returns true if the block is processing in consensus or is decided.
// If a dependency is missing and the dependency hasn't been requested, the issuance will be abandoned.
func (t *Transitive) issueWithAncestors(blk snowman.Block) (bool, error) {
	blkID := blk.ID()
	// issue [blk] and its ancestors into consensus
	status := blk.Status()
	for status.Fetched() && !t.Consensus.DecidedOrProcessing(blk) && !t.pendingContains(blkID) {
		if err := t.issue(blk); err != nil {
			return false, err
		}
		blkID = blk.Parent()
		var err error
		if blk, err = t.GetBlock(blkID); err != nil {
			// Ancestor isn't available locally; stop walking up the chain.
			status = choices.Unknown
			break
		}
		status = blk.Status()
	}

	// The block was issued into consensus. This is the happy path.
	if status != choices.Unknown && t.Consensus.DecidedOrProcessing(blk) {
		return true, nil
	}

	// There's an outstanding request for this block.
	// We can just wait for that request to succeed or fail.
	if t.blkReqs.Contains(blkID) {
		return false, nil
	}

	// We don't have this block and have no reason to expect that we will get it.
	// Abandon the block to avoid a memory leak.
	t.blocked.Abandon(blkID)
	t.metrics.numBlockers.Set(float64(t.blocked.Len()))
	return false, t.errs.Err
}
// Issue [blk] to consensus once its ancestors have been issued.
func (t *Transitive) issue(blk snowman.Block) error {
	blkID := blk.ID()

	// mark that the block is queued to be added to consensus once its
	// ancestors have been issued
	t.pending[blkID] = blk

	// Remove any outstanding requests for this block
	t.blkReqs.RemoveAny(blkID)

	// Will add [blk] to consensus once its ancestors have been issued
	i := &issuer{
		t:   t,
		blk: blk,
	}

	// block on the parent if needed: [blk] can only be delivered once its
	// parent is decided or processing in consensus
	parentID := blk.Parent()
	if parent, err := t.GetBlock(parentID); err != nil || !t.Consensus.DecidedOrProcessing(parent) {
		t.Ctx.Log.Verbo("block %s waiting for parent %s to be issued", blkID, parentID)
		i.deps.Add(parentID)
	}

	t.blocked.Register(i)

	// Tracks performance statistics
	t.metrics.numRequests.Set(float64(t.blkReqs.Len()))
	t.metrics.numBlocked.Set(float64(len(t.pending)))
	t.metrics.numBlockers.Set(float64(t.blocked.Len()))
	return t.errs.Err
}
// Request that [vdr] send us block [blkID]
func (t *Transitive) sendRequest(vdr ids.ShortID, blkID ids.ID) {
	// There is already an outstanding request for this block; don't send a
	// duplicate Get.
	if t.blkReqs.Contains(blkID) {
		return
	}

	// Register the request before sending so the response can be matched.
	t.RequestID++
	t.blkReqs.Add(vdr, t.RequestID, blkID)
	t.Ctx.Log.Verbo("sending Get(%s, %d, %s)", vdr, t.RequestID, blkID)
	t.Sender.Get(vdr, t.RequestID, blkID)

	// Tracks performance statistics
	t.metrics.numRequests.Set(float64(t.blkReqs.Len()))
}
// send a pull query for this block ID
func (t *Transitive) pullQuery(blkID ids.ID) {
	t.Ctx.Log.Verbo("about to sample from: %s", t.Validators)
	// The validators we will query
	vdrs, err := t.Validators.Sample(t.Params.K)
	vdrBag := ids.ShortBag{}
	for _, vdr := range vdrs {
		vdrBag.Add(vdr.ID())
	}

	// NOTE: the request ID is consumed even if the sample failed or the poll
	// wasn't registered, keeping IDs unique across all outgoing requests.
	t.RequestID++
	if err == nil && t.polls.Add(t.RequestID, vdrBag) {
		vdrList := vdrBag.List()
		vdrSet := ids.NewShortSet(len(vdrList))
		vdrSet.Add(vdrList...)
		t.Sender.PullQuery(vdrSet, t.RequestID, blkID)
	} else if err != nil {
		t.Ctx.Log.Error("query for %s was dropped due to an insufficient number of validators", blkID)
	}
}
// send a push query for this block (sends the block bytes along with the query)
func (t *Transitive) pushQuery(blk snowman.Block) {
	t.Ctx.Log.Verbo("about to sample from: %s", t.Validators)
	vdrs, err := t.Validators.Sample(t.Params.K)
	vdrBag := ids.ShortBag{}
	for _, vdr := range vdrs {
		vdrBag.Add(vdr.ID())
	}

	// NOTE: the request ID is consumed even if the sample failed or the poll
	// wasn't registered, keeping IDs unique across all outgoing requests.
	t.RequestID++
	if err == nil && t.polls.Add(t.RequestID, vdrBag) {
		vdrList := vdrBag.List()
		vdrSet := ids.NewShortSet(len(vdrList))
		vdrSet.Add(vdrList...)
		t.Sender.PushQuery(vdrSet, t.RequestID, blk.ID(), blk.Bytes())
	} else if err != nil {
		t.Ctx.Log.Error("query for %s was dropped due to an insufficient number of validators", blk.ID())
	}
}
// issue [blk] to consensus. Called once all of [blk]'s dependencies are
// fulfilled: verifies the block, adds it (and any oracle-block options) to
// consensus, updates the VM's preference, and repolls the network.
func (t *Transitive) deliver(blk snowman.Block) error {
	if t.Consensus.DecidedOrProcessing(blk) {
		return nil
	}

	// we are no longer waiting on adding the block to consensus, so it is no
	// longer pending
	blkID := blk.ID()
	delete(t.pending, blkID)

	parentID := blk.Parent()
	parent, err := t.GetBlock(parentID)
	// Because the dependency must have been fulfilled by the time this function
	// is called - we don't expect [err] to be non-nil. But it is handled for
	// completeness and future proofing.
	if err != nil || !t.Consensus.AcceptedOrProcessing(parent) {
		// if the parent isn't processing or the last accepted block, then this
		// block is effectively rejected
		t.blocked.Abandon(blkID)
		t.metrics.numBlocked.Set(float64(len(t.pending))) // Tracks performance statistics
		t.metrics.numBlockers.Set(float64(t.blocked.Len()))
		return t.errs.Err
	}

	// By ensuring that the parent is either processing or accepted, it is
	// guaranteed that the parent was successfully verified. This means that
	// calling Verify on this block is allowed.

	// make sure this block is valid
	if err := blk.Verify(); err != nil {
		t.Ctx.Log.Debug("block failed verification due to %s, dropping block", err)

		// if verify fails, then all descendants are also invalid
		t.blocked.Abandon(blkID)
		t.metrics.numBlocked.Set(float64(len(t.pending))) // Tracks performance statistics
		t.metrics.numBlockers.Set(float64(t.blocked.Len()))
		return t.errs.Err
	}

	t.Ctx.Log.Verbo("adding block to consensus: %s", blkID)
	if err := t.Consensus.Add(blk); err != nil {
		return err
	}

	// Add all the oracle blocks if they exist. We call verify on all the blocks
	// and add them to consensus before marking anything as fulfilled to avoid
	// any potential reentrant bugs.
	added := []snowman.Block{}
	dropped := []snowman.Block{}
	if blk, ok := blk.(OracleBlock); ok {
		options, err := blk.Options()
		if err != nil {
			return err
		}

		for _, blk := range options {
			if err := blk.Verify(); err != nil {
				t.Ctx.Log.Debug("block failed verification due to %s, dropping block", err)
				dropped = append(dropped, blk)
			} else {
				if err := t.Consensus.Add(blk); err != nil {
					return err
				}
				added = append(added, blk)
			}
		}
	}

	if err := t.VM.SetPreference(t.Consensus.Preference()); err != nil {
		return err
	}

	// If the block is now preferred, query the network for its preferences
	// with this new block.
	if t.Consensus.IsPreferred(blk) {
		t.pushQuery(blk)
	}

	t.blocked.Fulfill(blkID)
	for _, blk := range added {
		// Options that made it into consensus get the same treatment as the
		// parent: queried if preferred, and their dependents unblocked.
		if t.Consensus.IsPreferred(blk) {
			t.pushQuery(blk)
		}

		blkID := blk.ID()
		delete(t.pending, blkID)
		t.blocked.Fulfill(blkID)
		t.blkReqs.RemoveAny(blkID)
	}
	for _, blk := range dropped {
		// Options that failed verification are abandoned so dependents don't
		// wait forever.
		blkID := blk.ID()
		delete(t.pending, blkID)
		t.blocked.Abandon(blkID)
		t.blkReqs.RemoveAny(blkID)
	}

	// If we should issue multiple queries at the same time, we need to repoll
	t.repoll()

	// Tracks performance statistics
	t.metrics.numRequests.Set(float64(t.blkReqs.Len()))
	t.metrics.numBlocked.Set(float64(len(t.pending)))
	t.metrics.numBlockers.Set(float64(t.blocked.Len()))
	return t.errs.Err
}
// IsBootstrapped returns true iff this chain is done bootstrapping
func (t *Transitive) IsBootstrapped() bool {
	// Delegates to the chain context, which owns the bootstrapping flag.
	return t.Ctx.IsBootstrapped()
}
// HealthCheck implements the common.Engine interface. It reports the health
// of both the VM and (once bootstrapped) the consensus instance, returning a
// combined error if either is unhealthy.
func (t *Transitive) HealthCheck() (interface{}, error) {
	consensusIntf := interface{}(struct{}{})
	var consensusErr error
	// Consensus health is only meaningful once bootstrapping has finished.
	if t.Ctx.IsBootstrapped() {
		consensusIntf, consensusErr = t.Consensus.HealthCheck()
	}
	vmIntf, vmErr := t.VM.HealthCheck()

	intf := map[string]interface{}{
		"consensus": consensusIntf,
		"vm":        vmIntf,
	}

	switch {
	case consensusErr == nil:
		return intf, vmErr
	case vmErr == nil:
		return intf, consensusErr
	default:
		return intf, fmt.Errorf("vm: %s ; consensus: %s", vmErr, consensusErr)
	}
}
// GetBlock implements the snowman.Engine interface. Blocks queued for
// issuance (pending) take precedence over the VM's view of the chain.
func (t *Transitive) GetBlock(blkID ids.ID) (snowman.Block, error) {
	if blk, ok := t.pending[blkID]; ok {
		return blk, nil
	}
	return t.VM.GetBlock(blkID)
}
// GetVM implements the snowman.Engine interface
func (t *Transitive) GetVM() common.VM {
	// Expose the underlying VM for callers that need direct access.
	return t.VM
}
// Returns true if the block whose ID is [blkID] is waiting to be issued to consensus
func (t *Transitive) pendingContains(blkID ids.ID) bool {
	_, queued := t.pending[blkID]
	return queued
}
|
package main
import (
"fmt"
"github.com/unrolled/render"
"github.com/vanng822/r2router"
"net/http"
)
// main starts an HTTP server on :8080 exposing a few grouped example routes.
func main() {
	renderer := render.New()
	router := r2router.NewRouter()
	router.Group("/repos/:owner/:repo", func(r *r2router.GroupRouter) {
		// Echo the captured route parameters back as JSON.
		r.Get("/stats/contributors", func(w http.ResponseWriter, r *http.Request, p r2router.Params) {
			data := map[string]string{}
			data["owner"] = p.Get("owner")
			data["repo"] = p.Get("repo")
			renderer.JSON(w, http.StatusOK, data)
		})
		// Fixed: the format verb was "#v" (missing '%'), which printed the
		// literal text "#v" instead of dumping the params; use %#v.
		r.Get("/releases/:id/assets", func(w http.ResponseWriter, r *http.Request, p r2router.Params) {
			w.Write([]byte(fmt.Sprintf("%#v", p)))
		})
		r.Get("/issue", func(w http.ResponseWriter, r *http.Request, p r2router.Params) {
			w.Write([]byte(fmt.Sprintf("%#v", p)))
		})
	})
	// Report why the server stopped (e.g. the port is already in use) instead
	// of silently discarding the error.
	if err := http.ListenAndServe(":8080", router); err != nil {
		fmt.Println(err)
	}
}
The same example, using the `r2router.M` convenience type instead of a hand-built `map[string]string`:
package main
import (
"fmt"
"github.com/unrolled/render"
"github.com/vanng822/r2router"
"net/http"
)
// main starts an HTTP server on :8080; the contributors route builds its JSON
// payload with the r2router.M convenience type.
func main() {
	renderer := render.New()
	router := r2router.NewRouter()
	router.Group("/repos/:owner/:repo", func(r *r2router.GroupRouter) {
		r.Get("/stats/contributors", func(w http.ResponseWriter, r *http.Request, p r2router.Params) {
			data := r2router.M{
				"owner": p.Get("owner"),
				"repo":  p.Get("repo"),
			}
			renderer.JSON(w, http.StatusOK, data)
		})
		// Fixed: the format verb was "#v" (missing '%'), which printed the
		// literal text "#v" instead of dumping the params; use %#v.
		r.Get("/releases/:id/assets", func(w http.ResponseWriter, r *http.Request, p r2router.Params) {
			w.Write([]byte(fmt.Sprintf("%#v", p)))
		})
		r.Get("/issue", func(w http.ResponseWriter, r *http.Request, p r2router.Params) {
			w.Write([]byte(fmt.Sprintf("%#v", p)))
		})
	})
	// Report why the server stopped instead of silently discarding the error.
	if err := http.ListenAndServe(":8080", router); err != nil {
		fmt.Println(err)
	}
}
|
package solidserver
import (
"encoding/json"
"fmt"
"github.com/hashicorp/terraform/helper/schema"
"log"
"math/big"
"net/url"
"strconv"
"strings"
)
// abs returns the absolute value of the integer x.
func abs(x int) int {
	if x >= 0 {
		return x
	}
	return -x
}
// toStringArray converts a Schema.TypeList value ([]interface{}) into a
// []string. Nil entries map to the empty string; non-string entries panic
// via the type assertion, matching the caller's expectations for TypeList.
func toStringArray(in []interface{}) []string {
	out := make([]string, len(in)) // zero value "" covers the nil case
	for i, v := range in {
		if v != nil {
			out[i] = v.(string)
		}
	}
	return out
}
// toStringArrayInterface converts a []string into the []interface{} shape
// used by Schema.TypeList.
func toStringArrayInterface(in []string) []interface{} {
	out := make([]interface{}, 0, len(in))
	for _, s := range in {
		out = append(out, s)
	}
	return out
}
// BigIntToHexStr converts a big integer into its lowercase hexadecimal
// string representation (no "0x" prefix; negative values keep a leading
// minus sign).
func BigIntToHexStr(bigInt *big.Int) string {
	return fmt.Sprintf("%x", bigInt)
}
// BigIntToStr converts a big integer into its decimal string representation.
func BigIntToStr(bigInt *big.Int) string {
	return fmt.Sprintf("%v", bigInt)
}
// hexiptoip converts an 8-character hexadecimal IPv4 address string (e.g.
// "c0a80101") into dotted-decimal form ("192.168.1.1").
// Returns an empty string in case of failure.
// NOTE(review): the original comment said IPv6, but the scan pattern handles
// exactly four octets, i.e. IPv4.
func hexiptoip(hexip string) string {
	var a, b, c, d int
	if n, _ := fmt.Sscanf(hexip, "%02x%02x%02x%02x", &a, &b, &c, &d); n != 4 {
		return ""
	}
	return fmt.Sprintf("%d.%d.%d.%d", a, b, c, d)
}
// iptoptr converts an IPv4 address string into its reverse-lookup PTR record
// name (e.g. "192.168.1.1" -> "1.1.168.192.in-addr.arpa").
// Returns an empty string in case of failure.
func iptoptr(ip string) string {
	var a, b, c, d int
	if n, _ := fmt.Sscanf(ip, "%03d.%03d.%03d.%03d", &a, &b, &c, &d); n != 4 {
		return ""
	}
	// PTR names list the octets in reverse order.
	return fmt.Sprintf("%d.%d.%d.%d.in-addr.arpa", d, c, b, a)
}
// ip6toptr converts an IPv6 address string into a PTR record name by writing
// the address nibbles in reverse order, suffixed with "ip6.arpa".
// NOTE(review): the input is not expanded first, so compressed ("::") or
// unpadded groups yield a shortened name rather than the full 32-nibble
// form — confirm callers always pass fully-expanded addresses.
func ip6toptr(ip string) string {
	var sb strings.Builder
	groups := strings.Split(ip, ":")
	for g := len(groups) - 1; g >= 0; g-- {
		group := groups[g]
		for n := len(group) - 1; n >= 0; n-- {
			sb.WriteByte(group[n])
			sb.WriteByte('.')
		}
	}
	return sb.String() + "ip6.arpa"
}
// Convert a net.IP object into an IPv6 address in full format
// func FullIPv6(ip net.IP) string {
// dst := make([]byte, hex.EncodedLen(len(ip)))
// _ = hex.Encode(dst, ip)
// return string(dst[0:4]) + ":" +
// string(dst[4:8]) + ":" +
// string(dst[8:12]) + ":" +
// string(dst[12:16]) + ":" +
// string(dst[16:20]) + ":" +
// string(dst[20:24]) + ":" +
// string(dst[24:28]) + ":" +
// string(dst[28:])
// }
// hexip6toip6 converts a hexadecimal IPv6 address string into colon-separated
// form by inserting ":" before every 4-character group after the first.
// Characters are copied verbatim; no validation is performed.
func hexip6toip6(hexip string) string {
	var sb strings.Builder
	for i, c := range hexip {
		if i != 0 && i%4 == 0 {
			sb.WriteByte(':')
		}
		sb.WriteRune(c)
	}
	return sb.String()
}
// iptohexip converts a dotted-decimal IPv4 address string into its
// 8-character hexadecimal representation (e.g. "192.168.1.1" -> "c0a80101").
// Returns an empty string in case of failure.
//
// Fixed: strconv.Atoi errors were previously ignored, so a non-numeric input
// such as "a.b.c.d" silently mapped to "00000000" instead of signaling
// failure as documented.
func iptohexip(ip string) string {
	ipDec := strings.Split(ip, ".")
	if len(ipDec) != 4 {
		return ""
	}
	var octets [4]int
	for i, s := range ipDec {
		v, err := strconv.Atoi(s)
		if err != nil || v < 0 || v > 255 {
			return ""
		}
		octets[i] = v
	}
	return fmt.Sprintf("%02x%02x%02x%02x", octets[0], octets[1], octets[2], octets[3])
}
// ip6tohexip6 converts a fully-expanded (8-group) IPv6 address string into a
// 32-character hexadecimal string. Each group is left-padded to 4 characters.
// Returns an empty string in case of failure.
//
// Fixed: groups were previously padded with fmt's "%04s", which pads with
// SPACES, so any group shorter than 4 characters (e.g. "db8") produced an
// invalid hex string like " db8". Groups are now zero-padded, and a group
// longer than 4 characters is rejected.
func ip6tohexip6(ip string) string {
	groups := strings.Split(ip, ":")
	if len(groups) != 8 {
		return ""
	}
	var sb strings.Builder
	sb.Grow(32)
	for _, g := range groups {
		if len(g) > 4 {
			return ""
		}
		sb.WriteString(strings.Repeat("0", 4-len(g)))
		sb.WriteString(g)
	}
	return sb.String()
}
// iptolong converts a dotted-decimal IPv4 address string into its unsigned
// 32-bit integer representation.
// Returns 0 in case of failure.
//
// Fixed: parse errors and out-of-range octets (e.g. "300") were previously
// ignored, silently producing a garbage value; such inputs now return 0 as
// documented.
func iptolong(ip string) uint32 {
	ipDec := strings.Split(ip, ".")
	if len(ipDec) != 4 {
		return 0
	}
	var iplong uint32
	for _, s := range ipDec {
		v, err := strconv.Atoi(s)
		if err != nil || v < 0 || v > 255 {
			return 0
		}
		iplong = iplong<<8 | uint32(v)
	}
	return iplong
}
// longtoip converts an unsigned 32-bit integer into a dotted-decimal IPv4
// address string.
//
// Fixed: removed the dead "if a < 0" branch — a is derived from a uint32 and
// can never be negative (a go vet-level defect in the original).
func longtoip(iplong uint32) string {
	a := (iplong >> 24) & 0xFF
	b := (iplong >> 16) & 0xFF
	c := (iplong >> 8) & 0xFF
	d := iplong & 0xFF
	return fmt.Sprintf("%d.%d.%d.%d", a, b, c, d)
}
// resourcediffsuppresscase is a schema.SchemaDiffSuppressFunc that suppresses
// diffs when the old and new values differ only by letter case.
//
// Improved: use strings.EqualFold instead of comparing two ToLower copies
// (no temporary allocations, proper Unicode case folding) and return the
// comparison result directly instead of an if/else over a boolean.
func resourcediffsuppresscase(k, old, new string, d *schema.ResourceData) bool {
	return strings.EqualFold(old, new)
}
// sizetoprefixlength computes the IPv4 CIDR prefix length corresponding to a
// subnet of the given size (e.g. 256 -> 24), by halving the size until it
// reaches 1.
func sizetoprefixlength(size int) int {
	prefixlength := 32
	for s := size; prefixlength > 0 && s > 1; s /= 2 {
		prefixlength--
	}
	return prefixlength
}
// prefixlengthtosize returns the number of addresses covered by an IPv4 CIDR
// prefix of the given length (e.g. /24 -> 256).
// Returns -1 for lengths outside [0, 32].
func prefixlengthtosize(length int) int {
	if length < 0 || length > 32 {
		return -1
	}
	return 1 << (32 - uint32(length))
}
// prefixlengthtohexip returns the dotted-decimal netmask of an IPv4 CIDR
// prefix of the given length (e.g. /24 -> "255.255.255.0").
// Returns an empty string for lengths outside [0, 32].
func prefixlengthtohexip(length int) string {
	if length < 0 || length > 32 {
		return ""
	}
	hostBits := 32 - uint32(length)
	// Set the top [length] bits, then mask down to 32 bits.
	mask := (^((1 << hostBits) - 1)) & 0xffffffff
	return longtoip(uint32(mask))
}
// prefix6lengthtosize computes the size of an IPv6 CIDR prefix from its
// length, as the big integer 16^(32 - length/4).
func prefix6lengthtosize(length int64) *big.Int {
	exponent := big.NewInt(32 - (length / 4))
	return new(big.Int).Exp(big.NewInt(16), exponent, nil)
}
// urlfromclassparams builds a url.Values object from a map of class
// parameters. parameters must be a map[string]interface{} with string
// values; anything else panics via the type assertions (unchanged from the
// original contract).
func urlfromclassparams(parameters interface{}) url.Values {
	values := url.Values{}
	for key, val := range parameters.(map[string]interface{}) {
		values.Add(key, val.(string))
	}
	return values
}
// Return the oid of a device from hostdev_name
// Or an empty string in case of failure
func hostdevidbyname(hostdevName string, meta interface{}) (string, error) {
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
parameters.Add("WHERE", "hostdev_name='"+strings.ToLower(hostdevName)+"'")
// Sending the read request
resp, body, err := s.Request("get", "rest/hostdev_list", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if hostdevID, hostdevIDExist := buf[0]["hostdev_id"].(string); hostdevIDExist {
return hostdevID, nil
}
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find device: %s\n", hostdevName)
return "", err
}
// Return an available IP addresses from site_id, block_id and expected subnet_size
// Or an empty table of string in case of failure
func ipaddressfindfree(subnetID string, poolID string, meta interface{}) ([]string, error) {
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
parameters.Add("subnet_id", subnetID)
parameters.Add("max_find", "32")
if len(poolID) > 0 {
parameters.Add("pool_id", poolID)
}
// Sending the creation request
resp, body, err := s.Request("get", "rpc/ip_find_free_address", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
addresses := []string{}
for i := 0; i < len(buf); i++ {
if addr, addrExist := buf[i]["hostaddr"].(string); addrExist {
log.Printf("[DEBUG] SOLIDServer - Suggested IP address: %s\n", addr)
addresses = append(addresses, addr)
}
}
return addresses, nil
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find a free IP address in subnet (oid): %s\n", subnetID)
return []string{}, err
}
// Return an available IP addresses from site_id, block_id and expected subnet_size
// Or an empty table of string in case of failure
func ip6addressfindfree(subnetID string, poolID string, meta interface{}) ([]string, error) {
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
parameters.Add("subnet6_id", subnetID)
parameters.Add("max_find", "32")
if len(poolID) > 0 {
parameters.Add("pool6_id", poolID)
}
// Sending the creation request
resp, body, err := s.Request("get", "rpc/ip6_find_free_address6", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
addresses := []string{}
for i := 0; i < len(buf); i++ {
if addr, addrExist := buf[i]["hostaddr6"].(string); addrExist {
log.Printf("[DEBUG] SOLIDServer - Suggested IP address: %s\n", addr)
addresses = append(addresses, addr)
}
}
return addresses, nil
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find a free IPv6 address in subnet (oid): %s\n", subnetID)
return []string{}, err
}
// Return an available vlan from specified vlmdomain_name
// Or an empty table strings in case of failure
func vlanidfindfree(vlmdomainName string, meta interface{}) ([]string, error) {
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
parameters.Add("limit", "16")
if s.Version < 700 {
parameters.Add("WHERE", "vlmdomain_name='"+strings.ToLower(vlmdomainName)+"' AND row_enabled='2'")
} else {
parameters.Add("WHERE", "vlmdomain_name='"+strings.ToLower(vlmdomainName)+"' AND type='free'")
}
// Sending the creation request
resp, body, err := s.Request("get", "rest/vlmvlan_list", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
vnIDs := []string{}
for i := range buf {
if s.Version < 700 {
if vnID, vnIDExist := buf[i]["vlmvlan_vlan_id"].(string); vnIDExist {
log.Printf("[DEBUG] SOLIDServer - Suggested vlan ID: %s\n", vnID)
vnIDs = append(vnIDs, vnID)
}
} else {
if startVlanID, startVlanIDExist := buf[i]["free_start_vlan_id"].(string); startVlanIDExist {
if endVlanID, endVlanIDExist := buf[i]["free_end_vlan_id"].(string); endVlanIDExist {
vnID, _ := strconv.Atoi(startVlanID)
maxVnID, _ := strconv.Atoi(endVlanID)
j := 0
for vnID < maxVnID && j < 8 {
log.Printf("[DEBUG] SOLIDServer - Suggested vlan ID: %d\n", vnID)
vnIDs = append(vnIDs, strconv.Itoa(vnID))
vnID++
j++
}
}
}
}
}
return vnIDs, nil
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find a free vlan ID in vlan domain: %s\n", vlmdomainName)
return []string{}, err
}
// Return the oid of a space from site_name
// Or an empty string in case of failure
func ipsiteidbyname(siteName string, meta interface{}) (string, error) {
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
parameters.Add("WHERE", "site_name='"+strings.ToLower(siteName)+"'")
// Sending the read request
resp, body, err := s.Request("get", "rest/ip_site_list", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if siteID, siteIDExist := buf[0]["site_id"].(string); siteIDExist {
return siteID, nil
}
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find IP space: %s\n", siteName)
return "", err
}
// Return the oid of a vlan domain from vlmdomain_name
// Or an empty string in case of failure
func vlandomainidbyname(vlmdomainName string, meta interface{}) (string, error) {
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
parameters.Add("WHERE", "vlmdomain_name='"+strings.ToLower(vlmdomainName)+"'")
// Sending the read request
resp, body, err := s.Request("get", "rest/vlmdomain_name", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if vlmdomainID, vlmdomainIDExist := buf[0]["vlmdomain_id"].(string); vlmdomainIDExist {
return vlmdomainID, nil
}
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find vlan domain: %s\n", vlmdomainName)
return "", err
}
// Return the oid of a subnet from site_id, subnet_name and is_terminal property
// Or an empty string in case of failure
func ipsubnetidbyname(siteID string, subnetName string, terminal bool, meta interface{}) (string, error) {
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
whereClause := "site_id='" + siteID + "' AND " + "subnet_name='" + strings.ToLower(subnetName) + "'"
if terminal {
whereClause += "AND is_terminal='1'"
} else {
whereClause += "AND is_terminal='0'"
}
parameters.Add("WHERE", whereClause)
// Sending the read request
resp, body, err := s.Request("get", "rest/ip_block_subnet_list", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if subnetID, subnetIDExist := buf[0]["subnet_id"].(string); subnetIDExist {
return subnetID, nil
}
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find IP subnet: %s\n", subnetName)
return "", err
}
// Return the oid of a pool from site_id and pool_name
// Or an empty string in case of failure
func ippoolidbyname(siteID string, poolName string, subnetName string, meta interface{}) (string, error) {
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
parameters.Add("WHERE", "site_id='"+siteID+"' AND "+"pool_name='"+strings.ToLower(poolName)+"' AND subnet_name='"+strings.ToLower(subnetName)+"'")
// Sending the read request
resp, body, err := s.Request("get", "rest/ip_pool_list", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if poolID, poolIDExist := buf[0]["pool_id"].(string); poolIDExist {
return poolID, nil
}
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find IP pool: %s\n", poolName)
return "", err
}
// Return the oid of a pool from site_id and pool_name
// Or an empty string in case of failure
func ippoolinfobyname(siteID string, poolName string, subnetName string, meta interface{}) (map[string]interface{}, error) {
res := make(map[string]interface{})
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
parameters.Add("WHERE", "site_id='"+siteID+"' AND "+"pool_name='"+strings.ToLower(poolName)+"' AND subnet_name='"+strings.ToLower(subnetName)+"'")
// Sending the read request
resp, body, err := s.Request("get", "rest/ip_pool_list", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if poolID, poolIDExist := buf[0]["pool_id"].(string); poolIDExist {
res["id"] = poolID
if poolName, poolNameExist := buf[0]["pool_name"].(string); poolNameExist {
res["name"] = poolName
}
if poolSize, poolSizeExist := buf[0]["pool_size"].(string); poolSizeExist {
res["size"], _ = strconv.Atoi(poolSize)
}
if poolStartAddr, poolStartAddrExist := buf[0]["start_ip_addr"].(string); poolStartAddrExist {
res["start_hex_addr"] = poolStartAddr
res["start_addr"] = hexiptoip(poolStartAddr)
}
if poolEndAddr, poolEndAddrExist := buf[0]["end_ip_addr"].(string); poolEndAddrExist {
res["end_hex_addr"] = poolEndAddr
res["end_addr"] = hexiptoip(poolEndAddr)
}
return res, nil
}
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find IP pool: %s\n", poolName)
return nil, err
}
// Return a map of information about a subnet from site_id, subnet_name and is_terminal property
// Or nil in case of failure
func ipsubnetinfobyname(siteID string, subnetName string, terminal bool, meta interface{}) (map[string]interface{}, error) {
res := make(map[string]interface{})
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
whereClause := "site_id='" + siteID + "' AND " + "subnet_name='" + strings.ToLower(subnetName) + "'"
if terminal {
whereClause += "AND is_terminal='1'"
} else {
whereClause += "AND is_terminal='0'"
}
parameters.Add("WHERE", whereClause)
// Sending the read request
resp, body, err := s.Request("get", "rest/ip_block_subnet_list", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if subnetID, subnetIDExist := buf[0]["subnet_id"].(string); subnetIDExist {
res["id"] = subnetID
if subnetName, subnetNameExist := buf[0]["subnet_name"].(string); subnetNameExist {
res["name"] = subnetName
}
if subnetSize, subnetSizeExist := buf[0]["subnet_size"].(string); subnetSizeExist {
res["size"], _ = strconv.Atoi(subnetSize)
res["prefix_length"] = sizetoprefixlength(res["size"].(int))
}
if subnetStartAddr, subnetStartAddrExist := buf[0]["start_ip_addr"].(string); subnetStartAddrExist {
res["start_hex_addr"] = subnetStartAddr
res["start_addr"] = hexiptoip(subnetStartAddr)
}
if subnetEndAddr, subnetEndAddrExist := buf[0]["end_ip_addr"].(string); subnetEndAddrExist {
res["end_hex_addr"] = subnetEndAddr
res["end_addr"] = hexiptoip(subnetEndAddr)
}
if subnetTerminal, subnetTerminalExist := buf[0]["is_terminal"].(string); subnetTerminalExist {
res["terminal"] = subnetTerminal
}
if subnetLvl, subnetLvlExist := buf[0]["subnet_level"].(string); subnetLvlExist {
res["level"] = subnetLvl
}
return res, nil
}
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find IP subnet: %s\n", subnetName)
return nil, err
}
// Return the oid of a subnet from site_id, subnet_name and is_terminal property
// Or an empty string in case of failure
func ip6subnetidbyname(siteID string, subnetName string, terminal bool, meta interface{}) (string, error) {
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
whereClause := "site_id='" + siteID + "' AND " + "subnet6_name='" + strings.ToLower(subnetName) + "'"
if terminal {
whereClause += "AND is_terminal='1'"
} else {
whereClause += "AND is_terminal='0'"
}
parameters.Add("WHERE", whereClause)
// Sending the read request
resp, body, err := s.Request("get", "rest/ip6_block6_subnet6_list", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if subnetID, subnetIDExist := buf[0]["subnet6_id"].(string); subnetIDExist {
return subnetID, nil
}
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find IPv6 subnet: %s\n", subnetName)
return "", err
}
// Return the oid of a pool from site_id and pool_name
// Or an empty string in case of failure
func ip6poolidbyname(siteID string, poolName string, subnetName string, meta interface{}) (string, error) {
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
parameters.Add("WHERE", "site_id='"+siteID+"' AND "+"pool6_name='"+strings.ToLower(poolName)+"' AND subnet6_name='"+strings.ToLower(subnetName)+"'")
// Sending the read request
resp, body, err := s.Request("get", "rest/ip6_pool6_list", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if poolID, poolIDExist := buf[0]["pool6_id"].(string); poolIDExist {
return poolID, nil
}
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find IPv6 pool: %s\n", poolName)
return "", err
}
// Return the oid of a pool from site_id and pool_name
// Or an empty string in case of failure
func ip6poolinfobyname(siteID string, poolName string, subnetName string, meta interface{}) (map[string]interface{}, error) {
res := make(map[string]interface{})
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
parameters.Add("WHERE", "site_id='"+siteID+"' AND "+"pool6_name='"+strings.ToLower(poolName)+"' AND subnet6_name='"+strings.ToLower(subnetName)+"'")
// Sending the read request
resp, body, err := s.Request("get", "rest/ip6_pool6_list", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if poolID, poolIDExist := buf[0]["pool6_id"].(string); poolIDExist {
res["id"] = poolID
if poolName, poolNameExist := buf[0]["pool6_name"].(string); poolNameExist {
res["name"] = poolName
}
if poolSize, poolSizeExist := buf[0]["pool6_size"].(string); poolSizeExist {
res["size"], _ = strconv.Atoi(poolSize)
}
if poolStartAddr, poolStartAddrExist := buf[0]["start_ip6_addr"].(string); poolStartAddrExist {
res["start_hex_addr"] = poolStartAddr
res["start_addr"] = hexiptoip(poolStartAddr)
}
if poolEndAddr, poolEndAddrExist := buf[0]["end_ip6_addr"].(string); poolEndAddrExist {
res["end_hex_addr"] = poolEndAddr
res["end_addr"] = hexiptoip(poolEndAddr)
}
return res, nil
}
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find IPv6 pool: %s\n", poolName)
return nil, err
}
// Return a map of information about a subnet from site_id, subnet_name and is_terminal property
// Or nil in case of failure
func ip6subnetinfobyname(siteID string, subnetName string, terminal bool, meta interface{}) (map[string]interface{}, error) {
res := make(map[string]interface{})
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
whereClause := "site_id='" + siteID + "' AND " + "subnet6_name='" + strings.ToLower(subnetName) + "'"
if terminal {
whereClause += "AND is_terminal='1'"
} else {
whereClause += "AND is_terminal='0'"
}
parameters.Add("WHERE", whereClause)
// Sending the read request
resp, body, err := s.Request("get", "rest/ip6_block6_subnet6_list", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if subnetID, subnetIDExist := buf[0]["subnet6_id"].(string); subnetIDExist {
res["id"] = subnetID
if subnetName, subnetNameExist := buf[0]["subnet6_name"].(string); subnetNameExist {
res["name"] = subnetName
}
if subnetPrefixSize, subnetPrefixSizeExist := buf[0]["subnet6_prefix"].(string); subnetPrefixSizeExist {
res["prefix_length"], _ = strconv.Atoi(subnetPrefixSize)
}
if subnetStartAddr, subnetStartAddrExist := buf[0]["start_ip6_addr"].(string); subnetStartAddrExist {
res["start_hex_addr"] = subnetStartAddr
res["start_addr"] = hexiptoip(subnetStartAddr)
}
if subnetEndAddr, subnetEndAddrExist := buf[0]["end_ip6_addr"].(string); subnetEndAddrExist {
res["end_hex_addr"] = subnetEndAddr
res["end_addr"] = hexiptoip(subnetEndAddr)
}
if subnetTerminal, subnetTerminalExist := buf[0]["is_terminal"].(string); subnetTerminalExist {
res["terminal"] = subnetTerminal
}
if subnetLvl, subnetLvlExist := buf[0]["subnet_level"].(string); subnetLvlExist {
res["level"] = subnetLvl
}
return res, nil
}
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find IPv6 subnet: %s\n", subnetName)
return nil, err
}
// Return the oid of an address from site_id, ip_address
// Or an empty string in case of failure
func ipaddressidbyip(siteID string, ipAddress string, meta interface{}) (string, error) {
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
parameters.Add("WHERE", "site_id='"+siteID+"' AND "+"ip_addr='"+iptohexip(ipAddress)+"'")
// Sending the read request
resp, body, err := s.Request("get", "rest/ip_address_list", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if ipID, ipIDExist := buf[0]["ip_id"].(string); ipIDExist {
return ipID, nil
}
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find IP address: %s\n", ipAddress)
return "", err
}
// Return the oid of an address from site_id, ip_address
// Or an empty string in case of failure
func ip6addressidbyip6(siteID string, ipAddress string, meta interface{}) (string, error) {
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
parameters.Add("WHERE", "site_id='"+siteID+"' AND "+"ip6_addr='"+ip6tohexip6(ipAddress)+"'")
// Sending the read request
resp, body, err := s.Request("get", "rest/ip6_address6_list", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if ipID, ipIDExist := buf[0]["ip6_id"].(string); ipIDExist {
return ipID, nil
}
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find IPv6 address: %s\n", ipAddress)
return "", err
}
// Return the oid of an address from ip_id, ip_name_type, alias_name
// Or an empty string in case of failure
func ipaliasidbyinfo(addressID string, aliasName string, ipNameType string, meta interface{}) (string, error) {
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
parameters.Add("ip_id", addressID)
parameters.Add("WHERE", "ip_name_type='"+ipNameType+"' AND "+"alias_name='"+aliasName+"'")
// Sending the read request
resp, body, err := s.Request("get", "rest/ip_alias_list", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if ip_name_id, ip_name_id_exist := buf[0]["ip_name_id"].(string); ip_name_id_exist {
return ip_name_id, nil
}
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find IP alias: %s - %s associated with IP address ID %s\n", aliasName, ipNameType, addressID)
return "", err
}
// Return an available subnet address from site_id, block_id and expected subnet_size
// Or an empty string in case of failure
func ipsubnetfindbysize(siteID string, blockID string, requestedIP string, prefixSize int, meta interface{}) ([]string, error) {
subnetAddresses := []string{}
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
parameters.Add("site_id", siteID)
parameters.Add("prefix", strconv.Itoa(prefixSize))
parameters.Add("max_find", "16")
// Specifying a suggested subnet IP address
if len(requestedIP) > 0 {
subnetAddresses = append(subnetAddresses, iptohexip(requestedIP))
return subnetAddresses, nil
}
// Trying to create a subnet under an existing block
parameters.Add("block_id", blockID)
// Sending the creation request
resp, body, err := s.Request("get", "rpc/ip_find_free_subnet", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
subnetAddresses := []string{}
for i := 0; i < len(buf); i++ {
if hexaddr, hexaddr_exist := buf[i]["start_ip_addr"].(string); hexaddr_exist {
log.Printf("[DEBUG] SOLIDServer - Suggested IP subnet address: %s\n", hexiptoip(hexaddr))
subnetAddresses = append(subnetAddresses, hexaddr)
}
}
return subnetAddresses, nil
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find a free IP subnet in space (oid): %s, block (oid): %s, size: %s\n", siteID, blockID, strconv.Itoa(prefixSize))
return []string{}, err
}
// Return an available subnet address from site_id, block_id and expected subnet_size
// Or an empty string in case of failure
func ip6subnetfindbysize(siteID string, blockID string, requestedIP string, prefixSize int, meta interface{}) ([]string, error) {
subnetAddresses := []string{}
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
parameters.Add("site_id", siteID)
parameters.Add("prefix", strconv.Itoa(prefixSize))
parameters.Add("max_find", "16")
// Specifying a suggested subnet IP address
if len(requestedIP) > 0 {
subnetAddresses = append(subnetAddresses, ip6tohexip6(requestedIP))
return subnetAddresses, nil
}
// Trying to create a subnet under an existing block
parameters.Add("block6_id", blockID)
// Sending the creation request
resp, body, err := s.Request("get", "rpc/ip6_find_free_subnet6", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
subnetAddresses := []string{}
for i := 0; i < len(buf); i++ {
if hexaddr, hexaddr_exist := buf[i]["start_ip6_addr"].(string); hexaddr_exist {
log.Printf("[DEBUG] SOLIDServer - Suggested IPv6 subnet address: %s\n", hexip6toip6(hexaddr))
subnetAddresses = append(subnetAddresses, hexaddr)
}
}
return subnetAddresses, nil
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find a free IPv6 subnet in space (oid): %s, block (oid): %s, size: %s\n", siteID, blockID, strconv.Itoa(prefixSize))
return []string{}, err
}
// Return the oid of a Custom DB from name
// Or an empty string in case of failure
func cdbnameidbyname(name string, meta interface{}) (string, error) {
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
parameters.Add("WHERE", "name='"+name+"'")
// Sending the read request
resp, body, err := s.Request("get", "rest/custom_db_name_list", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if cdbnameID, cdbnameIDExist := buf[0]["custom_db_name_id"].(string); cdbnameIDExist {
return cdbnameID, nil
}
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find Custom DB: %s\n", name)
return "", err
}
// Update a DNS SMART member's role list
// Return false in case of failure
//
// The whole member list is pushed at once through the SMART's
// "vdns_dns_group_role" property (format "name&role;name&role;...", as built
// by dnsaddtosmart / dnsdeletefromsmart).
func dnssmartmembersupdate(smartName string, smartMembersRole string, meta interface{}) bool {
	s := meta.(*SOLIDserver)
	// Building parameters for retrieving SMART vdns_dns_group_role information
	parameters := url.Values{}
	parameters.Add("dns_name", smartName)
	// "edit_only" makes dns_add update the existing SMART instead of creating one.
	parameters.Add("add_flag", "edit_only")
	parameters.Add("vdns_dns_group_role", smartMembersRole)
	// Sending the update request
	resp, body, err := s.Request("put", "rest/dns_add", &parameters)
	if err == nil {
		var buf [](map[string]interface{})
		// NOTE(review): Unmarshal errors are ignored; an empty buf simply
		// falls through to the failure logging below.
		json.Unmarshal([]byte(body), &buf)
		// Checking the answer
		if resp.StatusCode == 200 && len(buf) > 0 {
			return true
		}
		// Log the error, including the service's errmsg when one was returned.
		if len(buf) > 0 {
			if errMsg, errExist := buf[0]["errmsg"].(string); errExist {
				log.Printf("[DEBUG] SOLIDServer - Unable to update members list of the DNS SMART: %s (%s)\n", smartName, errMsg)
			}
		} else {
			log.Printf("[DEBUG] SOLIDServer - Unable to update members list of the DNS SMART: %s\n", smartName)
		}
	}
	return false
}
// Get DNS Server status
// Return an empty string in case of failure, the server status otherwise
// ("Y" -> OK).
func dnsserverstatus(serverID string, meta interface{}) string {
	s := meta.(*SOLIDserver)
	// Building parameters for retrieving information
	parameters := url.Values{}
	parameters.Add("dns_id", serverID)
	// Sending the get request
	resp, body, err := s.Request("get", "rest/dns_server_info", &parameters)
	if err == nil {
		var buf [](map[string]interface{})
		json.Unmarshal([]byte(body), &buf)
		// Checking the answer
		if resp.StatusCode == 200 && len(buf) > 0 {
			if state, stateExist := buf[0]["dns_state"].(string); stateExist {
				return state
			}
			// A 200 answer without a "dns_state" string yields an empty status.
			return ""
		}
		// Log the error, including the service's errmsg when one was returned.
		if len(buf) > 0 {
			if errMsg, errExist := buf[0]["errmsg"].(string); errExist {
				log.Printf("[DEBUG] SOLIDServer - Unable to retrieve DNS server status: %s (%s)\n", serverID, errMsg)
			}
		} else {
			log.Printf("[DEBUG] SOLIDServer - Unable to retrieve DNS server status: %s\n", serverID)
		}
	}
	return ""
}
// Get number of pending deletion operations on DNS server
// Return -1 in case of failure
//
// The count is the sum of delayed-delete zones and delayed-delete views.
// NOTE(review): the "-1 on failure" contract only holds when a 200 answer
// lacks the "total" field; on a transport error (err != nil) the partial
// accumulator is returned instead — confirm whether callers rely on this.
func dnsserverpendingdeletions(serverID string, meta interface{}) int {
	s := meta.(*SOLIDserver)
	result := 0
	// Building parameters for retrieving information (pending zone deletions)
	parameters := url.Values{}
	parameters.Add("WHERE", "delayed_delete_time='1' AND dns_id='"+serverID+"'")
	// Sending the get request
	resp, body, err := s.Request("get", "rest/dns_zone_count", &parameters)
	if err == nil {
		var buf [](map[string]interface{})
		json.Unmarshal([]byte(body), &buf)
		// Checking the answer
		if resp.StatusCode == 200 && len(buf) > 0 {
			if total, totalExist := buf[0]["total"].(string); totalExist {
				// Atoi error deliberately ignored; a non-numeric total counts as 0.
				inc, _ := strconv.Atoi(total)
				result += inc
			} else {
				return -1
			}
		}
		// Log the error, including the service's errmsg when one was returned.
		if len(buf) > 0 {
			if errMsg, errExist := buf[0]["errmsg"].(string); errExist {
				log.Printf("[DEBUG] SOLIDServer - Unable to retrieve DNS server pending operations: %s (%s)\n", serverID, errMsg)
			}
		} else {
			log.Printf("[DEBUG] SOLIDServer - Unable to retrieve DNS server pending operations: %s\n", serverID)
		}
	}
	// Building parameters for retrieving information (pending view deletions)
	parameters = url.Values{}
	parameters.Add("WHERE", "delayed_delete_time='1' AND dns_id='"+serverID+"'")
	// Sending the get request
	resp, body, err = s.Request("get", "rest/dns_view_count", &parameters)
	if err == nil {
		var buf [](map[string]interface{})
		json.Unmarshal([]byte(body), &buf)
		// Checking the answer
		if resp.StatusCode == 200 && len(buf) > 0 {
			if total, totalExist := buf[0]["total"].(string); totalExist {
				inc, _ := strconv.Atoi(total)
				result += inc
			} else {
				return -1
			}
		}
		// Log the error, including the service's errmsg when one was returned.
		if len(buf) > 0 {
			if errMsg, errExist := buf[0]["errmsg"].(string); errExist {
				log.Printf("[DEBUG] SOLIDServer - Unable to retrieve DNS server pending operations: %s (%s)\n", serverID, errMsg)
			}
		} else {
			log.Printf("[DEBUG] SOLIDServer - Unable to retrieve DNS server pending operations: %s\n", serverID)
		}
	}
	return result
}
// Set a DNSserver or DNSview param value
// Return false in case of failure
func dnsparamset(serverName string, viewID string, paramKey string, paramValue string, meta interface{}) bool {
s := meta.(*SOLIDserver)
service := "dns_server_param_add"
// Building parameters to push information
parameters := url.Values{}
if viewID != "" {
service = "dns_view_param_add"
parameters.Add("dnsview_id", viewID)
} else {
parameters.Add("dns_name", serverName)
}
parameters.Add("param_key", paramKey)
parameters.Add("param_value", paramValue)
// Sending the update request
resp, body, err := s.Request("put", "rest/"+service, ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
return true
}
// Log the error
if len(buf) > 0 {
if errMsg, errExist := buf[0]["errmsg"].(string); errExist {
log.Printf("[DEBUG] SOLIDServer - Unable to set DNS server or view parameter: %s on %s (%s)\n", paramKey, serverName, errMsg)
}
} else {
log.Printf("[DEBUG] SOLIDServer - Unable to set DNS server or view parameter: %s on %s\n", paramKey, serverName)
}
}
return false
}
// UnSet a DNSserver or DNSview param value
// Return false in case of failure
func dnsparamunset(serverName string, viewID string, paramKey string, meta interface{}) bool {
s := meta.(*SOLIDserver)
service := "dns_server_param_delete"
// Building parameters to push information
parameters := url.Values{}
if viewID != "" {
service = "dns_view_param_delete"
parameters.Add("dnsview_id", viewID)
} else {
parameters.Add("dns_name", serverName)
}
parameters.Add("param_key", paramKey)
// Sending the delete request
resp, body, err := s.Request("delete", "rest/"+service, ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
return true
}
// Log the error
if len(buf) > 0 {
if errMsg, errExist := buf[0]["errmsg"].(string); errExist {
log.Printf("[DEBUG] SOLIDServer - Unable to unset DNS server or view parameter: %s on %s (%s)\n", paramKey, serverName, errMsg)
}
} else {
log.Printf("[DEBUG] SOLIDServer - Unable to unset DNS server or view parameter: %s on %s\n", paramKey, serverName)
}
}
return false
}
// Get a DNSserver or DNSview param's value
// Return an empty string and an error in case of failure
func dnsparamget(serverName string, viewID string, paramKey string, meta interface{}) (string, error) {
s := meta.(*SOLIDserver)
service := "dns_server_param_list"
if viewID != "" {
service = "dns_view_param_list"
}
// Building parameters for retrieving information
parameters := url.Values{}
if viewID == "" {
parameters.Add("WHERE", "dns_name='"+serverName+"' AND param_key='"+paramKey+"'")
} else {
parameters.Add("WHERE", "dns_name='"+serverName+"' AND dnsview_id='"+viewID+"' AND param_key='"+paramKey+"'")
}
// Sending the read request
resp, body, err := s.Request("get", "rest/"+service, ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if paramValue, paramValueExist := buf[0]["param_value"].(string); paramValueExist {
return paramValue, nil
} else {
return "", nil
}
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find DNS Param Key: %s\n", paramKey)
return "", err
}
// Add a DNS server to a SMART with the required role.
// Return false in case of failure
//
// Strategy: first try the atomic dns_smart_member_add service; if that
// service is unavailable (400/404, pre-8.0 appliances), fall back to reading
// the current member list and re-pushing it whole with the new member
// appended (via dnssmartmembersupdate).
func dnsaddtosmart(smartName string, serverName string, serverRole string, meta interface{}) bool {
	s := meta.(*SOLIDserver)
	parameters := url.Values{}
	parameters.Add("vdns_name", smartName)
	parameters.Add("dns_name", serverName)
	parameters.Add("dns_role", serverRole)
	// Sending the registration request (atomic service)
	resp, body, err := s.Request("post", "rest/dns_smart_member_add", &parameters)
	if err == nil {
		var buf [](map[string]interface{})
		json.Unmarshal([]byte(body), &buf)
		// Checking the answer
		if resp.StatusCode == 200 || resp.StatusCode == 201 {
			return true
		}
		// Atomic SMART registration service unavailable attempting to use existing services
		if resp.StatusCode == 400 || resp.StatusCode == 404 {
			// Random Delay (in case of concurrent resources creation - until 8.0 and service dns_smart_member_add)
			//time.Sleep(time.Duration((rand.Intn(600) / 10) * time.Second))
			// Otherwise proceed using the previous method
			// Building parameters for retrieving SMART vdns_dns_group_role information
			// Note: parameters/resp/body/err below deliberately shadow the outer
			// variables — the fallback performs its own independent request.
			parameters := url.Values{}
			parameters.Add("WHERE", "vdns_parent_name='"+smartName+"' AND dns_type!='vdns'")
			// Sending the read request
			resp, body, err := s.Request("get", "rest/dns_server_list", &parameters)
			if err == nil {
				var buf [](map[string]interface{})
				json.Unmarshal([]byte(body), &buf)
				// Checking the answer
				if resp.StatusCode == 200 || resp.StatusCode == 204 {
					// Building vdns_dns_group_role parameter from the SMART member list
					// (format "name&role;name&role;..."), then appending the new member.
					membersRole := ""
					if len(buf) > 0 {
						for _, smartMember := range buf {
							membersRole += smartMember["dns_name"].(string) + "&" + smartMember["dns_role"].(string) + ";"
						}
					}
					membersRole += serverName + "&" + serverRole
					if dnssmartmembersupdate(smartName, membersRole, meta) {
						return true
					}
					return false
				}
				// Log the error
				if len(buf) > 0 {
					if errMsg, errExist := buf[0]["errmsg"].(string); errExist {
						log.Printf("[DEBUG] SOLIDServer - Unable to retrieve members list of the DNS SMART: %s (%s)\n", smartName, errMsg)
					}
				} else {
					log.Printf("[DEBUG] SOLIDServer - Unable to retrieve members list of the DNS SMART: %s\n", smartName)
				}
			}
			return false
		}
		// Log the error (unexpected status from the atomic service)
		if len(buf) > 0 {
			if errMsg, errExist := buf[0]["errmsg"].(string); errExist {
				log.Printf("[DEBUG] SOLIDServer - Unable to update the member list of the DNS SMART: %s (%s)\n", smartName, errMsg)
			}
		} else {
			log.Printf("[DEBUG] SOLIDServer - Unable to update the member list of the DNS SMART: %s\n", smartName)
		}
	}
	return false
}
// Remove a DNS server from a SMART
// Return false in case of failure
//
// Strategy mirrors dnsaddtosmart: try the atomic dns_smart_member_delete
// service first; if unavailable (400/404), rebuild the member list without
// the target server and push it whole via dnssmartmembersupdate.
func dnsdeletefromsmart(smartName string, serverName string, meta interface{}) bool {
	s := meta.(*SOLIDserver)
	parameters := url.Values{}
	parameters.Add("vdns_name", smartName)
	parameters.Add("dns_name", serverName)
	// Sending the delete request (atomic service)
	resp, body, err := s.Request("delete", "rest/dns_smart_member_delete", &parameters)
	if err == nil {
		var buf [](map[string]interface{})
		json.Unmarshal([]byte(body), &buf)
		// Checking the answer
		if resp.StatusCode == 200 || resp.StatusCode == 204 {
			return true
		}
		// Atomic SMART registration service unavailable attempting to use existing services
		if resp.StatusCode == 400 || resp.StatusCode == 404 {
			// Random Delay (in case of concurrent resources creation - until 8.0 and service dns_smart_member_add)
			//time.Sleep(time.Duration((rand.Intn(600) / 10) * time.Second))
			// Building parameters for retrieving SMART vdns_dns_group_role information
			// Note: parameters/resp/body/err below deliberately shadow the outer
			// variables — the fallback performs its own independent request.
			parameters := url.Values{}
			parameters.Add("WHERE", "vdns_parent_name='"+smartName+"' AND dns_type!='vdns'")
			// Sending the read request
			resp, body, err := s.Request("get", "rest/dns_server_list", &parameters)
			if err == nil {
				var buf [](map[string]interface{})
				json.Unmarshal([]byte(body), &buf)
				// Checking the answer
				if resp.StatusCode == 200 || resp.StatusCode == 204 {
					// Building vdns_dns_group_role parameter from the SMART member
					// list, skipping the member being removed.
					membersRole := ""
					if len(buf) > 0 {
						for _, smartMember := range buf {
							if smartMember["dns_name"].(string) != serverName {
								membersRole += smartMember["dns_name"].(string) + "&" + smartMember["dns_role"].(string) + ";"
							}
						}
					}
					if dnssmartmembersupdate(smartName, membersRole, meta) {
						return true
					}
					return false
				}
				// Log the error
				if len(buf) > 0 {
					if errMsg, errExist := buf[0]["errmsg"].(string); errExist {
						log.Printf("[DEBUG] SOLIDServer - Unable to retrieve members list of the DNS SMART: %s (%s)\n", smartName, errMsg)
					}
				} else {
					log.Printf("[DEBUG] SOLIDServer - Unable to retrieve members list of the DNS SMART: %s\n", smartName)
				}
			}
			return false
		}
		// Log the error (unexpected status from the atomic service)
		if len(buf) > 0 {
			if errMsg, errExist := buf[0]["errmsg"].(string); errExist {
				log.Printf("[DEBUG] SOLIDServer - Unable to update the member list of the DNS SMART: %s (%s)\n", smartName, errMsg)
			}
		} else {
			log.Printf("[DEBUG] SOLIDServer - Unable to update the member list of the DNS SMART: %s\n", smartName)
		}
	}
	return false
}
// Sorting returned array to prevent unexpected conflicts
package solidserver
import (
"encoding/json"
"fmt"
"github.com/hashicorp/terraform/helper/schema"
"log"
"math/big"
"net/url"
"sort"
"strconv"
"strings"
)
// abs returns the absolute value of the integer x.
func abs(x int) int {
	if x >= 0 {
		return x
	}
	return -x
}
// Convert a Schema.TypeList interface into an array of strings.
// nil entries become empty strings; the result is sorted to keep
// ordering deterministic.
func toStringArray(in []interface{}) []string {
	out := make([]string, 0, len(in))
	for _, v := range in {
		if v == nil {
			out = append(out, "")
		} else {
			out = append(out, v.(string))
		}
	}
	sort.Strings(out)
	return out
}
// Convert an array of strings into a Schema.TypeList interface,
// preserving order.
func toStringArrayInterface(in []string) []interface{} {
	out := make([]interface{}, 0, len(in))
	for _, s := range in {
		out = append(out, s)
	}
	return out
}
// BigIntToHexStr convert a Big Integer into an Hexa String
func BigIntToHexStr(bigInt *big.Int) string {
return fmt.Sprintf("%x", bigInt)
}
// BigIntToStr convert a Big Integer to Decimal String
func BigIntToStr(bigInt *big.Int) string {
return fmt.Sprintf("%v", bigInt)
}
// Convert a hexadecimal IPv4 address string (8 hex digits, e.g. "c0a80101")
// into standard dotted-quad notation (e.g. "192.168.1.1").
// Return an empty string in case of failure
// NOTE(review): the previous comment said "IPv6", but this helper parses four
// 2-digit hex octets, i.e. an IPv4 address.
func hexiptoip(hexip string) string {
	a, b, c, d := 0, 0, 0, 0
	// Each %02x consumes at most two hex digits; count is the number of
	// octets successfully parsed.
	count, _ := fmt.Sscanf(hexip, "%02x%02x%02x%02x", &a, &b, &c, &d)
	if count == 4 {
		return fmt.Sprintf("%d.%d.%d.%d", a, b, c, d)
	}
	return ""
}
// Convert IP v4 address string into PTR record name
// Return an empty string in case of failure
func iptoptr(ip string) string {
	var a, b, c, d int
	// %03d accepts 1 to 3 digits per octet; all four must parse.
	if n, _ := fmt.Sscanf(ip, "%03d.%03d.%03d.%03d", &a, &b, &c, &d); n != 4 {
		return ""
	}
	// PTR names list the octets in reverse order.
	return fmt.Sprintf("%d.%d.%d.%d.in-addr.arpa", d, c, b, a)
}
// Convert IPv6 address string into PTR record name
// Return an empty string in case of failure
// NOTE(review): assumes a fully-expanded address ("2001:0db8:...:0001");
// compressed "::" notation is not expanded here — confirm against callers.
func ip6toptr(ip string) string {
	var sb strings.Builder
	groups := strings.Split(ip, ":")
	// Walk groups and nibbles back to front, emitting "n." per nibble.
	for g := len(groups) - 1; g >= 0; g-- {
		for n := len(groups[g]) - 1; n >= 0; n-- {
			sb.WriteByte(groups[g][n])
			sb.WriteByte('.')
		}
	}
	return sb.String() + "ip6.arpa"
}
// Convert a net.IP object into an IPv6 address in full format
// func FullIPv6(ip net.IP) string {
// dst := make([]byte, hex.EncodedLen(len(ip)))
// _ = hex.Encode(dst, ip)
// return string(dst[0:4]) + ":" +
// string(dst[4:8]) + ":" +
// string(dst[8:12]) + ":" +
// string(dst[12:16]) + ":" +
// string(dst[16:20]) + ":" +
// string(dst[20:24]) + ":" +
// string(dst[24:28]) + ":" +
// string(dst[28:])
// }
// Convert hexa IPv6 address string into standard IPv6 address string
// Return an empty string in case of failure
// Inserts a ':' before every 4th character of the 32-digit hex string.
func hexip6toip6(hexip string) string {
	var sb strings.Builder
	for i, c := range hexip {
		if i != 0 && i%4 == 0 {
			sb.WriteByte(':')
		}
		sb.WriteRune(c)
	}
	return sb.String()
}
// Convert standard IPv4 address string into hexa IP address string
// Return an empty string in case of failure
func iptohexip(ip string) string {
	ipDec := strings.Split(ip, ".")
	if len(ipDec) != 4 {
		return ""
	}
	var octets [4]int
	for i, field := range ipDec {
		v, err := strconv.Atoi(field)
		// Fixed: Atoi errors were previously ignored, silently mapping
		// garbage like "a.b.c.d" to "00000000" instead of failing.
		if err != nil || v < 0 || v > 255 {
			return ""
		}
		octets[i] = v
	}
	return fmt.Sprintf("%02x%02x%02x%02x", octets[0], octets[1], octets[2], octets[3])
}
// Convert standard IPv6 address string into hexa IPv6 address string
// Return an empty string in case of failure
// The address must contain exactly 8 colon-separated groups (compressed
// "::" notation is not expanded, as before).
func ip6tohexip6(ip string) string {
	groups := strings.Split(ip, ":")
	if len(groups) != 8 {
		return ""
	}
	var b strings.Builder
	for _, g := range groups {
		if len(g) > 4 {
			return ""
		}
		// Fixed: the previous fmt.Sprintf("%04s", g) padded with SPACES,
		// producing an invalid hex string for non-expanded groups like "db8".
		b.WriteString(strings.Repeat("0", 4-len(g)))
		b.WriteString(g)
	}
	return b.String()
}
// Convert standard IP address string into unsigned int32
// Return 0 in case of failure
func iptolong(ip string) uint32 {
	ipDec := strings.Split(ip, ".")
	if len(ipDec) != 4 {
		return 0
	}
	var iplong uint32
	for _, field := range ipDec {
		v, err := strconv.Atoi(field)
		// Fixed: parse errors and out-of-range octets were previously
		// ignored, so "999.0.0.1" silently wrapped instead of failing
		// as the documented contract ("return 0") requires.
		if err != nil || v < 0 || v > 255 {
			return 0
		}
		iplong = iplong<<8 | uint32(v)
	}
	return iplong
}
// Convert unsigned int32 into standard IP address string
// Return an IP formated string
func longtoip(iplong uint32) string {
	a := (iplong & 0xFF000000) >> 24
	b := (iplong & 0xFF0000) >> 16
	c := (iplong & 0xFF00) >> 8
	d := (iplong & 0xFF)
	// Fixed: removed the dead "if a < 0 { a += 0x100 }" branch — a is an
	// unsigned uint32 and can never be negative (staticcheck SA4003).
	return fmt.Sprintf("%d.%d.%d.%d", a, b, c, d)
}
// resourcediffsuppresscase suppresses a Terraform diff when the old and new
// values differ only by letter case.
func resourcediffsuppresscase(k, old, new string, d *schema.ResourceData) bool {
	// strings.EqualFold avoids allocating two lowercased copies and handles
	// Unicode case-folding correctly (staticcheck SA6005).
	return strings.EqualFold(old, new)
}
// Compute the prefix length from the size of a CIDR prefix
// Return the prefix length (each halving of the size shortens the
// mask by one bit, starting from /32).
func sizetoprefixlength(size int) int {
	length := 32
	for remaining := size; length > 0 && remaining > 1; remaining /= 2 {
		length--
	}
	return length
}
// Compute the actual size of a CIDR prefix from its length
// Return -1 in case of failure (length outside [0, 32])
func prefixlengthtosize(length int) int {
	if length < 0 || length > 32 {
		return -1
	}
	return 1 << (32 - uint32(length))
}
// Compute the netmask of a CIDR prefix from its length
// Return an empty string in case of failure (length outside [0, 32])
func prefixlengthtohexip(length int) string {
	if length < 0 || length > 32 {
		return ""
	}
	// All-ones shifted left leaves the top `length` bits set; a shift of
	// 32 (length == 0) yields 0 in Go, matching the original arithmetic.
	mask := uint32(0xffffffff) << (32 - uint32(length))
	return longtoip(mask)
}
// Compute the actual size of an IPv6 CIDR prefix from its length
// (number of addresses as a big.Int: 16^(32 - length/4)).
// Note: length is expected in bits; the integer division by 4 converts it
// to a nibble count, as in the original implementation.
func prefix6lengthtosize(length int64) *big.Int {
	// Fixed spelling of the exponent variable ("sufix" -> "suffix").
	suffix := big.NewInt(32 - (length / 4))
	size := big.NewInt(16)
	size = size.Exp(size, suffix, nil)
	//size = size.Sub(size, big.NewInt(1))
	return size
}
// Build url value object from class parameters
// Return an url.Values{} object holding one entry per map key.
// Note: panics if parameters is not a map[string]interface{} of strings,
// exactly like the original type assertions.
func urlfromclassparams(parameters interface{}) url.Values {
	values := url.Values{}
	for key, value := range parameters.(map[string]interface{}) {
		values.Add(key, value.(string))
	}
	return values
}
// Return the oid of a device from hostdev_name
// Or an empty string in case of failure
func hostdevidbyname(hostdevName string, meta interface{}) (string, error) {
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
parameters.Add("WHERE", "hostdev_name='"+strings.ToLower(hostdevName)+"'")
// Sending the read request
resp, body, err := s.Request("get", "rest/hostdev_list", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if hostdevID, hostdevIDExist := buf[0]["hostdev_id"].(string); hostdevIDExist {
return hostdevID, nil
}
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find device: %s\n", hostdevName)
return "", err
}
// Return an available IP addresses from site_id, block_id and expected subnet_size
// Or an empty table of string in case of failure
func ipaddressfindfree(subnetID string, poolID string, meta interface{}) ([]string, error) {
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
parameters.Add("subnet_id", subnetID)
parameters.Add("max_find", "32")
if len(poolID) > 0 {
parameters.Add("pool_id", poolID)
}
// Sending the creation request
resp, body, err := s.Request("get", "rpc/ip_find_free_address", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
addresses := []string{}
for i := 0; i < len(buf); i++ {
if addr, addrExist := buf[i]["hostaddr"].(string); addrExist {
log.Printf("[DEBUG] SOLIDServer - Suggested IP address: %s\n", addr)
addresses = append(addresses, addr)
}
}
return addresses, nil
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find a free IP address in subnet (oid): %s\n", subnetID)
return []string{}, err
}
// Return an available IP addresses from site_id, block_id and expected subnet_size
// Or an empty table of string in case of failure
func ip6addressfindfree(subnetID string, poolID string, meta interface{}) ([]string, error) {
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
parameters.Add("subnet6_id", subnetID)
parameters.Add("max_find", "32")
if len(poolID) > 0 {
parameters.Add("pool6_id", poolID)
}
// Sending the creation request
resp, body, err := s.Request("get", "rpc/ip6_find_free_address6", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
addresses := []string{}
for i := 0; i < len(buf); i++ {
if addr, addrExist := buf[i]["hostaddr6"].(string); addrExist {
log.Printf("[DEBUG] SOLIDServer - Suggested IP address: %s\n", addr)
addresses = append(addresses, addr)
}
}
return addresses, nil
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find a free IPv6 address in subnet (oid): %s\n", subnetID)
return []string{}, err
}
// Return an available vlan from specified vlmdomain_name
// Or an empty table strings in case of failure
//
// Pre-7.0 appliances expose free vlans as individual rows (row_enabled='2');
// 7.0+ expose free ranges (type='free') from which up to 8 consecutive IDs
// per range are expanded.
func vlanidfindfree(vlmdomainName string, meta interface{}) ([]string, error) {
	s := meta.(*SOLIDserver)
	// Building parameters
	parameters := url.Values{}
	parameters.Add("limit", "16")
	if s.Version < 700 {
		parameters.Add("WHERE", "vlmdomain_name='"+strings.ToLower(vlmdomainName)+"' AND row_enabled='2'")
	} else {
		parameters.Add("WHERE", "vlmdomain_name='"+strings.ToLower(vlmdomainName)+"' AND type='free'")
	}
	// Sending the request
	resp, body, err := s.Request("get", "rest/vlmvlan_list", &parameters)
	if err == nil {
		var buf [](map[string]interface{})
		json.Unmarshal([]byte(body), &buf)
		// Checking the answer
		if resp.StatusCode == 200 && len(buf) > 0 {
			vnIDs := []string{}
			for i := range buf {
				if s.Version < 700 {
					// One candidate vlan ID per returned row.
					if vnID, vnIDExist := buf[i]["vlmvlan_vlan_id"].(string); vnIDExist {
						log.Printf("[DEBUG] SOLIDServer - Suggested vlan ID: %s\n", vnID)
						vnIDs = append(vnIDs, vnID)
					}
				} else {
					// Expand each free range into at most 8 candidate IDs.
					if startVlanID, startVlanIDExist := buf[i]["free_start_vlan_id"].(string); startVlanIDExist {
						if endVlanID, endVlanIDExist := buf[i]["free_end_vlan_id"].(string); endVlanIDExist {
							// Atoi errors deliberately ignored; bad bounds yield an empty loop.
							vnID, _ := strconv.Atoi(startVlanID)
							maxVnID, _ := strconv.Atoi(endVlanID)
							j := 0
							for vnID < maxVnID && j < 8 {
								log.Printf("[DEBUG] SOLIDServer - Suggested vlan ID: %d\n", vnID)
								vnIDs = append(vnIDs, strconv.Itoa(vnID))
								vnID++
								j++
							}
						}
					}
				}
			}
			return vnIDs, nil
		}
	}
	log.Printf("[DEBUG] SOLIDServer - Unable to find a free vlan ID in vlan domain: %s\n", vlmdomainName)
	return []string{}, err
}
// Return the oid of a space from site_name
// Or an empty string in case of failure
func ipsiteidbyname(siteName string, meta interface{}) (string, error) {
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
parameters.Add("WHERE", "site_name='"+strings.ToLower(siteName)+"'")
// Sending the read request
resp, body, err := s.Request("get", "rest/ip_site_list", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if siteID, siteIDExist := buf[0]["site_id"].(string); siteIDExist {
return siteID, nil
}
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find IP space: %s\n", siteName)
return "", err
}
// Return the oid of a vlan domain from vlmdomain_name
// Or an empty string in case of failure
func vlandomainidbyname(vlmdomainName string, meta interface{}) (string, error) {
	s := meta.(*SOLIDserver)
	// Building parameters; domain names are stored lowercase.
	parameters := url.Values{}
	parameters.Add("WHERE", "vlmdomain_name='"+strings.ToLower(vlmdomainName)+"'")
	// Sending the read request
	// NOTE(review): every other lookup in this file targets a "*_list"
	// service; "rest/vlmdomain_name" looks like it should be
	// "rest/vlmdomain_list" — verify against the SOLIDserver API.
	resp, body, err := s.Request("get", "rest/vlmdomain_name", &parameters)
	if err == nil {
		var buf [](map[string]interface{})
		json.Unmarshal([]byte(body), &buf)
		// Checking the answer
		if resp.StatusCode == 200 && len(buf) > 0 {
			if vlmdomainID, vlmdomainIDExist := buf[0]["vlmdomain_id"].(string); vlmdomainIDExist {
				return vlmdomainID, nil
			}
		}
	}
	log.Printf("[DEBUG] SOLIDServer - Unable to find vlan domain: %s\n", vlmdomainName)
	return "", err
}
// Return the oid of a subnet from site_id, subnet_name and is_terminal property
// Or an empty string in case of failure
func ipsubnetidbyname(siteID string, subnetName string, terminal bool, meta interface{}) (string, error) {
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
whereClause := "site_id='" + siteID + "' AND " + "subnet_name='" + strings.ToLower(subnetName) + "'"
if terminal {
whereClause += "AND is_terminal='1'"
} else {
whereClause += "AND is_terminal='0'"
}
parameters.Add("WHERE", whereClause)
// Sending the read request
resp, body, err := s.Request("get", "rest/ip_block_subnet_list", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if subnetID, subnetIDExist := buf[0]["subnet_id"].(string); subnetIDExist {
return subnetID, nil
}
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find IP subnet: %s\n", subnetName)
return "", err
}
// Return the oid of a pool from site_id and pool_name
// Or an empty string in case of failure
func ippoolidbyname(siteID string, poolName string, subnetName string, meta interface{}) (string, error) {
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
parameters.Add("WHERE", "site_id='"+siteID+"' AND "+"pool_name='"+strings.ToLower(poolName)+"' AND subnet_name='"+strings.ToLower(subnetName)+"'")
// Sending the read request
resp, body, err := s.Request("get", "rest/ip_pool_list", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if poolID, poolIDExist := buf[0]["pool_id"].(string); poolIDExist {
return poolID, nil
}
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find IP pool: %s\n", poolName)
return "", err
}
// Return a map of information about a pool from site_id, pool_name and
// subnet_name, or nil in case of failure.
//
// The returned map may carry: id, name, size, start_hex_addr, start_addr,
// end_hex_addr, end_addr.
func ippoolinfobyname(siteID string, poolName string, subnetName string, meta interface{}) (map[string]interface{}, error) {
	res := make(map[string]interface{})
	s := meta.(*SOLIDserver)
	// Building parameters; pool and subnet names are stored lowercase.
	parameters := url.Values{}
	parameters.Add("WHERE", "site_id='"+siteID+"' AND "+"pool_name='"+strings.ToLower(poolName)+"' AND subnet_name='"+strings.ToLower(subnetName)+"'")
	// Sending the read request
	resp, body, err := s.Request("get", "rest/ip_pool_list", &parameters)
	if err == nil {
		var buf [](map[string]interface{})
		json.Unmarshal([]byte(body), &buf)
		// Checking the answer; fields are copied only when present as strings.
		if resp.StatusCode == 200 && len(buf) > 0 {
			if poolID, poolIDExist := buf[0]["pool_id"].(string); poolIDExist {
				res["id"] = poolID
				if poolName, poolNameExist := buf[0]["pool_name"].(string); poolNameExist {
					res["name"] = poolName
				}
				if poolSize, poolSizeExist := buf[0]["pool_size"].(string); poolSizeExist {
					// Atoi error deliberately ignored; a non-numeric size yields 0.
					res["size"], _ = strconv.Atoi(poolSize)
				}
				// IPv4 pool bounds: keep the raw hex form and the dotted-quad form.
				if poolStartAddr, poolStartAddrExist := buf[0]["start_ip_addr"].(string); poolStartAddrExist {
					res["start_hex_addr"] = poolStartAddr
					res["start_addr"] = hexiptoip(poolStartAddr)
				}
				if poolEndAddr, poolEndAddrExist := buf[0]["end_ip_addr"].(string); poolEndAddrExist {
					res["end_hex_addr"] = poolEndAddr
					res["end_addr"] = hexiptoip(poolEndAddr)
				}
				return res, nil
			}
		}
	}
	log.Printf("[DEBUG] SOLIDServer - Unable to find IP pool: %s\n", poolName)
	return nil, err
}
// Return a map of information about a subnet from site_id, subnet_name and is_terminal property
// Or nil in case of failure
func ipsubnetinfobyname(siteID string, subnetName string, terminal bool, meta interface{}) (map[string]interface{}, error) {
res := make(map[string]interface{})
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
whereClause := "site_id='" + siteID + "' AND " + "subnet_name='" + strings.ToLower(subnetName) + "'"
if terminal {
whereClause += "AND is_terminal='1'"
} else {
whereClause += "AND is_terminal='0'"
}
parameters.Add("WHERE", whereClause)
// Sending the read request
resp, body, err := s.Request("get", "rest/ip_block_subnet_list", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if subnetID, subnetIDExist := buf[0]["subnet_id"].(string); subnetIDExist {
res["id"] = subnetID
if subnetName, subnetNameExist := buf[0]["subnet_name"].(string); subnetNameExist {
res["name"] = subnetName
}
if subnetSize, subnetSizeExist := buf[0]["subnet_size"].(string); subnetSizeExist {
res["size"], _ = strconv.Atoi(subnetSize)
res["prefix_length"] = sizetoprefixlength(res["size"].(int))
}
if subnetStartAddr, subnetStartAddrExist := buf[0]["start_ip_addr"].(string); subnetStartAddrExist {
res["start_hex_addr"] = subnetStartAddr
res["start_addr"] = hexiptoip(subnetStartAddr)
}
if subnetEndAddr, subnetEndAddrExist := buf[0]["end_ip_addr"].(string); subnetEndAddrExist {
res["end_hex_addr"] = subnetEndAddr
res["end_addr"] = hexiptoip(subnetEndAddr)
}
if subnetTerminal, subnetTerminalExist := buf[0]["is_terminal"].(string); subnetTerminalExist {
res["terminal"] = subnetTerminal
}
if subnetLvl, subnetLvlExist := buf[0]["subnet_level"].(string); subnetLvlExist {
res["level"] = subnetLvl
}
return res, nil
}
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find IP subnet: %s\n", subnetName)
return nil, err
}
// Return the oid of a subnet from site_id, subnet_name and is_terminal property
// Or an empty string in case of failure
func ip6subnetidbyname(siteID string, subnetName string, terminal bool, meta interface{}) (string, error) {
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
whereClause := "site_id='" + siteID + "' AND " + "subnet6_name='" + strings.ToLower(subnetName) + "'"
if terminal {
whereClause += "AND is_terminal='1'"
} else {
whereClause += "AND is_terminal='0'"
}
parameters.Add("WHERE", whereClause)
// Sending the read request
resp, body, err := s.Request("get", "rest/ip6_block6_subnet6_list", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if subnetID, subnetIDExist := buf[0]["subnet6_id"].(string); subnetIDExist {
return subnetID, nil
}
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find IPv6 subnet: %s\n", subnetName)
return "", err
}
// Return the oid of a pool from site_id and pool_name
// Or an empty string in case of failure
func ip6poolidbyname(siteID string, poolName string, subnetName string, meta interface{}) (string, error) {
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
parameters.Add("WHERE", "site_id='"+siteID+"' AND "+"pool6_name='"+strings.ToLower(poolName)+"' AND subnet6_name='"+strings.ToLower(subnetName)+"'")
// Sending the read request
resp, body, err := s.Request("get", "rest/ip6_pool6_list", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if poolID, poolIDExist := buf[0]["pool6_id"].(string); poolIDExist {
return poolID, nil
}
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find IPv6 pool: %s\n", poolName)
return "", err
}
// Return the oid of a pool from site_id and pool_name
// Or an empty string in case of failure
func ip6poolinfobyname(siteID string, poolName string, subnetName string, meta interface{}) (map[string]interface{}, error) {
res := make(map[string]interface{})
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
parameters.Add("WHERE", "site_id='"+siteID+"' AND "+"pool6_name='"+strings.ToLower(poolName)+"' AND subnet6_name='"+strings.ToLower(subnetName)+"'")
// Sending the read request
resp, body, err := s.Request("get", "rest/ip6_pool6_list", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if poolID, poolIDExist := buf[0]["pool6_id"].(string); poolIDExist {
res["id"] = poolID
if poolName, poolNameExist := buf[0]["pool6_name"].(string); poolNameExist {
res["name"] = poolName
}
if poolSize, poolSizeExist := buf[0]["pool6_size"].(string); poolSizeExist {
res["size"], _ = strconv.Atoi(poolSize)
}
if poolStartAddr, poolStartAddrExist := buf[0]["start_ip6_addr"].(string); poolStartAddrExist {
res["start_hex_addr"] = poolStartAddr
res["start_addr"] = hexiptoip(poolStartAddr)
}
if poolEndAddr, poolEndAddrExist := buf[0]["end_ip6_addr"].(string); poolEndAddrExist {
res["end_hex_addr"] = poolEndAddr
res["end_addr"] = hexiptoip(poolEndAddr)
}
return res, nil
}
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find IPv6 pool: %s\n", poolName)
return nil, err
}
// Return a map of information about a subnet from site_id, subnet_name and is_terminal property
// Or nil in case of failure
func ip6subnetinfobyname(siteID string, subnetName string, terminal bool, meta interface{}) (map[string]interface{}, error) {
res := make(map[string]interface{})
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
whereClause := "site_id='" + siteID + "' AND " + "subnet6_name='" + strings.ToLower(subnetName) + "'"
if terminal {
whereClause += "AND is_terminal='1'"
} else {
whereClause += "AND is_terminal='0'"
}
parameters.Add("WHERE", whereClause)
// Sending the read request
resp, body, err := s.Request("get", "rest/ip6_block6_subnet6_list", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if subnetID, subnetIDExist := buf[0]["subnet6_id"].(string); subnetIDExist {
res["id"] = subnetID
if subnetName, subnetNameExist := buf[0]["subnet6_name"].(string); subnetNameExist {
res["name"] = subnetName
}
if subnetPrefixSize, subnetPrefixSizeExist := buf[0]["subnet6_prefix"].(string); subnetPrefixSizeExist {
res["prefix_length"], _ = strconv.Atoi(subnetPrefixSize)
}
if subnetStartAddr, subnetStartAddrExist := buf[0]["start_ip6_addr"].(string); subnetStartAddrExist {
res["start_hex_addr"] = subnetStartAddr
res["start_addr"] = hexiptoip(subnetStartAddr)
}
if subnetEndAddr, subnetEndAddrExist := buf[0]["end_ip6_addr"].(string); subnetEndAddrExist {
res["end_hex_addr"] = subnetEndAddr
res["end_addr"] = hexiptoip(subnetEndAddr)
}
if subnetTerminal, subnetTerminalExist := buf[0]["is_terminal"].(string); subnetTerminalExist {
res["terminal"] = subnetTerminal
}
if subnetLvl, subnetLvlExist := buf[0]["subnet_level"].(string); subnetLvlExist {
res["level"] = subnetLvl
}
return res, nil
}
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find IPv6 subnet: %s\n", subnetName)
return nil, err
}
// Return the oid of an address from site_id, ip_address
// Or an empty string in case of failure
func ipaddressidbyip(siteID string, ipAddress string, meta interface{}) (string, error) {
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
parameters.Add("WHERE", "site_id='"+siteID+"' AND "+"ip_addr='"+iptohexip(ipAddress)+"'")
// Sending the read request
resp, body, err := s.Request("get", "rest/ip_address_list", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if ipID, ipIDExist := buf[0]["ip_id"].(string); ipIDExist {
return ipID, nil
}
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find IP address: %s\n", ipAddress)
return "", err
}
// Return the oid of an address from site_id, ip_address
// Or an empty string in case of failure
func ip6addressidbyip6(siteID string, ipAddress string, meta interface{}) (string, error) {
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
parameters.Add("WHERE", "site_id='"+siteID+"' AND "+"ip6_addr='"+ip6tohexip6(ipAddress)+"'")
// Sending the read request
resp, body, err := s.Request("get", "rest/ip6_address6_list", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if ipID, ipIDExist := buf[0]["ip6_id"].(string); ipIDExist {
return ipID, nil
}
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find IPv6 address: %s\n", ipAddress)
return "", err
}
// Return the oid of an address from ip_id, ip_name_type, alias_name
// Or an empty string in case of failure
func ipaliasidbyinfo(addressID string, aliasName string, ipNameType string, meta interface{}) (string, error) {
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
parameters.Add("ip_id", addressID)
parameters.Add("WHERE", "ip_name_type='"+ipNameType+"' AND "+"alias_name='"+aliasName+"'")
// Sending the read request
resp, body, err := s.Request("get", "rest/ip_alias_list", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if ip_name_id, ip_name_id_exist := buf[0]["ip_name_id"].(string); ip_name_id_exist {
return ip_name_id, nil
}
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find IP alias: %s - %s associated with IP address ID %s\n", aliasName, ipNameType, addressID)
return "", err
}
// Return an available subnet address from site_id, block_id and expected subnet_size
// Or an empty string in case of failure
func ipsubnetfindbysize(siteID string, blockID string, requestedIP string, prefixSize int, meta interface{}) ([]string, error) {
subnetAddresses := []string{}
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
parameters.Add("site_id", siteID)
parameters.Add("prefix", strconv.Itoa(prefixSize))
parameters.Add("max_find", "16")
// Specifying a suggested subnet IP address
if len(requestedIP) > 0 {
subnetAddresses = append(subnetAddresses, iptohexip(requestedIP))
return subnetAddresses, nil
}
// Trying to create a subnet under an existing block
parameters.Add("block_id", blockID)
// Sending the creation request
resp, body, err := s.Request("get", "rpc/ip_find_free_subnet", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
subnetAddresses := []string{}
for i := 0; i < len(buf); i++ {
if hexaddr, hexaddr_exist := buf[i]["start_ip_addr"].(string); hexaddr_exist {
log.Printf("[DEBUG] SOLIDServer - Suggested IP subnet address: %s\n", hexiptoip(hexaddr))
subnetAddresses = append(subnetAddresses, hexaddr)
}
}
return subnetAddresses, nil
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find a free IP subnet in space (oid): %s, block (oid): %s, size: %s\n", siteID, blockID, strconv.Itoa(prefixSize))
return []string{}, err
}
// Return an available subnet address from site_id, block_id and expected subnet_size
// Or an empty string in case of failure
func ip6subnetfindbysize(siteID string, blockID string, requestedIP string, prefixSize int, meta interface{}) ([]string, error) {
subnetAddresses := []string{}
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
parameters.Add("site_id", siteID)
parameters.Add("prefix", strconv.Itoa(prefixSize))
parameters.Add("max_find", "16")
// Specifying a suggested subnet IP address
if len(requestedIP) > 0 {
subnetAddresses = append(subnetAddresses, ip6tohexip6(requestedIP))
return subnetAddresses, nil
}
// Trying to create a subnet under an existing block
parameters.Add("block6_id", blockID)
// Sending the creation request
resp, body, err := s.Request("get", "rpc/ip6_find_free_subnet6", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
subnetAddresses := []string{}
for i := 0; i < len(buf); i++ {
if hexaddr, hexaddr_exist := buf[i]["start_ip6_addr"].(string); hexaddr_exist {
log.Printf("[DEBUG] SOLIDServer - Suggested IPv6 subnet address: %s\n", hexip6toip6(hexaddr))
subnetAddresses = append(subnetAddresses, hexaddr)
}
}
return subnetAddresses, nil
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find a free IPv6 subnet in space (oid): %s, block (oid): %s, size: %s\n", siteID, blockID, strconv.Itoa(prefixSize))
return []string{}, err
}
// Return the oid of a Custom DB from name
// Or an empty string in case of failure
func cdbnameidbyname(name string, meta interface{}) (string, error) {
s := meta.(*SOLIDserver)
// Building parameters
parameters := url.Values{}
parameters.Add("WHERE", "name='"+name+"'")
// Sending the read request
resp, body, err := s.Request("get", "rest/custom_db_name_list", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if cdbnameID, cdbnameIDExist := buf[0]["custom_db_name_id"].(string); cdbnameIDExist {
return cdbnameID, nil
}
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find Custom DB: %s\n", name)
return "", err
}
// Update a DNS SMART member's role list
// Return false in case of failure
func dnssmartmembersupdate(smartName string, smartMembersRole string, meta interface{}) bool {
s := meta.(*SOLIDserver)
// Building parameters for retrieving SMART vdns_dns_group_role information
parameters := url.Values{}
parameters.Add("dns_name", smartName)
parameters.Add("add_flag", "edit_only")
parameters.Add("vdns_dns_group_role", smartMembersRole)
// Sending the update request
resp, body, err := s.Request("put", "rest/dns_add", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
return true
}
// Log the error
if len(buf) > 0 {
if errMsg, errExist := buf[0]["errmsg"].(string); errExist {
log.Printf("[DEBUG] SOLIDServer - Unable to update members list of the DNS SMART: %s (%s)\n", smartName, errMsg)
}
} else {
log.Printf("[DEBUG] SOLIDServer - Unable to update members list of the DNS SMART: %s\n", smartName)
}
}
return false
}
// Get DNS Server status
// Return an empty string in case of failure the server status otherwise (Y -> OK)
func dnsserverstatus(serverID string, meta interface{}) string {
s := meta.(*SOLIDserver)
// Building parameters for retrieving information
parameters := url.Values{}
parameters.Add("dns_id", serverID)
// Sending the get request
resp, body, err := s.Request("get", "rest/dns_server_info", ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if state, stateExist := buf[0]["dns_state"].(string); stateExist {
return state
}
return ""
}
// Log the error
if len(buf) > 0 {
if errMsg, errExist := buf[0]["errmsg"].(string); errExist {
log.Printf("[DEBUG] SOLIDServer - Unable to retrieve DNS server status: %s (%s)\n", serverID, errMsg)
}
} else {
log.Printf("[DEBUG] SOLIDServer - Unable to retrieve DNS server status: %s\n", serverID)
}
}
return ""
}
// Get number of pending deletion operations on a DNS server, summing the
// delayed-delete zone count and the delayed-delete view count.
// Return -1 in case of failure.
// NOTE(review): when a request fails at transport level (err != nil) the
// running count (possibly 0) is returned rather than -1 — confirm this
// matches the documented contract.
func dnsserverpendingdeletions(serverID string, meta interface{}) int {
	s := meta.(*SOLIDserver)
	result := 0

	// Building parameters for retrieving information (zones flagged for
	// delayed deletion on this server)
	parameters := url.Values{}
	parameters.Add("WHERE", "delayed_delete_time='1' AND dns_id='"+serverID+"'")

	// Sending the get request
	resp, body, err := s.Request("get", "rest/dns_zone_count", &parameters)

	if err == nil {
		var buf [](map[string]interface{})
		json.Unmarshal([]byte(body), &buf)

		// Checking the answer; "total" comes back as a string
		if resp.StatusCode == 200 && len(buf) > 0 {
			if total, totalExist := buf[0]["total"].(string); totalExist {
				inc, _ := strconv.Atoi(total)
				result += inc
			} else {
				return -1
			}
		}

		// Log the error
		if len(buf) > 0 {
			if errMsg, errExist := buf[0]["errmsg"].(string); errExist {
				log.Printf("[DEBUG] SOLIDServer - Unable to retrieve DNS server pending operations: %s (%s)\n", serverID, errMsg)
			}
		} else {
			log.Printf("[DEBUG] SOLIDServer - Unable to retrieve DNS server pending operations: %s\n", serverID)
		}
	}

	// Building parameters for retrieving information (views flagged for
	// delayed deletion on this server); reuses the same variables
	parameters = url.Values{}
	parameters.Add("WHERE", "delayed_delete_time='1' AND dns_id='"+serverID+"'")

	// Sending the get request
	resp, body, err = s.Request("get", "rest/dns_view_count", &parameters)

	if err == nil {
		var buf [](map[string]interface{})
		json.Unmarshal([]byte(body), &buf)

		// Checking the answer; "total" comes back as a string
		if resp.StatusCode == 200 && len(buf) > 0 {
			if total, totalExist := buf[0]["total"].(string); totalExist {
				inc, _ := strconv.Atoi(total)
				result += inc
			} else {
				return -1
			}
		}

		// Log the error
		if len(buf) > 0 {
			if errMsg, errExist := buf[0]["errmsg"].(string); errExist {
				log.Printf("[DEBUG] SOLIDServer - Unable to retrieve DNS server pending operations: %s (%s)\n", serverID, errMsg)
			}
		} else {
			log.Printf("[DEBUG] SOLIDServer - Unable to retrieve DNS server pending operations: %s\n", serverID)
		}
	}

	return result
}
// Set a DNSserver or DNSview param value
// Return false in case of failure
func dnsparamset(serverName string, viewID string, paramKey string, paramValue string, meta interface{}) bool {
s := meta.(*SOLIDserver)
service := "dns_server_param_add"
// Building parameters to push information
parameters := url.Values{}
if viewID != "" {
service = "dns_view_param_add"
parameters.Add("dnsview_id", viewID)
} else {
parameters.Add("dns_name", serverName)
}
parameters.Add("param_key", paramKey)
parameters.Add("param_value", paramValue)
// Sending the update request
resp, body, err := s.Request("put", "rest/"+service, ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
return true
}
// Log the error
if len(buf) > 0 {
if errMsg, errExist := buf[0]["errmsg"].(string); errExist {
log.Printf("[DEBUG] SOLIDServer - Unable to set DNS server or view parameter: %s on %s (%s)\n", paramKey, serverName, errMsg)
}
} else {
log.Printf("[DEBUG] SOLIDServer - Unable to set DNS server or view parameter: %s on %s\n", paramKey, serverName)
}
}
return false
}
// UnSet a DNSserver or DNSview param value
// Return false in case of failure
func dnsparamunset(serverName string, viewID string, paramKey string, meta interface{}) bool {
s := meta.(*SOLIDserver)
service := "dns_server_param_delete"
// Building parameters to push information
parameters := url.Values{}
if viewID != "" {
service = "dns_view_param_delete"
parameters.Add("dnsview_id", viewID)
} else {
parameters.Add("dns_name", serverName)
}
parameters.Add("param_key", paramKey)
// Sending the delete request
resp, body, err := s.Request("delete", "rest/"+service, ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
return true
}
// Log the error
if len(buf) > 0 {
if errMsg, errExist := buf[0]["errmsg"].(string); errExist {
log.Printf("[DEBUG] SOLIDServer - Unable to unset DNS server or view parameter: %s on %s (%s)\n", paramKey, serverName, errMsg)
}
} else {
log.Printf("[DEBUG] SOLIDServer - Unable to unset DNS server or view parameter: %s on %s\n", paramKey, serverName)
}
}
return false
}
// Get a DNSserver or DNSview param's value
// Return an empty string and an error in case of failure
func dnsparamget(serverName string, viewID string, paramKey string, meta interface{}) (string, error) {
s := meta.(*SOLIDserver)
service := "dns_server_param_list"
if viewID != "" {
service = "dns_view_param_list"
}
// Building parameters for retrieving information
parameters := url.Values{}
if viewID == "" {
parameters.Add("WHERE", "dns_name='"+serverName+"' AND param_key='"+paramKey+"'")
} else {
parameters.Add("WHERE", "dns_name='"+serverName+"' AND dnsview_id='"+viewID+"' AND param_key='"+paramKey+"'")
}
// Sending the read request
resp, body, err := s.Request("get", "rest/"+service, ¶meters)
if err == nil {
var buf [](map[string]interface{})
json.Unmarshal([]byte(body), &buf)
// Checking the answer
if resp.StatusCode == 200 && len(buf) > 0 {
if paramValue, paramValueExist := buf[0]["param_value"].(string); paramValueExist {
return paramValue, nil
} else {
return "", nil
}
}
}
log.Printf("[DEBUG] SOLIDServer - Unable to find DNS Param Key: %s\n", paramKey)
return "", err
}
// Add a DNS server to a SMART with the required role. Tries the atomic
// dns_smart_member_add service first; when that service is unavailable
// (HTTP 400/404, pre-8.0 appliances), falls back to rebuilding the whole
// vdns_dns_group_role member list and pushing it via dnssmartmembersupdate.
// Return false in case of failure.
func dnsaddtosmart(smartName string, serverName string, serverRole string, meta interface{}) bool {
	s := meta.(*SOLIDserver)

	parameters := url.Values{}
	parameters.Add("vdns_name", smartName)
	parameters.Add("dns_name", serverName)
	parameters.Add("dns_role", serverRole)

	// Sending the read request
	resp, body, err := s.Request("post", "rest/dns_smart_member_add", &parameters)

	if err == nil {
		var buf [](map[string]interface{})
		json.Unmarshal([]byte(body), &buf)

		// Checking the answer
		if resp.StatusCode == 200 || resp.StatusCode == 201 {
			return true
		}

		// Atomic SMART registration service unavailable attempting to use existing services
		if resp.StatusCode == 400 || resp.StatusCode == 404 {
			// Random Delay (in case of concurrent resources creation - until 8.0 and service dns_smart_member_add)
			//time.Sleep(time.Duration((rand.Intn(600) / 10) * time.Second))

			// Otherwise proceed using the previous method
			// Building parameters for retrieving SMART vdns_dns_group_role information
			// (the inner resp/body/err deliberately shadow the outer ones)
			parameters := url.Values{}
			parameters.Add("WHERE", "vdns_parent_name='"+smartName+"' AND dns_type!='vdns'")

			// Sending the read request
			resp, body, err := s.Request("get", "rest/dns_server_list", &parameters)

			if err == nil {
				var buf [](map[string]interface{})
				json.Unmarshal([]byte(body), &buf)

				// Checking the answer
				if resp.StatusCode == 200 || resp.StatusCode == 204 {
					// Building vdns_dns_group_role parameter from the SMART member
					// list: "name&role;" entries, ending with the new member
					membersRole := ""
					if len(buf) > 0 {
						for _, smartMember := range buf {
							membersRole += smartMember["dns_name"].(string) + "&" + smartMember["dns_role"].(string) + ";"
						}
					}
					membersRole += serverName + "&" + serverRole

					if dnssmartmembersupdate(smartName, membersRole, meta) {
						return true
					}
					return false
				}

				// Log the error
				if len(buf) > 0 {
					if errMsg, errExist := buf[0]["errmsg"].(string); errExist {
						log.Printf("[DEBUG] SOLIDServer - Unable to retrieve members list of the DNS SMART: %s (%s)\n", smartName, errMsg)
					}
				} else {
					log.Printf("[DEBUG] SOLIDServer - Unable to retrieve members list of the DNS SMART: %s\n", smartName)
				}
			}
			return false
		}

		// Log the error
		if len(buf) > 0 {
			if errMsg, errExist := buf[0]["errmsg"].(string); errExist {
				log.Printf("[DEBUG] SOLIDServer - Unable to update the member list of the DNS SMART: %s (%s)\n", smartName, errMsg)
			}
		} else {
			log.Printf("[DEBUG] SOLIDServer - Unable to update the member list of the DNS SMART: %s\n", smartName)
		}
	}
	return false
}
// Remove a DNS server from a SMART. Tries the atomic
// dns_smart_member_delete service first; when that service is unavailable
// (HTTP 400/404, pre-8.0 appliances), falls back to rebuilding the
// vdns_dns_group_role member list without the server and pushing it via
// dnssmartmembersupdate. Return false in case of failure.
func dnsdeletefromsmart(smartName string, serverName string, meta interface{}) bool {
	s := meta.(*SOLIDserver)

	parameters := url.Values{}
	parameters.Add("vdns_name", smartName)
	parameters.Add("dns_name", serverName)

	// Sending the read request
	resp, body, err := s.Request("delete", "rest/dns_smart_member_delete", &parameters)

	if err == nil {
		var buf [](map[string]interface{})
		json.Unmarshal([]byte(body), &buf)

		// Checking the answer
		if resp.StatusCode == 200 || resp.StatusCode == 204 {
			return true
		}

		// Atomic SMART registration service unavailable attempting to use existing services
		if resp.StatusCode == 400 || resp.StatusCode == 404 {
			// Random Delay (in case of concurrent resources creation - until 8.0 and service dns_smart_member_add)
			//time.Sleep(time.Duration((rand.Intn(600) / 10) * time.Second))

			// Building parameters for retrieving SMART vdns_dns_group_role information
			// (the inner resp/body/err deliberately shadow the outer ones)
			parameters := url.Values{}
			parameters.Add("WHERE", "vdns_parent_name='"+smartName+"' AND dns_type!='vdns'")

			// Sending the read request
			resp, body, err := s.Request("get", "rest/dns_server_list", &parameters)

			if err == nil {
				var buf [](map[string]interface{})
				json.Unmarshal([]byte(body), &buf)

				// Checking the answer
				if resp.StatusCode == 200 || resp.StatusCode == 204 {
					// Building vdns_dns_group_role parameter from the SMART member
					// list, skipping the server being removed
					membersRole := ""
					if len(buf) > 0 {
						for _, smartMember := range buf {
							if smartMember["dns_name"].(string) != serverName {
								membersRole += smartMember["dns_name"].(string) + "&" + smartMember["dns_role"].(string) + ";"
							}
						}
					}

					if dnssmartmembersupdate(smartName, membersRole, meta) {
						return true
					}
					return false
				}

				// Log the error
				if len(buf) > 0 {
					if errMsg, errExist := buf[0]["errmsg"].(string); errExist {
						log.Printf("[DEBUG] SOLIDServer - Unable to retrieve members list of the DNS SMART: %s (%s)\n", smartName, errMsg)
					}
				} else {
					log.Printf("[DEBUG] SOLIDServer - Unable to retrieve members list of the DNS SMART: %s\n", smartName)
				}
			}
			return false
		}

		// Log the error
		if len(buf) > 0 {
			if errMsg, errExist := buf[0]["errmsg"].(string); errExist {
				log.Printf("[DEBUG] SOLIDServer - Unable to update the member list of the DNS SMART: %s (%s)\n", smartName, errMsg)
			}
		} else {
			log.Printf("[DEBUG] SOLIDServer - Unable to update the member list of the DNS SMART: %s\n", smartName)
		}
	}
	return false
}
|
package main
import (
"bufio"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
)
// cmdPullRequest describes the "pull-request" subcommand: usage, short and
// long help text, and the handler (pullRequest) that implements it.
var cmdPullRequest = &Command{
	Run:   pullRequest,
	Usage: "pull-request [-f] [TITLE|-i ISSUE] [-b BASE] [-h HEAD]",
	Short: "Open a pull request on GitHub",
	Long: `Opens a pull request on GitHub for the project that the "origin" remote
points to. The default head of the pull request is the current branch.
Both base and head of the pull request can be explicitly given in one of
the following formats: "branch", "owner:branch", "owner/repo:branch".

This command will abort operation if it detects that the current topic
branch has local commits that are not yet pushed to its upstream branch
on the remote. To skip this check, use -f.

If TITLE is omitted, a text editor will open in which title and body of
the pull request can be entered in the same manner as git commit message.

If instead of normal TITLE an issue number is given with -i, the pull
request will be attached to an existing GitHub issue. Alternatively, instead
of title you can paste a full URL to an issue on GitHub.
`,
}
// flagPullRequestBase and flagPullRequestHead hold the -b and -h flag values.
var flagPullRequestBase, flagPullRequestHead string

// init registers the -b (base, default "master") and -h (head) flags.
// The head default is fetched from git at init time via FetchGitHead;
// its error is ignored, leaving an empty default on failure.
func init() {
	head, _ := FetchGitHead()
	cmdPullRequest.Flag.StringVar(&flagPullRequestBase, "b", "master", "BASE")
	cmdPullRequest.Flag.StringVar(&flagPullRequestHead, "h", head, "HEAD")
}
// pullRequest implements the pull-request command: it seeds an editable
// message file with a template and the commit log, opens the user's editor,
// then creates the pull request on GitHub and prints its URL.
func pullRequest(cmd *Command, args []string) {
	repo := NewRepo()
	repo.Base = flagPullRequestBase
	repo.Head = flagPullRequestHead

	messageFile := filepath.Join(repo.Dir, "PULLREQ_EDITMSG")

	// Seed the message file with the commented template and commit log.
	err := writePullRequestChanges(repo, messageFile)
	check(err)

	// Launch the user's editor on the message file and wait for it.
	editorPath, err := exec.LookPath(repo.Editor)
	check(err)

	editCmd := buildEditCommand(editorPath, messageFile)
	err = editCmd.Exec()
	check(err)

	title, body, err := readTitleAndBodyFromFile(messageFile)
	check(err)

	// An empty title means the user did not write a message; abort.
	if len(title) == 0 {
		log.Fatal("Aborting due to empty pull request title")
	}

	params := PullRequestParams{title, body, flagPullRequestBase, flagPullRequestHead}
	gh := NewGitHub()
	pullRequestResponse, err := gh.CreatePullRequest(repo.Owner, repo.Project, params)
	check(err)

	fmt.Println(pullRequestResponse.HtmlUrl)
}
// writePullRequestChanges seeds messageFile with a commented template the
// user edits to supply the pull request title and description. When a
// commit log between base and head is available it is appended as a
// commented "Changes" section. Returns any file-write error.
func writePullRequestChanges(repo *Repo, messageFile string) error {
	// FIX: corrected user-visible typo "reuqest" -> "request".
	message := `
# Requesting a pull to %s from %s
#
# Write a message for this pull request. The first block
# of the text is the title and the rest is description.%s
`
	startRegexp := regexp.MustCompilePOSIX("^")
	endRegexp := regexp.MustCompilePOSIX(" +$")

	commitLogs, _ := FetchGitCommitLogs(repo.Base, repo.Head)

	var changesMsg string
	if len(commitLogs) > 0 {
		commitLogs = strings.TrimSpace(commitLogs)
		// Prefix every line with "# " and strip trailing spaces so the log
		// reads as comments in the editor.
		commitLogs = startRegexp.ReplaceAllString(commitLogs, "# ")
		commitLogs = endRegexp.ReplaceAllString(commitLogs, "")
		changesMsg = `
#
# Changes:
#
%s
`
		changesMsg = fmt.Sprintf(changesMsg, commitLogs)
	}

	message = fmt.Sprintf(message, repo.FullBase(), repo.FullHead(), changesMsg)

	return ioutil.WriteFile(messageFile, []byte(message), 0644)
}
// buildEditCommand prepares the editor invocation used to compose the pull
// request message. For vim-family editors (vim, gvim, mvim) it also sets
// the gitcommit filetype for the buffer.
func buildEditCommand(editorPath, messageFile string) *ExecCmd {
	cmd := NewExecCmd(editorPath)
	vimLike := regexp.MustCompile("[mg]?vi[m]$")
	if vimLike.MatchString(editorPath) {
		cmd.WithArg("-c")
		cmd.WithArg("set ft=gitcommit")
	}
	cmd.WithArg(messageFile)
	return cmd
}
// readTitleAndBodyFromFile opens the edited message file and extracts the
// pull request title and body from it. Returns the open error, if any.
func readTitleAndBodyFromFile(messageFile string) (title, body string, err error) {
	f, err := os.Open(messageFile)
	// FIX: check the error before deferring Close; the original deferred
	// Close on a possibly-nil *os.File.
	if err != nil {
		return "", "", err
	}
	defer f.Close()

	reader := bufio.NewReader(f)
	return readTitleAndBody(reader)
}
// readTitleAndBody splits the message into a title (the first block of
// non-blank text, joined with spaces) and a body (everything after it,
// joined with newlines), stopping at the first comment line ("#").
// The returned error is always nil.
func readTitleAndBody(reader *bufio.Reader) (title, body string, err error) {
	nonBlank := regexp.MustCompile("\\S")
	var titleLines, bodyLines []string

	for {
		line, readErr := readln(reader)
		if readErr != nil {
			break
		}
		if strings.HasPrefix(line, "#") {
			break
		}
		if len(bodyLines) == 0 && nonBlank.MatchString(line) {
			titleLines = append(titleLines, line)
		} else {
			bodyLines = append(bodyLines, line)
		}
	}

	title = strings.TrimSpace(strings.Join(titleLines, " "))
	body = strings.TrimSpace(strings.Join(bodyLines, "\n"))
	return title, body, nil
}
func readln(r *bufio.Reader) (string, error) {
var (
isPrefix bool = true
err error = nil
line, ln []byte
)
for isPrefix && err == nil {
line, isPrefix, err = r.ReadLine()
ln = append(ln, line...)
}
return string(ln), err
}
Move head fetching to command execution
package main
import (
"bufio"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
)
// cmdPullRequest implements the "pull-request" subcommand, which opens a
// pull request on GitHub for the project the "origin" remote points to.
var cmdPullRequest = &Command{
	Run:   pullRequest,
	Usage: "pull-request [-f] [TITLE|-i ISSUE] [-b BASE] [-h HEAD]",
	Short: "Open a pull request on GitHub",
	Long: `Opens a pull request on GitHub for the project that the "origin" remote
points to. The default head of the pull request is the current branch.
Both base and head of the pull request can be explicitly given in one of
the following formats: "branch", "owner:branch", "owner/repo:branch".
This command will abort operation if it detects that the current topic
branch has local commits that are not yet pushed to its upstream branch
on the remote. To skip this check, use -f.
If TITLE is omitted, a text editor will open in which title and body of
the pull request can be entered in the same manner as git commit message.
If instead of normal TITLE an issue number is given with -i, the pull
request will be attached to an existing GitHub issue. Alternatively, instead
of title you can paste a full URL to an issue on GitHub.
`,
}
// Values of the -b (base branch) and -h (head branch) flags.
var flagPullRequestBase, flagPullRequestHead string

// init registers the pull-request flags on the command's flag set.
func init() {
	cmdPullRequest.Flag.StringVar(&flagPullRequestBase, "b", "master", "BASE")
	cmdPullRequest.Flag.StringVar(&flagPullRequestHead, "h", "", "HEAD")
}
// pullRequest drives the pull-request command: it writes a message
// template, opens the user's editor on it, parses title/body back out, and
// creates the PR, printing its URL on success. Any failure aborts via
// check().
func pullRequest(cmd *Command, args []string) {
	repo := NewRepo()
	repo.Base = flagPullRequestBase
	// Only override the head when -h was given; otherwise keep the
	// repo's default head.
	if flagPullRequestHead != "" {
		repo.Head = flagPullRequestHead
	}
	// Pre-populate the editable file with the commented template.
	messageFile := filepath.Join(repo.Dir, "PULLREQ_EDITMSG")
	err := writePullRequestChanges(repo, messageFile)
	check(err)
	editorPath, err := exec.LookPath(repo.Editor)
	check(err)
	editCmd := buildEditCommand(editorPath, messageFile)
	err = editCmd.Exec()
	check(err)
	title, body, err := readTitleAndBodyFromFile(messageFile)
	check(err)
	if len(title) == 0 {
		log.Fatal("Aborting due to empty pull request title")
	}
	params := PullRequestParams{title, body, flagPullRequestBase, flagPullRequestHead}
	gh := NewGitHub()
	pullRequestResponse, err := gh.CreatePullRequest(repo.Owner, repo.Project, params)
	check(err)
	fmt.Println(pullRequestResponse.HtmlUrl)
}
// writePullRequestChanges writes the editable pull-request template to
// messageFile. The template names the base and head refs and, when commits
// exist between them, appends the commit log as commented-out lines.
// Fixes a user-visible typo in the template ("reuqest" -> "request").
func writePullRequestChanges(repo *Repo, messageFile string) error {
	message := `
# Requesting a pull to %s from %s
#
# Write a message for this pull request. The first block
# of the text is the title and the rest is description.%s
`
	startRegexp := regexp.MustCompilePOSIX("^")
	endRegexp := regexp.MustCompilePOSIX(" +$")
	// Best-effort: an empty log simply omits the Changes section.
	commitLogs, _ := FetchGitCommitLogs(repo.Base, repo.Head)
	var changesMsg string
	if len(commitLogs) > 0 {
		commitLogs = strings.TrimSpace(commitLogs)
		// Prefix every line with "# " so the log stays commented out,
		// then strip trailing whitespace left on blank lines.
		commitLogs = startRegexp.ReplaceAllString(commitLogs, "# ")
		commitLogs = endRegexp.ReplaceAllString(commitLogs, "")
		changesMsg = `
#
# Changes:
#
%s
`
		changesMsg = fmt.Sprintf(changesMsg, commitLogs)
	}
	message = fmt.Sprintf(message, repo.FullBase(), repo.FullHead(), changesMsg)
	return ioutil.WriteFile(messageFile, []byte(message), 0644)
}
// buildEditCommand prepares the command that launches the user's editor on
// messageFile. Editors that look like vi/vim (including gvim and mvim) get
// the buffer filetype forced to gitcommit for commit-style highlighting.
func buildEditCommand(editorPath, messageFile string) *ExecCmd {
	cmd := NewExecCmd(editorPath)
	isVimLike := regexp.MustCompile("[mg]?vi[m]$").MatchString(editorPath)
	if isVimLike {
		cmd.WithArg("-c")
		cmd.WithArg("set ft=gitcommit")
	}
	cmd.WithArg(messageFile)
	return cmd
}
// readTitleAndBodyFromFile opens messageFile and parses a pull-request
// title and body out of it. See readTitleAndBody for the parsing rules.
func readTitleAndBodyFromFile(messageFile string) (title, body string, err error) {
	f, err := os.Open(messageFile)
	if err != nil {
		return "", "", err
	}
	// Register the Close only after the error check, so a failed Open
	// never leaves a deferred call on an invalid handle.
	defer f.Close()
	reader := bufio.NewReader(f)
	return readTitleAndBody(reader)
}
// readTitleAndBody consumes lines from reader until EOF or the first
// comment line ("#..."). Leading non-blank lines form the title (joined by
// spaces); everything after the first blank line forms the body (joined by
// newlines). Both results are whitespace-trimmed. Read errors (including
// EOF) terminate parsing but are not reported to the caller.
func readTitleAndBody(reader *bufio.Reader) (title, body string, err error) {
	nonBlank := regexp.MustCompile("\\S")
	var titleLines, bodyLines []string
	for {
		var line string
		line, err = readln(reader)
		if err != nil {
			break
		}
		if strings.HasPrefix(line, "#") {
			break
		}
		if len(bodyLines) == 0 && nonBlank.MatchString(line) {
			titleLines = append(titleLines, line)
		} else {
			bodyLines = append(bodyLines, line)
		}
	}
	title = strings.TrimSpace(strings.Join(titleLines, " "))
	body = strings.TrimSpace(strings.Join(bodyLines, "\n"))
	return title, body, nil
}
func readln(r *bufio.Reader) (string, error) {
var (
isPrefix bool = true
err error = nil
line, ln []byte
)
for isPrefix && err == nil {
line, isPrefix, err = r.ReadLine()
ln = append(ln, line...)
}
return string(ln), err
}
|
// Package push sends notifications over HTTP/2 to
// Apple's Push Notification Service.
package push
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
"time"
"golang.org/x/net/http2"
)
// Apple host locations for configuring Service. The :2197 variants expose
// the same endpoints on an alternate port.
const (
	Development     = "https://api.development.push.apple.com"
	Development2197 = "https://api.development.push.apple.com:2197"
	Production      = "https://api.push.apple.com"
	Production2197  = "https://api.push.apple.com:2197"
)

// maxPayload is the largest notification payload Push will accept.
const maxPayload = 4096 // 4KB at most
// Service is the Apple Push Notification Service that you send notifications to.
type Service struct {
	Host   string       // base URL of the APNs endpoint (see host constants)
	Client *http.Client // client used for requests, e.g. from NewClient
}
// NewService creates a new service to connect to APN.
func NewService(client *http.Client, host string) *Service {
	s := new(Service)
	s.Host = host
	s.Client = client
	return s
}
// NewClient sets up an HTTP/2 client for a certificate.
// The certificate is presented during the TLS handshake with APNs.
func NewClient(cert tls.Certificate) (*http.Client, error) {
	config := &tls.Config{
		Certificates: []tls.Certificate{cert},
	}
	// Index the certificate by name for handshake selection.
	// NOTE(review): BuildNameToCertificate is deprecated in newer Go
	// releases (the library selects automatically) — harmless here.
	config.BuildNameToCertificate()
	transport := &http.Transport{TLSClientConfig: config}
	if err := http2.ConfigureTransport(transport); err != nil {
		return nil, err
	}
	return &http.Client{Transport: transport}, nil
}
// Push sends a notification and waits for a response.
//
// On success it returns the value of the "apns-id" response header.
// Payloads larger than maxPayload are rejected locally without contacting
// Apple.
func (s *Service) Push(deviceToken string, headers *Headers, payload []byte) (string, error) {
	// check payload length before even hitting Apple.
	if len(payload) > maxPayload {
		return "", &Error{
			Reason: ErrPayloadTooLarge,
			Status: http.StatusRequestEntityTooLarge,
		}
	}
	urlStr := fmt.Sprintf("%v/3/device/%v", s.Host, deviceToken)
	req, err := http.NewRequest("POST", urlStr, bytes.NewReader(payload))
	if err != nil {
		return "", err
	}
	req.Header.Set("Content-Type", "application/json")
	headers.set(req.Header)
	// NOTE(review): calling the Transport directly (instead of Client.Do)
	// surfaces http2.GoAwayError unwrapped, but it also bypasses the
	// client's redirect policy and timeout — confirm this is intended.
	tr := s.Client.Transport
	if tr == nil {
		tr = http.DefaultTransport
	}
	resp, err := tr.RoundTrip(req)
	if err != nil {
		if e, ok := err.(http2.GoAwayError); ok {
			// parse DebugData as JSON. no status code known (0)
			return "", parseErrorResponse(strings.NewReader(e.DebugData), 0)
		}
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusOK {
		return resp.Header.Get("apns-id"), nil
	}
	return "", parseErrorResponse(resp.Body, resp.StatusCode)
}
// parseErrorResponse decodes an APNs JSON error body into an *Error
// carrying the mapped reason, the HTTP status, and (for 410 responses) the
// device-unregistered timestamp. A JSON decode failure is returned as-is.
func parseErrorResponse(body io.Reader, statusCode int) error {
	var payload struct {
		Reason    string `json:"reason"`
		Timestamp int64  `json:"timestamp"`
	}
	if err := json.NewDecoder(body).Decode(&payload); err != nil {
		return err
	}
	result := &Error{
		Reason: mapErrorReason(payload.Reason),
		Status: statusCode,
	}
	if payload.Timestamp != 0 {
		// APNs reports milliseconds; time.Unix takes seconds.
		result.Timestamp = time.Unix(payload.Timestamp/1000, 0).UTC()
	}
	return result
}
go back to using Client.Do (#73)
ref #72
// Package push sends notifications over HTTP/2 to
// Apple's Push Notification Service.
package push
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"time"
"golang.org/x/net/http2"
)
// Apple host locations for configuring Service. The :2197 variants expose
// the same endpoints on an alternate port.
const (
	Development     = "https://api.development.push.apple.com"
	Development2197 = "https://api.development.push.apple.com:2197"
	Production      = "https://api.push.apple.com"
	Production2197  = "https://api.push.apple.com:2197"
)

// maxPayload is the largest notification payload Push will accept.
const maxPayload = 4096 // 4KB at most
// Service is the Apple Push Notification Service that you send notifications to.
type Service struct {
	Host   string       // base URL of the APNs endpoint (see host constants)
	Client *http.Client // client used for requests, e.g. from NewClient
}
// NewService creates a new service to connect to APN.
func NewService(client *http.Client, host string) *Service {
	s := new(Service)
	s.Host = host
	s.Client = client
	return s
}
// NewClient sets up an HTTP/2 client for a certificate.
// The certificate is presented during the TLS handshake with APNs.
func NewClient(cert tls.Certificate) (*http.Client, error) {
	config := &tls.Config{
		Certificates: []tls.Certificate{cert},
	}
	// Index the certificate by name for handshake selection.
	// NOTE(review): BuildNameToCertificate is deprecated in newer Go
	// releases (the library selects automatically) — harmless here.
	config.BuildNameToCertificate()
	transport := &http.Transport{TLSClientConfig: config}
	if err := http2.ConfigureTransport(transport); err != nil {
		return nil, err
	}
	return &http.Client{Transport: transport}, nil
}
// Push sends a notification and waits for a response.
//
// On success it returns the value of the "apns-id" response header.
// Payloads larger than maxPayload are rejected locally without contacting
// Apple.
func (s *Service) Push(deviceToken string, headers *Headers, payload []byte) (string, error) {
	// check payload length before even hitting Apple.
	if len(payload) > maxPayload {
		return "", &Error{
			Reason: ErrPayloadTooLarge,
			Status: http.StatusRequestEntityTooLarge,
		}
	}
	urlStr := fmt.Sprintf("%v/3/device/%v", s.Host, deviceToken)
	req, err := http.NewRequest("POST", urlStr, bytes.NewReader(payload))
	if err != nil {
		return "", err
	}
	req.Header.Set("Content-Type", "application/json")
	headers.set(req.Header)
	resp, err := s.Client.Do(req)
	if err != nil {
		// Client.Do wraps transport errors in *url.Error; unwrap it to
		// reach an http2.GoAwayError, whose DebugData can carry an APNs
		// error body even though no HTTP status is available (hence 0).
		if e, ok := err.(*url.Error); ok {
			if e, ok := e.Err.(http2.GoAwayError); ok {
				// parse DebugData as JSON. no status code known (0)
				return "", parseErrorResponse(strings.NewReader(e.DebugData), 0)
			}
		}
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusOK {
		return resp.Header.Get("apns-id"), nil
	}
	return "", parseErrorResponse(resp.Body, resp.StatusCode)
}
// parseErrorResponse decodes an APNs JSON error body into an *Error
// carrying the mapped reason, the HTTP status, and (for 410 responses) the
// device-unregistered timestamp. A JSON decode failure is returned as-is.
func parseErrorResponse(body io.Reader, statusCode int) error {
	var payload struct {
		Reason    string `json:"reason"`
		Timestamp int64  `json:"timestamp"`
	}
	if err := json.NewDecoder(body).Decode(&payload); err != nil {
		return err
	}
	result := &Error{
		Reason: mapErrorReason(payload.Reason),
		Status: statusCode,
	}
	if payload.Timestamp != 0 {
		// APNs reports milliseconds; time.Unix takes seconds.
		result.Timestamp = time.Unix(payload.Timestamp/1000, 0).UTC()
	}
	return result
}
|
package main
import (
"fmt"
"reflect"
"time"
cl "github.com/rdwilliamson/cl11"
"github.com/rdwilliamson/cl11/examples/utils"
"github.com/rdwilliamson/snippets"
)
func check(err error) {
if err != nil {
panic(err)
}
}
// kernel copies in[0..size) to out using a grid-stride loop. size and the
// loop index are declared as OpenCL long so they match the int64 argument
// the host passes via kernel.SetArguments (the previous int declaration
// mismatched the 8-byte host argument).
var kernel = `
__kernel void copy(__global float* in, __global float* out, long size)
{
	for (long id = get_global_id(0); id < size; id += get_global_size(0)) {
		out[id] = in[id];
	}
}
`
// main benchmarks a device-to-device copy kernel on every OpenCL device,
// verifying the copied data and printing the kernel's profiled duration.
func main() {
	platforms, err := cl.GetPlatforms()
	check(err)
	for _, p := range platforms {
		devices, err := p.GetDevices()
		check(err)
		for _, d := range devices {
			c, err := cl.CreateContext([]*cl.Device{d}, cl.ContextProperties{}, nil)
			check(err)
			progam, err := c.CreateProgramWithSource([]byte(kernel))
			check(err)
			err = progam.Build([]*cl.Device{d}, "")
			check(err)
			// NOTE: this local shadows the package-level kernel source.
			kernel, err := progam.CreateKernel("copy")
			check(err)
			// 256 MiB of float32s, capped at half the device's maximum
			// single allocation (two buffers are needed).
			size := int64(256 * 1024 * 1024 / 4)
			if size*4 > d.MaxMemAllocSize/2 {
				size = d.MaxMemAllocSize / 2 / 4
			}
			values := utils.RandomFloat32(int(size))
			inData, err := c.CreateDeviceBufferInitializedBy(cl.MemoryReadOnly, values)
			check(err)
			outData, err := c.CreateDeviceBuffer(size*4, cl.MemoryWriteOnly)
			check(err)
			err = kernel.SetArguments(inData, outData, size)
			check(err)
			cq, err := c.CreateCommandQueue(d, cl.QueueProfilingEnable)
			check(err)
			// Round the global size up to a multiple of the preferred
			// work-group size.
			localSize := kernel.WorkGroupInfo[0].PreferredWorkGroupSizeMultiple
			globalSize := int(size)
			if globalSize%localSize > 0 {
				globalSize = (globalSize/localSize + 1) * localSize
			}
			var kernelEvent cl.Event
			err = cq.EnqueueNDRangeKernel(kernel, nil, []int{globalSize}, []int{localSize}, nil, &kernelEvent)
			check(err)
			check(kernelEvent.Wait())
			check(kernelEvent.GetProfilingInfo())
			// Map the output buffer and verify the copy bit-for-bit.
			mb, err := cq.MapBuffer(outData, cl.Blocking, cl.MapRead, 0, size*4, nil, nil)
			check(err)
			equal := reflect.DeepEqual(values, mb.Float32Slice())
			var event cl.Event
			check(cq.UnmapBuffer(mb, nil, &event))
			check(event.Wait())
			if equal {
				fmt.Println(d.Name, time.Duration(kernelEvent.End-kernelEvent.Start), snippets.PrintBytes(int64(size*4)))
			} else {
				fmt.Println(d.Name, "values do not match")
			}
		}
	}
}
Fix argument type.
package main
import (
"fmt"
"reflect"
"time"
cl "github.com/rdwilliamson/cl11"
"github.com/rdwilliamson/cl11/examples/utils"
"github.com/rdwilliamson/snippets"
)
func check(err error) {
if err != nil {
panic(err)
}
}
// kernel copies in[0..size) to out using a grid-stride loop. size is a
// long so it matches the int64 argument passed via kernel.SetArguments.
var kernel = `
__kernel void copy(__global float* in, __global float* out, long size)
{
	for (long id = get_global_id(0); id < size; id += get_global_size(0)) {
		out[id] = in[id];
	}
}
`
// main benchmarks a device-to-device copy kernel on every OpenCL device,
// verifying the copied data and printing the kernel's profiled duration.
func main() {
	platforms, err := cl.GetPlatforms()
	check(err)
	for _, p := range platforms {
		devices, err := p.GetDevices()
		check(err)
		for _, d := range devices {
			c, err := cl.CreateContext([]*cl.Device{d}, cl.ContextProperties{}, nil)
			check(err)
			progam, err := c.CreateProgramWithSource([]byte(kernel))
			check(err)
			err = progam.Build([]*cl.Device{d}, "")
			check(err)
			// NOTE: this local shadows the package-level kernel source.
			kernel, err := progam.CreateKernel("copy")
			check(err)
			// 256 MiB of float32s, capped at half the device's maximum
			// single allocation (two buffers are needed).
			size := int64(256 * 1024 * 1024 / 4)
			if size*4 > d.MaxMemAllocSize/2 {
				size = d.MaxMemAllocSize / 2 / 4
			}
			values := utils.RandomFloat32(int(size))
			inData, err := c.CreateDeviceBufferInitializedBy(cl.MemoryReadOnly, values)
			check(err)
			outData, err := c.CreateDeviceBuffer(size*4, cl.MemoryWriteOnly)
			check(err)
			err = kernel.SetArguments(inData, outData, size)
			check(err)
			cq, err := c.CreateCommandQueue(d, cl.QueueProfilingEnable)
			check(err)
			// Round the global size up to a multiple of the preferred
			// work-group size.
			localSize := kernel.WorkGroupInfo[0].PreferredWorkGroupSizeMultiple
			globalSize := int(size)
			if globalSize%localSize > 0 {
				globalSize = (globalSize/localSize + 1) * localSize
			}
			var kernelEvent cl.Event
			err = cq.EnqueueNDRangeKernel(kernel, nil, []int{globalSize}, []int{localSize}, nil, &kernelEvent)
			check(err)
			check(kernelEvent.Wait())
			check(kernelEvent.GetProfilingInfo())
			// Map the output buffer and verify the copy bit-for-bit.
			mb, err := cq.MapBuffer(outData, cl.Blocking, cl.MapRead, 0, size*4, nil, nil)
			check(err)
			equal := reflect.DeepEqual(values, mb.Float32Slice())
			var event cl.Event
			check(cq.UnmapBuffer(mb, nil, &event))
			check(event.Wait())
			if equal {
				fmt.Println(d.Name, time.Duration(kernelEvent.End-kernelEvent.Start), snippets.PrintBytes(int64(size*4)))
			} else {
				fmt.Println(d.Name, "values do not match")
			}
		}
	}
}
|
// Copyright 2017 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"flag"
"fmt"
"log"
"os"
"path"
u "istio.io/test-infra/toolbox/util"
)
// Command-line flags and the shared GitHub clients built in init.
// Fixes the -update_ext_dep help-text typo ("dependences" -> "dependencies").
var (
	repo         = flag.String("repo", "", "Optional. Update dependencies of only this repository")
	owner        = flag.String("owner", "istio", "Github Owner or org")
	tokenFile    = flag.String("token_file", "", "File containing Github API Access Token")
	baseBranch   = flag.String("base_branch", "master", "Branch from which the deps update commit is based")
	hub          = flag.String("hub", "", "Where the testing images are hosted")
	updateExtDep = flag.Bool("update_ext_dep", false, "Updates external dependencies")

	githubClnt      *u.GithubClient
	githubEnvoyClnt *u.GithubClient
)
// Settings for the dependency-update bot: the deps manifest name, the PR
// boilerplate, artifact URL pieces, and the repos it knows how to update.
const (
	istioDepsFile         = "istio.deps"
	prTitlePrefix         = "[BOT PR] to update dependencies of "
	prBody                = "This PR will be merged automatically once checks are successful."
	dependencyUpdateLabel = "dependency-update"
	// CI Artifacts URLs
	// NOTE(review): "Artifcats" is misspelled; renaming would touch every
	// use site, so it is left as-is here.
	istioArtifcatsURL = "https://storage.googleapis.com/istio-artifacts/%s/%s/artifacts"
	istioctlSuffix    = "istioctl"
	debianSuffix      = "debs"
	// envoy
	envoyOwner    = "envoyproxy"
	envoyRepo     = "envoy"
	envoyRepoPath = envoyOwner + "/" + envoyRepo
	// Istio Repos
	istioRepo = "istio"
	pilotRepo = "pilot"
	authRepo  = "auth"
	mixerRepo = "mixer"
	proxyRepo = "proxy"
)
// Updates dependency objects in :deps to the latest stable version.
// Generates an MD5 digest of the latest dependencies, useful in avoiding making duplicate
// branches of the same code change.
// Returns a list of dependencies that were stale and have just been updated.
func updateDepSHAGetFingerPrint(repo string, deps *[]u.Dependency) (string, []u.Dependency, error) {
	var depChangeList []u.Dependency
	// Seed the digest with the target repo's own head plus the branch and
	// hub settings, so different configurations never collide.
	digest, err := githubClnt.GetHeadCommitSHA(repo, *baseBranch)
	if err != nil {
		return "", depChangeList, err
	}
	digest += *baseBranch + *hub
	for i, dep := range *deps {
		var commitSHA string
		if dep.RepoName == envoyRepoPath {
			if *updateExtDep {
				// update envoy sha only when specified
				commitSHA, err = githubEnvoyClnt.GetHeadCommitSHA(envoyRepo, dep.ProdBranch)
				log.Printf("new envoy proxy sha is %s\n", commitSHA)
			} else {
				// otherwise skip update
				commitSHA = dep.LastStableSHA
				log.Printf("skipping update of envoy proxy sha is %s\n", commitSHA)
			}
		} else {
			commitSHA, err = githubClnt.GetHeadCommitSHA(dep.RepoName, dep.ProdBranch)
		}
		if err != nil {
			return "", depChangeList, err
		}
		digest += commitSHA
		// Record only the deps whose pinned SHA actually moved.
		if dep.LastStableSHA != commitSHA {
			(*deps)[i].LastStableSHA = commitSHA
			depChangeList = append(depChangeList, (*deps)[i])
		}
	}
	return u.GetMD5Hash(digest), depChangeList, nil
}
// generateArtifactURL builds the CI artifact location for repo at ref,
// i.e. .../istio-artifacts/<repo>/<ref>/artifacts/<suffix>.
func generateArtifactURL(repo, ref, suffix string) string {
	return fmt.Sprintf(istioArtifcatsURL+"/%s", repo, ref, suffix)
}
// Updates the list of dependencies in repo to the latest stable references.
// For the istio repo (when a hub is configured) it also runs
// install/updateVersion.sh with flags derived from the changed deps.
func updateDeps(repo string, deps *[]u.Dependency, depChangeList *[]u.Dependency) error {
	for _, dep := range *deps {
		// Some dependency entries only pin a SHA and name no file to
		// rewrite; skip those instead of failing on an empty path.
		if dep.File == "" {
			continue
		}
		if err := u.UpdateKeyValueInFile(dep.File, dep.Name, dep.LastStableSHA); err != nil {
			return err
		}
	}
	if repo != istioRepo || len(*hub) == 0 {
		return nil
	}
	// Build the updateVersion.sh argument list from the changed deps.
	args := ""
	for _, updatedDep := range *depChangeList {
		switch updatedDep.RepoName {
		case mixerRepo:
			args += fmt.Sprintf("-x %s,%s ", *hub, updatedDep.LastStableSHA)
		case pilotRepo:
			istioctlURL := generateArtifactURL(pilotRepo, updatedDep.LastStableSHA, istioctlSuffix)
			debianURL := generateArtifactURL(pilotRepo, updatedDep.LastStableSHA, debianSuffix)
			args += fmt.Sprintf("-p %s,%s -i %s -P %s ", *hub, updatedDep.LastStableSHA, istioctlURL, debianURL)
		case authRepo:
			debianURL := generateArtifactURL(authRepo, updatedDep.LastStableSHA, debianSuffix)
			args += fmt.Sprintf("-c %s,%s -A %s ", *hub, updatedDep.LastStableSHA, debianURL)
		case proxyRepo:
			debianURL := generateArtifactURL(proxyRepo, updatedDep.LastStableSHA, debianSuffix)
			args += fmt.Sprintf("-r %s -E %s ", updatedDep.LastStableSHA, debianURL)
		default:
			return fmt.Errorf("unknown dependency: %s", updatedDep.Name)
		}
	}
	cmd := fmt.Sprintf("./install/updateVersion.sh %s", args)
	_, err := u.Shell(cmd)
	return err
}
// Updates the given repository so that it uses the latest dependency references,
// pushes a new branch to remote, and creates a pull request on the base branch,
// which is auto-merged after presubmit.
func updateDependenciesOf(repo string) error {
	log.Printf("Updating dependencies of %s\n", repo)
	saveDir, err := os.Getwd()
	if err != nil {
		return err
	}
	repoDir, err := u.CloneRepoCheckoutBranch(githubClnt, repo, *baseBranch, "", "go/src/istio.io")
	if err != nil {
		return err
	}
	// Always restore the working directory and delete the clone, even on
	// failure part-way through.
	defer func() {
		if err = os.Chdir(saveDir); err != nil {
			log.Fatalf("Error during chdir: %v\n", err)
		}
		if err = u.RemoveLocalRepo("go"); err != nil {
			log.Fatalf("Error during clean up: %v\n", err)
		}
	}()
	deps, err := u.DeserializeDeps(istioDepsFile)
	if err != nil {
		return err
	}
	fingerPrint, depChangeList, err := updateDepSHAGetFingerPrint(repo, &deps)
	if err != nil {
		return err
	}
	// The fingerprint makes the branch name deterministic for a given set
	// of dependency SHAs, so duplicate update branches are detectable.
	branch := "autoUpdateDeps_" + fingerPrint
	// First try to cleanup old PRs
	if err = githubClnt.CloseIdlePullRequests(
		prTitlePrefix, repo, *baseBranch); err != nil {
		log.Printf("error while closing idle PRs: %v\n", err)
	}
	// If the same branch still exists (which means it's not old enough), leave it there and don't do anything in this cycle
	exists, err := githubClnt.ExistBranch(repo, branch)
	if err != nil {
		return err
	}
	if exists {
		log.Printf("Branch %s exists", branch)
		return nil
	}
	if _, err = u.Shell("git checkout -b " + branch); err != nil {
		return err
	}
	if err = updateDeps(repo, &deps, &depChangeList); err != nil {
		return err
	}
	if err = u.SerializeDeps(istioDepsFile, &deps); err != nil {
		return err
	}
	if repo == istioRepo && *updateExtDep {
		// while depend update can introduce new changes,
		// introduce them only when requested
		goPath := path.Join(repoDir, "../../..")
		env := "GOPATH=" + goPath
		if _, err = u.Shell(env + " make depend.update"); err != nil {
			return err
		}
	}
	if _, err = u.Shell("git diff --quiet HEAD"); err == nil {
		// it exited without error, nothing to commit
		log.Printf("%s is up to date. No commits are made.", repo)
		return nil
	}
	// git is dirty so commit
	if err = u.CreateCommitPushToRemote(branch, "Update_Dependencies"); err != nil {
		return err
	}
	prTitle := prTitlePrefix + repo
	pr, err := githubClnt.CreatePullRequest(prTitle, prBody, "", branch, *baseBranch, repo)
	if err != nil {
		return err
	}
	if err := githubClnt.AddAutoMergeLabelsToPR(repo, pr); err != nil {
		return err
	}
	return githubClnt.AddlabelsToPR(repo, pr, dependencyUpdateLabel)
}
// init parses the flags and constructs the two GitHub clients (one for the
// configured owner, one for the envoyproxy org).
// NOTE(review): calling flag.Parse and log.Fatalf from init makes the
// package hard to test; the `return` after log.Fatalf is unreachable.
func init() {
	flag.Parse()
	if *tokenFile == "" {
		log.Fatalf("token_file not provided\n")
		return
	}
	token, err := u.GetAPITokenFromFile(*tokenFile)
	if err != nil {
		log.Fatalf("Error accessing user supplied token_file: %v\n", err)
	}
	githubClnt = u.NewGithubClient(*owner, token)
	githubEnvoyClnt = u.NewGithubClient(envoyOwner, token)
}
// main updates dependencies of a single repo when -repo is given,
// otherwise of every repo listed under the configured owner/org.
func main() {
	if *repo != "" { // only update dependencies of this repo
		if err := updateDependenciesOf(*repo); err != nil {
			log.Fatalf("Failed to update dependency: %v\n", err)
		}
	} else { // update dependencies of all repos in the istio project
		repos, err := githubClnt.ListRepos()
		if err != nil {
			log.Fatalf("Error when fetching list of repos: %v\n", err)
			return
		}
		for _, r := range repos {
			if err := updateDependenciesOf(r); err != nil {
				log.Fatalf("Failed to update dependency: %v\n", err)
			}
		}
	}
}
Update main.go (#730)
Automatic merge from submit-queue.
Skip updating dependency file if it is not specified
This allows us to update istio.deps with new SHAs without requiring any other dependency files; used for PROXY_TAG in the istio repo.
// Copyright 2017 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"flag"
"fmt"
"log"
"os"
"path"
u "istio.io/test-infra/toolbox/util"
)
// Command-line flags and the shared GitHub clients built in init.
// Fixes the -update_ext_dep help-text typo ("dependences" -> "dependencies").
var (
	repo         = flag.String("repo", "", "Optional. Update dependencies of only this repository")
	owner        = flag.String("owner", "istio", "Github Owner or org")
	tokenFile    = flag.String("token_file", "", "File containing Github API Access Token")
	baseBranch   = flag.String("base_branch", "master", "Branch from which the deps update commit is based")
	hub          = flag.String("hub", "", "Where the testing images are hosted")
	updateExtDep = flag.Bool("update_ext_dep", false, "Updates external dependencies")

	githubClnt      *u.GithubClient
	githubEnvoyClnt *u.GithubClient
)
// Settings for the dependency-update bot: the deps manifest name, the PR
// boilerplate, artifact URL pieces, and the repos it knows how to update.
const (
	istioDepsFile         = "istio.deps"
	prTitlePrefix         = "[BOT PR] to update dependencies of "
	prBody                = "This PR will be merged automatically once checks are successful."
	dependencyUpdateLabel = "dependency-update"
	// CI Artifacts URLs
	// NOTE(review): "Artifcats" is misspelled; renaming would touch every
	// use site, so it is left as-is here.
	istioArtifcatsURL = "https://storage.googleapis.com/istio-artifacts/%s/%s/artifacts"
	istioctlSuffix    = "istioctl"
	debianSuffix      = "debs"
	// envoy
	envoyOwner    = "envoyproxy"
	envoyRepo     = "envoy"
	envoyRepoPath = envoyOwner + "/" + envoyRepo
	// Istio Repos
	istioRepo = "istio"
	pilotRepo = "pilot"
	authRepo  = "auth"
	mixerRepo = "mixer"
	proxyRepo = "proxy"
)
// Updates dependency objects in :deps to the latest stable version.
// Generates an MD5 digest of the latest dependencies, useful in avoiding making duplicate
// branches of the same code change.
// Returns a list of dependencies that were stale and have just been updated.
func updateDepSHAGetFingerPrint(repo string, deps *[]u.Dependency) (string, []u.Dependency, error) {
	var depChangeList []u.Dependency
	// Seed the digest with the target repo's own head plus the branch and
	// hub settings, so different configurations never collide.
	digest, err := githubClnt.GetHeadCommitSHA(repo, *baseBranch)
	if err != nil {
		return "", depChangeList, err
	}
	digest += *baseBranch + *hub
	for i, dep := range *deps {
		var commitSHA string
		if dep.RepoName == envoyRepoPath {
			if *updateExtDep {
				// update envoy sha only when specified
				commitSHA, err = githubEnvoyClnt.GetHeadCommitSHA(envoyRepo, dep.ProdBranch)
				log.Printf("new envoy proxy sha is %s\n", commitSHA)
			} else {
				// otherwise skip update
				commitSHA = dep.LastStableSHA
				log.Printf("skipping update of envoy proxy sha is %s\n", commitSHA)
			}
		} else {
			commitSHA, err = githubClnt.GetHeadCommitSHA(dep.RepoName, dep.ProdBranch)
		}
		if err != nil {
			return "", depChangeList, err
		}
		digest += commitSHA
		// Record only the deps whose pinned SHA actually moved.
		if dep.LastStableSHA != commitSHA {
			(*deps)[i].LastStableSHA = commitSHA
			depChangeList = append(depChangeList, (*deps)[i])
		}
	}
	return u.GetMD5Hash(digest), depChangeList, nil
}
// generateArtifactURL builds the CI artifact location for repo at ref,
// i.e. .../istio-artifacts/<repo>/<ref>/artifacts/<suffix>.
func generateArtifactURL(repo, ref, suffix string) string {
	return fmt.Sprintf(istioArtifcatsURL+"/%s", repo, ref, suffix)
}
// Updates the list of dependencies in repo to the latest stable references.
// For the istio repo (when a hub is configured) it also runs
// install/updateVersion.sh with flags derived from the changed deps.
func updateDeps(repo string, deps *[]u.Dependency, depChangeList *[]u.Dependency) error {
	for _, dep := range *deps {
		// Dependencies without a file only pin a SHA; nothing to rewrite.
		if dep.File == "" {
			continue
		}
		if err := u.UpdateKeyValueInFile(dep.File, dep.Name, dep.LastStableSHA); err != nil {
			return err
		}
	}
	if repo != istioRepo || len(*hub) == 0 {
		return nil
	}
	// Build the updateVersion.sh argument list from the changed deps.
	args := ""
	for _, updatedDep := range *depChangeList {
		switch updatedDep.RepoName {
		case mixerRepo:
			args += fmt.Sprintf("-x %s,%s ", *hub, updatedDep.LastStableSHA)
		case pilotRepo:
			istioctlURL := generateArtifactURL(pilotRepo, updatedDep.LastStableSHA, istioctlSuffix)
			debianURL := generateArtifactURL(pilotRepo, updatedDep.LastStableSHA, debianSuffix)
			args += fmt.Sprintf("-p %s,%s -i %s -P %s ", *hub, updatedDep.LastStableSHA, istioctlURL, debianURL)
		case authRepo:
			debianURL := generateArtifactURL(authRepo, updatedDep.LastStableSHA, debianSuffix)
			args += fmt.Sprintf("-c %s,%s -A %s ", *hub, updatedDep.LastStableSHA, debianURL)
		case proxyRepo:
			debianURL := generateArtifactURL(proxyRepo, updatedDep.LastStableSHA, debianSuffix)
			args += fmt.Sprintf("-r %s -E %s ", updatedDep.LastStableSHA, debianURL)
		default:
			return fmt.Errorf("unknown dependency: %s", updatedDep.Name)
		}
	}
	cmd := fmt.Sprintf("./install/updateVersion.sh %s", args)
	_, err := u.Shell(cmd)
	return err
}
// Updates the given repository so that it uses the latest dependency references,
// pushes a new branch to remote, and creates a pull request on the base branch,
// which is auto-merged after presubmit.
func updateDependenciesOf(repo string) error {
	log.Printf("Updating dependencies of %s\n", repo)
	saveDir, err := os.Getwd()
	if err != nil {
		return err
	}
	repoDir, err := u.CloneRepoCheckoutBranch(githubClnt, repo, *baseBranch, "", "go/src/istio.io")
	if err != nil {
		return err
	}
	// Always restore the working directory and delete the clone, even on
	// failure part-way through.
	defer func() {
		if err = os.Chdir(saveDir); err != nil {
			log.Fatalf("Error during chdir: %v\n", err)
		}
		if err = u.RemoveLocalRepo("go"); err != nil {
			log.Fatalf("Error during clean up: %v\n", err)
		}
	}()
	deps, err := u.DeserializeDeps(istioDepsFile)
	if err != nil {
		return err
	}
	fingerPrint, depChangeList, err := updateDepSHAGetFingerPrint(repo, &deps)
	if err != nil {
		return err
	}
	// The fingerprint makes the branch name deterministic for a given set
	// of dependency SHAs, so duplicate update branches are detectable.
	branch := "autoUpdateDeps_" + fingerPrint
	// First try to cleanup old PRs
	if err = githubClnt.CloseIdlePullRequests(
		prTitlePrefix, repo, *baseBranch); err != nil {
		log.Printf("error while closing idle PRs: %v\n", err)
	}
	// If the same branch still exists (which means it's not old enough), leave it there and don't do anything in this cycle
	exists, err := githubClnt.ExistBranch(repo, branch)
	if err != nil {
		return err
	}
	if exists {
		log.Printf("Branch %s exists", branch)
		return nil
	}
	if _, err = u.Shell("git checkout -b " + branch); err != nil {
		return err
	}
	if err = updateDeps(repo, &deps, &depChangeList); err != nil {
		return err
	}
	if err = u.SerializeDeps(istioDepsFile, &deps); err != nil {
		return err
	}
	if repo == istioRepo && *updateExtDep {
		// while depend update can introduce new changes,
		// introduce them only when requested
		goPath := path.Join(repoDir, "../../..")
		env := "GOPATH=" + goPath
		if _, err = u.Shell(env + " make depend.update"); err != nil {
			return err
		}
	}
	if _, err = u.Shell("git diff --quiet HEAD"); err == nil {
		// it exited without error, nothing to commit
		log.Printf("%s is up to date. No commits are made.", repo)
		return nil
	}
	// git is dirty so commit
	if err = u.CreateCommitPushToRemote(branch, "Update_Dependencies"); err != nil {
		return err
	}
	prTitle := prTitlePrefix + repo
	pr, err := githubClnt.CreatePullRequest(prTitle, prBody, "", branch, *baseBranch, repo)
	if err != nil {
		return err
	}
	if err := githubClnt.AddAutoMergeLabelsToPR(repo, pr); err != nil {
		return err
	}
	return githubClnt.AddlabelsToPR(repo, pr, dependencyUpdateLabel)
}
// init parses the flags and constructs the two GitHub clients (one for the
// configured owner, one for the envoyproxy org).
// NOTE(review): calling flag.Parse and log.Fatalf from init makes the
// package hard to test; the `return` after log.Fatalf is unreachable.
func init() {
	flag.Parse()
	if *tokenFile == "" {
		log.Fatalf("token_file not provided\n")
		return
	}
	token, err := u.GetAPITokenFromFile(*tokenFile)
	if err != nil {
		log.Fatalf("Error accessing user supplied token_file: %v\n", err)
	}
	githubClnt = u.NewGithubClient(*owner, token)
	githubEnvoyClnt = u.NewGithubClient(envoyOwner, token)
}
// main updates dependencies of a single repo when -repo is given,
// otherwise of every repo listed under the configured owner/org.
func main() {
	if *repo != "" { // only update dependencies of this repo
		if err := updateDependenciesOf(*repo); err != nil {
			log.Fatalf("Failed to update dependency: %v\n", err)
		}
	} else { // update dependencies of all repos in the istio project
		repos, err := githubClnt.ListRepos()
		if err != nil {
			log.Fatalf("Error when fetching list of repos: %v\n", err)
			return
		}
		for _, r := range repos {
			if err := updateDependenciesOf(r); err != nil {
				log.Fatalf("Failed to update dependency: %v\n", err)
			}
		}
	}
}
|
// Copyright 2014 SteelSeries ApS. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This package implements a basic LISP interpretor for embedding in a go program for scripting.
// This file contains the system primitive functions.
package golisp
import (
"fmt"
"os"
"strings"
"time"
)
// RegisterSystemPrimitives installs the system-level primitives (file
// loading, sleeping, timing, output, string building, interning, and
// quitting) into the interpreter's primitive table. The numeric argument
// is the expected arity; -1 means variadic.
func RegisterSystemPrimitives() {
	MakePrimitiveFunction("load", 1, LoadFileImpl)
	MakePrimitiveFunction("sleep", 1, SleepImpl)
	MakePrimitiveFunction("millis", 0, MillisImpl)
	MakePrimitiveFunction("write-line", 1, WriteLineImpl)
	MakePrimitiveFunction("str", -1, MakeStringImpl)
	MakePrimitiveFunction("intern", 1, InternImpl)
	MakePrimitiveFunction("time", 1, TimeImpl)
	MakePrimitiveFunction("quit", 0, QuitImpl)
}
// LoadFileImpl implements the "load" primitive: evaluate the argument (so
// filenames can be computed at runtime rather than taken literally),
// require the result to be a string, and process the named file.
func LoadFileImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {
	filename, err := Eval(Car(args), env)
	if err != nil {
		return
	}
	if !StringP(filename) {
		err = ProcessError("Filename must be a string", env)
		return
	}
	return ProcessFile(StringValue(filename))
}
// QuitImpl implements the "quit" primitive: persist the REPL history and
// terminate the process. It never actually returns; note that os.Exit
// skips deferred functions.
func QuitImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {
	WriteHistoryToFile(".golisp_history")
	os.Exit(0)
	return
}
// SleepImpl implements the "sleep" primitive: evaluate the argument and
// pause the interpreter for that many milliseconds. A non-integer value is
// an error.
func SleepImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {
	n, err := Eval(Car(args), env)
	if err != nil {
		return
	}
	if !IntegerP(n) {
		err = ProcessError(fmt.Sprintf("Number expected, received %s", String(n)), env)
		return
	}
	millis := IntegerValue(n)
	time.Sleep(time.Duration(millis) * time.Millisecond)
	return
}
// MillisImpl implements the "millis" primitive: the current wall-clock
// time in milliseconds since the Unix epoch, as a Lisp integer.
func MillisImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {
	nowMillis := time.Now().UnixNano() / 1e6
	result = IntegerWithValue(int64(nowMillis))
	return
}
// WriteLineImpl implements the "write-line" primitive: evaluate the
// argument and print its printed form followed by a newline.
// NOTE(review): the builtin println writes to standard error, not stdout —
// confirm that is intended.
func WriteLineImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {
	data, err := Eval(Car(args), env)
	if err != nil {
		return
	}
	println(PrintString(data))
	return
}
// MakeStringImpl implements the variadic "str" primitive: evaluate every
// argument and concatenate their printed representations into one string.
//
// Two fixes over the original: the slice no longer starts with two empty
// strings (make([]string, 2) prepended them — invisible only because the
// join separator was ""), and an evaluation failure is now propagated
// instead of being silently dropped by a shadowed err.
func MakeStringImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {
	var pieces []string
	for cell := args; NotNilP(cell); cell = Cdr(cell) {
		var s *Data
		s, err = Eval(Car(cell), env)
		if err != nil {
			return nil, err
		}
		pieces = append(pieces, PrintString(s))
	}
	return StringWithValue(strings.Join(pieces, "")), nil
}
// TimeImpl implements the "time" primitive: it evaluates its body forms,
// prints the elapsed wall time, and returns the elapsed milliseconds.
func TimeImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {
	fmt.Printf("Starting timer.\n")
	startTime := time.Now()
	for cell := args; NotNilP(cell); cell = Cdr(cell) {
		result, err = Eval(Car(cell), env)
		if err != nil {
			// Bug fix: the original broke out and then overwrote
			// result with the elapsed time, reporting a timing for a
			// failed evaluation. Return the error immediately.
			return
		}
	}
	d := time.Since(startTime)
	fmt.Printf("Stopped timer.\nTook %v to run.\n", d)
	result = IntegerWithValue(d.Nanoseconds() / int64(time.Millisecond))
	return
}
// InternImpl implements the "intern" primitive: it evaluates its argument,
// which must be a string, and returns the symbol with that name.
func InternImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {
	sym, err := Eval(Car(args), env)
	if err != nil {
		return
	}
	if !StringP(sym) {
		err = ProcessError(fmt.Sprintf("intern expects a string, but received %s.", String(sym)), env)
		return
	}
	return SymbolWithName(StringValue(sym)), nil
}
The argument to load should be evaluated so that we can compute filenames.
// Copyright 2014 SteelSeries ApS. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This package implements a basic LISP interpreter for embedding in a go program for scripting.
// This file contains the system primitive functions.
package golisp
import (
"fmt"
"os"
"strings"
"time"
)
// RegisterSystemPrimitives installs the system-level primitives (load,
// sleep, millis, write-line, str, intern, time, quit) into the global
// primitive table. An arity of -1 marks a variadic primitive.
func RegisterSystemPrimitives() {
	MakePrimitiveFunction("load", 1, LoadFileImpl)
	MakePrimitiveFunction("sleep", 1, SleepImpl)
	MakePrimitiveFunction("millis", 0, MillisImpl)
	MakePrimitiveFunction("write-line", 1, WriteLineImpl)
	MakePrimitiveFunction("str", -1, MakeStringImpl)
	MakePrimitiveFunction("intern", 1, InternImpl)
	MakePrimitiveFunction("time", 1, TimeImpl)
	MakePrimitiveFunction("quit", 0, QuitImpl)
}
// LoadFileImpl implements the "load" primitive. The argument is evaluated
// first — so filenames can be computed — and must yield a string naming
// the file to process.
func LoadFileImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {
	// Bug fix: Eval returns (value, error); the original assigned both
	// results to a single variable, which does not compile, and the
	// evaluation error was never checked.
	filename, err := Eval(Car(args), env)
	if err != nil {
		return
	}
	if !StringP(filename) {
		err = ProcessError("Filename must be a string", env)
		return
	}
	return ProcessFile(StringValue(filename))
}
// QuitImpl implements the "quit" primitive: it persists the REPL history
// and terminates the process. It never returns to the caller.
func QuitImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {
	WriteHistoryToFile(".golisp_history")
	os.Exit(0)
	return // unreachable; satisfies the signature
}
// SleepImpl implements the "sleep" primitive: it evaluates its argument,
// which must be an integer number of milliseconds, and blocks that long.
func SleepImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {
	n, err := Eval(Car(args), env)
	if err != nil {
		return
	}
	if !IntegerP(n) {
		err = ProcessError(fmt.Sprintf("Number expected, received %s", String(n)), env)
		return
	}
	millis := IntegerValue(n)
	time.Sleep(time.Duration(millis) * time.Millisecond)
	return
}
// MillisImpl implements the "millis" primitive, returning the current
// wall-clock time as milliseconds since the Unix epoch.
func MillisImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {
	result = IntegerWithValue(int64(time.Now().UnixNano() / 1e6))
	return
}
// WriteLineImpl implements the "write-line" primitive: it evaluates its
// argument and writes the printed representation plus a newline to stdout.
func WriteLineImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {
	data, err := Eval(Car(args), env)
	if err != nil {
		return
	}
	// Use fmt.Println (stdout) rather than the builtin println, which
	// writes to stderr and is documented as a bootstrap-only facility.
	fmt.Println(PrintString(data))
	return
}
// MakeStringImpl implements the variadic "str" primitive: each argument is
// evaluated and its printed representation concatenated into one string.
func MakeStringImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {
	// Bug fixes vs. the original:
	//  - make([]string, 2) seeded the slice with two empty strings before
	//    appending; start empty instead.
	//  - the loop shadowed err, so an Eval failure was silently dropped
	//    and a partial string returned with a nil error.
	var pieces []string
	for cell := args; NotNilP(cell); cell = Cdr(cell) {
		s, evalErr := Eval(Car(cell), env)
		if evalErr != nil {
			return nil, evalErr
		}
		pieces = append(pieces, PrintString(s))
	}
	return StringWithValue(strings.Join(pieces, "")), nil
}
// TimeImpl implements the "time" primitive: it evaluates its body forms,
// prints the elapsed wall time, and returns the elapsed milliseconds.
func TimeImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {
	fmt.Printf("Starting timer.\n")
	startTime := time.Now()
	for cell := args; NotNilP(cell); cell = Cdr(cell) {
		result, err = Eval(Car(cell), env)
		if err != nil {
			// Bug fix: the original broke out and then overwrote
			// result with the elapsed time, reporting a timing for a
			// failed evaluation. Return the error immediately.
			return
		}
	}
	d := time.Since(startTime)
	fmt.Printf("Stopped timer.\nTook %v to run.\n", d)
	result = IntegerWithValue(d.Nanoseconds() / int64(time.Millisecond))
	return
}
// InternImpl implements the "intern" primitive: it evaluates its argument,
// which must be a string, and returns the symbol with that name.
func InternImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {
	sym, err := Eval(Car(args), env)
	if err != nil {
		return
	}
	if !StringP(sym) {
		err = ProcessError(fmt.Sprintf("intern expects a string, but received %s.", String(sym)), env)
		return
	}
	return SymbolWithName(StringValue(sym)), nil
}
|
package main
import (
"crypto/sha1"
"encoding/hex"
"encoding/json"
"fmt"
"net"
"net/http"
"os"
"os/exec"
"os/signal"
"path"
"strings"
"syscall"
"text/template"
"time"
)
// Service groups the containers that sit behind one nginx front end,
// together with the bridge that networks them.
type Service struct {
	ContainterName string // name of the service's nginx container ("Containter" misspelling kept: part of the interface)
	Name           string `json:"name"`
	BridgeName     string
	BridgeIP       string `json:"bridge-ip"`
	Pid            int // pid of the nginx container process
	Containers     map[string]Container
	NginxUpStream  // embedded upstream config rendered into nginx templates
}

// Container describes one managed process and its network identity.
type Container struct {
	Name        string
	ServiceName string `json:"service-name"`
	Command     string `json:"command"`
	CopyFile    bool   `json:"copy-file"`
	FileToCopy  string `json:"file"`
	Pid         int
	IP          string
	StartTime   time.Time
	VEth        string // host-side veth interface name
}

// NginxUpStream holds the upstream block rendered into the nginx config.
type NginxUpStream struct {
	LoadBalanceType string
	Servers         []string
}

// NOTE(review): both maps are mutated from HTTP handler goroutines and
// from run() without synchronization — confirm whether a mutex is needed.
var services map[string]Service
var containers map[string]Container
var nginx_config *template.Template

const (
	bridgeNameBase = "brocker"
	vethNameBase   = "veth"
	MOUNT_LOC      = "/app"       // mount point inside each container
	CONTAIN_DIR    = "/container" // host directory holding per-container files
)
// setName derives a short unique container name: the first 8 hex chars of
// the SHA-1 of the start time concatenated with the command.
func (c *Container) setName() {
	value := fmt.Sprintf("%s%s", c.StartTime, c.Command)
	sha := sha1.New()
	sha.Write([]byte(value))
	c.Name = hex.EncodeToString(sha.Sum(nil))[:8]
}
// Close unmounts the container's /app bind mount and kills its process.
func (c *Container) Close() {
	if err := execInContainter("/bin/umount /app", c.Pid); err != nil {
		fmt.Println("Cannot unmount /app: ", err)
	}
	// On Unix FindProcess always succeeds, hence the ignored error.
	p, _ := os.FindProcess(c.Pid)
	p.Kill()
}
// reload asks the service's nginx (inside its namespaces) to re-read its
// configuration, picking up upstream changes written by writeConfig.
func (s *Service) reload() {
	if err := execInContainter("/usr/sbin/nginx -s reload -c /app/nginx.conf", s.Pid); err != nil {
		fmt.Println("Cannot reload nginx: ", err)
		return
	}
}
// Stop shuts the service down: stops nginx, closes every container, and
// removes the service's bridge interface.
func (s *Service) Stop() {
	if err := execInContainter("/usr/sbin/nginx -s stop -c /app/nginx.conf", s.Pid); err != nil {
		fmt.Println(err)
	}
	for _, c := range s.Containers {
		c.Close()
	}
	delete_bridge := strings.Split(fmt.Sprintf("ip link delete %s type bridge", s.BridgeName), " ")
	if err := exec.Command(delete_bridge[0], delete_bridge[1:]...).Run(); err != nil {
		fmt.Printf("Cannot delete bridge %s", s.BridgeName)
	}
}
// writeConfig rewrites the per-service nginx upstream config file from the
// myapp.conf.tmpl template.
func (s *Service) writeConfig() {
	// Bug fix: O_TRUNC is required — with plain O_WRONLY a rendering that
	// is shorter than the previous one leaves stale trailing bytes and
	// produces a corrupt nginx config.
	myappconffile, err := os.OpenFile(fmt.Sprintf("%s/%s/myapp.conf", CONTAIN_DIR, s.ContainterName), os.O_WRONLY|os.O_TRUNC, 0644)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer myappconffile.Close()
	if err := nginx_config.ExecuteTemplate(myappconffile, "myapp.conf.tmpl", s); err != nil {
		fmt.Println(err)
		return
	}
}
// init allocates the global registries and loads the nginx templates;
// template.Must panics at startup if the templates are missing.
func init() {
	services = make(map[string]Service)
	containers = make(map[string]Container)
	nginx_config = template.Must(template.ParseFiles("/etc/brocker/nginx.conf.tmpl", "/etc/brocker/myapp.conf.tmpl"))
}
// main installs a SIGINT handler that tears down all services, registers
// the REST endpoints, and serves the API on :3000.
func main() {
	ctrl_c := make(chan os.Signal, 1)
	signal.Notify(ctrl_c, os.Interrupt)
	go func() {
		for range ctrl_c {
			for _, s := range services {
				s.Stop()
			}
			os.Exit(0)
		}
	}()
	http.HandleFunc("/api/v1/service/add", service_add)
	http.HandleFunc("/api/v1/container/run", container_run)
	http.HandleFunc("/api/v1/container/list", container_list)
	http.HandleFunc("/api/v1/container/exec", container_exec)
	http.HandleFunc("/api/v1/container/rm", container_rm)
	err := http.ListenAndServe(":3000", nil)
	if err != nil {
		fmt.Println(err)
	}
}
// service_add handles POST /api/v1/service/add: it decodes a Service,
// creates its bridge network, and starts its dedicated nginx container.
func service_add(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
		http.Error(w, "Invalid Request!", http.StatusMethodNotAllowed)
		return
	}
	s := Service{
		Containers: make(map[string]Container),
	}
	if err := json.NewDecoder(r.Body).Decode(&s); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if _, ok := services[s.Name]; ok {
		http.Error(w, "Service already exists", http.StatusInternalServerError)
		return
	}
	// Bridge names are allocated sequentially: brocker1, brocker2, ...
	s.BridgeName = fmt.Sprintf("%s%d", bridgeNameBase, len(services)+1)
	s.LoadBalanceType = "least_conn"
	if err := service_create_network(s); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	path, err := exec.LookPath("nginx")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	c := Container{
		Name:        fmt.Sprintf("%s-nginx", s.Name),
		ServiceName: s.Name,
		Command:     fmt.Sprintf("%s -c %s", path, "/app/nginx.conf"),
	}
	// The container starts asynchronously; 201 only means "accepted".
	go run(c, true)
	w.WriteHeader(http.StatusCreated)
}
// container_run handles POST /api/v1/container/run: it decodes a Container
// spec and starts it asynchronously inside its declared service.
func container_run(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
		http.Error(w, "Invalid Request!", http.StatusMethodNotAllowed)
		return
	}
	var c Container
	if err := json.NewDecoder(r.Body).Decode(&c); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// Idiomatic !ok (was "ok == false"); error message grammar fixed.
	if _, ok := services[c.ServiceName]; !ok {
		http.Error(w, "Service does not exist", http.StatusInternalServerError)
		return
	}
	// The container starts asynchronously; 201 only means "accepted".
	go run(c, false)
	w.WriteHeader(http.StatusCreated)
}
// container_list handles /api/v1/container/list, writing the registry of
// running containers as JSON.
func container_list(w http.ResponseWriter, r *http.Request) {
	encoder := json.NewEncoder(w)
	if encodeErr := encoder.Encode(containers); encodeErr != nil {
		http.Error(w, encodeErr.Error(), http.StatusInternalServerError)
	}
}
// container_exec handles POST /api/v1/container/exec: given a container
// name it returns the container's pid (for use with nsenter).
// NOTE(review): an unknown name falls through and returns 200 with an
// empty body — confirm whether a 404 is intended.
func container_exec(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
		http.Error(w, "Invalid Request!", http.StatusMethodNotAllowed)
		return
	}
	data := struct {
		Name string `json:"name"`
	}{}
	if err := json.NewDecoder(r.Body).Decode(&data); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	for _, c := range containers {
		if c.Name == data.Name {
			w.Write([]byte(fmt.Sprintf("%d", c.Pid)))
			return
		}
	}
}
// container_rm handles POST /api/v1/container/rm: it looks the container
// up by name and kills it; run()'s Wait cleans up the registries.
func container_rm(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
		http.Error(w, "Invalid Request!", http.StatusMethodNotAllowed)
		return
	}
	data := struct {
		Name string `json:"name"`
	}{}
	if err := json.NewDecoder(r.Body).Decode(&data); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	c, ok := containers[data.Name]
	if !ok {
		http.Error(w, "Not a running container", http.StatusInternalServerError)
		return
	}
	c.Close()
	w.Write([]byte("Stopping container"))
}
// service_create_network creates the service's bridge, brings it up,
// assigns its IP, and registers the service on success.
func service_create_network(s Service) error {
	create_bridge := strings.Split(fmt.Sprintf("/sbin/ip link add name %s type bridge", s.BridgeName), " ")
	set_bridge_up := strings.Split(fmt.Sprintf("/sbin/ip link set %s up", s.BridgeName), " ")
	set_bridge_ip := strings.Split(fmt.Sprintf("/sbin/ifconfig %s %s", s.BridgeName, s.BridgeIP), " ")
	if err := exec.Command(create_bridge[0], create_bridge[1:]...).Run(); err != nil {
		return err
	}
	if err := exec.Command(set_bridge_up[0], set_bridge_up[1:]...).Run(); err != nil {
		return err
	}
	if err := exec.Command(set_bridge_ip[0], set_bridge_ip[1:]...).Run(); err != nil {
		return err
	}
	services[s.Name] = s
	return nil
}
// run launches a container process in fresh PID/mount/net namespaces via
// the external brocker-run helper, wires its veth pair onto the service
// bridge, registers it, and blocks until the process exits.
// NOTE(review): services/containers are mutated here and in the HTTP
// handlers without locking — confirm whether a mutex is needed.
func run(c Container, isNginx bool) {
	fmt.Println("running parent")
	s := services[c.ServiceName]
	runcmd, err := exec.LookPath("brocker-run")
	if err != nil {
		fmt.Println(err)
		return
	}
	c.StartTime = time.Now()
	c.setName()
	// Bug fix: directories need the execute bit to be traversable; 0644
	// made the container dir unusable for non-root access.
	if err := os.Mkdir(fmt.Sprintf("%s/%s", CONTAIN_DIR, c.Name), 0755); err != nil {
		fmt.Println(err)
		return
	}
	if c.CopyFile {
		if err := exec.Command("cp", c.FileToCopy, fmt.Sprintf("%s/%s/%s", CONTAIN_DIR, c.Name, path.Base(c.FileToCopy))).Run(); err != nil {
			fmt.Println(err)
			return
		}
	}
	if isNginx {
		// Render the nginx main and upstream configs into the container dir.
		nginxconffile, err := os.Create(fmt.Sprintf("%s/%s/nginx.conf", CONTAIN_DIR, c.Name))
		if err != nil {
			fmt.Println(err)
			nginxconffile.Close()
			return
		}
		if err := nginx_config.ExecuteTemplate(nginxconffile, "nginx.conf.tmpl", s); err != nil {
			fmt.Println(err)
			nginxconffile.Close()
			return
		}
		nginxconffile.Close()
		myappconffile, err := os.Create(fmt.Sprintf("%s/%s/myapp.conf", CONTAIN_DIR, c.Name))
		if err != nil {
			fmt.Println(err)
			myappconffile.Close()
			return
		}
		if err := nginx_config.ExecuteTemplate(myappconffile, "myapp.conf.tmpl", s); err != nil {
			fmt.Println(err)
			myappconffile.Close()
			return
		}
		myappconffile.Close()
	}
	args := strings.Split(fmt.Sprintf("%s %s %s", runcmd, c.Name, c.Command), " ")
	cmd := &exec.Cmd{
		Path: runcmd,
		Args: args,
	}
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Cloneflags: syscall.CLONE_NEWPID |
			syscall.CLONE_NEWNS |
			syscall.CLONE_NEWNET,
	}
	if err := cmd.Start(); err != nil {
		fmt.Println(err)
		// Bug fix: must return here — after a failed Start cmd.Process is
		// nil and the code below would dereference it.
		return
	}
	c.Pid = cmd.Process.Pid
	c.VEth = fmt.Sprintf("%s%d", vethNameBase, len(containers))
	link := strings.Split(fmt.Sprintf("/sbin/ip link add name %s type veth peer name veth1 netns %d", c.VEth, c.Pid), " ")
	if err := exec.Command(link[0], link[1:]...).Run(); err != nil {
		fmt.Println(err)
		return
	}
	uplink := strings.Split(fmt.Sprintf("/sbin/ifconfig %s up", c.VEth), " ")
	if err := exec.Command(uplink[0], uplink[1:]...).Run(); err != nil {
		fmt.Println(err)
		return
	}
	bridge := strings.Split(fmt.Sprintf("/sbin/ip link set %s master %s", c.VEth, s.BridgeName), " ")
	if err := exec.Command(bridge[0], bridge[1:]...).Run(); err != nil {
		fmt.Println(err)
		return
	}
	// Bug fix: guard against an unparsable bridge IP — ParseIP returns nil
	// and the index expressions below would panic.
	bridgeip := net.ParseIP(s.BridgeIP)
	if bridgeip == nil {
		fmt.Println("invalid bridge IP: " + s.BridgeIP)
		return
	}
	// ParseIP yields the 16-byte form for dotted IPv4; the last four bytes
	// hold the v4 octets. Containers get bridgeIP+index addresses.
	lastOctet := bridgeip[15] + byte(len(s.Containers)+1)
	ip := net.IPv4(bridgeip[12], bridgeip[13], bridgeip[14], lastOctet)
	c.IP = ip.String()
	if err := execInContainter(fmt.Sprintf("/sbin/ifconfig veth1 %s", ip.String()), c.Pid); err != nil {
		fmt.Println(err)
		return
	}
	containers[c.Name] = c
	s.Containers[c.Name] = c
	if isNginx {
		s.Pid = c.Pid
		s.ContainterName = c.Name
	} else {
		s.Servers = append(s.Servers, fmt.Sprintf("%s:8080", c.IP))
		s.writeConfig()
		s.reload()
	}
	services[c.ServiceName] = s
	fmt.Println(cmd.Process.Pid)
	cmd.Wait()
	// Deregister once the container process exits.
	delete(containers, c.Name)
	delete(services[c.ServiceName].Containers, c.Name)
}
// execInContainter runs cmd inside the pid/net/mount namespaces of the
// process identified by pid, using nsenter.
// NOTE(review): cmd is split on spaces, so arguments containing spaces
// are not supported.
func execInContainter(cmd string, pid int) error {
	command := strings.Split(fmt.Sprintf("nsenter --target %d --pid --net --mount %s", pid, cmd), " ")
	if err := exec.Command(command[0], command[1:]...).Run(); err != nil {
		return err
	}
	return nil
}
golint: don't use underscores in Go names
package main
import (
"crypto/sha1"
"encoding/hex"
"encoding/json"
"fmt"
"net"
"net/http"
"os"
"os/exec"
"os/signal"
"path"
"strings"
"syscall"
"text/template"
"time"
)
// Service groups the containers that sit behind one nginx front end,
// together with the bridge that networks them.
type Service struct {
	ContainterName string // name of the service's nginx container ("Containter" misspelling kept: part of the interface)
	Name           string `json:"name"`
	BridgeName     string
	BridgeIP       string `json:"bridge-ip"`
	Pid            int // pid of the nginx container process
	Containers     map[string]Container
	NginxUpStream  // embedded upstream config rendered into nginx templates
}

// Container describes one managed process and its network identity.
type Container struct {
	Name        string
	ServiceName string `json:"service-name"`
	Command     string `json:"command"`
	CopyFile    bool   `json:"copy-file"`
	FileToCopy  string `json:"file"`
	Pid         int
	IP          string
	StartTime   time.Time
	VEth        string // host-side veth interface name
}

// NginxUpStream holds the upstream block rendered into the nginx config.
type NginxUpStream struct {
	LoadBalanceType string
	Servers         []string
}

// NOTE(review): both maps are mutated from HTTP handler goroutines and
// from run() without synchronization — confirm whether a mutex is needed.
var services map[string]Service
var containers map[string]Container
var nginxConfig *template.Template

const (
	bridgeNameBase = "brocker"
	vethNameBase   = "veth"
	MOUNT_LOC      = "/app"       // mount point inside each container
	CONTAIN_DIR    = "/container" // host directory holding per-container files
)
// setName derives a short unique container name: the first 8 hex chars of
// the SHA-1 of the start time concatenated with the command.
func (c *Container) setName() {
	value := fmt.Sprintf("%s%s", c.StartTime, c.Command)
	sha := sha1.New()
	sha.Write([]byte(value))
	c.Name = hex.EncodeToString(sha.Sum(nil))[:8]
}
// Close unmounts the container's /app bind mount and kills its process.
func (c *Container) Close() {
	if err := execInContainter("/bin/umount /app", c.Pid); err != nil {
		fmt.Println("Cannot unmount /app: ", err)
	}
	// On Unix FindProcess always succeeds, hence the ignored error.
	p, _ := os.FindProcess(c.Pid)
	p.Kill()
}
// reload asks the service's nginx (inside its namespaces) to re-read its
// configuration, picking up upstream changes written by writeConfig.
func (s *Service) reload() {
	if err := execInContainter("/usr/sbin/nginx -s reload -c /app/nginx.conf", s.Pid); err != nil {
		fmt.Println("Cannot reload nginx: ", err)
		return
	}
}
// Stop shuts the service down: stops nginx, closes every container, and
// removes the service's bridge interface.
func (s *Service) Stop() {
	if err := execInContainter("/usr/sbin/nginx -s stop -c /app/nginx.conf", s.Pid); err != nil {
		fmt.Println(err)
	}
	for _, c := range s.Containers {
		c.Close()
	}
	deleteBridge := strings.Split(fmt.Sprintf("ip link delete %s type bridge", s.BridgeName), " ")
	if err := exec.Command(deleteBridge[0], deleteBridge[1:]...).Run(); err != nil {
		fmt.Printf("Cannot delete bridge %s", s.BridgeName)
	}
}
// writeConfig rewrites the per-service nginx upstream config file from the
// myapp.conf.tmpl template.
func (s *Service) writeConfig() {
	// Bug fix: O_TRUNC is required — with plain O_WRONLY a rendering that
	// is shorter than the previous one leaves stale trailing bytes and
	// produces a corrupt nginx config.
	myappconffile, err := os.OpenFile(fmt.Sprintf("%s/%s/myapp.conf", CONTAIN_DIR, s.ContainterName), os.O_WRONLY|os.O_TRUNC, 0644)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer myappconffile.Close()
	if err := nginxConfig.ExecuteTemplate(myappconffile, "myapp.conf.tmpl", s); err != nil {
		fmt.Println(err)
		return
	}
}
// init allocates the global registries and loads the nginx templates;
// template.Must panics at startup if the templates are missing.
func init() {
	services = make(map[string]Service)
	containers = make(map[string]Container)
	nginxConfig = template.Must(template.ParseFiles("/etc/brocker/nginx.conf.tmpl", "/etc/brocker/myapp.conf.tmpl"))
}
// main installs a SIGINT handler that tears down all services, registers
// the REST endpoints, and serves the API on :3000.
func main() {
	ctrlC := make(chan os.Signal, 1)
	signal.Notify(ctrlC, os.Interrupt)
	go func() {
		for range ctrlC {
			for _, s := range services {
				s.Stop()
			}
			os.Exit(0)
		}
	}()
	http.HandleFunc("/api/v1/service/add", serviceAdd)
	http.HandleFunc("/api/v1/container/run", containerRun)
	http.HandleFunc("/api/v1/container/list", containerList)
	http.HandleFunc("/api/v1/container/exec", containerExec)
	http.HandleFunc("/api/v1/container/rm", containerRm)
	err := http.ListenAndServe(":3000", nil)
	if err != nil {
		fmt.Println(err)
	}
}
// serviceAdd handles POST /api/v1/service/add: it decodes a Service,
// creates its bridge network, and starts its dedicated nginx container.
func serviceAdd(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
		http.Error(w, "Invalid Request!", http.StatusMethodNotAllowed)
		return
	}
	s := Service{
		Containers: make(map[string]Container),
	}
	if err := json.NewDecoder(r.Body).Decode(&s); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if _, ok := services[s.Name]; ok {
		http.Error(w, "Service already exists", http.StatusInternalServerError)
		return
	}
	// Bridge names are allocated sequentially: brocker1, brocker2, ...
	s.BridgeName = fmt.Sprintf("%s%d", bridgeNameBase, len(services)+1)
	s.LoadBalanceType = "least_conn"
	if err := serviceCreateNetwork(s); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	path, err := exec.LookPath("nginx")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	c := Container{
		Name:        fmt.Sprintf("%s-nginx", s.Name),
		ServiceName: s.Name,
		Command:     fmt.Sprintf("%s -c %s", path, "/app/nginx.conf"),
	}
	// The container starts asynchronously; 201 only means "accepted".
	go run(c, true)
	w.WriteHeader(http.StatusCreated)
}
// containerRun handles POST /api/v1/container/run: it decodes a Container
// spec and starts it asynchronously inside its declared service.
func containerRun(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
		http.Error(w, "Invalid Request!", http.StatusMethodNotAllowed)
		return
	}
	var c Container
	if err := json.NewDecoder(r.Body).Decode(&c); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// Idiomatic !ok (was "ok == false"); error message grammar fixed.
	if _, ok := services[c.ServiceName]; !ok {
		http.Error(w, "Service does not exist", http.StatusInternalServerError)
		return
	}
	// The container starts asynchronously; 201 only means "accepted".
	go run(c, false)
	w.WriteHeader(http.StatusCreated)
}
// containerList handles /api/v1/container/list, writing the registry of
// running containers as JSON.
func containerList(w http.ResponseWriter, r *http.Request) {
	encoder := json.NewEncoder(w)
	if encodeErr := encoder.Encode(containers); encodeErr != nil {
		http.Error(w, encodeErr.Error(), http.StatusInternalServerError)
	}
}
// containerExec handles POST /api/v1/container/exec: given a container
// name it returns the container's pid (for use with nsenter).
// NOTE(review): an unknown name falls through and returns 200 with an
// empty body — confirm whether a 404 is intended.
func containerExec(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
		http.Error(w, "Invalid Request!", http.StatusMethodNotAllowed)
		return
	}
	data := struct {
		Name string `json:"name"`
	}{}
	if err := json.NewDecoder(r.Body).Decode(&data); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	for _, c := range containers {
		if c.Name == data.Name {
			w.Write([]byte(fmt.Sprintf("%d", c.Pid)))
			return
		}
	}
}
// containerRm handles POST /api/v1/container/rm: it looks the container
// up by name and kills it; run()'s Wait cleans up the registries.
func containerRm(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
		http.Error(w, "Invalid Request!", http.StatusMethodNotAllowed)
		return
	}
	data := struct {
		Name string `json:"name"`
	}{}
	if err := json.NewDecoder(r.Body).Decode(&data); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	c, ok := containers[data.Name]
	if !ok {
		http.Error(w, "Not a running container", http.StatusInternalServerError)
		return
	}
	c.Close()
	w.Write([]byte("Stopping container"))
}
// serviceCreateNetwork creates the service's bridge, brings it up,
// assigns its IP, and registers the service on success.
func serviceCreateNetwork(s Service) error {
	createBridge := strings.Split(fmt.Sprintf("/sbin/ip link add name %s type bridge", s.BridgeName), " ")
	setBridgeUp := strings.Split(fmt.Sprintf("/sbin/ip link set %s up", s.BridgeName), " ")
	setBridgeIP := strings.Split(fmt.Sprintf("/sbin/ifconfig %s %s", s.BridgeName, s.BridgeIP), " ")
	if err := exec.Command(createBridge[0], createBridge[1:]...).Run(); err != nil {
		return err
	}
	if err := exec.Command(setBridgeUp[0], setBridgeUp[1:]...).Run(); err != nil {
		return err
	}
	if err := exec.Command(setBridgeIP[0], setBridgeIP[1:]...).Run(); err != nil {
		return err
	}
	services[s.Name] = s
	return nil
}
// run launches a container process in fresh PID/mount/net namespaces via
// the external brocker-run helper, wires its veth pair onto the service
// bridge, registers it, and blocks until the process exits.
// NOTE(review): services/containers are mutated here and in the HTTP
// handlers without locking — confirm whether a mutex is needed.
func run(c Container, isNginx bool) {
	fmt.Println("running parent")
	s := services[c.ServiceName]
	runcmd, err := exec.LookPath("brocker-run")
	if err != nil {
		fmt.Println(err)
		return
	}
	c.StartTime = time.Now()
	c.setName()
	// Bug fix: directories need the execute bit to be traversable; 0644
	// made the container dir unusable for non-root access.
	if err := os.Mkdir(fmt.Sprintf("%s/%s", CONTAIN_DIR, c.Name), 0755); err != nil {
		fmt.Println(err)
		return
	}
	if c.CopyFile {
		if err := exec.Command("cp", c.FileToCopy, fmt.Sprintf("%s/%s/%s", CONTAIN_DIR, c.Name, path.Base(c.FileToCopy))).Run(); err != nil {
			fmt.Println(err)
			return
		}
	}
	if isNginx {
		// Render the nginx main and upstream configs into the container dir.
		nginxconffile, err := os.Create(fmt.Sprintf("%s/%s/nginx.conf", CONTAIN_DIR, c.Name))
		if err != nil {
			fmt.Println(err)
			nginxconffile.Close()
			return
		}
		if err := nginxConfig.ExecuteTemplate(nginxconffile, "nginx.conf.tmpl", s); err != nil {
			fmt.Println(err)
			nginxconffile.Close()
			return
		}
		nginxconffile.Close()
		myappconffile, err := os.Create(fmt.Sprintf("%s/%s/myapp.conf", CONTAIN_DIR, c.Name))
		if err != nil {
			fmt.Println(err)
			myappconffile.Close()
			return
		}
		if err := nginxConfig.ExecuteTemplate(myappconffile, "myapp.conf.tmpl", s); err != nil {
			fmt.Println(err)
			myappconffile.Close()
			return
		}
		myappconffile.Close()
	}
	args := strings.Split(fmt.Sprintf("%s %s %s", runcmd, c.Name, c.Command), " ")
	cmd := &exec.Cmd{
		Path: runcmd,
		Args: args,
	}
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Cloneflags: syscall.CLONE_NEWPID |
			syscall.CLONE_NEWNS |
			syscall.CLONE_NEWNET,
	}
	if err := cmd.Start(); err != nil {
		fmt.Println(err)
		// Bug fix: must return here — after a failed Start cmd.Process is
		// nil and the code below would dereference it.
		return
	}
	c.Pid = cmd.Process.Pid
	c.VEth = fmt.Sprintf("%s%d", vethNameBase, len(containers))
	link := strings.Split(fmt.Sprintf("/sbin/ip link add name %s type veth peer name veth1 netns %d", c.VEth, c.Pid), " ")
	if err := exec.Command(link[0], link[1:]...).Run(); err != nil {
		fmt.Println(err)
		return
	}
	uplink := strings.Split(fmt.Sprintf("/sbin/ifconfig %s up", c.VEth), " ")
	if err := exec.Command(uplink[0], uplink[1:]...).Run(); err != nil {
		fmt.Println(err)
		return
	}
	bridge := strings.Split(fmt.Sprintf("/sbin/ip link set %s master %s", c.VEth, s.BridgeName), " ")
	if err := exec.Command(bridge[0], bridge[1:]...).Run(); err != nil {
		fmt.Println(err)
		return
	}
	// Bug fix: guard against an unparsable bridge IP — ParseIP returns nil
	// and the index expressions below would panic.
	bridgeip := net.ParseIP(s.BridgeIP)
	if bridgeip == nil {
		fmt.Println("invalid bridge IP: " + s.BridgeIP)
		return
	}
	// ParseIP yields the 16-byte form for dotted IPv4; the last four bytes
	// hold the v4 octets. Containers get bridgeIP+index addresses.
	lastOctet := bridgeip[15] + byte(len(s.Containers)+1)
	ip := net.IPv4(bridgeip[12], bridgeip[13], bridgeip[14], lastOctet)
	c.IP = ip.String()
	if err := execInContainter(fmt.Sprintf("/sbin/ifconfig veth1 %s", ip.String()), c.Pid); err != nil {
		fmt.Println(err)
		return
	}
	containers[c.Name] = c
	s.Containers[c.Name] = c
	if isNginx {
		s.Pid = c.Pid
		s.ContainterName = c.Name
	} else {
		s.Servers = append(s.Servers, fmt.Sprintf("%s:8080", c.IP))
		s.writeConfig()
		s.reload()
	}
	services[c.ServiceName] = s
	fmt.Println(cmd.Process.Pid)
	cmd.Wait()
	// Deregister once the container process exits.
	delete(containers, c.Name)
	delete(services[c.ServiceName].Containers, c.Name)
}
// execInContainter runs cmd inside the pid/net/mount namespaces of the
// process identified by pid, using nsenter.
// NOTE(review): cmd is split on spaces, so arguments containing spaces
// are not supported.
func execInContainter(cmd string, pid int) error {
	command := strings.Split(fmt.Sprintf("nsenter --target %d --pid --net --mount %s", pid, cmd), " ")
	if err := exec.Command(command[0], command[1:]...).Run(); err != nil {
		return err
	}
	return nil
}
|
package sparta
import (
"encoding/json"
"fmt"
"github.com/mweagle/cloudformationresources"
"net/http"
"expvar"
"github.com/Sirupsen/logrus"
"strings"
)
// Dispatch map for user defined CloudFormation CustomResources to
// lambda functions
type dispatchMap map[string]*LambdaAWSInfo

// Dispatch map for normal AWS Lambda to user defined Sparta lambda functions
type customResourceDispatchMap map[string]*customResourceInfo
// userDefinedCustomResourceForwarder unpacks a CloudFormation custom
// resource event and forwards it to the user-provided resource function,
// writing the outcome (or an error) back to the proxying client.
func userDefinedCustomResourceForwarder(customResource *customResourceInfo,
	event *json.RawMessage,
	context *LambdaContext,
	w http.ResponseWriter,
	logger *logrus.Logger) {
	// Best-effort decode purely for the diagnostic log below.
	var rawProps map[string]interface{}
	json.Unmarshal([]byte(*event), &rawProps)
	var lambdaEvent cloudformationresources.CloudFormationLambdaEvent
	jsonErr := json.Unmarshal([]byte(*event), &lambdaEvent)
	if jsonErr != nil {
		logger.WithFields(logrus.Fields{
			"RawEvent":       rawProps,
			"UnmarshalError": jsonErr,
		}).Warn("Raw event data")
		http.Error(w, jsonErr.Error(), http.StatusInternalServerError)
		// Bug fix: without this return the handler kept going and
		// forwarded a zero-valued event after reporting the error.
		return
	}
	logger.WithFields(logrus.Fields{
		"LambdaEvent": lambdaEvent,
	}).Debug("CloudFormation user resource lambda event")
	// Create the new request and send it off
	customResourceRequest := &cloudformationresources.UserFuncResourceRequest{}
	customResourceRequest.LambdaHandler = func(requestType string,
		stackID string,
		properties map[string]interface{},
		logger *logrus.Logger) (map[string]interface{}, error) {
		// Descend to get the "UserProperties" field iff defined by the customResource
		var userProperties map[string]interface{}
		if _, exists := lambdaEvent.ResourceProperties["UserProperties"]; exists {
			childProps, ok := lambdaEvent.ResourceProperties["UserProperties"].(map[string]interface{})
			if !ok {
				return nil, fmt.Errorf("Failed to extract UserProperties from payload")
			}
			userProperties = childProps
		}
		return customResource.userFunction(requestType, stackID, userProperties, logger)
	}
	customResourceRequest.RequestType = lambdaEvent.RequestType
	customResourceRequest.ResponseURL = lambdaEvent.ResponseURL
	customResourceRequest.StackID = lambdaEvent.StackID
	customResourceRequest.RequestID = lambdaEvent.RequestID
	customResourceRequest.LogicalResourceID = lambdaEvent.LogicalResourceID
	customResourceRequest.PhysicalResourceID = lambdaEvent.PhysicalResourceID
	customResourceRequest.LogGroupName = context.LogGroupName
	customResourceRequest.LogStreamName = context.LogStreamName
	customResourceRequest.ResourceProperties = lambdaEvent.ResourceProperties
	if "" == customResourceRequest.PhysicalResourceID {
		customResourceRequest.PhysicalResourceID = fmt.Sprintf("LogStreamName: %s", context.LogStreamName)
	}
	requestErr := cloudformationresources.Run(customResourceRequest, logger)
	if requestErr != nil {
		http.Error(w, requestErr.Error(), http.StatusInternalServerError)
	} else {
		fmt.Fprint(w, "CustomResource handled: "+lambdaEvent.LogicalResourceID)
	}
}
// Extract the fields and forward the event to the resource
// Extract the fields and forward the event to the resource
func spartaCustomResourceForwarder(event *json.RawMessage,
	context *LambdaContext,
	w http.ResponseWriter,
	logger *logrus.Logger) {
	// Best-effort decode purely for the diagnostic log below.
	var rawProps map[string]interface{}
	json.Unmarshal([]byte(*event), &rawProps)
	var lambdaEvent cloudformationresources.CloudFormationLambdaEvent
	jsonErr := json.Unmarshal([]byte(*event), &lambdaEvent)
	if jsonErr != nil {
		logger.WithFields(logrus.Fields{
			"RawEvent":       rawProps,
			"UnmarshalError": jsonErr,
		}).Warn("Raw event data")
		http.Error(w, jsonErr.Error(), http.StatusInternalServerError)
		// Bug fix: without this return the handler kept going and
		// forwarded a zero-valued event after reporting the error.
		return
	}
	logger.WithFields(logrus.Fields{
		"LambdaEvent": lambdaEvent,
	}).Debug("CloudFormation Lambda event")
	// Setup the request and send it off
	customResourceRequest := &cloudformationresources.CustomResourceRequest{}
	customResourceRequest.RequestType = lambdaEvent.RequestType
	customResourceRequest.ResponseURL = lambdaEvent.ResponseURL
	customResourceRequest.StackID = lambdaEvent.StackID
	customResourceRequest.RequestID = lambdaEvent.RequestID
	customResourceRequest.LogicalResourceID = lambdaEvent.LogicalResourceID
	customResourceRequest.PhysicalResourceID = lambdaEvent.PhysicalResourceID
	customResourceRequest.LogGroupName = context.LogGroupName
	customResourceRequest.LogStreamName = context.LogStreamName
	customResourceRequest.ResourceProperties = lambdaEvent.ResourceProperties
	if "" == customResourceRequest.PhysicalResourceID {
		customResourceRequest.PhysicalResourceID = fmt.Sprintf("LogStreamName: %s", context.LogStreamName)
	}
	requestErr := cloudformationresources.Handle(customResourceRequest, logger)
	if requestErr != nil {
		http.Error(w, requestErr.Error(), http.StatusInternalServerError)
	} else {
		fmt.Fprint(w, "CustomResource handled: "+lambdaEvent.LogicalResourceID)
	}
}
// LambdaHTTPHandler is an HTTP compliant handler that implements
// ServeHTTP
type LambdaHTTPHandler struct {
	LambdaDispatchMap         dispatchMap               // lambda name -> registered lambda
	customResourceDispatchMap customResourceDispatchMap // user function name -> custom resource
	logger                    *logrus.Logger
}
func expvarHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
fmt.Fprintf(w, "{\n")
first := true
expvar.Do(func(kv expvar.KeyValue) {
if !first {
fmt.Fprintf(w, ",\n")
}
first = false
fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
})
fmt.Fprintf(w, "\n}\n")
}
// ServeHTTP dispatches a proxied lambda invocation. Resolution order:
// (1) the expvar diagnostics path, (2) a registered lambda by name,
// (3) the built-in CloudFormation custom-resource forwarder (paths
// containing "::"), (4) a user-defined custom resource. Anything else
// is a 400. A panic in a handler is recovered and reported as a 400.
func (handler *LambdaHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// If this is the expvar handler then skip it
	if "/golang/expvar" == req.URL.Path {
		expvarHandler(w, req)
		return
	}
	// Remove the leading slash and dispatch it to the golang handler
	lambdaFunc := strings.TrimLeft(req.URL.Path, "/")
	decoder := json.NewDecoder(req.Body)
	var request lambdaRequest
	defer func() {
		if r := recover(); r != nil {
			err, ok := r.(error)
			if !ok {
				err = fmt.Errorf("%v", r)
			}
			errorString := fmt.Sprintf("Lambda handler panic: %#v", err)
			http.Error(w, errorString, http.StatusBadRequest)
		}
	}()
	err := decoder.Decode(&request)
	if nil != err {
		errorString := fmt.Sprintf("Failed to decode proxy request: %s", err.Error())
		http.Error(w, errorString, http.StatusBadRequest)
		return
	}
	handler.logger.WithFields(logrus.Fields{
		"Request":    request,
		"LookupName": lambdaFunc,
	}).Debug("Dispatching")
	lambdaAWSInfo := handler.LambdaDispatchMap[lambdaFunc]
	var lambdaFn LambdaFunction
	if nil != lambdaAWSInfo {
		lambdaFn = lambdaAWSInfo.lambdaFn
	} else if strings.Contains(lambdaFunc, "::") {
		// Not the most exhaustive guard, but the CloudFormation custom resources
		// all have "::" delimiters in their type field. Even if there is a false
		// positive, the spartaCustomResourceForwarder will simply error out.
		lambdaFn = spartaCustomResourceForwarder
	}
	if nil != lambdaFn {
		lambdaFn(&request.Event, &request.Context, w, handler.logger)
	} else {
		// Final check for user-defined resource
		customResource, exists := handler.customResourceDispatchMap[lambdaFunc]
		handler.logger.WithFields(logrus.Fields{
			"Request":    request,
			"LookupName": lambdaFunc,
			"Exists":     exists,
		}).Debug("Custom Resource request")
		if exists {
			userDefinedCustomResourceForwarder(customResource,
				&request.Event,
				&request.Context,
				w,
				handler.logger)
		} else {
			http.Error(w, "Unsupported path: "+lambdaFunc, http.StatusBadRequest)
		}
	}
}
// NewLambdaHTTPHandler returns an initialized LambdaHTTPHandler instance. The returned value
// can be provided to https://golang.org/pkg/net/http/httptest/#NewServer to perform
// localhost testing.
func NewLambdaHTTPHandler(lambdaAWSInfos []*LambdaAWSInfo, logger *logrus.Logger) *LambdaHTTPHandler {
	lookupMap := make(dispatchMap, 0)
	customResourceMap := make(customResourceDispatchMap, 0)
	for _, eachLambdaInfo := range lambdaAWSInfos {
		logger.WithFields(logrus.Fields{
			"Path": eachLambdaInfo.lambdaFunctionName(),
		}).Debug("Registering lambda URL")
		lookupMap[eachLambdaInfo.lambdaFunctionName()] = eachLambdaInfo
		// Build up the customResourceDispatchMap
		for _, eachCustomResource := range eachLambdaInfo.customResources {
			logger.WithFields(logrus.Fields{
				"Path": eachCustomResource.userFunctionName,
			}).Debug("Registering customResource URL")
			customResourceMap[eachCustomResource.userFunctionName] = eachCustomResource
		}
	}
	return &LambdaHTTPHandler{
		LambdaDispatchMap:         lookupMap,
		customResourceDispatchMap: customResourceMap,
		logger:                    logger,
	}
}
Support custom credentials for CGO codepath
package sparta
import (
"encoding/json"
"fmt"
"net/http"
"github.com/mweagle/cloudformationresources"
"expvar"
"strings"
"sync"
"github.com/Sirupsen/logrus"
"github.com/aws/aws-sdk-go/aws/credentials"
)
// Dispatch map for user defined CloudFormation CustomResources to
// lambda functions
type dispatchMap map[string]*LambdaAWSInfo

// Dispatch map for normal AWS Lambda to user defined Sparta lambda functions
type customResourceDispatchMap map[string]*customResourceInfo
// expvarHandler writes every published expvar variable as a single JSON
// object, mirroring the stdlib /debug/vars endpoint.
func expvarHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	fmt.Fprintf(w, "{\n")
	first := true
	expvar.Do(func(kv expvar.KeyValue) {
		if !first {
			fmt.Fprintf(w, ",\n")
		}
		first = false
		// kv.Value already serializes itself as JSON.
		fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
	})
	fmt.Fprintf(w, "\n}\n")
}
// userDefinedCustomResourceForwarder unpacks a CloudFormation custom
// resource event and forwards it to the user-provided resource function,
// writing the outcome (or an error) back to the proxying client.
func userDefinedCustomResourceForwarder(customResource *customResourceInfo,
	event *json.RawMessage,
	context *LambdaContext,
	w http.ResponseWriter,
	logger *logrus.Logger) {
	// Best-effort decode purely for the diagnostic log below.
	var rawProps map[string]interface{}
	json.Unmarshal([]byte(*event), &rawProps)
	var lambdaEvent cloudformationresources.CloudFormationLambdaEvent
	jsonErr := json.Unmarshal([]byte(*event), &lambdaEvent)
	if jsonErr != nil {
		logger.WithFields(logrus.Fields{
			"RawEvent":       rawProps,
			"UnmarshalError": jsonErr,
		}).Warn("Raw event data")
		http.Error(w, jsonErr.Error(), http.StatusInternalServerError)
		// Bug fix: without this return the handler kept going and
		// forwarded a zero-valued event after reporting the error.
		return
	}
	logger.WithFields(logrus.Fields{
		"LambdaEvent": lambdaEvent,
	}).Debug("CloudFormation user resource lambda event")
	// Create the new request and send it off
	customResourceRequest := &cloudformationresources.UserFuncResourceRequest{}
	customResourceRequest.LambdaHandler = func(requestType string,
		stackID string,
		properties map[string]interface{},
		logger *logrus.Logger) (map[string]interface{}, error) {
		// Descend to get the "UserProperties" field iff defined by the customResource
		var userProperties map[string]interface{}
		if _, exists := lambdaEvent.ResourceProperties["UserProperties"]; exists {
			childProps, ok := lambdaEvent.ResourceProperties["UserProperties"].(map[string]interface{})
			if !ok {
				return nil, fmt.Errorf("Failed to extract UserProperties from payload")
			}
			userProperties = childProps
		}
		return customResource.userFunction(requestType, stackID, userProperties, logger)
	}
	customResourceRequest.RequestType = lambdaEvent.RequestType
	customResourceRequest.ResponseURL = lambdaEvent.ResponseURL
	customResourceRequest.StackID = lambdaEvent.StackID
	customResourceRequest.RequestID = lambdaEvent.RequestID
	customResourceRequest.LogicalResourceID = lambdaEvent.LogicalResourceID
	customResourceRequest.PhysicalResourceID = lambdaEvent.PhysicalResourceID
	customResourceRequest.LogGroupName = context.LogGroupName
	customResourceRequest.LogStreamName = context.LogStreamName
	customResourceRequest.ResourceProperties = lambdaEvent.ResourceProperties
	if "" == customResourceRequest.PhysicalResourceID {
		customResourceRequest.PhysicalResourceID = fmt.Sprintf("LogStreamName: %s", context.LogStreamName)
	}
	requestErr := cloudformationresources.Run(customResourceRequest, logger)
	if requestErr != nil {
		http.Error(w, requestErr.Error(), http.StatusInternalServerError)
	} else {
		fmt.Fprint(w, "CustomResource handled: "+lambdaEvent.LogicalResourceID)
	}
}
// spartaCustomResourceForwarder decodes a CloudFormation Lambda event and
// forwards it, with the supplied AWS credentials, to
// cloudformationresources.Handle, writing the outcome to w.
func spartaCustomResourceForwarder(creds credentials.Value,
	event *json.RawMessage,
	context *LambdaContext,
	w http.ResponseWriter,
	logger *logrus.Logger) {
	// Best-effort decode used only for the diagnostic log below; the error
	// is deliberately ignored.
	var rawProps map[string]interface{}
	json.Unmarshal([]byte(*event), &rawProps)
	var lambdaEvent cloudformationresources.CloudFormationLambdaEvent
	jsonErr := json.Unmarshal([]byte(*event), &lambdaEvent)
	if jsonErr != nil {
		logger.WithFields(logrus.Fields{
			"RawEvent":       rawProps,
			"UnmarshalError": jsonErr,
		}).Warn("Raw event data")
		http.Error(w, jsonErr.Error(), http.StatusInternalServerError)
		// BUGFIX: previously fell through and kept processing the
		// zero-valued event after reporting the error.
		return
	}
	logger.WithFields(logrus.Fields{
		"LambdaEvent": lambdaEvent,
	}).Debug("CloudFormation Lambda event")
	// Setup the request and send it off
	customResourceRequest := &cloudformationresources.CustomResourceRequest{}
	customResourceRequest.RequestType = lambdaEvent.RequestType
	customResourceRequest.ResponseURL = lambdaEvent.ResponseURL
	customResourceRequest.StackID = lambdaEvent.StackID
	customResourceRequest.RequestID = lambdaEvent.RequestID
	customResourceRequest.LogicalResourceID = lambdaEvent.LogicalResourceID
	customResourceRequest.PhysicalResourceID = lambdaEvent.PhysicalResourceID
	customResourceRequest.LogGroupName = context.LogGroupName
	customResourceRequest.LogStreamName = context.LogStreamName
	customResourceRequest.ResourceProperties = lambdaEvent.ResourceProperties
	// CloudFormation needs a physical id; synthesize one when absent.
	if "" == customResourceRequest.PhysicalResourceID {
		customResourceRequest.PhysicalResourceID = fmt.Sprintf("LogStreamName: %s", context.LogStreamName)
	}
	requestErr := cloudformationresources.Handle(customResourceRequest, creds, logger)
	if requestErr != nil {
		http.Error(w, requestErr.Error(), http.StatusInternalServerError)
	} else {
		fmt.Fprint(w, "CustomResource handled: "+lambdaEvent.LogicalResourceID)
	}
}
// LambdaHTTPHandler is an HTTP compliant handler that implements
// ServeHTTP
type LambdaHTTPHandler struct {
// Map of lambda function name to handler info
LambdaDispatchMap dispatchMap
// muValue guards customCreds, which may be set after construction.
muValue sync.Mutex
// Optional user supplied credentials for custom resource calls
customCreds credentials.Value
// Map of custom resource user function name to handler info
customResourceDispatchMap customResourceDispatchMap
logger *logrus.Logger
}
// Credentials allows the user to supply a custom Credentials value
// object for any internal calls
func (handler *LambdaHTTPHandler) Credentials(creds credentials.Value) {
handler.muValue.Lock()
defer handler.muValue.Unlock()
handler.customCreds = creds
}
// ServeHTTP dispatches an incoming proxied request to the matching Sparta
// lambda function or CloudFormation custom resource handler. The special
// path /golang/expvar serves the process expvar snapshot.
func (handler *LambdaHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// If this is the expvar handler then skip it
	if "/golang/expvar" == req.URL.Path {
		expvarHandler(w, req)
		return
	}
	// Remove the leading slash and dispatch it to the golang handler
	lambdaFunc := strings.TrimLeft(req.URL.Path, "/")
	decoder := json.NewDecoder(req.Body)
	var request lambdaRequest
	// Convert handler panics into HTTP errors rather than killing the process.
	defer func() {
		if r := recover(); r != nil {
			err, ok := r.(error)
			if !ok {
				err = fmt.Errorf("%v", r)
			}
			errorString := fmt.Sprintf("Lambda handler panic: %#v", err)
			http.Error(w, errorString, http.StatusBadRequest)
		}
	}()
	err := decoder.Decode(&request)
	if nil != err {
		errorString := fmt.Sprintf("Failed to decode proxy request: %s", err.Error())
		http.Error(w, errorString, http.StatusBadRequest)
		return
	}
	handler.logger.WithFields(logrus.Fields{
		"Request":    request,
		"LookupName": lambdaFunc,
	}).Debug("Dispatching")
	lambdaAWSInfo := handler.LambdaDispatchMap[lambdaFunc]
	if nil != lambdaAWSInfo {
		lambdaAWSInfo.lambdaFn(&request.Event, &request.Context, w, handler.logger)
	} else if strings.Contains(lambdaFunc, "::") {
		// BUGFIX: customCreds is written by Credentials() under muValue but
		// was previously read here without the lock - a data race. Snapshot
		// it under the mutex before use.
		handler.muValue.Lock()
		creds := handler.customCreds
		handler.muValue.Unlock()
		// Not the most exhaustive guard, but the CloudFormation custom resources
		// all have "::" delimiters in their type field. Even if there is a false
		// positive, the spartaCustomResourceForwarder will simply error out.
		spartaCustomResourceForwarder(creds,
			&request.Event,
			&request.Context,
			w,
			handler.logger)
	} else {
		// Final check for user-defined resource
		customResource, exists := handler.customResourceDispatchMap[lambdaFunc]
		handler.logger.WithFields(logrus.Fields{
			"Request":    request,
			"LookupName": lambdaFunc,
			"Exists":     exists,
		}).Debug("Custom Resource request")
		if exists {
			userDefinedCustomResourceForwarder(customResource,
				&request.Event,
				&request.Context,
				w,
				handler.logger)
		} else {
			http.Error(w, "Unsupported path: "+lambdaFunc, http.StatusBadRequest)
		}
	}
}
// NewLambdaHTTPHandler returns an initialized LambdaHTTPHandler instance. The returned value
// can be provided to https://golang.org/pkg/net/http/httptest/#NewServer to perform
// localhost testing.
func NewLambdaHTTPHandler(lambdaAWSInfos []*LambdaAWSInfo, logger *logrus.Logger) *LambdaHTTPHandler {
	dispatch := make(dispatchMap)
	customResources := make(customResourceDispatchMap)
	for _, eachInfo := range lambdaAWSInfos {
		logger.WithFields(logrus.Fields{
			"Path": eachInfo.lambdaFunctionName(),
		}).Debug("Registering lambda URL")
		dispatch[eachInfo.lambdaFunctionName()] = eachInfo
		// Register every CloudFormation custom resource owned by this lambda.
		for _, eachResource := range eachInfo.customResources {
			logger.WithFields(logrus.Fields{
				"Path": eachResource.userFunctionName,
			}).Debug("Registering customResource URL")
			customResources[eachResource.userFunctionName] = eachResource
		}
	}
	return &LambdaHTTPHandler{
		LambdaDispatchMap:         dispatch,
		customResourceDispatchMap: customResources,
		logger:                    logger,
	}
}
|
package probe
import (
"sync"
"time"
"github.com/armon/go-metrics"
log "github.com/sirupsen/logrus"
"github.com/weaveworks/scope/report"
)
const (
reportBufferSize = 16
)
// ReportPublisher publishes reports, probably to a remote collector.
type ReportPublisher interface {
Publish(r report.Report) error
}
// Probe sits there, generating and publishing reports.
type Probe struct {
spyInterval, publishInterval time.Duration
publisher ReportPublisher
noControls bool
tickers []Ticker
reporters []Reporter
taggers []Tagger
// quit is closed by Stop() to terminate the spy and publish loops.
quit chan struct{}
// done is waited on by Stop() until both loops have exited.
done sync.WaitGroup
// spiedReports carries periodically generated reports to the publish loop.
spiedReports chan report.Report
// shortcutReports carries reports queued via Publish() for immediate send.
shortcutReports chan report.Report
}
// Tagger tags nodes with value-add node metadata.
type Tagger interface {
Name() string
Tag(r report.Report) (report.Report, error)
}
// Reporter generates Reports.
type Reporter interface {
Name() string
Report() (report.Report, error)
}
// ReporterFunc uses a function to implement a Reporter
func ReporterFunc(name string, f func() (report.Report, error)) Reporter {
return reporterFunc{name, f}
}
// reporterFunc adapts a named plain function to the Reporter interface.
type reporterFunc struct {
name string
f func() (report.Report, error)
}
func (r reporterFunc) Name() string { return r.name }
func (r reporterFunc) Report() (report.Report, error) { return r.f() }
// Ticker is something which will be invoked every spyDuration.
// It's useful for things that should be updated on that interval.
// For example, cached shared state between Taggers and Reporters.
type Ticker interface {
Name() string
Tick() error
}
// New makes a new Probe. Both report channels are buffered with
// reportBufferSize entries so producers rarely block.
func New(
	spyInterval, publishInterval time.Duration,
	publisher ReportPublisher,
	noControls bool,
) *Probe {
	return &Probe{
		spyInterval:     spyInterval,
		publishInterval: publishInterval,
		publisher:       publisher,
		noControls:      noControls,
		quit:            make(chan struct{}),
		spiedReports:    make(chan report.Report, reportBufferSize),
		shortcutReports: make(chan report.Report, reportBufferSize),
	}
}
// AddTagger adds a new Tagger to the Probe
// NOTE(review): the Add* methods are unsynchronized; callers appear expected
// to register everything before Start() - confirm.
func (p *Probe) AddTagger(ts ...Tagger) {
p.taggers = append(p.taggers, ts...)
}
// AddReporter adds a new Reporter to the Probe
func (p *Probe) AddReporter(rs ...Reporter) {
p.reporters = append(p.reporters, rs...)
}
// AddTicker adds a new Ticker to the Probe
func (p *Probe) AddTicker(ts ...Ticker) {
p.tickers = append(p.tickers, ts...)
}
// Start starts the probe's spy and publish goroutines.
func (p *Probe) Start() {
p.done.Add(2)
go p.spyLoop()
go p.publishLoop()
}
// Stop stops the probe and blocks until both loops have exited.
func (p *Probe) Stop() error {
close(p.quit)
p.done.Wait()
return nil
}
// Publish will queue a report for immediate publication,
// bypassing the spy tick
func (p *Probe) Publish(rpt report.Report) {
rpt = p.tag(rpt)
p.shortcutReports <- rpt
}
// spyLoop periodically ticks, generates and tags a report, and queues it
// for the publish loop; it exits when quit is closed.
func (p *Probe) spyLoop() {
defer p.done.Done()
spyTick := time.Tick(p.spyInterval)
for {
select {
case <-spyTick:
t := time.Now()
p.tick()
rpt := p.report()
rpt = p.tag(rpt)
p.spiedReports <- rpt
// NOTE(review): metric key "Report Generaton" is misspelled; renaming it
// would change the emitted metric name - confirm before fixing.
metrics.MeasureSince([]string{"Report Generaton"}, t)
case <-p.quit:
return
}
}
}
// tick invokes every registered Ticker once, recording per-ticker timing
// metrics and logging (but not propagating) any error.
func (p *Probe) tick() {
	for _, ticker := range p.tickers {
		t := time.Now()
		err := ticker.Tick()
		metrics.MeasureSince([]string{ticker.Name(), "ticker"}, t)
		if err != nil {
			// Include the ticker name so the failing source is traceable.
			log.Errorf("Error doing %s ticker: %v", ticker.Name(), err)
		}
	}
}
// report runs all reporters concurrently and merges their results into a
// single report. Slow reporters are warned about; a failed reporter
// contributes an empty report so the merge still completes.
func (p *Probe) report() report.Report {
	reports := make(chan report.Report, len(p.reporters))
	for _, rep := range p.reporters {
		go func(rep Reporter) {
			t := time.Now()
			timer := time.AfterFunc(p.spyInterval, func() { log.Warningf("%v reporter took longer than %v", rep.Name(), p.spyInterval) })
			newReport, err := rep.Report()
			if !timer.Stop() {
				log.Warningf("%v reporter took %v (longer than %v)", rep.Name(), time.Now().Sub(t), p.spyInterval)
			}
			metrics.MeasureSince([]string{rep.Name(), "reporter"}, t)
			if err != nil {
				// Include the reporter name so the failing source is traceable.
				log.Errorf("Error generating %s report: %v", rep.Name(), err)
				newReport = report.MakeReport() // empty is OK to merge
			}
			reports <- newReport
		}(rep)
	}
	result := report.MakeReport()
	for i := 0; i < cap(reports); i++ {
		result = result.Merge(<-reports)
	}
	return result
}
// tag runs the report through every registered Tagger in order, timing each
// and logging (but not propagating) any error.
func (p *Probe) tag(r report.Report) report.Report {
	var err error
	for _, tagger := range p.taggers {
		t := time.Now()
		timer := time.AfterFunc(p.spyInterval, func() { log.Warningf("%v tagger took longer than %v", tagger.Name(), p.spyInterval) })
		r, err = tagger.Tag(r)
		if !timer.Stop() {
			log.Warningf("%v tagger took %v (longer than %v)", tagger.Name(), time.Now().Sub(t), p.spyInterval)
		}
		metrics.MeasureSince([]string{tagger.Name(), "tagger"}, t)
		if err != nil {
			// Include the tagger name so the failing source is traceable.
			log.Errorf("Error applying %s tagger: %v", tagger.Name(), err)
		}
	}
	return r
}
// drainAndPublish merges rpt with any reports already queued on rs, then
// publishes the combined report. With noControls set, controls are stripped
// from every topology before publishing.
func (p *Probe) drainAndPublish(rpt report.Report, rs chan report.Report) {
ForLoop:
for {
select {
case r := <-rs:
rpt = rpt.Merge(r)
default:
break ForLoop
}
}
if p.noControls {
rpt.WalkTopologies(func(t *report.Topology) {
t.Controls = report.Controls{}
})
}
if err := p.publisher.Publish(rpt); err != nil {
log.Infof("publish: %v", err)
}
}
// publishLoop publishes batched spy reports on each publish tick and
// shortcut reports as soon as they arrive; it exits when quit is closed.
func (p *Probe) publishLoop() {
defer p.done.Done()
pubTick := time.Tick(p.publishInterval)
for {
select {
case <-pubTick:
p.drainAndPublish(report.MakeReport(), p.spiedReports)
case rpt := <-p.shortcutReports:
p.drainAndPublish(rpt, p.shortcutReports)
case <-p.quit:
return
}
}
}
refactor(logs): Add reporter name to error logs.
1. Adding reporter name to logs improves tracing of error.
2. Capitalize first letter of error log.
Fixes https://github.com/weaveworks/scope/issues/3306
Signed-off-by: Prince Rachit Sinha <0070ab145a802efb4bd608afc498d66f3357eb84@mayadata.io>
package probe
import (
"sync"
"time"
"github.com/armon/go-metrics"
log "github.com/sirupsen/logrus"
"github.com/weaveworks/scope/report"
)
const (
reportBufferSize = 16
)
// ReportPublisher publishes reports, probably to a remote collector.
type ReportPublisher interface {
Publish(r report.Report) error
}
// Probe sits there, generating and publishing reports.
type Probe struct {
spyInterval, publishInterval time.Duration
publisher ReportPublisher
noControls bool
tickers []Ticker
reporters []Reporter
taggers []Tagger
// quit is closed by Stop() to terminate the spy and publish loops.
quit chan struct{}
// done is waited on by Stop() until both loops have exited.
done sync.WaitGroup
// spiedReports carries periodically generated reports to the publish loop.
spiedReports chan report.Report
// shortcutReports carries reports queued via Publish() for immediate send.
shortcutReports chan report.Report
}
// Tagger tags nodes with value-add node metadata.
type Tagger interface {
Name() string
Tag(r report.Report) (report.Report, error)
}
// Reporter generates Reports.
type Reporter interface {
Name() string
Report() (report.Report, error)
}
// ReporterFunc uses a function to implement a Reporter
func ReporterFunc(name string, f func() (report.Report, error)) Reporter {
return reporterFunc{name, f}
}
// reporterFunc adapts a named plain function to the Reporter interface.
type reporterFunc struct {
name string
f func() (report.Report, error)
}
func (r reporterFunc) Name() string { return r.name }
func (r reporterFunc) Report() (report.Report, error) { return r.f() }
// Ticker is something which will be invoked every spyDuration.
// It's useful for things that should be updated on that interval.
// For example, cached shared state between Taggers and Reporters.
type Ticker interface {
Name() string
Tick() error
}
// New makes a new Probe. Both report channels are buffered with
// reportBufferSize entries so producers rarely block.
func New(
spyInterval, publishInterval time.Duration,
publisher ReportPublisher,
noControls bool,
) *Probe {
result := &Probe{
spyInterval: spyInterval,
publishInterval: publishInterval,
publisher: publisher,
noControls: noControls,
quit: make(chan struct{}),
spiedReports: make(chan report.Report, reportBufferSize),
shortcutReports: make(chan report.Report, reportBufferSize),
}
return result
}
// AddTagger adds a new Tagger to the Probe
// NOTE(review): the Add* methods are unsynchronized; callers appear expected
// to register everything before Start() - confirm.
func (p *Probe) AddTagger(ts ...Tagger) {
p.taggers = append(p.taggers, ts...)
}
// AddReporter adds a new Reporter to the Probe
func (p *Probe) AddReporter(rs ...Reporter) {
p.reporters = append(p.reporters, rs...)
}
// AddTicker adds a new Ticker to the Probe
func (p *Probe) AddTicker(ts ...Ticker) {
p.tickers = append(p.tickers, ts...)
}
// Start starts the probe's spy and publish goroutines.
func (p *Probe) Start() {
p.done.Add(2)
go p.spyLoop()
go p.publishLoop()
}
// Stop stops the probe and blocks until both loops have exited.
func (p *Probe) Stop() error {
close(p.quit)
p.done.Wait()
return nil
}
// Publish will queue a report for immediate publication,
// bypassing the spy tick
func (p *Probe) Publish(rpt report.Report) {
rpt = p.tag(rpt)
p.shortcutReports <- rpt
}
// spyLoop periodically ticks, generates and tags a report, and queues it
// for the publish loop; it exits when quit is closed.
func (p *Probe) spyLoop() {
defer p.done.Done()
spyTick := time.Tick(p.spyInterval)
for {
select {
case <-spyTick:
t := time.Now()
p.tick()
rpt := p.report()
rpt = p.tag(rpt)
p.spiedReports <- rpt
// NOTE(review): metric key "Report Generaton" is misspelled; renaming it
// would change the emitted metric name - confirm before fixing.
metrics.MeasureSince([]string{"Report Generaton"}, t)
case <-p.quit:
return
}
}
}
// tick invokes every registered Ticker once, recording per-ticker timing
// metrics and logging (but not propagating) any error.
func (p *Probe) tick() {
	for _, ticker := range p.tickers {
		t := time.Now()
		err := ticker.Tick()
		metrics.MeasureSince([]string{ticker.Name(), "ticker"}, t)
		if err != nil {
			// Include the ticker name for traceability, matching the
			// reporter error message style.
			log.Errorf("Error doing %s ticker: %v", ticker.Name(), err)
		}
	}
}
// report runs all reporters concurrently and merges their results into a
// single report. Slow reporters are warned about; a failed reporter
// contributes an empty report so the merge still completes.
func (p *Probe) report() report.Report {
reports := make(chan report.Report, len(p.reporters))
for _, rep := range p.reporters {
go func(rep Reporter) {
t := time.Now()
timer := time.AfterFunc(p.spyInterval, func() { log.Warningf("%v reporter took longer than %v", rep.Name(), p.spyInterval) })
newReport, err := rep.Report()
if !timer.Stop() {
log.Warningf("%v reporter took %v (longer than %v)", rep.Name(), time.Now().Sub(t), p.spyInterval)
}
metrics.MeasureSince([]string{rep.Name(), "reporter"}, t)
if err != nil {
log.Errorf("Error generating %s report: %v", rep.Name(), err)
newReport = report.MakeReport() // empty is OK to merge
}
reports <- newReport
}(rep)
}
// One result arrives per reporter; merge them all.
result := report.MakeReport()
for i := 0; i < cap(reports); i++ {
result = result.Merge(<-reports)
}
return result
}
// tag runs the report through every registered Tagger in order, timing each
// and logging (but not propagating) any error.
func (p *Probe) tag(r report.Report) report.Report {
	var err error
	for _, tagger := range p.taggers {
		t := time.Now()
		timer := time.AfterFunc(p.spyInterval, func() { log.Warningf("%v tagger took longer than %v", tagger.Name(), p.spyInterval) })
		r, err = tagger.Tag(r)
		if !timer.Stop() {
			log.Warningf("%v tagger took %v (longer than %v)", tagger.Name(), time.Now().Sub(t), p.spyInterval)
		}
		metrics.MeasureSince([]string{tagger.Name(), "tagger"}, t)
		if err != nil {
			// Include the tagger name for traceability, matching the
			// reporter error message style.
			log.Errorf("Error applying %s tagger: %v", tagger.Name(), err)
		}
	}
	return r
}
// drainAndPublish merges rpt with any reports already queued on rs, then
// publishes the combined report. With noControls set, controls are stripped
// from every topology before publishing.
func (p *Probe) drainAndPublish(rpt report.Report, rs chan report.Report) {
ForLoop:
for {
select {
case r := <-rs:
rpt = rpt.Merge(r)
default:
break ForLoop
}
}
if p.noControls {
rpt.WalkTopologies(func(t *report.Topology) {
t.Controls = report.Controls{}
})
}
if err := p.publisher.Publish(rpt); err != nil {
log.Infof("Publish: %v", err)
}
}
// publishLoop publishes batched spy reports on each publish tick and
// shortcut reports as soon as they arrive; it exits when quit is closed.
func (p *Probe) publishLoop() {
defer p.done.Done()
pubTick := time.Tick(p.publishInterval)
for {
select {
case <-pubTick:
p.drainAndPublish(report.MakeReport(), p.spiedReports)
case rpt := <-p.shortcutReports:
p.drainAndPublish(rpt, p.shortcutReports)
case <-p.quit:
return
}
}
}
|
package hoverfly_end_to_end_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"testing"
"github.com/dghubble/sling"
"strings"
"net/http"
"fmt"
"encoding/json"
"io/ioutil"
"path/filepath"
"os"
"os/exec"
"strconv"
"time"
"gopkg.in/yaml.v2"
)
const (
simulate = "simulate"
capture = "capture"
synthesize = "synthesize"
modify = "modify"
)
var (
hoverctlBinary string
hoverctlCacheDir string
workingDirectory string
)
// TestHoverflyEndToEnd wires the Ginkgo suite into the standard go test runner.
func TestHoverflyEndToEnd(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Hoverfly End To End Suite")
}
// BeforeSuite resolves the repo working directory, derives the hoverctl
// cache/binary locations, and prepends bin/ to PATH.
var _ = BeforeSuite(func() {
	// BUGFIX: ":=" shadowed the package-level workingDirectory, leaving it
	// empty for helpers such as WriteConfigurationWithAuth; assign instead.
	workingDirectory, _ = os.Getwd()
	hoverctlCacheDir = filepath.Join(workingDirectory, ".hoverfly/cache")
	hoverctlBinary = filepath.Join(workingDirectory, "bin/hoverctl")
	binDirectory := filepath.Join(workingDirectory, "bin")
	os.Setenv("PATH", fmt.Sprintf("%v:%v", binDirectory, os.Getenv("PATH")))
})
// SetHoverflyMode switches the running Hoverfly instance into the given
// mode via its admin API and asserts the call succeeded.
func SetHoverflyMode(mode string, port int) {
req := sling.New().Post(fmt.Sprintf("http://localhost:%v/api/state", port)).Body(strings.NewReader(`{"mode":"` + mode +`"}`))
res := DoRequest(req)
Expect(res.StatusCode).To(Equal(200))
}
// DoRequest builds and executes the given sling request, failing the spec
// on any build or transport error.
func DoRequest(r *sling.Sling) (*http.Response) {
req, err := r.Request()
Expect(err).To(BeNil())
response, err := http.DefaultClient.Do(req)
Expect(err).To(BeNil())
return response
}
// GetHoverflyMode queries the admin API and returns the current Hoverfly mode.
func GetHoverflyMode(port int) string {
	currentState := &stateRequest{}
	resp := DoRequest(sling.New().Get(fmt.Sprintf("http://localhost:%v/api/state", port)))
	// BUGFIX: close the response body so the underlying connection can be
	// reused instead of leaking.
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	Expect(err).To(BeNil())
	err = json.Unmarshal(body, currentState)
	Expect(err).To(BeNil())
	return currentState.Mode
}
// stateRequest mirrors the admin API /api/state JSON payload.
type stateRequest struct {
Mode string `json:"mode"`
Destination string `json:"destination"`
}
// startHoverfly launches the hoverfly binary from workingDir with an
// in-memory database on the given admin/proxy ports, waits for the admin
// API to respond, and returns the running command.
func startHoverfly(adminPort, proxyPort int, workingDir string) *exec.Cmd {
	binary := filepath.Join(workingDir, "bin/hoverfly")
	cmd := exec.Command(binary, "-db", "memory", "-ap", strconv.Itoa(adminPort), "-pp", strconv.Itoa(proxyPort))
	if startErr := cmd.Start(); startErr != nil {
		fmt.Println("Unable to start Hoverfly")
		fmt.Println(binary)
		fmt.Println("Is the binary there?")
		os.Exit(1)
	}
	healthCheck := func() int {
		resp, err := http.Get(fmt.Sprintf("http://localhost:%v/api/state", adminPort))
		if err != nil {
			fmt.Println(err.Error())
			return 0
		}
		return resp.StatusCode
	}
	Eventually(healthCheck, time.Second*3).Should(BeNumerically("==", http.StatusOK))
	return cmd
}
// startHoverflyWithAuth provisions a user in the hoverfly database and then
// launches hoverfly with authentication enabled, waiting until the admin
// API responds before returning the running command.
func startHoverflyWithAuth(adminPort, proxyPort int, workingDir, username, password string) *exec.Cmd {
	hoverflyBinaryUri := filepath.Join(workingDir, "bin/hoverfly")
	hoverflyAddUserCmd := exec.Command(hoverflyBinaryUri, "-add", "-username", username, "-password", password)
	// BUGFIX: use Run (not Start) so the add-user process completes before
	// the server is launched; Start raced the two processes and the user
	// could be missing when hoverfly came up.
	err := hoverflyAddUserCmd.Run()
	if err != nil {
		fmt.Println("Unable to start Hoverfly to add user")
		fmt.Println(hoverflyBinaryUri)
		fmt.Println("Is the binary there?")
		os.Exit(1)
	}
	hoverflyCmd := exec.Command(hoverflyBinaryUri, "-auth", "true", "-db", "memory", "-ap", strconv.Itoa(adminPort), "-pp", strconv.Itoa(proxyPort))
	err = hoverflyCmd.Start()
	if err != nil {
		fmt.Println("Unable to start Hoverfly")
		fmt.Println(hoverflyBinaryUri)
		fmt.Println("Is the binary there?")
		os.Exit(1)
	}
	Eventually(func() int {
		resp, err := http.Get(fmt.Sprintf("http://localhost:%v/api/state", adminPort))
		if err != nil {
			fmt.Println(err.Error())
			return 0
		}
		return resp.StatusCode
	}, time.Second*3).Should(BeNumerically("==", http.StatusOK))
	return hoverflyCmd
}
// testConfig mirrors the hoverctl config.yaml schema.
type testConfig struct {
HoverflyHost string `yaml:"hoverfly.host"`
HoverflyAdminPort string `yaml:"hoverfly.admin.port"`
HoverflyProxyPort string `yaml:"hoverfly.proxy.port"`
HoverflyUsername string `yaml:"hoverfly.username"`
HoverflyPassword string `yaml:"hoverfly.password"`
}
// WriteConfiguration writes a config.yaml without credentials.
func WriteConfiguration(host, adminPort, proxyPort string) {
WriteConfigurationWithAuth(host, adminPort, proxyPort, "", "")
}
// WriteConfigurationWithAuth writes a hoverctl config.yaml beneath
// workingDirectory/.hoverfly, substituting defaults for any empty argument.
func WriteConfigurationWithAuth(host, adminPort, proxyPort, username, password string) {
	configHost := "localhost"
	configAdminPort := "8888"
	configProxyPort := "8500"
	configUsername := ""
	configPassword := ""
	if len(host) > 0 {
		configHost = host
	}
	if len(adminPort) > 0 {
		configAdminPort = adminPort
	}
	if len(proxyPort) > 0 {
		configProxyPort = proxyPort
	}
	if len(username) > 0 {
		configUsername = username
	}
	if len(password) > 0 {
		configPassword = password
	}
	testConfig := testConfig{
		HoverflyHost:      configHost,
		HoverflyAdminPort: configAdminPort,
		HoverflyProxyPort: configProxyPort,
		HoverflyUsername:  configUsername,
		HoverflyPassword:  configPassword,
	}
	// BUGFIX: surface marshal/write failures instead of silently discarding
	// them; also avoid shadowing the filepath package with a local variable.
	data, marshalErr := yaml.Marshal(testConfig)
	if marshalErr != nil {
		fmt.Println("Unable to marshal test configuration: " + marshalErr.Error())
		return
	}
	configPath := filepath.Join(workingDirectory, ".hoverfly", "config.yaml")
	if writeErr := ioutil.WriteFile(configPath, data, 0644); writeErr != nil {
		fmt.Println("Unable to write test configuration: " + writeErr.Error())
	}
}
Changed the hoverfly command for adding the test user to use Run() instead of Start()
package hoverfly_end_to_end_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"testing"
"github.com/dghubble/sling"
"strings"
"net/http"
"fmt"
"encoding/json"
"io/ioutil"
"path/filepath"
"os"
"os/exec"
"strconv"
"time"
"gopkg.in/yaml.v2"
)
const (
simulate = "simulate"
capture = "capture"
synthesize = "synthesize"
modify = "modify"
)
var (
hoverctlBinary string
hoverctlCacheDir string
workingDirectory string
)
// TestHoverflyEndToEnd wires the Ginkgo suite into the standard go test runner.
func TestHoverflyEndToEnd(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Hoverfly End To End Suite")
}
// BeforeSuite resolves the repo working directory, derives the hoverctl
// cache/binary locations, and prepends bin/ to PATH.
var _ = BeforeSuite(func() {
	// BUGFIX: ":=" shadowed the package-level workingDirectory, leaving it
	// empty for helpers such as WriteConfigurationWithAuth; assign instead.
	workingDirectory, _ = os.Getwd()
	hoverctlCacheDir = filepath.Join(workingDirectory, ".hoverfly/cache")
	hoverctlBinary = filepath.Join(workingDirectory, "bin/hoverctl")
	binDirectory := filepath.Join(workingDirectory, "bin")
	os.Setenv("PATH", fmt.Sprintf("%v:%v", binDirectory, os.Getenv("PATH")))
})
// SetHoverflyMode switches the running Hoverfly instance into the given
// mode via its admin API and asserts the call succeeded.
func SetHoverflyMode(mode string, port int) {
req := sling.New().Post(fmt.Sprintf("http://localhost:%v/api/state", port)).Body(strings.NewReader(`{"mode":"` + mode +`"}`))
res := DoRequest(req)
Expect(res.StatusCode).To(Equal(200))
}
// DoRequest builds and executes the given sling request, failing the spec
// on any build or transport error.
func DoRequest(r *sling.Sling) (*http.Response) {
req, err := r.Request()
Expect(err).To(BeNil())
response, err := http.DefaultClient.Do(req)
Expect(err).To(BeNil())
return response
}
// GetHoverflyMode queries the admin API and returns the current Hoverfly mode.
func GetHoverflyMode(port int) string {
	currentState := &stateRequest{}
	resp := DoRequest(sling.New().Get(fmt.Sprintf("http://localhost:%v/api/state", port)))
	// BUGFIX: close the response body so the underlying connection can be
	// reused instead of leaking.
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	Expect(err).To(BeNil())
	err = json.Unmarshal(body, currentState)
	Expect(err).To(BeNil())
	return currentState.Mode
}
// stateRequest mirrors the admin API /api/state JSON payload.
type stateRequest struct {
Mode string `json:"mode"`
Destination string `json:"destination"`
}
// startHoverfly launches the hoverfly binary from workingDir with an
// in-memory database on the given admin/proxy ports, waits for the admin
// API to respond, and returns the running command.
func startHoverfly(adminPort, proxyPort int, workingDir string) * exec.Cmd {
hoverflyBinaryUri := filepath.Join(workingDir, "bin/hoverfly")
hoverflyCmd := exec.Command(hoverflyBinaryUri, "-db", "memory", "-ap", strconv.Itoa(adminPort), "-pp", strconv.Itoa(proxyPort))
err := hoverflyCmd.Start()
if err != nil {
fmt.Println("Unable to start Hoverfly")
fmt.Println(hoverflyBinaryUri)
fmt.Println("Is the binary there?")
os.Exit(1)
}
// Poll the admin API for up to 3s until the server is ready.
Eventually(func() int {
resp, err := http.Get(fmt.Sprintf("http://localhost:%v/api/state", adminPort))
if err == nil {
return resp.StatusCode
} else {
fmt.Println(err.Error())
return 0
}
}, time.Second * 3).Should(BeNumerically("==", http.StatusOK))
return hoverflyCmd
}
// startHoverflyWithAuth provisions a user in the hoverfly database (Run
// blocks until the add-user process finishes) and then launches hoverfly
// with authentication enabled, returning the running command.
func startHoverflyWithAuth(adminPort, proxyPort int, workingDir, username, password string) (*exec.Cmd) {
hoverflyBinaryUri := filepath.Join(workingDir, "bin/hoverfly")
hoverflyAddUserCmd := exec.Command(hoverflyBinaryUri, "-add", "-username", username, "-password", password, "-ap", strconv.Itoa(adminPort), "-pp", strconv.Itoa(proxyPort))
err := hoverflyAddUserCmd.Run()
if err != nil {
fmt.Println("Unable to start Hoverfly to add user")
fmt.Println(hoverflyBinaryUri)
fmt.Println("Is the binary there?")
os.Exit(1)
}
hoverflyCmd := exec.Command(hoverflyBinaryUri, "-auth", "true", "-db", "memory", "-ap", strconv.Itoa(adminPort), "-pp", strconv.Itoa(proxyPort))
err = hoverflyCmd.Start()
if err != nil {
fmt.Println("Unable to start Hoverfly")
fmt.Println(hoverflyBinaryUri)
fmt.Println("Is the binary there?")
os.Exit(1)
}
// NOTE(review): this readiness probe polls the server root rather than
// /api/state as the unauthenticated variant does - presumably because
// /api/state requires auth here; confirm.
Eventually(func() int {
resp, err := http.Get(fmt.Sprintf("http://localhost:%v", adminPort))
if err == nil {
return resp.StatusCode
} else {
fmt.Println(err.Error())
return 0
}
}, time.Second * 3).Should(BeNumerically("==", http.StatusOK))
return hoverflyCmd
}
// testConfig mirrors the hoverctl config.yaml schema.
type testConfig struct {
HoverflyHost string `yaml:"hoverfly.host"`
HoverflyAdminPort string `yaml:"hoverfly.admin.port"`
HoverflyProxyPort string `yaml:"hoverfly.proxy.port"`
HoverflyUsername string `yaml:"hoverfly.username"`
HoverflyPassword string `yaml:"hoverfly.password"`
}
// WriteConfiguration writes a config.yaml without credentials.
func WriteConfiguration(host, adminPort, proxyPort string) {
WriteConfigurationWithAuth(host, adminPort, proxyPort, "", "")
}
// WriteConfigurationWithAuth writes a hoverctl config.yaml beneath
// workingDirectory/.hoverfly, substituting defaults for any empty argument.
func WriteConfigurationWithAuth(host, adminPort, proxyPort, username, password string) {
configHost := "localhost"
configAdminPort := "8888"
configProxyPort := "8500"
configUsername := ""
configPassword := ""
if len(host) > 0 {
configHost = host
}
if len(adminPort) > 0 {
configAdminPort = adminPort
}
if len(proxyPort) > 0 {
configProxyPort = proxyPort
}
if len(username) > 0 {
configUsername = username
}
if len(password) > 0 {
configPassword = password
}
testConfig := testConfig{
HoverflyHost:configHost,
HoverflyAdminPort: configAdminPort,
HoverflyProxyPort: configProxyPort,
HoverflyUsername: configUsername,
HoverflyPassword: configPassword,
}
// NOTE(review): Marshal and WriteFile errors are silently discarded, and
// the local "filepath" shadows the filepath package - consider fixing.
data, _ := yaml.Marshal(testConfig)
filepath := filepath.Join(workingDirectory, ".hoverfly", "config.yaml")
ioutil.WriteFile(filepath, data, 0644)
}
package universal
Add a test for the signer/universal package
package universal
import (
"testing"
"time"
"github.com/cloudflare/cfssl/config"
)
// expiry is the certificate lifetime used by every test profile.
var expiry = 1 * time.Minute
// validLocalConfig is a minimal signing policy: one named profile plus a
// default, both limited to digital signatures.
var validLocalConfig = &config.Config{
Signing: &config.Signing{
Profiles: map[string]*config.SigningProfile{
"valid": {
Usage: []string{"digital signature"},
Expiry: expiry,
},
},
Default: &config.SigningProfile{
Usage: []string{"digital signature"},
Expiry: expiry,
},
},
}
// TestNewSigner verifies that a universal signer can be built from local
// CA key/cert files and a valid signing policy.
// NOTE(review): depends on the ../local/testdata fixtures being present.
func TestNewSigner(t *testing.T) {
h := map[string]string{
"key-file": "../local/testdata/ca_key.pem",
"cert-file": "../local/testdata/ca.pem",
}
r := &Root{
Config: h,
ForceRemote: false,
}
_, err := NewSigner(*r, validLocalConfig.Signing)
if err != nil {
t.Fatal(err)
}
}
|
package todolist
import (
"fmt"
"regexp"
"strings"
"time"
)
// App ties together persistent storage (TodoStore) and the in-memory
// TodoList that commands operate on.
type App struct {
TodoStore *FileStore
TodoList *TodoList
}
// NewApp constructs an App with an empty list and a file-backed store.
func NewApp() *App {
app := &App{TodoList: &TodoList{}, TodoStore: NewFileStore()}
return app
}
// InitializeRepo creates the backing storage for a new todo repo.
func (a *App) InitializeRepo() {
a.TodoStore.Initialize()
}
// AddTodo parses input into a new todo, appends it and persists the list.
func (a *App) AddTodo(input string) {
a.Load()
parser := &Parser{}
todo := parser.ParseNewTodo(input)
if todo == nil {
fmt.Println("I need more information. Try something like 'todo a chat with @bob due tom'")
return
}
// NOTE(review): assumes Add assigns NextId() to the new todo so the
// printed id matches - confirm.
id := a.TodoList.NextId()
a.TodoList.Add(todo)
a.Save()
fmt.Printf("Todo %d added.\n", id)
}
// DeleteTodo removes the todo identified in input and persists the change.
func (a *App) DeleteTodo(input string) {
a.Load()
id, _ := a.getId(input)
if id == -1 {
return
}
a.TodoList.Delete(id)
a.Save()
fmt.Println("Todo deleted.")
}
// CompleteTodo marks the identified todo as completed.
func (a *App) CompleteTodo(input string) {
a.Load()
id, _ := a.getId(input)
if id == -1 {
return
}
a.TodoList.Complete(id)
a.Save()
fmt.Println("Todo completed.")
}
// UncompleteTodo clears the completed flag on the identified todo.
func (a *App) UncompleteTodo(input string) {
a.Load()
id, _ := a.getId(input)
if id == -1 {
return
}
a.TodoList.Uncomplete(id)
a.Save()
fmt.Println("Todo uncompleted.")
}
// ArchiveTodo marks the identified todo as archived.
func (a *App) ArchiveTodo(input string) {
a.Load()
id, _ := a.getId(input)
if id == -1 {
return
}
a.TodoList.Archive(id)
a.Save()
fmt.Println("Todo archived.")
}
// UnarchiveTodo clears the archived flag on the identified todo.
func (a *App) UnarchiveTodo(input string) {
a.Load()
id, _ := a.getId(input)
if id == -1 {
return
}
a.TodoList.Unarchive(id)
a.Save()
fmt.Println("Todo unarchived.")
}
// EditTodoSubject replaces the subject of the identified todo.
func (a *App) EditTodoSubject(input string) {
	a.Load()
	_, id, subject := Parser{input}.Parse()
	if id == -1 {
		return
	}
	_, todo := a.getId(input)
	// BUGFIX: getId returns a nil todo for an unknown id; dereferencing it
	// below crashed the app.
	if todo == nil {
		fmt.Println("Todo not found.")
		return
	}
	todo.Subject = subject
	a.Save()
	fmt.Println("Todo subject updated.")
}
// EditTodoDue re-parses input for a due date and applies it to the todo.
// getId returning -1 guarantees todo is non-nil past the guard.
func (a *App) EditTodoDue(input string) {
a.Load()
id, todo := a.getId(input)
if id == -1 {
return
}
parser := &Parser{}
todo.Due = parser.Due(input, time.Now())
a.Save()
fmt.Println("Todo due date updated.")
}
// ExpandTodo replaces one todo with several: the text after the final ':'
// is split on commas and each piece becomes a new todo prefixed with the
// shared project, after which the original todo is deleted.
func (a *App) ExpandTodo(input string) {
a.Load()
id, _ := a.getId(input)
parser := &Parser{}
if id == -1 {
return
}
commonProject := parser.ExpandProject(input)
todos := strings.LastIndex(input, ":")
if commonProject == "" || len(input) <= todos+1 || todos == -1 {
fmt.Println("I'm expecting a format like \"todolist ex <project>: <todo1>, <todo2>, ...\"")
return
}
newTodos := strings.Split(input[todos+1:], ",")
for _, todo := range newTodos {
args := []string{"add ", commonProject, " ", todo}
a.AddTodo(strings.Join(args, ""))
}
// Remove the todo that was expanded.
a.TodoList.Delete(id)
a.Save()
fmt.Println("Todo expanded.")
}
// ArchiveCompleted archives every completed todo in one pass.
func (a *App) ArchiveCompleted() {
a.Load()
for _, todo := range a.TodoList.Todos() {
if todo.Completed {
todo.Archived = true
}
}
a.Save()
fmt.Println("All completed todos have been archived.")
}
// ListTodos prints todos filtered and grouped according to input.
func (a *App) ListTodos(input string) {
a.Load()
filtered := NewFilter(a.TodoList.Todos()).Filter(input)
grouped := a.getGroups(input, filtered)
formatter := NewFormatter(grouped)
formatter.Print()
}
// PrioritizeTodo flags the identified todo as priority.
func (a *App) PrioritizeTodo(input string) {
a.Load()
id, _ := a.getId(input)
if id == -1 {
return
}
a.TodoList.Prioritize(id)
a.Save()
fmt.Println("Todo prioritized.")
}
// UnprioritizeTodo clears the priority flag on the identified todo.
func (a *App) UnprioritizeTodo(input string) {
a.Load()
id, _ := a.getId(input)
if id == -1 {
return
}
a.TodoList.Unprioritize(id)
a.Save()
fmt.Println("Todo un-prioritized.")
}
// getId parses the id out of input and resolves it to a todo.
// Returns (-1, nil) and prints a message when the id is unknown.
func (a *App) getId(input string) (int, *Todo) {
_, id, _ := Parser{input}.Parse()
todo := a.TodoList.FindById(id)
if todo == nil {
fmt.Println("No such id.")
return -1, nil
}
return id, todo
}
// getGroups chooses a grouping for the todos based on a trailing
// "by c..." (context) or "by p..." (project) clause in input.
func (a *App) getGroups(input string, todos []*Todo) *GroupedTodos {
	grouper := &Grouper{}
	// The patterns are constant, so MustCompile cannot fail at runtime;
	// this replaces Compile calls whose errors were silently discarded.
	contextRegex := regexp.MustCompile(`by c.*$`)
	projectRegex := regexp.MustCompile(`by p.*$`)
	var grouped *GroupedTodos
	if contextRegex.MatchString(input) {
		grouped = grouper.GroupByContext(todos)
	} else if projectRegex.MatchString(input) {
		grouped = grouper.GroupByProject(todos)
	} else {
		grouped = grouper.GroupByNothing(todos)
	}
	return grouped
}
// GarbageCollect permanently removes eligible todos from the list.
func (a *App) GarbageCollect() {
a.Load()
a.TodoList.GarbageCollect()
a.Save()
fmt.Println("Garbage collection complete.")
}
// Load reads todos from the store into the in-memory list.
func (a *App) Load() error {
todos, err := a.TodoStore.Load()
if err != nil {
return err
}
a.TodoList.Load(todos)
return nil
}
// Save persists the in-memory list back to the store.
func (a *App) Save() {
a.TodoStore.Save(a.TodoList.Data)
}
Error out when an invalid id is given
package todolist
import (
"fmt"
"regexp"
"strings"
"time"
)
// App ties together persistent storage (TodoStore) and the in-memory
// TodoList that commands operate on.
type App struct {
TodoStore *FileStore
TodoList *TodoList
}
// NewApp constructs an App with an empty list and a file-backed store.
func NewApp() *App {
app := &App{TodoList: &TodoList{}, TodoStore: NewFileStore()}
return app
}
// InitializeRepo creates the backing storage for a new todo repo.
func (a *App) InitializeRepo() {
a.TodoStore.Initialize()
}
// AddTodo parses input into a new todo, appends it and persists the list.
func (a *App) AddTodo(input string) {
a.Load()
parser := &Parser{}
todo := parser.ParseNewTodo(input)
if todo == nil {
fmt.Println("I need more information. Try something like 'todo a chat with @bob due tom'")
return
}
// NOTE(review): assumes Add assigns NextId() to the new todo so the
// printed id matches - confirm.
id := a.TodoList.NextId()
a.TodoList.Add(todo)
a.Save()
fmt.Printf("Todo %d added.\n", id)
}
// DeleteTodo removes the todo named by the id in the input, then saves.
// getId already reports the error for an unknown id.
func (a *App) DeleteTodo(input string) {
	a.Load()
	if id, _ := a.getId(input); id != -1 {
		a.TodoList.Delete(id)
		a.Save()
		fmt.Println("Todo deleted.")
	}
}
// CompleteTodo marks the todo identified in the input as completed.
func (a *App) CompleteTodo(input string) {
	a.Load()
	id, _ := a.getId(input)
	if id == -1 {
		return
	}
	a.TodoList.Complete(id)
	a.Save()
	fmt.Println("Todo completed.")
}
// UncompleteTodo clears the completed flag on the identified todo.
func (a *App) UncompleteTodo(input string) {
	a.Load()
	id, _ := a.getId(input)
	if id == -1 {
		return
	}
	a.TodoList.Uncomplete(id)
	a.Save()
	fmt.Println("Todo uncompleted.")
}
// ArchiveTodo moves the identified todo to the archive.
func (a *App) ArchiveTodo(input string) {
	a.Load()
	id, _ := a.getId(input)
	if id == -1 {
		return
	}
	a.TodoList.Archive(id)
	a.Save()
	fmt.Println("Todo archived.")
}
// UnarchiveTodo restores the identified todo from the archive.
func (a *App) UnarchiveTodo(input string) {
	a.Load()
	id, _ := a.getId(input)
	if id == -1 {
		return
	}
	a.TodoList.Unarchive(id)
	a.Save()
	fmt.Println("Todo unarchived.")
}
// EditTodoSubject replaces the subject of the identified todo with the
// remainder of the input.
//
// The previous version parsed the input twice and reported a missing
// todo twice ("No such id." from getId followed by "Todo not found.");
// it now resolves the todo once via getId, matching EditTodoDue.
func (a *App) EditTodoSubject(input string) {
	a.Load()
	// Parse once for the new subject text; getId handles id resolution
	// and error reporting.
	_, _, subject := Parser{input}.Parse()
	id, todo := a.getId(input)
	if id == -1 {
		return
	}
	todo.Subject = subject
	a.Save()
	fmt.Println("Todo subject updated.")
}
// EditTodoDue re-parses the input for a due date and assigns it to the
// identified todo.
func (a *App) EditTodoDue(input string) {
	a.Load()
	id, todo := a.getId(input)
	if id == -1 {
		return
	}
	parser := &Parser{}
	// The due expression (e.g. "tom") is interpreted relative to now.
	todo.Due = parser.Due(input, time.Now())
	a.Save()
	fmt.Println("Todo due date updated.")
}
// ExpandTodo splits one todo into several: for input of the form
// "ex <id> <project>: <todo1>, <todo2>, ..." it deletes the original
// todo and adds one new todo per comma-separated item, each carrying the
// common project tag.
func (a *App) ExpandTodo(input string) {
	a.Load()
	id, _ := a.getId(input)
	parser := &Parser{}
	if id == -1 {
		return
	}
	commonProject := parser.ExpandProject(input)
	// Everything after the last ':' is the comma-separated list of new todos.
	todos := strings.LastIndex(input, ":")
	if commonProject == "" || len(input) <= todos+1 || todos == -1 {
		fmt.Println("I'm expecting a format like \"todolist ex <project>: <todo1>, <todo2>, ...\"")
		return
	}
	newTodos := strings.Split(input[todos+1:], ",")
	for _, todo := range newTodos {
		args := []string{"add ", commonProject, " ", todo}
		// NOTE(review): AddTodo performs a full Load/Save round-trip per
		// iteration — correct, but each append rewrites the store.
		a.AddTodo(strings.Join(args, ""))
	}
	a.TodoList.Delete(id)
	a.Save()
	fmt.Println("Todo expanded.")
}
// ArchiveCompleted flags every completed todo as archived and persists
// the list.
func (a *App) ArchiveCompleted() {
	a.Load()
	for _, t := range a.TodoList.Todos() {
		if !t.Completed {
			continue
		}
		t.Archived = true
	}
	a.Save()
	fmt.Println("All completed todos have been archived.")
}
// ListTodos filters the list by the query in input, groups the result
// (by context or project when requested), and pretty-prints it.
func (a *App) ListTodos(input string) {
	a.Load()
	filtered := NewFilter(a.TodoList.Todos()).Filter(input)
	grouped := a.getGroups(input, filtered)
	formatter := NewFormatter(grouped)
	formatter.Print()
}
// PrioritizeTodo marks the todo identified in the input as prioritized
// and saves; getId reports the error for an unknown id.
func (a *App) PrioritizeTodo(input string) {
	a.Load()
	if id, _ := a.getId(input); id != -1 {
		a.TodoList.Prioritize(id)
		a.Save()
		fmt.Println("Todo prioritized.")
	}
}
// UnprioritizeTodo clears the prioritized flag on the identified todo
// and saves; getId reports the error for an unknown id.
func (a *App) UnprioritizeTodo(input string) {
	a.Load()
	if id, _ := a.getId(input); id != -1 {
		a.TodoList.Unprioritize(id)
		a.Save()
		fmt.Println("Todo un-prioritized.")
	}
}
// getId parses a todo id out of the raw command input and resolves it to
// the matching todo. It prints "No such id." and returns (-1, nil) when
// no todo with that id exists, so callers only need to test id == -1.
func (a *App) getId(input string) (int, *Todo) {
	_, id, _ := Parser{input}.Parse()
	todo := a.TodoList.FindById(id)
	if todo == nil {
		fmt.Println("No such id.")
		return -1, nil
	}
	return id, todo
}
// getGroups groups the given todos according to the query: "by c..."
// groups by context, "by p..." groups by project, anything else yields a
// single ungrouped bucket.
func (a *App) getGroups(input string, todos []*Todo) *GroupedTodos {
	grouper := &Grouper{}
	// The patterns are compile-time constants, so MustCompile cannot fail;
	// the previous code silently discarded the Compile errors.
	contextRegex := regexp.MustCompile(`by c.*$`)
	projectRegex := regexp.MustCompile(`by p.*$`)
	switch {
	case contextRegex.MatchString(input):
		return grouper.GroupByContext(todos)
	case projectRegex.MatchString(input):
		return grouper.GroupByProject(todos)
	default:
		return grouper.GroupByNothing(todos)
	}
}
// GarbageCollect permanently removes collectable todos (the exact
// criteria live in TodoList.GarbageCollect) and persists the result.
func (a *App) GarbageCollect() {
	a.Load()
	a.TodoList.GarbageCollect()
	a.Save()
	fmt.Println("Garbage collection complete.")
}
// Load reads todos from the backing store into the in-memory list.
// NOTE(review): every caller in this file ignores the returned error; a
// failed load leaves the previous in-memory state untouched.
func (a *App) Load() error {
	todos, err := a.TodoStore.Load()
	if err != nil {
		return err
	}
	a.TodoList.Load(todos)
	return nil
}
// Save persists the current in-memory todo list to the backing store.
// NOTE(review): no failure is surfaced here — confirm whether
// FileStore.Save can report errors that should be propagated.
func (a *App) Save() {
	a.TodoStore.Save(a.TodoList.Data)
}
|
package infrastructure_test
import (
. "bosh/infrastructure"
fakeplatform "bosh/platform/fakes"
boshsettings "bosh/settings"
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"net/http"
"net/http/httptest"
"net/url"
"strings"
)
// FakeDnsResolver is a test double that records the arguments of the
// last LookupHost call and returns a canned IP.
type FakeDnsResolver struct {
	LookupHostIp         string   // IP to return from LookupHost
	LookupHostDnsServers []string // dnsServers passed to the last call
	LookupHostHost       string   // host passed to the last call
}
// LookupHost records its arguments for later assertions and returns the
// preconfigured IP with a nil error.
func (res *FakeDnsResolver) LookupHost(dnsServers []string, host string) (string, error) {
	res.LookupHostDnsServers = dnsServers
	res.LookupHostHost = host
	return res.LookupHostIp, nil
}
func init() {
Describe("AWS Infrastructure", func() {
Describe("SetupSsh", func() {
var (
ts *httptest.Server
aws Infrastructure
platform *fakeplatform.FakePlatform
)
const expectedKey = "some public key"
BeforeEach(func() {
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
Expect(r.Method).To(Equal("GET"))
Expect(r.URL.Path).To(Equal("/latest/meta-data/public-keys/0/openssh-key"))
w.Write([]byte(expectedKey))
})
ts = httptest.NewServer(handler)
platform = fakeplatform.NewFakePlatform()
})
AfterEach(func() {
ts.Close()
})
It("gets the public key and sets up ssh via the platform", func() {
aws = NewAwsInfrastructure(ts.URL, &FakeDnsResolver{}, platform)
err := aws.SetupSsh("vcap")
Expect(err).NotTo(HaveOccurred())
Expect(platform.SetupSshPublicKey).To(Equal(expectedKey))
Expect(platform.SetupSshUsername).To(Equal("vcap"))
})
})
Describe("GetSettings", func() {
var (
settingsJson string
expectedSettings boshsettings.Settings
)
BeforeEach(func() {
settingsJson = `{
"agent_id": "my-agent-id",
"blobstore": {
"options": {
"bucket_name": "george",
"encryption_key": "optional encryption key",
"access_key_id": "optional access key id",
"secret_access_key": "optional secret access key"
},
"provider": "s3"
},
"disks": {
"ephemeral": "/dev/sdb",
"persistent": {
"vol-xxxxxx": "/dev/sdf"
},
"system": "/dev/sda1"
},
"env": {
"bosh": {
"password": "some encrypted password"
}
},
"networks": {
"netA": {
"default": ["dns", "gateway"],
"ip": "ww.ww.ww.ww",
"dns": [
"xx.xx.xx.xx",
"yy.yy.yy.yy"
]
},
"netB": {
"dns": [
"zz.zz.zz.zz"
]
}
},
"mbus": "https://vcap:b00tstrap@0.0.0.0:6868",
"ntp": [
"0.north-america.pool.ntp.org",
"1.north-america.pool.ntp.org"
],
"vm": {
"name": "vm-abc-def"
}
}`
settingsJson = strings.Replace(settingsJson, `"`, `\"`, -1)
settingsJson = strings.Replace(settingsJson, "\n", "", -1)
settingsJson = strings.Replace(settingsJson, "\t", "", -1)
settingsJson = fmt.Sprintf(`{"settings": "%s"}`, settingsJson)
expectedSettings = boshsettings.Settings{
AgentId: "my-agent-id",
Blobstore: boshsettings.Blobstore{
Options: map[string]string{
"bucket_name": "george",
"encryption_key": "optional encryption key",
"access_key_id": "optional access key id",
"secret_access_key": "optional secret access key",
},
Type: "s3",
},
Disks: boshsettings.Disks{
Ephemeral: "/dev/sdb",
Persistent: map[string]string{"vol-xxxxxx": "/dev/sdf"},
System: "/dev/sda1",
},
Env: boshsettings.Env{
Bosh: boshsettings.BoshEnv{
Password: "some encrypted password",
},
},
Networks: boshsettings.Networks{
"netA": boshsettings.Network{
Default: []string{"dns", "gateway"},
Ip: "ww.ww.ww.ww",
Dns: []string{"xx.xx.xx.xx", "yy.yy.yy.yy"},
},
"netB": boshsettings.Network{
Dns: []string{"zz.zz.zz.zz"},
},
},
Mbus: "https://vcap:b00tstrap@0.0.0.0:6868",
Ntp: []string{
"0.north-america.pool.ntp.org",
"1.north-america.pool.ntp.org",
},
Vm: boshsettings.Vm{
Name: "vm-abc-def",
},
}
})
Context("when a dns is not provided", func() {
It("aws get settings", func() {
boshRegistryHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
Expect(r.Method).To(Equal("GET"))
Expect(r.URL.Path).To(Equal("/instances/123-456-789/settings"))
w.Write([]byte(settingsJson))
})
registryTs := httptest.NewServer(boshRegistryHandler)
defer registryTs.Close()
expectedUserData := fmt.Sprintf(`{"registry":{"endpoint":"%s"}}`, registryTs.URL)
instanceId := "123-456-789"
awsMetaDataHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
Expect(r.Method).To(Equal("GET"))
switch r.URL.Path {
case "/latest/user-data":
w.Write([]byte(expectedUserData))
case "/latest/meta-data/instance-id":
w.Write([]byte(instanceId))
}
})
metadataTs := httptest.NewServer(awsMetaDataHandler)
defer metadataTs.Close()
platform := fakeplatform.NewFakePlatform()
aws := NewAwsInfrastructure(metadataTs.URL, &FakeDnsResolver{}, platform)
settings, err := aws.GetSettings()
Expect(err).NotTo(HaveOccurred())
Expect(settings).To(Equal(expectedSettings))
})
})
Context("when dns servers are provided", func() {
It("aws get settings", func() {
fakeDnsResolver := &FakeDnsResolver{
LookupHostIp: "127.0.0.1",
}
registryHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
Expect(r.Method).To(Equal("GET"))
Expect(r.URL.Path).To(Equal("/instances/123-456-789/settings"))
w.Write([]byte(settingsJson))
})
registryTs := httptest.NewServer(registryHandler)
registryUrl, err := url.Parse(registryTs.URL)
Expect(err).NotTo(HaveOccurred())
registryTsPort := strings.Split(registryUrl.Host, ":")[1]
defer registryTs.Close()
expectedUserData := fmt.Sprintf(`
{
"registry":{
"endpoint":"http://the.registry.name:%s"
},
"dns":{
"nameserver": ["8.8.8.8", "9.9.9.9"]
}
}`, registryTsPort)
instanceId := "123-456-789"
awsMetaDataHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
Expect(r.Method).To(Equal("GET"))
switch r.URL.Path {
case "/latest/user-data":
w.Write([]byte(expectedUserData))
case "/latest/meta-data/instance-id":
w.Write([]byte(instanceId))
}
})
metadataTs := httptest.NewServer(awsMetaDataHandler)
defer metadataTs.Close()
platform := fakeplatform.NewFakePlatform()
aws := NewAwsInfrastructure(metadataTs.URL, fakeDnsResolver, platform)
settings, err := aws.GetSettings()
Expect(err).NotTo(HaveOccurred())
Expect(settings).To(Equal(expectedSettings))
Expect(fakeDnsResolver.LookupHostHost).To(Equal("the.registry.name"))
Expect(fakeDnsResolver.LookupHostDnsServers).To(Equal([]string{"8.8.8.8", "9.9.9.9"}))
})
})
})
It("aws setup networking", func() {
fakeDnsResolver := &FakeDnsResolver{}
platform := fakeplatform.NewFakePlatform()
aws := NewAwsInfrastructure("", fakeDnsResolver, platform)
networks := boshsettings.Networks{"bosh": boshsettings.Network{}}
aws.SetupNetworking(networks)
Expect(platform.SetupDhcpNetworks).To(Equal(networks))
})
It("aws get ephemeral disk path", func() {
fakeDnsResolver := &FakeDnsResolver{}
platform := fakeplatform.NewFakePlatform()
aws := NewAwsInfrastructure("", fakeDnsResolver, platform)
platform.NormalizeDiskPathRealPath = "/dev/xvdb"
platform.NormalizeDiskPathFound = true
realPath, found := aws.GetEphemeralDiskPath("/dev/sdb")
Expect(found).To(Equal(true))
Expect(realPath).To(Equal("/dev/xvdb"))
Expect(platform.NormalizeDiskPathPath).To(Equal("/dev/sdb"))
})
})
}
Clearer AWS infrastructure test descriptions
package infrastructure_test
import (
. "bosh/infrastructure"
fakeplatform "bosh/platform/fakes"
boshsettings "bosh/settings"
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"net/http"
"net/http/httptest"
"net/url"
"strings"
)
// FakeDnsResolver is a test double that records the arguments of the
// last LookupHost call and returns a canned IP.
type FakeDnsResolver struct {
	LookupHostIp         string   // IP to return from LookupHost
	LookupHostDnsServers []string // dnsServers passed to the last call
	LookupHostHost       string   // host passed to the last call
}
// LookupHost records its arguments for later assertions and returns the
// preconfigured IP with a nil error.
func (res *FakeDnsResolver) LookupHost(dnsServers []string, host string) (string, error) {
	res.LookupHostDnsServers = dnsServers
	res.LookupHostHost = host
	return res.LookupHostIp, nil
}
func init() {
Describe("AWS Infrastructure", func() {
Describe("SetupSsh", func() {
var (
ts *httptest.Server
aws Infrastructure
platform *fakeplatform.FakePlatform
)
const expectedKey = "some public key"
BeforeEach(func() {
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
Expect(r.Method).To(Equal("GET"))
Expect(r.URL.Path).To(Equal("/latest/meta-data/public-keys/0/openssh-key"))
w.Write([]byte(expectedKey))
})
ts = httptest.NewServer(handler)
platform = fakeplatform.NewFakePlatform()
})
AfterEach(func() {
ts.Close()
})
It("gets the public key and sets up ssh via the platform", func() {
aws = NewAwsInfrastructure(ts.URL, &FakeDnsResolver{}, platform)
err := aws.SetupSsh("vcap")
Expect(err).NotTo(HaveOccurred())
Expect(platform.SetupSshPublicKey).To(Equal(expectedKey))
Expect(platform.SetupSshUsername).To(Equal("vcap"))
})
})
Describe("GetSettings", func() {
var (
settingsJson string
expectedSettings boshsettings.Settings
)
BeforeEach(func() {
settingsJson = `{
"agent_id": "my-agent-id",
"blobstore": {
"options": {
"bucket_name": "george",
"encryption_key": "optional encryption key",
"access_key_id": "optional access key id",
"secret_access_key": "optional secret access key"
},
"provider": "s3"
},
"disks": {
"ephemeral": "/dev/sdb",
"persistent": {
"vol-xxxxxx": "/dev/sdf"
},
"system": "/dev/sda1"
},
"env": {
"bosh": {
"password": "some encrypted password"
}
},
"networks": {
"netA": {
"default": ["dns", "gateway"],
"ip": "ww.ww.ww.ww",
"dns": [
"xx.xx.xx.xx",
"yy.yy.yy.yy"
]
},
"netB": {
"dns": [
"zz.zz.zz.zz"
]
}
},
"mbus": "https://vcap:b00tstrap@0.0.0.0:6868",
"ntp": [
"0.north-america.pool.ntp.org",
"1.north-america.pool.ntp.org"
],
"vm": {
"name": "vm-abc-def"
}
}`
settingsJson = strings.Replace(settingsJson, `"`, `\"`, -1)
settingsJson = strings.Replace(settingsJson, "\n", "", -1)
settingsJson = strings.Replace(settingsJson, "\t", "", -1)
settingsJson = fmt.Sprintf(`{"settings": "%s"}`, settingsJson)
expectedSettings = boshsettings.Settings{
AgentId: "my-agent-id",
Blobstore: boshsettings.Blobstore{
Options: map[string]string{
"bucket_name": "george",
"encryption_key": "optional encryption key",
"access_key_id": "optional access key id",
"secret_access_key": "optional secret access key",
},
Type: "s3",
},
Disks: boshsettings.Disks{
Ephemeral: "/dev/sdb",
Persistent: map[string]string{"vol-xxxxxx": "/dev/sdf"},
System: "/dev/sda1",
},
Env: boshsettings.Env{
Bosh: boshsettings.BoshEnv{
Password: "some encrypted password",
},
},
Networks: boshsettings.Networks{
"netA": boshsettings.Network{
Default: []string{"dns", "gateway"},
Ip: "ww.ww.ww.ww",
Dns: []string{"xx.xx.xx.xx", "yy.yy.yy.yy"},
},
"netB": boshsettings.Network{
Dns: []string{"zz.zz.zz.zz"},
},
},
Mbus: "https://vcap:b00tstrap@0.0.0.0:6868",
Ntp: []string{
"0.north-america.pool.ntp.org",
"1.north-america.pool.ntp.org",
},
Vm: boshsettings.Vm{
Name: "vm-abc-def",
},
}
})
Context("when a dns is not provided", func() {
It("aws get settings", func() {
boshRegistryHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
Expect(r.Method).To(Equal("GET"))
Expect(r.URL.Path).To(Equal("/instances/123-456-789/settings"))
w.Write([]byte(settingsJson))
})
registryTs := httptest.NewServer(boshRegistryHandler)
defer registryTs.Close()
expectedUserData := fmt.Sprintf(`{"registry":{"endpoint":"%s"}}`, registryTs.URL)
instanceId := "123-456-789"
awsMetaDataHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
Expect(r.Method).To(Equal("GET"))
switch r.URL.Path {
case "/latest/user-data":
w.Write([]byte(expectedUserData))
case "/latest/meta-data/instance-id":
w.Write([]byte(instanceId))
}
})
metadataTs := httptest.NewServer(awsMetaDataHandler)
defer metadataTs.Close()
platform := fakeplatform.NewFakePlatform()
aws := NewAwsInfrastructure(metadataTs.URL, &FakeDnsResolver{}, platform)
settings, err := aws.GetSettings()
Expect(err).NotTo(HaveOccurred())
Expect(settings).To(Equal(expectedSettings))
})
})
Context("when dns servers are provided", func() {
It("aws get settings", func() {
fakeDnsResolver := &FakeDnsResolver{
LookupHostIp: "127.0.0.1",
}
registryHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
Expect(r.Method).To(Equal("GET"))
Expect(r.URL.Path).To(Equal("/instances/123-456-789/settings"))
w.Write([]byte(settingsJson))
})
registryTs := httptest.NewServer(registryHandler)
registryUrl, err := url.Parse(registryTs.URL)
Expect(err).NotTo(HaveOccurred())
registryTsPort := strings.Split(registryUrl.Host, ":")[1]
defer registryTs.Close()
expectedUserData := fmt.Sprintf(`
{
"registry":{
"endpoint":"http://the.registry.name:%s"
},
"dns":{
"nameserver": ["8.8.8.8", "9.9.9.9"]
}
}`, registryTsPort)
instanceId := "123-456-789"
awsMetaDataHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
Expect(r.Method).To(Equal("GET"))
switch r.URL.Path {
case "/latest/user-data":
w.Write([]byte(expectedUserData))
case "/latest/meta-data/instance-id":
w.Write([]byte(instanceId))
}
})
metadataTs := httptest.NewServer(awsMetaDataHandler)
defer metadataTs.Close()
platform := fakeplatform.NewFakePlatform()
aws := NewAwsInfrastructure(metadataTs.URL, fakeDnsResolver, platform)
settings, err := aws.GetSettings()
Expect(err).NotTo(HaveOccurred())
Expect(settings).To(Equal(expectedSettings))
Expect(fakeDnsResolver.LookupHostHost).To(Equal("the.registry.name"))
Expect(fakeDnsResolver.LookupHostDnsServers).To(Equal([]string{"8.8.8.8", "9.9.9.9"}))
})
})
})
Describe("SetupNetworking", func() {
It("sets up DHCP on the platform", func() {
fakeDnsResolver := &FakeDnsResolver{}
platform := fakeplatform.NewFakePlatform()
aws := NewAwsInfrastructure("", fakeDnsResolver, platform)
networks := boshsettings.Networks{"bosh": boshsettings.Network{}}
aws.SetupNetworking(networks)
Expect(platform.SetupDhcpNetworks).To(Equal(networks))
})
})
Describe("GetEphemeralDiskPath", func() {
It("returns the real disk path given an AWS EBS hint", func() {
fakeDnsResolver := &FakeDnsResolver{}
platform := fakeplatform.NewFakePlatform()
aws := NewAwsInfrastructure("", fakeDnsResolver, platform)
platform.NormalizeDiskPathRealPath = "/dev/xvdb"
platform.NormalizeDiskPathFound = true
realPath, found := aws.GetEphemeralDiskPath("/dev/sdb")
Expect(found).To(Equal(true))
Expect(realPath).To(Equal("/dev/xvdb"))
Expect(platform.NormalizeDiskPathPath).To(Equal("/dev/sdb"))
})
})
})
}
|
package goboots
import (
by "bytes"
"encoding/json"
"fmt"
"github.com/gabstv/dson2json"
"github.com/gabstv/i18ngo"
"io/ioutil"
"log"
"math/rand"
"net/http"
"net/url"
"os"
"path/filepath"
"reflect"
"strings"
"text/template"
"time"
)
// App is the top-level goboots web application: configuration, routing
// table, caches, and the registered controllers and templates it serves.
type App struct {
	// "public"
	AppConfigPath string // path to AppConfig.json (or .dson)
	Config        AppConfig
	Routes        []Route
	ByteCaches    *ByteCacheCollection
	GenericCaches *GenericCacheCollection
	Random        *rand.Rand
	// "private"
	controllerMap  map[string]IController   // controller name -> instance
	templateMap    map[string]*templateInfo // template path (+lang suffix) -> parsed template
	basePath       string                   // working directory at startup
	entryHTTP      *appHTTP                 // plain-HTTP entry point (may redirect to TLS)
	entryHTTPS     *appHTTPS                // TLS entry point
	didRunRoutines bool
	mainChan       chan error // listeners report fatal errors here
	loadedAll      bool       // guards one-time initialization in loadAll
}
// appHTTP is the handler for plain-HTTP traffic (optionally redirecting
// to TLS).
type appHTTP struct {
	app *App
}

// appHTTPS is the handler for TLS traffic.
type appHTTPS struct {
	app *App
}
// ServeHTTP handles plain-HTTP requests. When Config.TLSRedirect is set
// it issues a permanent (301) redirect to the equivalent https:// URL,
// carrying over a non-standard TLS port from HostAddrTLS; otherwise it
// forwards to the shared App handler.
func (a *appHTTP) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if a.app.Config.TLSRedirect {
		// redirect to https
		h0 := strings.Split(r.Host, ":")
		h1 := strings.Split(a.app.Config.HostAddrTLS, ":")
		h0o := h0[0] // request host without port
		if len(h1) > 1 {
			if h1[1] != "443" {
				// keep the explicit TLS port unless it is the default 443
				h0[0] = h0[0] + ":" + h1[1]
			}
		}
		urls := r.URL.String()
		if strings.Contains(urls, h0o) {
			// strip the host from an absolute-form URL, keeping the path
			urls = strings.Replace(urls, h0o, "", 1)
		}
		log.Println("TLS Redirect: ", r.URL.String(), "https://"+h0[0]+urls)
		// NOTE(review): enroute's per-route RedirectTLS uses 302 — confirm
		// whether the 301 here is intentionally different.
		http.Redirect(w, r, "https://"+h0[0]+urls, 301)
		return
	}
	a.app.ServeHTTP(w, r)
}
// ServeHTTP forwards TLS requests to the shared App handler.
func (a *appHTTPS) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	a.app.ServeHTTP(w, r)
}
// ServeHTTP dispatches a request to the routing table and falls back to
// the public (static) folder when no route matched.
func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	log.Println("R:", r.URL.String())
	if handled := app.enroute(w, r); handled {
		return
	}
	app.servePublicFolder(w, r)
}
// Listen starts the HTTP and HTTPS listeners and blocks until one of
// them sends a fatal error on mainChan, which is returned.
func (app *App) Listen() error {
	app.mainChan = make(chan error)
	app.loadAll()
	go func() {
		app.listen()
	}()
	go func() {
		app.listenTLS()
	}()
	app.runRoutines()
	// Runs when the first listener error unblocks the receive below.
	defer CloseSessionStorage()
	var err error
	err = <-app.mainChan
	return err
}
// listen serves plain HTTP on Config.HostAddr; it is a no-op when no
// address is configured. Any ListenAndServe error is sent to mainChan.
func (app *App) listen() {
	app.loadAll()
	if len(app.Config.HostAddr) < 1 {
		return
	}
	er3 := http.ListenAndServe(app.Config.HostAddr, app.entryHTTP)
	app.mainChan <- er3
}
// listenTLS serves HTTPS on Config.HostAddrTLS. It is a no-op without an
// address and reports an AppError via mainChan when the certificate or
// key path is missing.
func (app *App) listenTLS() {
	app.loadAll()
	if len(app.Config.HostAddrTLS) < 1 {
		//TODO: error is TLS needs to be enforced (add config option)
		return
	}
	if len(app.Config.TLSCertificatePath) < 1 || len(app.Config.TLSKeyPath) < 1 {
		// app needs key and cert to do SSL
		er2 := &AppError{
			Id:      ErrTLSNil,
			Message: "Config TLSCertificatePath or TLSKeyPath is null. Cannot listen to TLS connections.",
		}
		app.mainChan <- er2
		return
	}
	er3 := http.ListenAndServeTLS(app.Config.HostAddrTLS, app.Config.TLSCertificatePath, app.Config.TLSKeyPath, app.entryHTTPS)
	app.mainChan <- er3
}
// RegisterController scans the controller's methods for handler shapes,
// initializes it, and stores it in the controller map keyed by the
// concrete type name.
func (a *App) RegisterController(c IController) {
	elemType := reflect.ValueOf(c).Elem().Type()
	name := elemType.Name()
	a.registerControllerMethods(c)
	if a.controllerMap == nil {
		a.controllerMap = make(map[string]IController)
	}
	c.Init(a)
	a.controllerMap[name] = c
	log.Printf("controller '%s' registered", name)
}
// GetViewTemplate returns the parsed template for the given view path
// (relative to Config.ViewsFolderPath). When localization is enabled the
// default language code is appended, matching how loadTemplates keys the
// map. Returns nil when the template is unknown.
func (a *App) GetViewTemplate(localpath string) *template.Template {
	if len(a.Config.LocalePath) > 0 {
		localpath = localpath + "_" + i18ngo.GetDefaultLanguageCode()
	}
	// Guard the lookup: the previous code dereferenced the map entry
	// directly, which panics on an unknown template path. Callers such as
	// DoHTTPError already handle a nil result.
	ti := a.templateMap[a.Config.ViewsFolderPath+"/"+localpath]
	if ti == nil {
		return nil
	}
	return ti.data
}
// GetLocalizedViewTemplate returns the parsed template for the given
// view path in the requesting user's language (per GetUserLang).
// Returns nil when the template is unknown.
func (a *App) GetLocalizedViewTemplate(localpath string, w http.ResponseWriter, r *http.Request) *template.Template {
	localpath = localpath + "_" + GetUserLang(w, r)
	//TODO: fix get/set templates!
	// Guard the lookup: the previous code dereferenced the map entry
	// directly, which panics on an unknown template path.
	ti := a.templateMap[a.Config.ViewsFolderPath+"/"+localpath]
	if ti == nil {
		return nil
	}
	return ti.data
}
// GetLayout returns the layout template named name (layouts/<name>.tpl).
func (a *App) GetLayout(name string) *template.Template {
	layoutPath := StrConcat("layouts/", name, ".tpl")
	return a.GetViewTemplate(layoutPath)
}
// GetLocalizedLayout returns the layout template named name in the
// requesting user's language.
func (a *App) GetLocalizedLayout(name string, w http.ResponseWriter, r *http.Request) *template.Template {
	return a.GetLocalizedViewTemplate("layouts/"+name+".tpl", w, r)
}
// DoHTTPError writes the status code err and renders the "error" layout
// with a human-readable description; when no error layout is available
// it falls back to writing the bare description.
func (a *App) DoHTTPError(w http.ResponseWriter, r *http.Request, err int) {
	w.WriteHeader(err)
	errorLayout := a.GetLayout("error")
	var erDesc string
	switch err {
	case 400:
		erDesc = "<strong>Bad Request</strong> - The request cannot be fulfilled due to bad syntax."
	case 401:
		erDesc = "<strong>Unauthorized</strong> - You must authenticate to view the source."
	case 403:
		erDesc = "<strong>Forbidden</strong> - You're not authorized to view the requested source."
	case 404:
		erDesc = "<strong>Not Found</strong> - The requested resource could not be found."
	case 405:
		erDesc = "<strong>Method Not Allowed</strong> - A request was made of a resource using a request method not supported by that resource."
	case 406:
		erDesc = "<strong>Not Acceptable</strong> - The requested resource is only capable of generating content not acceptable according to the Accept headers sent in the request."
	default:
		erDesc = "<a href=\"http://en.wikipedia.org/wiki/List_of_HTTP_status_codes\">The request could not be fulfilled.</a>"
	}
	if errorLayout == nil {
		fmt.Fprint(w, erDesc)
		return
	}
	page := &ErrorPageContent{
		Title:        a.Config.Name + " - " + fmt.Sprintf("%d", err),
		ErrorTitle:   fmt.Sprintf("%d", err),
		ErrorMessage: erDesc,
		Content:      " ",
	}
	// NOTE(review): Execute's error is discarded; the header has already
	// been written, so there is little to do, but consider logging it.
	errorLayout.Execute(w, page)
}
// loadAll performs one-time initialization: registers statically added
// controllers, builds the HTTP/HTTPS entry points, and loads config and
// templates. Subsequent calls return immediately.
// NOTE(review): loadedAll is not synchronized, yet listen/listenTLS also
// call loadAll from their own goroutines; Listen calls it first, but
// confirm this is race-free under `go test -race`.
func (a *App) loadAll() {
	if a.loadedAll {
		return
	}
	// load routes if they were added statically
	if controllers != nil {
		for _, v := range controllers {
			a.RegisterController(v)
		}
	}
	a.entryHTTP = &appHTTP{a}
	a.entryHTTPS = &appHTTPS{a}
	a.loadConfig()
	a.loadTemplates()
	a.loadedAll = true
}
// LoadConfigFile locates the application config (explicit AppConfigPath,
// then the APPCONFIGPATH and APPCONFIG environment variables, then
// ./AppConfig.json), converts it from DSON if needed, and unmarshals it
// into app.Config.
func (app *App) LoadConfigFile() error {
	if len(app.AppConfigPath) < 1 {
		// try to get appconfig path from env
		app.AppConfigPath = os.Getenv("APPCONFIGPATH")
	}
	if len(app.AppConfigPath) < 1 {
		app.AppConfigPath = os.Getenv("APPCONFIG")
	}
	if len(app.AppConfigPath) < 1 {
		// try to get $cwd/AppConfig.json
		if _, err := os.Stat("AppConfig.json"); os.IsNotExist(err) {
			return err
		}
		app.AppConfigPath = "AppConfig.json"
	}
	cfgPath := FormatPath(app.AppConfigPath)
	raw, err := ioutil.ReadFile(cfgPath)
	if err != nil {
		return err
	}
	if filepath.Ext(app.AppConfigPath) == ".dson" {
		// transcode DSON to JSON before unmarshalling
		var src, dst by.Buffer
		src.Write(raw)
		if err := dson2json.Convert(&src, &dst); err != nil {
			return err
		}
		raw = dst.Bytes()
		log.Println("such program very dson wow")
	}
	return json.Unmarshal(raw, &app.Config)
}
// loadConfig bootstraps the application state from disk: seeds the RNG,
// loads AppConfig, loads and classifies all route files, loads i18n
// catalogs, and creates the cache collections. Any load failure is fatal
// (via __panic / log.Fatal) since the app cannot run without them.
func (app *App) loadConfig() {
	// setup Random
	src := rand.NewSource(time.Now().Unix())
	app.Random = rand.New(src)
	app.basePath, _ = os.Getwd()
	var bytes []byte
	var err error
	//
	// LOAD AppConfig.json
	//
	err = app.LoadConfigFile()
	__panic(err)
	// set default views extension if none
	if len(app.Config.ViewsExtensions) < 1 {
		app.Config.ViewsExtensions = []string{".tpl", ".html"}
	}
	// parse Config
	app.Config.ParseEnv()
	//
	// LOAD Routes.json
	//
	// 2014-07-22 Now accepts multiple paths, separated by semicolons
	routespaths := strings.Split(app.Config.RoutesConfigPath, ";")
	app.Routes = make([]Route, 0)
	for _, rpath := range routespaths {
		rpath = strings.TrimSpace(rpath)
		fdir := FormatPath(rpath)
		bytes, err = ioutil.ReadFile(fdir)
		__panic(err)
		tempslice := make([]Route, 0)
		// .dson route files are transcoded to JSON first
		if xt := filepath.Ext(fdir); xt == ".dson" {
			var bf0, bf1 by.Buffer
			bf0.Write(bytes)
			err = dson2json.Convert(&bf0, &bf1)
			__panic(err)
			bytes = bf1.Bytes()
		}
		err = json.Unmarshal(bytes, &tempslice)
		__panic(err)
		for _, v := range tempslice {
			log.Println("Route `" + v.Path + "` loaded.")
			app.Routes = append(app.Routes, v)
		}
	}
	// Classify each route's matching strategy from its path syntax:
	// leading '^' = regexp, trailing '*' = prefix/remainder,
	// trailing '/?' = optional trailing slash.
	for i := 0; i < len(app.Routes); i++ {
		if strings.Index(app.Routes[i].Path, "^") == 0 {
			app.Routes[i]._t = routeMethodRegExp
		} else if strings.HasSuffix(app.Routes[i].Path, "*") {
			app.Routes[i]._t = routeMethodRemainder
		} else if strings.HasSuffix(app.Routes[i].Path, "/?") {
			app.Routes[i]._t = routeMethodIgnoreTrail
		}
	}
	//
	// LOAD Localization Files (i18n)
	//
	if len(app.Config.LocalePath) > 0 {
		locPath := FormatPath(app.Config.LocalePath)
		fi, _ := os.Stat(locPath)
		if fi == nil {
			log.Fatal("Could not load i18n files at path " + locPath + "\n")
			return
		}
		if !fi.IsDir() {
			log.Fatal("Path " + locPath + " is not a directory!\n")
			return
		}
		i18ngo.LoadPoAll(locPath)
		log.Println("i18n loaded.")
	}
	//
	// Setup cache
	//
	app.ByteCaches = NewByteCacheCollection()
	app.GenericCaches = NewGenericCacheCollection()
}
// loadTemplates walks Config.ViewsFolderPath and parses every file whose
// extension is in Config.ViewsExtensions. Without localization, each
// file becomes one entry keyed by its path; with localization, one entry
// per language keyed by "<path>_<lang>". Parse failures are fatal.
func (a *App) loadTemplates() {
	log.Println("loading template files (" + strings.Join(a.Config.ViewsExtensions, ",") + ")")
	a.templateMap = make(map[string]*templateInfo, 0)
	fdir := FormatPath(a.Config.ViewsFolderPath)
	bytesLoaded := int(0)
	langs := i18ngo.GetLanguageCodes()
	vPath := func(path string, f os.FileInfo, err error) error {
		ext := filepath.Ext(path)
		extensionIsValid := false
		for _, v := range a.Config.ViewsExtensions {
			if v == ext {
				extensionIsValid = true
			}
		}
		if extensionIsValid {
			// NOTE(review): the ReadFile error is discarded; an unreadable
			// file would be parsed as an empty template — confirm intended.
			bytes, _ := ioutil.ReadFile(path)
			if len(a.Config.LocalePath) < 1 {
				tplInfo := &templateInfo{
					path:       path,
					lastUpdate: time.Now(),
				}
				templ := template.New(path)
				templ, err0 := templ.Parse(string(bytes))
				__panic(err0)
				tplInfo.data = templ
				a.templateMap[path] = tplInfo
				bytesLoaded += len(bytes)
			} else {
				// one localized copy of the template per language code
				for _, lcv := range langs {
					tplInfo := &templateInfo{
						path:       path,
						lastUpdate: time.Now(),
					}
					locPName := path + "_" + lcv
					templ := template.New(locPName)
					templ, err0 := templ.Parse(LocalizeTemplate(string(bytes), lcv))
					__panic(err0)
					tplInfo.data = templ
					a.templateMap[locPName] = tplInfo
					bytesLoaded += len(bytes)
				}
			}
		}
		return nil
	}
	err := filepath.Walk(fdir, vPath)
	__panic(err)
	log.Printf("%d templates loaded (%d bytes)\n", len(a.templateMap), bytesLoaded)
}
// servePublicFolder serves a static file from Config.PublicFolderPath.
// Missing files yield a 404 page, directories a 403.
func (app *App) servePublicFolder(w http.ResponseWriter, r *http.Request) {
	//niceurl, _ := url.QueryUnescape(r.URL.String())
	niceurl := r.URL.Path
	//TODO: place that into access log
	//log.Println("requested " + niceurl)
	// after all routes are dealt with
	//TODO: have an option to have these files in memory
	fdir := FormatPath(app.Config.PublicFolderPath + "/" + niceurl)
	//
	info, err := os.Stat(fdir)
	if os.IsNotExist(err) {
		app.DoHTTPError(w, r, 404)
		return
	}
	if info.IsDir() {
		app.DoHTTPError(w, r, 403)
		return
	}
	//
	http.ServeFile(w, r, fdir)
}
// enroute matches the request against the routing table and, on a match,
// dispatches to the route's controller: optional TLS redirect, then the
// PreFilter hook, then either the controller's Run method or a named
// method invoked via reflection (legacy 4-arg or new *In/*Out shape).
// Returns true when the request was handled by a route.
func (app *App) enroute(w http.ResponseWriter, r *http.Request) bool {
	niceurl, _ := url.QueryUnescape(r.URL.String())
	niceurl = strings.Split(niceurl, "?")[0]
	urlbits := strings.Split(niceurl, "/")[1:]
	for _, v := range app.Routes {
		if v.IsMatch(niceurl) {
			// enroute based on method
			c := app.controllerMap[v.Controller]
			if c == nil {
				//TODO: display page error instead of panic
				log.Fatalf("Controller '%s' is not registered!\n", v.Controller)
			}
			if v.RedirectTLS {
				if r.TLS == nil {
					// redirect to https
					h0 := strings.Split(r.Host, ":")
					h1 := strings.Split(app.Config.HostAddrTLS, ":")
					h0o := h0[0]
					if len(h1) > 1 {
						if h1[1] != "443" {
							// keep the explicit TLS port unless it's 443
							h0[0] = h0[0] + ":" + h1[1]
						}
					}
					urls := r.URL.String()
					if strings.Contains(urls, h0o) {
						urls = strings.Replace(urls, h0o, "", 1)
					}
					log.Println("TLS Redirect: ", r.URL.String(), "https://"+h0[0]+urls)
					http.Redirect(w, r, "https://"+h0[0]+urls, 302)
					return true
				}
			}
			// run pre filter
			// you may want to run something before all the other methods, this is where you do it
			prec := c.PreFilter(w, r, urlbits)
			if prec != nil {
				// a non-nil false bool means "handled, render nothing"
				if v9, ok9 := prec.(bool); ok9 && !v9 {
					return true
				}
				c.Render(w, r, prec)
				return true
			}
			// run main controller function
			var content interface{}
			if len(v.Method) == 0 {
				content = c.Run(w, r, urlbits)
			} else {
				rVal, rValOK := c.getMethod(v.Method)
				if !rValOK {
					//TODO: display page error instead of panic
					log.Fatalf("Controller '%s' does not contain a method '%s', or it's not valid.", v.Controller, v.Method)
				} else {
					// finally run it
					var in []reflect.Value
					var inObj *In
					// build the argument list matching the registered shape
					if rVal.MethodKindIn == controllerMethodKindLegacy {
						in = make([]reflect.Value, 4)
						in[0] = reflect.ValueOf(c)
						in[1] = reflect.ValueOf(w)
						in[2] = reflect.ValueOf(r)
						in[3] = reflect.ValueOf(urlbits)
					} else if rVal.MethodKindIn == controllerMethodKindNew {
						in = make([]reflect.Value, 2)
						in[0] = reflect.ValueOf(c)
						inObj = &In{
							r,
							w,
							urlbits,
							nil,
						}
						in[1] = reflect.ValueOf(inObj)
					}
					var out []reflect.Value
					out = rVal.Val.Call(in)
					if rVal.MethodKindOut == controllerMethodKindLegacy {
						content = out[0].Interface()
					} else if rVal.MethodKindOut == controllerMethodKindNew {
						// *Out results are rendered immediately
						o0, _ := (out[0].Interface()).(*Out)
						if rVal.MethodKindIn == controllerMethodKindLegacy {
							w, _ := (in[1].Interface()).(http.ResponseWriter)
							c.RenderNew(w, o0)
						} else if rVal.MethodKindIn == controllerMethodKindNew {
							c.RenderNew(inObj.W, o0)
						}
						return true
					}
				}
			}
			//c.SetContext(c)
			if content == nil {
				return true
			}
			c.Render(w, r, content)
			return true
		}
	}
	return false
}
// registerControllerMethods inspects every exported method of c via
// reflection and registers those matching one of the two handler shapes:
//   - legacy: func (recv, w interface, r *ptr, urlbits slice) interface{}
//   - new:    func (recv, *In) *Out
// Lifecycle/utility methods (Init, Run, Render, ...) are skipped.
func (a *App) registerControllerMethods(c IController) {
	v := reflect.ValueOf(c)
	pt := v.Type()
	inType := reflect.TypeOf((*In)(nil)).Elem()
	outType := reflect.TypeOf((*Out)(nil)).Elem()
	n := pt.NumMethod()
	for i := 0; i < n; i++ {
		m := pt.Method(i)
		name := m.Name
		// don't register known methods
		switch name {
		case "Init", "Run", "Render", "PreFilter", "ParseContent":
			continue
		case "Redirect", "PageError", "registerMethod", "getMethod":
			continue
		}
		mt := m.Type
		// handlers return exactly one value
		if mt.NumOut() != 1 {
			continue
		}
		outk := -1
		methodOut := mt.Out(0)
		switch {
		case methodOut.Kind() == reflect.Interface:
			outk = controllerMethodKindLegacy
		case methodOut.Kind() == reflect.Ptr && methodOut.Elem() == outType:
			// BUG FIX: the previous code called methodOut.Elem() without
			// checking the kind first; reflect.Type.Elem panics for
			// non-pointer, non-container kinds (e.g. a method returning int).
			outk = controllerMethodKindNew
		default:
			continue
		}
		// in count includes the receiver: receiver+3 (legacy) or receiver+1 (new)
		inpt := mt.NumIn()
		if inpt != 4 && inpt != 2 {
			continue
		} else if inpt == 4 {
			if mt.In(1).Kind() != reflect.Interface {
				continue
			}
			if mt.In(2).Kind() != reflect.Ptr {
				continue
			}
			if mt.In(3).Kind() != reflect.Slice {
				continue
			}
			c.registerMethod(name, m.Func, controllerMethodKindLegacy, outk)
		} else {
			// 2 params
			log.Println(name, "2 params and is kind ", mt.In(1).Kind().String())
			if mt.In(1).Kind() != reflect.Ptr {
				continue
			}
			if mt.In(1).Elem() != inType {
				log.Println(mt.In(1).Elem().String(), "is not", inType.String())
				// BUG FIX: previously this mismatch was only logged and the
				// method was registered anyway; skip it instead.
				continue
			}
			c.registerMethod(name, m.Func, controllerMethodKindNew, outk)
		}
	}
}
Fixed registration of controller methods' out-parameter kinds
package goboots
import (
by "bytes"
"encoding/json"
"fmt"
"github.com/gabstv/dson2json"
"github.com/gabstv/i18ngo"
"io/ioutil"
"log"
"math/rand"
"net/http"
"net/url"
"os"
"path/filepath"
"reflect"
"strings"
"text/template"
"time"
)
// App is the top-level goboots web application: configuration, routing
// table, caches, and the registered controllers and templates it serves.
type App struct {
	// "public"
	AppConfigPath string // path to AppConfig.json (or .dson)
	Config        AppConfig
	Routes        []Route
	ByteCaches    *ByteCacheCollection
	GenericCaches *GenericCacheCollection
	Random        *rand.Rand
	// "private"
	controllerMap  map[string]IController   // controller name -> instance
	templateMap    map[string]*templateInfo // template path (+lang suffix) -> parsed template
	basePath       string                   // working directory at startup
	entryHTTP      *appHTTP                 // plain-HTTP entry point (may redirect to TLS)
	entryHTTPS     *appHTTPS                // TLS entry point
	didRunRoutines bool
	mainChan       chan error // listeners report fatal errors here
	loadedAll      bool       // guards one-time initialization in loadAll
}
// appHTTP is the plain-HTTP entry point; it may redirect requests to HTTPS
// depending on configuration.
type appHTTP struct {
	app *App
}

// appHTTPS is the TLS entry point; it delegates directly to the App.
type appHTTPS struct {
	app *App
}
// ServeHTTP handles plain-HTTP requests. When Config.TLSRedirect is set it
// replies with a 301 redirect to the HTTPS address; otherwise it forwards
// the request to the App.
func (a *appHTTP) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if a.app.Config.TLSRedirect {
		// redirect to https
		h0 := strings.Split(r.Host, ":")
		h1 := strings.Split(a.app.Config.HostAddrTLS, ":")
		h0o := h0[0] // request hostname without port
		// Append the TLS port to the host unless it is the default 443.
		if len(h1) > 1 {
			if h1[1] != "443" {
				h0[0] = h0[0] + ":" + h1[1]
			}
		}
		urls := r.URL.String()
		// Strip the first occurrence of the hostname from the URL so that
		// only path+query remains when rebuilding the redirect target.
		if strings.Contains(urls, h0o) {
			urls = strings.Replace(urls, h0o, "", 1)
		}
		log.Println("TLS Redirect: ", r.URL.String(), "https://"+h0[0]+urls)
		http.Redirect(w, r, "https://"+h0[0]+urls, 301)
		return
	}
	a.app.ServeHTTP(w, r)
}
// ServeHTTP handles TLS requests by delegating straight to the App.
func (a *appHTTPS) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	a.app.ServeHTTP(w, r)
}
// ServeHTTP routes the request through the route table; when no route
// matches it falls back to serving a file from the public folder.
func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	log.Println("R:", r.URL.String())
	routed := app.enroute(w, r)
	// if routes didn't find anything
	if !routed {
		app.servePublicFolder(w, r)
	}
}
// Listen initializes the application, starts the HTTP and HTTPS listeners
// in the background, runs registered routines, and blocks until one of the
// listeners reports a fatal error, which is returned to the caller.
func (app *App) Listen() error {
	app.mainChan = make(chan error)
	app.loadAll()
	go app.listen()
	go app.listenTLS()
	app.runRoutines()
	defer CloseSessionStorage()
	return <-app.mainChan
}
// listen serves plain HTTP on Config.HostAddr. It is a no-op when no HTTP
// address is configured; otherwise it blocks in ListenAndServe and reports
// the resulting error on mainChan.
func (app *App) listen() {
	app.loadAll()
	if len(app.Config.HostAddr) < 1 {
		return
	}
	er3 := http.ListenAndServe(app.Config.HostAddr, app.entryHTTP)
	app.mainChan <- er3
}
// listenTLS serves HTTPS on Config.HostAddrTLS. It is a no-op when no TLS
// address is configured; a missing certificate or key path is reported on
// mainChan as an AppError, as is the eventual ListenAndServeTLS error.
func (app *App) listenTLS() {
	app.loadAll()
	if len(app.Config.HostAddrTLS) < 1 {
		//TODO: error is TLS needs to be enforced (add config option)
		return
	}
	if len(app.Config.TLSCertificatePath) < 1 || len(app.Config.TLSKeyPath) < 1 {
		// app needs key and cert to do SSL
		er2 := &AppError{
			Id:      ErrTLSNil,
			Message: "Config TLSCertificatePath or TLSKeyPath is null. Cannot listen to TLS connections.",
		}
		app.mainChan <- er2
		return
	}
	er3 := http.ListenAndServeTLS(app.Config.HostAddrTLS, app.Config.TLSCertificatePath, app.Config.TLSKeyPath, app.entryHTTPS)
	app.mainChan <- er3
}
// RegisterController registers the controller under its concrete type name,
// scans and registers its routable methods, and invokes its Init hook.
func (a *App) RegisterController(c IController) {
	// Use the concrete (dereferenced) type name as the route key.
	name := reflect.ValueOf(c).Elem().Type().Name()
	// Register methods that match the supported controller signatures.
	a.registerControllerMethods(c)
	if a.controllerMap == nil {
		// Lazily create the registry; the size hint 0 was redundant.
		a.controllerMap = make(map[string]IController)
	}
	c.Init(a)
	a.controllerMap[name] = c
	log.Printf("controller '%s' registered", name)
}
// GetViewTemplate returns the parsed template at localpath (relative to the
// views folder). When localization is enabled the default language code is
// appended to the key. Returns nil when the template was never loaded —
// previously the missing map entry (a nil *templateInfo) caused a
// nil-pointer dereference on the .data access.
func (a *App) GetViewTemplate(localpath string) *template.Template {
	if len(a.Config.LocalePath) > 0 {
		localpath = localpath + "_" + i18ngo.GetDefaultLanguageCode()
	}
	ti := a.templateMap[a.Config.ViewsFolderPath+"/"+localpath]
	if ti == nil {
		return nil
	}
	return ti.data
}
// GetLocalizedViewTemplate returns the parsed template at localpath for the
// requesting user's language (suffix "_<lang>"). Returns nil when the
// template was never loaded — previously the missing map entry (a nil
// *templateInfo) caused a nil-pointer dereference on the .data access.
//
//TODO: fix get/set templates!
func (a *App) GetLocalizedViewTemplate(localpath string, w http.ResponseWriter, r *http.Request) *template.Template {
	localpath = localpath + "_" + GetUserLang(w, r)
	ti := a.templateMap[a.Config.ViewsFolderPath+"/"+localpath]
	if ti == nil {
		return nil
	}
	return ti.data
}
// GetLayout returns the layout template "layouts/<name>.tpl".
func (a *App) GetLayout(name string) *template.Template {
	return a.GetViewTemplate(StrConcat("layouts/", name, ".tpl"))
}

// GetLocalizedLayout returns the layout template "layouts/<name>.tpl"
// localized for the requesting user's language.
func (a *App) GetLocalizedLayout(name string, w http.ResponseWriter, r *http.Request) *template.Template {
	return a.GetLocalizedViewTemplate("layouts/"+name+".tpl", w, r)
}
// DoHTTPError writes an HTTP error response with status code err. When an
// "error" layout template is available it is rendered with an
// ErrorPageContent; otherwise a plain HTML description is written.
func (a *App) DoHTTPError(w http.ResponseWriter, r *http.Request, err int) {
	w.WriteHeader(err)
	errorLayout := a.GetLayout("error")
	var erDesc string
	switch err {
	case 400:
		erDesc = "<strong>Bad Request</strong> - The request cannot be fulfilled due to bad syntax."
	case 401:
		erDesc = "<strong>Unauthorized</strong> - You must authenticate to view the source."
	case 403:
		erDesc = "<strong>Forbidden</strong> - You're not authorized to view the requested source."
	case 404:
		erDesc = "<strong>Not Found</strong> - The requested resource could not be found."
	case 405:
		erDesc = "<strong>Method Not Allowed</strong> - A request was made of a resource using a request method not supported by that resource."
	case 406:
		erDesc = "<strong>Not Acceptable</strong> - The requested resource is only capable of generating content not acceptable according to the Accept headers sent in the request."
	default:
		erDesc = "<a href=\"http://en.wikipedia.org/wiki/List_of_HTTP_status_codes\">The request could not be fulfilled.</a>"
	}
	if errorLayout == nil {
		fmt.Fprint(w, erDesc)
		return
	}
	page := &ErrorPageContent{
		Title:        a.Config.Name + " - " + fmt.Sprintf("%d", err),
		ErrorTitle:   fmt.Sprintf("%d", err),
		ErrorMessage: erDesc,
		Content:      " ",
	}
	// Log template failures instead of silently discarding the error.
	if eerr := errorLayout.Execute(w, page); eerr != nil {
		log.Println("DoHTTPError: error layout execute:", eerr)
	}
}
// loadAll performs one-time initialization: registers statically-added
// controllers, creates the HTTP/HTTPS entry points, loads the configuration
// and parses all templates.
//
// NOTE(review): guarded only by the loadedAll bool — listen and listenTLS
// invoke this from separate goroutines, so the guard is racy unless Listen
// has already completed loadAll on the main goroutine first; confirm.
func (a *App) loadAll() {
	if a.loadedAll {
		return
	}
	// load routes if they were added statically
	if controllers != nil {
		for _, v := range controllers {
			a.RegisterController(v)
		}
	}
	a.entryHTTP = &appHTTP{a}
	a.entryHTTPS = &appHTTPS{a}
	a.loadConfig()
	a.loadTemplates()
	a.loadedAll = true
}
// LoadConfigFile locates and parses the application configuration into
// app.Config. The path is resolved from app.AppConfigPath, then the
// APPCONFIGPATH and APPCONFIG environment variables, then ./AppConfig.json.
// Files with a .dson extension are converted to JSON before unmarshalling.
func (app *App) LoadConfigFile() error {
	if len(app.AppConfigPath) < 1 {
		// try to get appconfig path from env
		app.AppConfigPath = os.Getenv("APPCONFIGPATH")
		if len(app.AppConfigPath) < 1 {
			app.AppConfigPath = os.Getenv("APPCONFIG")
			if len(app.AppConfigPath) < 1 {
				// try to get $cwd/AppConfig.json
				_, err := os.Stat("AppConfig.json")
				if os.IsNotExist(err) {
					return err
				}
				app.AppConfigPath = "AppConfig.json"
			}
		}
	}
	dir := FormatPath(app.AppConfigPath)
	bytes, err := ioutil.ReadFile(dir)
	if err != nil {
		return err
	}
	if xt := filepath.Ext(app.AppConfigPath); xt == ".dson" {
		// Convert DSON to JSON in memory before decoding.
		var bf0, bf1 by.Buffer
		bf0.Write(bytes)
		err = dson2json.Convert(&bf0, &bf1)
		if err != nil {
			return err
		}
		bytes = bf1.Bytes()
		log.Println("such program very dson wow")
	}
	return json.Unmarshal(bytes, &app.Config)
}
// loadConfig initializes the RNG, loads the app configuration and route
// table (JSON or DSON; multiple route files separated by semicolons),
// classifies each route's matching strategy, loads i18n locale files, and
// creates the cache collections. Any load failure panics via __panic or
// exits via log.Fatal.
func (app *App) loadConfig() {
	// setup Random
	src := rand.NewSource(time.Now().Unix())
	app.Random = rand.New(src)
	app.basePath, _ = os.Getwd()
	var bytes []byte
	var err error
	//
	// LOAD AppConfig.json
	//
	err = app.LoadConfigFile()
	__panic(err)
	// set default views extension if none
	if len(app.Config.ViewsExtensions) < 1 {
		app.Config.ViewsExtensions = []string{".tpl", ".html"}
	}
	// parse Config
	app.Config.ParseEnv()
	//
	// LOAD Routes.json
	//
	// 2014-07-22 Now accepts multiple paths, separated by semicolons
	routespaths := strings.Split(app.Config.RoutesConfigPath, ";")
	app.Routes = make([]Route, 0)
	for _, rpath := range routespaths {
		rpath = strings.TrimSpace(rpath)
		fdir := FormatPath(rpath)
		bytes, err = ioutil.ReadFile(fdir)
		__panic(err)
		tempslice := make([]Route, 0)
		if xt := filepath.Ext(fdir); xt == ".dson" {
			// DSON route files are converted to JSON before decoding.
			var bf0, bf1 by.Buffer
			bf0.Write(bytes)
			err = dson2json.Convert(&bf0, &bf1)
			__panic(err)
			bytes = bf1.Bytes()
		}
		err = json.Unmarshal(bytes, &tempslice)
		__panic(err)
		for _, v := range tempslice {
			log.Println("Route `" + v.Path + "` loaded.")
			app.Routes = append(app.Routes, v)
		}
	}
	// Classify route matching: "^" prefix -> regexp, "*" suffix -> remainder
	// match, "/?" suffix -> ignore trailing slash.
	for i := 0; i < len(app.Routes); i++ {
		if strings.Index(app.Routes[i].Path, "^") == 0 {
			app.Routes[i]._t = routeMethodRegExp
		} else if strings.HasSuffix(app.Routes[i].Path, "*") {
			app.Routes[i]._t = routeMethodRemainder
		} else if strings.HasSuffix(app.Routes[i].Path, "/?") {
			app.Routes[i]._t = routeMethodIgnoreTrail
		}
	}
	//
	// LOAD Localization Files (i18n)
	//
	if len(app.Config.LocalePath) > 0 {
		locPath := FormatPath(app.Config.LocalePath)
		fi, _ := os.Stat(locPath)
		if fi == nil {
			// log.Fatal exits the process; the return below is unreachable.
			log.Fatal("Could not load i18n files at path " + locPath + "\n")
			return
		}
		if !fi.IsDir() {
			log.Fatal("Path " + locPath + " is not a directory!\n")
			return
		}
		i18ngo.LoadPoAll(locPath)
		log.Println("i18n loaded.")
	}
	//
	// Setup cache
	//
	app.ByteCaches = NewByteCacheCollection()
	app.GenericCaches = NewGenericCacheCollection()
}
// loadTemplates walks the views folder and parses every file whose extension
// appears in Config.ViewsExtensions. Without localization each file becomes
// one template keyed by its path; with localization one template is parsed
// per language code, keyed by "path_lang". Parse failures panic via __panic.
func (a *App) loadTemplates() {
	log.Println("loading template files (" + strings.Join(a.Config.ViewsExtensions, ",") + ")")
	a.templateMap = make(map[string]*templateInfo, 0)
	fdir := FormatPath(a.Config.ViewsFolderPath)
	bytesLoaded := int(0)
	langs := i18ngo.GetLanguageCodes()
	vPath := func(path string, f os.FileInfo, err error) error {
		ext := filepath.Ext(path)
		extensionIsValid := false
		for _, v := range a.Config.ViewsExtensions {
			if v == ext {
				extensionIsValid = true
			}
		}
		if extensionIsValid {
			// NOTE(review): both the walk callback's err parameter and the
			// ReadFile error are ignored; an unreadable file is parsed as
			// empty template content.
			bytes, _ := ioutil.ReadFile(path)
			if len(a.Config.LocalePath) < 1 {
				// No localization: one template per file, keyed by path.
				tplInfo := &templateInfo{
					path:       path,
					lastUpdate: time.Now(),
				}
				templ := template.New(path)
				templ, err0 := templ.Parse(string(bytes))
				__panic(err0)
				tplInfo.data = templ
				a.templateMap[path] = tplInfo
				bytesLoaded += len(bytes)
			} else {
				// Localized: one template per configured language code.
				for _, lcv := range langs {
					tplInfo := &templateInfo{
						path:       path,
						lastUpdate: time.Now(),
					}
					locPName := path + "_" + lcv
					templ := template.New(locPName)
					templ, err0 := templ.Parse(LocalizeTemplate(string(bytes), lcv))
					__panic(err0)
					tplInfo.data = templ
					a.templateMap[locPName] = tplInfo
					bytesLoaded += len(bytes)
				}
			}
		}
		return nil
	}
	err := filepath.Walk(fdir, vPath)
	__panic(err)
	log.Printf("%d templates loaded (%d bytes)\n", len(a.templateMap), bytesLoaded)
}
// servePublicFolder serves the static file under Config.PublicFolderPath
// that corresponds to the request path. Missing (or unreadable) files yield
// a 404; directories yield a 403.
func (app *App) servePublicFolder(w http.ResponseWriter, r *http.Request) {
	niceurl := r.URL.Path
	//TODO: place that into access log
	//TODO: have an option to have these files in memory
	fdir := FormatPath(app.Config.PublicFolderPath + "/" + niceurl)
	info, err := os.Stat(fdir)
	if err != nil {
		// Previously only os.IsNotExist was handled; any other Stat error
		// (permissions, I/O) fell through to info.IsDir() on a nil FileInfo
		// and panicked. Treat every Stat failure as "not found".
		app.DoHTTPError(w, r, 404)
		return
	}
	if info.IsDir() {
		app.DoHTTPError(w, r, 403)
		return
	}
	http.ServeFile(w, r, fdir)
}
// enroute matches the request URL against the route table. On a match it
// dispatches to the route's controller — running PreFilter first, then
// either Run or the configured method via reflection — and returns true.
// Returns false when no route matches so the caller can fall back to
// static-file serving.
func (app *App) enroute(w http.ResponseWriter, r *http.Request) bool {
	niceurl, _ := url.QueryUnescape(r.URL.String())
	niceurl = strings.Split(niceurl, "?")[0] // drop the query string
	urlbits := strings.Split(niceurl, "/")[1:]
	for _, v := range app.Routes {
		if v.IsMatch(niceurl) {
			// enroute based on method
			c := app.controllerMap[v.Controller]
			if c == nil {
				//TODO: display page error instead of panic
				log.Fatalf("Controller '%s' is not registered!\n", v.Controller)
			}
			if v.RedirectTLS {
				if r.TLS == nil {
					// redirect to https
					h0 := strings.Split(r.Host, ":")
					h1 := strings.Split(app.Config.HostAddrTLS, ":")
					h0o := h0[0]
					// Append the TLS port unless it is the default 443.
					if len(h1) > 1 {
						if h1[1] != "443" {
							h0[0] = h0[0] + ":" + h1[1]
						}
					}
					urls := r.URL.String()
					if strings.Contains(urls, h0o) {
						urls = strings.Replace(urls, h0o, "", 1)
					}
					log.Println("TLS Redirect: ", r.URL.String(), "https://"+h0[0]+urls)
					http.Redirect(w, r, "https://"+h0[0]+urls, 302)
					return true
				}
			}
			// run pre filter
			// you may want to run something before all the other methods, this is where you do it
			prec := c.PreFilter(w, r, urlbits)
			if prec != nil {
				// A boolean false from the prefilter aborts the request
				// silently; any other non-nil value is rendered directly.
				if v9, ok9 := prec.(bool); ok9 && !v9 {
					return true
				}
				c.Render(w, r, prec)
				return true
			}
			// run main controller function
			var content interface{}
			if len(v.Method) == 0 {
				content = c.Run(w, r, urlbits)
			} else {
				rVal, rValOK := c.getMethod(v.Method)
				if !rValOK {
					//TODO: display page error instead of panic
					log.Fatalf("Controller '%s' does not contain a method '%s', or it's not valid.", v.Controller, v.Method)
				} else {
					// finally run it
					var in []reflect.Value
					var inObj *In
					if rVal.MethodKindIn == controllerMethodKindLegacy {
						// legacy signature: (w, r, urlbits)
						in = make([]reflect.Value, 4)
						in[0] = reflect.ValueOf(c)
						in[1] = reflect.ValueOf(w)
						in[2] = reflect.ValueOf(r)
						in[3] = reflect.ValueOf(urlbits)
					} else if rVal.MethodKindIn == controllerMethodKindNew {
						// new signature: (*In)
						in = make([]reflect.Value, 2)
						in[0] = reflect.ValueOf(c)
						inObj = &In{
							r,
							w,
							urlbits,
							nil,
						}
						in[1] = reflect.ValueOf(inObj)
					}
					var out []reflect.Value
					out = rVal.Val.Call(in)
					if rVal.MethodKindOut == controllerMethodKindLegacy {
						content = out[0].Interface()
					} else if rVal.MethodKindOut == controllerMethodKindNew {
						// New-style output (*Out) is rendered immediately.
						o0, _ := (out[0].Interface()).(*Out)
						if rVal.MethodKindIn == controllerMethodKindLegacy {
							w, _ := (in[1].Interface()).(http.ResponseWriter)
							c.RenderNew(w, o0)
						} else if rVal.MethodKindIn == controllerMethodKindNew {
							c.RenderNew(inObj.W, o0)
						}
						return true
					}
				}
			}
			//c.SetContext(c)
			if content == nil {
				return true
			}
			c.Render(w, r, content)
			return true
		}
	}
	return false
}
// registerControllerMethods scans the controller's method set via reflection
// and registers every method matching one of the two supported shapes:
//
//	legacy: func (c) (w <interface>, r *http.Request, urlbits []string) interface{}
//	new:    func (c) (in *In) *Out
//
// Well-known IController lifecycle methods are skipped.
func (a *App) registerControllerMethods(c IController) {
	v := reflect.ValueOf(c)
	pt := v.Type()
	inType := reflect.TypeOf((*In)(nil)).Elem()
	outType := reflect.TypeOf((*Out)(nil)).Elem()
	n := pt.NumMethod()
	for i := 0; i < n; i++ {
		m := pt.Method(i)
		name := m.Name
		// don't register known methods
		switch name {
		case "Init", "Run", "Render", "PreFilter", "ParseContent":
			continue
		case "Redirect", "PageError", "registerMethod", "getMethod":
			continue
		}
		mt := m.Type
		// outp must be 1 (interface{} or *Out)
		outp := mt.NumOut()
		if outp != 1 {
			continue
		}
		// Classify the return type; the Ptr check guards Elem(), which
		// panics for plain value kinds.
		outk := -1
		methodOut := mt.Out(0)
		if methodOut.Kind() == reflect.Interface {
			outk = controllerMethodKindLegacy
		} else if methodOut.Kind() == reflect.Ptr {
			if methodOut.Elem() == outType {
				outk = controllerMethodKindNew
			} else {
				continue
			}
		} else {
			continue
		}
		// NumIn counts the receiver, so valid totals are
		// 4 (receiver + w + r + urlbits) or 2 (receiver + *In).
		inpt := mt.NumIn()
		if inpt != 4 && inpt != 2 {
			continue
		} else if inpt == 4 {
			if mt.In(1).Kind() != reflect.Interface {
				continue
			}
			if mt.In(2).Kind() != reflect.Ptr {
				continue
			}
			if mt.In(3).Kind() != reflect.Slice {
				continue
			}
			c.registerMethod(name, m.Func, controllerMethodKindLegacy, outk)
		} else {
			// 2 params: the single argument must be *In
			log.Println(name, "2 params and is kind ", mt.In(1).Kind().String())
			if mt.In(1).Kind() != reflect.Ptr {
				continue
			}
			if mt.In(1).Elem() != inType {
				log.Println(mt.In(1).Elem().String(), "is not", inType.String())
			}
			c.registerMethod(name, m.Func, controllerMethodKindNew, outk)
		}
	}
}
|
/*
Copyright 2021 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"crypto/tls"
"crypto/x509/pkix"
"fmt"
"net"
"os"
"strings"
"text/template"
"time"
"github.com/gravitational/teleport"
"github.com/gravitational/teleport/api/client/proto"
"github.com/gravitational/teleport/api/types"
"github.com/gravitational/teleport/api/utils/keys"
"github.com/gravitational/teleport/lib/client"
"github.com/gravitational/teleport/lib/defaults"
"github.com/gravitational/teleport/lib/tlsca"
"github.com/gravitational/teleport/lib/utils"
"github.com/ghodss/yaml"
"github.com/gravitational/trace"
)
// onAppLogin implements "tsh app login" command: it creates an app session
// for the requested application, reissues user certificates routed to that
// app, saves the profile, and prints usage instructions appropriate to the
// app type (AWS console, TCP, or HTTP).
func onAppLogin(cf *CLIConf) error {
	tc, err := makeClient(cf, false)
	if err != nil {
		return trace.Wrap(err)
	}
	app, err := getRegisteredApp(cf, tc)
	if err != nil {
		return trace.Wrap(err)
	}
	profile, err := client.StatusCurrent(cf.HomePath, cf.Proxy, cf.IdentityFileIn)
	if err != nil {
		return trace.Wrap(err)
	}
	rootCluster, err := tc.RootClusterName(cf.Context)
	if err != nil {
		return trace.Wrap(err)
	}
	// AWS console apps additionally need an IAM role ARN.
	var arn string
	if app.IsAWSConsole() {
		var err error
		arn, err = getARNFromFlags(cf, profile, app)
		if err != nil {
			return trace.Wrap(err)
		}
	}
	ws, err := tc.CreateAppSession(cf.Context, types.CreateAppSessionRequest{
		Username:    tc.Username,
		PublicAddr:  app.GetPublicAddr(),
		ClusterName: tc.SiteName,
		AWSRoleARN:  arn,
	})
	if err != nil {
		return trace.Wrap(err)
	}
	// Reissue certs with the app route embedded so the proxy can route
	// requests for this app session.
	err = tc.ReissueUserCerts(cf.Context, client.CertCacheKeep, client.ReissueParams{
		RouteToCluster: tc.SiteName,
		RouteToApp: proto.RouteToApp{
			Name:        app.GetName(),
			SessionID:   ws.GetName(),
			PublicAddr:  app.GetPublicAddr(),
			ClusterName: tc.SiteName,
			AWSRoleARN:  arn,
		},
		AccessRequests: profile.ActiveRequests.AccessRequests,
	})
	if err != nil {
		return trace.Wrap(err)
	}
	if err := tc.SaveProfile(cf.HomePath, true); err != nil {
		return trace.Wrap(err)
	}
	// Print the hint matching the application type.
	if app.IsAWSConsole() {
		return awsCliTpl.Execute(os.Stdout, map[string]string{
			"awsAppName": app.GetName(),
			"awsCmd":     "s3 ls",
		})
	}
	if app.IsTCP() {
		return appLoginTCPTpl.Execute(os.Stdout, map[string]string{
			"appName": app.GetName(),
		})
	}
	curlCmd, err := formatAppConfig(tc, profile, app.GetName(), app.GetPublicAddr(), appFormatCURL, rootCluster)
	if err != nil {
		return trace.Wrap(err)
	}
	return appLoginTpl.Execute(os.Stdout, map[string]string{
		"appName": app.GetName(),
		"curlCmd": curlCmd,
	})
}
// appLoginTpl is the message that gets printed to a user upon successful
// login into an HTTP application.
var appLoginTpl = template.Must(template.New("").Parse(
	`Logged into app {{.appName}}. Example curl command:
{{.curlCmd}}
`))

// appLoginTCPTpl is the message that gets printed to a user upon successful
// login into a TCP application.
var appLoginTCPTpl = template.Must(template.New("").Parse(
	`Logged into TCP app {{.appName}}. Start the local TCP proxy for it:
tsh proxy app {{.appName}}
Then connect to the application through this proxy.
`))

// awsCliTpl is the message that gets printed to a user upon successful
// login into an AWS Console application.
var awsCliTpl = template.Must(template.New("").Parse(
	`Logged into AWS app {{.awsAppName}}. Example AWS CLI command:
tsh aws {{.awsCmd}}
`))
// getRegisteredApp returns the registered application with the specified
// name, retrying with a relogin if the credentials are expired.
func getRegisteredApp(cf *CLIConf, tc *client.TeleportClient) (app types.Application, err error) {
	var apps []types.Application
	err = client.RetryWithRelogin(cf.Context, tc, func() error {
		allApps, err := tc.ListApps(cf.Context, &proto.ListResourcesRequest{
			Namespace:           tc.Namespace,
			PredicateExpression: fmt.Sprintf(`name == "%s"`, cf.AppName),
		})
		// Kept for fallback in case older auth does not apply filters.
		// DELETE IN 11.0.0
		// NOTE(review): a client-side name match deliberately returns nil
		// even when ListApps also returned an error; the error only
		// surfaces when no app matched.
		for _, a := range allApps {
			if a.GetName() == cf.AppName {
				apps = append(apps, a)
				return nil
			}
		}
		return trace.Wrap(err)
	})
	if err != nil {
		return nil, trace.Wrap(err)
	}
	if len(apps) == 0 {
		return nil, trace.NotFound("app %q not found, use `tsh app ls` to see registered apps", cf.AppName)
	}
	return apps[0], nil
}
// onAppLogout implements "tsh app logout" command. Without an app name it
// logs out of every active app; with a name it logs out of that app only.
func onAppLogout(cf *CLIConf) error {
	tc, err := makeClient(cf, false)
	if err != nil {
		return trace.Wrap(err)
	}
	profile, err := client.StatusCurrent(cf.HomePath, cf.Proxy, cf.IdentityFileIn)
	if err != nil {
		return trace.Wrap(err)
	}
	var logout []tlsca.RouteToApp
	// If app name wasn't given on the command line, log out of all.
	if cf.AppName == "" {
		logout = profile.Apps
	} else {
		for _, app := range profile.Apps {
			if app.Name == cf.AppName {
				logout = append(logout, app)
			}
		}
		if len(logout) == 0 {
			return trace.BadParameter("not logged into app %q",
				cf.AppName)
		}
	}
	for _, app := range logout {
		// Delete the server-side session; a missing session is not an error.
		err = tc.DeleteAppSession(cf.Context, app.SessionID)
		if err != nil && !trace.IsNotFound(err) {
			return trace.Wrap(err)
		}
		// Remove the local certs and generated files for the app.
		err = tc.LogoutApp(app.Name)
		if err != nil {
			return trace.Wrap(err)
		}
		removeAppLocalFiles(profile, app.Name)
	}
	if len(logout) == 1 {
		fmt.Printf("Logged out of app %q\n", logout[0].Name)
	} else {
		fmt.Println("Logged out of all apps")
	}
	return nil
}
// onAppConfig implements "tsh app config" command: it prints connection
// details (URI, cert paths, curl command, ...) for the active app in the
// format requested via cf.Format.
func onAppConfig(cf *CLIConf) error {
	tc, err := makeClient(cf, false)
	if err != nil {
		return trace.Wrap(err)
	}
	profile, err := client.StatusCurrent(cf.HomePath, cf.Proxy, cf.IdentityFileIn)
	if err != nil {
		return trace.Wrap(err)
	}
	app, err := pickActiveApp(cf)
	if err != nil {
		return trace.Wrap(err)
	}
	conf, err := formatAppConfig(tc, profile, app.Name, app.PublicAddr, cf.Format, "")
	if err != nil {
		return trace.Wrap(err)
	}
	fmt.Print(conf)
	return nil
}
// formatAppConfig renders app connection info in the requested format: a
// single value (uri/ca/cert/key/curl), JSON/YAML, or a human-readable
// multi-line summary for any other format value.
func formatAppConfig(tc *client.TeleportClient, profile *client.ProfileStatus, appName, appPublicAddr, format, cluster string) (string, error) {
	var uri string
	// Omit the port when the proxy listens on the standard HTTPS port.
	if port := tc.WebProxyPort(); port == teleport.StandardHTTPSPort {
		uri = fmt.Sprintf("https://%v", appPublicAddr)
	} else {
		uri = fmt.Sprintf("https://%v:%v", appPublicAddr, port)
	}
	curlCmd := fmt.Sprintf(`curl \
--cacert %v \
--cert %v \
--key %v \
%v`,
		profile.CACertPathForCluster(cluster),
		profile.AppCertPath(appName),
		profile.KeyPath(),
		uri)
	format = strings.ToLower(format)
	switch format {
	case appFormatURI:
		return uri, nil
	case appFormatCA:
		return profile.CACertPathForCluster(cluster), nil
	case appFormatCert:
		return profile.AppCertPath(appName), nil
	case appFormatKey:
		return profile.KeyPath(), nil
	case appFormatCURL:
		return curlCmd, nil
	case appFormatJSON, appFormatYAML:
		appConfig := &appConfigInfo{
			appName, uri, profile.CACertPathForCluster(cluster),
			profile.AppCertPath(appName), profile.KeyPath(), curlCmd,
		}
		out, err := serializeAppConfig(appConfig, format)
		if err != nil {
			return "", trace.Wrap(err)
		}
		return fmt.Sprintf("%s\n", out), nil
	}
	// Default: human-readable summary.
	return fmt.Sprintf(`Name: %v
URI: %v
CA: %v
Cert: %v
Key: %v
`, appName, uri, profile.CACertPathForCluster(cluster),
		profile.AppCertPath(appName), profile.KeyPath()), nil
}
// appConfigInfo is the serializable form of the app connection details used
// by the JSON and YAML output formats.
type appConfigInfo struct {
	Name string `json:"name"`
	URI  string `json:"uri"`
	CA   string `json:"ca"`
	Cert string `json:"cert"`
	Key  string `json:"key"`
	Curl string `json:"curl"`
}
// serializeAppConfig renders the app config as JSON (for appFormatJSON) or
// YAML (for any other format value) and returns the encoded text.
func serializeAppConfig(configInfo *appConfigInfo, format string) (string, error) {
	var (
		out []byte
		err error
	)
	switch format {
	case appFormatJSON:
		out, err = utils.FastMarshalIndent(configInfo, "", " ")
	default:
		out, err = yaml.Marshal(configInfo)
	}
	return string(out), trace.Wrap(err)
}
// pickActiveApp returns the app the current profile is logged into.
//
// If logged into multiple apps, returns an error unless one was specified
// explicitly on CLI.
func pickActiveApp(cf *CLIConf) (*tlsca.RouteToApp, error) {
	profile, err := client.StatusCurrent(cf.HomePath, cf.Proxy, cf.IdentityFileIn)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	if len(profile.Apps) == 0 {
		return nil, trace.NotFound("please login using 'tsh app login' first")
	}
	name := cf.AppName
	if name == "" {
		apps := profile.AppNames()
		if len(apps) > 1 {
			return nil, trace.BadParameter("multiple apps are available (%v), please specify one via CLI argument",
				strings.Join(apps, ", "))
		}
		// profile.Apps is non-empty, so apps[0] is safe here.
		name = apps[0]
	}
	for _, app := range profile.Apps {
		if app.Name == name {
			return &app, nil
		}
	}
	return nil, trace.NotFound("not logged into app %q", name)
}
// removeAppLocalFiles removes generated local files for the provided app
// (currently only the app's local self-signed CA file).
func removeAppLocalFiles(profile *client.ProfileStatus, appName string) {
	removeFileIfExist(profile.AppLocalCAPath(appName))
}
// removeFileIfExist removes a local file if it exists.
func removeFileIfExist(filePath string) {
if !utils.FileExists(filePath) {
return
}
if err := os.Remove(filePath); err != nil {
log.WithError(err).Warnf("Failed to remove %v", filePath)
}
}
// loadAppSelfSignedCA loads self-signed CA for provided app, or tries to
// generate a new CA if the first load fails.
func loadAppSelfSignedCA(profile *client.ProfileStatus, tc *client.TeleportClient, appName string) (tls.Certificate, error) {
	caPath := profile.AppLocalCAPath(appName)
	keyPath := profile.KeyPath()
	caTLSCert, err := keys.LoadX509KeyPair(caPath, keyPath)
	if err == nil {
		// Loaded successfully: err is nil here, so return nil explicitly
		// instead of the misleading trace.Wrap of a nil error.
		return caTLSCert, nil
	}
	// Generate and load again.
	log.WithError(err).Debugf("Failed to load certificate from %v. Generating local self signed CA.", caPath)
	if err = generateAppSelfSignedCA(profile, tc, appName); err != nil {
		return tls.Certificate{}, err
	}
	caTLSCert, err = keys.LoadX509KeyPair(caPath, keyPath)
	if err != nil {
		return tls.Certificate{}, trace.Wrap(err)
	}
	return caTLSCert, nil
}
// generateAppSelfSignedCA generates a new self-signed CA for provided app
// and saves/overwrites the local CA file in the profile directory. The CA's
// TTL is clamped to the app certificate's remaining lifetime.
func generateAppSelfSignedCA(profile *client.ProfileStatus, tc *client.TeleportClient, appName string) error {
	appCerts, err := loadAppCertificate(tc, appName)
	if err != nil {
		return trace.Wrap(err)
	}
	appCertsExpireAt, err := getTLSCertExpireTime(appCerts)
	if err != nil {
		return trace.Wrap(err)
	}
	keyPem, err := utils.ReadPath(profile.KeyPath())
	if err != nil {
		return trace.Wrap(err)
	}
	key, err := utils.ParsePrivateKey(keyPem)
	if err != nil {
		return trace.Wrap(err)
	}
	certPem, err := tlsca.GenerateSelfSignedCAWithConfig(tlsca.GenerateCAConfig{
		Entity: pkix.Name{
			CommonName:   "localhost",
			Organization: []string{"Teleport"},
		},
		Signer:      key,
		DNSNames:    []string{"localhost"},
		IPAddresses: []net.IP{net.ParseIP(defaults.Localhost)},
		TTL:         time.Until(appCertsExpireAt),
	})
	if err != nil {
		return trace.Wrap(err)
	}
	// WriteFile truncates existing file before writing.
	if err = os.WriteFile(profile.AppLocalCAPath(appName), certPem, 0600); err != nil {
		return trace.ConvertSystemError(err)
	}
	return nil
}
// Output formats accepted by "tsh app config --format".
const (
	// appFormatURI prints app URI.
	appFormatURI = "uri"
	// appFormatCA prints app CA cert path.
	appFormatCA = "ca"
	// appFormatCert prints app cert path.
	appFormatCert = "cert"
	// appFormatKey prints app key path.
	appFormatKey = "key"
	// appFormatCURL prints app curl command.
	appFormatCURL = "curl"
	// appFormatJSON prints app URI, CA cert path, cert path, key path, and curl command in JSON format.
	appFormatJSON = "json"
	// appFormatYAML prints app URI, CA cert path, cert path, key path, and curl command in YAML format.
	appFormatYAML = "yaml"
)
Fix "tsh aws" failing to load the private key (#15964)
/*
Copyright 2021 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"crypto/tls"
"crypto/x509/pkix"
"fmt"
"net"
"os"
"strings"
"text/template"
"time"
"github.com/gravitational/teleport"
"github.com/gravitational/teleport/api/client/proto"
"github.com/gravitational/teleport/api/types"
"github.com/gravitational/teleport/api/utils/keys"
"github.com/gravitational/teleport/lib/client"
"github.com/gravitational/teleport/lib/defaults"
"github.com/gravitational/teleport/lib/tlsca"
"github.com/gravitational/teleport/lib/utils"
"github.com/ghodss/yaml"
"github.com/gravitational/trace"
)
// onAppLogin implements "tsh app login" command: it creates an app session
// for the requested application, reissues user certificates routed to that
// app, saves the profile, and prints usage instructions appropriate to the
// app type (AWS console, TCP, or HTTP).
func onAppLogin(cf *CLIConf) error {
	tc, err := makeClient(cf, false)
	if err != nil {
		return trace.Wrap(err)
	}
	app, err := getRegisteredApp(cf, tc)
	if err != nil {
		return trace.Wrap(err)
	}
	profile, err := client.StatusCurrent(cf.HomePath, cf.Proxy, cf.IdentityFileIn)
	if err != nil {
		return trace.Wrap(err)
	}
	rootCluster, err := tc.RootClusterName(cf.Context)
	if err != nil {
		return trace.Wrap(err)
	}
	// AWS console apps additionally need an IAM role ARN.
	var arn string
	if app.IsAWSConsole() {
		var err error
		arn, err = getARNFromFlags(cf, profile, app)
		if err != nil {
			return trace.Wrap(err)
		}
	}
	ws, err := tc.CreateAppSession(cf.Context, types.CreateAppSessionRequest{
		Username:    tc.Username,
		PublicAddr:  app.GetPublicAddr(),
		ClusterName: tc.SiteName,
		AWSRoleARN:  arn,
	})
	if err != nil {
		return trace.Wrap(err)
	}
	// Reissue certs with the app route embedded so the proxy can route
	// requests for this app session.
	err = tc.ReissueUserCerts(cf.Context, client.CertCacheKeep, client.ReissueParams{
		RouteToCluster: tc.SiteName,
		RouteToApp: proto.RouteToApp{
			Name:        app.GetName(),
			SessionID:   ws.GetName(),
			PublicAddr:  app.GetPublicAddr(),
			ClusterName: tc.SiteName,
			AWSRoleARN:  arn,
		},
		AccessRequests: profile.ActiveRequests.AccessRequests,
	})
	if err != nil {
		return trace.Wrap(err)
	}
	if err := tc.SaveProfile(cf.HomePath, true); err != nil {
		return trace.Wrap(err)
	}
	// Print the hint matching the application type.
	if app.IsAWSConsole() {
		return awsCliTpl.Execute(os.Stdout, map[string]string{
			"awsAppName": app.GetName(),
			"awsCmd":     "s3 ls",
		})
	}
	if app.IsTCP() {
		return appLoginTCPTpl.Execute(os.Stdout, map[string]string{
			"appName": app.GetName(),
		})
	}
	curlCmd, err := formatAppConfig(tc, profile, app.GetName(), app.GetPublicAddr(), appFormatCURL, rootCluster)
	if err != nil {
		return trace.Wrap(err)
	}
	return appLoginTpl.Execute(os.Stdout, map[string]string{
		"appName": app.GetName(),
		"curlCmd": curlCmd,
	})
}
// appLoginTpl is the message that gets printed to a user upon successful
// login into an HTTP application.
var appLoginTpl = template.Must(template.New("").Parse(
	`Logged into app {{.appName}}. Example curl command:
{{.curlCmd}}
`))

// appLoginTCPTpl is the message that gets printed to a user upon successful
// login into a TCP application.
var appLoginTCPTpl = template.Must(template.New("").Parse(
	`Logged into TCP app {{.appName}}. Start the local TCP proxy for it:
tsh proxy app {{.appName}}
Then connect to the application through this proxy.
`))

// awsCliTpl is the message that gets printed to a user upon successful
// login into an AWS Console application.
var awsCliTpl = template.Must(template.New("").Parse(
	`Logged into AWS app {{.awsAppName}}. Example AWS CLI command:
tsh aws {{.awsCmd}}
`))
// getRegisteredApp returns the registered application with the specified
// name, retrying with a relogin if the credentials are expired.
func getRegisteredApp(cf *CLIConf, tc *client.TeleportClient) (app types.Application, err error) {
	var apps []types.Application
	err = client.RetryWithRelogin(cf.Context, tc, func() error {
		allApps, err := tc.ListApps(cf.Context, &proto.ListResourcesRequest{
			Namespace:           tc.Namespace,
			PredicateExpression: fmt.Sprintf(`name == "%s"`, cf.AppName),
		})
		// Kept for fallback in case older auth does not apply filters.
		// DELETE IN 11.0.0
		// NOTE(review): a client-side name match deliberately returns nil
		// even when ListApps also returned an error; the error only
		// surfaces when no app matched.
		for _, a := range allApps {
			if a.GetName() == cf.AppName {
				apps = append(apps, a)
				return nil
			}
		}
		return trace.Wrap(err)
	})
	if err != nil {
		return nil, trace.Wrap(err)
	}
	if len(apps) == 0 {
		return nil, trace.NotFound("app %q not found, use `tsh app ls` to see registered apps", cf.AppName)
	}
	return apps[0], nil
}
// onAppLogout implements "tsh app logout" command. Without an app name it
// logs out of every active app; with a name it logs out of that app only.
func onAppLogout(cf *CLIConf) error {
	tc, err := makeClient(cf, false)
	if err != nil {
		return trace.Wrap(err)
	}
	profile, err := client.StatusCurrent(cf.HomePath, cf.Proxy, cf.IdentityFileIn)
	if err != nil {
		return trace.Wrap(err)
	}
	var logout []tlsca.RouteToApp
	// If app name wasn't given on the command line, log out of all.
	if cf.AppName == "" {
		logout = profile.Apps
	} else {
		for _, app := range profile.Apps {
			if app.Name == cf.AppName {
				logout = append(logout, app)
			}
		}
		if len(logout) == 0 {
			return trace.BadParameter("not logged into app %q",
				cf.AppName)
		}
	}
	for _, app := range logout {
		// Delete the server-side session; a missing session is not an error.
		err = tc.DeleteAppSession(cf.Context, app.SessionID)
		if err != nil && !trace.IsNotFound(err) {
			return trace.Wrap(err)
		}
		// Remove the local certs and generated files for the app.
		err = tc.LogoutApp(app.Name)
		if err != nil {
			return trace.Wrap(err)
		}
		removeAppLocalFiles(profile, app.Name)
	}
	if len(logout) == 1 {
		fmt.Printf("Logged out of app %q\n", logout[0].Name)
	} else {
		fmt.Println("Logged out of all apps")
	}
	return nil
}
// onAppConfig implements "tsh app config" command: it prints connection
// details (URI, cert paths, curl command, ...) for the active app in the
// format requested via cf.Format.
func onAppConfig(cf *CLIConf) error {
	tc, err := makeClient(cf, false)
	if err != nil {
		return trace.Wrap(err)
	}
	profile, err := client.StatusCurrent(cf.HomePath, cf.Proxy, cf.IdentityFileIn)
	if err != nil {
		return trace.Wrap(err)
	}
	app, err := pickActiveApp(cf)
	if err != nil {
		return trace.Wrap(err)
	}
	conf, err := formatAppConfig(tc, profile, app.Name, app.PublicAddr, cf.Format, "")
	if err != nil {
		return trace.Wrap(err)
	}
	fmt.Print(conf)
	return nil
}
// formatAppConfig renders app connection info in the requested format: a
// single value (uri/ca/cert/key/curl), JSON/YAML, or a human-readable
// multi-line summary for any other format value.
func formatAppConfig(tc *client.TeleportClient, profile *client.ProfileStatus, appName, appPublicAddr, format, cluster string) (string, error) {
	var uri string
	// Omit the port when the proxy listens on the standard HTTPS port.
	if port := tc.WebProxyPort(); port == teleport.StandardHTTPSPort {
		uri = fmt.Sprintf("https://%v", appPublicAddr)
	} else {
		uri = fmt.Sprintf("https://%v:%v", appPublicAddr, port)
	}
	curlCmd := fmt.Sprintf(`curl \
--cacert %v \
--cert %v \
--key %v \
%v`,
		profile.CACertPathForCluster(cluster),
		profile.AppCertPath(appName),
		profile.KeyPath(),
		uri)
	format = strings.ToLower(format)
	switch format {
	case appFormatURI:
		return uri, nil
	case appFormatCA:
		return profile.CACertPathForCluster(cluster), nil
	case appFormatCert:
		return profile.AppCertPath(appName), nil
	case appFormatKey:
		return profile.KeyPath(), nil
	case appFormatCURL:
		return curlCmd, nil
	case appFormatJSON, appFormatYAML:
		appConfig := &appConfigInfo{
			appName, uri, profile.CACertPathForCluster(cluster),
			profile.AppCertPath(appName), profile.KeyPath(), curlCmd,
		}
		out, err := serializeAppConfig(appConfig, format)
		if err != nil {
			return "", trace.Wrap(err)
		}
		return fmt.Sprintf("%s\n", out), nil
	}
	// Default: human-readable summary.
	return fmt.Sprintf(`Name: %v
URI: %v
CA: %v
Cert: %v
Key: %v
`, appName, uri, profile.CACertPathForCluster(cluster),
		profile.AppCertPath(appName), profile.KeyPath()), nil
}
// appConfigInfo is the serializable representation of an app's
// configuration, used by the "json" and "yaml" output formats.
//
// NOTE: formatAppConfig constructs this struct with a positional
// literal, so the field order must not change.
type appConfigInfo struct {
	Name string `json:"name"`
	URI string `json:"uri"`
	CA string `json:"ca"`
	Cert string `json:"cert"`
	Key string `json:"key"`
	Curl string `json:"curl"`
}
// serializeAppConfig marshals the app config as indented JSON when the
// format is appFormatJSON, and as YAML otherwise.
func serializeAppConfig(configInfo *appConfigInfo, format string) (string, error) {
	var (
		out []byte
		err error
	)
	switch format {
	case appFormatJSON:
		out, err = utils.FastMarshalIndent(configInfo, "", " ")
	default:
		out, err = yaml.Marshal(configInfo)
	}
	return string(out), trace.Wrap(err)
}
// pickActiveApp returns the app the current profile is logged into.
//
// If logged into multiple apps, returns an error unless one was specified
// explicitly on CLI.
func pickActiveApp(cf *CLIConf) (*tlsca.RouteToApp, error) {
	profile, err := client.StatusCurrent(cf.HomePath, cf.Proxy, cf.IdentityFileIn)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	if len(profile.Apps) == 0 {
		return nil, trace.NotFound("please login using 'tsh app login' first")
	}
	name := cf.AppName
	if name == "" {
		apps := profile.AppNames()
		if len(apps) > 1 {
			return nil, trace.BadParameter("multiple apps are available (%v), please specify one via CLI argument",
				strings.Join(apps, ", "))
		}
		name = apps[0]
	}
	for _, app := range profile.Apps {
		if app.Name == name {
			// Copy before taking the address so the returned pointer does
			// not alias the loop variable (gosec G601; safe today because
			// we return immediately, but fragile if the loop is extended).
			app := app
			return &app, nil
		}
	}
	return nil, trace.NotFound("not logged into app %q", name)
}
// removeAppLocalFiles removes generated local files for the provided app.
// Currently this is only the app's locally generated self-signed CA file.
func removeAppLocalFiles(profile *client.ProfileStatus, appName string) {
	removeFileIfExist(profile.AppLocalCAPath(appName))
}
// removeFileIfExist removes a local file if it exists. A failed removal
// is only logged as a warning, not returned: leftover local files are
// not fatal.
func removeFileIfExist(filePath string) {
	if utils.FileExists(filePath) {
		if err := os.Remove(filePath); err != nil {
			log.WithError(err).Warnf("Failed to remove %v", filePath)
		}
	}
}
// loadAppSelfSignedCA loads self-signed CA for provided app, or tries to
// generate a new CA if first load fails.
func loadAppSelfSignedCA(profile *client.ProfileStatus, tc *client.TeleportClient, appName string) (tls.Certificate, error) {
	caPath := profile.AppLocalCAPath(appName)
	keyPath := profile.KeyPath()
	caTLSCert, err := keys.LoadX509KeyPair(caPath, keyPath)
	if err == nil {
		// err is known to be nil here; return nil explicitly instead of
		// the misleading trace.Wrap(err).
		return caTLSCert, nil
	}
	// Generate and load again.
	log.WithError(err).Debugf("Failed to load certificate from %v. Generating local self signed CA.", caPath)
	if err = generateAppSelfSignedCA(profile, tc, appName); err != nil {
		return tls.Certificate{}, trace.Wrap(err)
	}
	caTLSCert, err = keys.LoadX509KeyPair(caPath, keyPath)
	if err != nil {
		return tls.Certificate{}, trace.Wrap(err)
	}
	return caTLSCert, nil
}
// generateAppSelfSignedCA generates a new self-signed CA for provided app and
// saves/overwrites the local CA file in the profile directory.
//
// The CA is issued for "localhost" (and the localhost IP), signed with
// the profile's private key, and its TTL is tied to the app
// certificate's expiry so it does not outlive the app session.
func generateAppSelfSignedCA(profile *client.ProfileStatus, tc *client.TeleportClient, appName string) error {
	appCerts, err := loadAppCertificate(tc, appName)
	if err != nil {
		return trace.Wrap(err)
	}
	// Used below to bound the CA TTL to the app certificate lifetime.
	appCertsExpireAt, err := getTLSCertExpireTime(appCerts)
	if err != nil {
		return trace.Wrap(err)
	}
	keyPem, err := utils.ReadPath(profile.KeyPath())
	if err != nil {
		return trace.Wrap(err)
	}
	key, err := keys.ParsePrivateKey(keyPem)
	if err != nil {
		return trace.Wrap(err)
	}
	certPem, err := tlsca.GenerateSelfSignedCAWithConfig(tlsca.GenerateCAConfig{
		Entity: pkix.Name{
			CommonName: "localhost",
			Organization: []string{"Teleport"},
		},
		Signer: key,
		DNSNames: []string{"localhost"},
		IPAddresses: []net.IP{net.ParseIP(defaults.Localhost)},
		TTL: time.Until(appCertsExpireAt),
	})
	if err != nil {
		return trace.Wrap(err)
	}
	// WriteFile truncates existing file before writing.
	if err = os.WriteFile(profile.AppLocalCAPath(appName), certPem, 0600); err != nil {
		return trace.ConvertSystemError(err)
	}
	return nil
}
// Output formats accepted by "tsh app config --format"
// (see formatAppConfig).
const (
	// appFormatURI prints app URI.
	appFormatURI = "uri"
	// appFormatCA prints app CA cert path.
	appFormatCA = "ca"
	// appFormatCert prints app cert path.
	appFormatCert = "cert"
	// appFormatKey prints app key path.
	appFormatKey = "key"
	// appFormatCURL prints app curl command.
	appFormatCURL = "curl"
	// appFormatJSON prints app URI, CA cert path, cert path, key path, and curl command in JSON format.
	appFormatJSON = "json"
	// appFormatYAML prints app URI, CA cert path, cert path, key path, and curl command in YAML format.
	appFormatYAML = "yaml"
)
|
package proxy
import (
"fmt"
"os"
"strconv"
"strings"
)
// usersBasePath is a printf template for the Docker secret file holding
// users of a service; the %s is the secret (file) name.
var usersBasePath string = "/run/secrets/dfp_users_%s"
// ServiceDest holds data used to generate proxy configuration. It is extracted as a separate struct since a single service can have multiple combinations.
type ServiceDest struct {
	// The list of allowed methods. If specified, a request with a method that is not on the list will be denied.
	AllowedMethods []string
	// The list of denied methods. If specified, a request with a method that is on the list will be denied.
	DeniedMethods []string
	// Whether to deny HTTP requests thus allowing only HTTPS.
	DenyHttp bool
	// Whether to redirect all http requests to https
	HttpsOnly bool
	// http code for http to https redirects
	HttpsRedirectCode string
	// Whether to ignore authorization for this service destination.
	IgnoreAuthorization bool
	// The internal port of a service that should be reconfigured.
	// The port is used only in the *swarm* mode.
	Port string
	// The request mode. The proxy should be able to work with any mode supported by HAProxy.
	// However, actively supported and tested modes are *http*, *tcp*, and *sni*.
	ReqMode string
	// Internal use only. Do not modify.
	ReqModeFormatted string
	// The domain of the service.
	// If set, the proxy will allow access only to requests coming to that domain.
	ServiceDomain []string
	// Headers used to filter requests
	ServiceHeader map[string]string
	// The URL path of the service.
	ServicePath []string
	// The source (entry) port of a service.
	// Useful only when specifying multiple destinations of a single service.
	SrcPort int
	// Internal use only. Do not modify.
	SrcPortAcl string
	// Internal use only. Do not modify.
	SrcPortAclName string
	// Whether to verify client SSL and deny request when it is invalid
	VerifyClientSsl bool
	// If specified, only requests with the same agent will be forwarded to the backend.
	UserAgent UserAgent
	// Internal use only
	// (zero-based position of this destination in the service's list;
	// set by getServiceDest)
	Index int
}
// UserAgent holds data used to generate proxy configuration. It is extracted as a separate struct since each user agent needs an ACL identifier. If specified, only requests with the same agent will be forwarded to the backend.
type UserAgent struct {
	// Value is the list of user-agent strings to match.
	Value []string
	// AclName is the HAProxy ACL identifier derived from Value
	// (see getServiceDest / replaceNonAlphabetAndNumbers).
	AclName string
}
// Service contains description of a service that should be added to the proxy configuration.
type Service struct {
	// ACLs are ordered alphabetically by their names.
	// If not specified, serviceName is used instead.
	AclName string `split_words:"true"`
	// Additional headers that will be added to the request before forwarding it to the service.
	// Please consult https://www.haproxy.com/doc/aloha/7.0/haproxy/http_rewriting.html#add-a-header-to-the-request for more info.
	AddReqHeader []string `split_words:"true"`
	// Additional headers that will be added to the response before forwarding it to the client.
	AddResHeader []string `split_words:"true"`
	// Additional configuration that will be added to the bottom of the service backend
	BackendExtra string `split_words:"true"`
	// Whether to use `docker` as a check resolver. Set through the environment variable CHECK_RESOLVERS
	CheckResolvers bool `split_words:"true"`
	// One of the five connection modes supported by the HAProxy.
	// `http-keep-alive`: all requests and responses are processed.
	// `http-tunnel`: only the first request and response are processed, everything else is forwarded with no analysis.
	// `httpclose`: tunnel with "Connection: close" added in both directions.
	// `http-server-close`: the server-facing connection is closed after the response.
	// `forceclose`: the connection is actively closed after end of response.
	// In general, it is preferred to use http-server-close with application servers, and some static servers might benefit from http-keep-alive.
	// Connection mode is restricted to HTTP mode only.
	// If specified, connection mode will be applied to the backend section.
	ConnectionMode string `split_words:"true"`
	// Internal use only
	Debug bool
	// Internal use only
	DebugFormat string
	// Additional headers that will be deleted in the request before forwarding it to the service. Please consult https://www.haproxy.com/doc/aloha/7.0/haproxy/http_rewriting.html#delete-a-header-in-the-request for more info.
	DelReqHeader []string `split_words:"true"`
	// Additional headers that will be deleted in the response before forwarding it to the client. Please consult https://www.haproxy.com/doc/aloha/7.0/haproxy/http_rewriting.html#delete-a-header-in-the-response for more info.
	DelResHeader []string `split_words:"true"`
	// Whether to distribute a request to all the instances of the proxy.
	// Used only in the swarm mode.
	Distribute bool `split_words:"true"`
	// The internal HTTPS port of a service that should be reconfigured.
	// The port is used only in the swarm mode.
	// If not specified, the `port` parameter will be used instead.
	HttpsPort int `split_words:"true"`
	// If set to true, it will be the default_backend service.
	IsDefaultBackend bool `split_words:"true"`
	// The hostname where the service is running, for instance on a separate swarm.
	// If specified, the proxy will dispatch requests to that domain.
	OutboundHostname string `split_words:"true"`
	// The ACL derivative. Defaults to path_beg.
	// See https://cbonte.github.io/haproxy-dconv/configuration-1.5.html#7.3.6-path for more info.
	PathType string `split_words:"true"`
	// Whether to redirect to https when X-Forwarded-Proto is http
	RedirectWhenHttpProto bool `split_words:"true"`
	// A regular expression to apply the modification.
	// If specified, `reqPathSearch` needs to be set as well.
	ReqPathReplace string `split_words:"true"`
	// A regular expression to search the content to be replaced.
	// If specified, `reqPathReplace` needs to be set as well.
	ReqPathSearch string `split_words:"true"`
	// Content of the PEM-encoded certificate to be used by the proxy when serving traffic over SSL.
	ServiceCert string `split_words:"true"`
	// The algorithm that should be applied to domain acl. The default value is `hdr(host)`.
	ServiceDomainAlgo string
	// The name of the service.
	// It must match the name of the Swarm service.
	ServiceName string `split_words:"true"`
	// Determines the type of sticky sessions. If set to `sticky-server`, session cookie will be set by the proxy. Any other value means that sticky sessions are not used and load balancing is performed by Docker's Overlay network. Please open an issue if you'd like support for other types of sticky sessions.
	SessionType string `split_words:"true"`
	// Additional headers that will be set to the request before forwarding it to the service. If a specified header exists, it will be replaced with the new one.
	SetReqHeader []string `split_words:"true"`
	// Additional headers that will be set to the response before forwarding it to the client. If a specified header exists, it will be replaced with the new one.
	SetResHeader []string `split_words:"true"`
	// If set to true, server certificates are not verified. This flag should be set for SSL enabled backend services.
	SslVerifyNone bool `split_words:"true"`
	// The path to the template representing a snippet of the backend configuration.
	// If specified, the backend template will be loaded from the specified file.
	// If specified, `templateFePath` must be set as well.
	// See the https://github.com/vfarcic/docker-flow-proxy#templates section for more info.
	TemplateBePath string `split_words:"true"`
	// The path to the template representing a snippet of the frontend configuration.
	// If specified, the frontend template will be loaded from the specified file.
	// If specified, `templateBePath` must be set as well.
	// See the https://github.com/vfarcic/docker-flow-proxy#templates section for more info.
	TemplateFePath string `split_words:"true"`
	// The server timeout in seconds
	TimeoutServer string `split_words:"true"`
	// The tunnel timeout in seconds
	TimeoutTunnel string `split_words:"true"`
	// Internal use only.
	UseGlobalUsers bool
	// A comma-separated list of credentials(<user>:<pass>) for HTTP basic auth, which applies only to the service that will be reconfigured.
	Users []User `split_words:"true"`
	// Whether to add "X-Forwarded-Proto https" header.
	XForwardedProto bool `envconfig:"x_forwarded_proto" split_words:"true"`
	// The rest of variables are for internal use only
	// NOTE(review): the fields below are populated during configuration
	// rendering elsewhere in the package — confirm semantics there.
	ServicePort string
	AclCondition string
	Host string
	LookupRetry int
	LookupRetryInterval int
	// ServiceDest holds the (possibly multiple) destinations of this
	// service, built by getServiceDestList.
	ServiceDest []ServiceDest
	// Tasks is filled from a DNS lookup of "tasks.<serviceName>" when
	// SessionType is set (see GetServiceFromProvider).
	Tasks []string
}
// Services contains the list of services used inside the proxy.
// It implements sort.Interface together with Less and Swap below.
type Services []Service
// Len implements sort.Interface.
func (slice Services) Len() int {
	return len(slice)
}
// Less implements sort.Interface. Services with a "/.well-known" path
// sort first, services serving the bare root path "/" sort last, and
// ties are broken alphabetically by AclName.
func (slice Services) Less(i, j int) bool {
	wellKnownI := hasWellKnown(slice[i])
	wellKnownJ := hasWellKnown(slice[j])
	if wellKnownI != wellKnownJ {
		// Exactly one of the pair has a well-known path; it goes first.
		return wellKnownI
	}
	rootI := hasRoot(slice[i])
	rootJ := hasRoot(slice[j])
	if rootI != rootJ {
		// Exactly one of the pair serves the root path; it goes last.
		return rootJ
	}
	return slice[i].AclName < slice[j].AclName
}
// Swap implements sort.Interface.
func (slice Services) Swap(i, j int) {
	tmp := slice[i]
	slice[i] = slice[j]
	slice[j] = tmp
}
// hasRoot reports whether any destination of the service serves the
// root path "/" without a domain restriction.
func hasRoot(service Service) bool {
	for _, dest := range service.ServiceDest {
		if len(dest.ServiceDomain) > 0 {
			continue
		}
		for _, p := range dest.ServicePath {
			if p == "/" {
				return true
			}
		}
	}
	return false
}
// hasWellKnown reports whether any destination path of the service
// starts with "/.well-known" (case-insensitive).
func hasWellKnown(service Service) bool {
	for i := range service.ServiceDest {
		for _, p := range service.ServiceDest[i].ServicePath {
			if strings.HasPrefix(strings.ToLower(p), "/.well-known") {
				return true
			}
		}
	}
	return false
}
// extractUsersFromString parses a comma or newline separated list of
// <user>[:<pass>] credentials and returns the resulting users.
//
// context is the service name used in log messages, encrypted marks the
// parsed passwords as already encrypted, and skipEmptyPassword causes
// entries without a password to be logged and dropped instead of
// collected.
func extractUsersFromString(context, usersString string, encrypted, skipEmptyPassword bool) []*User {
	collectedUsers := []*User{}
	// TODO: Test
	if len(usersString) == 0 {
		return collectedUsers
	}
	splitter := func(x rune) bool {
		return x == '\n' || x == ','
	}
	users := strings.FieldsFunc(usersString, splitter)
	for _, user := range users {
		user = strings.Trim(user, "\n\t ")
		if len(user) == 0 {
			continue
		}
		if strings.Contains(user, ":") {
			colonIndex := strings.Index(user, ":")
			userName := strings.Trim(user[0:colonIndex], "\t ")
			userPass := strings.Trim(user[colonIndex+1:], "\t ")
			if len(userName) == 0 || len(userPass) == 0 {
				logPrintf("There is a user with no name or with invalid format for the service %s", context)
			} else {
				collectedUsers = append(collectedUsers, &User{Username: userName, Password: userPass, PassEncrypted: encrypted})
			}
		} else {
			if len(user) == 0 { // TODO: Test
				logPrintf("There is a user with no name or with invalid format for the service %s", context)
			} else if skipEmptyPassword { // TODO: Test
				// BUG FIX: the original format string had three %s verbs but
				// only two arguments (and in the wrong order), producing
				// "%!s(MISSING)" output. Log the user and its service.
				logPrintf("There is a user %s with no password for the service %s", user, context)
			} else if !skipEmptyPassword {
				collectedUsers = append(collectedUsers, &User{Username: user})
			}
		}
	}
	return collectedUsers
}
// ServiceParameterProvider defines common interface for translating parameters into structs.
type ServiceParameterProvider interface {
	// Fill populates the given Service from the provider's parameters.
	Fill(service *Service)
	// GetString returns the raw string value of the named parameter;
	// callers treat an empty string as "parameter absent".
	GetString(name string) string
}
// GetServiceFromMap returns Service struct by extracting request parameters.
// The map is wrapped in a mapParameterProvider so the shared
// provider-based extraction logic can be reused.
func GetServiceFromMap(req *map[string]string) *Service {
	provider := mapParameterProvider{theMap: req}
	return GetServiceFromProvider(&provider)
}
// GetServiceFromProvider returns Service by extracting parameters from
// ServiceParameterProvider. Multi-value parameters are split on the
// SEPARATOR environment variable; users come from the service
// parameters merged with the global USERS secret/env var.
func GetServiceFromProvider(provider ServiceParameterProvider) *Service {
	sr := new(Service)
	provider.Fill(sr)
	separator := os.Getenv("SEPARATOR")
	// TODO: Remove. It's added to maintain backwards compatibility with the deprecated parameter serviceDomainMatchAll (since July 2017)
	if strings.EqualFold(provider.GetString("serviceDomainMatchAll"), "true") {
		sr.ServiceDomainAlgo = "hdr_dom(host)"
	}
	if len(provider.GetString("httpsPort")) > 0 {
		// Parse error deliberately ignored; HttpsPort stays 0 on bad input.
		sr.HttpsPort, _ = strconv.Atoi(provider.GetString("httpsPort"))
	}
	if len(provider.GetString("addReqHeader")) > 0 {
		sr.AddReqHeader = strings.Split(provider.GetString("addReqHeader"), separator)
	} else if len(provider.GetString("addHeader")) > 0 { // TODO: Deprecated since Apr. 2017.
		sr.AddReqHeader = strings.Split(provider.GetString("addHeader"), separator)
	}
	if len(provider.GetString("setReqHeader")) > 0 {
		sr.SetReqHeader = strings.Split(provider.GetString("setReqHeader"), separator)
	} else if len(provider.GetString("setHeader")) > 0 { // TODO: Deprecated since Apr. 2017.
		sr.SetReqHeader = strings.Split(provider.GetString("setHeader"), separator)
	}
	if len(provider.GetString("delReqHeader")) > 0 {
		sr.DelReqHeader = strings.Split(provider.GetString("delReqHeader"), separator)
	}
	if len(provider.GetString("addResHeader")) > 0 {
		sr.AddResHeader = strings.Split(provider.GetString("addResHeader"), separator)
	}
	if len(provider.GetString("setResHeader")) > 0 {
		sr.SetResHeader = strings.Split(provider.GetString("setResHeader"), separator)
	}
	if len(provider.GetString("delResHeader")) > 0 {
		sr.DelResHeader = strings.Split(provider.GetString("delResHeader"), separator)
	}
	if len(sr.SessionType) > 0 {
		// Sticky sessions need the task list; lookup failure is ignored.
		sr.Tasks, _ = lookupHost("tasks." + sr.ServiceName)
	}
	globalUsersString := getSecretOrEnvVar("USERS", "")
	globalUsersEncrypted := strings.EqualFold(getSecretOrEnvVar("USERS_PASS_ENCRYPTED", ""), "true")
	sr.Users = mergeUsers(
		sr.ServiceName,
		provider.GetString("users"),
		provider.GetString("usersSecret"),
		getBoolParam(provider, "usersPassEncrypted"),
		globalUsersString,
		globalUsersEncrypted,
	)
	sr.ServiceDest = getServiceDestList(sr, provider)
	return sr
}
// getServiceDestList collects all ServiceDest entries for a service: the
// un-indexed parameters (index -1) plus "param.1" .. "param.10". Indexed
// scanning stops at the first invalid (path-less and port-less) entry.
// The un-indexed entry's ServiceDomain and HttpsOnly are propagated to
// indexed entries that do not set their own.
func getServiceDestList(sr *Service, provider ServiceParameterProvider) []ServiceDest {
	sdList := []ServiceDest{}
	sd := getServiceDest(sr, provider, -1)
	serviceDomain := []string{}
	if isServiceDestValid(&sd) {
		sdList = append(sdList, sd)
	} else {
		// The un-indexed entry is invalid on its own, but its domain may
		// still apply to the indexed entries below.
		serviceDomain = sd.ServiceDomain
	}
	httpsOnly := sd.HttpsOnly
	for i := 1; i <= 10; i++ {
		sd := getServiceDest(sr, provider, i)
		if isServiceDestValid(&sd) {
			sdList = append(sdList, sd)
		} else {
			break
		}
	}
	if len(sdList) == 0 {
		// No destination at all: create a bare one carrying only the
		// request mode.
		reqMode := "http"
		if len(provider.GetString("reqMode")) > 0 {
			reqMode = provider.GetString("reqMode")
		}
		sdList = append(sdList, ServiceDest{ReqMode: reqMode})
	}
	for i, sd := range sdList {
		if len(sd.ServiceDomain) > 0 && len(sd.ServicePath) == 0 {
			sdList[i].ServicePath = []string{"/"}
		} else if len(sd.ServiceDomain) == 0 && len(serviceDomain) > 0 {
			sdList[i].ServiceDomain = serviceDomain
		}
		if httpsOnly && !sd.HttpsOnly {
			sdList[i].HttpsOnly = true
		}
	}
	return sdList
}
// getServiceDest builds a single ServiceDest from the indexed request
// parameters (e.g. "port.2", "servicePath.2"). An index of -1 or 0 reads
// the un-suffixed parameters.
func getServiceDest(sr *Service, provider ServiceParameterProvider, index int) ServiceDest {
	separator := os.Getenv("SEPARATOR")
	suffix := ""
	if index > 0 {
		suffix = fmt.Sprintf(".%d", index)
	}
	// NOTE: removed a stale commented-out httpsRedirectCode default that
	// duplicated the live HttpsRedirectCode assignment below.
	userAgent := UserAgent{}
	if len(provider.GetString(fmt.Sprintf("userAgent%s", suffix))) > 0 {
		userAgent.Value = strings.Split(provider.GetString(fmt.Sprintf("userAgent%s", suffix)), separator)
		userAgent.AclName = replaceNonAlphabetAndNumbers(userAgent.Value)
	}
	reqMode := "http"
	if len(provider.GetString(fmt.Sprintf("reqMode%s", suffix))) > 0 {
		reqMode = provider.GetString(fmt.Sprintf("reqMode%s", suffix))
	}
	// Parse error ignored: srcPort stays 0 when absent or invalid.
	srcPort, _ := strconv.Atoi(provider.GetString(fmt.Sprintf("srcPort%s", suffix)))
	headerString := provider.GetString(fmt.Sprintf("serviceHeader%s", suffix))
	header := map[string]string{}
	if len(headerString) > 0 {
		// serviceHeader is a separator-delimited list of "Name:Value"
		// pairs; entries without exactly one colon are silently skipped.
		for _, value := range strings.Split(headerString, separator) {
			values := strings.Split(value, ":")
			if len(values) == 2 {
				header[strings.Trim(values[0], " ")] = strings.Trim(values[1], " ")
			}
		}
	}
	sdIndex := index
	if sdIndex < 0 {
		sdIndex = 0
	}
	return ServiceDest{
		AllowedMethods: getSliceFromString(provider, fmt.Sprintf("allowedMethods%s", suffix)),
		DeniedMethods: getSliceFromString(provider, fmt.Sprintf("deniedMethods%s", suffix)),
		DenyHttp: getBoolParam(provider, fmt.Sprintf("denyHttp%s", suffix)),
		HttpsOnly: getBoolParam(provider, fmt.Sprintf("httpsOnly%s", suffix)),
		HttpsRedirectCode: provider.GetString(fmt.Sprintf("httpsRedirectCode%s", suffix)),
		IgnoreAuthorization: getBoolParam(provider, fmt.Sprintf("ignoreAuthorization%s", suffix)),
		Port: provider.GetString(fmt.Sprintf("port%s", suffix)),
		ReqMode: reqMode,
		ServiceDomain: getSliceFromString(provider, fmt.Sprintf("serviceDomain%s", suffix)),
		ServiceHeader: header,
		ServicePath: getSliceFromString(provider, fmt.Sprintf("servicePath%s", suffix)),
		SrcPort: srcPort,
		VerifyClientSsl: getBoolParam(provider, fmt.Sprintf("verifyClientSsl%s", suffix)),
		UserAgent: userAgent,
		Index: sdIndex,
	}
}
// getSliceFromString reads the named parameter and splits it on the
// SEPARATOR environment variable, returning an empty slice when the
// parameter is not set.
func getSliceFromString(provider ServiceParameterProvider, key string) []string {
	raw := provider.GetString(key)
	if len(raw) == 0 {
		return []string{}
	}
	return strings.Split(raw, os.Getenv("SEPARATOR"))
}
// isServiceDestValid reports whether the destination carries at least a
// path or a port.
func isServiceDestValid(sd *ServiceDest) bool {
	if len(sd.ServicePath) > 0 {
		return true
	}
	return len(sd.Port) > 0
}
// getBoolParam parses the named parameter as a boolean; an absent or
// unparsable value yields false.
func getBoolParam(req ServiceParameterProvider, param string) bool {
	raw := req.GetString(param)
	if len(raw) == 0 {
		return false
	}
	parsed, _ := strconv.ParseBool(raw)
	return parsed
}
// mergeUsers combines users passed as a request parameter with users
// loaded from a secret file (or the global USERS secret/env var) and
// returns only users that end up with a password. If users were
// requested but none are valid, a single random dummy user is returned
// so basic auth stays enabled (and always fails).
func mergeUsers(
	serviceName,
	usersParam,
	usersSecret string,
	usersPassEncrypted bool,
	globalUsersString string,
	globalUsersEncrypted bool,
) []User {
	var collectedUsers []*User
	paramUsers := extractUsersFromString(serviceName, usersParam, usersPassEncrypted, false)
	fileUsers, _ := getUsersFromFile(serviceName, usersSecret, usersPassEncrypted)
	if len(paramUsers) > 0 {
		if !allUsersHavePasswords(paramUsers) {
			// Fall back to the global users only when no secret file was given.
			if len(usersSecret) == 0 {
				fileUsers = extractUsersFromString(serviceName, globalUsersString, globalUsersEncrypted, true)
			}
			for _, u := range paramUsers {
				if !u.hasPassword() {
					if userByName := findUserByName(fileUsers, u.Username); userByName != nil {
						// BUG FIX: removed a leftover debug assignment that
						// set the password to a junk literal ("sdasdsad")
						// before it was immediately overwritten here.
						u.Password = userByName.Password
						u.PassEncrypted = userByName.PassEncrypted
					} else {
						// TODO: Return an error
						// TODO: Test
						logPrintf("For service %s it was impossible to find password for user %s.",
							serviceName, u.Username)
					}
				}
			}
		}
		collectedUsers = paramUsers
	} else {
		collectedUsers = fileUsers
	}
	ret := []User{}
	for _, u := range collectedUsers {
		if u.hasPassword() {
			ret = append(ret, *u)
		}
	}
	if len(ret) == 0 && (len(usersParam) != 0 || len(usersSecret) != 0) {
		//we haven't found any users but they were requested so generating dummy one
		ret = append(ret, *randomUser())
	}
	if len(ret) == 0 {
		return nil
	}
	return ret
}
// getUsersFromFile loads users from the Docker secret file named by
// fileName (resolved through usersBasePath). An empty fileName yields an
// empty user list; a read failure is logged and returned.
func getUsersFromFile(serviceName, fileName string, passEncrypted bool) ([]*User, error) {
	if len(fileName) == 0 {
		return []*User{}, nil
	}
	usersFile := fmt.Sprintf(usersBasePath, fileName)
	content, err := readFile(usersFile)
	if err != nil {
		logPrintf(
			"For service %s it was impossible to load userFile %s due to error %s",
			serviceName,
			usersFile,
			err.Error(),
		)
		return []*User{}, err
	}
	userContents := strings.TrimRight(string(content[:]), "\n")
	return extractUsersFromString(serviceName, userContents, passEncrypted, true), nil
}
// allUsersHavePasswords reports whether every user in the list has a
// password set.
func allUsersHavePasswords(users []*User) bool {
	for i := range users {
		if !users[i].hasPassword() {
			return false
		}
	}
	return true
}
// findUserByName returns the first user whose name matches
// case-insensitively, or nil when there is none.
func findUserByName(users []*User, name string) *User {
	for i := range users {
		if strings.EqualFold(name, users[i].Username) {
			return users[i]
		}
	}
	return nil
}
cleanup
package proxy
import (
"fmt"
"os"
"strconv"
"strings"
)
// usersBasePath is a printf template for the Docker secret file holding
// users of a service; the %s is the secret (file) name.
var usersBasePath string = "/run/secrets/dfp_users_%s"
// ServiceDest holds data used to generate proxy configuration. It is extracted as a separate struct since a single service can have multiple combinations.
type ServiceDest struct {
	// The list of allowed methods. If specified, a request with a method that is not on the list will be denied.
	AllowedMethods []string
	// The list of denied methods. If specified, a request with a method that is on the list will be denied.
	DeniedMethods []string
	// Whether to deny HTTP requests thus allowing only HTTPS.
	DenyHttp bool
	// Whether to redirect all http requests to https
	HttpsOnly bool
	// http code for http to https redirects
	HttpsRedirectCode string
	// Whether to ignore authorization for this service destination.
	IgnoreAuthorization bool
	// The internal port of a service that should be reconfigured.
	// The port is used only in the *swarm* mode.
	Port string
	// The request mode. The proxy should be able to work with any mode supported by HAProxy.
	// However, actively supported and tested modes are *http*, *tcp*, and *sni*.
	ReqMode string
	// Internal use only. Do not modify.
	ReqModeFormatted string
	// The domain of the service.
	// If set, the proxy will allow access only to requests coming to that domain.
	ServiceDomain []string
	// Headers used to filter requests
	ServiceHeader map[string]string
	// The URL path of the service.
	ServicePath []string
	// The source (entry) port of a service.
	// Useful only when specifying multiple destinations of a single service.
	SrcPort int
	// Internal use only. Do not modify.
	SrcPortAcl string
	// Internal use only. Do not modify.
	SrcPortAclName string
	// Whether to verify client SSL and deny request when it is invalid
	VerifyClientSsl bool
	// If specified, only requests with the same agent will be forwarded to the backend.
	UserAgent UserAgent
	// Internal use only
	// (zero-based position of this destination in the service's list)
	Index int
}
// UserAgent holds data used to generate proxy configuration. It is extracted as a separate struct since each user agent needs an ACL identifier. If specified, only requests with the same agent will be forwarded to the backend.
type UserAgent struct {
	// Value is the list of user-agent strings to match.
	Value []string
	// AclName is the HAProxy ACL identifier derived from Value.
	AclName string
}
// Service contains description of a service that should be added to the proxy configuration.
type Service struct {
	// ACLs are ordered alphabetically by their names.
	// If not specified, serviceName is used instead.
	AclName string `split_words:"true"`
	// Additional headers that will be added to the request before forwarding it to the service.
	// Please consult https://www.haproxy.com/doc/aloha/7.0/haproxy/http_rewriting.html#add-a-header-to-the-request for more info.
	AddReqHeader []string `split_words:"true"`
	// Additional headers that will be added to the response before forwarding it to the client.
	AddResHeader []string `split_words:"true"`
	// Additional configuration that will be added to the bottom of the service backend
	BackendExtra string `split_words:"true"`
	// Whether to use `docker` as a check resolver. Set through the environment variable CHECK_RESOLVERS
	CheckResolvers bool `split_words:"true"`
	// One of the five connection modes supported by the HAProxy.
	// `http-keep-alive`: all requests and responses are processed.
	// `http-tunnel`: only the first request and response are processed, everything else is forwarded with no analysis.
	// `httpclose`: tunnel with "Connection: close" added in both directions.
	// `http-server-close`: the server-facing connection is closed after the response.
	// `forceclose`: the connection is actively closed after end of response.
	// In general, it is preferred to use http-server-close with application servers, and some static servers might benefit from http-keep-alive.
	// Connection mode is restricted to HTTP mode only.
	// If specified, connection mode will be applied to the backend section.
	ConnectionMode string `split_words:"true"`
	// Internal use only
	Debug bool
	// Internal use only
	DebugFormat string
	// Additional headers that will be deleted in the request before forwarding it to the service. Please consult https://www.haproxy.com/doc/aloha/7.0/haproxy/http_rewriting.html#delete-a-header-in-the-request for more info.
	DelReqHeader []string `split_words:"true"`
	// Additional headers that will be deleted in the response before forwarding it to the client. Please consult https://www.haproxy.com/doc/aloha/7.0/haproxy/http_rewriting.html#delete-a-header-in-the-response for more info.
	DelResHeader []string `split_words:"true"`
	// Whether to distribute a request to all the instances of the proxy.
	// Used only in the swarm mode.
	Distribute bool `split_words:"true"`
	// The internal HTTPS port of a service that should be reconfigured.
	// The port is used only in the swarm mode.
	// If not specified, the `port` parameter will be used instead.
	HttpsPort int `split_words:"true"`
	// If set to true, it will be the default_backend service.
	IsDefaultBackend bool `split_words:"true"`
	// The hostname where the service is running, for instance on a separate swarm.
	// If specified, the proxy will dispatch requests to that domain.
	OutboundHostname string `split_words:"true"`
	// The ACL derivative. Defaults to path_beg.
	// See https://cbonte.github.io/haproxy-dconv/configuration-1.5.html#7.3.6-path for more info.
	PathType string `split_words:"true"`
	// Whether to redirect to https when X-Forwarded-Proto is http
	RedirectWhenHttpProto bool `split_words:"true"`
	// A regular expression to apply the modification.
	// If specified, `reqPathSearch` needs to be set as well.
	ReqPathReplace string `split_words:"true"`
	// A regular expression to search the content to be replaced.
	// If specified, `reqPathReplace` needs to be set as well.
	ReqPathSearch string `split_words:"true"`
	// Content of the PEM-encoded certificate to be used by the proxy when serving traffic over SSL.
	ServiceCert string `split_words:"true"`
	// The algorithm that should be applied to domain acl. The default value is `hdr(host)`.
	ServiceDomainAlgo string
	// The name of the service.
	// It must match the name of the Swarm service.
	ServiceName string `split_words:"true"`
	// Determines the type of sticky sessions. If set to `sticky-server`, session cookie will be set by the proxy. Any other value means that sticky sessions are not used and load balancing is performed by Docker's Overlay network. Please open an issue if you'd like support for other types of sticky sessions.
	SessionType string `split_words:"true"`
	// Additional headers that will be set to the request before forwarding it to the service. If a specified header exists, it will be replaced with the new one.
	SetReqHeader []string `split_words:"true"`
	// Additional headers that will be set to the response before forwarding it to the client. If a specified header exists, it will be replaced with the new one.
	SetResHeader []string `split_words:"true"`
	// If set to true, server certificates are not verified. This flag should be set for SSL enabled backend services.
	SslVerifyNone bool `split_words:"true"`
	// The path to the template representing a snippet of the backend configuration.
	// If specified, the backend template will be loaded from the specified file.
	// If specified, `templateFePath` must be set as well.
	// See the https://github.com/vfarcic/docker-flow-proxy#templates section for more info.
	TemplateBePath string `split_words:"true"`
	// The path to the template representing a snippet of the frontend configuration.
	// If specified, the frontend template will be loaded from the specified file.
	// If specified, `templateBePath` must be set as well.
	// See the https://github.com/vfarcic/docker-flow-proxy#templates section for more info.
	TemplateFePath string `split_words:"true"`
	// The server timeout in seconds
	TimeoutServer string `split_words:"true"`
	// The tunnel timeout in seconds
	TimeoutTunnel string `split_words:"true"`
	// Internal use only.
	UseGlobalUsers bool
	// A comma-separated list of credentials(<user>:<pass>) for HTTP basic auth, which applies only to the service that will be reconfigured.
	Users []User `split_words:"true"`
	// Whether to add "X-Forwarded-Proto https" header.
	XForwardedProto bool `envconfig:"x_forwarded_proto" split_words:"true"`
	// The rest of variables are for internal use only
	// NOTE(review): the fields below are populated during configuration
	// rendering elsewhere in the package — confirm semantics there.
	ServicePort string
	AclCondition string
	Host string
	LookupRetry int
	LookupRetryInterval int
	// ServiceDest holds the (possibly multiple) destinations of this
	// service, built by getServiceDestList.
	ServiceDest []ServiceDest
	// Tasks is filled from a DNS lookup of "tasks.<serviceName>" when
	// SessionType is set (see GetServiceFromProvider).
	Tasks []string
}
// Services contains the list of services used inside the proxy.
// It implements sort.Interface (see Len, Less, and Swap below) so that
// services can be ordered deterministically when configuration is generated.
type Services []Service
// Len returns the number of services. It is part of sort.Interface.
func (slice Services) Len() int {
	return len(slice)
}
// Less orders services for configuration output: services with a
// "/.well-known" path come first, services rooted at "/" without a domain
// (catch-all) come last, and ties are broken alphabetically by ACL name.
// It is part of sort.Interface.
func (slice Services) Less(i, j int) bool {
	iWellKnown, jWellKnown := hasWellKnown(slice[i]), hasWellKnown(slice[j])
	if iWellKnown != jWellKnown {
		// The well-known service sorts first.
		return iWellKnown
	}
	iRoot, jRoot := hasRoot(slice[i]), hasRoot(slice[j])
	if iRoot != jRoot {
		// The root (catch-all) service sorts last.
		return jRoot
	}
	return slice[i].AclName < slice[j].AclName
}
// Swap exchanges the services at indexes i and j. It is part of sort.Interface.
func (slice Services) Swap(i, j int) {
	slice[i], slice[j] = slice[j], slice[i]
}
// hasRoot reports whether any destination of the service exposes the root
// path "/" without restricting it to a domain (i.e. a catch-all service).
func hasRoot(service Service) bool {
	for _, dest := range service.ServiceDest {
		if len(dest.ServiceDomain) > 0 {
			// A domain-scoped destination is never a catch-all.
			continue
		}
		for _, servicePath := range dest.ServicePath {
			if servicePath == "/" {
				return true
			}
		}
	}
	return false
}
// hasWellKnown reports whether any destination of the service exposes a
// path under "/.well-known" (case-insensitively), e.g. for ACME challenges.
func hasWellKnown(service Service) bool {
	const wellKnownPrefix = "/.well-known"
	for _, dest := range service.ServiceDest {
		for _, servicePath := range dest.ServicePath {
			if strings.HasPrefix(strings.ToLower(servicePath), wellKnownPrefix) {
				return true
			}
		}
	}
	return false
}
// extractUsersFromString parses a list of users from a string. Entries are
// separated by newlines or commas, and each entry is either "name:password"
// or just "name". context (usually the service name) is used only in log
// messages. When encrypted is true, parsed passwords are flagged as already
// encrypted. When skipEmptyPassword is true, entries without a password are
// logged and dropped instead of being collected.
func extractUsersFromString(context, usersString string, encrypted, skipEmptyPassword bool) []*User {
	collectedUsers := []*User{}
	// TODO: Test
	if len(usersString) == 0 {
		return collectedUsers
	}
	splitter := func(x rune) bool {
		return x == '\n' || x == ','
	}
	users := strings.FieldsFunc(usersString, splitter)
	for _, user := range users {
		user = strings.Trim(user, "\n\t ")
		if len(user) == 0 {
			continue
		}
		if colonIndex := strings.Index(user, ":"); colonIndex >= 0 {
			userName := strings.Trim(user[0:colonIndex], "\t ")
			userPass := strings.Trim(user[colonIndex+1:], "\t ")
			if len(userName) == 0 || len(userPass) == 0 {
				logPrintf("There is a user with no name or with invalid format for the service %s", context)
			} else {
				collectedUsers = append(collectedUsers, &User{Username: userName, Password: userPass, PassEncrypted: encrypted})
			}
		} else if skipEmptyPassword {
			// BUG FIX: the previous format string had three %s verbs but only
			// two arguments (and in the wrong order), producing a mangled
			// message ending in "%!s(MISSING)".
			logPrintf(
				"For the service %s there is a user %s with no password",
				context,
				user,
			)
		} else {
			// No password given and empty passwords are allowed; collect the
			// user so the password can be filled in later (see mergeUsers).
			collectedUsers = append(collectedUsers, &User{Username: user})
		}
	}
	return collectedUsers
}
// ServiceParameterProvider defines common interface for translating parameters into structs.
type ServiceParameterProvider interface {
	// Fill populates the given service with the provider's parameters.
	Fill(service *Service)
	// GetString returns the raw string value of the named parameter,
	// or an empty string when the parameter is not present.
	GetString(name string) string
}
// GetServiceFromMap returns a Service struct built from the request
// parameters stored in the given map.
func GetServiceFromMap(req *map[string]string) *Service {
	return GetServiceFromProvider(&mapParameterProvider{theMap: req})
}
// GetServiceFromProvider returns Service by extracting parameters from ServiceParameterProvider.
// List-valued parameters are split on the SEPARATOR environment variable.
func GetServiceFromProvider(provider ServiceParameterProvider) *Service {
	sr := new(Service)
	provider.Fill(sr)
	separator := os.Getenv("SEPARATOR")
	// TODO: Remove. It's added to maintain backwards compatibility with the deprecated parameter serviceDomainMatchAll (since July 2017)
	if strings.EqualFold(provider.GetString("serviceDomainMatchAll"), "true") {
		sr.ServiceDomainAlgo = "hdr_dom(host)"
	}
	if len(provider.GetString("httpsPort")) > 0 {
		// Parse error deliberately ignored; an invalid value leaves HttpsPort at 0.
		sr.HttpsPort, _ = strconv.Atoi(provider.GetString("httpsPort"))
	}
	// Header manipulation lists; the un-suffixed "addHeader"/"setHeader"
	// names are deprecated aliases of the request variants.
	if len(provider.GetString("addReqHeader")) > 0 {
		sr.AddReqHeader = strings.Split(provider.GetString("addReqHeader"), separator)
	} else if len(provider.GetString("addHeader")) > 0 { // TODO: Deprecated since Apr. 2017.
		sr.AddReqHeader = strings.Split(provider.GetString("addHeader"), separator)
	}
	if len(provider.GetString("setReqHeader")) > 0 {
		sr.SetReqHeader = strings.Split(provider.GetString("setReqHeader"), separator)
	} else if len(provider.GetString("setHeader")) > 0 { // TODO: Deprecated since Apr. 2017.
		sr.SetReqHeader = strings.Split(provider.GetString("setHeader"), separator)
	}
	if len(provider.GetString("delReqHeader")) > 0 {
		sr.DelReqHeader = strings.Split(provider.GetString("delReqHeader"), separator)
	}
	if len(provider.GetString("addResHeader")) > 0 {
		sr.AddResHeader = strings.Split(provider.GetString("addResHeader"), separator)
	}
	if len(provider.GetString("setResHeader")) > 0 {
		sr.SetResHeader = strings.Split(provider.GetString("setResHeader"), separator)
	}
	if len(provider.GetString("delResHeader")) > 0 {
		sr.DelResHeader = strings.Split(provider.GetString("delResHeader"), separator)
	}
	if len(sr.SessionType) > 0 {
		// Sticky sessions need the addresses of the individual Swarm tasks.
		// Lookup error deliberately ignored; Tasks stays empty on failure.
		sr.Tasks, _ = lookupHost("tasks." + sr.ServiceName)
	}
	globalUsersString := getSecretOrEnvVar("USERS", "")
	globalUsersEncrypted := strings.EqualFold(getSecretOrEnvVar("USERS_PASS_ENCRYPTED", ""), "true")
	sr.Users = mergeUsers(
		sr.ServiceName,
		provider.GetString("users"),
		provider.GetString("usersSecret"),
		getBoolParam(provider, "usersPassEncrypted"),
		globalUsersString,
		globalUsersEncrypted,
	)
	sr.ServiceDest = getServiceDestList(sr, provider)
	return sr
}
// getServiceDestList builds the list of destinations for a service. The
// un-indexed parameters (servicePath, port, ...) form the first candidate;
// indexed variants (servicePath.1, servicePath.2, ...) up to index 10 are
// appended until the first invalid one. When no valid destination is found,
// a single entry carrying only the request mode is created. The domain and
// httpsOnly flag of the un-indexed candidate are propagated to entries that
// do not define their own.
func getServiceDestList(sr *Service, provider ServiceParameterProvider) []ServiceDest {
	sdList := []ServiceDest{}
	sd := getServiceDest(sr, provider, -1)
	serviceDomain := []string{}
	if isServiceDestValid(&sd) {
		sdList = append(sdList, sd)
	} else {
		// Keep the domain so it can be inherited by the indexed entries below.
		serviceDomain = sd.ServiceDomain
	}
	httpsOnly := sd.HttpsOnly
	for i := 1; i <= 10; i++ {
		sd := getServiceDest(sr, provider, i)
		if isServiceDestValid(&sd) {
			sdList = append(sdList, sd)
		} else {
			// The first gap ends the indexed sequence.
			break
		}
	}
	if len(sdList) == 0 {
		reqMode := "http"
		if len(provider.GetString("reqMode")) > 0 {
			reqMode = provider.GetString("reqMode")
		}
		sdList = append(sdList, ServiceDest{ReqMode: reqMode})
	}
	for i, sd := range sdList {
		if len(sd.ServiceDomain) > 0 && len(sd.ServicePath) == 0 {
			// A domain without a path matches everything under that domain.
			sdList[i].ServicePath = []string{"/"}
		} else if len(sd.ServiceDomain) == 0 && len(serviceDomain) > 0 {
			sdList[i].ServiceDomain = serviceDomain
		}
		if httpsOnly && !sd.HttpsOnly {
			sdList[i].HttpsOnly = true
		}
	}
	return sdList
}
// getServiceDest reads the destination parameters with the given index.
// A non-positive index reads the un-suffixed parameters (e.g. "port");
// positive indexes read suffixed variants (e.g. "port.1", "port.2").
func getServiceDest(sr *Service, provider ServiceParameterProvider, index int) ServiceDest {
	separator := os.Getenv("SEPARATOR")
	suffix := ""
	if index > 0 {
		suffix = fmt.Sprintf(".%d", index)
	}
	userAgent := UserAgent{}
	if len(provider.GetString(fmt.Sprintf("userAgent%s", suffix))) > 0 {
		userAgent.Value = strings.Split(provider.GetString(fmt.Sprintf("userAgent%s", suffix)), separator)
		userAgent.AclName = replaceNonAlphabetAndNumbers(userAgent.Value)
	}
	reqMode := "http"
	if len(provider.GetString(fmt.Sprintf("reqMode%s", suffix))) > 0 {
		reqMode = provider.GetString(fmt.Sprintf("reqMode%s", suffix))
	}
	// Parse error deliberately ignored; a missing or invalid srcPort yields 0.
	srcPort, _ := strconv.Atoi(provider.GetString(fmt.Sprintf("srcPort%s", suffix)))
	headerString := provider.GetString(fmt.Sprintf("serviceHeader%s", suffix))
	header := map[string]string{}
	if len(headerString) > 0 {
		// Headers come as separator-delimited "Name:Value" pairs; entries
		// without exactly one colon are silently skipped.
		for _, value := range strings.Split(headerString, separator) {
			values := strings.Split(value, ":")
			if len(values) == 2 {
				header[strings.Trim(values[0], " ")] = strings.Trim(values[1], " ")
			}
		}
	}
	// Normalize the un-indexed destination (-1) to index 0.
	sdIndex := index
	if sdIndex < 0 {
		sdIndex = 0
	}
	return ServiceDest{
		AllowedMethods:      getSliceFromString(provider, fmt.Sprintf("allowedMethods%s", suffix)),
		DeniedMethods:       getSliceFromString(provider, fmt.Sprintf("deniedMethods%s", suffix)),
		DenyHttp:            getBoolParam(provider, fmt.Sprintf("denyHttp%s", suffix)),
		HttpsOnly:           getBoolParam(provider, fmt.Sprintf("httpsOnly%s", suffix)),
		HttpsRedirectCode:   provider.GetString(fmt.Sprintf("httpsRedirectCode%s", suffix)),
		IgnoreAuthorization: getBoolParam(provider, fmt.Sprintf("ignoreAuthorization%s", suffix)),
		Port:                provider.GetString(fmt.Sprintf("port%s", suffix)),
		ReqMode:             reqMode,
		ServiceDomain:       getSliceFromString(provider, fmt.Sprintf("serviceDomain%s", suffix)),
		ServiceHeader:       header,
		ServicePath:         getSliceFromString(provider, fmt.Sprintf("servicePath%s", suffix)),
		SrcPort:             srcPort,
		VerifyClientSsl:     getBoolParam(provider, fmt.Sprintf("verifyClientSsl%s", suffix)),
		UserAgent:           userAgent,
		Index:               sdIndex,
	}
}
// getSliceFromString splits the value of the given parameter on the
// SEPARATOR environment variable. It returns an empty (non-nil) slice when
// the parameter is absent or empty.
func getSliceFromString(provider ServiceParameterProvider, key string) []string {
	value := []string{}
	// Fetch the parameter once instead of twice (length check + split).
	if raw := provider.GetString(key); len(raw) > 0 {
		value = strings.Split(raw, os.Getenv("SEPARATOR"))
	}
	return value
}
// isServiceDestValid reports whether the destination defines at least one
// service path or a port; destinations with neither are discarded.
func isServiceDestValid(sd *ServiceDest) bool {
	hasPath := len(sd.ServicePath) > 0
	hasPort := len(sd.Port) > 0
	return hasPath || hasPort
}
// getBoolParam parses the named parameter as a boolean. Missing or
// unparsable values yield false.
func getBoolParam(req ServiceParameterProvider, param string) bool {
	value := false
	// Fetch the parameter once instead of twice (length check + parse).
	if raw := req.GetString(param); len(raw) > 0 {
		// Parse error deliberately ignored; invalid input means false.
		value, _ = strconv.ParseBool(raw)
	}
	return value
}
// mergeUsers combines users specified through the "users" parameter, a users
// secret file, and the global USERS secret/environment variable into the
// final list for the service.
//
// When the parameter defines at least one user, that list wins; users
// without a password are completed from the secret file (or, when no secret
// file is given, from the global users). Otherwise the secret-file users are
// used. Users that still have no password are dropped. If users were
// requested but none remain, a random dummy user is generated so that
// authentication stays enabled (deny-all rather than allow-all).
func mergeUsers(
	serviceName,
	usersParam,
	usersSecret string,
	usersPassEncrypted bool,
	globalUsersString string,
	globalUsersEncrypted bool,
) []User {
	var collectedUsers []*User
	paramUsers := extractUsersFromString(serviceName, usersParam, usersPassEncrypted, false)
	fileUsers, _ := getUsersFromFile(serviceName, usersSecret, usersPassEncrypted)
	if len(paramUsers) > 0 {
		if !allUsersHavePasswords(paramUsers) {
			if len(usersSecret) == 0 {
				// No secret file; fall back to the global users for passwords.
				fileUsers = extractUsersFromString(serviceName, globalUsersString, globalUsersEncrypted, true)
			}
			for _, u := range paramUsers {
				if !u.hasPassword() {
					if userByName := findUserByName(fileUsers, u.Username); userByName != nil {
						// BUG FIX: removed a leftover debug statement that
						// assigned a hard-coded junk password just before the
						// real assignment below.
						u.Password = userByName.Password
						u.PassEncrypted = userByName.PassEncrypted
					} else {
						// TODO: Return an error
						// TODO: Test
						logPrintf("For service %s it was impossible to find password for user %s.",
							serviceName, u.Username)
					}
				}
			}
		}
		collectedUsers = paramUsers
	} else {
		collectedUsers = fileUsers
	}
	// Keep only users that ended up with a password.
	ret := []User{}
	for _, u := range collectedUsers {
		if u.hasPassword() {
			ret = append(ret, *u)
		}
	}
	if len(ret) == 0 && (len(usersParam) != 0 || len(usersSecret) != 0) {
		// We haven't found any users, but some were requested, so generate a
		// dummy one to keep authentication enabled.
		ret = append(ret, *randomUser())
	}
	if len(ret) == 0 {
		return nil
	}
	return ret
}
// getUsersFromFile loads users from the secret file with the given name.
// An empty file name yields an empty list; read failures are logged and
// returned together with an empty list.
func getUsersFromFile(serviceName, fileName string, passEncrypted bool) ([]*User, error) {
	if len(fileName) == 0 {
		return []*User{}, nil
	}
	usersFile := fmt.Sprintf(usersBasePath, fileName)
	content, err := readFile(usersFile)
	if err != nil {
		logPrintf(
			"For service %s it was impossible to load userFile %s due to error %s",
			serviceName,
			usersFile,
			err.Error(),
		)
		return []*User{}, err
	}
	userContents := strings.TrimRight(string(content), "\n")
	return extractUsersFromString(serviceName, userContents, passEncrypted, true), nil
}
// allUsersHavePasswords reports whether every user in the list has a password set.
func allUsersHavePasswords(users []*User) bool {
	for _, user := range users {
		if user.hasPassword() {
			continue
		}
		return false
	}
	return true
}
// findUserByName returns the first user whose name matches case-insensitively,
// or nil when there is no such user.
func findUserByName(users []*User, name string) *User {
	for i := range users {
		if strings.EqualFold(users[i].Username, name) {
			return users[i]
		}
	}
	return nil
}
|
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Native Client SRPC message passing.
// This code is needed to invoke SecureRandom, the NaCl equivalent of /dev/random.
package syscall
import (
"errors"
"sync"
"unsafe"
)
// An srpcClient represents the client side of an SRPC connection.
type srpcClient struct {
	fd      int // to server
	r       msgReceiver
	s       msgSender
	service map[string]srpcService // services by name
	outMu   sync.Mutex             // protects writing to connection
	mu      sync.Mutex             // protects following fields
	muxer   bool                   // is someone reading and muxing responses
	pending map[uint32]*srpc       // in-flight RPCs, keyed by request ID
	idGen   uint32                 // generator for request IDs
}
// An srpcService is a single method that the server offers.
type srpcService struct {
	num uint32 // method number (position in the service_discovery listing)
	fmt string // argument format; see "parsing of RPC messages" below
}
// An srpc represents a single srpc issued by a client.
type srpc struct {
	Ret  []interface{} // return values; valid once the RPC is done
	Done chan *srpc    // receives the finished RPC (or the yourTurn sentinel)
	Err  error         // error, if any; valid once the RPC is done
	c    *srpcClient
	id   uint32 // request ID used to match the response
}
// newClient allocates a new SRPC client using the file descriptor fd.
// It issues the initial service_discovery RPC and records every method the
// server offers. Note that the service map is keyed by the full
// "name:format" line, which is what callers pass to Call (e.g.
// "lookup:si:ih" in openNamedService).
func newClient(fd int) (*srpcClient, error) {
	c := new(srpcClient)
	c.fd = fd
	c.r.fd = fd
	c.s.fd = fd
	c.service = make(map[string]srpcService)
	c.pending = make(map[uint32]*srpc)
	// service discovery request
	m := &msg{
		isRequest: 1,
		template:  []interface{}{[]byte(nil)},
		size:      []int{4000}, // max size to accept for returned byte slice
	}
	if err := m.pack(); err != nil {
		return nil, errors.New("Native Client SRPC service_discovery: preparing request: " + err.Error())
	}
	c.s.send(m)
	m, err := c.r.recv()
	if err != nil {
		return nil, err
	}
	m.unpack()
	if m.status != uint32(srpcOK) {
		return nil, errors.New("Native Client SRPC service_discovery: " + srpcErrno(m.status).Error())
	}
	// The reply is a newline-separated list of "name:format" lines; the
	// method number is the line's position in the list.
	list := m.value[0].([]byte)
	var n uint32
	for len(list) > 0 {
		var line []byte
		i := byteIndex(list, '\n')
		if i < 0 {
			line, list = list, nil
		} else {
			line, list = list[:i], list[i+1:]
		}
		i = byteIndex(line, ':')
		if i >= 0 {
			c.service[string(line)] = srpcService{n, string(line[i+1:])}
		}
		n++
	}
	return c, nil
}
// byteIndex returns the index of the first occurrence of c in b, or -1 when
// c is not present. (The syscall package cannot depend on package bytes.)
func byteIndex(b []byte, c byte) int {
	for i := 0; i < len(b); i++ {
		if b[i] == c {
			return i
		}
	}
	return -1
}
// yourTurn is a sentinel delivered on an RPC's Done channel to tell the
// receiving goroutine that it has become the muxer: it must read from the
// connection (c.input) until its own reply arrives.
var yourTurn srpc

// wait blocks until r completes. Whenever this goroutine receives the
// yourTurn sentinel it takes over reading and dispatching incoming
// messages; receiving anything else means r itself is done.
func (c *srpcClient) wait(r *srpc) {
	var rx *srpc
	for rx = range r.Done {
		if rx != &yourTurn {
			break
		}
		c.input()
	}
	return
}
// input reads one message from the connection and delivers it to the RPC
// waiting on it. It runs on whichever goroutine currently holds the muxer
// role (see wait and yourTurn); before returning it hands that role to
// another pending RPC, if any.
func (c *srpcClient) input() {
	// read message
	m, err := c.r.recv()
	if err != nil {
		println("Native Client SRPC receive error:", err.Error())
		return
	}
	if m.unpack(); m.status != uint32(srpcOK) {
		println("Native Client SRPC receive error: invalid message: ", srpcErrno(m.status).Error())
		return
	}
	// deliver to intended recipient
	c.mu.Lock()
	rpc, ok := c.pending[m.id]
	if ok {
		delete(c.pending, m.id)
	}
	// wake a new muxer if there are more RPCs to read
	c.muxer = false
	for _, rpc := range c.pending {
		// Any remaining pending RPC will do; the loop body runs at most once.
		c.muxer = true
		rpc.Done <- &yourTurn
		break
	}
	c.mu.Unlock()
	if !ok {
		println("Native Client: unexpected response for ID", m.id)
		return
	}
	rpc.Ret = m.value
	rpc.Done <- rpc
}
// Wait blocks until the RPC has finished. It may temporarily take over
// reading the connection for all RPCs; see srpcClient.wait.
func (r *srpc) Wait() {
	r.c.wait(r)
}
// Start issues an RPC request for method name with the given arguments.
// The RPC r must not be in use for another pending request.
// To wait for the RPC to finish, receive from r.Done and then
// inspect r.Ret and r.Err.
func (r *srpc) Start(name string, arg []interface{}) {
	r.Err = nil
	r.c.mu.Lock()
	srv, ok := r.c.service[name]
	if !ok {
		r.c.mu.Unlock()
		r.Err = srpcErrBadRPCNumber
		r.Done <- r
		return
	}
	r.c.pending[r.id] = r
	// If no goroutine is currently reading responses, hand ourselves the
	// muxer role via the yourTurn sentinel (consumed in wait).
	if !r.c.muxer {
		r.c.muxer = true
		r.Done <- &yourTurn
	}
	r.c.mu.Unlock()
	var m msg
	m.id = r.id
	m.isRequest = 1
	m.rpc = srv.num
	m.value = arg
	// Fill in the return values and sizes to generate
	// the right type chars. We'll take most any size.
	// Skip over input arguments.
	// We could check them against arg, but the server
	// will do that anyway.
	i := 0
	for srv.fmt[i] != ':' {
		i++
	}
	format := srv.fmt[i+1:]
	// Now the return prototypes.
	m.template = make([]interface{}, len(format))
	m.size = make([]int, len(format))
	for i := 0; i < len(format); i++ {
		switch format[i] {
		default:
			println("Native Client SRPC: unexpected service type " + string(format[i]))
			r.Err = srpcErrBadRPCNumber
			r.Done <- r
			return
		case 'b':
			m.template[i] = false
		case 'C':
			m.template[i] = []byte(nil)
			m.size[i] = 1 << 30
		case 'd':
			m.template[i] = float64(0)
		case 'D':
			m.template[i] = []float64(nil)
			m.size[i] = 1 << 30
		case 'h':
			m.template[i] = int(-1)
		case 'i':
			m.template[i] = int32(0)
		case 'I':
			m.template[i] = []int32(nil)
			m.size[i] = 1 << 30
		case 's':
			m.template[i] = ""
			m.size[i] = 1 << 30
		}
	}
	if err := m.pack(); err != nil {
		r.Err = errors.New("Native Client RPC Start " + name + ": preparing request: " + err.Error())
		r.Done <- r
		return
	}
	// Serialize writers so concurrent RPCs do not interleave their bytes.
	r.c.outMu.Lock()
	r.c.s.send(&m)
	r.c.outMu.Unlock()
}
// Call is a convenience wrapper that starts the RPC request,
// waits for it to finish, and then returns the results.
// Its implementation is:
//
//	r.Start(name, arg)
//	r.Wait()
//	return r.Ret, r.Err
//
func (c *srpcClient) Call(name string, arg ...interface{}) (ret []interface{}, err error) {
	r := c.NewRPC(nil)
	r.Start(name, arg)
	r.Wait()
	return r.Ret, r.Err
}
// NewRPC creates a new RPC on the client connection. When done is nil, a
// buffered channel of size 1 is allocated so delivery never blocks the muxer.
func (c *srpcClient) NewRPC(done chan *srpc) *srpc {
	if done == nil {
		done = make(chan *srpc, 1)
	}
	r := &srpc{Done: done, c: c}
	// Assign a fresh request ID under the lock.
	c.mu.Lock()
	r.id = c.idGen
	c.idGen++
	c.mu.Unlock()
	return r
}
// The current protocol number.
// Kind of useless, since there have been backwards-incompatible changes
// to the wire protocol that did not update the protocol number.
// At this point it's really just a sanity check.
const protocol = 0xc0da0002

// An srpcErrno is an SRPC status code.
type srpcErrno uint32

// SRPC status codes; values start at 256 and match the NaCl runtime.
const (
	srpcOK srpcErrno = 256 + iota
	srpcErrBreak
	srpcErrMessageTruncated
	srpcErrNoMemory
	srpcErrProtocolMismatch
	srpcErrBadRPCNumber
	srpcErrBadArgType
	srpcErrTooFewArgs
	srpcErrTooManyArgs
	srpcErrInArgTypeMismatch
	srpcErrOutArgTypeMismatch
	srpcErrInternalError
	srpcErrAppError
)
// srpcErrstr maps srpcErrno values (offset by srpcOK) to descriptions.
var srpcErrstr = [...]string{
	srpcOK - srpcOK:                    "ok",
	srpcErrBreak - srpcOK:              "break",
	srpcErrMessageTruncated - srpcOK:   "message truncated",
	srpcErrNoMemory - srpcOK:           "out of memory",
	srpcErrProtocolMismatch - srpcOK:   "protocol mismatch",
	srpcErrBadRPCNumber - srpcOK:       "invalid RPC method number",
	srpcErrBadArgType - srpcOK:         "unexpected argument type",
	srpcErrTooFewArgs - srpcOK:         "too few arguments",
	srpcErrTooManyArgs - srpcOK:        "too many arguments",
	srpcErrInArgTypeMismatch - srpcOK:  "input argument type mismatch",
	srpcErrOutArgTypeMismatch - srpcOK: "output argument type mismatch",
	srpcErrInternalError - srpcOK:      "internal error",
	srpcErrAppError - srpcOK:           "application error",
}
// Error returns the human-readable description of the status code, or a
// generic "srpcErrno(n)" string for codes outside the known range.
func (e srpcErrno) Error() string {
	idx := int(e - srpcOK)
	if e < srpcOK || idx >= len(srpcErrstr) {
		return "srpcErrno(" + itoa(int(e)) + ")"
	}
	return srpcErrstr[idx]
}
// A msgHdr is the data argument to the imc_recvmsg
// and imc_sendmsg system calls.
type msgHdr struct {
	iov   *iov   // scatter/gather list
	niov  int32  // number of entries in iov
	desc  *int32 // descriptor array
	ndesc int32  // number of entries in desc
	flags uint32
}

// A single region for I/O.
type iov struct {
	base *byte
	len  int32
}
// maxMsgSize is the largest message body we handle in a single receive.
const maxMsgSize = 1<<16 - 4*4

// A msgReceiver receives messages from a file descriptor.
type msgReceiver struct {
	fd   int
	data [maxMsgSize]byte // buffer that imc_recvmsg writes into
	desc [8]int32         // buffer for received descriptors
	hdr  msgHdr
	iov  iov
}
// recv reads one message from the file descriptor and returns it as a msg
// holding its own copies of the data and descriptors, so the receiver's
// buffers can be reused for the next call.
func (r *msgReceiver) recv() (*msg, error) {
	// Init pointers to buffers where syscall recvmsg can write.
	r.iov.base = &r.data[0]
	r.iov.len = int32(len(r.data))
	r.hdr.iov = &r.iov
	r.hdr.niov = 1
	r.hdr.desc = &r.desc[0]
	r.hdr.ndesc = int32(len(r.desc))
	n, _, e := Syscall(sys_imc_recvmsg, uintptr(r.fd), uintptr(unsafe.Pointer(&r.hdr)), 0)
	if e != 0 {
		println("Native Client imc_recvmsg: ", e.Error())
		return nil, e
	}
	// Make a copy of the data so that the next recvmsg doesn't
	// smash it. The system call did not update r.iov.len. Instead it
	// returned the total byte count as n.
	m := new(msg)
	m.data = make([]byte, n)
	copy(m.data, r.data[0:])
	// Make a copy of the desc too.
	// The system call *did* update r.hdr.ndesc.
	if r.hdr.ndesc > 0 {
		m.desc = make([]int32, r.hdr.ndesc)
		copy(m.desc, r.desc[:])
	}
	return m, nil
}
// A msgSender sends messages on a file descriptor.
type msgSender struct {
	fd  int
	hdr msgHdr
	iov iov
}
// send writes the packed message m to the file descriptor. Note that only
// the byte payload travels: hdr.desc is always nil, so descriptors are
// never transmitted by this client.
func (s *msgSender) send(m *msg) error {
	if len(m.data) > 0 {
		s.iov.base = &m.data[0]
	}
	s.iov.len = int32(len(m.data))
	s.hdr.iov = &s.iov
	s.hdr.niov = 1
	s.hdr.desc = nil
	s.hdr.ndesc = 0
	_, _, e := Syscall(sys_imc_sendmsg, uintptr(s.fd), uintptr(unsafe.Pointer(&s.hdr)), 0)
	if e != 0 {
		println("Native Client imc_sendmsg: ", e.Error())
		return e
	}
	return nil
}
// A msg is the Go representation of an SRPC message.
type msg struct {
	data []byte  // message data
	desc []int32 // message file descriptors
	// parsed version of message
	id        uint32        // request ID, matches responses to requests
	isRequest uint32        // 1 for requests, 0 for responses
	rpc       uint32        // method number
	status    uint32        // srpcErrno status code
	value     []interface{} // argument (or result) values
	template  []interface{} // expected result prototypes (requests only)
	size      []int         // max sizes accepted for variable-length results
	format    string
	broken    bool // set when a read overruns data; reads then return zero
}
// reading from a msg

// uint32 consumes the next four bytes of m.data as a little-endian uint32.
// On underflow the message is marked broken; a broken message yields 0 from
// every subsequent read.
func (m *msg) uint32() uint32 {
	if m.broken {
		return 0
	}
	if len(m.data) < 4 {
		m.broken = true
		return 0
	}
	x := uint32(m.data[3])<<24 | uint32(m.data[2])<<16 | uint32(m.data[1])<<8 | uint32(m.data[0])
	m.data = m.data[4:]
	return x
}
// uint64 consumes two little-endian 32-bit words, low word first, and
// returns 0 if the message is (or becomes) broken.
func (m *msg) uint64() uint64 {
	lo := m.uint32()
	hi := m.uint32()
	if m.broken {
		return 0
	}
	return uint64(hi)<<32 | uint64(lo)
}
// bytes consumes and returns the next n bytes of m.data (shared, not
// copied). On underflow the message is marked broken and nil is returned.
func (m *msg) bytes(n int) []byte {
	switch {
	case m.broken:
		return nil
	case len(m.data) < n:
		m.broken = true
		return nil
	}
	x, rest := m.data[:n], m.data[n:]
	m.data = rest
	return x
}
// writing to a msg

// wuint32 appends x to the message body in little-endian byte order.
func (m *msg) wuint32(x uint32) {
	b := [4]byte{byte(x), byte(x >> 8), byte(x >> 16), byte(x >> 24)}
	m.data = append(m.data, b[:]...)
}
// wuint64 appends x to the message body as two little-endian 32-bit words,
// low word first.
func (m *msg) wuint64(x uint64) {
	m.wuint32(uint32(x))
	m.wuint32(uint32(x >> 32))
}
// wbytes appends raw bytes to the message body.
func (m *msg) wbytes(p []byte) {
	m.data = append(m.data, p...)
}

// wstring appends the raw bytes of s (no terminator) to the message body.
func (m *msg) wstring(s string) {
	m.data = append(m.data, s...)
}
// Parsing of RPC messages.
//
// Each message begins with
// total_size uint32
// total_descs uint32
// fragment_size uint32
// fragment_descs uint32
//
// If fragment_size < total_size or fragment_descs < total_descs, the actual
// message is broken up in multiple messages; follow-up messages omit
// the "total" fields and begin with the "fragment" fields.
// We do not support putting fragmented messages back together.
// To do this we would need to change the message receiver.
//
// After that size information, the message header follows:
// protocol uint32
// requestID uint32
// isRequest uint32
// rpcNumber uint32
// status uint32
// numValue uint32
// numTemplate uint32
//
// After the header come numTemplate fixed-size arguments,
// numValue fixed-size arguments, and then the variable-sized
// part of the values. The templates describe the expected results
// and have no associated variable sized data in the request.
//
// Each fixed-size argument has the form:
// tag uint32 // really a char, like 'b' or 'C'
// pad uint32 // unused
// val1 uint32
// val2 uint32
//
// The tags are:
// 'b': bool; val1 == 0 or 1
// 'C': []byte; val1 == len, data in variable-sized section
// 'd': float64; (val1, val2) is data
// 'D': []float64; val1 == len, data in variable-sized section
// 'h': int; a descriptor (handle), transferred in the next entry of m.desc
// 'i': int32; val1 == value
// 'I': []int32; val1 == len, data in variable-sized section
// 's': string; val1 == len, data in variable-sized section
//
// pack serializes the msg fields into wire format in m.data. See "Parsing
// of RPC messages" above for the layout; the four size words at the front
// are filled in last, once the total length is known.
func (m *msg) pack() error {
	m.data = m.data[:0]
	m.desc = m.desc[:0]
	// sizes, to fill in later
	m.wuint32(0)
	m.wuint32(0)
	m.wuint32(0)
	m.wuint32(0)
	// message header
	m.wuint32(protocol)
	m.wuint32(m.id)
	m.wuint32(m.isRequest)
	m.wuint32(m.rpc)
	m.wuint32(m.status)
	m.wuint32(uint32(len(m.value)))
	m.wuint32(uint32(len(m.template)))
	// fixed-size templates: only the expected max sizes are transmitted.
	for i, x := range m.template {
		var tag, val1, val2 uint32
		switch x.(type) {
		default:
			return errors.New("unexpected template type")
		case bool:
			tag = 'b'
		case []byte:
			tag = 'C'
			val1 = uint32(m.size[i])
		case float64:
			tag = 'd'
		case []float64:
			tag = 'D'
			val1 = uint32(m.size[i])
		case int:
			tag = 'h'
		case int32:
			tag = 'i'
		case []int32:
			tag = 'I'
			val1 = uint32(m.size[i])
		case string:
			tag = 's'
			val1 = uint32(m.size[i])
		}
		m.wuint32(tag)
		m.wuint32(0)
		m.wuint32(val1)
		m.wuint32(val2)
	}
	// fixed-size values
	for _, x := range m.value {
		var tag, val1, val2 uint32
		switch x := x.(type) {
		default:
			return errors.New("unexpected value type")
		case bool:
			tag = 'b'
			if x {
				val1 = 1
			}
		case []byte:
			tag = 'C'
			val1 = uint32(len(x))
		case float64:
			tag = 'd'
			v := float64bits(x)
			val1 = uint32(v)
			val2 = uint32(v >> 32)
		case []float64:
			tag = 'D'
			val1 = uint32(len(x))
		case int32:
			tag = 'i'
			// BUG FIX: an int32 value travels inline in val1; it is not a
			// descriptor. The previous code appended it to m.desc, which
			// disagreed with unpack (which reads 'i' values from val1 and
			// descriptors from m.desc only for tag 'h') and with send
			// (which never transmits descriptors at all).
			val1 = uint32(x)
		case []int32:
			tag = 'I'
			val1 = uint32(len(x))
		case string:
			tag = 's'
			val1 = uint32(len(x) + 1) // +1 for the NUL terminator written below
		}
		m.wuint32(tag)
		m.wuint32(0)
		m.wuint32(val1)
		m.wuint32(val2)
	}
	// variable-length data for values
	for _, x := range m.value {
		switch x := x.(type) {
		case []byte:
			m.wbytes(x)
		case []float64:
			for _, f := range x {
				m.wuint64(float64bits(f))
			}
		case []int32:
			for _, j := range x {
				m.wuint32(uint32(j))
			}
		case string:
			m.wstring(x)
			m.wstring("\x00")
		}
	}
	// fill in sizes: rewrite the four leading words in place by appending
	// over the zero placeholders through a length-0 view of the same array.
	data := m.data
	m.data = m.data[:0]
	m.wuint32(uint32(len(data)))
	m.wuint32(uint32(len(m.desc)))
	m.wuint32(uint32(len(data)))
	m.wuint32(uint32(len(m.desc)))
	m.data = data
	return nil
}
// unpack parses the wire-format message in m.data into the msg fields.
// It is the inverse of pack; see "Parsing of RPC messages" above for the
// layout. It returns an error when the message is fragmented, uses a
// different protocol number, is truncated, or carries an unknown type tag.
func (m *msg) unpack() error {
	totalSize := m.uint32()
	totalDesc := m.uint32()
	fragSize := m.uint32()
	fragDesc := m.uint32()
	if totalSize != fragSize || totalDesc != fragDesc {
		return errors.New("Native Client: fragmented RPC messages not supported")
	}
	if m.uint32() != protocol {
		return errors.New("Native Client: RPC protocol mismatch")
	}
	// message header
	m.id = m.uint32()
	m.isRequest = m.uint32()
	m.rpc = m.uint32()
	m.status = m.uint32()
	m.value = make([]interface{}, m.uint32())
	m.template = make([]interface{}, m.uint32())
	m.size = make([]int, len(m.template))
	if m.broken {
		return errors.New("Native Client: malformed message")
	}
	// fixed-size templates
	for i := range m.template {
		tag := m.uint32()
		m.uint32() // padding
		val1 := m.uint32()
		m.uint32() // val2
		switch tag {
		default:
			return errors.New("Native Client: unexpected template type " + string(rune(tag)))
		case 'b':
			m.template[i] = false
		case 'C':
			m.template[i] = []byte(nil)
			m.size[i] = int(val1)
		case 'd':
			m.template[i] = float64(0)
		case 'D':
			m.template[i] = []float64(nil)
			m.size[i] = int(val1)
		case 'i':
			m.template[i] = int32(0)
		case 'I':
			m.template[i] = []int32(nil)
			m.size[i] = int(val1)
		case 'h':
			m.template[i] = int(0)
		case 's':
			m.template[i] = ""
			m.size[i] = int(val1)
		}
	}
	// fixed-size values. strsize queues the lengths of 'C' and 's' values
	// (their bytes follow in the variable-sized section, in order); d walks
	// the received descriptors for 'h' values.
	var (
		strsize []uint32
		d       int
	)
	for i := range m.value {
		tag := m.uint32()
		m.uint32() // padding
		val1 := m.uint32()
		val2 := m.uint32()
		switch tag {
		default:
			return errors.New("Native Client: unexpected value type " + string(rune(tag)))
		case 'b':
			m.value[i] = val1 > 0
		case 'C':
			m.value[i] = []byte(nil)
			strsize = append(strsize, val1)
		case 'd':
			m.value[i] = float64frombits(uint64(val1) | uint64(val2)<<32)
		case 'D':
			m.value[i] = make([]float64, val1)
		case 'i':
			m.value[i] = int32(val1)
		case 'I':
			m.value[i] = make([]int32, val1)
		case 'h':
			m.value[i] = int(m.desc[d])
			d++
		case 's':
			m.value[i] = ""
			strsize = append(strsize, val1)
		}
	}
	// variable-sized parts of values
	for i, x := range m.value {
		switch x := x.(type) {
		case []byte:
			m.value[i] = m.bytes(int(strsize[0]))
			strsize = strsize[1:]
		case []float64:
			for i := range x {
				x[i] = float64frombits(m.uint64())
			}
		case []int32:
			for i := range x {
				x[i] = int32(m.uint32())
			}
		case string:
			m.value[i] = string(m.bytes(int(strsize[0])))
			strsize = strsize[1:]
		}
	}
	if len(m.data) > 0 {
		return errors.New("Native Client: junk at end of message")
	}
	return nil
}
// float64bits returns the IEEE 754 bit pattern of x.
// (The syscall package cannot depend on package math.)
func float64bits(x float64) uint64 {
	return *(*uint64)(unsafe.Pointer(&x))
}

// float64frombits returns the float64 with the IEEE 754 bit pattern x.
func float64frombits(x uint64) float64 {
	return *(*float64)(unsafe.Pointer(&x))
}
// At startup, connect to the name service.
var nsClient = nsConnect()

// nsConnect obtains the name service descriptor via the nameservice
// syscall, connects to it over IMC, and wraps the socket in an SRPC client.
// Any failure is logged and yields a nil client (see openNamedService).
func nsConnect() *srpcClient {
	var ns int32 = -1
	_, _, errno := Syscall(sys_nameservice, uintptr(unsafe.Pointer(&ns)), 0, 0)
	if errno != 0 {
		println("Native Client nameservice:", errno.Error())
		return nil
	}
	sock, _, errno := Syscall(sys_imc_connect, uintptr(ns), 0, 0)
	if errno != 0 {
		println("Native Client nameservice connect:", errno.Error())
		return nil
	}
	c, err := newClient(int(sock))
	if err != nil {
		println("Native Client nameservice init:", err.Error())
		return nil
	}
	return c
}
// Name service status codes returned by the "lookup" RPC.
const (
	nsSuccess               = 0
	nsNameNotFound          = 1
	nsDuplicateName         = 2
	nsInsufficientResources = 3
	nsPermissionDenied      = 4
	nsInvalidArgument       = 5
)
// openNamedService asks the name service to open the named resource and
// returns the resulting file descriptor. mode is passed through to the
// lookup RPC.
func openNamedService(name string, mode int32) (fd int, err error) {
	if nsClient == nil {
		return 0, errors.New("no name service")
	}
	// "lookup:si:ih" takes (string, int32) and returns (int32 status, int fd).
	ret, err := nsClient.Call("lookup:si:ih", name, int32(mode))
	if err != nil {
		return 0, err
	}
	status := ret[0].(int32)
	fd = ret[1].(int)
	// Translate name-service status codes into errno values.
	switch status {
	case nsSuccess:
		// ok
	case nsNameNotFound:
		return -1, ENOENT
	case nsDuplicateName:
		return -1, EEXIST
	case nsInsufficientResources:
		return -1, EWOULDBLOCK
	case nsPermissionDenied:
		return -1, EPERM
	case nsInvalidArgument:
		return -1, EINVAL
	default:
		return -1, EINVAL
	}
	return fd, nil
}
syscall: remove nacl srpc helper
Fixes #11961
Minux removed the use of SRPC in 003dccfa, but the SRPC name service
code was left in the tree. SRPC was removed in pepper_42 making the
code, which ran on startup, fail, even though it was not used.
Removing srpc_nacl.go for a total diff of -822 lines has got to count
as one of the easiest nacl fixes we've had to date.
Change-Id: Ic4e348146bfe47450bbb9cabb91699ba153e6bf0
Reviewed-on: https://go-review.googlesource.com/13958
Reviewed-by: Brad Fitzpatrick <ae9783c0b0efc69cd85ab025ddd17aa44cdc4aa5@golang.org>
Reviewed-by: Minux Ma <72d850dbd8858b791995463d8539b5a379577f28@golang.org>
Run-TryBot: Minux Ma <72d850dbd8858b791995463d8539b5a379577f28@golang.org>
TryBot-Result: Gobot Gobot <66cb808b70d30c07676d5e946fee83fd561249e5@golang.org>
|
/*
* Copyright (C) 2016 Red Hat, Inc.
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package topology
import (
"github.com/nu7hatch/gouuid"
"github.com/skydive-project/skydive/logging"
"github.com/skydive-project/skydive/topology/graph"
)
// TIDMapper is a graph event listener that computes a topology identifier
// (TID) for every node and stores it in the node's "TID" metadata.
type TIDMapper struct {
	graph.DefaultGraphListener
	Graph  *graph.Graph
	hostID graph.Identifier // ID of the host node, the root of the TID tree
}
// Start registers the mapper as a listener of graph events.
func (t *TIDMapper) Start() {
	t.Graph.AddEventListener(t)
}

// Stop unregisters the mapper from graph events.
func (t *TIDMapper) Stop() {
	t.Graph.RemoveEventListener(t)
}
// setTID derives the child's TID as UUIDv5(parentTID + Name + Type).
// Children without a "Type" metadata entry, or whose parent has no TID yet,
// are left untouched.
func (t *TIDMapper) setTID(parent, child *graph.Node) {
	if t, ok := child.Metadata()["Type"]; !ok || t == "" {
		return
	}
	if tid, ok := parent.Metadata()["TID"]; ok {
		// NOTE(review): the assertions below assume "TID", "Name", and
		// "Type" are always strings when present — a missing "Name" key
		// would make the assertion panic; confirm against the probes.
		tid = tid.(string) + child.Metadata()["Name"].(string) + child.Metadata()["Type"].(string)
		u, _ := uuid.NewV5(uuid.NamespaceOID, []byte(tid.(string)))
		t.Graph.AddMetadata(child, "TID", u.String())
	}
}
// setChildrenTID recomputes the TID of every node owned by the parent.
func (t *TIDMapper) setChildrenTID(parent *graph.Node) {
	ownership := graph.Metadata{"RelationType": "ownership"}
	for _, child := range t.Graph.LookupChildren(parent, ownership) {
		t.setTID(parent, child)
	}
}
// onNodeEvent computes and sets the TID of a node that doesn't have one yet.
// TID is UUIDV5(ID/UUID) of "root" nodes like host, netns, ovsport, and
// fabric; for other nodes TID is UUIDV5(rootTID + Name + Type), derived
// from the ownership parent.
func (t *TIDMapper) onNodeEvent(n *graph.Node) {
	if _, ok := n.Metadata()["TID"]; !ok {
		if tp, ok := n.Metadata()["Type"]; ok {
			switch tp.(string) {
			case "host":
				// The host is the root: its TID is its own graph ID.
				t.hostID = n.ID
				t.Graph.AddMetadata(n, "TID", string(n.ID))
				t.setChildrenTID(n)
			case "netns":
				tid := string(t.hostID) + n.Metadata()["Path"].(string) + tp.(string)
				u, _ := uuid.NewV5(uuid.NamespaceOID, []byte(tid))
				t.Graph.AddMetadata(n, "TID", u.String())
				t.setChildrenTID(n)
			case "ovsport":
				tid := string(t.hostID) + n.Metadata()["UUID"].(string) + tp.(string)
				u, _ := uuid.NewV5(uuid.NamespaceOID, []byte(tid))
				t.Graph.AddMetadata(n, "TID", u.String())
				t.setChildrenTID(n)
			default:
				if n.Metadata()["Probe"] == "fabric" {
					// Fabric nodes are roots too: TID is their own graph ID.
					t.Graph.AddMetadata(n, "TID", string(n.ID))
				} else {
					parents := t.Graph.LookupParents(n, graph.Metadata{"RelationType": "ownership"})
					if len(parents) > 1 {
						// BUG FIX: the log message was garbled ("A should
						// always only have one ownership parent").
						logging.GetLogger().Errorf("A node should always have only one ownership parent: %v", n)
					} else if len(parents) == 1 {
						t.setTID(parents[0], n)
					}
				}
			}
		}
	}
}
// OnNodeUpdated implements the graph listener interface.
func (t *TIDMapper) OnNodeUpdated(n *graph.Node) {
	t.onNodeEvent(n)
}

// OnNodeAdded implements the graph listener interface.
func (t *TIDMapper) OnNodeAdded(n *graph.Node) {
	t.onNodeEvent(n)
}
// onEdgeEvent propagates a TID to the child endpoint of a new or updated
// ownership edge, unless the child already carries one.
//
// Fixes: the original checked parent for nil but dereferenced child
// unconditionally, crashing when the child node had been removed.
func (t *TIDMapper) onEdgeEvent(e *graph.Edge) {
	if e.Metadata()["RelationType"] != "ownership" {
		return
	}
	parent, child := t.Graph.GetEdgeNodes(e)
	// Either endpoint may be absent from the graph (e.g. already deleted).
	if parent == nil || child == nil {
		return
	}
	if _, ok := child.Metadata()["TID"]; ok {
		return
	}
	t.setTID(parent, child)
}
// OnEdgeUpdated implements the graph listener interface.
func (t *TIDMapper) OnEdgeUpdated(e *graph.Edge) {
	t.onEdgeEvent(e)
}

// OnEdgeAdded implements the graph listener interface.
func (t *TIDMapper) OnEdgeAdded(e *graph.Edge) {
	t.onEdgeEvent(e)
}
// NewTIDMapper returns a TIDMapper bound to g; call Start to begin
// listening for graph events.
func NewTIDMapper(g *graph.Graph) *TIDMapper {
	return &TIDMapper{
		Graph: g,
	}
}
tid: fix LookupParents/LookupChildren RelationType
Change-Id: I5539555189c3f578999d41ac2d1518f2b7bfe3ad
Reviewed-on: http://softwarefactory-project.io/r/5014
Reviewed-by: Sylvain Baubeau <b0a7dc08a3f38d713e107103b0a85b0badc14b84@redhat.com>
Tested-by: Sylvain Baubeau <b0a7dc08a3f38d713e107103b0a85b0badc14b84@redhat.com>
Workflow: Sylvain Baubeau <b0a7dc08a3f38d713e107103b0a85b0badc14b84@redhat.com>
/*
* Copyright (C) 2016 Red Hat, Inc.
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package topology
import (
"github.com/nu7hatch/gouuid"
"github.com/skydive-project/skydive/logging"
"github.com/skydive-project/skydive/topology/graph"
)
// TIDMapper listens for graph node/edge events and maintains the "TID"
// metadata field (a stable topology-wide identifier) on graph nodes.
type TIDMapper struct {
	graph.DefaultGraphListener
	Graph  *graph.Graph
	hostID graph.Identifier // graph ID of the host node, recorded when it is first seen
}
// Start registers the mapper as a listener on the graph.
func (t *TIDMapper) Start() {
	t.Graph.AddEventListener(t)
}

// Stop unregisters the mapper from the graph.
func (t *TIDMapper) Stop() {
	t.Graph.RemoveEventListener(t)
}
// setTID computes and stores the child's TID as
// UUIDv5(parentTID + Name + Type). It is a no-op when the child has no
// (string) Type or Name, or when the parent has no TID yet.
//
// Fixes: the original asserted Name/TID with unchecked .(string) type
// assertions, panicking when the metadata key was absent or non-string,
// and shadowed the receiver `t` with a local.
func (t *TIDMapper) setTID(parent, child *graph.Node) {
	typ, ok := child.Metadata()["Type"].(string)
	if !ok || typ == "" {
		return
	}
	name, ok := child.Metadata()["Name"].(string)
	if !ok {
		return
	}
	parentTID, ok := parent.Metadata()["TID"].(string)
	if !ok {
		return
	}
	u, _ := uuid.NewV5(uuid.NamespaceOID, []byte(parentTID+name+typ))
	t.Graph.AddMetadata(child, "TID", u.String())
}
// setChildrenTID (re)computes the TID of every ownership child of
// parent; used after a root node (host, netns, ovsport) obtains its TID.
func (t *TIDMapper) setChildrenTID(parent *graph.Node) {
	children := t.Graph.LookupChildren(parent, graph.Metadata{}, graph.Metadata{"RelationType": "ownership"})
	for _, child := range children {
		t.setTID(parent, child)
	}
}
// onNodeEvent assigns a TID to a node that does not have one yet.
// TID is the graph ID for "root" nodes (host, fabric); netns and ovsport
// derive UUIDv5(hostID + Path/UUID + Type); any other node gets
// UUIDv5(parentTID + Name + Type) via setTID from its ownership parent.
//
// Fixes: the original used unchecked .(string) assertions on the Type,
// Path and UUID metadata values (panic on absent/non-string values) and
// logged a garbled error message.
func (t *TIDMapper) onNodeEvent(n *graph.Node) {
	if _, ok := n.Metadata()["TID"]; ok {
		return
	}
	tp, ok := n.Metadata()["Type"].(string)
	if !ok {
		return
	}
	switch tp {
	case "host":
		t.hostID = n.ID
		t.Graph.AddMetadata(n, "TID", string(n.ID))
		t.setChildrenTID(n)
	case "netns":
		path, ok := n.Metadata()["Path"].(string)
		if !ok {
			return
		}
		u, _ := uuid.NewV5(uuid.NamespaceOID, []byte(string(t.hostID)+path+tp))
		t.Graph.AddMetadata(n, "TID", u.String())
		t.setChildrenTID(n)
	case "ovsport":
		id, ok := n.Metadata()["UUID"].(string)
		if !ok {
			return
		}
		u, _ := uuid.NewV5(uuid.NamespaceOID, []byte(string(t.hostID)+id+tp))
		t.Graph.AddMetadata(n, "TID", u.String())
		t.setChildrenTID(n)
	default:
		if n.Metadata()["Probe"] == "fabric" {
			t.Graph.AddMetadata(n, "TID", string(n.ID))
			return
		}
		parents := t.Graph.LookupParents(n, graph.Metadata{}, graph.Metadata{"RelationType": "ownership"})
		if len(parents) > 1 {
			logging.GetLogger().Errorf("A node should always have only one ownership parent: %v", n)
		} else if len(parents) == 1 {
			t.setTID(parents[0], n)
		}
	}
}
// OnNodeUpdated implements the graph listener interface.
func (t *TIDMapper) OnNodeUpdated(n *graph.Node) {
	t.onNodeEvent(n)
}

// OnNodeAdded implements the graph listener interface.
func (t *TIDMapper) OnNodeAdded(n *graph.Node) {
	t.onNodeEvent(n)
}
// onEdgeEvent propagates a TID to the child endpoint of a new or updated
// ownership edge.
//
// Fixes: the original checked parent for nil but passed child to setTID
// unconditionally, crashing when the child node had been removed.
func (t *TIDMapper) onEdgeEvent(e *graph.Edge) {
	if e.Metadata()["RelationType"] != "ownership" {
		return
	}
	parent, child := t.Graph.GetEdgeNodes(e)
	// Either endpoint may be absent from the graph (e.g. already deleted).
	if parent == nil || child == nil {
		return
	}
	t.setTID(parent, child)
}
// OnEdgeUpdated implements the graph listener interface.
func (t *TIDMapper) OnEdgeUpdated(e *graph.Edge) {
	t.onEdgeEvent(e)
}

// OnEdgeAdded implements the graph listener interface.
func (t *TIDMapper) OnEdgeAdded(e *graph.Edge) {
	t.onEdgeEvent(e)
}
// NewTIDMapper returns a TIDMapper bound to g; call Start to begin
// listening for graph events.
func NewTIDMapper(g *graph.Graph) *TIDMapper {
	return &TIDMapper{
		Graph: g,
	}
}
|
// This utility program synchronises a file containing compressed log entries
// to disk. It will download any new log entries and check the tree hash.
package main
import (
"bytes"
"fmt"
"os"
"sync"
"time"
"github.com/jmhodges/certificatetransparency"
)
// clearLine erases the current terminal line (move left 80 columns, then
// erase-line) so the progress indicator can be redrawn in place.
func clearLine() {
	fmt.Printf("\x1b[80D\x1b[2K")
}
// displayProgress consumes OperationStatus updates from statusChan and
// renders a one-line spinner with percentage until the channel is closed.
// It registers itself on wg so the caller can wait for the goroutine to
// finish before printing anything else.
func displayProgress(statusChan chan certificatetransparency.OperationStatus, wg *sync.WaitGroup) {
	wg.Add(1)
	go func() {
		defer wg.Done()
		symbols := []string{"|", "/", "-", "\\"}
		symbolIndex := 0
		// Block until the first status arrives; a closed channel means
		// there is nothing to display at all.
		status, ok := <-statusChan
		if !ok {
			return
		}
		ticker := time.NewTicker(200 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case status, ok = <-statusChan:
				if !ok {
					return
				}
			case <-ticker.C:
				// No new status this tick: just advance the spinner.
				symbolIndex = (symbolIndex + 1) % len(symbols)
			}
			clearLine()
			fmt.Printf("%s %.1f%% (%d of %d)", symbols[symbolIndex], status.Percentage(), status.Current, status.Length)
		}
	}()
}
// main synchronises a local file of compressed CT log entries with the
// Certly log: count existing entries, download any new ones, then rebuild
// the Merkle tree hash and verify it against the signed tree head.
//
// Fixes: the original fetched the STH from the Certly log but then called
// certificatetransparency.PilotLog.DownloadRange, downloading entries from
// the wrong log, so the final hash check could never match. The download
// now uses the same ctLog handle as the STH.
func main() {
	if len(os.Args) != 2 {
		fmt.Fprintf(os.Stderr, "Usage: %s <log entries file>\n", os.Args[0])
		os.Exit(1)
	}
	fileName := os.Args[1]

	out, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0666)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to open entries file: %s\n", err)
		os.Exit(1)
	}
	defer out.Close()

	entriesFile := certificatetransparency.EntriesFile{out}

	fmt.Printf("Counting existing entries... ")
	count, err := entriesFile.Count()
	if err != nil {
		fmt.Fprintf(os.Stderr, "\nFailed to read entries file: %s\n", err)
		os.Exit(1)
	}
	fmt.Printf("%d\n", count)

	// Public key of the Certly CT log, used to verify the STH signature.
	certlyPEM := `-----BEGIN PUBLIC KEY-----
MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAECyPLhWKYYUgEc+tUXfPQB4wtGS2M
NvXrjwFCCnyYJifBtd2Sk7Cu+Js9DNhMTh35FftHaHu6ZrclnNBKwmbbSA==
-----END PUBLIC KEY-----`
	ctLog, err := certificatetransparency.NewLog("https://log.certly.io", certlyPEM)
	if err != nil {
		fmt.Fprintf(os.Stderr, "%s\n", err)
		os.Exit(1)
	}

	fmt.Printf("Fetching signed tree head... ")
	sth, err := ctLog.GetSignedTreeHead()
	// sth, err := certificatetransparency.PilotLog.GetSignedTreeHead()
	if err != nil {
		fmt.Fprintf(os.Stderr, "%s\n", err)
		os.Exit(1)
	}
	fmt.Printf("%d total entries at %s\n", sth.Size, sth.Time.Format(time.ANSIC))
	if count == sth.Size {
		fmt.Printf("Nothing to do\n")
		return
	}

	// Download the missing range [count, sth.Size) from the SAME log the
	// STH came from (was: certificatetransparency.PilotLog).
	statusChan := make(chan certificatetransparency.OperationStatus, 1)
	wg := new(sync.WaitGroup)
	displayProgress(statusChan, wg)
	_, err = ctLog.DownloadRange(out, statusChan, count, sth.Size)
	wg.Wait()
	clearLine()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error while downloading: %s\n", err)
		os.Exit(1)
	}

	fmt.Printf("Hashing tree\n")
	entriesFile.Seek(0, 0)
	statusChan = make(chan certificatetransparency.OperationStatus, 1)
	wg = new(sync.WaitGroup)
	displayProgress(statusChan, wg)
	treeHash, err := entriesFile.HashTree(statusChan, sth.Size)
	wg.Wait()
	clearLine()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error hashing tree: %s\n", err)
		os.Exit(1)
	}
	if !bytes.Equal(treeHash[:], sth.Hash) {
		fmt.Fprintf(os.Stderr, "Hashes do not match! Calculated: %x, STH contains %x\n", treeHash, sth.Hash)
		os.Exit(1)
	}
}
Fix DownloadRange to use the Certly log handle instead of certificatetransparency.PilotLog, so downloaded entries match the STH being verified.
// This utility program synchronises a file containing compressed log entries
// to disk. It will download any new log entries and check the tree hash.
package main
import (
"bytes"
"fmt"
"os"
"sync"
"time"
"github.com/jmhodges/certificatetransparency"
)
// clearLine erases the current terminal line (move left 80 columns, then
// erase-line) so the progress indicator can be redrawn in place.
func clearLine() {
	fmt.Printf("\x1b[80D\x1b[2K")
}
// displayProgress consumes OperationStatus updates from statusChan and
// renders a one-line spinner with percentage until the channel is closed.
// It registers itself on wg so the caller can wait for the goroutine to
// finish before printing anything else.
func displayProgress(statusChan chan certificatetransparency.OperationStatus, wg *sync.WaitGroup) {
	wg.Add(1)
	go func() {
		defer wg.Done()
		symbols := []string{"|", "/", "-", "\\"}
		symbolIndex := 0
		// Block until the first status arrives; a closed channel means
		// there is nothing to display at all.
		status, ok := <-statusChan
		if !ok {
			return
		}
		ticker := time.NewTicker(200 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case status, ok = <-statusChan:
				if !ok {
					return
				}
			case <-ticker.C:
				// No new status this tick: just advance the spinner.
				symbolIndex = (symbolIndex + 1) % len(symbols)
			}
			clearLine()
			fmt.Printf("%s %.1f%% (%d of %d)", symbols[symbolIndex], status.Percentage(), status.Current, status.Length)
		}
	}()
}
// main synchronises a local file of compressed CT log entries with the
// Certly log: count existing entries, download any new ones, then rebuild
// the Merkle tree hash and verify it against the signed tree head.
func main() {
	if len(os.Args) != 2 {
		fmt.Fprintf(os.Stderr, "Usage: %s <log entries file>\n", os.Args[0])
		os.Exit(1)
	}
	fileName := os.Args[1]

	out, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0666)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to open entries file: %s\n", err)
		os.Exit(1)
	}
	defer out.Close()

	entriesFile := certificatetransparency.EntriesFile{out}

	fmt.Printf("Counting existing entries... ")
	count, err := entriesFile.Count()
	if err != nil {
		fmt.Fprintf(os.Stderr, "\nFailed to read entries file: %s\n", err)
		os.Exit(1)
	}
	fmt.Printf("%d\n", count)

	// Public key of the Certly CT log, used to verify the STH signature.
	certlyPEM := `-----BEGIN PUBLIC KEY-----
MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAECyPLhWKYYUgEc+tUXfPQB4wtGS2M
NvXrjwFCCnyYJifBtd2Sk7Cu+Js9DNhMTh35FftHaHu6ZrclnNBKwmbbSA==
-----END PUBLIC KEY-----`
	ctLog, err := certificatetransparency.NewLog("https://log.certly.io", certlyPEM)
	if err != nil {
		fmt.Fprintf(os.Stderr, "%s\n", err)
		os.Exit(1)
	}

	fmt.Printf("Fetching signed tree head... ")
	sth, err := ctLog.GetSignedTreeHead()
	// sth, err := certificatetransparency.PilotLog.GetSignedTreeHead()
	if err != nil {
		fmt.Fprintf(os.Stderr, "%s\n", err)
		os.Exit(1)
	}
	fmt.Printf("%d total entries at %s\n", sth.Size, sth.Time.Format(time.ANSIC))
	if count == sth.Size {
		fmt.Printf("Nothing to do\n")
		return
	}

	// Download the missing range [count, sth.Size) with a progress spinner.
	statusChan := make(chan certificatetransparency.OperationStatus, 1)
	wg := new(sync.WaitGroup)
	displayProgress(statusChan, wg)
	_, err = ctLog.DownloadRange(out, statusChan, count, sth.Size)
	wg.Wait()
	clearLine()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error while downloading: %s\n", err)
		os.Exit(1)
	}

	// Re-read the whole file and recompute the Merkle tree hash, then
	// compare against the hash in the signed tree head.
	fmt.Printf("Hashing tree\n")
	entriesFile.Seek(0, 0)
	statusChan = make(chan certificatetransparency.OperationStatus, 1)
	wg = new(sync.WaitGroup)
	displayProgress(statusChan, wg)
	treeHash, err := entriesFile.HashTree(statusChan, sth.Size)
	wg.Wait()
	clearLine()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error hashing tree: %s\n", err)
		os.Exit(1)
	}
	if !bytes.Equal(treeHash[:], sth.Hash) {
		fmt.Fprintf(os.Stderr, "Hashes do not match! Calculated: %x, STH contains %x\n", treeHash, sth.Hash)
		os.Exit(1)
	}
}
|
package goreport
import (
"encoding/json"
"io"
)
// JsonFormatter is a Formatter that renders entries as JSON.
// Note: Only supports encoding/json types as Entry values
type JsonFormatter struct {
}
// NewJsonFormatter returns a Formatter that emits compact JSON.
func NewJsonFormatter() Formatter {
	return &JsonFormatter{}
}
// Write streams entry to w as a single compact JSON document followed by
// a newline (json.Encoder semantics). Returns any encoding/write error.
func (f *JsonFormatter) Write(entry Entry, w io.Writer) error {
	e := json.NewEncoder(w)
	return e.Encode(entry)
}
Add PrettyPrint to JsonFormatter
package goreport
import (
"encoding/json"
"io"
)
// JsonFormatter is a Formatter that renders entries as JSON, optionally
// pretty-printed.
// Note: Only supports encoding/json types as Entry values
type JsonFormatter struct {
	PrettyPrint bool   // when true, indent output via json.MarshalIndent
	Indent      string // per-level indent string used when PrettyPrint is set
	Prefix      string // prefix for each indented line when PrettyPrint is set
}
// NewJsonFormatter returns a Formatter that pretty-prints JSON with a
// tab indent and no prefix by default.
func NewJsonFormatter() Formatter {
	return &JsonFormatter{
		PrettyPrint: true,
		Indent:      "\t",
		Prefix:      "",
	}
}
// Write encodes entry as JSON to w. With PrettyPrint set, the entry is
// rendered via json.MarshalIndent using the configured Prefix and Indent;
// otherwise a compact encoding is streamed.
//
// NOTE(review): the pretty path writes no trailing newline while
// json.Encoder.Encode appends one — confirm whether consumers depend on
// line-delimited output before unifying.
func (f *JsonFormatter) Write(entry Entry, w io.Writer) error {
	if f.PrettyPrint {
		b, err := json.MarshalIndent(entry, f.Prefix, f.Indent)
		if err != nil {
			return err
		}
		_, err = w.Write(b)
		return err
	}
	e := json.NewEncoder(w)
	return e.Encode(entry)
}
|
package isolated
import (
"fmt"
"io/ioutil"
"path/filepath"
"code.cloudfoundry.org/cli/integration/helpers"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gbytes"
. "github.com/onsi/gomega/gexec"
)
// Integration specs for `cf push` health check handling.
// Fixes: two specs were focused (FIt), which makes Ginkgo skip every
// other spec in the suite; they are restored to It.
var _ = Describe("Push with health check", func() {
	Context("help", func() {
		Context("when displaying help in the refactor", func() {
			It("Displays command usage to output", func() {
				session := helpers.CF("push", "--help")
				Eventually(session).Should(Say("--health-check-type, -u\\s+Application health check type \\(Default: 'port', 'none' accepted for 'process', 'http' implies endpoint '/'\\)"))
				Eventually(session).Should(Exit(0))
			})
		})
	})

	Context("when the environment is set up correctly", func() {
		var (
			appName   string
			orgName   string
			spaceName string
		)

		BeforeEach(func() {
			orgName = helpers.NewOrgName()
			spaceName = helpers.PrefixedRandomName("SPACE")
			setupCF(orgName, spaceName)
			appName = helpers.PrefixedRandomName("app")
		})

		Context("when displaying help in the old code", func() {
			It("Displays command usage to output", func() {
				session := helpers.CF("push")
				Eventually(session).Should(Say("--health-check-type, -u\\s+Application health check type \\(Default: 'port', 'none' accepted for 'process', 'http' implies endpoint '/'\\)"))
				Eventually(session).Should(Exit(1))
			})
		})

		Context("when pushing with flags", func() {
			Context("when setting the health check", func() {
				DescribeTable("displays the correct health check type",
					func(healthCheckType string, endpoint string) {
						helpers.WithHelloWorldApp(func(appDir string) {
							Eventually(helpers.CF("push", appName, "--no-start", "-p", appDir, "-b", "staticfile_buildpack", "-u", healthCheckType)).Should(Exit(0))
						})

						session := helpers.CF("get-health-check", appName)
						Eventually(session.Out).Should(Say("Health check type:\\s+%s", healthCheckType))
						Eventually(session.Out).Should(Say("Endpoint \\(for http type\\):\\s+%s\n", endpoint))
						Eventually(session).Should(Exit(0))
					},

					Entry("when the health check type is none", "none", ""),
					Entry("when the health check type is process", "process", ""),
					Entry("when the health check type is port", "port", ""),
					Entry("when the health check type is http", "http", "/"),
				)
			})

			Context("when the health check type is not 'http'", func() {
				BeforeEach(func() {
					helpers.WithHelloWorldApp(func(appDir string) {
						Eventually(helpers.CF("push", appName, "--no-start", "-p", appDir, "-b", "staticfile_buildpack", "-u", "port")).Should(Exit(0))
					})
				})

				Context("when the health check type is set to 'http'", func() {
					BeforeEach(func() {
						helpers.WithHelloWorldApp(func(appDir string) {
							Eventually(helpers.CF("push", appName, "--no-start", "-p", appDir, "-b", "staticfile_buildpack", "-u", "http")).Should(Exit(0))
						})
					})

					It("sets the endpoint to /", func() {
						session := helpers.CF("get-health-check", appName)
						Eventually(session.Out).Should(Say("Endpoint \\(for http type\\):\\s+\\/\n"))
						Eventually(session).Should(Exit(0))
					})
				})
			})

			Context("when the app has a health check 'http' endpoint set", func() {
				BeforeEach(func() {
					helpers.WithHelloWorldApp(func(appDir string) {
						Eventually(helpers.CF("push", appName, "--no-start", "-p", appDir, "-b", "staticfile_buildpack", "-u", "http")).Should(Exit(0))
					})

					Eventually(helpers.CF("set-health-check", appName, "http", "--endpoint", "/some-endpoint")).Should(Exit(0))

					appGUID := helpers.AppGUID(appName)
					session := helpers.CF("curl", fmt.Sprintf("/v2/apps/%s", appGUID))
					Eventually(session.Out).Should(Say(`"health_check_http_endpoint": "/some-endpoint"`))
					Eventually(session).Should(Exit(0))
				})

				Context("when the health check type is set to 'http'", func() {
					BeforeEach(func() {
						helpers.WithHelloWorldApp(func(appDir string) {
							Eventually(helpers.CF("push", appName, "--no-start", "-p", appDir, "-b", "staticfile_buildpack", "-u", "http")).Should(Exit(0))
						})
					})

					It("preserves the existing endpoint", func() {
						appGUID := helpers.AppGUID(appName)
						session := helpers.CF("curl", fmt.Sprintf("/v2/apps/%s", appGUID))
						Eventually(session.Out).Should(Say(`"health_check_http_endpoint": "/some-endpoint"`))
						Eventually(session).Should(Exit(0))
					})
				})

				Context("when the health check type is set to something other than 'http'", func() {
					BeforeEach(func() {
						helpers.WithHelloWorldApp(func(appDir string) {
							Eventually(helpers.CF("push", appName, "--no-start", "-p", appDir, "-b", "staticfile_buildpack", "-u", "port")).Should(Exit(0))
						})
					})

					It("preserves the existing endpoint", func() {
						appGUID := helpers.AppGUID(appName)
						session := helpers.CF("curl", fmt.Sprintf("/v2/apps/%s", appGUID))
						Eventually(session.Out).Should(Say(`"health_check_http_endpoint": "/some-endpoint"`))
						Eventually(session).Should(Exit(0))
					})
				})
			})
		})

		Context("when pushing with manifest", func() {
			Context("when the health type is http and an endpoint is provided", func() {
				// was FIt (focused) — restored to It
				It("sets the health check type and endpoint", func() {
					helpers.WithHelloWorldApp(func(appDir string) {
						manifestContents := []byte(fmt.Sprintf(`
---
applications:
- name: %s
  memory: 128M
  health-check-type: http
  health-check-http-endpoint: /some-endpoint
`, appName))
						manifestPath := filepath.Join(appDir, "manifest.yml")
						err := ioutil.WriteFile(manifestPath, manifestContents, 0666)
						Expect(err).ToNot(HaveOccurred())

						Eventually(helpers.CF("push", "--no-start", "-p", appDir, "-f", manifestPath, "-b", "staticfile_buildpack")).Should(Exit(0))
					})

					session := helpers.CF("get-health-check", appName)
					Eventually(session.Out).Should(Say("Health check type:\\s+http"))
					Eventually(session.Out).Should(Say("Endpoint \\(for http type\\):\\s+/some-endpoint\n"))
					Eventually(session).Should(Exit(0))
				})
			})

			Context("when the health type is not http and an endpoint is provided", func() {
				// was FIt (focused) — restored to It
				It("displays an error", func() {
					helpers.WithHelloWorldApp(func(appDir string) {
						manifestContents := []byte(fmt.Sprintf(`
---
applications:
- name: %s
  memory: 128M
  health-check-type: port
  health-check-http-endpoint: /some-endpoint
`, appName))
						manifestPath := filepath.Join(appDir, "manifest.yml")
						err := ioutil.WriteFile(manifestPath, manifestContents, 0666)
						Expect(err).ToNot(HaveOccurred())

						session := helpers.CF("push", "--no-start", "-p", appDir, "-f", manifestPath, "-b", "staticfile_buildpack")
						Eventually(session.Out).Should(Say("Health check type must be 'http' to set a health check HTTP endpoint."))
						Eventually(session).Should(Exit(1))
					})
				})
			})

			Context("when passing an 'http' health check type with the -u option", func() {
				It("resets the endpoint to the default", func() {
					helpers.WithHelloWorldApp(func(appDir string) {
						manifestContents := []byte(fmt.Sprintf(`
---
applications:
- name: %s
  memory: 128M
  health-check-type: http
  health-check-http-endpoint: /some-endpoint
`, appName))
						manifestPath := filepath.Join(appDir, "manifest.yml")
						err := ioutil.WriteFile(manifestPath, manifestContents, 0666)
						Expect(err).ToNot(HaveOccurred())

						Eventually(helpers.CF("push", "--no-start", "-p", appDir, "-f", manifestPath, "-b", "staticfile_buildpack", "-u", "http")).Should(Exit(0))
					})

					session := helpers.CF("get-health-check", appName)
					Eventually(session.Out).Should(Say("Health check type:\\s+http"))
					Eventually(session.Out).Should(Say("Endpoint \\(for http type\\):\\s+/\n"))
					Eventually(session).Should(Exit(0))
				})
			})
		})

		Context("when the manifest contains the health check type attribute", func() {
			DescribeTable("displays the correct health check type",
				func(healthCheckType string, endpoint string) {
					helpers.WithHelloWorldApp(func(appDir string) {
						manifestContents := []byte(fmt.Sprintf(`
---
applications:
- name: %s
  memory: 128M
  health-check-type: %s
`, appName, healthCheckType))
						manifestPath := filepath.Join(appDir, "manifest.yml")
						err := ioutil.WriteFile(manifestPath, manifestContents, 0666)
						Expect(err).ToNot(HaveOccurred())

						Eventually(helpers.CF("push", "--no-start", "-p", appDir, "-f", manifestPath, "-b", "staticfile_buildpack")).Should(Exit(0))
					})

					session := helpers.CF("get-health-check", appName)
					Eventually(session.Out).Should(Say("Health check type:\\s+%s", healthCheckType))
					Eventually(session.Out).Should(Say("Endpoint \\(for http type\\):\\s+%s\n", endpoint))
					Eventually(session).Should(Exit(0))
				},

				Entry("when the health check type is none", "none", ""),
				Entry("when the health check type is process", "process", ""),
				Entry("when the health check type is port", "port", ""),
				Entry("when the health check type is http", "http", "/"),
			)

			Context("when passing a health check type with the -u option", func() {
				It("overrides any health check types in the manifest", func() {
					helpers.WithHelloWorldApp(func(appDir string) {
						manifestContents := []byte(fmt.Sprintf(`
---
applications:
- name: %s
  memory: 128M
  health-check-type: port
`, appName))
						manifestPath := filepath.Join(appDir, "manifest.yml")
						err := ioutil.WriteFile(manifestPath, manifestContents, 0666)
						Expect(err).ToNot(HaveOccurred())

						Eventually(helpers.CF("push", "--no-start", "-p", appDir, "-f", manifestPath, "-b", "staticfile_buildpack", "-u", "http")).Should(Exit(0))
					})

					session := helpers.CF("get-health-check", appName)
					Eventually(session.Out).Should(Say("Health check type:\\s+http"))
					Eventually(session).Should(Exit(0))
				})
			})
		})
	})
})
Remove accidentally committed focused specs (FIt) from the push health check integration tests so the full suite runs again.
Signed-off-by: Alex Melnik <133b34882c9a7536fb0c644d95ec8ca567374456@hpe.com>
package isolated
import (
"fmt"
"io/ioutil"
"path/filepath"
"code.cloudfoundry.org/cli/integration/helpers"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gbytes"
. "github.com/onsi/gomega/gexec"
)
// Integration specs for `cf push` health check handling: covers help
// output, the -u/--health-check-type flag, manifest-driven health check
// settings, and how flag and manifest values interact.
var _ = Describe("Push with health check", func() {
	Context("help", func() {
		Context("when displaying help in the refactor", func() {
			It("Displays command usage to output", func() {
				session := helpers.CF("push", "--help")
				Eventually(session).Should(Say("--health-check-type, -u\\s+Application health check type \\(Default: 'port', 'none' accepted for 'process', 'http' implies endpoint '/'\\)"))
				Eventually(session).Should(Exit(0))
			})
		})
	})

	Context("when the environment is set up correctly", func() {
		var (
			appName   string
			orgName   string
			spaceName string
		)

		// Fresh org/space/app names per spec to avoid cross-test collisions.
		BeforeEach(func() {
			orgName = helpers.NewOrgName()
			spaceName = helpers.PrefixedRandomName("SPACE")
			setupCF(orgName, spaceName)
			appName = helpers.PrefixedRandomName("app")
		})

		Context("when displaying help in the old code", func() {
			It("Displays command usage to output", func() {
				session := helpers.CF("push")
				Eventually(session).Should(Say("--health-check-type, -u\\s+Application health check type \\(Default: 'port', 'none' accepted for 'process', 'http' implies endpoint '/'\\)"))
				Eventually(session).Should(Exit(1))
			})
		})

		Context("when pushing with flags", func() {
			Context("when setting the health check", func() {
				DescribeTable("displays the correct health check type",
					func(healthCheckType string, endpoint string) {
						helpers.WithHelloWorldApp(func(appDir string) {
							Eventually(helpers.CF("push", appName, "--no-start", "-p", appDir, "-b", "staticfile_buildpack", "-u", healthCheckType)).Should(Exit(0))
						})

						session := helpers.CF("get-health-check", appName)
						Eventually(session.Out).Should(Say("Health check type:\\s+%s", healthCheckType))
						Eventually(session.Out).Should(Say("Endpoint \\(for http type\\):\\s+%s\n", endpoint))
						Eventually(session).Should(Exit(0))
					},

					Entry("when the health check type is none", "none", ""),
					Entry("when the health check type is process", "process", ""),
					Entry("when the health check type is port", "port", ""),
					Entry("when the health check type is http", "http", "/"),
				)
			})

			Context("when the health check type is not 'http'", func() {
				BeforeEach(func() {
					helpers.WithHelloWorldApp(func(appDir string) {
						Eventually(helpers.CF("push", appName, "--no-start", "-p", appDir, "-b", "staticfile_buildpack", "-u", "port")).Should(Exit(0))
					})
				})

				Context("when the health check type is set to 'http'", func() {
					BeforeEach(func() {
						helpers.WithHelloWorldApp(func(appDir string) {
							Eventually(helpers.CF("push", appName, "--no-start", "-p", appDir, "-b", "staticfile_buildpack", "-u", "http")).Should(Exit(0))
						})
					})

					It("sets the endpoint to /", func() {
						session := helpers.CF("get-health-check", appName)
						Eventually(session.Out).Should(Say("Endpoint \\(for http type\\):\\s+\\/\n"))
						Eventually(session).Should(Exit(0))
					})
				})
			})

			Context("when the app has a health check 'http' endpoint set", func() {
				// Push with http health check, set a custom endpoint, and
				// confirm via the v2 API that the endpoint stuck.
				BeforeEach(func() {
					helpers.WithHelloWorldApp(func(appDir string) {
						Eventually(helpers.CF("push", appName, "--no-start", "-p", appDir, "-b", "staticfile_buildpack", "-u", "http")).Should(Exit(0))
					})

					Eventually(helpers.CF("set-health-check", appName, "http", "--endpoint", "/some-endpoint")).Should(Exit(0))

					appGUID := helpers.AppGUID(appName)
					session := helpers.CF("curl", fmt.Sprintf("/v2/apps/%s", appGUID))
					Eventually(session.Out).Should(Say(`"health_check_http_endpoint": "/some-endpoint"`))
					Eventually(session).Should(Exit(0))
				})

				Context("when the health check type is set to 'http'", func() {
					BeforeEach(func() {
						helpers.WithHelloWorldApp(func(appDir string) {
							Eventually(helpers.CF("push", appName, "--no-start", "-p", appDir, "-b", "staticfile_buildpack", "-u", "http")).Should(Exit(0))
						})
					})

					It("preserves the existing endpoint", func() {
						appGUID := helpers.AppGUID(appName)
						session := helpers.CF("curl", fmt.Sprintf("/v2/apps/%s", appGUID))
						Eventually(session.Out).Should(Say(`"health_check_http_endpoint": "/some-endpoint"`))
						Eventually(session).Should(Exit(0))
					})
				})

				Context("when the health check type is set to something other than 'http'", func() {
					BeforeEach(func() {
						helpers.WithHelloWorldApp(func(appDir string) {
							Eventually(helpers.CF("push", appName, "--no-start", "-p", appDir, "-b", "staticfile_buildpack", "-u", "port")).Should(Exit(0))
						})
					})

					It("preserves the existing endpoint", func() {
						appGUID := helpers.AppGUID(appName)
						session := helpers.CF("curl", fmt.Sprintf("/v2/apps/%s", appGUID))
						Eventually(session.Out).Should(Say(`"health_check_http_endpoint": "/some-endpoint"`))
						Eventually(session).Should(Exit(0))
					})
				})
			})
		})

		Context("when pushing with manifest", func() {
			Context("when the health type is http and an endpoint is provided", func() {
				It("sets the health check type and endpoint", func() {
					helpers.WithHelloWorldApp(func(appDir string) {
						manifestContents := []byte(fmt.Sprintf(`
---
applications:
- name: %s
  memory: 128M
  health-check-type: http
  health-check-http-endpoint: /some-endpoint
`, appName))
						manifestPath := filepath.Join(appDir, "manifest.yml")
						err := ioutil.WriteFile(manifestPath, manifestContents, 0666)
						Expect(err).ToNot(HaveOccurred())

						Eventually(helpers.CF("push", "--no-start", "-p", appDir, "-f", manifestPath, "-b", "staticfile_buildpack")).Should(Exit(0))
					})

					session := helpers.CF("get-health-check", appName)
					Eventually(session.Out).Should(Say("Health check type:\\s+http"))
					Eventually(session.Out).Should(Say("Endpoint \\(for http type\\):\\s+/some-endpoint\n"))
					Eventually(session).Should(Exit(0))
				})
			})

			Context("when the health type is not http and an endpoint is provided", func() {
				It("displays an error", func() {
					helpers.WithHelloWorldApp(func(appDir string) {
						manifestContents := []byte(fmt.Sprintf(`
---
applications:
- name: %s
  memory: 128M
  health-check-type: port
  health-check-http-endpoint: /some-endpoint
`, appName))
						manifestPath := filepath.Join(appDir, "manifest.yml")
						err := ioutil.WriteFile(manifestPath, manifestContents, 0666)
						Expect(err).ToNot(HaveOccurred())

						session := helpers.CF("push", "--no-start", "-p", appDir, "-f", manifestPath, "-b", "staticfile_buildpack")
						Eventually(session.Out).Should(Say("Health check type must be 'http' to set a health check HTTP endpoint."))
						Eventually(session).Should(Exit(1))
					})
				})
			})

			Context("when passing an 'http' health check type with the -u option", func() {
				It("resets the endpoint to the default", func() {
					helpers.WithHelloWorldApp(func(appDir string) {
						manifestContents := []byte(fmt.Sprintf(`
---
applications:
- name: %s
  memory: 128M
  health-check-type: http
  health-check-http-endpoint: /some-endpoint
`, appName))
						manifestPath := filepath.Join(appDir, "manifest.yml")
						err := ioutil.WriteFile(manifestPath, manifestContents, 0666)
						Expect(err).ToNot(HaveOccurred())

						Eventually(helpers.CF("push", "--no-start", "-p", appDir, "-f", manifestPath, "-b", "staticfile_buildpack", "-u", "http")).Should(Exit(0))
					})

					session := helpers.CF("get-health-check", appName)
					Eventually(session.Out).Should(Say("Health check type:\\s+http"))
					Eventually(session.Out).Should(Say("Endpoint \\(for http type\\):\\s+/\n"))
					Eventually(session).Should(Exit(0))
				})
			})
		})

		Context("when the manifest contains the health check type attribute", func() {
			DescribeTable("displays the correct health check type",
				func(healthCheckType string, endpoint string) {
					helpers.WithHelloWorldApp(func(appDir string) {
						manifestContents := []byte(fmt.Sprintf(`
---
applications:
- name: %s
  memory: 128M
  health-check-type: %s
`, appName, healthCheckType))
						manifestPath := filepath.Join(appDir, "manifest.yml")
						err := ioutil.WriteFile(manifestPath, manifestContents, 0666)
						Expect(err).ToNot(HaveOccurred())

						Eventually(helpers.CF("push", "--no-start", "-p", appDir, "-f", manifestPath, "-b", "staticfile_buildpack")).Should(Exit(0))
					})

					session := helpers.CF("get-health-check", appName)
					Eventually(session.Out).Should(Say("Health check type:\\s+%s", healthCheckType))
					Eventually(session.Out).Should(Say("Endpoint \\(for http type\\):\\s+%s\n", endpoint))
					Eventually(session).Should(Exit(0))
				},

				Entry("when the health check type is none", "none", ""),
				Entry("when the health check type is process", "process", ""),
				Entry("when the health check type is port", "port", ""),
				Entry("when the health check type is http", "http", "/"),
			)

			Context("when passing a health check type with the -u option", func() {
				It("overrides any health check types in the manifest", func() {
					helpers.WithHelloWorldApp(func(appDir string) {
						manifestContents := []byte(fmt.Sprintf(`
---
applications:
- name: %s
  memory: 128M
  health-check-type: port
`, appName))
						manifestPath := filepath.Join(appDir, "manifest.yml")
						err := ioutil.WriteFile(manifestPath, manifestContents, 0666)
						Expect(err).ToNot(HaveOccurred())

						Eventually(helpers.CF("push", "--no-start", "-p", appDir, "-f", manifestPath, "-b", "staticfile_buildpack", "-u", "http")).Should(Exit(0))
					})

					session := helpers.CF("get-health-check", appName)
					Eventually(session.Out).Should(Say("Health check type:\\s+http"))
					Eventually(session).Should(Exit(0))
				})
			})
		})
	})
})
|
package tesTaskEngineWorker
import (
"bufio"
"context"
"fmt"
"github.com/docker/docker/client"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
pbe "tes/ga4gh"
"time"
)
// DockerCmd is responsible for configuring and running a docker container.
type DockerCmd struct {
	ImageName       string       // container image to run
	CmdString       []string     // command and arguments executed inside the container
	Volumes         []Volume     // host/container path bindings (-v)
	Workdir         string       // container working directory (-w)
	Ports           []*pbe.Ports // host/container port mappings (-p)
	ContainerName   string       // optional container name (--name)
	RemoveContainer bool         // pass --rm so the container is removed on exit
	Stdin           *os.File
	Stdout          *os.File
	Stderr          *os.File
	Cmd             *exec.Cmd // prepared (not started) by SetupCommand
	// store last 200 lines of both stdout and stderr
	// NOTE(review): updateAndTrim bounds this buffer by bytes (10 KB),
	// not line count — the line comment above may be stale; confirm.
	Log map[string][]byte
}
// formatVolumeArg renders one docker -v binding in the
// "HostPath:ContainerPath:Mode" form expected by the docker CLI.
func formatVolumeArg(v Volume) string {
	const layout = "%s:%s:%s"
	return fmt.Sprintf(layout, v.HostPath, v.ContainerPath, v.Mode)
}
// SetupCommand sets up the command to be run and sets DockerCmd.Cmd.
// Essentially it prepares commandline arguments for Docker
// (roughly `docker run --rm -i -p ... --name ... -w ... -v ... image cmd`),
// wires stdin, and starts goroutines that tee the container's stdout and
// stderr into dcmd.Stdout/Stderr and the in-memory Log buffers.
// The command is only prepared here, not started.
//
// NOTE(review): dcmd.Log must be a non-nil map before the command runs —
// the scanner goroutines write into it and a nil-map write would panic.
// Confirm callers initialize it.
func (dcmd DockerCmd) SetupCommand() (*DockerCmd, error) {
	args := []string{"run", "-i"}
	if dcmd.RemoveContainer {
		args = append(args, "--rm")
	}
	if dcmd.Ports != nil {
		for i := range dcmd.Ports {
			hostPort := dcmd.Ports[i].Host
			containerPort := dcmd.Ports[i].Container
			// Refuse privileged host ports (<=1024); 0 means "unassigned".
			if hostPort <= 1024 && hostPort != 0 {
				return nil, fmt.Errorf("Error cannot use restricted ports")
			}
			args = append(args, "-p", fmt.Sprintf("%d:%d", hostPort, containerPort))
		}
	}
	if dcmd.ContainerName != "" {
		args = append(args, "--name", dcmd.ContainerName)
	}
	if dcmd.Workdir != "" {
		args = append(args, "-w", dcmd.Workdir)
	}
	for _, vol := range dcmd.Volumes {
		arg := formatVolumeArg(vol)
		args = append(args, "-v", arg)
	}
	args = append(args, dcmd.ImageName)
	args = append(args, dcmd.CmdString...)
	log.Debug("DockerCmd", "dmcd", dcmd)
	// Roughly: `docker run --rm -i -w [workdir] -v [bindings] [imageName] [cmd]`
	cmd := exec.Command("docker", args...)
	dcmd.Cmd = cmd

	if dcmd.Stdin != nil {
		cmd.Stdin = dcmd.Stdin
	}

	// Tee container stdout to the Stdout file and the rolling Log buffer.
	stdoutReader, err := cmd.StdoutPipe()
	if err != nil {
		return nil, fmt.Errorf("Error creating StdoutPipe for Cmd: %s", err)
	}
	stdoutScanner := bufio.NewScanner(stdoutReader)
	go func() {
		for stdoutScanner.Scan() {
			s := stdoutScanner.Text()
			dcmd.Stdout.WriteString(s + "\n")
			dcmd.Log["Stdout"] = updateAndTrim(dcmd.Log["Stdout"], []byte(s+"\n"))
		}
	}()

	// Same for stderr.
	stderrReader, err := cmd.StderrPipe()
	if err != nil {
		return nil, fmt.Errorf("Error creating StderrPipe for Cmd: %s", err)
	}
	stderrScanner := bufio.NewScanner(stderrReader)
	go func() {
		for stderrScanner.Scan() {
			e := stderrScanner.Text()
			dcmd.Stderr.WriteString(e + "\n")
			dcmd.Log["Stderr"] = updateAndTrim(dcmd.Log["Stderr"], []byte(e+"\n"))
		}
	}()
	return &dcmd, nil
}
// updateAndTrim appends v to l and keeps only the most recent bytes, so the
// stored log never exceeds 10 KB.
func updateAndTrim(l []byte, v []byte) []byte {
	const max = 10000 // 10 KB retention cap
	out := append(l, v...)
	if extra := len(out) - max; extra > 0 {
		// Drop the oldest bytes, keeping the tail.
		out = out[extra:]
	}
	return out
}
// InspectContainer polls "docker inspect" until the container is running,
// then returns its host/container port bindings. It returns nil if the
// context is canceled, the docker client cannot be created, or a port
// number cannot be parsed.
func (dcmd DockerCmd) InspectContainer(ctx context.Context) []*pbe.Ports {
	log.Info("Fetching container metadata")
	dclient := setupDockerClient()
	if dclient == nil {
		// setupDockerClient logs and returns nil on failure; avoid a
		// nil-pointer panic on Close below.
		return nil
	}
	// close the docker client connection
	defer dclient.Close()
	for {
		select {
		case <-ctx.Done():
			return nil
		default:
			metadata, err := dclient.ContainerInspect(ctx, dcmd.ContainerName)
			if err == nil && metadata.State.Running {
				var portMap []*pbe.Ports
				// extract exposed host port from
				// https://godoc.org/github.com/docker/go-connections/nat#PortMap
				for k, v := range metadata.NetworkSettings.Ports {
					// will end up taking the last binding listed
					for i := range v {
						// Port keys look like "8080/tcp"; parse the number.
						p := strings.Split(string(k), "/")
						containerPort, err := strconv.Atoi(p[0])
						if err != nil {
							return nil
						}
						hostPort, err := strconv.Atoi(v[i].HostPort)
						if err != nil {
							return nil
						}
						portMap = append(portMap, &pbe.Ports{
							Container: int32(containerPort),
							Host:      int32(hostPort),
						})
					}
				}
				return portMap
			}
			// Back off briefly between inspect calls; the previous version
			// busy-waited and spun a CPU core while the container started.
			time.Sleep(100 * time.Millisecond)
		}
	}
}
// StopContainer stops the container (calls "docker stop" via the API with a
// 10-second timeout). It returns an error if the docker client cannot be
// created or the stop call fails.
func (dcmd DockerCmd) StopContainer() error {
	log.Info("Stopping container", "container", dcmd.ContainerName)
	dclient := setupDockerClient()
	if dclient == nil {
		// setupDockerClient returns nil on failure; without this guard the
		// deferred Close (and ContainerStop) would panic on a nil pointer.
		return fmt.Errorf("stopping container %s: could not create docker client", dcmd.ContainerName)
	}
	// close the docker client connection
	defer dclient.Close()
	// Give the container up to 10 seconds to shut down gracefully.
	timeout := time.Second * 10
	// Issue stop call
	err := dclient.ContainerStop(context.Background(), dcmd.ContainerName, &timeout)
	return err
}
// setupDockerClient creates a docker client from the environment. If the
// DOCKER_API_VERSION is unset and the server rejects our API version, it
// infers the server's version from the error message, exports it, and
// retries once. Returns nil (after logging) when a client cannot be created.
func setupDockerClient() *client.Client {
	dclient, err := client.NewEnvClient()
	if err != nil {
		log.Info("Docker error", "err", err)
		return nil
	}
	// If the api version is not set test if the client can communicate with the
	// server; if not infer API version from error message and inform the client
	// to use that version for future communication
	if os.Getenv("DOCKER_API_VERSION") == "" {
		_, err := dclient.ServerVersion(context.Background())
		if err != nil {
			re := regexp.MustCompile(`([0-9\.]+)`)
			version := re.FindAllString(err.Error(), -1)
			// Error message example:
			// Error getting metadata for container: Error response from daemon: client is newer than server (client API version: 1.26, server API version: 1.24)
			// version[0] is the client version, version[1] the server version.
			if len(version) < 2 {
				// The error did not contain two version numbers; previously
				// this indexed out of range and panicked.
				log.Info("Docker error", "err", err)
				return nil
			}
			log.Debug("DOCKER_API_VERSION", "version", version[1])
			os.Setenv("DOCKER_API_VERSION", version[1])
			return setupDockerClient()
		}
	}
	return dclient
}
Bugfix for concurrent map writes to the Log map (stdout and stderr)
package tesTaskEngineWorker
import (
"bufio"
"context"
"fmt"
"github.com/docker/docker/client"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
pbe "tes/ga4gh"
"time"
)
// DockerCmd is responsible for configuring and running a docker container.
type DockerCmd struct {
	ImageName       string       // docker image to run
	CmdString       []string     // command and arguments executed inside the container
	Volumes         []Volume     // bind mounts, formatted as "HostPath:ContainerPath:Mode"
	Workdir         string       // working directory inside the container (`-w`)
	Ports           []*pbe.Ports // host/container port bindings (`-p host:container`)
	ContainerName   string       // container name (`--name`)
	RemoveContainer bool         // if true, pass `--rm` so the container is removed on exit
	Stdin           *os.File
	Stdout          *os.File
	Stderr          *os.File
	Cmd             *exec.Cmd // the underlying `docker run` command; set by SetupCommand
	// Log stores the most recent stdout/stderr output under the keys
	// "Stdout" and "Stderr"; updateAndTrim caps each entry at 10 KB.
	Log map[string][]byte
}
// formatVolumeArg formats a Volume as a docker `-v` bind-mount argument,
// structured as "HostPath:ContainerPath:Mode".
func formatVolumeArg(v Volume) string {
	return fmt.Sprintf("%s:%s:%s", v.HostPath, v.ContainerPath, v.Mode)
}
// SetupCommand sets up the command to be run and sets DockerCmd.Cmd.
// Essentially it prepares commandline arguments for Docker, roughly:
//
//	docker run --rm -i -w [workdir] -v [bindings] [imageName] [cmd]
//
// It also wires the container's stdout/stderr to the configured files and
// mirrors the most recent output into dcmd.Log. A pointer to the (copied)
// receiver is returned so the caller sees the populated Cmd and Log fields.
func (dcmd DockerCmd) SetupCommand() (*DockerCmd, error) {
	args := []string{"run", "-i"}
	if dcmd.RemoveContainer {
		args = append(args, "--rm")
	}
	if dcmd.Ports != nil {
		for i := range dcmd.Ports {
			hostPort := dcmd.Ports[i].Host
			containerPort := dcmd.Ports[i].Container
			// Host ports <= 1024 require root to bind; refuse them.
			// 0 means "let docker pick a port" and is allowed.
			if hostPort <= 1024 && hostPort != 0 {
				return nil, fmt.Errorf("Error cannot use restricted ports")
			}
			args = append(args, "-p", fmt.Sprintf("%d:%d", hostPort, containerPort))
		}
	}
	if dcmd.ContainerName != "" {
		args = append(args, "--name", dcmd.ContainerName)
	}
	if dcmd.Workdir != "" {
		args = append(args, "-w", dcmd.Workdir)
	}
	for _, vol := range dcmd.Volumes {
		args = append(args, "-v", formatVolumeArg(vol))
	}
	args = append(args, dcmd.ImageName)
	args = append(args, dcmd.CmdString...)
	log.Debug("DockerCmd", "dmcd", dcmd)
	cmd := exec.Command("docker", args...)
	dcmd.Cmd = cmd
	if dcmd.Stdin != nil {
		cmd.Stdin = dcmd.Stdin
	}
	// Writing to a nil map panics; make sure Log is usable before the
	// reader goroutine below starts appending to it.
	if dcmd.Log == nil {
		dcmd.Log = make(map[string][]byte)
	}
	stdoutReader, err := cmd.StdoutPipe()
	if err != nil {
		return nil, fmt.Errorf("Error creating StdoutPipe for Cmd: %s", err)
	}
	stdoutScanner := bufio.NewScanner(stdoutReader)
	stderrReader, err := cmd.StderrPipe()
	if err != nil {
		return nil, fmt.Errorf("Error creating StderrPipe for Cmd: %s", err)
	}
	stderrScanner := bufio.NewScanner(stderrReader)
	// Drain stdout then stderr sequentially in a single goroutine so the
	// shared dcmd.Log map is never written concurrently.
	go func() {
		for stdoutScanner.Scan() {
			s := stdoutScanner.Text()
			dcmd.Stdout.WriteString(s + "\n")
			dcmd.Log["Stdout"] = updateAndTrim(dcmd.Log["Stdout"], []byte(s+"\n"))
		}
		for stderrScanner.Scan() {
			e := stderrScanner.Text()
			dcmd.Stderr.WriteString(e + "\n")
			dcmd.Log["Stderr"] = updateAndTrim(dcmd.Log["Stderr"], []byte(e+"\n"))
		}
	}()
	return &dcmd, nil
}
// updateAndTrim appends v to l and keeps only the most recent bytes, so the
// stored log never exceeds 10 KB.
func updateAndTrim(l []byte, v []byte) []byte {
	const max = 10000 // 10 KB retention cap
	out := append(l, v...)
	if extra := len(out) - max; extra > 0 {
		// Drop the oldest bytes, keeping the tail.
		out = out[extra:]
	}
	return out
}
// InspectContainer polls "docker inspect" until the container is running,
// then returns its host/container port bindings. It returns nil if the
// context is canceled, the docker client cannot be created, or a port
// number cannot be parsed.
func (dcmd DockerCmd) InspectContainer(ctx context.Context) []*pbe.Ports {
	log.Info("Fetching container metadata")
	dclient := setupDockerClient()
	if dclient == nil {
		// setupDockerClient logs and returns nil on failure; avoid a
		// nil-pointer panic on Close below.
		return nil
	}
	// close the docker client connection
	defer dclient.Close()
	for {
		select {
		case <-ctx.Done():
			return nil
		default:
			metadata, err := dclient.ContainerInspect(ctx, dcmd.ContainerName)
			if err == nil && metadata.State.Running {
				var portMap []*pbe.Ports
				// extract exposed host port from
				// https://godoc.org/github.com/docker/go-connections/nat#PortMap
				for k, v := range metadata.NetworkSettings.Ports {
					// will end up taking the last binding listed
					for i := range v {
						// Port keys look like "8080/tcp"; parse the number.
						p := strings.Split(string(k), "/")
						containerPort, err := strconv.Atoi(p[0])
						if err != nil {
							return nil
						}
						hostPort, err := strconv.Atoi(v[i].HostPort)
						if err != nil {
							return nil
						}
						portMap = append(portMap, &pbe.Ports{
							Container: int32(containerPort),
							Host:      int32(hostPort),
						})
					}
				}
				return portMap
			}
			// Back off briefly between inspect calls; the previous version
			// busy-waited and spun a CPU core while the container started.
			time.Sleep(100 * time.Millisecond)
		}
	}
}
// StopContainer stops the container (calls "docker stop" via the API with a
// 10-second timeout). It returns an error if the docker client cannot be
// created or the stop call fails.
func (dcmd DockerCmd) StopContainer() error {
	log.Info("Stopping container", "container", dcmd.ContainerName)
	dclient := setupDockerClient()
	if dclient == nil {
		// setupDockerClient returns nil on failure; without this guard the
		// deferred Close (and ContainerStop) would panic on a nil pointer.
		return fmt.Errorf("stopping container %s: could not create docker client", dcmd.ContainerName)
	}
	// close the docker client connection
	defer dclient.Close()
	// Give the container up to 10 seconds to shut down gracefully.
	timeout := time.Second * 10
	// Issue stop call
	err := dclient.ContainerStop(context.Background(), dcmd.ContainerName, &timeout)
	return err
}
// setupDockerClient creates a docker client from the environment. If the
// DOCKER_API_VERSION is unset and the server rejects our API version, it
// infers the server's version from the error message, exports it, and
// retries once. Returns nil (after logging) when a client cannot be created.
func setupDockerClient() *client.Client {
	dclient, err := client.NewEnvClient()
	if err != nil {
		log.Info("Docker error", "err", err)
		return nil
	}
	// If the api version is not set test if the client can communicate with the
	// server; if not infer API version from error message and inform the client
	// to use that version for future communication
	if os.Getenv("DOCKER_API_VERSION") == "" {
		_, err := dclient.ServerVersion(context.Background())
		if err != nil {
			re := regexp.MustCompile(`([0-9\.]+)`)
			version := re.FindAllString(err.Error(), -1)
			// Error message example:
			// Error getting metadata for container: Error response from daemon: client is newer than server (client API version: 1.26, server API version: 1.24)
			// version[0] is the client version, version[1] the server version.
			if len(version) < 2 {
				// The error did not contain two version numbers; previously
				// this indexed out of range and panicked.
				log.Info("Docker error", "err", err)
				return nil
			}
			log.Debug("DOCKER_API_VERSION", "version", version[1])
			os.Setenv("DOCKER_API_VERSION", version[1])
			return setupDockerClient()
		}
	}
	return dclient
}
|
// Copyright ©2016 The gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package quad
import (
"math"
"github.com/gonum/floats"
"github.com/gonum/mathext/airy"
)
// Hermite generates sample locations and weights for performing quadrature
// with a squared-exponential weight
//	int_-inf^inf e^(-x^2) f(x) dx .
type Hermite struct{}
// FixedLocations computes the sample locations and weights of the Hermite
// quadrature rule and stores them in x and weight. The slices must have the
// same length, and because the rule covers the whole real line, min and max
// must be -Inf and +Inf respectively; any violation panics.
//
// TODO(btracey): Implement the case where x > 20, x < 200 so that we don't
// need to store all of that data.
//
// Algorithm adapted from Chebfun http://www.chebfun.org/. References:
//	G. H. Golub and J. A. Welsch, "Calculation of Gauss quadrature rules",
//	Math. Comp. 23:221-230, 1969.
//	A. Glaser, X. Liu and V. Rokhlin, "A fast algorithm for the
//	calculation of the roots of special functions", SIAM Journal
//	on Scientific Computing", 29(4):1420-1438:, 2007.
//	A. Townsend, T. Trogdon, and S.Olver, Fast computation of Gauss quadrature
//	nodes and weights on the whole real line, IMA J. Numer. Anal., 36: 337–358,
//	2016. http://arxiv.org/abs/1410.5286
func (h Hermite) FixedLocations(x, weight []float64, min, max float64) {
	switch {
	case len(x) != len(weight):
		panic("hermite: slice length mismatch")
	case min >= max:
		panic("hermite: min >= max")
	case !math.IsInf(min, -1) || !math.IsInf(max, 1):
		panic("hermite: non-infinite bound")
	}
	h.locations(x, weight)
}
// locations fills x and weights with the nodes and weights of the
// len(x)-point Hermite rule: precomputed cached values for n <= 200,
// asymptotic formulas for larger n. An empty slice is a no-op.
func (h Hermite) locations(x, weights []float64) {
	n := len(x)
	if n == 0 {
		return
	}
	if n <= 200 {
		copy(x, xCacheHermite[n-1])
		copy(weights, wCacheHermite[n-1])
		return
	}
	xasy, weightsasy := h.locationsAsy(n)
	copy(x, xasy)
	copy(weights, weightsasy)
}
// Algorithm adapted from Chebfun http://www.chebfun.org/. Specific code
// https://github.com/chebfun/chebfun/blob/development/hermpts.m.
// Original Copyright Notice:
/*
Copyright (c) 2015, The Chancellor, Masters and Scholars of the University
of Oxford, and the Chebfun Developers. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the University of Oxford nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// locationsAsy returns the node locations and weights for an n-point Hermite
// quadrature rule, computed with the asymptotic formulas of
//
//	A. Townsend, T. Trogdon, and S.Olver, Fast computation of Gauss quadrature
//	nodes and weights the whole real line, IMA J. Numer. Anal.,
//	36: 337–358, 2016. http://arxiv.org/abs/1410.5286
func (h Hermite) locationsAsy(n int) (x, w []float64) {
	// The rule is symmetric about zero, so only one half (plus the center
	// node when n is odd) is computed directly.
	xa := make([]float64, n/2+n%2)
	wa := make([]float64, n/2+n%2)
	for i := range xa {
		xa[i], wa[i] = h.locationsAsy0(i, n)
	}
	if n%2 == 1 {
		// Odd n: list negated locations in descending index order (so x is
		// increasing), then the remaining non-center locations.
		for i := len(xa) - 1; i >= 0; i-- {
			x = append(x, -xa[i])
			w = append(w, wa[i])
		}
		for i := 1; i < len(xa); i++ {
			x = append(x, xa[i])
			w = append(w, wa[i])
		}
	} else {
		// Even n: no center node; every computed location appears twice,
		// once negated in the lower half and once in the upper half.
		lxa := len(xa)
		x = make([]float64, 2*lxa)
		for i, v := range xa {
			x[lxa-1-i] = -v
			x[lxa+i] = v
		}
		lwa := len(wa)
		w = make([]float64, 2*lwa)
		for i, v := range wa {
			w[lwa-1-i] = v
			w[lwa+i] = v
		}
	}
	// Normalize so the weights sum to sqrt(pi) = int_-inf^inf e^(-x^2) dx.
	sumW := floats.Sum(w)
	c := math.SqrtPi / sumW
	floats.Scale(c, w)
	return x, w
}
// locationsAsy0 returns the location and weight for location i in an n-point
// quadrature rule. The rule is symmetric, so i should be <= n/2 + n%2.
func (h Hermite) locationsAsy0(i, n int) (x, w float64) {
	// Refine the initial guess with Newton iterations in theta-space
	// (x = sqrt(2n+1) cos(theta)), where the asymptotic expansion of the
	// polynomial is well behaved.
	theta0 := h.hermiteInitialGuess(i, n)
	t0 := theta0 / math.Sqrt(2*float64(n)+1)
	theta0 = math.Acos(t0)
	sqrt2np1 := math.Sqrt(2*float64(n) + 1)
	var vali, dvali float64
	for k := 0; k < 20; k++ {
		vali, dvali = h.hermpolyAsyAiry(i, n, theta0)
		dt := -vali / (math.Sqrt2 * sqrt2np1 * dvali * math.Sin(theta0))
		theta0 -= dt
		// NOTE(review): the convergence test is on theta0 itself rather
		// than on the Newton step dt — confirm this is intentional.
		if math.Abs(theta0) < 1e-9 {
			break
		}
	}
	// Map from theta back to x and form the weight from the derivative of
	// the scaled Hermite polynomial at the node.
	x = sqrt2np1 * math.Cos(theta0)
	ders := x*vali + math.Sqrt2*dvali
	w = math.Exp(-x*x) / (ders * ders)
	return x, w
}
// hermpolyAsyAiry evaluates the Hermite polynomials using the Airy asymptotic
// formula in theta-space, returning the scaled polynomial value and its
// derivative at angle t for an n-point rule. The 12.10.x equation numbers in
// the comments refer to the DLMF chapter on parabolic cylinder functions,
// from which the expansion terms are taken.
func (h Hermite) hermpolyAsyAiry(i, n int, t float64) (valVec, dvalVec float64) {
	// mu^2 = 2n+1 is the large parameter of the asymptotic expansion.
	musq := 2*float64(n) + 1
	cosT := math.Cos(t)
	sinT := math.Sin(t)
	sin2T := 2 * cosT * sinT
	// eta, chi, and phi map theta into the argument of the Airy function.
	eta := 0.5*t - 0.25*sin2T
	chi := -math.Pow(3*eta/2, 2.0/3)
	phi := math.Pow(-chi/(sinT*sinT), 1.0/4)
	cnst := 2 * math.SqrtPi * math.Pow(musq, 1.0/6) * phi
	airy0 := real(airy.Ai(complex(math.Pow(musq, 2.0/3)*chi, 0)))
	airy1 := real(airy.AiDeriv(complex(math.Pow(musq, 2.0/3)*chi, 0)))
	// Terms in 12.10.43:
	const (
		a1 = 15.0 / 144
		b1 = -7.0 / 5 * a1
		a2 = 5.0 * 7 * 9 * 11.0 / 2.0 / 144.0 / 144.0
		b2 = -13.0 / 11 * a2
		a3 = 7.0 * 9 * 11 * 13 * 15 * 17 / 6.0 / 144.0 / 144.0 / 144.0
		b3 = -19.0 / 17 * a3
	)
	// Pre-compute powers of cos(t), chi, and phi used repeatedly below.
	cos2T := cosT * cosT
	cos3T := cos2T * cosT
	cos4T := cos3T * cosT
	cos5T := cos4T * cosT
	cos7T := cos5T * cos2T
	cos9T := cos7T * cos2T
	chi2 := chi * chi
	chi3 := chi2 * chi
	chi4 := chi3 * chi
	chi5 := chi4 * chi
	phi6 := math.Pow(phi, 6)
	phi12 := phi6 * phi6
	phi18 := phi12 * phi6
	// u polynomials in 12.10.9.
	u1 := (cos3T - 6*cosT) / 24.0
	u2 := (-9*cos4T + 249*cos2T + 145) / 1152.0
	u3 := (-4042*cos9T + 18189*cos7T - 28287*cos5T - 151995*cos3T - 259290*cosT) / 414720.0
	// Sum the Airy-type expansion for the value: alternating Ai and Ai'
	// terms with decreasing powers of mu^2.
	val := airy0
	B0 := -(phi6*u1 + a1) / chi2
	val += B0 * airy1 / math.Pow(musq, 4.0/3)
	A1 := (phi12*u2 + b1*phi6*u1 + b2) / chi3
	val += A1 * airy0 / (musq * musq)
	B1 := -(phi18*u3 + a1*phi12*u2 + a2*phi6*u1 + a3) / chi5
	val += B1 * airy1 / math.Pow(musq, 4.0/3+2)
	val *= cnst
	// Derivative: recompute the mapping and leading constant for the
	// derivative expansion.
	eta = 0.5*t - 0.25*sin2T
	chi = -math.Pow(3*eta/2, 2.0/3)
	phi = math.Pow(-chi/(sinT*sinT), 1.0/4)
	cnst = math.Sqrt2 * math.SqrtPi * math.Pow(musq, 1.0/3) / phi
	// v polynomials in 12.10.10.
	v1 := (cos3T + 6*cosT) / 24
	v2 := (15*cos4T - 327*cos2T - 143) / 1152
	v3 := (259290*cosT + 238425*cos3T - 36387*cos5T + 18189*cos7T - 4042*cos9T) / 414720
	C0 := -(phi6*v1 + b1) / chi
	dval := C0 * airy0 / math.Pow(musq, 2.0/3)
	dval += airy1
	C1 := -(phi18*v3 + b1*phi12*v2 + b2*phi6*v1 + b3) / chi4
	dval += C1 * airy0 / math.Pow(musq, 2.0/3+2)
	D1 := (phi12*v2 + a1*phi6*v1 + a2) / chi3
	dval += D1 * airy1 / (musq * musq)
	dval *= cnst
	return val, dval
}
// hermiteInitialGuess returns the initial guess for node i in an n-point Hermite
// quadrature rule. The rule is symmetric, so i should be <= n/2 + n%2.
func (h Hermite) hermiteInitialGuess(i, n int) float64 {
	// There are two different formulas for the initial guesses of the hermite
	// quadrature locations. The first uses the Gatteschi formula and is good
	// near x = sqrt(n+0.5)
	// [1] L. Gatteschi, Asymptotics and bounds for the zeros of Laguerre
	// polynomials: a survey, J. Comput. Appl. Math., 144 (2002), pp. 7-27.
	// The second is the Tricomi initial guesses, good near x = 0. This is
	// equation 2.1 in [1] and is originally from
	// [2] F. G. Tricomi, Sugli zeri delle funzioni di cui si conosce una
	// rappresentazione asintotica, Ann. Mat. Pura Appl. 26 (1947), pp. 283-300.
	// If the number of points is odd, there is a quadrature point at 1, which
	// has an initial guess of 0.
	if n%2 == 1 {
		if i == 0 {
			return 0
		}
		// Shift the index so the remaining formulas treat the half rule
		// uniformly for odd and even n.
		i--
	}
	m := n / 2
	// a is the generalized-Laguerre parameter linking Hermite zeros to
	// Laguerre zeros: -1/2 for even n, +1/2 for odd n.
	a := -0.5
	if n%2 == 1 {
		a = 0.5
	}
	nu := 4*float64(m) + 2*a + 2
	// Find the split between Gatteschi guesses and Tricomi guesses.
	p := 0.4985 + math.SmallestNonzeroFloat64
	pidx := int(math.Floor(p * float64(n)))
	// Use the Tricomi initial guesses in the first half where x is nearer to zero.
	// Note: zeros of besselj(+/-.5,x) are integer and half-integer multiples of pi.
	if i < pidx {
		rhs := math.Pi * (4*float64(m) - 4*(float64(i)+1) + 3) / nu
		// Solve t - sin(t) = rhs for t by Newton iteration.
		tnk := math.Pi / 2
		for k := 0; k < 7; k++ {
			val := tnk - math.Sin(tnk) - rhs
			dval := 1 - math.Cos(tnk)
			dTnk := val / dval
			tnk -= dTnk
			if math.Abs(dTnk) < 1e-14 {
				break
			}
		}
		vc := math.Cos(tnk / 2)
		t := vc * vc
		return math.Sqrt(nu*t - (5.0/(4.0*(1-t)*(1-t))-1.0/(1-t)-1+3*a*a)/3/nu)
	}
	// Use Gatteschi guesses in the second half where x is nearer to sqrt(n+0.5)
	i = i + 1 - m
	var ar float64
	if i < len(airyRtsExact) {
		ar = airyRtsExact[i]
	} else {
		// Asymptotic approximation of Airy roots beyond the tabulated ones.
		t := 3.0 / 8 * math.Pi * (4*(float64(i)+1) - 1)
		ar = math.Pow(t, 2.0/3) * (1 +
			5.0/48*math.Pow(t, -2) -
			5.0/36*math.Pow(t, -4) +
			77125.0/82944*math.Pow(t, -6) -
			108056875.0/6967296*math.Pow(t, -8) +
			162375596875.0/334430208*math.Pow(t, -10))
	}
	r := nu + math.Pow(2, 2.0/3)*ar*math.Pow(nu, 1.0/3) +
		0.2*math.Pow(2, 4.0/3)*ar*ar*math.Pow(nu, -1.0/3) +
		(11.0/35-a*a-12.0/175*ar*ar*ar)/nu +
		(16.0/1575*ar+92.0/7875*math.Pow(ar, 4))*math.Pow(2, 2.0/3)*math.Pow(nu, -5.0/3) -
		(15152.0/3031875*math.Pow(ar, 5)+1088.0/121275*ar*ar)*math.Pow(2, 1.0/3)*math.Pow(nu, -7.0/3)
	// The truncated expansion can yield a slightly negative r; clamp to 0
	// before taking the square root.
	if r < 0 {
		ar = 0
	} else {
		ar = math.Sqrt(r)
	}
	return ar
}
// airyRtsExact are the first ten roots of the Airy function Ai(x), used as
// exact seeds for the Gatteschi initial guesses in hermiteInitialGuess.
var airyRtsExact = []float64{
	-2.338107410459762,
	-4.087949444130970,
	-5.520559828095555,
	-6.786708090071765,
	-7.944133587120863,
	-9.022650853340979,
	-10.040174341558084,
	-11.008524303733260,
	-11.936015563236262,
	-12.828776752865757,
}
Improve locationsAsy to reduce allocation overhead and simplify the code
// Copyright ©2016 The gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package quad
import (
"math"
"github.com/gonum/floats"
"github.com/gonum/mathext/airy"
)
// Hermite generates sample locations and weights for performing quadrature
// with a squared-exponential weight
//	int_-inf^inf e^(-x^2) f(x) dx .
type Hermite struct{}
// FixedLocations computes the sample locations and weights of the Hermite
// quadrature rule and stores them in x and weight. The slices must have the
// same length, and because the rule covers the whole real line, min and max
// must be -Inf and +Inf respectively; any violation panics.
//
// TODO(btracey): Implement the case where x > 20, x < 200 so that we don't
// need to store all of that data.
//
// Algorithm adapted from Chebfun http://www.chebfun.org/. References:
//	G. H. Golub and J. A. Welsch, "Calculation of Gauss quadrature rules",
//	Math. Comp. 23:221-230, 1969.
//	A. Glaser, X. Liu and V. Rokhlin, "A fast algorithm for the
//	calculation of the roots of special functions", SIAM Journal
//	on Scientific Computing", 29(4):1420-1438:, 2007.
//	A. Townsend, T. Trogdon, and S.Olver, Fast computation of Gauss quadrature
//	nodes and weights on the whole real line, IMA J. Numer. Anal., 36: 337–358,
//	2016. http://arxiv.org/abs/1410.5286
func (h Hermite) FixedLocations(x, weight []float64, min, max float64) {
	switch {
	case len(x) != len(weight):
		panic("hermite: slice length mismatch")
	case min >= max:
		panic("hermite: min >= max")
	case !math.IsInf(min, -1) || !math.IsInf(max, 1):
		panic("hermite: non-infinite bound")
	}
	h.locations(x, weight)
}
// locations fills x and weights with the nodes and weights of the
// len(x)-point Hermite rule: precomputed cached values for n <= 200,
// asymptotic formulas computed in place for larger n. An empty slice is a
// no-op.
func (h Hermite) locations(x, weights []float64) {
	n := len(x)
	if n == 0 {
		return
	}
	if n <= 200 {
		copy(x, xCacheHermite[n-1])
		copy(weights, wCacheHermite[n-1])
		return
	}
	h.locationsAsy(x, weights)
}
// Algorithm adapted from Chebfun http://www.chebfun.org/. Specific code
// https://github.com/chebfun/chebfun/blob/development/hermpts.m.
// Original Copyright Notice:
/*
Copyright (c) 2015, The Chancellor, Masters and Scholars of the University
of Oxford, and the Chebfun Developers. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the University of Oxford nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// locationsAsy fills x and w with the node locations and weights of a Hermite
// quadrature rule with len(x) points, computed with the asymptotic formulas of
//
//	A. Townsend, T. Trogdon, and S.Olver, Fast computation of Gauss quadrature
//	nodes and weights the whole real line, IMA J. Numer. Anal.,
//	36: 337–358, 2016. http://arxiv.org/abs/1410.5286
func (h Hermite) locationsAsy(x, w []float64) {
	// Find the positive locations and weights. The rule is symmetric about
	// zero, so only the upper half of x/w is computed directly.
	n := len(x)
	l := n / 2
	xa := x[l:]
	wa := w[l:]
	for i := range xa {
		xa[i], wa[i] = h.locationsAsy0(i, n)
	}
	// Flip around zero -- copy the negative x locations with the corresponding
	// weights. For even n there is no center node, so the mirror index
	// shifts down by one.
	if n%2 == 0 {
		l--
	}
	for i, v := range xa {
		x[l-i] = -v
	}
	for i, v := range wa {
		w[l-i] = v
	}
	// Normalize so the weights sum to sqrt(pi) = int_-inf^inf e^(-x^2) dx.
	sumW := floats.Sum(w)
	c := math.SqrtPi / sumW
	floats.Scale(c, w)
}
// locationsAsy0 returns the location and weight for location i in an n-point
// quadrature rule. The rule is symmetric, so i should be <= n/2 + n%2.
func (h Hermite) locationsAsy0(i, n int) (x, w float64) {
	// Newton iteration limits: convIter caps the number of refinement
	// steps; convTol stops early once theta is sufficiently small.
	const convTol = 1e-16
	const convIter = 20
	// Refine the initial guess with Newton iterations in theta-space
	// (x = sqrt(2n+1) cos(theta)), where the asymptotic expansion of the
	// polynomial is well behaved.
	theta0 := h.hermiteInitialGuess(i, n)
	t0 := theta0 / math.Sqrt(2*float64(n)+1)
	theta0 = math.Acos(t0)
	sqrt2np1 := math.Sqrt(2*float64(n) + 1)
	var vali, dvali float64
	for k := 0; k < convIter; k++ {
		vali, dvali = h.hermpolyAsyAiry(i, n, theta0)
		dt := -vali / (math.Sqrt2 * sqrt2np1 * dvali * math.Sin(theta0))
		theta0 -= dt
		// NOTE(review): the convergence test is on theta0 itself rather
		// than on the Newton step dt — confirm this is intentional.
		if math.Abs(theta0) < convTol {
			break
		}
	}
	// Map from theta back to x and form the weight from the derivative of
	// the scaled Hermite polynomial at the node.
	x = sqrt2np1 * math.Cos(theta0)
	ders := x*vali + math.Sqrt2*dvali
	w = math.Exp(-x*x) / (ders * ders)
	return x, w
}
// hermpolyAsyAiry evaluates the Hermite polynomials using the Airy asymptotic
// formula in theta-space, returning the scaled polynomial value and its
// derivative at angle t for an n-point rule. The 12.10.x equation numbers in
// the comments refer to the DLMF chapter on parabolic cylinder functions,
// from which the expansion terms are taken.
func (h Hermite) hermpolyAsyAiry(i, n int, t float64) (valVec, dvalVec float64) {
	// mu^2 = 2n+1 is the large parameter of the asymptotic expansion.
	musq := 2*float64(n) + 1
	cosT := math.Cos(t)
	sinT := math.Sin(t)
	sin2T := 2 * cosT * sinT
	// eta, chi, and phi map theta into the argument of the Airy function.
	eta := 0.5*t - 0.25*sin2T
	chi := -math.Pow(3*eta/2, 2.0/3)
	phi := math.Pow(-chi/(sinT*sinT), 1.0/4)
	cnst := 2 * math.SqrtPi * math.Pow(musq, 1.0/6) * phi
	airy0 := real(airy.Ai(complex(math.Pow(musq, 2.0/3)*chi, 0)))
	airy1 := real(airy.AiDeriv(complex(math.Pow(musq, 2.0/3)*chi, 0)))
	// Terms in 12.10.43:
	const (
		a1 = 15.0 / 144
		b1 = -7.0 / 5 * a1
		a2 = 5.0 * 7 * 9 * 11.0 / 2.0 / 144.0 / 144.0
		b2 = -13.0 / 11 * a2
		a3 = 7.0 * 9 * 11 * 13 * 15 * 17 / 6.0 / 144.0 / 144.0 / 144.0
		b3 = -19.0 / 17 * a3
	)
	// Pre-compute powers of cos(t), chi, and phi used repeatedly below.
	cos2T := cosT * cosT
	cos3T := cos2T * cosT
	cos4T := cos3T * cosT
	cos5T := cos4T * cosT
	cos7T := cos5T * cos2T
	cos9T := cos7T * cos2T
	chi2 := chi * chi
	chi3 := chi2 * chi
	chi4 := chi3 * chi
	chi5 := chi4 * chi
	phi6 := math.Pow(phi, 6)
	phi12 := phi6 * phi6
	phi18 := phi12 * phi6
	// u polynomials in 12.10.9.
	u1 := (cos3T - 6*cosT) / 24.0
	u2 := (-9*cos4T + 249*cos2T + 145) / 1152.0
	u3 := (-4042*cos9T + 18189*cos7T - 28287*cos5T - 151995*cos3T - 259290*cosT) / 414720.0
	// Sum the Airy-type expansion for the value: alternating Ai and Ai'
	// terms with decreasing powers of mu^2.
	val := airy0
	B0 := -(phi6*u1 + a1) / chi2
	val += B0 * airy1 / math.Pow(musq, 4.0/3)
	A1 := (phi12*u2 + b1*phi6*u1 + b2) / chi3
	val += A1 * airy0 / (musq * musq)
	B1 := -(phi18*u3 + a1*phi12*u2 + a2*phi6*u1 + a3) / chi5
	val += B1 * airy1 / math.Pow(musq, 4.0/3+2)
	val *= cnst
	// Derivative: recompute the mapping and leading constant for the
	// derivative expansion.
	eta = 0.5*t - 0.25*sin2T
	chi = -math.Pow(3*eta/2, 2.0/3)
	phi = math.Pow(-chi/(sinT*sinT), 1.0/4)
	cnst = math.Sqrt2 * math.SqrtPi * math.Pow(musq, 1.0/3) / phi
	// v polynomials in 12.10.10.
	v1 := (cos3T + 6*cosT) / 24
	v2 := (15*cos4T - 327*cos2T - 143) / 1152
	v3 := (259290*cosT + 238425*cos3T - 36387*cos5T + 18189*cos7T - 4042*cos9T) / 414720
	C0 := -(phi6*v1 + b1) / chi
	dval := C0 * airy0 / math.Pow(musq, 2.0/3)
	dval += airy1
	C1 := -(phi18*v3 + b1*phi12*v2 + b2*phi6*v1 + b3) / chi4
	dval += C1 * airy0 / math.Pow(musq, 2.0/3+2)
	D1 := (phi12*v2 + a1*phi6*v1 + a2) / chi3
	dval += D1 * airy1 / (musq * musq)
	dval *= cnst
	return val, dval
}
// hermiteInitialGuess returns the initial guess for node i in an n-point Hermite
// quadrature rule. The rule is symmetric, so i should be <= n/2 + n%2.
func (h Hermite) hermiteInitialGuess(i, n int) float64 {
	// There are two different formulas for the initial guesses of the hermite
	// quadrature locations. The first uses the Gatteschi formula and is good
	// near x = sqrt(n+0.5)
	// [1] L. Gatteschi, Asymptotics and bounds for the zeros of Laguerre
	// polynomials: a survey, J. Comput. Appl. Math., 144 (2002), pp. 7-27.
	// The second is the Tricomi initial guesses, good near x = 0. This is
	// equation 2.1 in [1] and is originally from
	// [2] F. G. Tricomi, Sugli zeri delle funzioni di cui si conosce una
	// rappresentazione asintotica, Ann. Mat. Pura Appl. 26 (1947), pp. 283-300.
	// If the number of points is odd, there is a quadrature point at 1, which
	// has an initial guess of 0.
	if n%2 == 1 {
		if i == 0 {
			return 0
		}
		// Shift the index so the remaining formulas treat the half rule
		// uniformly for odd and even n.
		i--
	}
	m := n / 2
	// a is the generalized-Laguerre parameter linking Hermite zeros to
	// Laguerre zeros: -1/2 for even n, +1/2 for odd n.
	a := -0.5
	if n%2 == 1 {
		a = 0.5
	}
	nu := 4*float64(m) + 2*a + 2
	// Find the split between Gatteschi guesses and Tricomi guesses.
	p := 0.4985 + math.SmallestNonzeroFloat64
	pidx := int(math.Floor(p * float64(n)))
	// Use the Tricomi initial guesses in the first half where x is nearer to zero.
	// Note: zeros of besselj(+/-.5,x) are integer and half-integer multiples of pi.
	if i < pidx {
		rhs := math.Pi * (4*float64(m) - 4*(float64(i)+1) + 3) / nu
		// Solve t - sin(t) = rhs for t by Newton iteration.
		tnk := math.Pi / 2
		for k := 0; k < 7; k++ {
			val := tnk - math.Sin(tnk) - rhs
			dval := 1 - math.Cos(tnk)
			dTnk := val / dval
			tnk -= dTnk
			if math.Abs(dTnk) < 1e-14 {
				break
			}
		}
		vc := math.Cos(tnk / 2)
		t := vc * vc
		return math.Sqrt(nu*t - (5.0/(4.0*(1-t)*(1-t))-1.0/(1-t)-1+3*a*a)/3/nu)
	}
	// Use Gatteschi guesses in the second half where x is nearer to sqrt(n+0.5)
	i = i + 1 - m
	var ar float64
	if i < len(airyRtsExact) {
		ar = airyRtsExact[i]
	} else {
		// Asymptotic approximation of Airy roots beyond the tabulated ones.
		t := 3.0 / 8 * math.Pi * (4*(float64(i)+1) - 1)
		ar = math.Pow(t, 2.0/3) * (1 +
			5.0/48*math.Pow(t, -2) -
			5.0/36*math.Pow(t, -4) +
			77125.0/82944*math.Pow(t, -6) -
			108056875.0/6967296*math.Pow(t, -8) +
			162375596875.0/334430208*math.Pow(t, -10))
	}
	r := nu + math.Pow(2, 2.0/3)*ar*math.Pow(nu, 1.0/3) +
		0.2*math.Pow(2, 4.0/3)*ar*ar*math.Pow(nu, -1.0/3) +
		(11.0/35-a*a-12.0/175*ar*ar*ar)/nu +
		(16.0/1575*ar+92.0/7875*math.Pow(ar, 4))*math.Pow(2, 2.0/3)*math.Pow(nu, -5.0/3) -
		(15152.0/3031875*math.Pow(ar, 5)+1088.0/121275*ar*ar)*math.Pow(2, 1.0/3)*math.Pow(nu, -7.0/3)
	// The truncated expansion can yield a slightly negative r; clamp to 0
	// before taking the square root.
	if r < 0 {
		ar = 0
	} else {
		ar = math.Sqrt(r)
	}
	return ar
}
// airyRtsExact are the first ten roots of the Airy function Ai(x), used as
// exact seeds for the Gatteschi initial guesses in hermiteInitialGuess.
var airyRtsExact = []float64{
	-2.338107410459762,
	-4.087949444130970,
	-5.520559828095555,
	-6.786708090071765,
	-7.944133587120863,
	-9.022650853340979,
	-10.040174341558084,
	-11.008524303733260,
	-11.936015563236262,
	-12.828776752865757,
}
|
package mssql
import (
"bytes"
"context"
"database/sql"
"database/sql/driver"
"fmt"
"math"
"net"
"strings"
"testing"
"time"
"log"
"sync"
"reflect"
)
// driverWithProcess returns a Driver for tests that logs through t and has
// query-text preprocessing enabled.
func driverWithProcess(t *testing.T) *Driver {
	drv := new(Driver)
	drv.log = optionalLogger{testLogger{t}}
	drv.processQueryText = true
	return drv
}
// driverNoProcess returns a Driver for tests that logs through t and has
// query-text preprocessing disabled.
func driverNoProcess(t *testing.T) *Driver {
	drv := new(Driver)
	drv.log = optionalLogger{testLogger{t}}
	drv.processQueryText = false
	return drv
}
// TestSelect exercises scanning a wide range of SQL Server literals and type
// casts through Prepare/QueryRow/Scan, comparing each scanned value against
// the expected Go value. Each table entry runs as its own subtest named by
// the SQL expression.
func TestSelect(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	type testStruct struct {
		sql string      // SQL expression selected from the server
		val interface{} // expected Go value produced by Scan
	}
	longstr := strings.Repeat("x", 10000)
	values := []testStruct{
		{"1", int64(1)},
		{"-1", int64(-1)},
		{"cast(1 as int)", int64(1)},
		{"cast(-1 as int)", int64(-1)},
		{"cast(1 as tinyint)", int64(1)},
		{"cast(255 as tinyint)", int64(255)},
		{"cast(1 as smallint)", int64(1)},
		{"cast(-1 as smallint)", int64(-1)},
		{"cast(1 as bigint)", int64(1)},
		{"cast(-1 as bigint)", int64(-1)},
		{"cast(1 as bit)", true},
		{"cast(0 as bit)", false},
		{"'abc'", string("abc")},
		{"cast(0.5 as float)", float64(0.5)},
		{"cast(0.5 as real)", float64(0.5)},
		// Exact-precision numeric types scan as their decimal string bytes.
		{"cast(1 as decimal)", []byte("1")},
		{"cast(1.2345 as money)", []byte("1.2345")},
		{"cast(-1.2345 as money)", []byte("-1.2345")},
		{"cast(1.2345 as smallmoney)", []byte("1.2345")},
		{"cast(-1.2345 as smallmoney)", []byte("-1.2345")},
		{"cast(0.5 as decimal(18,1))", []byte("0.5")},
		{"cast(-0.5 as decimal(18,1))", []byte("-0.5")},
		{"cast(-0.5 as numeric(18,1))", []byte("-0.5")},
		{"cast(4294967296 as numeric(20,0))", []byte("4294967296")},
		{"cast(-0.5 as numeric(18,2))", []byte("-0.50")},
		{"N'abc'", string("abc")},
		{"cast(null as nvarchar(3))", nil},
		{"NULL", nil},
		{"cast('1753-01-01' as datetime)", time.Date(1753, 1, 1, 0, 0, 0, 0, time.UTC)},
		{"cast('2000-01-01' as datetime)", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)},
		{"cast('2000-01-01T12:13:14.12' as datetime)",
			time.Date(2000, 1, 1, 12, 13, 14, 120000000, time.UTC)},
		{"cast('2014-06-26 11:08:09.673' as datetime)", time.Date(2014, 06, 26, 11, 8, 9, 673000000, time.UTC)},
		{"cast('9999-12-31T23:59:59.997' as datetime)", time.Date(9999, 12, 31, 23, 59, 59, 997000000, time.UTC)},
		{"cast(NULL as datetime)", nil},
		{"cast('1900-01-01T00:00:00' as smalldatetime)",
			time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC)},
		{"cast('2000-01-01T12:13:00' as smalldatetime)",
			time.Date(2000, 1, 1, 12, 13, 0, 0, time.UTC)},
		{"cast('2079-06-06T23:59:00' as smalldatetime)",
			time.Date(2079, 6, 6, 23, 59, 0, 0, time.UTC)},
		{"cast(NULL as smalldatetime)", nil},
		{"cast(0x6F9619FF8B86D011B42D00C04FC964FF as uniqueidentifier)",
			[]byte{0x6F, 0x96, 0x19, 0xFF, 0x8B, 0x86, 0xD0, 0x11, 0xB4, 0x2D, 0x00, 0xC0, 0x4F, 0xC9, 0x64, 0xFF}},
		{"cast(NULL as uniqueidentifier)", nil},
		{"cast(0x1234 as varbinary(2))", []byte{0x12, 0x34}},
		{"cast(N'abc' as nvarchar(max))", "abc"},
		{"cast(null as nvarchar(max))", nil},
		{"cast('<root/>' as xml)", "<root/>"},
		{"cast('abc' as text)", "abc"},
		{"cast(null as text)", nil},
		{"cast(N'abc' as ntext)", "abc"},
		{"cast(0x1234 as image)", []byte{0x12, 0x34}},
		{"cast('abc' as char(3))", "abc"},
		{"cast('abc' as varchar(3))", "abc"},
		{"cast(N'проверка' as nvarchar(max))", "проверка"},
		{"cast(N'Δοκιμή' as nvarchar(max))", "Δοκιμή"},
		// Non-Unicode collations: each entry round-trips text through the
		// listed Windows/DOS code page.
		{"cast(cast(N'สวัสดี' as nvarchar(max)) collate Thai_CI_AI as varchar(max))", "สวัสดี"},                             // cp874
		{"cast(cast(N'你好' as nvarchar(max)) collate Chinese_PRC_CI_AI as varchar(max))", "你好"},                            // cp936
		{"cast(cast(N'こんにちは' as nvarchar(max)) collate Japanese_CI_AI as varchar(max))", "こんにちは"},                         // cp939
		{"cast(cast(N'안녕하세요.' as nvarchar(max)) collate Korean_90_CI_AI as varchar(max))", "안녕하세요."},                      // cp949
		{"cast(cast(N'你好' as nvarchar(max)) collate Chinese_Hong_Kong_Stroke_90_CI_AI as varchar(max))", "你好"},            // cp950
		{"cast(cast(N'cześć' as nvarchar(max)) collate Polish_CI_AI as varchar(max))", "cześć"},                           // cp1250
		{"cast(cast(N'Алло' as nvarchar(max)) collate Cyrillic_General_CI_AI as varchar(max))", "Алло"},                   // cp1251
		{"cast(cast(N'Bonjour' as nvarchar(max)) collate French_CI_AI as varchar(max))", "Bonjour"},                       // cp1252
		{"cast(cast(N'Γεια σας' as nvarchar(max)) collate Greek_CI_AI as varchar(max))", "Γεια σας"},                      // cp1253
		{"cast(cast(N'Merhaba' as nvarchar(max)) collate Turkish_CI_AI as varchar(max))", "Merhaba"},                      // cp1254
		{"cast(cast(N'שלום' as nvarchar(max)) collate Hebrew_CI_AI as varchar(max))", "שלום"},                             // cp1255
		{"cast(cast(N'مرحبا' as nvarchar(max)) collate Arabic_CI_AI as varchar(max))", "مرحبا"},                           // cp1256
		{"cast(cast(N'Sveiki' as nvarchar(max)) collate Lithuanian_CI_AI as varchar(max))", "Sveiki"},                     // cp1257
		{"cast(cast(N'chào' as nvarchar(max)) collate Vietnamese_CI_AI as varchar(max))", "chào"},                         // cp1258
		{fmt.Sprintf("cast(N'%s' as nvarchar(max))", longstr), longstr},
		{"cast(NULL as sql_variant)", nil},
		{"cast(cast(0x6F9619FF8B86D011B42D00C04FC964FF as uniqueidentifier) as sql_variant)",
			[]byte{0x6F, 0x96, 0x19, 0xFF, 0x8B, 0x86, 0xD0, 0x11, 0xB4, 0x2D, 0x00, 0xC0, 0x4F, 0xC9, 0x64, 0xFF}},
		{"cast(cast(1 as bit) as sql_variant)", true},
		{"cast(cast(10 as tinyint) as sql_variant)", int64(10)},
		{"cast(cast(-10 as smallint) as sql_variant)", int64(-10)},
		{"cast(cast(-20 as int) as sql_variant)", int64(-20)},
		{"cast(cast(-20 as bigint) as sql_variant)", int64(-20)},
		{"cast(cast('2000-01-01' as datetime) as sql_variant)", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)},
		{"cast(cast('2000-01-01T12:13:00' as smalldatetime) as sql_variant)",
			time.Date(2000, 1, 1, 12, 13, 0, 0, time.UTC)},
		{"cast(cast(0.125 as real) as sql_variant)", float64(0.125)},
		{"cast(cast(0.125 as float) as sql_variant)", float64(0.125)},
		{"cast(cast(1.2345 as smallmoney) as sql_variant)", []byte("1.2345")},
		{"cast(cast(1.2345 as money) as sql_variant)", []byte("1.2345")},
		{"cast(cast(0x1234 as varbinary(2)) as sql_variant)", []byte{0x12, 0x34}},
		{"cast(cast(0x1234 as binary(2)) as sql_variant)", []byte{0x12, 0x34}},
		{"cast(cast(-0.5 as decimal(18,1)) as sql_variant)", []byte("-0.5")},
		{"cast(cast(-0.5 as numeric(18,1)) as sql_variant)", []byte("-0.5")},
		{"cast(cast('abc' as varchar(3)) as sql_variant)", "abc"},
		{"cast(cast('abc' as char(3)) as sql_variant)", "abc"},
		{"cast(N'abc' as sql_variant)", "abc"},
	}
	for _, test := range values {
		t.Run(test.sql, func(t *testing.T) {
			stmt, err := conn.Prepare("select " + test.sql)
			if err != nil {
				t.Error("Prepare failed:", test.sql, err.Error())
				return
			}
			defer stmt.Close()
			row := stmt.QueryRow()
			var retval interface{}
			err = row.Scan(&retval)
			if err != nil {
				t.Error("Scan failed:", test.sql, err.Error())
				return
			}
			// []byte values cannot be compared with ==; use bytes.Equal for
			// byte-slice results and plain equality for everything else.
			var same bool
			switch decodedval := retval.(type) {
			case []byte:
				switch decodedvaltest := test.val.(type) {
				case []byte:
					same = bytes.Equal(decodedval, decodedvaltest)
				default:
					same = false
				}
			default:
				same = retval == test.val
			}
			if !same {
				t.Errorf("Values don't match '%s' '%s' for test: %s", retval, test.val, test.sql)
				return
			}
		})
	}
}
// TestSelectDateTimeOffset checks that datetimeoffset values — including the
// minimum and maximum representable instants and a sql_variant wrapping —
// round-trip with both the correct UTC instant and the correct zone offset.
func TestSelectDateTimeOffset(t *testing.T) {
	type testStruct struct {
		sql string
		val time.Time
	}
	values := []testStruct{
		{"cast('2010-11-15T11:56:45.123+14:00' as datetimeoffset(3))",
			time.Date(2010, 11, 15, 11, 56, 45, 123000000, time.FixedZone("", 14*60*60))},
		{"cast(cast('2010-11-15T11:56:45.123-14:00' as datetimeoffset(3)) as sql_variant)",
			time.Date(2010, 11, 15, 11, 56, 45, 123000000, time.FixedZone("", -14*60*60))},
		{"cast('0001-01-01T00:00:00.0000000+00:00' as datetimeoffset(7))",
			time.Date(1, 1, 1, 0, 0, 0, 0, time.FixedZone("", 0))},
		{"cast('9999-12-31T23:59:59.9999999+00:00' as datetimeoffset(7))",
			time.Date(9999, 12, 31, 23, 59, 59, 999999900, time.FixedZone("", 0))},
	}
	conn := open(t)
	defer conn.Close()
	for _, tc := range values {
		var got interface{}
		if err := conn.QueryRow("select " + tc.sql).Scan(&got); err != nil {
			t.Error("Scan failed:", tc.sql, err.Error())
			continue
		}
		gotTime := got.(time.Time)
		// Compare the absolute instants first, then the rendered values;
		// String() includes the offset, so it also verifies the location.
		if gotTime.UTC() != tc.val.UTC() {
			t.Errorf("UTC values don't match '%v' '%v' for test: %s", gotTime, tc.val, tc.sql)
			continue
		}
		if gotTime.String() != tc.val.String() {
			t.Errorf("Locations don't match '%v' '%v' for test: %s", gotTime.String(), tc.val.String(), tc.sql)
			continue
		}
	}
}
// TestSelectNewTypes verifies round-tripping of the date/time types
// introduced in SQL Server 2008 (date, time, datetime2), both directly and
// wrapped in sql_variant. It silently returns on servers older than 2008.
func TestSelectNewTypes(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	var ver string
	err := conn.QueryRow("select SERVERPROPERTY('productversion')").Scan(&ver)
	if err != nil {
		t.Fatalf("cannot select productversion: %s", err)
	}
	var n int
	_, err = fmt.Sscanf(ver, "%d", &n)
	if err != nil {
		t.Fatalf("cannot parse productversion: %s", err)
	}
	// 8 is SQL 2000, 9 is SQL 2005, 10 is SQL 2008, 11 is SQL 2012
	if n < 10 {
		return
	}
	// run tests for new data types available only in SQL Server 2008 and later
	type testStruct struct {
		sql string
		val interface{}
	}
	values := []testStruct{
		{"cast('0001-01-01' as date)",
			time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC)},
		{"cast('2000-01-01' as date)",
			time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)},
		{"cast('9999-12-31' as date)",
			time.Date(9999, 12, 31, 0, 0, 0, 0, time.UTC)},
		{"cast(NULL as date)", nil},
		{"cast('00:00:00.0000000' as time(7))",
			time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC)},
		{"cast('00:00:45.123' as time(3))",
			time.Date(1, 1, 1, 00, 00, 45, 123000000, time.UTC)},
		{"cast('11:56:45.123' as time(3))",
			time.Date(1, 1, 1, 11, 56, 45, 123000000, time.UTC)},
		{"cast('11:56:45' as time(0))",
			time.Date(1, 1, 1, 11, 56, 45, 0, time.UTC)},
		{"cast('23:59:59.9999999' as time(7))",
			time.Date(1, 1, 1, 23, 59, 59, 999999900, time.UTC)},
		{"cast(null as time(0))", nil},
		{"cast('0001-01-01T00:00:00.0000000' as datetime2(7))",
			time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC)},
		{"cast('2010-11-15T11:56:45.123' as datetime2(3))",
			time.Date(2010, 11, 15, 11, 56, 45, 123000000, time.UTC)},
		{"cast('2010-11-15T11:56:45' as datetime2(0))",
			time.Date(2010, 11, 15, 11, 56, 45, 0, time.UTC)},
		{"cast(cast('2000-01-01' as date) as sql_variant)",
			time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)},
		{"cast(cast('00:00:45.123' as time(3)) as sql_variant)",
			time.Date(1, 1, 1, 00, 00, 45, 123000000, time.UTC)},
		{"cast(cast('2010-11-15T11:56:45.123' as datetime2(3)) as sql_variant)",
			time.Date(2010, 11, 15, 11, 56, 45, 123000000, time.UTC)},
		{"cast('9999-12-31T23:59:59.9999999' as datetime2(7))",
			time.Date(9999, 12, 31, 23, 59, 59, 999999900, time.UTC)},
		{"cast(null as datetime2(3))", nil},
	}
	for _, test := range values {
		// Each case runs as a subtest (matching the sql_variant test above)
		// so that the deferred stmt.Close fires at the end of the case
		// instead of accumulating until the whole test returns.
		t.Run(test.sql, func(t *testing.T) {
			stmt, err := conn.Prepare("select " + test.sql)
			if err != nil {
				t.Error("Prepare failed:", test.sql, err.Error())
				return
			}
			defer stmt.Close()
			var retval interface{}
			if err = stmt.QueryRow().Scan(&retval); err != nil {
				t.Error("Scan failed:", test.sql, err.Error())
				return
			}
			if retval != test.val {
				t.Errorf("Values don't match '%s' '%s' for test: %s", retval, test.val, test.sql)
			}
		})
	}
}
// TestTrans exercises the basic transaction lifecycle: committing an empty
// transaction, then rolling back a transaction that executed DDL.
func TestTrans(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	tx, err := conn.Begin()
	if err != nil {
		t.Fatal("Begin failed", err.Error())
	}
	if err = tx.Commit(); err != nil {
		t.Fatal("Commit failed", err.Error())
	}
	if tx, err = conn.Begin(); err != nil {
		t.Fatal("Begin failed", err.Error())
	}
	if _, err = tx.Exec("create table #abc (fld int)"); err != nil {
		t.Fatal("Create table failed", err.Error())
	}
	if err = tx.Rollback(); err != nil {
		t.Fatal("Rollback failed", err.Error())
	}
}
// TestNull binds a NULL parameter into a variable of every scannable server
// type and verifies the driver returns Go nil for each of them.
func TestNull(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	types := []string{
		"tinyint",
		"smallint",
		"int",
		"bigint",
		"real",
		"float",
		"smallmoney",
		"money",
		"decimal",
		//"varbinary(15)",
		//"binary(15)",
		"nvarchar(15)",
		"nchar(15)",
		"varchar(15)",
		"char(15)",
		"bit",
		"smalldatetime",
		"date",
		"time",
		"datetime",
		"datetime2",
		"datetimeoffset",
		"uniqueidentifier",
		"sql_variant",
	}
	for _, sqlType := range types {
		var got interface{}
		err := conn.QueryRow("declare @x "+sqlType+" = ?; select @x", nil).Scan(&got)
		if err != nil {
			t.Error("Scan failed for type "+sqlType, err.Error())
			return
		}
		if got != nil {
			t.Error("Value should be nil, but it is ", got)
			return
		}
	}
}
// TestParams round-trips a sampling of Go values (integers, strings, byte
// slices, floats, booleans, nil, long payloads, and a time) through a
// "select ?" statement and checks that an equivalent value comes back.
func TestParams(t *testing.T) {
	longstr := strings.Repeat("x", 10000)
	longbytes := make([]byte, 10000)
	testdate, err := time.Parse(time.RFC3339, "2010-01-01T00:00:00-00:00")
	if err != nil {
		t.Fatal(err)
	}
	values := []interface{}{
		int64(5),
		"hello",
		"",
		[]byte{1, 2, 3},
		[]byte{},
		float64(1.12313554),
		true,
		false,
		nil,
		longstr,
		longbytes,
		testdate.UTC(),
	}
	conn := open(t)
	defer conn.Close()
	for _, param := range values {
		t.Run(fmt.Sprintf("%T:%#v", param, param), func(t *testing.T) {
			var got interface{}
			if err := conn.QueryRow("select ?", param).Scan(&got); err != nil {
				t.Error("Scan failed", err.Error())
				return
			}
			// Byte slices and times need special comparison; anything else
			// can be compared directly with ==.
			matched := false
			switch g := got.(type) {
			case []byte:
				if want, ok := param.([]byte); ok {
					matched = bytes.Equal(g, want)
				}
			case time.Time:
				matched = g.UTC() == param
			default:
				matched = got == param
			}
			if !matched {
				t.Error("Value don't match", got, param)
			}
		})
	}
}
// TestExec verifies that a DDL statement can be executed through Exec.
func TestExec(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	// The Result of a DDL statement carries no useful information here;
	// only the error matters (the original kept an unused res variable).
	if _, err := conn.Exec("create table #abc (fld int)"); err != nil {
		t.Fatal("Exec failed", err.Error())
	}
}
// TestShortTimeout sets a 2-second connection timeout, runs a 15-second
// server-side wait, and verifies the failure is a net.Error timeout while
// the connection remains usable afterwards.
func TestShortTimeout(t *testing.T) {
	if testing.Short() {
		t.Skip("short")
	}
	checkConnStr(t)
	SetLogger(testLogger{t})
	dsn := makeConnStr(t)
	params := dsn.Query()
	params.Set("Connection Timeout", "2")
	dsn.RawQuery = params.Encode()
	db, err := sql.Open("mssql", dsn.String())
	if err != nil {
		t.Fatal("Open connection failed:", err.Error())
	}
	defer db.Close()
	if _, err = db.Exec("waitfor delay '00:00:15'"); err == nil {
		t.Fatal("Exec should fail with timeout, but no failure occurred")
	}
	neterr, ok := err.(net.Error)
	if !ok || !neterr.Timeout() {
		t.Fatal("failure not a timeout, failed with", err)
	}
	// connection should be usable after timeout
	var val int64
	if err = db.QueryRow("select 1").Scan(&val); err != nil {
		t.Fatal("Scan failed with", err)
	}
}
// TestTwoQueries runs two queries back to back on the same pool and checks
// each returns its expected single value.
func TestTwoQueries(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	rows, err := conn.Query("select 1")
	if err != nil {
		t.Fatal("First exec failed", err)
	}
	if !rows.Next() {
		t.Fatal("First query didn't return row")
	}
	var i int
	if err = rows.Scan(&i); err != nil {
		t.Fatal("Scan failed", err)
	}
	if i != 1 {
		t.Fatalf("Wrong value returned %d, should be 1", i)
	}
	// Release the first result set before issuing the second query;
	// previously the Rows (and its pooled connection) leaked here.
	rows.Close()
	if rows, err = conn.Query("select 2"); err != nil {
		t.Fatal("Second query failed", err)
	}
	defer rows.Close()
	if !rows.Next() {
		t.Fatal("Second query didn't return row")
	}
	if err = rows.Scan(&i); err != nil {
		t.Fatal("Scan failed", err)
	}
	if i != 2 {
		t.Fatalf("Wrong value returned %d, should be 2", i)
	}
}
// TestError checks that executing a missing stored procedure fails with
// this package's Error type carrying server error number 2812.
func TestError(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	_, err := conn.Query("exec bad")
	if err == nil {
		t.Fatal("Query should fail")
	}
	sqlerr, ok := err.(Error)
	if !ok {
		t.Fatalf("Should be sql error, actually %T, %v", err, err)
	}
	// 2812: Could not find stored procedure 'bad'
	if sqlerr.Number != 2812 {
		t.Fatalf("Should be specific error code 2812, actually %d %s", sqlerr.Number, sqlerr)
	}
}
// TestQueryNoRows verifies that a Query executing a row-less DDL statement
// yields an empty result set rather than an error.
func TestQueryNoRows(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	rows, err := conn.Query("create table #abc (fld int)")
	if err != nil {
		t.Fatal("Query failed", err)
	}
	defer rows.Close() // was leaked previously
	if rows.Next() {
		t.Fatal("Query shouldn't return any rows")
	}
	// Distinguish "no rows" from "iteration failed".
	if err = rows.Err(); err != nil {
		t.Fatal("Rows have errors", err)
	}
}
// TestQueryManyNullsRow scans a row of eight NULL columns into NullInt64
// destinations and verifies each scans as invalid (NULL).
func TestQueryManyNullsRow(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	// QueryRow never returns an error directly; errors are deferred to
	// Scan. The original checked an err variable that was never assigned,
	// which was dead code.
	row := conn.QueryRow("select null, null, null, null, null, null, null, null")
	var v [8]sql.NullInt64
	if err := row.Scan(&v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7]); err != nil {
		t.Fatal("Scan failed", err)
	}
	for i := range v {
		if v[i].Valid {
			t.Errorf("column %d expected to be NULL but got %d", i, v[i].Int64)
		}
	}
}
// TestOrderBy creates a two-row table inside a transaction and checks that
// an ORDER BY query executes and scans cleanly.
func TestOrderBy(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	tx, err := conn.Begin()
	if err != nil {
		t.Fatal("Begin tran failed", err)
	}
	defer tx.Rollback()
	if _, err = tx.Exec("if (exists(select * from INFORMATION_SCHEMA.TABLES where TABLE_NAME='tbl')) drop table tbl"); err != nil {
		t.Fatal("Drop table failed", err)
	}
	if _, err = tx.Exec("create table tbl (fld1 int primary key, fld2 int)"); err != nil {
		t.Fatal("Create table failed", err)
	}
	if _, err = tx.Exec("insert into tbl (fld1, fld2) values (1, 2)"); err != nil {
		t.Fatal("Insert failed", err)
	}
	if _, err = tx.Exec("insert into tbl (fld1, fld2) values (2, 1)"); err != nil {
		t.Fatal("Insert failed", err)
	}
	rows, err := tx.Query("select * from tbl order by fld1")
	if err != nil {
		t.Fatal("Query failed", err)
	}
	for rows.Next() {
		var fld1, fld2 int32
		if err = rows.Scan(&fld1, &fld2); err != nil {
			t.Fatal("Scan failed", err)
		}
	}
	if err = rows.Err(); err != nil {
		t.Fatal("Rows have errors", err)
	}
}
// TestScanDecimal checks that numeric results scan into both a float64 and
// a string destination.
func TestScanDecimal(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	var f float64
	if err := conn.QueryRow("select cast(0.5 as numeric(25,1))").Scan(&f); err != nil {
		t.Error("query row / scan failed:", err.Error())
		return
	}
	if math.Abs(f-0.5) > 0.000001 {
		t.Error("Value is not 0.5:", f)
		return
	}
	var s string
	if err := conn.QueryRow("select cast(-0.05 as numeric(25,2))").Scan(&s); err != nil {
		t.Error("query row / scan failed:", err.Error())
		return
	}
	if s != "-0.05" {
		t.Error("Value is not -0.05:", s)
	}
}
// TestAffectedRows checks RowsAffected reporting for DDL (0 rows) and for
// inserts with literal and bound parameters (1 row each).
func TestAffectedRows(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	tx, err := conn.Begin()
	if err != nil {
		t.Fatal("Begin tran failed", err)
	}
	defer tx.Rollback()
	// execExpect runs query inside the transaction and asserts the
	// reported affected-row count; the same sequence was previously
	// written out three times.
	execExpect := func(want int64, query string, args ...interface{}) {
		res, err := tx.Exec(query, args...)
		if err != nil {
			t.Fatalf("exec failed for %q: %v", query, err)
		}
		n, err := res.RowsAffected()
		if err != nil {
			t.Fatalf("rows affected failed for %q: %v", query, err)
		}
		if n != want {
			t.Errorf("Expected %d rows affected, got %d for %q", want, n, query)
		}
	}
	execExpect(0, "create table #foo (bar int)")
	execExpect(1, "insert into #foo (bar) values (1)")
	execExpect(1, "insert into #foo (bar) values (?)", 2)
}
// TestIdentity checks LastInsertId against an identity column and verifies
// that unique-constraint violations are reported as errors for both
// literal and bound parameter values.
func TestIdentity(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	tx, err := conn.Begin()
	if err != nil {
		t.Fatal("Begin tran failed", err)
	}
	defer tx.Rollback()
	if _, err = tx.Exec("create table #foo (bar int identity, baz int unique)"); err != nil {
		t.Fatal("create table failed")
	}
	// insertExpectID runs the insert and asserts the generated identity
	// value; this sequence was previously written out twice.
	insertExpectID := func(query string, want int64) {
		res, err := tx.Exec(query)
		if err != nil {
			t.Fatalf("insert failed: %v", err)
		}
		n, err := res.LastInsertId()
		if err != nil {
			t.Fatalf("last insert id failed: %v", err)
		}
		if n != want {
			t.Errorf("Expected %d for identity, got %d", want, n)
		}
	}
	insertExpectID("insert into #foo (baz) values (1)", 1)
	insertExpectID("insert into #foo (baz) values (20)", 2)
	// Duplicate baz values must violate the unique constraint.
	if _, err = tx.Exec("insert into #foo (baz) values (1)"); err == nil {
		t.Fatal("insert should fail")
	}
	if _, err = tx.Exec("insert into #foo (baz) values (?)", 1); err == nil {
		t.Fatal("insert should fail")
	}
}
// queryParamRoundTrip sends param through a "select ?" round trip and scans
// the server's echo back into dest, panicking on any failure.
func queryParamRoundTrip(db *sql.DB, param interface{}, dest interface{}) {
	if err := db.QueryRow("select ?", param).Scan(dest); err != nil {
		log.Panicf("select / scan failed: %v", err.Error())
	}
}
// TestDateTimeParam round-trips a variety of time.Time values, including
// zoned timestamps and out-of-range dates, expecting out-of-range values to
// be clipped to the server's supported range.
func TestDateTimeParam(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	type testStruct struct {
		t time.Time
	}
	var emptydate time.Time
	mindate := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC)
	maxdate := time.Date(9999, 12, 31, 23, 59, 59, 999999900, time.UTC)
	values := []testStruct{
		{time.Date(2015, time.October, 12, 10, 22, 0, 0, time.FixedZone("PST", -8*60*60))}, // back to the future day
		{time.Date(1961, time.April, 12, 9, 7, 0, 0, time.FixedZone("MSK", 3*60*60))},      // First man in space
		{time.Date(1969, time.July, 20, 20, 18, 0, 0, time.UTC)},                           // First man on the Moon
		{time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC)},                                      // UNIX date
		{time.Date(1982, 1, 3, 12, 13, 14, 300, time.FixedZone("YAKT", 9*60*60))},          // some random date
		{time.Date(4, 6, 3, 12, 13, 14, 150000000, time.UTC)},                              // some random date
		{mindate}, // minimal value
		{maxdate}, // maximum value
		{time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC)}, // just over limit
		{emptydate},
	}
	for _, tc := range values {
		t.Run(fmt.Sprintf("Test for %v", tc.t), func(t *testing.T) {
			var got time.Time
			queryParamRoundTrip(conn, tc.t, &got)
			// Values outside the server's range come back clipped.
			want := tc.t
			switch {
			case tc.t.Before(mindate):
				want = mindate
			case tc.t.After(maxdate):
				want = maxdate
			}
			if want.Sub(got) != 0 {
				t.Errorf("expected: '%s', got: '%s' delta: %d", want, got, want.Sub(got))
			}
		})
	}
}
// TestUniqueIdentifierParam checks that both a []byte (in SQL Server's
// wire byte order) and a string literal bind correctly as a
// uniqueidentifier parameter and scan back to the same UniqueIdentifier.
func TestUniqueIdentifierParam(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	type testStruct struct {
		name string
		uuid interface{}
	}
	expected := UniqueIdentifier{0x01, 0x23, 0x45, 0x67,
		0x89, 0xAB,
		0xCD, 0xEF,
		0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF,
	}
	values := []testStruct{
		{
			"[]byte",
			[]byte{0x67, 0x45, 0x23, 0x01,
				0xAB, 0x89,
				0xEF, 0xCD,
				0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF}},
		{
			"string",
			"01234567-89ab-cdef-0123-456789abcdef"},
	}
	for _, tc := range values {
		t.Run(tc.name, func(t *testing.T) {
			var got UniqueIdentifier
			if err := conn.QueryRow("select ?", tc.uuid).Scan(&got); err != nil {
				t.Fatal("select / scan failed", err.Error())
			}
			if expected != got {
				t.Errorf("uniqueidentifier does not match: '%s' '%s'", expected, got)
			}
		})
	}
}
// TestBigQuery starts a query producing a large recursive result set,
// abandons it after a single row, and verifies the connection is still
// usable for a subsequent query.
func TestBigQuery(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	rows, err := conn.Query(`WITH n(n) AS
	(
	    SELECT 1
	    UNION ALL
	    SELECT n+1 FROM n WHERE n < 10000
	)
	SELECT n, @@version FROM n ORDER BY n
	OPTION (MAXRECURSION 10000);`)
	if err != nil {
		t.Fatal("cannot exec query", err)
	}
	// Read one row, then drop the rest of the result set.
	rows.Next()
	rows.Close()
	var res int
	if err = conn.QueryRow("select 0").Scan(&res); err != nil {
		t.Fatal("cannot scan value", err)
	}
	if res != 0 {
		t.Fatal("expected 0, got ", res)
	}
}
// TestBug32 is a regression test: binding an empty string through nullif()
// into a nullable bit column must not fail.
func TestBug32(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	tx, err := conn.Begin()
	if err != nil {
		t.Fatal("Begin tran failed", err)
	}
	defer tx.Rollback()
	if _, err = tx.Exec("if (exists(select * from INFORMATION_SCHEMA.TABLES where TABLE_NAME='tbl')) drop table tbl"); err != nil {
		t.Fatal("Drop table failed", err)
	}
	if _, err = tx.Exec("create table tbl(a int primary key,fld bit null)"); err != nil {
		t.Fatal("Create table failed", err)
	}
	if _, err = tx.Exec("insert into tbl (a,fld) values (1,nullif(?, ''))", ""); err != nil {
		t.Fatal("Insert failed", err)
	}
}
// TestIgnoreEmptyResults makes sure a "set nocount on" statement preceding
// a select does not hide the select's rows.
func TestIgnoreEmptyResults(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	rows, err := conn.Query("set nocount on; select 2")
	if err != nil {
		t.Fatal("Query failed", err.Error())
	}
	if !rows.Next() {
		t.Fatal("Query didn't return row")
	}
	var got int32
	if err = rows.Scan(&got); err != nil {
		t.Fatal("Scan failed", err)
	}
	if got != 2 {
		t.Fatal("Returned value doesn't match")
	}
}
// TestStmt_SetQueryNotification checks that a query still executes after a
// query notification is attached to the statement. The notification itself
// is delivered via Service Broker and is not verified here.
func TestStmt_SetQueryNotification(t *testing.T) {
	checkConnStr(t)
	mssqldriver := driverWithProcess(t)
	cn, err := mssqldriver.Open(makeConnStr(t).String())
	if err != nil {
		t.Fatalf("failed to open connection: %v", err)
	}
	defer cn.Close() // was leaked previously
	stmt, err := cn.Prepare("SELECT 1")
	if err != nil {
		// Must be fatal: continuing with a nil stmt panicked before.
		t.Fatal("Connection failed", err)
	}
	defer stmt.Close()
	sqlstmt := stmt.(*Stmt)
	sqlstmt.SetQueryNotification("ABC", "service=WebCacheNotifications", time.Hour)
	rows, err := sqlstmt.Query(nil)
	if err == nil {
		rows.Close()
	}
	// notifications are sent to Service Broker
	// see for more info: https://github.com/denisenkom/go-mssqldb/pull/90
}
// TestErrorInfo verifies that a server-side error surfaces as this
// package's Error type carrying the server error number
// (207 = invalid column name).
func TestErrorInfo(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	_, err := conn.Exec("select bad")
	sqlError, ok := err.(Error)
	if !ok {
		// message typo fixed: was "SQLErorr"
		t.Error("Failed to convert error to SQLError", err)
		return
	}
	if sqlError.SQLErrorNumber() != 207 /*invalid column name*/ {
		t.Errorf("Query failed with unexpected error number %d %s", sqlError.SQLErrorNumber(), sqlError.SQLErrorMessage())
	}
}
// TestSetLanguage switches the session language and checks that a
// subsequent query still returns a scannable value.
func TestSetLanguage(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	if _, err := conn.Exec("set language russian"); err != nil {
		t.Errorf("Query failed with unexpected error %s", err)
	}
	var val interface{}
	if err := conn.QueryRow("select cast(getdate() as varchar(50))").Scan(&val); err != nil {
		t.Errorf("Query failed with unexpected error %s", err)
	}
	t.Log("Returned value", val)
}
// TestConnectionClosing runs many sequential queries and verifies the pool
// never holds more than one open connection, i.e. connections are released
// once their result sets are drained.
func TestConnectionClosing(t *testing.T) {
	pool := open(t)
	defer pool.Close()
	for i := 1; i <= 100; i++ {
		if n := pool.Stats().OpenConnections; n > 1 {
			t.Errorf("Open connections is expected to stay <= 1, but it is %d", n)
			return
		}
		rows, err := pool.Query("select 1")
		if err != nil {
			t.Fatalf("Query failed with unexpected error %s", err)
		}
		// Draining the rows returns the connection to the pool.
		for rows.Next() {
			var val interface{}
			if err := rows.Scan(&val); err != nil {
				t.Fatalf("Query failed with unexpected error %s", err)
			}
		}
	}
}
// TestBeginTranError exercises begin-transaction failure paths: a send
// failure over a dead transport must mark the connection bad, and a
// receive failure while reading the begin response must fail with an error
// other than driver.ErrBadConn (the request was already sent) while still
// marking the connection bad.
func TestBeginTranError(t *testing.T) {
	checkConnStr(t)
	drv := driverWithProcess(t)
	conn, err := drv.open(context.Background(), makeConnStr(t).String())
	if err != nil {
		t.Fatalf("Open failed with error %v", err)
	}
	// Note: this defer captures the first conn value only.
	defer conn.Close()
	// close actual connection to make begin transaction to fail during sending of a packet
	conn.sess.buf.transport.Close()
	ctx := context.Background()
	_, err = conn.begin(ctx, isolationSnapshot)
	if err == nil || conn.connectionGood {
		t.Errorf("begin should fail as a bad connection, err=%v", err)
	}
	// reopen connection
	conn, err = drv.open(context.Background(), makeConnStr(t).String())
	if err != nil {
		t.Fatalf("Open failed with error %v", err)
	}
	// The earlier defer closes the original conn, so the reopened one
	// needs its own defer (it was leaked previously).
	defer conn.Close()
	err = conn.sendBeginRequest(ctx, isolationSerializable)
	if err != nil {
		t.Fatalf("sendBeginRequest failed with error %v", err)
	}
	// close connection to cause processBeginResponse to fail
	conn.sess.buf.transport.Close()
	_, err = conn.processBeginResponse(ctx)
	switch err {
	case nil:
		t.Error("processBeginResponse should fail but it succeeded")
	case driver.ErrBadConn:
		t.Error("processBeginResponse should fail with error different from ErrBadConn but it did")
	}
	if conn.connectionGood {
		t.Fatal("Connection should be in a bad state")
	}
}
// TestCommitTranError exercises commit failure paths: a send failure over a
// dead transport marks the connection bad; a receive failure while reading
// the commit response must not map to driver.ErrBadConn; and committing
// without an open transaction fails with a regular error.
func TestCommitTranError(t *testing.T) {
	checkConnStr(t)
	drv := driverWithProcess(t)
	conn, err := drv.open(context.Background(), makeConnStr(t).String())
	if err != nil {
		t.Fatalf("Open failed with error %v", err)
	}
	defer conn.Close()
	// close actual connection to make commit transaction to fail during sending of a packet
	conn.sess.buf.transport.Close()
	ctx := context.Background()
	err = conn.Commit()
	if err == nil || conn.connectionGood {
		t.Errorf("begin should fail and set the connection to bad, but it returned %v", err)
	}
	// reopen connection; the defer above captured the first conn value, so
	// this one needs its own defer (it was leaked previously)
	conn, err = drv.open(context.Background(), makeConnStr(t).String())
	if err != nil {
		t.Fatalf("Open failed with error %v", err)
	}
	defer conn.Close()
	err = conn.sendCommitRequest()
	if err != nil {
		t.Fatalf("sendCommitRequest failed with error %v", err)
	}
	// close connection to cause simpleProcessResp to fail
	conn.sess.buf.transport.Close()
	err = conn.simpleProcessResp(ctx)
	switch err {
	case nil:
		t.Error("simpleProcessResp should fail but it succeeded")
	case driver.ErrBadConn:
		t.Error("simpleProcessResp should fail with error different from ErrBadConn but it did")
	}
	if conn.connectionGood {
		t.Fatal("Connection should be in a bad state")
	}
	// reopen connection
	conn, err = drv.open(context.Background(), makeConnStr(t).String())
	if err != nil {
		t.Fatalf("Open failed with error %v", err)
	}
	// check the error before deferring Close: the original deferred on a
	// possibly-nil conn
	defer conn.Close()
	// should fail because there is no transaction
	err = conn.Commit()
	switch err {
	case nil:
		t.Error("Commit should fail but it succeeded")
	case driver.ErrBadConn:
		t.Error("Commit should fail with error different from ErrBadConn but it did")
	}
}
// TestRollbackTranError exercises rollback failure paths, mirroring
// TestCommitTranError: send failure marks the connection bad; a receive
// failure must not map to driver.ErrBadConn; and rolling back without an
// open transaction fails with a regular error.
func TestRollbackTranError(t *testing.T) {
	checkConnStr(t)
	drv := driverWithProcess(t)
	conn, err := drv.open(context.Background(), makeConnStr(t).String())
	if err != nil {
		t.Fatalf("Open failed with error %v", err)
	}
	defer conn.Close()
	// close actual connection to make rollback transaction to fail during sending of a packet
	conn.sess.buf.transport.Close()
	ctx := context.Background()
	err = conn.Rollback()
	if err == nil || conn.connectionGood {
		t.Errorf("Rollback should fail and set connection to bad but it returned %v", err)
	}
	// reopen connection; the defer above captured the first conn value, so
	// this one needs its own defer (it was leaked previously)
	conn, err = drv.open(context.Background(), makeConnStr(t).String())
	if err != nil {
		t.Fatalf("Open failed with error %v", err)
	}
	defer conn.Close()
	err = conn.sendRollbackRequest()
	if err != nil {
		t.Fatalf("sendRollbackRequest failed with error %v", err)
	}
	// close connection to cause simpleProcessResp to fail
	conn.sess.buf.transport.Close()
	err = conn.simpleProcessResp(ctx)
	switch err {
	case nil:
		t.Error("simpleProcessResp should fail but it succeeded")
	case driver.ErrBadConn:
		t.Error("simpleProcessResp should fail with error different from ErrBadConn but it did")
	}
	if conn.connectionGood {
		t.Fatal("Connection should be in a bad state")
	}
	// reopen connection
	conn, err = drv.open(context.Background(), makeConnStr(t).String())
	if err != nil {
		t.Fatalf("Open failed with error %v", err)
	}
	// check the error before deferring Close: the original deferred on a
	// possibly-nil conn
	defer conn.Close()
	// should fail because there is no transaction
	err = conn.Rollback()
	switch err {
	case nil:
		// message fixed: this test exercises Rollback, not Commit
		t.Error("Rollback should fail but it succeeded")
	case driver.ErrBadConn:
		t.Error("Rollback should fail with error different from ErrBadConn but it did")
	}
}
// TestSendQueryErrors drives Stmt.Query through its error paths: an
// unconvertible parameter value, then queries sent over a closed transport
// both without and with parameters. After a failed send the connection
// must be flagged as not good.
func TestSendQueryErrors(t *testing.T) {
	checkConnStr(t)
	drv := driverWithProcess(t)
	conn, err := drv.open(context.Background(), makeConnStr(t).String())
	if err != nil {
		t.FailNow()
	}
	defer conn.Close()
	stmt, err := conn.prepareContext(context.Background(), "select 1")
	if err != nil {
		t.FailNow()
	}
	// should fail because parameter is invalid
	_, err = stmt.Query([]driver.Value{conn})
	if err == nil {
		t.Fail()
	}
	// close actual connection to make commit transaction to fail during sending of a packet
	conn.sess.buf.transport.Close()
	// should fail because connection is closed
	_, err = stmt.Query([]driver.Value{})
	if err == nil || stmt.c.connectionGood {
		t.Fail()
	}
	// repeat with a parameterized statement; presumably this takes a
	// different send path than the parameterless one — it must fail the
	// same way (TODO confirm against the driver's send logic)
	stmt, err = conn.prepareContext(context.Background(), "select ?")
	if err != nil {
		t.FailNow()
	}
	// should fail because connection is closed
	_, err = stmt.Query([]driver.Value{int64(1)})
	if err == nil || stmt.c.connectionGood {
		t.Fail()
	}
}
// TestProcessQueryErrors checks that a read failure while processing a
// query response fails with an error other than driver.ErrBadConn (the
// query was already sent to the server) and leaves the connection marked
// as bad.
func TestProcessQueryErrors(t *testing.T) {
	checkConnStr(t)
	drv := driverWithProcess(t)
	conn, err := drv.open(context.Background(), makeConnStr(t).String())
	if err != nil {
		t.Fatal("open expected to succeed, but it failed with", err)
	}
	defer conn.Close() // the connection was previously leaked
	stmt, err := conn.prepareContext(context.Background(), "select 1")
	if err != nil {
		t.Fatal("prepareContext expected to succeed, but it failed with", err)
	}
	err = stmt.sendQuery([]namedValue{})
	if err != nil {
		t.Fatal("sendQuery expected to succeed, but it failed with", err)
	}
	// close actual connection to make reading response to fail
	conn.sess.buf.transport.Close()
	_, err = stmt.processQueryResponse(context.Background())
	if err == nil {
		t.Error("processQueryResponse expected to fail but it succeeded")
	}
	// should not fail with ErrBadConn because query was successfully sent to server
	if err == driver.ErrBadConn {
		t.Error("processQueryResponse expected to fail with error other than ErrBadConn but it failed with it")
	}
	if conn.connectionGood {
		t.Fatal("Connection should be in a bad state")
	}
}
// TestSendExecErrors mirrors TestSendQueryErrors for Stmt.Exec: an
// unconvertible parameter value, then execs sent over a closed transport
// both without and with parameters. After a failed send the connection
// must be flagged as not good.
func TestSendExecErrors(t *testing.T) {
	checkConnStr(t)
	drv := driverWithProcess(t)
	conn, err := drv.open(context.Background(), makeConnStr(t).String())
	if err != nil {
		t.FailNow()
	}
	defer conn.Close()
	stmt, err := conn.prepareContext(context.Background(), "select 1")
	if err != nil {
		t.FailNow()
	}
	// should fail because parameter is invalid
	_, err = stmt.Exec([]driver.Value{conn})
	if err == nil {
		t.Fail()
	}
	// close actual connection to make commit transaction to fail during sending of a packet
	conn.sess.buf.transport.Close()
	// should fail because connection is closed
	_, err = stmt.Exec([]driver.Value{})
	if err == nil || stmt.c.connectionGood {
		t.Fail()
	}
	// repeat with a parameterized statement; it must fail the same way on
	// the dead transport
	stmt, err = conn.prepareContext(context.Background(), "select ?")
	if err != nil {
		t.FailNow()
	}
	// should fail because connection is closed
	_, err = stmt.Exec([]driver.Value{int64(1)})
	if err == nil || stmt.c.connectionGood {
		t.Fail()
	}
}
// TestLongConnection runs WAITFOR DELAY queries against connections opened
// with different "connection timeout" settings, checking which combinations
// of connection timeout, query duration, and context deadline are expected
// to fail ("connection timeout" of 0 disables the driver-side timeout).
func TestLongConnection(t *testing.T) {
	checkConnStr(t)
	list := []struct {
		connTimeout string
		queryTimeout string
		ctxTimeout time.Duration
		wantFail bool
	}{
		// 1s connection timeout vs 2s query: expected to fail.
		{"1", "00:00:02", 6 * time.Second, true},
		// 2s connection timeout vs 1s query: expected to succeed.
		{"2", "00:00:01", 6 * time.Second, false},
		// Check no connection timeout.
		{"0", "00:00:01", 2 * time.Second, false},
		// {"0", "00:00:45", 60 * time.Second, false}, // Skip for normal testing to limit time.
	}
	for i, item := range list {
		t.Run(fmt.Sprintf("item-index-%d,want-fail=%t", i, item.wantFail), func(t *testing.T) {
			// Rebuild the DSN with this case's connection timeout.
			dsn := makeConnStr(t)
			dsnParams := dsn.Query()
			dsnParams.Set("connection timeout", item.connTimeout)
			dsn.RawQuery = dsnParams.Encode()
			db, err := sql.Open("sqlserver", dsn.String())
			if err != nil {
				t.Fatalf("failed to open driver sqlserver")
			}
			defer db.Close()
			ctx, cancel := context.WithTimeout(context.Background(), item.ctxTimeout)
			defer cancel()
			// The server-side delay is what may outlast the timeout.
			_, err = db.ExecContext(ctx, "WAITFOR DELAY '"+item.queryTimeout+"';")
			if item.wantFail && err == nil {
				t.Fatal("exec no error")
			}
			if !item.wantFail && err != nil {
				t.Fatal("exec error", err)
			}
		})
	}
}
// TestNextResultSet verifies multi-result-set handling: the first result
// set yields exactly one row, Next keeps returning false once exhausted,
// NextResultSet advances to the second set, and a further NextResultSet
// reports that no more sets remain.
func TestNextResultSet(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	rows, err := conn.Query("select 1; select 2")
	if err != nil {
		t.Fatal("Query failed", err.Error())
	}
	// Deferred calls run LIFO: Close executes first, then the Err check,
	// so iteration errors are inspected after the rows are closed.
	defer func() {
		err := rows.Err()
		if err != nil {
			t.Error("unexpected error:", err)
		}
	}()
	defer rows.Close()
	if !rows.Next() {
		t.Fatal("Query didn't return row")
	}
	var fld1, fld2 int32
	err = rows.Scan(&fld1)
	if err != nil {
		t.Fatal("Scan failed", err)
	}
	if fld1 != 1 {
		t.Fatal("Returned value doesn't match")
	}
	if rows.Next() {
		t.Fatal("Query returned unexpected second row.")
	}
	// calling next again should still return false
	if rows.Next() {
		t.Fatal("Query returned unexpected second row.")
	}
	if !rows.NextResultSet() {
		t.Fatal("NextResultSet should return true but returned false")
	}
	if !rows.Next() {
		t.Fatal("Query didn't return row")
	}
	err = rows.Scan(&fld2)
	if err != nil {
		t.Fatal("Scan failed", err)
	}
	if fld2 != 2 {
		t.Fatal("Returned value doesn't match")
	}
	// Only two result sets exist, so a third advance must fail.
	if rows.NextResultSet() {
		t.Fatal("NextResultSet should return false but returned true")
	}
}
// TestColumnTypeIntrospection checks database/sql column metadata —
// DatabaseTypeName, Length, DecimalSize, and ScanType — for a wide range
// of server types.
func TestColumnTypeIntrospection(t *testing.T) {
	type tst struct {
		expr         string
		typeName     string
		reflType     reflect.Type
		hasSize      bool
		size         int64
		hasPrecScale bool
		precision    int64
		scale        int64
	}
	tests := []tst{
		{"cast(1 as bit)", "BIT", reflect.TypeOf(true), false, 0, false, 0, 0},
		{"cast(1 as tinyint)", "TINYINT", reflect.TypeOf(int64(0)), false, 0, false, 0, 0},
		{"cast(1 as smallint)", "SMALLINT", reflect.TypeOf(int64(0)), false, 0, false, 0, 0},
		{"1", "INT", reflect.TypeOf(int64(0)), false, 0, false, 0, 0},
		{"cast(1 as bigint)", "BIGINT", reflect.TypeOf(int64(0)), false, 0, false, 0, 0},
		{"cast(1 as real)", "REAL", reflect.TypeOf(0.0), false, 0, false, 0, 0},
		{"cast(1 as float)", "FLOAT", reflect.TypeOf(0.0), false, 0, false, 0, 0},
		{"cast('abc' as varbinary(3))", "VARBINARY", reflect.TypeOf([]byte{}), true, 3, false, 0, 0},
		{"cast('abc' as varbinary(max))", "VARBINARY", reflect.TypeOf([]byte{}), true, 2147483645, false, 0, 0},
		{"cast(1 as datetime)", "DATETIME", reflect.TypeOf(time.Time{}), false, 0, false, 0, 0},
		{"cast(1 as smalldatetime)", "SMALLDATETIME", reflect.TypeOf(time.Time{}), false, 0, false, 0, 0},
		{"cast(getdate() as datetime2(7))", "DATETIME2", reflect.TypeOf(time.Time{}), false, 0, false, 0, 0},
		{"cast(getdate() as datetimeoffset(7))", "DATETIMEOFFSET", reflect.TypeOf(time.Time{}), false, 0, false, 0, 0},
		{"cast(getdate() as date)", "DATE", reflect.TypeOf(time.Time{}), false, 0, false, 0, 0},
		{"cast(getdate() as time)", "TIME", reflect.TypeOf(time.Time{}), false, 0, false, 0, 0},
		{"'abc'", "VARCHAR", reflect.TypeOf(""), true, 3, false, 0, 0},
		{"cast('abc' as varchar(max))", "VARCHAR", reflect.TypeOf(""), true, 2147483645, false, 0, 0},
		{"N'abc'", "NVARCHAR", reflect.TypeOf(""), true, 3, false, 0, 0},
		{"cast(N'abc' as NVARCHAR(MAX))", "NVARCHAR", reflect.TypeOf(""), true, 1073741822, false, 0, 0},
		{"cast(1 as decimal)", "DECIMAL", reflect.TypeOf([]byte{}), false, 0, true, 18, 0},
		{"cast(1 as decimal(5, 2))", "DECIMAL", reflect.TypeOf([]byte{}), false, 0, true, 5, 2},
		{"cast(1 as numeric(10, 4))", "DECIMAL", reflect.TypeOf([]byte{}), false, 0, true, 10, 4},
		{"cast(1 as money)", "MONEY", reflect.TypeOf([]byte{}), false, 0, false, 0, 0},
		{"cast(1 as smallmoney)", "SMALLMONEY", reflect.TypeOf([]byte{}), false, 0, false, 0, 0},
		{"cast(0x6F9619FF8B86D011B42D00C04FC964FF as uniqueidentifier)", "UNIQUEIDENTIFIER", reflect.TypeOf([]byte{}), false, 0, false, 0, 0},
		{"cast('<root/>' as xml)", "XML", reflect.TypeOf(""), true, 1073741822, false, 0, 0},
		{"cast('abc' as text)", "TEXT", reflect.TypeOf(""), true, 2147483647, false, 0, 0},
		{"cast(N'abc' as ntext)", "NTEXT", reflect.TypeOf(""), true, 1073741823, false, 0, 0},
		{"cast('abc' as image)", "IMAGE", reflect.TypeOf([]byte{}), true, 2147483647, false, 0, 0},
		{"cast('abc' as char(3))", "CHAR", reflect.TypeOf(""), true, 3, false, 0, 0},
		{"cast(N'abc' as nchar(3))", "NCHAR", reflect.TypeOf(""), true, 3, false, 0, 0},
		{"cast(1 as sql_variant)", "SQL_VARIANT", reflect.TypeOf(nil), false, 0, false, 0, 0},
	}
	conn := open(t)
	defer conn.Close()
	for _, tt := range tests {
		// Each iteration runs in a closure so the deferred rows.Close()
		// fires per case; previously every Rows in the loop was leaked.
		func() {
			rows, err := conn.Query("select " + tt.expr)
			if err != nil {
				t.Fatalf("Query failed with unexpected error %s", err)
			}
			defer rows.Close()
			ct, err := rows.ColumnTypes()
			if err != nil {
				t.Fatalf("Query failed with unexpected error %s", err)
			}
			if ct[0].DatabaseTypeName() != tt.typeName {
				t.Errorf("Expected type %s but returned %s", tt.typeName, ct[0].DatabaseTypeName())
			}
			size, ok := ct[0].Length()
			if ok != tt.hasSize {
				t.Errorf("Expected has size %v but returned %v for %s", tt.hasSize, ok, tt.expr)
			} else if ok && size != tt.size {
				t.Errorf("Expected size %d but returned %d for %s", tt.size, size, tt.expr)
			}
			prec, scale, ok := ct[0].DecimalSize()
			if ok != tt.hasPrecScale {
				t.Errorf("Expected has prec/scale %v but returned %v for %s", tt.hasPrecScale, ok, tt.expr)
			} else {
				if ok && prec != tt.precision {
					t.Errorf("Expected precision %d but returned %d for %s", tt.precision, prec, tt.expr)
				}
				if ok && scale != tt.scale {
					t.Errorf("Expected scale %d but returned %d for %s", tt.scale, scale, tt.expr)
				}
			}
			if ct[0].ScanType() != tt.reflType {
				t.Errorf("Expected ScanType %v but got %v for %s", tt.reflType, ct[0].ScanType(), tt.expr)
			}
		}()
	}
}
// TestColumnIntrospection builds a table variable from the test cases'
// column definitions, selects from it, and checks per-column metadata:
// name, type name, nullability, length, and precision/scale.
func TestColumnIntrospection(t *testing.T) {
	type tst struct {
		expr         string
		fieldName    string
		typeName     string
		nullable     bool
		hasSize      bool
		size         int64
		hasPrecScale bool
		precision    int64
		scale        int64
	}
	tests := []tst{
		{"f1 int null", "f1", "INT", true, false, 0, false, 0, 0},
		{"f2 varchar(15) not null", "f2", "VARCHAR", false, true, 15, false, 0, 0},
		{"f3 decimal(5, 2) null", "f3", "DECIMAL", true, false, 0, true, 5, 2},
	}
	conn := open(t)
	defer conn.Close()
	// making table variable with specified fields and making a select from it
	exprs := make([]string, len(tests))
	for i, test := range tests {
		exprs[i] = test.expr
	}
	exprJoined := strings.Join(exprs, ",")
	rows, err := conn.Query(fmt.Sprintf("declare @tbl table(%s); select * from @tbl", exprJoined))
	if err != nil {
		t.Fatalf("Query failed with unexpected error %s", err)
	}
	defer rows.Close() // was leaked previously
	ct, err := rows.ColumnTypes()
	if err != nil {
		t.Fatalf("ColumnTypes failed with unexpected error %s", err)
	}
	for i, test := range tests {
		if ct[i].Name() != test.fieldName {
			t.Errorf("Field expected have name %s but it has name %s", test.fieldName, ct[i].Name())
		}
		if ct[i].DatabaseTypeName() != test.typeName {
			t.Errorf("Invalid type name returned %s expected %s", ct[i].DatabaseTypeName(), test.typeName)
		}
		nullable, ok := ct[i].Nullable()
		if ok {
			if nullable != test.nullable {
				t.Errorf("Invalid nullable value returned %v", nullable)
			}
		} else {
			t.Error("Nullable was expected to support Nullable but it didn't")
		}
		size, ok := ct[i].Length()
		if ok != test.hasSize {
			t.Errorf("Expected has size %v but returned %v for %s", test.hasSize, ok, test.expr)
		} else if ok && size != test.size {
			t.Errorf("Expected size %d but returned %d for %s", test.size, size, test.expr)
		}
		prec, scale, ok := ct[i].DecimalSize()
		if ok != test.hasPrecScale {
			t.Errorf("Expected has prec/scale %v but returned %v for %s", test.hasPrecScale, ok, test.expr)
		} else {
			if ok && prec != test.precision {
				t.Errorf("Expected precision %d but returned %d for %s", test.precision, prec, test.expr)
			}
			if ok && scale != test.scale {
				t.Errorf("Expected scale %d but returned %d for %s", test.scale, scale, test.expr)
			}
		}
	}
}
// TestContext exercises the context-aware database/sql APIs: BeginTx with a
// serializable isolation level (verified via DBCC USEROPTIONS), plus
// QueryRowContext, ExecContext and PrepareContext on the transaction.
func TestContext(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	opts := &sql.TxOptions{
		Isolation: sql.LevelSerializable,
	}
	ctx := context.Background()
	tx, err := conn.BeginTx(ctx, opts)
	if err != nil {
		t.Errorf("BeginTx failed with unexpected error %s", err)
		return
	}
	// Release the transaction (and its connection) when the test ends.
	defer tx.Commit()
	rows, err := tx.QueryContext(ctx, "DBCC USEROPTIONS")
	if err != nil {
		// The original code ignored this error and would have panicked on a
		// nil rows below.
		t.Errorf("QueryContext failed with unexpected error %s", err)
		return
	}
	properties := make(map[string]string)
	for rows.Next() {
		var name, value string
		if err = rows.Scan(&name, &value); err != nil {
			t.Errorf("Scan failed with unexpected error %s", err)
		}
		properties[name] = value
	}
	if err = rows.Err(); err != nil {
		t.Errorf("Rows iteration failed with unexpected error %s", err)
	}
	rows.Close()
	if properties["isolation level"] != "serializable" {
		t.Errorf("Expected isolation level to be serializable but it is %s", properties["isolation level"])
	}
	row := tx.QueryRowContext(ctx, "select 1")
	var val int64
	if err = row.Scan(&val); err != nil {
		t.Errorf("QueryRowContext failed with unexpected error %s", err)
	}
	if val != 1 {
		t.Error("Incorrect value returned from query")
	}
	_, err = tx.ExecContext(ctx, "select 1")
	if err != nil {
		t.Errorf("ExecContext failed with unexpected error %s", err)
		return
	}
	_, err = tx.PrepareContext(ctx, "select 1")
	if err != nil {
		t.Errorf("PrepareContext failed with unexpected error %s", err)
		return
	}
}
// TestBeginTxtReadOnlyNotSupported verifies that requesting a read-only
// transaction fails, since SQL Server does not support them.
func TestBeginTxtReadOnlyNotSupported(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	readOnly := &sql.TxOptions{ReadOnly: true}
	if _, err := conn.BeginTx(context.Background(), readOnly); err == nil {
		t.Error("BeginTx expected to fail for read only transaction because MSSQL doesn't support it, but it succeeded")
	}
}
// TestConn_BeginTx verifies transaction isolation between two concurrent
// transactions: an insert that is rolled back must not be visible, while a
// committed one must.
func TestConn_BeginTx(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	_, err := conn.Exec("create table test (f int)")
	defer conn.Exec("drop table test")
	if err != nil {
		t.Fatal("create table failed with error", err)
	}
	tx1, err := conn.BeginTx(context.Background(), nil)
	if err != nil {
		t.Fatal("BeginTx failed with error", err)
	}
	tx2, err := conn.BeginTx(context.Background(), nil)
	if err != nil {
		t.Fatal("BeginTx failed with error", err)
	}
	_, err = tx1.Exec("insert into test (f) values (1)")
	if err != nil {
		t.Fatal("insert failed with error", err)
	}
	_, err = tx2.Exec("insert into test (f) values (2)")
	if err != nil {
		t.Fatal("insert failed with error", err)
	}
	// tx1's value 1 is discarded; tx2's value 2 is kept.  The original code
	// ignored the errors from Rollback/Commit.
	if err = tx1.Rollback(); err != nil {
		t.Fatal("Rollback failed with error", err)
	}
	if err = tx2.Commit(); err != nil {
		t.Fatal("Commit failed with error", err)
	}
	rows, err := conn.Query("select f from test")
	if err != nil {
		t.Fatal("select failed with error", err)
	}
	defer rows.Close()
	values := []int64{}
	for rows.Next() {
		var val int64
		if err = rows.Scan(&val); err != nil {
			t.Fatal("scan failed with error", err)
		}
		values = append(values, val)
	}
	if err = rows.Err(); err != nil {
		t.Fatal("rows iteration failed with error", err)
	}
	if !reflect.DeepEqual(values, []int64{2}) {
		// Fixed: the message previously claimed [1] was expected although the
		// assertion checks for [2] (the committed value).
		t.Errorf("Values is expected to be [2] but it is %v", values)
	}
}
// TestNamedParameters checks that named parameters are bound by name and that
// a single named parameter may be referenced multiple times in one query.
func TestNamedParameters(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	var col1, col2, col3 int64
	err := conn.QueryRow(
		"select :param2, :param1, :param2",
		sql.Named("param1", 1),
		sql.Named("param2", 2),
	).Scan(&col1, &col2, &col3)
	if err != nil {
		t.Errorf("Scan failed with unexpected error %s", err)
		return
	}
	if col1 != 2 || col2 != 1 || col3 != 2 {
		t.Errorf("Unexpected values returned col1=%d, col2=%d, col3=%d", col1, col2, col3)
	}
}
// TestBadNamedParameters supplies a parameter name the query never references
// (and omits one it does) and expects Scan to surface an error.
func TestBadNamedParameters(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	var col1, col2, col3 int64
	err := conn.QueryRow(
		"select :param2, :param1, :param2",
		sql.Named("badparam1", 1),
		sql.Named("param2", 2),
	).Scan(&col1, &col2, &col3)
	if err == nil {
		t.Error("Scan succeeded unexpectedly")
		return
	}
	t.Logf("Scan failed as expected with error %s", err)
}
// TestMixedParameters mixes ordinal placeholders (":2") with named ones in a
// single query; unused positional arguments are simply ignored.
func TestMixedParameters(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	var col1, col2, col3 int64
	err := conn.QueryRow(
		"select :2, :param1, :param2",
		5, // this parameter will be unused
		6,
		sql.Named("param1", 1),
		sql.Named("param2", 2),
	).Scan(&col1, &col2, &col3)
	if err != nil {
		t.Errorf("Scan failed with unexpected error %s", err)
		return
	}
	if col1 != 6 || col2 != 1 || col3 != 2 {
		t.Errorf("Unexpected values returned col1=%d, col2=%d, col3=%d", col1, col2, col3)
	}
}
/*
func TestMixedParametersExample(t *testing.T) {
conn := open(t)
defer conn.Close()
row := conn.QueryRow(
"select :id, ?",
sql.Named("id", 1),
2,
)
var col1, col2 int64
err := row.Scan(&col1, &col2)
if err != nil {
t.Errorf("Scan failed with unexpected error %s", err)
return
}
if col1 != 1 || col2 != 2 {
t.Errorf("Unexpected values returned col1=%d, col2=%d", col1, col2)
}
}
*/
// TestPinger verifies that Ping can reach the database.
func TestPinger(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	if err := conn.Ping(); err != nil {
		// Include the underlying error so the failure is diagnosable; the
		// original message discarded it.
		t.Errorf("Failed to hit database: %v", err)
	}
}
// TestQueryCancelLowLevel drives the raw driver API directly: it sends a
// long-running query, cancels its context after the query is on the wire,
// expects context.Canceled from processExec, and then confirms the same
// physical connection can still prepare and run a new statement.
func TestQueryCancelLowLevel(t *testing.T) {
	checkConnStr(t)
	drv := driverWithProcess(t)
	conn, err := drv.open(context.Background(), makeConnStr(t).String())
	if err != nil {
		t.Fatalf("Open failed with error %v", err)
	}
	defer conn.Close()
	ctx, cancel := context.WithCancel(context.Background())
	stmt, err := conn.prepareContext(ctx, "waitfor delay '00:00:03'")
	if err != nil {
		t.Fatalf("Prepare failed with error %v", err)
	}
	err = stmt.sendQuery([]namedValue{})
	if err != nil {
		t.Fatalf("sendQuery failed with error %v", err)
	}
	// Cancel after the request is sent but before results are processed.
	cancel()
	_, err = stmt.processExec(ctx)
	if err != context.Canceled {
		t.Errorf("Expected error to be Cancelled but got %v", err)
	}
	// same connection should be usable again after it was cancelled
	stmt, err = conn.prepareContext(context.Background(), "select 1")
	if err != nil {
		t.Fatalf("Prepare failed with error %v", err)
	}
	rows, err := stmt.Query([]driver.Value{})
	if err != nil {
		t.Fatalf("Query failed with error %v", err)
	}
	values := []driver.Value{nil}
	err = rows.Next(values)
	if err != nil {
		t.Fatalf("Next failed with error %v", err)
	}
}
// TestQueryCancelHighLevel cancels a context while a long statement runs via
// database/sql and expects context.Canceled back; the pooled connection must
// remain usable afterwards.
func TestQueryCancelHighLevel(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		time.Sleep(200 * time.Millisecond)
		cancel()
	}()
	if _, err := conn.ExecContext(ctx, "waitfor delay '00:00:03'"); err != context.Canceled {
		t.Errorf("ExecContext expected to fail with Cancelled but it returned %v", err)
	}
	// connection should be usable after timeout
	var val int64
	if err := conn.QueryRow("select 1").Scan(&val); err != nil {
		t.Fatal("Scan failed with", err)
	}
}
// TestQueryTimeout runs a 3 second statement under a 200ms context deadline
// and expects context.DeadlineExceeded; the pooled connection must remain
// usable afterwards.
func TestQueryTimeout(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel()
	if _, err := conn.ExecContext(ctx, "waitfor delay '00:00:03'"); err != context.DeadlineExceeded {
		t.Errorf("ExecContext expected to fail with DeadlineExceeded but it returned %v", err)
	}
	// connection should be usable after timeout
	var val int64
	if err := conn.QueryRow("select 1").Scan(&val); err != nil {
		t.Fatal("Scan failed with", err)
	}
}
// TestDriverParams checks parameter handling under both registered drivers:
// the "mssql" driver preprocesses ":name" / ":1" placeholders in the query
// text, while the "sqlserver" driver passes "@name" / "@p1" through verbatim.
func TestDriverParams(t *testing.T) {
	checkConnStr(t)
	SetLogger(testLogger{t})
	type sqlCmd struct {
		Name   string        // subtest name
		Driver string        // driver registration name to open
		Query  string        // query using that driver's placeholder style
		Param  []interface{} // arguments passed to Query
		Expect []interface{} // expected value of each result column
	}
	list := []sqlCmd{
		{
			Name:   "preprocess-ordinal",
			Driver: "mssql",
			Query:  `select V1=:1`,
			Param:  []interface{}{"abc"},
			Expect: []interface{}{"abc"},
		},
		{
			Name:   "preprocess-name",
			Driver: "mssql",
			Query:  `select V1=:First`,
			Param:  []interface{}{sql.Named("First", "abc")},
			Expect: []interface{}{"abc"},
		},
		{
			Name:   "raw-ordinal",
			Driver: "sqlserver",
			Query:  `select V1=@p1`,
			Param:  []interface{}{"abc"},
			Expect: []interface{}{"abc"},
		},
		{
			Name:   "raw-name",
			Driver: "sqlserver",
			Query:  `select V1=@First`,
			Param:  []interface{}{sql.Named("First", "abc")},
			Expect: []interface{}{"abc"},
		},
	}
	for cmdIndex, cmd := range list {
		t.Run(cmd.Name, func(t *testing.T) {
			db, err := sql.Open(cmd.Driver, makeConnStr(t).String())
			if err != nil {
				t.Fatalf("failed to open driver %q", cmd.Driver)
			}
			defer db.Close()
			rows, err := db.Query(cmd.Query, cmd.Param...)
			if err != nil {
				t.Fatalf("failed to run query %q %v", cmd.Query, err)
			}
			defer rows.Close()
			columns, err := rows.Columns()
			if err != nil {
				t.Fatalf("failed to get column schema %v", err)
			}
			clen := len(columns)
			if clen != len(cmd.Expect) {
				t.Fatalf("query column has %d, expect %d columns", clen, len(cmd.Expect))
			}
			// Scan each column into an interface{} via a pointer slice.
			values := make([]interface{}, clen)
			into := make([]interface{}, clen)
			for i := 0; i < clen; i++ {
				into[i] = &values[i]
			}
			for rows.Next() {
				err = rows.Scan(into...)
				if err != nil {
					t.Fatalf("failed to scan into row for %d %q", cmdIndex, cmd.Driver)
				}
				for i := range cmd.Expect {
					if values[i] != cmd.Expect[i] {
						t.Fatalf("expected value in index %d %v != actual value %v", i, cmd.Expect[i], values[i])
					}
				}
			}
		})
	}
}
// connInterrupt wraps a net.Conn and can be switched into a mode where reads
// and/or writes fail with disconnectError, simulating a dropped server link.
type connInterrupt struct {
	net.Conn

	mu           sync.Mutex // guards the disrupt flags
	disruptRead  bool
	disruptWrite bool
}

// Interrupt makes future Write calls fail when write is true, otherwise
// future Read calls fail.
func (c *connInterrupt) Interrupt(write bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if write {
		c.disruptWrite = true
	} else {
		c.disruptRead = true
	}
}

// Read fails with disconnectError once read disruption was requested,
// otherwise it delegates to the wrapped connection.
func (c *connInterrupt) Read(b []byte) (int, error) {
	c.mu.Lock()
	disrupted := c.disruptRead
	c.mu.Unlock()
	if disrupted {
		return 0, disconnectError{}
	}
	return c.Conn.Read(b)
}

// Write fails with disconnectError once write disruption was requested,
// otherwise it delegates to the wrapped connection.
func (c *connInterrupt) Write(b []byte) (int, error) {
	c.mu.Lock()
	disrupted := c.disruptWrite
	c.mu.Unlock()
	if disrupted {
		return 0, disconnectError{}
	}
	return c.Conn.Write(b)
}
// dialerInterrupt wraps a tcpDialer and remembers every connection it hands
// out so all of them can later be disrupted at once.
type dialerInterrupt struct {
	nd tcpDialer

	mu   sync.Mutex // guards list
	list []*connInterrupt
}

// Dial delegates to the underlying dialer and records the resulting
// connection, wrapped in a connInterrupt, for later disruption.
func (d *dialerInterrupt) Dial(ctx context.Context, addr string) (net.Conn, error) {
	conn, err := d.nd.Dial(ctx, addr)
	if err != nil {
		return nil, err
	}
	wrapped := &connInterrupt{Conn: conn}
	d.mu.Lock()
	defer d.mu.Unlock()
	d.list = append(d.list, wrapped)
	return wrapped, nil
}

// Interrupt disrupts the write (or read, when write is false) side of every
// connection dialed so far.
func (d *dialerInterrupt) Interrupt(write bool) {
	d.mu.Lock()
	defer d.mu.Unlock()
	for _, ci := range d.list {
		ci.Interrupt(write)
	}
}
var _ net.Error = disconnectError{}
type disconnectError struct{}
func (disconnectError) Error() string {
return "disconnect"
}
func (disconnectError) Timeout() bool {
return true
}
func (disconnectError) Temporary() bool {
return true
}
// TestDisconnect1 ensures errors and states are handled correctly if
// the server is disconnected mid-query.
func TestDisconnect1(t *testing.T) {
	if testing.Short() {
		t.Skip("short")
	}
	checkConnStr(t)
	SetLogger(testLogger{t})
	// Revert to the normal dialer after the test is done.
	normalCreateDialer := createDialer
	defer func() {
		createDialer = normalCreateDialer
	}()
	// Closing waitDisrupt makes every connection produced by the replacement
	// dialer start failing both reads and writes (see dialerInterrupt).
	waitDisrupt := make(chan struct{})
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*2)
	defer cancel()
	createDialer = func(p *connectParams) dialer {
		nd := tcpDialer{&net.Dialer{Timeout: p.dial_timeout, KeepAlive: p.keepAlive}}
		di := &dialerInterrupt{nd: nd}
		go func() {
			<-waitDisrupt
			di.Interrupt(true)
			di.Interrupt(false)
		}()
		return di
	}
	db, err := sql.Open("sqlserver", makeConnStr(t).String())
	if err != nil {
		t.Fatal(err)
	}
	if err := db.PingContext(ctx); err != nil {
		t.Fatal(err)
	}
	defer db.Close()
	_, err = db.ExecContext(ctx, `SET LOCK_TIMEOUT 1800;`)
	if err != nil {
		t.Fatal(err)
	}
	// Disrupt the connection one second into the three second query below.
	go func() {
		time.Sleep(time.Second * 1)
		close(waitDisrupt)
	}()
	t.Log("prepare for query")
	_, err = db.ExecContext(ctx, `waitfor delay '00:00:3';`)
	if err != nil {
		t.Log("expected error after disconnect", err)
		return
	}
	t.Fatal("wanted error after Exec")
}
// TestDisconnect2 tests a read error so the query is started
// but results cannot be read.
func TestDisconnect2(t *testing.T) {
	if testing.Short() {
		t.Skip("short")
	}
	checkConnStr(t)
	SetLogger(testLogger{t})
	// Revert to the normal dialer after the test is done.
	normalCreateDialer := createDialer
	defer func() {
		createDialer = normalCreateDialer
	}()
	// end receives the result of the background query.
	end := make(chan error)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go func() {
		// NOTE: t.Fatal/FailNow must only be called from the goroutine
		// running the test function, so setup failures here are reported
		// with t.Error and forwarded through the end channel instead (the
		// original code incorrectly used t.Fatal here).
		waitDisrupt := make(chan struct{})
		ctx, cancel = context.WithTimeout(ctx, time.Second*2)
		defer cancel()
		createDialer = func(p *connectParams) dialer {
			nd := tcpDialer{&net.Dialer{Timeout: p.dial_timeout, KeepAlive: p.keepAlive}}
			di := &dialerInterrupt{nd: nd}
			go func() {
				<-waitDisrupt
				di.Interrupt(false)
			}()
			return di
		}
		db, err := sql.Open("sqlserver", makeConnStr(t).String())
		if err != nil {
			t.Error(err)
			end <- err
			return
		}
		defer db.Close()
		if err := db.PingContext(ctx); err != nil {
			t.Error(err)
			end <- err
			return
		}
		if _, err := db.ExecContext(ctx, `SET LOCK_TIMEOUT 1800;`); err != nil {
			t.Error(err)
			end <- err
			return
		}
		// Disrupt reads, then run a query whose results can never arrive.
		close(waitDisrupt)
		_, err = db.ExecContext(ctx, `waitfor delay '00:00:3';`)
		end <- err
	}()
	timeout := time.After(10 * time.Second)
	select {
	case err := <-end:
		if err == nil {
			t.Fatal("test err")
		}
	case <-timeout:
		t.Fatal("timeout")
	}
}
// more test coverage for error.go
package mssql
import (
"bytes"
"context"
"database/sql"
"database/sql/driver"
"fmt"
"math"
"net"
"strings"
"testing"
"time"
"log"
"sync"
"reflect"
)
// driverWithProcess returns a Driver wired to the test logger with query
// text preprocessing enabled.
func driverWithProcess(t *testing.T) *Driver {
	d := &Driver{processQueryText: true}
	d.log = optionalLogger{testLogger{t}}
	return d
}
// driverNoProcess returns a Driver wired to the test logger with query
// text preprocessing disabled.
func driverNoProcess(t *testing.T) *Driver {
	d := &Driver{processQueryText: false}
	d.log = optionalLogger{testLogger{t}}
	return d
}
// TestSelect round-trips a broad matrix of T-SQL literals through
// "select <expr>" and checks the driver decodes each into the expected Go
// value: integer/bit/float types, money and decimal (returned as []byte
// strings), (n)char/(n)varchar/(n)text including many non-Unicode
// collations, binary/image, datetime variants, uniqueidentifier, xml and
// sql_variant-wrapped values.
func TestSelect(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	type testStruct struct {
		sql string      // expression selected on the server
		val interface{} // expected scanned value (nil for NULL)
	}
	longstr := strings.Repeat("x", 10000)
	values := []testStruct{
		{"1", int64(1)},
		{"-1", int64(-1)},
		{"cast(1 as int)", int64(1)},
		{"cast(-1 as int)", int64(-1)},
		{"cast(1 as tinyint)", int64(1)},
		{"cast(255 as tinyint)", int64(255)},
		{"cast(1 as smallint)", int64(1)},
		{"cast(-1 as smallint)", int64(-1)},
		{"cast(1 as bigint)", int64(1)},
		{"cast(-1 as bigint)", int64(-1)},
		{"cast(1 as bit)", true},
		{"cast(0 as bit)", false},
		{"'abc'", string("abc")},
		{"cast(0.5 as float)", float64(0.5)},
		{"cast(0.5 as real)", float64(0.5)},
		{"cast(1 as decimal)", []byte("1")},
		{"cast(1.2345 as money)", []byte("1.2345")},
		{"cast(-1.2345 as money)", []byte("-1.2345")},
		{"cast(1.2345 as smallmoney)", []byte("1.2345")},
		{"cast(-1.2345 as smallmoney)", []byte("-1.2345")},
		{"cast(0.5 as decimal(18,1))", []byte("0.5")},
		{"cast(-0.5 as decimal(18,1))", []byte("-0.5")},
		{"cast(-0.5 as numeric(18,1))", []byte("-0.5")},
		{"cast(4294967296 as numeric(20,0))", []byte("4294967296")},
		{"cast(-0.5 as numeric(18,2))", []byte("-0.50")},
		{"N'abc'", string("abc")},
		{"cast(null as nvarchar(3))", nil},
		{"NULL", nil},
		{"cast('1753-01-01' as datetime)", time.Date(1753, 1, 1, 0, 0, 0, 0, time.UTC)},
		{"cast('2000-01-01' as datetime)", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)},
		{"cast('2000-01-01T12:13:14.12' as datetime)",
			time.Date(2000, 1, 1, 12, 13, 14, 120000000, time.UTC)},
		{"cast('2014-06-26 11:08:09.673' as datetime)", time.Date(2014, 06, 26, 11, 8, 9, 673000000, time.UTC)},
		{"cast('9999-12-31T23:59:59.997' as datetime)", time.Date(9999, 12, 31, 23, 59, 59, 997000000, time.UTC)},
		{"cast(NULL as datetime)", nil},
		{"cast('1900-01-01T00:00:00' as smalldatetime)",
			time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC)},
		{"cast('2000-01-01T12:13:00' as smalldatetime)",
			time.Date(2000, 1, 1, 12, 13, 0, 0, time.UTC)},
		{"cast('2079-06-06T23:59:00' as smalldatetime)",
			time.Date(2079, 6, 6, 23, 59, 0, 0, time.UTC)},
		{"cast(NULL as smalldatetime)", nil},
		{"cast(0x6F9619FF8B86D011B42D00C04FC964FF as uniqueidentifier)",
			[]byte{0x6F, 0x96, 0x19, 0xFF, 0x8B, 0x86, 0xD0, 0x11, 0xB4, 0x2D, 0x00, 0xC0, 0x4F, 0xC9, 0x64, 0xFF}},
		{"cast(NULL as uniqueidentifier)", nil},
		{"cast(0x1234 as varbinary(2))", []byte{0x12, 0x34}},
		{"cast(N'abc' as nvarchar(max))", "abc"},
		{"cast(null as nvarchar(max))", nil},
		{"cast('<root/>' as xml)", "<root/>"},
		{"cast('abc' as text)", "abc"},
		{"cast(null as text)", nil},
		{"cast(N'abc' as ntext)", "abc"},
		{"cast(0x1234 as image)", []byte{0x12, 0x34}},
		{"cast('abc' as char(3))", "abc"},
		{"cast('abc' as varchar(3))", "abc"},
		{"cast(N'проверка' as nvarchar(max))", "проверка"},
		{"cast(N'Δοκιμή' as nvarchar(max))", "Δοκιμή"},
		{"cast(cast(N'สวัสดี' as nvarchar(max)) collate Thai_CI_AI as varchar(max))", "สวัสดี"},                             // cp874
		{"cast(cast(N'你好' as nvarchar(max)) collate Chinese_PRC_CI_AI as varchar(max))", "你好"},                           // cp936
		{"cast(cast(N'こんにちは' as nvarchar(max)) collate Japanese_CI_AI as varchar(max))", "こんにちは"},                        // cp939
		{"cast(cast(N'안녕하세요.' as nvarchar(max)) collate Korean_90_CI_AI as varchar(max))", "안녕하세요."},                     // cp949
		{"cast(cast(N'你好' as nvarchar(max)) collate Chinese_Hong_Kong_Stroke_90_CI_AI as varchar(max))", "你好"},           // cp950
		{"cast(cast(N'cześć' as nvarchar(max)) collate Polish_CI_AI as varchar(max))", "cześć"},                          // cp1250
		{"cast(cast(N'Алло' as nvarchar(max)) collate Cyrillic_General_CI_AI as varchar(max))", "Алло"},                  // cp1251
		{"cast(cast(N'Bonjour' as nvarchar(max)) collate French_CI_AI as varchar(max))", "Bonjour"},                      // cp1252
		{"cast(cast(N'Γεια σας' as nvarchar(max)) collate Greek_CI_AI as varchar(max))", "Γεια σας"},                     // cp1253
		{"cast(cast(N'Merhaba' as nvarchar(max)) collate Turkish_CI_AI as varchar(max))", "Merhaba"},                     // cp1254
		{"cast(cast(N'שלום' as nvarchar(max)) collate Hebrew_CI_AI as varchar(max))", "שלום"},                            // cp1255
		{"cast(cast(N'مرحبا' as nvarchar(max)) collate Arabic_CI_AI as varchar(max))", "مرحبا"},                          // cp1256
		{"cast(cast(N'Sveiki' as nvarchar(max)) collate Lithuanian_CI_AI as varchar(max))", "Sveiki"},                    // cp1257
		{"cast(cast(N'chào' as nvarchar(max)) collate Vietnamese_CI_AI as varchar(max))", "chào"},                        // cp1258
		{fmt.Sprintf("cast(N'%s' as nvarchar(max))", longstr), longstr},
		{"cast(NULL as sql_variant)", nil},
		{"cast(cast(0x6F9619FF8B86D011B42D00C04FC964FF as uniqueidentifier) as sql_variant)",
			[]byte{0x6F, 0x96, 0x19, 0xFF, 0x8B, 0x86, 0xD0, 0x11, 0xB4, 0x2D, 0x00, 0xC0, 0x4F, 0xC9, 0x64, 0xFF}},
		{"cast(cast(1 as bit) as sql_variant)", true},
		{"cast(cast(10 as tinyint) as sql_variant)", int64(10)},
		{"cast(cast(-10 as smallint) as sql_variant)", int64(-10)},
		{"cast(cast(-20 as int) as sql_variant)", int64(-20)},
		{"cast(cast(-20 as bigint) as sql_variant)", int64(-20)},
		{"cast(cast('2000-01-01' as datetime) as sql_variant)", time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)},
		{"cast(cast('2000-01-01T12:13:00' as smalldatetime) as sql_variant)",
			time.Date(2000, 1, 1, 12, 13, 0, 0, time.UTC)},
		{"cast(cast(0.125 as real) as sql_variant)", float64(0.125)},
		{"cast(cast(0.125 as float) as sql_variant)", float64(0.125)},
		{"cast(cast(1.2345 as smallmoney) as sql_variant)", []byte("1.2345")},
		{"cast(cast(1.2345 as money) as sql_variant)", []byte("1.2345")},
		{"cast(cast(0x1234 as varbinary(2)) as sql_variant)", []byte{0x12, 0x34}},
		{"cast(cast(0x1234 as binary(2)) as sql_variant)", []byte{0x12, 0x34}},
		{"cast(cast(-0.5 as decimal(18,1)) as sql_variant)", []byte("-0.5")},
		{"cast(cast(-0.5 as numeric(18,1)) as sql_variant)", []byte("-0.5")},
		{"cast(cast('abc' as varchar(3)) as sql_variant)", "abc"},
		{"cast(cast('abc' as char(3)) as sql_variant)", "abc"},
		{"cast(N'abc' as sql_variant)", "abc"},
	}
	for _, test := range values {
		t.Run(test.sql, func(t *testing.T) {
			stmt, err := conn.Prepare("select " + test.sql)
			if err != nil {
				t.Error("Prepare failed:", test.sql, err.Error())
				return
			}
			defer stmt.Close()
			row := stmt.QueryRow()
			var retval interface{}
			err = row.Scan(&retval)
			if err != nil {
				t.Error("Scan failed:", test.sql, err.Error())
				return
			}
			// []byte results cannot be compared with ==, so compare contents;
			// every other expected type here is comparable.
			var same bool
			switch decodedval := retval.(type) {
			case []byte:
				switch decodedvaltest := test.val.(type) {
				case []byte:
					same = bytes.Equal(decodedval, decodedvaltest)
				default:
					same = false
				}
			default:
				same = retval == test.val
			}
			if !same {
				t.Errorf("Values don't match '%s' '%s' for test: %s", retval, test.val, test.sql)
				return
			}
		})
	}
}
// TestSelectDateTimeOffset verifies that datetimeoffset values are decoded
// into time.Time with both the correct instant and the correct fixed-zone
// offset, including the type's extreme boundary values.
func TestSelectDateTimeOffset(t *testing.T) {
	type testStruct struct {
		sql string    // expression selected on the server
		val time.Time // expected value, including zone offset
	}
	values := []testStruct{
		{"cast('2010-11-15T11:56:45.123+14:00' as datetimeoffset(3))",
			time.Date(2010, 11, 15, 11, 56, 45, 123000000, time.FixedZone("", 14*60*60))},
		{"cast(cast('2010-11-15T11:56:45.123-14:00' as datetimeoffset(3)) as sql_variant)",
			time.Date(2010, 11, 15, 11, 56, 45, 123000000, time.FixedZone("", -14*60*60))},
		{"cast('0001-01-01T00:00:00.0000000+00:00' as datetimeoffset(7))",
			time.Date(1, 1, 1, 0, 0, 0, 0, time.FixedZone("", 0))},
		{"cast('9999-12-31T23:59:59.9999999+00:00' as datetimeoffset(7))",
			time.Date(9999, 12, 31, 23, 59, 59, 999999900, time.FixedZone("", 0))},
	}
	conn := open(t)
	defer conn.Close()
	for _, test := range values {
		row := conn.QueryRow("select " + test.sql)
		var retval interface{}
		err := row.Scan(&retval)
		if err != nil {
			t.Error("Scan failed:", test.sql, err.Error())
			continue
		}
		retvalDate := retval.(time.Time)
		// Compare the instant first, then the rendered value (which includes
		// the zone offset) to make sure the offset itself was preserved.
		if retvalDate.UTC() != test.val.UTC() {
			t.Errorf("UTC values don't match '%v' '%v' for test: %s", retvalDate, test.val, test.sql)
			continue
		}
		if retvalDate.String() != test.val.String() {
			t.Errorf("Locations don't match '%v' '%v' for test: %s", retvalDate.String(), test.val.String(), test.sql)
			continue
		}
	}
}
// TestSelectNewTypes checks decoding of the date, time(n) and datetime2(n)
// types, which only exist on SQL Server 2008 and later; it is a no-op on
// older servers.
func TestSelectNewTypes(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	var ver string
	err := conn.QueryRow("select SERVERPROPERTY('productversion')").Scan(&ver)
	if err != nil {
		t.Fatalf("cannot select productversion: %s", err)
	}
	// Only the major version number is needed for the feature gate.
	var n int
	_, err = fmt.Sscanf(ver, "%d", &n)
	if err != nil {
		t.Fatalf("cannot parse productversion: %s", err)
	}
	// 8 is SQL 2000, 9 is SQL 2005, 10 is SQL 2008, 11 is SQL 2012
	if n < 10 {
		return
	}
	// run tests for new data types available only in SQL Server 2008 and later
	type testStruct struct {
		sql string      // expression selected on the server
		val interface{} // expected scanned value (nil for NULL)
	}
	values := []testStruct{
		{"cast('0001-01-01' as date)",
			time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC)},
		{"cast('2000-01-01' as date)",
			time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)},
		{"cast('9999-12-31' as date)",
			time.Date(9999, 12, 31, 0, 0, 0, 0, time.UTC)},
		{"cast(NULL as date)", nil},
		{"cast('00:00:00.0000000' as time(7))",
			time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC)},
		{"cast('00:00:45.123' as time(3))",
			time.Date(1, 1, 1, 00, 00, 45, 123000000, time.UTC)},
		{"cast('11:56:45.123' as time(3))",
			time.Date(1, 1, 1, 11, 56, 45, 123000000, time.UTC)},
		{"cast('11:56:45' as time(0))",
			time.Date(1, 1, 1, 11, 56, 45, 0, time.UTC)},
		{"cast('23:59:59.9999999' as time(7))",
			time.Date(1, 1, 1, 23, 59, 59, 999999900, time.UTC)},
		{"cast(null as time(0))", nil},
		{"cast('0001-01-01T00:00:00.0000000' as datetime2(7))",
			time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC)},
		{"cast('2010-11-15T11:56:45.123' as datetime2(3))",
			time.Date(2010, 11, 15, 11, 56, 45, 123000000, time.UTC)},
		{"cast('2010-11-15T11:56:45' as datetime2(0))",
			time.Date(2010, 11, 15, 11, 56, 45, 0, time.UTC)},
		{"cast(cast('2000-01-01' as date) as sql_variant)",
			time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)},
		{"cast(cast('00:00:45.123' as time(3)) as sql_variant)",
			time.Date(1, 1, 1, 00, 00, 45, 123000000, time.UTC)},
		{"cast(cast('2010-11-15T11:56:45.123' as datetime2(3)) as sql_variant)",
			time.Date(2010, 11, 15, 11, 56, 45, 123000000, time.UTC)},
		{"cast('9999-12-31T23:59:59.9999999' as datetime2(7))",
			time.Date(9999, 12, 31, 23, 59, 59, 999999900, time.UTC)},
		{"cast(null as datetime2(3))", nil},
	}
	for _, test := range values {
		stmt, err := conn.Prepare("select " + test.sql)
		if err != nil {
			t.Error("Prepare failed:", test.sql, err.Error())
			return
		}
		defer stmt.Close()
		row := stmt.QueryRow()
		var retval interface{}
		err = row.Scan(&retval)
		if err != nil {
			t.Error("Scan failed:", test.sql, err.Error())
			continue
		}
		// Expected values are time.Time or nil, both comparable with ==.
		if retval != test.val {
			t.Errorf("Values don't match '%s' '%s' for test: %s", retval, test.val, test.sql)
			continue
		}
	}
}
// TestTrans exercises the basic transaction lifecycle: an empty
// begin/commit pair, followed by a begin/rollback that discards DDL
// performed inside the transaction.
func TestTrans(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	tx, err := conn.Begin()
	if err != nil {
		t.Fatal("Begin failed", err.Error())
	}
	if err = tx.Commit(); err != nil {
		t.Fatal("Commit failed", err.Error())
	}
	tx, err = conn.Begin()
	if err != nil {
		t.Fatal("Begin failed", err.Error())
	}
	if _, err = tx.Exec("create table #abc (fld int)"); err != nil {
		t.Fatal("Create table failed", err.Error())
	}
	if err = tx.Rollback(); err != nil {
		t.Fatal("Rollback failed", err.Error())
	}
}
// TestNull verifies that a nil parameter round-trips as NULL (scanned back
// as nil) across a broad range of server-side column types.
func TestNull(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	types := []string{
		"tinyint",
		"smallint",
		"int",
		"bigint",
		"real",
		"float",
		"smallmoney",
		"money",
		"decimal",
		//"varbinary(15)",
		//"binary(15)",
		"nvarchar(15)",
		"nchar(15)",
		"varchar(15)",
		"char(15)",
		"bit",
		"smalldatetime",
		"date",
		"time",
		"datetime",
		"datetime2",
		"datetimeoffset",
		"uniqueidentifier",
		"sql_variant",
	}
	for _, typ := range types {
		// Declare a variable of the target type, assign NULL via the
		// parameter, and read it back.
		var retval interface{}
		err := conn.QueryRow("declare @x "+typ+" = ?; select @x", nil).Scan(&retval)
		if err != nil {
			t.Error("Scan failed for type "+typ, err.Error())
			return
		}
		if retval != nil {
			t.Error("Value should be nil, but it is ", retval)
			return
		}
	}
}
// TestParams round-trips Go values of each supported parameter type through
// "select ?" and checks the scanned result equals the input (byte slices are
// compared by content, times by UTC instant).
func TestParams(t *testing.T) {
	longstr := strings.Repeat("x", 10000)
	longbytes := make([]byte, 10000)
	testdate, err := time.Parse(time.RFC3339, "2010-01-01T00:00:00-00:00")
	if err != nil {
		t.Fatal(err)
	}
	values := []interface{}{
		int64(5),
		"hello",
		"",
		[]byte{1, 2, 3},
		[]byte{},
		float64(1.12313554),
		true,
		false,
		nil,
		longstr,
		longbytes,
		testdate.UTC(),
	}
	conn := open(t)
	defer conn.Close()
	for _, val := range values {
		t.Run(fmt.Sprintf("%T:%#v", val, val), func(t *testing.T) {
			row := conn.QueryRow("select ?", val)
			var retval interface{}
			err := row.Scan(&retval)
			if err != nil {
				t.Error("Scan failed", err.Error())
				return
			}
			// Pick the comparison appropriate for the returned type:
			// []byte by content, time.Time by UTC instant, rest with ==.
			var same bool
			switch decodedval := retval.(type) {
			case []byte:
				switch decodedvaltest := val.(type) {
				case []byte:
					same = bytes.Equal(decodedval, decodedvaltest)
				default:
					same = false
				}
			case time.Time:
				same = decodedval.UTC() == val
			default:
				same = retval == val
			}
			if !same {
				t.Error("Value don't match", retval, val)
				return
			}
		})
	}
}
// TestExec verifies a DDL statement can be executed through Exec.
func TestExec(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	if _, err := conn.Exec("create table #abc (fld int)"); err != nil {
		t.Fatal("Exec failed", err.Error())
	}
}
// TestShortTimeout sets a 2 second connection timeout, runs a statement that
// takes longer, and expects the failure to be a net.Error timeout; the
// pooled connection must remain usable afterwards.
func TestShortTimeout(t *testing.T) {
	if testing.Short() {
		t.Skip("short")
	}
	checkConnStr(t)
	SetLogger(testLogger{t})
	dsn := makeConnStr(t)
	params := dsn.Query()
	params.Set("Connection Timeout", "2")
	dsn.RawQuery = params.Encode()
	conn, err := sql.Open("mssql", dsn.String())
	if err != nil {
		t.Fatal("Open connection failed:", err.Error())
	}
	defer conn.Close()
	if _, err = conn.Exec("waitfor delay '00:00:15'"); err == nil {
		t.Fatal("Exec should fail with timeout, but no failure occurred")
	}
	if neterr, ok := err.(net.Error); !ok || !neterr.Timeout() {
		t.Fatal("failure not a timeout, failed with", err)
	}
	// connection should be usable after timeout
	var val int64
	if err = conn.QueryRow("select 1").Scan(&val); err != nil {
		t.Fatal("Scan failed with", err)
	}
}
// TestTwoQueries runs two sequential queries through the same *sql.DB and
// checks each returns its expected single-row, single-column value.
func TestTwoQueries(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	rows, err := conn.Query("select 1")
	if err != nil {
		t.Fatal("First exec failed", err)
	}
	if !rows.Next() {
		t.Fatal("First query didn't return row")
	}
	var i int
	if err = rows.Scan(&i); err != nil {
		t.Fatal("Scan failed", err)
	}
	if i != 1 {
		t.Fatalf("Wrong value returned %d, should be 1", i)
	}
	// Second query reuses the rows variable; the pool hands out a connection
	// as needed.
	if rows, err = conn.Query("select 2"); err != nil {
		t.Fatal("Second query failed", err)
	}
	if !rows.Next() {
		t.Fatal("Second query didn't return row")
	}
	if err = rows.Scan(&i); err != nil {
		t.Fatal("Scan failed", err)
	}
	if i != 2 {
		t.Fatalf("Wrong value returned %d, should be 2", i)
	}
}
// TestError checks that executing a nonexistent stored procedure surfaces a
// driver Error carrying the server's error number.
func TestError(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	_, err := conn.Query("exec bad")
	if err == nil {
		t.Fatal("Query should fail")
	}
	sqlerr, ok := err.(Error)
	if !ok {
		t.Fatalf("Should be sql error, actually %T, %v", err, err)
	}
	if sqlerr.Number != 2812 { // Could not find stored procedure 'bad'
		t.Fatalf("Should be specific error code 2812, actually %d %s", sqlerr.Number, sqlerr)
	}
}
// TestQueryNoRows verifies that a DDL statement issued through Query returns
// a valid but empty result set.
func TestQueryNoRows(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	rows, err := conn.Query("create table #abc (fld int)")
	if err != nil {
		t.Fatal("Query failed", err)
	}
	defer rows.Close()
	if rows.Next() {
		t.Fatal("Query shouldn't return any rows")
	}
	// Next returning false can also mean an iteration error; the original
	// code never distinguished the two.
	if err = rows.Err(); err != nil {
		t.Fatal("Rows iteration failed", err)
	}
}
// TestQueryManyNullsRow checks that a row consisting entirely of NULLs can
// be scanned into sql.NullInt64 destinations, and that each scans as
// invalid (NULL).
func TestQueryManyNullsRow(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	// QueryRow never returns an error directly; errors surface from Scan.
	// The original code tested an uninitialized err here, which was dead code.
	row := conn.QueryRow("select null, null, null, null, null, null, null, null")
	var v [8]sql.NullInt64
	if err := row.Scan(&v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7]); err != nil {
		t.Fatal("Scan failed", err)
	}
	for i, n := range v {
		if n.Valid {
			t.Errorf("column %d expected to be NULL but got %d", i, n.Int64)
		}
	}
}
// TestOrderBy creates a two-column table inside a transaction, inserts two
// rows, then iterates over a "select ... order by" result set; everything is
// rolled back at the end.
func TestOrderBy(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	tx, err := conn.Begin()
	if err != nil {
		t.Fatal("Begin tran failed", err)
	}
	defer tx.Rollback()
	// Run the setup statements in order; each must succeed.
	setup := []struct {
		query string
		msg   string
	}{
		{"if (exists(select * from INFORMATION_SCHEMA.TABLES where TABLE_NAME='tbl')) drop table tbl", "Drop table failed"},
		{"create table tbl (fld1 int primary key, fld2 int)", "Create table failed"},
		{"insert into tbl (fld1, fld2) values (1, 2)", "Insert failed"},
		{"insert into tbl (fld1, fld2) values (2, 1)", "Insert failed"},
	}
	for _, step := range setup {
		if _, err = tx.Exec(step.query); err != nil {
			t.Fatal(step.msg, err)
		}
	}
	rows, err := tx.Query("select * from tbl order by fld1")
	if err != nil {
		t.Fatal("Query failed", err)
	}
	for rows.Next() {
		var fld1, fld2 int32
		if err = rows.Scan(&fld1, &fld2); err != nil {
			t.Fatal("Scan failed", err)
		}
	}
	if err = rows.Err(); err != nil {
		t.Fatal("Rows have errors", err)
	}
}
// TestScanDecimal verifies a numeric value can be scanned into both a
// float64 and a string destination.
func TestScanDecimal(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	var f float64
	if err := conn.QueryRow("select cast(0.5 as numeric(25,1))").Scan(&f); err != nil {
		t.Error("query row / scan failed:", err.Error())
		return
	}
	if math.Abs(f-0.5) > 0.000001 {
		t.Error("Value is not 0.5:", f)
		return
	}
	var s string
	if err := conn.QueryRow("select cast(-0.05 as numeric(25,2))").Scan(&s); err != nil {
		t.Error("query row / scan failed:", err.Error())
		return
	}
	if s != "-0.05" {
		t.Error("Value is not -0.05:", s)
		return
	}
}
// TestAffectedRows checks RowsAffected for DDL (0 rows) and for plain and
// parameterized single-row inserts (1 row each), all inside a transaction
// that is rolled back so no state leaks.
func TestAffectedRows(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	tx, err := conn.Begin()
	if err != nil {
		t.Fatal("Begin tran failed", err)
	}
	defer tx.Rollback()
	res, err := tx.Exec("create table #foo (bar int)")
	if err != nil {
		t.Fatal("create table failed")
	}
	// DDL affects no rows.
	n, err := res.RowsAffected()
	if err != nil {
		t.Fatal("rows affected failed")
	}
	if n != 0 {
		t.Error("Expected 0 rows affected, got ", n)
	}
	res, err = tx.Exec("insert into #foo (bar) values (1)")
	if err != nil {
		t.Fatal("insert failed")
	}
	n, err = res.RowsAffected()
	if err != nil {
		t.Fatal("rows affected failed")
	}
	if n != 1 {
		t.Error("Expected 1 row affected, got ", n)
	}
	// Same check with a parameterized insert.
	res, err = tx.Exec("insert into #foo (bar) values (?)", 2)
	if err != nil {
		t.Fatal("insert failed")
	}
	n, err = res.RowsAffected()
	if err != nil {
		t.Fatal("rows affected failed")
	}
	if n != 1 {
		t.Error("Expected 1 row affected, got ", n)
	}
}
// TestIdentity verifies that LastInsertId returns the value generated by an
// identity column, and that inserting a duplicate into a unique column fails
// for both literal and parameterized inserts.
func TestIdentity(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	tx, err := conn.Begin()
	if err != nil {
		t.Fatal("Begin tran failed", err)
	}
	defer tx.Rollback()
	// The DDL result is uninteresting; only the error matters.
	_, err = tx.Exec("create table #foo (bar int identity, baz int unique)")
	if err != nil {
		t.Fatal("create table failed")
	}
	res, err := tx.Exec("insert into #foo (baz) values (1)")
	if err != nil {
		t.Fatal("insert failed")
	}
	n, err := res.LastInsertId()
	if err != nil {
		t.Fatal("last insert id failed")
	}
	if n != 1 {
		t.Error("Expected 1 for identity, got ", n)
	}
	res, err = tx.Exec("insert into #foo (baz) values (20)")
	if err != nil {
		t.Fatal("insert failed")
	}
	n, err = res.LastInsertId()
	if err != nil {
		t.Fatal("last insert id failed")
	}
	if n != 2 {
		t.Error("Expected 2 for identity, got ", n)
	}
	// Duplicate values must violate the unique constraint. The results are
	// discarded deliberately (previously assigned to res but never used).
	_, err = tx.Exec("insert into #foo (baz) values (1)")
	if err == nil {
		t.Fatal("insert should fail")
	}
	_, err = tx.Exec("insert into #foo (baz) values (?)", 1)
	if err == nil {
		t.Fatal("insert should fail")
	}
}
// queryParamRoundTrip sends param through a one-column SELECT and scans the
// echoed value back into dest, panicking (via log.Panicf) on any failure.
func queryParamRoundTrip(db *sql.DB, param interface{}, dest interface{}) {
	if err := db.QueryRow("select ?", param).Scan(dest); err != nil {
		log.Panicf("select / scan failed: %v", err.Error())
	}
}
// TestDateTimeParam round-trips a range of time.Time values through the
// server and verifies the result matches, after clipping values outside the
// server-representable range [mindate, maxdate] (SQL Server datetime2 limits).
func TestDateTimeParam(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	type testStruct struct {
		t time.Time
	}
	var emptydate time.Time
	mindate := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC)
	maxdate := time.Date(9999, 12, 31, 23, 59, 59, 999999900, time.UTC)
	values := []testStruct{
		{time.Date(2015, time.October, 12, 10, 22, 0, 0, time.FixedZone("PST", -8*60*60))}, // back to the future day
		{time.Date(1961, time.April, 12, 9, 7, 0, 0, time.FixedZone("MSK", 3*60*60))},      // First man in space
		{time.Date(1969, time.July, 20, 20, 18, 0, 0, time.UTC)},                           // First man on the Moon
		{time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC)},                                      // UNIX date
		{time.Date(1982, 1, 3, 12, 13, 14, 300, time.FixedZone("YAKT", 9*60*60))},          // some random date
		{time.Date(4, 6, 3, 12, 13, 14, 150000000, time.UTC)},                              // some random date
		{mindate}, // minimal value
		{maxdate}, // maximum value
		{time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC)}, // just over limit
		{emptydate},
	}
	for _, test := range values {
		// gofmt: no space between func and the parameter list.
		t.Run(fmt.Sprintf("Test for %v", test.t), func(t *testing.T) {
			var t2 time.Time
			queryParamRoundTrip(conn, test.t, &t2)
			expected := test.t
			// Out-of-range values are expected to be clipped to the limits.
			if test.t.Before(mindate) {
				expected = mindate
			}
			if test.t.After(maxdate) {
				expected = maxdate
			}
			if expected.Sub(t2) != 0 {
				t.Errorf("expected: '%s', got: '%s' delta: %d", expected, t2, expected.Sub(t2))
			}
		})
	}
}
// TestUniqueIdentifierParam round-trips a uniqueidentifier passed either as a
// raw []byte or as a GUID string and checks both decode to the same value.
func TestUniqueIdentifierParam(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	type testStruct struct {
		name string
		uuid interface{}
	}
	expected := UniqueIdentifier{0x01, 0x23, 0x45, 0x67,
		0x89, 0xAB,
		0xCD, 0xEF,
		0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF,
	}
	values := []testStruct{
		{
			"[]byte",
			// Note the first three fields are byte-swapped relative to
			// expected — presumably the GUID mixed-endian wire layout;
			// verify against the driver's UniqueIdentifier encoding.
			[]byte{0x67, 0x45, 0x23, 0x01,
				0xAB, 0x89,
				0xEF, 0xCD,
				0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF}},
		{
			"string",
			"01234567-89ab-cdef-0123-456789abcdef"},
	}
	for _, test := range values {
		t.Run(test.name, func(t *testing.T) {
			var uuid2 UniqueIdentifier
			err := conn.QueryRow("select ?", test.uuid).Scan(&uuid2)
			if err != nil {
				t.Fatal("select / scan failed", err.Error())
			}
			if expected != uuid2 {
				t.Errorf("uniqueidentifier does not match: '%s' '%s'", expected, uuid2)
			}
		})
	}
}
// TestBigQuery runs a query returning a large (10000-row) result set, reads
// only one row before closing, and then checks the connection is still usable
// for a subsequent query (i.e. the remaining rows are drained correctly).
func TestBigQuery(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	rows, err := conn.Query(`WITH n(n) AS
	(
	    SELECT 1
	    UNION ALL
	    SELECT n+1 FROM n WHERE n < 10000
	)
	SELECT n, @@version FROM n ORDER BY n
	OPTION (MAXRECURSION 10000);`)
	if err != nil {
		t.Fatal("cannot exec query", err)
	}
	// Deliberately abandon the result set after a single row.
	rows.Next()
	rows.Close()
	var res int
	err = conn.QueryRow("select 0").Scan(&res)
	if err != nil {
		t.Fatal("cannot scan value", err)
	}
	if res != 0 {
		t.Fatal("expected 0, got ", res)
	}
}
// TestBug32 is a regression test: inserting an empty string through
// nullif(?, '') into a nullable bit column must not fail.
// See the issue it is named after in the driver's tracker.
func TestBug32(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	tx, err := conn.Begin()
	if err != nil {
		t.Fatal("Begin tran failed", err)
	}
	defer tx.Rollback()
	_, err = tx.Exec("if (exists(select * from INFORMATION_SCHEMA.TABLES where TABLE_NAME='tbl')) drop table tbl")
	if err != nil {
		t.Fatal("Drop table failed", err)
	}
	_, err = tx.Exec("create table tbl(a int primary key,fld bit null)")
	if err != nil {
		t.Fatal("Create table failed", err)
	}
	_, err = tx.Exec("insert into tbl (a,fld) values (1,nullif(?, ''))", "")
	if err != nil {
		t.Fatal("Insert failed", err)
	}
}
// TestIgnoreEmptyResults checks that a query prefixed with "set nocount on"
// (which produces an empty result before the real one) still returns the row.
func TestIgnoreEmptyResults(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	rows, err := conn.Query("set nocount on; select 2")
	if err != nil {
		t.Fatal("Query failed", err.Error())
	}
	// Close the result set when done (was previously leaked).
	defer rows.Close()
	if !rows.Next() {
		t.Fatal("Query didn't return row")
	}
	var fld1 int32
	err = rows.Scan(&fld1)
	if err != nil {
		t.Fatal("Scan failed", err)
	}
	if fld1 != 2 {
		t.Fatal("Returned value doesn't match")
	}
}
// TestStmt_SetQueryNotification smoke-tests SetQueryNotification on a
// prepared statement. The notification result itself is not checked:
// notifications are sent to Service Broker.
// See for more info: https://github.com/denisenkom/go-mssqldb/pull/90
func TestStmt_SetQueryNotification(t *testing.T) {
	checkConnStr(t)
	mssqldriver := driverWithProcess(t)
	cn, err := mssqldriver.Open(makeConnStr(t).String())
	if err != nil {
		t.Fatalf("failed to open connection: %v", err)
	}
	// Close the connection when done (was previously leaked).
	defer cn.Close()
	stmt, err := cn.Prepare("SELECT 1")
	if err != nil {
		// Must be fatal: continuing would dereference a nil stmt below.
		t.Fatal("Connection failed", err)
	}
	sqlstmt := stmt.(*Stmt)
	sqlstmt.SetQueryNotification("ABC", "service=WebCacheNotifications", time.Hour)
	rows, err := sqlstmt.Query(nil)
	if err == nil {
		rows.Close()
	}
}
// TestErrorInfo verifies that server errors surface as the driver's Error
// type and expose number, message, class, state and line number, both for a
// compile-time error (invalid column) and an explicit RAISERROR.
func TestErrorInfo(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	_, err := conn.Exec("select bad")
	if sqlError, ok := err.(Error); ok {
		if sqlError.SQLErrorNumber() != 207 /*invalid column name*/ {
			t.Errorf("Query failed with unexpected error number %d %s", sqlError.SQLErrorNumber(), sqlError.SQLErrorMessage())
		}
		if sqlError.SQLErrorLineNo() != 1 {
			t.Errorf("Unexpected line number returned %v, expected %v", sqlError.SQLErrorLineNo(), 1)
		}
	} else {
		// Typo fixed: was "SQLErorr".
		t.Error("Failed to convert error to SQLError", err)
	}
	_, err = conn.Exec("RAISERROR('test message', 18, 111)")
	if sqlError, ok := err.(Error); ok {
		// 50000 is the default error number for user-raised errors.
		if sqlError.SQLErrorNumber() != 50000 {
			t.Errorf("Query failed with unexpected error number %d %s", sqlError.SQLErrorNumber(), sqlError.SQLErrorMessage())
		}
		if sqlError.SQLErrorMessage() != "test message" {
			t.Fail()
		}
		if sqlError.SQLErrorClass() != 18 {
			t.Fail()
		}
		if sqlError.SQLErrorState() != 111 {
			t.Fail()
		}
		if sqlError.SQLErrorLineNo() != 1 {
			t.Errorf("Unexpected line number returned %v, expected %v", sqlError.SQLErrorLineNo(), 1)
		}
		// just call those methods to make sure we have some coverage for them
		sqlError.SQLErrorServerName()
		sqlError.SQLErrorProcName()
	} else {
		t.Error("Failed to convert error to SQLError", err)
	}
}
// TestSetLanguage changes the session language and then reads back a
// language-formatted date string, only logging the value (no assertion on
// the localized format itself).
func TestSetLanguage(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	_, err := conn.Exec("set language russian")
	if err != nil {
		t.Errorf("Query failed with unexpected error %s", err)
	}
	row := conn.QueryRow("select cast(getdate() as varchar(50))")
	var val interface{}
	err = row.Scan(&val)
	if err != nil {
		t.Errorf("Query failed with unexpected error %s", err)
	}
	t.Log("Returned value", val)
}
// TestConnectionClosing repeatedly runs and fully drains a query, verifying
// the pool never holds more than one open connection (i.e. connections are
// returned to the pool rather than leaked).
func TestConnectionClosing(t *testing.T) {
	pool := open(t)
	defer pool.Close()
	for iteration := 1; iteration <= 100; iteration++ {
		if pool.Stats().OpenConnections > 1 {
			t.Errorf("Open connections is expected to stay <= 1, but it is %d", pool.Stats().OpenConnections)
			return
		}
		rows, err := pool.Query("select 1")
		if err != nil {
			t.Fatalf("Query failed with unexpected error %s", err)
		}
		// Drain the result set so the connection goes back to the pool.
		for rows.Next() {
			var value interface{}
			if err := rows.Scan(&value); err != nil {
				t.Fatalf("Query failed with unexpected error %s", err)
			}
		}
	}
}
// TestBeginTranError forces failures in both halves of the begin-transaction
// protocol by severing the underlying transport, and checks the connection is
// marked bad in each case. Exact statement order matters here: the transport
// is closed at precise points between send and receive.
func TestBeginTranError(t *testing.T) {
	checkConnStr(t)
	drv := driverWithProcess(t)
	conn, err := drv.open(context.Background(), makeConnStr(t).String())
	if err != nil {
		t.Fatalf("Open failed with error %v", err)
	}
	defer conn.Close()
	// close actual connection to make begin transaction to fail during sending of a packet
	conn.sess.buf.transport.Close()
	ctx := context.Background()
	_, err = conn.begin(ctx, isolationSnapshot)
	if err == nil || conn.connectionGood == true {
		t.Errorf("begin should fail as a bad connection, err=%v", err)
	}
	// reopen connection
	conn, err = drv.open(context.Background(), makeConnStr(t).String())
	if err != nil {
		t.Fatalf("Open failed with error %v", err)
	}
	err = conn.sendBeginRequest(ctx, isolationSerializable)
	if err != nil {
		t.Fatalf("sendBeginRequest failed with error %v", err)
	}
	// close connection to cause processBeginResponse to fail
	conn.sess.buf.transport.Close()
	_, err = conn.processBeginResponse(ctx)
	switch err {
	case nil:
		t.Error("processBeginResponse should fail but it succeeded")
	case driver.ErrBadConn:
		// The request was already sent, so the failure must not be ErrBadConn
		// (which would trigger a retry by database/sql).
		t.Error("processBeginResponse should fail with error different from ErrBadConn but it did")
	}
	if conn.connectionGood {
		t.Fatal("Connection should be in a bad state")
	}
}
// TestCommitTranError forces failures in the commit protocol by severing the
// transport at various points, and also checks that committing without an
// open transaction fails with an error other than ErrBadConn.
func TestCommitTranError(t *testing.T) {
	checkConnStr(t)
	drv := driverWithProcess(t)
	conn, err := drv.open(context.Background(), makeConnStr(t).String())
	if err != nil {
		t.Fatalf("Open failed with error %v", err)
	}
	defer conn.Close()
	// close actual connection to make commit transaction to fail during sending of a packet
	conn.sess.buf.transport.Close()
	ctx := context.Background()
	err = conn.Commit()
	if err == nil || conn.connectionGood {
		t.Errorf("begin should fail and set the connection to bad, but it returned %v", err)
	}
	// reopen connection
	conn, err = drv.open(context.Background(), makeConnStr(t).String())
	if err != nil {
		t.Fatalf("Open failed with error %v", err)
	}
	err = conn.sendCommitRequest()
	if err != nil {
		t.Fatalf("sendCommitRequest failed with error %v", err)
	}
	// close connection to cause processBeginResponse to fail
	conn.sess.buf.transport.Close()
	err = conn.simpleProcessResp(ctx)
	switch err {
	case nil:
		t.Error("simpleProcessResp should fail but it succeeded")
	case driver.ErrBadConn:
		t.Error("simpleProcessResp should fail with error different from ErrBadConn but it did")
	}
	if conn.connectionGood {
		t.Fatal("Connection should be in a bad state")
	}
	// reopen connection; check the error before deferring Close so a failed
	// open cannot cause a nil dereference (defer used to come first).
	conn, err = drv.open(context.Background(), makeConnStr(t).String())
	if err != nil {
		t.Fatalf("Open failed with error %v", err)
	}
	defer conn.Close()
	// should fail because there is no transaction
	err = conn.Commit()
	switch err {
	case nil:
		t.Error("Commit should fail but it succeeded")
	case driver.ErrBadConn:
		t.Error("Commit should fail with error different from ErrBadConn but it did")
	}
}
// TestRollbackTranError forces failures in the rollback protocol by severing
// the transport, and checks that rolling back without an open transaction
// fails with an error other than ErrBadConn.
func TestRollbackTranError(t *testing.T) {
	checkConnStr(t)
	drv := driverWithProcess(t)
	conn, err := drv.open(context.Background(), makeConnStr(t).String())
	if err != nil {
		t.Fatalf("Open failed with error %v", err)
	}
	defer conn.Close()
	// close actual connection to make rollback to fail during sending of a packet
	conn.sess.buf.transport.Close()
	ctx := context.Background()
	err = conn.Rollback()
	if err == nil || conn.connectionGood {
		t.Errorf("Rollback should fail and set connection to bad but it returned %v", err)
	}
	// reopen connection
	conn, err = drv.open(context.Background(), makeConnStr(t).String())
	if err != nil {
		t.Fatalf("Open failed with error %v", err)
	}
	err = conn.sendRollbackRequest()
	if err != nil {
		// Message fixed: this is the rollback, not the commit, request.
		t.Fatalf("sendRollbackRequest failed with error %v", err)
	}
	// close connection to cause simpleProcessResp to fail
	conn.sess.buf.transport.Close()
	err = conn.simpleProcessResp(ctx)
	switch err {
	case nil:
		t.Error("simpleProcessResp should fail but it succeeded")
	case driver.ErrBadConn:
		t.Error("simpleProcessResp should fail with error different from ErrBadConn but it did")
	}
	if conn.connectionGood {
		t.Fatal("Connection should be in a bad state")
	}
	// reopen connection; check the error before deferring Close so a failed
	// open cannot cause a nil dereference (defer used to come first).
	conn, err = drv.open(context.Background(), makeConnStr(t).String())
	if err != nil {
		t.Fatalf("Open failed with error %v", err)
	}
	defer conn.Close()
	// should fail because there is no transaction
	err = conn.Rollback()
	switch err {
	case nil:
		t.Error("Rollback should fail but it succeeded")
	case driver.ErrBadConn:
		t.Error("Rollback should fail with error different from ErrBadConn but it did")
	}
}
// TestSendQueryErrors checks the failure paths of Stmt.Query: an
// unserializable parameter value, and a severed transport both with and
// without parameters (the two take different send paths in the driver).
func TestSendQueryErrors(t *testing.T) {
	checkConnStr(t)
	drv := driverWithProcess(t)
	conn, err := drv.open(context.Background(), makeConnStr(t).String())
	if err != nil {
		t.FailNow()
	}
	defer conn.Close()
	stmt, err := conn.prepareContext(context.Background(), "select 1")
	if err != nil {
		t.FailNow()
	}
	// should fail because parameter is invalid
	_, err = stmt.Query([]driver.Value{conn})
	if err == nil {
		t.Fail()
	}
	// close actual connection to make commit transaction to fail during sending of a packet
	conn.sess.buf.transport.Close()
	// should fail because connection is closed
	_, err = stmt.Query([]driver.Value{})
	if err == nil || stmt.c.connectionGood {
		t.Fail()
	}
	stmt, err = conn.prepareContext(context.Background(), "select ?")
	if err != nil {
		t.FailNow()
	}
	// should fail because connection is closed
	_, err = stmt.Query([]driver.Value{int64(1)})
	if err == nil || stmt.c.connectionGood {
		t.Fail()
	}
}
// TestProcessQueryErrors checks that a transport failure while reading a
// query response yields an error other than ErrBadConn (the query was
// already sent, so it must not be retried) and marks the connection bad.
func TestProcessQueryErrors(t *testing.T) {
	checkConnStr(t)
	drv := driverWithProcess(t)
	conn, err := drv.open(context.Background(), makeConnStr(t).String())
	if err != nil {
		t.Fatal("open expected to succeed, but it failed with", err)
	}
	// Close the connection when done (was previously leaked).
	defer conn.Close()
	stmt, err := conn.prepareContext(context.Background(), "select 1")
	if err != nil {
		t.Fatal("prepareContext expected to succeed, but it failed with", err)
	}
	err = stmt.sendQuery([]namedValue{})
	if err != nil {
		t.Fatal("sendQuery expected to succeed, but it failed with", err)
	}
	// close actual connection to make reading response to fail
	conn.sess.buf.transport.Close()
	_, err = stmt.processQueryResponse(context.Background())
	if err == nil {
		t.Error("processQueryResponse expected to fail but it succeeded")
	}
	// should not fail with ErrBadConn because query was successfully sent to server
	if err == driver.ErrBadConn {
		t.Error("processQueryResponse expected to fail with error other than ErrBadConn but it failed with it")
	}
	if conn.connectionGood {
		t.Fatal("Connection should be in a bad state")
	}
}
// TestSendExecErrors mirrors TestSendQueryErrors for Stmt.Exec: an invalid
// parameter value, then a severed transport both without and with parameters.
func TestSendExecErrors(t *testing.T) {
	checkConnStr(t)
	drv := driverWithProcess(t)
	conn, err := drv.open(context.Background(), makeConnStr(t).String())
	if err != nil {
		t.FailNow()
	}
	defer conn.Close()
	stmt, err := conn.prepareContext(context.Background(), "select 1")
	if err != nil {
		t.FailNow()
	}
	// should fail because parameter is invalid
	_, err = stmt.Exec([]driver.Value{conn})
	if err == nil {
		t.Fail()
	}
	// close actual connection to make commit transaction to fail during sending of a packet
	conn.sess.buf.transport.Close()
	// should fail because connection is closed
	_, err = stmt.Exec([]driver.Value{})
	if err == nil || stmt.c.connectionGood {
		t.Fail()
	}
	stmt, err = conn.prepareContext(context.Background(), "select ?")
	if err != nil {
		t.FailNow()
	}
	// should fail because connection is closed
	_, err = stmt.Exec([]driver.Value{int64(1)})
	if err == nil || stmt.c.connectionGood {
		t.Fail()
	}
}
// TestLongConnection runs WAITFOR DELAY queries against connections with
// different "connection timeout" settings, checking that queries longer than
// the timeout fail and shorter ones (or a zero/disabled timeout) succeed.
func TestLongConnection(t *testing.T) {
	checkConnStr(t)
	list := []struct {
		connTimeout  string        // "connection timeout" DSN parameter, seconds
		queryTimeout string        // WAITFOR DELAY duration (hh:mm:ss)
		ctxTimeout   time.Duration // outer context deadline for the exec
		wantFail     bool
	}{
		{"1", "00:00:02", 6 * time.Second, true},
		{"2", "00:00:01", 6 * time.Second, false},
		// Check no connection timeout.
		{"0", "00:00:01", 2 * time.Second, false},
		// {"0", "00:00:45", 60 * time.Second, false}, // Skip for normal testing to limit time.
	}
	for i, item := range list {
		t.Run(fmt.Sprintf("item-index-%d,want-fail=%t", i, item.wantFail), func(t *testing.T) {
			dsn := makeConnStr(t)
			dsnParams := dsn.Query()
			dsnParams.Set("connection timeout", item.connTimeout)
			dsn.RawQuery = dsnParams.Encode()
			db, err := sql.Open("sqlserver", dsn.String())
			if err != nil {
				t.Fatalf("failed to open driver sqlserver")
			}
			defer db.Close()
			ctx, cancel := context.WithTimeout(context.Background(), item.ctxTimeout)
			defer cancel()
			_, err = db.ExecContext(ctx, "WAITFOR DELAY '"+item.queryTimeout+"';")
			if item.wantFail && err == nil {
				t.Fatal("exec no error")
			}
			if !item.wantFail && err != nil {
				t.Fatal("exec error", err)
			}
		})
	}
}
// TestNextResultSet verifies multi-result-set navigation: Next is exhausted
// (and stays false) on the first set, NextResultSet advances exactly once,
// and returns false after the last set.
func TestNextResultSet(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	rows, err := conn.Query("select 1; select 2")
	if err != nil {
		t.Fatal("Query failed", err.Error())
	}
	// Check rows.Err after Close (defers run LIFO, so this fires last).
	defer func() {
		err := rows.Err()
		if err != nil {
			t.Error("unexpected error:", err)
		}
	}()
	defer rows.Close()
	if !rows.Next() {
		t.Fatal("Query didn't return row")
	}
	var fld1, fld2 int32
	err = rows.Scan(&fld1)
	if err != nil {
		t.Fatal("Scan failed", err)
	}
	if fld1 != 1 {
		t.Fatal("Returned value doesn't match")
	}
	if rows.Next() {
		t.Fatal("Query returned unexpected second row.")
	}
	// calling next again should still return false
	if rows.Next() {
		t.Fatal("Query returned unexpected second row.")
	}
	if !rows.NextResultSet() {
		t.Fatal("NextResultSet should return true but returned false")
	}
	if !rows.Next() {
		t.Fatal("Query didn't return row")
	}
	err = rows.Scan(&fld2)
	if err != nil {
		t.Fatal("Scan failed", err)
	}
	if fld2 != 2 {
		t.Fatal("Returned value doesn't match")
	}
	if rows.NextResultSet() {
		t.Fatal("NextResultSet should return false but returned true")
	}
}
// TestColumnTypeIntrospection checks ColumnTypes metadata (database type
// name, length, precision/scale, scan type) for one expression of every
// supported SQL Server type.
func TestColumnTypeIntrospection(t *testing.T) {
	type tst struct {
		expr         string       // SQL expression to select
		typeName     string       // expected DatabaseTypeName
		reflType     reflect.Type // expected ScanType
		hasSize      bool
		size         int64
		hasPrecScale bool
		precision    int64
		scale        int64
	}
	tests := []tst{
		{"cast(1 as bit)", "BIT", reflect.TypeOf(true), false, 0, false, 0, 0},
		{"cast(1 as tinyint)", "TINYINT", reflect.TypeOf(int64(0)), false, 0, false, 0, 0},
		{"cast(1 as smallint)", "SMALLINT", reflect.TypeOf(int64(0)), false, 0, false, 0, 0},
		{"1", "INT", reflect.TypeOf(int64(0)), false, 0, false, 0, 0},
		{"cast(1 as bigint)", "BIGINT", reflect.TypeOf(int64(0)), false, 0, false, 0, 0},
		{"cast(1 as real)", "REAL", reflect.TypeOf(0.0), false, 0, false, 0, 0},
		{"cast(1 as float)", "FLOAT", reflect.TypeOf(0.0), false, 0, false, 0, 0},
		{"cast('abc' as varbinary(3))", "VARBINARY", reflect.TypeOf([]byte{}), true, 3, false, 0, 0},
		{"cast('abc' as varbinary(max))", "VARBINARY", reflect.TypeOf([]byte{}), true, 2147483645, false, 0, 0},
		{"cast(1 as datetime)", "DATETIME", reflect.TypeOf(time.Time{}), false, 0, false, 0, 0},
		{"cast(1 as smalldatetime)", "SMALLDATETIME", reflect.TypeOf(time.Time{}), false, 0, false, 0, 0},
		{"cast(getdate() as datetime2(7))", "DATETIME2", reflect.TypeOf(time.Time{}), false, 0, false, 0, 0},
		{"cast(getdate() as datetimeoffset(7))", "DATETIMEOFFSET", reflect.TypeOf(time.Time{}), false, 0, false, 0, 0},
		{"cast(getdate() as date)", "DATE", reflect.TypeOf(time.Time{}), false, 0, false, 0, 0},
		{"cast(getdate() as time)", "TIME", reflect.TypeOf(time.Time{}), false, 0, false, 0, 0},
		{"'abc'", "VARCHAR", reflect.TypeOf(""), true, 3, false, 0, 0},
		{"cast('abc' as varchar(max))", "VARCHAR", reflect.TypeOf(""), true, 2147483645, false, 0, 0},
		{"N'abc'", "NVARCHAR", reflect.TypeOf(""), true, 3, false, 0, 0},
		{"cast(N'abc' as NVARCHAR(MAX))", "NVARCHAR", reflect.TypeOf(""), true, 1073741822, false, 0, 0},
		{"cast(1 as decimal)", "DECIMAL", reflect.TypeOf([]byte{}), false, 0, true, 18, 0},
		{"cast(1 as decimal(5, 2))", "DECIMAL", reflect.TypeOf([]byte{}), false, 0, true, 5, 2},
		{"cast(1 as numeric(10, 4))", "DECIMAL", reflect.TypeOf([]byte{}), false, 0, true, 10, 4},
		{"cast(1 as money)", "MONEY", reflect.TypeOf([]byte{}), false, 0, false, 0, 0},
		{"cast(1 as smallmoney)", "SMALLMONEY", reflect.TypeOf([]byte{}), false, 0, false, 0, 0},
		{"cast(0x6F9619FF8B86D011B42D00C04FC964FF as uniqueidentifier)", "UNIQUEIDENTIFIER", reflect.TypeOf([]byte{}), false, 0, false, 0, 0},
		{"cast('<root/>' as xml)", "XML", reflect.TypeOf(""), true, 1073741822, false, 0, 0},
		{"cast('abc' as text)", "TEXT", reflect.TypeOf(""), true, 2147483647, false, 0, 0},
		{"cast(N'abc' as ntext)", "NTEXT", reflect.TypeOf(""), true, 1073741823, false, 0, 0},
		{"cast('abc' as image)", "IMAGE", reflect.TypeOf([]byte{}), true, 2147483647, false, 0, 0},
		{"cast('abc' as char(3))", "CHAR", reflect.TypeOf(""), true, 3, false, 0, 0},
		{"cast(N'abc' as nchar(3))", "NCHAR", reflect.TypeOf(""), true, 3, false, 0, 0},
		{"cast(1 as sql_variant)", "SQL_VARIANT", reflect.TypeOf(nil), false, 0, false, 0, 0},
	}
	conn := open(t)
	defer conn.Close()
	for _, tt := range tests {
		rows, err := conn.Query("select " + tt.expr)
		if err != nil {
			t.Fatalf("Query failed with unexpected error %s", err)
		}
		ct, err := rows.ColumnTypes()
		if err != nil {
			rows.Close()
			t.Fatalf("Query failed with unexpected error %s", err)
		}
		if ct[0].DatabaseTypeName() != tt.typeName {
			t.Errorf("Expected type %s but returned %s", tt.typeName, ct[0].DatabaseTypeName())
		}
		size, ok := ct[0].Length()
		if ok != tt.hasSize {
			t.Errorf("Expected has size %v but returned %v for %s", tt.hasSize, ok, tt.expr)
		} else {
			if ok && size != tt.size {
				t.Errorf("Expected size %d but returned %d for %s", tt.size, size, tt.expr)
			}
		}
		prec, scale, ok := ct[0].DecimalSize()
		if ok != tt.hasPrecScale {
			t.Errorf("Expected has prec/scale %v but returned %v for %s", tt.hasPrecScale, ok, tt.expr)
		} else {
			if ok && prec != tt.precision {
				t.Errorf("Expected precision %d but returned %d for %s", tt.precision, prec, tt.expr)
			}
			if ok && scale != tt.scale {
				t.Errorf("Expected scale %d but returned %d for %s", tt.scale, scale, tt.expr)
			}
		}
		if ct[0].ScanType() != tt.reflType {
			t.Errorf("Expected ScanType %v but got %v for %s", tt.reflType, ct[0].ScanType(), tt.expr)
		}
		// Close each iteration's result set; previously every rows was leaked
		// for the remainder of the test.
		rows.Close()
	}
}
// TestColumnIntrospection checks per-column metadata (name, type, nullability,
// length, precision/scale) by declaring a table variable with known column
// definitions and selecting from it.
func TestColumnIntrospection(t *testing.T) {
	type tst struct {
		expr         string // column definition for the table variable
		fieldName    string
		typeName     string
		nullable     bool
		hasSize      bool
		size         int64
		hasPrecScale bool
		precision    int64
		scale        int64
	}
	tests := []tst{
		{"f1 int null", "f1", "INT", true, false, 0, false, 0, 0},
		{"f2 varchar(15) not null", "f2", "VARCHAR", false, true, 15, false, 0, 0},
		{"f3 decimal(5, 2) null", "f3", "DECIMAL", true, false, 0, true, 5, 2},
	}
	conn := open(t)
	defer conn.Close()
	// making table variable with specified fields and making a select from it
	exprs := make([]string, len(tests))
	for i, test := range tests {
		exprs[i] = test.expr
	}
	exprJoined := strings.Join(exprs, ",")
	rows, err := conn.Query(fmt.Sprintf("declare @tbl table(%s); select * from @tbl", exprJoined))
	if err != nil {
		t.Fatalf("Query failed with unexpected error %s", err)
	}
	// Close the result set when done (was previously leaked).
	defer rows.Close()
	ct, err := rows.ColumnTypes()
	if err != nil {
		t.Fatalf("ColumnTypes failed with unexpected error %s", err)
	}
	for i, test := range tests {
		if ct[i].Name() != test.fieldName {
			t.Errorf("Field expected have name %s but it has name %s", test.fieldName, ct[i].Name())
		}
		if ct[i].DatabaseTypeName() != test.typeName {
			t.Errorf("Invalid type name returned %s expected %s", ct[i].DatabaseTypeName(), test.typeName)
		}
		nullable, ok := ct[i].Nullable()
		if ok {
			if nullable != test.nullable {
				t.Errorf("Invalid nullable value returned %v", nullable)
			}
		} else {
			t.Error("Nullable was expected to support Nullable but it didn't")
		}
		size, ok := ct[i].Length()
		if ok != test.hasSize {
			t.Errorf("Expected has size %v but returned %v for %s", test.hasSize, ok, test.expr)
		} else {
			if ok && size != test.size {
				t.Errorf("Expected size %d but returned %d for %s", test.size, size, test.expr)
			}
		}
		prec, scale, ok := ct[i].DecimalSize()
		if ok != test.hasPrecScale {
			t.Errorf("Expected has prec/scale %v but returned %v for %s", test.hasPrecScale, ok, test.expr)
		} else {
			if ok && prec != test.precision {
				t.Errorf("Expected precision %d but returned %d for %s", test.precision, prec, test.expr)
			}
			if ok && scale != test.scale {
				t.Errorf("Expected scale %d but returned %d for %s", test.scale, scale, test.expr)
			}
		}
	}
}
// TestContext verifies the Context-taking API on a serializable transaction:
// the requested isolation level is reflected in DBCC USEROPTIONS, and
// QueryRowContext/ExecContext/PrepareContext all work within the tx.
func TestContext(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	opts := &sql.TxOptions{
		Isolation: sql.LevelSerializable,
	}
	ctx := context.Background()
	tx, err := conn.BeginTx(ctx, opts)
	if err != nil {
		t.Errorf("BeginTx failed with unexpected error %s", err)
		return
	}
	// Finish the transaction so its connection is released.
	defer tx.Rollback()
	rows, err := tx.QueryContext(ctx, "DBCC USEROPTIONS")
	// Check the error before touching rows (was previously unchecked,
	// risking a nil dereference on failure).
	if err != nil {
		t.Errorf("QueryContext failed with unexpected error %s", err)
		return
	}
	defer rows.Close()
	properties := make(map[string]string)
	for rows.Next() {
		var name, value string
		if err = rows.Scan(&name, &value); err != nil {
			t.Errorf("Scan failed with unexpected error %s", err)
		}
		properties[name] = value
	}
	if properties["isolation level"] != "serializable" {
		t.Errorf("Expected isolation level to be serializable but it is %s", properties["isolation level"])
	}
	row := tx.QueryRowContext(ctx, "select 1")
	var val int64
	if err = row.Scan(&val); err != nil {
		t.Errorf("QueryRowContext failed with unexpected error %s", err)
	}
	if val != 1 {
		t.Error("Incorrect value returned from query")
	}
	_, err = tx.ExecContext(ctx, "select 1")
	if err != nil {
		t.Errorf("ExecContext failed with unexpected error %s", err)
		return
	}
	_, err = tx.PrepareContext(ctx, "select 1")
	if err != nil {
		t.Errorf("PrepareContext failed with unexpected error %s", err)
		return
	}
}
// TestBeginTxtReadOnlyNotSupported verifies that requesting a read-only
// transaction fails, since MSSQL does not support them.
func TestBeginTxtReadOnlyNotSupported(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	if _, err := conn.BeginTx(context.Background(), &sql.TxOptions{ReadOnly: true}); err == nil {
		t.Error("BeginTx expected to fail for read only transaction because MSSQL doesn't support it, but it succeeded")
	}
}
// TestConn_BeginTx runs two concurrent transactions, rolls one back and
// commits the other, and verifies only the committed row (value 2) survives.
func TestConn_BeginTx(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	_, err := conn.Exec("create table test (f int)")
	defer conn.Exec("drop table test")
	if err != nil {
		t.Fatal("create table failed with error", err)
	}
	tx1, err := conn.BeginTx(context.Background(), nil)
	if err != nil {
		t.Fatal("BeginTx failed with error", err)
	}
	tx2, err := conn.BeginTx(context.Background(), nil)
	if err != nil {
		t.Fatal("BeginTx failed with error", err)
	}
	_, err = tx1.Exec("insert into test (f) values (1)")
	if err != nil {
		t.Fatal("insert failed with error", err)
	}
	_, err = tx2.Exec("insert into test (f) values (2)")
	if err != nil {
		t.Fatal("insert failed with error", err)
	}
	tx1.Rollback()
	tx2.Commit()
	rows, err := conn.Query("select f from test")
	if err != nil {
		t.Fatal("select failed with error", err)
	}
	values := []int64{}
	for rows.Next() {
		var val int64
		err = rows.Scan(&val)
		if err != nil {
			t.Fatal("scan failed with error", err)
		}
		values = append(values, val)
	}
	if !reflect.DeepEqual(values, []int64{2}) {
		// Message fixed: the expected result is [2] (tx1's row was rolled
		// back), not [1].
		t.Errorf("Values is expected to be [2] but it is %v", values)
	}
}
// TestNamedParameters checks :name placeholders bound via sql.Named,
// including re-using one parameter (:param2) twice in the query.
func TestNamedParameters(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	row := conn.QueryRow(
		"select :param2, :param1, :param2",
		sql.Named("param1", 1),
		sql.Named("param2", 2))
	var col1, col2, col3 int64
	err := row.Scan(&col1, &col2, &col3)
	if err != nil {
		t.Errorf("Scan failed with unexpected error %s", err)
		return
	}
	if col1 != 2 || col2 != 1 || col3 != 2 {
		t.Errorf("Unexpected values returned col1=%d, col2=%d, col3=%d", col1, col2, col3)
	}
}
// TestBadNamedParameters checks that referencing a named placeholder that was
// not supplied (:param1 vs sql.Named("badparam1", ...)) produces an error.
func TestBadNamedParameters(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	row := conn.QueryRow(
		"select :param2, :param1, :param2",
		sql.Named("badparam1", 1),
		sql.Named("param2", 2))
	var col1, col2, col3 int64
	err := row.Scan(&col1, &col2, &col3)
	if err == nil {
		t.Error("Scan succeeded unexpectedly")
		return
	}
	t.Logf("Scan failed as expected with error %s", err)
}
// TestMixedParameters mixes ordinal (:2) and named (:param1, :param2)
// placeholders in one query; positional arguments not referenced by an
// ordinal placeholder are simply unused.
func TestMixedParameters(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	row := conn.QueryRow(
		"select :2, :param1, :param2",
		5, // this parameter will be unused
		6,
		sql.Named("param1", 1),
		sql.Named("param2", 2))
	var col1, col2, col3 int64
	err := row.Scan(&col1, &col2, &col3)
	if err != nil {
		t.Errorf("Scan failed with unexpected error %s", err)
		return
	}
	if col1 != 6 || col2 != 1 || col3 != 2 {
		t.Errorf("Unexpected values returned col1=%d, col2=%d, col3=%d", col1, col2, col3)
	}
}
/*
func TestMixedParametersExample(t *testing.T) {
conn := open(t)
defer conn.Close()
row := conn.QueryRow(
"select :id, ?",
sql.Named("id", 1),
2,
)
var col1, col2 int64
err := row.Scan(&col1, &col2)
if err != nil {
t.Errorf("Scan failed with unexpected error %s", err)
return
}
if col1 != 1 || col2 != 2 {
t.Errorf("Unexpected values returned col1=%d, col2=%d", col1, col2)
}
}
*/
// TestPinger verifies that Ping succeeds against a reachable database.
func TestPinger(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	if err := conn.Ping(); err != nil {
		t.Errorf("Failed to hit database")
	}
}
// TestQueryCancelLowLevel cancels a context between sending a query and
// processing its response, expects context.Canceled, and then verifies the
// same connection is still usable. Statement order is significant: the
// cancel must happen after sendQuery and before processExec.
func TestQueryCancelLowLevel(t *testing.T) {
	checkConnStr(t)
	drv := driverWithProcess(t)
	conn, err := drv.open(context.Background(), makeConnStr(t).String())
	if err != nil {
		t.Fatalf("Open failed with error %v", err)
	}
	defer conn.Close()
	ctx, cancel := context.WithCancel(context.Background())
	stmt, err := conn.prepareContext(ctx, "waitfor delay '00:00:03'")
	if err != nil {
		t.Fatalf("Prepare failed with error %v", err)
	}
	err = stmt.sendQuery([]namedValue{})
	if err != nil {
		t.Fatalf("sendQuery failed with error %v", err)
	}
	cancel()
	_, err = stmt.processExec(ctx)
	if err != context.Canceled {
		t.Errorf("Expected error to be Cancelled but got %v", err)
	}
	// same connection should be usable again after it was cancelled
	stmt, err = conn.prepareContext(context.Background(), "select 1")
	if err != nil {
		t.Fatalf("Prepare failed with error %v", err)
	}
	rows, err := stmt.Query([]driver.Value{})
	if err != nil {
		t.Fatalf("Query failed with error %v", err)
	}
	values := []driver.Value{nil}
	err = rows.Next(values)
	if err != nil {
		t.Fatalf("Next failed with error %v", err)
	}
}
// TestQueryCancelHighLevel cancels a long-running ExecContext through the
// database/sql API and checks the connection remains usable afterwards.
func TestQueryCancelHighLevel(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	ctx, cancel := context.WithCancel(context.Background())
	// Cancel mid-query; the WAITFOR runs for 3s, the cancel fires at 200ms.
	go func() {
		time.Sleep(200 * time.Millisecond)
		cancel()
	}()
	_, err := conn.ExecContext(ctx, "waitfor delay '00:00:03'")
	if err != context.Canceled {
		t.Errorf("ExecContext expected to fail with Cancelled but it returned %v", err)
	}
	// connection should be usable after timeout
	row := conn.QueryRow("select 1")
	var val int64
	err = row.Scan(&val)
	if err != nil {
		t.Fatal("Scan failed with", err)
	}
}
// TestQueryTimeout lets a context deadline expire during ExecContext and
// expects DeadlineExceeded, then checks the connection is still usable.
func TestQueryTimeout(t *testing.T) {
	conn := open(t)
	defer conn.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel()
	_, err := conn.ExecContext(ctx, "waitfor delay '00:00:03'")
	if err != context.DeadlineExceeded {
		t.Errorf("ExecContext expected to fail with DeadlineExceeded but it returned %v", err)
	}
	// connection should be usable after timeout
	row := conn.QueryRow("select 1")
	var val int64
	err = row.Scan(&val)
	if err != nil {
		t.Fatal("Scan failed with", err)
	}
}
// TestDriverParams exercises parameter binding under both registered driver
// names: "mssql" (which preprocesses :n / :name placeholders) and
// "sqlserver" (raw @p1 / @name placeholders).
func TestDriverParams(t *testing.T) {
	checkConnStr(t)
	SetLogger(testLogger{t})
	type sqlCmd struct {
		Name   string
		Driver string // driver name to open: "mssql" or "sqlserver"
		Query  string
		Param  []interface{}
		Expect []interface{} // expected value per returned column
	}
	list := []sqlCmd{
		{
			Name:   "preprocess-ordinal",
			Driver: "mssql",
			Query:  `select V1=:1`,
			Param:  []interface{}{"abc"},
			Expect: []interface{}{"abc"},
		},
		{
			Name:   "preprocess-name",
			Driver: "mssql",
			Query:  `select V1=:First`,
			Param:  []interface{}{sql.Named("First", "abc")},
			Expect: []interface{}{"abc"},
		},
		{
			Name:   "raw-ordinal",
			Driver: "sqlserver",
			Query:  `select V1=@p1`,
			Param:  []interface{}{"abc"},
			Expect: []interface{}{"abc"},
		},
		{
			Name:   "raw-name",
			Driver: "sqlserver",
			Query:  `select V1=@First`,
			Param:  []interface{}{sql.Named("First", "abc")},
			Expect: []interface{}{"abc"},
		},
	}
	for cmdIndex, cmd := range list {
		t.Run(cmd.Name, func(t *testing.T) {
			db, err := sql.Open(cmd.Driver, makeConnStr(t).String())
			if err != nil {
				t.Fatalf("failed to open driver %q", cmd.Driver)
			}
			defer db.Close()
			rows, err := db.Query(cmd.Query, cmd.Param...)
			if err != nil {
				t.Fatalf("failed to run query %q %v", cmd.Query, err)
			}
			defer rows.Close()
			columns, err := rows.Columns()
			if err != nil {
				t.Fatalf("failed to get column schema %v", err)
			}
			clen := len(columns)
			if clen != len(cmd.Expect) {
				t.Fatalf("query column has %d, expect %d columns", clen, len(cmd.Expect))
			}
			// Scan every column into an interface{} for a generic compare.
			values := make([]interface{}, clen)
			into := make([]interface{}, clen)
			for i := 0; i < clen; i++ {
				into[i] = &values[i]
			}
			for rows.Next() {
				err = rows.Scan(into...)
				if err != nil {
					t.Fatalf("failed to scan into row for %d %q", cmdIndex, cmd.Driver)
				}
				for i := range cmd.Expect {
					if values[i] != cmd.Expect[i] {
						t.Fatalf("expected value in index %d %v != actual value %v", i, cmd.Expect[i], values[i])
					}
				}
			}
		})
	}
}
// connInterrupt wraps a net.Conn and can be switched into a mode where reads
// and/or writes start failing with disconnectError, simulating a dropped
// server connection mid-stream.
type connInterrupt struct {
	net.Conn

	mu           sync.Mutex // guards the two disrupt flags
	disruptRead  bool
	disruptWrite bool
}

// Interrupt makes subsequent Write calls (write=true) or Read calls
// (write=false) fail.
func (c *connInterrupt) Interrupt(write bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if write {
		c.disruptWrite = true
	} else {
		c.disruptRead = true
	}
}

// Read delegates to the wrapped connection unless reads are disrupted.
func (c *connInterrupt) Read(b []byte) (n int, err error) {
	c.mu.Lock()
	blocked := c.disruptRead
	c.mu.Unlock()
	if blocked {
		return 0, disconnectError{}
	}
	return c.Conn.Read(b)
}

// Write delegates to the wrapped connection unless writes are disrupted.
func (c *connInterrupt) Write(b []byte) (n int, err error) {
	c.mu.Lock()
	blocked := c.disruptWrite
	c.mu.Unlock()
	if blocked {
		return 0, disconnectError{}
	}
	return c.Conn.Write(b)
}
// dialerInterrupt wraps a tcpDialer and records every connection it opens so
// all of them can be disrupted at once via Interrupt.
type dialerInterrupt struct {
	nd tcpDialer
	mu   sync.Mutex       // guards list
	list []*connInterrupt // all connections handed out by Dial
}
// Dial opens a connection via the wrapped dialer, wraps it in a
// connInterrupt, and registers it so it can later be broken via Interrupt.
func (d *dialerInterrupt) Dial(ctx context.Context, addr string) (net.Conn, error) {
	conn, err := d.nd.Dial(ctx, addr)
	if err != nil {
		return nil, err
	}
	ci := &connInterrupt{Conn: conn}
	d.mu.Lock()
	d.list = append(d.list, ci)
	d.mu.Unlock()
	// err is necessarily nil here; return the literal instead of the stale
	// error variable for clarity.
	return ci, nil
}
// Interrupt disrupts the given direction on every connection this dialer has
// produced so far.
func (d *dialerInterrupt) Interrupt(write bool) {
	d.mu.Lock()
	defer d.mu.Unlock()
	for i := range d.list {
		d.list[i].Interrupt(write)
	}
}
// Compile-time check that disconnectError satisfies net.Error.
var _ net.Error = disconnectError{}

// disconnectError is the synthetic error surfaced by interrupted
// connections. It reports itself as both a timeout and temporary so the
// driver's network-error handling paths are exercised.
type disconnectError struct{}

func (disconnectError) Error() string {
	return "disconnect"
}

func (disconnectError) Timeout() bool {
	return true
}

func (disconnectError) Temporary() bool {
	return true
}
// TestDisconnect1 ensures errors and states are handled correctly if
// the server is disconnected mid-query.
func TestDisconnect1(t *testing.T) {
	if testing.Short() {
		t.Skip("short")
	}
	checkConnStr(t)
	SetLogger(testLogger{t})

	// Revert to the normal dialer after the test is done.
	normalCreateDialer := createDialer
	defer func() {
		createDialer = normalCreateDialer
	}()

	// Closed to signal the injected dialer to break all live connections.
	waitDisrupt := make(chan struct{})
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*2)
	defer cancel()

	// Install a dialer that wraps every connection in a connInterrupt and
	// severs both directions once waitDisrupt is closed.
	createDialer = func(p *connectParams) dialer {
		nd := tcpDialer{&net.Dialer{Timeout: p.dial_timeout, KeepAlive: p.keepAlive}}
		di := &dialerInterrupt{nd: nd}
		go func() {
			<-waitDisrupt
			di.Interrupt(true)
			di.Interrupt(false)
		}()
		return di
	}
	db, err := sql.Open("sqlserver", makeConnStr(t).String())
	if err != nil {
		t.Fatal(err)
	}
	if err := db.PingContext(ctx); err != nil {
		t.Fatal(err)
	}
	defer db.Close()
	_, err = db.ExecContext(ctx, `SET LOCK_TIMEOUT 1800;`)
	if err != nil {
		t.Fatal(err)
	}
	// Sever the connection one second into the three-second server-side wait
	// below, so the query is guaranteed to be in flight when it breaks.
	go func() {
		time.Sleep(time.Second * 1)
		close(waitDisrupt)
	}()
	t.Log("prepare for query")
	_, err = db.ExecContext(ctx, `waitfor delay '00:00:3';`)
	if err != nil {
		t.Log("expected error after disconnect", err)
		return
	}
	t.Fatal("wanted error after Exec")
}
// TestDisconnect2 tests a read error so the query is started
// but results cannot be read.
func TestDisconnect2(t *testing.T) {
	if testing.Short() {
		t.Skip("short")
	}
	checkConnStr(t)
	SetLogger(testLogger{t})

	// Revert to the normal dialer after the test is done.
	normalCreateDialer := createDialer
	defer func() {
		createDialer = normalCreateDialer
	}()

	end := make(chan error)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go func() {
		waitDisrupt := make(chan struct{})
		ctx, cancel = context.WithTimeout(ctx, time.Second*2)
		defer cancel()

		// Install a dialer that severs only the read side once waitDisrupt
		// closes, so the query is sent but results cannot be read back.
		createDialer = func(p *connectParams) dialer {
			nd := tcpDialer{&net.Dialer{Timeout: p.dial_timeout, KeepAlive: p.keepAlive}}
			di := &dialerInterrupt{nd: nd}
			go func() {
				<-waitDisrupt
				di.Interrupt(false)
			}()
			return di
		}
		// Bug fix: t.Fatal must only be called from the goroutine running
		// the test (it only Goexits the calling goroutine). Setup failures
		// are reported with t.Error and forwarded to end instead.
		db, err := sql.Open("sqlserver", makeConnStr(t).String())
		if err != nil {
			t.Error(err)
			end <- err
			return
		}
		// Close the pool even if a later setup step fails.
		defer db.Close()
		if err := db.PingContext(ctx); err != nil {
			t.Error(err)
			end <- err
			return
		}
		if _, err := db.ExecContext(ctx, `SET LOCK_TIMEOUT 1800;`); err != nil {
			t.Error(err)
			end <- err
			return
		}
		close(waitDisrupt)
		_, err = db.ExecContext(ctx, `waitfor delay '00:00:3';`)
		end <- err
	}()
	timeout := time.After(10 * time.Second)
	select {
	case err := <-end:
		// A nil error means the disrupted query "succeeded", which is wrong.
		if err == nil {
			t.Fatal("test err")
		}
	case <-timeout:
		t.Fatal("timeout")
	}
}
|
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package raft
import (
"errors"
pb "github.com/coreos/etcd/raft/raftpb"
)
// ErrStepLocalMsg is returned when try to step a local raft message
var ErrStepLocalMsg = errors.New("raft: cannot step raft local message")
// ErrStepPeerNotFound is returned when try to step a response message
// but there is no peer found in raft.prs for that node.
var ErrStepPeerNotFound = errors.New("raft: cannot step as peer not found")
// RawNode is a thread-unsafe Node.
// The methods of this struct correspond to the methods of Node and are described
// more fully there.
type RawNode struct {
	raft       *raft        // the wrapped raft state machine
	prevSoftSt *SoftState   // soft state as of the last Ready handed out
	prevHardSt pb.HardState // hard state as of the last Ready handed out
}

// newReady builds a Ready describing everything that changed since the
// previous Ready (tracked via prevSoftSt/prevHardSt).
func (rn *RawNode) newReady() Ready {
	return newReady(rn.raft, rn.prevSoftSt, rn.prevHardSt)
}
// commitReady records that the application has persisted and applied the
// contents of rd: it advances the applied/stable marks of the raft log and
// updates the prev* snapshots used to build the next Ready.
func (rn *RawNode) commitReady(rd Ready) {
	if rd.SoftState != nil {
		rn.prevSoftSt = rd.SoftState
	}
	if !IsEmptyHardState(rd.HardState) {
		rn.prevHardSt = rd.HardState
	}
	if rn.prevHardSt.Commit != 0 {
		// In most cases, prevHardSt and rd.HardState will be the same
		// because when there are new entries to apply we just sent a
		// HardState with an updated Commit value. However, on initial
		// startup the two are different because we don't send a HardState
		// until something changes, but we do send any un-applied but
		// committed entries (and previously-committed entries may be
		// incorporated into the snapshot, even if rd.CommittedEntries is
		// empty). Therefore we mark all committed entries as applied
		// whether they were included in rd.HardState or not.
		rn.raft.raftLog.appliedTo(rn.prevHardSt.Commit)
	}
	if len(rd.Entries) > 0 {
		// Entries handed out in rd are now stable storage up to the last one.
		e := rd.Entries[len(rd.Entries)-1]
		rn.raft.raftLog.stableTo(e.Index, e.Term)
	}
	if !IsEmptySnap(rd.Snapshot) {
		rn.raft.raftLog.stableSnapTo(rd.Snapshot.Metadata.Index)
	}
	if len(rd.ReadStates) != 0 {
		// The application has consumed the pending read states.
		rn.raft.readStates = nil
	}
}
// NewRawNode returns a new RawNode given configuration and a list of raft peers.
// It panics if config.ID is zero or the Storage cannot report its last index.
func NewRawNode(config *Config, peers []Peer) (*RawNode, error) {
	if config.ID == 0 {
		panic("config.ID must not be zero")
	}
	r := newRaft(config)
	rn := &RawNode{
		raft: r,
	}
	lastIndex, err := config.Storage.LastIndex()
	if err != nil {
		panic(err) // TODO(bdarnell)
	}
	// If the log is empty, this is a new RawNode (like StartNode); otherwise it's
	// restoring an existing RawNode (like RestartNode).
	// TODO(bdarnell): rethink RawNode initialization and whether the application needs
	// to be able to tell us when it expects the RawNode to exist.
	if lastIndex == 0 {
		r.becomeFollower(1, None)
		// Bootstrap the log with one ConfChangeAddNode entry per peer, all
		// at term 1, and mark them committed and applied immediately.
		ents := make([]pb.Entry, len(peers))
		for i, peer := range peers {
			cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context}
			data, err := cc.Marshal()
			if err != nil {
				panic("unexpected marshal error")
			}
			ents[i] = pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: uint64(i + 1), Data: data}
		}
		r.raftLog.append(ents...)
		r.raftLog.committed = uint64(len(ents))
		for _, peer := range peers {
			r.addNode(peer.ID)
		}
	}
	// Set the initial hard and soft states after performing all initialization.
	rn.prevSoftSt = r.softState()
	if lastIndex == 0 {
		rn.prevHardSt = emptyState
	} else {
		rn.prevHardSt = r.hardState()
	}
	return rn, nil
}
// Tick advances the internal logical clock by a single tick.
func (rn *RawNode) Tick() {
	rn.raft.tick()
}

// Campaign causes this RawNode to transition to candidate state.
func (rn *RawNode) Campaign() error {
	return rn.raft.Step(pb.Message{
		Type: pb.MsgHup,
	})
}

// Propose proposes data be appended to the raft log.
func (rn *RawNode) Propose(data []byte) error {
	return rn.raft.Step(pb.Message{
		Type: pb.MsgProp,
		From: rn.raft.id,
		Entries: []pb.Entry{
			{Data: data},
		}})
}

// ProposeConfChange proposes a config change.
func (rn *RawNode) ProposeConfChange(cc pb.ConfChange) error {
	data, err := cc.Marshal()
	if err != nil {
		return err
	}
	return rn.raft.Step(pb.Message{
		Type: pb.MsgProp,
		Entries: []pb.Entry{
			{Type: pb.EntryConfChange, Data: data},
		},
	})
}

// ApplyConfChange applies a config change to the local node.
// A zero NodeID clears any pending config and only reports current membership.
func (rn *RawNode) ApplyConfChange(cc pb.ConfChange) *pb.ConfState {
	if cc.NodeID == None {
		rn.raft.resetPendingConf()
		return &pb.ConfState{Nodes: rn.raft.nodes()}
	}
	switch cc.Type {
	case pb.ConfChangeAddNode:
		rn.raft.addNode(cc.NodeID)
	case pb.ConfChangeRemoveNode:
		rn.raft.removeNode(cc.NodeID)
	case pb.ConfChangeUpdateNode:
		rn.raft.resetPendingConf()
	default:
		panic("unexpected conf type")
	}
	return &pb.ConfState{Nodes: rn.raft.nodes()}
}

// Step advances the state machine using the given message.
func (rn *RawNode) Step(m pb.Message) error {
	// ignore unexpected local messages receiving over network
	if IsLocalMsg(m.Type) {
		return ErrStepLocalMsg
	}
	// Responses from unknown peers are rejected; all other messages are fed
	// to the underlying raft state machine.
	if _, ok := rn.raft.prs[m.From]; ok || !IsResponseMsg(m.Type) {
		return rn.raft.Step(m)
	}
	return ErrStepPeerNotFound
}

// Ready returns the current point-in-time state of this RawNode.
// It also hands off (and clears) the pending outbound messages.
func (rn *RawNode) Ready() Ready {
	rd := rn.newReady()
	rn.raft.msgs = nil
	return rd
}
// HasReady called when RawNode user need to check if any Ready pending.
// Checking logic in this method should be consistent with Ready.containsUpdates().
func (rn *RawNode) HasReady() bool {
	r := rn.raft
	if !r.softState().equal(rn.prevSoftSt) {
		return true
	}
	if hardSt := r.hardState(); !IsEmptyHardState(hardSt) && !isHardStateEqual(hardSt, rn.prevHardSt) {
		return true
	}
	if r.raftLog.unstable.snapshot != nil && !IsEmptySnap(*r.raftLog.unstable.snapshot) {
		return true
	}
	if len(r.msgs) > 0 || len(r.raftLog.unstableEntries()) > 0 || r.raftLog.hasNextEnts() {
		return true
	}
	if len(r.readStates) != 0 {
		return true
	}
	return false
}

// Advance notifies the RawNode that the application has applied and saved progress in the
// last Ready results.
func (rn *RawNode) Advance(rd Ready) {
	rn.commitReady(rd)
}

// Status returns the current status of the given group.
func (rn *RawNode) Status() *Status {
	status := getStatus(rn.raft)
	return &status
}

// ReportUnreachable reports the given node is not reachable for the last send.
func (rn *RawNode) ReportUnreachable(id uint64) {
	_ = rn.raft.Step(pb.Message{Type: pb.MsgUnreachable, From: id})
}

// ReportSnapshot reports the status of the sent snapshot.
func (rn *RawNode) ReportSnapshot(id uint64, status SnapshotStatus) {
	rej := status == SnapshotFailure
	_ = rn.raft.Step(pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej})
}

// TransferLeader tries to transfer leadership to the given transferee.
func (rn *RawNode) TransferLeader(transferee uint64) {
	_ = rn.raft.Step(pb.Message{Type: pb.MsgTransferLeader, From: transferee})
}

// ReadIndex requests a read state. The read state will be set in ready.
// Read State has a read index. Once the application advances further than the read
// index, any linearizable read requests issued before the read request can be
// processed safely. The read state will have the same rctx attached.
func (rn *RawNode) ReadIndex(rctx []byte) {
	_ = rn.raft.Step(pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
}
raft: add RawNode.TickQuiesced
TickQuiesced allows the caller to support "quiesced" Raft groups which
do not perform periodic heartbeats and elections. This is useful in a
system with thousands of Raft groups where these periodic operations can
be overwhelming in an otherwise idle system.
It might seem possible to avoid advancing the logical clock at all in
such Raft groups, but doing so has an interaction with the CheckQuorum
functionality. If a follower is not quiesced while the leader is, the
follower can call an election that will fail because the leader's lease
has not expired (electionElapsed < electionTimeout). The next time the
leader sends a heartbeat to this follower the follower will see that the
heartbeat is from a previous term and respond with a MsgAppResp. This in
turn will cause the leader to step down and become a follower even
though there isn't a leader in the group. By allowing the leader's
logical clock to advance via TickQuiesced, the leader won't reject the
election and there will be a smooth transfer of leadership to the
follower.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package raft
import (
"errors"
pb "github.com/coreos/etcd/raft/raftpb"
)
// ErrStepLocalMsg is returned when try to step a local raft message
var ErrStepLocalMsg = errors.New("raft: cannot step raft local message")
// ErrStepPeerNotFound is returned when try to step a response message
// but there is no peer found in raft.prs for that node.
var ErrStepPeerNotFound = errors.New("raft: cannot step as peer not found")
// RawNode is a thread-unsafe Node.
// The methods of this struct correspond to the methods of Node and are described
// more fully there.
type RawNode struct {
raft *raft
prevSoftSt *SoftState
prevHardSt pb.HardState
}
func (rn *RawNode) newReady() Ready {
return newReady(rn.raft, rn.prevSoftSt, rn.prevHardSt)
}
func (rn *RawNode) commitReady(rd Ready) {
if rd.SoftState != nil {
rn.prevSoftSt = rd.SoftState
}
if !IsEmptyHardState(rd.HardState) {
rn.prevHardSt = rd.HardState
}
if rn.prevHardSt.Commit != 0 {
// In most cases, prevHardSt and rd.HardState will be the same
// because when there are new entries to apply we just sent a
// HardState with an updated Commit value. However, on initial
// startup the two are different because we don't send a HardState
// until something changes, but we do send any un-applied but
// committed entries (and previously-committed entries may be
// incorporated into the snapshot, even if rd.CommittedEntries is
// empty). Therefore we mark all committed entries as applied
// whether they were included in rd.HardState or not.
rn.raft.raftLog.appliedTo(rn.prevHardSt.Commit)
}
if len(rd.Entries) > 0 {
e := rd.Entries[len(rd.Entries)-1]
rn.raft.raftLog.stableTo(e.Index, e.Term)
}
if !IsEmptySnap(rd.Snapshot) {
rn.raft.raftLog.stableSnapTo(rd.Snapshot.Metadata.Index)
}
if len(rd.ReadStates) != 0 {
rn.raft.readStates = nil
}
}
// NewRawNode returns a new RawNode given configuration and a list of raft peers.
func NewRawNode(config *Config, peers []Peer) (*RawNode, error) {
if config.ID == 0 {
panic("config.ID must not be zero")
}
r := newRaft(config)
rn := &RawNode{
raft: r,
}
lastIndex, err := config.Storage.LastIndex()
if err != nil {
panic(err) // TODO(bdarnell)
}
// If the log is empty, this is a new RawNode (like StartNode); otherwise it's
// restoring an existing RawNode (like RestartNode).
// TODO(bdarnell): rethink RawNode initialization and whether the application needs
// to be able to tell us when it expects the RawNode to exist.
if lastIndex == 0 {
r.becomeFollower(1, None)
ents := make([]pb.Entry, len(peers))
for i, peer := range peers {
cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context}
data, err := cc.Marshal()
if err != nil {
panic("unexpected marshal error")
}
ents[i] = pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: uint64(i + 1), Data: data}
}
r.raftLog.append(ents...)
r.raftLog.committed = uint64(len(ents))
for _, peer := range peers {
r.addNode(peer.ID)
}
}
// Set the initial hard and soft states after performing all initialization.
rn.prevSoftSt = r.softState()
if lastIndex == 0 {
rn.prevHardSt = emptyState
} else {
rn.prevHardSt = r.hardState()
}
return rn, nil
}
// Tick advances the internal logical clock by a single tick.
func (rn *RawNode) Tick() {
rn.raft.tick()
}
// TickQuiesced advances the internal logical clock by a single tick without
// performing any other state machine processing. It allows the caller to avoid
// periodic heartbeats and elections when all of the peers in a Raft group are
// known to be at the same state. Expected usage is to periodically invoke Tick
// or TickQuiesced depending on whether the group is "active" or "quiesced".
//
// WARNING: Be very careful about using this method as it subverts the Raft
// state machine. You should probably be using Tick instead.
func (rn *RawNode) TickQuiesced() {
rn.raft.electionElapsed++
}
// Campaign causes this RawNode to transition to candidate state.
func (rn *RawNode) Campaign() error {
return rn.raft.Step(pb.Message{
Type: pb.MsgHup,
})
}
// Propose proposes data be appended to the raft log.
func (rn *RawNode) Propose(data []byte) error {
return rn.raft.Step(pb.Message{
Type: pb.MsgProp,
From: rn.raft.id,
Entries: []pb.Entry{
{Data: data},
}})
}
// ProposeConfChange proposes a config change.
func (rn *RawNode) ProposeConfChange(cc pb.ConfChange) error {
data, err := cc.Marshal()
if err != nil {
return err
}
return rn.raft.Step(pb.Message{
Type: pb.MsgProp,
Entries: []pb.Entry{
{Type: pb.EntryConfChange, Data: data},
},
})
}
// ApplyConfChange applies a config change to the local node.
func (rn *RawNode) ApplyConfChange(cc pb.ConfChange) *pb.ConfState {
if cc.NodeID == None {
rn.raft.resetPendingConf()
return &pb.ConfState{Nodes: rn.raft.nodes()}
}
switch cc.Type {
case pb.ConfChangeAddNode:
rn.raft.addNode(cc.NodeID)
case pb.ConfChangeRemoveNode:
rn.raft.removeNode(cc.NodeID)
case pb.ConfChangeUpdateNode:
rn.raft.resetPendingConf()
default:
panic("unexpected conf type")
}
return &pb.ConfState{Nodes: rn.raft.nodes()}
}
// Step advances the state machine using the given message.
func (rn *RawNode) Step(m pb.Message) error {
// ignore unexpected local messages receiving over network
if IsLocalMsg(m.Type) {
return ErrStepLocalMsg
}
if _, ok := rn.raft.prs[m.From]; ok || !IsResponseMsg(m.Type) {
return rn.raft.Step(m)
}
return ErrStepPeerNotFound
}
// Ready returns the current point-in-time state of this RawNode.
func (rn *RawNode) Ready() Ready {
rd := rn.newReady()
rn.raft.msgs = nil
return rd
}
// HasReady called when RawNode user need to check if any Ready pending.
// Checking logic in this method should be consistent with Ready.containsUpdates().
func (rn *RawNode) HasReady() bool {
r := rn.raft
if !r.softState().equal(rn.prevSoftSt) {
return true
}
if hardSt := r.hardState(); !IsEmptyHardState(hardSt) && !isHardStateEqual(hardSt, rn.prevHardSt) {
return true
}
if r.raftLog.unstable.snapshot != nil && !IsEmptySnap(*r.raftLog.unstable.snapshot) {
return true
}
if len(r.msgs) > 0 || len(r.raftLog.unstableEntries()) > 0 || r.raftLog.hasNextEnts() {
return true
}
if len(r.readStates) != 0 {
return true
}
return false
}
// Advance notifies the RawNode that the application has applied and saved progress in the
// last Ready results.
func (rn *RawNode) Advance(rd Ready) {
rn.commitReady(rd)
}
// Status returns the current status of the given group.
func (rn *RawNode) Status() *Status {
status := getStatus(rn.raft)
return &status
}
// ReportUnreachable reports the given node is not reachable for the last send.
func (rn *RawNode) ReportUnreachable(id uint64) {
_ = rn.raft.Step(pb.Message{Type: pb.MsgUnreachable, From: id})
}
// ReportSnapshot reports the status of the sent snapshot.
func (rn *RawNode) ReportSnapshot(id uint64, status SnapshotStatus) {
rej := status == SnapshotFailure
_ = rn.raft.Step(pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej})
}
// TransferLeader tries to transfer leadership to the given transferee.
func (rn *RawNode) TransferLeader(transferee uint64) {
_ = rn.raft.Step(pb.Message{Type: pb.MsgTransferLeader, From: transferee})
}
// ReadIndex requests a read state. The read state will be set in ready.
// Read State has a read index. Once the application advances further than the read
// index, any linearizable read requests issued before the read request can be
// processed safely. The read state will have the same rctx attached.
func (rn *RawNode) ReadIndex(rctx []byte) {
_ = rn.raft.Step(pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
}
|
package harvest
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"github.com/pkg/errors"
)
const HARVEST_DOMAIN = "harvestapp.com"
// API is a Harvest V1 API client. BaseURL is derived from SubDomain; User
// and Password, when both set, are sent as HTTP basic auth on every request.
type API struct {
	client    *http.Client
	BaseURL   string
	SubDomain string
	User      string
	Password  string
}
// NewBasicAuthAPI builds an API client for the given Harvest subdomain that
// authenticates every request with HTTP basic auth.
func NewBasicAuthAPI(subdomain, user, password string) *API {
	return &API{
		client:    http.DefaultClient,
		SubDomain: subdomain,
		User:      user,
		Password:  password,
		BaseURL:   fmt.Sprintf("https://%s.%s", subdomain, HARVEST_DOMAIN),
	}
}
// Get issues an HTTP GET to path (relative to BaseURL) with args encoded as
// query parameters, and JSON-decodes the response body into target.
// It returns an error for request-construction failures, transport failures,
// non-200 responses, or an undecodable body.
func (a *API) Get(path string, args Arguments, target interface{}) error {
	url := fmt.Sprintf("%s%s", a.BaseURL, path)
	urlWithParams := fmt.Sprintf("%s?%s", url, args.ToURLValues().Encode())
	req, err := http.NewRequest("GET", urlWithParams, nil)
	if err != nil {
		// Bug fix: check the error before touching req. On failure req is
		// nil and the header writes below would panic.
		return errors.Wrapf(err, "Invalid GET request %s", url)
	}
	req.Header.Set("Accept", "application/json")
	if a.User != "" && a.Password != "" {
		req.SetBasicAuth(a.User, a.Password)
	}
	resp, err := a.client.Do(req)
	if err != nil {
		return errors.Wrapf(err, "HTTP request failure on %s", url)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		body, readErr := ioutil.ReadAll(resp.Body)
		return errors.Errorf("HTTP request failure on %s: %s %v", url, string(body), readErr)
	}
	decoder := json.NewDecoder(resp.Body)
	if err = decoder.Decode(target); err != nil {
		// NOTE(review): the decoder has already consumed part of the body,
		// so this captures only the unread remainder (often empty).
		body, _ := ioutil.ReadAll(resp.Body)
		return errors.Wrapf(err, "JSON decode failed on %s: %s", url, string(body))
	}
	return nil
}
// Put issues an HTTP PUT with postData JSON-encoded as the request body.
// Harvest V1 answers successful writes with an empty body and a Location
// header naming the changed record, which is then re-fetched into target.
func (a *API) Put(path string, args Arguments, postData interface{}, target interface{}) error {
	url := fmt.Sprintf("%s%s", a.BaseURL, path)
	urlWithParams := fmt.Sprintf("%s?%s", url, args.ToURLValues().Encode())
	buffer := new(bytes.Buffer)
	if postData != nil {
		if err := json.NewEncoder(buffer).Encode(postData); err != nil {
			return errors.Wrapf(err, "Invalid PUT request %s", url)
		}
	}
	req, err := http.NewRequest("PUT", urlWithParams, buffer)
	if err != nil {
		// Bug fix: check the error before touching req. On failure req is
		// nil and the header writes below would panic.
		return errors.Wrapf(err, "Invalid PUT request %s", url)
	}
	req.Header.Set("Accept", "application/json")
	req.Header.Set("Content-Type", "application/json; charset=utf-8")
	req.Header.Set("User-Agent", "github.com/adlio/harvest")
	if a.User != "" && a.Password != "" {
		req.SetBasicAuth(a.User, a.Password)
	}
	resp, err := a.client.Do(req)
	if err != nil {
		return errors.Wrapf(err, "HTTP request failure on %s", url)
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		// Bug fix: use Errorf, not Wrapf(err, ...). The wrapped err here is
		// ReadAll's (usually nil), and errors.Wrapf(nil, ...) returns nil —
		// the HTTP failure was being silently reported as success.
		body, readErr := ioutil.ReadAll(resp.Body)
		return errors.Errorf("HTTP request failure on %s: %s %v", url, string(body), readErr)
	}
	// Harvest V1 API returns an empty response, with a Location header including the
	// URI of the created object (e.g. /projects/254454)
	redirectDestination := resp.Header.Get("Location")
	if redirectDestination == "" {
		return errors.Errorf("PUT to %s failed to return a Location header. This means we couldn't fetch the new state of the record.", url)
	}
	return a.Get(redirectDestination, args, target)
}
// Post issues an HTTP POST with postData JSON-encoded as the request body.
// Harvest V1 answers successful creates with an empty body and a Location
// header naming the new record, which is then fetched into target.
func (a *API) Post(path string, args Arguments, postData interface{}, target interface{}) error {
	url := fmt.Sprintf("%s%s", a.BaseURL, path)
	urlWithParams := fmt.Sprintf("%s?%s", url, args.ToURLValues().Encode())
	buffer := new(bytes.Buffer)
	if postData != nil {
		if err := json.NewEncoder(buffer).Encode(postData); err != nil {
			return errors.Wrapf(err, "Invalid POST request %s", url)
		}
	}
	req, err := http.NewRequest("POST", urlWithParams, buffer)
	if err != nil {
		// Bug fix: check the error before touching req. On failure req is
		// nil and the header writes below would panic.
		return errors.Wrapf(err, "Invalid POST request %s", url)
	}
	req.Header.Set("Accept", "application/json")
	req.Header.Set("Content-Type", "application/json; charset=utf-8")
	req.Header.Set("User-Agent", "github.com/adlio/harvest")
	if a.User != "" && a.Password != "" {
		req.SetBasicAuth(a.User, a.Password)
	}
	resp, err := a.client.Do(req)
	if err != nil {
		return errors.Wrapf(err, "HTTP request failure on %s", url)
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		// Bug fix: use Errorf, not Wrapf(err, ...). The wrapped err here is
		// ReadAll's (usually nil), and errors.Wrapf(nil, ...) returns nil —
		// the HTTP failure was being silently reported as success.
		body, readErr := ioutil.ReadAll(resp.Body)
		return errors.Errorf("HTTP request failure on %s: %s %v", url, string(body), readErr)
	}
	// Harvest V1 API returns an empty response, with a Location header including the
	// URI of the created object (e.g. /projects/254454)
	redirectDestination := resp.Header.Get("Location")
	if redirectDestination == "" {
		return errors.Errorf("POST to %s failed to return a Location header. This means we couldn't fetch the new state of the record.", url)
	}
	return a.Get(redirectDestination, args, target)
}
// Delete issues an HTTP DELETE to path with args as query parameters.
// Only a 200 response is treated as success.
func (a *API) Delete(path string, args Arguments) error {
	url := fmt.Sprintf("%s%s", a.BaseURL, path)
	urlWithParams := fmt.Sprintf("%s?%s", url, args.ToURLValues().Encode())
	req, err := http.NewRequest("DELETE", urlWithParams, nil)
	if err != nil {
		// Bug fix: check the error before touching req. On failure req is
		// nil and the header writes below would panic.
		return errors.Wrapf(err, "Invalid DELETE request %s", url)
	}
	req.Header.Set("Accept", "application/json")
	if a.User != "" && a.Password != "" {
		req.SetBasicAuth(a.User, a.Password)
	}
	resp, err := a.client.Do(req)
	if err != nil {
		return errors.Wrapf(err, "HTTP request failure on %s", url)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		// Bug fix: use Errorf, not Wrapf(err, ...). The wrapped err here is
		// ReadAll's (usually nil), and errors.Wrapf(nil, ...) returns nil —
		// the HTTP failure was being silently reported as success.
		body, readErr := ioutil.ReadAll(resp.Body)
		return errors.Errorf("HTTP request failure on %s: %s %v", url, string(body), readErr)
	}
	return nil
}
PUT requests sometimes return data directly instead of a Location: redirect. Updates code to JSON-parse this.
package harvest
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"github.com/pkg/errors"
)
const HARVEST_DOMAIN = "harvestapp.com"
type API struct {
client *http.Client
BaseURL string
SubDomain string
User string
Password string
}
func NewBasicAuthAPI(subdomain, user, password string) *API {
a := API{}
a.client = http.DefaultClient
a.SubDomain = subdomain
a.User = user
a.Password = password
a.BaseURL = fmt.Sprintf("https://%s.%s", subdomain, HARVEST_DOMAIN)
return &a
}
func (a *API) Get(path string, args Arguments, target interface{}) error {
url := fmt.Sprintf("%s%s", a.BaseURL, path)
urlWithParams := fmt.Sprintf("%s?%s", url, args.ToURLValues().Encode())
req, err := http.NewRequest("GET", urlWithParams, nil)
req.Header.Set("Accept", "application/json")
if a.User != "" && a.Password != "" {
req.SetBasicAuth(a.User, a.Password)
}
if err != nil {
return errors.Wrapf(err, "Invalid GET request %s", url)
}
resp, err := a.client.Do(req)
if err != nil {
return errors.Wrapf(err, "HTTP request failure on %s", url)
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
var body []byte
body, err = ioutil.ReadAll(resp.Body)
return errors.Errorf("HTTP request failure on %s: %s %s", url, string(body), err)
}
decoder := json.NewDecoder(resp.Body)
err = decoder.Decode(target)
if err != nil {
body, _ := ioutil.ReadAll(resp.Body)
return errors.Wrapf(err, "JSON decode failed on %s: %s", url, string(body))
}
return nil
}
// Put issues an HTTP PUT with postData JSON-encoded as the request body.
// On success Harvest either returns a Location header naming the changed
// record (which is re-fetched into target) or returns the updated record
// directly in the response body (which is decoded into target).
func (a *API) Put(path string, args Arguments, postData interface{}, target interface{}) error {
	url := fmt.Sprintf("%s%s", a.BaseURL, path)
	urlWithParams := fmt.Sprintf("%s?%s", url, args.ToURLValues().Encode())
	buffer := new(bytes.Buffer)
	if postData != nil {
		if err := json.NewEncoder(buffer).Encode(postData); err != nil {
			return errors.Wrapf(err, "Invalid PUT request %s", url)
		}
	}
	req, err := http.NewRequest("PUT", urlWithParams, buffer)
	if err != nil {
		// Bug fix: check the error before touching req. On failure req is
		// nil and the header writes below would panic.
		return errors.Wrapf(err, "Invalid PUT request %s", url)
	}
	req.Header.Set("Accept", "application/json")
	req.Header.Set("Content-Type", "application/json; charset=utf-8")
	req.Header.Set("User-Agent", "github.com/adlio/harvest")
	if a.User != "" && a.Password != "" {
		req.SetBasicAuth(a.User, a.Password)
	}
	resp, err := a.client.Do(req)
	if err != nil {
		return errors.Wrapf(err, "HTTP request failure on %s", url)
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		// Bug fix: use Errorf, not Wrapf(err, ...). The wrapped err here is
		// ReadAll's (usually nil), and errors.Wrapf(nil, ...) returns nil —
		// the HTTP failure was being silently reported as success.
		body, readErr := ioutil.ReadAll(resp.Body)
		return errors.Errorf("HTTP request failure on %s: %s %v", url, string(body), readErr)
	}
	// Harvest V1 API returns an empty response, with a Location header including the
	// URI of the created object (e.g. /projects/254454)
	redirectDestination := resp.Header.Get("Location")
	if redirectDestination != "" {
		return a.Get(redirectDestination, args, target)
	}
	// Some PUTs return the updated record in the body instead of a Location
	// redirect; decode it in place.
	if err := json.NewDecoder(resp.Body).Decode(target); err != nil {
		// NOTE(review): the decoder has already consumed part of the body,
		// so this captures only the unread remainder (often empty).
		body, _ := ioutil.ReadAll(resp.Body)
		return errors.Wrapf(err, "JSON decode failed on %s: %s", url, string(body))
	}
	return nil
}
// Post issues an HTTP POST with postData JSON-encoded as the request body.
// Harvest V1 answers successful creates with an empty body and a Location
// header naming the new record, which is then fetched into target.
func (a *API) Post(path string, args Arguments, postData interface{}, target interface{}) error {
	url := fmt.Sprintf("%s%s", a.BaseURL, path)
	urlWithParams := fmt.Sprintf("%s?%s", url, args.ToURLValues().Encode())
	buffer := new(bytes.Buffer)
	if postData != nil {
		if err := json.NewEncoder(buffer).Encode(postData); err != nil {
			return errors.Wrapf(err, "Invalid POST request %s", url)
		}
	}
	req, err := http.NewRequest("POST", urlWithParams, buffer)
	if err != nil {
		// Bug fix: check the error before touching req. On failure req is
		// nil and the header writes below would panic.
		return errors.Wrapf(err, "Invalid POST request %s", url)
	}
	req.Header.Set("Accept", "application/json")
	req.Header.Set("Content-Type", "application/json; charset=utf-8")
	req.Header.Set("User-Agent", "github.com/adlio/harvest")
	if a.User != "" && a.Password != "" {
		req.SetBasicAuth(a.User, a.Password)
	}
	resp, err := a.client.Do(req)
	if err != nil {
		return errors.Wrapf(err, "HTTP request failure on %s", url)
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		// Bug fix: use Errorf, not Wrapf(err, ...). The wrapped err here is
		// ReadAll's (usually nil), and errors.Wrapf(nil, ...) returns nil —
		// the HTTP failure was being silently reported as success.
		body, readErr := ioutil.ReadAll(resp.Body)
		return errors.Errorf("HTTP request failure on %s: %s %v", url, string(body), readErr)
	}
	// Harvest V1 API returns an empty response, with a Location header including the
	// URI of the created object (e.g. /projects/254454)
	redirectDestination := resp.Header.Get("Location")
	if redirectDestination == "" {
		return errors.Errorf("POST to %s failed to return a Location header. This means we couldn't fetch the new state of the record.", url)
	}
	return a.Get(redirectDestination, args, target)
}
// Delete issues an HTTP DELETE to path with args as query parameters.
// Only a 200 response is treated as success.
func (a *API) Delete(path string, args Arguments) error {
	url := fmt.Sprintf("%s%s", a.BaseURL, path)
	urlWithParams := fmt.Sprintf("%s?%s", url, args.ToURLValues().Encode())
	req, err := http.NewRequest("DELETE", urlWithParams, nil)
	if err != nil {
		// Bug fix: check the error before touching req. On failure req is
		// nil and the header writes below would panic.
		return errors.Wrapf(err, "Invalid DELETE request %s", url)
	}
	req.Header.Set("Accept", "application/json")
	if a.User != "" && a.Password != "" {
		req.SetBasicAuth(a.User, a.Password)
	}
	resp, err := a.client.Do(req)
	if err != nil {
		return errors.Wrapf(err, "HTTP request failure on %s", url)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		// Bug fix: use Errorf, not Wrapf(err, ...). The wrapped err here is
		// ReadAll's (usually nil), and errors.Wrapf(nil, ...) returns nil —
		// the HTTP failure was being silently reported as success.
		body, readErr := ioutil.ReadAll(resp.Body)
		return errors.Errorf("HTTP request failure on %s: %s %v", url, string(body), readErr)
	}
	return nil
}
|
package api
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"mime/multipart"
"net/http"
"net/http/httputil"
"net/url"
"path/filepath"
"strings"
"encoding/json"
"encoding/xml"
"github.com/golang/protobuf/jsonpb"
"github.com/golang/protobuf/proto"
)
// HTTP method names accepted by Agent.Method.
const (
	POST   = "POST"
	GET    = "GET"
	HEAD   = "HEAD"
	PUT    = "PUT"
	DELETE = "DELETE"
	PATCH  = "PATCH"
)

// types maps shorthand content-type aliases to MIME types.
var types = map[string]string{
	"html":       "text/html",
	"json":       "application/json",
	"xml":        "application/xml",
	"text":       "text/plain",
	"urlencoded": "application/x-www-form-urlencoded",
	"form":       "application/x-www-form-urlencoded",
	// NOTE(review): "form-data" maps to urlencoded rather than
	// multipart/form-data — confirm this is intended.
	"form-data": "application/x-www-form-urlencoded",
	"multipart": "multipart/form-data",
}

// ResponseProcessor is a hook that may inspect or replace a response before
// the Agent continues processing it.
type ResponseProcessor func(*http.Response) (*http.Response, error)

// Cipher encrypts request payloads and decrypts response payloads.
type Cipher interface {
	Encrypt([]byte) ([]byte, error)
	Decrypt([]byte) ([]byte, error)
}

// CIPHER_HEADER marks a message body as cipher-encoded.
const CIPHER_HEADER = "X-CIPHER-ENCODED"
// Agent is a chainable HTTP request builder. Each setter mutates the Agent
// and returns it so calls can be fluent. Error accumulates a deferred
// construction error, surfaced when the request is executed.
type Agent struct {
	u         *url.URL     // target URL being assembled
	t         string       // request content type (a value from types)
	m         string       // HTTP method (GET/POST/...)
	prefix    string       // path prefix prepended by URI
	headerIn  http.Header  // headers to send
	headerOut http.Header  // headers received
	query     url.Values   // extra query parameters merged in QueryGet
	cookies   []*http.Cookie
	files     []*File
	data      io.Reader // request body
	length    int
	cipher    Cipher // optional payload cipher
	Error     error
	debug     bool
	conn      *http.Client
	processor ResponseProcessor
}
// URL creates a new Agent targeting the given absolute URL, defaulting to
// GET with an HTML content type. It panics if the URL cannot be parsed.
func URL(aurl string) *Agent {
	u, err := url.Parse(aurl)
	if err != nil {
		panic(err)
	}
	return &Agent{
		u:         u,
		t:         types["html"],
		m:         GET,
		prefix:    strings.TrimSuffix(u.Path, "/"),
		headerIn:  http.Header{},
		headerOut: http.Header{},
		query:     url.Values{},
		cookies:   []*http.Cookie{},
		files:     []*File{},
		conn:      http.DefaultClient,
	}
}
// Get returns an Agent for a GET request to aurl.
func Get(aurl string) *Agent {
	return URL(aurl).Method(GET)
}

// Post returns an Agent for a POST request to aurl.
func Post(aurl string) *Agent {
	return URL(aurl).Method(POST)
}

// Patch returns an Agent for a PATCH request to aurl.
func Patch(aurl string) *Agent {
	return URL(aurl).Method(PATCH)
}

// Put returns an Agent for a PUT request to aurl.
func Put(aurl string) *Agent {
	return URL(aurl).Method(PUT)
}

// Head returns an Agent for a HEAD request to aurl.
func Head(aurl string) *Agent {
	return URL(aurl).Method(HEAD)
}

// HTTP returns an Agent targeting host over plain HTTP.
func HTTP(host string) *Agent {
	return URL(fmt.Sprintf("http://%s", host))
}

// HTTPs returns an Agent targeting host over HTTPS.
func HTTPs(host string) *Agent {
	return URL(fmt.Sprintf("https://%s", host))
}
func (a *Agent) SetCipher(cipher Cipher) *Agent {
a.cipher = cipher
return a
}
func (a *Agent) ResponseProcessor(processor ResponseProcessor) *Agent {
a.processor = processor
return a
}
func (a *Agent) Prefix(prefix string) *Agent {
a.prefix = strings.TrimSuffix(prefix, "/")
return a
}
func (a *Agent) Transport(tr http.RoundTripper) *Agent {
a.conn = &http.Client{
Transport: tr,
}
return a
}
func (a *Agent) Debug(flag bool) *Agent {
a.debug = flag
return a
}
func (a *Agent) URI(uri string) *Agent {
a.u.Path = uri
if len(a.prefix) > 0 {
a.u.Path = a.prefix + uri
}
return a
}
func (a *Agent) QueryGet() url.Values {
q := a.u.Query()
for k, v := range a.query {
for _, vv := range v {
q.Add(k, vv)
}
}
return q
}
func (a *Agent) QuerySet(key string, value string) *Agent {
a.query.Set(key, value)
return a
}
func (a *Agent) QueryAdd(key string, value string) *Agent {
a.query.Add(key, value)
return a
}
func (a *Agent) Fragment(value string) *Agent {
a.u.Fragment = value
return a
}
func (a *Agent) QueryDel(key string) *Agent {
a.query.Del(key)
return a
}
func (a *Agent) SetHead(hdr http.Header) *Agent {
for k, vs := range hdr {
for _, v := range vs {
a.headerIn.Add(k, v)
}
}
return a
}
func (a *Agent) HeadSet(key string, value string) *Agent {
a.headerIn.Set(key, value)
return a
}
func (a *Agent) HeadAdd(key string, value string) *Agent {
a.headerIn.Add(key, value)
return a
}
func (a *Agent) HeadDel(key string) *Agent {
a.headerIn.Del(key)
return a
}
func (a *Agent) BasicAuthSet(user, password string) *Agent {
a.u.User = url.UserPassword(user, password)
return a
}
func (a *Agent) BasicAuthDel() *Agent {
a.u.User = nil
return a
}
func (a *Agent) CookiesAdd(cookies ...*http.Cookie) *Agent {
a.cookies = append(a.cookies, cookies...)
return a
}
func (a *Agent) Method(m string) *Agent {
a.m = m
return a
}
// ContentType selects the request body content type by shorthand name
// ("json", "xml", "form", ...); unknown names are ignored. The shorthand
// key is stored in a.t and resolved via the types map in Do().
func (a *Agent) ContentType(t string) *Agent {
	// Bug fix: the original assigned the MIME string to a.m (the HTTP
	// method field), clobbering the method and never updating the type.
	if _, ok := types[t]; ok {
		a.t = t
	}
	return a
}
func (a *Agent) FormData(form map[string][]string) *Agent {
data := url.Values(form).Encode()
a.data = strings.NewReader(data)
a.length = len(data)
a.t = "form"
return a
}
// JSONMarshal encodes v as JSON. When unescape is true, the HTML-safe
// escape sequences that encoding/json emits for '<', '>' and '&' are
// rewritten back to their literal characters.
func JSONMarshal(v interface{}, unescape bool) ([]byte, error) {
	out, err := json.Marshal(v)
	if unescape {
		// Reverse encoding/json's default HTML escaping.
		replacements := [][2][]byte{
			{[]byte(`\u003c`), []byte("<")},
			{[]byte(`\u003e`), []byte(">")},
			{[]byte(`\u0026`), []byte("&")},
		}
		for _, r := range replacements {
			out = bytes.Replace(out, r[0], r[1], -1)
		}
	}
	return out, err
}
func (a *Agent) JSONData(args ...interface{}) *Agent {
if len(args) == 1 {
data, err := JSONMarshal(args[0], false)
a.data = bytes.NewBuffer(data)
a.length = len(data)
a.Error = err
}
if len(args) == 2 {
data, err := JSONMarshal(args[0], args[1].(bool))
a.data = bytes.NewBuffer(data)
a.length = len(data)
a.Error = err
}
a.t = "json"
return a
}
func (a *Agent) PBData(obj proto.Message) *Agent {
buf := bytes.NewBuffer([]byte{})
marshaler := &jsonpb.Marshaler{EmitDefaults: true}
err := marshaler.Marshal(buf, obj)
a.data = buf
a.Error = err
a.length = buf.Len()
a.t = "json"
return a
}
func (a *Agent) XMLData(obj interface{}) *Agent {
data, err := xml.Marshal(obj)
a.data = bytes.NewBuffer(data)
a.length = len(data)
a.Error = err
a.t = "xml"
return a
}
type File struct {
Filename string
Fieldname string
Data []byte
}
func NewFile(field string, filename string) (*File, error) {
absFile, err := filepath.Abs(filename)
if err != nil {
return nil, err
}
fn := filepath.Base(absFile)
data, err := ioutil.ReadFile(absFile)
if err != nil {
return nil, err
}
return &File{
Filename: fn,
Fieldname: field,
Data: data,
}, nil
}
func NewFileByBytes(field string, filename string, data []byte) (*File, error) {
fn := filepath.Base(filename)
return &File{
Filename: fn,
Fieldname: field,
Data: data,
}, nil
}
func NewFileByReader(field string, filename string, rd io.Reader) (*File, error) {
fn := filepath.Base(filename)
data, err := ioutil.ReadAll(rd)
if err != nil {
return nil, err
}
return &File{
Filename: fn,
Fieldname: field,
Data: data,
}, nil
}
func (a *Agent) FileData(files ...*File) *Agent {
a.files = append(a.files, files...)
a.t = "multipart"
return a
}
// Do builds and performs the HTTP request, applying files, cipher,
// headers, query parameters, basic auth and cookies configured on the
// Agent. It returns the raw response; if a ResponseProcessor is set it
// is given the response first.
func (a *Agent) Do() (*http.Response, error) {
	if a.Error != nil {
		return nil, a.Error
	}
	content_type := types[a.t]
	//! multipart file upload overrides any previously set body
	if len(a.files) > 0 {
		buf := &bytes.Buffer{}
		mw := multipart.NewWriter(buf)
		for _, file := range a.files {
			fw, _ := mw.CreateFormFile(file.Fieldname, file.Filename)
			fw.Write(file.Data)
		}
		a.data = buf
		content_type = mw.FormDataContentType()
		mw.Close()
	}
	//! encrypt the request body when a cipher is configured
	// nil guard: ioutil.ReadAll on a nil reader panics (e.g. a GET
	// with a cipher set but no body).
	if a.cipher != nil && a.data != nil {
		byts, err := ioutil.ReadAll(a.data)
		if err != nil {
			return nil, err
		}
		enbyts, err := a.cipher.Encrypt(byts)
		if err != nil {
			return nil, err
		}
		a.headerIn.Set(CIPHER_HEADER, "true")
		a.data = bytes.NewBuffer(enbyts)
		a.length = len(enbyts)
	}
	req, err := http.NewRequest(a.m, a.u.String(), a.data)
	if err != nil {
		a.Error = err
		return nil, err
	}
	//! headers
	req.Header = a.headerIn
	req.Header.Set("Content-Type", content_type)
	//! merge configured query values into the URL's own query string
	q := req.URL.Query()
	for k, v := range a.query {
		for _, vv := range v {
			q.Add(k, vv)
		}
	}
	req.URL.RawQuery = q.Encode()
	//! basic auth
	if a.u.User != nil {
		if password, ok := a.u.User.Password(); ok {
			req.SetBasicAuth(a.u.User.Username(), password)
		}
	}
	//! cookies
	for _, cookie := range a.cookies {
		req.AddCookie(cookie)
	}
	if a.debug {
		dump, _ := httputil.DumpRequest(req, true)
		log.Printf("api request\n-------------------------------\n%s\n", string(dump))
	}
	resp, err := a.conn.Do(req)
	if resp != nil {
		a.headerOut = resp.Header
	}
	if err != nil {
		// resp may be nil on transport errors; the dump/decrypt code
		// below would dereference it and panic, so bail out here.
		return resp, err
	}
	if a.debug {
		dump, _ := httputil.DumpResponse(resp, true)
		log.Printf("api response\n-------------------------------\n%s\n", string(dump))
	}
	//! decrypt the response body when the server marked it as encoded
	if a.cipher != nil {
		if strings.ToLower(resp.Header.Get(CIPHER_HEADER)) == "true" {
			enbyts, err := ioutil.ReadAll(resp.Body)
			if err != nil {
				return nil, err
			}
			debyts, err := a.cipher.Decrypt(enbyts)
			if err != nil {
				return nil, err
			}
			resp.Header.Del(CIPHER_HEADER)
			resp.Body = ioutil.NopCloser(bytes.NewBuffer(debyts))
			resp.ContentLength = int64(len(debyts))
		}
	}
	//! hand the response to a custom processor when configured
	if a.processor != nil {
		return a.processor(resp)
	}
	return resp, nil
}
func (a *Agent) Status() (int, string, error) {
resp, err := a.Do()
if err != nil {
a.Error = err
return http.StatusInternalServerError, http.StatusText(http.StatusInternalServerError), err
}
return resp.StatusCode, resp.Status, nil
}
// Bytes performs the request and returns the status code and the raw
// response body. On a non-200 status the body (or status line) becomes
// the error. On success the body is restored on the response so callers
// may read it again.
func (a *Agent) Bytes() (int, []byte, error) {
	resp, err := a.Do()
	if err != nil {
		a.Error = err
		return http.StatusInternalServerError, []byte{}, err
	}
	defer resp.Body.Close()
	if a.debug {
		dump, _ := httputil.DumpResponse(resp, true)
		log.Printf("api response\n--------------------------------\n%s\n", string(dump))
	}
	if resp.StatusCode != http.StatusOK {
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			a.Error = err
			// "%s" guards against '%' in the status being treated as
			// a format verb (non-constant format string).
			return resp.StatusCode, nil, fmt.Errorf("%s", resp.Status)
		}
		a.Error = fmt.Errorf("%s", string(body))
		return resp.StatusCode, nil, a.Error
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		a.Error = err
		return resp.StatusCode, nil, err
	}
	// Restore the body so the caller can re-read the response.
	resp.Body = ioutil.NopCloser(bytes.NewBuffer(body))
	return resp.StatusCode, body, a.Error
}
func (a *Agent) Text() (int, string, error) {
code, bytes, err := a.Bytes()
return code, string(bytes), err
}
// JSON performs the request and decodes a 200 response body into obj
// (pass nil to skip decoding). On a non-200 status the body is returned
// as the error and a.Error records the status line.
func (a *Agent) JSON(obj interface{}) (int, error) {
	resp, err := a.Do()
	if err != nil {
		a.Error = err
		return http.StatusInternalServerError, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			a.Error = err
			return resp.StatusCode, fmt.Errorf("%s", resp.Status)
		}
		a.Error = fmt.Errorf("%s", resp.Status)
		// "%s" guards against '%' in the body being treated as a
		// format verb (non-constant format string).
		return resp.StatusCode, fmt.Errorf("%s", string(body))
	}
	//! decode bytes to json
	if obj != nil {
		if err := json.NewDecoder(resp.Body).Decode(&obj); err != nil {
			a.Error = err
			return resp.StatusCode, err
		}
	}
	return resp.StatusCode, a.Error
}
func (a *Agent) JSONPB(obj proto.Message) (int, error) {
resp, err := a.Do()
if err != nil {
a.Error = err
return http.StatusInternalServerError, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
a.Error = err
return resp.StatusCode, fmt.Errorf(resp.Status)
}
a.Error = fmt.Errorf(resp.Status)
return resp.StatusCode, fmt.Errorf(string(body))
}
//! decode bytes to jsonpb
if obj != nil {
if err := jsonpb.Unmarshal(resp.Body, obj); err != nil {
a.Error = err
return resp.StatusCode, err
}
}
return resp.StatusCode, a.Error
}
func (a *Agent) XML(obj interface{}) (int, error) {
resp, err := a.Do()
if err != nil {
a.Error = err
return http.StatusInternalServerError, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
a.Error = err
return resp.StatusCode, fmt.Errorf(resp.Status)
}
a.Error = fmt.Errorf(resp.Status)
return resp.StatusCode, fmt.Errorf(string(body))
}
//! decode bytes to json
if obj != nil {
if err := xml.NewDecoder(resp.Body).Decode(&obj); err != nil {
a.Error = err
return resp.StatusCode, err
}
}
return resp.StatusCode, a.Error
}
func (a *Agent) GetHeadIn() http.Header {
return a.headerIn
}
func (a *Agent) GetHeadOut() http.Header {
return a.headerOut
}
Update api.go
add opentracing support
package api
import (
"bytes"
"encoding/json"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"log"
"mime/multipart"
"net/http"
"net/http/httputil"
"net/url"
"path/filepath"
"strings"
"github.com/golang/protobuf/jsonpb"
"github.com/golang/protobuf/proto"
"github.com/opentracing-contrib/go-stdlib/nethttp"
"github.com/opentracing/opentracing-go"
)
const (
POST = "POST"
GET = "GET"
HEAD = "HEAD"
PUT = "PUT"
DELETE = "DELETE"
PATCH = "PATCH"
)
var types = map[string]string{
"html": "text/html",
"json": "application/json",
"xml": "application/xml",
"text": "text/plain",
"urlencoded": "application/x-www-form-urlencoded",
"form": "application/x-www-form-urlencoded",
"form-data": "application/x-www-form-urlencoded",
"multipart": "multipart/form-data",
}
type ResponseProcessor func(*http.Response) (*http.Response, error)
type Cipher interface {
Encrypt([]byte) ([]byte, error)
Decrypt([]byte) ([]byte, error)
}
const CIPHER_HEADER = "X-CIPHER-ENCODED"
type Agent struct {
u *url.URL
t string
m string
prefix string
headerIn http.Header
headerOut http.Header
query url.Values
cookies []*http.Cookie
files []*File
data io.Reader
length int
cipher Cipher
Error error
debug bool
client *http.Client
processor ResponseProcessor
tracer opentracing.Tracer
}
// URL creates an Agent for the given URL string. It panics when the URL
// cannot be parsed. The URL's path (minus any trailing slash) becomes
// the default prefix used by URI().
func URL(aurl string) *Agent {
	u, err := url.Parse(aurl)
	if err != nil {
		panic(err)
	}
	return &Agent{
		u: u,
		// t holds the shorthand key ("html", "json", ...); Do() resolves
		// it through the types map. Storing types["html"] here (the MIME
		// string) made that lookup return "" for the default.
		t:         "html",
		m:         GET,
		prefix:    strings.TrimSuffix(u.Path, "/"),
		headerIn:  make(http.Header),
		headerOut: make(http.Header),
		query:     url.Values{},
		cookies:   make([]*http.Cookie, 0),
		files:     make([]*File, 0),
		client:    http.DefaultClient,
	}
}
func Get(aurl string) *Agent {
return URL(aurl).Method(GET)
}
func Post(aurl string) *Agent {
return URL(aurl).Method(POST)
}
func Patch(aurl string) *Agent {
return URL(aurl).Method(PATCH)
}
func Put(aurl string) *Agent {
return URL(aurl).Method(PUT)
}
func Head(aurl string) *Agent {
return URL(aurl).Method(HEAD)
}
func HTTP(host string) *Agent {
return URL(fmt.Sprintf("http://%s", host))
}
func HTTPs(host string) *Agent {
return URL(fmt.Sprintf("https://%s", host))
}
func (a *Agent) SetCipher(cipher Cipher) *Agent {
a.cipher = cipher
return a
}
func (a *Agent) ResponseProcessor(processor ResponseProcessor) *Agent {
a.processor = processor
return a
}
func (a *Agent) Prefix(prefix string) *Agent {
a.prefix = strings.TrimSuffix(prefix, "/")
return a
}
func (a *Agent) Transport(tr http.RoundTripper) *Agent {
a.client = &http.Client{
Transport: tr,
}
return a
}
func (a *Agent) Debug(flag bool) *Agent {
a.debug = flag
return a
}
func (a *Agent) URI(uri string) *Agent {
a.u.Path = uri
if len(a.prefix) > 0 {
a.u.Path = a.prefix + uri
}
return a
}
func (a *Agent) QueryGet() url.Values {
q := a.u.Query()
for k, v := range a.query {
for _, vv := range v {
q.Add(k, vv)
}
}
return q
}
func (a *Agent) QuerySet(key string, value string) *Agent {
a.query.Set(key, value)
return a
}
func (a *Agent) QueryAdd(key string, value string) *Agent {
a.query.Add(key, value)
return a
}
func (a *Agent) Fragment(value string) *Agent {
a.u.Fragment = value
return a
}
func (a *Agent) QueryDel(key string) *Agent {
a.query.Del(key)
return a
}
func (a *Agent) SetHead(hdr http.Header) *Agent {
for k, vs := range hdr {
for _, v := range vs {
a.headerIn.Add(k, v)
}
}
return a
}
func (a *Agent) HeadSet(key string, value string) *Agent {
a.headerIn.Set(key, value)
return a
}
func (a *Agent) HeadAdd(key string, value string) *Agent {
a.headerIn.Add(key, value)
return a
}
func (a *Agent) HeadDel(key string) *Agent {
a.headerIn.Del(key)
return a
}
func (a *Agent) BasicAuthSet(user, password string) *Agent {
a.u.User = url.UserPassword(user, password)
return a
}
func (a *Agent) BasicAuthDel() *Agent {
a.u.User = nil
return a
}
func (a *Agent) CookiesAdd(cookies ...*http.Cookie) *Agent {
a.cookies = append(a.cookies, cookies...)
return a
}
func (a *Agent) Method(m string) *Agent {
a.m = m
return a
}
// ContentType selects the request body content type by shorthand name
// ("json", "xml", "form", ...); unknown names are ignored. The shorthand
// key is stored in a.t and resolved via the types map in Do().
func (a *Agent) ContentType(t string) *Agent {
	// Bug fix: the original assigned the MIME string to a.m (the HTTP
	// method field), clobbering the method and never updating the type.
	if _, ok := types[t]; ok {
		a.t = t
	}
	return a
}
func (a *Agent) SetTracer(tracer opentracing.Tracer) {
a.tracer = tracer
}
func (a *Agent) SetHttpClient(client *http.Client) {
a.client = client
}
func (a *Agent) FormData(form map[string][]string) *Agent {
data := url.Values(form).Encode()
a.data = strings.NewReader(data)
a.length = len(data)
a.t = "form"
return a
}
func JSONMarshal(v interface{}, unescape bool) ([]byte, error) {
b, err := json.Marshal(v)
if unescape {
b = bytes.Replace(b, []byte("\\u003c"), []byte("<"), -1)
b = bytes.Replace(b, []byte("\\u003e"), []byte(">"), -1)
b = bytes.Replace(b, []byte("\\u0026"), []byte("&"), -1)
}
return b, err
}
func (a *Agent) JSONData(args ...interface{}) *Agent {
if len(args) == 1 {
data, err := JSONMarshal(args[0], false)
a.data = bytes.NewBuffer(data)
a.length = len(data)
a.Error = err
}
if len(args) == 2 {
data, err := JSONMarshal(args[0], args[1].(bool))
a.data = bytes.NewBuffer(data)
a.length = len(data)
a.Error = err
}
a.t = "json"
return a
}
func (a *Agent) PBData(obj proto.Message) *Agent {
buf := bytes.NewBuffer([]byte{})
marshaler := &jsonpb.Marshaler{EmitDefaults: true}
err := marshaler.Marshal(buf, obj)
a.data = buf
a.Error = err
a.length = buf.Len()
a.t = "json"
return a
}
func (a *Agent) XMLData(obj interface{}) *Agent {
data, err := xml.Marshal(obj)
a.data = bytes.NewBuffer(data)
a.length = len(data)
a.Error = err
a.t = "xml"
return a
}
type File struct {
Filename string
Fieldname string
Data []byte
}
func NewFile(field string, filename string) (*File, error) {
absFile, err := filepath.Abs(filename)
if err != nil {
return nil, err
}
fn := filepath.Base(absFile)
data, err := ioutil.ReadFile(absFile)
if err != nil {
return nil, err
}
return &File{
Filename: fn,
Fieldname: field,
Data: data,
}, nil
}
func NewFileByBytes(field string, filename string, data []byte) (*File, error) {
fn := filepath.Base(filename)
return &File{
Filename: fn,
Fieldname: field,
Data: data,
}, nil
}
func NewFileByReader(field string, filename string, rd io.Reader) (*File, error) {
fn := filepath.Base(filename)
data, err := ioutil.ReadAll(rd)
if err != nil {
return nil, err
}
return &File{
Filename: fn,
Fieldname: field,
Data: data,
}, nil
}
func (a *Agent) FileData(files ...*File) *Agent {
a.files = append(a.files, files...)
a.t = "multipart"
return a
}
// Do builds and performs the HTTP request, applying files, cipher,
// opentracing, headers, query parameters, basic auth and cookies
// configured on the Agent. It returns the raw response; if a
// ResponseProcessor is set it is given the response first.
func (a *Agent) Do() (*http.Response, error) {
	if a.Error != nil {
		return nil, a.Error
	}
	content_type := types[a.t]
	//! multipart file upload overrides any previously set body
	if len(a.files) > 0 {
		buf := &bytes.Buffer{}
		mw := multipart.NewWriter(buf)
		for _, file := range a.files {
			fw, _ := mw.CreateFormFile(file.Fieldname, file.Filename)
			fw.Write(file.Data)
		}
		a.data = buf
		content_type = mw.FormDataContentType()
		mw.Close()
	}
	//! encrypt the request body when a cipher is configured
	// nil guard: ioutil.ReadAll on a nil reader panics (e.g. a GET
	// with a cipher set but no body).
	if a.cipher != nil && a.data != nil {
		byts, err := ioutil.ReadAll(a.data)
		if err != nil {
			return nil, err
		}
		enbyts, err := a.cipher.Encrypt(byts)
		if err != nil {
			return nil, err
		}
		a.headerIn.Set(CIPHER_HEADER, "true")
		a.data = bytes.NewBuffer(enbyts)
		a.length = len(enbyts)
	}
	req, err := http.NewRequest(a.m, a.u.String(), a.data)
	if err != nil {
		a.Error = err
		return nil, err
	}
	//! opentracing: wrap the request; the span finishes when Do returns
	if a.tracer != nil {
		traceReq, ht := nethttp.TraceRequest(a.tracer, req, nethttp.OperationName(fmt.Sprintf("HTTP %s %s", a.m, a.u.String())))
		req = traceReq
		defer ht.Finish()
	}
	//! headers
	req.Header = a.headerIn
	req.Header.Set("Content-Type", content_type)
	//! merge configured query values into the URL's own query string
	q := req.URL.Query()
	for k, v := range a.query {
		for _, vv := range v {
			q.Add(k, vv)
		}
	}
	req.URL.RawQuery = q.Encode()
	//! basic auth
	if a.u.User != nil {
		if password, ok := a.u.User.Password(); ok {
			req.SetBasicAuth(a.u.User.Username(), password)
		}
	}
	//! cookies
	for _, cookie := range a.cookies {
		req.AddCookie(cookie)
	}
	if a.debug {
		dump, _ := httputil.DumpRequest(req, true)
		log.Printf("api request\n-------------------------------\n%s\n", string(dump))
	}
	resp, err := a.client.Do(req)
	if resp != nil {
		a.headerOut = resp.Header
	}
	if err != nil {
		// resp may be nil on transport errors; the dump/decrypt code
		// below would dereference it and panic, so bail out here.
		return resp, err
	}
	if a.debug {
		dump, _ := httputil.DumpResponse(resp, true)
		log.Printf("api response\n-------------------------------\n%s\n", string(dump))
	}
	//! decrypt the response body when the server marked it as encoded
	if a.cipher != nil {
		if strings.ToLower(resp.Header.Get(CIPHER_HEADER)) == "true" {
			enbyts, err := ioutil.ReadAll(resp.Body)
			if err != nil {
				return nil, err
			}
			debyts, err := a.cipher.Decrypt(enbyts)
			if err != nil {
				return nil, err
			}
			resp.Header.Del(CIPHER_HEADER)
			resp.Body = ioutil.NopCloser(bytes.NewBuffer(debyts))
			resp.ContentLength = int64(len(debyts))
		}
	}
	//! hand the response to a custom processor when configured
	if a.processor != nil {
		return a.processor(resp)
	}
	return resp, nil
}
func (a *Agent) Status() (int, string, error) {
resp, err := a.Do()
if err != nil {
a.Error = err
return http.StatusInternalServerError, http.StatusText(http.StatusInternalServerError), err
}
return resp.StatusCode, resp.Status, nil
}
// Bytes performs the request and returns the status code and the raw
// response body. On a non-200 status the body (or status line) becomes
// the error. On success the body is restored on the response so callers
// may read it again.
func (a *Agent) Bytes() (int, []byte, error) {
	resp, err := a.Do()
	if err != nil {
		a.Error = err
		return http.StatusInternalServerError, []byte{}, err
	}
	defer resp.Body.Close()
	if a.debug {
		dump, _ := httputil.DumpResponse(resp, true)
		log.Printf("api response\n--------------------------------\n%s\n", string(dump))
	}
	if resp.StatusCode != http.StatusOK {
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			a.Error = err
			// "%s" guards against '%' in the status being treated as
			// a format verb (non-constant format string).
			return resp.StatusCode, nil, fmt.Errorf("%s", resp.Status)
		}
		a.Error = fmt.Errorf("%s", string(body))
		return resp.StatusCode, nil, a.Error
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		a.Error = err
		return resp.StatusCode, nil, err
	}
	// Restore the body so the caller can re-read the response.
	resp.Body = ioutil.NopCloser(bytes.NewBuffer(body))
	return resp.StatusCode, body, a.Error
}
func (a *Agent) Text() (int, string, error) {
code, bytes, err := a.Bytes()
return code, string(bytes), err
}
func (a *Agent) JSON(obj interface{}) (int, error) {
resp, err := a.Do()
if err != nil {
a.Error = err
return http.StatusInternalServerError, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
a.Error = err
return resp.StatusCode, fmt.Errorf(resp.Status)
}
a.Error = fmt.Errorf(resp.Status)
return resp.StatusCode, fmt.Errorf(string(body))
}
//! decode bytes to json
if obj != nil {
if err := json.NewDecoder(resp.Body).Decode(&obj); err != nil {
a.Error = err
return resp.StatusCode, err
}
}
return resp.StatusCode, a.Error
}
func (a *Agent) JSONPB(obj proto.Message) (int, error) {
resp, err := a.Do()
if err != nil {
a.Error = err
return http.StatusInternalServerError, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
a.Error = err
return resp.StatusCode, fmt.Errorf(resp.Status)
}
a.Error = fmt.Errorf(resp.Status)
return resp.StatusCode, fmt.Errorf(string(body))
}
//! decode bytes to jsonpb
if obj != nil {
if err := jsonpb.Unmarshal(resp.Body, obj); err != nil {
a.Error = err
return resp.StatusCode, err
}
}
return resp.StatusCode, a.Error
}
func (a *Agent) XML(obj interface{}) (int, error) {
resp, err := a.Do()
if err != nil {
a.Error = err
return http.StatusInternalServerError, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
a.Error = err
return resp.StatusCode, fmt.Errorf(resp.Status)
}
a.Error = fmt.Errorf(resp.Status)
return resp.StatusCode, fmt.Errorf(string(body))
}
//! decode bytes to json
if obj != nil {
if err := xml.NewDecoder(resp.Body).Decode(&obj); err != nil {
a.Error = err
return resp.StatusCode, err
}
}
return resp.StatusCode, a.Error
}
func (a *Agent) GetHeadIn() http.Header {
return a.headerIn
}
func (a *Agent) GetHeadOut() http.Header {
return a.headerOut
}
|
package goasana
import (
"encoding/json"
"errors"
"strconv"
"strings"
)
type Error struct {
Message string
}
type BaseData struct {
Id int
Name string
}
type Tag struct {
BaseData
}
type Project struct {
BaseData
}
type Workspace struct {
BaseData
}
type User struct {
BaseData
Email string
Photo map[string]string
Workspaces []Workspace
}
type Task struct {
BaseData
Assignee User
Assignee_status string
Created_at string
Completed bool
Completed_ad string
Due_on string
Followers []User
Modified_at string
Projects []Project
Parent BaseData
Workspace Workspace
}
const (
main_uri string = "https://app.asana.com/api/1.0"
users_uri string = "/users"
workspaces_uri string = "/workspaces"
me_uri string = "/me"
tasks_uri string = "/tasks"
projects_uri string = "/projects"
)
// checkForErrors collapses a slice of API Error values into a single Go
// error, joining the messages with newlines. It returns nil when the
// slice is empty.
func checkForErrors(apiErrs []Error) error {
	if len(apiErrs) == 0 {
		return nil
	}
	msgs := make([]string, 0, len(apiErrs))
	for _, e := range apiErrs {
		msgs = append(msgs, e.Message)
	}
	return errors.New(strings.Join(msgs, "\n"))
}
// GetMe fetches the profile of the authenticated user from the Asana API.
func GetMe() (me *User, err error) {
	data, err := SendRequest("GET", main_uri+users_uri+me_uri)
	if err != nil {
		return
	}
	// Anonymous envelope matching Asana's {"data": ..., "errors": ...} shape.
	var temp struct {
		Data   User
		Errors []Error
	}
	// Propagate decode failures instead of silently returning zero values
	// (the original discarded json.Unmarshal's error).
	if err = json.Unmarshal(data, &temp); err != nil {
		return
	}
	if err = checkForErrors(temp.Errors); err != nil {
		return
	}
	return &temp.Data, nil
}
func GetUsers() (users []User, err error) {
data, err := SendRequest("GET", main_uri+users_uri)
if err != nil {
return
}
var temp struct {
Data []User
Errors []Error
}
json.Unmarshal(data, &temp)
err = checkForErrors(temp.Errors)
if err != nil {
return
}
return temp.Data, nil
}
func GetUsersFromWorkspace(workspace_id int) (users []User, err error) {
idstring := "/" + strconv.FormatInt(int64(workspace_id), 10)
data, err := SendRequest("GET", main_uri+workspaces_uri+idstring+users_uri)
if err != nil {
return
}
var temp struct {
Data []User
Errors []Error
}
json.Unmarshal(data, &temp)
err = checkForErrors(temp.Errors)
if err != nil {
return
}
return temp.Data, nil
}
func GetTaskFromUser(workspace, userid int) (tasks []Task, err error) {
filters := map[string]string{
"assignee": strconv.FormatInt(int64(userid), 10),
"workspace": strconv.FormatInt(int64(workspace), 10)}
data, err := SendRequestWithFilters("GET", main_uri+tasks_uri, filters)
if err != nil {
return
}
var temp struct {
Data []Task
Errors []Error
}
json.Unmarshal(data, &temp)
err = checkForErrors(temp.Errors)
if err != nil {
return
}
return temp.Data, nil
}
func GetTaskData(taskid int) (task Task, err error) {
taskstr := "/" + strconv.FormatInt(int64(taskid), 10)
data, err := SendRequest("GET", main_uri+tasks_uri+taskstr)
if err != nil {
return
}
var temp struct {
Data Task
Errors []Error
}
json.Unmarshal(data, &temp)
err = checkForErrors(temp.Errors)
if err != nil {
return
}
return temp.Data, nil
}
Added workspace and project data
Created functions to fetch workspaces and projects
package goasana
import (
"encoding/json"
"errors"
"fmt"
"strconv"
"strings"
)
type Error struct {
Message string
}
type BaseData struct {
Id int
Name string
}
type Tag struct {
BaseData
}
type Team struct {
BaseData
}
type Project struct {
BaseData
Archivied bool
Created_at string
Followers []User
Color string
Notes string
Workspace Workspace
Team Team
}
type Workspace struct {
BaseData
Is_Organization bool
}
type User struct {
BaseData
Email string
Photo map[string]string
Workspaces []Workspace
}
type Task struct {
BaseData
Assignee User
Assignee_status string
Created_at string
Completed bool
Completed_ad string
Due_on string
Followers []User
Modified_at string
Projects []Project
Parent BaseData
Workspace Workspace
}
const (
main_uri string = "https://app.asana.com/api/1.0"
users_uri string = "/users"
workspaces_uri string = "/workspaces"
me_uri string = "/me"
tasks_uri string = "/tasks"
projects_uri string = "/projects"
organizations_uri string = "/organizations"
)
func checkForErrors(err []Error) error {
if len(err) == 0 {
return nil
}
lines := make([]string, len(err))
for i, e := range err {
lines[i] = e.Message
}
return errors.New(strings.Join(lines, "\n"))
}
func GetMe() (me *User, err error) {
data, err := SendRequest("GET", main_uri+users_uri+me_uri)
if err != nil {
return
}
var temp struct {
Data User
Errors []Error
}
json.Unmarshal(data, &temp)
err = checkForErrors(temp.Errors)
if err != nil {
return
}
return &temp.Data, nil
}
func GetUsers() (users []User, err error) {
data, err := SendRequest("GET", main_uri+users_uri)
if err != nil {
return
}
var temp struct {
Data []User
Errors []Error
}
json.Unmarshal(data, &temp)
err = checkForErrors(temp.Errors)
if err != nil {
return
}
return temp.Data, nil
}
func GetUsersFromWorkspace(workspace_id int) (users []User, err error) {
idstring := "/" + strconv.FormatInt(int64(workspace_id), 10)
data, err := SendRequest("GET", main_uri+workspaces_uri+idstring+users_uri)
if err != nil {
return
}
var temp struct {
Data []User
Errors []Error
}
json.Unmarshal(data, &temp)
err = checkForErrors(temp.Errors)
if err != nil {
return
}
return temp.Data, nil
}
func GetTaskFromUser(workspace, userid int) (tasks []Task, err error) {
filters := map[string]string{
"assignee": strconv.FormatInt(int64(userid), 10),
"workspace": strconv.FormatInt(int64(workspace), 10)}
data, err := SendRequestWithFilters("GET", main_uri+tasks_uri, filters)
if err != nil {
return
}
var temp struct {
Data []Task
Errors []Error
}
json.Unmarshal(data, &temp)
err = checkForErrors(temp.Errors)
if err != nil {
return
}
return temp.Data, nil
}
func GetTaskData(taskid int) (task Task, err error) {
taskstr := "/" + strconv.FormatInt(int64(taskid), 10)
data, err := SendRequest("GET", main_uri+tasks_uri+taskstr)
if err != nil {
return
}
var temp struct {
Data Task
Errors []Error
}
json.Unmarshal(data, &temp)
err = checkForErrors(temp.Errors)
if err != nil {
return
}
return temp.Data, nil
}
// GetWorkspaces fetches all workspaces visible to the authenticated user.
func GetWorkspaces() (workspaces []Workspace, err error) {
	data, err := SendRequest("GET", main_uri+workspaces_uri)
	if err != nil {
		return
	}
	// Anonymous envelope matching Asana's {"data": ..., "errors": ...} shape.
	var temp struct {
		Data   []Workspace
		Errors []Error
	}
	// Removed stray fmt.Println(string(data)): it dumped every API
	// response (potentially sensitive) to stdout. Also propagate decode
	// failures instead of silently returning zero values.
	if err = json.Unmarshal(data, &temp); err != nil {
		return
	}
	if err = checkForErrors(temp.Errors); err != nil {
		return
	}
	return temp.Data, nil
}
func GetProjects(workspace_id int) (projects []Project, err error) {
filters := map[string]string{
"workspace": strconv.FormatInt(int64(workspace_id), 10)}
data, err := SendRequestWithFilters("GET", main_uri+projects_uri, filters)
if err != nil {
return
}
var temp struct {
Data []Project
Errors []Error
}
json.Unmarshal(data, &temp)
err = checkForErrors(temp.Errors)
if err != nil {
return
}
return temp.Data, nil
}
// GetProjectData fetches a single project by its numeric id.
func GetProjectData(project_id int) (project Project, err error) {
	idstr := "/" + strconv.FormatInt(int64(project_id), 10)
	data, err := SendRequest("GET", main_uri+projects_uri+idstr)
	// NOTE(review): stray debug output — prints the request URL to stdout
	// on every call; consider removing it or guarding it with a debug flag.
	fmt.Println(main_uri + projects_uri + idstr)
	if err != nil {
		return
	}
	// Anonymous envelope matching Asana's {"data": ..., "errors": ...} shape.
	var temp struct {
		Data   Project
		Errors []Error
	}
	// NOTE(review): json.Unmarshal's error is discarded; a malformed
	// payload silently yields zero values here.
	json.Unmarshal(data, &temp)
	err = checkForErrors(temp.Errors)
	if err != nil {
		return
	}
	return temp.Data, nil
}
|
// +build !noglobals
package inj
//////////////////////////////////////////////
// Interface definitions
//////////////////////////////////////////////
// A Grapher is anything that can represent an application graph
type Grapher interface {
Provide(inputs ...interface{}) error
Inject(fn interface{}, args ...interface{})
Assert() (valid bool, errors []string)
}
//////////////////////////////////////////////
// The one true global variable
//////////////////////////////////////////////
// A default grapher to use in the public API
var graph Grapher = NewGraph()
// Fetch the current grapher instance
func GetGrapher() Grapher {
return graph
}
// Set a specific grapher instance
func SetGrapher(g Grapher) {
graph = g
}
//////////////////////////////////////////////
// Public API
//////////////////////////////////////////////
// Insert a set of arbitrary objects into the
// application graph
func Provide(inputs ...interface{}) error {
return graph.Provide(inputs...)
}
// Given a function, call it with arguments assigned
// from the graph. Additional arguments can be provided
// for the sake of utility.
func Inject(fn interface{}, args ...interface{}) {
graph.Inject(fn, args...)
}
// Make sure that all provided dependencies have their
// requirements met, and return a list of errors if they
// don't.
func Assert() (valid bool, errors []string) {
return graph.Assert()
}
Update API to support datasources
// +build !noglobals
package inj
//////////////////////////////////////////////
// Interface definitions
//////////////////////////////////////////////
// A Grapher is anything that can represent an application graph
type Grapher interface {
Provide(inputs ...interface{}) error
Inject(fn interface{}, args ...interface{})
Assert() (valid bool, errors []string)
AddDatasource(...interface{}) error
}
//////////////////////////////////////////////
// The one true global variable
//////////////////////////////////////////////
// A default grapher to use in the public API
var graph Grapher = NewGraph()
// Fetch the current grapher instance
func GetGrapher() Grapher {
return graph
}
// Set a specific grapher instance
func SetGrapher(g Grapher) {
graph = g
}
//////////////////////////////////////////////
// Public API
//////////////////////////////////////////////
// Insert a set of arbitrary objects into the
// application graph
func Provide(inputs ...interface{}) error {
return graph.Provide(inputs...)
}
// Given a function, call it with arguments assigned
// from the graph. Additional arguments can be provided
// for the sake of utility.
func Inject(fn interface{}, args ...interface{}) {
graph.Inject(fn, args...)
}
// Make sure that all provided dependencies have their
// requirements met, and return a list of errors if they
// don't.
func Assert() (valid bool, errors []string) {
return graph.Assert()
}
// Add zero or more datasources to the global graph
// AddDatasource adds zero or more datasources to the global graph.
func AddDatasource(ds ...interface{}) error {
	// Bug fix: the original called graph.AddDatasource(ds), which passes
	// the collected slice as a single variadic element (one level of
	// nesting too deep); ds... forwards the arguments unchanged.
	return graph.AddDatasource(ds...)
}
|
package mitm
import (
"crypto/tls"
"crypto/x509"
"net"
"net/http"
"net/http/httptest"
"net/url"
"os"
"strings"
"testing"
)
var hostname, _ = os.Hostname()
// genCA generates a throwaway CA certificate/key pair for the local
// hostname and parses the leaf certificate so callers can inspect it.
func genCA() (tls.Certificate, error) {
	certPEM, keyPEM, err := GenCA(hostname)
	if err != nil {
		return tls.Certificate{}, err
	}
	cert, err := tls.X509KeyPair(certPEM, keyPEM)
	if err != nil {
		return tls.Certificate{}, err
	}
	// Populate Leaf; X509KeyPair leaves it unparsed.
	cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
	return cert, err
}
// testProxy spins up a TLS downstream server and a MITM proxy in front
// of it, performs a single request through the proxy, and hands the
// response to checkResp. setupReq may mutate the outgoing request;
// wrap wraps the proxy's upstream handler.
func testProxy(t *testing.T, ca *tls.Certificate, setupReq func(req *http.Request), wrap func(http.Handler) http.Handler, downstream http.HandlerFunc, checkResp func(*http.Response)) {
	ds := httptest.NewTLSServer(downstream)
	defer ds.Close()
	p := &Proxy{
		CA: ca,
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: true,
		},
		TLSServerConfig: &tls.Config{
			MinVersion: tls.VersionTLS12,
		},
		Wrap: wrap,
	}
	l, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatal("Listen:", err)
	}
	defer l.Close()
	go func() {
		if err := http.Serve(l, p); err != nil {
			if !strings.Contains(err.Error(), "use of closed network") {
				// Bug fix: t.Fatal must not be called from a non-test
				// goroutine (it calls runtime.Goexit and only stops
				// this goroutine); t.Error is safe here.
				t.Error("Serve:", err)
			}
		}
	}()
	t.Logf("requesting %q", ds.URL)
	req, err := http.NewRequest("GET", ds.URL, nil)
	if err != nil {
		t.Fatal("NewRequest:", err)
	}
	setupReq(req)
	// Route every request through the proxy listener over HTTPS.
	c := &http.Client{
		Transport: &http.Transport{
			Proxy: func(r *http.Request) (*url.URL, error) {
				u := *r.URL
				u.Scheme = "https"
				u.Host = l.Addr().String()
				return &u, nil
			},
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: true,
			},
		},
	}
	resp, err := c.Do(req)
	if err != nil {
		t.Fatal("Do:", err)
	}
	checkResp(resp)
}
// Test exercises the full proxy round trip: the request gains hop "a" from
// setupReq, "b" from the proxy's Wrap handler, and "c" from the downstream
// server; the response must carry all three.
func Test(t *testing.T) {
	const xHops = "X-Hops"
	ca, err := genCA()
	if err != nil {
		t.Fatal("loadCA:", err)
	}
	testProxy(t, &ca, func(req *http.Request) {
		req.Header.Set(xHops, "a")
	}, func(upstream http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			// Append hop "b" inside the proxy's wrap hook.
			hops := r.Header.Get(xHops) + "b"
			r.Header.Set(xHops, hops)
			upstream.ServeHTTP(w, r)
		})
	}, func(w http.ResponseWriter, r *http.Request) {
		hops := r.Header.Get(xHops) + "c"
		w.Header().Set(xHops, hops)
	}, func(resp *http.Response) {
		const w = "abc"
		if g := resp.Header.Get(xHops); g != w {
			t.Errorf("want %s to be %s, got %s", xHops, w, g)
		}
	})
}
mitm: add a -nettest flag to gate tests that hit the real network
package mitm
import (
"crypto/tls"
"crypto/x509"
"flag"
"io/ioutil"
"net"
"net/http"
"net/http/httptest"
"net/url"
"os"
"strings"
"testing"
)
// hostname names the generated test CA. The lookup error is deliberately
// ignored — presumably an empty hostname still yields a usable test cert;
// TODO(review): confirm GenCA tolerates "".
var hostname, _ = os.Hostname()

var (
	// nettest gates tests that talk to the real network (see TestNet).
	nettest = flag.Bool("nettest", false, "run tests over network")
)
// init parses command line flags so -nettest is available to the tests.
// NOTE(review): calling flag.Parse in a test file's init runs before the
// testing package registers its own flags — confirm this does not clash
// with `go test` flag handling on newer Go versions.
func init() {
	flag.Parse()
}
// genCA generates a self-signed CA certificate for the local hostname
// and returns it with its parsed leaf certificate populated.
func genCA() (tls.Certificate, error) {
	certPEM, keyPEM, err := GenCA(hostname)
	if err != nil {
		return tls.Certificate{}, err
	}
	pair, err := tls.X509KeyPair(certPEM, keyPEM)
	if err != nil {
		return tls.Certificate{}, err
	}
	leaf, err := x509.ParseCertificate(pair.Certificate[0])
	pair.Leaf = leaf
	return pair, err
}
// testProxy spins up a TLS "downstream" origin server and a mitm Proxy in
// front of it, issues a single GET through the proxy, and hands the
// response to checkResp for verification.
//
//	setupReq   mutates the outgoing request before it is sent.
//	wrap       is installed as the Proxy.Wrap hook.
//	downstream is the handler for the origin server.
//	checkResp  asserts on the response received through the proxy.
func testProxy(t *testing.T, ca *tls.Certificate, setupReq func(req *http.Request), wrap func(http.Handler) http.Handler, downstream http.HandlerFunc, checkResp func(*http.Response)) {
	ds := httptest.NewTLSServer(downstream)
	defer ds.Close()
	p := &Proxy{
		CA: ca,
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: true,
		},
		TLSServerConfig: &tls.Config{
			MinVersion: tls.VersionTLS12,
		},
		Wrap: wrap,
	}
	l, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatal("Listen:", err)
	}
	defer l.Close()
	go func() {
		if err := http.Serve(l, p); err != nil {
			if !strings.Contains(err.Error(), "use of closed network") {
				// t.Fatal must not be called from a goroutine other than
				// the one running the test (go vet flags it; it would only
				// exit this goroutine). Report and let the test fail.
				t.Error("Serve:", err)
			}
		}
	}()
	t.Logf("requesting %q", ds.URL)
	req, err := http.NewRequest("GET", ds.URL, nil)
	if err != nil {
		t.Fatal("NewRequest:", err)
	}
	setupReq(req)
	c := &http.Client{
		Transport: &http.Transport{
			Proxy: func(r *http.Request) (*url.URL, error) {
				u := *r.URL
				u.Scheme = "https"
				u.Host = l.Addr().String()
				return &u, nil
			},
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: true,
			},
		},
	}
	resp, err := c.Do(req)
	if err != nil {
		t.Fatal("Do:", err)
	}
	// Close the body after checkResp has had its chance to read it, so the
	// connection is not leaked.
	defer resp.Body.Close()
	checkResp(resp)
}
// Test exercises the full proxy round trip: the request gains hop "a" from
// setupReq, "b" from the proxy's Wrap handler, and "c" from the downstream
// server; the response must carry all three.
func Test(t *testing.T) {
	const xHops = "X-Hops"
	ca, err := genCA()
	if err != nil {
		t.Fatal("loadCA:", err)
	}
	testProxy(t, &ca, func(req *http.Request) {
		req.Header.Set(xHops, "a")
	}, func(upstream http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			// Append hop "b" inside the proxy's wrap hook.
			hops := r.Header.Get(xHops) + "b"
			r.Header.Set(xHops, hops)
			upstream.ServeHTTP(w, r)
		})
	}, func(w http.ResponseWriter, r *http.Request) {
		hops := r.Header.Get(xHops) + "c"
		w.Header().Set(xHops, hops)
	}, func(resp *http.Response) {
		const w = "abc"
		if g := resp.Header.Get(xHops); g != w {
			t.Errorf("want %s to be %s, got %s", xHops, w, g)
		}
	})
}
// TestNet proxies a request to a real external host (gated behind the
// -nettest flag). The local downstream handler must never be reached; the
// wrap hook must fire; the remote body must be exactly "ok\n".
func TestNet(t *testing.T) {
	if !*nettest {
		t.Skip()
	}
	ca, err := genCA()
	if err != nil {
		t.Fatal("loadCA:", err)
	}
	// NOTE(review): wrapped is written on a proxy goroutine and read in
	// checkResp without synchronization — confirm the response completing
	// is a sufficient happens-before here.
	var wrapped bool
	testProxy(t, &ca, func(req *http.Request) {
		nreq, _ := http.NewRequest("GET", "https://mitmtest.herokuapp.com/", nil)
		*req = *nreq
	}, func(upstream http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			wrapped = true
			upstream.ServeHTTP(w, r)
		})
	}, func(w http.ResponseWriter, r *http.Request) {
		// This handler runs on a server goroutine; t.Fatal there only kills
		// that goroutine (go vet flags it), so report with t.Error instead.
		t.Error("this shouldn't be hit")
	}, func(resp *http.Response) {
		if !wrapped {
			t.Errorf("expected wrap")
		}
		got, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			t.Fatal("ReadAll:", err)
		}
		if code := resp.StatusCode; code != 200 {
			t.Errorf("want code 200, got %d", code)
		}
		if g := string(got); g != "ok\n" {
			t.Errorf("want ok, got %q", g)
		}
	})
}
|
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bytes"
"context"
"crypto/md5"
"crypto/sha256"
"errors"
"fmt"
"hash"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"net/http/httputil"
"net/url"
"os"
"runtime"
"strings"
"sync"
"time"
"github.com/minio/minio-go/pkg/credentials"
"github.com/minio/minio-go/pkg/s3signer"
"github.com/minio/minio-go/pkg/s3utils"
)
// Client implements Amazon S3 compatible methods.
type Client struct {
	/// Standard options.

	// Parsed endpoint url provided by the user.
	endpointURL *url.URL

	// Holds various credential providers.
	credsProvider *credentials.Credentials

	// Custom signerType value overrides all credentials.
	overrideSignerType credentials.SignatureType

	// User supplied application name/version, appended to the
	// user agent (see SetAppInfo).
	appInfo struct {
		appName    string
		appVersion string
	}

	// Indicate whether we are using https or not
	secure bool

	// Needs allocation.
	httpClient     *http.Client
	bucketLocCache *bucketLocationCache

	// Advanced functionality: when isTraceEnabled is set, request and
	// response dumps are written to traceOutput (see TraceOn/TraceOff).
	isTraceEnabled bool
	traceOutput    io.Writer

	// S3 specific accelerated endpoint.
	s3AccelerateEndpoint string

	// Region endpoint
	region string

	// Random seed.
	random *rand.Rand

	// lookup indicates type of url lookup supported by server. If not specified,
	// default to Auto.
	lookup BucketLookupType
}
// Options for the NewWithOptions constructor.
type Options struct {
	Creds        *credentials.Credentials // credential provider chain
	Secure       bool                     // use HTTPS when true
	Region       string                   // optional fixed region; skips bucket-location lookup
	BucketLookup BucketLookupType         // DNS vs path style addressing
	// Add future fields here
}
// Global constants.
const (
	libraryName    = "minio-go"
	libraryVersion = "v6.0.6"
)

// User Agent should always follow the below style.
// Please open an issue to discuss any new changes here.
//
//	Minio (OS; ARCH) LIB/VER APP/VER
const (
	libraryUserAgentPrefix = "Minio (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
	libraryUserAgent       = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
)
// BucketLookupType is the type of URL lookup supported by the server.
type BucketLookupType int

// Different types of URL lookup supported by the server.
// Initialized to BucketLookupAuto.
const (
	BucketLookupAuto BucketLookupType = iota // decide DNS vs path per endpoint
	BucketLookupDNS                          // always virtual-host (DNS) style
	BucketLookupPath                         // always path style
)
// NewV2 - instantiate minio client with Amazon S3 signature version
// '2' compatibility.
func NewV2(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
	clnt, err := privateNew(endpoint, credentials.NewStaticV2(accessKeyID, secretAccessKey, ""), secure, "", BucketLookupAuto)
	if err != nil {
		return nil, err
	}
	// Force every request to be signed with signature V2.
	clnt.overrideSignerType = credentials.SignatureV2
	return clnt, nil
}
// NewV4 - instantiate minio client with Amazon S3 signature version
// '4' compatibility.
func NewV4(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
	clnt, err := privateNew(endpoint, credentials.NewStaticV4(accessKeyID, secretAccessKey, ""), secure, "", BucketLookupAuto)
	if err != nil {
		return nil, err
	}
	// Force every request to be signed with signature V4.
	clnt.overrideSignerType = credentials.SignatureV4
	return clnt, nil
}
// New - instantiate minio client, adds automatic verification of signature.
func New(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
	clnt, err := privateNew(endpoint, credentials.NewStaticV4(accessKeyID, secretAccessKey, ""), secure, "", BucketLookupAuto)
	if err != nil {
		return nil, err
	}
	// Pick the signature version the well-known endpoints require; an
	// endpoint is never both Google and Amazon.
	switch {
	case s3utils.IsGoogleEndpoint(*clnt.endpointURL):
		// Google cloud storage should be set to signature V2.
		clnt.overrideSignerType = credentials.SignatureV2
	case s3utils.IsAmazonEndpoint(*clnt.endpointURL):
		// Amazon S3 is set to signature v4.
		clnt.overrideSignerType = credentials.SignatureV4
	}
	return clnt, nil
}
// NewWithCredentials - instantiate minio client with credentials provider
// for retrieving credentials from various credentials provider such as
// IAM, File, Env etc. The bucket lookup style defaults to Auto.
func NewWithCredentials(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) {
	return privateNew(endpoint, creds, secure, region, BucketLookupAuto)
}
// NewWithRegion - instantiate minio client, with region configured. Unlike New(),
// NewWithRegion avoids bucket-location lookup operations and it is slightly faster.
// Use this function when your application deals with a single region.
func NewWithRegion(endpoint, accessKeyID, secretAccessKey string, secure bool, region string) (*Client, error) {
	return privateNew(endpoint, credentials.NewStaticV4(accessKeyID, secretAccessKey, ""), secure, region, BucketLookupAuto)
}
// NewWithOptions - instantiate minio client with an Options struct, the
// preferred constructor when several optional settings are needed.
func NewWithOptions(endpoint string, opts *Options) (*Client, error) {
	return privateNew(endpoint, opts.Creds, opts.Secure, opts.Region, opts.BucketLookup)
}
// lockedRandSource provides protected rand source, implements rand.Source interface.
type lockedRandSource struct {
lk sync.Mutex
src rand.Source
}
// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
func (r *lockedRandSource) Int63() (n int64) {
r.lk.Lock()
n = r.src.Int63()
r.lk.Unlock()
return
}
// Seed uses the provided seed value to initialize the generator to a
// deterministic state.
func (r *lockedRandSource) Seed(seed int64) {
r.lk.Lock()
r.src.Seed(seed)
r.lk.Unlock()
}
// redirectHeaders is the http.Client CheckRedirect hook: it copies headers
// across redirects and re-signs the request when the host changed, so the
// Authorization header is never leaked to a different host.
func (c *Client) redirectHeaders(req *http.Request, via []*http.Request) error {
	// Cap the redirect chain length.
	if len(via) >= 5 {
		return errors.New("stopped after 5 redirects")
	}
	if len(via) == 0 {
		return nil
	}
	lastRequest := via[len(via)-1]
	var reAuth bool
	for attr, val := range lastRequest.Header {
		// if hosts do not match do not copy Authorization header
		if attr == "Authorization" && req.Host != lastRequest.Host {
			reAuth = true
			continue
		}
		// Only carry over headers the redirected request does not set itself.
		if _, ok := req.Header[attr]; !ok {
			req.Header[attr] = val
		}
	}

	// NOTE(review): mutates the client's endpoint URL in place to follow the
	// redirect target — confirm this is safe under concurrent requests.
	*c.endpointURL = *req.URL

	value, err := c.credsProvider.Get()
	if err != nil {
		return err
	}
	var (
		signerType      = value.SignerType
		accessKeyID     = value.AccessKeyID
		secretAccessKey = value.SecretAccessKey
		sessionToken    = value.SessionToken
		region          = c.region
	)

	// Custom signer set then override the behavior.
	if c.overrideSignerType != credentials.SignatureDefault {
		signerType = c.overrideSignerType
	}

	// If signerType returned by credentials helper is anonymous,
	// then do not sign regardless of signerType override.
	if value.SignerType == credentials.SignatureAnonymous {
		signerType = credentials.SignatureAnonymous
	}

	// Re-sign only when the Authorization header was dropped above.
	if reAuth {
		// Check if there is no region override, if not get it from the URL if possible.
		if region == "" {
			region = s3utils.GetRegionFromURL(*c.endpointURL)
		}
		switch {
		case signerType.IsV2():
			return errors.New("signature V2 cannot support redirection")
		case signerType.IsV4():
			req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, getDefaultLocation(*c.endpointURL, region))
		}
	}
	return nil
}
// privateNew does the real work of constructing a Client: it parses the
// endpoint and wires up the credentials provider, region, HTTP client,
// bucket-location cache, RNG and lookup style.
func privateNew(endpoint string, creds *credentials.Credentials, secure bool, region string, lookup BucketLookupType) (*Client, error) {
	endpointURL, err := getEndpointURL(endpoint, secure)
	if err != nil {
		return nil, err
	}

	// An empty region means "derive it from the endpoint URL"; the bucket
	// location cache takes over when that also yields nothing.
	if region == "" {
		region = s3utils.GetRegionFromURL(*endpointURL)
	}

	clnt := &Client{
		credsProvider:  creds,
		secure:         secure,
		endpointURL:    endpointURL,
		region:         region,
		bucketLocCache: newBucketLocationCache(),
		// Locked source so the client's RNG is safe for concurrent use.
		random: rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())}),
		// DNS vs path style lookup; Auto picks per endpoint.
		lookup: lookup,
	}

	// The redirect policy re-signs requests and therefore needs the client
	// itself, so the HTTP client is wired up after construction.
	clnt.httpClient = &http.Client{
		Transport:     DefaultTransport,
		CheckRedirect: clnt.redirectHeaders,
	}

	return clnt, nil
}
// SetAppInfo - add application details to user agent.
func (c *Client) SetAppInfo(appName string, appVersion string) {
	// Both values are required; otherwise keep the current user agent.
	if appName == "" || appVersion == "" {
		return
	}
	c.appInfo.appName = appName
	c.appInfo.appVersion = appVersion
}
// SetCustomTransport - set new custom transport. No-op when the HTTP
// client has not been allocated.
func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) {
	// Set this to override default transport
	// ``http.DefaultTransport``.
	//
	// This transport is usually needed for debugging OR to add your
	// own custom TLS certificates on the client transport, for custom
	// CA's and certs which are not part of standard certificate
	// authority follow this example :-
	//
	//	tr := &http.Transport{
	//		TLSClientConfig: &tls.Config{RootCAs: pool},
	//		DisableCompression: true,
	//	}
	//	api.SetCustomTransport(tr)
	//
	if c.httpClient != nil {
		c.httpClient.Transport = customHTTPTransport
	}
}
// TraceOn - enable HTTP tracing, writing dumps to outputStream
// (os.Stdout when nil).
func (c *Client) TraceOn(outputStream io.Writer) {
	// Fall back to stdout when the caller passes nil.
	c.traceOutput = os.Stdout
	if outputStream != nil {
		c.traceOutput = outputStream
	}
	c.isTraceEnabled = true
}
// TraceOff - disable HTTP tracing. The trace output writer is left
// untouched so tracing can be re-enabled later via TraceOn.
func (c *Client) TraceOff() {
	// Disable tracing.
	c.isTraceEnabled = false
}
// SetS3TransferAccelerate - turns s3 accelerated endpoint on or off for all your
// requests. This feature is only specific to S3 for all other endpoints this
// function does nothing. To read further details on s3 transfer acceleration
// please visit -
// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) {
	if s3utils.IsAmazonEndpoint(*c.endpointURL) {
		c.s3AccelerateEndpoint = accelerateEndpoint
	}
}
// hashMaterials provides the hash writers relevant to the expected
// signature type:
//
//   - signature v4 over a secure connection: md5 only.
//   - signature v4 over an insecure connection: sha256 only.
//   - anonymous requests: md5 only.
func (c *Client) hashMaterials() (map[string]hash.Hash, map[string][]byte) {
	hashAlgos := make(map[string]hash.Hash)
	hashSums := make(map[string][]byte)
	switch {
	case c.overrideSignerType.IsV4() && c.secure:
		hashAlgos["md5"] = md5.New()
	case c.overrideSignerType.IsV4():
		hashAlgos["sha256"] = sha256.New()
	case c.overrideSignerType.IsAnonymous():
		hashAlgos["md5"] = md5.New()
	}
	return hashAlgos, hashSums
}
// requestMetadata - is container for all the values to make a request.
type requestMetadata struct {
	// If set newRequest presigns the URL instead of signing headers.
	presignURL bool

	// User supplied.
	bucketName   string
	objectName   string
	queryValues  url.Values
	customHeader http.Header
	expires      int64 // presign expiry, seconds; 0 disables presigning

	// Generated by our internal code.
	bucketLocation   string
	contentBody      io.Reader
	contentLength    int64
	contentMD5Base64 string // carries base64 encoded md5sum
	contentSHA256Hex string // carries hex encoded sha256sum
}
// dumpHTTP - dump HTTP request and response to c.traceOutput, with the
// Authorization signature redacted. Response bodies are included only for
// non-success statuses.
func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
	// Starts http dump.
	_, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------")
	if err != nil {
		return err
	}

	// Filter out Signature field from Authorization header.
	origAuth := req.Header.Get("Authorization")
	if origAuth != "" {
		req.Header.Set("Authorization", redactSignature(origAuth))
	}

	// Only display request header.
	reqTrace, err := httputil.DumpRequestOut(req, false)
	if err != nil {
		return err
	}

	// Write request to trace output.
	_, err = fmt.Fprint(c.traceOutput, string(reqTrace))
	if err != nil {
		return err
	}

	// Only display response header.
	var respTrace []byte

	// For errors we make sure to dump response body as well.
	if resp.StatusCode != http.StatusOK &&
		resp.StatusCode != http.StatusPartialContent &&
		resp.StatusCode != http.StatusNoContent {
		respTrace, err = httputil.DumpResponse(resp, true)
		if err != nil {
			return err
		}
	} else {
		// WORKAROUND for https://github.com/golang/go/issues/13942.
		// httputil.DumpResponse does not print response headers for
		// all successful calls which have response ContentLength set
		// to zero. Keep this workaround until the above bug is fixed.
		if resp.ContentLength == 0 {
			var buffer bytes.Buffer
			if err = resp.Header.Write(&buffer); err != nil {
				return err
			}
			respTrace = buffer.Bytes()
			respTrace = append(respTrace, []byte("\r\n")...)
		} else {
			respTrace, err = httputil.DumpResponse(resp, false)
			if err != nil {
				return err
			}
		}
	}

	// Write response to trace output.
	_, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n"))
	if err != nil {
		return err
	}

	// Ends the http dump.
	_, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------")
	if err != nil {
		return err
	}

	// Returns success.
	return nil
}
// do - execute http request, rewriting ambiguous EOF errors and dumping
// the exchange to the trace output when tracing is enabled.
func (c Client) do(req *http.Request) (*http.Response, error) {
	resp, err := c.httpClient.Do(req)
	if err != nil {
		// Handle this specifically for now until future Golang versions fix this issue properly.
		if urlErr, ok := err.(*url.Error); ok {
			if strings.Contains(urlErr.Err.Error(), "EOF") {
				return nil, &url.Error{
					Op:  urlErr.Op,
					URL: urlErr.URL,
					Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."),
				}
			}
		}
		return nil, err
	}

	// Response cannot be nil here; report an error if that is the case.
	if resp == nil {
		msg := "Response is empty. " + reportIssue
		return nil, ErrInvalidArgument(msg)
	}

	// If trace is enabled, dump http request and response.
	if c.isTraceEnabled {
		err = c.dumpHTTP(req, resp)
		if err != nil {
			return nil, err
		}
	}
	return resp, nil
}
// List of success status codes for which executeMethod returns the
// response without retrying.
var successStatus = []int{
	http.StatusOK,
	http.StatusNoContent,
	http.StatusPartialContent,
}
// executeMethod - instantiates a given method, and retries the
// request upon any error up to maxRetries attempts in a binomially
// delayed manner using a standard back off algorithm.
//
// The request body is rewound before each attempt when it is seekable;
// non-seekable bodies disable retries. On error responses the body is
// buffered back into res so callers can still read it.
func (c Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) {
	var isRetryable bool     // Indicates if request can be retried.
	var bodySeeker io.Seeker // Extracted seeker from io.Reader.
	var reqRetry = MaxRetry  // Indicates how many times we can retry the request

	if metadata.contentBody != nil {
		// Check if body is seekable then it is retryable.
		bodySeeker, isRetryable = metadata.contentBody.(io.Seeker)
		switch bodySeeker {
		case os.Stdin, os.Stdout, os.Stderr:
			// The standard streams are technically seekable but cannot be
			// rewound meaningfully.
			isRetryable = false
		}
		// Retry only when reader is seekable
		if !isRetryable {
			reqRetry = 1
		}

		// Figure out if the body can be closed - if yes
		// we will definitely close it upon the function
		// return.
		bodyCloser, ok := metadata.contentBody.(io.Closer)
		if ok {
			defer bodyCloser.Close()
		}
	}

	// Create a done channel to control 'newRetryTimer' go routine.
	doneCh := make(chan struct{}, 1)

	// Indicate to our routine to exit cleanly upon return.
	defer close(doneCh)

	// Blank indentifier is kept here on purpose since 'range' without
	// blank identifiers is only supported since go1.4
	// https://golang.org/doc/go1.4#forrange.
	for range c.newRetryTimer(reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) {
		// Retry executes the following function body if request has an
		// error until maxRetries have been exhausted, retry attempts are
		// performed after waiting for a given period of time in a
		// binomial fashion.
		if isRetryable {
			// Seek back to beginning for each attempt.
			if _, err = bodySeeker.Seek(0, 0); err != nil {
				// If seek failed, no need to retry.
				return nil, err
			}
		}

		// Instantiate a new request.
		var req *http.Request
		req, err = c.newRequest(method, metadata)
		if err != nil {
			errResponse := ToErrorResponse(err)
			if isS3CodeRetryable(errResponse.Code) {
				continue // Retry.
			}
			return nil, err
		}
		// Add context to request
		req = req.WithContext(ctx)

		// Initiate the request.
		res, err = c.do(req)
		if err != nil {
			// For supported http requests errors verify.
			if isHTTPReqErrorRetryable(err) {
				continue // Retry.
			}
			// For other errors, return here no need to retry.
			return nil, err
		}

		// For any known successful http status, return quickly.
		for _, httpStatus := range successStatus {
			if httpStatus == res.StatusCode {
				return res, nil
			}
		}

		// Read the body to be saved later.
		errBodyBytes, err := ioutil.ReadAll(res.Body)
		// res.Body should be closed
		closeResponse(res)
		if err != nil {
			return nil, err
		}

		// Save the body.
		errBodySeeker := bytes.NewReader(errBodyBytes)
		res.Body = ioutil.NopCloser(errBodySeeker)

		// For errors verify if its retryable otherwise fail quickly.
		errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))

		// Save the body back again.
		errBodySeeker.Seek(0, 0) // Seek back to starting point.
		res.Body = ioutil.NopCloser(errBodySeeker)

		// Bucket region if set in error response and the error
		// code dictates invalid region, we can retry the request
		// with the new region.
		//
		// Additionally we should only retry if bucketLocation and custom
		// region is empty.
		if metadata.bucketLocation == "" && c.region == "" {
			if errResponse.Code == "AuthorizationHeaderMalformed" || errResponse.Code == "InvalidRegion" {
				if metadata.bucketName != "" && errResponse.Region != "" {
					// Gather Cached location only if bucketName is present.
					// NOTE(review): the bool returned by Get is named like an
					// error and compared against false — presumably "location
					// already cached"; confirm the intended condition.
					if _, cachedLocationError := c.bucketLocCache.Get(metadata.bucketName); cachedLocationError != false {
						c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
						continue // Retry.
					}
				}
			}
		}

		// Verify if error response code is retryable.
		if isS3CodeRetryable(errResponse.Code) {
			continue // Retry.
		}

		// Verify if http status code is retryable.
		if isHTTPStatusRetryable(res.StatusCode) {
			continue // Retry.
		}

		// For all other cases break out of the retry loop.
		break
	}
	return res, err
}
// newRequest - instantiate a new HTTP request for a given method: resolve
// the bucket location, build the target URL, attach the body and headers,
// then either presign the URL or sign the request per the signer type.
func (c Client) newRequest(method string, metadata requestMetadata) (req *http.Request, err error) {
	// If no method is supplied default to 'POST'.
	if method == "" {
		method = "POST"
	}

	location := metadata.bucketLocation
	if location == "" {
		if metadata.bucketName != "" {
			// Gather location only if bucketName is present.
			location, err = c.getBucketLocation(metadata.bucketName)
			if err != nil {
				if ToErrorResponse(err).Code != "AccessDenied" {
					return nil, err
				}
			}
			// Upon AccessDenied error on fetching bucket location, default
			// to possible locations based on endpoint URL. This can usually
			// happen when GetBucketLocation() is disabled using IAM policies.
		}
		if location == "" {
			location = getDefaultLocation(*c.endpointURL, c.region)
		}
	}

	// Look if target url supports virtual host.
	isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, metadata.bucketName)

	// Construct a new target URL.
	targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, isVirtualHost, metadata.queryValues)
	if err != nil {
		return nil, err
	}

	// Initialize a new HTTP request for the method. The body is attached
	// later so presigned URLs are generated from a bodiless request.
	req, err = http.NewRequest(method, targetURL.String(), nil)
	if err != nil {
		return nil, err
	}

	// Get credentials from the configured credentials provider.
	value, err := c.credsProvider.Get()
	if err != nil {
		return nil, err
	}

	var (
		signerType      = value.SignerType
		accessKeyID     = value.AccessKeyID
		secretAccessKey = value.SecretAccessKey
		sessionToken    = value.SessionToken
	)

	// Custom signer set then override the behavior.
	if c.overrideSignerType != credentials.SignatureDefault {
		signerType = c.overrideSignerType
	}

	// If signerType returned by credentials helper is anonymous,
	// then do not sign regardless of signerType override.
	if value.SignerType == credentials.SignatureAnonymous {
		signerType = credentials.SignatureAnonymous
	}

	// Generate presign url if needed, return right here.
	if metadata.expires != 0 && metadata.presignURL {
		if signerType.IsAnonymous() {
			return nil, ErrInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.")
		}
		if signerType.IsV2() {
			// Presign URL with signature v2.
			req = s3signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires, isVirtualHost)
		} else if signerType.IsV4() {
			// Presign URL with signature v4.
			req = s3signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires)
		}
		return req, nil
	}

	// Set 'User-Agent' header for the request.
	c.setUserAgent(req)

	// Set all headers.
	for k, v := range metadata.customHeader {
		req.Header.Set(k, v[0])
	}

	// Go net/http notoriously closes the request body.
	// - The request Body, if non-nil, will be closed by the underlying Transport, even on errors.
	// This can cause underlying *os.File seekers to fail, avoid that
	// by making sure to wrap the closer as a nop.
	if metadata.contentLength == 0 {
		req.Body = nil
	} else {
		req.Body = ioutil.NopCloser(metadata.contentBody)
	}

	// Set incoming content-length.
	req.ContentLength = metadata.contentLength
	if req.ContentLength <= -1 {
		// For unknown content length, we upload using transfer-encoding: chunked.
		req.TransferEncoding = []string{"chunked"}
	}

	// set md5Sum for content protection.
	if len(metadata.contentMD5Base64) > 0 {
		req.Header.Set("Content-Md5", metadata.contentMD5Base64)
	}

	// For anonymous requests just return.
	if signerType.IsAnonymous() {
		return req, nil
	}

	switch {
	case signerType.IsV2():
		// Add signature version '2' authorization header.
		req = s3signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost)
	case metadata.objectName != "" && method == "PUT" && metadata.customHeader.Get("X-Amz-Copy-Source") == "" && !c.secure:
		// Streaming signature is used by default for a PUT object request. Additionally we also
		// look if the initialized client is secure, if yes then we don't need to perform
		// streaming signature.
		req = s3signer.StreamingSignV4(req, accessKeyID,
			secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC())
	default:
		// Set sha256 sum for signature calculation only with signature version '4'.
		shaHeader := unsignedPayload
		if metadata.contentSHA256Hex != "" {
			shaHeader = metadata.contentSHA256Hex
		}
		req.Header.Set("X-Amz-Content-Sha256", shaHeader)

		// Add signature version '4' authorization header.
		req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, location)
	}

	// Return request.
	return req, nil
}
// setUserAgent stamps the request with the library user agent, plus the
// application identity when SetAppInfo has supplied both name and version.
func (c Client) setUserAgent(req *http.Request) {
	ua := libraryUserAgent
	if c.appInfo.appName != "" && c.appInfo.appVersion != "" {
		ua += " " + c.appInfo.appName + "/" + c.appInfo.appVersion
	}
	req.Header.Set("User-Agent", ua)
}
// makeTargetURL make a new target url for the given bucket/object,
// honoring S3 transfer acceleration, location-based Amazon endpoints and
// virtual-host vs path style addressing.
func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, isVirtualHostStyle bool, queryValues url.Values) (*url.URL, error) {
	host := c.endpointURL.Host
	// For Amazon S3 endpoint, try to fetch location based endpoint.
	if s3utils.IsAmazonEndpoint(*c.endpointURL) {
		if c.s3AccelerateEndpoint != "" && bucketName != "" {
			// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
			// Disable transfer acceleration for non-compliant bucket names.
			if strings.Contains(bucketName, ".") {
				return nil, ErrTransferAccelerationBucket(bucketName)
			}
			// If transfer acceleration is requested set new host.
			// For more details about enabling transfer acceleration read here.
			// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
			host = c.s3AccelerateEndpoint
		} else {
			// Do not change the host if the endpoint URL is a FIPS S3 endpoint.
			if !s3utils.IsAmazonFIPSGovCloudEndpoint(*c.endpointURL) {
				// Fetch new host based on the bucket location.
				host = getS3Endpoint(bucketLocation)
			}
		}
	}

	// Save scheme.
	scheme := c.endpointURL.Scheme

	// Strip port 80 and 443 so we won't send these ports in Host header.
	// The reason is that browsers and curl automatically remove :80 and :443
	// with the generated presigned urls, then a signature mismatch error.
	if h, p, err := net.SplitHostPort(host); err == nil {
		if scheme == "http" && p == "80" || scheme == "https" && p == "443" {
			host = h
		}
	}

	urlStr := scheme + "://" + host + "/"
	// Make URL only if bucketName is available, otherwise use the
	// endpoint URL.
	if bucketName != "" {
		// If endpoint supports virtual host style use that always.
		// Currently only S3 and Google Cloud Storage would support
		// virtual host style.
		if isVirtualHostStyle {
			urlStr = scheme + "://" + bucketName + "." + host + "/"
			if objectName != "" {
				urlStr = urlStr + s3utils.EncodePath(objectName)
			}
		} else {
			// If not fall back to using path style.
			urlStr = urlStr + bucketName + "/"
			if objectName != "" {
				urlStr = urlStr + s3utils.EncodePath(objectName)
			}
		}
	}

	// If there are any query values, add them to the end.
	if len(queryValues) > 0 {
		urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues)
	}

	return url.Parse(urlStr)
}
// isVirtualHostStyleRequest reports whether virtual-hosted-style requests
// should be used for the given endpoint and bucket.
func (c *Client) isVirtualHostStyleRequest(url url.URL, bucketName string) bool {
	if bucketName == "" {
		return false
	}

	switch c.lookup {
	case BucketLookupDNS:
		return true
	case BucketLookupPath:
		return false
	}

	// BucketLookupAuto: default to virtual host style only for Amazon and
	// Google storage; use path style requests in all other cases.
	return s3utils.IsVirtualHostSupported(url, bucketName)
}
Remove work-around for httputil.Dump (#1017)
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"bytes"
"context"
"crypto/md5"
"crypto/sha256"
"errors"
"fmt"
"hash"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"net/http/httputil"
"net/url"
"os"
"runtime"
"strings"
"sync"
"time"
"github.com/minio/minio-go/pkg/credentials"
"github.com/minio/minio-go/pkg/s3signer"
"github.com/minio/minio-go/pkg/s3utils"
)
// Client implements Amazon S3 compatible methods.
type Client struct {
	/// Standard options.

	// Parsed endpoint url provided by the user.
	endpointURL *url.URL

	// Holds various credential providers.
	credsProvider *credentials.Credentials

	// Custom signerType value overrides all credentials.
	overrideSignerType credentials.SignatureType

	// User supplied application name/version, appended to the
	// user agent (see SetAppInfo).
	appInfo struct {
		appName    string
		appVersion string
	}

	// Indicate whether we are using https or not
	secure bool

	// Needs allocation.
	httpClient     *http.Client
	bucketLocCache *bucketLocationCache

	// Advanced functionality: when isTraceEnabled is set, request and
	// response dumps are written to traceOutput (see TraceOn/TraceOff).
	isTraceEnabled bool
	traceOutput    io.Writer

	// S3 specific accelerated endpoint.
	s3AccelerateEndpoint string

	// Region endpoint
	region string

	// Random seed.
	random *rand.Rand

	// lookup indicates type of url lookup supported by server. If not specified,
	// default to Auto.
	lookup BucketLookupType
}
// Options for the NewWithOptions constructor.
type Options struct {
	Creds        *credentials.Credentials // credential provider chain
	Secure       bool                     // use HTTPS when true
	Region       string                   // optional fixed region; skips bucket-location lookup
	BucketLookup BucketLookupType         // DNS vs path style addressing
	// Add future fields here
}
// Global constants.
const (
	libraryName    = "minio-go"
	libraryVersion = "v6.0.6"
)

// User Agent should always follow the below style.
// Please open an issue to discuss any new changes here.
//
//	Minio (OS; ARCH) LIB/VER APP/VER
const (
	libraryUserAgentPrefix = "Minio (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
	libraryUserAgent       = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
)
// BucketLookupType is type of url lookup supported by server.
type BucketLookupType int

// Different types of url lookup supported by the server. Initialized to BucketLookupAuto.
const (
	// BucketLookupAuto lets the SDK pick the style (see
	// isVirtualHostStyleRequest: DNS for Amazon/Google, path otherwise).
	BucketLookupAuto BucketLookupType = iota
	// BucketLookupDNS forces virtual-host (DNS) style URLs.
	BucketLookupDNS
	// BucketLookupPath forces path style URLs.
	BucketLookupPath
)
// NewV2 - instantiate minio client with Amazon S3 signature version
// '2' compatibility.
func NewV2(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
	staticCreds := credentials.NewStaticV2(accessKeyID, secretAccessKey, "")
	client, err := privateNew(endpoint, staticCreds, secure, "", BucketLookupAuto)
	if err != nil {
		return nil, err
	}
	// Pin every request to signature version '2'.
	client.overrideSignerType = credentials.SignatureV2
	return client, nil
}
// NewV4 - instantiate minio client with Amazon S3 signature version
// '4' compatibility.
func NewV4(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
	staticCreds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "")
	client, err := privateNew(endpoint, staticCreds, secure, "", BucketLookupAuto)
	if err != nil {
		return nil, err
	}
	// Pin every request to signature version '4'.
	client.overrideSignerType = credentials.SignatureV4
	return client, nil
}
// New - instantiate minio client, adds automatic verification of signature.
func New(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
	client, err := privateNew(endpoint, credentials.NewStaticV4(accessKeyID, secretAccessKey, ""), secure, "", BucketLookupAuto)
	if err != nil {
		return nil, err
	}
	// Pick the right signature scheme for well-known endpoints:
	// Google cloud storage only speaks signature V2, Amazon S3 is
	// pinned to signature V4.
	if s3utils.IsGoogleEndpoint(*client.endpointURL) {
		client.overrideSignerType = credentials.SignatureV2
	}
	if s3utils.IsAmazonEndpoint(*client.endpointURL) {
		client.overrideSignerType = credentials.SignatureV4
	}
	return client, nil
}
// NewWithCredentials - instantiate minio client with credentials provider
// for retrieving credentials from various credentials provider such as
// IAM, File, Env etc.
//
// Bucket lookup style is left at Auto; callers needing explicit DNS or
// path style should use NewWithOptions instead.
func NewWithCredentials(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) {
	return privateNew(endpoint, creds, secure, region, BucketLookupAuto)
}
// NewWithRegion - instantiate minio client, with region configured. Unlike New(),
// NewWithRegion avoids bucket-location lookup operations and it is slightly faster.
// Use this function when your application deals with a single region.
func NewWithRegion(endpoint, accessKeyID, secretAccessKey string, secure bool, region string) (*Client, error) {
	// Static signature v4 credentials, no session token.
	return privateNew(endpoint, credentials.NewStaticV4(accessKeyID, secretAccessKey, ""), secure, region, BucketLookupAuto)
}
// NewWithOptions - instantiate minio client with options
func NewWithOptions(endpoint string, opts *Options) (*Client, error) {
	// Guard against a nil options pointer: dereferencing it below would
	// otherwise panic inside the library instead of returning an error.
	if opts == nil {
		return nil, errors.New("no options provided")
	}
	return privateNew(endpoint, opts.Creds, opts.Secure, opts.Region, opts.BucketLookup)
}
// lockedRandSource provides protected rand source, implements rand.Source interface.
type lockedRandSource struct {
	lk  sync.Mutex  // guards src for concurrent use
	src rand.Source // underlying, non-thread-safe source
}
// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
func (r *lockedRandSource) Int63() (n int64) {
	r.lk.Lock()
	defer r.lk.Unlock()
	return r.src.Int63()
}
// Seed uses the provided seed value to initialize the generator to a
// deterministic state.
func (r *lockedRandSource) Seed(seed int64) {
	r.lk.Lock()
	defer r.lk.Unlock()
	r.src.Seed(seed)
}
// Redirect requests by re signing the request.
//
// Installed as the http.Client CheckRedirect hook: it copies headers from
// the previous request onto the redirected one and, when the host changed
// (so the Authorization header must not be copied), re-signs the request.
func (c *Client) redirectHeaders(req *http.Request, via []*http.Request) error {
	// Cap the redirect chain length.
	if len(via) >= 5 {
		return errors.New("stopped after 5 redirects")
	}
	// Nothing to copy for the very first request.
	if len(via) == 0 {
		return nil
	}
	lastRequest := via[len(via)-1]
	var reAuth bool
	for attr, val := range lastRequest.Header {
		// if hosts do not match do not copy Authorization header
		if attr == "Authorization" && req.Host != lastRequest.Host {
			reAuth = true
			continue
		}
		if _, ok := req.Header[attr]; !ok {
			req.Header[attr] = val
		}
	}

	// NOTE(review): this permanently repoints the client at the redirect
	// target URL for all subsequent API calls — confirm this is intended.
	*c.endpointURL = *req.URL

	value, err := c.credsProvider.Get()
	if err != nil {
		return err
	}
	var (
		signerType      = value.SignerType
		accessKeyID     = value.AccessKeyID
		secretAccessKey = value.SecretAccessKey
		sessionToken    = value.SessionToken
		region          = c.region
	)

	// Custom signer set then override the behavior.
	if c.overrideSignerType != credentials.SignatureDefault {
		signerType = c.overrideSignerType
	}

	// If signerType returned by credentials helper is anonymous,
	// then do not sign regardless of signerType override.
	if value.SignerType == credentials.SignatureAnonymous {
		signerType = credentials.SignatureAnonymous
	}

	if reAuth {
		// Check if there is no region override, if not get it from the URL if possible.
		if region == "" {
			region = s3utils.GetRegionFromURL(*c.endpointURL)
		}
		switch {
		case signerType.IsV2():
			return errors.New("signature V2 cannot support redirection")
		case signerType.IsV4():
			// The local reassignment of req looks dead, but SignV4 takes the
			// request by value and the Header map is shared with the caller's
			// request — presumably the signature is written through that map;
			// verify against s3signer.SignV4.
			req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, getDefaultLocation(*c.endpointURL, region))
		}
	}
	return nil
}
// privateNew wires up a Client: parses the endpoint, installs the
// credential provider, the HTTP client, the bucket-location cache and a
// locked random source used for retry jitter.
func privateNew(endpoint string, creds *credentials.Credentials, secure bool, region string, lookup BucketLookupType) (*Client, error) {
	// construct endpoint.
	endpointURL, err := getEndpointURL(endpoint, secure)
	if err != nil {
		return nil, err
	}

	// When no explicit region was given, try deriving one from the URL;
	// an empty result means the bucket location cache is consulted at
	// request time instead.
	if region == "" {
		region = s3utils.GetRegionFromURL(*endpointURL)
	}

	clnt := &Client{
		credsProvider:  creds,
		secure:         secure,
		endpointURL:    endpointURL,
		region:         region,
		bucketLocCache: newBucketLocationCache(),
		random:         rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())}),
		// lookup selects DNS vs Path style URLs; Auto means DNS for
		// Amazon/Google endpoints and Path for everything else.
		lookup: lookup,
	}

	// The HTTP client needs the Client value for its redirect hook, so it
	// is attached after construction.
	clnt.httpClient = &http.Client{
		Transport:     DefaultTransport,
		CheckRedirect: clnt.redirectHeaders,
	}

	return clnt, nil
}
// SetAppInfo - add application details to user agent.
func (c *Client) SetAppInfo(appName string, appVersion string) {
	// Both values are required; otherwise the default user agent is kept.
	if appName == "" || appVersion == "" {
		return
	}
	c.appInfo.appName = appName
	c.appInfo.appVersion = appVersion
}
// SetCustomTransport - set new custom transport.
//
// Overrides the transport of the internal HTTP client (by default
// ``http.DefaultTransport``). This is usually needed for debugging, or
// for adding custom TLS certificates on the client transport — e.g. for
// private CAs that are not part of the standard certificate authorities:
//
//	tr := &http.Transport{
//	        TLSClientConfig:    &tls.Config{RootCAs: pool},
//	        DisableCompression: true,
//	}
//	api.SetCustomTransport(tr)
func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) {
	if c.httpClient == nil {
		return
	}
	c.httpClient.Transport = customHTTPTransport
}
// TraceOn - enable HTTP tracing.
func (c *Client) TraceOn(outputStream io.Writer) {
	// A nil stream defaults to standard output.
	if outputStream == nil {
		outputStream = os.Stdout
	}
	c.traceOutput = outputStream
	c.isTraceEnabled = true
}
// TraceOff - disable HTTP tracing.
func (c *Client) TraceOff() {
	// Disable tracing. The previously configured output writer is kept
	// but unused until TraceOn is called again.
	c.isTraceEnabled = false
}
// SetS3TransferAccelerate - turns s3 accelerated endpoint on or off for all your
// requests. This feature is only specific to S3 for all other endpoints this
// function does nothing. To read further details on s3 transfer acceleration
// please vist -
// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) {
	// Acceleration only applies to genuine Amazon S3 endpoints.
	if !s3utils.IsAmazonEndpoint(*c.endpointURL) {
		return
	}
	c.s3AccelerateEndpoint = accelerateEndpoint
}
// Hash materials provides relevant initialized hash algo writers
// based on the expected signature type.
//
//   - For signature v4 request if the connection is insecure compute only sha256.
//   - For signature v4 request if the connection is secure compute only md5.
//   - For anonymous request compute md5.
func (c *Client) hashMaterials() (hashAlgos map[string]hash.Hash, hashSums map[string][]byte) {
	hashSums = make(map[string][]byte)
	hashAlgos = make(map[string]hash.Hash)
	switch {
	case c.overrideSignerType.IsV4() && c.secure:
		hashAlgos["md5"] = md5.New()
	case c.overrideSignerType.IsV4():
		hashAlgos["sha256"] = sha256.New()
	case c.overrideSignerType.IsAnonymous():
		hashAlgos["md5"] = md5.New()
	}
	return hashAlgos, hashSums
}
// requestMetadata - is container for all the values to make a request.
type requestMetadata struct {
	// If set newRequest presigns the URL.
	presignURL bool

	// User supplied.
	bucketName   string
	objectName   string
	queryValues  url.Values
	customHeader http.Header
	expires      int64 // presign expiry, used only when presignURL is set

	// Generated by our internal code.
	bucketLocation   string
	contentBody      io.Reader
	contentLength    int64  // -1 or lower triggers chunked transfer encoding
	contentMD5Base64 string // carries base64 encoded md5sum
	contentSHA256Hex string // carries hex encoded sha256sum
}
// dumpHTTP - dump HTTP request and response.
//
// Writes both to c.traceOutput between START/END markers. Only headers
// are dumped, except for non-success responses where the body is
// included too.
func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
	// Starts http dump.
	_, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------")
	if err != nil {
		return err
	}

	// Filter out Signature field from Authorization header.
	// NOTE(review): the header is overwritten with the redacted value and
	// never restored; the request has already been sent at this point,
	// but confirm nothing re-reads it afterwards.
	origAuth := req.Header.Get("Authorization")
	if origAuth != "" {
		req.Header.Set("Authorization", redactSignature(origAuth))
	}

	// Only display request header.
	reqTrace, err := httputil.DumpRequestOut(req, false)
	if err != nil {
		return err
	}

	// Write request to trace output.
	_, err = fmt.Fprint(c.traceOutput, string(reqTrace))
	if err != nil {
		return err
	}

	// Only display response header.
	var respTrace []byte

	// For errors we make sure to dump response body as well.
	if resp.StatusCode != http.StatusOK &&
		resp.StatusCode != http.StatusPartialContent &&
		resp.StatusCode != http.StatusNoContent {
		respTrace, err = httputil.DumpResponse(resp, true)
		if err != nil {
			return err
		}
	} else {
		respTrace, err = httputil.DumpResponse(resp, false)
		if err != nil {
			return err
		}
	}

	// Write response to trace output, trimming the trailing CRLF.
	_, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n"))
	if err != nil {
		return err
	}

	// Ends the http dump.
	_, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------")
	if err != nil {
		return err
	}

	// Returns success.
	return nil
}
// do - execute http request.
func (c Client) do(req *http.Request) (*http.Response, error) {
	resp, err := c.httpClient.Do(req)
	if err != nil {
		// Handle this specifically for now until future Golang versions fix this issue properly.
		// An abrupt connection close surfaces as an opaque "EOF"
		// url.Error; rewrap it with an actionable message.
		if urlErr, ok := err.(*url.Error); ok {
			if strings.Contains(urlErr.Err.Error(), "EOF") {
				return nil, &url.Error{
					Op:  urlErr.Op,
					URL: urlErr.URL,
					Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."),
				}
			}
		}
		return nil, err
	}

	// Response must never be nil here; report an error if that is the case.
	if resp == nil {
		msg := "Response is empty. " + reportIssue
		return nil, ErrInvalidArgument(msg)
	}

	// If trace is enabled, dump http request and response.
	if c.isTraceEnabled {
		err = c.dumpHTTP(req, resp)
		if err != nil {
			return nil, err
		}
	}

	return resp, nil
}
// List of success status.
// executeMethod returns immediately (without retrying) for any of these.
var successStatus = []int{
	http.StatusOK,
	http.StatusNoContent,
	http.StatusPartialContent,
}
// executeMethod - instantiates a given method, and retries the
// request upon any error up to maxRetries attempts in a binomially
// delayed manner using a standard back off algorithm.
func (c Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) {
	var isRetryable bool     // Indicates if request can be retried.
	var bodySeeker io.Seeker // Extracted seeker from io.Reader.
	var reqRetry = MaxRetry  // Indicates how many times we can retry the request

	if metadata.contentBody != nil {
		// Check if body is seekable then it is retryable.
		bodySeeker, isRetryable = metadata.contentBody.(io.Seeker)
		// The standard streams satisfy io.Seeker but cannot actually be
		// rewound, so never treat them as retryable.
		switch bodySeeker {
		case os.Stdin, os.Stdout, os.Stderr:
			isRetryable = false
		}
		// Retry only when reader is seekable
		if !isRetryable {
			reqRetry = 1
		}

		// Figure out if the body can be closed - if yes
		// we will definitely close it upon the function
		// return.
		bodyCloser, ok := metadata.contentBody.(io.Closer)
		if ok {
			defer bodyCloser.Close()
		}
	}

	// Create a done channel to control 'newRetryTimer' go routine.
	doneCh := make(chan struct{}, 1)

	// Indicate to our routine to exit cleanly upon return.
	defer close(doneCh)

	// Blank indentifier is kept here on purpose since 'range' without
	// blank identifiers is only supported since go1.4
	// https://golang.org/doc/go1.4#forrange.
	for range c.newRetryTimer(reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) {
		// Retry executes the following function body if request has an
		// error until maxRetries have been exhausted, retry attempts are
		// performed after waiting for a given period of time in a
		// binomial fashion.
		if isRetryable {
			// Seek back to beginning for each attempt.
			if _, err = bodySeeker.Seek(0, 0); err != nil {
				// If seek failed, no need to retry.
				return nil, err
			}
		}

		// Instantiate a new request.
		var req *http.Request
		req, err = c.newRequest(method, metadata)
		if err != nil {
			errResponse := ToErrorResponse(err)
			if isS3CodeRetryable(errResponse.Code) {
				continue // Retry.
			}
			return nil, err
		}

		// Add context to request
		req = req.WithContext(ctx)

		// Initiate the request.
		res, err = c.do(req)
		if err != nil {
			// For supported http requests errors verify.
			if isHTTPReqErrorRetryable(err) {
				continue // Retry.
			}
			// For other errors, return here no need to retry.
			return nil, err
		}

		// For any known successful http status, return quickly.
		for _, httpStatus := range successStatus {
			if httpStatus == res.StatusCode {
				return res, nil
			}
		}

		// Read the body to be saved later.
		errBodyBytes, err := ioutil.ReadAll(res.Body)
		// res.Body should be closed
		closeResponse(res)
		if err != nil {
			return nil, err
		}

		// Save the body: replace the consumed body with an in-memory
		// reader so it can be parsed and then rewound for the caller.
		errBodySeeker := bytes.NewReader(errBodyBytes)
		res.Body = ioutil.NopCloser(errBodySeeker)

		// For errors verify if its retryable otherwise fail quickly.
		errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))

		// Save the body back again.
		errBodySeeker.Seek(0, 0) // Seek back to starting point.
		res.Body = ioutil.NopCloser(errBodySeeker)

		// Bucket region if set in error response and the error
		// code dictates invalid region, we can retry the request
		// with the new region.
		//
		// Additionally we should only retry if bucketLocation and custom
		// region is empty.
		if metadata.bucketLocation == "" && c.region == "" {
			if errResponse.Code == "AuthorizationHeaderMalformed" || errResponse.Code == "InvalidRegion" {
				if metadata.bucketName != "" && errResponse.Region != "" {
					// Gather Cached location only if bucketName is present.
					// NOTE(review): despite its name, 'cachedLocationError' is a
					// bool (the comparison with false proves it) — presumably a
					// cache-hit flag; confirm against bucketLocCache.Get.
					if _, cachedLocationError := c.bucketLocCache.Get(metadata.bucketName); cachedLocationError != false {
						c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
						continue // Retry.
					}
				}
			}
		}

		// Verify if error response code is retryable.
		if isS3CodeRetryable(errResponse.Code) {
			continue // Retry.
		}

		// Verify if http status code is retryable.
		if isHTTPStatusRetryable(res.StatusCode) {
			continue // Retry.
		}

		// For all other cases break out of the retry loop.
		break
	}
	return res, err
}
// newRequest - instantiate a new HTTP request for a given method.
//
// Resolves the bucket location, builds the target URL, then either
// presigns (when metadata.presignURL is set) or signs the request with
// signature v2, streaming v4 or v4 depending on the credentials and
// client configuration.
func (c Client) newRequest(method string, metadata requestMetadata) (req *http.Request, err error) {
	// If no method is supplied default to 'POST'.
	if method == "" {
		method = "POST"
	}

	location := metadata.bucketLocation
	if location == "" {
		if metadata.bucketName != "" {
			// Gather location only if bucketName is present.
			location, err = c.getBucketLocation(metadata.bucketName)
			if err != nil {
				if ToErrorResponse(err).Code != "AccessDenied" {
					return nil, err
				}
			}
			// Upon AccessDenied error on fetching bucket location, default
			// to possible locations based on endpoint URL. This can usually
			// happen when GetBucketLocation() is disabled using IAM policies.
		}
		if location == "" {
			location = getDefaultLocation(*c.endpointURL, c.region)
		}
	}

	// Look if target url supports virtual host.
	isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, metadata.bucketName)

	// Construct a new target URL.
	targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, isVirtualHost, metadata.queryValues)
	if err != nil {
		return nil, err
	}

	// Initialize a new HTTP request for the method.
	req, err = http.NewRequest(method, targetURL.String(), nil)
	if err != nil {
		return nil, err
	}

	// Get credentials from the configured credentials provider.
	value, err := c.credsProvider.Get()
	if err != nil {
		return nil, err
	}

	var (
		signerType      = value.SignerType
		accessKeyID     = value.AccessKeyID
		secretAccessKey = value.SecretAccessKey
		sessionToken    = value.SessionToken
	)

	// Custom signer set then override the behavior.
	if c.overrideSignerType != credentials.SignatureDefault {
		signerType = c.overrideSignerType
	}

	// If signerType returned by credentials helper is anonymous,
	// then do not sign regardless of signerType override.
	if value.SignerType == credentials.SignatureAnonymous {
		signerType = credentials.SignatureAnonymous
	}

	// Generate presign url if needed, return right here.
	if metadata.expires != 0 && metadata.presignURL {
		if signerType.IsAnonymous() {
			return nil, ErrInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.")
		}
		if signerType.IsV2() {
			// Presign URL with signature v2.
			req = s3signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires, isVirtualHost)
		} else if signerType.IsV4() {
			// Presign URL with signature v4.
			req = s3signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires)
		}
		return req, nil
	}

	// Set 'User-Agent' header for the request.
	c.setUserAgent(req)

	// Set all headers.
	for k, v := range metadata.customHeader {
		req.Header.Set(k, v[0])
	}

	// Go net/http notoriously closes the request body.
	// - The request Body, if non-nil, will be closed by the underlying Transport, even on errors.
	// This can cause underlying *os.File seekers to fail, avoid that
	// by making sure to wrap the closer as a nop.
	if metadata.contentLength == 0 {
		req.Body = nil
	} else {
		req.Body = ioutil.NopCloser(metadata.contentBody)
	}

	// Set incoming content-length.
	req.ContentLength = metadata.contentLength
	if req.ContentLength <= -1 {
		// For unknown content length, we upload using transfer-encoding: chunked.
		req.TransferEncoding = []string{"chunked"}
	}

	// set md5Sum for content protection.
	if len(metadata.contentMD5Base64) > 0 {
		req.Header.Set("Content-Md5", metadata.contentMD5Base64)
	}

	// For anonymous requests just return.
	if signerType.IsAnonymous() {
		return req, nil
	}

	switch {
	case signerType.IsV2():
		// Add signature version '2' authorization header.
		req = s3signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost)
	case metadata.objectName != "" && method == "PUT" && metadata.customHeader.Get("X-Amz-Copy-Source") == "" && !c.secure:
		// Streaming signature is used by default for a PUT object request. Additionally we also
		// look if the initialized client is secure, if yes then we don't need to perform
		// streaming signature.
		req = s3signer.StreamingSignV4(req, accessKeyID,
			secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC())
	default:
		// Set sha256 sum for signature calculation only with signature version '4'.
		shaHeader := unsignedPayload
		if metadata.contentSHA256Hex != "" {
			shaHeader = metadata.contentSHA256Hex
		}
		req.Header.Set("X-Amz-Content-Sha256", shaHeader)

		// Add signature version '4' authorization header.
		req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, location)
	}

	// Return request.
	return req, nil
}
// set User agent.
func (c Client) setUserAgent(req *http.Request) {
	userAgent := libraryUserAgent
	// Append the application details when both were supplied via SetAppInfo.
	if c.appInfo.appName != "" && c.appInfo.appVersion != "" {
		userAgent += " " + c.appInfo.appName + "/" + c.appInfo.appVersion
	}
	req.Header.Set("User-Agent", userAgent)
}
// makeTargetURL make a new target url.
//
// Combines scheme, host (possibly rewritten for S3 acceleration or
// bucket location), bucket/object path (virtual-host or path style) and
// query values into the final request URL.
func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, isVirtualHostStyle bool, queryValues url.Values) (*url.URL, error) {
	host := c.endpointURL.Host
	// For Amazon S3 endpoint, try to fetch location based endpoint.
	if s3utils.IsAmazonEndpoint(*c.endpointURL) {
		if c.s3AccelerateEndpoint != "" && bucketName != "" {
			// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
			// Disable transfer acceleration for non-compliant bucket names.
			if strings.Contains(bucketName, ".") {
				return nil, ErrTransferAccelerationBucket(bucketName)
			}
			// If transfer acceleration is requested set new host.
			// For more details about enabling transfer acceleration read here.
			// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
			host = c.s3AccelerateEndpoint
		} else {
			// Do not change the host if the endpoint URL is a FIPS S3 endpoint.
			if !s3utils.IsAmazonFIPSGovCloudEndpoint(*c.endpointURL) {
				// Fetch new host based on the bucket location.
				host = getS3Endpoint(bucketLocation)
			}
		}
	}

	// Save scheme.
	scheme := c.endpointURL.Scheme

	// Strip port 80 and 443 so we won't send these ports in Host header.
	// The reason is that browsers and curl automatically remove :80 and :443
	// with the generated presigned urls, then a signature mismatch error.
	if h, p, err := net.SplitHostPort(host); err == nil {
		if scheme == "http" && p == "80" || scheme == "https" && p == "443" {
			host = h
		}
	}

	urlStr := scheme + "://" + host + "/"
	// Make URL only if bucketName is available, otherwise use the
	// endpoint URL.
	if bucketName != "" {
		// If endpoint supports virtual host style use that always.
		// Currently only S3 and Google Cloud Storage would support
		// virtual host style.
		if isVirtualHostStyle {
			urlStr = scheme + "://" + bucketName + "." + host + "/"
			if objectName != "" {
				urlStr = urlStr + s3utils.EncodePath(objectName)
			}
		} else {
			// If not fall back to using path style.
			urlStr = urlStr + bucketName + "/"
			if objectName != "" {
				urlStr = urlStr + s3utils.EncodePath(objectName)
			}
		}
	}

	// If there are any query values, add them to the end.
	if len(queryValues) > 0 {
		urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues)
	}

	return url.Parse(urlStr)
}
// returns true if virtual hosted style requests are to be used.
func (c *Client) isVirtualHostStyleRequest(url url.URL, bucketName string) bool {
	// Virtual-host style always needs a bucket.
	if bucketName == "" {
		return false
	}

	// An explicit lookup preference always wins.
	switch c.lookup {
	case BucketLookupDNS:
		return true
	case BucketLookupPath:
		return false
	}

	// default to virtual only for Amazon/Google storage. In all other cases use
	// path style requests
	return s3utils.IsVirtualHostSupported(url, bucketName)
}
|
package db
import (
"database/sql"
"fmt"
"github.com/lxc/lxd/shared/api"
"github.com/pkg/errors"
)
// Code generation directives.
//
//go:generate -command mapper lxd-generate db mapper -t profiles.mapper.go
//go:generate mapper reset
//
//go:generate mapper stmt -p db -e profile names
//go:generate mapper stmt -p db -e profile names-by-Project
//go:generate mapper stmt -p db -e profile names-by-Project-and-Name
//go:generate mapper stmt -p db -e profile objects
//go:generate mapper stmt -p db -e profile objects-by-Project
//go:generate mapper stmt -p db -e profile objects-by-Project-and-Name
//go:generate mapper stmt -p db -e profile config-ref
//go:generate mapper stmt -p db -e profile config-ref-by-Project
//go:generate mapper stmt -p db -e profile config-ref-by-Project-and-Name
//go:generate mapper stmt -p db -e profile devices-ref
//go:generate mapper stmt -p db -e profile devices-ref-by-Project
//go:generate mapper stmt -p db -e profile devices-ref-by-Project-and-Name
//go:generate mapper stmt -p db -e profile used-by-ref
//go:generate mapper stmt -p db -e profile used-by-ref-by-Project
//go:generate mapper stmt -p db -e profile used-by-ref-by-Project-and-Name
//go:generate mapper stmt -p db -e profile id
//go:generate mapper stmt -p db -e profile create struct=Profile
//go:generate mapper stmt -p db -e profile create-config-ref
//go:generate mapper stmt -p db -e profile create-devices-ref
//go:generate mapper stmt -p db -e profile rename
//go:generate mapper stmt -p db -e profile delete
//
//go:generate mapper method -p db -e profile URIs
//go:generate mapper method -p db -e profile List
//go:generate mapper method -p db -e profile Get
//go:generate mapper method -p db -e profile Exists struct=Profile
//go:generate mapper method -p db -e profile ID struct=Profile
//go:generate mapper method -p db -e profile ConfigRef
//go:generate mapper method -p db -e profile DevicesRef
//go:generate mapper method -p db -e profile UsedByRef
//go:generate mapper method -p db -e profile Create struct=Profile
//go:generate mapper method -p db -e profile Rename
//go:generate mapper method -p db -e profile Delete
// Profile is a value object holding db-related details about a profile.
// The db tags drive the generated mapper code (see the go:generate
// directives above).
type Profile struct {
	ID          int // database primary key
	Project     string `db:"primary=yes&join=projects.name"`
	Name        string `db:"primary=yes"`
	Description string `db:"coalesce=''"`
	Config      map[string]string            // profile config key/value pairs
	Devices     map[string]map[string]string // device name -> device properties
	UsedBy      []string
}
// ProfileToAPI is a convenience to convert a Profile db struct into
// an API profile struct.
func ProfileToAPI(profile *Profile) *api.Profile {
	out := &api.Profile{}
	out.Name = profile.Name
	out.UsedBy = profile.UsedBy
	out.Description = profile.Description
	out.Config = profile.Config
	out.Devices = profile.Devices
	return out
}
// ProfileFilter can be used to filter results yielded by ProfileList.
type ProfileFilter struct {
	Project string // project name to filter on
	Name    string // profile name to filter on
}
// Profiles returns a string list of profiles.
func (c *Cluster) Profiles(project string) ([]string, error) {
	// Projects that don't have their own profiles fall back to the
	// default project's profiles.
	err := c.Transaction(func(tx *ClusterTx) error {
		enabled, err := tx.ProjectHasProfiles(project)
		if err != nil {
			return errors.Wrap(err, "Check if project has profiles")
		}
		if !enabled {
			project = "default"
		}
		return nil
	})
	if err != nil {
		return nil, err
	}

	// Plain string literal: the previous fmt.Sprintf had no format
	// arguments and was redundant (staticcheck S1039).
	q := `
SELECT profiles.name
 FROM profiles
 JOIN projects ON projects.id = profiles.project_id
WHERE projects.name = ?
`
	inargs := []interface{}{project}
	var name string
	outfmt := []interface{}{name}
	result, err := queryScan(c.db, q, inargs, outfmt)
	if err != nil {
		return []string{}, err
	}

	// Flatten the scanned rows into a plain string slice.
	response := make([]string, 0, len(result))
	for _, r := range result {
		response = append(response, r[0].(string))
	}

	return response, nil
}
// ProfileGet returns the profile with the given name.
func (c *Cluster) ProfileGet(project, name string) (int64, *api.Profile, error) {
	id := int64(-1)
	var profile *api.Profile

	err := c.Transaction(func(tx *ClusterTx) error {
		hasProfiles, err := tx.ProjectHasProfiles(project)
		if err != nil {
			return errors.Wrap(err, "Check if project has profiles")
		}
		// Projects without their own profiles use the default project's.
		if !hasProfiles {
			project = "default"
		}

		row, err := tx.ProfileGet(project, name)
		if err != nil {
			return err
		}

		profile = ProfileToAPI(row)
		id = int64(row.ID)
		return nil
	})
	if err != nil {
		return -1, nil, err
	}

	return id, profile, nil
}
// ProfileConfig gets the profile configuration map from the DB.
func (c *Cluster) ProfileConfig(project, name string) (map[string]string, error) {
	// Projects that don't have their own profiles fall back to the
	// default project's profiles.
	err := c.Transaction(func(tx *ClusterTx) error {
		enabled, err := tx.ProjectHasProfiles(project)
		if err != nil {
			return errors.Wrap(err, "Check if project has profiles")
		}
		if !enabled {
			project = "default"
		}
		return nil
	})
	if err != nil {
		return nil, err
	}

	var key, value string
	query := `
        SELECT
            key, value
        FROM profiles_config
        JOIN profiles ON profiles_config.profile_id=profiles.id
        JOIN projects ON projects.id = profiles.project_id
        WHERE projects.name=? AND profiles.name=?`
	inargs := []interface{}{project, name}
	outfmt := []interface{}{key, value}
	results, err := queryScan(c.db, query, inargs, outfmt)
	if err != nil {
		return nil, errors.Wrapf(err, "Failed to get profile '%s'", name)
	}

	if len(results) == 0 {
		/*
		 * If we didn't get any rows here, let's check to make sure the
		 * profile really exists; if it doesn't, let's send back a 404.
		 *
		 * NOTE(review): this existence check matches by name only and
		 * ignores the project, unlike the main query above — a
		 * same-named profile in another project would suppress the
		 * 404. Confirm whether that is intended.
		 */
		query := "SELECT id FROM profiles WHERE name=?"
		var id int
		results, err := queryScan(c.db, query, []interface{}{name}, []interface{}{id})
		if err != nil {
			return nil, err
		}

		if len(results) == 0 {
			return nil, ErrNoSuchObject
		}
	}

	// Flatten the rows into a key/value map.
	config := map[string]string{}
	for _, r := range results {
		key = r[0].(string)
		value = r[1].(string)
		config[key] = value
	}

	return config, nil
}
// ProfileDescriptionUpdate updates the description of the profile with the given ID.
// The statement succeeds (without error) even when no profile has the given ID.
func ProfileDescriptionUpdate(tx *sql.Tx, id int64, description string) error {
	_, err := tx.Exec("UPDATE profiles SET description=? WHERE id=?", description, id)
	return err
}
// ProfileConfigClear resets the config of the profile with the given ID.
func ProfileConfigClear(tx *sql.Tx, id int64) error {
	// Delete config keys, then device config entries, then the devices
	// themselves — child rows before their parents. Every statement is
	// parameterized by the same profile ID.
	statements := []string{
		"DELETE FROM profiles_config WHERE profile_id=?",
		`DELETE FROM profiles_devices_config WHERE id IN
		(SELECT profiles_devices_config.id
		 FROM profiles_devices_config JOIN profiles_devices
		 ON profiles_devices_config.profile_device_id=profiles_devices.id
		WHERE profiles_devices.profile_id=?)`,
		"DELETE FROM profiles_devices WHERE profile_id=?",
	}
	for _, stmt := range statements {
		if _, err := tx.Exec(stmt, id); err != nil {
			return err
		}
	}
	return nil
}
// ProfileConfigAdd adds a config to the profile with the given ID.
func ProfileConfigAdd(tx *sql.Tx, id int64, config map[string]string) error {
	// The fmt.Sprintf wrapper was redundant (no format arguments), and —
	// more importantly — stmt.Close() was deferred BEFORE the error
	// check: a failed Prepare left stmt nil and the deferred Close
	// panicked. Check the error first, then defer the Close.
	stmt, err := tx.Prepare("INSERT INTO profiles_config (profile_id, key, value) VALUES(?, ?, ?)")
	if err != nil {
		return err
	}
	defer stmt.Close()

	for k, v := range config {
		if _, err := stmt.Exec(id, k, v); err != nil {
			return err
		}
	}

	return nil
}
// ProfileContainersGet gets the names of the containers associated with the
// profile with the given name.
func (c *Cluster) ProfileContainersGet(project, profile string) ([]string, error) {
	// Projects that don't have their own profiles fall back to the
	// default project's profiles.
	err := c.Transaction(func(tx *ClusterTx) error {
		enabled, err := tx.ProjectHasProfiles(project)
		if err != nil {
			return errors.Wrap(err, "Check if project has profiles")
		}
		if !enabled {
			project = "default"
		}
		return nil
	})
	if err != nil {
		return nil, err
	}

	// The containers.type == 0 filter presumably restricts results to
	// containers as opposed to other instance types — confirm against
	// the schema.
	q := `SELECT containers.name FROM containers JOIN containers_profiles
		ON containers.id == containers_profiles.container_id
		JOIN profiles ON containers_profiles.profile_id == profiles.id
		JOIN projects ON projects.id == profiles.project_id
		WHERE projects.name == ? AND profiles.name == ? AND containers.type == 0`

	results := []string{}
	inargs := []interface{}{project, profile}
	var name string
	outfmt := []interface{}{name}

	output, err := queryScan(c.db, q, inargs, outfmt)
	if err != nil {
		return results, err
	}

	for _, r := range output {
		results = append(results, r[0].(string))
	}

	return results, nil
}
// ProfileCleanupLeftover removes unreferenced profiles.
func (c *Cluster) ProfileCleanupLeftover() error {
	// Drop rows whose parent profile (or parent device) no longer exists.
	stmt := `
DELETE FROM profiles_config WHERE profile_id NOT IN (SELECT id FROM profiles);
DELETE FROM profiles_devices WHERE profile_id NOT IN (SELECT id FROM profiles);
DELETE FROM profiles_devices_config WHERE profile_device_id NOT IN (SELECT id FROM profiles_devices);
`
	return exec(c.db, stmt)
}
Add logic to the db package to load and expand profiles
Signed-off-by: Free Ekanayaka <04111f73b2d444cf053b50d877d79556bf34f55a@canonical.com>
package db
import (
"database/sql"
"fmt"
"github.com/lxc/lxd/shared/api"
"github.com/pkg/errors"
)
// Code generation directives.
//
//go:generate -command mapper lxd-generate db mapper -t profiles.mapper.go
//go:generate mapper reset
//
//go:generate mapper stmt -p db -e profile names
//go:generate mapper stmt -p db -e profile names-by-Project
//go:generate mapper stmt -p db -e profile names-by-Project-and-Name
//go:generate mapper stmt -p db -e profile objects
//go:generate mapper stmt -p db -e profile objects-by-Project
//go:generate mapper stmt -p db -e profile objects-by-Project-and-Name
//go:generate mapper stmt -p db -e profile config-ref
//go:generate mapper stmt -p db -e profile config-ref-by-Project
//go:generate mapper stmt -p db -e profile config-ref-by-Project-and-Name
//go:generate mapper stmt -p db -e profile devices-ref
//go:generate mapper stmt -p db -e profile devices-ref-by-Project
//go:generate mapper stmt -p db -e profile devices-ref-by-Project-and-Name
//go:generate mapper stmt -p db -e profile used-by-ref
//go:generate mapper stmt -p db -e profile used-by-ref-by-Project
//go:generate mapper stmt -p db -e profile used-by-ref-by-Project-and-Name
//go:generate mapper stmt -p db -e profile id
//go:generate mapper stmt -p db -e profile create struct=Profile
//go:generate mapper stmt -p db -e profile create-config-ref
//go:generate mapper stmt -p db -e profile create-devices-ref
//go:generate mapper stmt -p db -e profile rename
//go:generate mapper stmt -p db -e profile delete
//
//go:generate mapper method -p db -e profile URIs
//go:generate mapper method -p db -e profile List
//go:generate mapper method -p db -e profile Get
//go:generate mapper method -p db -e profile Exists struct=Profile
//go:generate mapper method -p db -e profile ID struct=Profile
//go:generate mapper method -p db -e profile ConfigRef
//go:generate mapper method -p db -e profile DevicesRef
//go:generate mapper method -p db -e profile UsedByRef
//go:generate mapper method -p db -e profile Create struct=Profile
//go:generate mapper method -p db -e profile Rename
//go:generate mapper method -p db -e profile Delete
// Profile is a value object holding db-related details about a profile.
type Profile struct {
	ID          int    // database row ID
	Project     string `db:"primary=yes&join=projects.name"` // owning project's name
	Name        string `db:"primary=yes"` // profile name, unique within a project
	Description string `db:"coalesce=''"` // human-readable description; NULL coalesced to ""
	Config      map[string]string // profile config key/value pairs
	Devices     map[string]map[string]string // device name -> device properties
	UsedBy      []string // references to entities using this profile (filled by the UsedByRef mapper)
}
// ProfileToAPI is a convenience to convert a Profile db struct into
// an API profile struct.
func ProfileToAPI(profile *Profile) *api.Profile {
	out := &api.Profile{
		Name:   profile.Name,
		UsedBy: profile.UsedBy,
	}

	// Description, Config and Devices live on the embedded put struct.
	out.Config = profile.Config
	out.Devices = profile.Devices
	out.Description = profile.Description

	return out
}
// ProfileFilter can be used to filter results yielded by ProfileList.
type ProfileFilter struct {
	Project string // project name to filter on
	Name    string // profile name to filter on
}
// Profiles returns a string list of profiles in the given project.
func (c *Cluster) Profiles(project string) ([]string, error) {
	// Projects without profiles of their own use the default project's.
	err := c.Transaction(func(tx *ClusterTx) error {
		enabled, err := tx.ProjectHasProfiles(project)
		if err != nil {
			return errors.Wrap(err, "Check if project has profiles")
		}
		if !enabled {
			project = "default"
		}
		return nil
	})
	if err != nil {
		return nil, err
	}

	// There are no formatting arguments, so the fmt.Sprintf wrapper was
	// redundant (flagged by go vet / staticcheck S1039).
	q := `
SELECT profiles.name
FROM profiles
JOIN projects ON projects.id = profiles.project_id
WHERE projects.name = ?
`
	inargs := []interface{}{project}
	var name string
	outfmt := []interface{}{name}
	result, err := queryScan(c.db, q, inargs, outfmt)
	if err != nil {
		return []string{}, err
	}

	response := []string{}
	for _, r := range result {
		response = append(response, r[0].(string))
	}

	return response, nil
}
// ProfileGet returns the profile with the given name, along with its row ID.
func (c *Cluster) ProfileGet(project, name string) (int64, *api.Profile, error) {
	var profileID int64
	var apiProfile *api.Profile

	err := c.Transaction(func(tx *ClusterTx) error {
		// Projects without profiles of their own use the default project's.
		hasProfiles, err := tx.ProjectHasProfiles(project)
		if err != nil {
			return errors.Wrap(err, "Check if project has profiles")
		}
		if !hasProfiles {
			project = "default"
		}

		profile, err := tx.ProfileGet(project, name)
		if err != nil {
			return err
		}

		apiProfile = ProfileToAPI(profile)
		profileID = int64(profile.ID)

		return nil
	})
	if err != nil {
		return -1, nil, err
	}

	return profileID, apiProfile, nil
}
// ProfilesGet returns the profiles with the given names in the given project.
func (c *Cluster) ProfilesGet(project string, names []string) ([]api.Profile, error) {
	profiles := make([]api.Profile, len(names))

	err := c.Transaction(func(tx *ClusterTx) error {
		// Projects without profiles of their own use the default project's.
		hasProfiles, err := tx.ProjectHasProfiles(project)
		if err != nil {
			return errors.Wrap(err, "Check if project has profiles")
		}
		if !hasProfiles {
			project = "default"
		}

		for i := range names {
			profile, err := tx.ProfileGet(project, names[i])
			if err != nil {
				return errors.Wrapf(err, "Load profile %q", names[i])
			}

			profiles[i] = *ProfileToAPI(profile)
		}

		return nil
	})
	if err != nil {
		return nil, err
	}

	return profiles, nil
}
// ProfileConfig gets the profile configuration map from the DB.
// Returns ErrNoSuchObject when the profile does not exist at all.
func (c *Cluster) ProfileConfig(project, name string) (map[string]string, error) {
	// Projects without profiles of their own use the default project's.
	err := c.Transaction(func(tx *ClusterTx) error {
		enabled, err := tx.ProjectHasProfiles(project)
		if err != nil {
			return errors.Wrap(err, "Check if project has profiles")
		}
		if !enabled {
			project = "default"
		}
		return nil
	})
	if err != nil {
		return nil, err
	}

	var key, value string
	query := `
SELECT
key, value
FROM profiles_config
JOIN profiles ON profiles_config.profile_id=profiles.id
JOIN projects ON projects.id = profiles.project_id
WHERE projects.name=? AND profiles.name=?`
	inargs := []interface{}{project, name}
	outfmt := []interface{}{key, value}
	results, err := queryScan(c.db, query, inargs, outfmt)
	if err != nil {
		return nil, errors.Wrapf(err, "Failed to get profile '%s'", name)
	}

	if len(results) == 0 {
		/*
		 * If we didn't get any rows here, let's check to make sure the
		 * profile really exists; if it doesn't, let's send back a 404.
		 */
		// NOTE(review): this existence probe is not project-scoped, so a
		// same-named profile in another project would mask the 404 —
		// confirm whether that is intended.
		// The `:=` below deliberately shadows the outer `results`; the
		// outer (empty) slice is what the loop further down iterates.
		query := "SELECT id FROM profiles WHERE name=?"
		var id int
		results, err := queryScan(c.db, query, []interface{}{name}, []interface{}{id})
		if err != nil {
			return nil, err
		}

		if len(results) == 0 {
			return nil, ErrNoSuchObject
		}
	}

	// Fold the key/value rows into a map.
	config := map[string]string{}
	for _, r := range results {
		key = r[0].(string)
		value = r[1].(string)
		config[key] = value
	}

	return config, nil
}
// ProfileDescriptionUpdate updates the description of the profile with the given ID.
func ProfileDescriptionUpdate(tx *sql.Tx, id int64, description string) error {
	if _, err := tx.Exec("UPDATE profiles SET description=? WHERE id=?", description, id); err != nil {
		return err
	}

	return nil
}
// ProfileConfigClear resets the config of the profile with the given ID,
// removing its config keys, devices and device configs.
func ProfileConfigClear(tx *sql.Tx, id int64) error {
	// Each statement binds the profile ID once; they run in the original
	// order so device configs are removed while the profiles_devices rows
	// still exist.
	queries := []string{
		"DELETE FROM profiles_config WHERE profile_id=?",
		`DELETE FROM profiles_devices_config WHERE id IN
(SELECT profiles_devices_config.id
FROM profiles_devices_config JOIN profiles_devices
ON profiles_devices_config.profile_device_id=profiles_devices.id
WHERE profiles_devices.profile_id=?)`,
		"DELETE FROM profiles_devices WHERE profile_id=?",
	}

	for _, q := range queries {
		if _, err := tx.Exec(q, id); err != nil {
			return err
		}
	}

	return nil
}
// ProfileConfigAdd adds a config to the profile with the given ID.
func ProfileConfigAdd(tx *sql.Tx, id int64, config map[string]string) error {
	str := fmt.Sprintf("INSERT INTO profiles_config (profile_id, key, value) VALUES(?, ?, ?)")
	stmt, err := tx.Prepare(str)
	if err != nil {
		// Check the error before registering the Close: the original code
		// deferred stmt.Close() first, which dereferences a nil statement
		// whenever Prepare fails.
		return err
	}
	defer stmt.Close()

	for k, v := range config {
		_, err = stmt.Exec(id, k, v)
		if err != nil {
			return err
		}
	}

	return nil
}
// ProfileContainersGet gets the names of the containers associated with the
// profile with the given name.
func (c *Cluster) ProfileContainersGet(project, profile string) ([]string, error) {
	// Projects without profiles of their own use the default project's.
	err := c.Transaction(func(tx *ClusterTx) error {
		enabled, err := tx.ProjectHasProfiles(project)
		if err != nil {
			return errors.Wrap(err, "Check if project has profiles")
		}
		if !enabled {
			project = "default"
		}
		return nil
	})
	if err != nil {
		return nil, err
	}

	// SQLite accepts "==" as an alias for "=". The "containers.type == 0"
	// clause presumably restricts the result to regular containers (not
	// snapshots) — confirm against the containers table schema.
	q := `SELECT containers.name FROM containers JOIN containers_profiles
ON containers.id == containers_profiles.container_id
JOIN profiles ON containers_profiles.profile_id == profiles.id
JOIN projects ON projects.id == profiles.project_id
WHERE projects.name == ? AND profiles.name == ? AND containers.type == 0`
	results := []string{}
	inargs := []interface{}{project, profile}
	var name string
	outfmt := []interface{}{name}

	output, err := queryScan(c.db, q, inargs, outfmt)
	if err != nil {
		return results, err
	}

	for _, r := range output {
		results = append(results, r[0].(string))
	}

	return results, nil
}
// ProfileCleanupLeftover removes unreferenced profiles.
// NOTE(review): despite the name, this prunes leftover profile config and
// device rows, not profile rows themselves. profiles_devices is pruned
// before profiles_devices_config so configs of just-removed devices are
// also swept by the last statement.
func (c *Cluster) ProfileCleanupLeftover() error {
	stmt := `
DELETE FROM profiles_config WHERE profile_id NOT IN (SELECT id FROM profiles);
DELETE FROM profiles_devices WHERE profile_id NOT IN (SELECT id FROM profiles);
DELETE FROM profiles_devices_config WHERE profile_device_id NOT IN (SELECT id FROM profiles_devices);
`
	err := exec(c.db, stmt)
	if err != nil {
		return err
	}

	return nil
}
// ProfilesExpandConfig expands the given container config with the config
// values of the given profiles.
func ProfilesExpandConfig(config map[string]string, profiles []api.Profile) map[string]string {
	expanded := map[string]string{}

	// Profiles apply in order, so keys from later profiles override
	// identical keys from earlier ones.
	for _, profile := range profiles {
		for key, value := range profile.Config {
			expanded[key] = value
		}
	}

	// The instance's own config always takes precedence over any profile.
	for key, value := range config {
		expanded[key] = value
	}

	return expanded
}
|
package api
import (
"encoding/json"
"log"
"net"
"net/http"
"os"
"os/signal"
"time"
)
// Config is a free-form bag of application settings carried by the Api.
type Config map[string]interface{}

// HandlerFunc is the signature of handlers registered through the Api
// routing helpers.
type HandlerFunc func(w http.ResponseWriter, r *http.Request)

// Router is the minimal routing surface the Api needs: per-method route
// registration plus the ability to act as an http.Handler itself.
type Router interface {
	Get(string, http.Handler)
	Head(string, http.Handler)
	Post(string, http.Handler)
	Put(string, http.Handler)
	Delete(string, http.Handler)
	http.Handler
}

// APIError is the JSON body written for single-error responses.
type APIError struct {
	ApiStatus int    `json:"api_status"` // application-level status code
	Message   string `json:"message"`
}

// APIErrors is the JSON body written for multi-error responses.
type APIErrors struct {
	ApiStatus int         `json:"api_status"`
	Errors    []*APIError `json:"errors"`
}
// Api couples a Router with HTTP server settings and JSON helpers.
type Api struct {
	Router Router
	Config Config
	// The timeouts and header limit below are copied onto the underlying
	// http.Server when Run is called.
	ReadTimeout    time.Duration
	WriteTimeout   time.Duration
	MaxHeaderBytes int
}
// NewApi creates an Api wrapping the given router, with an empty config.
func NewApi(router Router) *Api {
	return &Api{Router: router, Config: Config{}}
}
// --- routing helper ---
// Get registers f for GET requests at path, forcing a JSON content type on
// every response.
func (api *Api) Get(path string, f HandlerFunc) {
	api.Router.Get(path, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Fix: the MIME type was misspelled "apilication/json".
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		f(w, r)
	}))
}
// Post registers f for POST requests at path, forcing a JSON content type on
// every response.
func (api *Api) Post(path string, f HandlerFunc) {
	api.Router.Post(path, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Fix: the MIME type was misspelled "apilication/json".
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		f(w, r)
	}))
}
// Put registers f for PUT requests at path, forcing a JSON content type on
// every response.
func (api *Api) Put(path string, f HandlerFunc) {
	api.Router.Put(path, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Fix: the MIME type was misspelled "apilication/json".
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		f(w, r)
	}))
}
// Delete registers f for DELETE requests at path, forcing a JSON content
// type on every response.
func (api *Api) Delete(path string, f HandlerFunc) {
	api.Router.Delete(path, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Fix: the MIME type was misspelled "apilication/json".
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		f(w, r)
	}))
}
// --- error helper ---
// Error writes `{"message": ...}` with HTTP status 500
// (http.StatusInternalServerError) and API status 0.
func (api *Api) Error(w http.ResponseWriter, err error) {
	api.ErrorWithHttpStatusAndApiStatus(w, err, http.StatusInternalServerError, 0)
}
// ErrorWithHttpStatus writes `{"message": ...}` with the given HTTP status
// code and API status 0.
func (api *Api) ErrorWithHttpStatus(w http.ResponseWriter, err error, httpStatus int) {
	api.ErrorWithHttpStatusAndApiStatus(w, err, httpStatus, 0)
}
// ErrorWithHttpStatusAndApiStatus logs err and writes `{"message": ...}`
// with the given HTTP status code and API status code.
func (api *Api) ErrorWithHttpStatusAndApiStatus(w http.ResponseWriter, err error, httpStatus, apiStatus int) {
	log.Print(err.Error())
	j, marshalErr := json.Marshal(&APIError{Message: err.Error(), ApiStatus: apiStatus})
	if marshalErr != nil {
		// Marshalling a plain struct of ints/strings cannot normally fail.
		panic(marshalErr)
	}

	// Fix: the MIME type was misspelled "apilication/json".
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	http.Error(w, string(j), httpStatus)
}
// Errors writes `{"errors": [{"message": ...}, ...]}` with HTTP status 500
// (http.StatusInternalServerError) and API status 0.
func (api *Api) Errors(w http.ResponseWriter, errs []error) {
	api.ErrorsWithHttpStatusAndApiStatus(w, errs, http.StatusInternalServerError, 0)
}
// ErrorsWithHttpStatus writes `{"errors": [...]}` with the given HTTP status
// code and API status 0.
func (api *Api) ErrorsWithHttpStatus(w http.ResponseWriter, errs []error, httpStatus int) {
	api.ErrorsWithHttpStatusAndApiStatus(w, errs, httpStatus, 0)
}
// ErrorsWithHttpStatusAndApiStatus logs each error and writes
// `{"errors": [{"message": ...}, ...]}` with the given HTTP status code and
// API status code. Only the top-level ApiStatus is set; individual entries
// carry just the message.
func (api *Api) ErrorsWithHttpStatusAndApiStatus(w http.ResponseWriter, errs []error, httpStatus, apiStatus int) {
	apiErrors := &APIErrors{ApiStatus: apiStatus}
	for _, err := range errs {
		log.Print(err.Error())
		apiErrors.Errors = append(apiErrors.Errors, &APIError{Message: err.Error()})
	}

	j, marshalErr := json.Marshal(apiErrors)
	if marshalErr != nil {
		panic(marshalErr)
	}

	// Fix: the MIME type was misspelled "apilication/json".
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	http.Error(w, string(j), httpStatus)
}
// --- server helper ---
// Run listens on addr and serves the API until interrupted (SIGINT), at
// which point the listener is closed and the process exits via log.Fatal.
// This function never returns.
func (api *Api) Run(addr string) {
	s := &http.Server{
		Addr:           addr,
		Handler:        api.Router,
		ReadTimeout:    api.ReadTimeout,
		WriteTimeout:   api.WriteTimeout,
		MaxHeaderBytes: api.MaxHeaderBytes,
	}

	// notify signal Interrupt to channel c
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)

	listener, err := net.Listen("tcp", addr)
	if err != nil {
		log.Fatalf("Could not listen: %s", addr)
	}

	go func() {
		// Idiomatic "for range" — the old "for _ = range" form is
		// flagged by gofmt -s. Each ^C tears the server down.
		for range c {
			log.Print("Stopping the server...")
			listener.Close()
			log.Print("Tearing down...")
			log.Fatal("Finished - bye bye. ;-)")
		}
	}()

	log.Printf("HTTP Server: %s", addr)
	log.Fatalf("Error in Serve: %s", s.Serve(listener))
}
Push signal handling out of this library
package api
import (
"encoding/json"
"log"
"net"
"net/http"
"time"
)
// Config is a free-form bag of application settings carried by the Api.
type Config map[string]interface{}

// HandlerFunc is the signature of handlers registered through the Api
// routing helpers.
type HandlerFunc func(w http.ResponseWriter, r *http.Request)

// Router is the minimal routing surface the Api needs: per-method route
// registration plus the ability to act as an http.Handler itself.
type Router interface {
	Get(string, http.Handler)
	Head(string, http.Handler)
	Post(string, http.Handler)
	Put(string, http.Handler)
	Delete(string, http.Handler)
	http.Handler
}

// APIError is the JSON body written for single-error responses.
type APIError struct {
	ApiStatus int    `json:"api_status"` // application-level status code
	Message   string `json:"message"`
}

// APIErrors is the JSON body written for multi-error responses.
type APIErrors struct {
	ApiStatus int         `json:"api_status"`
	Errors    []*APIError `json:"errors"`
}
// Api couples a Router with HTTP server settings and JSON helpers.
type Api struct {
	Router Router
	Config Config
	// The timeouts and header limit below are copied onto the underlying
	// http.Server when Run is called.
	ReadTimeout    time.Duration
	WriteTimeout   time.Duration
	MaxHeaderBytes int
	// Listener and Server are populated by Run so Stop can close the
	// listener and make Serve return.
	Listener net.Listener
	Server   *http.Server
}
// NewApi creates an Api wrapping the given router, with an empty config.
func NewApi(router Router) *Api {
	return &Api{Router: router, Config: Config{}}
}
// --- routing helper ---
// Get registers f for GET requests at path, forcing a JSON content type on
// every response.
func (api *Api) Get(path string, f HandlerFunc) {
	wrapped := func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		f(w, r)
	}
	api.Router.Get(path, http.HandlerFunc(wrapped))
}
// Post registers f for POST requests at path, forcing a JSON content type on
// every response.
func (api *Api) Post(path string, f HandlerFunc) {
	wrapped := func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		f(w, r)
	}
	api.Router.Post(path, http.HandlerFunc(wrapped))
}
// Put registers f for PUT requests at path, forcing a JSON content type on
// every response.
func (api *Api) Put(path string, f HandlerFunc) {
	wrapped := func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		f(w, r)
	}
	api.Router.Put(path, http.HandlerFunc(wrapped))
}
// Delete registers f for DELETE requests at path, forcing a JSON content
// type on every response.
func (api *Api) Delete(path string, f HandlerFunc) {
	wrapped := func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		f(w, r)
	}
	api.Router.Delete(path, http.HandlerFunc(wrapped))
}
// --- error helper ---
// Error writes `{"message": ...}` with HTTP status 500
// (http.StatusInternalServerError) and API status 0.
func (api *Api) Error(w http.ResponseWriter, err error) {
	api.ErrorWithHttpStatusAndApiStatus(w, err, http.StatusInternalServerError, 0)
}
// ErrorWithHttpStatus writes `{"message": ...}` with the given HTTP status
// code and API status 0.
func (api *Api) ErrorWithHttpStatus(w http.ResponseWriter, err error, httpStatus int) {
	api.ErrorWithHttpStatusAndApiStatus(w, err, httpStatus, 0)
}
// ErrorWithHttpStatusAndApiStatus logs err and writes `{"message": ...}`
// with the given HTTP status code and API status code.
func (api *Api) ErrorWithHttpStatusAndApiStatus(w http.ResponseWriter, err error, httpStatus, apiStatus int) {
	log.Print(err.Error())

	payload := &APIError{Message: err.Error(), ApiStatus: apiStatus}
	body, marshalErr := json.Marshal(payload)
	if marshalErr != nil {
		panic(marshalErr)
	}

	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	http.Error(w, string(body), httpStatus)
}
// Errors writes `{"errors": [{"message": ...}, ...]}` with HTTP status 500
// (http.StatusInternalServerError) and API status 0.
func (api *Api) Errors(w http.ResponseWriter, errs []error) {
	api.ErrorsWithHttpStatusAndApiStatus(w, errs, http.StatusInternalServerError, 0)
}
// ErrorsWithHttpStatus writes `{"errors": [...]}` with the given HTTP status
// code and API status 0.
func (api *Api) ErrorsWithHttpStatus(w http.ResponseWriter, errs []error, httpStatus int) {
	api.ErrorsWithHttpStatusAndApiStatus(w, errs, httpStatus, 0)
}
// ErrorsWithHttpStatusAndApiStatus logs each error and writes
// `{"errors": [{"message": ...}, ...]}` with the given HTTP status code and
// API status code.
func (api *Api) ErrorsWithHttpStatusAndApiStatus(w http.ResponseWriter, errs []error, httpStatus, apiStatus int) {
	payload := &APIErrors{ApiStatus: apiStatus}
	for _, e := range errs {
		log.Print(e.Error())
		payload.Errors = append(payload.Errors, &APIError{Message: e.Error()})
	}

	body, marshalErr := json.Marshal(payload)
	if marshalErr != nil {
		panic(marshalErr)
	}

	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	http.Error(w, string(body), httpStatus)
}
// --- server helper ---
// Run serves the API on the given listener, blocking until the server stops
// (it exits via log.Fatalf when Serve returns, e.g. after Stop closes the
// listener). The listener and server are stored on the Api for Stop.
func (api *Api) Run(l net.Listener) {
	api.Listener = l
	api.Server = &http.Server{
		Handler:        api.Router,
		ReadTimeout:    api.ReadTimeout,
		WriteTimeout:   api.WriteTimeout,
		MaxHeaderBytes: api.MaxHeaderBytes,
	}

	// The original declared `var err error` and then checked it without
	// ever assigning it — a dead, unreachable branch left over from when
	// this function created its own listener. Removed.

	log.Printf("HTTP Server: %s", api.Listener.Addr())

	// Serve
	log.Fatalf("Error in Serve: %s", api.Server.Serve(api.Listener))
}
// Stop closes the listener stored by Run, which makes Serve return.
// It must only be called after Run has set api.Listener.
func (api *Api) Stop() {
	api.Listener.Close()
}
|
package device
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
"github.com/lxc/lxd/lxd/cgroup"
"github.com/lxc/lxd/lxd/db"
deviceConfig "github.com/lxc/lxd/lxd/device/config"
"github.com/lxc/lxd/lxd/instance"
"github.com/lxc/lxd/lxd/instance/instancetype"
"github.com/lxc/lxd/lxd/project"
"github.com/lxc/lxd/lxd/revert"
storagePools "github.com/lxc/lxd/lxd/storage"
storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
"github.com/lxc/lxd/lxd/util"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/idmap"
log "github.com/lxc/lxd/shared/log15"
"github.com/lxc/lxd/shared/logger"
"github.com/lxc/lxd/shared/subprocess"
"github.com/lxc/lxd/shared/units"
"github.com/lxc/lxd/shared/validate"
)
// Special disk "source" value used for generating a VM cloud-init config ISO.
const diskSourceCloudInit = "cloud-init:config"

// DiskVirtiofsdSockMountOpt indicates the mount option prefix used to provide the virtiofsd socket path to
// the QEMU driver.
const DiskVirtiofsdSockMountOpt = "virtiofsdSock"

// diskBlockLimit holds block I/O limits for a disk device.
type diskBlockLimit struct {
	readBps   int64 // read bandwidth limit
	readIops  int64 // read IOPS limit
	writeBps  int64 // write bandwidth limit
	writeIops int64 // write IOPS limit
}

// disk represents a disk device attached to an instance.
type disk struct {
	deviceCommon
}
// isRequired indicates whether the supplied device config requires this device to start OK.
func (d *disk) isRequired(devConfig deviceConfig.Device) bool {
	// The deprecated "optional" key marks a device as best-effort.
	if shared.IsTrue(devConfig["optional"]) {
		return false
	}

	// Devices default to required; an explicit "required" value wins.
	return devConfig["required"] == "" || shared.IsTrue(devConfig["required"])
}
// sourceIsLocalPath returns true if the source supplied should be considered a local path on the host.
// It returns false if the disk source is empty, a VM cloud-init config drive, or a remote ceph/cephfs path.
func (d *disk) sourceIsLocalPath(source string) bool {
	if source == "" {
		return false
	}

	if source == diskSourceCloudInit {
		return false
	}

	// Fix: check the supplied source argument rather than d.config["source"];
	// the original silently ignored its parameter for the ceph check, which
	// would misclassify any source value other than the device's current one.
	if shared.StringHasPrefix(source, "ceph:", "cephfs:") {
		return false
	}

	return true
}
// validateConfig checks the supplied config for correctness.
func (d *disk) validateConfig(instConf instance.ConfigReader) error {
	if !instanceSupported(instConf.Type(), instancetype.Container, instancetype.VM) {
		return ErrUnsupportedDevType
	}

	// Supported propagation types.
	// If an empty value is supplied the default behavior is to assume "private" mode.
	// These come from https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt
	propagationTypes := []string{"", "private", "shared", "slave", "unbindable", "rshared", "rslave", "runbindable", "rprivate"}
	validatePropagation := func(input string) error {
		// Fix: validate the supplied value rather than d.config["bind"],
		// which is never a valid config key and therefore always empty,
		// making the original check accept any propagation value.
		if !shared.StringInSlice(input, propagationTypes) {
			return fmt.Errorf("Invalid propagation value. Must be one of: %s", strings.Join(propagationTypes, ", "))
		}

		return nil
	}

	rules := map[string]func(string) error{
		"required":          validate.Optional(validate.IsBool),
		"optional":          validate.Optional(validate.IsBool), // "optional" is deprecated, replaced by "required".
		"readonly":          validate.Optional(validate.IsBool),
		"recursive":         validate.Optional(validate.IsBool),
		"shift":             validate.Optional(validate.IsBool),
		"source":            validate.IsAny,
		"limits.read":       validate.IsAny,
		"limits.write":      validate.IsAny,
		"limits.max":        validate.IsAny,
		"size":              validate.Optional(validate.IsSize),
		"size.state":        validate.Optional(validate.IsSize),
		"pool":              validate.IsAny,
		"propagation":       validatePropagation,
		"raw.mount.options": validate.IsAny,
		"ceph.cluster_name": validate.IsAny,
		"ceph.user_name":    validate.IsAny,
		"boot.priority":     validate.Optional(validate.IsUint32),
		"path":              validate.IsAny,
	}

	err := d.config.Validate(rules)
	if err != nil {
		return err
	}

	if d.config["required"] != "" && d.config["optional"] != "" {
		return fmt.Errorf(`Cannot use both "required" and deprecated "optional" properties at the same time`)
	}

	if d.config["source"] == "" && d.config["path"] != "/" {
		return fmt.Errorf(`Disk entry is missing the required "source" property`)
	}

	if d.config["path"] == "/" && d.config["source"] != "" {
		return fmt.Errorf(`Root disk entry may not have a "source" property set`)
	}

	if d.config["path"] == "/" && d.config["pool"] == "" {
		return fmt.Errorf(`Root disk entry must have a "pool" property set`)
	}

	if d.config["size"] != "" && d.config["path"] != "/" {
		return fmt.Errorf("Only the root disk may have a size quota")
	}

	if d.config["size.state"] != "" && d.config["path"] != "/" {
		return fmt.Errorf("Only the root disk may have a migration size quota")
	}

	if d.config["recursive"] != "" && (d.config["path"] == "/" || !shared.IsDir(shared.HostPath(d.config["source"]))) {
		return fmt.Errorf("The recursive option is only supported for additional bind-mounted paths")
	}

	if shared.IsTrue(d.config["recursive"]) && shared.IsTrue(d.config["readonly"]) {
		return fmt.Errorf("Recursive read-only bind-mounts aren't currently supported by the kernel")
	}

	// Check ceph options are only used when ceph or cephfs type source is specified.
	if !shared.StringHasPrefix(d.config["source"], "ceph:", "cephfs:") && (d.config["ceph.cluster_name"] != "" || d.config["ceph.user_name"] != "") {
		return fmt.Errorf("Invalid options ceph.cluster_name/ceph.user_name for source %q", d.config["source"])
	}

	// Check no other devices also have the same path as us. Use LocalDevices for this check so
	// that we can check before the config is expanded or when a profile is being checked.
	// Don't take into account the device names, only count active devices that point to the
	// same path, so that if merged profiles share the same the path and then one is removed
	// this can still be cleanly removed.
	pathCount := 0
	for _, devConfig := range instConf.LocalDevices() {
		if devConfig["type"] == "disk" && d.config["path"] != "" && devConfig["path"] == d.config["path"] {
			pathCount++
			if pathCount > 1 {
				return fmt.Errorf("More than one disk device uses the same path %q", d.config["path"])
			}
		}
	}

	// Check that external disk source path exists. External disk sources have a non-empty "source" property
	// that contains the path of the external source, and do not have a "pool" property. We only check the
	// source path exists when the disk device is required, is not an external ceph/cephfs source and is not a
	// VM cloud-init drive. We only check this when an instance is loaded to avoid validating snapshot configs
	// that may contain older config that no longer exists which can prevent migrations.
	if d.inst != nil && d.config["pool"] == "" && d.isRequired(d.config) && d.sourceIsLocalPath(d.config["source"]) && !shared.PathExists(shared.HostPath(d.config["source"])) {
		return fmt.Errorf("Missing source path %q for disk %q", d.config["source"], d.name)
	}

	if d.config["pool"] != "" {
		if d.inst != nil && !d.inst.IsSnapshot() {
			_, pool, _, err := d.state.Cluster.GetStoragePoolInAnyState(d.config["pool"])
			if err != nil {
				return fmt.Errorf("Failed to get storage pool %q: %s", d.config["pool"], err)
			}

			if pool.Status == "Pending" {
				return fmt.Errorf("Pool %q is pending", d.config["pool"])
			}
		}

		if d.config["shift"] != "" {
			return fmt.Errorf(`The "shift" property cannot be used with custom storage volumes`)
		}

		if filepath.IsAbs(d.config["source"]) {
			return fmt.Errorf("Storage volumes cannot be specified as absolute paths")
		}

		// Only perform expensive instance custom volume checks when not validating a profile and after
		// device expansion has occurred (to avoid doing it twice during instance load).
		if d.inst != nil && len(instConf.ExpandedDevices()) > 0 && d.config["source"] != "" && d.config["path"] != "/" {
			poolID, err := d.state.Cluster.GetStoragePoolID(d.config["pool"])
			if err != nil {
				return fmt.Errorf("The %q storage pool doesn't exist", d.config["pool"])
			}

			// Derive the effective storage project name from the instance config's project.
			storageProjectName, err := project.StorageVolumeProject(d.state.Cluster, instConf.Project(), db.StoragePoolVolumeTypeCustom)
			if err != nil {
				return err
			}

			// GetLocalStoragePoolVolume returns a volume with an empty Location field for remote drivers.
			_, vol, err := d.state.Cluster.GetLocalStoragePoolVolume(storageProjectName, d.config["source"], db.StoragePoolVolumeTypeCustom, poolID)
			if err != nil {
				return errors.Wrapf(err, "Failed loading custom volume")
			}

			// Check storage volume is available to mount on this cluster member.
			remoteInstance, err := storagePools.VolumeUsedByExclusiveRemoteInstancesWithProfiles(d.state, d.config["pool"], storageProjectName, vol)
			if err != nil {
				return errors.Wrapf(err, "Failed checking if custom volume is exclusively attached to another instance")
			}

			if remoteInstance != nil {
				return fmt.Errorf("Custom volume is already attached to an instance on a different node")
			}

			// Check only block type volumes are attached to VM instances.
			contentType, err := storagePools.VolumeContentTypeNameToContentType(vol.ContentType)
			if err != nil {
				return err
			}

			if contentType == db.StoragePoolVolumeContentTypeBlock {
				if instConf.Type() == instancetype.Container {
					return fmt.Errorf("Custom block volumes cannot be used on containers")
				}

				if d.config["path"] != "" {
					return fmt.Errorf("Custom block volumes cannot have a path defined")
				}
			}
		}
	}

	return nil
}
// getDevicePath returns the absolute path on the host for this instance and supplied device config.
func (d *disk) getDevicePath(devName string, devConfig deviceConfig.Device) string {
	relPath := strings.TrimPrefix(devConfig["path"], "/")
	encodedName := storageDrivers.PathNameEncode(deviceJoinPath("disk", devName, relPath))

	return filepath.Join(d.inst.DevicesPath(), encodedName)
}
// validateEnvironment checks the runtime environment for correctness.
func (d *disk) validateEnvironment() error {
	// Owner shifting needs shiftfs support on the host.
	if shared.IsTrue(d.config["shift"]) && !d.state.OS.Shiftfs {
		return fmt.Errorf("shiftfs is required by disk entry but isn't supported on system")
	}

	// The cloud-init config drive only makes sense for virtual machines.
	if d.config["source"] == diskSourceCloudInit && d.inst.Type() != instancetype.VM {
		return fmt.Errorf("disks with source=%s are only supported by virtual machines", diskSourceCloudInit)
	}

	return nil
}
// UpdatableFields returns a list of fields that can be updated without triggering a device remove & add.
func (d *disk) UpdatableFields(oldDevice Type) []string {
	// Only a disk can be live-updated into a disk.
	if _, ok := oldDevice.(*disk); !ok {
		return []string{}
	}

	return []string{"limits.max", "limits.read", "limits.write", "size", "size.state"}
}
// Register calls mount for the disk volume (which should already be mounted) to reinitialise the reference counter
// for volumes attached to running instances on LXD restart.
func (d *disk) Register() error {
	d.logger.Debug("Initialising mounted disk ref counter")

	if d.config["path"] == "/" {
		// Root disk: bump the ref counter of the instance's own volume.
		pool, err := storagePools.GetPoolByInstance(d.state, d.inst)
		if err != nil {
			return err
		}

		// Try to mount the volume that should already be mounted to reinitialise the ref counter.
		_, err = pool.MountInstance(d.inst, nil)
		if err != nil {
			return err
		}
	} else if d.config["path"] != "/" && d.config["source"] != "" && d.config["pool"] != "" {
		// Custom volume attached from a storage pool: bump its ref counter.
		pool, err := storagePools.GetPoolByName(d.state, d.config["pool"])
		if err != nil {
			return err
		}

		// Resolve the effective storage project for custom volumes.
		storageProjectName, err := project.StorageVolumeProject(d.state.Cluster, d.inst.Project(), db.StoragePoolVolumeTypeCustom)
		if err != nil {
			return err
		}

		// Try to mount the volume that should already be mounted to reinitialise the ref counter.
		err = pool.MountCustomVolume(storageProjectName, d.config["source"], nil)
		if err != nil {
			return err
		}
	}

	return nil
}
// Start is run when the device is added to the instance.
func (d *disk) Start() (*deviceConfig.RunConfig, error) {
	if err := d.validateEnvironment(); err != nil {
		return nil, err
	}

	// VMs and containers attach disks through different mechanisms.
	if d.inst.Type() != instancetype.VM {
		return d.startContainer()
	}

	return d.startVM()
}
// startContainer starts the disk device for a container instance, returning
// the mount/rootfs instructions LXD should apply.
func (d *disk) startContainer() (*deviceConfig.RunConfig, error) {
	runConf := deviceConfig.RunConfig{}
	isReadOnly := shared.IsTrue(d.config["readonly"])
	isRequired := d.isRequired(d.config)

	// Apply cgroups only after all the mounts have been processed.
	runConf.PostHooks = append(runConf.PostHooks, func() error {
		// This deliberately shadows the outer runConf: the I/O limits are
		// delivered through a fresh config passed to the event handler.
		runConf := deviceConfig.RunConfig{}
		err := d.generateLimits(&runConf)
		if err != nil {
			return err
		}

		err = d.inst.DeviceEventHandler(&runConf)
		if err != nil {
			return err
		}

		return nil
	})

	revert := revert.New()
	defer revert.Fail()

	// Deal with a rootfs.
	if shared.IsRootDiskDevice(d.config) {
		// Set the rootfs path.
		rootfs := deviceConfig.RootFSEntryItem{
			Path: d.inst.RootfsPath(),
		}

		// Read-only rootfs (unlikely to work very well).
		if isReadOnly {
			rootfs.Opts = append(rootfs.Opts, "ro")
		}

		// Handle previous requests for setting new quotas.
		err := d.applyDeferredQuota()
		if err != nil {
			return nil, err
		}

		runConf.RootFS = rootfs
	} else {
		// Source path.
		srcPath := shared.HostPath(d.config["source"])

		// Destination path.
		destPath := d.config["path"]
		relativeDestPath := strings.TrimPrefix(destPath, "/")

		// Option checks.
		isRecursive := shared.IsTrue(d.config["recursive"])

		// If we want to mount a storage volume from a storage pool we created via our
		// storage api, we are always mounting a directory.
		isFile := false
		if d.config["pool"] == "" {
			isFile = !shared.IsDir(srcPath) && !IsBlockdev(srcPath)
		}

		ownerShift := deviceConfig.MountOwnerShiftNone
		if shared.IsTrue(d.config["shift"]) {
			ownerShift = deviceConfig.MountOwnerShiftDynamic
		}

		// If ownerShift is none and pool is specified then check whether the pool itself
		// has owner shifting enabled, and if so enable shifting on this device too.
		if ownerShift == deviceConfig.MountOwnerShiftNone && d.config["pool"] != "" {
			poolID, _, _, err := d.state.Cluster.GetStoragePool(d.config["pool"])
			if err != nil {
				return nil, err
			}

			// Only custom volumes can be attached currently.
			storageProjectName, err := project.StorageVolumeProject(d.state.Cluster, d.inst.Project(), db.StoragePoolVolumeTypeCustom)
			if err != nil {
				return nil, err
			}

			_, volume, err := d.state.Cluster.GetLocalStoragePoolVolume(storageProjectName, d.config["source"], db.StoragePoolVolumeTypeCustom, poolID)
			if err != nil {
				return nil, err
			}

			if shared.IsTrue(volume.Config["security.shifted"]) {
				// NOTE(review): string literal appears to mirror
				// deviceConfig.MountOwnerShiftDynamic — confirm and
				// consider using the constant instead.
				ownerShift = "dynamic"
			}
		}

		// Assemble the mount options for the bind mount.
		options := []string{}
		if isReadOnly {
			options = append(options, "ro")
		}

		if isRecursive {
			options = append(options, "rbind")
		} else {
			options = append(options, "bind")
		}

		if d.config["propagation"] != "" {
			options = append(options, d.config["propagation"])
		}

		if isFile {
			options = append(options, "create=file")
		} else {
			options = append(options, "create=dir")
		}

		// Mount the pool volume and set poolVolSrcPath for createDevice below.
		var poolVolSrcPath string
		if d.config["pool"] != "" {
			var err error
			poolVolSrcPath, err = d.mountPoolVolume(revert)
			if err != nil {
				// A failing optional disk is logged and skipped rather
				// than failing the instance start.
				if !isRequired {
					d.logger.Warn(err.Error())
					return nil, nil
				}

				return nil, err
			}
		}

		// Mount the source in the instance devices directory.
		sourceDevPath, err := d.createDevice(revert, poolVolSrcPath)
		if err != nil {
			return nil, err
		}

		if sourceDevPath != "" {
			// Instruct LXD to perform the mount.
			runConf.Mounts = append(runConf.Mounts, deviceConfig.MountEntryItem{
				DevName:    d.name,
				DevPath:    sourceDevPath,
				TargetPath: relativeDestPath,
				FSType:     "none",
				Opts:       options,
				OwnerShift: ownerShift,
			})

			// Unmount host-side mount once instance is started.
			runConf.PostHooks = append(runConf.PostHooks, d.postStart)
		}
	}

	revert.Success()
	return &runConf, nil
}
// vmVirtfsProxyHelperPaths returns the socket path and PID file path used by
// the virtfs-proxy-helper process for this device.
func (d *disk) vmVirtfsProxyHelperPaths() (string, string) {
	devicesDir := d.inst.DevicesPath()
	sock := filepath.Join(devicesDir, d.name+".sock")
	pid := filepath.Join(devicesDir, d.name+".pid")

	return sock, pid
}
// vmVirtiofsdPaths returns the socket path and PID file path used by the
// virtiofsd process for this device.
func (d *disk) vmVirtiofsdPaths() (string, string) {
	prefix := fmt.Sprintf("virtio-fs.%s", d.name)
	sock := filepath.Join(d.inst.DevicesPath(), prefix+".sock")
	pid := filepath.Join(d.inst.DevicesPath(), prefix+".pid")

	return sock, pid
}
// startVM starts the disk device for a virtual machine instance.
//
// Root disks and the special cloud-init ISO are described directly as mounts
// for the QEMU driver. Other sources are either passed through as-is (block
// device, image file, Ceph RBD) or, for directories and cephfs shares, shared
// into the VM via a 9p proxy (virtfs-proxy-helper) with virtio-fs (virtiofsd)
// offered as the preferred transport when available.
func (d *disk) startVM() (*deviceConfig.RunConfig, error) {
	runConf := deviceConfig.RunConfig{}
	isRequired := d.isRequired(d.config)

	if shared.IsRootDiskDevice(d.config) {
		// Handle previous requests for setting new quotas.
		err := d.applyDeferredQuota()
		if err != nil {
			return nil, err
		}

		runConf.Mounts = []deviceConfig.MountEntryItem{
			{
				TargetPath: d.config["path"], // Indicator used that this is the root device.
				DevName:    d.name,
			},
		}

		return &runConf, nil
	} else if d.config["source"] == diskSourceCloudInit {
		// This is a special virtual disk source that can be attached to a VM to provide cloud-init config.
		isoPath, err := d.generateVMConfigDrive()
		if err != nil {
			return nil, err
		}

		runConf.Mounts = []deviceConfig.MountEntryItem{
			{
				DevPath: isoPath,
				DevName: d.name,
				FSType:  "iso9660",
			},
		}

		return &runConf, nil
	} else if d.config["source"] != "" {
		revert := revert.New()
		defer revert.Fail()

		if strings.HasPrefix(d.config["source"], "ceph:") {
			// Get the pool and volume names.
			fields := strings.SplitN(d.config["source"], ":", 2)
			fields = strings.SplitN(fields[1], "/", 2)
			poolName := fields[0]
			volumeName := fields[1]
			clusterName, userName := d.cephCreds()

			// Configuration values containing :, @, or = can be escaped with a leading \ character.
			// According to https://docs.ceph.com/docs/hammer/rbd/qemu-rbd/#usage
			optEscaper := strings.NewReplacer(":", `\:`, "@", `\@`, "=", `\=`)
			opts := []string{
				fmt.Sprintf("id=%s", optEscaper.Replace(userName)),
				fmt.Sprintf("conf=/etc/ceph/%s.conf", optEscaper.Replace(clusterName)),
			}

			runConf.Mounts = []deviceConfig.MountEntryItem{
				{
					DevPath: fmt.Sprintf("rbd:%s/%s:%s", optEscaper.Replace(poolName), optEscaper.Replace(volumeName), strings.Join(opts, ":")),
					DevName: d.name,
				},
			}
		} else {
			srcPath := shared.HostPath(d.config["source"])
			var err error

			// Mount the pool volume and update srcPath to mount path so it can be recognised as dir
			// if the volume is a filesystem volume type (if it is a block volume the srcPath will
			// be returned as the path to the block device).
			if d.config["pool"] != "" {
				srcPath, err = d.mountPoolVolume(revert)
				if err != nil {
					if !isRequired {
						// Use the device logger for consistency with the rest of this file.
						d.logger.Warn(err.Error())
						return nil, nil
					}

					return nil, err
				}
			}

			// Default to block device or image file passthrough first.
			mount := deviceConfig.MountEntryItem{
				DevPath: srcPath,
				DevName: d.name,
			}

			readonly := shared.IsTrue(d.config["readonly"])
			if readonly {
				mount.Opts = append(mount.Opts, "ro")
			}

			// If the source being added is a directory or cephfs share, then we will use the lxd-agent
			// directory sharing feature to mount the directory inside the VM, and as such we need to
			// indicate to the VM the target path to mount to.
			if shared.IsDir(srcPath) || strings.HasPrefix(d.config["source"], "cephfs:") {
				// Mount the source in the instance devices directory.
				// This will ensure that if the exported directory configured as readonly that this
				// takes effect event if using virtio-fs (which doesn't support read only mode) by
				// having the underlying mount setup as readonly.
				srcPath, err = d.createDevice(revert, srcPath)
				if err != nil {
					return nil, err
				}

				// Something went wrong, but no error returned, meaning required != true so nothing
				// to do. Return nil explicitly (err is guaranteed nil at this point).
				if srcPath == "" {
					return nil, nil
				}

				mount.TargetPath = d.config["path"]
				mount.FSType = "9p"

				// Start virtfs-proxy-helper for 9p share.
				err = func() error {
					sockPath, pidPath := d.vmVirtfsProxyHelperPaths()

					// Use 9p socket path as dev path so qemu can connect to the proxy.
					mount.DevPath = sockPath

					// Remove old socket if needed.
					os.Remove(sockPath)

					// Locate virtfs-proxy-helper.
					cmd, err := exec.LookPath("virtfs-proxy-helper")
					if err != nil {
						// Fall back to the well-known distro location if not in PATH.
						if shared.PathExists("/usr/lib/qemu/virtfs-proxy-helper") {
							cmd = "/usr/lib/qemu/virtfs-proxy-helper"
						}
					}

					if cmd == "" {
						return fmt.Errorf(`Required binary "virtfs-proxy-helper" couldn't be found`)
					}

					// Start the virtfs-proxy-helper process in non-daemon mode and as root so
					// that when the VM process is started as an unprivileged user, we can
					// still share directories that process cannot access.
					proc, err := subprocess.NewProcess(cmd, []string{"-n", "-u", "0", "-g", "0", "-s", sockPath, "-p", srcPath}, "", "")
					if err != nil {
						return err
					}

					err = proc.Start()
					if err != nil {
						return errors.Wrapf(err, "Failed to start virtfs-proxy-helper")
					}

					revert.Add(func() { proc.Stop() })

					err = proc.Save(pidPath)
					if err != nil {
						return errors.Wrapf(err, "Failed to save virtfs-proxy-helper state")
					}

					// Wait for socket file to exist (as otherwise qemu can race the creation
					// of this file).
					waitDuration := time.Second * time.Duration(10)
					waitUntil := time.Now().Add(waitDuration)
					for {
						if shared.PathExists(sockPath) {
							break
						}

						if time.Now().After(waitUntil) {
							return fmt.Errorf("virtfs-proxy-helper failed to bind socket after %v", waitDuration)
						}

						time.Sleep(50 * time.Millisecond)
					}

					return nil
				}()
				if err != nil {
					return nil, errors.Wrapf(err, "Failed to setup virtfs-proxy-helper for device %q", d.name)
				}

				// Start virtiofsd for virtio-fs share. The lxd-agent prefers to use this over the
				// virtfs-proxy-helper 9p share. The 9p share will only be used as a fallback.
				err = func() error {
					// virtiofsd doesn't support readonly mode, so its important we don't
					// expose the share as writable when the LXD device is set as readonly.
					if readonly {
						d.logger.Warn("Unable to use virtio-fs for device, using 9p as a fallback", log.Ctx{"err": "readonly devices unsupported"})
						return nil
					}

					sockPath, pidPath := d.vmVirtiofsdPaths()
					logPath := filepath.Join(d.inst.LogPath(), fmt.Sprintf("disk.%s.log", d.name))

					err = DiskVMVirtiofsdStart(d.inst, sockPath, pidPath, logPath, srcPath)
					if err != nil {
						var errUnsupported UnsupportedError
						if errors.As(err, &errUnsupported) {
							// Not fatal: fall back to the 9p share started above.
							d.logger.Warn("Unable to use virtio-fs for device, using 9p as a fallback", log.Ctx{"err": errUnsupported})
							return nil
						}

						return err
					}
					revert.Add(func() { DiskVMVirtiofsdStop(sockPath, pidPath) })

					// Add the socket path to the mount options to indicate to the qemu driver
					// that this share is available.
					// Note: the sockPath is not passed to the QEMU via mount.DevPath like the
					// 9p share above. This is because we run the 9p share concurrently
					// and can only pass one DevPath at a time. Instead pass the sock path to
					// the QEMU driver via the mount opts field as virtiofsdSock to allow the
					// QEMU driver also setup the virtio-fs share.
					mount.Opts = append(mount.Opts, fmt.Sprintf("%s=%s", DiskVirtiofsdSockMountOpt, sockPath))

					return nil
				}()
				if err != nil {
					return nil, errors.Wrapf(err, "Failed to setup virtiofsd for device %q", d.name)
				}
			} else if !shared.PathExists(srcPath) {
				// Missing sources only fail the start when the device is required.
				if isRequired {
					return nil, fmt.Errorf("Source path %q doesn't exist for device %q", srcPath, d.name)
				}
			}

			// Add successfully setup mount config to runConf.
			runConf.Mounts = []deviceConfig.MountEntryItem{mount}
		}

		revert.Success()
		return &runConf, nil
	}

	return nil, fmt.Errorf("Disk type not supported for VMs")
}
// postStart is run after the instance is started. It lazily detaches the
// host-side mount now that the instance holds its own reference to it.
func (d *disk) postStart() error {
	hostMount := d.getDevicePath(d.name, d.config)

	// Unmount the host side.
	return unix.Unmount(hostMount, unix.MNT_DETACH)
}
// Update applies configuration changes to a started device.
//
// For root disks it validates that the storage pool is unchanged and applies
// size/size.state quota changes (deferring them via the volatile
// "apply_quota" key when the disk is in use). For running containers it then
// re-applies IO limits.
func (d *disk) Update(oldDevices deviceConfig.Devices, isRunning bool) error {
	// VMs only support updating the root disk.
	if d.inst.Type() == instancetype.VM && !shared.IsRootDiskDevice(d.config) {
		return fmt.Errorf("Non-root disks not supported for VMs")
	}

	if shared.IsRootDiskDevice(d.config) {
		// Make sure we have a valid root disk device (and only one).
		expandedDevices := d.inst.ExpandedDevices()
		newRootDiskDeviceKey, _, err := shared.GetRootDiskDevice(expandedDevices.CloneNative())
		if err != nil {
			return errors.Wrap(err, "Detect root disk device")
		}

		// Retrieve the first old root disk device key, even if there are duplicates.
		oldRootDiskDeviceKey := ""
		for k, v := range oldDevices {
			if shared.IsRootDiskDevice(v) {
				oldRootDiskDeviceKey = k
				break
			}
		}

		// Check for pool change. Moving the root disk between pools is only
		// supported via an explicit move operation.
		oldRootDiskDevicePool := oldDevices[oldRootDiskDeviceKey]["pool"]
		newRootDiskDevicePool := expandedDevices[newRootDiskDeviceKey]["pool"]
		if oldRootDiskDevicePool != newRootDiskDevicePool {
			return fmt.Errorf("The storage pool of the root disk can only be changed through move")
		}

		// Deal with quota changes.
		oldRootDiskDeviceSize := oldDevices[oldRootDiskDeviceKey]["size"]
		newRootDiskDeviceSize := expandedDevices[newRootDiskDeviceKey]["size"]
		oldRootDiskDeviceMigrationSize := oldDevices[oldRootDiskDeviceKey]["size.state"]
		newRootDiskDeviceMigrationSize := expandedDevices[newRootDiskDeviceKey]["size.state"]

		// Apply disk quota changes.
		if newRootDiskDeviceSize != oldRootDiskDeviceSize || oldRootDiskDeviceMigrationSize != newRootDiskDeviceMigrationSize {
			// Remove any outstanding volatile apply_quota key if applying a new quota.
			v := d.volatileGet()
			if v["apply_quota"] != "" {
				err = d.volatileSet(map[string]string{"apply_quota": ""})
				if err != nil {
					return err
				}
			}

			err := d.applyQuota(false)
			if err == storageDrivers.ErrInUse {
				// Save volatile apply_quota key for next boot if cannot apply now.
				err = d.volatileSet(map[string]string{"apply_quota": "true"})
				if err != nil {
					return err
				}

				d.logger.Warn("Could not apply quota because disk is in use, deferring until next start")
			} else if err != nil {
				return err
			}
		}
	}

	// Only apply IO limits if instance is container and is running.
	if isRunning && d.inst.Type() == instancetype.Container {
		runConf := deviceConfig.RunConfig{}
		err := d.generateLimits(&runConf)
		if err != nil {
			return err
		}

		err = d.inst.DeviceEventHandler(&runConf)
		if err != nil {
			return err
		}
	}

	return nil
}
// applyDeferredQuota attempts to apply the deferred quota specified in the volatile "apply_quota" key if set.
// If successfully applies new quota then removes the volatile "apply_quota" key.
func (d *disk) applyDeferredQuota() error {
	volatile := d.volatileGet()
	if volatile["apply_quota"] == "" {
		// Nothing was deferred.
		return nil
	}

	d.logger.Info("Applying deferred quota change")

	// Indicate that we want applyQuota to unmount the volume first, this is so we can perform resizes
	// that cannot be done when the volume is in use.
	if err := d.applyQuota(true); err != nil {
		return errors.Wrapf(err, "Failed to apply deferred quota from %q", fmt.Sprintf("volatile.%s.apply_quota", d.name))
	}

	// Remove volatile apply_quota key if successful.
	return d.volatileSet(map[string]string{"apply_quota": ""})
}
// applyQuota attempts to resize the instance root disk to the specified size.
// If unmount is true, attempts to unmount first before resizing.
func (d *disk) applyQuota(unmount bool) error {
	rootDiskName, _, err := shared.GetRootDiskDevice(d.inst.ExpandedDevices().CloneNative())
	if err != nil {
		return errors.Wrap(err, "Detect root disk device")
	}

	size := d.inst.ExpandedDevices()[rootDiskName]["size"]
	migrationSize := d.inst.ExpandedDevices()[rootDiskName]["size.state"]

	pool, err := storagePools.GetPoolByInstance(d.state, d.inst)
	if err != nil {
		return err
	}

	if unmount {
		unmounted, err := pool.UnmountInstance(d.inst, nil)
		if err != nil {
			return err
		}

		if unmounted {
			// Remount on the way out (best effort).
			defer pool.MountInstance(d.inst, nil)
		}
	}

	return pool.SetInstanceQuota(d.inst, size, migrationSize, nil)
}
// generateLimits adds a set of cgroup rules to apply specified limits to the supplied RunConfig.
func (d *disk) generateLimits(runConf *deviceConfig.RunConfig) error {
	// Check whether any disk device on the instance carries throttle limits.
	hasDiskLimits := false
	for _, dev := range d.inst.ExpandedDevices() {
		if dev["type"] != "disk" {
			continue
		}

		if dev["limits.read"] != "" || dev["limits.write"] != "" || dev["limits.max"] != "" {
			hasDiskLimits = true
		}
	}

	if !hasDiskLimits {
		return nil
	}

	if !d.state.OS.CGInfo.Supports(cgroup.Blkio, nil) {
		return fmt.Errorf("Cannot apply disk limits as blkio cgroup controller is missing")
	}

	diskLimits, err := d.getDiskLimits()
	if err != nil {
		return err
	}

	cg, err := cgroup.New(&cgroupWriter{runConf})
	if err != nil {
		return err
	}

	// Emit one blkio rule per non-zero limit, keeping the
	// read-bps, read-iops, write-bps, write-iops order.
	for block, limit := range diskLimits {
		rules := []struct {
			direction string
			unit      string
			value     int64
		}{
			{"read", "bps", limit.readBps},
			{"read", "iops", limit.readIops},
			{"write", "bps", limit.writeBps},
			{"write", "iops", limit.writeIops},
		}

		for _, rule := range rules {
			if rule.value <= 0 {
				continue
			}

			err = cg.SetBlkioLimit(block, rule.direction, rule.unit, rule.value)
			if err != nil {
				return err
			}
		}
	}

	return nil
}
// cgroupWriter is a cgroup read/write backend that records key/value pairs
// into a RunConfig (for later application by the instance driver) instead of
// writing them to the cgroup filesystem directly.
type cgroupWriter struct {
	runConf *deviceConfig.RunConfig
}
// Get is unsupported: this writer only records cgroup values, it never reads them.
func (w *cgroupWriter) Get(version cgroup.Backend, controller string, key string) (string, error) {
	err := fmt.Errorf("This cgroup handler does not support reading")

	return "", err
}
// Set records the cgroup key/value pair into the associated RunConfig.
func (w *cgroupWriter) Set(version cgroup.Backend, controller string, key string, value string) error {
	item := deviceConfig.RunConfigItem{
		Key:   key,
		Value: value,
	}

	w.runConf.CGroups = append(w.runConf.CGroups, item)

	return nil
}
// mountPoolVolume mounts the pool volume specified in d.config["source"] from pool specified in d.config["pool"]
// and return the mount path. If the instance type is container volume will be shifted if needed.
//
// Returns the filesystem mount path for filesystem volumes, or the block
// device path for block-content volumes.
func (d *disk) mountPoolVolume(revert *revert.Reverter) (string, error) {
	// Deal with mounting storage volumes created via the storage api. Extract the name of the storage volume
	// that we are supposed to attach. We assume that the only syntactically valid ways of specifying a
	// storage volume are:
	// - <volume_name>
	// - <type>/<volume_name>
	// Currently, <type> must either be empty or "custom".
	// We do not yet support instance mounts.
	if filepath.IsAbs(d.config["source"]) {
		return "", fmt.Errorf(`When the "pool" property is set "source" must specify the name of a volume, not a path`)
	}

	volumeTypeName := ""
	cleanSource := filepath.Clean(d.config["source"])
	volumeName := cleanSource

	// Split the cleaned source itself (not the raw config value) so the slice
	// indexes always refer to the string actually being sliced. Previously the
	// index computed on the cleaned string was applied to the raw string,
	// producing wrong names whenever Clean() changed the value (e.g. "a//b").
	slash := strings.Index(cleanSource, "/")
	if slash > 0 {
		// Extract volume type.
		volumeTypeName = cleanSource[:slash]
		// Extract volume name.
		volumeName = cleanSource[slash+1:]
	}

	var srcPath string

	// Check volume type name is custom.
	switch volumeTypeName {
	case db.StoragePoolVolumeTypeNameContainer:
		return "", fmt.Errorf("Using instance storage volumes is not supported")
	case "":
		// We simply received the name of a storage volume.
		volumeTypeName = db.StoragePoolVolumeTypeNameCustom
		fallthrough
	case db.StoragePoolVolumeTypeNameCustom:
		break
	case db.StoragePoolVolumeTypeNameImage:
		return "", fmt.Errorf("Using image storage volumes is not supported")
	default:
		return "", fmt.Errorf("Unknown storage type prefix %q found", volumeTypeName)
	}

	// Only custom volumes can be attached currently.
	storageProjectName, err := project.StorageVolumeProject(d.state.Cluster, d.inst.Project(), db.StoragePoolVolumeTypeCustom)
	if err != nil {
		return "", err
	}

	volStorageName := project.StorageVolume(storageProjectName, volumeName)
	srcPath = storageDrivers.GetVolumeMountPath(d.config["pool"], storageDrivers.VolumeTypeCustom, volStorageName)

	pool, err := storagePools.GetPoolByName(d.state, d.config["pool"])
	if err != nil {
		return "", err
	}

	err = pool.MountCustomVolume(storageProjectName, volumeName, nil)
	if err != nil {
		return "", errors.Wrapf(err, "Failed mounting storage volume %q of type %q on storage pool %q", volumeName, volumeTypeName, pool.Name())
	}
	revert.Add(func() { pool.UnmountCustomVolume(storageProjectName, volumeName, nil) })

	_, vol, err := d.state.Cluster.GetLocalStoragePoolVolume(storageProjectName, volumeName, db.StoragePoolVolumeTypeCustom, pool.ID())
	if err != nil {
		return "", errors.Wrapf(err, "Failed to fetch local storage volume record")
	}

	if d.inst.Type() == instancetype.Container {
		// Containers can only attach filesystem volumes, and those may need idmap shifting.
		if vol.ContentType == db.StoragePoolVolumeContentTypeNameFS {
			err = d.storagePoolVolumeAttachShift(storageProjectName, pool.Name(), volumeName, db.StoragePoolVolumeTypeCustom, srcPath)
			if err != nil {
				return "", errors.Wrapf(err, "Failed shifting storage volume %q of type %q on storage pool %q", volumeName, volumeTypeName, pool.Name())
			}
		} else {
			return "", fmt.Errorf("Only filesystem volumes are supported for containers")
		}
	}

	// Block volumes are passed through as the block device path instead.
	if vol.ContentType == db.StoragePoolVolumeContentTypeNameBlock {
		srcPath, err = pool.GetCustomVolumeDisk(storageProjectName, volumeName)
		if err != nil {
			return "", errors.Wrapf(err, "Failed to get disk path")
		}
	}

	return srcPath, nil
}
// createDevice creates a disk device mount on host.
// The poolVolSrcPath takes the path to the mounted custom pool volume when d.config["pool"] is non-empty.
//
// Returns the host-side mount point path, or "" (with nil error) when the
// source is unavailable and the device is not required.
func (d *disk) createDevice(revert *revert.Reverter, poolVolSrcPath string) (string, error) {
	// Paths.
	devPath := d.getDevicePath(d.name, d.config)
	srcPath := shared.HostPath(d.config["source"])

	isRequired := d.isRequired(d.config)
	isReadOnly := shared.IsTrue(d.config["readonly"])
	isRecursive := shared.IsTrue(d.config["recursive"])

	mntOptions := d.config["raw.mount.options"]
	fsName := "none"

	isFile := false
	if d.config["pool"] == "" {
		isFile = !shared.IsDir(srcPath) && !IsBlockdev(srcPath)

		if strings.HasPrefix(d.config["source"], "cephfs:") {
			// Get fs name and path from d.config.
			fields := strings.SplitN(d.config["source"], ":", 2)
			fields = strings.SplitN(fields[1], "/", 2)
			mdsName := fields[0]
			mdsPath := fields[1]
			clusterName, userName := d.cephCreds()

			// Get the mount options.
			mntSrcPath, fsOptions, fsErr := diskCephfsOptions(clusterName, userName, mdsName, mdsPath)
			if fsErr != nil {
				return "", fsErr
			}

			// Join the options with any provided by the user.
			if mntOptions == "" {
				mntOptions = fsOptions
			} else {
				mntOptions += "," + fsOptions
			}

			fsName = "ceph"
			srcPath = mntSrcPath
			isFile = false
		} else if strings.HasPrefix(d.config["source"], "ceph:") {
			// Get the pool and volume names.
			fields := strings.SplitN(d.config["source"], ":", 2)
			fields = strings.SplitN(fields[1], "/", 2)
			poolName := fields[0]
			volumeName := fields[1]
			clusterName, userName := d.cephCreds()

			// Map the RBD.
			rbdPath, err := diskCephRbdMap(clusterName, userName, poolName, volumeName)
			if err != nil {
				msg := fmt.Sprintf("Could not mount map Ceph RBD: %v", err)
				if !isRequired {
					// Will fail the PathExists test below.
					d.logger.Warn(msg)
					return "", nil
				}

				// Use a fixed format verb so the message cannot be re-interpreted
				// as a format string (go vet printf).
				return "", fmt.Errorf("%s", msg)
			}

			// Record the device path.
			err = d.volatileSet(map[string]string{"ceph_rbd": rbdPath})
			if err != nil {
				return "", err
			}

			srcPath = rbdPath
			isFile = false
		}
	} else {
		srcPath = poolVolSrcPath // Use pool source path override.
	}

	// Check if the source exists unless it is a cephfs.
	if fsName != "ceph" && !shared.PathExists(srcPath) {
		if !isRequired {
			return "", nil
		}

		return "", fmt.Errorf("Source path %q doesn't exist for device %q", srcPath, d.name)
	}

	// Create the devices directory if missing.
	if !shared.PathExists(d.inst.DevicesPath()) {
		err := os.Mkdir(d.inst.DevicesPath(), 0711)
		if err != nil {
			return "", err
		}
	}

	// Clean any existing entry.
	if shared.PathExists(devPath) {
		err := os.Remove(devPath)
		if err != nil {
			return "", err
		}
	}

	// Create the mount point.
	if isFile {
		f, err := os.Create(devPath)
		if err != nil {
			return "", err
		}

		f.Close()
	} else {
		err := os.Mkdir(devPath, 0700)
		if err != nil {
			return "", err
		}
	}

	// Mount the fs.
	err := DiskMount(srcPath, devPath, isReadOnly, isRecursive, d.config["propagation"], mntOptions, fsName)
	if err != nil {
		return "", err
	}

	revert.Success()
	return devPath, nil
}
// storagePoolVolumeAttachShift shifts (or unshifts) the on-disk idmap of a storage volume so it
// matches the idmap of the container this device is attached to, then records the applied idmap
// in the volume's "volatile.idmap.last"/"volatile.idmap.next" config keys.
//
// Volumes marked "security.unmapped" are skipped entirely; volumes marked
// "security.shifted" keep a nil target idmap (shiftfs/dynamic shifting handles them).
func (d *disk) storagePoolVolumeAttachShift(projectName, poolName, volumeName string, volumeType int, remapPath string) error {
	// Load the DB records.
	poolID, pool, _, err := d.state.Cluster.GetStoragePool(poolName)
	if err != nil {
		return err
	}

	_, volume, err := d.state.Cluster.GetLocalStoragePoolVolume(projectName, volumeName, volumeType, poolID)
	if err != nil {
		return err
	}

	poolVolumePut := volume.Writable()

	// Check if unmapped.
	if shared.IsTrue(poolVolumePut.Config["security.unmapped"]) {
		// No need to look at containers and maps for unmapped volumes.
		return nil
	}

	// Get the on-disk idmap for the volume.
	var lastIdmap *idmap.IdmapSet
	if poolVolumePut.Config["volatile.idmap.last"] != "" {
		lastIdmap, err = idmap.JSONUnmarshal(poolVolumePut.Config["volatile.idmap.last"])
		if err != nil {
			d.logger.Error("Failed to unmarshal last idmapping", log.Ctx{"idmap": poolVolumePut.Config["volatile.idmap.last"], "err": err})
			return err
		}
	}

	// Determine the idmap the volume needs to end up with. For a shifted
	// volume this stays nil (serialized as "[]").
	var nextIdmap *idmap.IdmapSet
	nextJSONMap := "[]"
	if !shared.IsTrue(poolVolumePut.Config["security.shifted"]) {
		c := d.inst.(instance.Container)
		// Get the container's idmap.
		if c.IsRunning() {
			nextIdmap, err = c.CurrentIdmap()
		} else {
			nextIdmap, err = c.NextIdmap()
		}
		if err != nil {
			return err
		}

		if nextIdmap != nil {
			nextJSONMap, err = idmap.JSONMarshal(nextIdmap)
			if err != nil {
				return err
			}
		}
	}

	poolVolumePut.Config["volatile.idmap.next"] = nextJSONMap

	// Only rewrite ownership on disk when the target idmap differs from the
	// one last applied.
	if !nextIdmap.Equals(lastIdmap) {
		d.logger.Debug("Shifting storage volume")

		// A non-shifted volume can only carry one idmap at a time, so every
		// other container using it must have an identical idmap.
		if !shared.IsTrue(poolVolumePut.Config["security.shifted"]) {
			volumeUsedBy := []instance.Instance{}
			err = storagePools.VolumeUsedByInstanceDevices(d.state, poolName, projectName, volume, true, func(dbInst db.Instance, project api.Project, profiles []api.Profile, usedByDevices []string) error {
				inst, err := instance.Load(d.state, db.InstanceToArgs(&dbInst), profiles)
				if err != nil {
					return err
				}

				volumeUsedBy = append(volumeUsedBy, inst)
				return nil
			})
			if err != nil {
				return err
			}

			if len(volumeUsedBy) > 1 {
				for _, inst := range volumeUsedBy {
					if inst.Type() != instancetype.Container {
						continue
					}

					ct := inst.(instance.Container)

					var ctNextIdmap *idmap.IdmapSet
					if ct.IsRunning() {
						ctNextIdmap, err = ct.CurrentIdmap()
					} else {
						ctNextIdmap, err = ct.NextIdmap()
					}
					if err != nil {
						return fmt.Errorf("Failed to retrieve idmap of container")
					}

					if !nextIdmap.Equals(ctNextIdmap) {
						return fmt.Errorf("Idmaps of container %q and storage volume %q are not identical", ct.Name(), volumeName)
					}
				}
			} else if len(volumeUsedBy) == 1 {
				// If we're the only one who's attached that container
				// we can shift the storage volume.
				// I'm not sure if we want some locking here.
				if volumeUsedBy[0].Name() != d.inst.Name() {
					return fmt.Errorf("Idmaps of container and storage volume are not identical")
				}
			}
		}

		// Unshift rootfs.
		if lastIdmap != nil {
			var err error

			if pool.Driver == "zfs" {
				err = lastIdmap.UnshiftRootfs(remapPath, storageDrivers.ShiftZFSSkipper)
			} else {
				err = lastIdmap.UnshiftRootfs(remapPath, nil)
			}
			if err != nil {
				d.logger.Error("Failed to unshift", log.Ctx{"path": remapPath, "err": err})
				return err
			}

			d.logger.Debug("Unshifted", log.Ctx{"path": remapPath})
		}

		// Shift rootfs.
		if nextIdmap != nil {
			var err error

			if pool.Driver == "zfs" {
				err = nextIdmap.ShiftRootfs(remapPath, storageDrivers.ShiftZFSSkipper)
			} else {
				err = nextIdmap.ShiftRootfs(remapPath, nil)
			}
			if err != nil {
				d.logger.Error("Failed to shift", log.Ctx{"path": remapPath, "err": err})
				return err
			}

			d.logger.Debug("Shifted", log.Ctx{"path": remapPath})
		}

		d.logger.Debug("Shifted storage volume")
	}

	jsonIdmap := "[]"
	if nextIdmap != nil {
		var err error
		jsonIdmap, err = idmap.JSONMarshal(nextIdmap)
		if err != nil {
			d.logger.Error("Failed to marshal idmap", log.Ctx{"idmap": nextIdmap, "err": err})
			return err
		}
	}

	// Update last idmap.
	poolVolumePut.Config["volatile.idmap.last"] = jsonIdmap

	err = d.state.Cluster.UpdateStoragePoolVolume(projectName, volumeName, volumeType, poolID, poolVolumePut.Description, poolVolumePut.Config)
	if err != nil {
		return err
	}

	return nil
}
// Stop is run when the device is removed from the instance.
func (d *disk) Stop() (*deviceConfig.RunConfig, error) {
	// VMs get their own teardown path.
	if d.inst.Type() == instancetype.VM {
		return d.stopVM()
	}

	runConf := deviceConfig.RunConfig{
		PostHooks: []func() error{d.postStop},
	}

	// No host-side mount means there's nothing to undo.
	devPath := d.getDevicePath(d.name, d.config)
	if !shared.PathExists(devPath) {
		return nil, nil
	}

	// Request an unmount of the device inside the instance.
	mountPath := strings.TrimPrefix(d.config["path"], "/")
	runConf.Mounts = append(runConf.Mounts, deviceConfig.MountEntryItem{
		TargetPath: mountPath,
	})

	return &runConf, nil
}
// stopVM tears down the VM directory-sharing helpers for this device.
func (d *disk) stopVM() (*deviceConfig.RunConfig, error) {
	// Stop the virtfs-proxy-helper process (9p share) and clean up its files.
	cleanupProxy := func() error {
		sockPath, pidPath := d.vmVirtfsProxyHelperPaths()

		if shared.PathExists(pidPath) {
			proc, err := subprocess.ImportProcess(pidPath)
			if err != nil {
				return err
			}

			err = proc.Stop()
			if err != nil && err != subprocess.ErrNotRunning {
				return err
			}

			// Remove PID file.
			os.Remove(pidPath)
		}

		// Remove socket file.
		os.Remove(sockPath)

		return nil
	}

	if err := cleanupProxy(); err != nil {
		return &deviceConfig.RunConfig{}, errors.Wrapf(err, "Failed cleaning up virtfs-proxy-helper")
	}

	// Stop the virtiofsd process and clean up.
	err := DiskVMVirtiofsdStop(d.vmVirtiofsdPaths())
	if err != nil {
		return &deviceConfig.RunConfig{}, errors.Wrapf(err, "Failed cleaning up virtiofsd")
	}

	runConf := deviceConfig.RunConfig{
		PostHooks: []func() error{d.postStop},
	}

	return &runConf, nil
}
// postStop is run after the device is removed from the instance.
func (d *disk) postStop() error {
	// Clean any existing device mount entry. Should occur first before custom volume unmounts.
	err := DiskMountClear(d.getDevicePath(d.name, d.config))
	if err != nil {
		return err
	}

	// Check if pool-specific action should be taken to unmount custom volume disks.
	if d.config["pool"] != "" && d.config["path"] != "/" {
		storagePool, err := storagePools.GetPoolByName(d.state, d.config["pool"])
		if err != nil {
			return err
		}

		// Only custom volumes can be attached currently.
		volProjectName, err := project.StorageVolumeProject(d.state.Cluster, d.inst.Project(), db.StoragePoolVolumeTypeCustom)
		if err != nil {
			return err
		}

		_, err = storagePool.UnmountCustomVolume(volProjectName, d.config["source"], nil)
		if err != nil {
			return err
		}
	}

	// Unmap any Ceph RBD device mapped at start time; failures are logged rather than fatal.
	if strings.HasPrefix(d.config["source"], "ceph:") {
		volatile := d.volatileGet()

		err := diskCephRbdUnmap(volatile["ceph_rbd"])
		if err != nil {
			d.logger.Error("Failed to unmap RBD volume", log.Ctx{"rbd": volatile["ceph_rbd"], "err": err})
		}
	}

	return nil
}
// getDiskLimits calculates Block I/O limits.
//
// It resolves each disk device's source to its backing block device(s)
// (major:minor), then averages duplicate limits per block device. The result
// maps "major:minor" to the consolidated limits to apply.
func (d *disk) getDiskLimits() (map[string]diskBlockLimit, error) {
	result := map[string]diskBlockLimit{}

	// Build a list of all valid block devices
	validBlocks := []string{}
	dents, err := ioutil.ReadDir("/sys/class/block/")
	if err != nil {
		return nil, err
	}
	for _, f := range dents {
		fPath := filepath.Join("/sys/class/block/", f.Name())
		// Skip partitions; limits apply to whole devices only.
		if shared.PathExists(fmt.Sprintf("%s/partition", fPath)) {
			continue
		}

		// Skip entries without a device number.
		if !shared.PathExists(fmt.Sprintf("%s/dev", fPath)) {
			continue
		}

		// The "dev" file contains the "major:minor" pair.
		block, err := ioutil.ReadFile(fmt.Sprintf("%s/dev", fPath))
		if err != nil {
			return nil, err
		}

		validBlocks = append(validBlocks, strings.TrimSuffix(string(block), "\n"))
	}

	// Process all the limits
	blockLimits := map[string][]diskBlockLimit{}
	for devName, dev := range d.inst.ExpandedDevices() {
		if dev["type"] != "disk" {
			continue
		}

		// Apply max limit
		if dev["limits.max"] != "" {
			dev["limits.read"] = dev["limits.max"]
			dev["limits.write"] = dev["limits.max"]
		}

		// Parse the user input
		readBps, readIops, writeBps, writeIops, err := d.parseDiskLimit(dev["limits.read"], dev["limits.write"])
		if err != nil {
			return nil, err
		}

		// Set the source path
		source := d.getDevicePath(devName, dev)
		if dev["source"] == "" {
			// An empty source means the root disk.
			source = d.inst.RootfsPath()
		}

		if !shared.PathExists(source) {
			// Require that device is mounted before resolving block device if required.
			if d.isRequired(dev) {
				return nil, fmt.Errorf("Block device path doesn't exist %q", source)
			}

			continue // Do not resolve block device if device isn't mounted.
		}

		// Get the backing block devices (major:minor)
		blocks, err := d.getParentBlocks(source)
		if err != nil {
			if readBps == 0 && readIops == 0 && writeBps == 0 && writeIops == 0 {
				// If the device doesn't exist, there is no limit to clear so ignore the failure
				continue
			} else {
				return nil, err
			}
		}

		device := diskBlockLimit{readBps: readBps, readIops: readIops, writeBps: writeBps, writeIops: writeIops}
		for _, block := range blocks {
			blockStr := ""

			if shared.StringInSlice(block, validBlocks) {
				// Straightforward entry (full block device)
				blockStr = block
			} else {
				// Attempt to deal with a partition (guess its parent)
				fields := strings.SplitN(block, ":", 2)
				fields[1] = "0"
				if shared.StringInSlice(fmt.Sprintf("%s:%s", fields[0], fields[1]), validBlocks) {
					blockStr = fmt.Sprintf("%s:%s", fields[0], fields[1])
				}
			}

			if blockStr == "" {
				return nil, fmt.Errorf("Block device doesn't support quotas %q", block)
			}

			if blockLimits[blockStr] == nil {
				blockLimits[blockStr] = []diskBlockLimit{}
			}
			blockLimits[blockStr] = append(blockLimits[blockStr], device)
		}
	}

	// Average duplicate limits
	for block, limits := range blockLimits {
		var readBpsCount, readBpsTotal, readIopsCount, readIopsTotal, writeBpsCount, writeBpsTotal, writeIopsCount, writeIopsTotal int64

		// Zero values are treated as "no limit" and excluded from the average.
		for _, limit := range limits {
			if limit.readBps > 0 {
				readBpsCount++
				readBpsTotal += limit.readBps
			}

			if limit.readIops > 0 {
				readIopsCount++
				readIopsTotal += limit.readIops
			}

			if limit.writeBps > 0 {
				writeBpsCount++
				writeBpsTotal += limit.writeBps
			}

			if limit.writeIops > 0 {
				writeIopsCount++
				writeIopsTotal += limit.writeIops
			}
		}

		device := diskBlockLimit{}

		if readBpsCount > 0 {
			device.readBps = readBpsTotal / readBpsCount
		}

		if readIopsCount > 0 {
			device.readIops = readIopsTotal / readIopsCount
		}

		if writeBpsCount > 0 {
			device.writeBps = writeBpsTotal / writeBpsCount
		}

		if writeIopsCount > 0 {
			device.writeIops = writeIopsTotal / writeIopsCount
		}

		result[block] = device
	}

	return result, nil
}
// parseDiskLimit parses the read and write limit strings into their
// bytes-per-second and IOPS components, returned as
// (readBps, readIops, writeBps, writeIops, error).
func (d *disk) parseDiskLimit(readSpeed string, writeSpeed string) (int64, int64, int64, int64, error) {
	// parseValue converts one limit string into its (bps, iops) pair.
	// A value ending in "iops" is an IOPS count; anything else is a byte size.
	parseValue := func(value string) (int64, int64, error) {
		var bps, iops int64

		if value == "" {
			return 0, 0, nil
		}

		if strings.HasSuffix(value, "iops") {
			parsed, err := strconv.ParseInt(strings.TrimSuffix(value, "iops"), 10, 64)
			if err != nil {
				return -1, -1, err
			}

			iops = parsed
		} else {
			parsed, err := units.ParseByteSizeString(value)
			if err != nil {
				return -1, -1, err
			}

			bps = parsed
		}

		return bps, iops, nil
	}

	readBps, readIops, err := parseValue(readSpeed)
	if err != nil {
		return -1, -1, -1, -1, err
	}

	writeBps, writeIops, err := parseValue(writeSpeed)
	if err != nil {
		return -1, -1, -1, -1, err
	}

	return readBps, readIops, writeBps, writeIops, nil
}
// getParentBlocks returns the "major:minor" identifiers of the block devices
// backing the filesystem that contains the supplied path. For zfs pools and
// btrfs filesystems it resolves the member devices (recursing through
// file-backed vdevs); for anything else it returns the mount's own device.
func (d *disk) getParentBlocks(path string) ([]string, error) {
	var devices []string
	var dev []string

	// Expand the mount path.
	absPath, err := filepath.Abs(path)
	if err != nil {
		return nil, err
	}

	// Best-effort symlink resolution: fall back to the absolute path on failure.
	expPath, err := filepath.EvalSymlinks(absPath)
	if err != nil {
		expPath = absPath
	}

	// Find the source mount of the path.
	file, err := os.Open("/proc/self/mountinfo")
	if err != nil {
		return nil, err
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	match := ""
	for scanner.Scan() {
		line := scanner.Text()
		rows := strings.Fields(line)

		// Keep the most specific (longest) mount point containing the path.
		if len(rows[4]) <= len(match) {
			continue
		}

		if expPath != rows[4] && !strings.HasPrefix(expPath, rows[4]) {
			continue
		}

		match = rows[4]

		// Go backward to avoid problems with optional fields
		dev = []string{rows[2], rows[len(rows)-2]}
	}

	// Fix: surface scanner read errors instead of silently acting on a
	// partial parse of /proc/self/mountinfo.
	err = scanner.Err()
	if err != nil {
		return nil, err
	}

	if dev == nil {
		return nil, fmt.Errorf("Couldn't find a match /proc/self/mountinfo entry")
	}

	// Handle the most simple case
	if !strings.HasPrefix(dev[0], "0:") {
		return []string{dev[0]}, nil
	}

	// Deal with per-filesystem oddities. We don't care about failures here
	// because any non-special filesystem => directory backend.
	fs, _ := util.FilesystemDetect(expPath)

	if fs == "zfs" && shared.PathExists("/dev/zfs") {
		// Accessible zfs filesystems
		poolName := strings.Split(dev[1], "/")[0]

		output, err := shared.RunCommand("zpool", "status", "-P", "-L", poolName)
		if err != nil {
			return nil, fmt.Errorf("Failed to query zfs filesystem information for %q: %v", dev[1], err)
		}

		header := true
		for _, line := range strings.Split(output, "\n") {
			fields := strings.Fields(line)
			if len(fields) < 5 {
				continue
			}

			if fields[1] != "ONLINE" {
				continue
			}

			// Skip the first ONLINE row (the pool itself); keep its vdevs.
			if header {
				header = false
				continue
			}

			var path string
			if shared.PathExists(fields[0]) {
				if shared.IsBlockdevPath(fields[0]) {
					path = fields[0]
				} else {
					// File-backed vdev: recurse to find its own backing device(s).
					subDevices, err := d.getParentBlocks(fields[0])
					if err != nil {
						return nil, err
					}

					for _, dev := range subDevices {
						devices = append(devices, dev)
					}
				}
			} else {
				continue
			}

			if path != "" {
				_, major, minor, err := unixDeviceAttributes(path)
				if err != nil {
					continue
				}

				devices = append(devices, fmt.Sprintf("%d:%d", major, minor))
			}
		}

		if len(devices) == 0 {
			return nil, fmt.Errorf("Unable to find backing block for zfs pool %q", poolName)
		}
	} else if fs == "btrfs" && shared.PathExists(dev[1]) {
		// Accessible btrfs filesystems
		output, err := shared.RunCommand("btrfs", "filesystem", "show", dev[1])
		if err != nil {
			// Fallback to using device path to support BTRFS on block volumes (like LVM).
			_, major, minor, errFallback := unixDeviceAttributes(dev[1])
			if errFallback != nil {
				return nil, errors.Wrapf(err, "Failed to query btrfs filesystem information for %q", dev[1])
			}

			devices = append(devices, fmt.Sprintf("%d:%d", major, minor))
		}

		// NOTE(review): when RunCommand failed above, output is expected to be
		// empty so this loop is a no-op — confirm against shared.RunCommand.
		for _, line := range strings.Split(output, "\n") {
			fields := strings.Fields(line)
			if len(fields) == 0 || fields[0] != "devid" {
				continue
			}

			_, major, minor, err := unixDeviceAttributes(fields[len(fields)-1])
			if err != nil {
				return nil, err
			}

			devices = append(devices, fmt.Sprintf("%d:%d", major, minor))
		}
	} else if shared.PathExists(dev[1]) {
		// Anything else with a valid path
		_, major, minor, err := unixDeviceAttributes(dev[1])
		if err != nil {
			return nil, err
		}

		devices = append(devices, fmt.Sprintf("%d:%d", major, minor))
	} else {
		return nil, fmt.Errorf("Invalid block device %q", dev[1])
	}

	return devices, nil
}
// generateVMConfigDrive generates an ISO containing the cloud init config for a VM.
// The cloud-init seed files (vendor-data, user-data, optional network-config and
// meta-data) are staged in a scratch directory under the instance's devices path,
// packed into an ISO labelled "cidata", and the scratch directory is removed.
// Returns the path to the ISO.
func (d *disk) generateVMConfigDrive() (string, error) {
	scratchDir := filepath.Join(d.inst.DevicesPath(), storageDrivers.PathNameEncode(d.name))

	// Check we have the mkisofs tool available.
	mkisofsPath, err := exec.LookPath("mkisofs")
	if err != nil {
		return "", err
	}

	// Create config drive dir.
	err = os.MkdirAll(scratchDir, 0100)
	if err != nil {
		return "", err
	}

	// writeSeedFile stores one cloud-init seed file inside the scratch dir.
	writeSeedFile := func(name string, content string) error {
		return ioutil.WriteFile(filepath.Join(scratchDir, name), []byte(content), 0400)
	}

	instanceConfig := d.inst.ExpandedConfig()

	// Use an empty vendor-data file if no custom vendor-data supplied.
	vendorData := instanceConfig["user.vendor-data"]
	if vendorData == "" {
		vendorData = "#cloud-config\n{}"
	}

	err = writeSeedFile("vendor-data", vendorData)
	if err != nil {
		return "", err
	}

	// Use an empty user-data file if no custom user-data supplied.
	userData := instanceConfig["user.user-data"]
	if userData == "" {
		userData = "#cloud-config\n{}"
	}

	err = writeSeedFile("user-data", userData)
	if err != nil {
		return "", err
	}

	// Include a network-config file if the user configured it.
	if networkConfig := instanceConfig["user.network-config"]; networkConfig != "" {
		err = writeSeedFile("network-config", networkConfig)
		if err != nil {
			return "", err
		}
	}

	// Append any custom meta-data to our predefined meta-data config.
	metaData := fmt.Sprintf("instance-id: %s\nlocal-hostname: %s\n%s\n", d.inst.Name(), d.inst.Name(), instanceConfig["user.meta-data"])

	err = writeSeedFile("meta-data", metaData)
	if err != nil {
		return "", err
	}

	// Finally convert the config drive dir into an ISO file. The cidata label is important
	// as this is what cloud-init uses to detect, mount the drive and run the cloud-init
	// templates on first boot. The vendor-data template then modifies the system so that the
	// config drive is mounted and the agent is started on subsequent boots.
	isoPath := filepath.Join(d.inst.Path(), "config.iso")
	_, err = shared.RunCommand(mkisofsPath, "-J", "-R", "-V", "cidata", "-o", isoPath, scratchDir)
	if err != nil {
		return "", err
	}

	// Remove the config drive folder (best effort cleanup).
	os.RemoveAll(scratchDir)

	return isoPath, nil
}
// cephCreds returns the ceph cluster name and user name to use for ceph disks,
// defaulting to "ceph" and "admin" respectively when not set in the device config.
func (d *disk) cephCreds() (string, string) {
	clusterName := d.config["ceph.cluster_name"]
	userName := d.config["ceph.user_name"]

	if clusterName == "" {
		clusterName = "ceph"
	}

	if userName == "" {
		userName = "admin"
	}

	return clusterName, userName
}
lxd/device/disk: Remove check that prevents use of virtiofsd for readonly disks in startVM
The readonly bind mount works around this limitation now.
Signed-off-by: Thomas Parrott <6b778ce645fb0e3dde76d79eccad490955b1ae74@canonical.com>
package device
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
"github.com/lxc/lxd/lxd/cgroup"
"github.com/lxc/lxd/lxd/db"
deviceConfig "github.com/lxc/lxd/lxd/device/config"
"github.com/lxc/lxd/lxd/instance"
"github.com/lxc/lxd/lxd/instance/instancetype"
"github.com/lxc/lxd/lxd/project"
"github.com/lxc/lxd/lxd/revert"
storagePools "github.com/lxc/lxd/lxd/storage"
storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
"github.com/lxc/lxd/lxd/util"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/idmap"
log "github.com/lxc/lxd/shared/log15"
"github.com/lxc/lxd/shared/logger"
"github.com/lxc/lxd/shared/subprocess"
"github.com/lxc/lxd/shared/units"
"github.com/lxc/lxd/shared/validate"
)
// Special disk "source" value used for generating a VM cloud-init config ISO.
const diskSourceCloudInit = "cloud-init:config"
// DiskVirtiofsdSockMountOpt indicates the mount option prefix used to provide the virtiofsd socket path to
// the QEMU driver.
const DiskVirtiofsdSockMountOpt = "virtiofsdSock"
// diskBlockLimit describes the I/O limits to apply to a single block device.
// A zero value for a field means no limit for that dimension.
type diskBlockLimit struct {
	readBps   int64 // Read limit in bytes per second.
	readIops  int64 // Read limit in I/O operations per second.
	writeBps  int64 // Write limit in bytes per second.
	writeIops int64 // Write limit in I/O operations per second.
}
// disk represents a LXD disk device, sharing common behaviour via deviceCommon.
type disk struct {
	deviceCommon
}
// isRequired indicates whether the supplied device config requires this device to start OK.
// A device is required unless "required" is explicitly false or the deprecated "optional"
// property is true.
func (d *disk) isRequired(devConfig deviceConfig.Device) bool {
	if shared.IsTrue(devConfig["optional"]) {
		return false
	}

	// Defaults to required when unset.
	return devConfig["required"] == "" || shared.IsTrue(devConfig["required"])
}
// sourceIsLocalPath returns true if the source supplied should be considered a local path on the host.
// It returns false if the disk source is empty, a VM cloud-init config drive, or a remote ceph/cephfs path.
func (d *disk) sourceIsLocalPath(source string) bool {
	if source == "" {
		return false
	}

	if source == diskSourceCloudInit {
		return false
	}

	// Fix: check the supplied source argument rather than d.config["source"] so the
	// function honours its parameter consistently (callers currently pass
	// d.config["source"], so existing behaviour is unchanged).
	if shared.StringHasPrefix(source, "ceph:", "cephfs:") {
		return false
	}

	return true
}
// validateConfig checks the supplied config for correctness.
func (d *disk) validateConfig(instConf instance.ConfigReader) error {
	if !instanceSupported(instConf.Type(), instancetype.Container, instancetype.VM) {
		return ErrUnsupportedDevType
	}

	// Supported propagation types.
	// If an empty value is supplied the default behavior is to assume "private" mode.
	// These come from https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt
	propagationTypes := []string{"", "private", "shared", "slave", "unbindable", "rshared", "rslave", "runbindable", "rprivate"}
	validatePropagation := func(input string) error {
		// Fix: validate the supplied value rather than d.config["bind"], which is not a
		// disk config key (always empty, so the check previously always passed and
		// invalid propagation values were never rejected).
		if !shared.StringInSlice(input, propagationTypes) {
			return fmt.Errorf("Invalid propagation value. Must be one of: %s", strings.Join(propagationTypes, ", "))
		}

		return nil
	}

	rules := map[string]func(string) error{
		"required":          validate.Optional(validate.IsBool),
		"optional":          validate.Optional(validate.IsBool), // "optional" is deprecated, replaced by "required".
		"readonly":          validate.Optional(validate.IsBool),
		"recursive":         validate.Optional(validate.IsBool),
		"shift":             validate.Optional(validate.IsBool),
		"source":            validate.IsAny,
		"limits.read":       validate.IsAny,
		"limits.write":      validate.IsAny,
		"limits.max":        validate.IsAny,
		"size":              validate.Optional(validate.IsSize),
		"size.state":        validate.Optional(validate.IsSize),
		"pool":              validate.IsAny,
		"propagation":       validatePropagation,
		"raw.mount.options": validate.IsAny,
		"ceph.cluster_name": validate.IsAny,
		"ceph.user_name":    validate.IsAny,
		"boot.priority":     validate.Optional(validate.IsUint32),
		"path":              validate.IsAny,
	}

	err := d.config.Validate(rules)
	if err != nil {
		return err
	}

	if d.config["required"] != "" && d.config["optional"] != "" {
		return fmt.Errorf(`Cannot use both "required" and deprecated "optional" properties at the same time`)
	}

	if d.config["source"] == "" && d.config["path"] != "/" {
		return fmt.Errorf(`Disk entry is missing the required "source" property`)
	}

	if d.config["path"] == "/" && d.config["source"] != "" {
		return fmt.Errorf(`Root disk entry may not have a "source" property set`)
	}

	if d.config["path"] == "/" && d.config["pool"] == "" {
		return fmt.Errorf(`Root disk entry must have a "pool" property set`)
	}

	if d.config["size"] != "" && d.config["path"] != "/" {
		return fmt.Errorf("Only the root disk may have a size quota")
	}

	if d.config["size.state"] != "" && d.config["path"] != "/" {
		return fmt.Errorf("Only the root disk may have a migration size quota")
	}

	if d.config["recursive"] != "" && (d.config["path"] == "/" || !shared.IsDir(shared.HostPath(d.config["source"]))) {
		return fmt.Errorf("The recursive option is only supported for additional bind-mounted paths")
	}

	if shared.IsTrue(d.config["recursive"]) && shared.IsTrue(d.config["readonly"]) {
		return fmt.Errorf("Recursive read-only bind-mounts aren't currently supported by the kernel")
	}

	// Check ceph options are only used when ceph or cephfs type source is specified.
	if !shared.StringHasPrefix(d.config["source"], "ceph:", "cephfs:") && (d.config["ceph.cluster_name"] != "" || d.config["ceph.user_name"] != "") {
		return fmt.Errorf("Invalid options ceph.cluster_name/ceph.user_name for source %q", d.config["source"])
	}

	// Check no other devices also have the same path as us. Use LocalDevices for this check so
	// that we can check before the config is expanded or when a profile is being checked.
	// Don't take into account the device names, only count active devices that point to the
	// same path, so that if merged profiles share the same the path and then one is removed
	// this can still be cleanly removed.
	pathCount := 0
	for _, devConfig := range instConf.LocalDevices() {
		if devConfig["type"] == "disk" && d.config["path"] != "" && devConfig["path"] == d.config["path"] {
			pathCount++
			if pathCount > 1 {
				return fmt.Errorf("More than one disk device uses the same path %q", d.config["path"])
			}
		}
	}

	// Check that external disk source path exists. External disk sources have a non-empty "source" property
	// that contains the path of the external source, and do not have a "pool" property. We only check the
	// source path exists when the disk device is required, is not an external ceph/cephfs source and is not a
	// VM cloud-init drive. We only check this when an instance is loaded to avoid validating snapshot configs
	// that may contain older config that no longer exists which can prevent migrations.
	if d.inst != nil && d.config["pool"] == "" && d.isRequired(d.config) && d.sourceIsLocalPath(d.config["source"]) && !shared.PathExists(shared.HostPath(d.config["source"])) {
		return fmt.Errorf("Missing source path %q for disk %q", d.config["source"], d.name)
	}

	if d.config["pool"] != "" {
		if d.inst != nil && !d.inst.IsSnapshot() {
			_, pool, _, err := d.state.Cluster.GetStoragePoolInAnyState(d.config["pool"])
			if err != nil {
				return fmt.Errorf("Failed to get storage pool %q: %s", d.config["pool"], err)
			}

			if pool.Status == "Pending" {
				return fmt.Errorf("Pool %q is pending", d.config["pool"])
			}
		}

		if d.config["shift"] != "" {
			return fmt.Errorf(`The "shift" property cannot be used with custom storage volumes`)
		}

		if filepath.IsAbs(d.config["source"]) {
			return fmt.Errorf("Storage volumes cannot be specified as absolute paths")
		}

		// Only perform expensive instance custom volume checks when not validating a profile and after
		// device expansion has occurred (to avoid doing it twice during instance load).
		if d.inst != nil && len(instConf.ExpandedDevices()) > 0 && d.config["source"] != "" && d.config["path"] != "/" {
			poolID, err := d.state.Cluster.GetStoragePoolID(d.config["pool"])
			if err != nil {
				return fmt.Errorf("The %q storage pool doesn't exist", d.config["pool"])
			}

			// Derive the effective storage project name from the instance config's project.
			storageProjectName, err := project.StorageVolumeProject(d.state.Cluster, instConf.Project(), db.StoragePoolVolumeTypeCustom)
			if err != nil {
				return err
			}

			// GetLocalStoragePoolVolume returns a volume with an empty Location field for remote drivers.
			_, vol, err := d.state.Cluster.GetLocalStoragePoolVolume(storageProjectName, d.config["source"], db.StoragePoolVolumeTypeCustom, poolID)
			if err != nil {
				return errors.Wrapf(err, "Failed loading custom volume")
			}

			// Check storage volume is available to mount on this cluster member.
			remoteInstance, err := storagePools.VolumeUsedByExclusiveRemoteInstancesWithProfiles(d.state, d.config["pool"], storageProjectName, vol)
			if err != nil {
				return errors.Wrapf(err, "Failed checking if custom volume is exclusively attached to another instance")
			}

			if remoteInstance != nil {
				return fmt.Errorf("Custom volume is already attached to an instance on a different node")
			}

			// Check only block type volumes are attached to VM instances.
			contentType, err := storagePools.VolumeContentTypeNameToContentType(vol.ContentType)
			if err != nil {
				return err
			}

			if contentType == db.StoragePoolVolumeContentTypeBlock {
				if instConf.Type() == instancetype.Container {
					return fmt.Errorf("Custom block volumes cannot be used on containers")
				}

				if d.config["path"] != "" {
					return fmt.Errorf("Custom block volumes cannot have a path defined")
				}
			}
		}
	}

	return nil
}
// getDevicePath returns the absolute path on the host for this instance and supplied device config.
func (d *disk) getDevicePath(devName string, devConfig deviceConfig.Device) string {
	// Encode the device name plus its relative destination into a single path component.
	relDestPath := strings.TrimPrefix(devConfig["path"], "/")
	encodedName := storageDrivers.PathNameEncode(deviceJoinPath("disk", devName, relDestPath))

	return filepath.Join(d.inst.DevicesPath(), encodedName)
}
// validateEnvironment checks the runtime environment for correctness.
func (d *disk) validateEnvironment() error {
	// Shiftfs support is required on the host when the shift option is enabled.
	if shared.IsTrue(d.config["shift"]) && !d.state.OS.Shiftfs {
		return fmt.Errorf("shiftfs is required by disk entry but isn't supported on system")
	}

	// The cloud-init config drive source is only meaningful for VMs.
	if d.config["source"] == diskSourceCloudInit && d.inst.Type() != instancetype.VM {
		return fmt.Errorf("disks with source=%s are only supported by virtual machines", diskSourceCloudInit)
	}

	return nil
}
// UpdatableFields returns a list of fields that can be updated without triggering a device remove & add.
func (d *disk) UpdatableFields(oldDevice Type) []string {
	// Updates are only possible between two disk devices.
	if _, ok := oldDevice.(*disk); !ok {
		return []string{}
	}

	return []string{"limits.max", "limits.read", "limits.write", "size", "size.state"}
}
// Register calls mount for the disk volume (which should already be mounted) to reinitialise the
// reference counter for volumes attached to running instances on LXD restart.
func (d *disk) Register() error {
	d.logger.Debug("Initialising mounted disk ref counter")

	if d.config["path"] == "/" {
		// Root disk: reinitialise via the instance's own storage pool.
		pool, err := storagePools.GetPoolByInstance(d.state, d.inst)
		if err != nil {
			return err
		}

		// Mounting the already-mounted volume bumps the ref counter.
		_, err = pool.MountInstance(d.inst, nil)
		if err != nil {
			return err
		}

		return nil
	}

	// Only custom pool volumes need reinitialising beyond this point.
	if d.config["source"] == "" || d.config["pool"] == "" {
		return nil
	}

	pool, err := storagePools.GetPoolByName(d.state, d.config["pool"])
	if err != nil {
		return err
	}

	storageProjectName, err := project.StorageVolumeProject(d.state.Cluster, d.inst.Project(), db.StoragePoolVolumeTypeCustom)
	if err != nil {
		return err
	}

	// Mounting the already-mounted volume bumps the ref counter.
	err = pool.MountCustomVolume(storageProjectName, d.config["source"], nil)
	if err != nil {
		return err
	}

	return nil
}
// Start is run when the device is added to the instance.
// Dispatches to the VM or container specific start path after environment validation.
func (d *disk) Start() (*deviceConfig.RunConfig, error) {
	if err := d.validateEnvironment(); err != nil {
		return nil, err
	}

	if d.inst.Type() != instancetype.VM {
		return d.startContainer()
	}

	return d.startVM()
}
// startContainer starts the disk device for a container instance.
// Returns a RunConfig describing either the rootfs entry (for the root disk) or the
// bind-mount LXD should perform, plus post-start hooks that apply cgroup I/O limits
// and unmount the host-side staging mount.
func (d *disk) startContainer() (*deviceConfig.RunConfig, error) {
	runConf := deviceConfig.RunConfig{}
	isReadOnly := shared.IsTrue(d.config["readonly"])
	isRequired := d.isRequired(d.config)

	// Apply cgroups only after all the mounts have been processed.
	runConf.PostHooks = append(runConf.PostHooks, func() error {
		// A fresh RunConfig is used here so only the limits are passed to the handler.
		runConf := deviceConfig.RunConfig{}
		err := d.generateLimits(&runConf)
		if err != nil {
			return err
		}

		err = d.inst.DeviceEventHandler(&runConf)
		if err != nil {
			return err
		}

		return nil
	})

	revert := revert.New()
	defer revert.Fail()

	// Deal with a rootfs.
	if shared.IsRootDiskDevice(d.config) {
		// Set the rootfs path.
		rootfs := deviceConfig.RootFSEntryItem{
			Path: d.inst.RootfsPath(),
		}

		// Read-only rootfs (unlikely to work very well).
		if isReadOnly {
			rootfs.Opts = append(rootfs.Opts, "ro")
		}

		// Handle previous requests for setting new quotas.
		err := d.applyDeferredQuota()
		if err != nil {
			return nil, err
		}

		runConf.RootFS = rootfs
	} else {
		// Source path.
		srcPath := shared.HostPath(d.config["source"])

		// Destination path.
		destPath := d.config["path"]
		relativeDestPath := strings.TrimPrefix(destPath, "/")

		// Option checks.
		isRecursive := shared.IsTrue(d.config["recursive"])

		// If we want to mount a storage volume from a storage pool we created via our
		// storage api, we are always mounting a directory.
		isFile := false
		if d.config["pool"] == "" {
			isFile = !shared.IsDir(srcPath) && !IsBlockdev(srcPath)
		}

		ownerShift := deviceConfig.MountOwnerShiftNone
		if shared.IsTrue(d.config["shift"]) {
			ownerShift = deviceConfig.MountOwnerShiftDynamic
		}

		// If ownerShift is none and pool is specified then check whether the pool itself
		// has owner shifting enabled, and if so enable shifting on this device too.
		if ownerShift == deviceConfig.MountOwnerShiftNone && d.config["pool"] != "" {
			poolID, _, _, err := d.state.Cluster.GetStoragePool(d.config["pool"])
			if err != nil {
				return nil, err
			}

			// Only custom volumes can be attached currently.
			storageProjectName, err := project.StorageVolumeProject(d.state.Cluster, d.inst.Project(), db.StoragePoolVolumeTypeCustom)
			if err != nil {
				return nil, err
			}

			_, volume, err := d.state.Cluster.GetLocalStoragePoolVolume(storageProjectName, d.config["source"], db.StoragePoolVolumeTypeCustom, poolID)
			if err != nil {
				return nil, err
			}

			if shared.IsTrue(volume.Config["security.shifted"]) {
				ownerShift = "dynamic"
			}
		}

		// Build the mount options passed to LXD for the bind mount.
		options := []string{}
		if isReadOnly {
			options = append(options, "ro")
		}

		if isRecursive {
			options = append(options, "rbind")
		} else {
			options = append(options, "bind")
		}

		if d.config["propagation"] != "" {
			options = append(options, d.config["propagation"])
		}

		if isFile {
			options = append(options, "create=file")
		} else {
			options = append(options, "create=dir")
		}

		// Mount the pool volume and set poolVolSrcPath for createDevice below.
		var poolVolSrcPath string
		if d.config["pool"] != "" {
			var err error
			poolVolSrcPath, err = d.mountPoolVolume(revert)
			if err != nil {
				// A failing optional disk is skipped with a warning rather than
				// failing instance start.
				if !isRequired {
					d.logger.Warn(err.Error())
					return nil, nil
				}

				return nil, err
			}
		}

		// Mount the source in the instance devices directory.
		sourceDevPath, err := d.createDevice(revert, poolVolSrcPath)
		if err != nil {
			return nil, err
		}

		if sourceDevPath != "" {
			// Instruct LXD to perform the mount.
			runConf.Mounts = append(runConf.Mounts, deviceConfig.MountEntryItem{
				DevName:    d.name,
				DevPath:    sourceDevPath,
				TargetPath: relativeDestPath,
				FSType:     "none",
				Opts:       options,
				OwnerShift: ownerShift,
			})

			// Unmount host-side mount once instance is started.
			runConf.PostHooks = append(runConf.PostHooks, d.postStart)
		}
	}

	revert.Success()
	return &runConf, nil
}
// vmVirtfsProxyHelperPaths returns the socket path and PID file path to use with the
// virtfs-proxy-helper process for this device.
func (d *disk) vmVirtfsProxyHelperPaths() (string, string) {
	devicesDir := d.inst.DevicesPath()

	return filepath.Join(devicesDir, d.name+".sock"), filepath.Join(devicesDir, d.name+".pid")
}
// vmVirtiofsdPaths returns the socket path and PID file path to use with the
// virtiofsd process for this device.
func (d *disk) vmVirtiofsdPaths() (string, string) {
	devicesDir := d.inst.DevicesPath()

	return filepath.Join(devicesDir, "virtio-fs."+d.name+".sock"), filepath.Join(devicesDir, "virtio-fs."+d.name+".pid")
}
// startVM starts the disk device for a virtual machine instance.
// Handles, in order: the root disk (deferred quotas), the cloud-init config drive,
// ceph RBD sources, directory/cephfs shares (exported to the VM over both 9p via
// virtfs-proxy-helper and virtio-fs via virtiofsd), and plain block device or
// image file passthrough.
func (d *disk) startVM() (*deviceConfig.RunConfig, error) {
	runConf := deviceConfig.RunConfig{}
	isRequired := d.isRequired(d.config)

	if shared.IsRootDiskDevice(d.config) {
		// Handle previous requests for setting new quotas.
		err := d.applyDeferredQuota()
		if err != nil {
			return nil, err
		}

		runConf.Mounts = []deviceConfig.MountEntryItem{
			{
				TargetPath: d.config["path"], // Indicator used that this is the root device.
				DevName:    d.name,
			},
		}

		return &runConf, nil
	} else if d.config["source"] == diskSourceCloudInit {
		// This is a special virtual disk source that can be attached to a VM to provide cloud-init config.
		isoPath, err := d.generateVMConfigDrive()
		if err != nil {
			return nil, err
		}

		runConf.Mounts = []deviceConfig.MountEntryItem{
			{
				DevPath: isoPath,
				DevName: d.name,
				FSType:  "iso9660",
			},
		}

		return &runConf, nil
	} else if d.config["source"] != "" {
		revert := revert.New()
		defer revert.Fail()

		if strings.HasPrefix(d.config["source"], "ceph:") {
			// Get the pool and volume names.
			fields := strings.SplitN(d.config["source"], ":", 2)
			fields = strings.SplitN(fields[1], "/", 2)
			poolName := fields[0]
			volumeName := fields[1]
			clusterName, userName := d.cephCreds()

			// Configuration values containing :, @, or = can be escaped with a leading \ character.
			// According to https://docs.ceph.com/docs/hammer/rbd/qemu-rbd/#usage
			optEscaper := strings.NewReplacer(":", `\:`, "@", `\@`, "=", `\=`)
			opts := []string{
				fmt.Sprintf("id=%s", optEscaper.Replace(userName)),
				fmt.Sprintf("conf=/etc/ceph/%s.conf", optEscaper.Replace(clusterName)),
			}

			runConf.Mounts = []deviceConfig.MountEntryItem{
				{
					DevPath: fmt.Sprintf("rbd:%s/%s:%s", optEscaper.Replace(poolName), optEscaper.Replace(volumeName), strings.Join(opts, ":")),
					DevName: d.name,
				},
			}
		} else {
			srcPath := shared.HostPath(d.config["source"])
			var err error

			// Mount the pool volume and update srcPath to mount path so it can be recognised as dir
			// if the volume is a filesystem volume type (if it is a block volume the srcPath will
			// be returned as the path to the block device).
			if d.config["pool"] != "" {
				srcPath, err = d.mountPoolVolume(revert)
				if err != nil {
					// A failing optional disk is skipped with a warning rather
					// than failing instance start.
					if !isRequired {
						logger.Warn(err.Error())
						return nil, nil
					}

					return nil, err
				}
			}

			// Default to block device or image file passthrough first.
			mount := deviceConfig.MountEntryItem{
				DevPath: srcPath,
				DevName: d.name,
			}

			readonly := shared.IsTrue(d.config["readonly"])
			if readonly {
				mount.Opts = append(mount.Opts, "ro")
			}

			// If the source being added is a directory or cephfs share, then we will use the lxd-agent
			// directory sharing feature to mount the directory inside the VM, and as such we need to
			// indicate to the VM the target path to mount to.
			if shared.IsDir(srcPath) || strings.HasPrefix(d.config["source"], "cephfs:") {
				// Mount the source in the instance devices directory.
				// This will ensure that if the exported directory configured as readonly that this
				// takes effect even if using virtio-fs (which doesn't support read only mode) by
				// having the underlying mount setup as readonly.
				srcPath, err = d.createDevice(revert, srcPath)
				if err != nil {
					return nil, err
				}

				// Something went wrong, but no error returned, meaning required != true so nothing
				// to do. (err is nil at this point, so this deliberately returns nil, nil.)
				if srcPath == "" {
					return nil, err
				}

				mount.TargetPath = d.config["path"]
				mount.FSType = "9p"

				// Start virtfs-proxy-helper for 9p share.
				err = func() error {
					sockPath, pidPath := d.vmVirtfsProxyHelperPaths()

					// Use 9p socket path as dev path so qemu can connect to the proxy.
					mount.DevPath = sockPath

					// Remove old socket if needed.
					os.Remove(sockPath)

					// Locate virtfs-proxy-helper.
					cmd, err := exec.LookPath("virtfs-proxy-helper")
					if err != nil {
						// Fall back to the well-known qemu install location.
						if shared.PathExists("/usr/lib/qemu/virtfs-proxy-helper") {
							cmd = "/usr/lib/qemu/virtfs-proxy-helper"
						}
					}

					if cmd == "" {
						return fmt.Errorf(`Required binary "virtfs-proxy-helper" couldn't be found`)
					}

					// Start the virtfs-proxy-helper process in non-daemon mode and as root so
					// that when the VM process is started as an unprivileged user, we can
					// still share directories that process cannot access.
					proc, err := subprocess.NewProcess(cmd, []string{"-n", "-u", "0", "-g", "0", "-s", sockPath, "-p", srcPath}, "", "")
					if err != nil {
						return err
					}

					err = proc.Start()
					if err != nil {
						return errors.Wrapf(err, "Failed to start virtfs-proxy-helper")
					}

					revert.Add(func() { proc.Stop() })

					err = proc.Save(pidPath)
					if err != nil {
						return errors.Wrapf(err, "Failed to save virtfs-proxy-helper state")
					}

					// Wait for socket file to exist (as otherwise qemu can race the creation
					// of this file).
					waitDuration := time.Second * time.Duration(10)
					waitUntil := time.Now().Add(waitDuration)
					for {
						if shared.PathExists(sockPath) {
							break
						}

						if time.Now().After(waitUntil) {
							return fmt.Errorf("virtfs-proxy-helper failed to bind socket after %v", waitDuration)
						}

						time.Sleep(50 * time.Millisecond)
					}

					return nil
				}()
				if err != nil {
					return nil, errors.Wrapf(err, "Failed to setup virtfs-proxy-helper for device %q", d.name)
				}

				// Start virtiofsd for virtio-fs share. The lxd-agent prefers to use this over the
				// virtfs-proxy-helper 9p share. The 9p share will only be used as a fallback.
				err = func() error {
					sockPath, pidPath := d.vmVirtiofsdPaths()
					logPath := filepath.Join(d.inst.LogPath(), fmt.Sprintf("disk.%s.log", d.name))

					err = DiskVMVirtiofsdStart(d.inst, sockPath, pidPath, logPath, srcPath)
					if err != nil {
						// Unsupported environments fall back to 9p only.
						var errUnsupported UnsupportedError
						if errors.As(err, &errUnsupported) {
							d.logger.Warn("Unable to use virtio-fs for device, using 9p as a fallback", log.Ctx{"err": errUnsupported})
							return nil
						}

						return err
					}
					revert.Add(func() { DiskVMVirtiofsdStop(sockPath, pidPath) })

					// Add the socket path to the mount options to indicate to the qemu driver
					// that this share is available.
					// Note: the sockPath is not passed to the QEMU via mount.DevPath like the
					// 9p share above. This is because we run the 9p share concurrently
					// and can only pass one DevPath at a time. Instead pass the sock path to
					// the QEMU driver via the mount opts field as virtiofsdSock to allow the
					// QEMU driver also setup the virtio-fs share.
					mount.Opts = append(mount.Opts, fmt.Sprintf("%s=%s", DiskVirtiofsdSockMountOpt, sockPath))

					return nil
				}()
				if err != nil {
					return nil, errors.Wrapf(err, "Failed to setup virtiofsd for device %q", d.name)
				}
			} else if !shared.PathExists(srcPath) {
				// Missing sources only fail the start when the disk is required.
				if isRequired {
					return nil, fmt.Errorf("Source path %q doesn't exist for device %q", srcPath, d.name)
				}
			}

			// Add successfully setup mount config to runConf.
			runConf.Mounts = []deviceConfig.MountEntryItem{mount}
		}

		revert.Success()
		return &runConf, nil
	}

	return nil, fmt.Errorf("Disk type not supported for VMs")
}
// postStart is run after the instance is started.
// It lazily detaches the host-side staging mount created by createDevice now that
// the instance holds its own reference to the mount.
func (d *disk) postStart() error {
	return unix.Unmount(d.getDevicePath(d.name, d.config), unix.MNT_DETACH)
}
// Update applies configuration changes to a started device.
// For root disks it rejects pool changes and applies (or defers) quota changes;
// for running containers it re-applies cgroup I/O limits.
func (d *disk) Update(oldDevices deviceConfig.Devices, isRunning bool) error {
	if d.inst.Type() == instancetype.VM && !shared.IsRootDiskDevice(d.config) {
		return fmt.Errorf("Non-root disks not supported for VMs")
	}

	if shared.IsRootDiskDevice(d.config) {
		// Make sure we have a valid root disk device (and only one).
		expandedDevices := d.inst.ExpandedDevices()
		newRootDiskDeviceKey, _, err := shared.GetRootDiskDevice(expandedDevices.CloneNative())
		if err != nil {
			return errors.Wrap(err, "Detect root disk device")
		}

		// Retrieve the first old root disk device key, even if there are duplicates.
		oldRootDiskDeviceKey := ""
		for k, v := range oldDevices {
			if shared.IsRootDiskDevice(v) {
				oldRootDiskDeviceKey = k
				break
			}
		}

		// Check for pool change.
		oldRootDiskDevicePool := oldDevices[oldRootDiskDeviceKey]["pool"]
		newRootDiskDevicePool := expandedDevices[newRootDiskDeviceKey]["pool"]
		if oldRootDiskDevicePool != newRootDiskDevicePool {
			return fmt.Errorf("The storage pool of the root disk can only be changed through move")
		}

		// Deal with quota changes.
		oldRootDiskDeviceSize := oldDevices[oldRootDiskDeviceKey]["size"]
		newRootDiskDeviceSize := expandedDevices[newRootDiskDeviceKey]["size"]
		oldRootDiskDeviceMigrationSize := oldDevices[oldRootDiskDeviceKey]["size.state"]
		newRootDiskDeviceMigrationSize := expandedDevices[newRootDiskDeviceKey]["size.state"]

		// Apply disk quota changes.
		if newRootDiskDeviceSize != oldRootDiskDeviceSize || oldRootDiskDeviceMigrationSize != newRootDiskDeviceMigrationSize {
			// Remove any outstanding volatile apply_quota key if applying a new quota.
			v := d.volatileGet()
			if v["apply_quota"] != "" {
				err = d.volatileSet(map[string]string{"apply_quota": ""})
				if err != nil {
					return err
				}
			}

			err := d.applyQuota(false)
			if err == storageDrivers.ErrInUse {
				// Save volatile apply_quota key for next boot if cannot apply now.
				err = d.volatileSet(map[string]string{"apply_quota": "true"})
				if err != nil {
					return err
				}

				d.logger.Warn("Could not apply quota because disk is in use, deferring until next start")
			} else if err != nil {
				return err
			}
		}
	}

	// Only apply IO limits if instance is container and is running.
	if isRunning && d.inst.Type() == instancetype.Container {
		runConf := deviceConfig.RunConfig{}
		err := d.generateLimits(&runConf)
		if err != nil {
			return err
		}

		err = d.inst.DeviceEventHandler(&runConf)
		if err != nil {
			return err
		}
	}

	return nil
}
// applyDeferredQuota attempts to apply the deferred quota specified in the volatile "apply_quota" key if set.
// If successfully applies new quota then removes the volatile "apply_quota" key.
func (d *disk) applyDeferredQuota() error {
	volatile := d.volatileGet()
	if volatile["apply_quota"] == "" {
		// No quota change was deferred, nothing to do.
		return nil
	}

	d.logger.Info("Applying deferred quota change")

	// Request an unmount before resizing, so resizes that cannot be performed
	// while the volume is in use can succeed.
	if err := d.applyQuota(true); err != nil {
		return errors.Wrapf(err, "Failed to apply deferred quota from %q", fmt.Sprintf("volatile.%s.apply_quota", d.name))
	}

	// Quota applied successfully, clear the deferred marker.
	return d.volatileSet(map[string]string{"apply_quota": ""})
}
// applyQuota attempts to resize the instance root disk to the specified size.
// The new size and size.state values are taken from the expanded root disk
// device config. If unmount is true, attempts to unmount first before
// resizing (needed for resizes that cannot happen while the volume is in use).
func (d *disk) applyQuota(unmount bool) error {
	rootDisk, _, err := shared.GetRootDiskDevice(d.inst.ExpandedDevices().CloneNative())
	if err != nil {
		return errors.Wrap(err, "Detect root disk device")
	}

	newSize := d.inst.ExpandedDevices()[rootDisk]["size"]
	newMigrationSize := d.inst.ExpandedDevices()[rootDisk]["size.state"]

	pool, err := storagePools.GetPoolByInstance(d.state, d.inst)
	if err != nil {
		return err
	}

	if unmount {
		ourUnmount, err := pool.UnmountInstance(d.inst, nil)
		if err != nil {
			return err
		}

		if ourUnmount {
			// Best-effort remount on the way out; the return value is
			// deliberately ignored so the resize result takes precedence.
			defer pool.MountInstance(d.inst, nil)
		}
	}

	err = pool.SetInstanceQuota(d.inst, newSize, newMigrationSize, nil)
	if err != nil {
		return err
	}

	return nil
}
// generateLimits adds a set of cgroup rules to apply specified limits to the supplied RunConfig.
func (d *disk) generateLimits(runConf *deviceConfig.RunConfig) error {
	// Determine whether any disk device on the instance defines throttle limits.
	limited := false
	for _, dev := range d.inst.ExpandedDevices() {
		if dev["type"] != "disk" {
			continue
		}

		if dev["limits.read"] != "" || dev["limits.write"] != "" || dev["limits.max"] != "" {
			limited = true
		}
	}

	if !limited {
		return nil
	}

	if !d.state.OS.CGInfo.Supports(cgroup.Blkio, nil) {
		return fmt.Errorf("Cannot apply disk limits as blkio cgroup controller is missing")
	}

	diskLimits, err := d.getDiskLimits()
	if err != nil {
		return err
	}

	// The cgroupWriter records the writes into runConf rather than applying them.
	cg, err := cgroup.New(&cgroupWriter{runConf})
	if err != nil {
		return err
	}

	for block, limit := range diskLimits {
		// Emit one blkio rule per non-zero limit, in a fixed order.
		rules := []struct {
			dir   string
			unit  string
			value int64
		}{
			{"read", "bps", limit.readBps},
			{"read", "iops", limit.readIops},
			{"write", "bps", limit.writeBps},
			{"write", "iops", limit.writeIops},
		}

		for _, rule := range rules {
			if rule.value <= 0 {
				continue
			}

			err := cg.SetBlkioLimit(block, rule.dir, rule.unit, rule.value)
			if err != nil {
				return err
			}
		}
	}

	return nil
}
// cgroupWriter is a cgroup backend that records requested cgroup writes as
// RunConfig items (to be applied later) instead of writing them directly.
type cgroupWriter struct {
	runConf *deviceConfig.RunConfig
}
// Get implements the cgroup reader side of the interface but is intentionally
// unsupported: cgroupWriter only records writes into a RunConfig.
func (w *cgroupWriter) Get(version cgroup.Backend, controller string, key string) (string, error) {
	return "", fmt.Errorf("This cgroup handler does not support reading")
}
// Set records the requested cgroup key/value as a RunConfig item rather than
// applying it to a live cgroup.
func (w *cgroupWriter) Set(version cgroup.Backend, controller string, key string, value string) error {
	item := deviceConfig.RunConfigItem{
		Key:   key,
		Value: value,
	}

	w.runConf.CGroups = append(w.runConf.CGroups, item)

	return nil
}
// mountPoolVolume mounts the pool volume specified in d.config["source"] from pool specified in d.config["pool"]
// and return the mount path. If the instance type is container volume will be shifted if needed.
// The revert is populated so a failed attach can unmount the volume again.
func (d *disk) mountPoolVolume(revert *revert.Reverter) (string, error) {
	// Deal with mounting storage volumes created via the storage api. Extract the name of the storage volume
	// that we are supposed to attach. We assume that the only syntactically valid ways of specifying a
	// storage volume are:
	// - <volume_name>
	// - <type>/<volume_name>
	// Currently, <type> must either be empty or "custom".
	// We do not yet support instance mounts.
	if filepath.IsAbs(d.config["source"]) {
		return "", fmt.Errorf(`When the "pool" property is set "source" must specify the name of a volume, not a path`)
	}

	volumeTypeName := ""
	volumeName := filepath.Clean(d.config["source"])
	slash := strings.Index(volumeName, "/")
	if (slash > 0) && (len(volumeName) > slash) {
		// Extract volume name.
		volumeName = d.config["source"][(slash + 1):]
		// Extract volume type.
		volumeTypeName = d.config["source"][:slash]
	}

	var srcPath string

	// Check volume type name is custom.
	switch volumeTypeName {
	case db.StoragePoolVolumeTypeNameContainer:
		return "", fmt.Errorf("Using instance storage volumes is not supported")
	case "":
		// We simply received the name of a storage volume.
		volumeTypeName = db.StoragePoolVolumeTypeNameCustom
		fallthrough
	case db.StoragePoolVolumeTypeNameCustom:
		break
	case db.StoragePoolVolumeTypeNameImage:
		return "", fmt.Errorf("Using image storage volumes is not supported")
	default:
		return "", fmt.Errorf("Unknown storage type prefix %q found", volumeTypeName)
	}

	// Only custom volumes can be attached currently.
	storageProjectName, err := project.StorageVolumeProject(d.state.Cluster, d.inst.Project(), db.StoragePoolVolumeTypeCustom)
	if err != nil {
		return "", err
	}

	volStorageName := project.StorageVolume(storageProjectName, volumeName)
	srcPath = storageDrivers.GetVolumeMountPath(d.config["pool"], storageDrivers.VolumeTypeCustom, volStorageName)

	pool, err := storagePools.GetPoolByName(d.state, d.config["pool"])
	if err != nil {
		return "", err
	}

	err = pool.MountCustomVolume(storageProjectName, volumeName, nil)
	if err != nil {
		return "", errors.Wrapf(err, "Failed mounting storage volume %q of type %q on storage pool %q", volumeName, volumeTypeName, pool.Name())
	}

	// Ensure the volume gets unmounted again if a later step fails.
	revert.Add(func() { pool.UnmountCustomVolume(storageProjectName, volumeName, nil) })

	_, vol, err := d.state.Cluster.GetLocalStoragePoolVolume(storageProjectName, volumeName, db.StoragePoolVolumeTypeCustom, pool.ID())
	if err != nil {
		return "", errors.Wrapf(err, "Failed to fetch local storage volume record")
	}

	if d.inst.Type() == instancetype.Container {
		// Containers only support filesystem volumes, which may need uid/gid shifting.
		if vol.ContentType == db.StoragePoolVolumeContentTypeNameFS {
			err = d.storagePoolVolumeAttachShift(storageProjectName, pool.Name(), volumeName, db.StoragePoolVolumeTypeCustom, srcPath)
			if err != nil {
				return "", errors.Wrapf(err, "Failed shifting storage volume %q of type %q on storage pool %q", volumeName, volumeTypeName, pool.Name())
			}
		} else {
			return "", fmt.Errorf("Only filesystem volumes are supported for containers")
		}
	}

	// Block volumes are attached via their raw disk device path rather than a mount point.
	if vol.ContentType == db.StoragePoolVolumeContentTypeNameBlock {
		srcPath, err = pool.GetCustomVolumeDisk(storageProjectName, volumeName)
		if err != nil {
			return "", errors.Wrapf(err, "Failed to get disk path")
		}
	}

	return srcPath, nil
}
// createDevice creates a disk device mount on host.
// The poolVolSrcPath takes the path to the mounted custom pool volume when d.config["pool"] is non-empty.
// Returns the host-side mount point path created under the instance's devices directory,
// or "" with a nil error for missing optional devices.
func (d *disk) createDevice(revert *revert.Reverter, poolVolSrcPath string) (string, error) {
	// Paths.
	devPath := d.getDevicePath(d.name, d.config)
	srcPath := shared.HostPath(d.config["source"])

	// Device options.
	isRequired := d.isRequired(d.config)
	isReadOnly := shared.IsTrue(d.config["readonly"])
	isRecursive := shared.IsTrue(d.config["recursive"])
	mntOptions := d.config["raw.mount.options"]
	fsName := "none"

	isFile := false
	if d.config["pool"] == "" {
		isFile = !shared.IsDir(srcPath) && !IsBlockdev(srcPath)
		if strings.HasPrefix(d.config["source"], "cephfs:") {
			// Get fs name and path from d.config.
			fields := strings.SplitN(d.config["source"], ":", 2)
			fields = strings.SplitN(fields[1], "/", 2)
			mdsName := fields[0]
			mdsPath := fields[1]
			clusterName, userName := d.cephCreds()

			// Get the mount options.
			mntSrcPath, fsOptions, fsErr := diskCephfsOptions(clusterName, userName, mdsName, mdsPath)
			if fsErr != nil {
				return "", fsErr
			}

			// Join the options with any provided by the user.
			if mntOptions == "" {
				mntOptions = fsOptions
			} else {
				mntOptions += "," + fsOptions
			}

			fsName = "ceph"
			srcPath = mntSrcPath
			isFile = false
		} else if strings.HasPrefix(d.config["source"], "ceph:") {
			// Get the pool and volume names.
			fields := strings.SplitN(d.config["source"], ":", 2)
			fields = strings.SplitN(fields[1], "/", 2)
			poolName := fields[0]
			volumeName := fields[1]
			clusterName, userName := d.cephCreds()

			// Map the RBD.
			rbdPath, err := diskCephRbdMap(clusterName, userName, poolName, volumeName)
			if err != nil {
				msg := fmt.Sprintf("Could not mount map Ceph RBD: %v", err)
				if !isRequired {
					// Optional devices are allowed to be missing.
					d.logger.Warn(msg)
					return "", nil
				}

				// Use an explicit "%s" verb so any '%' characters in the
				// underlying error text cannot be misinterpreted as format
				// directives (fixes a go vet printf issue with a
				// non-constant format string).
				return "", fmt.Errorf("%s", msg)
			}

			// Record the device path so postStop can unmap the RBD again.
			err = d.volatileSet(map[string]string{"ceph_rbd": rbdPath})
			if err != nil {
				return "", err
			}

			srcPath = rbdPath
			isFile = false
		}
	} else {
		srcPath = poolVolSrcPath // Use pool source path override.
	}

	// Check if the source exists unless it is a cephfs.
	if fsName != "ceph" && !shared.PathExists(srcPath) {
		if !isRequired {
			return "", nil
		}

		return "", fmt.Errorf("Source path %q doesn't exist for device %q", srcPath, d.name)
	}

	// Create the devices directory if missing.
	if !shared.PathExists(d.inst.DevicesPath()) {
		err := os.Mkdir(d.inst.DevicesPath(), 0711)
		if err != nil {
			return "", err
		}
	}

	// Clean any existing entry.
	if shared.PathExists(devPath) {
		err := os.Remove(devPath)
		if err != nil {
			return "", err
		}
	}

	// Create the mount point: a plain file for file bind-mounts, a directory otherwise.
	if isFile {
		f, err := os.Create(devPath)
		if err != nil {
			return "", err
		}

		f.Close()
	} else {
		err := os.Mkdir(devPath, 0700)
		if err != nil {
			return "", err
		}
	}

	// Mount the fs.
	err := DiskMount(srcPath, devPath, isReadOnly, isRecursive, d.config["propagation"], mntOptions, fsName)
	if err != nil {
		return "", err
	}

	revert.Success()
	return devPath, nil
}
// storagePoolVolumeAttachShift updates uid/gid shifting on a custom storage
// volume so its on-disk ownership matches the idmap of the container
// attaching it. remapPath is the host path of the mounted volume. The
// volume's volatile.idmap.next/last config keys are updated in the database.
func (d *disk) storagePoolVolumeAttachShift(projectName, poolName, volumeName string, volumeType int, remapPath string) error {
	// Load the DB records.
	poolID, pool, _, err := d.state.Cluster.GetStoragePool(poolName)
	if err != nil {
		return err
	}

	_, volume, err := d.state.Cluster.GetLocalStoragePoolVolume(projectName, volumeName, volumeType, poolID)
	if err != nil {
		return err
	}

	poolVolumePut := volume.Writable()

	// Check if unmapped.
	if shared.IsTrue(poolVolumePut.Config["security.unmapped"]) {
		// No need to look at containers and maps for unmapped volumes.
		return nil
	}

	// Get the on-disk idmap for the volume.
	var lastIdmap *idmap.IdmapSet
	if poolVolumePut.Config["volatile.idmap.last"] != "" {
		lastIdmap, err = idmap.JSONUnmarshal(poolVolumePut.Config["volatile.idmap.last"])
		if err != nil {
			d.logger.Error("Failed to unmarshal last idmapping", log.Ctx{"idmap": poolVolumePut.Config["volatile.idmap.last"], "err": err})
			return err
		}
	}

	// Work out the idmap the volume should be shifted to next. For
	// security.shifted volumes this stays nil (serialized as "[]").
	var nextIdmap *idmap.IdmapSet
	nextJSONMap := "[]"
	if !shared.IsTrue(poolVolumePut.Config["security.shifted"]) {
		c := d.inst.(instance.Container)
		// Get the container's idmap.
		if c.IsRunning() {
			nextIdmap, err = c.CurrentIdmap()
		} else {
			nextIdmap, err = c.NextIdmap()
		}
		if err != nil {
			return err
		}

		if nextIdmap != nil {
			nextJSONMap, err = idmap.JSONMarshal(nextIdmap)
			if err != nil {
				return err
			}
		}
	}

	poolVolumePut.Config["volatile.idmap.next"] = nextJSONMap

	// NOTE(review): nextIdmap may still be nil here (shifted volumes, or a
	// container with no idmap); Equals is presumably nil-receiver-safe —
	// confirm in the idmap package.
	if !nextIdmap.Equals(lastIdmap) {
		d.logger.Debug("Shifting storage volume")

		if !shared.IsTrue(poolVolumePut.Config["security.shifted"]) {
			// Collect every instance that has this volume attached.
			volumeUsedBy := []instance.Instance{}
			err = storagePools.VolumeUsedByInstanceDevices(d.state, poolName, projectName, volume, true, func(dbInst db.Instance, project api.Project, profiles []api.Profile, usedByDevices []string) error {
				inst, err := instance.Load(d.state, db.InstanceToArgs(&dbInst), profiles)
				if err != nil {
					return err
				}

				volumeUsedBy = append(volumeUsedBy, inst)
				return nil
			})
			if err != nil {
				return err
			}

			if len(volumeUsedBy) > 1 {
				// Multiple attachments: every container's idmap must match
				// the target idmap, otherwise shifting is ambiguous.
				for _, inst := range volumeUsedBy {
					if inst.Type() != instancetype.Container {
						continue
					}

					ct := inst.(instance.Container)

					var ctNextIdmap *idmap.IdmapSet

					if ct.IsRunning() {
						ctNextIdmap, err = ct.CurrentIdmap()
					} else {
						ctNextIdmap, err = ct.NextIdmap()
					}
					if err != nil {
						return fmt.Errorf("Failed to retrieve idmap of container")
					}

					if !nextIdmap.Equals(ctNextIdmap) {
						return fmt.Errorf("Idmaps of container %q and storage volume %q are not identical", ct.Name(), volumeName)
					}
				}
			} else if len(volumeUsedBy) == 1 {
				// If we're the only one who's attached that container
				// we can shift the storage volume.
				// I'm not sure if we want some locking here.
				if volumeUsedBy[0].Name() != d.inst.Name() {
					return fmt.Errorf("Idmaps of container and storage volume are not identical")
				}
			}
		}

		// Unshift rootfs.
		if lastIdmap != nil {
			var err error

			if pool.Driver == "zfs" {
				err = lastIdmap.UnshiftRootfs(remapPath, storageDrivers.ShiftZFSSkipper)
			} else {
				err = lastIdmap.UnshiftRootfs(remapPath, nil)
			}
			if err != nil {
				d.logger.Error("Failed to unshift", log.Ctx{"path": remapPath, "err": err})
				return err
			}

			d.logger.Debug("Unshifted", log.Ctx{"path": remapPath})
		}

		// Shift rootfs.
		if nextIdmap != nil {
			var err error

			if pool.Driver == "zfs" {
				err = nextIdmap.ShiftRootfs(remapPath, storageDrivers.ShiftZFSSkipper)
			} else {
				err = nextIdmap.ShiftRootfs(remapPath, nil)
			}
			if err != nil {
				d.logger.Error("Failed to shift", log.Ctx{"path": remapPath, "err": err})
				return err
			}

			d.logger.Debug("Shifted", log.Ctx{"path": remapPath})
		}

		d.logger.Debug("Shifted storage volume")
	}

	jsonIdmap := "[]"
	if nextIdmap != nil {
		var err error
		jsonIdmap, err = idmap.JSONMarshal(nextIdmap)
		if err != nil {
			d.logger.Error("Failed to marshal idmap", log.Ctx{"idmap": nextIdmap, "err": err})
			return err
		}
	}

	// Update last idmap.
	poolVolumePut.Config["volatile.idmap.last"] = jsonIdmap

	err = d.state.Cluster.UpdateStoragePoolVolume(projectName, volumeName, volumeType, poolID, poolVolumePut.Description, poolVolumePut.Config)
	if err != nil {
		return err
	}

	return nil
}
// Stop is run when the device is removed from the instance.
// It requests an in-instance unmount of the device and registers postStop as
// a post hook for host-side cleanup.
func (d *disk) Stop() (*deviceConfig.RunConfig, error) {
	// VMs have their own dedicated stop path.
	if d.inst.Type() == instancetype.VM {
		return d.stopVM()
	}

	// If the host-side mount entry is already gone there is nothing to do.
	hostPath := d.getDevicePath(d.name, d.config)
	if !shared.PathExists(hostPath) {
		return nil, nil
	}

	// Request an unmount of the device inside the instance and schedule the
	// host-side cleanup via postStop.
	targetPath := strings.TrimPrefix(d.config["path"], "/")
	runConf := deviceConfig.RunConfig{
		PostHooks: []func() error{d.postStop},
		Mounts: []deviceConfig.MountEntryItem{
			{TargetPath: targetPath},
		},
	}

	return &runConf, nil
}
// stopVM stops the per-device helper processes (virtfs-proxy-helper and
// virtiofsd) used to share this disk with a VM and removes their socket/PID
// files. Returns a RunConfig whose post hook performs the remaining cleanup.
func (d *disk) stopVM() (*deviceConfig.RunConfig, error) {
	// Stop the virtfs-proxy-helper process and clean up.
	err := func() error {
		sockPath, pidPath := d.vmVirtfsProxyHelperPaths()
		if shared.PathExists(pidPath) {
			proc, err := subprocess.ImportProcess(pidPath)
			if err != nil {
				return err
			}

			err = proc.Stop()
			// An already-stopped helper is not an error.
			if err != nil && err != subprocess.ErrNotRunning {
				return err
			}

			// Remove PID file.
			os.Remove(pidPath)
		}

		// Remove socket file.
		os.Remove(sockPath)

		return nil
	}()
	if err != nil {
		return &deviceConfig.RunConfig{}, errors.Wrapf(err, "Failed cleaning up virtfs-proxy-helper")
	}

	// Stop the virtiofsd process and clean up.
	err = DiskVMVirtiofsdStop(d.vmVirtiofsdPaths())
	if err != nil {
		return &deviceConfig.RunConfig{}, errors.Wrapf(err, "Failed cleaning up virtiofsd")
	}

	runConf := deviceConfig.RunConfig{
		PostHooks: []func() error{d.postStop},
	}

	return &runConf, nil
}
// postStop is run after the device is removed from the instance.
// It clears the host-side mount entry, unmounts any attached custom pool
// volume and unmaps any Ceph RBD device recorded in the volatile config.
func (d *disk) postStop() error {
	// Clean any existing device mount entry. Should occur first before custom volume unmounts.
	err := DiskMountClear(d.getDevicePath(d.name, d.config))
	if err != nil {
		return err
	}

	// Check if pool-specific action should be taken to unmount custom volume disks.
	if d.config["pool"] != "" && d.config["path"] != "/" {
		pool, err := storagePools.GetPoolByName(d.state, d.config["pool"])
		if err != nil {
			return err
		}

		// Only custom volumes can be attached currently.
		storageProjectName, err := project.StorageVolumeProject(d.state.Cluster, d.inst.Project(), db.StoragePoolVolumeTypeCustom)
		if err != nil {
			return err
		}

		_, err = pool.UnmountCustomVolume(storageProjectName, d.config["source"], nil)
		if err != nil {
			return err
		}
	}

	if strings.HasPrefix(d.config["source"], "ceph:") {
		v := d.volatileGet()
		err := diskCephRbdUnmap(v["ceph_rbd"])
		if err != nil {
			// Best effort: log the failure rather than blocking device removal.
			d.logger.Error("Failed to unmap RBD volume", log.Ctx{"rbd": v["ceph_rbd"], "err": err})
		}
	}

	return nil
}
// getDiskLimits calculates Block I/O limits.
// It resolves each disk device's backing block device(s) and returns a map of
// "major:minor" identifiers to the averaged limits to apply to each one.
func (d *disk) getDiskLimits() (map[string]diskBlockLimit, error) {
	result := map[string]diskBlockLimit{}

	// Build a list of all valid block devices
	validBlocks := []string{}

	dents, err := ioutil.ReadDir("/sys/class/block/")
	if err != nil {
		return nil, err
	}

	for _, f := range dents {
		fPath := filepath.Join("/sys/class/block/", f.Name())

		// Skip partitions; limits apply to whole devices only.
		if shared.PathExists(fmt.Sprintf("%s/partition", fPath)) {
			continue
		}

		// Skip entries without a dev file (no major:minor number).
		if !shared.PathExists(fmt.Sprintf("%s/dev", fPath)) {
			continue
		}

		block, err := ioutil.ReadFile(fmt.Sprintf("%s/dev", fPath))
		if err != nil {
			return nil, err
		}

		validBlocks = append(validBlocks, strings.TrimSuffix(string(block), "\n"))
	}

	// Process all the limits
	blockLimits := map[string][]diskBlockLimit{}
	for devName, dev := range d.inst.ExpandedDevices() {
		if dev["type"] != "disk" {
			continue
		}

		// Apply max limit
		if dev["limits.max"] != "" {
			dev["limits.read"] = dev["limits.max"]
			dev["limits.write"] = dev["limits.max"]
		}

		// Parse the user input
		readBps, readIops, writeBps, writeIops, err := d.parseDiskLimit(dev["limits.read"], dev["limits.write"])
		if err != nil {
			return nil, err
		}

		// Set the source path
		source := d.getDevicePath(devName, dev)
		if dev["source"] == "" {
			source = d.inst.RootfsPath()
		}

		if !shared.PathExists(source) {
			// Require that device is mounted before resolving block device if required.
			if d.isRequired(dev) {
				return nil, fmt.Errorf("Block device path doesn't exist %q", source)
			}

			continue // Do not resolve block device if device isn't mounted.
		}

		// Get the backing block devices (major:minor)
		blocks, err := d.getParentBlocks(source)
		if err != nil {
			if readBps == 0 && readIops == 0 && writeBps == 0 && writeIops == 0 {
				// If the device doesn't exist, there is no limit to clear so ignore the failure
				continue
			} else {
				return nil, err
			}
		}

		device := diskBlockLimit{readBps: readBps, readIops: readIops, writeBps: writeBps, writeIops: writeIops}
		for _, block := range blocks {
			blockStr := ""

			if shared.StringInSlice(block, validBlocks) {
				// Straightforward entry (full block device)
				blockStr = block
			} else {
				// Attempt to deal with a partition (guess its parent)
				fields := strings.SplitN(block, ":", 2)
				fields[1] = "0"
				if shared.StringInSlice(fmt.Sprintf("%s:%s", fields[0], fields[1]), validBlocks) {
					blockStr = fmt.Sprintf("%s:%s", fields[0], fields[1])
				}
			}

			if blockStr == "" {
				return nil, fmt.Errorf("Block device doesn't support quotas %q", block)
			}

			if blockLimits[blockStr] == nil {
				blockLimits[blockStr] = []diskBlockLimit{}
			}

			blockLimits[blockStr] = append(blockLimits[blockStr], device)
		}
	}

	// Average duplicate limits: several disk devices may resolve to the same
	// backing block device, so take the mean of the non-zero limits for each.
	for block, limits := range blockLimits {
		var readBpsCount, readBpsTotal, readIopsCount, readIopsTotal, writeBpsCount, writeBpsTotal, writeIopsCount, writeIopsTotal int64

		for _, limit := range limits {
			if limit.readBps > 0 {
				readBpsCount++
				readBpsTotal += limit.readBps
			}

			if limit.readIops > 0 {
				readIopsCount++
				readIopsTotal += limit.readIops
			}

			if limit.writeBps > 0 {
				writeBpsCount++
				writeBpsTotal += limit.writeBps
			}

			if limit.writeIops > 0 {
				writeIopsCount++
				writeIopsTotal += limit.writeIops
			}
		}

		device := diskBlockLimit{}

		if readBpsCount > 0 {
			device.readBps = readBpsTotal / readBpsCount
		}

		if readIopsCount > 0 {
			device.readIops = readIopsTotal / readIopsCount
		}

		if writeBpsCount > 0 {
			device.writeBps = writeBpsTotal / writeBpsCount
		}

		if writeIopsCount > 0 {
			device.writeIops = writeIopsTotal / writeIopsCount
		}

		result[block] = device
	}

	return result, nil
}
// parseDiskLimit parses user supplied read and write limit strings and
// returns (readBps, readIops, writeBps, writeIops, error). A value with an
// "iops" suffix is an IOPS count, anything else is a byte-size string; an
// empty value yields zeroes.
func (d *disk) parseDiskLimit(readSpeed string, writeSpeed string) (int64, int64, int64, int64, error) {
	// parseValue converts one limit string into a (bytes/s, iops) pair.
	parseValue := func(value string) (int64, int64, error) {
		if value == "" {
			return 0, 0, nil
		}

		if strings.HasSuffix(value, "iops") {
			iops, err := strconv.ParseInt(strings.TrimSuffix(value, "iops"), 10, 64)
			if err != nil {
				return -1, -1, err
			}

			return 0, iops, nil
		}

		bps, err := units.ParseByteSizeString(value)
		if err != nil {
			return -1, -1, err
		}

		return bps, 0, nil
	}

	readBps, readIops, err := parseValue(readSpeed)
	if err != nil {
		return -1, -1, -1, -1, err
	}

	writeBps, writeIops, err := parseValue(writeSpeed)
	if err != nil {
		return -1, -1, -1, -1, err
	}

	return readBps, readIops, writeBps, writeIops, nil
}
// getParentBlocks resolves the backing block device(s) for the given path by
// inspecting /proc/self/mountinfo, with special handling for zfs and btrfs
// filesystems that can span multiple devices. Entries are returned as
// "major:minor" strings (or, in the simple case, the raw mountinfo device
// field).
func (d *disk) getParentBlocks(path string) ([]string, error) {
	var devices []string
	var dev []string

	// Expand the mount path
	absPath, err := filepath.Abs(path)
	if err != nil {
		return nil, err
	}

	expPath, err := filepath.EvalSymlinks(absPath)
	if err != nil {
		// Fall back to the unresolved path if symlink evaluation fails.
		expPath = absPath
	}

	// Find the source mount of the path
	file, err := os.Open("/proc/self/mountinfo")
	if err != nil {
		return nil, err
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	match := ""

	// Keep the longest mount point that is a prefix of expPath (i.e. the
	// most specific mount containing the path).
	for scanner.Scan() {
		line := scanner.Text()
		rows := strings.Fields(line)

		if len(rows[4]) <= len(match) {
			continue
		}

		if expPath != rows[4] && !strings.HasPrefix(expPath, rows[4]) {
			continue
		}

		match = rows[4]

		// Go backward to avoid problems with optional fields
		dev = []string{rows[2], rows[len(rows)-2]}
	}

	if dev == nil {
		return nil, fmt.Errorf("Couldn't find a match /proc/self/mountinfo entry")
	}

	// Handle the most simple case: a major number other than 0 identifies a
	// real block device directly.
	if !strings.HasPrefix(dev[0], "0:") {
		return []string{dev[0]}, nil
	}

	// Deal with per-filesystem oddities. We don't care about failures here
	// because any non-special filesystem => directory backend.
	fs, _ := util.FilesystemDetect(expPath)

	if fs == "zfs" && shared.PathExists("/dev/zfs") {
		// Accessible zfs filesystems
		poolName := strings.Split(dev[1], "/")[0]

		output, err := shared.RunCommand("zpool", "status", "-P", "-L", poolName)
		if err != nil {
			return nil, fmt.Errorf("Failed to query zfs filesystem information for %q: %v", dev[1], err)
		}

		header := true
		for _, line := range strings.Split(output, "\n") {
			fields := strings.Fields(line)
			if len(fields) < 5 {
				continue
			}

			if fields[1] != "ONLINE" {
				continue
			}

			// Skip the pool summary line that precedes the vdev list.
			if header {
				header = false
				continue
			}

			var path string
			if shared.PathExists(fields[0]) {
				if shared.IsBlockdevPath(fields[0]) {
					path = fields[0]
				} else {
					// File-backed vdev: recurse to find its backing device.
					subDevices, err := d.getParentBlocks(fields[0])
					if err != nil {
						return nil, err
					}

					for _, dev := range subDevices {
						devices = append(devices, dev)
					}
				}
			} else {
				continue
			}

			if path != "" {
				_, major, minor, err := unixDeviceAttributes(path)
				if err != nil {
					continue
				}

				devices = append(devices, fmt.Sprintf("%d:%d", major, minor))
			}
		}

		if len(devices) == 0 {
			return nil, fmt.Errorf("Unable to find backing block for zfs pool %q", poolName)
		}
	} else if fs == "btrfs" && shared.PathExists(dev[1]) {
		// Accessible btrfs filesystems
		output, err := shared.RunCommand("btrfs", "filesystem", "show", dev[1])
		if err != nil {
			// Fallback to using device path to support BTRFS on block volumes (like LVM).
			_, major, minor, errFallback := unixDeviceAttributes(dev[1])
			if errFallback != nil {
				return nil, errors.Wrapf(err, "Failed to query btrfs filesystem information for %q", dev[1])
			}

			devices = append(devices, fmt.Sprintf("%d:%d", major, minor))
		}

		// NOTE(review): when the btrfs command failed but the fallback above
		// succeeded, this loop still parses the failed command's output; it
		// relies on that output containing no "devid" lines — confirm.
		for _, line := range strings.Split(output, "\n") {
			fields := strings.Fields(line)
			if len(fields) == 0 || fields[0] != "devid" {
				continue
			}

			_, major, minor, err := unixDeviceAttributes(fields[len(fields)-1])
			if err != nil {
				return nil, err
			}

			devices = append(devices, fmt.Sprintf("%d:%d", major, minor))
		}
	} else if shared.PathExists(dev[1]) {
		// Anything else with a valid path
		_, major, minor, err := unixDeviceAttributes(dev[1])
		if err != nil {
			return nil, err
		}

		devices = append(devices, fmt.Sprintf("%d:%d", major, minor))
	} else {
		return nil, fmt.Errorf("Invalid block device %q", dev[1])
	}

	return devices, nil
}
// generateVMConfigDrive generates an ISO containing the cloud init config for a VM.
// Returns the path to the ISO. The scratch directory used to assemble the
// drive is removed on success.
func (d *disk) generateVMConfigDrive() (string, error) {
	scratchDir := filepath.Join(d.inst.DevicesPath(), storageDrivers.PathNameEncode(d.name))

	// Check we have the mkisofs tool available.
	mkisofsPath, err := exec.LookPath("mkisofs")
	if err != nil {
		return "", err
	}

	// Create config drive dir.
	err = os.MkdirAll(scratchDir, 0100)
	if err != nil {
		return "", err
	}

	instanceConfig := d.inst.ExpandedConfig()

	// Use an empty vendor-data file if no custom vendor-data supplied.
	vendorData := instanceConfig["user.vendor-data"]
	if vendorData == "" {
		vendorData = "#cloud-config\n{}"
	}

	err = ioutil.WriteFile(filepath.Join(scratchDir, "vendor-data"), []byte(vendorData), 0400)
	if err != nil {
		return "", err
	}

	// Use an empty user-data file if no custom user-data supplied.
	userData := instanceConfig["user.user-data"]
	if userData == "" {
		userData = "#cloud-config\n{}"
	}

	err = ioutil.WriteFile(filepath.Join(scratchDir, "user-data"), []byte(userData), 0400)
	if err != nil {
		return "", err
	}

	// Include a network-config file if the user configured it.
	networkConfig := instanceConfig["user.network-config"]
	if networkConfig != "" {
		err = ioutil.WriteFile(filepath.Join(scratchDir, "network-config"), []byte(networkConfig), 0400)
		if err != nil {
			return "", err
		}
	}

	// Append any custom meta-data to our predefined meta-data config.
	// Note: the raw string literal must stay flush-left, its contents are
	// written verbatim into the meta-data file.
	metaData := fmt.Sprintf(`instance-id: %s
local-hostname: %s
%s
`, d.inst.Name(), d.inst.Name(), instanceConfig["user.meta-data"])

	err = ioutil.WriteFile(filepath.Join(scratchDir, "meta-data"), []byte(metaData), 0400)
	if err != nil {
		return "", err
	}

	// Finally convert the config drive dir into an ISO file. The cidata label is important
	// as this is what cloud-init uses to detect, mount the drive and run the cloud-init
	// templates on first boot. The vendor-data template then modifies the system so that the
	// config drive is mounted and the agent is started on subsequent boots.
	isoPath := filepath.Join(d.inst.Path(), "config.iso")
	_, err = shared.RunCommand(mkisofsPath, "-J", "-R", "-V", "cidata", "-o", isoPath, scratchDir)
	if err != nil {
		return "", err
	}

	// Remove the config drive folder.
	os.RemoveAll(scratchDir)

	return isoPath, nil
}
// cephCreds returns cluster name and user name to use for ceph disks,
// falling back to "ceph" and "admin" when not set in the device config.
func (d *disk) cephCreds() (string, string) {
	clusterName, userName := d.config["ceph.cluster_name"], d.config["ceph.user_name"]

	if clusterName == "" {
		clusterName = "ceph"
	}

	if userName == "" {
		userName = "admin"
	}

	return clusterName, userName
}
|
package rsync
import (
"fmt"
"io"
"io/ioutil"
"net"
"os"
"os/exec"
"syscall"
"time"
"github.com/gorilla/websocket"
"github.com/pborman/uuid"
"github.com/lxc/lxd/lxd/daemon"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/logger"
)
// LocalCopy copies a directory using rsync (with the --devices option).
// bwlimit is passed to rsync's --bwlimit option; "" and "0" both mean
// unlimited. Returns the rsync output (also on failure, for logging).
func LocalCopy(source string, dest string, bwlimit string, xattrs bool) (string, error) {
	err := os.MkdirAll(dest, 0755)
	if err != nil {
		return "", err
	}

	rsyncVerbosity := "-q"
	if daemon.Debug {
		rsyncVerbosity = "-vi"
	}

	// rsync treats a bandwidth limit of 0 as "no limit".
	if bwlimit == "" {
		bwlimit = "0"
	}

	args := []string{
		"-a",
		"-HA",
		"--sparse",
		"--devices",
		"--delete",
		"--checksum",
		"--numeric-ids",
	}

	if xattrs {
		args = append(args, "--xattrs")
	}

	// bwlimit can never be empty at this point (defaulted to "0" above), so
	// the previous `if bwlimit != ""` guard was dead code and the option is
	// passed unconditionally.
	args = append(args, "--bwlimit", bwlimit)

	args = append(args,
		rsyncVerbosity,
		shared.AddSlash(source),
		dest)

	msg, err := shared.RunCommand("rsync", args...)
	if err != nil {
		runError, ok := err.(shared.RunError)
		if ok {
			exitError, ok := runError.Err.(*exec.ExitError)
			if ok {
				waitStatus := exitError.Sys().(syscall.WaitStatus)
				// Exit code 24 means "some files vanished during transfer",
				// which is harmless for our purposes.
				if waitStatus.ExitStatus() == 24 {
					return msg, nil
				}
			}
		}

		return msg, err
	}

	return msg, nil
}
// sendSetup spawns the sending rsync process for the given path, wiring its
// transport through an abstract unix socket this function listens on. It
// returns the running command, the accepted data connection and the command's
// stderr pipe. bwlimit "" means unlimited; features are negotiated rsync
// feature names translated via rsyncFeatureArgs.
func sendSetup(name string, path string, bwlimit string, execPath string, features []string) (*exec.Cmd, net.Conn, io.ReadCloser, error) {
	/*
	 * The way rsync works, it invokes a subprocess that does the actual
	 * talking (given to it by a -E argument). Since there isn't an easy
	 * way for us to capture this process' stdin/stdout, we just use netcat
	 * and write to/from a unix socket.
	 *
	 * In principle we don't need this socket. It seems to me that some
	 * clever invocation of rsync --server --sender and usage of that
	 * process' stdin/stdout could work around the need for this socket,
	 * but I couldn't get it to work. Another option would be to look at
	 * the spawned process' first child and read/write from its
	 * stdin/stdout, but that also seemed messy. In any case, this seems to
	 * work just fine.
	 */
	auds := fmt.Sprintf("@lxd/%s", uuid.NewRandom().String())
	// We simply copy a part of the uuid if it's longer than the allowed
	// maximum. That should be safe enough for our purposes.
	if len(auds) > shared.ABSTRACT_UNIX_SOCK_LEN-1 {
		auds = auds[:shared.ABSTRACT_UNIX_SOCK_LEN-1]
	}

	l, err := net.Listen("unix", auds)
	if err != nil {
		return nil, nil, nil, err
	}
	defer l.Close()

	/*
	 * Here, the path /tmp/foo is ignored. Since we specify localhost,
	 * rsync thinks we are syncing to a remote host (in this case, the
	 * other end of the lxd websocket), and so the path specified on the
	 * --server instance of rsync takes precedence.
	 *
	 * Additionally, we use sh -c instead of just calling nc directly
	 * because rsync passes a whole bunch of arguments to the wrapper
	 * command (i.e. the command to run on --server). However, we're
	 * hardcoding that at the other end, so we can just ignore it.
	 */
	rsyncCmd := fmt.Sprintf("sh -c \"%s netcat %s %s\"", execPath, auds, name)

	if bwlimit == "" {
		bwlimit = "0"
	}

	args := []string{
		"-ar",
		"--devices",
		"--numeric-ids",
		"--partial",
		"--sparse",
	}

	if features != nil && len(features) > 0 {
		args = append(args, rsyncFeatureArgs(features)...)
	}

	args = append(args, []string{
		path,
		"localhost:/tmp/foo",
		"-e",
		rsyncCmd,
		"--bwlimit",
		bwlimit}...)

	cmd := exec.Command("rsync", args...)

	stderr, err := cmd.StderrPipe()
	if err != nil {
		return nil, nil, nil, err
	}

	if err := cmd.Start(); err != nil {
		return nil, nil, nil, err
	}

	// Accept rsync's connection in the background so we can time out below
	// if the spawned process never connects back to our socket.
	var conn *net.Conn
	chConn := make(chan *net.Conn, 1)

	go func() {
		conn, err := l.Accept()
		if err != nil {
			chConn <- nil
			return
		}

		chConn <- &conn
	}()

	select {
	case conn = <-chConn:
		if conn == nil {
			cmd.Process.Kill()
			cmd.Wait()
			return nil, nil, nil, fmt.Errorf("Failed to connect to rsync socket")
		}

	case <-time.After(10 * time.Second):
		cmd.Process.Kill()
		cmd.Wait()
		return nil, nil, nil, fmt.Errorf("rsync failed to spawn after 10s")
	}

	return cmd, *conn, stderr, nil
}
// Send sets up the sending half of an rsync, to recursively send the
// directory pointed to by path over the websocket.
// readWrapper optionally wraps the data socket (e.g. for progress tracking);
// features are negotiated rsync features; bwlimit "" means unlimited.
func Send(name string, path string, conn *websocket.Conn, readWrapper func(io.ReadCloser) io.ReadCloser, features []string, bwlimit string, execPath string) error {
	cmd, dataSocket, stderr, err := sendSetup(name, path, bwlimit, execPath, features)
	if err != nil {
		return err
	}

	if dataSocket != nil {
		defer dataSocket.Close()
	}

	readPipe := io.ReadCloser(dataSocket)
	if readWrapper != nil {
		readPipe = readWrapper(dataSocket)
	}

	readDone, writeDone := shared.WebsocketMirror(conn, dataSocket, readPipe, nil, nil)

	chError := make(chan error, 1)
	go func() {
		// Use a goroutine-local error here. The previous code assigned to
		// the outer err variable, racing with the ReadAll assignment below;
		// the result is handed back through chError instead.
		waitErr := cmd.Wait()
		if waitErr != nil {
			// Unblock the mirror goroutines so the function can finish.
			dataSocket.Close()
			readPipe.Close()
		}

		chError <- waitErr
	}()

	// Drain stderr so it can be logged if the transfer failed.
	output, err := ioutil.ReadAll(stderr)
	if err != nil {
		cmd.Process.Kill()
	}

	err = <-chError
	if err != nil {
		logger.Errorf("Rsync send failed: %s: %s: %s", path, err, string(output))
	}

	<-readDone
	<-writeDone

	return err
}
// Recv sets up the receiving half of the websocket to rsync (the other
// half set up by rsync.Send), putting the contents in the directory specified
// by path.
// writeWrapper optionally wraps rsync's stdin (e.g. for progress tracking);
// features are negotiated rsync features translated via rsyncFeatureArgs.
func Recv(path string, conn *websocket.Conn, writeWrapper func(io.WriteCloser) io.WriteCloser, features []string) error {
	args := []string{
		"--server",
		"-vlogDtpre.iLsfx",
		"--numeric-ids",
		"--devices",
		"--partial",
		"--sparse",
	}

	// len() of a nil slice is 0, so the previous separate nil check was redundant.
	if len(features) > 0 {
		args = append(args, rsyncFeatureArgs(features)...)
	}

	args = append(args, []string{".", path}...)

	cmd := exec.Command("rsync", args...)

	stdin, err := cmd.StdinPipe()
	if err != nil {
		return err
	}

	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}

	stderr, err := cmd.StderrPipe()
	if err != nil {
		return err
	}

	if err := cmd.Start(); err != nil {
		return err
	}

	writePipe := io.WriteCloser(stdin)
	if writeWrapper != nil {
		writePipe = writeWrapper(stdin)
	}

	readDone, writeDone := shared.WebsocketMirror(conn, writePipe, stdout, nil, nil)

	// Drain stderr so it can be logged if the transfer failed.
	output, err := ioutil.ReadAll(stderr)
	if err != nil {
		cmd.Process.Kill()
		cmd.Wait()
		return err
	}

	err = cmd.Wait()
	if err != nil {
		logger.Errorf("Rsync receive failed: %s: %s: %s", path, err, string(output))
	}

	<-readDone
	<-writeDone

	return err
}
// rsyncFeatureArgs translates negotiated migration features into the
// corresponding rsync command line flags, preserving flag order.
func rsyncFeatureArgs(features []string) []string {
	args := []string{}

	featureFlags := []struct {
		name  string
		flags []string
	}{
		{"xattrs", []string{"--xattrs"}},
		{"delete", []string{"--delete"}},
		{"compress", []string{"--compress", "--compress-level=2"}},
	}

	for _, f := range featureFlags {
		if shared.StringInSlice(f.name, features) {
			args = append(args, f.flags...)
		}
	}

	return args
}
lxd/rsync: Switch to using io.ReadWriteCloser
Signed-off-by: Stéphane Graber <089afc6d81f66f1168a9849e15660feae286e024@ubuntu.com>
Signed-off-by: Thomas Parrott <6b778ce645fb0e3dde76d79eccad490955b1ae74@canonical.com>
package rsync
import (
"fmt"
"io"
"io/ioutil"
"net"
"os"
"os/exec"
"syscall"
"time"
"github.com/pborman/uuid"
"github.com/lxc/lxd/lxd/daemon"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/ioprogress"
"github.com/lxc/lxd/shared/logger"
)
// LocalCopy copies a directory using rsync (with the --devices option).
//
// An empty bwlimit disables bandwidth limiting (rsync treats "0" as
// unlimited). Rsync exit status 24 ("some files vanished") is treated as
// success, since files disappearing mid-copy is expected on a live
// filesystem. Returns rsync's output message alongside any error.
func LocalCopy(source string, dest string, bwlimit string, xattrs bool) (string, error) {
	err := os.MkdirAll(dest, 0755)
	if err != nil {
		return "", err
	}

	rsyncVerbosity := "-q"
	if daemon.Debug {
		rsyncVerbosity = "-vi"
	}

	// rsync interprets a limit of 0 as "no limit".
	if bwlimit == "" {
		bwlimit = "0"
	}

	args := []string{
		"-a",
		"-HA",
		"--sparse",
		"--devices",
		"--delete",
		"--checksum",
		"--numeric-ids",
	}

	if xattrs {
		args = append(args, "--xattrs")
	}

	// bwlimit is always non-empty here (defaulted to "0" above), so the
	// previous `if bwlimit != ""` guard was dead code and has been removed.
	args = append(args, "--bwlimit", bwlimit)

	args = append(args,
		rsyncVerbosity,
		shared.AddSlash(source),
		dest)

	msg, err := shared.RunCommand("rsync", args...)
	if err != nil {
		runError, ok := err.(shared.RunError)
		if ok {
			exitError, ok := runError.Err.(*exec.ExitError)
			if ok {
				waitStatus := exitError.Sys().(syscall.WaitStatus)
				if waitStatus.ExitStatus() == 24 {
					// Vanished source files are not an error.
					return msg, nil
				}
			}
		}

		return msg, err
	}

	return msg, nil
}
// sendSetup spawns rsync configured to talk to a netcat wrapper over an
// abstract unix socket, and returns the running command, the accepted data
// connection and rsync's stderr pipe. The caller owns all three and must
// Wait() on the command.
func sendSetup(name string, path string, bwlimit string, execPath string, features []string) (*exec.Cmd, net.Conn, io.ReadCloser, error) {
	/*
	 * The way rsync works, it invokes a subprocess that does the actual
	 * talking (given to it by a -E argument). Since there isn't an easy
	 * way for us to capture this process' stdin/stdout, we just use netcat
	 * and write to/from a unix socket.
	 *
	 * In principle we don't need this socket. It seems to me that some
	 * clever invocation of rsync --server --sender and usage of that
	 * process' stdin/stdout could work around the need for this socket,
	 * but I couldn't get it to work. Another option would be to look at
	 * the spawned process' first child and read/write from its
	 * stdin/stdout, but that also seemed messy. In any case, this seems to
	 * work just fine.
	 */
	auds := fmt.Sprintf("@lxd/%s", uuid.NewRandom().String())
	// We simply copy a part of the uuid if it's longer than the allowed
	// maximum. That should be safe enough for our purposes.
	if len(auds) > shared.ABSTRACT_UNIX_SOCK_LEN-1 {
		auds = auds[:shared.ABSTRACT_UNIX_SOCK_LEN-1]
	}
	l, err := net.Listen("unix", auds)
	if err != nil {
		return nil, nil, nil, err
	}
	// Closing the listener does not close the connection accepted below.
	defer l.Close()
	/*
	 * Here, the path /tmp/foo is ignored. Since we specify localhost,
	 * rsync thinks we are syncing to a remote host (in this case, the
	 * other end of the lxd websocket), and so the path specified on the
	 * --server instance of rsync takes precedence.
	 *
	 * Additionally, we use sh -c instead of just calling nc directly
	 * because rsync passes a whole bunch of arguments to the wrapper
	 * command (i.e. the command to run on --server). However, we're
	 * hardcoding that at the other end, so we can just ignore it.
	 */
	rsyncCmd := fmt.Sprintf("sh -c \"%s netcat %s %s\"", execPath, auds, name)
	// rsync interprets --bwlimit 0 as "unlimited".
	if bwlimit == "" {
		bwlimit = "0"
	}
	args := []string{
		"-ar",
		"--devices",
		"--numeric-ids",
		"--partial",
		"--sparse",
	}
	if features != nil && len(features) > 0 {
		args = append(args, rsyncFeatureArgs(features)...)
	}
	args = append(args, []string{
		path,
		"localhost:/tmp/foo",
		"-e",
		rsyncCmd,
		"--bwlimit",
		bwlimit}...)
	cmd := exec.Command("rsync", args...)
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return nil, nil, nil, err
	}
	if err := cmd.Start(); err != nil {
		return nil, nil, nil, err
	}
	var conn *net.Conn
	chConn := make(chan *net.Conn, 1)
	go func() {
		// This conn deliberately shadows the outer one; the accepted
		// connection is handed over via chConn (nil signals failure).
		conn, err := l.Accept()
		if err != nil {
			chConn <- nil
			return
		}
		chConn <- &conn
	}()
	select {
	case conn = <-chConn:
		if conn == nil {
			cmd.Process.Kill()
			cmd.Wait()
			return nil, nil, nil, fmt.Errorf("Failed to connect to rsync socket")
		}
	case <-time.After(10 * time.Second):
		// The netcat wrapper never dialed back: reap rsync and give up.
		cmd.Process.Kill()
		cmd.Wait()
		return nil, nil, nil, fmt.Errorf("rsync failed to spawn after 10s")
	}
	return cmd, *conn, stderr, nil
}
// Send sets up the sending half of an rsync, to recursively send the
// directory pointed to by path over the websocket.
//
// tracker, if non-nil, wraps the netcat read side so callers can observe
// transfer progress. Errors from rsync itself and from both copy
// goroutines are combined into the returned error, together with rsync's
// stderr output.
func Send(name string, path string, conn io.ReadWriteCloser, tracker *ioprogress.ProgressTracker, features []string, bwlimit string, execPath string) error {
	cmd, netcatConn, stderr, err := sendSetup(name, path, bwlimit, execPath, features)
	if err != nil {
		return err
	}
	// Setup progress tracker.
	readNetcatPipe := io.ReadCloser(netcatConn)
	if tracker != nil {
		readNetcatPipe = &ioprogress.ProgressReader{
			ReadCloser: netcatConn,
			Tracker:    tracker,
		}
	}
	// Forward from netcat to target.
	chCopyNetcat := make(chan error, 1)
	go func() {
		_, err := io.Copy(conn, readNetcatPipe)
		chCopyNetcat <- err
		readNetcatPipe.Close()
		netcatConn.Close()
		conn.Close() // sends barrier message.
	}()
	// Forward from target to netcat.
	writeNetcatPipe := io.WriteCloser(netcatConn)
	chCopyTarget := make(chan error, 1)
	go func() {
		_, err := io.Copy(writeNetcatPipe, conn)
		chCopyTarget <- err
		writeNetcatPipe.Close()
	}()
	// Wait for rsync to complete.
	output, err := ioutil.ReadAll(stderr)
	if err != nil {
		cmd.Process.Kill()
		logger.Errorf("Rsync stderr read failed: %s: %v", path, err)
	}
	err = cmd.Wait()
	errs := []error{}
	// Drain both copy goroutines before deciding the outcome; this also
	// guarantees they have exited by the time we return.
	chCopyNetcatErr := <-chCopyNetcat
	chCopyTargetErr := <-chCopyTarget
	if err != nil {
		errs = append(errs, err)
		// Try to get more info about the error.
		if chCopyNetcatErr != nil {
			errs = append(errs, chCopyNetcatErr)
		}
		if chCopyTargetErr != nil {
			errs = append(errs, chCopyTargetErr)
		}
	}
	if len(errs) > 0 {
		return fmt.Errorf("Rsync send failed: %s, %s: %v (%s)", name, path, errs, string(output))
	}
	return nil
}
// Recv sets up the receiving half of the websocket to rsync (the other
// half set up by rsync.Send), putting the contents in the directory specified
// by path.
//
// tracker, if non-nil, wraps the source stream for progress reporting.
// Errors from rsync and from both copy goroutines are combined into the
// returned error together with rsync's stderr output.
func Recv(path string, conn io.ReadWriteCloser, tracker *ioprogress.ProgressTracker, features []string) error {
	args := []string{
		"--server",
		"-vlogDtpre.iLsfx",
		"--numeric-ids",
		"--devices",
		"--partial",
		"--sparse",
	}

	// len() of a nil slice is 0, so the old `features != nil` guard was
	// redundant (staticcheck S1009).
	if len(features) > 0 {
		args = append(args, rsyncFeatureArgs(features)...)
	}

	args = append(args, []string{".", path}...)

	cmd := exec.Command("rsync", args...)

	// Create all pipes before starting any copy goroutine, so that a pipe
	// failure can return early without leaking blocked goroutines.
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}

	stdin, err := cmd.StdinPipe()
	if err != nil {
		return err
	}

	stderr, err := cmd.StderrPipe()
	if err != nil {
		// The command has not been started yet, so there is no process
		// to kill. (The original called cmd.Process.Kill() here on a nil
		// Process — a panic — and then carried on with a nil stderr.)
		return err
	}

	// Forward from rsync to source.
	chCopyRsync := make(chan error, 1)
	go func() {
		_, err := io.Copy(conn, stdout)
		chCopyRsync <- err
		stdout.Close()
		conn.Close() // sends barrier message.
	}()

	// Forward from source to rsync.
	readSourcePipe := io.ReadCloser(conn)
	if tracker != nil {
		readSourcePipe = &ioprogress.ProgressReader{
			ReadCloser: conn,
			Tracker:    tracker,
		}
	}

	chCopySource := make(chan error, 1)
	go func() {
		_, err := io.Copy(stdin, readSourcePipe)
		chCopySource <- err
		stdin.Close()
	}()

	err = cmd.Start()
	if err != nil {
		return err
	}

	output, err := ioutil.ReadAll(stderr)
	if err != nil {
		logger.Errorf("Rsync stderr read failed: %s: %v", path, err)
	}

	err = cmd.Wait()

	errs := []error{}
	// Drain both copy goroutines before deciding the outcome.
	chCopyRsyncErr := <-chCopyRsync
	chCopySourceErr := <-chCopySource

	if err != nil {
		errs = append(errs, err)

		// Try to get more info about the error.
		if chCopyRsyncErr != nil {
			errs = append(errs, chCopyRsyncErr)
		}

		if chCopySourceErr != nil {
			errs = append(errs, chCopySourceErr)
		}
	}

	if len(errs) > 0 {
		return fmt.Errorf("Rsync receive failed: %s: %v (%s)", path, errs, string(output))
	}

	return nil
}
// rsyncFeatureArgs translates negotiated migration features into the
// corresponding rsync command line flags, preserving flag order.
func rsyncFeatureArgs(features []string) []string {
	args := []string{}

	featureFlags := []struct {
		name  string
		flags []string
	}{
		{"xattrs", []string{"--xattrs"}},
		{"delete", []string{"--delete"}},
		{"compress", []string{"--compress", "--compress-level=2"}},
	}

	for _, f := range featureFlags {
		if shared.StringInSlice(f.name, features) {
			args = append(args, f.flags...)
		}
	}

	return args
}
|
package main
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"syscall"
"github.com/gorilla/websocket"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
log "gopkg.in/inconshreveable/log15.v2"
)
// storageLVMThinpoolExists checks whether the LV vgName/poolName exists
// and is a thin pool. A vgs exit status of 5 means "LV not found" and is
// reported as (false, nil) rather than an error.
func storageLVMThinpoolExists(vgName string, poolName string) (bool, error) {
	output, err := exec.Command("vgs", "--noheadings", "-o", "lv_attr", fmt.Sprintf("%s/%s", vgName, poolName)).Output()
	if err != nil {
		if exitError, ok := err.(*exec.ExitError); ok {
			waitStatus := exitError.Sys().(syscall.WaitStatus)
			if waitStatus.ExitStatus() == 5 {
				// pool LV was not found
				return false, nil
			}
		}

		// Include the underlying error; the original message dropped it.
		return false, fmt.Errorf("Error checking for pool '%s': %v", poolName, err)
	}

	// Found LV named poolname, check type: thin pool LVs have an lv_attr
	// string starting with 't'.
	attrs := strings.TrimSpace(string(output))
	if strings.HasPrefix(attrs, "t") {
		return true, nil
	}

	return false, fmt.Errorf("Pool named '%s' exists but is not a thin pool.", poolName)
}
// storageLVMGetThinPoolUsers returns the names of all containers,
// snapshots and images that still have an LVM-backed ".lv" link on disk.
// An empty "storage.lvm_vg_name" config means LVM is unused, so nothing
// can be a user.
func storageLVMGetThinPoolUsers(d *Daemon) ([]string, error) {
	results := []string{}

	if daemonConfig["storage.lvm_vg_name"].Get() == "" {
		return results, nil
	}

	cNames, err := dbContainersList(d.db, cTypeRegular)
	if err != nil {
		return results, err
	}

	for _, cName := range cNames {
		// Snapshots keep their .lv link under "snapshots", regular
		// containers under "containers".
		dir := "containers"
		if strings.Contains(cName, shared.SnapshotDelimiter) {
			dir = "snapshots"
		}

		if shared.PathExists(shared.VarPath(dir, fmt.Sprintf("%s.lv", cName))) {
			results = append(results, cName)
		}
	}

	imageNames, err := dbImagesGet(d.db, false)
	if err != nil {
		return results, err
	}

	for _, imageName := range imageNames {
		if shared.PathExists(shared.VarPath("images", fmt.Sprintf("%s.lv", imageName))) {
			results = append(results, imageName)
		}
	}

	return results, nil
}
// storageLVMValidateThinPoolName verifies that the thin pool name `value`
// may be configured: no existing containers/images may still use LVM, and
// a non-empty name must refer to an existing thin pool in vgName.
func storageLVMValidateThinPoolName(d *Daemon, vgName string, value string) error {
	users, err := storageLVMGetThinPoolUsers(d)
	if err != nil {
		return fmt.Errorf("Error checking if a pool is already in use: %v", err)
	}

	if len(users) > 0 {
		return fmt.Errorf("Can not change LVM config. Images or containers are still using LVs: %v", users)
	}

	// Clearing the thin pool name is always allowed at this point.
	if value == "" {
		return nil
	}

	if vgName == "" {
		return fmt.Errorf("Can not set lvm_thinpool_name without lvm_vg_name set.")
	}

	poolExists, err := storageLVMThinpoolExists(vgName, value)
	if err != nil {
		return fmt.Errorf("Error checking for thin pool '%s' in '%s': %v", value, vgName, err)
	}

	if !poolExists {
		return fmt.Errorf("Pool '%s' does not exist in Volume Group '%s'", value, vgName)
	}

	return nil
}
// xfsGenerateNewUUID regenerates the filesystem UUID of the XFS
// filesystem at lvpath, so that two block copies of the same image can be
// mounted at the same time.
func xfsGenerateNewUUID(lvpath string) error {
	output, err := exec.Command("xfs_admin", "-U", "generate", lvpath).CombinedOutput()
	if err != nil {
		return fmt.Errorf("Error generating new UUID: %v\noutput:'%s'", err, string(output))
	}

	return nil
}
// containerNameToLVName escapes a container name for use as an LVM LV
// name: literal dashes are doubled first, then the snapshot delimiter is
// turned into a single dash (the order matters).
func containerNameToLVName(containerName string) string {
	escaped := strings.Replace(containerName, "-", "--", -1)
	return strings.Replace(escaped, shared.SnapshotDelimiter, "-", -1)
}
// storageLvm implements the storage backend on top of LVM thin
// provisioning. It adds behaviour only; all state lives in the embedded
// storageShared.
type storageLvm struct {
	storageShared
}
// getLvmDevPath returns the device node path of an LV, e.g.
// "/dev/<pool>/<type>_<volume>".
func getLvmDevPath(lvmPool string, volumeType string, lvmVolume string) string {
	return "/dev/" + lvmPool + "/" + volumeType + "_" + lvmVolume
}
// getPrefixedLvName returns the LV name namespaced by its volume type,
// e.g. "custom_myvolume".
func getPrefixedLvName(volumeType string, lvmVolume string) string {
	return volumeType + "_" + lvmVolume
}
// getTmpSnapshotName returns the name used for a temporary snapshot LV.
func getTmpSnapshotName(snap string) string {
	return snap + "_tmp"
}
// StorageCoreInit only initializes the minimal information we need about
// the LVM storage type: its name and the version string reported by the
// locally installed LVM tools.
func (s *storageLvm) StorageCoreInit() (*storageCore, error) {
	sCore := storageCore{}
	sCore.sType = storageTypeLvm
	typeName, err := storageTypeToString(sCore.sType)
	if err != nil {
		return nil, err
	}
	sCore.sTypeName = typeName
	output, err := exec.Command("lvm", "version").CombinedOutput()
	if err != nil {
		return nil, fmt.Errorf("Error getting LVM version: %v\noutput:'%s'", err, string(output))
	}
	lines := strings.Split(string(output), "\n")
	// "lvm version" prints one "label: value" pair per line; collect the
	// values into a single " / " separated version string.
	sCore.sTypeVersion = ""
	for idx, line := range lines {
		fields := strings.SplitAfterN(line, ":", 2)
		if len(fields) < 2 {
			continue
		}
		// NOTE(review): assumes the first output line contains a ':';
		// if it did not, the version string would start with " / " —
		// confirm against actual `lvm version` output.
		if idx > 0 {
			sCore.sTypeVersion += " / "
		}
		sCore.sTypeVersion += strings.TrimSpace(fields[1])
	}
	err = sCore.initShared()
	if err != nil {
		return nil, err
	}
	s.storageCore = sCore
	return &sCore, nil
}
// StoragePoolInit initializes the pool driver. The config argument is
// currently unused; only the core type information is set up.
func (s *storageLvm) StoragePoolInit(config map[string]interface{}) (storage, error) {
	if _, err := s.StorageCoreInit(); err != nil {
		return s, err
	}

	return s, nil
}
// StoragePoolCheck validates the pool on startup. LVM pools need no
// runtime check, so this always succeeds.
func (s *storageLvm) StoragePoolCheck() error {
	return nil
}
// versionSplit parses a dotted version string of the form
// "major.minor.incremental[(extra)]" into its numeric components. Returns
// an error when the string has fewer than three dot-separated fields (the
// original indexed fs[1]/fs[2] unconditionally and panicked with an
// index-out-of-range on short input) or when a field is not numeric.
func versionSplit(versionString string) (int, int, int, error) {
	fs := strings.Split(versionString, ".")
	if len(fs) < 3 {
		return 0, 0, 0, fmt.Errorf("Invalid version string: %s", versionString)
	}

	maj, err := strconv.Atoi(fs[0])
	if err != nil {
		return 0, 0, 0, err
	}

	min, err := strconv.Atoi(fs[1])
	if err != nil {
		return 0, 0, 0, err
	}

	// LVM appends "(release)" to the incremental part, e.g. "133(2)".
	inc, err := strconv.Atoi(strings.Split(fs[2], "(")[0])
	if err != nil {
		return 0, 0, 0, err
	}

	return maj, min, inc, nil
}
// lvmVersionIsAtLeast reports whether the detected LVM version (the first
// "/"-separated component of s.sTypeVersion) is at least versionString.
//
// The comparison is lexicographic across (major, minor, incremental). The
// original compared each component independently
// (lvmMaj < inMaj || lvmMin < inMin || lvmInc < inInc), which wrongly
// reported e.g. 3.0.0 as older than 2.5.1.
func (s *storageLvm) lvmVersionIsAtLeast(versionString string) (bool, error) {
	lvmVersion := strings.Split(s.sTypeVersion, "/")[0]

	lvmMaj, lvmMin, lvmInc, err := versionSplit(lvmVersion)
	if err != nil {
		return false, err
	}

	inMaj, inMin, inInc, err := versionSplit(versionString)
	if err != nil {
		return false, err
	}

	if lvmMaj != inMaj {
		return lvmMaj > inMaj, nil
	}

	if lvmMin != inMin {
		return lvmMin > inMin, nil
	}

	return lvmInc >= inInc, nil
}
// StoragePoolCreate creates the LVM physical volume and volume group
// backing this pool on the block device named by the "source" config key.
// The deferred handlers undo whatever was already created if a later step
// fails; tryUndo disarms them on success.
func (s *storageLvm) StoragePoolCreate() error {
	tryUndo := true
	source := s.pool.Config["source"]
	if source == "" {
		return fmt.Errorf("No \"source\" property found for the storage pool.")
	}
	// Create the mountpoint for the storage pool.
	poolMntPoint := getStoragePoolMountPoint(s.pool.Name)
	err := os.MkdirAll(poolMntPoint, 0711)
	if err != nil {
		return err
	}
	defer func() {
		if tryUndo {
			os.Remove(poolMntPoint)
		}
	}()
	if !shared.IsBlockdevPath(source) {
		return fmt.Errorf("Loop backed lvm storage volumes are currently not supported.")
	}
	// Create a lvm physical volume.
	output, err := exec.Command("pvcreate", source).CombinedOutput()
	if err != nil {
		return fmt.Errorf("Failed to create the physical volume for the lvm storage pool: %s.", output)
	}
	defer func() {
		if tryUndo {
			// Best-effort rollback of the PV; errors are ignored.
			exec.Command("pvremove", source).Run()
		}
	}()
	// Create a volume group on the physical volume.
	output, err = exec.Command("vgcreate", s.pool.Name, source).CombinedOutput()
	if err != nil {
		return fmt.Errorf("Failed to create the volume group for the lvm storage pool: %s.", output)
	}
	// From here on the pool is referenced by its VG name.
	s.pool.Config["source"] = s.pool.Name
	// Deregister cleanup.
	tryUndo = false
	return nil
}
// StoragePoolDelete removes the volume group backing this pool and its
// mountpoint directory. The "source" config key must be set.
func (s *storageLvm) StoragePoolDelete() error {
	if s.pool.Config["source"] == "" {
		return fmt.Errorf("No \"source\" property found for the storage pool.")
	}

	// Tear down the volume group backing this pool.
	output, err := exec.Command("vgremove", "-f", s.pool.Name).CombinedOutput()
	if err != nil {
		return fmt.Errorf("Failed to destroy the volume group for the lvm storage pool: %s.", output)
	}

	// Drop the pool's mountpoint directory.
	if err := os.RemoveAll(getStoragePoolMountPoint(s.pool.Name)); err != nil {
		return err
	}

	return nil
}
// StoragePoolMount is a no-op for LVM pools: a volume group needs no
// mounting. Always reports success with "we mounted it" semantics.
func (s *storageLvm) StoragePoolMount() (bool, error) {
	return true, nil
}
// StoragePoolUmount is a no-op for LVM pools: a volume group needs no
// unmounting. Always reports success.
func (s *storageLvm) StoragePoolUmount() (bool, error) {
	return true, nil
}
// StoragePoolVolumeCreate provisions a thin LV for a custom storage
// volume, creates its mountpoint directory and mounts it. The deferred
// cleanup deletes the volume again if any later step fails; tryUndo
// disarms it on success.
func (s *storageLvm) StoragePoolVolumeCreate() error {
	tryUndo := true
	vgName := s.pool.Name
	thinPoolName := s.volume.Config["lvm.thinpool_name"]
	lvFsType := s.volume.Config["block.filesystem"]
	lvSize := s.volume.Config["size"]
	volumeType, err := storagePoolVolumeTypeNameToApiEndpoint(s.volume.Type)
	if err != nil {
		return err
	}
	err = s.createThinLV(vgName, thinPoolName, s.volume.Name, lvFsType, lvSize, volumeType)
	if err != nil {
		s.log.Error("LVMCreateThinLV", log.Ctx{"err": err})
		return fmt.Errorf("Error Creating LVM LV for new image: %v", err)
	}
	defer func() {
		if tryUndo {
			s.StoragePoolVolumeDelete()
		}
	}()
	customPoolVolumeMntPoint := getStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
	err = os.MkdirAll(customPoolVolumeMntPoint, 0711)
	if err != nil {
		return err
	}
	_, err = s.StoragePoolVolumeMount()
	if err != nil {
		return err
	}
	// Success: disarm the cleanup handler.
	tryUndo = false
	return nil
}
// StoragePoolVolumeDelete unmounts a custom storage volume, removes its
// LV and finally deletes its (now empty) mountpoint directory.
func (s *storageLvm) StoragePoolVolumeDelete() error {
	mntPoint := getStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)

	if _, err := s.StoragePoolVolumeUmount(); err != nil {
		return err
	}

	volumeType, err := storagePoolVolumeTypeNameToApiEndpoint(s.volume.Type)
	if err != nil {
		return err
	}

	if err := s.removeLV(s.pool.Name, volumeType, s.volume.Name); err != nil {
		return err
	}

	if shared.PathExists(mntPoint) {
		return os.Remove(mntPoint)
	}

	return nil
}
// StoragePoolVolumeMount mounts the custom volume's LV at its mountpoint.
// Returns (false, nil) when it was already mounted, (true, nil) when we
// performed the mount ourselves.
func (s *storageLvm) StoragePoolVolumeMount() (bool, error) {
	mntPoint := getStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
	if shared.IsMountPoint(mntPoint) {
		// Already mounted; nothing for us to do.
		return false, nil
	}

	volumeType, err := storagePoolVolumeTypeNameToApiEndpoint(s.volume.Type)
	if err != nil {
		return false, err
	}

	devPath := getLvmDevPath(s.pool.Name, volumeType, s.volume.Name)
	err = tryMount(devPath, mntPoint, s.volume.Config["block.filesystem"], 0, s.volume.Config["block.mount_options"])
	if err != nil {
		return false, err
	}

	return true, nil
}
// StoragePoolVolumeUmount unmounts the custom volume. Returns
// (false, nil) when it was not mounted, (true, nil) when we unmounted it.
func (s *storageLvm) StoragePoolVolumeUmount() (bool, error) {
	mntPoint := getStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
	if !shared.IsMountPoint(mntPoint) {
		return false, nil
	}

	if err := tryUnmount(mntPoint, 0); err != nil {
		return false, err
	}

	return true, nil
}
// GetStoragePoolWritable returns the mutable (PUT) representation of the
// pool this driver is bound to.
func (s *storageLvm) GetStoragePoolWritable() api.StoragePoolPut {
	return s.pool.Writable()
}
// GetStoragePoolVolumeWritable returns the mutable (PUT) representation
// of the volume this driver is bound to.
func (s *storageLvm) GetStoragePoolVolumeWritable() api.StorageVolumePut {
	return s.volume.Writable()
}
// SetStoragePoolWritable replaces the pool's mutable configuration with
// the given representation.
func (s *storageLvm) SetStoragePoolWritable(writable *api.StoragePoolPut) {
	s.pool.StoragePoolPut = *writable
}
// SetStoragePoolVolumeWritable replaces the volume's mutable
// configuration with the given representation.
func (s *storageLvm) SetStoragePoolVolumeWritable(writable *api.StorageVolumePut) {
	s.volume.StorageVolumePut = *writable
}
// ContainerPoolGet returns the name of the pool this driver manages.
func (s *storageLvm) ContainerPoolGet() string {
	return s.pool.Name
}
// ContainerPoolIDGet returns the database ID of the pool this driver
// manages.
func (s *storageLvm) ContainerPoolIDGet() int64 {
	return s.poolID
}
// StoragePoolUpdate rejects changes to immutable pool configuration keys.
// Mutable keys ("volume.block.mount_options", "volume.block.filesystem",
// "volume.size") are accepted without further action.
func (s *storageLvm) StoragePoolUpdate(changedConfig []string) error {
	// Immutable keys, checked in the historical order so the reported
	// error stays the same when several keys changed at once.
	immutable := []string{
		"size",
		"source",
		"volume.zfs.use_refquota",
		"volume.zfs.remove_snapshots",
		"zfs.pool_name",
		"volume.lvm.thinpool_name",
	}

	for _, key := range immutable {
		if shared.StringInSlice(key, changedConfig) {
			return fmt.Errorf("The \"%s\" property cannot be changed.", key)
		}
	}

	return nil
}
// StoragePoolVolumeUpdate accepts only a lone "block.mount_options"
// change; any other changed key (or combination) is rejected.
func (s *storageLvm) StoragePoolVolumeUpdate(changedConfig []string) error {
	if len(changedConfig) == 1 && shared.StringInSlice("block.mount_options", changedConfig) {
		return nil
	}

	return fmt.Errorf("The properties \"%v\" cannot be changed.", changedConfig)
}
// ContainerCreate creates a fresh thin LV for the container and sets up
// its (or, for a snapshot, the snapshot's) mountpoint and symlinks. The
// deferred cleanup deletes the partially-created container on failure;
// tryUndo disarms it on success.
func (s *storageLvm) ContainerCreate(container container) error {
	tryUndo := true
	containerName := container.Name()
	containerLvmName := containerNameToLVName(containerName)
	thinPoolName := s.volume.Config["lvm.thinpool_name"]
	lvFsType := s.volume.Config["block.filesystem"]
	lvSize := s.volume.Config["size"]
	err := s.createThinLV(s.pool.Name, thinPoolName, containerLvmName, lvFsType, lvSize, storagePoolVolumeApiEndpointContainers)
	if err != nil {
		return err
	}
	defer func() {
		if tryUndo {
			s.ContainerDelete(container)
		}
	}()
	if container.IsSnapshot() {
		// Snapshots live under the pool's "snapshots" directory and are
		// reachable through a symlink named after the parent container.
		containerMntPoint := getSnapshotMountPoint(s.pool.Name, containerName)
		fields := strings.SplitN(containerName, shared.SnapshotDelimiter, 2)
		sourceName := fields[0]
		snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.Name, "snapshots", sourceName)
		snapshotMntPointSymlink := shared.VarPath("snapshots", sourceName)
		err := os.MkdirAll(containerMntPoint, 0755)
		if err != nil {
			return err
		}
		err = createSnapshotMountpoint(containerMntPoint, snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
		if err != nil {
			return err
		}
	} else {
		containerMntPoint := getContainerMountPoint(s.pool.Name, containerName)
		containerPath := container.Path()
		err := os.MkdirAll(containerMntPoint, 0755)
		if err != nil {
			return err
		}
		err = createContainerMountpoint(containerMntPoint, containerPath, container.IsPrivileged())
		if err != nil {
			return err
		}
	}
	// Success: disarm the cleanup handler.
	tryUndo = false
	return nil
}
// ContainerCreateFromImage creates a container as a thin snapshot of the
// image LV identified by fingerprint, creating the image LV on this pool
// first when missing. A per-image channel in lxdStorageLockMap serializes
// concurrent creation attempts for the same image.
func (s *storageLvm) ContainerCreateFromImage(container container, fingerprint string) error {
	tryUndo := true
	// Check if the image already exists.
	imageMntPoint := getImageMountPoint(s.pool.Name, fingerprint)
	imageLvmDevPath := getLvmDevPath(s.pool.Name, storagePoolVolumeApiEndpointImages, fingerprint)
	imageStoragePoolLockID := fmt.Sprintf("%s/%s", s.pool.Name, fingerprint)
	lxdStorageLock.Lock()
	if waitChannel, ok := lxdStorageLockMap[imageStoragePoolLockID]; ok {
		// Someone else is creating this image: wait until they close the
		// channel to signal completion.
		lxdStorageLock.Unlock()
		if _, ok := <-waitChannel; ok {
			shared.LogWarnf("Value transmitted over image lock semaphore?")
		}
	} else {
		lxdStorageLockMap[imageStoragePoolLockID] = make(chan bool)
		lxdStorageLock.Unlock()
		var imgerr error
		if !shared.PathExists(imageMntPoint) || !shared.PathExists(imageLvmDevPath) {
			imgerr = s.ImageCreate(fingerprint)
		}
		// Wake up any waiters, then report the creation result.
		lxdStorageLock.Lock()
		if waitChannel, ok := lxdStorageLockMap[imageStoragePoolLockID]; ok {
			close(waitChannel)
			delete(lxdStorageLockMap, imageStoragePoolLockID)
		}
		lxdStorageLock.Unlock()
		if imgerr != nil {
			return imgerr
		}
	}
	containerName := container.Name()
	containerLvmName := containerNameToLVName(containerName)
	containerLvSnapshotPath, err := s.createSnapshotLV(s.pool.Name, fingerprint, storagePoolVolumeApiEndpointImages, containerLvmName, storagePoolVolumeApiEndpointContainers, false)
	if err != nil {
		return err
	}
	defer func() {
		if tryUndo {
			s.ContainerDelete(container)
		}
	}()
	containerMntPoint := getContainerMountPoint(s.pool.Name, containerName)
	containerPath := container.Path()
	err = os.MkdirAll(containerMntPoint, 0755)
	if err != nil {
		return err
	}
	err = createContainerMountpoint(containerMntPoint, containerPath, container.IsPrivileged())
	if err != nil {
		return err
	}
	// Generate a new xfs's UUID
	// (mounting two filesystems with the same UUID would conflict).
	lvFsType := s.volume.Config["block.filesystem"]
	if lvFsType == "xfs" {
		err := xfsGenerateNewUUID(containerLvSnapshotPath)
		if err != nil {
			return err
		}
	}
	ourMount, err := s.ContainerMount(containerName, containerPath)
	if err != nil {
		return err
	}
	if ourMount {
		defer s.ContainerUmount(containerName, containerPath)
	}
	// Privileged containers keep their rootfs directory private.
	if container.IsPrivileged() {
		err = os.Chmod(containerMntPoint, 0700)
	} else {
		err = os.Chmod(containerMntPoint, 0755)
	}
	if err != nil {
		return err
	}
	if !container.IsPrivileged() {
		// Shift uid/gid ownership into the container's idmap.
		err := s.shiftRootfs(container)
		if err != nil {
			return err
		}
	}
	err = container.TemplateApply("create")
	if err != nil {
		s.log.Error("Error in create template during ContainerCreateFromImage, continuing to unmount", log.Ctx{"err": err})
		return err
	}
	// Success: disarm the cleanup handler.
	tryUndo = false
	return nil
}
// ContainerCanRestore reports whether container can be restored from
// sourceContainer. LVM imposes no restrictions, so this always succeeds.
func (s *storageLvm) ContainerCanRestore(container container, sourceContainer container) error {
	return nil
}
// ContainerDelete unmounts the container (or snapshot), removes its LV
// and tears down its mountpoint and symlinks.
func (s *storageLvm) ContainerDelete(container container) error {
	containerName := container.Name()
	containerLvmName := containerNameToLVName(containerName)

	containerMntPoint := ""
	if container.IsSnapshot() {
		containerMntPoint = getSnapshotMountPoint(s.pool.Name, containerName)
	} else {
		containerMntPoint = getContainerMountPoint(s.pool.Name, containerName)
	}

	// Make sure that the container is really unmounted at this point.
	// Otherwise we will fail.
	if shared.IsMountPoint(containerMntPoint) {
		err := tryUnmount(containerMntPoint, 0)
		if err != nil {
			return fmt.Errorf("failed to unmount container path '%s': %s", containerMntPoint, err)
		}
	}

	err := s.removeLV(s.pool.Name, storagePoolVolumeApiEndpointContainers, containerLvmName)
	if err != nil {
		return err
	}

	if container.IsSnapshot() {
		fields := strings.SplitN(containerName, shared.SnapshotDelimiter, 2)
		sourceName := fields[0]
		snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.Name, "snapshots", sourceName)
		snapshotMntPointSymlink := shared.VarPath("snapshots", sourceName)
		err = deleteSnapshotMountpoint(containerMntPoint, snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
	} else {
		// Best-effort extra unmount: the path was already unmounted
		// above. The original assigned this error to err and immediately
		// overwrote it (staticcheck SA4006); keep it explicitly ignored
		// rather than silently clobbered.
		_ = tryUnmount(containerMntPoint, 0)
		err = deleteContainerMountpoint(containerMntPoint, container.Path(), s.GetStorageTypeName())
	}
	if err != nil {
		return err
	}

	return nil
}
// ContainerCopy copies sourceContainer into container. When the source is
// also LVM-backed a thin snapshot is taken; otherwise an empty container
// is created and the contents are rsynced across.
func (s *storageLvm) ContainerCopy(container container, sourceContainer container) error {
	tryUndo := true

	err := sourceContainer.StorageStart()
	if err != nil {
		return err
	}
	defer sourceContainer.StorageStop()

	if sourceContainer.Storage().GetStorageType() == storageTypeLvm {
		// Same backend: cheap snapshot-based copy.
		err := s.createSnapshotContainer(container, sourceContainer, false)
		if err != nil {
			s.log.Error("Error creating snapshot LV for copy", log.Ctx{"err": err})
			return err
		}
	} else {
		sourceContainerName := sourceContainer.Name()
		targetContainerName := container.Name()
		s.log.Info("Copy from Non-LVM container", log.Ctx{"container": targetContainerName, "sourceContainer": sourceContainerName})

		err := s.ContainerCreate(container)
		if err != nil {
			s.log.Error("Error creating empty container", log.Ctx{"err": err})
			return err
		}
		defer func() {
			if tryUndo {
				s.ContainerDelete(container)
			}
		}()

		targetContainerPath := container.Path()
		ourSourceMount, err := s.ContainerMount(targetContainerName, targetContainerPath)
		if err != nil {
			s.log.Error("Error starting/mounting container", log.Ctx{"err": err, "container": targetContainerName})
			return err
		}
		if ourSourceMount {
			defer s.ContainerUmount(targetContainerName, targetContainerPath)
		}

		sourceContainerPath := sourceContainer.Path()
		ourTargetMount, err := sourceContainer.Storage().ContainerMount(sourceContainerName, sourceContainerPath)
		if err != nil {
			return err
		}
		if ourTargetMount {
			// BUG FIX: the source must stay mounted until after the
			// rsync below; the original unmounted it immediately here,
			// before the copy had run.
			defer sourceContainer.Storage().ContainerUmount(sourceContainerName, sourceContainerPath)
		}

		sourcePool := sourceContainer.Storage().ContainerPoolGet()
		sourceContainerMntPoint := getContainerMountPoint(sourcePool, sourceContainerName)
		targetContainerMntPoint := getContainerMountPoint(s.pool.Name, targetContainerName)
		output, err := storageRsyncCopy(sourceContainerMntPoint, targetContainerMntPoint)
		if err != nil {
			s.log.Error("ContainerCopy: rsync failed", log.Ctx{"output": string(output)})
			s.ContainerDelete(container)
			return fmt.Errorf("rsync failed: %s", string(output))
		}
	}

	err = container.TemplateApply("copy")
	if err != nil {
		return err
	}

	// Success: disarm the cleanup handler.
	tryUndo = false

	return nil
}
// ContainerMount mounts the container's LV at its mountpoint. A per-name
// channel in lxdStorageLockMap serializes concurrent mount attempts: when
// another goroutine is already mounting the same container we wait for it
// and report (false, nil), i.e. "not our mount".
func (s *storageLvm) ContainerMount(name string, path string) (bool, error) {
	containerLvmName := containerNameToLVName(name)
	lvFsType := s.volume.Config["block.filesystem"]
	containerLvmPath := getLvmDevPath(s.pool.Name, storagePoolVolumeApiEndpointContainers, containerLvmName)
	mountOptions := s.volume.Config["block.mount_options"]
	containerMntPoint := getContainerMountPoint(s.pool.Name, name)
	containerMountLockID := fmt.Sprintf("mount/%s/%s", s.pool.Name, name)
	lxdStorageLock.Lock()
	if waitChannel, ok := lxdStorageLockMap[containerMountLockID]; ok {
		lxdStorageLock.Unlock()
		// The channel is only ever closed (never sent on); a received
		// value would indicate a protocol violation.
		if _, ok := <-waitChannel; ok {
			shared.LogWarnf("Value transmitted over image lock semaphore?")
		}
		// Give the benefit of the doubt and assume that the other
		// thread actually succeeded in mounting the storage volume.
		return false, nil
	}
	lxdStorageLockMap[containerMountLockID] = make(chan bool)
	lxdStorageLock.Unlock()
	var imgerr error
	ourMount := false
	if !shared.IsMountPoint(containerMntPoint) {
		imgerr = tryMount(containerLvmPath, containerMntPoint, lvFsType, 0, mountOptions)
		ourMount = true
	}
	// Wake up any waiters and release the lock slot.
	lxdStorageLock.Lock()
	if waitChannel, ok := lxdStorageLockMap[containerMountLockID]; ok {
		close(waitChannel)
		delete(lxdStorageLockMap, containerMountLockID)
	}
	lxdStorageLock.Unlock()
	if imgerr != nil {
		return false, imgerr
	}
	return ourMount, nil
}
// ContainerUmount unmounts the container's mountpoint. Mirrors
// ContainerMount's locking: a per-name channel in lxdStorageLockMap
// serializes concurrent unmount attempts; waiters report (false, nil).
func (s *storageLvm) ContainerUmount(name string, path string) (bool, error) {
	containerMntPoint := getContainerMountPoint(s.pool.Name, name)
	containerUmountLockID := fmt.Sprintf("umount/%s/%s", s.pool.Name, name)
	lxdStorageLock.Lock()
	if waitChannel, ok := lxdStorageLockMap[containerUmountLockID]; ok {
		lxdStorageLock.Unlock()
		// The channel is only ever closed (never sent on).
		if _, ok := <-waitChannel; ok {
			shared.LogWarnf("Value transmitted over image lock semaphore?")
		}
		// Give the benefit of the doubt and assume that the other
		// thread actually succeeded in unmounting the storage volume.
		return false, nil
	}
	lxdStorageLockMap[containerUmountLockID] = make(chan bool)
	lxdStorageLock.Unlock()
	var imgerr error
	ourUmount := false
	if shared.IsMountPoint(containerMntPoint) {
		imgerr = tryUnmount(containerMntPoint, 0)
		ourUmount = true
	}
	// Wake up any waiters and release the lock slot.
	lxdStorageLock.Lock()
	if waitChannel, ok := lxdStorageLockMap[containerUmountLockID]; ok {
		close(waitChannel)
		delete(lxdStorageLockMap, containerUmountLockID)
	}
	lxdStorageLock.Unlock()
	if imgerr != nil {
		return false, imgerr
	}
	return ourUmount, nil
}
// ContainerRename renames the container's LV and, for a non-snapshot,
// recursively renames all of its snapshots plus the mountpoint
// directories and symlinks on disk. The deferred handler renames the LV
// back if a later step fails; tryUndo disarms it on success.
func (s *storageLvm) ContainerRename(container container, newContainerName string) error {
	tryUndo := true
	oldName := container.Name()
	oldLvmName := containerNameToLVName(oldName)
	newLvmName := containerNameToLVName(newContainerName)
	_, err := s.ContainerUmount(oldName, container.Path())
	if err != nil {
		return err
	}
	output, err := s.renameLV(oldLvmName, newLvmName, storagePoolVolumeApiEndpointContainers)
	if err != nil {
		s.log.Error("Failed to rename a container LV", log.Ctx{"oldName": oldLvmName, "newName": newLvmName, "err": err, "output": string(output)})
		return fmt.Errorf("Failed to rename a container LV, oldName='%s', newName='%s', err='%s'", oldLvmName, newLvmName, err)
	}
	defer func() {
		if tryUndo {
			s.renameLV(newLvmName, oldLvmName, storagePoolVolumeApiEndpointContainers)
		}
	}()
	// MAYBE(FIXME(brauner)): Register another cleanup function that tries to
	// rename alreday renamed snapshots back to their old name when the
	// rename fails.
	if !container.IsSnapshot() {
		// Recursively rename every snapshot to live under the new name.
		snaps, err := container.Snapshots()
		if err != nil {
			return err
		}
		for _, snap := range snaps {
			baseSnapName := filepath.Base(snap.Name())
			newSnapshotName := newContainerName + shared.SnapshotDelimiter + baseSnapName
			err := s.ContainerRename(snap, newSnapshotName)
			if err != nil {
				return err
			}
		}
		// Move the container mountpoint and its symlink.
		oldContainerMntPoint := getContainerMountPoint(s.pool.Name, oldName)
		oldContainerMntPointSymlink := container.Path()
		newContainerMntPoint := getContainerMountPoint(s.pool.Name, newContainerName)
		newContainerMntPointSymlink := shared.VarPath("containers", newContainerName)
		err = renameContainerMountpoint(oldContainerMntPoint, oldContainerMntPointSymlink, newContainerMntPoint, newContainerMntPointSymlink)
		if err != nil {
			return err
		}
		// Move the snapshots directory, if any.
		oldSnapshotPath := getSnapshotMountPoint(s.pool.Name, oldName)
		newSnapshotPath := getSnapshotMountPoint(s.pool.Name, newContainerName)
		if shared.PathExists(oldSnapshotPath) {
			err = os.Rename(oldSnapshotPath, newSnapshotPath)
			if err != nil {
				return err
			}
		}
		// Re-point the snapshots symlink, if any.
		oldSnapshotSymlink := shared.VarPath("snapshots", oldName)
		newSnapshotSymlink := shared.VarPath("snapshots", newContainerName)
		if shared.PathExists(oldSnapshotSymlink) {
			err := os.Remove(oldSnapshotSymlink)
			if err != nil {
				return err
			}
			err = os.Symlink(newSnapshotPath, newSnapshotSymlink)
			if err != nil {
				return err
			}
		}
	}
	// Success: disarm the rename-back handler.
	tryUndo = false
	return nil
}
// ContainerRestore rolls container back to the state of sourceContainer by
// removing the target's LV and recreating it as an LVM snapshot of the
// source's LV. Both containers must be on the same storage pool.
func (s *storageLvm) ContainerRestore(container container, sourceContainer container) error {
	err := sourceContainer.StorageStart()
	if err != nil {
		return err
	}
	defer sourceContainer.StorageStop()

	if s.pool.Name != sourceContainer.Storage().ContainerPoolGet() {
		return fmt.Errorf("Containers must be on the same pool to be restored.")
	}

	srcName := sourceContainer.Name()
	srcLvName := containerNameToLVName(srcName)
	if sourceContainer.IsSnapshot() {
		// Started snapshots are exposed as "<name>_tmp" LVs; restore
		// from that temporary LV.
		srcLvName = getTmpSnapshotName(srcLvName)
	}

	destName := container.Name()
	destLvName := containerNameToLVName(destName)

	_, err = container.Storage().ContainerUmount(container.Name(), container.Path())
	if err != nil {
		return err
	}

	// Best effort: the destination LV may not exist, so removal failures
	// are only logged. Bug fix: the LV is addressed by its escaped LVM
	// name (destLvName), not the raw container name — the two differ for
	// names containing dashes or the snapshot delimiter.
	err = s.removeLV(s.pool.Name, storagePoolVolumeApiEndpointContainers, destLvName)
	if err != nil {
		s.log.Error(fmt.Sprintf("Failed to remove \"%s\": %s.", destLvName, err))
	}

	_, err = s.createSnapshotLV(s.pool.Name, srcLvName, storagePoolVolumeApiEndpointContainers, destLvName, storagePoolVolumeApiEndpointContainers, false)
	if err != nil {
		return fmt.Errorf("Error creating snapshot LV: %v", err)
	}

	return nil
}
// ContainerSetQuota is not implemented: per-container disk quotas are not
// supported by the LVM backend.
func (s *storageLvm) ContainerSetQuota(container container, size int64) error {
	return fmt.Errorf("The LVM container backend doesn't support quotas.")
}
// ContainerGetUsage is not implemented: the LVM backend cannot report
// per-container disk usage. It always returns -1 and an error.
func (s *storageLvm) ContainerGetUsage(container container) (int64, error) {
	return -1, fmt.Errorf("The LVM container backend doesn't support quotas.")
}
// ContainerSnapshotCreate creates snapshotContainer as a read-only LVM
// snapshot of sourceContainer.
func (s *storageLvm) ContainerSnapshotCreate(snapshotContainer container, sourceContainer container) error {
	return s.createSnapshotContainer(snapshotContainer, sourceContainer, true)
}
// createSnapshotContainer creates snapshotContainer as an LVM snapshot of
// sourceContainer's LV (read-only when readonly is set) and then creates the
// matching mountpoint: a snapshot mountpoint plus symlink when the target is
// a snapshot, a regular container mountpoint otherwise.
func (s *storageLvm) createSnapshotContainer(snapshotContainer container, sourceContainer container, readonly bool) error {
	tryUndo := true

	sourceContainerName := sourceContainer.Name()
	targetContainerName := snapshotContainer.Name()
	sourceContainerLvmName := containerNameToLVName(sourceContainerName)
	targetContainerLvmName := containerNameToLVName(targetContainerName)
	shared.LogDebug("Creating snapshot", log.Ctx{"srcName": sourceContainerName, "destName": targetContainerName})

	_, err := s.createSnapshotLV(s.pool.Name, sourceContainerLvmName, storagePoolVolumeApiEndpointContainers, targetContainerLvmName, storagePoolVolumeApiEndpointContainers, readonly)
	if err != nil {
		return fmt.Errorf("Error creating snapshot LV: %s", err)
	}
	// If the mountpoint setup below fails, tear the freshly created
	// snapshot back down. Bug fix: this previously called ContainerCreate,
	// which would have created yet another LV instead of undoing the one
	// just created.
	defer func() {
		if tryUndo {
			s.ContainerDelete(snapshotContainer)
		}
	}()

	targetContainerMntPoint := ""
	targetContainerPath := snapshotContainer.Path()
	targetIsSnapshot := snapshotContainer.IsSnapshot()
	if targetIsSnapshot {
		targetContainerMntPoint = getSnapshotMountPoint(s.pool.Name, targetContainerName)
		// The symlink is named after the parent container, i.e. the
		// part before the snapshot delimiter.
		sourceFields := strings.SplitN(sourceContainerName, shared.SnapshotDelimiter, 2)
		sourceName := sourceFields[0]
		sourcePool := sourceContainer.Storage().ContainerPoolGet()
		snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", sourcePool, "snapshots", sourceName)
		snapshotMntPointSymlink := shared.VarPath("snapshots", sourceName)
		err = createSnapshotMountpoint(targetContainerMntPoint, snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
	} else {
		targetContainerMntPoint = getContainerMountPoint(s.pool.Name, targetContainerName)
		err = createContainerMountpoint(targetContainerMntPoint, targetContainerPath, snapshotContainer.IsPrivileged())
	}
	if err != nil {
		return err
	}

	// Deregister the undo handler.
	tryUndo = false

	return nil
}
// ContainerSnapshotDelete removes a snapshot, reusing the regular container
// deletion path and wrapping any failure with the snapshot's name.
func (s *storageLvm) ContainerSnapshotDelete(snapshotContainer container) error {
	if err := s.ContainerDelete(snapshotContainer); err != nil {
		return fmt.Errorf("Error deleting snapshot %s: %s", snapshotContainer.Name(), err)
	}

	return nil
}
// ContainerSnapshotRename renames a snapshot's LV and moves its mountpoint.
// If the mountpoint move fails, the LV rename is rolled back.
func (s *storageLvm) ContainerSnapshotRename(snapshotContainer container, newContainerName string) error {
	tryUndo := true

	oldName := snapshotContainer.Name()
	oldLvmName := containerNameToLVName(oldName)
	newLvmName := containerNameToLVName(newContainerName)

	output, err := s.renameLV(oldLvmName, newLvmName, storagePoolVolumeApiEndpointContainers)
	if err != nil {
		s.log.Error("Failed to rename a snapshot LV", log.Ctx{"oldName": oldLvmName, "newName": newLvmName, "err": err, "output": string(output)})
		return fmt.Errorf("Failed to rename a container LV, oldName='%s', newName='%s', err='%s'", oldLvmName, newLvmName, err)
	}
	// Undo the LV rename if moving the mountpoint fails.
	defer func() {
		if tryUndo {
			s.renameLV(newLvmName, oldLvmName, storagePoolVolumeApiEndpointContainers)
		}
	}()

	// Move the snapshot mountpoint to match the new name.
	oldSnapshotMntPoint := getSnapshotMountPoint(s.pool.Name, oldName)
	newSnapshotMntPoint := getSnapshotMountPoint(s.pool.Name, newContainerName)
	err = os.Rename(oldSnapshotMntPoint, newSnapshotMntPoint)
	if err != nil {
		return err
	}

	// Deregister the undo handler.
	tryUndo = false

	return nil
}
// ContainerSnapshotStart makes a (read-only) snapshot usable by creating a
// writable temporary "<name>_tmp" snapshot-of-a-snapshot LV and mounting it
// on the snapshot's mountpoint. ContainerSnapshotStop removes the temporary
// LV again. The temporary LV is cleaned up if mounting fails.
func (s *storageLvm) ContainerSnapshotStart(container container) error {
	tryUndo := true

	sourceName := container.Name()
	targetName := sourceName
	sourceLvmName := containerNameToLVName(sourceName)
	targetLvmName := containerNameToLVName(targetName)

	tmpTargetLvmName := getTmpSnapshotName(targetLvmName)

	shared.LogDebug("Creating snapshot", log.Ctx{"srcName": sourceLvmName, "destName": targetLvmName})

	lvpath, err := s.createSnapshotLV(s.pool.Name, sourceLvmName, storagePoolVolumeApiEndpointContainers, tmpTargetLvmName, storagePoolVolumeApiEndpointContainers, false)
	if err != nil {
		return fmt.Errorf("Error creating snapshot LV: %s", err)
	}
	// Remove the temporary LV again if any of the steps below fail.
	defer func() {
		if tryUndo {
			s.removeLV(s.pool.Name, storagePoolVolumeApiEndpointContainers, tmpTargetLvmName)
		}
	}()

	lvFsType := s.volume.Config["block.filesystem"]
	containerLvmPath := getLvmDevPath(s.pool.Name, storagePoolVolumeApiEndpointContainers, tmpTargetLvmName)
	mountOptions := s.volume.Config["block.mount_options"]
	containerMntPoint := getSnapshotMountPoint(s.pool.Name, sourceName)

	// Generate a new xfs's UUID
	// (xfs refuses to mount two filesystems with the same UUID at once).
	if lvFsType == "xfs" {
		err := xfsGenerateNewUUID(lvpath)
		if err != nil {
			return err
		}
	}

	if !shared.IsMountPoint(containerMntPoint) {
		err = tryMount(containerLvmPath, containerMntPoint, lvFsType, 0, mountOptions)
		if err != nil {
			return fmt.Errorf("Error mounting snapshot LV path='%s': %s", containerMntPoint, err)
		}
	}

	// Deregister the undo handler.
	tryUndo = false

	return nil
}
// ContainerSnapshotStop undoes ContainerSnapshotStart: it unmounts the
// snapshot's mountpoint (if mounted) and removes the temporary "<name>_tmp"
// LV that was created when the snapshot was started.
func (s *storageLvm) ContainerSnapshotStop(container container) error {
	name := container.Name()

	mntPoint := getSnapshotMountPoint(s.pool.Name, name)
	if shared.IsMountPoint(mntPoint) {
		if err := tryUnmount(mntPoint, 0); err != nil {
			return err
		}
	}

	tmpLvName := getTmpSnapshotName(containerNameToLVName(name))
	return s.removeLV(s.pool.Name, storagePoolVolumeApiEndpointContainers, tmpLvName)
}
// ContainerSnapshotCreateEmpty provisions an empty snapshot by reusing the
// regular container creation path (fresh thin LV plus mountpoints).
func (s *storageLvm) ContainerSnapshotCreateEmpty(snapshotContainer container) error {
	return s.ContainerCreate(snapshotContainer)
}
// ImageCreate creates a thin LV for the image identified by fingerprint,
// registers it in the database, mounts it and unpacks the image tarball into
// it. On failure after the LV was created, everything is rolled back via
// ImageDelete.
func (s *storageLvm) ImageCreate(fingerprint string) error {
	tryUndo := true

	vgName := s.pool.Name
	thinPoolName := s.volume.Config["lvm.thinpool_name"]
	lvFsType := s.volume.Config["block.filesystem"]
	lvSize := s.volume.Config["size"]

	err := s.createImageDbPoolVolume(fingerprint)
	if err != nil {
		return err
	}

	err = s.createThinLV(vgName, thinPoolName, fingerprint, lvFsType, lvSize, storagePoolVolumeApiEndpointImages)
	if err != nil {
		s.log.Error("LVMCreateThinLV", log.Ctx{"err": err})
		return fmt.Errorf("Error Creating LVM LV for new image: %v", err)
	}
	// Roll back the LV and db entry if unpacking fails.
	defer func() {
		if tryUndo {
			s.ImageDelete(fingerprint)
		}
	}()

	// Create image mountpoint.
	imageMntPoint := getImageMountPoint(s.pool.Name, fingerprint)
	if !shared.PathExists(imageMntPoint) {
		err := os.MkdirAll(imageMntPoint, 0700)
		if err != nil {
			return err
		}
	}

	_, err = s.ImageMount(fingerprint)
	if err != nil {
		return err
	}

	imagePath := shared.VarPath("images", fingerprint)
	err = unpackImage(s.d, imagePath, imageMntPoint, storageTypeLvm)
	if err != nil {
		return err
	}

	// NOTE(review): the unmount error is deliberately ignored here
	// (best-effort cleanup after a successful unpack) — confirm.
	s.ImageUmount(fingerprint)

	// Deregister the undo handler.
	tryUndo = false

	return nil
}
// ImageDelete removes the image's LV, its database entry and its mountpoint
// directory. The volume is unmounted first, since a mounted LV cannot be
// removed.
func (s *storageLvm) ImageDelete(fingerprint string) error {
	if _, err := s.ImageUmount(fingerprint); err != nil {
		return err
	}

	if err := s.removeLV(s.pool.Name, storagePoolVolumeApiEndpointImages, fingerprint); err != nil {
		return err
	}

	if err := s.deleteImageDbPoolVolume(fingerprint); err != nil {
		return err
	}

	mntPoint := getImageMountPoint(s.pool.Name, fingerprint)
	if !shared.PathExists(mntPoint) {
		return nil
	}

	return os.Remove(mntPoint)
}
// ImageMount mounts the image LV on its mountpoint. The boolean return
// reports whether this call performed the mount (false means it was already
// mounted).
func (s *storageLvm) ImageMount(fingerprint string) (bool, error) {
	mntPoint := getImageMountPoint(s.pool.Name, fingerprint)
	if shared.IsMountPoint(mntPoint) {
		return false, nil
	}

	fsType := s.volume.Config["block.filesystem"]
	if fsType == "" {
		// Shouldn't happen; the filesystem type is part of the config.
		return false, fmt.Errorf("No filesystem type specified.")
	}

	// Shouldn't be necessary since it should be validated in the config
	// checks.
	mountOptions := s.volume.Config["block.mount_options"]
	if fsType == "ext4" && mountOptions == "" {
		mountOptions = "discard"
	}

	devPath := getLvmDevPath(s.pool.Name, storagePoolVolumeApiEndpointImages, fingerprint)
	if err := tryMount(devPath, mntPoint, fsType, 0, mountOptions); err != nil {
		shared.LogInfof("Error mounting image LV for unpacking: %s", err)
		return false, fmt.Errorf("Error mounting image LV: %v", err)
	}

	return true, nil
}
// ImageUmount unmounts the image LV. The boolean return reports whether this
// call performed the unmount (false means it was not mounted).
func (s *storageLvm) ImageUmount(fingerprint string) (bool, error) {
	mntPoint := getImageMountPoint(s.pool.Name, fingerprint)
	if !shared.IsMountPoint(mntPoint) {
		return false, nil
	}

	if err := tryUnmount(mntPoint, 0); err != nil {
		return false, err
	}

	return true, nil
}
// createThinLV creates a thin LV named "<volumeType>_<lvName>" of virtual
// size lvSize in the given thin pool and puts a filesystem (xfs, or ext4 by
// default) on it. The thin pool itself is created on first use.
func (s *storageLvm) createThinLV(vgName string, thinPoolName string, lvName string, lvFsType string, lvSize string, volumeType string) error {
	exists, err := storageLVMThinpoolExists(vgName, thinPoolName)
	if err != nil {
		return err
	}

	if !exists {
		// First LV in this pool: create the thin pool itself and
		// validate that its name is not already in use elsewhere.
		err := s.createDefaultThinPool(vgName, thinPoolName, lvName, lvFsType)
		if err != nil {
			return err
		}

		err = storageLVMValidateThinPoolName(s.d, vgName, thinPoolName)
		if err != nil {
			s.log.Error("Setting thin pool name", log.Ctx{"err": err})
			return fmt.Errorf("Error setting LVM thin pool config: %v", err)
		}
	}

	lvmThinPoolPath := fmt.Sprintf("%s/%s", vgName, thinPoolName)
	lvmPoolVolumeName := getPrefixedLvName(volumeType, lvName)
	// lvSize is stored in bytes; the trailing "B" tells lvcreate the unit.
	output, err := tryExec(
		"lvcreate",
		"--thin",
		"-n", lvmPoolVolumeName,
		"--virtualsize", lvSize+"B", lvmThinPoolPath)
	if err != nil {
		s.log.Error("Could not create LV", log.Ctx{"lvname": lvmPoolVolumeName, "output": string(output)})
		return fmt.Errorf("Could not create thin LV named %s", lvmPoolVolumeName)
	}

	fsPath := getLvmDevPath(vgName, volumeType, lvName)

	switch lvFsType {
	case "xfs":
		output, err = tryExec("mkfs.xfs", fsPath)
	default:
		// default = ext4
		output, err = tryExec(
			"mkfs.ext4",
			"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0",
			fsPath)
	}

	if err != nil {
		s.log.Error("Filesystem creation failed", log.Ctx{"output": string(output)})
		return fmt.Errorf("Error making filesystem on image LV: %v", err)
	}

	return nil
}
// createDefaultThinPool creates the LVM thin pool in vgName. Recent LVM
// (>= 2.02.99) can allocate 100% of the free VG space directly; older LVM
// needs a small pool first which is then grown with lvextend.
func (s *storageLvm) createDefaultThinPool(vgName string, thinPoolName string, lvName string, lvFsType string) error {
	isRecent, err := s.lvmVersionIsAtLeast("2.02.99")
	if err != nil {
		return fmt.Errorf("Error checking LVM version: %s", err)
	}

	// Create the thin pool
	lvmThinPool := fmt.Sprintf("%s/%s", vgName, thinPoolName)
	var output []byte
	if isRecent {
		output, err = tryExec(
			"lvcreate",
			"--poolmetadatasize", "1G",
			"-l", "100%FREE",
			"--thinpool", lvmThinPool)
	} else {
		output, err = tryExec(
			"lvcreate",
			"--poolmetadatasize", "1G",
			"-L", "1G",
			"--thinpool", lvmThinPool)
	}

	if err != nil {
		s.log.Error("Could not create thin pool", log.Ctx{"name": thinPoolName, "err": err, "output": string(output)})
		return fmt.Errorf("Could not create LVM thin pool named %s", thinPoolName)
	}

	if !isRecent {
		// Grow it to the maximum VG size (two step process required by old LVM)
		output, err = tryExec("lvextend", "--alloc", "anywhere", "-l", "100%FREE", lvmThinPool)

		if err != nil {
			s.log.Error("Could not grow thin pool", log.Ctx{"name": thinPoolName, "err": err, "output": string(output)})
			return fmt.Errorf("Could not grow LVM thin pool named %s", thinPoolName)
		}
	}

	return nil
}
// removeLV force-removes the LV "<volumeType>_<lvName>" from the volume
// group vgName via lvremove -f.
func (s *storageLvm) removeLV(vgName string, volumeType string, lvName string) error {
	devPath := getLvmDevPath(vgName, volumeType, lvName)

	output, err := tryExec("lvremove", "-f", devPath)
	if err == nil {
		return nil
	}

	s.log.Error("Could not remove LV", log.Ctx{"lvname": lvName, "output": string(output)})
	return fmt.Errorf("Could not remove LV named %s", lvName)
}
// createSnapshotLV creates a snapshot LV named "<volumeType>_<lvName>" from
// the LV "<origVolumeType>_<origLvName>", activates it (read-only when
// requested) and returns the device path of the new LV.
func (s *storageLvm) createSnapshotLV(vgName string, origLvName string, origVolumeType string, lvName string, volumeType string, readonly bool) (string, error) {
	sourceLvmVolumePath := getLvmDevPath(vgName, origVolumeType, origLvName)
	s.log.Debug("in createSnapshotLV:", log.Ctx{"lvname": lvName, "dev string": sourceLvmVolumePath})

	isRecent, err := s.lvmVersionIsAtLeast("2.02.99")
	if err != nil {
		return "", fmt.Errorf("Error checking LVM version: %v", err)
	}

	lvmPoolVolumeName := getPrefixedLvName(volumeType, lvName)
	var output []byte
	if isRecent {
		// -kn: don't skip activation for the snapshot (recent LVM
		// defaults thin snapshots to "activation skip").
		output, err = tryExec(
			"lvcreate",
			"-kn",
			"-n", lvmPoolVolumeName,
			"-s", sourceLvmVolumePath)
	} else {
		output, err = tryExec(
			"lvcreate",
			"-n", lvmPoolVolumeName,
			"-s", sourceLvmVolumePath)
	}
	if err != nil {
		s.log.Error("Could not create LV snapshot", log.Ctx{"lvname": lvName, "origlvname": origLvName, "output": string(output)})
		return "", fmt.Errorf("Could not create snapshot LV named %s", lvName)
	}

	targetLvmVolumePath := getLvmDevPath(vgName, volumeType, lvName)
	// Activate the new snapshot; -pr additionally marks it read-only.
	if readonly {
		output, err = tryExec("lvchange", "-ay", "-pr", targetLvmVolumePath)
	} else {
		output, err = tryExec("lvchange", "-ay", targetLvmVolumePath)
	}
	if err != nil {
		return "", fmt.Errorf("Could not activate new snapshot '%s': %v\noutput:%s", lvName, err, string(output))
	}

	return targetLvmVolumePath, nil
}
// renameLV renames the LV "<volumeType>_<oldName>" to
// "<volumeType>_<newName>" inside the pool's volume group, returning the
// lvrename output together with any error.
func (s *storageLvm) renameLV(oldName string, newName string, volumeType string) (string, error) {
	from := getPrefixedLvName(volumeType, oldName)
	to := getPrefixedLvName(volumeType, newName)

	output, err := tryExec("lvrename", s.pool.Name, from, to)
	return string(output), err
}
// MigrationType returns the transfer mechanism used when migrating
// containers off this backend; LVM only supports plain rsync.
func (s *storageLvm) MigrationType() MigrationFSType {
	return MigrationFSType_RSYNC
}
// PreservesInodes reports whether migration keeps inode numbers stable;
// rsync-based transfer does not.
func (s *storageLvm) PreservesInodes() bool {
	return false
}
// MigrationSource returns an rsync-based migration source driver for the
// given container.
func (s *storageLvm) MigrationSource(container container) (MigrationStorageSourceDriver, error) {
	return rsyncMigrationSource(container)
}
// MigrationSink receives a migrating container (and its snapshots) over the
// websocket connection using the generic rsync sink.
func (s *storageLvm) MigrationSink(live bool, container container, snapshots []*Snapshot, conn *websocket.Conn, srcIdmap *shared.IdmapSet, op *operation) error {
	return rsyncMigrationSink(live, container, snapshots, conn, srcIdmap, op)
}
storage: remove deprecated lvm keys
Signed-off-by: Christian Brauner <48455ab3070520a2d174545c7239d6d0fabd9a83@ubuntu.com>
package main
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"syscall"
"github.com/gorilla/websocket"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
log "gopkg.in/inconshreveable/log15.v2"
)
// storageLVMThinpoolExists checks whether the volume group vgName contains
// an LV named poolName and whether that LV is a thin pool. A vgs exit status
// of 5 means "not found" and is reported as (false, nil); an LV that exists
// but is not a thin pool is an error.
func storageLVMThinpoolExists(vgName string, poolName string) (bool, error) {
	output, err := exec.Command("vgs", "--noheadings", "-o", "lv_attr", fmt.Sprintf("%s/%s", vgName, poolName)).Output()
	if err != nil {
		if exitError, ok := err.(*exec.ExitError); ok {
			waitStatus := exitError.Sys().(syscall.WaitStatus)
			if waitStatus.ExitStatus() == 5 {
				// pool LV was not found
				return false, nil
			}
		}
		return false, fmt.Errorf("Error checking for pool '%s'", poolName)
	}

	// Found LV named poolname, check type: the first lv_attr character is
	// "t" for thin pools. (Dropped the redundant full-slice expression
	// output[:] — output is already a []byte.)
	attrs := strings.TrimSpace(string(output))
	if strings.HasPrefix(attrs, "t") {
		return true, nil
	}

	return false, fmt.Errorf("Pool named '%s' exists but is not a thin pool.", poolName)
}
// storageLVMGetThinPoolUsers returns the names of all containers, snapshots
// and images that still use legacy ".lv" symlinks under the LXD var
// directory, i.e. entities that keep the old LVM thin pool in use.
func storageLVMGetThinPoolUsers(d *Daemon) ([]string, error) {
	results := []string{}

	cNames, err := dbContainersList(d.db, cTypeRegular)
	if err != nil {
		return results, err
	}

	for _, cName := range cNames {
		var lvLinkPath string
		// Snapshots keep their ".lv" link under "snapshots", regular
		// containers under "containers".
		if strings.Contains(cName, shared.SnapshotDelimiter) {
			lvLinkPath = shared.VarPath("snapshots", fmt.Sprintf("%s.lv", cName))
		} else {
			lvLinkPath = shared.VarPath("containers", fmt.Sprintf("%s.lv", cName))
		}

		if shared.PathExists(lvLinkPath) {
			results = append(results, cName)
		}
	}

	imageNames, err := dbImagesGet(d.db, false)
	if err != nil {
		return results, err
	}

	for _, imageName := range imageNames {
		imageLinkPath := shared.VarPath("images", fmt.Sprintf("%s.lv", imageName))
		if shared.PathExists(imageLinkPath) {
			results = append(results, imageName)
		}
	}

	return results, nil
}
// storageLVMValidateThinPoolName checks that the thin pool name in value may
// be (re)configured: no containers or images may still be using LVs, and if
// a non-empty name is given, a volume group must be set and the pool must
// actually exist inside it.
func storageLVMValidateThinPoolName(d *Daemon, vgName string, value string) error {
	users, err := storageLVMGetThinPoolUsers(d)
	if err != nil {
		return fmt.Errorf("Error checking if a pool is already in use: %v", err)
	}

	if len(users) > 0 {
		return fmt.Errorf("Can not change LVM config. Images or containers are still using LVs: %v", users)
	}

	if value != "" {
		if vgName == "" {
			return fmt.Errorf("Can not set lvm_thinpool_name without lvm_vg_name set.")
		}

		poolExists, err := storageLVMThinpoolExists(vgName, value)
		if err != nil {
			return fmt.Errorf("Error checking for thin pool '%s' in '%s': %v", value, vgName, err)
		}

		if !poolExists {
			return fmt.Errorf("Pool '%s' does not exist in Volume Group '%s'", value, vgName)
		}
	}

	return nil
}
// xfsGenerateNewUUID assigns a fresh random UUID to the xfs filesystem on
// lvpath via xfs_admin, so that it can be mounted alongside its origin.
func xfsGenerateNewUUID(lvpath string) error {
	cmd := exec.Command("xfs_admin", "-U", "generate", lvpath)

	output, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("Error generating new UUID: %v\noutput:'%s'", err, string(output))
	}

	return nil
}
// containerNameToLVName escapes a container name into a valid LV name:
// every "-" is doubled first, so that the single "-" substituted for the
// snapshot delimiter afterwards stays unambiguous.
func containerNameToLVName(containerName string) string {
	escaped := strings.Replace(containerName, "-", "--", -1)
	return strings.Replace(escaped, shared.SnapshotDelimiter, "-", -1)
}
// storageLvm implements the LVM storage backend; all common state (pool,
// volume, logger, …) comes from the embedded storageShared.
type storageLvm struct {
	storageShared
}
// getLvmDevPath returns the device node path of an LV, i.e.
// "/dev/<pool>/<volumeType>_<volume>".
func getLvmDevPath(lvmPool string, volumeType string, lvmVolume string) string {
	return "/dev/" + lvmPool + "/" + volumeType + "_" + lvmVolume
}
// getPrefixedLvName builds the LV name "<volumeType>_<volume>" used to
// namespace containers, images and custom volumes inside one volume group.
func getPrefixedLvName(volumeType string, lvmVolume string) string {
	return volumeType + "_" + lvmVolume
}
// getTmpSnapshotName returns the name of the temporary writable LV created
// when a snapshot is started: "<snap>_tmp".
func getTmpSnapshotName(snap string) string {
	return snap + "_tmp"
}
// StorageCoreInit initializes the minimal information we need about the LVM
// storage type: its name and the detected LVM version string, which is
// assembled by joining the value of every "key: value" line from
// "lvm version" with " / ".
func (s *storageLvm) StorageCoreInit() (*storageCore, error) {
	sCore := storageCore{}
	sCore.sType = storageTypeLvm
	typeName, err := storageTypeToString(sCore.sType)
	if err != nil {
		return nil, err
	}
	sCore.sTypeName = typeName

	output, err := exec.Command("lvm", "version").CombinedOutput()
	if err != nil {
		return nil, fmt.Errorf("Error getting LVM version: %v\noutput:'%s'", err, string(output))
	}
	lines := strings.Split(string(output), "\n")

	sCore.sTypeVersion = ""
	for idx, line := range lines {
		// Each relevant line looks like "  LVM version:   2.02.98(2) ...".
		fields := strings.SplitAfterN(line, ":", 2)
		if len(fields) < 2 {
			continue
		}
		if idx > 0 {
			sCore.sTypeVersion += " / "
		}
		sCore.sTypeVersion += strings.TrimSpace(fields[1])
	}

	err = sCore.initShared()
	if err != nil {
		return nil, err
	}

	s.storageCore = sCore

	return &sCore, nil
}
// StoragePoolInit prepares the backend for use on this pool; the config
// argument is currently unused as all state lives in the core fields.
func (s *storageLvm) StoragePoolInit(config map[string]interface{}) (storage, error) {
	if _, err := s.StorageCoreInit(); err != nil {
		return s, err
	}

	return s, nil
}
// StoragePoolCheck verifies the pool is usable; nothing needs checking for
// LVM, so it always succeeds.
func (s *storageLvm) StoragePoolCheck() error {
	return nil
}
// versionSplit parses an LVM version string of the form
// "major.minor.incremental" (optionally with a parenthesised suffix on the
// last field, e.g. "2.02.98(2)") into its three integer components.
// A string with fewer than three dot-separated fields is rejected with an
// error instead of panicking on the missing index.
func versionSplit(versionString string) (int, int, int, error) {
	fs := strings.Split(versionString, ".")
	if len(fs) < 3 {
		return 0, 0, 0, fmt.Errorf("Invalid LVM version string: %s", versionString)
	}
	majs, mins, incs := fs[0], fs[1], fs[2]

	maj, err := strconv.Atoi(majs)
	if err != nil {
		return 0, 0, 0, err
	}
	min, err := strconv.Atoi(mins)
	if err != nil {
		return 0, 0, 0, err
	}
	// Strip a "(build)" style suffix from the incremental part.
	incs = strings.Split(incs, "(")[0]
	inc, err := strconv.Atoi(incs)
	if err != nil {
		return 0, 0, 0, err
	}

	return maj, min, inc, nil
}
// lvmVersionIsAtLeast reports whether the detected LVM version (the first
// " / "-separated field of sTypeVersion) is at least versionString.
func (s *storageLvm) lvmVersionIsAtLeast(versionString string) (bool, error) {
	lvmVersion := strings.Split(s.sTypeVersion, "/")[0]

	lvmMaj, lvmMin, lvmInc, err := versionSplit(lvmVersion)
	if err != nil {
		return false, err
	}

	inMaj, inMin, inInc, err := versionSplit(versionString)
	if err != nil {
		return false, err
	}

	// Compare component-wise with earlier components dominating. The
	// previous "lvmMaj < inMaj || lvmMin < inMin || lvmInc < inInc" form
	// was wrong: it rejected e.g. 3.01.00 when only 2.02.99 was required.
	if lvmMaj != inMaj {
		return lvmMaj > inMaj, nil
	}
	if lvmMin != inMin {
		return lvmMin > inMin, nil
	}

	return lvmInc >= inInc, nil
}
// StoragePoolCreate creates the LVM storage pool from the configured
// "source" block device: a mountpoint directory, then a physical volume, and
// finally a volume group named after the pool. Each step registers an undo
// that runs if a later step fails.
func (s *storageLvm) StoragePoolCreate() error {
	tryUndo := true

	source := s.pool.Config["source"]
	if source == "" {
		return fmt.Errorf("No \"source\" property found for the storage pool.")
	}

	// Create the mountpoint for the storage pool.
	poolMntPoint := getStoragePoolMountPoint(s.pool.Name)
	err := os.MkdirAll(poolMntPoint, 0711)
	if err != nil {
		return err
	}
	defer func() {
		if tryUndo {
			os.Remove(poolMntPoint)
		}
	}()

	// Only physical block devices are accepted as the pool source.
	if !shared.IsBlockdevPath(source) {
		return fmt.Errorf("Loop backed lvm storage volumes are currently not supported.")
	}

	// Create a lvm physical volume.
	output, err := exec.Command("pvcreate", source).CombinedOutput()
	if err != nil {
		return fmt.Errorf("Failed to create the physical volume for the lvm storage pool: %s.", output)
	}
	defer func() {
		if tryUndo {
			exec.Command("pvremove", source).Run()
		}
	}()

	// Create a volume group on the physical volume.
	output, err = exec.Command("vgcreate", s.pool.Name, source).CombinedOutput()
	if err != nil {
		return fmt.Errorf("Failed to create the volume group for the lvm storage pool: %s.", output)
	}

	// From now on the pool is addressed by its volume group name.
	s.pool.Config["source"] = s.pool.Name

	// Deregister cleanup.
	tryUndo = false

	return nil
}
// StoragePoolDelete destroys the pool's volume group (forcefully, along with
// any remaining LVs) and removes the pool mountpoint directory.
func (s *storageLvm) StoragePoolDelete() error {
	source := s.pool.Config["source"]
	if source == "" {
		return fmt.Errorf("No \"source\" property found for the storage pool.")
	}

	// Remove the volume group.
	output, err := exec.Command("vgremove", "-f", s.pool.Name).CombinedOutput()
	if err != nil {
		return fmt.Errorf("Failed to destroy the volume group for the lvm storage pool: %s.", output)
	}

	// Delete the mountpoint for the storage pool.
	return os.RemoveAll(getStoragePoolMountPoint(s.pool.Name))
}
// StoragePoolMount is a no-op for LVM: there is nothing pool-wide to mount,
// so it always reports success.
func (s *storageLvm) StoragePoolMount() (bool, error) {
	return true, nil
}
// StoragePoolUmount is a no-op for LVM: there is nothing pool-wide to
// unmount, so it always reports success.
func (s *storageLvm) StoragePoolUmount() (bool, error) {
	return true, nil
}
// StoragePoolVolumeCreate creates a custom storage volume as a thin LV in
// the pool's volume group, creates its mountpoint directory and mounts it.
// On failure after the LV exists, everything is rolled back via
// StoragePoolVolumeDelete.
func (s *storageLvm) StoragePoolVolumeCreate() error {
	tryUndo := true

	vgName := s.pool.Name
	thinPoolName := s.volume.Config["lvm.thinpool_name"]
	lvFsType := s.volume.Config["block.filesystem"]
	lvSize := s.volume.Config["size"]

	volumeType, err := storagePoolVolumeTypeNameToApiEndpoint(s.volume.Type)
	if err != nil {
		return err
	}

	err = s.createThinLV(vgName, thinPoolName, s.volume.Name, lvFsType, lvSize, volumeType)
	if err != nil {
		s.log.Error("LVMCreateThinLV", log.Ctx{"err": err})
		// Fixed message: this path creates a custom storage volume,
		// not an image.
		return fmt.Errorf("Error Creating LVM LV for new storage volume: %v", err)
	}
	defer func() {
		if tryUndo {
			s.StoragePoolVolumeDelete()
		}
	}()

	customPoolVolumeMntPoint := getStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
	err = os.MkdirAll(customPoolVolumeMntPoint, 0711)
	if err != nil {
		return err
	}

	_, err = s.StoragePoolVolumeMount()
	if err != nil {
		return err
	}

	// Deregister the undo handler.
	tryUndo = false

	return nil
}
// StoragePoolVolumeDelete unmounts a custom storage volume, removes its LV
// and deletes the (now empty) mountpoint directory if it still exists.
func (s *storageLvm) StoragePoolVolumeDelete() error {
	mntPoint := getStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)

	if _, err := s.StoragePoolVolumeUmount(); err != nil {
		return err
	}

	volumeType, err := storagePoolVolumeTypeNameToApiEndpoint(s.volume.Type)
	if err != nil {
		return err
	}

	if err := s.removeLV(s.pool.Name, volumeType, s.volume.Name); err != nil {
		return err
	}

	if shared.PathExists(mntPoint) {
		if err := os.Remove(mntPoint); err != nil {
			return err
		}
	}

	return nil
}
// StoragePoolVolumeMount mounts a custom storage volume's LV on its
// mountpoint. The boolean return reports whether this call performed the
// mount (false means it was already mounted).
func (s *storageLvm) StoragePoolVolumeMount() (bool, error) {
	mntPoint := getStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
	if shared.IsMountPoint(mntPoint) {
		return false, nil
	}

	volumeType, err := storagePoolVolumeTypeNameToApiEndpoint(s.volume.Type)
	if err != nil {
		return false, err
	}

	devPath := getLvmDevPath(s.pool.Name, volumeType, s.volume.Name)
	fsType := s.volume.Config["block.filesystem"]
	mountOptions := s.volume.Config["block.mount_options"]
	if err := tryMount(devPath, mntPoint, fsType, 0, mountOptions); err != nil {
		return false, err
	}

	return true, nil
}
// StoragePoolVolumeUmount unmounts a custom storage volume. The boolean
// return reports whether this call performed the unmount (false means it
// was not mounted).
func (s *storageLvm) StoragePoolVolumeUmount() (bool, error) {
	mntPoint := getStoragePoolVolumeMountPoint(s.pool.Name, s.volume.Name)
	if !shared.IsMountPoint(mntPoint) {
		return false, nil
	}

	if err := tryUnmount(mntPoint, 0); err != nil {
		return false, err
	}

	return true, nil
}
// GetStoragePoolWritable returns the modifiable (PUT) representation of the
// storage pool this backend is bound to.
func (s *storageLvm) GetStoragePoolWritable() api.StoragePoolPut {
	return s.pool.Writable()
}
// GetStoragePoolVolumeWritable returns the modifiable (PUT) representation
// of the current storage volume.
func (s *storageLvm) GetStoragePoolVolumeWritable() api.StorageVolumePut {
	return s.volume.Writable()
}
// SetStoragePoolWritable replaces the pool's modifiable configuration with
// the given PUT representation.
func (s *storageLvm) SetStoragePoolWritable(writable *api.StoragePoolPut) {
	s.pool.StoragePoolPut = *writable
}
// SetStoragePoolVolumeWritable replaces the volume's modifiable
// configuration with the given PUT representation.
func (s *storageLvm) SetStoragePoolVolumeWritable(writable *api.StorageVolumePut) {
	s.volume.StorageVolumePut = *writable
}
// ContainerPoolGet returns the name of the storage pool containers on this
// backend live in.
func (s *storageLvm) ContainerPoolGet() string {
	return s.pool.Name
}
// ContainerPoolIDGet returns the database ID of the backend's storage pool.
func (s *storageLvm) ContainerPoolIDGet() int64 {
	return s.poolID
}
// StoragePoolUpdate validates a requested change to the pool configuration.
// Immutable keys produce an error (checked in a fixed order, so the first
// offending key wins); the mutable volume keys "volume.block.mount_options",
// "volume.block.filesystem" and "volume.size" are accepted without any
// action being required here.
func (s *storageLvm) StoragePoolUpdate(changedConfig []string) error {
	immutable := []string{
		"size",
		"source",
		"volume.zfs.use_refquota",
		"volume.zfs.remove_snapshots",
		"zfs.pool_name",
		"volume.lvm.thinpool_name",
	}

	for _, key := range immutable {
		if shared.StringInSlice(key, changedConfig) {
			return fmt.Errorf("The \"%s\" property cannot be changed.", key)
		}
	}

	return nil
}
// StoragePoolVolumeUpdate validates a requested change to the volume
// configuration. Only a change consisting solely of "block.mount_options"
// is accepted (and needs no action); anything else is rejected.
func (s *storageLvm) StoragePoolVolumeUpdate(changedConfig []string) error {
	if len(changedConfig) != 1 || changedConfig[0] != "block.mount_options" {
		return fmt.Errorf("The properties \"%v\" cannot be changed.", changedConfig)
	}

	return nil
}
// ContainerCreate provisions an empty container (or snapshot) as a fresh
// thin LV plus the appropriate mountpoint and symlink. On failure after the
// LV exists, everything is rolled back via ContainerDelete.
func (s *storageLvm) ContainerCreate(container container) error {
	tryUndo := true

	containerName := container.Name()
	containerLvmName := containerNameToLVName(containerName)
	thinPoolName := s.volume.Config["lvm.thinpool_name"]
	lvFsType := s.volume.Config["block.filesystem"]
	lvSize := s.volume.Config["size"]

	err := s.createThinLV(s.pool.Name, thinPoolName, containerLvmName, lvFsType, lvSize, storagePoolVolumeApiEndpointContainers)
	if err != nil {
		return err
	}
	defer func() {
		if tryUndo {
			s.ContainerDelete(container)
		}
	}()

	if container.IsSnapshot() {
		// Snapshot: mountpoint under the pool's snapshots directory
		// plus a symlink named after the parent container.
		containerMntPoint := getSnapshotMountPoint(s.pool.Name, containerName)
		fields := strings.SplitN(containerName, shared.SnapshotDelimiter, 2)
		sourceName := fields[0]
		snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.Name, "snapshots", sourceName)
		snapshotMntPointSymlink := shared.VarPath("snapshots", sourceName)
		err := os.MkdirAll(containerMntPoint, 0755)
		if err != nil {
			return err
		}
		err = createSnapshotMountpoint(containerMntPoint, snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
		if err != nil {
			return err
		}
	} else {
		// Regular container: mountpoint under the pool's containers
		// directory plus the container path symlink.
		containerMntPoint := getContainerMountPoint(s.pool.Name, containerName)
		containerPath := container.Path()
		err := os.MkdirAll(containerMntPoint, 0755)
		if err != nil {
			return err
		}
		err = createContainerMountpoint(containerMntPoint, containerPath, container.IsPrivileged())
		if err != nil {
			return err
		}
	}

	// Deregister the undo handler.
	tryUndo = false

	return nil
}
// ContainerCreateFromImage creates a container as a writable snapshot LV of
// the image LV identified by fingerprint, creating the image LV first if it
// does not exist yet (serialized against concurrent creators of the same
// image), then sets up the mountpoint, fixes permissions/idmap and applies
// the "create" template. On failure after the container LV exists it is
// rolled back via ContainerDelete.
func (s *storageLvm) ContainerCreateFromImage(container container, fingerprint string) error {
	tryUndo := true

	// Check if the image already exists.
	imageMntPoint := getImageMountPoint(s.pool.Name, fingerprint)
	imageLvmDevPath := getLvmDevPath(s.pool.Name, storagePoolVolumeApiEndpointImages, fingerprint)

	// Serialize image creation per (pool, fingerprint): if another
	// goroutine is already creating it, wait for its channel to close.
	imageStoragePoolLockID := fmt.Sprintf("%s/%s", s.pool.Name, fingerprint)
	lxdStorageLock.Lock()
	if waitChannel, ok := lxdStorageLockMap[imageStoragePoolLockID]; ok {
		lxdStorageLock.Unlock()
		if _, ok := <-waitChannel; ok {
			shared.LogWarnf("Value transmitted over image lock semaphore?")
		}
	} else {
		lxdStorageLockMap[imageStoragePoolLockID] = make(chan bool)
		lxdStorageLock.Unlock()

		var imgerr error
		if !shared.PathExists(imageMntPoint) || !shared.PathExists(imageLvmDevPath) {
			imgerr = s.ImageCreate(fingerprint)
		}

		lxdStorageLock.Lock()
		if waitChannel, ok := lxdStorageLockMap[imageStoragePoolLockID]; ok {
			close(waitChannel)
			delete(lxdStorageLockMap, imageStoragePoolLockID)
		}
		lxdStorageLock.Unlock()

		if imgerr != nil {
			return imgerr
		}
	}

	containerName := container.Name()
	containerLvmName := containerNameToLVName(containerName)
	containerLvSnapshotPath, err := s.createSnapshotLV(s.pool.Name, fingerprint, storagePoolVolumeApiEndpointImages, containerLvmName, storagePoolVolumeApiEndpointContainers, false)
	if err != nil {
		return err
	}
	defer func() {
		if tryUndo {
			s.ContainerDelete(container)
		}
	}()

	containerMntPoint := getContainerMountPoint(s.pool.Name, containerName)
	containerPath := container.Path()
	err = os.MkdirAll(containerMntPoint, 0755)
	if err != nil {
		return err
	}
	err = createContainerMountpoint(containerMntPoint, containerPath, container.IsPrivileged())
	if err != nil {
		return err
	}

	// Generate a new xfs's UUID
	// (the snapshot would otherwise share the image filesystem's UUID).
	lvFsType := s.volume.Config["block.filesystem"]
	if lvFsType == "xfs" {
		err := xfsGenerateNewUUID(containerLvSnapshotPath)
		if err != nil {
			return err
		}
	}

	ourMount, err := s.ContainerMount(containerName, containerPath)
	if err != nil {
		return err
	}
	if ourMount {
		defer s.ContainerUmount(containerName, containerPath)
	}

	// Privileged containers keep their rootfs private.
	if container.IsPrivileged() {
		err = os.Chmod(containerMntPoint, 0700)
	} else {
		err = os.Chmod(containerMntPoint, 0755)
	}
	if err != nil {
		return err
	}

	// Unprivileged containers need their uid/gid range shifted.
	if !container.IsPrivileged() {
		err := s.shiftRootfs(container)
		if err != nil {
			return err
		}
	}

	err = container.TemplateApply("create")
	if err != nil {
		s.log.Error("Error in create template during ContainerCreateFromImage, continuing to unmount", log.Ctx{"err": err})
		return err
	}

	// Deregister the undo handler.
	tryUndo = false

	return nil
}
// ContainerCanRestore reports whether container can be restored from
// sourceContainer; the LVM backend imposes no restrictions.
func (s *storageLvm) ContainerCanRestore(container container, sourceContainer container) error {
	return nil
}
// ContainerDelete removes a container's (or snapshot's) LV and its
// mountpoint plus associated symlink.
func (s *storageLvm) ContainerDelete(container container) error {
	containerName := container.Name()
	containerLvmName := containerNameToLVName(containerName)
	containerMntPoint := ""

	if container.IsSnapshot() {
		containerMntPoint = getSnapshotMountPoint(s.pool.Name, containerName)
	} else {
		containerMntPoint = getContainerMountPoint(s.pool.Name, containerName)
	}

	// Make sure that the container is really unmounted at this point.
	// Otherwise we will fail.
	if shared.IsMountPoint(containerMntPoint) {
		err := tryUnmount(containerMntPoint, 0)
		if err != nil {
			return fmt.Errorf("failed to unmount container path '%s': %s", containerMntPoint, err)
		}
	}

	err := s.removeLV(s.pool.Name, storagePoolVolumeApiEndpointContainers, containerLvmName)
	if err != nil {
		return err
	}

	if container.IsSnapshot() {
		// Remove the snapshot mountpoint and, if this is the last
		// snapshot, the parent's snapshots symlink as well.
		fields := strings.SplitN(containerName, shared.SnapshotDelimiter, 2)
		sourceName := fields[0]
		snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", s.pool.Name, "snapshots", sourceName)
		snapshotMntPointSymlink := shared.VarPath("snapshots", sourceName)
		err = deleteSnapshotMountpoint(containerMntPoint, snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
	} else {
		// NOTE(review): this unmount error is immediately overwritten
		// by the next assignment — looks like a best-effort second
		// unmount attempt (the path was already unmounted above);
		// confirm the discard is intentional.
		err = tryUnmount(containerMntPoint, 0)
		err = deleteContainerMountpoint(containerMntPoint, container.Path(), s.GetStorageTypeName())
	}
	if err != nil {
		return err
	}

	return nil
}
// ContainerCopy copies sourceContainer into container. When the source also
// lives on LVM, a writable snapshot LV is taken (cheap copy-on-write);
// otherwise an empty LVM container is created and the source rootfs is
// rsynced across. A partially-created target is deleted on failure.
func (s *storageLvm) ContainerCopy(container container, sourceContainer container) error {
	tryUndo := true

	// Keep the source's storage available for the duration of the copy.
	err := sourceContainer.StorageStart()
	if err != nil {
		return err
	}
	defer sourceContainer.StorageStop()

	if sourceContainer.Storage().GetStorageType() == storageTypeLvm {
		// Same backend: snapshot the source LV directly.
		err := s.createSnapshotContainer(container, sourceContainer, false)
		if err != nil {
			s.log.Error("Error creating snapshot LV for copy", log.Ctx{"err": err})
			return err
		}
	} else {
		// Cross-backend copy: create an empty target and rsync into it.
		sourceContainerName := sourceContainer.Name()
		targetContainerName := container.Name()
		s.log.Info("Copy from Non-LVM container", log.Ctx{"container": targetContainerName, "sourceContainer": sourceContainerName})
		err := s.ContainerCreate(container)
		if err != nil {
			s.log.Error("Error creating empty container", log.Ctx{"err": err})
			return err
		}
		// Undo handler: remove the target if anything below fails.
		defer func() {
			if tryUndo {
				s.ContainerDelete(container)
			}
		}()

		targetContainerPath := container.Path()
		ourSourceMount, err := s.ContainerMount(targetContainerName, targetContainerPath)
		if err != nil {
			s.log.Error("Error starting/mounting container", log.Ctx{"err": err, "container": targetContainerName})
			return err
		}
		if ourSourceMount {
			defer s.ContainerUmount(targetContainerName, targetContainerPath)
		}

		sourceContainerPath := sourceContainer.Path()
		ourTargetMount, err := sourceContainer.Storage().ContainerMount(sourceContainerName, sourceContainerPath)
		if err != nil {
			return err
		}
		if ourTargetMount {
			// NOTE(review): the source is unmounted immediately (not deferred)
			// before the rsync below, which then reads the pool-level
			// mountpoint — confirm this is intended for all source backends.
			sourceContainer.Storage().ContainerUmount(sourceContainerName, sourceContainerPath)
		}

		sourcePool := sourceContainer.Storage().ContainerPoolGet()
		sourceContainerMntPoint := getContainerMountPoint(sourcePool, sourceContainerName)
		targetContainerMntPoint := getContainerMountPoint(s.pool.Name, targetContainerName)
		output, err := storageRsyncCopy(sourceContainerMntPoint, targetContainerMntPoint)
		if err != nil {
			s.log.Error("ContainerCopy: rsync failed", log.Ctx{"output": string(output)})
			s.ContainerDelete(container)
			return fmt.Errorf("rsync failed: %s", string(output))
		}
	}

	// Apply any "copy" templates inside the new container.
	err = container.TemplateApply("copy")
	if err != nil {
		return err
	}

	tryUndo = false
	return nil
}
// ContainerMount mounts the container's LV at its mountpoint. Concurrent
// callers for the same pool/container are serialized via lxdStorageLockMap:
// the first caller inserts a wait channel and performs the mount; later
// callers block until that channel closes and then report that they did not
// mount anything themselves. Returns true if this call performed the mount.
func (s *storageLvm) ContainerMount(name string, path string) (bool, error) {
	containerLvmName := containerNameToLVName(name)
	lvFsType := s.volume.Config["block.filesystem"]
	containerLvmPath := getLvmDevPath(s.pool.Name, storagePoolVolumeApiEndpointContainers, containerLvmName)
	mountOptions := s.volume.Config["block.mount_options"]
	containerMntPoint := getContainerMountPoint(s.pool.Name, name)

	containerMountLockID := fmt.Sprintf("mount/%s/%s", s.pool.Name, name)
	lxdStorageLock.Lock()
	if waitChannel, ok := lxdStorageLockMap[containerMountLockID]; ok {
		lxdStorageLock.Unlock()
		// Another mount for this container is in flight; wait for it to
		// finish (the channel is closed, never sent on — a received value
		// indicates a protocol violation).
		if _, ok := <-waitChannel; ok {
			shared.LogWarnf("Value transmitted over image lock semaphore?")
		}
		// Give the benefit of the doubt and assume that the other
		// thread actually succeeded in mounting the storage volume.
		return false, nil
	}

	lxdStorageLockMap[containerMountLockID] = make(chan bool)
	lxdStorageLock.Unlock()

	var imgerr error
	ourMount := false
	if !shared.IsMountPoint(containerMntPoint) {
		imgerr = tryMount(containerLvmPath, containerMntPoint, lvFsType, 0, mountOptions)
		ourMount = true
	}

	// Wake up any waiters and release the in-flight marker.
	lxdStorageLock.Lock()
	if waitChannel, ok := lxdStorageLockMap[containerMountLockID]; ok {
		close(waitChannel)
		delete(lxdStorageLockMap, containerMountLockID)
	}
	lxdStorageLock.Unlock()

	if imgerr != nil {
		return false, imgerr
	}

	return ourMount, nil
}
// ContainerUmount unmounts the container's mountpoint, using the same
// lxdStorageLockMap serialization protocol as ContainerMount (keyed on
// "umount/..."). Returns true if this call performed the unmount.
func (s *storageLvm) ContainerUmount(name string, path string) (bool, error) {
	containerMntPoint := getContainerMountPoint(s.pool.Name, name)

	containerUmountLockID := fmt.Sprintf("umount/%s/%s", s.pool.Name, name)
	lxdStorageLock.Lock()
	if waitChannel, ok := lxdStorageLockMap[containerUmountLockID]; ok {
		lxdStorageLock.Unlock()
		// Another unmount for this container is in flight; wait for it.
		if _, ok := <-waitChannel; ok {
			shared.LogWarnf("Value transmitted over image lock semaphore?")
		}
		// Give the benefit of the doubt and assume that the other
		// thread actually succeeded in unmounting the storage volume.
		return false, nil
	}

	lxdStorageLockMap[containerUmountLockID] = make(chan bool)
	lxdStorageLock.Unlock()

	var imgerr error
	ourUmount := false
	if shared.IsMountPoint(containerMntPoint) {
		imgerr = tryUnmount(containerMntPoint, 0)
		ourUmount = true
	}

	// Wake up any waiters and release the in-flight marker.
	lxdStorageLock.Lock()
	if waitChannel, ok := lxdStorageLockMap[containerUmountLockID]; ok {
		close(waitChannel)
		delete(lxdStorageLockMap, containerUmountLockID)
	}
	lxdStorageLock.Unlock()

	if imgerr != nil {
		return false, imgerr
	}

	return ourUmount, nil
}
// ContainerRename renames a container's LV and, for non-snapshots, also
// recursively renames every snapshot beneath it plus the on-disk
// mountpoints and symlinks. If the LV rename succeeds but a later step
// fails, the LV rename is rolled back.
func (s *storageLvm) ContainerRename(container container, newContainerName string) error {
	tryUndo := true

	oldName := container.Name()
	oldLvmName := containerNameToLVName(oldName)
	newLvmName := containerNameToLVName(newContainerName)

	// The LV must not be mounted while it is being renamed.
	_, err := s.ContainerUmount(oldName, container.Path())
	if err != nil {
		return err
	}

	output, err := s.renameLV(oldLvmName, newLvmName, storagePoolVolumeApiEndpointContainers)
	if err != nil {
		s.log.Error("Failed to rename a container LV", log.Ctx{"oldName": oldLvmName, "newName": newLvmName, "err": err, "output": string(output)})
		return fmt.Errorf("Failed to rename a container LV, oldName='%s', newName='%s', err='%s'", oldLvmName, newLvmName, err)
	}
	// Roll the LV rename back if anything below fails.
	defer func() {
		if tryUndo {
			s.renameLV(newLvmName, oldLvmName, storagePoolVolumeApiEndpointContainers)
		}
	}()

	// MAYBE(FIXME(brauner)): Register another cleanup function that tries to
	// rename already renamed snapshots back to their old name when the
	// rename fails.
	if !container.IsSnapshot() {
		// Rename all snapshots of this container first (recursive call
		// takes the snapshot branch above).
		snaps, err := container.Snapshots()
		if err != nil {
			return err
		}

		for _, snap := range snaps {
			baseSnapName := filepath.Base(snap.Name())
			newSnapshotName := newContainerName + shared.SnapshotDelimiter + baseSnapName
			err := s.ContainerRename(snap, newSnapshotName)
			if err != nil {
				return err
			}
		}

		// Move the container mountpoint and refresh its symlink.
		oldContainerMntPoint := getContainerMountPoint(s.pool.Name, oldName)
		oldContainerMntPointSymlink := container.Path()
		newContainerMntPoint := getContainerMountPoint(s.pool.Name, newContainerName)
		newContainerMntPointSymlink := shared.VarPath("containers", newContainerName)
		err = renameContainerMountpoint(oldContainerMntPoint, oldContainerMntPointSymlink, newContainerMntPoint, newContainerMntPointSymlink)
		if err != nil {
			return err
		}

		// Move the snapshots directory, if one exists.
		oldSnapshotPath := getSnapshotMountPoint(s.pool.Name, oldName)
		newSnapshotPath := getSnapshotMountPoint(s.pool.Name, newContainerName)
		if shared.PathExists(oldSnapshotPath) {
			err = os.Rename(oldSnapshotPath, newSnapshotPath)
			if err != nil {
				return err
			}
		}

		// Recreate the snapshots symlink under the new name.
		oldSnapshotSymlink := shared.VarPath("snapshots", oldName)
		newSnapshotSymlink := shared.VarPath("snapshots", newContainerName)
		if shared.PathExists(oldSnapshotSymlink) {
			err := os.Remove(oldSnapshotSymlink)
			if err != nil {
				return err
			}

			err = os.Symlink(newSnapshotPath, newSnapshotSymlink)
			if err != nil {
				return err
			}
		}
	}

	tryUndo = false
	return nil
}
// ContainerRestore restores container from sourceContainer (typically one
// of its snapshots) by replacing the target's LV with a fresh snapshot of
// the source's LV. Both containers must reside on the same storage pool.
func (s *storageLvm) ContainerRestore(container container, sourceContainer container) error {
	err := sourceContainer.StorageStart()
	if err != nil {
		return err
	}
	defer sourceContainer.StorageStop()

	if s.pool.Name != sourceContainer.Storage().ContainerPoolGet() {
		return fmt.Errorf("Containers must be on the same pool to be restored.")
	}

	srcName := sourceContainer.Name()
	srcLvName := containerNameToLVName(srcName)
	if sourceContainer.IsSnapshot() {
		// Started snapshots are exposed through a temporary LV name.
		srcLvName = getTmpSnapshotName(srcLvName)
	}

	destName := container.Name()
	destLvName := containerNameToLVName(destName)

	_, err = container.Storage().ContainerUmount(container.Name(), container.Path())
	if err != nil {
		return err
	}

	// Remove the target's current LV before recreating it from the source.
	// BUGFIX: this previously passed the raw container name (destName)
	// instead of the LV name, so the removal targeted the wrong LV whenever
	// the two differ; use destLvName as the creation below does.
	err = s.removeLV(s.pool.Name, storagePoolVolumeApiEndpointContainers, destLvName)
	if err != nil {
		// Non-fatal: log and continue, the LV may not exist.
		s.log.Error(fmt.Sprintf("Failed to remove \"%s\": %s.", destName, err))
	}

	_, err = s.createSnapshotLV(s.pool.Name, srcLvName, storagePoolVolumeApiEndpointContainers, destLvName, storagePoolVolumeApiEndpointContainers, false)
	if err != nil {
		return fmt.Errorf("Error creating snapshot LV: %v", err)
	}

	return nil
}
// ContainerSetQuota is not supported by the LVM backend; it always errors.
func (s *storageLvm) ContainerSetQuota(container container, size int64) error {
	return fmt.Errorf("The LVM container backend doesn't support quotas.")
}
// ContainerGetUsage is not supported by the LVM backend; it always returns
// -1 with an error.
func (s *storageLvm) ContainerGetUsage(container container) (int64, error) {
	return -1, fmt.Errorf("The LVM container backend doesn't support quotas.")
}
// ContainerSnapshotCreate creates a read-only snapshot LV of
// sourceContainer for snapshotContainer.
func (s *storageLvm) ContainerSnapshotCreate(snapshotContainer container, sourceContainer container) error {
	return s.createSnapshotContainer(snapshotContainer, sourceContainer, true)
}
// createSnapshotContainer snapshots sourceContainer's LV into a new LV for
// snapshotContainer and creates the matching mountpoint (snapshot-style or
// container-style depending on the target). When readonly is true the new
// LV is activated read-only. A partially-created target is cleaned up on
// failure.
func (s *storageLvm) createSnapshotContainer(snapshotContainer container, sourceContainer container, readonly bool) error {
	tryUndo := true

	sourceContainerName := sourceContainer.Name()
	targetContainerName := snapshotContainer.Name()
	sourceContainerLvmName := containerNameToLVName(sourceContainerName)
	targetContainerLvmName := containerNameToLVName(targetContainerName)
	shared.LogDebug("Creating snapshot", log.Ctx{"srcName": sourceContainerName, "destName": targetContainerName})

	_, err := s.createSnapshotLV(s.pool.Name, sourceContainerLvmName, storagePoolVolumeApiEndpointContainers, targetContainerLvmName, storagePoolVolumeApiEndpointContainers, readonly)
	if err != nil {
		return fmt.Errorf("Error creating snapshot LV: %s", err)
	}
	// BUGFIX: the undo handler previously called s.ContainerCreate(), which
	// would try to provision yet another volume on failure. Undoing a
	// partial snapshot means deleting what was just created.
	defer func() {
		if tryUndo {
			s.ContainerDelete(snapshotContainer)
		}
	}()

	targetContainerMntPoint := ""
	targetContainerPath := snapshotContainer.Path()
	targetIsSnapshot := snapshotContainer.IsSnapshot()
	if targetIsSnapshot {
		// Snapshots get a mountpoint under the pool's snapshots directory
		// plus a per-source symlink.
		targetContainerMntPoint = getSnapshotMountPoint(s.pool.Name, targetContainerName)
		sourceFields := strings.SplitN(sourceContainerName, shared.SnapshotDelimiter, 2)
		sourceName := sourceFields[0]
		sourcePool := sourceContainer.Storage().ContainerPoolGet()
		snapshotMntPointSymlinkTarget := shared.VarPath("storage-pools", sourcePool, "snapshots", sourceName)
		snapshotMntPointSymlink := shared.VarPath("snapshots", sourceName)
		err = createSnapshotMountpoint(targetContainerMntPoint, snapshotMntPointSymlinkTarget, snapshotMntPointSymlink)
	} else {
		targetContainerMntPoint = getContainerMountPoint(s.pool.Name, targetContainerName)
		err = createContainerMountpoint(targetContainerMntPoint, targetContainerPath, snapshotContainer.IsPrivileged())
	}
	if err != nil {
		return err
	}

	tryUndo = false
	return nil
}
// ContainerSnapshotDelete removes a snapshot through the regular container
// deletion path, wrapping any failure with the snapshot's name.
func (s *storageLvm) ContainerSnapshotDelete(snapshotContainer container) error {
	if err := s.ContainerDelete(snapshotContainer); err != nil {
		return fmt.Errorf("Error deleting snapshot %s: %s", snapshotContainer.Name(), err)
	}
	return nil
}
// ContainerSnapshotRename renames a snapshot's LV and moves its mountpoint.
// If moving the mountpoint fails the LV rename is rolled back.
func (s *storageLvm) ContainerSnapshotRename(snapshotContainer container, newContainerName string) error {
	tryUndo := true

	oldName := snapshotContainer.Name()
	oldLvmName := containerNameToLVName(oldName)
	newLvmName := containerNameToLVName(newContainerName)

	output, err := s.renameLV(oldLvmName, newLvmName, storagePoolVolumeApiEndpointContainers)
	if err != nil {
		s.log.Error("Failed to rename a snapshot LV", log.Ctx{"oldName": oldLvmName, "newName": newLvmName, "err": err, "output": string(output)})
		return fmt.Errorf("Failed to rename a container LV, oldName='%s', newName='%s', err='%s'", oldLvmName, newLvmName, err)
	}
	// Roll the LV rename back if the mountpoint move below fails.
	defer func() {
		if tryUndo {
			s.renameLV(newLvmName, oldLvmName, storagePoolVolumeApiEndpointContainers)
		}
	}()

	oldSnapshotMntPoint := getSnapshotMountPoint(s.pool.Name, oldName)
	newSnapshotMntPoint := getSnapshotMountPoint(s.pool.Name, newContainerName)
	err = os.Rename(oldSnapshotMntPoint, newSnapshotMntPoint)
	if err != nil {
		return err
	}

	tryUndo = false
	return nil
}
// ContainerSnapshotStart makes a snapshot usable by creating a temporary,
// writable snapshot-of-the-snapshot LV and mounting it at the snapshot's
// mountpoint. ContainerSnapshotStop removes the temporary LV again.
func (s *storageLvm) ContainerSnapshotStart(container container) error {
	tryUndo := true

	sourceName := container.Name()
	targetName := sourceName
	sourceLvmName := containerNameToLVName(sourceName)
	targetLvmName := containerNameToLVName(targetName)
	// The temporary LV gets a dedicated name so it can be told apart from
	// the real snapshot LV.
	tmpTargetLvmName := getTmpSnapshotName(targetLvmName)

	shared.LogDebug("Creating snapshot", log.Ctx{"srcName": sourceLvmName, "destName": targetLvmName})

	lvpath, err := s.createSnapshotLV(s.pool.Name, sourceLvmName, storagePoolVolumeApiEndpointContainers, tmpTargetLvmName, storagePoolVolumeApiEndpointContainers, false)
	if err != nil {
		return fmt.Errorf("Error creating snapshot LV: %s", err)
	}
	// Remove the temporary LV again if anything below fails.
	defer func() {
		if tryUndo {
			s.removeLV(s.pool.Name, storagePoolVolumeApiEndpointContainers, tmpTargetLvmName)
		}
	}()

	lvFsType := s.volume.Config["block.filesystem"]
	containerLvmPath := getLvmDevPath(s.pool.Name, storagePoolVolumeApiEndpointContainers, tmpTargetLvmName)
	mountOptions := s.volume.Config["block.mount_options"]
	containerMntPoint := getSnapshotMountPoint(s.pool.Name, sourceName)

	// Generate a new xfs's UUID — presumably needed because XFS refuses to
	// mount two filesystems with the same UUID simultaneously.
	if lvFsType == "xfs" {
		err := xfsGenerateNewUUID(lvpath)
		if err != nil {
			return err
		}
	}

	if !shared.IsMountPoint(containerMntPoint) {
		err = tryMount(containerLvmPath, containerMntPoint, lvFsType, 0, mountOptions)
		if err != nil {
			return fmt.Errorf("Error mounting snapshot LV path='%s': %s", containerMntPoint, err)
		}
	}

	tryUndo = false
	return nil
}
// ContainerSnapshotStop undoes ContainerSnapshotStart: it unmounts the
// snapshot mountpoint (if mounted) and removes the temporary snapshot LV.
func (s *storageLvm) ContainerSnapshotStop(container container) error {
	name := container.Name()

	mntPoint := getSnapshotMountPoint(s.pool.Name, name)
	if shared.IsMountPoint(mntPoint) {
		if err := tryUnmount(mntPoint, 0); err != nil {
			return err
		}
	}

	tmpLvName := getTmpSnapshotName(containerNameToLVName(name))
	return s.removeLV(s.pool.Name, storagePoolVolumeApiEndpointContainers, tmpLvName)
}
// ContainerSnapshotCreateEmpty creates an empty snapshot by provisioning a
// regular (empty) container LV for it.
func (s *storageLvm) ContainerSnapshotCreateEmpty(snapshotContainer container) error {
	return s.ContainerCreate(snapshotContainer)
}
// ImageCreate provisions a thin LV for the image identified by fingerprint,
// registers it in the pool volume database, mounts it, and unpacks the
// image tarball into it. On failure the half-created image is deleted.
func (s *storageLvm) ImageCreate(fingerprint string) error {
	tryUndo := true

	vgName := s.pool.Name
	thinPoolName := s.volume.Config["lvm.thinpool_name"]
	lvFsType := s.volume.Config["block.filesystem"]
	lvSize := s.volume.Config["size"]

	err := s.createImageDbPoolVolume(fingerprint)
	if err != nil {
		return err
	}

	err = s.createThinLV(vgName, thinPoolName, fingerprint, lvFsType, lvSize, storagePoolVolumeApiEndpointImages)
	if err != nil {
		s.log.Error("LVMCreateThinLV", log.Ctx{"err": err})
		return fmt.Errorf("Error Creating LVM LV for new image: %v", err)
	}
	// Undo handler: ImageDelete removes the LV, db entry and mountpoint.
	defer func() {
		if tryUndo {
			s.ImageDelete(fingerprint)
		}
	}()

	// Create image mountpoint.
	imageMntPoint := getImageMountPoint(s.pool.Name, fingerprint)
	if !shared.PathExists(imageMntPoint) {
		err := os.MkdirAll(imageMntPoint, 0700)
		if err != nil {
			return err
		}
	}

	_, err = s.ImageMount(fingerprint)
	if err != nil {
		return err
	}

	imagePath := shared.VarPath("images", fingerprint)
	err = unpackImage(s.d, imagePath, imageMntPoint, storageTypeLvm)
	if err != nil {
		return err
	}

	// Best effort: the unmount result is ignored here.
	s.ImageUmount(fingerprint)

	tryUndo = false
	return nil
}
// ImageDelete tears down the image identified by fingerprint: it unmounts
// the image, removes its LV, drops its pool volume database entry, and
// deletes the (now empty) mountpoint directory.
func (s *storageLvm) ImageDelete(fingerprint string) error {
	if _, err := s.ImageUmount(fingerprint); err != nil {
		return err
	}

	if err := s.removeLV(s.pool.Name, storagePoolVolumeApiEndpointImages, fingerprint); err != nil {
		return err
	}

	if err := s.deleteImageDbPoolVolume(fingerprint); err != nil {
		return err
	}

	mntPoint := getImageMountPoint(s.pool.Name, fingerprint)
	if shared.PathExists(mntPoint) {
		if err := os.Remove(mntPoint); err != nil {
			return err
		}
	}

	return nil
}
// ImageMount mounts the image LV at its mountpoint. Returns true if this
// call performed the mount, false if it was already mounted.
func (s *storageLvm) ImageMount(fingerprint string) (bool, error) {
	imageMntPoint := getImageMountPoint(s.pool.Name, fingerprint)
	if shared.IsMountPoint(imageMntPoint) {
		return false, nil
	}

	// Shouldn't happen.
	lvmFstype := s.volume.Config["block.filesystem"]
	if lvmFstype == "" {
		return false, fmt.Errorf("No filesystem type specified.")
	}

	lvmVolumePath := getLvmDevPath(s.pool.Name, storagePoolVolumeApiEndpointImages, fingerprint)
	lvmMountOptions := s.volume.Config["block.mount_options"]
	// Shouldn't be necessary since it should be validated in the config
	// checks.
	if lvmFstype == "ext4" && lvmMountOptions == "" {
		lvmMountOptions = "discard"
	}

	err := tryMount(lvmVolumePath, imageMntPoint, lvmFstype, 0, lvmMountOptions)
	if err != nil {
		shared.LogInfof("Error mounting image LV for unpacking: %s", err)
		return false, fmt.Errorf("Error mounting image LV: %v", err)
	}

	return true, nil
}
// ImageUmount unmounts the image mountpoint if it is mounted. It reports
// whether this call actually performed an unmount.
func (s *storageLvm) ImageUmount(fingerprint string) (bool, error) {
	mntPoint := getImageMountPoint(s.pool.Name, fingerprint)
	if !shared.IsMountPoint(mntPoint) {
		return false, nil
	}

	if err := tryUnmount(mntPoint, 0); err != nil {
		return false, err
	}

	return true, nil
}
// createThinLV creates a thin LV named lvName inside vgName/thinPoolName
// (creating and recording the thin pool first if it does not exist yet) and
// formats it with lvFsType. lvSize is a byte count in string form; a "B"
// suffix is appended for lvcreate's --virtualsize.
func (s *storageLvm) createThinLV(vgName string, thinPoolName string, lvName string, lvFsType string, lvSize string, volumeType string) error {
	exists, err := storageLVMThinpoolExists(vgName, thinPoolName)
	if err != nil {
		return err
	}

	if !exists {
		err := s.createDefaultThinPool(vgName, thinPoolName, lvName, lvFsType)
		if err != nil {
			return err
		}

		// Set the LVM thin pool config for the daemon.
		err = storageLVMValidateThinPoolName(s.d, vgName, thinPoolName)
		if err != nil {
			s.log.Error("Setting thin pool name", log.Ctx{"err": err})
			return fmt.Errorf("Error setting LVM thin pool config: %v", err)
		}
	}

	lvmThinPoolPath := fmt.Sprintf("%s/%s", vgName, thinPoolName)
	lvmPoolVolumeName := getPrefixedLvName(volumeType, lvName)
	output, err := tryExec(
		"lvcreate",
		"--thin",
		"-n", lvmPoolVolumeName,
		"--virtualsize", lvSize+"B", lvmThinPoolPath)
	if err != nil {
		s.log.Error("Could not create LV", log.Ctx{"lvname": lvmPoolVolumeName, "output": string(output)})
		return fmt.Errorf("Could not create thin LV named %s", lvmPoolVolumeName)
	}

	// Format the new LV.
	fsPath := getLvmDevPath(vgName, volumeType, lvName)
	switch lvFsType {
	case "xfs":
		output, err = tryExec("mkfs.xfs", fsPath)
	default:
		// default = ext4
		output, err = tryExec(
			"mkfs.ext4",
			"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0",
			fsPath)
	}
	if err != nil {
		s.log.Error("Filesystem creation failed", log.Ctx{"output": string(output)})
		return fmt.Errorf("Error making filesystem on image LV: %v", err)
	}

	return nil
}
// createDefaultThinPool creates the thin pool vgName/thinPoolName sized to
// use all free space in the VG. LVM older than 2.02.99 cannot do that in a
// single call, so there the pool is created at 1G and then grown to
// 100%FREE with lvextend.
//
// NOTE(review): the lvName and lvFsType parameters are currently unused.
func (s *storageLvm) createDefaultThinPool(vgName string, thinPoolName string, lvName string, lvFsType string) error {
	isRecent, err := s.lvmVersionIsAtLeast("2.02.99")
	if err != nil {
		return fmt.Errorf("Error checking LVM version: %s", err)
	}

	// Create the thin pool
	lvmThinPool := fmt.Sprintf("%s/%s", vgName, thinPoolName)
	var output []byte
	if isRecent {
		output, err = tryExec(
			"lvcreate",
			"--poolmetadatasize", "1G",
			"-l", "100%FREE",
			"--thinpool", lvmThinPool)
	} else {
		output, err = tryExec(
			"lvcreate",
			"--poolmetadatasize", "1G",
			"-L", "1G",
			"--thinpool", lvmThinPool)
	}
	if err != nil {
		s.log.Error("Could not create thin pool", log.Ctx{"name": thinPoolName, "err": err, "output": string(output)})
		return fmt.Errorf("Could not create LVM thin pool named %s", thinPoolName)
	}
	if !isRecent {
		// Grow it to the maximum VG size (two step process required by old LVM)
		output, err = tryExec("lvextend", "--alloc", "anywhere", "-l", "100%FREE", lvmThinPool)
		if err != nil {
			s.log.Error("Could not grow thin pool", log.Ctx{"name": thinPoolName, "err": err, "output": string(output)})
			return fmt.Errorf("Could not grow LVM thin pool named %s", thinPoolName)
		}
	}

	return nil
}
// removeLV force-removes the logical volume lvName of the given volume type
// from the volume group vgName.
func (s *storageLvm) removeLV(vgName string, volumeType string, lvName string) error {
	devPath := getLvmDevPath(vgName, volumeType, lvName)
	if output, err := tryExec("lvremove", "-f", devPath); err != nil {
		s.log.Error("Could not remove LV", log.Ctx{"lvname": lvName, "output": string(output)})
		return fmt.Errorf("Could not remove LV named %s", lvName)
	}
	return nil
}
// createSnapshotLV snapshots origLvName (under origVolumeType) into a new
// LV lvName (under volumeType) and activates it, read-only when requested.
// Returns the /dev path of the new LV. On LVM >= 2.02.99 the snapshot is
// created with "-kn" (do not set the activation-skip flag).
func (s *storageLvm) createSnapshotLV(vgName string, origLvName string, origVolumeType string, lvName string, volumeType string, readonly bool) (string, error) {
	sourceLvmVolumePath := getLvmDevPath(vgName, origVolumeType, origLvName)
	s.log.Debug("in createSnapshotLV:", log.Ctx{"lvname": lvName, "dev string": sourceLvmVolumePath})
	isRecent, err := s.lvmVersionIsAtLeast("2.02.99")
	if err != nil {
		return "", fmt.Errorf("Error checking LVM version: %v", err)
	}

	lvmPoolVolumeName := getPrefixedLvName(volumeType, lvName)
	var output []byte
	if isRecent {
		output, err = tryExec(
			"lvcreate",
			"-kn",
			"-n", lvmPoolVolumeName,
			"-s", sourceLvmVolumePath)
	} else {
		output, err = tryExec(
			"lvcreate",
			"-n", lvmPoolVolumeName,
			"-s", sourceLvmVolumePath)
	}
	if err != nil {
		s.log.Error("Could not create LV snapshot", log.Ctx{"lvname": lvName, "origlvname": origLvName, "output": string(output)})
		return "", fmt.Errorf("Could not create snapshot LV named %s", lvName)
	}

	targetLvmVolumePath := getLvmDevPath(vgName, volumeType, lvName)
	// Activate the new snapshot ("-ay"), read-only ("-pr") if requested.
	if readonly {
		output, err = tryExec("lvchange", "-ay", "-pr", targetLvmVolumePath)
	} else {
		output, err = tryExec("lvchange", "-ay", targetLvmVolumePath)
	}
	if err != nil {
		return "", fmt.Errorf("Could not activate new snapshot '%s': %v\noutput:%s", lvName, err, string(output))
	}

	return targetLvmVolumePath, nil
}
// renameLV renames the LV oldName to newName (both prefixed with the volume
// type) within the pool's volume group, returning lvrename's output.
func (s *storageLvm) renameLV(oldName string, newName string, volumeType string) (string, error) {
	src := getPrefixedLvName(volumeType, oldName)
	dst := getPrefixedLvName(volumeType, newName)
	out, err := tryExec("lvrename", s.pool.Name, src, dst)
	return string(out), err
}
// MigrationType returns the migration transport used by the LVM backend;
// rsync is used since LVM has no native send/receive.
func (s *storageLvm) MigrationType() MigrationFSType {
	return MigrationFSType_RSYNC
}
// PreservesInodes reports whether migration keeps inode numbers intact.
// Rsync-based migration does not.
func (s *storageLvm) PreservesInodes() bool {
	return false
}
// MigrationSource wraps the container in the generic rsync migration source.
func (s *storageLvm) MigrationSource(container container) (MigrationStorageSourceDriver, error) {
	return rsyncMigrationSource(container)
}
// MigrationSink receives a container (and optionally its snapshots) over
// the websocket connection via the generic rsync migration sink.
func (s *storageLvm) MigrationSink(live bool, container container, snapshots []*Snapshot, conn *websocket.Conn, srcIdmap *shared.IdmapSet, op *operation) error {
	return rsyncMigrationSink(live, container, snapshots, conn, srcIdmap, op)
}
|
package radix
import (
"log"
"strconv"
"sync"
. "testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// publish sends msg to channel ch over c, failing the test on error.
func publish(t *T, c Conn, ch, msg string) {
	require.Nil(t, c.Do(Cmd(nil, "PUBLISH", ch, msg)))
}
// assertMsgRead returns the next message off msgCh, panicking if none
// arrives within 5 seconds (panic rather than t.Fatal — presumably so a
// hang is loud even when called from a non-test goroutine).
func assertMsgRead(t *T, msgCh <-chan PubSubMessage) PubSubMessage {
	select {
	case m := <-msgCh:
		return m
	case <-time.After(5 * time.Second):
		panic("timedout reading")
	}
}
// assertMsgNoRead asserts that msgCh is still open and currently has no
// pending message.
func assertMsgNoRead(t *T, msgCh <-chan PubSubMessage) {
	select {
	case msg, ok := <-msgCh:
		if !ok {
			assert.Fail(t, "msgCh closed")
		} else {
			assert.Fail(t, "unexpected PubSubMessage off msgCh", "msg:%#v", msg)
		}
	default:
	}
}
// testSubscribe drives a PubSubConn through subscribe/publish/ping cycles.
// Every value received on pubCh is fanned out to three workers which
// publish to ch1, read+verify the resulting message, and ping the
// connection. Afterwards ch1 is unsubscribed and only ch2 traffic must
// still arrive.
func testSubscribe(t *T, c PubSubConn, pubCh chan int) {
	pubC := dial()
	msgCh := make(chan PubSubMessage, 1)
	ch1, ch2, msgStr := randStr(), randStr(), randStr()
	require.Nil(t, c.Subscribe(msgCh, ch1, ch2))

	// Fan pubCh out to three worker channels; the unbuffered sends keep the
	// three workers roughly in step with each other.
	pubChs := make([]chan int, 3)
	{
		for i := range pubChs {
			pubChs[i] = make(chan int)
		}
		go func() {
			for i := range pubCh {
				for _, innerPubCh := range pubChs {
					innerPubCh <- i
				}
			}
			for _, innerPubCh := range pubChs {
				close(innerPubCh)
			}
		}()
	}

	wg := new(sync.WaitGroup)
	// Worker 0: publish a numbered message for each tick.
	wg.Add(1)
	go func() {
		for i := range pubChs[0] {
			publish(t, pubC, ch1, msgStr+"_"+strconv.Itoa(i))
		}
		wg.Done()
	}()
	// Worker 1: read each message back and verify its contents.
	wg.Add(1)
	go func() {
		for i := range pubChs[1] {
			msg := assertMsgRead(t, msgCh)
			assert.Equal(t, PubSubMessage{
				Type:    "message",
				Channel: ch1,
				Message: []byte(msgStr + "_" + strconv.Itoa(i)),
			}, msg)
		}
		wg.Done()
	}()
	// Worker 2: interleave pings with the pub/sub traffic.
	wg.Add(1)
	go func() {
		for range pubChs[2] {
			require.Nil(t, c.Ping())
		}
		wg.Done()
	}()
	wg.Wait()

	// After unsubscribing from ch1, only ch2 messages may arrive.
	require.Nil(t, c.Unsubscribe(msgCh, ch1))
	publish(t, pubC, ch1, msgStr)
	publish(t, pubC, ch2, msgStr)
	msg := assertMsgRead(t, msgCh)
	assert.Equal(t, PubSubMessage{
		Type:    "message",
		Channel: ch2,
		Message: []byte(msgStr),
	}, msg)
}
// TestPubSubSubscribe runs the generic subscribe test against a basic
// PubSubConn, then checks that Ping errors once the conn is closed.
func TestPubSubSubscribe(t *T) {
	// Feed 1000 ticks into the test driver.
	pubCh := make(chan int)
	go func() {
		for i := 0; i < 1000; i++ {
			pubCh <- i
		}
		close(pubCh)
	}()

	c := PubSub(dial())
	testSubscribe(t, c, pubCh)

	// All operations after Close must error.
	c.Close()
	assert.NotNil(t, c.Ping())
	assert.NotNil(t, c.Ping())
	assert.NotNil(t, c.Ping())
}
// TestPubSubPSubscribe exercises pattern subscriptions: glob ("*"),
// single-character ("?") and character-class ("[ae]") patterns, concurrent
// publishing/reading/pinging, and PUnsubscribe behavior.
func TestPubSubPSubscribe(t *T) {
	pubC := dial()
	c := PubSub(dial())
	msgCh := make(chan PubSubMessage, 1)

	// Two "*" patterns with channels matching them...
	p1, p2, msgStr := randStr()+"_*", randStr()+"_*", randStr()
	ch1, ch2 := p1+"_"+randStr(), p2+"_"+randStr()
	// ...plus a "?" and a "[ae]" pattern whose channels replace the
	// wildcard suffix with a matching "a".
	p3, p4 := randStr()+"_?", randStr()+"_[ae]"
	ch3, ch4 := p3[:len(p3)-len("?")]+"a", p4[:len(p4)-len("[ae]")]+"a"
	require.Nil(t, c.PSubscribe(msgCh, p1, p2, p3, p4))

	count := 1000
	wg := new(sync.WaitGroup)
	// Publisher: count messages to ch1.
	wg.Add(1)
	go func() {
		for i := 0; i < count; i++ {
			publish(t, pubC, ch1, msgStr)
		}
		wg.Done()
	}()
	// Reader: each message must arrive as a pmessage for pattern p1.
	wg.Add(1)
	go func() {
		for i := 0; i < count; i++ {
			msg := assertMsgRead(t, msgCh)
			assert.Equal(t, PubSubMessage{
				Type:    "pmessage",
				Pattern: p1,
				Channel: ch1,
				Message: []byte(msgStr),
			}, msg)
		}
		wg.Done()
	}()
	// Pinger: interleave pings with the traffic.
	wg.Add(1)
	go func() {
		for i := 0; i < count; i++ {
			require.Nil(t, c.Ping())
		}
		wg.Done()
	}()
	wg.Wait()

	// After punsubscribing p1, only p2/p3/p4 traffic may arrive.
	require.Nil(t, c.PUnsubscribe(msgCh, p1))
	publish(t, pubC, ch1, msgStr)
	publish(t, pubC, ch2, msgStr)
	msg := assertMsgRead(t, msgCh)
	assert.Equal(t, PubSubMessage{
		Type:    "pmessage",
		Pattern: p2,
		Channel: ch2,
		Message: []byte(msgStr),
	}, msg)

	publish(t, pubC, ch3, msgStr)
	msg = assertMsgRead(t, msgCh)
	assert.Equal(t, PubSubMessage{
		Type:    "pmessage",
		Pattern: p3,
		Channel: ch3,
		Message: []byte(msgStr),
	}, msg)

	publish(t, pubC, ch4, msgStr)
	msg = assertMsgRead(t, msgCh)
	assert.Equal(t, PubSubMessage{
		Type:    "pmessage",
		Pattern: p4,
		Channel: ch4,
		Message: []byte(msgStr),
	}, msg)

	// All operations after Close must error, and no further messages may
	// show up on msgCh.
	c.Close()
	assert.NotNil(t, c.Ping())
	assert.NotNil(t, c.Ping())
	assert.NotNil(t, c.Ping())

	publish(t, pubC, ch2, msgStr)
	time.Sleep(250 * time.Millisecond)
	assertMsgNoRead(t, msgCh)
}
// TestPubSubMixedSubscribe checks that a channel subscription and a pattern
// subscription matching the same channel each deliver their own copy of a
// single publish (one "message" and one "pmessage").
func TestPubSubMixedSubscribe(t *T) {
	pubC := dial()
	defer pubC.Close()
	c := PubSub(dial())
	defer c.Close()

	msgCh := make(chan PubSubMessage, 2)
	const msgStr = "bar"
	require.Nil(t, c.Subscribe(msgCh, "foo"))
	require.Nil(t, c.PSubscribe(msgCh, "f[aeiou]o"))

	publish(t, pubC, "foo", msgStr)
	msg1, msg2 := assertMsgRead(t, msgCh), assertMsgRead(t, msgCh)
	// If we received the pmessage first we must swap msg1 and msg2.
	if msg1.Type == "pmessage" {
		msg1, msg2 = msg2, msg1
	}

	assert.Equal(t, PubSubMessage{
		Type:    "message",
		Channel: "foo",
		Message: []byte(msgStr),
	}, msg1)
	assert.Equal(t, PubSubMessage{
		Type:    "pmessage",
		Channel: "foo",
		Pattern: "f[aeiou]o",
		Message: []byte(msgStr),
	}, msg2)
}
// Ensure that PubSubConn properly handles the case where the Conn it's
// reading from returns a timeout error: the read loop must survive the
// timeout (observed via testEventCh) and keep delivering later messages.
func TestPubSubTimeout(t *T) {
	c, pubC := PubSub(dial(DialReadTimeout(1*time.Second))), dial()
	// Hook into the conn's internal event stream to observe the timeout.
	c.(*pubSubConn).testEventCh = make(chan string, 1)
	ch, msgCh := randStr(), make(chan PubSubMessage, 1)
	require.Nil(t, c.Subscribe(msgCh, ch))

	// Publish only after the 1s read timeout has certainly fired.
	msgStr := randStr()
	go func() {
		time.Sleep(2 * time.Second)
		assert.Nil(t, pubC.Do(Cmd(nil, "PUBLISH", ch, msgStr)))
	}()

	assert.Equal(t, "timeout", <-c.(*pubSubConn).testEventCh)
	msg := assertMsgRead(t, msgCh)
	assert.Equal(t, msgStr, string(msg.Message))
}
// This attempts to catch weird race conditions which might occur due to
// subscribing/unsubscribing quickly on an active channel.
func TestPubSubChaotic(t *T) {
	c, pubC := PubSub(dial()), dial()
	ch, msgStr := randStr(), randStr()

	// Background publisher: one message every 10ms until the test ends.
	stopCh := make(chan struct{})
	defer close(stopCh)
	go func() {
		for {
			select {
			case <-stopCh:
				return
			default:
				publish(t, pubC, ch, msgStr)
				time.Sleep(10 * time.Millisecond)
			}
		}
	}()

	msgCh := make(chan PubSubMessage, 100)
	require.Nil(t, c.Subscribe(msgCh, ch))

	stopAfter := time.After(10 * time.Second)
	// Flip between subscribed and unsubscribed every 250ms.
	// (time.Tick leaks its ticker, which is acceptable in a test.)
	toggleTimer := time.Tick(250 * time.Millisecond)
	subbed := true
	for {
		// While subscribed, a message must arrive within 100ms.
		waitFor := time.NewTimer(100 * time.Millisecond)
		select {
		case <-stopAfter:
			return
		case <-waitFor.C:
			if subbed {
				t.Fatal("waited too long to receive message")
			}
		case msg := <-msgCh:
			waitFor.Stop()
			assert.Equal(t, msgStr, string(msg.Message))
		case <-toggleTimer:
			waitFor.Stop()
			if subbed {
				require.Nil(t, c.Unsubscribe(msgCh, ch))
			} else {
				require.Nil(t, c.Subscribe(msgCh, ch))
			}
			subbed = !subbed
		}
	}
}
// BenchmarkPubSub measures the publish -> receive round trip for a single
// subscribed channel.
func BenchmarkPubSub(b *B) {
	c, pubC := PubSub(dial()), dial()
	defer c.Close()
	defer pubC.Close()

	msg := randStr()
	msgCh := make(chan PubSubMessage, 1)
	require.Nil(b, c.Subscribe(msgCh, "benchmark"))
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		if err := pubC.Do(Cmd(nil, "PUBLISH", "benchmark", msg)); err != nil {
			b.Fatal(err)
		}
		<-msgCh
	}
}
// ExamplePubSub demonstrates basic PubSubConn usage.
func ExamplePubSub() {
	// Create a normal redis connection
	conn, err := Dial("tcp", "127.0.0.1:6379")
	if err != nil {
		panic(err)
	}

	// Pass that connection into PubSub, conn should never get used after this
	ps := PubSub(conn)

	// Subscribe to a channel called "myChannel". All publishes to "myChannel"
	// will get sent to msgCh after this
	msgCh := make(chan PubSubMessage)
	if err := ps.Subscribe(msgCh, "myChannel"); err != nil {
		panic(err)
	}

	for msg := range msgCh {
		log.Printf("publish to channel %q received: %q", msg.Channel, msg.Message)
	}
}
add ExamplePersistentPubSubCluster
package radix
import (
"log"
"math/rand"
"strconv"
"sync"
. "testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// publish sends msg to channel ch over c, failing the test on error.
func publish(t *T, c Conn, ch, msg string) {
	require.Nil(t, c.Do(Cmd(nil, "PUBLISH", ch, msg)))
}
// assertMsgRead returns the next message off msgCh, panicking if none
// arrives within 5 seconds.
func assertMsgRead(t *T, msgCh <-chan PubSubMessage) PubSubMessage {
	select {
	case m := <-msgCh:
		return m
	case <-time.After(5 * time.Second):
		panic("timedout reading")
	}
}
// assertMsgNoRead asserts that msgCh is still open and currently has no
// pending message.
func assertMsgNoRead(t *T, msgCh <-chan PubSubMessage) {
	select {
	case msg, ok := <-msgCh:
		if !ok {
			assert.Fail(t, "msgCh closed")
		} else {
			assert.Fail(t, "unexpected PubSubMessage off msgCh", "msg:%#v", msg)
		}
	default:
	}
}
// testSubscribe drives a PubSubConn through subscribe/publish/ping cycles:
// each pubCh value is fanned out to a publisher, a reader, and a pinger
// worker; afterwards ch1 is unsubscribed and only ch2 traffic must arrive.
func testSubscribe(t *T, c PubSubConn, pubCh chan int) {
	pubC := dial()
	msgCh := make(chan PubSubMessage, 1)
	ch1, ch2, msgStr := randStr(), randStr(), randStr()
	require.Nil(t, c.Subscribe(msgCh, ch1, ch2))

	// Fan pubCh out to three worker channels.
	pubChs := make([]chan int, 3)
	{
		for i := range pubChs {
			pubChs[i] = make(chan int)
		}
		go func() {
			for i := range pubCh {
				for _, innerPubCh := range pubChs {
					innerPubCh <- i
				}
			}
			for _, innerPubCh := range pubChs {
				close(innerPubCh)
			}
		}()
	}

	wg := new(sync.WaitGroup)
	// Worker 0: publish a numbered message for each tick.
	wg.Add(1)
	go func() {
		for i := range pubChs[0] {
			publish(t, pubC, ch1, msgStr+"_"+strconv.Itoa(i))
		}
		wg.Done()
	}()
	// Worker 1: read each message back and verify its contents.
	wg.Add(1)
	go func() {
		for i := range pubChs[1] {
			msg := assertMsgRead(t, msgCh)
			assert.Equal(t, PubSubMessage{
				Type:    "message",
				Channel: ch1,
				Message: []byte(msgStr + "_" + strconv.Itoa(i)),
			}, msg)
		}
		wg.Done()
	}()
	// Worker 2: interleave pings with the pub/sub traffic.
	wg.Add(1)
	go func() {
		for range pubChs[2] {
			require.Nil(t, c.Ping())
		}
		wg.Done()
	}()
	wg.Wait()

	// After unsubscribing from ch1, only ch2 messages may arrive.
	require.Nil(t, c.Unsubscribe(msgCh, ch1))
	publish(t, pubC, ch1, msgStr)
	publish(t, pubC, ch2, msgStr)
	msg := assertMsgRead(t, msgCh)
	assert.Equal(t, PubSubMessage{
		Type:    "message",
		Channel: ch2,
		Message: []byte(msgStr),
	}, msg)
}
// TestPubSubSubscribe runs the generic subscribe test against a basic
// PubSubConn, then checks that Ping errors once the conn is closed.
func TestPubSubSubscribe(t *T) {
	// Feed 1000 ticks into the test driver.
	pubCh := make(chan int)
	go func() {
		for i := 0; i < 1000; i++ {
			pubCh <- i
		}
		close(pubCh)
	}()

	c := PubSub(dial())
	testSubscribe(t, c, pubCh)

	// All operations after Close must error.
	c.Close()
	assert.NotNil(t, c.Ping())
	assert.NotNil(t, c.Ping())
	assert.NotNil(t, c.Ping())
}
// TestPubSubPSubscribe exercises pattern subscriptions ("*", "?", "[ae]"),
// concurrent publishing/reading/pinging, and PUnsubscribe behavior.
func TestPubSubPSubscribe(t *T) {
	pubC := dial()
	c := PubSub(dial())
	msgCh := make(chan PubSubMessage, 1)

	// Patterns plus channels constructed to match them.
	p1, p2, msgStr := randStr()+"_*", randStr()+"_*", randStr()
	ch1, ch2 := p1+"_"+randStr(), p2+"_"+randStr()
	p3, p4 := randStr()+"_?", randStr()+"_[ae]"
	ch3, ch4 := p3[:len(p3)-len("?")]+"a", p4[:len(p4)-len("[ae]")]+"a"
	require.Nil(t, c.PSubscribe(msgCh, p1, p2, p3, p4))

	count := 1000
	wg := new(sync.WaitGroup)
	// Publisher worker.
	wg.Add(1)
	go func() {
		for i := 0; i < count; i++ {
			publish(t, pubC, ch1, msgStr)
		}
		wg.Done()
	}()
	// Reader worker: each message must arrive as a pmessage for p1.
	wg.Add(1)
	go func() {
		for i := 0; i < count; i++ {
			msg := assertMsgRead(t, msgCh)
			assert.Equal(t, PubSubMessage{
				Type:    "pmessage",
				Pattern: p1,
				Channel: ch1,
				Message: []byte(msgStr),
			}, msg)
		}
		wg.Done()
	}()
	// Pinger worker.
	wg.Add(1)
	go func() {
		for i := 0; i < count; i++ {
			require.Nil(t, c.Ping())
		}
		wg.Done()
	}()
	wg.Wait()

	// After punsubscribing p1, only p2/p3/p4 traffic may arrive.
	require.Nil(t, c.PUnsubscribe(msgCh, p1))
	publish(t, pubC, ch1, msgStr)
	publish(t, pubC, ch2, msgStr)
	msg := assertMsgRead(t, msgCh)
	assert.Equal(t, PubSubMessage{
		Type:    "pmessage",
		Pattern: p2,
		Channel: ch2,
		Message: []byte(msgStr),
	}, msg)

	publish(t, pubC, ch3, msgStr)
	msg = assertMsgRead(t, msgCh)
	assert.Equal(t, PubSubMessage{
		Type:    "pmessage",
		Pattern: p3,
		Channel: ch3,
		Message: []byte(msgStr),
	}, msg)

	publish(t, pubC, ch4, msgStr)
	msg = assertMsgRead(t, msgCh)
	assert.Equal(t, PubSubMessage{
		Type:    "pmessage",
		Pattern: p4,
		Channel: ch4,
		Message: []byte(msgStr),
	}, msg)

	// All operations after Close must error, and no further messages may
	// show up on msgCh.
	c.Close()
	assert.NotNil(t, c.Ping())
	assert.NotNil(t, c.Ping())
	assert.NotNil(t, c.Ping())

	publish(t, pubC, ch2, msgStr)
	time.Sleep(250 * time.Millisecond)
	assertMsgNoRead(t, msgCh)
}
// TestPubSubMixedSubscribe verifies that a channel subscription and a pattern
// subscription matching the same channel each deliver a copy of one publish.
func TestPubSubMixedSubscribe(t *T) {
	pubC := dial()
	defer pubC.Close()
	c := PubSub(dial())
	defer c.Close()

	// Buffered to 2 because a single publish yields two deliveries: one
	// "message" (from Subscribe) and one "pmessage" (from PSubscribe).
	msgCh := make(chan PubSubMessage, 2)
	const msgStr = "bar"
	require.Nil(t, c.Subscribe(msgCh, "foo"))
	require.Nil(t, c.PSubscribe(msgCh, "f[aeiou]o"))
	publish(t, pubC, "foo", msgStr)
	msg1, msg2 := assertMsgRead(t, msgCh), assertMsgRead(t, msgCh)
	// If we received the pmessage first we must swap msg1 and msg2; the
	// relative order of the two deliveries is not guaranteed.
	if msg1.Type == "pmessage" {
		msg1, msg2 = msg2, msg1
	}
	assert.Equal(t, PubSubMessage{
		Type:    "message",
		Channel: "foo",
		Message: []byte(msgStr),
	}, msg1)
	assert.Equal(t, PubSubMessage{
		Type:    "pmessage",
		Channel: "foo",
		Pattern: "f[aeiou]o",
		Message: []byte(msgStr),
	}, msg2)
}
// Ensure that PubSubConn properly handles the case where the Conn it's reading
// from returns a timeout error
func TestPubSubTimeout(t *T) {
	// The read timeout (1s) is shorter than the publish delay (2s), forcing
	// the conn's internal read loop to hit at least one timeout first.
	c, pubC := PubSub(dial(DialReadTimeout(1*time.Second))), dial()
	// testEventCh lets the test observe internal events of the pubSubConn.
	c.(*pubSubConn).testEventCh = make(chan string, 1)
	ch, msgCh := randStr(), make(chan PubSubMessage, 1)
	require.Nil(t, c.Subscribe(msgCh, ch))
	msgStr := randStr()
	go func() {
		time.Sleep(2 * time.Second)
		assert.Nil(t, pubC.Do(Cmd(nil, "PUBLISH", ch, msgStr)))
	}()
	// The conn must surface the timeout internally...
	assert.Equal(t, "timeout", <-c.(*pubSubConn).testEventCh)
	// ...and still deliver the message published afterwards.
	msg := assertMsgRead(t, msgCh)
	assert.Equal(t, msgStr, string(msg.Message))
}
// This attempts to catch weird race conditions which might occur due to
// subscribing/unsubscribing quickly on an active channel.
//
// A background goroutine publishes continuously while the main loop toggles
// the subscription every 250ms. While subscribed, a message must arrive within
// 100ms or the test fails.
func TestPubSubChaotic(t *T) {
	c, pubC := PubSub(dial()), dial()
	ch, msgStr := randStr(), randStr()

	stopCh := make(chan struct{})
	defer close(stopCh)
	go func() {
		for {
			select {
			case <-stopCh:
				return
			default:
				publish(t, pubC, ch, msgStr)
				time.Sleep(10 * time.Millisecond)
			}
		}
	}()

	msgCh := make(chan PubSubMessage, 100)
	require.Nil(t, c.Subscribe(msgCh, ch))

	stopAfter := time.After(10 * time.Second)
	// FIX: use NewTicker instead of time.Tick so the ticker is stopped when
	// the test returns (time.Tick's ticker can never be stopped).
	toggleTicker := time.NewTicker(250 * time.Millisecond)
	defer toggleTicker.Stop()

	// FIX: reuse a single timer across iterations rather than allocating a
	// fresh one per loop (and leaking the last one on the stopAfter path).
	waitFor := time.NewTimer(100 * time.Millisecond)
	defer waitFor.Stop()

	subbed := true
	for {
		// Stop/drain/Reset gives each iteration a fresh 100ms window.
		if !waitFor.Stop() {
			select {
			case <-waitFor.C:
			default:
			}
		}
		waitFor.Reset(100 * time.Millisecond)
		select {
		case <-stopAfter:
			return
		case <-waitFor.C:
			if subbed {
				t.Fatal("waited too long to receive message")
			}
		case msg := <-msgCh:
			assert.Equal(t, msgStr, string(msg.Message))
		case <-toggleTicker.C:
			if subbed {
				require.Nil(t, c.Unsubscribe(msgCh, ch))
			} else {
				require.Nil(t, c.Subscribe(msgCh, ch))
			}
			subbed = !subbed
		}
	}
}
// BenchmarkPubSub measures one publish plus the receipt of the corresponding
// message on a subscribed channel per iteration.
func BenchmarkPubSub(b *B) {
	subConn, pubConn := PubSub(dial()), dial()
	defer subConn.Close()
	defer pubConn.Close()

	payload := randStr()
	msgCh := make(chan PubSubMessage, 1)
	require.Nil(b, subConn.Subscribe(msgCh, "benchmark"))

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := pubConn.Do(Cmd(nil, "PUBLISH", "benchmark", payload))
		if err != nil {
			b.Fatal(err)
		}
		<-msgCh
	}
}
// ExamplePubSub demonstrates basic usage: wrap a Conn in PubSub, subscribe to
// one channel, and read messages from the delivery channel.
func ExamplePubSub() {
	// Create a normal redis connection
	conn, err := Dial("tcp", "127.0.0.1:6379")
	if err != nil {
		panic(err)
	}

	// Pass that connection into PubSub, conn should never get used after this
	ps := PubSub(conn)

	// Subscribe to a channel called "myChannel". All publishes to "myChannel"
	// will get sent to msgCh after this
	msgCh := make(chan PubSubMessage)
	if err := ps.Subscribe(msgCh, "myChannel"); err != nil {
		panic(err)
	}

	// msgCh is unbuffered, so this loop must keep consuming for deliveries
	// to proceed.
	for msg := range msgCh {
		log.Printf("publish to channel %q received: %q", msg.Channel, msg.Message)
	}
}
// Example of how to use PersistentPubSub with a Cluster instance.
func ExamplePersistentPubSubCluster() {
	// Initialize the cluster in any way you see fit
	cluster, err := NewCluster([]string{"127.0.0.1:6379"})
	if err != nil {
		panic(err)
	}

	// Have PersistentPubSub pick a random cluster node everytime it wants to
	// make a new connection. If the node fails PersistentPubSub will
	// automatically pick a new node to connect to.
	ps := PersistentPubSub("", "", func(string, string) (Conn, error) {
		topo := cluster.Topo()
		node := topo[rand.Intn(len(topo))]
		return Dial("tcp", node.Addr)
	})

	// Use the PubSubConn as normal. FIX: check the Subscribe error instead of
	// silently dropping it, matching ExamplePubSub above.
	msgCh := make(chan PubSubMessage)
	if err := ps.Subscribe(msgCh, "myChannel"); err != nil {
		panic(err)
	}
	for msg := range msgCh {
		log.Printf("publish to channel %q received: %q", msg.Channel, msg.Message)
	}
}
|
package volumeattachmentcommands
import (
"github.com/jrperritt/rack/commandoptions"
"github.com/jrperritt/rack/handler"
"github.com/jrperritt/rack/internal/github.com/codegangsta/cli"
"github.com/jrperritt/rack/internal/github.com/fatih/structs"
"github.com/jrperritt/rack/internal/github.com/jrperritt/gophercloud/rackspace/compute/v2/volumeattach"
osVolumeAttach "github.com/jrperritt/rack/internal/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach"
osServers "github.com/jrperritt/rack/internal/github.com/rackspace/gophercloud/openstack/compute/v2/servers"
"github.com/jrperritt/rack/util"
)
var create = cli.Command{
Name: "create",
Usage: util.Usage(commandPrefix, "create", "[--server-id <serverID> | --server-name <serverName>] [--id <volumeID> | --name <volumeName> | --stdin id]"),
Description: "Creates a new volume attachment on the server",
Action: actionCreate,
Flags: commandoptions.CommandFlags(flagsCreate, keysCreate),
BashComplete: func(c *cli.Context) {
commandoptions.CompleteFlags(commandoptions.CommandFlags(flagsCreate, keysCreate))
},
}
// flagsCreate returns the CLI flags accepted by the `create` subcommand.
// Exactly one of --id/--stdin identifies the volume, and exactly one of
// --server-id/--server-name identifies the server.
func flagsCreate() []cli.Flag {
	return []cli.Flag{
		cli.StringFlag{
			Name:  "id",
			Usage: "[optional; required if `stdin` isn't provided] The ID of the volume to attach.",
		},
		cli.StringFlag{
			Name:  "stdin",
			Usage: "[optional; required if `id` isn't provided] The field being piped into STDIN. Valid values are: id",
		},
		cli.StringFlag{
			Name:  "server-id",
			Usage: "[optional; required if `server-name` isn't provided] The server ID to which attach the volume.",
		},
		cli.StringFlag{
			Name: "server-name",
			// FIX: usage text previously read "1server-id`" (typo).
			Usage: "[optional; required if `server-id` isn't provided] The server name to which attach the volume.",
		},
		cli.StringFlag{
			Name:  "device",
			Usage: "[optional] The name of the device to which the volume will attach. Default is 'auto'.",
		},
	}
}
var keysCreate = []string{"ID", "Device", "VolumeID", "ServerID"}
type paramsCreate struct {
opts *osVolumeAttach.CreateOpts
serverID string
}
type commandCreate handler.Command
func actionCreate(c *cli.Context) {
command := &commandCreate{
Ctx: &handler.Context{
CLIContext: c,
},
}
handler.Handle(command)
}
func (command *commandCreate) Context() *handler.Context {
return command.Ctx
}
func (command *commandCreate) Keys() []string {
return keysCreate
}
func (command *commandCreate) ServiceClientType() string {
return serviceClientType
}
// HandleFlags resolves the target server from the CLI flags and stores the
// create parameters (attachment options plus server ID) on the resource.
func (command *commandCreate) HandleFlags(resource *handler.Resource) error {
	// Resolve the server either by --server-id or by looking up --server-name.
	serverID, err := command.Ctx.IDOrName(osServers.IDFromName)
	if err != nil {
		return err
	}
	c := command.Ctx.CLIContext
	opts := &osVolumeAttach.CreateOpts{
		// Empty when --device is not given; per the flag's usage text the
		// device name is then chosen automatically.
		Device: c.String("device"),
	}
	resource.Params = &paramsCreate{
		opts:     opts,
		serverID: serverID,
	}
	return nil
}
func (command *commandCreate) HandlePipe(resource *handler.Resource, item string) error {
resource.Params.(*paramsCreate).opts.VolumeID = item
return nil
}
func (command *commandCreate) HandleSingle(resource *handler.Resource) error {
err := command.Ctx.CheckFlagsSet([]string{"id"})
if err != nil {
return err
}
resource.Params.(*paramsCreate).opts.VolumeID = command.Ctx.CLIContext.String("id")
return nil
}
func (command *commandCreate) Execute(resource *handler.Resource) {
params := resource.Params.(*paramsCreate)
volumeAttachment, err := volumeattach.Create(command.Ctx.ServiceClient, params.serverID, params.opts).Extract()
if err != nil {
resource.Err = err
return
}
resource.Result = structs.Map(volumeAttachment)
}
func (command *commandCreate) StdinField() string {
return "id"
}
touch ups for 'volume-attachment create' command
package volumeattachmentcommands
import (
"github.com/jrperritt/rack/commandoptions"
"github.com/jrperritt/rack/handler"
"github.com/jrperritt/rack/internal/github.com/codegangsta/cli"
"github.com/jrperritt/rack/internal/github.com/fatih/structs"
osVolumeAttach "github.com/jrperritt/rack/internal/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach"
osServers "github.com/jrperritt/rack/internal/github.com/rackspace/gophercloud/openstack/compute/v2/servers"
"github.com/jrperritt/rack/util"
)
var create = cli.Command{
Name: "create",
Usage: util.Usage(commandPrefix, "create", "[--server-id <serverID> | --server-name <serverName>] [--id <volumeID> | --name <volumeName> | --stdin id]"),
Description: "Creates a new volume attachment on the server",
Action: actionCreate,
Flags: commandoptions.CommandFlags(flagsCreate, keysCreate),
BashComplete: func(c *cli.Context) {
commandoptions.CompleteFlags(commandoptions.CommandFlags(flagsCreate, keysCreate))
},
}
func flagsCreate() []cli.Flag {
return []cli.Flag{
cli.StringFlag{
Name: "id",
Usage: "[optional; required if `stdin` isn't provided] The ID of the volume to attach.",
},
cli.StringFlag{
Name: "stdin",
Usage: "[optional; required if `id` isn't provided] The field being piped into STDIN. Valid values are: id",
},
cli.StringFlag{
Name: "server-id",
Usage: "[optional; required if `server-name` isn't provided] The server ID to which attach the volume.",
},
cli.StringFlag{
Name: "server-name",
Usage: "[optional; required if `server-id` isn't provided] The server name to which attach the volume.",
},
cli.StringFlag{
Name: "device",
Usage: "[optional] The name of the device to which the volume will attach. Default is 'auto'.",
},
}
}
var keysCreate = []string{"ID", "Device", "VolumeID", "ServerID"}
type paramsCreate struct {
opts *osVolumeAttach.CreateOpts
serverID string
}
type commandCreate handler.Command
func actionCreate(c *cli.Context) {
command := &commandCreate{
Ctx: &handler.Context{
CLIContext: c,
},
}
handler.Handle(command)
}
func (command *commandCreate) Context() *handler.Context {
return command.Ctx
}
func (command *commandCreate) Keys() []string {
return keysCreate
}
func (command *commandCreate) ServiceClientType() string {
return serviceClientType
}
func (command *commandCreate) HandleFlags(resource *handler.Resource) error {
serverID, err := command.Ctx.IDOrName(osServers.IDFromName)
if err != nil {
return err
}
c := command.Ctx.CLIContext
opts := &osVolumeAttach.CreateOpts{
Device: c.String("device"),
}
resource.Params = ¶msCreate{
opts: opts,
serverID: serverID,
}
return nil
}
func (command *commandCreate) HandlePipe(resource *handler.Resource, item string) error {
resource.Params.(*paramsCreate).opts.VolumeID = item
return nil
}
func (command *commandCreate) HandleSingle(resource *handler.Resource) error {
err := command.Ctx.CheckFlagsSet([]string{"id"})
if err != nil {
return err
}
resource.Params.(*paramsCreate).opts.VolumeID = command.Ctx.CLIContext.String("id")
return nil
}
func (command *commandCreate) Execute(resource *handler.Resource) {
params := resource.Params.(*paramsCreate)
volumeAttachment, err := osVolumeAttach.Create(command.Ctx.ServiceClient, params.serverID, params.opts).Extract()
if err != nil {
resource.Err = err
return
}
resource.Result = structs.Map(volumeAttachment)
}
func (command *commandCreate) StdinField() string {
return "id"
}
|
// HTTP server that takes JSON feedback for the Alkomat 3000 app and turns it into a neat
// e-mail forwarded to the maintainer's address.
package main
import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"net/smtp"
	"os"
	"strings"
	"time"
)
func main() {
f, err := os.OpenFile("feedback.log", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
if err != nil {
log.Fatal(err)
}
defer f.Close()
log.SetOutput(f)
if FromAddr == "foo@example.com" {
log.Fatal("Please set the constants in config.go and recompile")
}
if Passwd == "" {
log.Fatal("Please enter the password in config.go and recompile")
}
http.HandleFunc("/", rootHandler)
log.Println("Starting server on port", Port)
log.Fatal(http.ListenAndServeTLS(":"+Port, CertificatePath, PrivateKeyPath, nil))
}
type Device struct {
OSVer string
OSAPILvl int
Device string
Model string
}
func (d Device) String() string {
return fmt.Sprintf("\tOS version: %s (API %d)\n\tDevice: %s\n\tModel: %s\n",
d.OSVer, d.OSAPILvl, d.Device, d.Model)
}
type Feedback struct {
Device Device
AppInfo string
LogTrace string
Sender string
SenderMail string
Message string
}
// rootHandler receives a JSON feedback payload over HTTP, logs the request,
// and forwards the feedback as an e-mail to the maintainer. It always
// responds 200; failures are only logged.
func rootHandler(w http.ResponseWriter, r *http.Request) {
	r.ParseForm()
	body, _ := ioutil.ReadAll(r.Body)
	fbstr := string(body)
	agent := r.Header.Get("user-agent")

	// Re-open the log file per request so appends keep working over the
	// server's lifetime; main() already pointed the log package here once.
	f, err := os.OpenFile("feedback.log", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	log.SetOutput(f)
	log.Println("Received feedback from", agent, "(", r.RemoteAddr, ")")

	decoder := json.NewDecoder(strings.NewReader(fbstr))
	var feedback Feedback
	// FIX: was `err := ...`, which fails to compile ("no new variables on
	// left side of :=") because err is already declared above.
	err = decoder.Decode(&feedback)
	if err != nil {
		// FIX: do not re-open the log file here — the previous code shadowed
		// `err` with the OpenFile result and logged the wrong error. Output
		// is already set above.
		log.Println("JSON decode error:", err)
	}

	// Only show sender if one is mentioned.
	senderstr := ""
	if feedback.Sender != "" {
		senderstr = fmt.Sprintf("Sender: %s <%s>", feedback.Sender, feedback.SenderMail)
	}

	// The message written by the app user is indented by a single tab for
	// clearer separation.
	msg := fmt.Sprintf("Alkomat 3000 Feedback\n%s\nDevice:\n%v\nApp: %s\nMessage:\n\n\t",
		senderstr, feedback.Device, feedback.AppInfo)
	msg += strings.Replace(feedback.Message, "\n", "\n\t", -1) + "\n"
	msg += "\nEnd of message\n"
	msg += "Log trace:\n" + feedback.LogTrace + "\n"

	if err = sendMail(Subject, msg); err != nil {
		log.Println("couldn't send mail:", err)
		return
	}
}
// sendMail sends a mail with the given subject and content. The content must have Unix
// newlines (\n) and end in a newline. These newlines will be converted to SMTP's CRLF
// automatically.
func sendMail(subject, content string) error {
auth := smtp.PlainAuth("", FromAddr, Passwd, SMTPSrv)
to := []string{ToAddr}
date := time.Now().Format(time.RFC822Z)
header := fmt.Sprintf("Date: %s\r\nFrom: %s\r\nTo: %s\r\nSubject: %s\r\n", date, FromAddr, ToAddr, subject)
body := strings.Replace(content, "\n", "\r\n", -1)
msg := []byte(header + "\r\n" + body)
return smtp.SendMail(SMTPSrv+":"+SMTPPort, auth, FromAddr, to, msg)
}
Add "os" to imports (throws error)
Throws ./main.go:80: no new variables on left side of :=
// HTTP server that takes JSON feedback for the Alkomat 3000 app and turns it into a neat
// e-mail forwarded to the maintainer's address.
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/smtp"
"strings"
"time"
"os"
)
func main() {
f, err := os.OpenFile("feedback.log", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
if err != nil {
log.Fatal(err)
}
defer f.Close()
log.SetOutput(f)
if FromAddr == "foo@example.com" {
log.Fatal("Please set the constants in config.go and recompile")
}
if Passwd == "" {
log.Fatal("Please enter the password in config.go and recompile")
}
http.HandleFunc("/", rootHandler)
log.Println("Starting server on port", Port)
log.Fatal(http.ListenAndServeTLS(":"+Port, CertificatePath, PrivateKeyPath, nil))
}
type Device struct {
OSVer string
OSAPILvl int
Device string
Model string
}
func (d Device) String() string {
return fmt.Sprintf("\tOS version: %s (API %d)\n\tDevice: %s\n\tModel: %s\n",
d.OSVer, d.OSAPILvl, d.Device, d.Model)
}
type Feedback struct {
Device Device
AppInfo string
LogTrace string
Sender string
SenderMail string
Message string
}
// rootHandler receives a JSON feedback payload over HTTP, logs the request,
// and forwards the feedback as an e-mail to the maintainer. It always
// responds 200; failures are only logged.
func rootHandler(w http.ResponseWriter, r *http.Request) {
	r.ParseForm()
	body, _ := ioutil.ReadAll(r.Body)
	fbstr := string(body)
	agent := r.Header.Get("user-agent")

	// Re-open the log file per request so appends keep working over the
	// server's lifetime; main() already pointed the log package here once.
	f, err := os.OpenFile("feedback.log", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	log.SetOutput(f)
	log.Println("Received feedback from", agent, "(", r.RemoteAddr, ")")

	decoder := json.NewDecoder(strings.NewReader(fbstr))
	var feedback Feedback
	// FIX: was `err := ...`, which fails to compile ("no new variables on
	// left side of :=") because err is already declared above.
	err = decoder.Decode(&feedback)
	if err != nil {
		// FIX: do not re-open the log file here — the previous code shadowed
		// `err` with the OpenFile result and logged the wrong error. Output
		// is already set above.
		log.Println("JSON decode error:", err)
	}

	// Only show sender if one is mentioned.
	senderstr := ""
	if feedback.Sender != "" {
		senderstr = fmt.Sprintf("Sender: %s <%s>", feedback.Sender, feedback.SenderMail)
	}

	// The message written by the app user is indented by a single tab for
	// clearer separation.
	msg := fmt.Sprintf("Alkomat 3000 Feedback\n%s\nDevice:\n%v\nApp: %s\nMessage:\n\n\t",
		senderstr, feedback.Device, feedback.AppInfo)
	msg += strings.Replace(feedback.Message, "\n", "\n\t", -1) + "\n"
	msg += "\nEnd of message\n"
	msg += "Log trace:\n" + feedback.LogTrace + "\n"

	if err = sendMail(Subject, msg); err != nil {
		log.Println("couldn't send mail:", err)
		return
	}
}
// sendMail sends a mail with the given subject and content. The content must
// have Unix newlines (\n) and end in a newline. These newlines will be
// converted to SMTP's CRLF automatically.
func sendMail(subject, content string) error {
	auth := smtp.PlainAuth("", FromAddr, Passwd, SMTPSrv)
	recipients := []string{ToAddr}

	// Assemble header, blank separator line, and CRLF-converted body.
	var b strings.Builder
	b.WriteString(fmt.Sprintf("Date: %s\r\nFrom: %s\r\nTo: %s\r\nSubject: %s\r\n",
		time.Now().Format(time.RFC822Z), FromAddr, ToAddr, subject))
	b.WriteString("\r\n")
	b.WriteString(strings.Replace(content, "\n", "\r\n", -1))

	return smtp.SendMail(SMTPSrv+":"+SMTPPort, auth, FromAddr, recipients, []byte(b.String()))
}
|
package raster
import (
"image"
"image/color"
"io"
)
func (p *Page) ParseColors(b []byte) ([]color.Color, error) {
// TODO support banded and planar
if p.Header.CUPSColorOrder != ChunkyPixels {
return nil, ErrUnsupported
}
switch p.Header.CUPSColorSpace {
case ColorSpaceBlack:
return p.parseColorsBlack(b)
case ColorSpaceCMYK:
return p.parseColorsCMYK(b)
default:
return nil, ErrUnsupported
}
}
func (p *Page) parseColorsBlack(b []byte) ([]color.Color, error) {
if p.Header.CUPSBitsPerColor != 1 {
return nil, ErrUnsupported
}
var colors []color.Color
for _, packet := range b {
for i := uint(0); i < 8; i++ {
if packet<<i&128 == 0 {
colors = append(colors, color.Gray{255})
} else {
colors = append(colors, color.Gray{0})
}
}
}
return colors, nil
}
// parseColorsCMYK decodes chunky 8-bit-per-channel CMYK data (4 bytes per
// pixel, C/M/Y/K order) into a slice of colors. The input length must be a
// positive multiple of 4.
func (p *Page) parseColorsCMYK(b []byte) ([]color.Color, error) {
	if p.Header.CUPSBitsPerColor != 8 {
		return nil, ErrUnsupported
	}
	if len(b)%4 != 0 || len(b) < 4 {
		return nil, ErrInvalidFormat
	}
	// Pre-size: exactly one color per 4 input bytes; avoids repeated
	// append growth.
	colors := make([]color.Color, 0, len(b)/4)
	for i := 0; i < len(b); i += 4 {
		// TODO does cups have a byte order for colors in a pixel and
		// do we need to swap bytes?
		colors = append(colors, color.CMYK{C: b[i], M: b[i+1], Y: b[i+2], K: b[i+3]})
	}
	return colors, nil
}
type ImageSetter interface {
Set(x, y int, c color.Color)
}
// Render renders a CUPS raster image onto any image.Image that
// implements the Set method.
func (p *Page) Render(img ImageSetter) error {
b := make([]byte, p.LineSize())
for y := uint32(0); y < p.Header.CUPSHeight; y++ {
err := p.ReadLine(b)
if err == io.EOF {
return io.ErrUnexpectedEOF
}
if err != nil {
return err
}
colors, err := p.ParseColors(b)
if err != nil {
return err
}
for x, color := range colors {
img.Set(x, int(y), color)
}
}
return nil
}
func (p *Page) rect() image.Rectangle {
return image.Rect(0, 0, int(p.Header.CUPSWidth), int(p.Header.CUPSHeight))
}
func (p *Page) Image() (image.Image, error) {
b := make([]byte, p.TotalSize())
err := p.ReadAll(b)
if err != nil {
return nil, err
}
// FIXME support color orders other than chunked
if p.Header.CUPSColorOrder != ChunkyPixels {
return nil, ErrUnsupported
}
switch p.Header.CUPSColorSpace {
case ColorSpaceBlack:
return &Monochrome{p: p, data: b}, nil
case ColorSpaceCMYK:
if p.Header.CUPSBitsPerColor != 8 {
return nil, ErrUnsupported
}
// TODO does cups have a byte order for colors in a pixel and
// do we need to swap bytes?
return &image.CMYK{
Pix: b,
Stride: int(p.Header.CUPSBytesPerLine),
Rect: p.rect(),
}, nil
default:
return nil, ErrUnsupported
}
}
var _ image.Image = (*Monochrome)(nil)
type Monochrome struct {
p *Page
data []byte
}
func (img *Monochrome) ColorModel() color.Model {
return color.GrayModel
}
func (img *Monochrome) Bounds() image.Rectangle {
return img.p.rect()
}
func (img *Monochrome) At(x, y int) color.Color {
idx := y*int(img.p.Header.CUPSBytesPerLine) + (x / 8)
if img.data[idx]<<uint(x%8)&128 == 0 {
return color.Gray{Y: 255}
}
return color.Gray{Y: 0}
}
raster: remove Page.Render
We now have Page.Image to return an image.Image instead.
package raster
import (
"image"
"image/color"
"io"
)
func (p *Page) ParseColors(b []byte) ([]color.Color, error) {
// TODO support banded and planar
if p.Header.CUPSColorOrder != ChunkyPixels {
return nil, ErrUnsupported
}
switch p.Header.CUPSColorSpace {
case ColorSpaceBlack:
return p.parseColorsBlack(b)
case ColorSpaceCMYK:
return p.parseColorsCMYK(b)
default:
return nil, ErrUnsupported
}
}
// parseColorsBlack decodes 1-bit black data into grayscale colors. Each input
// byte encodes 8 pixels, most significant bit first; a set bit means ink
// (black), a clear bit means paper (white).
func (p *Page) parseColorsBlack(b []byte) ([]color.Color, error) {
	if p.Header.CUPSBitsPerColor != 1 {
		return nil, ErrUnsupported
	}
	// Pre-size: exactly 8 pixels per input byte; avoids repeated append
	// growth.
	colors := make([]color.Color, 0, len(b)*8)
	for _, packet := range b {
		for i := uint(0); i < 8; i++ {
			if packet<<i&128 == 0 {
				colors = append(colors, color.Gray{255})
			} else {
				colors = append(colors, color.Gray{0})
			}
		}
	}
	return colors, nil
}
// parseColorsCMYK decodes chunky 8-bit-per-channel CMYK data (4 bytes per
// pixel, C/M/Y/K order) into a slice of colors. The input length must be a
// positive multiple of 4.
func (p *Page) parseColorsCMYK(b []byte) ([]color.Color, error) {
	if p.Header.CUPSBitsPerColor != 8 {
		return nil, ErrUnsupported
	}
	if len(b)%4 != 0 || len(b) < 4 {
		return nil, ErrInvalidFormat
	}
	// Pre-size: exactly one color per 4 input bytes; avoids repeated
	// append growth.
	colors := make([]color.Color, 0, len(b)/4)
	for i := 0; i < len(b); i += 4 {
		// TODO does cups have a byte order for colors in a pixel and
		// do we need to swap bytes?
		colors = append(colors, color.CMYK{C: b[i], M: b[i+1], Y: b[i+2], K: b[i+3]})
	}
	return colors, nil
}
func (p *Page) rect() image.Rectangle {
return image.Rect(0, 0, int(p.Header.CUPSWidth), int(p.Header.CUPSHeight))
}
func (p *Page) Image() (image.Image, error) {
b := make([]byte, p.TotalSize())
err := p.ReadAll(b)
if err != nil {
return nil, err
}
// FIXME support color orders other than chunked
if p.Header.CUPSColorOrder != ChunkyPixels {
return nil, ErrUnsupported
}
switch p.Header.CUPSColorSpace {
case ColorSpaceBlack:
return &Monochrome{p: p, data: b}, nil
case ColorSpaceCMYK:
if p.Header.CUPSBitsPerColor != 8 {
return nil, ErrUnsupported
}
// TODO does cups have a byte order for colors in a pixel and
// do we need to swap bytes?
return &image.CMYK{
Pix: b,
Stride: int(p.Header.CUPSBytesPerLine),
Rect: p.rect(),
}, nil
default:
return nil, ErrUnsupported
}
}
var _ image.Image = (*Monochrome)(nil)
type Monochrome struct {
p *Page
data []byte
}
func (img *Monochrome) ColorModel() color.Model {
return color.GrayModel
}
func (img *Monochrome) Bounds() image.Rectangle {
return img.p.rect()
}
func (img *Monochrome) At(x, y int) color.Color {
idx := y*int(img.p.Header.CUPSBytesPerLine) + (x / 8)
if img.data[idx]<<uint(x%8)&128 == 0 {
return color.Gray{Y: 255}
}
return color.Gray{Y: 0}
}
|
package vm
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"os"
"regexp"
"strconv"
"strings"
"text/template"
"time"
bosherr "github.com/cloudfoundry/bosh-utils/errors"
boshlog "github.com/cloudfoundry/bosh-utils/logger"
sl "github.com/maximilien/softlayer-go/softlayer"
bslcommon "github.com/cloudfoundry/bosh-softlayer-cpi/softlayer/common"
bslcdisk "github.com/cloudfoundry/bosh-softlayer-cpi/softlayer/disk"
bslcstem "github.com/cloudfoundry/bosh-softlayer-cpi/softlayer/stemcell"
util "github.com/cloudfoundry/bosh-softlayer-cpi/util"
datatypes "github.com/maximilien/softlayer-go/data_types"
sldatatypes "github.com/maximilien/softlayer-go/data_types"
)
const (
SOFTLAYER_VM_OS_RELOAD_TAG = "OSReload"
SOFTLAYER_VM_LOG_TAG = "SoftLayerVM"
ROOT_USER_NAME = "root"
)
type SoftLayerVM struct {
id int
softLayerClient sl.Client
agentEnvService AgentEnvService
sshClient util.SshClient
logger boshlog.Logger
}
func NewSoftLayerVM(id int, softLayerClient sl.Client, sshClient util.SshClient, agentEnvService AgentEnvService, logger boshlog.Logger) SoftLayerVM {
bslcommon.TIMEOUT = 60 * time.Minute
bslcommon.POLLING_INTERVAL = 10 * time.Second
return SoftLayerVM{
id: id,
softLayerClient: softLayerClient,
agentEnvService: agentEnvService,
sshClient: sshClient,
logger: logger,
}
}
func (vm SoftLayerVM) ID() int { return vm.id }
// Delete removes the VM. Worker VMs (FQDN contains "-worker-") are deleted
// outright; any other VM only has its metadata tags cleared via SetMetadata.
func (vm SoftLayerVM) Delete(agentID string) error {
	virtualGuest, err := bslcommon.GetObjectDetailsOnVirtualGuest(vm.softLayerClient, vm.ID())
	if err != nil {
		return bosherr.WrapErrorf(err, "Cannot get details from virtual guest with id: %d.", vm.ID())
	}

	// FIX: was `strings.contains` (lowercase), which does not compile; the
	// exported function is strings.Contains.
	if strings.Contains(virtualGuest.FullyQualifiedDomainName, "-worker-") {
		return vm.DeleteVM()
	}

	metadata := VMMetadata{}
	return vm.SetMetadata(metadata)
}
func (vm SoftLayerVM) DeleteVM() error {
virtualGuestService, err := vm.softLayerClient.GetSoftLayer_Virtual_Guest_Service()
if err != nil {
return bosherr.WrapError(err, "Creating SoftLayer VirtualGuestService from client")
}
vmCID := vm.ID()
err = bslcommon.WaitForVirtualGuestToHaveNoRunningTransactions(vm.softLayerClient, vmCID)
if err != nil {
if !strings.Contains(err.Error(), "HTTP error code") {
return bosherr.WrapError(err, fmt.Sprintf("Waiting for VirtualGuest `%d` to have no pending transactions before deleting vm", vmCID))
}
}
deleted, err := virtualGuestService.DeleteObject(vm.ID())
if err != nil {
if !strings.Contains(err.Error(), "HTTP error code") {
return bosherr.WrapError(err, "Deleting SoftLayer VirtualGuest from client")
}
}
if !deleted {
return bosherr.WrapError(nil, "Did not delete SoftLayer VirtualGuest from client")
}
return nil
}
// Reboot issues a soft reboot of the virtual guest via the SoftLayer API.
func (vm SoftLayerVM) Reboot() error {
	guestService, err := vm.softLayerClient.GetSoftLayer_Virtual_Guest_Service()
	if err != nil {
		return bosherr.WrapError(err, "Creating SoftLayer VirtualGuestService from client")
	}

	ok, err := guestService.RebootSoft(vm.ID())
	switch {
	case err != nil:
		return bosherr.WrapError(err, "Rebooting (soft) SoftLayer VirtualGuest from client")
	case !ok:
		// The API call succeeded but reported the reboot was not performed.
		return bosherr.WrapError(nil, "Did not reboot (soft) SoftLayer VirtualGuest from client")
	}
	return nil
}
func (vm SoftLayerVM) ReloadOS(stemcell bslcstem.Stemcell) error {
reload_OS_Config := sldatatypes.Image_Template_Config{
ImageTemplateId: strconv.Itoa(stemcell.ID()),
}
virtualGuestService, err := vm.softLayerClient.GetSoftLayer_Virtual_Guest_Service()
if err != nil {
return bosherr.WrapError(err, "Creating VirtualGuestService from SoftLayer client")
}
err = bslcommon.WaitForVirtualGuestToHaveNoRunningTransactions(vm.softLayerClient, vm.ID())
if err != nil {
return bosherr.WrapError(err, fmt.Sprintf("Waiting for VirtualGuest %d to have no pending transactions before os reload", vm.ID()))
}
vm.logger.Info(SOFTLAYER_VM_OS_RELOAD_TAG, fmt.Sprintf("No transaction is running on this VM %d", vm.ID()))
err = virtualGuestService.ReloadOperatingSystem(vm.ID(), reload_OS_Config)
if err != nil {
return bosherr.WrapError(err, "Failed to reload OS on the specified VirtualGuest from SoftLayer client")
}
err = vm.postCheckActiveTransactionsForOSReload(vm.softLayerClient)
if err != nil {
return err
}
return nil
}
// SetMetadata converts the metadata to SoftLayer tags and applies them to the
// virtual guest. A metadata set that yields no tags is a no-op.
func (vm SoftLayerVM) SetMetadata(vmMetadata VMMetadata) error {
	tags, err := vm.extractTagsFromVMMetadata(vmMetadata)
	if err != nil {
		return err
	}
	// Check below needed since Golang strings.Split returns [""] on
	// strings.Split("", ","). (The two previous early-return checks are
	// folded into one.)
	if len(tags) == 0 || (len(tags) == 1 && tags[0] == "") {
		return nil
	}

	virtualGuestService, err := vm.softLayerClient.GetSoftLayer_Virtual_Guest_Service()
	if err != nil {
		return bosherr.WrapError(err, "Creating SoftLayer VirtualGuestService from client")
	}

	// FIX: the error case and the unsuccessful case previously produced two
	// back-to-back identical checks wrapping the same error; fold them.
	success, err := virtualGuestService.SetTags(vm.ID(), tags)
	if err != nil || !success {
		return bosherr.WrapErrorf(err, "Settings tags on SoftLayer VirtualGuest `%d`", vm.ID())
	}
	return nil
}
func (vm SoftLayerVM) ConfigureNetworks(networks Networks) error {
virtualGuest, err := bslcommon.GetObjectDetailsOnVirtualGuest(vm.softLayerClient, vm.ID())
if err != nil {
return bosherr.WrapErrorf(err, "Cannot get details from virtual guest with id: %d.", virtualGuest.Id)
}
oldAgentEnv, err := vm.agentEnvService.Fetch()
if err != nil {
return bosherr.WrapErrorf(err, "Failed to unmarshal userdata from virutal guest with id: %d.", virtualGuest.Id)
}
oldAgentEnv.Networks = networks
err = vm.agentEnvService.Update(oldAgentEnv)
if err != nil {
return bosherr.WrapError(err, fmt.Sprintf("Configuring network setting on VirtualGuest with id: `%d`", virtualGuest.Id))
}
return nil
}
// AttachDisk grants the virtual guest access to the given iSCSI volume, waits
// for the device to appear on the guest, and records the device path in the
// agent's userdata so the disk can be mounted.
func (vm SoftLayerVM) AttachDisk(disk bslcdisk.Disk) error {
	virtualGuest, volume, err := vm.fetchVMandIscsiVolume(vm.ID(), disk.ID())
	if err != nil {
		return bosherr.WrapError(err, fmt.Sprintf("Failed to fetch disk `%d` and virtual gusest `%d`", disk.ID(), virtualGuest.Id))
	}
	networkStorageService, err := vm.softLayerClient.GetSoftLayer_Network_Storage_Service()
	if err != nil {
		return bosherr.WrapError(err, "Cannot get network storage service.")
	}
	// Poll until the guest appears on the volume's allowed list, up to
	// bslcommon.TIMEOUT.
	// NOTE(review): an error from HasAllowedVirtualGuest silently skips the
	// whole grant loop below — confirm that is intended.
	allowed, err := networkStorageService.HasAllowedVirtualGuest(disk.ID(), vm.ID())
	totalTime := time.Duration(0)
	if err == nil && allowed == false {
		for totalTime < bslcommon.TIMEOUT {
			// NOTE(review): this `err` shadows the outer err for the current
			// iteration only.
			allowable, err := networkStorageService.AttachIscsiVolume(virtualGuest, disk.ID())
			if err != nil {
				// "HTTP error code" failures are treated as transient and
				// retried; anything else aborts the attach.
				if !strings.Contains(err.Error(), "HTTP error code") {
					return bosherr.WrapError(err, fmt.Sprintf("Granting volume access to vitrual guest %d", virtualGuest.Id))
				}
			} else {
				if allowable {
					break
				}
			}
			totalTime += bslcommon.POLLING_INTERVAL
			time.Sleep(bslcommon.POLLING_INTERVAL)
		}
	}
	if totalTime >= bslcommon.TIMEOUT {
		return bosherr.Error("Waiting for grantting access to virutal guest TIME OUT!")
	}
	hasMultiPath, err := vm.hasMulitPathToolBasedOnShellScript(virtualGuest)
	if err != nil {
		return bosherr.WrapError(err, fmt.Sprintf("Failed to get multipath information from virtual guest `%d`", virtualGuest.Id))
	}
	// Wait for the OS on the guest to see the new device; the resulting
	// device name depends on whether multipath is in use.
	deviceName, err := vm.waitForVolumeAttached(virtualGuest, volume, hasMultiPath)
	if err != nil {
		return bosherr.WrapError(err, fmt.Sprintf("Failed to attach volume `%d` to virtual guest `%d`", disk.ID(), virtualGuest.Id))
	}
	// Record the new persistent disk in the agent env (userdata).
	oldAgentEnv, err := vm.agentEnvService.Fetch()
	if err != nil {
		return bosherr.WrapErrorf(err, "Failed to unmarshal userdata from virutal guest with id: %d.", virtualGuest.Id)
	}
	var newAgentEnv AgentEnv
	if hasMultiPath {
		newAgentEnv = oldAgentEnv.AttachPersistentDisk(strconv.Itoa(disk.ID()), "/dev/mapper/"+deviceName)
	} else {
		newAgentEnv = oldAgentEnv.AttachPersistentDisk(strconv.Itoa(disk.ID()), "/dev/"+deviceName)
	}
	err = vm.agentEnvService.Update(newAgentEnv)
	if err != nil {
		return bosherr.WrapError(err, fmt.Sprintf("Configuring userdata on VirtualGuest with id: `%d`", virtualGuest.Id))
	}
	return nil
}
// DetachDisk detaches the iSCSI volume on the guest, revokes the guest's
// access to it on the SoftLayer side, and removes the disk from the agent's
// userdata. If exactly one persistent disk remains afterwards, its iSCSI
// target is re-discovered and the disk is re-mounted at /var/vcap/store.
func (vm SoftLayerVM) DetachDisk(disk bslcdisk.Disk) error {
	virtualGuest, volume, err := vm.fetchVMandIscsiVolume(vm.ID(), disk.ID())
	if err != nil {
		return bosherr.WrapError(err, fmt.Sprintf("failed in disk `%d` from virtual gusest `%d`", disk.ID(), virtualGuest.Id))
	}
	hasMultiPath, err := vm.hasMulitPathToolBasedOnShellScript(virtualGuest)
	if err != nil {
		return bosherr.WrapError(err, fmt.Sprintf("Failed to get multipath information from virtual guest `%d`", virtualGuest.Id))
	}
	// Detach on the guest first, then revoke access on the SoftLayer side.
	err = vm.detachVolumeBasedOnShellScript(virtualGuest, volume, hasMultiPath)
	if err != nil {
		return bosherr.WrapErrorf(err, "Failed to detach volume with id %d from virtual guest with id: %d.", volume.Id, virtualGuest.Id)
	}
	networkStorageService, err := vm.softLayerClient.GetSoftLayer_Network_Storage_Service()
	if err != nil {
		return bosherr.WrapError(err, "Cannot get network storage service.")
	}
	// Only revoke if access is currently granted.
	allowed, err := networkStorageService.HasAllowedVirtualGuest(disk.ID(), vm.ID())
	if err == nil && allowed == true {
		err = networkStorageService.DetachIscsiVolume(virtualGuest, disk.ID())
	}
	if err != nil {
		return bosherr.WrapError(err, fmt.Sprintf("Failed to revoke access of disk `%d` from virtual gusest `%d`", disk.ID(), virtualGuest.Id))
	}
	// Drop the disk from the agent env so the agent stops tracking it.
	oldAgentEnv, err := vm.agentEnvService.Fetch()
	if err != nil {
		return bosherr.WrapErrorf(err, "Failed to unmarshal userdata from virutal guest with id: %d.", virtualGuest.Id)
	}
	newAgentEnv := oldAgentEnv.DetachPersistentDisk(strconv.Itoa(disk.ID()))
	err = vm.agentEnvService.Update(newAgentEnv)
	if err != nil {
		return bosherr.WrapError(err, fmt.Sprintf("Configuring userdata on VirtualGuest with id: `%d`", virtualGuest.Id))
	}
	if len(newAgentEnv.Disks.Persistent) == 1 {
		// Exactly one persistent disk left: rediscover and remount it.
		for key, devicePath := range newAgentEnv.Disks.Persistent {
			leftDiskId, err := strconv.Atoi(key)
			if err != nil {
				return bosherr.WrapError(err, fmt.Sprintf("Failed to transfer disk id %s from string to int", key))
			}
			vm.logger.Debug(SOFTLAYER_VM_LOG_TAG, "Left Disk Id %d", leftDiskId)
			vm.logger.Debug(SOFTLAYER_VM_LOG_TAG, "Left Disk device path %s", devicePath)
			virtualGuest, volume, err := vm.fetchVMandIscsiVolume(vm.ID(), leftDiskId)
			if err != nil {
				return bosherr.WrapError(err, fmt.Sprintf("Failed to fetch disk `%d` and virtual gusest `%d`", disk.ID(), virtualGuest.Id))
			}
			_, err = vm.discoveryOpenIscsiTargetsBasedOnShellScript(virtualGuest, volume)
			if err != nil {
				return bosherr.WrapError(err, fmt.Sprintf("Failed to reattach volume `%s` to virtual guest `%d`", key, virtualGuest.Id))
			}
			// The sleep gives the kernel time to create the device node
			// before mounting its first partition.
			command := fmt.Sprintf("sleep 5; mount %s-part1 /var/vcap/store", devicePath)
			_, err = vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, command)
			if err != nil {
				return bosherr.WrapError(err, "mount /var/vcap/store")
			}
		}
	}
	return nil
}
// Private methods
// extractTagsFromVMMetadata builds a SoftLayer tag list from the subset of
// VM metadata keys the CPI tags guests with (compiling/job/index/deployment).
// Each matching entry becomes a "key:value" element. Returns an error when a
// matching value is not a string.
func (vm SoftLayerVM) extractTagsFromVMMetadata(vmMetadata VMMetadata) ([]string, error) {
	tags := []string{}
	status := ""
	for key, value := range vmMetadata {
		if key == "compiling" || key == "job" || key == "index" || key == "deployment" {
			// Fix: the second result of a type assertion is a bool; the
			// original named it "err" and tested "!err", which reads like
			// error handling. Use the conventional "ok".
			stringValue, ok := value.(string)
			if !ok {
				return []string{}, bosherr.Errorf("Cannot convert tags metadata value `%v` to string", value)
			}
			if status == "" {
				status = key + ":" + stringValue
			} else {
				status = status + "," + key + ":" + stringValue
			}
		}
	}
	// Fix: the original re-ran parseTags on every map iteration; once after
	// the loop is equivalent. The guard preserves the original result for an
	// empty metadata map ([] rather than [""]).
	if len(vmMetadata) > 0 {
		tags = vm.parseTags(status)
	}
	return tags, nil
}
// parseTags splits a comma-separated "key:value" list into its elements.
// Note: strings.Split("") yields [""], which callers must tolerate.
func (vm SoftLayerVM) parseTags(value string) []string {
	parts := strings.Split(value, ",")
	return parts
}
// waitForVolumeAttached prepares open-iscsi on the guest (backs up the conf,
// writes the initiator name and CHAP config, restarts the daemon, then
// discovers and logs in to the volume's targets) and polls until a disk
// device not present in the pre-attach snapshot appears, returning its name.
// Gives up after bslcommon.TIMEOUT.
func (vm SoftLayerVM) waitForVolumeAttached(virtualGuest datatypes.SoftLayer_Virtual_Guest, volume datatypes.SoftLayer_Network_Storage, hasMultiPath bool) (string, error) {
	// Snapshot of device names before the attach; used to spot the new disk.
	oldDisks, err := vm.getIscsiDeviceNamesBasedOnShellScript(virtualGuest, hasMultiPath)
	if err != nil {
		return "", bosherr.WrapError(err, fmt.Sprintf("Failed to get devices names from virtual guest `%d`", virtualGuest.Id))
	}
	if len(oldDisks) > 2 {
		// Fix: "Too manay" -> "Too many"; bosherr.Errorf replaces
		// bosherr.Error(fmt.Sprintf(...)).
		return "", bosherr.Errorf("Too many persistent disks attached to virtual guest `%d`", virtualGuest.Id)
	}

	credential, err := vm.getAllowedHostCredential(virtualGuest)
	if err != nil {
		return "", bosherr.WrapError(err, fmt.Sprintf("Failed to get iscsi host auth from virtual guest `%d`", virtualGuest.Id))
	}

	_, err = vm.backupOpenIscsiConfBasedOnShellScript(virtualGuest)
	if err != nil {
		return "", bosherr.WrapError(err, fmt.Sprintf("Failed to backup open iscsi conf files from virtual guest `%d`", virtualGuest.Id))
	}

	_, err = vm.writeOpenIscsiInitiatornameBasedOnShellScript(virtualGuest, credential)
	if err != nil {
		return "", bosherr.WrapError(err, fmt.Sprintf("Failed to write open iscsi initiatorname from virtual guest `%d`", virtualGuest.Id))
	}

	_, err = vm.writeOpenIscsiConfBasedOnShellScript(virtualGuest, volume, credential)
	if err != nil {
		return "", bosherr.WrapError(err, fmt.Sprintf("Failed to write open iscsi conf from virtual guest `%d`", virtualGuest.Id))
	}

	_, err = vm.restartOpenIscsiBasedOnShellScript(virtualGuest)
	if err != nil {
		return "", bosherr.WrapError(err, fmt.Sprintf("Failed to restart open iscsi from virtual guest `%d`", virtualGuest.Id))
	}

	_, err = vm.discoveryOpenIscsiTargetsBasedOnShellScript(virtualGuest, volume)
	if err != nil {
		return "", bosherr.WrapErrorf(err, "Failed to attach volume with id %d to virtual guest with id: %d.", volume.Id, virtualGuest.Id)
	}

	var deviceName string
	totalTime := time.Duration(0)
	for totalTime < bslcommon.TIMEOUT {
		newDisks, err := vm.getIscsiDeviceNamesBasedOnShellScript(virtualGuest, hasMultiPath)
		if err != nil {
			return "", bosherr.WrapError(err, fmt.Sprintf("Failed to get devices names from virtual guest `%d`", virtualGuest.Id))
		}

		// No devices before the attach: the first one to appear is the new disk.
		if len(oldDisks) == 0 && len(newDisks) > 0 {
			return newDisks[0], nil
		}

		// Otherwise report any device absent from the pre-attach snapshot.
		for _, newDisk := range newDisks {
			included := false
			for _, oldDisk := range oldDisks {
				if strings.EqualFold(newDisk, oldDisk) {
					included = true
				}
			}
			if !included {
				deviceName = newDisk
			}
		}
		if len(deviceName) > 0 {
			return deviceName, nil
		}

		totalTime += bslcommon.POLLING_INTERVAL
		time.Sleep(bslcommon.POLLING_INTERVAL)
	}
	return "", bosherr.Errorf("Failed to attach disk '%d' to virtual guest '%d'", volume.Id, virtualGuest.Id)
}
// hasMulitPathToolBasedOnShellScript reports whether the multipath tool is
// installed on the guest by probing `command -v multipath` over SSH.
// (The "Mulit" typo in the method name is kept for caller compatibility.)
func (vm SoftLayerVM) hasMulitPathToolBasedOnShellScript(virtualGuest datatypes.SoftLayer_Virtual_Guest) (bool, error) {
	// Fix: fmt.Sprintf with no verbs was a no-op; use the literal directly.
	command := "echo `command -v multipath`"
	output, err := vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, command)
	if err != nil {
		return false, err
	}
	return len(output) > 0 && strings.Contains(output, "multipath"), nil
}
// getIscsiDeviceNamesBasedOnShellScript lists the guest's disk device names.
// With multipath it parses `dmsetup ls` (skipping "-part1" partition maps);
// without it, it parses /proc/partitions for whole disks (sda, sdb, ...).
func (vm SoftLayerVM) getIscsiDeviceNamesBasedOnShellScript(virtualGuest datatypes.SoftLayer_Virtual_Guest, hasMultiPath bool) ([]string, error) {
	devices := []string{}

	// Fix: the original called regexp.MatchString inside the loops,
	// recompiling the pattern per line; compile once up front. Also dropped
	// the no-op fmt.Sprintf wrappers around the constant commands.
	partitionRegexp := regexp.MustCompile("-part1")
	wholeDiskRegexp := regexp.MustCompile("sd[a-z]$")

	if hasMultiPath {
		result, err := vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, "dmsetup ls")
		if err != nil {
			return devices, err
		}
		if strings.Contains(result, "No devices found") {
			return devices, nil
		}
		for _, line := range strings.Split(strings.Trim(result, "\n"), "\n") {
			if !partitionRegexp.MatchString(line) {
				devices = append(devices, strings.Fields(line)[0])
			}
		}
	} else {
		result, err := vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, "cat /proc/partitions")
		if err != nil {
			return devices, err
		}
		for _, line := range strings.Split(strings.Trim(result, "\n"), "\n") {
			if wholeDiskRegexp.MatchString(line) {
				vals := strings.Fields(line)
				devices = append(devices, vals[len(vals)-1])
			}
		}
	}
	return devices, nil
}
// fetchVMandIscsiVolume resolves both the virtual guest object and the iSCSI
// volume object from their SoftLayer ids in one call.
func (vm SoftLayerVM) fetchVMandIscsiVolume(vmId int, volumeId int) (datatypes.SoftLayer_Virtual_Guest, datatypes.SoftLayer_Network_Storage, error) {
	var (
		noGuest  datatypes.SoftLayer_Virtual_Guest
		noVolume datatypes.SoftLayer_Network_Storage
	)

	virtualGuestService, err := vm.softLayerClient.GetSoftLayer_Virtual_Guest_Service()
	if err != nil {
		return noGuest, noVolume, bosherr.WrapError(err, "Cannot get softlayer virtual guest service.")
	}
	networkStorageService, err := vm.softLayerClient.GetSoftLayer_Network_Storage_Service()
	if err != nil {
		return noGuest, noVolume, bosherr.WrapError(err, "Cannot get network storage service.")
	}

	virtualGuest, err := virtualGuestService.GetObject(vmId)
	if err != nil {
		return noGuest, noVolume, bosherr.WrapErrorf(err, "Cannot get virtual guest with id: %d", vmId)
	}
	volume, err := networkStorageService.GetIscsiVolume(volumeId)
	if err != nil {
		return noGuest, noVolume, bosherr.WrapErrorf(err, "Cannot get iSCSI volume with id: %d", volumeId)
	}

	return virtualGuest, volume, nil
}
// getAllowedHostCredential fetches the guest's iSCSI allowed-host record and
// its CHAP credential, returning them bundled as an AllowedHostCredential
// (IQN + username + password).
func (vm SoftLayerVM) getAllowedHostCredential(virtualGuest datatypes.SoftLayer_Virtual_Guest) (AllowedHostCredential, error) {
	none := AllowedHostCredential{}

	virtualGuestService, err := vm.softLayerClient.GetSoftLayer_Virtual_Guest_Service()
	if err != nil {
		return none, bosherr.WrapError(err, "Cannot get softlayer virtual guest service.")
	}

	allowedHost, err := virtualGuestService.GetAllowedHost(virtualGuest.Id)
	if err != nil {
		return none, bosherr.WrapErrorf(err, "Cannot get allowed host with instance id: %d", virtualGuest.Id)
	}
	// A zero id means SoftLayer returned no usable allowed-host record.
	if allowedHost.Id == 0 {
		return none, bosherr.Errorf("Cannot get allowed host with instance id: %d", virtualGuest.Id)
	}

	allowedHostService, err := vm.softLayerClient.GetSoftLayer_Network_Storage_Allowed_Host_Service()
	if err != nil {
		return none, bosherr.WrapError(err, "Cannot get network storage allowed host service.")
	}
	cred, err := allowedHostService.GetCredential(allowedHost.Id)
	if err != nil {
		return none, bosherr.WrapErrorf(err, "Cannot get credential with allowed host id: %d", allowedHost.Id)
	}

	return AllowedHostCredential{
		Iqn:      allowedHost.Name,
		Username: cred.Username,
		Password: cred.Password,
	}, nil
}
// backupOpenIscsiConfBasedOnShellScript copies /etc/iscsi/iscsid.conf to
// iscsid.conf.save on the guest before the CPI overwrites it.
func (vm SoftLayerVM) backupOpenIscsiConfBasedOnShellScript(virtualGuest datatypes.SoftLayer_Virtual_Guest) (bool, error) {
	// Fix: fmt.Sprintf with no verbs was a no-op; use the literal directly.
	command := "cp /etc/iscsi/iscsid.conf{,.save}"
	_, err := vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, command)
	if err != nil {
		return false, bosherr.WrapError(err, "backuping open iscsi conf")
	}
	return true, nil
}
// restartOpenIscsiBasedOnShellScript restarts the open-iscsi daemon on the
// guest so it picks up the freshly written configuration.
func (vm SoftLayerVM) restartOpenIscsiBasedOnShellScript(virtualGuest datatypes.SoftLayer_Virtual_Guest) (bool, error) {
	// Fix: fmt.Sprintf with no verbs was a no-op; use the literal directly.
	command := "/etc/init.d/open-iscsi restart"
	_, err := vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, command)
	if err != nil {
		return false, bosherr.WrapError(err, "restarting open iscsi")
	}
	return true, nil
}
// discoveryOpenIscsiTargetsBasedOnShellScript runs iscsiadm sendtargets
// discovery against the volume's backend address and then logs in to all
// discovered nodes. The "sleep 5" delays give the daemon time to settle.
func (vm SoftLayerVM) discoveryOpenIscsiTargetsBasedOnShellScript(virtualGuest datatypes.SoftLayer_Virtual_Guest, volume datatypes.SoftLayer_Network_Storage) (bool, error) {
	command := fmt.Sprintf("sleep 5; iscsiadm -m discovery -t sendtargets -p %s", volume.ServiceResourceBackendIpAddress)
	_, err := vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, command)
	if err != nil {
		// Fix: "discoverying" -> "discovering".
		return false, bosherr.WrapError(err, "discovering open iscsi targets")
	}

	command = "sleep 5; echo `iscsiadm -m node -l`"
	_, err = vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, command)
	if err != nil {
		return false, bosherr.WrapError(err, "login iscsi targets")
	}
	return true, nil
}
// writeOpenIscsiInitiatornameBasedOnShellScript writes the allowed-host IQN
// into /etc/iscsi/initiatorname.iscsi on the guest. A credential without an
// IQN is silently treated as a no-op success.
func (vm SoftLayerVM) writeOpenIscsiInitiatornameBasedOnShellScript(virtualGuest datatypes.SoftLayer_Virtual_Guest, credential AllowedHostCredential) (bool, error) {
	if len(credential.Iqn) == 0 {
		return true, nil
	}
	command := fmt.Sprintf("echo 'InitiatorName=%s' > /etc/iscsi/initiatorname.iscsi", credential.Iqn)
	if _, err := vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, command); err != nil {
		return false, bosherr.WrapError(err, "Writing to /etc/iscsi/initiatorname.iscsi")
	}
	return true, nil
}
// writeOpenIscsiConfBasedOnShellScript renders etcIscsidConfTemplate and
// uploads it to /etc/iscsi/iscsid.conf on the guest. With a CHAP password
// the template is fed the credential ({{.Username}}/{{.Password}});
// otherwise it is fed the volume object.
func (vm SoftLayerVM) writeOpenIscsiConfBasedOnShellScript(virtualGuest datatypes.SoftLayer_Virtual_Guest, volume datatypes.SoftLayer_Network_Storage, credential AllowedHostCredential) (bool, error) {
	buffer := bytes.NewBuffer([]byte{})
	t := template.Must(template.New("open_iscsid_conf").Parse(etcIscsidConfTemplate))
	if len(credential.Password) == 0 {
		err := t.Execute(buffer, volume)
		if err != nil {
			return false, bosherr.WrapError(err, "Generating config from template")
		}
	} else {
		err := t.Execute(buffer, credential)
		if err != nil {
			return false, bosherr.WrapError(err, "Generating config from template")
		}
	}

	// Fix: the temp-file and write failures below reused the copy-pasted
	// "Generating config from template" message; give each its own.
	file, err := ioutil.TempFile(os.TempDir(), "iscsid_conf_")
	if err != nil {
		return false, bosherr.WrapError(err, "Creating temp iscsid.conf file")
	}
	defer os.Remove(file.Name())

	_, err = file.WriteString(buffer.String())
	if err != nil {
		return false, bosherr.WrapError(err, "Writing rendered config to temp iscsid.conf file")
	}

	if err = vm.uploadFile(virtualGuest, file.Name(), "/etc/iscsi/iscsid.conf"); err != nil {
		return false, bosherr.WrapError(err, "Writing to /etc/iscsi/iscsid.conf")
	}
	return true, nil
}
// etcIscsidConfTemplate is the /etc/iscsi/iscsid.conf content pushed to the
// guest by writeOpenIscsiConfBasedOnShellScript. It is rendered either with
// the AllowedHostCredential (CHAP {{.Username}}/{{.Password}}) or, when no
// password exists, with the volume object — TODO confirm the volume type
// also exposes Username/Password fields for this template.
const etcIscsidConfTemplate = `# Generated by bosh-agent
node.startup = automatic
node.session.auth.authmethod = CHAP
node.session.auth.username = {{.Username}}
node.session.auth.password = {{.Password}}
discovery.sendtargets.auth.authmethod = CHAP
discovery.sendtargets.auth.username = {{.Username}}
discovery.sendtargets.auth.password = {{.Password}}
node.session.timeo.replacement_timeout = 120
node.conn[0].timeo.login_timeout = 15
node.conn[0].timeo.logout_timeout = 15
node.conn[0].timeo.noop_out_interval = 10
node.conn[0].timeo.noop_out_timeout = 15
node.session.iscsi.InitialR2T = No
node.session.iscsi.ImmediateData = Yes
node.session.iscsi.FirstBurstLength = 262144
node.session.iscsi.MaxBurstLength = 16776192
node.conn[0].iscsi.MaxRecvDataSegmentLength = 65536
`
// detachVolumeBasedOnShellScript tears down the guest-side iSCSI state for a
// volume: unmounts /var/vcap/store if mounted, stops open-iscsi, removes the
// cached send_targets/nodes records, restarts open-iscsi, and (if present)
// restarts the multipath daemon.
func (vm SoftLayerVM) detachVolumeBasedOnShellScript(virtualGuest datatypes.SoftLayer_Virtual_Guest, volume datatypes.SoftLayer_Network_Storage, hasMultiPath bool) error {
	// umount /var/vcap/store in case of a read-only mount.
	isMounted, err := vm.isMountPoint(virtualGuest, "/var/vcap/store")
	if err != nil {
		return bosherr.WrapError(err, "check mount point /var/vcap/store")
	}
	if isMounted {
		// Note: fmt.Sprintf wrappers with no verbs were dropped throughout;
		// the commands are plain literals.
		_, err := vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, "umount -l /var/vcap/store")
		if err != nil {
			return bosherr.WrapError(err, "umount -l /var/vcap/store")
		}
		vm.logger.Debug(SOFTLAYER_VM_LOG_TAG, "umount -l /var/vcap/store", nil)
	}

	// Stop open-iscsi.
	_, err = vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, "/etc/init.d/open-iscsi stop")
	if err != nil {
		// Fix: message said "Restarting" for the stop step.
		return bosherr.WrapError(err, "Stopping open iscsi")
	}
	vm.logger.Debug(SOFTLAYER_VM_LOG_TAG, "/etc/init.d/open-iscsi stop", nil)

	// Clean up /etc/iscsi/send_targets/.
	_, err = vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, "rm -rf /etc/iscsi/send_targets")
	if err != nil {
		return bosherr.WrapError(err, "Removing /etc/iscsi/send_targets")
	}
	vm.logger.Debug(SOFTLAYER_VM_LOG_TAG, "rm -rf /etc/iscsi/send_targets", nil)

	// Clean up /etc/iscsi/nodes/.
	_, err = vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, "rm -rf /etc/iscsi/nodes")
	if err != nil {
		return bosherr.WrapError(err, "Removing /etc/iscsi/nodes")
	}
	vm.logger.Debug(SOFTLAYER_VM_LOG_TAG, "rm -rf /etc/iscsi/nodes", nil)

	// Start open-iscsi again with the cleaned state.
	_, err = vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, "/etc/init.d/open-iscsi start")
	if err != nil {
		// Fix: message said "Restarting" for the start step.
		return bosherr.WrapError(err, "Starting open iscsi")
	}
	vm.logger.Debug(SOFTLAYER_VM_LOG_TAG, "/etc/init.d/open-iscsi start", nil)

	if hasMultiPath {
		// Restart the dm-multipath daemon so it drops the removed paths.
		_, err = vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, "service multipath-tools restart")
		if err != nil {
			return bosherr.WrapError(err, "Restarting Multipath deamon")
		}
		vm.logger.Debug(SOFTLAYER_VM_LOG_TAG, "service multipath-tools restart", nil)
	}

	return nil
}
// findOpenIscsiTargetBasedOnShellScript lists the target IQNs of the guest's
// active iSCSI sessions via `iscsiadm -m session -P3`.
func (vm SoftLayerVM) findOpenIscsiTargetBasedOnShellScript(virtualGuest datatypes.SoftLayer_Virtual_Guest) ([]string, error) {
	command := "sleep 5 ; iscsiadm -m session -P3 | awk '/Target: /{print $2}'"
	output, err := vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, command)
	if err != nil {
		return []string{}, err
	}

	targets := []string{}
	lines := strings.Split(strings.Trim(output, "\n"), "\n")
	for _, line := range lines {
		targets = append(targets, strings.Split(line, ",")[0])
	}

	if len(targets) > 0 {
		return targets, nil
	}
	// Fix: errors.New(fmt.Sprintf(...)) with no verbs reduced to errors.New.
	// NOTE(review): strings.Split always yields at least one element, so this
	// branch is unreachable in practice (empty output produces [""]) — kept
	// for behavioral parity; consider filtering empty lines instead.
	return []string{}, errors.New("Cannot find matched iSCSI device")
}
// findOpenIscsiPortalsBasedOnShellScript extracts the portal addresses of
// sessions that have an attached SCSI disk, using an awk filter over
// `iscsiadm -m session -P3` output.
func (vm SoftLayerVM) findOpenIscsiPortalsBasedOnShellScript(virtualGuest datatypes.SoftLayer_Virtual_Guest, volume datatypes.SoftLayer_Network_Storage) ([]string, error) {
	command := "sleep 5 ; iscsiadm -m session -P3 | awk 'BEGIN{ lel=0} { if($0 ~ /Current Portal: /){ portal = $3 ; lel=NR } else { if( NR==(lel+46) && $0 ~ /Attached scsi disk /) {print portal}}}'"
	output, err := vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, command)
	if err != nil {
		return []string{}, err
	}

	portals := []string{}
	for _, line := range strings.Split(strings.Trim(output, "\n"), "\n") {
		// Keep only the address part before any comma-separated suffix.
		portals = append(portals, strings.Split(line, ",")[0])
	}
	return portals, nil
}
// getRootPassword returns the guest's root password from its operating
// system password list, or the empty string when none is recorded.
func (vm SoftLayerVM) getRootPassword(virtualGuest datatypes.SoftLayer_Virtual_Guest) string {
	for _, entry := range virtualGuest.OperatingSystem.Passwords {
		if entry.Username == ROOT_USER_NAME {
			return entry.Password
		}
	}
	return ""
}
// postCheckActiveTransactionsForOSReload polls until the OS-reload
// transaction appears on the guest (or times out), then waits for the guest
// to reach RUNNING. Transient "HTTP error code" API failures are tolerated
// in both phases.
func (vm SoftLayerVM) postCheckActiveTransactionsForOSReload(softLayerClient sl.Client) error {
	virtualGuestService, err := softLayerClient.GetSoftLayer_Virtual_Guest_Service()
	if err != nil {
		return bosherr.WrapError(err, "Creating VirtualGuestService from SoftLayer client")
	}

	totalTime := time.Duration(0)
	for totalTime < bslcommon.TIMEOUT {
		activeTransactions, err := virtualGuestService.GetActiveTransactions(vm.ID())
		if err != nil {
			if !strings.Contains(err.Error(), "HTTP error code") {
				return bosherr.WrapError(err, "Getting active transactions from SoftLayer client")
			}
		}
		// On a tolerated error activeTransactions is its zero value, so this
		// falls through to another polling round.
		if len(activeTransactions) > 0 {
			vm.logger.Info(SOFTLAYER_VM_OS_RELOAD_TAG, "OS Reload transaction started")
			break
		}
		totalTime += bslcommon.POLLING_INTERVAL
		time.Sleep(bslcommon.POLLING_INTERVAL)
	}
	if totalTime >= bslcommon.TIMEOUT {
		// Fix: errors.New(fmt.Sprintf(...)) with no verbs reduced to errors.New.
		return errors.New("Waiting for OS Reload transaction to start TIME OUT!")
	}

	err = bslcommon.WaitForVirtualGuest(vm.softLayerClient, vm.ID(), "RUNNING")
	if err != nil {
		if !strings.Contains(err.Error(), "HTTP error code") {
			return bosherr.WrapError(err, fmt.Sprintf("PowerOn failed with VirtualGuest id %d", vm.ID()))
		}
	}
	vm.logger.Info(SOFTLAYER_VM_OS_RELOAD_TAG, fmt.Sprintf("The virtual guest %d is powered on", vm.ID()))

	return nil
}
// postCheckActiveTransactionsForDeleteVM waits for the delete-VM transaction
// to start and then to finish. Phase 1 polls until any active transaction
// appears; phase 2 polls until either the guest is gone, or the current
// transaction's average duration suggests a long-running delete (in which
// case it stops waiting and defers to the SoftLayer portal). Transient
// "HTTP error code" API failures are tolerated throughout.
func (vm SoftLayerVM) postCheckActiveTransactionsForDeleteVM(softLayerClient sl.Client, virtualGuestId int) error {
	virtualGuestService, err := softLayerClient.GetSoftLayer_Virtual_Guest_Service()
	if err != nil {
		return bosherr.WrapError(err, "Creating VirtualGuestService from SoftLayer client")
	}
	// Phase 1: wait (up to TIMEOUT) for the delete transaction to appear.
	totalTime := time.Duration(0)
	for totalTime < bslcommon.TIMEOUT {
		activeTransactions, err := virtualGuestService.GetActiveTransactions(virtualGuestId)
		if err != nil {
			if !strings.Contains(err.Error(), "HTTP error code") {
				return bosherr.WrapError(err, "Getting active transactions from SoftLayer client")
			}
		}
		if len(activeTransactions) > 0 {
			vm.logger.Info(SOFTLAYER_VM_LOG_TAG, "Delete VM transaction started", nil)
			break
		}
		totalTime += bslcommon.POLLING_INTERVAL
		time.Sleep(bslcommon.POLLING_INTERVAL)
	}
	if totalTime >= bslcommon.TIMEOUT {
		return errors.New(fmt.Sprintf("Waiting for DeleteVM transaction to start TIME OUT!"))
	}
	// Phase 2: wait for the delete to complete or be judged long-running.
	totalTime = time.Duration(0)
	for totalTime < bslcommon.TIMEOUT {
		// A lookup error or zero id means the guest no longer exists.
		vm1, err := virtualGuestService.GetObject(virtualGuestId)
		if err != nil || vm1.Id == 0 {
			vm.logger.Info(SOFTLAYER_VM_LOG_TAG, "VM doesn't exist. Delete done", nil)
			break
		}
		activeTransaction, err := virtualGuestService.GetActiveTransaction(virtualGuestId)
		if err != nil {
			if !strings.Contains(err.Error(), "HTTP error code") {
				return bosherr.WrapError(err, "Getting active transactions from SoftLayer client")
			}
		}
		// AverageDuration is a decimal string that may lack a leading zero
		// (e.g. ".5"); normalize so ParseFloat accepts it. Parse failures
		// are deliberately treated as 0 (short transaction).
		averageDuration := activeTransaction.TransactionStatus.AverageDuration
		if strings.HasPrefix(averageDuration, ".") || averageDuration == "" {
			averageDuration = "0" + averageDuration
		}
		averageTransactionDuration, err := strconv.ParseFloat(averageDuration, 32)
		if err != nil {
			averageTransactionDuration = 0
		}
		// Threshold of 30 — presumably minutes; TODO confirm the unit
		// SoftLayer uses for AverageDuration.
		if averageTransactionDuration > 30 {
			vm.logger.Info(SOFTLAYER_VM_LOG_TAG, "Deleting VM instance had been launched and it is a long transaction. Please check Softlayer Portal", nil)
			break
		}
		vm.logger.Info(SOFTLAYER_VM_LOG_TAG, "This is a short transaction, waiting for all active transactions to complete", nil)
		totalTime += bslcommon.POLLING_INTERVAL
		time.Sleep(bslcommon.POLLING_INTERVAL)
	}
	if totalTime >= bslcommon.TIMEOUT {
		return errors.New(fmt.Sprintf("After deleting a vm, waiting for active transactions to complete TIME OUT!"))
	}
	return nil
}
// isMountPoint reports whether the given path is currently a mount point on
// the guest, based on the output of `mount`.
func (vm SoftLayerVM) isMountPoint(virtualGuest datatypes.SoftLayer_Virtual_Guest, path string) (bool, error) {
	mounts, err := vm.searchMounts(virtualGuest)
	if err != nil {
		return false, bosherr.WrapError(err, "Searching mounts")
	}
	for _, m := range mounts {
		if m.MountPoint == path {
			return true, nil
		}
	}
	return false, nil
}
// searchMounts runs `mount` on the guest and parses each line of the form
// "/dev/sda on /boot type ext2 (rw)" into a Mount{PartitionPath, MountPoint}.
func (vm SoftLayerVM) searchMounts(virtualGuest datatypes.SoftLayer_Virtual_Guest) ([]Mount, error) {
	var mounts []Mount
	stdout, err := vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, "mount")
	if err != nil {
		return mounts, bosherr.WrapError(err, "Running mount")
	}

	for _, mountEntry := range strings.Split(stdout, "\n") {
		if mountEntry == "" {
			continue
		}
		mountFields := strings.Fields(mountEntry)
		// Fix: guard against malformed lines — indexing mountFields[2]
		// panicked when a line had fewer than three fields.
		if len(mountFields) < 3 {
			continue
		}
		mounts = append(mounts, Mount{
			PartitionPath: mountFields[0],
			MountPoint:    mountFields[2],
		})
	}

	return mounts, nil
}
// execCommand runs a shell command on the guest as root over SSH and
// returns its output.
func (vm SoftLayerVM) execCommand(virtualGuest datatypes.SoftLayer_Virtual_Guest, command string) (string, error) {
	return vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, command)
}
// uploadFile copies a local file to the guest over SSH as root.
func (vm SoftLayerVM) uploadFile(virtualGuest datatypes.SoftLayer_Virtual_Guest, srcFile string, destFile string) error {
	return vm.sshClient.UploadFile(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, srcFile, destFile)
}
// downloadFile copies a file from the guest to the local machine over SSH
// as root.
func (vm SoftLayerVM) downloadFile(virtualGuest datatypes.SoftLayer_Virtual_Guest, srcFile string, destFile string) error {
	return vm.sshClient.DownloadFile(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, srcFile, destFile)
}
// update typo (stray VCS commit message embedded in the source; kept as a comment so the file parses — should be removed)
package vm
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"os"
"regexp"
"strconv"
"strings"
"text/template"
"time"
bosherr "github.com/cloudfoundry/bosh-utils/errors"
boshlog "github.com/cloudfoundry/bosh-utils/logger"
sl "github.com/maximilien/softlayer-go/softlayer"
bslcommon "github.com/cloudfoundry/bosh-softlayer-cpi/softlayer/common"
bslcdisk "github.com/cloudfoundry/bosh-softlayer-cpi/softlayer/disk"
bslcstem "github.com/cloudfoundry/bosh-softlayer-cpi/softlayer/stemcell"
util "github.com/cloudfoundry/bosh-softlayer-cpi/util"
datatypes "github.com/maximilien/softlayer-go/data_types"
sldatatypes "github.com/maximilien/softlayer-go/data_types"
)
const (
	// Log tags used with the boshlog.Logger.
	SOFTLAYER_VM_OS_RELOAD_TAG = "OSReload"
	SOFTLAYER_VM_LOG_TAG       = "SoftLayerVM"
	// User account used for all SSH access to guests.
	ROOT_USER_NAME = "root"
)
// SoftLayerVM implements the CPI VM abstraction on top of a SoftLayer
// virtual guest, driving it through the SoftLayer API and direct root SSH
// access to the guest.
type SoftLayerVM struct {
	id int // SoftLayer virtual guest id

	softLayerClient sl.Client       // SoftLayer API client
	agentEnvService AgentEnvService // reads/writes the BOSH agent env (userdata)
	sshClient       util.SshClient  // exec/upload/download on the guest
	logger          boshlog.Logger
}
// NewSoftLayerVM builds a SoftLayerVM handle for an existing virtual guest.
// NOTE(review): it overwrites the package-level bslcommon.TIMEOUT and
// bslcommon.POLLING_INTERVAL on every call — a global side effect that
// affects all users of that package; confirm before relocating.
func NewSoftLayerVM(id int, softLayerClient sl.Client, sshClient util.SshClient, agentEnvService AgentEnvService, logger boshlog.Logger) SoftLayerVM {
	bslcommon.TIMEOUT = 60 * time.Minute
	bslcommon.POLLING_INTERVAL = 10 * time.Second

	return SoftLayerVM{
		id: id,

		softLayerClient: softLayerClient,
		agentEnvService: agentEnvService,
		sshClient:       sshClient,

		logger: logger,
	}
}
// ID returns the SoftLayer virtual guest id backing this VM.
func (vm SoftLayerVM) ID() int {
	return vm.id
}
// Delete removes the VM. Guests whose FQDN contains "-worker-" are actually
// destroyed via DeleteVM; any other guest is only reset to empty metadata
// and left in place — presumably so it can be reused via OS reload (TODO
// confirm with the CPI's create path).
func (vm SoftLayerVM) Delete(agentID string) error {
	virtualGuest, err := bslcommon.GetObjectDetailsOnVirtualGuest(vm.softLayerClient, vm.ID())
	if err != nil {
		return bosherr.WrapErrorf(err, "Cannot get details from virtual guest with id: %d.", vm.ID())
	}

	if strings.Contains(virtualGuest.FullyQualifiedDomainName, "-worker-") {
		return vm.DeleteVM()
	}

	metadata := VMMetadata{}
	return vm.SetMetadata(metadata)
}
// DeleteVM destroys the virtual guest through the SoftLayer API. Transient
// API failures whose message contains "HTTP error code" are deliberately
// tolerated both while draining pending transactions and on the delete call
// itself; any other error aborts.
func (vm SoftLayerVM) DeleteVM() error {
	virtualGuestService, err := vm.softLayerClient.GetSoftLayer_Virtual_Guest_Service()
	if err != nil {
		return bosherr.WrapError(err, "Creating SoftLayer VirtualGuestService from client")
	}

	vmCID := vm.ID()
	// Wait for in-flight transactions to finish before deleting.
	err = bslcommon.WaitForVirtualGuestToHaveNoRunningTransactions(vm.softLayerClient, vmCID)
	if err != nil {
		if !strings.Contains(err.Error(), "HTTP error code") {
			return bosherr.WrapError(err, fmt.Sprintf("Waiting for VirtualGuest `%d` to have no pending transactions before deleting vm", vmCID))
		}
	}

	deleted, err := virtualGuestService.DeleteObject(vm.ID())
	if err != nil {
		if !strings.Contains(err.Error(), "HTTP error code") {
			return bosherr.WrapError(err, "Deleting SoftLayer VirtualGuest from client")
		}
	}

	if !deleted {
		// NOTE(review): wraps a nil error; bosherr.Error would express this
		// more directly.
		return bosherr.WrapError(nil, "Did not delete SoftLayer VirtualGuest from client")
	}

	return nil
}
// Reboot issues a soft reboot against the virtual guest.
func (vm SoftLayerVM) Reboot() error {
	virtualGuestService, err := vm.softLayerClient.GetSoftLayer_Virtual_Guest_Service()
	if err != nil {
		return bosherr.WrapError(err, "Creating SoftLayer VirtualGuestService from client")
	}

	ok, err := virtualGuestService.RebootSoft(vm.ID())
	if err != nil {
		return bosherr.WrapError(err, "Rebooting (soft) SoftLayer VirtualGuest from client")
	}
	if !ok {
		return bosherr.WrapError(nil, "Did not reboot (soft) SoftLayer VirtualGuest from client")
	}

	return nil
}
// ReloadOS reinstalls the guest's operating system from the stemcell's
// image template, then waits for the reload transaction to start and for
// the guest to come back RUNNING.
func (vm SoftLayerVM) ReloadOS(stemcell bslcstem.Stemcell) error {
	// Fix: renamed from reload_OS_Config — Go identifiers use mixedCaps,
	// not underscores.
	reloadOSConfig := sldatatypes.Image_Template_Config{
		ImageTemplateId: strconv.Itoa(stemcell.ID()),
	}

	virtualGuestService, err := vm.softLayerClient.GetSoftLayer_Virtual_Guest_Service()
	if err != nil {
		return bosherr.WrapError(err, "Creating VirtualGuestService from SoftLayer client")
	}

	err = bslcommon.WaitForVirtualGuestToHaveNoRunningTransactions(vm.softLayerClient, vm.ID())
	if err != nil {
		return bosherr.WrapError(err, fmt.Sprintf("Waiting for VirtualGuest %d to have no pending transactions before os reload", vm.ID()))
	}
	vm.logger.Info(SOFTLAYER_VM_OS_RELOAD_TAG, fmt.Sprintf("No transaction is running on this VM %d", vm.ID()))

	err = virtualGuestService.ReloadOperatingSystem(vm.ID(), reloadOSConfig)
	if err != nil {
		return bosherr.WrapError(err, "Failed to reload OS on the specified VirtualGuest from SoftLayer client")
	}

	return vm.postCheckActiveTransactionsForOSReload(vm.softLayerClient)
}
// SetMetadata converts BOSH VM metadata into SoftLayer tags and applies
// them to the virtual guest. Empty tag sets are a no-op.
func (vm SoftLayerVM) SetMetadata(vmMetadata VMMetadata) error {
	tags, err := vm.extractTagsFromVMMetadata(vmMetadata)
	if err != nil {
		return err
	}

	// strings.Split("", ",") returns [""], so treat that as "no tags" too.
	if len(tags) == 0 || (len(tags) == 1 && tags[0] == "") {
		return nil
	}

	virtualGuestService, err := vm.softLayerClient.GetSoftLayer_Virtual_Guest_Service()
	if err != nil {
		return bosherr.WrapError(err, "Creating SoftLayer VirtualGuestService from client")
	}

	// Fix: the original had two back-to-back checks (!success, then
	// err != nil) returning the identical wrapped error; fold into one.
	success, err := virtualGuestService.SetTags(vm.ID(), tags)
	if err != nil || !success {
		return bosherr.WrapErrorf(err, "Settings tags on SoftLayer VirtualGuest `%d`", vm.ID())
	}

	return nil
}
// ConfigureNetworks replaces the network settings in the agent env
// (userdata) for this VM.
func (vm SoftLayerVM) ConfigureNetworks(networks Networks) error {
	virtualGuest, err := bslcommon.GetObjectDetailsOnVirtualGuest(vm.softLayerClient, vm.ID())
	if err != nil {
		return bosherr.WrapErrorf(err, "Cannot get details from virtual guest with id: %d.", virtualGuest.Id)
	}

	agentEnv, err := vm.agentEnvService.Fetch()
	if err != nil {
		return bosherr.WrapErrorf(err, "Failed to unmarshal userdata from virutal guest with id: %d.", virtualGuest.Id)
	}

	agentEnv.Networks = networks
	if err = vm.agentEnvService.Update(agentEnv); err != nil {
		return bosherr.WrapError(err, fmt.Sprintf("Configuring network setting on VirtualGuest with id: `%d`", virtualGuest.Id))
	}

	return nil
}
// AttachDisk grants the guest access to the iSCSI volume backing disk,
// waits for the new device to appear on the guest, and records its path in
// the agent env (/dev/mapper/... with multipath, /dev/... without).
func (vm SoftLayerVM) AttachDisk(disk bslcdisk.Disk) error {
	virtualGuest, volume, err := vm.fetchVMandIscsiVolume(vm.ID(), disk.ID())
	if err != nil {
		// Fix: "gusest" -> "guest".
		return bosherr.WrapError(err, fmt.Sprintf("Failed to fetch disk `%d` and virtual guest `%d`", disk.ID(), virtualGuest.Id))
	}

	networkStorageService, err := vm.softLayerClient.GetSoftLayer_Network_Storage_Service()
	if err != nil {
		return bosherr.WrapError(err, "Cannot get network storage service.")
	}

	// Grant access only if not already allowed, retrying on transient
	// SoftLayer "HTTP error code" failures until bslcommon.TIMEOUT.
	allowed, err := networkStorageService.HasAllowedVirtualGuest(disk.ID(), vm.ID())
	totalTime := time.Duration(0)
	if err == nil && !allowed {
		for totalTime < bslcommon.TIMEOUT {
			// Note: err here intentionally shadows the outer err.
			allowable, err := networkStorageService.AttachIscsiVolume(virtualGuest, disk.ID())
			if err != nil {
				if !strings.Contains(err.Error(), "HTTP error code") {
					// Fix: "vitrual" -> "virtual".
					return bosherr.WrapError(err, fmt.Sprintf("Granting volume access to virtual guest %d", virtualGuest.Id))
				}
			} else if allowable {
				break
			}

			totalTime += bslcommon.POLLING_INTERVAL
			time.Sleep(bslcommon.POLLING_INTERVAL)
		}
	}
	if totalTime >= bslcommon.TIMEOUT {
		// Fix: "grantting" -> "granting", "virutal" -> "virtual".
		return bosherr.Error("Waiting for granting access to virtual guest TIME OUT!")
	}

	hasMultiPath, err := vm.hasMulitPathToolBasedOnShellScript(virtualGuest)
	if err != nil {
		return bosherr.WrapError(err, fmt.Sprintf("Failed to get multipath information from virtual guest `%d`", virtualGuest.Id))
	}

	deviceName, err := vm.waitForVolumeAttached(virtualGuest, volume, hasMultiPath)
	if err != nil {
		return bosherr.WrapError(err, fmt.Sprintf("Failed to attach volume `%d` to virtual guest `%d`", disk.ID(), virtualGuest.Id))
	}

	oldAgentEnv, err := vm.agentEnvService.Fetch()
	if err != nil {
		// Fix: "virutal" -> "virtual".
		return bosherr.WrapErrorf(err, "Failed to unmarshal userdata from virtual guest with id: %d.", virtualGuest.Id)
	}

	var newAgentEnv AgentEnv
	if hasMultiPath {
		newAgentEnv = oldAgentEnv.AttachPersistentDisk(strconv.Itoa(disk.ID()), "/dev/mapper/"+deviceName)
	} else {
		newAgentEnv = oldAgentEnv.AttachPersistentDisk(strconv.Itoa(disk.ID()), "/dev/"+deviceName)
	}

	err = vm.agentEnvService.Update(newAgentEnv)
	if err != nil {
		return bosherr.WrapError(err, fmt.Sprintf("Configuring userdata on VirtualGuest with id: `%d`", virtualGuest.Id))
	}

	return nil
}
// DetachDisk tears down the guest-side iSCSI state for the disk's volume,
// revokes the guest's access to the volume, removes the disk from the agent
// env, and — when exactly one persistent disk remains — rediscovers and
// remounts that remaining disk on /var/vcap/store.
func (vm SoftLayerVM) DetachDisk(disk bslcdisk.Disk) error {
	virtualGuest, volume, err := vm.fetchVMandIscsiVolume(vm.ID(), disk.ID())
	if err != nil {
		// Fix: clarified message and "gusest" -> "guest".
		return bosherr.WrapError(err, fmt.Sprintf("Failed to fetch disk `%d` and virtual guest `%d`", disk.ID(), virtualGuest.Id))
	}

	hasMultiPath, err := vm.hasMulitPathToolBasedOnShellScript(virtualGuest)
	if err != nil {
		return bosherr.WrapError(err, fmt.Sprintf("Failed to get multipath information from virtual guest `%d`", virtualGuest.Id))
	}

	err = vm.detachVolumeBasedOnShellScript(virtualGuest, volume, hasMultiPath)
	if err != nil {
		return bosherr.WrapErrorf(err, "Failed to detach volume with id %d from virtual guest with id: %d.", volume.Id, virtualGuest.Id)
	}

	networkStorageService, err := vm.softLayerClient.GetSoftLayer_Network_Storage_Service()
	if err != nil {
		return bosherr.WrapError(err, "Cannot get network storage service.")
	}

	// Revoke the guest's access only when it is currently allowed.
	allowed, err := networkStorageService.HasAllowedVirtualGuest(disk.ID(), vm.ID())
	if err == nil && allowed {
		err = networkStorageService.DetachIscsiVolume(virtualGuest, disk.ID())
	}
	if err != nil {
		// Fix: "gusest" -> "guest".
		return bosherr.WrapError(err, fmt.Sprintf("Failed to revoke access of disk `%d` from virtual guest `%d`", disk.ID(), virtualGuest.Id))
	}

	oldAgentEnv, err := vm.agentEnvService.Fetch()
	if err != nil {
		// Fix: "virutal" -> "virtual".
		return bosherr.WrapErrorf(err, "Failed to unmarshal userdata from virtual guest with id: %d.", virtualGuest.Id)
	}
	newAgentEnv := oldAgentEnv.DetachPersistentDisk(strconv.Itoa(disk.ID()))
	err = vm.agentEnvService.Update(newAgentEnv)
	if err != nil {
		return bosherr.WrapError(err, fmt.Sprintf("Configuring userdata on VirtualGuest with id: `%d`", virtualGuest.Id))
	}

	// With exactly one persistent disk left, re-login to its targets and
	// remount it on /var/vcap/store (the teardown above disconnected it).
	if len(newAgentEnv.Disks.Persistent) == 1 {
		for key, devicePath := range newAgentEnv.Disks.Persistent {
			leftDiskId, err := strconv.Atoi(key)
			if err != nil {
				return bosherr.WrapError(err, fmt.Sprintf("Failed to transfer disk id %s from string to int", key))
			}
			vm.logger.Debug(SOFTLAYER_VM_LOG_TAG, "Left Disk Id %d", leftDiskId)
			vm.logger.Debug(SOFTLAYER_VM_LOG_TAG, "Left Disk device path %s", devicePath)
			// Note: these shadow the outer virtualGuest/volume on purpose —
			// they refer to the remaining disk, not the detached one.
			virtualGuest, volume, err := vm.fetchVMandIscsiVolume(vm.ID(), leftDiskId)
			if err != nil {
				// Fix: "gusest" -> "guest".
				return bosherr.WrapError(err, fmt.Sprintf("Failed to fetch disk `%d` and virtual guest `%d`", disk.ID(), virtualGuest.Id))
			}

			_, err = vm.discoveryOpenIscsiTargetsBasedOnShellScript(virtualGuest, volume)
			if err != nil {
				return bosherr.WrapError(err, fmt.Sprintf("Failed to reattach volume `%s` to virtual guest `%d`", key, virtualGuest.Id))
			}

			command := fmt.Sprintf("sleep 5; mount %s-part1 /var/vcap/store", devicePath)
			_, err = vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, command)
			if err != nil {
				return bosherr.WrapError(err, "mount /var/vcap/store")
			}
		}
	}

	return nil
}
// Private methods
// extractTagsFromVMMetadata builds a SoftLayer tag list from the subset of
// VM metadata keys the CPI tags guests with (compiling/job/index/deployment).
// Each matching entry becomes a "key:value" element. Returns an error when a
// matching value is not a string.
func (vm SoftLayerVM) extractTagsFromVMMetadata(vmMetadata VMMetadata) ([]string, error) {
	tags := []string{}
	status := ""
	for key, value := range vmMetadata {
		if key == "compiling" || key == "job" || key == "index" || key == "deployment" {
			// Fix: the second result of a type assertion is a bool; the
			// original named it "err" and tested "!err", which reads like
			// error handling. Use the conventional "ok".
			stringValue, ok := value.(string)
			if !ok {
				return []string{}, bosherr.Errorf("Cannot convert tags metadata value `%v` to string", value)
			}
			if status == "" {
				status = key + ":" + stringValue
			} else {
				status = status + "," + key + ":" + stringValue
			}
		}
	}
	// Fix: the original re-ran parseTags on every map iteration; once after
	// the loop is equivalent. The guard preserves the original result for an
	// empty metadata map ([] rather than [""]).
	if len(vmMetadata) > 0 {
		tags = vm.parseTags(status)
	}
	return tags, nil
}
// parseTags splits a comma-separated tag string into its components.
func (vm SoftLayerVM) parseTags(value string) []string {
	tags := strings.Split(value, ",")
	return tags
}
// waitForVolumeAttached configures open-iscsi on the virtual guest (backs up
// its config, writes the initiator name and iscsid.conf, restarts the daemon,
// discovers and logs in to targets) and then polls until a block device that
// was not present beforehand appears, returning that device name. Times out
// after bslcommon.TIMEOUT.
func (vm SoftLayerVM) waitForVolumeAttached(virtualGuest datatypes.SoftLayer_Virtual_Guest, volume datatypes.SoftLayer_Network_Storage, hasMultiPath bool) (string, error) {
	// Snapshot existing devices so the newly attached one can be identified.
	oldDisks, err := vm.getIscsiDeviceNamesBasedOnShellScript(virtualGuest, hasMultiPath)
	if err != nil {
		return "", bosherr.WrapError(err, fmt.Sprintf("Failed to get devices names from virtual guest `%d`", virtualGuest.Id))
	}
	if len(oldDisks) > 2 {
		return "", bosherr.Error(fmt.Sprintf("Too manay persistent disks attached to virtual guest `%d`", virtualGuest.Id))
	}
	// Fetch CHAP credentials, then rewrite and restart open-iscsi.
	credential, err := vm.getAllowedHostCredential(virtualGuest)
	if err != nil {
		return "", bosherr.WrapError(err, fmt.Sprintf("Failed to get iscsi host auth from virtual guest `%d`", virtualGuest.Id))
	}
	_, err = vm.backupOpenIscsiConfBasedOnShellScript(virtualGuest)
	if err != nil {
		return "", bosherr.WrapError(err, fmt.Sprintf("Failed to backup open iscsi conf files from virtual guest `%d`", virtualGuest.Id))
	}
	_, err = vm.writeOpenIscsiInitiatornameBasedOnShellScript(virtualGuest, credential)
	if err != nil {
		return "", bosherr.WrapError(err, fmt.Sprintf("Failed to write open iscsi initiatorname from virtual guest `%d`", virtualGuest.Id))
	}
	_, err = vm.writeOpenIscsiConfBasedOnShellScript(virtualGuest, volume, credential)
	if err != nil {
		return "", bosherr.WrapError(err, fmt.Sprintf("Failed to write open iscsi conf from virtual guest `%d`", virtualGuest.Id))
	}
	_, err = vm.restartOpenIscsiBasedOnShellScript(virtualGuest)
	if err != nil {
		return "", bosherr.WrapError(err, fmt.Sprintf("Failed to restart open iscsi from virtual guest `%d`", virtualGuest.Id))
	}
	_, err = vm.discoveryOpenIscsiTargetsBasedOnShellScript(virtualGuest, volume)
	if err != nil {
		return "", bosherr.WrapErrorf(err, "Failed to attach volume with id %d to virtual guest with id: %d.", volume.Id, virtualGuest.Id)
	}
	// Poll for a new device name until the timeout elapses.
	var deviceName string
	totalTime := time.Duration(0)
	for totalTime < bslcommon.TIMEOUT {
		newDisks, err := vm.getIscsiDeviceNamesBasedOnShellScript(virtualGuest, hasMultiPath)
		if err != nil {
			return "", bosherr.WrapError(err, fmt.Sprintf("Failed to get devices names from virtual guest `%d`", virtualGuest.Id))
		}
		// No devices before: the first one to appear is the new disk.
		if len(oldDisks) == 0 {
			if len(newDisks) > 0 {
				deviceName = newDisks[0]
				return deviceName, nil
			}
		}
		// Otherwise pick any device absent from the old snapshot
		// (case-insensitive comparison).
		var included bool
		for _, newDisk := range newDisks {
			for _, oldDisk := range oldDisks {
				if strings.EqualFold(newDisk, oldDisk) {
					included = true
				}
			}
			if !included {
				deviceName = newDisk
			}
			included = false
		}
		if len(deviceName) > 0 {
			return deviceName, nil
		}
		totalTime += bslcommon.POLLING_INTERVAL
		time.Sleep(bslcommon.POLLING_INTERVAL)
	}
	return "", bosherr.Errorf("Failed to attach disk '%d' to virtual guest '%d'", volume.Id, virtualGuest.Id)
}
// hasMulitPathToolBasedOnShellScript reports whether the multipath tool is
// installed on the guest by probing `command -v multipath` over SSH.
// (The "Mulit" typo in the name is kept for compatibility with callers.)
func (vm SoftLayerVM) hasMulitPathToolBasedOnShellScript(virtualGuest datatypes.SoftLayer_Virtual_Guest) (bool, error) {
	// Plain literal; fmt.Sprintf with no verbs was pointless.
	command := "echo `command -v multipath`"
	output, err := vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, command)
	if err != nil {
		return false, err
	}
	return len(output) > 0 && strings.Contains(output, "multipath"), nil
}
// getIscsiDeviceNamesBasedOnShellScript lists candidate iSCSI block devices
// on the guest. With multipath it parses `dmsetup ls`, skipping "-part1"
// partition entries; otherwise it scans /proc/partitions for whole-disk
// names matching sd[a-z].
func (vm SoftLayerVM) getIscsiDeviceNamesBasedOnShellScript(virtualGuest datatypes.SoftLayer_Virtual_Guest, hasMultiPath bool) ([]string, error) {
	devices := []string{}
	if hasMultiPath {
		result, err := vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, "dmsetup ls")
		if err != nil {
			return devices, err
		}
		if strings.Contains(result, "No devices found") {
			return devices, nil
		}
		// Compile the pattern once instead of per output line, as the
		// original did via regexp.MatchString in the loop.
		partRegexp := regexp.MustCompile("-part1")
		for _, line := range strings.Split(strings.Trim(result, "\n"), "\n") {
			if !partRegexp.MatchString(line) {
				devices = append(devices, strings.Fields(line)[0])
			}
		}
	} else {
		result, err := vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, "cat /proc/partitions")
		if err != nil {
			return devices, err
		}
		diskRegexp := regexp.MustCompile("sd[a-z]$")
		for _, line := range strings.Split(strings.Trim(result, "\n"), "\n") {
			if diskRegexp.MatchString(line) {
				vals := strings.Fields(line)
				devices = append(devices, vals[len(vals)-1])
			}
		}
	}
	return devices, nil
}
// fetchVMandIscsiVolume loads the virtual guest and the iSCSI volume with
// the given ids from the SoftLayer API.
func (vm SoftLayerVM) fetchVMandIscsiVolume(vmId int, volumeId int) (datatypes.SoftLayer_Virtual_Guest, datatypes.SoftLayer_Network_Storage, error) {
	noGuest := datatypes.SoftLayer_Virtual_Guest{}
	noVolume := datatypes.SoftLayer_Network_Storage{}

	guestService, err := vm.softLayerClient.GetSoftLayer_Virtual_Guest_Service()
	if err != nil {
		return noGuest, noVolume, bosherr.WrapError(err, "Cannot get softlayer virtual guest service.")
	}
	storageService, err := vm.softLayerClient.GetSoftLayer_Network_Storage_Service()
	if err != nil {
		return noGuest, noVolume, bosherr.WrapError(err, "Cannot get network storage service.")
	}

	guest, err := guestService.GetObject(vmId)
	if err != nil {
		return noGuest, noVolume, bosherr.WrapErrorf(err, "Cannot get virtual guest with id: %d", vmId)
	}
	vol, err := storageService.GetIscsiVolume(volumeId)
	if err != nil {
		return noGuest, noVolume, bosherr.WrapErrorf(err, "Cannot get iSCSI volume with id: %d", volumeId)
	}
	return guest, vol, nil
}
// getAllowedHostCredential fetches the iSCSI allowed-host record for the
// guest and returns its IQN together with the CHAP username/password.
func (vm SoftLayerVM) getAllowedHostCredential(virtualGuest datatypes.SoftLayer_Virtual_Guest) (AllowedHostCredential, error) {
	none := AllowedHostCredential{}

	guestService, err := vm.softLayerClient.GetSoftLayer_Virtual_Guest_Service()
	if err != nil {
		return none, bosherr.WrapError(err, "Cannot get softlayer virtual guest service.")
	}
	allowedHost, err := guestService.GetAllowedHost(virtualGuest.Id)
	if err != nil {
		return none, bosherr.WrapErrorf(err, "Cannot get allowed host with instance id: %d", virtualGuest.Id)
	}
	// A zero Id means the API returned no usable record.
	if allowedHost.Id == 0 {
		return none, bosherr.Errorf("Cannot get allowed host with instance id: %d", virtualGuest.Id)
	}

	hostService, err := vm.softLayerClient.GetSoftLayer_Network_Storage_Allowed_Host_Service()
	if err != nil {
		return none, bosherr.WrapError(err, "Cannot get network storage allowed host service.")
	}
	cred, err := hostService.GetCredential(allowedHost.Id)
	if err != nil {
		return none, bosherr.WrapErrorf(err, "Cannot get credential with allowed host id: %d", allowedHost.Id)
	}

	return AllowedHostCredential{
		Iqn:      allowedHost.Name,
		Username: cred.Username,
		Password: cred.Password,
	}, nil
}
// backupOpenIscsiConfBasedOnShellScript copies /etc/iscsi/iscsid.conf to
// iscsid.conf.save on the guest before it is rewritten.
func (vm SoftLayerVM) backupOpenIscsiConfBasedOnShellScript(virtualGuest datatypes.SoftLayer_Virtual_Guest) (bool, error) {
	// Plain literal; the no-verb fmt.Sprintf was pointless.
	command := "cp /etc/iscsi/iscsid.conf{,.save}"
	_, err := vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, command)
	if err != nil {
		// Fixed "backuping" typo in the original message.
		return false, bosherr.WrapError(err, "backing up open iscsi conf")
	}
	return true, nil
}
// restartOpenIscsiBasedOnShellScript restarts the open-iscsi service on the
// guest via its init script.
func (vm SoftLayerVM) restartOpenIscsiBasedOnShellScript(virtualGuest datatypes.SoftLayer_Virtual_Guest) (bool, error) {
	// Plain literal; the no-verb fmt.Sprintf was pointless.
	command := "/etc/init.d/open-iscsi restart"
	_, err := vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, command)
	if err != nil {
		return false, bosherr.WrapError(err, "restarting open iscsi")
	}
	return true, nil
}
// discoveryOpenIscsiTargetsBasedOnShellScript runs iscsiadm sendtargets
// discovery against the volume's backend address and then logs in to the
// discovered nodes.
func (vm SoftLayerVM) discoveryOpenIscsiTargetsBasedOnShellScript(virtualGuest datatypes.SoftLayer_Virtual_Guest, volume datatypes.SoftLayer_Network_Storage) (bool, error) {
	command := fmt.Sprintf("sleep 5; iscsiadm -m discovery -t sendtargets -p %s", volume.ServiceResourceBackendIpAddress)
	_, err := vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, command)
	if err != nil {
		// Fixed "discoverying" typo in the original message.
		return false, bosherr.WrapError(err, "discovering open iscsi targets")
	}
	command = "sleep 5; echo `iscsiadm -m node -l`"
	_, err = vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, command)
	if err != nil {
		return false, bosherr.WrapError(err, "login iscsi targets")
	}
	return true, nil
}
// writeOpenIscsiInitiatornameBasedOnShellScript writes the allowed host's
// IQN to /etc/iscsi/initiatorname.iscsi on the guest. A missing IQN is a
// no-op and still reports success.
func (vm SoftLayerVM) writeOpenIscsiInitiatornameBasedOnShellScript(virtualGuest datatypes.SoftLayer_Virtual_Guest, credential AllowedHostCredential) (bool, error) {
	if len(credential.Iqn) == 0 {
		// Nothing to write.
		return true, nil
	}
	command := fmt.Sprintf("echo 'InitiatorName=%s' > /etc/iscsi/initiatorname.iscsi", credential.Iqn)
	if _, err := vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, command); err != nil {
		return false, bosherr.WrapError(err, "Writing to /etc/iscsi/initiatorname.iscsi")
	}
	return true, nil
}
// writeOpenIscsiConfBasedOnShellScript renders etcIscsidConfTemplate into a
// temp file and uploads it to /etc/iscsi/iscsid.conf on the guest. When the
// allowed-host credential has no password, the template is executed with the
// volume value instead.
func (vm SoftLayerVM) writeOpenIscsiConfBasedOnShellScript(virtualGuest datatypes.SoftLayer_Virtual_Guest, volume datatypes.SoftLayer_Network_Storage, credential AllowedHostCredential) (bool, error) {
	buffer := bytes.NewBuffer([]byte{})
	t := template.Must(template.New("open_iscsid_conf").Parse(etcIscsidConfTemplate))
	if len(credential.Password) == 0 {
		// No CHAP password on the credential: fill the template from the
		// volume's fields instead.
		if err := t.Execute(buffer, volume); err != nil {
			return false, bosherr.WrapError(err, "Generating config from template")
		}
	} else {
		if err := t.Execute(buffer, credential); err != nil {
			return false, bosherr.WrapError(err, "Generating config from template")
		}
	}

	file, err := ioutil.TempFile(os.TempDir(), "iscsid_conf_")
	if err != nil {
		// Fixed copy-pasted message: this step creates the temp file.
		return false, bosherr.WrapError(err, "Creating temp file for iscsid.conf")
	}
	defer os.Remove(file.Name())
	// The original never closed the descriptor, leaking it on every call.
	defer file.Close()

	_, err = file.WriteString(buffer.String())
	if err != nil {
		return false, bosherr.WrapError(err, "Writing iscsid.conf to temp file")
	}
	if err = vm.uploadFile(virtualGuest, file.Name(), "/etc/iscsi/iscsid.conf"); err != nil {
		return false, bosherr.WrapError(err, "Writing to /etc/iscsi/iscsid.conf")
	}
	return true, nil
}
// etcIscsidConfTemplate renders /etc/iscsi/iscsid.conf for the guest. It is
// executed with either an AllowedHostCredential or a volume value — both are
// expected to expose the Username/Password fields referenced below.
const etcIscsidConfTemplate = `# Generated by bosh-agent
node.startup = automatic
node.session.auth.authmethod = CHAP
node.session.auth.username = {{.Username}}
node.session.auth.password = {{.Password}}
discovery.sendtargets.auth.authmethod = CHAP
discovery.sendtargets.auth.username = {{.Username}}
discovery.sendtargets.auth.password = {{.Password}}
node.session.timeo.replacement_timeout = 120
node.conn[0].timeo.login_timeout = 15
node.conn[0].timeo.logout_timeout = 15
node.conn[0].timeo.noop_out_interval = 10
node.conn[0].timeo.noop_out_timeout = 15
node.session.iscsi.InitialR2T = No
node.session.iscsi.ImmediateData = Yes
node.session.iscsi.FirstBurstLength = 262144
node.session.iscsi.MaxBurstLength = 16776192
node.conn[0].iscsi.MaxRecvDataSegmentLength = 65536
`
// detachVolumeBasedOnShellScript tears down the iSCSI attachment on the
// guest: unmounts /var/vcap/store if mounted, stops open-iscsi, wipes the
// discovery/node state, restarts open-iscsi, and (when multipath is in use)
// restarts the multipath daemon.
func (vm SoftLayerVM) detachVolumeBasedOnShellScript(virtualGuest datatypes.SoftLayer_Virtual_Guest, volume datatypes.SoftLayer_Network_Storage, hasMultiPath bool) error {
	// umount /var/vcap/store in case of a read-only mount
	isMounted, err := vm.isMountPoint(virtualGuest, "/var/vcap/store")
	if err != nil {
		return bosherr.WrapError(err, "check mount point /var/vcap/store")
	}
	if isMounted {
		step00 := "umount -l /var/vcap/store"
		_, err := vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, step00)
		if err != nil {
			return bosherr.WrapError(err, "umount -l /var/vcap/store")
		}
		vm.logger.Debug(SOFTLAYER_VM_LOG_TAG, "umount -l /var/vcap/store", nil)
	}

	// stop open-iscsi
	step1 := "/etc/init.d/open-iscsi stop"
	_, err = vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, step1)
	if err != nil {
		// Fixed copy-pasted message: this step stops the daemon.
		return bosherr.WrapError(err, "Stopping open iscsi")
	}
	vm.logger.Debug(SOFTLAYER_VM_LOG_TAG, "/etc/init.d/open-iscsi stop", nil)

	// clean up /etc/iscsi/send_targets/
	step2 := "rm -rf /etc/iscsi/send_targets"
	_, err = vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, step2)
	if err != nil {
		return bosherr.WrapError(err, "Removing /etc/iscsi/send_targets")
	}
	vm.logger.Debug(SOFTLAYER_VM_LOG_TAG, "rm -rf /etc/iscsi/send_targets", nil)

	// clean up /etc/iscsi/nodes/
	step3 := "rm -rf /etc/iscsi/nodes"
	_, err = vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, step3)
	if err != nil {
		return bosherr.WrapError(err, "Removing /etc/iscsi/nodes")
	}
	vm.logger.Debug(SOFTLAYER_VM_LOG_TAG, "rm -rf /etc/iscsi/nodes", nil)

	// start open-iscsi
	step4 := "/etc/init.d/open-iscsi start"
	_, err = vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, step4)
	if err != nil {
		return bosherr.WrapError(err, "Restarting open iscsi")
	}
	vm.logger.Debug(SOFTLAYER_VM_LOG_TAG, "/etc/init.d/open-iscsi start", nil)

	if hasMultiPath {
		// restart dm-multipath tool
		step5 := "service multipath-tools restart"
		_, err = vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, step5)
		if err != nil {
			return bosherr.WrapError(err, "Restarting Multipath deamon")
		}
		vm.logger.Debug(SOFTLAYER_VM_LOG_TAG, "service multipath-tools restart", nil)
	}
	return nil
}
// findOpenIscsiTargetBasedOnShellScript lists the target IQNs of the active
// iSCSI sessions on the guest, or errors when none are found.
func (vm SoftLayerVM) findOpenIscsiTargetBasedOnShellScript(virtualGuest datatypes.SoftLayer_Virtual_Guest) ([]string, error) {
	command := "sleep 5 ; iscsiadm -m session -P3 | awk '/Target: /{print $2}'"
	output, err := vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, command)
	if err != nil {
		return []string{}, err
	}
	targets := []string{}
	for _, line := range strings.Split(strings.Trim(output, "\n"), "\n") {
		// Skip blank lines: splitting empty output yields [""], which made
		// the original return [""] and left the error branch unreachable.
		if line == "" {
			continue
		}
		targets = append(targets, strings.Split(line, ",")[0])
	}
	if len(targets) > 0 {
		return targets, nil
	}
	return []string{}, errors.New("Cannot find matched iSCSI device")
}
// findOpenIscsiPortalsBasedOnShellScript lists the portal addresses of the
// iSCSI sessions that report an attached scsi disk, per iscsiadm output.
func (vm SoftLayerVM) findOpenIscsiPortalsBasedOnShellScript(virtualGuest datatypes.SoftLayer_Virtual_Guest, volume datatypes.SoftLayer_Network_Storage) ([]string, error) {
	command := "sleep 5 ; iscsiadm -m session -P3 | awk 'BEGIN{ lel=0} { if($0 ~ /Current Portal: /){ portal = $3 ; lel=NR } else { if( NR==(lel+46) && $0 ~ /Attached scsi disk /) {print portal}}}'"
	output, err := vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, command)
	if err != nil {
		return []string{}, err
	}
	portals := []string{}
	for _, line := range strings.Split(strings.Trim(output, "\n"), "\n") {
		portals = append(portals, strings.Split(line, ",")[0])
	}
	return portals, nil
}
// getRootPassword returns the root password recorded on the guest's
// operating system, or "" when no root entry is present.
func (vm SoftLayerVM) getRootPassword(virtualGuest datatypes.SoftLayer_Virtual_Guest) string {
	for _, entry := range virtualGuest.OperatingSystem.Passwords {
		if entry.Username == ROOT_USER_NAME {
			return entry.Password
		}
	}
	return ""
}
// postCheckActiveTransactionsForOSReload waits for an OS-reload transaction
// to appear on this VM and then for the guest to return to RUNNING. API
// failures containing "HTTP error code" are tolerated and retried, since the
// instance can be briefly unreachable during the reload.
func (vm SoftLayerVM) postCheckActiveTransactionsForOSReload(softLayerClient sl.Client) error {
	virtualGuestService, err := softLayerClient.GetSoftLayer_Virtual_Guest_Service()
	if err != nil {
		return bosherr.WrapError(err, "Creating VirtualGuestService from SoftLayer client")
	}
	// Poll until at least one active transaction shows up (reload started).
	totalTime := time.Duration(0)
	for totalTime < bslcommon.TIMEOUT {
		activeTransactions, err := virtualGuestService.GetActiveTransactions(vm.ID())
		if err != nil {
			if !strings.Contains(err.Error(), "HTTP error code") {
				return bosherr.WrapError(err, "Getting active transactions from SoftLayer client")
			}
		}
		if len(activeTransactions) > 0 {
			vm.logger.Info(SOFTLAYER_VM_OS_RELOAD_TAG, "OS Reload transaction started")
			break
		}
		totalTime += bslcommon.POLLING_INTERVAL
		time.Sleep(bslcommon.POLLING_INTERVAL)
	}
	if totalTime >= bslcommon.TIMEOUT {
		return errors.New(fmt.Sprintf("Waiting for OS Reload transaction to start TIME OUT!"))
	}
	// Wait for the guest to report RUNNING again; transient HTTP errors from
	// the API are ignored here as well.
	err = bslcommon.WaitForVirtualGuest(vm.softLayerClient, vm.ID(), "RUNNING")
	if err != nil {
		if !strings.Contains(err.Error(), "HTTP error code") {
			return bosherr.WrapError(err, fmt.Sprintf("PowerOn failed with VirtualGuest id %d", vm.ID()))
		}
	}
	vm.logger.Info(SOFTLAYER_VM_OS_RELOAD_TAG, fmt.Sprintf("The virtual guest %d is powered on", vm.ID()))
	return nil
}
// postCheckActiveTransactionsForDeleteVM waits for a delete-VM transaction
// to start and then either for the guest to disappear, or — when the active
// transaction's average duration exceeds 30 (units per the API's
// AverageDuration field) — gives up waiting and leaves the long-running
// delete to finish on its own. Transient "HTTP error code" API failures are
// tolerated in both phases.
func (vm SoftLayerVM) postCheckActiveTransactionsForDeleteVM(softLayerClient sl.Client, virtualGuestId int) error {
	virtualGuestService, err := softLayerClient.GetSoftLayer_Virtual_Guest_Service()
	if err != nil {
		return bosherr.WrapError(err, "Creating VirtualGuestService from SoftLayer client")
	}
	// Phase 1: wait for the delete transaction to start.
	totalTime := time.Duration(0)
	for totalTime < bslcommon.TIMEOUT {
		activeTransactions, err := virtualGuestService.GetActiveTransactions(virtualGuestId)
		if err != nil {
			if !strings.Contains(err.Error(), "HTTP error code") {
				return bosherr.WrapError(err, "Getting active transactions from SoftLayer client")
			}
		}
		if len(activeTransactions) > 0 {
			vm.logger.Info(SOFTLAYER_VM_LOG_TAG, "Delete VM transaction started", nil)
			break
		}
		totalTime += bslcommon.POLLING_INTERVAL
		time.Sleep(bslcommon.POLLING_INTERVAL)
	}
	if totalTime >= bslcommon.TIMEOUT {
		return errors.New(fmt.Sprintf("Waiting for DeleteVM transaction to start TIME OUT!"))
	}
	// Phase 2: wait for the guest to be gone, or bail out early on a
	// long-running transaction.
	totalTime = time.Duration(0)
	for totalTime < bslcommon.TIMEOUT {
		vm1, err := virtualGuestService.GetObject(virtualGuestId)
		if err != nil || vm1.Id == 0 {
			vm.logger.Info(SOFTLAYER_VM_LOG_TAG, "VM doesn't exist. Delete done", nil)
			break
		}
		activeTransaction, err := virtualGuestService.GetActiveTransaction(virtualGuestId)
		if err != nil {
			if !strings.Contains(err.Error(), "HTTP error code") {
				return bosherr.WrapError(err, "Getting active transactions from SoftLayer client")
			}
		}
		// AverageDuration can look like ".5" or be empty; normalize so
		// ParseFloat accepts it.
		averageDuration := activeTransaction.TransactionStatus.AverageDuration
		if strings.HasPrefix(averageDuration, ".") || averageDuration == "" {
			averageDuration = "0" + averageDuration
		}
		averageTransactionDuration, err := strconv.ParseFloat(averageDuration, 32)
		if err != nil {
			// Unparseable duration: treat as a short transaction.
			averageTransactionDuration = 0
		}
		if averageTransactionDuration > 30 {
			vm.logger.Info(SOFTLAYER_VM_LOG_TAG, "Deleting VM instance had been launched and it is a long transaction. Please check Softlayer Portal", nil)
			break
		}
		vm.logger.Info(SOFTLAYER_VM_LOG_TAG, "This is a short transaction, waiting for all active transactions to complete", nil)
		totalTime += bslcommon.POLLING_INTERVAL
		time.Sleep(bslcommon.POLLING_INTERVAL)
	}
	if totalTime >= bslcommon.TIMEOUT {
		return errors.New(fmt.Sprintf("After deleting a vm, waiting for active transactions to complete TIME OUT!"))
	}
	return nil
}
// isMountPoint reports whether path is currently a mount point on the guest.
func (vm SoftLayerVM) isMountPoint(virtualGuest datatypes.SoftLayer_Virtual_Guest, path string) (bool, error) {
	mounts, err := vm.searchMounts(virtualGuest)
	if err != nil {
		return false, bosherr.WrapError(err, "Searching mounts")
	}
	for _, m := range mounts {
		if m.MountPoint != path {
			continue
		}
		return true, nil
	}
	return false, nil
}
// searchMounts runs `mount` on the guest and parses each output line
// (e.g. "/dev/sda on /boot type ext2 (rw)") into a Mount.
func (vm SoftLayerVM) searchMounts(virtualGuest datatypes.SoftLayer_Virtual_Guest) ([]Mount, error) {
	var mounts []Mount
	stdout, err := vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, "mount")
	if err != nil {
		return mounts, bosherr.WrapError(err, "Running mount")
	}
	for _, mountEntry := range strings.Split(stdout, "\n") {
		if mountEntry == "" {
			continue
		}
		mountFields := strings.Fields(mountEntry)
		// Guard against malformed lines: the original indexed [2]
		// unconditionally and would panic on short output.
		if len(mountFields) < 3 {
			continue
		}
		mounts = append(mounts, Mount{
			PartitionPath: mountFields[0],
			MountPoint:    mountFields[2],
		})
	}
	return mounts, nil
}
// execCommand runs an arbitrary shell command on the guest as root over SSH.
func (vm SoftLayerVM) execCommand(virtualGuest datatypes.SoftLayer_Virtual_Guest, command string) (string, error) {
	return vm.sshClient.ExecCommand(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, command)
}

// uploadFile copies a local file to the guest over SSH.
func (vm SoftLayerVM) uploadFile(virtualGuest datatypes.SoftLayer_Virtual_Guest, srcFile string, destFile string) error {
	return vm.sshClient.UploadFile(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, srcFile, destFile)
}

// downloadFile copies a file from the guest to the local machine over SSH.
func (vm SoftLayerVM) downloadFile(virtualGuest datatypes.SoftLayer_Virtual_Guest, srcFile string, destFile string) error {
	return vm.sshClient.DownloadFile(ROOT_USER_NAME, vm.getRootPassword(virtualGuest), virtualGuest.PrimaryBackendIpAddress, srcFile, destFile)
}
|
package xweb
import (
"errors"
"fmt"
"html/template"
"io/ioutil"
"net/http"
"os"
"path"
"path/filepath"
"reflect"
"regexp"
"runtime"
"strconv"
"strings"
"time"
"github.com/coscms/tagfast"
"github.com/coscms/xweb/httpsession"
"github.com/coscms/xweb/lib/route"
"github.com/coscms/xweb/log"
)
var (
	// mapperType identifies Mapper fields when AddRouter scans an action
	// struct for routable entries.
	mapperType = reflect.TypeOf(Mapper{})
)
// Result wrapper types: actions return these values to tell the framework
// how to respond (serialization, template render, redirect, file download).
// NOTE(review): the handling of each wrapper happens outside this chunk —
// confirm in the server/action dispatch code.

// JSON wraps a payload to be emitted as JSON.
type JSON struct {
	Data interface{}
}

// JSONP wraps a payload to be emitted as JSONP with the given callback name.
type JSONP struct {
	Data     interface{}
	Callback string
}

// RENDER names a template to render together with its variable set.
type RENDER struct {
	Tmpl string
	*T
}

// JUMP describes a redirect to Url with HTTP status Code.
type JUMP struct {
	Url  string
	Code int
}

// XML wraps a payload to be emitted as XML.
type XML struct {
	Data interface{}
}

// FILE names a file to be served as the response body.
type FILE struct {
	Data string
}
const (
	// Run modes for AppConfig.Mode.
	Debug = iota + 1
	Product
	// XSRF_TAG is the form-field name carrying the XSRF token (see the
	// req.Form[XSRF_TAG] check in App.run).
	XSRF_TAG string = "_xsrf"
)
// App is a sub-application mounted at BasePath on a Server. It owns its own
// route table, action registry, template/static managers, session manager
// and configuration; initApp wires it to its Server.
type App struct {
	BasePath           string // URL prefix the app is mounted at ("/" for the root app)
	Name               string
	Domain             string // dedicated domain, empty unless SetDomain was called
	Route              *route.Route
	filters            []Filter // run in order before routing; any may reject the request
	Server             *Server
	AppConfig          *AppConfig
	Config             *CONF
	Actions            map[string]interface{}       // action name -> action instance
	ActionsPath        map[reflect.Type]string      // action type -> mount url
	ActionsNamePath    map[string]string            // action name -> mount url
	ActionsMethodRoute map[string]map[string]string // action name -> method -> route
	FuncMaps           template.FuncMap
	Logger             *log.Logger
	VarMaps            T // variables injected into every action's template context
	SessionManager     *httpsession.Manager //Session manager
	RootTemplate       *template.Template
	ErrorTemplate      *template.Template
	StaticVerMgr       *StaticVerMgr
	TemplateMgr        *TemplateMgr
	ContentEncoding    string
	RequestTime        time.Time // start of the current request, used by ElapsedTime
	Cryptor
	XsrfManager
}
// NewAppConfig returns an AppConfig with the framework defaults: production
// mode, sessions enabled (3600s timeout), 10MB upload limit, versioned
// static files, cached+reloaded templates, XSRF checking and automatic
// form-to-struct mapping.
func NewAppConfig() *AppConfig {
	return &AppConfig{
		Mode:              Product,
		StaticDir:         "static",
		TemplateDir:       "templates",
		SessionOn:         true,
		SessionTimeout:    3600,
		MaxUploadSize:     10 * 1024 * 1024,
		StaticFileVersion: true,
		CacheTemplates:    true,
		ReloadTemplates:   true,
		CheckXsrf:         true,
		FormMapToStruct:   true,
	}
}
// AppConfig holds per-app settings; see NewAppConfig for the defaults.
type AppConfig struct {
	Mode              int // Debug or Product
	StaticDir         string
	TemplateDir       string
	SessionOn         bool
	MaxUploadSize     int64 // passed to ParseMultipartForm
	CookieSecret      string
	CookieLimitIP     bool
	CookieLimitUA     bool
	CookiePrefix      string
	CookieDomain      string
	StaticFileVersion bool // enable StaticVerMgr-based cache busting
	CacheTemplates    bool
	ReloadTemplates   bool
	CheckXsrf         bool
	SessionTimeout    time.Duration
	FormMapToStruct   bool // auto-map form values onto action struct fields
	EnableHttpCache   bool
	AuthBasedOnCookie bool
}
// NewApp creates an App mounted at path with the given name, wired to the
// package defaults (funcs, static/template managers, cryptor, xsrf manager).
// The app still needs a Server and initApp before serving.
func NewApp(path string, name string) *App {
	return &App{
		BasePath:           path,
		Name:               name,
		Route:              route.NewRoute(),
		AppConfig:          NewAppConfig(),
		Config:             NewCONF(),
		Actions:            map[string]interface{}{},
		ActionsPath:        map[reflect.Type]string{},
		ActionsNamePath:    map[string]string{},
		ActionsMethodRoute: make(map[string]map[string]string),
		FuncMaps:           DefaultFuncs,
		VarMaps:            T{},
		filters:            make([]Filter, 0),
		StaticVerMgr:       DefaultStaticVerMgr,
		TemplateMgr:        DefaultTemplateMgr,
		Cryptor:            DefaultCryptor,
		XsrfManager:        DefaultXsrfManager,
	}
}
// IsRootApp reports whether this app is mounted at the server root.
func (a *App) IsRootApp() bool {
	const rootPath = "/"
	return rootPath == a.BasePath
}
// initApp wires the app to its Server: static-file versioning, template
// caching, built-in template funcs/vars, the session manager and the logger.
// Non-root apps share the root app's managers when they point at the same
// directory.
func (a *App) initApp() {
	var isRootApp bool = a.IsRootApp()
	if a.AppConfig.StaticFileVersion {
		// Use our own StaticVerMgr only when this app has its own static
		// dir; otherwise share the root app's.
		if isRootApp || a.Server.RootApp.AppConfig.StaticDir != a.AppConfig.StaticDir {
			if !isRootApp {
				a.StaticVerMgr = new(StaticVerMgr)
			}
			a.StaticVerMgr.Init(a, a.AppConfig.StaticDir)
		} else {
			a.StaticVerMgr = a.Server.RootApp.StaticVerMgr
		}
	}
	if a.AppConfig.CacheTemplates {
		// Same sharing rule for the template manager.
		if isRootApp || a.Server.RootApp.AppConfig.TemplateDir != a.AppConfig.TemplateDir {
			if !isRootApp {
				a.TemplateMgr = new(TemplateMgr)
			}
			a.TemplateMgr.Init(a, a.AppConfig.TemplateDir, a.AppConfig.ReloadTemplates)
		} else {
			a.TemplateMgr = a.Server.RootApp.TemplateMgr
		}
	}
	a.FuncMaps["StaticUrl"] = a.StaticUrl
	a.FuncMaps["XsrfName"] = XsrfName
	a.VarMaps["XwebVer"] = Version
	if a.AppConfig.SessionOn {
		if a.Server.SessionManager != nil {
			// Reuse the server-wide session manager when one exists.
			a.SessionManager = a.Server.SessionManager
		} else {
			a.SessionManager = httpsession.Default()
			if a.AppConfig.SessionTimeout > time.Second {
				a.SessionManager.SetMaxAge(a.AppConfig.SessionTimeout)
			}
			a.SessionManager.Run()
		}
	}
	if a.Logger == nil {
		a.Logger = a.Server.Logger
	}
}
// Close shuts down the app-owned static and template managers. An app-owned
// session manager is currently NOT closed — the call is commented out below.
func (a *App) Close() {
	if a.AppConfig.StaticFileVersion && a.StaticVerMgr != nil {
		a.StaticVerMgr.Close()
	}
	if a.AppConfig.CacheTemplates && a.TemplateMgr != nil {
		a.TemplateMgr.Close()
	}
	if a.AppConfig.SessionOn && a.Server.SessionManager == nil &&
		a.SessionManager != nil {
		//a.SessionManager.Close()
	}
}
// DelDomain removes this app's dedicated-domain mapping from the server.
func (a *App) DelDomain() {
	a.Domain = ""
	domain, ok := a.Server.App2Domain[a.Name]
	if !ok {
		return
	}
	delete(a.Server.App2Domain, a.Name)
	delete(a.Server.Domain2App, domain)
}

// SetDomain binds this app to a dedicated domain on the server.
func (a *App) SetDomain(domain string) {
	a.Domain = domain
	name := a.Name
	a.Server.App2Domain[name] = domain
	a.Server.Domain2App[domain] = name
}
// SetStaticDir overrides the directory served for static files.
func (a *App) SetStaticDir(dir string) {
	a.AppConfig.StaticDir = dir
}

// SetTemplateDir overrides the directory templates are loaded from.
func (a *App) SetTemplateDir(path string) {
	a.AppConfig.TemplateDir = path
}
// getTemplatePath resolves name inside the template directory, returning ""
// when no such file exists.
func (a *App) getTemplatePath(name string) string {
	candidate := path.Join(a.AppConfig.TemplateDir, name)
	if !fileExists(candidate) {
		return ""
	}
	return candidate
}
// SetConfig stores an arbitrary value under name in the app's CONF store.
func (app *App) SetConfig(name string, val interface{}) {
	app.Config.SetInterface(name, val)
}

// GetConfig retrieves the value stored under name.
func (app *App) GetConfig(name string) interface{} {
	return app.Config.GetInterface(name)
}

// SetConfigString stores a string value under name.
func (app *App) SetConfigString(name string, val string) {
	app.Config.SetString(name, val)
}

// GetConfigString retrieves the string value stored under name.
func (app *App) GetConfigString(name string) string {
	return app.Config.GetString(name)
}
// AddAction registers each given action at the root path "/".
func (app *App) AddAction(cs ...interface{}) {
	for _, action := range cs {
		app.AddRouter("/", action)
	}
}
// AutoAction registers each action under "/<name>", where <name> is the
// lower-cased struct name with its "Action" suffix removed. Types whose
// names do not end in "Action" are skipped with a warning.
func (app *App) AutoAction(cs ...interface{}) {
	for _, c := range cs {
		t := reflect.Indirect(reflect.ValueOf(c)).Type()
		name := t.Name()
		if !strings.HasSuffix(name, "Action") {
			// Fixed the grammar of the original warning message.
			app.Warn("AutoAction needs a type whose name ends with Action")
			continue
		}
		// TrimSuffix instead of the original manual name[:len(name)-6].
		path := strings.ToLower(strings.TrimSuffix(name, "Action"))
		app.AddRouter("/"+path, c)
	}
}
// Assign registers varOrFun as a template function when it is a func, and
// as a template variable otherwise.
func (app *App) Assign(name string, varOrFun interface{}) {
	if reflect.TypeOf(varOrFun).Kind() != reflect.Func {
		app.VarMaps[name] = varOrFun
		return
	}
	app.FuncMaps[name] = varOrFun
}
// MultiAssign registers every entry of t via Assign.
func (app *App) MultiAssign(t *T) {
	for key, val := range *t {
		app.Assign(key, val)
	}
}
// AddFilter appends a request filter; filters run in registration order
// before routing and may reject the request.
func (app *App) AddFilter(filter Filter) {
	app.filters = append(app.filters, filter)
}
// Debug logs params at debug level, prefixed with "[<app name>]".
func (app *App) Debug(params ...interface{}) {
	args := append([]interface{}{"[" + app.Name + "]"}, params...)
	app.Logger.Debug(args...)
}

// Info logs params at info level, prefixed with the app name.
func (app *App) Info(params ...interface{}) {
	args := append([]interface{}{"[" + app.Name + "]"}, params...)
	app.Logger.Info(args...)
}

// Warn logs params at warning level, prefixed with the app name.
func (app *App) Warn(params ...interface{}) {
	args := append([]interface{}{"[" + app.Name + "]"}, params...)
	app.Logger.Warn(args...)
}

// Error logs params at error level, prefixed with the app name.
func (app *App) Error(params ...interface{}) {
	args := append([]interface{}{"[" + app.Name + "]"}, params...)
	app.Logger.Error(args...)
}

// Fatal logs params at fatal level, prefixed with the app name.
func (app *App) Fatal(params ...interface{}) {
	args := append([]interface{}{"[" + app.Name + "]"}, params...)
	app.Logger.Fatal(args...)
}

// Panic logs params at panic level, prefixed with the app name.
func (app *App) Panic(params ...interface{}) {
	args := append([]interface{}{"[" + app.Name + "]"}, params...)
	app.Logger.Panic(args...)
}
// Debugf logs a formatted message at debug level, prefixed with the app name.
func (app *App) Debugf(format string, params ...interface{}) {
	app.Logger.Debugf("["+app.Name+"] "+format, params...)
}

// Infof logs a formatted message at info level, prefixed with the app name.
func (app *App) Infof(format string, params ...interface{}) {
	app.Logger.Infof("["+app.Name+"] "+format, params...)
}

// Warnf logs a formatted message at warning level, prefixed with the app name.
func (app *App) Warnf(format string, params ...interface{}) {
	app.Logger.Warnf("["+app.Name+"] "+format, params...)
}

// Errorf logs a formatted message at error level, prefixed with the app name.
func (app *App) Errorf(format string, params ...interface{}) {
	app.Logger.Errorf("["+app.Name+"] "+format, params...)
}

// Fatalf logs a formatted message at fatal level, prefixed with the app name.
func (app *App) Fatalf(format string, params ...interface{}) {
	app.Logger.Fatalf("["+app.Name+"] "+format, params...)
}

// Panicf logs a formatted message at panic level, prefixed with the app name.
func (app *App) Panicf(format string, params ...interface{}) {
	app.Logger.Panicf("["+app.Name+"] "+format, params...)
}
// filter runs every registered Filter in order, stopping and reporting
// false as soon as one rejects the request.
func (app *App) filter(w http.ResponseWriter, req *http.Request) bool {
	for _, f := range app.filters {
		if ok := f.Do(w, req); !ok {
			return false
		}
	}
	return true
}
// AddRouter registers an action struct under the given url prefix. For each
// Mapper field that has a matching method, it derives the route path and
// allowed HTTP methods from the field's `xweb` struct tag:
//
//	`xweb:"GET|POST /index"` — explicit methods and path
//	`xweb:"/index"` or `xweb:"index"` — explicit path, GET/POST inferred
//	`xweb:"GET|POST"` — explicit methods, path derived from the field name
//	no tag — GET/POST inferred, path derived from the field name
//
// A relative path is prefixed with "/<short action name>/". For each method,
// handler presence is detected via a "<Name>_<METHOD>" method on the action.
func (app *App) AddRouter(url string, c interface{}) {
	t := reflect.TypeOf(c).Elem()
	v := reflect.ValueOf(c)
	actionFullName := t.Name()
	actionShortName := strings.TrimSuffix(actionFullName, "Action")
	actionShortName = strings.ToLower(actionShortName)
	// Register the action in the app-wide lookup tables.
	app.ActionsPath[t] = url
	app.Actions[actionFullName] = c
	app.ActionsNamePath[actionFullName] = url
	app.ActionsMethodRoute[actionFullName] = make(map[string]string)
	for i := 0; i < t.NumField(); i++ {
		// Only Mapper fields define routes.
		if t.Field(i).Type != mapperType {
			continue
		}
		name := t.Field(i).Name
		a := strings.Title(name)
		m := v.MethodByName(a)
		if !m.IsValid() {
			continue
		}
		tag := t.Field(i).Tag
		tagStr := tag.Get("xweb")
		methods := map[string]bool{} //map[string]bool{"GET": true, "POST": true}
		var p string
		if tagStr != "" {
			tags := strings.Split(tagStr, " ")
			path := tagStr
			length := len(tags)
			if length >= 2 { //`xweb:"GET|POST /index"`
				for _, method := range strings.Split(tags[0], "|") {
					method = strings.ToUpper(method)
					m := v.MethodByName(a + "_" + method)
					methods[method] = m.IsValid()
				}
				path = tags[1]
				if path == "" {
					path = name
				}
				if tags[1][0] != '/' {
					path = "/" + actionShortName + "/" + path
				}
			} else if length == 1 {
				if matched, _ := regexp.MatchString(`^[A-Z]+(\|[A-Z]+)*$`, tags[0]); !matched {
					// Not all upper-case letters: treat the tag as a URL rule.
					path = tags[0]
					if tags[0][0] != '/' { //`xweb:"index"`
						path = "/" + actionShortName + "/" + path
					}
					m := v.MethodByName(a + "_GET")
					methods["GET"] = m.IsValid()
					m = v.MethodByName(a + "_POST")
					methods["POST"] = m.IsValid()
				} else { //`xweb:"GET|POST"`
					for _, method := range strings.Split(tags[0], "|") {
						method = strings.ToUpper(method)
						m := v.MethodByName(a + "_" + method)
						methods[method] = m.IsValid()
					}
					path = "/" + actionShortName + "/" + name
				}
			} else {
				path = "/" + actionShortName + "/" + name
				m := v.MethodByName(a + "_GET")
				methods["GET"] = m.IsValid()
				m = v.MethodByName(a + "_POST")
				methods["POST"] = m.IsValid()
			}
			p = strings.TrimRight(url, "/") + path
		} else {
			p = strings.TrimRight(url, "/") + "/" + actionShortName + "/" + name
			m := v.MethodByName(a + "_GET")
			methods["GET"] = m.IsValid()
			m = v.MethodByName(a + "_POST")
			methods["POST"] = m.IsValid()
		}
		p = removeStick(p)
		app.Route.Set(p, a, methods, t)
		app.Debug("Action:", actionFullName+"."+a+";", "Route Information:", p+";", "Request Method:", methods)
	}
}
// ElapsedTimeString formats the elapsed request time, e.g. "0.123s".
func (a *App) ElapsedTimeString() string {
	return fmt.Sprintf("%.3fs", a.ElapsedTime())
}

// ElapsedTime returns the seconds elapsed since the request started.
func (a *App) ElapsedTime() float64 {
	// time.Since is the idiomatic spelling of time.Now().Sub(t).
	return time.Since(a.RequestTime).Seconds()
}
// VisitedLog writes one access-log line for the request; 2xx/3xx codes go
// to Info, everything else to Error. A zero code is recorded as 200.
func (a *App) VisitedLog(req *http.Request, statusCode int, requestPath string, responseSize int64) {
	code := statusCode
	if code == 0 {
		code = 200
	}
	logFn := a.Error
	if code >= 200 && code < 400 {
		logFn = a.Info
	}
	logFn(req.RemoteAddr, req.Method, code, requestPath, responseSize, a.ElapsedTimeString())
}
// the main route handler in web.go
//
// routeHandler dispatches one request: it optionally tries static files,
// parses the form, runs filters, strips the URL extension, resolves the
// route and invokes the action, then falls back to index.html/index.htm and
// finally a 404.
func (a *App) routeHandler(req *http.Request, w http.ResponseWriter) {
	var (
		requestPath  string = req.URL.Path
		statusCode   int    = 0
		responseSize int64  = 0
	)
	// Always emit an access-log line on the way out.
	defer func() {
		a.VisitedLog(req, statusCode, requestPath, responseSize)
	}()
	if !a.IsRootApp() || a.Server.Config.UrlSuffix != "" || a.Server.Config.UrlPrefix != "" {
		// static files, needed op
		if req.Method == "GET" || req.Method == "HEAD" {
			success, size := a.TryServingFile(requestPath, req, w)
			if success {
				statusCode = 200
				responseSize = size
				return
			}
			// Don't fall through to routing for a missing favicon.
			if requestPath == "/favicon.ico" {
				statusCode = 404
				a.error(w, 404, "Page not found")
				return
			}
		}
	}
	//ignore errors from ParseForm because it's usually harmless.
	ct := req.Header.Get("Content-Type")
	if strings.Contains(ct, "multipart/form-data") {
		req.ParseMultipartForm(a.AppConfig.MaxUploadSize)
	} else {
		req.ParseForm()
	}
	//Set the default content-type
	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	// A rejecting filter is assumed to have written its own response.
	if !a.filter(w, req) {
		statusCode = 302
		return
	}
	// Split off the URL extension (default ".html"); it is passed to the
	// action and removed from the routed path.
	extension := ".html"
	if epos := strings.LastIndex(req.URL.Path, "."); epos > 0 {
		extension = req.URL.Path[epos:]
		req.URL.Path = req.URL.Path[0:epos]
	}
	requestPath = req.URL.Path // filters are allowed to change req.URL.Path
	reqPath := removeStick(requestPath)
	// Strip the mount prefix for non-root apps without a dedicated domain.
	if a.Domain == "" && a.BasePath != "/" {
		reqPath = "/" + strings.TrimPrefix(reqPath, a.BasePath)
	}
	// HEAD requests are routed like GET.
	reqMethod := Ternary(req.Method == "HEAD", "GET", req.Method).(string)
	args, fnName, rfType, on := a.Route.Get(reqPath, reqMethod)
	if rfType != nil && fnName != "" {
		var (
			isBreak bool
			suffix  string
		)
		if on {
			suffix = reqMethod
		}
		isBreak, statusCode, responseSize = a.run(req, w, fnName, rfType, args, suffix, extension)
		if isBreak {
			return
		}
	}
	// try serving index.html or index.htm
	if req.Method == "GET" || req.Method == "HEAD" {
		if ok, size := a.TryServingFile(path.Join(requestPath, "index.html"), req, w); ok {
			statusCode = 200
			responseSize = size
			return
		} else if ok, size := a.TryServingFile(path.Join(requestPath, "index.htm"), req, w); ok {
			statusCode = 200
			responseSize = size
			return
		}
	}
	a.error(w, 404, "Page not found")
	statusCode = 404
}
func (a *App) run(req *http.Request, w http.ResponseWriter,
handlerName string, reflectType reflect.Type,
args []reflect.Value, handlerSuffix string, extensionName string) (isBreak bool,
statusCode int, responseSize int64) {
if handlerSuffix != "" {
handlerName += "_" + handlerSuffix
}
isBreak = true
vc := reflect.New(reflectType)
c := &Action{
Request: req,
App: a,
ResponseWriter: w,
T: T{},
f: T{},
Option: &ActionOption{
AutoMapForm: a.AppConfig.FormMapToStruct,
CheckXsrf: a.AppConfig.CheckXsrf,
},
ExtensionName: extensionName,
}
for k, v := range a.VarMaps {
c.T[k] = v
}
elem := vc.Elem()
//设置Action字段的值
fieldA := elem.FieldByName("Action")
if fieldA.IsValid() {
fieldA.Set(reflect.ValueOf(c))
}
//设置C字段的值
fieldC := elem.FieldByName("C")
if fieldC.IsValid() {
fieldC.Set(reflect.ValueOf(vc))
}
//执行Init方法
initM := vc.MethodByName("Init")
if initM.IsValid() {
initM.Call([]reflect.Value{})
}
//表单数据自动映射到结构体
if c.Option.AutoMapForm {
a.StructMap(elem, req)
}
//验证XSRF
if c.Option.CheckXsrf {
a.XsrfManager.Init(c)
if req.Method == "POST" {
formVals := req.Form[XSRF_TAG]
var formVal string
if len(formVals) > 0 {
formVal = formVals[0]
}
if formVal == "" ||
!a.XsrfManager.Valid(a.AppConfig.CookiePrefix+
XSRF_TAG, formVal) {
a.error(w, 500, "xsrf token error.")
a.Error("xsrf token error.")
statusCode = 500
return
}
}
}
structName := reflect.ValueOf(reflectType.Name())
actionName := reflect.ValueOf(handlerName)
//执行Before方法
initM = vc.MethodByName("Before")
if initM.IsValid() {
structAction := []reflect.Value{structName, actionName}
if ok := initM.Call(structAction); ok[0].Kind() == reflect.Bool && !ok[0].Bool() {
responseSize = c.ResponseSize
return
}
}
ret, err := a.SafelyCall(vc, handlerName, args)
if err != nil {
//there was an error or panic while calling the handler
if a.AppConfig.Mode == Debug {
a.error(w, 500, fmt.Sprintf("<pre>handler error: %v</pre>", err))
} else if a.AppConfig.Mode == Product {
a.error(w, 500, "Server Error")
}
statusCode = 500
responseSize = c.ResponseSize
return
}
statusCode = fieldA.Interface().(*Action).StatusCode
//执行After方法
initM = vc.MethodByName("After")
if initM.IsValid() {
structAction := []reflect.Value{structName, actionName}
structAction = append(structAction, ret...)
if len(structAction) != initM.Type().NumIn() {
a.Errorf("Error : %v.After(): The number of params is not adapted.", structName)
return
}
ret = initM.Call(structAction)
}
if len(ret) == 0 {
responseSize = c.ResponseSize
return
}
sval := ret[0]
intf := sval.Interface()
kind := sval.Kind()
var content []byte
if intf == nil || kind == reflect.Bool {
responseSize = c.ResponseSize
return
} else if kind == reflect.String {
content = []byte(sval.String())
} else if kind == reflect.Slice && sval.Type().Elem().Kind() == reflect.Uint8 {
content = intf.([]byte)
} else if _, ok := intf.(bool); ok {
responseSize = c.ResponseSize
return
} else if obj, ok := intf.(JSON); ok {
c.ServeJson(obj.Data)
responseSize = c.ResponseSize
return
} else if obj, ok := intf.(JSONP); ok {
c.ServeJsonp(obj.Data, obj.Callback)
responseSize = c.ResponseSize
return
} else if obj, ok := intf.(XML); ok {
c.ServeXml(obj.Data)
responseSize = c.ResponseSize
return
} else if obj, ok := intf.(FILE); ok {
c.ServeFile(obj.Data)
return
} else if obj, ok := intf.(RENDER); ok {
c.Render(obj.Tmpl, obj.T)
return
} else if obj, ok := intf.(JUMP); ok {
c.Redirect(obj.Url, obj.Code)
return
} else if err, ok := intf.(error); ok {
if err != nil {
a.Error("Error:", err)
a.error(w, 500, "Server Error")
statusCode = 500
} else {
responseSize = c.ResponseSize
}
return
} else if str, ok := intf.(string); ok {
content = []byte(str)
} else if byt, ok := intf.([]byte); ok {
content = byt
} else {
var validType bool
Event("OutputBaseOnExtensionName", []interface{}{c, intf}, func(ok bool) {
if !ok {
validType = true
return
}
switch c.ExtensionName {
case ".json":
c.ServeJson(intf)
responseSize = c.ResponseSize
validType = true
case ".xml":
c.ServeXml(intf)
responseSize = c.ResponseSize
validType = true
}
})
if !validType {
a.Warnf("unknown returned result type %v, ignored %v", kind, intf)
return
}
}
w.Header().Set("Content-Length", strconv.Itoa(len(content)))
size, err := w.Write(content)
if err != nil {
a.Errorf("Error during write: %v", err)
statusCode = 500
return
}
responseSize = int64(size)
return
}
// error renders the error template with the given HTTP status and body
// content and writes it to w, returning any write error. The template
// text is loaded lazily from TemplateDir/_error.html on first use and
// cached in the package-level errorTmpl; defaultErrorTmpl is the
// fallback when the file is absent or empty.
// NOTE(review): errorTmpl is read and written without synchronization —
// confirm this is safe for concurrent first requests.
func (a *App) error(w http.ResponseWriter, status int, content string) error {
	w.WriteHeader(status)
	if errorTmpl == "" {
		errTmplFile := a.AppConfig.TemplateDir + "/_error.html"
		if file, err := os.Stat(errTmplFile); err == nil && !file.IsDir() {
			if b, e := ioutil.ReadFile(errTmplFile); e == nil {
				errorTmpl = string(b)
			}
		}
		if errorTmpl == "" {
			errorTmpl = defaultErrorTmpl
		}
	}
	// template arguments: status, status text (twice), body, version
	res := fmt.Sprintf(errorTmpl, status, statusText[status],
		status, statusText[status], content, Version)
	_, err := w.Write([]byte(res))
	return err
}
// StaticUrl resolves url to a static-file URL under the app's base
// path. When this app shares its StaticDir with the root app, the root
// app's base path is used instead. With static-file versioning enabled
// and a known version, "?v=<ver>" is appended.
// NOTE(review): path.Join cleans its result — verify the "?v=" suffix
// survives for inputs that require cleaning.
func (a *App) StaticUrl(url string) string {
	var basePath string
	if a.AppConfig.StaticDir == RootApp().AppConfig.StaticDir {
		basePath = RootApp().BasePath
	} else {
		basePath = a.BasePath
	}
	if !a.AppConfig.StaticFileVersion {
		return path.Join(basePath, url)
	}
	ver := a.StaticVerMgr.GetVersion(url)
	if ver == "" {
		return path.Join(basePath, url)
	}
	return path.Join(basePath, url+"?v="+ver)
}
// SafelyCall invokes the method named `method` on vc with args inside a
// recover block. When the server is configured with RecoverPanic, a
// panicking handler is logged together with a file:line stack walk and
// the collected text is returned as an error; otherwise the panic is
// re-raised. A missing method is reported as an error instead of
// triggering reflect's "call of invalid Value" panic.
func (a *App) SafelyCall(vc reflect.Value, method string, args []reflect.Value) (resp []reflect.Value, err error) {
	defer func() {
		if e := recover(); e != nil {
			if !a.Server.Config.RecoverPanic {
				// go back to panic
				panic(e)
			}
			resp = nil
			var sb strings.Builder
			fmt.Fprintf(&sb, "Handler crashed with error: %v", e)
			// walk the call stack, skipping this frame
			for i := 1; ; i++ {
				_, file, line, ok := runtime.Caller(i)
				if !ok {
					break
				}
				fmt.Fprintf(&sb, "\n%v %v", file, line)
			}
			content := sb.String()
			a.Error(content)
			err = errors.New(content)
		}
	}()
	function := vc.MethodByName(method)
	if !function.IsValid() {
		// MethodByName returns the zero Value for unknown methods;
		// calling it would panic inside reflect.
		return nil, fmt.Errorf("method %q not found", method)
	}
	return function.Call(args), nil
}
// InitHeadContent prepares response headers before content is served:
// when a compression encoding is active it advertises that encoding,
// otherwise it sets the plain Content-Length.
func (a *App) InitHeadContent(w http.ResponseWriter, contentLength int64) {
	h := w.Header()
	switch a.ContentEncoding {
	case "gzip":
		h.Set("Content-Encoding", "gzip")
	case "deflate":
		h.Set("Content-Encoding", "deflate")
	default:
		h.Set("Content-Length", strconv.FormatInt(contentLength, 10))
	}
}
// TryServingFile attempts to serve a static file from StaticDir and
// reports whether it succeeded, together with the file's size in bytes.
// Directories are never served. When gzip support is enabled and the
// file extension matches StaticExtensionsToGzip, the file is served
// from an in-memory compressed copy via OpenMemZipFile.
func (a *App) TryServingFile(name string, req *http.Request, w http.ResponseWriter) (bool, int64) {
	// strip the app's base path so the lookup is relative to StaticDir
	newPath := name
	if strings.HasPrefix(name, a.BasePath) {
		newPath = name[len(a.BasePath):]
	}
	var size int64
	staticFile := filepath.Join(a.AppConfig.StaticDir, newPath)
	finfo, err := os.Stat(staticFile)
	if err != nil {
		return false, size
	}
	if !finfo.IsDir() {
		size = finfo.Size()
		// decide whether this file qualifies for compressed serving
		isStaticFileToCompress := false
		if a.Server.Config.EnableGzip && a.Server.Config.StaticExtensionsToGzip != nil && len(a.Server.Config.StaticExtensionsToGzip) > 0 {
			for _, statExtension := range a.Server.Config.StaticExtensionsToGzip {
				if strings.HasSuffix(strings.ToLower(staticFile), strings.ToLower(statExtension)) {
					isStaticFileToCompress = true
					break
				}
			}
		}
		if isStaticFileToCompress {
			// negotiate the encoding from the request's Accept-Encoding
			a.ContentEncoding = GetAcceptEncodingZip(req)
			memzipfile, err := OpenMemZipFile(staticFile, a.ContentEncoding)
			if err != nil {
				return false, size
			}
			a.InitHeadContent(w, finfo.Size())
			http.ServeContent(w, req, staticFile, finfo.ModTime(), memzipfile)
		} else {
			http.ServeFile(w, req, staticFile)
		}
		return true, size
	}
	return false, size
}
// StructMap maps request form parameters onto the fields of m (a
// pointer to a struct) with no key prefix. See namedStructMap for the
// supported key syntaxes ("a.b.c" and "a[b][c]") and type conversions.
func (a *App) StructMap(m interface{}, r *http.Request) error {
	return a.namedStructMap(m, r, "")
}
// SplitJson splits a bracketed form key such as "user[name][test]" into
// its path segments ("user", "name", "test"). A plain multi-character
// key without brackets yields a single segment. It returns an error
// when a ']' appears without a matching '[' or when a '[' follows an
// empty segment.
func SplitJson(s string) ([]string, error) {
	res := make([]string, 0)
	var begin, end int
	var isleft bool // true while inside an unclosed [...] group
	for i, r := range s {
		switch r {
		case '[':
			isleft = true
			// flush the segment preceding the bracket (e.g. "user" in "user[x]")
			if i > 0 && s[i-1] != ']' {
				if begin == end {
					return nil, errors.New("unknown character")
				}
				res = append(res, s[begin:end+1])
			}
			begin = i + 1
			end = begin
		case ']':
			if !isleft {
				return nil, errors.New("unknown character")
			}
			isleft = false
			if begin != end {
				res = append(res, s[begin:end+1])
				begin = i + 1
				end = begin
			}
		default:
			end = i
		}
		// flush a trailing unbracketed segment
		if i == len(s)-1 && begin != end {
			res = append(res, s[begin:end+1])
		}
	}
	return res, nil
}
func (a *App) namedStructMap(m interface{}, r *http.Request, topName string) error {
vc := reflect.ValueOf(m).Elem()
tc := reflect.TypeOf(m).Elem()
for k, t := range r.Form {
if k == XSRF_TAG || k == "" {
continue
}
if topName != "" {
if !strings.HasPrefix(k, topName) {
continue
}
k = k[len(topName)+1:]
}
v := t[0]
names := strings.Split(k, ".")
var err error
if len(names) == 1 {
names, err = SplitJson(k)
if err != nil {
a.Warn("Unrecognize form key", k, err)
continue
}
}
var value reflect.Value = vc
for i, name := range names {
name = strings.Title(name)
if i != len(names)-1 {
if value.Kind() != reflect.Struct {
a.Warnf("arg error, value kind is %v", value.Kind())
break
}
//fmt.Println(name)
value = value.FieldByName(name)
if !value.IsValid() {
a.Warnf("(%v value is not valid %v)", name, value)
break
}
if !value.CanSet() {
a.Warnf("can not set %v -> %v", name, value.Interface())
break
}
if tagfast.Tag2(tc, name, "form_options") == "-" {
continue
}
if value.Kind() == reflect.Ptr {
if value.IsNil() {
value.Set(reflect.New(value.Type().Elem()))
}
value = value.Elem()
}
} else {
if value.Kind() != reflect.Struct {
a.Warnf("arg error, value %v kind is %v", name, value.Kind())
break
}
tv := value.FieldByName(name)
if !tv.IsValid() {
break
}
if !tv.CanSet() {
a.Warnf("can not set %v to %v", k, tv)
break
}
if tagfast.Tag2(tc, name, "form_options") == "-" {
continue
}
if tv.Kind() == reflect.Ptr {
tv.Set(reflect.New(tv.Type().Elem()))
tv = tv.Elem()
}
var l interface{}
switch k := tv.Kind(); k {
case reflect.String:
switch tagfast.Tag2(tc, name, "form_filter") {
case "html":
v = DefaultHtmlFilter(v)
}
l = v
tv.Set(reflect.ValueOf(l))
case reflect.Bool:
l = (v != "false" && v != "0")
tv.Set(reflect.ValueOf(l))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32:
x, err := strconv.Atoi(v)
if err != nil {
a.Warnf("arg %v as int: %v", v, err)
break
}
l = x
tv.Set(reflect.ValueOf(l))
case reflect.Int64:
x, err := strconv.ParseInt(v, 10, 64)
if err != nil {
a.Warnf("arg %v as int64: %v", v, err)
break
}
l = x
tv.Set(reflect.ValueOf(l))
case reflect.Float32, reflect.Float64:
x, err := strconv.ParseFloat(v, 64)
if err != nil {
a.Warnf("arg %v as float64: %v", v, err)
break
}
l = x
tv.Set(reflect.ValueOf(l))
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
x, err := strconv.ParseUint(v, 10, 64)
if err != nil {
a.Warnf("arg %v as uint: %v", v, err)
break
}
l = x
tv.Set(reflect.ValueOf(l))
case reflect.Struct:
if tvf, ok := tv.Interface().(FromConversion); ok {
err := tvf.FromString(v)
if err != nil {
a.Warnf("struct %v invoke FromString faild", tvf)
}
} else if tv.Type().String() == "time.Time" {
x, err := time.Parse("2006-01-02 15:04:05.000 -0700", v)
if err != nil {
x, err = time.Parse("2006-01-02 15:04:05", v)
if err != nil {
x, err = time.Parse("2006-01-02", v)
if err != nil {
a.Warnf("unsupported time format %v, %v", v, err)
break
}
}
}
l = x
tv.Set(reflect.ValueOf(l))
} else {
a.Warn("can not set an struct which is not implement Fromconversion interface")
}
case reflect.Ptr:
a.Warn("can not set an ptr of ptr")
case reflect.Slice, reflect.Array:
tt := tv.Type().Elem()
tk := tt.Kind()
if tk == reflect.String {
tv.Set(reflect.ValueOf(t))
break
}
if tv.IsNil() {
tv.Set(reflect.MakeSlice(tv.Type(), len(t), len(t)))
}
for i, s := range t {
var err error
switch tk {
case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int8, reflect.Int64:
var v int64
v, err = strconv.ParseInt(s, 10, tt.Bits())
if err == nil {
tv.Index(i).SetInt(v)
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
var v uint64
v, err = strconv.ParseUint(s, 10, tt.Bits())
if err == nil {
tv.Index(i).SetUint(v)
}
case reflect.Float32, reflect.Float64:
var v float64
v, err = strconv.ParseFloat(s, tt.Bits())
if err == nil {
tv.Index(i).SetFloat(v)
}
case reflect.Bool:
var v bool
v, err = strconv.ParseBool(s)
if err == nil {
tv.Index(i).SetBool(v)
}
case reflect.Complex64, reflect.Complex128:
// TODO:
err = fmt.Errorf("unsupported slice element type %v", tk.String())
default:
err = fmt.Errorf("unsupported slice element type %v", tk.String())
}
if err != nil {
a.Warnf("slice error: %v, %v", name, err)
break
}
}
default:
break
}
}
}
}
return nil
}
// Redirect issues an HTTP redirect to url via the redirect helper
// (status is optional) and logs any failure before returning it.
func (app *App) Redirect(w http.ResponseWriter, requestPath, url string, status ...int) error {
	if err := redirect(w, url, status...); err != nil {
		app.Errorf("redirect error: %s", err)
		return err
	}
	return nil
}
// Action looks up a registered controller instance by its struct name,
// returning nil when no such action exists.
func (app *App) Action(name string) interface{} {
	v, ok := app.Actions[name]
	if !ok {
		return nil
	}
	return v
}
/*
Nodes returns every registered route, grouped by action struct name and
handler method name, with the accepted HTTP methods. Example:

	{
	    "AdminAction": {
	        "Index": ["GET","POST"],
	        "Add":   ["GET","POST"],
	        "Edit":  ["GET","POST"]
	    }
	}
*/
func (app *App) Nodes() (r map[string]map[string][]string) {
	r = make(map[string]map[string][]string)
	for _, val := range app.Route.Regexp {
		name := val.ReflectType.Name()
		if _, ok := r[name]; !ok {
			r[name] = make(map[string][]string)
		}
		if _, ok := r[name][val.ExecuteFunc]; !ok {
			r[name][val.ExecuteFunc] = make([]string, 0)
		}
		// key-only range (gofmt -s / go vet clean)
		for k := range val.RequestMethod {
			r[name][val.ExecuteFunc] = append(r[name][val.ExecuteFunc], k) //FUNC1:[POST,GET]
		}
	}
	for _, val := range app.Route.Static {
		name := val.ReflectType.Name()
		if _, ok := r[name]; !ok {
			r[name] = make(map[string][]string)
		}
		if _, ok := r[name][val.ExecuteFunc]; !ok {
			r[name][val.ExecuteFunc] = make([]string, 0)
		}
		for k := range val.RequestMethod {
			r[name][val.ExecuteFunc] = append(r[name][val.ExecuteFunc], k) //FUNC1:[POST,GET]
		}
	}
	return
}
// improved
package xweb
import (
"errors"
"fmt"
"html/template"
"io/ioutil"
"net/http"
"os"
"path"
"path/filepath"
"reflect"
"regexp"
"runtime"
"strconv"
"strings"
"time"
"github.com/coscms/tagfast"
"github.com/coscms/xweb/httpsession"
"github.com/coscms/xweb/lib/route"
"github.com/coscms/xweb/log"
)
var (
	// mapperType marks routable handlers: struct fields of type Mapper
	// are scanned by AddRouter to discover handler methods.
	mapperType = reflect.TypeOf(Mapper{})
)
// JSON wraps a handler return value that run() serves as JSON.
type JSON struct {
	Data interface{}
}

// JSONP wraps a handler return value served as JSONP with the given
// callback function name.
type JSONP struct {
	Data     interface{}
	Callback string
}

// SHOW asks run() to render the named template with the embedded data.
type SHOW struct {
	Tmpl string
	*T
}

// JUMP asks run() to redirect to Url with status Code.
type JUMP struct {
	Url  string
	Code int
}

// XML wraps a handler return value served as XML.
type XML struct {
	Data interface{}
}

// FILE asks run() to serve the file whose path is Data.
type FILE struct {
	Data string
}
const (
	// Run modes: in Debug, handler errors are rendered into the
	// response; in Product a generic "Server Error" page is shown
	// (see run()).
	Debug = iota + 1
	Product
	// XSRF_TAG is the form-field name (and cookie-name suffix) used
	// for XSRF tokens.
	XSRF_TAG string = "_xsrf"
)
// App is a sub-application mounted on a Server under BasePath and,
// optionally, a dedicated Domain. It owns its route table, filters,
// configuration, registered actions, template/static managers, and
// logging; Server and Logger are wired up in initApp.
type App struct {
	BasePath           string
	Name               string
	Domain             string
	Route              *route.Route
	filters            []Filter
	Server             *Server
	AppConfig          *AppConfig
	Config             *CONF // free-form per-app key/value configuration
	Actions            map[string]interface{}
	ActionsPath        map[reflect.Type]string
	ActionsNamePath    map[string]string
	ActionsMethodRoute map[string]map[string]string
	FuncMaps           template.FuncMap
	Logger             *log.Logger
	VarMaps            T
	SessionManager     *httpsession.Manager //Session manager
	RootTemplate       *template.Template
	ErrorTemplate      *template.Template
	StaticVerMgr       *StaticVerMgr
	TemplateMgr        *TemplateMgr
	ContentEncoding    string
	RequestTime        time.Time // set per request; read by ElapsedTime
	Cryptor
	XsrfManager
}
// NewAppConfig returns an AppConfig populated with the framework
// defaults: Product mode, sessions on, XSRF checking, 10MB upload
// limit, cached and auto-reloaded templates, versioned static files.
func NewAppConfig() *AppConfig {
	return &AppConfig{
		Mode:              Product,
		StaticDir:         "static",
		TemplateDir:       "templates",
		SessionOn:         true,
		SessionTimeout:    3600, // NOTE(review): time.Duration, so 3600ns — likely meant seconds; confirm
		MaxUploadSize:     10 * 1024 * 1024,
		StaticFileVersion: true,
		CacheTemplates:    true,
		ReloadTemplates:   true,
		CheckXsrf:         true,
		FormMapToStruct:   true,
	}
}
// AppConfig holds the per-app settings consumed throughout App
// (routing, static files, sessions, cookies, XSRF, form mapping).
type AppConfig struct {
	Mode              int // Debug or Product; selects error detail in run()
	StaticDir         string
	TemplateDir       string
	SessionOn         bool
	MaxUploadSize     int64 // multipart-form memory limit, bytes
	CookieSecret      string
	CookieLimitIP     bool
	CookieLimitUA     bool
	CookiePrefix      string
	CookieDomain      string
	StaticFileVersion bool
	CacheTemplates    bool
	ReloadTemplates   bool
	CheckXsrf         bool
	// SessionTimeout is a time.Duration; NewAppConfig assigns the bare
	// number 3600 (nanoseconds), which fails initApp's > time.Second
	// guard — confirm the intended unit.
	SessionTimeout    time.Duration
	FormMapToStruct   bool
	EnableHttpCache   bool
	AuthBasedOnCookie bool
}
// NewApp constructs an App mounted at the given base path with the
// given name, wired to the package defaults (funcs, managers, cryptor,
// XSRF manager). The owning Server and Logger are attached later by
// initApp.
func NewApp(path string, name string) *App {
	return &App{
		BasePath:           path,
		Name:               name,
		Route:              route.NewRoute(),
		AppConfig:          NewAppConfig(),
		Config:             NewCONF(),
		Actions:            map[string]interface{}{},
		ActionsPath:        map[reflect.Type]string{},
		ActionsNamePath:    map[string]string{},
		ActionsMethodRoute: make(map[string]map[string]string),
		FuncMaps:           DefaultFuncs,
		VarMaps:            T{},
		filters:            make([]Filter, 0),
		StaticVerMgr:       DefaultStaticVerMgr,
		TemplateMgr:        DefaultTemplateMgr,
		Cryptor:            DefaultCryptor,
		XsrfManager:        DefaultXsrfManager,
	}
}
// IsRootApp reports whether this app is mounted at the server root.
func (a *App) IsRootApp() bool {
	const rootBase = "/"
	return rootBase == a.BasePath
}
// initApp wires the app into its server: static-version and template
// managers (shared with the root app when the directories coincide),
// template funcs and vars, the session manager, and the logger.
func (a *App) initApp() {
	var isRootApp bool = a.IsRootApp()
	if a.AppConfig.StaticFileVersion {
		// reuse the root app's manager when both serve the same StaticDir
		if isRootApp || a.Server.RootApp.AppConfig.StaticDir != a.AppConfig.StaticDir {
			if !isRootApp {
				a.StaticVerMgr = new(StaticVerMgr)
			}
			a.StaticVerMgr.Init(a, a.AppConfig.StaticDir)
		} else {
			a.StaticVerMgr = a.Server.RootApp.StaticVerMgr
		}
	}
	if a.AppConfig.CacheTemplates {
		// same sharing rule for the template manager
		if isRootApp || a.Server.RootApp.AppConfig.TemplateDir != a.AppConfig.TemplateDir {
			if !isRootApp {
				a.TemplateMgr = new(TemplateMgr)
			}
			a.TemplateMgr.Init(a, a.AppConfig.TemplateDir, a.AppConfig.ReloadTemplates)
		} else {
			a.TemplateMgr = a.Server.RootApp.TemplateMgr
		}
	}
	a.FuncMaps["StaticUrl"] = a.StaticUrl
	a.FuncMaps["XsrfName"] = XsrfName
	a.VarMaps["XwebVer"] = Version
	if a.AppConfig.SessionOn {
		if a.Server.SessionManager != nil {
			a.SessionManager = a.Server.SessionManager
		} else {
			a.SessionManager = httpsession.Default()
			// NOTE(review): with the default SessionTimeout of 3600 (ns)
			// this guard skips SetMaxAge — confirm intended.
			if a.AppConfig.SessionTimeout > time.Second {
				a.SessionManager.SetMaxAge(a.AppConfig.SessionTimeout)
			}
			a.SessionManager.Run()
		}
	}
	if a.Logger == nil {
		a.Logger = a.Server.Logger
	}
}
// Close shuts down the app-owned watchers/managers. The session
// manager branch only applies when the app created its own manager
// (server did not supply a shared one); its shutdown call is
// currently disabled.
func (a *App) Close() {
	if a.AppConfig.StaticFileVersion && a.StaticVerMgr != nil {
		a.StaticVerMgr.Close()
	}
	if a.AppConfig.CacheTemplates && a.TemplateMgr != nil {
		a.TemplateMgr.Close()
	}
	if a.AppConfig.SessionOn && a.Server.SessionManager == nil &&
		a.SessionManager != nil {
		//a.SessionManager.Close()
	}
}
// DelDomain detaches the app from its dedicated domain, clearing both
// directions of the server's app<->domain mapping.
func (a *App) DelDomain() {
	a.Domain = ""
	domain, ok := a.Server.App2Domain[a.Name]
	if !ok {
		return
	}
	delete(a.Server.App2Domain, a.Name)
	delete(a.Server.Domain2App, domain)
}
// SetDomain binds the app to a dedicated domain and records the
// mapping in both server-side lookup tables.
func (a *App) SetDomain(domain string) {
	a.Domain = domain
	name := a.Name
	a.Server.App2Domain[name] = domain
	a.Server.Domain2App[domain] = name
}
// SetStaticDir overrides the directory that static files are served from.
func (a *App) SetStaticDir(dir string) {
	a.AppConfig.StaticDir = dir
}
// SetTemplateDir overrides the directory that templates are loaded from.
func (a *App) SetTemplateDir(path string) {
	a.AppConfig.TemplateDir = path
}
// getTemplatePath returns the path of the named template inside
// TemplateDir, or "" when no such file exists.
func (a *App) getTemplatePath(name string) string {
	if tf := path.Join(a.AppConfig.TemplateDir, name); fileExists(tf) {
		return tf
	}
	return ""
}
// SetConfig stores an arbitrary value under name in the app's CONF.
func (app *App) SetConfig(name string, val interface{}) {
	app.Config.SetInterface(name, val)
}

// GetConfig retrieves the value stored under name from the app's CONF.
func (app *App) GetConfig(name string) interface{} {
	return app.Config.GetInterface(name)
}

// SetConfigString stores a string value under name in the app's CONF.
func (app *App) SetConfigString(name string, val string) {
	app.Config.SetString(name, val)
}

// GetConfigString retrieves the string stored under name from the CONF.
func (app *App) GetConfigString(name string) string {
	return app.Config.GetString(name)
}
// AddAction registers every given controller at the app root ("/").
func (app *App) AddAction(cs ...interface{}) {
	for _, controller := range cs {
		app.AddRouter("/", controller)
	}
}
// AutoAction registers each controller under a path derived from its
// type name: a type named "FooAction" is routed at "/foo". Controllers
// whose type name does not end in "Action" are skipped with a warning.
func (app *App) AutoAction(cs ...interface{}) {
	for _, c := range cs {
		t := reflect.Indirect(reflect.ValueOf(c)).Type()
		name := t.Name()
		if !strings.HasSuffix(name, "Action") {
			// fixed wording of the original garbled warning
			app.Warn("AutoAction needs a struct whose name ends with Action")
			continue
		}
		path := strings.ToLower(strings.TrimSuffix(name, "Action"))
		app.AddRouter("/"+path, c)
	}
}
// Assign exposes a value or function to templates: functions are added
// to FuncMaps, everything else to VarMaps.
func (app *App) Assign(name string, varOrFun interface{}) {
	if reflect.TypeOf(varOrFun).Kind() != reflect.Func {
		app.VarMaps[name] = varOrFun
		return
	}
	app.FuncMaps[name] = varOrFun
}
// MultiAssign registers every entry of t via Assign.
func (app *App) MultiAssign(t *T) {
	for k, v := range *t {
		app.Assign(k, v)
	}
}
// AddFilter appends a request filter to the app's filter chain.
func (app *App) AddFilter(f Filter) {
	app.filters = append(app.filters, f)
}
// Debug logs params via Logger.Debug, prefixed with "[appName]".
func (app *App) Debug(params ...interface{}) {
	args := append([]interface{}{"[" + app.Name + "]"}, params...)
	app.Logger.Debug(args...)
}

// Info logs params via Logger.Info, prefixed with "[appName]".
func (app *App) Info(params ...interface{}) {
	args := append([]interface{}{"[" + app.Name + "]"}, params...)
	app.Logger.Info(args...)
}

// Warn logs params via Logger.Warn, prefixed with "[appName]".
func (app *App) Warn(params ...interface{}) {
	args := append([]interface{}{"[" + app.Name + "]"}, params...)
	app.Logger.Warn(args...)
}

// Error logs params via Logger.Error, prefixed with "[appName]".
func (app *App) Error(params ...interface{}) {
	args := append([]interface{}{"[" + app.Name + "]"}, params...)
	app.Logger.Error(args...)
}

// Fatal logs params via Logger.Fatal, prefixed with "[appName]".
func (app *App) Fatal(params ...interface{}) {
	args := append([]interface{}{"[" + app.Name + "]"}, params...)
	app.Logger.Fatal(args...)
}

// Panic logs params via Logger.Panic, prefixed with "[appName]".
func (app *App) Panic(params ...interface{}) {
	args := append([]interface{}{"[" + app.Name + "]"}, params...)
	app.Logger.Panic(args...)
}

// Debugf logs a formatted message via Logger.Debugf with the app prefix.
func (app *App) Debugf(format string, params ...interface{}) {
	app.Logger.Debugf("["+app.Name+"] "+format, params...)
}

// Infof logs a formatted message via Logger.Infof with the app prefix.
func (app *App) Infof(format string, params ...interface{}) {
	app.Logger.Infof("["+app.Name+"] "+format, params...)
}

// Warnf logs a formatted message via Logger.Warnf with the app prefix.
func (app *App) Warnf(format string, params ...interface{}) {
	app.Logger.Warnf("["+app.Name+"] "+format, params...)
}

// Errorf logs a formatted message via Logger.Errorf with the app prefix.
func (app *App) Errorf(format string, params ...interface{}) {
	app.Logger.Errorf("["+app.Name+"] "+format, params...)
}

// Fatalf logs a formatted message via Logger.Fatalf with the app prefix.
func (app *App) Fatalf(format string, params ...interface{}) {
	app.Logger.Fatalf("["+app.Name+"] "+format, params...)
}

// Panicf logs a formatted message via Logger.Panicf with the app prefix.
func (app *App) Panicf(format string, params ...interface{}) {
	app.Logger.Panicf("["+app.Name+"] "+format, params...)
}
// filter runs every registered filter in registration order; the first
// filter that returns false aborts the chain and the request.
func (app *App) filter(w http.ResponseWriter, req *http.Request) bool {
	for _, f := range app.filters {
		if ok := f.Do(w, req); !ok {
			return false
		}
	}
	return true
}
func (app *App) AddRouter(url string, c interface{}) {
t := reflect.TypeOf(c).Elem()
v := reflect.ValueOf(c)
actionFullName := t.Name()
actionShortName := strings.TrimSuffix(actionFullName, "Action")
actionShortName = strings.ToLower(actionShortName)
app.ActionsPath[t] = url
app.Actions[actionFullName] = c
app.ActionsNamePath[actionFullName] = url
app.ActionsMethodRoute[actionFullName] = make(map[string]string)
for i := 0; i < t.NumField(); i++ {
if t.Field(i).Type != mapperType {
continue
}
name := t.Field(i).Name
a := strings.Title(name)
m := v.MethodByName(a)
if !m.IsValid() {
continue
}
tag := t.Field(i).Tag
tagStr := tag.Get("xweb")
methods := map[string]bool{} //map[string]bool{"GET": true, "POST": true}
var p string
if tagStr != "" {
tags := strings.Split(tagStr, " ")
path := tagStr
length := len(tags)
if length >= 2 { //`xweb:"GET|POST /index"`
for _, method := range strings.Split(tags[0], "|") {
method = strings.ToUpper(method)
m := v.MethodByName(a + "_" + method)
methods[method] = m.IsValid()
}
path = tags[1]
if path == "" {
path = name
}
if tags[1][0] != '/' {
path = "/" + actionShortName + "/" + path
}
} else if length == 1 {
if matched, _ := regexp.MatchString(`^[A-Z]+(\|[A-Z]+)*$`, tags[0]); !matched {
//非全大写字母时,判断为网址规则
path = tags[0]
if tags[0][0] != '/' { //`xweb:"index"`
path = "/" + actionShortName + "/" + path
}
m := v.MethodByName(a + "_GET")
methods["GET"] = m.IsValid()
m = v.MethodByName(a + "_POST")
methods["POST"] = m.IsValid()
} else { //`xweb:"GET|POST"`
for _, method := range strings.Split(tags[0], "|") {
method = strings.ToUpper(method)
m := v.MethodByName(a + "_" + method)
methods[method] = m.IsValid()
}
path = "/" + actionShortName + "/" + name
}
} else {
path = "/" + actionShortName + "/" + name
m := v.MethodByName(a + "_GET")
methods["GET"] = m.IsValid()
m = v.MethodByName(a + "_POST")
methods["POST"] = m.IsValid()
}
p = strings.TrimRight(url, "/") + path
} else {
p = strings.TrimRight(url, "/") + "/" + actionShortName + "/" + name
m := v.MethodByName(a + "_GET")
methods["GET"] = m.IsValid()
m = v.MethodByName(a + "_POST")
methods["POST"] = m.IsValid()
}
p = removeStick(p)
app.Route.Set(p, a, methods, t)
app.Debug("Action:", actionFullName+"."+a+";", "Route Information:", p+";", "Request Method:", methods)
}
}
// ElapsedTimeString formats the elapsed request time, e.g. "0.012s".
func (a *App) ElapsedTimeString() string {
	const layout = "%.3fs"
	return fmt.Sprintf(layout, a.ElapsedTime())
}
// ElapsedTime returns the number of seconds elapsed since the current
// request started (a.RequestTime).
func (a *App) ElapsedTime() float64 {
	// time.Since(t) is the idiomatic equivalent of time.Now().Sub(t).
	return time.Since(a.RequestTime).Seconds()
}
// VisitedLog writes one access-log line for a finished request:
// remote address, method, status, path, response size, elapsed time.
// A zero status is reported as 200; 2xx/3xx go to the info log,
// everything else to the error log.
func (a *App) VisitedLog(req *http.Request, statusCode int, requestPath string, responseSize int64) {
	code := statusCode
	if code == 0 {
		code = 200
	}
	logFn := a.Error
	if code >= 200 && code < 400 {
		logFn = a.Info
	}
	logFn(req.RemoteAddr, req.Method, code, requestPath, responseSize, a.ElapsedTimeString())
}
// routeHandler is the main route handler in web.go: it serves static
// files, parses the request form, applies filters, dispatches to the
// matched action via the route table, and falls back to
// index.html/index.htm or a 404. Every exit path is access-logged
// through the deferred VisitedLog call via the statusCode /
// responseSize locals it closes over.
func (a *App) routeHandler(req *http.Request, w http.ResponseWriter) {
	var (
		requestPath  string = req.URL.Path
		statusCode   int    = 0
		responseSize int64  = 0
	)
	// log the visit no matter which branch returns
	defer func() {
		a.VisitedLog(req, statusCode, requestPath, responseSize)
	}()
	if !a.IsRootApp() || a.Server.Config.UrlSuffix != "" || a.Server.Config.UrlPrefix != "" {
		// static files, needed op
		if req.Method == "GET" || req.Method == "HEAD" {
			success, size := a.TryServingFile(requestPath, req, w)
			if success {
				statusCode = 200
				responseSize = size
				return
			}
			if requestPath == "/favicon.ico" {
				statusCode = 404
				a.error(w, 404, "Page not found")
				return
			}
		}
	}
	// ignore errors from ParseForm because it's usually harmless.
	ct := req.Header.Get("Content-Type")
	if strings.Contains(ct, "multipart/form-data") {
		req.ParseMultipartForm(a.AppConfig.MaxUploadSize)
	} else {
		req.ParseForm()
	}
	// Set the default content-type
	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	if !a.filter(w, req) {
		// a filter rejected the request; 302 is what gets logged
		// (presumably the filter wrote its own redirect — confirm)
		statusCode = 302
		return
	}
	// split a trailing extension (e.g. ".json") off the URL path; it is
	// forwarded to the action as ExtensionName
	extension := ".html"
	if epos := strings.LastIndex(req.URL.Path, "."); epos > 0 {
		extension = req.URL.Path[epos:]
		req.URL.Path = req.URL.Path[0:epos]
	}
	requestPath = req.URL.Path // filters may rewrite req.URL.Path
	reqPath := removeStick(requestPath)
	if a.Domain == "" && a.BasePath != "/" {
		reqPath = "/" + strings.TrimPrefix(reqPath, a.BasePath)
	}
	// HEAD requests are routed like GET
	reqMethod := Ternary(req.Method == "HEAD", "GET", req.Method).(string)
	args, fnName, rfType, on := a.Route.Get(reqPath, reqMethod)
	if rfType != nil && fnName != "" {
		var (
			isBreak bool
			suffix  string
		)
		// "on" selects the method-suffixed handler variant (e.g. Foo_GET)
		if on {
			suffix = reqMethod
		}
		isBreak, statusCode, responseSize = a.run(req, w, fnName, rfType, args, suffix, extension)
		if isBreak {
			return
		}
	}
	// try serving index.html or index.htm
	if req.Method == "GET" || req.Method == "HEAD" {
		if ok, size := a.TryServingFile(path.Join(requestPath, "index.html"), req, w); ok {
			statusCode = 200
			responseSize = size
			return
		} else if ok, size := a.TryServingFile(path.Join(requestPath, "index.htm"), req, w); ok {
			statusCode = 200
			responseSize = size
			return
		}
	}
	a.error(w, 404, "Page not found")
	statusCode = 404
}
func (a *App) run(req *http.Request, w http.ResponseWriter,
handlerName string, reflectType reflect.Type,
args []reflect.Value, handlerSuffix string, extensionName string) (isBreak bool,
statusCode int, responseSize int64) {
if handlerSuffix != "" {
handlerName += "_" + handlerSuffix
}
isBreak = true
vc := reflect.New(reflectType)
c := &Action{
Request: req,
App: a,
ResponseWriter: w,
T: T{},
f: T{},
Option: &ActionOption{
AutoMapForm: a.AppConfig.FormMapToStruct,
CheckXsrf: a.AppConfig.CheckXsrf,
},
ExtensionName: extensionName,
}
for k, v := range a.VarMaps {
c.T[k] = v
}
elem := vc.Elem()
//设置Action字段的值
fieldA := elem.FieldByName("Action")
if fieldA.IsValid() {
fieldA.Set(reflect.ValueOf(c))
}
//设置C字段的值
fieldC := elem.FieldByName("C")
if fieldC.IsValid() {
fieldC.Set(reflect.ValueOf(vc))
}
//执行Init方法
initM := vc.MethodByName("Init")
if initM.IsValid() {
initM.Call([]reflect.Value{})
}
//表单数据自动映射到结构体
if c.Option.AutoMapForm {
a.StructMap(elem, req)
}
//验证XSRF
if c.Option.CheckXsrf {
a.XsrfManager.Init(c)
if req.Method == "POST" {
formVals := req.Form[XSRF_TAG]
var formVal string
if len(formVals) > 0 {
formVal = formVals[0]
}
if formVal == "" ||
!a.XsrfManager.Valid(a.AppConfig.CookiePrefix+
XSRF_TAG, formVal) {
a.error(w, 500, "xsrf token error.")
a.Error("xsrf token error.")
statusCode = 500
return
}
}
}
structName := reflect.ValueOf(reflectType.Name())
actionName := reflect.ValueOf(handlerName)
//执行Before方法
initM = vc.MethodByName("Before")
if initM.IsValid() {
structAction := []reflect.Value{structName, actionName}
if ok := initM.Call(structAction); ok[0].Kind() == reflect.Bool && !ok[0].Bool() {
responseSize = c.ResponseSize
return
}
}
ret, err := a.SafelyCall(vc, handlerName, args)
if err != nil {
//there was an error or panic while calling the handler
if a.AppConfig.Mode == Debug {
a.error(w, 500, fmt.Sprintf("<pre>handler error: %v</pre>", err))
} else if a.AppConfig.Mode == Product {
a.error(w, 500, "Server Error")
}
statusCode = 500
responseSize = c.ResponseSize
return
}
statusCode = fieldA.Interface().(*Action).StatusCode
//执行After方法
initM = vc.MethodByName("After")
if initM.IsValid() {
structAction := []reflect.Value{structName, actionName}
structAction = append(structAction, ret...)
if len(structAction) != initM.Type().NumIn() {
a.Errorf("Error : %v.After(): The number of params is not adapted.", structName)
return
}
ret = initM.Call(structAction)
}
if len(ret) == 0 {
responseSize = c.ResponseSize
return
}
sval := ret[0]
intf := sval.Interface()
kind := sval.Kind()
var content []byte
if intf == nil || kind == reflect.Bool {
responseSize = c.ResponseSize
return
} else if kind == reflect.String {
content = []byte(sval.String())
} else if kind == reflect.Slice && sval.Type().Elem().Kind() == reflect.Uint8 {
content = intf.([]byte)
} else if _, ok := intf.(bool); ok {
responseSize = c.ResponseSize
return
} else if obj, ok := intf.(JSON); ok {
c.ServeJson(obj.Data)
responseSize = c.ResponseSize
return
} else if obj, ok := intf.(JSONP); ok {
c.ServeJsonp(obj.Data, obj.Callback)
responseSize = c.ResponseSize
return
} else if obj, ok := intf.(XML); ok {
c.ServeXml(obj.Data)
responseSize = c.ResponseSize
return
} else if obj, ok := intf.(FILE); ok {
c.ServeFile(obj.Data)
return
} else if obj, ok := intf.(SHOW); ok {
c.Render(obj.Tmpl, obj.T)
return
} else if obj, ok := intf.(JUMP); ok {
c.Redirect(obj.Url, obj.Code)
return
} else if err, ok := intf.(error); ok {
if err != nil {
a.Error("Error:", err)
a.error(w, 500, "Server Error")
statusCode = 500
} else {
responseSize = c.ResponseSize
}
return
} else if str, ok := intf.(string); ok {
content = []byte(str)
} else if byt, ok := intf.([]byte); ok {
content = byt
} else {
var validType bool
Event("OutputBaseOnExtensionName", []interface{}{c, intf}, func(ok bool) {
if !ok {
validType = true
return
}
switch c.ExtensionName {
case ".json":
c.ServeJson(intf)
responseSize = c.ResponseSize
validType = true
case ".xml":
c.ServeXml(intf)
responseSize = c.ResponseSize
validType = true
}
})
if !validType {
a.Warnf("unknown returned result type %v, ignored %v", kind, intf)
return
}
}
w.Header().Set("Content-Length", strconv.Itoa(len(content)))
size, err := w.Write(content)
if err != nil {
a.Errorf("Error during write: %v", err)
statusCode = 500
return
}
responseSize = int64(size)
return
}
// error renders the error template with the given HTTP status and body
// content and writes it to w, returning any write error. The template
// text is loaded lazily from TemplateDir/_error.html on first use and
// cached in the package-level errorTmpl; defaultErrorTmpl is the
// fallback when the file is absent or empty.
// NOTE(review): errorTmpl is read and written without synchronization —
// confirm this is safe for concurrent first requests.
func (a *App) error(w http.ResponseWriter, status int, content string) error {
	w.WriteHeader(status)
	if errorTmpl == "" {
		errTmplFile := a.AppConfig.TemplateDir + "/_error.html"
		if file, err := os.Stat(errTmplFile); err == nil && !file.IsDir() {
			if b, e := ioutil.ReadFile(errTmplFile); e == nil {
				errorTmpl = string(b)
			}
		}
		if errorTmpl == "" {
			errorTmpl = defaultErrorTmpl
		}
	}
	// template arguments: status, status text (twice), body, version
	res := fmt.Sprintf(errorTmpl, status, statusText[status],
		status, statusText[status], content, Version)
	_, err := w.Write([]byte(res))
	return err
}
// StaticUrl resolves url to a static-file URL under the app's base
// path. When this app shares its StaticDir with the root app, the root
// app's base path is used instead. With static-file versioning enabled
// and a known version, "?v=<ver>" is appended.
// NOTE(review): path.Join cleans its result — verify the "?v=" suffix
// survives for inputs that require cleaning.
func (a *App) StaticUrl(url string) string {
	var basePath string
	if a.AppConfig.StaticDir == RootApp().AppConfig.StaticDir {
		basePath = RootApp().BasePath
	} else {
		basePath = a.BasePath
	}
	if !a.AppConfig.StaticFileVersion {
		return path.Join(basePath, url)
	}
	ver := a.StaticVerMgr.GetVersion(url)
	if ver == "" {
		return path.Join(basePath, url)
	}
	return path.Join(basePath, url+"?v="+ver)
}
// SafelyCall invokes the method named `method` on vc with args inside a
// recover block. When the server is configured with RecoverPanic, a
// panicking handler is logged together with a file:line stack walk and
// the collected text is returned as an error; otherwise the panic is
// re-raised. A missing method is reported as an error instead of
// triggering reflect's "call of invalid Value" panic.
func (a *App) SafelyCall(vc reflect.Value, method string, args []reflect.Value) (resp []reflect.Value, err error) {
	defer func() {
		if e := recover(); e != nil {
			if !a.Server.Config.RecoverPanic {
				// go back to panic
				panic(e)
			}
			resp = nil
			var sb strings.Builder
			fmt.Fprintf(&sb, "Handler crashed with error: %v", e)
			// walk the call stack, skipping this frame
			for i := 1; ; i++ {
				_, file, line, ok := runtime.Caller(i)
				if !ok {
					break
				}
				fmt.Fprintf(&sb, "\n%v %v", file, line)
			}
			content := sb.String()
			a.Error(content)
			err = errors.New(content)
		}
	}()
	function := vc.MethodByName(method)
	if !function.IsValid() {
		// MethodByName returns the zero Value for unknown methods;
		// calling it would panic inside reflect.
		return nil, fmt.Errorf("method %q not found", method)
	}
	return function.Call(args), nil
}
// InitHeadContent sets the response encoding/length headers. When a
// compressed content encoding is active, Content-Length is omitted
// because the on-the-wire size differs from contentLength.
func (a *App) InitHeadContent(w http.ResponseWriter, contentLength int64) {
	switch a.ContentEncoding {
	case "gzip":
		w.Header().Set("Content-Encoding", "gzip")
	case "deflate":
		w.Header().Set("Content-Encoding", "deflate")
	default:
		w.Header().Set("Content-Length", strconv.FormatInt(contentLength, 10))
	}
}
// TryServingFile attempts to serve a static file for the request and
// reports whether it succeeded, together with the file's size on disk.
// Files whose extension appears in StaticExtensionsToGzip are served
// from an in-memory compressed copy when gzip support is enabled.
func (a *App) TryServingFile(name string, req *http.Request, w http.ResponseWriter) (bool, int64) {
	rel := name
	if strings.HasPrefix(rel, a.BasePath) {
		rel = rel[len(a.BasePath):]
	}
	var size int64
	target := filepath.Join(a.AppConfig.StaticDir, rel)
	info, err := os.Stat(target)
	if err != nil {
		return false, size
	}
	if info.IsDir() {
		return false, size
	}
	size = info.Size()
	// Decide whether this file's extension is configured for compression.
	compress := false
	if a.Server.Config.EnableGzip && len(a.Server.Config.StaticExtensionsToGzip) > 0 {
		lower := strings.ToLower(target)
		for _, ext := range a.Server.Config.StaticExtensionsToGzip {
			if strings.HasSuffix(lower, strings.ToLower(ext)) {
				compress = true
				break
			}
		}
	}
	if !compress {
		http.ServeFile(w, req, target)
		return true, size
	}
	a.ContentEncoding = GetAcceptEncodingZip(req)
	memFile, err := OpenMemZipFile(target, a.ContentEncoding)
	if err != nil {
		return false, size
	}
	a.InitHeadContent(w, info.Size())
	http.ServeContent(w, req, target, info.ModTime(), memFile)
	return true, size
}
// StructMap maps request form parameters onto the fields of the struct
// pointed to by m. It delegates to namedStructMap with no name prefix.
func (a *App) StructMap(m interface{}, r *http.Request) error {
	return a.namedStructMap(m, r, "")
}
// SplitJson splits a bracketed form key such as "user[name][test]" into
// its path segments: ["user", "name", "test"]. Plain keys without
// brackets are returned as a single segment. An error ("unknown
// character") is returned for a closing bracket with no matching
// opening one, or an empty segment before an opening bracket.
//
// Fixes relative to the previous version:
//   - the error string typo "unknow" is corrected;
//   - the segment end is tracked exclusively, so single-character
//     segments ("a", "a[b]") are no longer dropped or mis-rejected;
//   - iteration is byte-wise (brackets are ASCII), so multibyte UTF-8
//     characters at the end of a segment are kept intact.
func SplitJson(s string) ([]string, error) {
	res := make([]string, 0)
	// Current segment is s[begin:end]; end is exclusive.
	var begin, end int
	var inBracket bool
	for i := 0; i < len(s); i++ {
		switch s[i] {
		case '[':
			inBracket = true
			// Flush the pending segment unless the previous byte already
			// closed one (e.g. the second '[' in "a[b][c]").
			if i > 0 && s[i-1] != ']' {
				if begin == end {
					return nil, errors.New("unknown character")
				}
				res = append(res, s[begin:end])
			}
			begin = i + 1
			end = begin
		case ']':
			if !inBracket {
				return nil, errors.New("unknown character")
			}
			inBracket = false
			if begin != end {
				res = append(res, s[begin:end])
				begin = i + 1
				end = begin
			}
		default:
			end = i + 1
		}
		// Flush the trailing segment of a plain key.
		if i == len(s)-1 && begin != end {
			res = append(res, s[begin:end])
		}
	}
	return res, nil
}
// namedStructMap maps request form values onto the fields of the struct
// pointed to by m. Keys may be dotted ("a.b.c") or bracketed
// ("a[b][c]", split via SplitJson). When topName is non-empty, only
// keys prefixed with topName are considered and the prefix plus one
// separator byte is stripped. Unmappable keys or unconvertible values
// are logged and skipped; the function itself always returns nil.
func (a *App) namedStructMap(m interface{}, r *http.Request, topName string) error {
	vc := reflect.ValueOf(m).Elem()
	tc := reflect.TypeOf(m).Elem()
	for k, t := range r.Form {
		// Skip the XSRF token and empty keys.
		if k == XSRF_TAG || k == "" {
			continue
		}
		if topName != "" {
			if !strings.HasPrefix(k, topName) {
				continue
			}
			// +1 skips the separator character after the prefix.
			k = k[len(topName)+1:]
		}
		// Only the first submitted value is used for scalar fields;
		// the full slice t is used for slice/array fields below.
		v := t[0]
		names := strings.Split(k, ".")
		var err error
		if len(names) == 1 {
			names, err = SplitJson(k)
			if err != nil {
				a.Warn("Unrecognize form key", k, err)
				continue
			}
		}
		// Walk down the struct following the name path: intermediate
		// segments select nested structs, the last segment is the field
		// that receives the converted value.
		var value reflect.Value = vc
		for i, name := range names {
			// Form keys are lower-case by convention; exported Go
			// fields start with an upper-case letter.
			name = strings.Title(name)
			if i != len(names)-1 {
				// Intermediate path segment: descend into the struct,
				// allocating nil pointers along the way.
				if value.Kind() != reflect.Struct {
					a.Warnf("arg error, value kind is %v", value.Kind())
					break
				}
				//fmt.Println(name)
				value = value.FieldByName(name)
				if !value.IsValid() {
					a.Warnf("(%v value is not valid %v)", name, value)
					break
				}
				if !value.CanSet() {
					a.Warnf("can not set %v -> %v", name, value.Interface())
					break
				}
				// Fields tagged form_options:"-" are excluded from binding.
				if tagfast.Tag2(tc, name, "form_options") == "-" {
					continue
				}
				if value.Kind() == reflect.Ptr {
					if value.IsNil() {
						value.Set(reflect.New(value.Type().Elem()))
					}
					value = value.Elem()
				}
			} else {
				// Final path segment: convert v (or t) to the field's type.
				if value.Kind() != reflect.Struct {
					a.Warnf("arg error, value %v kind is %v", name, value.Kind())
					break
				}
				tv := value.FieldByName(name)
				if !tv.IsValid() {
					break
				}
				if !tv.CanSet() {
					a.Warnf("can not set %v to %v", k, tv)
					break
				}
				if tagfast.Tag2(tc, name, "form_options") == "-" {
					continue
				}
				if tv.Kind() == reflect.Ptr {
					tv.Set(reflect.New(tv.Type().Elem()))
					tv = tv.Elem()
				}
				var l interface{}
				switch k := tv.Kind(); k {
				case reflect.String:
					// Optional per-field input filtering, e.g. HTML escaping.
					switch tagfast.Tag2(tc, name, "form_filter") {
					case "html":
						v = DefaultHtmlFilter(v)
					}
					l = v
					tv.Set(reflect.ValueOf(l))
				case reflect.Bool:
					// Anything except "false"/"0" counts as true.
					l = (v != "false" && v != "0")
					tv.Set(reflect.ValueOf(l))
				case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32:
					x, err := strconv.Atoi(v)
					if err != nil {
						a.Warnf("arg %v as int: %v", v, err)
						break
					}
					l = x
					tv.Set(reflect.ValueOf(l))
				case reflect.Int64:
					x, err := strconv.ParseInt(v, 10, 64)
					if err != nil {
						a.Warnf("arg %v as int64: %v", v, err)
						break
					}
					l = x
					tv.Set(reflect.ValueOf(l))
				case reflect.Float32, reflect.Float64:
					x, err := strconv.ParseFloat(v, 64)
					if err != nil {
						a.Warnf("arg %v as float64: %v", v, err)
						break
					}
					l = x
					tv.Set(reflect.ValueOf(l))
				case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
					x, err := strconv.ParseUint(v, 10, 64)
					if err != nil {
						a.Warnf("arg %v as uint: %v", v, err)
						break
					}
					l = x
					tv.Set(reflect.ValueOf(l))
				case reflect.Struct:
					// Structs must implement FromConversion, except
					// time.Time which gets a cascade of layouts.
					if tvf, ok := tv.Interface().(FromConversion); ok {
						err := tvf.FromString(v)
						if err != nil {
							a.Warnf("struct %v invoke FromString faild", tvf)
						}
					} else if tv.Type().String() == "time.Time" {
						x, err := time.Parse("2006-01-02 15:04:05.000 -0700", v)
						if err != nil {
							x, err = time.Parse("2006-01-02 15:04:05", v)
							if err != nil {
								x, err = time.Parse("2006-01-02", v)
								if err != nil {
									a.Warnf("unsupported time format %v, %v", v, err)
									break
								}
							}
						}
						l = x
						tv.Set(reflect.ValueOf(l))
					} else {
						a.Warn("can not set an struct which is not implement Fromconversion interface")
					}
				case reflect.Ptr:
					// A pointer field was already dereferenced above, so a
					// Ptr kind here means pointer-to-pointer: unsupported.
					a.Warn("can not set an ptr of ptr")
				case reflect.Slice, reflect.Array:
					tt := tv.Type().Elem()
					tk := tt.Kind()
					// []string gets the raw form values directly.
					if tk == reflect.String {
						tv.Set(reflect.ValueOf(t))
						break
					}
					if tv.IsNil() {
						tv.Set(reflect.MakeSlice(tv.Type(), len(t), len(t)))
					}
					// Convert each submitted value into the element type.
					for i, s := range t {
						var err error
						switch tk {
						case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int8, reflect.Int64:
							var v int64
							v, err = strconv.ParseInt(s, 10, tt.Bits())
							if err == nil {
								tv.Index(i).SetInt(v)
							}
						case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
							var v uint64
							v, err = strconv.ParseUint(s, 10, tt.Bits())
							if err == nil {
								tv.Index(i).SetUint(v)
							}
						case reflect.Float32, reflect.Float64:
							var v float64
							v, err = strconv.ParseFloat(s, tt.Bits())
							if err == nil {
								tv.Index(i).SetFloat(v)
							}
						case reflect.Bool:
							var v bool
							v, err = strconv.ParseBool(s)
							if err == nil {
								tv.Index(i).SetBool(v)
							}
						case reflect.Complex64, reflect.Complex128:
							// TODO:
							err = fmt.Errorf("unsupported slice element type %v", tk.String())
						default:
							err = fmt.Errorf("unsupported slice element type %v", tk.String())
						}
						if err != nil {
							a.Warnf("slice error: %v, %v", name, err)
							break
						}
					}
				default:
					break
				}
			}
		}
	}
	return nil
}
// Redirect sends an HTTP redirect to url and logs any failure. The
// requestPath argument is accepted for signature compatibility and is
// not used by the implementation.
func (app *App) Redirect(w http.ResponseWriter, requestPath, url string, status ...int) error {
	if err := redirect(w, url, status...); err != nil {
		app.Errorf("redirect error: %s", err)
		return err
	}
	return nil
}
// Action returns the controller registered under name, or nil when no
// such action exists.
func (app *App) Action(name string) interface{} {
	v, ok := app.Actions[name]
	if !ok {
		return nil
	}
	return v
}
/*
Nodes reports every routed controller, its handler methods, and the
HTTP verbs accepted by each, e.g.:

	{
		"AdminAction": {
			"Index": ["GET","POST"],
			"Add":   ["GET","POST"],
			"Edit":  ["GET","POST"]
		}
	}
*/
func (app *App) Nodes() (r map[string]map[string][]string) {
	r = make(map[string]map[string][]string)
	// Regexp-based routes.
	for _, route := range app.Route.Regexp {
		ctrl := route.ReflectType.Name()
		if _, ok := r[ctrl]; !ok {
			r[ctrl] = make(map[string][]string)
		}
		if _, ok := r[ctrl][route.ExecuteFunc]; !ok {
			r[ctrl][route.ExecuteFunc] = make([]string, 0)
		}
		for verb := range route.RequestMethod {
			r[ctrl][route.ExecuteFunc] = append(r[ctrl][route.ExecuteFunc], verb)
		}
	}
	// Static (exact-match) routes.
	for _, route := range app.Route.Static {
		ctrl := route.ReflectType.Name()
		if _, ok := r[ctrl]; !ok {
			r[ctrl] = make(map[string][]string)
		}
		if _, ok := r[ctrl][route.ExecuteFunc]; !ok {
			r[ctrl][route.ExecuteFunc] = make([]string, 0)
		}
		for verb := range route.RequestMethod {
			r[ctrl][route.ExecuteFunc] = append(r[ctrl][route.ExecuteFunc], verb)
		}
	}
	return
}
|
package golf
import (
"net/http"
"os"
"path"
"strings"
"sync"
)
// Application is an abstraction of a Golf application and is the root
// object used for configuration, routing and request dispatch.
type Application struct {
	// router maps method+pattern pairs to their handlers.
	router *router
	// staticRouter maps a URL prefix to the list of local directories
	// searched (in order) for a matching static file.
	staticRouter map[string][]string
	// View handles the templating and page rendering.
	View *View
	// Config provides configuration management.
	Config *Config
	// SessionManager stores and retrieves user sessions.
	SessionManager SessionManager
	// NotFoundHandler handles requests when no route is matched.
	NotFoundHandler HandlerFunc
	// MiddlewareChain is the default middleware chain that Golf uses.
	MiddlewareChain *Chain
	// pool recycles Context objects between requests.
	pool sync.Pool
	// errorHandler maps an HTTP status code to its custom handler.
	errorHandler map[int]ErrorHandlerFunc
	// DefaultErrorHandler is called when the corresponding error code
	// is not present in the `errorHandler` map.
	DefaultErrorHandler ErrorHandlerFunc
	// handlerChain is the composed middleware pipeline, built lazily on
	// the first request.
	handlerChain HandlerFunc
}
// New creates a new Golf Application instance with an empty router,
// view, configuration, error-handler table and a Context pool.
func New() *Application {
	app := &Application{
		router:       newRouter(),
		staticRouter: make(map[string][]string),
	}
	app.View = NewView()
	app.Config = NewConfig(app)
	// debug, _ := app.Config.GetBool("debug", false)
	app.errorHandler = make(map[int]ErrorHandlerFunc)
	app.MiddlewareChain = NewChain()
	app.DefaultErrorHandler = defaultErrorHandler
	// Recycle Context objects across requests to reduce allocations.
	app.pool.New = func() interface{} {
		return new(Context)
	}
	return app
}
// handler serves one request: static routes are consulted first, and
// when no static file matches, the URL is looked up in the router.
// Requests that match nothing are answered with a 404.
func (app *Application) handler(ctx *Context) {
	for prefix, dirs := range app.staticRouter {
		if !strings.HasPrefix(ctx.Request.URL.Path, prefix) {
			continue
		}
		for _, dir := range dirs {
			candidate := path.Join(dir, ctx.Request.URL.Path[len(prefix):])
			if info, err := os.Stat(candidate); err == nil && !info.IsDir() {
				staticHandler(ctx, candidate)
				return
			}
		}
	}
	handler, params, err := app.router.FindRoute(ctx.Request.Method, ctx.Request.URL.Path)
	if err != nil {
		app.handleError(ctx, 404)
	} else {
		ctx.Params = params
		handler(ctx)
	}
	ctx.Send()
}
// staticHandler serves the file at filePath through net/http's file
// serving, which handles content types and range requests.
func staticHandler(ctx *Context, filePath string) {
	http.ServeFile(ctx.Response, ctx.Request, filePath)
}
// ServeHTTP is the http.Handler entry point: it builds the middleware
// chain on first use, then runs the request through it with a pooled
// Context.
//
// NOTE(review): the lazy handlerChain initialization is not
// synchronized; concurrent first requests can both observe nil and
// build the chain. Confirm whether this matters here, or initialize
// the chain eagerly before serving.
func (app *Application) ServeHTTP(res http.ResponseWriter, req *http.Request) {
	if app.handlerChain == nil {
		app.handlerChain = app.MiddlewareChain.Final(app.handler)
	}
	ctx := app.pool.Get().(*Context)
	ctx.reset()
	ctx.Request = req
	ctx.Response = res
	ctx.App = app
	app.handlerChain(ctx)
	app.pool.Put(ctx)
}
// Run starts an HTTP server for the application on addr, panicking if
// the server cannot be started.
func (app *Application) Run(addr string) {
	if err := http.ListenAndServe(addr, app); err != nil {
		panic(err)
	}
}
// RunTLS runs the app with TLS support on addr using the given
// certificate and key files, panicking if the server cannot start.
func (app *Application) RunTLS(addr, certFile, keyFile string) {
	if err := http.ListenAndServeTLS(addr, certFile, keyFile, app); err != nil {
		panic(err)
	}
}
// Static registers path as a static directory served under url. The
// trailing slash of url is stripped so prefixes compare uniformly, and
// multiple directories may be registered under the same prefix.
func (app *Application) Static(url string, path string) {
	trimmed := strings.TrimRight(url, "/")
	app.staticRouter[trimmed] = append(app.staticRouter[trimmed], path)
}
// Get registers handler for GET requests matching pattern.
func (app *Application) Get(pattern string, handler HandlerFunc) {
	app.router.AddRoute("GET", pattern, handler)
}
// Post registers handler for POST requests matching pattern.
func (app *Application) Post(pattern string, handler HandlerFunc) {
	app.router.AddRoute("POST", pattern, handler)
}
// Put registers handler for PUT requests matching pattern.
func (app *Application) Put(pattern string, handler HandlerFunc) {
	app.router.AddRoute("PUT", pattern, handler)
}
// Delete registers handler for DELETE requests matching pattern.
func (app *Application) Delete(pattern string, handler HandlerFunc) {
	app.router.AddRoute("DELETE", pattern, handler)
}
// Error registers handler as the custom handler for the given HTTP
// error status code.
func (app *Application) Error(statusCode int, handler ErrorHandlerFunc) {
	app.errorHandler[statusCode] = handler
}
// handleError records statusCode on the context, then invokes the
// handler registered for that code, falling back to the default error
// handler when none is registered.
//
// NOTE(review): the optional data maps are only forwarded to the
// default handler, not to custom ones — confirm this asymmetry is
// intended.
func (app *Application) handleError(ctx *Context, statusCode int, data ...map[string]interface{}) {
	ctx.StatusCode = statusCode
	if handler, ok := app.errorHandler[ctx.StatusCode]; ok {
		handler(ctx)
		return
	}
	defaultErrorHandler(ctx, data...)
}
[feat] Add Patch, Head, Options
package golf
import (
"net/http"
"os"
"path"
"strings"
"sync"
)
// Application is an abstraction of a Golf application and is the root
// object used for configuration, routing and request dispatch.
type Application struct {
	// router maps method+pattern pairs to their handlers.
	router *router
	// staticRouter maps a URL prefix to the list of local directories
	// searched (in order) for a matching static file.
	staticRouter map[string][]string
	// View handles the templating and page rendering.
	View *View
	// Config provides configuration management.
	Config *Config
	// SessionManager stores and retrieves user sessions.
	SessionManager SessionManager
	// NotFoundHandler handles requests when no route is matched.
	NotFoundHandler HandlerFunc
	// MiddlewareChain is the default middleware chain that Golf uses.
	MiddlewareChain *Chain
	// pool recycles Context objects between requests.
	pool sync.Pool
	// errorHandler maps an HTTP status code to its custom handler.
	errorHandler map[int]ErrorHandlerFunc
	// DefaultErrorHandler is called when the corresponding error code
	// is not present in the `errorHandler` map.
	DefaultErrorHandler ErrorHandlerFunc
	// handlerChain is the composed middleware pipeline, built lazily on
	// the first request.
	handlerChain HandlerFunc
}
// New is used for creating a new Golf Application instance, wiring up
// the router, view, configuration, error-handler table and the
// Context pool.
func New() *Application {
	app := new(Application)
	app.router = newRouter()
	app.staticRouter = make(map[string][]string)
	app.View = NewView()
	app.Config = NewConfig(app)
	// debug, _ := app.Config.GetBool("debug", false)
	app.errorHandler = make(map[int]ErrorHandlerFunc)
	app.MiddlewareChain = NewChain()
	app.DefaultErrorHandler = defaultErrorHandler
	// Recycle Context objects across requests to reduce allocations.
	app.pool.New = func() interface{} {
		return new(Context)
	}
	return app
}
// handler serves one request. It first searches whether any of the
// static routes matches the request; if not, it looks the URL up in
// the router, answering 404 when nothing matches.
func (app *Application) handler(ctx *Context) {
	for prefix, staticPathSlice := range app.staticRouter {
		if strings.HasPrefix(ctx.Request.URL.Path, prefix) {
			// Each registered directory is tried in order until an
			// existing regular file is found.
			for _, staticPath := range staticPathSlice {
				filePath := path.Join(staticPath, ctx.Request.URL.Path[len(prefix):])
				fileInfo, err := os.Stat(filePath)
				if err == nil && !fileInfo.IsDir() {
					staticHandler(ctx, filePath)
					return
				}
			}
		}
	}
	handler, params, err := app.router.FindRoute(ctx.Request.Method, ctx.Request.URL.Path)
	if err != nil {
		app.handleError(ctx, 404)
	} else {
		ctx.Params = params
		handler(ctx)
	}
	ctx.Send()
}
// staticHandler serves the file at filePath through net/http's file
// serving, which handles content types and range requests.
func staticHandler(ctx *Context, filePath string) {
	http.ServeFile(ctx.Response, ctx.Request, filePath)
}
// ServeHTTP is the http.Handler entry point: it builds the middleware
// chain on first use, then runs the request through it with a pooled
// Context.
//
// NOTE(review): the lazy handlerChain initialization is not
// synchronized; concurrent first requests can both observe nil and
// build the chain. Confirm whether this matters here, or initialize
// the chain eagerly before serving.
func (app *Application) ServeHTTP(res http.ResponseWriter, req *http.Request) {
	if app.handlerChain == nil {
		app.handlerChain = app.MiddlewareChain.Final(app.handler)
	}
	ctx := app.pool.Get().(*Context)
	ctx.reset()
	ctx.Request = req
	ctx.Response = res
	ctx.App = app
	app.handlerChain(ctx)
	app.pool.Put(ctx)
}
// Run starts an HTTP server for the application on addr, panicking if
// the server cannot be started.
func (app *Application) Run(addr string) {
	err := http.ListenAndServe(addr, app)
	if err != nil {
		panic(err)
	}
}
// RunTLS runs the app with TLS support on addr using the given
// certificate and key files, panicking if the server cannot start.
func (app *Application) RunTLS(addr, certFile, keyFile string) {
	err := http.ListenAndServeTLS(addr, certFile, keyFile, app)
	if err != nil {
		panic(err)
	}
}
// Static registers path as a static directory served under url. The
// trailing slash of url is stripped so prefixes compare uniformly, and
// multiple directories may be registered under the same prefix.
func (app *Application) Static(url string, path string) {
	url = strings.TrimRight(url, "/")
	app.staticRouter[url] = append(app.staticRouter[url], path)
}
// Get registers handler for GET requests matching pattern.
func (app *Application) Get(pattern string, handler HandlerFunc) {
	app.router.AddRoute("GET", pattern, handler)
}
// Post registers handler for POST requests matching pattern.
func (app *Application) Post(pattern string, handler HandlerFunc) {
	app.router.AddRoute("POST", pattern, handler)
}
// Put registers handler for PUT requests matching pattern.
func (app *Application) Put(pattern string, handler HandlerFunc) {
	app.router.AddRoute("PUT", pattern, handler)
}
// Delete registers handler for DELETE requests matching pattern.
func (app *Application) Delete(pattern string, handler HandlerFunc) {
	app.router.AddRoute("DELETE", pattern, handler)
}
// Patch registers handler for PATCH requests matching pattern.
func (app *Application) Patch(pattern string, handler HandlerFunc) {
	app.router.AddRoute("PATCH", pattern, handler)
}
// Options registers handler for OPTIONS requests matching pattern.
func (app *Application) Options(pattern string, handler HandlerFunc) {
	app.router.AddRoute("OPTIONS", pattern, handler)
}
// Head registers handler for HEAD requests matching pattern.
func (app *Application) Head(pattern string, handler HandlerFunc) {
	app.router.AddRoute("HEAD", pattern, handler)
}
// Error registers handler as the custom handler for the given HTTP
// error status code.
func (app *Application) Error(statusCode int, handler ErrorHandlerFunc) {
	app.errorHandler[statusCode] = handler
}
// handleError records statusCode on the context, then invokes the
// handler registered for that code in `errorHandler`, falling back to
// `defaultErrorHandler` when none is registered.
//
// NOTE(review): the optional data maps are only forwarded to the
// default handler, not to custom ones — confirm this asymmetry is
// intended.
func (app *Application) handleError(ctx *Context, statusCode int, data ...map[string]interface{}) {
	ctx.StatusCode = statusCode
	handler, ok := app.errorHandler[ctx.StatusCode]
	if !ok {
		defaultErrorHandler(ctx, data...)
		return
	}
	handler(ctx)
}
|
package cli
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"time"
)
// printerFunc is the function signature for fmt.Fprintln; it exists so
// the flag-error printer (showFlagError) can be swapped out in tests.
type printerFunc func(io.Writer, ...interface{}) (int, error)
var (
	changeLogURL                    = "https://github.com/urfave/cli/blob/master/CHANGELOG.md"
	appActionDeprecationURL         = fmt.Sprintf("%s#deprecated-cli-app-action-signature", changeLogURL)
	runAndExitOnErrorDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-runandexitonerror", changeLogURL)

	contactSysadmin = "This is an error in the application. Please contact the distributor of this application if this is not you."

	// errInvalidActionType is returned when App.Action has an
	// unsupported signature. The message previously contained
	// mismatched backticks and parentheses ("`func(*Context`)`" /
	// "func(*Context) error)"); both accepted signatures are now
	// quoted correctly.
	errInvalidActionType = NewExitError("ERROR invalid Action type. "+
		fmt.Sprintf("Must be `func(*Context)` or `func(*Context) error`. %s", contactSysadmin)+
		fmt.Sprintf("See %s", appActionDeprecationURL), 2)

	// showFlagError prints required-flag errors; replaceable in tests.
	showFlagError printerFunc = fmt.Fprintln
)
// App is the main structure of a cli application. It is recommended that
// an app be created with the cli.NewApp() function.
type App struct {
	// The name of the program. Defaults to path.Base(os.Args[0])
	Name string
	// Full name of command for help, defaults to Name
	HelpName string
	// Description of the program.
	Usage string
	// Text to override the USAGE section of help
	UsageText string
	// Description of the program argument format.
	ArgsUsage string
	// Version of the program
	Version string
	// Description of the program
	Description string
	// List of commands to execute
	Commands []Command
	// List of flags to parse
	Flags []Flag
	// Boolean to enable bash completion commands
	EnableBashCompletion bool
	// Boolean to hide built-in help command
	HideHelp bool
	// Boolean to hide built-in version flag and the VERSION section of help
	HideVersion bool
	// Populated on app startup, only gettable through method Categories()
	categories CommandCategories
	// An action to execute when the bash-completion flag is set
	BashComplete BashCompleteFunc
	// An action to execute before any subcommands are run, but after the context is ready
	// If a non-nil error is returned, no subcommands are run
	Before BeforeFunc
	// An action to execute after any subcommands are run, but after the subcommand has finished
	// It is run even if Action() panics
	After AfterFunc
	// The action to execute when no subcommands are specified
	// Expects a `cli.ActionFunc` but will accept the *deprecated* signature of `func(*cli.Context) {}`
	// *Note*: support for the deprecated `Action` signature will be removed in a future version
	Action interface{}
	// Execute this function if the proper command cannot be found
	CommandNotFound CommandNotFoundFunc
	// Execute this function if a usage error occurs
	OnUsageError OnUsageErrorFunc
	// Compilation date
	Compiled time.Time
	// List of all authors who contributed
	Authors []Author
	// Copyright of the binary if any
	Copyright string
	// Name of Author (Note: Use App.Authors, this is deprecated)
	Author string
	// Email of Author (Note: Use App.Authors, this is deprecated)
	Email string
	// Writer is the writer to write output to
	Writer io.Writer
	// ErrWriter writes error output
	ErrWriter io.Writer
	// Execute this function to handle ExitErrors. If not provided, HandleExitCoder is provided to
	// function as a default, so this is optional.
	ExitErrHandler ExitErrHandlerFunc
	// Other custom info
	Metadata map[string]interface{}
	// Carries a function which returns app specific info.
	ExtraInfo func() map[string]string
	// CustomAppHelpTemplate the text template for app help topic.
	// cli.go uses text/template to render templates. You can
	// render custom help text by setting this variable.
	CustomAppHelpTemplate string
	// didSetup guards Setup() so it runs at most once.
	didSetup bool
}
// compileTime approximates when this binary was compiled via the
// modification time of the executable; it falls back to the current
// time when the executable cannot be stat'ed.
func compileTime() time.Time {
	if info, err := os.Stat(os.Args[0]); err == nil {
		return info.ModTime()
	}
	return time.Now()
}
// NewApp creates a new cli Application with some reasonable defaults
// for Name, HelpName, Usage, Version and Action.
func NewApp() *App {
	base := filepath.Base(os.Args[0])
	return &App{
		Name:         base,
		HelpName:     base,
		Usage:        "A new cli application",
		UsageText:    "",
		Version:      "0.0.0",
		BashComplete: DefaultAppComplete,
		Action:       helpCommand.Action,
		Compiled:     compileTime(),
		Writer:       os.Stdout,
	}
}
// Setup runs initialization code to ensure all data structures are ready for
// `Run` or inspection prior to `Run`. It is internally called by `Run`, but
// will return early if setup has already happened.
func (a *App) Setup() {
	if a.didSetup {
		return
	}
	a.didSetup = true
	// Fold the deprecated singular Author/Email fields into Authors.
	if a.Author != "" || a.Email != "" {
		a.Authors = append(a.Authors, Author{Name: a.Author, Email: a.Email})
	}
	// Give every command a fully-qualified help name.
	newCmds := []Command{}
	for _, c := range a.Commands {
		if c.HelpName == "" {
			c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name)
		}
		newCmds = append(newCmds, c)
	}
	a.Commands = newCmds
	// Install the built-in help command and flag unless hidden.
	if a.Command(helpCommand.Name) == nil && !a.HideHelp {
		a.Commands = append(a.Commands, helpCommand)
		if (HelpFlag != BoolFlag{}) {
			a.appendFlag(HelpFlag)
		}
	}
	if !a.HideVersion {
		a.appendFlag(VersionFlag)
	}
	// Group commands into sorted categories.
	a.categories = CommandCategories{}
	for _, command := range a.Commands {
		a.categories = a.categories.AddCommand(command.Category, command)
	}
	sort.Sort(a.categories)
	if a.Metadata == nil {
		a.Metadata = make(map[string]interface{})
	}
	if a.Writer == nil {
		a.Writer = os.Stdout
	}
}
// Run is the entry point to the cli app. Parses the arguments slice and routes
// to the proper flag/args combination. Order matters: completion is
// detected first, then flags are parsed, help/version/required-flag
// checks run, Before/After hooks fire, and finally either a named
// command or the default Action executes.
func (a *App) Run(arguments []string) (err error) {
	a.Setup()
	// handle the completion flag separately from the flagset since
	// completion could be attempted after a flag, but before its value was put
	// on the command line. this causes the flagset to interpret the completion
	// flag name as the value of the flag before it which is undesirable
	// note that we can only do this because the shell autocomplete function
	// always appends the completion flag at the end of the command
	shellComplete, arguments := checkShellCompleteFlag(a, arguments)
	// parse flags
	set, err := flagSet(a.Name, a.Flags)
	if err != nil {
		return err
	}
	set.SetOutput(ioutil.Discard)
	// The parse error is deliberately checked later, after completion
	// handling, so completion still works on malformed input.
	err = set.Parse(arguments[1:])
	nerr := normalizeFlags(a.Flags, set)
	context := NewContext(a, set, nil)
	if nerr != nil {
		fmt.Fprintln(a.Writer, nerr)
		ShowAppHelp(context)
		return nerr
	}
	context.shellComplete = shellComplete
	if checkCompletions(context) {
		return nil
	}
	if err != nil {
		if a.OnUsageError != nil {
			err := a.OnUsageError(context, err, false)
			a.handleExitCoder(context, err)
			return err
		}
		fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error())
		ShowAppHelp(context)
		return err
	}
	if !a.HideHelp && checkHelp(context) {
		ShowAppHelp(context)
		return nil
	}
	if !a.HideVersion && checkVersion(context) {
		ShowVersion(context)
		return nil
	}
	cerr := checkRequiredFlags(a.Flags, set)
	if cerr != nil {
		showFlagError(a.Writer, cerr)
		ShowAppHelp(context)
		return cerr
	}
	// The After hook is deferred so it also runs when a command or the
	// default Action returns an error; errors from both are merged.
	if a.After != nil {
		defer func() {
			if afterErr := a.After(context); afterErr != nil {
				if err != nil {
					err = NewMultiError(err, afterErr)
				} else {
					err = afterErr
				}
			}
		}()
	}
	if a.Before != nil {
		beforeErr := a.Before(context)
		if beforeErr != nil {
			fmt.Fprintf(a.Writer, "%v\n\n", beforeErr)
			ShowAppHelp(context)
			a.handleExitCoder(context, beforeErr)
			err = beforeErr
			return err
		}
	}
	// Dispatch to a named subcommand when the first argument matches one.
	args := context.Args()
	if args.Present() {
		name := args.First()
		c := a.Command(name)
		if c != nil {
			return c.Run(context)
		}
	}
	if a.Action == nil {
		a.Action = helpCommand.Action
	}
	// Run default Action
	err = HandleAction(a.Action, context)
	a.handleExitCoder(context, err)
	return err
}
// RunAndExitOnError calls .Run() and exits non-zero if an error was returned.
//
// Deprecated: instead you should return an error that fulfills cli.ExitCoder
// to cli.App.Run. This will cause the application to exit with the given error
// code in the cli.ExitCoder
func (a *App) RunAndExitOnError() {
	if err := a.Run(os.Args); err != nil {
		fmt.Fprintln(a.errWriter(), err)
		OsExiter(1)
	}
}
// RunAsSubcommand invokes the subcommand given the context, parses ctx.Args() to
// generate command-specific flags. It mirrors Run but inherits the
// parent context and shows subcommand help rather than app help.
func (a *App) RunAsSubcommand(ctx *Context) (err error) {
	// append help to commands
	if len(a.Commands) > 0 {
		if a.Command(helpCommand.Name) == nil && !a.HideHelp {
			a.Commands = append(a.Commands, helpCommand)
			if (HelpFlag != BoolFlag{}) {
				a.appendFlag(HelpFlag)
			}
		}
	}
	// Give every command a fully-qualified help name.
	newCmds := []Command{}
	for _, c := range a.Commands {
		if c.HelpName == "" {
			c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name)
		}
		newCmds = append(newCmds, c)
	}
	a.Commands = newCmds
	// parse flags
	set, err := flagSet(a.Name, a.Flags)
	if err != nil {
		return err
	}
	set.SetOutput(ioutil.Discard)
	// The parse error is deliberately checked later, after completion
	// handling, so completion still works on malformed input.
	err = set.Parse(ctx.Args().Tail())
	nerr := normalizeFlags(a.Flags, set)
	context := NewContext(a, set, ctx)
	if nerr != nil {
		fmt.Fprintln(a.Writer, nerr)
		fmt.Fprintln(a.Writer)
		if len(a.Commands) > 0 {
			ShowSubcommandHelp(context)
		} else {
			ShowCommandHelp(ctx, context.Args().First())
		}
		return nerr
	}
	cerr := checkRequiredFlags(a.Flags, set)
	if cerr != nil {
		showFlagError(a.Writer, cerr)
		ShowSubcommandHelp(context)
		return cerr
	}
	if checkCompletions(context) {
		return nil
	}
	if err != nil {
		if a.OnUsageError != nil {
			err = a.OnUsageError(context, err, true)
			a.handleExitCoder(context, err)
			return err
		}
		fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error())
		ShowSubcommandHelp(context)
		return err
	}
	if len(a.Commands) > 0 {
		if checkSubcommandHelp(context) {
			return nil
		}
	} else {
		if checkCommandHelp(ctx, context.Args().First()) {
			return nil
		}
	}
	// After is deferred so it also runs on command/Action errors.
	if a.After != nil {
		defer func() {
			afterErr := a.After(context)
			if afterErr != nil {
				// NOTE(review): this passes the outer `err` rather than
				// `afterErr` to handleExitCoder, unlike the Before path —
				// confirm whether `afterErr` was intended here.
				a.handleExitCoder(context, err)
				if err != nil {
					err = NewMultiError(err, afterErr)
				} else {
					err = afterErr
				}
			}
		}()
	}
	if a.Before != nil {
		beforeErr := a.Before(context)
		if beforeErr != nil {
			a.handleExitCoder(context, beforeErr)
			err = beforeErr
			return err
		}
	}
	// Dispatch to a named subcommand when the first argument matches one.
	args := context.Args()
	if args.Present() {
		name := args.First()
		c := a.Command(name)
		if c != nil {
			return c.Run(context)
		}
	}
	// Run default Action
	err = HandleAction(a.Action, context)
	a.handleExitCoder(context, err)
	return err
}
// Command returns the named command on App. Returns nil if the command
// does not exist. The returned pointer refers to a copy of the stored
// command.
func (a *App) Command(name string) *Command {
	for i := range a.Commands {
		c := a.Commands[i]
		if c.HasName(name) {
			return &c
		}
	}
	return nil
}
// Categories returns a slice containing all the categories with the
// commands they contain, as populated by Setup.
func (a *App) Categories() CommandCategories {
	return a.categories
}
// VisibleCategories returns the categories that contain at least one
// command with Hidden=false.
func (a *App) VisibleCategories() []*CommandCategory {
	ret := []*CommandCategory{}
	for _, category := range a.categories {
		for _, command := range category.Commands {
			if !command.Hidden {
				// One visible command is enough to keep the category.
				ret = append(ret, category)
				break
			}
		}
	}
	return ret
}
// VisibleCommands returns a slice of the Commands with Hidden=false.
func (a *App) VisibleCommands() []Command {
	visible := []Command{}
	for _, c := range a.Commands {
		if c.Hidden {
			continue
		}
		visible = append(visible, c)
	}
	return visible
}
// VisibleFlags returns a slice of the Flags with Hidden=false.
func (a *App) VisibleFlags() []Flag {
	return visibleFlags(a.Flags)
}
// hasFlag reports whether an equal flag is already present in a.Flags.
func (a *App) hasFlag(flag Flag) bool {
	for _, existing := range a.Flags {
		if existing == flag {
			return true
		}
	}
	return false
}
// errWriter returns the app's error writer, falling back to the
// package-level ErrWriter when none is configured.
func (a *App) errWriter() io.Writer {
	if a.ErrWriter != nil {
		return a.ErrWriter
	}
	return ErrWriter
}
// appendFlag adds flag to a.Flags unless an equal flag already exists.
func (a *App) appendFlag(flag Flag) {
	if a.hasFlag(flag) {
		return
	}
	a.Flags = append(a.Flags, flag)
}
// handleExitCoder dispatches err to the custom ExitErrHandler when one
// is configured, and to the package-level HandleExitCoder otherwise.
func (a *App) handleExitCoder(context *Context, err error) {
	if a.ExitErrHandler == nil {
		HandleExitCoder(err)
		return
	}
	a.ExitErrHandler(context, err)
}
// Author represents someone who has contributed to a cli project.
type Author struct {
	Name  string // The Author's name
	Email string // The Author's email
}
// String makes Author comply with the Stringer interface, rendering
// "Name <email>"; the email part is omitted when empty.
func (a Author) String() string {
	if a.Email == "" {
		return a.Name
	}
	return a.Name + " <" + a.Email + ">"
}
// HandleAction attempts to figure out which Action signature was used.
// If it's an ActionFunc or a func with the legacy signature for Action,
// the func is run; otherwise errInvalidActionType is returned.
func HandleAction(action interface{}, context *Context) (err error) {
	switch fn := action.(type) {
	case ActionFunc:
		return fn(context)
	case func(*Context) error:
		return fn(context)
	case func(*Context):
		// deprecated function signature
		fn(context)
		return nil
	default:
		return errInvalidActionType
	}
}
reduce diff
package cli
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"time"
)
// printerFunc is the function signature for fmt.Fprintln; it exists so
// the flag-error printer (showFlagError) can be swapped out in tests.
type printerFunc func(io.Writer, ...interface{}) (int, error)
var (
	changeLogURL                    = "https://github.com/urfave/cli/blob/master/CHANGELOG.md"
	appActionDeprecationURL         = fmt.Sprintf("%s#deprecated-cli-app-action-signature", changeLogURL)
	runAndExitOnErrorDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-runandexitonerror", changeLogURL)

	contactSysadmin = "This is an error in the application. Please contact the distributor of this application if this is not you."

	// errInvalidActionType is returned when App.Action has an
	// unsupported signature. The message previously contained
	// mismatched backticks and parentheses ("`func(*Context`)`" /
	// "func(*Context) error)"); both accepted signatures are now
	// quoted correctly.
	errInvalidActionType = NewExitError("ERROR invalid Action type. "+
		fmt.Sprintf("Must be `func(*Context)` or `func(*Context) error`. %s", contactSysadmin)+
		fmt.Sprintf("See %s", appActionDeprecationURL), 2)

	// showFlagError prints required-flag errors; replaceable in tests.
	showFlagError printerFunc = fmt.Fprintln
)
// App is the main structure of a cli application. It is recommended that
// an app be created with the cli.NewApp() function.
type App struct {
	// The name of the program. Defaults to path.Base(os.Args[0])
	Name string
	// Full name of command for help, defaults to Name
	HelpName string
	// Description of the program.
	Usage string
	// Text to override the USAGE section of help
	UsageText string
	// Description of the program argument format.
	ArgsUsage string
	// Version of the program
	Version string
	// Description of the program
	Description string
	// List of commands to execute
	Commands []Command
	// List of flags to parse
	Flags []Flag
	// Boolean to enable bash completion commands
	EnableBashCompletion bool
	// Boolean to hide built-in help command
	HideHelp bool
	// Boolean to hide built-in version flag and the VERSION section of help
	HideVersion bool
	// Populated on app startup, only gettable through method Categories()
	categories CommandCategories
	// An action to execute when the bash-completion flag is set
	BashComplete BashCompleteFunc
	// An action to execute before any subcommands are run, but after the context is ready
	// If a non-nil error is returned, no subcommands are run
	Before BeforeFunc
	// An action to execute after any subcommands are run, but after the subcommand has finished
	// It is run even if Action() panics
	After AfterFunc
	// The action to execute when no subcommands are specified
	// Expects a `cli.ActionFunc` but will accept the *deprecated* signature of `func(*cli.Context) {}`
	// *Note*: support for the deprecated `Action` signature will be removed in a future version
	Action interface{}
	// Execute this function if the proper command cannot be found
	CommandNotFound CommandNotFoundFunc
	// Execute this function if a usage error occurs
	OnUsageError OnUsageErrorFunc
	// Compilation date
	Compiled time.Time
	// List of all authors who contributed
	Authors []Author
	// Copyright of the binary if any
	Copyright string
	// Name of Author (Note: Use App.Authors, this is deprecated)
	Author string
	// Email of Author (Note: Use App.Authors, this is deprecated)
	Email string
	// Writer is the writer to write output to
	Writer io.Writer
	// ErrWriter writes error output
	ErrWriter io.Writer
	// Execute this function to handle ExitErrors. If not provided, HandleExitCoder is provided to
	// function as a default, so this is optional.
	ExitErrHandler ExitErrHandlerFunc
	// Other custom info
	Metadata map[string]interface{}
	// Carries a function which returns app specific info.
	ExtraInfo func() map[string]string
	// CustomAppHelpTemplate the text template for app help topic.
	// cli.go uses text/template to render templates. You can
	// render custom help text by setting this variable.
	CustomAppHelpTemplate string
	// didSetup guards Setup() so it runs at most once.
	didSetup bool
}
// compileTime guesses when this binary was built by stat-ing the executable
// (os.Args[0]) and returning its modification time. If the stat fails, the
// current time is returned instead.
func compileTime() time.Time {
	if info, err := os.Stat(os.Args[0]); err == nil {
		return info.ModTime()
	}
	return time.Now()
}
// NewApp creates a new cli Application with some reasonable defaults for Name,
// Usage, Version and Action.
func NewApp() *App {
	exe := filepath.Base(os.Args[0])
	return &App{
		Name:         exe,
		HelpName:     exe,
		Usage:        "A new cli application",
		UsageText:    "",
		Version:      "0.0.0",
		BashComplete: DefaultAppComplete,
		Action:       helpCommand.Action,
		Compiled:     compileTime(),
		Writer:       os.Stdout,
	}
}
// Setup runs initialization code to ensure all data structures are ready for
// `Run` or inspection prior to `Run`. It is internally called by `Run`, but
// will return early if setup has already happened.
func (a *App) Setup() {
	if a.didSetup {
		return
	}
	a.didSetup = true

	// Fold the deprecated single Author/Email fields into Authors.
	if a.Author != "" || a.Email != "" {
		a.Authors = append(a.Authors, Author{Name: a.Author, Email: a.Email})
	}

	// Give every command a fully qualified help name.
	cmds := make([]Command, 0, len(a.Commands))
	for _, c := range a.Commands {
		if c.HelpName == "" {
			c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name)
		}
		cmds = append(cmds, c)
	}
	a.Commands = cmds

	// Install the built-in help command and flag unless suppressed.
	if !a.HideHelp && a.Command(helpCommand.Name) == nil {
		a.Commands = append(a.Commands, helpCommand)
		if (HelpFlag != BoolFlag{}) {
			a.appendFlag(HelpFlag)
		}
	}

	if !a.HideVersion {
		a.appendFlag(VersionFlag)
	}

	// Rebuild the category index from scratch.
	a.categories = CommandCategories{}
	for _, c := range a.Commands {
		a.categories = a.categories.AddCommand(c.Category, c)
	}
	sort.Sort(a.categories)

	if a.Metadata == nil {
		a.Metadata = make(map[string]interface{})
	}
	if a.Writer == nil {
		a.Writer = os.Stdout
	}
}
// Run is the entry point to the cli app. Parses the arguments slice and routes
// to the proper flag/args combination
func (a *App) Run(arguments []string) (err error) {
	a.Setup()

	// handle the completion flag separately from the flagset since
	// completion could be attempted after a flag, but before its value was put
	// on the command line. this causes the flagset to interpret the completion
	// flag name as the value of the flag before it which is undesirable
	// note that we can only do this because the shell autocomplete function
	// always appends the completion flag at the end of the command
	shellComplete, arguments := checkShellCompleteFlag(a, arguments)

	// parse flags
	set, err := flagSet(a.Name, a.Flags)
	if err != nil {
		return err
	}

	set.SetOutput(ioutil.Discard)
	// The parse error is deliberately examined later, after flag
	// normalization and completion handling, so those still work on a
	// malformed command line.
	err = set.Parse(arguments[1:])
	nerr := normalizeFlags(a.Flags, set)
	context := NewContext(a, set, nil)
	if nerr != nil {
		fmt.Fprintln(a.Writer, nerr)
		ShowAppHelp(context)
		return nerr
	}
	context.shellComplete = shellComplete

	if checkCompletions(context) {
		return nil
	}

	// Now surface any flag-parse error, preferring the user's OnUsageError
	// hook when one is installed.
	if err != nil {
		if a.OnUsageError != nil {
			err := a.OnUsageError(context, err, false)
			a.handleExitCoder(context, err)
			return err
		}
		fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error())
		ShowAppHelp(context)
		return err
	}

	if !a.HideHelp && checkHelp(context) {
		ShowAppHelp(context)
		return nil
	}

	if !a.HideVersion && checkVersion(context) {
		ShowVersion(context)
		return nil
	}

	cerr := checkRequiredFlags(a.Flags, set)
	if cerr != nil {
		showFlagError(a.Writer, cerr)
		ShowAppHelp(context)
		return cerr
	}

	// The After hook runs via defer so it fires even if the Action panics;
	// its error is merged into the named return value err.
	if a.After != nil {
		defer func() {
			if afterErr := a.After(context); afterErr != nil {
				if err != nil {
					err = NewMultiError(err, afterErr)
				} else {
					err = afterErr
				}
			}
		}()
	}

	if a.Before != nil {
		beforeErr := a.Before(context)
		if beforeErr != nil {
			fmt.Fprintf(a.Writer, "%v\n\n", beforeErr)
			ShowAppHelp(context)
			a.handleExitCoder(context, beforeErr)
			err = beforeErr
			return err
		}
	}

	// Dispatch to a named subcommand when the first argument matches one.
	args := context.Args()
	if args.Present() {
		name := args.First()
		c := a.Command(name)
		if c != nil {
			return c.Run(context)
		}
	}

	if a.Action == nil {
		a.Action = helpCommand.Action
	}

	// Run default Action
	err = HandleAction(a.Action, context)

	a.handleExitCoder(context, err)
	return err
}
// RunAndExitOnError calls .Run() and exits non-zero if an error was returned
//
// Deprecated: instead you should return an error that fulfills cli.ExitCoder
// to cli.App.Run. This will cause the application to exit with the given error
// code in the cli.ExitCoder
func (a *App) RunAndExitOnError() {
	err := a.Run(os.Args)
	if err == nil {
		return
	}
	fmt.Fprintln(a.errWriter(), err)
	OsExiter(1)
}
// RunAsSubcommand invokes the subcommand given the context, parses ctx.Args() to
// generate command-specific flags
func (a *App) RunAsSubcommand(ctx *Context) (err error) {
	// append help to commands
	if len(a.Commands) > 0 {
		if a.Command(helpCommand.Name) == nil && !a.HideHelp {
			a.Commands = append(a.Commands, helpCommand)
			if (HelpFlag != BoolFlag{}) {
				a.appendFlag(HelpFlag)
			}
		}
	}

	// Qualify each command's help name with this app's help name.
	newCmds := []Command{}
	for _, c := range a.Commands {
		if c.HelpName == "" {
			c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name)
		}
		newCmds = append(newCmds, c)
	}
	a.Commands = newCmds

	// parse flags
	set, err := flagSet(a.Name, a.Flags)
	if err != nil {
		return err
	}

	set.SetOutput(ioutil.Discard)
	// The parse error is deliberately examined later, after normalization
	// and completion handling.
	err = set.Parse(ctx.Args().Tail())
	nerr := normalizeFlags(a.Flags, set)
	context := NewContext(a, set, ctx)

	if nerr != nil {
		fmt.Fprintln(a.Writer, nerr)
		fmt.Fprintln(a.Writer)
		if len(a.Commands) > 0 {
			ShowSubcommandHelp(context)
		} else {
			ShowCommandHelp(ctx, context.Args().First())
		}
		return nerr
	}

	cerr := checkRequiredFlags(a.Flags, set)
	if cerr != nil {
		showFlagError(a.Writer, cerr)
		ShowSubcommandHelp(context)
		return cerr
	}

	if checkCompletions(context) {
		return nil
	}

	if err != nil {
		if a.OnUsageError != nil {
			err = a.OnUsageError(context, err, true)
			a.handleExitCoder(context, err)
			return err
		}
		fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error())
		ShowSubcommandHelp(context)
		return err
	}

	if len(a.Commands) > 0 {
		if checkSubcommandHelp(context) {
			return nil
		}
	} else {
		if checkCommandHelp(ctx, context.Args().First()) {
			return nil
		}
	}

	// The After hook runs via defer so it fires even if the Action panics.
	if a.After != nil {
		defer func() {
			afterErr := a.After(context)
			if afterErr != nil {
				// NOTE(review): this passes `err` (the pre-After error,
				// possibly nil) rather than afterErr to the exit-code
				// handler, and (*App).Run has no such call in its After
				// defer at all. Looks suspicious — confirm intent before
				// changing.
				a.handleExitCoder(context, err)
				if err != nil {
					err = NewMultiError(err, afterErr)
				} else {
					err = afterErr
				}
			}
		}()
	}

	if a.Before != nil {
		beforeErr := a.Before(context)
		if beforeErr != nil {
			a.handleExitCoder(context, beforeErr)
			err = beforeErr
			return err
		}
	}

	// Dispatch to a nested subcommand when the first argument matches one.
	args := context.Args()
	if args.Present() {
		name := args.First()
		c := a.Command(name)
		if c != nil {
			return c.Run(context)
		}
	}

	// Run default Action
	err = HandleAction(a.Action, context)

	a.handleExitCoder(context, err)
	return err
}
// Command returns the named command on App. Returns nil if the command does not exist
func (a *App) Command(name string) *Command {
	for i := range a.Commands {
		if a.Commands[i].HasName(name) {
			// Return a pointer to a copy, preserving the original's
			// value-semantics: callers cannot mutate a.Commands through it.
			c := a.Commands[i]
			return &c
		}
	}
	return nil
}
// Categories returns a slice containing all the categories with the commands
// they contain. The index is built by Setup; before Setup runs this is the
// zero value.
func (a *App) Categories() CommandCategories {
	return a.categories
}
// VisibleCategories returns a slice of categories and commands that are
// Hidden=false
func (a *App) VisibleCategories() []*CommandCategory {
	ret := []*CommandCategory{}
	for _, category := range a.categories {
		// Keep a category as soon as it has one visible command.
		for _, command := range category.Commands {
			if !command.Hidden {
				ret = append(ret, category)
				break
			}
		}
	}
	return ret
}
// VisibleCommands returns a slice of the Commands with Hidden=false
func (a *App) VisibleCommands() []Command {
	ret := []Command{}
	for _, c := range a.Commands {
		if c.Hidden {
			continue
		}
		ret = append(ret, c)
	}
	return ret
}
// VisibleFlags returns a slice of the Flags with Hidden=false; the filtering
// itself is delegated to the package-level visibleFlags helper.
func (a *App) VisibleFlags() []Flag {
	return visibleFlags(a.Flags)
}
// hasFlag reports whether flag is already registered on the app.
func (a *App) hasFlag(flag Flag) bool {
	for _, existing := range a.Flags {
		if existing == flag {
			return true
		}
	}
	return false
}
// errWriter returns the destination for error output: the app's ErrWriter
// when set, otherwise the package-level ErrWriter.
func (a *App) errWriter() io.Writer {
	if a.ErrWriter != nil {
		return a.ErrWriter
	}
	return ErrWriter
}
// appendFlag registers flag on the app unless it is already present.
func (a *App) appendFlag(flag Flag) {
	if a.hasFlag(flag) {
		return
	}
	a.Flags = append(a.Flags, flag)
}
// handleExitCoder routes err through the app's ExitErrHandler when one is
// configured, falling back to the package-level HandleExitCoder.
func (a *App) handleExitCoder(context *Context, err error) {
	if a.ExitErrHandler == nil {
		HandleExitCoder(err)
		return
	}
	a.ExitErrHandler(context, err)
}
// Author represents someone who has contributed to a cli project.
type Author struct {
	Name  string // The Authors name
	Email string // The Authors email
}

// String makes Author comply to the Stringer interface, to allow an easy
// print in the templating process: "Name <Email>", or just "Name" when no
// email is set.
func (a Author) String() string {
	if a.Email == "" {
		return a.Name
	}
	return a.Name + " <" + a.Email + ">"
}
// HandleAction attempts to figure out which Action signature was used. If
// it's an ActionFunc or a func with the legacy signature for Action, the func
// is run!
func HandleAction(action interface{}, context *Context) (err error) {
	if fn, ok := action.(ActionFunc); ok {
		return fn(context)
	}
	if fn, ok := action.(func(*Context) error); ok {
		return fn(context)
	}
	if fn, ok := action.(func(*Context)); ok {
		// Legacy, deprecated signature: no error to propagate.
		fn(context)
		return nil
	}
	return errInvalidActionType
}
|
package main
import (
"archive/tar"
"bytes"
"context"
"flag"
"fmt"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/stdcopy"
"github.com/gorilla/mux"
//"io"
"encoding/json"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"strings"
"time"
)
// port is the HTTP listen port, set by the -port flag (see init).
var port string

// dockercli is the shared Docker API client, pinned to API version v1.25.
// NOTE(review): the error from client.NewClient is captured in a package-level
// `err` variable and never checked — if client creation fails, dockercli is
// nil and every handler will crash on first use. Consider failing fast.
var dockercli, err = client.NewClient("unix:///var/run/docker.sock", "v1.25", nil, map[string]string{"User-Agent": "nanoserverless"})

// tagprefix namespaces every image and service created by this app.
var tagprefix = "nanoserverless"

// registry is the optional remote registry URL (REGISTRY_URL env var); when
// set, init appends a trailing "/" so it can be prefixed onto image tags.
var registry string
// base describes one supported language runtime: how to run the user's code,
// how to display it, which Docker image to build FROM, and any extra build
// steps the generated Dockerfile needs.
type base struct {
	Run string // contents of the /run entrypoint script (empty for compiled runtimes like go17)
	ViewCode []string // container entrypoint used by the /code handler to print the stored source
	FromImg string // Docker base image for the generated Dockerfile
	ExtraBuild string // extra Dockerfile lines (e.g. compile steps); may be empty
}
// createResponse is the JSON document written by the /create handler,
// summarizing the image build (and the optional registry push).
type createResponse struct {
	Base string // runtime key used (e.g. "php7")
	Image string // full image tag that was built
	Dockerfile string // generated Dockerfile contents
	Code string // the user code baked into the image
	CreateLog string // raw docker build output
	PushLog string // raw docker push output (empty when no registry is configured)
}
// bases maps a runtime key (e.g. "php7", "go17") to its build/run recipe.
var bases = make(map[string]base)

// init registers the -port flag, configures the optional registry from the
// REGISTRY_URL environment variable (pulling its images locally when set),
// and registers every supported runtime.
func init() {
	flag.StringVar(&port, "port", "80", "give me a port number")

	registry = os.Getenv("REGISTRY_URL")
	if registry != "" {
		registry += "/"
		synchroRepo()
	}

	bases["php7"] = base{
		Run:      "#!/bin/sh\nphp app",
		ViewCode: []string{"cat", "/app"},
		FromImg:  "php:7",
	}
	bases["node7"] = base{
		Run:      "#!/bin/sh\nnode app",
		ViewCode: []string{"cat", "/app"},
		FromImg:  "node:7",
	}
	bases["java8"] = base{
		Run:        "#!/bin/sh\njava app",
		ViewCode:   []string{"cat", "/app.java"},
		FromImg:    "openjdk:8",
		ExtraBuild: "RUN mv app app.java && javac app.java",
	}
	bases["go17"] = base{
		ViewCode:   []string{"cat", "/go/src/app.go"},
		FromImg:    "golang:1.7",
		ExtraBuild: "WORKDIR /go/src\nENV CGO_ENABLED=0\nENV GO_PATH=/go/src\nRUN mv /app ./app.go && go build -a --installsuffix cgo --ldflags=-s -o /run",
	}
	bases["python27"] = base{
		Run:      "#!/bin/sh\npython app",
		ViewCode: []string{"cat", "/app"},
		FromImg:  "python:2.7",
	}
	bases["bash4"] = base{
		Run:      "#!/bin/sh\nbash app",
		ViewCode: []string{"cat", "/app"},
		FromImg:  "bash:4",
	}
}
// main parses flags, wires up the HTTP routes, and serves forever.
func main() {
	flag.Parse()

	// Route table: registration order matters for gorilla/mux matching.
	router := mux.NewRouter()
	router.HandleFunc("/list", list)
	router.HandleFunc("/{base}/{name}", infofunc)
	router.HandleFunc("/{base}/{name}/create", create)
	router.HandleFunc("/{base}/{name}/exec", exec)
	router.HandleFunc("/{base}/{name}/up", up)
	router.HandleFunc("/{base}/{name}/down", down)
	router.HandleFunc("/{base}/{name}/code", code)
	router.HandleFunc("/whoami", whoami)
	http.Handle("/", router)

	fmt.Println("Starting up on port " + port)
	log.Fatal(http.ListenAndServe(":"+port, nil))
}
// whoami is a debug endpoint: it optionally sleeps for ?wait=<duration>,
// then echoes the server hostname and the raw incoming request.
func whoami(w http.ResponseWriter, req *http.Request) {
	parsed, _ := url.Parse(req.URL.String())
	if wait := parsed.Query().Get("wait"); wait != "" {
		if d, err := time.ParseDuration(wait); err == nil {
			time.Sleep(d)
		}
	}
	host, _ := os.Hostname()
	fmt.Fprintln(w, "Hostname:", host)
	req.Write(w)
}
// synchroRepo pulls every repository in the configured registry whose name
// starts with "<tagprefix>-", so the local Docker daemon mirrors the
// registry. Catalog JSON problems are logged and skipped; a failed HTTP
// request is fatal and a failed pull panics.
func synchroRepo() {
	ctx := context.Background()

	resp_http, err := http.Get("http://" + registry + "v2/_catalog")
	if err != nil {
		log.Fatal(err)
	}
	defer resp_http.Body.Close()

	buf := new(bytes.Buffer)
	buf.ReadFrom(resp_http.Body)

	// The registry catalog API answers {"repositories": [...]}.
	type Repos struct {
		Repositories []string `json:"repositories"`
	}
	var repos Repos
	err = json.Unmarshal(buf.Bytes(), &repos)
	if err != nil {
		fmt.Println("error:", err)
	}

	for _, tag := range repos.Repositories {
		// Only pull repositories created by this app ("<tagprefix>-...").
		if !strings.HasPrefix(tag, tagprefix+"-") {
			continue
		}
		fmt.Println("Pulling image :", registry+tag)
		resp_pull, err := dockercli.ImagePull(ctx, registry+tag, types.ImagePullOptions{
			RegistryAuth: "ewogICJ1c2VybmFtZSI6ICIiLAogICJwYXNzd29yZCI6ICIiLAogICJlbWFpbCI6ICIiLAogICJzZXJ2ZXJhZGRyZXNzIjogIiIKfQo=",
		})
		// BUG FIX: check the pull error before touching the stream; the old
		// code read from a nil reader first, masking the real error with a
		// nil-pointer panic.
		if err != nil {
			panic(err)
		}
		// Drain the progress stream so the pull completes, then release it
		// (it was previously never closed — a connection leak).
		buf_pull := new(bytes.Buffer)
		buf_pull.ReadFrom(resp_pull)
		resp_pull.Close()
	}
}
// list writes, one per line, every local image tag created by this app,
// refreshing from the remote registry first when one is configured.
func list(w http.ResponseWriter, req *http.Request) {
	ctx := context.Background()

	// Get images from registry
	if registry != "" {
		synchroRepo()
	}

	// List local images
	images, err := dockercli.ImageList(ctx, types.ImageListOptions{})
	if err != nil {
		// BUG FIX: a handler must not log.Fatal — that kills the whole
		// server on a routine API error. Report it to the caller instead.
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	for _, image := range images {
		for _, tag := range image.RepoTags {
			if strings.HasPrefix(tag, registry+tagprefix+"-") {
				fmt.Fprintln(w, "local tag", tag)
			}
		}
	}
}
// infofunc reports the status of the swarm service backing {base}/{name}:
// one line per running task, or "Not UP" when the service has no tasks.
func infofunc(w http.ResponseWriter, req *http.Request) {
	vars := mux.Vars(req)
	base := vars["base"]
	name := vars["name"]
	tag := tagprefix + "-" + base + "-" + name
	servicename := tag
	ctx := context.Background()
	fmt.Fprintln(w, "Service", servicename, "status :")

	// Get tasks belonging to this service by name.
	serviceNameFilter := filters.NewArgs()
	serviceNameFilter.Add("name", servicename)
	tasks, err := dockercli.TaskList(ctx, types.TaskListOptions{
		Filters: serviceNameFilter,
	})
	if err != nil {
		// BUG FIX: don't log.Fatal inside a handler — it terminates the
		// whole server. Report the failure to the caller instead.
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	for _, task := range tasks {
		fmt.Fprintln(w, "Task", task.Slot, task.Status.ContainerStatus.ContainerID, task.Status.State, "("+task.Status.Message+")")
	}
	if len(tasks) == 0 {
		fmt.Fprintln(w, "Not UP")
	}
}
// down removes the swarm service backing {base}/{name}.
func down(w http.ResponseWriter, req *http.Request) {
	vars := mux.Vars(req)
	base := vars["base"]
	name := vars["name"]
	tag := tagprefix + "-" + base + "-" + name
	servicename := tag
	ctx := context.Background()
	if err := dockercli.ServiceRemove(ctx, servicename); err != nil {
		// BUG FIX: removing a non-existent service used to log.Fatal and
		// kill the whole server; report the error to the caller instead.
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Fprintln(w, "Service", servicename, "removed")
}
// up creates a swarm service for {base}/{name}, attached to the
// "nanoserverless" overlay network — the programmatic equivalent of:
//
//	docker service create --name <tag> --network nanoserverless <image>
func up(w http.ResponseWriter, req *http.Request) {
	vars := mux.Vars(req)
	base := vars["base"]
	name := vars["name"]
	tag := tagprefix + "-" + base + "-" + name
	servicename := tag
	ctx := context.Background()

	// Minimal service spec: only the name, the image, and the network are
	// set; every other option keeps its swarm default.
	spec := swarm.ServiceSpec{
		Annotations: swarm.Annotations{
			Name: servicename,
		},
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: swarm.ContainerSpec{
				Image: registry + tag,
			},
			Networks: []swarm.NetworkAttachmentConfig{
				{Target: "nanoserverless"},
			},
		},
	}

	resp, err := dockercli.ServiceCreate(ctx, spec, types.ServiceCreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Fprintln(w, "Service id ", resp.ID, "created")
}
// code runs the image for {base}/{name} with the runtime's "view code"
// entrypoint (e.g. `cat /app`) and streams the container output — i.e. the
// stored source code — back to the client. The image is pulled on demand
// when it is missing locally; the one-shot container is removed afterwards.
func code(w http.ResponseWriter, req *http.Request) {
	vars := mux.Vars(req)
	base := vars["base"]
	name := vars["name"]
	tag := tagprefix + "-" + base + "-" + name
	ctx := context.Background()

	baseStruct, ok := bases[base]
	if !ok {
		fmt.Fprintln(w, base, "not supported yet !")
		return
	}

	// Create the container; on failure, assume the image is missing
	// locally, pull it, and retry once.
	resp, err := dockercli.ContainerCreate(ctx, &container.Config{
		Image:      registry + tag,
		Entrypoint: baseStruct.ViewCode,
	}, nil, nil, "")
	if err != nil {
		resp_pull, err := dockercli.ImagePull(ctx, registry+tag, types.ImagePullOptions{
			RegistryAuth: "ewogICJ1c2VybmFtZSI6ICIiLAogICJwYXNzd29yZCI6ICIiLAogICJlbWFpbCI6ICIiLAogICJzZXJ2ZXJhZGRyZXNzIjogIiIKfQo=",
		})
		// BUG FIX: check the pull error before touching the stream; the
		// old code read from a nil reader first, masking the real error.
		if err != nil {
			panic(err)
		}
		// Drain the progress stream so the pull completes, then release
		// it (it was previously never closed).
		buf_pull := new(bytes.Buffer)
		buf_pull.ReadFrom(resp_pull)
		resp_pull.Close()

		resp, err = dockercli.ContainerCreate(ctx, &container.Config{
			Image:      registry + tag,
			Entrypoint: baseStruct.ViewCode,
		}, nil, nil, "")
		if err != nil {
			panic(err)
		}
	}

	// Run the container and wait for it to finish.
	if err := dockercli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
		panic(err)
	}
	if _, err = dockercli.ContainerWait(ctx, resp.ID); err != nil {
		panic(err)
	}

	// Stream the logs back; docker multiplexes stdout/stderr, StdCopy
	// demultiplexes them into the response.
	responseBody, err := dockercli.ContainerLogs(ctx, resp.ID, types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true})
	if err != nil {
		panic(err)
	}
	defer responseBody.Close()
	stdcopy.StdCopy(w, w, responseBody)

	// Best-effort cleanup of the one-shot container.
	_ = dockercli.ContainerRemove(ctx, resp.ID, types.ContainerRemoveOptions{Force: true})
}
// exec invokes the function {base}/{name}. If the long-running service is
// reachable over HTTP at http://<servicename>, the call is proxied to it;
// otherwise a one-shot container is started, waited for, and its output
// streamed back (pulling the image on demand when missing).
func exec(w http.ResponseWriter, req *http.Request) {
	vars := mux.Vars(req)
	base := vars["base"]
	name := vars["name"]
	tag := tagprefix + "-" + base + "-" + name
	servicename := tag
	ctx := context.Background()

	// Probe the service directly (explicitly bypassing any proxy).
	tr := &http.Transport{
		Proxy: nil,
	}
	client := &http.Client{Transport: tr}
	resp_http, err := client.Get("http://" + servicename)
	if err != nil {
		// Service not reachable: run the function as a one-shot container.
		resp, err := dockercli.ContainerCreate(ctx, &container.Config{
			Image:      registry + tag,
			Entrypoint: []string{"/run"},
		}, nil, nil, "")
		if err != nil {
			// Image probably missing locally: pull it and retry once.
			resp_pull, err := dockercli.ImagePull(ctx, registry+tag, types.ImagePullOptions{
				RegistryAuth: "ewogICJ1c2VybmFtZSI6ICIiLAogICJwYXNzd29yZCI6ICIiLAogICJlbWFpbCI6ICIiLAogICJzZXJ2ZXJhZGRyZXNzIjogIiIKfQo=",
			})
			// BUG FIX: check the pull error before touching the stream;
			// the old code read from a nil reader first, masking it.
			if err != nil {
				panic(err)
			}
			// Drain the progress stream so the pull completes, then
			// release it (it was previously never closed).
			buf_pull := new(bytes.Buffer)
			buf_pull.ReadFrom(resp_pull)
			resp_pull.Close()

			resp, err = dockercli.ContainerCreate(ctx, &container.Config{
				Image:      registry + tag,
				Entrypoint: []string{"/run"},
			}, nil, nil, "")
			if err != nil {
				panic(err)
			}
		}
		// Run the container and wait for completion.
		if err := dockercli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
			panic(err)
		}
		if _, err = dockercli.ContainerWait(ctx, resp.ID); err != nil {
			panic(err)
		}
		// Demultiplex the docker log stream into the HTTP response.
		responseBody, err := dockercli.ContainerLogs(ctx, resp.ID, types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true})
		if err != nil {
			panic(err)
		}
		defer responseBody.Close()
		stdcopy.StdCopy(w, w, responseBody)

		// Best-effort cleanup of the one-shot container.
		_ = dockercli.ContainerRemove(ctx, resp.ID, types.ContainerRemoveOptions{Force: true})
	} else {
		// Service is up: relay its response body verbatim.
		defer resp_http.Body.Close()
		buf := new(bytes.Buffer)
		buf.ReadFrom(resp_http.Body)
		fmt.Fprint(w, buf.String())
	}
}
// create builds (and, when a registry is configured, pushes) the image for
// {base}/{name}.
//
// The function source comes from the ?url= query parameter (fetched over
// HTTP) or, failing that, from the request body. An in-memory tar build
// context is assembled containing a generated Dockerfile, the user code
// ("app"), the runtime's entrypoint script ("run"), and the shell2http
// binary copied from the local filesystem. The outcome is reported as a
// createResponse JSON document.
func create(w http.ResponseWriter, req *http.Request) {
	vars := mux.Vars(req)
	base := vars["base"]
	name := vars["name"]
	tag := tagprefix + "-" + base + "-" + name

	bodyb, _ := ioutil.ReadAll(req.Body)
	body := string(bodyb)
	// NOTE: this local deliberately shadows the net/url package.
	url := req.URL.Query().Get("url")

	baseStruct, ok := bases[base]
	if !ok {
		fmt.Fprintln(w, base, "not supported yet !")
		return
	}

	// Generate dockerfile
	dockerfile := "FROM "
	dockerfile += baseStruct.FromImg
	dockerfile += "\nCOPY shell2http /"
	dockerfile += "\nCOPY app /"
	dockerfile += "\nCOPY run /"
	dockerfile += "\n" + baseStruct.ExtraBuild
	dockerfile += "\nENTRYPOINT [\"/shell2http\", \"-port=80\", \"-cgi\", \"-export-all-vars\", \"/\", \"/run\"]"

	// Resolve the function source: a remote URL wins over the request body.
	app := ""
	if url != "" {
		resp_http, err := http.Get(url)
		if err != nil {
			// BUG FIX: handlers must not log.Fatal — that kills the whole
			// server. Report the error to the caller instead (this fix is
			// applied to every former log.Fatal* in this handler).
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		defer resp_http.Body.Close()
		buf := new(bytes.Buffer)
		buf.ReadFrom(resp_http.Body)
		app = buf.String()
	} else {
		app = body
	}

	// Generate run
	run := baseStruct.Run

	// Assemble the in-memory tar build context.
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	var files = []struct {
		Name, Body string
	}{
		{"Dockerfile", dockerfile},
		{"app", app},
		{"run", run},
	}
	for _, file := range files {
		hdr := &tar.Header{
			Name: file.Name,
			Mode: 0700,
			Size: int64(len(file.Body)),
		}
		if err := tw.WriteHeader(hdr); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		if _, err := tw.Write([]byte(file.Body)); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
	}

	// Bundle the shell2http binary shipped alongside this server.
	dat, err := ioutil.ReadFile("/shell2http")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	hdr := &tar.Header{
		Name: "/shell2http",
		Mode: 0700,
		Size: int64(len(dat)),
	}
	if err := tw.WriteHeader(hdr); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if _, err := tw.Write(dat); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// Make sure to check the error on Close.
	if err := tw.Close(); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	// Open the tar archive for reading and run the docker build.
	reader := bytes.NewReader(buf.Bytes())
	buildOptions := types.ImageBuildOptions{
		Tags:           []string{registry + tag},
		NoCache:        true,
		SuppressOutput: true,
	}
	response, err := dockercli.ImageBuild(context.Background(), reader, buildOptions)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer response.Body.Close()
	buf2 := new(bytes.Buffer)
	buf2.ReadFrom(response.Body)
	result := buf2.String()

	// Push the image when a registry is configured.
	result_push := ""
	if registry != "" {
		response_push, err := dockercli.ImagePush(context.Background(), registry+tag, types.ImagePushOptions{
			RegistryAuth: "ewogICJ1c2VybmFtZSI6ICIiLAogICJwYXNzd29yZCI6ICIiLAogICJlbWFpbCI6ICIiLAogICJzZXJ2ZXJhZGRyZXNzIjogIiIKfQo=",
		})
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		buf3 := new(bytes.Buffer)
		buf3.ReadFrom(response_push)
		// Release the push progress stream (was previously never closed).
		response_push.Close()
		result_push = buf3.String()
	}

	// Report everything the caller might want to inspect as JSON.
	full_response := createResponse{
		Base:       base,
		Image:      registry + tag,
		Dockerfile: dockerfile,
		Code:       app,
		CreateLog:  result,
		PushLog:    result_push,
	}
	json_result, err := json.Marshal(full_response)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Fprintln(w, string(json_result))
}
Pass query parameters through to the environment, and more.
package main
import (
"archive/tar"
"bytes"
"context"
"flag"
"fmt"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/stdcopy"
"github.com/gorilla/mux"
//"io"
"encoding/json"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"strings"
"time"
)
// port is the HTTP listen port, set by the -port flag (see init).
var port string

// dockercli is the shared Docker API client, pinned to API version v1.25.
// NOTE(review): the error from client.NewClient is captured in a package-level
// `err` variable and never checked — if client creation fails, dockercli is
// nil and every handler will crash on first use. Consider failing fast.
var dockercli, err = client.NewClient("unix:///var/run/docker.sock", "v1.25", nil, map[string]string{"User-Agent": "nanoserverless"})

// tagprefix namespaces every image and service created by this app.
var tagprefix = "nanoserverless"

// registry is the optional remote registry URL (REGISTRY_URL env var); when
// set, init appends a trailing "/" so it can be prefixed onto image tags.
var registry string
// base describes one supported language runtime: how to run the user's code,
// how to display it, which Docker image to build FROM, and any extra build
// steps the generated Dockerfile needs.
type base struct {
	Run string // contents of the /run entrypoint script (empty for compiled runtimes like go17)
	ViewCode []string // container entrypoint used by the /code handler to print the stored source
	FromImg string // Docker base image for the generated Dockerfile
	ExtraBuild string // extra Dockerfile lines (e.g. compile steps); may be empty
}
// createResponse is the JSON document written by the /create handler,
// summarizing the image build (and the optional registry push).
type createResponse struct {
	Base string // runtime key used (e.g. "php7")
	Image string // full image tag that was built
	Dockerfile string // generated Dockerfile contents
	Code string // the user code baked into the image
	CreateLog string // raw docker build output
	PushLog string // raw docker push output (empty when no registry is configured)
}
// bases maps a runtime key (e.g. "php7", "go17") to its build/run recipe.
var bases = make(map[string]base)

// init registers the -port flag, configures the optional registry from the
// REGISTRY_URL environment variable (pulling its images locally when set),
// and registers every supported runtime.
func init() {
	flag.StringVar(&port, "port", "80", "give me a port number")

	registry = os.Getenv("REGISTRY_URL")
	if registry != "" {
		registry += "/"
		synchroRepo()
	}

	bases["php7"] = base{
		Run:      "#!/bin/sh\nphp app",
		ViewCode: []string{"cat", "/app"},
		FromImg:  "php:7",
	}
	bases["node7"] = base{
		Run:      "#!/bin/sh\nnode app",
		ViewCode: []string{"cat", "/app"},
		FromImg:  "node:7",
	}
	bases["java8"] = base{
		Run:        "#!/bin/sh\njava app",
		ViewCode:   []string{"cat", "/app.java"},
		FromImg:    "openjdk:8",
		ExtraBuild: "RUN mv app app.java && javac app.java",
	}
	bases["go17"] = base{
		ViewCode:   []string{"cat", "/go/src/app.go"},
		FromImg:    "golang:1.7",
		ExtraBuild: "WORKDIR /go/src\nENV CGO_ENABLED=0\nENV GO_PATH=/go/src\nRUN mv /app ./app.go && go build -a --installsuffix cgo --ldflags=-s -o /run",
	}
	bases["python27"] = base{
		Run:      "#!/bin/sh\npython app",
		ViewCode: []string{"cat", "/app"},
		FromImg:  "python:2.7",
	}
	bases["bash4"] = base{
		Run:      "#!/bin/sh\nbash app",
		ViewCode: []string{"cat", "/app"},
		FromImg:  "bash:4",
	}
}
// main parses flags, wires up the HTTP routes, and serves forever.
func main() {
	flag.Parse()

	// Route table: registration order matters for gorilla/mux matching.
	router := mux.NewRouter()
	router.HandleFunc("/list", list)
	router.HandleFunc("/{base}/{name}", infofunc)
	router.HandleFunc("/{base}/{name}/create", create)
	router.HandleFunc("/{base}/{name}/exec", exec)
	router.HandleFunc("/{base}/{name}/up", up)
	router.HandleFunc("/{base}/{name}/down", down)
	router.HandleFunc("/{base}/{name}/code", code)
	router.HandleFunc("/whoami", whoami)
	http.Handle("/", router)

	fmt.Println("Starting up on port " + port)
	log.Fatal(http.ListenAndServe(":"+port, nil))
}
// whoami is a debug endpoint: it optionally sleeps for ?wait=<duration>,
// then echoes the server hostname and the raw incoming request.
func whoami(w http.ResponseWriter, req *http.Request) {
	parsed, _ := url.Parse(req.URL.String())
	if wait := parsed.Query().Get("wait"); wait != "" {
		if d, err := time.ParseDuration(wait); err == nil {
			time.Sleep(d)
		}
	}
	host, _ := os.Hostname()
	fmt.Fprintln(w, "Hostname:", host)
	req.Write(w)
}
// synchroRepo pulls every repository in the configured registry whose name
// starts with "<tagprefix>-", so the local Docker daemon mirrors the
// registry. Catalog JSON problems are logged and skipped; a failed HTTP
// request is fatal and a failed pull panics.
func synchroRepo() {
	ctx := context.Background()

	resp_http, err := http.Get("http://" + registry + "v2/_catalog")
	if err != nil {
		log.Fatal(err)
	}
	defer resp_http.Body.Close()

	buf := new(bytes.Buffer)
	buf.ReadFrom(resp_http.Body)

	// The registry catalog API answers {"repositories": [...]}.
	type Repos struct {
		Repositories []string `json:"repositories"`
	}
	var repos Repos
	err = json.Unmarshal(buf.Bytes(), &repos)
	if err != nil {
		fmt.Println("error:", err)
	}

	for _, tag := range repos.Repositories {
		// Only pull repositories created by this app ("<tagprefix>-...").
		if !strings.HasPrefix(tag, tagprefix+"-") {
			continue
		}
		fmt.Println("Pulling image :", registry+tag)
		resp_pull, err := dockercli.ImagePull(ctx, registry+tag, types.ImagePullOptions{
			RegistryAuth: "ewogICJ1c2VybmFtZSI6ICIiLAogICJwYXNzd29yZCI6ICIiLAogICJlbWFpbCI6ICIiLAogICJzZXJ2ZXJhZGRyZXNzIjogIiIKfQo=",
		})
		// BUG FIX: check the pull error before touching the stream; the old
		// code read from a nil reader first, masking the real error with a
		// nil-pointer panic.
		if err != nil {
			panic(err)
		}
		// Drain the progress stream so the pull completes, then release it
		// (it was previously never closed — a connection leak).
		buf_pull := new(bytes.Buffer)
		buf_pull.ReadFrom(resp_pull)
		resp_pull.Close()
	}
}
// list writes, one per line, every local image tag created by this app,
// refreshing from the remote registry first when one is configured.
func list(w http.ResponseWriter, req *http.Request) {
	ctx := context.Background()

	// Get images from registry
	if registry != "" {
		synchroRepo()
	}

	// List local images
	images, err := dockercli.ImageList(ctx, types.ImageListOptions{})
	if err != nil {
		// BUG FIX: a handler must not log.Fatal — that kills the whole
		// server on a routine API error. Report it to the caller instead.
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	for _, image := range images {
		for _, tag := range image.RepoTags {
			if strings.HasPrefix(tag, registry+tagprefix+"-") {
				fmt.Fprintln(w, "local tag", tag)
			}
		}
	}
}
// infofunc reports the status of the swarm service backing {base}/{name}:
// one line per running task, or "Not UP" when the service has no tasks.
func infofunc(w http.ResponseWriter, req *http.Request) {
	vars := mux.Vars(req)
	base := vars["base"]
	name := vars["name"]
	tag := tagprefix + "-" + base + "-" + name
	servicename := tag
	ctx := context.Background()
	fmt.Fprintln(w, "Service", servicename, "status :")

	// Get tasks belonging to this service by name.
	serviceNameFilter := filters.NewArgs()
	serviceNameFilter.Add("name", servicename)
	tasks, err := dockercli.TaskList(ctx, types.TaskListOptions{
		Filters: serviceNameFilter,
	})
	if err != nil {
		// BUG FIX: don't log.Fatal inside a handler — it terminates the
		// whole server. Report the failure to the caller instead.
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	for _, task := range tasks {
		fmt.Fprintln(w, "Task", task.Slot, task.Status.ContainerStatus.ContainerID, task.Status.State, "("+task.Status.Message+")")
	}
	if len(tasks) == 0 {
		fmt.Fprintln(w, "Not UP")
	}
}
// down removes the swarm service backing {base}/{name}.
func down(w http.ResponseWriter, req *http.Request) {
	vars := mux.Vars(req)
	base := vars["base"]
	name := vars["name"]
	tag := tagprefix + "-" + base + "-" + name
	servicename := tag
	ctx := context.Background()
	if err := dockercli.ServiceRemove(ctx, servicename); err != nil {
		// BUG FIX: removing a non-existent service used to log.Fatal and
		// kill the whole server; report the error to the caller instead.
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Fprintln(w, "Service", servicename, "removed")
}
// up creates a swarm service for {base}/{name}, attached to the
// "nanoserverless" overlay network — the programmatic equivalent of:
//
//	docker service create --name <tag> --network nanoserverless <image>
func up(w http.ResponseWriter, req *http.Request) {
	vars := mux.Vars(req)
	base := vars["base"]
	name := vars["name"]
	tag := tagprefix + "-" + base + "-" + name
	servicename := tag
	ctx := context.Background()

	// Minimal service spec: only the name, the image, and the network are
	// set; every other option keeps its swarm default.
	spec := swarm.ServiceSpec{
		Annotations: swarm.Annotations{
			Name: servicename,
		},
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: swarm.ContainerSpec{
				Image: registry + tag,
			},
			Networks: []swarm.NetworkAttachmentConfig{
				{Target: "nanoserverless"},
			},
		},
	}

	resp, err := dockercli.ServiceCreate(ctx, spec, types.ServiceCreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Fprintln(w, "Service id ", resp.ID, "created")
}
// code streams the source code of a deployed function back to the client.
// It runs a throwaway container whose entrypoint (baseStruct.ViewCode)
// prints the code, waits for it to exit, demuxes its stdout/stderr into
// the response, and removes the container. If the image is missing
// locally, it is pulled once and the create is retried.
func code(w http.ResponseWriter, req *http.Request) {
	vars := mux.Vars(req)
	base := vars["base"]
	name := vars["name"]
	tag := tagprefix + "-" + base + "-" + name
	ctx := context.Background()
	baseStruct, ok := bases[base]
	if !ok {
		fmt.Fprintln(w, base, "not supported yet !")
		return
	}
	// Create a one-shot container whose entrypoint prints the function code.
	resp, err := dockercli.ContainerCreate(ctx, &container.Config{
		Image:      registry + tag,
		Entrypoint: baseStruct.ViewCode,
	}, nil, nil, "")
	if err != nil {
		// The image is probably missing locally: pull it, then retry.
		resp_pull, pullErr := dockercli.ImagePull(ctx, registry+tag, types.ImagePullOptions{
			RegistryAuth: "ewogICJ1c2VybmFtZSI6ICIiLAogICJwYXNzd29yZCI6ICIiLAogICJlbWFpbCI6ICIiLAogICJzZXJ2ZXJhZGRyZXNzIjogIiIKfQo=",
		})
		// Check the pull error BEFORE reading from the stream: on failure
		// resp_pull is nil and ReadFrom would panic with a nil deref.
		if pullErr != nil {
			http.Error(w, pullErr.Error(), http.StatusInternalServerError)
			return
		}
		// Drain the progress stream; the pull only completes once its
		// output has been fully consumed.
		buf_pull := new(bytes.Buffer)
		buf_pull.ReadFrom(resp_pull)
		resp_pull.Close()
		resp, err = dockercli.ContainerCreate(ctx, &container.Config{
			Image:      registry + tag,
			Entrypoint: baseStruct.ViewCode,
		}, nil, nil, "")
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
	}
	// Run the container.
	if err := dockercli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// Wait for it to exit so the logs are complete.
	if _, err = dockercli.ContainerWait(ctx, resp.ID); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	responseBody, err := dockercli.ContainerLogs(ctx, resp.ID, types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true})
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer responseBody.Close()
	// Demultiplex Docker's combined log stream into the HTTP response.
	stdcopy.StdCopy(w, w, responseBody)
	// Best-effort cleanup of the one-shot container.
	_ = dockercli.ContainerRemove(ctx, resp.ID, types.ContainerRemoveOptions{Force: true})
}
// exec invokes a function. Fast path: if the function is running as a
// service reachable by name on the overlay network, the request is proxied
// to it over HTTP. Slow path: a one-shot container is run with CGI-style
// environment variables and its output is streamed back.
func exec(w http.ResponseWriter, req *http.Request) {
	vars := mux.Vars(req)
	base := vars["base"]
	name := vars["name"]
	tag := tagprefix + "-" + base + "-" + name
	servicename := tag
	query := req.URL.RawQuery
	ctx := context.Background()
	// Bypass any configured proxy: the service name only resolves inside
	// the overlay network.
	tr := &http.Transport{
		Proxy: nil,
	}
	client := &http.Client{Transport: tr}
	newreq, err := http.NewRequest(req.Method, "http://"+servicename, nil)
	if err != nil {
		// Previously ignored, which made newreq nil and crashed below.
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// Forward query, body and caller identity.
	newreq.URL.RawQuery = query
	newreq.Body = req.Body
	newreq.RemoteAddr = req.RemoteAddr
	newreq.Header.Set("User-Agent", req.UserAgent())
	resp_http, err := client.Do(newreq)
	if err != nil {
		// No running service: fall back to a one-shot container with a
		// CGI-style environment.
		env := []string{
			"QUERY_STRING=" + query,
			"REQUEST_METHOD=" + req.Method,
			"HTTP_USER_AGENT=" + req.UserAgent(),
			"REMOTE_ADDR=" + req.RemoteAddr,
			"SERVER_PROTOCOL=" + req.Proto,
		}
		resp, err := dockercli.ContainerCreate(ctx, &container.Config{
			Image:      registry + tag,
			Entrypoint: []string{"/run"},
			Env:        env,
		}, nil, nil, "")
		if err != nil {
			// The image is probably missing locally: pull it, then retry.
			resp_pull, pullErr := dockercli.ImagePull(ctx, registry+tag, types.ImagePullOptions{
				RegistryAuth: "ewogICJ1c2VybmFtZSI6ICIiLAogICJwYXNzd29yZCI6ICIiLAogICJlbWFpbCI6ICIiLAogICJzZXJ2ZXJhZGRyZXNzIjogIiIKfQo=",
			})
			// Check the pull error BEFORE reading: on failure resp_pull is
			// nil and ReadFrom would panic.
			if pullErr != nil {
				http.Error(w, pullErr.Error(), http.StatusInternalServerError)
				return
			}
			// Drain the progress stream; the pull only completes once its
			// output has been fully consumed.
			buf_pull := new(bytes.Buffer)
			buf_pull.ReadFrom(resp_pull)
			resp_pull.Close()
			// Retry with the same env; the original retry dropped it,
			// silently losing the CGI variables.
			resp, err = dockercli.ContainerCreate(ctx, &container.Config{
				Image:      registry + tag,
				Entrypoint: []string{"/run"},
				Env:        env,
			}, nil, nil, "")
			if err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
		}
		// Run the container.
		if err := dockercli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		// Wait for it to exit so the logs are complete.
		if _, err = dockercli.ContainerWait(ctx, resp.ID); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		responseBody, err := dockercli.ContainerLogs(ctx, resp.ID, types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true})
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		defer responseBody.Close()
		// Demultiplex Docker's combined log stream into the HTTP response.
		stdcopy.StdCopy(w, w, responseBody)
		// Best-effort cleanup of the one-shot container.
		_ = dockercli.ContainerRemove(ctx, resp.ID, types.ContainerRemoveOptions{Force: true})
	} else {
		defer resp_http.Body.Close()
		buf := new(bytes.Buffer)
		buf.ReadFrom(resp_http.Body)
		fmt.Fprint(w, buf.String())
	}
}
// create builds (and, if a registry is configured, pushes) the Docker
// image for a function. The function code comes from the request body or,
// if the "url" query parameter is set, from an HTTP GET of that URL. The
// image embeds shell2http so the container can serve the function over
// HTTP. The response is a JSON-encoded createResponse.
func create(w http.ResponseWriter, req *http.Request) {
	vars := mux.Vars(req)
	base := vars["base"]
	name := vars["name"]
	tag := tagprefix + "-" + base + "-" + name
	// Best-effort read: an empty body is valid when "url" is provided.
	bodyb, _ := ioutil.ReadAll(req.Body)
	body := string(bodyb)
	url := req.URL.Query().Get("url")
	baseStruct, ok := bases[base]
	if !ok {
		fmt.Fprintln(w, base, "not supported yet !")
		return
	}
	// Generate the Dockerfile for this runtime base.
	dockerfile := "FROM "
	dockerfile += baseStruct.FromImg
	dockerfile += "\nCOPY shell2http /"
	dockerfile += "\nCOPY app /"
	dockerfile += "\nCOPY run /"
	dockerfile += "\n" + baseStruct.ExtraBuild
	dockerfile += "\nENTRYPOINT [\"/shell2http\", \"-port=80\", \"-cgi\", \"-export-all-vars\", \"/\", \"/run\"]"
	// Obtain the function code.
	app := ""
	if url != "" {
		resp_http, err := http.Get(url)
		if err != nil {
			// Report instead of log.Fatal: a bad URL must not kill the server.
			http.Error(w, err.Error(), http.StatusBadGateway)
			return
		}
		defer resp_http.Body.Close()
		buf := new(bytes.Buffer)
		buf.ReadFrom(resp_http.Body)
		app = buf.String()
	} else {
		app = body
	}
	// Generate the run wrapper.
	run := baseStruct.Run
	// Build the Docker build context as an in-memory tar archive.
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	var files = []struct {
		Name, Body string
	}{
		{"Dockerfile", dockerfile},
		{"app", app},
		{"run", run},
	}
	for _, file := range files {
		hdr := &tar.Header{
			Name: file.Name,
			Mode: 0700,
			Size: int64(len(file.Body)),
		}
		if err := tw.WriteHeader(hdr); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		if _, err := tw.Write([]byte(file.Body)); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
	}
	// Bundle the shell2http binary shipped alongside this server.
	dat, err := ioutil.ReadFile("/shell2http")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	hdr := &tar.Header{
		Name: "/shell2http",
		Mode: 0700,
		Size: int64(len(dat)),
	}
	if err := tw.WriteHeader(hdr); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if _, err := tw.Write(dat); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// Make sure to check the error on Close.
	if err := tw.Close(); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// Open the tar archive for reading.
	reader := bytes.NewReader(buf.Bytes())
	// Docker build.
	buildOptions := types.ImageBuildOptions{
		Tags:           []string{registry + tag},
		NoCache:        true,
		SuppressOutput: true,
	}
	response, err := dockercli.ImageBuild(context.Background(), reader, buildOptions)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer response.Body.Close()
	// Drain the build output; the build completes only once the stream has
	// been fully consumed.
	buf2 := new(bytes.Buffer)
	buf2.ReadFrom(response.Body)
	result := buf2.String()
	// Push the image when a registry is configured.
	result_push := ""
	if registry != "" {
		response_push, err := dockercli.ImagePush(context.Background(), registry+tag, types.ImagePushOptions{
			RegistryAuth: "ewogICJ1c2VybmFtZSI6ICIiLAogICJwYXNzd29yZCI6ICIiLAogICJlbWFpbCI6ICIiLAogICJzZXJ2ZXJhZGRyZXNzIjogIiIKfQo=",
		})
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		buf3 := new(bytes.Buffer)
		buf3.ReadFrom(response_push)
		result_push = buf3.String()
	}
	full_response := createResponse{
		Base:       base,
		Image:      registry + tag,
		Dockerfile: dockerfile,
		Code:       app,
		CreateLog:  result,
		PushLog:    result_push,
	}
	json_result, err := json.Marshal(full_response)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Fprintln(w, string(json_result))
}
|
package main
import (
)
// initAppListner sets up the application listener. There is currently no
// setup work to perform, so it always reports success.
func initAppListner() error {
	var err error
	return err
}
Removed unused app.go file
|
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// A helper that allows using gcsfuse with mount(8).
//
// Can be invoked using a command-line of the form expected for mount helpers.
// Calls the gcsfuse binary, which it finds from one of a list of expected
// locations, and waits for it to complete. The device and mount point are
// passed on as positional arguments, and other known options are converted to
// appropriate flags.
//
// This binary returns with exit code zero only after gcsfuse has reported that
// it has successfully mounted the file system. Further output from gcsfuse is
// suppressed.
package main
// Example invocation on OS X:
//
// mount -t porp -o foo=bar\ baz -o ro,blah bucket ~/tmp/mp
//
// becomes the following arguments:
//
// Arg 0: "/sbin/mount_gcsfuse "
// Arg 1: "-o"
// Arg 2: "foo=bar baz"
// Arg 3: "-o"
// Arg 4: "ro"
// Arg 5: "-o"
// Arg 6: "blah"
// Arg 7: "bucket"
// Arg 8: "/path/to/mp"
//
// On Linux, the fstab entry
//
// bucket /path/to/mp porp user,foo=bar\040baz
//
// becomes
//
// Arg 0: "/sbin/mount.gcsfuse"
// Arg 1: "bucket"
// Arg 2: "/path/to/mp"
// Arg 3: "-o"
// Arg 4: "rw,noexec,nosuid,nodev,user,foo=bar baz"
//
import (
"fmt"
"os"
"os/exec"
"path"
"strings"
"github.com/googlecloudplatform/gcsfuse/internal/mount"
)
// makeGcsfuseArgs converts mount(8)-style options into a gcsfuse argument
// list. Options that only matter to mount(8) are dropped, known gcsfuse
// flags are translated to their "--flag" form, and anything unrecognized
// is forwarded verbatim as "-o name[=value]".
//
// The result of this function should be appended to exec.Command.Args.
func makeGcsfuseArgs(
	device string,
	mountPoint string,
	opts map[string]string) (args []string, err error) {
	for name, value := range opts {
		// Underscore-spelled option -> dash-spelled gcsfuse flag.
		flag := "--" + strings.Replace(name, "_", "-", -1)
		switch name {
		case "user", "nouser", "auto", "noauto", "_netdev", "no_netdev":
			// mount(8) detritus; fusermount rejects these with "Invalid
			// argument" on Linux, so drop them.

		case "implicit_dirs":
			// Boolean gcsfuse flag.
			args = append(args, flag)

		case "dir_mode",
			"file_mode",
			"key_file",
			"temp_dir",
			"gid",
			"uid",
			"only_dir",
			"limit_ops_per_sec",
			"limit_bytes_per_sec",
			"stat_cache_ttl",
			"type_cache_ttl",
			"billing_project":
			// String-valued gcsfuse flag.
			args = append(args, flag, value)

		case "debug_fuse",
			"debug_gcs",
			"debug_http",
			"debug_invariants":
			// Debug flags keep their underscore spelling.
			args = append(args, "--"+name)

		default:
			// Unknown option: forward it untouched.
			var formatted string
			if value == "" {
				formatted = name
			} else {
				formatted = fmt.Sprintf("%s=%s", name, value)
			}
			args = append(args, "-o", formatted)
		}
	}

	// The bucket and mount point come last.
	args = append(args, device, mountPoint)
	return
}
// Parse the supplied command-line arguments from a mount(8) invocation on OS X
// or Linux.
func parseArgs(
args []string) (
device string,
mountPoint string,
opts map[string]string,
err error) {
opts = make(map[string]string)
// Process each argument in turn.
positionalCount := 0
for i, s := range args {
switch {
// Skip the program name.
case i == 0:
continue
// "-o" is illegal only when at the end. We handle its argument in the case
// below.
case s == "-o":
if i == len(args)-1 {
err = fmt.Errorf("Unexpected -o at end of args.")
return
}
// systemd passes -n (alias --no-mtab) to the mount helper. This seems to
// be a result of the new setup on many Linux systems with /etc/mtab as a
// symlink pointing to /proc/self/mounts. /proc/self/mounts is read-only,
// so any helper that would normally write to /etc/mtab should be
// configured not to do so. Because systemd does not provide a way to
// disable this behavior for mount helpers that do not write to /etc/mtab,
// we ignore the flag.
case s == "-n":
continue
// Is this an options string following a "-o"?
case i > 0 && args[i-1] == "-o":
mount.ParseOptions(opts, s)
// Is this the device?
case positionalCount == 0:
device = s
positionalCount++
// Is this the mount point?
case positionalCount == 1:
mountPoint = s
positionalCount++
default:
err = fmt.Errorf("Unexpected arg %d: %q", i, s)
return
}
}
if positionalCount != 2 {
err = fmt.Errorf("Expected two positional arguments; got %d.", positionalCount)
return
}
return
}
// run is the mount helper's real entry point: it parses the mount(8)-style
// arguments, translates them to gcsfuse flags, and invokes the gcsfuse
// binary, returning only after gcsfuse has reported the mount result.
func run(args []string) (err error) {
	// If invoked with a single "--help" argument, print a usage message and exit
	// successfully.
	if len(args) == 2 && args[1] == "--help" {
		fmt.Fprintf(
			os.Stderr,
			"Usage: %s [-o options] bucket_name mount_point\n",
			args[0])
		return
	}
	// Find the path to gcsfuse.
	gcsfusePath, err := findGcsfuse()
	if err != nil {
		err = fmt.Errorf("findGcsfuse: %v", err)
		return
	}
	// Find the path to fusermount.
	fusermountPath, err := findFusermount()
	if err != nil {
		err = fmt.Errorf("findFusermount: %v", err)
		return
	}
	// Attempt to parse arguments.
	device, mountPoint, opts, err := parseArgs(args)
	if err != nil {
		err = fmt.Errorf("parseArgs: %v", err)
		return
	}
	// Choose gcsfuse args.
	gcsfuseArgs, err := makeGcsfuseArgs(device, mountPoint, opts)
	if err != nil {
		err = fmt.Errorf("makeGcsfuseArgs: %v", err)
		return
	}
	fmt.Fprintf(
		os.Stderr,
		"Calling gcsfuse with arguments: %s\n",
		strings.Join(gcsfuseArgs, " "))
	// Run gcsfuse. Note cmd.Env starts empty, so the child sees ONLY the
	// variables added below — not the parent environment.
	cmd := exec.Command(gcsfusePath, gcsfuseArgs...)
	// gcsfuse needs fusermount on its PATH.
	cmd.Env = append(cmd.Env, fmt.Sprintf("PATH=%s", path.Dir(fusermountPath)))
	// Pass through the https_proxy/http_proxy environment variable,
	// in case the host requires a proxy server to reach the GCS endpoint.
	// https_proxy takes precedence over http_proxy, in case both are set.
	if p, ok := os.LookupEnv("https_proxy"); ok {
		cmd.Env = append(cmd.Env, fmt.Sprintf("https_proxy=%s", p))
	} else if p, ok := os.LookupEnv("http_proxy"); ok {
		cmd.Env = append(cmd.Env, fmt.Sprintf("http_proxy=%s", p))
	}
	// Let gcsfuse's own output reach the user.
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	err = cmd.Run()
	if err != nil {
		err = fmt.Errorf("running gcsfuse: %v", err)
		return
	}
	return
}
// main delegates to run and exits non-zero on any error.
func main() {
	if err := run(os.Args); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
Add missing gcsfuse options to mount.gcsfuse
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// A helper that allows using gcsfuse with mount(8).
//
// Can be invoked using a command-line of the form expected for mount helpers.
// Calls the gcsfuse binary, which it finds from one of a list of expected
// locations, and waits for it to complete. The device and mount point are
// passed on as positional arguments, and other known options are converted to
// appropriate flags.
//
// This binary returns with exit code zero only after gcsfuse has reported that
// it has successfully mounted the file system. Further output from gcsfuse is
// suppressed.
package main
// Example invocation on OS X:
//
// mount -t porp -o foo=bar\ baz -o ro,blah bucket ~/tmp/mp
//
// becomes the following arguments:
//
// Arg 0: "/sbin/mount_gcsfuse "
// Arg 1: "-o"
// Arg 2: "foo=bar baz"
// Arg 3: "-o"
// Arg 4: "ro"
// Arg 5: "-o"
// Arg 6: "blah"
// Arg 7: "bucket"
// Arg 8: "/path/to/mp"
//
// On Linux, the fstab entry
//
// bucket /path/to/mp porp user,foo=bar\040baz
//
// becomes
//
// Arg 0: "/sbin/mount.gcsfuse"
// Arg 1: "bucket"
// Arg 2: "/path/to/mp"
// Arg 3: "-o"
// Arg 4: "rw,noexec,nosuid,nodev,user,foo=bar baz"
//
import (
"fmt"
"os"
"os/exec"
"path"
"strings"
"github.com/googlecloudplatform/gcsfuse/internal/mount"
)
// makeGcsfuseArgs converts mount(8)-style options into a gcsfuse argument
// list. Options that only matter to mount(8) are dropped, known gcsfuse
// flags are translated to their "--flag" form, and anything unrecognized
// is forwarded verbatim as "-o name[=value]".
//
// The result of this function should be appended to exec.Command.Args.
func makeGcsfuseArgs(
	device string,
	mountPoint string,
	opts map[string]string) (args []string, err error) {
	for name, value := range opts {
		// Underscore-spelled option -> dash-spelled gcsfuse flag.
		flag := "--" + strings.Replace(name, "_", "-", -1)
		switch name {
		case "user", "nouser", "auto", "noauto", "_netdev", "no_netdev":
			// mount(8) detritus; fusermount rejects these with "Invalid
			// argument" on Linux, so drop them.

		case "implicit_dirs":
			// Boolean gcsfuse flag.
			args = append(args, flag)

		case "dir_mode",
			"file_mode",
			"uid",
			"gid",
			"only_dir",
			"billing_project",
			"key_file",
			"limit_bytes_per_sec",
			"limit_ops_per_sec",
			"max_retry_sleep",
			"stat_cache_capacity",
			"stat_cache_ttl",
			"type_cache_ttl",
			"local_file_cache",
			"temp_dir",
			"disable_http2",
			"max_conns_per_host",
			"monitoring_port",
			"log_file":
			// String-valued gcsfuse flag.
			args = append(args, flag, value)

		case "debug_fuse",
			"debug_gcs",
			"debug_http",
			"debug_invariants":
			// Debug flags keep their underscore spelling.
			args = append(args, "--"+name)

		default:
			// Unknown option: forward it untouched.
			var formatted string
			if value == "" {
				formatted = name
			} else {
				formatted = fmt.Sprintf("%s=%s", name, value)
			}
			args = append(args, "-o", formatted)
		}
	}

	// The bucket and mount point come last.
	args = append(args, device, mountPoint)
	return
}
// Parse the supplied command-line arguments from a mount(8) invocation on OS X
// or Linux, returning the device (bucket), the mount point, and the parsed
// "-o" option map.
func parseArgs(
	args []string) (
	device string,
	mountPoint string,
	opts map[string]string,
	err error) {
	opts = make(map[string]string)
	// Process each argument in turn.
	positionalCount := 0
	for i, s := range args {
		switch {
		// Skip the program name.
		case i == 0:
			continue
		// "-o" is illegal only when at the end. We handle its argument in the case
		// below.
		case s == "-o":
			if i == len(args)-1 {
				err = fmt.Errorf("Unexpected -o at end of args.")
				return
			}
		// systemd passes -n (alias --no-mtab) to the mount helper. This seems to
		// be a result of the new setup on many Linux systems with /etc/mtab as a
		// symlink pointing to /proc/self/mounts. /proc/self/mounts is read-only,
		// so any helper that would normally write to /etc/mtab should be
		// configured not to do so. Because systemd does not provide a way to
		// disable this behavior for mount helpers that do not write to /etc/mtab,
		// we ignore the flag.
		case s == "-n":
			continue
		// Is this an options string following a "-o"?
		case i > 0 && args[i-1] == "-o":
			mount.ParseOptions(opts, s)
		// Is this the device?
		case positionalCount == 0:
			device = s
			positionalCount++
		// Is this the mount point?
		case positionalCount == 1:
			mountPoint = s
			positionalCount++
		default:
			err = fmt.Errorf("Unexpected arg %d: %q", i, s)
			return
		}
	}
	// Both the device and the mount point are mandatory.
	if positionalCount != 2 {
		err = fmt.Errorf("Expected two positional arguments; got %d.", positionalCount)
		return
	}
	return
}
// run is the mount helper's real entry point: it parses the mount(8)-style
// arguments, translates them to gcsfuse flags, and invokes the gcsfuse
// binary, returning only after gcsfuse has reported the mount result.
func run(args []string) (err error) {
	// If invoked with a single "--help" argument, print a usage message and exit
	// successfully.
	if len(args) == 2 && args[1] == "--help" {
		fmt.Fprintf(
			os.Stderr,
			"Usage: %s [-o options] bucket_name mount_point\n",
			args[0])
		return
	}
	// Find the path to gcsfuse.
	gcsfusePath, err := findGcsfuse()
	if err != nil {
		err = fmt.Errorf("findGcsfuse: %v", err)
		return
	}
	// Find the path to fusermount.
	fusermountPath, err := findFusermount()
	if err != nil {
		err = fmt.Errorf("findFusermount: %v", err)
		return
	}
	// Attempt to parse arguments.
	device, mountPoint, opts, err := parseArgs(args)
	if err != nil {
		err = fmt.Errorf("parseArgs: %v", err)
		return
	}
	// Choose gcsfuse args.
	gcsfuseArgs, err := makeGcsfuseArgs(device, mountPoint, opts)
	if err != nil {
		err = fmt.Errorf("makeGcsfuseArgs: %v", err)
		return
	}
	fmt.Fprintf(
		os.Stderr,
		"Calling gcsfuse with arguments: %s\n",
		strings.Join(gcsfuseArgs, " "))
	// Run gcsfuse. Note cmd.Env starts empty, so the child sees ONLY the
	// variables added below — not the parent environment.
	cmd := exec.Command(gcsfusePath, gcsfuseArgs...)
	// gcsfuse needs fusermount on its PATH.
	cmd.Env = append(cmd.Env, fmt.Sprintf("PATH=%s", path.Dir(fusermountPath)))
	// Pass through the https_proxy/http_proxy environment variable,
	// in case the host requires a proxy server to reach the GCS endpoint.
	// https_proxy takes precedence over http_proxy, in case both are set.
	if p, ok := os.LookupEnv("https_proxy"); ok {
		cmd.Env = append(cmd.Env, fmt.Sprintf("https_proxy=%s", p))
	} else if p, ok := os.LookupEnv("http_proxy"); ok {
		cmd.Env = append(cmd.Env, fmt.Sprintf("http_proxy=%s", p))
	}
	// Let gcsfuse's own output reach the user.
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	err = cmd.Run()
	if err != nil {
		err = fmt.Errorf("running gcsfuse: %v", err)
		return
	}
	return
}
// main delegates to run and exits non-zero on any error.
func main() {
	err := run(os.Args)
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}
|
package main
import (
"encoding/json"
"expvar"
"fmt"
"net/http"
"net/http/pprof"
"os"
"path/filepath"
"sync"
"time"
"github.com/codahale/hdrhistogram"
)
// expStats is the process-wide stats collector; it stays nil unless the
// debug server was started with expvars enabled.
var expStats *sequinsStats

// sequinsStats aggregates request and disk metrics for the expvar
// endpoint. The exported fields are the published snapshot (guarded by
// lock); the unexported counters and the histogram are working state
// owned by the updateRequestStats goroutine.
type sequinsStats struct {
	Qps struct {
		Total    int64
		ByStatus map[string]int64
		// Working counters for the current one-second window.
		total     int64
		status200 int64
		status400 int64
		status404 int64
		status500 int64
		status501 int64
		status502 int64
	}
	// Latency percentiles, in milliseconds, from the last snapshot.
	Latency struct {
		Max   float64
		Mean  float64
		P9999 float64
		P99   float64
		P95   float64
		P90   float64
		P75   float64
		P50   float64
		P25   float64
	}
	// latencyHist records per-request latencies (in microseconds) for the
	// current window.
	latencyHist *hdrhistogram.Histogram
	// queries carries finished-request samples from handlers to the
	// collector goroutine.
	queries chan queryStats
	// DiskUsed is the total size of files under the local store.
	DiskUsed int64
	lock     sync.RWMutex
}

// queryStats is a single finished-request sample.
type queryStats struct {
	duration time.Duration
	status   int
}
// startDebugServer starts the debug HTTP server in the background,
// optionally exposing expvars (which also activates the global stats
// collector) and the pprof handlers, as configured.
func startDebugServer(config sequinsConfig) {
	mux := http.NewServeMux()
	s := &http.Server{
		Addr:    config.Debug.Bind,
		Handler: mux,
	}
	if config.Debug.Expvars {
		mux.HandleFunc("/debug/vars", expvarHandler)
		expStats = newStats(config.LocalStore)
		expvar.Publish("sequins", expStats)
	}
	if config.Debug.Pprof {
		// The pprof handlers normally register on http.DefaultServeMux;
		// wire them onto our private mux explicitly.
		mux.Handle("/debug/pprof/", http.HandlerFunc(pprof.Index))
		mux.Handle("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline))
		mux.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile))
		mux.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol))
		mux.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace))
	}
	// Best-effort: a bind failure is silently ignored.
	go s.ListenAndServe()
}
// expvarHandler is copied from the stdlib.
func expvarHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
fmt.Fprintf(w, "{\n")
first := true
expvar.Do(func(kv expvar.KeyValue) {
if !first {
fmt.Fprintf(w, ",\n")
}
first = false
fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
})
fmt.Fprintf(w, "\n}\n")
}
// newStats creates a stats collector and starts its two background
// goroutines: one folding per-request samples into per-second snapshots,
// and one periodically measuring disk usage under localStorePath.
func newStats(localStorePath string) *sequinsStats {
	s := &sequinsStats{
		// Latencies are recorded in microseconds, bounded at 10s, with 5
		// significant figures of precision.
		latencyHist: hdrhistogram.New(0, int64(10*time.Second/time.Microsecond), 5),
		// Buffered so request handlers can drop samples rather than block
		// if the collector falls behind.
		queries: make(chan queryStats, 1024),
	}
	go s.updateRequestStats()
	go s.updateDiskStats(localStorePath)
	return s
}
// updateRequestStats is a long-running goroutine that drains the queries
// channel into the working counters/histogram and, once per second,
// publishes a snapshot and resets the working state. The unexported
// counters are touched only by this goroutine, so they need no locking.
func (s *sequinsStats) updateRequestStats() {
	ticker := time.NewTicker(time.Second)
	for {
		select {
		case <-ticker.C:
			// Publish the window, then start a fresh one.
			s.snapshotRequestStats()
			s.latencyHist.Reset()
			s.Qps.total = 0
			s.Qps.status200 = 0
			s.Qps.status400 = 0
			s.Qps.status404 = 0
			s.Qps.status500 = 0
			s.Qps.status501 = 0
			s.Qps.status502 = 0
		case q := <-s.queries:
			// Latency is recorded in microseconds.
			s.latencyHist.RecordValue(int64(q.duration / time.Microsecond))
			s.Qps.total++
			switch q.status {
			case 0, 200:
				// Status 0 means WriteHeader was never called, which the
				// net/http machinery treats as an implicit 200.
				s.Qps.status200++
			case 400:
				s.Qps.status400++
			case 404:
				s.Qps.status404++
			case 500:
				s.Qps.status500++
			case 501:
				s.Qps.status501++
			case 502:
				s.Qps.status502++
			default:
				// NOTE(review): any status outside this set (e.g. 503)
				// panics and takes down the process — confirm that the
				// server can never emit other codes.
				panic("unknown http status code")
			}
		}
	}
}
// snapshotRequestStats publishes the current window's working counters and
// latency histogram into the exported (JSON-visible) fields, converting
// latencies from microseconds to milliseconds.
func (s *sequinsStats) snapshotRequestStats() {
	s.lock.Lock()
	defer s.lock.Unlock()

	s.Qps.Total = s.Qps.total
	s.Qps.ByStatus = map[string]int64{
		"200": s.Qps.status200,
		"400": s.Qps.status400,
		"404": s.Qps.status404,
		"500": s.Qps.status500,
		"501": s.Qps.status501,
		"502": s.Qps.status502,
	}

	// The histogram records microseconds; expose milliseconds.
	const ms = float64(1000)
	quantile := func(p float64) float64 {
		return float64(s.latencyHist.ValueAtQuantile(p)) / ms
	}
	s.Latency.Max = float64(s.latencyHist.Max()) / ms
	s.Latency.Mean = s.latencyHist.Mean() / ms
	s.Latency.P9999 = quantile(99.99)
	s.Latency.P99 = quantile(99.0)
	s.Latency.P95 = quantile(95.0)
	s.Latency.P90 = quantile(90.0)
	s.Latency.P75 = quantile(75.0)
	s.Latency.P50 = quantile(50.0)
	s.Latency.P25 = quantile(25.0)
}
// updateDiskStats measures disk usage under path immediately, then
// refreshes the measurement once a minute, forever.
func (s *sequinsStats) updateDiskStats(path string) {
	s.calculateDiskUsage(path)
	ticker := time.NewTicker(time.Minute)
	for {
		<-ticker.C
		s.calculateDiskUsage(path)
	}
}
// calculateDiskUsage walks the tree under path, sums the sizes of all
// regular entries, and stores the total in DiskUsed. On any walk error
// the previous value is kept.
func (s *sequinsStats) calculateDiskUsage(path string) {
	var total int64
	walkErr := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
		if info != nil && !info.IsDir() {
			total += info.Size()
		}
		// Propagate the walk error, aborting on the first failure.
		return err
	})
	if walkErr != nil {
		return
	}

	s.lock.Lock()
	s.DiskUsed = total
	s.lock.Unlock()
}
// String renders the current snapshot as JSON; it is what expvar publishes
// under the "sequins" key.
func (s *sequinsStats) String() string {
	s.lock.RLock()
	defer s.lock.RUnlock()

	encoded, err := json.Marshal(s)
	if err != nil {
		// Marshaling a plain struct of numbers should never fail.
		panic(err)
	}
	return string(encoded)
}
// trackingHandler is an http.Handler that tracks request times.
type trackingHandler struct {
	*sequins
}

// trackQueries wraps a sequins instance so that query latencies and status
// codes are reported to the global stats collector.
func trackQueries(s *sequins) trackingHandler {
	return trackingHandler{s}
}
// ServeHTTP delegates to the wrapped sequins handler, timing the request
// unless it is a proxied query (proxy= parameter set), which the
// originating node already tracks.
func (t trackingHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if r.URL.Query().Get("proxy") == "" {
		w = trackQuery(w)
		defer w.(*queryTracker).done()
	}
	t.sequins.ServeHTTP(w, r)
}
// queryTracker wraps an http.ResponseWriter, recording when the request
// started and which status code was written.
type queryTracker struct {
	http.ResponseWriter
	start time.Time
	// status stays 0 if WriteHeader is never called (an implicit 200).
	status int
}

// trackQuery starts timing a request.
func trackQuery(w http.ResponseWriter) *queryTracker {
	return &queryTracker{
		ResponseWriter: w,
		start:          time.Now(),
	}
}

// WriteHeader captures the status code before delegating to the wrapped
// ResponseWriter.
func (t *queryTracker) WriteHeader(status int) {
	t.status = status
	t.ResponseWriter.WriteHeader(status)
}
// done reports the finished request to the global stats collector, if one
// is active. The send is non-blocking: when the stats channel is full the
// sample is dropped rather than stalling the response path.
func (t *queryTracker) done() {
	if expStats == nil {
		return
	}
	q := queryStats{
		// time.Since is the idiomatic form of time.Now().Sub(t.start).
		duration: time.Since(t.start),
		status:   t.status,
	}
	select {
	case expStats.queries <- q:
	default:
	}
}
Don't track latency for status pages
package main
import (
"encoding/json"
"expvar"
"fmt"
"net/http"
"net/http/pprof"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/codahale/hdrhistogram"
)
// expStats is the process-wide stats collector; it stays nil unless the
// debug server was started with expvars enabled.
var expStats *sequinsStats

// sequinsStats aggregates request and disk metrics for the expvar
// endpoint. The exported fields are the published snapshot (guarded by
// lock); the unexported counters and the histogram are working state
// owned by the updateRequestStats goroutine.
type sequinsStats struct {
	Qps struct {
		Total    int64
		ByStatus map[string]int64
		// Working counters for the current one-second window.
		total     int64
		status200 int64
		status400 int64
		status404 int64
		status500 int64
		status501 int64
		status502 int64
	}
	// Latency percentiles, in milliseconds, from the last snapshot.
	Latency struct {
		Max   float64
		Mean  float64
		P9999 float64
		P99   float64
		P95   float64
		P90   float64
		P75   float64
		P50   float64
		P25   float64
	}
	// latencyHist records per-request latencies (in microseconds) for the
	// current window.
	latencyHist *hdrhistogram.Histogram
	// queries carries finished-request samples from handlers to the
	// collector goroutine.
	queries chan queryStats
	// DiskUsed is the total size of files under the local store.
	DiskUsed int64
	lock     sync.RWMutex
}

// queryStats is a single finished-request sample.
type queryStats struct {
	duration time.Duration
	status   int
}
// startDebugServer starts the debug HTTP server in the background,
// optionally exposing expvars (which also activates the global stats
// collector) and the pprof handlers, as configured.
func startDebugServer(config sequinsConfig) {
	mux := http.NewServeMux()
	s := &http.Server{
		Addr:    config.Debug.Bind,
		Handler: mux,
	}
	if config.Debug.Expvars {
		mux.HandleFunc("/debug/vars", expvarHandler)
		expStats = newStats(config.LocalStore)
		expvar.Publish("sequins", expStats)
	}
	if config.Debug.Pprof {
		// The pprof handlers normally register on http.DefaultServeMux;
		// wire them onto our private mux explicitly.
		mux.Handle("/debug/pprof/", http.HandlerFunc(pprof.Index))
		mux.Handle("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline))
		mux.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile))
		mux.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol))
		mux.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace))
	}
	// Best-effort: a bind failure is silently ignored.
	go s.ListenAndServe()
}
// expvarHandler writes every published expvar as one JSON object.
// expvarHandler is copied from the stdlib, which registers it only on
// http.DefaultServeMux.
func expvarHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	fmt.Fprintf(w, "{\n")
	first := true
	expvar.Do(func(kv expvar.KeyValue) {
		if !first {
			fmt.Fprintf(w, ",\n")
		}
		first = false
		fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
	})
	fmt.Fprintf(w, "\n}\n")
}
// newStats creates a stats collector and starts its two background
// goroutines: one folding per-request samples into per-second snapshots,
// and one periodically measuring disk usage under localStorePath.
func newStats(localStorePath string) *sequinsStats {
	s := &sequinsStats{
		// Latencies are recorded in microseconds, bounded at 10s, with 5
		// significant figures of precision.
		latencyHist: hdrhistogram.New(0, int64(10*time.Second/time.Microsecond), 5),
		// Buffered so request handlers can drop samples rather than block
		// if the collector falls behind.
		queries: make(chan queryStats, 1024),
	}
	go s.updateRequestStats()
	go s.updateDiskStats(localStorePath)
	return s
}
// updateRequestStats is a long-running goroutine that drains the queries
// channel into the working counters/histogram and, once per second,
// publishes a snapshot and resets the working state. The unexported
// counters are touched only by this goroutine, so they need no locking.
func (s *sequinsStats) updateRequestStats() {
	ticker := time.NewTicker(time.Second)
	for {
		select {
		case <-ticker.C:
			// Publish the window, then start a fresh one.
			s.snapshotRequestStats()
			s.latencyHist.Reset()
			s.Qps.total = 0
			s.Qps.status200 = 0
			s.Qps.status400 = 0
			s.Qps.status404 = 0
			s.Qps.status500 = 0
			s.Qps.status501 = 0
			s.Qps.status502 = 0
		case q := <-s.queries:
			// Latency is recorded in microseconds.
			s.latencyHist.RecordValue(int64(q.duration / time.Microsecond))
			s.Qps.total++
			switch q.status {
			case 0, 200:
				// Status 0 means WriteHeader was never called, which the
				// net/http machinery treats as an implicit 200.
				s.Qps.status200++
			case 400:
				s.Qps.status400++
			case 404:
				s.Qps.status404++
			case 500:
				s.Qps.status500++
			case 501:
				s.Qps.status501++
			case 502:
				s.Qps.status502++
			default:
				// NOTE(review): any status outside this set (e.g. 503)
				// panics and takes down the process — confirm that the
				// server can never emit other codes.
				panic("unknown http status code")
			}
		}
	}
}
// snapshotRequestStats publishes the current window's working counters and
// latency histogram into the exported (JSON-visible) fields, converting
// latencies from microseconds to milliseconds.
func (s *sequinsStats) snapshotRequestStats() {
	s.lock.Lock()
	defer s.lock.Unlock()
	s.Qps.Total = s.Qps.total
	s.Qps.ByStatus = make(map[string]int64)
	s.Qps.ByStatus["200"] = s.Qps.status200
	s.Qps.ByStatus["400"] = s.Qps.status400
	s.Qps.ByStatus["404"] = s.Qps.status404
	s.Qps.ByStatus["500"] = s.Qps.status500
	s.Qps.ByStatus["501"] = s.Qps.status501
	s.Qps.ByStatus["502"] = s.Qps.status502
	// The histogram records microseconds; expose milliseconds.
	ms := float64(1000)
	s.Latency.Max = float64(s.latencyHist.Max()) / ms
	s.Latency.Mean = s.latencyHist.Mean() / ms
	s.Latency.P9999 = float64(s.latencyHist.ValueAtQuantile(99.99)) / ms
	s.Latency.P99 = float64(s.latencyHist.ValueAtQuantile(99.0)) / ms
	s.Latency.P95 = float64(s.latencyHist.ValueAtQuantile(95.0)) / ms
	s.Latency.P90 = float64(s.latencyHist.ValueAtQuantile(90.0)) / ms
	s.Latency.P75 = float64(s.latencyHist.ValueAtQuantile(75.0)) / ms
	s.Latency.P50 = float64(s.latencyHist.ValueAtQuantile(50.0)) / ms
	s.Latency.P25 = float64(s.latencyHist.ValueAtQuantile(25.0)) / ms
}
// updateDiskStats measures disk usage under path immediately, then
// refreshes the measurement once a minute, forever.
func (s *sequinsStats) updateDiskStats(path string) {
	s.calculateDiskUsage(path)
	ticker := time.NewTicker(1 * time.Minute)
	for range ticker.C {
		s.calculateDiskUsage(path)
	}
}
// calculateDiskUsage walks the tree under path, sums the sizes of all
// regular entries, and stores the total in DiskUsed. On any walk error the
// previous value is kept.
func (s *sequinsStats) calculateDiskUsage(path string) {
	var size int64
	err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
		if info != nil && !info.IsDir() {
			size += info.Size()
		}
		// Propagate the walk error, aborting on the first failure.
		return err
	})
	if err == nil {
		s.lock.Lock()
		defer s.lock.Unlock()
		s.DiskUsed = size
	}
}
// String renders the current snapshot as JSON; it is what expvar publishes
// under the "sequins" key.
func (s *sequinsStats) String() string {
	s.lock.RLock()
	defer s.lock.RUnlock()
	b, err := json.Marshal(s)
	if err != nil {
		// Marshaling a plain struct of numbers should never fail.
		panic(err)
	}
	return string(b)
}
// trackingHandler is an http.Handler that tracks request times.
type trackingHandler struct {
	*sequins
}

// trackQueries wraps a sequins instance so that query latencies and status
// codes are reported to the global stats collector.
func trackQueries(s *sequins) trackingHandler {
	return trackingHandler{s}
}
// ServeHTTP delegates to the wrapped sequins handler, timing the request
// unless it is a status page or a proxied query.
func (t trackingHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// Don't track queries to the status pages, and don't track proxied
	// queries.
	// The "/" check presumably distinguishes key lookups (two or more path
	// segments) from the status pages at "/" and "/<db>" — verify against
	// the router.
	path := strings.TrimPrefix(r.URL.Path, "/")
	if strings.Index(path, "/") > 0 && r.URL.Query().Get("proxy") == "" {
		w = trackQuery(w)
		defer w.(*queryTracker).done()
	}
	t.sequins.ServeHTTP(w, r)
}
// queryTracker wraps an http.ResponseWriter, recording when the request
// started and which status code was written.
type queryTracker struct {
	http.ResponseWriter
	start time.Time
	// status stays 0 if WriteHeader is never called (an implicit 200).
	status int
}

// trackQuery starts timing a request.
func trackQuery(w http.ResponseWriter) *queryTracker {
	return &queryTracker{
		ResponseWriter: w,
		start:          time.Now(),
	}
}

// WriteHeader captures the status code before delegating to the wrapped
// ResponseWriter.
func (t *queryTracker) WriteHeader(status int) {
	t.status = status
	t.ResponseWriter.WriteHeader(status)
}
// done reports the finished request to the global stats collector, if one
// is active. The send is non-blocking: when the stats channel is full the
// sample is dropped rather than stalling the response path.
func (t *queryTracker) done() {
	if expStats == nil {
		return
	}
	q := queryStats{
		// time.Since is the idiomatic form of time.Now().Sub(t.start).
		duration: time.Since(t.start),
		status:   t.status,
	}
	select {
	case expStats.queries <- q:
	default:
	}
}
|
package main
/* HOW TO RUN THE TESTS
Be sure you have a running ngrok instance. This is needed so klient can connect
to our kontrol. Run it with:
./ngrok -authtoken="CMY-UsZMWdx586A3tA0U" -subdomain="kloud-test" 4099
The Postgres and MongoDB URLs are the same as in the koding dev config. Below is an example go test command:
KLOUD_KONTROL_URL="http://kloud-test.ngrok.com/kite" KLOUD_MONGODB_URL=192.168.59.103:27017/koding KONTROL_POSTGRES_PASSWORD=kontrolapplication KONTROL_STORAGE=postgres KONTROL_POSTGRES_USERNAME=kontrolapplication KONTROL_POSTGRES_DBNAME=social KONTROL_POSTGRES_HOST=192.168.59.103 go test -v -timeout 20m
*/
import (
"fmt"
"log"
"net/url"
"os"
"strconv"
"strings"
"testing"
"time"
"labix.org/v2/mgo"
"labix.org/v2/mgo/bson"
"github.com/koding/kite"
"github.com/koding/kite/config"
"github.com/koding/kite/kontrol"
"github.com/koding/kite/protocol"
"github.com/koding/kite/testkeys"
"github.com/koding/kite/testutil"
"koding/db/models"
"koding/db/mongodb/modelhelper"
"koding/kites/kloud/keys"
"koding/kites/kloud/kloud"
"koding/kites/kloud/koding"
"koding/kites/kloud/machinestate"
"koding/kites/kloud/multiec2"
kloudprotocol "koding/kites/kloud/protocol"
"koding/kites/kloud/sshutil"
"github.com/mitchellh/goamz/aws"
"github.com/mitchellh/goamz/ec2"
)
// Package-level test fixtures, wired up once in init().
var (
	kloudKite *kite.Kite       // the kloud kite under test
	kld       *kloud.Kloud     // kloud instance backing kloudKite's handlers
	remote    *kite.Client     // client the tests use to call kloud methods
	conf      *config.Config   // shared kite configuration
	provider  *koding.Provider // koding provider registered with kloud
)

// args is the request payload for kloud's machine methods; MachineId is the
// hex ObjectId of a jMachines document.
type args struct {
	MachineId string
}
// init boots a self-contained test environment: a local kontrol kite backed
// by Postgres, the kloud kite with all machine handlers registered, and a
// "user" kite whose connection to kloud is stored in the package-level
// remote for the tests to use. Any failure is fatal.
func init() {
	conf = config.New()
	conf.Username = "testuser"
	conf.KontrolURL = os.Getenv("KLOUD_KONTROL_URL")
	if conf.KontrolURL == "" {
		conf.KontrolURL = "http://localhost:4099/kite"
	}
	conf.KontrolKey = testkeys.Public
	conf.KontrolUser = "testuser"
	conf.KiteKey = testutil.NewKiteKey().Raw

	// Power up our own kontrol kite for self-contained tests.
	kontrol.DefaultPort = 4099
	kntrl := kontrol.New(conf.Copy(), "0.1.0", testkeys.Public, testkeys.Private)
	kntrl.SetStorage(kontrol.NewPostgres(nil, kntrl.Kite.Log))
	go kntrl.Run()
	<-kntrl.Kite.ServerReadyNotify()

	// Power up kloud kite.
	kloudKite = kite.New("kloud", "0.0.1")
	kloudKite.Config = conf.Copy()
	kloudKite.Config.Port = 4002
	kiteURL := &url.URL{Scheme: "http", Host: "localhost:4002", Path: "/kite"}
	_, err := kloudKite.Register(kiteURL)
	if err != nil {
		log.Fatal(err)
	}

	provider = newKodingProvider()

	// Add Kloud handlers. Assign to the package-level kld: the original code
	// used `kld := ...`, which shadowed the global and left it nil forever.
	kld = newKloud(provider)
	kloudKite.HandleFunc("build", kld.Build)
	kloudKite.HandleFunc("destroy", kld.Destroy)
	kloudKite.HandleFunc("start", kld.Start)
	kloudKite.HandleFunc("stop", kld.Stop)
	kloudKite.HandleFunc("reinit", kld.Reinit)
	kloudKite.HandleFunc("resize", kld.Resize)
	kloudKite.HandleFunc("event", kld.Event)

	go kloudKite.Run()
	<-kloudKite.ServerReadyNotify()

	user := kite.New("user", "0.0.1")
	user.Config = conf.Copy()

	kloudQuery := &protocol.KontrolQuery{
		Username:    "testuser",
		Environment: conf.Environment,
		Name:        "kloud",
	}
	kites, err := user.GetKites(kloudQuery)
	if err != nil {
		log.Fatal(err)
	}

	// Get the caller
	remote = kites[0]
	if err := remote.Dial(); err != nil {
		log.Fatal(err)
	}
}
// Main VM action tests (build, start, stop, destroy, resize, reinit)
// TestPing verifies basic connectivity to the kloud kite before the heavier
// machine-lifecycle tests run.
func TestPing(t *testing.T) {
	_, err := remote.Tell("kite.ping")
	if err != nil {
		t.Fatal(err)
	}
}
// TestSingleMachine exercises the full machine lifecycle for one freshly
// created test user: build, SSH check, stop, start, resize, reinit and
// destroy, asserting after each state change that the now-invalid methods
// are rejected.
func TestSingleMachine(t *testing.T) {
	userData, err := createUser()
	if err != nil {
		t.Fatal(err)
	}

	// build
	if err := build(userData.MachineId); err != nil {
		t.Error(err)
	}

	// now try to ssh into the machine with temporary private key we created in
	// the beginning
	if err := checkSSHKey(userData.MachineId, userData.PrivateKey); err != nil {
		t.Error(err)
	}

	// invalid calls after build
	if err := build(userData.MachineId); err == nil {
		t.Error("`build` method can not be called on `running` machines.")
	}

	// stop
	log.Println("Stopping machine")
	if err := stop(userData.MachineId); err != nil {
		t.Error(err)
	}

	if err := build(userData.MachineId); err == nil {
		t.Error("`build` method can not be called on `stopped` machines.")
	}

	if err := stop(userData.MachineId); err == nil {
		t.Error("`stop` method can not be called on `stopped` machines.")
	}

	// start
	log.Println("Starting machine")
	if err := start(userData.MachineId); err != nil {
		t.Error(err)
	}

	// resize: bump the desired storage size in mongo first, then ask kloud to
	// resize, then verify against what EC2 actually reports.
	log.Println("Resizing machine")
	storageWant := 5
	err = provider.Session.Run("jMachines", func(c *mgo.Collection) error {
		return c.UpdateId(
			bson.ObjectIdHex(userData.MachineId),
			bson.M{
				"$set": bson.M{
					"meta.storage_size": storageWant,
				},
			},
		)
	})
	if err != nil {
		t.Error(err)
	}

	if err := resize(userData.MachineId); err != nil {
		t.Error(err)
	}

	storageGot, err := getAmazonStorageSize(userData.MachineId)
	if err != nil {
		t.Error(err)
	}

	if storageGot != storageWant {
		t.Errorf("Resizing completed but storage sizes do not match. Want: %dGB, Got: %dGB",
			storageWant,
			storageGot,
		)
	}

	// reinit
	log.Println("Reinitializing machine")
	if err := reinit(userData.MachineId); err != nil {
		t.Error(err)
	}

	// destroy
	log.Println("Destroying machine")
	if err := destroy(userData.MachineId); err != nil {
		t.Error(err)
	}

	if err := stop(userData.MachineId); err == nil {
		t.Error("`stop` method can not be called on `terminated` machines.")
	}

	if err := start(userData.MachineId); err == nil {
		t.Error("`start` method can not be called on `terminated` machines.")
	}

	if err := destroy(userData.MachineId); err == nil {
		t.Error("`destroy` method can not be called on `terminated` machines.")
	}

	if err := resize(userData.MachineId); err == nil {
		t.Error("`resize` method can not be called on `terminated` machines.")
	}

	if err := reinit(userData.MachineId); err == nil {
		t.Error("`reinit` method can not be called on `terminated` machines.")
	}
}
// singleUser bundles what createUser produces: the new machine's document id
// and the temporary SSH key pair deployed to it.
type singleUser struct {
	MachineId  string
	PrivateKey string
	PublicKey  string
}
// createUser creates a test user in jUsers and a single jMachine document.
func createUser() (*singleUser, error) {
privateKey, publicKey, err := sshutil.TemporaryKey()
if err != nil {
return nil, err
}
username := "testuser"
// cleanup old document
if err := provider.Session.Run("jUsers", func(c *mgo.Collection) error {
return c.Remove(bson.M{"username": username})
}); err != nil {
return nil, err
}
userId := bson.NewObjectId()
user := &models.User{
ObjectId: userId,
Email: "testuser@testuser.com",
LastLoginDate: time.Now().UTC(),
RegisteredAt: time.Now().UTC(),
Name: username, // bson equivelant is username
Password: "somerandomnumbers",
Status: "confirmed",
SshKeys: []struct {
Title string `bson:"title"`
Key string `bson:"key"`
}{
{Key: publicKey},
},
}
if err := provider.Session.Run("jUsers", func(c *mgo.Collection) error {
return c.Insert(&user)
}); err != nil {
return nil, err
}
// later we can add more users with "Owner:false" to test sharing capabilities
users := []models.Permissions{
{Id: userId, Sudo: true, Owner: true},
}
machineId := bson.NewObjectId()
machine := &koding.MachineDocument{
Id: machineId,
Label: "",
Domain: username + ".dev.koding.io",
Credential: username,
Provider: "koding",
CreatedAt: time.Now().UTC(),
Meta: bson.M{
"region": "eu-west-1",
"instance_type": "t2.micro",
"storage_size": 3,
"alwaysOn": false,
},
Users: users,
Groups: make([]models.Permissions, 0),
}
machine.Assignee.InProgress = false
machine.Assignee.AssignedAt = time.Now().UTC()
machine.Status.State = machinestate.NotInitialized.String()
machine.Status.ModifiedAt = time.Now().UTC()
if err := provider.Session.Run("jMachines", func(c *mgo.Collection) error {
return c.Insert(&machine)
}); err != nil {
return nil, err
}
return &singleUser{
MachineId: machineId.Hex(),
PrivateKey: privateKey,
PublicKey: publicKey,
}, nil
}
// build asks kloud to build the machine with the given id and blocks until
// the machine reaches the running state (or the event wait times out).
func build(id string) error {
	buildArgs := &args{
		MachineId: id,
	}

	resp, err := remote.Tell("build", buildArgs)
	if err != nil {
		return err
	}

	// Decode only to surface malformed responses; the result is not used.
	var result kloud.ControlResult
	if err := resp.Unmarshal(&result); err != nil {
		return err
	}

	eArgs := kloud.EventArgs([]kloud.EventArg{
		{EventId: buildArgs.MachineId, Type: "build"},
	})

	return listenEvent(eArgs, machinestate.Running)
}
// checkSSHKey connects to the machine's IP over SSH as root using the given
// temporary private key and verifies `whoami` prints "root", proving the key
// deployed during build actually works.
func checkSSHKey(id, privateKey string) error {
	// now try to ssh into the machine with temporary private key we created in
	// the beginning
	machine, err := provider.Get(id)
	if err != nil {
		return err
	}

	sshConfig, err := sshutil.SshConfig("root", privateKey)
	if err != nil {
		return err
	}

	log.Printf("Connecting to machine with ip '%s' via ssh\n", machine.IpAddress)
	sshClient, err := sshutil.ConnectSSH(machine.IpAddress+":22", sshConfig)
	if err != nil {
		return err
	}

	log.Printf("Testing SSH deployment")
	output, err := sshClient.StartCommand("whoami")
	if err != nil {
		return err
	}

	if strings.TrimSpace(string(output)) != "root" {
		return fmt.Errorf("Whoami result should be root, got: %s", string(output))
	}

	return nil
}
// destroy asks kloud to destroy the machine with the given id and blocks
// until it reaches the terminated state.
func destroy(id string) error {
	destroyArgs := &args{
		MachineId: id,
	}

	resp, err := remote.Tell("destroy", destroyArgs)
	if err != nil {
		return err
	}

	// Decode only to surface malformed responses; the result is not used.
	var result kloud.ControlResult
	if err := resp.Unmarshal(&result); err != nil {
		return err
	}

	eArgs := kloud.EventArgs([]kloud.EventArg{
		{EventId: destroyArgs.MachineId, Type: "destroy"},
	})

	return listenEvent(eArgs, machinestate.Terminated)
}
// start asks kloud to start the machine with the given id and blocks until
// it reaches the running state.
func start(id string) error {
	startArgs := &args{
		MachineId: id,
	}

	resp, err := remote.Tell("start", startArgs)
	if err != nil {
		return err
	}

	// Decode only to surface malformed responses; the result is not used.
	var result kloud.ControlResult
	if err := resp.Unmarshal(&result); err != nil {
		return err
	}

	eArgs := kloud.EventArgs([]kloud.EventArg{
		{EventId: startArgs.MachineId, Type: "start"},
	})

	return listenEvent(eArgs, machinestate.Running)
}
// stop asks kloud to stop the machine with the given id and blocks until it
// reaches the stopped state.
func stop(id string) error {
	stopArgs := &args{
		MachineId: id,
	}

	resp, err := remote.Tell("stop", stopArgs)
	if err != nil {
		return err
	}

	// Decode only to surface malformed responses; the result is not used.
	var result kloud.ControlResult
	if err := resp.Unmarshal(&result); err != nil {
		return err
	}

	eArgs := kloud.EventArgs([]kloud.EventArg{
		{EventId: stopArgs.MachineId, Type: "stop"},
	})

	return listenEvent(eArgs, machinestate.Stopped)
}
// reinit asks kloud to reinitialize the machine with the given id and blocks
// until it reaches the running state again.
func reinit(id string) error {
	reinitArgs := &args{
		MachineId: id,
	}

	resp, err := remote.Tell("reinit", reinitArgs)
	if err != nil {
		return err
	}

	// Decode only to surface malformed responses; the result is not used.
	var result kloud.ControlResult
	if err := resp.Unmarshal(&result); err != nil {
		return err
	}

	eArgs := kloud.EventArgs([]kloud.EventArg{
		{EventId: reinitArgs.MachineId, Type: "reinit"},
	})

	return listenEvent(eArgs, machinestate.Running)
}
// resize asks kloud to resize the machine with the given id (to the storage
// size already written into its mongo document) and blocks until it reaches
// the running state again.
func resize(id string) error {
	resizeArgs := &args{
		MachineId: id,
	}

	resp, err := remote.Tell("resize", resizeArgs)
	if err != nil {
		return err
	}

	// Decode only to surface malformed responses; the result is not used.
	var result kloud.ControlResult
	if err := resp.Unmarshal(&result); err != nil {
		return err
	}

	eArgs := kloud.EventArgs([]kloud.EventArg{
		{EventId: resizeArgs.MachineId, Type: "resize"},
	})

	return listenEvent(eArgs, machinestate.Running)
}
// listenEvent polls kloud's "event" method with the given arguments every
// two seconds until the desiredState is reported, the event carries an
// error, or 10 minutes have elapsed.
func listenEvent(args kloud.EventArgs, desiredState machinestate.State) error {
	tryUntil := time.Now().Add(time.Minute * 10)
	for {
		resp, err := remote.Tell("event", args)
		if err != nil {
			return err
		}

		var events []kloud.EventResponse
		if err := resp.Unmarshal(&events); err != nil {
			return err
		}
		// Guard against an empty response; indexing events[0] blindly would
		// panic and take the whole test binary down.
		if len(events) == 0 {
			return fmt.Errorf("no event response received while waiting for state %s", desiredState)
		}

		e := events[0]
		if e.Error != nil {
			return e.Error
		}

		if e.Event.Status == desiredState {
			return nil
		}

		if time.Now().After(tryUntil) {
			return fmt.Errorf("Timeout while waiting for state %s", desiredState)
		}

		time.Sleep(2 * time.Second) // still pending; poll again
	}
}
// newKodingProvider builds a koding.Provider for the tests: mongo comes from
// KLOUD_MONGODB_URL (panics if unset), EC2 clients cover four regions, and
// Test mode plus a no-op TestChecker disable plan enforcement.
func newKodingProvider() *koding.Provider {
	// SECURITY(review): hard-coded AWS credentials committed to source.
	// These should be rotated and loaded from the environment instead.
	auth := aws.Auth{
		AccessKey: "AKIAJFKDHRJ7Q5G4MOUQ",
		SecretKey: "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",
	}

	mongoURL := os.Getenv("KLOUD_MONGODB_URL")
	if mongoURL == "" {
		panic("KLOUD_MONGODB_URL is not set")
	}

	modelhelper.Initialize(mongoURL)
	db := modelhelper.Mongo

	domainStorage := koding.NewDomainStorage(db)
	testChecker := &TestChecker{}

	return &koding.Provider{
		Session:           db,
		Kite:              kloudKite,
		Log:               newLogger("koding", true),
		KontrolURL:        conf.KontrolURL,
		KontrolPrivateKey: testkeys.Private,
		KontrolPublicKey:  testkeys.Public,
		Test:              true,
		EC2Clients: multiec2.New(auth, []string{
			"us-east-1",
			"ap-southeast-1",
			"us-west-2",
			"eu-west-1",
		}),
		DNS:           koding.NewDNSClient("dev.koding.io", auth),
		DomainStorage: domainStorage,
		Bucket:        koding.NewBucket("koding-klient", "development/latest", auth),
		KeyName:       keys.DeployKeyName,
		PublicKey:     keys.DeployPublicKey,
		PrivateKey:    keys.DeployPrivateKey,
		// Every machine gets the permissive TestChecker, so plan limits never
		// interfere with the lifecycle tests.
		PlanChecker: func(_ *kloudprotocol.Machine) (koding.Checker, error) {
			return testChecker, nil
		},
	}
}
// newKloud wires a kloud.Kloud instance to the given provider, which serves
// as locker, storage, domain storage and DNS backend all at once, and
// registers it under the provider name "koding".
func newKloud(p *koding.Provider) *kloud.Kloud {
	kld := kloud.New()
	kld.Log = newLogger("kloud", true)
	kld.Locker = p
	kld.Storage = p
	kld.DomainStorage = p.DomainStorage
	kld.Domainer = p.DNS
	kld.Debug = true
	kld.AddProvider("koding", p)
	return kld
}
// getAmazonStorageSize looks up the machine's first EBS block device on EC2
// and returns that volume's size in GB, as reported by the EC2 API.
func getAmazonStorageSize(machineId string) (int, error) {
	m, err := provider.Get(machineId)
	if err != nil {
		return 0, err
	}

	a, err := provider.NewClient(m)
	if err != nil {
		return 0, err
	}

	instance, err := a.Instance(a.Id())
	if err != nil {
		return 0, err
	}

	if len(instance.BlockDevices) == 0 {
		return 0, fmt.Errorf("fatal error: no block device available")
	}

	// Only the first block device is inspected.
	oldVolumeId := instance.BlockDevices[0].VolumeId
	oldVolResp, err := a.Client.Volumes([]string{oldVolumeId}, ec2.NewFilter())
	if err != nil {
		return 0, err
	}
	// Guard against an empty result; indexing Volumes[0] blindly would panic.
	if len(oldVolResp.Volumes) == 0 {
		return 0, fmt.Errorf("no volume found with id: %s", oldVolumeId)
	}

	// The EC2 API returns the size as a decimal string.
	volSize := oldVolResp.Volumes[0].Size
	currentSize, err := strconv.Atoi(volSize)
	if err != nil {
		return 0, err
	}

	return currentSize, nil
}
// TestChecker satisfies Checker interface. Every limit check is a no-op that
// returns nil, so plan enforcement never blocks the tests.
type TestChecker struct{}

func (c *TestChecker) Total() error {
	return nil
}

func (c *TestChecker) AlwaysOn() error {
	return nil
}

func (c *TestChecker) Timeout() error {
	return nil
}

func (c *TestChecker) Storage(wantStorage int) error {
	return nil
}

func (c *TestChecker) AllowedInstances(wantInstance koding.InstanceType) error {
	return nil
}
kloud/tests: add some more docs
package main
/* HOW TO RUN THE TESTS
Be sure you have a running ngrok instance. This is needed so klient can connect
to our kontrol. Run it with:
./ngrok -authtoken="CMY-UsZMWdx586A3tA0U" -subdomain="kloud-test" 4099
The Postgres and MongoDB URLs are the same as in the koding dev config. Below is an example go test command:
KLOUD_KONTROL_URL="http://kloud-test.ngrok.com/kite"
KLOUD_MONGODB_URL=192.168.59.103:27017/koding
KONTROL_POSTGRES_PASSWORD=kontrolapplication KONTROL_STORAGE=postgres
KONTROL_POSTGRES_USERNAME=kontrolapplication KONTROL_POSTGRES_DBNAME=social
KONTROL_POSTGRES_HOST=192.168.59.103 go test -v -timeout 20m
To get profile files first compile a binary and call that particular binary with additional flags:
go test -c
KLOUD_KONTROL_URL="http://kloud-test.ngrok.com/kite"
KLOUD_MONGODB_URL=192.168.59.103:27017/koding
KONTROL_POSTGRES_PASSWORD=kontrolapplication KONTROL_STORAGE=postgres
KONTROL_POSTGRES_USERNAME=kontrolapplication KONTROL_POSTGRES_DBNAME=social
KONTROL_POSTGRES_HOST=192.168.59.103 ./kloud.test -test.v -test.timeout 20m
-test.cpuprofile=kloud_cpu.prof -test.memprofile=kloud_mem.prof
Create a nice graph from the cpu profile
go tool pprof --pdf kloud.test kloud_cpu.prof > kloud_cpu.pdf
*/
import (
"fmt"
"log"
"net/url"
"os"
"strconv"
"strings"
"testing"
"time"
"labix.org/v2/mgo"
"labix.org/v2/mgo/bson"
"github.com/koding/kite"
"github.com/koding/kite/config"
"github.com/koding/kite/kontrol"
"github.com/koding/kite/protocol"
"github.com/koding/kite/testkeys"
"github.com/koding/kite/testutil"
"koding/db/models"
"koding/db/mongodb/modelhelper"
"koding/kites/kloud/keys"
"koding/kites/kloud/kloud"
"koding/kites/kloud/koding"
"koding/kites/kloud/machinestate"
"koding/kites/kloud/multiec2"
kloudprotocol "koding/kites/kloud/protocol"
"koding/kites/kloud/sshutil"
"github.com/mitchellh/goamz/aws"
"github.com/mitchellh/goamz/ec2"
)
var (
kloudKite *kite.Kite
kld *kloud.Kloud
remote *kite.Client
conf *config.Config
provider *koding.Provider
)
type args struct {
MachineId string
}
// init boots a self-contained test environment: a local kontrol kite backed
// by Postgres, the kloud kite with all machine handlers registered, and a
// "user" kite whose connection to kloud is stored in the package-level
// remote for the tests to use. Any failure is fatal.
func init() {
	conf = config.New()
	conf.Username = "testuser"
	conf.KontrolURL = os.Getenv("KLOUD_KONTROL_URL")
	if conf.KontrolURL == "" {
		conf.KontrolURL = "http://localhost:4099/kite"
	}
	conf.KontrolKey = testkeys.Public
	conf.KontrolUser = "testuser"
	conf.KiteKey = testutil.NewKiteKey().Raw

	// Power up our own kontrol kite for self-contained tests.
	kontrol.DefaultPort = 4099
	kntrl := kontrol.New(conf.Copy(), "0.1.0", testkeys.Public, testkeys.Private)
	kntrl.SetStorage(kontrol.NewPostgres(nil, kntrl.Kite.Log))
	go kntrl.Run()
	<-kntrl.Kite.ServerReadyNotify()

	// Power up kloud kite.
	kloudKite = kite.New("kloud", "0.0.1")
	kloudKite.Config = conf.Copy()
	kloudKite.Config.Port = 4002
	kiteURL := &url.URL{Scheme: "http", Host: "localhost:4002", Path: "/kite"}
	_, err := kloudKite.Register(kiteURL)
	if err != nil {
		log.Fatal(err)
	}

	provider = newKodingProvider()

	// Add Kloud handlers. Assign to the package-level kld: the original code
	// used `kld := ...`, which shadowed the global and left it nil forever.
	kld = newKloud(provider)
	kloudKite.HandleFunc("build", kld.Build)
	kloudKite.HandleFunc("destroy", kld.Destroy)
	kloudKite.HandleFunc("start", kld.Start)
	kloudKite.HandleFunc("stop", kld.Stop)
	kloudKite.HandleFunc("reinit", kld.Reinit)
	kloudKite.HandleFunc("resize", kld.Resize)
	kloudKite.HandleFunc("event", kld.Event)

	go kloudKite.Run()
	<-kloudKite.ServerReadyNotify()

	user := kite.New("user", "0.0.1")
	user.Config = conf.Copy()

	kloudQuery := &protocol.KontrolQuery{
		Username:    "testuser",
		Environment: conf.Environment,
		Name:        "kloud",
	}
	kites, err := user.GetKites(kloudQuery)
	if err != nil {
		log.Fatal(err)
	}

	// Get the caller
	remote = kites[0]
	if err := remote.Dial(); err != nil {
		log.Fatal(err)
	}
}
func TestPing(t *testing.T) {
_, err := remote.Tell("kite.ping")
if err != nil {
t.Fatal(err)
}
}
// TestSingleMachine creates a test user document and a single machine document
// that is bound to that particular test user. It builds, stops, starts,
// resizes, reinits and destroys the machine in that order.
func TestSingleMachine(t *testing.T) {
userData, err := createUser()
if err != nil {
t.Fatal(err)
}
// build
if err := build(userData.MachineId); err != nil {
t.Error(err)
}
// now try to ssh into the machine with temporary private key we created in
// the beginning
if err := checkSSHKey(userData.MachineId, userData.PrivateKey); err != nil {
t.Error(err)
}
// invalid calls after build
if err := build(userData.MachineId); err == nil {
t.Error("`build` method can not be called on `running` machines.")
}
// stop
log.Println("Stopping machine")
if err := stop(userData.MachineId); err != nil {
t.Error(err)
}
if err := build(userData.MachineId); err == nil {
t.Error("`build` method can not be called on `stopped` machines.")
}
if err := stop(userData.MachineId); err == nil {
t.Error("`stop` method can not be called on `stopped` machines.")
}
// start
log.Println("Starting machine")
if err := start(userData.MachineId); err != nil {
t.Error(err)
}
// resize
log.Println("Resizing machine")
storageWant := 5
err = provider.Session.Run("jMachines", func(c *mgo.Collection) error {
return c.UpdateId(
bson.ObjectIdHex(userData.MachineId),
bson.M{
"$set": bson.M{
"meta.storage_size": storageWant,
},
},
)
})
if err != nil {
t.Error(err)
}
if err := resize(userData.MachineId); err != nil {
t.Error(err)
}
storageGot, err := getAmazonStorageSize(userData.MachineId)
if err != nil {
t.Error(err)
}
if storageGot != storageWant {
t.Errorf("Resizing completed but storage sizes do not match. Want: %dGB, Got: %dGB",
storageWant,
storageGot,
)
}
// reinit
log.Println("Reinitializing machine")
if err := reinit(userData.MachineId); err != nil {
t.Error(err)
}
// destroy
log.Println("Destroying machine")
if err := destroy(userData.MachineId); err != nil {
t.Error(err)
}
if err := stop(userData.MachineId); err == nil {
t.Error("`stop` method can not be called on `terminated` machines.")
}
if err := start(userData.MachineId); err == nil {
t.Error("`start` method can not be called on `terminated` machines.")
}
if err := destroy(userData.MachineId); err == nil {
t.Error("`destroy` method can not be called on `terminated` machines.")
}
if err := resize(userData.MachineId); err == nil {
t.Error("`resize` method can not be called on `terminated` machines.")
}
if err := reinit(userData.MachineId); err == nil {
t.Error("`reinit` method can not be called on `terminated` machines.")
}
}
type singleUser struct {
MachineId string
PrivateKey string
PublicKey string
}
// createUser creates a test user in jUsers and a single jMachine document.
func createUser() (*singleUser, error) {
privateKey, publicKey, err := sshutil.TemporaryKey()
if err != nil {
return nil, err
}
username := "testuser"
// cleanup old document
if err := provider.Session.Run("jUsers", func(c *mgo.Collection) error {
return c.Remove(bson.M{"username": username})
}); err != nil {
return nil, err
}
userId := bson.NewObjectId()
user := &models.User{
ObjectId: userId,
Email: "testuser@testuser.com",
LastLoginDate: time.Now().UTC(),
RegisteredAt: time.Now().UTC(),
Name: username, // bson equivelant is username
Password: "somerandomnumbers",
Status: "confirmed",
SshKeys: []struct {
Title string `bson:"title"`
Key string `bson:"key"`
}{
{Key: publicKey},
},
}
if err := provider.Session.Run("jUsers", func(c *mgo.Collection) error {
return c.Insert(&user)
}); err != nil {
return nil, err
}
// later we can add more users with "Owner:false" to test sharing capabilities
users := []models.Permissions{
{Id: userId, Sudo: true, Owner: true},
}
machineId := bson.NewObjectId()
machine := &koding.MachineDocument{
Id: machineId,
Label: "",
Domain: username + ".dev.koding.io",
Credential: username,
Provider: "koding",
CreatedAt: time.Now().UTC(),
Meta: bson.M{
"region": "eu-west-1",
"instance_type": "t2.micro",
"storage_size": 3,
"alwaysOn": false,
},
Users: users,
Groups: make([]models.Permissions, 0),
}
machine.Assignee.InProgress = false
machine.Assignee.AssignedAt = time.Now().UTC()
machine.Status.State = machinestate.NotInitialized.String()
machine.Status.ModifiedAt = time.Now().UTC()
if err := provider.Session.Run("jMachines", func(c *mgo.Collection) error {
return c.Insert(&machine)
}); err != nil {
return nil, err
}
return &singleUser{
MachineId: machineId.Hex(),
PrivateKey: privateKey,
PublicKey: publicKey,
}, nil
}
func build(id string) error {
buildArgs := &args{
MachineId: id,
}
resp, err := remote.Tell("build", buildArgs)
if err != nil {
return err
}
var result kloud.ControlResult
err = resp.Unmarshal(&result)
if err != nil {
return err
}
eArgs := kloud.EventArgs([]kloud.EventArg{
kloud.EventArg{
EventId: buildArgs.MachineId,
Type: "build",
},
})
return listenEvent(eArgs, machinestate.Running)
}
func checkSSHKey(id, privateKey string) error {
// now try to ssh into the machine with temporary private key we created in
// the beginning
machine, err := provider.Get(id)
if err != nil {
return err
}
sshConfig, err := sshutil.SshConfig("root", privateKey)
if err != nil {
return err
}
log.Printf("Connecting to machine with ip '%s' via ssh\n", machine.IpAddress)
sshClient, err := sshutil.ConnectSSH(machine.IpAddress+":22", sshConfig)
if err != nil {
return err
}
log.Printf("Testing SSH deployment")
output, err := sshClient.StartCommand("whoami")
if err != nil {
return err
}
if strings.TrimSpace(string(output)) != "root" {
return fmt.Errorf("Whoami result should be root, got: %s", string(output))
}
return nil
}
func destroy(id string) error {
destroyArgs := &args{
MachineId: id,
}
resp, err := remote.Tell("destroy", destroyArgs)
if err != nil {
return err
}
var result kloud.ControlResult
err = resp.Unmarshal(&result)
if err != nil {
return err
}
eArgs := kloud.EventArgs([]kloud.EventArg{
kloud.EventArg{
EventId: destroyArgs.MachineId,
Type: "destroy",
},
})
return listenEvent(eArgs, machinestate.Terminated)
}
func start(id string) error {
startArgs := &args{
MachineId: id,
}
resp, err := remote.Tell("start", startArgs)
if err != nil {
return err
}
var result kloud.ControlResult
err = resp.Unmarshal(&result)
if err != nil {
return err
}
eArgs := kloud.EventArgs([]kloud.EventArg{
kloud.EventArg{
EventId: startArgs.MachineId,
Type: "start",
},
})
return listenEvent(eArgs, machinestate.Running)
}
func stop(id string) error {
stopArgs := &args{
MachineId: id,
}
resp, err := remote.Tell("stop", stopArgs)
if err != nil {
return err
}
var result kloud.ControlResult
err = resp.Unmarshal(&result)
if err != nil {
return err
}
eArgs := kloud.EventArgs([]kloud.EventArg{
kloud.EventArg{
EventId: stopArgs.MachineId,
Type: "stop",
},
})
return listenEvent(eArgs, machinestate.Stopped)
}
func reinit(id string) error {
reinitArgs := &args{
MachineId: id,
}
resp, err := remote.Tell("reinit", reinitArgs)
if err != nil {
return err
}
var result kloud.ControlResult
err = resp.Unmarshal(&result)
if err != nil {
return err
}
eArgs := kloud.EventArgs([]kloud.EventArg{
kloud.EventArg{
EventId: reinitArgs.MachineId,
Type: "reinit",
},
})
return listenEvent(eArgs, machinestate.Running)
}
func resize(id string) error {
resizeArgs := &args{
MachineId: id,
}
resp, err := remote.Tell("resize", resizeArgs)
if err != nil {
return err
}
var result kloud.ControlResult
err = resp.Unmarshal(&result)
if err != nil {
return err
}
eArgs := kloud.EventArgs([]kloud.EventArg{
kloud.EventArg{
EventId: resizeArgs.MachineId,
Type: "resize",
},
})
return listenEvent(eArgs, machinestate.Running)
}
// listenEvent polls kloud's "event" method with the given arguments every
// two seconds until the desiredState is reported, the event carries an
// error, or 10 minutes have elapsed.
func listenEvent(args kloud.EventArgs, desiredState machinestate.State) error {
	tryUntil := time.Now().Add(time.Minute * 10)
	for {
		resp, err := remote.Tell("event", args)
		if err != nil {
			return err
		}

		var events []kloud.EventResponse
		if err := resp.Unmarshal(&events); err != nil {
			return err
		}
		// Guard against an empty response; indexing events[0] blindly would
		// panic and take the whole test binary down.
		if len(events) == 0 {
			return fmt.Errorf("no event response received while waiting for state %s", desiredState)
		}

		e := events[0]
		if e.Error != nil {
			return e.Error
		}

		if e.Event.Status == desiredState {
			return nil
		}

		if time.Now().After(tryUntil) {
			return fmt.Errorf("Timeout while waiting for state %s", desiredState)
		}

		time.Sleep(2 * time.Second) // still pending; poll again
	}
}
func newKodingProvider() *koding.Provider {
auth := aws.Auth{
AccessKey: "AKIAJFKDHRJ7Q5G4MOUQ",
SecretKey: "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",
}
mongoURL := os.Getenv("KLOUD_MONGODB_URL")
if mongoURL == "" {
panic("KLOUD_MONGODB_URL is not set")
}
modelhelper.Initialize(mongoURL)
db := modelhelper.Mongo
domainStorage := koding.NewDomainStorage(db)
testChecker := &TestChecker{}
return &koding.Provider{
Session: db,
Kite: kloudKite,
Log: newLogger("koding", true),
KontrolURL: conf.KontrolURL,
KontrolPrivateKey: testkeys.Private,
KontrolPublicKey: testkeys.Public,
Test: true,
EC2Clients: multiec2.New(auth, []string{
"us-east-1",
"ap-southeast-1",
"us-west-2",
"eu-west-1",
}),
DNS: koding.NewDNSClient("dev.koding.io", auth),
DomainStorage: domainStorage,
Bucket: koding.NewBucket("koding-klient", "development/latest", auth),
KeyName: keys.DeployKeyName,
PublicKey: keys.DeployPublicKey,
PrivateKey: keys.DeployPrivateKey,
PlanChecker: func(_ *kloudprotocol.Machine) (koding.Checker, error) {
return testChecker, nil
},
}
}
func newKloud(p *koding.Provider) *kloud.Kloud {
kld := kloud.New()
kld.Log = newLogger("kloud", true)
kld.Locker = p
kld.Storage = p
kld.DomainStorage = p.DomainStorage
kld.Domainer = p.DNS
kld.Debug = true
kld.AddProvider("koding", p)
return kld
}
// getAmazonStorageSize looks up the machine's first EBS block device on EC2
// and returns that volume's size in GB, as reported by the EC2 API.
func getAmazonStorageSize(machineId string) (int, error) {
	m, err := provider.Get(machineId)
	if err != nil {
		return 0, err
	}

	a, err := provider.NewClient(m)
	if err != nil {
		return 0, err
	}

	instance, err := a.Instance(a.Id())
	if err != nil {
		return 0, err
	}

	if len(instance.BlockDevices) == 0 {
		return 0, fmt.Errorf("fatal error: no block device available")
	}

	// Only the first block device is inspected.
	oldVolumeId := instance.BlockDevices[0].VolumeId
	oldVolResp, err := a.Client.Volumes([]string{oldVolumeId}, ec2.NewFilter())
	if err != nil {
		return 0, err
	}
	// Guard against an empty result; indexing Volumes[0] blindly would panic.
	if len(oldVolResp.Volumes) == 0 {
		return 0, fmt.Errorf("no volume found with id: %s", oldVolumeId)
	}

	// The EC2 API returns the size as a decimal string.
	volSize := oldVolResp.Volumes[0].Size
	currentSize, err := strconv.Atoi(volSize)
	if err != nil {
		return 0, err
	}

	return currentSize, nil
}
// TestChecker satisfies Checker interface
type TestChecker struct{}
func (c *TestChecker) Total() error {
return nil
}
func (c *TestChecker) AlwaysOn() error {
return nil
}
func (c *TestChecker) Timeout() error {
return nil
}
func (c *TestChecker) Storage(wantStorage int) error {
return nil
}
func (c *TestChecker) AllowedInstances(wantInstance koding.InstanceType) error {
return nil
}
|
package kontrol
import (
"encoding/json"
"errors"
"fmt"
"koding/db/mongodb/modelhelper"
"koding/kite/dnode"
"koding/kite/kite"
"koding/kite/protocol"
"koding/tools/config"
"net"
"strconv"
"strings"
"time"
"github.com/coreos/go-etcd/etcd"
"github.com/dgrijalva/jwt-go"
"github.com/nu7hatch/gouuid"
"github.com/op/go-logging"
)
const (
	HeartbeatInterval = 1 * time.Minute // how often registered kites are asked to call back
	HeartbeatDelay    = 2 * time.Minute // etcd TTL for registrations; entries expire after this
	KitesPrefix       = "/kites"        // root etcd directory for kite keys
)

// log is the package-wide logger; it is assigned from the kontrol kite's own
// logger inside New().
var log *logging.Logger
// Kontrol is the kite registry service: it stores kite registrations in etcd
// and notifies interested watchers about changes.
type Kontrol struct {
	kite       *kite.Kite   // the kite this service runs as
	etcd       *etcd.Client // backing store for registrations
	watcherHub *watcherHub  // fan-out of register/deregister notifications
}
// New builds a Kontrol from the global config: it creates the "kontrol" kite,
// connects an etcd client to the configured servers, registers the kodingKey
// and sessionID authenticators and the register/getKites/getToken handlers.
// KontrolEnabled is turned off since this instance IS kontrol.
func New() *Kontrol {
	kiteOptions := &kite.Options{
		Kitename:    "kontrol",
		Version:     "0.0.1",
		Port:        strconv.Itoa(config.Current.NewKontrol.Port),
		Region:      "sj",
		Environment: "development",
		Username:    "koding",
	}

	// Read list of etcd servers from config.
	machines := make([]string, len(config.Current.Etcd))
	for i, s := range config.Current.Etcd {
		machines[i] = "http://" + s.Host + ":" + strconv.FormatUint(uint64(s.Port), 10)
	}

	kontrol := &Kontrol{
		kite:       kite.New(kiteOptions),
		etcd:       etcd.NewClient(machines),
		watcherHub: newWatcherHub(),
	}

	log = kontrol.kite.Log

	kontrol.kite.KontrolEnabled = false // Because we are Kontrol!

	kontrol.kite.Authenticators["kodingKey"] = kontrol.AuthenticateFromKodingKey
	kontrol.kite.Authenticators["sessionID"] = kontrol.AuthenticateFromSessionID

	kontrol.kite.HandleFunc("register", kontrol.handleRegister)
	kontrol.kite.HandleFunc("getKites", kontrol.handleGetKites)
	kontrol.kite.HandleFunc("getToken", kontrol.handleGetToken)

	// Disable until we got all things set up - arslan
	// kontrol.kite.EnableTLS(
	// 	config.Current.NewKontrol.CertFile,
	// 	config.Current.NewKontrol.KeyFile,
	// )

	return kontrol
}
// Run starts the background tasks and then runs the underlying kite.
func (k *Kontrol) Run() {
	k.init()
	k.kite.Run()
}

// Start starts the background tasks and then starts the underlying kite.
func (k *Kontrol) Start() {
	k.init()
	k.kite.Start()
}

// init does common operations of Run() and Start(): it launches the etcd
// watcher and the self-registration heartbeat loop, each on its own
// goroutine.
func (k *Kontrol) init() {
	go k.WatchEtcd()
	go k.registerSelf()
}
// registerValue is the type of the value that is saved to etcd.
type registerValue struct {
URL protocol.KiteURL
KodingKey string
Visibility protocol.Visibility
}
// handleRegister validates a register request (it must be authenticated with
// a kodingKey and carry a URL), fills in a missing hostname from the remote
// address, and delegates to register.
func (k *Kontrol) handleRegister(r *kite.Request) (interface{}, error) {
	log.Info("Register request from: %#v", r.RemoteKite.Kite)

	// Only accept requests with kodingKey because we need this info
	// for generating tokens for this kite.
	if r.Authentication.Type != "kodingKey" {
		return nil, fmt.Errorf("Unexpected authentication type: %s", r.Authentication.Type)
	}

	if r.RemoteKite.URL.URL == nil {
		return nil, errors.New("Empty 'url' field")
	}

	// In case Kite.URL does not contain a hostname, the r.RemoteAddr is used.
	host, port, _ := net.SplitHostPort(r.RemoteKite.URL.Host)
	if host == "" {
		host, _, _ = net.SplitHostPort(r.RemoteAddr)
		r.RemoteKite.URL.Host = net.JoinHostPort(host, port)
	}

	return k.register(r.RemoteKite, r.Authentication.Key)
}
// register writes the kite's entry into etcd, arranges a heartbeat so the
// entry is refreshed before its TTL expires, notifies watchers, and installs
// a disconnect hook that deletes the entry again. Visibility must be either
// public or private.
func (k *Kontrol) register(r *kite.RemoteKite, kodingkey string) (*protocol.RegisterResult, error) {
	kite := &r.Kite

	if kite.Visibility != protocol.Public && kite.Visibility != protocol.Private {
		return nil, errors.New("Invalid visibility field")
	}

	key, err := getKiteKey(kite)
	if err != nil {
		return nil, err
	}

	// setKey sets the value of the Kite in etcd.
	setKey := k.makeSetter(kite, key, kodingkey)

	// Register to etcd.
	prev, err := setKey()
	if err != nil {
		log.Critical("etcd setKey error: %s", err)
		return nil, errors.New("internal error - register")
	}

	// A non-empty previous value means the kite was already registered; a
	// heartbeat is only requested for first-time registrations.
	if prev != "" {
		log.Notice("Kite (%s) is already registered. Doing nothing.", key)
	} else if err := requestHeartbeat(r, setKey); err != nil {
		return nil, err
	}

	log.Info("Kite registered: %s", key)
	k.watcherHub.Notify(kite, protocol.Register, kodingkey)

	r.OnDisconnect(func() {
		// Delete from etcd, WatchEtcd() will get the event
		// and will notify watchers of this Kite for deregistration.
		k.etcd.Delete(key, false)
	})

	// send response back to the kite, also identify him with the new name
	ip, _, _ := net.SplitHostPort(r.URL.Host)
	return &protocol.RegisterResult{
		Result:   protocol.AllowKite,
		Username: r.Username,
		PublicIP: ip,
	}, nil
}
// requestHeartbeat asks the remote kite to call us back periodically.
// Every heartbeat re-runs setterFunc, refreshing the kite's etcd key
// before its TTL (HeartbeatDelay) expires.
func requestHeartbeat(r *kite.RemoteKite, setterFunc func() (string, error)) error {
	heartbeatFunc := func(req *kite.Request) {
		prev, err := setterFunc()
		// An empty previous value means the key had expired: the kite
		// missed its heartbeat window and was just re-registered.
		if err == nil && prev == "" {
			log.Warning("Came heartbeat but the Kite (%s) is not registered. Re-registering it. It may be an indication that the heartbeat delay is too short.", req.RemoteKite.ID)
		}
	}

	heartbeatArgs := []interface{}{
		HeartbeatInterval / time.Second, // interval in seconds
		kite.Callback(heartbeatFunc),
	}

	_, err := r.Tell("heartbeat", heartbeatArgs...)
	return err
}
// registerSelf adds Kontrol itself to etcd and refreshes the entry every
// HeartbeatInterval so it never expires. A missing identity field in
// Kontrol's own kite is unrecoverable, hence the panic.
func (k *Kontrol) registerSelf() {
	key, err := getKiteKey(&k.kite.Kite)
	if err != nil {
		panic(err)
	}

	setter := k.makeSetter(&k.kite.Kite, key, k.kite.KodingKey)
	for {
		if _, err := setter(); err != nil {
			log.Critical(err.Error())
			// Retry quickly on failure instead of waiting a full interval.
			time.Sleep(time.Second)
			continue
		}
		time.Sleep(HeartbeatInterval)
	}
}
// makeSetter returns a func for setting the kite key with value in etcd.
func (k *Kontrol) makeSetter(kite *protocol.Kite, etcdKey, kodingkey string) func() (string, error) {
rv := ®isterValue{
URL: kite.URL,
KodingKey: kodingkey,
Visibility: kite.Visibility,
}
valueBytes, _ := json.Marshal(rv)
value := string(valueBytes)
ttl := uint64(HeartbeatDelay / time.Second)
return func() (prevValue string, err error) {
resp, err := k.etcd.Set(etcdKey, value, ttl)
if err != nil {
log.Critical("etcd error: %s", err)
return
}
if resp.PrevNode != nil {
prevValue = resp.PrevNode.Value
}
// Set the TTL for the username. Otherwise, empty dirs remain in etcd.
_, err = k.etcd.UpdateDir(KitesPrefix+"/"+kite.Username, ttl)
if err != nil {
log.Critical("etcd error: %s", err)
return
}
return
}
}
// getKiteKey returns a string representing the kite uniquely that is
// suitable to use as an etcd key, e.g.
// /kites/<username>/<environment>/<name>/<version>/<region>/<hostname>/<id>.
func getKiteKey(k *protocol.Kite) (string, error) {
	// Order is important: the key segments must always appear in the same
	// sequence. A map must NOT be used here because Go randomizes map
	// iteration order, which would produce a different (wrong) key on
	// every call.
	fields := []struct {
		name  string
		value string
	}{
		{"username", k.Username},
		{"environment", k.Environment},
		{"name", k.Name},
		{"version", k.Version},
		{"region", k.Region},
		{"hostname", k.Hostname},
		{"id", k.ID},
	}

	// Validate fields and build the key in one ordered pass.
	segments := make([]string, 0, len(fields))
	for _, f := range fields {
		if f.value == "" {
			return "", fmt.Errorf("Empty Kite field: %s", f.name)
		}
		if strings.ContainsRune(f.value, '/') {
			return "", fmt.Errorf("Field \"%s\" must not contain '/'", f.name)
		}
		segments = append(segments, f.value)
	}

	return KitesPrefix + "/" + strings.Join(segments, "/"), nil
}
// getQueryKey returns the etcd key prefix for the query. Query fields must
// be set from the most significant (username) downwards; once an empty
// field is encountered, all remaining fields must also be empty.
func getQueryKey(q *protocol.KontrolQuery) (string, error) {
	// Ordered field list. A map cannot be used: Go randomizes map
	// iteration order, which would both scramble the key segments and
	// make the empty-field validation below nondeterministic.
	fields := []struct {
		name  string
		value string
	}{
		{"username", q.Username},
		{"environment", q.Environment},
		{"name", q.Name},
		{"version", q.Version},
		{"region", q.Region},
		{"hostname", q.Hostname},
		{"id", q.ID},
	}

	if q.Username == "" {
		return "", errors.New("Empty username field")
	}

	// Validate query and build key.
	segments := make([]string, 0, len(fields))
	emptyField := "" // name of the first empty field, for the error message
	for _, f := range fields {
		if f.value == "" {
			if emptyField == "" {
				emptyField = f.name
			}
			continue
		}
		// A set field after an empty one makes the query ambiguous.
		if emptyField != "" {
			return "", fmt.Errorf("Invalid query. Query option is not set: %s", emptyField)
		}
		segments = append(segments, f.value)
	}

	return KitesPrefix + "/" + strings.Join(segments, "/"), nil
}
// handleGetKites is the handler of the "getKites" method. Args: a
// KontrolQuery and an optional callback invoked when a matching kite
// registers or deregisters. Results are filtered by canAccess.
func (k *Kontrol) handleGetKites(r *kite.Request) (interface{}, error) {
	if len(r.Args) != 1 && len(r.Args) != 2 {
		return nil, errors.New("Invalid number of arguments")
	}

	var query protocol.KontrolQuery
	err := r.Args[0].Unmarshal(&query)
	if err != nil {
		return nil, errors.New("Invalid query argument")
	}

	// To be called when a Kite is registered or deregistered matching the query.
	var watchCallback dnode.Function
	if len(r.Args) == 2 {
		watchCallback = r.Args[1].MustFunction()
	}

	kites, err := k.getKites(r, query, watchCallback)
	if err != nil {
		return nil, err
	}

	// Filter out kites the requester is not allowed to see.
	allowed := make([]*protocol.KiteWithToken, 0, len(kites))
	for _, kite := range kites {
		if canAccess(r.RemoteKite.Kite, kite.Kite) {
			allowed = append(allowed, kite)
		}
	}

	return allowed, nil
}
// getKites fetches the kites matching the query from etcd and attaches an
// access token to each. If watchCallback is non-nil it is registered with
// the watcher hub for future register/deregister events on the query.
func (k *Kontrol) getKites(r *kite.Request, query protocol.KontrolQuery, watchCallback dnode.Function) ([]*protocol.KiteWithToken, error) {
	key, err := getQueryKey(&query)
	if err != nil {
		return nil, err
	}

	resp, err := k.etcd.Get(
		key,
		false, // sorting flag, we don't care about sorting for now
		true,  // recursive, return all child directories too
	)
	if err != nil {
		if etcdErr, ok := err.(*etcd.EtcdError); ok {
			if etcdErr.ErrorCode == 100 { // Key Not Found
				// No matching kite is not an error: return an empty list.
				return make([]*protocol.KiteWithToken, 0), nil
			}
		}
		log.Critical("etcd error: %s", err)
		return nil, fmt.Errorf("internal error - getKites")
	}

	kvs := flatten(resp.Node.Nodes)

	kitesWithToken, err := addTokenToKites(kvs, r.Username)
	if err != nil {
		return nil, err
	}

	// Register callbacks to our watcher hub.
	// It will call them when a Kite registered/unregistered matching the query.
	if watchCallback != nil {
		k.watcherHub.RegisterWatcher(r.RemoteKite, &query, watchCallback)
	}

	return kitesWithToken, nil
}
// flatten converts the recursive etcd directory structure to a flat list
// containing only leaf nodes (the stored Kites).
func flatten(in etcd.Nodes) (out etcd.Nodes) {
	for _, node := range in {
		if node.Dir {
			// Recurse into directories and collect their leaves.
			out = append(out, flatten(node.Nodes)...)
			continue
		}
		out = append(out, node)
	}
	return
}
// addTokenToKites converts the raw etcd nodes to kites, attaching a
// freshly generated token for the given username to each one.
func addTokenToKites(nodes etcd.Nodes, username string) ([]*protocol.KiteWithToken, error) {
	kitesWithToken := make([]*protocol.KiteWithToken, len(nodes))
	for i, node := range nodes {
		kite, kodingKey, err := kiteFromEtcdKV(node.Key, node.Value)
		if err != nil {
			return nil, err
		}
		kitesWithToken[i], err = addTokenToKite(kite, username, kodingKey)
		if err != nil {
			return nil, err
		}
	}
	return kitesWithToken, nil
}
// addTokenToKite pairs the kite with a newly generated JWT for username.
// NOTE(review): the kodingKey parameter is currently unused; kept for
// call-site symmetry with addTokenToKites.
func addTokenToKite(kite *protocol.Kite, username, kodingKey string) (*protocol.KiteWithToken, error) {
	tkn, err := generateToken(kite, username)
	if err != nil {
		return nil, err
	}

	return &protocol.KiteWithToken{
		Kite:  *kite,
		Token: tkn,
	}, nil
}
// generateToken returns a JWT token string. Please see the URL for details:
// http://tools.ietf.org/html/draft-ietf-oauth-json-web-token-13#section-4.1
func generateToken(kite *protocol.Kite, username string) (string, error) {
	tknID, err := uuid.NewV4()
	if err != nil {
		return "", errors.New("Server error: Cannot generate a token")
	}

	// Identifies the expiration time after which the JWT MUST NOT be accepted
	// for processing.
	ttl := 1 * time.Hour

	// Implementers MAY provide for some small leeway, usually no more than
	// a few minutes, to account for clock skew.
	leeway := 1 * time.Minute

	// NOTE(review): time.Now() is called three times below; the claims
	// could straddle a second boundary — consider capturing `now` once.
	tkn := jwt.New(jwt.GetSigningMethod("RS256"))
	tkn.Claims["iss"] = "koding.com"                                 // Issuer
	tkn.Claims["sub"] = username                                     // Subject
	tkn.Claims["aud"] = kite.ID                                      // Audience
	tkn.Claims["exp"] = time.Now().UTC().Add(ttl).Add(leeway).Unix() // Expiration Time
	tkn.Claims["nbf"] = time.Now().UTC().Add(-leeway).Unix()         // Not Before
	tkn.Claims["iat"] = time.Now().UTC().Unix()                      // Issued At
	tkn.Claims["jti"] = tknID.String()                               // JWT ID

	signed, err := tkn.SignedString(rsaKey)
	if err != nil {
		return "", errors.New("Server error: Cannot generate a token")
	}
	return signed, nil
}
// kiteFromEtcdKV returns a *protocol.Kite and Koding Key string from an etcd key.
// etcd key is like: /kites/devrim/development/mathworker/1/localhost/tardis.local/662ed473-351f-4c9f-786b-99cf02cdaadb
func kiteFromEtcdKV(key, value string) (*protocol.Kite, string, error) {
	fields := strings.Split(strings.TrimPrefix(key, "/"), "/")
	// NOTE(review): the `len(fields) > 0` guard is redundant once
	// len(fields) == 8 is established.
	if len(fields) != 8 || (len(fields) > 0 && fields[0] != "kites") {
		return nil, "", fmt.Errorf("Invalid Kite: %s", key)
	}

	kite := new(protocol.Kite)
	kite.Username = fields[1]
	kite.Environment = fields[2]
	kite.Name = fields[3]
	kite.Version = fields[4]
	kite.Region = fields[5]
	kite.Hostname = fields[6]
	kite.ID = fields[7]

	// The unmarshal error is deliberately ignored — presumably because
	// delete/expire events carry an empty value and the kite identity
	// above is still valid; verify against WatchEtcd's callers.
	rv := new(registerValue)
	json.Unmarshal([]byte(value), rv)

	kite.URL = rv.URL
	return kite, rv.KodingKey, nil
}
// WatchEtcd watches all Kite changes on etcd cluster
// and notifies registered watchers on this Kontrol instance.
func (k *Kontrol) WatchEtcd() {
	// Obtain a current etcd index to start watching from by setting a
	// throw-away key with a 1-second TTL.
	var resp *etcd.Response
	var err error
	for {
		resp, err = k.etcd.Set("/_kontrol_get_index", "OK", 1)
		if err == nil {
			break
		}
		log.Critical("etcd error 1: %s", err.Error())
		time.Sleep(time.Second)
	}

	index := resp.Node.ModifiedIndex
	log.Info("etcd: index = %d", index)

	receiver := make(chan *etcd.Response)

	// NOTE(review): this goroutine reads `index` while the range loop
	// below writes it — an unsynchronized data race. Consider watching
	// synchronously (nil receiver channel) or guarding index with
	// sync/atomic.
	go func() {
		for {
			_, err := k.etcd.Watch(KitesPrefix, index+1, true, receiver, nil)
			if err != nil {
				log.Critical("etcd error 2: %s", err)
				time.Sleep(time.Second)
			}
		}
	}()

	// Channel is never closed.
	for resp := range receiver {
		// log.Debug("etcd: change received: %#v", resp)
		index = resp.Node.ModifiedIndex

		// Notify deregistration events.
		if strings.HasPrefix(resp.Node.Key, KitesPrefix) && (resp.Action == "delete" || resp.Action == "expire") {
			// The node value is empty on delete/expire; kiteFromEtcdKV
			// still derives the kite identity from the key alone.
			kite, _, err := kiteFromEtcdKV(resp.Node.Key, resp.Node.Value)
			if err == nil {
				k.watcherHub.Notify(kite, protocol.Deregister, "")
			}
		}
	}
}
// handleGetToken is the handler of the "getToken" method. It returns a
// fresh JWT for the requested kite after verifying access rights and that
// the kite is registered in etcd with a well-formed value.
func (k *Kontrol) handleGetToken(r *kite.Request) (interface{}, error) {
	var kite *protocol.Kite
	err := r.Args.MustSliceOfLength(1)[0].Unmarshal(&kite)
	if err != nil {
		return nil, errors.New("Invalid Kite")
	}

	if !canAccess(r.RemoteKite.Kite, *kite) {
		return nil, errors.New("Forbidden")
	}

	kiteKey, err := getKiteKey(kite)
	if err != nil {
		return nil, err
	}

	// Ensure the kite is actually registered.
	resp, err := k.etcd.Get(kiteKey, false, false)
	if err != nil {
		return nil, err
	}

	// Parsed only to validate the stored record; the value itself is not
	// used below.
	var kiteVal registerValue
	err = json.Unmarshal([]byte(resp.Node.Value), &kiteVal)
	if err != nil {
		return nil, err
	}

	return generateToken(kite, r.Username)
}
// canAccess makes some access control checks and returns true
// if fromKite can talk with toKite. The owner always has access; other
// users are rejected for private kites and for non-production kites.
func canAccess(fromKite protocol.Kite, toKite protocol.Kite) bool {
	// Do not allow other users if kite is private.
	if fromKite.Username != toKite.Username && toKite.Visibility == protocol.Private {
		return false
	}

	// Prevent access to development/staging kites if the requester is not owner.
	if fromKite.Username != toKite.Username && toKite.Environment != "production" {
		return false
	}

	return true
}
// AuthenticateFromSessionID is an authenticator that resolves the
// request's username from a Koding session ID.
func (k *Kontrol) AuthenticateFromSessionID(r *kite.Request) error {
	username, err := findUsernameFromSessionID(r.Authentication.Key)
	if err != nil {
		return err
	}

	r.Username = username
	return nil
}
// findUsernameFromSessionID looks up the session in the database and
// returns the username it belongs to.
func findUsernameFromSessionID(sessionID string) (string, error) {
	session, err := modelhelper.GetSession(sessionID)
	if err != nil {
		return "", err
	}

	return session.Username, nil
}
// AuthenticateFromKodingKey is an authenticator that resolves the
// request's username from a Koding key.
func (k *Kontrol) AuthenticateFromKodingKey(r *kite.Request) error {
	username, err := findUsernameFromKey(r.Authentication.Key)
	if err != nil {
		return err
	}

	r.Username = username
	return nil
}
// findUsernameFromKey resolves a Koding key to the nickname of the
// account that owns it.
func findUsernameFromKey(key string) (string, error) {
	kodingKey, err := modelhelper.GetKodingKeysByKey(key)
	if err != nil {
		return "", errors.New("kodingkey not found in kontrol db")
	}

	account, err := modelhelper.GetAccountById(kodingKey.Owner)
	if err != nil {
		return "", fmt.Errorf("register get user err %s", err)
	}

	// A kite cannot be identified without a nickname.
	if account.Profile.Nickname == "" {
		return "", errors.New("nickname is empty, could not register kite")
	}

	return account.Profile.Nickname, nil
}
kite: fix empty ip address bug
package kontrol
import (
"encoding/json"
"errors"
"fmt"
"koding/db/mongodb/modelhelper"
"koding/kite/dnode"
"koding/kite/kite"
"koding/kite/protocol"
"koding/tools/config"
"net"
"strconv"
"strings"
"time"
"github.com/coreos/go-etcd/etcd"
"github.com/dgrijalva/jwt-go"
"github.com/nu7hatch/gouuid"
"github.com/op/go-logging"
)
const (
	// HeartbeatInterval is how often registered kites are asked to call
	// back (see requestHeartbeat).
	HeartbeatInterval = 1 * time.Minute

	// HeartbeatDelay is the TTL of a kite's etcd key. It is twice the
	// heartbeat interval so a single missed heartbeat does not expire
	// the registration.
	HeartbeatDelay = 2 * time.Minute

	// KitesPrefix is the etcd directory under which all kites are stored.
	KitesPrefix = "/kites"
)

// log is the package-level logger; assigned from the Kontrol kite in New().
var log *logging.Logger

// Kontrol is the service that keeps track of running kites.
type Kontrol struct {
	kite       *kite.Kite   // the kontrol kite itself
	etcd       *etcd.Client // storage backend for registrations
	watcherHub *watcherHub  // notifies watchers about (de)registrations
}
// New creates a Kontrol instance, wires up its authenticators and method
// handlers, and connects the etcd client to the servers listed in the
// config. It does not start serving; call Run() or Start() for that.
func New() *Kontrol {
	kiteOptions := &kite.Options{
		Kitename:    "kontrol",
		Version:     "0.0.1",
		Port:        strconv.Itoa(config.Current.NewKontrol.Port),
		Region:      "sj",
		Environment: "development",
		Username:    "koding",
	}

	// Read list of etcd servers from config.
	machines := make([]string, len(config.Current.Etcd))
	for i, s := range config.Current.Etcd {
		machines[i] = "http://" + s.Host + ":" + strconv.FormatUint(uint64(s.Port), 10)
	}

	kontrol := &Kontrol{
		kite:       kite.New(kiteOptions),
		etcd:       etcd.NewClient(machines),
		watcherHub: newWatcherHub(),
	}

	// Reuse the kite's logger as the package-level logger.
	log = kontrol.kite.Log

	kontrol.kite.KontrolEnabled = false // Because we are Kontrol!

	kontrol.kite.Authenticators["kodingKey"] = kontrol.AuthenticateFromKodingKey
	kontrol.kite.Authenticators["sessionID"] = kontrol.AuthenticateFromSessionID

	kontrol.kite.HandleFunc("register", kontrol.handleRegister)
	kontrol.kite.HandleFunc("getKites", kontrol.handleGetKites)
	kontrol.kite.HandleFunc("getToken", kontrol.handleGetToken)

	// Disable until we got all things set up - arslan
	// kontrol.kite.EnableTLS(
	// 	config.Current.NewKontrol.CertFile,
	// 	config.Current.NewKontrol.KeyFile,
	// )

	return kontrol
}
// Run starts the common background tasks and then runs the kite
// (presumably blocking, with Start as the non-blocking variant — confirm
// against the kite package).
func (k *Kontrol) Run() {
	k.init()
	k.kite.Run()
}
// Start starts the common background tasks and the kite (presumably
// without blocking, unlike Run — confirm against the kite package).
func (k *Kontrol) Start() {
	k.init()
	k.kite.Start()
}
// init does common operations of Run() and Start(): it launches the etcd
// watcher and the self-registration loop as long-lived goroutines.
func (k *Kontrol) init() {
	go k.WatchEtcd()
	go k.registerSelf()
}
// registerValue is the type of the value that is saved to etcd.
// It is stored JSON-encoded under the kite's key (see getKiteKey).
type registerValue struct {
	URL        protocol.KiteURL    // address the kite can be reached at
	KodingKey  string              // key used for generating tokens for this kite
	Visibility protocol.Visibility // public or private (see canAccess)
}
// handleRegister is the handler of the "register" method. It validates
// the request and saves the kite to etcd via register(). The connection's
// remote address is passed along so the response can report the caller's
// public IP.
func (k *Kontrol) handleRegister(r *kite.Request) (interface{}, error) {
	log.Info("Register request from: %#v", r.RemoteKite.Kite)

	// Only accept requests with kodingKey because we need this info
	// for generating tokens for this kite.
	if r.Authentication.Type != "kodingKey" {
		return nil, fmt.Errorf("Unexpected authentication type: %s", r.Authentication.Type)
	}

	if r.RemoteKite.URL.URL == nil {
		return nil, errors.New("Empty 'url' field")
	}

	// In case Kite.URL does not contain a hostname, the r.RemoteAddr is used.
	host, port, _ := net.SplitHostPort(r.RemoteKite.URL.Host)
	if host == "" {
		host, _, _ = net.SplitHostPort(r.RemoteAddr)
		r.RemoteKite.URL.Host = net.JoinHostPort(host, port)
	}

	return k.register(r.RemoteKite, r.Authentication.Key, r.RemoteAddr)
}
// register saves the kite to etcd, asks it for heartbeats, notifies
// watchers and returns the registration result. kodingkey is stored with
// the kite so tokens can be generated for it later; remoteAddr is the
// connection's remote address, used to report the kite's public IP.
func (k *Kontrol) register(r *kite.RemoteKite, kodingkey, remoteAddr string) (*protocol.RegisterResult, error) {
	kite := &r.Kite

	if kite.Visibility != protocol.Public && kite.Visibility != protocol.Private {
		return nil, errors.New("Invalid visibility field")
	}

	key, err := getKiteKey(kite)
	if err != nil {
		return nil, err
	}

	// setKey sets the value of the Kite in etcd.
	setKey := k.makeSetter(kite, key, kodingkey)

	// Register to etcd.
	prev, err := setKey()
	if err != nil {
		log.Critical("etcd setKey error: %s", err)
		return nil, errors.New("internal error - register")
	}

	// A non-empty previous value means the key already existed, i.e. the
	// kite is already registered and heartbeats are presumably running.
	if prev != "" {
		log.Notice("Kite (%s) is already registered. Doing nothing.", key)
	} else if err := requestHeartbeat(r, setKey); err != nil {
		return nil, err
	}

	log.Info("Kite registered: %s", key)
	k.watcherHub.Notify(kite, protocol.Register, kodingkey)

	r.OnDisconnect(func() {
		// Delete from etcd, WatchEtcd() will get the event
		// and will notify watchers of this Kite for deregistration.
		k.etcd.Delete(key, false)
	})

	// send response back to the kite, also identify him with the new name.
	// PublicIP is taken from the connection's remote address rather than
	// the kite-supplied URL, fixing the "empty ip address" bug.
	ip, _, _ := net.SplitHostPort(remoteAddr)

	return &protocol.RegisterResult{
		Result:   protocol.AllowKite,
		Username: r.Username,
		PublicIP: ip,
	}, nil
}
// requestHeartbeat tells the remote kite to call us back periodically. On
// every heartbeat the setter runs again, refreshing the kite's etcd key
// before its TTL (HeartbeatDelay) expires. An empty previous value on a
// refresh means the key had already expired — i.e. the kite missed its
// heartbeat window and has just been re-registered.
func requestHeartbeat(r *kite.RemoteKite, setterFunc func() (string, error)) error {
	onHeartbeat := func(req *kite.Request) {
		if prev, err := setterFunc(); err == nil && prev == "" {
			log.Warning("Came heartbeat but the Kite (%s) is not registered. Re-registering it. It may be an indication that the heartbeat delay is too short.", req.RemoteKite.ID)
		}
	}

	_, err := r.Tell("heartbeat", HeartbeatInterval/time.Second, kite.Callback(onHeartbeat))
	return err
}
// registerSelf adds Kontrol itself to etcd and refreshes the entry every
// HeartbeatInterval so it never expires. A missing identity field in
// Kontrol's own kite is unrecoverable, hence the panic.
func (k *Kontrol) registerSelf() {
	key, err := getKiteKey(&k.kite.Kite)
	if err != nil {
		panic(err)
	}

	refresh := k.makeSetter(&k.kite.Kite, key, k.kite.KodingKey)
	for {
		_, err := refresh()
		if err != nil {
			log.Critical(err.Error())
			time.Sleep(time.Second) // retry quickly on failure
			continue
		}
		time.Sleep(HeartbeatInterval)
	}
}
// makeSetter returns a func for setting the kite key with value in etcd.
func (k *Kontrol) makeSetter(kite *protocol.Kite, etcdKey, kodingkey string) func() (string, error) {
rv := ®isterValue{
URL: kite.URL,
KodingKey: kodingkey,
Visibility: kite.Visibility,
}
valueBytes, _ := json.Marshal(rv)
value := string(valueBytes)
ttl := uint64(HeartbeatDelay / time.Second)
return func() (prevValue string, err error) {
resp, err := k.etcd.Set(etcdKey, value, ttl)
if err != nil {
log.Critical("etcd error: %s", err)
return
}
if resp.PrevNode != nil {
prevValue = resp.PrevNode.Value
}
// Set the TTL for the username. Otherwise, empty dirs remain in etcd.
_, err = k.etcd.UpdateDir(KitesPrefix+"/"+kite.Username, ttl)
if err != nil {
log.Critical("etcd error: %s", err)
return
}
return
}
}
// getKiteKey returns a string representing the kite uniquely that is
// suitable to use as an etcd key, e.g.
// /kites/<username>/<environment>/<name>/<version>/<region>/<hostname>/<id>.
func getKiteKey(k *protocol.Kite) (string, error) {
	// Order is important: the key segments must always appear in the same
	// sequence. A map must NOT be used here because Go randomizes map
	// iteration order, which would produce a different (wrong) key on
	// every call.
	fields := []struct {
		name  string
		value string
	}{
		{"username", k.Username},
		{"environment", k.Environment},
		{"name", k.Name},
		{"version", k.Version},
		{"region", k.Region},
		{"hostname", k.Hostname},
		{"id", k.ID},
	}

	// Validate fields and build the key in one ordered pass.
	segments := make([]string, 0, len(fields))
	for _, f := range fields {
		if f.value == "" {
			return "", fmt.Errorf("Empty Kite field: %s", f.name)
		}
		if strings.ContainsRune(f.value, '/') {
			return "", fmt.Errorf("Field \"%s\" must not contain '/'", f.name)
		}
		segments = append(segments, f.value)
	}

	return KitesPrefix + "/" + strings.Join(segments, "/"), nil
}
// getQueryKey returns the etcd key prefix for the query. Query fields must
// be set from the most significant (username) downwards; once an empty
// field is encountered, all remaining fields must also be empty.
func getQueryKey(q *protocol.KontrolQuery) (string, error) {
	// Ordered field list. A map cannot be used: Go randomizes map
	// iteration order, which would both scramble the key segments and
	// make the empty-field validation below nondeterministic.
	fields := []struct {
		name  string
		value string
	}{
		{"username", q.Username},
		{"environment", q.Environment},
		{"name", q.Name},
		{"version", q.Version},
		{"region", q.Region},
		{"hostname", q.Hostname},
		{"id", q.ID},
	}

	if q.Username == "" {
		return "", errors.New("Empty username field")
	}

	// Validate query and build key.
	segments := make([]string, 0, len(fields))
	emptyField := "" // name of the first empty field, for the error message
	for _, f := range fields {
		if f.value == "" {
			if emptyField == "" {
				emptyField = f.name
			}
			continue
		}
		// A set field after an empty one makes the query ambiguous.
		if emptyField != "" {
			return "", fmt.Errorf("Invalid query. Query option is not set: %s", emptyField)
		}
		segments = append(segments, f.value)
	}

	return KitesPrefix + "/" + strings.Join(segments, "/"), nil
}
// handleGetKites is the handler of the "getKites" method. It expects a
// KontrolQuery as the first argument and, optionally, a watch callback as
// the second. Kites the caller may not access are filtered out.
func (k *Kontrol) handleGetKites(r *kite.Request) (interface{}, error) {
	argc := len(r.Args)
	if argc != 1 && argc != 2 {
		return nil, errors.New("Invalid number of arguments")
	}

	var query protocol.KontrolQuery
	if err := r.Args[0].Unmarshal(&query); err != nil {
		return nil, errors.New("Invalid query argument")
	}

	// Optional callback, invoked when a kite matching the query registers
	// or deregisters.
	var watchCallback dnode.Function
	if argc == 2 {
		watchCallback = r.Args[1].MustFunction()
	}

	matches, err := k.getKites(r, query, watchCallback)
	if err != nil {
		return nil, err
	}

	// Keep only the kites the requester is allowed to talk to.
	visible := make([]*protocol.KiteWithToken, 0, len(matches))
	for _, kwt := range matches {
		if canAccess(r.RemoteKite.Kite, kwt.Kite) {
			visible = append(visible, kwt)
		}
	}

	return visible, nil
}
// getKites fetches the kites matching the query from etcd and attaches an
// access token to each. When watchCallback is given it is registered with
// the watcher hub for future register/deregister events on the query.
func (k *Kontrol) getKites(r *kite.Request, query protocol.KontrolQuery, watchCallback dnode.Function) ([]*protocol.KiteWithToken, error) {
	key, err := getQueryKey(&query)
	if err != nil {
		return nil, err
	}

	// Recursive get; sorting is not needed.
	resp, err := k.etcd.Get(key, false, true)
	if err != nil {
		// A missing key simply means no kite matched the query.
		if etcdErr, ok := err.(*etcd.EtcdError); ok && etcdErr.ErrorCode == 100 { // Key Not Found
			return make([]*protocol.KiteWithToken, 0), nil
		}
		log.Critical("etcd error: %s", err)
		return nil, fmt.Errorf("internal error - getKites")
	}

	kitesWithToken, err := addTokenToKites(flatten(resp.Node.Nodes), r.Username)
	if err != nil {
		return nil, err
	}

	// The hub calls watchCallback whenever a kite matching the query
	// registers or deregisters.
	if watchCallback != nil {
		k.watcherHub.RegisterWatcher(r.RemoteKite, &query, watchCallback)
	}

	return kitesWithToken, nil
}
// flatten walks the recursive etcd directory tree and returns only the
// leaf nodes, which are the stored Kites.
func flatten(in etcd.Nodes) etcd.Nodes {
	var leaves etcd.Nodes
	for _, n := range in {
		if n.Dir {
			leaves = append(leaves, flatten(n.Nodes)...)
		} else {
			leaves = append(leaves, n)
		}
	}
	return leaves
}
// addTokenToKites converts raw etcd nodes into kites, each paired with a
// freshly generated token for the given username.
func addTokenToKites(nodes etcd.Nodes, username string) ([]*protocol.KiteWithToken, error) {
	result := make([]*protocol.KiteWithToken, len(nodes))
	for i, n := range nodes {
		k, kodingKey, err := kiteFromEtcdKV(n.Key, n.Value)
		if err != nil {
			return nil, err
		}
		if result[i], err = addTokenToKite(k, username, kodingKey); err != nil {
			return nil, err
		}
	}
	return result, nil
}
// addTokenToKite pairs the kite with a newly generated JWT for username.
// The kodingKey parameter is currently unused; it is kept for call-site
// symmetry with addTokenToKites.
func addTokenToKite(kite *protocol.Kite, username, kodingKey string) (*protocol.KiteWithToken, error) {
	token, err := generateToken(kite, username)
	if err != nil {
		return nil, err
	}
	return &protocol.KiteWithToken{Kite: *kite, Token: token}, nil
}
// generateToken returns a signed JWT token string for username to talk to
// the given kite. Please see the URL for claim details:
// http://tools.ietf.org/html/draft-ietf-oauth-json-web-token-13#section-4.1
func generateToken(kite *protocol.Kite, username string) (string, error) {
	tknID, err := uuid.NewV4()
	if err != nil {
		return "", errors.New("Server error: Cannot generate a token")
	}

	// Identifies the expiration time after which the JWT MUST NOT be accepted
	// for processing.
	ttl := 1 * time.Hour

	// Implementers MAY provide for some small leeway, usually no more than
	// a few minutes, to account for clock skew.
	leeway := 1 * time.Minute

	// Capture the current time once so exp/nbf/iat are mutually
	// consistent; three separate time.Now() calls could straddle a
	// second boundary.
	now := time.Now().UTC()

	tkn := jwt.New(jwt.GetSigningMethod("RS256"))
	tkn.Claims["iss"] = "koding.com"                    // Issuer
	tkn.Claims["sub"] = username                        // Subject
	tkn.Claims["aud"] = kite.ID                         // Audience
	tkn.Claims["exp"] = now.Add(ttl).Add(leeway).Unix() // Expiration Time
	tkn.Claims["nbf"] = now.Add(-leeway).Unix()         // Not Before
	tkn.Claims["iat"] = now.Unix()                      // Issued At
	tkn.Claims["jti"] = tknID.String()                  // JWT ID

	signed, err := tkn.SignedString(rsaKey)
	if err != nil {
		return "", errors.New("Server error: Cannot generate a token")
	}
	return signed, nil
}
// kiteFromEtcdKV reconstructs a *protocol.Kite and its Koding Key from an
// etcd key/value pair. The key looks like:
// /kites/devrim/development/mathworker/1/localhost/tardis.local/662ed473-351f-4c9f-786b-99cf02cdaadb
func kiteFromEtcdKV(key, value string) (*protocol.Kite, string, error) {
	fields := strings.Split(strings.TrimPrefix(key, "/"), "/")
	// Exactly 8 path segments under the "kites" prefix are required. (The
	// previous `len(fields) > 0` guard was redundant once len == 8 held.)
	if len(fields) != 8 || fields[0] != "kites" {
		return nil, "", fmt.Errorf("Invalid Kite: %s", key)
	}

	kite := &protocol.Kite{
		Username:    fields[1],
		Environment: fields[2],
		Name:        fields[3],
		Version:     fields[4],
		Region:      fields[5],
		Hostname:    fields[6],
		ID:          fields[7],
	}

	// The value is empty for delete/expire events, so the unmarshal error
	// is deliberately ignored: the kite identity above is still valid and
	// URL/KodingKey simply stay zero-valued.
	rv := new(registerValue)
	json.Unmarshal([]byte(value), rv)
	kite.URL = rv.URL

	return kite, rv.KodingKey, nil
}
// WatchEtcd watches all Kite changes on the etcd cluster and notifies
// registered watchers on this Kontrol instance about deregistrations.
func (k *Kontrol) WatchEtcd() {
	// Obtain a current etcd index to start watching from: setting a
	// throw-away key (1-second TTL) is the cheapest way to learn the
	// cluster's current ModifiedIndex.
	var resp *etcd.Response
	var err error
	for {
		resp, err = k.etcd.Set("/_kontrol_get_index", "OK", 1)
		if err == nil {
			break
		}
		log.Critical("etcd error 1: %s", err.Error())
		time.Sleep(time.Second)
	}

	index := resp.Node.ModifiedIndex
	log.Info("etcd: index = %d", index)

	// Watch synchronously, one event at a time. The previous version ran
	// the watch in a separate goroutine that read `index` while this
	// goroutine wrote it — an unsynchronized data race. Calling Watch
	// with a nil receiver channel returns a single response, so one
	// goroutine can both resume from the right index and process events.
	for {
		resp, err := k.etcd.Watch(KitesPrefix, index+1, true, nil, nil)
		if err != nil {
			log.Critical("etcd error 2: %s", err)
			time.Sleep(time.Second)
			continue
		}

		index = resp.Node.ModifiedIndex

		// Notify deregistration events.
		if strings.HasPrefix(resp.Node.Key, KitesPrefix) && (resp.Action == "delete" || resp.Action == "expire") {
			// The node value is empty on delete/expire; kiteFromEtcdKV
			// still derives the kite identity from the key alone.
			kite, _, err := kiteFromEtcdKV(resp.Node.Key, resp.Node.Value)
			if err == nil {
				k.watcherHub.Notify(kite, protocol.Deregister, "")
			}
		}
	}
}
// handleGetToken is the handler of the "getToken" method. It generates a
// fresh JWT for the requested kite after checking that the caller may
// access it and that the kite is registered in etcd with a well-formed
// value.
func (k *Kontrol) handleGetToken(r *kite.Request) (interface{}, error) {
	var kite *protocol.Kite
	if err := r.Args.MustSliceOfLength(1)[0].Unmarshal(&kite); err != nil {
		return nil, errors.New("Invalid Kite")
	}

	if !canAccess(r.RemoteKite.Kite, *kite) {
		return nil, errors.New("Forbidden")
	}

	kiteKey, err := getKiteKey(kite)
	if err != nil {
		return nil, err
	}

	// The kite must be registered.
	resp, err := k.etcd.Get(kiteKey, false, false)
	if err != nil {
		return nil, err
	}

	// Parsed only to validate the stored record; the value itself is not
	// used below.
	var kiteVal registerValue
	if err := json.Unmarshal([]byte(resp.Node.Value), &kiteVal); err != nil {
		return nil, err
	}

	return generateToken(kite, r.Username)
}
// canAccess reports whether fromKite is allowed to talk to toKite. The
// owner always has access; other users are rejected for private kites and
// for non-production kites.
func canAccess(fromKite protocol.Kite, toKite protocol.Kite) bool {
	sameOwner := fromKite.Username == toKite.Username
	switch {
	case sameOwner:
		return true
	case toKite.Visibility == protocol.Private:
		// Private kites are reachable only by their owner.
		return false
	case toKite.Environment != "production":
		// Development/staging kites are reachable only by their owner.
		return false
	default:
		return true
	}
}
// AuthenticateFromSessionID is an authenticator that resolves the
// request's username from a Koding session ID.
func (k *Kontrol) AuthenticateFromSessionID(r *kite.Request) error {
	username, err := findUsernameFromSessionID(r.Authentication.Key)
	if err == nil {
		r.Username = username
	}
	return err
}
// findUsernameFromSessionID looks up the session in the database and
// returns the username it belongs to.
func findUsernameFromSessionID(sessionID string) (string, error) {
	s, err := modelhelper.GetSession(sessionID)
	if err != nil {
		return "", err
	}
	return s.Username, nil
}
// AuthenticateFromKodingKey is an authenticator that resolves the
// request's username from a Koding key.
func (k *Kontrol) AuthenticateFromKodingKey(r *kite.Request) error {
	username, err := findUsernameFromKey(r.Authentication.Key)
	if err == nil {
		r.Username = username
	}
	return err
}
// findUsernameFromKey resolves a Koding key to the nickname of the
// account that owns it.
func findUsernameFromKey(key string) (string, error) {
	kodingKey, err := modelhelper.GetKodingKeysByKey(key)
	if err != nil {
		return "", errors.New("kodingkey not found in kontrol db")
	}

	account, err := modelhelper.GetAccountById(kodingKey.Owner)
	if err != nil {
		return "", fmt.Errorf("register get user err %s", err)
	}

	// A kite cannot be identified without a nickname.
	nickname := account.Profile.Nickname
	if nickname == "" {
		return "", errors.New("nickname is empty, could not register kite")
	}
	return nickname, nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.