CombinedText stringlengths 4 3.42M |
|---|
// Copyright 2013 The lime Authors.
// Use of this source code is governed by a 2-clause
// BSD-style license that can be found in the LICENSE file.
package main
import (
"fmt"
"image/color"
"runtime"
"strings"
"sync"
"time"
"github.com/atotto/clipboard"
"gopkg.in/fsnotify.v1"
"github.com/limetext/backend"
"github.com/limetext/backend/keys"
"github.com/limetext/backend/log"
"github.com/limetext/backend/render"
"github.com/limetext/qml-go"
. "github.com/limetext/text"
"github.com/limetext/util"
)
const (
batching_enabled = true
qmlWindowFile = "qml/Window.qml"
qmlViewFile = "qml/View.qml"
// http://qt-project.org/doc/qt-5.1/qtcore/qt.html#KeyboardModifier-enum
shift_mod = 0x02000000
ctrl_mod = 0x04000000
alt_mod = 0x08000000
meta_mod = 0x10000000
keypad_mod = 0x20000000
)
type (
// keeping track of frontend state
frontend struct {
lock sync.Mutex
windows map[*backend.Window]*window
Console *view
qmlDispatch chan qmlDispatch
promptWaitGroup sync.WaitGroup
promptResult string
}
// Used for batching qml.Changed calls
qmlDispatch struct{ value, field interface{} }
)
var fe *frontend
func initFrontend() {
fe = &frontend{
windows: make(map[*backend.Window]*window),
}
go fe.qmlBatchLoop()
qml.Run(fe.loop)
}
func (f *frontend) window(w *backend.Window) *window {
return f.windows[w]
}
func (f *frontend) Show(bv *backend.View, r Region) {
// TODO
}
func (f *frontend) VisibleRegion(bv *backend.View) Region {
// TODO
return Region{0, bv.Size()}
}
func (f *frontend) StatusMessage(msg string) {
w := f.windows[backend.GetEditor().ActiveWindow()]
w.qw.Call("setFrontendStatus", msg)
go func() {
time.Sleep(5 * time.Second)
w.qw.Call("setFrontendStatus", "")
}()
}
const (
noIcon = iota
informationIcon
warningIcon
criticalIcon
questionIcon
okButton = 1024
cancelButton = 4194304
)
// message shows a modal message dialog with the given text, icon and
// standard-button mask, blocking until the user dismisses it.
// It returns the dialog result delivered via PromptClosed
// (e.g. "accepted" or "rejected").
func (f *frontend) message(text string, icon, btns int) string {
	w := f.windows[backend.GetEditor().ActiveWindow()]
	obj := w.qw.ObjectByName("messageDialog")
	obj.Set("text", text)
	obj.Set("icon", icon)
	obj.Set("standardButtons", btns)
	f.promptWaitGroup.Add(1)
	obj.Call("open")
	// PromptClosed stores the result and releases the wait group.
	f.promptWaitGroup.Wait()
	// promptResult is a string; the old %d verb printed garbage.
	log.Fine("returning %s from dialog", f.promptResult)
	return f.promptResult
}
func (f *frontend) ErrorMessage(msg string) {
log.Error(msg)
f.message(msg, criticalIcon, okButton)
}
func (f *frontend) MessageDialog(msg string) {
f.message(msg, informationIcon, okButton)
}
// OkCancelDialog shows a question dialog with OK and Cancel buttons and
// reports whether the user accepted it.
// NOTE(review): the ok parameter (presumably a custom OK-button label) is
// currently unused — confirm whether the QML dialog should display it.
func (f *frontend) OkCancelDialog(msg, ok string) bool {
return f.message(msg, questionIcon, okButton|cancelButton) == "accepted"
}
// Prompt opens the active window's QML file dialog configured by flags
// (save-as, folder-only, multi-select), blocks until the user closes it,
// and returns the selected paths with any "file://" scheme stripped.
// It returns nil when the dialog is dismissed without accepting.
func (f *frontend) Prompt(title, folder string, flags int) []string {
	w := f.windows[backend.GetEditor().ActiveWindow()]
	obj := w.qw.ObjectByName("fileDialog")
	obj.Set("title", title)
	obj.Set("folder", "file://"+folder)
	obj.Set("selectExisting", flags&backend.PROMPT_SAVE_AS == 0)
	// Bit-flag tests must compare the masked value against zero; comparing
	// to 1 only works if the flag constant happens to be the lowest bit.
	obj.Set("selectFolder", flags&backend.PROMPT_ONLY_FOLDER != 0)
	obj.Set("selectMultiple", flags&backend.PROMPT_SELECT_MULTIPLE != 0)
	f.promptWaitGroup.Add(1)
	obj.Call("open")
	f.promptWaitGroup.Wait()
	if f.promptResult != "accepted" {
		return nil
	}
	res := obj.List("fileUrls")
	files := make([]string, res.Len())
	res.Convert(&files)
	for i, file := range files {
		// TrimPrefix is a no-op when the scheme is absent and, unlike
		// file[:7], cannot panic on strings shorter than seven bytes.
		files[i] = strings.TrimPrefix(file, "file://")
	}
	log.Fine("Selected %s files", files)
	return files
}
func (f *frontend) PromptClosed(result string) {
f.promptResult = result
f.promptWaitGroup.Done()
}
func (f *frontend) scroll(b Buffer) {
f.Show(backend.GetEditor().Console(), Region{b.Size(), b.Size()})
}
func (f *frontend) Erased(changed_buffer Buffer, region_removed Region, data_removed []rune) {
f.scroll(changed_buffer)
}
func (f *frontend) Inserted(changed_buffer Buffer, region_inserted Region, data_inserted []rune) {
f.scroll(changed_buffer)
}
// Apparently calling qml.Changed also triggers a re-draw, meaning that typed text is at the
// mercy of how quick Qt happens to be rendering.
// Try setting batching_enabled = false to see the effects of non-batching
// qmlBatchLoop drains f.qmlDispatch and coalesces duplicate
// (value, field) change notifications, forwarding them to qml.Changed
// only after the channel has been idle for 20ms. Batching keeps rapid
// typing from being throttled by Qt re-drawing on every Changed call.
func (f *frontend) qmlBatchLoop() {
// Map used as a set: duplicate dispatches collapse into one qml.Changed.
queue := make(map[qmlDispatch]bool)
f.qmlDispatch = make(chan qmlDispatch, 1000)
for {
if len(queue) > 0 {
select {
case <-time.After(time.Millisecond * 20):
// Nothing happened for 20 milliseconds, so dispatch all queued changes
for k := range queue {
qml.Changed(k.value, k.field)
}
queue = make(map[qmlDispatch]bool)
case d := <-f.qmlDispatch:
queue[d] = true
}
} else {
// Block until at least one change is queued.
queue[<-f.qmlDispatch] = true
}
}
}
func (f *frontend) qmlChanged(value, field interface{}) {
if !batching_enabled {
qml.Changed(value, field)
} else {
f.qmlDispatch <- qmlDispatch{value, field}
}
}
func (f *frontend) DefaultBg() color.RGBA {
c := f.colorScheme().Spice(&render.ViewRegions{})
c.Background.A = 0xff
return color.RGBA(c.Background)
}
func (f *frontend) DefaultFg() color.RGBA {
c := f.colorScheme().Spice(&render.ViewRegions{})
c.Foreground.A = 0xff
return color.RGBA(c.Foreground)
}
// Called when a new view is opened
func (f *frontend) onNew(bv *backend.View) {
w := f.windows[bv.Window()]
v := newView(bv)
w.views[bv] = v
if w.qw != nil {
w.qw.Call("addTab", v.id, v)
w.qw.Call("activateTab", v.id)
}
}
// called when a view is closed
func (f *frontend) onClose(bv *backend.View) {
w := f.windows[bv.Window()]
v := w.views[bv]
if v == nil {
log.Error("Couldn't find closed view...")
return
}
w.qw.Call("removeTab", v.id)
delete(w.views, bv)
}
// called when a view has loaded
func (f *frontend) onLoad(bv *backend.View) {
w := f.windows[bv.Window()]
v := w.views[bv]
if v == nil {
log.Error("Couldn't find loaded view")
return
}
v.Title = bv.FileName()
w.qw.Call("setTabTitle", v.id, v.Title)
}
func (f *frontend) onSelectionModified(bv *backend.View) {
w := f.windows[bv.Window()]
v := w.views[bv]
if v == nil {
log.Error("Couldn't find modified view")
return
}
if v.qv == nil {
return
}
v.qv.Call("onSelectionModified")
}
func (f *frontend) onStatusChanged(bv *backend.View) {
w := f.windows[bv.Window()]
v := w.views[bv]
if v == nil {
log.Error("Couldn't find status changed view")
return
}
if v.qv == nil {
return
}
v.qv.Call("onStatusChanged")
}
// Launches the provided command in a new goroutine
// (to avoid locking up the GUI)
func (f *frontend) RunCommand(command string) {
f.RunCommandWithArgs(command, make(backend.Args))
}
func (f *frontend) RunCommandWithArgs(command string, args backend.Args) {
ed := backend.GetEditor()
go ed.RunCommand(command, args)
}
// HandleInput translates a Qt key event into a backend KeyPress and
// forwards it to the editor. It reports whether the keycode was known
// (present in the lookup table) and therefore handled.
func (f *frontend) HandleInput(text string, keycode int, modifiers int) bool {
	log.Debug("frontend.HandleInput: text=%v, key=%x, modifiers=%x", text, keycode, modifiers)
	key, known := lut[keycode]
	if !known {
		return false
	}
	press := keys.KeyPress{
		Text:  text,
		Key:   key,
		Shift: modifiers&shift_mod != 0,
		Alt:   modifiers&alt_mod != 0,
	}
	// On darwin the Qt "ctrl" modifier is the command key and "meta" is
	// the real control key, so the two map to the opposite KeyPress
	// fields compared to other platforms.
	darwin := runtime.GOOS == "darwin"
	if modifiers&ctrl_mod != 0 {
		if darwin {
			press.Super = true
		} else {
			press.Ctrl = true
		}
	}
	if modifiers&meta_mod != 0 {
		if darwin {
			press.Ctrl = true
		} else {
			press.Super = true
		}
	}
	backend.GetEditor().HandleInput(press)
	return true
}
func (f *frontend) colorScheme() backend.ColorScheme {
ed := backend.GetEditor()
return ed.GetColorScheme(ed.Settings().String("color_scheme", ""))
}
// Quit closes all open windows to de-reference all qml objects
func (f *frontend) Quit() (err error) {
// TODO: handle changed files that aren't saved.
for _, w := range f.windows {
if w.qw != nil {
w.qw.Hide()
w.qw.Destroy()
w.qw = nil
}
}
return
}
func (f *frontend) loop() (err error) {
ed := backend.GetEditor()
// TODO: As InitCallback doc says initiation code to be deferred until
// after the UI is up and running. but because we dont have any
// scheme we are initing editor before the UI comes up.
ed.Init()
ed.SetDefaultPath("../packages/Default")
ed.SetUserPath("../packages/User")
ed.SetClipboardFuncs(clipboard.WriteAll, clipboard.ReadAll)
// Some packages(e.g Vintageos) need available window and view at start
// so we need at least one window and view before loading packages.
// Sublime text also has available window view on startup
w := ed.NewWindow()
w.NewFile()
ed.AddPackagesPath("../packages")
ed.SetFrontend(f)
ed.LogInput(false)
ed.LogCommands(false)
c := ed.Console()
f.Console = newView(c)
c.AddObserver(f.Console)
c.AddObserver(f)
var (
engine *qml.Engine
component qml.Object
// WaitGroup keeping track of open windows
wg sync.WaitGroup
)
// create and setup a new engine, destroying
// the old one if one exists.
//
// This is needed to re-load qml files to get
// the new file contents from disc as otherwise
// the old file would still be what is referenced.
newEngine := func() (err error) {
if engine != nil {
log.Debug("calling destroy")
// TODO(.): calling this appears to make the editor *very* crash-prone, just let it leak for now
// engine.Destroy()
engine = nil
}
log.Debug("calling newEngine")
engine = qml.NewEngine()
engine.On("quit", f.Quit)
log.Fine("setvar frontend")
engine.Context().SetVar("frontend", f)
qml.SetApplicationDisplayName("LimeText")
qml.SetWindowIcon("qml/lime.png")
// qml.SetDesktopFileName("qml/org.limetext.qml.LimeText.desktop")
log.Fine("loading %s", qmlWindowFile)
component, err = engine.LoadFile(qmlWindowFile)
return
}
if err := newEngine(); err != nil {
log.Error("Error on creating new engine: %s", err)
panic(err)
}
addWindow := func(bw *backend.Window) {
w := newWindow(bw)
f.windows[bw] = w
w.launch(&wg, component)
}
backend.OnNew.Add(f.onNew)
backend.OnClose.Add(f.onClose)
backend.OnLoad.Add(f.onLoad)
backend.OnSelectionModified.Add(f.onSelectionModified)
backend.OnNewWindow.Add(addWindow)
backend.OnStatusChanged.Add(f.onStatusChanged)
// we need to add windows and views that are added before we registered
// actions for OnNewWindow and OnNew events
for _, w := range ed.Windows() {
addWindow(w)
for _, v := range w.Views() {
f.onNew(v)
f.onLoad(v)
}
}
defer func() {
fmt.Println(util.Prof)
}()
// The rest of code is related to livereloading qml files
// TODO: this doesnt work currently
watch, err := fsnotify.NewWatcher()
if err != nil {
log.Error("Unable to create file watcher: %s", err)
return
}
defer watch.Close()
watch.Add("qml")
defer watch.Remove("qml")
reloadRequested := false
waiting := false
go func() {
// reloadRequested = true
// f.Quit()
lastTime := time.Now()
for {
select {
case ev := <-watch.Events:
if time.Now().Sub(lastTime) < 1*time.Second {
// quitting too frequently causes crashes
lastTime = time.Now()
continue
}
if strings.HasSuffix(ev.Name, ".qml") && ev.Op == fsnotify.Write && ev.Op != fsnotify.Chmod && !reloadRequested && waiting {
reloadRequested = true
f.Quit()
lastTime = time.Now()
}
}
}
}()
for {
// Reset reload status
reloadRequested = false
log.Debug("Waiting for all windows to close")
// wg would be the WaitGroup all windows belong to, so first we wait for
// all windows to close.
waiting = true
wg.Wait()
waiting = false
log.Debug("All windows closed. reloadRequest: %v", reloadRequested)
// then we check if there's a reload request in the pipe
if !reloadRequested || len(f.windows) == 0 {
// This would be a genuine exit; all windows closed by the user
break
}
// *We* closed all windows because we want to reload freshly changed qml
// files.
for {
log.Debug("Calling newEngine")
if err := newEngine(); err != nil {
// Reset reload status
reloadRequested = false
waiting = true
log.Error(err)
for !reloadRequested {
// This loop allows us to re-try reloading
// if there was an error in the file this time,
// we just loop around again when we receive the next
// reload request (ie on the next save of the file).
time.Sleep(time.Second)
}
waiting = false
continue
}
log.Debug("break")
break
}
log.Debug("re-launching all windows")
// Succeeded loading the file, re-launch all windows
for _, w := range f.windows {
w.launch(&wg, component)
for _, bv := range w.Back().Views() {
f.onNew(bv)
f.onLoad(bv)
}
}
}
return
}
qmlChanged now batches change notifications in arrival order, and dispatches them more often
// Copyright 2013 The lime Authors.
// Use of this source code is governed by a 2-clause
// BSD-style license that can be found in the LICENSE file.
package main
import (
"fmt"
"image/color"
"runtime"
"strings"
"sync"
"time"
"github.com/atotto/clipboard"
"gopkg.in/fsnotify.v1"
"github.com/limetext/backend"
"github.com/limetext/backend/keys"
"github.com/limetext/backend/log"
"github.com/limetext/backend/render"
"github.com/limetext/qml-go"
. "github.com/limetext/text"
"github.com/limetext/util"
)
const (
batching_enabled = true
qmlWindowFile = "qml/Window.qml"
qmlViewFile = "qml/View.qml"
// http://qt-project.org/doc/qt-5.1/qtcore/qt.html#KeyboardModifier-enum
shift_mod = 0x02000000
ctrl_mod = 0x04000000
alt_mod = 0x08000000
meta_mod = 0x10000000
keypad_mod = 0x20000000
)
type (
// keeping track of frontend state
frontend struct {
lock sync.Mutex
windows map[*backend.Window]*window
Console *view
qmlDispatch chan qmlDispatch
promptWaitGroup sync.WaitGroup
promptResult string
}
// Used for batching qml.Changed calls
qmlDispatch struct{ value, field interface{} }
)
var fe *frontend
func initFrontend() {
fe = &frontend{
windows: make(map[*backend.Window]*window),
}
go fe.qmlBatchLoop()
qml.Run(fe.loop)
}
func (f *frontend) window(w *backend.Window) *window {
return f.windows[w]
}
func (f *frontend) Show(bv *backend.View, r Region) {
// TODO
}
func (f *frontend) VisibleRegion(bv *backend.View) Region {
// TODO
return Region{0, bv.Size()}
}
func (f *frontend) StatusMessage(msg string) {
w := f.windows[backend.GetEditor().ActiveWindow()]
w.qw.Call("setFrontendStatus", msg)
go func() {
time.Sleep(5 * time.Second)
w.qw.Call("setFrontendStatus", "")
}()
}
const (
noIcon = iota
informationIcon
warningIcon
criticalIcon
questionIcon
okButton = 1024
cancelButton = 4194304
)
// message shows a modal message dialog with the given text, icon and
// standard-button mask, blocking until the user dismisses it.
// It returns the dialog result delivered via PromptClosed
// (e.g. "accepted" or "rejected").
func (f *frontend) message(text string, icon, btns int) string {
	w := f.windows[backend.GetEditor().ActiveWindow()]
	obj := w.qw.ObjectByName("messageDialog")
	obj.Set("text", text)
	obj.Set("icon", icon)
	obj.Set("standardButtons", btns)
	f.promptWaitGroup.Add(1)
	obj.Call("open")
	// PromptClosed stores the result and releases the wait group.
	f.promptWaitGroup.Wait()
	// promptResult is a string; the old %d verb printed garbage.
	log.Fine("returning %s from dialog", f.promptResult)
	return f.promptResult
}
func (f *frontend) ErrorMessage(msg string) {
log.Error(msg)
f.message(msg, criticalIcon, okButton)
}
func (f *frontend) MessageDialog(msg string) {
f.message(msg, informationIcon, okButton)
}
func (f *frontend) OkCancelDialog(msg, ok string) bool {
return f.message(msg, questionIcon, okButton|cancelButton) == "accepted"
}
// Prompt opens the active window's QML file dialog configured by flags
// (save-as, folder-only, multi-select), blocks until the user closes it,
// and returns the selected paths with any "file://" scheme stripped.
// It returns nil when the dialog is dismissed without accepting.
func (f *frontend) Prompt(title, folder string, flags int) []string {
	w := f.windows[backend.GetEditor().ActiveWindow()]
	obj := w.qw.ObjectByName("fileDialog")
	obj.Set("title", title)
	obj.Set("folder", "file://"+folder)
	obj.Set("selectExisting", flags&backend.PROMPT_SAVE_AS == 0)
	// Bit-flag tests must compare the masked value against zero; comparing
	// to 1 only works if the flag constant happens to be the lowest bit.
	obj.Set("selectFolder", flags&backend.PROMPT_ONLY_FOLDER != 0)
	obj.Set("selectMultiple", flags&backend.PROMPT_SELECT_MULTIPLE != 0)
	f.promptWaitGroup.Add(1)
	obj.Call("open")
	f.promptWaitGroup.Wait()
	if f.promptResult != "accepted" {
		return nil
	}
	res := obj.List("fileUrls")
	files := make([]string, res.Len())
	res.Convert(&files)
	for i, file := range files {
		// TrimPrefix is a no-op when the scheme is absent and, unlike
		// file[:7], cannot panic on strings shorter than seven bytes.
		files[i] = strings.TrimPrefix(file, "file://")
	}
	log.Fine("Selected %s files", files)
	return files
}
func (f *frontend) PromptClosed(result string) {
f.promptResult = result
f.promptWaitGroup.Done()
}
func (f *frontend) scroll(b Buffer) {
f.Show(backend.GetEditor().Console(), Region{b.Size(), b.Size()})
}
func (f *frontend) Erased(changed_buffer Buffer, region_removed Region, data_removed []rune) {
f.scroll(changed_buffer)
}
func (f *frontend) Inserted(changed_buffer Buffer, region_inserted Region, data_inserted []rune) {
f.scroll(changed_buffer)
}
// Apparently calling qml.Changed also triggers a re-draw, meaning that typed text is at the
// mercy of how quick Qt happens to be rendering.
// Try setting batching_enabled = false to see the effects of non-batching
// qmlBatchLoop drains f.qmlDispatch and batches change notifications,
// forwarding them to qml.Changed in arrival order once the channel has
// been idle for 8ms. Batching keeps rapid typing from being throttled
// by Qt re-drawing on every Changed call.
func (f *frontend) qmlBatchLoop() {
// A slice (unlike the earlier map) preserves dispatch order; duplicates
// are forwarded as-is.
queue := make([]qmlDispatch, 0, 128)
f.qmlDispatch = make(chan qmlDispatch, 1000)
for {
if len(queue) > 0 {
select {
// QML likes to render at 60 fps, or 16 milliseconds per frame
case <-time.After(time.Millisecond * 8):
// Nothing happened for 8 milliseconds, so dispatch all queued changes
for _, k := range queue {
qml.Changed(k.value, k.field)
}
// Reslice to zero length, keeping the backing array for reuse.
queue = queue[0:0]
case d := <-f.qmlDispatch:
queue = append(queue, d)
}
} else {
// Block until at least one change is queued.
dispatch := <-f.qmlDispatch
queue = append(queue, dispatch)
}
}
}
func (f *frontend) qmlChanged(value, field interface{}) {
if !batching_enabled {
qml.Changed(value, field)
} else {
f.qmlDispatch <- qmlDispatch{value, field}
}
}
func (f *frontend) DefaultBg() color.RGBA {
c := f.colorScheme().Spice(&render.ViewRegions{})
c.Background.A = 0xff
return color.RGBA(c.Background)
}
func (f *frontend) DefaultFg() color.RGBA {
c := f.colorScheme().Spice(&render.ViewRegions{})
c.Foreground.A = 0xff
return color.RGBA(c.Foreground)
}
// Called when a new view is opened
func (f *frontend) onNew(bv *backend.View) {
w := f.windows[bv.Window()]
v := newView(bv)
w.views[bv] = v
if w.qw != nil {
w.qw.Call("addTab", v.id, v)
w.qw.Call("activateTab", v.id)
}
}
// called when a view is closed
func (f *frontend) onClose(bv *backend.View) {
w := f.windows[bv.Window()]
v := w.views[bv]
if v == nil {
log.Error("Couldn't find closed view...")
return
}
w.qw.Call("removeTab", v.id)
delete(w.views, bv)
}
// called when a view has loaded
func (f *frontend) onLoad(bv *backend.View) {
w := f.windows[bv.Window()]
v := w.views[bv]
if v == nil {
log.Error("Couldn't find loaded view")
return
}
v.Title = bv.FileName()
w.qw.Call("setTabTitle", v.id, v.Title)
}
func (f *frontend) onSelectionModified(bv *backend.View) {
w := f.windows[bv.Window()]
v := w.views[bv]
if v == nil {
log.Error("Couldn't find modified view")
return
}
if v.qv == nil {
return
}
v.qv.Call("onSelectionModified")
}
func (f *frontend) onStatusChanged(bv *backend.View) {
w := f.windows[bv.Window()]
v := w.views[bv]
if v == nil {
log.Error("Couldn't find status changed view")
return
}
if v.qv == nil {
return
}
v.qv.Call("onStatusChanged")
}
// Launches the provided command in a new goroutine
// (to avoid locking up the GUI)
func (f *frontend) RunCommand(command string) {
f.RunCommandWithArgs(command, make(backend.Args))
}
func (f *frontend) RunCommandWithArgs(command string, args backend.Args) {
ed := backend.GetEditor()
go ed.RunCommand(command, args)
}
func (f *frontend) HandleInput(text string, keycode int, modifiers int) bool {
log.Debug("frontend.HandleInput: text=%v, key=%x, modifiers=%x", text, keycode, modifiers)
shift := false
alt := false
ctrl := false
super := false
if key, ok := lut[keycode]; ok {
ed := backend.GetEditor()
if modifiers&shift_mod != 0 {
shift = true
}
if modifiers&alt_mod != 0 {
alt = true
}
if modifiers&ctrl_mod != 0 {
if runtime.GOOS == "darwin" {
super = true
} else {
ctrl = true
}
}
if modifiers&meta_mod != 0 {
if runtime.GOOS == "darwin" {
ctrl = true
} else {
super = true
}
}
ed.HandleInput(keys.KeyPress{Text: text, Key: key, Shift: shift, Alt: alt, Ctrl: ctrl, Super: super})
return true
}
return false
}
func (f *frontend) colorScheme() backend.ColorScheme {
ed := backend.GetEditor()
return ed.GetColorScheme(ed.Settings().String("color_scheme", ""))
}
// Quit closes all open windows to de-reference all qml objects
func (f *frontend) Quit() (err error) {
// TODO: handle changed files that aren't saved.
for _, w := range f.windows {
if w.qw != nil {
w.qw.Hide()
w.qw.Destroy()
w.qw = nil
}
}
return
}
func (f *frontend) loop() (err error) {
ed := backend.GetEditor()
// TODO: As InitCallback doc says initiation code to be deferred until
// after the UI is up and running. but because we dont have any
// scheme we are initing editor before the UI comes up.
ed.Init()
ed.SetDefaultPath("../packages/Default")
ed.SetUserPath("../packages/User")
ed.SetClipboardFuncs(clipboard.WriteAll, clipboard.ReadAll)
// Some packages(e.g Vintageos) need available window and view at start
// so we need at least one window and view before loading packages.
// Sublime text also has available window view on startup
w := ed.NewWindow()
w.NewFile()
ed.AddPackagesPath("../packages")
ed.SetFrontend(f)
ed.LogInput(false)
ed.LogCommands(false)
c := ed.Console()
f.Console = newView(c)
c.AddObserver(f.Console)
c.AddObserver(f)
var (
engine *qml.Engine
component qml.Object
// WaitGroup keeping track of open windows
wg sync.WaitGroup
)
// create and setup a new engine, destroying
// the old one if one exists.
//
// This is needed to re-load qml files to get
// the new file contents from disc as otherwise
// the old file would still be what is referenced.
newEngine := func() (err error) {
if engine != nil {
log.Debug("calling destroy")
// TODO(.): calling this appears to make the editor *very* crash-prone, just let it leak for now
// engine.Destroy()
engine = nil
}
log.Debug("calling newEngine")
engine = qml.NewEngine()
engine.On("quit", f.Quit)
log.Fine("setvar frontend")
engine.Context().SetVar("frontend", f)
qml.SetApplicationDisplayName("LimeText")
qml.SetWindowIcon("qml/lime.png")
// qml.SetDesktopFileName("qml/org.limetext.qml.LimeText.desktop")
log.Fine("loading %s", qmlWindowFile)
component, err = engine.LoadFile(qmlWindowFile)
return
}
if err := newEngine(); err != nil {
log.Error("Error on creating new engine: %s", err)
panic(err)
}
addWindow := func(bw *backend.Window) {
w := newWindow(bw)
f.windows[bw] = w
w.launch(&wg, component)
}
backend.OnNew.Add(f.onNew)
backend.OnClose.Add(f.onClose)
backend.OnLoad.Add(f.onLoad)
backend.OnSelectionModified.Add(f.onSelectionModified)
backend.OnNewWindow.Add(addWindow)
backend.OnStatusChanged.Add(f.onStatusChanged)
// we need to add windows and views that are added before we registered
// actions for OnNewWindow and OnNew events
for _, w := range ed.Windows() {
addWindow(w)
for _, v := range w.Views() {
f.onNew(v)
f.onLoad(v)
}
}
defer func() {
fmt.Println(util.Prof)
}()
// The rest of code is related to livereloading qml files
// TODO: this doesnt work currently
watch, err := fsnotify.NewWatcher()
if err != nil {
log.Error("Unable to create file watcher: %s", err)
return
}
defer watch.Close()
watch.Add("qml")
defer watch.Remove("qml")
reloadRequested := false
waiting := false
go func() {
// reloadRequested = true
// f.Quit()
lastTime := time.Now()
for {
select {
case ev := <-watch.Events:
if time.Now().Sub(lastTime) < 1*time.Second {
// quitting too frequently causes crashes
lastTime = time.Now()
continue
}
if strings.HasSuffix(ev.Name, ".qml") && ev.Op == fsnotify.Write && ev.Op != fsnotify.Chmod && !reloadRequested && waiting {
reloadRequested = true
f.Quit()
lastTime = time.Now()
}
}
}
}()
for {
// Reset reload status
reloadRequested = false
log.Debug("Waiting for all windows to close")
// wg would be the WaitGroup all windows belong to, so first we wait for
// all windows to close.
waiting = true
wg.Wait()
waiting = false
log.Debug("All windows closed. reloadRequest: %v", reloadRequested)
// then we check if there's a reload request in the pipe
if !reloadRequested || len(f.windows) == 0 {
// This would be a genuine exit; all windows closed by the user
break
}
// *We* closed all windows because we want to reload freshly changed qml
// files.
for {
log.Debug("Calling newEngine")
if err := newEngine(); err != nil {
// Reset reload status
reloadRequested = false
waiting = true
log.Error(err)
for !reloadRequested {
// This loop allows us to re-try reloading
// if there was an error in the file this time,
// we just loop around again when we receive the next
// reload request (ie on the next save of the file).
time.Sleep(time.Second)
}
waiting = false
continue
}
log.Debug("break")
break
}
log.Debug("re-launching all windows")
// Succeeded loading the file, re-launch all windows
for _, w := range f.windows {
w.launch(&wg, component)
for _, bv := range w.Back().Views() {
f.onNew(bv)
f.onLoad(bv)
}
}
}
return
}
|
package nds
import (
"encoding/binary"
"errors"
"math/rand"
"reflect"
"time"
"appengine/datastore"
)
const (
// memcachePrefix is the namespace memcache uses to store entities.
memcachePrefix = "NDS0:"
// memcacheLockTime is the maximum length of time a memcache lock will be
// held for. 32 seconds is choosen as 30 seconds is the maximum amount of
// time an underlying datastore call will retry even if the API reports a
// success to the user.
memcacheLockTime = 32 * time.Second
)
var (
typeOfPropertyLoadSaver = reflect.TypeOf(
(*datastore.PropertyLoadSaver)(nil)).Elem()
typeOfPropertyList = reflect.TypeOf(datastore.PropertyList(nil))
ErrInvalidKey = datastore.ErrInvalidKey
ErrNoSuchEntity = datastore.ErrNoSuchEntity
)
const (
noneItem uint32 = iota
entityItem
lockItem
)
// itemLock returns a fresh 4-byte little-endian random token used to
// identify the holder of a memcache lock.
func itemLock() []byte {
	var token [4]byte
	binary.LittleEndian.PutUint32(token[:], rand.Uint32())
	return token[:]
}
// checkArgs validates a single key/value argument pair: both must be
// non-nil, val must not be a datastore.PropertyList, and val's kind
// must be a slice or a pointer.
func checkArgs(key *datastore.Key, val interface{}) error {
	switch {
	case key == nil:
		return errors.New("nds: key is nil")
	case val == nil:
		return errors.New("nds: val is nil")
	}
	rv := reflect.ValueOf(val)
	if rv.Type() == typeOfPropertyList {
		return errors.New("nds: PropertyList not supported")
	}
	if k := rv.Kind(); k == reflect.Slice || k == reflect.Ptr {
		return nil
	}
	return errors.New("nds: val must be a slice or pointer")
}
func checkMultiArgs(keys []*datastore.Key, v reflect.Value) error {
if v.Kind() != reflect.Slice {
return errors.New("nds: vals is not a slice")
}
if len(keys) != v.Len() {
return errors.New("nds: keys and vals slices have different length")
}
if v.Type() == typeOfPropertyList {
return errors.New("nds: PropertyList not supported")
}
elemType := v.Type().Elem()
if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) {
return errors.New("nds: PropertyLoadSaver not supporded")
}
switch elemType.Kind() {
case reflect.Struct, reflect.Interface:
return nil
case reflect.Ptr:
elemType = elemType.Elem()
if elemType.Kind() == reflect.Struct {
return nil
}
}
return errors.New("nds: unsupported vals type")
}
func createMemcacheKey(key *datastore.Key) string {
return memcachePrefix + key.Encode()
}
Restricted Get to only allow pointers.
package nds
import (
"encoding/binary"
"errors"
"math/rand"
"reflect"
"time"
"appengine/datastore"
)
const (
// memcachePrefix is the namespace memcache uses to store entities.
memcachePrefix = "NDS0:"
// memcacheLockTime is the maximum length of time a memcache lock will be
// held for. 32 seconds is choosen as 30 seconds is the maximum amount of
// time an underlying datastore call will retry even if the API reports a
// success to the user.
memcacheLockTime = 32 * time.Second
)
var (
typeOfPropertyLoadSaver = reflect.TypeOf(
(*datastore.PropertyLoadSaver)(nil)).Elem()
typeOfPropertyList = reflect.TypeOf(datastore.PropertyList(nil))
ErrInvalidKey = datastore.ErrInvalidKey
ErrNoSuchEntity = datastore.ErrNoSuchEntity
)
const (
noneItem uint32 = iota
entityItem
lockItem
)
func itemLock() []byte {
b := make([]byte, 4)
binary.LittleEndian.PutUint32(b, rand.Uint32())
return b
}
// checkArgs validates the key/value pair passed to Get: both must be
// non-nil, val must not be a datastore.PropertyList, and val must be a
// pointer (Get was restricted to pointers; slices are handled by the
// Multi variants).
func checkArgs(key *datastore.Key, val interface{}) error {
	if key == nil {
		return errors.New("nds: key is nil")
	}
	if val == nil {
		return errors.New("nds: val is nil")
	}
	v := reflect.ValueOf(val)
	if v.Type() == typeOfPropertyList {
		return errors.New("nds: PropertyList not supported")
	}
	switch v.Kind() {
	case reflect.Ptr:
		return nil
	default:
		// Only pointers are accepted now; the old message incorrectly
		// still claimed slices were allowed.
		return errors.New("nds: val must be a pointer")
	}
}
// checkMultiArgs validates the parallel keys/vals arguments of the
// Multi operations: vals must be a slice of the same length as keys,
// must not be a datastore.PropertyList, its elements must not implement
// datastore.PropertyLoadSaver, and each element must be a struct, an
// interface, or a pointer to a struct.
func checkMultiArgs(keys []*datastore.Key, v reflect.Value) error {
	if v.Kind() != reflect.Slice {
		return errors.New("nds: vals is not a slice")
	}
	if len(keys) != v.Len() {
		return errors.New("nds: keys and vals slices have different length")
	}
	if v.Type() == typeOfPropertyList {
		return errors.New("nds: PropertyList not supported")
	}
	elemType := v.Type().Elem()
	if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) {
		// Fixed typo in the error message ("supporded" -> "supported").
		return errors.New("nds: PropertyLoadSaver not supported")
	}
	switch elemType.Kind() {
	case reflect.Struct, reflect.Interface:
		return nil
	case reflect.Ptr:
		if elemType.Elem().Kind() == reflect.Struct {
			return nil
		}
	}
	return errors.New("nds: unsupported vals type")
}
func createMemcacheKey(key *datastore.Key) string {
return memcachePrefix + key.Encode()
}
|
package gps
import (
"strings"
"fmt"
"log"
"strconv"
"reflect"
)
// func validateNMEAChecksum determines if a string is a properly formatted NMEA sentence with a valid checksum.
//
// If the input string is valid, output is the input stripped of the "$" token and checksum, along with a boolean 'true'
// If the input string is the incorrect format, the checksum is missing/invalid, or checksum calculation fails, an error string and
// boolean 'false' are returned
//
// Checksum is calculated as XOR of all bytes between "$" and "*"
// validateNMEAChecksum checks that s is a well-formed NMEA sentence
// ("$...*hh") whose two-digit hex checksum matches the XOR of all
// payload bytes between the "$" and the "*". On success it returns the
// payload (without "$" and checksum) and true; on failure it returns a
// human-readable error description and false.
func validateNMEAChecksum(s string) (string, bool) {
	if !strings.HasPrefix(s, "$") || !strings.Contains(s, "*") {
		return "Invalid NMEA message", false
	}
	// Drop the leading "$" and separate payload from checksum text.
	parts := strings.Split(strings.TrimPrefix(s, "$"), "*")
	payload, csText := parts[0], parts[1]
	if len(csText) < 2 {
		return "Missing checksum. Fewer than two bytes after asterisk", false
	}
	want, err := strconv.ParseUint(csText[:2], 16, 8)
	if err != nil {
		return "Invalid checksum", false
	}
	var got byte
	for i := 0; i < len(payload); i++ {
		got ^= payload[i]
	}
	if got != byte(want) {
		return fmt.Sprintf("Checksum failed. Calculated %#X; expected %#X", got, want), false
	}
	return payload, true
}
// createChecksummedNMEASentence wraps a raw NMEA payload in
// "$<payload>*hh\r\n", where hh is the two-digit hex XOR checksum of
// every payload byte.
func createChecksummedNMEASentence(raw []byte) []byte {
	var checksum byte
	for i := range raw {
		checksum ^= raw[i]
	}
	return []byte(fmt.Sprintf("$%s*%02X\r\n", raw, checksum))
}
// processNMEASentence validates line's NMEA checksum and, when valid,
// dispatches the payload to the matching parser via ParseMessage,
// updating the shared situation. Invalid sentences are logged and dropped.
func processNMEASentence(line string, situation *SituationData) {
sentence, valid := validateNMEAChecksum(line)
if !valid {
// On failure validateNMEAChecksum returns an error description in
// place of the sentence, which is what gets logged here.
log.Printf("GPS Error: invalid NMEA string: %s\n", sentence)
return
}
//log.Printf("Begin parse of %s\n", sentence)
ParseMessage(sentence, situation)
}
type NMEA struct {
Sentence string
Tokens []string
Situation *SituationData
}
// we split the sentence on commas, and use the first field via reflection to find a method with the same name
// ParseMessage splits an NMEA payload on commas and dispatches, via
// reflection, to the *NMEA method named after the first token
// (e.g. "GPGGA" -> (*NMEA).GPGGA). It returns nil when no method with
// that name exists, otherwise the populated *NMEA.
func ParseMessage(sentence string, situation *SituationData) *NMEA {
	n := &NMEA{sentence, strings.Split(sentence, ","), situation}
	m := reflect.ValueOf(n).MethodByName(n.Tokens[0])
	// IsValid is the idiomatic test for the zero Value that MethodByName
	// returns when the method is not found (clearer than comparing
	// against reflect.Value{}).
	if !m.IsValid() {
		return nil
	}
	m.Call(nil)
	return n
}
// durationSinceMidnight converts an NMEA fix-time token of the form
// "hhmmss" (any fractional ".ss" suffix is ignored) into whole seconds
// since midnight UTC.
func durationSinceMidnight(fixtime string) (int, error) {
	// Guard against short tokens: the original sliced blindly and would
	// panic on input shorter than six characters.
	if len(fixtime) < 6 {
		return 0, fmt.Errorf("gps: fix time %q too short", fixtime)
	}
	hr, err := strconv.Atoi(fixtime[0:2])
	if err != nil {
		return 0, err
	}
	min, err := strconv.Atoi(fixtime[2:4])
	if err != nil {
		return 0, err
	}
	sec, err := strconv.Atoi(fixtime[4:6])
	if err != nil {
		return 0, err
	}
	return sec + min*60 + hr*60*60, nil
}
// parseLatLon converts an NMEA coordinate token ("ddmm.mmmm" for
// latitude, "dddmm.mmmm" for longitude) into signed decimal degrees;
// neg selects the southern/western hemisphere.
func parseLatLon(s string, neg bool) (float32, error) {
	// The minutes field occupies the final 7 characters ("mm.mmmm");
	// anything shorter cannot contain a degrees part. The original code
	// sliced with a negative index and panicked on short tokens.
	minpos := len(s) - 7
	if minpos < 1 {
		return 0, fmt.Errorf("gps: coordinate %q too short", s)
	}
	deg, err := strconv.Atoi(s[0:minpos])
	if err != nil {
		return 0, err
	}
	min, err := strconv.ParseFloat(s[minpos:], 32)
	if err != nil {
		return 0, err
	}
	sign := float32(1)
	if neg {
		sign = -1
	}
	return sign * (float32(deg) + float32(min/60.0)), nil
}
func (n *NMEA) GNGGA() { n.GPGGA() } // ublox 8 uses GNGGA in place of GPGGA to indicate multiple nav sources (GPS/GLONASS)
// GPGGA parses a GGA (fix data) sentence and copies the fix time,
// latitude and longitude into the shared SituationData under its mutex.
// Token layout used here: [1]=UTC time hhmmss, [2]/[3]=latitude + N/S,
// [4]/[5]=longitude + E/W. Parse errors silently abort the update.
func (n *NMEA) GPGGA() {
log.Printf("In GPGGA\n")
s := n.Situation
// Hold the GPS mutex for the whole update so readers never observe a
// half-written fix.
s.Mu_GPS.Lock(); defer s.Mu_GPS.Unlock()
d, err := durationSinceMidnight(n.Tokens[1]); if err != nil { return }
s.LastFixSinceMidnightUTC = uint32(d)
if len(n.Tokens[2]) < 4 || len(n.Tokens[4]) < 4 { return } // sanity check lat/lon
lat, err := parseLatLon(n.Tokens[2], n.Tokens[3] == "S"); if err != nil { return }
lon, err := parseLatLon(n.Tokens[4], n.Tokens[5] == "W"); if err != nil { return }
s.Lat = lat; s.Lng = lon
log.Printf("Situation: %v\n", s)
}
// GPGSA handles a GSA (active satellites / DOP) sentence. Currently a
// stub: it only logs that the sentence was seen.
func (n *NMEA) GPGSA() {
log.Printf("In GPGSA\n")
}
Finish up GGA sentence parsing
package gps
import (
"strings"
"fmt"
"log"
"strconv"
"reflect"
)
// func validateNMEAChecksum determines if a string is a properly formatted NMEA sentence with a valid checksum.
//
// If the input string is valid, output is the input stripped of the "$" token and checksum, along with a boolean 'true'
// If the input string is the incorrect format, the checksum is missing/invalid, or checksum calculation fails, an error string and
// boolean 'false' are returned
//
// Checksum is calculated as XOR of all bytes between "$" and "*"
// validateNMEAChecksum determines if a string is a properly formatted NMEA
// sentence with a valid checksum.
//
// On success it returns the sentence stripped of the leading "$" and of the
// "*xx" checksum suffix, along with true. On failure it returns a
// human-readable error string and false. The checksum is the XOR of all
// bytes between "$" and "*".
func validateNMEAChecksum(s string) (string, bool) {
	// NMEA sentences start with "$" and end in "*xx".
	if !strings.HasPrefix(s, "$") || !strings.Contains(s, "*") {
		return "Invalid NMEA message", false
	}

	// Strip the leading "$" and separate the payload from the checksum.
	fields := strings.Split(strings.TrimPrefix(s, "$"), "*")
	payload, gotSum := fields[0], fields[1]
	if len(gotSum) < 2 {
		return "Missing checksum. Fewer than two bytes after asterisk", false
	}
	want, err := strconv.ParseUint(gotSum[:2], 16, 8)
	if err != nil {
		return "Invalid checksum", false
	}

	// XOR every payload byte and compare against the transmitted value.
	var sum byte
	for _, c := range []byte(payload) {
		sum ^= c
	}
	if sum != byte(want) {
		return fmt.Sprintf("Checksum failed. Calculated %#X; expected %#X", sum, want), false
	}
	return payload, true
}
// createChecksummedNMEASentence wraps a raw payload in NMEA framing: a
// leading "$", the payload, "*", the two-hex-digit XOR checksum of the
// payload, and CRLF.
func createChecksummedNMEASentence(raw []byte) []byte {
	var sum byte
	for i := range raw {
		sum ^= raw[i]
	}
	return []byte(fmt.Sprintf("$%s*%02X\r\n", raw, sum))
}
// processNMEASentence validates an incoming NMEA line's checksum and, if it
// is good, dispatches the stripped sentence to the matching handler method.
func processNMEASentence(line string, situation *SituationData) {
	sentence, valid := validateNMEAChecksum(line)
	if !valid {
		// On failure, "sentence" holds the validator's error description.
		log.Printf("GPS Error: invalid NMEA string: %s\n", sentence)
		return
	}
	//log.Printf("Begin parse of %s\n", sentence)
	ParseMessage(sentence, situation)
}
// NMEA holds one parsed NMEA sentence: the raw text, its comma-separated
// tokens (Tokens[0] is the sentence type, e.g. "GPGGA"), and the shared
// situation state that the per-sentence handler methods update.
type NMEA struct {
	Sentence  string
	Tokens    []string
	Situation *SituationData
}
// ParseMessage splits an NMEA sentence on commas and uses the first token to
// find, via reflection, an NMEA method with the same name (e.g. "GPGGA" ->
// (*NMEA).GPGGA) and invokes it. It returns nil when no handler exists for
// the sentence type, otherwise the populated NMEA value.
func ParseMessage(sentence string, situation *SituationData) *NMEA {
	n := &NMEA{sentence, strings.Split(sentence, ","), situation}
	m := reflect.ValueOf(n).MethodByName(n.Tokens[0])
	// Use Value.IsValid rather than comparing with reflect.Value{}:
	// comparing reflect.Values with == is fragile and flagged by go vet.
	if !m.IsValid() {
		return nil
	}
	m.Call(nil)
	return n
}
// durationSinceMidnight converts an NMEA fix time of the form "hhmmss[.sss]"
// into whole seconds since midnight UTC. It returns an error for inputs that
// are too short or not numeric.
func durationSinceMidnight(fixtime string) (int, error) {
	// Guard before slicing: fixtime[0:2] etc. would panic on short input.
	if len(fixtime) < 6 {
		return 0, strconv.ErrSyntax
	}
	hr, err := strconv.Atoi(fixtime[0:2])
	if err != nil {
		return 0, err
	}
	min, err := strconv.Atoi(fixtime[2:4])
	if err != nil {
		return 0, err
	}
	sec, err := strconv.Atoi(fixtime[4:6])
	if err != nil {
		return 0, err
	}
	return sec + min*60 + hr*60*60, nil
}
// parseLatLon converts an NMEA coordinate field (degrees immediately followed
// by minutes, e.g. "4807.0380") into decimal degrees, negated when neg is set
// (southern/western hemisphere).
// NOTE(review): the fixed split at len(s)-7 assumes a "mm.mmmm" minutes field;
// confirm that the GPS device always emits four decimal places.
func parseLatLon(s string, neg bool) (float32, error) {
	minpos := len(s) - 7
	// Guard before slicing: a short string would make minpos negative and
	// s[0:minpos] would panic.
	if minpos < 1 {
		return 0.0, strconv.ErrSyntax
	}
	deg, err := strconv.Atoi(s[0:minpos])
	if err != nil {
		return 0.0, err
	}
	min, err := strconv.ParseFloat(s[minpos:], 32)
	if err != nil {
		return 0.0, err
	}
	sign := 1
	if neg {
		sign = -1
	}
	return float32(sign) * (float32(deg) + float32(min/60.0)), nil
}
// GNGGA delegates to GPGGA: ublox 8 uses GNGGA in place of GPGGA to indicate multiple nav sources (GPS/GLONASS).
func (n *NMEA) GNGGA() { n.GPGGA() }
func (n *NMEA) GPGGA() {
log.Printf("In GPGGA\n")
s := n.Situation
s.Mu_GPS.Lock(); defer s.Mu_GPS.Unlock()
d, err := durationSinceMidnight(n.Tokens[1]); if err != nil { return }
s.LastFixSinceMidnightUTC = uint32(d)
if len(n.Tokens[2]) < 4 || len(n.Tokens[4]) < 4 { return } // sanity check lat/lon
lat, err := parseLatLon(n.Tokens[2], n.Tokens[3] == "S"); if err != nil { return }
lon, err := parseLatLon(n.Tokens[4], n.Tokens[5] == "W"); if err != nil { return }
s.Lat = lat; s.Lng = lon
q, err := strconv.Atoi(n.Tokens[6]); if err != nil { return }
s.Quality = uint8(q)
sat, err := strconv.Atoi(n.Tokens[7]); if err != nil { return }
s.Satellites = uint16(sat)
hdop, err := strconv.ParseFloat(n.Tokens[8], 32); if err != nil { return }
s.Accuracy = float32(hdop * 4.0) // estimate for WAAS / DGPS solution
if s.Quality == 2 { s.Accuracy *= 2.0 } // doubles for non-WAAS solution
alt, err := strconv.ParseFloat(n.Tokens[9], 32); if err != nil { return }
s.Alt = float32(alt * 3.28084) // meters to feet
∆geoid, err := strconv.ParseFloat(n.Tokens[11], 32); if err != nil { return }
s.GeoidSep = float32(∆geoid * 3.28084) // meters to feet
// s.LastFixLocalTime = stratuxClock.Time
log.Printf("Situation: %v\n", s)
}
// GPGSA handles GSA (DOP and active satellites) sentences.
// Currently a stub: it only logs that the sentence was seen.
func (n *NMEA) GPGSA() {
	log.Printf("In GPGSA\n")
}
|
// Copyright ©2013 The gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mat64
import (
"fmt"
check "launchpad.net/gocheck"
"math"
"github.com/gonum/floats"
)
// isUpperTriangular reports whether every visited sub-diagonal entry of a is
// (numerically) zero, within 1e-14.
// NOTE(review): the loop stops at cols-1, so sub-diagonal entries in the very
// last column are not checked; harmless for square R factors — confirm for
// non-square inputs.
func isUpperTriangular(a *Dense) bool {
	nr, nc := a.Dims()
	for col := 0; col+1 < nc; col++ {
		for row := col + 1; row < nr; row++ {
			if math.Abs(a.At(row, col)) > 1e-14 {
				return false
			}
		}
	}
	return true
}
// isOrthogonal reports whether every pair of distinct columns of a has a
// (numerically) zero dot product, within 1e-14.
// NOTE(review): this checks pairwise orthogonality only — column norms are
// not verified, so it does not distinguish orthogonal from orthonormal.
func isOrthogonal(a *Dense) bool {
	nr, nc := a.Dims()
	u := make([]float64, nr)
	v := make([]float64, nr)
	for i := 0; i+1 < nc; i++ {
		for j := i + 1; j < nc; j++ {
			a.Col(u, i)
			a.Col(v, j)
			if math.Abs(floats.Dot(u, v)) > 1e-14 {
				return false
			}
		}
	}
	return true
}
// TestQRD checks the QR factorization: Q must have pairwise-orthogonal
// columns, R must be upper triangular, and Q*R must reproduce A.
func (s *S) TestQRD(c *check.C) {
	for _, test := range []struct {
		a    [][]float64
		name string
	}{
		{
			name: "Square",
			a: [][]float64{
				{1.3, 2.4, 8.9},
				{-2.6, 8.7, 9.1},
				{5.6, 5.8, 2.1},
			},
		},
		/*
			{
				name: "Skinny",
				a: [][]float64{
					{1.3, 2.4, 8.9},
					{-2.6, 8.7, 9.1},
					{5.6, 5.8, 2.1},
					{19.4, 5.2, -26.1},
				},
			},
		*/
	} {
		a := NewDense(flatten(test.a))
		qf := QR(DenseCopyOf(a))
		r := qf.R()
		q := qf.Q()
		fmt.Println("q=", q)
		fmt.Println("r=", r)
		// Size the product like A, not like R: for a skinny (m > n) input
		// R does not have A's row count, so taking dimensions from R would
		// build newA with the wrong shape and break the Q*R == A check.
		rows, cols := a.Dims()
		newA := NewDense(rows, cols, nil)
		newA.Mul(q, r)
		c.Check(isOrthogonal(q), check.Equals, true, check.Commentf("Test %v: Q not orthogonal", test.name))
		c.Check(isUpperTriangular(r), check.Equals, true, check.Commentf("Test %v: R not upper triangular", test.name))
		c.Check(a.EqualsApprox(newA, 1e-14), check.Equals, true, check.Commentf("Test %v: Q*R != A", test.name))
	}
}
Added skinny QR test
// Copyright ©2013 The gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mat64
import (
//"fmt"
check "launchpad.net/gocheck"
"math"
"github.com/gonum/floats"
)
// isUpperTriangular reports whether every visited sub-diagonal entry of a is
// (numerically) zero, within 1e-14.
// NOTE(review): the loop stops at cols-1, so sub-diagonal entries in the very
// last column are not checked; harmless for square R factors — confirm for
// non-square inputs.
func isUpperTriangular(a *Dense) bool {
	nr, nc := a.Dims()
	for col := 0; col+1 < nc; col++ {
		for row := col + 1; row < nr; row++ {
			if math.Abs(a.At(row, col)) > 1e-14 {
				return false
			}
		}
	}
	return true
}
// isOrthogonal reports whether every pair of distinct columns of a has a
// (numerically) zero dot product, within 1e-14.
// NOTE(review): this checks pairwise orthogonality only — column norms are
// not verified, so it does not distinguish orthogonal from orthonormal.
func isOrthogonal(a *Dense) bool {
	nr, nc := a.Dims()
	u := make([]float64, nr)
	v := make([]float64, nr)
	for i := 0; i+1 < nc; i++ {
		for j := i + 1; j < nc; j++ {
			a.Col(u, i)
			a.Col(v, j)
			if math.Abs(floats.Dot(u, v)) > 1e-14 {
				return false
			}
		}
	}
	return true
}
// TestQRD checks the QR factorization on square and skinny (rows > cols)
// inputs: Q must have pairwise-orthogonal columns, R must be upper
// triangular, and Q*R must reproduce A.
func (s *S) TestQRD(c *check.C) {
	for _, test := range []struct {
		a    [][]float64
		name string
	}{
		{
			name: "Square",
			a: [][]float64{
				{1.3, 2.4, 8.9},
				{-2.6, 8.7, 9.1},
				{5.6, 5.8, 2.1},
			},
		},
		{
			name: "Skinny",
			a: [][]float64{
				{1.3, 2.4, 8.9},
				{-2.6, 8.7, 9.1},
				{5.6, 5.8, 2.1},
				{19.4, 5.2, -26.1},
			},
		},
	} {
		a := NewDense(flatten(test.a))
		qf := QR(DenseCopyOf(a))
		r := qf.R()
		q := qf.Q()
		// Size the product like A (not like R), so the skinny case multiplies
		// into a matrix of the correct shape.
		rows, cols := a.Dims()
		newA := NewDense(rows, cols, nil)
		newA.Mul(q, r)
		c.Check(isOrthogonal(q), check.Equals, true, check.Commentf("Test %v: Q not orthogonal", test.name))
		c.Check(isUpperTriangular(r), check.Equals, true, check.Commentf("Test %v: R not upper triangular", test.name))
		c.Check(a.EqualsApprox(newA, 1e-14), check.Equals, true, check.Commentf("Test %v: Q*R != A", test.name))
	}
}
|
package quic
import (
"fmt"
"io"
"sync"
"time"
"github.com/lucas-clemente/quic-go/internal/flowcontrol"
"github.com/lucas-clemente/quic-go/internal/protocol"
"github.com/lucas-clemente/quic-go/internal/qerr"
"github.com/lucas-clemente/quic-go/internal/utils"
"github.com/lucas-clemente/quic-go/internal/wire"
)
// receiveStreamI is the interface of receiveStream used inside the package:
// it extends the public ReceiveStream with the frame-handling and shutdown
// hooks driven by the session.
type receiveStreamI interface {
	ReceiveStream

	handleStreamFrame(*wire.StreamFrame) error
	handleResetStreamFrame(*wire.ResetStreamFrame) error
	closeForShutdown(error)
	getWindowUpdate() protocol.ByteCount
}
// receiveStream is the receiving half of a QUIC stream. The mutex guards the
// mutable state below; Read itself is documented as not thread safe.
type receiveStream struct {
	mutex sync.Mutex

	streamID protocol.StreamID

	sender streamSender

	frameQueue  *frameSorter
	finalOffset protocol.ByteCount // protocol.MaxByteCount until a FIN / RESET_STREAM fixes the stream's final size

	currentFrame       []byte
	currentFrameDone   func() // releases the current frame's buffer once it is consumed
	currentFrameIsLast bool   // is the currentFrame the last frame on this stream
	readPosInFrame     int

	closeForShutdownErr error
	cancelReadErr       error
	resetRemotelyErr    *StreamError

	closedForShutdown bool // set when CloseForShutdown() is called
	finRead           bool // set once we read a frame with a Fin
	canceledRead      bool // set when CancelRead() is called
	resetRemotely     bool // set when HandleResetStreamFrame() is called

	readChan chan struct{} // buffered (capacity 1) wake-up channel; see signalRead
	deadline time.Time

	flowController flowcontrol.StreamFlowController
	version        protocol.VersionNumber
}

// Compile-time interface satisfaction checks.
var (
	_ ReceiveStream  = &receiveStream{}
	_ receiveStreamI = &receiveStream{}
)
// newReceiveStream constructs a receive stream. finalOffset starts at
// protocol.MaxByteCount, meaning "final size not yet known".
func newReceiveStream(
	streamID protocol.StreamID,
	sender streamSender,
	flowController flowcontrol.StreamFlowController,
	version protocol.VersionNumber,
) *receiveStream {
	return &receiveStream{
		streamID:       streamID,
		sender:         sender,
		flowController: flowController,
		frameQueue:     newFrameSorter(),
		readChan:       make(chan struct{}, 1),
		finalOffset:    protocol.MaxByteCount,
		version:        version,
	}
}
// StreamID returns the stream's ID.
func (s *receiveStream) StreamID() protocol.StreamID {
	return s.streamID
}

// Read implements io.Reader. It is not thread safe!
func (s *receiveStream) Read(p []byte) (int, error) {
	s.mutex.Lock()
	completed, n, err := s.readImpl(p)
	s.mutex.Unlock()

	// Notify the sender outside the lock once the final offset has been read.
	if completed {
		s.sender.onStreamCompleted(s.streamID)
	}
	return n, err
}
// readImpl copies stream data into p. It reports whether the stream was fully
// consumed (FIN read), the number of bytes read, and any error. It must be
// called with the mutex held; the mutex is released only while blocked
// waiting for new data.
func (s *receiveStream) readImpl(p []byte) (bool /*stream completed */, int, error) {
	if s.finRead {
		return false, 0, io.EOF
	}
	if s.canceledRead {
		return false, 0, s.cancelReadErr
	}
	if s.resetRemotely {
		return false, 0, s.resetRemotelyErr
	}
	if s.closedForShutdown {
		return false, 0, s.closeForShutdownErr
	}

	bytesRead := 0
	var deadlineTimer *utils.Timer
	for bytesRead < len(p) {
		if s.currentFrame == nil || s.readPosInFrame >= len(s.currentFrame) {
			s.dequeueNextFrame()
		}
		if s.currentFrame == nil && bytesRead > 0 {
			return false, bytesRead, s.closeForShutdownErr
		}

		for {
			// Stop waiting on errors
			if s.closedForShutdown {
				return false, bytesRead, s.closeForShutdownErr
			}
			if s.canceledRead {
				return false, bytesRead, s.cancelReadErr
			}
			if s.resetRemotely {
				return false, bytesRead, s.resetRemotelyErr
			}

			deadline := s.deadline
			if !deadline.IsZero() {
				if !time.Now().Before(deadline) {
					return false, bytesRead, errDeadline
				}
				if deadlineTimer == nil {
					deadlineTimer = utils.NewTimer()
					defer deadlineTimer.Stop()
				}
				deadlineTimer.Reset(deadline)
			}

			if s.currentFrame != nil || s.currentFrameIsLast {
				break
			}

			// Release the lock only while blocked waiting for new data.
			s.mutex.Unlock()
			if deadline.IsZero() {
				<-s.readChan
			} else {
				select {
				case <-s.readChan:
				case <-deadlineTimer.Chan():
					deadlineTimer.SetRead()
				}
			}
			s.mutex.Lock()
			if s.currentFrame == nil {
				s.dequeueNextFrame()
			}
		}

		if bytesRead > len(p) {
			return false, bytesRead, fmt.Errorf("BUG: bytesRead (%d) > len(p) (%d) in stream.Read", bytesRead, len(p))
		}
		if s.readPosInFrame > len(s.currentFrame) {
			return false, bytesRead, fmt.Errorf("BUG: readPosInFrame (%d) > frame.DataLen (%d) in stream.Read", s.readPosInFrame, len(s.currentFrame))
		}

		// Copy while still holding the mutex. The previous code unlocked
		// around this copy, letting concurrent frame handling race with the
		// read of currentFrame / readPosInFrame.
		m := copy(p[bytesRead:], s.currentFrame[s.readPosInFrame:])
		s.readPosInFrame += m
		bytesRead += m

		// when a RESET_STREAM was received, the flow controller was already
		// informed about the final byteOffset for this stream
		if !s.resetRemotely {
			s.flowController.AddBytesRead(protocol.ByteCount(m))
		}

		if s.readPosInFrame >= len(s.currentFrame) && s.currentFrameIsLast {
			s.finRead = true
			return true, bytesRead, io.EOF
		}
	}
	return false, bytesRead, nil
}
// dequeueNextFrame releases the current frame's buffer (if any) and pops the
// next in-order frame from the frame sorter, recording whether that frame
// reaches the stream's final offset.
func (s *receiveStream) dequeueNextFrame() {
	var offset protocol.ByteCount
	// We're done with the last frame. Release the buffer.
	if s.currentFrameDone != nil {
		s.currentFrameDone()
	}
	offset, s.currentFrame, s.currentFrameDone = s.frameQueue.Pop()
	s.currentFrameIsLast = offset+protocol.ByteCount(len(s.currentFrame)) >= s.finalOffset
	s.readPosInFrame = 0
}
// CancelRead aborts reading from the stream and asks the peer, via a
// STOP_SENDING frame, to stop transmitting.
func (s *receiveStream) CancelRead(errorCode StreamErrorCode) {
	s.mutex.Lock()
	completed := s.cancelReadImpl(errorCode)
	s.mutex.Unlock()

	if completed {
		s.flowController.Abandon()
		s.sender.onStreamCompleted(s.streamID)
	}
}

// cancelReadImpl performs the cancellation; it must be called with the mutex
// held. It is a no-op after a FIN was read, a previous cancellation, or a
// remote reset.
func (s *receiveStream) cancelReadImpl(errorCode qerr.StreamErrorCode) bool /* completed */ {
	if s.finRead || s.canceledRead || s.resetRemotely {
		return false
	}
	s.canceledRead = true
	s.cancelReadErr = fmt.Errorf("Read on stream %d canceled with error code %d", s.streamID, errorCode)
	s.signalRead()
	s.sender.queueControlFrame(&wire.StopSendingFrame{
		StreamID:  s.streamID,
		ErrorCode: errorCode,
	})
	// We're done with this stream if the final offset was already received.
	return s.finalOffset != protocol.MaxByteCount
}
// handleStreamFrame queues the data of an incoming STREAM frame, completing
// the stream when the frame supplies the final offset of a canceled read.
func (s *receiveStream) handleStreamFrame(frame *wire.StreamFrame) error {
	s.mutex.Lock()
	completed, err := s.handleStreamFrameImpl(frame)
	s.mutex.Unlock()

	if completed {
		s.flowController.Abandon()
		s.sender.onStreamCompleted(s.streamID)
	}
	return err
}

// handleStreamFrameImpl must be called with the mutex held. It updates flow
// control, records the final offset on FIN, and pushes the frame's data into
// the frame sorter (unless reading was canceled, in which case the data is
// dropped).
func (s *receiveStream) handleStreamFrameImpl(frame *wire.StreamFrame) (bool /* completed */, error) {
	maxOffset := frame.Offset + frame.DataLen()
	if err := s.flowController.UpdateHighestReceived(maxOffset, frame.Fin); err != nil {
		return false, err
	}
	var newlyRcvdFinalOffset bool
	if frame.Fin {
		newlyRcvdFinalOffset = s.finalOffset == protocol.MaxByteCount
		s.finalOffset = maxOffset
	}
	if s.canceledRead {
		// Data arriving after CancelRead is discarded; the stream completes
		// once the final offset becomes known.
		return newlyRcvdFinalOffset, nil
	}
	if err := s.frameQueue.Push(frame.Data, frame.Offset, frame.PutBack); err != nil {
		return false, err
	}
	s.signalRead()
	return false, nil
}
// handleResetStreamFrame processes a RESET_STREAM frame from the peer.
func (s *receiveStream) handleResetStreamFrame(frame *wire.ResetStreamFrame) error {
	s.mutex.Lock()
	completed, err := s.handleResetStreamFrameImpl(frame)
	s.mutex.Unlock()

	if completed {
		s.flowController.Abandon()
		s.sender.onStreamCompleted(s.streamID)
	}
	return err
}

// handleResetStreamFrameImpl must be called with the mutex held. It records
// the final size, marks the stream as remotely reset and wakes a blocked Read.
func (s *receiveStream) handleResetStreamFrameImpl(frame *wire.ResetStreamFrame) (bool /*completed */, error) {
	if s.closedForShutdown {
		return false, nil
	}
	if err := s.flowController.UpdateHighestReceived(frame.FinalSize, true); err != nil {
		return false, err
	}
	newlyRcvdFinalOffset := s.finalOffset == protocol.MaxByteCount
	s.finalOffset = frame.FinalSize

	// ignore duplicate RESET_STREAM frames for this stream (after checking their final offset)
	if s.resetRemotely {
		return false, nil
	}
	s.resetRemotely = true
	s.resetRemotelyErr = &StreamError{
		StreamID:  s.streamID,
		ErrorCode: frame.ErrorCode,
	}
	s.signalRead()
	return newlyRcvdFinalOffset, nil
}
// CloseRemote simulates receiving a FIN at the given offset.
func (s *receiveStream) CloseRemote(offset protocol.ByteCount) {
	s.handleStreamFrame(&wire.StreamFrame{Fin: true, Offset: offset})
}

// SetReadDeadline sets the deadline for future Read calls; it never fails.
func (s *receiveStream) SetReadDeadline(t time.Time) error {
	s.mutex.Lock()
	s.deadline = t
	s.mutex.Unlock()
	// Wake a blocked Read so it re-evaluates the new deadline.
	s.signalRead()
	return nil
}

// CloseForShutdown closes a stream abruptly.
// It makes Read unblock (and return the error) immediately.
// The peer will NOT be informed about this: the stream is closed without sending a FIN or RESET.
func (s *receiveStream) closeForShutdown(err error) {
	s.mutex.Lock()
	s.closedForShutdown = true
	s.closeForShutdownErr = err
	s.mutex.Unlock()
	s.signalRead()
}

// getWindowUpdate asks the flow controller for an updated receive window.
func (s *receiveStream) getWindowUpdate() protocol.ByteCount {
	return s.flowController.GetWindowUpdate()
}

// signalRead performs a non-blocking send on the readChan
func (s *receiveStream) signalRead() {
	select {
	case s.readChan <- struct{}{}:
	default:
	}
}
don't unlock the receive stream mutex for copying from STREAM frames
package quic
import (
"fmt"
"io"
"sync"
"time"
"github.com/lucas-clemente/quic-go/internal/flowcontrol"
"github.com/lucas-clemente/quic-go/internal/protocol"
"github.com/lucas-clemente/quic-go/internal/qerr"
"github.com/lucas-clemente/quic-go/internal/utils"
"github.com/lucas-clemente/quic-go/internal/wire"
)
// receiveStreamI is the interface of receiveStream used inside the package:
// it extends the public ReceiveStream with the frame-handling and shutdown
// hooks driven by the session.
type receiveStreamI interface {
	ReceiveStream

	handleStreamFrame(*wire.StreamFrame) error
	handleResetStreamFrame(*wire.ResetStreamFrame) error
	closeForShutdown(error)
	getWindowUpdate() protocol.ByteCount
}
// receiveStream is the receiving half of a QUIC stream. The mutex guards the
// mutable state below; Read itself is documented as not thread safe.
type receiveStream struct {
	mutex sync.Mutex

	streamID protocol.StreamID

	sender streamSender

	frameQueue  *frameSorter
	finalOffset protocol.ByteCount // protocol.MaxByteCount until a FIN / RESET_STREAM fixes the stream's final size

	currentFrame       []byte
	currentFrameDone   func() // releases the current frame's buffer once it is consumed
	currentFrameIsLast bool   // is the currentFrame the last frame on this stream
	readPosInFrame     int

	closeForShutdownErr error
	cancelReadErr       error
	resetRemotelyErr    *StreamError

	closedForShutdown bool // set when CloseForShutdown() is called
	finRead           bool // set once we read a frame with a Fin
	canceledRead      bool // set when CancelRead() is called
	resetRemotely     bool // set when HandleResetStreamFrame() is called

	readChan chan struct{} // buffered (capacity 1) wake-up channel; see signalRead
	deadline time.Time

	flowController flowcontrol.StreamFlowController
	version        protocol.VersionNumber
}

// Compile-time interface satisfaction checks.
var (
	_ ReceiveStream  = &receiveStream{}
	_ receiveStreamI = &receiveStream{}
)
// newReceiveStream constructs a receive stream. finalOffset starts at
// protocol.MaxByteCount, meaning "final size not yet known".
func newReceiveStream(
	streamID protocol.StreamID,
	sender streamSender,
	flowController flowcontrol.StreamFlowController,
	version protocol.VersionNumber,
) *receiveStream {
	return &receiveStream{
		streamID:       streamID,
		sender:         sender,
		flowController: flowController,
		frameQueue:     newFrameSorter(),
		readChan:       make(chan struct{}, 1),
		finalOffset:    protocol.MaxByteCount,
		version:        version,
	}
}
// StreamID returns the stream's ID.
func (s *receiveStream) StreamID() protocol.StreamID {
	return s.streamID
}

// Read implements io.Reader. It is not thread safe!
func (s *receiveStream) Read(p []byte) (int, error) {
	s.mutex.Lock()
	completed, n, err := s.readImpl(p)
	s.mutex.Unlock()

	// Notify the sender outside the lock once the final offset has been read.
	if completed {
		s.sender.onStreamCompleted(s.streamID)
	}
	return n, err
}
// readImpl copies stream data into p. It reports whether the stream was fully
// consumed (FIN read), the number of bytes read, and any error. It must be
// called with the mutex held; the mutex is released only while blocked
// waiting for new data. The copy itself happens under the lock.
func (s *receiveStream) readImpl(p []byte) (bool /*stream completed */, int, error) {
	if s.finRead {
		return false, 0, io.EOF
	}
	if s.canceledRead {
		return false, 0, s.cancelReadErr
	}
	if s.resetRemotely {
		return false, 0, s.resetRemotelyErr
	}
	if s.closedForShutdown {
		return false, 0, s.closeForShutdownErr
	}

	bytesRead := 0
	var deadlineTimer *utils.Timer
	for bytesRead < len(p) {
		if s.currentFrame == nil || s.readPosInFrame >= len(s.currentFrame) {
			s.dequeueNextFrame()
		}
		if s.currentFrame == nil && bytesRead > 0 {
			return false, bytesRead, s.closeForShutdownErr
		}

		for {
			// Stop waiting on errors
			if s.closedForShutdown {
				return false, bytesRead, s.closeForShutdownErr
			}
			if s.canceledRead {
				return false, bytesRead, s.cancelReadErr
			}
			if s.resetRemotely {
				return false, bytesRead, s.resetRemotelyErr
			}

			deadline := s.deadline
			if !deadline.IsZero() {
				if !time.Now().Before(deadline) {
					return false, bytesRead, errDeadline
				}
				if deadlineTimer == nil {
					deadlineTimer = utils.NewTimer()
					defer deadlineTimer.Stop()
				}
				deadlineTimer.Reset(deadline)
			}

			if s.currentFrame != nil || s.currentFrameIsLast {
				break
			}

			// Release the lock only while blocked waiting for new data.
			s.mutex.Unlock()
			if deadline.IsZero() {
				<-s.readChan
			} else {
				select {
				case <-s.readChan:
				case <-deadlineTimer.Chan():
					deadlineTimer.SetRead()
				}
			}
			s.mutex.Lock()
			if s.currentFrame == nil {
				s.dequeueNextFrame()
			}
		}

		if bytesRead > len(p) {
			return false, bytesRead, fmt.Errorf("BUG: bytesRead (%d) > len(p) (%d) in stream.Read", bytesRead, len(p))
		}
		if s.readPosInFrame > len(s.currentFrame) {
			return false, bytesRead, fmt.Errorf("BUG: readPosInFrame (%d) > frame.DataLen (%d) in stream.Read", s.readPosInFrame, len(s.currentFrame))
		}

		// Copy while still holding the mutex, so concurrent frame handling
		// cannot race with this read of currentFrame / readPosInFrame.
		m := copy(p[bytesRead:], s.currentFrame[s.readPosInFrame:])
		s.readPosInFrame += m
		bytesRead += m

		// when a RESET_STREAM was received, the flow controller was already
		// informed about the final byteOffset for this stream
		if !s.resetRemotely {
			s.flowController.AddBytesRead(protocol.ByteCount(m))
		}

		if s.readPosInFrame >= len(s.currentFrame) && s.currentFrameIsLast {
			s.finRead = true
			return true, bytesRead, io.EOF
		}
	}
	return false, bytesRead, nil
}
// dequeueNextFrame releases the current frame's buffer (if any) and pops the
// next in-order frame from the frame sorter, recording whether that frame
// reaches the stream's final offset.
func (s *receiveStream) dequeueNextFrame() {
	var offset protocol.ByteCount
	// We're done with the last frame. Release the buffer.
	if s.currentFrameDone != nil {
		s.currentFrameDone()
	}
	offset, s.currentFrame, s.currentFrameDone = s.frameQueue.Pop()
	s.currentFrameIsLast = offset+protocol.ByteCount(len(s.currentFrame)) >= s.finalOffset
	s.readPosInFrame = 0
}
// CancelRead aborts reading from the stream and asks the peer, via a
// STOP_SENDING frame, to stop transmitting.
func (s *receiveStream) CancelRead(errorCode StreamErrorCode) {
	s.mutex.Lock()
	completed := s.cancelReadImpl(errorCode)
	s.mutex.Unlock()

	if completed {
		s.flowController.Abandon()
		s.sender.onStreamCompleted(s.streamID)
	}
}

// cancelReadImpl performs the cancellation; it must be called with the mutex
// held. It is a no-op after a FIN was read, a previous cancellation, or a
// remote reset.
func (s *receiveStream) cancelReadImpl(errorCode qerr.StreamErrorCode) bool /* completed */ {
	if s.finRead || s.canceledRead || s.resetRemotely {
		return false
	}
	s.canceledRead = true
	s.cancelReadErr = fmt.Errorf("Read on stream %d canceled with error code %d", s.streamID, errorCode)
	s.signalRead()
	s.sender.queueControlFrame(&wire.StopSendingFrame{
		StreamID:  s.streamID,
		ErrorCode: errorCode,
	})
	// We're done with this stream if the final offset was already received.
	return s.finalOffset != protocol.MaxByteCount
}
// handleStreamFrame queues the data of an incoming STREAM frame, completing
// the stream when the frame supplies the final offset of a canceled read.
func (s *receiveStream) handleStreamFrame(frame *wire.StreamFrame) error {
	s.mutex.Lock()
	completed, err := s.handleStreamFrameImpl(frame)
	s.mutex.Unlock()

	if completed {
		s.flowController.Abandon()
		s.sender.onStreamCompleted(s.streamID)
	}
	return err
}

// handleStreamFrameImpl must be called with the mutex held. It updates flow
// control, records the final offset on FIN, and pushes the frame's data into
// the frame sorter (unless reading was canceled, in which case the data is
// dropped).
func (s *receiveStream) handleStreamFrameImpl(frame *wire.StreamFrame) (bool /* completed */, error) {
	maxOffset := frame.Offset + frame.DataLen()
	if err := s.flowController.UpdateHighestReceived(maxOffset, frame.Fin); err != nil {
		return false, err
	}
	var newlyRcvdFinalOffset bool
	if frame.Fin {
		newlyRcvdFinalOffset = s.finalOffset == protocol.MaxByteCount
		s.finalOffset = maxOffset
	}
	if s.canceledRead {
		// Data arriving after CancelRead is discarded; the stream completes
		// once the final offset becomes known.
		return newlyRcvdFinalOffset, nil
	}
	if err := s.frameQueue.Push(frame.Data, frame.Offset, frame.PutBack); err != nil {
		return false, err
	}
	s.signalRead()
	return false, nil
}
// handleResetStreamFrame processes a RESET_STREAM frame from the peer.
func (s *receiveStream) handleResetStreamFrame(frame *wire.ResetStreamFrame) error {
	s.mutex.Lock()
	completed, err := s.handleResetStreamFrameImpl(frame)
	s.mutex.Unlock()

	if completed {
		s.flowController.Abandon()
		s.sender.onStreamCompleted(s.streamID)
	}
	return err
}

// handleResetStreamFrameImpl must be called with the mutex held. It records
// the final size, marks the stream as remotely reset and wakes a blocked Read.
func (s *receiveStream) handleResetStreamFrameImpl(frame *wire.ResetStreamFrame) (bool /*completed */, error) {
	if s.closedForShutdown {
		return false, nil
	}
	if err := s.flowController.UpdateHighestReceived(frame.FinalSize, true); err != nil {
		return false, err
	}
	newlyRcvdFinalOffset := s.finalOffset == protocol.MaxByteCount
	s.finalOffset = frame.FinalSize

	// ignore duplicate RESET_STREAM frames for this stream (after checking their final offset)
	if s.resetRemotely {
		return false, nil
	}
	s.resetRemotely = true
	s.resetRemotelyErr = &StreamError{
		StreamID:  s.streamID,
		ErrorCode: frame.ErrorCode,
	}
	s.signalRead()
	return newlyRcvdFinalOffset, nil
}
// CloseRemote simulates receiving a FIN at the given offset.
func (s *receiveStream) CloseRemote(offset protocol.ByteCount) {
	s.handleStreamFrame(&wire.StreamFrame{Fin: true, Offset: offset})
}

// SetReadDeadline sets the deadline for future Read calls; it never fails.
func (s *receiveStream) SetReadDeadline(t time.Time) error {
	s.mutex.Lock()
	s.deadline = t
	s.mutex.Unlock()
	// Wake a blocked Read so it re-evaluates the new deadline.
	s.signalRead()
	return nil
}

// CloseForShutdown closes a stream abruptly.
// It makes Read unblock (and return the error) immediately.
// The peer will NOT be informed about this: the stream is closed without sending a FIN or RESET.
func (s *receiveStream) closeForShutdown(err error) {
	s.mutex.Lock()
	s.closedForShutdown = true
	s.closeForShutdownErr = err
	s.mutex.Unlock()
	s.signalRead()
}

// getWindowUpdate asks the flow controller for an updated receive window.
func (s *receiveStream) getWindowUpdate() protocol.ByteCount {
	return s.flowController.GetWindowUpdate()
}

// signalRead performs a non-blocking send on the readChan
func (s *receiveStream) signalRead() {
	select {
	case s.readChan <- struct{}{}:
	default:
	}
}
|
//go:build skip
// +build skip

package main
import (
"crypto/hmac"
"crypto/sha1"
"crypto/sha512"
"encoding/base64"
"encoding/json"
"flag"
"fmt"
"github.com/gorilla/mux"
mcassoc "github.com/lukegb/mcassoc/mcassoc"
minecraft "github.com/lukegb/mcassoc/minecraft"
mojang "github.com/lukegb/mcassoc/mojang"
"html/template"
"image/png"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"path"
"time"
)
// Package-level configuration, populated by myinit from command-line flags.
var sesskey []byte                   // session key; seeds per-site shared keys
var authenticator mcassoc.Associfier // embeds/verifies password datablocks in skins
var httplistenloc string             // HTTP listen address

// TemplatePageData holds page-level metadata passed to templates.
type TemplatePageData struct {
	Title string
}

// TemplateData is the root object handed to ExecuteTemplate.
type TemplateData struct {
	PageData TemplatePageData
	Data     interface{}
}

// SigningData is the JSON payload that gets HMAC-signed into a postback blob.
type SigningData struct {
	Username string `json:"username"`
	UUID     string `json:"uuid"`
	Now      int64  `json:"now"`
}
// generateSharedKey derives a per-site key as HMAC-SHA512(sesskey, siteid).
func generateSharedKey(siteid string) []byte {
	mac := hmac.New(sha512.New, sesskey)
	mac.Write([]byte(siteid))
	return mac.Sum(nil)
}
// generateDataBlob serializes data to JSON, appends an HMAC-SHA1 over it
// (keyed with the site's shared key), and returns the combined
// payload||MAC as URL-safe base64.
func generateDataBlob(data SigningData, siteid string) string {
	payload, _ := json.Marshal(data)
	mac := hmac.New(sha1.New, generateSharedKey(siteid))
	mac.Write(payload)
	// Sum appends the MAC to payload, so the blob carries both.
	blob := mac.Sum(payload)
	return base64.URLEncoding.EncodeToString(blob)
}
// HomePage serves a static landing page.
func HomePage(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
	w.Write([]byte("<!DOCTYPE html><html><body><h1>Minecraft Account Association</h1><p>For access, please email lukegb: my email is (my username) AT (my username) DOT com.</p></body></html>"))
}
// PerformPage renders the association form. It requires siteid, postback and
// key query parameters; postback must be an absolute http/https URL.
func PerformPage(w http.ResponseWriter, r *http.Request) {
	v := r.URL.Query()
	siteID := v.Get("siteid")
	postbackURL := v.Get("postback")
	key := v.Get("key")
	mcuser := v.Get("mcusername")

	// NOTE(review): the URL check runs before the required-parameter check,
	// so an empty postback reports "postback must be a HTTP/HTTPS url"
	// rather than "required parameter(s) missing".
	if pbu, err := url.Parse(postbackURL); err != nil || (pbu.Scheme != "http" && pbu.Scheme != "https") {
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte("postback must be a HTTP/HTTPS url"))
		return
	}

	// check that the required fields are set
	if siteID == "" || postbackURL == "" || key == "" {
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte("required parameter(s) missing"))
		return
	}

	t := template.Must(template.ParseFiles("templates/minibase.html", "templates/perform.html"))
	t.ExecuteTemplate(w, "layout", TemplateData{
		PageData: TemplatePageData{
			Title: "Minecraft Account Association",
		},
		Data: struct {
			SiteID      string
			PostbackURL string
			Key         string
			MCUser      string
		}{
			SiteID:      siteID,
			PostbackURL: postbackURL,
			Key:         key,
			MCUser:      mcuser,
		},
	})
}
// ApiCheckUserPage (POST) looks up a Minecraft username and reports, as JSON,
// the account's UUID and whether its skin already carries an mcassoc datablock.
func ApiCheckUserPage(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
		w.Header().Set("Allow", "POST")
		w.WriteHeader(http.StatusMethodNotAllowed)
		w.Write([]byte("must be a POST request"))
		return
	}

	err := r.ParseForm()
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte("data invalid"))
		return
	}

	je := json.NewEncoder(w)
	mcusername := r.Form.Get("mcusername")

	// don't do anything yet
	user, err := mojang.GetProfileByUsername(mcusername)
	if err != nil {
		if err == mojang.ERR_NO_SUCH_USER {
			// An unknown username is reported in-band as JSON, not as an
			// HTTP error status.
			je.Encode(struct {
				Error string `json:"error"`
			}{
				Error: "no such user",
			})
			return
		} else {
			log.Println("error while getting mojang profile", mcusername, err)
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
	}

	mcprofile, err := minecraft.GetProfile(user.Id)
	if err != nil {
		log.Println("error while getting minecraft profile", mcusername, user.Id, err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	skinim, err := minecraft.GetSkin(mcprofile)
	if err != nil {
		log.Println("error while getting skin", mcusername, user.Id, mcprofile, err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	je.Encode(struct {
		MCUsername string `json:"mcusername"`
		UUID       string `json:"uuid"`
		Exists     bool   `json:"exists"`
	}{
		MCUsername: mcusername,
		UUID:       user.Id,
		Exists:     mcassoc.HasDatablock(skinim),
	})
}
// ApiAuthenticateUserPage (POST) verifies a password against the datablock
// embedded in the user's current skin. On success it also builds a signed
// postback blob for the requesting site.
func ApiAuthenticateUserPage(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
		w.Header().Set("Allow", "POST")
		w.WriteHeader(http.StatusMethodNotAllowed)
		w.Write([]byte("must be a POST request"))
		return
	}

	err := r.ParseForm()
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte("data invalid"))
		return
	}

	je := json.NewEncoder(w)
	uuid := r.Form.Get("uuid")
	password := r.Form.Get("password")

	mcprofile, err := minecraft.GetProfile(uuid)
	if err != nil {
		log.Println("error while getting minecraft profile", uuid, err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	skinim, err := minecraft.GetSkin(mcprofile)
	if err != nil {
		log.Println("error while getting skin", uuid, mcprofile, err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	passwordok, err := authenticator.Verify(password, skinim)
	if err != nil {
		log.Println("error verifying datablock", uuid, mcprofile, err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	postbackurl := ""
	postbackdata := ""
	if passwordok {
		// yay!
		// Only build the signed postback when the password checks out; the
		// postback target must be an absolute http/https URL.
		postbackstr := r.Form.Get("data[postback]")
		postback, err := url.Parse(postbackstr)
		if err != nil || (postback.Scheme != "http" && postback.Scheme != "https") {
			w.WriteHeader(http.StatusPreconditionFailed)
			return
		}

		postbackdata = generateDataBlob(SigningData{
			Now:      time.Now().Unix(),
			UUID:     mcprofile.Id,
			Username: mcprofile.Name,
		}, r.Form.Get("data[siteid]"))
		postbackurl = postback.String()
	}

	je.Encode(struct {
		MCUsername   string `json:"mcusername"`
		UUID         string `json:"uuid"`
		Correct      bool   `json:"correct"`
		Postback     string `json:"postback"`
		PostbackData string `json:"postbackdata"`
	}{
		MCUsername:   mcprofile.Name,
		UUID:         mcprofile.Id,
		Correct:      passwordok,
		Postback:     postbackurl,
		PostbackData: postbackdata,
	})
}
// ApiCreateUserPage (POST) embeds a password datablock into the user's
// current skin and writes the result as a temporary PNG, returning its
// filename (served later via SkinServerPage).
func ApiCreateUserPage(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
		w.Header().Set("Allow", "POST")
		w.WriteHeader(http.StatusMethodNotAllowed)
		w.Write([]byte("must be a POST request"))
		return
	}

	err := r.ParseForm()
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte("data invalid"))
		return
	}

	je := json.NewEncoder(w)
	uuid := r.Form.Get("uuid")
	password := r.Form.Get("password")

	mcprofile, err := minecraft.GetProfile(uuid)
	if err != nil {
		log.Println("error while getting minecraft profile", uuid, err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	skinim, err := minecraft.GetSkin(mcprofile)
	if err != nil {
		log.Println("error while getting skin", uuid, mcprofile, err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	authedim, err := authenticator.Embed(password, skinim)
	if err != nil {
		log.Println("error while embedding into skin", uuid, mcprofile, err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	tmpf, err := ioutil.TempFile("tmpskin/", uuid)
	if err != nil {
		log.Println("error while opening temp file", uuid, mcprofile, err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	defer tmpf.Close()

	err = png.Encode(tmpf, authedim)
	if err != nil {
		log.Println("error while writing authed skin image", uuid, mcprofile, err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	je.Encode(struct {
		Filename string `json:"filename"`
	}{
		Filename: path.Base(tmpf.Name()),
	})
}
// SkinServerPage serves a previously generated skin PNG from tmpskin/.
// The route registers this with a {filename:[0-9a-z]+} pattern, which
// constrains the path segment used here.
func SkinServerPage(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	f, err := os.Open(fmt.Sprintf("tmpskin/%s", vars["filename"]))
	if err != nil {
		w.WriteHeader(http.StatusNotFound)
		return
	}
	defer f.Close()
	w.Header().Add("Content-Type", "image/png")
	io.Copy(w, f)
}
// myinit parses command-line flags and initializes the package-level session
// key, authenticator and HTTP listen address.
func myinit() {
	var flagSesskey string
	var flagAuthenticationKey string
	flag.StringVar(&flagSesskey, "sesskey", "insecure", "session key (used for creating shared secrets with clients)")
	flag.StringVar(&flagAuthenticationKey, "authkey", "insecure", "authentication key (used for hashing passwords)")
	flag.StringVar(&httplistenloc, "listen", ":21333", "HTTP listener location")
	flag.Parse()

	// load the authentication keys
	sesskey = []byte(flagSesskey)
	authenticator = mcassoc.NewAssocifier(flagAuthenticationKey)

	// Don't write the secret key material itself to the log: anyone with
	// log access could forge postback blobs and datablocks otherwise.
	log.Println("Session key configured")
	log.Println("Authentication key configured")
	log.Println("Going to listen at", httplistenloc)
}
// main wires up the HTTP routes and runs the server until it fails.
func main() {
	myinit()

	r := mux.NewRouter()
	r.HandleFunc("/", HomePage)
	r.HandleFunc("/perform", PerformPage)
	r.HandleFunc("/api/user/check", ApiCheckUserPage)
	r.HandleFunc("/api/user/create", ApiCreateUserPage)
	r.HandleFunc("/api/user/authenticate", ApiAuthenticateUserPage)
	// Filenames are restricted to [0-9a-z]+ by the route pattern.
	r.HandleFunc("/media/skin/{filename:[0-9a-z]+}.png", SkinServerPage)
	http.Handle("/", r)

	log.Println("Running!")
	err := http.ListenAndServe(httplistenloc, nil)
	if err != nil {
		log.Fatal("http.ListenAndServe: ", err)
	}
}
You probably want that
package main
import (
"crypto/hmac"
"crypto/sha1"
"crypto/sha512"
"encoding/base64"
"encoding/json"
"flag"
"fmt"
"github.com/gorilla/mux"
mcassoc "github.com/lukegb/mcassoc/mcassoc"
minecraft "github.com/lukegb/mcassoc/minecraft"
mojang "github.com/lukegb/mcassoc/mojang"
"html/template"
"image/png"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"path"
"time"
)
// Package-level configuration, populated by myinit from command-line flags.
var sesskey []byte                   // session key; seeds per-site shared keys
var authenticator mcassoc.Associfier // embeds/verifies password datablocks in skins
var httplistenloc string             // HTTP listen address

// TemplatePageData holds page-level metadata passed to templates.
type TemplatePageData struct {
	Title string
}

// TemplateData is the root object handed to ExecuteTemplate.
type TemplateData struct {
	PageData TemplatePageData
	Data     interface{}
}

// SigningData is the JSON payload that gets HMAC-signed into a postback blob.
type SigningData struct {
	Username string `json:"username"`
	UUID     string `json:"uuid"`
	Now      int64  `json:"now"`
}
// generateSharedKey derives the per-site shared secret by computing
// HMAC-SHA512 over the site id, keyed with the global session key.
func generateSharedKey(siteid string) []byte {
	mac := hmac.New(sha512.New, sesskey)
	mac.Write([]byte(siteid))
	// Sum(nil) allocates and returns just the digest, equivalent to
	// appending to an empty slice.
	return mac.Sum(nil)
}
// generateDataBlob serializes data as JSON and returns, base64url-encoded,
// the JSON bytes followed by their HMAC-SHA1 tag keyed with the site's
// shared key. Note that x.Sum(databytes) APPENDS the 20-byte digest to
// databytes, so the encoded blob is "json || mac", not just the mac.
func generateDataBlob(data SigningData, siteid string) string {
	skey := generateSharedKey(siteid)
	// Marshal of SigningData (plain string/int64 fields) cannot fail,
	// so the error is deliberately discarded.
	databytes, _ := json.Marshal(data)
	x := hmac.New(sha1.New, skey)
	x.Write(databytes)
	datahash := x.Sum(databytes)
	return base64.URLEncoding.EncodeToString(datahash)
}
// HomePage handles "/" and returns a small static HTML landing page.
func HomePage(w http.ResponseWriter, r *http.Request) {
	const page = "<!DOCTYPE html><html><body><h1>Minecraft Account Association</h1><p>For access, please email lukegb: my email is (my username) AT (my username) DOT com.</p></body></html>"
	w.WriteHeader(http.StatusOK)
	io.WriteString(w, page)
}
// PerformPage renders the account-association flow page. It expects the
// query parameters siteid, postback (an absolute http/https URL), key and
// optionally mcusername, and passes them through to the perform template.
//
// Fix: the required-parameter check now runs BEFORE the postback URL
// validation. Previously an entirely missing postback parameter was
// reported as "postback must be a HTTP/HTTPS url" (url.Parse("") succeeds
// with an empty scheme), which made the "required parameter(s) missing"
// branch unreachable for that field. The template execution error is now
// logged instead of being silently dropped.
func PerformPage(w http.ResponseWriter, r *http.Request) {
	v := r.URL.Query()
	siteID := v.Get("siteid")
	postbackURL := v.Get("postback")
	key := v.Get("key")
	mcuser := v.Get("mcusername")
	// check that the required fields are set
	if siteID == "" || postbackURL == "" || key == "" {
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte("required parameter(s) missing"))
		return
	}
	// The postback target must be an absolute http(s) URL so clients can
	// only be sent back to a web origin.
	if pbu, err := url.Parse(postbackURL); err != nil || (pbu.Scheme != "http" && pbu.Scheme != "https") {
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte("postback must be a HTTP/HTTPS url"))
		return
	}
	t := template.Must(template.ParseFiles("templates/minibase.html", "templates/perform.html"))
	if err := t.ExecuteTemplate(w, "layout", TemplateData{
		PageData: TemplatePageData{
			Title: "Minecraft Account Association",
		},
		Data: struct {
			SiteID      string
			PostbackURL string
			Key         string
			MCUser      string
		}{
			SiteID:      siteID,
			PostbackURL: postbackURL,
			Key:         key,
			MCUser:      mcuser,
		},
	}); err != nil {
		// Headers may already be written; all we can do is record it.
		log.Println("error executing perform template:", err)
	}
}
// ApiCheckUserPage (POST /api/user/check) looks up the given Minecraft
// username and reports the account's UUID plus whether its current skin
// already contains an association datablock.
func ApiCheckUserPage(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
		w.Header().Set("Allow", "POST")
		w.WriteHeader(http.StatusMethodNotAllowed)
		w.Write([]byte("must be a POST request"))
		return
	}
	if err := r.ParseForm(); err != nil {
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte("data invalid"))
		return
	}
	enc := json.NewEncoder(w)
	mcusername := r.Form.Get("mcusername")
	// don't do anything yet
	user, err := mojang.GetProfileByUsername(mcusername)
	switch {
	case err == mojang.ERR_NO_SUCH_USER:
		// Unknown username is a normal outcome, reported as JSON.
		enc.Encode(struct {
			Error string `json:"error"`
		}{
			Error: "no such user",
		})
		return
	case err != nil:
		log.Println("error while getting mojang profile", mcusername, err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	mcprofile, err := minecraft.GetProfile(user.Id)
	if err != nil {
		log.Println("error while getting minecraft profile", mcusername, user.Id, err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	skinim, err := minecraft.GetSkin(mcprofile)
	if err != nil {
		log.Println("error while getting skin", mcusername, user.Id, mcprofile, err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	enc.Encode(struct {
		MCUsername string `json:"mcusername"`
		UUID       string `json:"uuid"`
		Exists     bool   `json:"exists"`
	}{
		MCUsername: mcusername,
		UUID:       user.Id,
		Exists:     mcassoc.HasDatablock(skinim),
	})
}
// ApiAuthenticateUserPage (POST /api/user/authenticate) verifies the
// password datablock embedded in the skin of the account identified by
// "uuid" against the supplied "password". On success it also validates
// the site-supplied postback URL and returns a signed postback blob.
func ApiAuthenticateUserPage(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
		w.Header().Set("Allow", "POST")
		w.WriteHeader(http.StatusMethodNotAllowed)
		w.Write([]byte("must be a POST request"))
		return
	}
	err := r.ParseForm()
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte("data invalid"))
		return
	}
	je := json.NewEncoder(w)
	uuid := r.Form.Get("uuid")
	password := r.Form.Get("password")
	mcprofile, err := minecraft.GetProfile(uuid)
	if err != nil {
		log.Println("error while getting minecraft profile", uuid, err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	skinim, err := minecraft.GetSkin(mcprofile)
	if err != nil {
		log.Println("error while getting skin", uuid, mcprofile, err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	passwordok, err := authenticator.Verify(password, skinim)
	if err != nil {
		log.Println("error verifying datablock", uuid, mcprofile, err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	postbackurl := ""
	postbackdata := ""
	if passwordok {
		// yay!
		// Only validate and sign the postback when the password checked
		// out; failed attempts return empty postback fields below.
		postbackstr := r.Form.Get("data[postback]")
		postback, err := url.Parse(postbackstr)
		if err != nil || (postback.Scheme != "http" && postback.Scheme != "https") {
			w.WriteHeader(http.StatusPreconditionFailed)
			return
		}
		postbackdata = generateDataBlob(SigningData{
			Now:      time.Now().Unix(),
			UUID:     mcprofile.Id,
			Username: mcprofile.Name,
		}, r.Form.Get("data[siteid]"))
		postbackurl = postback.String()
	}
	je.Encode(struct {
		MCUsername   string `json:"mcusername"`
		UUID         string `json:"uuid"`
		Correct      bool   `json:"correct"`
		Postback     string `json:"postback"`
		PostbackData string `json:"postbackdata"`
	}{
		MCUsername:   mcprofile.Name,
		UUID:         mcprofile.Id,
		Correct:      passwordok,
		Postback:     postbackurl,
		PostbackData: postbackdata,
	})
}
// ApiCreateUserPage (POST /api/user/create) embeds a password datablock
// into the current skin of the account identified by "uuid" and writes
// the result as a temp PNG under tmpskin/, returning the generated
// filename (served later by SkinServerPage).
//
// Fix: a partially written temp file is now removed when png.Encode
// fails; previously it was left behind in tmpskin/.
func ApiCreateUserPage(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
		w.Header().Set("Allow", "POST")
		w.WriteHeader(http.StatusMethodNotAllowed)
		w.Write([]byte("must be a POST request"))
		return
	}
	err := r.ParseForm()
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte("data invalid"))
		return
	}
	je := json.NewEncoder(w)
	uuid := r.Form.Get("uuid")
	password := r.Form.Get("password")
	mcprofile, err := minecraft.GetProfile(uuid)
	if err != nil {
		log.Println("error while getting minecraft profile", uuid, err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	skinim, err := minecraft.GetSkin(mcprofile)
	if err != nil {
		log.Println("error while getting skin", uuid, mcprofile, err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	authedim, err := authenticator.Embed(password, skinim)
	if err != nil {
		log.Println("error while embedding into skin", uuid, mcprofile, err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	// NOTE(review): uuid is client-supplied and used as the temp-file
	// name prefix; TempFile rejects/neutralizes separators in modern Go,
	// but this is worth confirming on the deployed Go version.
	tmpf, err := ioutil.TempFile("tmpskin/", uuid)
	if err != nil {
		log.Println("error while opening temp file", uuid, mcprofile, err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	defer tmpf.Close()
	err = png.Encode(tmpf, authedim)
	if err != nil {
		log.Println("error while writing authed skin image", uuid, mcprofile, err)
		// Don't leave a partially written PNG behind.
		os.Remove(tmpf.Name())
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	je.Encode(struct {
		Filename string `json:"filename"`
	}{
		Filename: path.Base(tmpf.Name()),
	})
}
// SkinServerPage streams a generated skin PNG from tmpskin/ back to the
// client. Open failures of any kind are reported as 404; the route
// pattern restricts the filename to [0-9a-z]+, so no path separators can
// reach os.Open.
func SkinServerPage(w http.ResponseWriter, r *http.Request) {
	name := mux.Vars(r)["filename"]
	f, err := os.Open("tmpskin/" + name)
	if err != nil {
		w.WriteHeader(http.StatusNotFound)
		return
	}
	defer f.Close()
	w.Header().Add("Content-Type", "image/png")
	// A copy error mid-stream is unrecoverable once headers are out.
	io.Copy(w, f)
}
// myinit parses the command-line flags and populates the package-level
// configuration: the session key (sesskey), the password authenticator
// and the HTTP listen address.
//
// Fix: the session and authentication keys are secrets and are no longer
// written to the log in plaintext; instead a warning is emitted when the
// insecure defaults are still in use.
func myinit() {
	var flagSesskey string
	var flagAuthenticationKey string
	flag.StringVar(&flagSesskey, "sesskey", "insecure", "session key (used for creating shared secrets with clients)")
	flag.StringVar(&flagAuthenticationKey, "authkey", "insecure", "authentication key (used for hashing passwords)")
	flag.StringVar(&httplistenloc, "listen", ":21333", "HTTP listener location")
	flag.Parse()
	// load the authentication keys
	sesskey = []byte(flagSesskey)
	authenticator = mcassoc.NewAssocifier(flagAuthenticationKey)
	if flagSesskey == "insecure" || flagAuthenticationKey == "insecure" {
		log.Println("WARNING: default (insecure) key(s) in use; set -sesskey and -authkey")
	}
	log.Println("Session and authentication keys configured")
	log.Println("Going to listen at", httplistenloc)
}
// main registers all HTTP routes on a gorilla/mux router and serves them
// until the listener fails.
//
// Improvement: use an explicit http.Server with read/write timeouts so
// slow or stalled clients cannot hold connections open indefinitely
// (plain http.ListenAndServe sets no timeouts at all).
func main() {
	myinit()
	r := mux.NewRouter()
	r.HandleFunc("/", HomePage)
	r.HandleFunc("/perform", PerformPage)
	r.HandleFunc("/api/user/check", ApiCheckUserPage)
	r.HandleFunc("/api/user/create", ApiCreateUserPage)
	r.HandleFunc("/api/user/authenticate", ApiAuthenticateUserPage)
	r.HandleFunc("/media/skin/{filename:[0-9a-z]+}.png", SkinServerPage)
	http.Handle("/", r)
	log.Println("Running!")
	srv := &http.Server{
		Addr:         httplistenloc,
		ReadTimeout:  30 * time.Second,
		WriteTimeout: 30 * time.Second,
		// Handler nil => DefaultServeMux, where the router is mounted.
	}
	err := srv.ListenAndServe()
	if err != nil {
		log.Fatal("http.ListenAndServe: ", err)
	}
}
|
package main
import (
"bytes"
"encoding/json"
"fmt"
"github.com/mingrammer/go-codelab/models"
"log"
"net/http"
"strings"
"time"
"sync"
"strconv"
)
// main simulates three sensors (gyro, accelerometer, temperature), each
// on its own goroutine, periodically printing a reading and POSTing it as
// JSON to a local collector on ports 8001-8003.
//
// Fix: the single-case `select` inside each loop was redundant
// (staticcheck S1000) — replaced with the idiomatic `for range ticker.C`.
// Behavior is unchanged.
func main() {
	var wg sync.WaitGroup
	// The WaitGroup exists only to block main forever: the sensor
	// goroutines loop indefinitely and never call Done.
	wg.Add(3)
	gyroTicker := time.NewTicker(500 * time.Millisecond)
	accelTicker := time.NewTicker(500 * time.Millisecond)
	tempTicker := time.NewTicker(2 * time.Second)
	go func() {
		for range gyroTicker.C {
			url := getRequestServerUrl(8001)
			gyroSensorData := models.GyroSensor{
				Sensor: models.Sensor{
					Name:    "GyroSensor",
					Type:    "VelocitySensor",
					GenTime: time.Now(),
				},
				AngleVelocityX: 32.54,
				AngleVelocityY: 35.12,
				AngleVelocityZ: 61.23,
			}
			fmt.Println(gyroSensorData)
			sendJsonSensorData(url, gyroSensorData)
		}
	}()
	go func() {
		for range accelTicker.C {
			url := getRequestServerUrl(8002)
			accelSensorData := models.AccelSensor{
				Sensor: models.Sensor{
					Name:    "AccelerometerSensor",
					Type:    "VelocitySensor",
					GenTime: time.Now(),
				},
				GravityAccX: 41.31,
				GravityAccY: 81.36,
				GravityAccZ: 46.19,
			}
			fmt.Println(accelSensorData)
			sendJsonSensorData(url, accelSensorData)
		}
	}()
	go func() {
		for range tempTicker.C {
			url := getRequestServerUrl(8003)
			tempSensorData := models.TempSensor{
				Sensor: models.Sensor{
					Name:    "TemperatureSensor",
					Type:    "AtmosphericSensor",
					GenTime: time.Now(),
				},
				Temperature: 84.13,
				Humidity:    76.12,
			}
			fmt.Println(tempSensorData)
			sendJsonSensorData(url, tempSensorData)
		}
	}()
	wg.Wait()
}
func getRequestServerUrl(port int) string {
urlComponents := []string{"http://127.0.0.1", strconv.Itoa(port)}
return strings.Join(urlComponents, ":")
}
// sendJsonSensorData marshals sensorValues to JSON and POSTs it to url.
// Any failure — marshal error, transport error or non-200 status — aborts
// the process via log.Fatal (this simulator has no retry logic).
//
// Fixes: the response body is now closed (the original leaked one
// connection per successful POST, and this runs every 500ms), and the
// marshal error message no longer misidentifies itself as gyro-specific
// ("mashaling the gyro sensor values" in a generic helper).
func sendJsonSensorData(url string, sensorValues interface{}) {
	jsonBytes, err := json.Marshal(sensorValues)
	if err != nil {
		log.Fatal("Error occurs when marshaling the sensor values")
	}
	buff := bytes.NewBuffer(jsonBytes)
	resp, err := http.Post(url, "application/json", buff)
	if err != nil {
		log.Fatal("Error occurs when request the post data")
	}
	// Close so the transport can reuse the underlying connection.
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		log.Fatal("Error occurs when request the post data")
	}
}
Conflict Resolved
package main
import (
"bytes"
"encoding/json"
"fmt"
"github.com/mingrammer/go-codelab/models"
"log"
"net/http"
"strconv"
"strings"
"sync"
"time"
)
// main simulates three sensors (gyro, accelerometer, temperature), each
// on its own goroutine, periodically printing a fixed reading and POSTing
// it as JSON to a local collector on ports 8001-8003.
func main() {
	var wg sync.WaitGroup
	// The WaitGroup only serves to block main forever: the goroutines
	// below loop indefinitely and never call Done.
	wg.Add(3)
	gyroTicker := time.NewTicker(500 * time.Millisecond)
	accelTicker := time.NewTicker(500 * time.Millisecond)
	tempTicker := time.NewTicker(2 * time.Second)
	go func() {
		for {
			// Single-case select: equivalent to `for range gyroTicker.C`.
			select {
			case <-gyroTicker.C:
				url := getRequestServerUrl(8001)
				gyroSensorData := models.GyroSensor{
					Sensor: models.Sensor{
						Name:    "GyroSensor",
						Type:    "VelocitySensor",
						GenTime: time.Now(),
					},
					AngleVelocityX: 32.54,
					AngleVelocityY: 35.12,
					AngleVelocityZ: 61.23,
				}
				fmt.Println(gyroSensorData)
				sendJsonSensorData(url, gyroSensorData)
			}
		}
	}()
	go func() {
		for {
			select {
			case <-accelTicker.C:
				url := getRequestServerUrl(8002)
				accelSensorData := models.AccelSensor{
					Sensor: models.Sensor{
						Name:    "AccelerometerSensor",
						Type:    "VelocitySensor",
						GenTime: time.Now(),
					},
					GravityAccX: 41.31,
					GravityAccY: 81.36,
					GravityAccZ: 46.19,
				}
				fmt.Println(accelSensorData)
				sendJsonSensorData(url, accelSensorData)
			}
		}
	}()
	go func() {
		for {
			select {
			case <-tempTicker.C:
				url := getRequestServerUrl(8003)
				tempSensorData := models.TempSensor{
					Sensor: models.Sensor{
						Name:    "TemperatureSensor",
						Type:    "AtmosphericSensor",
						GenTime: time.Now(),
					},
					Temperature: 84.13,
					Humidity:    76.12,
				}
				fmt.Println(tempSensorData)
				sendJsonSensorData(url, tempSensorData)
			}
		}
	}()
	wg.Wait()
}
// getRequestServerUrl builds the URL of the local collector endpoint
// listening on the given port, e.g. "http://127.0.0.1:8001".
func getRequestServerUrl(port int) string {
	urlComponents := []string{"http://127.0.0.1", strconv.Itoa(port)}
	return strings.Join(urlComponents, ":")
}
// sendJsonSensorData marshals sensorValues to JSON and POSTs it to url.
// Any failure — marshal error, transport error or a non-200 status —
// aborts the whole process via log.Fatal.
func sendJsonSensorData(url string, sensorValues interface{}) {
	jsonBytes, err := json.Marshal(sensorValues)
	if err != nil {
		log.Fatal("Error occurs when mashaling the gyro sensor values")
	}
	buff := bytes.NewBuffer(jsonBytes)
	resp, err := http.Post(url, "application/json", buff)
	// NOTE(review): resp.Body is never closed on the success path, which
	// leaks a connection per call. The StatusCode access is safe only
	// because || short-circuits when err != nil (resp would be nil).
	if err != nil || resp.StatusCode != 200 {
		log.Fatal("Error occurs when request the post data")
	}
}
|
package nomad
import (
"fmt"
"reflect"
"strings"
"testing"
"time"
memdb "github.com/hashicorp/go-memdb"
msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
"github.com/hashicorp/nomad/acl"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/testutil"
"github.com/kr/pretty"
"github.com/stretchr/testify/assert"
)
// TestJobEndpoint_Register exercises the happy path of the Job.Register
// RPC: a mock job is registered, then the FSM state is checked for the
// stored job and for the evaluation the registration should have created.
func TestJobEndpoint_Register(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	// Create the register request
	job := mock.Job()
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}
	// Check for the node in the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected job")
	}
	if out.CreateIndex != resp.JobModifyIndex {
		t.Fatalf("index mis-match")
	}
	serviceName := out.TaskGroups[0].Tasks[0].Services[0].Name
	expectedServiceName := "web-frontend"
	if serviceName != expectedServiceName {
		t.Fatalf("Expected Service Name: %s, Actual: %s", expectedServiceName, serviceName)
	}
	// Lookup the evaluation created by the registration and verify that
	// its fields were copied from the job/response.
	eval, err := state.EvalByID(ws, resp.EvalID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if eval == nil {
		t.Fatalf("expected eval")
	}
	if eval.CreateIndex != resp.EvalCreateIndex {
		t.Fatalf("index mis-match")
	}
	if eval.Priority != job.Priority {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.Type != job.Type {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.TriggeredBy != structs.EvalTriggerJobRegister {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.JobID != job.ID {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.JobModifyIndex != resp.JobModifyIndex {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.Status != structs.EvalStatusPending {
		t.Fatalf("bad: %#v", eval)
	}
}
// TestJobEndpoint_Register_ACL verifies that on an ACL-enabled server
// Job.Register is rejected without a token and accepted with the root
// management token.
func TestJobEndpoint_Register_ACL(t *testing.T) {
	t.Parallel()
	s1, root := testACLServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	// Create the register request
	job := mock.Job()
	req := &structs.JobRegisterRequest{
		Job:          job,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	// Try without a token, expect failure
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err == nil {
		t.Fatalf("expected error")
	}
	// Try with a token
	req.SecretID = root.SecretID
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}
	// Check for the node in the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected job")
	}
}
// TestJobEndpoint_Register_InvalidNamespace verifies that registering a
// job into a namespace that does not exist fails and leaves nothing in
// the FSM state.
func TestJobEndpoint_Register_InvalidNamespace(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	// Create the register request
	job := mock.Job()
	job.Namespace = "foo"
	req := &structs.JobRegisterRequest{
		Job:          job,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	// Try without a token, expect failure
	var resp structs.JobRegisterResponse
	err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
	// "non-existant" matches the server's (misspelled) error text.
	if err == nil || !strings.Contains(err.Error(), "non-existant namespace") {
		t.Fatalf("expected namespace error: %v", err)
	}
	// Check for the job in the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out != nil {
		t.Fatalf("expected no job")
	}
}
// TestJobEndpoint_Register_InvalidDriverConfig verifies that a job whose
// task driver config fails validation is rejected with a config error.
func TestJobEndpoint_Register_InvalidDriverConfig(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	// Create the register request with a job containing an invalid driver
	// config
	job := mock.Job()
	job.TaskGroups[0].Tasks[0].Config["foo"] = 1
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	// Fetch the response
	var resp structs.JobRegisterResponse
	err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
	if err == nil {
		t.Fatalf("expected a validation error")
	}
	if !strings.Contains(err.Error(), "-> config:") {
		t.Fatalf("expected a driver config validation error but got: %v", err)
	}
}
// TestJobEndpoint_Register_Payload verifies that a job submitted with a
// payload set (payloads are reserved for dispatched jobs) is rejected.
func TestJobEndpoint_Register_Payload(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	// Create the register request with a job containing a payload, which
	// is not allowed on direct registration
	job := mock.Job()
	job.Payload = []byte{0x1}
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	// Fetch the response
	var resp structs.JobRegisterResponse
	err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
	if err == nil {
		t.Fatalf("expected a validation error")
	}
	if !strings.Contains(err.Error(), "payload") {
		t.Fatalf("expected a payload error but got: %v", err)
	}
}
// TestJobEndpoint_Register_Existing verifies updating an existing job:
// a changed definition bumps the job version and creates a new eval,
// while re-submitting an identical definition leaves the version alone.
func TestJobEndpoint_Register_Existing(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	// Create the register request
	job := mock.Job()
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}
	// Update the job definition: same ID, different priority.
	job2 := mock.Job()
	job2.Priority = 100
	job2.ID = job.ID
	req.Job = job2
	// Attempt update
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}
	// Check for the node in the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected job")
	}
	if out.ModifyIndex != resp.JobModifyIndex {
		t.Fatalf("index mis-match")
	}
	if out.Priority != 100 {
		t.Fatalf("expected update")
	}
	// The update should have bumped the job version from 0 to 1.
	if out.Version != 1 {
		t.Fatalf("expected update")
	}
	// Lookup the evaluation
	eval, err := state.EvalByID(ws, resp.EvalID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if eval == nil {
		t.Fatalf("expected eval")
	}
	if eval.CreateIndex != resp.EvalCreateIndex {
		t.Fatalf("index mis-match")
	}
	if eval.Priority != job2.Priority {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.Type != job2.Type {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.TriggeredBy != structs.EvalTriggerJobRegister {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.JobID != job2.ID {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.JobModifyIndex != resp.JobModifyIndex {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.Status != structs.EvalStatusPending {
		t.Fatalf("bad: %#v", eval)
	}
	// Re-submit the identical definition.
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}
	// Check to ensure the job version didn't get bumped because we submitted
	// the same job
	state = s1.fsm.State()
	ws = memdb.NewWatchSet()
	out, err = state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected job")
	}
	if out.Version != 1 {
		t.Fatalf("expected no update; got %v; diff %v", out.Version, pretty.Diff(job2, out))
	}
}
// TestJobEndpoint_Register_Periodic verifies that registering a periodic
// job stores it but does NOT create an evaluation (periodic jobs are
// evaluated when their children are launched, not at registration).
func TestJobEndpoint_Register_Periodic(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	// Create the register request for a periodic job.
	job := mock.PeriodicJob()
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.JobModifyIndex == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}
	// Check for the node in the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected job")
	}
	if out.CreateIndex != resp.JobModifyIndex {
		t.Fatalf("index mis-match")
	}
	serviceName := out.TaskGroups[0].Tasks[0].Services[0].Name
	expectedServiceName := "web-frontend"
	if serviceName != expectedServiceName {
		t.Fatalf("Expected Service Name: %s, Actual: %s", expectedServiceName, serviceName)
	}
	// A periodic registration must not produce an eval.
	if resp.EvalID != "" {
		t.Fatalf("Register created an eval for a periodic job")
	}
}
// TestJobEndpoint_Register_ParameterizedJob verifies that registering a
// parameterized batch job stores it but does NOT create an evaluation
// (parameterized jobs are only evaluated when dispatched).
func TestJobEndpoint_Register_ParameterizedJob(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	// Create the register request for a parameterized job.
	job := mock.Job()
	job.Type = structs.JobTypeBatch
	job.ParameterizedJob = &structs.ParameterizedJobConfig{}
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.JobModifyIndex == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}
	// Check for the job in the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected job")
	}
	if out.CreateIndex != resp.JobModifyIndex {
		t.Fatalf("index mis-match")
	}
	// A parameterized registration must not produce an eval.
	if resp.EvalID != "" {
		t.Fatalf("Register created an eval for a parameterized job")
	}
}
// TestJobEndpoint_Register_EnforceIndex exercises check-and-set job
// registration (EnforceIndex): registration must fail when the enforced
// JobModifyIndex doesn't match reality, and succeed when it does.
func TestJobEndpoint_Register_EnforceIndex(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	// Create the register request and enforcing an incorrect index
	job := mock.Job()
	req := &structs.JobRegisterRequest{
		Job:            job,
		EnforceIndex:   true,
		JobModifyIndex: 100, // Not registered yet so not possible
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	// Fetch the response
	var resp structs.JobRegisterResponse
	err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
	if err == nil || !strings.Contains(err.Error(), RegisterEnforceIndexErrPrefix) {
		t.Fatalf("expected enforcement error")
	}
	// Create the register request and enforcing it is new
	// (JobModifyIndex 0 == "must not already exist").
	req = &structs.JobRegisterRequest{
		Job:            job,
		EnforceIndex:   true,
		JobModifyIndex: 0,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	// Fetch the response
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}
	curIndex := resp.JobModifyIndex
	// Check for the node in the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected job")
	}
	if out.CreateIndex != resp.JobModifyIndex {
		t.Fatalf("index mis-match")
	}
	// Reregister request and enforcing it be a new job — must now fail
	// because the job exists.
	req = &structs.JobRegisterRequest{
		Job:            job,
		EnforceIndex:   true,
		JobModifyIndex: 0,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	// Fetch the response
	err = msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
	if err == nil || !strings.Contains(err.Error(), RegisterEnforceIndexErrPrefix) {
		t.Fatalf("expected enforcement error")
	}
	// Reregister request and enforcing it be at an incorrect index
	req = &structs.JobRegisterRequest{
		Job:            job,
		EnforceIndex:   true,
		JobModifyIndex: curIndex - 1,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	// Fetch the response
	err = msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
	if err == nil || !strings.Contains(err.Error(), RegisterEnforceIndexErrPrefix) {
		t.Fatalf("expected enforcement error")
	}
	// Reregister request and enforcing it be at the correct index
	job.Priority = job.Priority + 1
	req = &structs.JobRegisterRequest{
		Job:            job,
		EnforceIndex:   true,
		JobModifyIndex: curIndex,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	// Fetch the response
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}
	out, err = state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected job")
	}
	if out.Priority != job.Priority {
		t.Fatalf("priority mis-match")
	}
}
// TestJobEndpoint_Register_Vault_Disabled verifies that a job requesting
// Vault policies is rejected when Vault integration is disabled on the
// server.
func TestJobEndpoint_Register_Vault_Disabled(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
		f := false
		c.VaultConfig.Enabled = &f
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	// Create the register request with a job asking for a vault policy
	job := mock.Job()
	job.TaskGroups[0].Tasks[0].Vault = &structs.Vault{
		Policies:   []string{"foo"},
		ChangeMode: structs.VaultChangeModeRestart,
	}
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	// Fetch the response
	var resp structs.JobRegisterResponse
	err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
	if err == nil || !strings.Contains(err.Error(), "Vault not enabled") {
		t.Fatalf("expected Vault not enabled error: %v", err)
	}
}
// TestJobEndpoint_Register_Vault_AllowUnauthenticated verifies that when
// AllowUnauthenticated is set, a job may request Vault policies without
// supplying a Vault token.
func TestJobEndpoint_Register_Vault_AllowUnauthenticated(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	// Enable vault and allow authenticated
	tr := true
	s1.config.VaultConfig.Enabled = &tr
	s1.config.VaultConfig.AllowUnauthenticated = &tr
	// Replace the Vault Client on the server so no real Vault is needed
	s1.vault = &TestVaultClient{}
	// Create the register request with a job asking for a vault policy
	job := mock.Job()
	job.TaskGroups[0].Tasks[0].Vault = &structs.Vault{
		Policies:   []string{"foo"},
		ChangeMode: structs.VaultChangeModeRestart,
	}
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	// Fetch the response
	var resp structs.JobRegisterResponse
	err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
	if err != nil {
		t.Fatalf("bad: %v", err)
	}
	// Check for the job in the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected job")
	}
	if out.CreateIndex != resp.JobModifyIndex {
		t.Fatalf("index mis-match")
	}
}
// TestJobEndpoint_Register_Vault_NoToken verifies that when unauthenticated
// submissions are disallowed, a job requesting Vault policies without a
// Vault token is rejected.
func TestJobEndpoint_Register_Vault_NoToken(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	// Enable vault
	tr, f := true, false
	s1.config.VaultConfig.Enabled = &tr
	s1.config.VaultConfig.AllowUnauthenticated = &f
	// Replace the Vault Client on the server so no real Vault is needed
	s1.vault = &TestVaultClient{}
	// Create the register request with a job asking for a vault policy but
	// don't send a Vault token
	job := mock.Job()
	job.TaskGroups[0].Tasks[0].Vault = &structs.Vault{
		Policies:   []string{"foo"},
		ChangeMode: structs.VaultChangeModeRestart,
	}
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	// Fetch the response
	var resp structs.JobRegisterResponse
	err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
	if err == nil || !strings.Contains(err.Error(), "missing Vault Token") {
		t.Fatalf("expected Vault not enabled error: %v", err)
	}
}
// TestJobEndpoint_Register_Vault_Policies verifies Vault token handling
// during registration: a token lacking the requested policy is rejected,
// a Vault lookup error is propagated, a token with the policy succeeds
// (with the token scrubbed from state and an implicit vault constraint
// added), and a root token bypasses the policy check entirely.
//
// Fix: the second job's Vault stanza was mistakenly assigned to `job`
// instead of `job2`, so the root-token registration never actually
// requested a Vault policy and the root-bypass path was untested.
func TestJobEndpoint_Register_Vault_Policies(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	// Enable vault
	tr, f := true, false
	s1.config.VaultConfig.Enabled = &tr
	s1.config.VaultConfig.AllowUnauthenticated = &f
	// Replace the Vault Client on the server
	tvc := &TestVaultClient{}
	s1.vault = tvc
	// Add three tokens: one that allows the requesting policy, one that does
	// not and one that returns an error
	policy := "foo"
	badToken := structs.GenerateUUID()
	badPolicies := []string{"a", "b", "c"}
	tvc.SetLookupTokenAllowedPolicies(badToken, badPolicies)
	goodToken := structs.GenerateUUID()
	goodPolicies := []string{"foo", "bar", "baz"}
	tvc.SetLookupTokenAllowedPolicies(goodToken, goodPolicies)
	rootToken := structs.GenerateUUID()
	rootPolicies := []string{"root"}
	tvc.SetLookupTokenAllowedPolicies(rootToken, rootPolicies)
	errToken := structs.GenerateUUID()
	expectedErr := fmt.Errorf("return errors from vault")
	tvc.SetLookupTokenError(errToken, expectedErr)
	// Create the register request with a job asking for a vault policy but
	// send the bad Vault token
	job := mock.Job()
	job.VaultToken = badToken
	job.TaskGroups[0].Tasks[0].Vault = &structs.Vault{
		Policies:   []string{policy},
		ChangeMode: structs.VaultChangeModeRestart,
	}
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	// Fetch the response
	var resp structs.JobRegisterResponse
	err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
	if err == nil || !strings.Contains(err.Error(),
		"doesn't allow access to the following policies: "+policy) {
		t.Fatalf("expected permission denied error: %v", err)
	}
	// Use the err token
	job.VaultToken = errToken
	err = msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
	if err == nil || !strings.Contains(err.Error(), expectedErr.Error()) {
		t.Fatalf("expected permission denied error: %v", err)
	}
	// Use the good token
	job.VaultToken = goodToken
	// Fetch the response
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("bad: %v", err)
	}
	// Check for the job in the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected job")
	}
	if out.CreateIndex != resp.JobModifyIndex {
		t.Fatalf("index mis-match")
	}
	// The Vault token must never be persisted to state.
	if out.VaultToken != "" {
		t.Fatalf("vault token not cleared")
	}
	// Check that an implicit constraint was created
	constraints := out.TaskGroups[0].Constraints
	if l := len(constraints); l != 1 {
		t.Fatalf("Unexpected number of constraints: %v", l)
	}
	if !constraints[0].Equal(vaultConstraint) {
		t.Fatalf("bad constraint; got %#v; want %#v", constraints[0], vaultConstraint)
	}
	// Create the register request with another job asking for a vault policy but
	// send the root Vault token
	job2 := mock.Job()
	job2.VaultToken = rootToken
	job2.TaskGroups[0].Tasks[0].Vault = &structs.Vault{
		Policies:   []string{policy},
		ChangeMode: structs.VaultChangeModeRestart,
	}
	req = &structs.JobRegisterRequest{
		Job: job2,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	// Fetch the response
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("bad: %v", err)
	}
	// Check for the job in the FSM
	out, err = state.JobByID(ws, job2.Namespace, job2.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected job")
	}
	if out.CreateIndex != resp.JobModifyIndex {
		t.Fatalf("index mis-match")
	}
	if out.VaultToken != "" {
		t.Fatalf("vault token not cleared")
	}
}
// TestJobEndpoint_Revert registers two versions of a job and then exercises
// Job.Revert: enforcement against a wrong prior version, rejection of a
// revert to the current version, a successful enforced revert, and a
// successful unenforced revert. It finally verifies the reverted job state,
// the created evaluation, and the version history.
func TestJobEndpoint_Revert(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the initial register request (version 0, priority 100)
	job := mock.Job()
	job.Priority = 100
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}

	// Reregister again to get another version (version 1, priority 1)
	job2 := job.Copy()
	job2.Priority = 1
	req = &structs.JobRegisterRequest{
		Job: job2,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}

	// Create revert request and enforcing it be at an incorrect version
	revertReq := &structs.JobRevertRequest{
		JobID:               job.ID,
		JobVersion:          0,
		EnforcePriorVersion: helper.Uint64ToPtr(10),
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response; the enforced version doesn't match the current one
	// so the revert must be rejected.
	err := msgpackrpc.CallWithCodec(codec, "Job.Revert", revertReq, &resp)
	if err == nil || !strings.Contains(err.Error(), "enforcing version 10") {
		// Include the actual error for easier debugging, matching the
		// style of the other error checks in this test.
		t.Fatalf("expected enforcement error: %v", err)
	}

	// Create revert request and enforcing it be at the current version
	revertReq = &structs.JobRevertRequest{
		JobID:      job.ID,
		JobVersion: 1,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response; reverting to the current version is invalid.
	err = msgpackrpc.CallWithCodec(codec, "Job.Revert", revertReq, &resp)
	if err == nil || !strings.Contains(err.Error(), "current version") {
		t.Fatalf("expected current version err: %v", err)
	}

	// Create revert request and enforcing it be at version 1
	revertReq = &structs.JobRevertRequest{
		JobID:               job.ID,
		JobVersion:          0,
		EnforcePriorVersion: helper.Uint64ToPtr(1),
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	if err := msgpackrpc.CallWithCodec(codec, "Job.Revert", revertReq, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}
	if resp.EvalID == "" || resp.EvalCreateIndex == 0 {
		t.Fatalf("bad created eval: %+v", resp)
	}
	if resp.JobModifyIndex == 0 {
		t.Fatalf("bad job modify index: %d", resp.JobModifyIndex)
	}

	// Create revert request and don't enforce. We are at version 2 but it is
	// the same as version 0
	revertReq = &structs.JobRevertRequest{
		JobID:      job.ID,
		JobVersion: 0,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	if err := msgpackrpc.CallWithCodec(codec, "Job.Revert", revertReq, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}
	if resp.EvalID == "" || resp.EvalCreateIndex == 0 {
		t.Fatalf("bad created eval: %+v", resp)
	}
	if resp.JobModifyIndex == 0 {
		t.Fatalf("bad job modify index: %d", resp.JobModifyIndex)
	}

	// Check that the job is at the correct version and that the eval was
	// created
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected job")
	}
	if out.Priority != job.Priority {
		t.Fatalf("priority mis-match")
	}
	if out.Version != 2 {
		t.Fatalf("got version %d; want %d", out.Version, 2)
	}

	eout, err := state.EvalByID(ws, resp.EvalID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if eout == nil {
		t.Fatalf("expected eval")
	}
	if eout.JobID != job.ID {
		t.Fatalf("job id mis-match")
	}

	// Each successful revert creates a new version, so we expect 3 total.
	versions, err := state.JobVersionsByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if len(versions) != 3 {
		t.Fatalf("got %d versions; want %d", len(versions), 3)
	}
}
// TestJobEndpoint_Revert_ACL verifies the ACL enforcement of Job.Revert:
// anonymous and insufficiently-privileged tokens are rejected, while both a
// management token and a token carrying submit-job succeed.
func TestJobEndpoint_Revert_ACL(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)

	srv, root := testACLServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer srv.Shutdown()
	codec := rpcClient(t, srv)
	state := srv.fsm.State()
	testutil.WaitForLeader(t, srv.RPC)

	// Seed two versions of the job directly into state.
	job := mock.Job()
	assert.Nil(state.UpsertJob(300, job))

	lowPriority := job.Copy()
	lowPriority.Priority = 1
	assert.Nil(state.UpsertJob(400, lowPriority))

	// Request a revert back to version 0.
	revert := &structs.JobRevertRequest{
		JobID:      job.ID,
		JobVersion: 0,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// No token at all: expect rejection.
	var anonResp structs.JobRegisterResponse
	err := msgpackrpc.CallWithCodec(codec, "Job.Revert", revert, &anonResp)
	assert.NotNil(err)
	assert.Contains(err.Error(), "Permission denied")

	// A token that can only read jobs is not enough to revert.
	invalidToken := CreatePolicyAndToken(t, state, 1001, "test-invalid",
		NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
	revert.SecretID = invalidToken.SecretID
	var invalidResp structs.JobRegisterResponse
	err = msgpackrpc.CallWithCodec(codec, "Job.Revert", revert, &invalidResp)
	assert.NotNil(err)
	assert.Contains(err.Error(), "Permission denied")

	// A management token succeeds.
	revert.SecretID = root.SecretID
	var mgmtResp structs.JobRegisterResponse
	assert.Nil(msgpackrpc.CallWithCodec(codec, "Job.Revert", revert, &mgmtResp))

	// A non-management token with submit-job also succeeds.
	validToken := CreatePolicyAndToken(t, state, 1003, "test-valid",
		NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilitySubmitJob}))
	revert.SecretID = validToken.SecretID
	var submitResp structs.JobRegisterResponse
	assert.Nil(msgpackrpc.CallWithCodec(codec, "Job.Revert", revert, &submitResp))
}
// TestJobEndpoint_Stable registers a job, marks version 0 stable via
// Job.Stable, and verifies the stability flag is persisted in state.
func TestJobEndpoint_Stable(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the initial register request
	job := mock.Job()
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}

	// Create stability request
	stableReq := &structs.JobStabilityRequest{
		JobID:      job.ID,
		JobVersion: 0,
		Stable:     true,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	var stableResp structs.JobStabilityResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Stable", stableReq, &stableResp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if stableResp.Index == 0 {
		// Report the index that was actually checked (was resp.Index, a
		// copy-paste of the register check above).
		t.Fatalf("bad index: %d", stableResp.Index)
	}

	// Check that the job is marked stable
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected job")
	}
	if !out.Stable {
		t.Fatalf("Job is not marked stable")
	}
}
// TestJobEndpoint_Evaluate registers a job, forces a re-evaluation through
// Job.Evaluate, and verifies the resulting evaluation's fields in state.
func TestJobEndpoint_Evaluate(t *testing.T) {
	t.Parallel()
	srv := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer srv.Shutdown()
	codec := rpcClient(t, srv)
	testutil.WaitForLeader(t, srv.RPC)

	// Register a job to evaluate.
	job := mock.Job()
	regReq := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", regReq, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}

	// Ask the endpoint to create a fresh evaluation for the job.
	evalReq := &structs.JobEvaluateRequest{
		JobID: job.ID,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	if err := msgpackrpc.CallWithCodec(codec, "Job.Evaluate", evalReq, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}

	// The evaluation must exist in the FSM and mirror the job's attributes.
	fsmState := srv.fsm.State()
	ws := memdb.NewWatchSet()
	evalOut, err := fsmState.EvalByID(ws, resp.EvalID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if evalOut == nil {
		t.Fatalf("expected eval")
	}
	if evalOut.CreateIndex != resp.EvalCreateIndex {
		t.Fatalf("index mis-match")
	}

	if evalOut.Priority != job.Priority {
		t.Fatalf("bad: %#v", evalOut)
	}
	if evalOut.Type != job.Type {
		t.Fatalf("bad: %#v", evalOut)
	}
	if evalOut.TriggeredBy != structs.EvalTriggerJobRegister {
		t.Fatalf("bad: %#v", evalOut)
	}
	if evalOut.JobID != job.ID {
		t.Fatalf("bad: %#v", evalOut)
	}
	if evalOut.JobModifyIndex != resp.JobModifyIndex {
		t.Fatalf("bad: %#v", evalOut)
	}
	if evalOut.Status != structs.EvalStatusPending {
		t.Fatalf("bad: %#v", evalOut)
	}
}
// TestJobEndpoint_Evaluate_Periodic ensures Job.Evaluate refuses to force an
// evaluation for a periodic job (periodic jobs are scheduled by the periodic
// dispatcher, not by ad-hoc evals).
func TestJobEndpoint_Evaluate_Periodic(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	job := mock.PeriodicJob()
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.JobModifyIndex == 0 {
		// Print the index the condition actually tested (was resp.Index).
		t.Fatalf("bad index: %d", resp.JobModifyIndex)
	}

	// Force a re-evaluation
	reEval := &structs.JobEvaluateRequest{
		JobID: job.ID,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response; evaluating a periodic job must be an error.
	if err := msgpackrpc.CallWithCodec(codec, "Job.Evaluate", reEval, &resp); err == nil {
		t.Fatal("expect an err")
	}
}
// TestJobEndpoint_Evaluate_ParameterizedJob ensures Job.Evaluate refuses to
// force an evaluation for a parameterized job (those only run when
// dispatched).
func TestJobEndpoint_Evaluate_ParameterizedJob(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request for a parameterized batch job
	job := mock.Job()
	job.Type = structs.JobTypeBatch
	job.ParameterizedJob = &structs.ParameterizedJobConfig{}
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.JobModifyIndex == 0 {
		// Print the index the condition actually tested (was resp.Index).
		t.Fatalf("bad index: %d", resp.JobModifyIndex)
	}

	// Force a re-evaluation
	reEval := &structs.JobEvaluateRequest{
		JobID: job.ID,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response; evaluating a parameterized job must be an error.
	if err := msgpackrpc.CallWithCodec(codec, "Job.Evaluate", reEval, &resp); err == nil {
		t.Fatal("expect an err")
	}
}
// TestJobEndpoint_Deregister exercises Job.Deregister in both modes: without
// purging (the job stays in state, marked stopped) and with purging (the job
// is removed entirely). In each case it verifies the deregister evaluation
// that the endpoint creates.
func TestJobEndpoint_Deregister(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	job := mock.Job()
	reg := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Deregister but don't purge: the job should remain in state with its
	// Stop flag set.
	dereg := &structs.JobDeregisterRequest{
		JobID: job.ID,
		Purge: false,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	var resp2 structs.JobDeregisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Deregister", dereg, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index == 0 {
		t.Fatalf("bad index: %d", resp2.Index)
	}

	// Check for the job in the FSM; it must still exist, but stopped.
	ws := memdb.NewWatchSet()
	state := s1.fsm.State()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("job purged")
	}
	if !out.Stop {
		t.Fatalf("job not stopped")
	}

	// Lookup the evaluation created by the non-purge deregister and verify
	// each field the endpoint is expected to set.
	eval, err := state.EvalByID(ws, resp2.EvalID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if eval == nil {
		t.Fatalf("expected eval")
	}
	if eval.CreateIndex != resp2.EvalCreateIndex {
		t.Fatalf("index mis-match")
	}

	if eval.Priority != structs.JobDefaultPriority {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.Type != structs.JobTypeService {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.TriggeredBy != structs.EvalTriggerJobDeregister {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.JobID != job.ID {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.JobModifyIndex != resp2.JobModifyIndex {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.Status != structs.EvalStatusPending {
		t.Fatalf("bad: %#v", eval)
	}

	// Deregister and purge: the job should be removed from state entirely.
	dereg2 := &structs.JobDeregisterRequest{
		JobID: job.ID,
		Purge: true,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	var resp3 structs.JobDeregisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Deregister", dereg2, &resp3); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp3.Index == 0 {
		t.Fatalf("bad index: %d", resp3.Index)
	}

	// Check for the job in the FSM; it must be gone now.
	out, err = state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out != nil {
		t.Fatalf("unexpected job")
	}

	// Lookup the evaluation created by the purge deregister; the purge still
	// produces a deregister eval with the same shape as the non-purge one.
	eval, err = state.EvalByID(ws, resp3.EvalID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if eval == nil {
		t.Fatalf("expected eval")
	}
	if eval.CreateIndex != resp3.EvalCreateIndex {
		t.Fatalf("index mis-match")
	}

	if eval.Priority != structs.JobDefaultPriority {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.Type != structs.JobTypeService {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.TriggeredBy != structs.EvalTriggerJobDeregister {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.JobID != job.ID {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.JobModifyIndex != resp3.JobModifyIndex {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.Status != structs.EvalStatusPending {
		t.Fatalf("bad: %#v", eval)
	}
}
// TestJobEndpoint_Deregister_NonExistent verifies that deregistering a job
// that was never registered still succeeds and creates a deregister
// evaluation for it.
func TestJobEndpoint_Deregister_NonExistent(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Deregister a job ID that does not exist in state
	jobID := "foo"
	dereg := &structs.JobDeregisterRequest{
		JobID: jobID,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: structs.DefaultNamespace,
		},
	}
	var resp2 structs.JobDeregisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Deregister", dereg, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.JobModifyIndex == 0 {
		// Print the index the condition actually tested (was resp2.Index).
		t.Fatalf("bad index: %d", resp2.JobModifyIndex)
	}

	// Lookup the evaluation and verify its fields
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	eval, err := state.EvalByID(ws, resp2.EvalID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if eval == nil {
		t.Fatalf("expected eval")
	}
	if eval.CreateIndex != resp2.EvalCreateIndex {
		t.Fatalf("index mis-match")
	}

	if eval.Priority != structs.JobDefaultPriority {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.Type != structs.JobTypeService {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.TriggeredBy != structs.EvalTriggerJobDeregister {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.JobID != jobID {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.JobModifyIndex != resp2.JobModifyIndex {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.Status != structs.EvalStatusPending {
		t.Fatalf("bad: %#v", eval)
	}
}
// TestJobEndpoint_Deregister_Periodic verifies that purging a periodic job
// removes it from state and does not create an evaluation (periodic jobs are
// driven by the periodic dispatcher, not evals).
func TestJobEndpoint_Deregister_Periodic(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	job := mock.PeriodicJob()
	reg := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Deregister
	dereg := &structs.JobDeregisterRequest{
		JobID: job.ID,
		Purge: true,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	var resp2 structs.JobDeregisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Deregister", dereg, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.JobModifyIndex == 0 {
		// Print the index the condition actually tested (was resp2.Index).
		t.Fatalf("bad index: %d", resp2.JobModifyIndex)
	}

	// Check that the job was removed from the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out != nil {
		t.Fatalf("unexpected job")
	}

	// The deregister response (not the register response, which the original
	// check inspected) must not carry an eval for a periodic job.
	if resp2.EvalID != "" {
		t.Fatalf("Deregister created an eval for a periodic job")
	}
}
// TestJobEndpoint_Deregister_ParameterizedJob verifies that purging a
// parameterized job removes it from state and does not create an evaluation
// (parameterized jobs only run when dispatched).
func TestJobEndpoint_Deregister_ParameterizedJob(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request for a parameterized batch job
	job := mock.Job()
	job.Type = structs.JobTypeBatch
	job.ParameterizedJob = &structs.ParameterizedJobConfig{}
	reg := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Deregister
	dereg := &structs.JobDeregisterRequest{
		JobID: job.ID,
		Purge: true,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	var resp2 structs.JobDeregisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Deregister", dereg, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.JobModifyIndex == 0 {
		// Print the index the condition actually tested (was resp2.Index).
		t.Fatalf("bad index: %d", resp2.JobModifyIndex)
	}

	// Check that the job was removed from the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out != nil {
		t.Fatalf("unexpected job")
	}

	// The deregister response (not the register response, which the original
	// check inspected) must not carry an eval for a parameterized job.
	if resp2.EvalID != "" {
		t.Fatalf("Deregister created an eval for a parameterized job")
	}
}
// TestJobEndpoint_GetJob registers a job and verifies Job.GetJob returns a
// deep-equal copy of it, then checks the not-found path for an unknown ID.
func TestJobEndpoint_GetJob(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	job := mock.Job()
	reg := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	// Align the local job's indexes with what the server assigned so the
	// deep-equal below can succeed.
	job.CreateIndex = resp.JobModifyIndex
	job.ModifyIndex = resp.JobModifyIndex
	job.JobModifyIndex = resp.JobModifyIndex

	// Lookup the job
	get := &structs.JobSpecificRequest{
		JobID: job.ID,
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	var resp2 structs.SingleJobResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.GetJob", get, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index != resp.JobModifyIndex {
		t.Fatalf("Bad index: %d %d", resp2.Index, resp.Index)
	}

	// Make a copy of the origin job and change the service name so that we can
	// do a deep equal with the response from the GET JOB Api
	// NOTE(review): `j := job` copies the *pointer*, not the job — j and job
	// alias the same structs.Job, so the mutations below also modify `job`.
	// Presumably intentional here (the Fatalf below prints `job` as well),
	// but confirm against job.Copy() if true isolation was meant.
	j := job
	j.TaskGroups[0].Tasks[0].Services[0].Name = "web-frontend"
	// Copy the server-canonicalized check names back onto the local job so
	// they don't cause a spurious DeepEqual mismatch.
	for tgix, tg := range j.TaskGroups {
		for tidx, t := range tg.Tasks {
			for sidx, service := range t.Services {
				for cidx, check := range service.Checks {
					check.Name = resp2.Job.TaskGroups[tgix].Tasks[tidx].Services[sidx].Checks[cidx].Name
				}
			}
		}
	}

	// Clear the submit times before comparing; the server sets SubmitTime on
	// registration.
	j.SubmitTime = 0
	resp2.Job.SubmitTime = 0

	if !reflect.DeepEqual(j, resp2.Job) {
		t.Fatalf("bad: %#v %#v", job, resp2.Job)
	}

	// Lookup non-existing job; this is not an error, the response simply has
	// a nil Job.
	get.JobID = "foobarbaz"
	if err := msgpackrpc.CallWithCodec(codec, "Job.GetJob", get, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index != resp.JobModifyIndex {
		t.Fatalf("Bad index: %d %d", resp2.Index, resp.Index)
	}
	if resp2.Job != nil {
		t.Fatalf("unexpected job")
	}
}
// TestJobEndpoint_GetJob_ACL verifies ACL enforcement on Job.GetJob: no
// token and a token lacking read-job are rejected, while a management token
// and a read-job token both succeed.
func TestJobEndpoint_GetJob_ACL(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)

	srv, root := testACLServer(t, nil)
	defer srv.Shutdown()
	codec := rpcClient(t, srv)
	testutil.WaitForLeader(t, srv.RPC)
	state := srv.fsm.State()

	// Seed a job directly into state.
	job := mock.Job()
	assert.Nil(state.UpsertJob(1000, job))

	// Build the lookup request.
	req := &structs.JobSpecificRequest{
		JobID: job.ID,
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// No token: the lookup must fail.
	var anonResp structs.SingleJobResponse
	assert.NotNil(msgpackrpc.CallWithCodec(codec, "Job.GetJob", req, &anonResp))

	// A token with only list-jobs is insufficient.
	invalidToken := CreatePolicyAndToken(t, state, 1003, "test-invalid",
		NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityListJobs}))
	req.SecretID = invalidToken.SecretID
	var invalidResp structs.SingleJobResponse
	assert.NotNil(msgpackrpc.CallWithCodec(codec, "Job.GetJob", req, &invalidResp))

	// A management token succeeds and returns the job.
	req.SecretID = root.SecretID
	var mgmtResp structs.SingleJobResponse
	assert.Nil(msgpackrpc.CallWithCodec(codec, "Job.GetJob", req, &mgmtResp))
	assert.Equal(job.ID, mgmtResp.Job.ID)

	// A token carrying read-job also succeeds.
	validToken := CreatePolicyAndToken(t, state, 1005, "test-valid",
		NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
	req.SecretID = validToken.SecretID
	var readerResp structs.SingleJobResponse
	assert.Nil(msgpackrpc.CallWithCodec(codec, "Job.GetJob", req, &readerResp))
	assert.Equal(job.ID, readerResp.Job.ID)
}
// TestJobEndpoint_GetJob_Blocking verifies the blocking-query behavior of
// Job.GetJob: the call must not return for writes below MinQueryIndex, must
// unblock on an upsert of the watched job, and must also unblock when the
// job is deleted.
// NOTE(review): relies on wall-clock timers (100/200ms) — may be flaky on
// heavily loaded CI machines.
func TestJobEndpoint_GetJob_Blocking(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	state := s1.fsm.State()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the jobs
	job1 := mock.Job()
	job2 := mock.Job()

	// Upsert a job we are not interested in first; index 100 is below
	// MinQueryIndex so this must NOT unblock the query.
	time.AfterFunc(100*time.Millisecond, func() {
		if err := state.UpsertJob(100, job1); err != nil {
			t.Fatalf("err: %v", err)
		}
	})

	// Upsert another job later which should trigger the watch.
	time.AfterFunc(200*time.Millisecond, func() {
		if err := state.UpsertJob(200, job2); err != nil {
			t.Fatalf("err: %v", err)
		}
	})

	req := &structs.JobSpecificRequest{
		JobID: job2.ID,
		QueryOptions: structs.QueryOptions{
			Region:        "global",
			Namespace:     job2.Namespace,
			MinQueryIndex: 150,
		},
	}
	start := time.Now()
	var resp structs.SingleJobResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.GetJob", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// The query must have blocked until the 200ms upsert landed.
	if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
	}
	if resp.Index != 200 {
		t.Fatalf("Bad index: %d %d", resp.Index, 200)
	}
	if resp.Job == nil || resp.Job.ID != job2.ID {
		t.Fatalf("bad: %#v", resp.Job)
	}

	// Job delete fires watches
	time.AfterFunc(100*time.Millisecond, func() {
		if err := state.DeleteJob(300, job2.Namespace, job2.ID); err != nil {
			t.Fatalf("err: %v", err)
		}
	})

	req.QueryOptions.MinQueryIndex = 250
	start = time.Now()

	var resp2 structs.SingleJobResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.GetJob", req, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}

	// The query must have blocked until the delete at index 300; a deleted
	// job unblocks with a nil Job.
	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp2)
	}
	if resp2.Index != 300 {
		t.Fatalf("Bad index: %d %d", resp2.Index, 300)
	}
	if resp2.Job != nil {
		t.Fatalf("bad: %#v", resp2.Job)
	}
}
// TestJobEndpoint_GetJobVersions registers two versions of a job and checks
// that Job.GetJobVersions returns both, newest first, plus the empty result
// for an unknown job ID.
func TestJobEndpoint_GetJobVersions(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request (version 0, priority 88)
	job := mock.Job()
	job.Priority = 88
	reg := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Register the job again to create another version (version 1)
	job.Priority = 100
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Lookup the job
	get := &structs.JobVersionsRequest{
		JobID: job.ID,
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	var versionsResp structs.JobVersionsResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.GetJobVersions", get, &versionsResp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if versionsResp.Index != resp.JobModifyIndex {
		// Print the value the condition compared against (was resp.Index).
		t.Fatalf("Bad index: %d %d", versionsResp.Index, resp.JobModifyIndex)
	}

	// Make sure there are two job versions, newest first
	versions := versionsResp.Versions
	if l := len(versions); l != 2 {
		t.Fatalf("Got %d versions; want 2", l)
	}

	if v := versions[0]; v.Priority != 100 || v.ID != job.ID || v.Version != 1 {
		t.Fatalf("bad: %+v", v)
	}
	if v := versions[1]; v.Priority != 88 || v.ID != job.ID || v.Version != 0 {
		t.Fatalf("bad: %+v", v)
	}

	// Lookup non-existing job; not an error, just an empty version list
	get.JobID = "foobarbaz"
	if err := msgpackrpc.CallWithCodec(codec, "Job.GetJobVersions", get, &versionsResp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if versionsResp.Index != resp.JobModifyIndex {
		// Print the value the condition compared against (was resp.Index).
		t.Fatalf("Bad index: %d %d", versionsResp.Index, resp.JobModifyIndex)
	}
	if l := len(versionsResp.Versions); l != 0 {
		t.Fatalf("unexpected versions: %d", l)
	}
}
// TestJobEndpoint_GetJobVersions_Diff registers three versions of a job with
// different priorities and checks that Job.GetJobVersions with Diffs=true
// returns all versions plus the field-level diffs between adjacent versions.
func TestJobEndpoint_GetJobVersions_Diff(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request (version 0, priority 88)
	job := mock.Job()
	job.Priority = 88
	reg := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Register the job again to create another version (version 1)
	job.Priority = 90
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Register the job again to create another version (version 2)
	job.Priority = 100
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Lookup the job, asking for diffs
	get := &structs.JobVersionsRequest{
		JobID: job.ID,
		Diffs: true,
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	var versionsResp structs.JobVersionsResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.GetJobVersions", get, &versionsResp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if versionsResp.Index != resp.JobModifyIndex {
		// Print the value the condition compared against (was resp.Index).
		t.Fatalf("Bad index: %d %d", versionsResp.Index, resp.JobModifyIndex)
	}

	// Make sure there are three job versions, newest first
	versions := versionsResp.Versions
	if l := len(versions); l != 3 {
		t.Fatalf("Got %d versions; want 3", l)
	}

	if v := versions[0]; v.Priority != 100 || v.ID != job.ID || v.Version != 2 {
		t.Fatalf("bad: %+v", v)
	}
	if v := versions[1]; v.Priority != 90 || v.ID != job.ID || v.Version != 1 {
		t.Fatalf("bad: %+v", v)
	}
	if v := versions[2]; v.Priority != 88 || v.ID != job.ID || v.Version != 0 {
		t.Fatalf("bad: %+v", v)
	}

	// Ensure we got diffs between each pair of adjacent versions
	diffs := versionsResp.Diffs
	if l := len(diffs); l != 2 {
		t.Fatalf("Got %d diffs; want 2", l)
	}
	d1 := diffs[0]
	if len(d1.Fields) != 1 {
		t.Fatalf("Got too many diffs: %#v", d1)
	}
	if d1.Fields[0].Name != "Priority" {
		t.Fatalf("Got wrong field: %#v", d1)
	}
	// Fail when EITHER side of the diff is wrong (the original used &&,
	// which only failed when both were wrong at once).
	if d1.Fields[0].Old != "90" || d1.Fields[0].New != "100" {
		t.Fatalf("Got wrong field values: %#v", d1)
	}
	d2 := diffs[1]
	if len(d2.Fields) != 1 {
		t.Fatalf("Got too many diffs: %#v", d2)
	}
	if d2.Fields[0].Name != "Priority" {
		t.Fatalf("Got wrong field: %#v", d2)
	}
	// Check d2's New value here (the original checked d1 again by mistake).
	if d2.Fields[0].Old != "88" || d2.Fields[0].New != "90" {
		t.Fatalf("Got wrong field values: %#v", d2)
	}
}
// TestJobEndpoint_GetJobVersions_Blocking verifies the blocking-query
// behavior of Job.GetJobVersions: the call must ignore writes below
// MinQueryIndex and unblock when the watched job gains a new version.
// NOTE(review): relies on wall-clock timers (100/200ms) — may be flaky on
// heavily loaded CI machines.
func TestJobEndpoint_GetJobVersions_Blocking(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	state := s1.fsm.State()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the jobs; job3 is a second version of job2
	job1 := mock.Job()
	job2 := mock.Job()
	job3 := mock.Job()
	job3.ID = job2.ID
	job3.Priority = 1

	// Upsert a job we are not interested in first.
	time.AfterFunc(100*time.Millisecond, func() {
		if err := state.UpsertJob(100, job1); err != nil {
			t.Fatalf("err: %v", err)
		}
	})

	// Upsert another job later which should trigger the watch.
	time.AfterFunc(200*time.Millisecond, func() {
		if err := state.UpsertJob(200, job2); err != nil {
			t.Fatalf("err: %v", err)
		}
	})

	req := &structs.JobVersionsRequest{
		JobID: job2.ID,
		QueryOptions: structs.QueryOptions{
			Region:        "global",
			Namespace:     job2.Namespace,
			MinQueryIndex: 150,
		},
	}
	start := time.Now()
	var resp structs.JobVersionsResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.GetJobVersions", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
	}
	if resp.Index != 200 {
		t.Fatalf("Bad index: %d %d", resp.Index, 200)
	}
	if len(resp.Versions) != 1 || resp.Versions[0].ID != job2.ID {
		t.Fatalf("bad: %#v", resp.Versions)
	}

	// Upsert the job again which should trigger the watch.
	time.AfterFunc(100*time.Millisecond, func() {
		if err := state.UpsertJob(300, job3); err != nil {
			t.Fatalf("err: %v", err)
		}
	})

	req2 := &structs.JobVersionsRequest{
		JobID: job3.ID,
		QueryOptions: structs.QueryOptions{
			Region:        "global",
			Namespace:     job3.Namespace,
			MinQueryIndex: 250,
		},
	}
	var resp2 structs.JobVersionsResponse
	start = time.Now()
	if err := msgpackrpc.CallWithCodec(codec, "Job.GetJobVersions", req2, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}

	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
		// Dump the response this check is about (was resp, the first query's).
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp2)
	}
	if resp2.Index != 300 {
		// Print the index the condition actually tested (was resp.Index).
		t.Fatalf("Bad index: %d %d", resp2.Index, 300)
	}
	if len(resp2.Versions) != 2 {
		t.Fatalf("bad: %#v", resp2.Versions)
	}
}
// TestJobEndpoint_GetJobSummary registers a job and verifies Job.Summary
// returns the expected zeroed summary for its single "web" task group.
func TestJobEndpoint_GetJobSummary(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	job := mock.Job()
	reg := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	// Align the local job's indexes with the server-assigned ones so the
	// expected summary below matches.
	job.CreateIndex = resp.JobModifyIndex
	job.ModifyIndex = resp.JobModifyIndex
	job.JobModifyIndex = resp.JobModifyIndex

	// Lookup the job summary
	get := &structs.JobSummaryRequest{
		JobID: job.ID,
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	var resp2 structs.JobSummaryResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Summary", get, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index != resp.JobModifyIndex {
		// Print the value the condition compared against (was resp.Index).
		t.Fatalf("Bad index: %d %d", resp2.Index, resp.JobModifyIndex)
	}

	expectedJobSummary := structs.JobSummary{
		JobID:     job.ID,
		Namespace: job.Namespace,
		Summary: map[string]structs.TaskGroupSummary{
			"web": structs.TaskGroupSummary{},
		},
		Children:    new(structs.JobChildrenSummary),
		CreateIndex: job.CreateIndex,
		ModifyIndex: job.CreateIndex,
	}

	if !reflect.DeepEqual(resp2.JobSummary, &expectedJobSummary) {
		// Fixed "exptected" typo in the failure message.
		t.Fatalf("expected: %v, actual: %v", expectedJobSummary, resp2.JobSummary)
	}
}
// TestJobEndpoint_Summary_ACL verifies ACL enforcement of the Job.Summary
// RPC: anonymous and under-privileged tokens are rejected, while a
// management token and a token with the read-job capability both succeed
// and receive the expected summary.
func TestJobEndpoint_Summary_ACL(t *testing.T) {
	assert := assert.New(t)
	t.Parallel()

	srv, root := testACLServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer srv.Shutdown()
	codec := rpcClient(t, srv)
	testutil.WaitForLeader(t, srv.RPC)

	// Create the job
	job := mock.Job()
	reg := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	reg.SecretID = root.SecretID

	var err error

	// Register the job with a valid token
	var regResp structs.JobRegisterResponse
	err = msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &regResp)
	assert.Nil(err)

	// Mirror the server-assigned indexes so the expected summary below can
	// be built from the job itself.
	job.CreateIndex = regResp.JobModifyIndex
	job.ModifyIndex = regResp.JobModifyIndex
	job.JobModifyIndex = regResp.JobModifyIndex

	req := &structs.JobSummaryRequest{
		JobID: job.ID,
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Expect failure for request without a token
	var resp structs.JobSummaryResponse
	err = msgpackrpc.CallWithCodec(codec, "Job.Summary", req, &resp)
	assert.NotNil(err)

	expectedJobSummary := &structs.JobSummary{
		JobID:     job.ID,
		Namespace: job.Namespace,
		Summary: map[string]structs.TaskGroupSummary{
			"web": structs.TaskGroupSummary{},
		},
		Children:    new(structs.JobChildrenSummary),
		CreateIndex: job.CreateIndex,
		ModifyIndex: job.ModifyIndex,
	}

	// Expect success when using a management token
	req.SecretID = root.SecretID
	var mgmtResp structs.JobSummaryResponse
	err = msgpackrpc.CallWithCodec(codec, "Job.Summary", req, &mgmtResp)
	assert.Nil(err)
	assert.Equal(expectedJobSummary, mgmtResp.JobSummary)

	// Create the namespace policy and tokens
	state := srv.fsm.State()

	// Expect failure for request with an invalid token: list-jobs capability
	// presumably does not grant access to a job's summary — verify against
	// the endpoint's ACL check.
	invalidToken := CreatePolicyAndToken(t, state, 1003, "test-invalid",
		NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityListJobs}))
	req.SecretID = invalidToken.SecretID
	var invalidResp structs.JobSummaryResponse
	err = msgpackrpc.CallWithCodec(codec, "Job.Summary", req, &invalidResp)
	assert.NotNil(err)

	// Try with a valid token carrying the read-job capability
	validToken := CreatePolicyAndToken(t, state, 1001, "test-valid",
		NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
	req.SecretID = validToken.SecretID
	var authResp structs.JobSummaryResponse
	err = msgpackrpc.CallWithCodec(codec, "Job.Summary", req, &authResp)
	assert.Nil(err)
	assert.Equal(expectedJobSummary, authResp.JobSummary)
}
// TestJobEndpoint_GetJobSummary_Blocking verifies that Job.Summary honors
// MinQueryIndex: the RPC must block until the summary is touched by a job
// upsert, an allocation upsert, or a job deletion.
func TestJobEndpoint_GetJobSummary_Blocking(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	state := s1.fsm.State()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create a job and insert it
	job1 := mock.Job()
	time.AfterFunc(200*time.Millisecond, func() {
		// Fix: t.Errorf instead of t.Fatalf — FailNow must only be called
		// from the goroutine running the test function.
		if err := state.UpsertJob(100, job1); err != nil {
			t.Errorf("err: %v", err)
		}
	})

	// Ensure the job summary request gets fired
	req := &structs.JobSummaryRequest{
		JobID: job1.ID,
		QueryOptions: structs.QueryOptions{
			Region:        "global",
			Namespace:     job1.Namespace,
			MinQueryIndex: 50,
		},
	}
	var resp structs.JobSummaryResponse
	start := time.Now()
	if err := msgpackrpc.CallWithCodec(codec, "Job.Summary", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
	}

	// Upsert an allocation for the job which should trigger the watch.
	time.AfterFunc(200*time.Millisecond, func() {
		alloc := mock.Alloc()
		alloc.JobID = job1.ID
		alloc.Job = job1
		if err := state.UpsertAllocs(200, []*structs.Allocation{alloc}); err != nil {
			t.Errorf("err: %v", err)
		}
	})
	req = &structs.JobSummaryRequest{
		JobID: job1.ID,
		QueryOptions: structs.QueryOptions{
			Region:        "global",
			Namespace:     job1.Namespace,
			MinQueryIndex: 199,
		},
	}
	var resp1 structs.JobSummaryResponse
	start = time.Now() // Fix: was assigned twice back to back.
	if err := msgpackrpc.CallWithCodec(codec, "Job.Summary", req, &resp1); err != nil {
		t.Fatalf("err: %v", err)
	}
	if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
		// Fix: report the response actually received (was resp).
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp1)
	}
	if resp1.Index != 200 {
		// Fix: print the index actually compared (was resp.Index).
		t.Fatalf("Bad index: %d %d", resp1.Index, 200)
	}
	if resp1.JobSummary == nil {
		t.Fatalf("bad: %#v", resp1)
	}

	// Job delete fires watches
	time.AfterFunc(100*time.Millisecond, func() {
		if err := state.DeleteJob(300, job1.Namespace, job1.ID); err != nil {
			t.Errorf("err: %v", err)
		}
	})
	req.QueryOptions.MinQueryIndex = 250
	start = time.Now()
	// Fix: decode into a JobSummaryResponse. The original used
	// structs.SingleJobResponse, whose Job field is never populated by
	// Job.Summary, which made the final nil check vacuous.
	var resp2 structs.JobSummaryResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Summary", req, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp2)
	}
	if resp2.Index != 300 {
		t.Fatalf("Bad index: %d %d", resp2.Index, 300)
	}
	if resp2.JobSummary != nil {
		t.Fatalf("bad: %#v", resp2.JobSummary)
	}
}
// TestJobEndpoint_ListJobs exercises the Job.List RPC, first without a
// filter and then with an ID-prefix filter; both must return the single
// seeded job.
func TestJobEndpoint_ListJobs(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Seed the state store with a single job.
	job := mock.Job()
	state := s1.fsm.State()
	if err := state.UpsertJob(1000, job); err != nil {
		t.Fatalf("err: %v", err)
	}

	// List every job in the namespace.
	listReq := &structs.JobListRequest{
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	var listResp structs.JobListResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.List", listReq, &listResp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if listResp.Index != 1000 {
		t.Fatalf("Bad index: %d %d", listResp.Index, 1000)
	}
	if len(listResp.Jobs) != 1 {
		t.Fatalf("bad: %#v", listResp.Jobs)
	}
	if listResp.Jobs[0].ID != job.ID {
		t.Fatalf("bad: %#v", listResp.Jobs[0])
	}

	// List again, filtering on the first four characters of the job ID.
	prefixReq := &structs.JobListRequest{
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			Namespace: job.Namespace,
			Prefix:    listResp.Jobs[0].ID[:4],
		},
	}
	var prefixResp structs.JobListResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.List", prefixReq, &prefixResp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if prefixResp.Index != 1000 {
		t.Fatalf("Bad index: %d %d", prefixResp.Index, 1000)
	}
	if len(prefixResp.Jobs) != 1 {
		t.Fatalf("bad: %#v", prefixResp.Jobs)
	}
	if prefixResp.Jobs[0].ID != job.ID {
		t.Fatalf("bad: %#v", prefixResp.Jobs[0])
	}
}
// TestJobEndpoint_ListJobs_WithACL verifies ACL enforcement of the Job.List
// RPC: no token and a token lacking the list-jobs capability are rejected,
// while a management token and a list-jobs token succeed.
func TestJobEndpoint_ListJobs_WithACL(t *testing.T) {
	assert := assert.New(t)
	t.Parallel()

	srv, root := testACLServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer srv.Shutdown()
	codec := rpcClient(t, srv)
	testutil.WaitForLeader(t, srv.RPC)
	state := srv.fsm.State()

	var err error

	// Create the register request
	job := mock.Job()
	err = state.UpsertJob(1000, job)
	assert.Nil(err)

	req := &structs.JobListRequest{
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Expect failure for request without a token
	var resp structs.JobListResponse
	err = msgpackrpc.CallWithCodec(codec, "Job.List", req, &resp)
	assert.NotNil(err)

	// Expect success for request with a management token
	var mgmtResp structs.JobListResponse
	req.SecretID = root.SecretID
	err = msgpackrpc.CallWithCodec(codec, "Job.List", req, &mgmtResp)
	assert.Nil(err)
	assert.Equal(1, len(mgmtResp.Jobs))
	assert.Equal(job.ID, mgmtResp.Jobs[0].ID)

	// Expect failure for request with a token that has incorrect permissions
	// (read-job presumably does not imply list-jobs — verify against the
	// endpoint's ACL check)
	invalidToken := CreatePolicyAndToken(t, state, 1003, "test-invalid",
		NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
	req.SecretID = invalidToken.SecretID
	var invalidResp structs.JobListResponse
	err = msgpackrpc.CallWithCodec(codec, "Job.List", req, &invalidResp)
	assert.NotNil(err)

	// Try with a valid token with correct permissions
	validToken := CreatePolicyAndToken(t, state, 1001, "test-valid",
		NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityListJobs}))
	var validResp structs.JobListResponse
	req.SecretID = validToken.SecretID
	err = msgpackrpc.CallWithCodec(codec, "Job.List", req, &validResp)
	assert.Nil(err)
	assert.Equal(1, len(validResp.Jobs))
	assert.Equal(job.ID, validResp.Jobs[0].ID)
}
// TestJobEndpoint_ListJobs_Blocking verifies that Job.List honors
// MinQueryIndex: the RPC must block until a job upsert or deletion raises
// the jobs table index past the requested minimum.
func TestJobEndpoint_ListJobs_Blocking(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	state := s1.fsm.State()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the job
	job := mock.Job()

	// Upsert job triggers watches
	time.AfterFunc(100*time.Millisecond, func() {
		// NOTE(review): t.Fatalf from a non-test goroutine only exits this
		// goroutine; consider t.Errorf here.
		if err := state.UpsertJob(100, job); err != nil {
			t.Fatalf("err: %v", err)
		}
	})

	req := &structs.JobListRequest{
		QueryOptions: structs.QueryOptions{
			Region:        "global",
			Namespace:     job.Namespace,
			MinQueryIndex: 50, // below the upsert index, so the call blocks
		},
	}
	start := time.Now()
	var resp structs.JobListResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.List", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
	}
	if resp.Index != 100 {
		t.Fatalf("Bad index: %d %d", resp.Index, 100)
	}
	if len(resp.Jobs) != 1 || resp.Jobs[0].ID != job.ID {
		t.Fatalf("bad: %#v", resp)
	}

	// Job deletion triggers watches
	time.AfterFunc(100*time.Millisecond, func() {
		if err := state.DeleteJob(200, job.Namespace, job.ID); err != nil {
			t.Fatalf("err: %v", err)
		}
	})
	req.MinQueryIndex = 150
	start = time.Now()
	var resp2 structs.JobListResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.List", req, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp2)
	}
	if resp2.Index != 200 {
		t.Fatalf("Bad index: %d %d", resp2.Index, 200)
	}
	// After deletion the namespace should list no jobs.
	if len(resp2.Jobs) != 0 {
		t.Fatalf("bad: %#v", resp2)
	}
}
// TestJobEndpoint_Allocations verifies that Job.Allocations returns every
// allocation recorded for the requested job ID.
func TestJobEndpoint_Allocations(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create two allocations that share a job ID.
	alloc1 := mock.Alloc()
	alloc2 := mock.Alloc()
	alloc2.JobID = alloc1.JobID
	state := s1.fsm.State()
	// Fix: the UpsertJobSummary error returns were silently ignored.
	if err := state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)); err != nil {
		t.Fatalf("err: %v", err)
	}
	if err := state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)); err != nil {
		t.Fatalf("err: %v", err)
	}
	err := state.UpsertAllocs(1000,
		[]*structs.Allocation{alloc1, alloc2})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Lookup the allocations for the job
	get := &structs.JobSpecificRequest{
		JobID: alloc1.JobID,
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			Namespace: alloc1.Job.Namespace,
		},
	}
	var resp2 structs.JobAllocationsResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Allocations", get, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index != 1000 {
		t.Fatalf("Bad index: %d %d", resp2.Index, 1000)
	}
	if len(resp2.Allocations) != 2 {
		t.Fatalf("bad: %#v", resp2.Allocations)
	}
}
// TestJobEndpoint_Allocations_Blocking verifies that Job.Allocations honors
// MinQueryIndex: an allocation upsert for an unrelated job must not wake
// the query, while one for the requested job must.
func TestJobEndpoint_Allocations_Blocking(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	alloc1 := mock.Alloc()
	alloc2 := mock.Alloc()
	alloc2.JobID = "job1"
	state := s1.fsm.State()

	// First upsert an unrelated alloc
	time.AfterFunc(100*time.Millisecond, func() {
		// NOTE(review): UpsertJobSummary's error is ignored here, and
		// t.Fatalf from a non-test goroutine only exits that goroutine;
		// consider checking the error and using t.Errorf.
		state.UpsertJobSummary(99, mock.JobSummary(alloc1.JobID))
		err := state.UpsertAllocs(100, []*structs.Allocation{alloc1})
		if err != nil {
			t.Fatalf("err: %v", err)
		}
	})

	// Upsert an alloc for the job we are interested in later
	time.AfterFunc(200*time.Millisecond, func() {
		state.UpsertJobSummary(199, mock.JobSummary(alloc2.JobID))
		err := state.UpsertAllocs(200, []*structs.Allocation{alloc2})
		if err != nil {
			t.Fatalf("err: %v", err)
		}
	})

	// Lookup the allocations for "job1"
	get := &structs.JobSpecificRequest{
		JobID: "job1",
		QueryOptions: structs.QueryOptions{
			Region:        "global",
			Namespace:     alloc1.Job.Namespace,
			MinQueryIndex: 150, // above the unrelated upsert at 100
		},
	}
	var resp structs.JobAllocationsResponse
	start := time.Now()
	if err := msgpackrpc.CallWithCodec(codec, "Job.Allocations", get, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	// Must have waited for the 200ms upsert, not the 100ms one.
	if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
	}
	if resp.Index != 200 {
		t.Fatalf("Bad index: %d %d", resp.Index, 200)
	}
	if len(resp.Allocations) != 1 || resp.Allocations[0].JobID != "job1" {
		t.Fatalf("bad: %#v", resp.Allocations)
	}
}
// TestJobEndpoint_Evaluations checks that Job.Evaluations returns every
// evaluation recorded for the requested job ID.
func TestJobEndpoint_Evaluations(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Insert two evaluations that share a job ID.
	eval1 := mock.Eval()
	eval2 := mock.Eval()
	eval2.JobID = eval1.JobID
	state := s1.fsm.State()
	if err := state.UpsertEvals(1000, []*structs.Evaluation{eval1, eval2}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Query the evaluations by job ID.
	req := &structs.JobSpecificRequest{
		JobID: eval1.JobID,
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			Namespace: eval1.Namespace,
		},
	}
	var out structs.JobEvaluationsResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Evaluations", req, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Both evaluations must come back at the upsert index.
	if out.Index != 1000 {
		t.Fatalf("Bad index: %d %d", out.Index, 1000)
	}
	if len(out.Evaluations) != 2 {
		t.Fatalf("bad: %#v", out.Evaluations)
	}
}
// TestJobEndpoint_Evaluations_Blocking verifies that Job.Evaluations honors
// MinQueryIndex: an eval upsert for an unrelated job must not wake the
// query, while one for the requested job must.
func TestJobEndpoint_Evaluations_Blocking(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	eval1 := mock.Eval()
	eval2 := mock.Eval()
	eval2.JobID = "job1"
	state := s1.fsm.State()

	// First upsert an unrelated eval
	time.AfterFunc(100*time.Millisecond, func() {
		// NOTE(review): t.Fatalf from a non-test goroutine only exits this
		// goroutine; consider t.Errorf here.
		err := state.UpsertEvals(100, []*structs.Evaluation{eval1})
		if err != nil {
			t.Fatalf("err: %v", err)
		}
	})

	// Upsert an eval for the job we are interested in later
	time.AfterFunc(200*time.Millisecond, func() {
		err := state.UpsertEvals(200, []*structs.Evaluation{eval2})
		if err != nil {
			t.Fatalf("err: %v", err)
		}
	})

	// Lookup the evals for "job1"
	get := &structs.JobSpecificRequest{
		JobID: "job1",
		QueryOptions: structs.QueryOptions{
			Region:        "global",
			Namespace:     eval1.Namespace,
			MinQueryIndex: 150, // above the unrelated upsert at 100
		},
	}
	var resp structs.JobEvaluationsResponse
	start := time.Now()
	if err := msgpackrpc.CallWithCodec(codec, "Job.Evaluations", get, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	// Must have waited for the 200ms upsert, not the 100ms one.
	if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
	}
	if resp.Index != 200 {
		t.Fatalf("Bad index: %d %d", resp.Index, 200)
	}
	if len(resp.Evaluations) != 1 || resp.Evaluations[0].JobID != "job1" {
		t.Fatalf("bad: %#v", resp.Evaluations)
	}
}
// TestJobEndpoint_Deployments checks that Job.Deployments lists every
// deployment belonging to the requested job.
func TestJobEndpoint_Deployments(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	state := s1.fsm.State()
	assert := assert.New(t)

	// Insert a job plus two deployments that point at it.
	job := mock.Job()
	dep1, dep2 := mock.Deployment(), mock.Deployment()
	dep1.JobID = job.ID
	dep2.JobID = job.ID
	assert.Nil(state.UpsertJob(1000, job), "UpsertJob")
	assert.Nil(state.UpsertDeployment(1001, dep1), "UpsertDeployment")
	assert.Nil(state.UpsertDeployment(1002, dep2), "UpsertDeployment")

	// Both deployments should come back for the job.
	req := &structs.JobSpecificRequest{
		JobID: job.ID,
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	var resp structs.DeploymentListResponse
	assert.Nil(msgpackrpc.CallWithCodec(codec, "Job.Deployments", req, &resp), "RPC")
	assert.EqualValues(1002, resp.Index, "response index")
	assert.Len(resp.Deployments, 2, "deployments for job")
}
// TestJobEndpoint_Deployments_Blocking verifies that Job.Deployments honors
// MinQueryIndex: a deployment for an unrelated job must not wake the query,
// while one for the requested job must.
func TestJobEndpoint_Deployments_Blocking(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	state := s1.fsm.State()
	assert := assert.New(t)

	// Create the register request. Note d1 keeps its mock JobID, so it is
	// unrelated to j; only d2 is attached to the job under test.
	j := mock.Job()
	d1 := mock.Deployment()
	d2 := mock.Deployment()
	d2.JobID = j.ID
	assert.Nil(state.UpsertJob(50, j), "UpsertJob")

	// First upsert an unrelated deployment
	time.AfterFunc(100*time.Millisecond, func() {
		assert.Nil(state.UpsertDeployment(100, d1), "UpsertDeployment")
	})

	// Upsert a deployment for the job we are interested in later
	time.AfterFunc(200*time.Millisecond, func() {
		assert.Nil(state.UpsertDeployment(200, d2), "UpsertDeployment")
	})

	// Lookup the deployments for the job
	get := &structs.JobSpecificRequest{
		JobID: d2.JobID,
		QueryOptions: structs.QueryOptions{
			Region:        "global",
			Namespace:     d2.Namespace,
			MinQueryIndex: 150, // above the unrelated upsert at 100
		},
	}
	var resp structs.DeploymentListResponse
	start := time.Now()
	assert.Nil(msgpackrpc.CallWithCodec(codec, "Job.Deployments", get, &resp), "RPC")
	assert.EqualValues(200, resp.Index, "response index")
	assert.Len(resp.Deployments, 1, "deployments for job")
	assert.Equal(d2.ID, resp.Deployments[0].ID, "returned deployment")
	// Must have waited for the 200ms upsert, not the 100ms one.
	if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
	}
}
// TestJobEndpoint_LatestDeployment checks that Job.LatestDeployment returns
// the deployment with the highest create/modify index for the job.
func TestJobEndpoint_LatestDeployment(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	state := s1.fsm.State()
	assert := assert.New(t)

	// Insert a job and two deployments, making the second one strictly
	// newer than the first.
	job := mock.Job()
	older, newer := mock.Deployment(), mock.Deployment()
	older.JobID = job.ID
	newer.JobID = job.ID
	newer.CreateIndex = older.CreateIndex + 100
	newer.ModifyIndex = newer.CreateIndex + 100
	assert.Nil(state.UpsertJob(1000, job), "UpsertJob")
	assert.Nil(state.UpsertDeployment(1001, older), "UpsertDeployment")
	assert.Nil(state.UpsertDeployment(1002, newer), "UpsertDeployment")

	// The newer deployment should be reported as latest.
	req := &structs.JobSpecificRequest{
		JobID: job.ID,
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	var resp structs.SingleDeploymentResponse
	assert.Nil(msgpackrpc.CallWithCodec(codec, "Job.LatestDeployment", req, &resp), "RPC")
	assert.EqualValues(1002, resp.Index, "response index")
	assert.NotNil(resp.Deployment, "want a deployment")
	assert.Equal(newer.ID, resp.Deployment.ID, "latest deployment for job")
}
// TestJobEndpoint_LatestDeployment_Blocking verifies that
// Job.LatestDeployment honors MinQueryIndex: an unrelated deployment must
// not wake the query, while one for the requested job must.
func TestJobEndpoint_LatestDeployment_Blocking(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	state := s1.fsm.State()
	assert := assert.New(t)

	// Create the register request. Note d1 keeps its mock JobID, so it is
	// unrelated to j; only d2 is attached to the job under test.
	j := mock.Job()
	d1 := mock.Deployment()
	d2 := mock.Deployment()
	d2.JobID = j.ID
	assert.Nil(state.UpsertJob(50, j), "UpsertJob")

	// First upsert an unrelated deployment
	time.AfterFunc(100*time.Millisecond, func() {
		assert.Nil(state.UpsertDeployment(100, d1), "UpsertDeployment")
	})

	// Upsert a deployment for the job we are interested in later
	time.AfterFunc(200*time.Millisecond, func() {
		assert.Nil(state.UpsertDeployment(200, d2), "UpsertDeployment")
	})

	// Lookup the latest deployment for the job
	get := &structs.JobSpecificRequest{
		JobID: d2.JobID,
		QueryOptions: structs.QueryOptions{
			Region:        "global",
			Namespace:     d2.Namespace,
			MinQueryIndex: 150, // above the unrelated upsert at 100
		},
	}
	var resp structs.SingleDeploymentResponse
	start := time.Now()
	assert.Nil(msgpackrpc.CallWithCodec(codec, "Job.LatestDeployment", get, &resp), "RPC")
	assert.EqualValues(200, resp.Index, "response index")
	assert.NotNil(resp.Deployment, "deployment for job")
	assert.Equal(d2.ID, resp.Deployment.ID, "returned deployment")
	// Must have waited for the 200ms upsert, not the 100ms one.
	if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
	}
}
// TestJobEndpoint_Plan_ACL verifies that Job.Plan rejects a request without
// a token and accepts the same request with the root management token.
func TestJobEndpoint_Plan_ACL(t *testing.T) {
	t.Parallel()
	s1, root := testACLServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Build a plan request for a mock job.
	job := mock.Job()
	req := &structs.JobPlanRequest{
		Job:  job,
		Diff: true,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Anonymous request: must be rejected.
	var resp structs.JobPlanResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Plan", req, &resp); err == nil {
		t.Fatalf("expected error")
	}

	// Same request with the management token: must succeed.
	req.SecretID = root.SecretID
	if err := msgpackrpc.CallWithCodec(codec, "Job.Plan", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
}
// TestJobEndpoint_Plan_WithDiff registers a job, then plans the same job
// with Diff=true and checks that the response carries annotations, a diff,
// and failed task group allocation metrics.
func TestJobEndpoint_Plan_WithDiff(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	job := mock.Job()
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}

	// Create a plan request with a diff
	planReq := &structs.JobPlanRequest{
		Job:  job,
		Diff: true,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	var planResp structs.JobPlanResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Plan", planReq, &planResp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Check the response
	if planResp.JobModifyIndex == 0 {
		t.Fatalf("bad cas: %d", planResp.JobModifyIndex)
	}
	if planResp.Annotations == nil {
		t.Fatalf("no annotations")
	}
	if planResp.Diff == nil {
		t.Fatalf("no diff")
	}
	// NOTE(review): placements are expected to fail — presumably because no
	// client nodes are registered with this test server — so
	// FailedTGAllocs should be populated; confirm against the planner.
	if len(planResp.FailedTGAllocs) == 0 {
		t.Fatalf("no failed task group alloc metrics")
	}
}
// TestJobEndpoint_Plan_NoDiff registers a job, then plans the same job with
// Diff=false and checks that the response carries annotations and failed
// task group allocation metrics but no diff.
func TestJobEndpoint_Plan_NoDiff(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	job := mock.Job()
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}

	// Create a plan request without a diff
	planReq := &structs.JobPlanRequest{
		Job:  job,
		Diff: false,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	var planResp structs.JobPlanResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Plan", planReq, &planResp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Check the response
	if planResp.JobModifyIndex == 0 {
		t.Fatalf("bad cas: %d", planResp.JobModifyIndex)
	}
	if planResp.Annotations == nil {
		t.Fatalf("no annotations")
	}
	// Diff must be omitted when the request did not ask for one.
	if planResp.Diff != nil {
		t.Fatalf("got diff")
	}
	// NOTE(review): placements are expected to fail — presumably because no
	// client nodes are registered with this test server — so
	// FailedTGAllocs should be populated; confirm against the planner.
	if len(planResp.FailedTGAllocs) == 0 {
		t.Fatalf("no failed task group alloc metrics")
	}
}
// TestJobEndpoint_ImplicitConstraints_Vault registers a job whose task uses
// a Vault policy and verifies the job endpoint injects the implicit vault
// constraint into the task group.
func TestJobEndpoint_ImplicitConstraints_Vault(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Enable vault and require authentication
	tr, f := true, false
	s1.config.VaultConfig.Enabled = &tr
	s1.config.VaultConfig.AllowUnauthenticated = &f

	// Replace the Vault Client on the server with a stub that accepts the
	// token below.
	tvc := &TestVaultClient{}
	s1.vault = tvc

	policy := "foo"
	goodToken := structs.GenerateUUID()
	goodPolicies := []string{"foo", "bar", "baz"}
	tvc.SetLookupTokenAllowedPolicies(goodToken, goodPolicies)

	// Create the register request with a job asking for a vault policy
	job := mock.Job()
	job.VaultToken = goodToken
	job.TaskGroups[0].Tasks[0].Vault = &structs.Vault{
		Policies:   []string{policy},
		ChangeMode: structs.VaultChangeModeRestart,
	}
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("bad: %v", err)
	}

	// Check for the job in the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected job")
	}
	if out.CreateIndex != resp.JobModifyIndex {
		t.Fatalf("index mis-match")
	}

	// Check that there is an implicit vault constraint
	constraints := out.TaskGroups[0].Constraints
	if len(constraints) != 1 {
		t.Fatalf("Expected an implicit constraint")
	}
	if !constraints[0].Equal(vaultConstraint) {
		t.Fatalf("Expected implicit vault constraint")
	}
}
// TestJobEndpoint_ImplicitConstraints_Signals registers a job whose
// template sends a change signal and verifies the job endpoint injects the
// implicit signal constraint into the task group.
func TestJobEndpoint_ImplicitConstraints_Signals(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request with a job asking for a template that sends a
	// signal
	job := mock.Job()
	signal := "SIGUSR1"
	job.TaskGroups[0].Tasks[0].Templates = []*structs.Template{
		&structs.Template{
			SourcePath:   "foo",
			DestPath:     "bar",
			ChangeMode:   structs.TemplateChangeModeSignal,
			ChangeSignal: signal,
		},
	}
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("bad: %v", err)
	}

	// Check for the job in the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected job")
	}
	if out.CreateIndex != resp.JobModifyIndex {
		t.Fatalf("index mis-match")
	}

	// Check that there is an implicit signal constraint
	constraints := out.TaskGroups[0].Constraints
	if len(constraints) != 1 {
		t.Fatalf("Expected an implicit constraint")
	}
	sigConstraint := getSignalConstraint([]string{signal})
	if !constraints[0].Equal(sigConstraint) {
		// Fix: the message said "vault constraint", but this test asserts
		// the signal constraint.
		t.Fatalf("Expected implicit signal constraint")
	}
}
// TestJobEndpoint_ValidateJob_InvalidDriverConf ensures validateJob rejects
// a task whose driver config fails driver-level validation, without
// emitting warnings.
func TestJobEndpoint_ValidateJob_InvalidDriverConf(t *testing.T) {
	t.Parallel()

	// Give the mock job's task a config the driver cannot accept.
	job := mock.Job()
	job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
		"foo": "bar",
	}

	err, warnings := validateJob(job)
	switch {
	case err == nil || !strings.Contains(err.Error(), "-> config"):
		t.Fatalf("Expected config error; got %v", err)
	case warnings != nil:
		t.Fatalf("got unexpected warnings: %v", warnings)
	}
}
// TestJobEndpoint_ValidateJob_InvalidSignals ensures validateJob rejects a
// job whose Vault change_mode needs signals on a driver that cannot send
// them.
func TestJobEndpoint_ValidateJob_InvalidSignals(t *testing.T) {
	t.Parallel()

	// The qemu driver cannot deliver signals to its tasks.
	job := mock.Job()
	task := job.TaskGroups[0].Tasks[0]
	task.Driver = "qemu"
	task.Vault = &structs.Vault{
		Policies:     []string{"foo"},
		ChangeMode:   structs.VaultChangeModeSignal,
		ChangeSignal: "SIGUSR1",
	}

	err, warnings := validateJob(job)
	if err == nil || !strings.Contains(err.Error(), "support sending signals") {
		t.Fatalf("Expected signal feasibility error; got %v", err)
	}
	if warnings != nil {
		t.Fatalf("got unexpected warnings: %v", warnings)
	}
}
// TestJobEndpoint_ValidateJobUpdate exercises validateJobUpdate: an update
// may not change a job's type, make it periodic, or make it parameterized.
func TestJobEndpoint_ValidateJobUpdate(t *testing.T) {
	t.Parallel()
	// Renamed from old/new: "new" shadowed the builtin allocator.
	oldJob := mock.Job()
	newJob := mock.Job()
	if err := validateJobUpdate(oldJob, newJob); err != nil {
		t.Errorf("expected update to be valid but got: %v", err)
	}

	// Changing the job type is rejected.
	newJob.Type = "batch"
	if err := validateJobUpdate(oldJob, newJob); err == nil {
		t.Errorf("expected err when setting new job to a different type")
	} else {
		t.Log(err)
	}

	// Converting to a periodic job is rejected.
	newJob = mock.Job()
	newJob.Periodic = &structs.PeriodicConfig{Enabled: true}
	if err := validateJobUpdate(oldJob, newJob); err == nil {
		t.Errorf("expected err when setting new job to periodic")
	} else {
		t.Log(err)
	}

	// Converting to a parameterized job is rejected.
	newJob = mock.Job()
	newJob.ParameterizedJob = &structs.ParameterizedJobConfig{}
	if err := validateJobUpdate(oldJob, newJob); err == nil {
		t.Errorf("expected err when setting new job to parameterized")
	} else {
		t.Log(err)
	}
}
// TestJobEndpoint_ValidateJobUpdate_ACL verifies ACL enforcement of the
// Job.Validate RPC: an anonymous request fails, a management token
// succeeds and yields no validation errors or warnings.
func TestJobEndpoint_ValidateJobUpdate_ACL(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)

	s1, root := testACLServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	job := mock.Job()
	req := &structs.JobValidateRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Attempt to update without providing a valid token
	var resp structs.JobValidateResponse
	err := msgpackrpc.CallWithCodec(codec, "Job.Validate", req, &resp)
	assert.NotNil(err)

	// Update with a valid token
	req.SecretID = root.SecretID
	var validResp structs.JobValidateResponse
	err = msgpackrpc.CallWithCodec(codec, "Job.Validate", req, &validResp)
	assert.Nil(err)

	// A well-formed mock job should produce neither errors nor warnings.
	assert.Equal("", validResp.Error)
	assert.Equal("", validResp.Warnings)
}
func TestJobEndpoint_Dispatch(t *testing.T) {
t.Parallel()
// No requirements
d1 := mock.Job()
d1.Type = structs.JobTypeBatch
d1.ParameterizedJob = &structs.ParameterizedJobConfig{}
// Require input data
d2 := mock.Job()
d2.Type = structs.JobTypeBatch
d2.ParameterizedJob = &structs.ParameterizedJobConfig{
Payload: structs.DispatchPayloadRequired,
}
// Disallow input data
d3 := mock.Job()
d3.Type = structs.JobTypeBatch
d3.ParameterizedJob = &structs.ParameterizedJobConfig{
Payload: structs.DispatchPayloadForbidden,
}
// Require meta
d4 := mock.Job()
d4.Type = structs.JobTypeBatch
d4.ParameterizedJob = &structs.ParameterizedJobConfig{
MetaRequired: []string{"foo", "bar"},
}
// Optional meta
d5 := mock.Job()
d5.Type = structs.JobTypeBatch
d5.ParameterizedJob = &structs.ParameterizedJobConfig{
MetaOptional: []string{"foo", "bar"},
}
// Periodic dispatch job
d6 := mock.PeriodicJob()
d6.ParameterizedJob = &structs.ParameterizedJobConfig{}
d7 := mock.Job()
d7.Type = structs.JobTypeBatch
d7.ParameterizedJob = &structs.ParameterizedJobConfig{}
d7.Stop = true
reqNoInputNoMeta := &structs.JobDispatchRequest{}
reqInputDataNoMeta := &structs.JobDispatchRequest{
Payload: []byte("hello world"),
}
reqNoInputDataMeta := &structs.JobDispatchRequest{
Meta: map[string]string{
"foo": "f1",
"bar": "f2",
},
}
reqInputDataMeta := &structs.JobDispatchRequest{
Payload: []byte("hello world"),
Meta: map[string]string{
"foo": "f1",
"bar": "f2",
},
}
reqBadMeta := &structs.JobDispatchRequest{
Payload: []byte("hello world"),
Meta: map[string]string{
"foo": "f1",
"bar": "f2",
"baz": "f3",
},
}
reqInputDataTooLarge := &structs.JobDispatchRequest{
Payload: make([]byte, DispatchPayloadSizeLimit+100),
}
type testCase struct {
name string
parameterizedJob *structs.Job
dispatchReq *structs.JobDispatchRequest
noEval bool
err bool
errStr string
}
cases := []testCase{
{
name: "optional input data w/ data",
parameterizedJob: d1,
dispatchReq: reqInputDataNoMeta,
err: false,
},
{
name: "optional input data w/o data",
parameterizedJob: d1,
dispatchReq: reqNoInputNoMeta,
err: false,
},
{
name: "require input data w/ data",
parameterizedJob: d2,
dispatchReq: reqInputDataNoMeta,
err: false,
},
{
name: "require input data w/o data",
parameterizedJob: d2,
dispatchReq: reqNoInputNoMeta,
err: true,
errStr: "not provided but required",
},
{
name: "disallow input data w/o data",
parameterizedJob: d3,
dispatchReq: reqNoInputNoMeta,
err: false,
},
{
name: "disallow input data w/ data",
parameterizedJob: d3,
dispatchReq: reqInputDataNoMeta,
err: true,
errStr: "provided but forbidden",
},
{
name: "require meta w/ meta",
parameterizedJob: d4,
dispatchReq: reqInputDataMeta,
err: false,
},
{
name: "require meta w/o meta",
parameterizedJob: d4,
dispatchReq: reqNoInputNoMeta,
err: true,
errStr: "did not provide required meta keys",
},
{
name: "optional meta w/ meta",
parameterizedJob: d5,
dispatchReq: reqNoInputDataMeta,
err: false,
},
{
name: "optional meta w/o meta",
parameterizedJob: d5,
dispatchReq: reqNoInputNoMeta,
err: false,
},
{
name: "optional meta w/ bad meta",
parameterizedJob: d5,
dispatchReq: reqBadMeta,
err: true,
errStr: "unpermitted metadata keys",
},
{
name: "optional input w/ too big of input",
parameterizedJob: d1,
dispatchReq: reqInputDataTooLarge,
err: true,
errStr: "Payload exceeds maximum size",
},
{
name: "periodic job dispatched, ensure no eval",
parameterizedJob: d6,
dispatchReq: reqNoInputNoMeta,
noEval: true,
},
{
name: "periodic job stopped, ensure error",
parameterizedJob: d7,
dispatchReq: reqNoInputNoMeta,
err: true,
errStr: "stopped",
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer s1.Shutdown()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request
regReq := &structs.JobRegisterRequest{
Job: tc.parameterizedJob,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: tc.parameterizedJob.Namespace,
},
}
// Fetch the response
var regResp structs.JobRegisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", regReq, &regResp); err != nil {
t.Fatalf("err: %v", err)
}
// Now try to dispatch
tc.dispatchReq.JobID = tc.parameterizedJob.ID
tc.dispatchReq.WriteRequest = structs.WriteRequest{
Region: "global",
Namespace: tc.parameterizedJob.Namespace,
}
var dispatchResp structs.JobDispatchResponse
dispatchErr := msgpackrpc.CallWithCodec(codec, "Job.Dispatch", tc.dispatchReq, &dispatchResp)
if dispatchErr == nil {
if tc.err {
t.Fatalf("Expected error: %v", dispatchErr)
}
// Check that we got an eval and job id back
switch dispatchResp.EvalID {
case "":
if !tc.noEval {
t.Fatalf("Bad response")
}
default:
if tc.noEval {
t.Fatalf("Got eval %q", dispatchResp.EvalID)
}
}
if dispatchResp.DispatchedJobID == "" {
t.Fatalf("Bad response")
}
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, tc.parameterizedJob.Namespace, dispatchResp.DispatchedJobID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("expected job")
}
if out.CreateIndex != dispatchResp.JobCreateIndex {
t.Fatalf("index mis-match")
}
if out.ParentID != tc.parameterizedJob.ID {
t.Fatalf("bad parent ID")
}
if tc.noEval {
return
}
// Lookup the evaluation
eval, err := state.EvalByID(ws, dispatchResp.EvalID)
if err != nil {
t.Fatalf("err: %v", err)
}
if eval == nil {
t.Fatalf("expected eval")
}
if eval.CreateIndex != dispatchResp.EvalCreateIndex {
t.Fatalf("index mis-match")
}
} else {
if !tc.err {
t.Fatalf("Got unexpected error: %v", dispatchErr)
} else if !strings.Contains(dispatchErr.Error(), tc.errStr) {
t.Fatalf("Expected err to include %q; got %v", tc.errStr, dispatchErr)
}
}
})
}
}
// better test assertions
package nomad
import (
"fmt"
"reflect"
"strings"
"testing"
"time"
memdb "github.com/hashicorp/go-memdb"
msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
"github.com/hashicorp/nomad/acl"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/testutil"
"github.com/kr/pretty"
"github.com/stretchr/testify/assert"
)
// TestJobEndpoint_Register exercises the happy path of the Job.Register RPC:
// a mock job is registered against a leader with scheduling disabled, then
// the test verifies the job was written to the FSM and that a pending
// evaluation was created whose fields mirror the submitted job.
func TestJobEndpoint_Register(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	job := mock.Job()
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}

	// Check for the node in the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected job")
	}
	if out.CreateIndex != resp.JobModifyIndex {
		t.Fatalf("index mis-match")
	}

	// The mock job's first task carries a service named "web-frontend";
	// verify registration preserved it.
	serviceName := out.TaskGroups[0].Tasks[0].Services[0].Name
	expectedServiceName := "web-frontend"
	if serviceName != expectedServiceName {
		t.Fatalf("Expected Service Name: %s, Actual: %s", expectedServiceName, serviceName)
	}

	// Lookup the evaluation created by registration; it must be a pending
	// job-register eval tied to this job and its modify index.
	eval, err := state.EvalByID(ws, resp.EvalID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if eval == nil {
		t.Fatalf("expected eval")
	}
	if eval.CreateIndex != resp.EvalCreateIndex {
		t.Fatalf("index mis-match")
	}
	if eval.Priority != job.Priority {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.Type != job.Type {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.TriggeredBy != structs.EvalTriggerJobRegister {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.JobID != job.ID {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.JobModifyIndex != resp.JobModifyIndex {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.Status != structs.EvalStatusPending {
		t.Fatalf("bad: %#v", eval)
	}
}
// TestJobEndpoint_Register_ACL verifies that Job.Register is denied for an
// anonymous request and accepted with the root management token, after which
// the job is visible in the FSM.
func TestJobEndpoint_Register_ACL(t *testing.T) {
	t.Parallel()
	s1, root := testACLServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Build a register request for a mock job.
	job := mock.Job()
	req := &structs.JobRegisterRequest{
		Job:          job,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// An anonymous request must be rejected.
	var resp structs.JobRegisterResponse
	err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
	if err == nil {
		t.Fatalf("expected error")
	}

	// Retry with the management token; this time it must succeed.
	req.SecretID = root.SecretID
	if err = msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}

	// The job should now be present in the FSM.
	fsmState := s1.fsm.State()
	watch := memdb.NewWatchSet()
	stored, err := fsmState.JobByID(watch, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if stored == nil {
		t.Fatalf("expected job")
	}
}
// TestJobEndpoint_Register_InvalidNamespace submits a job into a namespace
// that was never created and expects registration to be rejected without
// anything being persisted.
func TestJobEndpoint_Register_InvalidNamespace(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Target a namespace that does not exist on the server.
	job := mock.Job()
	job.Namespace = "foo"
	req := &structs.JobRegisterRequest{
		Job:          job,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// The RPC must fail with a namespace error.
	var resp structs.JobRegisterResponse
	switch err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); {
	case err == nil:
		t.Fatalf("expected namespace error: %v", err)
	case !strings.Contains(err.Error(), "non-existant namespace"):
		t.Fatalf("expected namespace error: %v", err)
	}

	// Nothing should have been written to the FSM.
	fsmState := s1.fsm.State()
	watch := memdb.NewWatchSet()
	stored, err := fsmState.JobByID(watch, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if stored != nil {
		t.Fatalf("expected no job")
	}
}
// TestJobEndpoint_Register_InvalidDriverConfig registers a job whose task
// driver configuration is malformed and expects a driver-config validation
// error from Job.Register.
func TestJobEndpoint_Register_InvalidDriverConfig(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Corrupt the driver config of the first task.
	job := mock.Job()
	job.TaskGroups[0].Tasks[0].Config["foo"] = 1
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Registration must be rejected with a config validation error.
	var resp structs.JobRegisterResponse
	switch err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); {
	case err == nil:
		t.Fatalf("expected a validation error")
	case !strings.Contains(err.Error(), "-> config:"):
		t.Fatalf("expected a driver config validation error but got: %v", err)
	}
}
// TestJobEndpoint_Register_Payload ensures a job submitted with a raw
// payload attached is rejected by Job.Register with a payload validation
// error.
func TestJobEndpoint_Register_Payload(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Attach a payload, which is not allowed on a registered job.
	job := mock.Job()
	job.Payload = []byte{0x1}
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Registration must fail with a payload validation error.
	var resp structs.JobRegisterResponse
	switch err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); {
	case err == nil:
		t.Fatalf("expected a validation error")
	case !strings.Contains(err.Error(), "payload"):
		t.Fatalf("expected a payload error but got: %v", err)
	}
}
// TestJobEndpoint_Register_Existing registers a job, re-registers it with a
// changed definition to confirm the update bumps the job version and creates
// a fresh eval, then re-submits the identical definition and verifies the
// version is NOT bumped for a no-op update.
func TestJobEndpoint_Register_Existing(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	job := mock.Job()
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}

	// Update the job definition (same ID, new priority)
	job2 := mock.Job()
	job2.Priority = 100
	job2.ID = job.ID
	req.Job = job2

	// Attempt update
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}

	// Check for the node in the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected job")
	}
	if out.ModifyIndex != resp.JobModifyIndex {
		t.Fatalf("index mis-match")
	}
	if out.Priority != 100 {
		t.Fatalf("expected update")
	}
	// The changed definition must have bumped the version from 0 to 1.
	if out.Version != 1 {
		t.Fatalf("expected update")
	}

	// Lookup the evaluation created by the update
	eval, err := state.EvalByID(ws, resp.EvalID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if eval == nil {
		t.Fatalf("expected eval")
	}
	if eval.CreateIndex != resp.EvalCreateIndex {
		t.Fatalf("index mis-match")
	}
	if eval.Priority != job2.Priority {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.Type != job2.Type {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.TriggeredBy != structs.EvalTriggerJobRegister {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.JobID != job2.ID {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.JobModifyIndex != resp.JobModifyIndex {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.Status != structs.EvalStatusPending {
		t.Fatalf("bad: %#v", eval)
	}

	// Re-submit the unchanged definition a third time.
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}

	// Check to ensure the job version didn't get bumped because we submitted
	// the same job
	state = s1.fsm.State()
	ws = memdb.NewWatchSet()
	out, err = state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected job")
	}
	if out.Version != 1 {
		t.Fatalf("expected no update; got %v; diff %v", out.Version, pretty.Diff(job2, out))
	}
}
// TestJobEndpoint_Register_Periodic registers a periodic job and verifies it
// is stored in the FSM without an evaluation being created; periodic jobs are
// evaluated when they trigger, not at registration time.
func TestJobEndpoint_Register_Periodic(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request for a periodic job.
	job := mock.PeriodicJob()
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	// Bug fix: report the value actually being checked (JobModifyIndex)
	// rather than the unrelated resp.Index.
	if resp.JobModifyIndex == 0 {
		t.Fatalf("bad index: %d", resp.JobModifyIndex)
	}

	// Check for the job in the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected job")
	}
	if out.CreateIndex != resp.JobModifyIndex {
		t.Fatalf("index mis-match")
	}

	// The mock job's first task carries a "web-frontend" service; verify it
	// survived registration.
	serviceName := out.TaskGroups[0].Tasks[0].Services[0].Name
	expectedServiceName := "web-frontend"
	if serviceName != expectedServiceName {
		t.Fatalf("Expected Service Name: %s, Actual: %s", expectedServiceName, serviceName)
	}

	// Periodic jobs must not produce an eval at registration time.
	if resp.EvalID != "" {
		t.Fatalf("Register created an eval for a periodic job")
	}
}
// TestJobEndpoint_Register_ParameterizedJob registers a parameterized batch
// job and verifies it is stored without creating an evaluation; parameterized
// jobs only run when dispatched.
func TestJobEndpoint_Register_ParameterizedJob(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request for a parameterized job.
	job := mock.Job()
	job.Type = structs.JobTypeBatch
	job.ParameterizedJob = &structs.ParameterizedJobConfig{}
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	// Bug fix: report the value actually being checked (JobModifyIndex)
	// rather than the unrelated resp.Index.
	if resp.JobModifyIndex == 0 {
		t.Fatalf("bad index: %d", resp.JobModifyIndex)
	}

	// Check for the job in the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected job")
	}
	if out.CreateIndex != resp.JobModifyIndex {
		t.Fatalf("index mis-match")
	}

	// Parameterized jobs must not produce an eval at registration time.
	if resp.EvalID != "" {
		t.Fatalf("Register created an eval for a parameterized job")
	}
}
// TestJobEndpoint_Register_EnforceIndex exercises the check-and-set semantics
// of Job.Register: enforcing a modify index on a job that is not yet
// registered fails, enforcing index 0 succeeds exactly once, enforcing a
// stale index fails, and enforcing the current index lets the update through.
func TestJobEndpoint_Register_EnforceIndex(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request and enforcing an incorrect index
	job := mock.Job()
	req := &structs.JobRegisterRequest{
		Job:            job,
		EnforceIndex:   true,
		JobModifyIndex: 100, // Not registered yet so not possible
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	var resp structs.JobRegisterResponse
	err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
	if err == nil || !strings.Contains(err.Error(), RegisterEnforceIndexErrPrefix) {
		t.Fatalf("expected enforcement error")
	}

	// Create the register request and enforcing it is new
	req = &structs.JobRegisterRequest{
		Job:            job,
		EnforceIndex:   true,
		JobModifyIndex: 0,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}
	// Remember the index the job landed at for the CAS checks below.
	curIndex := resp.JobModifyIndex

	// Check for the node in the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected job")
	}
	if out.CreateIndex != resp.JobModifyIndex {
		t.Fatalf("index mis-match")
	}

	// Reregister request and enforcing it be a new job
	req = &structs.JobRegisterRequest{
		Job:            job,
		EnforceIndex:   true,
		JobModifyIndex: 0,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	err = msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
	if err == nil || !strings.Contains(err.Error(), RegisterEnforceIndexErrPrefix) {
		t.Fatalf("expected enforcement error")
	}

	// Reregister request and enforcing it be at an incorrect index
	req = &structs.JobRegisterRequest{
		Job:            job,
		EnforceIndex:   true,
		JobModifyIndex: curIndex - 1,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	err = msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
	if err == nil || !strings.Contains(err.Error(), RegisterEnforceIndexErrPrefix) {
		t.Fatalf("expected enforcement error")
	}

	// Reregister request and enforcing it be at the correct index
	job.Priority = job.Priority + 1
	req = &structs.JobRegisterRequest{
		Job:            job,
		EnforceIndex:   true,
		JobModifyIndex: curIndex,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}

	// The priority bump from the successful CAS update must be visible.
	out, err = state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected job")
	}
	if out.Priority != job.Priority {
		t.Fatalf("priority mis-match")
	}
}
// TestJobEndpoint_Register_Vault_Disabled submits a job that requests Vault
// policies while Vault integration is turned off and expects registration to
// be refused.
func TestJobEndpoint_Register_Vault_Disabled(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
		f := false
		c.VaultConfig.Enabled = &f
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// The job's first task asks for a Vault policy.
	job := mock.Job()
	job.TaskGroups[0].Tasks[0].Vault = &structs.Vault{
		Policies:   []string{"foo"},
		ChangeMode: structs.VaultChangeModeRestart,
	}
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// With Vault disabled the registration must fail.
	var resp structs.JobRegisterResponse
	switch err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); {
	case err == nil:
		t.Fatalf("expected Vault not enabled error: %v", err)
	case !strings.Contains(err.Error(), "Vault not enabled"):
		t.Fatalf("expected Vault not enabled error: %v", err)
	}
}
// TestJobEndpoint_Register_Vault_AllowUnauthenticated enables Vault with
// unauthenticated submissions allowed and checks that a job requesting Vault
// policies registers successfully without a Vault token.
func TestJobEndpoint_Register_Vault_AllowUnauthenticated(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Enable vault and allow unauthenticated submissions.
	tr := true
	s1.config.VaultConfig.Enabled = &tr
	s1.config.VaultConfig.AllowUnauthenticated = &tr

	// Swap in the fake Vault client so no real Vault is needed.
	s1.vault = &TestVaultClient{}

	// The job's first task asks for a Vault policy.
	job := mock.Job()
	job.TaskGroups[0].Tasks[0].Vault = &structs.Vault{
		Policies:   []string{"foo"},
		ChangeMode: structs.VaultChangeModeRestart,
	}
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Registration should succeed without a token.
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("bad: %v", err)
	}

	// The job must be visible in the FSM at the reported index.
	fsmState := s1.fsm.State()
	watch := memdb.NewWatchSet()
	stored, err := fsmState.JobByID(watch, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if stored == nil {
		t.Fatalf("expected job")
	}
	if stored.CreateIndex != resp.JobModifyIndex {
		t.Fatalf("index mis-match")
	}
}
// TestJobEndpoint_Register_Vault_NoToken enables Vault with unauthenticated
// registration disallowed and verifies that a job requesting a Vault policy
// is rejected when the request carries no Vault token.
func TestJobEndpoint_Register_Vault_NoToken(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Enable vault and require tokens
	tr, f := true, false
	s1.config.VaultConfig.Enabled = &tr
	s1.config.VaultConfig.AllowUnauthenticated = &f

	// Replace the Vault Client on the server
	s1.vault = &TestVaultClient{}

	// Create the register request with a job asking for a vault policy but
	// don't send a Vault token
	job := mock.Job()
	job.TaskGroups[0].Tasks[0].Vault = &structs.Vault{
		Policies:   []string{"foo"},
		ChangeMode: structs.VaultChangeModeRestart,
	}
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	var resp structs.JobRegisterResponse
	err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
	if err == nil || !strings.Contains(err.Error(), "missing Vault Token") {
		// Bug fix: this case expects a missing-token error; the old message
		// ("expected Vault not enabled error") was copied from the
		// disabled-Vault test and misreported failures.
		t.Fatalf("expected missing Vault token error: %v", err)
	}
}
// TestJobEndpoint_Register_Vault_Policies enables Vault with authentication
// required and exercises Job.Register with tokens whose policies are
// insufficient, whose lookup errors, that are sufficient, and that are root.
// Successful registrations must clear the Vault token from the stored job and
// inject the implicit Vault constraint into the task group.
func TestJobEndpoint_Register_Vault_Policies(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Enable vault and require tokens
	tr, f := true, false
	s1.config.VaultConfig.Enabled = &tr
	s1.config.VaultConfig.AllowUnauthenticated = &f

	// Replace the Vault Client on the server
	tvc := &TestVaultClient{}
	s1.vault = tvc

	// Add four tokens: one that does not allow the requested policy, one
	// that does, a root token, and one whose lookup returns an error.
	policy := "foo"
	badToken := structs.GenerateUUID()
	badPolicies := []string{"a", "b", "c"}
	tvc.SetLookupTokenAllowedPolicies(badToken, badPolicies)
	goodToken := structs.GenerateUUID()
	goodPolicies := []string{"foo", "bar", "baz"}
	tvc.SetLookupTokenAllowedPolicies(goodToken, goodPolicies)
	rootToken := structs.GenerateUUID()
	rootPolicies := []string{"root"}
	tvc.SetLookupTokenAllowedPolicies(rootToken, rootPolicies)
	errToken := structs.GenerateUUID()
	expectedErr := fmt.Errorf("return errors from vault")
	tvc.SetLookupTokenError(errToken, expectedErr)

	// Create the register request with a job asking for a vault policy but
	// send the bad Vault token
	job := mock.Job()
	job.VaultToken = badToken
	job.TaskGroups[0].Tasks[0].Vault = &structs.Vault{
		Policies:   []string{policy},
		ChangeMode: structs.VaultChangeModeRestart,
	}
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// The bad token lacks the requested policy and must be refused.
	var resp structs.JobRegisterResponse
	err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
	if err == nil || !strings.Contains(err.Error(),
		"doesn't allow access to the following policies: "+policy) {
		t.Fatalf("expected permission denied error: %v", err)
	}

	// Use the err token; the Vault lookup failure must surface to the caller.
	job.VaultToken = errToken
	err = msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
	if err == nil || !strings.Contains(err.Error(), expectedErr.Error()) {
		// Bug fix: this case expects the Vault lookup error, not a
		// permission denied error; the old message was a copy/paste slip.
		t.Fatalf("expected vault error: %v", err)
	}

	// Use the good token
	job.VaultToken = goodToken

	// Fetch the response
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("bad: %v", err)
	}

	// Check for the job in the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected job")
	}
	if out.CreateIndex != resp.JobModifyIndex {
		t.Fatalf("index mis-match")
	}
	if out.VaultToken != "" {
		t.Fatalf("vault token not cleared")
	}

	// Check that an implicit constraint was created
	constraints := out.TaskGroups[0].Constraints
	if l := len(constraints); l != 1 {
		// Bug fix: the message referred to "tests" instead of constraints.
		t.Fatalf("Unexpected number of constraints: %v", l)
	}
	if !constraints[0].Equal(vaultConstraint) {
		t.Fatalf("bad constraint; got %#v; want %#v", constraints[0], vaultConstraint)
	}

	// Create the register request with another job asking for a vault policy but
	// send the root Vault token
	job2 := mock.Job()
	job2.VaultToken = rootToken
	// Bug fix: the Vault stanza must be set on job2 (the job actually being
	// registered with the root token), not on the already-registered job.
	job2.TaskGroups[0].Tasks[0].Vault = &structs.Vault{
		Policies:   []string{policy},
		ChangeMode: structs.VaultChangeModeRestart,
	}
	req = &structs.JobRegisterRequest{
		Job: job2,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("bad: %v", err)
	}

	// Check for the job in the FSM
	out, err = state.JobByID(ws, job2.Namespace, job2.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected job")
	}
	if out.CreateIndex != resp.JobModifyIndex {
		t.Fatalf("index mis-match")
	}
	if out.VaultToken != "" {
		t.Fatalf("vault token not cleared")
	}
}
// TestJobEndpoint_Revert registers two versions of a job and then exercises
// Job.Revert: reverting while enforcing a wrong prior version fails,
// reverting to the current version fails, and valid reverts create a new job
// version plus an evaluation.
func TestJobEndpoint_Revert(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the initial register request
	job := mock.Job()
	job.Priority = 100
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}

	// Reregister again to get another version
	job2 := job.Copy()
	job2.Priority = 1
	req = &structs.JobRegisterRequest{
		Job: job2,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}

	// Create revert request and enforcing it be at an incorrect version
	revertReq := &structs.JobRevertRequest{
		JobID:               job.ID,
		JobVersion:          0,
		EnforcePriorVersion: helper.Uint64ToPtr(10),
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	err := msgpackrpc.CallWithCodec(codec, "Job.Revert", revertReq, &resp)
	if err == nil || !strings.Contains(err.Error(), "enforcing version 10") {
		t.Fatalf("expected enforcement error")
	}

	// Create revert request and enforcing it be at the current version
	revertReq = &structs.JobRevertRequest{
		JobID:      job.ID,
		JobVersion: 1,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	err = msgpackrpc.CallWithCodec(codec, "Job.Revert", revertReq, &resp)
	if err == nil || !strings.Contains(err.Error(), "current version") {
		t.Fatalf("expected current version err: %v", err)
	}

	// Create revert request and enforcing it be at version 1
	revertReq = &structs.JobRevertRequest{
		JobID:               job.ID,
		JobVersion:          0,
		EnforcePriorVersion: helper.Uint64ToPtr(1),
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	if err := msgpackrpc.CallWithCodec(codec, "Job.Revert", revertReq, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}
	if resp.EvalID == "" || resp.EvalCreateIndex == 0 {
		t.Fatalf("bad created eval: %+v", resp)
	}
	if resp.JobModifyIndex == 0 {
		t.Fatalf("bad job modify index: %d", resp.JobModifyIndex)
	}

	// Create revert request and don't enforce. We are at version 2 but it is
	// the same as version 0
	revertReq = &structs.JobRevertRequest{
		JobID:      job.ID,
		JobVersion: 0,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	if err := msgpackrpc.CallWithCodec(codec, "Job.Revert", revertReq, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}
	if resp.EvalID == "" || resp.EvalCreateIndex == 0 {
		t.Fatalf("bad created eval: %+v", resp)
	}
	if resp.JobModifyIndex == 0 {
		t.Fatalf("bad job modify index: %d", resp.JobModifyIndex)
	}

	// Check that the job is at the correct version and that the eval was
	// created
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected job")
	}
	if out.Priority != job.Priority {
		t.Fatalf("priority mis-match")
	}
	if out.Version != 2 {
		t.Fatalf("got version %d; want %d", out.Version, 2)
	}

	eout, err := state.EvalByID(ws, resp.EvalID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if eout == nil {
		t.Fatalf("expected eval")
	}
	if eout.JobID != job.ID {
		t.Fatalf("job id mis-match")
	}

	// The job history should now contain three versions.
	versions, err := state.JobVersionsByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if len(versions) != 3 {
		t.Fatalf("got %d versions; want %d", len(versions), 3)
	}
}
// TestJobEndpoint_Revert_ACL checks ACL enforcement on Job.Revert: anonymous
// and read-only tokens are denied while management and submit-job tokens are
// allowed.
func TestJobEndpoint_Revert_ACL(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)
	s1, root := testACLServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	state := s1.fsm.State()
	testutil.WaitForLeader(t, s1.RPC)

	// Seed two versions of the same job directly into state.
	job := mock.Job()
	assert.Nil(state.UpsertJob(300, job))
	job2 := job.Copy()
	job2.Priority = 1
	assert.Nil(state.UpsertJob(400, job2))

	// Request reverting back to version 0.
	revertReq := &structs.JobRevertRequest{
		JobID:      job.ID,
		JobVersion: 0,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Anonymous request: denied.
	var resp structs.JobRegisterResponse
	err := msgpackrpc.CallWithCodec(codec, "Job.Revert", revertReq, &resp)
	assert.NotNil(err)
	assert.Contains(err.Error(), "Permission denied")

	// Read-only token: denied.
	invalidToken := CreatePolicyAndToken(t, state, 1001, "test-invalid",
		NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
	revertReq.SecretID = invalidToken.SecretID
	var invalidResp structs.JobRegisterResponse
	err = msgpackrpc.CallWithCodec(codec, "Job.Revert", revertReq, &invalidResp)
	assert.NotNil(err)
	assert.Contains(err.Error(), "Permission denied")

	// Management token: allowed.
	revertReq.SecretID = root.SecretID
	var validResp structs.JobRegisterResponse
	err = msgpackrpc.CallWithCodec(codec, "Job.Revert", revertReq, &validResp)
	assert.Nil(err)

	// Submit-job capability token: allowed.
	validToken := CreatePolicyAndToken(t, state, 1003, "test-valid",
		NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilitySubmitJob}))
	revertReq.SecretID = validToken.SecretID
	var validResp2 structs.JobRegisterResponse
	err = msgpackrpc.CallWithCodec(codec, "Job.Revert", revertReq, &validResp2)
	assert.Nil(err)
}
// TestJobEndpoint_Stable registers a job and then marks version 0 as stable
// via the Job.Stable RPC, verifying the stability flag is persisted in the
// FSM.
func TestJobEndpoint_Stable(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the initial register request
	job := mock.Job()
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}

	// Create stability request
	stableReq := &structs.JobStabilityRequest{
		JobID:      job.ID,
		JobVersion: 0,
		Stable:     true,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	var stableResp structs.JobStabilityResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Stable", stableReq, &stableResp); err != nil {
		t.Fatalf("err: %v", err)
	}
	// Bug fix: report stableResp.Index (the value under test) rather than
	// the unrelated register response's index.
	if stableResp.Index == 0 {
		t.Fatalf("bad index: %d", stableResp.Index)
	}

	// Check that the job is marked stable
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected job")
	}
	if !out.Stable {
		t.Fatalf("Job is not marked stable")
	}
}
// TestJobEndpoint_Evaluate registers a job and then forces a re-evaluation
// through the Job.Evaluate RPC, verifying that a fresh pending evaluation is
// created mirroring the job's priority, type, and modify index.
func TestJobEndpoint_Evaluate(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	job := mock.Job()
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}

	// Force a re-evaluation
	reEval := &structs.JobEvaluateRequest{
		JobID: job.ID,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	if err := msgpackrpc.CallWithCodec(codec, "Job.Evaluate", reEval, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}

	// Lookup the evaluation created by the forced re-evaluation.
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	eval, err := state.EvalByID(ws, resp.EvalID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if eval == nil {
		t.Fatalf("expected eval")
	}
	if eval.CreateIndex != resp.EvalCreateIndex {
		t.Fatalf("index mis-match")
	}
	if eval.Priority != job.Priority {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.Type != job.Type {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.TriggeredBy != structs.EvalTriggerJobRegister {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.JobID != job.ID {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.JobModifyIndex != resp.JobModifyIndex {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.Status != structs.EvalStatusPending {
		t.Fatalf("bad: %#v", eval)
	}
}
// TestJobEndpoint_Evaluate_Periodic registers a periodic job and verifies
// that forcing an evaluation through Job.Evaluate is rejected; periodic jobs
// are driven by the periodic dispatcher instead.
func TestJobEndpoint_Evaluate_Periodic(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	job := mock.PeriodicJob()
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	// Bug fix: report the value actually being checked (JobModifyIndex)
	// rather than the unrelated resp.Index.
	if resp.JobModifyIndex == 0 {
		t.Fatalf("bad index: %d", resp.JobModifyIndex)
	}

	// Force a re-evaluation
	reEval := &structs.JobEvaluateRequest{
		JobID: job.ID,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// The evaluate RPC must refuse periodic jobs.
	if err := msgpackrpc.CallWithCodec(codec, "Job.Evaluate", reEval, &resp); err == nil {
		t.Fatal("expect an err")
	}
}
// TestJobEndpoint_Evaluate_ParameterizedJob verifies that forcing an
// evaluation of a parameterized job is rejected: parameterized jobs only
// run when dispatched, so Job.Evaluate must return an error for them.
func TestJobEndpoint_Evaluate_ParameterizedJob(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	// Register a parameterized batch job.
	job := mock.Job()
	job.Type = structs.JobTypeBatch
	job.ParameterizedJob = &structs.ParameterizedJobConfig{}
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.JobModifyIndex == 0 {
		// Report the value actually under test (was resp.Index).
		t.Fatalf("bad index: %d", resp.JobModifyIndex)
	}
	// Forcing a re-evaluation of a parameterized job must fail.
	reEval := &structs.JobEvaluateRequest{
		JobID: job.ID,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	if err := msgpackrpc.CallWithCodec(codec, "Job.Evaluate", reEval, &resp); err == nil {
		t.Fatal("expect an err")
	}
}
// TestJobEndpoint_Deregister exercises job deregistration both with and
// without purging: a plain deregister stops the job but leaves it in state,
// while a purge removes it entirely; both create a pending evaluation.
func TestJobEndpoint_Deregister(t *testing.T) {
	t.Parallel()
	srv := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer srv.Shutdown()
	codec := rpcClient(t, srv)
	testutil.WaitForLeader(t, srv.RPC)

	// Register a mock job to operate on.
	job := mock.Job()
	regReq := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	var regResp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", regReq, &regResp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Deregister without purging: the job should remain, marked stopped.
	deregReq := &structs.JobDeregisterRequest{
		JobID: job.ID,
		Purge: false,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	var deregResp structs.JobDeregisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Deregister", deregReq, &deregResp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if deregResp.Index == 0 {
		t.Fatalf("bad index: %d", deregResp.Index)
	}
	ws := memdb.NewWatchSet()
	state := srv.fsm.State()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("job purged")
	}
	if !out.Stop {
		t.Fatalf("job not stopped")
	}
	// The deregistration should have produced a pending evaluation.
	eval, err := state.EvalByID(ws, deregResp.EvalID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if eval == nil {
		t.Fatalf("expected eval")
	}
	if eval.CreateIndex != deregResp.EvalCreateIndex {
		t.Fatalf("index mis-match")
	}
	switch {
	case eval.Priority != structs.JobDefaultPriority,
		eval.Type != structs.JobTypeService,
		eval.TriggeredBy != structs.EvalTriggerJobDeregister,
		eval.JobID != job.ID,
		eval.JobModifyIndex != deregResp.JobModifyIndex,
		eval.Status != structs.EvalStatusPending:
		t.Fatalf("bad: %#v", eval)
	}

	// Deregister again with purge: the job should be gone from state.
	purgeReq := &structs.JobDeregisterRequest{
		JobID: job.ID,
		Purge: true,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	var purgeResp structs.JobDeregisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Deregister", purgeReq, &purgeResp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if purgeResp.Index == 0 {
		t.Fatalf("bad index: %d", purgeResp.Index)
	}
	out, err = state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out != nil {
		t.Fatalf("unexpected job")
	}
	// The purge should also have produced a pending evaluation.
	eval, err = state.EvalByID(ws, purgeResp.EvalID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if eval == nil {
		t.Fatalf("expected eval")
	}
	if eval.CreateIndex != purgeResp.EvalCreateIndex {
		t.Fatalf("index mis-match")
	}
	switch {
	case eval.Priority != structs.JobDefaultPriority,
		eval.Type != structs.JobTypeService,
		eval.TriggeredBy != structs.EvalTriggerJobDeregister,
		eval.JobID != job.ID,
		eval.JobModifyIndex != purgeResp.JobModifyIndex,
		eval.Status != structs.EvalStatusPending:
		t.Fatalf("bad: %#v", eval)
	}
}
// TestJobEndpoint_Deregister_NonExistent verifies that deregistering an
// unknown job still succeeds and creates a pending deregister evaluation
// for the requested job ID.
func TestJobEndpoint_Deregister_NonExistent(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	// Deregister a job that was never registered.
	jobID := "foo"
	dereg := &structs.JobDeregisterRequest{
		JobID: jobID,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: structs.DefaultNamespace,
		},
	}
	var resp2 structs.JobDeregisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Deregister", dereg, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.JobModifyIndex == 0 {
		// Report the value actually under test (was resp2.Index).
		t.Fatalf("bad index: %d", resp2.JobModifyIndex)
	}
	// The deregistration should have produced a pending evaluation.
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	eval, err := state.EvalByID(ws, resp2.EvalID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if eval == nil {
		t.Fatalf("expected eval")
	}
	if eval.CreateIndex != resp2.EvalCreateIndex {
		t.Fatalf("index mis-match")
	}
	if eval.Priority != structs.JobDefaultPriority {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.Type != structs.JobTypeService {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.TriggeredBy != structs.EvalTriggerJobDeregister {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.JobID != jobID {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.JobModifyIndex != resp2.JobModifyIndex {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.Status != structs.EvalStatusPending {
		t.Fatalf("bad: %#v", eval)
	}
}
// TestJobEndpoint_Deregister_Periodic verifies that purging a periodic job
// removes it from state and that the deregistration does not create an
// evaluation (periodic jobs are driven by the periodic dispatcher).
func TestJobEndpoint_Deregister_Periodic(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	// Register a periodic job.
	job := mock.PeriodicJob()
	reg := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	// Deregister and purge the job.
	dereg := &structs.JobDeregisterRequest{
		JobID: job.ID,
		Purge: true,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	var resp2 structs.JobDeregisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Deregister", dereg, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.JobModifyIndex == 0 {
		// Report the value actually under test (was resp2.Index).
		t.Fatalf("bad index: %d", resp2.JobModifyIndex)
	}
	// The job should be gone from the FSM.
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out != nil {
		t.Fatalf("unexpected job")
	}
	// Check the deregister response (resp2) — the original checked the
	// register response (resp), which never carries an eval for a periodic
	// job, so the assertion was vacuous.
	if resp2.EvalID != "" {
		t.Fatalf("Deregister created an eval for a periodic job")
	}
}
// TestJobEndpoint_Deregister_ParameterizedJob verifies that purging a
// parameterized job removes it from state and that the deregistration does
// not create an evaluation (parameterized jobs only run when dispatched).
func TestJobEndpoint_Deregister_ParameterizedJob(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	// Register a parameterized batch job.
	job := mock.Job()
	job.Type = structs.JobTypeBatch
	job.ParameterizedJob = &structs.ParameterizedJobConfig{}
	reg := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	// Deregister and purge the job.
	dereg := &structs.JobDeregisterRequest{
		JobID: job.ID,
		Purge: true,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	var resp2 structs.JobDeregisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Deregister", dereg, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.JobModifyIndex == 0 {
		// Report the value actually under test (was resp2.Index).
		t.Fatalf("bad index: %d", resp2.JobModifyIndex)
	}
	// The job should be gone from the FSM.
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out != nil {
		t.Fatalf("unexpected job")
	}
	// Check the deregister response (resp2) — the original checked the
	// register response (resp), making the assertion vacuous.
	if resp2.EvalID != "" {
		t.Fatalf("Deregister created an eval for a parameterized job")
	}
}
// TestJobEndpoint_GetJob verifies that Job.GetJob returns a registered job
// with its server-assigned indexes, and returns a nil job (at the current
// index) when the job ID is unknown.
func TestJobEndpoint_GetJob(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	// Register a mock job.
	job := mock.Job()
	reg := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	// Mirror the server-assigned indexes so the deep-equal below can pass.
	job.CreateIndex = resp.JobModifyIndex
	job.ModifyIndex = resp.JobModifyIndex
	job.JobModifyIndex = resp.JobModifyIndex
	// Lookup the job
	get := &structs.JobSpecificRequest{
		JobID: job.ID,
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	var resp2 structs.SingleJobResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.GetJob", get, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index != resp.JobModifyIndex {
		// Print the value the comparison actually uses (was resp.Index).
		t.Fatalf("Bad index: %d %d", resp2.Index, resp.JobModifyIndex)
	}
	// Align the local job with server-side canonicalization (interpolated
	// service name and generated check names) before the deep compare.
	// NOTE(review): j aliases job — both are pointers to the same struct,
	// so the mutations below also change job; this is not a deep copy
	// despite the original comment. Confirm whether job.Copy() was intended.
	j := job
	j.TaskGroups[0].Tasks[0].Services[0].Name = "web-frontend"
	for tgix, tg := range j.TaskGroups {
		for tidx, t := range tg.Tasks {
			for sidx, service := range t.Services {
				for cidx, check := range service.Checks {
					check.Name = resp2.Job.TaskGroups[tgix].Tasks[tidx].Services[sidx].Checks[cidx].Name
				}
			}
		}
	}
	// Clear the submit times
	j.SubmitTime = 0
	resp2.Job.SubmitTime = 0
	if !reflect.DeepEqual(j, resp2.Job) {
		t.Fatalf("bad: %#v %#v", job, resp2.Job)
	}
	// Lookup non-existing job
	get.JobID = "foobarbaz"
	if err := msgpackrpc.CallWithCodec(codec, "Job.GetJob", get, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index != resp.JobModifyIndex {
		t.Fatalf("Bad index: %d %d", resp2.Index, resp.JobModifyIndex)
	}
	if resp2.Job != nil {
		t.Fatalf("unexpected job")
	}
}
// TestJobEndpoint_GetJob_ACL verifies ACL enforcement on Job.GetJob: no
// token and an insufficient token are rejected, while management and
// read-job tokens succeed.
func TestJobEndpoint_GetJob_ACL(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)
	srv, root := testACLServer(t, nil)
	defer srv.Shutdown()
	codec := rpcClient(t, srv)
	testutil.WaitForLeader(t, srv.RPC)
	state := srv.fsm.State()

	// Insert a job directly into state.
	job := mock.Job()
	assert.Nil(state.UpsertJob(1000, job))

	req := &structs.JobSpecificRequest{
		JobID: job.ID,
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// No token: permission denied.
	var anonResp structs.SingleJobResponse
	err := msgpackrpc.CallWithCodec(codec, "Job.GetJob", req, &anonResp)
	assert.NotNil(err)
	assert.Contains(err.Error(), "Permission denied")

	// Token lacking the read-job capability: permission denied.
	invalidToken := CreatePolicyAndToken(t, state, 1003, "test-invalid",
		NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityListJobs}))
	req.SecretID = invalidToken.SecretID
	var invalidResp structs.SingleJobResponse
	err = msgpackrpc.CallWithCodec(codec, "Job.GetJob", req, &invalidResp)
	assert.NotNil(err)
	assert.Contains(err.Error(), "Permission denied")

	// Management token: allowed.
	req.SecretID = root.SecretID
	var mgmtResp structs.SingleJobResponse
	err = msgpackrpc.CallWithCodec(codec, "Job.GetJob", req, &mgmtResp)
	assert.Nil(err)
	assert.Equal(job.ID, mgmtResp.Job.ID)

	// Token with the read-job capability: allowed.
	validToken := CreatePolicyAndToken(t, state, 1005, "test-valid",
		NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
	req.SecretID = validToken.SecretID
	var validResp structs.SingleJobResponse
	err = msgpackrpc.CallWithCodec(codec, "Job.GetJob", req, &validResp)
	assert.Nil(err)
	assert.Equal(job.ID, validResp.Job.ID)
}
// TestJobEndpoint_GetJob_Blocking verifies that Job.GetJob blocks on
// MinQueryIndex and is woken by both job upserts and job deletions.
func TestJobEndpoint_GetJob_Blocking(t *testing.T) {
	t.Parallel()
	srv := testServer(t, nil)
	defer srv.Shutdown()
	state := srv.fsm.State()
	codec := rpcClient(t, srv)
	testutil.WaitForLeader(t, srv.RPC)

	otherJob := mock.Job()
	watchedJob := mock.Job()

	// An unrelated upsert below the query index should not unblock us.
	time.AfterFunc(100*time.Millisecond, func() {
		if err := state.UpsertJob(100, otherJob); err != nil {
			t.Fatalf("err: %v", err)
		}
	})
	// The watched job's upsert should trigger the blocking query.
	time.AfterFunc(200*time.Millisecond, func() {
		if err := state.UpsertJob(200, watchedJob); err != nil {
			t.Fatalf("err: %v", err)
		}
	})

	req := &structs.JobSpecificRequest{
		JobID: watchedJob.ID,
		QueryOptions: structs.QueryOptions{
			Region:        "global",
			Namespace:     watchedJob.Namespace,
			MinQueryIndex: 150,
		},
	}
	start := time.Now()
	var upsertResp structs.SingleJobResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.GetJob", req, &upsertResp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, upsertResp)
	}
	if upsertResp.Index != 200 {
		t.Fatalf("Bad index: %d %d", upsertResp.Index, 200)
	}
	if upsertResp.Job == nil || upsertResp.Job.ID != watchedJob.ID {
		t.Fatalf("bad: %#v", upsertResp.Job)
	}

	// Deleting the job also fires the watch.
	time.AfterFunc(100*time.Millisecond, func() {
		if err := state.DeleteJob(300, watchedJob.Namespace, watchedJob.ID); err != nil {
			t.Fatalf("err: %v", err)
		}
	})
	req.QueryOptions.MinQueryIndex = 250
	start = time.Now()
	var deleteResp structs.SingleJobResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.GetJob", req, &deleteResp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, deleteResp)
	}
	if deleteResp.Index != 300 {
		t.Fatalf("Bad index: %d %d", deleteResp.Index, 300)
	}
	if deleteResp.Job != nil {
		t.Fatalf("bad: %#v", deleteResp.Job)
	}
}
// TestJobEndpoint_GetJobVersions verifies that re-registering a job creates
// a new version and that Job.GetJobVersions returns all versions, newest
// first; an unknown job ID yields an empty version list.
func TestJobEndpoint_GetJobVersions(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	// Register the job at priority 88 (version 0).
	job := mock.Job()
	job.Priority = 88
	reg := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	// Register the job again to create another version
	job.Priority = 100
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	// Lookup the job
	get := &structs.JobVersionsRequest{
		JobID: job.ID,
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	var versionsResp structs.JobVersionsResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.GetJobVersions", get, &versionsResp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if versionsResp.Index != resp.JobModifyIndex {
		// Print the value the comparison actually uses (was resp.Index).
		t.Fatalf("Bad index: %d %d", versionsResp.Index, resp.JobModifyIndex)
	}
	// Make sure there are two job versions, newest first.
	versions := versionsResp.Versions
	if l := len(versions); l != 2 {
		t.Fatalf("Got %d versions; want 2", l)
	}
	if v := versions[0]; v.Priority != 100 || v.ID != job.ID || v.Version != 1 {
		t.Fatalf("bad: %+v", v)
	}
	if v := versions[1]; v.Priority != 88 || v.ID != job.ID || v.Version != 0 {
		t.Fatalf("bad: %+v", v)
	}
	// Lookup non-existing job
	get.JobID = "foobarbaz"
	if err := msgpackrpc.CallWithCodec(codec, "Job.GetJobVersions", get, &versionsResp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if versionsResp.Index != resp.JobModifyIndex {
		t.Fatalf("Bad index: %d %d", versionsResp.Index, resp.JobModifyIndex)
	}
	if l := len(versionsResp.Versions); l != 0 {
		t.Fatalf("unexpected versions: %d", l)
	}
}
// TestJobEndpoint_GetJobVersions_Diff verifies that Job.GetJobVersions with
// Diffs=true returns all versions (newest first) along with field-level
// diffs between consecutive versions.
func TestJobEndpoint_GetJobVersions_Diff(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	// Register the job at priority 88 (version 0).
	job := mock.Job()
	job.Priority = 88
	reg := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	// Register the job again to create another version
	job.Priority = 90
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	// Register the job again to create another version
	job.Priority = 100
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	// Lookup the job with diffs enabled.
	get := &structs.JobVersionsRequest{
		JobID: job.ID,
		Diffs: true,
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	var versionsResp structs.JobVersionsResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.GetJobVersions", get, &versionsResp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if versionsResp.Index != resp.JobModifyIndex {
		// Print the value the comparison actually uses (was resp.Index).
		t.Fatalf("Bad index: %d %d", versionsResp.Index, resp.JobModifyIndex)
	}
	// Make sure there are three job versions, newest first.
	versions := versionsResp.Versions
	if l := len(versions); l != 3 {
		t.Fatalf("Got %d versions; want 3", l)
	}
	if v := versions[0]; v.Priority != 100 || v.ID != job.ID || v.Version != 2 {
		t.Fatalf("bad: %+v", v)
	}
	if v := versions[1]; v.Priority != 90 || v.ID != job.ID || v.Version != 1 {
		t.Fatalf("bad: %+v", v)
	}
	if v := versions[2]; v.Priority != 88 || v.ID != job.ID || v.Version != 0 {
		t.Fatalf("bad: %+v", v)
	}
	// Ensure we got one diff per consecutive version pair.
	diffs := versionsResp.Diffs
	if l := len(diffs); l != 2 {
		t.Fatalf("Got %d diffs; want 2", l)
	}
	// Newest diff: priority 90 -> 100. Note: the original used && (and the
	// second check read d1 instead of d2), which made these assertions
	// nearly impossible to fail; || is the correct combinator.
	d1 := diffs[0]
	if len(d1.Fields) != 1 {
		t.Fatalf("Got too many diffs: %#v", d1)
	}
	if d1.Fields[0].Name != "Priority" {
		t.Fatalf("Got wrong field: %#v", d1)
	}
	if d1.Fields[0].Old != "90" || d1.Fields[0].New != "100" {
		t.Fatalf("Got wrong field values: %#v", d1)
	}
	// Older diff: priority 88 -> 90.
	d2 := diffs[1]
	if len(d2.Fields) != 1 {
		t.Fatalf("Got too many diffs: %#v", d2)
	}
	if d2.Fields[0].Name != "Priority" {
		t.Fatalf("Got wrong field: %#v", d2)
	}
	if d2.Fields[0].Old != "88" || d2.Fields[0].New != "90" {
		t.Fatalf("Got wrong field values: %#v", d2)
	}
}
// TestJobEndpoint_GetJobVersions_Blocking verifies that Job.GetJobVersions
// blocks on MinQueryIndex and is woken both by a job's first upsert and by
// an upsert that creates a new version of the same job.
func TestJobEndpoint_GetJobVersions_Blocking(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	state := s1.fsm.State()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	// Create the jobs; job3 is a second version of job2.
	job1 := mock.Job()
	job2 := mock.Job()
	job3 := mock.Job()
	job3.ID = job2.ID
	job3.Priority = 1
	// Upsert a job we are not interested in first.
	time.AfterFunc(100*time.Millisecond, func() {
		if err := state.UpsertJob(100, job1); err != nil {
			t.Fatalf("err: %v", err)
		}
	})
	// Upsert another job later which should trigger the watch.
	time.AfterFunc(200*time.Millisecond, func() {
		if err := state.UpsertJob(200, job2); err != nil {
			t.Fatalf("err: %v", err)
		}
	})
	req := &structs.JobVersionsRequest{
		JobID: job2.ID,
		QueryOptions: structs.QueryOptions{
			Region:        "global",
			Namespace:     job2.Namespace,
			MinQueryIndex: 150,
		},
	}
	start := time.Now()
	var resp structs.JobVersionsResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.GetJobVersions", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
	}
	if resp.Index != 200 {
		t.Fatalf("Bad index: %d %d", resp.Index, 200)
	}
	if len(resp.Versions) != 1 || resp.Versions[0].ID != job2.ID {
		t.Fatalf("bad: %#v", resp.Versions)
	}
	// Upsert the job again which should trigger the watch.
	time.AfterFunc(100*time.Millisecond, func() {
		if err := state.UpsertJob(300, job3); err != nil {
			t.Fatalf("err: %v", err)
		}
	})
	req2 := &structs.JobVersionsRequest{
		JobID: job3.ID,
		QueryOptions: structs.QueryOptions{
			Region:        "global",
			Namespace:     job3.Namespace,
			MinQueryIndex: 250,
		},
	}
	var resp2 structs.JobVersionsResponse
	start = time.Now()
	if err := msgpackrpc.CallWithCodec(codec, "Job.GetJobVersions", req2, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
		// Report the response of this request (was resp).
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp2)
	}
	if resp2.Index != 300 {
		// Report the index of this response (was resp.Index).
		t.Fatalf("Bad index: %d %d", resp2.Index, 300)
	}
	if len(resp2.Versions) != 2 {
		t.Fatalf("bad: %#v", resp2.Versions)
	}
}
// TestJobEndpoint_GetJobSummary verifies that Job.Summary returns a zeroed
// task-group summary for a freshly registered job with no allocations.
func TestJobEndpoint_GetJobSummary(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	// Register a mock job.
	job := mock.Job()
	reg := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	// Mirror the server-assigned indexes for the expected summary below.
	job.CreateIndex = resp.JobModifyIndex
	job.ModifyIndex = resp.JobModifyIndex
	job.JobModifyIndex = resp.JobModifyIndex
	// Lookup the job summary
	get := &structs.JobSummaryRequest{
		JobID: job.ID,
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	var resp2 structs.JobSummaryResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Summary", get, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index != resp.JobModifyIndex {
		// Print the value the comparison actually uses (was resp.Index).
		t.Fatalf("Bad index: %d %d", resp2.Index, resp.JobModifyIndex)
	}
	expectedJobSummary := structs.JobSummary{
		JobID:     job.ID,
		Namespace: job.Namespace,
		Summary: map[string]structs.TaskGroupSummary{
			"web": {}, // redundant element type dropped (gofmt -s)
		},
		Children:    new(structs.JobChildrenSummary),
		CreateIndex: job.CreateIndex,
		ModifyIndex: job.CreateIndex,
	}
	if !reflect.DeepEqual(resp2.JobSummary, &expectedJobSummary) {
		// "exptected" typo fixed in the failure message.
		t.Fatalf("expected: %v, actual: %v", expectedJobSummary, resp2.JobSummary)
	}
}
func TestJobEndpoint_Summary_ACL(t *testing.T) {
assert := assert.New(t)
t.Parallel()
srv, root := testACLServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer srv.Shutdown()
codec := rpcClient(t, srv)
testutil.WaitForLeader(t, srv.RPC)
// Create the job
job := mock.Job()
reg := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
reg.SecretID = root.SecretID
var err error
// Register the job with a valid token
var regResp structs.JobRegisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Register", reg, ®Resp)
assert.Nil(err)
job.CreateIndex = regResp.JobModifyIndex
job.ModifyIndex = regResp.JobModifyIndex
job.JobModifyIndex = regResp.JobModifyIndex
req := &structs.JobSummaryRequest{
JobID: job.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
// Expect failure for request without a token
var resp structs.JobSummaryResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Summary", req, &resp)
assert.NotNil(err)
expectedJobSummary := &structs.JobSummary{
JobID: job.ID,
Namespace: job.Namespace,
Summary: map[string]structs.TaskGroupSummary{
"web": structs.TaskGroupSummary{},
},
Children: new(structs.JobChildrenSummary),
CreateIndex: job.CreateIndex,
ModifyIndex: job.ModifyIndex,
}
// Expect success when using a management token
req.SecretID = root.SecretID
var mgmtResp structs.JobSummaryResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Summary", req, &mgmtResp)
assert.Nil(err)
assert.Equal(expectedJobSummary, mgmtResp.JobSummary)
// Create the namespace policy and tokens
state := srv.fsm.State()
// Expect failure for request with an invalid token
invalidToken := CreatePolicyAndToken(t, state, 1003, "test-invalid",
NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityListJobs}))
req.SecretID = invalidToken.SecretID
var invalidResp structs.JobSummaryResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Summary", req, &invalidResp)
assert.NotNil(err)
// Try with a valid token
validToken := CreatePolicyAndToken(t, state, 1001, "test-valid",
NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
req.SecretID = validToken.SecretID
var authResp structs.JobSummaryResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Summary", req, &authResp)
assert.Nil(err)
assert.Equal(expectedJobSummary, authResp.JobSummary)
}
// TestJobEndpoint_GetJobSummary_Blocking verifies that Job.Summary blocks
// on MinQueryIndex and is woken by job upserts, allocation upserts, and
// job deletions.
func TestJobEndpoint_GetJobSummary_Blocking(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	state := s1.fsm.State()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	// Create a job and insert it
	job1 := mock.Job()
	time.AfterFunc(200*time.Millisecond, func() {
		if err := state.UpsertJob(100, job1); err != nil {
			t.Fatalf("err: %v", err)
		}
	})
	// Ensure the job summary request gets fired
	req := &structs.JobSummaryRequest{
		JobID: job1.ID,
		QueryOptions: structs.QueryOptions{
			Region:        "global",
			Namespace:     job1.Namespace,
			MinQueryIndex: 50,
		},
	}
	var resp structs.JobSummaryResponse
	start := time.Now()
	if err := msgpackrpc.CallWithCodec(codec, "Job.Summary", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
	}
	// Upsert an allocation for the job which should trigger the watch.
	time.AfterFunc(200*time.Millisecond, func() {
		alloc := mock.Alloc()
		alloc.JobID = job1.ID
		alloc.Job = job1
		if err := state.UpsertAllocs(200, []*structs.Allocation{alloc}); err != nil {
			t.Fatalf("err: %v", err)
		}
	})
	req = &structs.JobSummaryRequest{
		JobID: job1.ID,
		QueryOptions: structs.QueryOptions{
			Region:        "global",
			Namespace:     job1.Namespace,
			MinQueryIndex: 199,
		},
	}
	// Time the RPC once (the original reset start twice back-to-back).
	start = time.Now()
	var resp1 structs.JobSummaryResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Summary", req, &resp1); err != nil {
		t.Fatalf("err: %v", err)
	}
	if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
		// Report this request's response (was resp).
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp1)
	}
	if resp1.Index != 200 {
		t.Fatalf("Bad index: %d %d", resp1.Index, 200)
	}
	if resp1.JobSummary == nil {
		t.Fatalf("bad: %#v", resp1)
	}
	// Job delete fires watches
	time.AfterFunc(100*time.Millisecond, func() {
		if err := state.DeleteJob(300, job1.Namespace, job1.ID); err != nil {
			t.Fatalf("err: %v", err)
		}
	})
	req.QueryOptions.MinQueryIndex = 250
	start = time.Now()
	// Decode into a JobSummaryResponse: the original used SingleJobResponse,
	// whose Job field is never populated by Job.Summary, which made the
	// final nil check vacuous.
	var resp2 structs.JobSummaryResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Summary", req, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp2)
	}
	if resp2.Index != 300 {
		t.Fatalf("Bad index: %d %d", resp2.Index, 300)
	}
	if resp2.JobSummary != nil {
		t.Fatalf("bad: %#v", resp2.JobSummary)
	}
}
// TestJobEndpoint_ListJobs verifies that Job.List returns registered jobs
// and supports prefix-based filtering.
func TestJobEndpoint_ListJobs(t *testing.T) {
	t.Parallel()
	srv := testServer(t, nil)
	defer srv.Shutdown()
	codec := rpcClient(t, srv)
	testutil.WaitForLeader(t, srv.RPC)

	// Insert a job directly into state.
	job := mock.Job()
	if err := srv.fsm.State().UpsertJob(1000, job); err != nil {
		t.Fatalf("err: %v", err)
	}

	// A plain list should return exactly the one job.
	listReq := &structs.JobListRequest{
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	var listResp structs.JobListResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.List", listReq, &listResp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if listResp.Index != 1000 {
		t.Fatalf("Bad index: %d %d", listResp.Index, 1000)
	}
	if len(listResp.Jobs) != 1 {
		t.Fatalf("bad: %#v", listResp.Jobs)
	}
	if listResp.Jobs[0].ID != job.ID {
		t.Fatalf("bad: %#v", listResp.Jobs[0])
	}

	// Listing by a prefix of the job ID should find the same job.
	listReq = &structs.JobListRequest{
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			Namespace: job.Namespace,
			Prefix:    listResp.Jobs[0].ID[:4],
		},
	}
	var prefixResp structs.JobListResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.List", listReq, &prefixResp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if prefixResp.Index != 1000 {
		t.Fatalf("Bad index: %d %d", prefixResp.Index, 1000)
	}
	if len(prefixResp.Jobs) != 1 {
		t.Fatalf("bad: %#v", prefixResp.Jobs)
	}
	if prefixResp.Jobs[0].ID != job.ID {
		t.Fatalf("bad: %#v", prefixResp.Jobs[0])
	}
}
// TestJobEndpoint_ListJobs_WithACL verifies ACL enforcement on Job.List:
// no token and a token without list-jobs are rejected, while management
// and list-jobs tokens see the registered job.
func TestJobEndpoint_ListJobs_WithACL(t *testing.T) {
	assert := assert.New(t)
	t.Parallel()
	srv, root := testACLServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer srv.Shutdown()
	codec := rpcClient(t, srv)
	testutil.WaitForLeader(t, srv.RPC)
	state := srv.fsm.State()

	// Insert a job directly into state.
	job := mock.Job()
	assert.Nil(state.UpsertJob(1000, job))

	req := &structs.JobListRequest{
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// No token: rejected.
	var anonResp structs.JobListResponse
	err := msgpackrpc.CallWithCodec(codec, "Job.List", req, &anonResp)
	assert.NotNil(err)

	// Management token: full access.
	req.SecretID = root.SecretID
	var mgmtResp structs.JobListResponse
	err = msgpackrpc.CallWithCodec(codec, "Job.List", req, &mgmtResp)
	assert.Nil(err)
	assert.Equal(1, len(mgmtResp.Jobs))
	assert.Equal(job.ID, mgmtResp.Jobs[0].ID)

	// Token with only read-job (missing list-jobs): rejected.
	invalidToken := CreatePolicyAndToken(t, state, 1003, "test-invalid",
		NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
	req.SecretID = invalidToken.SecretID
	var invalidResp structs.JobListResponse
	err = msgpackrpc.CallWithCodec(codec, "Job.List", req, &invalidResp)
	assert.NotNil(err)

	// Token with the list-jobs capability: allowed.
	validToken := CreatePolicyAndToken(t, state, 1001, "test-valid",
		NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityListJobs}))
	req.SecretID = validToken.SecretID
	var validResp structs.JobListResponse
	err = msgpackrpc.CallWithCodec(codec, "Job.List", req, &validResp)
	assert.Nil(err)
	assert.Equal(1, len(validResp.Jobs))
	assert.Equal(job.ID, validResp.Jobs[0].ID)
}
// TestJobEndpoint_ListJobs_Blocking verifies that Job.List blocks on
// MinQueryIndex and is woken by both job upserts and job deletions.
func TestJobEndpoint_ListJobs_Blocking(t *testing.T) {
	t.Parallel()
	srv := testServer(t, nil)
	defer srv.Shutdown()
	state := srv.fsm.State()
	codec := rpcClient(t, srv)
	testutil.WaitForLeader(t, srv.RPC)

	job := mock.Job()

	// Upserting the job fires the watch.
	time.AfterFunc(100*time.Millisecond, func() {
		if err := state.UpsertJob(100, job); err != nil {
			t.Fatalf("err: %v", err)
		}
	})
	req := &structs.JobListRequest{
		QueryOptions: structs.QueryOptions{
			Region:        "global",
			Namespace:     job.Namespace,
			MinQueryIndex: 50,
		},
	}
	start := time.Now()
	var upsertResp structs.JobListResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.List", req, &upsertResp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, upsertResp)
	}
	if upsertResp.Index != 100 {
		t.Fatalf("Bad index: %d %d", upsertResp.Index, 100)
	}
	if len(upsertResp.Jobs) != 1 || upsertResp.Jobs[0].ID != job.ID {
		t.Fatalf("bad: %#v", upsertResp)
	}

	// Deleting the job fires the watch as well.
	time.AfterFunc(100*time.Millisecond, func() {
		if err := state.DeleteJob(200, job.Namespace, job.ID); err != nil {
			t.Fatalf("err: %v", err)
		}
	})
	req.MinQueryIndex = 150
	start = time.Now()
	var deleteResp structs.JobListResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.List", req, &deleteResp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, deleteResp)
	}
	if deleteResp.Index != 200 {
		t.Fatalf("Bad index: %d %d", deleteResp.Index, 200)
	}
	if len(deleteResp.Jobs) != 0 {
		t.Fatalf("bad: %#v", deleteResp)
	}
}
// TestJobEndpoint_Allocations lists the allocations that belong to a job.
func TestJobEndpoint_Allocations(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create two allocations sharing a job ID.
	alloc1 := mock.Alloc()
	alloc2 := mock.Alloc()
	alloc2.JobID = alloc1.JobID
	state := s1.fsm.State()

	// BUG FIX: the UpsertJobSummary error returns were previously ignored.
	if err := state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)); err != nil {
		t.Fatalf("err: %v", err)
	}
	if err := state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)); err != nil {
		t.Fatalf("err: %v", err)
	}
	if err := state.UpsertAllocs(1000, []*structs.Allocation{alloc1, alloc2}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Lookup the job's allocations.
	get := &structs.JobSpecificRequest{
		JobID: alloc1.JobID,
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			Namespace: alloc1.Job.Namespace,
		},
	}
	var resp2 structs.JobAllocationsResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Allocations", get, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index != 1000 {
		t.Fatalf("Bad index: %d %d", resp2.Index, 1000)
	}
	if len(resp2.Allocations) != 2 {
		t.Fatalf("bad: %#v", resp2.Allocations)
	}
}
// TestJobEndpoint_Allocations_Blocking verifies a blocking Job.Allocations
// query ignores an unrelated allocation write and unblocks when an
// allocation for the requested job is written.
func TestJobEndpoint_Allocations_Blocking(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// alloc2 belongs to the job under test; alloc1 stays unrelated.
	alloc1 := mock.Alloc()
	alloc2 := mock.Alloc()
	alloc2.JobID = "job1"
	state := s1.fsm.State()

	// First upsert an unrelated alloc. BUG FIXES: t.Fatalf must not be called
	// from a non-test goroutine (use t.Errorf), and the UpsertJobSummary error
	// returns were previously ignored.
	time.AfterFunc(100*time.Millisecond, func() {
		if err := state.UpsertJobSummary(99, mock.JobSummary(alloc1.JobID)); err != nil {
			t.Errorf("err: %v", err)
			return
		}
		if err := state.UpsertAllocs(100, []*structs.Allocation{alloc1}); err != nil {
			t.Errorf("err: %v", err)
		}
	})

	// Upsert an alloc for the job we are interested in later
	time.AfterFunc(200*time.Millisecond, func() {
		if err := state.UpsertJobSummary(199, mock.JobSummary(alloc2.JobID)); err != nil {
			t.Errorf("err: %v", err)
			return
		}
		if err := state.UpsertAllocs(200, []*structs.Allocation{alloc2}); err != nil {
			t.Errorf("err: %v", err)
		}
	})

	// Lookup the jobs
	get := &structs.JobSpecificRequest{
		JobID: "job1",
		QueryOptions: structs.QueryOptions{
			Region:        "global",
			Namespace:     alloc1.Job.Namespace,
			MinQueryIndex: 150,
		},
	}
	var resp structs.JobAllocationsResponse
	start := time.Now()
	if err := msgpackrpc.CallWithCodec(codec, "Job.Allocations", get, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
	}
	if resp.Index != 200 {
		t.Fatalf("Bad index: %d %d", resp.Index, 200)
	}
	if len(resp.Allocations) != 1 || resp.Allocations[0].JobID != "job1" {
		t.Fatalf("bad: %#v", resp.Allocations)
	}
}
// TestJobEndpoint_Evaluations lists the evaluations that belong to a job.
func TestJobEndpoint_Evaluations(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Seed two evaluations that share a job ID.
	eval1 := mock.Eval()
	eval2 := mock.Eval()
	eval2.JobID = eval1.JobID
	state := s1.fsm.State()
	if err := state.UpsertEvals(1000, []*structs.Evaluation{eval1, eval2}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Query the evaluations for the shared job.
	get := &structs.JobSpecificRequest{
		JobID: eval1.JobID,
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			Namespace: eval1.Namespace,
		},
	}
	var resp2 structs.JobEvaluationsResponse
	err := msgpackrpc.CallWithCodec(codec, "Job.Evaluations", get, &resp2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Both evaluations must come back at the upsert index.
	if resp2.Index != 1000 {
		t.Fatalf("Bad index: %d %d", resp2.Index, 1000)
	}
	if len(resp2.Evaluations) != 2 {
		t.Fatalf("bad: %#v", resp2.Evaluations)
	}
}
// TestJobEndpoint_Evaluations_Blocking verifies a blocking Job.Evaluations
// query ignores an unrelated eval write and unblocks when an eval for the
// requested job is written.
func TestJobEndpoint_Evaluations_Blocking(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// eval2 belongs to the job under test; eval1 stays unrelated.
	eval1 := mock.Eval()
	eval2 := mock.Eval()
	eval2.JobID = "job1"
	state := s1.fsm.State()

	// First upsert an unrelated eval. BUG FIX: use t.Errorf — t.Fatalf must
	// not be called from a goroutine other than the test's own.
	time.AfterFunc(100*time.Millisecond, func() {
		if err := state.UpsertEvals(100, []*structs.Evaluation{eval1}); err != nil {
			t.Errorf("err: %v", err)
		}
	})

	// Upsert an eval for the job we are interested in later
	time.AfterFunc(200*time.Millisecond, func() {
		if err := state.UpsertEvals(200, []*structs.Evaluation{eval2}); err != nil {
			t.Errorf("err: %v", err)
		}
	})

	// Lookup the jobs
	get := &structs.JobSpecificRequest{
		JobID: "job1",
		QueryOptions: structs.QueryOptions{
			Region:        "global",
			Namespace:     eval1.Namespace,
			MinQueryIndex: 150,
		},
	}
	var resp structs.JobEvaluationsResponse
	start := time.Now()
	if err := msgpackrpc.CallWithCodec(codec, "Job.Evaluations", get, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
	}
	if resp.Index != 200 {
		t.Fatalf("Bad index: %d %d", resp.Index, 200)
	}
	if len(resp.Evaluations) != 1 || resp.Evaluations[0].JobID != "job1" {
		t.Fatalf("bad: %#v", resp.Evaluations)
	}
}
// TestJobEndpoint_Deployments lists the deployments that belong to a job.
func TestJobEndpoint_Deployments(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	state := s1.fsm.State()
	assert := assert.New(t)

	// Seed one job with two deployments attached to it.
	j := mock.Job()
	d1, d2 := mock.Deployment(), mock.Deployment()
	d1.JobID = j.ID
	d2.JobID = j.ID
	assert.Nil(state.UpsertJob(1000, j), "UpsertJob")
	assert.Nil(state.UpsertDeployment(1001, d1), "UpsertDeployment")
	assert.Nil(state.UpsertDeployment(1002, d2), "UpsertDeployment")

	// Query the deployments for the job and expect both back.
	get := &structs.JobSpecificRequest{
		JobID: j.ID,
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			Namespace: j.Namespace,
		},
	}
	var resp structs.DeploymentListResponse
	assert.Nil(msgpackrpc.CallWithCodec(codec, "Job.Deployments", get, &resp), "RPC")
	assert.EqualValues(1002, resp.Index, "response index")
	assert.Len(resp.Deployments, 2, "deployments for job")
}
// TestJobEndpoint_Deployments_Blocking verifies a blocking Job.Deployments
// query stays blocked through an unrelated deployment write and unblocks
// once a deployment for the requested job is inserted.
func TestJobEndpoint_Deployments_Blocking(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	state := s1.fsm.State()
	assert := assert.New(t)
	// Create the register request. Only d2 belongs to the job under test;
	// d1 keeps its mock JobID and is therefore unrelated.
	j := mock.Job()
	d1 := mock.Deployment()
	d2 := mock.Deployment()
	d2.JobID = j.ID
	assert.Nil(state.UpsertJob(50, j), "UpsertJob")
	// First upsert an unrelated deployment at index 100 (must NOT wake the
	// blocking query).
	time.AfterFunc(100*time.Millisecond, func() {
		assert.Nil(state.UpsertDeployment(100, d1), "UpsertDeployment")
	})
	// Then upsert a deployment for the job we are interested in at index 200.
	time.AfterFunc(200*time.Millisecond, func() {
		assert.Nil(state.UpsertDeployment(200, d2), "UpsertDeployment")
	})
	// Blocking lookup: MinQueryIndex of 150 is past the first write, so only
	// the second (relevant) write can unblock the call.
	get := &structs.JobSpecificRequest{
		JobID: d2.JobID,
		QueryOptions: structs.QueryOptions{
			Region:        "global",
			Namespace:     d2.Namespace,
			MinQueryIndex: 150,
		},
	}
	var resp structs.DeploymentListResponse
	start := time.Now()
	assert.Nil(msgpackrpc.CallWithCodec(codec, "Job.Deployments", get, &resp), "RPC")
	assert.EqualValues(200, resp.Index, "response index")
	assert.Len(resp.Deployments, 1, "deployments for job")
	assert.Equal(d2.ID, resp.Deployments[0].ID, "returned deployment")
	// The RPC must have blocked until the relevant write at ~200ms.
	if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
	}
}
// TestJobEndpoint_LatestDeployment returns the newest deployment of a job.
func TestJobEndpoint_LatestDeployment(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	state := s1.fsm.State()
	assert := assert.New(t)

	// Two deployments for the same job; d2 carries higher create/modify
	// indexes and is therefore the latest one.
	j := mock.Job()
	d1, d2 := mock.Deployment(), mock.Deployment()
	d1.JobID = j.ID
	d2.JobID = j.ID
	d2.CreateIndex = d1.CreateIndex + 100
	d2.ModifyIndex = d2.CreateIndex + 100
	assert.Nil(state.UpsertJob(1000, j), "UpsertJob")
	assert.Nil(state.UpsertDeployment(1001, d1), "UpsertDeployment")
	assert.Nil(state.UpsertDeployment(1002, d2), "UpsertDeployment")

	// Ask for the job's latest deployment; it must be d2.
	get := &structs.JobSpecificRequest{
		JobID: j.ID,
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			Namespace: j.Namespace,
		},
	}
	var resp structs.SingleDeploymentResponse
	assert.Nil(msgpackrpc.CallWithCodec(codec, "Job.LatestDeployment", get, &resp), "RPC")
	assert.EqualValues(1002, resp.Index, "response index")
	assert.NotNil(resp.Deployment, "want a deployment")
	assert.Equal(d2.ID, resp.Deployment.ID, "latest deployment for job")
}
// TestJobEndpoint_LatestDeployment_Blocking verifies a blocking
// Job.LatestDeployment query ignores an unrelated deployment write and
// unblocks when a deployment for the requested job is inserted.
func TestJobEndpoint_LatestDeployment_Blocking(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	state := s1.fsm.State()
	assert := assert.New(t)
	// Create the register request. Only d2 belongs to the job under test;
	// d1 keeps its mock JobID and is therefore unrelated.
	j := mock.Job()
	d1 := mock.Deployment()
	d2 := mock.Deployment()
	d2.JobID = j.ID
	assert.Nil(state.UpsertJob(50, j), "UpsertJob")
	// First upsert an unrelated deployment at index 100 (must NOT wake the
	// blocking query).
	time.AfterFunc(100*time.Millisecond, func() {
		assert.Nil(state.UpsertDeployment(100, d1), "UpsertDeployment")
	})
	// Then upsert a deployment for the job we are interested in at index 200.
	time.AfterFunc(200*time.Millisecond, func() {
		assert.Nil(state.UpsertDeployment(200, d2), "UpsertDeployment")
	})
	// Blocking lookup: MinQueryIndex of 150 is past the first write, so only
	// the second (relevant) write can unblock the call.
	get := &structs.JobSpecificRequest{
		JobID: d2.JobID,
		QueryOptions: structs.QueryOptions{
			Region:        "global",
			Namespace:     d2.Namespace,
			MinQueryIndex: 150,
		},
	}
	var resp structs.SingleDeploymentResponse
	start := time.Now()
	assert.Nil(msgpackrpc.CallWithCodec(codec, "Job.LatestDeployment", get, &resp), "RPC")
	assert.EqualValues(200, resp.Index, "response index")
	assert.NotNil(resp.Deployment, "deployment for job")
	assert.Equal(d2.ID, resp.Deployment.ID, "returned deployment")
	// The RPC must have blocked until the relevant write at ~200ms.
	if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
	}
}
// TestJobEndpoint_Plan_ACL checks that Job.Plan is rejected without a token
// and accepted with the management token.
func TestJobEndpoint_Plan_ACL(t *testing.T) {
	t.Parallel()
	s1, root := testACLServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Build a plan request for a mock job.
	job := mock.Job()
	planReq := &structs.JobPlanRequest{
		Job:  job,
		Diff: true,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Without a token the plan must be rejected.
	var planResp structs.JobPlanResponse
	err := msgpackrpc.CallWithCodec(codec, "Job.Plan", planReq, &planResp)
	if err == nil {
		t.Fatalf("expected error")
	}

	// With the management token the plan must succeed.
	planReq.SecretID = root.SecretID
	if err := msgpackrpc.CallWithCodec(codec, "Job.Plan", planReq, &planResp); err != nil {
		t.Fatalf("err: %v", err)
	}
}
// TestJobEndpoint_Plan_WithDiff registers a job, then plans it with Diff
// requested and checks that annotations and a diff come back.
func TestJobEndpoint_Plan_WithDiff(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Register the job first.
	job := mock.Job()
	regReq := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	var regResp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", regReq, &regResp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if regResp.Index == 0 {
		t.Fatalf("bad index: %d", regResp.Index)
	}

	// Plan the same job with a diff.
	planReq := &structs.JobPlanRequest{
		Job:  job,
		Diff: true,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	var planResp structs.JobPlanResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Plan", planReq, &planResp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Validate the plan response: index, annotations, diff, and the failed
	// allocation metrics (no clients are running, so placement must fail).
	switch {
	case planResp.JobModifyIndex == 0:
		t.Fatalf("bad cas: %d", planResp.JobModifyIndex)
	case planResp.Annotations == nil:
		t.Fatalf("no annotations")
	case planResp.Diff == nil:
		t.Fatalf("no diff")
	case len(planResp.FailedTGAllocs) == 0:
		t.Fatalf("no failed task group alloc metrics")
	}
}
// TestJobEndpoint_Plan_NoDiff registers a job, then plans it with Diff
// disabled and checks that no diff is returned.
func TestJobEndpoint_Plan_NoDiff(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Register the job first.
	job := mock.Job()
	regReq := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	var regResp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", regReq, &regResp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if regResp.Index == 0 {
		t.Fatalf("bad index: %d", regResp.Index)
	}

	// Plan the same job without requesting a diff.
	planReq := &structs.JobPlanRequest{
		Job:  job,
		Diff: false,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	var planResp structs.JobPlanResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Plan", planReq, &planResp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Validate the plan response: annotations present, diff absent, and
	// failed allocation metrics present (no clients are running).
	switch {
	case planResp.JobModifyIndex == 0:
		t.Fatalf("bad cas: %d", planResp.JobModifyIndex)
	case planResp.Annotations == nil:
		t.Fatalf("no annotations")
	case planResp.Diff != nil:
		t.Fatalf("got diff")
	case len(planResp.FailedTGAllocs) == 0:
		t.Fatalf("no failed task group alloc metrics")
	}
}
// TestJobEndpoint_ImplicitConstraints_Vault checks that registering a job
// whose task requests Vault policies causes the server to add the implicit
// vault constraint to the task group.
func TestJobEndpoint_ImplicitConstraints_Vault(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	// Enable vault and require job tokens to be validated.
	tr, f := true, false
	s1.config.VaultConfig.Enabled = &tr
	s1.config.VaultConfig.AllowUnauthenticated = &f
	// Replace the Vault Client on the server with a mock that will accept
	// the token configured below.
	tvc := &TestVaultClient{}
	s1.vault = tvc
	policy := "foo"
	goodToken := structs.GenerateUUID()
	goodPolicies := []string{"foo", "bar", "baz"}
	tvc.SetLookupTokenAllowedPolicies(goodToken, goodPolicies)
	// Create the register request with a job asking for a vault policy
	job := mock.Job()
	job.VaultToken = goodToken
	job.TaskGroups[0].Tasks[0].Vault = &structs.Vault{
		Policies:   []string{policy},
		ChangeMode: structs.VaultChangeModeRestart,
	}
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("bad: %v", err)
	}
	// Check for the job in the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected job")
	}
	if out.CreateIndex != resp.JobModifyIndex {
		t.Fatalf("index mis-match")
	}
	// Check that there is an implicit vault constraint (the mock job has no
	// constraints of its own, so exactly one is expected).
	constraints := out.TaskGroups[0].Constraints
	if len(constraints) != 1 {
		t.Fatalf("Expected an implicit constraint")
	}
	if !constraints[0].Equal(vaultConstraint) {
		t.Fatalf("Expected implicit vault constraint")
	}
}
// TestJobEndpoint_ImplicitConstraints_Signals checks that registering a job
// whose template sends a change signal causes the server to add the implicit
// signal constraint to the task group.
func TestJobEndpoint_ImplicitConstraints_Signals(t *testing.T) {
	t.Parallel()
	s1 := testServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request with a job asking for a template that sends a
	// signal
	job := mock.Job()
	signal := "SIGUSR1"
	job.TaskGroups[0].Tasks[0].Templates = []*structs.Template{
		{
			SourcePath:   "foo",
			DestPath:     "bar",
			ChangeMode:   structs.TemplateChangeModeSignal,
			ChangeSignal: signal,
		},
	}
	req := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Fetch the response
	var resp structs.JobRegisterResponse
	if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
		t.Fatalf("bad: %v", err)
	}

	// Check for the job in the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected job")
	}
	if out.CreateIndex != resp.JobModifyIndex {
		t.Fatalf("index mis-match")
	}

	// Check that there is an implicit signal constraint
	constraints := out.TaskGroups[0].Constraints
	if len(constraints) != 1 {
		t.Fatalf("Expected an implicit constraint")
	}
	sigConstraint := getSignalConstraint([]string{signal})
	if !constraints[0].Equal(sigConstraint) {
		// BUG FIX: this failure message previously said "vault constraint",
		// copy-pasted from the vault test above; this test checks the signal
		// constraint.
		t.Fatalf("Expected implicit signal constraint")
	}
}
// TestJobEndpoint_ValidateJob_InvalidDriverConf rejects a job whose driver
// config contains an unknown key.
func TestJobEndpoint_ValidateJob_InvalidDriverConf(t *testing.T) {
	t.Parallel()

	// Build a mock job whose task config is invalid for its driver.
	job := mock.Job()
	job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
		"foo": "bar",
	}

	err, warnings := validateJob(job)
	switch {
	case err == nil || !strings.Contains(err.Error(), "-> config"):
		t.Fatalf("Expected config error; got %v", err)
	case warnings != nil:
		t.Fatalf("got unexpected warnings: %v", warnings)
	}
}
// TestJobEndpoint_ValidateJob_InvalidSignals rejects a job that asks a
// driver without signal support (qemu) to deliver a Vault change signal.
func TestJobEndpoint_ValidateJob_InvalidSignals(t *testing.T) {
	t.Parallel()

	// qemu tasks cannot receive signals, so a signal-based Vault change
	// mode must fail validation.
	job := mock.Job()
	task := job.TaskGroups[0].Tasks[0]
	task.Driver = "qemu"
	task.Vault = &structs.Vault{
		Policies:     []string{"foo"},
		ChangeMode:   structs.VaultChangeModeSignal,
		ChangeSignal: "SIGUSR1",
	}

	err, warnings := validateJob(job)
	switch {
	case err == nil || !strings.Contains(err.Error(), "support sending signals"):
		t.Fatalf("Expected signal feasibility error; got %v", err)
	case warnings != nil:
		t.Fatalf("got unexpected warnings: %v", warnings)
	}
}
// TestJobEndpoint_ValidateJobUpdate exercises validateJobUpdate for the
// transitions that are not allowed on an existing job: changing the job
// type, making it periodic, and making it parameterized.
func TestJobEndpoint_ValidateJobUpdate(t *testing.T) {
	t.Parallel()
	// FIX: the original named these variables `old` and `new`; `new` shadows
	// the predeclared builtin of the same name.
	oldJob := mock.Job()
	newJob := mock.Job()
	if err := validateJobUpdate(oldJob, newJob); err != nil {
		t.Errorf("expected update to be valid but got: %v", err)
	}

	// Changing the job type is rejected.
	newJob.Type = "batch"
	if err := validateJobUpdate(oldJob, newJob); err == nil {
		t.Errorf("expected err when setting new job to a different type")
	} else {
		t.Log(err)
	}

	// Making the job periodic is rejected.
	newJob = mock.Job()
	newJob.Periodic = &structs.PeriodicConfig{Enabled: true}
	if err := validateJobUpdate(oldJob, newJob); err == nil {
		t.Errorf("expected err when setting new job to periodic")
	} else {
		t.Log(err)
	}

	// Making the job parameterized is rejected.
	newJob = mock.Job()
	newJob.ParameterizedJob = &structs.ParameterizedJobConfig{}
	if err := validateJobUpdate(oldJob, newJob); err == nil {
		t.Errorf("expected err when setting new job to parameterized")
	} else {
		t.Log(err)
	}
}
// TestJobEndpoint_ValidateJobUpdate_ACL checks that Job.Validate requires a
// token and succeeds cleanly with the management token.
func TestJobEndpoint_ValidateJobUpdate_ACL(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)
	s1, root := testACLServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	job := mock.Job()
	req := &structs.JobValidateRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}

	// Without a token the validation call must be rejected.
	var resp structs.JobValidateResponse
	assert.NotNil(msgpackrpc.CallWithCodec(codec, "Job.Validate", req, &resp))

	// With the management token it must succeed with no error or warnings.
	req.SecretID = root.SecretID
	var validResp structs.JobValidateResponse
	assert.Nil(msgpackrpc.CallWithCodec(codec, "Job.Validate", req, &validResp))
	assert.Equal("", validResp.Error)
	assert.Equal("", validResp.Warnings)
}
func TestJobEndpoint_Dispatch(t *testing.T) {
t.Parallel()
// No requirements
d1 := mock.Job()
d1.Type = structs.JobTypeBatch
d1.ParameterizedJob = &structs.ParameterizedJobConfig{}
// Require input data
d2 := mock.Job()
d2.Type = structs.JobTypeBatch
d2.ParameterizedJob = &structs.ParameterizedJobConfig{
Payload: structs.DispatchPayloadRequired,
}
// Disallow input data
d3 := mock.Job()
d3.Type = structs.JobTypeBatch
d3.ParameterizedJob = &structs.ParameterizedJobConfig{
Payload: structs.DispatchPayloadForbidden,
}
// Require meta
d4 := mock.Job()
d4.Type = structs.JobTypeBatch
d4.ParameterizedJob = &structs.ParameterizedJobConfig{
MetaRequired: []string{"foo", "bar"},
}
// Optional meta
d5 := mock.Job()
d5.Type = structs.JobTypeBatch
d5.ParameterizedJob = &structs.ParameterizedJobConfig{
MetaOptional: []string{"foo", "bar"},
}
// Periodic dispatch job
d6 := mock.PeriodicJob()
d6.ParameterizedJob = &structs.ParameterizedJobConfig{}
d7 := mock.Job()
d7.Type = structs.JobTypeBatch
d7.ParameterizedJob = &structs.ParameterizedJobConfig{}
d7.Stop = true
reqNoInputNoMeta := &structs.JobDispatchRequest{}
reqInputDataNoMeta := &structs.JobDispatchRequest{
Payload: []byte("hello world"),
}
reqNoInputDataMeta := &structs.JobDispatchRequest{
Meta: map[string]string{
"foo": "f1",
"bar": "f2",
},
}
reqInputDataMeta := &structs.JobDispatchRequest{
Payload: []byte("hello world"),
Meta: map[string]string{
"foo": "f1",
"bar": "f2",
},
}
reqBadMeta := &structs.JobDispatchRequest{
Payload: []byte("hello world"),
Meta: map[string]string{
"foo": "f1",
"bar": "f2",
"baz": "f3",
},
}
reqInputDataTooLarge := &structs.JobDispatchRequest{
Payload: make([]byte, DispatchPayloadSizeLimit+100),
}
type testCase struct {
name string
parameterizedJob *structs.Job
dispatchReq *structs.JobDispatchRequest
noEval bool
err bool
errStr string
}
cases := []testCase{
{
name: "optional input data w/ data",
parameterizedJob: d1,
dispatchReq: reqInputDataNoMeta,
err: false,
},
{
name: "optional input data w/o data",
parameterizedJob: d1,
dispatchReq: reqNoInputNoMeta,
err: false,
},
{
name: "require input data w/ data",
parameterizedJob: d2,
dispatchReq: reqInputDataNoMeta,
err: false,
},
{
name: "require input data w/o data",
parameterizedJob: d2,
dispatchReq: reqNoInputNoMeta,
err: true,
errStr: "not provided but required",
},
{
name: "disallow input data w/o data",
parameterizedJob: d3,
dispatchReq: reqNoInputNoMeta,
err: false,
},
{
name: "disallow input data w/ data",
parameterizedJob: d3,
dispatchReq: reqInputDataNoMeta,
err: true,
errStr: "provided but forbidden",
},
{
name: "require meta w/ meta",
parameterizedJob: d4,
dispatchReq: reqInputDataMeta,
err: false,
},
{
name: "require meta w/o meta",
parameterizedJob: d4,
dispatchReq: reqNoInputNoMeta,
err: true,
errStr: "did not provide required meta keys",
},
{
name: "optional meta w/ meta",
parameterizedJob: d5,
dispatchReq: reqNoInputDataMeta,
err: false,
},
{
name: "optional meta w/o meta",
parameterizedJob: d5,
dispatchReq: reqNoInputNoMeta,
err: false,
},
{
name: "optional meta w/ bad meta",
parameterizedJob: d5,
dispatchReq: reqBadMeta,
err: true,
errStr: "unpermitted metadata keys",
},
{
name: "optional input w/ too big of input",
parameterizedJob: d1,
dispatchReq: reqInputDataTooLarge,
err: true,
errStr: "Payload exceeds maximum size",
},
{
name: "periodic job dispatched, ensure no eval",
parameterizedJob: d6,
dispatchReq: reqNoInputNoMeta,
noEval: true,
},
{
name: "periodic job stopped, ensure error",
parameterizedJob: d7,
dispatchReq: reqNoInputNoMeta,
err: true,
errStr: "stopped",
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
s1 := testServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer s1.Shutdown()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request
regReq := &structs.JobRegisterRequest{
Job: tc.parameterizedJob,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: tc.parameterizedJob.Namespace,
},
}
// Fetch the response
var regResp structs.JobRegisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", regReq, ®Resp); err != nil {
t.Fatalf("err: %v", err)
}
// Now try to dispatch
tc.dispatchReq.JobID = tc.parameterizedJob.ID
tc.dispatchReq.WriteRequest = structs.WriteRequest{
Region: "global",
Namespace: tc.parameterizedJob.Namespace,
}
var dispatchResp structs.JobDispatchResponse
dispatchErr := msgpackrpc.CallWithCodec(codec, "Job.Dispatch", tc.dispatchReq, &dispatchResp)
if dispatchErr == nil {
if tc.err {
t.Fatalf("Expected error: %v", dispatchErr)
}
// Check that we got an eval and job id back
switch dispatchResp.EvalID {
case "":
if !tc.noEval {
t.Fatalf("Bad response")
}
default:
if tc.noEval {
t.Fatalf("Got eval %q", dispatchResp.EvalID)
}
}
if dispatchResp.DispatchedJobID == "" {
t.Fatalf("Bad response")
}
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, tc.parameterizedJob.Namespace, dispatchResp.DispatchedJobID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("expected job")
}
if out.CreateIndex != dispatchResp.JobCreateIndex {
t.Fatalf("index mis-match")
}
if out.ParentID != tc.parameterizedJob.ID {
t.Fatalf("bad parent ID")
}
if tc.noEval {
return
}
// Lookup the evaluation
eval, err := state.EvalByID(ws, dispatchResp.EvalID)
if err != nil {
t.Fatalf("err: %v", err)
}
if eval == nil {
t.Fatalf("expected eval")
}
if eval.CreateIndex != dispatchResp.EvalCreateIndex {
t.Fatalf("index mis-match")
}
} else {
if !tc.err {
t.Fatalf("Got unexpected error: %v", dispatchErr)
} else if !strings.Contains(dispatchErr.Error(), tc.errStr) {
t.Fatalf("Expected err to include %q; got %v", tc.errStr, dispatchErr)
}
}
})
}
}
|
package nil
import (
"fmt"
"reflect"
)
// Is reports whether i is nil: either an untyped nil interface, or an
// interface holding a nil value of a nilable kind (pointer, map, slice,
// channel, func, interface, unsafe pointer).
//
// BUG FIX: the previous implementation called reflect.Value.IsNil
// unconditionally, which panics for non-nilable kinds (e.g. Is(5)).
// Such values are now reported as not nil.
func Is(i interface{}) bool {
	if i == nil {
		return true
	}
	switch v := reflect.ValueOf(i); v.Kind() {
	case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan,
		reflect.Func, reflect.Interface, reflect.UnsafePointer:
		return v.IsNil()
	default:
		// Ints, strings, structs, etc. can never be nil.
		return false
	}
}
// MustNot panics when the inner value of i is nil; otherwise it does nothing.
func MustNot(i interface{}) {
	if !IsNil(i) {
		return
	}
	panic(fmt.Sprintf("%#v (%T) must not be nil", i, i))
}
// convenience functions, if included with .

// Nø is a dot-import-friendly alias for MustNot.
func Nø(i interface{}) { MustNot(i) }

// IsNil is a dot-import-friendly alias for Is.
func IsNil(i interface{}) bool { return Is(i) }
Update: added documentation comments to the nil package.
package nil
import (
"fmt"
"reflect"
)
// Is reports whether the inner value of i is nil: either an untyped nil
// interface, or an interface holding a nil value of a nilable kind
// (pointer, map, slice, channel, func, interface, unsafe pointer).
//
// BUG FIX: the previous implementation called reflect.Value.IsNil
// unconditionally, which panics for non-nilable kinds (e.g. Is(5)).
// Such values are now reported as not nil.
func Is(i interface{}) bool {
	if i == nil {
		return true
	}
	switch v := reflect.ValueOf(i); v.Kind() {
	case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan,
		reflect.Func, reflect.Interface, reflect.UnsafePointer:
		return v.IsNil()
	default:
		// Ints, strings, structs, etc. can never be nil.
		return false
	}
}
// MustNot panics when the inner value of i is nil; otherwise it is a no-op.
func MustNot(i interface{}) {
	if IsNil(i) {
		msg := fmt.Sprintf("%#v (%T) must not be nil", i, i)
		panic(msg)
	}
}
// Nø is a convenience alias for MustNot, intended for use when the
// package is imported with a dot.
func Nø(i interface{}) { MustNot(i) }

// IsNil is a convenience alias for Is, intended for use when the
// package is imported with a dot.
func IsNil(i interface{}) bool { return Is(i) }
|
package main
// go:generate go-bindata .
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"strings"
"text/template"
)
// NixDependency is one entry of a Nix Go-dependency JSON file. An entry is
// either a fetched dependency (GoPackagePath + Fetch) or an include record
// (IncludeFile + Packages) pointing at another deps file that now hosts the
// listed packages.
type NixDependency struct {
	GoPackagePath string `json:"goPackagePath,omitempty"` // Go import path of the dependency
	Fetch *FetchGit `json:"fetch,omitempty"` // source revision to fetch; nil for include records
	IncludeFile string `json:"include,omitempty"` // path of another deps file to include
	Packages []string `json:"packages,omitempty"` // packages served by the included file
}

// FetchGit describes a pinned git checkout in the Nix fetchgit format.
type FetchGit struct {
	Type string `json:"type"` // fetcher type, e.g. "git"
	Url string `json:"url"` // repository URL
	Rev string `json:"rev"` // pinned revision (commit hash)
	Sha256 string `json:"sha256"` // Nix content hash of the checkout
}
// MergeDeps merges the Nix Go-dependency list in srcFile into dstFile.
// Packages present in both files at the same revision, and packages moved
// into dstFile, are replaced in srcFile by an include record; conflicting
// revisions are kept in srcFile for manual resolution. srcFile is deleted
// when nothing remains in it.
func MergeDeps(srcFile string, dstFile string) error {
	srcDepsList, err := loadDeps(srcFile)
	if err != nil {
		return err
	}
	dstDepsList, err := loadDeps(dstFile)
	if err != nil {
		return err
	}

	// Index both lists by Go package path.
	srcDeps := groupBySource(srcDepsList)
	dstDeps := groupBySource(dstDepsList)

	var newSrcDeps []*NixDependency
	// Include record listing every package that moved to dstFile.
	newSrcInclude := NixDependency{IncludeFile: dstFile}
	newDstDeps := dstDepsList
	for packagePath, srcDep := range srcDeps {
		if dstDep, exist := dstDeps[packagePath]; exist {
			if srcDep.Fetch.Rev == dstDep.Fetch.Rev {
				// Same revision on both sides: drop from src, reference dst.
				fmt.Printf("Same version of %v found in both files, removing from %v\n",
					packagePath, srcFile)
				newSrcInclude.Packages = append(newSrcInclude.Packages, packagePath)
			} else {
				// BUG FIX: the packagePath argument was missing, so the %v in
				// this message printed "%!v(MISSING)".
				fmt.Printf("Package %v found in both files but in they use different version. You need to agree on its version manually.\n", packagePath)
				newSrcDeps = append(newSrcDeps, srcDep)
			}
		} else {
			// Not present in dst yet: move it over.
			fmt.Printf("Moving %v from %v to %v\n", packagePath, srcFile, dstFile)
			dstDeps[packagePath] = srcDep
			newDstDeps = append(newDstDeps, srcDep)
			newSrcInclude.Packages = append(newSrcInclude.Packages, packagePath)
		}
	}
	if len(newSrcInclude.Packages) > 0 {
		newSrcDeps = append(newSrcDeps, &newSrcInclude)
	}
	if len(newSrcDeps) == 0 {
		// Everything moved: the src file is no longer needed.
		if err := os.Remove(srcFile); err != nil {
			return err
		}
		fmt.Printf("%v removed after all dependencies moved to %v\n", srcFile, dstFile)
	} else {
		if err := saveDeps(newSrcDeps, srcFile); err != nil {
			return err
		}
		fmt.Printf("New %v saved\n", srcFile)
	}
	if err := saveDeps(newDstDeps, dstFile); err != nil {
		return err
	}
	fmt.Printf("New %v saved\n", dstFile)
	return nil
}
// groupBySource indexes a dependency list by its Go package path. Later
// entries with the same path overwrite earlier ones.
func groupBySource(depsList []*NixDependency) map[string]*NixDependency {
	byPath := make(map[string]*NixDependency, len(depsList))
	for _, d := range depsList {
		byPath[d.GoPackagePath] = d
	}
	return byPath
}
// saveDeps writes deps to depsFilename as indented JSON.
//
// BUG FIX: marshal and write failures were previously printed to stdout and
// swallowed, so the function returned nil even when the file was corrupt or
// incomplete; all errors are now returned to the caller. Marshaling happens
// before the file is created, so a marshal failure no longer truncates an
// existing file.
func saveDeps(deps []*NixDependency, depsFilename string) error {
	j, err := json.MarshalIndent(deps, "", " ")
	if err != nil {
		return err
	}
	depsFile, err := os.Create(depsFilename)
	if err != nil {
		return err
	}
	if _, err := depsFile.Write(j); err != nil {
		depsFile.Close()
		return err
	}
	// Close errors matter after a write: report them too.
	return depsFile.Close()
}
// loadDeps reads depsFilename and decodes it as a JSON list of
// dependencies.
func loadDeps(depsFilename string) ([]*NixDependency, error) {
	raw, err := ioutil.ReadFile(depsFilename)
	if err != nil {
		return nil, err
	}
	var deps []*NixDependency
	err = json.Unmarshal(raw, &deps)
	return deps, err
}
// writeFromTemplate renders the bundled default.nix template (with [[ ]]
// delimiters, so Nix's own braces pass through untouched) into filename.
func writeFromTemplate(filename string, data interface{}) error {
	templateData, err := Asset("templates/default.nix")
	if err != nil {
		return err
	}
	t, err := template.New(filename).Delims("[[", "]]").Parse(string(templateData))
	if err != nil {
		return err
	}
	target, err := os.Create(filename)
	if err != nil {
		return err
	}
	defer target.Close()
	// BUG FIX: the Execute error was previously discarded, silently
	// producing a partial or empty output file.
	return t.Execute(target, data)
}
// nixName derives a Nix attribute name from a Go import path: the final
// path element with every '.' replaced by '-'.
func nixName(goImportPath string) string {
	base := goImportPath
	if idx := strings.LastIndex(goImportPath, "/"); idx >= 0 {
		base = goImportPath[idx+1:]
	}
	return strings.ReplaceAll(base, ".", "-")
}
FIX: add the missing conflicted package name argument to the conflict log message.
package main
// go:generate go-bindata .
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"strings"
"text/template"
)
// NixDependency is one entry of a Nix Go-dependency JSON file. An entry is
// either a fetched dependency (GoPackagePath + Fetch) or an include record
// (IncludeFile + Packages) pointing at another deps file that now hosts the
// listed packages.
type NixDependency struct {
	GoPackagePath string `json:"goPackagePath,omitempty"` // Go import path of the dependency
	Fetch *FetchGit `json:"fetch,omitempty"` // source revision to fetch; nil for include records
	IncludeFile string `json:"include,omitempty"` // path of another deps file to include
	Packages []string `json:"packages,omitempty"` // packages served by the included file
}

// FetchGit describes a pinned git checkout in the Nix fetchgit format.
type FetchGit struct {
	Type string `json:"type"` // fetcher type, e.g. "git"
	Url string `json:"url"` // repository URL
	Rev string `json:"rev"` // pinned revision (commit hash)
	Sha256 string `json:"sha256"` // Nix content hash of the checkout
}
// MergeDeps merges the Nix Go-dependency list in srcFile into dstFile.
// Packages present in both files at the same revision, and packages moved
// into dstFile, are replaced in srcFile by an include record; conflicting
// revisions are kept in srcFile for manual resolution. srcFile is deleted
// when nothing remains in it.
func MergeDeps(srcFile string, dstFile string) error {
	srcDepsList, err := loadDeps(srcFile)
	if err != nil {
		return err
	}
	dstDepsList, err := loadDeps(dstFile)
	if err != nil {
		return err
	}
	// Index both lists by Go package path.
	srcDeps := groupBySource(srcDepsList)
	dstDeps := groupBySource(dstDepsList)
	var newSrcDeps []*NixDependency
	// Include record listing every package that moved to dstFile.
	newSrcInclude := NixDependency{IncludeFile: dstFile}
	newDstDeps := dstDepsList
	for packagePath, srcDep := range srcDeps {
		if dstDep, exist := dstDeps[packagePath]; exist {
			if srcDep.Fetch.Rev == dstDep.Fetch.Rev {
				// Same revision on both sides: drop from src, reference dst.
				fmt.Printf("Same version of %v found in both files, removing from %v\n",
					packagePath, srcFile)
				newSrcInclude.Packages = append(newSrcInclude.Packages, packagePath)
			} else {
				// Conflicting revisions: keep the src entry for manual resolution.
				fmt.Printf("Package %v found in both files but in they use different version. You need to agree on its version manually.\n", packagePath)
				newSrcDeps = append(newSrcDeps, srcDep)
			}
		} else {
			// Not present in dst yet: move it over.
			fmt.Printf("Moving %v from %v to %v\n", packagePath, srcFile, dstFile)
			dstDeps[packagePath] = srcDep
			newDstDeps = append(newDstDeps, srcDep)
			newSrcInclude.Packages = append(newSrcInclude.Packages, packagePath)
		}
	}
	if len(newSrcInclude.Packages) > 0 {
		newSrcDeps = append(newSrcDeps, &newSrcInclude)
	}
	if len(newSrcDeps) == 0 {
		// Everything moved: the src file is no longer needed.
		if err := os.Remove(srcFile); err != nil {
			return err
		}
		fmt.Printf("%v removed after all dependencies moved to %v\n", srcFile, dstFile)
	} else {
		if err := saveDeps(newSrcDeps, srcFile); err != nil {
			return err
		}
		fmt.Printf("New %v saved\n", srcFile)
	}
	if err := saveDeps(newDstDeps, dstFile); err != nil {
		return err
	}
	fmt.Printf("New %v saved\n", dstFile)
	return nil
}
// groupBySource builds a package-path index over a dependency list. If two
// entries share a path, the later one wins.
func groupBySource(depsList []*NixDependency) map[string]*NixDependency {
	m := make(map[string]*NixDependency)
	for i := range depsList {
		m[depsList[i].GoPackagePath] = depsList[i]
	}
	return m
}
// saveDeps writes deps to depsFilename as indented JSON.
//
// BUG FIX: marshal and write failures were previously printed to stdout and
// swallowed, so the function returned nil even when the file was corrupt or
// incomplete; all errors are now returned to the caller. Marshaling happens
// before the file is created, so a marshal failure no longer truncates an
// existing file.
func saveDeps(deps []*NixDependency, depsFilename string) error {
	j, err := json.MarshalIndent(deps, "", " ")
	if err != nil {
		return err
	}
	depsFile, err := os.Create(depsFilename)
	if err != nil {
		return err
	}
	if _, err := depsFile.Write(j); err != nil {
		depsFile.Close()
		return err
	}
	// Close errors matter after a write: report them too.
	return depsFile.Close()
}
// loadDeps decodes the JSON dependency list stored in depsFilename.
func loadDeps(depsFilename string) ([]*NixDependency, error) {
	contents, err := ioutil.ReadFile(depsFilename)
	if err != nil {
		return nil, err
	}
	var deps []*NixDependency
	err = json.Unmarshal(contents, &deps)
	return deps, err
}
// writeFromTemplate renders the bundled default.nix template (with [[ ]]
// delimiters, so Nix's own braces pass through untouched) into filename.
func writeFromTemplate(filename string, data interface{}) error {
	templateData, err := Asset("templates/default.nix")
	if err != nil {
		return err
	}
	t, err := template.New(filename).Delims("[[", "]]").Parse(string(templateData))
	if err != nil {
		return err
	}
	target, err := os.Create(filename)
	if err != nil {
		return err
	}
	defer target.Close()
	// BUG FIX: the Execute error was previously discarded, silently
	// producing a partial or empty output file.
	return t.Execute(target, data)
}
// nixName turns a Go import path into a Nix attribute name: the last path
// segment with dots replaced by dashes.
func nixName(goImportPath string) string {
	segments := strings.Split(goImportPath, "/")
	last := segments[len(segments)-1]
	return strings.Replace(last, ".", "-", -1)
}
|
/*
file: nnc.go
author: alemedeiros <alexandre.n.medeiros _at_ gmail.com>
A n-sized noughts and crosses game library.
It is a generalization of noughts and crosses, with a n x n board.
To win, you have to fill a line, column or diagonal with your symbol.
*/
// Package nnc implements a n-sized noughts and crosses game.
package nnc
import (
"errors"
"runtime"
"time"
)
// Empty is an unplayed square;
// Cross is a 'X';
// Nought is a 'O';
const (
Empty byte = ' '
Cross byte = 'X'
Nought byte = 'O'
)
// NOTE(review): this package-level ticker is started at load time but is
// never referenced anywhere in this file; it permanently ticks every 25ms
// and can never be stopped. It should be removed along with the "time"
// import — TODO confirm nothing outside this file uses it.
var ticker = time.NewTicker(time.Millisecond * 25)
// A Game is a game board, use New function to initialize a Game.
type Game struct {
	board      [][]byte // size x size grid holding Empty/Cross/Nought cells
	size       int      // board dimension n (board is n x n)
	count      int      // unplayed cells remaining; starts at size*size, decremented by Play
	currPlayer byte     // symbol of the player who moves next
}
// Structure to save the move and its value.
type move struct {
	value, i, j int // minimax score and the board coordinates of the move
}
// CurrentPlayer method returns the player that should play.
// The returned symbol is Cross or Nought.
func (g Game) CurrentPlayer() byte {
	return g.currPlayer
}
// Board method returns a deep copy of the current board state, so the
// caller cannot mutate the game through the returned slices.
func (g Game) Board() [][]byte {
	snapshot := make([][]byte, g.size)
	for i, row := range g.board {
		snapshot[i] = make([]byte, g.size)
		copy(snapshot[i], row)
	}
	return snapshot
}
// Get the minimum weighted playing position; ties favor a.
// Rewritten without the else-after-return (Go idiom).
func min(a, b move) move {
	if b.value < a.value {
		return b
	}
	return a
}
// Get the maximum weighted playing position; ties favor a.
// Rewritten without the else-after-return (Go idiom).
func max(a, b move) move {
	if b.value > a.value {
		return b
	}
	return a
}
// New function Initializes a game structure with a sz-sized board.
// First player is always Cross.
func New(sz int) Game {
	// Build the empty board first, then assemble the Game value.
	board := make([][]byte, sz)
	for i := range board {
		row := make([]byte, sz)
		for j := range row {
			row[j] = Empty
		}
		board[i] = row
	}
	return Game{
		board:      board,
		size:       sz,
		count:      sz * sz,
		currPlayer: Cross, // First player is Cross
	}
}
// copyGame returns a deep copy of the current game, including a fresh
// board, so the copy can be mutated without touching the original.
// Improved: row contents are duplicated with the built-in copy (a single
// memmove per row) instead of an element-by-element loop.
func (g Game) copyGame() (ng Game) {
	// Allocate a new Game structure
	ng = Game{
		board:      make([][]byte, g.size),
		size:       g.size,
		count:      g.count,
		currPlayer: g.currPlayer,
	}
	// Copy board.
	for i := range ng.board {
		ng.board[i] = make([]byte, g.size)
		copy(ng.board[i], g.board[i])
	}
	return
}
// Play method checks if the coordinates are inside the board and if it is the
// given player's turn.
//
// Return true and winner (Empty means draw) if the move ended the game.
//
// Fixed: removed a leftover debug `print` (the builtin, writing to stderr)
// on the already-played-cell path — the error return already reports the
// condition. Also replaced `count -= 1` with the idiomatic `count--`.
func (g *Game) Play(x, y int, player byte) (done bool, winner byte, err error) {
	// Validation check
	if g.currPlayer != player {
		return false, Empty, errors.New("not player's turn")
	}
	if x < 0 || g.size <= x || y < 0 || g.size <= y {
		return false, Empty, errors.New("invalid position")
	}
	if g.board[x][y] != Empty {
		return false, Empty, errors.New("cell already played")
	}
	// Move is valid, do it!
	g.board[x][y] = player
	// Check if move ended the game (before switching turns).
	isDone, winner := g.isDone()
	g.updateTurn()
	g.count--
	return isDone, winner, nil
}
// PlayAI method checks if is the given player's turn, if so, it makes a move as
// that player.
//
// Return true and winner (Empty means draw) if the move ended the game.
func (g *Game) PlayAI(player byte) (done bool, winner byte, err error) {
	// Validation check
	if g.currPlayer != player {
		return false, Empty, errors.New("not player's turn")
	}
	// A value greater than the maximum value possible for a game.
	// (outcome is bounded by 3*size*size, so this acts as +/- infinity
	// for the alpha-beta window.)
	lim := g.size * g.size * 10
	// Search depth scales with the board size.
	depth := g.size
	///*
	// Configure runtime max processors to use all processors.
	runtime.GOMAXPROCS(runtime.NumCPU())
	// Parallel alpha-beta pruning (active variant).
	m := alphaBetaPruning(*g, depth, -lim, lim, -1, -1, player)
	//*/
	/*
	// Configure runtime max processors to use all processors.
	runtime.GOMAXPROCS(1)
	// Serial alpha-beta pruning (kept for benchmarking/comparison).
	//m := alphaBetaPruningSerial(*g, depth, -lim, lim, 0, -1, player)
	//*/
	// Apply the chosen move through the normal Play path.
	return g.Play(m.i, m.j, player)
}
// Serial implementation of Alpha-Beta Pruning algorithm.
//
// Searches depth plies ahead from position g; (x, y) identify the move
// that produced g and are echoed back in leaf results. Scores come from
// g.outcome and are always from player's point of view.
// TODO: Try not to copy the entire game structure
func alphaBetaPruningSerial(g Game, depth int, alpha, beta int, x, y int, player byte) move {
	// Check for depth limit or if game is over
	if depth == 0 {
		return move{g.outcome(player), x, y}
	}
	if done, _ := g.isDone(); done {
		return move{g.outcome(player), x, y}
	}
	// Check for whose turn it is
	if curr := g.currPlayer; curr == player {
		// Maximizing side: raise alpha as better moves are found.
		p := move{alpha, x, y}
		for i, l := range g.board {
			for j, e := range l {
				var m move
				// Check for possible move
				if e != Empty {
					continue
				}
				// Generate updated game
				ng := g.copyGame()
				ng.Play(i, j, player)
				m = alphaBetaPruningSerial(ng, depth-1, alpha, beta, i, j, player)
				m.i = i
				m.j = j
				// Update alpha
				p = max(p, m)
				alpha = p.value
				// Beta cut-off
				if beta <= alpha {
					return m
				}
			}
		}
		return p
	} else {
		// Minimizing side (opponent to move): lower beta.
		p := move{beta, x, y}
		for i, l := range g.board {
			for j, e := range l {
				var m move
				// Check for possible move
				if e != Empty {
					continue
				}
				// Generate updated game
				ng := g.copyGame()
				ng.Play(i, j, curr)
				m = alphaBetaPruningSerial(ng, depth-1, alpha, beta, i, j, player)
				m.i = i
				m.j = j
				// Update beta
				p = min(p, m)
				beta = p.value
				// Alpha cut-off
				if beta <= alpha {
					return m
				}
			}
		}
		return p
	}
}
// work is one unit for the parallel search: a game position to evaluate
// plus the alpha/beta window at dispatch time and the originating board
// coordinates, which are echoed back with the result.
type work struct {
	g    Game
	a, b int // alpha and beta bounds when the work was queued
	i, j int // the move that produced g
}
// Parallel implementation of Alpha-Beta Pruning algorithm.
//
// The first legal move is searched recursively to seed the alpha/beta
// window, then every legal move is farmed out to NumCPU worker goroutines
// that each run the serial search with the window captured at dispatch
// time; results are folded back into m as they arrive.
// NOTE(review): the move explored in the seeding pass is dispatched again
// by the work loop, so it is evaluated twice — redundant but harmless.
// TODO: Try not to copy the entire game structure
func alphaBetaPruning(g Game, depth int, alpha, beta int, x, y int, player byte) move {
	// Check for depth limit or if game is over
	if depth == 0 {
		return move{g.outcome(player), x, y}
	}
	if done, _ := g.isDone(); done {
		return move{g.outcome(player), x, y}
	}
	var m, p move
	// Find first possible move (searched serially to seed the window).
firstmove:
	for i, l := range g.board {
		for j, e := range l {
			// Check for possible move
			if e != Empty {
				continue
			}
			// Generate updated game
			ng := g.copyGame()
			ng.Play(i, j, g.currPlayer)
			m = alphaBetaPruning(ng, depth-1, alpha, beta, i, j, player)
			m.i = i
			m.j = j
			// Update alpha/beta
			if g.currPlayer == player {
				alpha = m.value
			} else {
				beta = m.value
			}
			break firstmove
		}
	}
	// Initialize one worker goroutine per CPU; workers exit when workCh
	// is closed below.
	workCh := make(chan work, runtime.NumCPU())
	resCh := make(chan move, runtime.NumCPU())
	for i := 0; i < runtime.NumCPU(); i++ {
		go func() {
			// Receive work
			for w := range workCh {
				// Calculate
				nm := alphaBetaPruningSerial(w.g, depth-1, w.a, w.b, w.i, w.j, player)
				nm.i = w.i
				nm.j = w.j
				// Return result
				resCh <- nm
			}
		}()
	}
	// count tracks results still outstanding.
	count := 0
	// Distribute work
genwork:
	for i := 0; i < g.size; i++ {
		for j := 0; j < g.size; j++ {
			// Check for possible move
			if g.board[i][j] != Empty {
				continue
			}
			// Generate updated game
			ng := g.copyGame()
			ng.Play(i, j, g.currPlayer)
			// Send work
			workCh <- work{ng, alpha, beta, i, j}
			count++
			// Get a result or generate another work
			if count == runtime.NumCPU() {
				// Every worker is busy: must wait for a result
				p = <-resCh
				count--
			} else {
				// Check if there is any result available (non-blocking)
				select {
				default:
					continue
				case p = <-resCh:
					count--
				}
			}
			// Evaluate the result received (move p)
			if g.currPlayer == player {
				m = max(m, p)
				alpha = m.value
			} else {
				m = min(m, p)
				beta = m.value
			}
			// Pruned
			if beta <= alpha {
				break genwork
			}
		}
	}
	// Close work channel (stops the workers) and evaluate remaining results.
	close(workCh)
	for count > 0 {
		p = <-resCh
		count--
		// Once pruned, drain without folding in further results.
		if beta <= alpha {
			continue
		}
		if g.currPlayer == player {
			m = max(m, p)
			alpha = m.value
		} else {
			m = min(m, p)
			beta = m.value
		}
	}
	return m
}
// updateTurn method updates whose turn it is by swapping the current
// player's symbol.
//
// Assumes the turn was correctly set before call; any other value is
// reported as an error.
func (g *Game) updateTurn() error {
	if g.currPlayer == Cross {
		g.currPlayer = Nought
		return nil
	}
	if g.currPlayer == Nought {
		g.currPlayer = Cross
		return nil
	}
	return errors.New("invalid player turn value")
}
// opponent returns the rival symbol of player, or 0 when player is
// neither Cross nor Nought.
func opponent(player byte) byte {
	if player == Cross {
		return Nought
	}
	if player == Nought {
		return Cross
	}
	return 0
}
// isDone method determines if the game is over, and if it is, its winner.
// If winner is Empty, it was a draw.
func (g Game) isDone() (done bool, winner byte) {
	winner = Empty
	done = true
	var local bool // does the line being scanned still belong to a single player?
	var init byte  // symbol seen in the first cell of the scanned line
	// Check for winner
	for i, sz := 0, g.size; i < sz; i++ {
		// Lines
		local = true
		init = Empty
		for j := 0; j < sz && local; j++ {
			if j == 0 {
				init = g.board[i][j]
			}
			// A win needs every cell filled with the same symbol.
			if g.board[i][j] == Empty || g.board[i][j] != init {
				local = false
			}
		}
		// Return if someone won
		if local {
			return local, init
		}
		// Columns
		local = true
		init = Empty
		for j := 0; j < sz && local; j++ {
			if j == 0 {
				init = g.board[j][i]
			}
			if g.board[j][i] == Empty || g.board[j][i] != init {
				local = false
			}
		}
		// Return if someone won
		if local {
			return local, init
		}
	}
	// Diagonal
	local = true
	init = Empty
	for i, sz := 0, g.size; i < sz && local; i++ {
		if i == 0 {
			init = g.board[i][i]
		}
		if g.board[i][i] == Empty || g.board[i][i] != init {
			local = false
		}
	}
	// Return if someone won
	if local {
		return local, init
	}
	// Anti-diagonal
	local = true
	init = Empty
	for i, sz := 0, g.size; i < sz && local; i++ {
		if i == 0 {
			init = g.board[i][sz-1-i]
		}
		if g.board[i][sz-1-i] == Empty || g.board[i][sz-1-i] != init {
			local = false
		}
	}
	// Return if someone won
	if local {
		return local, init
	}
	// Check for draw: any Empty cell means the game continues.
outerFor:
	for i := range g.board {
		for _, p := range g.board[i] {
			if p == Empty {
				done = false
				break outerFor
			}
		}
	}
	return
}
// Outcome calculates the outcome function for a player (Nought/Cross) for the
// current game.
//
// Each line/column/diagonal that is still held by only one side contributes
// its signed cell count (positive for player, negative for the opponent);
// mixed lines contribute 0. A completed win short-circuits to +/-3*size*size,
// which dominates any achievable sum of partial lines.
func (g Game) outcome(player byte) (sum int) {
	// Only meaningful for an actual player symbol.
	if player != Nought && player != Cross {
		return
	}
	for i, sz := 0, g.size; i < sz; i++ {
		// Lines
		linit, lsum := Empty, 0
		for j := 0; j < sz; j++ {
			// Empty squares don't change the outcome function.
			if g.board[i][j] == Empty {
				continue
			}
			// Initialize initial symbol.
			if linit == Empty {
				linit = g.board[i][j]
			}
			// Different symbols means line sum is 0.
			if g.board[i][j] != linit {
				lsum = 0
				break
			}
			if g.board[i][j] == player {
				lsum += 1 // Increment for player
			} else {
				lsum -= 1 // Decrement for opponent
			}
		}
		// Columns
		cinit, csum := Empty, 0
		for j := 0; j < sz; j++ {
			// Empty squares don't change the outcome function.
			if g.board[j][i] == Empty {
				continue
			}
			// Initialize initial symbol.
			if cinit == Empty {
				cinit = g.board[j][i]
			}
			// Different symbols means column sum is 0.
			if g.board[j][i] != cinit {
				csum = 0
				break
			}
			if g.board[j][i] == player {
				csum += 1 // Increment for player
			} else {
				csum -= 1 // Decrement for opponent
			}
		}
		// A full line/column is an outright win/loss: dominate the score.
		if lsum == sz || csum == sz {
			return 3 * sz * sz
		} else if lsum == -sz || csum == -sz {
			return -(3 * sz * sz)
		}
		sum += lsum + csum
	}
	// Diagonal
	dinit, dsum := Empty, 0
	for i, sz := 0, g.size; i < sz; i++ {
		// Empty squares don't change the outcome function.
		if g.board[i][i] == Empty {
			continue
		}
		// Initialize initial symbol.
		if dinit == Empty {
			dinit = g.board[i][i]
		}
		// Different symbols means diagonal sum is 0.
		if g.board[i][i] != dinit {
			dsum = 0
			break
		}
		if g.board[i][i] == player {
			dsum += 1 // Increment for player
		} else {
			dsum -= 1 // Decrement for opponent
		}
	}
	if dsum == g.size {
		return 3 * g.size * g.size
	} else if dsum == -g.size {
		return -(3 * g.size * g.size)
	}
	sum += dsum
	// Anti-Diagonal
	adinit, adsum := Empty, 0
	for i, sz := 0, g.size; i < sz; i++ {
		// Empty squares don't change the outcome function.
		if g.board[i][sz-1-i] == Empty {
			continue
		}
		// Initialize initial symbol.
		if adinit == Empty {
			adinit = g.board[i][sz-1-i]
		}
		// Different symbols means anti-diagonal sum is 0.
		if g.board[i][sz-1-i] != adinit {
			adsum = 0
			break
		}
		if g.board[i][sz-1-i] == player {
			adsum += 1 // Increment for player
		} else {
			adsum -= 1 // Decrement for opponent
		}
	}
	if adsum == g.size {
		return 3 * g.size * g.size
	} else if adsum == -g.size {
		return -(3 * g.size * g.size)
	}
	sum += adsum
	return
}
Remove the unused package-level ticker (and its now-unneeded "time" import).
/*
file: nnc.go
author: alemedeiros <alexandre.n.medeiros _at_ gmail.com>
A n-sized noughts and crosses game library.
It is a generalization of noughts and crosses, with a n x n board.
To win, you have to fill a line, column or diagonal with your symbol.
*/
// Package nnc implements a n-sized noughts and crosses game.
package nnc
import (
"errors"
"runtime"
)
// Empty is an unplayed square;
// Cross is a 'X';
// Nought is a 'O';
const (
Empty byte = ' '
Cross byte = 'X'
Nought byte = 'O'
)
// A Game is a game board, use New function to initialize a Game.
type Game struct {
	board      [][]byte // size x size grid holding Empty/Cross/Nought cells
	size       int      // board dimension n (board is n x n)
	count      int      // unplayed cells remaining; starts at size*size, decremented by Play
	currPlayer byte     // symbol of the player who moves next
}
// Structure to save the move and its value.
type move struct {
	value, i, j int // minimax score and the board coordinates of the move
}
// CurrentPlayer method returns the player that should play.
// The returned symbol is Cross or Nought.
func (g Game) CurrentPlayer() byte {
	return g.currPlayer
}
// Board method returns a deep copy of the current board state, so the
// caller cannot mutate the game through the returned slices.
func (g Game) Board() [][]byte {
	snapshot := make([][]byte, g.size)
	for i, row := range g.board {
		snapshot[i] = make([]byte, g.size)
		copy(snapshot[i], row)
	}
	return snapshot
}
// Get the minimum weighted playing position; ties favor a.
// Rewritten without the else-after-return (Go idiom).
func min(a, b move) move {
	if b.value < a.value {
		return b
	}
	return a
}
// Get the maximum weighted playing position; ties favor a.
// Rewritten without the else-after-return (Go idiom).
func max(a, b move) move {
	if b.value > a.value {
		return b
	}
	return a
}
// New function Initializes a game structure with a sz-sized board.
// First player is always Cross.
func New(sz int) Game {
	// Build the empty board first, then assemble the Game value.
	board := make([][]byte, sz)
	for i := range board {
		row := make([]byte, sz)
		for j := range row {
			row[j] = Empty
		}
		board[i] = row
	}
	return Game{
		board:      board,
		size:       sz,
		count:      sz * sz,
		currPlayer: Cross, // First player is Cross
	}
}
// copyGame returns a deep copy of the current game, including a fresh
// board, so the copy can be mutated without touching the original.
// Improved: row contents are duplicated with the built-in copy (a single
// memmove per row) instead of an element-by-element loop.
func (g Game) copyGame() (ng Game) {
	// Allocate a new Game structure
	ng = Game{
		board:      make([][]byte, g.size),
		size:       g.size,
		count:      g.count,
		currPlayer: g.currPlayer,
	}
	// Copy board.
	for i := range ng.board {
		ng.board[i] = make([]byte, g.size)
		copy(ng.board[i], g.board[i])
	}
	return
}
// Play method checks if the coordinates are inside the board and if it is the
// given player's turn.
//
// Return true and winner (Empty means draw) if the move ended the game.
//
// Fixed: removed a leftover debug `print` (the builtin, writing to stderr)
// on the already-played-cell path — the error return already reports the
// condition. Also replaced `count -= 1` with the idiomatic `count--`.
func (g *Game) Play(x, y int, player byte) (done bool, winner byte, err error) {
	// Validation check
	if g.currPlayer != player {
		return false, Empty, errors.New("not player's turn")
	}
	if x < 0 || g.size <= x || y < 0 || g.size <= y {
		return false, Empty, errors.New("invalid position")
	}
	if g.board[x][y] != Empty {
		return false, Empty, errors.New("cell already played")
	}
	// Move is valid, do it!
	g.board[x][y] = player
	// Check if move ended the game (before switching turns).
	isDone, winner := g.isDone()
	g.updateTurn()
	g.count--
	return isDone, winner, nil
}
// PlayAI method checks if is the given player's turn, if so, it makes a move as
// that player.
//
// Return true and winner (Empty means draw) if the move ended the game.
func (g *Game) PlayAI(player byte) (done bool, winner byte, err error) {
	// Validation check
	if g.currPlayer != player {
		return false, Empty, errors.New("not player's turn")
	}
	// A value greater than the maximum value possible for a game.
	// (outcome is bounded by 3*size*size, so this acts as +/- infinity
	// for the alpha-beta window.)
	lim := g.size * g.size * 10
	// Search depth scales with the board size.
	depth := g.size
	///*
	// Configure runtime max processors to use all processors.
	runtime.GOMAXPROCS(runtime.NumCPU())
	// Parallel alpha-beta pruning (active variant).
	m := alphaBetaPruning(*g, depth, -lim, lim, -1, -1, player)
	//*/
	/*
	// Configure runtime max processors to use all processors.
	runtime.GOMAXPROCS(1)
	// Serial alpha-beta pruning (kept for benchmarking/comparison).
	//m := alphaBetaPruningSerial(*g, depth, -lim, lim, 0, -1, player)
	//*/
	// Apply the chosen move through the normal Play path.
	return g.Play(m.i, m.j, player)
}
// Serial implementation of Alpha-Beta Pruning algorithm.
//
// Searches depth plies ahead from position g; (x, y) identify the move
// that produced g and are echoed back in leaf results. Scores come from
// g.outcome and are always from player's point of view.
// TODO: Try not to copy the entire game structure
func alphaBetaPruningSerial(g Game, depth int, alpha, beta int, x, y int, player byte) move {
	// Check for depth limit or if game is over
	if depth == 0 {
		return move{g.outcome(player), x, y}
	}
	if done, _ := g.isDone(); done {
		return move{g.outcome(player), x, y}
	}
	// Check for whose turn it is
	if curr := g.currPlayer; curr == player {
		// Maximizing side: raise alpha as better moves are found.
		p := move{alpha, x, y}
		for i, l := range g.board {
			for j, e := range l {
				var m move
				// Check for possible move
				if e != Empty {
					continue
				}
				// Generate updated game
				ng := g.copyGame()
				ng.Play(i, j, player)
				m = alphaBetaPruningSerial(ng, depth-1, alpha, beta, i, j, player)
				m.i = i
				m.j = j
				// Update alpha
				p = max(p, m)
				alpha = p.value
				// Beta cut-off
				if beta <= alpha {
					return m
				}
			}
		}
		return p
	} else {
		// Minimizing side (opponent to move): lower beta.
		p := move{beta, x, y}
		for i, l := range g.board {
			for j, e := range l {
				var m move
				// Check for possible move
				if e != Empty {
					continue
				}
				// Generate updated game
				ng := g.copyGame()
				ng.Play(i, j, curr)
				m = alphaBetaPruningSerial(ng, depth-1, alpha, beta, i, j, player)
				m.i = i
				m.j = j
				// Update beta
				p = min(p, m)
				beta = p.value
				// Alpha cut-off
				if beta <= alpha {
					return m
				}
			}
		}
		return p
	}
}
// work is one unit for the parallel search: a game position to evaluate
// plus the alpha/beta window at dispatch time and the originating board
// coordinates, which are echoed back with the result.
type work struct {
	g    Game
	a, b int // alpha and beta bounds when the work was queued
	i, j int // the move that produced g
}
// Parallel implementation of Alpha-Beta Pruning algorithm.
//
// The first legal move is searched recursively to seed the alpha/beta
// window, then every legal move is farmed out to NumCPU worker goroutines
// that each run the serial search with the window captured at dispatch
// time; results are folded back into m as they arrive.
// NOTE(review): the move explored in the seeding pass is dispatched again
// by the work loop, so it is evaluated twice — redundant but harmless.
// TODO: Try not to copy the entire game structure
func alphaBetaPruning(g Game, depth int, alpha, beta int, x, y int, player byte) move {
	// Check for depth limit or if game is over
	if depth == 0 {
		return move{g.outcome(player), x, y}
	}
	if done, _ := g.isDone(); done {
		return move{g.outcome(player), x, y}
	}
	var m, p move
	// Find first possible move (searched serially to seed the window).
firstmove:
	for i, l := range g.board {
		for j, e := range l {
			// Check for possible move
			if e != Empty {
				continue
			}
			// Generate updated game
			ng := g.copyGame()
			ng.Play(i, j, g.currPlayer)
			m = alphaBetaPruning(ng, depth-1, alpha, beta, i, j, player)
			m.i = i
			m.j = j
			// Update alpha/beta
			if g.currPlayer == player {
				alpha = m.value
			} else {
				beta = m.value
			}
			break firstmove
		}
	}
	// Initialize one worker goroutine per CPU; workers exit when workCh
	// is closed below.
	workCh := make(chan work, runtime.NumCPU())
	resCh := make(chan move, runtime.NumCPU())
	for i := 0; i < runtime.NumCPU(); i++ {
		go func() {
			// Receive work
			for w := range workCh {
				// Calculate
				nm := alphaBetaPruningSerial(w.g, depth-1, w.a, w.b, w.i, w.j, player)
				nm.i = w.i
				nm.j = w.j
				// Return result
				resCh <- nm
			}
		}()
	}
	// count tracks results still outstanding.
	count := 0
	// Distribute work
genwork:
	for i := 0; i < g.size; i++ {
		for j := 0; j < g.size; j++ {
			// Check for possible move
			if g.board[i][j] != Empty {
				continue
			}
			// Generate updated game
			ng := g.copyGame()
			ng.Play(i, j, g.currPlayer)
			// Send work
			workCh <- work{ng, alpha, beta, i, j}
			count++
			// Get a result or generate another work
			if count == runtime.NumCPU() {
				// Every worker is busy: must wait for a result
				p = <-resCh
				count--
			} else {
				// Check if there is any result available (non-blocking)
				select {
				default:
					continue
				case p = <-resCh:
					count--
				}
			}
			// Evaluate the result received (move p)
			if g.currPlayer == player {
				m = max(m, p)
				alpha = m.value
			} else {
				m = min(m, p)
				beta = m.value
			}
			// Pruned
			if beta <= alpha {
				break genwork
			}
		}
	}
	// Close work channel (stops the workers) and evaluate remaining results.
	close(workCh)
	for count > 0 {
		p = <-resCh
		count--
		// Once pruned, drain without folding in further results.
		if beta <= alpha {
			continue
		}
		if g.currPlayer == player {
			m = max(m, p)
			alpha = m.value
		} else {
			m = min(m, p)
			beta = m.value
		}
	}
	return m
}
// updateTurn method updates whose turn it is by swapping the current
// player's symbol.
//
// Assumes the turn was correctly set before call; any other value is
// reported as an error.
func (g *Game) updateTurn() error {
	if g.currPlayer == Cross {
		g.currPlayer = Nought
		return nil
	}
	if g.currPlayer == Nought {
		g.currPlayer = Cross
		return nil
	}
	return errors.New("invalid player turn value")
}
// opponent returns the rival symbol of player, or 0 when player is
// neither Cross nor Nought.
func opponent(player byte) byte {
	if player == Cross {
		return Nought
	}
	if player == Nought {
		return Cross
	}
	return 0
}
// isDone method determines if the game is over, and if it is, its winner.
// If winner is Empty, it was a draw.
func (g Game) isDone() (done bool, winner byte) {
	winner = Empty
	done = true
	var local bool // does the line being scanned still belong to a single player?
	var init byte  // symbol seen in the first cell of the scanned line
	// Check for winner
	for i, sz := 0, g.size; i < sz; i++ {
		// Lines
		local = true
		init = Empty
		for j := 0; j < sz && local; j++ {
			if j == 0 {
				init = g.board[i][j]
			}
			// A win needs every cell filled with the same symbol.
			if g.board[i][j] == Empty || g.board[i][j] != init {
				local = false
			}
		}
		// Return if someone won
		if local {
			return local, init
		}
		// Columns
		local = true
		init = Empty
		for j := 0; j < sz && local; j++ {
			if j == 0 {
				init = g.board[j][i]
			}
			if g.board[j][i] == Empty || g.board[j][i] != init {
				local = false
			}
		}
		// Return if someone won
		if local {
			return local, init
		}
	}
	// Diagonal
	local = true
	init = Empty
	for i, sz := 0, g.size; i < sz && local; i++ {
		if i == 0 {
			init = g.board[i][i]
		}
		if g.board[i][i] == Empty || g.board[i][i] != init {
			local = false
		}
	}
	// Return if someone won
	if local {
		return local, init
	}
	// Anti-diagonal
	local = true
	init = Empty
	for i, sz := 0, g.size; i < sz && local; i++ {
		if i == 0 {
			init = g.board[i][sz-1-i]
		}
		if g.board[i][sz-1-i] == Empty || g.board[i][sz-1-i] != init {
			local = false
		}
	}
	// Return if someone won
	if local {
		return local, init
	}
	// Check for draw: any Empty cell means the game continues.
outerFor:
	for i := range g.board {
		for _, p := range g.board[i] {
			if p == Empty {
				done = false
				break outerFor
			}
		}
	}
	return
}
// Outcome calculates the outcome function for a player (Nought/Cross) for the
// current game.
//
// Each line/column/diagonal that is still held by only one side contributes
// its signed cell count (positive for player, negative for the opponent);
// mixed lines contribute 0. A completed win short-circuits to +/-3*size*size,
// which dominates any achievable sum of partial lines.
func (g Game) outcome(player byte) (sum int) {
	// Only meaningful for an actual player symbol.
	if player != Nought && player != Cross {
		return
	}
	for i, sz := 0, g.size; i < sz; i++ {
		// Lines
		linit, lsum := Empty, 0
		for j := 0; j < sz; j++ {
			// Empty squares don't change the outcome function.
			if g.board[i][j] == Empty {
				continue
			}
			// Initialize initial symbol.
			if linit == Empty {
				linit = g.board[i][j]
			}
			// Different symbols means line sum is 0.
			if g.board[i][j] != linit {
				lsum = 0
				break
			}
			if g.board[i][j] == player {
				lsum += 1 // Increment for player
			} else {
				lsum -= 1 // Decrement for opponent
			}
		}
		// Columns
		cinit, csum := Empty, 0
		for j := 0; j < sz; j++ {
			// Empty squares don't change the outcome function.
			if g.board[j][i] == Empty {
				continue
			}
			// Initialize initial symbol.
			if cinit == Empty {
				cinit = g.board[j][i]
			}
			// Different symbols means column sum is 0.
			if g.board[j][i] != cinit {
				csum = 0
				break
			}
			if g.board[j][i] == player {
				csum += 1 // Increment for player
			} else {
				csum -= 1 // Decrement for opponent
			}
		}
		// A full line/column is an outright win/loss: dominate the score.
		if lsum == sz || csum == sz {
			return 3 * sz * sz
		} else if lsum == -sz || csum == -sz {
			return -(3 * sz * sz)
		}
		sum += lsum + csum
	}
	// Diagonal
	dinit, dsum := Empty, 0
	for i, sz := 0, g.size; i < sz; i++ {
		// Empty squares don't change the outcome function.
		if g.board[i][i] == Empty {
			continue
		}
		// Initialize initial symbol.
		if dinit == Empty {
			dinit = g.board[i][i]
		}
		// Different symbols means diagonal sum is 0.
		if g.board[i][i] != dinit {
			dsum = 0
			break
		}
		if g.board[i][i] == player {
			dsum += 1 // Increment for player
		} else {
			dsum -= 1 // Decrement for opponent
		}
	}
	if dsum == g.size {
		return 3 * g.size * g.size
	} else if dsum == -g.size {
		return -(3 * g.size * g.size)
	}
	sum += dsum
	// Anti-Diagonal
	adinit, adsum := Empty, 0
	for i, sz := 0, g.size; i < sz; i++ {
		// Empty squares don't change the outcome function.
		if g.board[i][sz-1-i] == Empty {
			continue
		}
		// Initialize initial symbol.
		if adinit == Empty {
			adinit = g.board[i][sz-1-i]
		}
		// Different symbols means anti-diagonal sum is 0.
		if g.board[i][sz-1-i] != adinit {
			adsum = 0
			break
		}
		if g.board[i][sz-1-i] == player {
			adsum += 1 // Increment for player
		} else {
			adsum -= 1 // Decrement for opponent
		}
	}
	if adsum == g.size {
		return 3 * g.size * g.size
	} else if adsum == -g.size {
		return -(3 * g.size * g.size)
	}
	sum += adsum
	return
}
|
// Copyright 2012-2016 Apcera Inc. All rights reserved.
package server
import (
"bufio"
"crypto/tls"
"encoding/json"
"fmt"
"math/rand"
"net"
"sync"
"sync/atomic"
"time"
)
// Type of client connection.
const (
// CLIENT is an end user.
CLIENT = iota
// ROUTER is another router in the cluster.
ROUTER
)
const (
// Original Client protocol from 2009.
// http://nats.io/documentation/internals/nats-protocol/
ClientProtoZero = iota
// This signals a client can receive more then the original INFO block.
// This can be used to update clients on other cluster members, etc.
ClientProtoInfo
)
// Seed math/rand once at package load (used e.g. for readCache.prand).
// NOTE(review): a second init further down (next to defaultOpts) repeats
// this exact seeding; one of the two is redundant.
func init() {
	rand.Seed(time.Now().UnixNano())
}
const (
// Scratch buffer size for the processMsg() calls.
msgScratchSize = 512
msgHeadProto = "MSG "
)
// For controlling dynamic buffer sizes.
const (
startBufSize = 512 // For INFO/CONNECT block
minBufSize = 128
maxBufSize = 65536
)
// Represent client booleans with a bitmask
type clientFlag byte
// Some client state represented as flags
const (
connectReceived clientFlag = 1 << iota // The CONNECT proto has been received
firstPongSent // The first PONG has been sent
handshakeComplete // For TLS clients, indicate that the handshake is complete
)
// set turns flag c on (equivalent to setting the boolean to true).
func (cf *clientFlag) set(c clientFlag) {
	*cf = *cf | c
}
// isSet reports whether flag c is currently on.
func (cf clientFlag) isSet(c clientFlag) bool {
	return (cf & c) != 0
}
// setIfNotSet turns flag c on only when it was previously off, and
// reports whether it made the change (false means c was already set).
func (cf *clientFlag) setIfNotSet(c clientFlag) bool {
	if *cf&c != 0 {
		return false
	}
	*cf |= c
	return true
}
// Commenting out for now otherwise megacheck complains.
// We may need that in the future.
// clear unset the flag (would be equivalent to set the boolean to false)
// func (cf *clientFlag) clear(c clientFlag) {
// *cf &= ^c
// }
// client represents one connection to this server, either an end-user
// CLIENT or a cluster ROUTER (see typ).
type client struct {
	// Here first because of use of atomics, and memory alignment.
	stats
	mpay  int64 // maximum payload — presumably enforced by the parser; confirm
	mu    sync.Mutex
	typ   int    // CLIENT or ROUTER
	cid   uint64 // connection id, assigned from Server.gcid in initClient
	opts  clientOpts
	start time.Time
	nc    net.Conn
	ncs   string // cached String() form: "<addr> - cid:N" / "<addr> - rid:N"
	bw    *bufio.Writer
	srv   *Server
	subs  map[string]*subscription
	perms *permissions
	cache readCache
	pcd   map[*client]struct{} // pending clients with data to be flushed (see readLoop)
	atmr  *time.Timer          // auth timeout timer — presumably; confirm with clearAuthTimer
	ptmr  *time.Timer          // ping timer — presumably; confirm
	pout  int
	wfc   int // write-flush counter, drives dynamic buffer tuning in readLoop
	msgb  [msgScratchSize]byte // scratch buffer, pre-seeded with "MSG " in initClient
	last  time.Time            // last activity time
	parseState
	route *route
	debug bool
	trace bool
	flags clientFlag // Compact booleans into a single field. Size will be increased when needed.
}
// permissions holds a client's authorization state, populated by
// RegisterUser from the authenticated User's Permissions.
type permissions struct {
	sub    *Sublist        // subjects the client may subscribe to
	pub    *Sublist        // subjects the client may publish to
	pcache map[string]bool // cached permission decisions, presumably for publish — confirm
}
const (
maxResultCacheSize = 512
maxPermCacheSize = 32
pruneSize = 16
)
// Used in readloop to cache hot subject lookups and group statistics.
// inMsgs/inBytes/subs are reset at the top of each read and flushed into
// the client and server stats via atomic adds afterwards.
type readCache struct {
	genid   uint64 // presumably the sublist generation the results are valid for — confirm
	results map[string]*SublistResult
	prand   *rand.Rand
	inMsgs  int // messages parsed during the current read
	inBytes int // bytes parsed during the current read
	subs    int // subscriptions added during the current read
}
// String returns the cached connection description built in initClient.
func (c *client) String() (id string) {
	return c.ncs
}
// GetOpts returns the options supplied by this client in its CONNECT.
func (c *client) GetOpts() *clientOpts {
	return &c.opts
}
// GetTLSConnectionState returns the TLS ConnectionState if TLS is enabled, nil
// otherwise. Implements the ClientAuth interface.
func (c *client) GetTLSConnectionState() *tls.ConnectionState {
	if tc, ok := c.nc.(*tls.Conn); ok {
		// Copy the state into a local so we can return its address.
		state := tc.ConnectionState()
		return &state
	}
	return nil
}
// subscription represents one client subscription (also used by the
// permission Sublists, which store subject-only entries).
type subscription struct {
	client  *client
	subject []byte
	queue   []byte // queue group — presumably nil for plain subscriptions; confirm
	sid     []byte // client-supplied subscription id
	nm      int64  // presumably messages delivered so far — confirm
	max     int64  // presumably auto-unsubscribe limit — confirm
}
// clientOpts are the options a client supplies in its CONNECT protocol
// line; they are unmarshalled directly from its JSON payload.
type clientOpts struct {
	Verbose       bool   `json:"verbose"`
	Pedantic      bool   `json:"pedantic"`
	SslRequired   bool   `json:"ssl_required"`
	Authorization string `json:"auth_token"`
	Username      string `json:"user"`
	Password      string `json:"pass"`
	Name          string `json:"name"`
	Lang          string `json:"lang"` // client library language; empty for routes (see processConnect)
	Version       string `json:"version"`
	Protocol      int    `json:"protocol"` // ClientProtoZero or ClientProtoInfo
}
var defaultOpts = clientOpts{Verbose: true, Pedantic: true}
// NOTE(review): duplicate of the init near the top of this file — rand is
// already seeded at package load, so this second seeding is redundant and
// one of the two should be removed.
func init() {
	rand.Seed(time.Now().UnixNano())
}
// initClient finishes construction of a client: assigns its id, sets up
// the buffered writer, scratch buffers and the cached description string.
// Lock should be held.
func (c *client) initClient() {
	s := c.srv
	// Atomically draw the next connection id from the server.
	c.cid = atomic.AddUint64(&s.gcid, 1)
	c.bw = bufio.NewWriterSize(c.nc, startBufSize)
	c.subs = make(map[string]*subscription)
	c.debug = (atomic.LoadInt32(&c.srv.logging.debug) != 0)
	c.trace = (atomic.LoadInt32(&c.srv.logging.trace) != 0)
	// This is a scratch buffer used for processMsg()
	// The msg header starts with "MSG ",
	// in bytes that is [77 83 71 32].
	c.msgb = [msgScratchSize]byte{77, 83, 71, 32}
	// This is to track pending clients that have data to be flushed
	// after we process inbound msgs from our own connection.
	c.pcd = make(map[*client]struct{})
	// snapshot the string version of the connection
	conn := "-"
	if ip, ok := c.nc.(*net.TCPConn); ok {
		addr := ip.RemoteAddr().(*net.TCPAddr)
		conn = fmt.Sprintf("%s:%d", addr.IP, addr.Port)
	}
	// cid for end-user clients, rid for routes.
	switch c.typ {
	case CLIENT:
		c.ncs = fmt.Sprintf("%s - cid:%d", conn, c.cid)
	case ROUTER:
		c.ncs = fmt.Sprintf("%s - rid:%d", conn, c.cid)
	}
}
// RegisterUser allows auth to call back into a new client
// with the authenticated user. This is used to map any permissions
// into the client. All permission state is swapped under the client lock.
func (c *client) RegisterUser(user *User) {
	if user.Permissions == nil {
		// Reset perms to nil in case client previously had them.
		c.mu.Lock()
		c.perms = nil
		c.mu.Unlock()
		return
	}
	// Process Permissions and map into client connection structures.
	c.mu.Lock()
	defer c.mu.Unlock()
	// Pre-allocate all to simplify checks later.
	c.perms = &permissions{}
	c.perms.sub = NewSublist()
	c.perms.pub = NewSublist()
	c.perms.pcache = make(map[string]bool)
	// Loop over publish permissions; each subject becomes a bare
	// subscription entry in the pub Sublist.
	for _, pubSubject := range user.Permissions.Publish {
		sub := &subscription{subject: []byte(pubSubject)}
		c.perms.pub.Insert(sub)
	}
	// Loop over subscribe permissions
	for _, subSubject := range user.Permissions.Subscribe {
		sub := &subscription{subject: []byte(subSubject)}
		c.perms.sub.Insert(sub)
	}
}
// readLoop is the per-connection read goroutine: it reads from the socket,
// feeds the parser, flushes pending clients we produced data for, and
// adapts the read buffer size to observed traffic.
func (c *client) readLoop() {
	// Grab the connection off the client, it will be cleared on a close.
	// We check for that after the loop, but want to avoid a nil dereference
	c.mu.Lock()
	nc := c.nc
	s := c.srv
	defer s.grWG.Done()
	c.mu.Unlock()
	if nc == nil {
		return
	}
	// Start read buffer.
	b := make([]byte, startBufSize)
	// Snapshot server options.
	opts := s.getOpts()
	for {
		n, err := nc.Read(b)
		if err != nil {
			c.closeConnection()
			return
		}
		// Grab for updates for last activity.
		last := time.Now()
		// Clear inbound stats cache
		c.cache.inMsgs = 0
		c.cache.inBytes = 0
		c.cache.subs = 0
		if err := c.parse(b[:n]); err != nil {
			// handled inline: max-payload and auth errors have already
			// been reported to the client by the parser path.
			if err != ErrMaxPayload && err != ErrAuthorization {
				c.Errorf("Error reading from client: %s", err.Error())
				c.sendErr("Parser Error")
				c.closeConnection()
			}
			return
		}
		// Updates stats for client and server that were collected
		// from parsing through the buffer.
		atomic.AddInt64(&c.inMsgs, int64(c.cache.inMsgs))
		atomic.AddInt64(&c.inBytes, int64(c.cache.inBytes))
		atomic.AddInt64(&s.inMsgs, int64(c.cache.inMsgs))
		atomic.AddInt64(&s.inBytes, int64(c.cache.inBytes))
		// Check pending clients for flush.
		for cp := range c.pcd {
			// Flush those in the set
			cp.mu.Lock()
			if cp.nc != nil {
				// Gather the flush calls that happened before now.
				// This is a signal into us about dynamic buffer allocation tuning.
				wfc := cp.wfc
				cp.wfc = 0
				cp.nc.SetWriteDeadline(time.Now().Add(opts.WriteDeadline))
				err := cp.bw.Flush()
				cp.nc.SetWriteDeadline(time.Time{})
				if err != nil {
					c.Debugf("Error flushing: %v", err)
					// closeConnection takes its own lock; drop ours around it.
					cp.mu.Unlock()
					cp.closeConnection()
					cp.mu.Lock()
				} else {
					// Update outbound last activity.
					cp.last = last
					// Check if we should tune the buffer.
					sz := cp.bw.Available()
					// Check for expansion opportunity.
					if wfc > 2 && sz <= maxBufSize/2 {
						cp.bw = bufio.NewWriterSize(cp.nc, sz*2)
					}
					// Check for shrinking opportunity.
					if wfc == 0 && sz >= minBufSize*2 {
						cp.bw = bufio.NewWriterSize(cp.nc, sz/2)
					}
				}
			}
			cp.mu.Unlock()
			delete(c.pcd, cp)
		}
		// Check to see if we got closed, e.g. slow consumer
		c.mu.Lock()
		nc := c.nc // deliberately shadows the outer nc: re-read under the lock
		// Activity based on interest changes or data/msgs.
		if c.cache.inMsgs > 0 || c.cache.subs > 0 {
			c.last = last
		}
		c.mu.Unlock()
		if nc == nil {
			return
		}
		// Update buffer size as/if needed.
		// Grow when the buffer was filled completely.
		if n == len(b) && len(b) < maxBufSize {
			b = make([]byte, len(b)*2)
		}
		// Shrink, for now don't accelerate, ping/pong will eventually sort it out.
		if n < len(b)/2 && len(b) > minBufSize {
			b = make([]byte, len(b)/2)
		}
	}
}
// traceMsg logs an inbound message payload when tracing is enabled,
// stripping the trailing CR-LF before printing.
func (c *client) traceMsg(msg []byte) {
	if !c.trace {
		return
	}
	// FIXME(dlc), allow limits to printable payload
	c.Tracef("->> MSG_PAYLOAD: [%s]", string(msg[:len(msg)-LEN_CR_LF]))
}
// traceInOp logs an inbound protocol operation ("->>").
func (c *client) traceInOp(op string, arg []byte) {
	c.traceOp("->> %s", op, arg)
}
// traceOutOp logs an outbound protocol operation ("<<-").
func (c *client) traceOutOp(op string, arg []byte) {
	c.traceOp("<<- %s", op, arg)
}
// traceOp logs a protocol operation when tracing is enabled. format is
// expected to carry a single %s verb; op and arg (when present) are
// collected into one slice that is rendered through it.
func (c *client) traceOp(format, op string, arg []byte) {
	if !c.trace {
		return
	}
	opa := []interface{}{}
	if op != "" {
		opa = append(opa, op)
	}
	if arg != nil {
		opa = append(opa, string(arg))
	}
	// Note: opa is passed as a single argument (the whole slice prints
	// through the one %s) — it is NOT expanded with opa... .
	c.Tracef(format, opa)
}
// Process the information messages from Clients and other Routes.
// Only routes act on INFO; for plain clients the payload is parsed
// (so malformed JSON still errors) but otherwise ignored.
func (c *client) processInfo(arg []byte) error {
	info := Info{}
	if err := json.Unmarshal(arg, &info); err != nil {
		return err
	}
	if c.typ == ROUTER {
		c.processRouteInfo(&info)
	}
	return nil
}
// processErr logs a protocol error received from the remote side
// (labelled by connection type) and tears down the connection.
func (c *client) processErr(errStr string) {
	switch c.typ {
	case CLIENT:
		c.Errorf("Client Error %s", errStr)
	case ROUTER:
		c.Errorf("Route Error %s", errStr)
	}
	c.closeConnection()
}
// processConnect handles the CONNECT protocol line: it stops the auth
// timer, unmarshals the client options under the lock, performs
// authorization, and validates protocol level / connection type.
// On any violation it closes the connection and returns the error.
func (c *client) processConnect(arg []byte) error {
	c.traceInOp("CONNECT", arg)
	c.mu.Lock()
	// If we can't stop the timer because the callback is in progress...
	if !c.clearAuthTimer() {
		// wait for it to finish and handle sending the failure back to
		// the client.
		for c.nc != nil {
			c.mu.Unlock()
			time.Sleep(25 * time.Millisecond)
			c.mu.Lock()
		}
		c.mu.Unlock()
		return nil
	}
	c.last = time.Now()
	typ := c.typ
	r := c.route
	srv := c.srv
	// Moved unmarshalling of clients' Options under the lock.
	// The client has already been added to the server map, so it is possible
	// that other routines lookup the client, and access its options under
	// the client's lock, so unmarshalling the options outside of the lock
	// would cause data RACEs.
	if err := json.Unmarshal(arg, &c.opts); err != nil {
		c.mu.Unlock()
		return err
	}
	// Indicate that the CONNECT protocol has been received, and that the
	// server now knows which protocol this client supports.
	c.flags.set(connectReceived)
	// Capture these under lock
	proto := c.opts.Protocol
	verbose := c.opts.Verbose
	lang := c.opts.Lang
	c.mu.Unlock()
	if srv != nil {
		// As soon as c.opts is unmarshalled and if the proto is at
		// least ClientProtoInfo, we need to increment the following counter.
		// This is decremented when client is removed from the server's
		// clients map.
		if proto >= ClientProtoInfo {
			srv.mu.Lock()
			srv.cproto++
			srv.mu.Unlock()
		}
		// Check for Auth
		if ok := srv.checkAuthorization(c); !ok {
			c.authViolation()
			return ErrAuthorization
		}
	}
	// Check client protocol request if it exists.
	if typ == CLIENT && (proto < ClientProtoZero || proto > ClientProtoInfo) {
		c.sendErr(ErrBadClientProtocol.Error())
		c.closeConnection()
		return ErrBadClientProtocol
	} else if typ == ROUTER && lang != "" {
		// Way to detect clients that incorrectly connect to the route listen
		// port. Client provide Lang in the CONNECT protocol while ROUTEs don't.
		c.sendErr(ErrClientConnectedToRoutePort.Error())
		c.closeConnection()
		return ErrClientConnectedToRoutePort
	}
	// Grab connection name of remote route.
	if typ == ROUTER && r != nil {
		c.mu.Lock()
		c.route.remoteID = c.opts.Name
		c.mu.Unlock()
	}
	// Verbose clients expect a +OK acknowledgement.
	if verbose {
		c.sendOK()
	}
	return nil
}
// authTimeout fires when the client failed to authenticate within the
// configured window; it notifies the client and drops the connection.
func (c *client) authTimeout() {
	c.sendErr(ErrAuthTimeout.Error())
	c.Debugf("Authorization Timeout")
	c.closeConnection()
}
// authViolation logs a failed authorization (including the username when
// user-based auth is configured), notifies the client and closes the
// connection.
func (c *client) authViolation() {
	if c.srv != nil && c.srv.getOpts().Users != nil {
		c.Errorf("%s - User %q",
			ErrAuthorization.Error(),
			c.opts.Username)
	} else {
		// Use an explicit "%s" verb: passing the error text as the format
		// string is a printf vet violation and would garble any '%' in it.
		c.Errorf("%s", ErrAuthorization.Error())
	}
	c.sendErr("Authorization Violation")
	c.closeConnection()
}
// maxConnExceeded rejects a connection that would exceed the server's
// connection limit and closes it.
func (c *client) maxConnExceeded() {
	// "%s" avoids using a non-constant format string (printf vet rule).
	c.Errorf("%s", ErrTooManyConnections.Error())
	c.sendErr(ErrTooManyConnections.Error())
	c.closeConnection()
}
// maxPayloadViolation handles a publish whose size sz exceeds the allowed
// maximum max: log it, inform the client, and close the connection.
func (c *client) maxPayloadViolation(sz int, max int64) {
	c.Errorf("%s: %d vs %d", ErrMaxPayload.Error(), sz, max)
	c.sendErr("Maximum Payload Violation")
	c.closeConnection()
}
// sendProto writes a raw protocol line to the client's buffered writer,
// optionally flushing. A write deadline is set when flushing or when the
// write would overflow the buffer (and thus trigger an implicit flush).
// Assume the lock is held upon entry.
func (c *client) sendProto(info []byte, doFlush bool) error {
	var err error
	if c.bw != nil && c.nc != nil {
		deadlineSet := false
		if doFlush || c.bw.Available() < len(info) {
			c.nc.SetWriteDeadline(time.Now().Add(c.srv.getOpts().WriteDeadline))
			deadlineSet = true
		}
		_, err = c.bw.Write(info)
		if err == nil && doFlush {
			err = c.bw.Flush()
		}
		if deadlineSet {
			// Clear the deadline so subsequent writes are not constrained.
			c.nc.SetWriteDeadline(time.Time{})
		}
	}
	return err
}
// sendInfo sends a pre-marshalled INFO protocol block with an immediate
// flush. Any write error is intentionally ignored here; a broken connection
// will be detected by subsequent writes or the read loop.
// Assume the lock is held upon entry.
func (c *client) sendInfo(info []byte) {
	c.sendProto(info, true)
}
// sendErr sends an -ERR protocol message containing err to the client,
// flushing immediately.
func (c *client) sendErr(err string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.traceOutOp("-ERR", []byte(err))
	c.sendProto([]byte(fmt.Sprintf("-ERR '%s'\r\n", err)), true)
}
// sendOK queues a +OK protocol message for the client. The write is not
// flushed here; the client is marked pending so the read loop flushes it.
func (c *client) sendOK() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.traceOutOp("OK", nil)
	// Can not autoflush this one, needs to be async.
	c.sendProto([]byte("+OK\r\n"), false)
	c.pcd[c] = needFlush
}
// processPing answers an inbound PING with a PONG. For clients that support
// async INFO (protocol >= ClientProtoInfo), the first PING also triggers a
// check for cluster topology changes that happened since the client
// connected, in which case an updated INFO is sent.
func (c *client) processPing() {
	c.mu.Lock()
	c.traceInOp("PING", nil)
	if c.nc == nil {
		c.mu.Unlock()
		return
	}
	c.traceOutOp("PONG", nil)
	if err := c.sendProto([]byte("PONG\r\n"), true); err != nil {
		c.clearConnection()
		c.Debugf("Error on Flush, error %s", err.Error())
		c.mu.Unlock()
		return
	}
	// The CONNECT should have been received, but make sure it
	// is so before proceeding
	if !c.flags.isSet(connectReceived) {
		c.mu.Unlock()
		return
	}
	// If we are here, the CONNECT has been received so we know
	// if this client supports async INFO or not.
	var (
		checkClusterChange bool
		srv                = c.srv
	)
	// For older clients, just flip the firstPongSent flag if not already
	// set and we are done.
	if c.opts.Protocol < ClientProtoInfo || srv == nil {
		c.flags.setIfNotSet(firstPongSent)
	} else {
		// This is a client that supports async INFO protocols.
		// If this is the first PING (so firstPongSent is not set yet),
		// we will need to check if there was a change in cluster topology.
		checkClusterChange = !c.flags.isSet(firstPongSent)
	}
	c.mu.Unlock()
	if checkClusterChange {
		// Lock ordering: server lock first, then client lock.
		srv.mu.Lock()
		c.mu.Lock()
		// Now that we are under both locks, we can flip the flag.
		// This prevents sendAsyncInfoToClients() and the code here
		// from sending a double INFO protocol.
		c.flags.set(firstPongSent)
		// If there was a cluster update since this client was created,
		// send an updated INFO protocol now.
		if srv.lastCURLsUpdate >= c.start.UnixNano() {
			c.sendInfo(srv.infoJSON)
		}
		c.mu.Unlock()
		srv.mu.Unlock()
	}
}
// processPong handles an inbound PONG by resetting the outstanding-ping
// counter used for stale-connection detection.
func (c *client) processPong() {
	c.traceInOp("PONG", nil)
	c.mu.Lock()
	defer c.mu.Unlock()
	c.pout = 0
}
// processMsgArgs parses the arguments of a route-delivered MSG protocol
// line (subject, sid, optional reply, size) into c.pa. Returns an error on
// malformed input or a bad size.
func (c *client) processMsgArgs(arg []byte) error {
	if c.trace {
		c.traceInOp("MSG", arg)
	}
	// Unroll splitArgs to avoid runtime/heap issues
	a := [MAX_MSG_ARGS][]byte{}
	args := a[:0]
	start := -1
	for i, b := range arg {
		switch b {
		case ' ', '\t', '\r', '\n':
			if start >= 0 {
				args = append(args, arg[start:i])
				start = -1
			}
		default:
			if start < 0 {
				start = i
			}
		}
	}
	if start >= 0 {
		args = append(args, arg[start:])
	}
	switch len(args) {
	case 3:
		// subject sid size
		c.pa.reply = nil
		c.pa.szb = args[2]
		c.pa.size = parseSize(args[2])
	case 4:
		// subject sid reply size
		c.pa.reply = args[2]
		c.pa.szb = args[3]
		c.pa.size = parseSize(args[3])
	default:
		return fmt.Errorf("processMsgArgs Parse Error: '%s'", arg)
	}
	if c.pa.size < 0 {
		return fmt.Errorf("processMsgArgs Bad or Missing Size: '%s'", arg)
	}
	// Common ones processed after check for arg length
	c.pa.subject = args[0]
	c.pa.sid = args[1]
	return nil
}
// processPub parses the arguments of a PUB protocol line (subject, optional
// reply, size) into c.pa and enforces the max payload limit. Note that a
// pedantic-mode invalid subject only sends an error back; it does not fail
// the parse.
func (c *client) processPub(arg []byte) error {
	if c.trace {
		c.traceInOp("PUB", arg)
	}
	// Unroll splitArgs to avoid runtime/heap issues
	a := [MAX_PUB_ARGS][]byte{}
	args := a[:0]
	start := -1
	for i, b := range arg {
		switch b {
		case ' ', '\t':
			if start >= 0 {
				args = append(args, arg[start:i])
				start = -1
			}
		default:
			if start < 0 {
				start = i
			}
		}
	}
	if start >= 0 {
		args = append(args, arg[start:])
	}
	switch len(args) {
	case 2:
		// subject size
		c.pa.subject = args[0]
		c.pa.reply = nil
		c.pa.size = parseSize(args[1])
		c.pa.szb = args[1]
	case 3:
		// subject reply size
		c.pa.subject = args[0]
		c.pa.reply = args[1]
		c.pa.size = parseSize(args[2])
		c.pa.szb = args[2]
	default:
		return fmt.Errorf("processPub Parse Error: '%s'", arg)
	}
	if c.pa.size < 0 {
		return fmt.Errorf("processPub Bad or Missing Size: '%s'", arg)
	}
	// mpay is read atomically since it can be updated concurrently.
	maxPayload := atomic.LoadInt64(&c.mpay)
	if maxPayload > 0 && int64(c.pa.size) > maxPayload {
		c.maxPayloadViolation(c.pa.size, maxPayload)
		return ErrMaxPayload
	}
	if c.opts.Pedantic && !IsValidLiteralSubject(string(c.pa.subject)) {
		c.sendErr("Invalid Subject")
	}
	return nil
}
// splitArg tokenizes arg on whitespace (space, tab, CR, LF) and returns the
// fields as sub-slices of arg. A fixed-size backing array avoids heap
// allocation in the common case.
func splitArg(arg []byte) [][]byte {
	var scratch [MAX_MSG_ARGS][]byte
	fields := scratch[:0]
	tokStart := -1
	for i := 0; i < len(arg); i++ {
		ch := arg[i]
		if ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n' {
			if tokStart >= 0 {
				fields = append(fields, arg[tokStart:i])
				tokStart = -1
			}
		} else if tokStart < 0 {
			tokStart = i
		}
	}
	if tokStart >= 0 {
		fields = append(fields, arg[tokStart:])
	}
	return fields
}
// processSub handles a SUB protocol line: it parses subject/[queue]/sid,
// checks subscribe permissions, registers the subscription with the
// sublist, and forwards it to the cluster when appropriate.
func (c *client) processSub(argo []byte) (err error) {
	c.traceInOp("SUB", argo)
	// Indicate activity.
	c.cache.subs += 1
	// Copy so we do not reference a potentially large buffer
	arg := make([]byte, len(argo))
	copy(arg, argo)
	args := splitArg(arg)
	sub := &subscription{client: c}
	switch len(args) {
	case 2:
		// subject sid
		sub.subject = args[0]
		sub.queue = nil
		sub.sid = args[1]
	case 3:
		// subject queue sid
		sub.subject = args[0]
		sub.queue = args[1]
		sub.sid = args[2]
	default:
		return fmt.Errorf("processSub Parse Error: '%s'", arg)
	}
	shouldForward := false
	c.mu.Lock()
	if c.nc == nil {
		c.mu.Unlock()
		return nil
	}
	// Check permissions if applicable.
	if !c.canSubscribe(sub.subject) {
		c.mu.Unlock()
		c.sendErr(fmt.Sprintf("Permissions Violation for Subscription to %q", sub.subject))
		c.Errorf("Subscription Violation - User %q, Subject %q, SID %s",
			c.opts.Username, sub.subject, sub.sid)
		return nil
	}
	// We can have two SUB protocols coming from a route due to some
	// race conditions. We should make sure that we process only one.
	sid := string(sub.sid)
	if c.subs[sid] == nil {
		c.subs[sid] = sub
		if c.srv != nil {
			err = c.srv.sl.Insert(sub)
			if err != nil {
				delete(c.subs, sid)
			} else {
				// Routes do not re-forward subscriptions (1-hop semantics).
				shouldForward = c.typ != ROUTER
			}
		}
	}
	c.mu.Unlock()
	if err != nil {
		c.sendErr("Invalid Subject")
		return nil
	} else if c.opts.Verbose {
		c.sendOK()
	}
	if shouldForward {
		c.srv.broadcastSubscribe(sub)
	}
	return nil
}
// canSubscribe reports whether the client is authorized to subscribe to
// the given subject. When no permissions are configured everything is
// allowed. Assumes caller is holding the client lock.
func (c *client) canSubscribe(sub []byte) bool {
	if c.perms == nil {
		return true
	}
	matches := c.perms.sub.Match(string(sub))
	return len(matches.psubs) > 0
}
// unsubscribe removes sub from the client and the server's sublist, unless
// an auto-unsubscribe max is set and not yet reached, in which case removal
// is deferred until the max message count is hit.
func (c *client) unsubscribe(sub *subscription) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if sub.max > 0 && sub.nm < sub.max {
		c.Debugf(
			"Deferring actual UNSUB(%s): %d max, %d received\n",
			string(sub.subject), sub.max, sub.nm)
		return
	}
	c.traceOp("<-> %s", "DELSUB", sub.sid)
	delete(c.subs, string(sub.sid))
	if c.srv != nil {
		c.srv.sl.Remove(sub)
	}
}
// processUnsub handles an UNSUB protocol line: "sid [max]". With a max it
// arms auto-unsubscribe; without, it unsubscribes immediately, forwarding
// to the cluster for non-route clients.
func (c *client) processUnsub(arg []byte) error {
	c.traceInOp("UNSUB", arg)
	args := splitArg(arg)
	var sid []byte
	max := -1
	switch len(args) {
	case 1:
		sid = args[0]
	case 2:
		sid = args[0]
		max = parseSize(args[1])
	default:
		return fmt.Errorf("processUnsub Parse Error: '%s'", arg)
	}
	// Indicate activity.
	c.cache.subs += 1
	var sub *subscription
	unsub := false
	shouldForward := false
	ok := false
	c.mu.Lock()
	if sub, ok = c.subs[string(sid)]; ok {
		if max > 0 {
			sub.max = int64(max)
		} else {
			// Clear it here to override
			sub.max = 0
		}
		unsub = true
		shouldForward = c.typ != ROUTER && c.srv != nil
	}
	c.mu.Unlock()
	if unsub {
		c.unsubscribe(sub)
	}
	if shouldForward {
		c.srv.broadcastUnSubscribe(sub)
	}
	// NOTE(review): c.opts.Verbose is read without the client lock here —
	// confirm this is benign (opts are only written in processConnect).
	if c.opts.Verbose {
		c.sendOK()
	}
	return nil
}
func (c *client) msgHeader(mh []byte, sub *subscription) []byte {
mh = append(mh, sub.sid...)
mh = append(mh, ' ')
if c.pa.reply != nil {
mh = append(mh, c.pa.reply...)
mh = append(mh, ' ')
}
mh = append(mh, c.pa.szb...)
mh = append(mh, "\r\n"...)
return mh
}
// Used to treat maps as efficient set
var needFlush = struct{}{}

// routeSeen marks remote routes that have already received a given message
// during fan-out in processMsg.
var routeSeen = struct{}{}
// deliverMsg writes the pre-built MSG header mh followed by the payload msg
// to sub's client, handling auto-unsubscribe limits, write deadlines and
// slow-consumer detection. c is the publishing client; sub.client is the
// destination. Takes the destination client's lock internally.
func (c *client) deliverMsg(sub *subscription, mh, msg []byte) {
	if sub.client == nil {
		return
	}
	client := sub.client
	client.mu.Lock()
	sub.nm++
	// Check if we should auto-unsubscribe.
	if sub.max > 0 {
		// For routing..
		shouldForward := client.typ != ROUTER && client.srv != nil
		// If we are at the exact number, unsubscribe but
		// still process the message in hand, otherwise
		// unsubscribe and drop message on the floor.
		if sub.nm == sub.max {
			c.Debugf("Auto-unsubscribe limit of %d reached for sid '%s'\n", sub.max, string(sub.sid))
			// Due to defer, reverse the code order so that execution
			// is consistent with other cases where we unsubscribe.
			if shouldForward {
				defer client.srv.broadcastUnSubscribe(sub)
			}
			defer client.unsubscribe(sub)
		} else if sub.nm > sub.max {
			c.Debugf("Auto-unsubscribe limit [%d] exceeded\n", sub.max)
			client.mu.Unlock()
			client.unsubscribe(sub)
			if shouldForward {
				client.srv.broadcastUnSubscribe(sub)
			}
			return
		}
	}
	if client.nc == nil {
		client.mu.Unlock()
		return
	}
	// Update statistics
	// The msg includes the CR_LF, so pull back out for accounting.
	msgSize := int64(len(msg) - LEN_CR_LF)
	// No atomic needed since accessed under client lock.
	// Monitor is reading those also under client's lock.
	client.outMsgs++
	client.outBytes += msgSize
	atomic.AddInt64(&c.srv.outMsgs, 1)
	atomic.AddInt64(&c.srv.outBytes, msgSize)
	// Check to see if our writes will cause a flush
	// in the underlying bufio. If so limit time we
	// will wait for flush to complete.
	deadlineSet := false
	if client.bw.Available() < (len(mh) + len(msg)) {
		client.wfc++
		client.nc.SetWriteDeadline(time.Now().Add(client.srv.getOpts().WriteDeadline))
		deadlineSet = true
	}
	// Deliver to the client.
	_, err := client.bw.Write(mh)
	if err != nil {
		goto writeErr
	}
	_, err = client.bw.Write(msg)
	if err != nil {
		goto writeErr
	}
	if c.trace {
		client.traceOutOp(string(mh[:len(mh)-LEN_CR_LF]), nil)
	}
	// TODO(dlc) - Do we need this or can we just call always?
	if deadlineSet {
		client.nc.SetWriteDeadline(time.Time{})
	}
	client.mu.Unlock()
	// Mark the destination as pending a flush by our read loop.
	c.pcd[client] = needFlush
	return
writeErr:
	if deadlineSet {
		client.nc.SetWriteDeadline(time.Time{})
	}
	client.mu.Unlock()
	// A write timeout means a slow consumer: count it and drop the client.
	// Any other write error is only logged.
	if ne, ok := err.(net.Error); ok && ne.Timeout() {
		atomic.AddInt64(&client.srv.slowConsumers, 1)
		client.Noticef("Slow Consumer Detected")
		client.closeConnection()
	} else {
		c.Debugf("Error writing msg: %v", err)
	}
}
// processMsg is called to process an inbound msg from a client.
// It enforces publish permissions (with a bounded permission cache),
// resolves matching subscriptions (with a bounded result cache keyed by the
// sublist generation id), and fans the message out to plain and queue
// subscribers, applying 1-hop semantics for routes.
func (c *client) processMsg(msg []byte) {
	// Snapshot server.
	srv := c.srv
	// Update statistics
	// The msg includes the CR_LF, so pull back out for accounting.
	c.cache.inMsgs += 1
	c.cache.inBytes += len(msg) - LEN_CR_LF
	if c.trace {
		c.traceMsg(msg)
	}
	// Disallow publish to _SYS.>, these are reserved for internals.
	// NOTE(review): assumes c.pa.subject is non-empty here (index [0]) —
	// confirm the parser guarantees this.
	if c.pa.subject[0] == '_' && len(c.pa.subject) > 4 &&
		c.pa.subject[1] == 'S' && c.pa.subject[2] == 'Y' &&
		c.pa.subject[3] == 'S' && c.pa.subject[4] == '.' {
		c.pubPermissionViolation(c.pa.subject)
		return
	}
	// Check if published subject is allowed if we have permissions in place.
	if c.perms != nil {
		allowed, ok := c.perms.pcache[string(c.pa.subject)]
		if ok && !allowed {
			c.pubPermissionViolation(c.pa.subject)
			return
		}
		if !ok {
			r := c.perms.pub.Match(string(c.pa.subject))
			notAllowed := len(r.psubs) == 0
			if notAllowed {
				c.pubPermissionViolation(c.pa.subject)
				c.perms.pcache[string(c.pa.subject)] = false
			} else {
				c.perms.pcache[string(c.pa.subject)] = true
			}
			// Prune if needed.
			if len(c.perms.pcache) > maxPermCacheSize {
				// Prune the permissions cache. Keeps us from unbounded growth.
				r := 0 // prune counter (shadows the match result above)
				for subject := range c.perms.pcache {
					delete(c.perms.pcache, subject)
					r++
					if r > pruneSize {
						break
					}
				}
			}
			// Return here to allow the pruning code to run if needed.
			if notAllowed {
				return
			}
		}
	}
	if c.opts.Verbose {
		c.sendOK()
	}
	// Mostly under testing scenarios.
	if srv == nil {
		return
	}
	var r *SublistResult
	var ok bool
	// The result cache is only valid for the sublist generation it was
	// built against; a genid change invalidates it wholesale.
	genid := atomic.LoadUint64(&srv.sl.genid)
	if genid == c.cache.genid && c.cache.results != nil {
		r, ok = c.cache.results[string(c.pa.subject)]
	} else {
		// reset
		c.cache.results = make(map[string]*SublistResult)
		c.cache.genid = genid
	}
	if !ok {
		subject := string(c.pa.subject)
		r = srv.sl.Match(subject)
		c.cache.results[subject] = r
		if len(c.cache.results) > maxResultCacheSize {
			// Prune the results cache. Keeps us from unbounded growth.
			r := 0 // prune counter (shadows the SublistResult above)
			for subject := range c.cache.results {
				delete(c.cache.results, subject)
				r++
				if r > pruneSize {
					break
				}
			}
		}
	}
	// Check for no interest, short circuit if so.
	if len(r.psubs) == 0 && len(r.qsubs) == 0 {
		return
	}
	// Check for pedantic and bad subject.
	if c.opts.Pedantic && !IsValidLiteralSubject(string(c.pa.subject)) {
		return
	}
	// Scratch buffer.. (pre-seeded with the "MSG " prefix in initClient)
	msgh := c.msgb[:len(msgHeadProto)]
	// msg header
	msgh = append(msgh, c.pa.subject...)
	msgh = append(msgh, ' ')
	si := len(msgh)
	isRoute := c.typ == ROUTER
	// If we are a route and we have a queue subscription, deliver direct
	// since they are sent direct via L2 semantics. If the match is a queue
	// subscription, we will return from here regardless if we find a sub.
	if isRoute {
		if sub, ok := srv.routeSidQueueSubscriber(c.pa.sid); ok {
			if sub != nil {
				mh := c.msgHeader(msgh[:si], sub)
				c.deliverMsg(sub, mh, msg)
			}
			return
		}
	}
	// Used to only send normal subscriptions once across a given route.
	var rmap map[string]struct{}
	// Loop over all normal subscriptions that match.
	for _, sub := range r.psubs {
		// Check if this is a send to a ROUTER, make sure we only send it
		// once. The other side will handle the appropriate re-processing
		// and fan-out. Also enforce 1-Hop semantics, so no routing to another.
		if sub.client.typ == ROUTER {
			// Skip if sourced from a ROUTER and going to another ROUTER.
			// This is 1-Hop semantics for ROUTERs.
			if isRoute {
				continue
			}
			// Check to see if we have already sent it here.
			if rmap == nil {
				rmap = make(map[string]struct{}, srv.numRoutes())
			}
			sub.client.mu.Lock()
			if sub.client.nc == nil || sub.client.route == nil ||
				sub.client.route.remoteID == "" {
				c.Debugf("Bad or Missing ROUTER Identity, not processing msg")
				sub.client.mu.Unlock()
				continue
			}
			if _, ok := rmap[sub.client.route.remoteID]; ok {
				c.Debugf("Ignoring route, already processed")
				sub.client.mu.Unlock()
				continue
			}
			rmap[sub.client.route.remoteID] = routeSeen
			sub.client.mu.Unlock()
		}
		// Normal delivery
		mh := c.msgHeader(msgh[:si], sub)
		c.deliverMsg(sub, mh, msg)
	}
	// Now process any queue subs we have if not a route
	if !isRoute {
		// Check to see if we have our own rand yet. Global rand
		// has contention with lots of clients, etc.
		if c.cache.prand == nil {
			c.cache.prand = rand.New(rand.NewSource(time.Now().UnixNano()))
		}
		// Process queue subs: pick one random member per queue group.
		for i := 0; i < len(r.qsubs); i++ {
			qsubs := r.qsubs[i]
			index := c.cache.prand.Intn(len(qsubs))
			sub := qsubs[index]
			if sub != nil {
				mh := c.msgHeader(msgh[:si], sub)
				c.deliverMsg(sub, mh, msg)
			}
		}
	}
}
// pubPermissionViolation notifies the client and logs that it attempted to
// publish to a subject it is not authorized for.
func (c *client) pubPermissionViolation(subject []byte) {
	violation := fmt.Sprintf("Permissions Violation for Publish to %q", subject)
	c.sendErr(violation)
	c.Errorf("Publish Violation - User %q, Subject %q", c.opts.Username, subject)
}
// processPingTimer fires on the ping interval: it sends a PING to the
// client, closing the connection when too many PINGs are outstanding
// (stale connection) or when the send fails; otherwise re-arms the timer.
func (c *client) processPingTimer() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.ptmr = nil
	// Check if connection is still opened
	if c.nc == nil {
		return
	}
	c.Debugf("%s Ping Timer", c.typeString())
	// Check for violation
	c.pout++
	if c.pout > c.srv.getOpts().MaxPingsOut {
		c.Debugf("Stale Client Connection - Closing")
		c.sendProto([]byte(fmt.Sprintf("-ERR '%s'\r\n", "Stale Connection")), true)
		c.clearConnection()
		return
	}
	c.traceOutOp("PING", nil)
	// Send PING
	err := c.sendProto([]byte("PING\r\n"), true)
	if err != nil {
		c.Debugf("Error on Client Ping Flush, error %s", err)
		c.clearConnection()
	} else {
		// Reset to fire again if all OK.
		c.setPingTimer()
	}
}
// setPingTimer arms the ping timer using the server's configured interval.
// NOTE(review): appears to be called with the client lock held (it writes
// c.ptmr, like clearPingTimer which documents that requirement) — confirm.
func (c *client) setPingTimer() {
	if c.srv == nil {
		return
	}
	d := c.srv.getOpts().PingInterval
	c.ptmr = time.AfterFunc(d, c.processPingTimer)
}
// clearPingTimer stops and releases any pending ping timer.
// Lock should be held.
func (c *client) clearPingTimer() {
	if c.ptmr != nil {
		c.ptmr.Stop()
		c.ptmr = nil
	}
}
// setAuthTimer arms a timer that triggers authTimeout after d elapses.
// Lock should be held.
func (c *client) setAuthTimer(d time.Duration) {
	// Use the method value directly; no closure is needed.
	c.atmr = time.AfterFunc(d, c.authTimeout)
}
// clearAuthTimer stops the auth timer and reports whether it was stopped
// before firing. A false return means the timeout callback is already
// running (or ran); callers use that to detect an in-flight auth failure.
// Lock should be held.
func (c *client) clearAuthTimer() bool {
	if c.atmr == nil {
		return true
	}
	stopped := c.atmr.Stop()
	c.atmr = nil
	return stopped
}
// isAuthTimerSet reports whether the authorization timer is currently armed.
func (c *client) isAuthTimerSet() bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.atmr != nil
}
// clearConnection flushes pending output and closes the underlying network
// connection (without tearing down client state — see closeConnection).
// Lock should be held.
func (c *client) clearConnection() {
	if c.nc == nil {
		return
	}
	// With TLS, Close() is sending an alert (that is doing a write).
	// Need to set a deadline otherwise the server could block there
	// if the peer is not reading from socket.
	c.nc.SetWriteDeadline(time.Now().Add(c.srv.getOpts().WriteDeadline))
	if c.bw != nil {
		c.bw.Flush()
	}
	c.nc.Close()
	c.nc.SetWriteDeadline(time.Time{})
}
// typeString returns a human-readable name for the connection type,
// used in log messages.
func (c *client) typeString() string {
	if c.typ == CLIENT {
		return "Client"
	}
	if c.typ == ROUTER {
		return "Router"
	}
	return "Unknown Type"
}
// closeConnection tears the client down: stops timers, closes the socket,
// unregisters the client and its subscriptions from the server, notifies
// the cluster, and — for solicited or retryable routes — schedules a
// reconnect attempt.
func (c *client) closeConnection() {
	c.mu.Lock()
	if c.nc == nil {
		c.mu.Unlock()
		return
	}
	c.Debugf("%s connection closed", c.typeString())
	c.clearAuthTimer()
	c.clearPingTimer()
	c.clearConnection()
	c.nc = nil
	// Snapshot for use.
	subs := make([]*subscription, 0, len(c.subs))
	for _, sub := range c.subs {
		// Auto-unsubscribe subscriptions must be unsubscribed forcibly.
		sub.max = 0
		subs = append(subs, sub)
	}
	srv := c.srv
	var (
		routeClosed   bool
		retryImplicit bool
		connectURLs   []string
	)
	if c.route != nil {
		routeClosed = c.route.closed
		if !routeClosed {
			retryImplicit = c.route.retry
		}
		connectURLs = c.route.connectURLs
	}
	c.mu.Unlock()
	if srv != nil {
		// This is a route that disconnected...
		if len(connectURLs) > 0 {
			// Unless disabled, possibly update the server's INFO protcol
			// and send to clients that know how to handle async INFOs.
			if !srv.getOpts().Cluster.NoAdvertise {
				srv.removeClientConnectURLsAndSendINFOToClients(connectURLs)
			}
		}
		// Unregister
		srv.removeClient(c)
		// Remove clients subscriptions.
		for _, sub := range subs {
			srv.sl.Remove(sub)
			// Forward on unsubscribes if we are not
			// a router ourselves.
			if c.typ != ROUTER {
				srv.broadcastUnSubscribe(sub)
			}
		}
	}
	// Don't reconnect routes that are being closed.
	if routeClosed {
		return
	}
	// Check for a solicited route. If it was, start up a reconnect unless
	// we are already connected to the other end.
	if c.isSolicitedRoute() || retryImplicit {
		// Capture these under lock
		c.mu.Lock()
		rid := c.route.remoteID
		rtype := c.route.routeType
		rurl := c.route.url
		c.mu.Unlock()
		srv.mu.Lock()
		defer srv.mu.Unlock()
		// It is possible that the server is being shutdown.
		// If so, don't try to reconnect
		if !srv.running {
			return
		}
		if rid != "" && srv.remotes[rid] != nil {
			c.srv.Debugf("Not attempting reconnect for solicited route, already connected to \"%s\"", rid)
			return
		} else if rid == srv.info.ID {
			c.srv.Debugf("Detected route to self, ignoring \"%s\"", rurl)
			return
		} else if rtype != Implicit || retryImplicit {
			c.srv.Debugf("Attempting reconnect for solicited route \"%s\"", rurl)
			// Keep track of this go-routine so we can wait for it on
			// server shutdown.
			srv.startGoRoutine(func() { srv.reConnectToRoute(rurl, rtype) })
		}
	}
}
// setRouteNoReconnectOnClose marks a route connection as deliberately
// closed so that closeConnection() will not attempt a reconnect.
func (c *client) setRouteNoReconnectOnClose() {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.route != nil {
		c.route.closed = true
	}
}
// Logging functionality scoped to a client or route.

// Errorf logs an error message prefixed with the client's identity.
func (c *client) Errorf(format string, v ...interface{}) {
	c.srv.Errorf(fmt.Sprintf("%s - %s", c, format), v...)
}
// Debugf logs a debug message prefixed with the client's identity.
func (c *client) Debugf(format string, v ...interface{}) {
	c.srv.Debugf(fmt.Sprintf("%s - %s", c, format), v...)
}
// Noticef logs a notice message prefixed with the client's identity.
func (c *client) Noticef(format string, v ...interface{}) {
	c.srv.Noticef(fmt.Sprintf("%s - %s", c, format), v...)
}
// Tracef logs a trace message prefixed with the client's identity.
func (c *client) Tracef(format string, v ...interface{}) {
	c.srv.Tracef(fmt.Sprintf("%s - %s", c, format), v...)
}
// Remove unused code
// Copyright 2012-2016 Apcera Inc. All rights reserved.
package server
import (
"bufio"
"crypto/tls"
"encoding/json"
"fmt"
"math/rand"
"net"
"sync"
"sync/atomic"
"time"
)
// Type of client connection.
const (
	// CLIENT is an end user.
	CLIENT = iota
	// ROUTER is another router in the cluster.
	ROUTER
)
// Client protocol levels advertised in the CONNECT message.
const (
	// Original Client protocol from 2009.
	// http://nats.io/documentation/internals/nats-protocol/
	ClientProtoZero = iota
	// This signals a client can receive more then the original INFO block.
	// This can be used to update clients on other cluster members, etc.
	ClientProtoInfo
)
// Seed the global math/rand source at package load.
// NOTE(review): a second init() below also seeds the generator with the
// current time; one of the two is redundant and could be removed.
func init() {
	rand.Seed(time.Now().UnixNano())
}
const (
	// Scratch buffer size for the processMsg() calls.
	msgScratchSize = 512
	// msgHeadProto is the protocol verb that prefixes every delivered message.
	msgHeadProto = "MSG "
)
// For controlling dynamic buffer sizes.
// The read loop grows/shrinks its buffer between these bounds based on
// observed read sizes (see readLoop).
const (
	startBufSize = 512 // For INFO/CONNECT block
	minBufSize   = 128
	maxBufSize   = 65536
)
// Represent client booleans with a bitmask
type clientFlag byte

// Some client state represented as flags
const (
	connectReceived   clientFlag = 1 << iota // The CONNECT proto has been received
	firstPongSent                            // The first PONG has been sent
	handshakeComplete                        // For TLS clients, indicate that the handshake is complete
)
// set the flag (would be equivalent to set the boolean to true)
func (cf *clientFlag) set(c clientFlag) {
	*cf |= c
}
// isSet returns true if the flag is set, false otherwise
func (cf clientFlag) isSet(c clientFlag) bool {
	return cf&c != 0
}
// setIfNotSet will set the flag `c` only if that flag was not already
// set and return true to indicate that the flag has been set. Returns
// false otherwise.
func (cf *clientFlag) setIfNotSet(c clientFlag) bool {
	if cf.isSet(c) {
		return false
	}
	*cf |= c
	return true
}
// client represents a single server connection: either an end-user CLIENT
// or a cluster ROUTER peer. Mutable state is guarded by mu unless noted.
type client struct {
	// Here first because of use of atomics, and memory alignment.
	stats
	mpay  int64      // Max payload; read atomically in processPub.
	mu    sync.Mutex // Guards the fields below.
	typ   int        // CLIENT or ROUTER.
	cid   uint64     // Unique connection id assigned by the server.
	opts  clientOpts // Options received in CONNECT.
	start time.Time  // Connection start time.
	nc    net.Conn   // Underlying connection; nil once closed.
	ncs   string     // Identity string used for logging.
	bw    *bufio.Writer
	srv   *Server
	subs  map[string]*subscription // Subscriptions keyed by sid.
	perms *permissions             // nil when no auth permissions apply.
	cache readCache                // Per-readLoop hot caches and counters.
	pcd   map[*client]struct{}     // Pending clients that need a flush.
	atmr  *time.Timer              // Auth timeout timer.
	ptmr  *time.Timer              // Ping interval timer.
	pout  int                      // Outstanding (unanswered) PINGs.
	wfc   int                      // Write-flush count used to tune buffers.
	msgb  [msgScratchSize]byte     // Scratch for building MSG headers.
	last  time.Time                // Last activity time.
	parseState
	route *route // Route metadata; nil for plain clients.
	debug bool
	trace bool
	flags clientFlag // Compact booleans into a single field. Size will be increased when needed.
}
// permissions holds per-client authorization state: sublists of allowed
// subscribe and publish subjects, plus a bounded cache of publish checks.
type permissions struct {
	sub    *Sublist
	pub    *Sublist
	pcache map[string]bool // subject -> allowed; pruned at maxPermCacheSize.
}
// Bounds for the per-client caches (see processMsg pruning logic).
const (
	maxResultCacheSize = 512
	maxPermCacheSize   = 32
	pruneSize          = 16
)
// Used in readloop to cache hot subject lookups and group statistics.
type readCache struct {
	genid   uint64                    // Sublist generation the results are valid for.
	results map[string]*SublistResult // subject -> match results.
	prand   *rand.Rand                // Per-client RNG for queue-group selection.
	inMsgs  int                       // Messages seen this read pass.
	inBytes int                       // Bytes seen this read pass.
	subs    int                       // Subscription activity this read pass.
}
// String returns the client's identity tag, used when formatting log lines.
func (c *client) String() string {
	return c.ncs
}
// GetOpts returns a pointer to the options received in the CONNECT protocol.
func (c *client) GetOpts() *clientOpts {
	return &c.opts
}
// GetTLSConnectionState returns the TLS ConnectionState if TLS is enabled, nil
// otherwise. Implements the ClientAuth interface.
func (c *client) GetTLSConnectionState() *tls.ConnectionState {
	if tc, ok := c.nc.(*tls.Conn); ok {
		st := tc.ConnectionState()
		return &st
	}
	return nil
}
// subscription represents an interest in a subject, optionally as part of
// a queue group, owned by a single client.
type subscription struct {
	client  *client
	subject []byte
	queue   []byte // Queue group name; nil for plain subscriptions.
	sid     []byte // Client-assigned subscription id.
	nm      int64  // Messages delivered so far.
	max     int64  // Auto-unsubscribe limit; 0 means unlimited.
}
// clientOpts are the options a client sends in its CONNECT protocol message.
type clientOpts struct {
	Verbose       bool   `json:"verbose"`
	Pedantic      bool   `json:"pedantic"`
	SslRequired   bool   `json:"ssl_required"`
	Authorization string `json:"auth_token"`
	Username      string `json:"user"`
	Password      string `json:"pass"`
	Name          string `json:"name"`
	Lang          string `json:"lang"`
	Version       string `json:"version"`
	Protocol      int    `json:"protocol"`
}
var defaultOpts = clientOpts{Verbose: true, Pedantic: true}
// Seed the global math/rand source at package load.
// NOTE(review): this duplicates the init() above, which already seeds the
// generator; one of the two should be removed.
func init() {
	rand.Seed(time.Now().UnixNano())
}
// initClient initializes a freshly accepted connection: assigns a client
// id, sets up the buffered writer, subscription map, scratch buffers and
// the logging identity string.
// Lock should be held
func (c *client) initClient() {
	s := c.srv
	c.cid = atomic.AddUint64(&s.gcid, 1)
	c.bw = bufio.NewWriterSize(c.nc, startBufSize)
	c.subs = make(map[string]*subscription)
	c.debug = (atomic.LoadInt32(&c.srv.logging.debug) != 0)
	c.trace = (atomic.LoadInt32(&c.srv.logging.trace) != 0)
	// This is a scratch buffer used for processMsg()
	// The msg header starts with "MSG ",
	// in bytes that is [77 83 71 32].
	c.msgb = [msgScratchSize]byte{77, 83, 71, 32}
	// This is to track pending clients that have data to be flushed
	// after we process inbound msgs from our own connection.
	c.pcd = make(map[*client]struct{})
	// snapshot the string version of the connection
	conn := "-"
	if ip, ok := c.nc.(*net.TCPConn); ok {
		addr := ip.RemoteAddr().(*net.TCPAddr)
		conn = fmt.Sprintf("%s:%d", addr.IP, addr.Port)
	}
	switch c.typ {
	case CLIENT:
		c.ncs = fmt.Sprintf("%s - cid:%d", conn, c.cid)
	case ROUTER:
		c.ncs = fmt.Sprintf("%s - rid:%d", conn, c.cid)
	}
}
// RegisterUser allows auth to call back into a new client
// with the authenticated user. This is used to map any permissions
// into the client.
func (c *client) RegisterUser(user *User) {
	if user.Permissions == nil {
		// Reset perms to nil in case client previously had them.
		c.mu.Lock()
		c.perms = nil
		c.mu.Unlock()
		return
	}
	// Process Permissions and map into client connection structures.
	c.mu.Lock()
	defer c.mu.Unlock()
	// Pre-allocate all to simplify checks later.
	c.perms = &permissions{}
	c.perms.sub = NewSublist()
	c.perms.pub = NewSublist()
	c.perms.pcache = make(map[string]bool)
	// Loop over publish permissions
	for _, pubSubject := range user.Permissions.Publish {
		sub := &subscription{subject: []byte(pubSubject)}
		c.perms.pub.Insert(sub)
	}
	// Loop over subscribe permissions
	for _, subSubject := range user.Permissions.Subscribe {
		sub := &subscription{subject: []byte(subSubject)}
		c.perms.sub.Insert(sub)
	}
}
// readLoop is the per-connection goroutine that reads from the socket,
// feeds the protocol parser, flushes pending destination clients, and
// dynamically tunes read/write buffer sizes.
func (c *client) readLoop() {
	// Grab the connection off the client, it will be cleared on a close.
	// We check for that after the loop, but want to avoid a nil dereference
	c.mu.Lock()
	nc := c.nc
	s := c.srv
	defer s.grWG.Done()
	c.mu.Unlock()
	if nc == nil {
		return
	}
	// Start read buffer.
	b := make([]byte, startBufSize)
	// Snapshot server options.
	opts := s.getOpts()
	for {
		n, err := nc.Read(b)
		if err != nil {
			c.closeConnection()
			return
		}
		// Grab for updates for last activity.
		last := time.Now()
		// Clear inbound stats cache
		c.cache.inMsgs = 0
		c.cache.inBytes = 0
		c.cache.subs = 0
		if err := c.parse(b[:n]); err != nil {
			// handled inline
			if err != ErrMaxPayload && err != ErrAuthorization {
				c.Errorf("Error reading from client: %s", err.Error())
				c.sendErr("Parser Error")
				c.closeConnection()
			}
			return
		}
		// Updates stats for client and server that were collected
		// from parsing through the buffer.
		atomic.AddInt64(&c.inMsgs, int64(c.cache.inMsgs))
		atomic.AddInt64(&c.inBytes, int64(c.cache.inBytes))
		atomic.AddInt64(&s.inMsgs, int64(c.cache.inMsgs))
		atomic.AddInt64(&s.inBytes, int64(c.cache.inBytes))
		// Check pending clients for flush.
		for cp := range c.pcd {
			// Flush those in the set
			cp.mu.Lock()
			if cp.nc != nil {
				// Gather the flush calls that happened before now.
				// This is a signal into us about dynamic buffer allocation tuning.
				wfc := cp.wfc
				cp.wfc = 0
				cp.nc.SetWriteDeadline(time.Now().Add(opts.WriteDeadline))
				err := cp.bw.Flush()
				cp.nc.SetWriteDeadline(time.Time{})
				if err != nil {
					c.Debugf("Error flushing: %v", err)
					cp.mu.Unlock()
					cp.closeConnection()
					cp.mu.Lock()
				} else {
					// Update outbound last activity.
					cp.last = last
					// Check if we should tune the buffer.
					sz := cp.bw.Available()
					// Check for expansion opportunity.
					if wfc > 2 && sz <= maxBufSize/2 {
						cp.bw = bufio.NewWriterSize(cp.nc, sz*2)
					}
					// Check for shrinking opportunity.
					if wfc == 0 && sz >= minBufSize*2 {
						cp.bw = bufio.NewWriterSize(cp.nc, sz/2)
					}
				}
			}
			cp.mu.Unlock()
			delete(c.pcd, cp)
		}
		// Check to see if we got closed, e.g. slow consumer
		c.mu.Lock()
		// Deliberately shadows the outer nc: re-read the connection so a
		// concurrent close (nc set to nil) terminates the loop below.
		nc := c.nc
		// Activity based on interest changes or data/msgs.
		if c.cache.inMsgs > 0 || c.cache.subs > 0 {
			c.last = last
		}
		c.mu.Unlock()
		if nc == nil {
			return
		}
		// Update buffer size as/if needed.
		// Grow
		if n == len(b) && len(b) < maxBufSize {
			b = make([]byte, len(b)*2)
		}
		// Shrink, for now don't accelerate, ping/pong will eventually sort it out.
		if n < len(b)/2 && len(b) > minBufSize {
			b = make([]byte, len(b)/2)
		}
	}
}
// traceMsg logs an inbound message payload when tracing is enabled.
// The payload is expected to carry the trailing CR-LF, which is stripped
// before logging.
func (c *client) traceMsg(msg []byte) {
	if !c.trace {
		return
	}
	// Robustness: guard against a payload shorter than CR-LF so the
	// slice expression below cannot panic on malformed input.
	if len(msg) < LEN_CR_LF {
		return
	}
	// FIXME(dlc), allow limits to printable payload
	c.Tracef("->> MSG_PAYLOAD: [%s]", string(msg[:len(msg)-LEN_CR_LF]))
}
// traceInOp traces an inbound protocol operation.
func (c *client) traceInOp(op string, arg []byte) {
	const inboundFmt = "->> %s"
	c.traceOp(inboundFmt, op, arg)
}
// traceOutOp traces an outbound protocol operation.
func (c *client) traceOutOp(op string, arg []byte) {
	const outboundFmt = "<<- %s"
	c.traceOp(outboundFmt, op, arg)
}
// traceOp is the shared helper for traceInOp/traceOutOp. It builds a slice
// of operands (op name plus optional argument) and logs it with the given
// direction format.
func (c *client) traceOp(format, op string, arg []byte) {
	if !c.trace {
		return
	}
	opa := []interface{}{}
	if op != "" {
		opa = append(opa, op)
	}
	if arg != nil {
		opa = append(opa, string(arg))
	}
	// Note: opa is deliberately passed as a single operand (not opa...),
	// so the %s verb renders the whole slice, e.g. "->> [PING]".
	c.Tracef(format, opa)
}
// processInfo handles INFO protocol messages received from clients and
// other routes. Route INFOs are forwarded to the route-specific handler.
func (c *client) processInfo(arg []byte) error {
	var info Info
	err := json.Unmarshal(arg, &info)
	if err != nil {
		return err
	}
	if c.typ == ROUTER {
		c.processRouteInfo(&info)
	}
	return nil
}
// processErr handles an inbound -ERR protocol message: it logs the error
// with the connection kind and closes the connection.
func (c *client) processErr(errStr string) {
	var kind string
	switch c.typ {
	case CLIENT:
		kind = "Client"
	case ROUTER:
		kind = "Route"
	}
	if kind != "" {
		c.Errorf("%s Error %s", kind, errStr)
	}
	c.closeConnection()
}
// processConnect parses the CONNECT protocol message, applies the client's
// options under the client lock, performs authorization, and validates the
// requested protocol level. Returns a protocol-specific error on failure.
func (c *client) processConnect(arg []byte) error {
	c.traceInOp("CONNECT", arg)
	c.mu.Lock()
	// If we can't stop the timer because the callback is in progress...
	if !c.clearAuthTimer() {
		// wait for it to finish and handle sending the failure back to
		// the client.
		for c.nc != nil {
			c.mu.Unlock()
			time.Sleep(25 * time.Millisecond)
			c.mu.Lock()
		}
		c.mu.Unlock()
		return nil
	}
	c.last = time.Now()
	// Snapshot fields that are needed after the lock is released.
	typ := c.typ
	r := c.route
	srv := c.srv
	// Moved unmarshalling of clients' Options under the lock.
	// The client has already been added to the server map, so it is possible
	// that other routines lookup the client, and access its options under
	// the client's lock, so unmarshalling the options outside of the lock
	// would cause data RACEs.
	if err := json.Unmarshal(arg, &c.opts); err != nil {
		c.mu.Unlock()
		return err
	}
	// Indicate that the CONNECT protocol has been received, and that the
	// server now knows which protocol this client supports.
	c.flags.set(connectReceived)
	// Capture these under lock
	proto := c.opts.Protocol
	verbose := c.opts.Verbose
	lang := c.opts.Lang
	c.mu.Unlock()
	if srv != nil {
		// As soon as c.opts is unmarshalled and if the proto is at
		// least ClientProtoInfo, we need to increment the following counter.
		// This is decremented when client is removed from the server's
		// clients map.
		if proto >= ClientProtoInfo {
			srv.mu.Lock()
			srv.cproto++
			srv.mu.Unlock()
		}
		// Check for Auth
		if ok := srv.checkAuthorization(c); !ok {
			c.authViolation()
			return ErrAuthorization
		}
	}
	// Check client protocol request if it exists.
	if typ == CLIENT && (proto < ClientProtoZero || proto > ClientProtoInfo) {
		c.sendErr(ErrBadClientProtocol.Error())
		c.closeConnection()
		return ErrBadClientProtocol
	} else if typ == ROUTER && lang != "" {
		// Way to detect clients that incorrectly connect to the route listen
		// port. Client provide Lang in the CONNECT protocol while ROUTEs don't.
		c.sendErr(ErrClientConnectedToRoutePort.Error())
		c.closeConnection()
		return ErrClientConnectedToRoutePort
	}
	// Grab connection name of remote route.
	if typ == ROUTER && r != nil {
		c.mu.Lock()
		c.route.remoteID = c.opts.Name
		c.mu.Unlock()
	}
	if verbose {
		c.sendOK()
	}
	return nil
}
// authTimeout fires when the peer fails to authenticate within the
// configured window: it notifies the peer and closes the connection.
func (c *client) authTimeout() {
	c.sendErr(ErrAuthTimeout.Error())
	c.Debugf("Authorization Timeout")
	c.closeConnection()
}
// authViolation reports a failed authorization attempt: it logs the
// violation (including the offending username when the server uses
// multi-user authentication), notifies the peer, and closes the
// connection.
func (c *client) authViolation() {
	if c.srv != nil && c.srv.getOpts().Users != nil {
		c.Errorf("%s - User %q",
			ErrAuthorization.Error(),
			c.opts.Username)
	} else {
		// Fix: pass the error text as an operand, not as the format
		// string — a '%' in the message would otherwise be misformatted
		// (flagged by go vet's printf analyzer).
		c.Errorf("%s", ErrAuthorization.Error())
	}
	c.sendErr("Authorization Violation")
	c.closeConnection()
}
// maxConnExceeded rejects a connection because the server's maximum
// connection count has been reached: log, notify the peer, close.
func (c *client) maxConnExceeded() {
	// Fix: use an explicit %s verb rather than passing the error text as
	// a format string (go vet printf issue).
	c.Errorf("%s", ErrTooManyConnections.Error())
	c.sendErr(ErrTooManyConnections.Error())
	c.closeConnection()
}
// maxPayloadViolation rejects a publish whose payload size sz exceeds the
// allowed maximum max: log the sizes, notify the peer, close the
// connection.
func (c *client) maxPayloadViolation(sz int, max int64) {
	c.Errorf("%s: %d vs %d", ErrMaxPayload.Error(), sz, max)
	c.sendErr("Maximum Payload Violation")
	c.closeConnection()
}
// sendProto writes info to the client's buffered writer and optionally
// flushes. A write deadline is installed whenever an actual socket write
// may occur (explicit flush, or the buffer lacks room and would flush
// implicitly) so a non-reading peer cannot block the server.
// Assume the lock is held upon entry.
func (c *client) sendProto(info []byte, doFlush bool) error {
	var err error
	if c.bw != nil && c.nc != nil {
		deadlineSet := false
		if doFlush || c.bw.Available() < len(info) {
			c.nc.SetWriteDeadline(time.Now().Add(c.srv.getOpts().WriteDeadline))
			deadlineSet = true
		}
		_, err = c.bw.Write(info)
		if err == nil && doFlush {
			err = c.bw.Flush()
		}
		if deadlineSet {
			// Clear the deadline so later writes are not affected.
			c.nc.SetWriteDeadline(time.Time{})
		}
	}
	return err
}
// sendInfo sends a pre-marshalled INFO protocol to the peer, flushing
// immediately.
// Assume the lock is held upon entry.
func (c *client) sendInfo(info []byte) {
	c.sendProto(info, true)
}
// sendErr sends a protocol "-ERR '<err>'" to the peer and flushes.
// Acquires the client lock; must not be called with it held.
func (c *client) sendErr(err string) {
	c.mu.Lock()
	c.traceOutOp("-ERR", []byte(err))
	c.sendProto([]byte(fmt.Sprintf("-ERR '%s'\r\n", err)), true)
	c.mu.Unlock()
}
// sendOK queues a protocol "+OK" to the peer without flushing, and marks
// this client as needing a flush in the pending-clients map.
// Acquires the client lock; must not be called with it held.
func (c *client) sendOK() {
	c.mu.Lock()
	c.traceOutOp("OK", nil)
	// Can not autoflush this one, needs to be async.
	c.sendProto([]byte("+OK\r\n"), false)
	c.pcd[c] = needFlush
	c.mu.Unlock()
}
// processPing answers a PING from the peer with an immediate PONG. For
// clients that support asynchronous INFO (ClientProtoInfo), the first
// PING additionally triggers a check for cluster topology changes that
// occurred since the connection was created, sending an updated INFO.
func (c *client) processPing() {
	c.mu.Lock()
	c.traceInOp("PING", nil)
	if c.nc == nil {
		c.mu.Unlock()
		return
	}
	c.traceOutOp("PONG", nil)
	if err := c.sendProto([]byte("PONG\r\n"), true); err != nil {
		c.clearConnection()
		c.Debugf("Error on Flush, error %s", err.Error())
		c.mu.Unlock()
		return
	}
	// The CONNECT should have been received, but make sure it
	// is so before proceeding
	if !c.flags.isSet(connectReceived) {
		c.mu.Unlock()
		return
	}
	// If we are here, the CONNECT has been received so we know
	// if this client supports async INFO or not.
	var (
		checkClusterChange bool
		srv                = c.srv
	)
	// For older clients, just flip the firstPongSent flag if not already
	// set and we are done.
	if c.opts.Protocol < ClientProtoInfo || srv == nil {
		c.flags.setIfNotSet(firstPongSent)
	} else {
		// This is a client that supports async INFO protocols.
		// If this is the first PING (so firstPongSent is not set yet),
		// we will need to check if there was a change in cluster topology.
		checkClusterChange = !c.flags.isSet(firstPongSent)
	}
	c.mu.Unlock()
	if checkClusterChange {
		// Lock order: server lock before client lock.
		srv.mu.Lock()
		c.mu.Lock()
		// Now that we are under both locks, we can flip the flag.
		// This prevents sendAsyncInfoToClients() and the code here
		// from sending a double INFO protocol.
		c.flags.set(firstPongSent)
		// If there was a cluster update since this client was created,
		// send an updated INFO protocol now.
		if srv.lastCURLsUpdate >= c.start.UnixNano() {
			c.sendInfo(srv.infoJSON)
		}
		c.mu.Unlock()
		srv.mu.Unlock()
	}
}
// processPong handles a PONG reply from the peer by resetting the
// outstanding-ping counter used for stale-connection detection.
func (c *client) processPong() {
	c.traceInOp("PONG", nil)
	c.mu.Lock()
	c.pout = 0
	c.mu.Unlock()
}
// processMsgArgs parses the argument section of a routed MSG protocol
// line into c.pa. Expected forms:
//
//	MSG <subject> <sid> <size>
//	MSG <subject> <sid> <reply> <size>
//
// The parsed fields are subslices of arg (no copies).
func (c *client) processMsgArgs(arg []byte) error {
	if c.trace {
		c.traceInOp("MSG", arg)
	}
	// Unroll splitArgs to avoid runtime/heap issues
	a := [MAX_MSG_ARGS][]byte{}
	args := a[:0]
	// start tracks the beginning of the current token; -1 means we are
	// between tokens.
	start := -1
	for i, b := range arg {
		switch b {
		case ' ', '\t', '\r', '\n':
			if start >= 0 {
				args = append(args, arg[start:i])
				start = -1
			}
		default:
			if start < 0 {
				start = i
			}
		}
	}
	if start >= 0 {
		args = append(args, arg[start:])
	}
	switch len(args) {
	case 3:
		c.pa.reply = nil
		c.pa.szb = args[2]
		c.pa.size = parseSize(args[2])
	case 4:
		c.pa.reply = args[2]
		c.pa.szb = args[3]
		c.pa.size = parseSize(args[3])
	default:
		return fmt.Errorf("processMsgArgs Parse Error: '%s'", arg)
	}
	// parseSize returns a negative value for a malformed size token.
	if c.pa.size < 0 {
		return fmt.Errorf("processMsgArgs Bad or Missing Size: '%s'", arg)
	}
	// Common ones processed after check for arg length
	c.pa.subject = args[0]
	c.pa.sid = args[1]
	return nil
}
// processPub parses a PUB protocol line into c.pa and enforces the
// maximum-payload limit. Expected forms:
//
//	PUB <subject> <size>
//	PUB <subject> <reply> <size>
func (c *client) processPub(arg []byte) error {
	if c.trace {
		c.traceInOp("PUB", arg)
	}
	// Unroll splitArgs to avoid runtime/heap issues
	a := [MAX_PUB_ARGS][]byte{}
	args := a[:0]
	start := -1
	for i, b := range arg {
		switch b {
		case ' ', '\t':
			if start >= 0 {
				args = append(args, arg[start:i])
				start = -1
			}
		default:
			if start < 0 {
				start = i
			}
		}
	}
	if start >= 0 {
		args = append(args, arg[start:])
	}
	switch len(args) {
	case 2:
		c.pa.subject = args[0]
		c.pa.reply = nil
		c.pa.size = parseSize(args[1])
		c.pa.szb = args[1]
	case 3:
		c.pa.subject = args[0]
		c.pa.reply = args[1]
		c.pa.size = parseSize(args[2])
		c.pa.szb = args[2]
	default:
		return fmt.Errorf("processPub Parse Error: '%s'", arg)
	}
	if c.pa.size < 0 {
		return fmt.Errorf("processPub Bad or Missing Size: '%s'", arg)
	}
	// mpay may be updated concurrently, hence the atomic load.
	maxPayload := atomic.LoadInt64(&c.mpay)
	if maxPayload > 0 && int64(c.pa.size) > maxPayload {
		c.maxPayloadViolation(c.pa.size, maxPayload)
		return ErrMaxPayload
	}
	// Pedantic mode only warns the client; the publish still proceeds.
	if c.opts.Pedantic && !IsValidLiteralSubject(string(c.pa.subject)) {
		c.sendErr("Invalid Subject")
	}
	return nil
}
// splitArg tokenizes arg on spaces, tabs, CRs and LFs. The returned
// tokens are subslices of the input (no copying), and the backing array
// for the token list is stack-allocated to avoid heap churn on the hot
// path (at most MAX_MSG_ARGS tokens are expected).
func splitArg(arg []byte) [][]byte {
	var scratch [MAX_MSG_ARGS][]byte
	tokens := scratch[:0]
	tokenStart := -1
	for i := 0; i < len(arg); i++ {
		switch arg[i] {
		case ' ', '\t', '\r', '\n':
			if tokenStart >= 0 {
				tokens = append(tokens, arg[tokenStart:i])
				tokenStart = -1
			}
		default:
			if tokenStart < 0 {
				tokenStart = i
			}
		}
	}
	if tokenStart >= 0 {
		tokens = append(tokens, arg[tokenStart:])
	}
	return tokens
}
// processSub parses a SUB protocol line, registers the subscription in
// the client's sub map and the server's sublist, and forwards it to the
// cluster when appropriate. Expected forms:
//
//	SUB <subject> <sid>
//	SUB <subject> <queue> <sid>
func (c *client) processSub(argo []byte) (err error) {
	c.traceInOp("SUB", argo)
	// Indicate activity.
	c.cache.subs += 1
	// Copy so we do not reference a potentially large buffer
	arg := make([]byte, len(argo))
	copy(arg, argo)
	args := splitArg(arg)
	sub := &subscription{client: c}
	switch len(args) {
	case 2:
		sub.subject = args[0]
		sub.queue = nil
		sub.sid = args[1]
	case 3:
		sub.subject = args[0]
		sub.queue = args[1]
		sub.sid = args[2]
	default:
		return fmt.Errorf("processSub Parse Error: '%s'", arg)
	}
	shouldForward := false
	c.mu.Lock()
	if c.nc == nil {
		c.mu.Unlock()
		return nil
	}
	// Check permissions if applicable.
	if !c.canSubscribe(sub.subject) {
		c.mu.Unlock()
		c.sendErr(fmt.Sprintf("Permissions Violation for Subscription to %q", sub.subject))
		c.Errorf("Subscription Violation - User %q, Subject %q, SID %s",
			c.opts.Username, sub.subject, sub.sid)
		return nil
	}
	// We can have two SUB protocols coming from a route due to some
	// race conditions. We should make sure that we process only one.
	sid := string(sub.sid)
	if c.subs[sid] == nil {
		c.subs[sid] = sub
		if c.srv != nil {
			err = c.srv.sl.Insert(sub)
			if err != nil {
				// Roll back the local registration on sublist failure.
				delete(c.subs, sid)
			} else {
				// Routes handle their own propagation; only forward
				// client subscriptions.
				shouldForward = c.typ != ROUTER
			}
		}
	}
	c.mu.Unlock()
	if err != nil {
		c.sendErr("Invalid Subject")
		return nil
	} else if c.opts.Verbose {
		c.sendOK()
	}
	if shouldForward {
		c.srv.broadcastSubscribe(sub)
	}
	return nil
}
// canSubscribe determines if the client is authorized to subscribe to the
// given subject. Clients without a permission set may subscribe to
// anything. Assumes caller is holding lock.
func (c *client) canSubscribe(sub []byte) bool {
	if c.perms == nil {
		return true
	}
	// Permitted iff the subject matches at least one allowed pattern.
	return len(c.perms.sub.Match(string(sub)).psubs) > 0
}
// unsubscribe removes sub from the client's sub map and the server's
// sublist. If the subscription has an auto-unsubscribe maximum that has
// not yet been reached, removal is deferred until the max is hit.
// Acquires the client lock; must not be called with it held.
func (c *client) unsubscribe(sub *subscription) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if sub.max > 0 && sub.nm < sub.max {
		c.Debugf(
			"Deferring actual UNSUB(%s): %d max, %d received\n",
			string(sub.subject), sub.max, sub.nm)
		return
	}
	c.traceOp("<-> %s", "DELSUB", sub.sid)
	delete(c.subs, string(sub.sid))
	if c.srv != nil {
		c.srv.sl.Remove(sub)
	}
}
// processUnsub parses an UNSUB protocol line and removes (or arms an
// auto-unsubscribe maximum on) the referenced subscription. Expected
// forms:
//
//	UNSUB <sid>
//	UNSUB <sid> <max-msgs>
func (c *client) processUnsub(arg []byte) error {
	c.traceInOp("UNSUB", arg)
	args := splitArg(arg)
	var sid []byte
	max := -1
	switch len(args) {
	case 1:
		sid = args[0]
	case 2:
		sid = args[0]
		max = parseSize(args[1])
	default:
		return fmt.Errorf("processUnsub Parse Error: '%s'", arg)
	}
	// Indicate activity.
	c.cache.subs += 1
	var sub *subscription
	unsub := false
	shouldForward := false
	ok := false
	c.mu.Lock()
	if sub, ok = c.subs[string(sid)]; ok {
		if max > 0 {
			sub.max = int64(max)
		} else {
			// Clear it here to override
			sub.max = 0
		}
		unsub = true
		shouldForward = c.typ != ROUTER && c.srv != nil
	}
	c.mu.Unlock()
	// unsubscribe() itself defers removal when sub.max is still pending.
	if unsub {
		c.unsubscribe(sub)
	}
	if shouldForward {
		c.srv.broadcastUnSubscribe(sub)
	}
	if c.opts.Verbose {
		c.sendOK()
	}
	return nil
}
// msgHeader completes a MSG protocol header by appending the
// subscription's sid, the optional reply subject, and the payload size
// bytes, terminated by CRLF. mh already contains "MSG <subject> ".
func (c *client) msgHeader(mh []byte, sub *subscription) []byte {
	mh = append(mh, sub.sid...)
	mh = append(mh, ' ')
	if reply := c.pa.reply; reply != nil {
		mh = append(mh, reply...)
		mh = append(mh, ' ')
	}
	mh = append(mh, c.pa.szb...)
	return append(mh, '\r', '\n')
}
// Used to treat maps as efficient set
// needFlush marks entries in the pending-clients map whose buffered
// writer still needs a flush; routeSeen marks remote route IDs that have
// already received a given message during fan-out.
var needFlush = struct{}{}
var routeSeen = struct{}{}
// deliverMsg writes a message (header mh + payload msg) to the client
// owning sub, handling auto-unsubscribe limits, per-client statistics,
// and slow-consumer detection via write deadlines. c is the publishing
// connection; sub.client is the receiving one.
func (c *client) deliverMsg(sub *subscription, mh, msg []byte) {
	if sub.client == nil {
		return
	}
	client := sub.client
	client.mu.Lock()
	sub.nm++
	// Check if we should auto-unsubscribe.
	if sub.max > 0 {
		// For routing..
		shouldForward := client.typ != ROUTER && client.srv != nil
		// If we are at the exact number, unsubscribe but
		// still process the message in hand, otherwise
		// unsubscribe and drop message on the floor.
		if sub.nm == sub.max {
			c.Debugf("Auto-unsubscribe limit of %d reached for sid '%s'\n", sub.max, string(sub.sid))
			// Due to defer, reverse the code order so that execution
			// is consistent with other cases where we unsubscribe.
			if shouldForward {
				defer client.srv.broadcastUnSubscribe(sub)
			}
			defer client.unsubscribe(sub)
		} else if sub.nm > sub.max {
			c.Debugf("Auto-unsubscribe limit [%d] exceeded\n", sub.max)
			client.mu.Unlock()
			client.unsubscribe(sub)
			if shouldForward {
				client.srv.broadcastUnSubscribe(sub)
			}
			return
		}
	}
	if client.nc == nil {
		client.mu.Unlock()
		return
	}
	// Update statistics
	// The msg includes the CR_LF, so pull back out for accounting.
	msgSize := int64(len(msg) - LEN_CR_LF)
	// No atomic needed since accessed under client lock.
	// Monitor is reading those also under client's lock.
	client.outMsgs++
	client.outBytes += msgSize
	atomic.AddInt64(&c.srv.outMsgs, 1)
	atomic.AddInt64(&c.srv.outBytes, msgSize)
	// Check to see if our writes will cause a flush
	// in the underlying bufio. If so limit time we
	// will wait for flush to complete.
	deadlineSet := false
	if client.bw.Available() < (len(mh) + len(msg)) {
		client.wfc++
		client.nc.SetWriteDeadline(time.Now().Add(client.srv.getOpts().WriteDeadline))
		deadlineSet = true
	}
	// Deliver to the client.
	_, err := client.bw.Write(mh)
	if err != nil {
		goto writeErr
	}
	_, err = client.bw.Write(msg)
	if err != nil {
		goto writeErr
	}
	if c.trace {
		client.traceOutOp(string(mh[:len(mh)-LEN_CR_LF]), nil)
	}
	// TODO(dlc) - Do we need this or can we just call always?
	if deadlineSet {
		client.nc.SetWriteDeadline(time.Time{})
	}
	client.mu.Unlock()
	// Mark the receiving client as needing a flush on the publisher's
	// pending-clients map.
	c.pcd[client] = needFlush
	return
writeErr:
	if deadlineSet {
		client.nc.SetWriteDeadline(time.Time{})
	}
	client.mu.Unlock()
	// A timeout means the receiver could not keep up: treat as a slow
	// consumer and close it. Other errors are only logged.
	if ne, ok := err.(net.Error); ok && ne.Timeout() {
		atomic.AddInt64(&client.srv.slowConsumers, 1)
		client.Noticef("Slow Consumer Detected")
		client.closeConnection()
	} else {
		c.Debugf("Error writing msg: %v", err)
	}
}
// processMsg is called to process an inbound msg from a client.
// It enforces reserved-subject and publish permissions, resolves the set
// of matching subscriptions (with a per-client result cache keyed by the
// sublist generation id), and fans the message out to plain and queue
// subscribers, applying 1-hop semantics for routes.
func (c *client) processMsg(msg []byte) {
	// Snapshot server.
	srv := c.srv
	// Update statistics
	// The msg includes the CR_LF, so pull back out for accounting.
	c.cache.inMsgs += 1
	c.cache.inBytes += len(msg) - LEN_CR_LF
	if c.trace {
		c.traceMsg(msg)
	}
	// Disallow publish to _SYS.>, these are reserved for internals.
	if c.pa.subject[0] == '_' && len(c.pa.subject) > 4 &&
		c.pa.subject[1] == 'S' && c.pa.subject[2] == 'Y' &&
		c.pa.subject[3] == 'S' && c.pa.subject[4] == '.' {
		c.pubPermissionViolation(c.pa.subject)
		return
	}
	// Check if published subject is allowed if we have permissions in place.
	if c.perms != nil {
		allowed, ok := c.perms.pcache[string(c.pa.subject)]
		if ok && !allowed {
			c.pubPermissionViolation(c.pa.subject)
			return
		}
		if !ok {
			// Cache miss: run the (more expensive) sublist match and
			// memoize the verdict.
			r := c.perms.pub.Match(string(c.pa.subject))
			notAllowed := len(r.psubs) == 0
			if notAllowed {
				c.pubPermissionViolation(c.pa.subject)
				c.perms.pcache[string(c.pa.subject)] = false
			} else {
				c.perms.pcache[string(c.pa.subject)] = true
			}
			// Prune if needed.
			if len(c.perms.pcache) > maxPermCacheSize {
				// Prune the permissions cache. Keeps us from unbounded growth.
				r := 0
				for subject := range c.perms.pcache {
					delete(c.perms.pcache, subject)
					r++
					if r > pruneSize {
						break
					}
				}
			}
			// Return here to allow the pruning code to run if needed.
			if notAllowed {
				return
			}
		}
	}
	if c.opts.Verbose {
		c.sendOK()
	}
	// Mostly under testing scenarios.
	if srv == nil {
		return
	}
	var r *SublistResult
	var ok bool
	// The cache is only valid for the sublist generation it was built
	// against; any sublist change invalidates it wholesale.
	genid := atomic.LoadUint64(&srv.sl.genid)
	if genid == c.cache.genid && c.cache.results != nil {
		r, ok = c.cache.results[string(c.pa.subject)]
	} else {
		// reset
		c.cache.results = make(map[string]*SublistResult)
		c.cache.genid = genid
	}
	if !ok {
		subject := string(c.pa.subject)
		r = srv.sl.Match(subject)
		c.cache.results[subject] = r
		if len(c.cache.results) > maxResultCacheSize {
			// Prune the results cache. Keeps us from unbounded growth.
			r := 0
			for subject := range c.cache.results {
				delete(c.cache.results, subject)
				r++
				if r > pruneSize {
					break
				}
			}
		}
	}
	// Check for no interest, short circuit if so.
	if len(r.psubs) == 0 && len(r.qsubs) == 0 {
		return
	}
	// Check for pedantic and bad subject.
	if c.opts.Pedantic && !IsValidLiteralSubject(string(c.pa.subject)) {
		return
	}
	// Scratch buffer..
	msgh := c.msgb[:len(msgHeadProto)]
	// msg header
	msgh = append(msgh, c.pa.subject...)
	msgh = append(msgh, ' ')
	si := len(msgh)
	isRoute := c.typ == ROUTER
	// If we are a route and we have a queue subscription, deliver direct
	// since they are sent direct via L2 semantics. If the match is a queue
	// subscription, we will return from here regardless if we find a sub.
	if isRoute {
		if sub, ok := srv.routeSidQueueSubscriber(c.pa.sid); ok {
			if sub != nil {
				mh := c.msgHeader(msgh[:si], sub)
				c.deliverMsg(sub, mh, msg)
			}
			return
		}
	}
	// Used to only send normal subscriptions once across a given route.
	var rmap map[string]struct{}
	// Loop over all normal subscriptions that match.
	for _, sub := range r.psubs {
		// Check if this is a send to a ROUTER, make sure we only send it
		// once. The other side will handle the appropriate re-processing
		// and fan-out. Also enforce 1-Hop semantics, so no routing to another.
		if sub.client.typ == ROUTER {
			// Skip if sourced from a ROUTER and going to another ROUTER.
			// This is 1-Hop semantics for ROUTERs.
			if isRoute {
				continue
			}
			// Check to see if we have already sent it here.
			if rmap == nil {
				rmap = make(map[string]struct{}, srv.numRoutes())
			}
			sub.client.mu.Lock()
			if sub.client.nc == nil || sub.client.route == nil ||
				sub.client.route.remoteID == "" {
				c.Debugf("Bad or Missing ROUTER Identity, not processing msg")
				sub.client.mu.Unlock()
				continue
			}
			if _, ok := rmap[sub.client.route.remoteID]; ok {
				c.Debugf("Ignoring route, already processed")
				sub.client.mu.Unlock()
				continue
			}
			rmap[sub.client.route.remoteID] = routeSeen
			sub.client.mu.Unlock()
		}
		// Normal delivery
		mh := c.msgHeader(msgh[:si], sub)
		c.deliverMsg(sub, mh, msg)
	}
	// Now process any queue subs we have if not a route
	if !isRoute {
		// Check to see if we have our own rand yet. Global rand
		// has contention with lots of clients, etc.
		if c.cache.prand == nil {
			c.cache.prand = rand.New(rand.NewSource(time.Now().UnixNano()))
		}
		// Process queue subs
		for i := 0; i < len(r.qsubs); i++ {
			qsubs := r.qsubs[i]
			// Pick one member of each queue group at random.
			index := c.cache.prand.Intn(len(qsubs))
			sub := qsubs[index]
			if sub != nil {
				mh := c.msgHeader(msgh[:si], sub)
				c.deliverMsg(sub, mh, msg)
			}
		}
	}
}
// pubPermissionViolation notifies the peer and logs that it attempted to
// publish to a subject it is not authorized for. The connection stays
// open.
func (c *client) pubPermissionViolation(subject []byte) {
	c.sendErr(fmt.Sprintf("Permissions Violation for Publish to %q", subject))
	c.Errorf("Publish Violation - User %q, Subject %q", c.opts.Username, subject)
}
// processPingTimer fires on the ping interval: it sends a PING to the
// peer and closes the connection as stale when too many PINGs go
// unanswered (c.pout exceeds MaxPingsOut). On success it re-arms itself.
func (c *client) processPingTimer() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.ptmr = nil
	// Check if connection is still opened
	if c.nc == nil {
		return
	}
	c.Debugf("%s Ping Timer", c.typeString())
	// Check for violation
	c.pout++
	if c.pout > c.srv.getOpts().MaxPingsOut {
		c.Debugf("Stale Client Connection - Closing")
		c.sendProto([]byte(fmt.Sprintf("-ERR '%s'\r\n", "Stale Connection")), true)
		c.clearConnection()
		return
	}
	c.traceOutOp("PING", nil)
	// Send PING
	err := c.sendProto([]byte("PING\r\n"), true)
	if err != nil {
		c.Debugf("Error on Client Ping Flush, error %s", err)
		c.clearConnection()
	} else {
		// Reset to fire again if all OK.
		c.setPingTimer()
	}
}
// setPingTimer arms the ping timer with the server's configured ping
// interval. No-op when the client has no server (testing scenarios).
// Lock should be held.
func (c *client) setPingTimer() {
	if c.srv == nil {
		return
	}
	d := c.srv.getOpts().PingInterval
	c.ptmr = time.AfterFunc(d, c.processPingTimer)
}
// clearPingTimer stops and discards the ping timer, if one is armed.
// Lock should be held.
func (c *client) clearPingTimer() {
	if c.ptmr != nil {
		c.ptmr.Stop()
		c.ptmr = nil
	}
}
// setAuthTimer arms a timer that fires authTimeout() if the peer does not
// authenticate within d.
// Lock should be held.
func (c *client) setAuthTimer(d time.Duration) {
	c.atmr = time.AfterFunc(d, func() { c.authTimeout() })
}
// clearAuthTimer stops the auth timer. It returns true when the timer was
// never set or was stopped before firing; false means the timer's
// callback is already running (caller must handle that case).
// Lock should be held.
func (c *client) clearAuthTimer() bool {
	if c.atmr == nil {
		return true
	}
	stopped := c.atmr.Stop()
	c.atmr = nil
	return stopped
}
// isAuthTimerSet reports whether an authorization timer is currently
// armed for this client.
func (c *client) isAuthTimerSet() bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.atmr != nil
}
// clearConnection flushes pending output and closes the underlying
// network connection, guarding both operations with a write deadline.
// Lock should be held.
func (c *client) clearConnection() {
	if c.nc == nil {
		return
	}
	// With TLS, Close() is sending an alert (that is doing a write).
	// Need to set a deadline otherwise the server could block there
	// if the peer is not reading from socket.
	c.nc.SetWriteDeadline(time.Now().Add(c.srv.getOpts().WriteDeadline))
	if c.bw != nil {
		c.bw.Flush()
	}
	c.nc.Close()
	c.nc.SetWriteDeadline(time.Time{})
}
// typeString returns a human-readable label for this connection's type,
// used in log messages.
func (c *client) typeString() string {
	if c.typ == CLIENT {
		return "Client"
	}
	if c.typ == ROUTER {
		return "Router"
	}
	return "Unknown Type"
}
// closeConnection tears down the connection: it stops timers, closes the
// socket, removes all subscriptions from the server, notifies the
// cluster, and — for solicited or retried implicit routes — schedules a
// reconnect attempt.
func (c *client) closeConnection() {
	c.mu.Lock()
	if c.nc == nil {
		c.mu.Unlock()
		return
	}
	c.Debugf("%s connection closed", c.typeString())
	c.clearAuthTimer()
	c.clearPingTimer()
	c.clearConnection()
	c.nc = nil
	// Snapshot for use.
	subs := make([]*subscription, 0, len(c.subs))
	for _, sub := range c.subs {
		// Auto-unsubscribe subscriptions must be unsubscribed forcibly.
		sub.max = 0
		subs = append(subs, sub)
	}
	srv := c.srv
	var (
		routeClosed   bool
		retryImplicit bool
		connectURLs   []string
	)
	if c.route != nil {
		routeClosed = c.route.closed
		if !routeClosed {
			retryImplicit = c.route.retry
		}
		connectURLs = c.route.connectURLs
	}
	c.mu.Unlock()
	if srv != nil {
		// This is a route that disconnected...
		if len(connectURLs) > 0 {
			// Unless disabled, possibly update the server's INFO protcol
			// and send to clients that know how to handle async INFOs.
			if !srv.getOpts().Cluster.NoAdvertise {
				srv.removeClientConnectURLsAndSendINFOToClients(connectURLs)
			}
		}
		// Unregister
		srv.removeClient(c)
		// Remove clients subscriptions.
		for _, sub := range subs {
			srv.sl.Remove(sub)
			// Forward on unsubscribes if we are not
			// a router ourselves.
			if c.typ != ROUTER {
				srv.broadcastUnSubscribe(sub)
			}
		}
	}
	// Don't reconnect routes that are being closed.
	if routeClosed {
		return
	}
	// Check for a solicited route. If it was, start up a reconnect unless
	// we are already connected to the other end.
	if c.isSolicitedRoute() || retryImplicit {
		// Capture these under lock
		c.mu.Lock()
		rid := c.route.remoteID
		rtype := c.route.routeType
		rurl := c.route.url
		c.mu.Unlock()
		srv.mu.Lock()
		defer srv.mu.Unlock()
		// It is possible that the server is being shutdown.
		// If so, don't try to reconnect
		if !srv.running {
			return
		}
		if rid != "" && srv.remotes[rid] != nil {
			c.srv.Debugf("Not attempting reconnect for solicited route, already connected to \"%s\"", rid)
			return
		} else if rid == srv.info.ID {
			c.srv.Debugf("Detected route to self, ignoring \"%s\"", rurl)
			return
		} else if rtype != Implicit || retryImplicit {
			c.srv.Debugf("Attempting reconnect for solicited route \"%s\"", rurl)
			// Keep track of this go-routine so we can wait for it on
			// server shutdown.
			srv.startGoRoutine(func() { srv.reConnectToRoute(rurl, rtype) })
		}
	}
}
// setRouteNoReconnectOnClose marks a route connection as deliberately
// closed so that a subsequent closeConnection() will not attempt to
// reconnect it. No-op for non-route connections.
func (c *client) setRouteNoReconnectOnClose() {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.route != nil {
		c.route.closed = true
	}
}
// Logging functionality scoped to a client or route.
// Each helper prefixes the message with the client's string
// representation before delegating to the server-level logger.
func (c *client) Errorf(format string, v ...interface{}) {
	format = fmt.Sprintf("%s - %s", c, format)
	c.srv.Errorf(format, v...)
}
func (c *client) Debugf(format string, v ...interface{}) {
	format = fmt.Sprintf("%s - %s", c, format)
	c.srv.Debugf(format, v...)
}
func (c *client) Noticef(format string, v ...interface{}) {
	format = fmt.Sprintf("%s - %s", c, format)
	c.srv.Noticef(format, v...)
}
func (c *client) Tracef(format string, v ...interface{}) {
	format = fmt.Sprintf("%s - %s", c, format)
	c.srv.Tracef(format, v...)
}
|
package trees
import (
"errors"
"sync"
"github.com/joushou/qp"
)
// FilterDir wraps a Lister and hides (or exclusively exposes) a set of
// file names. With Whitelist true, FilteredNames is the set of permitted
// names; otherwise it is the set of forbidden names.
type FilterDir struct {
	Lister
	// UpdateHook, when set, runs before every operation with the name
	// being accessed ("" for List), allowing the filter to be refreshed.
	UpdateHook    func(string, *FilterDir)
	FilteredNames map[string]bool
	Whitelist     bool
	FilterLock    sync.RWMutex
}

// Things to mask:
// List (files must be filtered)
// Walk (name must be permitted)
// Rename (oldname must be permitted)
// Remove (name must be permitted)

// update invokes the optional UpdateHook for the given name.
func (fd *FilterDir) update(name string) {
	if fd.UpdateHook != nil {
		fd.UpdateHook(name, fd)
	}
}

// filePermitted reports whether access to name is allowed by the filter.
func (fd *FilterDir) filePermitted(name string) bool {
	fd.FilterLock.RLock()
	defer fd.FilterLock.RUnlock()
	_, inset := fd.FilteredNames[name]
	// If the file is on the list and the list is a blacklist, or it is not on
	// the list and the list is a whitelist, the access is not permitted.
	return inset == fd.Whitelist
}

// List returns the wrapped directory listing with filtered entries
// removed (order is not preserved).
func (fd *FilterDir) List(user string) ([]qp.Stat, error) {
	fd.update("")
	l, err := fd.Lister.List(user)
	if err != nil {
		return nil, err
	}
	fd.FilterLock.RLock()
	defer fd.FilterLock.RUnlock()
	for i := 0; i < len(l); i++ {
		n := l[i].Name
		_, inset := fd.FilteredNames[n]
		if inset != fd.Whitelist {
			// Delete from list without preserving order.
			l[i] = l[len(l)-1]
			l[len(l)-1] = qp.Stat{}
			l = l[:len(l)-1]
			// There's a new element on our position - we want to process that
			// too.
			i--
		}
	}
	return l, nil
}

// Walk resolves name if permitted; a filtered name resolves to nil
// (not found) rather than an error.
func (fd *FilterDir) Walk(user, name string) (File, error) {
	fd.update(name)
	if !fd.filePermitted(name) {
		return nil, nil
	}
	return fd.Lister.Walk(user, name)
}

// Rename renames oldname if permitted by the filter.
func (fd *FilterDir) Rename(user, oldname, newname string) error {
	fd.update(oldname)
	if !fd.filePermitted(oldname) {
		return errors.New("permission denied")
	}
	return fd.Lister.Rename(user, oldname, newname)
}

// Remove deletes name if permitted by the filter.
func (fd *FilterDir) Remove(user, name string) error {
	fd.update(name)
	if !fd.filePermitted(name) {
		return errors.New("permission denied")
	}
	return fd.Lister.Remove(user, name)
}

// NewFilterDir wraps dir in a FilterDir with an empty (permit-all,
// blacklist-mode) filter.
func NewFilterDir(dir Lister) *FilterDir {
	return &FilterDir{
		Lister: dir,
	}
}
Mask name in FilterDir
package trees
import (
"errors"
"sync"
"github.com/joushou/qp"
)
// FilterDir wraps a Lister and hides (or exclusively exposes) a set of
// file names, additionally masking the directory's own name with
// Filename. With Whitelist true, FilteredNames is the set of permitted
// names; otherwise it is the set of forbidden names.
type FilterDir struct {
	Lister
	// UpdateHook, when set, runs before every operation with the name
	// being accessed ("" for List), allowing the filter to be refreshed.
	UpdateHook    func(string, *FilterDir)
	FilteredNames map[string]bool
	Whitelist     bool
	FilterLock    sync.RWMutex
	// Filename overrides the wrapped directory's name.
	Filename string
}

// Name returns the masked directory name.
func (fd *FilterDir) Name() (string, error) {
	return fd.Filename, nil
}

// Stat returns the wrapped directory's stat with its Name replaced by
// the masked Filename.
func (fd *FilterDir) Stat() (qp.Stat, error) {
	s, err := fd.Lister.Stat()
	if err != nil {
		return s, err
	}
	s.Name = fd.Filename
	return s, err
}
// Things to mask:
// List (files must be filtered)
// Walk (name must be permitted)
// Rename (oldname must be permitted)
// Remove (name must be permitted)

// update invokes the optional UpdateHook for the given name, letting the
// owner refresh the filter before an operation proceeds.
func (fd *FilterDir) update(name string) {
	if fd.UpdateHook != nil {
		fd.UpdateHook(name, fd)
	}
}
// filePermitted reports whether access to name is allowed. In whitelist
// mode only listed names are permitted; in blacklist mode only unlisted
// names are.
func (fd *FilterDir) filePermitted(name string) bool {
	fd.FilterLock.RLock()
	defer fd.FilterLock.RUnlock()
	_, listed := fd.FilteredNames[name]
	return listed == fd.Whitelist
}
// List returns the wrapped directory listing with filtered entries
// removed. Removal uses swap-with-last, so entry order is not preserved.
func (fd *FilterDir) List(user string) ([]qp.Stat, error) {
	fd.update("")
	l, err := fd.Lister.List(user)
	if err != nil {
		return nil, err
	}
	fd.FilterLock.RLock()
	defer fd.FilterLock.RUnlock()
	for i := 0; i < len(l); i++ {
		n := l[i].Name
		_, inset := fd.FilteredNames[n]
		if inset != fd.Whitelist {
			// Delete from list without preserving order.
			l[i] = l[len(l)-1]
			// Zero the vacated tail slot so it does not pin memory.
			l[len(l)-1] = qp.Stat{}
			l = l[:len(l)-1]
			// There's a new element on our position - we want to process that
			// too.
			i--
		}
	}
	return l, nil
}
// Walk resolves name through the wrapped Lister when the filter permits
// it; a filtered name resolves to (nil, nil), i.e. "not found" rather
// than an error.
func (fd *FilterDir) Walk(user, name string) (File, error) {
	fd.update(name)
	if fd.filePermitted(name) {
		return fd.Lister.Walk(user, name)
	}
	return nil, nil
}
// Rename renames oldname through the wrapped Lister when the filter
// permits access to it; otherwise it fails with "permission denied".
func (fd *FilterDir) Rename(user, oldname, newname string) error {
	fd.update(oldname)
	if fd.filePermitted(oldname) {
		return fd.Lister.Rename(user, oldname, newname)
	}
	return errors.New("permission denied")
}
// Remove deletes name through the wrapped Lister when the filter permits
// access to it; otherwise it fails with "permission denied".
func (fd *FilterDir) Remove(user, name string) error {
	fd.update(name)
	if fd.filePermitted(name) {
		return fd.Lister.Remove(user, name)
	}
	return errors.New("permission denied")
}
// NewFilterDir wraps dir in a FilterDir masked as name, with an empty
// (permit-all, blacklist-mode) filter.
func NewFilterDir(name string, dir Lister) *FilterDir {
	return &FilterDir{
		Filename: name,
		Lister:   dir,
	}
}
|
t8407 테스트 수정.
|
// Config takes care of the whole configuration.
package config
import (
"errors"
"strconv"
)
// Config holds the application-wide settings: webhook secret, build log
// location, mail and Slack notification credentials, and the watched
// repositories.
type Config struct {
	Secret        string
	BuildLogPath  string
	EmailFrom     string
	EmailHost     string
	EmailPort     int
	EmailUser     string
	EmailPassword string
	SlackChannel  string
	SlackEndpoint string
	Repositories  []Repository
}

// Repository describes one watched repository: its URL, the commands to
// run, and the notifications to send.
type Repository struct {
	URL      string
	Commands []Command
	Notify   []Notify
}

// Command is a named command executed during a build.
type Command struct {
	Name    string
	Execute string
}

// Notify names a notification service and its arguments.
type Notify struct {
	Service   string
	Arguments []string
}

// ConfigForRepo returns the configuration for a repository that matches
// the URL.
func (c *Config) ConfigForRepo(url string) (Repository, error) {
	r := Repository{}
	for _, repo := range c.Repositories {
		if repo.URL == url {
			r = repo
			return r, nil
		}
	}
	msg := "Could not find repository with URL: " + url
	err := errors.New(msg)
	// The zero-value Repository is returned alongside the error.
	return r, err
}

// MailServer returns the "host:port" address of the configured SMTP
// server.
func (c *Config) MailServer() string {
	return c.EmailHost + ":" + strconv.Itoa(c.EmailPort)
}
add GitHub options to config
// Config takes care of the whole configuration.
package config
import (
"errors"
"strconv"
)
// Config holds the application-wide settings: webhook secret, build log
// location, mail/Slack notification credentials, the GitHub API key, and
// the watched repositories.
type Config struct {
	Secret        string
	BuildLogPath  string
	EmailFrom     string
	EmailHost     string
	EmailPort     int
	EmailUser     string
	EmailPassword string
	SlackChannel  string
	SlackEndpoint string
	Repositories  []Repository
	GitHubKey     string
}

// Repository describes one watched repository: its URL, the commands to
// run, the notifications to send, and how to interact with pull requests.
type Repository struct {
	URL       string
	Commands  []Command
	Notify    []Notify
	CommentPR bool
	ClosePR   bool
}

// Command is a named command executed during a build.
type Command struct {
	Name    string
	Execute string
}

// Notify names a notification service and its arguments.
type Notify struct {
	Service   string
	Arguments []string
}

// ConfigForRepo returns the configuration for a repository that matches
// the URL.
func (c *Config) ConfigForRepo(url string) (Repository, error) {
	for _, repo := range c.Repositories {
		if repo.URL == url {
			return repo, nil
		}
	}
	return Repository{}, errors.New("Could not find repository with URL: " + url)
}

// MailServer returns the "host:port" address of the configured SMTP
// server.
func (c *Config) MailServer() string {
	return c.EmailHost + ":" + strconv.Itoa(c.EmailPort)
}
|
package config
import (
"github.com/cblomart/vsphere-graphite/backend"
"github.com/cblomart/vsphere-graphite/vsphere"
)
// Configuration : configurarion base
// Configuration : configuration base. It aggregates the vCenters to
// poll, the metrics to collect, poll interval/domain settings, the
// output backend, profiling switches, and the send batch size.
type Configuration struct {
	VCenters     []*vsphere.VCenter
	Metrics      []vsphere.Metric
	Interval     int
	Domain       string
	Backend      backend.Backend
	CPUProfiling bool
	MEMProfiling bool
	FlushSize    int
}
use pointers
package config
import (
"github.com/cblomart/vsphere-graphite/backend"
"github.com/cblomart/vsphere-graphite/vsphere"
)
// Configuration : configuration base. It aggregates the vCenters to
// poll, the metrics to collect, poll interval/domain settings, the
// output backend, profiling switches, and the send batch size.
// Metrics and Backend are held by pointer.
type Configuration struct {
	VCenters     []*vsphere.VCenter
	Metrics      []*vsphere.Metric
	Interval     int
	Domain       string
	Backend      *backend.Backend
	CPUProfiling bool
	MEMProfiling bool
	FlushSize    int
}
|
// Copyright 2016 IBM Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"errors"
"fmt"
"time"
"net"
"github.com/Sirupsen/logrus"
"github.com/codegangsta/cli"
)
// Tenant stores tenant configuration
type Tenant struct {
	Token string
	// TTL is the registration time-to-live; Heartbeat is how often the
	// registration is renewed (must not exceed TTL).
	TTL       time.Duration
	Heartbeat time.Duration
}

// Registry configuration
type Registry struct {
	URL   string
	Token string
}

// Kafka configuration
type Kafka struct {
	Brokers  []string
	Username string
	Password string
	APIKey   string
	RestURL  string
	// SASL selects authenticated access; credentials above are only
	// meaningful when it is set.
	SASL bool
}

// Nginx stores NGINX configuration
type Nginx struct {
	Port    int
	Logging bool
}

// Controller configuration
type Controller struct {
	URL  string
	Poll time.Duration
}

// Config aggregates all sidecar settings: service identity, endpoint,
// feature switches (register/proxy/log/supervise), and the nested
// Tenant/Controller/Registry/Kafka/Nginx sections.
type Config struct {
	ServiceName    string
	ServiceVersion string
	EndpointHost   string
	EndpointPort   int
	LogstashServer string
	Register       bool
	Proxy          bool
	Log            bool
	Supervise      bool
	Tenant         Tenant
	Controller     Controller
	Registry       Registry
	Kafka          Kafka
	Nginx          Nginx
	LogLevel       logrus.Level
	AppArgs        []string
	ForceUpdate    bool
}
// New builds a Config from CLI flags. The logging level falls back to
// debug when unparsable, and the endpoint host falls back to the local
// non-loopback IP, retrying every 10 seconds until one is available.
func New(context *cli.Context) *Config {
	// TODO: parse this more gracefully
	loggingLevel := logrus.DebugLevel
	logLevelArg := context.String(logLevel)
	var err error
	loggingLevel, err = logrus.ParseLevel(logLevelArg)
	if err != nil {
		loggingLevel = logrus.DebugLevel
	}
	endpointHost := context.String(endpointHost)
	if endpointHost == "" {
		// Block until a local IP can be determined; the sidecar cannot
		// register without one.
		for {
			endpointHost = LocalIP()
			if endpointHost != "" {
				break
			}
			logrus.Warn("Could not obtain local IP")
			time.Sleep(time.Second * 10)
		}
	}
	return &Config{
		ServiceName:    context.String(serviceName),
		ServiceVersion: context.String(serviceVersion),
		EndpointHost:   endpointHost,
		EndpointPort:   context.Int(endpointPort),
		LogstashServer: context.String(logstashServer),
		Register:       context.BoolT(register),
		Proxy:          context.BoolT(proxy),
		Log:            context.BoolT(log),
		Supervise:      context.Bool(supervise),
		Controller: Controller{
			URL:  context.String(controllerURL),
			Poll: context.Duration(controllerPoll),
		},
		Tenant: Tenant{
			Token:     context.String(tenantToken),
			TTL:       context.Duration(tenantTTL),
			Heartbeat: context.Duration(tenantHeartbeat),
		},
		Registry: Registry{
			URL:   context.String(registryURL),
			Token: context.String(registryToken),
		},
		Kafka: Kafka{
			Username: context.String(kafkaUsername),
			Password: context.String(kafkaPassword),
			APIKey:   context.String(kafkaToken),
			RestURL:  context.String(kafkaRestURL),
			Brokers:  context.StringSlice(kafkaBrokers),
			SASL:     context.Bool(kafkaSASL),
		},
		Nginx: Nginx{
			Port: context.Int(nginxPort),
		},
		LogLevel:    loggingLevel,
		AppArgs:     context.Args(),
		ForceUpdate: context.Bool(forceUpdate),
	}
}
// Validate the configuration. It checks that the enabled features
// (registration, proxying, logging, supervision) have the settings they
// require. When validateCreds is true, registry and Kafka credentials
// are validated as well.
func (c *Config) Validate(validateCreds bool) error {
	if !c.Register && !c.Proxy {
		return errors.New("Sidecar serves no purpose. Please enable either proxy or registry or both")
	}

	// Create list of validation checks
	validators := []ValidatorFunc{}

	if c.Supervise {
		validators = append(validators,
			func() error {
				if len(c.AppArgs) == 0 {
					return fmt.Errorf("Supervision mode requires application launch arguments")
				}
				return nil
			},
		)
	}

	if c.Log {
		validators = append(validators,
			IsNotEmpty("Logstash Host", c.LogstashServer),
		)
	}

	if c.Register {
		validators = append(validators,
			func() error {
				if c.Tenant.TTL.Seconds() < c.Tenant.Heartbeat.Seconds() {
					return fmt.Errorf("Tenant TTL (%v) is less than heartbeat interval (%v)", c.Tenant.TTL, c.Tenant.Heartbeat)
				}
				return nil
			},
			IsNotEmpty("Service Name", c.ServiceName),
			IsInRange("NGINX port", c.Nginx.Port, 1, 65535),
			IsInRange("Service Endpoint Port", c.EndpointPort, 1, 65535),
			IsInRangeDuration("Tenant TTL", c.Tenant.TTL, 5*time.Second, 1*time.Hour),
			// Bug fix: this check previously validated c.Tenant.TTL a second
			// time instead of the heartbeat interval.
			IsInRangeDuration("Tenant heartbeat interval", c.Tenant.Heartbeat, 5*time.Second, 1*time.Hour),
		)
		if validateCreds {
			validators = append(validators,
				IsNotEmpty("Registry token", c.Registry.Token),
				// Typo fix in label: "Regsitry" -> "Registry".
				IsValidURL("Registry URL", c.Registry.URL),
			)
		}
	}

	if c.Proxy {
		validators = append(validators,
			IsNotEmpty("Tenant token", c.Tenant.Token),
			IsValidURL("Controller URL", c.Controller.URL),
			IsInRangeDuration("Controller polling interval", c.Controller.Poll, 5*time.Second, 1*time.Hour),
		)
		if validateCreds {
			validators = append(validators,
				IsNotEmpty("Registry token", c.Registry.Token),
				// Typo fix in label: "Regsitry" -> "Registry".
				IsValidURL("Registry URL", c.Registry.URL),
			)
		}
	}

	// If any of the Message Hub config is present validate the Message Hub config
	if validateCreds && (len(c.Kafka.Brokers) > 0 || c.Kafka.Username != "" || c.Kafka.Password != "") {
		validators = append(validators,
			func() error {
				if len(c.Kafka.Brokers) == 0 {
					return errors.New("Kafka requires at least one broker")
				}
				for _, broker := range c.Kafka.Brokers {
					if err := IsNotEmpty("Kafka broker", broker)(); err != nil {
						return err
					}
				}
				return nil
			},
		)
		if c.Kafka.SASL {
			validators = append(validators,
				IsNotEmpty("Kafka username", c.Kafka.Username),
				IsNotEmpty("Kafka password", c.Kafka.Password),
				IsNotEmpty("Kafka token", c.Kafka.APIKey),
				IsValidURL("Kafka Rest URL", c.Kafka.RestURL),
			)
		} else {
			validators = append(validators,
				func() error {
					if len(c.Kafka.Brokers) != 0 {
						if c.Kafka.Username != "" || c.Kafka.Password != "" ||
							c.Kafka.RestURL != "" || c.Kafka.APIKey != "" {
							return errors.New("Kafka credentials provided when SASL authentication disabled")
						}
					}
					return nil
				},
			)
		}
	}

	return Validate(validators)
}
// LocalIP retrieves the IP address of the sidecar: the first
// non-loopback IPv4 address found on any interface, or "" if none
// could be determined (including when interface enumeration fails).
func LocalIP() string {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return ""
	}
	for _, addr := range addrs {
		// Only consider non-loopback IP networks.
		ipNet, ok := addr.(*net.IPNet)
		if !ok || ipNet.IP.IsLoopback() {
			continue
		}
		// Restrict to IPv4 addresses.
		if ipNet.IP.To4() != nil {
			return ipNet.IP.String()
		}
	}
	return ""
}
only check for kafka vars if proxying
// Copyright 2016 IBM Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"errors"
"fmt"
"time"
"net"
"github.com/Sirupsen/logrus"
"github.com/codegangsta/cli"
)
// Tenant stores tenant configuration: the auth token plus the
// registration TTL and heartbeat interval (TTL must be >= Heartbeat,
// see Config.Validate).
type Tenant struct {
	Token     string
	TTL       time.Duration
	Heartbeat time.Duration
}

// Registry holds service-registry connection configuration.
type Registry struct {
	URL   string
	Token string
}

// Kafka holds Kafka / Message Hub connection configuration. When SASL
// is true the credential fields are required; when false they must be
// empty (enforced in Config.Validate).
type Kafka struct {
	Brokers  []string
	Username string
	Password string
	APIKey   string
	RestURL  string
	SASL     bool
}

// Nginx stores NGINX configuration.
type Nginx struct {
	Port    int
	Logging bool
}

// Controller holds controller connection configuration; Poll is the
// polling interval.
type Controller struct {
	URL  string
	Poll time.Duration
}

// Config aggregates the full sidecar configuration, assembled from
// command-line flags by New and checked by Validate.
type Config struct {
	ServiceName    string
	ServiceVersion string
	EndpointHost   string
	EndpointPort   int
	LogstashServer string
	Register       bool
	Proxy          bool
	Log            bool
	Supervise      bool
	Tenant         Tenant
	Controller     Controller
	Registry       Registry
	Kafka          Kafka
	Nginx          Nginx
	LogLevel       logrus.Level
	AppArgs        []string
	ForceUpdate    bool
}
// New builds a Config from the parsed command-line context. If no
// endpoint host was given it blocks, retrying every 10 seconds, until
// a local IP can be determined.
func New(context *cli.Context) *Config {
	// TODO: parse this more gracefully
	level, parseErr := logrus.ParseLevel(context.String(logLevel))
	if parseErr != nil {
		// Fall back to debug logging on an unparsable level string.
		level = logrus.DebugLevel
	}

	host := context.String(endpointHost)
	for host == "" {
		if host = LocalIP(); host != "" {
			break
		}
		logrus.Warn("Could not obtain local IP")
		time.Sleep(time.Second * 10)
	}

	return &Config{
		ServiceName:    context.String(serviceName),
		ServiceVersion: context.String(serviceVersion),
		EndpointHost:   host,
		EndpointPort:   context.Int(endpointPort),
		LogstashServer: context.String(logstashServer),
		Register:       context.BoolT(register),
		Proxy:          context.BoolT(proxy),
		Log:            context.BoolT(log),
		Supervise:      context.Bool(supervise),
		Controller: Controller{
			URL:  context.String(controllerURL),
			Poll: context.Duration(controllerPoll),
		},
		Tenant: Tenant{
			Token:     context.String(tenantToken),
			TTL:       context.Duration(tenantTTL),
			Heartbeat: context.Duration(tenantHeartbeat),
		},
		Registry: Registry{
			URL:   context.String(registryURL),
			Token: context.String(registryToken),
		},
		Kafka: Kafka{
			Username: context.String(kafkaUsername),
			Password: context.String(kafkaPassword),
			APIKey:   context.String(kafkaToken),
			RestURL:  context.String(kafkaRestURL),
			Brokers:  context.StringSlice(kafkaBrokers),
			SASL:     context.Bool(kafkaSASL),
		},
		Nginx: Nginx{
			Port: context.Int(nginxPort),
		},
		LogLevel:    level,
		AppArgs:     context.Args(),
		ForceUpdate: context.Bool(forceUpdate),
	}
}
// Validate checks the configuration for completeness and consistency.
// Each enabled feature (register, proxy, log, supervise) contributes
// its own checks; credential and Kafka checks are only added when
// validateCreds is true and proxying is enabled. Returns the first
// validation failure, or nil.
func (c *Config) Validate(validateCreds bool) error {
	if !c.Register && !c.Proxy {
		return errors.New("Sidecar serves no purpose. Please enable either proxy or registry or both")
	}

	// Create list of validation checks
	validators := []ValidatorFunc{}

	if c.Supervise {
		validators = append(validators,
			func() error {
				if len(c.AppArgs) == 0 {
					return fmt.Errorf("Supervision mode requires application launch arguments")
				}
				return nil
			},
		)
	}

	if c.Log {
		validators = append(validators,
			IsNotEmpty("Logstash Host", c.LogstashServer),
		)
	}

	if c.Register {
		validators = append(validators,
			func() error {
				// A TTL shorter than the heartbeat interval would let the
				// registration expire between heartbeats.
				if c.Tenant.TTL.Seconds() < c.Tenant.Heartbeat.Seconds() {
					return fmt.Errorf("Tenant TTL (%v) is less than heartbeat interval (%v)", c.Tenant.TTL, c.Tenant.Heartbeat)
				}
				return nil
			},
			IsNotEmpty("Service Name", c.ServiceName),
			IsInRange("NGINX port", c.Nginx.Port, 1, 65535),
			IsInRange("Service Endpoint Port", c.EndpointPort, 1, 65535),
			IsInRangeDuration("Tenant TTL", c.Tenant.TTL, 5*time.Second, 1*time.Hour),
			// BUG FIX: previously re-validated c.Tenant.TTL here instead of
			// the heartbeat interval, so Heartbeat was never range-checked.
			IsInRangeDuration("Tenant heartbeat interval", c.Tenant.Heartbeat, 5*time.Second, 1*time.Hour),
		)
		if validateCreds {
			validators = append(validators,
				IsNotEmpty("Registry token", c.Registry.Token),
				IsValidURL("Registry URL", c.Registry.URL), // typo fix: was "Regsitry URL"
			)
		}
	}

	if c.Proxy {
		validators = append(validators,
			IsNotEmpty("Tenant token", c.Tenant.Token),
			IsValidURL("Controller URL", c.Controller.URL),
			IsInRangeDuration("Controller polling interval", c.Controller.Poll, 5*time.Second, 1*time.Hour),
		)
		if validateCreds {
			validators = append(validators,
				IsNotEmpty("Registry token", c.Registry.Token),
				IsValidURL("Registry URL", c.Registry.URL), // typo fix: was "Regsitry URL"
			)
			// If any of the Kafka config is present validate the Message Hub config
			if len(c.Kafka.Brokers) > 0 || c.Kafka.Username != "" || c.Kafka.Password != "" {
				validators = append(validators,
					func() error {
						if len(c.Kafka.Brokers) == 0 {
							return errors.New("Kafka requires at least one broker")
						}
						for _, broker := range c.Kafka.Brokers {
							if err := IsNotEmpty("Kafka broker", broker)(); err != nil {
								return err
							}
						}
						return nil
					},
				)
				if c.Kafka.SASL {
					// SASL requires the full credential set.
					validators = append(validators,
						IsNotEmpty("Kafka username", c.Kafka.Username),
						IsNotEmpty("Kafka password", c.Kafka.Password),
						IsNotEmpty("Kafka token", c.Kafka.APIKey),
						IsValidURL("Kafka Rest URL", c.Kafka.RestURL),
					)
				} else {
					// Without SASL, providing credentials is almost certainly
					// a misconfiguration; reject it explicitly.
					validators = append(validators,
						func() error {
							if len(c.Kafka.Brokers) != 0 {
								if c.Kafka.Username != "" || c.Kafka.Password != "" ||
									c.Kafka.RestURL != "" || c.Kafka.APIKey != "" {
									return errors.New("Kafka credentials provided when SASL authentication disabled")
								}
							}
							return nil
						},
					)
				}
			}
		}
	}

	return Validate(validators)
}
// LocalIP retrieves the IP address of the sidecar: the first
// non-loopback IPv4 address found on any interface, or "" if none
// could be determined (including when interface enumeration fails).
func LocalIP() string {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return ""
	}
	for _, addr := range addrs {
		// Only consider non-loopback IP networks.
		ipNet, ok := addr.(*net.IPNet)
		if !ok || ipNet.IP.IsLoopback() {
			continue
		}
		// Restrict to IPv4 addresses.
		if ipNet.IP.To4() != nil {
			return ipNet.IP.String()
		}
	}
	return ""
}
|
package config
import (
"encoding/json"
"io/ioutil"
"log"
"path/filepath"
"strings"
"github.com/sivel/overseer/monitor"
"github.com/sivel/overseer/notifier"
)
// Config represents the top-level overseer.json configuration.
// Currently empty; it is still parsed for forward compatibility.
type Config struct {
}

// NotifierType captures only the "Type" field of a configuration file,
// used to select the right factory before the full config is decoded.
type NotifierType struct {
	Type string
}
// getNotifiers loads the JSON files in the "notifiers" subdirectory of
// configPath and instantiates one notifier per valid file. Files that
// cannot be read, are not valid JSON, or name an unknown notifier type
// are skipped. Exits the process if the directory cannot be listed.
func getNotifiers(configPath string) []notifier.Notifier {
	notifierPath := filepath.Join(configPath, "notifiers")
	files, err := ioutil.ReadDir(notifierPath)
	if err != nil {
		log.Fatalf("Could not list notifiers configuration directory: %s", err)
	}
	var notifiers []notifier.Notifier
	for _, f := range files {
		if !strings.HasSuffix(f.Name(), ".json") {
			continue
		}
		var tmp NotifierType
		text, err := ioutil.ReadFile(filepath.Join(notifierPath, f.Name()))
		if err != nil {
			log.Printf("Could not read configuration file: %s", f.Name())
			continue
		}
		if err = json.Unmarshal(text, &tmp); err != nil {
			log.Printf("Configuration file not valid JSON: %s", f.Name())
			continue
		}
		// IDIOM FIX: renamed the local from "notifier" to "factory" so it
		// no longer shadows the imported notifier package.
		factory, err := notifier.GetNotifier(tmp.Type)
		if err != nil {
			// Unknown notifier type: skip silently, matching prior behavior.
			continue
		}
		notifiers = append(notifiers, factory(text, f.Name()))
	}
	return notifiers
}
// getMonitors loads the JSON files in the "monitors" subdirectory of
// configPath and instantiates one monitor per valid file. Files that
// cannot be read, are not valid JSON, or name an unknown monitor type
// are skipped. Exits the process if the directory cannot be listed.
func getMonitors(configPath string) []monitor.Monitor {
	monitorPath := filepath.Join(configPath, "monitors")
	files, err := ioutil.ReadDir(monitorPath)
	if err != nil {
		log.Fatalf("Could not list monitors configuration directory: %s", err)
	}
	var monitors []monitor.Monitor
	for _, f := range files {
		if !strings.HasSuffix(f.Name(), ".json") {
			continue
		}
		var tmp NotifierType
		text, err := ioutil.ReadFile(filepath.Join(monitorPath, f.Name()))
		if err != nil {
			log.Printf("Could not read configuration file: %s", f.Name())
			// BUG FIX: previously fell through and unmarshaled nil text.
			continue
		}
		if err = json.Unmarshal(text, &tmp); err != nil {
			log.Printf("Configuration file not valid JSON: %s", f.Name())
			// BUG FIX: previously fell through and looked up an empty type.
			continue
		}
		// IDIOM FIX: renamed the local from "monitor" to "factory" so it
		// no longer shadows the imported monitor package.
		factory, err := monitor.GetMonitor(tmp.Type)
		if err != nil {
			// Unknown monitor type: skip silently, matching prior behavior.
			continue
		}
		monitors = append(monitors, factory(text))
	}
	return monitors
}
// ParseConfig reads the overseer configuration tree rooted at
// /etc/overseer and returns the configured monitors and notifiers.
// The top-level overseer.json is decoded best-effort into the (empty)
// Config struct; decode errors are deliberately ignored.
func ParseConfig() ([]monitor.Monitor, []notifier.Notifier) {
	var cfg Config
	root, _ := filepath.Abs("/etc/overseer")
	if text, err := ioutil.ReadFile(filepath.Join(root, "overseer.json")); err == nil {
		json.Unmarshal(text, &cfg) // best-effort: Config currently has no fields
	}
	notifiers := getNotifiers(root)
	monitors := getMonitors(root)
	return monitors, notifiers
}
No need for a global config for now
package config
import (
"encoding/json"
"io/ioutil"
"log"
"path/filepath"
"strings"
"github.com/sivel/overseer/monitor"
"github.com/sivel/overseer/notifier"
)
// Config is a placeholder for global overseer configuration.
// Currently empty and unused by ParseConfig.
type Config struct {
}

// NotifierType captures only the "Type" field of a configuration file,
// used to select the right factory before the full config is decoded.
type NotifierType struct {
	Type string
}
// getNotifiers loads the JSON files in the "notifiers" subdirectory of
// configPath and instantiates one notifier per valid file. Files that
// cannot be read, are not valid JSON, or name an unknown notifier type
// are skipped. Exits the process if the directory cannot be listed.
func getNotifiers(configPath string) []notifier.Notifier {
	notifierPath := filepath.Join(configPath, "notifiers")
	files, err := ioutil.ReadDir(notifierPath)
	if err != nil {
		log.Fatalf("Could not list notifiers configuration directory: %s", err)
	}
	var notifiers []notifier.Notifier
	for _, f := range files {
		if !strings.HasSuffix(f.Name(), ".json") {
			continue
		}
		var tmp NotifierType
		text, err := ioutil.ReadFile(filepath.Join(notifierPath, f.Name()))
		if err != nil {
			log.Printf("Could not read configuration file: %s", f.Name())
			continue
		}
		if err = json.Unmarshal(text, &tmp); err != nil {
			log.Printf("Configuration file not valid JSON: %s", f.Name())
			continue
		}
		// IDIOM FIX: renamed the local from "notifier" to "factory" so it
		// no longer shadows the imported notifier package.
		factory, err := notifier.GetNotifier(tmp.Type)
		if err != nil {
			// Unknown notifier type: skip silently, matching prior behavior.
			continue
		}
		notifiers = append(notifiers, factory(text, f.Name()))
	}
	return notifiers
}
// getMonitors loads the JSON files in the "monitors" subdirectory of
// configPath and instantiates one monitor per valid file. Files that
// cannot be read, are not valid JSON, or name an unknown monitor type
// are skipped. Exits the process if the directory cannot be listed.
func getMonitors(configPath string) []monitor.Monitor {
	monitorPath := filepath.Join(configPath, "monitors")
	files, err := ioutil.ReadDir(monitorPath)
	if err != nil {
		log.Fatalf("Could not list monitors configuration directory: %s", err)
	}
	var monitors []monitor.Monitor
	for _, f := range files {
		if !strings.HasSuffix(f.Name(), ".json") {
			continue
		}
		var tmp NotifierType
		text, err := ioutil.ReadFile(filepath.Join(monitorPath, f.Name()))
		if err != nil {
			log.Printf("Could not read configuration file: %s", f.Name())
			continue
		}
		if err = json.Unmarshal(text, &tmp); err != nil {
			log.Printf("Configuration file not valid JSON: %s", f.Name())
			continue
		}
		// IDIOM FIX: renamed the local from "monitor" to "factory" so it
		// no longer shadows the imported monitor package.
		factory, err := monitor.GetMonitor(tmp.Type)
		if err != nil {
			// Unknown monitor type: skip silently, matching prior behavior.
			continue
		}
		monitors = append(monitors, factory(text, f.Name()))
	}
	return monitors
}
// ParseConfig loads all monitors and notifiers from the configuration
// tree rooted at /etc/overseer.
func ParseConfig() ([]monitor.Monitor, []notifier.Notifier) {
	const root = "/etc/overseer"
	// Load notifiers first, then monitors, preserving the original
	// side-effect ordering (both may log.Fatalf on a missing directory).
	loadedNotifiers := getNotifiers(root)
	loadedMonitors := getMonitors(root)
	return loadedMonitors, loadedNotifiers
}
|
package config
import (
"os"
"sync"
"sync/atomic"
)
// Config is the structure holding every application configuration
// setting, populated from environment variables by Install.
type Config struct {
	APIPort             string
	Version             string
	SEQUrl              string
	SEQAPIKey           string
	EnableRequestLog    bool
	EnablePrintRequest  bool
	Environment         string
	SEQDomain           string
	ApplicationName     string
	URLBBRegisterBoleto string
	URLBBToken          string
	MockMode            bool
	DevMode             bool
	AppURL              string
	ElasticURL          string
	MongoURL            string
	BoletoJSONFileStore string
}

var cnf Config // global configuration, written by Install, read via Get

// scnf and mutex are not referenced in the code visible here —
// TODO(review): confirm whether they are still needed.
var scnf sync.Once
var running uint64 // accept-requests flag, accessed atomically (see IsRunning/Stop)
var mutex sync.Mutex
// Get returns the application configuration object (a copy of the
// global set by Install).
func Get() Config {
	return cnf
}
// Install initializes the global configuration from environment
// variables and resets the running flag to 0.
//
// NOTE(review): Install stores 0 into running while Stop stores 1, yet
// IsRunning reports running > 0 — the flag semantics look inverted
// relative to the function names; confirm against callers.
func Install(mockMode, devMode bool) {
	atomic.StoreUint64(&running, 0)
	cnf = Config{
		APIPort:             ":" + os.Getenv("API_PORT"),
		Version:             os.Getenv("API_VERSION"),
		SEQUrl:              os.Getenv("SEQ_URL"),     // dev SEQ endpoint
		SEQAPIKey:           os.Getenv("SEQ_API_KEY"), // staging key
		EnableRequestLog:    os.Getenv("ENABLE_REQUEST_LOG") == "true",   // log every request to SEQ
		EnablePrintRequest:  os.Getenv("ENABLE_PRINT_REQUEST") == "true", // print request info to the console
		Environment:         os.Getenv("ENVIRONMENT"),
		SEQDomain:           "One",
		ApplicationName:     "BoletoOnline",
		URLBBRegisterBoleto: os.Getenv("URL_BB_REGISTER_BOLETO"),
		URLBBToken:          os.Getenv("URL_BB_TOKEN"),
		MockMode:            mockMode,
		AppURL:              os.Getenv("APP_URL"),
		ElasticURL:          os.Getenv("ELASTIC_URL"),
		DevMode:             devMode,
		MongoURL:            os.Getenv("MONGODB_URL"),
		BoletoJSONFileStore: os.Getenv("BOLETO_JSON_STORE"),
	}
}
// IsRunning reports whether the application should accept requests
// (true when the running flag is non-zero).
func IsRunning() bool {
	return atomic.LoadUint64(&running) > 0
}

// Stop makes the application stop receiving requests.
// NOTE(review): this stores 1, which makes IsRunning return true —
// apparently inverted relative to the documented intent; confirm.
func Stop() {
	atomic.StoreUint64(&running, 1)
}
:construction: adicona suporte disableLog na aplicação
package config
import (
"os"
"sync"
"sync/atomic"
)
// Config is the structure holding every application configuration
// setting, populated from environment variables by Install.
type Config struct {
	APIPort             string
	Version             string
	SEQUrl              string
	SEQAPIKey           string
	EnableRequestLog    bool
	EnablePrintRequest  bool
	Environment         string
	SEQDomain           string
	ApplicationName     string
	URLBBRegisterBoleto string
	URLBBToken          string
	MockMode            bool
	DevMode             bool
	AppURL              string
	ElasticURL          string
	MongoURL            string
	BoletoJSONFileStore string
	DisableLog          bool
}

var cnf Config // global configuration, written by Install, read via Get

// scnf and mutex are not referenced in the code visible here —
// TODO(review): confirm whether they are still needed.
var scnf sync.Once
var running uint64 // accept-requests flag, accessed atomically (see IsRunning/Stop)
var mutex sync.Mutex
// Get returns the application configuration object (a copy of the
// global set by Install).
func Get() Config {
	return cnf
}
// Install initializes the global configuration from environment
// variables and resets the running flag to 0. disableLog suppresses
// application logging via cnf.DisableLog.
//
// NOTE(review): Install stores 0 into running while Stop stores 1, yet
// IsRunning reports running > 0 — the flag semantics look inverted
// relative to the function names; confirm against callers.
func Install(mockMode, devMode, disableLog bool) {
	atomic.StoreUint64(&running, 0)
	cnf = Config{
		APIPort:             ":" + os.Getenv("API_PORT"),
		Version:             os.Getenv("API_VERSION"),
		SEQUrl:              os.Getenv("SEQ_URL"),     // dev SEQ endpoint
		SEQAPIKey:           os.Getenv("SEQ_API_KEY"), // staging key
		EnableRequestLog:    os.Getenv("ENABLE_REQUEST_LOG") == "true",   // log every request to SEQ
		EnablePrintRequest:  os.Getenv("ENABLE_PRINT_REQUEST") == "true", // print request info to the console
		Environment:         os.Getenv("ENVIRONMENT"),
		SEQDomain:           "One",
		ApplicationName:     "BoletoOnline",
		URLBBRegisterBoleto: os.Getenv("URL_BB_REGISTER_BOLETO"),
		URLBBToken:          os.Getenv("URL_BB_TOKEN"),
		MockMode:            mockMode,
		AppURL:              os.Getenv("APP_URL"),
		ElasticURL:          os.Getenv("ELASTIC_URL"),
		DevMode:             devMode,
		DisableLog:          disableLog,
		MongoURL:            os.Getenv("MONGODB_URL"),
		BoletoJSONFileStore: os.Getenv("BOLETO_JSON_STORE"),
	}
}
// IsRunning reports whether the application should accept requests
// (true when the running flag is non-zero).
func IsRunning() bool {
	return atomic.LoadUint64(&running) > 0
}

// Stop makes the application stop receiving requests.
// NOTE(review): this stores 1, which makes IsRunning return true —
// apparently inverted relative to the documented intent; confirm.
func Stop() {
	atomic.StoreUint64(&running, 1)
}
|
// Package config collects together all configuration settings
// NOTE: Subject to change, do not rely on this package from outside git-lfs source
package config
import (
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"sync"
"github.com/ThomsonReutersEikon/go-ntlm/ntlm"
"github.com/bgentry/go-netrc/netrc"
"github.com/github/git-lfs/git"
"github.com/github/git-lfs/tools"
"github.com/rubyist/tracerx"
)
var (
	// Config is the shared package-level Configuration instance.
	Config = New()
	// ShowConfigWarnings gates configuration warning output
	// (consumed elsewhere in the package — confirm usage there).
	ShowConfigWarnings = false

	// defaultRemote is the remote assumed when none is specified.
	defaultRemote = "origin"
	// gitConfigWarningPrefix scopes which git config keys warnings apply to.
	gitConfigWarningPrefix = "lfs."
)
// FetchPruneConfig collects together the config options that control fetching and pruning
type FetchPruneConfig struct {
	// The number of days prior to current date for which (local) refs other than HEAD
	// will be fetched with --recent (default 7, 0 = only fetch HEAD)
	FetchRecentRefsDays int `git:"lfs.fetchrecentrefsdays"`
	// Makes the FetchRecentRefsDays option apply to remote refs from fetch source as well (default true)
	FetchRecentRefsIncludeRemotes bool `git:"lfs.fetchrecentremoterefs"`
	// number of days prior to latest commit on a ref that we'll fetch previous
	// LFS changes too (default 0 = only fetch at ref)
	FetchRecentCommitsDays int `git:"lfs.fetchrecentcommitsdays"`
	// Whether to always fetch recent even without --recent
	FetchRecentAlways bool `git:"lfs.fetchrecentalways"`
	// Number of days added to FetchRecent*; data outside combined window will be
	// deleted when prune is run. (default 3)
	PruneOffsetDays int `git:"lfs.pruneoffsetdays"`
	// Always verify with remote before pruning
	PruneVerifyRemoteAlways bool `git:"lfs.pruneverifyremotealways"`
	// Name of remote to check for unpushed and verify checks
	PruneRemoteName string `git:"lfs.pruneremotetocheck"`
}

// Configuration is the aggregate view of Git LFS configuration drawn
// from the OS environment and the various git config levels.
type Configuration struct {
	// Os provides a `*Environment` used to access to the system's
	// environment through os.Getenv. It is the point of entry for all
	// system environment configuration.
	Os *Environment
	// Git provides a `*Environment` used to access to the various levels of
	// `.gitconfig`'s. It is the point of entry for all Git environment
	// configuration.
	Git *Environment

	// gitConfig caches the flattened key/value view of the loaded git
	// config; initialized by loadGitConfig (guarded by loading).
	gitConfig       map[string]string
	CurrentRemote   string
	NtlmSession     ntlm.ClientSession
	envVars         map[string]string
	envVarsMutex    sync.Mutex
	IsTracingHttp   bool
	IsDebuggingHttp bool
	IsLoggingStats  bool

	loading       sync.Mutex // guards initialization of gitConfig and remotes
	origConfig    map[string]string
	remotes       []string
	extensions    map[string]Extension
	manualEndpoint *Endpoint
	parsedNetrc    netrcfinder
}
// New constructs the default Configuration, wiring the OS environment
// fetcher and reading the HTTP tracing/debug/stats toggles from the
// process environment.
func New() *Configuration {
	cfg := &Configuration{
		Os:            EnvironmentOf(NewOsFetcher()),
		CurrentRemote: defaultRemote,
		envVars:       make(map[string]string),
	}
	cfg.IsTracingHttp = cfg.GetenvBool("GIT_CURL_VERBOSE", false)
	cfg.IsDebuggingHttp = cfg.GetenvBool("LFS_DEBUG_HTTP", false)
	cfg.IsLoggingStats = cfg.GetenvBool("GIT_LOG_STATS", false)
	return cfg
}
// Values is a convenience type used to call the NewFrom function. It
// specifies `Git` and `Os` maps to use as mock values, instead of calling out
// to real `.gitconfig`s and the `os.Getenv` function.
type Values struct {
	// Git and Os are the stand-in maps used to provide values for their
	// respective environments.
	Git, Os map[string]string
}
// NewFrom returns a new `*config.Configuration` that reads both its Git
// and Environment-level values from the ones provided instead of the
// actual `.gitconfig` file or `os.Getenv`, respectively.
//
// This method should only be used during testing.
func NewFrom(v Values) *Configuration {
	return &Configuration{
		Os:        EnvironmentOf(mapFetcher(v.Os)),
		Git:       EnvironmentOf(mapFetcher(v.Git)),
		gitConfig: v.Git,
		// Idiom fix: dropped the redundant zero size hint from make.
		envVars: make(map[string]string),
	}
}
// Unmarshal unmarshals the *Configuration in context into all of `v`'s fields,
// according to the following rules:
//
// Values are marshaled according to the given key and environment, as follows:
//	type T struct {
//		Field string `git:"key"`
//		Other string `os:"key"`
//	}
//
// If an unknown environment is given, an error will be returned. If there is no
// method supporting conversion into a field's type, an error will be returned.
// If no value is associated with the given key and environment, the field will
// only be modified if there is a config value present matching the given
// key. If the field is already set to a non-zero value of that field's type,
// then it will be left alone.
//
// Otherwise, the field will be set to the value of calling the
// appropriately-typed method on the specified environment.
func (c *Configuration) Unmarshal(v interface{}) error {
	c.loadGitConfig()
	into := reflect.ValueOf(v)
	if into.Kind() != reflect.Ptr {
		return fmt.Errorf("lfs/config: unable to parse non-pointer type of %T", v)
	}
	into = into.Elem()
	for i := 0; i < into.Type().NumField(); i++ {
		field := into.Field(i)
		sfield := into.Type().Field(i)
		key, env, err := c.parseTag(sfield.Tag)
		if err != nil {
			return err
		}
		if env == nil {
			// Field has neither a `git` nor an `os` tag; leave it untouched.
			continue
		}
		var val interface{}
		switch sfield.Type.Kind() {
		case reflect.String:
			var ok bool
			val, ok = env.Get(key)
			if !ok {
				// No config value present: keep the field's current value.
				val = field.String()
			}
		case reflect.Int:
			val = env.Int(key, int(field.Int()))
		case reflect.Bool:
			val = env.Bool(key, field.Bool())
		default:
			return fmt.Errorf(
				"lfs/config: unsupported target type for field %q: %v",
				sfield.Name, sfield.Type.String())
		}
		if val != nil {
			into.Field(i).Set(reflect.ValueOf(val))
		}
	}
	return nil
}

// parseTag returns the key, environment, and optional error associated with a
// given tag. It will return the XOR of either the `git` or `os` tag. That is to
// say, a field tagged with EITHER `git` OR `os` is valid, but one tagged with
// both is not.
//
// If neither field was found, then a nil environment will be returned.
func (c *Configuration) parseTag(tag reflect.StructTag) (key string, env *Environment, err error) {
	git, os := tag.Get("git"), tag.Get("os")
	if len(git) != 0 && len(os) != 0 {
		return "", nil, errors.New("lfs/config: ambiguous tags")
	}
	if len(git) != 0 {
		return git, c.Git, nil
	}
	if len(os) != 0 {
		return os, c.Os, nil
	}
	return
}
// Getenv is shorthand for `c.Os.Get(key)`, dropping the presence flag.
func (c *Configuration) Getenv(key string) string {
	v, _ := c.Os.Get(key)
	return v
}

// GetenvBool is shorthand for `c.Os.Bool(key, def)`.
func (c *Configuration) GetenvBool(key string, def bool) bool {
	return c.Os.Bool(key, def)
}

// GitRemoteUrl returns the git clone/push url for a given remote (blank if not found)
// the forpush argument is to cater for separate remote.name.pushurl settings
func (c *Configuration) GitRemoteUrl(remote string, forpush bool) string {
	if forpush {
		if u, ok := c.GitConfig("remote." + remote + ".pushurl"); ok {
			return u
		}
	}
	if u, ok := c.GitConfig("remote." + remote + ".url"); ok {
		return u
	}
	return ""
}

// SetManualEndpoint manually sets an Endpoint to use instead of deriving from Git config
func (c *Configuration) SetManualEndpoint(e Endpoint) {
	c.manualEndpoint = &e
}

// Endpoint resolves the LFS API endpoint for the given operation,
// honoring, in order: the manual override, lfs.pushurl (uploads only),
// lfs.url, the current remote, and finally the default remote.
func (c *Configuration) Endpoint(operation string) Endpoint {
	if c.manualEndpoint != nil {
		return *c.manualEndpoint
	}
	if operation == "upload" {
		if url, ok := c.GitConfig("lfs.pushurl"); ok {
			return NewEndpointWithConfig(url, c)
		}
	}
	if url, ok := c.GitConfig("lfs.url"); ok {
		return NewEndpointWithConfig(url, c)
	}
	if len(c.CurrentRemote) > 0 && c.CurrentRemote != defaultRemote {
		if endpoint := c.RemoteEndpoint(c.CurrentRemote, operation); len(endpoint.Url) > 0 {
			return endpoint
		}
	}
	return c.RemoteEndpoint(defaultRemote, operation)
}

// ConcurrentTransfers returns the number of concurrent transfers:
// lfs.concurrenttransfers when set to a positive integer, 3 otherwise,
// and always 1 when NTLM access is in effect for downloads.
func (c *Configuration) ConcurrentTransfers() int {
	if c.NtlmAccess("download") {
		return 1
	}
	uploads := 3
	if v, ok := c.GitConfig("lfs.concurrenttransfers"); ok {
		n, err := strconv.Atoi(v)
		if err == nil && n > 0 {
			uploads = n
		}
	}
	return uploads
}

// BasicTransfersOnly returns whether to only allow "basic" HTTP transfers.
// Default is false, including if the lfs.basictransfersonly is invalid
func (c *Configuration) BasicTransfersOnly() bool {
	return c.GitConfigBool("lfs.basictransfersonly", false)
}

// TusTransfersAllowed returns whether to only use "tus.io" HTTP transfers.
// Default is false, including if the lfs.tustransfers is invalid
func (c *Configuration) TusTransfersAllowed() bool {
	return c.GitConfigBool("lfs.tustransfers", false)
}

// BatchTransfer reports whether the batch API is enabled (lfs.batch,
// default true).
func (c *Configuration) BatchTransfer() bool {
	return c.GitConfigBool("lfs.batch", true)
}

// NtlmAccess reports whether the operation's endpoint uses NTLM auth.
func (c *Configuration) NtlmAccess(operation string) bool {
	return c.Access(operation) == "ntlm"
}

// PrivateAccess will retrieve the access value and return true if
// the value is set to private. When a repo is marked as having private
// access, the http requests for the batch api will fetch the credentials
// before running, otherwise the request will run without credentials.
func (c *Configuration) PrivateAccess(operation string) bool {
	return c.Access(operation) != "none"
}

// Access returns the access auth type.
func (c *Configuration) Access(operation string) string {
	return c.EndpointAccess(c.Endpoint(operation))
}

// SetAccess will set the private access flag in .git/config.
func (c *Configuration) SetAccess(operation string, authType string) {
	c.SetEndpointAccess(c.Endpoint(operation), authType)
}
// FindNetrcHost looks up host in the user's netrc, parsing the netrc
// lazily on first use (guarded by the loading mutex).
func (c *Configuration) FindNetrcHost(host string) (*netrc.Machine, error) {
	c.loading.Lock()
	defer c.loading.Unlock()
	if c.parsedNetrc == nil {
		n, err := c.parseNetrc()
		if err != nil {
			return nil, err
		}
		c.parsedNetrc = n
	}
	return c.parsedNetrc.FindMachine(host), nil
}

// SetNetrc manually overrides the parsed netrc config.
func (c *Configuration) SetNetrc(n netrcfinder) {
	c.parsedNetrc = n
}

// EndpointAccess returns the configured auth type for the endpoint
// (lfs.<url>.access), mapping the legacy "private" value to "basic"
// and defaulting to "none".
func (c *Configuration) EndpointAccess(e Endpoint) string {
	key := fmt.Sprintf("lfs.%s.access", e.Url)
	if v, ok := c.GitConfig(key); ok && len(v) > 0 {
		lower := strings.ToLower(v)
		if lower == "private" {
			return "basic"
		}
		return lower
	}
	return "none"
}

// SetEndpointAccess persists the auth type for the endpoint into the
// local git config and updates the in-process cache to match.
func (c *Configuration) SetEndpointAccess(e Endpoint, authType string) {
	tracerx.Printf("setting repository access to %s", authType)
	key := fmt.Sprintf("lfs.%s.access", e.Url)
	// Modify the config cache because it's checked again in this process
	// without being reloaded.
	switch authType {
	case "", "none":
		git.Config.UnsetLocalKey("", key)
		c.loading.Lock()
		delete(c.gitConfig, strings.ToLower(key))
		c.loading.Unlock()
	default:
		git.Config.SetLocal("", key, authType)
		c.loading.Lock()
		c.gitConfig[strings.ToLower(key)] = authType
		c.loading.Unlock()
	}
}

// FetchIncludePaths returns the cleaned lfs.fetchinclude path patterns.
func (c *Configuration) FetchIncludePaths() []string {
	c.loadGitConfig()
	patterns, _ := c.Git.Get("lfs.fetchinclude")
	return tools.CleanPaths(patterns, ",")
}

// FetchExcludePaths returns the cleaned lfs.fetchexclude path patterns.
func (c *Configuration) FetchExcludePaths() []string {
	c.loadGitConfig()
	patterns, _ := c.Git.Get("lfs.fetchexclude")
	return tools.CleanPaths(patterns, ",")
}
// RemoteEndpoint returns the LFS endpoint for the named remote (falling
// back to "origin" for an empty name), preferring
// remote.<name>.lfspushurl for uploads, then remote.<name>.lfsurl, then
// deriving an endpoint from the plain git remote url.
func (c *Configuration) RemoteEndpoint(remote, operation string) Endpoint {
	if len(remote) == 0 {
		remote = defaultRemote
	}
	// Support separate push URL if specified and pushing
	if operation == "upload" {
		if url, ok := c.GitConfig("remote." + remote + ".lfspushurl"); ok {
			return NewEndpointWithConfig(url, c)
		}
	}
	if url, ok := c.GitConfig("remote." + remote + ".lfsurl"); ok {
		return NewEndpointWithConfig(url, c)
	}
	// finally fall back on git remote url (also supports pushurl)
	if url := c.GitRemoteUrl(remote, operation == "upload"); url != "" {
		return NewEndpointFromCloneURLWithConfig(url, c)
	}
	return Endpoint{}
}

// Remotes returns the remotes discovered in the git config, excluding
// those flagged as origin (see loadGitConfig).
func (c *Configuration) Remotes() []string {
	c.loadGitConfig()
	return c.remotes
}

// GitProtocol returns the protocol for the LFS API when converting from a
// git:// remote url; lfs.gitprotocol if set, "https" otherwise.
func (c *Configuration) GitProtocol() string {
	if value, ok := c.GitConfig("lfs.gitprotocol"); ok {
		return value
	}
	return "https"
}

// Extensions returns the configured LFS extensions keyed by name.
func (c *Configuration) Extensions() map[string]Extension {
	c.loadGitConfig()
	return c.extensions
}

// SortedExtensions gets the list of extensions ordered by Priority
func (c *Configuration) SortedExtensions() ([]Extension, error) {
	return SortExtensions(c.Extensions())
}

// GitConfigInt parses a git config value and returns it as an integer.
func (c *Configuration) GitConfigInt(key string, def int) int {
	c.loadGitConfig()
	return c.Git.Int(strings.ToLower(key), def)
}

// GitConfigBool parses a git config value and returns true if defined as
// true, 1, on, yes, or def if not defined
func (c *Configuration) GitConfigBool(key string, def bool) bool {
	c.loadGitConfig()
	return c.Git.Bool(strings.ToLower(key), def)
}

// GitConfig looks up a git config value by key (lowercased), reporting
// whether it was present.
func (c *Configuration) GitConfig(key string) (string, bool) {
	c.loadGitConfig()
	value, ok := c.gitConfig[strings.ToLower(key)]
	return value, ok
}

// AllGitConfig returns the full cached git config map.
func (c *Configuration) AllGitConfig() map[string]string {
	c.loadGitConfig()
	return c.gitConfig
}

// FetchPruneConfig returns the fetch/prune settings, starting from the
// documented defaults and overlaying configured lfs.* values via
// Unmarshal. Panics if unmarshaling fails (programmer error in tags).
func (c *Configuration) FetchPruneConfig() FetchPruneConfig {
	f := &FetchPruneConfig{
		FetchRecentRefsDays:           7,
		FetchRecentRefsIncludeRemotes: true,
		PruneOffsetDays:               3,
		PruneRemoteName:               "origin",
	}
	if err := c.Unmarshal(f); err != nil {
		panic(err.Error())
	}
	return *f
}

// SkipDownloadErrors reports whether download errors should be skipped,
// enabled by either GIT_LFS_SKIP_DOWNLOAD_ERRORS in the environment or
// the lfs.skipdownloaderrors git config.
func (c *Configuration) SkipDownloadErrors() bool {
	return c.GetenvBool("GIT_LFS_SKIP_DOWNLOAD_ERRORS", false) || c.GitConfigBool("lfs.skipdownloaderrors", false)
}
// loadGitConfig reads the git configuration at most once, populating
// c.Git, c.gitConfig, c.extensions and c.remotes under the loading
// mutex. It returns true only on the call that performed the load,
// false when the config had already been loaded.
func (c *Configuration) loadGitConfig() bool {
	c.loading.Lock()
	defer c.loading.Unlock()
	if c.Git != nil {
		return false
	}
	gf, extensions, uniqRemotes := ReadGitConfig(getGitConfigs()...)
	c.Git = EnvironmentOf(gf)
	c.gitConfig = gf.vals // XXX TERRIBLE
	c.extensions = extensions
	c.remotes = make([]string, 0, len(uniqRemotes))
	for remote, isOrigin := range uniqRemotes {
		if isOrigin {
			// origin is implied; only the other remotes are recorded.
			continue
		}
		c.remotes = append(c.remotes, remote)
	}
	return true
}

// SetConfig overrides a single cached git config key, snapshotting the
// original map on the first call so ResetConfig can restore it.
// XXX(taylor): remove mutability
// NOTE(review): the final write to c.gitConfig happens outside the
// loading lock — possible data race with concurrent readers; confirm.
func (c *Configuration) SetConfig(key, value string) {
	if c.loadGitConfig() {
		c.loading.Lock()
		c.origConfig = make(map[string]string)
		for k, v := range c.gitConfig {
			c.origConfig[k] = v
		}
		c.loading.Unlock()
	}
	c.gitConfig[key] = value
}

// ResetConfig restores the cached git config from the snapshot taken by
// the first SetConfig call, re-pointing the GitFetcher's map at the
// fresh copy.
// XXX(taylor): remove mutability
func (c *Configuration) ResetConfig() {
	c.loading.Lock()
	c.gitConfig = make(map[string]string)
	if gf, ok := c.Git.Fetcher.(*GitFetcher); ok {
		gf.vals = c.gitConfig
	}
	for k, v := range c.origConfig {
		c.gitConfig[k] = v
	}
	c.loading.Unlock()
}
config/config: remove SetConfig and ResetConfig methods
// Package config collects together all configuration settings
// NOTE: Subject to change, do not rely on this package from outside git-lfs source
package config
import (
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"sync"
"github.com/ThomsonReutersEikon/go-ntlm/ntlm"
"github.com/bgentry/go-netrc/netrc"
"github.com/github/git-lfs/git"
"github.com/github/git-lfs/tools"
"github.com/rubyist/tracerx"
)
var (
	// Config is the shared package-level Configuration instance.
	Config = New()
	// ShowConfigWarnings gates configuration warning output
	// (consumed elsewhere in the package — confirm usage there).
	ShowConfigWarnings = false

	// defaultRemote is the remote assumed when none is specified.
	defaultRemote = "origin"
	// gitConfigWarningPrefix scopes which git config keys warnings apply to.
	gitConfigWarningPrefix = "lfs."
)
// FetchPruneConfig collects together the config options that control fetching and pruning
type FetchPruneConfig struct {
	// The number of days prior to current date for which (local) refs other than HEAD
	// will be fetched with --recent (default 7, 0 = only fetch HEAD)
	FetchRecentRefsDays int `git:"lfs.fetchrecentrefsdays"`
	// Makes the FetchRecentRefsDays option apply to remote refs from fetch source as well (default true)
	FetchRecentRefsIncludeRemotes bool `git:"lfs.fetchrecentremoterefs"`
	// number of days prior to latest commit on a ref that we'll fetch previous
	// LFS changes too (default 0 = only fetch at ref)
	FetchRecentCommitsDays int `git:"lfs.fetchrecentcommitsdays"`
	// Whether to always fetch recent even without --recent
	FetchRecentAlways bool `git:"lfs.fetchrecentalways"`
	// Number of days added to FetchRecent*; data outside combined window will be
	// deleted when prune is run. (default 3)
	PruneOffsetDays int `git:"lfs.pruneoffsetdays"`
	// Always verify with remote before pruning
	PruneVerifyRemoteAlways bool `git:"lfs.pruneverifyremotealways"`
	// Name of remote to check for unpushed and verify checks
	PruneRemoteName string `git:"lfs.pruneremotetocheck"`
}

// Configuration is the aggregate view of Git LFS configuration drawn
// from the OS environment and the various git config levels.
type Configuration struct {
	// Os provides a `*Environment` used to access to the system's
	// environment through os.Getenv. It is the point of entry for all
	// system environment configuration.
	Os *Environment
	// Git provides a `*Environment` used to access to the various levels of
	// `.gitconfig`'s. It is the point of entry for all Git environment
	// configuration.
	Git *Environment

	// gitConfig caches the flattened key/value view of the loaded git
	// config; initialized by loadGitConfig (guarded by loading).
	gitConfig       map[string]string
	CurrentRemote   string
	NtlmSession     ntlm.ClientSession
	envVars         map[string]string
	envVarsMutex    sync.Mutex
	IsTracingHttp   bool
	IsDebuggingHttp bool
	IsLoggingStats  bool

	loading       sync.Mutex // guards initialization of gitConfig and remotes
	origConfig    map[string]string
	remotes       []string
	extensions    map[string]Extension
	manualEndpoint *Endpoint
	parsedNetrc    netrcfinder
}
// New constructs the default Configuration, wiring the OS environment
// fetcher and reading the HTTP tracing/debug/stats toggles from the
// process environment.
func New() *Configuration {
	cfg := &Configuration{
		Os:            EnvironmentOf(NewOsFetcher()),
		CurrentRemote: defaultRemote,
		envVars:       make(map[string]string),
	}
	cfg.IsTracingHttp = cfg.GetenvBool("GIT_CURL_VERBOSE", false)
	cfg.IsDebuggingHttp = cfg.GetenvBool("LFS_DEBUG_HTTP", false)
	cfg.IsLoggingStats = cfg.GetenvBool("GIT_LOG_STATS", false)
	return cfg
}
// Values is a convenience type used to call the NewFrom function. It
// specifies `Git` and `Os` maps to use as mock values, instead of calling out
// to real `.gitconfig`s and the `os.Getenv` function.
type Values struct {
	// Git and Os are the stand-in maps used to provide values for their
	// respective environments.
	Git, Os map[string]string
}
// NewFrom returns a new `*config.Configuration` that reads both its Git
// and Environment-level values from the ones provided instead of the actual
// `.gitconfig` file or `os.Getenv`, respectively.
//
// This method should only be used during testing.
func NewFrom(v Values) *Configuration {
	return &Configuration{
		Os:        EnvironmentOf(mapFetcher(v.Os)),
		Git:       EnvironmentOf(mapFetcher(v.Git)),
		gitConfig: v.Git,
		// No size hint: the map starts empty (the previous `, 0` hint
		// was redundant).
		envVars: make(map[string]string),
	}
}
// Unmarshal unmarshals the *Configuration in context into all of `v`'s fields,
// according to the following rules:
//
// Values are marshaled according to the given key and environment, as follows:
//	type T struct {
//		Field string `git:"key"`
//		Other string `os:"key"`
//	}
//
// If an unknown environment is given, an error will be returned. If there is
// no method supporting conversion into a field's type, an error will be
// returned. If no value is associated with the given key and environment, the
// field will only be modified if there is a config value present matching the
// given key. If the field is already set to a non-zero value of that field's
// type, then it will be left alone.
//
// Otherwise, the field will be set to the value of calling the
// appropriately-typed method on the specified environment.
func (c *Configuration) Unmarshal(v interface{}) error {
	c.loadGitConfig()
	into := reflect.ValueOf(v)
	if into.Kind() != reflect.Ptr {
		return fmt.Errorf("lfs/config: unable to parse non-pointer type of %T", v)
	}
	into = into.Elem()
	for i := 0; i < into.Type().NumField(); i++ {
		field := into.Field(i)
		sfield := into.Type().Field(i)
		// Each field opts in via a `git:"…"` or `os:"…"` struct tag.
		key, env, err := c.parseTag(sfield.Tag)
		if err != nil {
			return err
		}
		if env == nil {
			// Untagged field: leave it untouched.
			continue
		}
		var val interface{}
		switch sfield.Type.Kind() {
		case reflect.String:
			var ok bool
			val, ok = env.Get(key)
			if !ok {
				// Missing key: re-assign the field's current value.
				val = field.String()
			}
		case reflect.Int:
			// Int/Bool lookups fall back to the field's current value
			// as the default.
			val = env.Int(key, int(field.Int()))
		case reflect.Bool:
			val = env.Bool(key, field.Bool())
		default:
			return fmt.Errorf(
				"lfs/config: unsupported target type for field %q: %v",
				sfield.Name, sfield.Type.String())
		}
		if val != nil {
			into.Field(i).Set(reflect.ValueOf(val))
		}
	}
	return nil
}
// parseTag returns the key, environment, and optional error associated with a
// given tag. It will return the XOR of either the `git` or `os` tag. That is
// to say, a field tagged with EITHER `git` OR `os` is valid, but one tagged
// with both is not.
//
// If neither field was found, then a nil environment will be returned.
func (c *Configuration) parseTag(tag reflect.StructTag) (key string, env *Environment, err error) {
	git, os := tag.Get("git"), tag.Get("os")
	if len(git) != 0 && len(os) != 0 {
		return "", nil, errors.New("lfs/config: ambiguous tags")
	}
	if len(git) != 0 {
		return git, c.Git, nil
	}
	if len(os) != 0 {
		return os, c.Os, nil
	}
	// Neither tag present: signal "skip this field" via a nil env.
	return
}
// Getenv is shorthand for `c.Os.Get(key)`.
// It returns the empty string when the variable is unset.
func (c *Configuration) Getenv(key string) string {
	v, _ := c.Os.Get(key)
	return v
}

// GetenvBool is shorthand for `c.Os.Bool(key, def)`.
func (c *Configuration) GetenvBool(key string, def bool) bool {
	return c.Os.Bool(key, def)
}
// GitRemoteUrl returns the git clone/push url for a given remote (blank if
// not found); the forpush argument caters for separate
// remote.<name>.pushurl settings, which take precedence when pushing.
func (c *Configuration) GitRemoteUrl(remote string, forpush bool) string {
	keys := []string{"remote." + remote + ".url"}
	if forpush {
		// pushurl wins over url for push operations.
		keys = append([]string{"remote." + remote + ".pushurl"}, keys...)
	}
	for _, key := range keys {
		if u, ok := c.GitConfig(key); ok {
			return u
		}
	}
	return ""
}
// Manually set an Endpoint to use instead of deriving from Git config
func (c *Configuration) SetManualEndpoint(e Endpoint) {
	c.manualEndpoint = &e
}

// Endpoint resolves the LFS API endpoint for operation (e.g. "upload"),
// in precedence order: manual override, lfs.pushurl (uploads only),
// lfs.url, the current remote's endpoint, then the default remote's.
func (c *Configuration) Endpoint(operation string) Endpoint {
	if c.manualEndpoint != nil {
		return *c.manualEndpoint
	}
	if operation == "upload" {
		if url, ok := c.GitConfig("lfs.pushurl"); ok {
			return NewEndpointWithConfig(url, c)
		}
	}
	if url, ok := c.GitConfig("lfs.url"); ok {
		return NewEndpointWithConfig(url, c)
	}
	// Only consult CurrentRemote when it differs from the default;
	// otherwise fall straight through to the default remote below.
	if len(c.CurrentRemote) > 0 && c.CurrentRemote != defaultRemote {
		if endpoint := c.RemoteEndpoint(c.CurrentRemote, operation); len(endpoint.Url) > 0 {
			return endpoint
		}
	}
	return c.RemoteEndpoint(defaultRemote, operation)
}
// ConcurrentTransfers reports how many object transfers may run at once.
// NTLM download auth forces serial transfers; otherwise the value comes
// from lfs.concurrenttransfers (default 3; non-positive or unparsable
// values are ignored).
func (c *Configuration) ConcurrentTransfers() int {
	if c.NtlmAccess("download") {
		return 1
	}
	const fallback = 3
	v, ok := c.GitConfig("lfs.concurrenttransfers")
	if !ok {
		return fallback
	}
	n, err := strconv.Atoi(v)
	if err != nil || n <= 0 {
		return fallback
	}
	return n
}
// BasicTransfersOnly returns whether to only allow "basic" HTTP transfers.
// Default is false, including if the lfs.basictransfersonly is invalid
func (c *Configuration) BasicTransfersOnly() bool {
	return c.GitConfigBool("lfs.basictransfersonly", false)
}

// TusTransfersAllowed returns whether to only use "tus.io" HTTP transfers.
// Default is false, including if the lfs.tustransfers is invalid
func (c *Configuration) TusTransfersAllowed() bool {
	return c.GitConfigBool("lfs.tustransfers", false)
}

// BatchTransfer reports whether the batch API may be used
// (lfs.batch, default true).
func (c *Configuration) BatchTransfer() bool {
	return c.GitConfigBool("lfs.batch", true)
}

// NtlmAccess reports whether the endpoint for operation is configured
// for NTLM authentication.
func (c *Configuration) NtlmAccess(operation string) bool {
	return c.Access(operation) == "ntlm"
}
// PrivateAccess will retrieve the access value and return true if
// the value is set to private. When a repo is marked as having private
// access, the http requests for the batch api will fetch the credentials
// before running, otherwise the request will run without credentials.
// NOTE(review): the implementation actually treats every mode other than
// "none" as private, not only "private" — confirm the doc comment wording.
func (c *Configuration) PrivateAccess(operation string) bool {
	return c.Access(operation) != "none"
}

// Access returns the access auth type.
func (c *Configuration) Access(operation string) string {
	return c.EndpointAccess(c.Endpoint(operation))
}

// SetAccess will set the private access flag in .git/config.
func (c *Configuration) SetAccess(operation string, authType string) {
	c.SetEndpointAccess(c.Endpoint(operation), authType)
}
// FindNetrcHost looks up host in the user's netrc, parsing and caching the
// file on first use. The lookup is serialized via c.loading.
func (c *Configuration) FindNetrcHost(host string) (*netrc.Machine, error) {
	c.loading.Lock()
	defer c.loading.Unlock()
	if c.parsedNetrc == nil {
		n, err := c.parseNetrc()
		if err != nil {
			return nil, err
		}
		c.parsedNetrc = n
	}
	return c.parsedNetrc.FindMachine(host), nil
}

// Manually override the netrc config
// NOTE(review): unlike FindNetrcHost, this write is not guarded by
// c.loading — presumably intended for test setup before concurrent use.
func (c *Configuration) SetNetrc(n netrcfinder) {
	c.parsedNetrc = n
}
// EndpointAccess returns the recorded auth mode for the given endpoint, as
// stored under lfs.<url>.access. An absent or empty value means "none";
// the legacy value "private" is reported as "basic".
func (c *Configuration) EndpointAccess(e Endpoint) string {
	v, ok := c.GitConfig(fmt.Sprintf("lfs.%s.access", e.Url))
	if !ok || len(v) == 0 {
		return "none"
	}
	access := strings.ToLower(v)
	if access == "private" {
		return "basic"
	}
	return access
}
// SetEndpointAccess persists the auth mode for endpoint e under
// lfs.<url>.access in local git config, mirroring the change into the
// in-process cache. An empty or "none" authType removes the key instead.
func (c *Configuration) SetEndpointAccess(e Endpoint, authType string) {
	tracerx.Printf("setting repository access to %s", authType)
	key := fmt.Sprintf("lfs.%s.access", e.Url)
	// Modify the config cache because it's checked again in this process
	// without being reloaded.
	switch authType {
	case "", "none":
		// Clearing access: remove the key on disk and from the cache.
		git.Config.UnsetLocalKey("", key)
		c.loading.Lock()
		delete(c.gitConfig, strings.ToLower(key))
		c.loading.Unlock()
	default:
		git.Config.SetLocal("", key, authType)
		c.loading.Lock()
		c.gitConfig[strings.ToLower(key)] = authType
		c.loading.Unlock()
	}
}
// fetchPathList returns the comma-separated path patterns stored under the
// given gitconfig key, cleaned by tools.CleanPaths. Shared by
// FetchIncludePaths and FetchExcludePaths, which previously duplicated it.
func (c *Configuration) fetchPathList(key string) []string {
	c.loadGitConfig()
	patterns, _ := c.Git.Get(key)
	return tools.CleanPaths(patterns, ",")
}

// FetchIncludePaths returns the cleaned patterns from lfs.fetchinclude.
func (c *Configuration) FetchIncludePaths() []string {
	return c.fetchPathList("lfs.fetchinclude")
}

// FetchExcludePaths returns the cleaned patterns from lfs.fetchexclude.
func (c *Configuration) FetchExcludePaths() []string {
	return c.fetchPathList("lfs.fetchexclude")
}
// RemoteEndpoint resolves the LFS endpoint for a named remote, preferring
// remote.<name>.lfspushurl (uploads only), then remote.<name>.lfsurl, and
// finally deriving one from the plain git remote url (or pushurl when
// uploading). An empty remote name means the default remote.
func (c *Configuration) RemoteEndpoint(remote, operation string) Endpoint {
	if remote == "" {
		remote = defaultRemote
	}
	keys := []string{"remote." + remote + ".lfsurl"}
	if operation == "upload" {
		// Separate push URL takes precedence when pushing.
		keys = append([]string{"remote." + remote + ".lfspushurl"}, keys...)
	}
	for _, key := range keys {
		if url, ok := c.GitConfig(key); ok {
			return NewEndpointWithConfig(url, c)
		}
	}
	// Finally fall back on the git remote url (also supports pushurl).
	if url := c.GitRemoteUrl(remote, operation == "upload"); url != "" {
		return NewEndpointFromCloneURLWithConfig(url, c)
	}
	return Endpoint{}
}
// Remotes returns the remote names discovered while loading git config.
func (c *Configuration) Remotes() []string {
	c.loadGitConfig()
	return c.remotes
}

// GitProtocol returns the protocol for the LFS API when converting from a
// git:// remote url.
func (c *Configuration) GitProtocol() string {
	if value, ok := c.GitConfig("lfs.gitprotocol"); ok {
		return value
	}
	// Default to https when lfs.gitprotocol is unset.
	return "https"
}

// Extensions returns the configured LFS extensions keyed by name.
func (c *Configuration) Extensions() map[string]Extension {
	c.loadGitConfig()
	return c.extensions
}

// SortedExtensions gets the list of extensions ordered by Priority
func (c *Configuration) SortedExtensions() ([]Extension, error) {
	return SortExtensions(c.Extensions())
}
// GitConfigInt parses a git config value and returns it as an integer.
// Keys are matched case-insensitively (lower-cased before lookup).
func (c *Configuration) GitConfigInt(key string, def int) int {
	c.loadGitConfig()
	return c.Git.Int(strings.ToLower(key), def)
}

// GitConfigBool parses a git config value and returns true if defined as
// true, 1, on, yes, or def if not defined
func (c *Configuration) GitConfigBool(key string, def bool) bool {
	c.loadGitConfig()
	return c.Git.Bool(strings.ToLower(key), def)
}

// GitConfig returns the raw string value for key and whether it was set.
func (c *Configuration) GitConfig(key string) (string, bool) {
	c.loadGitConfig()
	value, ok := c.gitConfig[strings.ToLower(key)]
	return value, ok
}

// AllGitConfig exposes the full cached gitconfig map.
// NOTE(review): callers receive the live map, not a copy — mutation would
// affect this Configuration.
func (c *Configuration) AllGitConfig() map[string]string {
	c.loadGitConfig()
	return c.gitConfig
}
// FetchPruneConfig builds the fetch/prune settings, starting from the
// documented defaults and overlaying any configured lfs.* values via
// Unmarshal (which reads the struct's `git:` tags).
func (c *Configuration) FetchPruneConfig() FetchPruneConfig {
	f := &FetchPruneConfig{
		FetchRecentRefsDays:           7,
		FetchRecentRefsIncludeRemotes: true,
		PruneOffsetDays:               3,
		PruneRemoteName:               "origin",
	}
	if err := c.Unmarshal(f); err != nil {
		// Unmarshal only fails on programmer error (bad tags/types),
		// so treat it as fatal.
		panic(err.Error())
	}
	return *f
}

// SkipDownloadErrors reports whether download errors should be tolerated,
// from either the environment or lfs.skipdownloaderrors.
func (c *Configuration) SkipDownloadErrors() bool {
	return c.GetenvBool("GIT_LFS_SKIP_DOWNLOAD_ERRORS", false) || c.GitConfigBool("lfs.skipdownloaderrors", false)
}
// loadGitConfig lazily reads git config, extensions and remotes into the
// Configuration, returning true only when it actually performed the load.
// Safe for concurrent use: guarded by c.loading.
func (c *Configuration) loadGitConfig() bool {
	c.loading.Lock()
	defer c.loading.Unlock()
	// A non-nil c.Git doubles as the "already loaded" sentinel.
	if c.Git != nil {
		return false
	}
	gf, extensions, uniqRemotes := ReadGitConfig(getGitConfigs()...)
	c.Git = EnvironmentOf(gf)
	c.gitConfig = gf.vals // XXX TERRIBLE
	c.extensions = extensions
	c.remotes = make([]string, 0, len(uniqRemotes))
	for remote, isOrigin := range uniqRemotes {
		// NOTE(review): remotes flagged as origin are skipped here, so
		// Remotes() excludes them — confirm that is intentional.
		if isOrigin {
			continue
		}
		c.remotes = append(c.remotes, remote)
	}
	return true
}
|
//
package config
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"sync"
"github.com/ghodss/yaml"
"github.com/mitchellh/go-homedir"
"github.com/nanobox-io/nanobox-golang-stylish"
)
// Build constants and service ports for the nanobox guest VM.
const (
	OS   = runtime.GOOS   // current operating system
	ARCH = runtime.GOARCH // current architecture
	LOGTAP_PORT = ":6361" // logtap service port
	MIST_PORT   = ":1445" // mist service port
	SERVER_PORT = ":1757" // nanobox-server port
	VERSION = "0.16.11" // CLI version
)

type (
	// exiter abstracts os.Exit so exits can be intercepted (see Exit).
	exiter func(int)
)

var (
	err   error //
	mutex = &sync.Mutex{}
	//
	AppDir     string // the path to the application (~/.nanobox/apps/<app>)
	AppsDir    string // ~/.nanobox/apps
	CWDir      string // the current working directory
	EnginesDir string // ~/.nanobox/engines
	Home       string // the users home directory (~)
	IP         string // the guest vm's private network ip (generated from app name)
	Root       string // nanobox's root directory path (~/.nanobox)
	UpdateFile string // the path to the .update file (~/.nanobox/.update)
	//
	Nanofile NanofileConfig // parsed nanofile options
	VMfile   VMfileConfig   // parsed VMfile options
	// flags
	Background bool   // don't suspend the vm on exit
	Devmode    bool   // run nanobox in devmode
	Force      bool   // force a command to run (effect varies per command)
	Verbose    bool   // run cli with log level "debug"
	Silent     bool   // silence all output
	LogLevel   string //
	//
	ServerURI string // nanobox-server host:port combo (IP:1757)
	ServerURL string // nanobox-server host:port combo (IP:1757) (http)
	MistURI   string // mist's host:port combo (IP:1445)
	LogtapURI string // logtap's host:port combo (IP:6361)
	// Exit is os.Exit by default; replaceable for testing.
	Exit exiter = os.Exit
)
//
// init establishes the nanobox directory layout (~/.nanobox and children),
// parses the .nanofile, and derives the service URIs. Any filesystem or
// homedir failure is fatal.
func init() {
	// default log level
	LogLevel = "info"
	// set the current working directory first, as it's used in other steps of the
	// configuration process
	if p, err := os.Getwd(); err != nil {
		Log.Fatal("[config/config] os.Getwd() failed", err.Error())
	} else {
		CWDir = filepath.ToSlash(p)
	}
	// set Home based off the users homedir (~)
	if p, err := homedir.Dir(); err != nil {
		Log.Fatal("[config/config] homedir.Dir() failed", err.Error())
	} else {
		Home = filepath.ToSlash(p)
	}
	// set nanobox's root directory;
	Root = filepath.ToSlash(filepath.Join(Home, ".nanobox"))
	// check for a ~/.nanobox dir and create one if it's not found
	if _, err := os.Stat(Root); err != nil {
		fmt.Printf(stylish.Bullet("Creating %s directory", Root))
		if err := os.Mkdir(Root, 0755); err != nil {
			Log.Fatal("[config/config] os.Mkdir() failed", err.Error())
		}
	}
	// check for a ~/.nanobox/.update file and create one if it's not found
	UpdateFile = filepath.ToSlash(filepath.Join(Root, ".update"))
	if _, err := os.Stat(UpdateFile); err != nil {
		f, err := os.Create(UpdateFile)
		if err != nil {
			Log.Fatal("[config/config] os.Create() failed", err.Error())
		}
		// NOTE(review): defer in init runs when init returns, which is
		// effectively immediate here; fine, but a plain Close would be clearer.
		defer f.Close()
	}
	// check for a ~/.nanobox/engines dir and create one if it's not found
	EnginesDir = filepath.ToSlash(filepath.Join(Root, "engines"))
	if _, err := os.Stat(EnginesDir); err != nil {
		if err := os.Mkdir(EnginesDir, 0755); err != nil {
			Log.Fatal("[config/config] os.Mkdir() failed", err.Error())
		}
	}
	// check for a ~/.nanobox/apps dir and create one if it's not found
	AppsDir = filepath.ToSlash(filepath.Join(Root, "apps"))
	if _, err := os.Stat(AppsDir); err != nil {
		if err := os.Mkdir(AppsDir, 0755); err != nil {
			Log.Fatal("[config/config] os.Mkdir() failed", err.Error())
		}
	}
	// the .nanofile needs to be parsed right away so that its config options are
	// available as soon as possible
	Nanofile = ParseNanofile()
	// derive service endpoints from the parsed nanofile IP
	ServerURI = Nanofile.IP + SERVER_PORT
	ServerURL = "http://" + ServerURI
	MistURI = Nanofile.IP + MIST_PORT
	LogtapURI = Nanofile.IP + LOGTAP_PORT
	// set the 'App' dir from the nanofile name; this can be overridden
	// from a .nanofile
	AppDir = filepath.ToSlash(filepath.Join(AppsDir, Nanofile.Name))
}
// ParseConfig
func ParseConfig(path string, v interface{}) error {
//
fp, err := filepath.Abs(path)
if err != nil {
return err
}
//
f, err := ioutil.ReadFile(fp)
if err != nil {
return err
}
//
return yaml.Unmarshal(f, v)
}
// writeConfig
func writeConfig(path string, v interface{}) error {
// take a config objects path and create (and truncate) the file, preparing it
// to receive new configurations
f, err := os.Create(path)
if err != nil {
Fatal("[config/config] os.Create() failed", err.Error())
}
defer f.Close()
// marshal the config object
b, err := yaml.Marshal(v)
if err != nil {
Fatal("[config/config] yaml.Marshal() failed", err.Error())
}
// mutex.Lock()
// write it back to the file
if _, err := f.Write(b); err != nil {
return err
}
// mutex.Unlock()
return nil
}
bumping to 0.16.12
//
package config
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"sync"
"github.com/ghodss/yaml"
"github.com/mitchellh/go-homedir"
"github.com/nanobox-io/nanobox-golang-stylish"
)
// Build constants and service ports for the nanobox guest VM.
const (
	OS   = runtime.GOOS   // current operating system
	ARCH = runtime.GOARCH // current architecture
	LOGTAP_PORT = ":6361" // logtap service port
	MIST_PORT   = ":1445" // mist service port
	SERVER_PORT = ":1757" // nanobox-server port
	VERSION = "0.16.12" // CLI version
)

type (
	// exiter abstracts os.Exit so exits can be intercepted (see Exit).
	exiter func(int)
)

var (
	err   error //
	mutex = &sync.Mutex{}
	//
	AppDir     string // the path to the application (~/.nanobox/apps/<app>)
	AppsDir    string // ~/.nanobox/apps
	CWDir      string // the current working directory
	EnginesDir string // ~/.nanobox/engines
	Home       string // the users home directory (~)
	IP         string // the guest vm's private network ip (generated from app name)
	Root       string // nanobox's root directory path (~/.nanobox)
	UpdateFile string // the path to the .update file (~/.nanobox/.update)
	//
	Nanofile NanofileConfig // parsed nanofile options
	VMfile   VMfileConfig   // parsed VMfile options
	// flags
	Background bool   // don't suspend the vm on exit
	Devmode    bool   // run nanobox in devmode
	Force      bool   // force a command to run (effect varies per command)
	Verbose    bool   // run cli with log level "debug"
	Silent     bool   // silence all output
	LogLevel   string //
	//
	ServerURI string // nanobox-server host:port combo (IP:1757)
	ServerURL string // nanobox-server host:port combo (IP:1757) (http)
	MistURI   string // mist's host:port combo (IP:1445)
	LogtapURI string // logtap's host:port combo (IP:6361)
	// Exit is os.Exit by default; replaceable for testing.
	Exit exiter = os.Exit
)
//
// init establishes the nanobox directory layout (~/.nanobox and children),
// parses the .nanofile, and derives the service URIs. Any filesystem or
// homedir failure is fatal.
func init() {
	// default log level
	LogLevel = "info"
	// set the current working directory first, as it's used in other steps of the
	// configuration process
	if p, err := os.Getwd(); err != nil {
		Log.Fatal("[config/config] os.Getwd() failed", err.Error())
	} else {
		CWDir = filepath.ToSlash(p)
	}
	// set Home based off the users homedir (~)
	if p, err := homedir.Dir(); err != nil {
		Log.Fatal("[config/config] homedir.Dir() failed", err.Error())
	} else {
		Home = filepath.ToSlash(p)
	}
	// set nanobox's root directory;
	Root = filepath.ToSlash(filepath.Join(Home, ".nanobox"))
	// check for a ~/.nanobox dir and create one if it's not found
	if _, err := os.Stat(Root); err != nil {
		fmt.Printf(stylish.Bullet("Creating %s directory", Root))
		if err := os.Mkdir(Root, 0755); err != nil {
			Log.Fatal("[config/config] os.Mkdir() failed", err.Error())
		}
	}
	// check for a ~/.nanobox/.update file and create one if it's not found
	UpdateFile = filepath.ToSlash(filepath.Join(Root, ".update"))
	if _, err := os.Stat(UpdateFile); err != nil {
		f, err := os.Create(UpdateFile)
		if err != nil {
			Log.Fatal("[config/config] os.Create() failed", err.Error())
		}
		defer f.Close()
	}
	// check for a ~/.nanobox/engines dir and create one if it's not found
	EnginesDir = filepath.ToSlash(filepath.Join(Root, "engines"))
	if _, err := os.Stat(EnginesDir); err != nil {
		if err := os.Mkdir(EnginesDir, 0755); err != nil {
			Log.Fatal("[config/config] os.Mkdir() failed", err.Error())
		}
	}
	// check for a ~/.nanobox/apps dir and create one if it's not found
	AppsDir = filepath.ToSlash(filepath.Join(Root, "apps"))
	if _, err := os.Stat(AppsDir); err != nil {
		if err := os.Mkdir(AppsDir, 0755); err != nil {
			Log.Fatal("[config/config] os.Mkdir() failed", err.Error())
		}
	}
	// the .nanofile needs to be parsed right away so that its config options are
	// available as soon as possible
	Nanofile = ParseNanofile()
	// derive service endpoints from the parsed nanofile IP
	ServerURI = Nanofile.IP + SERVER_PORT
	ServerURL = "http://" + ServerURI
	MistURI = Nanofile.IP + MIST_PORT
	LogtapURI = Nanofile.IP + LOGTAP_PORT
	// set the 'App' dir from the nanofile name; this can be overridden
	// from a .nanofile
	AppDir = filepath.ToSlash(filepath.Join(AppsDir, Nanofile.Name))
}
// ParseConfig
// ParseConfig resolves path to an absolute location, reads the file, and
// unmarshals its YAML contents into v.
func ParseConfig(path string, v interface{}) error {
	abs, err := filepath.Abs(path)
	if err != nil {
		return err
	}
	raw, err := ioutil.ReadFile(abs)
	if err != nil {
		return err
	}
	return yaml.Unmarshal(raw, v)
}
// writeConfig
func writeConfig(path string, v interface{}) error {
// take a config objects path and create (and truncate) the file, preparing it
// to receive new configurations
f, err := os.Create(path)
if err != nil {
Fatal("[config/config] os.Create() failed", err.Error())
}
defer f.Close()
// marshal the config object
b, err := yaml.Marshal(v)
if err != nil {
Fatal("[config/config] yaml.Marshal() failed", err.Error())
}
// mutex.Lock()
// write it back to the file
if _, err := f.Write(b); err != nil {
return err
}
// mutex.Unlock()
return nil
}
|
package config
import (
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net"
"os"
"os/user"
"strings"
"gopkg.in/yaml.v2"
"github.com/coreos/pkg/capnslog"
"github.com/jgsqware/clairctl/xstrings"
"github.com/jgsqware/xnet"
"github.com/spf13/viper"
)
// Package logger and sentinel errors.
var log = capnslog.NewPackageLogger("github.com/jgsqware/clairctl", "config")
var errNoInterfaceProvided = errors.New("could not load configuration: no interface provided")
var errInvalidInterface = errors.New("Interface does not exist")

// ErrLoginNotFound is returned by GetLogin when no credentials are stored.
var ErrLoginNotFound = errors.New("user is not log in")

// Run-mode flags set by the CLI layer.
var IsLocal = false
var Insecure = false
var NoClean = false
var ImageName string

// reportConfig holds report output settings (clair.report.*).
type reportConfig struct {
	Path, Format string
}

// clairConfig mirrors the clair.* viper settings.
type clairConfig struct {
	URI              string
	Port, HealthPort int
	Report           reportConfig
}

// authConfig mirrors the auth.* viper settings.
type authConfig struct {
	InsecureSkipVerify bool
}

// clairctlConfig mirrors the clairctl.* viper settings.
type clairctlConfig struct {
	IP, Interface, TempFolder string
	Port                      int
}

// docker mirrors the docker.* viper settings.
type docker struct {
	InsecureRegistries []string
}

// config aggregates the full effective configuration (used by Print).
type config struct {
	Clair    clairConfig
	Auth     authConfig
	Clairctl clairctlConfig
	Docker   docker
}
// Init reads in config file and ENV variables if set.
func Init(cfgFile string, logLevel string, noClean bool) {
NoClean = noClean
lvl := capnslog.WARNING
if logLevel != "" {
// Initialize logging system
var err error
lvl, err = capnslog.ParseLevel(strings.ToUpper(logLevel))
if err != nil {
log.Warningf("Wrong Log level %v, defaults to [Warning]", logLevel)
lvl = capnslog.WARNING
}
}
capnslog.SetGlobalLogLevel(lvl)
capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stdout, false))
viper.SetEnvPrefix("clairctl")
viper.SetConfigName("clairctl") // name of config file (without extension)
viper.AddConfigPath("$HOME/.clairctl") // adding home directory as first search path
viper.AddConfigPath(".") // adding home directory as first search path
viper.AutomaticEnv() // read in environment variables that match
if cfgFile != "" {
viper.SetConfigFile(cfgFile)
}
err := viper.ReadInConfig()
if err != nil {
log.Debugf("No config file used")
} else {
log.Debugf("Using config file: %v", viper.ConfigFileUsed())
}
if viper.Get("clair.uri") == nil {
viper.Set("clair.uri", "http://localhost")
}
if viper.Get("clair.port") == nil {
viper.Set("clair.port", "6060")
}
if viper.Get("clair.healthPort") == nil {
viper.Set("clair.healthPort", "6061")
}
if viper.Get("clair.report.path") == nil {
viper.Set("clair.report.path", "reports")
}
if viper.Get("clair.report.format") == nil {
viper.Set("clair.report.format", "html")
}
if viper.Get("auth.insecureSkipVerify") == nil {
viper.Set("auth.insecureSkipVerify", "true")
}
if viper.Get("clairctl.ip") == nil {
viper.Set("clairctl.ip", "")
}
if viper.Get("clairctl.port") == nil {
viper.Set("clairctl.port", 0)
}
if viper.Get("clairctl.interface") == nil {
viper.Set("clairctl.interface", "")
}
if viper.Get("clairctl.tempFolder") == nil {
viper.Set("clairctl.tempFolder", "/tmp/clairctl")
}
}
// TmpLocal returns the temporary folder used for local analysis artifacts.
func TmpLocal() string {
	return viper.GetString("clairctl.tempFolder")
}

// values snapshots the current viper state into a config struct for display.
func values() config {
	return config{
		Clair: clairConfig{
			URI:        viper.GetString("clair.uri"),
			Port:       viper.GetInt("clair.port"),
			HealthPort: viper.GetInt("clair.healthPort"),
			Report: reportConfig{
				Path:   viper.GetString("clair.report.path"),
				Format: viper.GetString("clair.report.format"),
			},
		},
		Auth: authConfig{
			InsecureSkipVerify: viper.GetBool("auth.insecureSkipVerify"),
		},
		Clairctl: clairctlConfig{
			IP:         viper.GetString("clairctl.ip"),
			Port:       viper.GetInt("clairctl.port"),
			TempFolder: viper.GetString("clairctl.tempFolder"),
			Interface:  viper.GetString("clairctl.interface"),
		},
		Docker: docker{
			InsecureRegistries: viper.GetStringSlice("docker.insecure-registries"),
		},
	}
}
// Print writes the effective configuration to stdout as YAML.
// It exits fatally if the configuration cannot be marshalled.
func Print() {
	cfg := values()
	cfgBytes, err := yaml.Marshal(cfg)
	if err != nil {
		log.Fatalf("marshalling configuration: %v", err)
	}
	fmt.Println("Configuration")
	fmt.Printf("%v", string(cfgBytes))
}
// ClairctlHome returns the clairctl state directory (~/.clairctl), creating
// it with mode 0700 when missing. It panics when the current user cannot be
// determined. A failed mkdir is now reported to stderr instead of being
// silently ignored (callers assume the directory exists afterwards).
func ClairctlHome() string {
	usr, err := user.Current()
	if err != nil {
		panic(err)
	}
	p := usr.HomeDir + "/.clairctl"
	if _, err := os.Stat(p); os.IsNotExist(err) {
		if err := os.Mkdir(p, 0700); err != nil {
			fmt.Fprintf(os.Stderr, "clairctl: creating %s: %v\n", p, err)
		}
	}
	return p
}
// Login holds the credentials stored for one registry. The password is
// kept base64-encoded on disk (see GetLogin, which decodes it).
type Login struct {
	Username string
	Password string
}

// loginMapping maps a registry name to its stored Login.
type loginMapping map[string]Login

// ClairctlConfig returns the credentials file path (~/.clairctl/config.json).
func ClairctlConfig() string {
	return ClairctlHome() + "/config.json"
}

// AddLogin records (or replaces) the login for registry in the config file.
// NOTE(review): the password is written exactly as given; encoding appears
// to be the caller's responsibility — confirm against GetLogin's decode.
func AddLogin(registry string, login Login) error {
	var logins loginMapping
	if err := readConfigFile(&logins, ClairctlConfig()); err != nil {
		return fmt.Errorf("reading clairctl file: %v", err)
	}
	logins[registry] = login
	if err := writeConfigFile(logins, ClairctlConfig()); err != nil {
		return fmt.Errorf("indenting login: %v", err)
	}
	return nil
}
// GetLogin returns the stored credentials for registry with the password
// base64-decoded. ErrLoginNotFound is returned when no config file exists
// or the registry has no stored entry.
func GetLogin(registry string) (Login, error) {
	if _, err := os.Stat(ClairctlConfig()); err == nil {
		var logins loginMapping
		if err := readConfigFile(&logins, ClairctlConfig()); err != nil {
			return Login{}, fmt.Errorf("reading clairctl file: %v", err)
		}
		if login, present := logins[registry]; present {
			// Passwords are stored base64-encoded; decode before returning.
			d, err := base64.StdEncoding.DecodeString(login.Password)
			if err != nil {
				return Login{}, fmt.Errorf("decoding password: %v", err)
			}
			login.Password = string(d)
			return login, nil
		}
	}
	return Login{}, ErrLoginNotFound
}
// RemoveLogin deletes the stored credentials for registry, reporting
// whether an entry was actually removed. A missing config file or absent
// entry is not an error.
func RemoveLogin(registry string) (bool, error) {
	if _, err := os.Stat(ClairctlConfig()); err != nil {
		return false, nil
	}
	var logins loginMapping
	if err := readConfigFile(&logins, ClairctlConfig()); err != nil {
		return false, fmt.Errorf("reading clairctl file: %v", err)
	}
	if _, present := logins[registry]; !present {
		return false, nil
	}
	delete(logins, registry)
	if err := writeConfigFile(logins, ClairctlConfig()); err != nil {
		return false, fmt.Errorf("indenting login: %v", err)
	}
	return true, nil
}
// readConfigFile loads the registry→login map from file into logins.
// A missing file is not an error: logins is reset to an empty map.
func readConfigFile(logins *loginMapping, file string) error {
	if _, err := os.Stat(file); err != nil {
		// No config file yet; start from an empty mapping.
		*logins = loginMapping{}
		return nil
	}
	f, err := ioutil.ReadFile(file)
	if err != nil {
		return err
	}
	// Unmarshal directly into the target map: the previous `&logins`
	// passed a **loginMapping, which json handles but obscures intent.
	return json.Unmarshal(f, logins)
}
// writeConfigFile serializes logins as indented JSON and writes it to file.
// NOTE(review): os.ModePerm is 0777 (before umask) — credentials probably
// warrant 0600; confirm before changing.
func writeConfigFile(logins loginMapping, file string) error {
	s, err := xstrings.ToIndentJSON(logins)
	if err != nil {
		return err
	}
	err = ioutil.WriteFile(file, s, os.ModePerm)
	if err != nil {
		return err
	}
	return nil
}
// LocalServerIP return the local clairctl server IP as "ip:port". When no
// clairctl.ip is configured, the IP is derived from the configured (or
// auto-detected) network interface.
func LocalServerIP() (string, error) {
	localPort := viper.GetString("clairctl.port")
	localIP := viper.GetString("clairctl.ip")
	localInterfaceConfig := viper.GetString("clairctl.interface")
	if localIP == "" {
		log.Info("retrieving interface for local IP")
		var err error
		var localInterface net.Interface
		localInterface, err = translateInterface(localInterfaceConfig)
		if err != nil {
			return "", fmt.Errorf("retrieving interface: %v", err)
		}
		localIP, err = xnet.IPv4(localInterface)
		if err != nil {
			return "", fmt.Errorf("retrieving interface ip: %v", err)
		}
	}
	return strings.TrimSpace(localIP) + ":" + localPort, nil
}
// translateInterface resolves which network interface to use: the named one
// when provided, otherwise docker0, otherwise the first connected broadcast
// interface that has an address.
func translateInterface(localInterface string) (net.Interface, error) {
	if localInterface != "" {
		log.Debug("interface provided, looking for " + localInterface)
		iface, err := net.InterfaceByName(localInterface)
		if err != nil {
			return net.Interface{}, err
		}
		return *iface, nil
	}
	log.Debug("no interface provided, looking for docker0")
	if iface, err := net.InterfaceByName("docker0"); err == nil {
		return *iface, nil
	}
	log.Debug("docker0 not found, looking for first connected broadcast interface")
	all, err := net.Interfaces()
	if err != nil {
		return net.Interface{}, err
	}
	iface, err := xnet.First(xnet.Filter(all, xnet.IsBroadcast), xnet.HasAddr)
	if err != nil {
		return net.Interface{}, err
	}
	return iface, nil
}
// Clean removes the temporary local repository, but only when running in
// local mode and cleaning has not been disabled via NoClean.
func Clean() error {
	if IsLocal && !NoClean {
		log.Debug("cleaning temporary local repository")
		err := os.RemoveAll(TmpLocal())
		if err != nil {
			return fmt.Errorf("cleaning temporary local repository: %v", err)
		}
	}
	return nil
}
panic if config is not parsable (#54)
package config
import (
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net"
"os"
"os/user"
"strings"
"gopkg.in/yaml.v2"
"github.com/coreos/pkg/capnslog"
"github.com/jgsqware/clairctl/xstrings"
"github.com/jgsqware/xnet"
"github.com/spf13/viper"
)
// Package logger and sentinel errors.
var log = capnslog.NewPackageLogger("github.com/jgsqware/clairctl", "config")
var errNoInterfaceProvided = errors.New("could not load configuration: no interface provided")
var errInvalidInterface = errors.New("Interface does not exist")

// ErrLoginNotFound is returned by GetLogin when no credentials are stored.
var ErrLoginNotFound = errors.New("user is not log in")

// Run-mode flags set by the CLI layer.
var IsLocal = false
var Insecure = false
var NoClean = false
var ImageName string

// reportConfig holds report output settings (clair.report.*).
type reportConfig struct {
	Path, Format string
}

// clairConfig mirrors the clair.* viper settings.
type clairConfig struct {
	URI              string
	Port, HealthPort int
	Report           reportConfig
}

// authConfig mirrors the auth.* viper settings.
type authConfig struct {
	InsecureSkipVerify bool
}

// clairctlConfig mirrors the clairctl.* viper settings.
type clairctlConfig struct {
	IP, Interface, TempFolder string
	Port                      int
}

// docker mirrors the docker.* viper settings.
type docker struct {
	InsecureRegistries []string
}

// config aggregates the full effective configuration (used by Print).
type config struct {
	Clair    clairConfig
	Auth     authConfig
	Clairctl clairctlConfig
	Docker   docker
}
// Init reads in config file and ENV variables if set.
// Init reads in config file and ENV variables if set, configures logging,
// and seeds default values for every setting the config file did not
// provide. A config file that exists but cannot be parsed is fatal; only a
// missing file is tolerated.
func Init(cfgFile string, logLevel string, noClean bool) {
	NoClean = noClean
	lvl := capnslog.WARNING
	if logLevel != "" {
		// Initialize logging system
		var err error
		lvl, err = capnslog.ParseLevel(strings.ToUpper(logLevel))
		if err != nil {
			log.Warningf("Wrong Log level %v, defaults to [Warning]", logLevel)
			lvl = capnslog.WARNING
		}
	}
	capnslog.SetGlobalLogLevel(lvl)
	capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stdout, false))
	viper.SetEnvPrefix("clairctl")
	viper.SetConfigName("clairctl")        // name of config file (without extension)
	viper.AddConfigPath("$HOME/.clairctl") // adding home directory as first search path
	viper.AddConfigPath(".")               // adding home directory as first search path
	viper.AutomaticEnv()                   // read in environment variables that match
	if cfgFile != "" {
		viper.SetConfigFile(cfgFile)
	}
	err := viper.ReadInConfig()
	if err != nil {
		// Distinguish "file is broken" (fatal) from "no file" (fine).
		e, ok := err.(viper.ConfigParseError)
		if ok {
			log.Fatalf("error parsing config file: %v", e)
		}
		log.Debugf("No config file used")
	} else {
		log.Debugf("Using config file: %v", viper.ConfigFileUsed())
	}
	// Seed defaults for anything the config file did not set.
	if viper.Get("clair.uri") == nil {
		viper.Set("clair.uri", "http://localhost")
	}
	if viper.Get("clair.port") == nil {
		viper.Set("clair.port", "6060")
	}
	if viper.Get("clair.healthPort") == nil {
		viper.Set("clair.healthPort", "6061")
	}
	if viper.Get("clair.report.path") == nil {
		viper.Set("clair.report.path", "reports")
	}
	if viper.Get("clair.report.format") == nil {
		viper.Set("clair.report.format", "html")
	}
	if viper.Get("auth.insecureSkipVerify") == nil {
		viper.Set("auth.insecureSkipVerify", "true")
	}
	if viper.Get("clairctl.ip") == nil {
		viper.Set("clairctl.ip", "")
	}
	if viper.Get("clairctl.port") == nil {
		viper.Set("clairctl.port", 0)
	}
	if viper.Get("clairctl.interface") == nil {
		viper.Set("clairctl.interface", "")
	}
	if viper.Get("clairctl.tempFolder") == nil {
		viper.Set("clairctl.tempFolder", "/tmp/clairctl")
	}
}
// TmpLocal returns the temporary folder used for local analysis artifacts.
func TmpLocal() string {
	return viper.GetString("clairctl.tempFolder")
}

// values snapshots the current viper state into a config struct for display.
func values() config {
	return config{
		Clair: clairConfig{
			URI:        viper.GetString("clair.uri"),
			Port:       viper.GetInt("clair.port"),
			HealthPort: viper.GetInt("clair.healthPort"),
			Report: reportConfig{
				Path:   viper.GetString("clair.report.path"),
				Format: viper.GetString("clair.report.format"),
			},
		},
		Auth: authConfig{
			InsecureSkipVerify: viper.GetBool("auth.insecureSkipVerify"),
		},
		Clairctl: clairctlConfig{
			IP:         viper.GetString("clairctl.ip"),
			Port:       viper.GetInt("clairctl.port"),
			TempFolder: viper.GetString("clairctl.tempFolder"),
			Interface:  viper.GetString("clairctl.interface"),
		},
		Docker: docker{
			InsecureRegistries: viper.GetStringSlice("docker.insecure-registries"),
		},
	}
}
// Print writes the effective configuration to stdout as YAML.
// It exits fatally if the configuration cannot be marshalled.
func Print() {
	cfg := values()
	cfgBytes, err := yaml.Marshal(cfg)
	if err != nil {
		log.Fatalf("marshalling configuration: %v", err)
	}
	fmt.Println("Configuration")
	fmt.Printf("%v", string(cfgBytes))
}
// ClairctlHome returns the clairctl state directory (~/.clairctl), creating
// it with mode 0700 when missing. It panics when the current user cannot be
// determined. A failed mkdir is now reported to stderr instead of being
// silently ignored (callers assume the directory exists afterwards).
func ClairctlHome() string {
	usr, err := user.Current()
	if err != nil {
		panic(err)
	}
	p := usr.HomeDir + "/.clairctl"
	if _, err := os.Stat(p); os.IsNotExist(err) {
		if err := os.Mkdir(p, 0700); err != nil {
			fmt.Fprintf(os.Stderr, "clairctl: creating %s: %v\n", p, err)
		}
	}
	return p
}
// Login holds the credentials stored for one registry. The password is
// kept base64-encoded on disk (see GetLogin, which decodes it).
type Login struct {
	Username string
	Password string
}

// loginMapping maps a registry name to its stored Login.
type loginMapping map[string]Login

// ClairctlConfig returns the credentials file path (~/.clairctl/config.json).
func ClairctlConfig() string {
	return ClairctlHome() + "/config.json"
}

// AddLogin records (or replaces) the login for registry in the config file.
// NOTE(review): the password is written exactly as given; encoding appears
// to be the caller's responsibility — confirm against GetLogin's decode.
func AddLogin(registry string, login Login) error {
	var logins loginMapping
	if err := readConfigFile(&logins, ClairctlConfig()); err != nil {
		return fmt.Errorf("reading clairctl file: %v", err)
	}
	logins[registry] = login
	if err := writeConfigFile(logins, ClairctlConfig()); err != nil {
		return fmt.Errorf("indenting login: %v", err)
	}
	return nil
}
// GetLogin returns the stored credentials for registry with the password
// base64-decoded. ErrLoginNotFound is returned when no config file exists
// or the registry has no stored entry.
func GetLogin(registry string) (Login, error) {
	if _, err := os.Stat(ClairctlConfig()); err == nil {
		var logins loginMapping
		if err := readConfigFile(&logins, ClairctlConfig()); err != nil {
			return Login{}, fmt.Errorf("reading clairctl file: %v", err)
		}
		if login, present := logins[registry]; present {
			// Passwords are stored base64-encoded; decode before returning.
			d, err := base64.StdEncoding.DecodeString(login.Password)
			if err != nil {
				return Login{}, fmt.Errorf("decoding password: %v", err)
			}
			login.Password = string(d)
			return login, nil
		}
	}
	return Login{}, ErrLoginNotFound
}
// RemoveLogin deletes the stored credentials for registry, reporting
// whether an entry was actually removed. A missing config file or entry
// is not an error.
func RemoveLogin(registry string) (bool, error) {
	if _, err := os.Stat(ClairctlConfig()); err == nil {
		var logins loginMapping
		if err := readConfigFile(&logins, ClairctlConfig()); err != nil {
			return false, fmt.Errorf("reading clairctl file: %v", err)
		}
		if _, present := logins[registry]; present {
			delete(logins, registry)
			if err := writeConfigFile(logins, ClairctlConfig()); err != nil {
				// Was "indenting login", which mislabeled a write failure.
				return false, fmt.Errorf("writing clairctl file: %v", err)
			}
			return true, nil
		}
	}
	return false, nil
}
// readConfigFile loads the registry→login mapping from file.
// A missing (or unreadable-by-Stat) file is not an error: logins is
// reset to an empty mapping instead, so callers can add entries.
func readConfigFile(logins *loginMapping, file string) error {
	if _, err := os.Stat(file); err != nil {
		*logins = loginMapping{}
		return nil
	}
	f, err := ioutil.ReadFile(file)
	if err != nil {
		return err
	}
	// Unmarshal directly into the mapping; the previous `&logins` passed a
	// needless **loginMapping indirection to json.Unmarshal.
	return json.Unmarshal(f, logins)
}
// writeConfigFile serializes the login mapping as indented JSON and
// writes it to file. The file stores credentials, so it is written with
// owner-only permissions (previously os.ModePerm, i.e. world-accessible
// 0777, which exposed stored passwords to other local users).
func writeConfigFile(logins loginMapping, file string) error {
	s, err := xstrings.ToIndentJSON(logins)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(file, s, 0600)
}
// LocalServerIP return the local clairctl server IP as "ip:port".
// When clairctl.ip is unset, the IP is derived from the configured (or
// auto-detected) network interface via translateInterface.
func LocalServerIP() (string, error) {
	localPort := viper.GetString("clairctl.port")
	localIP := viper.GetString("clairctl.ip")
	localInterfaceConfig := viper.GetString("clairctl.interface")
	if localIP == "" {
		log.Info("retrieving interface for local IP")
		var err error
		var localInterface net.Interface
		localInterface, err = translateInterface(localInterfaceConfig)
		if err != nil {
			return "", fmt.Errorf("retrieving interface: %v", err)
		}
		// Resolve the IPv4 address of the selected interface.
		localIP, err = xnet.IPv4(localInterface)
		if err != nil {
			return "", fmt.Errorf("retrieving interface ip: %v", err)
		}
	}
	return strings.TrimSpace(localIP) + ":" + localPort, nil
}
// translateInterface resolves the network interface clairctl should
// advertise on. An explicitly named interface is looked up directly;
// otherwise docker0 is tried, and failing that, the first connected
// broadcast interface with an address is used.
func translateInterface(localInterface string) (net.Interface, error) {
	if localInterface != "" {
		log.Debug("interface provided, looking for " + localInterface)
		iface, err := net.InterfaceByName(localInterface)
		if err != nil {
			return net.Interface{}, err
		}
		return *iface, nil
	}

	log.Debug("no interface provided, looking for docker0")
	if iface, err := net.InterfaceByName("docker0"); err == nil {
		return *iface, nil
	}

	log.Debug("docker0 not found, looking for first connected broadcast interface")
	interfaces, err := net.Interfaces()
	if err != nil {
		return net.Interface{}, err
	}
	return xnet.First(xnet.Filter(interfaces, xnet.IsBroadcast), xnet.HasAddr)
}
// Clean removes the temporary local repository, unless the scan was not
// local or cleaning has been disabled via NoClean.
func Clean() error {
	if !IsLocal || NoClean {
		return nil
	}
	log.Debug("cleaning temporary local repository")
	if err := os.RemoveAll(TmpLocal()); err != nil {
		return fmt.Errorf("cleaning temporary local repository: %v", err)
	}
	return nil
}
|
package app
import (
"github.com/0xb10c/memo/memod/database"
"github.com/0xb10c/memo/memod/logger"
"github.com/0xb10c/memo/memod/mempool"
)
// Start starts the memo daemon: it opens the database connection and
// runs the mempool fetcher.
func Start() {
	db, err := database.Setup()
	if err != nil {
		// The error handling was previously nested inside an identical,
		// redundant `if err != nil` check.
		logger.Error.Printf("Failed to setup database connection: %s", err.Error())
		return
	}
	defer db.Close()
	mempool.SetupMempoolFetcher()
}
Add exit signal handling
package app
import (
"os"
"os/signal"
"syscall"
"github.com/0xb10c/memo/memod/database"
"github.com/0xb10c/memo/memod/logger"
"github.com/0xb10c/memo/memod/mempool"
)
// Start starts the memo daemon: it opens the database connection, runs
// the mempool fetcher in the background, and blocks until an exit
// signal is received.
func Start() {
	db, err := database.Setup()
	if err != nil {
		// The error handling was previously nested inside an identical,
		// redundant `if err != nil` check.
		logger.Error.Printf("Failed to setup database connection: %s", err.Error())
		return
	}
	defer db.Close()

	// run the mempool fetcher in a goroutine
	go mempool.SetupMempoolFetcher()

	waitForOSSignal()
}
func waitForOSSignal() {
exitSignals := make(chan os.Signal, 1)
shouldExit := make(chan bool, 1)
signal.Notify(exitSignals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
go handleExitSig(exitSignals, shouldExit)
<-shouldExit // wait till memod should exit
logger.Info.Println("Memod exiting")
}
// handleExitSig waits for the first OS signal on exitSignals, logs it,
// and then signals shutdown via shouldExit.
func handleExitSig(exitSignals chan os.Signal, shouldExit chan bool) {
	received := <-exitSignals
	logger.Info.Println("Received signal", received)
	shouldExit <- true
}
|
package main
import (
"encoding/json"
"fmt"
"log"
)
// A Client is a connected player and associated websocket connection.
type Client struct {
	// WebSocket connection (communicate with this via send and receive channels)
	conn *Connection
	// ID of the player that this client represents
	playerId string
	// ID of the last applied event; echoed back to the client in every
	// update (see SendUpdate) so it can reconcile its local state.
	lastAppliedEventId uint64
}
// makeClient wraps a websocket connection in a Client and registers it
// with the game loop.
func makeClient(conn *Connection) *Client {
	c := &Client{conn: conn}
	game.register <- c
	return c
}
// Initialize assigns the player id, pushes the initial game snapshot to
// the client, and starts the message-handling loop in the background.
func (c *Client) Initialize(playerId string, gameConstants *GameConstants, gameState *GameState) {
	c.playerId = playerId

	// Send the initial player data to the client.
	payload, err := json.Marshal(&InitData{playerId, gameConstants, gameState})
	if err != nil {
		panic(err)
	}
	data := json.RawMessage(payload)
	c.Send(&Message{Type: "init", Time: MakeTimestamp(), Data: &data})
	log.Println(fmt.Sprintf("Client Starting: %v", c.playerId))

	// Boot the client message handler.
	go c.run()
}
// run pumps messages from the websocket connection until its receive
// channel closes, then removes the player's ship and unregisters the
// client from the game.
func (c *Client) run() {
	defer func() {
		game.unregister <- c
		close(c.conn.send)
	}()
	// A single-case select in an endless loop is equivalent to ranging
	// over the channel; the loop ends when conn.receive is closed.
	for message := range c.conn.receive {
		c.handleMessage(message)
	}
	log.Println(fmt.Sprintf("Client Stopping: %v", c.playerId))
	game.history.Run(&RemoveShipEvent{MakeTimestamp(), c.playerId})
}
// handleMessage dispatches one inbound client message to the game's
// event history based on its type. Unknown types are ignored.
func (c *Client) handleMessage(message *Message) {
	// decode unmarshals the payload into v. A malformed message from a
	// single client must not take down the whole server (this previously
	// called log.Fatal), so it is logged and the message dropped.
	decode := func(v interface{}) bool {
		if err := json.Unmarshal([]byte(*message.Data), v); err != nil {
			log.Printf("dropping malformed %q message from client %v: %v", message.Type, c.playerId, err)
			return false
		}
		return true
	}
	switch message.Type {
	case "changeAcceleration":
		var data AccelerationData
		if !decode(&data) {
			return
		}
		game.history.Run(&ChangeAccelerationEvent{message.Time, c.playerId, data.Direction})
		c.updateLastAppliedEvent(data.EventId)
	case "changeRotation":
		var data RotationData
		if !decode(&data) {
			return
		}
		game.history.Run(&ChangeRotationEvent{message.Time, c.playerId, data.Direction})
		c.updateLastAppliedEvent(data.EventId)
	case "fire":
		var data FireData
		if !decode(&data) {
			return
		}
		game.history.Run(&FireEvent{message.Time, c.playerId, data.ProjectileId, data.Created})
		c.updateLastAppliedEvent(data.EventId)
	}
}
// updateLastAppliedEvent records the highest event id acknowledged by
// this client so SendUpdate can report it back.
func (c *Client) updateLastAppliedEvent(eventId uint64) {
	if eventId > c.lastAppliedEventId {
		c.lastAppliedEventId = eventId
		return
	}
	// A duplicate or out-of-order id from one client is not a server-wide
	// invariant violation; log it instead of log.Fatal, which previously
	// terminated the whole process.
	log.Printf("Client %v sent out-of-order event id: have %d, got %d", c.playerId, c.lastAppliedEventId, eventId)
}
// SendUpdate pushes the latest game state to the client along with the
// id of the last event this server applied for it.
func (c *Client) SendUpdate(state *GameState) {
	payload, err := json.Marshal(&UpdateData{state, c.lastAppliedEventId})
	if err != nil {
		panic(err)
	}
	data := json.RawMessage(payload)
	c.Send(&Message{Type: "update", Time: MakeTimestamp(), Data: &data})
}
// Send queues a message on the connection's outbound channel.
func (c *Client) Send(message *Message) {
	c.conn.send <- message
}
Cleanup.
package main
import (
"encoding/json"
"fmt"
"log"
)
// A Client is a connected player and associated websocket connection.
type Client struct {
	// WebSocket connection (communicate with this via send and receive channels)
	conn *Connection
	// ID of the player that this client represents
	playerId string
	// ID of the last applied event; echoed back to the client in every
	// update (see SendUpdate) so it can reconcile its local state.
	lastAppliedEventId uint64
}
// makeClient wraps a websocket connection in a Client and registers it
// with the game loop.
func makeClient(conn *Connection) *Client {
	c := &Client{conn: conn}
	game.register <- c
	return c
}
// Initialize assigns the player id, pushes the initial game snapshot to
// the client, and starts the message-handling loop in the background.
func (c *Client) Initialize(playerId string, gameConstants *GameConstants, gameState *GameState) {
	c.playerId = playerId

	// Send the initial player data to the client.
	payload, err := json.Marshal(&InitData{playerId, gameConstants, gameState})
	if err != nil {
		panic(err)
	}
	data := json.RawMessage(payload)
	c.Send(&Message{Type: "init", Time: MakeTimestamp(), Data: &data})
	log.Println(fmt.Sprintf("Client Starting: %v", c.playerId))

	// Boot the client message handler.
	go c.run()
}
// run pumps messages from the websocket connection until its receive
// channel closes, then removes the player's ship and unregisters the
// client from the game.
func (c *Client) run() {
	defer func() {
		game.unregister <- c
		close(c.conn.send)
	}()
	// A single-case select in an endless loop is equivalent to ranging
	// over the channel; the loop ends when conn.receive is closed.
	for message := range c.conn.receive {
		c.handleMessage(message)
	}
	log.Println(fmt.Sprintf("Client Stopping: %v", c.playerId))
	game.history.Run(&RemoveShipEvent{MakeTimestamp(), c.playerId})
}
// handleMessage dispatches one inbound client message to the game's
// event history based on its type. Unknown types are ignored.
func (c *Client) handleMessage(message *Message) {
	// decode unmarshals the payload into v. A malformed message from a
	// single client must not take down the whole server (this previously
	// called log.Fatal), so it is logged and the message dropped.
	decode := func(v interface{}) bool {
		if err := json.Unmarshal([]byte(*message.Data), v); err != nil {
			log.Printf("dropping malformed %q message from client %v: %v", message.Type, c.playerId, err)
			return false
		}
		return true
	}
	switch message.Type {
	case "changeAcceleration":
		var data AccelerationData
		if !decode(&data) {
			return
		}
		game.history.Run(&ChangeAccelerationEvent{message.Time, c.playerId, data.Direction})
		c.updateLastAppliedEvent(data.EventId)
	case "changeRotation":
		var data RotationData
		if !decode(&data) {
			return
		}
		game.history.Run(&ChangeRotationEvent{message.Time, c.playerId, data.Direction})
		c.updateLastAppliedEvent(data.EventId)
	case "fire":
		var data FireData
		if !decode(&data) {
			return
		}
		game.history.Run(&FireEvent{message.Time, c.playerId, data.ProjectileId, data.Created})
		c.updateLastAppliedEvent(data.EventId)
	}
}
// updateLastAppliedEvent records the highest event id acknowledged by
// this client so SendUpdate can report it back.
func (c *Client) updateLastAppliedEvent(eventId uint64) {
	if eventId > c.lastAppliedEventId {
		c.lastAppliedEventId = eventId
		return
	}
	// A duplicate or out-of-order id from one client is not a server-wide
	// invariant violation; log it instead of log.Fatalf, which previously
	// terminated the whole process.
	log.Printf("Client %v sent out-of-order event id: have %d, got %d", c.playerId, c.lastAppliedEventId, eventId)
}
// SendUpdate pushes the latest game state to the client along with the
// id of the last event this server applied for it.
func (c *Client) SendUpdate(state *GameState) {
	payload, err := json.Marshal(&UpdateData{state, c.lastAppliedEventId})
	if err != nil {
		panic(err)
	}
	data := json.RawMessage(payload)
	c.Send(&Message{Type: "update", Time: MakeTimestamp(), Data: &data})
}
// Send queues a message on the connection's outbound channel.
func (c *Client) Send(message *Message) {
	c.conn.send <- message
}
|
// Copyright 2012-2018 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"math/rand"
"net"
"regexp"
"strings"
"sync"
"sync/atomic"
"time"
)
// Type of client connection.
const (
	// CLIENT is an end user.
	CLIENT = iota
	// ROUTER is another router in the cluster.
	ROUTER
)

// Client protocol versions negotiated via the CONNECT proto.
const (
	// ClientProtoZero is the original Client protocol from 2009.
	// http://nats.io/documentation/internals/nats-protocol/
	ClientProtoZero = iota
	// ClientProtoInfo signals a client can receive more than the original INFO block.
	// This can be used to update clients on other cluster members, etc.
	ClientProtoInfo
)
func init() {
	// Seed math/rand at package load for randomized behavior (e.g. the
	// per-client prand in readCache).
	rand.Seed(time.Now().UnixNano())
}
const (
	// Scratch buffer size for the processMsg() calls.
	msgScratchSize = 1024
	// Preloaded protocol prefix for routed messages (see initClient).
	msgHeadProto    = "RMSG "
	msgHeadProtoLen = len(msgHeadProto)
)

// For controlling dynamic buffer sizes.
const (
	startBufSize = 512   // For INFO/CONNECT block
	minBufSize   = 64    // Smallest to shrink to for PING/PONG
	maxBufSize   = 65536 // 64k
	// Number of consecutive short reads/writes tolerated before a buffer
	// is shrunk (see readLoop and flushOutbound).
	shortsToShrink = 2
)
// Represent client booleans with a bitmask
type clientFlag byte

// Some client state represented as flags
const (
	connectReceived   clientFlag = 1 << iota // The CONNECT proto has been received
	infoReceived                             // The INFO protocol has been received
	firstPongSent                            // The first PONG has been sent
	handshakeComplete                        // For TLS clients, indicate that the handshake is complete
	clearConnection                          // Marks that clearConnection has already been called.
	flushOutbound                            // Marks client as having a flushOutbound call in progress.
)

// set turns the given flag(s) on (equivalent to setting the boolean to true).
func (cf *clientFlag) set(c clientFlag) {
	*cf = *cf | c
}

// clear turns the given flag(s) off (equivalent to setting the boolean to false).
func (cf *clientFlag) clear(c clientFlag) {
	*cf &^= c
}

// isSet reports whether the given flag is currently on.
func (cf clientFlag) isSet(c clientFlag) bool {
	return (cf & c) != 0
}

// setIfNotSet sets flag c only when it was not already set, reporting
// whether the flag transitioned from off to on.
func (cf *clientFlag) setIfNotSet(c clientFlag) bool {
	if (*cf & c) != 0 {
		return false
	}
	*cf |= c
	return true
}
// ClosedState is the reason client was closed. This will
// be passed into calls to clearConnection, but will only
// be stored in ConnInfo for monitoring.
type ClosedState int

// Enumerated close reasons; the iota starts at 1 so the zero value is
// distinguishable from a real close state.
const (
	ClientClosed = ClosedState(iota + 1)
	AuthenticationTimeout
	AuthenticationViolation
	TLSHandshakeError
	SlowConsumerPendingBytes
	SlowConsumerWriteDeadline
	WriteError
	ReadError
	ParseError
	StaleConnection
	ProtocolViolation
	BadClientProtocolVersion
	WrongPort
	MaxConnectionsExceeded
	MaxPayloadExceeded
	MaxControlLineExceeded
	DuplicateRoute
	RouteRemoved
	ServerShutdown
)
// client represents a single connection to this server: either an end
// user (CLIENT) or another server in the cluster (ROUTER) — see typ.
type client struct {
	// Here first because of use of atomics, and memory alignment.
	stats
	mpay  int64 // Max payload size (see maxPayloadViolation).
	msubs int   // Max subscription count (see maxSubsExceeded).
	mu    sync.Mutex
	typ   int    // CLIENT or ROUTER.
	cid   uint64 // Server-unique connection id, assigned in initClient.
	opts  clientOpts
	start time.Time
	nonce []byte
	nc    net.Conn
	ncs   string // Cached "<addr> - cid:N" descriptor used by String().
	out   outbound
	srv   *Server
	acc   *Account
	subs  map[string]*subscription
	perms *permissions
	mperms *msgDeny
	// Raw subscribe-deny subjects, retained for loadMsgDenyFilter.
	darray []string
	in     readCache
	// Pending clients with data to flush after processing our inbound msgs.
	pcd  map[*client]struct{}
	atmr *time.Timer // Auth timer (see clearAuthTimer usage in processConnect).
	ping pinfo
	msgb [msgScratchSize]byte
	last time.Time
	parseState

	rtt      time.Duration
	rttStart time.Time

	route *route
	debug bool
	trace bool
	echo  bool

	flags clientFlag // Compact booleans into a single field. Size will be increased when needed.
}
// Struct for PING initiation from the server.
type pinfo struct {
	tmr *time.Timer
	out int
}

// outbound holds pending data for a socket.
type outbound struct {
	p   []byte        // Primary write buffer
	s   []byte        // Secondary for use post flush
	nb  net.Buffers   // net.Buffers for writev IO
	sz  int           // limit size per []byte, uses variable BufSize constants, start, min, max.
	sws int           // Number of short writes, used for dynamic resizing.
	pb  int64         // Total pending/queued bytes.
	pm  int64         // Total pending/queued messages.
	sg  *sync.Cond    // Flusher conditional for signaling.
	fsp int           // Flush signals that are pending from readLoop's pcd.
	mp  int64         // snapshot of max pending.
	wdl time.Duration // Snapshot of write deadline.
	lft time.Duration // Last flush time.
}

// perm pairs an allow and a deny sublist for one direction (pub or sub).
type perm struct {
	allow *Sublist
	deny  *Sublist
}

// permissions holds a client's authorization state plus a cache of
// already-evaluated publish subjects (pcache).
type permissions struct {
	sub    perm
	pub    perm
	pcache map[string]bool
}

// msgDeny is used when a user permission for subscriptions has a deny
// clause but a subscription could be made that is of broader scope.
// e.g. deny = "foo", but user subscribes to "*". That subscription should
// succeed but no message sent on foo should be delivered.
type msgDeny struct {
	deny   *Sublist
	dcache map[string]bool
}

// Size limits for the lookup caches above and in readCache.
const (
	maxResultCacheSize   = 512
	maxDenyPermCacheSize = 256
	maxPermCacheSize     = 128
	pruneSize            = 32
)

// Used in readloop to cache hot subject lookups and group statistics.
type readCache struct {
	// These are for clients who are bound to a single account.
	genid   uint64
	results map[string]*SublistResult

	// This is for routes to have their own L1 as well that is account aware.
	rcache map[string]*routeCache

	prand *rand.Rand

	msgs  int
	bytes int
	subs  int

	rsz int // Read buffer size
	srs int // Short reads, used for dynamic buffer resizing.
}
// String implements fmt.Stringer, returning the cached connection
// descriptor built in initClient (e.g. "1.2.3.4:4222 - cid:7").
func (c *client) String() string {
	// The original declared a named result `id` that was never used.
	return c.ncs
}
// GetOpts returns the CONNECT options supplied by this client.
func (c *client) GetOpts() *clientOpts {
	return &c.opts
}
// GetTLSConnectionState returns the TLS ConnectionState if TLS is enabled, nil
// otherwise. Implements the ClientAuth interface.
func (c *client) GetTLSConnectionState() *tls.ConnectionState {
	if tc, ok := c.nc.(*tls.Conn); ok {
		st := tc.ConnectionState()
		return &st
	}
	return nil
}
// This is the main subscription struct that indicates
// interest in published messages.
// FIXME(dlc) - This is getting bloated for normal subs, need
// to optionally have an opts section for non-normal stuff.
type subscription struct {
	client  *client
	im      *streamImport   // This is for import stream support.
	shadow  []*subscription // This is to track shadowed accounts.
	subject []byte
	queue   []byte
	sid     []byte
	nm      int64
	max     int64
	qw      int32
}

// clientOpts mirrors the JSON payload of the CONNECT protocol line
// (unmarshalled in processConnect).
type clientOpts struct {
	Echo          bool   `json:"echo"`
	Verbose       bool   `json:"verbose"`
	Pedantic      bool   `json:"pedantic"`
	TLSRequired   bool   `json:"tls_required"`
	Nkey          string `json:"nkey,omitempty"`
	Sig           string `json:"sig,omitempty"`
	Authorization string `json:"auth_token,omitempty"`
	Username      string `json:"user,omitempty"`
	Password      string `json:"pass,omitempty"`
	Name          string `json:"name"`
	Lang          string `json:"lang"`
	Version       string `json:"version"`
	Protocol      int    `json:"protocol"`
	Account       string `json:"account,omitempty"`
	AccountNew    bool   `json:"new_account,omitempty"`

	// Routes only
	Import *SubjectPermission `json:"import,omitempty"`
	Export *SubjectPermission `json:"export,omitempty"`
}

// defaultOpts are the options assumed before a CONNECT is received.
var defaultOpts = clientOpts{Verbose: true, Pedantic: true, Echo: true}
func init() {
	// NOTE(review): this is the second init() in this file seeding
	// math/rand (see the one near the top) — harmless but redundant; one
	// of the two could be removed.
	rand.Seed(time.Now().UnixNano())
}
// initClient initializes a freshly accepted connection: assigns the
// server-unique cid, sets up the outbound buffers and snapshots of
// server options, and caches the string form of the remote address.
// Lock should be held
func (c *client) initClient() {
	s := c.srv
	c.cid = atomic.AddUint64(&s.gcid, 1)

	// Outbound data structure setup
	c.out.sz = startBufSize
	c.out.sg = sync.NewCond(&c.mu)
	opts := s.getOpts()
	// Snapshots to avoid mutex access in fast paths.
	c.out.wdl = opts.WriteDeadline
	c.out.mp = opts.MaxPending

	c.subs = make(map[string]*subscription)
	// Echo defaults to on until the CONNECT proto says otherwise.
	c.echo = true

	c.debug = (atomic.LoadInt32(&c.srv.logging.debug) != 0)
	c.trace = (atomic.LoadInt32(&c.srv.logging.trace) != 0)

	// This is a scratch buffer used for processMsg()
	// The msg header starts with "RMSG ", which can be used
	// for both local and routes.
	// in bytes that is [82 77 83 71 32].
	c.msgb = [msgScratchSize]byte{82, 77, 83, 71, 32}

	// This is to track pending clients that have data to be flushed
	// after we process inbound msgs from our own connection.
	c.pcd = make(map[*client]struct{})

	// snapshot the string version of the connection
	conn := "-"
	if ip, ok := c.nc.(*net.TCPConn); ok {
		addr := ip.RemoteAddr().(*net.TCPAddr)
		conn = fmt.Sprintf("%s:%d", addr.IP, addr.Port)
	}

	switch c.typ {
	case CLIENT:
		c.ncs = fmt.Sprintf("%s - cid:%d", conn, c.cid)
	case ROUTER:
		c.ncs = fmt.Sprintf("%s - rid:%d", conn, c.cid)
	}
}
// RegisterWithAccount will register the given user with a specific
// account. This will change the subject namespace.
func (c *client) registerWithAccount(acc *Account) error {
	if acc == nil || acc.sl == nil {
		return ErrBadAccount
	}
	// If we were previously register, usually to $G, do accounting here to remove.
	if c.acc != nil {
		// prev == 1 means we were the last client on the old account.
		if prev := c.acc.removeClient(c); prev == 1 && c.srv != nil {
			c.srv.mu.Lock()
			c.srv.activeAccounts--
			c.srv.mu.Unlock()
		}
	}

	// Add in new one.
	// prev == 0 means we are the first client on the new account.
	if prev := acc.addClient(c); prev == 0 && c.srv != nil {
		c.srv.mu.Lock()
		c.srv.activeAccounts++
		c.srv.mu.Unlock()
	}

	c.mu.Lock()
	c.acc = acc
	c.mu.Unlock()

	return nil
}
// RegisterUser allows auth to call back into a new client
// with the authenticated user. This is used to map
// any permissions into the client and setup accounts.
func (c *client) RegisterUser(user *User) {
	// Register with proper account and sublist.
	if user.Account != nil {
		if err := c.registerWithAccount(user.Account); err != nil {
			c.Errorf("Problem registering with account [%s]", user.Account.Name)
			c.sendErr("Failed Account Registration")
			return
		}
	}

	c.mu.Lock()
	defer c.mu.Unlock()

	// Assign permissions.
	if user.Permissions == nil {
		// Reset perms to nil in case client previously had them.
		c.perms = nil
		c.mperms = nil
		return
	}
	c.setPermissions(user.Permissions)
}
// RegisterNkeyUser allows auth to call back into a new nkey
// client with the authenticated user. This is used to map
// any permissions into the client and setup accounts.
// NOTE(review): unlike RegisterUser, the account is assigned directly
// without registerWithAccount's accounting — confirm this is intended.
func (c *client) RegisterNkeyUser(user *NkeyUser) {
	c.mu.Lock()
	defer c.mu.Unlock()

	// Register with proper account and sublist.
	if user.Account != nil {
		c.acc = user.Account
	}

	// Assign permissions.
	if user.Permissions == nil {
		// Reset perms to nil in case client previously had them.
		c.perms = nil
		c.mperms = nil
		return
	}
	c.setPermissions(user.Permissions)
}
// setPermissions initializes the client.perms structure from the given
// Permissions. Allow/deny subjects are loaded into sublists; the raw
// subscribe-deny subjects are also retained for loadMsgDenyFilter.
// Lock is held on entry.
func (c *client) setPermissions(perms *Permissions) {
	if perms == nil {
		return
	}
	c.perms = &permissions{pcache: make(map[string]bool)}

	// buildSublist returns a Sublist populated with the given subjects,
	// or nil when there are none (matching the lazy allocation of the
	// original loops).
	buildSublist := func(subjects []string) *Sublist {
		if len(subjects) == 0 {
			return nil
		}
		sl := NewSublist()
		for _, subject := range subjects {
			sl.Insert(&subscription{subject: []byte(subject)})
		}
		return sl
	}

	// Publish permissions.
	if perms.Publish != nil {
		c.perms.pub.allow = buildSublist(perms.Publish.Allow)
		c.perms.pub.deny = buildSublist(perms.Publish.Deny)
	}

	// Subscribe permissions.
	if perms.Subscribe != nil {
		c.perms.sub.allow = buildSublist(perms.Subscribe.Allow)
		c.perms.sub.deny = buildSublist(perms.Subscribe.Deny)
		if len(perms.Subscribe.Deny) > 0 {
			// Also hold onto this array for later.
			c.darray = perms.Subscribe.Deny
		}
	}
}
// loadMsgDenyFilter builds the deny structure used for filtering
// delivered messages based on a deny clause for subscriptions, from the
// subjects cached in darray.
// Lock should be held.
func (c *client) loadMsgDenyFilter() {
	c.mperms = &msgDeny{deny: NewSublist(), dcache: make(map[string]bool)}
	for _, subject := range c.darray {
		c.mperms.deny.Insert(&subscription{subject: []byte(subject)})
	}
}
// writeLoop is the main socket write functionality.
// Runs in its own Go routine.
func (c *client) writeLoop() {
	defer c.srv.grWG.Done()

	// Used to check that we did flush from last wake up.
	waitOk := true

	// Main loop. Will wait to be signaled and then will use
	// buffered outbound structure for efficient writev to the underlying socket.
	for {
		c.mu.Lock()
		// Sleep only when the last flush fully succeeded, there is nothing
		// ready to write (or flush signals are still pending from readLoop),
		// and the connection has not been cleared.
		if waitOk && (c.out.pb == 0 || c.out.fsp > 0) && len(c.out.nb) == 0 && !c.flags.isSet(clearConnection) {
			// Wait on pending data.
			c.out.sg.Wait()
		}
		// Flush data
		waitOk = c.flushOutbound()
		isClosed := c.flags.isSet(clearConnection)
		c.mu.Unlock()

		if isClosed {
			return
		}
	}
}
// readLoop is the main socket read functionality.
// Runs in its own Go routine.
func (c *client) readLoop() {
	// Grab the connection off the client, it will be cleared on a close.
	// We check for that after the loop, but want to avoid a nil dereference
	c.mu.Lock()
	nc := c.nc
	s := c.srv
	c.in.rsz = startBufSize
	defer s.grWG.Done()
	c.mu.Unlock()

	if nc == nil {
		return
	}

	// Start read buffer.
	b := make([]byte, c.in.rsz)

	for {
		n, err := nc.Read(b)
		if err != nil {
			// EOF is a normal remote close; anything else is a read error.
			if err == io.EOF {
				c.closeConnection(ClientClosed)
			} else {
				c.closeConnection(ReadError)
			}
			return
		}
		// Grab for updates for last activity.
		last := time.Now()

		// Clear inbound stats cache
		c.in.msgs = 0
		c.in.bytes = 0
		c.in.subs = 0

		// Main call into parser for inbound data. This will generate callouts
		// to process messages, etc.
		if err := c.parse(b[:n]); err != nil {
			// handled inline
			if err != ErrMaxPayload && err != ErrAuthorization {
				c.Errorf("%s", err.Error())
				c.closeConnection(ProtocolViolation)
			}
			return
		}

		// Updates stats for client and server that were collected
		// from parsing through the buffer.
		if c.in.msgs > 0 {
			atomic.AddInt64(&c.inMsgs, int64(c.in.msgs))
			atomic.AddInt64(&c.inBytes, int64(c.in.bytes))
			atomic.AddInt64(&s.inMsgs, int64(c.in.msgs))
			atomic.AddInt64(&s.inBytes, int64(c.in.bytes))
		}

		// Budget to spend in place flushing outbound data.
		// Client will be checked on several fronts to see
		// if applicable. Routes will never wait in place.
		budget := 500 * time.Microsecond
		if c.typ == ROUTER {
			budget = 0
		}

		// Check pending clients for flush.
		for cp := range c.pcd {
			// Queue up a flush for those in the set
			cp.mu.Lock()
			// Update last activity for message delivery
			cp.last = last
			cp.out.fsp--
			// Flush in place while budget remains; otherwise hand off to
			// the destination's writeLoop via its condvar.
			if budget > 0 && cp.flushOutbound() {
				budget -= cp.out.lft
			} else {
				cp.flushSignal()
			}
			cp.mu.Unlock()
			delete(c.pcd, cp)
		}
		// Update activity, check read buffer size.
		c.mu.Lock()
		nc := c.nc

		// Activity based on interest changes or data/msgs.
		if c.in.msgs > 0 || c.in.subs > 0 {
			c.last = last
		}

		if n >= cap(b) {
			c.in.srs = 0
		} else if n < cap(b)/2 { // divide by 2 b/c we want less than what we would shrink to.
			c.in.srs++
		}

		// Update read buffer size as/if needed.
		if n >= cap(b) && cap(b) < maxBufSize {
			// Grow
			c.in.rsz = cap(b) * 2
			b = make([]byte, c.in.rsz)
		} else if n < cap(b) && cap(b) > minBufSize && c.in.srs > shortsToShrink {
			// Shrink, for now don't accelerate, ping/pong will eventually sort it out.
			c.in.rsz = cap(b) / 2
			b = make([]byte, c.in.rsz)
		}
		c.mu.Unlock()

		// Check to see if we got closed, e.g. slow consumer
		if nc == nil {
			return
		}
	}
}
// collapsePtoNB moves the primary write buffer (if any) onto the nb
// list in preparation for a writev, returning the resulting buffers.
// This will return a copy on purpose.
func (c *client) collapsePtoNB() net.Buffers {
	if c.out.p == nil {
		return c.out.nb
	}
	primary := c.out.p
	c.out.p = nil
	return append(c.out.nb, primary)
}
// This will handle the fixup needed on a partial write.
// Assume pending has been already calculated correctly.
func (c *client) handlePartialWrite(pnb net.Buffers) {
	// Fold any primary buffer data into nb, then requeue everything with
	// the partially-written buffers first to preserve write ordering.
	nb := c.collapsePtoNB()
	// The partial needs to be first, so append nb to pnb
	c.out.nb = append(pnb, nb...)
}
// flushOutbound will flush outbound buffer to a client.
// Will return if data was attempted to be written.
// Lock must be held. Note: the lock is RELEASED around the actual
// socket IO and re-acquired afterwards.
func (c *client) flushOutbound() bool {
	// Reentrancy guard: only one flush may be in progress per client.
	if c.flags.isSet(flushOutbound) {
		return false
	}
	c.flags.set(flushOutbound)
	defer c.flags.clear(flushOutbound)

	// Check for nothing to do.
	if c.nc == nil || c.srv == nil || c.out.pb == 0 {
		return true // true because no need to queue a signal.
	}

	// Snapshot opts
	srv := c.srv

	// Place primary on nb, assign primary to secondary, nil out nb and secondary.
	nb := c.collapsePtoNB()
	c.out.p, c.out.nb, c.out.s = c.out.s, nil, nil

	// For selecting primary replacement.
	cnb := nb

	// In case it goes away after releasing the lock.
	nc := c.nc
	attempted := c.out.pb
	apm := c.out.pm

	// Do NOT hold lock during actual IO
	c.mu.Unlock()

	// flush here
	now := time.Now()
	// FIXME(dlc) - writev will do multiple IOs past 1024 on
	// most platforms, need to account for that with deadline?
	nc.SetWriteDeadline(now.Add(c.out.wdl))
	// Actual write to the socket.
	n, err := nb.WriteTo(nc)
	nc.SetWriteDeadline(time.Time{})
	lft := time.Since(now)

	// Re-acquire client lock
	c.mu.Lock()

	// Update flush time statistics
	c.out.lft = lft

	// Subtract from pending bytes and messages.
	c.out.pb -= n
	c.out.pm -= apm // FIXME(dlc) - this will not be accurate.

	// Check for partial writes
	if n != attempted && n > 0 {
		c.handlePartialWrite(nb)
	} else if n >= int64(c.out.sz) {
		c.out.sws = 0
	}

	if err != nil {
		// On a zero-byte failed write, restore the pending count.
		if n == 0 {
			c.out.pb -= attempted
		}
		if ne, ok := err.(net.Error); ok && ne.Timeout() {
			atomic.AddInt64(&srv.slowConsumers, 1)
			c.clearConnection(SlowConsumerWriteDeadline)
			c.Noticef("Slow Consumer Detected: WriteDeadline of %v Exceeded", c.out.wdl)
		} else {
			c.clearConnection(WriteError)
			c.Debugf("Error flushing: %v", err)
		}
		return true
	}

	// Adjust based on what we wrote plus any pending.
	pt := int(n + c.out.pb)

	// Adjust sz as needed downward, keeping power of 2.
	// We do this at a slower rate, hence the pt*4.
	if pt < c.out.sz && c.out.sz > minBufSize {
		c.out.sws++
		if c.out.sws > shortsToShrink {
			c.out.sz >>= 1
		}
	}
	// Adjust sz as needed upward, keeping power of 2.
	if pt > c.out.sz && c.out.sz < maxBufSize {
		c.out.sz <<= 1
	}

	// Check to see if we can reuse buffers.
	if len(cnb) > 0 {
		oldp := cnb[0][:0]
		if cap(oldp) >= c.out.sz {
			// Replace primary or secondary if they are nil, reusing same buffer.
			if c.out.p == nil {
				c.out.p = oldp
			} else if c.out.s == nil || cap(c.out.s) < c.out.sz {
				c.out.s = oldp
			}
		}
	}
	return true
}
// flushSignal will use server to queue the flush IO operation to a pool of flushers.
// Lock must be held.
func (c *client) flushSignal() {
	// Wakes this client's writeLoop goroutine waiting on the outbound condvar.
	c.out.sg.Signal()
}

// traceMsg logs an inbound message payload at trace level.
func (c *client) traceMsg(msg []byte) {
	if !c.trace {
		return
	}
	// FIXME(dlc), allow limits to printable payload
	// The trailing CRLF (LEN_CR_LF bytes) is stripped before logging.
	c.Tracef("<<- MSG_PAYLOAD: [%s]", string(msg[:len(msg)-LEN_CR_LF]))
}
// traceInOp logs an inbound protocol operation (peer -> server).
func (c *client) traceInOp(op string, arg []byte) {
	c.traceOp("<<- %s", op, arg)
}

// traceOutOp logs an outbound protocol operation (server -> peer).
func (c *client) traceOutOp(op string, arg []byte) {
	c.traceOp("->> %s", op, arg)
}

// traceOp formats op and its optional argument and emits them at trace level.
func (c *client) traceOp(format, op string, arg []byte) {
	if !c.trace {
		return
	}

	opa := []interface{}{}
	if op != "" {
		opa = append(opa, op)
	}
	if arg != nil {
		opa = append(opa, string(arg))
	}
	// NOTE(review): opa is passed as a single value (no ... spread), so the
	// slice itself is rendered by the one verb in format — presumably
	// intentional to bracket the output; confirm against Tracef's contract.
	c.Tracef(format, opa)
}
// processInfo parses an INFO protocol line from clients and other
// routes. Route INFOs feed cluster topology handling; INFOs from plain
// clients are parsed but otherwise ignored.
func (c *client) processInfo(arg []byte) error {
	var info Info
	if err := json.Unmarshal(arg, &info); err != nil {
		return err
	}
	if c.typ == ROUTER {
		c.processRouteInfo(&info)
	}
	return nil
}
// processErr handles an -ERR protocol line from the peer: it logs the
// error tagged with the connection kind and closes the connection with
// a ParseError state.
func (c *client) processErr(errStr string) {
	switch c.typ {
	case CLIENT:
		c.Errorf("Client Error %s", errStr)
	case ROUTER:
		c.Errorf("Route Error %s", errStr)
	}
	c.closeConnection(ParseError)
}
// Password pattern matcher: captures the value following a pass-like
// key ("pass", "password", ...) up to a quote, comma, brace or newline.
var passPat = regexp.MustCompile(`"?\s*pass\S*?"?\s*[:=]\s*"?(([^",\r\n}])*)`)

// removePassFromTrace redacts any password value from a CONNECT proto
// before it is logged. The input is returned unchanged when no
// pass-like key is present; otherwise a redacted copy is returned.
func removePassFromTrace(arg []byte) []byte {
	if !bytes.Contains(arg, []byte(`pass`)) {
		return arg
	}
	// Work on a copy used only for the trace message.
	var scratch [4096]byte
	buf := append(scratch[:0], arg...)

	matches := passPat.FindAllSubmatchIndex(buf, -1)
	if len(matches) == 0 {
		return arg
	}

	const redacted = "[REDACTED]"
	for _, idx := range matches {
		if len(idx) < 4 {
			continue
		}
		// idx[2]:idx[3] bounds the captured password value; splice the
		// redaction marker in its place.
		tail := append([]byte(redacted), buf[idx[3]:]...)
		buf = append(buf[:idx[2]], tail...)
		break
	}
	return buf
}
// processConnect parses and applies the CONNECT protocol line: it
// unmarshals the client options under the lock, performs authorization
// and account registration, validates the protocol version, applies
// route metadata for ROUTER connections, and acks with +OK when
// verbose was requested.
func (c *client) processConnect(arg []byte) error {
	if c.trace {
		c.traceInOp("CONNECT", removePassFromTrace(arg))
	}

	c.mu.Lock()
	// If we can't stop the timer because the callback is in progress...
	if !c.clearAuthTimer() {
		// wait for it to finish and handle sending the failure back to
		// the client.
		for c.nc != nil {
			c.mu.Unlock()
			time.Sleep(25 * time.Millisecond)
			c.mu.Lock()
		}
		c.mu.Unlock()
		return nil
	}
	c.last = time.Now()
	typ := c.typ
	r := c.route
	srv := c.srv

	// Moved unmarshalling of clients' Options under the lock.
	// The client has already been added to the server map, so it is possible
	// that other routines lookup the client, and access its options under
	// the client's lock, so unmarshalling the options outside of the lock
	// would cause data RACEs.
	if err := json.Unmarshal(arg, &c.opts); err != nil {
		c.mu.Unlock()
		return err
	}
	// Indicate that the CONNECT protocol has been received, and that the
	// server now knows which protocol this client supports.
	c.flags.set(connectReceived)
	// Capture these under lock
	c.echo = c.opts.Echo
	proto := c.opts.Protocol
	verbose := c.opts.Verbose
	lang := c.opts.Lang
	account := c.opts.Account
	accountNew := c.opts.AccountNew
	c.mu.Unlock()

	if srv != nil {
		// As soon as c.opts is unmarshalled and if the proto is at
		// least ClientProtoInfo, we need to increment the following counter.
		// This is decremented when client is removed from the server's
		// clients map.
		if proto >= ClientProtoInfo {
			srv.mu.Lock()
			srv.cproto++
			srv.mu.Unlock()
		}

		// Check for Auth
		if ok := srv.checkAuthorization(c); !ok {
			c.authViolation()
			return ErrAuthorization
		}

		// Check for Account designation
		if account != "" {
			var acc *Account
			var wasNew bool
			if !srv.NewAccountsAllowed() {
				acc = srv.LookupAccount(account)
				if acc == nil {
					c.Errorf(ErrMissingAccount.Error())
					c.sendErr("Account Not Found")
					return ErrMissingAccount
				} else if accountNew {
					// Client asked to create an account that already exists.
					c.Errorf(ErrAccountExists.Error())
					c.sendErr(ErrAccountExists.Error())
					return ErrAccountExists
				}
			} else {
				// We can create this one on the fly.
				acc, wasNew = srv.LookupOrRegisterAccount(account)
				if accountNew && !wasNew {
					c.Errorf(ErrAccountExists.Error())
					c.sendErr(ErrAccountExists.Error())
					return ErrAccountExists
				}
			}
			// If we are here we can register ourselves with the new account.
			if err := c.registerWithAccount(acc); err != nil {
				c.Errorf("Problem registering with account [%s]", account)
				c.sendErr("Failed Account Registration")
				return ErrBadAccount
			}
		} else if c.acc == nil {
			// By default register with the global account.
			c.registerWithAccount(srv.gacc)
		}
	}

	// Check client protocol request if it exists.
	if typ == CLIENT && (proto < ClientProtoZero || proto > ClientProtoInfo) {
		c.sendErr(ErrBadClientProtocol.Error())
		c.closeConnection(BadClientProtocolVersion)
		return ErrBadClientProtocol
	} else if typ == ROUTER && lang != "" {
		// Way to detect clients that incorrectly connect to the route listen
		// port. Client provide Lang in the CONNECT protocol while ROUTEs don't.
		c.sendErr(ErrClientConnectedToRoutePort.Error())
		c.closeConnection(WrongPort)
		return ErrClientConnectedToRoutePort
	}

	// Grab connection name of remote route.
	if typ == ROUTER && r != nil {
		var routePerms *RoutePermissions
		if srv != nil {
			routePerms = srv.getOpts().Cluster.Permissions
		}
		c.mu.Lock()
		c.route.remoteID = c.opts.Name
		c.setRoutePermissions(routePerms)
		c.mu.Unlock()
	}

	if verbose {
		c.sendOK()
	}
	return nil
}
// authTimeout is invoked when the authorization timer fires before a
// successful authentication; it notifies the client and closes the
// connection with an AuthenticationTimeout reason.
func (c *client) authTimeout() {
	c.sendErr(ErrAuthTimeout.Error())
	c.Debugf("Authorization Timeout")
	c.closeConnection(AuthenticationTimeout)
}
// authViolation logs an authorization failure (mentioning the Nkey or
// username when the server has such credentials configured), reports the
// violation to the client, and closes the connection.
func (c *client) authViolation() {
	var nkeysDefined, usersDefined bool
	if s := c.srv; s != nil {
		s.mu.Lock()
		nkeysDefined = s.nkeys != nil
		usersDefined = s.users != nil
		s.mu.Unlock()
	}
	switch {
	case nkeysDefined:
		c.Errorf("%s - Nkey %q",
			ErrAuthorization.Error(),
			c.opts.Nkey)
	case usersDefined:
		c.Errorf("%s - User %q",
			ErrAuthorization.Error(),
			c.opts.Username)
	default:
		c.Errorf(ErrAuthorization.Error())
	}
	c.sendErr("Authorization Violation")
	c.closeConnection(AuthenticationViolation)
}
// maxConnExceeded rejects a connection that would exceed the server's
// connection limit: log it, tell the client, and close.
func (c *client) maxConnExceeded() {
	errMsg := ErrTooManyConnections.Error()
	c.Errorf(errMsg)
	c.sendErr(errMsg)
	c.closeConnection(MaxConnectionsExceeded)
}
// maxSubsExceeded notifies the client that it hit its subscription limit.
// The connection stays open; only the new subscription is refused.
func (c *client) maxSubsExceeded() {
	errMsg := ErrTooManySubs.Error()
	c.Errorf(errMsg)
	c.sendErr(errMsg)
}
// maxPayloadViolation handles a publish whose payload size exceeds the
// configured maximum: log both sizes, notify the client, and close.
func (c *client) maxPayloadViolation(size int, limit int64) {
	c.Errorf("%s: %d vs %d", ErrMaxPayload.Error(), size, limit)
	c.sendErr("Maximum Payload Violation")
	c.closeConnection(MaxPayloadExceeded)
}
// queueOutbound queues data for client/route connections.
// Return if the data is referenced or not. If referenced, the caller
// should not reuse the `data` array.
// Lock should be held.
//
// Buffering strategy: small writes are copied into a growable primary
// buffer (c.out.p); when the primary fills it is moved onto the pending
// list (c.out.nb) and a new one is started. Messages larger than
// maxBufSize are placed on the pending list directly (referenced, not
// copied). A previously flushed buffer may be parked in c.out.s and is
// reused when large enough.
func (c *client) queueOutbound(data []byte) bool {
	// Assume data will not be referenced
	referenced := false
	// Add to pending bytes total.
	c.out.pb += int64(len(data))
	// Check for slow consumer via pending bytes limit.
	// ok to return here, client is going away.
	if c.out.pb > c.out.mp {
		c.clearConnection(SlowConsumerPendingBytes)
		atomic.AddInt64(&c.srv.slowConsumers, 1)
		c.Noticef("Slow Consumer Detected: MaxPending of %d Exceeded", c.out.mp)
		return referenced
	}
	// No primary buffer yet and the data is small enough to be copied:
	// set one up, preferring to reuse the parked secondary buffer.
	if c.out.p == nil && len(data) < maxBufSize {
		if c.out.sz == 0 {
			c.out.sz = startBufSize
		}
		if c.out.s != nil && cap(c.out.s) >= c.out.sz {
			c.out.p = c.out.s
			c.out.s = nil
		} else {
			// FIXME(dlc) - make power of 2 if less than maxBufSize?
			c.out.p = make([]byte, 0, c.out.sz)
		}
	}
	// Determine if we copy or reference
	available := cap(c.out.p) - len(c.out.p)
	if len(data) > available {
		// We can fit into existing primary, but message will fit in next one
		// we allocate or utilize from the secondary. So copy what we can.
		if available > 0 && len(data) < c.out.sz {
			c.out.p = append(c.out.p, data[:available]...)
			data = data[available:]
		}
		// Put the primary on the nb if it has a payload
		if len(c.out.p) > 0 {
			c.out.nb = append(c.out.nb, c.out.p)
			c.out.p = nil
		}
		// Check for a big message, and if found place directly on nb
		// FIXME(dlc) - do we need signaling of ownership here if we want len(data) < maxBufSize
		if len(data) > maxBufSize {
			c.out.nb = append(c.out.nb, data)
			referenced = true
		} else {
			// We will copy to primary.
			if c.out.p == nil {
				// Grow here
				if (c.out.sz << 1) <= maxBufSize {
					c.out.sz <<= 1
				}
				if len(data) > c.out.sz {
					c.out.p = make([]byte, 0, len(data))
				} else {
					if c.out.s != nil && cap(c.out.s) >= c.out.sz { // TODO(dlc) - Size mismatch?
						c.out.p = c.out.s
						c.out.s = nil
					} else {
						c.out.p = make([]byte, 0, c.out.sz)
					}
				}
			}
			c.out.p = append(c.out.p, data...)
		}
	} else {
		// Fits in the current primary; just copy.
		c.out.p = append(c.out.p, data...)
	}
	return referenced
}
// sendProto queues a protocol message on the outbound buffer. When doFlush
// is set it attempts an inline flush; if no flush happened, it signals the
// writer loop instead.
// Assume the lock is held upon entry.
func (c *client) sendProto(info []byte, doFlush bool) {
	if c.nc == nil {
		return
	}
	c.queueOutbound(info)
	flushed := doFlush && c.flushOutbound()
	if !flushed {
		c.flushSignal()
	}
}
// sendPong traces and queues a PONG protocol message with an inline flush.
// Assume the lock is held upon entry.
func (c *client) sendPong() {
	proto := []byte("PONG\r\n")
	c.traceOutOp("PONG", nil)
	c.sendProto(proto, true)
}
// sendPing records the RTT start time, bumps the outstanding-ping counter,
// and queues a PING protocol message with an inline flush.
// Assume the lock is held upon entry.
func (c *client) sendPing() {
	proto := []byte("PING\r\n")
	c.rttStart = time.Now()
	c.ping.out++
	c.traceOutOp("PING", nil)
	c.sendProto(proto, true)
}
// generateClientInfoJSON builds the INFO protocol line to be sent to the
// client, with the client ID filled in. The info arg is copied since it is
// passed by value. Produces: "INFO <json> \r\n" (tokens joined by spaces).
// Assume lock is held.
func (c *client) generateClientInfoJSON(info Info) []byte {
	info.CID = c.cid
	// Generate the info json
	b, _ := json.Marshal(info)
	var proto bytes.Buffer
	proto.WriteString("INFO ")
	proto.Write(b)
	proto.WriteString(" ")
	proto.WriteString(CR_LF)
	return proto.Bytes()
}
// sendInfo queues an already-marshalled INFO protocol line with an inline
// flush attempt.
// Assume the lock is held upon entry.
func (c *client) sendInfo(info []byte) {
	c.sendProto(info, true)
}
// sendErr sends a -ERR protocol message containing the given text to the
// client. Takes the client lock itself, so callers must NOT hold it.
func (c *client) sendErr(err string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.traceOutOp("-ERR", []byte(err))
	c.sendProto([]byte(fmt.Sprintf("-ERR '%s'\r\n", err)), true)
}
// sendOK queues a +OK protocol message (verbose mode acknowledgement).
// It is deliberately not flushed inline; the client is marked in the
// pending-flush map so the read loop flushes it later.
func (c *client) sendOK() {
	c.mu.Lock()
	c.traceOutOp("OK", nil)
	// Can not autoflush this one, needs to be async.
	c.sendProto([]byte("+OK\r\n"), false)
	// FIXME(dlc) - ??
	c.pcd[c] = needFlush
	c.mu.Unlock()
}
// processPing handles an inbound PING: replies with a PONG and, for clients
// that support the async INFO protocol, may send an updated INFO on the
// first PING if the cluster topology changed since the client connected.
func (c *client) processPing() {
	c.mu.Lock()
	c.traceInOp("PING", nil)
	if c.nc == nil {
		c.mu.Unlock()
		return
	}
	c.sendPong()
	// If not a CLIENT, we are done
	if c.typ != CLIENT {
		c.mu.Unlock()
		return
	}
	// The CONNECT should have been received, but make sure it
	// is so before proceeding
	if !c.flags.isSet(connectReceived) {
		c.mu.Unlock()
		return
	}
	// If we are here, the CONNECT has been received so we know
	// if this client supports async INFO or not.
	var (
		checkClusterChange bool
		srv                = c.srv
	)
	// For older clients, just flip the firstPongSent flag if not already
	// set and we are done.
	if c.opts.Protocol < ClientProtoInfo || srv == nil {
		c.flags.setIfNotSet(firstPongSent)
	} else {
		// This is a client that supports async INFO protocols.
		// If this is the first PING (so firstPongSent is not set yet),
		// we will need to check if there was a change in cluster topology.
		checkClusterChange = !c.flags.isSet(firstPongSent)
	}
	c.mu.Unlock()
	if checkClusterChange {
		// Lock ordering: server lock first, then client lock.
		srv.mu.Lock()
		c.mu.Lock()
		// Now that we are under both locks, we can flip the flag.
		// This prevents sendAsyncInfoToClients() and code here
		// from sending a double INFO protocol.
		c.flags.set(firstPongSent)
		// If there was a cluster update since this client was created,
		// send an updated INFO protocol now.
		if srv.lastCURLsUpdate >= c.start.UnixNano() {
			c.sendInfo(c.generateClientInfoJSON(srv.copyInfo()))
		}
		c.mu.Unlock()
		srv.mu.Unlock()
	}
}
// processPong handles an inbound PONG: it clears the outstanding-ping
// counter and records the measured round-trip time.
func (c *client) processPong() {
	c.traceInOp("PONG", nil)
	c.mu.Lock()
	defer c.mu.Unlock()
	c.ping.out = 0
	c.rtt = time.Since(c.rttStart)
}
// processPub parses the PUB protocol arguments — "<subject> [reply] <size>"
// — into c.pa for the payload that follows, and enforces the max-payload
// limit plus (when pedantic) a literal-subject validity check.
func (c *client) processPub(arg []byte) error {
	if c.trace {
		c.traceInOp("PUB", arg)
	}
	// Unroll splitArgs to avoid runtime/heap issues
	a := [MAX_PUB_ARGS][]byte{}
	args := a[:0]
	start := -1
	for i, b := range arg {
		switch b {
		case ' ', '\t':
			if start >= 0 {
				args = append(args, arg[start:i])
				start = -1
			}
		default:
			if start < 0 {
				start = i
			}
		}
	}
	if start >= 0 {
		args = append(args, arg[start:])
	}
	c.pa.arg = arg
	switch len(args) {
	case 2:
		// PUB <subject> <size>
		c.pa.subject = args[0]
		c.pa.reply = nil
		c.pa.size = parseSize(args[1])
		c.pa.szb = args[1]
	case 3:
		// PUB <subject> <reply> <size>
		c.pa.subject = args[0]
		c.pa.reply = args[1]
		c.pa.size = parseSize(args[2])
		c.pa.szb = args[2]
	default:
		return fmt.Errorf("processPub Parse Error: '%s'", arg)
	}
	// parseSize returns a negative value for a malformed size token.
	if c.pa.size < 0 {
		return fmt.Errorf("processPub Bad or Missing Size: '%s'", arg)
	}
	maxPayload := atomic.LoadInt64(&c.mpay)
	if maxPayload > 0 && int64(c.pa.size) > maxPayload {
		c.maxPayloadViolation(c.pa.size, maxPayload)
		return ErrMaxPayload
	}
	// Pedantic mode only warns the client; the publish still proceeds.
	if c.opts.Pedantic && !IsValidLiteralSubject(string(c.pa.subject)) {
		c.sendErr("Invalid Publish Subject")
	}
	return nil
}
// splitArg splits a protocol argument line on spaces, tabs, CRs and LFs,
// returning the non-empty tokens as sub-slices of arg. A stack-resident
// array backs the result to avoid heap allocation in the common case.
func splitArg(arg []byte) [][]byte {
	var scratch [MAX_MSG_ARGS][]byte
	args := scratch[:0]
	tokStart := -1
	for i, ch := range arg {
		if ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n' {
			if tokStart >= 0 {
				args = append(args, arg[tokStart:i])
				tokStart = -1
			}
		} else if tokStart < 0 {
			tokStart = i
		}
	}
	if tokStart >= 0 {
		args = append(args, arg[tokStart:])
	}
	return args
}
// processSub parses and installs a subscription from a SUB protocol line:
// "<subject> [queue] <sid>". It enforces export/subscribe permissions and
// the max-subscriptions limit, inserts into the account sublist, adds any
// shadow subscriptions for stream imports, and updates the route map.
func (c *client) processSub(argo []byte) (err error) {
	c.traceInOp("SUB", argo)
	// Indicate activity.
	c.in.subs++
	// Copy so we do not reference a potentially large buffer
	// FIXME(dlc) - make more efficient.
	arg := make([]byte, len(argo))
	copy(arg, argo)
	args := splitArg(arg)
	sub := &subscription{client: c}
	switch len(args) {
	case 2:
		// SUB <subject> <sid>
		sub.subject = args[0]
		sub.queue = nil
		sub.sid = args[1]
	case 3:
		// SUB <subject> <queue> <sid>
		sub.subject = args[0]
		sub.queue = args[1]
		sub.sid = args[2]
	default:
		return fmt.Errorf("processSub Parse Error: '%s'", arg)
	}
	c.mu.Lock()
	// Connection may have closed while the SUB was in flight.
	if c.nc == nil {
		c.mu.Unlock()
		return nil
	}
	// Grab connection type.
	ctype := c.typ
	// Check permissions if applicable.
	if ctype == ROUTER {
		// Routes are filtered by export permissions; silently ignore.
		if !c.canExport(string(sub.subject)) {
			c.mu.Unlock()
			return nil
		}
	} else if !c.canSubscribe(string(sub.subject)) {
		c.mu.Unlock()
		c.sendErr(fmt.Sprintf("Permissions Violation for Subscription to %q", sub.subject))
		c.Errorf("Subscription Violation - User %q, Subject %q, SID %s",
			c.opts.Username, sub.subject, sub.sid)
		return nil
	}
	// Check if we have a maximum on the number of subscriptions.
	if c.msubs > 0 && len(c.subs) >= c.msubs {
		c.mu.Unlock()
		c.maxSubsExceeded()
		return nil
	}
	// We can have two SUB protocols coming from a route due to some
	// race conditions. We should make sure that we process only one.
	sid := string(sub.sid)
	acc := c.acc
	// Subscribe here.
	if c.subs[sid] == nil {
		c.subs[sid] = sub
		if acc != nil && acc.sl != nil {
			err = acc.sl.Insert(sub)
			if err != nil {
				// Roll back the map entry on sublist failure.
				delete(c.subs, sid)
			}
		}
	}
	c.mu.Unlock()
	if err != nil {
		c.sendErr("Invalid Subject")
		return nil
	} else if c.opts.Verbose {
		c.sendOK()
	}
	if acc != nil {
		if err := c.addShadowSubscriptions(acc, sub); err != nil {
			c.Errorf(err.Error())
		}
		// If we are routing and this is a local sub, add to the route map for the associated account.
		if ctype == CLIENT {
			c.srv.updateRouteSubscriptionMap(acc, sub, 1)
		}
	}
	return nil
}
// addShadowSubscriptions checks the client's account for stream imports
// matching this subscription's subject and, for each match, installs a
// shadow subscription in the exporting account's sublist so messages
// published there are delivered to this subscriber. The shadow list is
// recorded on the sub for later removal.
//
// Fix: errs was previously passed directly as the format argument of
// Debugf/Errorf-style calls (`fmt.Errorf(errs)`), which go vet flags and
// which would mangle the message if an account name contained a '%' verb.
// Use an explicit "%s" format instead.
func (c *client) addShadowSubscriptions(acc *Account, sub *subscription) error {
	if acc == nil {
		return ErrMissingAccount
	}
	// Stack-backed slice for the common small case.
	var rims [32]*streamImport
	var ims = rims[:0]
	var tokens []string
	// Collect matching stream imports under the account read lock.
	acc.mu.RLock()
	for _, im := range acc.imports.streams {
		if tokens == nil {
			// Lazily tokenize the subject only if there are imports.
			tokens = strings.Split(string(sub.subject), tsep)
		}
		if isSubsetMatch(tokens, im.prefix+im.from) {
			ims = append(ims, im)
		}
	}
	acc.mu.RUnlock()
	var shadow []*subscription
	// Now walk through collected importMaps
	for _, im := range ims {
		// We have a match for a local subscription with an import from another account.
		// We will create a shadow subscription.
		nsub := *sub // copy
		nsub.im = im
		if im.prefix != "" {
			// redo subject here to match subject in the publisher account space.
			// Just remove prefix from what they gave us. That maps into other space.
			nsub.subject = sub.subject[len(im.prefix):]
		}
		c.Debugf("Creating import subscription on %q from account %q", nsub.subject, im.acc.Name)
		if err := im.acc.sl.Insert(&nsub); err != nil {
			errs := fmt.Sprintf("Could not add shadow import subscription for account %q", im.acc.Name)
			c.Debugf("%s", errs)
			return fmt.Errorf("%s", errs)
		}
		// Update our route map here.
		c.srv.updateRouteSubscriptionMap(im.acc, &nsub, 1)
		// FIXME(dlc) - make sure to remove as well!
		if shadow == nil {
			shadow = make([]*subscription, 0, len(ims))
		}
		shadow = append(shadow, &nsub)
	}
	// Record the shadows on the original sub under the client lock.
	if shadow != nil {
		c.mu.Lock()
		sub.shadow = shadow
		c.mu.Unlock()
	}
	return nil
}
// canSubscribe determines if the client is authorized to subscribe to the
// given subject. Assumes caller is holding lock.
//
// Fix: the deny-filter scope check previously matched each deny entry's
// tokens against the deny entry itself (isSubsetMatch(tokens, sub)), which
// is trivially true, so the message deny filter was spun up for every
// wildcard subscription whenever any deny clause existed. The deny entry
// must be tested against the wildcard *subject* being subscribed to.
func (c *client) canSubscribe(subject string) bool {
	if c.perms == nil {
		return true
	}
	allowed := true
	// Check allow list. If no allow list that means all are allowed. Deny can overrule.
	if c.perms.sub.allow != nil {
		r := c.perms.sub.allow.Match(subject)
		allowed = len(r.psubs) != 0
	}
	// If we have a deny list and we think we are allowed, check that as well.
	if allowed && c.perms.sub.deny != nil {
		r := c.perms.sub.deny.Match(subject)
		allowed = len(r.psubs) == 0
		// We use the actual subscription to signal us to spin up the deny mperms
		// and cache. We check if the subject is a wildcard that contains any of
		// the deny clauses.
		// FIXME(dlc) - We could be smarter and track when these go away and remove.
		if allowed && c.mperms == nil && subjectHasWildcard(subject) {
			// Whip through the deny array and check if this wildcard subject is within scope.
			for _, sub := range c.darray {
				tokens := strings.Split(sub, tsep)
				if isSubsetMatch(tokens, subject) {
					c.loadMsgDenyFilter()
					break
				}
			}
		}
	}
	return allowed
}
// Low level unsubscribe for a given client.
// Removes the subscription from the client's sid map and the account
// sublist, along with any shadow subscriptions created for stream imports.
// When force is false and the sub has an auto-unsubscribe max that has not
// been reached yet, the unsubscribe is deferred.
func (c *client) unsubscribe(acc *Account, sub *subscription, force bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if !force && sub.max > 0 && sub.nm < sub.max {
		c.Debugf(
			"Deferring actual UNSUB(%s): %d max, %d received\n",
			string(sub.subject), sub.max, sub.nm)
		return
	}
	c.traceOp("<-> %s", "DELSUB", sub.sid)
	delete(c.subs, string(sub.sid))
	// Non-client (route) connections may have a reply-sub timeout to clear.
	if c.typ != CLIENT {
		c.removeReplySubTimeout(sub)
	}
	if acc != nil {
		acc.sl.Remove(sub)
	}
	// Check to see if we have shadow subscriptions.
	for _, nsub := range sub.shadow {
		if err := nsub.im.acc.sl.Remove(nsub); err != nil {
			c.Debugf("Could not remove shadow import subscription for account %q", nsub.im.acc.Name)
		} else if c.typ == CLIENT && c.srv != nil {
			c.srv.updateRouteSubscriptionMap(nsub.im.acc, nsub, -1)
		}
	}
	sub.shadow = nil
}
// processUnsub handles an UNSUB protocol line: "<sid> [max]". With a max
// argument the subscription is kept alive until max messages have been
// delivered (auto-unsubscribe); without one it is removed immediately.
func (c *client) processUnsub(arg []byte) error {
	c.traceInOp("UNSUB", arg)
	args := splitArg(arg)
	var sid []byte
	max := -1
	switch len(args) {
	case 1:
		sid = args[0]
	case 2:
		sid = args[0]
		max = parseSize(args[1])
	default:
		return fmt.Errorf("processUnsub Parse Error: '%s'", arg)
	}
	// Indicate activity.
	c.in.subs++
	var sub *subscription
	unsub := false
	ok := false
	c.mu.Lock()
	// Grab connection type.
	ctype := c.typ
	var acc *Account
	if sub, ok = c.subs[string(sid)]; ok {
		acc = c.acc
		if max > 0 {
			// Arm auto-unsubscribe; actual removal happens on delivery.
			sub.max = int64(max)
		} else {
			// Clear it here to override
			sub.max = 0
			unsub = true
		}
	}
	c.mu.Unlock()
	if c.opts.Verbose {
		c.sendOK()
	}
	if unsub {
		c.unsubscribe(acc, sub, false)
		if acc != nil && ctype == CLIENT {
			c.srv.updateRouteSubscriptionMap(acc, sub, -1)
		}
	}
	return nil
}
// checkDenySub will check if we are allowed to deliver this message in the
// presence of deny clauses for subscriptions. Deny clauses will not prevent
// larger scoped wildcard subscriptions, so we need to check at delivery time.
// Results are memoized in the deny cache, which is pruned once it grows past
// maxDenyPermCacheSize.
// Lock should be held.
func (c *client) checkDenySub(subject string) bool {
	// Fast path: cached verdict.
	if denied, ok := c.mperms.dcache[subject]; ok {
		return denied
	}
	// Cache miss: consult the deny sublist.
	if r := c.mperms.deny.Match(subject); len(r.psubs) != 0 {
		c.mperms.dcache[subject] = true
		return true
	}
	c.mperms.dcache[subject] = false
	if len(c.mperms.dcache) > maxDenyPermCacheSize {
		c.pruneDenyCache()
	}
	return false
}
// msgHeader completes a MSG protocol header by appending the optional sid
// and reply tokens followed by the payload size and CRLF.
func (c *client) msgHeader(mh []byte, sub *subscription, reply []byte) []byte {
	if sid := sub.sid; len(sid) > 0 {
		mh = append(append(mh, sid...), ' ')
	}
	if reply != nil {
		mh = append(append(mh, reply...), ' ')
	}
	mh = append(mh, c.pa.szb...)
	return append(mh, _CRLF_...)
}
// Used to treat maps as efficient set: needFlush is the zero-size sentinel
// value stored in c.pcd to mark a connection as having pending data to flush.
var needFlush = struct{}{}
// deliverMsg attempts delivery of a message (header mh + payload msg) to a
// single subscription's client. It enforces echo and deny-filter checks,
// handles auto-unsubscribe accounting, updates statistics, queues the bytes
// outbound, and records the destination in c.pcd for a later flush.
// Returns true if the message was queued for delivery.
func (c *client) deliverMsg(sub *subscription, mh, msg []byte) bool {
	if sub.client == nil {
		return false
	}
	client := sub.client
	client.mu.Lock()
	// Check echo
	if c == client && !client.echo {
		client.mu.Unlock()
		return false
	}
	// Check if we have a subscribe deny clause. This will trigger us to check the subject
	// for a match against the denied subjects.
	if client.mperms != nil && client.checkDenySub(string(c.pa.subject)) {
		client.mu.Unlock()
		return false
	}
	srv := client.srv
	sub.nm++
	// Check if we should auto-unsubscribe.
	if sub.max > 0 {
		if client.typ == ROUTER && sub.nm >= sub.max {
			// The only router based messages that we will see here are remoteReplies.
			// We handle these slightly differently.
			defer client.removeReplySub(sub)
		} else {
			// For routing..
			shouldForward := client.typ == CLIENT && client.srv != nil
			// If we are at the exact number, unsubscribe but
			// still process the message in hand, otherwise
			// unsubscribe and drop message on the floor.
			if sub.nm == sub.max {
				client.Debugf("Auto-unsubscribe limit of %d reached for sid '%s'\n", sub.max, string(sub.sid))
				// Due to defer, reverse the code order so that execution
				// is consistent with other cases where we unsubscribe.
				if shouldForward {
					defer srv.updateRouteSubscriptionMap(client.acc, sub, -1)
				}
				defer client.unsubscribe(client.acc, sub, true)
			} else if sub.nm > sub.max {
				client.Debugf("Auto-unsubscribe limit [%d] exceeded\n", sub.max)
				client.mu.Unlock()
				client.unsubscribe(client.acc, sub, true)
				if shouldForward {
					srv.updateRouteSubscriptionMap(client.acc, sub, -1)
				}
				return false
			}
		}
	}
	// Check for closed connection
	if client.nc == nil {
		client.mu.Unlock()
		return false
	}
	// Update statistics
	// The msg includes the CR_LF, so pull back out for accounting.
	msgSize := int64(len(msg) - LEN_CR_LF)
	// No atomic needed since accessed under client lock.
	// Monitor is reading those also under client's lock.
	client.outMsgs++
	client.outBytes += msgSize
	atomic.AddInt64(&srv.outMsgs, 1)
	atomic.AddInt64(&srv.outBytes, msgSize)
	// Queue to outbound buffer
	client.queueOutbound(mh)
	client.queueOutbound(msg)
	client.out.pm++
	// Check outbound threshold and queue IO flush if needed.
	if client.out.pm > 1 && client.out.pb > maxBufSize*2 {
		client.flushSignal()
	}
	if c.trace {
		client.traceOutOp(string(mh[:len(mh)-LEN_CR_LF]), nil)
	}
	// Increment the flush pending signals if we are setting for the first time.
	if _, ok := c.pcd[client]; !ok {
		client.out.fsp++
	}
	client.mu.Unlock()
	// Remember for when we return to the top of the loop.
	c.pcd[client] = needFlush
	return true
}
// pruneDenyCache will prune the deny cache via randomly
// deleting items (map iteration order is random). Doing so pruneSize items
// at a time.
// Lock must be held for this one since it is shared under
// deliverMsg.
func (c *client) pruneDenyCache() {
	removed := 0
	for subject := range c.mperms.dcache {
		delete(c.mperms.dcache, subject)
		removed++
		if removed > pruneSize {
			break
		}
	}
}
// prunePubPermsCache will prune the publish-permissions cache via randomly
// deleting items (map iteration order is random). Doing so pruneSize items
// at a time.
func (c *client) prunePubPermsCache() {
	removed := 0
	for subject := range c.perms.pcache {
		delete(c.perms.pcache, subject)
		removed++
		if removed > pruneSize {
			break
		}
	}
}
// pubAllowed checks on publish permissioning for the given subject:
// allowed unless an allow list excludes it or a deny list matches it.
// Verdicts are memoized in c.perms.pcache, pruned past maxPermCacheSize.
//
// Fix: removed the redundant string(subject) conversion when writing the
// cache entry — subject is already a string.
func (c *client) pubAllowed(subject string) bool {
	if c.perms == nil || (c.perms.pub.allow == nil && c.perms.pub.deny == nil) {
		return true
	}
	// Check if published subject is allowed if we have permissions in place.
	allowed, ok := c.perms.pcache[subject]
	if ok {
		return allowed
	}
	// Cache miss, check allow then deny as needed.
	if c.perms.pub.allow != nil {
		r := c.perms.pub.allow.Match(subject)
		allowed = len(r.psubs) != 0
	} else {
		// No entries means all are allowed. Deny will overrule as needed.
		allowed = true
	}
	// If we have a deny list and are currently allowed, check that as well.
	if allowed && c.perms.pub.deny != nil {
		r := c.perms.pub.deny.Match(subject)
		allowed = len(r.psubs) == 0
	}
	// Update our cache here.
	c.perms.pcache[subject] = allowed
	// Prune if needed.
	if len(c.perms.pcache) > maxPermCacheSize {
		c.prunePubPermsCache()
	}
	return allowed
}
// Used to mimic client like replies.
// Service-import reply subjects are "_R_." followed by random base-62
// digits (see newServiceReply).
const (
	replyPrefix    = "_R_."
	replyPrefixLen = len(replyPrefix)
	digits         = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
	base           = 62
)
// newServiceReply is used when rewriting replies that cross account boundaries.
// These will look like _R_.XXXXXXXXXXX, where the suffix is the base-62
// encoding of a random 63-bit value.
func (c *client) newServiceReply() []byte {
	// Check to see if we have our own rand yet. Global rand
	// has contention with lots of clients, etc.
	if c.in.prand == nil {
		c.in.prand = rand.New(rand.NewSource(time.Now().UnixNano()))
	}
	reply := [15]byte{'_', 'R', '_', '.'}
	rn := c.in.prand.Int63()
	for i := replyPrefixLen; i < len(reply); i++ {
		reply[i] = digits[rn%base]
		rn /= base
	}
	return reply[:]
}
// Test whether a reply subject is a service import reply.
func isServiceReply(reply []byte) bool {
return len(reply) > 3 && string(reply[:4]) == replyPrefix
}
// processInboundMsg dispatches an inbound message to the client-side or
// router-side handler depending on the connection type.
func (c *client) processInboundMsg(msg []byte) {
	switch c.typ {
	case CLIENT:
		c.processInboundClientMsg(msg)
	default:
		c.processInboundRoutedMsg(msg)
	}
}
// processInboundClientMsg is called to process an inbound msg from a client.
// It updates stats, enforces publish permissions and reserved-reply rules,
// resolves matching subscriptions through a per-client L1 result cache
// (invalidated by the sublist generation id), handles service imports, and
// fans the message out via processMsgResults.
func (c *client) processInboundClientMsg(msg []byte) {
	// Update statistics
	// The msg includes the CR_LF, so pull back out for accounting.
	c.in.msgs++
	c.in.bytes += len(msg) - LEN_CR_LF
	if c.trace {
		c.traceMsg(msg)
	}
	// Check pub permissions
	if c.perms != nil && (c.perms.pub.allow != nil || c.perms.pub.deny != nil) && !c.pubAllowed(string(c.pa.subject)) {
		c.pubPermissionViolation(c.pa.subject)
		return
	}
	// Now check for reserved replies. These are used for service imports.
	if isServiceReply(c.pa.reply) {
		c.replySubjectViolation(c.pa.reply)
		return
	}
	if c.opts.Verbose {
		c.sendOK()
	}
	// Mostly under testing scenarios.
	if c.srv == nil || c.acc == nil {
		return
	}
	// Match the subscriptions. We will use our own L1 map if
	// it's still valid, avoiding contention on the shared sublist.
	var r *SublistResult
	var ok bool
	genid := atomic.LoadUint64(&c.acc.sl.genid)
	if genid == c.in.genid && c.in.results != nil {
		r, ok = c.in.results[string(c.pa.subject)]
	} else {
		// Reset our L1 completely.
		c.in.results = make(map[string]*SublistResult)
		c.in.genid = genid
	}
	// Go back to the sublist data structure.
	if !ok {
		r = c.acc.sl.Match(string(c.pa.subject))
		c.in.results[string(c.pa.subject)] = r
		// Prune the results cache. Keeps us from unbounded growth. Random delete.
		if len(c.in.results) > maxResultCacheSize {
			n := 0
			for subject := range c.in.results {
				delete(c.in.results, subject)
				if n++; n > pruneSize {
					break
				}
			}
		}
	}
	// Check to see if we need to map/route to another account.
	if c.acc.imports.services != nil {
		c.checkForImportServices(c.acc, msg)
	}
	// Check for no interest, short circuit if so.
	// This is the fanout scale.
	if len(r.psubs)+len(r.qsubs) > 0 {
		c.processMsgResults(c.acc, r, msg, c.pa.subject, c.pa.reply)
	}
}
// checkForImportServices checks and processes import services by doing the
// subject mapping and sending the message onward into the exporting account
// if applicable. Replies are remapped to an anonymous _R_. subject, with an
// implicit service import installed so the response finds its way back.
func (c *client) checkForImportServices(acc *Account, msg []byte) {
	if acc == nil || acc.imports.services == nil {
		return
	}
	acc.mu.RLock()
	rm := acc.imports.services[string(c.pa.subject)]
	acc.mu.RUnlock()
	// Get the results from the other account for the mapped "to" subject.
	if rm != nil && rm.acc != nil && rm.acc.sl != nil {
		var nrr []byte
		// Auto-expire (one-shot) imports are removed on first use.
		if rm.ae {
			acc.removeServiceImport(rm.from)
		}
		if c.pa.reply != nil {
			// We want to remap this to provide anonymity.
			nrr = c.newServiceReply()
			rm.acc.addImplicitServiceImport(acc, string(nrr), string(c.pa.reply), true)
		}
		// FIXME(dlc) - Do L1 cache trick from above.
		rr := rm.acc.sl.Match(rm.to)
		c.processMsgResults(rm.acc, rr, msg, []byte(rm.to), nrr)
	}
}
// routeTarget collects, per remote route connection, the subscription to
// deliver on and the queue group names that have interest, so a single
// message is sent per route with the full queue list.
type routeTarget struct {
	sub    *subscription
	qnames [][]byte
}
// processMsgResults processes the sublist results for a given message:
// it delivers to all matching plain subscriptions, picks one member per
// queue group (random start, preferring local subscribers over routes),
// and finally sends a single routed copy per interested remote route with
// the accumulated queue-group list encoded in the header.
func (c *client) processMsgResults(acc *Account, r *SublistResult, msg, subject, reply []byte) {
	// msg header for clients.
	msgh := c.msgb[1:msgHeadProtoLen]
	msgh = append(msgh, subject...)
	msgh = append(msgh, ' ')
	si := len(msgh)
	// For sending messages across routes
	var rmap map[*client]*routeTarget
	// Loop over all normal subscriptions that match.
	for _, sub := range r.psubs {
		// Check if this is a send to a ROUTER. We now process
		// these after everything else.
		if sub.client != nil && sub.client.typ == ROUTER {
			if rmap == nil {
				rmap = map[*client]*routeTarget{}
			}
			// Only record route targets for locally originated messages.
			if c.typ != ROUTER && rmap[sub.client] == nil {
				rmap[sub.client] = &routeTarget{sub: sub}
			}
			continue
		}
		// Check for stream import mapped subs
		if sub.im != nil && sub.im.prefix != "" {
			// Redo the subject here on the fly.
			msgh = c.msgb[1:msgHeadProtoLen]
			msgh = append(msgh, sub.im.prefix...)
			msgh = append(msgh, c.pa.subject...)
			msgh = append(msgh, ' ')
			si = len(msgh)
		}
		// Normal delivery
		mh := c.msgHeader(msgh[:si], sub, reply)
		c.deliverMsg(sub, mh, msg)
	}
	// If we are sourced from a route we need to have direct filtered queues.
	if c.typ == ROUTER && c.pa.queues == nil {
		return
	}
	// Set these up to optionally filter based on the queue lists.
	// This is for messages received from routes which will have directed
	// guidance on which queue groups we should deliver to.
	qf := c.pa.queues
	// Check to see if we have our own rand yet. Global rand
	// has contention with lots of clients, etc.
	if c.in.prand == nil {
		c.in.prand = rand.New(rand.NewSource(time.Now().UnixNano()))
	}
	// Process queue subs
	// bounce is set when a local queue delivery failed and the message must
	// go to a remote route instead.
	var bounce bool
	for i := 0; i < len(r.qsubs); i++ {
		qsubs := r.qsubs[i]
		// If we have a filter check that here. We could make this a map or someting more
		// complex but linear search since we expect queues to be small should be faster
		// and more cache friendly.
		if qf != nil && len(qsubs) > 0 {
			tqn := qsubs[0].queue
			for _, qn := range qf {
				if bytes.Equal(qn, tqn) {
					goto selectQSub
				}
			}
			continue
		}
	selectQSub:
		var rsub *subscription
		// Find a subscription that is able to deliver this message
		// starting at a random index.
		startIndex := c.in.prand.Intn(len(qsubs))
		for i := 0; i < len(qsubs); i++ {
			index := (startIndex + i) % len(qsubs)
			sub := qsubs[index]
			if sub == nil {
				continue
			}
			// Sending to a remote route.
			if sub.client.typ == ROUTER {
				if c.typ == ROUTER {
					// We just came from a route, so skip and prefer local subs.
					// Keep our first rsub in case all else fails.
					if rsub == nil {
						rsub = sub
					}
					continue
				} else {
					if rmap == nil {
						rmap = map[*client]*routeTarget{}
						rmap[sub.client] = &routeTarget{sub: sub, qnames: [][]byte{sub.queue}}
					} else if rt := rmap[sub.client]; rt != nil {
						rt.qnames = append(rt.qnames, sub.queue)
					} else {
						rmap[sub.client] = &routeTarget{sub: sub, qnames: [][]byte{sub.queue}}
					}
				}
				break
			}
			// Check for mapped subs
			if sub.im != nil && sub.im.prefix != "" {
				// Redo the subject here on the fly.
				msgh = c.msgb[1:msgHeadProtoLen]
				msgh = append(msgh, sub.im.prefix...)
				msgh = append(msgh, c.pa.subject...)
				msgh = append(msgh, ' ')
				si = len(msgh)
			}
			mh := c.msgHeader(msgh[:si], sub, reply)
			if c.deliverMsg(sub, mh, msg) {
				// Clear rsub
				rsub = nil
				break
			}
		}
		if rsub != nil {
			// If we are here we tried to deliver to a local qsub
			// but failed. So we will send it to a remote.
			bounce = true
			if rmap == nil {
				rmap = map[*client]*routeTarget{}
			}
			if rt := rmap[rsub.client]; rt != nil {
				rt.qnames = append(rt.qnames, rsub.queue)
			} else {
				rmap[rsub.client] = &routeTarget{sub: rsub, qnames: [][]byte{rsub.queue}}
			}
		}
	}
	// Don't send messages to routes if we ourselves are a route.
	if (c.typ != CLIENT && !bounce) || len(rmap) == 0 {
		return
	}
	// Now process route connections.
	for _, rt := range rmap {
		// Routed header: "<proto> <account> <subject> ..."
		mh := c.msgb[:msgHeadProtoLen]
		mh = append(mh, acc.Name...)
		mh = append(mh, ' ')
		mh = append(mh, subject...)
		mh = append(mh, ' ')
		// If we have queues the third token turns into marker
		// that signals number of queues. The leading byte signifies
		// whether a reply is present as well.
		if len(rt.qnames) > 0 {
			if reply != nil {
				mh = append(mh, '+') // Signal that there is a reply.
			} else {
				mh = append(mh, '|') // Only queues
			}
			mh = append(mh, ' ')
			if reply != nil {
				mh = append(mh, reply...)
				mh = append(mh, ' ')
			}
			for _, qn := range rt.qnames {
				mh = append(mh, qn...)
				mh = append(mh, ' ')
			}
		} else if reply != nil {
			mh = append(mh, reply...)
			mh = append(mh, ' ')
		}
		mh = append(mh, c.pa.szb...)
		mh = append(mh, _CRLF_...)
		c.deliverMsg(rt.sub, mh, msg)
	}
}
// pubPermissionViolation reports a publish to a subject the client is not
// authorized for, both to the client and to the server log.
func (c *client) pubPermissionViolation(subject []byte) {
	user := c.opts.Username
	c.sendErr(fmt.Sprintf("Permissions Violation for Publish to %q", subject))
	c.Errorf("Publish Violation - User %q, Subject %q", user, subject)
}
// replySubjectViolation reports a publish that used a reserved service
// import reply subject, both to the client and to the server log.
func (c *client) replySubjectViolation(reply []byte) {
	user := c.opts.Username
	c.sendErr(fmt.Sprintf("Permissions Violation for Publish with Reply of %q", reply))
	c.Errorf("Publish Violation - User %q, Reply %q", user, reply)
}
// processPingTimer fires when the ping interval elapses. It skips the PING
// if there was recent activity, closes the connection as stale when too
// many PINGs are outstanding, otherwise sends a PING — and re-arms the
// timer unless the connection was closed.
func (c *client) processPingTimer() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.ping.tmr = nil
	// Check if connection is still opened
	if c.nc == nil {
		return
	}
	c.Debugf("%s Ping Timer", c.typeString())
	// If we have had activity within the PingInterval no
	// need to send a ping.
	if delta := time.Since(c.last); delta < c.srv.getOpts().PingInterval {
		c.Debugf("Delaying PING due to activity %v ago", delta.Round(time.Second))
	} else {
		// Check for violation
		if c.ping.out+1 > c.srv.getOpts().MaxPingsOut {
			c.Debugf("Stale Client Connection - Closing")
			c.sendProto([]byte(fmt.Sprintf("-ERR '%s'\r\n", "Stale Connection")), true)
			c.clearConnection(StaleConnection)
			// Connection is gone; do not re-arm the timer.
			return
		}
		// Send PING
		c.sendPing()
	}
	// Reset to fire again.
	c.setPingTimer()
}
// setPingTimer arms the ping timer for the configured interval.
// No-op when there is no server reference (e.g. under tests).
// Lock should be held
func (c *client) setPingTimer() {
	srv := c.srv
	if srv == nil {
		return
	}
	interval := srv.getOpts().PingInterval
	c.ping.tmr = time.AfterFunc(interval, c.processPingTimer)
}
// clearPingTimer stops and discards the ping timer if one is armed.
// Lock should be held
func (c *client) clearPingTimer() {
	if tmr := c.ping.tmr; tmr != nil {
		tmr.Stop()
		c.ping.tmr = nil
	}
}
// setAuthTimer arms the authorization timer: if the client has not
// authenticated within d, authTimeout closes the connection.
// Lock should be held
func (c *client) setAuthTimer(d time.Duration) {
	c.atmr = time.AfterFunc(d, c.authTimeout)
}
// clearAuthTimer stops the authorization timer. It returns true when there
// was no race with the timer firing (no timer armed, or Stop succeeded).
// Lock should be held
func (c *client) clearAuthTimer() bool {
	tmr := c.atmr
	if tmr == nil {
		return true
	}
	c.atmr = nil
	return tmr.Stop()
}
// isAuthTimerSet reports whether an authorization timer is currently armed.
func (c *client) isAuthTimerSet() bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.atmr != nil
}
// clearConnection flushes what it can, wakes any writers, and closes the
// underlying network connection, saving the closed-client record for
// monitoring. Idempotent via the clearConnection flag.
// Lock should be held
func (c *client) clearConnection(reason ClosedState) {
	if c.flags.isSet(clearConnection) {
		return
	}
	c.flags.set(clearConnection)
	nc := c.nc
	if nc == nil || c.srv == nil {
		return
	}
	// Flush any pending.
	c.flushOutbound()
	// Clear outbound here.
	c.out.sg.Broadcast()
	// With TLS, Close() is sending an alert (that is doing a write).
	// Need to set a deadline otherwise the server could block there
	// if the peer is not reading from socket.
	if c.flags.isSet(handshakeComplete) {
		nc.SetWriteDeadline(time.Now().Add(c.out.wdl))
	}
	nc.Close()
	// Do this always to also kick out any IO writes.
	nc.SetWriteDeadline(time.Time{})
	// Save off the connection if its a client.
	if c.typ == CLIENT && c.srv != nil {
		go c.srv.saveClosedClient(c, nc, reason)
	}
}
// typeString returns a human-readable name for the connection type,
// used in log messages.
func (c *client) typeString() string {
	if c.typ == CLIENT {
		return "Client"
	}
	if c.typ == ROUTER {
		return "Router"
	}
	return "Unknown Type"
}
// processSubsOnConfigReload removes any subscriptions the client has that are no
// longer authorized, and checks for imports (accounts) due to a config reload.
// awcsti is the set of account names whose stream imports changed; shadow
// subscriptions are rebuilt only for those. mperms is cleared and rebuilt
// as a side effect of the canSubscribe calls.
func (c *client) processSubsOnConfigReload(awcsti map[string]struct{}) {
	c.mu.Lock()
	var (
		checkPerms = c.perms != nil
		checkAcc   = c.acc != nil
		acc        = c.acc
	)
	if !checkPerms && !checkAcc {
		c.mu.Unlock()
		return
	}
	var (
		_subs    [32]*subscription
		subs     = _subs[:0]
		_removed [32]*subscription
		removed  = _removed[:0]
		srv      = c.srv
		// Identify the user for logging: Nkey, then username, then cid.
		userInfo = c.opts.Nkey
	)
	if userInfo == "" {
		userInfo = c.opts.Username
		if userInfo == "" {
			userInfo = fmt.Sprintf("%v", c.cid)
		}
	}
	if checkAcc {
		// We actually only want to check if stream imports have changed.
		if _, ok := awcsti[acc.Name]; !ok {
			checkAcc = false
		}
	}
	// We will clear any mperms we have here. It will rebuild on the fly with canSubscribe,
	// so we do that here as we collect them. We will check result down below.
	c.mperms = nil
	// Collect client's subs under the lock
	for _, sub := range c.subs {
		subs = append(subs, sub)
		// Just checking to rebuild mperms under the lock.
		c.canSubscribe(string(sub.subject))
	}
	c.mu.Unlock()
	// We can call canSubscribe() without locking since the permissions are updated
	// from config reload code prior to calling this function. So there is no risk
	// of concurrent access to c.perms.
	for _, sub := range subs {
		if checkPerms && !c.canSubscribe(string(sub.subject)) {
			removed = append(removed, sub)
			c.unsubscribe(acc, sub, true)
		} else if checkAcc {
			// Rebuild shadow subscriptions: detach the old ones, add
			// fresh ones for the current imports, then remove the old.
			c.mu.Lock()
			oldShadows := sub.shadow
			sub.shadow = nil
			c.mu.Unlock()
			c.addShadowSubscriptions(acc, sub)
			for _, nsub := range oldShadows {
				nsub.im.acc.sl.Remove(nsub)
			}
		}
	}
	// Report back to client and logs.
	for _, sub := range removed {
		c.sendErr(fmt.Sprintf("Permissions Violation for Subscription to %q (sid %q)",
			sub.subject, sub.sid))
		srv.Noticef("Removed sub %q (sid %q) for user %q - not authorized",
			sub.subject, sub.sid, userInfo)
	}
}
// qsub allows us to count up all the queue subscribers during close, so a
// single per-queue-group update can be sent to the routes instead of one per
// subscription (see closeConnection).
type qsub struct {
	sub *subscription // Representative subscription for the queue group.
	n   int32         // Number of queue subscribers accumulated for the group.
}
// closeConnection tears down this client/route connection for the given
// reason: it stops timers, clears the socket, removes subscriptions and
// account bookkeeping, and, for solicited routes, may schedule a reconnect.
func (c *client) closeConnection(reason ClosedState) {
	c.mu.Lock()
	// Already closed if the connection has been cleared.
	if c.nc == nil {
		c.mu.Unlock()
		return
	}
	c.Debugf("%s connection closed", c.typeString())
	c.clearAuthTimer()
	c.clearPingTimer()
	c.clearConnection(reason)
	c.nc = nil
	ctype := c.typ
	// Snapshot for use if we are a client connection.
	// FIXME(dlc) - we can just stub in a new one for client
	// and reference existing one.
	var subs []*subscription
	if ctype == CLIENT {
		subs = make([]*subscription, 0, len(c.subs))
		for _, sub := range c.subs {
			// Auto-unsubscribe subscriptions must be unsubscribed forcibly.
			sub.max = 0
			subs = append(subs, sub)
		}
	}
	srv := c.srv
	var (
		routeClosed   bool
		retryImplicit bool
		connectURLs   []string
	)
	if c.route != nil {
		routeClosed = c.route.closed
		if !routeClosed {
			retryImplicit = c.route.retry
		}
		connectURLs = c.route.connectURLs
	}
	acc := c.acc
	c.mu.Unlock()
	// Remove clients subscriptions.
	if ctype == CLIENT {
		acc.sl.RemoveBatch(subs)
	} else {
		go c.removeRemoteSubs()
	}
	if srv != nil {
		// This is a route that disconnected, but we are not in lame duck mode...
		if len(connectURLs) > 0 && !srv.isLameDuckMode() {
			// Unless disabled, possibly update the server's INFO protocol
			// and send to clients that know how to handle async INFOs.
			if !srv.getOpts().Cluster.NoAdvertise {
				srv.removeClientConnectURLsAndSendINFOToClients(connectURLs)
			}
		}
		// Unregister
		srv.removeClient(c)
		// Update remote subscriptions.
		if acc != nil && ctype == CLIENT {
			qsubs := map[string]*qsub{}
			for _, sub := range subs {
				if sub.queue == nil {
					srv.updateRouteSubscriptionMap(acc, sub, -1)
				} else {
					// We handle queue subscribers special in case we
					// have a bunch we can just send one update to the
					// connected routes.
					key := string(sub.subject) + " " + string(sub.queue)
					if esub, ok := qsubs[key]; ok {
						esub.n++
					} else {
						qsubs[key] = &qsub{sub, 1}
					}
				}
			}
			// Process any qsubs here.
			for _, esub := range qsubs {
				srv.updateRouteSubscriptionMap(acc, esub.sub, -(esub.n))
			}
			// Drop this client from its account; if it was the last one,
			// decrement the server's active account count.
			if prev := c.acc.removeClient(c); prev == 1 && c.srv != nil {
				c.srv.mu.Lock()
				c.srv.activeAccounts--
				c.srv.mu.Unlock()
			}
		}
	}
	// Don't reconnect routes that are being closed.
	if routeClosed {
		return
	}
	// Check for a solicited route. If it was, start up a reconnect unless
	// we are already connected to the other end.
	if c.isSolicitedRoute() || retryImplicit {
		// Capture these under lock
		c.mu.Lock()
		rid := c.route.remoteID
		rtype := c.route.routeType
		rurl := c.route.url
		c.mu.Unlock()
		srv.mu.Lock()
		defer srv.mu.Unlock()
		// It is possible that the server is being shutdown.
		// If so, don't try to reconnect
		if !srv.running {
			return
		}
		if rid != "" && srv.remotes[rid] != nil {
			c.srv.Debugf("Not attempting reconnect for solicited route, already connected to \"%s\"", rid)
			return
		} else if rid == srv.info.ID {
			c.srv.Debugf("Detected route to self, ignoring \"%s\"", rurl)
			return
		} else if rtype != Implicit || retryImplicit {
			c.srv.Debugf("Attempting reconnect for solicited route \"%s\"", rurl)
			// Keep track of this go-routine so we can wait for it on
			// server shutdown.
			srv.startGoRoutine(func() { srv.reConnectToRoute(rurl, rtype) })
		}
	}
}
// setRouteNoReconnectOnClose marks a route connection as deliberately closed
// so that a later closeConnection() will not attempt to reconnect it.
func (c *client) setRouteNoReconnectOnClose() {
	c.mu.Lock()
	defer c.mu.Unlock()
	if r := c.route; r != nil {
		r.closed = true
	}
}
// Logging functionality scoped to a client or route.
// Errorf logs an error message prefixed with this client/route's identity.
func (c *client) Errorf(format string, v ...interface{}) {
	c.srv.Errorf(fmt.Sprintf("%s - %s", c, format), v...)
}
// Debugf logs a debug message prefixed with this client/route's identity.
func (c *client) Debugf(format string, v ...interface{}) {
	c.srv.Debugf(fmt.Sprintf("%s - %s", c, format), v...)
}
// Noticef logs a notice message prefixed with this client/route's identity.
func (c *client) Noticef(format string, v ...interface{}) {
	c.srv.Noticef(fmt.Sprintf("%s - %s", c, format), v...)
}
// Tracef logs a trace message prefixed with this client/route's identity.
func (c *client) Tracef(format string, v ...interface{}) {
	c.srv.Tracef(fmt.Sprintf("%s - %s", c, format), v...)
}
Collect removed subs on first check
Signed-off-by: Derek Collison <e1c79a582b6629e6b39e9679f4bb964d25db4aa8@nats.io>
// Copyright 2012-2018 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"math/rand"
"net"
"regexp"
"strings"
"sync"
"sync/atomic"
"time"
)
// Type of client connection.
const (
	// CLIENT is an end user.
	CLIENT = iota
	// ROUTER is another router in the cluster.
	ROUTER
)

const (
	// ClientProtoZero is the original Client protocol from 2009.
	// http://nats.io/documentation/internals/nats-protocol/
	ClientProtoZero = iota
	// ClientProtoInfo signals a client can receive more then the original INFO block.
	// This can be used to update clients on other cluster members, etc.
	ClientProtoInfo
)

// Seed math/rand once at startup.
// NOTE(review): a second init() further down in this file seeds rand again;
// one of the two calls is redundant.
func init() {
	rand.Seed(time.Now().UnixNano())
}

const (
	// Scratch buffer size for the processMsg() calls.
	msgScratchSize  = 1024
	msgHeadProto    = "RMSG "
	msgHeadProtoLen = len(msgHeadProto)
)

// For controlling dynamic buffer sizes.
const (
	startBufSize   = 512   // For INFO/CONNECT block
	minBufSize     = 64    // Smallest to shrink to for PING/PONG
	maxBufSize     = 65536 // 64k
	shortsToShrink = 2     // Consecutive short reads/writes before shrinking a buffer.
)

// Represent client booleans with a bitmask
type clientFlag byte

// Some client state represented as flags
const (
	connectReceived   clientFlag = 1 << iota // The CONNECT proto has been received
	infoReceived                             // The INFO protocol has been received
	firstPongSent                            // The first PONG has been sent
	handshakeComplete                        // For TLS clients, indicate that the handshake is complete
	clearConnection                          // Marks that clearConnection has already been called.
	flushOutbound                            // Marks client as having a flushOutbound call in progress.
)
// set turns flag c on (equivalent to setting the boolean to true).
func (cf *clientFlag) set(c clientFlag) {
	*cf = *cf | c
}
// clear turns flag c off (equivalent to setting the boolean to false).
func (cf *clientFlag) clear(c clientFlag) {
	*cf = *cf &^ c
}
// isSet reports whether flag c is currently set.
func (cf clientFlag) isSet(c clientFlag) bool {
	return (cf & c) != 0
}
// setIfNotSet sets flag c only when it was not already set, and reports
// whether it actually performed the set.
func (cf *clientFlag) setIfNotSet(c clientFlag) bool {
	if *cf&c != 0 {
		return false
	}
	*cf |= c
	return true
}
// ClosedState is the reason client was closed. This will
// be passed into calls to clearConnection, but will only
// be stored in ConnInfo for monitoring.
type ClosedState int

const (
	ClientClosed = ClosedState(iota + 1)
	AuthenticationTimeout
	AuthenticationViolation
	TLSHandshakeError
	SlowConsumerPendingBytes
	SlowConsumerWriteDeadline
	WriteError
	ReadError
	ParseError
	StaleConnection
	ProtocolViolation
	BadClientProtocolVersion
	WrongPort
	MaxConnectionsExceeded
	MaxPayloadExceeded
	MaxControlLineExceeded
	DuplicateRoute
	RouteRemoved
	ServerShutdown
)

// client is the internal representation of an inbound connection, either an
// end-user CLIENT or a cluster ROUTER (distinguished by typ).
type client struct {
	// Here first because of use of atomics, and memory alignment.
	stats
	mpay   int64
	msubs  int
	mu     sync.Mutex
	typ    int
	cid    uint64 // Client id, assigned from the server's gcid counter (see initClient).
	opts   clientOpts
	start  time.Time
	nonce  []byte
	nc     net.Conn // Underlying connection; set to nil once closed.
	ncs    string   // Cached "addr - cid:N"/"addr - rid:N" string used for logging.
	out    outbound
	srv    *Server
	acc    *Account
	subs   map[string]*subscription
	perms  *permissions
	mperms *msgDeny
	darray []string // Raw subscribe-deny subjects, used by loadMsgDenyFilter.
	in     readCache
	pcd    map[*client]struct{} // Clients with pending data to flush after our readLoop pass.
	atmr   *time.Timer
	ping   pinfo
	msgb   [msgScratchSize]byte // Scratch buffer pre-seeded with the "RMSG " header.
	last   time.Time
	parseState
	rtt      time.Duration
	rttStart time.Time
	route    *route
	debug    bool
	trace    bool
	echo     bool
	flags    clientFlag // Compact booleans into a single field. Size will be increased when needed.
}

// Struct for PING initiation from the server.
type pinfo struct {
	tmr *time.Timer
	out int // PINGs sent (incremented in sendPing).
}

// outbound holds pending data for a socket.
type outbound struct {
	p   []byte        // Primary write buffer
	s   []byte        // Secondary for use post flush
	nb  net.Buffers   // net.Buffers for writev IO
	sz  int           // limit size per []byte, uses variable BufSize constants, start, min, max.
	sws int           // Number of short writes, used for dynamic resizing.
	pb  int64         // Total pending/queued bytes.
	pm  int64         // Total pending/queued messages.
	sg  *sync.Cond    // Flusher conditional for signaling.
	fsp int           // Flush signals that are pending from readLoop's pcd.
	mp  int64         // snapshot of max pending.
	wdl time.Duration // Snapshot of write deadline.
	lft time.Duration // Last flush time.
}

// perm holds the allow/deny sublists for one direction (publish or subscribe).
type perm struct {
	allow *Sublist
	deny  *Sublist
}

// permissions groups the client's publish and subscribe permissions along with
// a cache of permission-check results keyed by subject.
type permissions struct {
	sub    perm
	pub    perm
	pcache map[string]bool
}

// msgDeny is used when a user permission for subscriptions has a deny
// clause but a subscription could be made that is of broader scope.
// e.g. deny = "foo", but user subscribes to "*". That subscription should
// succeed but no message sent on foo should be delivered.
type msgDeny struct {
	deny   *Sublist
	dcache map[string]bool
}

// Size limits for the various lookup and permission caches.
const (
	maxResultCacheSize   = 512
	maxDenyPermCacheSize = 256
	maxPermCacheSize     = 128
	pruneSize            = 32
)

// Used in readloop to cache hot subject lookups and group statistics.
type readCache struct {
	// These are for clients who are bound to a single account.
	genid   uint64
	results map[string]*SublistResult
	// This is for routes to have their own L1 as well that is account aware.
	rcache map[string]*routeCache
	prand  *rand.Rand
	// Per-read-pass statistics, reset at the top of each readLoop iteration.
	msgs  int
	bytes int
	subs  int
	rsz   int // Read buffer size
	srs   int // Short reads, used for dynamic buffer resizing.
}
// String returns the cached connection string (remote address plus cid/rid)
// used when logging this client.
//
// Fix: the original declared a named result parameter `id` that was never
// used, which is misleading; it has been removed.
func (c *client) String() string {
	return c.ncs
}
// GetOpts returns a pointer to this client's parsed CONNECT options.
func (c *client) GetOpts() *clientOpts {
	opts := &c.opts
	return opts
}
// GetTLSConnectionState returns the TLS ConnectionState if TLS is enabled, nil
// otherwise. Implements the ClientAuth interface.
func (c *client) GetTLSConnectionState() *tls.ConnectionState {
	if tc, ok := c.nc.(*tls.Conn); ok {
		state := tc.ConnectionState()
		return &state
	}
	return nil
}
// This is the main subscription struct that indicates
// interest in published messages.
// FIXME(dlc) - This is getting bloated for normal subs, need
// to optionally have an opts section for non-normal stuff.
type subscription struct {
	client  *client
	im      *streamImport   // This is for import stream support.
	shadow  []*subscription // This is to track shadowed accounts.
	subject []byte
	queue   []byte
	sid     []byte
	nm      int64
	max     int64 // Auto-unsubscribe limit; forced to 0 on close to unsubscribe unconditionally.
	qw      int32
}

// clientOpts is the deserialized form of the JSON payload of the CONNECT
// protocol sent by clients (and routes).
type clientOpts struct {
	Echo          bool   `json:"echo"`
	Verbose       bool   `json:"verbose"`
	Pedantic      bool   `json:"pedantic"`
	TLSRequired   bool   `json:"tls_required"`
	Nkey          string `json:"nkey,omitempty"`
	Sig           string `json:"sig,omitempty"`
	Authorization string `json:"auth_token,omitempty"`
	Username      string `json:"user,omitempty"`
	Password      string `json:"pass,omitempty"`
	Name          string `json:"name"`
	Lang          string `json:"lang"`
	Version       string `json:"version"`
	Protocol      int    `json:"protocol"`
	Account       string `json:"account,omitempty"`
	AccountNew    bool   `json:"new_account,omitempty"`
	// Routes only
	Import *SubjectPermission `json:"import,omitempty"`
	Export *SubjectPermission `json:"export,omitempty"`
}

// defaultOpts are the client options assumed before a CONNECT is processed.
var defaultOpts = clientOpts{Verbose: true, Pedantic: true, Echo: true}
// NOTE: math/rand is already seeded by the init function near the top of this
// file; the duplicate rand.Seed call that lived here was redundant and has
// been removed.
// initClient sets up a freshly accepted connection: it assigns the client id,
// initializes the outbound buffers and flusher condition variable, snapshots
// server options, and builds the string used to identify this connection in
// log messages.
// Lock should be held
func (c *client) initClient() {
	s := c.srv
	c.cid = atomic.AddUint64(&s.gcid, 1)
	// Outbound data structure setup
	c.out.sz = startBufSize
	c.out.sg = sync.NewCond(&c.mu)
	opts := s.getOpts()
	// Snapshots to avoid mutex access in fast paths.
	c.out.wdl = opts.WriteDeadline
	c.out.mp = opts.MaxPending
	c.subs = make(map[string]*subscription)
	c.echo = true
	c.debug = (atomic.LoadInt32(&c.srv.logging.debug) != 0)
	c.trace = (atomic.LoadInt32(&c.srv.logging.trace) != 0)
	// This is a scratch buffer used for processMsg()
	// The msg header starts with "RMSG ", which can be used
	// for both local and routes.
	// in bytes that is [82 77 83 71 32].
	c.msgb = [msgScratchSize]byte{82, 77, 83, 71, 32}
	// This is to track pending clients that have data to be flushed
	// after we process inbound msgs from our own connection.
	c.pcd = make(map[*client]struct{})
	// snapshot the string version of the connection
	conn := "-"
	// Only a raw *net.TCPConn yields an address here; any other concrete
	// connection type keeps the "-" placeholder.
	if ip, ok := c.nc.(*net.TCPConn); ok {
		addr := ip.RemoteAddr().(*net.TCPAddr)
		conn = fmt.Sprintf("%s:%d", addr.IP, addr.Port)
	}
	switch c.typ {
	case CLIENT:
		c.ncs = fmt.Sprintf("%s - cid:%d", conn, c.cid)
	case ROUTER:
		c.ncs = fmt.Sprintf("%s - rid:%d", conn, c.cid)
	}
}
// registerWithAccount will register the given client with a specific
// account. This will change the subject namespace. It also maintains the
// server's active-account count as clients move between accounts.
func (c *client) registerWithAccount(acc *Account) error {
	if acc == nil || acc.sl == nil {
		return ErrBadAccount
	}
	// If we were previously registered, usually to $G, do accounting here to remove.
	if c.acc != nil {
		if prev := c.acc.removeClient(c); prev == 1 && c.srv != nil {
			c.srv.mu.Lock()
			c.srv.activeAccounts--
			c.srv.mu.Unlock()
		}
	}
	// Add in new one.
	if prev := acc.addClient(c); prev == 0 && c.srv != nil {
		c.srv.mu.Lock()
		c.srv.activeAccounts++
		c.srv.mu.Unlock()
	}
	c.mu.Lock()
	c.acc = acc
	c.mu.Unlock()
	return nil
}
// RegisterUser allows auth to call back into a new client with the
// authenticated user. The proper account/sublist is selected and any
// permissions are mapped onto the client.
func (c *client) RegisterUser(user *User) {
	if acc := user.Account; acc != nil {
		if err := c.registerWithAccount(acc); err != nil {
			c.Errorf("Problem registering with account [%s]", acc.Name)
			c.sendErr("Failed Account Registration")
			return
		}
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	if user.Permissions != nil {
		c.setPermissions(user.Permissions)
		return
	}
	// No permissions given: reset any the client previously had.
	c.perms = nil
	c.mperms = nil
}
// RegisterNkeyUser allows auth to call back into a new nkey
// client with the authenticated user. This is used to map
// any permissions into the client and setup accounts.
func (c *client) RegisterNkeyUser(user *NkeyUser) {
	c.mu.Lock()
	defer c.mu.Unlock()
	// Register with proper account and sublist.
	// NOTE(review): unlike RegisterUser, this assigns the account directly
	// without the add/remove accounting done by registerWithAccount —
	// confirm this is intentional.
	if user.Account != nil {
		c.acc = user.Account
	}
	// Assign permissions.
	if user.Permissions == nil {
		// Reset perms to nil in case client previously had them.
		c.perms = nil
		c.mperms = nil
		return
	}
	c.setPermissions(user.Permissions)
}
// Initializes client.perms structure.
// Lock is held on entry.
func (c *client) setPermissions(perms *Permissions) {
if perms == nil {
return
}
c.perms = &permissions{}
c.perms.pcache = make(map[string]bool)
// Loop over publish permissions
if perms.Publish != nil {
if len(perms.Publish.Allow) > 0 {
c.perms.pub.allow = NewSublist()
}
for _, pubSubject := range perms.Publish.Allow {
sub := &subscription{subject: []byte(pubSubject)}
c.perms.pub.allow.Insert(sub)
}
if len(perms.Publish.Deny) > 0 {
c.perms.pub.deny = NewSublist()
}
for _, pubSubject := range perms.Publish.Deny {
sub := &subscription{subject: []byte(pubSubject)}
c.perms.pub.deny.Insert(sub)
}
}
// Loop over subscribe permissions
if perms.Subscribe != nil {
if len(perms.Subscribe.Allow) > 0 {
c.perms.sub.allow = NewSublist()
}
for _, subSubject := range perms.Subscribe.Allow {
sub := &subscription{subject: []byte(subSubject)}
c.perms.sub.allow.Insert(sub)
}
if len(perms.Subscribe.Deny) > 0 {
c.perms.sub.deny = NewSublist()
// Also hold onto this array for later.
c.darray = perms.Subscribe.Deny
}
for _, subSubject := range perms.Subscribe.Deny {
sub := &subscription{subject: []byte(subSubject)}
c.perms.sub.deny.Insert(sub)
}
}
}
// loadMsgDenyFilter builds the deny structure used for filtering delivered
// messages based on a deny clause for subscriptions.
// Lock should be held.
func (c *client) loadMsgDenyFilter() {
	md := &msgDeny{NewSublist(), make(map[string]bool)}
	for _, subject := range c.darray {
		md.deny.Insert(&subscription{subject: []byte(subject)})
	}
	c.mperms = md
}
// writeLoop is the main socket write functionality.
// Runs in its own Go routine. It parks on the flusher condition variable
// until signaled, flushes the buffered outbound data, and exits once the
// connection has been cleared.
func (c *client) writeLoop() {
	defer c.srv.grWG.Done()
	// Used to check that we did flush from last wake up.
	waitOk := true
	// Main loop. Will wait to be signaled and then will use
	// buffered outbound structure for efficient writev to the underlying socket.
	for {
		c.mu.Lock()
		// Only wait when the last flush succeeded, nothing is pending (or
		// readLoop flush signals are outstanding), no partial write is
		// queued, and the connection has not been cleared.
		if waitOk && (c.out.pb == 0 || c.out.fsp > 0) && len(c.out.nb) == 0 && !c.flags.isSet(clearConnection) {
			// Wait on pending data.
			c.out.sg.Wait()
		}
		// Flush data
		waitOk = c.flushOutbound()
		isClosed := c.flags.isSet(clearConnection)
		c.mu.Unlock()
		if isClosed {
			return
		}
	}
}
// readLoop is the main socket read functionality.
// Runs in its own Go routine. It reads from the socket, feeds the parser,
// flushes any clients we produced data for, and dynamically resizes the read
// buffer based on observed traffic.
func (c *client) readLoop() {
	// Grab the connection off the client, it will be cleared on a close.
	// We check for that after the loop, but want to avoid a nil dereference
	c.mu.Lock()
	nc := c.nc
	s := c.srv
	c.in.rsz = startBufSize
	defer s.grWG.Done()
	c.mu.Unlock()
	if nc == nil {
		return
	}
	// Start read buffer.
	b := make([]byte, c.in.rsz)
	for {
		n, err := nc.Read(b)
		if err != nil {
			// EOF is a normal remote close; anything else is a read error.
			if err == io.EOF {
				c.closeConnection(ClientClosed)
			} else {
				c.closeConnection(ReadError)
			}
			return
		}
		// Grab for updates for last activity.
		last := time.Now()
		// Clear inbound stats cache
		c.in.msgs = 0
		c.in.bytes = 0
		c.in.subs = 0
		// Main call into parser for inbound data. This will generate callouts
		// to process messages, etc.
		if err := c.parse(b[:n]); err != nil {
			// handled inline
			if err != ErrMaxPayload && err != ErrAuthorization {
				c.Errorf("%s", err.Error())
				c.closeConnection(ProtocolViolation)
			}
			return
		}
		// Updates stats for client and server that were collected
		// from parsing through the buffer.
		if c.in.msgs > 0 {
			atomic.AddInt64(&c.inMsgs, int64(c.in.msgs))
			atomic.AddInt64(&c.inBytes, int64(c.in.bytes))
			atomic.AddInt64(&s.inMsgs, int64(c.in.msgs))
			atomic.AddInt64(&s.inBytes, int64(c.in.bytes))
		}
		// Budget to spend in place flushing outbound data.
		// Client will be checked on several fronts to see
		// if applicable. Routes will never wait in place.
		budget := 500 * time.Microsecond
		if c.typ == ROUTER {
			budget = 0
		}
		// Check pending clients for flush.
		for cp := range c.pcd {
			// Queue up a flush for those in the set
			cp.mu.Lock()
			// Update last activity for message delivery
			cp.last = last
			cp.out.fsp--
			// Flush in place while we still have budget, otherwise just
			// signal the client's own writeLoop.
			if budget > 0 && cp.flushOutbound() {
				budget -= cp.out.lft
			} else {
				cp.flushSignal()
			}
			cp.mu.Unlock()
			delete(c.pcd, cp)
		}
		// Update activity, check read buffer size.
		c.mu.Lock()
		// Note: deliberately shadows the outer nc to re-check for close below.
		nc := c.nc
		// Activity based on interest changes or data/msgs.
		if c.in.msgs > 0 || c.in.subs > 0 {
			c.last = last
		}
		if n >= cap(b) {
			c.in.srs = 0
		} else if n < cap(b)/2 { // divide by 2 b/c we want less than what we would shrink to.
			c.in.srs++
		}
		// Update read buffer size as/if needed.
		if n >= cap(b) && cap(b) < maxBufSize {
			// Grow
			c.in.rsz = cap(b) * 2
			b = make([]byte, c.in.rsz)
		} else if n < cap(b) && cap(b) > minBufSize && c.in.srs > shortsToShrink {
			// Shrink, for now don't accelerate, ping/pong will eventually sort it out.
			c.in.rsz = cap(b) / 2
			b = make([]byte, c.in.rsz)
		}
		c.mu.Unlock()
		// Check to see if we got closed, e.g. slow consumer
		if nc == nil {
			return
		}
	}
}
// collapsePtoNB moves the primary write buffer, if any, onto the nb buffer
// list in preparation for a WriteTo, and returns the resulting list.
func (c *client) collapsePtoNB() net.Buffers {
	p := c.out.p
	if p == nil {
		return c.out.nb
	}
	c.out.p = nil
	return append(c.out.nb, p)
}
// handlePartialWrite fixes up the outbound buffers after a short write.
// pnb holds what remains unwritten and must go out first, so it is placed
// ahead of anything currently pending.
// Assume pending has been already calculated correctly.
func (c *client) handlePartialWrite(pnb net.Buffers) {
	rest := c.collapsePtoNB()
	c.out.nb = append(pnb, rest...)
}
// flushOutbound will flush outbound buffer to a client.
// Will return if data was attempted to be written.
// Lock must be held (it is temporarily released around the socket IO).
func (c *client) flushOutbound() bool {
	// Reentrancy guard: a flush may already be in progress from another
	// goroutine (e.g. readLoop in-place flush vs. writeLoop).
	if c.flags.isSet(flushOutbound) {
		return false
	}
	c.flags.set(flushOutbound)
	defer c.flags.clear(flushOutbound)
	// Check for nothing to do.
	if c.nc == nil || c.srv == nil || c.out.pb == 0 {
		return true // true because no need to queue a signal.
	}
	// Snapshot opts
	srv := c.srv
	// Place primary on nb, assign primary to secondary, nil out nb and secondary.
	nb := c.collapsePtoNB()
	c.out.p, c.out.nb, c.out.s = c.out.s, nil, nil
	// For selecting primary replacement.
	cnb := nb
	// In case it goes away after releasing the lock.
	nc := c.nc
	attempted := c.out.pb
	apm := c.out.pm
	// Do NOT hold lock during actual IO
	c.mu.Unlock()
	// flush here
	now := time.Now()
	// FIXME(dlc) - writev will do multiple IOs past 1024 on
	// most platforms, need to account for that with deadline?
	nc.SetWriteDeadline(now.Add(c.out.wdl))
	// Actual write to the socket.
	n, err := nb.WriteTo(nc)
	nc.SetWriteDeadline(time.Time{})
	lft := time.Since(now)
	// Re-acquire client lock
	c.mu.Lock()
	// Update flush time statistics
	c.out.lft = lft
	// Subtract from pending bytes and messages.
	c.out.pb -= n
	c.out.pm -= apm // FIXME(dlc) - this will not be accurate.
	// Check for partial writes
	if n != attempted && n > 0 {
		c.handlePartialWrite(nb)
	} else if n >= int64(c.out.sz) {
		c.out.sws = 0
	}
	if err != nil {
		// Nothing was written: drop the attempted bytes from the pending
		// count, since the connection is being cleared below.
		if n == 0 {
			c.out.pb -= attempted
		}
		if ne, ok := err.(net.Error); ok && ne.Timeout() {
			atomic.AddInt64(&srv.slowConsumers, 1)
			c.clearConnection(SlowConsumerWriteDeadline)
			c.Noticef("Slow Consumer Detected: WriteDeadline of %v Exceeded", c.out.wdl)
		} else {
			c.clearConnection(WriteError)
			c.Debugf("Error flushing: %v", err)
		}
		return true
	}
	// Adjust based on what we wrote plus any pending.
	pt := int(n + c.out.pb)
	// Adjust sz as needed downward, keeping power of 2.
	// We do this at a slower rate, hence the pt*4.
	if pt < c.out.sz && c.out.sz > minBufSize {
		c.out.sws++
		if c.out.sws > shortsToShrink {
			c.out.sz >>= 1
		}
	}
	// Adjust sz as needed upward, keeping power of 2.
	if pt > c.out.sz && c.out.sz < maxBufSize {
		c.out.sz <<= 1
	}
	// Check to see if we can reuse buffers.
	if len(cnb) > 0 {
		oldp := cnb[0][:0]
		if cap(oldp) >= c.out.sz {
			// Replace primary or secondary if they are nil, reusing same buffer.
			if c.out.p == nil {
				c.out.p = oldp
			} else if c.out.s == nil || cap(c.out.s) < c.out.sz {
				c.out.s = oldp
			}
		}
	}
	return true
}
// flushSignal wakes this client's flusher (writeLoop) via the outbound
// condition variable.
// Lock must be held.
func (c *client) flushSignal() {
	sg := c.out.sg
	sg.Signal()
}
// traceMsg traces a delivered message payload (without its trailing CRLF)
// when tracing is enabled.
func (c *client) traceMsg(msg []byte) {
	if c.trace {
		// FIXME(dlc), allow limits to printable payload
		c.Tracef("<<- MSG_PAYLOAD: [%s]", string(msg[:len(msg)-LEN_CR_LF]))
	}
}
// traceInOp traces an inbound protocol operation.
func (c *client) traceInOp(op string, arg []byte) {
	c.traceOp("<<- %s", op, arg)
}
// traceOutOp traces an outbound protocol operation.
func (c *client) traceOutOp(op string, arg []byte) {
	c.traceOp("->> %s", op, arg)
}
// traceOp emits a trace line for a protocol operation, collecting the op
// name and optional argument into a single formatted value.
func (c *client) traceOp(format, op string, arg []byte) {
	if !c.trace {
		return
	}
	opa := make([]interface{}, 0, 2)
	if op != "" {
		opa = append(opa, op)
	}
	if arg != nil {
		opa = append(opa, string(arg))
	}
	c.Tracef(format, opa)
}
// processInfo parses an INFO protocol message. Route INFOs receive further
// processing; INFOs from plain clients are ignored.
func (c *client) processInfo(arg []byte) error {
	var info Info
	if err := json.Unmarshal(arg, &info); err != nil {
		return err
	}
	if c.typ != ROUTER {
		return nil
	}
	c.processRouteInfo(&info)
	return nil
}
// processErr handles an -ERR protocol message from the peer by logging it
// and closing the connection.
func (c *client) processErr(errStr string) {
	if c.typ == CLIENT {
		c.Errorf("Client Error %s", errStr)
	} else if c.typ == ROUTER {
		c.Errorf("Route Error %s", errStr)
	}
	c.closeConnection(ParseError)
}
// Password pattern matcher.
var passPat = regexp.MustCompile(`"?\s*pass\S*?"?\s*[:=]\s*"?(([^",\r\n}])*)`)

// removePassFromTrace returns a copy of arg with any password values replaced
// by "[REDACTED]" for trace logging. If arg contains no password-like field it
// is returned unchanged (and unCopied).
//
// Fix: the original broke out of the loop after the first replacement, so a
// payload containing several password-like fields (e.g. both "pass" and
// "password") leaked every value after the first into the trace. All matches
// are now redacted; they are processed back to front so the match indices of
// earlier matches remain valid after each length-changing replacement.
func removePassFromTrace(arg []byte) []byte {
	if !bytes.Contains(arg, []byte(`pass`)) {
		return arg
	}
	// Take a copy of the connect proto just for the trace message.
	var _arg [4096]byte
	buf := append(_arg[:0], arg...)
	m := passPat.FindAllSubmatchIndex(buf, -1)
	if len(m) == 0 {
		return arg
	}
	redactedPass := []byte("[REDACTED]")
	for i := len(m) - 1; i >= 0; i-- {
		idx := m[i]
		if len(idx) < 4 {
			continue
		}
		// idx[2]:idx[3] is the span of the captured password value.
		start, end := idx[2], idx[3]
		// Replace password substring.
		buf = append(buf[:start], append(redactedPass, buf[end:]...)...)
	}
	return buf
}
// processConnect handles the CONNECT protocol: it parses the client options,
// performs authorization, resolves the account to register with, validates
// the protocol level, and captures route metadata for route connections.
// Returns an error (and closes or flags the connection) on any failure.
func (c *client) processConnect(arg []byte) error {
	if c.trace {
		c.traceInOp("CONNECT", removePassFromTrace(arg))
	}
	c.mu.Lock()
	// If we can't stop the timer because the callback is in progress...
	if !c.clearAuthTimer() {
		// wait for it to finish and handle sending the failure back to
		// the client.
		for c.nc != nil {
			c.mu.Unlock()
			time.Sleep(25 * time.Millisecond)
			c.mu.Lock()
		}
		c.mu.Unlock()
		return nil
	}
	c.last = time.Now()
	typ := c.typ
	r := c.route
	srv := c.srv
	// Moved unmarshalling of clients' Options under the lock.
	// The client has already been added to the server map, so it is possible
	// that other routines lookup the client, and access its options under
	// the client's lock, so unmarshalling the options outside of the lock
	// would cause data RACEs.
	if err := json.Unmarshal(arg, &c.opts); err != nil {
		c.mu.Unlock()
		return err
	}
	// Indicate that the CONNECT protocol has been received, and that the
	// server now knows which protocol this client supports.
	c.flags.set(connectReceived)
	// Capture these under lock
	c.echo = c.opts.Echo
	proto := c.opts.Protocol
	verbose := c.opts.Verbose
	lang := c.opts.Lang
	account := c.opts.Account
	accountNew := c.opts.AccountNew
	c.mu.Unlock()
	if srv != nil {
		// As soon as c.opts is unmarshalled and if the proto is at
		// least ClientProtoInfo, we need to increment the following counter.
		// This is decremented when client is removed from the server's
		// clients map.
		if proto >= ClientProtoInfo {
			srv.mu.Lock()
			srv.cproto++
			srv.mu.Unlock()
		}
		// Check for Auth
		if ok := srv.checkAuthorization(c); !ok {
			c.authViolation()
			return ErrAuthorization
		}
		// Check for Account designation
		if account != "" {
			var acc *Account
			var wasNew bool
			if !srv.NewAccountsAllowed() {
				acc = srv.LookupAccount(account)
				if acc == nil {
					c.Errorf(ErrMissingAccount.Error())
					c.sendErr("Account Not Found")
					return ErrMissingAccount
				} else if accountNew {
					// Client asked to create an account that already exists.
					c.Errorf(ErrAccountExists.Error())
					c.sendErr(ErrAccountExists.Error())
					return ErrAccountExists
				}
			} else {
				// We can create this one on the fly.
				acc, wasNew = srv.LookupOrRegisterAccount(account)
				if accountNew && !wasNew {
					c.Errorf(ErrAccountExists.Error())
					c.sendErr(ErrAccountExists.Error())
					return ErrAccountExists
				}
			}
			// If we are here we can register ourselves with the new account.
			if err := c.registerWithAccount(acc); err != nil {
				c.Errorf("Problem registering with account [%s]", account)
				c.sendErr("Failed Account Registration")
				return ErrBadAccount
			}
		} else if c.acc == nil {
			// By default register with the global account.
			c.registerWithAccount(srv.gacc)
		}
	}
	// Check client protocol request if it exists.
	if typ == CLIENT && (proto < ClientProtoZero || proto > ClientProtoInfo) {
		c.sendErr(ErrBadClientProtocol.Error())
		c.closeConnection(BadClientProtocolVersion)
		return ErrBadClientProtocol
	} else if typ == ROUTER && lang != "" {
		// Way to detect clients that incorrectly connect to the route listen
		// port. Client provide Lang in the CONNECT protocol while ROUTEs don't.
		c.sendErr(ErrClientConnectedToRoutePort.Error())
		c.closeConnection(WrongPort)
		return ErrClientConnectedToRoutePort
	}
	// Grab connection name of remote route.
	if typ == ROUTER && r != nil {
		var routePerms *RoutePermissions
		if srv != nil {
			routePerms = srv.getOpts().Cluster.Permissions
		}
		c.mu.Lock()
		c.route.remoteID = c.opts.Name
		c.setRoutePermissions(routePerms)
		c.mu.Unlock()
	}
	if verbose {
		c.sendOK()
	}
	return nil
}
// authTimeout handles expiration of the authorization timer by rejecting
// and closing the connection.
func (c *client) authTimeout() {
	msg := ErrAuthTimeout.Error()
	c.sendErr(msg)
	c.Debugf("Authorization Timeout")
	c.closeConnection(AuthenticationTimeout)
}
// authViolation logs the authorization failure (with the nkey or username
// when the server has those configured), notifies the client, and closes
// the connection.
func (c *client) authViolation() {
	var hasNkeys, hasUsers bool
	if s := c.srv; s != nil {
		s.mu.Lock()
		hasNkeys = s.nkeys != nil
		hasUsers = s.users != nil
		s.mu.Unlock()
	}
	switch {
	case hasNkeys:
		c.Errorf("%s - Nkey %q",
			ErrAuthorization.Error(),
			c.opts.Nkey)
	case hasUsers:
		c.Errorf("%s - User %q",
			ErrAuthorization.Error(),
			c.opts.Username)
	default:
		c.Errorf(ErrAuthorization.Error())
	}
	c.sendErr("Authorization Violation")
	c.closeConnection(AuthenticationViolation)
}
// maxConnExceeded rejects a connection that would exceed the server's
// maximum connection count.
func (c *client) maxConnExceeded() {
	errMsg := ErrTooManyConnections.Error()
	c.Errorf(errMsg)
	c.sendErr(errMsg)
	c.closeConnection(MaxConnectionsExceeded)
}
// maxSubsExceeded reports that the client hit its subscription limit; the
// connection is left open.
func (c *client) maxSubsExceeded() {
	errMsg := ErrTooManySubs.Error()
	c.Errorf(errMsg)
	c.sendErr(errMsg)
}
// maxPayloadViolation rejects a message whose size sz exceeds the allowed
// maximum payload max and terminates the connection.
func (c *client) maxPayloadViolation(sz int, max int64) {
	c.Errorf("%s: %d vs %d", ErrMaxPayload.Error(), sz, max)
	c.sendErr("Maximum Payload Violation")
	c.closeConnection(MaxPayloadExceeded)
}
// queueOutbound queues data for client/route connections.
// Return if the data is referenced or not. If referenced, the caller
// should not reuse the `data` array.
// Lock should be held.
func (c *client) queueOutbound(data []byte) bool {
	// Assume data will not be referenced
	referenced := false
	// Add to pending bytes total.
	c.out.pb += int64(len(data))
	// Check for slow consumer via pending bytes limit.
	// ok to return here, client is going away.
	if c.out.pb > c.out.mp {
		c.clearConnection(SlowConsumerPendingBytes)
		atomic.AddInt64(&c.srv.slowConsumers, 1)
		c.Noticef("Slow Consumer Detected: MaxPending of %d Exceeded", c.out.mp)
		return referenced
	}
	// Lazily allocate the primary buffer, preferring to reuse the secondary
	// when it is large enough.
	if c.out.p == nil && len(data) < maxBufSize {
		if c.out.sz == 0 {
			c.out.sz = startBufSize
		}
		if c.out.s != nil && cap(c.out.s) >= c.out.sz {
			c.out.p = c.out.s
			c.out.s = nil
		} else {
			// FIXME(dlc) - make power of 2 if less than maxBufSize?
			c.out.p = make([]byte, 0, c.out.sz)
		}
	}
	// Determine if we copy or reference
	available := cap(c.out.p) - len(c.out.p)
	if len(data) > available {
		// We can fit into existing primary, but message will fit in next one
		// we allocate or utilize from the secondary. So copy what we can.
		if available > 0 && len(data) < c.out.sz {
			c.out.p = append(c.out.p, data[:available]...)
			data = data[available:]
		}
		// Put the primary on the nb if it has a payload
		if len(c.out.p) > 0 {
			c.out.nb = append(c.out.nb, c.out.p)
			c.out.p = nil
		}
		// Check for a big message, and if found place directly on nb
		// FIXME(dlc) - do we need signaling of ownership here if we want len(data) < maxBufSize
		if len(data) > maxBufSize {
			c.out.nb = append(c.out.nb, data)
			referenced = true
		} else {
			// We will copy to primary.
			if c.out.p == nil {
				// Grow here
				if (c.out.sz << 1) <= maxBufSize {
					c.out.sz <<= 1
				}
				if len(data) > c.out.sz {
					c.out.p = make([]byte, 0, len(data))
				} else {
					if c.out.s != nil && cap(c.out.s) >= c.out.sz { // TODO(dlc) - Size mismatch?
						c.out.p = c.out.s
						c.out.s = nil
					} else {
						c.out.p = make([]byte, 0, c.out.sz)
					}
				}
			}
			c.out.p = append(c.out.p, data...)
		}
	} else {
		// Fits entirely in the current primary.
		c.out.p = append(c.out.p, data...)
	}
	return referenced
}
// sendProto queues a raw protocol buffer for delivery, flushing in place
// when doFlush is set, otherwise signaling the flusher.
// Assume the lock is held upon entry.
func (c *client) sendProto(info []byte, doFlush bool) {
	if c.nc == nil {
		return
	}
	c.queueOutbound(info)
	flushed := doFlush && c.flushOutbound()
	if !flushed {
		c.flushSignal()
	}
}
// sendPong queues a PONG response and flushes it in place.
// Assume the lock is held upon entry.
func (c *client) sendPong() {
	pong := []byte("PONG\r\n")
	c.traceOutOp("PONG", nil)
	c.sendProto(pong, true)
}
// sendPing records the RTT start time, bumps the outstanding ping count,
// and sends a PING with an in-place flush.
// Assume the lock is held upon entry.
func (c *client) sendPing() {
	c.rttStart = time.Now()
	c.ping.out++
	ping := []byte("PING\r\n")
	c.traceOutOp("PING", nil)
	c.sendProto(ping, true)
}
// generateClientInfoJSON builds the INFO protocol line to send to the client
// with the client ID included. The info arg is copied since it is passed by
// value.
// Assume lock is held.
func (c *client) generateClientInfoJSON(info Info) []byte {
	info.CID = c.cid
	// Generate the info json
	b, _ := json.Marshal(info)
	return bytes.Join([][]byte{[]byte("INFO"), b, []byte(CR_LF)}, []byte(" "))
}
// sendInfo sends a pre-marshaled INFO protocol line to the client,
// flushing immediately.
// Assume the lock is held upon entry.
func (c *client) sendInfo(info []byte) {
	c.sendProto(info, true)
}
// sendErr sends a -ERR protocol message with the given text to the client.
// Acquires the client lock; safe to call without the lock held.
func (c *client) sendErr(err string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.traceOutOp("-ERR", []byte(err))
	c.sendProto([]byte(fmt.Sprintf("-ERR '%s'\r\n", err)), true)
}
// sendOK queues a +OK protocol response. The write is intentionally not
// auto-flushed; the client is marked in pcd so the read loop flushes later.
func (c *client) sendOK() {
	proto := []byte("+OK\r\n")
	c.mu.Lock()
	defer c.mu.Unlock()
	c.traceOutOp("OK", nil)
	// Can not autoflush this one, needs to be async.
	c.sendProto(proto, false)
	// FIXME(dlc) - ??
	c.pcd[c] = needFlush
}
// processPing handles an inbound PING from the peer. It replies with a PONG
// and, for clients that support the async INFO protocol, may send an updated
// INFO protocol on the first PING if the cluster topology changed since the
// client connected.
func (c *client) processPing() {
	c.mu.Lock()
	c.traceInOp("PING", nil)
	if c.nc == nil {
		c.mu.Unlock()
		return
	}
	c.sendPong()
	// If not a CLIENT, we are done
	if c.typ != CLIENT {
		c.mu.Unlock()
		return
	}
	// The CONNECT should have been received, but make sure it
	// is so before proceeding
	if !c.flags.isSet(connectReceived) {
		c.mu.Unlock()
		return
	}
	// If we are here, the CONNECT has been received so we know
	// if this client supports async INFO or not.
	var (
		checkClusterChange bool
		srv                = c.srv
	)
	// For older clients, just flip the firstPongSent flag if not already
	// set and we are done.
	if c.opts.Protocol < ClientProtoInfo || srv == nil {
		c.flags.setIfNotSet(firstPongSent)
	} else {
		// This is a client that supports async INFO protocols.
		// If this is the first PING (so firstPongSent is not set yet),
		// we will need to check if there was a change in cluster topology.
		checkClusterChange = !c.flags.isSet(firstPongSent)
	}
	c.mu.Unlock()
	if checkClusterChange {
		// Lock order matters: server lock before client lock.
		srv.mu.Lock()
		c.mu.Lock()
		// Now that we are under both locks, we can flip the flag.
		// This prevents sendAsyncInfoToClients() and code here
		// to send a double INFO protocol.
		c.flags.set(firstPongSent)
		// If there was a cluster update since this client was created,
		// send an updated INFO protocol now.
		if srv.lastCURLsUpdate >= c.start.UnixNano() {
			c.sendInfo(c.generateClientInfoJSON(srv.copyInfo()))
		}
		c.mu.Unlock()
		srv.mu.Unlock()
	}
}
// processPong handles an inbound PONG: it clears the outstanding ping count
// and records the measured round-trip time.
func (c *client) processPong() {
	c.traceInOp("PONG", nil)
	c.mu.Lock()
	defer c.mu.Unlock()
	c.ping.out = 0
	c.rtt = time.Since(c.rttStart)
}
// processPub parses the argument portion of a PUB protocol line
// ("PUB <subject> [reply] <size>") and stores the results in c.pa for use
// when the payload arrives. Returns a parse error for a malformed line or a
// bad/missing size, and ErrMaxPayload when the announced size exceeds the
// configured server limit.
func (c *client) processPub(arg []byte) error {
	if c.trace {
		c.traceInOp("PUB", arg)
	}
	// Unroll splitArgs to avoid runtime/heap issues
	a := [MAX_PUB_ARGS][]byte{}
	args := a[:0]
	start := -1
	for i, b := range arg {
		switch b {
		case ' ', '\t':
			if start >= 0 {
				args = append(args, arg[start:i])
				start = -1
			}
		default:
			if start < 0 {
				start = i
			}
		}
	}
	if start >= 0 {
		args = append(args, arg[start:])
	}
	c.pa.arg = arg
	switch len(args) {
	case 2:
		// PUB <subject> <size>
		c.pa.subject = args[0]
		c.pa.reply = nil
		c.pa.size = parseSize(args[1])
		c.pa.szb = args[1]
	case 3:
		// PUB <subject> <reply> <size>
		c.pa.subject = args[0]
		c.pa.reply = args[1]
		c.pa.size = parseSize(args[2])
		c.pa.szb = args[2]
	default:
		return fmt.Errorf("processPub Parse Error: '%s'", arg)
	}
	if c.pa.size < 0 {
		return fmt.Errorf("processPub Bad or Missing Size: '%s'", arg)
	}
	// mpay can be updated on config reload, so it is read atomically here.
	maxPayload := atomic.LoadInt64(&c.mpay)
	if maxPayload > 0 && int64(c.pa.size) > maxPayload {
		c.maxPayloadViolation(c.pa.size, maxPayload)
		return ErrMaxPayload
	}
	// Pedantic mode only warns the client; the publish still proceeds.
	if c.opts.Pedantic && !IsValidLiteralSubject(string(c.pa.subject)) {
		c.sendErr("Invalid Publish Subject")
	}
	return nil
}
// splitArg tokenizes a protocol argument on whitespace (space, tab, CR, LF),
// returning subslices of the original buffer. A fixed-size scratch array is
// used to avoid heap allocation for the common case.
func splitArg(arg []byte) [][]byte {
	var scratch [MAX_MSG_ARGS][]byte
	args := scratch[:0]
	tokStart := -1
	for i, ch := range arg {
		switch ch {
		case ' ', '\t', '\r', '\n':
			if tokStart >= 0 {
				args = append(args, arg[tokStart:i])
				tokStart = -1
			}
		default:
			if tokStart < 0 {
				tokStart = i
			}
		}
	}
	// Flush a trailing token that runs to the end of the buffer.
	if tokStart >= 0 {
		args = append(args, arg[tokStart:])
	}
	return args
}
// processSub parses and registers a subscription from a SUB protocol line
// ("SUB <subject> [queue] <sid>"). It enforces export/subscribe permissions,
// the per-client subscription limit, inserts into the account sublist, adds
// any shadow subscriptions for stream imports, and updates the route map for
// client connections. Returns an error only for an unparsable line; protocol
// violations are reported to the client and logged instead.
func (c *client) processSub(argo []byte) (err error) {
	c.traceInOp("SUB", argo)
	// Indicate activity.
	c.in.subs++
	// Copy so we do not reference a potentially large buffer
	// FIXME(dlc) - make more efficient.
	arg := make([]byte, len(argo))
	copy(arg, argo)
	args := splitArg(arg)
	sub := &subscription{client: c}
	switch len(args) {
	case 2:
		sub.subject = args[0]
		sub.queue = nil
		sub.sid = args[1]
	case 3:
		sub.subject = args[0]
		sub.queue = args[1]
		sub.sid = args[2]
	default:
		return fmt.Errorf("processSub Parse Error: '%s'", arg)
	}
	c.mu.Lock()
	if c.nc == nil {
		c.mu.Unlock()
		return nil
	}
	// Grab connection type.
	ctype := c.typ
	// Check permissions if applicable.
	if ctype == ROUTER {
		if !c.canExport(string(sub.subject)) {
			c.mu.Unlock()
			return nil
		}
	} else if !c.canSubscribe(string(sub.subject)) {
		c.mu.Unlock()
		c.sendErr(fmt.Sprintf("Permissions Violation for Subscription to %q", sub.subject))
		c.Errorf("Subscription Violation - User %q, Subject %q, SID %s",
			c.opts.Username, sub.subject, sub.sid)
		return nil
	}
	// Check if we have a maximum on the number of subscriptions.
	if c.msubs > 0 && len(c.subs) >= c.msubs {
		c.mu.Unlock()
		c.maxSubsExceeded()
		return nil
	}
	// We can have two SUB protocols coming from a route due to some
	// race conditions. We should make sure that we process only one.
	sid := string(sub.sid)
	acc := c.acc
	// Subscribe here.
	if c.subs[sid] == nil {
		c.subs[sid] = sub
		if acc != nil && acc.sl != nil {
			err = acc.sl.Insert(sub)
			if err != nil {
				delete(c.subs, sid)
			}
		}
	}
	c.mu.Unlock()
	if err != nil {
		c.sendErr("Invalid Subject")
		return nil
	} else if c.opts.Verbose {
		c.sendOK()
	}
	if acc != nil {
		if err := c.addShadowSubscriptions(acc, sub); err != nil {
			// Use an explicit format verb: the error text may contain a
			// quoted account name, so it must not be treated as a format
			// string itself (go vet printf).
			c.Errorf("%v", err)
		}
		// If we are routing and this is a local sub, add to the route map for the associated account.
		if ctype == CLIENT {
			c.srv.updateRouteSubscriptionMap(acc, sub, 1)
		}
	}
	return nil
}
// If the client's account has stream imports and there are matches for
// this subscription's subject, then add shadow subscriptions in
// other accounts that can export this subject. The shadow subs are recorded
// on the original subscription (sub.shadow) so they can be removed on UNSUB.
func (c *client) addShadowSubscriptions(acc *Account, sub *subscription) error {
	if acc == nil {
		return ErrMissingAccount
	}
	var rims [32]*streamImport
	var ims = rims[:0]
	var tokens []string
	// Collect matching stream imports under the account read lock.
	acc.mu.RLock()
	for _, im := range acc.imports.streams {
		// Tokenize the subject lazily, only if there is at least one import.
		if tokens == nil {
			tokens = strings.Split(string(sub.subject), tsep)
		}
		if isSubsetMatch(tokens, im.prefix+im.from) {
			ims = append(ims, im)
		}
	}
	acc.mu.RUnlock()
	var shadow []*subscription
	// Now walk through collected importMaps
	for _, im := range ims {
		// We have a match for a local subscription with an import from another account.
		// We will create a shadow subscription.
		nsub := *sub // copy
		nsub.im = im
		if im.prefix != "" {
			// redo subject here to match subject in the publisher account space.
			// Just remove prefix from what they gave us. That maps into other space.
			nsub.subject = sub.subject[len(im.prefix):]
		}
		c.Debugf("Creating import subscription on %q from account %q", nsub.subject, im.acc.Name)
		if err := im.acc.sl.Insert(&nsub); err != nil {
			// The message contains a %q-quoted account name, so never pass it
			// as a format string (go vet printf); use explicit "%s" verbs.
			errs := fmt.Sprintf("Could not add shadow import subscription for account %q", im.acc.Name)
			c.Debugf("%s", errs)
			return fmt.Errorf("%s", errs)
		}
		// Update our route map here.
		c.srv.updateRouteSubscriptionMap(im.acc, &nsub, 1)
		// FIXME(dlc) - make sure to remove as well!
		if shadow == nil {
			shadow = make([]*subscription, 0, len(ims))
		}
		shadow = append(shadow, &nsub)
	}
	if shadow != nil {
		c.mu.Lock()
		sub.shadow = shadow
		c.mu.Unlock()
	}
	return nil
}
// canSubscribe determines if the client is authorized to subscribe to the
// given subject. Assumes caller is holding lock.
func (c *client) canSubscribe(subject string) bool {
	if c.perms == nil {
		return true
	}
	allowed := true
	// Check allow list. If no allow list that means all are allowed. Deny can overrule.
	if c.perms.sub.allow != nil {
		r := c.perms.sub.allow.Match(subject)
		allowed = len(r.psubs) != 0
	}
	// If we have a deny list and we think we are allowed, check that as well.
	if allowed && c.perms.sub.deny != nil {
		r := c.perms.sub.deny.Match(subject)
		allowed = len(r.psubs) == 0
		// We use the actual subscription to signal us to spin up the deny mperms
		// and cache. We check if the subject is a wildcard that contains any of
		// the deny clauses.
		// FIXME(dlc) - We could be smarter and track when these go away and remove.
		if allowed && c.mperms == nil && subjectHasWildcard(subject) {
			// Whip through the deny array and check if this wildcard subject is within scope.
			for _, sub := range c.darray {
				tokens := strings.Split(sub, tsep)
				// BUG FIX: the deny entry must be checked against the
				// wildcard subscription subject. The previous code passed
				// the deny entry as both arguments (always a match), which
				// spun up the deny filter for every wildcard subscription.
				if isSubsetMatch(tokens, subject) {
					c.loadMsgDenyFilter()
					break
				}
			}
		}
	}
	return allowed
}
// Low level unsubscribe for a given client. Removes the subscription from
// the client's sid map, the account sublist, and any shadow subscriptions
// created for stream imports. If force is false and the subscription has an
// auto-unsubscribe maximum that has not yet been reached, the removal is
// deferred.
func (c *client) unsubscribe(acc *Account, sub *subscription, force bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if !force && sub.max > 0 && sub.nm < sub.max {
		c.Debugf(
			"Deferring actual UNSUB(%s): %d max, %d received\n",
			string(sub.subject), sub.max, sub.nm)
		return
	}
	c.traceOp("<-> %s", "DELSUB", sub.sid)
	delete(c.subs, string(sub.sid))
	// Routes track reply-subscription timeouts; clear any pending one.
	if c.typ != CLIENT {
		c.removeReplySubTimeout(sub)
	}
	if acc != nil {
		acc.sl.Remove(sub)
	}
	// Check to see if we have shadow subscriptions.
	for _, nsub := range sub.shadow {
		if err := nsub.im.acc.sl.Remove(nsub); err != nil {
			c.Debugf("Could not remove shadow import subscription for account %q", nsub.im.acc.Name)
		} else if c.typ == CLIENT && c.srv != nil {
			c.srv.updateRouteSubscriptionMap(nsub.im.acc, nsub, -1)
		}
	}
	sub.shadow = nil
}
// processUnsub handles an UNSUB protocol line ("UNSUB <sid> [max-msgs]").
// Without a max, the subscription is removed immediately; with a positive
// max it is converted into an auto-unsubscribe that triggers at delivery
// time. Returns an error only for an unparsable line.
func (c *client) processUnsub(arg []byte) error {
	c.traceInOp("UNSUB", arg)
	args := splitArg(arg)
	var sid []byte
	max := -1
	switch len(args) {
	case 1:
		sid = args[0]
	case 2:
		sid = args[0]
		max = parseSize(args[1])
	default:
		return fmt.Errorf("processUnsub Parse Error: '%s'", arg)
	}
	// Indicate activity.
	c.in.subs++
	var sub *subscription
	unsub := false
	ok := false
	c.mu.Lock()
	// Grab connection type.
	ctype := c.typ
	var acc *Account
	if sub, ok = c.subs[string(sid)]; ok {
		acc = c.acc
		if max > 0 {
			// Arm auto-unsubscribe; actual removal happens in deliverMsg.
			sub.max = int64(max)
		} else {
			// Clear it here to override
			sub.max = 0
			unsub = true
		}
	}
	c.mu.Unlock()
	if c.opts.Verbose {
		c.sendOK()
	}
	if unsub {
		c.unsubscribe(acc, sub, false)
		if acc != nil && ctype == CLIENT {
			c.srv.updateRouteSubscriptionMap(acc, sub, -1)
		}
	}
	return nil
}
// checkDenySub will check if we are allowed to deliver this message in the
// presence of deny clauses for subscriptions. Deny clauses will not prevent
// larger scoped wildcard subscriptions, so we need to check at delivery time.
// Results are memoized in mperms.dcache; the cache is pruned only on the
// miss path that inserts a new entry. Returns true when delivery is denied.
// Lock should be held.
func (c *client) checkDenySub(subject string) bool {
	if denied, ok := c.mperms.dcache[subject]; ok {
		return denied
	} else if r := c.mperms.deny.Match(subject); len(r.psubs) != 0 {
		c.mperms.dcache[subject] = true
		return true
	} else {
		c.mperms.dcache[subject] = false
	}
	if len(c.mperms.dcache) > maxDenyPermCacheSize {
		c.pruneDenyCache()
	}
	return false
}
// msgHeader completes a MSG protocol header by appending the sid, optional
// reply subject, payload size, and trailing CRLF to the supplied prefix.
func (c *client) msgHeader(mh []byte, sub *subscription, reply []byte) []byte {
	if len(sub.sid) > 0 {
		mh = append(append(mh, sub.sid...), ' ')
	}
	if reply != nil {
		mh = append(append(mh, reply...), ' ')
	}
	mh = append(mh, c.pa.szb...)
	return append(mh, _CRLF_...)
}
// Used to treat maps as efficient set
// (sentinel value stored in the pcd map to mark a client as needing a flush).
var needFlush = struct{}{}
// deliverMsg attempts to deliver a message (header mh + payload msg) to the
// client owning sub. It enforces the no-echo setting, per-message deny
// filters, and auto-unsubscribe maximums, updates client and server
// statistics, and queues the bytes on the destination's outbound buffer.
// Returns true only if the message was actually queued for delivery.
func (c *client) deliverMsg(sub *subscription, mh, msg []byte) bool {
	if sub.client == nil {
		return false
	}
	client := sub.client
	client.mu.Lock()
	// Check echo
	if c == client && !client.echo {
		client.mu.Unlock()
		return false
	}
	// Check if we have a subscribe deny clause. This will trigger us to check the subject
	// for a match against the denied subjects.
	if client.mperms != nil && client.checkDenySub(string(c.pa.subject)) {
		client.mu.Unlock()
		return false
	}
	srv := client.srv
	sub.nm++
	// Check if we should auto-unsubscribe.
	if sub.max > 0 {
		if client.typ == ROUTER && sub.nm >= sub.max {
			// The only router based messages that we will see here are remoteReplies.
			// We handle these slightly differently.
			defer client.removeReplySub(sub)
		} else {
			// For routing..
			shouldForward := client.typ == CLIENT && client.srv != nil
			// If we are at the exact number, unsubscribe but
			// still process the message in hand, otherwise
			// unsubscribe and drop message on the floor.
			if sub.nm == sub.max {
				client.Debugf("Auto-unsubscribe limit of %d reached for sid '%s'\n", sub.max, string(sub.sid))
				// Due to defer, reverse the code order so that execution
				// is consistent with other cases where we unsubscribe.
				if shouldForward {
					defer srv.updateRouteSubscriptionMap(client.acc, sub, -1)
				}
				defer client.unsubscribe(client.acc, sub, true)
			} else if sub.nm > sub.max {
				client.Debugf("Auto-unsubscribe limit [%d] exceeded\n", sub.max)
				client.mu.Unlock()
				client.unsubscribe(client.acc, sub, true)
				if shouldForward {
					srv.updateRouteSubscriptionMap(client.acc, sub, -1)
				}
				return false
			}
		}
	}
	// Check for closed connection
	if client.nc == nil {
		client.mu.Unlock()
		return false
	}
	// Update statistics
	// The msg includes the CR_LF, so pull back out for accounting.
	msgSize := int64(len(msg) - LEN_CR_LF)
	// No atomic needed since accessed under client lock.
	// Monitor is reading those also under client's lock.
	client.outMsgs++
	client.outBytes += msgSize
	atomic.AddInt64(&srv.outMsgs, 1)
	atomic.AddInt64(&srv.outBytes, msgSize)
	// Queue to outbound buffer
	client.queueOutbound(mh)
	client.queueOutbound(msg)
	client.out.pm++
	// Check outbound threshold and queue IO flush if needed.
	if client.out.pm > 1 && client.out.pb > maxBufSize*2 {
		client.flushSignal()
	}
	if c.trace {
		client.traceOutOp(string(mh[:len(mh)-LEN_CR_LF]), nil)
	}
	// Increment the flush pending signals if we are setting for the first time.
	if _, ok := c.pcd[client]; !ok {
		client.out.fsp++
	}
	client.mu.Unlock()
	// Remember for when we return to the top of the loop.
	c.pcd[client] = needFlush
	return true
}
// pruneDenyCache will prune the deny cache via randomly
// deleting items (Go map iteration order is random). Doing so pruneSize
// items at a time. Lock must be held for this one since it is shared under
// deliverMsg.
func (c *client) pruneDenyCache() {
	removed := 0
	for subject := range c.mperms.dcache {
		delete(c.mperms.dcache, subject)
		removed++
		if removed > pruneSize {
			break
		}
	}
}
// prunePubPermsCache will prune the publish-permissions cache via randomly
// deleting items (Go map iteration order is random). Doing so pruneSize
// items at a time.
func (c *client) prunePubPermsCache() {
	removed := 0
	for subject := range c.perms.pcache {
		delete(c.perms.pcache, subject)
		removed++
		if removed > pruneSize {
			break
		}
	}
}
// pubAllowed checks on publish permissioning. It consults the allow list
// (absence means all allowed), then the deny list, memoizing the result in
// pcache and pruning the cache when it grows past maxPermCacheSize.
func (c *client) pubAllowed(subject string) bool {
	if c.perms == nil || (c.perms.pub.allow == nil && c.perms.pub.deny == nil) {
		return true
	}
	// Check if published subject is allowed if we have permissions in place.
	allowed, ok := c.perms.pcache[subject]
	if ok {
		return allowed
	}
	// Cache miss, check allow then deny as needed.
	if c.perms.pub.allow != nil {
		r := c.perms.pub.allow.Match(subject)
		allowed = len(r.psubs) != 0
	} else {
		// No entries means all are allowed. Deny will overrule as needed.
		allowed = true
	}
	// If we have a deny list and are currently allowed, check that as well.
	if allowed && c.perms.pub.deny != nil {
		r := c.perms.pub.deny.Match(subject)
		allowed = len(r.psubs) == 0
	}
	// Update our cache here. (subject is already a string; the previous
	// redundant string() conversion has been dropped.)
	c.perms.pcache[subject] = allowed
	// Prune if needed.
	if len(c.perms.pcache) > maxPermCacheSize {
		c.prunePubPermsCache()
	}
	return allowed
}
// Used to mimic client like replies.
const (
	// replyPrefix marks rewritten reply subjects that cross account boundaries.
	replyPrefix    = "_R_."
	replyPrefixLen = len(replyPrefix)
	// digits is the base-62 alphabet used to encode the random reply suffix.
	digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
	base   = 62
)
// newServiceReply is used when rewriting replies that cross account
// boundaries. These will look like _R_.XXXXXXXXXXX: the prefix followed by
// eleven base-62 digits derived from a per-client random source.
func (c *client) newServiceReply() []byte {
	// Check to see if we have our own rand yet. Global rand
	// has contention with lots of clients, etc.
	if c.in.prand == nil {
		c.in.prand = rand.New(rand.NewSource(time.Now().UnixNano()))
	}
	b := [15]byte{'_', 'R', '_', '.'}
	rn := c.in.prand.Int63()
	// Fill the remainder of the buffer with base-62 digits of rn.
	for i := replyPrefixLen; i < len(b); i++ {
		b[i] = digits[rn%base]
		rn /= base
	}
	return b[:]
}
// Test whether a reply subject is a service import reply.
func isServiceReply(reply []byte) bool {
return len(reply) > 3 && string(reply[:4]) == replyPrefix
}
// This will decide to call the client code or router code based on the
// connection type.
func (c *client) processInboundMsg(msg []byte) {
	switch c.typ {
	case CLIENT:
		c.processInboundClientMsg(msg)
	default:
		c.processInboundRoutedMsg(msg)
	}
}
// processInboundClientMsg is called to process an inbound msg from a client.
// It enforces publish permissions and reserved reply subjects, resolves
// matching subscriptions through a per-client L1 cache over the account
// sublist, handles service imports, and fans the message out.
func (c *client) processInboundClientMsg(msg []byte) {
	// Update statistics
	// The msg includes the CR_LF, so pull back out for accounting.
	c.in.msgs++
	c.in.bytes += len(msg) - LEN_CR_LF
	if c.trace {
		c.traceMsg(msg)
	}
	// Check pub permissions
	if c.perms != nil && (c.perms.pub.allow != nil || c.perms.pub.deny != nil) && !c.pubAllowed(string(c.pa.subject)) {
		c.pubPermissionViolation(c.pa.subject)
		return
	}
	// Now check for reserved replies. These are used for service imports.
	if isServiceReply(c.pa.reply) {
		c.replySubjectViolation(c.pa.reply)
		return
	}
	if c.opts.Verbose {
		c.sendOK()
	}
	// Mostly under testing scenarios.
	if c.srv == nil || c.acc == nil {
		return
	}
	// Match the subscriptions. We will use our own L1 map if
	// it's still valid, avoiding contention on the shared sublist.
	// The sublist's genid changes whenever the sublist is modified, which
	// invalidates our cached results wholesale.
	var r *SublistResult
	var ok bool
	genid := atomic.LoadUint64(&c.acc.sl.genid)
	if genid == c.in.genid && c.in.results != nil {
		r, ok = c.in.results[string(c.pa.subject)]
	} else {
		// Reset our L1 completely.
		c.in.results = make(map[string]*SublistResult)
		c.in.genid = genid
	}
	// Go back to the sublist data structure.
	if !ok {
		r = c.acc.sl.Match(string(c.pa.subject))
		c.in.results[string(c.pa.subject)] = r
		// Prune the results cache. Keeps us from unbounded growth. Random delete.
		if len(c.in.results) > maxResultCacheSize {
			n := 0
			for subject := range c.in.results {
				delete(c.in.results, subject)
				if n++; n > pruneSize {
					break
				}
			}
		}
	}
	// Check to see if we need to map/route to another account.
	if c.acc.imports.services != nil {
		c.checkForImportServices(c.acc, msg)
	}
	// Check for no interest, short circuit if so.
	// This is the fanout scale.
	if len(r.psubs)+len(r.qsubs) > 0 {
		c.processMsgResults(c.acc, r, msg, c.pa.subject, c.pa.reply)
	}
}
// This checks and process import services by doing the mapping and sending the
// message onward if applicable. If the message carries a reply, the reply is
// rewritten to an anonymous _R_. subject and an implicit service import is
// installed in the exporting account so responses can find their way back.
func (c *client) checkForImportServices(acc *Account, msg []byte) {
	if acc == nil || acc.imports.services == nil {
		return
	}
	acc.mu.RLock()
	rm := acc.imports.services[string(c.pa.subject)]
	acc.mu.RUnlock()
	// Get the results from the other account for the mapped "to" subject.
	if rm != nil && rm.acc != nil && rm.acc.sl != nil {
		var nrr []byte
		// rm.ae appears to mark a single-use (auto-expire) import that is
		// removed once consumed — confirm against Account.removeServiceImport.
		if rm.ae {
			acc.removeServiceImport(rm.from)
		}
		if c.pa.reply != nil {
			// We want to remap this to provide anonymity.
			nrr = c.newServiceReply()
			rm.acc.addImplicitServiceImport(acc, string(nrr), string(c.pa.reply), true)
		}
		// FIXME(dlc) - Do L1 cache trick from above.
		rr := rm.acc.sl.Match(rm.to)
		c.processMsgResults(rm.acc, rr, msg, []byte(rm.to), nrr)
	}
}
// routeTarget accumulates, per destination route, the subscription to use
// and the queue group names to include in the routed message header.
type routeTarget struct {
	sub    *subscription
	qnames [][]byte
}
// This processes the sublist results for a given message. Plain subscriptions
// are delivered directly; ROUTER-bound deliveries are deduplicated into rmap
// and sent last with the routed header format. Queue subscriptions pick one
// member per group starting at a random index, optionally filtered by the
// queue names a route supplied (c.pa.queues), falling back to a remote route
// member ("bounce") when no local member accepts the message.
func (c *client) processMsgResults(acc *Account, r *SublistResult, msg, subject, reply []byte) {
	// msg header for clients.
	msgh := c.msgb[1:msgHeadProtoLen]
	msgh = append(msgh, subject...)
	msgh = append(msgh, ' ')
	si := len(msgh)
	// For sending messages across routes
	var rmap map[*client]*routeTarget
	// Loop over all normal subscriptions that match.
	for _, sub := range r.psubs {
		// Check if this is a send to a ROUTER. We now process
		// these after everything else.
		if sub.client != nil && sub.client.typ == ROUTER {
			if rmap == nil {
				rmap = map[*client]*routeTarget{}
			}
			// Only one send per destination route; routes fan out themselves.
			if c.typ != ROUTER && rmap[sub.client] == nil {
				rmap[sub.client] = &routeTarget{sub: sub}
			}
			continue
		}
		// Check for stream import mapped subs
		if sub.im != nil && sub.im.prefix != "" {
			// Redo the subject here on the fly.
			msgh = c.msgb[1:msgHeadProtoLen]
			msgh = append(msgh, sub.im.prefix...)
			msgh = append(msgh, c.pa.subject...)
			msgh = append(msgh, ' ')
			si = len(msgh)
		}
		// Normal delivery
		mh := c.msgHeader(msgh[:si], sub, reply)
		c.deliverMsg(sub, mh, msg)
	}
	// If we are sourced from a route we need to have direct filtered queues.
	if c.typ == ROUTER && c.pa.queues == nil {
		return
	}
	// Set these up to optionally filter based on the queue lists.
	// This is for messages received from routes which will have directed
	// guidance on which queue groups we should deliver to.
	qf := c.pa.queues
	// Check to see if we have our own rand yet. Global rand
	// has contention with lots of clients, etc.
	if c.in.prand == nil {
		c.in.prand = rand.New(rand.NewSource(time.Now().UnixNano()))
	}
	// Process queue subs
	var bounce bool
	for i := 0; i < len(r.qsubs); i++ {
		qsubs := r.qsubs[i]
		// If we have a filter check that here. We could make this a map or someting more
		// complex but linear search since we expect queues to be small should be faster
		// and more cache friendly.
		if qf != nil && len(qsubs) > 0 {
			tqn := qsubs[0].queue
			for _, qn := range qf {
				if bytes.Equal(qn, tqn) {
					goto selectQSub
				}
			}
			continue
		}
	selectQSub:
		var rsub *subscription
		// Find a subscription that is able to deliver this message
		// starting at a random index.
		startIndex := c.in.prand.Intn(len(qsubs))
		for i := 0; i < len(qsubs); i++ {
			index := (startIndex + i) % len(qsubs)
			sub := qsubs[index]
			if sub == nil {
				continue
			}
			// Sending to a remote route.
			if sub.client.typ == ROUTER {
				if c.typ == ROUTER {
					// We just came from a route, so skip and prefer local subs.
					// Keep our first rsub in case all else fails.
					if rsub == nil {
						rsub = sub
					}
					continue
				} else {
					if rmap == nil {
						rmap = map[*client]*routeTarget{}
						rmap[sub.client] = &routeTarget{sub: sub, qnames: [][]byte{sub.queue}}
					} else if rt := rmap[sub.client]; rt != nil {
						rt.qnames = append(rt.qnames, sub.queue)
					} else {
						rmap[sub.client] = &routeTarget{sub: sub, qnames: [][]byte{sub.queue}}
					}
				}
				break
			}
			// Check for mapped subs
			if sub.im != nil && sub.im.prefix != "" {
				// Redo the subject here on the fly.
				msgh = c.msgb[1:msgHeadProtoLen]
				msgh = append(msgh, sub.im.prefix...)
				msgh = append(msgh, c.pa.subject...)
				msgh = append(msgh, ' ')
				si = len(msgh)
			}
			mh := c.msgHeader(msgh[:si], sub, reply)
			if c.deliverMsg(sub, mh, msg) {
				// Clear rsub
				rsub = nil
				break
			}
		}
		if rsub != nil {
			// If we are here we tried to deliver to a local qsub
			// but failed. So we will send it to a remote.
			bounce = true
			if rmap == nil {
				rmap = map[*client]*routeTarget{}
			}
			if rt := rmap[rsub.client]; rt != nil {
				rt.qnames = append(rt.qnames, rsub.queue)
			} else {
				rmap[rsub.client] = &routeTarget{sub: rsub, qnames: [][]byte{rsub.queue}}
			}
		}
	}
	// Don't send messages to routes if we ourselves are a route.
	if (c.typ != CLIENT && !bounce) || len(rmap) == 0 {
		return
	}
	// Now process route connections.
	for _, rt := range rmap {
		mh := c.msgb[:msgHeadProtoLen]
		mh = append(mh, acc.Name...)
		mh = append(mh, ' ')
		mh = append(mh, subject...)
		mh = append(mh, ' ')
		// If we have queues the third token turns into marker
		// that signals number of queues. The leading byte signifies
		// whether a reply is present as well.
		if len(rt.qnames) > 0 {
			if reply != nil {
				mh = append(mh, '+') // Signal that there is a reply.
			} else {
				mh = append(mh, '|') // Only queues
			}
			mh = append(mh, ' ')
			if reply != nil {
				mh = append(mh, reply...)
				mh = append(mh, ' ')
			}
			for _, qn := range rt.qnames {
				mh = append(mh, qn...)
				mh = append(mh, ' ')
			}
		} else if reply != nil {
			mh = append(mh, reply...)
			mh = append(mh, ' ')
		}
		mh = append(mh, c.pa.szb...)
		mh = append(mh, _CRLF_...)
		c.deliverMsg(rt.sub, mh, msg)
	}
}
// pubPermissionViolation notifies the client of a publish permissions
// violation on the given subject and logs it server-side.
func (c *client) pubPermissionViolation(subject []byte) {
	errMsg := fmt.Sprintf("Permissions Violation for Publish to %q", subject)
	c.sendErr(errMsg)
	c.Errorf("Publish Violation - User %q, Subject %q", c.opts.Username, subject)
}
// replySubjectViolation notifies the client that it published with a
// reserved (service-import) reply subject and logs it server-side.
func (c *client) replySubjectViolation(reply []byte) {
	errMsg := fmt.Sprintf("Permissions Violation for Publish with Reply of %q", reply)
	c.sendErr(errMsg)
	c.Errorf("Publish Violation - User %q, Reply %q", c.opts.Username, reply)
}
// processPingTimer fires on the ping interval. It skips the PING if there
// was recent activity, closes the connection as stale when too many PINGs
// are outstanding, and otherwise sends a PING and re-arms the timer.
func (c *client) processPingTimer() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.ping.tmr = nil
	// Check if connection is still opened
	if c.nc == nil {
		return
	}
	c.Debugf("%s Ping Timer", c.typeString())
	// If we have had activity within the PingInterval no
	// need to send a ping.
	if delta := time.Since(c.last); delta < c.srv.getOpts().PingInterval {
		c.Debugf("Delaying PING due to activity %v ago", delta.Round(time.Second))
	} else {
		// Check for violation
		if c.ping.out+1 > c.srv.getOpts().MaxPingsOut {
			c.Debugf("Stale Client Connection - Closing")
			c.sendProto([]byte(fmt.Sprintf("-ERR '%s'\r\n", "Stale Connection")), true)
			c.clearConnection(StaleConnection)
			return
		}
		// Send PING
		c.sendPing()
	}
	// Reset to fire again.
	c.setPingTimer()
}
// setPingTimer arms the ping timer for the configured interval.
// Lock should be held.
func (c *client) setPingTimer() {
	if c.srv == nil {
		return
	}
	interval := c.srv.getOpts().PingInterval
	c.ping.tmr = time.AfterFunc(interval, c.processPingTimer)
}
// clearPingTimer stops and clears the ping timer if armed.
// Lock should be held.
func (c *client) clearPingTimer() {
	if tmr := c.ping.tmr; tmr != nil {
		tmr.Stop()
		c.ping.tmr = nil
	}
}
// setAuthTimer arms the authorization timeout: authTimeout fires if the
// client has not authenticated within d.
// Lock should be held.
func (c *client) setAuthTimer(d time.Duration) {
	c.atmr = time.AfterFunc(d, c.authTimeout)
}
// clearAuthTimer stops and clears the auth timer. It returns true if the
// timer was not armed or was stopped before firing; false means the timer
// already fired or was stopped previously.
// Lock should be held.
func (c *client) clearAuthTimer() bool {
	tmr := c.atmr
	if tmr == nil {
		return true
	}
	c.atmr = nil
	return tmr.Stop()
}
// isAuthTimerSet reports whether the authorization timer is currently armed.
func (c *client) isAuthTimerSet() bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.atmr != nil
}
// clearConnection tears down the underlying network connection exactly once
// (guarded by the clearConnection flag): it flushes pending output, wakes any
// blocked writers, closes the socket, and hands the connection off for
// closed-client accounting. Lock should be held.
func (c *client) clearConnection(reason ClosedState) {
	if c.flags.isSet(clearConnection) {
		return
	}
	c.flags.set(clearConnection)
	nc := c.nc
	if nc == nil || c.srv == nil {
		return
	}
	// Flush any pending.
	c.flushOutbound()
	// Clear outbound here.
	c.out.sg.Broadcast()
	// With TLS, Close() is sending an alert (that is doing a write).
	// Need to set a deadline otherwise the server could block there
	// if the peer is not reading from socket.
	if c.flags.isSet(handshakeComplete) {
		nc.SetWriteDeadline(time.Now().Add(c.out.wdl))
	}
	nc.Close()
	// Do this always to also kick out any IO writes.
	nc.SetWriteDeadline(time.Time{})
	// Save off the connection if its a client.
	if c.typ == CLIENT && c.srv != nil {
		go c.srv.saveClosedClient(c, nc, reason)
	}
}
// typeString returns a human-readable name for the connection type,
// used in log messages.
func (c *client) typeString() string {
	if c.typ == CLIENT {
		return "Client"
	}
	if c.typ == ROUTER {
		return "Router"
	}
	return "Unknown Type"
}
// processSubsOnConfigReload removes any subscriptions the client has that are no
// longer authorized, and check for imports (accounts) due to a config reload.
// awcsti is the set of account names whose stream imports changed; shadow
// subscriptions are rebuilt only for those accounts.
func (c *client) processSubsOnConfigReload(awcsti map[string]struct{}) {
	c.mu.Lock()
	var (
		checkPerms = c.perms != nil
		checkAcc   = c.acc != nil
		acc        = c.acc
	)
	if !checkPerms && !checkAcc {
		c.mu.Unlock()
		return
	}
	var (
		_subs    [32]*subscription
		subs     = _subs[:0]
		_removed [32]*subscription
		removed  = _removed[:0]
		srv      = c.srv
		// Best available identity for log messages: nkey, else username, else cid.
		userInfo = c.opts.Nkey
	)
	if userInfo == "" {
		userInfo = c.opts.Username
		if userInfo == "" {
			userInfo = fmt.Sprintf("%v", c.cid)
		}
	}
	if checkAcc {
		// We actually only want to check if stream imports have changed.
		if _, ok := awcsti[acc.Name]; !ok {
			checkAcc = false
		}
	}
	// We will clear any mperms we have here. It will rebuild on the fly with canSubscribe,
	// so we do that here as we collect them. We will check result down below.
	c.mperms = nil
	// Collect client's subs under the lock
	for _, sub := range c.subs {
		subs = append(subs, sub)
		// Just checking to rebuild mperms under the lock, will collect removed though here.
		if !c.canSubscribe(string(sub.subject)) {
			removed = append(removed, sub)
		}
	}
	c.mu.Unlock()
	// We can call canSubscribe() without locking since the permissions are updated
	// from config reload code prior to calling this function. So there is no risk
	// of concurrent access to c.perms.
	for _, sub := range subs {
		if checkAcc {
			// Swap in fresh shadow subscriptions, then drop the old ones
			// from the exporting accounts' sublists.
			c.mu.Lock()
			oldShadows := sub.shadow
			sub.shadow = nil
			c.mu.Unlock()
			c.addShadowSubscriptions(acc, sub)
			for _, nsub := range oldShadows {
				nsub.im.acc.sl.Remove(nsub)
			}
		}
	}
	// Report back to client and logs.
	for _, sub := range removed {
		c.unsubscribe(acc, sub, true)
		c.sendErr(fmt.Sprintf("Permissions Violation for Subscription to %q (sid %q)",
			sub.subject, sub.sid))
		srv.Noticef("Removed sub %q (sid %q) for user %q - not authorized",
			sub.subject, sub.sid, userInfo)
	}
}
// Allows us to count up all the queue subscribers during close:
// one representative subscription plus the member count for its group.
type qsub struct {
	sub *subscription
	n   int32
}
// closeConnection fully tears down the client: stops timers, clears the
// network connection, removes subscriptions (locally and from the route
// map), unregisters from the server, updates cluster connect URLs, and for
// solicited or retried routes schedules a reconnect attempt.
func (c *client) closeConnection(reason ClosedState) {
	c.mu.Lock()
	if c.nc == nil {
		c.mu.Unlock()
		return
	}
	c.Debugf("%s connection closed", c.typeString())
	c.clearAuthTimer()
	c.clearPingTimer()
	c.clearConnection(reason)
	c.nc = nil
	ctype := c.typ
	// Snapshot for use if we are a client connection.
	// FIXME(dlc) - we can just stub in a new one for client
	// and reference existing one.
	var subs []*subscription
	if ctype == CLIENT {
		subs = make([]*subscription, 0, len(c.subs))
		for _, sub := range c.subs {
			// Auto-unsubscribe subscriptions must be unsubscribed forcibly.
			sub.max = 0
			subs = append(subs, sub)
		}
	}
	srv := c.srv
	var (
		routeClosed   bool
		retryImplicit bool
		connectURLs   []string
	)
	if c.route != nil {
		routeClosed = c.route.closed
		if !routeClosed {
			retryImplicit = c.route.retry
		}
		connectURLs = c.route.connectURLs
	}
	acc := c.acc
	c.mu.Unlock()
	// Remove clients subscriptions.
	if ctype == CLIENT {
		acc.sl.RemoveBatch(subs)
	} else {
		go c.removeRemoteSubs()
	}
	if srv != nil {
		// This is a route that disconnected, but we are not in lame duck mode...
		if len(connectURLs) > 0 && !srv.isLameDuckMode() {
			// Unless disabled, possibly update the server's INFO protocol
			// and send to clients that know how to handle async INFOs.
			if !srv.getOpts().Cluster.NoAdvertise {
				srv.removeClientConnectURLsAndSendINFOToClients(connectURLs)
			}
		}
		// Unregister
		srv.removeClient(c)
		// Update remote subscriptions.
		if acc != nil && ctype == CLIENT {
			qsubs := map[string]*qsub{}
			for _, sub := range subs {
				if sub.queue == nil {
					srv.updateRouteSubscriptionMap(acc, sub, -1)
				} else {
					// We handle queue subscribers special in case we
					// have a bunch we can just send one update to the
					// connected routes.
					key := string(sub.subject) + " " + string(sub.queue)
					if esub, ok := qsubs[key]; ok {
						esub.n++
					} else {
						qsubs[key] = &qsub{sub, 1}
					}
				}
			}
			// Process any qsubs here.
			for _, esub := range qsubs {
				srv.updateRouteSubscriptionMap(acc, esub.sub, -(esub.n))
			}
			// Track active-account count for monitoring.
			if prev := c.acc.removeClient(c); prev == 1 && c.srv != nil {
				c.srv.mu.Lock()
				c.srv.activeAccounts--
				c.srv.mu.Unlock()
			}
		}
	}
	// Don't reconnect routes that are being closed.
	if routeClosed {
		return
	}
	// Check for a solicited route. If it was, start up a reconnect unless
	// we are already connected to the other end.
	if c.isSolicitedRoute() || retryImplicit {
		// Capture these under lock
		c.mu.Lock()
		rid := c.route.remoteID
		rtype := c.route.routeType
		rurl := c.route.url
		c.mu.Unlock()
		srv.mu.Lock()
		defer srv.mu.Unlock()
		// It is possible that the server is being shutdown.
		// If so, don't try to reconnect
		if !srv.running {
			return
		}
		if rid != "" && srv.remotes[rid] != nil {
			c.srv.Debugf("Not attempting reconnect for solicited route, already connected to \"%s\"", rid)
			return
		} else if rid == srv.info.ID {
			c.srv.Debugf("Detected route to self, ignoring \"%s\"", rurl)
			return
		} else if rtype != Implicit || retryImplicit {
			c.srv.Debugf("Attempting reconnect for solicited route \"%s\"", rurl)
			// Keep track of this go-routine so we can wait for it on
			// server shutdown.
			srv.startGoRoutine(func() { srv.reConnectToRoute(rurl, rtype) })
		}
	}
}
// If the client is a route connection, sets the `closed` flag to true
// to prevent any reconnecting attempt when c.closeConnection() is called.
func (c *client) setRouteNoReconnectOnClose() {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.route != nil {
		c.route.closed = true
	}
}
// Logging functionality scoped to a client or route.
// Errorf logs at error level, prefixing the message with the client's
// String() representation.
func (c *client) Errorf(format string, v ...interface{}) {
	c.srv.Errorf(fmt.Sprintf("%s - %s", c, format), v...)
}
// Debugf logs at debug level, prefixing the message with the client's
// String() representation.
func (c *client) Debugf(format string, v ...interface{}) {
	c.srv.Debugf(fmt.Sprintf("%s - %s", c, format), v...)
}
// Noticef logs at notice level, prefixing the message with the client's
// String() representation.
func (c *client) Noticef(format string, v ...interface{}) {
	c.srv.Noticef(fmt.Sprintf("%s - %s", c, format), v...)
}
// Tracef logs at trace level, prefixing the message with the client's
// String() representation.
func (c *client) Tracef(format string, v ...interface{}) {
	c.srv.Tracef(fmt.Sprintf("%s - %s", c, format), v...)
}
|
//line lang.y:6
package lang
import __yyfmt__ "fmt"
//line lang.y:6
import (
"github.com/hashicorp/terraform/config/lang/ast"
)
//line lang.y:14
// parserSymType is the yacc semantic-value record: one of these is pushed on
// the parser stack for every shifted token or reduced nonterminal.
type parserSymType struct {
	yys      int          // saved automaton state (managed by the generated parser)
	node     ast.Node     // semantic value for single-node productions
	nodeList []ast.Node   // semantic value for argument-list productions
	str      string       // appears unused by the visible grammar actions
	token    *parserToken // lexer token payload (value + source position)
}
const PROGRAM_BRACKET_LEFT = 57346
const PROGRAM_BRACKET_RIGHT = 57347
const PROGRAM_STRING_START = 57348
const PROGRAM_STRING_END = 57349
const PAREN_LEFT = 57350
const PAREN_RIGHT = 57351
const COMMA = 57352
const ARITH_OP = 57353
const IDENTIFIER = 57354
const INTEGER = 57355
const FLOAT = 57356
const STRING = 57357
var parserToknames = [...]string{
"$end",
"error",
"$unk",
"PROGRAM_BRACKET_LEFT",
"PROGRAM_BRACKET_RIGHT",
"PROGRAM_STRING_START",
"PROGRAM_STRING_END",
"PAREN_LEFT",
"PAREN_RIGHT",
"COMMA",
"ARITH_OP",
"IDENTIFIER",
"INTEGER",
"FLOAT",
"STRING",
}
var parserStatenames = [...]string{}
const parserEofCode = 1
const parserErrCode = 2
const parserMaxDepth = 200
//line lang.y:165
//line yacctab:1
var parserExca = [...]int{
-1, 1,
1, -1,
-2, 0,
}
const parserNprod = 19
const parserPrivate = 57344
var parserTokenNames []string
var parserStates []string
const parserLast = 30
var parserAct = [...]int{
9, 20, 16, 16, 7, 7, 3, 18, 10, 8,
1, 17, 14, 12, 13, 6, 6, 19, 8, 22,
15, 23, 24, 11, 2, 25, 16, 21, 4, 5,
}
var parserPact = [...]int{
1, -1000, 1, -1000, -1000, -1000, -1000, 0, -1000, 15,
0, 1, -1000, -1000, -1, -1000, 0, -8, 0, -1000,
-1000, 12, -9, -1000, 0, -9,
}
var parserPgo = [...]int{
0, 0, 29, 28, 23, 6, 27, 10,
}
var parserR1 = [...]int{
0, 7, 7, 4, 4, 5, 5, 2, 1, 1,
1, 1, 1, 1, 1, 6, 6, 6, 3,
}
var parserR2 = [...]int{
0, 0, 1, 1, 2, 1, 1, 3, 3, 1,
1, 1, 3, 1, 4, 0, 3, 1, 1,
}
var parserChk = [...]int{
-1000, -7, -4, -5, -3, -2, 15, 4, -5, -1,
8, -4, 13, 14, 12, 5, 11, -1, 8, -1,
9, -6, -1, 9, 10, -1,
}
var parserDef = [...]int{
1, -2, 2, 3, 5, 6, 18, 0, 4, 0,
0, 9, 10, 11, 13, 7, 0, 0, 15, 12,
8, 0, 17, 14, 0, 16,
}
var parserTok1 = [...]int{
1,
}
var parserTok2 = [...]int{
2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15,
}
var parserTok3 = [...]int{
0,
}
var parserErrorMessages = [...]struct {
state int
token int
msg string
}{}
//line yaccpar:1
/* parser for yacc output */
var (
parserDebug = 0
parserErrorVerbose = false
)
type parserLexer interface {
Lex(lval *parserSymType) int
Error(s string)
}
type parserParser interface {
Parse(parserLexer) int
Lookahead() int
}
type parserParserImpl struct {
lookahead func() int
}
func (p *parserParserImpl) Lookahead() int {
return p.lookahead()
}
// parserNewParser returns a fresh parser whose Lookahead reports -1 until a
// Parse call installs a real lookahead function.
func parserNewParser() parserParser {
	return &parserParserImpl{
		lookahead: func() int { return -1 },
	}
}
const parserFlag = -1000
// parserTokname returns the printable name of token number c, or a
// synthesized "tok-N" label when c is out of range or has no name.
func parserTokname(c int) string {
	if idx := c - 1; idx >= 0 && idx < len(parserToknames) && parserToknames[idx] != "" {
		return parserToknames[idx]
	}
	return __yyfmt__.Sprintf("tok-%v", c)
}
// parserStatname returns the printable name of automaton state s, or a
// synthesized "state-N" label when s is out of range or has no name.
func parserStatname(s int) string {
	if s >= 0 && s < len(parserStatenames) && parserStatenames[s] != "" {
		return parserStatenames[s]
	}
	return __yyfmt__.Sprintf("state-%v", s)
}
// parserErrorMessage builds a "syntax error" message for the given automaton
// state and lookahead token. When parserErrorVerbose is enabled it augments
// the message with up to four expected tokens, mirroring Bison's behavior.
// NOTE: this is goyacc-generated code; any manual change is lost on regeneration.
func parserErrorMessage(state, lookAhead int) string {
	const TOKSTART = 4 // index of the first real token in parserToknames
	if !parserErrorVerbose {
		return "syntax error"
	}
	// Exact (state, token) message overrides supplied by the grammar, if any.
	for _, e := range parserErrorMessages {
		if e.state == state && e.token == lookAhead {
			return "syntax error: " + e.msg
		}
	}
	res := "syntax error: unexpected " + parserTokname(lookAhead)
	// To match Bison, suggest at most four expected tokens.
	expected := make([]int, 0, 4)
	// Look for shiftable tokens.
	base := parserPact[state]
	for tok := TOKSTART; tok-1 < len(parserToknames); tok++ {
		if n := base + tok; n >= 0 && n < parserLast && parserChk[parserAct[n]] == tok {
			if len(expected) == cap(expected) {
				return res
			}
			expected = append(expected, tok)
		}
	}
	if parserDef[state] == -2 {
		i := 0
		for parserExca[i] != -1 || parserExca[i+1] != state {
			i += 2
		}
		// Look for tokens that we accept or reduce.
		for i += 2; parserExca[i] >= 0; i += 2 {
			tok := parserExca[i]
			if tok < TOKSTART || parserExca[i+1] == 0 {
				continue
			}
			if len(expected) == cap(expected) {
				return res
			}
			expected = append(expected, tok)
		}
		// If the default action is to accept or reduce, give up.
		if parserExca[i+1] != 0 {
			return res
		}
	}
	for i, tok := range expected {
		if i == 0 {
			res += ", expecting "
		} else {
			res += " or "
		}
		res += parserTokname(tok)
	}
	return res
}
func parserlex1(lex parserLexer, lval *parserSymType) (char, token int) {
token = 0
char = lex.Lex(lval)
if char <= 0 {
token = parserTok1[0]
goto out
}
if char < len(parserTok1) {
token = parserTok1[char]
goto out
}
if char >= parserPrivate {
if char < parserPrivate+len(parserTok2) {
token = parserTok2[char-parserPrivate]
goto out
}
}
for i := 0; i < len(parserTok3); i += 2 {
token = parserTok3[i+0]
if token == char {
token = parserTok3[i+1]
goto out
}
}
out:
if token == 0 {
token = parserTok2[1] /* unknown char */
}
if parserDebug >= 3 {
__yyfmt__.Printf("lex %s(%d)\n", parserTokname(token), uint(char))
}
return char, token
}
// parserParse runs a one-shot parse of the given lexer's input using a
// throwaway parser instance.
func parserParse(parserlex parserLexer) int {
	p := parserNewParser()
	return p.Parse(parserlex)
}
func (parserrcvr *parserParserImpl) Parse(parserlex parserLexer) int {
var parsern int
var parserlval parserSymType
var parserVAL parserSymType
var parserDollar []parserSymType
_ = parserDollar // silence set and not used
parserS := make([]parserSymType, parserMaxDepth)
Nerrs := 0 /* number of errors */
Errflag := 0 /* error recovery flag */
parserstate := 0
parserchar := -1
parsertoken := -1 // parserchar translated into internal numbering
parserrcvr.lookahead = func() int { return parserchar }
defer func() {
// Make sure we report no lookahead when not parsing.
parserstate = -1
parserchar = -1
parsertoken = -1
}()
parserp := -1
goto parserstack
ret0:
return 0
ret1:
return 1
parserstack:
/* put a state and value onto the stack */
if parserDebug >= 4 {
__yyfmt__.Printf("char %v in %v\n", parserTokname(parsertoken), parserStatname(parserstate))
}
parserp++
if parserp >= len(parserS) {
nyys := make([]parserSymType, len(parserS)*2)
copy(nyys, parserS)
parserS = nyys
}
parserS[parserp] = parserVAL
parserS[parserp].yys = parserstate
parsernewstate:
parsern = parserPact[parserstate]
if parsern <= parserFlag {
goto parserdefault /* simple state */
}
if parserchar < 0 {
parserchar, parsertoken = parserlex1(parserlex, &parserlval)
}
parsern += parsertoken
if parsern < 0 || parsern >= parserLast {
goto parserdefault
}
parsern = parserAct[parsern]
if parserChk[parsern] == parsertoken { /* valid shift */
parserchar = -1
parsertoken = -1
parserVAL = parserlval
parserstate = parsern
if Errflag > 0 {
Errflag--
}
goto parserstack
}
parserdefault:
/* default state action */
parsern = parserDef[parserstate]
if parsern == -2 {
if parserchar < 0 {
parserchar, parsertoken = parserlex1(parserlex, &parserlval)
}
/* look through exception table */
xi := 0
for {
if parserExca[xi+0] == -1 && parserExca[xi+1] == parserstate {
break
}
xi += 2
}
for xi += 2; ; xi += 2 {
parsern = parserExca[xi+0]
if parsern < 0 || parsern == parsertoken {
break
}
}
parsern = parserExca[xi+1]
if parsern < 0 {
goto ret0
}
}
if parsern == 0 {
/* error ... attempt to resume parsing */
switch Errflag {
case 0: /* brand new error */
parserlex.Error(parserErrorMessage(parserstate, parsertoken))
Nerrs++
if parserDebug >= 1 {
__yyfmt__.Printf("%s", parserStatname(parserstate))
__yyfmt__.Printf(" saw %s\n", parserTokname(parsertoken))
}
fallthrough
case 1, 2: /* incompletely recovered error ... try again */
Errflag = 3
/* find a state where "error" is a legal shift action */
for parserp >= 0 {
parsern = parserPact[parserS[parserp].yys] + parserErrCode
if parsern >= 0 && parsern < parserLast {
parserstate = parserAct[parsern] /* simulate a shift of "error" */
if parserChk[parserstate] == parserErrCode {
goto parserstack
}
}
/* the current p has no shift on "error", pop stack */
if parserDebug >= 2 {
__yyfmt__.Printf("error recovery pops state %d\n", parserS[parserp].yys)
}
parserp--
}
/* there is no state on the stack with an error shift ... abort */
goto ret1
case 3: /* no shift yet; clobber input char */
if parserDebug >= 2 {
__yyfmt__.Printf("error recovery discards %s\n", parserTokname(parsertoken))
}
if parsertoken == parserEofCode {
goto ret1
}
parserchar = -1
parsertoken = -1
goto parsernewstate /* try again in the same state */
}
}
/* reduction by production parsern */
if parserDebug >= 2 {
__yyfmt__.Printf("reduce %v in:\n\t%v\n", parsern, parserStatname(parserstate))
}
parsernt := parsern
parserpt := parserp
_ = parserpt // guard against "declared and not used"
parserp -= parserR2[parsern]
// parserp is now the index of $0. Perform the default action. Iff the
// reduced production is ε, $1 is possibly out of range.
if parserp+1 >= len(parserS) {
nyys := make([]parserSymType, len(parserS)*2)
copy(nyys, parserS)
parserS = nyys
}
parserVAL = parserS[parserp+1]
/* consult goto table to find next state */
parsern = parserR1[parsern]
parserg := parserPgo[parsern]
parserj := parserg + parserS[parserp].yys + 1
if parserj >= parserLast {
parserstate = parserAct[parserg]
} else {
parserstate = parserAct[parserj]
if parserChk[parserstate] != -parsern {
parserstate = parserAct[parserg]
}
}
// dummy call; replaced with literal code
switch parsernt {
case 1:
parserDollar = parserS[parserpt-0 : parserpt+1]
//line lang.y:35
{
parserResult = &ast.LiteralNode{
Value: "",
Typex: ast.TypeString,
Posx: ast.Pos{Column: 1, Line: 1},
}
}
case 2:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:43
{
parserResult = parserDollar[1].node
// We want to make sure that the top value is always a Concat
// so that the return value is always a string type from an
// interpolation.
//
// The logic for checking for a LiteralNode is a little annoying
// because functionally the AST is the same, but we do that because
// it makes for an easy literal check later (to check if a string
// has any interpolations).
if _, ok := parserDollar[1].node.(*ast.Concat); !ok {
if n, ok := parserDollar[1].node.(*ast.LiteralNode); !ok || n.Typex != ast.TypeString {
parserResult = &ast.Concat{
Exprs: []ast.Node{parserDollar[1].node},
Posx: parserDollar[1].node.Pos(),
}
}
}
}
case 3:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:66
{
parserVAL.node = parserDollar[1].node
}
case 4:
parserDollar = parserS[parserpt-2 : parserpt+1]
//line lang.y:70
{
var result []ast.Node
if c, ok := parserDollar[1].node.(*ast.Concat); ok {
result = append(c.Exprs, parserDollar[2].node)
} else {
result = []ast.Node{parserDollar[1].node, parserDollar[2].node}
}
parserVAL.node = &ast.Concat{
Exprs: result,
Posx: result[0].Pos(),
}
}
case 5:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:86
{
parserVAL.node = parserDollar[1].node
}
case 6:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:90
{
parserVAL.node = parserDollar[1].node
}
case 7:
parserDollar = parserS[parserpt-3 : parserpt+1]
//line lang.y:96
{
parserVAL.node = parserDollar[2].node
}
case 8:
parserDollar = parserS[parserpt-3 : parserpt+1]
//line lang.y:102
{
parserVAL.node = parserDollar[2].node
}
case 9:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:106
{
parserVAL.node = parserDollar[1].node
}
case 10:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:110
{
parserVAL.node = &ast.LiteralNode{
Value: parserDollar[1].token.Value.(int),
Typex: ast.TypeInt,
Posx: parserDollar[1].token.Pos,
}
}
case 11:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:118
{
parserVAL.node = &ast.LiteralNode{
Value: parserDollar[1].token.Value.(float64),
Typex: ast.TypeFloat,
Posx: parserDollar[1].token.Pos,
}
}
case 12:
parserDollar = parserS[parserpt-3 : parserpt+1]
//line lang.y:126
{
parserVAL.node = &ast.Arithmetic{
Op: parserDollar[2].token.Value.(ast.ArithmeticOp),
Exprs: []ast.Node{parserDollar[1].node, parserDollar[3].node},
Posx: parserDollar[1].node.Pos(),
}
}
case 13:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:134
{
parserVAL.node = &ast.VariableAccess{Name: parserDollar[1].token.Value.(string), Posx: parserDollar[1].token.Pos}
}
case 14:
parserDollar = parserS[parserpt-4 : parserpt+1]
//line lang.y:138
{
parserVAL.node = &ast.Call{Func: parserDollar[1].token.Value.(string), Args: parserDollar[3].nodeList, Posx: parserDollar[1].token.Pos}
}
case 15:
parserDollar = parserS[parserpt-0 : parserpt+1]
//line lang.y:143
{
parserVAL.nodeList = nil
}
case 16:
parserDollar = parserS[parserpt-3 : parserpt+1]
//line lang.y:147
{
parserVAL.nodeList = append(parserDollar[1].nodeList, parserDollar[3].node)
}
case 17:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:151
{
parserVAL.nodeList = append(parserVAL.nodeList, parserDollar[1].node)
}
case 18:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:157
{
parserVAL.node = &ast.LiteralNode{
Value: parserDollar[1].token.Value.(string),
Typex: ast.TypeString,
Posx: parserDollar[1].token.Pos,
}
}
}
goto parserstack /* stack new state and value */
}
config/lang: restore go1.4.3 generated code

My theory is that @mitchellh checked in a go1.5-generated file in
commit 344e7c26b5f116842932d0e6b6ad2f1a250526f4.
//line lang.y:6
package lang
import __yyfmt__ "fmt"
//line lang.y:6
import (
"github.com/hashicorp/terraform/config/lang/ast"
)
//line lang.y:14
type parserSymType struct {
yys int
node ast.Node
nodeList []ast.Node
str string
token *parserToken
}
const PROGRAM_BRACKET_LEFT = 57346
const PROGRAM_BRACKET_RIGHT = 57347
const PROGRAM_STRING_START = 57348
const PROGRAM_STRING_END = 57349
const PAREN_LEFT = 57350
const PAREN_RIGHT = 57351
const COMMA = 57352
const ARITH_OP = 57353
const IDENTIFIER = 57354
const INTEGER = 57355
const FLOAT = 57356
const STRING = 57357
var parserToknames = []string{
"PROGRAM_BRACKET_LEFT",
"PROGRAM_BRACKET_RIGHT",
"PROGRAM_STRING_START",
"PROGRAM_STRING_END",
"PAREN_LEFT",
"PAREN_RIGHT",
"COMMA",
"ARITH_OP",
"IDENTIFIER",
"INTEGER",
"FLOAT",
"STRING",
}
var parserStatenames = []string{}
const parserEofCode = 1
const parserErrCode = 2
const parserMaxDepth = 200
//line lang.y:165
//line yacctab:1
var parserExca = []int{
-1, 1,
1, -1,
-2, 0,
}
const parserNprod = 19
const parserPrivate = 57344
var parserTokenNames []string
var parserStates []string
const parserLast = 30
var parserAct = []int{
9, 20, 16, 16, 7, 7, 3, 18, 10, 8,
1, 17, 14, 12, 13, 6, 6, 19, 8, 22,
15, 23, 24, 11, 2, 25, 16, 21, 4, 5,
}
var parserPact = []int{
1, -1000, 1, -1000, -1000, -1000, -1000, 0, -1000, 15,
0, 1, -1000, -1000, -1, -1000, 0, -8, 0, -1000,
-1000, 12, -9, -1000, 0, -9,
}
var parserPgo = []int{
0, 0, 29, 28, 23, 6, 27, 10,
}
var parserR1 = []int{
0, 7, 7, 4, 4, 5, 5, 2, 1, 1,
1, 1, 1, 1, 1, 6, 6, 6, 3,
}
var parserR2 = []int{
0, 0, 1, 1, 2, 1, 1, 3, 3, 1,
1, 1, 3, 1, 4, 0, 3, 1, 1,
}
var parserChk = []int{
-1000, -7, -4, -5, -3, -2, 15, 4, -5, -1,
8, -4, 13, 14, 12, 5, 11, -1, 8, -1,
9, -6, -1, 9, 10, -1,
}
var parserDef = []int{
1, -2, 2, 3, 5, 6, 18, 0, 4, 0,
0, 9, 10, 11, 13, 7, 0, 0, 15, 12,
8, 0, 17, 14, 0, 16,
}
var parserTok1 = []int{
1,
}
var parserTok2 = []int{
2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15,
}
var parserTok3 = []int{
0,
}
//line yaccpar:1
/* parser for yacc output */
var parserDebug = 0
type parserLexer interface {
Lex(lval *parserSymType) int
Error(s string)
}
const parserFlag = -1000
func parserTokname(c int) string {
// 4 is TOKSTART above
if c >= 4 && c-4 < len(parserToknames) {
if parserToknames[c-4] != "" {
return parserToknames[c-4]
}
}
return __yyfmt__.Sprintf("tok-%v", c)
}
func parserStatname(s int) string {
if s >= 0 && s < len(parserStatenames) {
if parserStatenames[s] != "" {
return parserStatenames[s]
}
}
return __yyfmt__.Sprintf("state-%v", s)
}
func parserlex1(lex parserLexer, lval *parserSymType) int {
c := 0
char := lex.Lex(lval)
if char <= 0 {
c = parserTok1[0]
goto out
}
if char < len(parserTok1) {
c = parserTok1[char]
goto out
}
if char >= parserPrivate {
if char < parserPrivate+len(parserTok2) {
c = parserTok2[char-parserPrivate]
goto out
}
}
for i := 0; i < len(parserTok3); i += 2 {
c = parserTok3[i+0]
if c == char {
c = parserTok3[i+1]
goto out
}
}
out:
if c == 0 {
c = parserTok2[1] /* unknown char */
}
if parserDebug >= 3 {
__yyfmt__.Printf("lex %s(%d)\n", parserTokname(c), uint(char))
}
return c
}
func parserParse(parserlex parserLexer) int {
var parsern int
var parserlval parserSymType
var parserVAL parserSymType
parserS := make([]parserSymType, parserMaxDepth)
Nerrs := 0 /* number of errors */
Errflag := 0 /* error recovery flag */
parserstate := 0
parserchar := -1
parserp := -1
goto parserstack
ret0:
return 0
ret1:
return 1
parserstack:
/* put a state and value onto the stack */
if parserDebug >= 4 {
__yyfmt__.Printf("char %v in %v\n", parserTokname(parserchar), parserStatname(parserstate))
}
parserp++
if parserp >= len(parserS) {
nyys := make([]parserSymType, len(parserS)*2)
copy(nyys, parserS)
parserS = nyys
}
parserS[parserp] = parserVAL
parserS[parserp].yys = parserstate
parsernewstate:
parsern = parserPact[parserstate]
if parsern <= parserFlag {
goto parserdefault /* simple state */
}
if parserchar < 0 {
parserchar = parserlex1(parserlex, &parserlval)
}
parsern += parserchar
if parsern < 0 || parsern >= parserLast {
goto parserdefault
}
parsern = parserAct[parsern]
if parserChk[parsern] == parserchar { /* valid shift */
parserchar = -1
parserVAL = parserlval
parserstate = parsern
if Errflag > 0 {
Errflag--
}
goto parserstack
}
parserdefault:
/* default state action */
parsern = parserDef[parserstate]
if parsern == -2 {
if parserchar < 0 {
parserchar = parserlex1(parserlex, &parserlval)
}
/* look through exception table */
xi := 0
for {
if parserExca[xi+0] == -1 && parserExca[xi+1] == parserstate {
break
}
xi += 2
}
for xi += 2; ; xi += 2 {
parsern = parserExca[xi+0]
if parsern < 0 || parsern == parserchar {
break
}
}
parsern = parserExca[xi+1]
if parsern < 0 {
goto ret0
}
}
if parsern == 0 {
/* error ... attempt to resume parsing */
switch Errflag {
case 0: /* brand new error */
parserlex.Error("syntax error")
Nerrs++
if parserDebug >= 1 {
__yyfmt__.Printf("%s", parserStatname(parserstate))
__yyfmt__.Printf(" saw %s\n", parserTokname(parserchar))
}
fallthrough
case 1, 2: /* incompletely recovered error ... try again */
Errflag = 3
/* find a state where "error" is a legal shift action */
for parserp >= 0 {
parsern = parserPact[parserS[parserp].yys] + parserErrCode
if parsern >= 0 && parsern < parserLast {
parserstate = parserAct[parsern] /* simulate a shift of "error" */
if parserChk[parserstate] == parserErrCode {
goto parserstack
}
}
/* the current p has no shift on "error", pop stack */
if parserDebug >= 2 {
__yyfmt__.Printf("error recovery pops state %d\n", parserS[parserp].yys)
}
parserp--
}
/* there is no state on the stack with an error shift ... abort */
goto ret1
case 3: /* no shift yet; clobber input char */
if parserDebug >= 2 {
__yyfmt__.Printf("error recovery discards %s\n", parserTokname(parserchar))
}
if parserchar == parserEofCode {
goto ret1
}
parserchar = -1
goto parsernewstate /* try again in the same state */
}
}
/* reduction by production parsern */
if parserDebug >= 2 {
__yyfmt__.Printf("reduce %v in:\n\t%v\n", parsern, parserStatname(parserstate))
}
parsernt := parsern
parserpt := parserp
_ = parserpt // guard against "declared and not used"
parserp -= parserR2[parsern]
parserVAL = parserS[parserp+1]
/* consult goto table to find next state */
parsern = parserR1[parsern]
parserg := parserPgo[parsern]
parserj := parserg + parserS[parserp].yys + 1
if parserj >= parserLast {
parserstate = parserAct[parserg]
} else {
parserstate = parserAct[parserj]
if parserChk[parserstate] != -parsern {
parserstate = parserAct[parserg]
}
}
// dummy call; replaced with literal code
switch parsernt {
case 1:
//line lang.y:35
{
parserResult = &ast.LiteralNode{
Value: "",
Typex: ast.TypeString,
Posx: ast.Pos{Column: 1, Line: 1},
}
}
case 2:
//line lang.y:43
{
parserResult = parserS[parserpt-0].node
// We want to make sure that the top value is always a Concat
// so that the return value is always a string type from an
// interpolation.
//
// The logic for checking for a LiteralNode is a little annoying
// because functionally the AST is the same, but we do that because
// it makes for an easy literal check later (to check if a string
// has any interpolations).
if _, ok := parserS[parserpt-0].node.(*ast.Concat); !ok {
if n, ok := parserS[parserpt-0].node.(*ast.LiteralNode); !ok || n.Typex != ast.TypeString {
parserResult = &ast.Concat{
Exprs: []ast.Node{parserS[parserpt-0].node},
Posx: parserS[parserpt-0].node.Pos(),
}
}
}
}
case 3:
//line lang.y:66
{
parserVAL.node = parserS[parserpt-0].node
}
case 4:
//line lang.y:70
{
var result []ast.Node
if c, ok := parserS[parserpt-1].node.(*ast.Concat); ok {
result = append(c.Exprs, parserS[parserpt-0].node)
} else {
result = []ast.Node{parserS[parserpt-1].node, parserS[parserpt-0].node}
}
parserVAL.node = &ast.Concat{
Exprs: result,
Posx: result[0].Pos(),
}
}
case 5:
//line lang.y:86
{
parserVAL.node = parserS[parserpt-0].node
}
case 6:
//line lang.y:90
{
parserVAL.node = parserS[parserpt-0].node
}
case 7:
//line lang.y:96
{
parserVAL.node = parserS[parserpt-1].node
}
case 8:
//line lang.y:102
{
parserVAL.node = parserS[parserpt-1].node
}
case 9:
//line lang.y:106
{
parserVAL.node = parserS[parserpt-0].node
}
case 10:
//line lang.y:110
{
parserVAL.node = &ast.LiteralNode{
Value: parserS[parserpt-0].token.Value.(int),
Typex: ast.TypeInt,
Posx: parserS[parserpt-0].token.Pos,
}
}
case 11:
//line lang.y:118
{
parserVAL.node = &ast.LiteralNode{
Value: parserS[parserpt-0].token.Value.(float64),
Typex: ast.TypeFloat,
Posx: parserS[parserpt-0].token.Pos,
}
}
case 12:
//line lang.y:126
{
parserVAL.node = &ast.Arithmetic{
Op: parserS[parserpt-1].token.Value.(ast.ArithmeticOp),
Exprs: []ast.Node{parserS[parserpt-2].node, parserS[parserpt-0].node},
Posx: parserS[parserpt-2].node.Pos(),
}
}
case 13:
//line lang.y:134
{
parserVAL.node = &ast.VariableAccess{Name: parserS[parserpt-0].token.Value.(string), Posx: parserS[parserpt-0].token.Pos}
}
case 14:
//line lang.y:138
{
parserVAL.node = &ast.Call{Func: parserS[parserpt-3].token.Value.(string), Args: parserS[parserpt-1].nodeList, Posx: parserS[parserpt-3].token.Pos}
}
case 15:
//line lang.y:143
{
parserVAL.nodeList = nil
}
case 16:
//line lang.y:147
{
parserVAL.nodeList = append(parserS[parserpt-2].nodeList, parserS[parserpt-0].node)
}
case 17:
//line lang.y:151
{
parserVAL.nodeList = append(parserVAL.nodeList, parserS[parserpt-0].node)
}
case 18:
//line lang.y:157
{
parserVAL.node = &ast.LiteralNode{
Value: parserS[parserpt-0].token.Value.(string),
Typex: ast.TypeString,
Posx: parserS[parserpt-0].token.Pos,
}
}
}
goto parserstack /* stack new state and value */
}
|
// Copyright 2017 Pilosa Corp.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
// CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
package pilosa
import (
"encoding/json"
"errors"
"fmt"
"sort"
"strings"
"time"
)
const timeFormat = "2006-01-02T15:04"
// Schema contains the index properties
type Schema struct {
indexes map[string]*Index
}
func (s *Schema) String() string {
return fmt.Sprintf("%#v", s.indexes)
}
// NewSchema creates a new, empty Schema.
func NewSchema() *Schema {
	s := &Schema{}
	s.indexes = map[string]*Index{}
	return s
}
// Index returns the index with the given name, creating and registering it
// on first use.
// *Deprecated* Passing IndexOptions or nil.
func (s *Schema) Index(name string, options ...*IndexOptions) (*Index, error) {
	if existing, ok := s.indexes[name]; ok {
		return existing, nil
	}
	var opts *IndexOptions
	switch len(options) {
	case 0:
		// no options supplied; NewIndex applies defaults for nil
	case 1:
		opts = options[0]
	default:
		return nil, ErrInvalidIndexOption
	}
	index, err := NewIndex(name, opts)
	if err != nil {
		return nil, err
	}
	s.indexes[name] = index
	return index, nil
}
// Indexes returns a copy of the indexes in this schema; each index is deep
// copied so callers cannot mutate the schema's state.
func (s *Schema) Indexes() map[string]*Index {
	copies := make(map[string]*Index, len(s.indexes))
	for name, index := range s.indexes {
		copies[name] = index.copy()
	}
	return copies
}
// diff returns a schema containing everything present in s but absent from
// other: whole indexes other lacks, and (for shared indexes) only the frames
// other lacks.
func (s *Schema) diff(other *Schema) *Schema {
	result := NewSchema()
	for indexName, index := range s.indexes {
		if otherIndex, ok := other.indexes[indexName]; !ok {
			// if the index doesn't exist in the other schema, simply copy it
			result.indexes[indexName] = index.copy()
		} else {
			// the index exists in the other schema; check the frames
			// NOTE(review): the error from NewIndex is discarded here;
			// index.options were validated when the index was first created,
			// so this presumably cannot fail — confirm.
			resultIndex, _ := NewIndex(indexName, index.options)
			for frameName, frame := range index.frames {
				if _, ok := otherIndex.frames[frameName]; !ok {
					// the frame doesn't exist in the other schema, copy it
					resultIndex.frames[frameName] = frame.copy()
				}
			}
			// check whether we modified result index
			if len(resultIndex.frames) > 0 {
				// if so, move it to the result
				result.indexes[indexName] = resultIndex
			}
		}
	}
	return result
}
// PQLQuery is an interface for PQL queries.
type PQLQuery interface {
Index() *Index
serialize() string
Error() error
}
// PQLBaseQuery is the base implementation for PQLQuery.
type PQLBaseQuery struct {
index *Index
pql string
err error
}
// NewPQLBaseQuery creates a new PQLQuery with the given PQL and index.
func NewPQLBaseQuery(pql string, index *Index, err error) *PQLBaseQuery {
	q := &PQLBaseQuery{}
	q.index = index
	q.pql = pql
	q.err = err
	return q
}
// Index returns the index for this query
func (q *PQLBaseQuery) Index() *Index {
return q.index
}
func (q *PQLBaseQuery) serialize() string {
return q.pql
}
// Error returns the error or nil for this query.
func (q PQLBaseQuery) Error() error {
return q.err
}
// PQLBitmapQuery is the return type for bitmap queries.
type PQLBitmapQuery struct {
index *Index
pql string
err error
}
// Index returns the index for this query/
func (q *PQLBitmapQuery) Index() *Index {
return q.index
}
func (q *PQLBitmapQuery) serialize() string {
return q.pql
}
// Error returns the error or nil for this query.
func (q PQLBitmapQuery) Error() error {
return q.err
}
// PQLBatchQuery contains a batch of PQL queries.
// Use Index.BatchQuery function to create an instance.
//
// Usage:
//
// index, err := NewIndex("repository", nil)
// stargazer, err := index.Frame("stargazer", nil)
// query := repo.BatchQuery(
// stargazer.Bitmap(5),
// stargazer.Bitmap(15),
// repo.Union(stargazer.Bitmap(20), stargazer.Bitmap(25)))
type PQLBatchQuery struct {
index *Index
queries []string
err error
}
// Index returns the index for this query.
func (q *PQLBatchQuery) Index() *Index {
return q.index
}
func (q *PQLBatchQuery) serialize() string {
return strings.Join(q.queries, "")
}
func (q *PQLBatchQuery) Error() error {
return q.err
}
// Add adds a query to the batch. If the query carries an error it becomes
// the batch's error (the most recently added error wins).
func (q *PQLBatchQuery) Add(query PQLQuery) {
	if err := query.Error(); err != nil {
		q.err = err
	}
	q.queries = append(q.queries, query.serialize())
}
// IndexOptions contains options to customize Index structs and column queries.
// *Deprecated*. This struct is deprecated, do not use it in new code.
// *Deprecation*: `ColumnLabel` field is deprecated and will be removed in a future release.
// *Deprecation*: `TimeQuantum` field is deprecated and will be removed in a future release.
type IndexOptions struct {
ColumnLabel string
TimeQuantum TimeQuantum
}
// withDefaults returns a copy of options with any unset field filled in with
// its default value; the receiver is left untouched.
func (options *IndexOptions) withDefaults() *IndexOptions {
	// work on a copy so the caller's struct is not mutated
	updated := &IndexOptions{
		ColumnLabel: options.ColumnLabel,
		TimeQuantum: options.TimeQuantum,
	}
	if updated.ColumnLabel == "" {
		updated.ColumnLabel = "columnID"
	}
	return updated
}
func (options IndexOptions) String() string {
return fmt.Sprintf(`{"options": {"columnLabel": "%s"}}`, options.ColumnLabel)
}
// NewPQLBitmapQuery creates a new PqlBitmapQuery.
func NewPQLBitmapQuery(pql string, index *Index, err error) *PQLBitmapQuery {
	q := &PQLBitmapQuery{}
	q.index = index
	q.pql = pql
	q.err = err
	return q
}
// Index is a Pilosa index. The purpose of the Index is to represent a data namespace.
// You cannot perform cross-index queries. Column-level attributes are global to the Index.
type Index struct {
name string
options *IndexOptions
frames map[string]*Frame
}
func (idx *Index) String() string {
return fmt.Sprintf("%#v", idx)
}
// NewIndex creates an index with a name and options.
// Pass nil for default options.
func NewIndex(name string, options *IndexOptions) (*Index, error) {
	if err := validateIndexName(name); err != nil {
		return nil, err
	}
	if options == nil {
		options = &IndexOptions{}
	}
	opts := options.withDefaults()
	if err := validateLabel(opts.ColumnLabel); err != nil {
		return nil, err
	}
	idx := &Index{
		name:    name,
		options: opts,
		frames:  make(map[string]*Frame),
	}
	return idx, nil
}
// Frames returns a copy of the frames in this index; each frame is deep
// copied so callers cannot mutate the index's state.
func (idx *Index) Frames() map[string]*Frame {
	copies := make(map[string]*Frame, len(idx.frames))
	for name, frame := range idx.frames {
		copies[name] = frame.copy()
	}
	return copies
}
// copy returns a deep copy of the index: frames are copied individually and
// the options struct is duplicated so mutations don't leak between copies.
func (idx *Index) copy() *Index {
	optsCopy := *idx.options
	clone := &Index{
		name:    idx.name,
		frames:  make(map[string]*Frame, len(idx.frames)),
		options: &optsCopy,
	}
	for name, frame := range idx.frames {
		clone.frames[name] = frame.copy()
	}
	return clone
}
// Name returns the name of this index.
func (idx *Index) Name() string {
return idx.name
}
// ColumnLabel returns the column label for this index.
func (idx *Index) ColumnLabel() string {
return idx.options.ColumnLabel
}
// Frame creates a frame struct with the specified name and defaults, caching
// and returning the same *Frame on subsequent calls with the same name.
func (idx *Index) Frame(name string, options ...interface{}) (*Frame, error) {
	if existing, ok := idx.frames[name]; ok {
		return existing, nil
	}
	if err := validateFrameName(name); err != nil {
		return nil, err
	}
	frameOptions := &FrameOptions{}
	if err := frameOptions.addOptions(options...); err != nil {
		return nil, err
	}
	frameOptions = frameOptions.withDefaults()
	if err := validateLabel(frameOptions.RowLabel); err != nil {
		return nil, err
	}
	frame := newFrame(name, idx)
	frame.options = frameOptions
	idx.frames[name] = frame
	return frame, nil
}
// BatchQuery creates a batch query with the given queries.
func (idx *Index) BatchQuery(queries ...PQLQuery) *PQLBatchQuery {
	serialized := make([]string, len(queries))
	for i, query := range queries {
		serialized[i] = query.serialize()
	}
	return &PQLBatchQuery{
		index:   idx,
		queries: serialized,
	}
}
// RawQuery creates a query with the given string.
// Note that the query is not validated before sending to the server.
func (idx *Index) RawQuery(query string) *PQLBaseQuery {
return NewPQLBaseQuery(query, idx, nil)
}
// Union creates a Union query.
// Union performs a logical OR on the results of each BITMAP_CALL query passed to it.
// NOTE(review): unlike Intersect/Difference/Xor, no minimum-argument check is
// performed here — confirm whether the server accepts Union() with zero calls.
func (idx *Index) Union(bitmaps ...*PQLBitmapQuery) *PQLBitmapQuery {
	return idx.bitmapOperation("Union", bitmaps...)
}
// Intersect creates an Intersect query.
// Intersect performs a logical AND on the results of each BITMAP_CALL query passed to it.
func (idx *Index) Intersect(bitmaps ...*PQLBitmapQuery) *PQLBitmapQuery {
	if len(bitmaps) == 0 {
		return NewPQLBitmapQuery("", idx, NewError("Intersect operation requires at least 1 bitmap"))
	}
	return idx.bitmapOperation("Intersect", bitmaps...)
}
// Difference creates an Intersect query.
// Difference returns all of the bits from the first BITMAP_CALL argument passed to it, without the bits from each subsequent BITMAP_CALL.
func (idx *Index) Difference(bitmaps ...*PQLBitmapQuery) *PQLBitmapQuery {
if len(bitmaps) < 1 {
return NewPQLBitmapQuery("", idx, NewError("Difference operation requires at least 1 bitmap"))
}
return idx.bitmapOperation("Difference", bitmaps...)
}
// Xor creates an Xor query.
func (idx *Index) Xor(bitmaps ...*PQLBitmapQuery) *PQLBitmapQuery {
if len(bitmaps) < 2 {
return NewPQLBitmapQuery("", idx, NewError("Xor operation requires at least 2 bitmaps"))
}
return idx.bitmapOperation("Xor", bitmaps...)
}
// Count creates a Count query.
// Returns the number of set bits in the BITMAP_CALL passed in.
func (idx *Index) Count(bitmap *PQLBitmapQuery) *PQLBaseQuery {
return NewPQLBaseQuery(fmt.Sprintf("Count(%s)", bitmap.serialize()), idx, nil)
}
// SetColumnAttrs creates a SetColumnAttrs query.
// SetColumnAttrs associates arbitrary key/value pairs with a column in an index.
// Following types are accepted: integer, float, string and boolean types.
func (idx *Index) SetColumnAttrs(columnID uint64, attrs map[string]interface{}) *PQLBaseQuery {
	attrsString, err := createAttributesString(attrs)
	if err != nil {
		return NewPQLBaseQuery("", idx, err)
	}
	pql := fmt.Sprintf("SetColumnAttrs(%s=%d, %s)",
		idx.options.ColumnLabel, columnID, attrsString)
	return NewPQLBaseQuery(pql, idx, nil)
}
// bitmapOperation builds the PQL for an n-ary bitmap call (Union, Intersect, ...).
// The first operand carrying an error short-circuits into an error query.
func (idx *Index) bitmapOperation(name string, bitmaps ...*PQLBitmapQuery) *PQLBitmapQuery {
	operands := make([]string, 0, len(bitmaps))
	for _, b := range bitmaps {
		if err := b.Error(); err != nil {
			return NewPQLBitmapQuery("", idx, err)
		}
		operands = append(operands, b.serialize())
	}
	return NewPQLBitmapQuery(fmt.Sprintf("%s(%s)", name, strings.Join(operands, ", ")), idx, nil)
}
// FrameInfo represents schema information for a frame.
type FrameInfo struct {
	Name string `json:"name"`
}
// FrameOptions contains options to customize Frame objects and frame queries.
// *Deprecation*: `RowLabel` field is deprecated and will be removed in a future release.
type FrameOptions struct {
	RowLabel string
	// If a Frame has a time quantum, then Views are generated for each of the defined time segments.
	TimeQuantum TimeQuantum
	// Enables inverted frames
	InverseEnabled bool
	// CacheType and CacheSize tune the server-side TopN cache.
	CacheType CacheType
	CacheSize uint
	RangeEnabled bool
	// fields holds range-field schemas keyed by field name (see AddIntField).
	fields map[string]rangeField
}
// withDefaults returns a copy of fo with unset values defaulted:
// RowLabel becomes "rowID" and a nil fields map is allocated.
func (fo *FrameOptions) withDefaults() (updated *FrameOptions) {
	// copy options so the original is not updated
	updated = &FrameOptions{}
	*updated = *fo
	// impose defaults
	if updated.RowLabel == "" {
		updated.RowLabel = "rowID"
	}
	if updated.fields == nil {
		updated.fields = map[string]rangeField{}
	}
	return
}
// String renders the options as the JSON body expected by the server's
// create-frame endpoint: {"options": {...}}.
// Zero values are omitted; the presence of any range field forces
// rangeEnabled to true.
func (fo FrameOptions) String() string {
	mopt := map[string]interface{}{
		"rowLabel": fo.RowLabel,
	}
	if fo.InverseEnabled {
		mopt["inverseEnabled"] = true
	}
	if fo.TimeQuantum != TimeQuantumNone {
		mopt["timeQuantum"] = string(fo.TimeQuantum)
	}
	if fo.CacheType != CacheTypeDefault {
		mopt["cacheType"] = string(fo.CacheType)
	}
	if fo.CacheSize != 0 {
		mopt["cacheSize"] = fo.CacheSize
	}
	if fo.RangeEnabled {
		mopt["rangeEnabled"] = true
	}
	if len(fo.fields) > 0 {
		mopt["rangeEnabled"] = true
		// NOTE(review): field order follows map iteration and is therefore
		// nondeterministic between calls — confirm the server does not care.
		fields := make([]rangeField, 0, len(fo.fields))
		for _, field := range fo.fields {
			fields = append(fields, field)
		}
		mopt["fields"] = fields
	}
	return fmt.Sprintf(`{"options": %s}`, encodeMap(mopt))
}
// AddIntField adds an integer field to the frame options.
// The name must be a valid label and max must be strictly greater
// than min; otherwise an error is returned and fo is unchanged.
func (fo *FrameOptions) AddIntField(name string, min int, max int) error {
	intField, err := newIntRangeField(name, min, max)
	if err != nil {
		return err
	}
	// Lazily allocate the field map on first use.
	if fo.fields == nil {
		fo.fields = make(map[string]rangeField)
	}
	fo.fields[name] = intField
	return nil
}
// addOptions applies the given heterogeneous options to fo.
// Accepted kinds:
//   - nil (first position only): no-op, kept for backward compatibility.
//   - *FrameOptions (first position only): replaces fo wholesale.
//   - FrameOption: a functional option applied to fo.
//   - TimeQuantum, CacheType: set the corresponding field directly.
// Anything else — or nil/*FrameOptions in a non-first position —
// yields ErrInvalidFrameOption.
func (fo *FrameOptions) addOptions(options ...interface{}) error {
	for i, option := range options {
		switch o := option.(type) {
		case nil:
			if i != 0 {
				return ErrInvalidFrameOption
			}
			continue
		case *FrameOptions:
			if i != 0 {
				return ErrInvalidFrameOption
			}
			// A typed nil (*FrameOptions)(nil) matches this case instead of
			// the untyped-nil case above; skip it instead of dereferencing
			// it, which would panic.
			if o == nil {
				continue
			}
			*fo = *o
		case FrameOption:
			err := o(fo)
			if err != nil {
				return err
			}
		case TimeQuantum:
			fo.TimeQuantum = o
		case CacheType:
			fo.CacheType = o
		default:
			return ErrInvalidFrameOption
		}
	}
	return nil
}
// FrameOption is used to pass an option to index.Frame function.
type FrameOption func(options *FrameOptions) error
// InverseEnabled enables inverse frame.
func InverseEnabled(enabled bool) FrameOption {
	return func(fo *FrameOptions) error {
		fo.InverseEnabled = enabled
		return nil
	}
}
// CacheSize sets the cache size.
func CacheSize(size uint) FrameOption {
	return func(fo *FrameOptions) error {
		fo.CacheSize = size
		return nil
	}
}
// RangeEnabled enables range encoding for a frame.
func RangeEnabled(enabled bool) FrameOption {
	return func(fo *FrameOptions) error {
		fo.RangeEnabled = enabled
		return nil
	}
}
// IntField adds an integer field to the frame.
// Validation errors from AddIntField surface when the option is applied.
func IntField(name string, min int, max int) FrameOption {
	return func(fo *FrameOptions) error {
		return fo.AddIntField(name, min, max)
	}
}
// Frame structs are used to segment and define different functional characteristics within your entire index.
// You can think of a Frame as a table-like data partition within your Index.
// Row-level attributes are namespaced at the Frame level.
type Frame struct {
	name string
	index *Index
	options *FrameOptions
	// fields caches RangeField handles created via Field.
	fields map[string]*RangeField
}
// String returns a Go-syntax dump of the frame, for debugging.
func (f *Frame) String() string {
	return fmt.Sprintf("%#v", f)
}
// newFrame builds a frame bound to index with empty options and field cache.
func newFrame(name string, index *Index) *Frame {
	return &Frame{
		name: name,
		index: index,
		options: &FrameOptions{},
		fields: make(map[string]*RangeField),
	}
}
// Name returns the name of the frame
func (f *Frame) Name() string {
	return f.name
}
// RowLabel returns the row label for this frame.
func (f *Frame) RowLabel() string {
	return f.options.RowLabel
}
// copy returns a copy of the frame: options are duplicated, the field
// cache is shallow-copied, and the index pointer is shared.
func (f *Frame) copy() *Frame {
	frame := newFrame(f.name, f.index)
	*frame.options = *f.options
	frame.fields = make(map[string]*RangeField)
	for k, v := range f.fields {
		frame.fields[k] = v
	}
	return frame
}
// Bitmap creates a bitmap query using the row label.
// Bitmap retrieves the indices of all the set bits in a row or column based on whether the row label or column label is given in the query.
// It also retrieves any attributes set on that row or column.
func (f *Frame) Bitmap(rowID uint64) *PQLBitmapQuery {
	return NewPQLBitmapQuery(fmt.Sprintf("Bitmap(%s=%d, frame='%s')",
		f.options.RowLabel, rowID, f.name), f.index, nil)
}
// InverseBitmap creates a bitmap query using the column label.
// Bitmap retrieves the indices of all the set bits in a row or column based on whether the row label or column label is given in the query.
// It also retrieves any attributes set on that row or column.
// NOTE(review): returns *PQLBaseQuery while Bitmap returns *PQLBitmapQuery,
// so the result cannot be used in Union/Intersect — confirm intentional.
func (f *Frame) InverseBitmap(columnID uint64) *PQLBaseQuery {
	return NewPQLBaseQuery(fmt.Sprintf("Bitmap(%s=%d, frame='%s')",
		f.index.options.ColumnLabel, columnID, f.name), f.index, nil)
}
// SetBit creates a SetBit query.
// SetBit, assigns a value of 1 to a bit in the binary matrix, thus associating the given row in the given frame with the given column.
func (f *Frame) SetBit(rowID uint64, columnID uint64) *PQLBaseQuery {
	return NewPQLBaseQuery(fmt.Sprintf("SetBit(%s=%d, frame='%s', %s=%d)",
		f.options.RowLabel, rowID, f.name, f.index.options.ColumnLabel, columnID), f.index, nil)
}
// SetBitTimestamp creates a SetBit query with timestamp.
// SetBit, assigns a value of 1 to a bit in the binary matrix,
// thus associating the given row in the given frame with the given column.
// The timestamp is rendered with minute precision (see timeFormat).
func (f *Frame) SetBitTimestamp(rowID uint64, columnID uint64, timestamp time.Time) *PQLBaseQuery {
	return NewPQLBaseQuery(fmt.Sprintf("SetBit(%s=%d, frame='%s', %s=%d, timestamp='%s')",
		f.options.RowLabel, rowID, f.name, f.index.options.ColumnLabel, columnID, timestamp.Format(timeFormat)),
		f.index, nil)
}
// ClearBit creates a ClearBit query.
// ClearBit, assigns a value of 0 to a bit in the binary matrix, thus disassociating the given row in the given frame from the given column.
func (f *Frame) ClearBit(rowID uint64, columnID uint64) *PQLBaseQuery {
	return NewPQLBaseQuery(fmt.Sprintf("ClearBit(%s=%d, frame='%s', %s=%d)",
		f.options.RowLabel, rowID, f.name, f.index.options.ColumnLabel, columnID), f.index, nil)
}
// TopN creates a TopN query with the given item count.
// Returns the id and count of the top n bitmaps (by count of bits) in the frame.
func (f *Frame) TopN(n uint64) *PQLBitmapQuery {
	return NewPQLBitmapQuery(fmt.Sprintf("TopN(frame='%s', n=%d, inverse=false)", f.name, n), f.index, nil)
}
// InverseTopN creates a TopN query with the given item count.
// Returns the id and count of the top n bitmaps (by count of bits) in the frame.
// This variant sets inverse=true
func (f *Frame) InverseTopN(n uint64) *PQLBitmapQuery {
	return NewPQLBitmapQuery(fmt.Sprintf("TopN(frame='%s', n=%d, inverse=true)", f.name, n), f.index, nil)
}
// BitmapTopN creates a TopN query with the given item count and bitmap.
// This variant supports customizing the bitmap query.
func (f *Frame) BitmapTopN(n uint64, bitmap *PQLBitmapQuery) *PQLBitmapQuery {
	return NewPQLBitmapQuery(fmt.Sprintf("TopN(%s, frame='%s', n=%d, inverse=false)",
		bitmap.serialize(), f.name, n), f.index, nil)
}
// InverseBitmapTopN creates a TopN query with the given item count and bitmap.
// This variant supports customizing the bitmap query and sets inverse=true.
func (f *Frame) InverseBitmapTopN(n uint64, bitmap *PQLBitmapQuery) *PQLBitmapQuery {
	return NewPQLBitmapQuery(fmt.Sprintf("TopN(%s, frame='%s', n=%d, inverse=true)",
		bitmap.serialize(), f.name, n), f.index, nil)
}
// FilterFieldTopN creates a TopN query with the given item count, bitmap, field and the filter for that field
// The field and filters arguments work together to only return Bitmaps which have the attribute specified by field with one of the values specified in filters.
func (f *Frame) FilterFieldTopN(n uint64, bitmap *PQLBitmapQuery, field string, values ...interface{}) *PQLBitmapQuery {
	return f.filterFieldTopN(n, bitmap, false, field, values...)
}
// InverseFilterFieldTopN creates a TopN query with the given item count, bitmap, field and the filter for that field
// The field and filters arguments work together to only return Bitmaps which have the attribute specified by field with one of the values specified in filters.
// This variant sets inverse=true.
func (f *Frame) InverseFilterFieldTopN(n uint64, bitmap *PQLBitmapQuery, field string, values ...interface{}) *PQLBitmapQuery {
	return f.filterFieldTopN(n, bitmap, true, field, values...)
}
// filterFieldTopN builds a TopN query restricted by a field filter.
// field must be a valid label; values are JSON-encoded into the filters
// argument. A nil bitmap omits the bitmap operand entirely.
func (f *Frame) filterFieldTopN(n uint64, bitmap *PQLBitmapQuery, inverse bool, field string, values ...interface{}) *PQLBitmapQuery {
	if err := validateLabel(field); err != nil {
		return NewPQLBitmapQuery("", f.index, err)
	}
	filters, err := json.Marshal(values)
	if err != nil {
		return NewPQLBitmapQuery("", f.index, err)
	}
	inverseStr := "false"
	if inverse {
		inverseStr = "true"
	}
	if bitmap == nil {
		pql := fmt.Sprintf("TopN(frame='%s', n=%d, inverse=%s, field='%s', filters=%s)",
			f.name, n, inverseStr, field, string(filters))
		return NewPQLBitmapQuery(pql, f.index, nil)
	}
	pql := fmt.Sprintf("TopN(%s, frame='%s', n=%d, inverse=%s, field='%s', filters=%s)",
		bitmap.serialize(), f.name, n, inverseStr, field, string(filters))
	return NewPQLBitmapQuery(pql, f.index, nil)
}
// Range creates a Range query.
// Similar to Bitmap, but only returns bits which were set with timestamps between the given start and end timestamps.
func (f *Frame) Range(rowID uint64, start time.Time, end time.Time) *PQLBitmapQuery {
	return NewPQLBitmapQuery(fmt.Sprintf("Range(%s=%d, frame='%s', start='%s', end='%s')",
		f.options.RowLabel, rowID, f.name, start.Format(timeFormat), end.Format(timeFormat)), f.index, nil)
}
// InverseRange creates a Range query.
// Similar to Bitmap, but only returns bits which were set with timestamps between the given start and end timestamps.
// This variant addresses a column (uses the index's column label).
func (f *Frame) InverseRange(columnID uint64, start time.Time, end time.Time) *PQLBitmapQuery {
	return NewPQLBitmapQuery(fmt.Sprintf("Range(%s=%d, frame='%s', start='%s', end='%s')",
		f.index.options.ColumnLabel, columnID, f.name, start.Format(timeFormat), end.Format(timeFormat)), f.index, nil)
}
// SetRowAttrs creates a SetRowAttrs query.
// SetRowAttrs associates arbitrary key/value pairs with a row in a frame.
// Following types are accepted: integer, float, string and boolean types.
func (f *Frame) SetRowAttrs(rowID uint64, attrs map[string]interface{}) *PQLBaseQuery {
	attrsString, err := createAttributesString(attrs)
	if err != nil {
		return NewPQLBaseQuery("", f.index, err)
	}
	return NewPQLBaseQuery(fmt.Sprintf("SetRowAttrs(%s=%d, frame='%s', %s)",
		f.options.RowLabel, rowID, f.name, attrsString), f.index, nil)
}
// Sum creates a Sum query.
// The corresponding frame should include the field in its options.
//
// Deprecated: use f.Field(field).Sum(bitmap) instead.
func (f *Frame) Sum(bitmap *PQLBitmapQuery, field string) *PQLBaseQuery {
	return f.Field(field).Sum(bitmap)
}
// SetIntFieldValue creates a SetFieldValue query.
//
// Deprecated: use f.Field(field).SetIntValue(columnID, value) instead.
func (f *Frame) SetIntFieldValue(columnID uint64, field string, value int) *PQLBaseQuery {
	return f.Field(field).SetIntValue(columnID, value)
}
// Field returns a field to operate on.
// Valid fields are cached on the frame; a field whose name fails label
// validation is returned uncached so its error surfaces on use.
func (f *Frame) Field(name string) *RangeField {
	if cached := f.fields[name]; cached != nil {
		return cached
	}
	created := newRangeField(f, name)
	// do not cache fields with error
	if created.err == nil {
		f.fields[name] = created
	}
	return created
}
// Fields return a copy of the fields in this frame
func (f *Frame) Fields() map[string]*RangeField {
	snapshot := make(map[string]*RangeField, len(f.fields))
	for name, field := range f.fields {
		snapshot[name] = field
	}
	return snapshot
}
// createAttributesString serializes attrs as a sorted, comma-separated
// list of key=value assignments for embedding in a PQL call.
// String values are double-quoted with embedded backslashes and quotes
// escaped; other values are rendered with %v.
// Returns an error when any key is not a valid label.
func createAttributesString(attrs map[string]interface{}) (string, error) {
	attrsList := make([]string, 0, len(attrs))
	for k, v := range attrs {
		// TODO: validate the type of v is one of string, int64, float64, bool
		if err := validateLabel(k); err != nil {
			return "", err
		}
		if vs, ok := v.(string); ok {
			// Escape backslashes before quotes so a literal `\` in the value
			// cannot corrupt the quoting (e.g. a value ending in `\` would
			// otherwise escape the closing quote).
			vs = strings.Replace(vs, "\\", "\\\\", -1)
			vs = strings.Replace(vs, "\"", "\\\"", -1)
			attrsList = append(attrsList, fmt.Sprintf("%s=\"%s\"", k, vs))
		} else {
			attrsList = append(attrsList, fmt.Sprintf("%s=%v", k, v))
		}
	}
	// Sort for deterministic output regardless of map iteration order.
	sort.Strings(attrsList)
	return strings.Join(attrsList, ", "), nil
}
// TimeQuantum type represents valid time quantum values for frames having support for that.
// Values are concatenations of Y (year), M (month), D (day), H (hour).
type TimeQuantum string
// TimeQuantum constants
const (
	TimeQuantumNone TimeQuantum = ""
	TimeQuantumYear TimeQuantum = "Y"
	TimeQuantumMonth TimeQuantum = "M"
	TimeQuantumDay TimeQuantum = "D"
	TimeQuantumHour TimeQuantum = "H"
	TimeQuantumYearMonth TimeQuantum = "YM"
	TimeQuantumMonthDay TimeQuantum = "MD"
	TimeQuantumDayHour TimeQuantum = "DH"
	TimeQuantumYearMonthDay TimeQuantum = "YMD"
	TimeQuantumMonthDayHour TimeQuantum = "MDH"
	TimeQuantumYearMonthDayHour TimeQuantum = "YMDH"
)
// CacheType represents cache type for a frame
type CacheType string
// CacheType constants
const (
	CacheTypeDefault CacheType = ""
	CacheTypeLRU CacheType = "lru"
	CacheTypeRanked CacheType = "ranked"
)
// rangeField represents a single field.
// TODO: rename.
type rangeField map[string]interface{}
// newIntRangeField builds the schema map for an integer range field.
// The name must be a valid label and max must be strictly greater than min.
func newIntRangeField(name string, min int, max int) (rangeField, error) {
	if err := validateLabel(name); err != nil {
		return nil, err
	}
	if max <= min {
		return nil, errors.New("Max should be greater than min for int fields")
	}
	field := rangeField{
		"name": name,
		"type": "int",
		"min":  min,
		"max":  max,
	}
	return field, nil
}
// RangeField enables writing queries for range encoded fields.
type RangeField struct {
	frame *Frame
	name string
	// err is non-nil when name failed label validation in newRangeField;
	// it is propagated into every query built from this field.
	err error
}
// newRangeField builds a RangeField bound to frame.
// An invalid name does not fail construction; the validation error is
// stored and surfaces on the resulting queries instead.
func newRangeField(frame *Frame, name string) *RangeField {
	err := validateLabel(name)
	return &RangeField{
		frame: frame,
		name: name,
		err: err,
	}
}
// LT creates a less than query.
func (field *RangeField) LT(n int) *PQLBitmapQuery {
	return field.binaryOperation("<", n)
}
// LTE creates a less than or equal query.
func (field *RangeField) LTE(n int) *PQLBitmapQuery {
	return field.binaryOperation("<=", n)
}
// GT creates a greater than query.
func (field *RangeField) GT(n int) *PQLBitmapQuery {
	return field.binaryOperation(">", n)
}
// GTE creates a greater than or equal query.
func (field *RangeField) GTE(n int) *PQLBitmapQuery {
	return field.binaryOperation(">=", n)
}
// Equals creates an equals query.
func (field *RangeField) Equals(n int) *PQLBitmapQuery {
	return field.binaryOperation("==", n)
}
// NotEquals creates a not equals query.
func (field *RangeField) NotEquals(n int) *PQLBitmapQuery {
	return field.binaryOperation("!=", n)
}
// NotNull creates a not equal to null query.
func (field *RangeField) NotNull() *PQLBitmapQuery {
	qry := fmt.Sprintf("Range(frame='%s', %s != null)", field.frame.name, field.name)
	return NewPQLBitmapQuery(qry, field.frame.index, field.err)
}
// Between creates a between query.
// The bounds a and b are inclusive of the PQL >< operator's range.
func (field *RangeField) Between(a int, b int) *PQLBitmapQuery {
	qry := fmt.Sprintf("Range(frame='%s', %s >< [%d,%d])", field.frame.name, field.name, a, b)
	return NewPQLBitmapQuery(qry, field.frame.index, field.err)
}
// Sum creates a sum query.
// A nil bitmap sums over the whole frame; otherwise the bitmap restricts
// the columns included in the sum.
func (field *RangeField) Sum(bitmap *PQLBitmapQuery) *PQLBaseQuery {
	prefix := ""
	if bitmap != nil {
		prefix = bitmap.serialize() + ", "
	}
	qry := fmt.Sprintf("Sum(%sframe='%s', field='%s')", prefix, field.frame.name, field.name)
	return NewPQLBaseQuery(qry, field.frame.index, field.err)
}
// SetIntValue creates a SetValue query.
func (field *RangeField) SetIntValue(columnID uint64, value int) *PQLBaseQuery {
	idx := field.frame.index
	qry := fmt.Sprintf("SetFieldValue(frame='%s', %s=%d, %s=%d)",
		field.frame.name, idx.options.ColumnLabel, columnID, field.name, value)
	return NewPQLBaseQuery(qry, idx, nil)
}
// binaryOperation builds a Range query comparing this field against n
// using the given PQL operator; field.err is propagated.
func (field *RangeField) binaryOperation(op string, n int) *PQLBitmapQuery {
	qry := fmt.Sprintf("Range(frame='%s', %s %s %d)", field.frame.name, field.name, op, n)
	return NewPQLBitmapQuery(qry, field.frame.index, field.err)
}
// encodeMap renders m as a JSON object string.
// It panics on a marshaling error, which cannot occur for the
// string-keyed maps of JSON-encodable values built in this package.
func encodeMap(m map[string]interface{}) string {
	encoded, err := json.Marshal(m)
	if err != nil {
		panic(err)
	}
	return string(encoded)
}
// cut out row and column labels
// Copyright 2017 Pilosa Corp.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
// CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
package pilosa
import (
"encoding/json"
"errors"
"fmt"
"sort"
"strings"
"time"
)
// timeFormat is the layout used for PQL timestamp arguments
// (minute precision, no time zone).
const timeFormat = "2006-01-02T15:04"
// Schema contains the index properties
type Schema struct {
	// indexes maps index name to its definition.
	indexes map[string]*Index
}
// String returns a Go-syntax dump of the schema's indexes, for debugging.
func (s *Schema) String() string {
	return fmt.Sprintf("%#v", s.indexes)
}
// NewSchema creates a new Schema
func NewSchema() *Schema {
	return &Schema{
		indexes: make(map[string]*Index),
	}
}
// Index returns an index with a name, creating and caching it on first use.
// At most one options argument may be given; more than one yields
// ErrInvalidIndexOption. An existing index is returned as-is.
// *Deprecated* Passing IndexOptions or nil.
func (s *Schema) Index(name string, options ...*IndexOptions) (*Index, error) {
	if existing, ok := s.indexes[name]; ok {
		return existing, nil
	}
	var opts *IndexOptions
	switch len(options) {
	case 0:
		// no options supplied; NewIndex applies defaults
	case 1:
		opts = options[0]
	default:
		return nil, ErrInvalidIndexOption
	}
	index, err := NewIndex(name, opts)
	if err != nil {
		return nil, err
	}
	s.indexes[name] = index
	return index, nil
}
// Indexes return a copy of the indexes in this schema
func (s *Schema) Indexes() map[string]*Index {
	copies := make(map[string]*Index, len(s.indexes))
	for name, index := range s.indexes {
		copies[name] = index.copy()
	}
	return copies
}
// diff returns a schema containing everything present in s but absent
// from other: whole indexes that other lacks, and — for shared indexes —
// only the frames other lacks. Shared indexes with no missing frames are
// omitted from the result.
func (s *Schema) diff(other *Schema) *Schema {
	result := NewSchema()
	for indexName, index := range s.indexes {
		if otherIndex, ok := other.indexes[indexName]; !ok {
			// if the index doesn't exist in the other schema, simply copy it
			result.indexes[indexName] = index.copy()
		} else {
			// the index exists in the other schema; check the frames
			// (error ignored: index.options were already validated when
			// the source index was created)
			resultIndex, _ := NewIndex(indexName, index.options)
			for frameName, frame := range index.frames {
				if _, ok := otherIndex.frames[frameName]; !ok {
					// the frame doesn't exist in the other schema, copy it
					resultIndex.frames[frameName] = frame.copy()
				}
			}
			// check whether we modified result index
			if len(resultIndex.frames) > 0 {
				// if so, move it to the result
				result.indexes[indexName] = resultIndex
			}
		}
	}
	return result
}
// PQLQuery is an interface for PQL queries.
type PQLQuery interface {
	// Index returns the index the query targets.
	Index() *Index
	// serialize returns the PQL text of the query.
	serialize() string
	// Error returns the error attached at construction time, or nil.
	Error() error
}
// PQLBaseQuery is the base implementation for PQLQuery.
type PQLBaseQuery struct {
	index *Index
	pql string
	err error
}
// NewPQLBaseQuery creates a new PQLQuery with the given PQL and index.
// A non-nil err marks the query as failed; it is surfaced via Error().
func NewPQLBaseQuery(pql string, index *Index, err error) *PQLBaseQuery {
	return &PQLBaseQuery{
		index: index,
		pql: pql,
		err: err,
	}
}
// Index returns the index for this query
func (q *PQLBaseQuery) Index() *Index {
	return q.index
}
func (q *PQLBaseQuery) serialize() string {
	return q.pql
}
// Error returns the error or nil for this query.
// NOTE(review): value receiver here vs pointer receivers elsewhere on
// this type — confirm the mixed receiver set is intentional.
func (q PQLBaseQuery) Error() error {
	return q.err
}
// PQLBitmapQuery is the return type for bitmap queries.
type PQLBitmapQuery struct {
	index *Index
	pql string
	err error
}
// Index returns the index for this query/
func (q *PQLBitmapQuery) Index() *Index {
	return q.index
}
func (q *PQLBitmapQuery) serialize() string {
	return q.pql
}
// Error returns the error or nil for this query.
func (q PQLBitmapQuery) Error() error {
	return q.err
}
// PQLBatchQuery contains a batch of PQL queries.
// Use Index.BatchQuery function to create an instance.
//
// Usage:
//
// 	index, err := NewIndex("repository", nil)
// 	stargazer, err := index.Frame("stargazer", nil)
// 	query := index.BatchQuery(
// 		stargazer.Bitmap(5),
// 		stargazer.Bitmap(15),
// 		index.Union(stargazer.Bitmap(20), stargazer.Bitmap(25)))
type PQLBatchQuery struct {
	index *Index
	queries []string
	err error
}
// Index returns the index for this query.
func (q *PQLBatchQuery) Index() *Index {
	return q.index
}
// serialize concatenates the member queries without a separator.
func (q *PQLBatchQuery) serialize() string {
	return strings.Join(q.queries, "")
}
func (q *PQLBatchQuery) Error() error {
	return q.err
}
// Add adds a query to the batch.
// The query's serialized form is appended even when it carries an error;
// only the most recently added error is retained on the batch.
func (q *PQLBatchQuery) Add(query PQLQuery) {
	err := query.Error()
	if err != nil {
		q.err = err
	}
	q.queries = append(q.queries, query.serialize())
}
// IndexOptions contains options to customize Index structs and column queries.
// *Deprecated*. This struct is deprecated, do not use it in new code.
// *Deprecation*: `ColumnLabel` field is deprecated and will be removed in a future release.
// *Deprecation*: `TimeQuantum` field is deprecated and will be removed in a future release.
type IndexOptions struct {
	ColumnLabel string
	TimeQuantum TimeQuantum
}
// withDefaults returns a copy of options with an empty ColumnLabel
// replaced by the default "columnID".
func (options *IndexOptions) withDefaults() (updated *IndexOptions) {
	// copy options so the original is not updated
	updated = &IndexOptions{}
	*updated = *options
	// impose defaults
	if updated.ColumnLabel == "" {
		updated.ColumnLabel = "columnID"
	}
	return
}
// String renders the options as the JSON body expected by the server's
// create-index endpoint. Note that TimeQuantum is not serialized here.
func (options IndexOptions) String() string {
	return fmt.Sprintf(`{"options": {"columnLabel": "%s"}}`, options.ColumnLabel)
}
// NewPQLBitmapQuery creates a new PqlBitmapQuery.
// A non-nil err marks the query as failed; it is surfaced via Error().
func NewPQLBitmapQuery(pql string, index *Index, err error) *PQLBitmapQuery {
	return &PQLBitmapQuery{
		index: index,
		pql: pql,
		err: err,
	}
}
// Index is a Pilosa index. The purpose of the Index is to represent a data namespace.
// You cannot perform cross-index queries. Column-level attributes are global to the Index.
type Index struct {
	name string
	options *IndexOptions
	// frames maps frame name to its definition within this index.
	frames map[string]*Frame
}
// String returns a Go-syntax dump of the index, for debugging.
func (idx *Index) String() string {
	return fmt.Sprintf("%#v", idx)
}
// NewIndex creates an index with a name and options.
// Pass nil for default options.
// The name and the (defaulted) column label are validated; the options
// are copied, so the caller's struct is not retained.
func NewIndex(name string, options *IndexOptions) (*Index, error) {
	if err := validateIndexName(name); err != nil {
		return nil, err
	}
	if options == nil {
		options = &IndexOptions{}
	}
	options = options.withDefaults()
	if err := validateLabel(options.ColumnLabel); err != nil {
		return nil, err
	}
	return &Index{
		name: name,
		options: options,
		frames: map[string]*Frame{},
	}, nil
}
// Frames return a copy of the frames in this index
func (idx *Index) Frames() map[string]*Frame {
	copies := make(map[string]*Frame, len(idx.frames))
	for name, frame := range idx.frames {
		copies[name] = frame.copy()
	}
	return copies
}
// copy returns a deep copy of the index: every frame is copied and the
// options struct is duplicated so the clone shares no mutable state.
func (idx *Index) copy() *Index {
	clone := &Index{
		name:    idx.name,
		frames:  make(map[string]*Frame, len(idx.frames)),
		options: &IndexOptions{},
	}
	*clone.options = *idx.options
	for name, frame := range idx.frames {
		clone.frames[name] = frame.copy()
	}
	return clone
}
// Name returns the name of this index.
func (idx *Index) Name() string {
	return idx.name
}
// Frame creates a frame struct with the specified name and defaults.
// Accepted options: nil or a *FrameOptions (first position only),
// FrameOption functions, and TimeQuantum or CacheType values.
// NOTE(review): if the frame already exists it is returned as-is and the
// given options are silently ignored — confirm that is intended.
func (idx *Index) Frame(name string, options ...interface{}) (*Frame, error) {
	// Existing frames short-circuit before any validation.
	if frame, ok := idx.frames[name]; ok {
		return frame, nil
	}
	if err := validateFrameName(name); err != nil {
		return nil, err
	}
	frameOptions := &FrameOptions{}
	err := frameOptions.addOptions(options...)
	if err != nil {
		return nil, err
	}
	// withDefaults fills in the default row label ("rowID") when unset.
	frameOptions = frameOptions.withDefaults()
	if err := validateLabel(frameOptions.RowLabel); err != nil {
		return nil, err
	}
	frame := newFrame(name, idx)
	frame.options = frameOptions
	idx.frames[name] = frame
	return frame, nil
}
// BatchQuery creates a batch query with the given queries.
// Each query is serialized eagerly; individual query errors are not
// captured here.
func (idx *Index) BatchQuery(queries ...PQLQuery) *PQLBatchQuery {
	pqls := make([]string, 0, len(queries))
	for _, q := range queries {
		pqls = append(pqls, q.serialize())
	}
	return &PQLBatchQuery{index: idx, queries: pqls}
}
// RawQuery creates a query with the given string.
// Note that the query is not validated before sending to the server.
func (idx *Index) RawQuery(query string) *PQLBaseQuery {
	return NewPQLBaseQuery(query, idx, nil)
}
// Union creates a Union query.
// Union performs a logical OR on the results of each BITMAP_CALL query passed to it.
// NOTE(review): unlike Intersect/Difference, no minimum operand count is
// enforced here — confirm that a zero-operand Union() is acceptable.
func (idx *Index) Union(bitmaps ...*PQLBitmapQuery) *PQLBitmapQuery {
	return idx.bitmapOperation("Union", bitmaps...)
}
// Intersect creates an Intersect query.
// Intersect performs a logical AND on the results of each BITMAP_CALL query passed to it.
func (idx *Index) Intersect(bitmaps ...*PQLBitmapQuery) *PQLBitmapQuery {
	if len(bitmaps) < 1 {
		return NewPQLBitmapQuery("", idx, NewError("Intersect operation requires at least 1 bitmap"))
	}
	return idx.bitmapOperation("Intersect", bitmaps...)
}
// Difference creates a Difference query.
// Difference returns all of the bits from the first BITMAP_CALL argument passed to it, without the bits from each subsequent BITMAP_CALL.
func (idx *Index) Difference(bitmaps ...*PQLBitmapQuery) *PQLBitmapQuery {
	if len(bitmaps) < 1 {
		return NewPQLBitmapQuery("", idx, NewError("Difference operation requires at least 1 bitmap"))
	}
	return idx.bitmapOperation("Difference", bitmaps...)
}
// Xor creates an Xor query.
func (idx *Index) Xor(bitmaps ...*PQLBitmapQuery) *PQLBitmapQuery {
	if len(bitmaps) < 2 {
		return NewPQLBitmapQuery("", idx, NewError("Xor operation requires at least 2 bitmaps"))
	}
	return idx.bitmapOperation("Xor", bitmaps...)
}
// Count creates a Count query.
// Returns the number of set bits in the BITMAP_CALL passed in.
// NOTE(review): a bitmap carrying an error serializes to "" and the error
// is dropped here — confirm callers check bitmap.Error() first.
func (idx *Index) Count(bitmap *PQLBitmapQuery) *PQLBaseQuery {
	return NewPQLBaseQuery(fmt.Sprintf("Count(%s)", bitmap.serialize()), idx, nil)
}
// SetColumnAttrs creates a SetColumnAttrs query.
// SetColumnAttrs associates arbitrary key/value pairs with a column in an index.
// Following types are accepted: integer, float, string and boolean types.
func (idx *Index) SetColumnAttrs(columnID uint64, attrs map[string]interface{}) *PQLBaseQuery {
	attrsString, err := createAttributesString(attrs)
	if err != nil {
		return NewPQLBaseQuery("", idx, err)
	}
	return NewPQLBaseQuery(fmt.Sprintf("SetColumnAttrs(%s=%d, %s)",
		idx.options.ColumnLabel, columnID, attrsString), idx, nil)
}
// bitmapOperation builds the PQL for an n-ary bitmap call; the first
// operand carrying an error short-circuits into an error query.
func (idx *Index) bitmapOperation(name string, bitmaps ...*PQLBitmapQuery) *PQLBitmapQuery {
	var err error
	args := make([]string, 0, len(bitmaps))
	for _, bitmap := range bitmaps {
		if err = bitmap.Error(); err != nil {
			return NewPQLBitmapQuery("", idx, err)
		}
		args = append(args, bitmap.serialize())
	}
	return NewPQLBitmapQuery(fmt.Sprintf("%s(%s)", name, strings.Join(args, ", ")), idx, nil)
}
// FrameInfo represents schema information for a frame.
type FrameInfo struct {
	Name string `json:"name"`
}
// FrameOptions contains options to customize Frame objects and frame queries.
// *Deprecation*: `RowLabel` field is deprecated and will be removed in a future release.
type FrameOptions struct {
	RowLabel string
	// If a Frame has a time quantum, then Views are generated for each of the defined time segments.
	TimeQuantum TimeQuantum
	// Enables inverted frames
	InverseEnabled bool
	// CacheType and CacheSize tune the server-side TopN cache.
	CacheType CacheType
	CacheSize uint
	RangeEnabled bool
	// fields holds range-field schemas keyed by field name (see AddIntField).
	fields map[string]rangeField
}
// withDefaults returns a copy of fo with unset values defaulted:
// RowLabel becomes "rowID" and a nil fields map is allocated.
func (fo *FrameOptions) withDefaults() (updated *FrameOptions) {
	// copy options so the original is not updated
	updated = &FrameOptions{}
	*updated = *fo
	// impose defaults
	if updated.RowLabel == "" {
		updated.RowLabel = "rowID"
	}
	if updated.fields == nil {
		updated.fields = map[string]rangeField{}
	}
	return
}
// String renders the options as the JSON body expected by the server's
// create-frame endpoint: {"options": {...}}.
// Zero values are omitted; the presence of any range field forces
// rangeEnabled to true.
func (fo FrameOptions) String() string {
	mopt := map[string]interface{}{
		"rowLabel": fo.RowLabel,
	}
	if fo.InverseEnabled {
		mopt["inverseEnabled"] = true
	}
	if fo.TimeQuantum != TimeQuantumNone {
		mopt["timeQuantum"] = string(fo.TimeQuantum)
	}
	if fo.CacheType != CacheTypeDefault {
		mopt["cacheType"] = string(fo.CacheType)
	}
	if fo.CacheSize != 0 {
		mopt["cacheSize"] = fo.CacheSize
	}
	if fo.RangeEnabled {
		mopt["rangeEnabled"] = true
	}
	if len(fo.fields) > 0 {
		mopt["rangeEnabled"] = true
		// NOTE(review): field order follows map iteration and is therefore
		// nondeterministic between calls — confirm the server does not care.
		fields := make([]rangeField, 0, len(fo.fields))
		for _, field := range fo.fields {
			fields = append(fields, field)
		}
		mopt["fields"] = fields
	}
	return fmt.Sprintf(`{"options": %s}`, encodeMap(mopt))
}
// AddIntField adds an integer field to the frame options.
// The name must be a valid label and max must be strictly greater
// than min; otherwise an error is returned and fo is unchanged.
func (fo *FrameOptions) AddIntField(name string, min int, max int) error {
	intField, err := newIntRangeField(name, min, max)
	if err != nil {
		return err
	}
	// Lazily allocate the field map on first use.
	if fo.fields == nil {
		fo.fields = make(map[string]rangeField)
	}
	fo.fields[name] = intField
	return nil
}
// addOptions applies the given heterogeneous options to fo.
// Accepted kinds:
//   - nil (first position only): no-op, kept for backward compatibility.
//   - *FrameOptions (first position only): replaces fo wholesale.
//   - FrameOption: a functional option applied to fo.
//   - TimeQuantum, CacheType: set the corresponding field directly.
// Anything else — or nil/*FrameOptions in a non-first position —
// yields ErrInvalidFrameOption.
func (fo *FrameOptions) addOptions(options ...interface{}) error {
	for i, option := range options {
		switch o := option.(type) {
		case nil:
			if i != 0 {
				return ErrInvalidFrameOption
			}
			continue
		case *FrameOptions:
			if i != 0 {
				return ErrInvalidFrameOption
			}
			// A typed nil (*FrameOptions)(nil) matches this case instead of
			// the untyped-nil case above; skip it instead of dereferencing
			// it, which would panic.
			if o == nil {
				continue
			}
			*fo = *o
		case FrameOption:
			err := o(fo)
			if err != nil {
				return err
			}
		case TimeQuantum:
			fo.TimeQuantum = o
		case CacheType:
			fo.CacheType = o
		default:
			return ErrInvalidFrameOption
		}
	}
	return nil
}
// FrameOption is used to pass an option to index.Frame function.
type FrameOption func(options *FrameOptions) error
// InverseEnabled enables inverse frame.
func InverseEnabled(enabled bool) FrameOption {
	return func(options *FrameOptions) error {
		options.InverseEnabled = enabled
		return nil
	}
}
// CacheSize sets the cache size.
func CacheSize(size uint) FrameOption {
	return func(options *FrameOptions) error {
		options.CacheSize = size
		return nil
	}
}
// RangeEnabled enables range encoding for a frame.
func RangeEnabled(enabled bool) FrameOption {
	return func(options *FrameOptions) error {
		options.RangeEnabled = enabled
		return nil
	}
}
// IntField adds an integer field to the frame.
// Validation errors from AddIntField surface when the option is applied.
func IntField(name string, min int, max int) FrameOption {
	return func(options *FrameOptions) error {
		return options.AddIntField(name, min, max)
	}
}
// Frame structs are used to segment and define different functional characteristics within your entire index.
// You can think of a Frame as a table-like data partition within your Index.
// Row-level attributes are namespaced at the Frame level.
type Frame struct {
	name    string // frame name, fixed at creation
	index   *Index // owning index
	options *FrameOptions
	fields  map[string]*RangeField // cache of range fields created via Field()
}
// String returns the Go-syntax representation of the frame (for debugging).
func (f *Frame) String() string {
	return fmt.Sprintf("%#v", f)
}
// newFrame creates a frame with default options in the given index.
func newFrame(name string, index *Index) *Frame {
	return &Frame{
		name:    name,
		index:   index,
		options: &FrameOptions{},
		fields:  make(map[string]*RangeField),
	}
}
// Name returns the name of the frame
func (f *Frame) Name() string {
	return f.name
}
// copy returns a copy of the frame with its own options struct and
// fields map; the *RangeField values themselves are shared.
func (f *Frame) copy() *Frame {
	frame := newFrame(f.name, f.index)
	*frame.options = *f.options
	frame.fields = make(map[string]*RangeField)
	for k, v := range f.fields {
		frame.fields[k] = v
	}
	return frame
}
// Bitmap creates a bitmap query using the row label.
// Bitmap retrieves the indices of all the set bits in a row or column based on whether the row label or column label is given in the query.
// It also retrieves any attributes set on that row or column.
func (f *Frame) Bitmap(rowID uint64) *PQLBitmapQuery {
	return NewPQLBitmapQuery(fmt.Sprintf("Bitmap(%s=%d, frame='%s')",
		f.options.RowLabel, rowID, f.name), f.index, nil)
}
// InverseBitmap creates a bitmap query using the column label.
// InverseBitmap retrieves the indices of all the set bits in a row or column based on whether the row label or column label is given in the query.
// It also retrieves any attributes set on that row or column.
func (f *Frame) InverseBitmap(columnID uint64) *PQLBaseQuery {
	return NewPQLBaseQuery(fmt.Sprintf("Bitmap(%s=%d, frame='%s')",
		f.index.options.ColumnLabel, columnID, f.name), f.index, nil)
}
// SetBit creates a SetBit query.
// SetBit, assigns a value of 1 to a bit in the binary matrix, thus associating the given row in the given frame with the given column.
func (f *Frame) SetBit(rowID uint64, columnID uint64) *PQLBaseQuery {
	return NewPQLBaseQuery(fmt.Sprintf("SetBit(%s=%d, frame='%s', %s=%d)",
		f.options.RowLabel, rowID, f.name, f.index.options.ColumnLabel, columnID), f.index, nil)
}
// SetBitTimestamp creates a SetBit query with timestamp.
// SetBit, assigns a value of 1 to a bit in the binary matrix,
// thus associating the given row in the given frame with the given column.
func (f *Frame) SetBitTimestamp(rowID uint64, columnID uint64, timestamp time.Time) *PQLBaseQuery {
	return NewPQLBaseQuery(fmt.Sprintf("SetBit(%s=%d, frame='%s', %s=%d, timestamp='%s')",
		f.options.RowLabel, rowID, f.name, f.index.options.ColumnLabel, columnID, timestamp.Format(timeFormat)),
		f.index, nil)
}
// ClearBit creates a ClearBit query.
// ClearBit, assigns a value of 0 to a bit in the binary matrix, thus disassociating the given row in the given frame from the given column.
func (f *Frame) ClearBit(rowID uint64, columnID uint64) *PQLBaseQuery {
	return NewPQLBaseQuery(fmt.Sprintf("ClearBit(%s=%d, frame='%s', %s=%d)",
		f.options.RowLabel, rowID, f.name, f.index.options.ColumnLabel, columnID), f.index, nil)
}
// TopN creates a TopN query with the given item count.
// Returns the id and count of the top n bitmaps (by count of bits) in the frame.
func (f *Frame) TopN(n uint64) *PQLBitmapQuery {
	return NewPQLBitmapQuery(fmt.Sprintf("TopN(frame='%s', n=%d, inverse=false)", f.name, n), f.index, nil)
}
// InverseTopN creates a TopN query with the given item count.
// Returns the id and count of the top n bitmaps (by count of bits) in the frame.
// This variant sets inverse=true
func (f *Frame) InverseTopN(n uint64) *PQLBitmapQuery {
	return NewPQLBitmapQuery(fmt.Sprintf("TopN(frame='%s', n=%d, inverse=true)", f.name, n), f.index, nil)
}
// BitmapTopN creates a TopN query with the given item count and bitmap.
// This variant supports customizing the bitmap query.
func (f *Frame) BitmapTopN(n uint64, bitmap *PQLBitmapQuery) *PQLBitmapQuery {
	return NewPQLBitmapQuery(fmt.Sprintf("TopN(%s, frame='%s', n=%d, inverse=false)",
		bitmap.serialize(), f.name, n), f.index, nil)
}
// InverseBitmapTopN creates a TopN query with the given item count and bitmap.
// This variant supports customizing the bitmap query and sets inverse=true.
func (f *Frame) InverseBitmapTopN(n uint64, bitmap *PQLBitmapQuery) *PQLBitmapQuery {
	return NewPQLBitmapQuery(fmt.Sprintf("TopN(%s, frame='%s', n=%d, inverse=true)",
		bitmap.serialize(), f.name, n), f.index, nil)
}
// FilterFieldTopN creates a TopN query with the given item count, bitmap, field and the filter for that field
// The field and filters arguments work together to only return Bitmaps which have the attribute specified by field with one of the values specified in filters.
func (f *Frame) FilterFieldTopN(n uint64, bitmap *PQLBitmapQuery, field string, values ...interface{}) *PQLBitmapQuery {
	return f.filterFieldTopN(n, bitmap, false, field, values...)
}
// InverseFilterFieldTopN creates a TopN query with the given item count, bitmap, field and the filter for that field
// The field and filters arguments work together to only return Bitmaps which have the attribute specified by field with one of the values specified in filters.
// This variant sets inverse=true.
func (f *Frame) InverseFilterFieldTopN(n uint64, bitmap *PQLBitmapQuery, field string, values ...interface{}) *PQLBitmapQuery {
	return f.filterFieldTopN(n, bitmap, true, field, values...)
}
// filterFieldTopN builds the TopN query shared by FilterFieldTopN and
// InverseFilterFieldTopN. The field name is validated and values are
// JSON-encoded into the filters clause; any failure is captured in the
// returned query's error rather than returned directly.
func (f *Frame) filterFieldTopN(n uint64, bitmap *PQLBitmapQuery, inverse bool, field string, values ...interface{}) *PQLBitmapQuery {
	if err := validateLabel(field); err != nil {
		return NewPQLBitmapQuery("", f.index, err)
	}
	filters, err := json.Marshal(values)
	if err != nil {
		return NewPQLBitmapQuery("", f.index, err)
	}
	inverseStr := "false"
	if inverse {
		inverseStr = "true"
	}
	if bitmap == nil {
		return NewPQLBitmapQuery(fmt.Sprintf("TopN(frame='%s', n=%d, inverse=%s, field='%s', filters=%s)",
			f.name, n, inverseStr, field, string(filters)), f.index, nil)
	}
	return NewPQLBitmapQuery(fmt.Sprintf("TopN(%s, frame='%s', n=%d, inverse=%s, field='%s', filters=%s)",
		bitmap.serialize(), f.name, n, inverseStr, field, string(filters)), f.index, nil)
}
// Range creates a Range query.
// Similar to Bitmap, but only returns bits which were set with timestamps between the given start and end timestamps.
func (f *Frame) Range(rowID uint64, start time.Time, end time.Time) *PQLBitmapQuery {
	return NewPQLBitmapQuery(fmt.Sprintf("Range(%s=%d, frame='%s', start='%s', end='%s')",
		f.options.RowLabel, rowID, f.name, start.Format(timeFormat), end.Format(timeFormat)), f.index, nil)
}
// InverseRange creates a Range query using the column label.
// Similar to Bitmap, but only returns bits which were set with timestamps between the given start and end timestamps.
func (f *Frame) InverseRange(columnID uint64, start time.Time, end time.Time) *PQLBitmapQuery {
	return NewPQLBitmapQuery(fmt.Sprintf("Range(%s=%d, frame='%s', start='%s', end='%s')",
		f.index.options.ColumnLabel, columnID, f.name, start.Format(timeFormat), end.Format(timeFormat)), f.index, nil)
}
// SetRowAttrs creates a SetRowAttrs query.
// SetRowAttrs associates arbitrary key/value pairs with a row in a frame.
// Following types are accepted: integer, float, string and boolean types.
// An invalid attribute key is reported via the returned query's error.
func (f *Frame) SetRowAttrs(rowID uint64, attrs map[string]interface{}) *PQLBaseQuery {
	attrsString, err := createAttributesString(attrs)
	if err != nil {
		return NewPQLBaseQuery("", f.index, err)
	}
	return NewPQLBaseQuery(fmt.Sprintf("SetRowAttrs(%s=%d, frame='%s', %s)",
		f.options.RowLabel, rowID, f.name, attrsString), f.index, nil)
}
// Sum creates a Sum query.
// The corresponding frame should include the field in its options.
//
// Deprecated: use frame.Field(field).Sum(bitmap) instead.
func (f *Frame) Sum(bitmap *PQLBitmapQuery, field string) *PQLBaseQuery {
	return f.Field(field).Sum(bitmap)
}
// SetIntFieldValue creates a SetFieldValue query.
//
// Deprecated: use frame.Field(field).SetIntValue(columnID, value) instead.
func (f *Frame) SetIntFieldValue(columnID uint64, field string, value int) *PQLBaseQuery {
	return f.Field(field).SetIntValue(columnID, value)
}
// Field returns a field to operate on.
// Fields are cached per frame; a field whose name fails validation is
// returned but not cached, so its error is reproduced on each call.
func (f *Frame) Field(name string) *RangeField {
	field := f.fields[name]
	if field == nil {
		field = newRangeField(f, name)
		// do not cache fields with error
		if field.err == nil {
			f.fields[name] = field
		}
	}
	return field
}
// Fields return a copy of the fields in this frame
// (the map is copied; the *RangeField values are shared).
func (f *Frame) Fields() map[string]*RangeField {
	result := make(map[string]*RangeField)
	for k, v := range f.fields {
		result[k] = v
	}
	return result
}
// createAttributesString renders attrs as a comma-separated list of
// key=value assignments, sorted by key so output is deterministic.
// String values are double-quoted with embedded quotes escaped; all
// other values use their default formatting.
func createAttributesString(attrs map[string]interface{}) (string, error) {
	parts := make([]string, 0, len(attrs))
	for name, value := range attrs {
		// TODO: validate the type of value is one of string, int64, float64, bool
		if err := validateLabel(name); err != nil {
			return "", err
		}
		if s, ok := value.(string); ok {
			parts = append(parts, fmt.Sprintf("%s=\"%s\"", name, strings.Replace(s, "\"", "\\\"", -1)))
		} else {
			parts = append(parts, fmt.Sprintf("%s=%v", name, value))
		}
	}
	sort.Strings(parts)
	return strings.Join(parts, ", "), nil
}
// TimeQuantum type represents valid time quantum values for frames having support for that.
type TimeQuantum string
// TimeQuantum constants. Compound quanta are concatenations of the
// single-letter units Y (year), M (month), D (day) and H (hour).
const (
	TimeQuantumNone             TimeQuantum = ""
	TimeQuantumYear             TimeQuantum = "Y"
	TimeQuantumMonth            TimeQuantum = "M"
	TimeQuantumDay              TimeQuantum = "D"
	TimeQuantumHour             TimeQuantum = "H"
	TimeQuantumYearMonth        TimeQuantum = "YM"
	TimeQuantumMonthDay         TimeQuantum = "MD"
	TimeQuantumDayHour          TimeQuantum = "DH"
	TimeQuantumYearMonthDay     TimeQuantum = "YMD"
	TimeQuantumMonthDayHour     TimeQuantum = "MDH"
	TimeQuantumYearMonthDayHour TimeQuantum = "YMDH"
)
// CacheType represents cache type for a frame
type CacheType string
// CacheType constants. CacheTypeDefault leaves the choice to the server.
const (
	CacheTypeDefault CacheType = ""
	CacheTypeLRU     CacheType = "lru"
	CacheTypeRanked  CacheType = "ranked"
)
// rangeField represents a single field.
// It is the JSON-shaped description sent to the server (keys: name,
// type, min, max).
// TODO: rename.
type rangeField map[string]interface{}
// newIntRangeField builds the description of an integer range field.
// The name must be a valid label and the range non-empty (max > min).
func newIntRangeField(name string, min int, max int) (rangeField, error) {
	err := validateLabel(name)
	if err != nil {
		return nil, err
	}
	if max <= min {
		return nil, errors.New("Max should be greater than min for int fields")
	}
	return map[string]interface{}{
		"name": name,
		"type": "int",
		"min":  min,
		"max":  max,
	}, nil
}
// RangeField enables writing queries for range encoded fields.
type RangeField struct {
	frame *Frame
	name  string
	err   error // non-nil when the field name failed validation
}
// newRangeField creates a RangeField handle on frame.
// A name validation failure is not returned; it is stored in err and
// surfaces in the queries built from this field.
func newRangeField(frame *Frame, name string) *RangeField {
	err := validateLabel(name)
	return &RangeField{
		frame: frame,
		name:  name,
		err:   err,
	}
}
// LT creates a less than query.
func (field *RangeField) LT(n int) *PQLBitmapQuery {
	return field.binaryOperation("<", n)
}
// LTE creates a less than or equal query.
func (field *RangeField) LTE(n int) *PQLBitmapQuery {
	return field.binaryOperation("<=", n)
}
// GT creates a greater than query.
func (field *RangeField) GT(n int) *PQLBitmapQuery {
	return field.binaryOperation(">", n)
}
// GTE creates a greater than or equal query.
func (field *RangeField) GTE(n int) *PQLBitmapQuery {
	return field.binaryOperation(">=", n)
}
// Equals creates an equals query.
func (field *RangeField) Equals(n int) *PQLBitmapQuery {
	return field.binaryOperation("==", n)
}
// NotEquals creates a not equals query.
func (field *RangeField) NotEquals(n int) *PQLBitmapQuery {
	return field.binaryOperation("!=", n)
}
// NotNull creates a not equal to null query.
func (field *RangeField) NotNull() *PQLBitmapQuery {
	qry := fmt.Sprintf("Range(frame='%s', %s != null)", field.frame.name, field.name)
	return NewPQLBitmapQuery(qry, field.frame.index, field.err)
}
// Between creates a between query (inclusive range [a, b]).
func (field *RangeField) Between(a int, b int) *PQLBitmapQuery {
	qry := fmt.Sprintf("Range(frame='%s', %s >< [%d,%d])", field.frame.name, field.name, a, b)
	return NewPQLBitmapQuery(qry, field.frame.index, field.err)
}
// Sum creates a sum query.
// A nil bitmap sums over the whole frame.
func (field *RangeField) Sum(bitmap *PQLBitmapQuery) *PQLBaseQuery {
	bitmapStr := ""
	if bitmap != nil {
		bitmapStr = fmt.Sprintf("%s, ", bitmap.serialize())
	}
	qry := fmt.Sprintf("Sum(%sframe='%s', field='%s')", bitmapStr, field.frame.name, field.name)
	return NewPQLBaseQuery(qry, field.frame.index, field.err)
}
// SetIntValue creates a SetValue query.
func (field *RangeField) SetIntValue(columnID uint64, value int) *PQLBaseQuery {
	index := field.frame.index
	qry := fmt.Sprintf("SetFieldValue(frame='%s', %s=%d, %s=%d)",
		field.frame.name, index.options.ColumnLabel, columnID, field.name, value)
	return NewPQLBaseQuery(qry, index, nil)
}
// binaryOperation builds a Range query comparing the field to n with
// the given operator; the field's validation error (if any) is
// attached to the query.
func (field *RangeField) binaryOperation(op string, n int) *PQLBitmapQuery {
	qry := fmt.Sprintf("Range(frame='%s', %s %s %d)", field.frame.name, field.name, op, n)
	return NewPQLBitmapQuery(qry, field.frame.index, field.err)
}
// encodeMap returns the JSON encoding of m.
// Marshalling a map[string]interface{} holding JSON-encodable values
// cannot fail; an error here indicates a programming bug, hence panic.
func encodeMap(m map[string]interface{}) string {
	encoded, err := json.Marshal(m)
	if err != nil {
		panic(err)
	}
	return string(encoded)
}
|
// Copyright 2012-2018 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"errors"
"fmt"
)
var (
	// ErrConnectionClosed represents an error condition on a closed connection.
	ErrConnectionClosed = errors.New("Connection Closed")
	// ErrAuthentication represents an error condition on failed authentication.
	ErrAuthentication = errors.New("Authentication Error")
	// ErrAuthTimeout represents an error condition on failed authentication due to timeout.
	ErrAuthTimeout = errors.New("Authentication Timeout")
	// ErrAuthExpired represents an expired authentication due to timeout.
	ErrAuthExpired = errors.New("Authentication Expired")
	// ErrMaxPayload represents an error condition when the payload is too big.
	ErrMaxPayload = errors.New("Maximum Payload Exceeded")
	// ErrMaxControlLine represents an error condition when the control line is too big.
	ErrMaxControlLine = errors.New("Maximum Control Line Exceeded")
	// ErrReservedPublishSubject represents an error condition when sending to a reserved subject, e.g. _SYS.>
	ErrReservedPublishSubject = errors.New("Reserved Internal Subject")
	// ErrBadClientProtocol signals a client requested an invalid client protocol.
	ErrBadClientProtocol = errors.New("Invalid Client Protocol")
	// ErrTooManyConnections signals a client that the maximum number of connections supported by the
	// server has been reached.
	ErrTooManyConnections = errors.New("Maximum Connections Exceeded")
	// ErrTooManyAccountConnections signals that an account has reached its maximum number of active
	// connections.
	ErrTooManyAccountConnections = errors.New("Maximum Account Active Connections Exceeded")
	// ErrTooManySubs signals a client that the maximum number of subscriptions per connection
	// has been reached.
	ErrTooManySubs = errors.New("Maximum Subscriptions Exceeded")
	// ErrClientConnectedToRoutePort represents an error condition when a client
	// attempted to connect to the route listen port.
	ErrClientConnectedToRoutePort = errors.New("Attempted To Connect To Route Port")
	// ErrAccountExists is returned when an account is attempted to be registered
	// but already exists.
	ErrAccountExists = errors.New("Account Exists")
	// ErrBadAccount represents a malformed or incorrect account.
	ErrBadAccount = errors.New("Bad Account")
	// ErrReservedAccount represents a reserved account that can not be created.
	ErrReservedAccount = errors.New("Reserved Account")
	// ErrMissingAccount is returned when an account does not exist.
	ErrMissingAccount = errors.New("Account Missing")
	// ErrAccountValidation is returned when an account has failed validation.
	ErrAccountValidation = errors.New("Account Validation Failed")
	// ErrNoAccountResolver is returned when we attempt an update but do not have an account resolver.
	ErrNoAccountResolver = errors.New("Account Resolver Missing")
	// ErrStreamImportAuthorization is returned when a stream import is not authorized.
	ErrStreamImportAuthorization = errors.New("Stream Import Not Authorized")
	// ErrServiceImportAuthorization is returned when a service import is not authorized.
	ErrServiceImportAuthorization = errors.New("Service Import Not Authorized")
	// ErrClientOrRouteConnectedToGatewayPort represents an error condition when
	// a client or route attempted to connect to the Gateway port.
	ErrClientOrRouteConnectedToGatewayPort = errors.New("Attempted To Connect To Gateway Port")
	// ErrWrongGateway represents an error condition when a server receives a connect
	// request from a remote Gateway with a destination name that does not match the server's
	// Gateway's name.
	ErrWrongGateway = errors.New("Wrong Gateway")
	// ErrNoSysAccount is returned when an attempt to publish or subscribe is made
	// when there is no internal system account defined.
	ErrNoSysAccount = errors.New("System Account Not Setup")
)
// configErr is a configuration error.
type configErr struct {
	token  token  // location of the error; may be nil (see Error)
	reason string // human-readable description
}
// Source reports the location of a configuration error.
// NOTE(review): Source dereferences e.token without a nil check, while
// Error guards against a nil token — confirm callers only use Source
// when a token is present.
func (e *configErr) Source() string {
	return fmt.Sprintf("%s:%d:%d", e.token.SourceFile(), e.token.Line(), e.token.Position())
}
// Error reports the location and reason from a configuration error.
// When no token is available only the reason is returned.
func (e *configErr) Error() string {
	if e.token != nil {
		return fmt.Sprintf("%s: %s", e.Source(), e.reason)
	}
	return e.reason
}
// unknownConfigFieldErr is an error reported in pedantic mode.
// It embeds configErr for the source location.
type unknownConfigFieldErr struct {
	configErr
	field string
}
// Error reports that an unknown field was in the configuration.
func (e *unknownConfigFieldErr) Error() string {
	return fmt.Sprintf("%s: unknown field %q", e.Source(), e.field)
}
// configWarningErr is an error reported in pedantic mode.
// It embeds configErr for the source location and reason.
type configWarningErr struct {
	configErr
	field string
}
// Error reports a configuration warning.
func (e *configWarningErr) Error() string {
	return fmt.Sprintf("%s: invalid use of field %q: %s", e.Source(), e.field, e.reason)
}
// processConfigErr is the result of processing the configuration from the server.
// It aggregates hard errors and warnings collected during parsing.
type processConfigErr struct {
	errors   []error
	warnings []error
}
// Error returns the collected messages, one per line, warnings first
// and then hard errors.
func (e *processConfigErr) Error() string {
	msg := ""
	for _, w := range e.warnings {
		msg += w.Error() + "\n"
	}
	for _, hard := range e.errors {
		msg += hard.Error() + "\n"
	}
	return msg
}
// Warnings returns the list of warnings.
func (e *processConfigErr) Warnings() []error {
	return e.warnings
}
// Errors returns the list of errors.
func (e *processConfigErr) Errors() []error {
	return e.errors
}
Fix the ErrAuthentication doc comment (it previously referred to ErrAuthorization).
// Copyright 2012-2018 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"errors"
"fmt"
)
var (
	// ErrConnectionClosed represents an error condition on a closed connection.
	ErrConnectionClosed = errors.New("Connection Closed")
	// ErrAuthentication represents an error condition on failed authentication.
	ErrAuthentication = errors.New("Authentication Error")
	// ErrAuthTimeout represents an error condition on failed authentication due to timeout.
	ErrAuthTimeout = errors.New("Authentication Timeout")
	// ErrAuthExpired represents an expired authentication due to timeout.
	ErrAuthExpired = errors.New("Authentication Expired")
	// ErrMaxPayload represents an error condition when the payload is too big.
	ErrMaxPayload = errors.New("Maximum Payload Exceeded")
	// ErrMaxControlLine represents an error condition when the control line is too big.
	ErrMaxControlLine = errors.New("Maximum Control Line Exceeded")
	// ErrReservedPublishSubject represents an error condition when sending to a reserved subject, e.g. _SYS.>
	ErrReservedPublishSubject = errors.New("Reserved Internal Subject")
	// ErrBadClientProtocol signals a client requested an invalid client protocol.
	ErrBadClientProtocol = errors.New("Invalid Client Protocol")
	// ErrTooManyConnections signals a client that the maximum number of connections supported by the
	// server has been reached.
	ErrTooManyConnections = errors.New("Maximum Connections Exceeded")
	// ErrTooManyAccountConnections signals that an account has reached its maximum number of active
	// connections.
	ErrTooManyAccountConnections = errors.New("Maximum Account Active Connections Exceeded")
	// ErrTooManySubs signals a client that the maximum number of subscriptions per connection
	// has been reached.
	ErrTooManySubs = errors.New("Maximum Subscriptions Exceeded")
	// ErrClientConnectedToRoutePort represents an error condition when a client
	// attempted to connect to the route listen port.
	ErrClientConnectedToRoutePort = errors.New("Attempted To Connect To Route Port")
	// ErrAccountExists is returned when an account is attempted to be registered
	// but already exists.
	ErrAccountExists = errors.New("Account Exists")
	// ErrBadAccount represents a malformed or incorrect account.
	ErrBadAccount = errors.New("Bad Account")
	// ErrReservedAccount represents a reserved account that can not be created.
	ErrReservedAccount = errors.New("Reserved Account")
	// ErrMissingAccount is returned when an account does not exist.
	ErrMissingAccount = errors.New("Account Missing")
	// ErrAccountValidation is returned when an account has failed validation.
	ErrAccountValidation = errors.New("Account Validation Failed")
	// ErrNoAccountResolver is returned when we attempt an update but do not have an account resolver.
	ErrNoAccountResolver = errors.New("Account Resolver Missing")
	// ErrStreamImportAuthorization is returned when a stream import is not authorized.
	ErrStreamImportAuthorization = errors.New("Stream Import Not Authorized")
	// ErrServiceImportAuthorization is returned when a service import is not authorized.
	ErrServiceImportAuthorization = errors.New("Service Import Not Authorized")
	// ErrClientOrRouteConnectedToGatewayPort represents an error condition when
	// a client or route attempted to connect to the Gateway port.
	ErrClientOrRouteConnectedToGatewayPort = errors.New("Attempted To Connect To Gateway Port")
	// ErrWrongGateway represents an error condition when a server receives a connect
	// request from a remote Gateway with a destination name that does not match the server's
	// Gateway's name.
	ErrWrongGateway = errors.New("Wrong Gateway")
	// ErrNoSysAccount is returned when an attempt to publish or subscribe is made
	// when there is no internal system account defined.
	ErrNoSysAccount = errors.New("System Account Not Setup")
)
// configErr is a configuration error.
type configErr struct {
	token  token  // location of the error; may be nil (see Error)
	reason string // human-readable description
}
// Source reports the location of a configuration error.
// NOTE(review): assumes e.token is non-nil — confirm call sites.
func (e *configErr) Source() string {
	return fmt.Sprintf("%s:%d:%d", e.token.SourceFile(), e.token.Line(), e.token.Position())
}
// Error reports the location and reason from a configuration error.
// When no token is available only the reason is returned.
func (e *configErr) Error() string {
	if e.token != nil {
		return fmt.Sprintf("%s: %s", e.Source(), e.reason)
	}
	return e.reason
}
// unknownConfigFieldErr is an error reported in pedantic mode.
// It embeds configErr for the source location.
type unknownConfigFieldErr struct {
	configErr
	field string
}
// Error reports that an unknown field was in the configuration.
func (e *unknownConfigFieldErr) Error() string {
	return fmt.Sprintf("%s: unknown field %q", e.Source(), e.field)
}
// configWarningErr is an error reported in pedantic mode.
// It embeds configErr for the source location and reason.
type configWarningErr struct {
	configErr
	field string
}
// Error reports a configuration warning.
func (e *configWarningErr) Error() string {
	return fmt.Sprintf("%s: invalid use of field %q: %s", e.Source(), e.field, e.reason)
}
// processConfigErr is the result of processing the configuration from the server.
// It aggregates hard errors and warnings collected during parsing.
type processConfigErr struct {
	errors   []error
	warnings []error
}
// Error returns the collection of errors separated by new lines,
// warnings appear first then hard errors.
func (e *processConfigErr) Error() string {
	var msg string
	for _, err := range e.Warnings() {
		msg += err.Error() + "\n"
	}
	for _, err := range e.Errors() {
		msg += err.Error() + "\n"
	}
	return msg
}
// Warnings returns the list of warnings.
func (e *processConfigErr) Warnings() []error {
	return e.warnings
}
// Errors returns the list of errors.
func (e *processConfigErr) Errors() []error {
	return e.errors
}
|
/* Copyright (C) 2013 CompleteDB LLC.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with PubSubSQL. If not, see <http://www.gnu.org/licenses/>.
*/
package server
import "strconv"
// link
type link struct {
pubsub *pubsub
tg *tag
}
func (this *link) clear() {
this.pubsub = nil
this.tg = nil
}
// record
type record struct {
values []string
links []link
prev *record
next *record
}
// record factory
// newRecord creates a record with the given number of columns and
// stores the record id (as a decimal string) in column 0.
func newRecord(columns int, id int) *record {
	rec := record{
		// len == columns is all we need; the explicit cap argument was
		// redundant (make(len) already sets cap == len).
		values: make([]string, columns),
	}
	rec.setValue(0, strconv.Itoa(id))
	return &rec
}
// free releases all references held by the record so the values,
// links and neighboring records can be garbage collected.
func (this *record) free() {
	this.values = nil
	this.links = nil
	this.prev = nil
	this.next = nil
}
// Returns record index in a table.
// Column 0 must hold a decimal integer; a non-numeric value indicates
// a programming error and panics.
func (r *record) id() int {
	id, err := strconv.Atoi(r.values[0])
	if err != nil {
		// The old message ("record id can not be 0") misdescribed the
		// failure: we get here when the value is not a number at all.
		panic("record id is not a valid integer: " + r.values[0])
	}
	return id
}
// Returns record index in a table as string.
func (r *record) idAsString() string {
	return r.values[0]
}
// Returns value based on column ordinal.
// Empty string is returned for an invalid ordinal (out of range or
// negative). The old check let a negative ordinal through and panicked
// on the index expression.
func (this *record) getValue(ordinal int) string {
	if ordinal >= 0 && ordinal < len(this.values) {
		return this.values[ordinal]
	}
	return ""
}
// Sets value based on column ordinal.
// Automatically adjusts the record if ordinal is invalid.
// Note: only too-large ordinals are handled by growing the slice; a
// negative ordinal still panics on the index expression.
func (this *record) setValue(ordinal int, val string) {
	l := len(this.values)
	if l <= ordinal {
		// Grow with empty strings up to and including the ordinal.
		delta := ordinal - l + 1
		temp := make([]string, delta)
		this.values = append(this.values, temp...)
	}
	this.values[ordinal] = val
}
// addSubscription adds subscription to the record, lazily creating the
// pubsub engine on the primary link.
func (this *record) addSubscription(sub *subscription) {
	// Operate on the field itself: the previous code took the address
	// of the field (a **pubsub) and called a method through it, which
	// does not compile since Go 1.4.
	if this.links[0].pubsub == nil {
		this.links[0].pubsub = new(pubsub)
	}
	this.links[0].pubsub.add(sub)
}
Calling a method through a double pointer has not been allowed since Go 1.4.
/* Copyright (C) 2013 CompleteDB LLC.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with PubSubSQL. If not, see <http://www.gnu.org/licenses/>.
*/
package server
import "strconv"
// link
type link struct {
pubsub *pubsub
tg *tag
}
func (this *link) clear() {
this.pubsub = nil
this.tg = nil
}
// record
type record struct {
values []string
links []link
prev *record
next *record
}
// record factory
// newRecord creates a record with the given number of columns and
// stores the record id (as a decimal string) in column 0.
func newRecord(columns int, id int) *record {
	rec := record{
		// len == columns suffices; the explicit cap argument was redundant.
		values: make([]string, columns),
	}
	rec.setValue(0, strconv.Itoa(id))
	return &rec
}
// free releases all references held by the record so the values,
// links and neighboring records can be garbage collected.
func (this *record) free() {
	this.values = nil
	this.links = nil
	this.prev = nil
	this.next = nil
}
// Returns record index in a table.
// Column 0 must hold a decimal integer; a non-numeric value indicates
// a programming error and panics.
func (r *record) id() int {
	id, err := strconv.Atoi(r.values[0])
	if err != nil {
		// The old message ("record id can not be 0") misdescribed the
		// failure: we get here when the value is not a number at all.
		panic("record id is not a valid integer: " + r.values[0])
	}
	return id
}
// Returns record index in a table as string.
func (r *record) idAsString() string {
	return r.values[0]
}
// Returns value based on column ordinal.
// Empty string is returned for an invalid ordinal (out of range or
// negative). The old check let a negative ordinal through and panicked
// on the index expression.
func (this *record) getValue(ordinal int) string {
	if ordinal >= 0 && ordinal < len(this.values) {
		return this.values[ordinal]
	}
	return ""
}
// Sets value based on column ordinal.
// Automatically adjusts the record if ordinal is invalid.
// Note: only too-large ordinals are handled by growing the slice; a
// negative ordinal still panics on the index expression.
func (this *record) setValue(ordinal int, val string) {
	l := len(this.values)
	if l <= ordinal {
		// Grow with empty strings up to and including the ordinal.
		delta := ordinal - l + 1
		temp := make([]string, delta)
		this.values = append(this.values, temp...)
	}
	this.values[ordinal] = val
}
// addSubscription adds subscription to the record, lazily creating the
// pubsub engine on the primary link on first use.
func (this *record) addSubscription(sub *subscription) {
	if this.links[0].pubsub == nil {
		this.links[0].pubsub = new(pubsub)
	}
	this.links[0].pubsub.add(sub)
}
|
package main
import (
"flag"
"log"
"net"
"net/http"
"net/rpc"
)
// Nothing is a placeholder type for RPC methods that need no payload.
type Nothing bool
// Message is a chat message envelope.
// NOTE(review): Message is not referenced in this file — presumably
// used by the client side; confirm before removing.
type Message struct {
	User   string
	Target string
	Msg    string
}
// ChatServer holds the chat state exposed over net/rpc.
type ChatServer struct {
	port         string              // listen address in ":port" form (see parseFlags)
	messageQueue map[string][]string // pending messages per user
	users        []string            // currently registered usernames
	shutdown     chan bool           // signals main to exit (see Shutdown)
}
// RegisterGoofs adds username to the chat, returns a greeting plus the
// current user list via reply, and queues a join notice for every user.
// NOTE(review): ChatServer state is mutated without synchronization and
// net/rpc serves requests concurrently — confirm and add a mutex if needed.
func (c *ChatServer) RegisterGoofs(username string, reply *string) error {
	*reply = "Welcome to GOOF TALK\n"
	*reply += "List of GOOFS online:\n"
	c.users = append(c.users, username)
	c.messageQueue[username] = nil
	for _, value := range c.users {
		*reply += value + "\n"
	}
	// `for k := range` replaces the redundant `for k, _ := range`.
	for k := range c.messageQueue {
		c.messageQueue[k] = append(c.messageQueue[k], username+" has joined.")
	}
	log.Printf("%s has joined the chat.\n", username)
	return nil
}
// ListGoofs appends a header followed by every online username to reply.
func (c *ChatServer) ListGoofs(none Nothing, reply *[]string) error {
	*reply = append(*reply, "Current online Goofs:")
	for _, user := range c.users {
		*reply = append(*reply, user)
	}
	log.Println("Dumped list of Goofs to client output")
	return nil
}
// parseFlags reads the -port flag (default 3410) and stores the listen
// address on cs in ":port" form.
func parseFlags(cs *ChatServer) {
	flag.StringVar(&cs.port, "port", "3410", "port for chat server to listen on")
	flag.Parse()
	cs.port = ":" + cs.port
}
// RunServer registers cs with net/rpc, binds the listen port and starts
// serving HTTP-tunneled RPC in a background goroutine. Panics if the
// port cannot be bound.
// NOTE(review): errors from rpc.Register and http.Serve are ignored —
// confirm this is acceptable for this toy server.
func RunServer(cs *ChatServer) {
	rpc.Register(cs)
	rpc.HandleHTTP()
	log.Printf("Listening on port %s...\n", cs.port)
	l, err := net.Listen("tcp", cs.port)
	if err != nil {
		log.Panicf("Can't bind port to listen. %q", err)
	}
	go http.Serve(l, nil)
}
// main wires up the server and blocks until a shutdown is signaled on
// cs.shutdown.
func main() {
	cs := new(ChatServer)
	cs.messageQueue = make(map[string][]string)
	cs.shutdown = make(chan bool, 1)
	parseFlags(cs)
	RunServer(cs)
	<-cs.shutdown
}
Updated server.go: added a Shutdown RPC method.
package main
import (
"flag"
"log"
"net"
"net/http"
"net/rpc"
)
// Nothing is a placeholder type for RPC methods that need no payload.
type Nothing bool
// Message is a chat message envelope.
// NOTE(review): Message is not referenced in this file — presumably
// used by the client side; confirm before removing.
type Message struct {
	User   string
	Target string
	Msg    string
}
// ChatServer holds the chat state exposed over net/rpc.
type ChatServer struct {
	port         string              // listen address in ":port" form (see parseFlags)
	messageQueue map[string][]string // pending messages per user
	users        []string            // currently registered usernames
	shutdown     chan bool           // signals main to exit (see Shutdown)
}
// RegisterGoofs adds username to the roster, creates their message queue,
// queues a join notice for every user, and returns the ASCII banner plus
// the list of everyone online.
// NOTE(review): concurrent RPC calls mutate c.users and c.messageQueue
// without a lock — confirm single-client use or add a mutex.
func (c *ChatServer) RegisterGoofs(username string, reply *string) error {
	*reply = " _____ ____ ____ ______ _______ _ _ \n"
	*reply += " / ____| / __ \\ / __ \\ | ____| |__ __| | | | | \n"
	*reply += " | | __ | | | | | | | | | |__ | | __ _ | | | | __ \n"
	*reply += " | | |_ | | | | | | | | | | __| | | / _` | | | | |/ / \n"
	*reply += " | |__| | | |__| | | |__| | | | | | | (_| | | | | < \n"
	*reply += " \\_____| \\____/ \\____/ |_| |_| \\__,_| |_| |_|\\_\\ v1.0\n"
	*reply += "List of GOOFS online:\n"
	c.users = append(c.users, username)
	c.messageQueue[username] = nil
	for _, value := range c.users {
		*reply += value + "\n"
	}
	// Key-only range: `for k, _ := range` is redundant and flagged by vet.
	for k := range c.messageQueue {
		c.messageQueue[k] = append(c.messageQueue[k], username+" has joined.")
	}
	log.Printf("%s has joined the chat.\n", username)
	return nil
}
// ListGoofs writes a header followed by every online user's name into reply.
func (c *ChatServer) ListGoofs(none Nothing, reply *[]string) error {
	names := append(*reply, "Current online Goofs:")
	for _, name := range c.users {
		names = append(names, name)
	}
	*reply = names
	log.Println("Dumped list of Goofs to client output")
	return nil
}
// Logout removes username from the online roster; unknown names are a no-op.
func (c *ChatServer) Logout(username string, reply *[]string) error {
	for i := range c.users {
		if c.users[i] != username {
			continue
		}
		// Splice the user out of the slice.
		c.users = append(c.users[:i], c.users[i+1:]...)
		break
	}
	log.Printf("%s has left the chat", username)
	return nil
}
// Shutdown logs out every connected user, signals main to exit via the
// shutdown channel, and acknowledges the caller with a false Nothing.
func (c *ChatServer) Shutdown(nothing Nothing, reply *Nothing) error {
	// BUG FIX: Logout mutates c.users while it is being ranged over,
	// which skipped every other user. Walk a snapshot instead.
	users := make([]string, len(c.users))
	copy(users, c.users)
	var rep []string
	for _, val := range users {
		c.Logout(val, &rep)
	}
	log.Println("Server shutdown...Goodbye.")
	*reply = false
	c.shutdown <- true
	return nil
}
// parseFlags reads the -port flag (default "3410") and stores it on cs
// as a ":port" listen address suitable for net.Listen.
func parseFlags(cs *ChatServer) {
	flag.StringVar(&cs.port, "port", "3410", "port for chat server to listen on")
	flag.Parse()
	cs.port = ":" + cs.port
}
// RunServer registers cs with net/rpc, binds the configured TCP address
// and serves HTTP-RPC requests in the background. It panics via
// log.Panicf if registration or binding fails.
func RunServer(cs *ChatServer) {
	// Previously the rpc.Register error was silently discarded.
	if err := rpc.Register(cs); err != nil {
		log.Panicf("Can't register RPC service. %q", err)
	}
	rpc.HandleHTTP()
	log.Printf("Listening on port %s...\n", cs.port)
	l, err := net.Listen("tcp", cs.port)
	if err != nil {
		log.Panicf("Can't bind port to listen. %q", err)
	}
	// Serve in the background; main blocks on the shutdown channel.
	go http.Serve(l, nil)
}
// main constructs the chat server, parses flags, starts the RPC listener
// and blocks until the Shutdown RPC fires the shutdown channel.
func main() {
	server := &ChatServer{
		messageQueue: make(map[string][]string),
		shutdown:     make(chan bool, 1),
	}
	parseFlags(server)
	RunServer(server)
	<-server.shutdown
}
|
package main
import (
"bufio"
"bytes"
"encoding/json"
"errors"
"flag"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"os/signal"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"github.com/gorilla/mux"
"github.com/slugalisk/overrustlelogs/common"
"github.com/xlab/handysort"
"github.com/yosssi/ace"
)
// temp ish.. move to config
const (
	// LogLinePrefixLength is the byte length of the fixed timestamp prefix
	// ("[2017-01-10 08:57:47 UTC] ") at the start of every log line.
	LogLinePrefixLength = len("[2017-01-10 08:57:47 UTC] ")
)

// errors returned to clients
var (
	ErrUserNotFound      = errors.New("didn't find any logs for this user")
	ErrNotFound          = errors.New("file not found")
	ErrSearchKeyNotFound = errors.New("didn't find what you were looking for :(")
)

// log file extension patterns
var (
	// LogExtension matches plain and lz4-compressed day log files.
	LogExtension = regexp.MustCompile(`\.txt(\.lz4)?$`)
	// NicksExtension matches per-month nick index files.
	NicksExtension = regexp.MustCompile(`\.nicks\.lz4$`)
)
// init parses the -config flag and loads the shared configuration before
// main runs.
// NOTE(review): calling flag.Parse in init interferes with test flags —
// consider moving this into main.
func init() {
	configPath := flag.String("config", "", "config path")
	flag.Parse()
	common.SetupConfig(*configPath)
}
// Start server
// main wires up every HTTP route, starts the server in the background and
// blocks until SIGINT/SIGTERM.
func main() {
	log.SetFlags(log.LstdFlags | log.Lshortfile)
	d := NewDebugger()
	r := mux.NewRouter()
	r.StrictSlash(true)
	// Static pages.
	r.HandleFunc("/", d.WatchHandle("Base", BaseHandle)).Methods("GET")
	r.HandleFunc("/contact", d.WatchHandle("Contact", ContactHandle)).Methods("GET")
	r.HandleFunc("/changelog", d.WatchHandle("Changelog", ChangelogHandle)).Methods("GET")
	// Mention search (default channel and per-channel variants).
	r.HandleFunc("/mentions/{nick:[a-zA-Z0-9_-]{1,25}}.txt", d.WatchHandle("MentionsHandle", MentionsHandle)).Methods("GET").Queries("date", "{date:[0-9]{4}-[0-9]{2}-[0-9]{2}}")
	r.HandleFunc("/mentions/{nick:[a-zA-Z0-9_-]{1,25}}.txt", d.WatchHandle("MentionsHandle", MentionsHandle)).Methods("GET")
	r.HandleFunc("/mentions/{nick:[a-zA-Z0-9_-]{1,25}}", d.WatchHandle("MentionsHandle", WrapperHandle)).Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+}/mentions/{nick:[a-zA-Z0-9_-]{1,25}}.txt", d.WatchHandle("MentionsHandle", MentionsHandle)).Methods("GET").Queries("date", "{date:[0-9]{4}-[0-9]{2}-[0-9]{2}}")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+}/mentions/{nick:[a-zA-Z0-9_-]{1,25}}.txt", d.WatchHandle("MentionsHandle", MentionsHandle)).Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+}/mentions/{nick:[a-zA-Z0-9_-]{1,25}}", d.WatchHandle("MentionsHandle", WrapperHandle)).Methods("GET")
	// Channel / month / day browsing. ".txt" routes serve plain text, the
	// extensionless twins serve the HTML wrapper page.
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}", d.WatchHandle("Channel", ChannelHandle)).Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/{month:[a-zA-Z]+ [0-9]{4}}", d.WatchHandle("Month", MonthHandle)).Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/{month:[a-zA-Z]+ [0-9]{4}}/{date:[0-9]{4}-[0-9]{2}-[0-9]{2}}.txt", d.WatchHandle("Day", DayHandle)).Queries("search", "{filter:.+}").Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/{month:[a-zA-Z]+ [0-9]{4}}/{date:[0-9]{4}-[0-9]{2}-[0-9]{2}}.txt", d.WatchHandle("Day", DayHandle)).Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/{month:[a-zA-Z]+ [0-9]{4}}/{date:[0-9]{4}-[0-9]{2}-[0-9]{2}}", d.WatchHandle("Day", WrapperHandle)).Methods("GET")
	// Per-user logs.
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/{month:[a-zA-Z]+ [0-9]{4}}/userlogs", d.WatchHandle("Users", UsersHandle)).Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/{month:[a-zA-Z]+ [0-9]{4}}/userlogs/{nick:[a-zA-Z0-9_-]{1,25}}.txt", d.WatchHandle("User", UserHandle)).Queries("search", "{filter:.+}").Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/{month:[a-zA-Z]+ [0-9]{4}}/userlogs/{nick:[a-zA-Z0-9_-]{1,25}}.txt", d.WatchHandle("User", UserHandle)).Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/{month:[a-zA-Z]+ [0-9]{4}}/userlogs/{nick:[a-zA-Z0-9_-]{1,25}}", d.WatchHandle("User", WrapperHandle)).Methods("GET")
	// Premium (user + replies) logs.
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/premium/{nick:[a-zA-Z0-9_-]{1,25}}", d.WatchHandle("Premium", PremiumHandle)).Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/premium/{nick:[a-zA-Z0-9_-]{1,25}}/{month:[a-zA-Z]+ [0-9]{4}}.txt", d.WatchHandle("PremiumUser", PremiumUserHandle)).Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/premium/{nick:[a-zA-Z0-9_-]{1,25}}/{month:[a-zA-Z]+ [0-9]{4}}", d.WatchHandle("PremiumUser", WrapperHandle)).Methods("GET")
	// Current-month shortcuts.
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/current", d.WatchHandle("CurrentBase", CurrentBaseHandle)).Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/current/{nick:[a-zA-Z0-9_]+}.txt", d.WatchHandle("NickHandle", NickHandle)).Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/current/{nick:[a-zA-Z0-9_]+}", d.WatchHandle("NickHandle", WrapperHandle)).Methods("GET")
	// Destinygg-specific meta logs must be registered before the generic
	// {channel} routes so they take precedence.
	r.HandleFunc("/Destinygg chatlog/{month:[a-zA-Z]+ [0-9]{4}}/broadcaster.txt", d.WatchHandle("DestinyBroadcaster", DestinyBroadcasterHandle)).Methods("GET")
	r.HandleFunc("/Destinygg chatlog/{month:[a-zA-Z]+ [0-9]{4}}/broadcaster", d.WatchHandle("DestinyBroadcaster", WrapperHandle)).Methods("GET")
	r.HandleFunc("/Destinygg chatlog/{month:[a-zA-Z]+ [0-9]{4}}/subscribers.txt", d.WatchHandle("DestinySubscriber", DestinySubscriberHandle)).Methods("GET")
	r.HandleFunc("/Destinygg chatlog/{month:[a-zA-Z]+ [0-9]{4}}/subscribers", d.WatchHandle("DestinySubscriber", WrapperHandle)).Methods("GET")
	r.HandleFunc("/Destinygg chatlog/{month:[a-zA-Z]+ [0-9]{4}}/bans.txt", d.WatchHandle("DestinyBan", DestinyBanHandle)).Methods("GET")
	r.HandleFunc("/Destinygg chatlog/{month:[a-zA-Z]+ [0-9]{4}}/bans", d.WatchHandle("DestinyBan", WrapperHandle)).Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/{month:[a-zA-Z]+ [0-9]{4}}/broadcaster.txt", d.WatchHandle("Broadcaster", BroadcasterHandle)).Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/{month:[a-zA-Z]+ [0-9]{4}}/broadcaster", d.WatchHandle("Broadcaster", WrapperHandle)).Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/{month:[a-zA-Z]+ [0-9]{4}}/subscribers.txt", d.WatchHandle("Subscriber", SubscriberHandle)).Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/{month:[a-zA-Z]+ [0-9]{4}}/subscribers", d.WatchHandle("Subscriber", WrapperHandle)).Methods("GET")
	// JSON API.
	r.HandleFunc("/api/v1/stalk/{channel:[a-zA-Z0-9_-]+ chatlog}/{nick:[a-zA-Z0-9_-]+}.json", d.WatchHandle("Stalk", StalkHandle)).Queries("limit", "{limit:[0-9]+}").Methods("GET")
	r.HandleFunc("/api/v1/stalk/{channel:[a-zA-Z0-9_-]+ chatlog}/{nick:[a-zA-Z0-9_-]+}.json", d.WatchHandle("Stalk", StalkHandle)).Methods("GET")
	r.HandleFunc("/api/v1/status.json", d.WatchHandle("Debug", d.HTTPHandle))
	r.NotFoundHandler = http.HandlerFunc(NotFoundHandle)
	// r.PathPrefix("/assets/").Handler(http.StripPrefix("/assets/", http.FileServer(http.Dir("assets"))))
	srv := &http.Server{
		Addr:         common.GetConfig().Server.Address,
		Handler:      r,
		ReadTimeout:  5 * time.Second,
		WriteTimeout: 10 * time.Second,
	}
	// Serve in the background; ListenAndServe's error is not checked here.
	go srv.ListenAndServe()
	// Block until interrupted, then exit immediately (no graceful drain).
	sigint := make(chan os.Signal, 1)
	signal.Notify(sigint, os.Interrupt, syscall.SIGTERM)
	<-sigint
	log.Println("i love you guys, be careful")
	os.Exit(0)
}
// Debugger tracks per-handler in-flight request counts and logs them
// once a minute.
type Debugger struct {
	mu       sync.Mutex        // guards counters
	counters map[string]*int64 // handler name -> in-flight request count
}
// NewDebugger creates a Debugger and starts a goroutine that dumps the
// counters once a minute for the lifetime of the process.
func NewDebugger() *Debugger {
	d := &Debugger{counters: make(map[string]*int64)}
	go func() {
		// Process-lifetime goroutine, so the ticker is never stopped.
		for range time.Tick(time.Minute) {
			d.DebugPrint()
		}
	}()
	return d
}
// WatchHandle wraps f so the number of in-flight requests is tracked
// under name and every request's path and duration are logged.
func (d *Debugger) WatchHandle(name string, f http.HandlerFunc) http.HandlerFunc {
	d.mu.Lock()
	c, ok := d.counters[name]
	if !ok {
		c = new(int64)
		d.counters[name] = c
	}
	d.mu.Unlock()
	return func(w http.ResponseWriter, r *http.Request) {
		atomic.AddInt64(c, 1)
		defer atomic.AddInt64(c, -1)
		start := time.Now()
		f.ServeHTTP(w, r)
		log.Printf("served \"%s\" in %s", r.URL.Path, time.Since(start))
	}
}
// counts returns a snapshot of every handler's in-flight request count.
func (d *Debugger) counts() map[string]int64 {
	d.mu.Lock()
	defer d.mu.Unlock()
	snapshot := make(map[string]int64, len(d.counters))
	for name, c := range d.counters {
		snapshot[name] = atomic.LoadInt64(c)
	}
	return snapshot
}
// DebugPrint logs the current in-flight request count for every handler.
func (d *Debugger) DebugPrint() {
	log.Println(d.counts())
}
// HTTPHandle serves the debugger's counter snapshot as a JSON object.
func (d *Debugger) HTTPHandle(w http.ResponseWriter, r *http.Request) {
	// Marshalling a map[string]int64 cannot fail, so the error is ignored.
	body, _ := json.Marshal(d.counts())
	w.Write(body)
}
// NotFoundHandle renders the error page for unmatched routes.
func NotFoundHandle(w http.ResponseWriter, r *http.Request) {
	serveError(w, ErrNotFound)
}
// BaseHandle renders the top-level channel directory listing.
func BaseHandle(w http.ResponseWriter, r *http.Request) {
	channels, err := readDirIndex(common.GetConfig().LogPath)
	if err != nil {
		serveError(w, err)
		return
	}
	serveDirIndex(w, []string{}, channels)
}
// WrapperHandle serves the static HTML page that embeds the plain-text
// version of the requested log URL in the log viewer.
func WrapperHandle(w http.ResponseWriter, r *http.Request) {
	// Use filepath.Join for the template paths, consistent with
	// serveError and serveDirIndex.
	tpl, err := ace.Load(filepath.Join(common.GetConfig().Server.ViewPath, "layout"), filepath.Join(common.GetConfig().Server.ViewPath, "wrapper"), nil)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-type", "text/html; charset=UTF-8")
	// Point at the ".txt" twin of this URL, preserving any query string
	// (e.g. search filters).
	path := r.URL.Path + ".txt"
	if r.URL.RawQuery != "" {
		path += "?" + r.URL.RawQuery
	}
	if err := tpl.Execute(w, struct{ Path string }{Path: path}); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
// ContactHandle contact page
func ContactHandle(w http.ResponseWriter, r *http.Request) {
tpl, err := ace.Load(common.GetConfig().Server.ViewPath+"/layout", common.GetConfig().Server.ViewPath+"/contact", nil)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.Header().Set("Content-type", "text/html")
if err := tpl.Execute(w, nil); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
// ChangelogHandle changelog page
func ChangelogHandle(w http.ResponseWriter, r *http.Request) {
tpl, err := ace.Load(common.GetConfig().Server.ViewPath+"/layout", common.GetConfig().Server.ViewPath+"/changelog", nil)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.Header().Set("Content-type", "text/html")
if err := tpl.Execute(w, nil); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
// ChannelHandle renders a channel's month directory, newest month first.
func ChannelHandle(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	months, err := readDirIndex(filepath.Join(common.GetConfig().LogPath, vars["channel"]))
	if err != nil {
		serveError(w, err)
		return
	}
	sort.Sort(dirsByMonth(months))
	serveDirIndex(w, []string{vars["channel"]}, months)
}
// MonthHandle renders one month of a channel: meta entries (userlogs,
// broadcaster, subscribers — plus bans for Destinygg) followed by the day
// logs in chronological order, with ".lz4" stripped from display names.
func MonthHandle(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	paths, err := readLogDir(filepath.Join(common.GetConfig().LogPath, vars["channel"], vars["month"]))
	if err != nil {
		serveError(w, err)
		return
	}
	sort.Sort(dirsByDay(paths))
	metaPaths := []string{"userlogs", "broadcaster.txt", "subscribers.txt"}
	if vars["channel"] == "Destinygg chatlog" {
		metaPaths = append(metaPaths, "bans.txt")
	}
	// Prepend the meta entries so they list before the day logs.
	paths = append(metaPaths, paths...)
	for i, path := range paths {
		paths[i] = LogExtension.ReplaceAllString(path, ".txt")
	}
	serveDirIndex(w, []string{vars["channel"], vars["month"]}, paths)
}
// DayHandle streams one day's log as plain text, optionally keeping only
// lines containing the "search" query parameter. Responds 404 when a
// search matched nothing.
func DayHandle(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	data, err := readLogFile(filepath.Join(common.GetConfig().LogPath, vars["channel"], vars["month"], vars["date"]))
	if err != nil {
		http.Error(w, err.Error(), http.StatusNotFound)
		return
	}
	w.Header().Set("Content-type", "text/plain; charset=UTF-8")
	w.Header().Set("Cache-control", "max-age=60")
	_, filtering := vars["filter"]
	keep := func(l []byte, f string) bool { return true }
	if filtering {
		keep = filterKey
	}
	lineCount := 0
	reader := bufio.NewReaderSize(bytes.NewReader(data), len(data))
	for {
		line, err := reader.ReadSlice('\n')
		if err != nil {
			if err != io.EOF {
				log.Printf("error reading bytes %s", err)
			}
			break
		}
		if keep(line, vars["filter"]) {
			w.Write(line)
			lineCount++
		}
	}
	if lineCount == 0 && filtering {
		http.Error(w, ErrSearchKeyNotFound.Error(), http.StatusNotFound)
	}
}
// UsersHandle lists every nick found in a month's .nicks index files as a
// directory of per-user .txt logs, natural-sorted.
func UsersHandle(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	f, err := os.Open(filepath.Join(common.GetConfig().LogPath, vars["channel"], vars["month"]))
	if err != nil {
		serveError(w, ErrNotFound)
		return
	}
	// BUG FIX: the directory handle was previously never closed.
	defer f.Close()
	files, err := f.Readdir(0)
	if err != nil {
		serveError(w, err)
		return
	}
	nicks := common.NickList{}
	for _, file := range files {
		if NicksExtension.MatchString(file.Name()) {
			common.ReadNickList(nicks, filepath.Join(common.GetConfig().LogPath, vars["channel"], vars["month"], file.Name()))
		}
	}
	names := make([]string, 0, len(nicks))
	for nick := range nicks {
		names = append(names, nick+".txt")
	}
	sort.Sort(handysort.Strings(names))
	serveDirIndex(w, []string{vars["channel"], vars["month"], "userlogs"}, names)
}
// UserHandle streams one user's lines for a month, optionally restricted
// to lines containing the "search" query parameter.
func UserHandle(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	dir := filepath.Join(common.GetConfig().LogPath, vars["channel"], vars["month"])
	if filter, ok := vars["filter"]; ok {
		serveFilteredLogs(w, dir, searchKey(vars["nick"], filter))
		return
	}
	serveFilteredLogs(w, dir, nickFilter(vars["nick"]))
}
// PremiumHandle lists the month files available for a premium user.
func PremiumHandle(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	months, err := readDirIndex(filepath.Join(common.GetConfig().LogPath, vars["channel"]))
	if err != nil {
		http.Error(w, err.Error(), http.StatusNotFound)
		return
	}
	for i := range months {
		months[i] += ".txt"
	}
	serveDirIndex(w, []string{vars["channel"], "premium", vars["nick"]}, months)
}
// PremiumUserHandle streams every line that mentions the nick anywhere
// (the user's own lines plus replies), case-insensitively.
func PremiumUserHandle(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	needle := bytes.ToLower([]byte(vars["nick"]))
	serveFilteredLogs(w, filepath.Join(common.GetConfig().LogPath, vars["channel"], vars["month"]), func(line []byte) bool {
		return bytes.Contains(bytes.ToLower(line), needle)
	})
}
// BroadcasterHandle streams the channel owner's own lines for a month.
// The broadcaster nick is the channel name minus the trailing " chatlog"
// suffix (8 characters).
func BroadcasterHandle(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	nick := vars["channel"][:len(vars["channel"])-8]
	search, err := common.NewNickSearch(filepath.Join(common.GetConfig().LogPath, vars["channel"]), nick)
	if err != nil {
		http.Error(w, ErrUserNotFound.Error(), http.StatusNotFound)
		return
	}
	rs, err := search.Next()
	if err == io.EOF {
		http.Error(w, ErrUserNotFound.Error(), http.StatusNotFound)
		return
	} else if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		// BUG FIX: previously fell through and served logs after already
		// writing the error response.
		return
	}
	serveFilteredLogs(w, filepath.Join(common.GetConfig().LogPath, vars["channel"], vars["month"]), nickFilter(rs.Nick()))
}
// SubscriberHandle streams the month's twitchnotify lines (subscription
// notices).
func SubscriberHandle(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	dir := filepath.Join(common.GetConfig().LogPath, vars["channel"], vars["month"])
	serveFilteredLogs(w, dir, nickFilter("twitchnotify"))
}
// DestinyBroadcasterHandle destiny logs
func DestinyBroadcasterHandle(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
serveFilteredLogs(w, filepath.Join(common.GetConfig().LogPath, "Destinygg chatlog", vars["month"]), nickFilter("Destiny"))
}
// DestinySubscriberHandle destiny subscriber logs
func DestinySubscriberHandle(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
serveFilteredLogs(w, filepath.Join(common.GetConfig().LogPath, "Destinygg chatlog", vars["month"]), nickFilter("Subscriber"))
}
// DestinyBanHandle channel ban list
func DestinyBanHandle(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
serveFilteredLogs(w, filepath.Join(common.GetConfig().LogPath, "Destinygg chatlog", vars["month"]), nickFilter("Ban"))
}
// CurrentBaseHandle shows the current month's listing for the channel by
// injecting the month route variable and delegating to MonthHandle.
func CurrentBaseHandle(w http.ResponseWriter, r *http.Request) {
	month := time.Now().Format("January 2006")
	vars := mux.Vars(r)
	vars["month"] = month
	MonthHandle(w, r)
}
// convertChannelCase normalizes a channel path segment: the whole string
// is lowercased, then only the first word is title-cased
// (e.g. "DESTINYGG CHATLOG" -> "Destinygg chatlog").
func convertChannelCase(ch string) string {
	parts := strings.Split(strings.ToLower(ch), " ")
	parts[0] = strings.Title(parts[0])
	return strings.Join(parts, " ")
}
// NickHandle serves the user's most recent month of logs. When the
// canonical nick differs from the requested one, the client is redirected
// to the canonical URL instead.
func NickHandle(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	vars["channel"] = convertChannelCase(vars["channel"])
	search, err := common.NewNickSearch(filepath.Join(common.GetConfig().LogPath, vars["channel"]), vars["nick"])
	if err != nil {
		http.Error(w, ErrUserNotFound.Error(), http.StatusNotFound)
		return
	}
	rs, err := search.Next()
	if err != nil {
		http.Error(w, ErrUserNotFound.Error(), http.StatusNotFound)
		return
	}
	if rs.Nick() != vars["nick"] {
		// Named status constant instead of the magic number 301.
		http.Redirect(w, r, "./"+rs.Nick()+".txt", http.StatusMovedPermanently)
		return
	}
	vars["month"] = rs.Month()
	UserHandle(w, r)
}
// MentionsHandle streams every line of a day's log in which the given
// nick is mentioned (appears after the speaker prefix, preceded by a
// space). Only served on the dgg./ttv. subdomains.
func MentionsHandle(w http.ResponseWriter, r *http.Request) {
	// BUG FIX: r.Host[:4] panicked when the Host header was shorter than
	// four characters; strings.HasPrefix is safe for any length.
	if !strings.HasPrefix(r.Host, "dgg.") && !strings.HasPrefix(r.Host, "ttv.") {
		http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
		return
	}
	vars := mux.Vars(r)
	if _, ok := vars["channel"]; ok {
		vars["channel"] = strings.Title(vars["channel"]) + " chatlog"
	} else {
		vars["channel"] = "Destinygg chatlog"
	}
	// Default to today (UTC) when no ?date= was supplied.
	if _, ok := vars["date"]; !ok {
		vars["date"] = time.Now().UTC().Format("2006-01-02")
	}
	t, err := time.Parse("2006-01-02", vars["date"])
	if err != nil {
		http.Error(w, "invalid date format", http.StatusNotFound)
		return
	}
	if t.After(time.Now().UTC()) {
		http.Error(w, "can't look into the future D:", http.StatusNotFound)
		return
	}
	data, err := readLogFile(filepath.Join(common.GetConfig().LogPath, vars["channel"], t.Format("January 2006"), t.Format("2006-01-02")))
	if err != nil {
		http.Error(w, "no logs found :(", http.StatusNotFound)
		return
	}
	w.Header().Set("Content-type", "text/plain; charset=UTF-8")
	var lineCount int
	reader := bufio.NewReaderSize(bytes.NewReader(data), len(data))
	for {
		line, err := reader.ReadSlice('\n')
		if err != nil {
			if err != io.EOF {
				log.Printf("error reading bytes %s", err)
			}
			break
		}
		// Search only the message body: skip the timestamp prefix and the
		// "speaker:" portion before looking for " <nick>".
		if bytes.Contains(line[bytes.Index(line[LogLinePrefixLength:], []byte(":"))+LogLinePrefixLength:], []byte(" "+vars["nick"])) {
			w.Write(line)
			lineCount++
		}
	}
	if lineCount == 0 {
		http.Error(w, "no mentions :(", http.StatusNotFound)
	}
}
// StalkHandle return n most recent lines of chat for user, as JSON:
// {"nick": ..., "lines": [{"timestamp": ..., "text": ...}, ...]}.
func StalkHandle(w http.ResponseWriter, r *http.Request) {
	// Error is the JSON shape used for every error response.
	type Error struct {
		Error string `json:"error"`
	}
	w.Header().Set("Content-type", "application/json")
	vars := mux.Vars(r)
	// Default to 3 lines when no ?limit= was given.
	if _, ok := vars["limit"]; !ok {
		vars["limit"] = "3"
	}
	limit, err := strconv.ParseUint(vars["limit"], 10, 32)
	if err != nil {
		d, _ := json.Marshal(Error{err.Error()})
		http.Error(w, string(d), http.StatusBadRequest)
		return
	}
	// Clamp to the configured maximum; zero falls back to 3.
	if limit > uint64(common.GetConfig().Server.MaxStalkLines) {
		limit = uint64(common.GetConfig().Server.MaxStalkLines)
	} else if limit < 1 {
		limit = 3
	}
	// buf is filled from the END backwards; index marks the position of
	// the oldest line stored so far.
	buf := make([]string, limit)
	index := limit
	search, err := common.NewNickSearch(filepath.Join(common.GetConfig().LogPath, vars["channel"]), vars["nick"])
	if err != nil {
		d, _ := json.Marshal(Error{err.Error()})
		http.Error(w, string(d), http.StatusNotFound)
		return
	}
	// Walk day files newest-first until buf holds `limit` lines or the
	// search is exhausted.
ScanLogs:
	for {
		rs, err := search.Next()
		if err == io.EOF {
			break
		} else if err != nil {
			d, _ := json.Marshal(Error{err.Error()})
			http.Error(w, string(d), http.StatusInternalServerError)
			return
		}
		data, err := readLogFile(filepath.Join(common.GetConfig().LogPath, vars["channel"], rs.Month(), rs.Day()))
		if err != nil {
			d, _ := json.Marshal(Error{err.Error()})
			http.Error(w, string(d), http.StatusInternalServerError)
			return
		}
		lines := [][]byte{}
		r := bufio.NewReaderSize(bytes.NewReader(data), len(data))
		filter := nickFilter(rs.Nick())
		for {
			line, err := r.ReadSlice('\n')
			if err != nil {
				if err != io.EOF {
					log.Printf("error reading bytes %s", err)
				}
				break
			}
			if filter(line) {
				// Strip the trailing newline before buffering.
				lines = append(lines, line[0:len(line)-1])
			}
		}
		// Copy this day's matches into buf newest-first; stop as soon as
		// the buffer is full.
		for i := len(lines) - 1; i >= 0; i-- {
			index--
			buf[index] = string(lines[i])
			if index == 0 {
				break ScanLogs
			}
		}
	}
	// index never moved: no matching lines were found at all.
	if index == limit {
		d, _ := json.Marshal(Error{"User not found"})
		http.Error(w, string(d), http.StatusInternalServerError)
		return
	}
	type Line struct {
		Timestamp int64  `json:"timestamp"`
		Text      string `json:"text"`
	}
	data := struct {
		Nick  string `json:"nick"`
		Lines []Line `json:"lines"`
	}{
		Lines: []Line{},
	}
	// Parse each buffered line back into a timestamp and message text;
	// lines with an unparseable timestamp are silently skipped.
	for i := int(index); i < len(buf); i++ {
		t, err := time.Parse("2006-01-02 15:04:05 MST", buf[i][1:24])
		if err != nil {
			continue
		}
		ci := strings.Index(buf[i][LogLinePrefixLength:], ":")
		data.Nick = buf[i][LogLinePrefixLength : LogLinePrefixLength+ci]
		data.Lines = append(data.Lines, Line{
			Timestamp: t.Unix(),
			Text:      buf[i][ci+LogLinePrefixLength+2:],
		})
	}
	d, _ := json.Marshal(data)
	w.Write(d)
}
// dirsByMonth sorts "January 2006"-style directory names newest first;
// unparseable names sort toward the front.
type dirsByMonth []string

func (l dirsByMonth) Len() int { return len(l) }

func (l dirsByMonth) Swap(i, j int) { l[i], l[j] = l[j], l[i] }

func (l dirsByMonth) Less(i, j int) bool {
	const format = "January 2006"
	a, err := time.Parse(format, l[i])
	if err != nil {
		return true
	}
	b, err := time.Parse(format, l[j])
	if err != nil {
		return false
	}
	// Descending order; equal months also report Less.
	return a.After(b) || a.Equal(b)
}
// dirsByDay sorts "2006-01-02"-named day log files newest first;
// unparseable names are logged and sort toward the front.
type dirsByDay []string

func (l dirsByDay) Len() int { return len(l) }

func (l dirsByDay) Swap(i, j int) { l[i], l[j] = l[j], l[i] }

func (l dirsByDay) Less(i, j int) bool {
	const format = "2006-01-02.txt.lz4"
	a, err := time.Parse(format, lz4Path(l[i]))
	if err != nil {
		log.Println(l[i])
		return true
	}
	b, err := time.Parse(format, lz4Path(l[j]))
	if err != nil {
		log.Println(l[j])
		return false
	}
	// Descending order; equal days also report Less.
	return a.After(b) || a.Equal(b)
}
// lz4Path ensures path carries the ".lz4" suffix, appending it when
// missing. strings.HasSuffix replaces the slice expression
// path[len(path)-4:], which panicked on paths shorter than 4 characters.
func lz4Path(path string) string {
	if !strings.HasSuffix(path, ".lz4") {
		path += ".lz4"
	}
	return path
}
// readDirIndex returns the natural-sorted names of all entries in path,
// or ErrNotFound when the directory cannot be opened.
func readDirIndex(path string) ([]string, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, ErrNotFound
	}
	// BUG FIX: the directory handle was previously never closed.
	defer f.Close()
	names, err := f.Readdirnames(0)
	if err != nil {
		return nil, err
	}
	sort.Sort(handysort.Strings(names))
	return names, nil
}
// readLogDir returns the natural-sorted names of the log files (.txt or
// .txt.lz4) in path, or ErrNotFound when the directory cannot be opened.
func readLogDir(path string) ([]string, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, ErrNotFound
	}
	// BUG FIX: the directory handle was previously never closed.
	defer f.Close()
	files, err := f.Readdir(0)
	if err != nil {
		return nil, err
	}
	var names []string
	for _, file := range files {
		if LogExtension.MatchString(file.Name()) {
			names = append(names, file.Name())
		}
	}
	sort.Sort(handysort.Strings(names))
	return names, nil
}
// readLogFile loads one day's log, preferring the lz4-compressed variant
// and falling back to the plain .txt file. Returns ErrNotFound when
// neither exists.
func readLogFile(path string) ([]byte, error) {
	path = LogExtension.ReplaceAllString(path, "")
	buf, err := common.ReadCompressedFile(path + ".txt")
	if os.IsNotExist(err) {
		f, err := os.Open(path + ".txt")
		if err != nil {
			if os.IsNotExist(err) {
				return nil, ErrNotFound
			}
			// BUG FIX: a non-NotExist open error previously fell through
			// to ReadAll on a nil file.
			return nil, err
		}
		// BUG FIX: the file handle was previously never closed.
		defer f.Close()
		buf, err = ioutil.ReadAll(f)
		if err != nil {
			return nil, err
		}
	} else if err != nil {
		return nil, err
	}
	return buf, nil
}
// nickFilter returns a predicate matching log lines spoken by nick, i.e.
// lines whose text after the timestamp prefix starts with "nick:".
func nickFilter(nick string) func([]byte) bool {
	nick += ":"
	return func(line []byte) bool {
		for i := 0; i < len(nick); i++ {
			// BUG FIX: ">=" instead of ">" — the old check permitted an
			// out-of-range read when i+LogLinePrefixLength == len(line).
			if i+LogLinePrefixLength >= len(line) || line[i+LogLinePrefixLength] != nick[i] {
				return false
			}
		}
		return true
	}
}
// searchKey returns a predicate matching lines spoken by nick whose
// message text contains filter, case-insensitively.
func searchKey(nick, filter string) func([]byte) bool {
	nick += ":"
	return func(line []byte) bool {
		for i := 0; i < len(nick); i++ {
			// BUG FIX: ">=" instead of ">" — the old check permitted an
			// out-of-range read when i+LogLinePrefixLength == len(line).
			if i+LogLinePrefixLength >= len(line) || line[i+LogLinePrefixLength] != nick[i] {
				return false
			}
		}
		return bytes.Contains(bytes.ToLower(line[len(nick)+LogLinePrefixLength:]), bytes.ToLower([]byte(filter)))
	}
}
// filterKey reports whether line contains f, case-insensitively.
func filterKey(line []byte, f string) bool {
	haystack := bytes.ToLower(line)
	needle := bytes.ToLower([]byte(f))
	return bytes.Contains(haystack, needle)
}
// serveError ...
func serveError(w http.ResponseWriter, e error) {
tpl, err := ace.Load(filepath.Join(common.GetConfig().Server.ViewPath, "layout"), filepath.Join(common.GetConfig().Server.ViewPath, "error"), nil)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
data := map[string]interface{}{}
if e == ErrNotFound {
w.WriteHeader(http.StatusNotFound)
data["Message"] = e.Error()
} else if e != nil {
// w.WriteHeader(http.StatusInternalServerError)
data["Message"] = e.Error()
} else {
// w.WriteHeader(http.StatusInternalServerError)
data["Message"] = "Unknown Error"
}
w.Header().Set("Content-type", "text/html")
if err := tpl.Execute(w, data); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
// serveDirIndex ...
func serveDirIndex(w http.ResponseWriter, base []string, paths []string) {
tpl, err := ace.Load(filepath.Join(common.GetConfig().Server.ViewPath, "layout"), filepath.Join(common.GetConfig().Server.ViewPath, "directory"), nil)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
data := map[string]interface{}{
"Breadcrumbs": []map[string]string{},
"Paths": []map[string]string{},
}
basePath := ""
for _, path := range base {
basePath += "/" + path
data["Breadcrumbs"] = append(data["Breadcrumbs"].([]map[string]string), map[string]string{
"Path": basePath,
"Name": path,
})
}
basePath += "/"
for _, path := range paths {
icon := "file-text"
if filepath.Ext(path) == "" {
icon = "folder"
}
data["Paths"] = append(data["Paths"].([]map[string]string), map[string]string{
"Path": basePath + strings.Replace(path, ".txt", "", -1),
"Name": path,
"Icon": icon,
})
}
w.Header().Set("Content-type", "text/html")
if err := tpl.Execute(w, data); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
// serveFilteredLogs streams every line, from every log file in path, that
// satisfies filter. Responds 404 when the directory is missing or no line
// matched.
func serveFilteredLogs(w http.ResponseWriter, path string, filter func([]byte) bool) {
	logs, err := readLogDir(path)
	if err != nil {
		http.Error(w, ErrNotFound.Error(), http.StatusNotFound)
		return
	}
	w.Header().Set("Content-type", "text/plain; charset=UTF-8")
	lineCount := 0
	for _, name := range logs {
		data, err := readLogFile(filepath.Join(path, name))
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		reader := bufio.NewReaderSize(bytes.NewReader(data), len(data))
		for {
			line, err := reader.ReadSlice('\n')
			if err != nil {
				if err != io.EOF {
					log.Printf("error reading bytes %s", err)
				}
				break
			}
			if filter(line) {
				w.Write(line)
				lineCount++
			}
		}
	}
	if lineCount == 0 {
		http.Error(w, ErrSearchKeyNotFound.Error(), http.StatusNotFound)
		return
	}
}
Removed the subdomain logic.
package main
import (
"bufio"
"bytes"
"encoding/json"
"errors"
"flag"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"os/signal"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"github.com/gorilla/mux"
"github.com/slugalisk/overrustlelogs/common"
"github.com/xlab/handysort"
"github.com/yosssi/ace"
)
// temp ish.. move to config
const (
	// LogLinePrefixLength is the byte length of the fixed timestamp prefix
	// ("[2017-01-10 08:57:47 UTC] ") at the start of every log line.
	LogLinePrefixLength = len("[2017-01-10 08:57:47 UTC] ")
)

// errors returned to clients
var (
	ErrUserNotFound      = errors.New("didn't find any logs for this user")
	ErrNotFound          = errors.New("file not found")
	ErrSearchKeyNotFound = errors.New("didn't find what you were looking for :(")
)

// log file extension patterns
var (
	// LogExtension matches plain and lz4-compressed day log files.
	LogExtension = regexp.MustCompile(`\.txt(\.lz4)?$`)
	// NicksExtension matches per-month nick index files.
	NicksExtension = regexp.MustCompile(`\.nicks\.lz4$`)
)
// init parses the -config flag and loads the shared configuration before
// main runs.
// NOTE(review): calling flag.Parse in init interferes with test flags —
// consider moving this into main.
func init() {
	configPath := flag.String("config", "", "config path")
	flag.Parse()
	common.SetupConfig(*configPath)
}
// Start server
// main wires up every HTTP route, starts the server in the background and
// blocks until SIGINT/SIGTERM.
func main() {
	log.SetFlags(log.LstdFlags | log.Lshortfile)
	d := NewDebugger()
	r := mux.NewRouter()
	r.StrictSlash(true)
	// Static pages.
	r.HandleFunc("/", d.WatchHandle("Base", BaseHandle)).Methods("GET")
	r.HandleFunc("/contact", d.WatchHandle("Contact", ContactHandle)).Methods("GET")
	r.HandleFunc("/changelog", d.WatchHandle("Changelog", ChangelogHandle)).Methods("GET")
	// Mention search (default channel and per-channel variants).
	r.HandleFunc("/mentions/{nick:[a-zA-Z0-9_-]{1,25}}.txt", d.WatchHandle("MentionsHandle", MentionsHandle)).Methods("GET").Queries("date", "{date:[0-9]{4}-[0-9]{2}-[0-9]{2}}")
	r.HandleFunc("/mentions/{nick:[a-zA-Z0-9_-]{1,25}}.txt", d.WatchHandle("MentionsHandle", MentionsHandle)).Methods("GET")
	r.HandleFunc("/mentions/{nick:[a-zA-Z0-9_-]{1,25}}", d.WatchHandle("MentionsHandle", WrapperHandle)).Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+}/mentions/{nick:[a-zA-Z0-9_-]{1,25}}.txt", d.WatchHandle("MentionsHandle", MentionsHandle)).Methods("GET").Queries("date", "{date:[0-9]{4}-[0-9]{2}-[0-9]{2}}")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+}/mentions/{nick:[a-zA-Z0-9_-]{1,25}}.txt", d.WatchHandle("MentionsHandle", MentionsHandle)).Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+}/mentions/{nick:[a-zA-Z0-9_-]{1,25}}", d.WatchHandle("MentionsHandle", WrapperHandle)).Methods("GET")
	// Channel / month / day browsing. ".txt" routes serve plain text, the
	// extensionless twins serve the HTML wrapper page.
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}", d.WatchHandle("Channel", ChannelHandle)).Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/{month:[a-zA-Z]+ [0-9]{4}}", d.WatchHandle("Month", MonthHandle)).Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/{month:[a-zA-Z]+ [0-9]{4}}/{date:[0-9]{4}-[0-9]{2}-[0-9]{2}}.txt", d.WatchHandle("Day", DayHandle)).Queries("search", "{filter:.+}").Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/{month:[a-zA-Z]+ [0-9]{4}}/{date:[0-9]{4}-[0-9]{2}-[0-9]{2}}.txt", d.WatchHandle("Day", DayHandle)).Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/{month:[a-zA-Z]+ [0-9]{4}}/{date:[0-9]{4}-[0-9]{2}-[0-9]{2}}", d.WatchHandle("Day", WrapperHandle)).Methods("GET")
	// Per-user logs.
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/{month:[a-zA-Z]+ [0-9]{4}}/userlogs", d.WatchHandle("Users", UsersHandle)).Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/{month:[a-zA-Z]+ [0-9]{4}}/userlogs/{nick:[a-zA-Z0-9_-]{1,25}}.txt", d.WatchHandle("User", UserHandle)).Queries("search", "{filter:.+}").Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/{month:[a-zA-Z]+ [0-9]{4}}/userlogs/{nick:[a-zA-Z0-9_-]{1,25}}.txt", d.WatchHandle("User", UserHandle)).Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/{month:[a-zA-Z]+ [0-9]{4}}/userlogs/{nick:[a-zA-Z0-9_-]{1,25}}", d.WatchHandle("User", WrapperHandle)).Methods("GET")
	// Premium (user + replies) logs.
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/premium/{nick:[a-zA-Z0-9_-]{1,25}}", d.WatchHandle("Premium", PremiumHandle)).Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/premium/{nick:[a-zA-Z0-9_-]{1,25}}/{month:[a-zA-Z]+ [0-9]{4}}.txt", d.WatchHandle("PremiumUser", PremiumUserHandle)).Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/premium/{nick:[a-zA-Z0-9_-]{1,25}}/{month:[a-zA-Z]+ [0-9]{4}}", d.WatchHandle("PremiumUser", WrapperHandle)).Methods("GET")
	// Current-month shortcuts.
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/current", d.WatchHandle("CurrentBase", CurrentBaseHandle)).Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/current/{nick:[a-zA-Z0-9_]+}.txt", d.WatchHandle("NickHandle", NickHandle)).Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/current/{nick:[a-zA-Z0-9_]+}", d.WatchHandle("NickHandle", WrapperHandle)).Methods("GET")
	// Destinygg-specific meta logs must be registered before the generic
	// {channel} routes so they take precedence.
	r.HandleFunc("/Destinygg chatlog/{month:[a-zA-Z]+ [0-9]{4}}/broadcaster.txt", d.WatchHandle("DestinyBroadcaster", DestinyBroadcasterHandle)).Methods("GET")
	r.HandleFunc("/Destinygg chatlog/{month:[a-zA-Z]+ [0-9]{4}}/broadcaster", d.WatchHandle("DestinyBroadcaster", WrapperHandle)).Methods("GET")
	r.HandleFunc("/Destinygg chatlog/{month:[a-zA-Z]+ [0-9]{4}}/subscribers.txt", d.WatchHandle("DestinySubscriber", DestinySubscriberHandle)).Methods("GET")
	r.HandleFunc("/Destinygg chatlog/{month:[a-zA-Z]+ [0-9]{4}}/subscribers", d.WatchHandle("DestinySubscriber", WrapperHandle)).Methods("GET")
	r.HandleFunc("/Destinygg chatlog/{month:[a-zA-Z]+ [0-9]{4}}/bans.txt", d.WatchHandle("DestinyBan", DestinyBanHandle)).Methods("GET")
	r.HandleFunc("/Destinygg chatlog/{month:[a-zA-Z]+ [0-9]{4}}/bans", d.WatchHandle("DestinyBan", WrapperHandle)).Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/{month:[a-zA-Z]+ [0-9]{4}}/broadcaster.txt", d.WatchHandle("Broadcaster", BroadcasterHandle)).Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/{month:[a-zA-Z]+ [0-9]{4}}/broadcaster", d.WatchHandle("Broadcaster", WrapperHandle)).Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/{month:[a-zA-Z]+ [0-9]{4}}/subscribers.txt", d.WatchHandle("Subscriber", SubscriberHandle)).Methods("GET")
	r.HandleFunc("/{channel:[a-zA-Z0-9_-]+ chatlog}/{month:[a-zA-Z]+ [0-9]{4}}/subscribers", d.WatchHandle("Subscriber", WrapperHandle)).Methods("GET")
	// JSON API.
	r.HandleFunc("/api/v1/stalk/{channel:[a-zA-Z0-9_-]+ chatlog}/{nick:[a-zA-Z0-9_-]+}.json", d.WatchHandle("Stalk", StalkHandle)).Queries("limit", "{limit:[0-9]+}").Methods("GET")
	r.HandleFunc("/api/v1/stalk/{channel:[a-zA-Z0-9_-]+ chatlog}/{nick:[a-zA-Z0-9_-]+}.json", d.WatchHandle("Stalk", StalkHandle)).Methods("GET")
	r.HandleFunc("/api/v1/status.json", d.WatchHandle("Debug", d.HTTPHandle))
	r.NotFoundHandler = http.HandlerFunc(NotFoundHandle)
	// r.PathPrefix("/assets/").Handler(http.StripPrefix("/assets/", http.FileServer(http.Dir("assets"))))
	srv := &http.Server{
		Addr:         common.GetConfig().Server.Address,
		Handler:      r,
		ReadTimeout:  5 * time.Second,
		WriteTimeout: 10 * time.Second,
	}
	// Serve in the background; ListenAndServe's error is not checked here.
	go srv.ListenAndServe()
	// Block until interrupted, then exit immediately (no graceful drain).
	sigint := make(chan os.Signal, 1)
	signal.Notify(sigint, os.Interrupt, syscall.SIGTERM)
	<-sigint
	log.Println("i love you guys, be careful")
	os.Exit(0)
}
// Debugger tracks the number of in-flight requests per handler name
// and periodically logs the counts.
type Debugger struct {
mu sync.Mutex // guards the counters map itself (values are updated atomically)
counters map[string]*int64 // handler name -> in-flight request gauge
}
// NewDebugger creates a Debugger and starts a background goroutine
// that logs the current counter values once a minute for the
// lifetime of the process.
func NewDebugger() *Debugger {
	dbg := &Debugger{counters: make(map[string]*int64)}
	go func() {
		for range time.Tick(time.Minute) {
			dbg.DebugPrint()
		}
	}()
	return dbg
}
// WatchHandle wraps f so that a per-name gauge of in-flight requests
// is maintained and every served request is logged with its path and
// duration. Handlers registered under the same name share a counter.
func (d *Debugger) WatchHandle(name string, f http.HandlerFunc) http.HandlerFunc {
	d.mu.Lock()
	c, ok := d.counters[name]
	if !ok {
		c = new(int64)
		d.counters[name] = c
	}
	d.mu.Unlock()
	return func(w http.ResponseWriter, r *http.Request) {
		atomic.AddInt64(c, 1)
		start := time.Now()
		f.ServeHTTP(w, r)
		// %q replaces the hand-escaped \"%s\" and also escapes any
		// control characters in the path.
		log.Printf("served %q in %s", r.URL.Path, time.Since(start))
		atomic.AddInt64(c, -1)
	}
}
// counts returns a point-in-time snapshot of every gauge, keyed by
// handler name.
func (d *Debugger) counts() map[string]int64 {
	d.mu.Lock()
	snapshot := make(map[string]int64, len(d.counters))
	for name, gauge := range d.counters {
		snapshot[name] = atomic.LoadInt64(gauge)
	}
	d.mu.Unlock()
	return snapshot
}
// DebugPrint logs the current in-flight request counts, keyed by
// handler name.
func (d *Debugger) DebugPrint() {
log.Println(d.counts())
}
// HTTPHandle serves the current in-flight request counts as JSON.
func (d *Debugger) HTTPHandle(w http.ResponseWriter, r *http.Request) {
	b, err := json.Marshal(d.counts())
	if err != nil {
		// Previously the marshal error was silently discarded.
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// This endpoint is registered as status.json; say so.
	w.Header().Set("Content-type", "application/json")
	w.Write(b)
}
// NotFoundHandle renders the styled error page with a 404 status.
func NotFoundHandle(w http.ResponseWriter, r *http.Request) {
serveError(w, ErrNotFound)
}
// BaseHandle renders the root directory index: one entry per channel
// directory found under the configured log path.
func BaseHandle(w http.ResponseWriter, r *http.Request) {
paths, err := readDirIndex(common.GetConfig().LogPath)
if err != nil {
serveError(w, err)
return
}
serveDirIndex(w, []string{}, paths)
}
// WrapperHandle static html log wrapper
func WrapperHandle(w http.ResponseWriter, r *http.Request) {
tpl, err := ace.Load(common.GetConfig().Server.ViewPath+"/layout", common.GetConfig().Server.ViewPath+"/wrapper", nil)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.Header().Set("Content-type", "text/html; charset=UTF-8")
path := r.URL.Path + ".txt"
if r.URL.RawQuery != "" {
path += "?" + r.URL.RawQuery
}
if err := tpl.Execute(w, struct{ Path string }{Path: path}); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
}
// ContactHandle contact page
func ContactHandle(w http.ResponseWriter, r *http.Request) {
tpl, err := ace.Load(common.GetConfig().Server.ViewPath+"/layout", common.GetConfig().Server.ViewPath+"/contact", nil)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.Header().Set("Content-type", "text/html")
if err := tpl.Execute(w, nil); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
// ChangelogHandle changelog page
func ChangelogHandle(w http.ResponseWriter, r *http.Request) {
tpl, err := ace.Load(common.GetConfig().Server.ViewPath+"/layout", common.GetConfig().Server.ViewPath+"/changelog", nil)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.Header().Set("Content-type", "text/html")
if err := tpl.Execute(w, nil); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
// ChannelHandle renders the month index for a channel, most recent
// month first.
func ChannelHandle(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
paths, err := readDirIndex(filepath.Join(common.GetConfig().LogPath, vars["channel"]))
if err != nil {
serveError(w, err)
return
}
// dirsByMonth orders "January 2006"-style names newest first.
sort.Sort(dirsByMonth(paths))
serveDirIndex(w, []string{vars["channel"]}, paths)
}
// MonthHandle renders the directory index for one month of a
// channel's logs: the meta entries (userlogs, broadcaster.txt,
// subscribers.txt and — for the Destinygg chatlog — bans.txt)
// followed by the day files, newest first.
func MonthHandle(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	paths, err := readLogDir(filepath.Join(common.GetConfig().LogPath, vars["channel"], vars["month"]))
	if err != nil {
		serveError(w, err)
		return
	}
	metaPaths := []string{"userlogs", "broadcaster.txt", "subscribers.txt"}
	if vars["channel"] == "Destinygg chatlog" {
		metaPaths = append(metaPaths, "bans.txt")
	}
	sort.Sort(dirsByDay(paths))
	// Prepend the meta entries. This replaces the previous
	// append + double-copy shuffle with the obvious one-liner.
	paths = append(metaPaths, paths...)
	for i, path := range paths {
		// Present compressed logs under their plain .txt name.
		paths[i] = LogExtension.ReplaceAllString(path, ".txt")
	}
	serveDirIndex(w, []string{vars["channel"], vars["month"]}, paths)
}
// DayHandle serves one day of logs as text/plain. When the route
// carries a "filter" variable, only lines containing it
// (case-insensitively) are written, and an empty result becomes a 404.
func DayHandle(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
data, err := readLogFile(filepath.Join(common.GetConfig().LogPath, vars["channel"], vars["month"], vars["date"]))
if err != nil {
http.Error(w, err.Error(), http.StatusNotFound)
return
}
w.Header().Set("Content-type", "text/plain; charset=UTF-8")
w.Header().Set("Cache-control", "max-age=60")
var ok bool
var filter func([]byte, string) bool
if _, ok = vars["filter"]; ok {
filter = filterKey
} else {
// No filter requested: accept every line.
filter = func(l []byte, f string) bool { return true }
}
var lineCount int
reader := bufio.NewReaderSize(bytes.NewReader(data), len(data))
for {
line, err := reader.ReadSlice('\n')
if err != nil {
if err != io.EOF {
log.Printf("error reading bytes %s", err)
}
// NOTE(review): a final line without a trailing newline is dropped
// here, since ReadSlice returns it together with io.EOF.
break
}
if filter(line, vars["filter"]) {
w.Write(line)
lineCount++
}
}
// Only report "not found" when a filter was actually supplied.
if lineCount == 0 && ok {
http.Error(w, ErrSearchKeyNotFound.Error(), http.StatusNotFound)
}
}
// UsersHandle lists every nick seen in the given channel and month,
// naturally sorted, as links to per-user logs.
func UsersHandle(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	f, err := os.Open(filepath.Join(common.GetConfig().LogPath, vars["channel"], vars["month"]))
	if err != nil {
		serveError(w, ErrNotFound)
		return
	}
	// The directory handle used to be leaked; release it when done.
	defer f.Close()
	files, err := f.Readdir(0)
	if err != nil {
		serveError(w, err)
		return
	}
	nicks := common.NickList{}
	for _, file := range files {
		if NicksExtension.MatchString(file.Name()) {
			common.ReadNickList(nicks, filepath.Join(common.GetConfig().LogPath, vars["channel"], vars["month"], file.Name()))
		}
	}
	names := make([]string, 0, len(nicks))
	for nick := range nicks {
		names = append(names, nick+".txt")
	}
	sort.Sort(handysort.Strings(names))
	serveDirIndex(w, []string{vars["channel"], vars["month"], "userlogs"}, names)
}
// UserHandle streams every line the nick said in the given channel
// and month, optionally restricted to lines containing the "filter"
// route variable.
func UserHandle(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	dir := filepath.Join(common.GetConfig().LogPath, vars["channel"], vars["month"])
	if filter, ok := vars["filter"]; ok {
		serveFilteredLogs(w, dir, searchKey(vars["nick"], filter))
		return
	}
	serveFilteredLogs(w, dir, nickFilter(vars["nick"]))
}
// PremiumHandle renders the month index for a premium user's logs;
// each month entry links to a .txt file.
func PremiumHandle(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
paths, err := readDirIndex(filepath.Join(common.GetConfig().LogPath, vars["channel"]))
if err != nil {
http.Error(w, err.Error(), http.StatusNotFound)
return
}
for i := range paths {
paths[i] += ".txt"
}
serveDirIndex(w, []string{vars["channel"], "premium", vars["nick"]}, paths)
}
// PremiumUserHandle streams every line that was said by the nick or
// mentions it (case-insensitive substring match over the whole line).
func PremiumUserHandle(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	needle := bytes.ToLower([]byte(vars["nick"]))
	contains := func(line []byte) bool {
		return bytes.Contains(bytes.ToLower(line), needle)
	}
	serveFilteredLogs(w, filepath.Join(common.GetConfig().LogPath, vars["channel"], vars["month"]), contains)
}
// BroadcasterHandle streams the lines said by the channel's
// broadcaster (the channel name minus its " chatlog" suffix) for the
// requested month.
func BroadcasterHandle(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	// The route constrains channels to "<name> chatlog"; TrimSuffix
	// replaces the previous magic [:len-8] slice.
	nick := strings.TrimSuffix(vars["channel"], " chatlog")
	search, err := common.NewNickSearch(filepath.Join(common.GetConfig().LogPath, vars["channel"]), nick)
	if err != nil {
		http.Error(w, ErrUserNotFound.Error(), http.StatusNotFound)
		return
	}
	rs, err := search.Next()
	if err == io.EOF {
		http.Error(w, ErrUserNotFound.Error(), http.StatusNotFound)
		return
	} else if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		// Previously execution fell through here and served logs on
		// top of the error response.
		return
	}
	serveFilteredLogs(w, filepath.Join(common.GetConfig().LogPath, vars["channel"], vars["month"]), nickFilter(rs.Nick()))
}
// SubscriberHandle streams every line said by "twitchnotify" for the
// channel and month.
func SubscriberHandle(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
serveFilteredLogs(w, filepath.Join(common.GetConfig().LogPath, vars["channel"], vars["month"]), nickFilter("twitchnotify"))
}
// DestinyBroadcasterHandle streams every line said by "Destiny" in
// the Destinygg chatlog for the requested month.
func DestinyBroadcasterHandle(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
serveFilteredLogs(w, filepath.Join(common.GetConfig().LogPath, "Destinygg chatlog", vars["month"]), nickFilter("Destiny"))
}
// DestinySubscriberHandle streams every line said by "Subscriber" in
// the Destinygg chatlog for the requested month.
func DestinySubscriberHandle(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
serveFilteredLogs(w, filepath.Join(common.GetConfig().LogPath, "Destinygg chatlog", vars["month"]), nickFilter("Subscriber"))
}
// DestinyBanHandle streams every line said by "Ban" in the Destinygg
// chatlog for the requested month.
func DestinyBanHandle(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
serveFilteredLogs(w, filepath.Join(common.GetConfig().LogPath, "Destinygg chatlog", vars["month"]), nickFilter("Ban"))
}
// CurrentBaseHandle shows the most recent months logs directly on the subdomain
// by injecting the current month into the route vars and delegating
// to MonthHandle.
func CurrentBaseHandle(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
vars["month"] = time.Now().Format("January 2006")
MonthHandle(w, r)
}
// convertChannelCase normalizes a channel name to its on-disk form:
// all lowercase except a capitalized first word, e.g.
// "DESTINYGG CHATLOG" -> "Destinygg chatlog".
func convertChannelCase(ch string) string {
	words := strings.Split(strings.ToLower(ch), " ")
	words[0] = strings.Title(words[0])
	return strings.Join(words, " ")
}
// NickHandle shows the users most recent available log. The channel
// case is normalized first; a nick found under different casing is
// answered with a 301 to the canonical .txt URL.
func NickHandle(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
vars["channel"] = convertChannelCase(vars["channel"])
search, err := common.NewNickSearch(filepath.Join(common.GetConfig().LogPath, vars["channel"]), vars["nick"])
if err != nil {
http.Error(w, ErrUserNotFound.Error(), http.StatusNotFound)
return
}
rs, err := search.Next()
if err != nil {
http.Error(w, ErrUserNotFound.Error(), http.StatusNotFound)
return
}
if rs.Nick() != vars["nick"] {
// Known nick with different case: redirect to the canonical spelling.
http.Redirect(w, r, "./"+rs.Nick()+".txt", 301)
return
}
// Serve the month the nick was last seen in.
vars["month"] = rs.Month()
UserHandle(w, r)
}
// MentionsHandle shows each line where a specific nick gets mentioned
// on a given date (defaulting to today, UTC). A line counts as a
// mention when the message text contains " <nick>".
func MentionsHandle(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
if _, ok := vars["channel"]; ok {
vars["channel"] = strings.Title(vars["channel"]) + " chatlog"
} else {
// No channel in the route: default to the Destinygg chatlog.
vars["channel"] = "Destinygg chatlog"
}
if _, ok := vars["date"]; !ok {
vars["date"] = time.Now().UTC().Format("2006-01-02")
}
t, err := time.Parse("2006-01-02", vars["date"])
if err != nil {
http.Error(w, "invalid date format", http.StatusNotFound)
return
}
if t.After(time.Now().UTC()) {
http.Error(w, "can't look into the future D:", http.StatusNotFound)
return
}
data, err := readLogFile(filepath.Join(common.GetConfig().LogPath, vars["channel"], t.Format("January 2006"), t.Format("2006-01-02")))
if err != nil {
http.Error(w, "no logs found :(", http.StatusNotFound)
return
}
w.Header().Set("Content-type", "text/plain; charset=UTF-8")
var lineCount int
reader := bufio.NewReaderSize(bytes.NewReader(data), len(data))
for {
line, err := reader.ReadSlice('\n')
if err != nil {
if err != io.EOF {
log.Printf("error reading bytes %s", err)
}
break
}
// Skip past the timestamp prefix and the speaker's "nick:" header,
// then look for " <nick>" in the remaining message text.
if bytes.Contains(line[bytes.Index(line[LogLinePrefixLength:], []byte(":"))+LogLinePrefixLength:], []byte(" "+vars["nick"])) {
w.Write(line)
lineCount++
}
}
if lineCount == 0 {
http.Error(w, "no mentions :(", http.StatusNotFound)
}
}
// StalkHandle return n most recent lines of chat for user, as JSON
// {"nick": ..., "lines": [{"timestamp", "text"}, ...]}. The "limit"
// query parameter (default 3) is clamped to MaxStalkLines.
func StalkHandle(w http.ResponseWriter, r *http.Request) {
// Error is the JSON shape used for every error response below.
type Error struct {
Error string `json:"error"`
}
w.Header().Set("Content-type", "application/json")
vars := mux.Vars(r)
if _, ok := vars["limit"]; !ok {
vars["limit"] = "3"
}
limit, err := strconv.ParseUint(vars["limit"], 10, 32)
if err != nil {
d, _ := json.Marshal(Error{err.Error()})
http.Error(w, string(d), http.StatusBadRequest)
return
}
if limit > uint64(common.GetConfig().Server.MaxStalkLines) {
limit = uint64(common.GetConfig().Server.MaxStalkLines)
} else if limit < 1 {
limit = 3
}
// buf is filled from the end: buf[index:] holds the most recent
// lines in chronological order once scanning stops.
buf := make([]string, limit)
index := limit
search, err := common.NewNickSearch(filepath.Join(common.GetConfig().LogPath, vars["channel"]), vars["nick"])
if err != nil {
d, _ := json.Marshal(Error{err.Error()})
http.Error(w, string(d), http.StatusNotFound)
return
}
// Walk day files from newest to oldest until the buffer is full.
ScanLogs:
for {
rs, err := search.Next()
if err == io.EOF {
break
} else if err != nil {
d, _ := json.Marshal(Error{err.Error()})
http.Error(w, string(d), http.StatusInternalServerError)
return
}
data, err := readLogFile(filepath.Join(common.GetConfig().LogPath, vars["channel"], rs.Month(), rs.Day()))
if err != nil {
d, _ := json.Marshal(Error{err.Error()})
http.Error(w, string(d), http.StatusInternalServerError)
return
}
lines := [][]byte{}
r := bufio.NewReaderSize(bytes.NewReader(data), len(data))
filter := nickFilter(rs.Nick())
for {
line, err := r.ReadSlice('\n')
if err != nil {
if err != io.EOF {
log.Printf("error reading bytes %s", err)
}
break
}
if filter(line) {
// Strip the trailing newline.
lines = append(lines, line[0:len(line)-1])
}
}
// Copy this day's matches into buf, newest first.
for i := len(lines) - 1; i >= 0; i-- {
index--
buf[index] = string(lines[i])
if index == 0 {
break ScanLogs
}
}
}
// index unchanged means no line matched anywhere.
if index == limit {
d, _ := json.Marshal(Error{"User not found"})
http.Error(w, string(d), http.StatusInternalServerError)
return
}
type Line struct {
Timestamp int64 `json:"timestamp"`
Text string `json:"text"`
}
data := struct {
Nick string `json:"nick"`
Lines []Line `json:"lines"`
}{
Lines: []Line{},
}
for i := int(index); i < len(buf); i++ {
// buf[i][1:24] is the bracketed timestamp at the start of a line
// ("[2006-01-02 15:04:05 MST] ...").
t, err := time.Parse("2006-01-02 15:04:05 MST", buf[i][1:24])
if err != nil {
continue
}
// Split the "nick: message" remainder at the first colon.
ci := strings.Index(buf[i][LogLinePrefixLength:], ":")
data.Nick = buf[i][LogLinePrefixLength : LogLinePrefixLength+ci]
data.Lines = append(data.Lines, Line{
Timestamp: t.Unix(),
Text: buf[i][ci+LogLinePrefixLength+2:],
})
}
d, _ := json.Marshal(data)
w.Write(d)
}
// dirsByMonth sorts month directory names ("January 2006") in
// reverse chronological order; unparseable names sort first.
type dirsByMonth []string

func (l dirsByMonth) Len() int { return len(l) }

func (l dirsByMonth) Swap(i, j int) { l[i], l[j] = l[j], l[i] }

// Less reports whether l[i] is the more recent (or unparseable) month.
func (l dirsByMonth) Less(i, j int) bool {
	const layout = "January 2006"
	a, err := time.Parse(layout, l[i])
	if err != nil {
		return true
	}
	b, err := time.Parse(layout, l[j])
	if err != nil {
		return false
	}
	return !b.After(a)
}
// dirsByDay sorts day log filenames ("2006-01-02.txt" or
// "2006-01-02.txt.lz4") in reverse chronological order; unparseable
// names are logged and sort first.
type dirsByDay []string

func (l dirsByDay) Len() int { return len(l) }

func (l dirsByDay) Swap(i, j int) { l[i], l[j] = l[j], l[i] }

// Less reports whether l[i] is the more recent (or unparseable) day.
func (l dirsByDay) Less(i, j int) bool {
	const layout = "2006-01-02.txt.lz4"
	a, err := time.Parse(layout, lz4Path(l[i]))
	if err != nil {
		log.Println(l[i])
		return true
	}
	b, err := time.Parse(layout, lz4Path(l[j]))
	if err != nil {
		log.Println(l[j])
		return false
	}
	return !b.After(a)
}
// lz4Path returns path with an ".lz4" extension appended unless it
// already ends in one.
func lz4Path(path string) string {
	// strings.HasSuffix replaces the previous path[len(path)-4:]
	// slice, which panicked on paths shorter than four bytes.
	if !strings.HasSuffix(path, ".lz4") {
		path += ".lz4"
	}
	return path
}
// readDirIndex returns the naturally-sorted entry names of the
// directory at path, or ErrNotFound if it cannot be opened.
func readDirIndex(path string) ([]string, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, ErrNotFound
	}
	// The directory handle used to be leaked; release it when done.
	defer f.Close()
	names, err := f.Readdirnames(0)
	if err != nil {
		return nil, err
	}
	sort.Sort(handysort.Strings(names))
	return names, nil
}
// readLogDir returns the naturally-sorted names of the log files
// (matching LogExtension) in the directory at path, or ErrNotFound if
// the directory cannot be opened.
func readLogDir(path string) ([]string, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, ErrNotFound
	}
	// The directory handle used to be leaked; release it when done.
	defer f.Close()
	files, err := f.Readdir(0)
	if err != nil {
		return nil, err
	}
	var names []string
	for _, file := range files {
		if LogExtension.MatchString(file.Name()) {
			names = append(names, file.Name())
		}
	}
	sort.Sort(handysort.Strings(names))
	return names, nil
}
// readLogFile returns the contents of the log at path (with any
// LogExtension stripped), preferring the compressed variant and
// falling back to the plain .txt file. Returns ErrNotFound when
// neither exists.
func readLogFile(path string) ([]byte, error) {
	path = LogExtension.ReplaceAllString(path, "")
	buf, err := common.ReadCompressedFile(path + ".txt")
	if err == nil {
		return buf, nil
	}
	if !os.IsNotExist(err) {
		return nil, err
	}
	f, err := os.Open(path + ".txt")
	if err != nil {
		// Previously only os.IsNotExist was checked, so any other
		// Open failure led to reading from a nil *os.File.
		if os.IsNotExist(err) {
			return nil, ErrNotFound
		}
		return nil, err
	}
	// The file handle used to be leaked; release it when done.
	defer f.Close()
	buf, err = ioutil.ReadAll(f)
	if err != nil {
		return nil, err
	}
	return buf, nil
}
// nickFilter returns a predicate reporting whether a log line was
// said by nick, i.e. the text after the timestamp prefix starts with
// "nick:".
func nickFilter(nick string) func([]byte) bool {
	prefix := []byte(nick + ":")
	return func(line []byte) bool {
		// The previous per-byte loop guarded with ">" instead of
		// ">=" and indexed one past the end of a line that finished
		// exactly where the prefix would — an index-out-of-range
		// panic on such lines.
		if len(line) < LogLinePrefixLength+len(prefix) {
			return false
		}
		return bytes.HasPrefix(line[LogLinePrefixLength:], prefix)
	}
}
// searchKey returns a predicate reporting whether a log line was said
// by nick AND its message text contains filter (case-insensitive).
func searchKey(nick, filter string) func([]byte) bool {
	prefix := []byte(nick + ":")
	needle := bytes.ToLower([]byte(filter))
	return func(line []byte) bool {
		// As in nickFilter, the previous per-byte loop could index
		// one past the end of the line; bound-check first.
		if len(line) < LogLinePrefixLength+len(prefix) {
			return false
		}
		if !bytes.HasPrefix(line[LogLinePrefixLength:], prefix) {
			return false
		}
		return bytes.Contains(bytes.ToLower(line[LogLinePrefixLength+len(prefix):]), needle)
	}
}
// filterKey reports whether line contains f, ignoring case.
func filterKey(line []byte, f string) bool {
	lowered := bytes.ToLower(line)
	return bytes.Contains(lowered, bytes.ToLower([]byte(f)))
}
// serveError renders the styled error page for e, with a 404 status
// for ErrNotFound and 200 otherwise.
func serveError(w http.ResponseWriter, e error) {
	tpl, err := ace.Load(filepath.Join(common.GetConfig().Server.ViewPath, "layout"), filepath.Join(common.GetConfig().Server.ViewPath, "error"), nil)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// Headers must be set before WriteHeader; previously the
	// Content-type was set after the 404 status was written and was
	// silently dropped for that path.
	w.Header().Set("Content-type", "text/html")
	data := map[string]interface{}{}
	switch {
	case e == ErrNotFound:
		w.WriteHeader(http.StatusNotFound)
		data["Message"] = e.Error()
	case e != nil:
		data["Message"] = e.Error()
	default:
		data["Message"] = "Unknown Error"
	}
	if err := tpl.Execute(w, data); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
}
// serveDirIndex renders the directory template: breadcrumbs for each
// element of base, then one row per entry in paths. Entries without
// a file extension are shown with a folder icon, and ".txt" is
// stripped from the link target.
func serveDirIndex(w http.ResponseWriter, base []string, paths []string) {
tpl, err := ace.Load(filepath.Join(common.GetConfig().Server.ViewPath, "layout"), filepath.Join(common.GetConfig().Server.ViewPath, "directory"), nil)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
data := map[string]interface{}{
"Breadcrumbs": []map[string]string{},
"Paths": []map[string]string{},
}
// Accumulate the breadcrumb trail, one entry per path element.
basePath := ""
for _, path := range base {
basePath += "/" + path
data["Breadcrumbs"] = append(data["Breadcrumbs"].([]map[string]string), map[string]string{
"Path": basePath,
"Name": path,
})
}
basePath += "/"
for _, path := range paths {
icon := "file-text"
if filepath.Ext(path) == "" {
// No extension: treat the entry as a directory.
icon = "folder"
}
data["Paths"] = append(data["Paths"].([]map[string]string), map[string]string{
"Path": basePath + strings.Replace(path, ".txt", "", -1),
"Name": path,
"Icon": icon,
})
}
w.Header().Set("Content-type", "text/html")
if err := tpl.Execute(w, data); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
// serveFilteredLogs streams, as text/plain, every line of every log
// file under path for which filter returns true, in filename order.
// If nothing matches, a 404 with ErrSearchKeyNotFound is sent.
func serveFilteredLogs(w http.ResponseWriter, path string, filter func([]byte) bool) {
logs, err := readLogDir(path)
if err != nil {
http.Error(w, ErrNotFound.Error(), http.StatusNotFound)
return
}
w.Header().Set("Content-type", "text/plain; charset=UTF-8")
var lineCount int
for _, name := range logs {
data, err := readLogFile(filepath.Join(path, name))
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
r := bufio.NewReaderSize(bytes.NewReader(data), len(data))
for {
line, err := r.ReadSlice('\n')
if err != nil {
if err != io.EOF {
log.Printf("error reading bytes %s", err)
}
// NOTE(review): a final line without a trailing newline is
// dropped here (ReadSlice returns it together with io.EOF).
break
}
if filter(line) {
w.Write(line)
lineCount++
}
}
}
if lineCount == 0 {
http.Error(w, ErrSearchKeyNotFound.Error(), http.StatusNotFound)
return
}
}
|
/*
Copyright 2018 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Command server launches a stand-alone inverting proxy.
//
// Example usage:
// go build -o ~/bin/inverting-proxy ./server/server.go
// ~/bin/inverting-proxy --port 8081
package main
import (
"bufio"
"context"
"crypto/sha256"
"encoding/json"
"flag"
"fmt"
"io"
"log"
"math/rand"
"net"
"net/http"
"strings"
"sync"
"time"
"github.com/google/inverting-proxy/agent/utils"
)
var (
port = flag.Int("port", 0, "Port on which to listen")
)
// pendingRequest represents a frontend request
type pendingRequest struct {
startTime time.Time
req *http.Request
respChan chan *http.Response
}
func newPendingRequest(r *http.Request) *pendingRequest {
return &pendingRequest{
startTime: time.Now(),
req: r,
respChan: make(chan *http.Response),
}
}
// proxy relays frontend requests to a polling backend agent and
// matches the agent's posted responses back to the waiting frontend
// goroutines.
type proxy struct {
requestIDs chan string // unbuffered; hands newly assigned request IDs to agent list polls
randGenerator *rand.Rand // source for request IDs; see newID
// protects the map below
sync.Mutex
requests map[string]*pendingRequest // request ID -> waiting frontend request
}

// newProxy returns a proxy with an initialized request map, ID
// channel, and a time-seeded random generator.
func newProxy() *proxy {
return &proxy{
requestIDs: make(chan string),
randGenerator: rand.New(rand.NewSource(time.Now().UnixNano())),
requests: make(map[string]*pendingRequest),
}
}
// handleAgentPostResponse parses the agent's serialized HTTP response
// for requestID from the request body and hands it to the waiting
// frontend goroutine, streaming the body through a pipe so this
// handler does not return before the body has been fully transferred.
func (p *proxy) handleAgentPostResponse(w http.ResponseWriter, r *http.Request, requestID string) {
p.Lock()
pending, ok := p.requests[requestID]
p.Unlock()
if !ok {
log.Printf("Could not find pending request: %q", requestID)
http.NotFound(w, r)
return
}
resp, err := http.ReadResponse(bufio.NewReader(r.Body), pending.req)
if err != nil {
log.Printf("Could not parse response to request %q: %v", requestID, err)
http.Error(w, "Failure parsing request body", http.StatusBadRequest)
return
}
// We want to track whether or not the body has finished being read so that we can
// make sure that this method does not return until after that. However, we do not
// want to block the sending of the response to the client while it is being read.
//
// To accommodate both goals, we replace the response body with a pipereader, start
// forwarding the response immediately, and then copy the original body to the
// corresponding pipewriter.
respBody := resp.Body
defer respBody.Close()
pr, pw := io.Pipe()
defer pw.Close()
resp.Body = pr
// Hand the response to the frontend goroutine unless it has
// already given up (its context is done).
select {
case <-r.Context().Done():
return
case pending.respChan <- resp:
}
if _, err := io.Copy(pw, respBody); err != nil {
// The error reply goes to the agent; the frontend has already
// received the (now truncated) response via the pipe.
log.Printf("Could not read response to request %q: %v", requestID, err)
http.Error(w, "Failure reading request body", http.StatusInternalServerError)
}
}
// handleAgentGetRequest returns the pending frontend request with the
// given ID to the agent: the serialized request becomes the response
// body, and the original arrival time is exposed in a header.
func (p *proxy) handleAgentGetRequest(w http.ResponseWriter, r *http.Request, requestID string) {
p.Lock()
pending, ok := p.requests[requestID]
p.Unlock()
if !ok {
log.Printf("Could not find pending request: %q", requestID)
http.NotFound(w, r)
return
}
log.Printf("Returning pending request: %q", requestID)
w.Header().Set(utils.HeaderRequestStartTime, pending.startTime.Format(time.RFC3339Nano))
w.WriteHeader(http.StatusOK)
// Best-effort write; the agent retries via its own polling loop.
pending.req.Write(w)
}
// waitForRequestIDs blocks until at least one request ID is available, and then returns
// a slice of all of the IDs available at that time.
//
// It gives up after 30 seconds (a long-poll timeout) or when ctx is
// cancelled, returning nil in both cases.
//
// Note that any IDs returned by this method will never be returned again.
func (p *proxy) waitForRequestIDs(ctx context.Context) []string {
var requestIDs []string
select {
case <-ctx.Done():
return nil
case <-time.After(30 * time.Second):
return nil
case id := <-p.requestIDs:
requestIDs = append(requestIDs, id)
}
// Drain whatever additional IDs are immediately available.
for {
select {
case id := <-p.requestIDs:
requestIDs = append(requestIDs, id)
default:
return requestIDs
}
}
}
// handleAgentListRequests long-polls for pending request IDs and
// reports them to the agent as a JSON array.
func (p *proxy) handleAgentListRequests(w http.ResponseWriter, r *http.Request) {
	pendingIDs := p.waitForRequestIDs(r.Context())
	body, err := json.Marshal(pendingIDs)
	if err != nil {
		http.Error(w, fmt.Sprintf("Failure serializing the request IDs: %v", err), http.StatusInternalServerError)
		return
	}
	log.Printf("Reporting pending requests: %s", body)
	w.WriteHeader(http.StatusOK)
	w.Write(body)
}
// handleAgentRequest dispatches a call from a backend agent: no
// request-ID header means a list poll, a POST with an ID uploads a
// response, and anything else downloads the pending request.
func (p *proxy) handleAgentRequest(w http.ResponseWriter, r *http.Request, backendID string) {
	requestID := r.Header.Get(utils.HeaderRequestID)
	switch {
	case requestID == "":
		log.Printf("Received new backend list request from %q", backendID)
		p.handleAgentListRequests(w, r)
	case r.Method == http.MethodPost:
		log.Printf("Received new backend post request from %q", backendID)
		p.handleAgentPostResponse(w, r, requestID)
	default:
		log.Printf("Received new backend get request from %q", backendID)
		p.handleAgentGetRequest(w, r, requestID)
	}
}
// newID returns a random 64-hex-digit request ID.
//
// rand.Rand is not safe for concurrent use, and newID is reached from
// concurrent ServeHTTP calls, so the generator is guarded by the
// proxy's mutex.
func (p *proxy) newID() string {
	p.Lock()
	n := p.randGenerator.Int63()
	p.Unlock()
	sum := sha256.Sum256([]byte(fmt.Sprintf("%d", n)))
	return fmt.Sprintf("%x", sum)
}
// isHopByHopHeader determines whether or not the given header name represents
// a header that is specific to a single network hop and thus should not be
// retransmitted by a proxy.
//
// See: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#hbh
func isHopByHopHeader(name string) bool {
	// Switch directly on the lowered name; the previous `n :=`
	// binding was never used (the other copy of this file already
	// dropped it).
	switch strings.ToLower(name) {
	case "connection", "keep-alive", "proxy-authenticate", "proxy-authorization", "te", "trailer", "transfer-encoding", "upgrade":
		return true
	default:
		return false
	}
}
// ServeHTTP routes agent traffic (identified by the backend-ID
// header) to the agent handlers, and treats everything else as a
// frontend request: it is assigned an ID, enqueued for an agent to
// pick up, and the agent's eventual response is copied back to the
// client.
func (p *proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if backendID := r.Header.Get(utils.HeaderBackendID); backendID != "" {
		p.handleAgentRequest(w, r, backendID)
		return
	}
	id := p.newID()
	log.Printf("Received new frontend request %q", id)
	// Filter out hop-by-hop headers from the request
	for name := range r.Header {
		if isHopByHopHeader(name) {
			r.Header.Del(name)
		}
	}
	pending := newPendingRequest(r)
	p.Lock()
	p.requests[id] = pending
	p.Unlock()
	// Remove the entry when this request finishes. Previously
	// completed and cancelled requests were never deleted from the
	// map, so it grew without bound.
	defer func() {
		p.Lock()
		delete(p.requests, id)
		p.Unlock()
	}()
	// Enqueue the request
	select {
	case <-r.Context().Done():
		// The client request was cancelled
		log.Printf("Timeout waiting to enqueue the request ID for %q", id)
		return
	case p.requestIDs <- id:
	}
	log.Printf("Request %q enqueued after %s", id, time.Since(pending.startTime))
	// Pull out and copy the response
	select {
	case <-r.Context().Done():
		// The client request was cancelled
		log.Printf("Timeout waiting for the response to %q", id)
		return
	case resp := <-pending.respChan:
		defer resp.Body.Close()
		// Copy all of the non-hop-by-hop headers to the proxied response
		for name, vals := range resp.Header {
			if !isHopByHopHeader(name) {
				w.Header()[name] = vals
			}
		}
		w.WriteHeader(resp.StatusCode)
		io.Copy(w, resp.Body)
		// This log line used to sit after the select, where it was
		// unreachable because every case returns.
		log.Printf("Response for %q received after %s", id, time.Since(pending.startTime))
	}
}
// main listens on the --port flag's TCP port and serves the inverting
// proxy until the server returns, which is fatal.
func main() {
flag.Parse()
listener, err := net.Listen("tcp", fmt.Sprintf(":%d", *port))
if err != nil {
log.Fatalf("Failed to create the TCP listener for port %d: %v", *port, err)
}
log.Printf("Listening on %s", listener.Addr())
log.Fatal(http.Serve(listener, newProxy()))
}
Change note: simplify the switch expression in isHopByHopHeader by dropping the unused `n :=` binding (the two copies of server.go below differ only in this).
/*
Copyright 2018 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Command server launches a stand-alone inverting proxy.
//
// Example usage:
// go build -o ~/bin/inverting-proxy ./server/server.go
// ~/bin/inverting-proxy --port 8081
package main
import (
"bufio"
"context"
"crypto/sha256"
"encoding/json"
"flag"
"fmt"
"io"
"log"
"math/rand"
"net"
"net/http"
"strings"
"sync"
"time"
"github.com/google/inverting-proxy/agent/utils"
)
var (
port = flag.Int("port", 0, "Port on which to listen")
)
// pendingRequest represents a frontend request
type pendingRequest struct {
startTime time.Time
req *http.Request
respChan chan *http.Response
}
func newPendingRequest(r *http.Request) *pendingRequest {
return &pendingRequest{
startTime: time.Now(),
req: r,
respChan: make(chan *http.Response),
}
}
// proxy relays frontend requests to a polling backend agent and
// matches the agent's posted responses back to the waiting frontend
// goroutines.
type proxy struct {
requestIDs chan string // unbuffered; hands newly assigned request IDs to agent list polls
randGenerator *rand.Rand // source for request IDs; see newID
// protects the map below
sync.Mutex
requests map[string]*pendingRequest // request ID -> waiting frontend request
}

// newProxy returns a proxy with an initialized request map, ID
// channel, and a time-seeded random generator.
func newProxy() *proxy {
return &proxy{
requestIDs: make(chan string),
randGenerator: rand.New(rand.NewSource(time.Now().UnixNano())),
requests: make(map[string]*pendingRequest),
}
}
// handleAgentPostResponse parses the agent's serialized HTTP response
// for requestID from the request body and hands it to the waiting
// frontend goroutine, streaming the body through a pipe so this
// handler does not return before the body has been fully transferred.
func (p *proxy) handleAgentPostResponse(w http.ResponseWriter, r *http.Request, requestID string) {
p.Lock()
pending, ok := p.requests[requestID]
p.Unlock()
if !ok {
log.Printf("Could not find pending request: %q", requestID)
http.NotFound(w, r)
return
}
resp, err := http.ReadResponse(bufio.NewReader(r.Body), pending.req)
if err != nil {
log.Printf("Could not parse response to request %q: %v", requestID, err)
http.Error(w, "Failure parsing request body", http.StatusBadRequest)
return
}
// We want to track whether or not the body has finished being read so that we can
// make sure that this method does not return until after that. However, we do not
// want to block the sending of the response to the client while it is being read.
//
// To accommodate both goals, we replace the response body with a pipereader, start
// forwarding the response immediately, and then copy the original body to the
// corresponding pipewriter.
respBody := resp.Body
defer respBody.Close()
pr, pw := io.Pipe()
defer pw.Close()
resp.Body = pr
// Hand the response to the frontend goroutine unless it has
// already given up (its context is done).
select {
case <-r.Context().Done():
return
case pending.respChan <- resp:
}
if _, err := io.Copy(pw, respBody); err != nil {
// The error reply goes to the agent; the frontend has already
// received the (now truncated) response via the pipe.
log.Printf("Could not read response to request %q: %v", requestID, err)
http.Error(w, "Failure reading request body", http.StatusInternalServerError)
}
}
// handleAgentGetRequest returns the pending frontend request with the
// given ID to the agent: the serialized request becomes the response
// body, and the original arrival time is exposed in a header.
func (p *proxy) handleAgentGetRequest(w http.ResponseWriter, r *http.Request, requestID string) {
p.Lock()
pending, ok := p.requests[requestID]
p.Unlock()
if !ok {
log.Printf("Could not find pending request: %q", requestID)
http.NotFound(w, r)
return
}
log.Printf("Returning pending request: %q", requestID)
w.Header().Set(utils.HeaderRequestStartTime, pending.startTime.Format(time.RFC3339Nano))
w.WriteHeader(http.StatusOK)
// Best-effort write; the agent retries via its own polling loop.
pending.req.Write(w)
}
// waitForRequestIDs blocks until at least one request ID is available, and then returns
// a slice of all of the IDs available at that time.
//
// It gives up after 30 seconds (a long-poll timeout) or when ctx is
// cancelled, returning nil in both cases.
//
// Note that any IDs returned by this method will never be returned again.
func (p *proxy) waitForRequestIDs(ctx context.Context) []string {
var requestIDs []string
select {
case <-ctx.Done():
return nil
case <-time.After(30 * time.Second):
return nil
case id := <-p.requestIDs:
requestIDs = append(requestIDs, id)
}
// Drain whatever additional IDs are immediately available.
for {
select {
case id := <-p.requestIDs:
requestIDs = append(requestIDs, id)
default:
return requestIDs
}
}
}
// handleAgentListRequests long-polls for pending request IDs and
// reports them to the agent as a JSON array.
func (p *proxy) handleAgentListRequests(w http.ResponseWriter, r *http.Request) {
	pendingIDs := p.waitForRequestIDs(r.Context())
	body, err := json.Marshal(pendingIDs)
	if err != nil {
		http.Error(w, fmt.Sprintf("Failure serializing the request IDs: %v", err), http.StatusInternalServerError)
		return
	}
	log.Printf("Reporting pending requests: %s", body)
	w.WriteHeader(http.StatusOK)
	w.Write(body)
}
// handleAgentRequest dispatches a call from a backend agent: no
// request-ID header means a list poll, a POST with an ID uploads a
// response, and anything else downloads the pending request.
func (p *proxy) handleAgentRequest(w http.ResponseWriter, r *http.Request, backendID string) {
	requestID := r.Header.Get(utils.HeaderRequestID)
	switch {
	case requestID == "":
		log.Printf("Received new backend list request from %q", backendID)
		p.handleAgentListRequests(w, r)
	case r.Method == http.MethodPost:
		log.Printf("Received new backend post request from %q", backendID)
		p.handleAgentPostResponse(w, r, requestID)
	default:
		log.Printf("Received new backend get request from %q", backendID)
		p.handleAgentGetRequest(w, r, requestID)
	}
}
// newID returns a random 64-hex-digit request ID.
//
// rand.Rand is not safe for concurrent use, and newID is reached from
// concurrent ServeHTTP calls, so the generator is guarded by the
// proxy's mutex.
func (p *proxy) newID() string {
	p.Lock()
	n := p.randGenerator.Int63()
	p.Unlock()
	sum := sha256.Sum256([]byte(fmt.Sprintf("%d", n)))
	return fmt.Sprintf("%x", sum)
}
// isHopByHopHeader determines whether or not the given header name represents
// a header that is specific to a single network hop and thus should not be
// retransmitted by a proxy.
//
// See: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#hbh
func isHopByHopHeader(name string) bool {
	switch strings.ToLower(name) {
	case "connection", "keep-alive", "proxy-authenticate", "proxy-authorization",
		"te", "trailer", "transfer-encoding", "upgrade":
		return true
	}
	return false
}
// ServeHTTP routes agent traffic (identified by the backend-ID
// header) to the agent handlers, and treats everything else as a
// frontend request: it is assigned an ID, enqueued for an agent to
// pick up, and the agent's eventual response is copied back to the
// client.
func (p *proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if backendID := r.Header.Get(utils.HeaderBackendID); backendID != "" {
		p.handleAgentRequest(w, r, backendID)
		return
	}
	id := p.newID()
	log.Printf("Received new frontend request %q", id)
	// Filter out hop-by-hop headers from the request
	for name := range r.Header {
		if isHopByHopHeader(name) {
			r.Header.Del(name)
		}
	}
	pending := newPendingRequest(r)
	p.Lock()
	p.requests[id] = pending
	p.Unlock()
	// Remove the entry when this request finishes. Previously
	// completed and cancelled requests were never deleted from the
	// map, so it grew without bound.
	defer func() {
		p.Lock()
		delete(p.requests, id)
		p.Unlock()
	}()
	// Enqueue the request
	select {
	case <-r.Context().Done():
		// The client request was cancelled
		log.Printf("Timeout waiting to enqueue the request ID for %q", id)
		return
	case p.requestIDs <- id:
	}
	log.Printf("Request %q enqueued after %s", id, time.Since(pending.startTime))
	// Pull out and copy the response
	select {
	case <-r.Context().Done():
		// The client request was cancelled
		log.Printf("Timeout waiting for the response to %q", id)
		return
	case resp := <-pending.respChan:
		defer resp.Body.Close()
		// Copy all of the non-hop-by-hop headers to the proxied response
		for name, vals := range resp.Header {
			if !isHopByHopHeader(name) {
				w.Header()[name] = vals
			}
		}
		w.WriteHeader(resp.StatusCode)
		io.Copy(w, resp.Body)
		// This log line used to sit after the select, where it was
		// unreachable because every case returns.
		log.Printf("Response for %q received after %s", id, time.Since(pending.startTime))
	}
}
// main binds a TCP listener on the configured port and serves HTTP
// with a freshly constructed proxy handler until a fatal error occurs.
func main() {
    flag.Parse()
    addr := fmt.Sprintf(":%d", *port)
    ln, err := net.Listen("tcp", addr)
    if err != nil {
        log.Fatalf("Failed to create the TCP listener for port %d: %v", *port, err)
    }
    log.Printf("Listening on %s", ln.Addr())
    log.Fatal(http.Serve(ln, newProxy()))
}
|
package server
import (
"database/sql"
"log"
"net/http"
"github.com/99designs/gqlgen/graphql"
"github.com/emwalker/digraph/models"
"github.com/emwalker/digraph/resolvers"
"github.com/volatiletech/sqlboiler/boil"
)
// Server holds config information for running the API server.
type Server struct {
    BasicAuthUsername string // username checked by the basic-auth middleware
    BasicAuthPassword string // password checked by the basic-auth middleware
    ConnectionString  string // Postgres connection string used to open db
    db                *sql.DB // open database handle shared with the resolver
    DevMode           bool // development-mode flag
    LogLevel          int // verbosity; values > 1 also enable sqlboiler query logging
    Port              string // TCP port (without the colon) the HTTP server listens on
    resolver          *resolvers.Resolver // GraphQL resolver backed by db
    schema            graphql.ExecutableSchema // executable GraphQL schema built from resolver
}
// New returns a new *Server configured with the parameters passed in.
// It opens and pings the Postgres connection and builds the GraphQL
// schema; any failure aborts the process via must.
func New(
    port string, devMode bool, username, password string, logLevel int, connectionString string,
) *Server {
    conn, err := sql.Open("postgres", connectionString)
    must(err)
    must(conn.Ping())
    res := &resolvers.Resolver{DB: conn}
    return &Server{
        BasicAuthPassword: password,
        BasicAuthUsername: username,
        ConnectionString:  connectionString,
        db:                conn,
        DevMode:           devMode,
        LogLevel:          logLevel,
        Port:              port,
        resolver:          res,
        schema:            models.NewExecutableSchema(models.Config{Resolvers: res}),
    }
}
// Routes registers route handlers with the http server.
func (s *Server) Routes() {
    static := s.withBasicAuth(s.handleStaticFiles())
    graphqlEndpoint := s.withSession(s.withBasicAuth(s.handleGraphqlRequest()))
    playground := s.withBasicAuth(s.handleGraphqlPlayground())
    root := s.withBasicAuth(s.handleRoot())
    http.Handle("/static/", static)
    http.Handle("/graphql", graphqlEndpoint)
    http.Handle("/playground", playground)
    http.Handle("/_ah/health", s.handleHealthCheck())
    http.Handle("/", root)
    s.RegisterOauth2Routes()
}
// Run starts up the http server and blocks; the process exits via
// log.Fatal if serving fails.
func (s *Server) Run() {
    log.Printf("Running server with log level %d", s.LogLevel)
    if verbose := s.LogLevel > 1; verbose {
        boil.DebugMode = true
    }
    log.Printf("Connect to http://localhost:%s/playground for the GraphQL playground", s.Port)
    log.Printf("Listening on port %s", s.Port)
    addr := ":" + s.Port
    log.Fatal(http.ListenAndServe(addr, nil))
}
Move db.Ping() from factory method to Run().
package server
import (
"database/sql"
"log"
"net/http"
"github.com/99designs/gqlgen/graphql"
"github.com/emwalker/digraph/models"
"github.com/emwalker/digraph/resolvers"
"github.com/volatiletech/sqlboiler/boil"
)
// Server holds config information for running the API server.
type Server struct {
    BasicAuthUsername string // username checked by the basic-auth middleware
    BasicAuthPassword string // password checked by the basic-auth middleware
    ConnectionString  string // Postgres connection string used to open db
    db                *sql.DB // open database handle shared with the resolver
    DevMode           bool // development-mode flag
    LogLevel          int // verbosity; values > 1 also enable sqlboiler query logging
    Port              string // TCP port (without the colon) the HTTP server listens on
    resolver          *resolvers.Resolver // GraphQL resolver backed by db
    schema            graphql.ExecutableSchema // executable GraphQL schema built from resolver
}
// New returns a new *Server configured with the parameters passed in.
// The database connection is opened here but not pinged; Run performs
// the liveness check at startup.
func New(
    port string, devMode bool, username, password string, logLevel int, connectionString string,
) *Server {
    conn, err := sql.Open("postgres", connectionString)
    must(err)
    res := &resolvers.Resolver{DB: conn}
    return &Server{
        BasicAuthPassword: password,
        BasicAuthUsername: username,
        ConnectionString:  connectionString,
        db:                conn,
        DevMode:           devMode,
        LogLevel:          logLevel,
        Port:              port,
        resolver:          res,
        schema:            models.NewExecutableSchema(models.Config{Resolvers: res}),
    }
}
// Routes registers route handlers with the http server.
func (s *Server) Routes() {
    static := s.withBasicAuth(s.handleStaticFiles())
    graphqlEndpoint := s.withSession(s.withBasicAuth(s.handleGraphqlRequest()))
    playground := s.withBasicAuth(s.handleGraphqlPlayground())
    root := s.withBasicAuth(s.handleRoot())
    http.Handle("/static/", static)
    http.Handle("/graphql", graphqlEndpoint)
    http.Handle("/playground", playground)
    http.Handle("/_ah/health", s.handleHealthCheck())
    http.Handle("/", root)
    s.RegisterOauth2Routes()
}
// Run verifies database connectivity, then starts the http server and
// blocks; the process exits via log.Fatal if serving fails.
func (s *Server) Run() {
    must(s.db.Ping())
    log.Printf("Running server with log level %d", s.LogLevel)
    if verbose := s.LogLevel > 1; verbose {
        boil.DebugMode = true
    }
    log.Printf("Connect to http://localhost:%s/playground for the GraphQL playground", s.Port)
    log.Printf("Listening on port %s", s.Port)
    addr := ":" + s.Port
    log.Fatal(http.ListenAndServe(addr, nil))
}
|
package server
import (
"fmt"
"net/http"
"os"
"github.com/adrianduke/configr"
_ "github.com/adrianduke/configr/sources/file/toml"
"github.com/astaxie/beego/session"
_ "github.com/go-sql-driver/mysql"
"github.com/jinzhu/gorm"
_ "github.com/mattn/go-sqlite3"
"github.com/qor/admin"
"github.com/qor/i18n"
"github.com/qor/i18n/backends/database"
"github.com/qor/qor"
"github.com/8legd/HugoCMS/qor/models"
"github.com/8legd/hugocms/config"
hugocms_qor "github.com/8legd/hugocms/qor"
)
// SessionManager holds the cookie-backed session store used for admin
// authentication; it is initialized in ListenAndServe.
var SessionManager *session.Manager
// Auth implements the QOR admin authentication interface using a single
// fixed username/password pair.
type Auth struct {
    UserName string
    Password string
}
// LoginURL returns the path QOR admin redirects unauthenticated users to.
func (a Auth) LoginURL(c *admin.Context) string {
    return "/login"
}
// LogoutURL returns the path QOR admin uses to log the current user out.
func (a Auth) LogoutURL(c *admin.Context) string {
    return "/admin/logout"
}
// GetCurrentUser implements QOR admin authentication: it starts (or
// resumes) the session for the request, records the user in the session
// when the request is the /admin/auth login post with matching
// credentials, and returns the session's User — or nil when the request
// is not authenticated.
func (a Auth) GetCurrentUser(c *admin.Context) qor.CurrentUser {
    w := c.Writer
    r := c.Request
    sess, err := SessionManager.SessionStart(w, r)
    if err != nil {
        // handleError prints the error and exits the process.
        handleError(err)
    }
    defer sess.SessionRelease(w)
    // Successful login: the auth form posted the configured account and
    // password — store the user in the session.
    if r.URL.String() == "/admin/auth" &&
        r.FormValue("inputAccount") != "" &&
        (r.FormValue("inputAccount") == a.UserName) &&
        r.FormValue("inputPassword") != "" &&
        (r.FormValue("inputPassword") == a.Password) {
        sess.Set("User", User{a.UserName})
    }
    if u, ok := sess.Get("User").(User); ok && u.Name != "" {
        return u
    }
    return nil
}
// User is the minimal session-stored identity returned by GetCurrentUser.
type User struct {
    Name string
}
// DisplayName satisfies qor.CurrentUser by returning the user's name.
func (u User) DisplayName() string {
    return u.Name
}
// DatabaseType selects which database driver ListenAndServe opens.
type DatabaseType int

const (
    DB_SQLite DatabaseType = iota // local SQLite file "hugocms_<user>.db"
    DB_MySQL // MySQL at 127.0.0.1:3306, database "hugocms_<user>"
)
// ListenAndServe wires up the CMS: it opens the database (MySQL or
// SQLite according to dbType), loads configuration, starts the session
// manager, mounts the QOR admin UI plus static routes, and blocks
// serving HTTP on the configured port.
func ListenAndServe(port int, auth Auth, dbType DatabaseType) {
    var db *gorm.DB
    var err error
    if dbType == DB_MySQL {
        dbConn := fmt.Sprintf("%s:%s@tcp(127.0.0.1:3306)", auth.UserName, auth.Password)
        db, err = gorm.Open("mysql", dbConn+"/hugocms_"+auth.UserName+"?charset=utf8&parseTime=True&loc=Local")
    } else {
        db, err = gorm.Open("sqlite3", "hugocms_"+auth.UserName+".db")
    }
    if err != nil {
        handleError(err)
    }
    db.LogMode(true)
    // BUG FIX: probe the freshly opened connection rather than config.DB,
    // which is not assigned until setupConfig runs further below.
    if err = db.First(&models.Settings{}).Error; err != nil {
        // error handling...
        handleError(err)
        if false {
            // TODO check error, setup empty database
            for _, table := range hugocms_qor.Tables {
                if err := db.DropTableIfExists(table).Error; err != nil {
                    handleError(err)
                }
                if err := db.AutoMigrate(table).Error; err != nil {
                    handleError(err)
                }
            }
        }
    }
    siteName := fmt.Sprintf("%s - Hugo CMS", auth.UserName)
    if err := setupConfig(port, siteName, db, auth); err != nil {
        handleError(err)
    }
    // Add session support - used by Auth
    sessionLifetime := 3600 // session lifetime in seconds
    SessionManager, err = session.NewManager("memory", fmt.Sprintf(`{"cookieName":"gosessionid","gclifetime":%d}`, sessionLifetime))
    if err != nil {
        handleError(err)
    }
    go SessionManager.GC()
    // Create Hugo's content directory if it doesnt exist
    // TODO read content dir from config
    if _, err := os.Stat("./content"); os.IsNotExist(err) {
        // BUG FIX: the MkdirAll error was previously assigned and ignored.
        if err := os.MkdirAll("./content", os.ModePerm); err != nil {
            handleError(err)
        }
    }
    mux := http.NewServeMux()
    mux.Handle("/", http.FileServer(http.Dir("public")))
    adm := hugocms_qor.SetupAdmin()
    adm.MountTo("/admin", mux)
    adm.GetRouter().Post("/auth", func(ctx *admin.Context) {
        // we will only hit this on succesful login - redirect to admin dashboard
        http.Redirect(ctx.Writer, ctx.Request, "/admin", http.StatusFound)
    })
    adm.GetRouter().Get("/logout", func(ctx *admin.Context) {
        w := ctx.Writer
        r := ctx.Request
        sess, err := SessionManager.SessionStart(w, r)
        if err != nil {
            handleError(err)
        }
        defer sess.SessionRelease(w)
        sess.Delete("User")
        http.Redirect(w, r, "/login", http.StatusFound)
    })
    // NOTE: `system` is where QOR admin will upload files e.g. images - we map this to Hugo's static dir along with our other static assets
    // TODO read static dir from config
    // TODO read static assets list from config
    for _, path := range []string{"system", "css", "fonts", "images", "js", "login"} {
        mux.Handle(fmt.Sprintf("/%s/", path), http.FileServer(http.Dir("static")))
    }
    // BUG FIX: log the listen address before the blocking ListenAndServe
    // call; previously this line was unreachable.
    fmt.Printf("Listening on: %v\n", config.QOR.Port)
    if err := http.ListenAndServe(fmt.Sprintf("127.0.0.1:%d", config.QOR.Port), mux); err != nil {
        handleError(err)
    }
}
// setupConfig populates the package-level config from the parameters
// plus the hugo.toml file: QOR port/site settings, Hugo site settings
// (baseurl, dirs, language, RSS, menus), the shared DB handle, the i18n
// backend, and admin auth. Returns the first configuration error.
func setupConfig(port int, sitename string, db *gorm.DB, auth admin.Auth) error {
    config.QOR.Port = port
    config.QOR.SiteName = sitename
    // As a minumum add the root path for our site
    config.QOR.Paths = append(config.QOR.Paths, "/")
    config.Hugo.MetaDataFormat = "json"
    // Register the expected hugo.toml keys with their defaults, then parse.
    hugoConf := configr.New()
    hugoConf.RegisterKey("baseurl", "Hugo site baseurl", "/")
    hugoConf.RegisterKey("staticdir", "Hugo site static dir", "static")
    hugoConf.RegisterKey("publishdir", "Hugo site publish dir", "public")
    hugoConf.RegisterKey("languageCode", "Hugo site languageCode", "en")
    hugoConf.RegisterKey("disableRSS", "Hugo site disableRSS", true)
    hugoConf.RegisterKey("menu", "Hugo site menus", make(map[string]interface{}))
    hugoConfigFile := "hugo.toml"
    hugoConf.AddSource(configr.NewFile(hugoConfigFile))
    if err := hugoConf.Parse(); err != nil {
        return err
    }
    // Copy each parsed value into the package config, failing fast on
    // any missing or mistyped key.
    baseurl, err := hugoConf.String("baseurl")
    if err != nil {
        return err
    }
    config.Hugo.BaseURL = baseurl
    staticdir, err := hugoConf.String("staticdir")
    if err != nil {
        return err
    }
    config.Hugo.StaticDir = staticdir
    publishdir, err := hugoConf.String("publishdir")
    if err != nil {
        return err
    }
    config.Hugo.PublishDir = publishdir
    languageCode, err := hugoConf.String("languageCode")
    if err != nil {
        return err
    }
    config.Hugo.LanguageCode = languageCode
    disableRSS, err := hugoConf.Bool("disableRSS")
    if err != nil {
        return err
    }
    config.Hugo.DisableRSS = disableRSS
    rawMenu, err := hugoConf.Get("menu")
    if err != nil {
        return err
    }
    if menu, ok := rawMenu.(map[string]interface{}); ok {
        config.Hugo.Menu = menu
        // Add additional site paths from main menu items
        if rawMainMenu, ok := menu["main"]; ok {
            if mainMenu, ok := rawMainMenu.([]map[string]interface{}); ok {
                for _, item := range mainMenu {
                    if url, ok := item["url"].(string); ok {
                        if url != "" && url != "/" {
                            config.QOR.Paths = append(config.QOR.Paths, url)
                        }
                    }
                }
            }
        }
    }
    config.DB = db
    config.I18n = i18n.New(database.New(db))
    config.Auth = auth
    return nil
}
// handleError prints err to stdout and terminates the process with a
// non-zero exit code.
func handleError(err error) {
    fmt.Println(err)
    os.Exit(1)
    //TODO more graceful exit!
}
Fix import path typo (HugoCMS vs hugocms) and reorder imports.
package server
import (
"fmt"
"net/http"
"os"
"github.com/adrianduke/configr"
_ "github.com/adrianduke/configr/sources/file/toml"
"github.com/astaxie/beego/session"
_ "github.com/go-sql-driver/mysql"
"github.com/jinzhu/gorm"
_ "github.com/mattn/go-sqlite3"
"github.com/qor/admin"
"github.com/qor/i18n"
"github.com/qor/i18n/backends/database"
"github.com/qor/qor"
"github.com/8legd/hugocms/config"
hugocms_qor "github.com/8legd/hugocms/qor"
"github.com/8legd/hugocms/qor/models"
)
// SessionManager holds the cookie-backed session store used for admin
// authentication; it is initialized in ListenAndServe.
var SessionManager *session.Manager
// Auth implements the QOR admin authentication interface using a single
// fixed username/password pair.
type Auth struct {
    UserName string
    Password string
}
// LoginURL returns the path QOR admin redirects unauthenticated users to.
func (a Auth) LoginURL(c *admin.Context) string {
    return "/login"
}
// LogoutURL returns the path QOR admin uses to log the current user out.
func (a Auth) LogoutURL(c *admin.Context) string {
    return "/admin/logout"
}
// GetCurrentUser implements QOR admin authentication: it starts (or
// resumes) the session for the request, records the user in the session
// when the request is the /admin/auth login post with matching
// credentials, and returns the session's User — or nil when the request
// is not authenticated.
func (a Auth) GetCurrentUser(c *admin.Context) qor.CurrentUser {
    w := c.Writer
    r := c.Request
    sess, err := SessionManager.SessionStart(w, r)
    if err != nil {
        // handleError prints the error and exits the process.
        handleError(err)
    }
    defer sess.SessionRelease(w)
    // Successful login: the auth form posted the configured account and
    // password — store the user in the session.
    if r.URL.String() == "/admin/auth" &&
        r.FormValue("inputAccount") != "" &&
        (r.FormValue("inputAccount") == a.UserName) &&
        r.FormValue("inputPassword") != "" &&
        (r.FormValue("inputPassword") == a.Password) {
        sess.Set("User", User{a.UserName})
    }
    if u, ok := sess.Get("User").(User); ok && u.Name != "" {
        return u
    }
    return nil
}
// User is the minimal session-stored identity returned by GetCurrentUser.
type User struct {
    Name string
}
// DisplayName satisfies qor.CurrentUser by returning the user's name.
func (u User) DisplayName() string {
    return u.Name
}
// DatabaseType selects which database driver ListenAndServe opens.
type DatabaseType int

const (
    DB_SQLite DatabaseType = iota // local SQLite file "hugocms_<user>.db"
    DB_MySQL // MySQL at 127.0.0.1:3306, database "hugocms_<user>"
)
// ListenAndServe wires up the CMS: it opens the database (MySQL or
// SQLite according to dbType), loads configuration, starts the session
// manager, mounts the QOR admin UI plus static routes, and blocks
// serving HTTP on the configured port.
func ListenAndServe(port int, auth Auth, dbType DatabaseType) {
    var db *gorm.DB
    var err error
    if dbType == DB_MySQL {
        dbConn := fmt.Sprintf("%s:%s@tcp(127.0.0.1:3306)", auth.UserName, auth.Password)
        db, err = gorm.Open("mysql", dbConn+"/hugocms_"+auth.UserName+"?charset=utf8&parseTime=True&loc=Local")
    } else {
        db, err = gorm.Open("sqlite3", "hugocms_"+auth.UserName+".db")
    }
    if err != nil {
        handleError(err)
    }
    db.LogMode(true)
    // BUG FIX: probe the freshly opened connection rather than config.DB,
    // which is not assigned until setupConfig runs further below.
    if err = db.First(&models.Settings{}).Error; err != nil {
        // error handling...
        handleError(err)
        if false {
            // TODO check error, setup empty database
            for _, table := range hugocms_qor.Tables {
                if err := db.DropTableIfExists(table).Error; err != nil {
                    handleError(err)
                }
                if err := db.AutoMigrate(table).Error; err != nil {
                    handleError(err)
                }
            }
        }
    }
    siteName := fmt.Sprintf("%s - Hugo CMS", auth.UserName)
    if err := setupConfig(port, siteName, db, auth); err != nil {
        handleError(err)
    }
    // Add session support - used by Auth
    sessionLifetime := 3600 // session lifetime in seconds
    SessionManager, err = session.NewManager("memory", fmt.Sprintf(`{"cookieName":"gosessionid","gclifetime":%d}`, sessionLifetime))
    if err != nil {
        handleError(err)
    }
    go SessionManager.GC()
    // Create Hugo's content directory if it doesnt exist
    // TODO read content dir from config
    if _, err := os.Stat("./content"); os.IsNotExist(err) {
        // BUG FIX: the MkdirAll error was previously assigned and ignored.
        if err := os.MkdirAll("./content", os.ModePerm); err != nil {
            handleError(err)
        }
    }
    mux := http.NewServeMux()
    mux.Handle("/", http.FileServer(http.Dir("public")))
    adm := hugocms_qor.SetupAdmin()
    adm.MountTo("/admin", mux)
    adm.GetRouter().Post("/auth", func(ctx *admin.Context) {
        // we will only hit this on succesful login - redirect to admin dashboard
        http.Redirect(ctx.Writer, ctx.Request, "/admin", http.StatusFound)
    })
    adm.GetRouter().Get("/logout", func(ctx *admin.Context) {
        w := ctx.Writer
        r := ctx.Request
        sess, err := SessionManager.SessionStart(w, r)
        if err != nil {
            handleError(err)
        }
        defer sess.SessionRelease(w)
        sess.Delete("User")
        http.Redirect(w, r, "/login", http.StatusFound)
    })
    // NOTE: `system` is where QOR admin will upload files e.g. images - we map this to Hugo's static dir along with our other static assets
    // TODO read static dir from config
    // TODO read static assets list from config
    for _, path := range []string{"system", "css", "fonts", "images", "js", "login"} {
        mux.Handle(fmt.Sprintf("/%s/", path), http.FileServer(http.Dir("static")))
    }
    // BUG FIX: log the listen address before the blocking ListenAndServe
    // call; previously this line was unreachable.
    fmt.Printf("Listening on: %v\n", config.QOR.Port)
    if err := http.ListenAndServe(fmt.Sprintf("127.0.0.1:%d", config.QOR.Port), mux); err != nil {
        handleError(err)
    }
}
// setupConfig populates the package-level config from the parameters
// plus the hugo.toml file: QOR port/site settings, Hugo site settings
// (baseurl, dirs, language, RSS, menus), the shared DB handle, the i18n
// backend, and admin auth. Returns the first configuration error.
func setupConfig(port int, sitename string, db *gorm.DB, auth admin.Auth) error {
    config.QOR.Port = port
    config.QOR.SiteName = sitename
    // As a minumum add the root path for our site
    config.QOR.Paths = append(config.QOR.Paths, "/")
    config.Hugo.MetaDataFormat = "json"
    // Register the expected hugo.toml keys with their defaults, then parse.
    hugoConf := configr.New()
    hugoConf.RegisterKey("baseurl", "Hugo site baseurl", "/")
    hugoConf.RegisterKey("staticdir", "Hugo site static dir", "static")
    hugoConf.RegisterKey("publishdir", "Hugo site publish dir", "public")
    hugoConf.RegisterKey("languageCode", "Hugo site languageCode", "en")
    hugoConf.RegisterKey("disableRSS", "Hugo site disableRSS", true)
    hugoConf.RegisterKey("menu", "Hugo site menus", make(map[string]interface{}))
    hugoConfigFile := "hugo.toml"
    hugoConf.AddSource(configr.NewFile(hugoConfigFile))
    if err := hugoConf.Parse(); err != nil {
        return err
    }
    // Copy each parsed value into the package config, failing fast on
    // any missing or mistyped key.
    baseurl, err := hugoConf.String("baseurl")
    if err != nil {
        return err
    }
    config.Hugo.BaseURL = baseurl
    staticdir, err := hugoConf.String("staticdir")
    if err != nil {
        return err
    }
    config.Hugo.StaticDir = staticdir
    publishdir, err := hugoConf.String("publishdir")
    if err != nil {
        return err
    }
    config.Hugo.PublishDir = publishdir
    languageCode, err := hugoConf.String("languageCode")
    if err != nil {
        return err
    }
    config.Hugo.LanguageCode = languageCode
    disableRSS, err := hugoConf.Bool("disableRSS")
    if err != nil {
        return err
    }
    config.Hugo.DisableRSS = disableRSS
    rawMenu, err := hugoConf.Get("menu")
    if err != nil {
        return err
    }
    if menu, ok := rawMenu.(map[string]interface{}); ok {
        config.Hugo.Menu = menu
        // Add additional site paths from main menu items
        if rawMainMenu, ok := menu["main"]; ok {
            if mainMenu, ok := rawMainMenu.([]map[string]interface{}); ok {
                for _, item := range mainMenu {
                    if url, ok := item["url"].(string); ok {
                        if url != "" && url != "/" {
                            config.QOR.Paths = append(config.QOR.Paths, url)
                        }
                    }
                }
            }
        }
    }
    config.DB = db
    config.I18n = i18n.New(database.New(db))
    config.Auth = auth
    return nil
}
// handleError prints err to stdout and terminates the process with a
// non-zero exit code.
func handleError(err error) {
    fmt.Println(err)
    os.Exit(1)
    //TODO more graceful exit!
}
|
package server
import (
"fmt"
"html/template"
"net/http"
)
// RootHandler renders the root template with the process list.
func RootHandler(w http.ResponseWriter, r *http.Request, processes []*Process) {
    tmpl, err := template.ParseFiles("templates/root.html")
    if err != nil {
        // BUG FIX: previously only a message was printed and execution fell
        // through to Execute on a nil template, which panics.
        http.Error(w, "Error parsing template", http.StatusInternalServerError)
        return
    }
    if err := tmpl.Execute(w, processes); err != nil {
        // BUG FIX: err.Error() was previously passed as the Printf format
        // string, which misrenders any '%' in the message.
        fmt.Printf("%s", err.Error())
        panic(err)
    }
}
// ElectionHandler forces an election by sending a Force message on the
// second process's God channel, then acknowledges to the client.
// NOTE(review): indexes processes[1] without a bounds check and sends a
// pointer to the package-level True — assumes at least two processes.
func ElectionHandler(w http.ResponseWriter, r *http.Request, processes []*Process) {
    processes[1].God <- &Force{Election: &True}
    fmt.Fprintf(w, "Forcing an election")
}
// LagHandler acknowledges a lag-injection request. The template is
// parsed only to verify it exists; its content is not rendered here.
func LagHandler(w http.ResponseWriter, r *http.Request) {
    if _, err := template.ParseFiles("templates/lag.html"); err != nil {
        http.Error(w, "Error parsing lag template", http.StatusInternalServerError)
    } else {
        // Parse the submitted form before acknowledging; the parsed
        // values themselves are not read in this handler.
        if err := r.ParseForm(); err != nil {
            http.Error(w, "Error parsing lag", http.StatusInternalServerError)
            return
        }
        fmt.Fprintf(w, "Adding lag")
    }
}
Remove form parsing
package server
import (
"fmt"
"html/template"
"net/http"
)
// RootHandler renders the root template with the process list.
func RootHandler(w http.ResponseWriter, r *http.Request, processes []*Process) {
    tmpl, err := template.ParseFiles("templates/root.html")
    if err != nil {
        // BUG FIX: previously only a message was printed and execution fell
        // through to Execute on a nil template, which panics.
        http.Error(w, "Error parsing template", http.StatusInternalServerError)
        return
    }
    if err := tmpl.Execute(w, processes); err != nil {
        // BUG FIX: err.Error() was previously passed as the Printf format
        // string, which misrenders any '%' in the message.
        fmt.Printf("%s", err.Error())
        panic(err)
    }
}
// ElectionHandler forces an election by sending a Force message on the
// second process's God channel, then acknowledges to the client.
// NOTE(review): indexes processes[1] without a bounds check and sends a
// pointer to the package-level True — assumes at least two processes.
func ElectionHandler(w http.ResponseWriter, r *http.Request, processes []*Process) {
    processes[1].God <- &Force{Election: &True}
    fmt.Fprintf(w, "Forcing an election")
}
func LagHandler(w http.ResponseWriter, r *http.Request) {
if _, err := template.ParseFiles("templates/lag.html"); err != nil {
http.Error(w, "Error parsing lag template", http.StatusInternalServerError)
} else {
fmt.Fprintf(w, "Adding lag")
}
}
|
package server
import (
".."
"../fs"
"../future"
"../rcli"
"bufio"
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"os"
"path"
"strconv"
"strings"
"sync"
"text/tabwriter"
"time"
)
// VERSION is the docker server version string reported by CmdInfo.
const VERSION = "0.0.1"
// ListenAndServe exposes the docker server on localhost over both HTTP
// (background goroutine, errors dropped) and the rcli text protocol
// (blocking; its error is returned).
func (srv *Server) ListenAndServe() error {
    go rcli.ListenAndServeHTTP("127.0.0.1:8080", srv)
    // FIXME: we want to use unix sockets here, but net.UnixConn doesn't expose
    // CloseWrite(), which we need to cleanly signal that stdin is closed without
    // closing the connection.
    // See http://code.google.com/p/go/issues/detail?id=3345
    return rcli.ListenAndServe("tcp", "127.0.0.1:4242", srv)
}
// Name returns the rcli service name for this server.
func (srv *Server) Name() string {
    return "docker"
}
// FIXME: Stop violating DRY by repeating usage here and in Subcmd declarations
// Help returns the top-level usage text listing every supported command
// with a one-line description.
// NOTE(review): "run" and "ps" each appear twice in this table — likely
// leftover duplicates; confirm before removing since it only affects
// help output.
func (srv *Server) Help() string {
    help := "Usage: docker COMMAND [arg...]\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n"
    for _, cmd := range [][]interface{}{
        {"run", "Run a command in a container"},
        {"ps", "Display a list of containers"},
        {"import", "Create a new filesystem image from the contents of a tarball"},
        {"attach", "Attach to a running container"},
        {"cat", "Write the contents of a container's file to standard output"},
        {"commit", "Create a new image from a container's changes"},
        {"cp", "Create a copy of IMAGE and call it NAME"},
        {"debug", "(debug only) (No documentation available)"},
        {"diff", "Inspect changes on a container's filesystem"},
        {"images", "List images"},
        {"info", "Display system-wide information"},
        {"inspect", "Return low-level information on a container"},
        {"kill", "Kill a running container"},
        {"layers", "(debug only) List filesystem layers"},
        {"logs", "Fetch the logs of a container"},
        {"ls", "List the contents of a container's directory"},
        {"mirror", "(debug only) (No documentation available)"},
        {"port", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT"},
        {"ps", "List containers"},
        {"pull", "Download a new image from a remote location"},
        {"put", "Import a new image from a local archive"},
        {"reset", "Reset changes to a container's filesystem"},
        {"restart", "Restart a running container"},
        {"rm", "Remove a container"},
        {"rmimage", "Remove an image"},
        {"run", "Run a command in a new container"},
        {"start", "Start a stopped container"},
        {"stop", "Stop a running container"},
        {"tar", "Stream the contents of a container as a tar archive"},
        {"umount", "(debug only) Mount a container's filesystem"},
        {"wait", "Block until a container stops, then print its exit code"},
        {"web", "A web UI for docker"},
        {"write", "Write the contents of standard input to a container's file"},
    } {
        help += fmt.Sprintf("    %-10.10s%s\n", cmd...)
    }
    return help
}
// CmdWait implements 'docker wait': block until each named container
// stops, printing its exit code; unknown names abort with an error.
func (srv *Server) CmdWait(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
    cmd := rcli.Subcmd(stdout, "wait", "[OPTIONS] NAME", "Block until a container stops, then print its exit code.")
    if err := cmd.Parse(args); err != nil {
        return nil
    }
    if cmd.NArg() < 1 {
        cmd.Usage()
        return nil
    }
    for _, name := range cmd.Args() {
        container := srv.containers.Get(name)
        if container == nil {
            return errors.New("No such container: " + name)
        }
        fmt.Fprintln(stdout, container.Wait())
    }
    return nil
}
// CmdInfo implements 'docker info': print container count, server
// version, and image count.
func (srv *Server) CmdInfo(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
    cmd := rcli.Subcmd(stdout, "info", "", "Display system-wide information.")
    if err := cmd.Parse(args); err != nil {
        return nil
    }
    if cmd.NArg() > 1 {
        cmd.Usage()
        return nil
    }
    containerCount := len(srv.containers.List())
    imageCount := len(srv.images.ById)
    fmt.Fprintf(stdout, "containers: %d\nversion: %s\nimages: %d\n", containerCount, VERSION, imageCount)
    return nil
}
// CmdStop implements 'docker stop': stop each named running container
// and echo its ID; unknown names abort with an error.
func (srv *Server) CmdStop(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
    cmd := rcli.Subcmd(stdout, "stop", "[OPTIONS] NAME", "Stop a running container")
    if err := cmd.Parse(args); err != nil {
        return nil
    }
    if cmd.NArg() < 1 {
        cmd.Usage()
        return nil
    }
    for _, name := range cmd.Args() {
        container := srv.containers.Get(name)
        if container == nil {
            return errors.New("No such container: " + name)
        }
        if err := container.Stop(); err != nil {
            return err
        }
        fmt.Fprintln(stdout, container.Id)
    }
    return nil
}
// CmdRestart implements 'docker restart': restart each named container
// and echo its ID; unknown names abort with an error.
func (srv *Server) CmdRestart(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
    cmd := rcli.Subcmd(stdout, "restart", "[OPTIONS] NAME", "Restart a running container")
    if err := cmd.Parse(args); err != nil {
        return nil
    }
    if cmd.NArg() < 1 {
        cmd.Usage()
        return nil
    }
    for _, name := range cmd.Args() {
        container := srv.containers.Get(name)
        if container == nil {
            return errors.New("No such container: " + name)
        }
        if err := container.Restart(); err != nil {
            return err
        }
        fmt.Fprintln(stdout, container.Id)
    }
    return nil
}
// CmdStart implements 'docker start': start each named stopped
// container and echo its ID; unknown names abort with an error.
func (srv *Server) CmdStart(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
    cmd := rcli.Subcmd(stdout, "start", "[OPTIONS] NAME", "Start a stopped container")
    if err := cmd.Parse(args); err != nil {
        return nil
    }
    if cmd.NArg() < 1 {
        cmd.Usage()
        return nil
    }
    for _, name := range cmd.Args() {
        container := srv.containers.Get(name)
        if container == nil {
            return errors.New("No such container: " + name)
        }
        if err := container.Start(); err != nil {
            return err
        }
        fmt.Fprintln(stdout, container.Id)
    }
    return nil
}
// CmdUmount implements the debug-only 'docker umount': unmount each
// named container's filesystem and echo its ID.
func (srv *Server) CmdUmount(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
    cmd := rcli.Subcmd(stdout, "umount", "[OPTIONS] NAME", "umount a container's filesystem (debug only)")
    if err := cmd.Parse(args); err != nil {
        return nil
    }
    if cmd.NArg() < 1 {
        cmd.Usage()
        return nil
    }
    for _, name := range cmd.Args() {
        container := srv.containers.Get(name)
        if container == nil {
            return errors.New("No such container: " + name)
        }
        if err := container.Mountpoint.Umount(); err != nil {
            return err
        }
        fmt.Fprintln(stdout, container.Id)
    }
    return nil
}
// CmdMount implements the debug-only 'docker mount': ensure each named
// container's filesystem is mounted and echo its ID.
func (srv *Server) CmdMount(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
    // BUG FIX: the subcommand was previously registered as "umount",
    // a copy-paste from CmdUmount, so usage output named the wrong command.
    cmd := rcli.Subcmd(stdout, "mount", "[OPTIONS] NAME", "mount a container's filesystem (debug only)")
    if err := cmd.Parse(args); err != nil {
        return nil
    }
    if cmd.NArg() < 1 {
        cmd.Usage()
        return nil
    }
    for _, name := range cmd.Args() {
        container := srv.containers.Get(name)
        if container == nil {
            return errors.New("No such container: " + name)
        }
        if err := container.Mountpoint.EnsureMounted(); err != nil {
            return err
        }
        fmt.Fprintln(stdout, container.Id)
    }
    return nil
}
// CmdCat implements 'docker cat': stream one file from a container's
// filesystem to standard output.
func (srv *Server) CmdCat(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
    cmd := rcli.Subcmd(stdout, "cat", "[OPTIONS] CONTAINER PATH", "write the contents of a container's file to standard output")
    if err := cmd.Parse(args); err != nil {
        return nil
    }
    if cmd.NArg() < 2 {
        cmd.Usage()
        return nil
    }
    name, path := cmd.Arg(0), cmd.Arg(1)
    container := srv.containers.Get(name)
    if container == nil {
        return errors.New("No such container: " + name)
    }
    f, err := container.Mountpoint.OpenFile(path, os.O_RDONLY, 0)
    if err != nil {
        return err
    }
    if _, err := io.Copy(stdout, f); err != nil {
        return err
    }
    return nil
}
// CmdWrite implements 'docker write': copy standard input into a file
// inside a container's filesystem (creating it with mode 0600).
func (srv *Server) CmdWrite(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
    cmd := rcli.Subcmd(stdout, "write", "[OPTIONS] CONTAINER PATH", "write the contents of standard input to a container's file")
    if err := cmd.Parse(args); err != nil {
        return nil
    }
    if cmd.NArg() < 2 {
        cmd.Usage()
        return nil
    }
    name, path := cmd.Arg(0), cmd.Arg(1)
    container := srv.containers.Get(name)
    if container == nil {
        return errors.New("No such container: " + name)
    }
    f, err := container.Mountpoint.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0600)
    if err != nil {
        return err
    }
    if _, err := io.Copy(f, stdin); err != nil {
        return err
    }
    return nil
}
// CmdLs implements 'docker ls': print the entries of a directory inside
// a container's filesystem, one name per line.
func (srv *Server) CmdLs(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
    cmd := rcli.Subcmd(stdout, "ls", "[OPTIONS] CONTAINER PATH", "List the contents of a container's directory")
    if err := cmd.Parse(args); err != nil {
        return nil
    }
    if cmd.NArg() < 2 {
        cmd.Usage()
        return nil
    }
    name, path := cmd.Arg(0), cmd.Arg(1)
    container := srv.containers.Get(name)
    if container == nil {
        return errors.New("No such container: " + name)
    }
    files, err := container.Mountpoint.ReadDir(path)
    if err != nil {
        return err
    }
    for _, f := range files {
        fmt.Fprintln(stdout, f.Name())
    }
    return nil
}
// CmdInspect implements 'docker inspect': write the named container's
// state as indented JSON to standard output.
func (srv *Server) CmdInspect(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
    cmd := rcli.Subcmd(stdout, "inspect", "[OPTIONS] CONTAINER", "Return low-level information on a container")
    if err := cmd.Parse(args); err != nil {
        return nil
    }
    if cmd.NArg() < 1 {
        cmd.Usage()
        return nil
    }
    name := cmd.Arg(0)
    container := srv.containers.Get(name)
    if container == nil {
        return errors.New("No such container or image: " + name)
    }
    var subject interface{} = container
    //} else if image, err := srv.images.List(name); image != nil {
    //	subject = image
    data, err := json.Marshal(subject)
    if err != nil {
        return err
    }
    pretty := new(bytes.Buffer)
    if err = json.Indent(pretty, data, "", "    "); err != nil {
        return err
    }
    if _, err := io.Copy(stdout, pretty); err != nil {
        return err
    }
    return nil
}
// CmdPort implements 'docker port': print the public-facing port that
// is NAT-ed to the container's given private port.
func (srv *Server) CmdPort(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
    cmd := rcli.Subcmd(stdout, "port", "[OPTIONS] CONTAINER PRIVATE_PORT", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT")
    if err := cmd.Parse(args); err != nil {
        return nil
    }
    if cmd.NArg() != 2 {
        cmd.Usage()
        return nil
    }
    name := cmd.Arg(0)
    privatePort := cmd.Arg(1)
    container := srv.containers.Get(name)
    if container == nil {
        return errors.New("No such container: " + name)
    }
    frontend, exists := container.NetworkSettings.PortMapping[privatePort]
    if !exists {
        return fmt.Errorf("No private port '%s' allocated on %s", privatePort, name)
    }
    fmt.Fprintln(stdout, frontend)
    return nil
}
// 'docker rmi NAME' removes all images with the name NAME
// func (srv *Server) CmdRmi(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
// cmd := rcli.Subcmd(stdout, "rmimage", "[OPTIONS] IMAGE", "Remove an image")
// fl_regexp := cmd.Bool("r", false, "Use IMAGE as a regular expression instead of an exact name")
// if err := cmd.Parse(args); err != nil {
// cmd.Usage()
// return nil
// }
// if cmd.NArg() < 1 {
// cmd.Usage()
// return nil
// }
// for _, name := range cmd.Args() {
// var err error
// if *fl_regexp {
// err = srv.images.DeleteMatch(name)
// } else {
// image := srv.images.Find(name)
// if image == nil {
// return errors.New("No such image: " + name)
// }
// err = srv.images.Delete(name)
// }
// if err != nil {
// return err
// }
// }
// return nil
// }
// CmdRm implements 'docker rm': destroy each named container; an
// unknown name aborts, while destruction failures are only reported.
func (srv *Server) CmdRm(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
    cmd := rcli.Subcmd(stdout, "rm", "[OPTIONS] CONTAINER", "Remove a container")
    if err := cmd.Parse(args); err != nil {
        return nil
    }
    for _, name := range cmd.Args() {
        target := srv.containers.Get(name)
        if target == nil {
            return errors.New("No such container: " + name)
        }
        if err := srv.containers.Destroy(target); err != nil {
            fmt.Fprintln(stdout, "Error destroying container "+name+": "+err.Error())
        }
    }
    return nil
}
// CmdKill implements 'docker kill': kill each named running container;
// an unknown name aborts, while kill failures are only reported.
func (srv *Server) CmdKill(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
    cmd := rcli.Subcmd(stdout, "kill", "[OPTIONS] CONTAINER [CONTAINER...]", "Kill a running container")
    if err := cmd.Parse(args); err != nil {
        return nil
    }
    for _, name := range cmd.Args() {
        target := srv.containers.Get(name)
        if target == nil {
            return errors.New("No such container: " + name)
        }
        if err := target.Kill(); err != nil {
            fmt.Fprintln(stdout, "Error killing container "+name+": "+err.Error())
        }
    }
    return nil
}
// CmdImport implements 'docker import': create a filesystem image from a
// tarball read either from stdin (-stdin) or downloaded from the URL/name
// given as the first argument, then print the new image ID.
func (srv *Server) CmdImport(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
    cmd := rcli.Subcmd(stdout, "import", "[OPTIONS] NAME", "Create a new filesystem image from the contents of a tarball")
    fl_stdin := cmd.Bool("stdin", false, "Read tarball from stdin")
    if err := cmd.Parse(args); err != nil {
        return nil
    }
    var archive io.Reader
    name := cmd.Arg(0)
    if name == "" {
        return errors.New("Not enough arguments")
    }
    if *fl_stdin {
        archive = stdin
    } else {
        // Normalize the name into a full URL: default scheme http, and
        // bare names resolve to the docker.io images bucket on S3.
        u, err := url.Parse(name)
        if err != nil {
            return err
        }
        if u.Scheme == "" {
            u.Scheme = "http"
        }
        // FIXME: hardcode a mirror URL that does not depend on a single provider.
        if u.Host == "" {
            u.Host = "s3.amazonaws.com"
            u.Path = path.Join("/docker.io/images", u.Path)
        }
        fmt.Fprintf(stdout, "Downloading from %s\n", u.String())
        // Download with curl (pretty progress bar)
        // If curl is not available, fallback to http.Get()
        archive, err = future.Curl(u.String(), stdout)
        if err != nil {
            if resp, err := http.Get(u.String()); err != nil {
                return err
            } else {
                // NOTE(review): resp.Body is handed off as the archive
                // reader and never explicitly closed here — confirm
                // srv.images.Create consumes/closes it.
                archive = resp.Body
            }
        }
    }
    fmt.Fprintf(stdout, "Unpacking to %s\n", name)
    img, err := srv.images.Create(archive, nil, name, "")
    if err != nil {
        return err
    }
    fmt.Fprintln(stdout, img.Id)
    return nil
}
// CmdImages implements 'docker images': list images (optionally
// filtered to one name) in a tab-aligned table, with -l limiting the
// number of versions shown per name and -q printing only image IDs.
func (srv *Server) CmdImages(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
    cmd := rcli.Subcmd(stdout, "images", "[OPTIONS] [NAME]", "List images")
    limit := cmd.Int("l", 0, "Only show the N most recent versions of each image")
    quiet := cmd.Bool("q", false, "only show numeric IDs")
    if err := cmd.Parse(args); err != nil {
        return nil
    }
    if cmd.NArg() > 1 {
        cmd.Usage()
        return nil
    }
    var nameFilter string
    if cmd.NArg() == 1 {
        nameFilter = cmd.Arg(0)
    }
    w := tabwriter.NewWriter(stdout, 20, 1, 3, ' ', 0)
    if !*quiet {
        fmt.Fprintf(w, "NAME\tID\tCREATED\tPARENT\n")
    }
    paths, err := srv.images.Paths()
    if err != nil {
        return err
    }
    for _, name := range paths {
        if nameFilter != "" && nameFilter != name {
            continue
        }
        ids, err := srv.images.List(name)
        if err != nil {
            return err
        }
        for idx, img := range ids {
            if *limit > 0 && idx >= *limit {
                break
            }
            if !*quiet {
                for idx, field := range []string{
                    /* NAME */ name,
                    /* ID */ img.Id,
                    // IDIOM FIX: time.Since(t) replaces time.Now().Sub(t).
                    /* CREATED */ future.HumanDuration(time.Since(time.Unix(img.Created, 0))) + " ago",
                    /* PARENT */ img.Parent,
                } {
                    if idx == 0 {
                        w.Write([]byte(field))
                    } else {
                        w.Write([]byte("\t" + field))
                    }
                }
                w.Write([]byte{'\n'})
            } else {
                stdout.Write([]byte(img.Id + "\n"))
            }
        }
    }
    if !*quiet {
        w.Flush()
    }
    return nil
}
// CmdPs lists containers as an ID/IMAGE/COMMAND/CREATED/STATUS/COMMENT
// table. Only running containers are shown unless -a is given; -q prints
// bare ids and -notrunc disables command truncation.
func (srv *Server) CmdPs(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout,
		"ps", "[OPTIONS]", "List containers")
	quiet := cmd.Bool("q", false, "Only display numeric IDs")
	fl_all := cmd.Bool("a", false, "Show all containers. Only running containers are shown by default.")
	fl_full := cmd.Bool("notrunc", false, "Don't truncate output")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	w := tabwriter.NewWriter(stdout, 12, 1, 3, ' ', 0)
	if !*quiet {
		fmt.Fprintf(w, "ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS\tCOMMENT\n")
	}
	for _, container := range srv.containers.List() {
		comment := container.GetUserData("comment")
		if !container.State.Running && !*fl_all {
			continue
		}
		if !*quiet {
			command := fmt.Sprintf("%s %s", container.Path, strings.Join(container.Args, " "))
			if !*fl_full {
				command = docker.Trunc(command, 20)
			}
			// idx == 0 marks the first column: no leading tab separator.
			for idx, field := range []string{
				/* ID */ container.Id,
				/* IMAGE */ container.GetUserData("image"),
				/* COMMAND */ command,
				/* CREATED */ future.HumanDuration(time.Now().Sub(container.Created)) + " ago",
				/* STATUS */ container.State.String(),
				/* COMMENT */ comment,
			} {
				if idx == 0 {
					w.Write([]byte(field))
				} else {
					w.Write([]byte("\t" + field))
				}
			}
			w.Write([]byte{'\n'})
		} else {
			stdout.Write([]byte(container.Id + "\n"))
		}
	}
	if !*quiet {
		w.Flush()
	}
	return nil
}
// CmdLayers prints every filesystem layer known to the image store,
// one per line (debug only).
func (srv *Server) CmdLayers(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout,
		"layers", "[OPTIONS]",
		"List filesystem layers (debug only)")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	layers := srv.images.Layers()
	for i := 0; i < len(layers); i++ {
		fmt.Fprintln(stdout, layers[i])
	}
	return nil
}
// CmdCp duplicates an existing image under a new name and prints the
// id of the copy.
func (srv *Server) CmdCp(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout,
		"cp", "[OPTIONS] IMAGE NAME",
		"Create a copy of IMAGE and call it NAME")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	// Resolve the source image first; guard clauses replace the original
	// nested if/else chain.
	image, err := srv.images.Get(cmd.Arg(0))
	if err != nil {
		return err
	}
	if image == nil {
		return errors.New("Image " + cmd.Arg(0) + " does not exist")
	}
	img, err := image.Copy(cmd.Arg(1))
	if err != nil {
		return err
	}
	fmt.Fprintln(stdout, img.Id)
	return nil
}
// CmdCommit snapshots a container's read-write layer into a new image
// named DEST, stacked on top of the container's base image.
func (srv *Server) CmdCommit(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout,
		"commit", "[OPTIONS] CONTAINER [DEST]",
		"Create a new image from a container's changes")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	containerName, imgName := cmd.Arg(0), cmd.Arg(1)
	if containerName == "" || imgName == "" {
		cmd.Usage()
		return nil
	}
	if container := srv.containers.Get(containerName); container != nil {
		// FIXME: freeze the container before copying it to avoid data corruption?
		rwTar, err := fs.Tar(container.Mountpoint.Rw, fs.Uncompressed)
		if err != nil {
			return err
		}
		// Create a new image from the container's base layers + a new layer from container changes
		parentImg, err := srv.images.Get(container.Image)
		if err != nil {
			return err
		}
		img, err := srv.images.Create(rwTar, parentImg, imgName, "")
		if err != nil {
			return err
		}
		fmt.Fprintln(stdout, img.Id)
		return nil
	}
	return errors.New("No such container: " + containerName)
}
// CmdTar streams a container's entire filesystem to the client as an
// uncompressed tar archive. -s (sparse mode) is not implemented yet.
func (srv *Server) CmdTar(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout,
		"tar", "CONTAINER",
		"Stream the contents of a container as a tar archive")
	fl_sparse := cmd.Bool("s", false, "Generate a sparse tar stream (top layer + reference to bottom layers)")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	if *fl_sparse {
		return errors.New("Sparse mode not yet implemented") // FIXME
	}
	name := cmd.Arg(0)
	if container := srv.containers.Get(name); container != nil {
		// The filesystem must be mounted before it can be tarred.
		if err := container.Mountpoint.EnsureMounted(); err != nil {
			return err
		}
		data, err := fs.Tar(container.Mountpoint.Root, fs.Uncompressed)
		if err != nil {
			return err
		}
		// Stream the entire contents of the container (basically a volatile snapshot)
		if _, err := io.Copy(stdout, data); err != nil {
			return err
		}
		return nil
	}
	return errors.New("No such container: " + name)
}
// CmdDiff prints the filesystem changes (one per line) recorded in a
// container's read-write layer relative to its image.
func (srv *Server) CmdDiff(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout,
		"diff", "CONTAINER [OPTIONS]",
		"Inspect changes on a container's filesystem")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	if cmd.NArg() < 1 {
		return errors.New("Not enough arguments")
	}
	// Early-return guard instead of the original if/else nesting.
	container := srv.containers.Get(cmd.Arg(0))
	if container == nil {
		return errors.New("No such container")
	}
	changes, err := srv.images.Changes(container.Mountpoint)
	if err != nil {
		return err
	}
	for _, change := range changes {
		fmt.Fprintln(stdout, change.String())
	}
	return nil
}
// CmdReset discards the read-write layer changes of each named
// container. Unknown names are silently skipped (matching historical
// behavior).
func (srv *Server) CmdReset(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout,
		"reset", "CONTAINER [OPTIONS]",
		"Reset changes to a container's filesystem")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	if cmd.NArg() < 1 {
		return errors.New("Not enough arguments")
	}
	for _, name := range cmd.Args() {
		container := srv.containers.Get(name)
		if container == nil {
			continue
		}
		if err := container.Mountpoint.Reset(); err != nil {
			return errors.New("Reset " + container.Id + ": " + err.Error())
		}
	}
	return nil
}
// CmdLogs writes a container's captured stdout log followed by its
// stderr log to the client.
func (srv *Server) CmdLogs(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout, "logs", "[OPTIONS] CONTAINER", "Fetch the logs of a container")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	if cmd.NArg() != 1 {
		cmd.Usage()
		return nil
	}
	name := cmd.Arg(0)
	if container := srv.containers.Get(name); container != nil {
		if _, err := io.Copy(stdout, container.StdoutLog()); err != nil {
			return err
		}
		if _, err := io.Copy(stdout, container.StderrLog()); err != nil {
			return err
		}
		return nil
	}
	return errors.New("No such container: " + cmd.Arg(0))
}
// CreateContainer builds a new container running cmd/args from img and
// records the image id and user comment so `docker ps` can display them.
// memory is the memory limit in bytes (0 = no limit).
//
// Merge fix: the memory parameter had been dropped from this signature by
// a bad merge even though CmdRun still passes *fl_memory between
// openStdin and comment — restore it (matching the post-merge version of
// this function elsewhere in the file).
func (srv *Server) CreateContainer(img *fs.Image, ports []int, user string, tty bool, openStdin bool, memory int64, comment string, cmd string, args ...string) (*docker.Container, error) {
	id := future.RandomId()[:8]
	container, err := srv.containers.Create(id, cmd, args, img,
		&docker.Config{
			Hostname:  id,
			Ports:     ports,
			User:      user,
			Tty:       tty,
			OpenStdin: openStdin,
			Memory:    memory,
		})
	if err != nil {
		return nil, err
	}
	// On userdata failure, destroy the half-created container before bailing.
	if err := container.SetUserData("image", img.Id); err != nil {
		srv.containers.Destroy(container)
		return nil, errors.New("Error setting container userdata: " + err.Error())
	}
	if err := container.SetUserData("comment", comment); err != nil {
		srv.containers.Destroy(container)
		return nil, errors.New("Error setting container userdata: " + err.Error())
	}
	return container, nil
}
// CmdAttach attaches the client to a running container's stdio pipes.
// -i attaches stdin; -o and -e (both on by default) attach stdout and
// stderr. Blocks until every attached stream finishes.
func (srv *Server) CmdAttach(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout, "attach", "[OPTIONS]", "Attach to a running container")
	fl_i := cmd.Bool("i", false, "Attach to stdin")
	fl_o := cmd.Bool("o", true, "Attach to stdout")
	fl_e := cmd.Bool("e", true, "Attach to stderr")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	if cmd.NArg() != 1 {
		cmd.Usage()
		return nil
	}
	name := cmd.Arg(0)
	container := srv.containers.Get(name)
	if container == nil {
		return errors.New("No such container: " + name)
	}
	var wg sync.WaitGroup
	// Copy errors are deliberately ignored: a closed pipe just ends that
	// stream. Idiom fix: use wg.Done() instead of wg.Add(-1).
	if *fl_i {
		c_stdin, err := container.StdinPipe()
		if err != nil {
			return err
		}
		wg.Add(1)
		go func() { io.Copy(c_stdin, stdin); wg.Done() }()
	}
	if *fl_o {
		c_stdout, err := container.StdoutPipe()
		if err != nil {
			return err
		}
		wg.Add(1)
		go func() { io.Copy(stdout, c_stdout); wg.Done() }()
	}
	if *fl_e {
		c_stderr, err := container.StderrPipe()
		if err != nil {
			return err
		}
		wg.Add(1)
		go func() { io.Copy(stdout, c_stderr); wg.Done() }()
	}
	wg.Wait()
	return nil
}
// ports collects repeated -p flags into a list of integer port numbers.
// It implements flag.Value (String + Set).
type ports []int

// String renders the collected ports for flag help/diagnostics.
func (p *ports) String() string {
	return fmt.Sprint(*p)
}

// Set parses one -p occurrence and appends the port to the list.
func (p *ports) Set(value string) error {
	n, convErr := strconv.Atoi(value)
	if convErr != nil {
		return fmt.Errorf("Invalid port: %v", value)
	}
	*p = append(*p, n)
	return nil
}
// CmdRun creates and starts a new container from IMAGE running COMMAND.
// With no image it defaults to "base"; with no command it defaults to an
// interactive /bin/bash with -i, -t and -a forced on. With -a the call
// blocks, relaying the container's stdout/stderr (and stdin with -i);
// otherwise it prints the new container id and returns.
func (srv *Server) CmdRun(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout, "run", "[OPTIONS] IMAGE COMMAND [ARG...]", "Run a command in a new container")
	fl_user := cmd.String("u", "", "Username or UID")
	fl_attach := cmd.Bool("a", false, "Attach stdin and stdout")
	fl_stdin := cmd.Bool("i", false, "Keep stdin open even if not attached")
	fl_tty := cmd.Bool("t", false, "Allocate a pseudo-tty")
	fl_comment := cmd.String("c", "", "Comment")
	fl_memory := cmd.Int64("m", 0, "Memory limit (in bytes)")
	var fl_ports ports
	cmd.Var(&fl_ports, "p", "Map a network port to the container")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	name := cmd.Arg(0)
	var cmdline []string
	if len(cmd.Args()) >= 2 {
		cmdline = cmd.Args()[1:]
	}
	// Choose a default image if needed
	if name == "" {
		name = "base"
	}
	// Choose a default command if needed
	if len(cmdline) == 0 {
		*fl_stdin = true
		*fl_tty = true
		*fl_attach = true
		cmdline = []string{"/bin/bash", "-i"}
	}
	// Find the image
	img, err := srv.images.Find(name)
	if err != nil {
		return err
	} else if img == nil {
		return errors.New("No such image: " + name)
	}
	// Create new container
	// NOTE(review): this passes *fl_memory, so CreateContainer must accept a
	// memory int64 between openStdin and comment — confirm its signature.
	container, err := srv.CreateContainer(img, fl_ports, *fl_user, *fl_tty,
		*fl_stdin, *fl_memory, *fl_comment, cmdline[0], cmdline[1:]...)
	if err != nil {
		return errors.New("Error creating container: " + err.Error())
	}
	if *fl_stdin {
		cmd_stdin, err := container.StdinPipe()
		if err != nil {
			return err
		}
		if *fl_attach {
			// Forward client stdin into the container; close the pipe at EOF
			// so the container sees its stdin end.
			future.Go(func() error {
				_, err := io.Copy(cmd_stdin, stdin)
				cmd_stdin.Close()
				return err
			})
		}
	}
	// Run the container
	if *fl_attach {
		// Pipes must be acquired before Start so no output is lost.
		cmd_stderr, err := container.StderrPipe()
		if err != nil {
			return err
		}
		cmd_stdout, err := container.StdoutPipe()
		if err != nil {
			return err
		}
		if err := container.Start(); err != nil {
			return err
		}
		sending_stdout := future.Go(func() error {
			_, err := io.Copy(stdout, cmd_stdout)
			return err
		})
		sending_stderr := future.Go(func() error {
			_, err := io.Copy(stdout, cmd_stderr)
			return err
		})
		err_sending_stdout := <-sending_stdout
		err_sending_stderr := <-sending_stderr
		if err_sending_stdout != nil {
			return err_sending_stdout
		}
		if err_sending_stderr != nil {
			return err_sending_stderr
		}
		container.Wait()
	} else {
		// Detached: start and report the container id.
		if err := container.Start(); err != nil {
			return err
		}
		fmt.Fprintln(stdout, container.Id)
	}
	return nil
}
// New constructs a Server backed by a fresh docker runtime; the image
// store is the one owned by that runtime.
func New() (*Server, error) {
	future.Seed()
	// if err != nil {
	// 	return nil, err
	// }
	containers, err := docker.New()
	if err != nil {
		return nil, err
	}
	srv := &Server{
		images:     containers.Store,
		containers: containers,
	}
	return srv, nil
}
// CmdMirror echoes the client's standard input back to its standard
// output unchanged (debug only).
func (srv *Server) CmdMirror(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	if _, err := io.Copy(stdout, stdin); err != nil {
		return err
	}
	return nil
}
// CmdDebug echoes each line of the client's stdin, prefixed with "--- ",
// to the server process's own stdout (debug only).
func (srv *Server) CmdDebug(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	// Bug fix: the original constructed bufio.NewReader(stdin) inside the
	// loop, so any bytes the reader had buffered beyond the first line were
	// discarded on every iteration, silently dropping input. Create the
	// buffered reader once.
	r := bufio.NewReader(stdin)
	for {
		if line, err := r.ReadString('\n'); err == nil {
			fmt.Printf("--- %s", line)
		} else if err == io.EOF {
			// Flush a trailing partial line, then stop.
			if len(line) > 0 {
				fmt.Printf("--- %s\n", line)
			}
			break
		} else {
			return err
		}
	}
	return nil
}
// CmdWeb serves the web UI: with -u it prints the UI's URL, otherwise it
// streams dockerweb.html to the client.
func (srv *Server) CmdWeb(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout, "web", "[OPTIONS]", "A web UI for docker")
	showurl := cmd.Bool("u", false, "Return the URL of the web UI")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	if *showurl {
		fmt.Fprintln(stdout, "http://localhost:4242/web")
		return nil
	}
	file, err := os.Open("dockerweb.html")
	if err != nil {
		return err
	}
	// Bug fix: the original never closed the file, leaking a descriptor on
	// every invocation.
	defer file.Close()
	if _, err := io.Copy(stdout, file); err != nil {
		return err
	}
	return nil
}
// Server exposes the docker commands over the rcli protocol.
type Server struct {
	containers *docker.Docker // container runtime: create, list, destroy
	images     *fs.Store      // image store backing images/commit/import
}
// Fix merge issue (stray commit-message line left behind by a bad merge; commented out so it does not read as code)
package server
import (
".."
"../fs"
"../future"
"../rcli"
"bufio"
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"os"
"path"
"strconv"
"strings"
"sync"
"text/tabwriter"
"time"
)
// VERSION is the server version string reported by `docker info`.
const VERSION = "0.0.1"
// ListenAndServe serves the rcli protocol on tcp 127.0.0.1:4242 and, in a
// background goroutine, HTTP on 127.0.0.1:8080. It blocks on the tcp
// listener. NOTE(review): any error from the HTTP goroutine is discarded.
func (srv *Server) ListenAndServe() error {
	go rcli.ListenAndServeHTTP("127.0.0.1:8080", srv)
	// FIXME: we want to use unix sockets here, but net.UnixConn doesn't expose
	// CloseWrite(), which we need to cleanly signal that stdin is closed without
	// closing the connection.
	// See http://code.google.com/p/go/issues/detail?id=3345
	return rcli.ListenAndServe("tcp", "127.0.0.1:4242", srv)
}
// Name returns this rcli service's name.
func (srv *Server) Name() string {
	return "docker"
}
// FIXME: Stop violating DRY by repeating usage here and in Subcmd declarations
// Help returns the top-level usage text: one line per command with a
// short description, formatted into a fixed-width column.
func (srv *Server) Help() string {
	help := "Usage: docker COMMAND [arg...]\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n"
	for _, cmd := range [][]interface{}{
		{"run", "Run a command in a container"},
		{"ps", "Display a list of containers"},
		{"import", "Create a new filesystem image from the contents of a tarball"},
		{"attach", "Attach to a running container"},
		{"cat", "Write the contents of a container's file to standard output"},
		{"commit", "Create a new image from a container's changes"},
		{"cp", "Create a copy of IMAGE and call it NAME"},
		{"debug", "(debug only) (No documentation available)"},
		{"diff", "Inspect changes on a container's filesystem"},
		{"images", "List images"},
		{"info", "Display system-wide information"},
		{"inspect", "Return low-level information on a container"},
		{"kill", "Kill a running container"},
		{"layers", "(debug only) List filesystem layers"},
		{"logs", "Fetch the logs of a container"},
		{"ls", "List the contents of a container's directory"},
		{"mirror", "(debug only) (No documentation available)"},
		{"port", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT"},
		{"ps", "List containers"},
		{"pull", "Download a new image from a remote location"},
		{"put", "Import a new image from a local archive"},
		{"reset", "Reset changes to a container's filesystem"},
		{"restart", "Restart a running container"},
		{"rm", "Remove a container"},
		{"rmimage", "Remove an image"},
		{"run", "Run a command in a new container"},
		{"start", "Start a stopped container"},
		{"stop", "Stop a running container"},
		{"tar", "Stream the contents of a container as a tar archive"},
		{"umount", "(debug only) Mount a container's filesystem"},
		{"wait", "Block until a container stops, then print its exit code"},
		{"web", "A web UI for docker"},
		{"write", "Write the contents of standard input to a container's file"},
	} {
		// Each entry is {name, description}; %-10.10s pads/truncates the name.
		help += fmt.Sprintf("    %-10.10s%s\n", cmd...)
	}
	return help
}
// 'docker wait': block until each named container stops, then print its
// exit code.
func (srv *Server) CmdWait(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout, "wait", "[OPTIONS] NAME", "Block until a container stops, then print its exit code.")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	if cmd.NArg() < 1 {
		cmd.Usage()
		return nil
	}
	for _, name := range cmd.Args() {
		// Guard clause instead of if/else nesting.
		container := srv.containers.Get(name)
		if container == nil {
			return errors.New("No such container: " + name)
		}
		fmt.Fprintln(stdout, container.Wait())
	}
	return nil
}
// 'docker info': display system-wide information — the number of
// containers, the server version, and the number of images.
func (srv *Server) CmdInfo(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	// The error from Images() is deliberately ignored: info is best-effort.
	// len() of a nil slice is 0, so the original explicit nil check was
	// redundant and has been removed.
	images, _ := srv.images.Images()
	imgcount := len(images)
	cmd := rcli.Subcmd(stdout, "info", "", "Display system-wide information.")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	if cmd.NArg() > 0 {
		cmd.Usage()
		return nil
	}
	fmt.Fprintf(stdout, "containers: %d\nversion: %s\nimages: %d\n",
		len(srv.containers.List()),
		VERSION,
		imgcount)
	return nil
}
// CmdStop stops each named running container, printing its id on success.
func (srv *Server) CmdStop(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout, "stop", "[OPTIONS] NAME", "Stop a running container")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	if cmd.NArg() < 1 {
		cmd.Usage()
		return nil
	}
	for _, name := range cmd.Args() {
		container := srv.containers.Get(name)
		if container == nil {
			return errors.New("No such container: " + name)
		}
		if err := container.Stop(); err != nil {
			return err
		}
		fmt.Fprintln(stdout, container.Id)
	}
	return nil
}
// CmdRestart restarts each named running container, printing its id on
// success.
func (srv *Server) CmdRestart(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout, "restart", "[OPTIONS] NAME", "Restart a running container")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	if cmd.NArg() < 1 {
		cmd.Usage()
		return nil
	}
	for _, name := range cmd.Args() {
		container := srv.containers.Get(name)
		if container == nil {
			return errors.New("No such container: " + name)
		}
		if err := container.Restart(); err != nil {
			return err
		}
		fmt.Fprintln(stdout, container.Id)
	}
	return nil
}
// CmdStart starts each named stopped container, printing its id on
// success.
func (srv *Server) CmdStart(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout, "start", "[OPTIONS] NAME", "Start a stopped container")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	if cmd.NArg() < 1 {
		cmd.Usage()
		return nil
	}
	for _, name := range cmd.Args() {
		container := srv.containers.Get(name)
		if container == nil {
			return errors.New("No such container: " + name)
		}
		if err := container.Start(); err != nil {
			return err
		}
		fmt.Fprintln(stdout, container.Id)
	}
	return nil
}
// CmdUmount unmounts each named container's filesystem, printing its id
// on success (debug only).
func (srv *Server) CmdUmount(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout, "umount", "[OPTIONS] NAME", "umount a container's filesystem (debug only)")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	if cmd.NArg() < 1 {
		cmd.Usage()
		return nil
	}
	for _, name := range cmd.Args() {
		container := srv.containers.Get(name)
		if container == nil {
			return errors.New("No such container: " + name)
		}
		if err := container.Mountpoint.Umount(); err != nil {
			return err
		}
		fmt.Fprintln(stdout, container.Id)
	}
	return nil
}
// CmdMount mounts each named container's filesystem, printing its id on
// success (debug only).
func (srv *Server) CmdMount(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	// Bug fix: the usage name said "umount" (copy-pasted from CmdUmount);
	// this command is "mount".
	cmd := rcli.Subcmd(stdout, "mount", "[OPTIONS] NAME", "mount a container's filesystem (debug only)")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	if cmd.NArg() < 1 {
		cmd.Usage()
		return nil
	}
	for _, name := range cmd.Args() {
		if container := srv.containers.Get(name); container != nil {
			if err := container.Mountpoint.EnsureMounted(); err != nil {
				return err
			}
			fmt.Fprintln(stdout, container.Id)
		} else {
			return errors.New("No such container: " + name)
		}
	}
	return nil
}
// CmdCat writes the contents of PATH inside CONTAINER's filesystem to
// the client.
func (srv *Server) CmdCat(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout, "cat", "[OPTIONS] CONTAINER PATH", "write the contents of a container's file to standard output")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	if cmd.NArg() < 2 {
		cmd.Usage()
		return nil
	}
	name, path := cmd.Arg(0), cmd.Arg(1)
	if container := srv.containers.Get(name); container != nil {
		// NOTE(review): f is never closed — confirm whether
		// Mountpoint.OpenFile returns an io.Closer and add a Close if so.
		if f, err := container.Mountpoint.OpenFile(path, os.O_RDONLY, 0); err != nil {
			return err
		} else if _, err := io.Copy(stdout, f); err != nil {
			return err
		}
		return nil
	}
	return errors.New("No such container: " + name)
}
// CmdWrite copies the client's standard input into PATH inside
// CONTAINER's filesystem, creating the file (mode 0600) if needed.
func (srv *Server) CmdWrite(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout, "write", "[OPTIONS] CONTAINER PATH", "write the contents of standard input to a container's file")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	if cmd.NArg() < 2 {
		cmd.Usage()
		return nil
	}
	name, path := cmd.Arg(0), cmd.Arg(1)
	if container := srv.containers.Get(name); container != nil {
		// NOTE(review): f is never closed, so buffered writes may not be
		// flushed — confirm the return type of OpenFile and close it if
		// possible.
		if f, err := container.Mountpoint.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0600); err != nil {
			return err
		} else if _, err := io.Copy(f, stdin); err != nil {
			return err
		}
		return nil
	}
	return errors.New("No such container: " + name)
}
// CmdLs lists the entries of the directory PATH inside CONTAINER's
// filesystem, one name per line.
func (srv *Server) CmdLs(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout, "ls", "[OPTIONS] CONTAINER PATH", "List the contents of a container's directory")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	if cmd.NArg() < 2 {
		cmd.Usage()
		return nil
	}
	name, path := cmd.Arg(0), cmd.Arg(1)
	container := srv.containers.Get(name)
	if container == nil {
		return errors.New("No such container: " + name)
	}
	files, err := container.Mountpoint.ReadDir(path)
	if err != nil {
		return err
	}
	for _, entry := range files {
		fmt.Fprintln(stdout, entry.Name())
	}
	return nil
}
// CmdInspect writes an indented JSON dump of a container's low-level
// state to the client.
func (srv *Server) CmdInspect(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout, "inspect", "[OPTIONS] CONTAINER", "Return low-level information on a container")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	if cmd.NArg() < 1 {
		cmd.Usage()
		return nil
	}
	name := cmd.Arg(0)
	// TODO: also resolve images by name (the image branch was commented
	// out upstream).
	container := srv.containers.Get(name)
	if container == nil {
		return errors.New("No such container or image: " + name)
	}
	var obj interface{} = container
	data, err := json.Marshal(obj)
	if err != nil {
		return err
	}
	indented := new(bytes.Buffer)
	if err = json.Indent(indented, data, "", "    "); err != nil {
		return err
	}
	if _, err := io.Copy(stdout, indented); err != nil {
		return err
	}
	return nil
}
// CmdPort prints the public-facing port that is NAT-ed to PRIVATE_PORT
// on the given container.
func (srv *Server) CmdPort(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout, "port", "[OPTIONS] CONTAINER PRIVATE_PORT", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	if cmd.NArg() != 2 {
		cmd.Usage()
		return nil
	}
	name := cmd.Arg(0)
	privatePort := cmd.Arg(1)
	container := srv.containers.Get(name)
	if container == nil {
		return errors.New("No such container: " + name)
	}
	frontend, exists := container.NetworkSettings.PortMapping[privatePort]
	if !exists {
		return fmt.Errorf("No private port '%s' allocated on %s", privatePort, name)
	}
	fmt.Fprintln(stdout, frontend)
	return nil
}
// 'docker rmi NAME' removes all images with the name NAME
// func (srv *Server) CmdRmi(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
// cmd := rcli.Subcmd(stdout, "rmimage", "[OPTIONS] IMAGE", "Remove an image")
// fl_regexp := cmd.Bool("r", false, "Use IMAGE as a regular expression instead of an exact name")
// if err := cmd.Parse(args); err != nil {
// cmd.Usage()
// return nil
// }
// if cmd.NArg() < 1 {
// cmd.Usage()
// return nil
// }
// for _, name := range cmd.Args() {
// var err error
// if *fl_regexp {
// err = srv.images.DeleteMatch(name)
// } else {
// image := srv.images.Find(name)
// if image == nil {
// return errors.New("No such image: " + name)
// }
// err = srv.images.Delete(name)
// }
// if err != nil {
// return err
// }
// }
// return nil
// }
// CmdRm removes each named container. A destroy failure is reported to
// the client but does not stop the loop.
func (srv *Server) CmdRm(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout, "rm", "[OPTIONS] CONTAINER", "Remove a container")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	for i := 0; i < cmd.NArg(); i++ {
		name := cmd.Arg(i)
		container := srv.containers.Get(name)
		if container == nil {
			return errors.New("No such container: " + name)
		}
		if err := srv.containers.Destroy(container); err != nil {
			fmt.Fprintln(stdout, "Error destroying container "+name+": "+err.Error())
		}
	}
	return nil
}
// 'docker kill NAME' kills a running container. A kill failure is
// reported to the client but does not stop the loop.
func (srv *Server) CmdKill(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout, "kill", "[OPTIONS] CONTAINER [CONTAINER...]", "Kill a running container")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	for i := 0; i < cmd.NArg(); i++ {
		name := cmd.Arg(i)
		container := srv.containers.Get(name)
		if container == nil {
			return errors.New("No such container: " + name)
		}
		if err := container.Kill(); err != nil {
			fmt.Fprintln(stdout, "Error killing container "+name+": "+err.Error())
		}
	}
	return nil
}
// CmdImport creates a new image from a tarball, read either from stdin
// (-stdin) or downloaded from the URL given as NAME.
func (srv *Server) CmdImport(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout, "import", "[OPTIONS] NAME", "Create a new filesystem image from the contents of a tarball")
	fl_stdin := cmd.Bool("stdin", false, "Read tarball from stdin")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	var archive io.Reader
	name := cmd.Arg(0)
	if name == "" {
		return errors.New("Not enough arguments")
	}
	if *fl_stdin {
		archive = stdin
	} else {
		u, err := url.Parse(name)
		if err != nil {
			return err
		}
		// Default to http and the docker.io S3 mirror for partial URLs.
		if u.Scheme == "" {
			u.Scheme = "http"
		}
		// FIXME: hardcode a mirror URL that does not depend on a single provider.
		if u.Host == "" {
			u.Host = "s3.amazonaws.com"
			u.Path = path.Join("/docker.io/images", u.Path)
		}
		fmt.Fprintf(stdout, "Downloading from %s\n", u.String())
		// Download with curl (pretty progress bar)
		// If curl is not available, fallback to http.Get()
		archive, err = future.Curl(u.String(), stdout)
		if err != nil {
			// NOTE(review): resp.Body is never explicitly closed after
			// srv.images.Create consumes it — confirm Create closes it.
			if resp, err := http.Get(u.String()); err != nil {
				return err
			} else {
				archive = resp.Body
			}
		}
	}
	fmt.Fprintf(stdout, "Unpacking to %s\n", name)
	img, err := srv.images.Create(archive, nil, name, "")
	if err != nil {
		return err
	}
	fmt.Fprintln(stdout, img.Id)
	return nil
}
// CmdImages lists images, optionally filtered to a single NAME, as a
// NAME/ID/CREATED/PARENT table, or as bare ids with -q.
func (srv *Server) CmdImages(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout, "images", "[OPTIONS] [NAME]", "List images")
	limit := cmd.Int("l", 0, "Only show the N most recent versions of each image")
	quiet := cmd.Bool("q", false, "only show numeric IDs")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	if cmd.NArg() > 1 {
		cmd.Usage()
		return nil
	}
	var nameFilter string
	if cmd.NArg() == 1 {
		nameFilter = cmd.Arg(0)
	}
	w := tabwriter.NewWriter(stdout, 20, 1, 3, ' ', 0)
	if !*quiet {
		fmt.Fprintf(w, "NAME\tID\tCREATED\tPARENT\n")
	}
	paths, err := srv.images.Paths()
	if err != nil {
		return err
	}
	for _, name := range paths {
		if nameFilter != "" && nameFilter != name {
			continue
		}
		ids, err := srv.images.List(name)
		if err != nil {
			return err
		}
		for idx, img := range ids {
			// -l N: stop after the N most recent versions of this name.
			if *limit > 0 && idx >= *limit {
				break
			}
			if !*quiet {
				// NOTE: the inner idx shadows the outer version index; it
				// only marks the first column (no leading tab separator).
				for idx, field := range []string{
					/* NAME */ name,
					/* ID */ img.Id,
					/* CREATED */ future.HumanDuration(time.Now().Sub(time.Unix(img.Created, 0))) + " ago",
					/* PARENT */ img.Parent,
				} {
					if idx == 0 {
						w.Write([]byte(field))
					} else {
						w.Write([]byte("\t" + field))
					}
				}
				w.Write([]byte{'\n'})
			} else {
				stdout.Write([]byte(img.Id + "\n"))
			}
		}
	}
	if !*quiet {
		w.Flush()
	}
	return nil
}
// CmdPs lists containers as an ID/IMAGE/COMMAND/CREATED/STATUS/COMMENT
// table. Only running containers are shown unless -a is given; -q prints
// bare ids and -notrunc disables command truncation.
func (srv *Server) CmdPs(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout,
		"ps", "[OPTIONS]", "List containers")
	quiet := cmd.Bool("q", false, "Only display numeric IDs")
	fl_all := cmd.Bool("a", false, "Show all containers. Only running containers are shown by default.")
	fl_full := cmd.Bool("notrunc", false, "Don't truncate output")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	w := tabwriter.NewWriter(stdout, 12, 1, 3, ' ', 0)
	if !*quiet {
		fmt.Fprintf(w, "ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS\tCOMMENT\n")
	}
	for _, container := range srv.containers.List() {
		comment := container.GetUserData("comment")
		if !container.State.Running && !*fl_all {
			continue
		}
		if !*quiet {
			command := fmt.Sprintf("%s %s", container.Path, strings.Join(container.Args, " "))
			if !*fl_full {
				command = docker.Trunc(command, 20)
			}
			// idx == 0 marks the first column: no leading tab separator.
			for idx, field := range []string{
				/* ID */ container.Id,
				/* IMAGE */ container.GetUserData("image"),
				/* COMMAND */ command,
				/* CREATED */ future.HumanDuration(time.Now().Sub(container.Created)) + " ago",
				/* STATUS */ container.State.String(),
				/* COMMENT */ comment,
			} {
				if idx == 0 {
					w.Write([]byte(field))
				} else {
					w.Write([]byte("\t" + field))
				}
			}
			w.Write([]byte{'\n'})
		} else {
			stdout.Write([]byte(container.Id + "\n"))
		}
	}
	if !*quiet {
		w.Flush()
	}
	return nil
}
// CmdLayers prints every filesystem layer known to the image store,
// one per line (debug only).
func (srv *Server) CmdLayers(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout,
		"layers", "[OPTIONS]",
		"List filesystem layers (debug only)")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	for _, layer := range srv.images.Layers() {
		fmt.Fprintln(stdout, layer)
	}
	return nil
}
// CmdCp duplicates an existing image under a new name and prints the
// id of the copy.
func (srv *Server) CmdCp(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout,
		"cp", "[OPTIONS] IMAGE NAME",
		"Create a copy of IMAGE and call it NAME")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	if image, err := srv.images.Get(cmd.Arg(0)); err != nil {
		return err
	} else if image == nil {
		return errors.New("Image " + cmd.Arg(0) + " does not exist")
	} else {
		if img, err := image.Copy(cmd.Arg(1)); err != nil {
			return err
		} else {
			fmt.Fprintln(stdout, img.Id)
		}
	}
	return nil
}
// CmdCommit snapshots a container's read-write layer into a new image
// named DEST, stacked on top of the container's base image.
func (srv *Server) CmdCommit(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout,
		"commit", "[OPTIONS] CONTAINER [DEST]",
		"Create a new image from a container's changes")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	containerName, imgName := cmd.Arg(0), cmd.Arg(1)
	if containerName == "" || imgName == "" {
		cmd.Usage()
		return nil
	}
	if container := srv.containers.Get(containerName); container != nil {
		// FIXME: freeze the container before copying it to avoid data corruption?
		rwTar, err := fs.Tar(container.Mountpoint.Rw, fs.Uncompressed)
		if err != nil {
			return err
		}
		// Create a new image from the container's base layers + a new layer from container changes
		parentImg, err := srv.images.Get(container.Image)
		if err != nil {
			return err
		}
		img, err := srv.images.Create(rwTar, parentImg, imgName, "")
		if err != nil {
			return err
		}
		fmt.Fprintln(stdout, img.Id)
		return nil
	}
	return errors.New("No such container: " + containerName)
}
// CmdTar streams a container's entire filesystem to the client as an
// uncompressed tar archive. -s (sparse mode) is not implemented yet.
func (srv *Server) CmdTar(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout,
		"tar", "CONTAINER",
		"Stream the contents of a container as a tar archive")
	fl_sparse := cmd.Bool("s", false, "Generate a sparse tar stream (top layer + reference to bottom layers)")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	if *fl_sparse {
		return errors.New("Sparse mode not yet implemented") // FIXME
	}
	name := cmd.Arg(0)
	if container := srv.containers.Get(name); container != nil {
		// The filesystem must be mounted before it can be tarred.
		if err := container.Mountpoint.EnsureMounted(); err != nil {
			return err
		}
		data, err := fs.Tar(container.Mountpoint.Root, fs.Uncompressed)
		if err != nil {
			return err
		}
		// Stream the entire contents of the container (basically a volatile snapshot)
		if _, err := io.Copy(stdout, data); err != nil {
			return err
		}
		return nil
	}
	return errors.New("No such container: " + name)
}
// CmdDiff prints the filesystem changes (one per line) recorded in a
// container's read-write layer relative to its image.
func (srv *Server) CmdDiff(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout,
		"diff", "CONTAINER [OPTIONS]",
		"Inspect changes on a container's filesystem")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	if cmd.NArg() < 1 {
		return errors.New("Not enough arguments")
	}
	if container := srv.containers.Get(cmd.Arg(0)); container == nil {
		return errors.New("No such container")
	} else {
		changes, err := srv.images.Changes(container.Mountpoint)
		if err != nil {
			return err
		}
		for _, change := range changes {
			fmt.Fprintln(stdout, change.String())
		}
	}
	return nil
}
// CmdReset discards the read-write layer changes of each named
// container. Names that do not resolve are silently skipped.
func (srv *Server) CmdReset(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout,
		"reset", "CONTAINER [OPTIONS]",
		"Reset changes to a container's filesystem")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	if cmd.NArg() < 1 {
		return errors.New("Not enough arguments")
	}
	for _, name := range cmd.Args() {
		if container := srv.containers.Get(name); container != nil {
			if err := container.Mountpoint.Reset(); err != nil {
				return errors.New("Reset " + container.Id + ": " + err.Error())
			}
		}
	}
	return nil
}
// CmdLogs writes a container's captured stdout log followed by its
// stderr log to the client.
func (srv *Server) CmdLogs(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout, "logs", "[OPTIONS] CONTAINER", "Fetch the logs of a container")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	if cmd.NArg() != 1 {
		cmd.Usage()
		return nil
	}
	name := cmd.Arg(0)
	if container := srv.containers.Get(name); container != nil {
		if _, err := io.Copy(stdout, container.StdoutLog()); err != nil {
			return err
		}
		if _, err := io.Copy(stdout, container.StderrLog()); err != nil {
			return err
		}
		return nil
	}
	return errors.New("No such container: " + cmd.Arg(0))
}
// CreateContainer builds a new container from img with the given
// runtime settings, then records the source image id and an optional
// comment as userdata. If recording userdata fails the container is
// destroyed again, so callers never observe a half-initialized one.
func (srv *Server) CreateContainer(img *fs.Image, ports []int, user string, tty bool, openStdin bool, memory int64, comment string, cmd string, args ...string) (*docker.Container, error) {
	// First 8 chars of a random id double as the container hostname.
	id := future.RandomId()[:8]
	container, err := srv.containers.Create(id, cmd, args, img,
		&docker.Config{
			Hostname:  id,
			Ports:     ports,
			User:      user,
			Tty:       tty,
			OpenStdin: openStdin,
			Memory:    memory,
		})
	if err != nil {
		return nil, err
	}
	if err := container.SetUserData("image", img.Id); err != nil {
		srv.containers.Destroy(container)
		return nil, errors.New("Error setting container userdata: " + err.Error())
	}
	if err := container.SetUserData("comment", comment); err != nil {
		srv.containers.Destroy(container)
		return nil, errors.New("Error setting container userdata: " + err.Error())
	}
	return container, nil
}
// CmdAttach connects the client to a running container's stdio.
// -i forwards client stdin into the container; -o and -e (both on by
// default) forward the container's stdout/stderr back. Blocks until
// every attached stream has drained.
func (srv *Server) CmdAttach(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout, "attach", "[OPTIONS]", "Attach to a running container")
	fl_i := cmd.Bool("i", false, "Attach to stdin")
	fl_o := cmd.Bool("o", true, "Attach to stdout")
	fl_e := cmd.Bool("e", true, "Attach to stderr")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	if cmd.NArg() != 1 {
		cmd.Usage()
		return nil
	}
	name := cmd.Arg(0)
	container := srv.containers.Get(name)
	if container == nil {
		return errors.New("No such container: " + name)
	}
	var wg sync.WaitGroup
	// wg.Done() replaces the original's unidiomatic wg.Add(-1).
	if *fl_i {
		c_stdin, err := container.StdinPipe()
		if err != nil {
			return err
		}
		wg.Add(1)
		go func() { io.Copy(c_stdin, stdin); wg.Done() }()
	}
	if *fl_o {
		c_stdout, err := container.StdoutPipe()
		if err != nil {
			return err
		}
		wg.Add(1)
		go func() { io.Copy(stdout, c_stdout); wg.Done() }()
	}
	if *fl_e {
		c_stderr, err := container.StderrPipe()
		if err != nil {
			return err
		}
		wg.Add(1)
		go func() { io.Copy(stdout, c_stderr); wg.Done() }()
	}
	wg.Wait()
	return nil
}
// ports accumulates repeated -p flag values as a list of integer port
// numbers; it implements the flag.Value interface.
type ports []int

// String renders the collected ports, e.g. "[80 443]".
func (p *ports) String() string {
	return fmt.Sprint(*p)
}

// Set parses one -p occurrence and appends it to the list, rejecting
// non-numeric values.
func (p *ports) Set(value string) error {
	parsed, convErr := strconv.Atoi(value)
	if convErr != nil {
		return fmt.Errorf("Invalid port: %v", value)
	}
	*p = append(*p, parsed)
	return nil
}
// CmdRun creates and starts a container running COMMAND from IMAGE.
// With no image given "base" is used; with no command an interactive
// /bin/bash is started and -i/-t/-a are implied. With -a the client
// is attached to the container's stdio; otherwise the new container
// id is printed and the command returns immediately.
func (srv *Server) CmdRun(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout, "run", "[OPTIONS] IMAGE COMMAND [ARG...]", "Run a command in a new container")
	fl_user := cmd.String("u", "", "Username or UID")
	fl_attach := cmd.Bool("a", false, "Attach stdin and stdout")
	fl_stdin := cmd.Bool("i", false, "Keep stdin open even if not attached")
	fl_tty := cmd.Bool("t", false, "Allocate a pseudo-tty")
	fl_comment := cmd.String("c", "", "Comment")
	fl_memory := cmd.Int64("m", 0, "Memory limit (in bytes)")
	var fl_ports ports
	cmd.Var(&fl_ports, "p", "Map a network port to the container")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	name := cmd.Arg(0)
	var cmdline []string
	if len(cmd.Args()) >= 2 {
		cmdline = cmd.Args()[1:]
	}
	// Choose a default image if needed
	if name == "" {
		name = "base"
	}
	// Choose a default command if needed (implies interactive mode)
	if len(cmdline) == 0 {
		*fl_stdin = true
		*fl_tty = true
		*fl_attach = true
		cmdline = []string{"/bin/bash", "-i"}
	}
	// Find the image
	img, err := srv.images.Find(name)
	if err != nil {
		return err
	} else if img == nil {
		return errors.New("No such image: " + name)
	}
	// Create new container
	container, err := srv.CreateContainer(img, fl_ports, *fl_user, *fl_tty,
		*fl_stdin, *fl_memory, *fl_comment, cmdline[0], cmdline[1:]...)
	if err != nil {
		return errors.New("Error creating container: " + err.Error())
	}
	if *fl_stdin {
		cmd_stdin, err := container.StdinPipe()
		if err != nil {
			return err
		}
		if *fl_attach {
			// Pump client stdin into the container; closing cmd_stdin
			// signals EOF to the contained process.
			future.Go(func() error {
				_, err := io.Copy(cmd_stdin, stdin)
				cmd_stdin.Close()
				return err
			})
		}
	}
	// Run the container
	if *fl_attach {
		cmd_stderr, err := container.StderrPipe()
		if err != nil {
			return err
		}
		cmd_stdout, err := container.StdoutPipe()
		if err != nil {
			return err
		}
		if err := container.Start(); err != nil {
			return err
		}
		// Both container streams are multiplexed onto the client's
		// stdout; wait for each pump before reaping the container.
		sending_stdout := future.Go(func() error {
			_, err := io.Copy(stdout, cmd_stdout)
			return err
		})
		sending_stderr := future.Go(func() error {
			_, err := io.Copy(stdout, cmd_stderr)
			return err
		})
		err_sending_stdout := <-sending_stdout
		err_sending_stderr := <-sending_stderr
		if err_sending_stdout != nil {
			return err_sending_stdout
		}
		if err_sending_stderr != nil {
			return err_sending_stderr
		}
		container.Wait()
	} else {
		if err := container.Start(); err != nil {
			return err
		}
		fmt.Fprintln(stdout, container.Id)
	}
	return nil
}
// New seeds the id generator, connects to the docker runtime, and
// returns a Server wired to its container and image stores.
func New() (*Server, error) {
	future.Seed()
	runtime, err := docker.New()
	if err != nil {
		return nil, err
	}
	return &Server{
		images:     runtime.Store,
		containers: runtime,
	}, nil
}
// CmdMirror echoes the client's stdin straight back to stdout until
// EOF; useful for testing the transport.
func (srv *Server) CmdMirror(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	_, copyErr := io.Copy(stdout, stdin)
	return copyErr
}
// CmdDebug echoes each line read from the client's stdin to the
// server process's own stdout, prefixed with "--- ", until EOF.
func (srv *Server) CmdDebug(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	// Bug fix: create the buffered reader once. The original built a
	// fresh bufio.Reader every iteration, discarding any bytes the
	// previous reader had buffered beyond the returned line.
	reader := bufio.NewReader(stdin)
	for {
		line, err := reader.ReadString('\n')
		if err == nil {
			fmt.Printf("--- %s", line)
			continue
		}
		if err == io.EOF {
			// Flush a final unterminated line, if any.
			if len(line) > 0 {
				fmt.Printf("--- %s\n", line)
			}
			break
		}
		return err
	}
	return nil
}
// CmdWeb serves the web UI: with -u it prints the UI's URL, otherwise
// it streams dockerweb.html to the client.
func (srv *Server) CmdWeb(stdin io.ReadCloser, stdout io.Writer, args ...string) error {
	cmd := rcli.Subcmd(stdout, "web", "[OPTIONS]", "A web UI for docker")
	showurl := cmd.Bool("u", false, "Return the URL of the web UI")
	if err := cmd.Parse(args); err != nil {
		return nil
	}
	if *showurl {
		fmt.Fprintln(stdout, "http://localhost:4242/web")
		return nil
	}
	file, err := os.Open("dockerweb.html")
	if err != nil {
		return err
	}
	// Bug fix: the original never closed the file, leaking one file
	// descriptor per invocation.
	defer file.Close()
	if _, err := io.Copy(stdout, file); err != nil {
		return err
	}
	return nil
}
// Server glues the container runtime (containers) and the image store
// (images) together; its Cmd* methods implement the CLI commands.
type Server struct {
	containers *docker.Docker
	images     *fs.Store
}
|
package main
import (
// General
"github.com/golang/glog"
"flag"
"github.com/iambc/xerrors"
"reflect"
"os"
//API
"net/http"
"encoding/json"
//DB
"database/sql"
_ "github.com/lib/pq"
)
/*
TODO:
2) Add different input/output formats for the API
6) quote of the day
*/
// image_board_clusters mirrors the image_board_clusters table; a
// cluster groups boards and is addressed by api_key in the queries.
type image_board_clusters struct {
	Id              int
	Descr           string
	LongDescr       string
	BoardLimitCount int
}

// boards mirrors the boards table plus its per-board posting limits.
type boards struct {
	Id                        int
	Name                      string
	Descr                     string
	ImageBoardClusterId       string
	MaxThreadCount            int  //to be checked in insert thread
	MaxActiveThreadCount      int  //to be checked in insert thread
	MaxPostsPerThread         int  // to be checked in insert thread
	AreAttachmentsAllowed     bool // to be checked in insert post
	PostLimitsReachedActionId int  // to be checked in insert post
}

// threads mirrors the threads table.
type threads struct {
	Id                    int
	Name                  string
	Descr                 string
	BoardId               int
	MaxPostsPerThread     int
	AreAttachmentsAllowed bool
	LimitsReachedActionId int
}

// thread_posts mirrors the thread_posts table; AttachmentUrl is nil
// when the post carries no attachment.
type thread_posts struct {
	Id            int
	Body          string
	ThreadId      int
	AttachmentUrl *string
}

// thread_limits_reached_actions mirrors the lookup table of actions
// applied when a thread hits its limits.
type thread_limits_reached_actions struct {
	Id    int
	Name  string
	Descr string
}

// api_request is the JSON envelope wrapped around every API response.
type api_request struct {
	Status  string
	Msg     *string
	Payload interface{}
}
// getBoards returns every board belonging to the cluster identified
// by the api_key query parameter, JSON-encoded in an api_request
// envelope.
func getBoards(res http.ResponseWriter, req *http.Request) ([]byte, error) {
	if req == nil || res == nil {
		return []byte{}, xerrors.NewSysErr()
	}
	values := req.URL.Query()
	// Indexing [0] is safe: the /api dispatcher in main() verifies
	// api_key is present before calling any handler.
	api_key := values[`api_key`][0]
	rows, err := dbh.Query("select b.id, b.name, b.descr from boards b join image_board_clusters ibc on ibc.id = b.image_board_cluster_id where api_key = $1;", api_key)
	if err != nil {
		return []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `002`, true)
	}
	defer rows.Close()
	var curr_boards []boards
	for rows.Next() {
		var board boards
		err = rows.Scan(&board.Id, &board.Name, &board.Descr)
		if err != nil {
			return []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `003`, true)
		}
		curr_boards = append(curr_boards, board)
	}
	// Bug fix: surface iteration errors — the original silently
	// returned a truncated list when the cursor failed mid-scan.
	if err = rows.Err(); err != nil {
		return []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `003`, true)
	}
	bytes, err1 := json.Marshal(api_request{"ok", nil, &curr_boards})
	if err1 != nil {
		return []byte{}, xerrors.NewUIErr(err1.Error(), err1.Error(), `004`, true)
	}
	return bytes, nil
}
// getActiveThreadsForBoard lists the active threads of the board given
// by the board_id query parameter, scoped to the cluster that owns the
// supplied api_key.
func getActiveThreadsForBoard(res http.ResponseWriter, req *http.Request) ([]byte, error) {
	if req == nil || res == nil {
		return []byte{}, xerrors.NewSysErr()
	}
	values := req.URL.Query()
	board_id, is_passed := values[`board_id`]
	if !is_passed {
		return []byte{}, xerrors.NewUIErr(`Invalid params: No board_id given!`, `Invalid params: No board_id given!`, `005`, true)
	}
	// api_key presence is enforced by the /api dispatcher in main().
	api_key := values[`api_key`][0]
	rows, err := dbh.Query(`select t.id, t.name from threads t
		join boards b on b.id = t.board_id
		join image_board_clusters ibc on ibc.id = b.image_board_cluster_id
		where t.is_active = TRUE and t.board_id = $1 and ibc.api_key = $2;`, board_id[0], api_key)
	if err != nil {
		return []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `006`, true)
	}
	defer rows.Close()
	var active_threads []threads
	for rows.Next() {
		glog.Info("Popped new thread")
		var thread threads
		err = rows.Scan(&thread.Id, &thread.Name)
		if err != nil {
			return []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `007`, true)
		}
		active_threads = append(active_threads, thread)
	}
	// NOTE(review): rows.Err() is never checked after the loop — an
	// iteration error yields a silently truncated list.
	var bytes []byte
	var err1 error
	// An empty result set is reported as an error-status envelope.
	if(len(active_threads) == 0){
		errMsg := "No objects returned."
		bytes, err1 = json.Marshal(api_request{"error", &errMsg, &active_threads})
	}else {
		bytes, err1 = json.Marshal(api_request{"ok", nil, &active_threads})
	}
	if err1 != nil {
		return []byte{}, xerrors.NewUIErr(err1.Error(), err1.Error(), `008`, true)
	}
	return bytes, nil
}
// getPostsForThread lists the posts of an active thread given by the
// thread_id query parameter, scoped to the cluster that owns the
// supplied api_key.
func getPostsForThread(res http.ResponseWriter, req *http.Request) ([]byte, error) {
	if req == nil || res == nil {
		return []byte{}, xerrors.NewSysErr()
	}
	values := req.URL.Query()
	thread_id, is_passed := values[`thread_id`]
	if !is_passed {
		return []byte{}, xerrors.NewUIErr(`Invalid params: No thread_id given!`, `Invalid params: No thread_id given!`, `006`, true)
	}
	// api_key presence is enforced by the /api dispatcher in main().
	api_key := values[`api_key`][0]
	rows, err := dbh.Query(`select tp.id, tp.body, tp.attachment_url
		from thread_posts tp join threads t on t.id = tp.thread_id
		join boards b on b.id = t.board_id
		join image_board_clusters ibc on ibc.id = b.image_board_cluster_id
		where tp.thread_id = $1 and ibc.api_key = $2 and t.is_active = true;`, thread_id[0], api_key)
	if err != nil {
		glog.Error(err)
		return []byte{}, xerrors.NewSysErr()
	}
	defer rows.Close()
	var curr_posts []thread_posts
	for rows.Next() {
		glog.Info("new post for thread with id: ", thread_id[0])
		var curr_post thread_posts
		// AttachmentUrl is *string so a SQL NULL scans to nil.
		err = rows.Scan(&curr_post.Id, &curr_post.Body, &curr_post.AttachmentUrl)
		if err != nil {
			glog.Error(err)
			return []byte{}, xerrors.NewSysErr()
		}
		curr_posts = append(curr_posts, curr_post)
	}
	// NOTE(review): rows.Err() is never checked after the loop — an
	// iteration error yields a silently truncated list.
	var bytes []byte
	var err1 error
	// An empty result set is reported as an error-status envelope.
	if(len(curr_posts) == 0){
		errMsg := "No objects returned."
		bytes, err1 = json.Marshal(api_request{"error", &errMsg, &curr_posts})
	}else {
		bytes, err1 = json.Marshal(api_request{"ok", nil, &curr_posts})
	}
	if err1 != nil {
		return []byte{}, xerrors.NewSysErr()
	}
	return bytes, nil
}
// addPostToThread inserts a new post into a thread; when the thread's
// post limit has been reached it deactivates the thread and reports
// an error instead.
func addPostToThread(res http.ResponseWriter, req *http.Request) ([]byte, error) {
	if req == nil || res == nil {
		return []byte{}, xerrors.NewSysErr()
	}
	values := req.URL.Query()
	thread_id, is_passed := values[`thread_id`]
	if !is_passed {
		return []byte{}, xerrors.NewUIErr(`Invalid params: No thread_id given!`, `Invalid params: No thread_id given!`, `001`, true)
	}
	thread_body_post, is_passed := values[`thread_post_body`]
	if !is_passed {
		return []byte{}, xerrors.NewUIErr(`Invalid params: No thread_post_body given!`, `Invalid params: No thread_post_body given!`, `001`, true)
	}
	var is_limit_reached bool
	err := dbh.QueryRow("select (select count(*) from thread_posts where thread_id = $1) > max_posts_per_thread from threads where id = $1;", thread_id[0]).Scan(&is_limit_reached)
	if err != nil {
		return []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `009`, true)
	}
	if is_limit_reached {
		// Bug fix: Exec, not QueryRow().Scan() — the UPDATE returns no
		// rows, and the original discarded the resulting error.
		if _, updErr := dbh.Exec("UPDATE threads set is_active = false where id = $1", thread_id[0]); updErr != nil {
			glog.Error(updErr)
		}
		return []byte{}, xerrors.NewUIErr(`Thread post limit reached!`, `Thread post limit reached!`, `010`, true)
	}
	attachment_urls, is_passed := values[`attachment_url`]
	var attachment_url *string
	if is_passed {
		attachment_url = &attachment_urls[0]
	}
	// Bug fix: Exec instead of Query — Query leaks the returned *Rows
	// (and its connection) because it is never closed.
	_, err = dbh.Exec("INSERT INTO thread_posts(body, thread_id, attachment_url) VALUES($1, $2, $3)", thread_body_post[0], thread_id[0], attachment_url)
	if err != nil {
		glog.Error(err)
		return []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `011`, true)
	}
	bytes, err1 := json.Marshal(api_request{"ok", nil, nil})
	if err1 != nil {
		return []byte{}, xerrors.NewUIErr(err1.Error(), err1.Error(), `012`, true)
	}
	return bytes, nil
}
// addThread creates a new thread on a board unless the board's thread
// limit has been reached.
func addThread(res http.ResponseWriter, req *http.Request) ([]byte, error) {
	if req == nil || res == nil {
		return []byte{}, xerrors.NewSysErr()
	}
	values := req.URL.Query()
	thread_name, is_passed := values[`thread_name`]
	if !is_passed {
		return []byte{}, xerrors.NewUIErr(`Invalid params: No thread_name given!`, `Invalid params: No thread_name given!`, `013`, true)
	}
	board_id, is_passed := values[`board_id`]
	if !is_passed {
		return []byte{}, xerrors.NewUIErr(`Invalid params: No board_id given!`, `Invalid params: No board_id given!`, `014`, true)
	}
	var is_limit_reached bool
	err := dbh.QueryRow("select (select count(*) from threads where board_id = $1) > thread_setting_max_thread_count from boards where id = $1;", board_id[0]).Scan(&is_limit_reached)
	if err != nil {
		glog.Error("COULD NOT SELECT thread_count")
		return []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `015`, true)
	}
	if is_limit_reached {
		return []byte{}, xerrors.NewUIErr(`Thread limit reached!`, `Thread limit reached!`, `016`, true)
	}
	// Bug fix: Exec instead of Query — Query leaks the returned *Rows
	// (and its connection) because it is never closed.
	_, err = dbh.Exec("INSERT INTO threads(name, board_id, limits_reached_action_id, max_posts_per_thread) VALUES($1, $2, 1, 10)", thread_name[0], board_id[0])
	if err != nil {
		glog.Error("INSERT FAILED")
		return []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `017`, true)
	}
	bytes, err1 := json.Marshal(api_request{"ok", nil, nil})
	if err1 != nil {
		return []byte{}, xerrors.NewUIErr(err1.Error(), err1.Error(), `018`, true)
	}
	return bytes, nil
}
// dbConnString is populated from ABC_DB_CONN_STRING at startup.
var dbConnString = ``

// dbh is the shared Postgres handle used by all API handlers.
var dbh *sql.DB
// sample usage
// main opens the database and wires up the HTTP API: every request
// hits /api with a ?command= selector that is dispatched through the
// commands map; /f/ serves static files.
func main() {
	flag.Parse()
	var err error
	dbConnString = os.Getenv("ABC_DB_CONN_STRING") // DB will return error if empty string
	dbh, err = sql.Open("postgres", dbConnString)
	if err != nil {
		glog.Fatal(err)
	}
	// Command name -> handler; each handler returns the JSON payload
	// to write, or an xerrors error rendered below.
	commands := map[string]func(http.ResponseWriter, *http.Request) ([]byte, error){
		"getBoards":                getBoards,
		"getActiveThreadsForBoard": getActiveThreadsForBoard,
		"getPostsForThread":        getPostsForThread,
		"addPostToThread":          addPostToThread,
		"addThread":                addThread,
	}
	http.HandleFunc("/api", func(res http.ResponseWriter, req *http.Request) {
		values := req.URL.Query()
		command, is_passed := values[`command`]
		if !is_passed {
			res.Write([]byte(`{"Status":"error","Msg":"Paremeter 'command' is undefined.","Payload":null}`))
			return
		}
		// Handlers index values[`api_key`][0] directly, so presence is
		// enforced once here.
		_, is_passed = values[`api_key`]
		if !is_passed {
			res.Write([]byte(`{"Status":"error","Msg":"Paremeter 'api_key' is undefined.","Payload":null}`))
			return
		}
		_, is_passed = commands[command[0]]
		if !is_passed {
			res.Write([]byte(`{"Status":"error","Msg":"No such command exists.","Payload":null}`))
			glog.Error("command: ", command[0])
			return
		}
		res.Header().Set("Access-Control-Allow-Origin", "*")
		bytes, err := commands[command[0]](res, req)
		if err != nil {
			// The concrete error type name selects the envelope's
			// Status code.
			if string(reflect.TypeOf(err).Name()) == `SysErr` {
				res.Write([]byte(`{"Status":"` + xerrors.SysErrCode + `","Msg":"` + err.Error() + `","Payload":null}`))
			} else if string(reflect.TypeOf(err).Name()) == `UIErr` {
				res.Write([]byte(`{"Status":"` + err.(xerrors.UIErr).Code + `","Msg":"` + err.Error() + `","Payload":null}`))
			} else {
				res.Write([]byte(`{"Status":"000","Msg":"Application Error!","Payload":null}`))
			}
			glog.Error(err)
			return
		}
		glog.Info(string(bytes))
		res.Write(bytes)
	})
	http.Handle("/f/", http.StripPrefix("/f/", http.FileServer(http.Dir(os.Getenv("ABC_FILES_DIR")))))
	// NOTE(review): the ListenAndServe error is discarded — surface it.
	http.ListenAndServe(`:`+os.Getenv("ABC_SERVER_ENDPOINT_URL"), nil)
}
feat: works with new version of xerrors
package main
import (
// General
"github.com/golang/glog"
"flag"
"github.com/iambc/xerrors"
"reflect"
"os"
//API
"net/http"
"encoding/json"
//DB
"database/sql"
_ "github.com/lib/pq"
)
/*
TODO:
2) Add different input/output formats for the API
6) quote of the day
*/
// image_board_clusters mirrors the image_board_clusters table; a
// cluster groups boards and is addressed by api_key in the queries.
type image_board_clusters struct {
	Id              int
	Descr           string
	LongDescr       string
	BoardLimitCount int
}

// boards mirrors the boards table plus its per-board posting limits.
type boards struct {
	Id                        int
	Name                      string
	Descr                     string
	ImageBoardClusterId       string
	MaxThreadCount            int  //to be checked in insert thread
	MaxActiveThreadCount      int  //to be checked in insert thread
	MaxPostsPerThread         int  // to be checked in insert thread
	AreAttachmentsAllowed     bool // to be checked in insert post
	PostLimitsReachedActionId int  // to be checked in insert post
}

// threads mirrors the threads table.
type threads struct {
	Id                    int
	Name                  string
	Descr                 string
	BoardId               int
	MaxPostsPerThread     int
	AreAttachmentsAllowed bool
	LimitsReachedActionId int
}

// thread_posts mirrors the thread_posts table; AttachmentUrl is nil
// when the post carries no attachment.
type thread_posts struct {
	Id            int
	Body          string
	ThreadId      int
	AttachmentUrl *string
}

// thread_limits_reached_actions mirrors the lookup table of actions
// applied when a thread hits its limits.
type thread_limits_reached_actions struct {
	Id    int
	Name  string
	Descr string
}

// api_request is the JSON envelope wrapped around every API response.
type api_request struct {
	Status  string
	Msg     *string
	Payload interface{}
}
// getBoards returns every board belonging to the cluster identified
// by the api_key query parameter, JSON-encoded in an api_request
// envelope.
func getBoards(res http.ResponseWriter, req *http.Request) ([]byte, error) {
	if req == nil || res == nil {
		return []byte{}, xerrors.NewSysErr()
	}
	values := req.URL.Query()
	// Indexing [0] is safe: the /api dispatcher in main() verifies
	// api_key is present before calling any handler.
	api_key := values[`api_key`][0]
	rows, err := dbh.Query("select b.id, b.name, b.descr from boards b join image_board_clusters ibc on ibc.id = b.image_board_cluster_id where api_key = $1;", api_key)
	if err != nil {
		return []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `002`, true)
	}
	defer rows.Close()
	var curr_boards []boards
	for rows.Next() {
		var board boards
		err = rows.Scan(&board.Id, &board.Name, &board.Descr)
		if err != nil {
			return []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `003`, true)
		}
		curr_boards = append(curr_boards, board)
	}
	// NOTE(review): rows.Err() is never checked after the loop — an
	// iteration error yields a silently truncated list.
	bytes, err1 := json.Marshal(api_request{"ok", nil, &curr_boards})
	if err1 != nil {
		return []byte{}, xerrors.NewUIErr(err1.Error(), err1.Error(), `004`, true)
	}
	return bytes, nil
}
// getActiveThreadsForBoard lists the active threads of the board given
// by the board_id query parameter, scoped to the cluster that owns the
// supplied api_key.
func getActiveThreadsForBoard(res http.ResponseWriter, req *http.Request) ([]byte, error) {
	if req == nil || res == nil {
		return []byte{}, xerrors.NewSysErr()
	}
	values := req.URL.Query()
	board_id, is_passed := values[`board_id`]
	if !is_passed {
		return []byte{}, xerrors.NewUIErr(`Invalid params: No board_id given!`, `Invalid params: No board_id given!`, `005`, true)
	}
	// api_key presence is enforced by the /api dispatcher in main().
	api_key := values[`api_key`][0]
	rows, err := dbh.Query(`select t.id, t.name from threads t
		join boards b on b.id = t.board_id
		join image_board_clusters ibc on ibc.id = b.image_board_cluster_id
		where t.is_active = TRUE and t.board_id = $1 and ibc.api_key = $2;`, board_id[0], api_key)
	if err != nil {
		return []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `006`, true)
	}
	defer rows.Close()
	var active_threads []threads
	for rows.Next() {
		glog.Info("Popped new thread")
		var thread threads
		err = rows.Scan(&thread.Id, &thread.Name)
		if err != nil {
			return []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `007`, true)
		}
		active_threads = append(active_threads, thread)
	}
	// NOTE(review): rows.Err() is never checked after the loop — an
	// iteration error yields a silently truncated list.
	var bytes []byte
	var err1 error
	// An empty result set is reported as an error-status envelope.
	if(len(active_threads) == 0){
		errMsg := "No objects returned."
		bytes, err1 = json.Marshal(api_request{"error", &errMsg, &active_threads})
	}else {
		bytes, err1 = json.Marshal(api_request{"ok", nil, &active_threads})
	}
	if err1 != nil {
		return []byte{}, xerrors.NewUIErr(err1.Error(), err1.Error(), `008`, true)
	}
	return bytes, nil
}
// getPostsForThread lists the posts of an active thread given by the
// thread_id query parameter, scoped to the cluster that owns the
// supplied api_key.
func getPostsForThread(res http.ResponseWriter, req *http.Request) ([]byte, error) {
	if req == nil || res == nil {
		return []byte{}, xerrors.NewSysErr()
	}
	values := req.URL.Query()
	thread_id, is_passed := values[`thread_id`]
	if !is_passed {
		return []byte{}, xerrors.NewUIErr(`Invalid params: No thread_id given!`, `Invalid params: No thread_id given!`, `006`, true)
	}
	// api_key presence is enforced by the /api dispatcher in main().
	api_key := values[`api_key`][0]
	rows, err := dbh.Query(`select tp.id, tp.body, tp.attachment_url
		from thread_posts tp join threads t on t.id = tp.thread_id
		join boards b on b.id = t.board_id
		join image_board_clusters ibc on ibc.id = b.image_board_cluster_id
		where tp.thread_id = $1 and ibc.api_key = $2 and t.is_active = true;`, thread_id[0], api_key)
	if err != nil {
		glog.Error(err)
		return []byte{}, xerrors.NewSysErr()
	}
	defer rows.Close()
	var curr_posts []thread_posts
	for rows.Next() {
		glog.Info("new post for thread with id: ", thread_id[0])
		var curr_post thread_posts
		// AttachmentUrl is *string so a SQL NULL scans to nil.
		err = rows.Scan(&curr_post.Id, &curr_post.Body, &curr_post.AttachmentUrl)
		if err != nil {
			glog.Error(err)
			return []byte{}, xerrors.NewSysErr()
		}
		curr_posts = append(curr_posts, curr_post)
	}
	// NOTE(review): rows.Err() is never checked after the loop — an
	// iteration error yields a silently truncated list.
	var bytes []byte
	var err1 error
	// An empty result set is reported as an error-status envelope.
	if(len(curr_posts) == 0){
		errMsg := "No objects returned."
		bytes, err1 = json.Marshal(api_request{"error", &errMsg, &curr_posts})
	}else {
		bytes, err1 = json.Marshal(api_request{"ok", nil, &curr_posts})
	}
	if err1 != nil {
		return []byte{}, xerrors.NewSysErr()
	}
	return bytes, nil
}
// addPostToThread inserts a new post into a thread; when the thread's
// post limit has been reached it deactivates the thread and reports
// an error instead.
func addPostToThread(res http.ResponseWriter, req *http.Request) ([]byte, error) {
	if req == nil || res == nil {
		return []byte{}, xerrors.NewSysErr()
	}
	values := req.URL.Query()
	thread_id, is_passed := values[`thread_id`]
	if !is_passed {
		return []byte{}, xerrors.NewUIErr(`Invalid params: No thread_id given!`, `Invalid params: No thread_id given!`, `001`, true)
	}
	thread_body_post, is_passed := values[`thread_post_body`]
	if !is_passed {
		return []byte{}, xerrors.NewUIErr(`Invalid params: No thread_post_body given!`, `Invalid params: No thread_post_body given!`, `001`, true)
	}
	var is_limit_reached bool
	err := dbh.QueryRow("select (select count(*) from thread_posts where thread_id = $1) > max_posts_per_thread from threads where id = $1;", thread_id[0]).Scan(&is_limit_reached)
	if err != nil {
		return []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `009`, true)
	}
	if is_limit_reached {
		// Bug fix: Exec, not QueryRow().Scan() — the UPDATE returns no
		// rows, and the original discarded the resulting error.
		if _, updErr := dbh.Exec("UPDATE threads set is_active = false where id = $1", thread_id[0]); updErr != nil {
			glog.Error(updErr)
		}
		return []byte{}, xerrors.NewUIErr(`Thread post limit reached!`, `Thread post limit reached!`, `010`, true)
	}
	attachment_urls, is_passed := values[`attachment_url`]
	var attachment_url *string
	if is_passed {
		attachment_url = &attachment_urls[0]
	}
	// Bug fix: Exec instead of Query — Query leaks the returned *Rows
	// (and its connection) because it is never closed.
	_, err = dbh.Exec("INSERT INTO thread_posts(body, thread_id, attachment_url) VALUES($1, $2, $3)", thread_body_post[0], thread_id[0], attachment_url)
	if err != nil {
		glog.Error(err)
		return []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `011`, true)
	}
	bytes, err1 := json.Marshal(api_request{"ok", nil, nil})
	if err1 != nil {
		return []byte{}, xerrors.NewUIErr(err1.Error(), err1.Error(), `012`, true)
	}
	return bytes, nil
}
// addThread creates a new thread on a board unless the board's thread
// limit has been reached.
func addThread(res http.ResponseWriter, req *http.Request) ([]byte, error) {
	if req == nil || res == nil {
		return []byte{}, xerrors.NewSysErr()
	}
	values := req.URL.Query()
	thread_name, is_passed := values[`thread_name`]
	if !is_passed {
		return []byte{}, xerrors.NewUIErr(`Invalid params: No thread_name given!`, `Invalid params: No thread_name given!`, `013`, true)
	}
	board_id, is_passed := values[`board_id`]
	if !is_passed {
		return []byte{}, xerrors.NewUIErr(`Invalid params: No board_id given!`, `Invalid params: No board_id given!`, `014`, true)
	}
	var is_limit_reached bool
	err := dbh.QueryRow("select (select count(*) from threads where board_id = $1) > thread_setting_max_thread_count from boards where id = $1;", board_id[0]).Scan(&is_limit_reached)
	if err != nil {
		glog.Error("COULD NOT SELECT thread_count")
		return []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `015`, true)
	}
	if is_limit_reached {
		return []byte{}, xerrors.NewUIErr(`Thread limit reached!`, `Thread limit reached!`, `016`, true)
	}
	// Bug fix: Exec instead of Query — Query leaks the returned *Rows
	// (and its connection) because it is never closed.
	_, err = dbh.Exec("INSERT INTO threads(name, board_id, limits_reached_action_id, max_posts_per_thread) VALUES($1, $2, 1, 10)", thread_name[0], board_id[0])
	if err != nil {
		glog.Error("INSERT FAILED")
		return []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `017`, true)
	}
	bytes, err1 := json.Marshal(api_request{"ok", nil, nil})
	if err1 != nil {
		return []byte{}, xerrors.NewUIErr(err1.Error(), err1.Error(), `018`, true)
	}
	return bytes, nil
}
// dbConnString is populated from ABC_DB_CONN_STRING at startup.
var dbConnString = ``

// dbh is the shared Postgres handle used by all API handlers.
var dbh *sql.DB
// sample usage
// main opens the database and wires up the HTTP API: every request
// hits /api with a ?command= selector that is dispatched through the
// commands map; /f/ serves static files.
func main() {
	flag.Parse()
	var err error
	dbConnString = os.Getenv("ABC_DB_CONN_STRING") // DB will return error if empty string
	dbh, err = sql.Open("postgres", dbConnString)
	if err != nil {
		glog.Fatal(err)
	}
	// Command name -> handler; each handler returns the JSON payload
	// to write, or an xerrors error rendered below.
	commands := map[string]func(http.ResponseWriter, *http.Request) ([]byte, error){
		"getBoards":                getBoards,
		"getActiveThreadsForBoard": getActiveThreadsForBoard,
		"getPostsForThread":        getPostsForThread,
		"addPostToThread":          addPostToThread,
		"addThread":                addThread,
	}
	http.HandleFunc("/api", func(res http.ResponseWriter, req *http.Request) {
		values := req.URL.Query()
		command, is_passed := values[`command`]
		if !is_passed {
			res.Write([]byte(`{"Status":"error","Msg":"Paremeter 'command' is undefined.","Payload":null}`))
			return
		}
		// Handlers index values[`api_key`][0] directly, so presence is
		// enforced once here.
		_, is_passed = values[`api_key`]
		if !is_passed {
			res.Write([]byte(`{"Status":"error","Msg":"Paremeter 'api_key' is undefined.","Payload":null}`))
			return
		}
		_, is_passed = commands[command[0]]
		if !is_passed {
			res.Write([]byte(`{"Status":"error","Msg":"No such command exists.","Payload":null}`))
			glog.Error("command: ", command[0])
			return
		}
		res.Header().Set("Access-Control-Allow-Origin", "*")
		bytes, err := commands[command[0]](res, req)
		if err != nil {
			// The concrete error type name selects the envelope's
			// Status code; both branches read xerrors.XError.Code.
			if string(reflect.TypeOf(err).Name()) == `SysErr` {
				res.Write([]byte(`{"Status":"` + err.(xerrors.XError).Code + `","Msg":"` + err.Error() + `","Payload":null}`))
			} else if string(reflect.TypeOf(err).Name()) == `UIErr` {
				res.Write([]byte(`{"Status":"` + err.(xerrors.XError).Code + `","Msg":"` + err.Error() + `","Payload":null}`))
			} else {
				res.Write([]byte(`{"Status":"000","Msg":"Application Error!","Payload":null}`))
			}
			glog.Error(err)
			return
		}
		glog.Info(string(bytes))
		res.Write(bytes)
	})
	http.Handle("/f/", http.StripPrefix("/f/", http.FileServer(http.Dir(os.Getenv("ABC_FILES_DIR")))))
	// NOTE(review): the ListenAndServe error is discarded — surface it.
	http.ListenAndServe(`:`+os.Getenv("ABC_SERVER_ENDPOINT_URL"), nil)
}
|
package server
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"strconv"
log "github.com/Sirupsen/logrus"
"github.com/gorilla/mux"
)
// Server owns the in-memory registry of games, keyed by the numeric
// id handed out at creation time.
type Server struct {
	games map[int]Game
}
// NewServer builds a Server with an empty game registry.
func NewServer() *Server {
	return &Server{games: make(map[int]Game)}
}
// Start registers the REST routes on a gorilla/mux router and serves
// them on :8080 (blocking).
func (server *Server) Start() {
	fmt.Println("Ready to Dart !!")
	r := mux.NewRouter()
	// game creation (POST) - takes the game type, returns an id
	r.HandleFunc("/games", server.gamesHandler).Methods("POST")
	// game state (GET)
	r.HandleFunc("/games/{gameId}", server.gameHandler).Methods("GET")
	// player creation (POST) -> returns the player
	r.HandleFunc("/games/{gameId}/user", server.usersHandler).Methods("POST")
	// player state
	r.HandleFunc("/games/{gameId}/user/{userId}", server.userHandler).Methods("GET")
	// POST: dart hit state
	r.HandleFunc("/games/{gameId}/dart", server.dartHandler).Methods("POST")
	http.Handle("/", r)
	log.Println("Start server")
	// NOTE(review): the ListenAndServe error is discarded — surface it.
	http.ListenAndServe(":8080", nil)
}
// gameRepresentation is the POST /games request body.
type gameRepresentation struct {
	Style string `json:"style"`
}
// gamesHandler creates a game from the posted {"style": ...} body,
// registers it under the next numeric id, and echoes the game as
// JSON.
func (server *Server) gamesHandler(writer http.ResponseWriter, request *http.Request) {
	var g gameRepresentation
	decoder := json.NewDecoder(request.Body)
	// NOTE(review): the Decode error is ignored; a malformed body
	// falls through with an empty Style and is rejected by the factory.
	decoder.Decode(&g)
	nextID := len(server.games) + 1
	theGame, err := gameFactory(g.Style)
	if err != nil {
		fmt.Fprintf(writer, "go fuck yourself %s ! ", g.Style)
		// Bug fix: the original fell through and registered a nil game.
		return
	}
	server.games[nextID] = theGame
	marshal, err := json.Marshal(theGame)
	if err != nil {
		fmt.Fprintf(writer, "go fuck yourself %s ! ", g.Style)
		// Bug fix: stop after reporting the marshal failure.
		return
	}
	fmt.Fprint(writer, string(marshal))
}
// gameFactory maps a style string to a concrete Game implementation;
// unknown styles yield an error and a nil game.
func gameFactory(style string) (Game, error) {
	if style == "301" {
		return NewGamex01(301), nil
	}
	return nil, errors.New("prout")
}
// gameHandler writes the state of the game identified by the gameId
// path variable as JSON.
func (server *Server) gameHandler(writer http.ResponseWriter, request *http.Request) {
	vars := mux.Vars(request)
	gameIDStr := vars["gameId"]
	gameID, err := strconv.Atoi(gameIDStr)
	if err != nil {
		fmt.Fprintf(writer, "go fuck yourself %s ! ", gameIDStr)
		// Bug fix: the original fell through and marshalled the game
		// stored under id 0.
		return
	}
	currentGame := server.games[gameID]
	result, err := json.Marshal(currentGame)
	if err != nil {
		fmt.Fprintf(writer, "go fuck yourself %s ! ", gameIDStr)
		// Bug fix: stop after reporting the marshal failure.
		return
	}
	fmt.Fprint(writer, string(result))
}
// usersHandler is a stub that echoes the target game id.
func (server *Server) usersHandler(writer http.ResponseWriter, request *http.Request) {
	vars := mux.Vars(request)
	// Bug fix: the route declares {gameId}; the original looked up
	// "gameID" (capital D) and always got "".
	gameID := vars["gameId"]
	fmt.Fprint(writer, "gameID "+gameID)
}
// userHandler is a stub that echoes the game and user ids from the
// URL.
func (server *Server) userHandler(writer http.ResponseWriter, request *http.Request) {
	params := mux.Vars(request)
	fmt.Fprint(writer, "gameID "+params["gameId"]+" userId"+params["userId"])
}
// dartHandler is a stub acknowledging a dart hit for a game.
func (server *Server) dartHandler(writer http.ResponseWriter, request *http.Request) {
	params := mux.Vars(request)
	fmt.Fprint(writer, "gameID "+params["gameId"]+" dart")
}
get rid of std http server, welcome gin-gonic !
package server
import (
"errors"
"fmt"
"net/http"
"strconv"
log "github.com/Sirupsen/logrus"
"github.com/gin-gonic/gin"
)
// Server owns the in-memory registry of games, keyed by the numeric
// id handed out at creation time.
type Server struct {
	games map[int]Game
}
// NewServer builds a Server with an empty game registry.
func NewServer() *Server {
	return &Server{games: make(map[int]Game)}
}
// Start registers the REST routes on a gin engine and serves them on
// :8080 (blocking).
func (server *Server) Start() {
	fmt.Println("Ready to Dart !!")
	r := gin.Default()
	// game creation (POST) - takes the game type, returns an id
	r.POST("/games", server.createNewGameHandler)
	// game state (GET)
	r.GET("/games/:gameId", server.findGameByIdHandler)
	// player creation (POST) -> returns the player
	r.POST("/games/:gameId/players", server.addPlayerToGameHandler)
	// player state (not yet ported from the mux version):
	// r.GET("/games/{gameId}/user/{userId}", server.userHandler).Methods("GET")
	//
	// dart hit state (not yet ported):
	// r.POST("/games/{gameId}/dart", server.dartHandler).Methods("POST")
	http.Handle("/", r)
	log.Println("Start server")
	// NOTE(review): the ListenAndServe error is discarded — surface it.
	http.ListenAndServe(":8080", nil)
}
// gameRepresentation is the POST /games request body.
type gameRepresentation struct {
	Style string `json:"style"`
}
// createNewGameHandler creates a game from the posted
// {"style": ...} body, registers it under the next numeric id, and
// returns {"id": ..., "game": ...}. A malformed body or unknown
// style both yield 400.
func (server *Server) createNewGameHandler(c *gin.Context) {
	var g gameRepresentation
	// Guard clauses replace the original nested if/else.
	if c.BindJSON(&g) != nil {
		c.JSON(http.StatusBadRequest, gin.H{"stats": "illegal content"})
		return
	}
	theGame, err := gameFactory(g.Style)
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"stats": "illegal content"})
		return
	}
	// NOTE(review): server.games is read and written from concurrent
	// handlers without a lock — confirm single-client use or guard
	// the map with a mutex.
	nextID := len(server.games) + 1
	server.games[nextID] = theGame
	c.JSON(http.StatusOK, gin.H{"id": nextID, "game": theGame})
}
// gameFactory maps a style string to a concrete Game implementation;
// unknown styles yield an error and a nil game.
func gameFactory(style string) (Game, error) {
	if style == "301" {
		return NewGamex01(301), nil
	}
	return nil, errors.New("prout")
}
// findGameByIdHandler returns the game stored under :gameId as JSON;
// 400 for a non-numeric id, 404 when no such game exists.
func (server *Server) findGameByIdHandler(c *gin.Context) {
	gameID, err := strconv.Atoi(c.Param("gameId"))
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"stats": "illegal content"})
		return
	}
	// Bug fix: logrus.Infof takes printf verbs, so the original "{}"
	// placeholder was printed literally instead of the id.
	log.Infof("flushing game w/ id %d", gameID)
	currentGame, ok := server.games[gameID]
	if !ok {
		c.JSON(http.StatusNotFound, nil)
		return
	}
	c.JSON(http.StatusOK, gin.H{"game": currentGame})
}
// playerRepresentation is the POST /games/:gameId/players request
// body.
type playerRepresentation struct {
	Name string `json:"name"`
}
// addPlayerToGameHandler adds the posted {"name": ...} player to the
// game stored under :gameId; 400 for a bad id or body, 404 when the
// game does not exist, 201 on success.
func (server *Server) addPlayerToGameHandler(c *gin.Context) {
	gameID, err := strconv.Atoi(c.Param("gameId"))
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"stats": "illegal content"})
		return
	}
	// Bug fix: logrus.Infof takes printf verbs, so the original "{}"
	// placeholder was printed literally instead of the id.
	log.Infof("flushing game w/ id %d", gameID)
	currentGame, ok := server.games[gameID]
	if !ok {
		c.JSON(http.StatusNotFound, nil)
		return
	}
	var p playerRepresentation
	if c.BindJSON(&p) != nil {
		c.JSON(http.StatusBadRequest, nil)
		return
	}
	currentGame.AddPlayer(p.Name)
	c.JSON(http.StatusCreated, nil)
}
|
package server
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
"github.com/jpillora/cloud-torrent/engine"
"github.com/suryadewa/ForNesiaToRrent/static"
"github.com/jpillora/go-realtime"
"github.com/jpillora/requestlog"
"github.com/jpillora/scraper/scraper"
"github.com/skratchdot/open-golang/open"
)
//Server is the "State" portion of the diagram
//
// Server bundles the CLI configuration (populated via the help/env
// struct tags), the HTTP handlers, the torrent engine, and the
// realtime state object mirrored into every connected browser.
type Server struct {
	//config
	Title      string `help:"Title of this instance" env:"TITLE"`
	Port       int    `help:"Listening port" env:"PORT"`
	Host       string `help:"Listening interface (default all)"`
	Auth       string `help:"Optional basic auth in form 'user:password'" env:"AUTH"`
	ConfigPath string `help:"Configuration file path"`
	KeyPath    string `help:"TLS Key file path"`
	CertPath   string `help:"TLS Certicate file path" short:"r"`
	Log        bool   `help:"Enable request logging"`
	Open       bool   `help:"Open now with your default browser"`
	//http handlers
	files, static http.Handler
	scraper       *scraper.Handler
	scraperh      http.Handler
	//torrent engine
	engine *engine.Engine
	//realtime state (sync'd with browser immediately)
	rt *realtime.Handler
	// NOTE(review): sync.Mutex is embedded for guarding mutations of
	// this state — confirm callers actually lock around writes.
	state struct {
		realtime.Object
		sync.Mutex
		Config          engine.Config
		SearchProviders scraper.Config
		Downloads       *fsNode
		Torrents        map[string]*engine.Torrent
		Users           map[string]*realtime.User
		Stats           struct {
			Title   string
			Version string
			Runtime string
			Uptime  time.Time
		}
	}
}
// Run wires up all handlers, loads (or creates) the configuration,
// starts the background pollers and blocks serving HTTP(S) until the
// listener fails. version is shown in the UI stats.
func (s *Server) Run(version string) error {
	tls := s.CertPath != "" || s.KeyPath != "" //poor man's XOR
	if tls && (s.CertPath == "" || s.KeyPath == "") {
		return fmt.Errorf("You must provide both key and cert paths")
	}
	s.state.Stats.Title = s.Title
	s.state.Stats.Version = version
	s.state.Stats.Runtime = strings.TrimPrefix(runtime.Version(), "go")
	s.state.Stats.Uptime = time.Now()
	//init maps
	s.state.Users = map[string]*realtime.User{}
	//will use the local embed/ dir if it exists, otherwise will use the hardcoded embedded binaries
	s.files = http.HandlerFunc(s.serveFiles)
	s.static = ctstatic.FileSystemHandler()
	s.scraper = &scraper.Handler{Log: false}
	//bug fix: log.Fatal inside a method that returns error kills the
	//process and skips deferred cleanup — surface the error instead
	if err := s.scraper.LoadConfig(defaultSearchConfig); err != nil {
		return fmt.Errorf("Invalid search provider config: %s", err)
	}
	s.state.SearchProviders = s.scraper.Config //share scraper config
	s.scraperh = http.StripPrefix("/search", s.scraper)
	s.engine = engine.New()
	//realtime
	s.rt = realtime.NewHandler()
	if err := s.rt.Add("state", &s.state); err != nil {
		return fmt.Errorf("State object not syncable: %s", err)
	}
	//realtime user events: track connected browsers
	go func() {
		for user := range s.rt.UserEvents() {
			if user.Connected {
				s.state.Users[user.ID] = user
			} else {
				delete(s.state.Users, user.ID)
			}
			s.state.Update()
		}
	}()
	//configure engine with defaults, overridden by the config file
	c := engine.Config{
		DownloadDirectory: "./downloads",
		EnableUpload:      true,
		AutoStart:         true,
	}
	if _, err := os.Stat(s.ConfigPath); err == nil {
		if b, err := ioutil.ReadFile(s.ConfigPath); err != nil {
			return fmt.Errorf("Read configuration error: %s", err)
		} else if len(b) == 0 {
			//ignore empty file
		} else if err := json.Unmarshal(b, &c); err != nil {
			return fmt.Errorf("Malformed configuration: %s", err)
		}
	}
	//bug fix: 65535 is a valid TCP port — only reject values outside 1..65535
	if c.IncomingPort <= 0 || c.IncomingPort > 65535 {
		c.IncomingPort = 50007
	}
	if err := s.reconfigure(c); err != nil {
		return fmt.Errorf("initial configure failed: %s", err)
	}
	//poll torrents and files once a second to refresh browser state
	go func() {
		for {
			s.state.Lock()
			s.state.Torrents = s.engine.GetTorrents()
			s.state.Downloads = s.listFiles()
			s.state.Unlock()
			s.state.Update()
			time.Sleep(1 * time.Second)
		}
	}()
	host := s.Host
	if host == "" {
		host = "0.0.0.0"
	}
	addr := fmt.Sprintf("%s:%d", host, s.Port)
	proto := "http"
	if tls {
		proto += "s"
	}
	log.Printf("Listening at %s://%s", proto, addr)
	if s.Open {
		openhost := host
		if openhost == "0.0.0.0" {
			openhost = "localhost"
		}
		//open the user's browser shortly after the listener is up
		go func() {
			time.Sleep(1 * time.Second)
			open.Run(fmt.Sprintf("%s://%s:%d", proto, openhost, s.Port))
		}()
	}
	h := http.Handler(http.HandlerFunc(s.handle))
	if s.Log {
		h = requestlog.Wrap(h)
	}
	if tls {
		return http.ListenAndServeTLS(addr, s.CertPath, s.KeyPath, h)
	}
	return http.ListenAndServe(addr, h)
}
// reconfigure applies a new engine configuration, persists it to
// s.ConfigPath and broadcasts the new state to connected browsers.
func (s *Server) reconfigure(c engine.Config) error {
	dldir, err := filepath.Abs(c.DownloadDirectory)
	if err != nil {
		return fmt.Errorf("Invalid path")
	}
	c.DownloadDirectory = dldir
	if err := s.engine.Configure(c); err != nil {
		return err
	}
	//marshal of a plain config struct cannot realistically fail
	b, _ := json.MarshalIndent(&c, "", "  ")
	//bug fix: the write error was silently dropped; also 0755 marked the
	//JSON file executable — 0644 is the conventional mode for data files.
	//Persisting is best-effort, so log rather than fail the reconfigure.
	if err := ioutil.WriteFile(s.ConfigPath, b, 0644); err != nil {
		log.Printf("Could not save configuration: %s", err)
	}
	s.state.Config = c
	s.state.Update()
	return nil
}
// handle is the single entry point for all HTTP traffic: realtime
// endpoints first, then the optional basic-auth gate, then search, API
// and finally static files.
func (s *Server) handle(w http.ResponseWriter, r *http.Request) {
	// Realtime endpoints bypass auth and all other routing.
	switch r.URL.Path {
	case "/realtime.js":
		realtime.JS.ServeHTTP(w, r)
		return
	case "/realtime":
		s.rt.ServeHTTP(w, r)
		return
	}
	// Optional basic-auth gate for everything below.
	if s.Auth != "" {
		user, pass, _ := r.BasicAuth()
		if user+":"+pass != s.Auth {
			w.Header().Set("WWW-Authenticate", "Basic")
			w.WriteHeader(http.StatusUnauthorized)
			w.Write([]byte("Access Denied"))
			return
		}
	}
	switch {
	case strings.HasPrefix(r.URL.Path, "/search"):
		// Delegate search queries to the scraper handler.
		s.scraperh.ServeHTTP(w, r)
	case strings.HasPrefix(r.URL.Path, "/api/"):
		// API calls report only success or the error text.
		if err := s.api(r); err != nil {
			w.WriteHeader(http.StatusBadRequest)
			w.Write([]byte(err.Error()))
		} else {
			w.WriteHeader(http.StatusOK)
			w.Write([]byte("OK"))
		}
	default:
		// No match: assume a static file.
		s.files.ServeHTTP(w, r)
	}
}
Update server.go
package server
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
"github.com/jpillora/cloud-torrent/engine"
"github.com/jpillora/go-realtime"
"github.com/jpillora/requestlog"
"github.com/jpillora/scraper/scraper"
"github.com/skratchdot/open-golang/open"
)
//Server is the "State" portion of the diagram
type Server struct {
	//config (struct tags drive CLI flag/help generation)
	Title      string `help:"Title of this instance" env:"TITLE"`
	Port       int    `help:"Listening port" env:"PORT"`
	Host       string `help:"Listening interface (default all)"`
	Auth       string `help:"Optional basic auth in form 'user:password'" env:"AUTH"`
	ConfigPath string `help:"Configuration file path"`
	KeyPath    string `help:"TLS Key file path"`
	//NOTE(review): help text says "Certicate" — typo for "Certificate" in user-visible help
	CertPath string `help:"TLS Certicate file path" short:"r"`
	Log      bool   `help:"Enable request logging"`
	Open     bool   `help:"Open now with your default browser"`
	//http handlers
	files, static http.Handler
	scraper       *scraper.Handler
	scraperh      http.Handler
	//torrent engine
	engine *engine.Engine
	//realtime state (sync'd with browser immediately)
	rt *realtime.Handler
	//state is pushed to connected browsers via rt; mutate only while
	//holding the embedded mutex, then call Update() to broadcast.
	state struct {
		realtime.Object
		sync.Mutex
		Config          engine.Config
		SearchProviders scraper.Config
		Downloads       *fsNode
		Torrents        map[string]*engine.Torrent
		Users           map[string]*realtime.User
		Stats           struct {
			Title   string
			Version string
			Runtime string
			Uptime  time.Time
		}
	}
}
// Run wires up all handlers, loads (or creates) the configuration,
// starts the background pollers and blocks serving HTTP(S) until the
// listener fails. version is shown in the UI stats.
func (s *Server) Run(version string) error {
	tls := s.CertPath != "" || s.KeyPath != "" //poor man's XOR
	if tls && (s.CertPath == "" || s.KeyPath == "") {
		return fmt.Errorf("You must provide both key and cert paths")
	}
	s.state.Stats.Title = s.Title
	s.state.Stats.Version = version
	s.state.Stats.Runtime = strings.TrimPrefix(runtime.Version(), "go")
	s.state.Stats.Uptime = time.Now()
	//init maps
	s.state.Users = map[string]*realtime.User{}
	//will use the local embed/ dir if it exists, otherwise will use the hardcoded embedded binaries
	s.files = http.HandlerFunc(s.serveFiles)
	//NOTE(review): ctstatic is not in this revision's import block — confirm the static package import
	s.static = ctstatic.FileSystemHandler()
	s.scraper = &scraper.Handler{Log: false}
	//bug fix: log.Fatal inside a method that returns error kills the
	//process and skips deferred cleanup — surface the error instead
	if err := s.scraper.LoadConfig(defaultSearchConfig); err != nil {
		return fmt.Errorf("Invalid search provider config: %s", err)
	}
	s.state.SearchProviders = s.scraper.Config //share scraper config
	s.scraperh = http.StripPrefix("/search", s.scraper)
	s.engine = engine.New()
	//realtime
	s.rt = realtime.NewHandler()
	if err := s.rt.Add("state", &s.state); err != nil {
		return fmt.Errorf("State object not syncable: %s", err)
	}
	//realtime user events: track connected browsers
	go func() {
		for user := range s.rt.UserEvents() {
			if user.Connected {
				s.state.Users[user.ID] = user
			} else {
				delete(s.state.Users, user.ID)
			}
			s.state.Update()
		}
	}()
	//configure engine with defaults, overridden by the config file
	c := engine.Config{
		DownloadDirectory: "./downloads",
		EnableUpload:      true,
		AutoStart:         true,
	}
	if _, err := os.Stat(s.ConfigPath); err == nil {
		if b, err := ioutil.ReadFile(s.ConfigPath); err != nil {
			return fmt.Errorf("Read configuration error: %s", err)
		} else if len(b) == 0 {
			//ignore empty file
		} else if err := json.Unmarshal(b, &c); err != nil {
			return fmt.Errorf("Malformed configuration: %s", err)
		}
	}
	//bug fix: 65535 is a valid TCP port — only reject values outside 1..65535
	if c.IncomingPort <= 0 || c.IncomingPort > 65535 {
		c.IncomingPort = 50007
	}
	if err := s.reconfigure(c); err != nil {
		return fmt.Errorf("initial configure failed: %s", err)
	}
	//poll torrents and files once a second to refresh browser state
	go func() {
		for {
			s.state.Lock()
			s.state.Torrents = s.engine.GetTorrents()
			s.state.Downloads = s.listFiles()
			s.state.Unlock()
			s.state.Update()
			time.Sleep(1 * time.Second)
		}
	}()
	host := s.Host
	if host == "" {
		host = "0.0.0.0"
	}
	addr := fmt.Sprintf("%s:%d", host, s.Port)
	proto := "http"
	if tls {
		proto += "s"
	}
	log.Printf("Listening at %s://%s", proto, addr)
	if s.Open {
		openhost := host
		if openhost == "0.0.0.0" {
			openhost = "localhost"
		}
		//open the user's browser shortly after the listener is up
		go func() {
			time.Sleep(1 * time.Second)
			open.Run(fmt.Sprintf("%s://%s:%d", proto, openhost, s.Port))
		}()
	}
	h := http.Handler(http.HandlerFunc(s.handle))
	if s.Log {
		h = requestlog.Wrap(h)
	}
	if tls {
		return http.ListenAndServeTLS(addr, s.CertPath, s.KeyPath, h)
	}
	return http.ListenAndServe(addr, h)
}
// reconfigure applies a new engine configuration, persists it to
// s.ConfigPath and broadcasts the new state to connected browsers.
func (s *Server) reconfigure(c engine.Config) error {
	dldir, err := filepath.Abs(c.DownloadDirectory)
	if err != nil {
		return fmt.Errorf("Invalid path")
	}
	c.DownloadDirectory = dldir
	if err := s.engine.Configure(c); err != nil {
		return err
	}
	//marshal of a plain config struct cannot realistically fail
	b, _ := json.MarshalIndent(&c, "", "  ")
	//bug fix: the write error was silently dropped; also 0755 marked the
	//JSON file executable — 0644 is the conventional mode for data files.
	//Persisting is best-effort, so log rather than fail the reconfigure.
	if err := ioutil.WriteFile(s.ConfigPath, b, 0644); err != nil {
		log.Printf("Could not save configuration: %s", err)
	}
	s.state.Config = c
	s.state.Update()
	return nil
}
// handle routes every HTTP request: realtime endpoints first, then the
// optional basic-auth gate, then search, API, and static files last.
func (s *Server) handle(w http.ResponseWriter, r *http.Request) {
	path := r.URL.Path
	// Realtime endpoints bypass auth and all other routing.
	if path == "/realtime.js" {
		realtime.JS.ServeHTTP(w, r)
		return
	}
	if path == "/realtime" {
		s.rt.ServeHTTP(w, r)
		return
	}
	// Optional basic-auth gate for everything below.
	if s.Auth != "" {
		user, pass, _ := r.BasicAuth()
		if s.Auth != user+":"+pass {
			w.Header().Set("WWW-Authenticate", "Basic")
			w.WriteHeader(http.StatusUnauthorized)
			w.Write([]byte("Access Denied"))
			return
		}
	}
	// Search queries go to the scraper handler.
	if strings.HasPrefix(path, "/search") {
		s.scraperh.ServeHTTP(w, r)
		return
	}
	// API calls report only success or the error text.
	if strings.HasPrefix(path, "/api/") {
		err := s.api(r)
		if err != nil {
			w.WriteHeader(http.StatusBadRequest)
			w.Write([]byte(err.Error()))
			return
		}
		w.WriteHeader(http.StatusOK)
		w.Write([]byte("OK"))
		return
	}
	// No match: assume a static file.
	s.files.ServeHTTP(w, r)
}
|
// gorewind is an event store server written in Go that talks ZeroMQ.
// Copyright (C) 2013 Jens Rantil
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
// Contains the ZeroMQ server loop. Deals with incoming requests and
// delegates them to the event store. Also publishes newly stored events
// using a PUB socket.
//
// See README file for an up-to-date documentation of the ZeroMQ wire
// format.
package server
import (
"bytes"
"errors"
"log"
"container/list"
"time"
"sync"
zmq "github.com/alecthomas/gozmq"
"github.com/JensRantil/gorewind/eventstore"
)
// InitParams are parameters required for initializing the server.
// All fields must be non-nil; see checkAllInitParamsSet.
type InitParams struct {
	// The event store to use as backend.
	Store *eventstore.EventStore
	// The ZeroMQ path that the command receiving socket will bind
	// to.
	CommandSocketZPath *string
	// The ZeroMQ path that the event publishing socket will bind
	// to.
	EvPubSocketZPath *string
}
// checkAllInitParamsSet verifies that every required initialization
// parameter is non-nil, returning an error naming the first missing one.
func checkAllInitParamsSet(p *InitParams) error {
	switch {
	case p.Store == nil:
		return errors.New("Missing param: Store")
	case p.CommandSocketZPath == nil:
		return errors.New("Missing param: CommandSocketZPath")
	case p.EvPubSocketZPath == nil:
		return errors.New("Missing param: EvPubSocketZPath")
	}
	return nil
}
// A server instance. Can be run.
type Server struct {
	params      InitParams
	evpubsock   *zmq.Socket
	commandsock *zmq.Socket
	context     *zmq.Context
	// runningMutex guards running.
	runningMutex sync.Mutex
	running      bool
	// stopChan carries the Stop() handshake with the serving loop.
	stopChan chan bool
	// waiter lets Wait() block until the serving loop has finished.
	// Bug fix: Wait() referenced this field but it was never declared,
	// which made the file fail to compile.
	waiter sync.WaitGroup
}
// IsRunning reports whether the server loop is currently executing.
func (v *Server) IsRunning() bool {
	v.runningMutex.Lock()
	state := v.running
	v.runningMutex.Unlock()
	return state
}
// Wait blocks until the server loop has terminated.
//
// NOTE(review): Server declares no `waiter` field in this revision, so
// this method does not compile as written — a sync.WaitGroup field needs
// to be added to Server (and Add/Done wired into Start) for it to work.
func (v* Server) Wait() {
	v.waiter.Wait()
}
// Stop stops a running server. Blocks until the server is stopped. If
// the server is not running, an error is returned.
func (v* Server) Stop() error {
	if !v.IsRunning() {
		return errors.New("Server not running.")
	}
	// Non-blocking send: if the loop is not ready to receive (another
	// Stop is already in flight), report instead of blocking forever.
	// NOTE(review): this requires stopChan to be initialized (e.g. in
	// New) — a send on a nil channel is never ready, so Stop would
	// always report "already signalled".
	select {
	case v.stopChan <- true:
	default:
		return errors.New("Stop already signalled.")
	}
	// The serving loop echoes on stopChan once it has exited.
	<-v.stopChan
	// v.running is modified by Server.Run(...)
	if v.IsRunning() {
		return errors.New("Signalled stopped, but never stopped.")
	}
	return nil
}
// New initializes a new event store server and returns a handle to it.
// The event store is not started; it's up to the caller to execute
// Start() on the server handle.
func New(params *InitParams) (*Server, error) {
	if params == nil {
		return nil, errors.New("Missing init params")
	}
	if err := checkAllInitParamsSet(params); err != nil {
		return nil, err
	}
	server := Server{
		params:  *params,
		running: false,
		// Bug fix: stopChan was never initialized anywhere, leaving it
		// nil — a nil channel is never ready, so Stop() could never
		// signal the serving loop.
		stopChan: make(chan bool),
	}
	// Tear down any partially-created ZeroMQ state if we bail out early.
	// (A plain bool captured by the closure replaces the previous
	// needlessly heap-allocated *bool.)
	allOkay := false
	defer func() {
		if !allOkay {
			server.closeZmq()
		}
	}()
	context, err := zmq.NewContext()
	if err != nil {
		return nil, err
	}
	server.context = context
	// ROUTER socket receiving commands from clients.
	commandsock, err := context.NewSocket(zmq.ROUTER)
	if err != nil {
		return nil, err
	}
	server.commandsock = commandsock
	if err = commandsock.Bind(*params.CommandSocketZPath); err != nil {
		return nil, err
	}
	// PUB socket broadcasting newly stored events.
	evpubsock, err := context.NewSocket(zmq.PUB)
	if err != nil {
		return nil, err
	}
	server.evpubsock = evpubsock
	if binderr := evpubsock.Bind(*params.EvPubSocketZPath); binderr != nil {
		return nil, binderr
	}
	allOkay = true
	return &server, nil
}
// closeZmq closes and clears the server's ZeroMQ sockets and context.
// Bug fix: nil checks make it safe to call from New's cleanup path,
// where a failure may occur before all sockets have been created —
// previously that path dereferenced nil pointers and panicked.
func (v *Server) closeZmq() {
	if v.evpubsock != nil {
		(*v.evpubsock).Close()
		v.evpubsock = nil
	}
	if v.commandsock != nil {
		(*v.commandsock).Close()
		v.commandsock = nil
	}
	if v.context != nil {
		(*v.context).Close()
		v.context = nil
	}
}
// setRunningState records whether the serving loop is active, guarded
// by runningMutex so IsRunning sees a consistent value.
func (v *Server) setRunningState(newState bool) {
	v.runningMutex.Lock()
	v.running = newState
	v.runningMutex.Unlock()
}
// Runs the server that distributes requests to workers.
// Panics on error since it is an essential piece of code required to
// run the application correctly.
func (v *Server) Start() {
	v.setRunningState(true)
	defer v.setRunningState(false)
	// NOTE(review): the explicit dereferences are equivalent to
	// v.params.Store, *v.evpubsock, *v.commandsock. stopChan is never
	// initialized in this revision, so the loop's stop case can never
	// fire — confirm initialization in New.
	loopServer((*v).params.Store, *(*v).evpubsock, *(*v).commandsock, v.stopChan)
}
// The result of an asynchronous zmq.Poll call.
type zmqPollResult struct {
	// err is non-nil when the underlying zmq.Poll call failed.
	err error
}
// Polls a bunch of ZeroMQ sockets and notifies the result through a
// channel. This makes it possible to combine ZeroMQ polling with Go's
// own built-in channels.
func asyncPoll(notifier chan zmqPollResult, items zmq.PollItems, stop chan bool) {
	for {
		// One-second timeout so the stop signal is noticed even when
		// there is no socket activity.
		timeout := time.Duration(1)*time.Second
		count, err := zmq.Poll(items, timeout)
		if count > 0 || err != nil {
			notifier <- zmqPollResult{err}
		}
		// Non-blocking stop check; the value is echoed back so the
		// stopper (stopPoller) can synchronize on shutdown.
		select {
		case <-stop:
			stop <- true
			return
		default:
		}
	}
}
// stopPoller asks the polling goroutine to stop and then waits until it
// acknowledges by echoing a value back on the same channel.
func stopPoller(cancelChan chan bool) {
	cancelChan <- true // request shutdown
	<-cancelChan       // wait for the poller's acknowledgement
}
// The core ZeroMQ messaging loop. Handles requests and responses
// asynchronously using the router socket. Every request is delegated to
// a goroutine for maximum concurrency.
//
// `gozmq` does currently not support copy-free messages/frames. This
// means that every message passing through this function needs to be
// copied in-memory. If this becomes a bottleneck in the future,
// multiple router sockets can be hooked to this final router to scale
// message copying.
//
// TODO: Make this a type function of `Server` to remove a lot of
// parameters.
func loopServer(estore *eventstore.EventStore, evpubsock, frontend zmq.Socket,
stop chan bool) {
	// NOTE(review): `zmq.Events:` is a package-qualified struct field
	// key, which is not valid Go — this should read `Events: zmq.POLLIN`.
	toPoll := zmq.PollItems{
		zmq.PollItem{Socket: &frontend, zmq.Events: zmq.POLLIN},
	}
	// Forward newly stored events to the PUB socket in the background.
	pubchan := make(chan eventstore.StoredEvent)
	estore.RegisterPublishedEventsChannel(pubchan)
	go publishAllSavedEvents(pubchan, evpubsock)
	pollchan := make(chan zmqPollResult)
	respchan := make(chan zMsg)
	pollCancel := make(chan bool)
	defer stopPoller(pollCancel)
	go asyncPoll(pollchan, toPoll, pollCancel)
	for {
		select {
		case res := <-pollchan:
			if res.err != nil {
				log.Print("Could not poll:", res.err)
			}
			if res.err == nil && toPoll[0].REvents&zmq.POLLIN != 0 {
				msg, _ := toPoll[0].Socket.RecvMultipart(0)
				zmsg := zMsg(msg)
				go handleRequest(respchan, estore, zmsg)
			}
			// NOTE(review): asyncPoll already loops internally, so
			// re-spawning it per result appears to accumulate extra
			// polling goroutines — confirm intent.
			go asyncPoll(pollchan, toPoll, pollCancel)
		case frames := <-respchan:
			// Worker responses go back out through the ROUTER socket.
			if err := frontend.SendMultipart(frames, 0); err != nil {
				log.Println(err)
			}
		case <- stop:
			// Echo back so Stop() unblocks.
			stop <- true
			return
		}
	}
}
// Publishes stored events to event listeners.
//
// Pops previously stored messages off a channel and publishes them to a
// ZeroMQ socket. Bug fix: ranging over the channel (instead of a bare
// infinite receive loop) lets this goroutine terminate cleanly once the
// channel is closed by its owner, instead of leaking forever.
func publishAllSavedEvents(toPublish chan eventstore.StoredEvent, evpub zmq.Socket) {
	msg := make(zMsg, 3)
	for stored := range toPublish {
		msg[0] = stored.Event.Stream
		msg[1] = stored.Id
		msg[2] = stored.Event.Data
		if err := evpub.SendMultipart(msg, 0); err != nil {
			log.Println(err)
		}
	}
}
// A single frame in a ZeroMQ message.
type zFrame []byte
// A ZeroMQ message. The conversion zMsg(m) from a [][]byte is free —
// both types share the same underlying representation.
//
// I wish it could have been `[]zFrame`, but that would make conversion
// from `[][]byte` pretty messy[1].
//
// [1] http://stackoverflow.com/a/15650327/260805
type zMsg [][]byte
// Handles a single ZeroMQ RES/REQ loop synchronously.
//
// The full request message stored in `msg` and the full ZeroMQ response
// is pushed to `respchan`. The function does not return any error
// because it is expected to be called asynchronously as a goroutine.
func handleRequest(respchan chan zMsg, estore *eventstore.EventStore, msg zMsg) {
	// TODO: Rename to 'framelist'
	parts := list.New()
	for _, msgpart := range msg {
		parts.PushBack(msgpart)
	}
	// Split off the ROUTER envelope (identity frames up to and including
	// the first empty delimiter frame); it prefixes every response.
	//
	// NOTE(review): frames were pushed above as []byte, but asserted to
	// zFrame below — a Go type assertion requires the exact dynamic
	// type, so this looks like it would panic at runtime; confirm.
	// NOTE(review): a message lacking an empty delimiter frame drives
	// parts.Front() to nil here — confirm the wire format guarantees
	// the envelope is always present.
	resptemplate := list.New()
	emptyFrame := zFrame("")
	for true {
		resptemplate.PushBack(parts.Remove(parts.Front()))
		if bytes.Equal(parts.Front().Value.(zFrame), emptyFrame) {
			break
		}
	}
	if parts.Len() == 0 {
		errstr := "Incoming command was empty. Ignoring it."
		log.Println(errstr)
		response := copyList(resptemplate)
		response.PushBack(zFrame("ERROR " + errstr))
		respchan <- listToFrames(response)
		return
	}
	// The first remaining frame names the command; the rest are its
	// arguments.
	command := string(parts.Front().Value.(zFrame))
	switch command {
	case "PUBLISH":
		parts.Remove(parts.Front())
		if parts.Len() != 2 {
			// TODO: Constantify this error message
			errstr := "Wrong number of frames for PUBLISH."
			log.Println(errstr)
			response := copyList(resptemplate)
			response.PushBack(zFrame("ERROR " + errstr))
			respchan <- listToFrames(response)
		} else {
			estream := parts.Remove(parts.Front())
			data := parts.Remove(parts.Front())
			newevent := eventstore.Event{
				estream.(eventstore.StreamName),
				data.(zFrame),
			}
			newId, err := estore.Add(newevent)
			if err != nil {
				sErr := err.Error()
				log.Println(sErr)
				response := copyList(resptemplate)
				response.PushBack(zFrame("ERROR " + sErr))
				respchan <- listToFrames(response)
			} else {
				// the event was added
				response := copyList(resptemplate)
				response.PushBack(zFrame("PUBLISHED"))
				response.PushBack(zFrame(newId))
				respchan <- listToFrames(response)
			}
		}
	case "QUERY":
		parts.Remove(parts.Front())
		if parts.Len() != 3 {
			// TODO: Constantify this error message
			errstr := "Wrong number of frames for QUERY."
			log.Println(errstr)
			response := copyList(resptemplate)
			response.PushBack(zFrame("ERROR " + errstr))
			respchan <- listToFrames(response)
		} else {
			estream := parts.Remove(parts.Front())
			fromid := parts.Remove(parts.Front())
			toid := parts.Remove(parts.Front())
			req := eventstore.QueryRequest{
				Stream: estream.(zFrame),
				FromId: fromid.(zFrame),
				ToId: toid.(zFrame),
			}
			events, err := estore.Query(req)
			if err != nil {
				sErr := err.Error()
				log.Println(sErr)
				response := copyList(resptemplate)
				response.PushBack(zFrame("ERROR " + sErr))
				respchan <- listToFrames(response)
			} else {
				// Stream each matching event, then terminate the
				// response sequence with an END frame.
				for eventdata := range(events) {
					response := copyList(resptemplate)
					response.PushBack([]byte("EVENT"))
					response.PushBack(eventdata.Id)
					response.PushBack(eventdata.Data)
					respchan <- listToFrames(response)
				}
				response := copyList(resptemplate)
				response.PushBack(zFrame("END"))
				respchan <- listToFrames(response)
			}
		}
	default:
		// TODO: Move these error strings out as constants of
		// this package.
		// TODO: Move the chunk of code below into a separate
		// function and reuse for similar piece of code above.
		// TODO: Constantify this error message
		errstr := "Unknown request type."
		log.Println(errstr)
		response := copyList(resptemplate)
		response.PushBack(zFrame("ERROR " + errstr))
		respchan <- listToFrames(response)
	}
}
// Convert a doubly linked list of message frames to a slice of message
// frames.
func listToFrames(l *list.List) zMsg {
	frames := make(zMsg, l.Len())
	i := 0
	for e := l.Front(); e != nil; e = e.Next() {
		frames[i] = e.Value.(zFrame)
		i++ // bug fix: the index was never advanced, so every frame overwrote frames[0]
	}
	return frames
}
// Helper function for copying a doubly linked list.
func copyList(l *list.List) *list.List {
replica := list.New()
replica.PushBackList(l)
return replica
}
Shutting down `publishAllSavedEvents` correctly
// gorewind is an event store server written in Go that talks ZeroMQ.
// Copyright (C) 2013 Jens Rantil
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
// Contains the ZeroMQ server loop. Deals with incoming requests and
// delegates them to the event store. Also publishes newly stored events
// using a PUB socket.
//
// See README file for an up-to-date documentation of the ZeroMQ wire
// format.
package server
import (
"bytes"
"errors"
"log"
"container/list"
"time"
"sync"
zmq "github.com/alecthomas/gozmq"
"github.com/JensRantil/gorewind/eventstore"
)
// InitParams are parameters required for initializing the server.
// All fields must be non-nil; see checkAllInitParamsSet.
type InitParams struct {
	// The event store to use as backend.
	Store *eventstore.EventStore
	// The ZeroMQ path that the command receiving socket will bind
	// to.
	CommandSocketZPath *string
	// The ZeroMQ path that the event publishing socket will bind
	// to.
	EvPubSocketZPath *string
}
// checkAllInitParamsSet returns an error naming the first required
// initialization parameter that is nil, or nil when all are present.
func checkAllInitParamsSet(p *InitParams) error {
	switch {
	case p.Store == nil:
		return errors.New("Missing param: Store")
	case p.CommandSocketZPath == nil:
		return errors.New("Missing param: CommandSocketZPath")
	case p.EvPubSocketZPath == nil:
		return errors.New("Missing param: EvPubSocketZPath")
	default:
		return nil
	}
}
// A server instance. Can be run.
type Server struct {
	params      InitParams
	evpubsock   *zmq.Socket
	commandsock *zmq.Socket
	context     *zmq.Context
	// runningMutex guards running.
	runningMutex sync.Mutex
	running      bool
	// stopChan carries the Stop() handshake with the serving loop.
	stopChan chan bool
	// waiter lets Wait() block until the serving loop has finished.
	// Bug fix: Wait() referenced this field but it was never declared,
	// which made the file fail to compile.
	waiter sync.WaitGroup
}
// IsRunning reports whether the serving loop is currently active.
func (v *Server) IsRunning() bool {
	v.runningMutex.Lock()
	running := v.running
	v.runningMutex.Unlock()
	return running
}
// Wait blocks until the server loop has terminated.
//
// NOTE(review): Server declares no `waiter` field in this revision, so
// this method does not compile as written — a sync.WaitGroup field needs
// to be added to Server (and Add/Done wired into Start) for it to work.
func (v* Server) Wait() {
	v.waiter.Wait()
}
// Stop stops a running server. Blocks until the server is stopped. If
// the server is not running, an error is returned.
func (v* Server) Stop() error {
	if !v.IsRunning() {
		return errors.New("Server not running.")
	}
	// Non-blocking send: if the loop is not ready to receive (another
	// Stop is already in flight), report instead of blocking forever.
	// NOTE(review): this requires stopChan to be initialized (e.g. in
	// New) — a send on a nil channel is never ready, so Stop would
	// always report "already signalled".
	select {
	case v.stopChan <- true:
	default:
		return errors.New("Stop already signalled.")
	}
	// The serving loop echoes on stopChan once it has exited.
	<-v.stopChan
	// v.running is modified by Server.Run(...)
	if v.IsRunning() {
		return errors.New("Signalled stopped, but never stopped.")
	}
	return nil
}
// New initializes a new event store server and returns a handle to it.
// The event store is not started; it's up to the caller to execute
// Start() on the server handle.
func New(params *InitParams) (*Server, error) {
	if params == nil {
		return nil, errors.New("Missing init params")
	}
	if err := checkAllInitParamsSet(params); err != nil {
		return nil, err
	}
	server := Server{
		params:  *params,
		running: false,
		// Bug fix: stopChan was never initialized anywhere, leaving it
		// nil — a nil channel is never ready, so Stop() could never
		// signal the serving loop.
		stopChan: make(chan bool),
	}
	// Tear down any partially-created ZeroMQ state if we bail out early.
	// (A plain bool captured by the closure replaces the previous
	// needlessly heap-allocated *bool.)
	allOkay := false
	defer func() {
		if !allOkay {
			server.closeZmq()
		}
	}()
	context, err := zmq.NewContext()
	if err != nil {
		return nil, err
	}
	server.context = context
	// ROUTER socket receiving commands from clients.
	commandsock, err := context.NewSocket(zmq.ROUTER)
	if err != nil {
		return nil, err
	}
	server.commandsock = commandsock
	if err = commandsock.Bind(*params.CommandSocketZPath); err != nil {
		return nil, err
	}
	// PUB socket broadcasting newly stored events.
	evpubsock, err := context.NewSocket(zmq.PUB)
	if err != nil {
		return nil, err
	}
	server.evpubsock = evpubsock
	if binderr := evpubsock.Bind(*params.EvPubSocketZPath); binderr != nil {
		return nil, binderr
	}
	allOkay = true
	return &server, nil
}
// closeZmq closes and clears the server's ZeroMQ sockets and context.
// Bug fix: nil checks make it safe to call from New's cleanup path,
// where a failure may occur before all sockets have been created —
// previously that path dereferenced nil pointers and panicked.
func (v *Server) closeZmq() {
	if v.evpubsock != nil {
		(*v.evpubsock).Close()
		v.evpubsock = nil
	}
	if v.commandsock != nil {
		(*v.commandsock).Close()
		v.commandsock = nil
	}
	if v.context != nil {
		(*v.context).Close()
		v.context = nil
	}
}
// setRunningState flips the running flag under runningMutex so that
// concurrent IsRunning callers observe a consistent value.
func (v *Server) setRunningState(newState bool) {
	v.runningMutex.Lock()
	v.running = newState
	v.runningMutex.Unlock()
}
// Runs the server that distributes requests to workers.
// Panics on error since it is an essential piece of code required to
// run the application correctly.
func (v *Server) Start() {
	v.setRunningState(true)
	defer v.setRunningState(false)
	// NOTE(review): the explicit dereferences are equivalent to
	// v.params.Store, *v.evpubsock, *v.commandsock. stopChan is never
	// initialized in this revision, so the loop's stop case can never
	// fire — confirm initialization in New.
	loopServer((*v).params.Store, *(*v).evpubsock, *(*v).commandsock, v.stopChan)
}
// The result of an asynchronous zmq.Poll call.
type zmqPollResult struct {
	// err is non-nil when the underlying zmq.Poll call failed.
	err error
}
// Polls a bunch of ZeroMQ sockets and notifies the result through a
// channel. This makes it possible to combine ZeroMQ polling with Go's
// own built-in channels.
func asyncPoll(notifier chan zmqPollResult, items zmq.PollItems, stop chan bool) {
	for {
		// One-second timeout so the stop signal is noticed even when
		// there is no socket activity.
		timeout := time.Duration(1)*time.Second
		count, err := zmq.Poll(items, timeout)
		if count > 0 || err != nil {
			notifier <- zmqPollResult{err}
		}
		// Non-blocking stop check; the value is echoed back so the
		// stopper (stopPoller) can synchronize on shutdown.
		select {
		case <-stop:
			stop <- true
			return
		default:
		}
	}
}
// stopPoller performs the shutdown handshake with a polling goroutine:
// send the cancel request, then block for the echoed acknowledgement.
func stopPoller(cancelChan chan bool) {
	cancelChan <- true
	<-cancelChan
}
// The core ZeroMQ messaging loop. Handles requests and responses
// asynchronously using the router socket. Every request is delegated to
// a goroutine for maximum concurrency.
//
// `gozmq` does currently not support copy-free messages/frames. This
// means that every message passing through this function needs to be
// copied in-memory. If this becomes a bottleneck in the future,
// multiple router sockets can be hooked to this final router to scale
// message copying.
//
// TODO: Make this a type function of `Server` to remove a lot of
// parameters.
func loopServer(estore *eventstore.EventStore, evpubsock, frontend zmq.Socket,
stop chan bool) {
	// NOTE(review): `zmq.Events:` is a package-qualified struct field
	// key, which is not valid Go — this should read `Events: zmq.POLLIN`.
	toPoll := zmq.PollItems{
		zmq.PollItem{Socket: &frontend, zmq.Events: zmq.POLLIN},
	}
	// Forward newly stored events to the PUB socket in the background;
	// the deferred close lets publishAllSavedEvents terminate when this
	// loop returns.
	pubchan := make(chan eventstore.StoredEvent)
	estore.RegisterPublishedEventsChannel(pubchan)
	go publishAllSavedEvents(pubchan, evpubsock)
	defer close(pubchan)
	pollchan := make(chan zmqPollResult)
	respchan := make(chan zMsg)
	pollCancel := make(chan bool)
	defer stopPoller(pollCancel)
	go asyncPoll(pollchan, toPoll, pollCancel)
	for {
		select {
		case res := <-pollchan:
			if res.err != nil {
				log.Print("Could not poll:", res.err)
			}
			if res.err == nil && toPoll[0].REvents&zmq.POLLIN != 0 {
				msg, _ := toPoll[0].Socket.RecvMultipart(0)
				zmsg := zMsg(msg)
				go handleRequest(respchan, estore, zmsg)
			}
			// NOTE(review): asyncPoll already loops internally, so
			// re-spawning it per result appears to accumulate extra
			// polling goroutines — confirm intent.
			go asyncPoll(pollchan, toPoll, pollCancel)
		case frames := <-respchan:
			// Worker responses go back out through the ROUTER socket.
			if err := frontend.SendMultipart(frames, 0); err != nil {
				log.Println(err)
			}
		case <- stop:
			// Echo back so Stop() unblocks.
			stop <- true
			return
		}
	}
}
// Publishes stored events to event listeners.
//
// Pops previously stored messages off a channel and published them to a
// ZeroMQ socket. Returns once `toPublish` is closed by its owner.
func publishAllSavedEvents(toPublish chan eventstore.StoredEvent, evpub zmq.Socket) {
	// msg is reused across iterations; this assumes SendMultipart copies
	// the frames before returning — confirm against gozmq documentation.
	msg := make(zMsg, 3)
	for stored := range(toPublish) {
		msg[0] = stored.Event.Stream
		msg[1] = stored.Id
		msg[2] = stored.Event.Data
		if err := evpub.SendMultipart(msg, 0); err != nil {
			log.Println(err)
		}
	}
}
// A single frame in a ZeroMQ message.
type zFrame []byte
// A ZeroMQ message. The conversion zMsg(m) from a [][]byte is free —
// both types share the same underlying representation.
//
// I wish it could have been `[]zFrame`, but that would make conversion
// from `[][]byte` pretty messy[1].
//
// [1] http://stackoverflow.com/a/15650327/260805
type zMsg [][]byte
// Handles a single ZeroMQ RES/REQ loop synchronously.
//
// The full request message stored in `msg` and the full ZeroMQ response
// is pushed to `respchan`. The function does not return any error
// because it is expected to be called asynchronously as a goroutine.
func handleRequest(respchan chan zMsg, estore *eventstore.EventStore, msg zMsg) {
	// TODO: Rename to 'framelist'
	parts := list.New()
	for _, msgpart := range msg {
		parts.PushBack(msgpart)
	}
	// Split off the ROUTER envelope (identity frames up to and including
	// the first empty delimiter frame); it prefixes every response.
	//
	// NOTE(review): frames were pushed above as []byte, but asserted to
	// zFrame below — a Go type assertion requires the exact dynamic
	// type, so this looks like it would panic at runtime; confirm.
	// NOTE(review): a message lacking an empty delimiter frame drives
	// parts.Front() to nil here — confirm the wire format guarantees
	// the envelope is always present.
	resptemplate := list.New()
	emptyFrame := zFrame("")
	for true {
		resptemplate.PushBack(parts.Remove(parts.Front()))
		if bytes.Equal(parts.Front().Value.(zFrame), emptyFrame) {
			break
		}
	}
	if parts.Len() == 0 {
		errstr := "Incoming command was empty. Ignoring it."
		log.Println(errstr)
		response := copyList(resptemplate)
		response.PushBack(zFrame("ERROR " + errstr))
		respchan <- listToFrames(response)
		return
	}
	// The first remaining frame names the command; the rest are its
	// arguments.
	command := string(parts.Front().Value.(zFrame))
	switch command {
	case "PUBLISH":
		parts.Remove(parts.Front())
		if parts.Len() != 2 {
			// TODO: Constantify this error message
			errstr := "Wrong number of frames for PUBLISH."
			log.Println(errstr)
			response := copyList(resptemplate)
			response.PushBack(zFrame("ERROR " + errstr))
			respchan <- listToFrames(response)
		} else {
			estream := parts.Remove(parts.Front())
			data := parts.Remove(parts.Front())
			newevent := eventstore.Event{
				estream.(eventstore.StreamName),
				data.(zFrame),
			}
			newId, err := estore.Add(newevent)
			if err != nil {
				sErr := err.Error()
				log.Println(sErr)
				response := copyList(resptemplate)
				response.PushBack(zFrame("ERROR " + sErr))
				respchan <- listToFrames(response)
			} else {
				// the event was added
				response := copyList(resptemplate)
				response.PushBack(zFrame("PUBLISHED"))
				response.PushBack(zFrame(newId))
				respchan <- listToFrames(response)
			}
		}
	case "QUERY":
		parts.Remove(parts.Front())
		if parts.Len() != 3 {
			// TODO: Constantify this error message
			errstr := "Wrong number of frames for QUERY."
			log.Println(errstr)
			response := copyList(resptemplate)
			response.PushBack(zFrame("ERROR " + errstr))
			respchan <- listToFrames(response)
		} else {
			estream := parts.Remove(parts.Front())
			fromid := parts.Remove(parts.Front())
			toid := parts.Remove(parts.Front())
			req := eventstore.QueryRequest{
				Stream: estream.(zFrame),
				FromId: fromid.(zFrame),
				ToId: toid.(zFrame),
			}
			events, err := estore.Query(req)
			if err != nil {
				sErr := err.Error()
				log.Println(sErr)
				response := copyList(resptemplate)
				response.PushBack(zFrame("ERROR " + sErr))
				respchan <- listToFrames(response)
			} else {
				// Stream each matching event, then terminate the
				// response sequence with an END frame.
				for eventdata := range(events) {
					response := copyList(resptemplate)
					response.PushBack([]byte("EVENT"))
					response.PushBack(eventdata.Id)
					response.PushBack(eventdata.Data)
					respchan <- listToFrames(response)
				}
				response := copyList(resptemplate)
				response.PushBack(zFrame("END"))
				respchan <- listToFrames(response)
			}
		}
	default:
		// TODO: Move these error strings out as constants of
		// this package.
		// TODO: Move the chunk of code below into a separate
		// function and reuse for similar piece of code above.
		// TODO: Constantify this error message
		errstr := "Unknown request type."
		log.Println(errstr)
		response := copyList(resptemplate)
		response.PushBack(zFrame("ERROR " + errstr))
		respchan <- listToFrames(response)
	}
}
// Convert a doubly linked list of message frames to a slice of message
// frames.
func listToFrames(l *list.List) zMsg {
	frames := make(zMsg, l.Len())
	i := 0
	for e := l.Front(); e != nil; e = e.Next() {
		frames[i] = e.Value.(zFrame)
		i++ // bug fix: the index was never advanced, so every frame overwrote frames[0]
	}
	return frames
}
// Helper function for copying a doubly linked list.
func copyList(l *list.List) *list.List {
replica := list.New()
replica.PushBackList(l)
return replica
}
|
package smtpapi
import (
"encoding/json"
"reflect"
"testing"
)
// Test_JsonString verifies an empty header serializes to "{}".
func Test_JsonString(t *testing.T) {
	header := NewSMTPAPIHeader()
	result, err := header.JsonString()
	// Improvement: the serialization error was silently discarded and the
	// failure message carried no diagnostics — report got/want values.
	if err != nil {
		t.Errorf("JsonString returned an error: %v", err)
	}
	if result != "{}" {
		t.Errorf("Result did not match: got %s, want {}", result)
	}
}
// Test_AddTo verifies AddTo records a recipient under the "to" key.
func Test_AddTo(t *testing.T) {
	header := NewSMTPAPIHeader()
	header.AddTo("addTo@mailinator.com")
	result, err := header.JsonString()
	if err != nil {
		t.Errorf("JsonString returned an error: %v", err)
	}
	// Improvement: include the actual output in the failure message.
	if result != "{\"to\":[\"addTo@mailinator.com\"]}" {
		t.Errorf("Result did not match: got %s", result)
	}
}
// Test_SetTos verifies SetTos replaces the recipient list wholesale.
func Test_SetTos(t *testing.T) {
	header := NewSMTPAPIHeader()
	header.SetTos([]string{"setTos@mailinator.com"})
	result, err := header.JsonString()
	if err != nil {
		t.Errorf("JsonString returned an error: %v", err)
	}
	// Improvement: include the actual output in the failure message.
	if result != "{\"to\":[\"setTos@mailinator.com\"]}" {
		t.Errorf("Result did not match: got %s", result)
	}
}
// Test_AddSubstitution verifies that AddSubstitution records a single
// key/value substitution.
func Test_AddSubstitution(t *testing.T) {
	h := NewSMTPAPIHeader()
	h.AddSubstitution("sub", "val")
	want := `{"sub":{"sub":["val"]}}`
	if got, _ := h.JsonString(); got != want {
		t.Errorf("Result did not match")
	}
}
// Test_SetSubstitutions verifies that SetSubstitutions replaces the whole
// substitution map.
func Test_SetSubstitutions(t *testing.T) {
	h := NewSMTPAPIHeader()
	h.SetSubstitutions(map[string][]string{"sub": {"val"}})
	want := `{"sub":{"sub":["val"]}}`
	if got, _ := h.JsonString(); got != want {
		t.Errorf("Result did not match")
	}
}
// Test_AddUniqueArg verifies that successive AddUniqueArg calls accumulate
// into the unique_args object.
func Test_AddUniqueArg(t *testing.T) {
	h := NewSMTPAPIHeader()
	h.AddUniqueArg("add_unique_argument_key", "add_unique_argument_value")
	h.AddUniqueArg("add_unique_argument_key_2", "add_unique_argument_value_2")
	want := `{"unique_args":{"add_unique_argument_key":"add_unique_argument_value","add_unique_argument_key_2":"add_unique_argument_value_2"}}`
	if got, _ := h.JsonString(); got != want {
		t.Errorf("Result did not match")
	}
}
// Test_SetUniqueArgs verifies that SetUniqueArgs replaces the unique_args map.
func Test_SetUniqueArgs(t *testing.T) {
	h := NewSMTPAPIHeader()
	h.SetUniqueArgs(map[string]string{"set_unique_argument_key": "set_unique_argument_value"})
	want := `{"unique_args":{"set_unique_argument_key":"set_unique_argument_value"}}`
	if got, _ := h.JsonString(); got != want {
		t.Errorf("Result did not match")
	}
}
// Test_AddCategory verifies that successive AddCategory calls append in order.
func Test_AddCategory(t *testing.T) {
	h := NewSMTPAPIHeader()
	h.AddCategory("addCategory")
	h.AddCategory("addCategory2")
	want := `{"category":["addCategory","addCategory2"]}`
	if got, _ := h.JsonString(); got != want {
		t.Errorf("Result did not match")
	}
}
// Test_SetCategories verifies that SetCategories replaces the category list.
func Test_SetCategories(t *testing.T) {
	h := NewSMTPAPIHeader()
	h.SetCategories([]string{"setCategories"})
	want := `{"category":["setCategories"]}`
	if got, _ := h.JsonString(); got != want {
		t.Errorf("Result did not match")
	}
}
// Test_AddSection verifies that successive AddSection calls accumulate
// into the section object.
func Test_AddSection(t *testing.T) {
	h := NewSMTPAPIHeader()
	h.AddSection("set_section_key", "set_section_value")
	h.AddSection("set_section_key_2", "set_section_value_2")
	want := `{"section":{"set_section_key":"set_section_value","set_section_key_2":"set_section_value_2"}}`
	if got, _ := h.JsonString(); got != want {
		t.Errorf("Result did not match")
	}
}
// Test_SetSections verifies that SetSections replaces the section map.
func Test_SetSections(t *testing.T) {
	h := NewSMTPAPIHeader()
	h.SetSections(map[string]string{"set_section_key": "set_section_value"})
	want := `{"section":{"set_section_key":"set_section_value"}}`
	if got, _ := h.JsonString(); got != want {
		t.Errorf("Result did not match")
	}
}
//func Test_AddFilter(t *testing.T) {
// header := NewSMTPAPIHeader()
//
// header.AddFilter("footer", "text/html", "<strong>boo</strong>")
// result, _ := header.JsonString()
//
// t.Logf(result);
// if result != "{\"filters\":{\"footer\":{\"settings\":{\"text/html\":\"<strong>boo</strong>\"}}}}" {
// t.Errorf("Result did not match")
// }
//}
//
//func Test_SetFilters(t *testing.T) {
// header := NewSMTPAPIHeader()
//
// header.SetFilters("footer", "text/html", "<strong>boo</strong>")
// result, _ := header.JsonString()
//
// t.Logf(result);
// if result != "{\"filters\":{\"footer\":{\"setting\":{\"enable\":1,\"text/plain\":\"You can haz footers!\"}}}}" {
// t.Errorf("Result did not match")
// }
//}
// Test_Adds exercises every Add* helper at once and compares the resulting
// JSON against a known-good document.
//
// BUG FIX: the original called json.Marshal on raw []byte JSON text, which
// base64-encodes the bytes instead of parsing them, so DeepEqual ran on two
// base64 strings and silently depended on exact key ordering. Decode both
// documents into maps and compare them structurally instead.
func Test_Adds(t *testing.T) {
	valid := []byte(`{"to":["test@email.com"],"sub":{"subKey":["subValue"]},"section":{"testSection":"sectionValue"},"category":["testCategory"],"unique_args":{"testUnique":"uniqueValue"},"filters":{"testFilter":{"settings":{"filter":"filterValue"}}}}`)
	var want map[string]interface{}
	if err := json.Unmarshal(valid, &want); err != nil {
		t.Fatalf("invalid fixture: %v", err)
	}
	headers := NewSMTPAPIHeader()
	headers.AddTo("test@email.com")
	headers.AddSubstitution("subKey", "subValue")
	headers.AddSection("testSection", "sectionValue")
	headers.AddCategory("testCategory")
	headers.AddUniqueArg("testUnique", "uniqueValue")
	headers.AddFilter("testFilter", "filter", "filterValue")
	h, e := headers.JsonString()
	if e != nil {
		t.Errorf("Error! %s", e)
		return
	}
	var got map[string]interface{}
	if err := json.Unmarshal([]byte(h), &got); err != nil {
		t.Fatalf("JsonString produced invalid JSON: %v", err)
	}
	if reflect.DeepEqual(got, want) {
		t.Logf("Success")
	} else {
		t.Errorf("Invalid headers")
	}
}
// Test_Sets exercises every Set* helper at once and compares the resulting
// JSON against a known-good document.
//
// BUG FIX: like Test_Adds, the original base64-encoded the raw JSON texts
// via json.Marshal([]byte(...)) and compared the encodings, which made the
// test order-sensitive rather than structural. Decode and DeepEqual instead.
func Test_Sets(t *testing.T) {
	valid := []byte(`{"to":["test@email.com"],"sub":{"subKey":["subValue"]},"section":{"testSection":"sectionValue"},"category":["testCategory"],"unique_args":{"testUnique":"uniqueValue"},"filters":{"testFilter":{"settings":{"filter":"filterValue"}}}}`)
	var want map[string]interface{}
	if err := json.Unmarshal(valid, &want); err != nil {
		t.Fatalf("invalid fixture: %v", err)
	}
	headers := NewSMTPAPIHeader()
	headers.SetTos([]string{"test@email.com"})
	headers.SetSubstitutions(map[string][]string{"subKey": {"subValue"}})
	headers.SetSections(map[string]string{"testSection": "sectionValue"})
	headers.SetCategories([]string{"testCategory"})
	headers.SetUniqueArgs(map[string]string{"testUnique": "uniqueValue"})
	headers.AddFilter("testFilter", "filter", "filterValue")
	h, e := headers.JsonString()
	if e != nil {
		t.Errorf("Error! %s", e)
		return
	}
	var got map[string]interface{}
	if err := json.Unmarshal([]byte(h), &got); err != nil {
		t.Fatalf("JsonString produced invalid JSON: %v", err)
	}
	if reflect.DeepEqual(got, want) {
		t.Logf("Success")
	} else {
		t.Errorf("Invalid headers")
	}
}
Move all expected test strings over to the ExampleJson JSON fixture file
package smtpapi
import (
"encoding/json"
"io/ioutil"
"reflect"
"testing"
)
// ExampleJson loads the shared expected-output fixtures from
// smtpapi_test_strings.json and returns them as a generic map keyed by
// test name.
//
// NOTE(review): the Example prefix makes go vet's example check flag this
// function (examples must not return values); consider renaming it (e.g.
// loadTestStrings) together with all callers.
func ExampleJson() map[string]interface{} {
	data, err := ioutil.ReadFile("smtpapi_test_strings.json")
	if err != nil {
		// The original ignored this error and went on to panic on a nil
		// interface below; fail loudly with the actual cause instead.
		panic(err)
	}
	var f interface{}
	if err := json.Unmarshal(data, &f); err != nil {
		panic(err)
	}
	// BUG FIX: the original stored the result in a local named `json`,
	// shadowing the encoding/json import for the rest of the function.
	return f.(map[string]interface{})
}
// Test_JsonString verifies that an empty header serializes to the
// "json_string" fixture.
func Test_JsonString(t *testing.T) {
	h := NewSMTPAPIHeader()
	if got, _ := h.JsonString(); got != ExampleJson()["json_string"] {
		t.Errorf("Result did not match")
	}
}
// Test_AddTo verifies AddTo against the "add_to" fixture.
func Test_AddTo(t *testing.T) {
	h := NewSMTPAPIHeader()
	h.AddTo("addTo@mailinator.com")
	if got, _ := h.JsonString(); got != ExampleJson()["add_to"] {
		t.Errorf("Result did not match")
	}
}
// Test_SetTos verifies SetTos against the "set_tos" fixture.
func Test_SetTos(t *testing.T) {
	h := NewSMTPAPIHeader()
	h.SetTos([]string{"setTos@mailinator.com"})
	if got, _ := h.JsonString(); got != ExampleJson()["set_tos"] {
		t.Errorf("Result did not match")
	}
}
// Test_AddSubstitution verifies AddSubstitution against the
// "add_substitution" fixture.
func Test_AddSubstitution(t *testing.T) {
	h := NewSMTPAPIHeader()
	h.AddSubstitution("sub", "val")
	if got, _ := h.JsonString(); got != ExampleJson()["add_substitution"] {
		t.Errorf("Result did not match")
	}
}
// Test_SetSubstitutions verifies SetSubstitutions against the
// "set_substitutions" fixture.
func Test_SetSubstitutions(t *testing.T) {
	h := NewSMTPAPIHeader()
	h.SetSubstitutions(map[string][]string{"sub": {"val"}})
	if got, _ := h.JsonString(); got != ExampleJson()["set_substitutions"] {
		t.Errorf("Result did not match")
	}
}
// Test_AddUniqueArg verifies accumulated AddUniqueArg calls against the
// "add_unique_arg" fixture.
func Test_AddUniqueArg(t *testing.T) {
	h := NewSMTPAPIHeader()
	h.AddUniqueArg("add_unique_argument_key", "add_unique_argument_value")
	h.AddUniqueArg("add_unique_argument_key_2", "add_unique_argument_value_2")
	if got, _ := h.JsonString(); got != ExampleJson()["add_unique_arg"] {
		t.Errorf("Result did not match")
	}
}
// Test_SetUniqueArgs verifies SetUniqueArgs against the "set_unique_args"
// fixture.
func Test_SetUniqueArgs(t *testing.T) {
	h := NewSMTPAPIHeader()
	h.SetUniqueArgs(map[string]string{"set_unique_argument_key": "set_unique_argument_value"})
	if got, _ := h.JsonString(); got != ExampleJson()["set_unique_args"] {
		t.Errorf("Result did not match")
	}
}
// Test_AddCategory verifies accumulated AddCategory calls against the
// "add_category" fixture.
func Test_AddCategory(t *testing.T) {
	h := NewSMTPAPIHeader()
	h.AddCategory("addCategory")
	h.AddCategory("addCategory2")
	if got, _ := h.JsonString(); got != ExampleJson()["add_category"] {
		t.Errorf("Result did not match")
	}
}
// Test_SetCategories verifies SetCategories against the "set_categories"
// fixture.
func Test_SetCategories(t *testing.T) {
	h := NewSMTPAPIHeader()
	h.SetCategories([]string{"setCategories"})
	if got, _ := h.JsonString(); got != ExampleJson()["set_categories"] {
		t.Errorf("Result did not match")
	}
}
// Test_AddSection verifies accumulated AddSection calls against the
// "add_section" fixture.
func Test_AddSection(t *testing.T) {
	h := NewSMTPAPIHeader()
	h.AddSection("set_section_key", "set_section_value")
	h.AddSection("set_section_key_2", "set_section_value_2")
	if got, _ := h.JsonString(); got != ExampleJson()["add_section"] {
		t.Errorf("Result did not match")
	}
}
// Test_SetSections verifies SetSections against the "set_sections" fixture.
func Test_SetSections(t *testing.T) {
	h := NewSMTPAPIHeader()
	h.SetSections(map[string]string{"set_section_key": "set_section_value"})
	if got, _ := h.JsonString(); got != ExampleJson()["set_sections"] {
		t.Errorf("Result did not match")
	}
}
//func Test_AddFilter(t *testing.T) {
// header := NewSMTPAPIHeader()
//
// header.AddFilter("footer", "text/html", "<strong>boo</strong>")
// result, _ := header.JsonString()
//
// t.Logf(result);
// if result != "{\"filters\":{\"footer\":{\"settings\":{\"text/html\":\"<strong>boo</strong>\"}}}}" {
// t.Errorf("Result did not match")
// }
//}
//
//func Test_SetFilters(t *testing.T) {
// header := NewSMTPAPIHeader()
//
// header.SetFilters("footer", "text/html", "<strong>boo</strong>")
// result, _ := header.JsonString()
//
// t.Logf(result);
// if result != "{\"filters\":{\"footer\":{\"setting\":{\"enable\":1,\"text/plain\":\"You can haz footers!\"}}}}" {
// t.Errorf("Result did not match")
// }
//}
// Test_Adds exercises every Add* helper at once and compares the resulting
// JSON against a known-good document.
//
// BUG FIX: the original called json.Marshal on raw []byte JSON text, which
// base64-encodes the bytes instead of parsing them, so DeepEqual ran on two
// base64 strings and silently depended on exact key ordering. Decode both
// documents into maps and compare them structurally instead.
func Test_Adds(t *testing.T) {
	valid := []byte(`{"to":["test@email.com"],"sub":{"subKey":["subValue"]},"section":{"testSection":"sectionValue"},"category":["testCategory"],"unique_args":{"testUnique":"uniqueValue"},"filters":{"testFilter":{"settings":{"filter":"filterValue"}}}}`)
	var want map[string]interface{}
	if err := json.Unmarshal(valid, &want); err != nil {
		t.Fatalf("invalid fixture: %v", err)
	}
	headers := NewSMTPAPIHeader()
	headers.AddTo("test@email.com")
	headers.AddSubstitution("subKey", "subValue")
	headers.AddSection("testSection", "sectionValue")
	headers.AddCategory("testCategory")
	headers.AddUniqueArg("testUnique", "uniqueValue")
	headers.AddFilter("testFilter", "filter", "filterValue")
	h, e := headers.JsonString()
	if e != nil {
		t.Errorf("Error! %s", e)
		return
	}
	var got map[string]interface{}
	if err := json.Unmarshal([]byte(h), &got); err != nil {
		t.Fatalf("JsonString produced invalid JSON: %v", err)
	}
	if reflect.DeepEqual(got, want) {
		t.Logf("Success")
	} else {
		t.Errorf("Invalid headers")
	}
}
// Test_Sets exercises every Set* helper at once and compares the resulting
// JSON against a known-good document.
//
// BUG FIX: like Test_Adds, the original base64-encoded the raw JSON texts
// via json.Marshal([]byte(...)) and compared the encodings, which made the
// test order-sensitive rather than structural. Decode and DeepEqual instead.
func Test_Sets(t *testing.T) {
	valid := []byte(`{"to":["test@email.com"],"sub":{"subKey":["subValue"]},"section":{"testSection":"sectionValue"},"category":["testCategory"],"unique_args":{"testUnique":"uniqueValue"},"filters":{"testFilter":{"settings":{"filter":"filterValue"}}}}`)
	var want map[string]interface{}
	if err := json.Unmarshal(valid, &want); err != nil {
		t.Fatalf("invalid fixture: %v", err)
	}
	headers := NewSMTPAPIHeader()
	headers.SetTos([]string{"test@email.com"})
	headers.SetSubstitutions(map[string][]string{"subKey": {"subValue"}})
	headers.SetSections(map[string]string{"testSection": "sectionValue"})
	headers.SetCategories([]string{"testCategory"})
	headers.SetUniqueArgs(map[string]string{"testUnique": "uniqueValue"})
	headers.AddFilter("testFilter", "filter", "filterValue")
	h, e := headers.JsonString()
	if e != nil {
		t.Errorf("Error! %s", e)
		return
	}
	var got map[string]interface{}
	if err := json.Unmarshal([]byte(h), &got); err != nil {
		t.Fatalf("JsonString produced invalid JSON: %v", err)
	}
	if reflect.DeepEqual(got, want) {
		t.Logf("Success")
	} else {
		t.Errorf("Invalid headers")
	}
}
|
package server
import (
"encoding/json"
"fmt"
"net"
"net/http"
"net/url"
)
// Routez represents detail information on current routes.
// It is the JSON payload served by HandleRoutez for GET requests.
type Routez struct {
	NumRoutes int          `json:"num_routes"` // total number of entries in Routes
	Routes    []*RouteInfo `json:"routes"`     // per-route detail records
}
// RouteInfo has detailed information on a per connection basis.
type RouteInfo struct {
	Cid       uint64 `json:"cid"`           // internal connection id (route.cid)
	URL       string `json:"url"`           // route URL (route.route.url)
	IP        string `json:"ip"`            // remote IP, only set when the transport is TCP
	Port      int    `json:"port"`          // remote port, only set when the transport is TCP
	Solicited bool   `json:"solicited"`     // copied from route.route.didSolicit
	Subs      uint32 `json:"subscriptions"` // current subscription count
	Pending   int    `json:"pending_size"`  // NOTE(review): never populated by HandleRoutez — confirm intent
	InMsgs    int64  `json:"in_msgs"`       // messages received on this route
	OutMsgs   int64  `json:"out_msgs"`      // messages sent on this route
	InBytes   int64  `json:"in_bytes"`      // bytes received on this route
	OutBytes  int64  `json:"out_bytes"`     // bytes sent on this route
}
// HandleRoutez processes HTTP requests for route information.
// GET returns the current routes as JSON; PUT connects a new route from the
// URL in the request body; DELETE closes the route whose resolved TCP
// address matches the URL in the request body.
// BUG FIX: the doc comment previously described HandleConnz.
func (s *Server) HandleRoutez(w http.ResponseWriter, req *http.Request) {
	if req.Method == "GET" {
		r := Routez{Routes: []*RouteInfo{}}
		// Walk the list under the server lock; s.routes is shared state.
		s.mu.Lock()
		for _, route := range s.routes {
			ri := &RouteInfo{
				Cid:       route.cid,
				Subs:      route.subs.Count(),
				URL:       route.route.url.String(),
				Solicited: route.route.didSolicit,
				InMsgs:    route.inMsgs,
				OutMsgs:   route.outMsgs,
				InBytes:   route.inBytes,
				OutBytes:  route.outBytes,
			}
			if ip, ok := route.nc.(*net.TCPConn); ok {
				addr := ip.RemoteAddr().(*net.TCPAddr)
				ri.Port = addr.Port
				ri.IP = addr.IP.String()
			}
			r.Routes = append(r.Routes, ri)
		}
		s.mu.Unlock()
		r.NumRoutes = len(r.Routes)
		b, err := json.MarshalIndent(r, "", "  ")
		if err != nil {
			Logf("Error marshalling response to /routez request: %v", err)
		}
		w.Write(b)
	} else if req.Method == "PUT" {
		// BUG FIX: only the bytes actually read belong to the URL; the
		// original handed the full NUL-padded 1024-byte buffer to url.Parse.
		body := make([]byte, 1024)
		n, _ := req.Body.Read(body)
		routeURL, err := url.Parse(string(body[:n]))
		if err != nil {
			w.WriteHeader(400)
			w.Write([]byte(fmt.Sprintf(`{"error": "could not parse URL: %v"}`, err)))
			return
		}
		s.connectToRoute(routeURL)
		w.Write([]byte(`{"status": "ok"}`))
	} else if req.Method == "DELETE" {
		body := make([]byte, 1024)
		n, _ := req.Body.Read(body)
		// BUG FIX: the url.Parse error was silently overwritten by the
		// ResolveTCPAddr assignment below; report it explicitly.
		routeURL, err := url.Parse(string(body[:n]))
		if err != nil {
			w.WriteHeader(400)
			w.Write([]byte(fmt.Sprintf(`{"error": "could not parse URL: %v"}`, err)))
			return
		}
		routeIP, err := net.ResolveTCPAddr("tcp", routeURL.Host)
		if err != nil {
			w.WriteHeader(500)
			w.Write([]byte(fmt.Sprintf(`{"error": "could not resolve url: %v"}`, err)))
			return
		}
		// NOTE(review): GET guards s.routes with s.mu but this loop does not —
		// confirm whether a lock is needed here too.
		for _, route := range s.routes {
			if ipConn, ok := route.nc.(*net.TCPConn); ok {
				addr := ipConn.RemoteAddr().(*net.TCPAddr)
				if addr.String() == routeIP.String() {
					route.mu.Lock()
					route.route.didSolicit = false // don't reconnect
					route.mu.Unlock()
					route.closeConnection()
					w.WriteHeader(200)
					w.Write([]byte(`{"status": "ok"}`))
					return
				}
			}
		}
		w.WriteHeader(404)
		w.Write([]byte(`{"error": "could not find matching route"}`))
	}
}
Turns out we can access the route URL directly
package server
import (
"encoding/json"
"fmt"
"net"
"net/http"
"net/url"
)
// Routez represents detail information on current routes.
// It is the JSON payload served by HandleRoutez for GET requests.
type Routez struct {
	NumRoutes int          `json:"num_routes"` // total number of entries in Routes
	Routes    []*RouteInfo `json:"routes"`     // per-route detail records
}
// RouteInfo has detailed information on a per connection basis.
type RouteInfo struct {
	Cid       uint64 `json:"cid"`           // internal connection id (route.cid)
	URL       string `json:"url"`           // route URL (route.route.url)
	IP        string `json:"ip"`            // remote IP, only set when the transport is TCP
	Port      int    `json:"port"`          // remote port, only set when the transport is TCP
	Solicited bool   `json:"solicited"`     // copied from route.route.didSolicit
	Subs      uint32 `json:"subscriptions"` // current subscription count
	Pending   int    `json:"pending_size"`  // NOTE(review): never populated by HandleRoutez — confirm intent
	InMsgs    int64  `json:"in_msgs"`       // messages received on this route
	OutMsgs   int64  `json:"out_msgs"`      // messages sent on this route
	InBytes   int64  `json:"in_bytes"`      // bytes received on this route
	OutBytes  int64  `json:"out_bytes"`     // bytes sent on this route
}
// HandleRoutez processes HTTP requests for route information.
// GET returns the current routes as JSON; PUT connects a new route from the
// URL in the request body; DELETE closes the route whose configured URL
// matches the request body.
// BUG FIX: the doc comment previously described HandleConnz.
func (s *Server) HandleRoutez(w http.ResponseWriter, req *http.Request) {
	if req.Method == "GET" {
		r := Routez{Routes: []*RouteInfo{}}
		// Walk the list under the server lock; s.routes is shared state.
		s.mu.Lock()
		for _, route := range s.routes {
			ri := &RouteInfo{
				Cid:       route.cid,
				Subs:      route.subs.Count(),
				URL:       route.route.url.String(),
				Solicited: route.route.didSolicit,
				InMsgs:    route.inMsgs,
				OutMsgs:   route.outMsgs,
				InBytes:   route.inBytes,
				OutBytes:  route.outBytes,
			}
			if ip, ok := route.nc.(*net.TCPConn); ok {
				addr := ip.RemoteAddr().(*net.TCPAddr)
				ri.Port = addr.Port
				ri.IP = addr.IP.String()
			}
			r.Routes = append(r.Routes, ri)
		}
		s.mu.Unlock()
		r.NumRoutes = len(r.Routes)
		b, err := json.MarshalIndent(r, "", "  ")
		if err != nil {
			Logf("Error marshalling response to /routez request: %v", err)
		}
		w.Write(b)
	} else if req.Method == "PUT" {
		// BUG FIX: only the bytes actually read belong to the URL; the
		// original handed the full NUL-padded 1024-byte buffer to url.Parse.
		body := make([]byte, 1024)
		n, _ := req.Body.Read(body)
		routeURL, err := url.Parse(string(body[:n]))
		if err != nil {
			w.WriteHeader(400)
			w.Write([]byte(fmt.Sprintf(`{"error": "could not parse URL: %v"}`, err)))
			return
		}
		s.connectToRoute(routeURL)
		w.Write([]byte(`{"status": "ok"}`))
	} else if req.Method == "DELETE" {
		body := make([]byte, 1024)
		n, _ := req.Body.Read(body)
		// BUG FIX: the original compared a string to the []byte buffer
		// (`route.route.url.String() == body`), which does not compile, and
		// would additionally have compared against NUL padding. Convert the
		// bytes actually read once, up front.
		target := string(body[:n])
		// NOTE(review): GET guards s.routes with s.mu but this loop does not —
		// confirm whether a lock is needed here too.
		for _, route := range s.routes {
			if route.route.url.String() == target {
				route.mu.Lock()
				route.route.didSolicit = false // don't reconnect
				route.mu.Unlock()
				route.closeConnection()
				w.WriteHeader(200)
				w.Write([]byte(`{"status": "ok"}`))
				return
			}
		}
		w.WriteHeader(404)
		w.Write([]byte(`{"error": "could not find matching route"}`))
	}
}
|
package main
import (
"encoding/json"
"fmt"
"html/template"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"sync"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/manifest/schema1"
humanize "github.com/dustin/go-humanize"
"github.com/jessfraz/reg/clair"
"github.com/jessfraz/reg/registry"
"github.com/jessfraz/reg/utils"
"github.com/urfave/cli"
)
const (
	// VERSION is the binary version.
	VERSION = "v0.1.0"

	// dockerConfigPath is the location of the Docker CLI config file,
	// relative to the user's home directory.
	dockerConfigPath = ".docker/config.json"
)
var (
	// updating reports whether an index rebuild is currently in flight.
	// NOTE(review): read and written from multiple goroutines without
	// synchronization — confirm whether this should be an atomic/mutex.
	updating = false
	// wg tracks the per-image vulnerability page generator goroutines.
	wg sync.WaitGroup
)
// preload initializes any global options and configuration before the main
// or sub commands are run. It currently only raises the log level when the
// --debug flag is set; the returned error is always nil.
func preload(c *cli.Context) (err error) {
	if !c.GlobalBool("debug") {
		return nil
	}
	logrus.SetLevel(logrus.DebugLevel)
	return nil
}
// main wires up the reg-server CLI: it renders a static index of the
// registry once at startup, re-renders it on a timer, and serves the
// static directory over HTTP(S).
func main() {
	app := cli.NewApp()
	app.Name = "reg-server"
	app.Version = VERSION
	app.Author = "@jessfraz"
	app.Email = "no-reply@butts.com"
	app.Usage = "Docker registry v2 static UI server."
	app.Before = preload
	// NOTE(review): "provate" in the registry flag usage below is a typo
	// for "private" in a user-facing string; left unchanged in this
	// documentation-only pass.
	app.Flags = []cli.Flag{
		cli.BoolFlag{
			Name:  "debug, d",
			Usage: "run in debug mode",
		},
		cli.StringFlag{
			Name:  "username, u",
			Usage: "username for the registry",
		},
		cli.StringFlag{
			Name:  "password, p",
			Usage: "password for the registry",
		},
		cli.StringFlag{
			Name:  "registry, r",
			Usage: "URL to the provate registry (ex. r.j3ss.co)",
		},
		cli.StringFlag{
			Name:  "port",
			Value: "8080",
			Usage: "port for server to run on",
		},
		cli.StringFlag{
			Name:  "cert",
			Usage: "path to ssl cert",
		},
		cli.StringFlag{
			Name:  "key",
			Usage: "path to ssl key",
		},
		cli.StringFlag{
			Name:  "interval",
			Value: "5m",
			Usage: "interval to generate new index.html's at",
		},
		cli.StringFlag{
			Name:  "clair",
			Usage: "url to clair instance",
		},
	}
	app.Action = func(c *cli.Context) error {
		// resolve registry credentials from flags / docker config
		auth, err := utils.GetAuthConfig(c)
		if err != nil {
			return err
		}
		// create the registry client
		r, err := registry.New(auth, c.GlobalBool("debug"))
		if err != nil {
			return err
		}
		// get the path to the static directory
		wd, err := os.Getwd()
		if err != nil {
			return err
		}
		staticDir := filepath.Join(wd, "static")
		// create the initial index
		if err := createStaticIndex(r, staticDir, c.GlobalString("clair")); err != nil {
			return err
		}
		// parse the duration
		dur, err := time.ParseDuration(c.String("interval"))
		if err != nil {
			logrus.Fatalf("parsing %s as duration failed: %v", c.String("interval"), err)
		}
		ticker := time.NewTicker(dur)
		go func() {
			// create more indexes every X minutes based off interval
			// NOTE(review): `updating` is read here and written inside
			// createStaticIndex from different goroutines without
			// synchronization — data race. Also note wg.Wait() and the
			// flag reset run only on the error path, because a failed
			// createStaticIndex returns before clearing the flag itself.
			for range ticker.C {
				if !updating {
					if err := createStaticIndex(r, staticDir, c.GlobalString("clair")); err != nil {
						logrus.Warnf("creating static index failed: %v", err)
						wg.Wait()
						updating = false
					}
				}
			}
		}()
		// create mux server
		mux := http.NewServeMux()
		// static files handler
		staticHandler := http.FileServer(http.Dir(staticDir))
		mux.Handle("/", staticHandler)
		// set up the server
		port := c.String("port")
		server := &http.Server{
			Addr:    ":" + port,
			Handler: mux,
		}
		logrus.Infof("Starting server on port %q", port)
		// serve TLS only when both cert and key were provided
		if c.String("cert") != "" && c.String("key") != "" {
			logrus.Fatal(server.ListenAndServeTLS(c.String("cert"), c.String("key")))
		} else {
			logrus.Fatal(server.ListenAndServe())
		}
		return nil
	}
	// NOTE(review): app.Run's error is ignored, so a flag-parsing failure
	// does not set a non-zero exit code.
	app.Run(os.Args)
}
// data is the template context handed to layout.html.
type data struct {
	RegistryURL string       // registry domain (r.Domain)
	LastUpdated string       // human-readable render timestamp (RFC1123)
	Repos       []repository // one entry per repo:tag discovered in the catalog
}
// repository is one repo:tag row rendered into the index page.
type repository struct {
	Name        string // repository name from the catalog
	Tag         string // image tag
	RepoURI     string // pullable URI; the ":tag" suffix is omitted for "latest"
	CreatedDate string // humanized creation time from the v1 history, may be empty
	VulnURI     string // relative path to vulns.txt; empty when clair is not configured
}
// v1Compatibility captures the subset of the schema1 v1Compatibility
// history blob that the index needs (creation time; ID kept for context).
type v1Compatibility struct {
	ID      string    `json:"id"`
	Created time.Time `json:"created"`
}
// createStaticIndex renders the registry catalog (and, when clairURI is
// set, kicks off per-image vulnerability reports) into staticDir/index.html.
// It sets the package-level `updating` flag while it runs so the ticker in
// main skips overlapping rebuilds. NOTE(review): the flag is unsynchronized
// — confirm whether it should be atomic.
func createStaticIndex(r *registry.Registry, staticDir, clairURI string) error {
	updating = true
	logrus.Info("fetching catalog")
	repoList, err := r.Catalog("")
	if err != nil {
		return fmt.Errorf("getting catalog failed: %v", err)
	}
	logrus.Info("fetching tags")
	var repos []repository
	for i, repo := range repoList {
		// get the tags
		tags, err := r.Tags(repo)
		if err != nil {
			return fmt.Errorf("getting tags for %s failed: %v", repo, err)
		}
		for j, tag := range tags {
			// get the manifest
			manifest, err := r.Manifest(repo, tag)
			if err != nil {
				return fmt.Errorf("getting tags for %s:%s failed: %v", repo, tag, err)
			}
			// pull the creation date out of the first history entry
			var createdDate string
			if m1, ok := manifest.(schema1.SignedManifest); ok {
				history := m1.History
				for _, h := range history {
					var comp v1Compatibility
					if err := json.Unmarshal([]byte(h.V1Compatibility), &comp); err != nil {
						return fmt.Errorf("unmarshal v1compatibility failed: %v", err)
					}
					createdDate = humanize.Time(comp.Created)
					break
				}
			}
			repoURI := fmt.Sprintf("%s/%s", r.Domain, repo)
			if tag != "latest" {
				repoURI += ":" + tag
			}
			newrepo := repository{
				Name:        repo,
				Tag:         tag,
				RepoURI:     repoURI,
				CreatedDate: createdDate,
			}
			if clairURI != "" {
				wg.Add(1)
				go func(repo, tag string, i, j int) {
					defer wg.Done()
					// BUG FIX: the original throttled with
					// time.Tick(time.Duration(i*j)*time.Microsecond). For the
					// first repo or tag i*j is 0 and time.Tick(0) returns a
					// nil channel, so that goroutine blocked forever (and
					// every other call leaked a Ticker). A plain Sleep
					// staggers the requests without either problem.
					time.Sleep(time.Duration((i+1)*(j+1)) * time.Microsecond)
					logrus.Infof("creating vulns.txt for %s:%s", repo, tag)
					if err := createVulnStaticPage(r, staticDir, clairURI, repo, tag); err != nil {
						logrus.Warnf("creating vuln static page for %s:%s failed: %v", repo, tag, err)
					}
				}(repo, tag, i, j)
				newrepo.VulnURI = filepath.Join(repo, tag, "vulns.txt")
			}
			repos = append(repos, newrepo)
		}
	}
	// create temporary file to save template to
	logrus.Info("creating temporary file for template")
	f, err := ioutil.TempFile("", "reg-server")
	if err != nil {
		return fmt.Errorf("creating temp file failed: %v", err)
	}
	defer f.Close()
	defer os.Remove(f.Name())
	// parse & execute the template
	logrus.Info("parsing and executing the template")
	templateDir := filepath.Join(staticDir, "../templates")
	lp := filepath.Join(templateDir, "layout.html")
	d := data{
		RegistryURL: r.Domain,
		Repos:       repos,
		LastUpdated: time.Now().Local().Format(time.RFC1123),
	}
	tmpl := template.Must(template.New("").ParseFiles(lp))
	if err := tmpl.ExecuteTemplate(f, "layout", d); err != nil {
		return fmt.Errorf("execute template failed: %v", err)
	}
	f.Close()
	// swap the freshly rendered index into place
	index := filepath.Join(staticDir, "index.html")
	logrus.Infof("renaming the temporary file %s to %s", f.Name(), index)
	if err := os.Rename(f.Name(), index); err != nil {
		return fmt.Errorf("renaming result from %s to %s failed: %v", f.Name(), index, err)
	}
	updating = false
	return nil
}
// createVulnStaticPage asks the clair instance at clairURI to analyse
// repo:tag and writes a plain-text vulnerability report to
// staticDir/<repo>/<tag>/vulns.txt.
func createVulnStaticPage(r *registry.Registry, staticDir, clairURI, repo, tag string) error {
	// get the manifest
	m, err := r.ManifestV1(repo, tag)
	if err != nil {
		return err
	}
	// filter out the empty layers
	var filteredLayers []schema1.FSLayer
	for _, layer := range m.FSLayers {
		if layer.BlobSum != clair.EmptyLayerBlobSum {
			filteredLayers = append(filteredLayers, layer)
		}
	}
	m.FSLayers = filteredLayers
	if len(m.FSLayers) == 0 {
		// BUG FIX: "emtpy" typo corrected and trailing newline added.
		fmt.Printf("No need to analyse image %s:%s as there is no non-empty layer\n", repo, tag)
		return nil
	}
	// initialize clair
	cr, err := clair.New(clairURI, false)
	if err != nil {
		return err
	}
	// post the layers in reverse slice order so each parent exists first
	for i := len(m.FSLayers) - 1; i >= 0; i-- {
		// form the clair layer
		l, err := utils.NewClairLayer(r, repo, m.FSLayers, i)
		if err != nil {
			return err
		}
		// post the layer
		if _, err := cr.PostLayer(l); err != nil {
			return err
		}
	}
	// fetch the analysis for the first (topmost) layer
	vl, err := cr.GetLayer(m.FSLayers[0].BlobSum.String(), false, true)
	if err != nil {
		return err
	}
	// get the vulns
	var vulns []clair.Vulnerability
	for _, f := range vl.Features {
		vulns = append(vulns, f.Vulnerabilities...)
	}
	path := filepath.Join(staticDir, repo, tag, "vulns.txt")
	// BUG FIX: directories were created with mode 0644, which lacks the
	// execute bit, so files could not be created inside them; use 0755.
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return err
	}
	file, err := os.Create(path)
	if err != nil {
		return err
	}
	defer file.Close()
	fmt.Fprintf(file, "Found %d vulnerabilities \n", len(vulns))
	// vulnsBy returns (and lazily initializes) the bucket for a severity.
	vulnsBy := func(sev string, store map[string][]clair.Vulnerability) []clair.Vulnerability {
		items, found := store[sev]
		if !found {
			items = make([]clair.Vulnerability, 0)
			store[sev] = items
		}
		return items
	}
	// group by severity
	store := make(map[string][]clair.Vulnerability)
	for _, v := range vulns {
		sevRow := vulnsBy(v.Severity, store)
		store[v.Severity] = append(sevRow, v)
	}
	// iterate over the priorities list, skipping empty buckets
	iteratePriorities := func(f func(sev string)) {
		for _, sev := range clair.Priorities {
			if len(store[sev]) != 0 {
				f(sev)
			}
		}
	}
	// summary: one count line per severity present
	iteratePriorities(func(sev string) {
		fmt.Fprintf(file, "%s: %d\n", sev, len(store[sev]))
	})
	fmt.Fprintln(file, "")
	// flag images with more than 10 high-or-worse vulnerabilities
	lenBadVulns := len(store["High"]) + len(store["Critical"]) + len(store["Defcon1"])
	if lenBadVulns > 10 {
		fmt.Fprintln(file, "--------------- ALERT ---------------")
		// BUG FIX: "vunerabilities" typo corrected in the report output.
		fmt.Fprintf(file, "%d bad vulnerabilities found", lenBadVulns)
	}
	fmt.Fprintln(file, "")
	// detail section, most severe first
	iteratePriorities(func(sev string) {
		for _, v := range store[sev] {
			fmt.Fprintf(file, "%s: [%s] \n%s\n%s\n", v.Name, v.Severity, v.Description, v.Link)
			fmt.Fprintln(file, "-----------------------------------------")
		}
	})
	return nil
}
better throttle
Signed-off-by: Jess Frazelle <e0d1a862d8f31af605ecef8c92857b8938ba622e@google.com>
package main
import (
"encoding/json"
"fmt"
"html/template"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"sync"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/manifest/schema1"
humanize "github.com/dustin/go-humanize"
"github.com/jessfraz/reg/clair"
"github.com/jessfraz/reg/registry"
"github.com/jessfraz/reg/utils"
"github.com/urfave/cli"
)
const (
	// VERSION is the binary version.
	VERSION = "v0.1.0"

	// dockerConfigPath is the location of the Docker CLI config file,
	// relative to the user's home directory.
	dockerConfigPath = ".docker/config.json"
)
var (
	// updating reports whether an index rebuild is currently in flight.
	// NOTE(review): read and written from multiple goroutines without
	// synchronization — confirm whether this should be an atomic/mutex.
	updating = false
	// wg tracks the per-image vulnerability page generator goroutines.
	wg sync.WaitGroup
)
// preload initializes any global options and configuration before the main
// or sub commands are run. It currently only raises the log level when the
// --debug flag is set; the returned error is always nil.
func preload(c *cli.Context) (err error) {
	if !c.GlobalBool("debug") {
		return nil
	}
	logrus.SetLevel(logrus.DebugLevel)
	return nil
}
// main wires up the reg-server CLI: it renders a static index of the
// registry once at startup, re-renders it on a timer, and serves the
// static directory over HTTP(S).
func main() {
	app := cli.NewApp()
	app.Name = "reg-server"
	app.Version = VERSION
	app.Author = "@jessfraz"
	app.Email = "no-reply@butts.com"
	app.Usage = "Docker registry v2 static UI server."
	app.Before = preload
	// NOTE(review): "provate" in the registry flag usage below is a typo
	// for "private" in a user-facing string; left unchanged in this
	// documentation-only pass.
	app.Flags = []cli.Flag{
		cli.BoolFlag{
			Name:  "debug, d",
			Usage: "run in debug mode",
		},
		cli.StringFlag{
			Name:  "username, u",
			Usage: "username for the registry",
		},
		cli.StringFlag{
			Name:  "password, p",
			Usage: "password for the registry",
		},
		cli.StringFlag{
			Name:  "registry, r",
			Usage: "URL to the provate registry (ex. r.j3ss.co)",
		},
		cli.StringFlag{
			Name:  "port",
			Value: "8080",
			Usage: "port for server to run on",
		},
		cli.StringFlag{
			Name:  "cert",
			Usage: "path to ssl cert",
		},
		cli.StringFlag{
			Name:  "key",
			Usage: "path to ssl key",
		},
		cli.StringFlag{
			Name:  "interval",
			Value: "5m",
			Usage: "interval to generate new index.html's at",
		},
		cli.StringFlag{
			Name:  "clair",
			Usage: "url to clair instance",
		},
	}
	app.Action = func(c *cli.Context) error {
		// resolve registry credentials from flags / docker config
		auth, err := utils.GetAuthConfig(c)
		if err != nil {
			return err
		}
		// create the registry client
		r, err := registry.New(auth, c.GlobalBool("debug"))
		if err != nil {
			return err
		}
		// get the path to the static directory
		wd, err := os.Getwd()
		if err != nil {
			return err
		}
		staticDir := filepath.Join(wd, "static")
		// create the initial index
		if err := createStaticIndex(r, staticDir, c.GlobalString("clair")); err != nil {
			return err
		}
		// parse the duration
		dur, err := time.ParseDuration(c.String("interval"))
		if err != nil {
			logrus.Fatalf("parsing %s as duration failed: %v", c.String("interval"), err)
		}
		ticker := time.NewTicker(dur)
		go func() {
			// create more indexes every X minutes based off interval
			// NOTE(review): `updating` is read here and written inside
			// createStaticIndex from different goroutines without
			// synchronization — data race. Also note wg.Wait() and the
			// flag reset run only on the error path, because a failed
			// createStaticIndex returns before clearing the flag itself.
			for range ticker.C {
				if !updating {
					if err := createStaticIndex(r, staticDir, c.GlobalString("clair")); err != nil {
						logrus.Warnf("creating static index failed: %v", err)
						wg.Wait()
						updating = false
					}
				}
			}
		}()
		// create mux server
		mux := http.NewServeMux()
		// static files handler
		staticHandler := http.FileServer(http.Dir(staticDir))
		mux.Handle("/", staticHandler)
		// set up the server
		port := c.String("port")
		server := &http.Server{
			Addr:    ":" + port,
			Handler: mux,
		}
		logrus.Infof("Starting server on port %q", port)
		// serve TLS only when both cert and key were provided
		if c.String("cert") != "" && c.String("key") != "" {
			logrus.Fatal(server.ListenAndServeTLS(c.String("cert"), c.String("key")))
		} else {
			logrus.Fatal(server.ListenAndServe())
		}
		return nil
	}
	// NOTE(review): app.Run's error is ignored, so a flag-parsing failure
	// does not set a non-zero exit code.
	app.Run(os.Args)
}
// data is the template context handed to layout.html.
type data struct {
	RegistryURL string       // registry domain (r.Domain)
	LastUpdated string       // human-readable render timestamp (RFC1123)
	Repos       []repository // one entry per repo:tag discovered in the catalog
}
// repository is one repo:tag row rendered into the index page.
type repository struct {
	Name        string // repository name from the catalog
	Tag         string // image tag
	RepoURI     string // pullable URI; the ":tag" suffix is omitted for "latest"
	CreatedDate string // humanized creation time from the v1 history, may be empty
	VulnURI     string // relative path to vulns.txt; empty when clair is not configured
}
// v1Compatibility captures the subset of the schema1 v1Compatibility
// history blob that the index needs (creation time; ID kept for context).
type v1Compatibility struct {
	ID      string    `json:"id"`
	Created time.Time `json:"created"`
}
// createStaticIndex renders the registry catalog (and, when clairURI is
// set, kicks off per-image vulnerability reports) into staticDir/index.html.
// It sets the package-level `updating` flag while it runs so the ticker in
// main skips overlapping rebuilds. NOTE(review): the flag is unsynchronized
// — confirm whether it should be atomic.
func createStaticIndex(r *registry.Registry, staticDir, clairURI string) error {
	updating = true
	logrus.Info("fetching catalog")
	repoList, err := r.Catalog("")
	if err != nil {
		return fmt.Errorf("getting catalog failed: %v", err)
	}
	logrus.Info("fetching tags")
	var repos []repository
	for i, repo := range repoList {
		// get the tags
		tags, err := r.Tags(repo)
		if err != nil {
			return fmt.Errorf("getting tags for %s failed: %v", repo, err)
		}
		for j, tag := range tags {
			// get the manifest
			manifest, err := r.Manifest(repo, tag)
			if err != nil {
				return fmt.Errorf("getting tags for %s:%s failed: %v", repo, tag, err)
			}
			// pull the creation date out of the first history entry
			var createdDate string
			if m1, ok := manifest.(schema1.SignedManifest); ok {
				history := m1.History
				for _, h := range history {
					var comp v1Compatibility
					if err := json.Unmarshal([]byte(h.V1Compatibility), &comp); err != nil {
						return fmt.Errorf("unmarshal v1compatibility failed: %v", err)
					}
					createdDate = humanize.Time(comp.Created)
					break
				}
			}
			repoURI := fmt.Sprintf("%s/%s", r.Domain, repo)
			if tag != "latest" {
				repoURI += ":" + tag
			}
			newrepo := repository{
				Name:        repo,
				Tag:         tag,
				RepoURI:     repoURI,
				CreatedDate: createdDate,
			}
			if clairURI != "" {
				wg.Add(1)
				go func(repo, tag string, i, j int) {
					defer wg.Done()
					// BUG FIX: the original staggered requests with
					// time.Tick, which leaks a Ticker per goroutine since the
					// channel is received from only once (and it double-wrapped
					// the duration in time.Duration). A plain Sleep does the
					// same staggering without the leak.
					time.Sleep(time.Duration((i+1)*(j+1)) * time.Microsecond)
					logrus.Infof("creating vulns.txt for %s:%s", repo, tag)
					if err := createVulnStaticPage(r, staticDir, clairURI, repo, tag); err != nil {
						logrus.Warnf("creating vuln static page for %s:%s failed: %v", repo, tag, err)
					}
				}(repo, tag, i, j)
				newrepo.VulnURI = filepath.Join(repo, tag, "vulns.txt")
			}
			repos = append(repos, newrepo)
		}
	}
	// create temporary file to save template to
	logrus.Info("creating temporary file for template")
	f, err := ioutil.TempFile("", "reg-server")
	if err != nil {
		return fmt.Errorf("creating temp file failed: %v", err)
	}
	defer f.Close()
	defer os.Remove(f.Name())
	// parse & execute the template
	logrus.Info("parsing and executing the template")
	templateDir := filepath.Join(staticDir, "../templates")
	lp := filepath.Join(templateDir, "layout.html")
	d := data{
		RegistryURL: r.Domain,
		Repos:       repos,
		LastUpdated: time.Now().Local().Format(time.RFC1123),
	}
	tmpl := template.Must(template.New("").ParseFiles(lp))
	if err := tmpl.ExecuteTemplate(f, "layout", d); err != nil {
		return fmt.Errorf("execute template failed: %v", err)
	}
	f.Close()
	// swap the freshly rendered index into place
	index := filepath.Join(staticDir, "index.html")
	logrus.Infof("renaming the temporary file %s to %s", f.Name(), index)
	if err := os.Rename(f.Name(), index); err != nil {
		return fmt.Errorf("renaming result from %s to %s failed: %v", f.Name(), index, err)
	}
	updating = false
	return nil
}
// createVulnStaticPage asks the clair instance at clairURI to analyse
// repo:tag and writes a plain-text vulnerability report to
// staticDir/<repo>/<tag>/vulns.txt.
func createVulnStaticPage(r *registry.Registry, staticDir, clairURI, repo, tag string) error {
	// get the manifest
	m, err := r.ManifestV1(repo, tag)
	if err != nil {
		return err
	}
	// filter out the empty layers
	var filteredLayers []schema1.FSLayer
	for _, layer := range m.FSLayers {
		if layer.BlobSum != clair.EmptyLayerBlobSum {
			filteredLayers = append(filteredLayers, layer)
		}
	}
	m.FSLayers = filteredLayers
	if len(m.FSLayers) == 0 {
		// BUG FIX: "emtpy" typo corrected and trailing newline added.
		fmt.Printf("No need to analyse image %s:%s as there is no non-empty layer\n", repo, tag)
		return nil
	}
	// initialize clair
	cr, err := clair.New(clairURI, false)
	if err != nil {
		return err
	}
	// post the layers in reverse slice order so each parent exists first
	for i := len(m.FSLayers) - 1; i >= 0; i-- {
		// form the clair layer
		l, err := utils.NewClairLayer(r, repo, m.FSLayers, i)
		if err != nil {
			return err
		}
		// post the layer
		if _, err := cr.PostLayer(l); err != nil {
			return err
		}
	}
	// fetch the analysis for the first (topmost) layer
	vl, err := cr.GetLayer(m.FSLayers[0].BlobSum.String(), false, true)
	if err != nil {
		return err
	}
	// get the vulns
	var vulns []clair.Vulnerability
	for _, f := range vl.Features {
		vulns = append(vulns, f.Vulnerabilities...)
	}
	path := filepath.Join(staticDir, repo, tag, "vulns.txt")
	// BUG FIX: directories were created with mode 0644, which lacks the
	// execute bit, so files could not be created inside them; use 0755.
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return err
	}
	file, err := os.Create(path)
	if err != nil {
		return err
	}
	defer file.Close()
	fmt.Fprintf(file, "Found %d vulnerabilities \n", len(vulns))
	// vulnsBy returns (and lazily initializes) the bucket for a severity.
	vulnsBy := func(sev string, store map[string][]clair.Vulnerability) []clair.Vulnerability {
		items, found := store[sev]
		if !found {
			items = make([]clair.Vulnerability, 0)
			store[sev] = items
		}
		return items
	}
	// group by severity
	store := make(map[string][]clair.Vulnerability)
	for _, v := range vulns {
		sevRow := vulnsBy(v.Severity, store)
		store[v.Severity] = append(sevRow, v)
	}
	// iterate over the priorities list, skipping empty buckets
	iteratePriorities := func(f func(sev string)) {
		for _, sev := range clair.Priorities {
			if len(store[sev]) != 0 {
				f(sev)
			}
		}
	}
	// summary: one count line per severity present
	iteratePriorities(func(sev string) {
		fmt.Fprintf(file, "%s: %d\n", sev, len(store[sev]))
	})
	fmt.Fprintln(file, "")
	// flag images with more than 10 high-or-worse vulnerabilities
	lenBadVulns := len(store["High"]) + len(store["Critical"]) + len(store["Defcon1"])
	if lenBadVulns > 10 {
		fmt.Fprintln(file, "--------------- ALERT ---------------")
		// BUG FIX: "vunerabilities" typo corrected in the report output.
		fmt.Fprintf(file, "%d bad vulnerabilities found", lenBadVulns)
	}
	fmt.Fprintln(file, "")
	// detail section, most severe first
	iteratePriorities(func(sev string) {
		for _, v := range store[sev] {
			fmt.Fprintf(file, "%s: [%s] \n%s\n%s\n", v.Name, v.Severity, v.Description, v.Link)
			fmt.Fprintln(file, "-----------------------------------------")
		}
	})
	return nil
}
|
package main
import (
"encoding/json"
"errors"
"fmt"
"html/template"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/docker/cliconfig"
"github.com/docker/engine-api/types"
humanize "github.com/dustin/go-humanize"
"github.com/jessfraz/reg/clair"
"github.com/jessfraz/reg/registry"
"github.com/urfave/cli"
)
const (
	// VERSION is the binary version.
	VERSION = "v0.1.0"
	// dockerConfigPath is the docker CLI config location relative to $HOME.
	// NOTE(review): not referenced in this file — confirm it is used elsewhere.
	dockerConfigPath = ".docker/config.json"
)
// State shared between the HTTP server goroutine and index regeneration.
var (
	// updating is true while createStaticIndex runs, so the ticker skips
	// overlapping runs. NOTE(review): accessed from multiple goroutines
	// without synchronization — confirm a mutex/atomic is not needed.
	updating = false
	// wg tracks the per-tag vulnerability-scan goroutines.
	wg sync.WaitGroup
)
// preload initializes any global options and configuration
// before the main or sub commands are run. It currently only raises the
// log level when --debug is set and never fails.
func preload(c *cli.Context) error {
	if c.GlobalBool("debug") {
		logrus.SetLevel(logrus.DebugLevel)
	}
	// the named return value `err` in the original was never assigned;
	// a plain error return type is clearer
	return nil
}
// main wires up the CLI, builds the registry client, generates the static
// index (regenerating it on an interval), and serves the result over HTTP.
func main() {
	app := cli.NewApp()
	app.Name = "reg-server"
	app.Version = VERSION
	app.Author = "@jessfraz"
	app.Email = "no-reply@butts.com"
	app.Usage = "Docker registry v2 static UI server."
	app.Before = preload
	app.Flags = []cli.Flag{
		cli.BoolFlag{
			Name:  "debug, d",
			Usage: "run in debug mode",
		},
		cli.StringFlag{
			Name:  "username, u",
			Usage: "username for the registry",
		},
		cli.StringFlag{
			Name:  "password, p",
			Usage: "password for the registry",
		},
		cli.StringFlag{
			Name:  "registry, r",
			Usage: "URL to the private registry (ex. r.j3ss.co)",
		},
		cli.StringFlag{
			Name:  "port",
			Value: "8080",
			Usage: "port for server to run on",
		},
		cli.StringFlag{
			Name:  "cert",
			Usage: "path to ssl cert",
		},
		cli.StringFlag{
			Name:  "key",
			Usage: "path to ssl key",
		},
		cli.StringFlag{
			Name:  "interval",
			Value: "5m",
			Usage: "interval to generate new index.html's at",
		},
		cli.StringFlag{
			Name:  "clair",
			Usage: "url to clair instance",
		},
	}
	app.Action = func(c *cli.Context) error {
		auth, err := getAuthConfig(c)
		if err != nil {
			return err
		}
		// create the registry client
		r, err := registry.New(auth, c.GlobalBool("debug"))
		if err != nil {
			return err
		}
		// the static output lives under ./static relative to the working dir
		wd, err := os.Getwd()
		if err != nil {
			return err
		}
		staticDir := filepath.Join(wd, "static")
		// create the initial index synchronously so the server never starts empty
		if err := createStaticIndex(r, staticDir, c.GlobalString("clair")); err != nil {
			return err
		}
		// parse the regeneration interval
		dur, err := time.ParseDuration(c.String("interval"))
		if err != nil {
			logrus.Fatalf("parsing %s as duration failed: %v", c.String("interval"), err)
		}
		ticker := time.NewTicker(dur)
		go func() {
			// regenerate the index on every tick, skipping ticks that arrive
			// while a previous run is still in flight.
			// NOTE(review): `updating` is shared with createStaticIndex
			// without synchronization — confirm the race is acceptable.
			for range ticker.C {
				if !updating {
					if err := createStaticIndex(r, staticDir, c.GlobalString("clair")); err != nil {
						logrus.Warnf("creating static index failed: %v", err)
						wg.Wait()
						updating = false
					}
				}
			}
		}()
		// create mux server
		mux := http.NewServeMux()
		// static files handler
		staticHandler := http.FileServer(http.Dir(staticDir))
		mux.Handle("/", staticHandler)
		// set up the server
		port := c.String("port")
		server := &http.Server{
			Addr:    ":" + port,
			Handler: mux,
		}
		logrus.Infof("Starting server on port %q", port)
		// serve TLS only when both halves of the keypair were provided
		if c.String("cert") != "" && c.String("key") != "" {
			logrus.Fatal(server.ListenAndServeTLS(c.String("cert"), c.String("key")))
		} else {
			logrus.Fatal(server.ListenAndServe())
		}
		return nil
	}
	// surface flag-parsing/action errors instead of silently dropping them
	if err := app.Run(os.Args); err != nil {
		logrus.Fatal(err)
	}
}
// getAuthConfig resolves the registry credentials to use, preferring explicit
// CLI flags and falling back to the saved docker CLI config.
func getAuthConfig(c *cli.Context) (types.AuthConfig, error) {
	user := c.GlobalString("username")
	pass := c.GlobalString("password")
	reg := c.GlobalString("registry")
	// a full set of flags wins over anything stored on disk
	if user != "" && pass != "" && reg != "" {
		return types.AuthConfig{
			Username:      user,
			Password:      pass,
			ServerAddress: reg,
		}, nil
	}
	dcfg, err := cliconfig.Load(cliconfig.ConfigDir())
	if err != nil {
		return types.AuthConfig{}, fmt.Errorf("Loading config file failed: %v", err)
	}
	// nothing saved: fall back to a bare server address if one was given
	if !dcfg.ContainsAuth() {
		if reg != "" {
			return types.AuthConfig{
				ServerAddress: reg,
			}, nil
		}
		return types.AuthConfig{}, fmt.Errorf("No auth was present in %s, please pass a registry, username, and password", cliconfig.ConfigDir())
	}
	// a specific registry was requested: use its saved creds, if any
	if reg != "" {
		creds, ok := dcfg.AuthConfigs[reg]
		if !ok {
			return types.AuthConfig{}, fmt.Errorf("No authentication credentials exist for %s", reg)
		}
		return creds, nil
	}
	// no registry given: pick an arbitrary saved credential
	// (map iteration order is random, so which one is nondeterministic)
	for _, creds := range dcfg.AuthConfigs {
		return creds, nil
	}
	return types.AuthConfig{}, fmt.Errorf("Could not find any authentication credentials")
}
// getRepoAndRef splits the first CLI argument into a repository name and a
// reference (digest after "@", tag after ":", defaulting to "latest").
func getRepoAndRef(c *cli.Context) (repo, ref string, err error) {
	if len(c.Args()) < 1 {
		return "", "", errors.New("pass the name of the repository")
	}
	arg := c.Args()[0]
	var parts []string
	switch {
	case strings.Contains(arg, "@"):
		parts = strings.Split(arg, "@")
	case strings.Contains(arg, ":"):
		parts = strings.Split(arg, ":")
	default:
		parts = []string{arg}
	}
	repo = parts[0]
	ref = "latest"
	if len(parts) > 1 {
		ref = parts[1]
	}
	return repo, ref, nil
}
// data is the payload handed to the "layout" template when rendering the
// static index page.
type data struct {
	RegistryURL string // registry domain shown on the page
	LastUpdated string // human-readable timestamp of the last generation
	Repos []repository // one entry per repo:tag
}
// repository is a single repo:tag row on the index page.
type repository struct {
	Name string // repository name within the registry
	Tag string // image tag
	RepoURI string // pullable URI, e.g. r.j3ss.co/foo:tag
	CreatedDate string // humanized creation time from the v1 history
	VulnURI string // relative link to vulns.txt (set only when clair is configured)
}
// v1Compatibility is the subset of the schema1 v1Compatibility JSON blob
// that this server needs (only the creation time).
type v1Compatibility struct {
	ID string `json:"id"`
	Created time.Time `json:"created"`
}
// createStaticIndex regenerates <staticDir>/index.html from the registry
// catalog. When clairURI is non-empty it also starts one goroutine per
// repo:tag that writes a vulnerability report (tracked by the package-level
// wg). The package-level `updating` flag is held true for the duration of a
// successful run so the ticker in main skips overlapping runs.
// NOTE(review): `updating` stays true when this returns an error (the caller
// resets it), and it is accessed from multiple goroutines without
// synchronization — confirm this race is acceptable.
func createStaticIndex(r *registry.Registry, staticDir, clairURI string) error {
	updating = true
	logrus.Info("fetching catalog")
	repoList, err := r.Catalog("")
	if err != nil {
		return fmt.Errorf("getting catalog failed: %v", err)
	}
	logrus.Info("fetching tags")
	var repos []repository
	for _, repo := range repoList {
		// get the tags
		tags, err := r.Tags(repo)
		if err != nil {
			return fmt.Errorf("getting tags for %s failed: %v", repo, err)
		}
		for _, tag := range tags {
			// get the manifest
			manifest, err := r.Manifest(repo, tag)
			if err != nil {
				return fmt.Errorf("getting tags for %s:%s failed: %v", repo, tag, err)
			}
			// pull the creation time from the first history entry of a
			// schema1 manifest; other manifest types get no date
			var createdDate string
			if m1, ok := manifest.(schema1.SignedManifest); ok {
				history := m1.History
				for _, h := range history {
					var comp v1Compatibility
					if err := json.Unmarshal([]byte(h.V1Compatibility), &comp); err != nil {
						return fmt.Errorf("unmarshal v1compatibility failed: %v", err)
					}
					createdDate = humanize.Time(comp.Created)
					break
				}
			}
			// pullable URI; :latest is left implicit
			repoURI := fmt.Sprintf("%s/%s", r.Domain, repo)
			if tag != "latest" {
				repoURI += ":" + tag
			}
			newrepo := repository{
				Name:        repo,
				Tag:         tag,
				RepoURI:     repoURI,
				CreatedDate: createdDate,
			}
			if clairURI != "" {
				// scan in the background; failures are logged, not fatal
				wg.Add(1)
				go func(repo, tag string) {
					defer wg.Done()
					logrus.Infof("creating vulns.txt for %s:%s", repo, tag)
					if err := createVulnStaticPage(r, staticDir, clairURI, repo, tag); err != nil {
						// return fmt.Errorf("creating vuln static page for %s:%s failed: %v", repo, tag, err)
						logrus.Warnf("creating vuln static page for %s:%s failed: %v", repo, tag, err)
					}
				}(repo, tag)
				// link to the report even though it may not exist yet
				newrepo.VulnURI = filepath.Join(repo, tag, "vulns.txt")
			}
			repos = append(repos, newrepo)
		}
	}
	// render into a temporary file first so the live index.html can be
	// replaced in a single rename below
	logrus.Info("creating temporary file for template")
	f, err := ioutil.TempFile("", "reg-server")
	if err != nil {
		return fmt.Errorf("creating temp file failed: %v", err)
	}
	defer f.Close()
	defer os.Remove(f.Name())
	// parse & execute the template
	// NOTE(review): template.Must panics if layout.html is missing or invalid.
	logrus.Info("parsing and executing the template")
	templateDir := filepath.Join(staticDir, "../templates")
	lp := filepath.Join(templateDir, "layout.html")
	d := data{
		RegistryURL: r.Domain,
		Repos:       repos,
		LastUpdated: time.Now().Local().Format(time.RFC1123),
	}
	tmpl := template.Must(template.New("").ParseFiles(lp))
	if err := tmpl.ExecuteTemplate(f, "layout", d); err != nil {
		return fmt.Errorf("execute template failed: %v", err)
	}
	// flush/close before the rename; the deferred Close then fails harmlessly
	f.Close()
	index := filepath.Join(staticDir, "index.html")
	logrus.Infof("renaming the temporary file %s to %s", f.Name(), index)
	// NOTE(review): os.Rename fails across filesystems; assumes the OS temp
	// dir and staticDir are on the same device — confirm.
	if err := os.Rename(f.Name(), index); err != nil {
		return fmt.Errorf("renaming result from %s to %s failed: %v", f.Name(), index, err)
	}
	updating = false
	return nil
}
// createVulnStaticPage runs a Clair scan of repo:tag and writes a plain-text
// vulnerability report to <staticDir>/<repo>/<tag>/vulns.txt.
// It returns nil after writing the report even when vulnerabilities are
// found; only registry/Clair communication failures are returned as errors.
func createVulnStaticPage(r *registry.Registry, staticDir, clairURI, repo, tag string) error {
	// get the manifest
	m, err := r.ManifestV1(repo, tag)
	if err != nil {
		return err
	}
	// filter out the empty layers; Clair has nothing to analyse in them
	var filteredLayers []schema1.FSLayer
	for _, layer := range m.FSLayers {
		if layer.BlobSum != clair.EmptyLayerBlobSum {
			filteredLayers = append(filteredLayers, layer)
		}
	}
	m.FSLayers = filteredLayers
	if len(m.FSLayers) == 0 {
		fmt.Printf("No need to analyse image %s:%s as there is no non-empty layer", repo, tag)
		return nil
	}
	// initialize clair
	cr, err := clair.New(clairURI, true)
	if err != nil {
		return err
	}
	// post the layers bottom-up so each layer's parent is already known to Clair
	for i := len(m.FSLayers) - 1; i >= 0; i-- {
		// form the clair layer
		l, err := newClairLayer(r, repo, m.FSLayers, i)
		if err != nil {
			return err
		}
		// post the layer
		if _, err := cr.PostLayer(l); err != nil {
			return err
		}
	}
	// fetch the topmost layer with its (inherited) vulnerabilities included
	vl, err := cr.GetLayer(m.FSLayers[0].BlobSum.String(), false, true)
	if err != nil {
		return err
	}
	// flatten the per-feature vulnerability lists
	var vulns []clair.Vulnerability
	for _, f := range vl.Features {
		for _, v := range f.Vulnerabilities {
			vulns = append(vulns, v)
		}
	}
	path := filepath.Join(staticDir, repo, tag, "vulns.txt")
	// 0755, not 0644: directories need the execute bit to be traversable
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return err
	}
	file, err := os.Create(path)
	if err != nil {
		return err
	}
	defer file.Close()
	fmt.Fprintf(file, "Found %d vulnerabilities \n", len(vulns))
	// vulnsBy returns the bucket for a severity, creating it on first use
	vulnsBy := func(sev string, store map[string][]clair.Vulnerability) []clair.Vulnerability {
		items, found := store[sev]
		if !found {
			items = make([]clair.Vulnerability, 0)
			store[sev] = items
		}
		return items
	}
	// group by severity
	store := make(map[string][]clair.Vulnerability)
	for _, v := range vulns {
		sevRow := vulnsBy(v.Severity, store)
		store[v.Severity] = append(sevRow, v)
	}
	// iterate over the priorities list, skipping empty severities
	iteratePriorities := func(f func(sev string)) {
		for _, sev := range clair.Priorities {
			if len(store[sev]) != 0 {
				f(sev)
			}
		}
	}
	// detailed listing first...
	iteratePriorities(func(sev string) {
		for _, v := range store[sev] {
			fmt.Fprintf(file, "%s: [%s] \n%s\n%s\n", v.Name, v.Severity, v.Description, v.Link)
			fmt.Fprintln(file, "-----------------------------------------")
		}
	})
	// ...then per-severity counts
	iteratePriorities(func(sev string) {
		fmt.Fprintf(file, "%s: %d\n", sev, len(store[sev]))
	})
	// flag many bad vulns inside the report (not as a returned error)
	lenBadVulns := len(store["High"]) + len(store["Critical"]) + len(store["Defcon1"])
	if lenBadVulns > 10 {
		fmt.Fprintf(file, "%d bad vulnerabilities found", lenBadVulns)
	}
	return nil
}
// newClairLayer assembles the clair.Layer request for fsLayers[index],
// pointing Clair at the registry blob URL with a bearer token.
func newClairLayer(r *registry.Registry, image string, fsLayers []schema1.FSLayer, index int) (*clair.Layer, error) {
	blobSum := fsLayers[index].BlobSum.String()
	// every layer except the base layer names the layer below it as parent
	parentName := ""
	if index+1 < len(fsLayers) {
		parentName = fsLayers[index+1].BlobSum.String()
	}
	// the blob URL Clair will pull the layer contents from
	p := fmt.Sprintf("%s/v2/%s/blobs/%s", r.URL, image, blobSum)
	// get the token
	token, err := r.Token(p)
	if err != nil {
		return nil, err
	}
	layer := &clair.Layer{
		Name:       blobSum,
		Path:       p,
		ParentName: parentName,
		Format:     "Docker",
		Headers: map[string]string{
			"Authorization": fmt.Sprintf("Bearer %s", token),
		},
	}
	return layer, nil
}
less logging
Signed-off-by: Jess Frazelle <e0d1a862d8f31af605ecef8c92857b8938ba622e@google.com>
package main
import (
"encoding/json"
"errors"
"fmt"
"html/template"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/docker/cliconfig"
"github.com/docker/engine-api/types"
humanize "github.com/dustin/go-humanize"
"github.com/jessfraz/reg/clair"
"github.com/jessfraz/reg/registry"
"github.com/urfave/cli"
)
const (
	// VERSION is the binary version.
	VERSION = "v0.1.0"
	// dockerConfigPath is the docker CLI config location relative to $HOME.
	// NOTE(review): not referenced in this file — confirm it is used elsewhere.
	dockerConfigPath = ".docker/config.json"
)
// State shared between the HTTP server goroutine and index regeneration.
var (
	// updating is true while createStaticIndex runs, so the ticker skips
	// overlapping runs. NOTE(review): accessed from multiple goroutines
	// without synchronization — confirm a mutex/atomic is not needed.
	updating = false
	// wg tracks the per-tag vulnerability-scan goroutines.
	wg sync.WaitGroup
)
// preload initializes any global options and configuration
// before the main or sub commands are run. It currently only raises the
// log level when --debug is set and never fails.
func preload(c *cli.Context) error {
	if c.GlobalBool("debug") {
		logrus.SetLevel(logrus.DebugLevel)
	}
	// the named return value `err` in the original was never assigned;
	// a plain error return type is clearer
	return nil
}
// main wires up the CLI, builds the registry client, generates the static
// index (regenerating it on an interval), and serves the result over HTTP.
func main() {
	app := cli.NewApp()
	app.Name = "reg-server"
	app.Version = VERSION
	app.Author = "@jessfraz"
	app.Email = "no-reply@butts.com"
	app.Usage = "Docker registry v2 static UI server."
	app.Before = preload
	app.Flags = []cli.Flag{
		cli.BoolFlag{
			Name:  "debug, d",
			Usage: "run in debug mode",
		},
		cli.StringFlag{
			Name:  "username, u",
			Usage: "username for the registry",
		},
		cli.StringFlag{
			Name:  "password, p",
			Usage: "password for the registry",
		},
		cli.StringFlag{
			Name:  "registry, r",
			Usage: "URL to the private registry (ex. r.j3ss.co)",
		},
		cli.StringFlag{
			Name:  "port",
			Value: "8080",
			Usage: "port for server to run on",
		},
		cli.StringFlag{
			Name:  "cert",
			Usage: "path to ssl cert",
		},
		cli.StringFlag{
			Name:  "key",
			Usage: "path to ssl key",
		},
		cli.StringFlag{
			Name:  "interval",
			Value: "5m",
			Usage: "interval to generate new index.html's at",
		},
		cli.StringFlag{
			Name:  "clair",
			Usage: "url to clair instance",
		},
	}
	app.Action = func(c *cli.Context) error {
		auth, err := getAuthConfig(c)
		if err != nil {
			return err
		}
		// create the registry client
		r, err := registry.New(auth, c.GlobalBool("debug"))
		if err != nil {
			return err
		}
		// the static output lives under ./static relative to the working dir
		wd, err := os.Getwd()
		if err != nil {
			return err
		}
		staticDir := filepath.Join(wd, "static")
		// create the initial index synchronously so the server never starts empty
		if err := createStaticIndex(r, staticDir, c.GlobalString("clair")); err != nil {
			return err
		}
		// parse the regeneration interval
		dur, err := time.ParseDuration(c.String("interval"))
		if err != nil {
			logrus.Fatalf("parsing %s as duration failed: %v", c.String("interval"), err)
		}
		ticker := time.NewTicker(dur)
		go func() {
			// regenerate the index on every tick, skipping ticks that arrive
			// while a previous run is still in flight.
			// NOTE(review): `updating` is shared with createStaticIndex
			// without synchronization — confirm the race is acceptable.
			for range ticker.C {
				if !updating {
					if err := createStaticIndex(r, staticDir, c.GlobalString("clair")); err != nil {
						logrus.Warnf("creating static index failed: %v", err)
						wg.Wait()
						updating = false
					}
				}
			}
		}()
		// create mux server
		mux := http.NewServeMux()
		// static files handler
		staticHandler := http.FileServer(http.Dir(staticDir))
		mux.Handle("/", staticHandler)
		// set up the server
		port := c.String("port")
		server := &http.Server{
			Addr:    ":" + port,
			Handler: mux,
		}
		logrus.Infof("Starting server on port %q", port)
		// serve TLS only when both halves of the keypair were provided
		if c.String("cert") != "" && c.String("key") != "" {
			logrus.Fatal(server.ListenAndServeTLS(c.String("cert"), c.String("key")))
		} else {
			logrus.Fatal(server.ListenAndServe())
		}
		return nil
	}
	// surface flag-parsing/action errors instead of silently dropping them
	if err := app.Run(os.Args); err != nil {
		logrus.Fatal(err)
	}
}
// getAuthConfig resolves the registry credentials to use, preferring explicit
// CLI flags and falling back to the saved docker CLI config.
func getAuthConfig(c *cli.Context) (types.AuthConfig, error) {
	user := c.GlobalString("username")
	pass := c.GlobalString("password")
	reg := c.GlobalString("registry")
	// a full set of flags wins over anything stored on disk
	if user != "" && pass != "" && reg != "" {
		return types.AuthConfig{
			Username:      user,
			Password:      pass,
			ServerAddress: reg,
		}, nil
	}
	dcfg, err := cliconfig.Load(cliconfig.ConfigDir())
	if err != nil {
		return types.AuthConfig{}, fmt.Errorf("Loading config file failed: %v", err)
	}
	// nothing saved: fall back to a bare server address if one was given
	if !dcfg.ContainsAuth() {
		if reg != "" {
			return types.AuthConfig{
				ServerAddress: reg,
			}, nil
		}
		return types.AuthConfig{}, fmt.Errorf("No auth was present in %s, please pass a registry, username, and password", cliconfig.ConfigDir())
	}
	// a specific registry was requested: use its saved creds, if any
	if reg != "" {
		creds, ok := dcfg.AuthConfigs[reg]
		if !ok {
			return types.AuthConfig{}, fmt.Errorf("No authentication credentials exist for %s", reg)
		}
		return creds, nil
	}
	// no registry given: pick an arbitrary saved credential
	// (map iteration order is random, so which one is nondeterministic)
	for _, creds := range dcfg.AuthConfigs {
		return creds, nil
	}
	return types.AuthConfig{}, fmt.Errorf("Could not find any authentication credentials")
}
// getRepoAndRef splits the first CLI argument into a repository name and a
// reference (digest after "@", tag after ":", defaulting to "latest").
func getRepoAndRef(c *cli.Context) (repo, ref string, err error) {
	if len(c.Args()) < 1 {
		return "", "", errors.New("pass the name of the repository")
	}
	arg := c.Args()[0]
	var parts []string
	switch {
	case strings.Contains(arg, "@"):
		parts = strings.Split(arg, "@")
	case strings.Contains(arg, ":"):
		parts = strings.Split(arg, ":")
	default:
		parts = []string{arg}
	}
	repo = parts[0]
	ref = "latest"
	if len(parts) > 1 {
		ref = parts[1]
	}
	return repo, ref, nil
}
// data is the payload handed to the "layout" template when rendering the
// static index page.
type data struct {
	RegistryURL string // registry domain shown on the page
	LastUpdated string // human-readable timestamp of the last generation
	Repos []repository // one entry per repo:tag
}
// repository is a single repo:tag row on the index page.
type repository struct {
	Name string // repository name within the registry
	Tag string // image tag
	RepoURI string // pullable URI, e.g. r.j3ss.co/foo:tag
	CreatedDate string // humanized creation time from the v1 history
	VulnURI string // relative link to vulns.txt (set only when clair is configured)
}
// v1Compatibility is the subset of the schema1 v1Compatibility JSON blob
// that this server needs (only the creation time).
type v1Compatibility struct {
	ID string `json:"id"`
	Created time.Time `json:"created"`
}
// createStaticIndex regenerates <staticDir>/index.html from the registry
// catalog. When clairURI is non-empty it also starts one goroutine per
// repo:tag that writes a vulnerability report (tracked by the package-level
// wg). The package-level `updating` flag is held true for the duration of a
// successful run so the ticker in main skips overlapping runs.
// NOTE(review): `updating` stays true when this returns an error (the caller
// resets it), and it is accessed from multiple goroutines without
// synchronization — confirm this race is acceptable.
func createStaticIndex(r *registry.Registry, staticDir, clairURI string) error {
	updating = true
	logrus.Info("fetching catalog")
	repoList, err := r.Catalog("")
	if err != nil {
		return fmt.Errorf("getting catalog failed: %v", err)
	}
	logrus.Info("fetching tags")
	var repos []repository
	for _, repo := range repoList {
		// get the tags
		tags, err := r.Tags(repo)
		if err != nil {
			return fmt.Errorf("getting tags for %s failed: %v", repo, err)
		}
		for _, tag := range tags {
			// get the manifest
			manifest, err := r.Manifest(repo, tag)
			if err != nil {
				return fmt.Errorf("getting tags for %s:%s failed: %v", repo, tag, err)
			}
			// pull the creation time from the first history entry of a
			// schema1 manifest; other manifest types get no date
			var createdDate string
			if m1, ok := manifest.(schema1.SignedManifest); ok {
				history := m1.History
				for _, h := range history {
					var comp v1Compatibility
					if err := json.Unmarshal([]byte(h.V1Compatibility), &comp); err != nil {
						return fmt.Errorf("unmarshal v1compatibility failed: %v", err)
					}
					createdDate = humanize.Time(comp.Created)
					break
				}
			}
			// pullable URI; :latest is left implicit
			repoURI := fmt.Sprintf("%s/%s", r.Domain, repo)
			if tag != "latest" {
				repoURI += ":" + tag
			}
			newrepo := repository{
				Name:        repo,
				Tag:         tag,
				RepoURI:     repoURI,
				CreatedDate: createdDate,
			}
			if clairURI != "" {
				// scan in the background; failures are logged, not fatal
				wg.Add(1)
				go func(repo, tag string) {
					defer wg.Done()
					logrus.Infof("creating vulns.txt for %s:%s", repo, tag)
					if err := createVulnStaticPage(r, staticDir, clairURI, repo, tag); err != nil {
						// return fmt.Errorf("creating vuln static page for %s:%s failed: %v", repo, tag, err)
						logrus.Warnf("creating vuln static page for %s:%s failed: %v", repo, tag, err)
					}
				}(repo, tag)
				// link to the report even though it may not exist yet
				newrepo.VulnURI = filepath.Join(repo, tag, "vulns.txt")
			}
			repos = append(repos, newrepo)
		}
	}
	// render into a temporary file first so the live index.html can be
	// replaced in a single rename below
	logrus.Info("creating temporary file for template")
	f, err := ioutil.TempFile("", "reg-server")
	if err != nil {
		return fmt.Errorf("creating temp file failed: %v", err)
	}
	defer f.Close()
	defer os.Remove(f.Name())
	// parse & execute the template
	// NOTE(review): template.Must panics if layout.html is missing or invalid.
	logrus.Info("parsing and executing the template")
	templateDir := filepath.Join(staticDir, "../templates")
	lp := filepath.Join(templateDir, "layout.html")
	d := data{
		RegistryURL: r.Domain,
		Repos:       repos,
		LastUpdated: time.Now().Local().Format(time.RFC1123),
	}
	tmpl := template.Must(template.New("").ParseFiles(lp))
	if err := tmpl.ExecuteTemplate(f, "layout", d); err != nil {
		return fmt.Errorf("execute template failed: %v", err)
	}
	// flush/close before the rename; the deferred Close then fails harmlessly
	f.Close()
	index := filepath.Join(staticDir, "index.html")
	logrus.Infof("renaming the temporary file %s to %s", f.Name(), index)
	// NOTE(review): os.Rename fails across filesystems; assumes the OS temp
	// dir and staticDir are on the same device — confirm.
	if err := os.Rename(f.Name(), index); err != nil {
		return fmt.Errorf("renaming result from %s to %s failed: %v", f.Name(), index, err)
	}
	updating = false
	return nil
}
// createVulnStaticPage runs a Clair scan of repo:tag and writes a plain-text
// vulnerability report to <staticDir>/<repo>/<tag>/vulns.txt.
// It returns nil after writing the report even when vulnerabilities are
// found; only registry/Clair communication failures are returned as errors.
func createVulnStaticPage(r *registry.Registry, staticDir, clairURI, repo, tag string) error {
	// get the manifest
	m, err := r.ManifestV1(repo, tag)
	if err != nil {
		return err
	}
	// filter out the empty layers; Clair has nothing to analyse in them
	var filteredLayers []schema1.FSLayer
	for _, layer := range m.FSLayers {
		if layer.BlobSum != clair.EmptyLayerBlobSum {
			filteredLayers = append(filteredLayers, layer)
		}
	}
	m.FSLayers = filteredLayers
	if len(m.FSLayers) == 0 {
		fmt.Printf("No need to analyse image %s:%s as there is no non-empty layer", repo, tag)
		return nil
	}
	// initialize clair
	cr, err := clair.New(clairURI, false)
	if err != nil {
		return err
	}
	// post the layers bottom-up so each layer's parent is already known to Clair
	for i := len(m.FSLayers) - 1; i >= 0; i-- {
		// form the clair layer
		l, err := newClairLayer(r, repo, m.FSLayers, i)
		if err != nil {
			return err
		}
		// post the layer
		if _, err := cr.PostLayer(l); err != nil {
			return err
		}
	}
	// fetch the topmost layer with its (inherited) vulnerabilities included
	vl, err := cr.GetLayer(m.FSLayers[0].BlobSum.String(), false, true)
	if err != nil {
		return err
	}
	// flatten the per-feature vulnerability lists
	var vulns []clair.Vulnerability
	for _, f := range vl.Features {
		for _, v := range f.Vulnerabilities {
			vulns = append(vulns, v)
		}
	}
	path := filepath.Join(staticDir, repo, tag, "vulns.txt")
	// 0755, not 0644: directories need the execute bit to be traversable
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return err
	}
	file, err := os.Create(path)
	if err != nil {
		return err
	}
	defer file.Close()
	fmt.Fprintf(file, "Found %d vulnerabilities \n", len(vulns))
	// vulnsBy returns the bucket for a severity, creating it on first use
	vulnsBy := func(sev string, store map[string][]clair.Vulnerability) []clair.Vulnerability {
		items, found := store[sev]
		if !found {
			items = make([]clair.Vulnerability, 0)
			store[sev] = items
		}
		return items
	}
	// group by severity
	store := make(map[string][]clair.Vulnerability)
	for _, v := range vulns {
		sevRow := vulnsBy(v.Severity, store)
		store[v.Severity] = append(sevRow, v)
	}
	// iterate over the priorities list, skipping empty severities
	iteratePriorities := func(f func(sev string)) {
		for _, sev := range clair.Priorities {
			if len(store[sev]) != 0 {
				f(sev)
			}
		}
	}
	// detailed listing first...
	iteratePriorities(func(sev string) {
		for _, v := range store[sev] {
			fmt.Fprintf(file, "%s: [%s] \n%s\n%s\n", v.Name, v.Severity, v.Description, v.Link)
			fmt.Fprintln(file, "-----------------------------------------")
		}
	})
	// ...then per-severity counts
	iteratePriorities(func(sev string) {
		fmt.Fprintf(file, "%s: %d\n", sev, len(store[sev]))
	})
	// flag many bad vulns inside the report (not as a returned error)
	lenBadVulns := len(store["High"]) + len(store["Critical"]) + len(store["Defcon1"])
	if lenBadVulns > 10 {
		fmt.Fprintf(file, "%d bad vulnerabilities found", lenBadVulns)
	}
	return nil
}
// newClairLayer assembles the clair.Layer request for fsLayers[index],
// pointing Clair at the registry blob URL with a bearer token.
func newClairLayer(r *registry.Registry, image string, fsLayers []schema1.FSLayer, index int) (*clair.Layer, error) {
	blobSum := fsLayers[index].BlobSum.String()
	// every layer except the base layer names the layer below it as parent
	parentName := ""
	if index+1 < len(fsLayers) {
		parentName = fsLayers[index+1].BlobSum.String()
	}
	// the blob URL Clair will pull the layer contents from
	p := fmt.Sprintf("%s/v2/%s/blobs/%s", r.URL, image, blobSum)
	// get the token
	token, err := r.Token(p)
	if err != nil {
		return nil, err
	}
	layer := &clair.Layer{
		Name:       blobSum,
		Path:       p,
		ParentName: parentName,
		Format:     "Docker",
		Headers: map[string]string{
			"Authorization": fmt.Sprintf("Bearer %s", token),
		},
	}
	return layer, nil
}
|
package main
import (
"encoding/json"
"fmt"
"net/http"
"os/exec"
"strings"
"log/syslog"
"github.com/Sirupsen/logrus"
logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
"github.com/garyburd/redigo/redis"
)
// Server is the abstraction of a koderunr web api
type Server struct {
	redisPool *redis.Pool // shared pool; each handler checks a conn out per request
	logger *logrus.Logger // logs to syslog through a logrus hook
	servingStatic bool // when true, also serve the ./static directory at /
}
// NewServer create a new Server struct with a Redis pool of at most
// maxRedisConn connections and syslog-backed logging.
func NewServer(maxRedisConn int, servingStatic bool) *Server {
	dial := func() (redis.Conn, error) {
		c, err := redis.Dial("tcp", ":6379")
		if err != nil {
			return nil, err
		}
		return c, nil
	}
	logger := logrus.New()
	// mirror everything into syslog; the server is not worth running
	// without its log sink, so failing to attach the hook is fatal
	hook, err := logrus_syslog.NewSyslogHook("", "", syslog.LOG_INFO, "[KodeRunr Service]")
	if err != nil {
		panic(err)
	}
	logger.Hooks.Add(hook)
	return &Server{
		redisPool:     redis.NewPool(dial, maxRedisConn),
		logger:        logger,
		servingStatic: servingStatic,
	}
}
// Serve start serving http requests on the given port, with every route
// mounted under scope. It blocks for the lifetime of the server.
func (s *Server) Serve(scope string, port int) {
	s.logger.Infof("KodeRunr starting on port: %d", port)
	if s.servingStatic {
		http.Handle("/", http.FileServer(http.Dir("static")))
	}
	http.HandleFunc(scope+"langs/", s.HandleLangs)
	http.HandleFunc(scope+"run/", s.HandleRunCode)
	http.HandleFunc(scope+"save/", s.HandleSaveCode)
	http.HandleFunc(scope+"register/", s.HandleReg)
	http.HandleFunc(scope+"stdin/", s.HandleStdin)
	http.HandleFunc(scope+"fetch/", s.HandleFetchCode)
	// ListenAndServe blocks and only returns on failure; previously the
	// error was silently discarded, making bind failures invisible
	if err := http.ListenAndServe(fmt.Sprintf(":%d", port), nil); err != nil {
		s.logger.Errorf("HTTP server stopped: %v", err)
	}
}
// HandleRunCode streams the running program output to the frontend.
// Expects form values: uuid (a run ID previously stored by HandleReg) and
// evt ("true" for event-stream style output).
func (s *Server) HandleRunCode(w http.ResponseWriter, r *http.Request) {
	uuid := r.FormValue("uuid")
	conn := s.redisPool.Get()
	defer conn.Close()
	// Fetch the code into runner from Redis
	runner, err := FetchCode(uuid, conn)
	if err != nil {
		s.logger.Infof("Source code cannot be found in redis - %v", err)
		http.Error(w, "Cannot find the source code for some reason", 422)
		return
	}
	// tear the run down as soon as the client disconnects.
	// NOTE(review): the type assertion panics if w does not implement
	// http.CloseNotifier — confirm all response writers used here do.
	closeNotifier := w.(http.CloseNotifier).CloseNotify()
	runner.closeNotifier = closeNotifier
	runner.logger = s.logger
	isEvtStream := r.FormValue("evt") == "true"
	// NOTE(review): this second pool connection is handed to the client;
	// presumably the client closes it when done — confirm, otherwise it leaks.
	client := NewClient(runner, s.redisPool.Get(), uuid)
	go client.Write(w, isEvtStream)
	client.Run()
	// Purge the source code once the run has finished
	_, err = conn.Do("DEL", uuid+"#run")
	if err != nil {
		s.logger.Errorf("Failed to purge the source code for %s - %v", uuid, err)
	}
}
// HandleSaveCode saves the source code and returns a ID.
// The snippet is stored under <codeID>#snippet; a random ID is generated
// when the client does not supply one.
func (s *Server) HandleSaveCode(w http.ResponseWriter, r *http.Request) {
	runner := Runner{
		Lang:    r.FormValue("lang"),
		Source:  r.FormValue("source"),
		Version: r.FormValue("version"),
	}
	// marshalling a struct of plain string fields cannot fail
	bts, _ := json.Marshal(&runner)
	strj := string(bts)
	codeID := r.FormValue("codeID")
	if codeID == "" {
		codeID = NewRandID(10)
	}
	conn := s.redisPool.Get()
	defer conn.Close()
	_, err := conn.Do("SET", codeID+"#snippet", strj)
	if err != nil {
		s.logger.Errorf("Failed to store code snippet: %v", err)
		http.Error(w, "A serious error has occured.", 500)
		return
	}
	// Fprint, not Fprintf: codeID is user-controlled and must not be
	// interpreted as a format string
	fmt.Fprint(w, codeID)
}
// HandleFetchCode loads the code by codeID and returns the source code to user
// Only used by web interface at the moment.
func (s *Server) HandleFetchCode(w http.ResponseWriter, r *http.Request) {
	conn := s.redisPool.Get()
	defer conn.Close()
	// snippets are stored under <codeID>#snippet by HandleSaveCode
	snippetKey := r.FormValue("codeID") + "#snippet"
	value, err := redis.Bytes(conn.Do("GET", snippetKey))
	if err != nil {
		s.logger.Errorf("Cannot get code snippet: %v", err)
		http.Error(w, "The source code doesn't exist", 422)
		return
	}
	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
	w.Write(value)
}
// HandleReg fetch the code from the client and save it in Redis under a
// fresh UUID, which is written back to the client.
func (s *Server) HandleReg(w http.ResponseWriter, r *http.Request) {
	runner := Runner{
		Lang:    r.FormValue("lang"),
		Source:  r.FormValue("source"),
		Version: r.FormValue("version"),
		Timeout: 15,
	}
	// marshalling a struct of plain fields cannot fail
	bts, _ := json.Marshal(&runner)
	strj := string(bts)
	// generate the run ID; previously a failure here was ignored and would
	// silently register the code under an empty UUID
	output, err := exec.Command("uuidgen").Output()
	if err != nil {
		s.logger.Errorf("Cannot generate a uuid: %v", err)
		http.Error(w, "A serious error has occured.", 500)
		return
	}
	uuid := strings.TrimSuffix(string(output), "\n")
	conn := s.redisPool.Get()
	defer conn.Close()
	if _, err := conn.Do("SET", uuid+"#run", strj); err != nil {
		s.logger.Errorf("Cannot register the code: %v", err)
		http.Error(w, "A serious error has occured.", 500)
		return
	}
	fmt.Fprint(w, uuid)
}
// HandleStdin consumes the stdin from the client side and forwards it to
// the running container via Redis pub/sub. Responds 200 with an empty body.
func (s *Server) HandleStdin(w http.ResponseWriter, r *http.Request) {
	input := r.FormValue("input")
	uuid := r.FormValue("uuid")
	conn := s.redisPool.Get()
	defer conn.Close()
	// previously the PUBLISH error was silently dropped; log it so lost
	// stdin is at least diagnosable (the empty 200 reply is unchanged)
	if _, err := conn.Do("PUBLISH", uuid+"#stdin", input); err != nil {
		s.logger.Errorf("Failed to publish stdin for %s - %v", uuid, err)
	}
}
// HandleLangs deals with the request for showing the available
// programming languages as a plain-text list.
func (s *Server) HandleLangs(w http.ResponseWriter, r *http.Request) {
	langs := []string{
		"Supported Languages:",
		"Ruby - 2.3.0",
		"Ruby - 1.9.3-p550",
		"Python - 2.7.6",
		"Python - 3.5.0",
		"Swift - 2.2",
		"C - GCC 4.9",
		"Go - 1.6",
		"Elixir - 1.2.3",
	}
	// same bytes as the original trimmed raw string, newline-terminated
	fmt.Fprintf(w, "%s\n", strings.Join(langs, "\n"))
}
Handle the panic
* Log the errors to syslog and carry on
package main
import (
"encoding/json"
"fmt"
"net/http"
"os/exec"
"strings"
"log/syslog"
"github.com/Sirupsen/logrus"
logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
"github.com/garyburd/redigo/redis"
)
// Server is the abstraction of a koderunr web api
type Server struct {
	redisPool *redis.Pool // shared pool; each handler checks a conn out per request
	logger *logrus.Logger // logs to syslog through a logrus hook
	servingStatic bool // when true, also serve the ./static directory at /
}
// NewServer create a new Server struct with a Redis pool of at most
// maxRedisConn connections and syslog-backed logging.
func NewServer(maxRedisConn int, servingStatic bool) *Server {
	dial := func() (redis.Conn, error) {
		c, err := redis.Dial("tcp", ":6379")
		if err != nil {
			return nil, err
		}
		return c, nil
	}
	logger := logrus.New()
	// mirror everything into syslog; the server is not worth running
	// without its log sink, so failing to attach the hook is fatal
	hook, err := logrus_syslog.NewSyslogHook("", "", syslog.LOG_INFO, "[KodeRunr Service]")
	if err != nil {
		panic(err)
	}
	logger.Hooks.Add(hook)
	return &Server{
		redisPool:     redis.NewPool(dial, maxRedisConn),
		logger:        logger,
		servingStatic: servingStatic,
	}
}
// Serve start serving http requests on the given port, with every route
// mounted under scope. It blocks for the lifetime of the server.
func (s *Server) Serve(scope string, port int) {
	s.logger.Infof("KodeRunr starting on port: %d", port)
	if s.servingStatic {
		http.Handle("/", http.FileServer(http.Dir("static")))
	}
	// every API route goes through the recover middleware so a panicking
	// handler cannot kill the process
	for url, handleFn := range s.routeMap() {
		http.Handle(scope+url, s.recoverMiddleWare(http.HandlerFunc(handleFn)))
	}
	// ListenAndServe blocks and only returns on failure; previously the
	// error was silently discarded, making bind failures invisible
	if err := http.ListenAndServe(fmt.Sprintf(":%d", port), nil); err != nil {
		s.logger.Errorf("HTTP server stopped: %v", err)
	}
}
// routeMap lists every API endpoint and its handler; Serve mounts each one
// under the configured scope.
func (s *Server) routeMap() map[string]func(w http.ResponseWriter, r *http.Request) {
	routes := make(map[string]func(w http.ResponseWriter, r *http.Request), 6)
	routes["langs/"] = s.HandleLangs
	routes["run/"] = s.HandleRunCode
	routes["save/"] = s.HandleSaveCode
	routes["register/"] = s.HandleReg
	routes["stdin/"] = s.HandleStdin
	routes["fetch/"] = s.HandleFetchCode
	return routes
}
// HandleRunCode streams the running program output to the frontend.
// Expects form values: uuid (a run ID previously stored by HandleReg) and
// evt ("true" for event-stream style output).
func (s *Server) HandleRunCode(w http.ResponseWriter, r *http.Request) {
	uuid := r.FormValue("uuid")
	conn := s.redisPool.Get()
	defer conn.Close()
	// Fetch the code into runner from Redis
	runner, err := FetchCode(uuid, conn)
	if err != nil {
		s.logger.Infof("Source code cannot be found in redis - %v", err)
		http.Error(w, "Cannot find the source code for some reason", 422)
		return
	}
	// tear the run down as soon as the client disconnects.
	// NOTE(review): the type assertion panics if w does not implement
	// http.CloseNotifier — confirm all response writers used here do.
	closeNotifier := w.(http.CloseNotifier).CloseNotify()
	runner.closeNotifier = closeNotifier
	runner.logger = s.logger
	isEvtStream := r.FormValue("evt") == "true"
	// NOTE(review): this second pool connection is handed to the client;
	// presumably the client closes it when done — confirm, otherwise it leaks.
	client := NewClient(runner, s.redisPool.Get(), uuid)
	go client.Write(w, isEvtStream)
	client.Run()
	// Purge the source code once the run has finished
	_, err = conn.Do("DEL", uuid+"#run")
	if err != nil {
		s.logger.Errorf("Failed to purge the source code for %s - %v", uuid, err)
	}
}
// HandleSaveCode saves the source code and returns a ID.
// The snippet is stored under <codeID>#snippet; a random ID is generated
// when the client does not supply one.
func (s *Server) HandleSaveCode(w http.ResponseWriter, r *http.Request) {
	runner := Runner{
		Lang:    r.FormValue("lang"),
		Source:  r.FormValue("source"),
		Version: r.FormValue("version"),
	}
	// marshalling a struct of plain string fields cannot fail
	bts, _ := json.Marshal(&runner)
	strj := string(bts)
	codeID := r.FormValue("codeID")
	if codeID == "" {
		codeID = NewRandID(10)
	}
	conn := s.redisPool.Get()
	defer conn.Close()
	_, err := conn.Do("SET", codeID+"#snippet", strj)
	if err != nil {
		s.logger.Errorf("Failed to store code snippet: %v", err)
		http.Error(w, "A serious error has occured.", 500)
		return
	}
	// Fprint, not Fprintf: codeID is user-controlled and must not be
	// interpreted as a format string
	fmt.Fprint(w, codeID)
}
// HandleFetchCode loads the code by codeID and returns the source code to user
// Only used by web interface at the moment.
func (s *Server) HandleFetchCode(w http.ResponseWriter, r *http.Request) {
	conn := s.redisPool.Get()
	defer conn.Close()
	// snippets are stored under <codeID>#snippet by HandleSaveCode
	snippetKey := r.FormValue("codeID") + "#snippet"
	value, err := redis.Bytes(conn.Do("GET", snippetKey))
	if err != nil {
		s.logger.Errorf("Cannot get code snippet: %v", err)
		http.Error(w, "The source code doesn't exist", 422)
		return
	}
	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
	w.Write(value)
}
// HandleReg fetch the code from the client and save it in Redis under a
// fresh UUID, which is written back to the client.
func (s *Server) HandleReg(w http.ResponseWriter, r *http.Request) {
	runner := Runner{
		Lang:    r.FormValue("lang"),
		Source:  r.FormValue("source"),
		Version: r.FormValue("version"),
		Timeout: 15,
	}
	// marshalling a struct of plain fields cannot fail
	bts, _ := json.Marshal(&runner)
	strj := string(bts)
	// generate the run ID; previously a failure here was ignored and would
	// silently register the code under an empty UUID
	output, err := exec.Command("uuidgen").Output()
	if err != nil {
		s.logger.Errorf("Cannot generate a uuid: %v", err)
		http.Error(w, "A serious error has occured.", 500)
		return
	}
	uuid := strings.TrimSuffix(string(output), "\n")
	conn := s.redisPool.Get()
	defer conn.Close()
	if _, err := conn.Do("SET", uuid+"#run", strj); err != nil {
		s.logger.Errorf("Cannot register the code: %v", err)
		http.Error(w, "A serious error has occured.", 500)
		return
	}
	fmt.Fprint(w, uuid)
}
// HandleStdin consumes the stdin from the client side and forwards it to
// the running container by publishing on the "<uuid>#stdin" channel.
// Always replies 200 with an empty body.
func (s *Server) HandleStdin(w http.ResponseWriter, r *http.Request) {
	input := r.FormValue("input")
	uuid := r.FormValue("uuid")
	conn := s.redisPool.Get()
	defer conn.Close()
	// Previously the PUBLISH error was silently dropped; log it so lost
	// stdin can be diagnosed. The client response stays best-effort.
	if _, err := conn.Do("PUBLISH", uuid+"#stdin", input); err != nil {
		s.logger.Errorf("Failed to publish stdin for %s: %v", uuid, err)
	}
	// Nothing to write: a 200 with an empty body is the contract
	// (the old `fmt.Fprintf(w, "")` was a no-op and a vet warning).
}
// HandleLangs deals with the request for showing the available
// programming languages. The list is static and rendered as plain text.
func (s *Server) HandleLangs(w http.ResponseWriter, r *http.Request) {
	langs := []string{
		"Supported Languages:",
		"Ruby - 2.3.0",
		"Ruby - 1.9.3-p550",
		"Python - 2.7.6",
		"Python - 3.5.0",
		"Swift - 2.2",
		"C - GCC 4.9",
		"Go - 1.6",
		"Elixir - 1.2.3",
	}
	fmt.Fprintf(w, "%s\n", strings.Join(langs, "\n"))
}
// recoverMiddleWare wraps h so that a panic raised while serving a
// request is caught and logged instead of killing the whole process.
// NOTE(review): no error response is written after a panic; the client
// only sees whatever (possibly partial) output was produced before it.
func (s *Server) recoverMiddleWare(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer func() {
			// This `r` shadows the *http.Request above: it is the
			// value returned by recover(), non-nil only on panic.
			if r := recover(); r != nil {
				s.logger.Errorf("Request crashed caused by %v\n", r)
			}
		}()
		h.ServeHTTP(w, r)
	})
}
|
/* Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
/* Copyright [2016] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"strings"
"bytes"
"regexp"
"strconv"
"sort"
"encoding/json"
"errors"
"flag"
"fmt"
"go/build"
"log"
"net/http"
"os"
"os/exec"
"path"
"path/filepath"
)
const (
	// projectDirName is the guiHive repository path relative to a Go
	// workspace src directory; used as a fallback to locate the project.
	projectDirName = "github.com/Ensembl/guiHive"
)

var (
	// port is the TCP port the HTTP server listens on (set in init).
	port string
	// isVersion matches purely numeric eHive version strings in URLs.
	isVersion = regexp.MustCompile(`^[0-9]+$`)
)

// init registers and parses the command-line flags before main runs.
func init() {
	flag.StringVar(&port, "port", "8080", "Port to listen (defaults to 8080)")
	flag.Parse()
}
// checkError aborts the program via log.Fatal when err is non-nil,
// printing the message s, the error itself, and any extra context
// strings. It is a no-op for a nil error.
func checkError(s string, err error, ss ...string) {
	if err == nil {
		return
	}
	log.Fatal(s, err, ss)
}
// sortableFiles sorts a list of os.FileInfo by their names interpreted
// as integers (version directories are named "1", "2", ...).
type sortableFiles []os.FileInfo

// Len returns the number of entries.
func (s sortableFiles) Len() int { return len(s) }

// Less reports whether entry i sorts before entry j, comparing the
// names numerically. Aborts the program when a name is not numeric.
func (s sortableFiles) Less(i, j int) bool {
	left, err := strconv.Atoi(s[i].Name())
	checkError(fmt.Sprintf("Dir name %s can't be converted to int", s[i].Name()), err)
	right, err := strconv.Atoi(s[j].Name())
	checkError(fmt.Sprintf("Dir name %s can't be converted to int", s[j].Name()), err)
	return left < right
}

// Swap exchanges entries i and j.
func (s sortableFiles) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// parseVersion returns the version number given in the URL path
// ("/versions/<n>/..."), or, when the URL does not carry a numeric
// version, the highest-numbered directory found under
// $GUIHIVE_PROJECTDIR/versions/.
func parseVersion(r *http.Request) string {
	parts := strings.SplitN(r.URL.Path, "/", 4)
	version := parts[2]
	if isVersion.MatchString(version) {
		return version
	}
	// Fall back to the latest available version directory.
	path := os.Getenv("GUIHIVE_PROJECTDIR") + "/versions/"
	dir, err := os.Open(path)
	checkError("Can't open dir "+path, err)
	defer dir.Close() // BUG FIX: the directory handle was leaked before
	files, err := dir.Readdir(-1)
	checkError("Can't read dir "+path, err)
	sort.Sort(sortableFiles(files))
	version = files[len(files)-1].Name()
	debug("Will use the latest version %s", version)
	return version
	// (the old trailing `return ""` after the if/else was unreachable)
}
// unknown replies to requests for eHive versions that guiHive does not
// support, echoing the URL and the resolved version number.
func unknown(w http.ResponseWriter, r *http.Request) {
	v := parseVersion(r)
	fmt.Fprintln(w, r.URL)
	fmt.Fprintf(w, "version %s is currently not supported by guiHive\n", v)
}
// scriptHandler executes the helper script addressed by the request URL
// (relative to $GUIHIVE_PROJECTDIR), passing the form values as a single
// JSON argument, and writes the script's stdout to the response. The
// eHive/guiHive environment (PERL5LIB, EHIVE_ROOT_DIR, ...) is derived
// from the version embedded in the URL.
func scriptHandler(w http.ResponseWriter, r *http.Request) {
	err := r.ParseForm()
	defer r.Body.Close()
	checkError("Can't parse Form: ", err)
	debug("METHOD: %s", r.Method)
	debug("URL: %s", r.URL)

	fname := os.Getenv("GUIHIVE_PROJECTDIR") + r.URL.Path
	args, err := json.Marshal(r.Form)
	checkError("Can't Marshal JSON:", err)
	debug("EXECUTING SCRIPT: %s", fname)
	debug("ARGS: %s", args)

	version := parseVersion(r)
	debug("VERSION: %s", version)
	versionRootDir := os.Getenv("GUIHIVE_PROJECTDIR") + "/versions/" + version + "/"
	ehiveRootDir := os.Getenv("GUIHIVE_PROJECTDIR") + "/ensembl-hive/" + version + "/"
	ehiveRootLib := ehiveRootDir + "/modules"
	guihiveRootLib := versionRootDir + "/scripts/lib"
	newPerl5Lib := addPerl5Lib(ehiveRootLib + ":" + guihiveRootLib)
	debug("EHIVE_ROOT_DIR: %s", ehiveRootDir)
	debug("NEW_PERL5LIB: %s", newPerl5Lib)

	var outMsg, errMsg bytes.Buffer
	cmd := exec.Command(fname, string(args))
	// The script runs with a minimal, explicit environment instead of
	// inheriting the server's.
	cmd.Env = []string{
		"PERL5LIB=" + newPerl5Lib,
		"EHIVE_ROOT_DIR=" + ehiveRootDir,
		"GUIHIVE_BASEDIR=" + versionRootDir,
		"PATH=" + os.Getenv("PATH"),
	}
	cmd.Stdout = &outMsg
	cmd.Stderr = &errMsg
	if err := cmd.Start(); err != nil {
		log.Println("Error Starting Command: ", err)
	}
	if err := cmd.Wait(); err != nil {
		log.Println("Error Executing Command: ", err)
	}
	debug("OUTMSG: %s", outMsg.String())
	debug("ERRMSG: %s", errMsg.String())
	// outMsg.String() replaces the redundant string(outMsg.Bytes()).
	fmt.Fprintln(w, outMsg.String())
}
// pathExists reports whether name can be stat'ed. Any Stat error —
// including "does not exist" — yields false, exactly as the original
// two-branch version did.
func pathExists(name string) bool {
	_, err := os.Stat(name)
	return err == nil
}
// guessProjectDir locates the guiHive project directory. It first looks
// next to the server binary (../index.html); failing that, it scans the
// Go workspace src directories for projectDirName.
func guessProjectDir() (string, error) {
	serverDir := filepath.Dir(os.Args[0])
	pathToIndex := serverDir + "/../index.html"
	absPathToIndex, err := filepath.Abs(pathToIndex)
	if err != nil {
		debug("ABSPATHTOINDEX: %s\n", absPathToIndex)
		return "", err
	}
	if pathExists(absPathToIndex) {
		return path.Clean(absPathToIndex + "/.."), nil
	}
	for _, srcdir := range build.Default.SrcDirs() {
		dirName := path.Join(srcdir, projectDirName)
		// Was fmt.Println — routed through the debug helper so it obeys
		// the same verbosity switch as every other diagnostic message.
		debug("DIRNAME: %s", dirName)
		if pathExists(dirName) {
			return dirName, nil
		}
	}
	return "", errors.New("Project directory not found")
}
// setEnvVar guesses the project directory and exports it (with a
// trailing slash) as GUIHIVE_PROJECTDIR for the rest of the server.
func setEnvVar() error {
	projectDirectory, err := guessProjectDir()
	if err != nil {
		return err
	}
	// GUIHIVE_PROJECTDIR is read by every handler that builds paths.
	if err = os.Setenv("GUIHIVE_PROJECTDIR", projectDirectory+"/"); err != nil {
		return err
	}
	debug("PROJECT_DIRECTORY: %s\n", os.Getenv("GUIHIVE_PROJECTDIR"))
	return nil
}
// addPerl5Lib prepends newDir to the current PERL5LIB environment value
// and returns the combined search path (just newDir when PERL5LIB is
// empty or unset).
func addPerl5Lib(newDir string) string {
	if existing := os.Getenv("PERL5LIB"); existing != "" {
		return newDir + ":" + existing
	}
	return newDir
}
func main() {
// Fix environmental variables
errV := setEnvVar()
checkError("Problem setting environmental variables: ", errV)
relPath := os.Getenv("GUIHIVE_PROJECTDIR")
http.Handle("/", http.FileServer(http.Dir(relPath)))
http.HandleFunc("/versions/", unknown)
http.Handle("/styles/", http.FileServer(http.Dir(relPath)))
http.Handle("/javascript/", http.FileServer(http.Dir(relPath)))
http.Handle("/images/", http.FileServer(http.Dir(relPath)))
http.HandleFunc("/scripts/", scriptHandler)
versionRootDir := relPath + "/versions/"
dir, terr := os.Open(versionRootDir)
checkError("Can't open dir " + versionRootDir, terr)
files, terr := dir.Readdir(-1)
for _, verdir := range files {
if ((verdir.Mode() & os.ModeSymlink) != 0) {
targetF, _ := os.Readlink(versionRootDir + verdir.Name())
debug("Found a symlink from %s to version %s", verdir.Name(), targetF)
http.Handle(fmt.Sprintf("/versions/%s/", verdir.Name()), http.FileServer(http.Dir(relPath)))
http.Handle(fmt.Sprintf("/versions/%s/javascript/", verdir.Name()), http.FileServer(http.Dir(relPath)))
http.HandleFunc(fmt.Sprintf("/versions/%s/scripts/", verdir.Name()), scriptHandler)
} else if (verdir.IsDir()) {
debug("Found eHive version %s", verdir.Name())
http.Handle(fmt.Sprintf("/versions/%s/", verdir.Name()), http.FileServer(http.Dir(relPath)))
http.Handle(fmt.Sprintf("/versions/%s/javascript/", verdir.Name()), http.FileServer(http.Dir(relPath)))
http.HandleFunc(fmt.Sprintf("/versions/%s/scripts/", verdir.Name()), scriptHandler)
}
}
debug("Listening to port: %s", port)
err := http.ListenAndServe(":"+port, nil)
checkError("ListenAndServe ", err)
}
Factorized the code that sets the http handlers for real checkouts and symlinks
/* Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
/* Copyright [2016] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"strings"
"bytes"
"regexp"
"strconv"
"sort"
"encoding/json"
"errors"
"flag"
"fmt"
"go/build"
"log"
"net/http"
"os"
"os/exec"
"path"
"path/filepath"
)
const (
	// projectDirName is the guiHive repository path relative to a Go
	// workspace src directory; used as a fallback to locate the project.
	projectDirName = "github.com/Ensembl/guiHive"
)

var (
	// port is the TCP port the HTTP server listens on (set in init).
	port string
	// isVersion matches purely numeric eHive version strings in URLs.
	isVersion = regexp.MustCompile(`^[0-9]+$`)
)

// init registers and parses the command-line flags before main runs.
func init() {
	flag.StringVar(&port, "port", "8080", "Port to listen (defaults to 8080)")
	flag.Parse()
}
// checkError aborts the program via log.Fatal when err is non-nil,
// printing the message s, the error itself, and any extra context
// strings. It is a no-op for a nil error.
func checkError(s string, err error, ss ...string) {
	if err == nil {
		return
	}
	log.Fatal(s, err, ss)
}
// sortableFiles sorts a list of os.FileInfo by their names interpreted
// as integers (version directories are named "1", "2", ...).
type sortableFiles []os.FileInfo

// Len returns the number of entries.
func (s sortableFiles) Len() int { return len(s) }

// Less reports whether entry i sorts before entry j, comparing the
// names numerically. Aborts the program when a name is not numeric.
func (s sortableFiles) Less(i, j int) bool {
	left, err := strconv.Atoi(s[i].Name())
	checkError(fmt.Sprintf("Dir name %s can't be converted to int", s[i].Name()), err)
	right, err := strconv.Atoi(s[j].Name())
	checkError(fmt.Sprintf("Dir name %s can't be converted to int", s[j].Name()), err)
	return left < right
}

// Swap exchanges entries i and j.
func (s sortableFiles) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// parseVersion returns the version number given in the URL path
// ("/versions/<n>/..."), or, when the URL does not carry a numeric
// version, the highest-numbered directory found under
// $GUIHIVE_PROJECTDIR/versions/.
func parseVersion(r *http.Request) string {
	parts := strings.SplitN(r.URL.Path, "/", 4)
	version := parts[2]
	if isVersion.MatchString(version) {
		return version
	}
	// Fall back to the latest available version directory.
	path := os.Getenv("GUIHIVE_PROJECTDIR") + "/versions/"
	dir, err := os.Open(path)
	checkError("Can't open dir "+path, err)
	defer dir.Close() // BUG FIX: the directory handle was leaked before
	files, err := dir.Readdir(-1)
	checkError("Can't read dir "+path, err)
	sort.Sort(sortableFiles(files))
	version = files[len(files)-1].Name()
	debug("Will use the latest version %s", version)
	return version
	// (the old trailing `return ""` after the if/else was unreachable)
}
// unknown replies to requests for eHive versions that guiHive does not
// support, echoing the URL and the resolved version number.
func unknown(w http.ResponseWriter, r *http.Request) {
	v := parseVersion(r)
	fmt.Fprintln(w, r.URL)
	fmt.Fprintf(w, "version %s is currently not supported by guiHive\n", v)
}
// scriptHandler executes the helper script addressed by the request URL
// (relative to $GUIHIVE_PROJECTDIR), passing the form values as a single
// JSON argument, and writes the script's stdout to the response. The
// eHive/guiHive environment (PERL5LIB, EHIVE_ROOT_DIR, ...) is derived
// from the version embedded in the URL.
func scriptHandler(w http.ResponseWriter, r *http.Request) {
	err := r.ParseForm()
	defer r.Body.Close()
	checkError("Can't parse Form: ", err)
	debug("METHOD: %s", r.Method)
	debug("URL: %s", r.URL)

	fname := os.Getenv("GUIHIVE_PROJECTDIR") + r.URL.Path
	args, err := json.Marshal(r.Form)
	checkError("Can't Marshal JSON:", err)
	debug("EXECUTING SCRIPT: %s", fname)
	debug("ARGS: %s", args)

	version := parseVersion(r)
	debug("VERSION: %s", version)
	versionRootDir := os.Getenv("GUIHIVE_PROJECTDIR") + "/versions/" + version + "/"
	ehiveRootDir := os.Getenv("GUIHIVE_PROJECTDIR") + "/ensembl-hive/" + version + "/"
	ehiveRootLib := ehiveRootDir + "/modules"
	guihiveRootLib := versionRootDir + "/scripts/lib"
	newPerl5Lib := addPerl5Lib(ehiveRootLib + ":" + guihiveRootLib)
	debug("EHIVE_ROOT_DIR: %s", ehiveRootDir)
	debug("NEW_PERL5LIB: %s", newPerl5Lib)

	var outMsg, errMsg bytes.Buffer
	cmd := exec.Command(fname, string(args))
	// The script runs with a minimal, explicit environment instead of
	// inheriting the server's.
	cmd.Env = []string{
		"PERL5LIB=" + newPerl5Lib,
		"EHIVE_ROOT_DIR=" + ehiveRootDir,
		"GUIHIVE_BASEDIR=" + versionRootDir,
		"PATH=" + os.Getenv("PATH"),
	}
	cmd.Stdout = &outMsg
	cmd.Stderr = &errMsg
	if err := cmd.Start(); err != nil {
		log.Println("Error Starting Command: ", err)
	}
	if err := cmd.Wait(); err != nil {
		log.Println("Error Executing Command: ", err)
	}
	debug("OUTMSG: %s", outMsg.String())
	debug("ERRMSG: %s", errMsg.String())
	// outMsg.String() replaces the redundant string(outMsg.Bytes()).
	fmt.Fprintln(w, outMsg.String())
}
// pathExists reports whether name can be stat'ed. Any Stat error —
// including "does not exist" — yields false, exactly as the original
// two-branch version did.
func pathExists(name string) bool {
	_, err := os.Stat(name)
	return err == nil
}
// guessProjectDir locates the guiHive project directory. It first looks
// next to the server binary (../index.html); failing that, it scans the
// Go workspace src directories for projectDirName.
func guessProjectDir() (string, error) {
	serverDir := filepath.Dir(os.Args[0])
	pathToIndex := serverDir + "/../index.html"
	absPathToIndex, err := filepath.Abs(pathToIndex)
	if err != nil {
		debug("ABSPATHTOINDEX: %s\n", absPathToIndex)
		return "", err
	}
	if pathExists(absPathToIndex) {
		return path.Clean(absPathToIndex + "/.."), nil
	}
	for _, srcdir := range build.Default.SrcDirs() {
		dirName := path.Join(srcdir, projectDirName)
		// Was fmt.Println — routed through the debug helper so it obeys
		// the same verbosity switch as every other diagnostic message.
		debug("DIRNAME: %s", dirName)
		if pathExists(dirName) {
			return dirName, nil
		}
	}
	return "", errors.New("Project directory not found")
}
// setEnvVar guesses the project directory and exports it (with a
// trailing slash) as GUIHIVE_PROJECTDIR for the rest of the server.
func setEnvVar() error {
	projectDirectory, err := guessProjectDir()
	if err != nil {
		return err
	}
	// GUIHIVE_PROJECTDIR is read by every handler that builds paths.
	if err = os.Setenv("GUIHIVE_PROJECTDIR", projectDirectory+"/"); err != nil {
		return err
	}
	debug("PROJECT_DIRECTORY: %s\n", os.Getenv("GUIHIVE_PROJECTDIR"))
	return nil
}
// addPerl5Lib prepends newDir to the current PERL5LIB environment value
// and returns the combined search path (just newDir when PERL5LIB is
// empty or unset).
func addPerl5Lib(newDir string) string {
	if existing := os.Getenv("PERL5LIB"); existing != "" {
		return newDir + ":" + existing
	}
	return newDir
}
// main sets up the environment, registers the global and per-version
// HTTP handlers (validating that a matching ensembl-hive checkout
// exists for each version), and serves forever on the configured port.
func main() {
	// Fix environmental variables
	errV := setEnvVar()
	checkError("Problem setting environmental variables: ", errV)
	relPath := os.Getenv("GUIHIVE_PROJECTDIR")
	http.Handle("/", http.FileServer(http.Dir(relPath)))
	http.HandleFunc("/versions/", unknown)
	http.Handle("/styles/", http.FileServer(http.Dir(relPath)))
	http.Handle("/javascript/", http.FileServer(http.Dir(relPath)))
	http.Handle("/images/", http.FileServer(http.Dir(relPath)))
	http.HandleFunc("/scripts/", scriptHandler)

	versionRootDir := relPath + "/versions/"
	dir, terr := os.Open(versionRootDir)
	checkError("Can't open dir "+versionRootDir, terr)
	files, terr := dir.Readdir(-1)
	// BUG FIX: the Readdir error was previously ignored, and the
	// directory handle was never closed.
	checkError("Can't read dir "+versionRootDir, terr)
	dir.Close()

	for _, verdir := range files {
		// A version entry is valid when it is a directory or a symlink;
		// anything else under versions/ is skipped.
		isvalid := true
		if verdir.Mode()&os.ModeSymlink != 0 {
			targetF, _ := os.Readlink(versionRootDir + verdir.Name())
			debug("Found a symlink from guiHive %s to version %s", verdir.Name(), targetF)
		} else if verdir.IsDir() {
			debug("Found guiHive version %s", verdir.Name())
		} else {
			isvalid = false
		}
		if isvalid {
			// Each guiHive version needs the matching eHive checkout.
			ehiveDir := os.Getenv("GUIHIVE_PROJECTDIR") + "/ensembl-hive/" + verdir.Name()
			_, err := os.Stat(ehiveDir)
			checkError(ehiveDir+" does not exist: ", err)
			debug("Found eHive version %s", verdir.Name())
			http.Handle(fmt.Sprintf("/versions/%s/", verdir.Name()), http.FileServer(http.Dir(relPath)))
			http.Handle(fmt.Sprintf("/versions/%s/javascript/", verdir.Name()), http.FileServer(http.Dir(relPath)))
			http.HandleFunc(fmt.Sprintf("/versions/%s/scripts/", verdir.Name()), scriptHandler)
		}
	}
	debug("Listening to port: %s", port)
	err := http.ListenAndServe(":"+port, nil)
	checkError("ListenAndServe ", err)
}
|
// gorewind is an event store server written in Python that talks ZeroMQ.
// Copyright (C) 2013 Jens Rantil
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
// Contains the ZeroMQ server loop. Deals with incoming requests and
// delegates them to the event store. Also publishes newly stored events
// using a PUB socket.
//
// See README file for an up-to-date documentation of the ZeroMQ wire
// format.
package server
import (
"bytes"
"errors"
"log"
"container/list"
"time"
"sync"
zmq "github.com/alecthomas/gozmq"
"github.com/JensRantil/gorewind/eventstore"
)
// InitParams are the parameters required for initializing the server
// (the old comment misnamed them "StartParams").
type InitParams struct {
	// The event store to use as backend.
	Store *eventstore.EventStore
	// The ZeroMQ path that the command receiving socket will bind
	// to.
	CommandSocketZPath *string
	// The ZeroMQ path that the event publishing socket will bind
	// to.
	EvPubSocketZPath *string
}
// checkAllInitParamsSet verifies that every required field of p is
// non-nil, returning a descriptive error for the first missing one.
func checkAllInitParamsSet(p *InitParams) error {
	switch {
	case p.Store == nil:
		return errors.New("Missing param: Store")
	case p.CommandSocketZPath == nil:
		return errors.New("Missing param: CommandSocketZPath")
	case p.EvPubSocketZPath == nil:
		return errors.New("Missing param: EvPubSocketZPath")
	}
	return nil
}
// A server instance. Can be run.
type Server struct {
	params InitParams // copy of the parameters given to New
	evpubsock *zmq.Socket // PUB socket publishing stored events
	commandsock *zmq.Socket // ROUTER socket receiving commands
	context *zmq.Context // owning ZeroMQ context
	runningMutex sync.Mutex // guards access to `running`
	running bool // true while Start() is executing
	stopChan chan bool // stop handshake channel (see Stop / loopServer)
}
// IsRunning returns true if the server is running, false otherwise.
func (v *Server) IsRunning() bool {
	v.runningMutex.Lock()
	state := v.running
	v.runningMutex.Unlock()
	return state
}
// Stop stops a running server. Blocks until the server is stopped. If
// the server is not running, an error is returned.
func (v* Server) Stop() error {
	if !v.IsRunning() {
		return errors.New("Not running.")
	}
	// Non-blocking send: only one caller gets to signal the stop;
	// concurrent callers learn that stopping is already in progress.
	select {
	case v.stopChan <- true:
	default:
		return errors.New("Stop already signalled.")
	}
	// The server loop echoes the signal back on the same channel once
	// it has actually exited (see loopServer).
	<-v.stopChan
	// v.running is modified by Server.Run(...)
	if v.IsRunning() {
		return errors.New("Signalled stopped, but never stopped.")
	}
	return nil
}
// New initializes a new event store server and returns a handle to it.
// The event store is not started; it's up to the caller to execute
// Run() on the server handle. On any initialization failure the
// partially created ZeroMQ state is torn down before returning.
func New(params *InitParams) (*Server, error) {
	if params == nil {
		return nil, errors.New("Missing init params")
	}
	if err := checkAllInitParamsSet(params); err != nil {
		return nil, err
	}

	server := Server{
		params:  *params,
		running: false,
	}

	// Cleanup runs on every early (error) return below.
	allOkay := false
	defer func() {
		if !allOkay {
			server.closeZmq()
		}
	}()

	context, err := zmq.NewContext()
	if err != nil {
		return nil, err
	}
	server.context = context

	commandsock, err := context.NewSocket(zmq.ROUTER)
	if err != nil {
		return nil, err
	}
	server.commandsock = commandsock
	if err = commandsock.Bind(*params.CommandSocketZPath); err != nil {
		return nil, err
	}

	evpubsock, err := context.NewSocket(zmq.PUB)
	if err != nil {
		return nil, err
	}
	server.evpubsock = evpubsock
	// BUG FIX: this previously returned the stale `err` (nil at this
	// point) instead of the bind error, silently swallowing bind
	// failures.
	if binderr := evpubsock.Bind(*params.EvPubSocketZPath); binderr != nil {
		return nil, binderr
	}

	allOkay = true
	return &server, nil
}
// closeZmq tears down whatever ZeroMQ state has been created so far.
// Each field may be nil when initialization failed part-way through
// (New's cleanup defer calls this), so every close is nil-guarded —
// the previous version dereferenced the pointers unconditionally and
// panicked on partial initialization.
func (v *Server) closeZmq() {
	if v.evpubsock != nil {
		(*v.evpubsock).Close()
		v.evpubsock = nil
	}
	if v.commandsock != nil {
		(*v.commandsock).Close()
		v.commandsock = nil
	}
	if v.context != nil {
		(*v.context).Close()
		v.context = nil
	}
}
// setRunningState records the running flag under the mutex.
func (v *Server) setRunningState(newState bool) {
	v.runningMutex.Lock()
	v.running = newState
	v.runningMutex.Unlock()
}
// Start runs the server loop that distributes requests to workers. It
// blocks until the server is stopped; the running flag is held true for
// the duration of the call.
func (v *Server) Start() {
	v.setRunningState(true)
	defer v.setRunningState(false)
	loopServer(v.params.Store, *v.evpubsock, *v.commandsock, v.stopChan)
}
// The result of an asynchronous zmq.Poll call.
type zmqPollResult struct {
	// err is non-nil when the poll itself failed; nil means at least
	// one polled socket reported activity.
	err error
}
// asyncPoll polls a bunch of ZeroMQ sockets and notifies the result
// through `notifier`. This makes it possible to combine ZeroMQ polling
// with Go's own built-in channels.
//
// The function returns after delivering one result; the caller re-arms
// polling with a fresh goroutine once the result has been handled (see
// loopServer). It also returns on a stop signal, echoing the signal
// back so stopPoller can block until real termination.
func asyncPoll(notifier chan zmqPollResult, items zmq.PollItems, stop chan bool) {
	for {
		// Wake up at least once per second so the stop channel is
		// checked even when the sockets are idle.
		timeout := time.Duration(1) * time.Second
		count, err := zmq.Poll(items, timeout)
		if count > 0 || err != nil {
			notifier <- zmqPollResult{err}
			// BUG FIX: previously the loop kept polling here while the
			// caller ALSO spawned a replacement poller, multiplying
			// concurrent pollers on the same sockets with every event.
			// Returning keeps exactly one poller alive at a time.
			return
		}
		select {
		case <-stop:
			stop <- true
			return
		default:
		}
	}
}
// stopPoller signals the asyncPoll goroutine to stop and blocks until
// the poller echoes the signal back, i.e. until it has really exited.
func stopPoller(cancelChan chan bool) {
	cancelChan <- true
	<-cancelChan
}
// The core ZeroMQ messaging loop. Handles requests and responses
// asynchronously using the router socket. Every request is delegated to
// a goroutine for maximum concurrency.
//
// `gozmq` does currently not support copy-free messages/frames. This
// means that every message passing through this function needs to be
// copied in-memory. If this becomes a bottleneck in the future,
// multiple router sockets can be hooked to this final router to scale
// message copying.
//
// TODO: Make this a type function of `Server` to remove a lot of
// parameters.
func loopServer(estore *eventstore.EventStore, evpubsock, frontend zmq.Socket,
	stop chan bool) {
	toPoll := zmq.PollItems{
		// BUG FIX: the field key was written `zmq.Events:`, which is
		// not valid struct-literal syntax (field keys may not be
		// package-qualified) and does not compile.
		zmq.PollItem{Socket: &frontend, Events: zmq.POLLIN},
	}
	// Newly stored events are forwarded to the PUB socket.
	pubchan := make(chan eventstore.StoredEvent)
	estore.RegisterPublishedEventsChannel(pubchan)
	go publishAllSavedEvents(pubchan, evpubsock)
	pollchan := make(chan zmqPollResult)
	respchan := make(chan zMsg)
	pollCancel := make(chan bool)
	defer stopPoller(pollCancel)
	go asyncPoll(pollchan, toPoll, pollCancel)
	for {
		select {
		case res := <-pollchan:
			if res.err != nil {
				log.Print("Could not poll:", res.err)
			}
			if res.err == nil && toPoll[0].REvents&zmq.POLLIN != 0 {
				msg, _ := toPoll[0].Socket.RecvMultipart(0)
				zmsg := zMsg(msg)
				go handleRequest(respchan, estore, zmsg)
			}
			// Re-arm polling now that the previous result is consumed.
			go asyncPoll(pollchan, toPoll, pollCancel)
		case frames := <-respchan:
			if err := frontend.SendMultipart(frames, 0); err != nil {
				log.Println(err)
			}
		case <-stop:
			// Echo back so Stop() knows the loop has exited.
			stop <- true
			return
		}
	}
}
// Publishes stored events to event listeners.
//
// Pops previously stored messages off a channel and published them to a
// ZeroMQ socket. Each published message carries three frames: stream
// name, event id, event payload.
func publishAllSavedEvents(toPublish chan eventstore.StoredEvent, evpub zmq.Socket) {
	// NOTE(review): msg is reused across iterations; this assumes
	// SendMultipart is done with the frames before the next event
	// overwrites them — confirm against gozmq's copying semantics.
	msg := make(zMsg, 3)
	for {
		stored := <-toPublish
		msg[0] = stored.Event.Stream
		msg[1] = stored.Id
		msg[2] = stored.Event.Data
		if err := evpub.SendMultipart(msg, 0); err != nil {
			log.Println(err)
		}
	}
}
// A single frame in a ZeroMQ message.
type zFrame []byte

// A ZeroMQ message.
//
// I wish it could have been `[]zFrame`, but that would make conversion
// from `[][]byte` pretty messy[1].
//
// [1] http://stackoverflow.com/a/15650327/260805
type zMsg [][]byte
// respondError logs errstr and pushes an ERROR response (routing
// envelope + "ERROR <errstr>") to respchan. Factored out of the four
// identical chunks the old handleRequest carried (see its TODOs).
func respondError(respchan chan zMsg, resptemplate *list.List, errstr string) {
	log.Println(errstr)
	response := copyList(resptemplate)
	response.PushBack(zFrame("ERROR " + errstr))
	respchan <- listToFrames(response)
}

// Handles a single ZeroMQ RES/REQ loop synchronously.
//
// The full request message stored in `msg` and the full ZeroMQ response
// is pushed to `respchan`. The function does not return any error
// because it is expected to be called asynchronously as a goroutine.
func handleRequest(respchan chan zMsg, estore *eventstore.EventStore, msg zMsg) {
	// TODO: Rename to 'framelist'
	parts := list.New()
	for _, msgpart := range msg {
		parts.PushBack(msgpart)
	}

	// Peel off the routing envelope — every frame up to and including
	// the first empty delimiter frame — to echo it back in responses.
	resptemplate := list.New()
	emptyFrame := zFrame("")
	for {
		resptemplate.PushBack(parts.Remove(parts.Front()))
		if bytes.Equal(parts.Front().Value.(zFrame), emptyFrame) {
			break
		}
	}

	if parts.Len() == 0 {
		respondError(respchan, resptemplate, "Incoming command was empty. Ignoring it.")
		return
	}

	command := string(parts.Front().Value.(zFrame))
	switch command {
	case "PUBLISH":
		parts.Remove(parts.Front())
		if parts.Len() != 2 {
			// TODO: Constantify this error message
			respondError(respchan, resptemplate, "Wrong number of frames for PUBLISH.")
		} else {
			estream := parts.Remove(parts.Front())
			data := parts.Remove(parts.Front())
			newevent := eventstore.Event{
				estream.(eventstore.StreamName),
				data.(zFrame),
			}
			newId, err := estore.Add(newevent)
			if err != nil {
				respondError(respchan, resptemplate, err.Error())
			} else {
				// The event was added; acknowledge with its new id.
				response := copyList(resptemplate)
				response.PushBack(zFrame("PUBLISHED"))
				response.PushBack(zFrame(newId))
				respchan <- listToFrames(response)
			}
		}
	case "QUERY":
		parts.Remove(parts.Front())
		if parts.Len() != 3 {
			// TODO: Constantify this error message
			respondError(respchan, resptemplate, "Wrong number of frames for QUERY.")
		} else {
			estream := parts.Remove(parts.Front())
			fromid := parts.Remove(parts.Front())
			toid := parts.Remove(parts.Front())
			req := eventstore.QueryRequest{
				Stream: estream.(zFrame),
				FromId: fromid.(zFrame),
				ToId:   toid.(zFrame),
			}
			events, err := estore.Query(req)
			if err != nil {
				respondError(respchan, resptemplate, err.Error())
			} else {
				// Stream one EVENT response per match, then END.
				// (Frames are pushed as zFrame — the old code pushed
				// []byte("EVENT"), inconsistent with every other frame.)
				for eventdata := range events {
					response := copyList(resptemplate)
					response.PushBack(zFrame("EVENT"))
					response.PushBack(eventdata.Id)
					response.PushBack(eventdata.Data)
					respchan <- listToFrames(response)
				}
				response := copyList(resptemplate)
				response.PushBack(zFrame("END"))
				respchan <- listToFrames(response)
			}
		}
	default:
		// TODO: Move these error strings out as constants of
		// this package.
		respondError(respchan, resptemplate, "Unknown request type.")
	}
}
// listToFrames converts a doubly linked list of message frames to a
// slice of message frames.
func listToFrames(l *list.List) zMsg {
	frames := make(zMsg, l.Len())
	i := 0
	for e := l.Front(); e != nil; e = e.Next() {
		// Frames may be stored either as zFrame or as raw []byte (the
		// routing envelope is pushed straight from the incoming zMsg);
		// a bare zFrame assertion would panic on the latter.
		switch f := e.Value.(type) {
		case zFrame:
			frames[i] = f
		case []byte:
			frames[i] = f
		}
		// BUG FIX: `i` was never incremented, so every frame was
		// written to frames[0] and the remaining slots stayed nil.
		i++
	}
	return frames
}
// Helper function for copying a doubly linked list.
func copyList(l *list.List) *list.List {
replica := list.New()
replica.PushBackList(l)
return replica
}
Minor: Logging improvement
// gorewind is an event store server written in Python that talks ZeroMQ.
// Copyright (C) 2013 Jens Rantil
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
// Contains the ZeroMQ server loop. Deals with incoming requests and
// delegates them to the event store. Also publishes newly stored events
// using a PUB socket.
//
// See README file for an up-to-date documentation of the ZeroMQ wire
// format.
package server
import (
"bytes"
"errors"
"log"
"container/list"
"time"
"sync"
zmq "github.com/alecthomas/gozmq"
"github.com/JensRantil/gorewind/eventstore"
)
// InitParams are the parameters required for initializing the server
// (the old comment misnamed them "StartParams").
type InitParams struct {
	// The event store to use as backend.
	Store *eventstore.EventStore
	// The ZeroMQ path that the command receiving socket will bind
	// to.
	CommandSocketZPath *string
	// The ZeroMQ path that the event publishing socket will bind
	// to.
	EvPubSocketZPath *string
}
// checkAllInitParamsSet verifies that every required field of p is
// non-nil, returning a descriptive error for the first missing one.
func checkAllInitParamsSet(p *InitParams) error {
	switch {
	case p.Store == nil:
		return errors.New("Missing param: Store")
	case p.CommandSocketZPath == nil:
		return errors.New("Missing param: CommandSocketZPath")
	case p.EvPubSocketZPath == nil:
		return errors.New("Missing param: EvPubSocketZPath")
	}
	return nil
}
// A server instance. Can be run.
type Server struct {
	params      InitParams   // copy of the parameters given to New
	evpubsock   *zmq.Socket  // PUB socket publishing stored events
	commandsock *zmq.Socket  // ROUTER socket receiving commands
	context     *zmq.Context // owning ZeroMQ context

	// waiter lets callers block until the serving goroutine finishes.
	// BUG FIX: Server.Wait() references v.waiter, but this field was
	// missing from the struct, so the file did not compile.
	waiter sync.WaitGroup

	runningMutex sync.Mutex // guards access to `running`
	running      bool       // true while Start() is executing
	stopChan     chan bool  // stop handshake channel (see Stop)
}
// IsRunning returns true if the server is running, false otherwise.
func (v *Server) IsRunning() bool {
	v.runningMutex.Lock()
	state := v.running
	v.runningMutex.Unlock()
	return state
}
// Wait blocks until the server's wait group is released.
// NOTE(review): nothing visible in this file calls waiter.Add/Done —
// confirm that the serving path increments the group, otherwise Wait
// returns immediately.
func (v* Server) Wait() {
	v.waiter.Wait()
}
// Stop stops a running server. Blocks until the server is stopped. If
// the server is not running, an error is returned.
func (v* Server) Stop() error {
	if !v.IsRunning() {
		return errors.New("Server not running.")
	}
	// Non-blocking send: only one caller gets to signal the stop;
	// concurrent callers learn that stopping is already in progress.
	select {
	case v.stopChan <- true:
	default:
		return errors.New("Stop already signalled.")
	}
	// The server loop echoes the signal back on the same channel once
	// it has actually exited (see loopServer).
	<-v.stopChan
	// v.running is modified by Server.Run(...)
	if v.IsRunning() {
		return errors.New("Signalled stopped, but never stopped.")
	}
	return nil
}
// New initializes a new event store server and returns a handle to it.
// The event store is not started; it's up to the caller to execute
// Run() on the server handle. On any initialization failure the
// partially created ZeroMQ state is torn down before returning.
func New(params *InitParams) (*Server, error) {
	if params == nil {
		return nil, errors.New("Missing init params")
	}
	if err := checkAllInitParamsSet(params); err != nil {
		return nil, err
	}

	server := Server{
		params:  *params,
		running: false,
	}

	// Cleanup runs on every early (error) return below.
	allOkay := false
	defer func() {
		if !allOkay {
			server.closeZmq()
		}
	}()

	context, err := zmq.NewContext()
	if err != nil {
		return nil, err
	}
	server.context = context

	commandsock, err := context.NewSocket(zmq.ROUTER)
	if err != nil {
		return nil, err
	}
	server.commandsock = commandsock
	if err = commandsock.Bind(*params.CommandSocketZPath); err != nil {
		return nil, err
	}

	evpubsock, err := context.NewSocket(zmq.PUB)
	if err != nil {
		return nil, err
	}
	server.evpubsock = evpubsock
	// BUG FIX: this previously returned the stale `err` (nil at this
	// point) instead of the bind error, silently swallowing bind
	// failures.
	if binderr := evpubsock.Bind(*params.EvPubSocketZPath); binderr != nil {
		return nil, binderr
	}

	allOkay = true
	return &server, nil
}
// closeZmq tears down whatever ZeroMQ state has been created so far.
// Each field may be nil when initialization failed part-way through
// (New's cleanup defer calls this), so every close is nil-guarded —
// the previous version dereferenced the pointers unconditionally and
// panicked on partial initialization.
func (v *Server) closeZmq() {
	if v.evpubsock != nil {
		(*v.evpubsock).Close()
		v.evpubsock = nil
	}
	if v.commandsock != nil {
		(*v.commandsock).Close()
		v.commandsock = nil
	}
	if v.context != nil {
		(*v.context).Close()
		v.context = nil
	}
}
// setRunningState records the running flag under the mutex.
func (v *Server) setRunningState(newState bool) {
	v.runningMutex.Lock()
	v.running = newState
	v.runningMutex.Unlock()
}
// Start runs the server loop that distributes requests to workers. It
// blocks until the server is stopped; the running flag is held true for
// the duration of the call.
func (v *Server) Start() {
	v.setRunningState(true)
	defer v.setRunningState(false)
	loopServer(v.params.Store, *v.evpubsock, *v.commandsock, v.stopChan)
}
// The result of an asynchronous zmq.Poll call.
type zmqPollResult struct {
	// err is non-nil when the poll itself failed; nil means at least
	// one polled socket reported activity.
	err error
}
// asyncPoll polls a bunch of ZeroMQ sockets and notifies the result
// through `notifier`. This makes it possible to combine ZeroMQ polling
// with Go's own built-in channels.
//
// The function returns after delivering one result; the caller re-arms
// polling with a fresh goroutine once the result has been handled (see
// loopServer). It also returns on a stop signal, echoing the signal
// back so stopPoller can block until real termination.
func asyncPoll(notifier chan zmqPollResult, items zmq.PollItems, stop chan bool) {
	for {
		// Wake up at least once per second so the stop channel is
		// checked even when the sockets are idle.
		timeout := time.Duration(1) * time.Second
		count, err := zmq.Poll(items, timeout)
		if count > 0 || err != nil {
			notifier <- zmqPollResult{err}
			// BUG FIX: previously the loop kept polling here while the
			// caller ALSO spawned a replacement poller, multiplying
			// concurrent pollers on the same sockets with every event.
			// Returning keeps exactly one poller alive at a time.
			return
		}
		select {
		case <-stop:
			stop <- true
			return
		default:
		}
	}
}
// stopPoller signals the asyncPoll goroutine to stop and blocks until
// the poller echoes the signal back, i.e. until it has really exited.
func stopPoller(cancelChan chan bool) {
	cancelChan <- true
	<-cancelChan
}
// The core ZeroMQ messaging loop. Handles requests and responses
// asynchronously using the router socket. Every request is delegated to
// a goroutine for maximum concurrency.
//
// `gozmq` does currently not support copy-free messages/frames. This
// means that every message passing through this function needs to be
// copied in-memory. If this becomes a bottleneck in the future,
// multiple router sockets can be hooked to this final router to scale
// message copying.
//
// TODO: Make this a type function of `Server` to remove a lot of
// parameters.
func loopServer(estore *eventstore.EventStore, evpubsock, frontend zmq.Socket,
	stop chan bool) {
	toPoll := zmq.PollItems{
		// BUG FIX: the field key was written `zmq.Events:`, which is
		// not valid struct-literal syntax (field keys may not be
		// package-qualified) and does not compile.
		zmq.PollItem{Socket: &frontend, Events: zmq.POLLIN},
	}
	// Newly stored events are forwarded to the PUB socket.
	pubchan := make(chan eventstore.StoredEvent)
	estore.RegisterPublishedEventsChannel(pubchan)
	go publishAllSavedEvents(pubchan, evpubsock)
	pollchan := make(chan zmqPollResult)
	respchan := make(chan zMsg)
	pollCancel := make(chan bool)
	defer stopPoller(pollCancel)
	go asyncPoll(pollchan, toPoll, pollCancel)
	for {
		select {
		case res := <-pollchan:
			if res.err != nil {
				log.Print("Could not poll:", res.err)
			}
			if res.err == nil && toPoll[0].REvents&zmq.POLLIN != 0 {
				msg, _ := toPoll[0].Socket.RecvMultipart(0)
				zmsg := zMsg(msg)
				go handleRequest(respchan, estore, zmsg)
			}
			// Re-arm polling now that the previous result is consumed.
			go asyncPoll(pollchan, toPoll, pollCancel)
		case frames := <-respchan:
			if err := frontend.SendMultipart(frames, 0); err != nil {
				log.Println(err)
			}
		case <-stop:
			// Echo back so Stop() knows the loop has exited.
			stop <- true
			return
		}
	}
}
// Publishes stored events to event listeners.
//
// Pops previously stored messages off a channel and published them to a
// ZeroMQ socket. Each published message carries three frames: stream
// name, event id, event payload.
func publishAllSavedEvents(toPublish chan eventstore.StoredEvent, evpub zmq.Socket) {
	// NOTE(review): msg is reused across iterations; this assumes
	// SendMultipart is done with the frames before the next event
	// overwrites them — confirm against gozmq's copying semantics.
	msg := make(zMsg, 3)
	for {
		stored := <-toPublish
		msg[0] = stored.Event.Stream
		msg[1] = stored.Id
		msg[2] = stored.Event.Data
		if err := evpub.SendMultipart(msg, 0); err != nil {
			log.Println(err)
		}
	}
}
// A single frame in a ZeroMQ message. Frames are opaque byte strings.
type zFrame []byte

// A ZeroMQ message: an ordered sequence of frames.
//
// I wish it could have been `[]zFrame`, but that would make conversion
// from `[][]byte` pretty messy[1].
//
// [1] http://stackoverflow.com/a/15650327/260805
type zMsg [][]byte
// Handles a single ZeroMQ RES/REQ loop synchronously.
//
// The full request message is passed in `msg` and the full ZeroMQ
// response is pushed to `respchan`. The function does not return any
// error because it is expected to be called asynchronously as a
// goroutine.
//
// The leading frames up to the empty delimiter frame form the router
// envelope; they are collected into `resptemplate` and prepended to
// every response so the router socket can route it back to the caller.
//
// NOTE(review): frames are pushed onto `parts` as []byte (the element
// type of zMsg) but read back with `.(zFrame)` type assertions, whose
// dynamic types differ — verify these assertions succeed at runtime.
// Likewise PUBLISH asserts eventstore.StreamName on a frame; confirm
// against the eventstore types.
func handleRequest(respchan chan zMsg, estore *eventstore.EventStore, msg zMsg) {
	// TODO: Rename to 'framelist'
	parts := list.New()
	for _, msgpart := range msg {
		parts.PushBack(msgpart)
	}

	// Collect the router envelope into the response template. The loop
	// moves frames until the next frame is the empty delimiter.
	resptemplate := list.New()
	emptyFrame := zFrame("")
	for true {
		resptemplate.PushBack(parts.Remove(parts.Front()))
		if bytes.Equal(parts.Front().Value.(zFrame), emptyFrame) {
			break
		}
	}

	// Nothing after the envelope: report the empty command.
	if parts.Len() == 0 {
		errstr := "Incoming command was empty. Ignoring it."
		log.Println(errstr)
		response := copyList(resptemplate)
		response.PushBack(zFrame("ERROR " + errstr))
		respchan <- listToFrames(response)
		return
	}

	command := string(parts.Front().Value.(zFrame))
	switch command {
	case "PUBLISH":
		parts.Remove(parts.Front())
		if parts.Len() != 2 {
			// TODO: Constantify this error message
			errstr := "Wrong number of frames for PUBLISH."
			log.Println(errstr)
			response := copyList(resptemplate)
			response.PushBack(zFrame("ERROR " + errstr))
			respchan <- listToFrames(response)
		} else {
			// Expected argument frames: <stream> <data>.
			estream := parts.Remove(parts.Front())
			data := parts.Remove(parts.Front())
			newevent := eventstore.Event{
				estream.(eventstore.StreamName),
				data.(zFrame),
			}
			newId, err := estore.Add(newevent)
			if err != nil {
				sErr := err.Error()
				log.Println(sErr)
				response := copyList(resptemplate)
				response.PushBack(zFrame("ERROR " + sErr))
				respchan <- listToFrames(response)
			} else {
				// the event was added; reply with its new id
				response := copyList(resptemplate)
				response.PushBack(zFrame("PUBLISHED"))
				response.PushBack(zFrame(newId))
				respchan <- listToFrames(response)
			}
		}
	case "QUERY":
		parts.Remove(parts.Front())
		if parts.Len() != 3 {
			// TODO: Constantify this error message
			errstr := "Wrong number of frames for QUERY."
			log.Println(errstr)
			response := copyList(resptemplate)
			response.PushBack(zFrame("ERROR " + errstr))
			respchan <- listToFrames(response)
		} else {
			// Expected argument frames: <stream> <fromid> <toid>. Each
			// matching event is streamed back as its own EVENT response,
			// followed by a terminating END response.
			estream := parts.Remove(parts.Front())
			fromid := parts.Remove(parts.Front())
			toid := parts.Remove(parts.Front())
			req := eventstore.QueryRequest{
				Stream: estream.(zFrame),
				FromId: fromid.(zFrame),
				ToId: toid.(zFrame),
			}
			events, err := estore.Query(req)
			if err != nil {
				sErr := err.Error()
				log.Println(sErr)
				response := copyList(resptemplate)
				response.PushBack(zFrame("ERROR " + sErr))
				respchan <- listToFrames(response)
			} else {
				for eventdata := range(events) {
					response := copyList(resptemplate)
					response.PushBack([]byte("EVENT"))
					response.PushBack(eventdata.Id)
					response.PushBack(eventdata.Data)
					respchan <- listToFrames(response)
				}
				response := copyList(resptemplate)
				response.PushBack(zFrame("END"))
				respchan <- listToFrames(response)
			}
		}
	default:
		// TODO: Move these error strings out as constants of
		// this package.
		// TODO: Move the chunk of code below into a separate
		// function and reuse for similar piece of code above.
		// TODO: Constantify this error message
		errstr := "Unknown request type."
		log.Println(errstr)
		response := copyList(resptemplate)
		response.PushBack(zFrame("ERROR " + errstr))
		respchan <- listToFrames(response)
	}
}
// listToFrames converts a doubly linked list of message frames to a
// flat zMsg slice, preserving order.
func listToFrames(l *list.List) zMsg {
	frames := make(zMsg, l.Len())
	i := 0
	for e := l.Front(); e != nil; e = e.Next() {
		frames[i] = e.Value.(zFrame)
		// BUG FIX: i was never incremented, so every frame overwrote
		// frames[0] and the rest of the slice stayed nil.
		i++
	}
	return frames
}
// Helper function for copying a doubly linked list.
func copyList(l *list.List) *list.List {
replica := list.New()
replica.PushBackList(l)
return replica
}
|
// Copyright 2016 Jacob Taylor jacob@ablox.io
// License: Apache2 - http://www.apache.org/licenses/LICENSE-2.0
package main
import (
"fmt"
"net"
"../utils"
"bufio"
"encoding/binary"
//"time"
"os"
"bytes"
"io"
"io/ioutil"
"github.com/urfave/cli"
"path/filepath"
)
// nbd_folder is appended to the configured directory to locate the
// disk images served to clients.
const nbd_folder = "/sample_disks/"

// Progress-display state: one status character is printed per request,
// with a running counter emitted every characters_per_line characters.
var characters_per_line = 100
var newline = 0
var line_number = 0
// Settings describes the server configuration as populated from the
// command-line flags in main.
type Settings struct {
	ReadOnly bool // serve every export read-only
	AutoFlush bool // call Sync on the backing file after each write
	Host string // host to listen on when Listen is empty
	Port int // port to listen on when Listen is empty
	Listen string // full "host:port" listen address; overrides Host/Port
	File string // single file to export; overrides Directory
	Directory string // directory containing the exportable disk images
	BufferLimit int // maximum supported request size, in kilobytes
}
// Connection records one client attachment to an exported file.
type Connection struct {
	File       string // path of the file being served
	RemoteAddr string // client address, used for diagnostics
	ReadOnly   bool   // whether this client may only read
}

// connections tracks all active connections, keyed by filename.
var connections = make(map[string][]Connection)

/*
Add a new connection to the list of connections for a file. Make sure there is only one writable connection per filename
returns true if the connection was added correctly. false otherwise
*/
func addConnection(filename string, readOnly bool, remoteAddr string) bool {
	currentConnections, ok := connections[filename]
	if !ok {
		// BUG FIX: make([]Connection, 4) created four zero-value entries
		// whose ReadOnly field is false, so the writable-conflict check
		// below rejected the very first writable client. Allocate an
		// empty slice with capacity instead.
		currentConnections = make([]Connection, 0, 4)
	}

	// If this is a writable request, check whether anybody else already
	// holds a writable connection to the same file.
	if !readOnly {
		for _, conn := range currentConnections {
			if !conn.ReadOnly {
				fmt.Printf("Error, too many writable connections. %s is already connected to %s\n", remoteAddr, filename)
				return false
			}
		}
	}

	newConnection := Connection{
		File:       filename,
		RemoteAddr: remoteAddr,
		ReadOnly:   readOnly,
	}
	connections[filename] = append(currentConnections, newConnection)
	return true
}
// globalSettings holds the runtime configuration; the CLI flags defined
// in main overwrite these defaults in place.
var globalSettings Settings = Settings {
	ReadOnly: false,
	AutoFlush: true,
	Host: "localhost",
	Port: 8000,
	Listen: "",
	File: "",
	Directory: "sample_disks",
	BufferLimit: 2048,
}
// send_export_list_item emits one NBD_REP_SERVER reply advertising a
// single export name to the client.
func send_export_list_item(output *bufio.Writer, options uint32, export_name string) {
	payload := make([]byte, 1024)
	nameLen := len(export_name)

	// Frame layout: 4-byte big-endian name length, then the name itself.
	binary.BigEndian.PutUint32(payload, uint32(nameLen))
	copy(payload[4:], export_name)
	total := 4 + nameLen

	const replyType uint32 = 2 // NBD_REP_SERVER
	send_message(output, options, replyType, uint32(total), payload)
}
// send_ack sends an empty NBD_COMMAND_ACK reply for the given options.
func send_ack(output *bufio.Writer, options uint32) {
	send_message(output, options, utils.NBD_COMMAND_ACK, 0, nil)
}
// export_name services one client bound to a named export: it opens the
// backing file, sends the negotiation reply (size, flags, optional zero
// padding) and then loops handling READ, WRITE and DISCONNECT requests
// until the client disconnects.
func export_name(output *bufio.Writer, conn net.Conn, payload_size int, payload []byte, options uint32, globalSettings Settings) {
	fmt.Printf("have request to bind to: %s\n", string(payload[:payload_size]))
	defer conn.Close()

	//todo add support for file specification
	var filename bytes.Buffer
	readOnly := false
	var current_directory = globalSettings.Directory
	var err error
	if current_directory == "" {
		current_directory, err = os.Getwd()
		utils.ErrorCheck(err)
	}
	filename.WriteString(current_directory)
	filename.WriteString(nbd_folder)
	filename.Write(payload[:payload_size])

	fmt.Printf("Opening file: %s\n", filename.String())

	// Open read-only when either the server or the client requested it.
	fileMode := os.O_RDWR
	if globalSettings.ReadOnly || (options&utils.NBD_OPT_READ_ONLY != 0) {
		fmt.Printf("Read Only is set\n")
		fileMode = os.O_RDONLY
		readOnly = true
	}
	file, err := os.OpenFile(filename.String(), fileMode, 0644)
	utils.ErrorCheck(err)
	if err != nil {
		return
	}

	// Negotiation reply: 8-byte size, 2-byte flags, and optionally 124
	// zero bytes when the client did not negotiate NBD_FLAG_NO_ZEROES.
	buffer := make([]byte, 256)
	offset := 0
	fs, err := file.Stat()
	utils.ErrorCheck(err)
	if err != nil {
		// BUG FIX: fs was previously dereferenced without checking err,
		// which panics when Stat fails.
		return
	}
	file_size := uint64(fs.Size())
	binary.BigEndian.PutUint64(buffer[offset:], file_size) // size
	offset += 8
	binary.BigEndian.PutUint16(buffer[offset:], 1) // flags
	offset += 2
	if (options & utils.NBD_FLAG_NO_ZEROES) != utils.NBD_FLAG_NO_ZEROES {
		offset += 124
	}
	_, err = output.Write(buffer[:offset])
	output.Flush()
	utils.ErrorCheck(err)

	buffer_limit := globalSettings.BufferLimit * 1024 // maximum request size, in bytes
	buffer = make([]byte, buffer_limit)
	conn_reader := bufio.NewReader(conn)
processing:
	for {
		waiting_for := 28 // size of the fixed NBD request header
		_, err := io.ReadFull(conn_reader, buffer[:waiting_for])
		if err == io.EOF {
			fmt.Printf("Abort detected, escaping processing loop\n")
			break
		}
		utils.ErrorCheck(err)

		//magic := binary.BigEndian.Uint32(buffer)
		command := binary.BigEndian.Uint32(buffer[4:8])
		//handle := binary.BigEndian.Uint64(buffer[8:16])
		from := binary.BigEndian.Uint64(buffer[16:24])
		length := binary.BigEndian.Uint32(buffer[24:28])

		// Error out and drop the connection if there is an attempt to
		// read more than the request buffer can hold.
		// BUG FIX (#32): length is uint32 and buffer_limit is int; the
		// comparison needs an explicit conversion to compile.
		if int(length) > buffer_limit {
			fmt.Printf("E")
			file.Sync()
			return
		}

		// Progress output: one status character per request, with a
		// running counter printed at the start of each line.
		newline += 1
		if newline%characters_per_line == 0 {
			line_number++
			fmt.Printf("\n%5d: ", line_number*100)
			newline -= characters_per_line
		}

		switch command {
		case utils.NBD_COMMAND_READ:
			fmt.Printf(".")
			_, err = file.ReadAt(buffer[16:16+length], int64(from))
			utils.ErrorCheck(err)
			binary.BigEndian.PutUint32(buffer[:4], utils.NBD_REPLY_MAGIC)
			binary.BigEndian.PutUint32(buffer[4:8], 0) // error bits
			conn.Write(buffer[:16+length])
			continue
		case utils.NBD_COMMAND_WRITE:
			if readOnly {
				fmt.Printf("E")
				fmt.Printf("\nAttempt to write to read only file blocked\n")
				continue
			}
			fmt.Printf("W")
			_, err := io.ReadFull(conn_reader, buffer[28:28+length])
			if err == io.EOF {
				fmt.Printf("Abort detected, escaping processing loop\n")
				// BUG FIX: a bare break here only exited the switch, not
				// the processing loop; use a labeled break.
				break processing
			}
			utils.ErrorCheck(err)
			_, err = file.WriteAt(buffer[28:28+length], int64(from))
			utils.ErrorCheck(err)
			if globalSettings.AutoFlush {
				file.Sync()
			}
			// let them know we are done
			binary.BigEndian.PutUint32(buffer[:4], utils.NBD_REPLY_MAGIC)
			binary.BigEndian.PutUint32(buffer[4:8], 0) // error bits
			conn.Write(buffer[:16])
			continue
		case utils.NBD_COMMAND_DISCONNECT:
			fmt.Printf("D")
			file.Sync()
			return
		}
	}
}
/*
send_export_list advertises the available exports to the client.

First check for a specific file. If one is specified, use it. If not,
use the configured directory; if that is not set either, fall back to
the CWD.
*/
func send_export_list(output *bufio.Writer, options uint32, globalSettings Settings) {
	if globalSettings.File != "" {
		_, file := filepath.Split(globalSettings.File)
		send_export_list_item(output, options, file)
		send_ack(output, options)
		return
	}

	// BUG FIX: current_directory was previously left empty whenever a
	// directory WAS configured, so the listing always looked under
	// "/sample_disks/" at the filesystem root. Mirror export_name and
	// start from the configured directory.
	current_directory := globalSettings.Directory
	var err error
	if current_directory == "" {
		current_directory, err = os.Getwd()
		utils.ErrorCheck(err)
	}
	files, err := ioutil.ReadDir(current_directory + nbd_folder)
	utils.ErrorCheck(err)

	for _, file := range files {
		send_export_list_item(output, options, file.Name())
	}
	send_ack(output, options)
}
// send_message writes a single NBD option reply to the client:
// reply magic, echoed options, reply type, payload length, payload.
// The writer is flushed before returning.
func send_message(output *bufio.Writer, options uint32, reply_type uint32, length uint32, data []byte) {
	buf := make([]byte, 1024)
	pos := 0
	binary.BigEndian.PutUint64(buf[pos:], utils.NBD_SERVER_SEND_REPLY_MAGIC)
	pos += 8
	binary.BigEndian.PutUint32(buf[pos:], options) // echo the options back
	pos += 4
	binary.BigEndian.PutUint32(buf[pos:], reply_type)
	pos += 4
	binary.BigEndian.PutUint32(buf[pos:], length) // payload length
	pos += 4
	if data != nil {
		copy(buf[pos:], data[:length])
		pos += int(length)
	}
	out := buf[:pos]
	output.Write(out)
	output.Flush()
	utils.LogData("Just sent:", pos, out)
}
// defaultOptions is the pair of zero "global flags" bytes sent to every
// client during the initial handshake.
var defaultOptions = []byte{0, 0}

// main parses the CLI flags, resolves the listen address and then
// accepts NBD client connections forever: each connection gets the
// initial handshake, its option request is read, and LIST or
// EXPORT_NAME handling is dispatched.
func main() {
	app := cli.NewApp()
	app.Name = "AnyBlox"
	app.Usage = "block storage for the masses"
	app.Action = func(c *cli.Context) error {
		fmt.Println("Please specify either a full 'listen' parameter (e.g. 'localhost:8000', '192.168.1.2:8000) or a host and port\n")
		return nil
	}

	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:        "host",
			Value:       globalSettings.Host,
			Usage:       "Hostname or IP address you want to serve traffic on. e.x. 'localhost', '192.168.1.2'",
			Destination: &globalSettings.Host,
		},
		cli.IntFlag{
			Name:        "port",
			Value:       globalSettings.Port,
			Usage:       "Port you want to serve traffic on. e.x. '8000'",
			Destination: &globalSettings.Port,
		},
		cli.StringFlag{
			Name:        "listen, l",
			Destination: &globalSettings.Listen,
			Usage:       "Address and port the server should listen on. Listen will take priority over host and port parameters. hostname:port - e.x. 'localhost:8000', '192.168.1.2:8000'",
		},
		cli.StringFlag{
			Name:        "file, f",
			Destination: &globalSettings.File,
			Value:       "",
			Usage:       "The file that should be shared by this server. 'file' overrides 'directory'. It is required to be a full absolute path that includes the filename",
		},
		cli.StringFlag{
			Name:        "directory, d",
			Destination: &globalSettings.Directory,
			Value:       globalSettings.Directory,
			Usage:       "Specify a directory where the files to share are located. Default is 'sample_disks",
		},
		cli.IntFlag{
			Name:  "buffer",
			Value: globalSettings.BufferLimit,
			Usage: "The number of kilobytes in size of the maximum supported read request e.x. '2048'",
			// BUG FIX: this flag previously wrote into
			// &globalSettings.Port, clobbering the port with the buffer
			// size whenever -buffer was supplied.
			Destination: &globalSettings.BufferLimit,
		},
	}
	app.Run(os.Args)

	// Determine where the host should be listening to, depending on the arguments
	fmt.Printf("listen (%s) host (%s) port (%d)\n", globalSettings.Listen, globalSettings.Host, globalSettings.Port)
	hostingAddress := globalSettings.Listen
	if len(globalSettings.Listen) == 0 {
		if len(globalSettings.Host) == 0 || globalSettings.Port <= 0 {
			panic("You need to specify a host and port or specify a listen address (host:port)\n")
		}
		// BUG FIX: fmt.Sprint(port, "%d", ...) discarded its result and
		// never formatted anything, leaving the port part of the
		// address empty.
		port := fmt.Sprintf("%d", globalSettings.Port)
		hostingAddress = globalSettings.Host + ":" + port
	}

	fmt.Printf("About to listen on %s\n", hostingAddress)
	listener, err := net.Listen("tcp", hostingAddress)
	utils.ErrorCheck(err)
	fmt.Printf("aBlox server online\n")
	// (The old unused reply_magic buffer was dead code and is removed.)
	defer fmt.Printf("End of line\n")

	for {
		conn, err := listener.Accept()
		utils.ErrorCheck(err)
		fmt.Printf("We have a new connection from: %s\n", conn.RemoteAddr())

		// Handshake: init password, option magic, zero global flags.
		output := bufio.NewWriter(conn)
		output.WriteString("NBDMAGIC") // init password
		output.WriteString("IHAVEOPT") // Magic
		output.Write(defaultOptions)
		output.Flush()

		// Fetch the data until we get the initial options
		data := make([]byte, 1024)
		waiting_for := 16 // wait for at least the minimum payload size
		_, err = io.ReadFull(conn, data[:waiting_for])
		utils.ErrorCheck(err)

		options := binary.BigEndian.Uint32(data[:4])
		command := binary.BigEndian.Uint32(data[12:16])

		// If we are requesting an export, make sure we have the length
		// of the data for the export name.
		if command == utils.NBD_COMMAND_EXPORT_NAME {
			waiting_for += 4
			_, err = io.ReadFull(conn, data[16:20])
			utils.ErrorCheck(err)
		}
		payload_size := int(binary.BigEndian.Uint32(data[16:]))
		fmt.Printf("command is: %d\npayload_size is: %d\n", command, payload_size)

		offset := waiting_for
		waiting_for += payload_size
		_, err = io.ReadFull(conn, data[offset:waiting_for])
		utils.ErrorCheck(err)

		payload := make([]byte, payload_size)
		if payload_size > 0 {
			copy(payload, data[20:])
		}
		utils.LogData("Payload is:", payload_size, payload)

		// At this point, we have the command, payload size, and payload.
		switch command {
		case utils.NBD_COMMAND_LIST:
			send_export_list(output, options, globalSettings)
			conn.Close()
		case utils.NBD_COMMAND_EXPORT_NAME:
			go export_name(output, conn, payload_size, payload, options, globalSettings)
		}
	}
}
fixed #32 convert length to int before comparison
// Copyright 2016 Jacob Taylor jacob@ablox.io
// License: Apache2 - http://www.apache.org/licenses/LICENSE-2.0
package main
import (
"fmt"
"net"
"../utils"
"bufio"
"encoding/binary"
//"time"
"os"
"bytes"
"io"
"io/ioutil"
"github.com/urfave/cli"
"path/filepath"
)
// nbd_folder is appended to the configured directory to locate the
// disk images served to clients.
const nbd_folder = "/sample_disks/"

// Progress-display state: one status character is printed per request,
// with a running counter emitted every characters_per_line characters.
var characters_per_line = 100
var newline = 0
var line_number = 0
// Settings describes the server configuration as populated from the
// command-line flags in main.
type Settings struct {
	ReadOnly bool // serve every export read-only
	AutoFlush bool // call Sync on the backing file after each write
	Host string // host to listen on when Listen is empty
	Port int // port to listen on when Listen is empty
	Listen string // full "host:port" listen address; overrides Host/Port
	File string // single file to export; overrides Directory
	Directory string // directory containing the exportable disk images
	BufferLimit int // maximum supported request size, in kilobytes
}
// Connection records one client attachment to an exported file.
type Connection struct {
	File       string // path of the file being served
	RemoteAddr string // client address, used for diagnostics
	ReadOnly   bool   // whether this client may only read
}

// connections tracks all active connections, keyed by filename.
var connections = make(map[string][]Connection)

/*
Add a new connection to the list of connections for a file. Make sure there is only one writable connection per filename
returns true if the connection was added correctly. false otherwise
*/
func addConnection(filename string, readOnly bool, remoteAddr string) bool {
	currentConnections, ok := connections[filename]
	if !ok {
		// BUG FIX: make([]Connection, 4) created four zero-value entries
		// whose ReadOnly field is false, so the writable-conflict check
		// below rejected the very first writable client. Allocate an
		// empty slice with capacity instead.
		currentConnections = make([]Connection, 0, 4)
	}

	// If this is a writable request, check whether anybody else already
	// holds a writable connection to the same file.
	if !readOnly {
		for _, conn := range currentConnections {
			if !conn.ReadOnly {
				fmt.Printf("Error, too many writable connections. %s is already connected to %s\n", remoteAddr, filename)
				return false
			}
		}
	}

	newConnection := Connection{
		File:       filename,
		RemoteAddr: remoteAddr,
		ReadOnly:   readOnly,
	}
	connections[filename] = append(currentConnections, newConnection)
	return true
}
// globalSettings holds the runtime configuration; the CLI flags defined
// in main overwrite these defaults in place.
var globalSettings Settings = Settings {
	ReadOnly: false,
	AutoFlush: true,
	Host: "localhost",
	Port: 8000,
	Listen: "",
	File: "",
	Directory: "sample_disks",
	BufferLimit: 2048,
}
// send_export_list_item emits one NBD_REP_SERVER reply advertising a
// single export name to the client.
func send_export_list_item(output *bufio.Writer, options uint32, export_name string) {
	payload := make([]byte, 1024)
	nameLen := len(export_name)

	// Frame layout: 4-byte big-endian name length, then the name itself.
	binary.BigEndian.PutUint32(payload, uint32(nameLen))
	copy(payload[4:], export_name)
	total := 4 + nameLen

	const replyType uint32 = 2 // NBD_REP_SERVER
	send_message(output, options, replyType, uint32(total), payload)
}
// send_ack sends an empty NBD_COMMAND_ACK reply for the given options.
func send_ack(output *bufio.Writer, options uint32) {
	send_message(output, options, utils.NBD_COMMAND_ACK, 0, nil)
}
// export_name services one client bound to a named export: it opens the
// backing file, sends the negotiation reply (size, flags, optional zero
// padding) and then loops handling READ, WRITE and DISCONNECT requests
// until the client disconnects.
func export_name(output *bufio.Writer, conn net.Conn, payload_size int, payload []byte, options uint32, globalSettings Settings) {
	fmt.Printf("have request to bind to: %s\n", string(payload[:payload_size]))
	defer conn.Close()

	//todo add support for file specification
	var filename bytes.Buffer
	readOnly := false
	var current_directory = globalSettings.Directory
	var err error
	if current_directory == "" {
		current_directory, err = os.Getwd()
		utils.ErrorCheck(err)
	}
	filename.WriteString(current_directory)
	filename.WriteString(nbd_folder)
	filename.Write(payload[:payload_size])

	fmt.Printf("Opening file: %s\n", filename.String())

	// Open read-only when either the server or the client requested it.
	fileMode := os.O_RDWR
	if globalSettings.ReadOnly || (options&utils.NBD_OPT_READ_ONLY != 0) {
		fmt.Printf("Read Only is set\n")
		fileMode = os.O_RDONLY
		readOnly = true
	}
	file, err := os.OpenFile(filename.String(), fileMode, 0644)
	utils.ErrorCheck(err)
	if err != nil {
		return
	}

	// Negotiation reply: 8-byte size, 2-byte flags, and optionally 124
	// zero bytes when the client did not negotiate NBD_FLAG_NO_ZEROES.
	buffer := make([]byte, 256)
	offset := 0
	fs, err := file.Stat()
	utils.ErrorCheck(err)
	if err != nil {
		// BUG FIX: fs was previously dereferenced without checking err,
		// which panics when Stat fails.
		return
	}
	file_size := uint64(fs.Size())
	binary.BigEndian.PutUint64(buffer[offset:], file_size) // size
	offset += 8
	binary.BigEndian.PutUint16(buffer[offset:], 1) // flags
	offset += 2
	if (options & utils.NBD_FLAG_NO_ZEROES) != utils.NBD_FLAG_NO_ZEROES {
		offset += 124
	}
	_, err = output.Write(buffer[:offset])
	output.Flush()
	utils.ErrorCheck(err)

	buffer_limit := globalSettings.BufferLimit * 1024 // maximum request size, in bytes
	buffer = make([]byte, buffer_limit)
	conn_reader := bufio.NewReader(conn)
processing:
	for {
		waiting_for := 28 // size of the fixed NBD request header
		_, err := io.ReadFull(conn_reader, buffer[:waiting_for])
		if err == io.EOF {
			fmt.Printf("Abort detected, escaping processing loop\n")
			break
		}
		utils.ErrorCheck(err)

		//magic := binary.BigEndian.Uint32(buffer)
		command := binary.BigEndian.Uint32(buffer[4:8])
		//handle := binary.BigEndian.Uint64(buffer[8:16])
		from := binary.BigEndian.Uint64(buffer[16:24])
		length := binary.BigEndian.Uint32(buffer[24:28])

		// Error out and drop the connection if there is an attempt to
		// read more than the request buffer can hold.
		if int(length) > buffer_limit {
			fmt.Printf("E")
			file.Sync()
			return
		}

		// Progress output: one status character per request, with a
		// running counter printed at the start of each line.
		newline += 1
		if newline%characters_per_line == 0 {
			line_number++
			fmt.Printf("\n%5d: ", line_number*100)
			newline -= characters_per_line
		}

		switch command {
		case utils.NBD_COMMAND_READ:
			fmt.Printf(".")
			_, err = file.ReadAt(buffer[16:16+length], int64(from))
			utils.ErrorCheck(err)
			binary.BigEndian.PutUint32(buffer[:4], utils.NBD_REPLY_MAGIC)
			binary.BigEndian.PutUint32(buffer[4:8], 0) // error bits
			conn.Write(buffer[:16+length])
			continue
		case utils.NBD_COMMAND_WRITE:
			if readOnly {
				fmt.Printf("E")
				fmt.Printf("\nAttempt to write to read only file blocked\n")
				continue
			}
			fmt.Printf("W")
			_, err := io.ReadFull(conn_reader, buffer[28:28+length])
			if err == io.EOF {
				fmt.Printf("Abort detected, escaping processing loop\n")
				// BUG FIX: a bare break here only exited the switch, not
				// the processing loop; use a labeled break.
				break processing
			}
			utils.ErrorCheck(err)
			_, err = file.WriteAt(buffer[28:28+length], int64(from))
			utils.ErrorCheck(err)
			if globalSettings.AutoFlush {
				file.Sync()
			}
			// let them know we are done
			binary.BigEndian.PutUint32(buffer[:4], utils.NBD_REPLY_MAGIC)
			binary.BigEndian.PutUint32(buffer[4:8], 0) // error bits
			conn.Write(buffer[:16])
			continue
		case utils.NBD_COMMAND_DISCONNECT:
			fmt.Printf("D")
			file.Sync()
			return
		}
	}
}
/*
send_export_list advertises the available exports to the client.

First check for a specific file. If one is specified, use it. If not,
use the configured directory; if that is not set either, fall back to
the CWD.
*/
func send_export_list(output *bufio.Writer, options uint32, globalSettings Settings) {
	if globalSettings.File != "" {
		_, file := filepath.Split(globalSettings.File)
		send_export_list_item(output, options, file)
		send_ack(output, options)
		return
	}

	// BUG FIX: current_directory was previously left empty whenever a
	// directory WAS configured, so the listing always looked under
	// "/sample_disks/" at the filesystem root. Mirror export_name and
	// start from the configured directory.
	current_directory := globalSettings.Directory
	var err error
	if current_directory == "" {
		current_directory, err = os.Getwd()
		utils.ErrorCheck(err)
	}
	files, err := ioutil.ReadDir(current_directory + nbd_folder)
	utils.ErrorCheck(err)

	for _, file := range files {
		send_export_list_item(output, options, file.Name())
	}
	send_ack(output, options)
}
// send_message writes a single NBD option reply to the client:
// reply magic, echoed options, reply type, payload length, payload.
// The writer is flushed before returning.
func send_message(output *bufio.Writer, options uint32, reply_type uint32, length uint32, data []byte) {
	buf := make([]byte, 1024)
	pos := 0
	binary.BigEndian.PutUint64(buf[pos:], utils.NBD_SERVER_SEND_REPLY_MAGIC)
	pos += 8
	binary.BigEndian.PutUint32(buf[pos:], options) // echo the options back
	pos += 4
	binary.BigEndian.PutUint32(buf[pos:], reply_type)
	pos += 4
	binary.BigEndian.PutUint32(buf[pos:], length) // payload length
	pos += 4
	if data != nil {
		copy(buf[pos:], data[:length])
		pos += int(length)
	}
	out := buf[:pos]
	output.Write(out)
	output.Flush()
	utils.LogData("Just sent:", pos, out)
}
// defaultOptions is the pair of zero "global flags" bytes sent to every
// client during the initial handshake.
var defaultOptions = []byte{0, 0}

// main parses the CLI flags, resolves the listen address and then
// accepts NBD client connections forever: each connection gets the
// initial handshake, its option request is read, and LIST or
// EXPORT_NAME handling is dispatched.
func main() {
	app := cli.NewApp()
	app.Name = "AnyBlox"
	app.Usage = "block storage for the masses"
	app.Action = func(c *cli.Context) error {
		fmt.Println("Please specify either a full 'listen' parameter (e.g. 'localhost:8000', '192.168.1.2:8000) or a host and port\n")
		return nil
	}

	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:        "host",
			Value:       globalSettings.Host,
			Usage:       "Hostname or IP address you want to serve traffic on. e.x. 'localhost', '192.168.1.2'",
			Destination: &globalSettings.Host,
		},
		cli.IntFlag{
			Name:        "port",
			Value:       globalSettings.Port,
			Usage:       "Port you want to serve traffic on. e.x. '8000'",
			Destination: &globalSettings.Port,
		},
		cli.StringFlag{
			Name:        "listen, l",
			Destination: &globalSettings.Listen,
			Usage:       "Address and port the server should listen on. Listen will take priority over host and port parameters. hostname:port - e.x. 'localhost:8000', '192.168.1.2:8000'",
		},
		cli.StringFlag{
			Name:        "file, f",
			Destination: &globalSettings.File,
			Value:       "",
			Usage:       "The file that should be shared by this server. 'file' overrides 'directory'. It is required to be a full absolute path that includes the filename",
		},
		cli.StringFlag{
			Name:        "directory, d",
			Destination: &globalSettings.Directory,
			Value:       globalSettings.Directory,
			Usage:       "Specify a directory where the files to share are located. Default is 'sample_disks",
		},
		cli.IntFlag{
			Name:  "buffer",
			Value: globalSettings.BufferLimit,
			Usage: "The number of kilobytes in size of the maximum supported read request e.x. '2048'",
			// BUG FIX: this flag previously wrote into
			// &globalSettings.Port, clobbering the port with the buffer
			// size whenever -buffer was supplied.
			Destination: &globalSettings.BufferLimit,
		},
	}
	app.Run(os.Args)

	// Determine where the host should be listening to, depending on the arguments
	fmt.Printf("listen (%s) host (%s) port (%d)\n", globalSettings.Listen, globalSettings.Host, globalSettings.Port)
	hostingAddress := globalSettings.Listen
	if len(globalSettings.Listen) == 0 {
		if len(globalSettings.Host) == 0 || globalSettings.Port <= 0 {
			panic("You need to specify a host and port or specify a listen address (host:port)\n")
		}
		// BUG FIX: fmt.Sprint(port, "%d", ...) discarded its result and
		// never formatted anything, leaving the port part of the
		// address empty.
		port := fmt.Sprintf("%d", globalSettings.Port)
		hostingAddress = globalSettings.Host + ":" + port
	}

	fmt.Printf("About to listen on %s\n", hostingAddress)
	listener, err := net.Listen("tcp", hostingAddress)
	utils.ErrorCheck(err)
	fmt.Printf("aBlox server online\n")
	// (The old unused reply_magic buffer was dead code and is removed.)
	defer fmt.Printf("End of line\n")

	for {
		conn, err := listener.Accept()
		utils.ErrorCheck(err)
		fmt.Printf("We have a new connection from: %s\n", conn.RemoteAddr())

		// Handshake: init password, option magic, zero global flags.
		output := bufio.NewWriter(conn)
		output.WriteString("NBDMAGIC") // init password
		output.WriteString("IHAVEOPT") // Magic
		output.Write(defaultOptions)
		output.Flush()

		// Fetch the data until we get the initial options
		data := make([]byte, 1024)
		waiting_for := 16 // wait for at least the minimum payload size
		_, err = io.ReadFull(conn, data[:waiting_for])
		utils.ErrorCheck(err)

		options := binary.BigEndian.Uint32(data[:4])
		command := binary.BigEndian.Uint32(data[12:16])

		// If we are requesting an export, make sure we have the length
		// of the data for the export name.
		if command == utils.NBD_COMMAND_EXPORT_NAME {
			waiting_for += 4
			_, err = io.ReadFull(conn, data[16:20])
			utils.ErrorCheck(err)
		}
		payload_size := int(binary.BigEndian.Uint32(data[16:]))
		fmt.Printf("command is: %d\npayload_size is: %d\n", command, payload_size)

		offset := waiting_for
		waiting_for += payload_size
		_, err = io.ReadFull(conn, data[offset:waiting_for])
		utils.ErrorCheck(err)

		payload := make([]byte, payload_size)
		if payload_size > 0 {
			copy(payload, data[20:])
		}
		utils.LogData("Payload is:", payload_size, payload)

		// At this point, we have the command, payload size, and payload.
		switch command {
		case utils.NBD_COMMAND_LIST:
			send_export_list(output, options, globalSettings)
			conn.Close()
		case utils.NBD_COMMAND_EXPORT_NAME:
			go export_name(output, conn, payload_size, payload, options, globalSettings)
		}
	}
}
|
package server
import (
"context"
"net"
"sync"
"time"
"bitbucket.org/harfangapps/regis-companion/resp"
"github.com/pkg/errors"
)
// Server defines the regis-companion Server that listens for incoming connections
// and manages SSH tunnels.
type Server struct {
	// The address the server listens on.
	Addr net.Addr
	// Duration before the server stops if there is no active tunnel
	// and no connection attempt.
	IdleTimeout time.Duration
	// Read timeout before returning a network error on a read attempt.
	ReadTimeout time.Duration
	// Write timeout before returning a network error on a write attempt.
	WriteTimeout time.Duration
	// The channel to send errors to. If nil, the errors are logged.
	// If the send would block, the error is dropped. It is the responsibility
	// of the caller to close the channel once the Server is stopped.
	// If set, this ErrChan is used for all Tunnels started by this
	// Server.
	ErrChan chan<- error
}
// ListenAndServe starts the server on the configured Addr.
//
// The call blocks until an error is encountered, so the returned error
// is always non-nil.
func (s *Server) ListenAndServe(ctx context.Context) error {
	ln, err := net.Listen(s.Addr.Network(), s.Addr.String())
	if err != nil {
		return errors.Wrap(err, "listen error")
	}
	return s.serve(ctx, ln)
}
// serve runs the accept loop on the given listener, dispatching every
// accepted connection to serveConn through a retryServer.
func (s *Server) serve(ctx context.Context, l net.Listener) error {
	server := retryServer{
		listener: l,
		// BUG FIX: this previously read t.serveConn, but no identifier
		// `t` exists in this scope — the method receiver is `s`.
		dispatch: s.serveConn,
		errChan:  s.ErrChan,
	}
	return server.serve(ctx)
}
// serveConn services a single accepted connection; it is invoked by the
// retryServer dispatch and must release the server WaitGroup when done.
//
// NOTE(review): dec and enc are created but never used and there is no
// request/response loop — as written this does not compile ("declared
// and not used"). The decode/execute/encode loop appears unfinished;
// verify against the completed revision of this file.
func (s *Server) serveConn(done <-chan struct{}, serverWg *sync.WaitGroup, conn net.Conn) {
	defer func() {
		conn.Close() // close the serviced connection
		serverWg.Done() // signal the server that this connection is done
	}()
	dec := resp.NewDecoder(conn)
	enc := resp.NewEncoder(conn)
}
server: decode request and encode response
package server
import (
"context"
"fmt"
"net"
"strings"
"sync"
"time"
"bitbucket.org/harfangapps/regis-companion/resp"
"github.com/pkg/errors"
)
var (
	// errEmptyCmd is returned by execute when the request has no frames.
	errEmptyCmd = errors.New("command is empty")
)
// Server defines the regis-companion Server that listens for incoming connections
// and manages SSH tunnels.
type Server struct {
	// The address the server listens on.
	Addr net.Addr
	// Duration before the server stops if there is no active tunnel
	// and no connection attempt.
	IdleTimeout time.Duration
	// Read timeout before returning a network error on a read attempt.
	ReadTimeout time.Duration
	// Write timeout before returning a network error on a write attempt.
	WriteTimeout time.Duration
	// The channel to send errors to. If nil, the errors are logged.
	// If the send would block, the error is dropped. It is the responsibility
	// of the caller to close the channel once the Server is stopped.
	// If set, this ErrChan is used for all Tunnels started by this
	// Server.
	ErrChan chan<- error
}
// ListenAndServe starts the server on the configured Addr.
//
// The call blocks until an error is encountered, so the returned error
// is always non-nil.
func (s *Server) ListenAndServe(ctx context.Context) error {
	ln, err := net.Listen(s.Addr.Network(), s.Addr.String())
	if err != nil {
		return errors.Wrap(err, "listen error")
	}
	return s.serve(ctx, ln)
}
// serve runs the accept loop on the given listener, dispatching every
// accepted connection to serveConn through a retryServer.
func (s *Server) serve(ctx context.Context, l net.Listener) error {
	rs := retryServer{
		listener: l,
		dispatch: s.serveConn,
		errChan:  s.ErrChan,
	}
	return rs.serve(ctx)
}
// serveConn handles one client connection: it repeatedly decodes a
// request, executes it and encodes the response, stopping (and closing
// the connection) as soon as any step fails. The server WaitGroup is
// released on exit.
func (s *Server) serveConn(done <-chan struct{}, serverWg *sync.WaitGroup, conn net.Conn) {
	defer func() {
		conn.Close()    // close the serviced connection
		serverWg.Done() // signal the server that this connection is done
	}()

	dec := resp.NewDecoder(conn)
	enc := resp.NewEncoder(conn)
	for {
		// read the request
		req, err := dec.DecodeRequest()
		if err != nil {
			handleError(errors.Wrap(err, "decode request error"), s.ErrChan)
			return
		}

		// handle the request
		res, err := s.execute(req)
		if err != nil {
			handleError(errors.Wrap(err, "execute request error"), s.ErrChan)
			return
		}

		// write the response
		if err := enc.Encode(res); err != nil {
			handleError(errors.Wrap(err, "encode response error"), s.ErrChan)
			return
		}
	}
}
// execute runs a single parsed request and returns the value to encode
// back to the client. Empty requests and unknown commands yield errors.
func (s *Server) execute(req []string) (interface{}, error) {
	if len(req) == 0 {
		return nil, errEmptyCmd
	}
	cmd := strings.ToLower(req[0])
	if cmd == "ping" {
		return resp.Pong{}, nil
	}
	return nil, fmt.Errorf("unknown command: %v", cmd)
}
|
package main
import (
"crypto"
"crypto/tls"
"crypto/x509"
"encoding/pem"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"os"
"os/exec"
"os/signal"
"runtime"
"sync"
"syscall"
"github.com/kardianos/osext"
"github.com/kavu/go_reuseport"
)
// Listen address and TLS material paths; fixed at build time.
var listenAddress = "127.0.0.1:8043"
var privateKeyPath = "server.key"
var certChainPath = "server.crt"
var caBundlePath = "ca-bundle.crt"
// init prefixes every log line with this process's PID so that logs
// from the parent and a re-exec'd child can be told apart.
func init() {
	log.SetPrefix(fmt.Sprintf("[%5d] ", os.Getpid()))
}
func panicOnError(err error) {
if err != nil {
panic(err)
}
}
// parseCertificates extracts every PEM block from data, validates that
// each parses as an X.509 certificate, and returns the DER bytes of
// each block in order. Input with no PEM blocks yields (nil, nil).
func parseCertificates(data []byte) (certs [][]byte, err error) {
	rest := data
	for {
		var block *pem.Block
		block, rest = pem.Decode(rest)
		if block == nil {
			return
		}
		if _, err = x509.ParseCertificate(block.Bytes); err != nil {
			return
		}
		certs = append(certs, block.Bytes)
	}
}
// parsePrivateKey decodes the first PEM block in data and parses it as
// a PKCS#1 (RSA) private key.
func parsePrivateKey(data []byte) (key crypto.PrivateKey, err error) {
	block, _ := pem.Decode(data)
	if block == nil {
		return nil, errors.New("invalid private key pem")
	}
	return x509.ParsePKCS1PrivateKey(block.Bytes)
}
// main loads the TLS material (CA bundle, RSA key, certificate chain),
// builds a mutual-TLS listener on a reusable-port socket, starts the
// accept loop plus the signal handlers, and blocks until all
// connections have drained after a SIGTERM.
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())

	// -graceful marks this process as a re-exec'd child; once started it
	// SIGTERMs the parent so the parent stops accepting (see reexec).
	var gracefulChild bool
	flag.BoolVar(&gracefulChild, "graceful", false, "send sigterm to parent after startup")
	flag.Parse()

	// CA bundle used both as root and client CA pool (mutual TLS).
	caBundleBytes, err := ioutil.ReadFile(caBundlePath)
	panicOnError(err)
	caBundle := x509.NewCertPool()
	caBundle.AppendCertsFromPEM(caBundleBytes)

	privateKeyBytes, err := ioutil.ReadFile(privateKeyPath)
	panicOnError(err)
	privateKey, err := parsePrivateKey(privateKeyBytes)
	panicOnError(err)

	certChainBytes, err := ioutil.ReadFile(certChainPath)
	panicOnError(err)
	certChain, err := parseCertificates(certChainBytes)
	panicOnError(err)

	certAndKey := []tls.Certificate{
		tls.Certificate{
			Certificate: certChain,
			PrivateKey: privateKey,
		},
	}

	config := tls.Config{
		// Certificates
		Certificates: certAndKey,
		RootCAs: caBundle,
		ClientCAs: caBundle,
		// Options
		ClientAuth: tls.RequireAndVerifyClientCert,
		MinVersion: tls.VersionTLS12,
	}

	// The reusable-port listener lets the re-exec'd child bind while the
	// parent still holds the port, enabling zero-downtime restarts.
	rawListener, err := reuseport.NewReusablePortListener("tcp4", listenAddress)
	panicOnError(err)
	listener := tls.NewListener(rawListener, &config)
	log.Printf("Listening on %s", listenAddress)

	// wg counts the accept loop plus every in-flight connection.
	wg := &sync.WaitGroup{}
	wg.Add(1)
	stopper := make(chan bool, 1)
	go accept(listener, wg, stopper)
	go sigtermHandler(listener, stopper)
	go sigusr1Handler()

	if gracefulChild {
		parent := syscall.Getppid()
		log.Printf("Sending SIGTERM to parent PID %d", parent)
		syscall.Kill(parent, syscall.SIGTERM)
	}

	log.Printf("Startup completed, waiting for connections")
	wg.Wait()
	log.Printf("All connections closed, shutting down")
}
// sigtermHandler waits for a single SIGTERM, then tells the accept
// loop to stop and closes the listening socket so Accept unblocks.
func sigtermHandler(listener net.Listener, stopper chan bool) {
	// Buffered: signal.Notify sends without blocking, so an unbuffered
	// channel can drop a signal delivered before we start receiving
	// (go vet flags the unbuffered form).
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, syscall.SIGTERM)
	<-signals
	stopper <- true
	log.Printf("Got SIGTERM, closing listening socket")
	signal.Stop(signals)
	listener.Close()
}
// sigusr1Handler triggers a graceful re-exec each time SIGUSR1
// arrives; it runs for the life of the process.
func sigusr1Handler() {
	// Buffered: signal.Notify requires a buffered channel or signals
	// delivered while we are busy may be lost.
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, syscall.SIGUSR1)
	for {
		<-signals
		log.Printf("Received SIGUSR1, attempting restart")
		go reexec()
	}
}
// reexec restarts the server by spawning a fresh copy of the current
// binary with -graceful; the child SIGTERMs this process once it is
// listening.
func reexec() {
	path, err := osext.Executable()
	if err != nil {
		log.Printf("Failed to get executable path: %s", err)
		// Without a path there is nothing to exec; previously we fell
		// through and ran exec.Command with an empty path.
		return
	}
	log.Printf("Executing self: %s", path)
	cmd := exec.Command(path, "-graceful")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		log.Printf("Child failed with error: %s", err)
	}
}
// accept runs the listener loop: each successful Accept spawns a
// handler goroutine tracked by wg. A value on stopper ends the loop;
// the signal handler's listener.Close() unblocks the pending Accept.
func accept(listener net.Listener, wg *sync.WaitGroup, stopper chan bool) {
	defer wg.Done()
	defer listener.Close()
	for {
		conn, err := listener.Accept()
		// Check if we're supposed to stop
		select {
		case <-stopper:
			// Don't leak a connection accepted just before shutdown.
			if err == nil && conn != nil {
				conn.Close()
			}
			return
		default:
		}
		if err != nil {
			log.Printf("Error accepting connection: %s", err)
			continue
		}
		wg.Add(1)
		go handle(conn, wg)
	}
	// The trailing "Closing listening socket" log statement was
	// unreachable (go vet: unreachable code) and has been removed.
}
func handle(conn net.Conn, wg *sync.WaitGroup) {
defer wg.Done()
defer conn.Close()
log.Printf("New connection from %s", conn.RemoteAddr())
n, err := io.Copy(os.Stdout, conn)
if err == nil {
log.Printf("Closed connection from %s (success, copied %d bytes total)", conn.RemoteAddr(), n)
} else {
log.Printf("Closed connection from %s (%s)", conn.RemoteAddr(), err)
}
}
Support flags in server
package main
import (
"crypto"
"crypto/tls"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"os"
"os/exec"
"os/signal"
"runtime"
"strings"
"sync"
"syscall"
"github.com/kardianos/osext"
"github.com/kavu/go_reuseport"
"gopkg.in/alecthomas/kingpin.v2"
)
// Command-line flags (kingpin). All TLS material and the listen
// address are required; --graceful is internal to the restart flow.
var (
	listenAddress = kingpin.Flag("addr", "Address and port to listen on").Required().TCP()
	privateKeyPath = kingpin.Flag("key", "Path to private key file (PEM/PKCS1)").Required().String()
	certChainPath = kingpin.Flag("cert", "Path to certificate chain file (PEM/X509)").Required().String()
	caBundlePath = kingpin.Flag("cacert", "Path to CA certificate bundle file (PEM/X509)").Required().String()
	gracefulChild = kingpin.Flag("graceful", "Send SIGTERM to parent after startup (internal)").Bool()
)

// init tags log lines with this process's PID so parent and child can
// be told apart during graceful restarts.
func init() {
	log.SetPrefix(fmt.Sprintf("[%5d] ", os.Getpid()))
}
func panicOnError(err error) {
if err != nil {
panic(err)
}
}
// parseCertificates extracts every DER-encoded certificate from the
// PEM-encoded data, validating each block with x509.ParseCertificate
// before appending its raw bytes. Parsing stops at the first invalid
// certificate, returning the blocks collected so far and the error.
func parseCertificates(data []byte) ([][]byte, error) {
	var certs [][]byte
	rest := data
	for {
		block, remainder := pem.Decode(rest)
		if block == nil {
			return certs, nil
		}
		if _, err := x509.ParseCertificate(block.Bytes); err != nil {
			return certs, err
		}
		certs = append(certs, block.Bytes)
		rest = remainder
	}
}
// parsePrivateKey decodes the first PEM block in data and parses it as
// a PKCS#1 RSA private key. A missing PEM block yields an error.
func parsePrivateKey(data []byte) (crypto.PrivateKey, error) {
	block, _ := pem.Decode(data)
	if block == nil {
		return nil, errors.New("invalid private key pem")
	}
	return x509.ParsePKCS1PrivateKey(block.Bytes)
}
// main parses flags, loads the TLS material, starts the mutually
// authenticated TLS listener, and blocks until connections drain.
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	kingpin.Parse()
	// CA bundle verifies both the server chain and client certs.
	caBundleBytes, err := ioutil.ReadFile(*caBundlePath)
	panicOnError(err)
	caBundle := x509.NewCertPool()
	caBundle.AppendCertsFromPEM(caBundleBytes)
	privateKeyBytes, err := ioutil.ReadFile(*privateKeyPath)
	panicOnError(err)
	privateKey, err := parsePrivateKey(privateKeyBytes)
	panicOnError(err)
	certChainBytes, err := ioutil.ReadFile(*certChainPath)
	panicOnError(err)
	certChain, err := parseCertificates(certChainBytes)
	panicOnError(err)
	certAndKey := []tls.Certificate{
		tls.Certificate{
			Certificate: certChain,
			PrivateKey:  privateKey,
		},
	}
	// Mutual TLS: clients must present a certificate signed by the CA.
	config := tls.Config{
		// Certificates
		Certificates: certAndKey,
		RootCAs:      caBundle,
		ClientCAs:    caBundle,
		// Options
		ClientAuth: tls.RequireAndVerifyClientCert,
		MinVersion: tls.VersionTLS12,
	}
	// Pick the socket family from the resolved listen address.
	var proto string
	if (*listenAddress).IP.To4() != nil {
		proto = "tcp4"
	} else {
		proto = "tcp6"
	}
	// SO_REUSEPORT lets a restarted child bind while the parent still
	// holds the port, enabling zero-downtime handover.
	rawListener, err := reuseport.NewReusablePortListener(proto, (*listenAddress).String())
	panicOnError(err)
	listener := tls.NewListener(rawListener, &config)
	log.Printf("Listening on %s", *listenAddress)
	wg := &sync.WaitGroup{}
	wg.Add(1)
	stopper := make(chan bool, 1)
	go accept(listener, wg, stopper)
	go sigtermHandler(listener, stopper)
	go sigusr1Handler()
	// As the --graceful child, tell the parent we're up so it can stop
	// accepting and drain its connections.
	if *gracefulChild {
		parent := syscall.Getppid()
		log.Printf("Sending SIGTERM to parent PID %d", parent)
		syscall.Kill(parent, syscall.SIGTERM)
	}
	log.Printf("Startup completed, waiting for connections")
	wg.Wait()
	log.Printf("All connections closed, shutting down")
}
// sigtermHandler waits for a single SIGTERM, then tells the accept
// loop to stop and closes the listening socket so Accept unblocks.
func sigtermHandler(listener net.Listener, stopper chan bool) {
	// Buffered: signal.Notify sends without blocking, so an unbuffered
	// channel can drop a signal delivered before we start receiving
	// (go vet flags the unbuffered form).
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, syscall.SIGTERM)
	<-signals
	stopper <- true
	log.Printf("Got SIGTERM, closing listening socket")
	signal.Stop(signals)
	listener.Close()
}
// sigusr1Handler triggers a graceful re-exec each time SIGUSR1
// arrives; it runs for the life of the process.
func sigusr1Handler() {
	// Buffered: signal.Notify requires a buffered channel or signals
	// delivered while we are busy may be lost.
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, syscall.SIGUSR1)
	for {
		<-signals
		log.Printf("Received SIGUSR1, attempting restart")
		go reexec()
	}
}
// reexec restarts the server by spawning a fresh copy of the current
// binary, forwarding the original CLI flags plus exactly one
// --graceful; the child SIGTERMs this process once it is listening.
func reexec() {
	path, err := osext.Executable()
	if err != nil {
		log.Printf("Failed to get executable path: %s", err)
		// Without a path there is nothing to exec; previously we fell
		// through and ran exec.Command with an empty path.
		return
	}
	// Re-pass the original flags, filtering any existing --graceful so
	// it appears exactly once.
	args := []string{"--graceful"}
	for _, val := range os.Args[1:] {
		if val != "--graceful" {
			args = append(args, val)
		}
	}
	log.Printf("Executing self: %s %s", path, strings.Join(args, " "))
	cmd := exec.Command(path, args...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		log.Printf("Child failed with error: %s", err)
	}
}
// accept runs the listener loop: each successful Accept spawns a
// handler goroutine tracked by wg. A value on stopper ends the loop;
// the signal handler's listener.Close() unblocks the pending Accept.
func accept(listener net.Listener, wg *sync.WaitGroup, stopper chan bool) {
	defer wg.Done()
	defer listener.Close()
	for {
		conn, err := listener.Accept()
		// Check if we're supposed to stop
		select {
		case <-stopper:
			// Don't leak a connection accepted just before shutdown.
			if err == nil && conn != nil {
				conn.Close()
			}
			return
		default:
		}
		if err != nil {
			log.Printf("Error accepting connection: %s", err)
			continue
		}
		wg.Add(1)
		go handle(conn, wg)
	}
	// The trailing "Closing listening socket" log statement was
	// unreachable (go vet: unreachable code) and has been removed.
}
func handle(conn net.Conn, wg *sync.WaitGroup) {
defer wg.Done()
defer conn.Close()
log.Printf("New connection from %s", conn.RemoteAddr())
n, err := io.Copy(os.Stdout, conn)
if err == nil {
log.Printf("Closed connection from %s (success, copied %d bytes total)", conn.RemoteAddr(), n)
} else {
log.Printf("Closed connection from %s (%s)", conn.RemoteAddr(), err)
}
}
|
package main
import (
"io"
"log"
"fmt"
"net/http"
mrand "math/rand"
"crypto/rand"
"code.google.com/p/go.net/websocket"
)
var globalWorld = newWorld()
// handler serves static files relative to the working directory.
// http.ServeFile rejects request paths containing "..", which blocks
// traversal above the current directory.
func handler(w http.ResponseWriter, r *http.Request) {
	http.ServeFile(w, r, "." + r.URL.Path)
}
// Message is the JSON envelope exchanged with clients over the
// websocket: a kind tag plus a free-form payload.
type Message struct {
	Kind    string
	Payload map[string]interface{}
}

// newMessage returns a Message of the given kind with an empty,
// ready-to-use payload map.
func newMessage(kind string) *Message {
	return &Message{
		Kind:    kind,
		Payload: make(map[string]interface{}),
	}
}

// String renders the message for logging.
func (m *Message) String() string {
	return fmt.Sprintf("{kind: %s, payload: %v}", m.Kind, m.Payload)
}
// http://stackoverflow.com/questions/12771930/what-is-the-fastest-way-to-generate-a-long-random-string-in-go
func randString(n int) string {
const alphanum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
var bytes = make([]byte, n)
rand.Read(bytes)
for i, b := range bytes {
bytes[i] = alphanum[b % byte(len(alphanum))]
}
return string(bytes)
}
// Player couples one websocket client to the world: channels for
// inbound/outbound messages, a broadcast feed, and the set of chunks
// the client currently has loaded.
type Player struct {
	w *World
	ws *websocket.Conn
	// in carries messages read from the websocket; out carries
	// messages queued for sending to the client.
	in chan *Message
	out chan *Message
	// name is a random identifier, also used as the entity id.
	name string
	// inBroadcast receives world-wide broadcasts from the world loop.
	inBroadcast chan *Message
	// loadedChunks tracks which chunk coordinates the client holds.
	loadedChunks map[ChunkCoords]bool
}

// newPlayer wires a fresh Player to the global world with buffered
// message channels and a randomly generated name.
func newPlayer(ws *websocket.Conn) *Player {
	p := new(Player)
	p.w = globalWorld
	p.ws = ws
	p.in = make(chan *Message, 10)
	p.out = make(chan *Message, 10)
	p.name = "player-" + randString(10)
	p.inBroadcast = make(chan *Message, 10)
	p.loadedChunks = make(map[ChunkCoords]bool, 0)
	return p
}
// handleIncoming pumps messages from the websocket into p.in until a
// read fails, then closes p.in so wsHandler knows the client is gone.
func (p *Player) handleIncoming() {
	for {
		ms := new(Message)
		err := websocket.JSON.Receive(p.ws, ms)
		if err != nil {
			// io.EOF is a normal disconnect; anything else is logged.
			if err != io.EOF {
				log.Print("Reading websocket message (", p.name, "): ", err)
			}
			close(p.in)
			return
		}
		p.in <- ms
	}
}
// handleOutgoing drains p.out to the websocket; on a send failure the
// channel is closed and the goroutine exits.
func (p *Player) handleOutgoing() {
	for {
		ms := <-p.out
		err := websocket.JSON.Send(p.ws, ms)
		if err != nil {
			log.Print("Sending websocket message (", p.name, "): ", err)
			// NOTE(review): closing p.out while other goroutines may
			// still be sending on it can panic; confirm all senders
			// stop before this path runs.
			close(p.out)
			return
		}
	}
}
// handleChunk answers an explicit chunk request from the client.
func (p *Player) handleChunk(ms *Message) {
	pl := ms.Payload
	pos := pl["ccpos"].(map[string]interface{})
	cc := chunkCoordsFromMap(pos)
	p.sendChunk(cc)
}

// sendChunk queues chunk cc for the client and records it as loaded.
func (p *Player) sendChunk(cc ChunkCoords) {
	ms := newMessage("chunk")
	ms.Payload["ccpos"] = cc.toMap()
	chunk := p.w.requestChunk(cc)
	p.loadedChunks[cc] = true
	ms.Payload["data"] = chunk
	p.out <- ms
}

// sendUnloadChunk tells the client to drop chunk cc and forgets it.
func (p *Player) sendUnloadChunk(cc ChunkCoords) {
	ms := newMessage("unload-chunk")
	ms.Payload["ccpos"] = cc.toMap()
	delete(p.loadedChunks, cc)
	p.out <- ms
}
// handleBlock applies a client block edit to the world and then
// rebroadcasts the same message to all players.
func (p *Player) handleBlock(ms *Message) {
	pl := ms.Payload
	wc := readWorldCoords(pl)
	typ := Block(pl["type"].(float64))
	p.w.changeBlock(wc, typ)
	p.w.broadcast <- ms
}
// handlerPlayerPosition processes a client position update: it
// rebroadcasts the position as an "entity-position" event, then loads
// chunks in a 5x5x5 neighborhood around the player's chunk and unloads
// chunks outside it, randomly staggered to spread the work out.
func (p *Player) handlerPlayerPosition(ms *Message) {
	pl := ms.Payload
	// TODO: Verify position is valid
	// (they didn't move too much in the last
	// couple frames, and they are not currently
	// in the ground).
	wc := readWorldCoords(pl["pos"].(map[string]interface{}))
	pl["id"] = p.name
	ms.Kind = "entity-position"
	p.w.broadcast <- ms
	cc := wc.Chunk()
	for x := -2; x <= 2; x++ {
		for y := -2; y <= 2; y++ {
			for z := -2; z <= 2; z++ {
				newCC := ChunkCoords{
					x: cc.x + x,
					y: cc.y + y,
					z: cc.z + z,
				}
				if p.loadedChunks[newCC] != true {
					// Terrible hack to stagger chunk loading
					// (~10% chance per position update).
					if (mrand.Float32() > 0.9) {
						p.sendChunk(newCC)
					}
				}
			}
		}
	}
	for lcc := range p.loadedChunks {
		if lcc.x < cc.x - 2 || lcc.x > cc.x + 2 ||
			lcc.y < cc.y - 2 || lcc.y > cc.y + 2 ||
			lcc.z < cc.z - 2 || lcc.z > cc.z + 2 {
			// Terrible hack to stagger chunk unloading
			if (mrand.Float32() > 0.9) {
				p.sendUnloadChunk(lcc)
			}
		}
	}
}
// wsHandler owns the lifetime of a single player connection: it
// registers the player with the world, pumps messages between the
// websocket and the world, and unregisters on exit.
func wsHandler(ws *websocket.Conn) {
	p := newPlayer(ws)
	globalWorld.register <- p
	defer func() {
		globalWorld.unregister <- p
	}()
	go p.handleIncoming()
	go p.handleOutgoing()
	for {
		select {
		case m := <-p.in:
			// p.in is closed by handleIncoming on read error/EOF; a
			// nil message means the client is gone.
			if m == nil {
				return
			}
			switch m.Kind {
			case "chunk":
				go p.handleChunk(m)
			case "block":
				go p.handleBlock(m)
			case "player-position":
				go p.handlerPlayerPosition(m)
			default:
				// Fixed typo in the log message: "recieved".
				log.Print("Unknown message received from client of kind ", m.Kind)
				continue
			}
		case m := <-p.inBroadcast:
			// Don't echo our own position updates back to ourselves.
			if m.Kind == "entity-position" && p.name == m.Payload["id"] {
				continue
			}
			p.out <- m
		}
	}
}
// main starts the world loop, then serves static files and the game
// websocket on :8080.
func main() {
	go globalWorld.run()
	http.HandleFunc("/", handler)
	http.Handle("/ws", websocket.Handler(wsHandler))
	err := http.ListenAndServe(":8080", nil)
	if err != nil {
		log.Fatal("ListenAndServe:", err)
	}
}
Better chunk staggering.
package main
import (
"io"
"log"
"fmt"
"net/http"
"math"
mrand "math/rand"
"crypto/rand"
"code.google.com/p/go.net/websocket"
)
var globalWorld = newWorld()
func handler(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, "." + r.URL.Path)
}
// Message is the JSON envelope exchanged with clients over the
// websocket: a kind tag plus a free-form payload.
type Message struct {
	Kind    string
	Payload map[string]interface{}
}

// newMessage returns a Message of the given kind with an empty,
// ready-to-use payload map.
func newMessage(kind string) *Message {
	return &Message{
		Kind:    kind,
		Payload: make(map[string]interface{}),
	}
}

// String renders the message for logging.
func (m *Message) String() string {
	return fmt.Sprintf("{kind: %s, payload: %v}", m.Kind, m.Payload)
}
// http://stackoverflow.com/questions/12771930/what-is-the-fastest-way-to-generate-a-long-random-string-in-go
func randString(n int) string {
const alphanum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
var bytes = make([]byte, n)
rand.Read(bytes)
for i, b := range bytes {
bytes[i] = alphanum[b % byte(len(alphanum))]
}
return string(bytes)
}
type Player struct {
w *World
ws *websocket.Conn
in chan *Message
out chan *Message
name string
inBroadcast chan *Message
loadedChunks map[ChunkCoords]bool
}
func newPlayer(ws *websocket.Conn) *Player {
p := new(Player)
p.w = globalWorld
p.ws = ws
p.in = make(chan *Message, 10)
p.out = make(chan *Message, 10)
p.name = "player-" + randString(10)
p.inBroadcast = make(chan *Message, 10)
p.loadedChunks = make(map[ChunkCoords]bool, 0)
return p
}
func (p *Player) handleIncoming() {
for {
ms := new(Message)
err := websocket.JSON.Receive(p.ws, ms)
if err != nil {
if err != io.EOF {
log.Print("Reading websocket message (", p.name, "): ", err)
}
close(p.in)
return
}
p.in <- ms
}
}
func (p *Player) handleOutgoing() {
for {
ms := <-p.out
err := websocket.JSON.Send(p.ws, ms)
if err != nil {
log.Print("Sending websocket message (", p.name, "): ", err)
close(p.out)
return
}
}
}
// handleChunk formerly answered explicit chunk requests; the body is
// disabled now that chunk loading is driven by position updates.
func (p *Player) handleChunk(ms *Message) {
	// pl := ms.Payload
	// pos := pl["ccpos"].(map[string]interface{})
	// cc := chunkCoordsFromMap(pos)
	// p.sendChunk(cc)
}

// sendChunk queues chunk cc for the client and records it as loaded.
func (p *Player) sendChunk(cc ChunkCoords) {
	ms := newMessage("chunk")
	ms.Payload["ccpos"] = cc.toMap()
	chunk := p.w.requestChunk(cc)
	p.loadedChunks[cc] = true
	ms.Payload["data"] = chunk
	p.out <- ms
}

// sendUnloadChunk tells the client to drop chunk cc and forgets it.
func (p *Player) sendUnloadChunk(cc ChunkCoords) {
	ms := newMessage("unload-chunk")
	ms.Payload["ccpos"] = cc.toMap()
	delete(p.loadedChunks, cc)
	p.out <- ms
}
func (p *Player) handleBlock(ms *Message) {
pl := ms.Payload
wc := readWorldCoords(pl)
typ := Block(pl["type"].(float64))
p.w.changeBlock(wc, typ)
p.w.broadcast <- ms
}
// handlerPlayerPosition processes a client position update: it
// rebroadcasts the position as an "entity-position" event, then loads
// chunks in a (2*DIST+1)^3 neighborhood around the player's chunk and
// unloads chunks outside it, randomly staggered to spread the work.
func (p *Player) handlerPlayerPosition(ms *Message) {
	pl := ms.Payload
	// TODO: Verify position is valid
	// (they didn't move too much in the last
	// couple frames, and they are not currently
	// in the ground).
	wc := readWorldCoords(pl["pos"].(map[string]interface{}))
	pl["id"] = p.name
	ms.Kind = "entity-position"
	p.w.broadcast <- ms
	DIST := 2
	cc := wc.Chunk()
	for x := -DIST; x <= DIST; x++ {
		for y := -DIST; y <= DIST; y++ {
			for z := -DIST; z <= DIST; z++ {
				newCC := ChunkCoords{
					x: cc.x + x,
					y: cc.y + y,
					z: cc.z + z,
				}
				if !p.loadedChunks[newCC] {
					// Stagger chunk loading
					// biased towards loading chunks nearest
					// the player's position.
					// NOTE(review): this multiplies by the absolute
					// coordinate difference (cc.x - x) rather than the
					// offset (x), so the bias depends on where in the
					// world the player stands. Confirm whether the
					// offsets were intended, as in the unload loop.
					val := mrand.Float64()
					val *= float64(cc.x - x)
					val *= float64(cc.y - y)
					val *= float64(cc.z - z)
					if math.Abs(val) < 0.3 {
						p.sendChunk(newCC)
					}
				}
			}
		}
	}
	for lcc := range p.loadedChunks {
		if lcc.x < cc.x-DIST || lcc.x > cc.x+DIST ||
			lcc.y < cc.y-DIST || lcc.y > cc.y+DIST ||
			lcc.z < cc.z-DIST || lcc.z > cc.z+DIST {
			// Stagger chunk unloading
			// Should be biased towards unloading
			// chunks furthest from the player.
			// (The trivial abs/max closures that wrapped math.Abs and
			// math.Max were inlined.)
			val := mrand.Float64()
			val *= math.Max(math.Abs(float64(cc.x-lcc.x))-2, 1)
			val *= math.Max(math.Abs(float64(cc.y-lcc.y))-2, 1)
			val *= math.Max(math.Abs(float64(cc.z-lcc.z))-2, 1)
			if math.Abs(val) > 0.9 {
				p.sendUnloadChunk(lcc)
			}
		}
	}
}
// wsHandler owns the lifetime of a single player connection: it
// registers the player with the world, pumps messages between the
// websocket and the world, and unregisters on exit.
func wsHandler(ws *websocket.Conn) {
	p := newPlayer(ws)
	globalWorld.register <- p
	defer func() {
		globalWorld.unregister <- p
	}()
	go p.handleIncoming()
	go p.handleOutgoing()
	for {
		select {
		case m := <-p.in:
			// p.in is closed by handleIncoming on read error/EOF; a
			// nil message means the client is gone.
			if m == nil {
				return
			}
			switch m.Kind {
			case "chunk":
				go p.handleChunk(m)
			case "block":
				go p.handleBlock(m)
			case "player-position":
				go p.handlerPlayerPosition(m)
			default:
				// Fixed typo in the log message: "recieved".
				log.Print("Unknown message received from client of kind ", m.Kind)
				continue
			}
		case m := <-p.inBroadcast:
			// Don't echo our own position updates back to ourselves.
			if m.Kind == "entity-position" && p.name == m.Payload["id"] {
				continue
			}
			p.out <- m
		}
	}
}
func main() {
go globalWorld.run()
http.HandleFunc("/", handler)
http.Handle("/ws", websocket.Handler(wsHandler))
err := http.ListenAndServe(":8080", nil)
if err != nil {
log.Fatal("ListenAndServe:", err)
}
}
|
package server
import (
"encoding/json"
"fmt"
"net"
"net/http"
"os"
"github.com/gtfierro/hod/config"
hod "github.com/gtfierro/hod/db"
"github.com/gtfierro/hod/query"
"github.com/julienschmidt/httprouter"
"github.com/op/go-logging"
"github.com/pkg/profile"
)
// logger
// Package-wide logger instance, configured in init below.
var log *logging.Logger

// set up logging facilities
// init configures a colorized, leveled stderr backend for the "http"
// logging module.
func init() {
	log = logging.MustGetLogger("http")
	var format = "%{color}%{level} %{time:Jan 02 15:04:05} %{shortfile}%{color:reset} ▶ %{message}"
	var logBackend = logging.NewLogBackend(os.Stderr, "", 0)
	logBackendLeveled := logging.AddModuleLevel(logBackend)
	logging.SetBackend(logBackendLeveled)
	logging.SetFormatter(logging.MustStringFormatter(format))
}
// hodServer bundles the database handle, HTTP configuration, and the
// router for the query server.
type hodServer struct {
	db *hod.DB
	port string
	// staticpath is the directory containing the static HTML pages.
	staticpath string
	router *httprouter.Router
}
// StartHodServer configures routes, resolves the listen address from
// cfg, optionally enables profiling, and serves HTTP until a fatal
// error. It does not return under normal operation.
func StartHodServer(db *hod.DB, cfg *config.Config) {
	server := &hodServer{
		db: db,
		port: cfg.ServerPort,
		staticpath: cfg.StaticPath,
	}
	r := httprouter.New()
	// TODO: how do we handle loading in data? Need to have the multiple
	// concurrent buildings issue fixed first, but for now it is sufficient
	// to just have one server per building
	r.POST("/api/query", server.handleQuery)
	r.POST("/api/loadlinks", server.handleLoadLinks)
	r.POST("/api/querydot", server.handleQueryDot)
	r.ServeFiles("/static/*filepath", http.Dir("./server/static"))
	r.GET("/", server.serveQuery)
	r.GET("/query", server.serveQuery)
	r.GET("/help", server.serveHelp)
	r.GET("/visualize", server.serveVisualize)
	server.router = r
	var (
		addrString string
		nettype string
	)
	// check if ipv6
	if cfg.UseIPv6 {
		nettype = "tcp6"
	} else {
		nettype = "tcp4"
	}
	if cfg.Localhost {
		addrString = "localhost:" + server.port
	} else {
		addrString = "0.0.0.0:" + server.port
	}
	address, err := net.ResolveTCPAddr(nettype, addrString)
	if err != nil {
		log.Fatalf("Error resolving address %s (%s)", server.port, err.Error())
	}
	http.Handle("/", server.router)
	log.Notice("Starting HTTP Server on ", addrString)
	srv := &http.Server{
		Addr: address.String(),
	}
	// enable profiling if configured
	if cfg.EnableCPUProfile {
		defer profile.Start(profile.CPUProfile, profile.ProfilePath(".")).Stop()
	} else if cfg.EnableMEMProfile {
		defer profile.Start(profile.MemProfile, profile.ProfilePath(".")).Stop()
	} else if cfg.EnableBlockProfile {
		defer profile.Start(profile.BlockProfile, profile.ProfilePath(".")).Stop()
	}
	log.Fatal(srv.ListenAndServe())
}
// handleQuery parses a query from the request body, runs it against
// the database, and streams the result back as JSON. Parse failures
// are reported as 400 with the error text.
func (srv *hodServer) handleQuery(rw http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	defer req.Body.Close()
	parsed, err := query.Parse(req.Body)
	if err != nil {
		log.Error(err)
		rw.WriteHeader(400)
		rw.Write([]byte(err.Error()))
		return
	}
	// evaluate query
	res := srv.db.RunQuery(parsed)
	encoder := json.NewEncoder(rw)
	rw.Header().Set("Content-Type", "application/json; charset=utf-8")
	err = encoder.Encode(res)
	if err != nil {
		log.Error(err)
		// NOTE(review): Encode has usually already written the 200
		// status line by this point, so this 500 may be ignored.
		rw.WriteHeader(500)
		rw.Write([]byte(err.Error()))
		return
	}
	return
}
// handleLoadLinks decodes a LinkUpdates JSON document from the request
// body and acknowledges how many links will be added and removed.
func (srv *hodServer) handleLoadLinks(rw http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	defer req.Body.Close()
	var updates = new(hod.LinkUpdates)
	decoder := json.NewDecoder(req.Body)
	if err := decoder.Decode(updates); err != nil {
		log.Error(err)
		// A malformed body is a client error; previously this returned
		// 500 (server error).
		rw.WriteHeader(400)
		rw.Write([]byte(err.Error()))
		return
	}
	_, err := rw.Write([]byte(fmt.Sprintf("Adding %d links, Removing %d links", len(updates.Adding), len(updates.Removing))))
	if err != nil {
		// The status line was already sent by the Write above, so a
		// WriteHeader(500) here would be ignored; just log.
		log.Error(err)
	}
}
// serveHelp renders the static help page.
func (srv *hodServer) serveHelp(rw http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	defer req.Body.Close()
	page := srv.staticpath + "/help.html"
	http.ServeFile(rw, req, page)
}
// serveQuery renders the static query page (also the landing page).
func (srv *hodServer) serveQuery(rw http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	defer req.Body.Close()
	log.Debug(srv.staticpath)
	page := srv.staticpath + "/query.html"
	http.ServeFile(rw, req, page)
}
// serveVisualize renders the static visualization page.
func (srv *hodServer) serveVisualize(rw http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	defer req.Body.Close()
	page := srv.staticpath + "/visualize.html"
	http.ServeFile(rw, req, page)
}
// handleQueryDot parses a query from the request body and returns its
// GraphViz DOT rendering; parse failures are 400 with the error text.
func (srv *hodServer) handleQueryDot(rw http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	defer req.Body.Close()
	dot, err := srv.db.QueryToDOT(req.Body)
	if err != nil {
		log.Error(err)
		rw.WriteHeader(400)
		rw.Write([]byte(err.Error()))
		return
	}
	rw.Write([]byte(dot))
	return
}
add static path to static files
package server
import (
"encoding/json"
"fmt"
"net"
"net/http"
"os"
"github.com/gtfierro/hod/config"
hod "github.com/gtfierro/hod/db"
"github.com/gtfierro/hod/query"
"github.com/julienschmidt/httprouter"
"github.com/op/go-logging"
"github.com/pkg/profile"
)
// logger
var log *logging.Logger
// set up logging facilities
func init() {
log = logging.MustGetLogger("http")
var format = "%{color}%{level} %{time:Jan 02 15:04:05} %{shortfile}%{color:reset} ▶ %{message}"
var logBackend = logging.NewLogBackend(os.Stderr, "", 0)
logBackendLeveled := logging.AddModuleLevel(logBackend)
logging.SetBackend(logBackendLeveled)
logging.SetFormatter(logging.MustStringFormatter(format))
}
type hodServer struct {
db *hod.DB
port string
staticpath string
router *httprouter.Router
}
func StartHodServer(db *hod.DB, cfg *config.Config) {
server := &hodServer{
db: db,
port: cfg.ServerPort,
staticpath: cfg.StaticPath,
}
r := httprouter.New()
// TODO: how do we handle loading in data? Need to have the multiple
// concurrent buildings issue fixed first, but for now it is sufficient
// to just have one server per building
r.POST("/api/query", server.handleQuery)
r.POST("/api/loadlinks", server.handleLoadLinks)
r.POST("/api/querydot", server.handleQueryDot)
r.ServeFiles("/static/*filepath", http.Dir(cfg.StaticPath+"/server/static"))
r.GET("/", server.serveQuery)
r.GET("/query", server.serveQuery)
r.GET("/help", server.serveHelp)
r.GET("/visualize", server.serveVisualize)
server.router = r
var (
addrString string
nettype string
)
// check if ipv6
if cfg.UseIPv6 {
nettype = "tcp6"
} else {
nettype = "tcp4"
}
if cfg.Localhost {
addrString = "localhost:" + server.port
} else {
addrString = "0.0.0.0:" + server.port
}
address, err := net.ResolveTCPAddr(nettype, addrString)
if err != nil {
log.Fatalf("Error resolving address %s (%s)", server.port, err.Error())
}
http.Handle("/", server.router)
log.Notice("Starting HTTP Server on ", addrString)
srv := &http.Server{
Addr: address.String(),
}
// enable profiling if configured
if cfg.EnableCPUProfile {
defer profile.Start(profile.CPUProfile, profile.ProfilePath(".")).Stop()
} else if cfg.EnableMEMProfile {
defer profile.Start(profile.MemProfile, profile.ProfilePath(".")).Stop()
} else if cfg.EnableBlockProfile {
defer profile.Start(profile.BlockProfile, profile.ProfilePath(".")).Stop()
}
log.Fatal(srv.ListenAndServe())
}
func (srv *hodServer) handleQuery(rw http.ResponseWriter, req *http.Request, ps httprouter.Params) {
defer req.Body.Close()
parsed, err := query.Parse(req.Body)
if err != nil {
log.Error(err)
rw.WriteHeader(400)
rw.Write([]byte(err.Error()))
return
}
// evaluate query
res := srv.db.RunQuery(parsed)
encoder := json.NewEncoder(rw)
rw.Header().Set("Content-Type", "application/json; charset=utf-8")
err = encoder.Encode(res)
if err != nil {
log.Error(err)
rw.WriteHeader(500)
rw.Write([]byte(err.Error()))
return
}
return
}
// handleLoadLinks decodes a LinkUpdates JSON document from the request
// body and acknowledges how many links will be added and removed.
func (srv *hodServer) handleLoadLinks(rw http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	defer req.Body.Close()
	var updates = new(hod.LinkUpdates)
	decoder := json.NewDecoder(req.Body)
	if err := decoder.Decode(updates); err != nil {
		log.Error(err)
		// A malformed body is a client error; previously this returned
		// 500 (server error).
		rw.WriteHeader(400)
		rw.Write([]byte(err.Error()))
		return
	}
	_, err := rw.Write([]byte(fmt.Sprintf("Adding %d links, Removing %d links", len(updates.Adding), len(updates.Removing))))
	if err != nil {
		// The status line was already sent by the Write above, so a
		// WriteHeader(500) here would be ignored; just log.
		log.Error(err)
	}
}
// serveHelp renders the static help page.
func (srv *hodServer) serveHelp(rw http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	defer req.Body.Close()
	page := srv.staticpath + "/help.html"
	http.ServeFile(rw, req, page)
}
// serveQuery renders the static query page (also the landing page).
func (srv *hodServer) serveQuery(rw http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	defer req.Body.Close()
	log.Debug(srv.staticpath)
	page := srv.staticpath + "/query.html"
	http.ServeFile(rw, req, page)
}
// serveVisualize renders the static visualization page.
func (srv *hodServer) serveVisualize(rw http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	defer req.Body.Close()
	page := srv.staticpath + "/visualize.html"
	http.ServeFile(rw, req, page)
}
func (srv *hodServer) handleQueryDot(rw http.ResponseWriter, req *http.Request, ps httprouter.Params) {
defer req.Body.Close()
dot, err := srv.db.QueryToDOT(req.Body)
if err != nil {
log.Error(err)
rw.WriteHeader(400)
rw.Write([]byte(err.Error()))
return
}
rw.Write([]byte(dot))
return
}
|
// Copyright 2012 Apcera Inc. All rights reserved.
package server
import (
"bufio"
"encoding/json"
"fmt"
"net"
"sync/atomic"
"github.com/apcera/gnatsd/hashmap"
"github.com/apcera/gnatsd/sublist"
)
// Options configures the server's listen address, logging, and
// connection limit. Zero values are filled in by optionDefaults.
type Options struct {
	Host string
	Port int
	Trace bool
	Debug bool
	Logtime bool
	MaxConn int
}

// info is the payload of the INFO line sent to every new client.
type info struct {
	Id string `json:"server_id"`
	Version string `json:"version"`
	Host string `json:"host"`
	Port int `json:"port"`
	AuthRequired bool `json:"auth_required"`
	SslRequired bool `json:"ssl_required"`
	MaxPayload int `json:"max_payload"`
}

// Server holds the subscription list, the client-id counter, and the
// pre-marshalled INFO banner.
type Server struct {
	info info
	infoJson []byte
	sl *sublist.Sublist
	// gcid is the monotonically increasing client id, bumped
	// atomically in createClient.
	gcid uint64
	opts Options
	trace bool
	debug bool
}
// optionDefaults fills the zero-valued fields of opt with the server
// defaults for host, port, and the connection limit.
func optionDefaults(opt *Options) {
	if len(opt.Host) == 0 {
		opt.Host = DEFAULT_HOST
	}
	if opt.Port == 0 {
		opt.Port = DEFAULT_PORT
	}
	if opt.MaxConn == 0 {
		opt.MaxConn = DEFAULT_MAX_CONNECTIONS
	}
}
// New builds a Server from opts: fills defaults, initializes logging,
// and pre-marshals the INFO banner sent to each connecting client.
func New(opts Options) *Server {
	optionDefaults(&opts)
	inf := info{
		Id: genId(),
		Version: VERSION,
		Host: opts.Host,
		Port: opts.Port,
		AuthRequired: false,
		SslRequired: false,
		MaxPayload: MAX_PAYLOAD_SIZE,
	}
	s := &Server{
		info: inf,
		sl: sublist.New(),
		opts: opts,
		debug: opts.Debug,
		trace: opts.Trace,
	}
	// Setup logging with flags
	s.LogInit()
	// Generate the info json
	b, err := json.Marshal(s.info)
	if err != nil {
		Fatalf("Err marshalling INFO JSON: %+v\n", err)
	}
	s.infoJson = []byte(fmt.Sprintf("INFO %s %s", b, CR_LF))
	return s
}
// AcceptLoop binds the configured host:port and serves connections,
// creating a client per accepted socket. It only returns if the
// listener fails permanently.
func (s *Server) AcceptLoop() {
	Logf("Starting nats-server version %s on port %d", VERSION, s.opts.Port)
	hp := fmt.Sprintf("%s:%d", s.opts.Host, s.opts.Port)
	l, e := net.Listen("tcp", hp)
	if e != nil {
		Fatalf("Error listening on port: %d - %v", s.opts.Port, e)
		return
	}
	for {
		conn, err := l.Accept()
		if err != nil {
			if ne, ok := err.(net.Error); ok && ne.Temporary() {
				Logf("Accept error: %v", err)
				continue
			}
			// A permanent failure (e.g. listener closed) previously
			// caused a silent busy-loop here; bail out instead.
			Logf("Accept error: %v", err)
			return
		}
		s.createClient(conn)
	}
}
// clientConnStr renders the remote address of a TCP connection for
// logging; non-TCP (or nil) connections yield "N/A".
func clientConnStr(conn net.Conn) interface{} {
	ip, ok := conn.(*net.TCPConn)
	if !ok {
		return "N/A"
	}
	addr := ip.RemoteAddr().(*net.TCPAddr)
	return []string{fmt.Sprintf("%v, %d", addr.IP, addr.Port)}
}
// createClient wraps an accepted connection in a client with buffered
// reader/writer, assigns it the next client id, sends the INFO
// banner, and starts its read loop.
func (s *Server) createClient(conn net.Conn) *client {
	c := &client{srv: s, conn: conn}
	c.cid = atomic.AddUint64(&s.gcid, 1)
	c.bw = bufio.NewWriterSize(c.conn, defaultBufSize)
	c.br = bufio.NewReaderSize(c.conn, defaultBufSize)
	c.subs = hashmap.New()
	// Enlarge the kernel read buffer for TCP throughput.
	if ip, ok := conn.(*net.TCPConn); ok {
		ip.SetReadBuffer(32768)
	}
	s.sendInfo(c)
	go c.readLoop()
	Debug("Client connection created", clientConnStr(conn), c.cid)
	return c
}
// sendInfo writes the pre-marshalled INFO banner to the client's raw
// connection (bypassing the buffered writer).
func (s *Server) sendInfo(c *client) {
	// FIXME, err
	c.conn.Write(s.infoJson)
}
Made info public
// Copyright 2012 Apcera Inc. All rights reserved.
package server
import (
"bufio"
"encoding/json"
"fmt"
"net"
"sync/atomic"
"github.com/apcera/gnatsd/hashmap"
"github.com/apcera/gnatsd/sublist"
)
// Options configures the server's listen address, logging, and
// connection limit. Zero values are filled in by optionDefaults.
type Options struct {
	Host string
	Port int
	Trace bool
	Debug bool
	Logtime bool
	MaxConn int
}

// Info is the payload of the INFO line sent to every new client.
type Info struct {
	Id string `json:"server_id"`
	Version string `json:"version"`
	Host string `json:"host"`
	Port int `json:"port"`
	AuthRequired bool `json:"auth_required"`
	SslRequired bool `json:"ssl_required"`
	MaxPayload int `json:"max_payload"`
}

// Server holds the subscription list, the client-id counter, and the
// pre-marshalled INFO banner.
type Server struct {
	info Info
	infoJson []byte
	sl *sublist.Sublist
	// gcid is the monotonically increasing client id, bumped
	// atomically in createClient.
	gcid uint64
	opts Options
	trace bool
	debug bool
}
// optionDefaults fills the zero-valued fields of opt with the server
// defaults for host, port, and the connection limit.
func optionDefaults(opt *Options) {
	if len(opt.Host) == 0 {
		opt.Host = DEFAULT_HOST
	}
	if opt.Port == 0 {
		opt.Port = DEFAULT_PORT
	}
	if opt.MaxConn == 0 {
		opt.MaxConn = DEFAULT_MAX_CONNECTIONS
	}
}
func New(opts Options) *Server {
optionDefaults(&opts)
inf := Info{
Id: genId(),
Version: VERSION,
Host: opts.Host,
Port: opts.Port,
AuthRequired: false,
SslRequired: false,
MaxPayload: MAX_PAYLOAD_SIZE,
}
s := &Server{
info: inf,
sl: sublist.New(),
opts: opts,
debug: opts.Debug,
trace: opts.Trace,
}
// Setup logging with flags
s.LogInit()
// Generate the info json
b, err := json.Marshal(s.info)
if err != nil {
Fatalf("Err marshalling INFO JSON: %+v\n", err)
}
s.infoJson = []byte(fmt.Sprintf("INFO %s %s", b, CR_LF))
return s
}
// AcceptLoop binds the configured host:port and serves connections,
// creating a client per accepted socket. It only returns if the
// listener fails permanently.
func (s *Server) AcceptLoop() {
	Logf("Starting nats-server version %s on port %d", VERSION, s.opts.Port)
	hp := fmt.Sprintf("%s:%d", s.opts.Host, s.opts.Port)
	l, e := net.Listen("tcp", hp)
	if e != nil {
		Fatalf("Error listening on port: %d - %v", s.opts.Port, e)
		return
	}
	for {
		conn, err := l.Accept()
		if err != nil {
			if ne, ok := err.(net.Error); ok && ne.Temporary() {
				Logf("Accept error: %v", err)
				continue
			}
			// A permanent failure (e.g. listener closed) previously
			// caused a silent busy-loop here; bail out instead.
			Logf("Accept error: %v", err)
			return
		}
		s.createClient(conn)
	}
}
// clientConnStr renders the remote address of a TCP connection for
// logging; non-TCP (or nil) connections yield "N/A".
func clientConnStr(conn net.Conn) interface{} {
	ip, ok := conn.(*net.TCPConn)
	if !ok {
		return "N/A"
	}
	addr := ip.RemoteAddr().(*net.TCPAddr)
	return []string{fmt.Sprintf("%v, %d", addr.IP, addr.Port)}
}
func (s *Server) createClient(conn net.Conn) *client {
c := &client{srv: s, conn: conn}
c.cid = atomic.AddUint64(&s.gcid, 1)
c.bw = bufio.NewWriterSize(c.conn, defaultBufSize)
c.br = bufio.NewReaderSize(c.conn, defaultBufSize)
c.subs = hashmap.New()
if ip, ok := conn.(*net.TCPConn); ok {
ip.SetReadBuffer(32768)
}
s.sendInfo(c)
go c.readLoop()
Debug("Client connection created", clientConnStr(conn), c.cid)
return c
}
func (s *Server) sendInfo(c *client) {
// FIXME, err
c.conn.Write(s.infoJson)
}
|
package server
import (
"encoding/json"
"errors"
"fmt"
"io"
"log"
"net"
"net/http"
"os"
"os/exec"
"runtime"
"time"
"github.com/99designs/aws-vault/vault"
)
const (
metadataBind = "169.254.169.254:80"
awsTimeFormat = "2006-01-02T15:04:05Z"
localServerUrl = "http://127.0.0.1:9099"
localServerBind = "127.0.0.1:9099"
)
// StartMetadataServer installs the 169.254.169.254 network alias and
// serves a minimal EC2 instance-metadata API that proxies credential
// requests to the local credentials server. Blocks in http.Serve.
func StartMetadataServer() error {
	if _, err := installNetworkAlias(); err != nil {
		return err
	}
	router := http.NewServeMux()
	router.HandleFunc("/latest/meta-data/iam/security-credentials/", indexHandler)
	router.HandleFunc("/latest/meta-data/iam/security-credentials/local-credentials", credentialsHandler)
	// The AWS Go SDK checks the instance-id endpoint to validate the existence of EC2 Metadata
	router.HandleFunc("/latest/meta-data/instance-id/", instanceIdHandler)
	l, err := net.Listen("tcp", metadataBind)
	if err != nil {
		return err
	}
	log.Printf("Local instance role server running on %s", l.Addr())
	return http.Serve(l, router)
}
// metadataHandler embeds http.Handler; it is not referenced by the
// handlers in this file (candidate for removal — TODO confirm no
// external users).
type metadataHandler struct {
	http.Handler
}
func indexHandler(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "local-credentials")
}
// credentialsHandler proxies a metadata credentials request to the
// local credentials server and relays its response body verbatim.
func credentialsHandler(w http.ResponseWriter, r *http.Request) {
	// Bound the upstream fetch: http.Get uses the default client,
	// which has no timeout, so a wedged credentials server would hang
	// metadata requests forever.
	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Get(localServerUrl)
	if err != nil {
		http.Error(w, err.Error(), http.StatusGatewayTimeout)
		return
	}
	defer resp.Body.Close()
	log.Printf("Fetched credentials from %s", localServerUrl)
	w.Header().Set("Content-Type", "text/plain")
	w.WriteHeader(http.StatusOK)
	io.Copy(w, resp.Body)
}
func instanceIdHandler(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "aws-vault")
}
func checkServerRunning(bind string) bool {
_, err := net.DialTimeout("tcp", bind, time.Millisecond*10)
return err == nil
}
// StartCredentialProxyOnWindows launches `aws-vault server` in the
// background and verifies, after a short grace period, that the
// metadata endpoint is reachable.
func StartCredentialProxyOnWindows() error {
	log.Printf("Starting `aws-vault server` in the background")
	cmd := exec.Command(os.Args[0], "server")
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Start(); err != nil {
		return err
	}
	// Give the child a moment to bind the metadata address.
	time.Sleep(time.Second * 1)
	if !checkServerRunning(metadataBind) {
		return errors.New("The credential proxy server isn't running. Run aws-vault server as Administrator in the background and then try this command again")
	}
	return nil
}
// StartCredentialProxyWithSudo launches `aws-vault server` as root via
// `sudo -b` (background) and waits for sudo itself to return.
func StartCredentialProxyWithSudo() error {
	log.Printf("Starting `aws-vault server` as root in the background")
	proxy := exec.Command("sudo", "-b", os.Args[0], "server")
	proxy.Stdin = os.Stdin
	proxy.Stdout = os.Stdout
	proxy.Stderr = os.Stderr
	return proxy.Run()
}
// StartCredentialProxy picks the platform-appropriate way to start the
// credential proxy server.
func StartCredentialProxy() error {
	switch runtime.GOOS {
	case "windows":
		return StartCredentialProxyOnWindows()
	default:
		return StartCredentialProxyWithSudo()
	}
}
// StartCredentialsServer ensures the metadata proxy is running and then
// serves the temporary credentials held in creds over local HTTP for
// the proxy to relay.
//
// Fix: requests are now rejected unless they originate from localhost.
// Without this check, clients on the same network segment could
// potentially route traffic via 169.254.169.254:80 and obtain
// credentials (aws-vault issue #198).
func StartCredentialsServer(creds *vault.VaultCredentials) error {
	if !checkServerRunning(metadataBind) {
		if err := StartCredentialProxy(); err != nil {
			return err
		}
	}
	l, err := net.Listen("tcp", localServerBind)
	if err != nil {
		return err
	}
	log.Printf("Local instance role server running on %s", l.Addr())
	go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ip, _, err := net.SplitHostPort(r.RemoteAddr)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		// Only localhost may fetch credentials.
		if ip != "127.0.0.1" {
			http.Error(w, "Access denied from non-localhost address", http.StatusUnauthorized)
			return
		}
		log.Printf("Credentials.IsExpired() = %#v", creds.IsExpired())
		val, err := creds.Get()
		if err != nil {
			http.Error(w, err.Error(), http.StatusGatewayTimeout)
			return
		}
		// Only the last four characters of the key id are logged.
		log.Printf("Serving credentials via http ****************%s, expiration of %s (%s)",
			val.AccessKeyID[len(val.AccessKeyID)-4:],
			creds.Expires().Format(awsTimeFormat),
			creds.Expires().Sub(time.Now()).String())
		json.NewEncoder(w).Encode(map[string]interface{}{
			"Code":            "Success",
			"LastUpdated":     time.Now().Format(awsTimeFormat),
			"Type":            "AWS-HMAC",
			"AccessKeyId":     val.AccessKeyID,
			"SecretAccessKey": val.SecretAccessKey,
			"Token":           val.SessionToken,
			"Expiration":      creds.Expires().Format(awsTimeFormat),
		})
	}))
	return nil
}
Add a check that the remote address is localhost
See #198 for more details.
package server
import (
"encoding/json"
"errors"
"fmt"
"io"
"log"
"net"
"net/http"
"os"
"os/exec"
"runtime"
"time"
"github.com/99designs/aws-vault/vault"
)
const (
	// metadataBind is the EC2 metadata service address the proxy binds to.
	metadataBind = "169.254.169.254:80"
	// awsTimeFormat is the timestamp layout used in credential responses.
	awsTimeFormat = "2006-01-02T15:04:05Z"
	// localServerUrl / localServerBind address the unprivileged local
	// credentials server that the metadata proxy forwards to.
	localServerUrl = "http://127.0.0.1:9099"
	localServerBind = "127.0.0.1:9099"
)
// StartMetadataServer installs the 169.254.169.254 network alias and
// serves the EC2 instance-metadata endpoints used for credential lookup.
// It blocks serving HTTP until the listener fails.
func StartMetadataServer() error {
	if _, err := installNetworkAlias(); err != nil {
		return err
	}
	mux := http.NewServeMux()
	mux.HandleFunc("/latest/meta-data/iam/security-credentials/", indexHandler)
	mux.HandleFunc("/latest/meta-data/iam/security-credentials/local-credentials", credentialsHandler)
	// The AWS Go SDK checks the instance-id endpoint to validate the
	// existence of EC2 Metadata.
	mux.HandleFunc("/latest/meta-data/instance-id/", instanceIdHandler)
	listener, err := net.Listen("tcp", metadataBind)
	if err != nil {
		return err
	}
	log.Printf("Local instance role server running on %s", listener.Addr())
	return http.Serve(listener, mux)
}
// metadataHandler wraps an http.Handler for the metadata endpoints.
// NOTE(review): not referenced anywhere in this file — the ServeMux is
// used directly; confirm whether this type can be removed.
type metadataHandler struct {
	http.Handler
}
func indexHandler(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "local-credentials")
}
// credentialsHandler proxies the EC2-style credentials request to the
// local credentials server at localServerUrl and relays the response.
//
// Fixes: the upstream status code is now forwarded instead of always
// replying 200 OK (so upstream failures are visible to clients), and a
// relay error from io.Copy is logged rather than silently dropped.
func credentialsHandler(w http.ResponseWriter, r *http.Request) {
	resp, err := http.Get(localServerUrl)
	if err != nil {
		http.Error(w, err.Error(), http.StatusGatewayTimeout)
		return
	}
	defer resp.Body.Close()
	log.Printf("Fetched credentials from %s", localServerUrl)
	w.Header().Set("Content-Type", "text/plain")
	// Propagate the upstream status so errors are not masked as 200 OK.
	w.WriteHeader(resp.StatusCode)
	if _, err := io.Copy(w, resp.Body); err != nil {
		log.Printf("Error relaying credentials response: %s", err)
	}
}
func instanceIdHandler(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "aws-vault")
}
func checkServerRunning(bind string) bool {
_, err := net.DialTimeout("tcp", bind, time.Millisecond*10)
return err == nil
}
// StartCredentialProxyOnWindows launches `aws-vault server` as a
// background child process and verifies that it came up.
func StartCredentialProxyOnWindows() error {
	log.Printf("Starting `aws-vault server` in the background")
	// Re-exec ourselves with the "server" subcommand; stdio is inherited
	// so the child's output stays visible.
	cmd := exec.Command(os.Args[0], "server")
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Start(); err != nil {
		return err
	}
	// Give the child a moment to bind before probing it.
	time.Sleep(time.Second * 1)
	if !checkServerRunning(metadataBind) {
		return errors.New("The credential proxy server isn't running. Run aws-vault server as Administrator in the background and then try this command again")
	}
	return nil
}
// StartCredentialProxyWithSudo launches `aws-vault server` as root via
// `sudo -b` (background). Run returns once sudo itself exits; the
// server keeps running detached in the background.
func StartCredentialProxyWithSudo() error {
	log.Printf("Starting `aws-vault server` as root in the background")
	cmd := exec.Command("sudo", "-b", os.Args[0], "server")
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}
// StartCredentialProxy starts the metadata proxy using the
// platform-appropriate privilege-escalation mechanism.
func StartCredentialProxy() error {
	switch runtime.GOOS {
	case "windows":
		return StartCredentialProxyOnWindows()
	default:
		return StartCredentialProxyWithSudo()
	}
}
// StartCredentialsServer ensures the metadata proxy is running, then
// serves the temporary credentials held in creds over local HTTP for
// the proxy to relay. Only localhost may fetch credentials.
func StartCredentialsServer(creds *vault.VaultCredentials) error {
	if !checkServerRunning(metadataBind) {
		if err := StartCredentialProxy(); err != nil {
			return err
		}
	}
	l, err := net.Listen("tcp", localServerBind)
	if err != nil {
		return err
	}
	log.Printf("Local instance role server running on %s", l.Addr())
	go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ip, _, err := net.SplitHostPort(r.RemoteAddr)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		// Must make sure the remote ip is localhost, otherwise clients on the same network segment could
		// potentially route traffic via 169.254.169.254:80
		// NOTE(review): the listener binds 127.0.0.1, so IPv4 loopback is
		// the only expected source here; "::1" would be rejected.
		if ip != `127.0.0.1` {
			http.Error(w, "Access denied from non-localhost address", http.StatusUnauthorized)
			return
		}
		log.Printf("RemoteAddr = %v", r.RemoteAddr)
		log.Printf("Credentials.IsExpired() = %#v", creds.IsExpired())
		val, err := creds.Get()
		if err != nil {
			http.Error(w, err.Error(), http.StatusGatewayTimeout)
			return
		}
		// Only the last four characters of the key id are logged.
		log.Printf("Serving credentials via http ****************%s, expiration of %s (%s)",
			val.AccessKeyID[len(val.AccessKeyID)-4:],
			creds.Expires().Format(awsTimeFormat),
			creds.Expires().Sub(time.Now()).String())
		json.NewEncoder(w).Encode(map[string]interface{}{
			"Code":            "Success",
			"LastUpdated":     time.Now().Format(awsTimeFormat),
			"Type":            "AWS-HMAC",
			"AccessKeyId":     val.AccessKeyID,
			"SecretAccessKey": val.SecretAccessKey,
			"Token":           val.SessionToken,
			"Expiration":      creds.Expires().Format(awsTimeFormat),
		})
	}))
	return nil
}
|
package server
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"strconv"
"strings"
"github.com/seiflotfy/skizze/config"
"github.com/seiflotfy/skizze/counters"
"github.com/seiflotfy/skizze/storage"
"github.com/seiflotfy/skizze/utils"
)
// requestData is the decoded JSON request body plus routing information
// derived from the URL path by ServeHTTP.
type requestData struct {
	id  string // sketch identifier, taken from the URL path (not JSON)
	typ string // sketch type, taken from the URL path (not JSON)
	Capacity uint64 `json:"capacity"`
	Values []string `json:"values"`
}
// logger is the shared application logger.
var logger = utils.GetLogger()
// counterManager performs all sketch CRUD and counting operations;
// it is set once by New.
var counterManager *counters.ManagerStruct
/*
Server manages the http connections and communicates with the counters manager
*/
type Server struct{}
// sketchesResult is the JSON response listing all sketches.
// NOTE(review): the error interface has no exported fields, so a
// non-nil Error marshals as "{}" — confirm whether a string message
// was intended.
type sketchesResult struct {
	Result []string `json:"result"`
	Error error `json:"error"`
}
// sketchResult is the JSON response for a single-sketch operation.
// NOTE(review): as with sketchesResult, a non-nil Error marshals as
// "{}" — confirm whether a string message was intended.
type sketchResult struct {
	Result interface{} `json:"result"`
	Error error `json:"error"`
}
// New returns a new Server backed by the shared counter manager,
// or an error if the manager cannot be obtained.
func New() (*Server, error) {
	manager, err := counters.GetManager()
	if err != nil {
		return nil, err
	}
	counterManager = manager
	return &Server{}, nil
}
// handleTopRequest serves requests addressed to the top-level path:
// GET lists all sketches, MERGE is reserved (501), anything else 400.
func (srv *Server) handleTopRequest(w http.ResponseWriter, method string, data requestData) {
	var err error
	var sketches []string
	var js []byte
	switch {
	case method == "GET":
		// Get all counters
		sketches, err = counterManager.GetSketches()
		// NOTE(review): err is overwritten by Marshal's error on the next
		// line; a GetSketches failure only reaches the client inside the
		// marshaled payload (where an error marshals as "{}"). Confirm.
		js, err = json.Marshal(sketchesResult{sketches, err})
		logger.Info.Printf("[%v]: Getting all available sketches", method)
	case method == "MERGE":
		// Reserved for merging hyper log log
		http.Error(w, "Not Implemented", http.StatusNotImplemented)
		return
	default:
		http.Error(w, "Invalid Method: "+method, http.StatusBadRequest)
		return
	}
	if err == nil {
		w.Header().Set("Content-Type", "application/json")
		w.Write(js)
	} else {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
// handleSketchRequest serves per-sketch requests ("/{type}/{id}"):
// GET counts values, POST creates, PUT adds values, PURGE removes
// values, DELETE drops the sketch; anything else is a 400.
func (srv *Server) handleSketchRequest(w http.ResponseWriter, method string, data requestData) {
	var res sketchResult
	var err error
	// TODO (mb): handle errors from counterManager.*
	switch {
	case method == "GET":
		// Get a count for a specific sketch
		// (the := shadows the outer err deliberately; the error is
		// carried to the client inside res)
		count, err := counterManager.GetCountForSketch(data.id, data.typ, data.Values)
		logger.Info.Printf("[%v]: Getting counter for sketch: %v", method, data.id)
		res = sketchResult{count, err}
	case method == "POST":
		// Create a new sketch counter
		err = counterManager.CreateSketch(data.id, data.typ, data.Capacity)
		logger.Info.Printf("[%v]: Creating new sketch: %v", method, data.id)
		res = sketchResult{0, err}
	case method == "PUT":
		// Add values to counter
		err = counterManager.AddToSketch(data.id, data.typ, data.Values)
		logger.Info.Printf("[%v]: Updating counter for sketch: %v", method, data.id)
		res = sketchResult{nil, err}
	case method == "PURGE":
		// Purges values from counter
		err = counterManager.DeleteFromSketch(data.id, data.typ, data.Values)
		logger.Info.Printf("[%v]: Purging values for sketch: %v", method, data.id)
		res = sketchResult{nil, err}
	case method == "DELETE":
		// Delete Counter
		err := counterManager.DeleteSketch(data.id, data.typ)
		logger.Info.Printf("[%v]: Deleting sketch: %v", method, data.id)
		res = sketchResult{nil, err}
	default:
		logger.Error.Printf("[%v]: Invalid Method: %v", method, http.StatusBadRequest)
		http.Error(w, fmt.Sprintf("Invalid Method: %s", method), http.StatusBadRequest)
		return
	}
	// The HTTP status reflects only the marshal error; handler errors
	// are reported inside the marshaled res.
	js, err := json.Marshal(res)
	if err == nil {
		w.Header().Set("Content-Type", "application/json")
		w.Write(js)
	} else {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
// ServeHTTP routes requests by path depth: one segment hits the
// top-level handler, two segments ("/{type}/{id}") hit the per-sketch
// handler. A JSON body, if present, is decoded into requestData.
func (srv *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	method := r.Method
	paths := strings.Split(r.URL.Path[1:], "/")
	// NOTE(review): the read error is discarded; a failed body read is
	// treated the same as an empty body.
	body, _ := ioutil.ReadAll(r.Body)
	var data requestData
	if len(body) > 0 {
		err := json.Unmarshal(body, &data)
		if err != nil {
			logger.Error.Printf("An error has ocurred: %v", err.Error())
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
	} else {
		data = requestData{}
	}
	if len(paths) == 1 {
		srv.handleTopRequest(w, method, data)
	} else if len(paths) == 2 {
		data.typ = strings.TrimSpace(string(paths[0]))
		data.id = strings.TrimSpace(strings.Join(paths[1:], "/"))
		srv.handleSketchRequest(w, method, data)
	}
	// NOTE(review): paths deeper than two segments fall through with an
	// empty 200 response — confirm whether a 404 was intended.
}
// Run starts the HTTP server on the configured port and blocks.
//
// Fix: the error returned by http.ListenAndServe (e.g. port already in
// use) is now logged instead of being silently discarded.
func (srv *Server) Run() {
	conf := config.GetConfig()
	port := int(conf.GetPort())
	logger.Info.Println("Server up and running on port: " + strconv.Itoa(port))
	if err := http.ListenAndServe(":"+strconv.Itoa(port), srv); err != nil {
		logger.Error.Println("Server failed: " + err.Error())
	}
}
// Stop shuts the whole process down: it closes the info DB and exits.
// NOTE(review): os.Exit(0) skips deferred cleanup elsewhere and makes
// this method unreturnable — confirm callers expect termination.
func (srv *Server) Stop() {
	logger.Info.Println("Stopping server...")
	storage.CloseInfoDB()
	os.Exit(0)
}
Add new storage and error parameters for creating sketches
package server
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"strconv"
"strings"
"github.com/seiflotfy/skizze/config"
"github.com/seiflotfy/skizze/counters"
"github.com/seiflotfy/skizze/storage"
"github.com/seiflotfy/skizze/utils"
)
// requestData is the decoded JSON request body plus routing information
// derived from the URL path by ServeHTTP.
type requestData struct {
	id  string // sketch identifier, taken from the URL path (not JSON)
	typ string // sketch type, taken from the URL path (not JSON)
	Capacity uint64 `json:"capacity"`
	// NOTE(review): Error and Storage are decoded here but not yet
	// forwarded to counterManager.CreateSketch — confirm they get wired
	// through.
	Error float64 `json:"error"`
	Storage string `json:"storage"`
	Values []string `json:"values"`
}
// logger is the shared application logger.
var logger = utils.GetLogger()
// counterManager performs all sketch CRUD and counting operations;
// it is set once by New.
var counterManager *counters.ManagerStruct
/*
Server manages the http connections and communicates with the counters manager
*/
type Server struct{}
// sketchesResult is the JSON response listing all sketches.
// NOTE(review): the error interface has no exported fields, so a
// non-nil Error marshals as "{}" — confirm whether a string message
// was intended.
type sketchesResult struct {
	Result []string `json:"result"`
	Error error `json:"error"`
}
// sketchResult is the JSON response for a single-sketch operation.
// NOTE(review): as with sketchesResult, a non-nil Error marshals as
// "{}" — confirm whether a string message was intended.
type sketchResult struct {
	Result interface{} `json:"result"`
	Error error `json:"error"`
}
// New returns a new Server backed by the shared counter manager,
// or an error if the manager cannot be obtained.
func New() (*Server, error) {
	manager, err := counters.GetManager()
	if err != nil {
		return nil, err
	}
	counterManager = manager
	return &Server{}, nil
}
// handleTopRequest serves requests addressed to the top-level path:
// GET lists all sketches, MERGE is reserved (501), anything else 400.
func (srv *Server) handleTopRequest(w http.ResponseWriter, method string, data requestData) {
	var err error
	var sketches []string
	var js []byte
	switch {
	case method == "GET":
		// Get all counters
		sketches, err = counterManager.GetSketches()
		// NOTE(review): err is overwritten by Marshal's error on the next
		// line; a GetSketches failure only reaches the client inside the
		// marshaled payload (where an error marshals as "{}"). Confirm.
		js, err = json.Marshal(sketchesResult{sketches, err})
		logger.Info.Printf("[%v]: Getting all available sketches", method)
	case method == "MERGE":
		// Reserved for merging hyper log log
		http.Error(w, "Not Implemented", http.StatusNotImplemented)
		return
	default:
		http.Error(w, "Invalid Method: "+method, http.StatusBadRequest)
		return
	}
	if err == nil {
		w.Header().Set("Content-Type", "application/json")
		w.Write(js)
	} else {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
// handleSketchRequest serves per-sketch requests ("/{type}/{id}"):
// GET counts values, POST creates, PUT adds values, PURGE removes
// values, DELETE drops the sketch; anything else is a 400.
// NOTE(review): data.Error and data.Storage are available but not yet
// passed to CreateSketch — confirm against the commit intent.
func (srv *Server) handleSketchRequest(w http.ResponseWriter, method string, data requestData) {
	var res sketchResult
	var err error
	// TODO (mb): handle errors from counterManager.*
	switch {
	case method == "GET":
		// Get a count for a specific sketch
		// (the := shadows the outer err deliberately; the error is
		// carried to the client inside res)
		count, err := counterManager.GetCountForSketch(data.id, data.typ, data.Values)
		logger.Info.Printf("[%v]: Getting counter for sketch: %v", method, data.id)
		res = sketchResult{count, err}
	case method == "POST":
		// Create a new sketch counter
		err = counterManager.CreateSketch(data.id, data.typ, data.Capacity)
		logger.Info.Printf("[%v]: Creating new sketch: %v", method, data.id)
		res = sketchResult{0, err}
	case method == "PUT":
		// Add values to counter
		err = counterManager.AddToSketch(data.id, data.typ, data.Values)
		logger.Info.Printf("[%v]: Updating counter for sketch: %v", method, data.id)
		res = sketchResult{nil, err}
	case method == "PURGE":
		// Purges values from counter
		err = counterManager.DeleteFromSketch(data.id, data.typ, data.Values)
		logger.Info.Printf("[%v]: Purging values for sketch: %v", method, data.id)
		res = sketchResult{nil, err}
	case method == "DELETE":
		// Delete Counter
		err := counterManager.DeleteSketch(data.id, data.typ)
		logger.Info.Printf("[%v]: Deleting sketch: %v", method, data.id)
		res = sketchResult{nil, err}
	default:
		logger.Error.Printf("[%v]: Invalid Method: %v", method, http.StatusBadRequest)
		http.Error(w, fmt.Sprintf("Invalid Method: %s", method), http.StatusBadRequest)
		return
	}
	// The HTTP status reflects only the marshal error; handler errors
	// are reported inside the marshaled res.
	js, err := json.Marshal(res)
	if err == nil {
		w.Header().Set("Content-Type", "application/json")
		w.Write(js)
	} else {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
// ServeHTTP routes requests by path depth: one segment hits the
// top-level handler, two segments ("/{type}/{id}") hit the per-sketch
// handler. A JSON body, if present, is decoded into requestData.
func (srv *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	method := r.Method
	paths := strings.Split(r.URL.Path[1:], "/")
	// NOTE(review): the read error is discarded; a failed body read is
	// treated the same as an empty body.
	body, _ := ioutil.ReadAll(r.Body)
	var data requestData
	if len(body) > 0 {
		err := json.Unmarshal(body, &data)
		if err != nil {
			logger.Error.Printf("An error has ocurred: %v", err.Error())
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
	} else {
		data = requestData{}
	}
	if len(paths) == 1 {
		srv.handleTopRequest(w, method, data)
	} else if len(paths) == 2 {
		data.typ = strings.TrimSpace(string(paths[0]))
		data.id = strings.TrimSpace(strings.Join(paths[1:], "/"))
		srv.handleSketchRequest(w, method, data)
	}
	// NOTE(review): paths deeper than two segments fall through with an
	// empty 200 response — confirm whether a 404 was intended.
}
// Run starts the HTTP server on the configured port and blocks.
//
// Fix: the error returned by http.ListenAndServe (e.g. port already in
// use) is now logged instead of being silently discarded.
func (srv *Server) Run() {
	conf := config.GetConfig()
	port := int(conf.GetPort())
	logger.Info.Println("Server up and running on port: " + strconv.Itoa(port))
	if err := http.ListenAndServe(":"+strconv.Itoa(port), srv); err != nil {
		logger.Error.Println("Server failed: " + err.Error())
	}
}
// Stop shuts the whole process down: it closes the info DB and exits.
// NOTE(review): os.Exit(0) skips deferred cleanup elsewhere and makes
// this method unreturnable — confirm callers expect termination.
func (srv *Server) Stop() {
	logger.Info.Println("Stopping server...")
	storage.CloseInfoDB()
	os.Exit(0)
}
|
// Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"fmt"
log "github.com/Sirupsen/logrus"
"github.com/armon/go-radix"
api "github.com/osrg/gobgp/api"
"github.com/osrg/gobgp/config"
"github.com/osrg/gobgp/packet"
"github.com/osrg/gobgp/table"
"github.com/osrg/gobgp/zebra"
"net"
"os"
"strconv"
"strings"
"time"
)
const (
	// GLOBAL_RIB_NAME is the table-manager owner name of the global RIB.
	GLOBAL_RIB_NAME = "global"
)
// SenderMsg queues BGP messages for delivery to one peer's outgoing
// channel by the sender goroutine in Serve.
// NOTE(review): twoBytesAs is set from the presence of the 4-octet-AS
// capability (see newSenderMsg) and consumers test `== false` before
// downgrading updates — the field name reads inverted; confirm.
type SenderMsg struct {
	messages []*bgp.BGPMessage
	sendCh chan *bgp.BGPMessage
	destination string
	twoBytesAs bool
}
// broadcastMsg is a queued broadcast event that knows how to deliver
// itself (to a gRPC subscriber, BMP collector, zebra, etc.).
type broadcastMsg interface {
	send()
}
// broadcastGrpcMsg delivers one GrpcResponse to a monitoring request;
// done marks the final message of the response stream.
type broadcastGrpcMsg struct {
	req *GrpcRequest
	result *GrpcResponse
	done bool
}
// send delivers the buffered gRPC result to the requester and closes
// the response channel when this is the final message of the stream.
//
// Idiom fix: `m.done == true` simplified to `m.done`.
func (m *broadcastGrpcMsg) send() {
	m.req.ResponseCh <- m.result
	if m.done {
		close(m.req.ResponseCh)
	}
}
// broadcastBGPMsg carries a BGP message plus session metadata to a
// consumer channel; send re-queues the message onto ch.
type broadcastBGPMsg struct {
	message *bgp.BGPMessage
	peerAS uint32
	localAS uint32
	peerAddress net.IP
	localAddress net.IP
	fourBytesAs bool
	ch chan *broadcastBGPMsg
}
// send re-queues the message onto its destination channel.
func (m *broadcastBGPMsg) send() {
	m.ch <- m
}
// BgpServer is the central BGP daemon state: configuration channels fed
// by the config loader, the global RIB, per-neighbor peers, and the
// optional zebra/RPKI/BMP integrations. All mutation happens on the
// single Serve goroutine.
type BgpServer struct {
	bgpConfig config.Bgp
	globalTypeCh chan config.Global // initial global config; Serve blocks on this
	addedPeerCh chan config.Neighbor // new neighbor configurations
	deletedPeerCh chan config.Neighbor // removed neighbor configurations
	updatedPeerCh chan config.Neighbor // changed neighbor configurations
	rpkiConfigCh chan config.RpkiServers
	bmpConfigCh chan config.BmpServers
	dumper *dumper // optional MRT dumper
	GrpcReqCh chan *GrpcRequest
	listenPort int
	policyUpdateCh chan config.RoutingPolicy
	policy *table.RoutingPolicy
	broadcastReqs []*GrpcRequest // active gRPC monitor subscriptions
	broadcastMsgs []broadcastMsg // queued outbound broadcast events
	neighborMap map[string]*Peer
	globalRib *table.TableManager
	zclient *zebra.Client
	roaClient *roaClient
	bmpClient *bmpClient
	bmpConnCh chan *bmpConn
	shutdown bool
}
// NewBgpServer allocates a BgpServer listening on the given port with
// all of its configuration/event channels initialized.
func NewBgpServer(port int) *BgpServer {
	server := &BgpServer{
		globalTypeCh:   make(chan config.Global),
		addedPeerCh:    make(chan config.Neighbor),
		deletedPeerCh:  make(chan config.Neighbor),
		updatedPeerCh:  make(chan config.Neighbor),
		rpkiConfigCh:   make(chan config.RpkiServers),
		bmpConfigCh:    make(chan config.BmpServers),
		bmpConnCh:      make(chan *bmpConn),
		GrpcReqCh:      make(chan *GrpcRequest, 1),
		policyUpdateCh: make(chan config.RoutingPolicy),
		neighborMap:    make(map[string]*Peer),
		listenPort:     port,
	}
	server.roaClient, _ = newROAClient(config.RpkiServers{})
	return server
}
// listenAndAccept listens on proto ("tcp4"/"tcp6") at the given port
// (kept separate per family to avoid mapped IPv6 addresses) and feeds
// accepted connections into ch from a background goroutine.
//
// Fixes: the error from net.ResolveTCPAddr is no longer discarded, and
// the accept loop now terminates instead of busy-spinning forever when
// the listener is closed (AcceptTCP then fails permanently).
func listenAndAccept(proto string, port int, ch chan *net.TCPConn) (*net.TCPListener, error) {
	service := ":" + strconv.Itoa(port)
	addr, err := net.ResolveTCPAddr(proto, service)
	if err != nil {
		log.Info(err)
		return nil, err
	}
	l, err := net.ListenTCP(proto, addr)
	if err != nil {
		log.Info(err)
		return nil, err
	}
	go func() {
		for {
			conn, err := l.AcceptTCP()
			if err != nil {
				log.Info(err)
				// Retry only transient failures; a closed listener
				// returns a permanent error, so stop the goroutine.
				if ne, ok := err.(net.Error); ok && ne.Temporary() {
					continue
				}
				return
			}
			ch <- conn
		}
	}()
	return l, nil
}
// Serve is the main event loop of the BGP server. It blocks until the
// initial global configuration arrives, sets up MRT dumping, zebra
// integration, the per-family TCP listeners and the sender/broadcast
// helper goroutines, then multiplexes every server event (gRPC
// requests, peer config changes, FSM messages, RPKI/BMP events and
// inbound connections) in a single select loop. All server state is
// mutated only on this goroutine.
func (server *BgpServer) Serve() {
	g := <-server.globalTypeCh
	server.bgpConfig.Global = g

	// Optional MRT dump file.
	if g.Mrt.FileName != "" {
		d, err := newDumper(g.Mrt.FileName)
		if err != nil {
			log.Warn(err)
		} else {
			server.dumper = d
		}
	}

	// Optional zebra (quagga) integration.
	if g.Zebra.Enabled == true {
		if g.Zebra.Url == "" {
			g.Zebra.Url = "unix:/var/run/quagga/zserv.api"
		}
		redists := make([]string, 0, len(g.Zebra.RedistributeRouteTypeList))
		for _, t := range g.Zebra.RedistributeRouteTypeList {
			redists = append(redists, t.RouteType)
		}
		err := server.NewZclient(g.Zebra.Url, redists)
		if err != nil {
			log.Error(err)
		}
	}

	// Sender goroutine: serializes outgoing BGP messages to peers,
	// downgrading updates for peers without the 4-octet-AS capability.
	senderCh := make(chan *SenderMsg, 1<<16)
	go func(ch chan *SenderMsg) {
		for {
			// TODO: must be more clever. Slow peer makes other peers slow too.
			m := <-ch
			w := func(c chan *bgp.BGPMessage, msg *bgp.BGPMessage) {
				// nasty but the peer could already become non established state before here.
				defer func() { recover() }()
				c <- msg
			}
			for _, b := range m.messages {
				if m.twoBytesAs == false && b.Header.Type == bgp.BGP_MSG_UPDATE {
					log.WithFields(log.Fields{
						"Topic": "Peer",
						"Key":   m.destination,
						"Data":  b,
					}).Debug("update for 2byte AS peer")
					table.UpdatePathAttrs2ByteAs(b.Body.(*bgp.BGPUpdate))
				}
				w(m.sendCh, b)
			}
		}
	}(senderCh)

	// Broadcast goroutine: fans queued broadcast events out to their
	// consumers.
	broadcastCh := make(chan broadcastMsg, 8)
	go func(ch chan broadcastMsg) {
		for {
			m := <-ch
			m.send()
		}
	}(broadcastCh)

	toRFlist := func(l []config.AfiSafi) []bgp.RouteFamily {
		rfList := []bgp.RouteFamily{}
		for _, rf := range l {
			k, _ := bgp.GetRouteFamily(rf.AfiSafiName)
			rfList = append(rfList, k)
		}
		return rfList
	}
	server.globalRib = table.NewTableManager(GLOBAL_RIB_NAME, toRFlist(g.AfiSafis.AfiSafiList), g.MplsLabelRange.MinLabel, g.MplsLabelRange.MaxLabel)

	// Listen on both address families; fatal only if neither works.
	listenerMap := make(map[string]*net.TCPListener)
	acceptCh := make(chan *net.TCPConn)
	l4, err1 := listenAndAccept("tcp4", server.listenPort, acceptCh)
	listenerMap["tcp4"] = l4
	l6, err2 := listenAndAccept("tcp6", server.listenPort, acceptCh)
	listenerMap["tcp6"] = l6
	if err1 != nil && err2 != nil {
		log.Fatal("can't listen either v4 and v6")
		os.Exit(1)
	}
	listener := func(addr net.IP) *net.TCPListener {
		var l *net.TCPListener
		if addr.To4() != nil {
			l = listenerMap["tcp4"]
		} else {
			l = listenerMap["tcp6"]
		}
		return l
	}

	incoming := make(chan *fsmMsg, 4096)
	var senderMsgs []*SenderMsg

	var zapiMsgCh chan *zebra.Message
	if server.zclient != nil {
		zapiMsgCh = server.zclient.Receive()
	}
	for {
		// Expose the sender/broadcast channels to select only when a
		// message is queued, so an empty queue never sends.
		var firstMsg *SenderMsg
		var sCh chan *SenderMsg
		if len(senderMsgs) > 0 {
			sCh = senderCh
			firstMsg = senderMsgs[0]
		}
		var firstBroadcastMsg broadcastMsg
		var bCh chan broadcastMsg
		if len(server.broadcastMsgs) > 0 {
			bCh = broadcastCh
			firstBroadcastMsg = server.broadcastMsgs[0]
		}

		// passConn hands an inbound connection to its configured peer,
		// validating the configured local address if one is set.
		passConn := func(conn *net.TCPConn) {
			remoteAddr, _, _ := net.SplitHostPort(conn.RemoteAddr().String())
			peer, found := server.neighborMap[remoteAddr]
			if found {
				localAddrValid := func(laddr net.IP) bool {
					if laddr == nil {
						return true
					}
					l := conn.LocalAddr()
					if l == nil {
						// already closed
						return false
					}
					host, _, _ := net.SplitHostPort(l.String())
					if host != laddr.String() {
						log.WithFields(log.Fields{
							"Topic":           "Peer",
							"Key":             remoteAddr,
							"Configured addr": laddr.String(),
							"Addr":            host,
						}).Info("Mismatched local address")
						return false
					}
					return true
				}(peer.conf.Transport.TransportConfig.LocalAddress)
				if localAddrValid == false {
					conn.Close()
					return
				}
				log.Debug("accepted a new passive connection from ", remoteAddr)
				peer.PassConn(conn)
			} else {
				log.Info("can't find configuration for a new passive connection from ", remoteAddr)
				conn.Close()
			}
		}

		// Non-blocking pre-drain of gRPC requests and pending accepts so
		// they are not starved by the blocking select below.
		select {
		case grpcReq := <-server.GrpcReqCh:
			m := server.handleGrpc(grpcReq)
			if len(m) > 0 {
				senderMsgs = append(senderMsgs, m...)
			}
		case conn := <-acceptCh:
			passConn(conn)
		default:
		}

		select {
		case c := <-server.rpkiConfigCh:
			server.roaClient, _ = newROAClient(c)
		case c := <-server.bmpConfigCh:
			server.bmpClient, _ = newBMPClient(c, server.bmpConnCh)
		case c := <-server.bmpConnCh:
			// A BMP collector connected: replay every adj-RIB-in route of
			// each established peer, one update message per path so that
			// per-path timestamps are preserved.
			bmpMsgList := []*bgp.BMPMessage{}
			for _, targetPeer := range server.neighborMap {
				pathList := make([]*table.Path, 0)
				if targetPeer.fsm.state != bgp.BGP_FSM_ESTABLISHED {
					continue
				}
				for _, rf := range targetPeer.configuredRFlist() {
					pathList = append(pathList, targetPeer.adjRib.GetInPathList(rf)...)
				}
				for _, p := range pathList {
					// avoid to merge for timestamp
					u := table.CreateUpdateMsgFromPaths([]*table.Path{p})
					bmpMsgList = append(bmpMsgList, bmpPeerRoute(bgp.BMP_PEER_TYPE_GLOBAL, false, 0, targetPeer.peerInfo, p.GetTimestamp().Unix(), u[0]))
				}
			}
			m := &broadcastBMPMsg{
				ch:      server.bmpClient.send(),
				conn:    c.conn,
				addr:    c.addr,
				msgList: bmpMsgList,
			}
			server.broadcastMsgs = append(server.broadcastMsgs, m)
		case rmsg := <-server.roaClient.recieveROA():
			server.roaClient.handleRTRMsg(rmsg)
		case zmsg := <-zapiMsgCh:
			m := handleZapiMsg(zmsg, server)
			if len(m) > 0 {
				senderMsgs = append(senderMsgs, m...)
			}
		case conn := <-acceptCh:
			passConn(conn)
		case config := <-server.addedPeerCh:
			addr := config.NeighborConfig.NeighborAddress.String()
			_, found := server.neighborMap[addr]
			if found {
				log.Warn("Can't overwrite the exising peer ", addr)
				continue
			}
			SetTcpMD5SigSockopts(listener(config.NeighborConfig.NeighborAddress), addr, config.NeighborConfig.AuthPassword)
			// Route-server clients get a private RIB; everyone else
			// shares the global RIB.
			var loc *table.TableManager
			if config.RouteServer.RouteServerConfig.RouteServerClient {
				loc = table.NewTableManager(config.NeighborConfig.NeighborAddress.String(), toRFlist(config.AfiSafis.AfiSafiList), g.MplsLabelRange.MinLabel, g.MplsLabelRange.MaxLabel)
			} else {
				loc = server.globalRib
			}
			peer := NewPeer(g, config, loc)
			server.setPolicyByConfig(peer, config.ApplyPolicy)
			if peer.isRouteServerClient() {
				// Seed the new client's RIB from the other clients'
				// accepted paths, post import policy.
				pathList := make([]*table.Path, 0)
				rfList := peer.configuredRFlist()
				for _, p := range server.neighborMap {
					if p.isRouteServerClient() == true {
						pathList = append(pathList, p.getAccepted(rfList)...)
					}
				}
				pathList, _ = peer.ApplyPolicy(table.POLICY_DIRECTION_IMPORT, pathList)
				if len(pathList) > 0 {
					peer.localRib.ProcessPaths(pathList)
				}
			}
			server.neighborMap[addr] = peer
			peer.startFSMHandler(incoming)
			server.broadcastPeerState(peer)
		case config := <-server.deletedPeerCh:
			addr := config.NeighborConfig.NeighborAddress.String()
			SetTcpMD5SigSockopts(listener(config.NeighborConfig.NeighborAddress), addr, "")
			peer, found := server.neighborMap[addr]
			if found {
				log.Info("Delete a peer configuration for ", addr)
				go func(addr string) {
					// Watchdogs: crash loudly if the FSM goroutines
					// cannot be reaped within five minutes.
					t := time.AfterFunc(time.Minute*5, func() { log.Fatal("failed to free the fsm.h.t for ", addr) })
					peer.fsm.h.t.Kill(nil)
					peer.fsm.h.t.Wait()
					t.Stop()
					t = time.AfterFunc(time.Minute*5, func() { log.Fatal("failed to free the fsm.h for ", addr) })
					peer.fsm.t.Kill(nil)
					peer.fsm.t.Wait()
					t.Stop()
				}(addr)
				m := server.dropPeerAllRoutes(peer)
				if len(m) > 0 {
					senderMsgs = append(senderMsgs, m...)
				}
				delete(server.neighborMap, addr)
			} else {
				log.Info("Can't delete a peer configuration for ", addr)
			}
		case config := <-server.updatedPeerCh:
			addr := config.NeighborConfig.NeighborAddress.String()
			peer := server.neighborMap[addr]
			peer.conf = config
			server.setPolicyByConfig(peer, config.ApplyPolicy)
		case e := <-incoming:
			peer, found := server.neighborMap[e.MsgSrc]
			if !found {
				log.Warn("Can't find the neighbor ", e.MsgSrc)
				break
			}
			m := server.handleFSMMessage(peer, e, incoming)
			if len(m) > 0 {
				senderMsgs = append(senderMsgs, m...)
			}
		case sCh <- firstMsg:
			senderMsgs = senderMsgs[1:]
		case bCh <- firstBroadcastMsg:
			server.broadcastMsgs = server.broadcastMsgs[1:]
		case grpcReq := <-server.GrpcReqCh:
			m := server.handleGrpc(grpcReq)
			if len(m) > 0 {
				senderMsgs = append(senderMsgs, m...)
			}
		case pl := <-server.policyUpdateCh:
			server.handlePolicy(pl)
		}
	}
}
// newSenderMsg packages messages for delivery to peer's outgoing
// channel, recording whether the peer negotiated 4-octet AS numbers
// (if not, the sender goroutine downgrades updates to 2-byte AS).
// NOTE(review): the value stored in twoBytesAs is the presence of the
// 4-octet capability — the field name reads inverted; confirm.
func newSenderMsg(peer *Peer, messages []*bgp.BGPMessage) *SenderMsg {
	_, y := peer.capMap[bgp.BGP_CAP_FOUR_OCTET_AS_NUMBER]
	return &SenderMsg{
		messages: messages,
		sendCh: peer.outgoing,
		destination: peer.conf.NeighborConfig.NeighborAddress.String(),
		twoBytesAs: y,
	}
}
// filterpath decides, per path, whether it may be advertised to peer:
// it applies iBGP loop avoidance (RFC4456 ORIGINATOR_ID / CLUSTER_LIST),
// drops paths sourced from the peer itself, and performs AS-path loop
// detection. Surviving paths are cloned for the peer's address.
func filterpath(peer *Peer, pathList []*table.Path) []*table.Path {
	filtered := make([]*table.Path, 0)
	for _, path := range pathList {
		// Skip families the peer did not negotiate.
		if _, ok := peer.rfMap[path.GetRouteFamily()]; !ok {
			continue
		}
		remoteAddr := peer.conf.NeighborConfig.NeighborAddress
		//iBGP handling
		if !path.IsLocal() && peer.isIBGPPeer() {
			ignore := true
			info := path.GetSource()
			//if the path comes from eBGP peer
			if info.AS != peer.conf.NeighborConfig.PeerAs {
				ignore = false
			}
			// RFC4456 8. Avoiding Routing Information Loops
			// A router that recognizes the ORIGINATOR_ID attribute SHOULD
			// ignore a route received with its BGP Identifier as the ORIGINATOR_ID.
			if id := path.GetOriginatorID(); peer.gConf.GlobalConfig.RouterId.Equal(id) {
				log.WithFields(log.Fields{
					"Topic":        "Peer",
					"Key":          remoteAddr,
					"OriginatorID": id,
					"Data":         path,
				}).Debug("Originator ID is mine, ignore")
				continue
			}
			if info.RouteReflectorClient {
				ignore = false
			}
			if peer.isRouteReflectorClient() {
				// RFC4456 8. Avoiding Routing Information Loops
				// If the local CLUSTER_ID is found in the CLUSTER_LIST,
				// the advertisement received SHOULD be ignored.
				for _, clusterId := range path.GetClusterList() {
					if clusterId.Equal(peer.peerInfo.RouteReflectorClusterID) {
						log.WithFields(log.Fields{
							"Topic":     "Peer",
							"Key":       remoteAddr,
							"ClusterID": clusterId,
							"Data":      path,
						}).Debug("cluster list path attribute has local cluster id, ignore")
						// NOTE(review): this `continue` advances only the
						// inner clusterId loop — the path itself is NOT
						// skipped, because ignore is unconditionally set
						// to false right after the loop. Confirm whether
						// the outer path loop was meant to be continued.
						continue
					}
				}
				ignore = false
			}
			if ignore {
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key":   remoteAddr,
					"Data":  path,
				}).Debug("From same AS, ignore.")
				continue
			}
		}
		// Never echo a path back to the peer it was learned from.
		if remoteAddr.Equal(path.GetSource().Address) {
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   remoteAddr,
				"Data":  path,
			}).Debug("From me, ignore.")
			continue
		}
		// AS-path loop detection: drop paths that already contain the
		// peer's AS number.
		send := true
		for _, as := range path.GetAsList() {
			if as == peer.conf.NeighborConfig.PeerAs {
				send = false
				break
			}
		}
		if !send {
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   remoteAddr,
				"Data":  path,
			}).Debug("AS PATH loop, ignore.")
			continue
		}
		filtered = append(filtered, path.Clone(remoteAddr, path.IsWithdraw))
	}
	return filtered
}
// dropPeerAllRoutes withdraws every route learned from peer — from each
// route-server client's private RIB when peer is a client, otherwise
// from the global RIB — and returns the withdraw messages destined for
// the remaining established peers.
func (server *BgpServer) dropPeerAllRoutes(peer *Peer) []*SenderMsg {
	msgs := make([]*SenderMsg, 0)
	for _, rf := range peer.configuredRFlist() {
		if peer.isRouteServerClient() {
			for _, targetPeer := range server.neighborMap {
				rib := targetPeer.localRib
				// Skip non-clients and the departing peer's own RIB.
				if !targetPeer.isRouteServerClient() || rib.OwnerName() == peer.conf.NeighborConfig.NeighborAddress.String() {
					continue
				}
				pathList, _ := rib.DeletePathsforPeer(peer.peerInfo, rf)
				if targetPeer.fsm.state != bgp.BGP_FSM_ESTABLISHED || len(pathList) == 0 {
					continue
				}
				msgList := table.CreateUpdateMsgFromPaths(pathList)
				msgs = append(msgs, newSenderMsg(targetPeer, msgList))
				targetPeer.adjRib.UpdateOut(pathList)
			}
		} else {
			rib := server.globalRib
			pathList, _ := rib.DeletePathsforPeer(peer.peerInfo, rf)
			if len(pathList) == 0 {
				continue
			}
			// New best paths (the withdraws) also go to zebra/monitors.
			server.broadcastBests(pathList)
			msgList := table.CreateUpdateMsgFromPaths(pathList)
			for _, targetPeer := range server.neighborMap {
				if targetPeer.isRouteServerClient() || targetPeer.fsm.state != bgp.BGP_FSM_ESTABLISHED {
					continue
				}
				targetPeer.adjRib.UpdateOut(pathList)
				msgs = append(msgs, newSenderMsg(targetPeer, msgList))
			}
		}
	}
	return msgs
}
// broadcastBests publishes newly selected best paths: to zebra (unless
// the path originated from zebra) and to every gRPC monitor subscribed
// to REQ_MONITOR_GLOBAL_BEST_CHANGED for a matching route family.
// Finished subscriptions (EndCh fired) are pruned along the way.
func (server *BgpServer) broadcastBests(bests []*table.Path) {
	for _, path := range bests {
		if !path.IsFromZebra {
			z := newBroadcastZapiBestMsg(server.zclient, path)
			if z != nil {
				server.broadcastMsgs = append(server.broadcastMsgs, z)
				// NOTE(review): this debug text looks copy-pasted from a
				// policy-rejection site; it actually logs a queued zebra
				// broadcast. Confirm intended wording.
				log.WithFields(log.Fields{
					"Topic":   "Server",
					"Client":  z.client,
					"Message": z.msg,
				}).Debug("Default policy applied and rejected.")
			}
		}

		rf := path.GetRouteFamily()

		result := &GrpcResponse{
			Data: &api.Destination{
				Prefix: path.GetNlri().String(),
				Paths:  []*api.Path{path.ToApiStruct()},
			},
		}
		remainReqs := make([]*GrpcRequest, 0, len(server.broadcastReqs))
		for _, req := range server.broadcastReqs {
			select {
			case <-req.EndCh:
				// Subscriber is gone; drop it from the list.
				continue
			default:
			}
			if req.RequestType != REQ_MONITOR_GLOBAL_BEST_CHANGED {
				remainReqs = append(remainReqs, req)
				continue
			}
			// RouteFamily 0 means "all families".
			if req.RouteFamily == bgp.RouteFamily(0) || req.RouteFamily == rf {
				m := &broadcastGrpcMsg{
					req:    req,
					result: result,
				}
				server.broadcastMsgs = append(server.broadcastMsgs, m)
			}
			remainReqs = append(remainReqs, req)
		}
		server.broadcastReqs = remainReqs
	}
}
// broadcastPeerState notifies gRPC monitors subscribed to
// REQ_MONITOR_NEIGHBOR_PEER_STATE (optionally filtered by neighbor
// name) of peer's current state, pruning finished subscriptions.
func (server *BgpServer) broadcastPeerState(peer *Peer) {
	result := &GrpcResponse{
		Data: peer.ToApiStruct(),
	}
	remainReqs := make([]*GrpcRequest, 0, len(server.broadcastReqs))
	for _, req := range server.broadcastReqs {
		select {
		case <-req.EndCh:
			// Subscriber is gone; drop it from the list.
			continue
		default:
		}
		ignore := req.RequestType != REQ_MONITOR_NEIGHBOR_PEER_STATE
		ignore = ignore || (req.Name != "" && req.Name != peer.conf.NeighborConfig.NeighborAddress.String())
		if ignore {
			remainReqs = append(remainReqs, req)
			continue
		}
		m := &broadcastGrpcMsg{
			req:    req,
			result: result,
		}
		server.broadcastMsgs = append(server.broadcastMsgs, m)
		remainReqs = append(remainReqs, req)
	}
	server.broadcastReqs = remainReqs
}
// propagateUpdate pushes pathList (received from peer, or locally
// originated when peer is nil) through policy and the appropriate RIBs,
// and returns the resulting update messages for the other peers.
func (server *BgpServer) propagateUpdate(peer *Peer, pathList []*table.Path) []*SenderMsg {
	msgs := make([]*SenderMsg, 0)
	if peer != nil && peer.isRouteServerClient() {
		// Route-server mode: run import/export policy against each other
		// client's private RIB.
		for _, targetPeer := range server.neighborMap {
			rib := targetPeer.localRib
			if !targetPeer.isRouteServerClient() || rib.OwnerName() == peer.conf.NeighborConfig.NeighborAddress.String() {
				continue
			}
			sendPathList, _ := targetPeer.ApplyPolicy(table.POLICY_DIRECTION_IMPORT, pathList)
			sendPathList, _ = rib.ProcessPaths(sendPathList)
			if targetPeer.fsm.state != bgp.BGP_FSM_ESTABLISHED || len(sendPathList) == 0 {
				continue
			}
			sendPathList, _ = targetPeer.ApplyPolicy(table.POLICY_DIRECTION_EXPORT, filterpath(targetPeer, sendPathList))
			if len(sendPathList) == 0 {
				continue
			}
			msgList := table.CreateUpdateMsgFromPaths(sendPathList)
			targetPeer.adjRib.UpdateOut(sendPathList)
			msgs = append(msgs, newSenderMsg(targetPeer, msgList))
		}
	} else {
		// Normal mode: one global RIB shared by all non-client peers.
		rib := server.globalRib
		pathList = rib.ApplyPolicy(table.POLICY_DIRECTION_IMPORT, pathList)
		sendPathList, _ := rib.ProcessPaths(pathList)
		if len(sendPathList) == 0 {
			return msgs
		}
		server.broadcastBests(sendPathList)
		for _, targetPeer := range server.neighborMap {
			if targetPeer.isRouteServerClient() || targetPeer.fsm.state != bgp.BGP_FSM_ESTABLISHED {
				continue
			}
			f := rib.ApplyPolicy(table.POLICY_DIRECTION_EXPORT, filterpath(targetPeer, sendPathList))
			if len(f) == 0 {
				continue
			}
			// Rewrite attributes (nexthop, AS path, etc.) per peer.
			for _, path := range f {
				path.UpdatePathAttrs(&server.bgpConfig.Global, &targetPeer.conf)
			}
			targetPeer.adjRib.UpdateOut(f)
			msgList := table.CreateUpdateMsgFromPaths(f)
			msgs = append(msgs, newSenderMsg(targetPeer, msgList))
		}
	}
	return msgs
}
// handleFSMMessage processes one event from a peer's FSM goroutine:
// either a session state transition or a received BGP message. It
// returns the SenderMsgs that must be queued for transmission.
func (server *BgpServer) handleFSMMessage(peer *Peer, e *fsmMsg, incoming chan *fsmMsg) []*SenderMsg {
	msgs := make([]*SenderMsg, 0)
	switch e.MsgType {
	case FSM_MSG_STATE_CHANGE:
		nextState := e.MsgData.(bgp.FSMState)
		oldState := bgp.FSMState(peer.conf.NeighborState.SessionState)
		peer.conf.NeighborState.SessionState = uint32(nextState)
		peer.fsm.StateChange(nextState)
		// Leaving ESTABLISHED: notify BMP, count a flap if the session was
		// short-lived, and withdraw everything learned from this peer.
		if oldState == bgp.BGP_FSM_ESTABLISHED {
			if ch := server.bmpClient.send(); ch != nil {
				m := &broadcastBMPMsg{
					ch:      ch,
					msgList: []*bgp.BMPMessage{bmpPeerDown(bgp.BMP_PEER_DOWN_REASON_UNKNOWN, bgp.BMP_PEER_TYPE_GLOBAL, false, 0, peer.peerInfo, peer.conf.Timers.TimersState.Downtime)},
				}
				server.broadcastMsgs = append(server.broadcastMsgs, m)
			}
			t := time.Now()
			// A session that lasted less than FLOP_THRESHOLD counts as a flap.
			if t.Sub(time.Unix(peer.conf.Timers.TimersState.Uptime, 0)) < FLOP_THRESHOLD {
				peer.conf.NeighborState.Flops++
			}
			for _, rf := range peer.configuredRFlist() {
				peer.DropAll(rf)
			}
			msgs = append(msgs, server.dropPeerAllRoutes(peer)...)
		}
		// Reset the outgoing message queue for the new session.
		close(peer.outgoing)
		peer.outgoing = make(chan *bgp.BGPMessage, 128)
		if nextState == bgp.BGP_FSM_ESTABLISHED {
			// Entering ESTABLISHED: notify BMP and advertise our best paths.
			if ch := server.bmpClient.send(); ch != nil {
				laddr, lport := peer.fsm.LocalHostPort()
				_, rport := peer.fsm.RemoteHostPort()
				m := &broadcastBMPMsg{
					ch:      ch,
					msgList: []*bgp.BMPMessage{bmpPeerUp(laddr, lport, rport, buildopen(peer.fsm.gConf, peer.fsm.pConf), peer.recvOpen, bgp.BMP_PEER_TYPE_GLOBAL, false, 0, peer.peerInfo, peer.conf.Timers.TimersState.Uptime)},
				}
				server.broadcastMsgs = append(server.broadcastMsgs, m)
			}
			pathList, _ := server.getBestFromLocal(peer)
			if len(pathList) > 0 {
				peer.adjRib.UpdateOut(pathList)
				msgs = append(msgs, newSenderMsg(peer, table.CreateUpdateMsgFromPaths(pathList)))
			}
		} else {
			// During shutdown, exit the process once every peer is IDLE.
			if server.shutdown && nextState == bgp.BGP_FSM_IDLE {
				die := true
				for _, p := range server.neighborMap {
					if p.fsm.state != bgp.BGP_FSM_IDLE {
						die = false
						break
					}
				}
				if die {
					os.Exit(0)
				}
			}
			peer.conf.Timers.TimersState.Downtime = time.Now().Unix()
		}
		// clear counter
		if peer.fsm.adminState == ADMIN_STATE_DOWN {
			peer.conf.NeighborState = config.NeighborState{}
			peer.conf.Timers.TimersState = config.TimersState{}
		}
		peer.startFSMHandler(incoming)
		server.broadcastPeerState(peer)
	case FSM_MSG_BGP_MESSAGE:
		switch m := e.MsgData.(type) {
		case *bgp.MessageError:
			// Malformed message: answer with a NOTIFICATION.
			msgs = append(msgs, newSenderMsg(peer, []*bgp.BGPMessage{bgp.NewBGPNotificationMessage(m.TypeCode, m.SubTypeCode, m.Data)}))
		case *bgp.BGPMessage:
			pathList, update, msgList := peer.handleBGPmessage(m)
			if len(msgList) > 0 {
				msgs = append(msgs, newSenderMsg(peer, msgList))
				break
			}
			if update == false {
				// Not an UPDATE-driven change (e.g. route refresh): just send
				// whatever paths the handler produced.
				if len(pathList) > 0 {
					msgList := table.CreateUpdateMsgFromPaths(pathList)
					msgs = append(msgs, newSenderMsg(peer, msgList))
				}
				break
			} else {
				if len(pathList) > 0 {
					server.roaClient.validate(pathList)
				}
			}
			if m.Header.Type == bgp.BGP_MSG_UPDATE {
				// Mirror received UPDATEs to the MRT dumper, if configured.
				if server.dumper != nil {
					_, y := peer.capMap[bgp.BGP_CAP_FOUR_OCTET_AS_NUMBER]
					l, _ := peer.fsm.LocalHostPort()
					bm := &broadcastBGPMsg{
						message:      m,
						peerAS:       peer.peerInfo.AS,
						localAS:      peer.peerInfo.LocalAS,
						peerAddress:  peer.peerInfo.Address,
						localAddress: net.ParseIP(l),
						fourBytesAs:  y,
						ch:           server.dumper.sendCh(),
					}
					server.broadcastMsgs = append(server.broadcastMsgs, bm)
				}
				// Mirror to the BMP client as well.
				if ch := server.bmpClient.send(); ch != nil {
					bm := &broadcastBMPMsg{
						ch:      ch,
						msgList: []*bgp.BMPMessage{bmpPeerRoute(bgp.BMP_PEER_TYPE_GLOBAL, false, 0, peer.peerInfo, time.Now().Unix(), m)},
					}
					server.broadcastMsgs = append(server.broadcastMsgs, bm)
				}
			}
			// FIXME: refactor peer.handleBGPmessage and this func
			if peer.isRouteServerClient() {
				// RS clients: propagate only paths that passed the in-policy.
				var accepted []*table.Path
				for _, p := range pathList {
					if p.Filtered == false {
						accepted = append(accepted, p)
					}
				}
				msgs = append(msgs, server.propagateUpdate(peer, accepted)...)
			} else {
				msgs = append(msgs, server.propagateUpdate(peer, pathList)...)
			}
		default:
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   peer.conf.NeighborConfig.NeighborAddress,
				"Data":  e.MsgData,
			}).Panic("unknown msg type")
		}
	}
	return msgs
}
// SetGlobalType hands the global BGP configuration to the server goroutine.
func (server *BgpServer) SetGlobalType(g config.Global) {
	server.globalTypeCh <- g
}
// SetRpkiConfig hands the RPKI server configuration to the server goroutine.
func (server *BgpServer) SetRpkiConfig(c config.RpkiServers) {
	server.rpkiConfigCh <- c
}
// SetBmpConfig hands the BMP server configuration to the server goroutine.
func (server *BgpServer) SetBmpConfig(c config.BmpServers) {
	server.bmpConfigCh <- c
}
// PeerAdd asks the server goroutine to add a new neighbor.
func (server *BgpServer) PeerAdd(peer config.Neighbor) {
	server.addedPeerCh <- peer
}
// PeerDelete asks the server goroutine to remove a neighbor.
func (server *BgpServer) PeerDelete(peer config.Neighbor) {
	server.deletedPeerCh <- peer
}
// PeerUpdate asks the server goroutine to apply a changed neighbor config.
func (server *BgpServer) PeerUpdate(peer config.Neighbor) {
	server.updatedPeerCh <- peer
}
// Shutdown starts a graceful stop: every neighbor is administratively
// brought down. The process exits once all FSMs reach IDLE (see the
// shutdown check in handleFSMMessage).
func (server *BgpServer) Shutdown() {
	server.shutdown = true
	for _, p := range server.neighborMap {
		p.fsm.adminStateCh <- ADMIN_STATE_DOWN
	}
}
// UpdatePolicy hands a new routing-policy configuration to the server goroutine.
func (server *BgpServer) UpdatePolicy(policy config.RoutingPolicy) {
	server.policyUpdateCh <- policy
}
// setPolicyByConfig applies the in/import/export policy assignments
// described by c to the given policy point (a peer or the global RIB).
// Directions whose assignment cannot be resolved are logged and skipped.
func (server *BgpServer) setPolicyByConfig(p policyPoint, c config.ApplyPolicy) {
	directions := []table.PolicyDirection{
		table.POLICY_DIRECTION_IN,
		table.POLICY_DIRECTION_IMPORT,
		table.POLICY_DIRECTION_EXPORT,
	}
	for _, d := range directions {
		policies, defaultAction, err := server.policy.GetAssignmentFromConfig(d, c)
		if err != nil {
			log.WithFields(log.Fields{
				"Topic": "Policy",
				"Dir":   d,
			}).Errorf("failed to get policy info: %s", err)
			continue
		}
		p.SetDefaultPolicy(d, defaultAction)
		p.SetPolicy(d, policies)
	}
}
// SetPolicy compiles pl into a routing-policy table, installs it on the
// server, and re-applies the global apply-policy configuration. The
// compile error, if any, is logged and returned.
func (server *BgpServer) SetPolicy(pl config.RoutingPolicy) error {
	compiled, err := table.NewRoutingPolicy(pl)
	if err == nil {
		server.policy = compiled
		server.setPolicyByConfig(server.globalRib, server.bgpConfig.Global.ApplyPolicy)
		return nil
	}
	log.WithFields(log.Fields{
		"Topic": "Policy",
	}).Errorf("failed to create routing policy: %s", err)
	return err
}
// handlePolicy installs a new routing policy via SetPolicy and then
// re-applies each neighbor's own apply-policy configuration against it.
func (server *BgpServer) handlePolicy(pl config.RoutingPolicy) error {
	err := server.SetPolicy(pl)
	if err != nil {
		log.WithFields(log.Fields{
			"Topic": "Policy",
		}).Errorf("failed to set new policy: %s", err)
		return err
	}
	for _, p := range server.neighborMap {
		log.WithFields(log.Fields{
			"Topic": "Peer",
			"Key":   p.conf.NeighborConfig.NeighborAddress,
		}).Info("call set policy")
		server.setPolicyByConfig(p, p.conf.ApplyPolicy)
	}
	return nil
}
// checkNeighborRequest resolves the request's Name to a known neighbor.
// When the neighbor does not exist, the error is delivered on the
// request's response channel (which is then closed) and also returned.
func (server *BgpServer) checkNeighborRequest(grpcReq *GrpcRequest) (*Peer, error) {
	addr := grpcReq.Name
	if p, ok := server.neighborMap[addr]; ok {
		return p, nil
	}
	err := fmt.Errorf("Neighbor that has %v doesn't exist.", addr)
	grpcReq.ResponseCh <- &GrpcResponse{
		ResponseErr: err,
	}
	close(grpcReq.ResponseCh)
	return nil, err
}
// EVPN MAC MOBILITY HANDLING
//
// We don't have a multihoming function yet, so we ignore
// ESI comparison.
//
// RFC7432 15. MAC Mobility
//
// A PE detecting a locally attached MAC address for which it had
// previously received a MAC/IP Advertisement route with the same zero
// Ethernet segment identifier (single-homed scenarios) advertises it
// with a MAC Mobility extended community attribute with the sequence
// number set properly. In the case of single-homed scenarios, there
// is no need for ESI comparison.
// getMacMobilityExtendedCommunity computes the MAC Mobility extended
// community to attach to a new MAC/IP advertisement for (etag, mac),
// following RFC 7432 section 15 (ESI comparison is skipped, per the
// comment above). It returns nil when no sequence number needs to be
// advertised.
func getMacMobilityExtendedCommunity(etag uint32, mac net.HardwareAddr, evpnPaths []*table.Path) *bgp.MacMobilityExtended {
	// For every existing path matching (etag, mac), record its MAC
	// Mobility sequence number, or -1 when the path carries no MAC
	// Mobility extended community, together with whether the path is
	// locally originated.
	seqs := make([]struct {
		seq     int
		isLocal bool
	}, 0)
	for _, path := range evpnPaths {
		nlri := path.GetNlri().(*bgp.EVPNNLRI)
		target, ok := nlri.RouteTypeData.(*bgp.EVPNMacIPAdvertisementRoute)
		if !ok {
			// Not a MAC/IP advertisement route; irrelevant here.
			continue
		}
		if target.ETag == etag && bytes.Equal(target.MacAddress, mac) {
			found := false
			for _, ec := range path.GetExtCommunities() {
				if t, st := ec.GetTypes(); t == bgp.EC_TYPE_EVPN && st == bgp.EC_SUBTYPE_MAC_MOBILITY {
					seqs = append(seqs, struct {
						seq     int
						isLocal bool
					}{int(ec.(*bgp.MacMobilityExtended).Sequence), path.IsLocal()})
					found = true
					break
				}
			}
			if !found {
				// Matching path without a MAC Mobility attribute.
				seqs = append(seqs, struct {
					seq     int
					isLocal bool
				}{-1, path.IsLocal()})
			}
		}
	}
	if len(seqs) > 0 {
		// Find the highest sequence seen; -2 is below every entry
		// (entries are >= -1), so the first iteration always assigns.
		newSeq := -2
		var isLocal bool
		for _, seq := range seqs {
			if seq.seq > newSeq {
				newSeq = seq.seq
				isLocal = seq.isLocal
			}
		}
		// If the winning path is remote, we must advertise one higher
		// than its sequence. A result of -1 (only attribute-less local
		// paths seen) means no community is needed.
		if !isLocal {
			newSeq += 1
		}
		if newSeq != -1 {
			return &bgp.MacMobilityExtended{
				Sequence: uint32(newSeq),
			}
		}
	}
	return nil
}
// handleModPathRequest converts an api.ModPathArguments request into
// table.Path objects ready for propagation. On any validation error it
// sends the error on the request's response channel, closes it, and
// returns an empty slice; on success the response channel is left for
// the caller to complete.
func (server *BgpServer) handleModPathRequest(grpcReq *GrpcRequest) []*table.Path {
	// Everything referenced after a possible `goto ERR` must be declared
	// up front: Go forbids a goto that jumps over declarations.
	var nlri bgp.AddrPrefixInterface
	result := &GrpcResponse{}
	var pattr []bgp.PathAttributeInterface
	var extcomms []bgp.ExtendedCommunityInterface
	var nexthop string
	var rf bgp.RouteFamily
	var paths []*table.Path
	var path *api.Path
	var pi *table.PeerInfo
	arg, ok := grpcReq.Data.(*api.ModPathArguments)
	if !ok {
		result.ResponseErr = fmt.Errorf("type assertion failed")
		goto ERR
	}
	paths = make([]*table.Path, 0, len(arg.Paths))
	for _, path = range arg.Paths {
		seen := make(map[bgp.BGPAttrType]bool)
		pattr = make([]bgp.PathAttributeInterface, 0)
		extcomms = make([]bgp.ExtendedCommunityInterface, 0)
		// Path source: explicit (SourceAsn/SourceId) or this router.
		if path.SourceAsn != 0 {
			pi = &table.PeerInfo{
				AS:      path.SourceAsn,
				LocalID: net.ParseIP(path.SourceId),
			}
		} else {
			pi = &table.PeerInfo{
				AS:      server.bgpConfig.Global.GlobalConfig.As,
				LocalID: server.bgpConfig.Global.GlobalConfig.RouterId,
			}
		}
		if len(path.Nlri) > 0 {
			nlri = &bgp.IPAddrPrefix{}
			err := nlri.DecodeFromBytes(path.Nlri)
			if err != nil {
				result.ResponseErr = err
				goto ERR
			}
		}
		for _, attr := range path.Pattrs {
			p, err := bgp.GetPathAttribute(attr)
			if err != nil {
				result.ResponseErr = err
				goto ERR
			}
			err = p.DecodeFromBytes(attr)
			if err != nil {
				result.ResponseErr = err
				goto ERR
			}
			// Reject duplicate attribute types.
			if _, ok := seen[p.GetType()]; !ok {
				seen[p.GetType()] = true
			} else {
				// FIX: corrected typo "apears" -> "appears" in the message.
				result.ResponseErr = fmt.Errorf("the path attribute appears twice. Type : " + strconv.Itoa(int(p.GetType())))
				goto ERR
			}
			switch p.GetType() {
			case bgp.BGP_ATTR_TYPE_NEXT_HOP:
				nexthop = p.(*bgp.PathAttributeNextHop).Value.String()
			case bgp.BGP_ATTR_TYPE_EXTENDED_COMMUNITIES:
				value := p.(*bgp.PathAttributeExtendedCommunities).Value
				if len(value) > 0 {
					extcomms = append(extcomms, value...)
				}
			case bgp.BGP_ATTR_TYPE_MP_REACH_NLRI:
				mpreach := p.(*bgp.PathAttributeMpReachNLRI)
				if len(mpreach.Value) != 1 {
					result.ResponseErr = fmt.Errorf("include only one route in mp_reach_nlri")
					goto ERR
				}
				nlri = mpreach.Value[0]
				nexthop = mpreach.Nexthop.String()
			default:
				pattr = append(pattr, p)
			}
		}
		if nlri == nil || nexthop == "" {
			result.ResponseErr = fmt.Errorf("not found nlri or nexthop")
			goto ERR
		}
		rf = bgp.AfiSafiToRouteFamily(nlri.AFI(), nlri.SAFI())
		// VRF paths: translate the NLRI into its VPN form and attach the
		// VRF's export route targets.
		if arg.Resource == api.Resource_VRF {
			label, err := server.globalRib.GetNextLabel(arg.Name, nexthop, path.IsWithdraw)
			if err != nil {
				result.ResponseErr = err
				goto ERR
			}
			vrf := server.globalRib.Vrfs[arg.Name]
			switch rf {
			case bgp.RF_IPv4_UC:
				n := nlri.(*bgp.IPAddrPrefix)
				nlri = bgp.NewLabeledVPNIPAddrPrefix(n.Length, n.Prefix.String(), *bgp.NewMPLSLabelStack(label), vrf.Rd)
			case bgp.RF_IPv6_UC:
				n := nlri.(*bgp.IPv6AddrPrefix)
				nlri = bgp.NewLabeledVPNIPv6AddrPrefix(n.Length, n.Prefix.String(), *bgp.NewMPLSLabelStack(label), vrf.Rd)
			case bgp.RF_EVPN:
				n := nlri.(*bgp.EVPNNLRI)
				switch n.RouteType {
				case bgp.EVPN_ROUTE_TYPE_MAC_IP_ADVERTISEMENT:
					n.RouteTypeData.(*bgp.EVPNMacIPAdvertisementRoute).RD = vrf.Rd
				case bgp.EVPN_INCLUSIVE_MULTICAST_ETHERNET_TAG:
					n.RouteTypeData.(*bgp.EVPNMulticastEthernetTagRoute).RD = vrf.Rd
				}
			default:
				result.ResponseErr = fmt.Errorf("unsupported route family for vrf: %s", rf)
				goto ERR
			}
			extcomms = append(extcomms, vrf.ExportRt...)
		}
		// Plain IPv4 unicast uses NEXT_HOP; everything else MP_REACH_NLRI.
		if arg.Resource != api.Resource_VRF && rf == bgp.RF_IPv4_UC {
			pattr = append(pattr, bgp.NewPathAttributeNextHop(nexthop))
		} else {
			pattr = append(pattr, bgp.NewPathAttributeMpReachNLRI(nexthop, []bgp.AddrPrefixInterface{nlri}))
		}
		// EVPN MAC/IP advertisements may need a MAC Mobility extended
		// community derived from existing best paths (RFC 7432 sec. 15).
		if rf == bgp.RF_EVPN {
			evpnNlri := nlri.(*bgp.EVPNNLRI)
			if evpnNlri.RouteType == bgp.EVPN_ROUTE_TYPE_MAC_IP_ADVERTISEMENT {
				macIpAdv := evpnNlri.RouteTypeData.(*bgp.EVPNMacIPAdvertisementRoute)
				etag := macIpAdv.ETag
				mac := macIpAdv.MacAddress
				// FIX: renamed from `paths`, which shadowed the result slice.
				evpnPaths := server.globalRib.GetBestPathList(bgp.RF_EVPN)
				if m := getMacMobilityExtendedCommunity(etag, mac, evpnPaths); m != nil {
					extcomms = append(extcomms, m)
				}
			}
		}
		if len(extcomms) > 0 {
			pattr = append(pattr, bgp.NewPathAttributeExtendedCommunities(extcomms))
		}
		paths = append(paths, table.NewPath(pi, nlri, path.IsWithdraw, pattr, false, time.Now(), path.NoImplicitWithdraw))
	}
	return paths
ERR:
	grpcReq.ResponseCh <- result
	close(grpcReq.ResponseCh)
	return []*table.Path{}
}
// handleVrfMod adds or deletes a VRF in the global RIB and returns the
// paths that must be propagated as a consequence of the change.
func (server *BgpServer) handleVrfMod(arg *api.ModVrfArguments) ([]*table.Path, error) {
	rib := server.globalRib
	var pathList []*table.Path
	switch arg.Operation {
	case api.Operation_ADD:
		rd := bgp.GetRouteDistinguisher(arg.Vrf.Rd)
		// parseRTs decodes a list of serialized route targets.
		parseRTs := func(bufs [][]byte) ([]bgp.ExtendedCommunityInterface, error) {
			rts := make([]bgp.ExtendedCommunityInterface, 0, len(bufs))
			for _, buf := range bufs {
				rt, err := bgp.ParseExtended(buf)
				if err != nil {
					return nil, err
				}
				rts = append(rts, rt)
			}
			return rts, nil
		}
		importRt, err := parseRTs(arg.Vrf.ImportRt)
		if err != nil {
			return nil, err
		}
		exportRt, err := parseRTs(arg.Vrf.ExportRt)
		if err != nil {
			return nil, err
		}
		pi := &table.PeerInfo{
			AS:      server.bgpConfig.Global.GlobalConfig.As,
			LocalID: server.bgpConfig.Global.GlobalConfig.RouterId,
		}
		pathList, err = rib.AddVrf(arg.Vrf.Name, rd, importRt, exportRt, pi)
		if err != nil {
			return nil, err
		}
	case api.Operation_DEL:
		var err error
		pathList, err = rib.DeleteVrf(arg.Vrf.Name)
		if err != nil {
			return nil, err
		}
	default:
		return nil, fmt.Errorf("unknown operation: %d", arg.Operation)
	}
	return pathList, nil
}
// handleVrfRequest serves the VRF-related gRPC requests: streaming a
// VRF's routes (REQ_VRF), listing VRFs (REQ_VRFS), and adding/removing
// a VRF (REQ_VRF_MOD). It returns the paths that must be propagated
// after a VRF modification.
//
// BUG FIX: previously, an unsupported route family in REQ_VRF set
// result.ResponseErr inside the inner switch, but the `break` there
// only exited that inner switch; execution then called GetPathList with
// the zero RouteFamily and jumped to END, so the error response was
// never delivered to the client. The error is now checked after the
// family translation and sent before the channel is closed.
func (server *BgpServer) handleVrfRequest(req *GrpcRequest) []*table.Path {
	var msgs []*table.Path
	result := &GrpcResponse{}
	switch req.RequestType {
	case REQ_VRF:
		name := req.Name
		rib := server.globalRib
		vrfs := rib.Vrfs
		if _, ok := vrfs[name]; !ok {
			result.ResponseErr = fmt.Errorf("vrf %s not found", name)
			break
		}
		// Translate the requested family into its VPN counterpart.
		var rf bgp.RouteFamily
		switch req.RouteFamily {
		case bgp.RF_IPv4_UC:
			rf = bgp.RF_IPv4_VPN
		case bgp.RF_IPv6_UC:
			rf = bgp.RF_IPv6_VPN
		case bgp.RF_EVPN:
			rf = bgp.RF_EVPN
		default:
			result.ResponseErr = fmt.Errorf("unsupported route family: %s", req.RouteFamily)
		}
		if result.ResponseErr != nil {
			// Exit the outer switch so the error result is sent below.
			break
		}
		// Stream every path importable into this VRF.
		for _, path := range rib.GetPathList(rf) {
			ok := table.CanImportToVrf(vrfs[name], path)
			if !ok {
				continue
			}
			req.ResponseCh <- &GrpcResponse{
				Data: &api.Destination{
					Prefix: path.GetNlri().String(),
					Paths:  []*api.Path{path.ToApiStruct()},
				},
			}
		}
		goto END
	case REQ_VRFS:
		vrfs := server.globalRib.Vrfs
		for _, vrf := range vrfs {
			req.ResponseCh <- &GrpcResponse{
				Data: vrf.ToApiStruct(),
			}
		}
		goto END
	case REQ_VRF_MOD:
		arg := req.Data.(*api.ModVrfArguments)
		msgs, result.ResponseErr = server.handleVrfMod(arg)
	default:
		result.ResponseErr = fmt.Errorf("unknown request type: %d", req.RequestType)
	}
	req.ResponseCh <- result
END:
	close(req.ResponseCh)
	return msgs
}
// sendMultipleResponses streams the prepared results to the request's
// response channel, aborting early when the client signals completion
// on EndCh. The response channel is always closed before returning.
func sendMultipleResponses(grpcReq *GrpcRequest, results []*GrpcResponse) {
	defer close(grpcReq.ResponseCh)
	for _, result := range results {
		select {
		case <-grpcReq.EndCh:
			return
		case grpcReq.ResponseCh <- result:
		}
	}
}
// getBestFromLocal collects the best paths that should be advertised to
// peer, with export policy already applied.
//
// For route-server clients the paths come from the peer's own local RIB
// and the second return value carries the paths rejected by export
// policy. For regular peers the paths come from the global RIB with
// per-neighbor attribute rewriting; in that branch `filtered` is never
// populated and is returned nil.
func (server *BgpServer) getBestFromLocal(peer *Peer) ([]*table.Path, []*table.Path) {
	var pathList []*table.Path
	var filtered []*table.Path
	if peer.isRouteServerClient() {
		pathList, filtered = peer.ApplyPolicy(table.POLICY_DIRECTION_EXPORT, filterpath(peer, peer.getBests(peer.localRib)))
	} else {
		rib := server.globalRib
		// Record our side's address for next-hop/attribute rewriting.
		l, _ := peer.fsm.LocalHostPort()
		peer.conf.Transport.TransportConfig.LocalAddress = net.ParseIP(l)
		bests := rib.ApplyPolicy(table.POLICY_DIRECTION_EXPORT, filterpath(peer, peer.getBests(rib)))
		pathList = make([]*table.Path, 0, len(bests))
		for _, path := range bests {
			path.UpdatePathAttrs(&server.bgpConfig.Global, &peer.conf)
			pathList = append(pathList, path)
		}
	}
	return pathList, filtered
}
// handleGrpc dispatches a single gRPC request to the appropriate
// handler and returns any BGP messages that must be sent to peers as a
// result. Responses go to grpcReq.ResponseCh; streaming requests are
// answered from a separate goroutine via sendMultipleResponses.
func (server *BgpServer) handleGrpc(grpcReq *GrpcRequest) []*SenderMsg {
	var msgs []*SenderMsg
	// logOp records an administrative operation against a neighbor.
	logOp := func(addr string, action string) {
		log.WithFields(log.Fields{
			"Topic": "Operation",
			"Key":   addr,
		}).Info(action)
	}
	// reqToPeers resolves the request's Name to a single peer, or to
	// every peer when Name is "all".
	reqToPeers := func(grpcReq *GrpcRequest) ([]*Peer, error) {
		peers := make([]*Peer, 0)
		if grpcReq.Name == "all" {
			for _, p := range server.neighborMap {
				peers = append(peers, p)
			}
			return peers, nil
		}
		peer, err := server.checkNeighborRequest(grpcReq)
		return []*Peer{peer}, err
	}
	// sortedDsts returns the table's destinations in radix (prefix) order.
	sortedDsts := func(t *table.Table) []*GrpcResponse {
		results := make([]*GrpcResponse, len(t.GetDestinations()))
		r := radix.New()
		for _, dst := range t.GetDestinations() {
			result := &GrpcResponse{}
			result.Data = dst.ToApiStruct()
			r.Insert(dst.RadixKey, result)
		}
		i := 0
		r.Walk(func(s string, v interface{}) bool {
			r, _ := v.(*GrpcResponse)
			results[i] = r
			i++
			return false
		})
		return results
	}
	switch grpcReq.RequestType {
	case REQ_GLOBAL_RIB:
		// Dump the global RIB for the requested family; IPv4/IPv6 are
		// returned sorted by prefix, other families in map order.
		var results []*GrpcResponse
		if t, ok := server.globalRib.Tables[grpcReq.RouteFamily]; ok {
			results = make([]*GrpcResponse, len(t.GetDestinations()))
			switch grpcReq.RouteFamily {
			case bgp.RF_IPv4_UC, bgp.RF_IPv6_UC:
				results = sortedDsts(server.globalRib.Tables[grpcReq.RouteFamily])
			default:
				i := 0
				for _, dst := range t.GetDestinations() {
					result := &GrpcResponse{}
					result.Data = dst.ToApiStruct()
					results[i] = result
					i++
				}
			}
		}
		go sendMultipleResponses(grpcReq, results)
	case REQ_MOD_PATH:
		pathList := server.handleModPathRequest(grpcReq)
		if len(pathList) > 0 {
			msgs = server.propagateUpdate(nil, pathList)
			grpcReq.ResponseCh <- &GrpcResponse{}
			close(grpcReq.ResponseCh)
		}
		// NOTE(review): when pathList is empty without an error (e.g. an
		// empty Paths list) the response channel is neither written to
		// nor closed here — confirm the client tolerates that.
	case REQ_NEIGHBORS:
		results := make([]*GrpcResponse, len(server.neighborMap))
		i := 0
		for _, peer := range server.neighborMap {
			result := &GrpcResponse{
				Data: peer.ToApiStruct(),
			}
			results[i] = result
			i++
		}
		go sendMultipleResponses(grpcReq, results)
	case REQ_NEIGHBOR:
		peer, err := server.checkNeighborRequest(grpcReq)
		if err != nil {
			break
		}
		result := &GrpcResponse{
			Data: peer.ToApiStruct(),
		}
		grpcReq.ResponseCh <- result
		close(grpcReq.ResponseCh)
	case REQ_LOCAL_RIB:
		// Per-peer local RIB; only meaningful for route-server clients.
		peer, err := server.checkNeighborRequest(grpcReq)
		if err != nil {
			break
		}
		var results []*GrpcResponse
		if peer.isRouteServerClient() && peer.fsm.adminState != ADMIN_STATE_DOWN {
			if t, ok := peer.localRib.Tables[grpcReq.RouteFamily]; ok {
				results = make([]*GrpcResponse, len(t.GetDestinations()))
				switch grpcReq.RouteFamily {
				case bgp.RF_IPv4_UC, bgp.RF_IPv6_UC:
					results = sortedDsts(peer.localRib.Tables[grpcReq.RouteFamily])
				default:
					i := 0
					for _, dst := range t.GetDestinations() {
						result := &GrpcResponse{}
						result.Data = dst.ToApiStruct()
						results[i] = result
						i++
					}
				}
			}
		}
		go sendMultipleResponses(grpcReq, results)
	case REQ_ADJ_RIB_IN, REQ_ADJ_RIB_OUT:
		peer, err := server.checkNeighborRequest(grpcReq)
		if err != nil {
			break
		}
		rf := grpcReq.RouteFamily
		var paths []*table.Path
		if grpcReq.RequestType == REQ_ADJ_RIB_IN {
			paths = peer.adjRib.GetInPathList(rf)
			log.Debugf("RouteFamily=%v adj-rib-in found : %d", rf.String(), len(paths))
		} else {
			paths = peer.adjRib.GetOutPathList(rf)
			log.Debugf("RouteFamily=%v adj-rib-out found : %d", rf.String(), len(paths))
		}
		toResult := func(p *table.Path) *GrpcResponse {
			return &GrpcResponse{
				Data: &api.Destination{
					Prefix: p.GetNlri().String(),
					Paths:  []*api.Path{p.ToApiStruct()},
				},
			}
		}
		results := make([]*GrpcResponse, len(paths))
		switch rf {
		case bgp.RF_IPv4_UC, bgp.RF_IPv6_UC:
			// Sort IPv4/IPv6 responses by prefix via a radix walk.
			r := radix.New()
			for _, p := range paths {
				r.Insert(table.CidrToRadixkey(p.GetNlri().String()), toResult(p))
			}
			i := 0
			r.Walk(func(s string, v interface{}) bool {
				r, _ := v.(*GrpcResponse)
				results[i] = r
				i++
				return false
			})
		default:
			for i, p := range paths {
				results[i] = toResult(p)
			}
		}
		go sendMultipleResponses(grpcReq, results)
	case REQ_NEIGHBOR_SHUTDOWN:
		peers, err := reqToPeers(grpcReq)
		if err != nil {
			break
		}
		logOp(grpcReq.Name, "Neighbor shutdown")
		m := bgp.NewBGPNotificationMessage(bgp.BGP_ERROR_CEASE, bgp.BGP_ERROR_SUB_ADMINISTRATIVE_SHUTDOWN, nil)
		for _, peer := range peers {
			msgs = append(msgs, newSenderMsg(peer, []*bgp.BGPMessage{m}))
		}
		grpcReq.ResponseCh <- &GrpcResponse{}
		close(grpcReq.ResponseCh)
	case REQ_NEIGHBOR_RESET:
		peers, err := reqToPeers(grpcReq)
		if err != nil {
			break
		}
		logOp(grpcReq.Name, "Neighbor reset")
		for _, peer := range peers {
			peer.fsm.idleHoldTime = peer.conf.Timers.TimersConfig.IdleHoldTimeAfterReset
			m := bgp.NewBGPNotificationMessage(bgp.BGP_ERROR_CEASE, bgp.BGP_ERROR_SUB_ADMINISTRATIVE_RESET, nil)
			msgs = append(msgs, newSenderMsg(peer, []*bgp.BGPMessage{m}))
		}
		grpcReq.ResponseCh <- &GrpcResponse{}
		close(grpcReq.ResponseCh)
	case REQ_NEIGHBOR_SOFT_RESET, REQ_NEIGHBOR_SOFT_RESET_IN:
		// Soft reset in: re-run policy over adj-rib-in and re-propagate.
		// A full soft reset additionally falls through to the out case.
		peers, err := reqToPeers(grpcReq)
		if err != nil {
			break
		}
		if grpcReq.RequestType == REQ_NEIGHBOR_SOFT_RESET {
			logOp(grpcReq.Name, "Neighbor soft reset")
		} else {
			logOp(grpcReq.Name, "Neighbor soft reset in")
		}
		for _, peer := range peers {
			pathList := peer.adjRib.GetInPathList(grpcReq.RouteFamily)
			if peer.isRouteServerClient() {
				pathList, _ = peer.ApplyPolicy(table.POLICY_DIRECTION_IN, pathList)
			}
			msgs = append(msgs, server.propagateUpdate(peer, pathList)...)
		}
		if grpcReq.RequestType == REQ_NEIGHBOR_SOFT_RESET_IN {
			grpcReq.ResponseCh <- &GrpcResponse{}
			close(grpcReq.ResponseCh)
			break
		}
		fallthrough
	case REQ_NEIGHBOR_SOFT_RESET_OUT:
		// Soft reset out: rebuild adj-rib-out from the local best paths
		// and withdraw anything now rejected by export policy.
		peers, err := reqToPeers(grpcReq)
		if err != nil {
			break
		}
		if grpcReq.RequestType == REQ_NEIGHBOR_SOFT_RESET_OUT {
			logOp(grpcReq.Name, "Neighbor soft reset out")
		}
		for _, peer := range peers {
			for _, rf := range peer.configuredRFlist() {
				peer.adjRib.DropOut(rf)
			}
			pathList, filtered := server.getBestFromLocal(peer)
			if len(pathList) > 0 {
				peer.adjRib.UpdateOut(pathList)
				msgs = append(msgs, newSenderMsg(peer, table.CreateUpdateMsgFromPaths(pathList)))
			}
			if len(filtered) > 0 {
				for _, p := range filtered {
					p.IsWithdraw = true
				}
				msgs = append(msgs, newSenderMsg(peer, table.CreateUpdateMsgFromPaths(filtered)))
			}
		}
		grpcReq.ResponseCh <- &GrpcResponse{}
		close(grpcReq.ResponseCh)
	case REQ_NEIGHBOR_ENABLE, REQ_NEIGHBOR_DISABLE:
		// Non-blocking admin state change; fails if a previous request
		// is still pending on the FSM's admin channel.
		peer, err1 := server.checkNeighborRequest(grpcReq)
		if err1 != nil {
			break
		}
		var err api.Error
		result := &GrpcResponse{}
		if grpcReq.RequestType == REQ_NEIGHBOR_ENABLE {
			select {
			case peer.fsm.adminStateCh <- ADMIN_STATE_UP:
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key":   peer.conf.NeighborConfig.NeighborAddress,
				}).Debug("ADMIN_STATE_UP requested")
				err.Code = api.Error_SUCCESS
				err.Msg = "ADMIN_STATE_UP"
			default:
				log.Warning("previous request is still remaining. : ", peer.conf.NeighborConfig.NeighborAddress)
				err.Code = api.Error_FAIL
				err.Msg = "previous request is still remaining"
			}
		} else {
			select {
			case peer.fsm.adminStateCh <- ADMIN_STATE_DOWN:
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key":   peer.conf.NeighborConfig.NeighborAddress,
				}).Debug("ADMIN_STATE_DOWN requested")
				err.Code = api.Error_SUCCESS
				err.Msg = "ADMIN_STATE_DOWN"
			default:
				log.Warning("previous request is still remaining. : ", peer.conf.NeighborConfig.NeighborAddress)
				err.Code = api.Error_FAIL
				err.Msg = "previous request is still remaining"
			}
		}
		result.Data = err
		grpcReq.ResponseCh <- result
		close(grpcReq.ResponseCh)
	case REQ_DEFINED_SET:
		if err := server.handleGrpcGetDefinedSet(grpcReq); err != nil {
			grpcReq.ResponseCh <- &GrpcResponse{
				ResponseErr: err,
			}
		}
		close(grpcReq.ResponseCh)
	case REQ_MOD_DEFINED_SET:
		err := server.handleGrpcModDefinedSet(grpcReq)
		grpcReq.ResponseCh <- &GrpcResponse{
			ResponseErr: err,
		}
		close(grpcReq.ResponseCh)
	case REQ_STATEMENT:
		if err := server.handleGrpcGetStatement(grpcReq); err != nil {
			grpcReq.ResponseCh <- &GrpcResponse{
				ResponseErr: err,
			}
		}
		close(grpcReq.ResponseCh)
	case REQ_MOD_STATEMENT:
		err := server.handleGrpcModStatement(grpcReq)
		grpcReq.ResponseCh <- &GrpcResponse{
			ResponseErr: err,
		}
		close(grpcReq.ResponseCh)
	case REQ_POLICY:
		if err := server.handleGrpcGetPolicy(grpcReq); err != nil {
			grpcReq.ResponseCh <- &GrpcResponse{
				ResponseErr: err,
			}
		}
		close(grpcReq.ResponseCh)
	case REQ_MOD_POLICY:
		err := server.handleGrpcModPolicy(grpcReq)
		grpcReq.ResponseCh <- &GrpcResponse{
			ResponseErr: err,
		}
		close(grpcReq.ResponseCh)
	case REQ_POLICY_ASSIGNMENT:
		if err := server.handleGrpcGetPolicyAssignment(grpcReq); err != nil {
			grpcReq.ResponseCh <- &GrpcResponse{
				ResponseErr: err,
			}
		}
		close(grpcReq.ResponseCh)
	case REQ_MOD_POLICY_ASSIGNMENT:
		err := server.handleGrpcModPolicyAssignment(grpcReq)
		grpcReq.ResponseCh <- &GrpcResponse{
			ResponseErr: err,
		}
		close(grpcReq.ResponseCh)
	case REQ_MONITOR_GLOBAL_BEST_CHANGED, REQ_MONITOR_NEIGHBOR_PEER_STATE:
		// Long-lived subscription; serviced by the broadcast loop.
		server.broadcastReqs = append(server.broadcastReqs, grpcReq)
	case REQ_MRT_GLOBAL_RIB, REQ_MRT_LOCAL_RIB:
		server.handleMrt(grpcReq)
	case REQ_ROA, REQ_RPKI:
		server.roaClient.handleGRPC(grpcReq)
	case REQ_VRF, REQ_VRFS, REQ_VRF_MOD:
		pathList := server.handleVrfRequest(grpcReq)
		if len(pathList) > 0 {
			msgs = server.propagateUpdate(nil, pathList)
		}
	default:
		errmsg := fmt.Errorf("Unknown request type: %v", grpcReq.RequestType)
		result := &GrpcResponse{
			ResponseErr: errmsg,
		}
		grpcReq.ResponseCh <- result
		close(grpcReq.ResponseCh)
	}
	return msgs
}
// handleGrpcGetDefinedSet streams the defined-sets of the requested
// type — optionally narrowed to a single name — to the response
// channel. It returns an error for an invalid type or when no set
// matched.
func (server *BgpServer) handleGrpcGetDefinedSet(grpcReq *GrpcRequest) error {
	arg := grpcReq.Data.(*api.DefinedSet)
	typ := table.DefinedType(arg.Type)
	name := arg.Name
	set, ok := server.policy.DefinedSetMap[typ]
	if !ok {
		return fmt.Errorf("invalid defined-set type: %d", typ)
	}
	matched := false
	for _, s := range set {
		if name == "" || name == s.Name() {
			grpcReq.ResponseCh <- &GrpcResponse{
				Data: s.ToApiStruct(),
			}
			matched = true
			if name != "" {
				break
			}
		}
	}
	if !matched {
		return fmt.Errorf("not found %s", name)
	}
	return nil
}
// handleGrpcModDefinedSet adds, removes, replaces, or deletes a
// defined-set. Deletion of the whole set is refused while the set is
// referenced by any policy.
func (server *BgpServer) handleGrpcModDefinedSet(grpcReq *GrpcRequest) error {
	arg := grpcReq.Data.(*api.ModDefinedSetArguments)
	set := arg.Set
	typ := table.DefinedType(set.Type)
	name := set.Name
	setMap, ok := server.policy.DefinedSetMap[typ]
	if !ok {
		return fmt.Errorf("invalid defined-set type: %d", typ)
	}
	cur, exists := setMap[name]
	if !exists && arg.Operation != api.Operation_ADD {
		return fmt.Errorf("not found defined-set: %s", name)
	}
	s, err := table.NewDefinedSetFromApiStruct(set)
	if err != nil {
		return err
	}
	switch arg.Operation {
	case api.Operation_ADD:
		if exists {
			err = cur.Append(s)
		} else {
			setMap[name] = s
		}
	case api.Operation_DEL:
		err = cur.Remove(s)
	case api.Operation_DEL_ALL:
		if server.policy.InUse(cur) {
			return fmt.Errorf("can't delete. defined-set %s is in use", name)
		}
		delete(setMap, name)
	case api.Operation_REPLACE:
		err = cur.Replace(s)
	}
	return err
}
// handleGrpcGetStatement streams policy statements — optionally
// narrowed to a single name — to the response channel. It returns an
// error when nothing matched.
func (server *BgpServer) handleGrpcGetStatement(grpcReq *GrpcRequest) error {
	arg := grpcReq.Data.(*api.Statement)
	name := arg.Name
	matched := false
	for _, s := range server.policy.StatementMap {
		if name == "" || name == s.Name {
			grpcReq.ResponseCh <- &GrpcResponse{
				Data: s.ToApiStruct(),
			}
			matched = true
			if name != "" {
				break
			}
		}
	}
	if !matched {
		return fmt.Errorf("not found %s", name)
	}
	return nil
}
// handleGrpcModStatement adds, removes, replaces, or deletes a policy
// statement. Deleting a whole statement is refused while any policy
// still references it.
func (server *BgpServer) handleGrpcModStatement(grpcReq *GrpcRequest) error {
	arg := grpcReq.Data.(*api.ModStatementArguments)
	s, err := table.NewStatementFromApiStruct(arg.Statement, server.policy.DefinedSetMap)
	if err != nil {
		return err
	}
	stmtMap := server.policy.StatementMap
	name := s.Name
	cur, exists := stmtMap[name]
	if !exists && arg.Operation != api.Operation_ADD {
		return fmt.Errorf("not found statement: %s", name)
	}
	switch arg.Operation {
	case api.Operation_ADD:
		if exists {
			err = cur.Add(s)
		} else {
			stmtMap[name] = s
		}
	case api.Operation_DEL:
		err = cur.Remove(s)
	case api.Operation_DEL_ALL:
		if server.policy.StatementInUse(cur) {
			return fmt.Errorf("can't delete. statement %s is in use", name)
		}
		delete(stmtMap, name)
	case api.Operation_REPLACE:
		err = cur.Replace(s)
	}
	return err
}
// handleGrpcGetPolicy streams policies — optionally narrowed to a
// single name — to the response channel. It returns an error when
// nothing matched.
func (server *BgpServer) handleGrpcGetPolicy(grpcReq *GrpcRequest) error {
	arg := grpcReq.Data.(*api.Policy)
	name := arg.Name
	matched := false
	for _, p := range server.policy.PolicyMap {
		if name == "" || name == p.Name() {
			grpcReq.ResponseCh <- &GrpcResponse{
				Data: p.ToApiStruct(),
			}
			matched = true
			if name != "" {
				break
			}
		}
	}
	if !matched {
		return fmt.Errorf("not found %s", name)
	}
	return nil
}
// policyInUse reports whether policy x is currently assigned to any
// neighbor or to the global RIB in any direction.
//
// BUG FIX: the direction lists previously contained
// POLICY_DIRECTION_EXPORT twice and omitted POLICY_DIRECTION_IMPORT
// (copy-paste error), so a policy assigned only in the import
// direction was reported as unused and could be deleted while in use.
func (server *BgpServer) policyInUse(x *table.Policy) bool {
	for _, peer := range server.neighborMap {
		for _, dir := range []table.PolicyDirection{table.POLICY_DIRECTION_IN, table.POLICY_DIRECTION_IMPORT, table.POLICY_DIRECTION_EXPORT} {
			for _, y := range peer.GetPolicy(dir) {
				if x.Name() == y.Name() {
					return true
				}
			}
		}
	}
	for _, dir := range []table.PolicyDirection{table.POLICY_DIRECTION_IMPORT, table.POLICY_DIRECTION_EXPORT} {
		for _, y := range server.globalRib.GetPolicy(dir) {
			if x.Name() == y.Name() {
				return true
			}
		}
	}
	return false
}
// handleGrpcModPolicy adds, removes, replaces, or deletes a policy.
// Statement ownership: unless ReferExistingStatements is set, a new
// policy's statements are registered in the shared statement map; after
// a non-ADD operation, statements no longer referenced anywhere are
// garbage-collected unless PreserveStatements is set.
func (server *BgpServer) handleGrpcModPolicy(grpcReq *GrpcRequest) error {
	arg := grpcReq.Data.(*api.ModPolicyArguments)
	x, err := table.NewPolicyFromApiStruct(arg.Policy, server.policy.DefinedSetMap)
	if err != nil {
		return err
	}
	pMap := server.policy.PolicyMap
	sMap := server.policy.StatementMap
	name := x.Name()
	y, ok := pMap[name]
	// For DEL/DEL_ALL/REPLACE the policy must already exist, so y is
	// guaranteed non-nil in those branches below.
	if arg.Operation != api.Operation_ADD && !ok {
		return fmt.Errorf("not found policy: %s", name)
	}
	switch arg.Operation {
	case api.Operation_ADD, api.Operation_REPLACE:
		if arg.ReferExistingStatements {
			// Resolve statement names against the shared statement map.
			err = x.FillUp(sMap)
			if err != nil {
				return err
			}
		} else {
			// Register the inline statements; duplicate names are an error.
			for _, s := range x.Statements {
				if _, ok := sMap[s.Name]; ok {
					return fmt.Errorf("statement %s already defined", s.Name)
				}
				sMap[s.Name] = s
			}
		}
		if arg.Operation == api.Operation_REPLACE {
			err = y.Replace(x)
		} else if ok {
			err = y.Add(x)
		} else {
			pMap[name] = x
		}
	case api.Operation_DEL:
		err = y.Remove(x)
	case api.Operation_DEL_ALL:
		if server.policyInUse(y) {
			return fmt.Errorf("can't delete. policy %s is in use", name)
		}
		log.WithFields(log.Fields{
			"Topic": "Policy",
			"Key":   name,
		}).Debug("delete policy")
		delete(pMap, name)
	}
	// Garbage-collect statements that are no longer referenced by any
	// policy (skipped for plain ADD or when the caller wants them kept).
	if err == nil && arg.Operation != api.Operation_ADD && !arg.PreserveStatements {
		for _, s := range y.Statements {
			if !server.policy.StatementInUse(s) {
				log.WithFields(log.Fields{
					"Topic": "Policy",
					"Key":   s.Name,
				}).Debug("delete unused statement")
				delete(sMap, s.Name)
			}
		}
	}
	return err
}
// policyPoint abstracts anything that can hold per-direction policy
// assignments; in this file it is used with both peers and the global
// RIB (see setPolicyByConfig and getPolicyInfo).
type policyPoint interface {
	GetDefaultPolicy(table.PolicyDirection) table.RouteType
	GetPolicy(table.PolicyDirection) []*table.Policy
	SetDefaultPolicy(table.PolicyDirection, table.RouteType) error
	SetPolicy(table.PolicyDirection, []*table.Policy) error
}
// getPolicyInfo maps an api.PolicyAssignment onto the policy point
// (global RIB or a named peer) and policy direction it refers to.
func (server *BgpServer) getPolicyInfo(a *api.PolicyAssignment) (policyPoint, table.PolicyDirection, error) {
	switch a.Resource {
	case api.Resource_GLOBAL:
		// The global RIB supports only import and export directions.
		switch a.Type {
		case api.PolicyType_IMPORT:
			return server.globalRib, table.POLICY_DIRECTION_IMPORT, nil
		case api.PolicyType_EXPORT:
			return server.globalRib, table.POLICY_DIRECTION_EXPORT, nil
		}
		return nil, table.POLICY_DIRECTION_NONE, fmt.Errorf("invalid policy type")
	case api.Resource_LOCAL:
		peer, ok := server.neighborMap[a.Name]
		if !ok {
			return nil, table.POLICY_DIRECTION_NONE, fmt.Errorf("not found peer %s", a.Name)
		}
		switch a.Type {
		case api.PolicyType_IN:
			return peer, table.POLICY_DIRECTION_IN, nil
		case api.PolicyType_IMPORT:
			return peer, table.POLICY_DIRECTION_IMPORT, nil
		case api.PolicyType_EXPORT:
			return peer, table.POLICY_DIRECTION_EXPORT, nil
		}
		return nil, table.POLICY_DIRECTION_NONE, fmt.Errorf("invalid policy type")
	}
	return nil, table.POLICY_DIRECTION_NONE, fmt.Errorf("invalid resource type")
}
// handleGrpcGetPolicyAssignment fills the request's PolicyAssignment
// with the default action and assigned policies of the referenced
// policy point/direction, and sends it back on the response channel.
func (server *BgpServer) handleGrpcGetPolicyAssignment(grpcReq *GrpcRequest) error {
	arg := grpcReq.Data.(*api.PolicyAssignment)
	point, dir, err := server.getPolicyInfo(arg)
	if err != nil {
		return err
	}
	arg.Default = point.GetDefaultPolicy(dir).ToApiStruct()
	assigned := point.GetPolicy(dir)
	policies := make([]*api.Policy, 0, len(assigned))
	for _, p := range assigned {
		policies = append(policies, p.ToApiStruct())
	}
	arg.Policies = policies
	grpcReq.ResponseCh <- &GrpcResponse{
		Data: arg,
	}
	return nil
}
// handleGrpcModPolicyAssignment adds, deletes, or replaces the set of
// policies assigned to a policy point (global RIB or peer) in a given
// direction, optionally updating the default route action.
//
// BUG FIX (Operation_DEL): the assignment is now rebuilt as the
// currently-assigned policies minus those named in the request. The
// previous code iterated the request list and kept the requested
// policies that were NOT currently assigned, which both discarded the
// surviving assignment and could panic via a negative make() capacity
// (len(cur)-len(ps)) when more policies were requested than assigned.
func (server *BgpServer) handleGrpcModPolicyAssignment(grpcReq *GrpcRequest) error {
	var err error
	var dir table.PolicyDirection
	var i policyPoint
	arg := grpcReq.Data.(*api.ModPolicyAssignmentArguments)
	assignment := arg.Assignment
	i, dir, err = server.getPolicyInfo(assignment)
	if err != nil {
		return err
	}
	// Resolve the named policies against the server's policy map.
	ps := make([]*table.Policy, 0, len(assignment.Policies))
	for _, x := range assignment.Policies {
		p, ok := server.policy.PolicyMap[x.Name]
		if !ok {
			return fmt.Errorf("not found policy %s", x.Name)
		}
		ps = append(ps, p)
	}
	cur := i.GetPolicy(dir)
	switch arg.Operation {
	case api.Operation_ADD, api.Operation_REPLACE:
		if arg.Operation == api.Operation_REPLACE || cur == nil {
			err = i.SetPolicy(dir, ps)
		} else {
			err = i.SetPolicy(dir, append(cur, ps...))
		}
		if err != nil {
			return err
		}
		switch assignment.Default {
		case api.RouteAction_ACCEPT:
			err = i.SetDefaultPolicy(dir, table.ROUTE_TYPE_ACCEPT)
		case api.RouteAction_REJECT:
			err = i.SetDefaultPolicy(dir, table.ROUTE_TYPE_REJECT)
		}
	case api.Operation_DEL:
		// Keep every currently-assigned policy that is not listed in the
		// deletion request.
		n := make([]*table.Policy, 0, len(cur))
		for _, y := range cur {
			requested := false
			for _, x := range ps {
				if x.Name() == y.Name() {
					requested = true
					break
				}
			}
			if !requested {
				n = append(n, y)
			}
		}
		err = i.SetPolicy(dir, n)
	case api.Operation_DEL_ALL:
		err = i.SetPolicy(dir, nil)
		if err != nil {
			return err
		}
		// Clearing the assignment also resets the default action.
		err = i.SetDefaultPolicy(dir, table.ROUTE_TYPE_NONE)
	}
	return err
}
// handleMrt answers an MRT dump request (REQ_MRT_GLOBAL_RIB or
// REQ_MRT_LOCAL_RIB): it serializes a PEER_INDEX_TABLE message followed by
// the RIB messages for the requested route family and queues the blob as a
// broadcast response. When the request carries a non-zero interval (uint64
// seconds), the same request is re-injected after that delay so the client
// receives periodic dumps; otherwise the response channel is closed after
// the single reply.
func (server *BgpServer) handleMrt(grpcReq *GrpcRequest) {
	now := uint32(time.Now().Unix())
	view := ""
	result := &GrpcResponse{}
	var rib *table.TableManager
	switch grpcReq.RequestType {
	case REQ_MRT_GLOBAL_RIB:
		rib = server.globalRib
	case REQ_MRT_LOCAL_RIB:
		peer, err := server.checkNeighborRequest(grpcReq)
		if err != nil {
			// checkNeighborRequest already replied and closed the channel
			return
		}
		rib = peer.localRib
		if rib == nil {
			// peer is not a route-server client, so it has no local rib
			result.ResponseErr = fmt.Errorf("no local rib for %s", grpcReq.Name)
			grpcReq.ResponseCh <- result
			close(grpcReq.ResponseCh)
			return
		}
		view = grpcReq.Name
	}
	// NOTE(review): rib stays nil for any other request type and the
	// rib.Tables access below would panic — confirm callers only ever pass
	// the two handled types.
	msg, err := server.mkMrtPeerIndexTableMsg(now, view)
	if err != nil {
		result.ResponseErr = fmt.Errorf("failed to make new mrt peer index table message: %s", err)
		grpcReq.ResponseCh <- result
		close(grpcReq.ResponseCh)
		return
	}
	data, err := msg.Serialize()
	if err != nil {
		result.ResponseErr = fmt.Errorf("failed to serialize table: %s", err)
		grpcReq.ResponseCh <- result
		close(grpcReq.ResponseCh)
		return
	}
	tbl, ok := rib.Tables[grpcReq.RouteFamily]
	if !ok {
		result.ResponseErr = fmt.Errorf("unsupported route family: %s", grpcReq.RouteFamily)
		grpcReq.ResponseCh <- result
		close(grpcReq.ResponseCh)
		return
	}
	msgs, err := server.mkMrtRibMsgs(tbl, now)
	if err != nil {
		result.ResponseErr = fmt.Errorf("failed to make new mrt rib message: %s", err)
		grpcReq.ResponseCh <- result
		close(grpcReq.ResponseCh)
		return
	}
	// concatenate the index table and all rib messages into one blob
	for _, msg := range msgs {
		d, err := msg.Serialize()
		if err != nil {
			result.ResponseErr = fmt.Errorf("failed to serialize rib msg: %s", err)
			grpcReq.ResponseCh <- result
			close(grpcReq.ResponseCh)
			return
		}
		data = append(data, d...)
	}
	result.Data = &api.MrtMessage{
		Data: data,
	}
	// requester already gone? then don't queue anything
	select {
	case <-grpcReq.EndCh:
		return
	default:
	}
	m := &broadcastGrpcMsg{
		req:    grpcReq,
		result: result,
	}
	interval := int64(grpcReq.Data.(uint64))
	if interval > 0 {
		// periodic dump: re-inject the same request after the interval;
		// done stays false so the response channel remains open
		go func() {
			t := time.NewTimer(time.Second * time.Duration(interval))
			<-t.C
			server.GrpcReqCh <- grpcReq
		}()
	} else {
		m.done = true
	}
	server.broadcastMsgs = append(server.broadcastMsgs, m)
	return
}
// mkMrtPeerIndexTableMsg builds an MRT TABLE_DUMPv2 PEER_INDEX_TABLE message
// listing every configured neighbor, stamped with timestamp t and the given
// view name.
//
// NOTE(review): the peer order comes from ranging over server.neighborMap,
// which Go does not keep stable across iterations — confirm whether MRT
// consumers require this order to match the indices used by mkMrtRibMsgs.
func (server *BgpServer) mkMrtPeerIndexTableMsg(t uint32, view string) (*bgp.MRTMessage, error) {
	peers := make([]*bgp.Peer, 0, len(server.neighborMap))
	for _, p := range server.neighborMap {
		peers = append(peers, bgp.NewPeer(
			p.peerInfo.ID.To4().String(),
			p.conf.NeighborConfig.NeighborAddress.String(),
			p.conf.NeighborConfig.PeerAs,
			true))
	}
	routerID := server.bgpConfig.Global.GlobalConfig.RouterId.To4().String()
	indexTable := bgp.NewPeerIndexTable(routerID, view, peers)
	return bgp.NewMRTMessage(t, bgp.TABLE_DUMPv2, bgp.PEER_INDEX_TABLE, indexTable)
}
// mkMrtRibMsgs converts tbl into MRT TABLE_DUMPv2 RIB messages, one per
// destination that carries at least one non-local path, stamped with
// timestamp t.
func (server *BgpServer) mkMrtRibMsgs(tbl *table.Table, t uint32) ([]*bgp.MRTMessage, error) {
	// Snapshot the neighbor list once: ranging over a Go map can yield a
	// different order on every iteration, so the original closure (which
	// re-ranged server.neighborMap on every call) could assign different
	// indices to the same peer within a single dump.
	infos := make([]*table.PeerInfo, 0, len(server.neighborMap))
	for _, peer := range server.neighborMap {
		infos = append(infos, peer.peerInfo)
	}
	// NOTE(review): this ordering is still not guaranteed to match the one
	// used by mkMrtPeerIndexTableMsg — confirm whether MRT consumers need
	// them aligned.
	getPeerIndex := func(info *table.PeerInfo) uint16 {
		for i, in := range infos {
			if in.Equal(info) {
				return uint16(i)
			}
		}
		// unknown source: same out-of-range fallback as the original
		return uint16(len(infos))
	}
	var subtype bgp.MRTSubTypeTableDumpv2
	switch tbl.GetRoutefamily() {
	case bgp.RF_IPv4_UC:
		subtype = bgp.RIB_IPV4_UNICAST
	case bgp.RF_IPv4_MC:
		subtype = bgp.RIB_IPV4_MULTICAST
	case bgp.RF_IPv6_UC:
		subtype = bgp.RIB_IPV6_UNICAST
	case bgp.RF_IPv6_MC:
		subtype = bgp.RIB_IPV6_MULTICAST
	default:
		subtype = bgp.RIB_GENERIC
	}
	var seq uint32
	msgs := make([]*bgp.MRTMessage, 0, len(tbl.GetDestinations()))
	for _, dst := range tbl.GetDestinations() {
		l := dst.GetKnownPathList()
		entries := make([]*bgp.RibEntry, 0, len(l))
		for _, p := range l {
			// mrt doesn't assume to dump locally generated routes
			if p.IsLocal() {
				continue
			}
			idx := getPeerIndex(p.GetSource())
			e := bgp.NewRibEntry(idx, uint32(p.GetTimestamp().Unix()), p.GetPathAttrs())
			entries = append(entries, e)
		}
		// if dst only contains locally generated routes, ignore it
		if len(entries) == 0 {
			continue
		}
		rib := bgp.NewRib(seq, dst.GetNlri(), entries)
		seq++
		msg, err := bgp.NewMRTMessage(t, bgp.TABLE_DUMPv2, subtype, rib)
		if err != nil {
			return nil, err
		}
		msgs = append(msgs, msg)
	}
	return msgs, nil
}
// NewZclient connects to a zebra daemon at url (formatted "proto:address"),
// announces this process as a BGP route source, requests redistribution of
// the listed route types plus default routes, and stores the client on the
// server.
func (server *BgpServer) NewZclient(url string, redistRouteTypes []string) error {
	parts := strings.SplitN(url, ":", 2)
	if len(parts) != 2 {
		return fmt.Errorf("unsupported url: %s", url)
	}
	client, err := zebra.NewClient(parts[0], parts[1], zebra.ROUTE_BGP)
	if err != nil {
		return err
	}
	client.SendHello()
	client.SendRouterIDAdd()
	client.SendInterfaceAdd()
	for _, name := range redistRouteTypes {
		routeType, err := zebra.RouteTypeFromString(name)
		if err != nil {
			return err
		}
		client.SendRedistribute(routeType)
	}
	if err := client.SendCommand(zebra.REDISTRIBUTE_DEFAULT_ADD, nil); err != nil {
		return err
	}
	server.zclient = client
	return nil
}
server: avoid updating peer's LocalAddress unnecessary
Signed-off-by: FUJITA Tomonori <93dac1fe9c4b2a3957982200319981492ad4976e@lab.ntt.co.jp>
// Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"fmt"
log "github.com/Sirupsen/logrus"
"github.com/armon/go-radix"
api "github.com/osrg/gobgp/api"
"github.com/osrg/gobgp/config"
"github.com/osrg/gobgp/packet"
"github.com/osrg/gobgp/table"
"github.com/osrg/gobgp/zebra"
"net"
"os"
"strconv"
"strings"
"time"
)
const (
	// GLOBAL_RIB_NAME is the owner name of the server-wide RIB, as opposed
	// to the per-peer local RIBs kept for route-server clients.
	GLOBAL_RIB_NAME = "global"
)
// SenderMsg is one unit of outbound work for the sender goroutine started in
// Serve: a batch of BGP messages plus the peer's outgoing channel.
type SenderMsg struct {
	messages []*bgp.BGPMessage
	sendCh   chan *bgp.BGPMessage
	// destination is the peer address, used only for logging.
	destination string
	// NOTE(review): despite the name, newSenderMsg stores the peer's
	// four-octet-AS capability here; the sender goroutine converts path
	// attributes to 2-byte AS form when this is false, so behavior is
	// consistent. Confirm before renaming (the consumer in Serve and the
	// producer newSenderMsg must change together).
	twoBytesAs bool
}
// broadcastMsg is a queued broadcast; Serve drains server.broadcastMsgs
// through a dedicated goroutine that invokes send() on each entry.
type broadcastMsg interface {
	send()
}
// broadcastGrpcMsg delivers one GrpcResponse to a gRPC requester; when done
// is set, the response channel is closed afterwards to signal end of stream.
type broadcastGrpcMsg struct {
	req    *GrpcRequest
	result *GrpcResponse
	done   bool
}
// send pushes the response to the requester and, for a finished request,
// closes the response channel to mark the end of the stream.
func (m *broadcastGrpcMsg) send() {
	m.req.ResponseCh <- m.result
	if m.done {
		close(m.req.ResponseCh)
	}
}
// broadcastBGPMsg carries one raw BGP message together with the session
// addressing and capability details a consumer (e.g. the MRT dumper
// goroutine) needs to frame it.
type broadcastBGPMsg struct {
	message      *bgp.BGPMessage
	peerAS       uint32
	localAS      uint32
	peerAddress  net.IP
	localAddress net.IP
	fourBytesAs  bool
	// ch is the consumer's channel; send() delivers the whole struct on it.
	ch chan *broadcastBGPMsg
}
// send hands the message (with its addressing metadata) to the consumer's
// channel.
func (m *broadcastBGPMsg) send() {
	m.ch <- m
}
// BgpServer is the daemon's core state: the configuration channels feeding
// the single-goroutine Serve loop, the RIBs and peer set, and the external
// clients (zebra, RPKI/ROA, BMP, MRT dumper).
type BgpServer struct {
	bgpConfig config.Bgp
	// globalTypeCh delivers the initial global config; Serve blocks on it.
	globalTypeCh  chan config.Global
	addedPeerCh   chan config.Neighbor
	deletedPeerCh chan config.Neighbor
	updatedPeerCh chan config.Neighbor
	rpkiConfigCh  chan config.RpkiServers
	bmpConfigCh   chan config.BmpServers
	// dumper is the MRT dumper; nil unless configured.
	dumper         *dumper
	GrpcReqCh      chan *GrpcRequest
	listenPort     int
	policyUpdateCh chan config.RoutingPolicy
	policy         *table.RoutingPolicy
	// broadcastReqs holds active gRPC monitor subscriptions.
	broadcastReqs []*GrpcRequest
	// broadcastMsgs is the queue of pending broadcasts drained by Serve.
	broadcastMsgs []broadcastMsg
	// neighborMap is keyed by the neighbor address string.
	neighborMap map[string]*Peer
	globalRib   *table.TableManager
	zclient     *zebra.Client
	roaClient   *roaClient
	bmpClient   *bmpClient
	bmpConnCh   chan *bmpConn
	shutdown    bool
}
// NewBgpServer allocates a BgpServer that will listen on the given TCP port,
// with all control channels created and an ROA client started from an empty
// RPKI configuration. Call Serve (after SetGlobalType) to start it.
func NewBgpServer(port int) *BgpServer {
	server := &BgpServer{
		globalTypeCh:   make(chan config.Global),
		addedPeerCh:    make(chan config.Neighbor),
		deletedPeerCh:  make(chan config.Neighbor),
		updatedPeerCh:  make(chan config.Neighbor),
		rpkiConfigCh:   make(chan config.RpkiServers),
		bmpConfigCh:    make(chan config.BmpServers),
		bmpConnCh:      make(chan *bmpConn),
		GrpcReqCh:      make(chan *GrpcRequest, 1),
		policyUpdateCh: make(chan config.RoutingPolicy),
		neighborMap:    make(map[string]*Peer),
		listenPort:     port,
	}
	server.roaClient, _ = newROAClient(config.RpkiServers{})
	return server
}
// listenAndAccept opens a proto ("tcp4"/"tcp6") listener on the given port
// and starts a goroutine forwarding every accepted connection to ch. The
// listener is returned so callers can close it. The address is resolved
// explicitly per protocol to avoid mapped IPv6 addresses.
func listenAndAccept(proto string, port int, ch chan *net.TCPConn) (*net.TCPListener, error) {
	service := ":" + strconv.Itoa(port)
	addr, err := net.ResolveTCPAddr(proto, service)
	if err != nil {
		// the original silently discarded this error and passed a possibly
		// nil address to ListenTCP
		log.Info(err)
		return nil, err
	}
	l, err := net.ListenTCP(proto, addr)
	if err != nil {
		log.Info(err)
		return nil, err
	}
	go func() {
		for {
			conn, err := l.AcceptTCP()
			if err != nil {
				// NOTE(review): a permanent error (e.g. the listener being
				// closed) makes this loop spin and log forever — consider
				// returning on non-temporary errors.
				log.Info(err)
				continue
			}
			ch <- conn
		}
	}()
	return l, nil
}
// Serve is the server's main loop. It blocks until the global configuration
// arrives on globalTypeCh, sets up the MRT dumper, zebra client, TCP
// listeners and helper goroutines, then multiplexes every server event
// (gRPC requests, inbound connections, peer add/delete/update, FSM events,
// policy updates, queued sends and broadcasts) in this single goroutine.
func (server *BgpServer) Serve() {
	g := <-server.globalTypeCh
	server.bgpConfig.Global = g
	if g.Mrt.FileName != "" {
		d, err := newDumper(g.Mrt.FileName)
		if err != nil {
			log.Warn(err)
		} else {
			server.dumper = d
		}
	}
	if g.Zebra.Enabled == true {
		if g.Zebra.Url == "" {
			g.Zebra.Url = "unix:/var/run/quagga/zserv.api"
		}
		redists := make([]string, 0, len(g.Zebra.RedistributeRouteTypeList))
		for _, t := range g.Zebra.RedistributeRouteTypeList {
			redists = append(redists, t.RouteType)
		}
		err := server.NewZclient(g.Zebra.Url, redists)
		if err != nil {
			log.Error(err)
		}
	}
	// sender goroutine: drains SenderMsgs and writes each BGP message to the
	// peer's outgoing channel
	senderCh := make(chan *SenderMsg, 1<<16)
	go func(ch chan *SenderMsg) {
		for {
			// TODO: must be more clever. Slow peer makes other peers slow too.
			m := <-ch
			w := func(c chan *bgp.BGPMessage, msg *bgp.BGPMessage) {
				// nasty but the peer could already become non established state before here.
				defer func() { recover() }()
				c <- msg
			}
			for _, b := range m.messages {
				// NOTE(review): twoBytesAs actually carries the peer's
				// four-octet-AS capability (see newSenderMsg), so false
				// means "2-byte AS peer" here.
				if m.twoBytesAs == false && b.Header.Type == bgp.BGP_MSG_UPDATE {
					log.WithFields(log.Fields{
						"Topic": "Peer",
						"Key":   m.destination,
						"Data":  b,
					}).Debug("update for 2byte AS peer")
					table.UpdatePathAttrs2ByteAs(b.Body.(*bgp.BGPUpdate))
				}
				w(m.sendCh, b)
			}
		}
	}(senderCh)
	// broadcast goroutine: delivers queued broadcast messages one by one
	broadcastCh := make(chan broadcastMsg, 8)
	go func(ch chan broadcastMsg) {
		for {
			m := <-ch
			m.send()
		}
	}(broadcastCh)
	// toRFlist maps configured AFI/SAFI names to route families
	toRFlist := func(l []config.AfiSafi) []bgp.RouteFamily {
		rfList := []bgp.RouteFamily{}
		for _, rf := range l {
			k, _ := bgp.GetRouteFamily(rf.AfiSafiName)
			rfList = append(rfList, k)
		}
		return rfList
	}
	server.globalRib = table.NewTableManager(GLOBAL_RIB_NAME, toRFlist(g.AfiSafis.AfiSafiList), g.MplsLabelRange.MinLabel, g.MplsLabelRange.MaxLabel)
	listenerMap := make(map[string]*net.TCPListener)
	acceptCh := make(chan *net.TCPConn)
	l4, err1 := listenAndAccept("tcp4", server.listenPort, acceptCh)
	listenerMap["tcp4"] = l4
	l6, err2 := listenAndAccept("tcp6", server.listenPort, acceptCh)
	listenerMap["tcp6"] = l6
	// only fatal if BOTH address families fail to listen
	if err1 != nil && err2 != nil {
		log.Fatal("can't listen either v4 and v6")
		os.Exit(1)
	}
	// listener picks the v4 or v6 listener matching the peer's address
	listener := func(addr net.IP) *net.TCPListener {
		var l *net.TCPListener
		if addr.To4() != nil {
			l = listenerMap["tcp4"]
		} else {
			l = listenerMap["tcp6"]
		}
		return l
	}
	incoming := make(chan *fsmMsg, 4096)
	var senderMsgs []*SenderMsg
	// zapiMsgCh stays nil (and its case never fires) when zebra is disabled
	var zapiMsgCh chan *zebra.Message
	if server.zclient != nil {
		zapiMsgCh = server.zclient.Receive()
	}
	for {
		// expose the heads of the pending send/broadcast queues to the
		// select below; a nil channel disables the corresponding case
		var firstMsg *SenderMsg
		var sCh chan *SenderMsg
		if len(senderMsgs) > 0 {
			sCh = senderCh
			firstMsg = senderMsgs[0]
		}
		var firstBroadcastMsg broadcastMsg
		var bCh chan broadcastMsg
		if len(server.broadcastMsgs) > 0 {
			bCh = broadcastCh
			firstBroadcastMsg = server.broadcastMsgs[0]
		}
		// passConn hands an inbound connection to its configured peer after
		// validating the configured local address, if any
		passConn := func(conn *net.TCPConn) {
			remoteAddr, _, _ := net.SplitHostPort(conn.RemoteAddr().String())
			peer, found := server.neighborMap[remoteAddr]
			if found {
				localAddrValid := func(laddr net.IP) bool {
					if laddr == nil {
						return true
					}
					l := conn.LocalAddr()
					if l == nil {
						// already closed
						return false
					}
					host, _, _ := net.SplitHostPort(l.String())
					if host != laddr.String() {
						log.WithFields(log.Fields{
							"Topic":           "Peer",
							"Key":             remoteAddr,
							"Configured addr": laddr.String(),
							"Addr":            host,
						}).Info("Mismatched local address")
						return false
					}
					return true
				}(peer.conf.Transport.TransportConfig.LocalAddress)
				if localAddrValid == false {
					conn.Close()
					return
				}
				log.Debug("accepted a new passive connection from ", remoteAddr)
				peer.PassConn(conn)
			} else {
				log.Info("can't find configuration for a new passive connection from ", remoteAddr)
				conn.Close()
			}
		}
		// non-blocking pre-select: prioritize gRPC requests and inbound
		// connections over the main event select below
		select {
		case grpcReq := <-server.GrpcReqCh:
			m := server.handleGrpc(grpcReq)
			if len(m) > 0 {
				senderMsgs = append(senderMsgs, m...)
			}
		case conn := <-acceptCh:
			passConn(conn)
		default:
		}
		select {
		case c := <-server.rpkiConfigCh:
			server.roaClient, _ = newROAClient(c)
		case c := <-server.bmpConfigCh:
			server.bmpClient, _ = newBMPClient(c, server.bmpConnCh)
		case c := <-server.bmpConnCh:
			// a BMP collector connected: replay the adj-RIB-in of every
			// established peer, one update per path to keep timestamps
			bmpMsgList := []*bgp.BMPMessage{}
			for _, targetPeer := range server.neighborMap {
				pathList := make([]*table.Path, 0)
				if targetPeer.fsm.state != bgp.BGP_FSM_ESTABLISHED {
					continue
				}
				for _, rf := range targetPeer.configuredRFlist() {
					pathList = append(pathList, targetPeer.adjRib.GetInPathList(rf)...)
				}
				for _, p := range pathList {
					// avoid to merge for timestamp
					u := table.CreateUpdateMsgFromPaths([]*table.Path{p})
					bmpMsgList = append(bmpMsgList, bmpPeerRoute(bgp.BMP_PEER_TYPE_GLOBAL, false, 0, targetPeer.peerInfo, p.GetTimestamp().Unix(), u[0]))
				}
			}
			m := &broadcastBMPMsg{
				ch:      server.bmpClient.send(),
				conn:    c.conn,
				addr:    c.addr,
				msgList: bmpMsgList,
			}
			server.broadcastMsgs = append(server.broadcastMsgs, m)
		case rmsg := <-server.roaClient.recieveROA():
			server.roaClient.handleRTRMsg(rmsg)
		case zmsg := <-zapiMsgCh:
			m := handleZapiMsg(zmsg, server)
			if len(m) > 0 {
				senderMsgs = append(senderMsgs, m...)
			}
		case conn := <-acceptCh:
			passConn(conn)
		case config := <-server.addedPeerCh:
			addr := config.NeighborConfig.NeighborAddress.String()
			_, found := server.neighborMap[addr]
			if found {
				log.Warn("Can't overwrite the exising peer ", addr)
				continue
			}
			SetTcpMD5SigSockopts(listener(config.NeighborConfig.NeighborAddress), addr, config.NeighborConfig.AuthPassword)
			// route-server clients get a private local RIB; others share
			// the global RIB
			var loc *table.TableManager
			if config.RouteServer.RouteServerConfig.RouteServerClient {
				loc = table.NewTableManager(config.NeighborConfig.NeighborAddress.String(), toRFlist(config.AfiSafis.AfiSafiList), g.MplsLabelRange.MinLabel, g.MplsLabelRange.MaxLabel)
			} else {
				loc = server.globalRib
			}
			peer := NewPeer(g, config, loc)
			server.setPolicyByConfig(peer, config.ApplyPolicy)
			if peer.isRouteServerClient() {
				// seed the new client's RIB from the other clients' routes
				pathList := make([]*table.Path, 0)
				rfList := peer.configuredRFlist()
				for _, p := range server.neighborMap {
					if p.isRouteServerClient() == true {
						pathList = append(pathList, p.getAccepted(rfList)...)
					}
				}
				pathList, _ = peer.ApplyPolicy(table.POLICY_DIRECTION_IMPORT, pathList)
				if len(pathList) > 0 {
					peer.localRib.ProcessPaths(pathList)
				}
			}
			server.neighborMap[addr] = peer
			peer.startFSMHandler(incoming)
			server.broadcastPeerState(peer)
		case config := <-server.deletedPeerCh:
			addr := config.NeighborConfig.NeighborAddress.String()
			SetTcpMD5SigSockopts(listener(config.NeighborConfig.NeighborAddress), addr, "")
			peer, found := server.neighborMap[addr]
			if found {
				log.Info("Delete a peer configuration for ", addr)
				// tear down the FSM asynchronously; the watchdog timers
				// abort the process if teardown hangs
				go func(addr string) {
					t := time.AfterFunc(time.Minute*5, func() { log.Fatal("failed to free the fsm.h.t for ", addr) })
					peer.fsm.h.t.Kill(nil)
					peer.fsm.h.t.Wait()
					t.Stop()
					t = time.AfterFunc(time.Minute*5, func() { log.Fatal("failed to free the fsm.h for ", addr) })
					peer.fsm.t.Kill(nil)
					peer.fsm.t.Wait()
					t.Stop()
				}(addr)
				m := server.dropPeerAllRoutes(peer)
				if len(m) > 0 {
					senderMsgs = append(senderMsgs, m...)
				}
				delete(server.neighborMap, addr)
			} else {
				log.Info("Can't delete a peer configuration for ", addr)
			}
		case config := <-server.updatedPeerCh:
			addr := config.NeighborConfig.NeighborAddress.String()
			peer := server.neighborMap[addr]
			peer.conf = config
			server.setPolicyByConfig(peer, config.ApplyPolicy)
		case e := <-incoming:
			peer, found := server.neighborMap[e.MsgSrc]
			if !found {
				log.Warn("Can't find the neighbor ", e.MsgSrc)
				break
			}
			m := server.handleFSMMessage(peer, e, incoming)
			if len(m) > 0 {
				senderMsgs = append(senderMsgs, m...)
			}
		case sCh <- firstMsg:
			// head of the send queue consumed by the sender goroutine
			senderMsgs = senderMsgs[1:]
		case bCh <- firstBroadcastMsg:
			// head of the broadcast queue consumed by the broadcast goroutine
			server.broadcastMsgs = server.broadcastMsgs[1:]
		case grpcReq := <-server.GrpcReqCh:
			m := server.handleGrpc(grpcReq)
			if len(m) > 0 {
				senderMsgs = append(senderMsgs, m...)
			}
		case pl := <-server.policyUpdateCh:
			server.handlePolicy(pl)
		}
	}
}
// newSenderMsg wraps messages destined for peer into a SenderMsg for the
// sender goroutine started in Serve.
func newSenderMsg(peer *Peer, messages []*bgp.BGPMessage) *SenderMsg {
	// y is true when the peer advertised the four-octet-AS capability.
	// NOTE(review): it is stored in the field named twoBytesAs; the sender
	// treats false as "2-byte AS peer", so behavior is consistent even
	// though the name reads inverted. Renaming would require changing the
	// consumer in Serve at the same time.
	_, y := peer.capMap[bgp.BGP_CAP_FOUR_OCTET_AS_NUMBER]
	return &SenderMsg{
		messages:    messages,
		sendCh:      peer.outgoing,
		destination: peer.conf.NeighborConfig.NeighborAddress.String(),
		twoBytesAs:  y,
	}
}
// filterpath applies the per-peer advertisement rules to pathList and
// returns the paths that may be sent to peer: it drops route families the
// peer did not negotiate, enforces the RFC4456 iBGP/route-reflection
// loop-avoidance rules, never echoes a path back to its source, and drops
// paths whose AS_PATH already contains the peer's AS. Surviving paths are
// cloned for this peer.
func filterpath(peer *Peer, pathList []*table.Path) []*table.Path {
	filtered := make([]*table.Path, 0)
	for _, path := range pathList {
		// skip families that were not negotiated with this peer
		if _, ok := peer.rfMap[path.GetRouteFamily()]; !ok {
			continue
		}
		remoteAddr := peer.conf.NeighborConfig.NeighborAddress
		//iBGP handling
		if !path.IsLocal() && peer.isIBGPPeer() {
			ignore := true
			info := path.GetSource()
			//if the path comes from eBGP peer
			if info.AS != peer.conf.NeighborConfig.PeerAs {
				ignore = false
			}
			// RFC4456 8. Avoiding Routing Information Loops
			// A router that recognizes the ORIGINATOR_ID attribute SHOULD
			// ignore a route received with its BGP Identifier as the ORIGINATOR_ID.
			if id := path.GetOriginatorID(); peer.gConf.GlobalConfig.RouterId.Equal(id) {
				log.WithFields(log.Fields{
					"Topic":        "Peer",
					"Key":          remoteAddr,
					"OriginatorID": id,
					"Data":         path,
				}).Debug("Originator ID is mine, ignore")
				continue
			}
			if info.RouteReflectorClient {
				ignore = false
			}
			if peer.isRouteReflectorClient() {
				// RFC4456 8. Avoiding Routing Information Loops
				// If the local CLUSTER_ID is found in the CLUSTER_LIST,
				// the advertisement received SHOULD be ignored.
				// (In the original, the `continue` inside this loop only
				// advanced the cluster-list iteration, so the path was
				// advertised anyway despite the "ignore" log message.)
				looped := false
				for _, clusterId := range path.GetClusterList() {
					if clusterId.Equal(peer.peerInfo.RouteReflectorClusterID) {
						log.WithFields(log.Fields{
							"Topic":     "Peer",
							"Key":       remoteAddr,
							"ClusterID": clusterId,
							"Data":      path,
						}).Debug("cluster list path attribute has local cluster id, ignore")
						looped = true
						break
					}
				}
				if looped {
					continue
				}
				ignore = false
			}
			if ignore {
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key":   remoteAddr,
					"Data":  path,
				}).Debug("From same AS, ignore.")
				continue
			}
		}
		// never send a path back to the peer it came from
		if remoteAddr.Equal(path.GetSource().Address) {
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   remoteAddr,
				"Data":  path,
			}).Debug("From me, ignore.")
			continue
		}
		// drop paths whose AS_PATH already contains the peer's AS
		send := true
		for _, as := range path.GetAsList() {
			if as == peer.conf.NeighborConfig.PeerAs {
				send = false
				break
			}
		}
		if !send {
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   remoteAddr,
				"Data":  path,
			}).Debug("AS PATH loop, ignore.")
			continue
		}
		filtered = append(filtered, path.Clone(remoteAddr, path.IsWithdraw))
	}
	return filtered
}
// dropPeerAllRoutes withdraws every path learned from peer: for a
// route-server client the paths are removed from all other clients' local
// RIBs; otherwise they are removed from the global RIB (also notifying
// zebra/gRPC watchers via broadcastBests). Returns the withdraw messages to
// send to the affected established peers.
func (server *BgpServer) dropPeerAllRoutes(peer *Peer) []*SenderMsg {
	msgs := make([]*SenderMsg, 0)
	for _, rf := range peer.configuredRFlist() {
		if peer.isRouteServerClient() {
			for _, targetPeer := range server.neighborMap {
				rib := targetPeer.localRib
				// skip non-clients and the departing peer's own RIB
				if !targetPeer.isRouteServerClient() || rib.OwnerName() == peer.conf.NeighborConfig.NeighborAddress.String() {
					continue
				}
				pathList, _ := rib.DeletePathsforPeer(peer.peerInfo, rf)
				if targetPeer.fsm.state != bgp.BGP_FSM_ESTABLISHED || len(pathList) == 0 {
					continue
				}
				msgList := table.CreateUpdateMsgFromPaths(pathList)
				msgs = append(msgs, newSenderMsg(targetPeer, msgList))
				targetPeer.adjRib.UpdateOut(pathList)
			}
		} else {
			rib := server.globalRib
			pathList, _ := rib.DeletePathsforPeer(peer.peerInfo, rf)
			if len(pathList) == 0 {
				continue
			}
			server.broadcastBests(pathList)
			msgList := table.CreateUpdateMsgFromPaths(pathList)
			for _, targetPeer := range server.neighborMap {
				if targetPeer.isRouteServerClient() || targetPeer.fsm.state != bgp.BGP_FSM_ESTABLISHED {
					continue
				}
				targetPeer.adjRib.UpdateOut(pathList)
				msgs = append(msgs, newSenderMsg(targetPeer, msgList))
			}
		}
	}
	return msgs
}
// broadcastBests queues the newly selected best paths for delivery to
// interested consumers: the zebra client (for paths that did not come from
// zebra in the first place) and every gRPC monitor subscribed to global
// best-path changes. Subscriptions whose EndCh fired are dropped.
func (server *BgpServer) broadcastBests(bests []*table.Path) {
	for _, path := range bests {
		if !path.IsFromZebra {
			z := newBroadcastZapiBestMsg(server.zclient, path)
			if z != nil {
				server.broadcastMsgs = append(server.broadcastMsgs, z)
				// The original debug text here, "Default policy applied
				// and rejected.", was copy-pasted from policy code and
				// did not describe this event.
				log.WithFields(log.Fields{
					"Topic":   "Server",
					"Client":  z.client,
					"Message": z.msg,
				}).Debug("Best path will be sent to zebra")
			}
		}
		rf := path.GetRouteFamily()
		result := &GrpcResponse{
			Data: &api.Destination{
				Prefix: path.GetNlri().String(),
				Paths:  []*api.Path{path.ToApiStruct()},
			},
		}
		remainReqs := make([]*GrpcRequest, 0, len(server.broadcastReqs))
		for _, req := range server.broadcastReqs {
			select {
			case <-req.EndCh:
				// requester went away; drop the subscription
				continue
			default:
			}
			if req.RequestType != REQ_MONITOR_GLOBAL_BEST_CHANGED {
				remainReqs = append(remainReqs, req)
				continue
			}
			// family 0 means "all families"
			if req.RouteFamily == bgp.RouteFamily(0) || req.RouteFamily == rf {
				m := &broadcastGrpcMsg{
					req:    req,
					result: result,
				}
				server.broadcastMsgs = append(server.broadcastMsgs, m)
			}
			remainReqs = append(remainReqs, req)
		}
		server.broadcastReqs = remainReqs
	}
}
// broadcastPeerState queues the peer's current state for every gRPC monitor
// subscribed to neighbor state changes (optionally filtered by neighbor
// name). Subscriptions whose EndCh fired are dropped from the list.
func (server *BgpServer) broadcastPeerState(peer *Peer) {
	response := &GrpcResponse{Data: peer.ToApiStruct()}
	kept := make([]*GrpcRequest, 0, len(server.broadcastReqs))
	for _, req := range server.broadcastReqs {
		select {
		case <-req.EndCh:
			// requester went away; drop the subscription
			continue
		default:
		}
		wants := req.RequestType == REQ_MONITOR_NEIGHBOR_PEER_STATE &&
			(req.Name == "" || req.Name == peer.conf.NeighborConfig.NeighborAddress.String())
		if wants {
			server.broadcastMsgs = append(server.broadcastMsgs, &broadcastGrpcMsg{
				req:    req,
				result: response,
			})
		}
		kept = append(kept, req)
	}
	server.broadcastReqs = kept
}
// propagateUpdate runs best-path selection for pathList and fans the results
// out. Paths from a route-server client go through each other client's
// import policy, its local RIB, and its export policy; all other paths go
// through the global RIB's import/export policies and are announced to every
// established non-client peer. Returns the resulting update messages.
func (server *BgpServer) propagateUpdate(peer *Peer, pathList []*table.Path) []*SenderMsg {
	msgs := make([]*SenderMsg, 0)
	if peer != nil && peer.isRouteServerClient() {
		for _, targetPeer := range server.neighborMap {
			rib := targetPeer.localRib
			// skip non-clients and the sending client's own RIB
			if !targetPeer.isRouteServerClient() || rib.OwnerName() == peer.conf.NeighborConfig.NeighborAddress.String() {
				continue
			}
			sendPathList, _ := targetPeer.ApplyPolicy(table.POLICY_DIRECTION_IMPORT, pathList)
			sendPathList, _ = rib.ProcessPaths(sendPathList)
			if targetPeer.fsm.state != bgp.BGP_FSM_ESTABLISHED || len(sendPathList) == 0 {
				continue
			}
			sendPathList, _ = targetPeer.ApplyPolicy(table.POLICY_DIRECTION_EXPORT, filterpath(targetPeer, sendPathList))
			if len(sendPathList) == 0 {
				continue
			}
			msgList := table.CreateUpdateMsgFromPaths(sendPathList)
			targetPeer.adjRib.UpdateOut(sendPathList)
			msgs = append(msgs, newSenderMsg(targetPeer, msgList))
		}
	} else {
		rib := server.globalRib
		pathList = rib.ApplyPolicy(table.POLICY_DIRECTION_IMPORT, pathList)
		sendPathList, _ := rib.ProcessPaths(pathList)
		if len(sendPathList) == 0 {
			return msgs
		}
		// notify zebra and gRPC monitors about the new best paths
		server.broadcastBests(sendPathList)
		for _, targetPeer := range server.neighborMap {
			if targetPeer.isRouteServerClient() || targetPeer.fsm.state != bgp.BGP_FSM_ESTABLISHED {
				continue
			}
			f := rib.ApplyPolicy(table.POLICY_DIRECTION_EXPORT, filterpath(targetPeer, sendPathList))
			if len(f) == 0 {
				continue
			}
			// rewrite per-peer attributes (nexthop, AS_PATH, ...)
			for _, path := range f {
				path.UpdatePathAttrs(&server.bgpConfig.Global, &targetPeer.conf)
			}
			targetPeer.adjRib.UpdateOut(f)
			msgList := table.CreateUpdateMsgFromPaths(f)
			msgs = append(msgs, newSenderMsg(targetPeer, msgList))
		}
	}
	return msgs
}
// handleFSMMessage processes one event from a peer's FSM goroutine: either a
// session state change (bringing routes up/down, notifying BMP and gRPC
// watchers, restarting the FSM handler) or an inbound BGP message (RIB
// updates, ROA validation, MRT/BMP fan-out). Returns the resulting outbound
// messages.
func (server *BgpServer) handleFSMMessage(peer *Peer, e *fsmMsg, incoming chan *fsmMsg) []*SenderMsg {
	msgs := make([]*SenderMsg, 0)
	switch e.MsgType {
	case FSM_MSG_STATE_CHANGE:
		nextState := e.MsgData.(bgp.FSMState)
		oldState := bgp.FSMState(peer.conf.NeighborState.SessionState)
		peer.conf.NeighborState.SessionState = uint32(nextState)
		peer.fsm.StateChange(nextState)
		if oldState == bgp.BGP_FSM_ESTABLISHED {
			// session went down: tell BMP, count flaps, withdraw all routes
			if ch := server.bmpClient.send(); ch != nil {
				m := &broadcastBMPMsg{
					ch:      ch,
					msgList: []*bgp.BMPMessage{bmpPeerDown(bgp.BMP_PEER_DOWN_REASON_UNKNOWN, bgp.BMP_PEER_TYPE_GLOBAL, false, 0, peer.peerInfo, peer.conf.Timers.TimersState.Downtime)},
				}
				server.broadcastMsgs = append(server.broadcastMsgs, m)
			}
			t := time.Now()
			if t.Sub(time.Unix(peer.conf.Timers.TimersState.Uptime, 0)) < FLOP_THRESHOLD {
				peer.conf.NeighborState.Flops++
			}
			for _, rf := range peer.configuredRFlist() {
				peer.DropAll(rf)
			}
			msgs = append(msgs, server.dropPeerAllRoutes(peer)...)
		}
		// replace the outgoing channel; the old one may be blocked on a dead
		// session (the sender goroutine recovers from the send-on-closed panic)
		close(peer.outgoing)
		peer.outgoing = make(chan *bgp.BGPMessage, 128)
		if nextState == bgp.BGP_FSM_ESTABLISHED {
			// update for export policy
			laddr, lport := peer.fsm.LocalHostPort()
			peer.conf.Transport.TransportConfig.LocalAddress = net.ParseIP(laddr)
			if ch := server.bmpClient.send(); ch != nil {
				_, rport := peer.fsm.RemoteHostPort()
				m := &broadcastBMPMsg{
					ch:      ch,
					msgList: []*bgp.BMPMessage{bmpPeerUp(laddr, lport, rport, buildopen(peer.fsm.gConf, peer.fsm.pConf), peer.recvOpen, bgp.BMP_PEER_TYPE_GLOBAL, false, 0, peer.peerInfo, peer.conf.Timers.TimersState.Uptime)},
				}
				server.broadcastMsgs = append(server.broadcastMsgs, m)
			}
			// advertise the current best paths to the newly established peer
			pathList, _ := server.getBestFromLocal(peer)
			if len(pathList) > 0 {
				peer.adjRib.UpdateOut(pathList)
				msgs = append(msgs, newSenderMsg(peer, table.CreateUpdateMsgFromPaths(pathList)))
			}
		} else {
			// during shutdown, exit once every peer has reached idle
			if server.shutdown && nextState == bgp.BGP_FSM_IDLE {
				die := true
				for _, p := range server.neighborMap {
					if p.fsm.state != bgp.BGP_FSM_IDLE {
						die = false
						break
					}
				}
				if die {
					os.Exit(0)
				}
			}
			peer.conf.Timers.TimersState.Downtime = time.Now().Unix()
		}
		// clear counter
		if peer.fsm.adminState == ADMIN_STATE_DOWN {
			peer.conf.NeighborState = config.NeighborState{}
			peer.conf.Timers.TimersState = config.TimersState{}
		}
		peer.startFSMHandler(incoming)
		server.broadcastPeerState(peer)
	case FSM_MSG_BGP_MESSAGE:
		switch m := e.MsgData.(type) {
		case *bgp.MessageError:
			// malformed message: answer with a NOTIFICATION
			msgs = append(msgs, newSenderMsg(peer, []*bgp.BGPMessage{bgp.NewBGPNotificationMessage(m.TypeCode, m.SubTypeCode, m.Data)}))
		case *bgp.BGPMessage:
			pathList, update, msgList := peer.handleBGPmessage(m)
			if len(msgList) > 0 {
				msgs = append(msgs, newSenderMsg(peer, msgList))
				break
			}
			if update == false {
				if len(pathList) > 0 {
					// non-update (e.g. route refresh): resend requested paths
					msgList := table.CreateUpdateMsgFromPaths(pathList)
					msgs = append(msgs, newSenderMsg(peer, msgList))
				}
				break
			} else {
				if len(pathList) > 0 {
					server.roaClient.validate(pathList)
				}
			}
			if m.Header.Type == bgp.BGP_MSG_UPDATE {
				// fan the raw update out to the MRT dumper and BMP client
				if server.dumper != nil {
					_, y := peer.capMap[bgp.BGP_CAP_FOUR_OCTET_AS_NUMBER]
					l, _ := peer.fsm.LocalHostPort()
					bm := &broadcastBGPMsg{
						message:      m,
						peerAS:       peer.peerInfo.AS,
						localAS:      peer.peerInfo.LocalAS,
						peerAddress:  peer.peerInfo.Address,
						localAddress: net.ParseIP(l),
						fourBytesAs:  y,
						ch:           server.dumper.sendCh(),
					}
					server.broadcastMsgs = append(server.broadcastMsgs, bm)
				}
				if ch := server.bmpClient.send(); ch != nil {
					bm := &broadcastBMPMsg{
						ch:      ch,
						msgList: []*bgp.BMPMessage{bmpPeerRoute(bgp.BMP_PEER_TYPE_GLOBAL, false, 0, peer.peerInfo, time.Now().Unix(), m)},
					}
					server.broadcastMsgs = append(server.broadcastMsgs, bm)
				}
			}
			// FIXME: refactor peer.handleBGPmessage and this func
			if peer.isRouteServerClient() {
				// only propagate paths that survived the in-policy
				var accepted []*table.Path
				for _, p := range pathList {
					if p.Filtered == false {
						accepted = append(accepted, p)
					}
				}
				msgs = append(msgs, server.propagateUpdate(peer, accepted)...)
			} else {
				msgs = append(msgs, server.propagateUpdate(peer, pathList)...)
			}
		default:
			log.WithFields(log.Fields{
				"Topic": "Peer",
				"Key":   peer.conf.NeighborConfig.NeighborAddress,
				"Data":  e.MsgData,
			}).Panic("unknown msg type")
		}
	}
	return msgs
}
// SetGlobalType hands the global configuration to the Serve loop (blocks
// until Serve reads it).
func (server *BgpServer) SetGlobalType(g config.Global) {
	server.globalTypeCh <- g
}
// SetRpkiConfig hands a new RPKI server configuration to the Serve loop.
func (server *BgpServer) SetRpkiConfig(c config.RpkiServers) {
	server.rpkiConfigCh <- c
}
// SetBmpConfig hands a new BMP server configuration to the Serve loop.
func (server *BgpServer) SetBmpConfig(c config.BmpServers) {
	server.bmpConfigCh <- c
}
// PeerAdd asks the Serve loop to create and start a new neighbor.
func (server *BgpServer) PeerAdd(peer config.Neighbor) {
	server.addedPeerCh <- peer
}
// PeerDelete asks the Serve loop to tear down and remove a neighbor.
func (server *BgpServer) PeerDelete(peer config.Neighbor) {
	server.deletedPeerCh <- peer
}
// PeerUpdate asks the Serve loop to replace a neighbor's configuration.
func (server *BgpServer) PeerUpdate(peer config.Neighbor) {
	server.updatedPeerCh <- peer
}
// Shutdown flags the server as shutting down and administratively disables
// every peer; handleFSMMessage exits the process once all FSMs reach idle.
func (server *BgpServer) Shutdown() {
	server.shutdown = true
	for _, p := range server.neighborMap {
		p.fsm.adminStateCh <- ADMIN_STATE_DOWN
	}
}
// UpdatePolicy hands a new routing-policy configuration to the Serve loop.
func (server *BgpServer) UpdatePolicy(policy config.RoutingPolicy) {
	server.policyUpdateCh <- policy
}
// setPolicyByConfig resolves the in/import/export policy assignments in c
// against the server's compiled policy and installs them (policy list plus
// default action) on the policy point p. A direction that fails to resolve
// or install is logged and skipped; the remaining directions are still
// applied.
func (server *BgpServer) setPolicyByConfig(p policyPoint, c config.ApplyPolicy) {
	for _, dir := range []table.PolicyDirection{table.POLICY_DIRECTION_IN, table.POLICY_DIRECTION_IMPORT, table.POLICY_DIRECTION_EXPORT} {
		ps, def, err := server.policy.GetAssignmentFromConfig(dir, c)
		if err != nil {
			log.WithFields(log.Fields{
				"Topic": "Policy",
				"Dir":   dir,
			}).Errorf("failed to get policy info: %s", err)
			continue
		}
		// the original discarded these two errors silently
		if err := p.SetDefaultPolicy(dir, def); err != nil {
			log.WithFields(log.Fields{
				"Topic": "Policy",
				"Dir":   dir,
			}).Errorf("failed to set default policy: %s", err)
			continue
		}
		if err := p.SetPolicy(dir, ps); err != nil {
			log.WithFields(log.Fields{
				"Topic": "Policy",
				"Dir":   dir,
			}).Errorf("failed to set policy: %s", err)
		}
	}
}
// SetPolicy compiles the routing-policy configuration, installs it on the
// server, and reapplies the global RIB's policy assignment. Returns the
// compilation error, if any.
func (server *BgpServer) SetPolicy(pl config.RoutingPolicy) error {
	compiled, err := table.NewRoutingPolicy(pl)
	if err != nil {
		log.WithFields(log.Fields{
			"Topic": "Policy",
		}).Errorf("failed to create routing policy: %s", err)
		return err
	}
	server.policy = compiled
	server.setPolicyByConfig(server.globalRib, server.bgpConfig.Global.ApplyPolicy)
	return nil
}
// handlePolicy installs a new routing policy and then reapplies each peer's
// configured policy assignment against it.
func (server *BgpServer) handlePolicy(pl config.RoutingPolicy) error {
	err := server.SetPolicy(pl)
	if err != nil {
		log.WithFields(log.Fields{
			"Topic": "Policy",
		}).Errorf("failed to set new policy: %s", err)
		return err
	}
	for _, peer := range server.neighborMap {
		log.WithFields(log.Fields{
			"Topic": "Peer",
			"Key":   peer.conf.NeighborConfig.NeighborAddress,
		}).Info("call set policy")
		server.setPolicyByConfig(peer, peer.conf.ApplyPolicy)
	}
	return nil
}
// checkNeighborRequest looks up the peer named by the request. On a miss it
// answers the request with an error response, closes the response channel,
// and returns the same error so the caller can bail out without replying
// again.
func (server *BgpServer) checkNeighborRequest(grpcReq *GrpcRequest) (*Peer, error) {
	addr := grpcReq.Name
	if peer, ok := server.neighborMap[addr]; ok {
		return peer, nil
	}
	e := fmt.Errorf("Neighbor that has %v doesn't exist.", addr)
	grpcReq.ResponseCh <- &GrpcResponse{ResponseErr: e}
	close(grpcReq.ResponseCh)
	return nil, e
}
// EVPN MAC MOBILITY HANDLING
//
// We don't have multihoming function now, so ignore
// ESI comparison.
//
// RFC7432 15. MAC Mobility
//
// A PE detecting a locally attached MAC address for which it had
// previously received a MAC/IP Advertisement route with the same zero
// Ethernet segment identifier (single-homed scenarios) advertises it
// with a MAC Mobility extended community attribute with the sequence
// number set properly. In the case of single-homed scenarios, there
// is no need for ESI comparison.
// getMacMobilityExtendedCommunity computes the MAC Mobility extended
// community to attach when advertising a MAC/IP route for (etag, mac), based
// on the sequence numbers carried by matching EVPN paths (see the RFC7432
// 15 note above). Returns nil when no matching path exists or when no
// attribute is needed (highest sequence -1 on a remote path).
func getMacMobilityExtendedCommunity(etag uint32, mac net.HardwareAddr, evpnPaths []*table.Path) *bgp.MacMobilityExtended {
	// collect (sequence, isLocal) for every path advertising the same
	// (etag, mac); seq -1 stands for "no MAC Mobility attribute present"
	seqs := make([]struct {
		seq     int
		isLocal bool
	}, 0)
	for _, path := range evpnPaths {
		nlri := path.GetNlri().(*bgp.EVPNNLRI)
		target, ok := nlri.RouteTypeData.(*bgp.EVPNMacIPAdvertisementRoute)
		if !ok {
			continue
		}
		if target.ETag == etag && bytes.Equal(target.MacAddress, mac) {
			found := false
			for _, ec := range path.GetExtCommunities() {
				if t, st := ec.GetTypes(); t == bgp.EC_TYPE_EVPN && st == bgp.EC_SUBTYPE_MAC_MOBILITY {
					seqs = append(seqs, struct {
						seq     int
						isLocal bool
					}{int(ec.(*bgp.MacMobilityExtended).Sequence), path.IsLocal()})
					found = true
					break
				}
			}
			if !found {
				seqs = append(seqs, struct {
					seq     int
					isLocal bool
				}{-1, path.IsLocal()})
			}
		}
	}
	if len(seqs) > 0 {
		// pick the highest sequence; -2 is below any collected value
		newSeq := -2
		var isLocal bool
		for _, seq := range seqs {
			if seq.seq > newSeq {
				newSeq = seq.seq
				isLocal = seq.isLocal
			}
		}
		// a remote winner means the MAC moved to us: advance the sequence
		if !isLocal {
			newSeq += 1
		}
		if newSeq != -1 {
			return &bgp.MacMobilityExtended{
				Sequence: uint32(newSeq),
			}
		}
	}
	return nil
}
// handleModPathRequest converts an api.ModPathArguments request into
// table.Path objects ready for injection into the RIB. On any validation
// error it replies on (and closes) the request's response channel itself and
// returns an empty slice; on success the caller is responsible for
// propagating the returned paths and answering the request.
func (server *BgpServer) handleModPathRequest(grpcReq *GrpcRequest) []*table.Path {
	var nlri bgp.AddrPrefixInterface
	result := &GrpcResponse{}
	var pattr []bgp.PathAttributeInterface
	var extcomms []bgp.ExtendedCommunityInterface
	var nexthop string
	var rf bgp.RouteFamily
	var paths []*table.Path
	var path *api.Path
	var pi *table.PeerInfo
	arg, ok := grpcReq.Data.(*api.ModPathArguments)
	if !ok {
		result.ResponseErr = fmt.Errorf("type assertion failed")
		goto ERR
	}
	paths = make([]*table.Path, 0, len(arg.Paths))
	for _, path = range arg.Paths {
		seen := make(map[bgp.BGPAttrType]bool)
		pattr = make([]bgp.PathAttributeInterface, 0)
		extcomms = make([]bgp.ExtendedCommunityInterface, 0)
		// Source of the path: the caller-specified peer, or this speaker
		// itself when no source AS was given.
		if path.SourceAsn != 0 {
			pi = &table.PeerInfo{
				AS:      path.SourceAsn,
				LocalID: net.ParseIP(path.SourceId),
			}
		} else {
			pi = &table.PeerInfo{
				AS:      server.bgpConfig.Global.GlobalConfig.As,
				LocalID: server.bgpConfig.Global.GlobalConfig.RouterId,
			}
		}
		// A raw NLRI byte string is decoded as an IPv4 unicast prefix;
		// other families arrive via the MP_REACH_NLRI attribute below.
		if len(path.Nlri) > 0 {
			nlri = &bgp.IPAddrPrefix{}
			err := nlri.DecodeFromBytes(path.Nlri)
			if err != nil {
				result.ResponseErr = err
				goto ERR
			}
		}
		for _, attr := range path.Pattrs {
			p, err := bgp.GetPathAttribute(attr)
			if err != nil {
				result.ResponseErr = err
				goto ERR
			}
			err = p.DecodeFromBytes(attr)
			if err != nil {
				result.ResponseErr = err
				goto ERR
			}
			// Reject duplicate path attributes.
			if _, ok := seen[p.GetType()]; !ok {
				seen[p.GetType()] = true
			} else {
				// BUGFIX: corrected "apears" typo in the error message.
				result.ResponseErr = fmt.Errorf("the path attribute appears twice. Type : " + strconv.Itoa(int(p.GetType())))
				goto ERR
			}
			switch p.GetType() {
			case bgp.BGP_ATTR_TYPE_NEXT_HOP:
				nexthop = p.(*bgp.PathAttributeNextHop).Value.String()
			case bgp.BGP_ATTR_TYPE_EXTENDED_COMMUNITIES:
				value := p.(*bgp.PathAttributeExtendedCommunities).Value
				if len(value) > 0 {
					extcomms = append(extcomms, value...)
				}
			case bgp.BGP_ATTR_TYPE_MP_REACH_NLRI:
				mpreach := p.(*bgp.PathAttributeMpReachNLRI)
				if len(mpreach.Value) != 1 {
					result.ResponseErr = fmt.Errorf("include only one route in mp_reach_nlri")
					goto ERR
				}
				nlri = mpreach.Value[0]
				nexthop = mpreach.Nexthop.String()
			default:
				pattr = append(pattr, p)
			}
		}
		if nlri == nil || nexthop == "" {
			result.ResponseErr = fmt.Errorf("not found nlri or nexthop")
			goto ERR
		}
		rf = bgp.AfiSafiToRouteFamily(nlri.AFI(), nlri.SAFI())
		// For VRF injection: translate the NLRI into its VPN form (labeled
		// + route distinguisher) and attach the VRF's export route targets.
		if arg.Resource == api.Resource_VRF {
			label, err := server.globalRib.GetNextLabel(arg.Name, nexthop, path.IsWithdraw)
			if err != nil {
				result.ResponseErr = err
				goto ERR
			}
			vrf := server.globalRib.Vrfs[arg.Name]
			switch rf {
			case bgp.RF_IPv4_UC:
				n := nlri.(*bgp.IPAddrPrefix)
				nlri = bgp.NewLabeledVPNIPAddrPrefix(n.Length, n.Prefix.String(), *bgp.NewMPLSLabelStack(label), vrf.Rd)
			case bgp.RF_IPv6_UC:
				n := nlri.(*bgp.IPv6AddrPrefix)
				nlri = bgp.NewLabeledVPNIPv6AddrPrefix(n.Length, n.Prefix.String(), *bgp.NewMPLSLabelStack(label), vrf.Rd)
			case bgp.RF_EVPN:
				n := nlri.(*bgp.EVPNNLRI)
				switch n.RouteType {
				case bgp.EVPN_ROUTE_TYPE_MAC_IP_ADVERTISEMENT:
					n.RouteTypeData.(*bgp.EVPNMacIPAdvertisementRoute).RD = vrf.Rd
				case bgp.EVPN_INCLUSIVE_MULTICAST_ETHERNET_TAG:
					n.RouteTypeData.(*bgp.EVPNMulticastEthernetTagRoute).RD = vrf.Rd
				}
			default:
				result.ResponseErr = fmt.Errorf("unsupported route family for vrf: %s", rf)
				goto ERR
			}
			extcomms = append(extcomms, vrf.ExportRt...)
		}
		// IPv4 unicast outside a VRF uses the classic NEXT_HOP attribute;
		// everything else is carried in MP_REACH_NLRI.
		if arg.Resource != api.Resource_VRF && rf == bgp.RF_IPv4_UC {
			pattr = append(pattr, bgp.NewPathAttributeNextHop(nexthop))
		} else {
			pattr = append(pattr, bgp.NewPathAttributeMpReachNLRI(nexthop, []bgp.AddrPrefixInterface{nlri}))
		}
		// RFC 7432 MAC mobility: bump the sequence number when other EVPN
		// best paths already advertise this MAC.
		if rf == bgp.RF_EVPN {
			evpnNlri := nlri.(*bgp.EVPNNLRI)
			if evpnNlri.RouteType == bgp.EVPN_ROUTE_TYPE_MAC_IP_ADVERTISEMENT {
				macIpAdv := evpnNlri.RouteTypeData.(*bgp.EVPNMacIPAdvertisementRoute)
				etag := macIpAdv.ETag
				mac := macIpAdv.MacAddress
				// Renamed from "paths" to avoid shadowing the accumulator.
				evpnPaths := server.globalRib.GetBestPathList(bgp.RF_EVPN)
				if m := getMacMobilityExtendedCommunity(etag, mac, evpnPaths); m != nil {
					extcomms = append(extcomms, m)
				}
			}
		}
		if len(extcomms) > 0 {
			pattr = append(pattr, bgp.NewPathAttributeExtendedCommunities(extcomms))
		}
		paths = append(paths, table.NewPath(pi, nlri, path.IsWithdraw, pattr, false, time.Now(), path.NoImplicitWithdraw))
	}
	return paths
ERR:
	grpcReq.ResponseCh <- result
	close(grpcReq.ResponseCh)
	return []*table.Path{}
}
// handleVrfMod adds or deletes a VRF in the global RIB according to
// arg.Operation and returns the paths that must be (re)propagated as a
// consequence of the change.
func (server *BgpServer) handleVrfMod(arg *api.ModVrfArguments) ([]*table.Path, error) {
	rib := server.globalRib
	var msgs []*table.Path
	switch arg.Operation {
	case api.Operation_ADD:
		rd := bgp.GetRouteDistinguisher(arg.Vrf.Rd)
		// f decodes a list of wire-format route targets into extended
		// community values.
		f := func(bufs [][]byte) ([]bgp.ExtendedCommunityInterface, error) {
			ret := make([]bgp.ExtendedCommunityInterface, 0, len(bufs))
			for _, rt := range bufs {
				r, err := bgp.ParseExtended(rt)
				if err != nil {
					return nil, err
				}
				ret = append(ret, r)
			}
			return ret, nil
		}
		importRt, err := f(arg.Vrf.ImportRt)
		if err != nil {
			return nil, err
		}
		exportRt, err := f(arg.Vrf.ExportRt)
		if err != nil {
			return nil, err
		}
		// The VRF's routes are attributed to this speaker itself.
		pi := &table.PeerInfo{
			AS:      server.bgpConfig.Global.GlobalConfig.As,
			LocalID: server.bgpConfig.Global.GlobalConfig.RouterId,
		}
		msgs, err = rib.AddVrf(arg.Vrf.Name, rd, importRt, exportRt, pi)
		if err != nil {
			return nil, err
		}
	case api.Operation_DEL:
		var err error
		msgs, err = rib.DeleteVrf(arg.Vrf.Name)
		if err != nil {
			return nil, err
		}
	default:
		return nil, fmt.Errorf("unknown operation: %d", arg.Operation)
	}
	return msgs, nil
}
// handleVrfRequest serves the VRF-related requests (REQ_VRF, REQ_VRFS,
// REQ_VRF_MOD). It streams results on req.ResponseCh and always closes the
// channel before returning. The returned paths are non-empty only for
// REQ_VRF_MOD; the caller must propagate them into the RIB.
func (server *BgpServer) handleVrfRequest(req *GrpcRequest) []*table.Path {
	var msgs []*table.Path
	result := &GrpcResponse{}
	switch req.RequestType {
	case REQ_VRF:
		name := req.Name
		rib := server.globalRib
		vrfs := rib.Vrfs
		if _, ok := vrfs[name]; !ok {
			result.ResponseErr = fmt.Errorf("vrf %s not found", name)
			break
		}
		// Map the requested family to the VPN family actually stored in
		// the global RIB for VRFs.
		var rf bgp.RouteFamily
		switch req.RouteFamily {
		case bgp.RF_IPv4_UC:
			rf = bgp.RF_IPv4_VPN
		case bgp.RF_IPv6_UC:
			rf = bgp.RF_IPv6_VPN
		case bgp.RF_EVPN:
			rf = bgp.RF_EVPN
		default:
			result.ResponseErr = fmt.Errorf("unsupported route family: %s", req.RouteFamily)
		}
		// BUGFIX: a "break" inside the inner switch above only exits that
		// switch, so an unsupported route family previously fell through,
		// walked the RIB with the zero RouteFamily, and never delivered
		// the error to the requester. Bail out here instead.
		if result.ResponseErr != nil {
			break
		}
		for _, path := range rib.GetPathList(rf) {
			ok := table.CanImportToVrf(vrfs[name], path)
			if !ok {
				continue
			}
			req.ResponseCh <- &GrpcResponse{
				Data: &api.Destination{
					Prefix: path.GetNlri().String(),
					Paths:  []*api.Path{path.ToApiStruct()},
				},
			}
		}
		goto END
	case REQ_VRFS:
		vrfs := server.globalRib.Vrfs
		for _, vrf := range vrfs {
			req.ResponseCh <- &GrpcResponse{
				Data: vrf.ToApiStruct(),
			}
		}
		goto END
	case REQ_VRF_MOD:
		arg := req.Data.(*api.ModVrfArguments)
		msgs, result.ResponseErr = server.handleVrfMod(arg)
	default:
		result.ResponseErr = fmt.Errorf("unknown request type: %d", req.RequestType)
	}
	req.ResponseCh <- result
END:
	close(req.ResponseCh)
	return msgs
}
// sendMultipleResponses streams the prepared results to the requester,
// aborting early when the requester signals completion via EndCh. The
// response channel is always closed on return.
func sendMultipleResponses(grpcReq *GrpcRequest, results []*GrpcResponse) {
	defer close(grpcReq.ResponseCh)
	for _, r := range results {
		select {
		case grpcReq.ResponseCh <- r:
		case <-grpcReq.EndCh:
			// Requester went away; drop the remaining results.
			return
		}
	}
}
// getBestFromLocal returns the best paths to advertise to peer after export
// policy, plus the paths the export policy filtered out. For route-server
// clients the peer's local RIB is used; otherwise the global RIB's bests are
// exported with their attributes rewritten for this peer (filtered is nil in
// that case).
func (server *BgpServer) getBestFromLocal(peer *Peer) ([]*table.Path, []*table.Path) {
	if peer.isRouteServerClient() {
		accepted, rejected := peer.ApplyPolicy(table.POLICY_DIRECTION_EXPORT, filterpath(peer, peer.getBests(peer.localRib)))
		return accepted, rejected
	}
	globalRib := server.globalRib
	candidates := globalRib.ApplyPolicy(table.POLICY_DIRECTION_EXPORT, filterpath(peer, peer.getBests(globalRib)))
	accepted := make([]*table.Path, 0, len(candidates))
	for _, p := range candidates {
		// Rewrite per-peer attributes (next-hop, AS path, ...) in place.
		p.UpdatePathAttrs(&server.bgpConfig.Global, &peer.conf)
		accepted = append(accepted, p)
	}
	return accepted, nil
}
// handleGrpc dispatches a single gRPC request to the matching handler.
// Streaming requests are answered asynchronously via sendMultipleResponses;
// all other requests receive exactly one GrpcResponse and have their
// response channel closed here (or inside the called handler, e.g.
// checkNeighborRequest replies on error by itself). The returned SenderMsgs,
// if any, must be forwarded to the peers by the caller.
func (server *BgpServer) handleGrpc(grpcReq *GrpcRequest) []*SenderMsg {
	var msgs []*SenderMsg
	// logOp records an administrative operation against a neighbor address.
	logOp := func(addr string, action string) {
		log.WithFields(log.Fields{
			"Topic": "Operation",
			"Key": addr,
		}).Info(action)
	}
	// reqToPeers resolves the request's Name to the target peers; the
	// special name "all" selects every configured neighbor.
	reqToPeers := func(grpcReq *GrpcRequest) ([]*Peer, error) {
		peers := make([]*Peer, 0)
		if grpcReq.Name == "all" {
			for _, p := range server.neighborMap {
				peers = append(peers, p)
			}
			return peers, nil
		}
		peer, err := server.checkNeighborRequest(grpcReq)
		return []*Peer{peer}, err
	}
	// sortedDsts returns the table's destinations as responses ordered by
	// prefix, using a radix-tree walk for deterministic output.
	sortedDsts := func(t *table.Table) []*GrpcResponse {
		results := make([]*GrpcResponse, len(t.GetDestinations()))
		r := radix.New()
		for _, dst := range t.GetDestinations() {
			result := &GrpcResponse{}
			result.Data = dst.ToApiStruct()
			r.Insert(dst.RadixKey, result)
		}
		i := 0
		r.Walk(func(s string, v interface{}) bool {
			r, _ := v.(*GrpcResponse)
			results[i] = r
			i++
			return false
		})
		return results
	}
	switch grpcReq.RequestType {
	case REQ_GLOBAL_RIB:
		// Stream every destination of the requested global table; unicast
		// families are sorted by prefix.
		var results []*GrpcResponse
		if t, ok := server.globalRib.Tables[grpcReq.RouteFamily]; ok {
			results = make([]*GrpcResponse, len(t.GetDestinations()))
			switch grpcReq.RouteFamily {
			case bgp.RF_IPv4_UC, bgp.RF_IPv6_UC:
				results = sortedDsts(server.globalRib.Tables[grpcReq.RouteFamily])
			default:
				i := 0
				for _, dst := range t.GetDestinations() {
					result := &GrpcResponse{}
					result.Data = dst.ToApiStruct()
					results[i] = result
					i++
				}
			}
		}
		go sendMultipleResponses(grpcReq, results)
	case REQ_MOD_PATH:
		// handleModPathRequest replies by itself on error.
		// NOTE(review): on success with an empty path list no response is
		// sent and the channel is never closed — confirm callers tolerate
		// this.
		pathList := server.handleModPathRequest(grpcReq)
		if len(pathList) > 0 {
			msgs = server.propagateUpdate(nil, pathList)
			grpcReq.ResponseCh <- &GrpcResponse{}
			close(grpcReq.ResponseCh)
		}
	case REQ_NEIGHBORS:
		// Stream a snapshot of every configured neighbor.
		results := make([]*GrpcResponse, len(server.neighborMap))
		i := 0
		for _, peer := range server.neighborMap {
			result := &GrpcResponse{
				Data: peer.ToApiStruct(),
			}
			results[i] = result
			i++
		}
		go sendMultipleResponses(grpcReq, results)
	case REQ_NEIGHBOR:
		peer, err := server.checkNeighborRequest(grpcReq)
		if err != nil {
			break
		}
		result := &GrpcResponse{
			Data: peer.ToApiStruct(),
		}
		grpcReq.ResponseCh <- result
		close(grpcReq.ResponseCh)
	case REQ_LOCAL_RIB:
		peer, err := server.checkNeighborRequest(grpcReq)
		if err != nil {
			break
		}
		var results []*GrpcResponse
		// Only route-server clients have a local RIB, and it is only dumped
		// while the peer is not administratively down.
		if peer.isRouteServerClient() && peer.fsm.adminState != ADMIN_STATE_DOWN {
			if t, ok := peer.localRib.Tables[grpcReq.RouteFamily]; ok {
				results = make([]*GrpcResponse, len(t.GetDestinations()))
				switch grpcReq.RouteFamily {
				case bgp.RF_IPv4_UC, bgp.RF_IPv6_UC:
					results = sortedDsts(peer.localRib.Tables[grpcReq.RouteFamily])
				default:
					i := 0
					for _, dst := range t.GetDestinations() {
						result := &GrpcResponse{}
						result.Data = dst.ToApiStruct()
						results[i] = result
						i++
					}
				}
			}
		}
		go sendMultipleResponses(grpcReq, results)
	case REQ_ADJ_RIB_IN, REQ_ADJ_RIB_OUT:
		peer, err := server.checkNeighborRequest(grpcReq)
		if err != nil {
			break
		}
		rf := grpcReq.RouteFamily
		var paths []*table.Path
		if grpcReq.RequestType == REQ_ADJ_RIB_IN {
			paths = peer.adjRib.GetInPathList(rf)
			log.Debugf("RouteFamily=%v adj-rib-in found : %d", rf.String(), len(paths))
		} else {
			paths = peer.adjRib.GetOutPathList(rf)
			log.Debugf("RouteFamily=%v adj-rib-out found : %d", rf.String(), len(paths))
		}
		toResult := func(p *table.Path) *GrpcResponse {
			return &GrpcResponse{
				Data: &api.Destination{
					Prefix: p.GetNlri().String(),
					Paths: []*api.Path{p.ToApiStruct()},
				},
			}
		}
		results := make([]*GrpcResponse, len(paths))
		switch rf {
		case bgp.RF_IPv4_UC, bgp.RF_IPv6_UC:
			// Sort unicast paths by prefix before streaming them out.
			r := radix.New()
			for _, p := range paths {
				r.Insert(table.CidrToRadixkey(p.GetNlri().String()), toResult(p))
			}
			i := 0
			r.Walk(func(s string, v interface{}) bool {
				r, _ := v.(*GrpcResponse)
				results[i] = r
				i++
				return false
			})
		default:
			for i, p := range paths {
				results[i] = toResult(p)
			}
		}
		go sendMultipleResponses(grpcReq, results)
	case REQ_NEIGHBOR_SHUTDOWN:
		peers, err := reqToPeers(grpcReq)
		if err != nil {
			break
		}
		logOp(grpcReq.Name, "Neighbor shutdown")
		// Queue a CEASE/administrative-shutdown notification for each peer.
		m := bgp.NewBGPNotificationMessage(bgp.BGP_ERROR_CEASE, bgp.BGP_ERROR_SUB_ADMINISTRATIVE_SHUTDOWN, nil)
		for _, peer := range peers {
			msgs = append(msgs, newSenderMsg(peer, []*bgp.BGPMessage{m}))
		}
		grpcReq.ResponseCh <- &GrpcResponse{}
		close(grpcReq.ResponseCh)
	case REQ_NEIGHBOR_RESET:
		peers, err := reqToPeers(grpcReq)
		if err != nil {
			break
		}
		logOp(grpcReq.Name, "Neighbor reset")
		for _, peer := range peers {
			// Arm the configured idle hold time before sending the reset
			// notification.
			peer.fsm.idleHoldTime = peer.conf.Timers.TimersConfig.IdleHoldTimeAfterReset
			m := bgp.NewBGPNotificationMessage(bgp.BGP_ERROR_CEASE, bgp.BGP_ERROR_SUB_ADMINISTRATIVE_RESET, nil)
			msgs = append(msgs, newSenderMsg(peer, []*bgp.BGPMessage{m}))
		}
		grpcReq.ResponseCh <- &GrpcResponse{}
		close(grpcReq.ResponseCh)
	case REQ_NEIGHBOR_SOFT_RESET, REQ_NEIGHBOR_SOFT_RESET_IN:
		peers, err := reqToPeers(grpcReq)
		if err != nil {
			break
		}
		if grpcReq.RequestType == REQ_NEIGHBOR_SOFT_RESET {
			logOp(grpcReq.Name, "Neighbor soft reset")
		} else {
			logOp(grpcReq.Name, "Neighbor soft reset in")
		}
		// Re-run the import machinery over what each peer already sent us.
		for _, peer := range peers {
			pathList := peer.adjRib.GetInPathList(grpcReq.RouteFamily)
			if peer.isRouteServerClient() {
				pathList, _ = peer.ApplyPolicy(table.POLICY_DIRECTION_IN, pathList)
			}
			msgs = append(msgs, server.propagateUpdate(peer, pathList)...)
		}
		if grpcReq.RequestType == REQ_NEIGHBOR_SOFT_RESET_IN {
			grpcReq.ResponseCh <- &GrpcResponse{}
			close(grpcReq.ResponseCh)
			break
		}
		// A full soft reset continues with the outbound half below.
		fallthrough
	case REQ_NEIGHBOR_SOFT_RESET_OUT:
		peers, err := reqToPeers(grpcReq)
		if err != nil {
			break
		}
		if grpcReq.RequestType == REQ_NEIGHBOR_SOFT_RESET_OUT {
			logOp(grpcReq.Name, "Neighbor soft reset out")
		}
		for _, peer := range peers {
			// Drop the outbound adj-RIB and re-announce the current bests.
			for _, rf := range peer.configuredRFlist() {
				peer.adjRib.DropOut(rf)
			}
			pathList, filtered := server.getBestFromLocal(peer)
			if len(pathList) > 0 {
				peer.adjRib.UpdateOut(pathList)
				msgs = append(msgs, newSenderMsg(peer, table.CreateUpdateMsgFromPaths(pathList)))
			}
			if len(filtered) > 0 {
				// Paths rejected by export policy are withdrawn explicitly.
				for _, p := range filtered {
					p.IsWithdraw = true
				}
				msgs = append(msgs, newSenderMsg(peer, table.CreateUpdateMsgFromPaths(filtered)))
			}
		}
		grpcReq.ResponseCh <- &GrpcResponse{}
		close(grpcReq.ResponseCh)
	case REQ_NEIGHBOR_ENABLE, REQ_NEIGHBOR_DISABLE:
		peer, err1 := server.checkNeighborRequest(grpcReq)
		if err1 != nil {
			break
		}
		var err api.Error
		result := &GrpcResponse{}
		// The admin state change is delivered over a non-blocking channel
		// send; a full channel means an earlier request is still pending.
		if grpcReq.RequestType == REQ_NEIGHBOR_ENABLE {
			select {
			case peer.fsm.adminStateCh <- ADMIN_STATE_UP:
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key": peer.conf.NeighborConfig.NeighborAddress,
				}).Debug("ADMIN_STATE_UP requested")
				err.Code = api.Error_SUCCESS
				err.Msg = "ADMIN_STATE_UP"
			default:
				log.Warning("previous request is still remaining. : ", peer.conf.NeighborConfig.NeighborAddress)
				err.Code = api.Error_FAIL
				err.Msg = "previous request is still remaining"
			}
		} else {
			select {
			case peer.fsm.adminStateCh <- ADMIN_STATE_DOWN:
				log.WithFields(log.Fields{
					"Topic": "Peer",
					"Key": peer.conf.NeighborConfig.NeighborAddress,
				}).Debug("ADMIN_STATE_DOWN requested")
				err.Code = api.Error_SUCCESS
				err.Msg = "ADMIN_STATE_DOWN"
			default:
				log.Warning("previous request is still remaining. : ", peer.conf.NeighborConfig.NeighborAddress)
				err.Code = api.Error_FAIL
				err.Msg = "previous request is still remaining"
			}
		}
		result.Data = err
		grpcReq.ResponseCh <- result
		close(grpcReq.ResponseCh)
	case REQ_DEFINED_SET:
		// Get-style handlers stream their own data and only return errors.
		if err := server.handleGrpcGetDefinedSet(grpcReq); err != nil {
			grpcReq.ResponseCh <- &GrpcResponse{
				ResponseErr: err,
			}
		}
		close(grpcReq.ResponseCh)
	case REQ_MOD_DEFINED_SET:
		err := server.handleGrpcModDefinedSet(grpcReq)
		grpcReq.ResponseCh <- &GrpcResponse{
			ResponseErr: err,
		}
		close(grpcReq.ResponseCh)
	case REQ_STATEMENT:
		if err := server.handleGrpcGetStatement(grpcReq); err != nil {
			grpcReq.ResponseCh <- &GrpcResponse{
				ResponseErr: err,
			}
		}
		close(grpcReq.ResponseCh)
	case REQ_MOD_STATEMENT:
		err := server.handleGrpcModStatement(grpcReq)
		grpcReq.ResponseCh <- &GrpcResponse{
			ResponseErr: err,
		}
		close(grpcReq.ResponseCh)
	case REQ_POLICY:
		if err := server.handleGrpcGetPolicy(grpcReq); err != nil {
			grpcReq.ResponseCh <- &GrpcResponse{
				ResponseErr: err,
			}
		}
		close(grpcReq.ResponseCh)
	case REQ_MOD_POLICY:
		err := server.handleGrpcModPolicy(grpcReq)
		grpcReq.ResponseCh <- &GrpcResponse{
			ResponseErr: err,
		}
		close(grpcReq.ResponseCh)
	case REQ_POLICY_ASSIGNMENT:
		if err := server.handleGrpcGetPolicyAssignment(grpcReq); err != nil {
			grpcReq.ResponseCh <- &GrpcResponse{
				ResponseErr: err,
			}
		}
		close(grpcReq.ResponseCh)
	case REQ_MOD_POLICY_ASSIGNMENT:
		err := server.handleGrpcModPolicyAssignment(grpcReq)
		grpcReq.ResponseCh <- &GrpcResponse{
			ResponseErr: err,
		}
		close(grpcReq.ResponseCh)
	case REQ_MONITOR_GLOBAL_BEST_CHANGED, REQ_MONITOR_NEIGHBOR_PEER_STATE:
		// Long-lived subscription: answered later by the broadcast machinery.
		server.broadcastReqs = append(server.broadcastReqs, grpcReq)
	case REQ_MRT_GLOBAL_RIB, REQ_MRT_LOCAL_RIB:
		server.handleMrt(grpcReq)
	case REQ_ROA, REQ_RPKI:
		server.roaClient.handleGRPC(grpcReq)
	case REQ_VRF, REQ_VRFS, REQ_VRF_MOD:
		// handleVrfRequest answers and closes the channel itself.
		pathList := server.handleVrfRequest(grpcReq)
		if len(pathList) > 0 {
			msgs = server.propagateUpdate(nil, pathList)
		}
	default:
		errmsg := fmt.Errorf("Unknown request type: %v", grpcReq.RequestType)
		result := &GrpcResponse{
			ResponseErr: errmsg,
		}
		grpcReq.ResponseCh <- result
		close(grpcReq.ResponseCh)
	}
	return msgs
}
// handleGrpcGetDefinedSet streams the defined sets of the requested type to
// the requester. An empty name matches every set of that type; a non-empty
// name returns only the matching set. An error is returned when the type is
// invalid or nothing matched; the caller closes the response channel.
func (server *BgpServer) handleGrpcGetDefinedSet(grpcReq *GrpcRequest) error {
	arg := grpcReq.Data.(*api.DefinedSet)
	typ := table.DefinedType(arg.Type)
	sets, ok := server.policy.DefinedSetMap[typ]
	if !ok {
		return fmt.Errorf("invalid defined-set type: %d", typ)
	}
	name := arg.Name
	matched := false
	for _, s := range sets {
		if name != "" && s.Name() != name {
			continue
		}
		grpcReq.ResponseCh <- &GrpcResponse{Data: s.ToApiStruct()}
		matched = true
		if name != "" {
			// Exact-name lookup: one result is enough.
			break
		}
	}
	if !matched {
		return fmt.Errorf("not found %s", name)
	}
	return nil
}
// handleGrpcModDefinedSet creates, extends, shrinks, replaces or deletes a
// defined set (prefix/neighbor/... set) referenced by policies.
func (server *BgpServer) handleGrpcModDefinedSet(grpcReq *GrpcRequest) error {
	arg := grpcReq.Data.(*api.ModDefinedSetArguments)
	set := arg.Set
	typ := table.DefinedType(set.Type)
	name := set.Name
	var err error
	m, ok := server.policy.DefinedSetMap[typ]
	if !ok {
		return fmt.Errorf("invalid defined-set type: %d", typ)
	}
	d, ok := m[name]
	// Every operation except ADD requires an existing set to act on.
	if arg.Operation != api.Operation_ADD && !ok {
		return fmt.Errorf("not found defined-set: %s", name)
	}
	s, err := table.NewDefinedSetFromApiStruct(set)
	if err != nil {
		return err
	}
	switch arg.Operation {
	case api.Operation_ADD:
		if ok {
			// The set already exists: merge the new members into it.
			err = d.Append(s)
		} else {
			m[name] = s
		}
	case api.Operation_DEL:
		// Remove only the given members from the existing set.
		err = d.Remove(s)
	case api.Operation_DEL_ALL:
		if server.policy.InUse(d) {
			return fmt.Errorf("can't delete. defined-set %s is in use", name)
		}
		delete(m, name)
	case api.Operation_REPLACE:
		err = d.Replace(s)
	}
	return err
}
// handleGrpcGetStatement streams the configured policy statements to the
// requester. An empty name matches every statement; a non-empty name returns
// only the matching one. An error is returned when nothing matched; the
// caller closes the response channel.
func (server *BgpServer) handleGrpcGetStatement(grpcReq *GrpcRequest) error {
	arg := grpcReq.Data.(*api.Statement)
	name := arg.Name
	matched := false
	for _, st := range server.policy.StatementMap {
		if name != "" && st.Name != name {
			continue
		}
		grpcReq.ResponseCh <- &GrpcResponse{Data: st.ToApiStruct()}
		matched = true
		if name != "" {
			// Exact-name lookup: one result is enough.
			break
		}
	}
	if !matched {
		return fmt.Errorf("not found %s", name)
	}
	return nil
}
// handleGrpcModStatement adds, extends, replaces or deletes a policy
// statement in the shared statement map.
func (server *BgpServer) handleGrpcModStatement(grpcReq *GrpcRequest) error {
	arg := grpcReq.Data.(*api.ModStatementArguments)
	s, err := table.NewStatementFromApiStruct(arg.Statement, server.policy.DefinedSetMap)
	if err != nil {
		return err
	}
	m := server.policy.StatementMap
	name := s.Name
	d, ok := m[name]
	// Every operation except ADD requires an existing statement.
	if arg.Operation != api.Operation_ADD && !ok {
		return fmt.Errorf("not found statement: %s", name)
	}
	switch arg.Operation {
	case api.Operation_ADD:
		if ok {
			// The statement exists: merge the new conditions/actions in.
			err = d.Add(s)
		} else {
			m[name] = s
		}
	case api.Operation_DEL:
		err = d.Remove(s)
	case api.Operation_DEL_ALL:
		if server.policy.StatementInUse(d) {
			return fmt.Errorf("can't delete. statement %s is in use", name)
		}
		delete(m, name)
	case api.Operation_REPLACE:
		err = d.Replace(s)
	}
	return err
}
// handleGrpcGetPolicy streams the configured policies to the requester.
// An empty arg.Name matches every policy; a non-empty name returns only the
// matching policy. An error is returned when nothing matched; the caller is
// responsible for closing the response channel.
func (server *BgpServer) handleGrpcGetPolicy(grpcReq *GrpcRequest) error {
	arg := grpcReq.Data.(*api.Policy)
	name := arg.Name
	found := false
	for _, s := range server.policy.PolicyMap {
		if name != "" && name != s.Name() {
			continue
		}
		grpcReq.ResponseCh <- &GrpcResponse{
			Data: s.ToApiStruct(),
		}
		found = true
		if name != "" {
			// Exact-name lookup: one result is enough.
			break
		}
	}
	if !found {
		return fmt.Errorf("not found %s", name)
	}
	return nil
}
// policyInUse reports whether policy x is currently assigned to any neighbor
// (in/import/export) or to the global RIB (import/export).
func (server *BgpServer) policyInUse(x *table.Policy) bool {
	// Per-neighbor assignments.
	// BUGFIX: the direction list previously contained
	// POLICY_DIRECTION_EXPORT twice and never checked IMPORT assignments.
	for _, peer := range server.neighborMap {
		for _, dir := range []table.PolicyDirection{table.POLICY_DIRECTION_IN, table.POLICY_DIRECTION_IMPORT, table.POLICY_DIRECTION_EXPORT} {
			for _, y := range peer.GetPolicy(dir) {
				if x.Name() == y.Name() {
					return true
				}
			}
		}
	}
	// Global RIB assignments (same duplicate-EXPORT bug fixed here).
	for _, dir := range []table.PolicyDirection{table.POLICY_DIRECTION_IMPORT, table.POLICY_DIRECTION_EXPORT} {
		for _, y := range server.globalRib.GetPolicy(dir) {
			if x.Name() == y.Name() {
				return true
			}
		}
	}
	return false
}
// handleGrpcModPolicy creates, extends, replaces or deletes a policy and
// keeps the shared statement map consistent with the change.
func (server *BgpServer) handleGrpcModPolicy(grpcReq *GrpcRequest) error {
	arg := grpcReq.Data.(*api.ModPolicyArguments)
	x, err := table.NewPolicyFromApiStruct(arg.Policy, server.policy.DefinedSetMap)
	if err != nil {
		return err
	}
	pMap := server.policy.PolicyMap
	sMap := server.policy.StatementMap
	name := x.Name()
	y, ok := pMap[name]
	// Every operation except ADD requires an existing policy.
	if arg.Operation != api.Operation_ADD && !ok {
		return fmt.Errorf("not found policy: %s", name)
	}
	switch arg.Operation {
	case api.Operation_ADD, api.Operation_REPLACE:
		if arg.ReferExistingStatements {
			// Resolve the policy's statement names against already
			// registered statements.
			err = x.FillUp(sMap)
			if err != nil {
				return err
			}
		} else {
			// Register the policy's inline statements; names must be unique.
			for _, s := range x.Statements {
				if _, ok := sMap[s.Name]; ok {
					return fmt.Errorf("statement %s already defined", s.Name)
				}
				sMap[s.Name] = s
			}
		}
		if arg.Operation == api.Operation_REPLACE {
			err = y.Replace(x)
		} else if ok {
			err = y.Add(x)
		} else {
			pMap[name] = x
		}
	case api.Operation_DEL:
		err = y.Remove(x)
	case api.Operation_DEL_ALL:
		if server.policyInUse(y) {
			return fmt.Errorf("can't delete. policy %s is in use", name)
		}
		log.WithFields(log.Fields{
			"Topic": "Policy",
			"Key": name,
		}).Debug("delete policy")
		delete(pMap, name)
	}
	// Garbage-collect statements no longer referenced by any policy,
	// unless the caller asked to preserve them.
	if err == nil && arg.Operation != api.Operation_ADD && !arg.PreserveStatements {
		for _, s := range y.Statements {
			if !server.policy.StatementInUse(s) {
				log.WithFields(log.Fields{
					"Topic": "Policy",
					"Key": s.Name,
				}).Debug("delete unused statement")
				delete(sMap, s.Name)
			}
		}
	}
	return err
}
// policyPoint abstracts an attachment point for routing policies; it is
// implemented by both the global RIB and individual peers (see getPolicyInfo).
type policyPoint interface {
	GetDefaultPolicy(table.PolicyDirection) table.RouteType
	GetPolicy(table.PolicyDirection) []*table.Policy
	SetDefaultPolicy(table.PolicyDirection, table.RouteType) error
	SetPolicy(table.PolicyDirection, []*table.Policy) error
}
// getPolicyInfo maps an api.PolicyAssignment onto the concrete policy
// attachment point (global RIB or a peer) and the table.PolicyDirection
// corresponding to the requested policy type.
func (server *BgpServer) getPolicyInfo(a *api.PolicyAssignment) (policyPoint, table.PolicyDirection, error) {
	switch a.Resource {
	case api.Resource_GLOBAL:
		switch a.Type {
		case api.PolicyType_IMPORT:
			return server.globalRib, table.POLICY_DIRECTION_IMPORT, nil
		case api.PolicyType_EXPORT:
			return server.globalRib, table.POLICY_DIRECTION_EXPORT, nil
		default:
			return nil, table.POLICY_DIRECTION_NONE, fmt.Errorf("invalid policy type")
		}
	case api.Resource_LOCAL:
		// Per-neighbor assignment; the peer is looked up by name.
		peer, ok := server.neighborMap[a.Name]
		if !ok {
			return nil, table.POLICY_DIRECTION_NONE, fmt.Errorf("not found peer %s", a.Name)
		}
		switch a.Type {
		case api.PolicyType_IN:
			return peer, table.POLICY_DIRECTION_IN, nil
		case api.PolicyType_IMPORT:
			return peer, table.POLICY_DIRECTION_IMPORT, nil
		case api.PolicyType_EXPORT:
			return peer, table.POLICY_DIRECTION_EXPORT, nil
		default:
			return nil, table.POLICY_DIRECTION_NONE, fmt.Errorf("invalid policy type")
		}
	default:
		return nil, table.POLICY_DIRECTION_NONE, fmt.Errorf("invalid resource type")
	}
}
// handleGrpcGetPolicyAssignment fills the request's PolicyAssignment with
// the default action and the policies currently attached at the requested
// point/direction and sends it back on the response channel.
func (server *BgpServer) handleGrpcGetPolicyAssignment(grpcReq *GrpcRequest) error {
	assignment := grpcReq.Data.(*api.PolicyAssignment)
	point, dir, err := server.getPolicyInfo(assignment)
	if err != nil {
		return err
	}
	assignment.Default = point.GetDefaultPolicy(dir).ToApiStruct()
	attached := point.GetPolicy(dir)
	policies := make([]*api.Policy, 0, len(attached))
	for _, p := range attached {
		policies = append(policies, p.ToApiStruct())
	}
	assignment.Policies = policies
	grpcReq.ResponseCh <- &GrpcResponse{Data: assignment}
	return nil
}
// handleGrpcModPolicyAssignment applies ADD/REPLACE/DEL/DEL_ALL to the
// policy assignment of the global RIB or of a neighbor, resolving policy
// names against the configured policy map first.
func (server *BgpServer) handleGrpcModPolicyAssignment(grpcReq *GrpcRequest) error {
	var err error
	var dir table.PolicyDirection
	var i policyPoint
	arg := grpcReq.Data.(*api.ModPolicyAssignmentArguments)
	assignment := arg.Assignment
	i, dir, err = server.getPolicyInfo(assignment)
	if err != nil {
		return err
	}
	// Resolve the referenced policy names.
	ps := make([]*table.Policy, 0, len(assignment.Policies))
	for _, x := range assignment.Policies {
		p, ok := server.policy.PolicyMap[x.Name]
		if !ok {
			return fmt.Errorf("not found policy %s", x.Name)
		}
		ps = append(ps, p)
	}
	cur := i.GetPolicy(dir)
	switch arg.Operation {
	case api.Operation_ADD, api.Operation_REPLACE:
		if arg.Operation == api.Operation_REPLACE || cur == nil {
			err = i.SetPolicy(dir, ps)
		} else {
			err = i.SetPolicy(dir, append(cur, ps...))
		}
		if err != nil {
			return err
		}
		switch assignment.Default {
		case api.RouteAction_ACCEPT:
			err = i.SetDefaultPolicy(dir, table.ROUTE_TYPE_ACCEPT)
		case api.RouteAction_REJECT:
			err = i.SetDefaultPolicy(dir, table.ROUTE_TYPE_REJECT)
		}
	case api.Operation_DEL:
		// BUGFIX: keep the currently assigned policies that are NOT listed
		// in the request. The previous code iterated the requested policies
		// instead — assigning exactly the entries that were supposed to be
		// deleted — and could panic on make() with a negative capacity when
		// more policies were requested than were assigned.
		n := make([]*table.Policy, 0, len(cur))
		for _, y := range cur {
			found := false
			for _, x := range ps {
				if x.Name() == y.Name() {
					found = true
					break
				}
			}
			if !found {
				n = append(n, y)
			}
		}
		err = i.SetPolicy(dir, n)
	case api.Operation_DEL_ALL:
		err = i.SetPolicy(dir, nil)
		if err != nil {
			return err
		}
		err = i.SetDefaultPolicy(dir, table.ROUTE_TYPE_NONE)
	}
	return err
}
// handleMrt answers REQ_MRT_GLOBAL_RIB / REQ_MRT_LOCAL_RIB by serializing
// the selected RIB into an MRT TABLE_DUMPv2 byte stream (peer index table
// followed by per-destination RIB messages) and queueing it as a broadcast
// message. A positive interval in the request re-enqueues it after that many
// seconds, producing periodic dumps.
func (server *BgpServer) handleMrt(grpcReq *GrpcRequest) {
	now := uint32(time.Now().Unix())
	view := ""
	result := &GrpcResponse{}
	var rib *table.TableManager
	switch grpcReq.RequestType {
	case REQ_MRT_GLOBAL_RIB:
		rib = server.globalRib
	case REQ_MRT_LOCAL_RIB:
		peer, err := server.checkNeighborRequest(grpcReq)
		if err != nil {
			// checkNeighborRequest already replied with the error.
			return
		}
		rib = peer.localRib
		if rib == nil {
			result.ResponseErr = fmt.Errorf("no local rib for %s", grpcReq.Name)
			grpcReq.ResponseCh <- result
			close(grpcReq.ResponseCh)
			return
		}
		view = grpcReq.Name
	}
	msg, err := server.mkMrtPeerIndexTableMsg(now, view)
	if err != nil {
		result.ResponseErr = fmt.Errorf("failed to make new mrt peer index table message: %s", err)
		grpcReq.ResponseCh <- result
		close(grpcReq.ResponseCh)
		return
	}
	data, err := msg.Serialize()
	if err != nil {
		result.ResponseErr = fmt.Errorf("failed to serialize table: %s", err)
		grpcReq.ResponseCh <- result
		close(grpcReq.ResponseCh)
		return
	}
	tbl, ok := rib.Tables[grpcReq.RouteFamily]
	if !ok {
		result.ResponseErr = fmt.Errorf("unsupported route family: %s", grpcReq.RouteFamily)
		grpcReq.ResponseCh <- result
		close(grpcReq.ResponseCh)
		return
	}
	msgs, err := server.mkMrtRibMsgs(tbl, now)
	if err != nil {
		result.ResponseErr = fmt.Errorf("failed to make new mrt rib message: %s", err)
		grpcReq.ResponseCh <- result
		close(grpcReq.ResponseCh)
		return
	}
	// Concatenate the serialized RIB messages after the peer index table.
	for _, msg := range msgs {
		d, err := msg.Serialize()
		if err != nil {
			result.ResponseErr = fmt.Errorf("failed to serialize rib msg: %s", err)
			grpcReq.ResponseCh <- result
			close(grpcReq.ResponseCh)
			return
		}
		data = append(data, d...)
	}
	result.Data = &api.MrtMessage{
		Data: data,
	}
	// Drop the result silently if the requester has already gone away.
	select {
	case <-grpcReq.EndCh:
		return
	default:
	}
	m := &broadcastGrpcMsg{
		req: grpcReq,
		result: result,
	}
	// NOTE(review): assumes grpcReq.Data holds a uint64 interval; the type
	// assertion panics otherwise — confirm all callers set it.
	interval := int64(grpcReq.Data.(uint64))
	if interval > 0 {
		// Re-enqueue the same request after the interval for periodic dumps.
		go func() {
			t := time.NewTimer(time.Second * time.Duration(interval))
			<-t.C
			server.GrpcReqCh <- grpcReq
		}()
	} else {
		// One-shot dump: the broadcast machinery closes the channel.
		m.done = true
	}
	server.broadcastMsgs = append(server.broadcastMsgs, m)
	return
}
// mkMrtPeerIndexTableMsg builds the TABLE_DUMPv2 PEER_INDEX_TABLE MRT
// message listing every configured neighbor, stamped with t and view.
// NOTE(review): the peer order comes from iterating neighborMap, which Go
// does not keep deterministic; mkMrtRibMsgs derives peer indices the same
// way but in a separate iteration — confirm the indices stay consistent
// within one dump.
func (server *BgpServer) mkMrtPeerIndexTableMsg(t uint32, view string) (*bgp.MRTMessage, error) {
	peers := make([]*bgp.Peer, 0, len(server.neighborMap))
	for _, peer := range server.neighborMap {
		id := peer.peerInfo.ID.To4().String()
		ipaddr := peer.conf.NeighborConfig.NeighborAddress.String()
		asn := peer.conf.NeighborConfig.PeerAs
		peers = append(peers, bgp.NewPeer(id, ipaddr, asn, true))
	}
	bgpid := server.bgpConfig.Global.GlobalConfig.RouterId.To4().String()
	table := bgp.NewPeerIndexTable(bgpid, view, peers)
	return bgp.NewMRTMessage(t, bgp.TABLE_DUMPv2, bgp.PEER_INDEX_TABLE, table)
}
// mkMrtRibMsgs converts one routing table into a sequence of MRT
// TABLE_DUMPv2 RIB messages (one per destination), timestamped with t.
// Locally generated routes are skipped.
func (server *BgpServer) mkMrtRibMsgs(tbl *table.Table, t uint32) ([]*bgp.MRTMessage, error) {
	// getPeerIndex maps a path's source peer to its position in the
	// neighbor iteration.
	// NOTE(review): relies on ranging over neighborMap in the same order as
	// mkMrtPeerIndexTableMsg; Go map iteration order is randomized, so the
	// returned index may not match the peer index table — verify.
	getPeerIndex := func(info *table.PeerInfo) uint16 {
		var idx uint16
		for _, peer := range server.neighborMap {
			if peer.peerInfo.Equal(info) {
				return idx
			}
			idx++
		}
		return idx
	}
	// Pick the MRT subtype matching the table's route family.
	var subtype bgp.MRTSubTypeTableDumpv2
	switch tbl.GetRoutefamily() {
	case bgp.RF_IPv4_UC:
		subtype = bgp.RIB_IPV4_UNICAST
	case bgp.RF_IPv4_MC:
		subtype = bgp.RIB_IPV4_MULTICAST
	case bgp.RF_IPv6_UC:
		subtype = bgp.RIB_IPV6_UNICAST
	case bgp.RF_IPv6_MC:
		subtype = bgp.RIB_IPV6_MULTICAST
	default:
		subtype = bgp.RIB_GENERIC
	}
	var seq uint32
	msgs := make([]*bgp.MRTMessage, 0, len(tbl.GetDestinations()))
	for _, dst := range tbl.GetDestinations() {
		l := dst.GetKnownPathList()
		entries := make([]*bgp.RibEntry, 0, len(l))
		for _, p := range l {
			// mrt doesn't assume to dump locally generated routes
			if p.IsLocal() {
				continue
			}
			idx := getPeerIndex(p.GetSource())
			e := bgp.NewRibEntry(idx, uint32(p.GetTimestamp().Unix()), p.GetPathAttrs())
			entries = append(entries, e)
		}
		// if dst only contains locally generated routes, ignore it
		if len(entries) == 0 {
			continue
		}
		rib := bgp.NewRib(seq, dst.GetNlri(), entries)
		seq++
		msg, err := bgp.NewMRTMessage(t, bgp.TABLE_DUMPv2, subtype, rib)
		if err != nil {
			return nil, err
		}
		msgs = append(msgs, msg)
	}
	return msgs, nil
}
// NewZclient connects this server to zebra using a "network:address" url
// (e.g. "unix:/var/run/quagga/zserv.api"), announces itself as a BGP route
// client, requests redistribution of the given route types, and stores the
// client on the server.
func (server *BgpServer) NewZclient(url string, redistRouteTypes []string) error {
	parts := strings.SplitN(url, ":", 2)
	if len(parts) != 2 {
		return fmt.Errorf("unsupported url: %s", url)
	}
	client, err := zebra.NewClient(parts[0], parts[1], zebra.ROUTE_BGP)
	if err != nil {
		return err
	}
	// Standard zebra session bring-up.
	client.SendHello()
	client.SendRouterIDAdd()
	client.SendInterfaceAdd()
	for _, typeName := range redistRouteTypes {
		routeType, err := zebra.RouteTypeFromString(typeName)
		if err != nil {
			return err
		}
		client.SendRedistribute(routeType)
	}
	if err := client.SendCommand(zebra.REDISTRIBUTE_DEFAULT_ADD, nil); err != nil {
		return err
	}
	server.zclient = client
	return nil
}
|
// Copyright (c) 2014 The SkyDNS Authors. All rights reserved.
// Use of this source code is governed by The MIT License (MIT) that can be
// found in the LICENSE file.
package server
import (
"fmt"
"math"
"net"
"strconv"
"strings"
"sync"
"time"
"github.com/coreos/go-etcd/etcd"
"github.com/coreos/go-systemd/activation"
"github.com/miekg/dns"
"github.com/skynetservices/skydns/cache"
"github.com/skynetservices/skydns/msg"
)
// Version is the SkyDNS version string.
const Version = "2.5.2b"
// server ties the DNS frontend to a record Backend together with its caches
// and the clients used for forwarding.
type server struct {
	backend Backend
	config *Config
	group *sync.WaitGroup // tracks the listener goroutines started by Run
	dnsUDPclient *dns.Client // used for forwarding queries
	dnsTCPclient *dns.Client // used for forwarding queries
	scache *cache.Cache // cache keyed for signing (see Run's ready message)
	rcache *cache.Cache // response cache
}
// Backend supplies the DNS records served by this server: forward lookups
// by name and reverse (PTR-style) lookups.
type Backend interface {
	Records(name string, exact bool) ([]msg.Service, error)
	ReverseRecord(name string) (*msg.Service, error)
}
// FirstBackend exposes the Backend interface over multiple Backends, returning
// the first Backend that answers the provided record request. If no Backend answers
// a record request, the last error seen will be returned.
type FirstBackend []Backend
// Compile-time check that FirstBackend implements Backend.
var _ Backend = FirstBackend{}
// Records queries each backend in order and returns the first non-empty,
// error-free answer. When every backend fails or comes back empty, the last
// error observed (possibly nil) is returned.
func (g FirstBackend) Records(name string, exact bool) (records []msg.Service, err error) {
	var lastErr error
	for _, b := range g {
		records, err = b.Records(name, exact)
		if err == nil && len(records) > 0 {
			return records, nil
		}
		if err != nil {
			lastErr = err
		}
	}
	return nil, lastErr
}
// ReverseRecord queries each backend in order and returns the first non-nil,
// error-free answer. When every backend fails or comes back nil, the last
// error observed (possibly nil) is returned.
func (g FirstBackend) ReverseRecord(name string) (record *msg.Service, err error) {
	var lastErr error
	for _, b := range g {
		record, err = b.ReverseRecord(name)
		if err == nil && record != nil {
			return record, nil
		}
		if err != nil {
			lastErr = err
		}
	}
	return nil, lastErr
}
// New returns a new SkyDNS server wired to the given backend and config.
// The forwarding clients use twice the configured read timeout for both
// reads and writes, and deduplicate identical in-flight queries.
func New(backend Backend, config *Config) *server {
	forwardTimeout := 2 * config.ReadTimeout
	return &server{
		backend:      backend,
		config:       config,
		group:        new(sync.WaitGroup),
		scache:       cache.New(config.SCache, 0),
		rcache:       cache.New(config.RCache, config.RCacheTtl),
		dnsUDPclient: &dns.Client{Net: "udp", ReadTimeout: forwardTimeout, WriteTimeout: forwardTimeout, SingleInflight: true},
		dnsTCPclient: &dns.Client{Net: "tcp", ReadTimeout: forwardTimeout, WriteTimeout: forwardTimeout, SingleInflight: true},
	}
}
// Run is a blocking operation that starts the server listening on the DNS ports.
// With config.Systemd set it adopts the UDP packet conns and TCP listeners
// handed over by systemd socket activation; otherwise it listens itself on
// config.DnsAddr for both "tcp" and "udp". It returns only after every
// listener goroutine has finished (serve errors call fatalf).
func (s *server) Run() error {
	mux := dns.NewServeMux()
	mux.Handle(".", s)
	// dnsReadyMsg logs a per-listener startup line, including signing info
	// when DNSSEC is configured.
	dnsReadyMsg := func(addr, net string) {
		if s.config.DNSSEC == "" {
			logf("ready for queries on %s for %s://%s [rcache %d]", s.config.Domain, net, addr, s.config.RCache)
		} else {
			logf("ready for queries on %s for %s://%s [rcache %d], signing with %s [scache %d]", s.config.Domain, net, addr, s.config.RCache, s.config.DNSSEC, s.config.SCache)
		}
	}
	if s.config.Systemd {
		packetConns, err := activation.PacketConns(false)
		if err != nil {
			return err
		}
		listeners, err := activation.Listeners(true)
		if err != nil {
			return err
		}
		if len(packetConns) == 0 && len(listeners) == 0 {
			return fmt.Errorf("no UDP or TCP sockets supplied by systemd")
		}
		// Serve on each activated UDP socket.
		for _, p := range packetConns {
			if u, ok := p.(*net.UDPConn); ok {
				s.group.Add(1)
				go func() {
					defer s.group.Done()
					if err := dns.ActivateAndServe(nil, u, mux); err != nil {
						fatalf("%s", err)
					}
				}()
				dnsReadyMsg(u.LocalAddr().String(), "udp")
			}
		}
		// Serve on each activated TCP listener.
		for _, l := range listeners {
			if t, ok := l.(*net.TCPListener); ok {
				s.group.Add(1)
				go func() {
					defer s.group.Done()
					if err := dns.ActivateAndServe(t, nil, mux); err != nil {
						fatalf("%s", err)
					}
				}()
				dnsReadyMsg(t.Addr().String(), "tcp")
			}
		}
	} else {
		// Self-managed listeners: one goroutine each for TCP and UDP.
		s.group.Add(1)
		go func() {
			defer s.group.Done()
			if err := dns.ListenAndServe(s.config.DnsAddr, "tcp", mux); err != nil {
				fatalf("%s", err)
			}
		}()
		dnsReadyMsg(s.config.DnsAddr, "tcp")
		s.group.Add(1)
		go func() {
			defer s.group.Done()
			if err := dns.ListenAndServe(s.config.DnsAddr, "udp", mux); err != nil {
				fatalf("%s", err)
			}
		}()
		dnsReadyMsg(s.config.DnsAddr, "udp")
	}
	s.group.Wait()
	return nil
}
// Stop stops a server.
// NOTE(review): currently a no-op — listeners are not closed and the
// WaitGroup is not released, so Run keeps blocking after Stop returns.
func (s *server) Stop() {
	// TODO(miek)
	//s.group.Add(-2)
}
// ServeDNS is the handler for DNS requests, responsible for parsing a DNS
// request, possibly forwarding it to a real dns server, and returning a
// response. Answers are cached, optionally DNSSEC-signed and truncated to the
// client's advertised EDNS0 buffer size before being written to w.
func (s *server) ServeDNS(w dns.ResponseWriter, req *dns.Msg) {
	m := new(dns.Msg)
	m.SetReply(req)
	m.Authoritative = true
	m.RecursionAvailable = true
	m.Compress = true
	bufsize := uint16(512)
	dnssec := false
	tcp := false
	start := time.Now()
	// Refuse queries for type ANY outright.
	if req.Question[0].Qtype == dns.TypeANY {
		m.Authoritative = false
		m.Rcode = dns.RcodeRefused
		m.RecursionAvailable = false
		m.RecursionDesired = false
		m.Compress = false
		// if write fails don't care
		w.WriteMsg(m)
		promErrorCount.WithLabelValues("refused").Inc()
		return
	}
	if o := req.IsEdns0(); o != nil {
		bufsize = o.UDPSize()
		dnssec = o.Do()
	}
	if bufsize < 512 {
		bufsize = 512
	}
	// with TCP we can send 64K
	if tcp = isTCP(w); tcp {
		bufsize = dns.MaxMsgSize - 1
		promRequestCount.WithLabelValues("tcp").Inc()
	} else {
		promRequestCount.WithLabelValues("udp").Inc()
	}
	StatsRequestCount.Inc(1)
	if dnssec {
		StatsDnssecOkCount.Inc(1)
		promDnssecOkCount.Inc()
	}
	defer func() {
		promCacheSize.WithLabelValues("response").Set(float64(s.rcache.Size()))
	}()
	// Check cache first.
	key := cache.QuestionKey(req.Question[0], dnssec)
	m1, exp, hit := s.rcache.Search(key)
	if hit {
		// Cache hit! \o/
		if time.Since(exp) < 0 {
			m1.Id = m.Id
			m1.Compress = true
			m1.Truncated = false
			if dnssec {
				// The key for DNS/DNSSEC in cache is different, no
				// need to do Denial/Sign here.
				//if s.config.PubKey != nil {
				//s.Denial(m1) // not needed for cache hits
				//s.Sign(m1, bufsize)
				//}
			}
			if m1.Len() > int(bufsize) && !tcp {
				promErrorCount.WithLabelValues("truncated").Inc()
				m1.Truncated = true
			}
			// Still round-robin even with hits from the cache.
			// Only shuffle A and AAAA records with each other.
			if req.Question[0].Qtype == dns.TypeA || req.Question[0].Qtype == dns.TypeAAAA {
				s.RoundRobin(m1.Answer)
			}
			if err := w.WriteMsg(m1); err != nil {
				logf("failure to return reply %q", err)
			}
			metricSizeAndDuration(m1, start, tcp)
			return
		}
		// Expired! /o\
		s.rcache.Remove(key)
	}
	q := req.Question[0]
	name := strings.ToLower(q.Name)
	if s.config.Verbose {
		logf("received DNS Request for %q from %q with type %d", q.Name, w.RemoteAddr(), q.Qtype)
	}
	// Names under a configured stub zone are forwarded to that zone's servers.
	for zone, ns := range *s.config.stub {
		if strings.HasSuffix(name, zone) {
			resp := s.ServeDNSStubForward(w, req, ns)
			metricSizeAndDuration(resp, start, tcp)
			return
		}
	}
	// If the qname is local.dns.skydns.local. and s.config.Local != "", substitute that name.
	if s.config.Local != "" && name == s.config.localDomain {
		name = s.config.Local
	}
	// Bug fix: && binds tighter than || in Go, so the previous unparenthesized
	// condition routed *every* query ending in ".ip6.arpa." — regardless of
	// its qtype — to ServeDNSReverse. Only PTR queries should take the
	// reverse-lookup path.
	if q.Qtype == dns.TypePTR && (strings.HasSuffix(name, ".in-addr.arpa.") || strings.HasSuffix(name, ".ip6.arpa.")) {
		resp := s.ServeDNSReverse(w, req)
		metricSizeAndDuration(resp, start, tcp)
		return
	}
	// Anything outside our authoritative domain (and not CHAOS) is recursed.
	if q.Qclass != dns.ClassCHAOS && !strings.HasSuffix(name, s.config.Domain) {
		resp := s.ServeDNSForward(w, req)
		metricSizeAndDuration(resp, start, tcp)
		return
	}
	promCacheMiss.WithLabelValues("response").Inc()
	// This deferred closure post-processes m (dedup, TTL clamping, caching,
	// signing, size handling) and writes the final reply.
	defer func() {
		if m.Rcode == dns.RcodeServerFailure {
			if err := w.WriteMsg(m); err != nil {
				logf("failure to return reply %q", err)
			}
			return
		}
		// Set TTL to the minimum of the RRset and dedup the message, i.e.
		// remove identical RRs.
		m = s.dedup(m)
		minttl := s.config.Ttl
		if len(m.Answer) > 1 {
			for _, r := range m.Answer {
				if r.Header().Ttl < minttl {
					minttl = r.Header().Ttl
				}
			}
			for _, r := range m.Answer {
				r.Header().Ttl = minttl
			}
		}
		if !m.Truncated {
			s.rcache.InsertMessage(cache.QuestionKey(req.Question[0], dnssec), m)
		}
		if dnssec {
			if s.config.PubKey != nil {
				m.AuthenticatedData = true
				s.Denial(m)
				s.Sign(m, bufsize)
			}
		}
		if m.Len() > dns.MaxMsgSize {
			logf("overflowing maximum message size: %d, dropping additional section", m.Len())
			m.Extra = nil // Drop entire additional section to see if this helps.
			if m.Len() > dns.MaxMsgSize {
				// *Still* too large.
				logf("still overflowing maximum message size: %d", m.Len())
				promErrorCount.WithLabelValues("overflow").Inc()
				m1 := new(dns.Msg) // Use smaller msg to signal failure.
				m1.SetRcode(m, dns.RcodeServerFailure)
				if err := w.WriteMsg(m1); err != nil {
					logf("failure to return reply %q", err)
				}
				metricSizeAndDuration(m1, start, tcp)
				return
			}
		}
		if m.Len() > int(bufsize) && !tcp {
			m.Extra = nil // As above, drop entire additional section.
			if m.Len() > int(bufsize) {
				promErrorCount.WithLabelValues("truncated").Inc()
				m.Truncated = true
			}
		}
		if err := w.WriteMsg(m); err != nil {
			logf("failure to return reply %q %d", err, m.Len())
		}
		metricSizeAndDuration(m, start, tcp)
	}()
	// Apex of the zone: answer SOA/DNSKEY directly.
	if name == s.config.Domain {
		if q.Qtype == dns.TypeSOA {
			m.Answer = []dns.RR{s.NewSOA()}
			return
		}
		if q.Qtype == dns.TypeDNSKEY {
			if s.config.PubKey != nil {
				m.Answer = []dns.RR{s.config.PubKey}
				return
			}
		}
	}
	// CHAOS-class vanity TXT queries (authors/version/hostname).
	if q.Qclass == dns.ClassCHAOS {
		if q.Qtype == dns.TypeTXT {
			switch name {
			case "authors.bind.":
				fallthrough
			case s.config.Domain:
				hdr := dns.RR_Header{Name: q.Name, Rrtype: dns.TypeTXT, Class: dns.ClassCHAOS, Ttl: 0}
				authors := []string{"Erik St. Martin", "Brian Ketelsen", "Miek Gieben", "Michael Crosby"}
				for _, a := range authors {
					m.Answer = append(m.Answer, &dns.TXT{Hdr: hdr, Txt: []string{a}})
				}
				// Random swaps using dns.Id() as a cheap randomness source.
				for j := 0; j < len(authors)*(int(dns.Id())%4+1); j++ {
					q := int(dns.Id()) % len(authors)
					p := int(dns.Id()) % len(authors)
					if q == p {
						p = (p + 1) % len(authors)
					}
					m.Answer[q], m.Answer[p] = m.Answer[p], m.Answer[q]
				}
				return
			case "version.bind.":
				fallthrough
			case "version.server.":
				hdr := dns.RR_Header{Name: q.Name, Rrtype: dns.TypeTXT, Class: dns.ClassCHAOS, Ttl: 0}
				m.Answer = []dns.RR{&dns.TXT{Hdr: hdr, Txt: []string{Version}}}
				return
			case "hostname.bind.":
				fallthrough
			case "id.server.":
				// TODO(miek): machine name to return
				hdr := dns.RR_Header{Name: q.Name, Rrtype: dns.TypeTXT, Class: dns.ClassCHAOS, Ttl: 0}
				m.Answer = []dns.RR{&dns.TXT{Hdr: hdr, Txt: []string{"localhost"}}}
				return
			}
		}
		// still here, fail
		m.SetReply(req)
		m.SetRcode(req, dns.RcodeServerFailure)
		return
	}
	// Normal lookups against the backend; etcd error code 100 means NXDOMAIN.
	switch q.Qtype {
	case dns.TypeNS:
		if name != s.config.Domain {
			break
		}
		// Lookup s.config.DnsDomain
		records, extra, err := s.NSRecords(q, s.config.dnsDomain)
		if err != nil {
			if e, ok := err.(*etcd.EtcdError); ok {
				if e.ErrorCode == 100 {
					s.NameError(m, req)
					return
				}
			}
			logf("go error from backend: %s", err)
		}
		m.Answer = append(m.Answer, records...)
		m.Extra = append(m.Extra, extra...)
	case dns.TypeA, dns.TypeAAAA:
		records, err := s.AddressRecords(q, name, nil, bufsize, dnssec, false)
		if err != nil {
			if e, ok := err.(*etcd.EtcdError); ok {
				if e.ErrorCode == 100 {
					s.NameError(m, req)
					return
				}
			}
			logf("go error from backend: %s", err)
		}
		m.Answer = append(m.Answer, records...)
	case dns.TypeTXT:
		records, err := s.TXTRecords(q, name)
		if err != nil {
			if e, ok := err.(*etcd.EtcdError); ok {
				if e.ErrorCode == 100 {
					s.NameError(m, req)
					return
				}
			}
			logf("go error from backend: %s", err)
		}
		m.Answer = append(m.Answer, records...)
	case dns.TypeCNAME:
		records, err := s.CNAMERecords(q, name)
		if err != nil {
			if e, ok := err.(*etcd.EtcdError); ok {
				if e.ErrorCode == 100 {
					s.NameError(m, req)
					return
				}
			}
			logf("go error from backend: %s", err)
		}
		m.Answer = append(m.Answer, records...)
	case dns.TypeMX:
		records, extra, err := s.MXRecords(q, name, bufsize, dnssec)
		if err != nil {
			if e, ok := err.(*etcd.EtcdError); ok {
				if e.ErrorCode == 100 {
					s.NameError(m, req)
					return
				}
			}
			logf("go error from backend: %s", err)
		}
		m.Answer = append(m.Answer, records...)
		m.Extra = append(m.Extra, extra...)
	default:
		fallthrough // also catch other types, so that they return NODATA
	case dns.TypeSRV:
		records, extra, err := s.SRVRecords(q, name, bufsize, dnssec)
		if err != nil {
			if e, ok := err.(*etcd.EtcdError); ok {
				if e.ErrorCode == 100 {
					s.NameError(m, req)
					return
				}
			}
			logf("go error from backend: %s", err)
			if q.Qtype == dns.TypeSRV { // Otherwise NODATA
				s.ServerFailure(m, req)
				return
			}
		}
		// if we are here again, check the types, because an answer may only
		// be given for SRV. All other types should return NODATA, the
		// NXDOMAIN part is handled in the above code. TODO(miek): yes this
		// can be done in a more elegant manor.
		if q.Qtype == dns.TypeSRV {
			m.Answer = append(m.Answer, records...)
			m.Extra = append(m.Extra, extra...)
		}
	}
	if len(m.Answer) == 0 { // NODATA response
		StatsNoDataCount.Inc(1)
		m.Ns = []dns.RR{s.NewSOA()}
		m.Ns[0].Header().Ttl = s.config.MinTtl
	}
}
// AddressRecords looks up name in the backend and returns A and/or AAAA
// records answering q. Hosts that are names rather than IPs are followed as
// CNAME chains (at most 8 links, loop-protected via previousRecords); chains
// leaving s.config.Domain are completed through s.Lookup. With both set, both
// A and AAAA records are returned regardless of q.Qtype.
func (s *server) AddressRecords(q dns.Question, name string, previousRecords []dns.RR, bufsize uint16, dnssec, both bool) (records []dns.RR, err error) {
	services, err := s.backend.Records(name, false)
	if err != nil {
		return nil, err
	}
	services = msg.Group(services)
	for _, serv := range services {
		ip := net.ParseIP(serv.Host)
		switch {
		case ip == nil:
			// Try to resolve as CNAME if it's not an IP, but only if we don't create loops.
			if q.Name == dns.Fqdn(serv.Host) {
				// x CNAME x is a direct loop, don't add those
				continue
			}
			newRecord := serv.NewCNAME(q.Name, dns.Fqdn(serv.Host))
			if len(previousRecords) > 7 {
				logf("CNAME lookup limit of 8 exceeded for %s", newRecord)
				// don't add it, and just continue
				continue
			}
			if s.isDuplicateCNAME(newRecord, previousRecords) {
				logf("CNAME loop detected for record %s", newRecord)
				continue
			}
			nextRecords, err := s.AddressRecords(dns.Question{Name: dns.Fqdn(serv.Host), Qtype: q.Qtype, Qclass: q.Qclass},
				strings.ToLower(dns.Fqdn(serv.Host)), append(previousRecords, newRecord), bufsize, dnssec, both)
			if err == nil {
				// Only have we found something we should add the CNAME and the IP addresses.
				if len(nextRecords) > 0 {
					records = append(records, newRecord)
					records = append(records, nextRecords...)
				}
				continue
			}
			// This means we can not complete the CNAME, try to look else where.
			target := newRecord.Target
			if dns.IsSubDomain(s.config.Domain, target) {
				// We should already have found it
				continue
			}
			m1, e1 := s.Lookup(target, q.Qtype, bufsize, dnssec)
			if e1 != nil {
				logf("incomplete CNAME chain: %s", e1)
				continue
			}
			// Len(m1.Answer) > 0 here is well?
			records = append(records, newRecord)
			records = append(records, m1.Answer...)
			// Bug fix: a stray logf("incomplete CNAME chain for %s", name)
			// used to sit after this continue; it was unreachable (go vet)
			// and has been removed.
			continue
		case ip.To4() != nil && (q.Qtype == dns.TypeA || both):
			records = append(records, serv.NewA(q.Name, ip.To4()))
		case ip.To4() == nil && (q.Qtype == dns.TypeAAAA || both):
			records = append(records, serv.NewAAAA(q.Name, ip.To16()))
		}
	}
	if s.config.RoundRobin {
		s.RoundRobin(records)
	}
	return records, nil
}
// NSRecords returns NS records from etcd. Every service host must be an IP
// address; the NS owner name is synthesized from the service key and the
// matching A/AAAA glue record is placed in extra.
func (s *server) NSRecords(q dns.Question, name string) (records []dns.RR, extra []dns.RR, err error) {
	services, err := s.backend.Records(name, false)
	if err != nil {
		return nil, nil, err
	}
	for _, svc := range msg.Group(services) {
		addr := net.ParseIP(svc.Host)
		if addr == nil {
			return nil, nil, fmt.Errorf("NS record must be an IP address")
		}
		svc.Host = msg.Domain(svc.Key)
		records = append(records, svc.NewNS(q.Name, svc.Host))
		if v4 := addr.To4(); v4 != nil {
			extra = append(extra, svc.NewA(svc.Host, v4))
		} else {
			extra = append(extra, svc.NewAAAA(svc.Host, addr.To16()))
		}
	}
	return records, extra, nil
}
// SRVRecords returns SRV records from etcd.
// If the Target is not a name but an IP address, a name is created.
func (s *server) SRVRecords(q dns.Question, name string, bufsize uint16, dnssec bool) (records []dns.RR, extra []dns.RR, err error) {
	services, err := s.backend.Records(name, false)
	if err != nil {
		return nil, nil, err
	}
	services = msg.Group(services)
	// Looping twice to get the right weight vs priority
	// First pass: total the weights per priority group (an unset weight
	// counts as 100) so each record's share can be normalized later.
	w := make(map[int]int)
	for _, serv := range services {
		weight := 100
		if serv.Weight != 0 {
			weight = serv.Weight
		}
		if _, ok := w[serv.Priority]; !ok {
			w[serv.Priority] = weight
			continue
		}
		w[serv.Priority] += weight
	}
	// lookup tracks SRV targets already resolved so extras are added once.
	lookup := make(map[string]bool)
	for _, serv := range services {
		// Normalize this record's weight to a percentage of its priority group.
		w1 := 100.0 / float64(w[serv.Priority])
		if serv.Weight == 0 {
			w1 *= 100
		} else {
			w1 *= float64(serv.Weight)
		}
		weight := uint16(math.Floor(w1))
		ip := net.ParseIP(serv.Host)
		switch {
		case ip == nil:
			// Host is a name: emit the SRV, then try to resolve the target
			// into address records for the additional section.
			srv := serv.NewSRV(q.Name, weight)
			records = append(records, srv)
			if _, ok := lookup[srv.Target]; ok {
				break
			}
			lookup[srv.Target] = true
			if !dns.IsSubDomain(s.config.Domain, srv.Target) {
				// External target: resolve A and AAAA via the forwarder.
				m1, e1 := s.Lookup(srv.Target, dns.TypeA, bufsize, dnssec)
				if e1 == nil {
					extra = append(extra, m1.Answer...)
				}
				m1, e1 = s.Lookup(srv.Target, dns.TypeAAAA, bufsize, dnssec)
				if e1 == nil {
					// If we have seen CNAME's we *assume* that they are already added.
					for _, a := range m1.Answer {
						if _, ok := a.(*dns.CNAME); !ok {
							extra = append(extra, a)
						}
					}
				}
				break
			}
			// Internal name, we should have some info on them, either v4 or v6
			// Clients expect a complete answer, because we are a recursor in their
			// view.
			addr, e1 := s.AddressRecords(dns.Question{srv.Target, dns.ClassINET, dns.TypeA},
				srv.Target, nil, bufsize, dnssec, true)
			if e1 == nil {
				extra = append(extra, addr...)
			}
		case ip.To4() != nil:
			// Host is an IPv4 address: synthesize the target name from the key.
			serv.Host = msg.Domain(serv.Key)
			srv := serv.NewSRV(q.Name, weight)
			records = append(records, srv)
			extra = append(extra, serv.NewA(srv.Target, ip.To4()))
		case ip.To4() == nil:
			// Host is an IPv6 address.
			serv.Host = msg.Domain(serv.Key)
			srv := serv.NewSRV(q.Name, weight)
			records = append(records, srv)
			extra = append(extra, serv.NewAAAA(srv.Target, ip.To16()))
		}
	}
	return records, extra, nil
}
// MXRecords returns MX records from etcd.
// If the Target is not a name but an IP address, a name is created.
func (s *server) MXRecords(q dns.Question, name string, bufsize uint16, dnssec bool) (records []dns.RR, extra []dns.RR, err error) {
	services, err := s.backend.Records(name, false)
	if err != nil {
		return nil, nil, err
	}
	// NOTE(review): the sibling builders (NSRecords, SRVRecords, TXTRecords,
	// CNAMERecords) call msg.Group(services) here; this one does not —
	// confirm whether that is intentional.
	// lookup tracks MX targets whose address records were already added.
	lookup := make(map[string]bool)
	for _, serv := range services {
		// Only services flagged as mail servers yield MX records.
		if !serv.Mail {
			continue
		}
		ip := net.ParseIP(serv.Host)
		switch {
		case ip == nil:
			// Host is a name: emit the MX and resolve the target for extras.
			mx := serv.NewMX(q.Name)
			records = append(records, mx)
			if _, ok := lookup[mx.Mx]; ok {
				break
			}
			lookup[mx.Mx] = true
			if !dns.IsSubDomain(s.config.Domain, mx.Mx) {
				// External target: resolve A and AAAA via the forwarder.
				m1, e1 := s.Lookup(mx.Mx, dns.TypeA, bufsize, dnssec)
				if e1 == nil {
					extra = append(extra, m1.Answer...)
				}
				m1, e1 = s.Lookup(mx.Mx, dns.TypeAAAA, bufsize, dnssec)
				if e1 == nil {
					// If we have seen CNAME's we *assume* that they are already added.
					for _, a := range m1.Answer {
						if _, ok := a.(*dns.CNAME); !ok {
							extra = append(extra, a)
						}
					}
				}
				break
			}
			// Internal name
			addr, e1 := s.AddressRecords(dns.Question{mx.Mx, dns.ClassINET, dns.TypeA},
				mx.Mx, nil, bufsize, dnssec, true)
			if e1 == nil {
				extra = append(extra, addr...)
			}
		case ip.To4() != nil:
			serv.Host = msg.Domain(serv.Key)
			records = append(records, serv.NewMX(q.Name))
			extra = append(extra, serv.NewA(serv.Host, ip.To4()))
		case ip.To4() == nil:
			serv.Host = msg.Domain(serv.Key)
			records = append(records, serv.NewMX(q.Name))
			extra = append(extra, serv.NewAAAA(serv.Host, ip.To16()))
		}
	}
	return records, extra, nil
}
// CNAMERecords returns at most one CNAME record for name: the first grouped
// service whose Host is not an IP address. IP-valued hosts yield no CNAME.
func (s *server) CNAMERecords(q dns.Question, name string) (records []dns.RR, err error) {
	services, err := s.backend.Records(name, true)
	if err != nil {
		return nil, err
	}
	grouped := msg.Group(services)
	if len(grouped) == 0 {
		return records, nil
	}
	first := grouped[0]
	if net.ParseIP(first.Host) != nil {
		return records, nil
	}
	records = append(records, first.NewCNAME(q.Name, dns.Fqdn(first.Host)))
	return records, nil
}
// TXTRecords returns TXT records for name, one per grouped service that
// carries a non-empty Text field.
func (s *server) TXTRecords(q dns.Question, name string) (records []dns.RR, err error) {
	services, err := s.backend.Records(name, false)
	if err != nil {
		return nil, err
	}
	for _, svc := range msg.Group(services) {
		if svc.Text != "" {
			records = append(records, svc.NewTXT(q.Name))
		}
	}
	return records, nil
}
// PTRRecords resolves the reverse (PTR) name in q via the backend and
// returns the single matching PTR record.
func (s *server) PTRRecords(q dns.Question) (records []dns.RR, err error) {
	serv, err := s.backend.ReverseRecord(strings.ToLower(q.Name))
	if err != nil {
		return nil, err
	}
	records = append(records, serv.NewPTR(q.Name, serv.Ttl))
	return records, nil
}
// NewSOA returns a SOA record for this SkyDNS instance. The serial is the
// current hour's Unix timestamp, so it advances automatically over time.
func (s *server) NewSOA() dns.RR {
	hdr := dns.RR_Header{Name: s.config.Domain, Rrtype: dns.TypeSOA, Class: dns.ClassINET, Ttl: s.config.Ttl}
	return &dns.SOA{
		Hdr:     hdr,
		Ns:      appendDomain("ns.dns", s.config.Domain),
		Mbox:    s.config.Hostmaster,
		Serial:  uint32(time.Now().Truncate(time.Hour).Unix()),
		Refresh: 28800,
		Retry:   7200,
		Expire:  604800,
		Minttl:  s.config.MinTtl,
	}
}
// isDuplicateCNAME reports whether records already contains a CNAME whose
// target equals r's target (used for loop detection while chasing chains).
func (s *server) isDuplicateCNAME(r *dns.CNAME, records []dns.RR) bool {
	for _, existing := range records {
		cn, ok := existing.(*dns.CNAME)
		if ok && cn.Target == r.Target {
			return true
		}
	}
	return false
}
// NameError turns m into an NXDOMAIN reply to req, placing the SOA record
// (with the negative-caching TTL) in the authority section, and bumps the
// name-error metrics.
func (s *server) NameError(m, req *dns.Msg) {
	m.SetRcode(req, dns.RcodeNameError)
	m.Ns = []dns.RR{s.NewSOA()}
	m.Ns[0].Header().Ttl = s.config.MinTtl
	StatsNameErrorCount.Inc(1)
	// Bug fix: WithLabelValues only fetches the labeled counter; the missing
	// .Inc() meant the nxdomain metric never advanced (compare the
	// "refused"/"truncated" call sites).
	promErrorCount.WithLabelValues("nxdomain").Inc()
}
// NoDataError turns m into a NODATA reply to req (rcode success, no answers),
// placing the SOA record with the negative-caching TTL in the authority
// section, and bumps the no-data metrics.
func (s *server) NoDataError(m, req *dns.Msg) {
	m.SetRcode(req, dns.RcodeSuccess)
	m.Ns = []dns.RR{s.NewSOA()}
	m.Ns[0].Header().Ttl = s.config.MinTtl
	StatsNoDataCount.Inc(1)
	// Bug fix: .Inc() was missing, so the nodata metric never advanced.
	promErrorCount.WithLabelValues("nodata").Inc()
}
// ServerFailure turns m into a SERVFAIL reply to req and bumps the metric.
func (s *server) ServerFailure(m, req *dns.Msg) {
	m.SetRcode(req, dns.RcodeServerFailure)
	// Bug fix: .Inc() was missing, so the servfail metric never advanced.
	promErrorCount.WithLabelValues("servfail").Inc()
}
// logNoConnection logs e when it is an etcd "not reachable" error; any other
// error (etcd or otherwise) is deliberately ignored.
func (s *server) logNoConnection(e error) {
	// Bug fix: the previous unchecked type assertion e.(*etcd.EtcdError)
	// panicked whenever e was any other error type; use the comma-ok form.
	if ee, ok := e.(*etcd.EtcdError); ok && ee.ErrorCode == etcd.ErrCodeEtcdNotReachable {
		logf("failure to connect to etcd: %s", e)
	}
}
// RoundRobin shuffles rrs in place so that repeated answers rotate the
// records. It is a no-op unless s.config.RoundRobin is enabled, and leaves
// the slice untouched when it holds more than one CNAME.
func (s *server) RoundRobin(rrs []dns.RR) {
	if !s.config.RoundRobin {
		return
	}
	// If we have more than 1 CNAME don't touch the packet, because some stub resolver (=glibc)
	// can't deal with the returned packet if the CNAMEs need to be accesses in the reverse order.
	cname := 0
	for _, r := range rrs {
		if r.Header().Rrtype == dns.TypeCNAME {
			cname++
			if cname > 1 {
				return
			}
		}
	}
	switch l := len(rrs); l {
	case 2:
		// Two records: a coin flip on the randomized DNS id decides the order.
		if dns.Id()%2 == 0 {
			rrs[0], rrs[1] = rrs[1], rrs[0]
		}
	default:
		// Otherwise perform a bounded number of random pairwise swaps,
		// using dns.Id() as a cheap randomness source.
		for j := 0; j < l*(int(dns.Id())%4+1); j++ {
			q := int(dns.Id()) % l
			p := int(dns.Id()) % l
			if q == p {
				p = (p + 1) % l
			}
			rrs[q], rrs[p] = rrs[p], rrs[q]
		}
	}
}
// dedup will de-duplicate a message on a per section basis.
// Multiple identical (same name, class, type and rdata) RRs will be coalesced into one.
func (s *server) dedup(m *dns.Msg) *dns.Msg {
	// Answer section
	ma := make(map[string]dns.RR)
	for _, a := range m.Answer {
		// Or use Pack()... Think this function also could be placed in go dns.
		// Map key: owner name + class + type (rdata fields appended below
		// for everything except CNAME).
		s1 := a.Header().Name
		s1 += strconv.Itoa(int(a.Header().Class))
		s1 += strconv.Itoa(int(a.Header().Rrtype))
		// there can only be one CNAME for an ownername
		if a.Header().Rrtype == dns.TypeCNAME {
			if _, ok := ma[s1]; ok {
				// already exist, randomly overwrite if roundrobin is true
				// Note: even with roundrobin *off* this depends on the
				// order we get the names.
				if s.config.RoundRobin && dns.Id()%2 == 0 {
					ma[s1] = a
					continue
				}
			}
			ma[s1] = a
			continue
		}
		// Non-CNAME: include every rdata field in the key so only fully
		// identical RRs collapse onto one entry.
		for i := 1; i <= dns.NumField(a); i++ {
			s1 += dns.Field(a, i)
		}
		ma[s1] = a
	}
	// Only is our map is smaller than the #RR in the answer section we should reset the RRs
	// in the section it self
	if len(ma) < len(m.Answer) {
		i := 0
		for _, v := range ma {
			m.Answer[i] = v
			i++
		}
		m.Answer = m.Answer[:len(ma)]
	}
	// Additional section
	me := make(map[string]dns.RR)
	for _, e := range m.Extra {
		s1 := e.Header().Name
		s1 += strconv.Itoa(int(e.Header().Class))
		s1 += strconv.Itoa(int(e.Header().Rrtype))
		// there can only be one CNAME for an ownername
		if e.Header().Rrtype == dns.TypeCNAME {
			if _, ok := me[s1]; ok {
				// already exist, randomly overwrite if roundrobin is true
				if s.config.RoundRobin && dns.Id()%2 == 0 {
					me[s1] = e
					continue
				}
			}
			me[s1] = e
			continue
		}
		for i := 1; i <= dns.NumField(e); i++ {
			s1 += dns.Field(e, i)
		}
		me[s1] = e
	}
	if len(me) < len(m.Extra) {
		i := 0
		for _, v := range me {
			m.Extra[i] = v
			i++
		}
		m.Extra = m.Extra[:len(me)]
	}
	return m
}
// isTCP returns true if the client is connecting over TCP, judged by the
// dynamic type of the connection's remote address.
func isTCP(w dns.ResponseWriter) bool {
	switch w.RemoteAddr().(type) {
	case *net.TCPAddr:
		return true
	default:
		return false
	}
}
Update server.go
// Copyright (c) 2014 The SkyDNS Authors. All rights reserved.
// Use of this source code is governed by The MIT License (MIT) that can be
// found in the LICENSE file.
package server
import (
"fmt"
"math"
"net"
"strconv"
"strings"
"sync"
"time"
"errors"
"github.com/coreos/go-etcd/etcd"
"github.com/coreos/go-systemd/activation"
"github.com/miekg/dns"
"github.com/skynetservices/skydns/cache"
"github.com/skynetservices/skydns/msg"
)
// Version is the SkyDNS version string; it is also served as the answer to
// CHAOS-class version.bind./version.server. TXT queries.
const Version = "2.5.2b"
// server is a SkyDNS DNS server: it answers queries from a Backend,
// forwards everything else, and keeps response and signature caches.
type server struct {
	backend Backend         // source of the service records that are served
	config  *Config
	group   *sync.WaitGroup // tracks the listener goroutines started by Run
	dnsUDPclient *dns.Client // used for forwarding queries
	dnsTCPclient *dns.Client // used for forwarding queries
	scache *cache.Cache // signature cache (sized by config.SCache, no TTL — see New)
	rcache *cache.Cache // response cache (sized by config.RCache, TTL config.RCacheTtl)
}
// Backend is the record store the server reads from. Records returns the
// services registered under name (exact is set by CNAME lookups; presumably
// it restricts matching to that exact name — TODO confirm against the
// backend implementations). ReverseRecord returns the single service for a
// reverse (PTR) name.
type Backend interface {
	Records(name string, exact bool) ([]msg.Service, error)
	ReverseRecord(name string) (*msg.Service, error)
}
// FirstBackend exposes the Backend interface over multiple Backends, returning
// the first Backend that answers the provided record request. If no Backend answers
// a record request, the last error seen will be returned.
type FirstBackend []Backend
// FirstBackend implements Backend (compile-time assertion).
var _ Backend = FirstBackend{}
// Records queries each wrapped Backend in order and returns the first
// non-empty, error-free result. If every Backend errors or comes up empty,
// the last error seen (possibly nil) is returned with a nil slice.
func (g FirstBackend) Records(name string, exact bool) (records []msg.Service, err error) {
	var lastErr error
	for _, b := range g {
		records, err = b.Records(name, exact)
		if err == nil && len(records) > 0 {
			return records, nil
		}
		if err != nil {
			lastErr = err
		}
	}
	return nil, lastErr
}
// ReverseRecord asks each wrapped Backend in order and returns the first
// non-nil record obtained without error. If none answers, the last error
// seen (possibly nil) is returned with a nil record.
func (g FirstBackend) ReverseRecord(name string) (record *msg.Service, err error) {
	var lastErr error
	for _, b := range g {
		record, err = b.ReverseRecord(name)
		if err == nil && record != nil {
			return record, nil
		}
		if err != nil {
			lastErr = err
		}
	}
	return nil, lastErr
}
// New returns a new SkyDNS server.
// scache (config.SCache entries, no TTL) and rcache (config.RCache entries,
// config.RCacheTtl TTL) back the signature and response caches respectively.
// The forwarding clients use twice the configured read timeout and collapse
// identical in-flight queries (SingleInflight).
func New(backend Backend, config *Config) *server {
	return &server{
		backend: backend,
		config:  config,
		group:   new(sync.WaitGroup),
		scache:  cache.New(config.SCache, 0),
		rcache:  cache.New(config.RCache, config.RCacheTtl),
		dnsUDPclient: &dns.Client{Net: "udp", ReadTimeout: 2 * config.ReadTimeout, WriteTimeout: 2 * config.ReadTimeout, SingleInflight: true},
		dnsTCPclient: &dns.Client{Net: "tcp", ReadTimeout: 2 * config.ReadTimeout, WriteTimeout: 2 * config.ReadTimeout, SingleInflight: true},
	}
}
// Run is a blocking operation that starts the server listening on the DNS ports.
// When s.config.Systemd is set, the server adopts the UDP packet conns and TCP
// listeners handed over by systemd socket activation; otherwise it binds both
// protocols on s.config.DnsAddr itself. Run returns only after every serving
// goroutine has finished (serve errors terminate the process via fatalf).
func (s *server) Run() error {
	mux := dns.NewServeMux()
	mux.Handle(".", s) // this server handles every query
	// dnsReadyMsg logs a startup banner for one listener address.
	dnsReadyMsg := func(addr, net string) {
		if s.config.DNSSEC == "" {
			logf("ready for queries on %s for %s://%s [rcache %d]", s.config.Domain, net, addr, s.config.RCache)
		} else {
			logf("ready for queries on %s for %s://%s [rcache %d], signing with %s [scache %d]", s.config.Domain, net, addr, s.config.RCache, s.config.DNSSEC, s.config.SCache)
		}
	}
	if s.config.Systemd {
		// Socket activation: UDP arrives as packet conns, TCP as listeners.
		packetConns, err := activation.PacketConns(false)
		if err != nil {
			return err
		}
		listeners, err := activation.Listeners(true)
		if err != nil {
			return err
		}
		if len(packetConns) == 0 && len(listeners) == 0 {
			return fmt.Errorf("no UDP or TCP sockets supplied by systemd")
		}
		for _, p := range packetConns {
			if u, ok := p.(*net.UDPConn); ok {
				s.group.Add(1)
				go func() {
					defer s.group.Done()
					if err := dns.ActivateAndServe(nil, u, mux); err != nil {
						fatalf("%s", err)
					}
				}()
				dnsReadyMsg(u.LocalAddr().String(), "udp")
			}
		}
		for _, l := range listeners {
			if t, ok := l.(*net.TCPListener); ok {
				s.group.Add(1)
				go func() {
					defer s.group.Done()
					if err := dns.ActivateAndServe(t, nil, mux); err != nil {
						fatalf("%s", err)
					}
				}()
				dnsReadyMsg(t.Addr().String(), "tcp")
			}
		}
	} else {
		// Self-managed sockets: one goroutine per protocol.
		s.group.Add(1)
		go func() {
			defer s.group.Done()
			if err := dns.ListenAndServe(s.config.DnsAddr, "tcp", mux); err != nil {
				fatalf("%s", err)
			}
		}()
		dnsReadyMsg(s.config.DnsAddr, "tcp")
		s.group.Add(1)
		go func() {
			defer s.group.Done()
			if err := dns.ListenAndServe(s.config.DnsAddr, "udp", mux); err != nil {
				fatalf("%s", err)
			}
		}()
		dnsReadyMsg(s.config.DnsAddr, "udp")
	}
	s.group.Wait()
	return nil
}
// Stop stops a server.
// NOTE(review): currently a no-op — listeners are not closed and the
// WaitGroup is not released, so Run keeps blocking after Stop returns.
func (s *server) Stop() {
	// TODO(miek)
	//s.group.Add(-2)
}
// ServeDNS is the handler for DNS requests, responsible for parsing a DNS
// request, possibly forwarding it to a real dns server, and returning a
// response. Answers are cached, optionally DNSSEC-signed and truncated to the
// client's advertised EDNS0 buffer size before being written to w.
func (s *server) ServeDNS(w dns.ResponseWriter, req *dns.Msg) {
	m := new(dns.Msg)
	m.SetReply(req)
	m.Authoritative = true
	m.RecursionAvailable = true
	m.Compress = true
	bufsize := uint16(512)
	dnssec := false
	tcp := false
	start := time.Now()
	// Refuse queries for type ANY outright.
	if req.Question[0].Qtype == dns.TypeANY {
		m.Authoritative = false
		m.Rcode = dns.RcodeRefused
		m.RecursionAvailable = false
		m.RecursionDesired = false
		m.Compress = false
		// if write fails don't care
		w.WriteMsg(m)
		promErrorCount.WithLabelValues("refused").Inc()
		return
	}
	if o := req.IsEdns0(); o != nil {
		bufsize = o.UDPSize()
		dnssec = o.Do()
	}
	if bufsize < 512 {
		bufsize = 512
	}
	// with TCP we can send 64K
	if tcp = isTCP(w); tcp {
		bufsize = dns.MaxMsgSize - 1
		promRequestCount.WithLabelValues("tcp").Inc()
	} else {
		promRequestCount.WithLabelValues("udp").Inc()
	}
	StatsRequestCount.Inc(1)
	if dnssec {
		StatsDnssecOkCount.Inc(1)
		promDnssecOkCount.Inc()
	}
	defer func() {
		promCacheSize.WithLabelValues("response").Set(float64(s.rcache.Size()))
	}()
	// Check cache first.
	key := cache.QuestionKey(req.Question[0], dnssec)
	m1, exp, hit := s.rcache.Search(key)
	if hit {
		// Cache hit! \o/
		if time.Since(exp) < 0 {
			m1.Id = m.Id
			m1.Compress = true
			m1.Truncated = false
			if dnssec {
				// The key for DNS/DNSSEC in cache is different, no
				// need to do Denial/Sign here.
				//if s.config.PubKey != nil {
				//s.Denial(m1) // not needed for cache hits
				//s.Sign(m1, bufsize)
				//}
			}
			if m1.Len() > int(bufsize) && !tcp {
				promErrorCount.WithLabelValues("truncated").Inc()
				m1.Truncated = true
			}
			// Still round-robin even with hits from the cache.
			// Only shuffle A and AAAA records with each other.
			if req.Question[0].Qtype == dns.TypeA || req.Question[0].Qtype == dns.TypeAAAA {
				s.RoundRobin(m1.Answer)
			}
			if err := w.WriteMsg(m1); err != nil {
				logf("failure to return reply %q", err)
			}
			metricSizeAndDuration(m1, start, tcp)
			return
		}
		// Expired! /o\
		s.rcache.Remove(key)
	}
	q := req.Question[0]
	name := strings.ToLower(q.Name)
	if s.config.Verbose {
		logf("received DNS Request for %q from %q with type %d", q.Name, w.RemoteAddr(), q.Qtype)
	}
	// Names under a configured stub zone are forwarded to that zone's servers.
	for zone, ns := range *s.config.stub {
		if strings.HasSuffix(name, zone) {
			resp := s.ServeDNSStubForward(w, req, ns)
			metricSizeAndDuration(resp, start, tcp)
			return
		}
	}
	// If the qname is local.dns.skydns.local. and s.config.Local != "", substitute that name.
	if s.config.Local != "" && name == s.config.localDomain {
		name = s.config.Local
	}
	// Bug fix: && binds tighter than || in Go, so the previous unparenthesized
	// condition routed *every* query ending in ".ip6.arpa." — regardless of
	// its qtype — to ServeDNSReverse. Only PTR queries should take the
	// reverse-lookup path.
	if q.Qtype == dns.TypePTR && (strings.HasSuffix(name, ".in-addr.arpa.") || strings.HasSuffix(name, ".ip6.arpa.")) {
		resp := s.ServeDNSReverse(w, req)
		metricSizeAndDuration(resp, start, tcp)
		return
	}
	// Anything outside our authoritative domain (and not CHAOS) is recursed.
	if q.Qclass != dns.ClassCHAOS && !strings.HasSuffix(name, s.config.Domain) {
		resp := s.ServeDNSForward(w, req)
		metricSizeAndDuration(resp, start, tcp)
		return
	}
	promCacheMiss.WithLabelValues("response").Inc()
	// This deferred closure post-processes m (dedup, TTL clamping, caching,
	// signing, size handling) and writes the final reply.
	defer func() {
		if m.Rcode == dns.RcodeServerFailure {
			if err := w.WriteMsg(m); err != nil {
				logf("failure to return reply %q", err)
			}
			return
		}
		// Set TTL to the minimum of the RRset and dedup the message, i.e.
		// remove identical RRs.
		m = s.dedup(m)
		minttl := s.config.Ttl
		if len(m.Answer) > 1 {
			for _, r := range m.Answer {
				if r.Header().Ttl < minttl {
					minttl = r.Header().Ttl
				}
			}
			for _, r := range m.Answer {
				r.Header().Ttl = minttl
			}
		}
		if !m.Truncated {
			s.rcache.InsertMessage(cache.QuestionKey(req.Question[0], dnssec), m)
		}
		if dnssec {
			if s.config.PubKey != nil {
				m.AuthenticatedData = true
				s.Denial(m)
				s.Sign(m, bufsize)
			}
		}
		if m.Len() > dns.MaxMsgSize {
			logf("overflowing maximum message size: %d, dropping additional section", m.Len())
			m.Extra = nil // Drop entire additional section to see if this helps.
			if m.Len() > dns.MaxMsgSize {
				// *Still* too large.
				logf("still overflowing maximum message size: %d", m.Len())
				promErrorCount.WithLabelValues("overflow").Inc()
				m1 := new(dns.Msg) // Use smaller msg to signal failure.
				m1.SetRcode(m, dns.RcodeServerFailure)
				if err := w.WriteMsg(m1); err != nil {
					logf("failure to return reply %q", err)
				}
				metricSizeAndDuration(m1, start, tcp)
				return
			}
		}
		if m.Len() > int(bufsize) && !tcp {
			m.Extra = nil // As above, drop entire additional section.
			if m.Len() > int(bufsize) {
				promErrorCount.WithLabelValues("truncated").Inc()
				m.Truncated = true
			}
		}
		if err := w.WriteMsg(m); err != nil {
			logf("failure to return reply %q %d", err, m.Len())
		}
		metricSizeAndDuration(m, start, tcp)
	}()
	// Apex of the zone: answer SOA/DNSKEY directly.
	if name == s.config.Domain {
		if q.Qtype == dns.TypeSOA {
			m.Answer = []dns.RR{s.NewSOA()}
			return
		}
		if q.Qtype == dns.TypeDNSKEY {
			if s.config.PubKey != nil {
				m.Answer = []dns.RR{s.config.PubKey}
				return
			}
		}
	}
	// CHAOS-class vanity TXT queries (authors/version/hostname).
	if q.Qclass == dns.ClassCHAOS {
		if q.Qtype == dns.TypeTXT {
			switch name {
			case "authors.bind.":
				fallthrough
			case s.config.Domain:
				hdr := dns.RR_Header{Name: q.Name, Rrtype: dns.TypeTXT, Class: dns.ClassCHAOS, Ttl: 0}
				authors := []string{"Erik St. Martin", "Brian Ketelsen", "Miek Gieben", "Michael Crosby"}
				for _, a := range authors {
					m.Answer = append(m.Answer, &dns.TXT{Hdr: hdr, Txt: []string{a}})
				}
				// Random swaps using dns.Id() as a cheap randomness source.
				for j := 0; j < len(authors)*(int(dns.Id())%4+1); j++ {
					q := int(dns.Id()) % len(authors)
					p := int(dns.Id()) % len(authors)
					if q == p {
						p = (p + 1) % len(authors)
					}
					m.Answer[q], m.Answer[p] = m.Answer[p], m.Answer[q]
				}
				return
			case "version.bind.":
				fallthrough
			case "version.server.":
				hdr := dns.RR_Header{Name: q.Name, Rrtype: dns.TypeTXT, Class: dns.ClassCHAOS, Ttl: 0}
				m.Answer = []dns.RR{&dns.TXT{Hdr: hdr, Txt: []string{Version}}}
				return
			case "hostname.bind.":
				fallthrough
			case "id.server.":
				// TODO(miek): machine name to return
				hdr := dns.RR_Header{Name: q.Name, Rrtype: dns.TypeTXT, Class: dns.ClassCHAOS, Ttl: 0}
				m.Answer = []dns.RR{&dns.TXT{Hdr: hdr, Txt: []string{"localhost"}}}
				return
			}
		}
		// still here, fail
		m.SetReply(req)
		m.SetRcode(req, dns.RcodeServerFailure)
		return
	}
	// Normal lookups against the backend; etcd error code 100 means NXDOMAIN.
	switch q.Qtype {
	case dns.TypeNS:
		if name != s.config.Domain {
			break
		}
		// Lookup s.config.DnsDomain
		records, extra, err := s.NSRecords(q, s.config.dnsDomain)
		if err != nil {
			if e, ok := err.(*etcd.EtcdError); ok {
				if e.ErrorCode == 100 {
					s.NameError(m, req)
					return
				}
			}
			logf("go error from backend: %s", err)
		}
		m.Answer = append(m.Answer, records...)
		m.Extra = append(m.Extra, extra...)
	case dns.TypeA, dns.TypeAAAA:
		records, err := s.AddressRecords(q, name, nil, bufsize, dnssec, false)
		if err != nil {
			if e, ok := err.(*etcd.EtcdError); ok {
				if e.ErrorCode == 100 {
					s.NameError(m, req)
					return
				}
			}
			logf("go error from backend: %s", err)
		}
		m.Answer = append(m.Answer, records...)
	case dns.TypeTXT:
		records, err := s.TXTRecords(q, name)
		if err != nil {
			if e, ok := err.(*etcd.EtcdError); ok {
				if e.ErrorCode == 100 {
					s.NameError(m, req)
					return
				}
			}
			logf("go error from backend: %s", err)
		}
		m.Answer = append(m.Answer, records...)
	case dns.TypeCNAME:
		records, err := s.CNAMERecords(q, name)
		if err != nil {
			if e, ok := err.(*etcd.EtcdError); ok {
				if e.ErrorCode == 100 {
					s.NameError(m, req)
					return
				}
			}
			logf("go error from backend: %s", err)
		}
		m.Answer = append(m.Answer, records...)
	case dns.TypeMX:
		records, extra, err := s.MXRecords(q, name, bufsize, dnssec)
		if err != nil {
			if e, ok := err.(*etcd.EtcdError); ok {
				if e.ErrorCode == 100 {
					s.NameError(m, req)
					return
				}
			}
			logf("go error from backend: %s", err)
		}
		m.Answer = append(m.Answer, records...)
		m.Extra = append(m.Extra, extra...)
	default:
		fallthrough // also catch other types, so that they return NODATA
	case dns.TypeSRV:
		records, extra, err := s.SRVRecords(q, name, bufsize, dnssec)
		if err != nil {
			if e, ok := err.(*etcd.EtcdError); ok {
				if e.ErrorCode == 100 {
					s.NameError(m, req)
					return
				}
			}
			logf("go error from backend: %s", err)
			if q.Qtype == dns.TypeSRV { // Otherwise NODATA
				s.ServerFailure(m, req)
				return
			}
		}
		// if we are here again, check the types, because an answer may only
		// be given for SRV. All other types should return NODATA, the
		// NXDOMAIN part is handled in the above code. TODO(miek): yes this
		// can be done in a more elegant manor.
		if q.Qtype == dns.TypeSRV {
			m.Answer = append(m.Answer, records...)
			m.Extra = append(m.Extra, extra...)
		}
	}
	if len(m.Answer) == 0 { // NODATA response
		StatsNoDataCount.Inc(1)
		m.Ns = []dns.RR{s.NewSOA()}
		m.Ns[0].Header().Ttl = s.config.MinTtl
	}
}
// AddressRecords returns A/AAAA records for name from the backend, following
// CNAME chains where needed. previousRecords carries the CNAMEs collected so
// far so chain loops and excessive depth (8) can be detected. When both is
// true, records for both address families are returned regardless of q.Qtype.
// Records are shuffled in place when round robin is enabled.
func (s *server) AddressRecords(q dns.Question, name string, previousRecords []dns.RR, bufsize uint16, dnssec, both bool) (records []dns.RR, err error) {
	defaultName := "1." + s.config.Domain
	if name == s.config.Domain {
		name = defaultName
	}
	services, err := s.backend.Records(name, false)
	if err != nil {
		if name != defaultName {
			// Fall back to the default name. Fix: the recursive call must go
			// through the receiver; a bare AddressRecords is undefined at
			// package scope.
			return s.AddressRecords(q, defaultName, previousRecords, bufsize, dnssec, both)
		}
		return nil, err
	}
	services = msg.Group(services)
	for _, serv := range services {
		ip := net.ParseIP(serv.Host)
		switch {
		case ip == nil:
			// Try to resolve as CNAME if it's not an IP, but only if we don't create loops.
			if q.Name == dns.Fqdn(serv.Host) {
				// x CNAME x is a direct loop, don't add those
				continue
			}
			newRecord := serv.NewCNAME(q.Name, dns.Fqdn(serv.Host))
			if len(previousRecords) > 7 {
				logf("CNAME lookup limit of 8 exceeded for %s", newRecord)
				// don't add it, and just continue
				continue
			}
			if s.isDuplicateCNAME(newRecord, previousRecords) {
				logf("CNAME loop detected for record %s", newRecord)
				continue
			}
			nextRecords, err := s.AddressRecords(dns.Question{Name: dns.Fqdn(serv.Host), Qtype: q.Qtype, Qclass: q.Qclass},
				strings.ToLower(dns.Fqdn(serv.Host)), append(previousRecords, newRecord), bufsize, dnssec, both)
			if err == nil {
				// Only if we found something should we add the CNAME and the IP addresses.
				if len(nextRecords) > 0 {
					records = append(records, newRecord)
					records = append(records, nextRecords...)
				}
				continue
			}
			// This means we can not complete the CNAME, try to look elsewhere.
			target := newRecord.Target
			if dns.IsSubDomain(s.config.Domain, target) {
				// We should already have found it
				continue
			}
			m1, e1 := s.Lookup(target, q.Qtype, bufsize, dnssec)
			if e1 != nil {
				logf("incomplete CNAME chain: %s", e1)
				continue
			}
			// Add the CNAME plus whatever the external lookup returned.
			// (An unreachable logf that followed a continue here was removed.)
			records = append(records, newRecord)
			records = append(records, m1.Answer...)
		case ip.To4() != nil && (q.Qtype == dns.TypeA || both):
			records = append(records, serv.NewA(q.Name, ip.To4()))
		case ip.To4() == nil && (q.Qtype == dns.TypeAAAA || both):
			records = append(records, serv.NewAAAA(q.Name, ip.To16()))
		}
	}
	if s.config.RoundRobin {
		s.RoundRobin(records)
	}
	return records, nil
}
// NSRecords returns NS records for name from etcd, with A/AAAA glue records
// in extra. Each registered host must be an IP address; the nameserver's
// owner name is derived from its etcd key.
//
// Fix: removed the unconditional "Not Implement !" error return that made
// the entire implementation below unreachable and broke NS resolution.
func (s *server) NSRecords(q dns.Question, name string) (records []dns.RR, extra []dns.RR, err error) {
	services, err := s.backend.Records(name, false)
	if err != nil {
		return nil, nil, err
	}
	services = msg.Group(services)
	for _, serv := range services {
		ip := net.ParseIP(serv.Host)
		switch {
		case ip == nil:
			return nil, nil, fmt.Errorf("NS record must be an IP address")
		case ip.To4() != nil:
			serv.Host = msg.Domain(serv.Key)
			records = append(records, serv.NewNS(q.Name, serv.Host))
			extra = append(extra, serv.NewA(serv.Host, ip.To4()))
		case ip.To4() == nil:
			serv.Host = msg.Domain(serv.Key)
			records = append(records, serv.NewNS(q.Name, serv.Host))
			extra = append(extra, serv.NewAAAA(serv.Host, ip.To16()))
		}
	}
	return records, extra, nil
}
// SRVRecords returns SRV records from etcd.
// If the Target is not a name but an IP address, a name is created.
// Weights are normalized per priority so that all SRV records of one
// priority sum to (roughly) 100.
//
// Fixes: removed the unconditional "Not Implement !" error return that made
// the implementation unreachable, and used keyed fields in the dns.Question
// literal (the unkeyed form had Qtype/Qclass positionally swapped and only
// worked because dns.ClassINET == dns.TypeA == 1).
func (s *server) SRVRecords(q dns.Question, name string, bufsize uint16, dnssec bool) (records []dns.RR, extra []dns.RR, err error) {
	services, err := s.backend.Records(name, false)
	if err != nil {
		return nil, nil, err
	}
	services = msg.Group(services)
	// Looping twice to get the right weight vs priority:
	// first accumulate the total weight per priority.
	w := make(map[int]int)
	for _, serv := range services {
		weight := 100
		if serv.Weight != 0 {
			weight = serv.Weight
		}
		if _, ok := w[serv.Priority]; !ok {
			w[serv.Priority] = weight
			continue
		}
		w[serv.Priority] += weight
	}
	lookup := make(map[string]bool)
	for _, serv := range services {
		// Scale this service's weight to its share of the priority total.
		w1 := 100.0 / float64(w[serv.Priority])
		if serv.Weight == 0 {
			w1 *= 100
		} else {
			w1 *= float64(serv.Weight)
		}
		weight := uint16(math.Floor(w1))
		ip := net.ParseIP(serv.Host)
		switch {
		case ip == nil:
			srv := serv.NewSRV(q.Name, weight)
			records = append(records, srv)
			if _, ok := lookup[srv.Target]; ok {
				break
			}
			lookup[srv.Target] = true
			if !dns.IsSubDomain(s.config.Domain, srv.Target) {
				m1, e1 := s.Lookup(srv.Target, dns.TypeA, bufsize, dnssec)
				if e1 == nil {
					extra = append(extra, m1.Answer...)
				}
				m1, e1 = s.Lookup(srv.Target, dns.TypeAAAA, bufsize, dnssec)
				if e1 == nil {
					// If we have seen CNAME's we *assume* that they are already added.
					for _, a := range m1.Answer {
						if _, ok := a.(*dns.CNAME); !ok {
							extra = append(extra, a)
						}
					}
				}
				break
			}
			// Internal name, we should have some info on them, either v4 or v6
			// Clients expect a complete answer, because we are a recursor in their
			// view.
			addr, e1 := s.AddressRecords(dns.Question{Name: srv.Target, Qtype: dns.TypeA, Qclass: dns.ClassINET},
				srv.Target, nil, bufsize, dnssec, true)
			if e1 == nil {
				extra = append(extra, addr...)
			}
		case ip.To4() != nil:
			serv.Host = msg.Domain(serv.Key)
			srv := serv.NewSRV(q.Name, weight)
			records = append(records, srv)
			extra = append(extra, serv.NewA(srv.Target, ip.To4()))
		case ip.To4() == nil:
			serv.Host = msg.Domain(serv.Key)
			srv := serv.NewSRV(q.Name, weight)
			records = append(records, srv)
			extra = append(extra, serv.NewAAAA(srv.Target, ip.To16()))
		}
	}
	return records, extra, nil
}
// MXRecords returns MX records from etcd.
// If the Target is not a name but an IP address, a name is created.
// Only services flagged Mail are considered.
//
// Fixes: removed the unconditional "Not Implement !" error return that made
// the implementation unreachable, and used keyed fields in the dns.Question
// literal (the unkeyed form had Qtype/Qclass positionally swapped and only
// worked because dns.ClassINET == dns.TypeA == 1).
func (s *server) MXRecords(q dns.Question, name string, bufsize uint16, dnssec bool) (records []dns.RR, extra []dns.RR, err error) {
	services, err := s.backend.Records(name, false)
	if err != nil {
		return nil, nil, err
	}
	lookup := make(map[string]bool)
	for _, serv := range services {
		if !serv.Mail {
			continue
		}
		ip := net.ParseIP(serv.Host)
		switch {
		case ip == nil:
			mx := serv.NewMX(q.Name)
			records = append(records, mx)
			if _, ok := lookup[mx.Mx]; ok {
				break
			}
			lookup[mx.Mx] = true
			if !dns.IsSubDomain(s.config.Domain, mx.Mx) {
				m1, e1 := s.Lookup(mx.Mx, dns.TypeA, bufsize, dnssec)
				if e1 == nil {
					extra = append(extra, m1.Answer...)
				}
				m1, e1 = s.Lookup(mx.Mx, dns.TypeAAAA, bufsize, dnssec)
				if e1 == nil {
					// If we have seen CNAME's we *assume* that they are already added.
					for _, a := range m1.Answer {
						if _, ok := a.(*dns.CNAME); !ok {
							extra = append(extra, a)
						}
					}
				}
				break
			}
			// Internal name: resolve it ourselves so clients get a complete answer.
			addr, e1 := s.AddressRecords(dns.Question{Name: mx.Mx, Qtype: dns.TypeA, Qclass: dns.ClassINET},
				mx.Mx, nil, bufsize, dnssec, true)
			if e1 == nil {
				extra = append(extra, addr...)
			}
		case ip.To4() != nil:
			serv.Host = msg.Domain(serv.Key)
			records = append(records, serv.NewMX(q.Name))
			extra = append(extra, serv.NewA(serv.Host, ip.To4()))
		case ip.To4() == nil:
			serv.Host = msg.Domain(serv.Key)
			records = append(records, serv.NewMX(q.Name))
			extra = append(extra, serv.NewAAAA(serv.Host, ip.To16()))
		}
	}
	return records, extra, nil
}
// CNAMERecords returns the CNAME record for name from etcd when the first
// grouped service's host is not an IP address.
//
// Fix: removed the unconditional "Not Implement !" error return that made
// the implementation below unreachable.
func (s *server) CNAMERecords(q dns.Question, name string) (records []dns.RR, err error) {
	services, err := s.backend.Records(name, true)
	if err != nil {
		return nil, err
	}
	services = msg.Group(services)
	if len(services) > 0 {
		serv := services[0]
		if ip := net.ParseIP(serv.Host); ip == nil {
			records = append(records, serv.NewCNAME(q.Name, dns.Fqdn(serv.Host)))
		}
	}
	return records, nil
}
// TXTRecords returns TXT records for name from etcd; services with an empty
// Text field are skipped.
//
// Fix: removed the unconditional "Not Implement !" error return that made
// the implementation below unreachable.
func (s *server) TXTRecords(q dns.Question, name string) (records []dns.RR, err error) {
	services, err := s.backend.Records(name, false)
	if err != nil {
		return nil, err
	}
	services = msg.Group(services)
	for _, serv := range services {
		if serv.Text == "" {
			continue
		}
		records = append(records, serv.NewTXT(q.Name))
	}
	return records, nil
}
// PTRRecords returns the PTR record for the reverse name in q from etcd.
//
// Fix: removed the unconditional "Not Implement !" error return that made
// the implementation below unreachable.
func (s *server) PTRRecords(q dns.Question) (records []dns.RR, err error) {
	name := strings.ToLower(q.Name)
	serv, err := s.backend.ReverseRecord(name)
	if err != nil {
		return nil, err
	}
	records = append(records, serv.NewPTR(q.Name, serv.Ttl))
	return records, nil
}
// NewSOA returns the SOA record for this SkyDNS instance. The serial is the
// current hour's Unix timestamp, so it changes at most once per hour.
func (s *server) NewSOA() dns.RR {
	header := dns.RR_Header{Name: s.config.Domain, Rrtype: dns.TypeSOA, Class: dns.ClassINET, Ttl: s.config.Ttl}
	soa := &dns.SOA{
		Hdr:     header,
		Ns:      appendDomain("ns.dns", s.config.Domain),
		Mbox:    s.config.Hostmaster,
		Serial:  uint32(time.Now().Truncate(time.Hour).Unix()),
		Refresh: 28800,
		Retry:   7200,
		Expire:  604800,
		Minttl:  s.config.MinTtl,
	}
	return soa
}
// isDuplicateCNAME reports whether records already contains a CNAME with the
// same target as r.
func (s *server) isDuplicateCNAME(r *dns.CNAME, records []dns.RR) bool {
	for _, existing := range records {
		cname, ok := existing.(*dns.CNAME)
		if !ok {
			continue
		}
		if cname.Target == r.Target {
			return true
		}
	}
	return false
}
// NameError sets an NXDOMAIN response on m (with the SOA in the authority
// section for negative caching) and bumps the error counters.
func (s *server) NameError(m, req *dns.Msg) {
	m.SetRcode(req, dns.RcodeNameError)
	m.Ns = []dns.RR{s.NewSOA()}
	m.Ns[0].Header().Ttl = s.config.MinTtl
	StatsNameErrorCount.Inc(1)
	// Fix: WithLabelValues only fetches the labeled counter; it must be
	// incremented explicitly or the metric never moves.
	promErrorCount.WithLabelValues("nxdomain").Inc()
}
// NoDataError sets a NODATA response on m: rcode success, no answers, the
// SOA in the authority section for negative caching.
func (s *server) NoDataError(m, req *dns.Msg) {
	m.SetRcode(req, dns.RcodeSuccess)
	m.Ns = []dns.RR{s.NewSOA()}
	m.Ns[0].Header().Ttl = s.config.MinTtl
	StatsNoDataCount.Inc(1)
	// Fix: the labeled counter must actually be incremented.
	promErrorCount.WithLabelValues("nodata").Inc()
}
// ServerFailure sets a SERVFAIL response on m and bumps the error counter.
func (s *server) ServerFailure(m, req *dns.Msg) {
	m.SetRcode(req, dns.RcodeServerFailure)
	// Fix: the labeled counter must actually be incremented.
	promErrorCount.WithLabelValues("servfail").Inc()
}
// logNoConnection logs etcd reachability failures. Fix: the previous
// unchecked type assertion panicked whenever e was not an *etcd.EtcdError;
// use the comma-ok form and ignore other error types.
func (s *server) logNoConnection(e error) {
	ee, ok := e.(*etcd.EtcdError)
	if ok && ee.ErrorCode == etcd.ErrCodeEtcdNotReachable {
		logf("failure to connect to etcd: %s", e)
	}
}
// RoundRobin shuffles rrs in place so repeated queries spread load across
// equivalent records. It is a no-op when round robin is disabled.
func (s *server) RoundRobin(rrs []dns.RR) {
	if !s.config.RoundRobin {
		return
	}
	// If we have more than 1 CNAME don't touch the packet, because some stub resolver (=glibc)
	// can't deal with the returned packet if the CNAMEs need to be accesses in the reverse order.
	cname := 0
	for _, r := range rrs {
		if r.Header().Rrtype == dns.TypeCNAME {
			cname++
			if cname > 1 {
				return
			}
		}
	}
	switch l := len(rrs); l {
	case 2:
		// Coin flip on the pair, using dns.Id() as a cheap randomness source.
		if dns.Id()%2 == 0 {
			rrs[0], rrs[1] = rrs[1], rrs[0]
		}
	default:
		// Perform a random number (up to 4*l) of random pairwise swaps.
		// When both indices collide, p is nudged forward so the iteration
		// still swaps something. For l == 0 the loop body never runs.
		for j := 0; j < l*(int(dns.Id())%4+1); j++ {
			q := int(dns.Id()) % l
			p := int(dns.Id()) % l
			if q == p {
				p = (p + 1) % l
			}
			rrs[q], rrs[p] = rrs[p], rrs[q]
		}
	}
}
// dedup will de-duplicate a message on a per section basis.
// Multiple identical (same name, class, type and rdata) RRs will be coalesced into one.
// The answer and additional sections are compacted in place; m is returned
// for convenience. Note: surviving-record order follows map iteration and is
// therefore not deterministic.
func (s *server) dedup(m *dns.Msg) *dns.Msg {
	// Answer section
	ma := make(map[string]dns.RR)
	for _, a := range m.Answer {
		// Build a key from name+class+type (and, below, the rdata fields).
		// Or use Pack()... Think this function also could be placed in go dns.
		s1 := a.Header().Name
		s1 += strconv.Itoa(int(a.Header().Class))
		s1 += strconv.Itoa(int(a.Header().Rrtype))
		// there can only be one CNAME for an ownername, so the rdata is
		// deliberately excluded from the key for CNAMEs
		if a.Header().Rrtype == dns.TypeCNAME {
			if _, ok := ma[s1]; ok {
				// already exist, randomly overwrite if roundrobin is true
				// Note: even with roundrobin *off* this depends on the
				// order we get the names.
				if s.config.RoundRobin && dns.Id()%2 == 0 {
					ma[s1] = a
					continue
				}
			}
			ma[s1] = a
			continue
		}
		// Non-CNAME: append every rdata field so only exact duplicates collide.
		for i := 1; i <= dns.NumField(a); i++ {
			s1 += dns.Field(a, i)
		}
		ma[s1] = a
	}
	// Only if our map is smaller than the #RR in the answer section we should reset the RRs
	// in the section it self
	if len(ma) < len(m.Answer) {
		i := 0
		for _, v := range ma {
			m.Answer[i] = v
			i++
		}
		m.Answer = m.Answer[:len(ma)]
	}
	// Additional section: same scheme as the answer section above.
	me := make(map[string]dns.RR)
	for _, e := range m.Extra {
		s1 := e.Header().Name
		s1 += strconv.Itoa(int(e.Header().Class))
		s1 += strconv.Itoa(int(e.Header().Rrtype))
		// there can only be one CNAME for an ownername
		if e.Header().Rrtype == dns.TypeCNAME {
			if _, ok := me[s1]; ok {
				// already exist, randomly overwrite if roundrobin is true
				if s.config.RoundRobin && dns.Id()%2 == 0 {
					me[s1] = e
					continue
				}
			}
			me[s1] = e
			continue
		}
		for i := 1; i <= dns.NumField(e); i++ {
			s1 += dns.Field(e, i)
		}
		me[s1] = e
	}
	if len(me) < len(m.Extra) {
		i := 0
		for _, v := range me {
			m.Extra[i] = v
			i++
		}
		m.Extra = m.Extra[:len(me)]
	}
	return m
}
// isTCP reports whether the client behind w connected over TCP.
func isTCP(w dns.ResponseWriter) bool {
	if _, ok := w.RemoteAddr().(*net.TCPAddr); ok {
		return true
	}
	return false
}
|
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package cookiejar implements an RFC 6265-compliant http.CookieJar.
//
// TODO: example code to create a memory-backed cookie jar with the default
// public suffix list.
package cookiejar
import (
"net/http"
"net/url"
)
// PublicSuffixList provides the public suffix of a domain. For example:
//      - the public suffix of "example.com" is "com",
//      - the public suffix of "foo1.foo2.foo3.co.uk" is "co.uk", and
//      - the public suffix of "bar.pvt.k12.ma.us" is "pvt.k12.ma.us".
//
// Implementations of PublicSuffixList must be safe for concurrent use by
// multiple goroutines.
//
// An implementation that always returns "" is valid and may be useful for
// testing but it is not secure: it means that the HTTP server for foo.com can
// set a cookie for bar.com.
type PublicSuffixList interface {
	// PublicSuffix returns the public suffix of domain.
	//
	// TODO: specify which of the caller and callee is responsible for IP
	// addresses, for leading and trailing dots, for case sensitivity, and
	// for IDN/Punycode.
	PublicSuffix(domain string) string

	// String returns a description of the source of this public suffix list.
	// A Jar will store its PublicSuffixList's description in its storage,
	// and update the stored cookies if its list has a different description
	// than the stored list. The description will typically contain something
	// like a time stamp or version number.
	String() string
}
// Options are the options for creating a new Jar. Both fields are required;
// see New.
type Options struct {
	// Storage is the cookie jar storage. It may not be nil.
	Storage Storage

	// PublicSuffixList is the public suffix list that determines whether an
	// HTTP server can set a cookie for a domain. It may not be nil.
	PublicSuffixList PublicSuffixList

	// TODO: ErrorFunc for handling storage errors?
}
// Jar implements the http.CookieJar interface from the net/http package.
// Both fields are copied from the Options passed to New.
type Jar struct {
	storage Storage          // backing cookie storage
	psList PublicSuffixList // domain policy used when setting cookies
}
// New returns a new cookie jar backed by o's storage and governed by o's
// public suffix list.
func New(o *Options) *Jar {
	jar := &Jar{
		storage: o.Storage,
		psList:  o.PublicSuffixList,
	}
	return jar
}
// TODO(nigeltao): how do we reject HttpOnly cookies? Do we post-process the
// return value from Jar.Cookies?
//
// HttpOnly cookies are those for regular HTTP(S) requests but should not be
// visible from JavaScript. The HttpOnly bit mitigates XSS attacks; it's not
// for HTTP vs HTTPS vs FTP transports.

// Cookies implements the Cookies method of the http.CookieJar interface.
//
// It returns an empty slice if the URL's scheme is not HTTP or HTTPS.
func (j *Jar) Cookies(u *url.URL) []*http.Cookie {
	// TODO: not yet implemented; no cookies are ever returned.
	var cookies []*http.Cookie
	return cookies
}
// SetCookies implements the SetCookies method of the http.CookieJar interface.
//
// It does nothing if the URL's scheme is not HTTP or HTTPS.
func (j *Jar) SetCookies(u *url.URL, cookies []*http.Cookie) {
	// TODO: not yet implemented; incoming cookies are currently dropped.
}
exp/cookiejar: update PublicSuffixList doc comment to match the
examples at http://publicsuffix.org/.
That website previously listed pvt.k12.wy.us, but that was an error,
as confirmed by correspondence with submissions@publicsuffix.org, and
the website was fixed on 2013-01-23.
R=adg
CC=dr.volker.dobler, golang-dev
https://codereview.appspot.com/7241053
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package cookiejar implements an RFC 6265-compliant http.CookieJar.
//
// TODO: example code to create a memory-backed cookie jar with the default
// public suffix list.
package cookiejar
import (
"net/http"
"net/url"
)
// PublicSuffixList provides the public suffix of a domain. For example:
//      - the public suffix of "example.com" is "com",
//      - the public suffix of "foo1.foo2.foo3.co.uk" is "co.uk", and
//      - the public suffix of "bar.pvt.k12.ma.us" is "pvt.k12.ma.us".
// (Examples follow the list published at http://publicsuffix.org/.)
//
// Implementations of PublicSuffixList must be safe for concurrent use by
// multiple goroutines.
//
// An implementation that always returns "" is valid and may be useful for
// testing but it is not secure: it means that the HTTP server for foo.com can
// set a cookie for bar.com.
type PublicSuffixList interface {
	// PublicSuffix returns the public suffix of domain.
	//
	// TODO: specify which of the caller and callee is responsible for IP
	// addresses, for leading and trailing dots, for case sensitivity, and
	// for IDN/Punycode.
	PublicSuffix(domain string) string

	// String returns a description of the source of this public suffix list.
	// A Jar will store its PublicSuffixList's description in its storage,
	// and update the stored cookies if its list has a different description
	// than the stored list. The description will typically contain something
	// like a time stamp or version number.
	String() string
}
// Options are the options for creating a new Jar. Both fields are required;
// see New.
type Options struct {
	// Storage is the cookie jar storage. It may not be nil.
	Storage Storage

	// PublicSuffixList is the public suffix list that determines whether an
	// HTTP server can set a cookie for a domain. It may not be nil.
	PublicSuffixList PublicSuffixList

	// TODO: ErrorFunc for handling storage errors?
}
// Jar implements the http.CookieJar interface from the net/http package.
// Both fields are copied from the Options passed to New.
type Jar struct {
	storage Storage          // backing cookie storage
	psList PublicSuffixList // domain policy used when setting cookies
}
// New returns a new cookie jar backed by o's storage and governed by o's
// public suffix list.
func New(o *Options) *Jar {
	jar := &Jar{
		storage: o.Storage,
		psList:  o.PublicSuffixList,
	}
	return jar
}
// TODO(nigeltao): how do we reject HttpOnly cookies? Do we post-process the
// return value from Jar.Cookies?
//
// HttpOnly cookies are those for regular HTTP(S) requests but should not be
// visible from JavaScript. The HttpOnly bit mitigates XSS attacks; it's not
// for HTTP vs HTTPS vs FTP transports.

// Cookies implements the Cookies method of the http.CookieJar interface.
//
// It returns an empty slice if the URL's scheme is not HTTP or HTTPS.
func (j *Jar) Cookies(u *url.URL) []*http.Cookie {
	// TODO: not yet implemented; no cookies are ever returned.
	var cookies []*http.Cookie
	return cookies
}
// SetCookies implements the SetCookies method of the http.CookieJar interface.
//
// It does nothing if the URL's scheme is not HTTP or HTTPS.
func (j *Jar) SetCookies(u *url.URL, cookies []*http.Cookie) {
	// TODO: not yet implemented; incoming cookies are currently dropped.
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package x11
import (
"bufio"
"io"
"os"
)
// readU16BE reads a big-endian uint16 from r, using b as a scratch buffer.
// b must be at least 2 bytes long.
func readU16BE(r io.Reader, b []byte) (uint16, os.Error) {
	if _, err := io.ReadFull(r, b[0:2]); err != nil {
		return 0, err
	}
	// High byte first; | and + are equivalent here as the bit ranges are disjoint.
	return uint16(b[0])<<8 | uint16(b[1]), nil
}
// readStr reads a length-prefixed string from r, using b as a scratch buffer.
// The string must fit entirely within b.
func readStr(r io.Reader, b []byte) (string, os.Error) {
	n, err := readU16BE(r, b)
	if err != nil {
		return "", err
	}
	if int(n) > len(b) {
		return "", os.NewError("Xauthority entry too long for buffer")
	}
	if _, err = io.ReadFull(r, b[0:n]); err != nil {
		return "", err
	}
	return string(b[0:n]), nil
}
// readAuth reads the X authority file and returns the name/data pair for the display.
// displayStr is the "12" out of a $DISPLAY like ":12.0".
//
// Fix: the loop previously declared its locals with :=, shadowing the named
// result err; a bare return on a read failure then yielded ("", "", nil),
// silently losing the error. The locals are now predeclared and assigned
// with = so failures propagate.
func readAuth(displayStr string) (name, data string, err os.Error) {
	// b is a scratch buffer to use and should be at least 256 bytes long
	// (i.e. it should be able to hold a hostname).
	var b [256]byte
	// As per /usr/include/X11/Xauth.h.
	const familyLocal = 256
	fn := os.Getenv("XAUTHORITY")
	if fn == "" {
		home := os.Getenv("HOME")
		if home == "" {
			err = os.NewError("Xauthority not found: $XAUTHORITY, $HOME not set")
			return
		}
		fn = home + "/.Xauthority"
	}
	r, err := os.Open(fn, os.O_RDONLY, 0444)
	if err != nil {
		return
	}
	defer r.Close()
	br := bufio.NewReader(r)
	hostname, err := os.Hostname()
	if err != nil {
		return
	}
	for {
		var family uint16
		var addr, disp, name0, data0 string
		family, err = readU16BE(br, b[0:2])
		if err != nil {
			return
		}
		addr, err = readStr(br, b[0:])
		if err != nil {
			return
		}
		disp, err = readStr(br, b[0:])
		if err != nil {
			return
		}
		name0, err = readStr(br, b[0:])
		if err != nil {
			return
		}
		data0, err = readStr(br, b[0:])
		if err != nil {
			return
		}
		if family == familyLocal && addr == hostname && disp == displayStr {
			return name0, data0, nil
		}
	}
	panic("unreachable")
}
exp/draw/x11: temporarily work around compiler bug 1011.
R=r
CC=golang-dev
http://codereview.appspot.com/1951041
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package x11
import (
"bufio"
"io"
"os"
)
// readU16BE reads a big-endian uint16 from r, using b as a scratch buffer.
// b must be at least 2 bytes long.
func readU16BE(r io.Reader, b []byte) (uint16, os.Error) {
	_, err := io.ReadFull(r, b[0:2])
	if err != nil {
		return 0, err
	}
	// TODO(nigeltao): remove the workaround when bug 1011 gets fixed.
	// The intermediate variable avoids miscompilation of the combined
	// expression in the return statement; do not fold these lines back.
	//return uint16(b[0])<<8 + uint16(b[1]), nil
	ret := uint16(b[0])<<8 + uint16(b[1])
	return ret, nil
}
// readStr reads a length-prefixed string from r, using b as a scratch buffer.
// The string must fit entirely within b.
func readStr(r io.Reader, b []byte) (string, os.Error) {
	n, err := readU16BE(r, b)
	if err != nil {
		return "", err
	}
	if int(n) > len(b) {
		return "", os.NewError("Xauthority entry too long for buffer")
	}
	if _, err = io.ReadFull(r, b[0:n]); err != nil {
		return "", err
	}
	return string(b[0:n]), nil
}
// readAuth reads the X authority file and returns the name/data pair for the display.
// displayStr is the "12" out of a $DISPLAY like ":12.0".
//
// Fix: the loop previously declared its locals with :=, shadowing the named
// result err; a bare return on a read failure then yielded ("", "", nil),
// silently losing the error. The locals are now predeclared and assigned
// with = so failures propagate.
func readAuth(displayStr string) (name, data string, err os.Error) {
	// b is a scratch buffer to use and should be at least 256 bytes long
	// (i.e. it should be able to hold a hostname).
	var b [256]byte
	// As per /usr/include/X11/Xauth.h.
	const familyLocal = 256
	fn := os.Getenv("XAUTHORITY")
	if fn == "" {
		home := os.Getenv("HOME")
		if home == "" {
			err = os.NewError("Xauthority not found: $XAUTHORITY, $HOME not set")
			return
		}
		fn = home + "/.Xauthority"
	}
	r, err := os.Open(fn, os.O_RDONLY, 0444)
	if err != nil {
		return
	}
	defer r.Close()
	br := bufio.NewReader(r)
	hostname, err := os.Hostname()
	if err != nil {
		return
	}
	for {
		var family uint16
		var addr, disp, name0, data0 string
		family, err = readU16BE(br, b[0:2])
		if err != nil {
			return
		}
		addr, err = readStr(br, b[0:])
		if err != nil {
			return
		}
		disp, err = readStr(br, b[0:])
		if err != nil {
			return
		}
		name0, err = readStr(br, b[0:])
		if err != nil {
			return
		}
		data0, err = readStr(br, b[0:])
		if err != nil {
			return
		}
		if family == familyLocal && addr == hostname && disp == displayStr {
			return name0, data0, nil
		}
	}
	panic("unreachable")
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The jpeg package implements a decoder for JPEG images, as defined in ITU-T T.81.
package jpeg
// See http://www.w3.org/Graphics/JPEG/itu-t81.pdf
import (
"bufio"
"image"
"io"
"os"
)
// A FormatError reports that the input is not a valid JPEG.
type FormatError string

// String returns the error text prefixed with "invalid JPEG format: ".
func (e FormatError) String() string {
	text := "invalid JPEG format: " + string(e)
	return text
}
// An UnsupportedError reports that the input uses a valid but unimplemented JPEG feature.
type UnsupportedError string

// String returns the error text prefixed with "unsupported JPEG feature: ".
func (e UnsupportedError) String() string {
	text := "unsupported JPEG feature: " + string(e)
	return text
}
// Component specification, specified in section B.2.2.
// One entry is filled in per image component while parsing the SOF segment.
type component struct {
	c uint8 // Component identifier.
	h uint8 // Horizontal sampling factor.
	v uint8 // Vertical sampling factor.
	tq uint8 // Quantization table destination selector.
}
const (
	blockSize = 64 // A DCT block is 8x8.
	// Huffman table classes (Tc): 0 selects DC tables, 1 selects AC tables.
	dcTableClass = 0
	acTableClass = 1
	// Upper bounds for the Tc/Th/Tq table selectors read from the stream.
	maxTc = 1
	maxTh = 3
	maxTq = 3
	// We only support 4:4:4, 4:2:2 and 4:2:0 downsampling, and assume that the components are Y, Cb, Cr.
	nComponent = 3
	maxH = 2
	maxV = 2
)
// JPEG marker codes: the byte that follows a 0xff in the stream.
const (
	soiMarker = 0xd8 // Start Of Image.
	eoiMarker = 0xd9 // End Of Image.
	sof0Marker = 0xc0 // Start Of Frame (Baseline).
	sof2Marker = 0xc2 // Start Of Frame (Progressive).
	dhtMarker = 0xc4 // Define Huffman Table.
	dqtMarker = 0xdb // Define Quantization Table.
	sosMarker = 0xda // Start Of Scan.
	app0Marker = 0xe0 // APPlication specific (0).
	app15Marker = 0xef // APPlication specific (15).
	comMarker = 0xfe // COMment.
)
// Maps from the zig-zag ordering to the natural ordering.
// AC coefficients arrive in zig-zag order; unzig[k] gives the natural-order
// index within a block for the k'th zig-zag position (0 <= k < blockSize).
var unzig = [blockSize]int{
	0, 1, 8, 16, 9, 2, 3, 10,
	17, 24, 32, 25, 18, 11, 4, 5,
	12, 19, 26, 33, 40, 48, 41, 34,
	27, 20, 13, 6, 7, 14, 21, 28,
	35, 42, 49, 56, 57, 50, 43, 36,
	29, 22, 15, 23, 30, 37, 44, 51,
	58, 59, 52, 45, 38, 31, 39, 46,
	53, 60, 61, 54, 47, 55, 62, 63,
}
// Reader is the input expected by Decode: an io.Reader that can also read
// single bytes. If the passed in io.Reader does not also have ReadByte,
// then Decode will introduce its own buffering.
type Reader interface {
	io.Reader
	ReadByte() (c byte, err os.Error)
}
// decoder holds all state for decoding a single JPEG image.
type decoder struct {
	r Reader // byte source for the compressed stream
	width, height int
	image *image.RGBA // destination image, allocated by processSOF
	comps [nComponent]component
	huff [maxTc + 1][maxTh + 1]huffman // Huffman tables, indexed by class then selector
	quant [maxTq + 1][blockSize]int // quantization tables
	b bits
	blocks [nComponent][maxH * maxV][blockSize]int // DCT blocks for the current MCU
	tmp [1024]byte // scratch buffer for segment parsing
}
// ignore reads and discards the next n bytes from d.r, using d.tmp as
// scratch space.
func (d *decoder) ignore(n int) os.Error {
	for n > 0 {
		chunk := n
		if chunk > len(d.tmp) {
			chunk = len(d.tmp)
		}
		if _, err := io.ReadFull(d.r, d.tmp[0:chunk]); err != nil {
			return err
		}
		n -= chunk
	}
	return nil
}
// Specified in section B.2.2.
// processSOF parses the Start Of Frame segment (n payload bytes): precision,
// image dimensions and per-component sampling factors, then allocates the
// destination image.
func (d *decoder) processSOF(n int) os.Error {
	if n != 6+3*nComponent {
		return UnsupportedError("SOF has wrong length")
	}
	_, err := io.ReadFull(d.r, d.tmp[0:6+3*nComponent])
	if err != nil {
		return err
	}
	// We only support 8-bit precision.
	if d.tmp[0] != 8 {
		return UnsupportedError("precision")
	}
	d.height = int(d.tmp[1])<<8 + int(d.tmp[2])
	d.width = int(d.tmp[3])<<8 + int(d.tmp[4])
	if d.tmp[5] != nComponent {
		return UnsupportedError("SOF has wrong number of image components")
	}
	for i := 0; i < nComponent; i++ {
		hv := d.tmp[7+3*i]
		d.comps[i].c = d.tmp[6+3*i]
		d.comps[i].h = hv >> 4
		d.comps[i].v = hv & 0x0f
		d.comps[i].tq = d.tmp[8+3*i]
		// We only support YCbCr images, and 4:4:4, 4:2:2 or 4:2:0 chroma downsampling ratios. This implies that
		// the (h, v) values for the Y component are either (1, 1), (2, 1) or (2, 2), and the
		// (h, v) values for the Cr and Cb components must be (1, 1).
		if i == 0 {
			if hv != 0x11 && hv != 0x21 && hv != 0x22 {
				return UnsupportedError("luma downsample ratio")
			}
		} else {
			if hv != 0x11 {
				return UnsupportedError("chroma downsample ratio")
			}
		}
	}
	d.image = image.NewRGBA(d.width, d.height)
	return nil
}
// Specified in section B.2.4.1.
// processDQT parses one Define Quantization Table segment of n payload
// bytes, which may carry several 8-bit-precision tables back to back.
func (d *decoder) processDQT(n int) os.Error {
	const qtLength = 1 + blockSize
	for ; n >= qtLength; n -= qtLength {
		_, err := io.ReadFull(d.r, d.tmp[0:qtLength])
		if err != nil {
			return err
		}
		// Pq (high nibble) is the element precision; only 8-bit (0) is supported.
		pq := d.tmp[0] >> 4
		if pq != 0 {
			return UnsupportedError("bad Pq value")
		}
		// Tq (low nibble) selects which table slot to fill.
		tq := d.tmp[0] & 0x0f
		if tq > maxTq {
			return FormatError("bad Tq value")
		}
		for i := range d.quant[tq] {
			d.quant[tq][i] = int(d.tmp[i+1])
		}
	}
	// Any leftover bytes mean the declared length was not a whole number of tables.
	if n != 0 {
		return FormatError("DQT has wrong length")
	}
	return nil
}
// Set the Pixel (px, py)'s RGB value, based on its YCbCr value.
// lumaBlock/lumaIndex select the Y sample, chromaIndex the Cb/Cr samples.
func (d *decoder) calcPixel(px, py, lumaBlock, lumaIndex, chromaIndex int) {
	y, cb, cr := d.blocks[0][lumaBlock][lumaIndex], d.blocks[1][0][chromaIndex], d.blocks[2][0][chromaIndex]
	// The JFIF specification (http://www.w3.org/Graphics/JPEG/jfif3.pdf, page 3) gives the formula
	// for translating YCbCr to RGB as:
	//   R = Y + 1.402 (Cr-128)
	//   G = Y - 0.34414 (Cb-128) - 0.71414 (Cr-128)
	//   B = Y + 1.772 (Cb-128)
	// Computed in fixed point, scaled by 100000; the +50000 rounds to nearest.
	yPlusHalf := 100000*y + 50000
	cb -= 128
	cr -= 128
	r := (yPlusHalf + 140200*cr) / 100000
	g := (yPlusHalf - 34414*cb - 71414*cr) / 100000
	b := (yPlusHalf + 177200*cb) / 100000
	// Clamp each channel to [0, 255].
	if r < 0 {
		r = 0
	} else if r > 255 {
		r = 255
	}
	if g < 0 {
		g = 0
	} else if g > 255 {
		g = 255
	}
	if b < 0 {
		b = 0
	} else if b > 255 {
		b = 255
	}
	d.image.Pixel[py][px] = image.RGBAColor{uint8(r), uint8(g), uint8(b), 0xff}
}
// Convert the MCU from YCbCr to RGB.
// (mx, my) addresses the MCU within the image; h0 and v0 are the luma
// sampling factors, so the MCU holds h0*v0 luma blocks and one block each
// of Cb and Cr. Pixels beyond the image edge are skipped.
func (d *decoder) convertMCU(mx, my, h0, v0 int) {
	lumaBlock := 0
	for v := 0; v < v0; v++ {
		for h := 0; h < h0; h++ {
			chromaBase := 8*4*v + 4*h
			py := 8 * (v0*my + v)
			for y := 0; y < 8 && py < d.height; y++ {
				px := 8 * (h0*mx + h)
				lumaIndex := 8 * y
				// Chroma advances at the subsampled rate relative to luma.
				chromaIndex := chromaBase + 8*(y/v0)
				for x := 0; x < 8 && px < d.width; x++ {
					d.calcPixel(px, py, lumaBlock, lumaIndex, chromaIndex)
					if h0 == 1 {
						chromaIndex += 1
					} else {
						chromaIndex += x % 2
					}
					lumaIndex++
					px++
				}
				py++
			}
			lumaBlock++
		}
	}
}
// Specified in section B.2.3.
// processSOS parses the Start Of Scan header (n payload bytes) and then
// decodes the entropy-coded data that follows, converting each MCU into
// RGB pixels of d.image.
//
// Fix: the AC run-length guard used "k > blockSize", which admitted
// k == blockSize (64) and then panicked indexing unzig[k] on a [blockSize]
// array with malformed input; the guard is now "k >= blockSize".
func (d *decoder) processSOS(n int) os.Error {
	if d.image == nil {
		return FormatError("missing SOF segment")
	}
	if n != 4+2*nComponent {
		return UnsupportedError("SOS has wrong length")
	}
	_, err := io.ReadFull(d.r, d.tmp[0:4+2*nComponent])
	if err != nil {
		return err
	}
	if d.tmp[0] != nComponent {
		return UnsupportedError("SOS has wrong number of image components")
	}
	var scanComps [nComponent]struct {
		td uint8 // DC table selector.
		ta uint8 // AC table selector.
	}
	h0, v0 := int(d.comps[0].h), int(d.comps[0].v) // The h and v values from the Y components.
	for i := 0; i < nComponent; i++ {
		cs := d.tmp[1+2*i] // Component selector.
		if cs != d.comps[i].c {
			return UnsupportedError("scan components out of order")
		}
		scanComps[i].td = d.tmp[2+2*i] >> 4
		scanComps[i].ta = d.tmp[2+2*i] & 0x0f
	}
	// mxx and myy are the number of MCUs (Minimum Coded Units) in the image.
	mxx := (d.width + 8*int(h0) - 1) / (8 * int(h0))
	myy := (d.height + 8*int(v0) - 1) / (8 * int(v0))
	var allZeroes [blockSize]int
	var dc [nComponent]int // running DC predictor per component
	for my := 0; my < myy; my++ {
		for mx := 0; mx < mxx; mx++ {
			for i := 0; i < nComponent; i++ {
				qt := &d.quant[d.comps[i].tq]
				for j := 0; j < int(d.comps[i].h*d.comps[i].v); j++ {
					d.blocks[i][j] = allZeroes
					// Decode the DC coefficient, as specified in section F.2.2.1.
					value, err := d.decodeHuffman(&d.huff[dcTableClass][scanComps[i].td])
					if err != nil {
						return err
					}
					if value > 16 {
						return UnsupportedError("excessive DC component")
					}
					dcDelta, err := d.receiveExtend(value)
					if err != nil {
						return err
					}
					dc[i] += dcDelta
					d.blocks[i][j][0] = dc[i] * qt[0]
					// Decode the AC coefficients, as specified in section F.2.2.2.
					for k := 1; k < blockSize; k++ {
						value, err := d.decodeHuffman(&d.huff[acTableClass][scanComps[i].ta])
						if err != nil {
							return err
						}
						v0 := value >> 4   // run of zero coefficients
						v1 := value & 0x0f // magnitude category of the next coefficient
						if v1 != 0 {
							k += int(v0)
							if k >= blockSize {
								return FormatError("bad DCT index")
							}
							ac, err := d.receiveExtend(v1)
							if err != nil {
								return err
							}
							d.blocks[i][j][unzig[k]] = ac * qt[k]
						} else {
							// v1 == 0 is either EOB (v0 != 0x0f) or ZRL, a run of 16 zeroes.
							if v0 != 0x0f {
								break
							}
							k += 0x0f
						}
					}
					idct(&d.blocks[i][j])
				} // for j
			} // for i
			d.convertMCU(mx, my, int(d.comps[0].h), int(d.comps[0].v))
		} // for mx
	} // for my
	return nil
}
// Decode reads a JPEG formatted image from r and returns it as an image.Image.
// It dispatches on each marker segment until the End Of Image marker.
func Decode(r io.Reader) (image.Image, os.Error) {
	var d decoder
	if rr, ok := r.(Reader); ok {
		d.r = rr
	} else {
		d.r = bufio.NewReader(r)
	}
	// Check for the Start Of Image marker.
	_, err := io.ReadFull(r, d.tmp[0:2])
	if err != nil {
		return nil, err
	}
	if d.tmp[0] != 0xff || d.tmp[1] != soiMarker {
		return nil, FormatError("missing SOI marker")
	}
	// Process the remaining segments until the End Of Image marker.
	for {
		_, err := io.ReadFull(r, d.tmp[0:2])
		if err != nil {
			return nil, err
		}
		if d.tmp[0] != 0xff {
			return nil, FormatError("missing 0xff marker start")
		}
		marker := d.tmp[1]
		if marker == eoiMarker { // End Of Image.
			break
		}
		// Read the 16-bit length of the segment. The value includes the 2 bytes for the
		// length itself, so we subtract 2 to get the number of remaining bytes.
		_, err = io.ReadFull(r, d.tmp[0:2])
		if err != nil {
			return nil, err
		}
		n := int(d.tmp[0])<<8 + int(d.tmp[1]) - 2
		if n < 0 {
			return nil, FormatError("short segment length")
		}
		// Dispatch on the marker type; each process* consumes n payload bytes.
		switch {
		case marker == sof0Marker: // Start Of Frame (Baseline).
			err = d.processSOF(n)
		case marker == sof2Marker: // Start Of Frame (Progressive).
			err = UnsupportedError("progressive mode")
		case marker == dhtMarker: // Define Huffman Table.
			err = d.processDHT(n)
		case marker == dqtMarker: // Define Quantization Table.
			err = d.processDQT(n)
		case marker == sosMarker: // Start Of Scan.
			err = d.processSOS(n)
		case marker >= app0Marker && marker <= app15Marker || marker == comMarker: // APPlication specific, or COMment.
			err = d.ignore(n)
		default:
			err = UnsupportedError("unknown marker")
		}
		if err != nil {
			return nil, err
		}
	}
	return d.image, nil
}
JPEG decoder now handles RST (restart) markers.
R=r
CC=golang-dev
https://golang.org/cl/181075
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The jpeg package implements a decoder for JPEG images, as defined in ITU-T T.81.
package jpeg
// See http://www.w3.org/Graphics/JPEG/itu-t81.pdf
import (
"bufio"
"image"
"io"
"os"
)
// A FormatError reports that the input is not a valid JPEG.
type FormatError string

// String implements the os.Error interface.
func (e FormatError) String() string { return "invalid JPEG format: " + string(e) }

// An UnsupportedError reports that the input uses a valid but unimplemented JPEG feature.
type UnsupportedError string

// String implements the os.Error interface.
func (e UnsupportedError) String() string { return "unsupported JPEG feature: " + string(e) }
// Component specification, specified in section B.2.2.
type component struct {
	c  uint8 // Component identifier.
	h  uint8 // Horizontal sampling factor.
	v  uint8 // Vertical sampling factor.
	tq uint8 // Quantization table destination selector.
}
// Decoder table dimensions and supported limits.
const (
	blockSize    = 64 // A DCT block is 8x8.
	dcTableClass = 0
	acTableClass = 1
	maxTc        = 1
	maxTh        = 3
	maxTq        = 3
	// We only support 4:4:4, 4:2:2 and 4:2:0 downsampling, and assume that the components are Y, Cb, Cr.
	nComponent = 3
	maxH       = 2
	maxV       = 2
)

// JPEG marker codes: the byte that follows a 0xff in the stream.
const (
	soiMarker   = 0xd8 // Start Of Image.
	eoiMarker   = 0xd9 // End Of Image.
	sof0Marker  = 0xc0 // Start Of Frame (Baseline).
	sof2Marker  = 0xc2 // Start Of Frame (Progressive).
	dhtMarker   = 0xc4 // Define Huffman Table.
	dqtMarker   = 0xdb // Define Quantization Table.
	sosMarker   = 0xda // Start Of Scan.
	driMarker   = 0xdd // Define Restart Interval.
	rst0Marker  = 0xd0 // ReSTart (0).
	rst7Marker  = 0xd7 // ReSTart (7).
	app0Marker  = 0xe0 // APPlication specific (0).
	app15Marker = 0xef // APPlication specific (15).
	comMarker   = 0xfe // COMment.
)
// Maps from the zig-zag ordering to the natural ordering.
// unzig[z] is the row-major index of the z'th coefficient in the
// zig-zag scan of an 8x8 block.
var unzig = [blockSize]int{
	0, 1, 8, 16, 9, 2, 3, 10,
	17, 24, 32, 25, 18, 11, 4, 5,
	12, 19, 26, 33, 40, 48, 41, 34,
	27, 20, 13, 6, 7, 14, 21, 28,
	35, 42, 49, 56, 57, 50, 43, 36,
	29, 22, 15, 23, 30, 37, 44, 51,
	58, 59, 52, 45, 38, 31, 39, 46,
	53, 60, 61, 54, 47, 55, 62, 63,
}
// If the passed in io.Reader does not also have ReadByte, then Decode will introduce its own buffering.
type Reader interface {
	io.Reader
	ReadByte() (c byte, err os.Error)
}

// decoder holds all the working state needed to decode one JPEG image.
type decoder struct {
	r             Reader
	width, height int
	image         *image.RGBA
	ri            int // Restart Interval.
	comps         [nComponent]component
	huff          [maxTc + 1][maxTh + 1]huffman
	quant         [maxTq + 1][blockSize]int
	b             bits
	blocks        [nComponent][maxH * maxV][blockSize]int
	tmp           [1024]byte // scratch buffer for segment reads
}
// Reads and ignores the next n bytes, consuming them from d.r in chunks
// no larger than the scratch buffer d.tmp.
func (d *decoder) ignore(n int) os.Error {
	for remaining := n; remaining > 0; {
		chunk := remaining
		if chunk > len(d.tmp) {
			chunk = len(d.tmp)
		}
		if _, err := io.ReadFull(d.r, d.tmp[0:chunk]); err != nil {
			return err
		}
		remaining -= chunk
	}
	return nil
}
// Specified in section B.2.2.
// processSOF parses the Start Of Frame segment: sample precision, image
// dimensions, and the per-component sampling factors and table selectors.
func (d *decoder) processSOF(n int) os.Error {
	if n != 6+3*nComponent {
		return UnsupportedError("SOF has wrong length")
	}
	_, err := io.ReadFull(d.r, d.tmp[0:6+3*nComponent])
	if err != nil {
		return err
	}
	// We only support 8-bit precision.
	if d.tmp[0] != 8 {
		return UnsupportedError("precision")
	}
	d.height = int(d.tmp[1])<<8 + int(d.tmp[2])
	d.width = int(d.tmp[3])<<8 + int(d.tmp[4])
	if d.tmp[5] != nComponent {
		return UnsupportedError("SOF has wrong number of image components")
	}
	for i := 0; i < nComponent; i++ {
		hv := d.tmp[7+3*i] // packed sampling factors: high nibble h, low nibble v.
		d.comps[i].c = d.tmp[6+3*i]
		d.comps[i].h = hv >> 4
		d.comps[i].v = hv & 0x0f
		d.comps[i].tq = d.tmp[8+3*i]
		// We only support YCbCr images, and 4:4:4, 4:2:2 or 4:2:0 chroma downsampling ratios. This implies that
		// the (h, v) values for the Y component are either (1, 1), (2, 1) or (2, 2), and the
		// (h, v) values for the Cr and Cb components must be (1, 1).
		if i == 0 {
			if hv != 0x11 && hv != 0x21 && hv != 0x22 {
				return UnsupportedError("luma downsample ratio")
			}
		} else {
			if hv != 0x11 {
				return UnsupportedError("chroma downsample ratio")
			}
		}
	}
	d.image = image.NewRGBA(d.width, d.height)
	return nil
}
// Specified in section B.2.4.1.
// processDQT reads one or more 8-bit quantization tables from a DQT segment;
// n must be an exact multiple of the per-table length.
func (d *decoder) processDQT(n int) os.Error {
	const qtLength = 1 + blockSize
	for ; n >= qtLength; n -= qtLength {
		_, err := io.ReadFull(d.r, d.tmp[0:qtLength])
		if err != nil {
			return err
		}
		pq := d.tmp[0] >> 4 // table element precision; only 8-bit (Pq == 0) is supported.
		if pq != 0 {
			return UnsupportedError("bad Pq value")
		}
		tq := d.tmp[0] & 0x0f // destination table index.
		if tq > maxTq {
			return FormatError("bad Tq value")
		}
		for i := range d.quant[tq] {
			d.quant[tq][i] = int(d.tmp[i+1])
		}
	}
	if n != 0 {
		return FormatError("DQT has wrong length")
	}
	return nil
}
// Set the Pixel (px, py)'s RGB value, based on its YCbCr value.
func (d *decoder) calcPixel(px, py, lumaBlock, lumaIndex, chromaIndex int) {
	y, cb, cr := d.blocks[0][lumaBlock][lumaIndex], d.blocks[1][0][chromaIndex], d.blocks[2][0][chromaIndex]
	// The JFIF specification (http://www.w3.org/Graphics/JPEG/jfif3.pdf, page 3) gives the formula
	// for translating YCbCr to RGB as:
	//   R = Y + 1.402 (Cr-128)
	//   G = Y - 0.34414 (Cb-128) - 0.71414 (Cr-128)
	//   B = Y + 1.772 (Cb-128)
	// The arithmetic is done in fixed point, scaled by 100000, with a +0.5
	// rounding term folded into yPlusHalf.
	clamp8 := func(v int) uint8 {
		if v < 0 {
			return 0
		}
		if v > 255 {
			return 255
		}
		return uint8(v)
	}
	yPlusHalf := 100000*y + 50000
	cb -= 128
	cr -= 128
	red := (yPlusHalf + 140200*cr) / 100000
	green := (yPlusHalf - 34414*cb - 71414*cr) / 100000
	blue := (yPlusHalf + 177200*cb) / 100000
	d.image.Pixel[py][px] = image.RGBAColor{clamp8(red), clamp8(green), clamp8(blue), 0xff}
}
// Convert the MCU from YCbCr to RGB.
// (mx, my) addresses the MCU within the image. h0 and v0 are the luma
// sampling factors, so an MCU spans h0*v0 luma blocks but only a single
// block per chroma component.
func (d *decoder) convertMCU(mx, my, h0, v0 int) {
	lumaBlock := 0
	for v := 0; v < v0; v++ {
		for h := 0; h < h0; h++ {
			chromaBase := 8*4*v + 4*h
			py := 8 * (v0*my + v)
			for y := 0; y < 8 && py < d.height; y++ {
				px := 8 * (h0*mx + h)
				lumaIndex := 8 * y
				chromaIndex := chromaBase + 8*(y/v0)
				for x := 0; x < 8 && px < d.width; x++ {
					d.calcPixel(px, py, lumaBlock, lumaIndex, chromaIndex)
					// Chroma advances in lockstep with luma when there is no
					// horizontal subsampling (h0 == 1), and at half rate otherwise.
					if h0 == 1 {
						chromaIndex += 1
					} else {
						chromaIndex += x % 2
					}
					lumaIndex++
					px++
				}
				py++
			}
			lumaBlock++
		}
	}
}
// Specified in section B.2.3.
// processSOS reads the Start Of Scan header and then entropy-decodes the
// whole scan: for each MCU it decodes the DC and AC coefficients of every
// block, dequantizes, applies the inverse DCT and converts to RGB, honoring
// RST (restart) markers when a restart interval has been defined.
func (d *decoder) processSOS(n int) os.Error {
	if d.image == nil {
		return FormatError("missing SOF segment")
	}
	if n != 4+2*nComponent {
		return UnsupportedError("SOS has wrong length")
	}
	_, err := io.ReadFull(d.r, d.tmp[0:4+2*nComponent])
	if err != nil {
		return err
	}
	if d.tmp[0] != nComponent {
		return UnsupportedError("SOS has wrong number of image components")
	}
	var scanComps [nComponent]struct {
		td uint8 // DC table selector.
		ta uint8 // AC table selector.
	}
	h0, v0 := int(d.comps[0].h), int(d.comps[0].v) // The h and v values from the Y components.
	for i := 0; i < nComponent; i++ {
		cs := d.tmp[1+2*i] // Component selector.
		if cs != d.comps[i].c {
			return UnsupportedError("scan components out of order")
		}
		scanComps[i].td = d.tmp[2+2*i] >> 4
		scanComps[i].ta = d.tmp[2+2*i] & 0x0f
	}
	// mxx and myy are the number of MCUs (Minimum Coded Units) in the image.
	mxx := (d.width + 8*int(h0) - 1) / (8 * int(h0))
	myy := (d.height + 8*int(v0) - 1) / (8 * int(v0))
	mcu, expectedRST := 0, uint8(rst0Marker)
	var allZeroes [blockSize]int
	var dc [nComponent]int
	for my := 0; my < myy; my++ {
		for mx := 0; mx < mxx; mx++ {
			for i := 0; i < nComponent; i++ {
				qt := &d.quant[d.comps[i].tq]
				for j := 0; j < int(d.comps[i].h*d.comps[i].v); j++ {
					d.blocks[i][j] = allZeroes
					// Decode the DC coefficient, as specified in section F.2.2.1.
					value, err := d.decodeHuffman(&d.huff[dcTableClass][scanComps[i].td])
					if err != nil {
						return err
					}
					if value > 16 {
						return UnsupportedError("excessive DC component")
					}
					dcDelta, err := d.receiveExtend(value)
					if err != nil {
						return err
					}
					dc[i] += dcDelta
					d.blocks[i][j][0] = dc[i] * qt[0]
					// Decode the AC coefficients, as specified in section F.2.2.2.
					for k := 1; k < blockSize; k++ {
						value, err := d.decodeHuffman(&d.huff[acTableClass][scanComps[i].ta])
						if err != nil {
							return err
						}
						v0 := value >> 4
						v1 := value & 0x0f
						if v1 != 0 {
							k += int(v0)
							if k >= blockSize {
								// k == blockSize would index unzig (a [blockSize]int)
								// out of range below, so treat it as malformed input
								// rather than panicking.
								return FormatError("bad DCT index")
							}
							ac, err := d.receiveExtend(v1)
							if err != nil {
								return err
							}
							d.blocks[i][j][unzig[k]] = ac * qt[k]
						} else {
							if v0 != 0x0f {
								break
							}
							k += 0x0f
						}
					}
					idct(&d.blocks[i][j])
				} // for j
			} // for i
			d.convertMCU(mx, my, int(d.comps[0].h), int(d.comps[0].v))
			mcu++
			if d.ri > 0 && mcu%d.ri == 0 && mcu < mxx*myy {
				// A more sophisticated decoder could use RST[0-7] markers to resynchronize from corrupt input,
				// but this one assumes well-formed input, and hence the restart marker follows immediately.
				_, err := io.ReadFull(d.r, d.tmp[0:2])
				if err != nil {
					return err
				}
				if d.tmp[0] != 0xff || d.tmp[1] != expectedRST {
					return FormatError("bad RST marker")
				}
				expectedRST++
				if expectedRST == rst7Marker+1 {
					expectedRST = rst0Marker
				}
				// Reset the Huffman decoder.
				d.b = bits{}
				// Reset the DC components, as per section F.2.1.3.1.
				for i := 0; i < nComponent; i++ {
					dc[i] = 0
				}
			}
		} // for mx
	} // for my
	return nil
}
// Specified in section B.2.4.4.
// processDRI reads the 2-byte restart interval (measured in MCUs) into d.ri.
func (d *decoder) processDRI(n int) os.Error {
	if n != 2 {
		return FormatError("DRI has wrong length")
	}
	if _, err := io.ReadFull(d.r, d.tmp[0:2]); err != nil {
		return err
	}
	d.ri = int(d.tmp[0])<<8 | int(d.tmp[1])
	return nil
}
// Decode reads a JPEG formatted image from r and returns it as an image.Image.
func Decode(r io.Reader) (image.Image, os.Error) {
	var d decoder
	if rr, ok := r.(Reader); ok {
		d.r = rr
	} else {
		d.r = bufio.NewReader(r)
	}
	// Check for the Start Of Image marker.
	// All reads must go through d.r: once r has been wrapped in a
	// bufio.Reader, reading from r directly would skip any bytes already
	// buffered inside d.r and desynchronize the decoder.
	_, err := io.ReadFull(d.r, d.tmp[0:2])
	if err != nil {
		return nil, err
	}
	if d.tmp[0] != 0xff || d.tmp[1] != soiMarker {
		return nil, FormatError("missing SOI marker")
	}
	// Process the remaining segments until the End Of Image marker.
	for {
		_, err := io.ReadFull(d.r, d.tmp[0:2])
		if err != nil {
			return nil, err
		}
		if d.tmp[0] != 0xff {
			return nil, FormatError("missing 0xff marker start")
		}
		marker := d.tmp[1]
		if marker == eoiMarker { // End Of Image.
			break
		}
		// Read the 16-bit length of the segment. The value includes the 2 bytes for the
		// length itself, so we subtract 2 to get the number of remaining bytes.
		_, err = io.ReadFull(d.r, d.tmp[0:2])
		if err != nil {
			return nil, err
		}
		n := int(d.tmp[0])<<8 + int(d.tmp[1]) - 2
		if n < 0 {
			return nil, FormatError("short segment length")
		}
		switch {
		case marker == sof0Marker: // Start Of Frame (Baseline).
			err = d.processSOF(n)
		case marker == sof2Marker: // Start Of Frame (Progressive).
			err = UnsupportedError("progressive mode")
		case marker == dhtMarker: // Define Huffman Table.
			err = d.processDHT(n)
		case marker == dqtMarker: // Define Quantization Table.
			err = d.processDQT(n)
		case marker == sosMarker: // Start Of Scan.
			err = d.processSOS(n)
		case marker == driMarker: // Define Restart Interval.
			err = d.processDRI(n)
		case marker >= app0Marker && marker <= app15Marker || marker == comMarker: // APPlication specific, or COMment.
			err = d.ignore(n)
		default:
			err = UnsupportedError("unknown marker")
		}
		if err != nil {
			return nil, err
		}
	}
	return d.image, nil
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin freebsd linux netbsd openbsd windows
// TCP sockets
package net
import (
"io"
"os"
"syscall"
"time"
)
// BUG(rsc): On OpenBSD, listening on the "tcp" network does not listen for
// both IPv4 and IPv6 connections. This is due to the fact that IPv4 traffic
// will not be routed to an IPv6 socket - two separate sockets are required
// if both AFs are to be supported. See inet6(4) on OpenBSD for details.
// sockaddrToTCP converts a syscall-level socket address into a *TCPAddr.
// Unrecognized sockaddr types yield a nil Addr interface.
func sockaddrToTCP(sa syscall.Sockaddr) Addr {
	switch sa := sa.(type) {
	case *syscall.SockaddrInet4:
		return &TCPAddr{sa.Addr[0:], sa.Port}
	case *syscall.SockaddrInet6:
		return &TCPAddr{sa.Addr[0:], sa.Port}
	}
	return nil
}
// family returns the address family for a: AF_INET for a nil receiver,
// a short IP, or any IPv4 (or IPv4-mapped) address; AF_INET6 otherwise.
func (a *TCPAddr) family() int {
	if a == nil || len(a.IP) <= IPv4len {
		return syscall.AF_INET
	}
	if a.IP.To4() != nil {
		return syscall.AF_INET
	}
	return syscall.AF_INET6
}

// sockaddr converts a into a syscall.Sockaddr for the given address family.
func (a *TCPAddr) sockaddr(family int) (syscall.Sockaddr, error) {
	return ipToSockaddr(family, a.IP, a.Port)
}

// toAddr converts a typed nil *TCPAddr into a nil sockaddr interface so
// callers can compare the result against nil directly.
func (a *TCPAddr) toAddr() sockaddr {
	if a == nil { // nil *TCPAddr
		return nil // nil interface
	}
	return a
}
// TCPConn is an implementation of the Conn interface
// for TCP network connections.
type TCPConn struct {
	fd *netFD
}

// newTCPConn wraps fd in a *TCPConn and disables Nagle's algorithm by default.
func newTCPConn(fd *netFD) *TCPConn {
	c := &TCPConn{fd}
	c.SetNoDelay(true)
	return c
}

// ok reports whether the connection is non-nil and still has an open fd.
func (c *TCPConn) ok() bool { return c != nil && c.fd != nil }
// Implementation of the Conn interface - see Conn for documentation.

// Read implements the Conn Read method.
func (c *TCPConn) Read(b []byte) (n int, err error) {
	if !c.ok() {
		return 0, syscall.EINVAL
	}
	return c.fd.Read(b)
}

// ReadFrom implements the io.ReaderFrom ReadFrom method.
// It tries the platform sendFile fast path first and falls back to a
// generic copy loop when sendFile does not handle the source.
func (c *TCPConn) ReadFrom(r io.Reader) (int64, error) {
	if n, err, handled := sendFile(c.fd, r); handled {
		return n, err
	}
	return genericReadFrom(c, r)
}

// Write implements the Conn Write method.
func (c *TCPConn) Write(b []byte) (n int, err error) {
	if !c.ok() {
		return 0, syscall.EINVAL
	}
	return c.fd.Write(b)
}

// Close closes the TCP connection.
func (c *TCPConn) Close() error {
	if !c.ok() {
		return syscall.EINVAL
	}
	err := c.fd.Close()
	c.fd = nil // mark the connection unusable; later calls fail c.ok()
	return err
}

// CloseRead shuts down the reading side of the TCP connection.
// Most callers should just use Close.
func (c *TCPConn) CloseRead() error {
	if !c.ok() {
		return syscall.EINVAL
	}
	return c.fd.CloseRead()
}

// CloseWrite shuts down the writing side of the TCP connection.
// Most callers should just use Close.
func (c *TCPConn) CloseWrite() error {
	if !c.ok() {
		return syscall.EINVAL
	}
	return c.fd.CloseWrite()
}

// LocalAddr returns the local network address, a *TCPAddr.
func (c *TCPConn) LocalAddr() Addr {
	if !c.ok() {
		return nil
	}
	return c.fd.laddr
}

// RemoteAddr returns the remote network address, a *TCPAddr.
func (c *TCPConn) RemoteAddr() Addr {
	if !c.ok() {
		return nil
	}
	return c.fd.raddr
}

// SetDeadline implements the Conn SetDeadline method.
func (c *TCPConn) SetDeadline(t time.Time) error {
	if !c.ok() {
		return syscall.EINVAL
	}
	return setDeadline(c.fd, t)
}

// SetReadDeadline implements the Conn SetReadDeadline method.
func (c *TCPConn) SetReadDeadline(t time.Time) error {
	if !c.ok() {
		return syscall.EINVAL
	}
	return setReadDeadline(c.fd, t)
}

// SetWriteDeadline implements the Conn SetWriteDeadline method.
func (c *TCPConn) SetWriteDeadline(t time.Time) error {
	if !c.ok() {
		return syscall.EINVAL
	}
	return setWriteDeadline(c.fd, t)
}

// SetReadBuffer sets the size of the operating system's
// receive buffer associated with the connection.
func (c *TCPConn) SetReadBuffer(bytes int) error {
	if !c.ok() {
		return syscall.EINVAL
	}
	return setReadBuffer(c.fd, bytes)
}

// SetWriteBuffer sets the size of the operating system's
// transmit buffer associated with the connection.
func (c *TCPConn) SetWriteBuffer(bytes int) error {
	if !c.ok() {
		return syscall.EINVAL
	}
	return setWriteBuffer(c.fd, bytes)
}

// SetLinger sets the behavior of Close() on a connection
// which still has data waiting to be sent or to be acknowledged.
//
// If sec < 0 (the default), Close returns immediately and
// the operating system finishes sending the data in the background.
//
// If sec == 0, Close returns immediately and the operating system
// discards any unsent or unacknowledged data.
//
// If sec > 0, Close blocks for at most sec seconds waiting for
// data to be sent and acknowledged.
func (c *TCPConn) SetLinger(sec int) error {
	if !c.ok() {
		return syscall.EINVAL
	}
	return setLinger(c.fd, sec)
}

// SetKeepAlive sets whether the operating system should send
// keepalive messages on the connection.
func (c *TCPConn) SetKeepAlive(keepalive bool) error {
	if !c.ok() {
		return syscall.EINVAL
	}
	return setKeepAlive(c.fd, keepalive)
}

// SetNoDelay controls whether the operating system should delay
// packet transmission in hopes of sending fewer packets
// (Nagle's algorithm). The default is true (no delay), meaning
// that data is sent as soon as possible after a Write.
func (c *TCPConn) SetNoDelay(noDelay bool) error {
	if !c.ok() {
		return syscall.EINVAL
	}
	return setNoDelay(c.fd, noDelay)
}

// File returns a copy of the underlying os.File, set to blocking mode.
// It is the caller's responsibility to close f when finished.
// Closing c does not affect f, and closing f does not affect c.
func (c *TCPConn) File() (f *os.File, err error) { return c.fd.dup() }
// DialTCP connects to the remote address raddr on the network net,
// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is used
// as the local address for the connection.
func DialTCP(net string, laddr, raddr *TCPAddr) (*TCPConn, error) {
	if raddr == nil {
		return nil, &OpError{"dial", net, nil, errMissingAddress}
	}
	fd, err := internetSocket(net, laddr.toAddr(), raddr.toAddr(), syscall.SOCK_STREAM, 0, "dial", sockaddrToTCP)
	// checkRaddr panics when a successful dial produced no remote address;
	// used to diagnose the self-connect kernel bug handled below.
	checkRaddr := func(s string) {
		if err == nil && fd.raddr == nil {
			panic("nil raddr in DialTCP: " + s)
		}
	}
	checkRaddr("early")
	// TCP has a rarely used mechanism called a 'simultaneous connection' in
	// which Dial("tcp", addr1, addr2) run on the machine at addr1 can
	// connect to a simultaneous Dial("tcp", addr2, addr1) run on the machine
	// at addr2, without either machine executing Listen. If laddr == nil,
	// it means we want the kernel to pick an appropriate originating local
	// address. Some Linux kernels cycle blindly through a fixed range of
	// local ports, regardless of destination port. If a kernel happens to
	// pick local port 50001 as the source for a Dial("tcp", "", "localhost:50001"),
	// then the Dial will succeed, having simultaneously connected to itself.
	// This can only happen when we are letting the kernel pick a port (laddr == nil)
	// and when there is no listener for the destination address.
	// It's hard to argue this is anything other than a kernel bug. If we
	// see this happen, rather than expose the buggy effect to users, we
	// close the fd and try again. If it happens twice more, we relent and
	// use the result. See also:
	// http://golang.org/issue/2690
	// http://stackoverflow.com/questions/4949858/
	for i := 0; i < 2 && err == nil && laddr == nil && selfConnect(fd); i++ {
		fd.Close()
		fd, err = internetSocket(net, laddr.toAddr(), raddr.toAddr(), syscall.SOCK_STREAM, 0, "dial", sockaddrToTCP)
		checkRaddr("after close")
	}
	if err != nil {
		return nil, err
	}
	return newTCPConn(fd), nil
}

// selfConnect reports whether the dialed connection ended up connected
// back to itself (identical local and remote address and port).
func selfConnect(fd *netFD) bool {
	l := fd.laddr.(*TCPAddr)
	r := fd.raddr.(*TCPAddr)
	return l.Port == r.Port && l.IP.Equal(r.IP)
}
// TCPListener is a TCP network listener.
// Clients should typically use variables of type Listener
// instead of assuming TCP.
type TCPListener struct {
	fd *netFD
}

// ListenTCP announces on the TCP address laddr and returns a TCP listener.
// Net must be "tcp", "tcp4", or "tcp6".
// If laddr has a port of 0, it means to listen on some available port.
// The caller can use l.Addr() to retrieve the chosen address.
func ListenTCP(net string, laddr *TCPAddr) (*TCPListener, error) {
	fd, err := internetSocket(net, laddr.toAddr(), nil, syscall.SOCK_STREAM, 0, "listen", sockaddrToTCP)
	if err != nil {
		return nil, err
	}
	err = syscall.Listen(fd.sysfd, listenerBacklog)
	if err != nil {
		// Listen failed; release the socket before reporting the error.
		closesocket(fd.sysfd)
		return nil, &OpError{"listen", net, laddr, err}
	}
	l := new(TCPListener)
	l.fd = fd
	return l, nil
}

// AcceptTCP accepts the next incoming call and returns the new connection
// and the remote address.
func (l *TCPListener) AcceptTCP() (c *TCPConn, err error) {
	if l == nil || l.fd == nil || l.fd.sysfd < 0 {
		return nil, syscall.EINVAL
	}
	fd, err := l.fd.accept(sockaddrToTCP)
	if err != nil {
		return nil, err
	}
	return newTCPConn(fd), nil
}

// Accept implements the Accept method in the Listener interface;
// it waits for the next call and returns a generic Conn.
func (l *TCPListener) Accept() (c Conn, err error) {
	c1, err := l.AcceptTCP()
	if err != nil {
		return nil, err
	}
	return c1, nil
}

// Close stops listening on the TCP address.
// Already Accepted connections are not closed.
func (l *TCPListener) Close() error {
	if l == nil || l.fd == nil {
		return syscall.EINVAL
	}
	return l.fd.Close()
}

// Addr returns the listener's network address, a *TCPAddr.
func (l *TCPListener) Addr() Addr { return l.fd.laddr }

// SetDeadline sets the deadline associated with the listener.
// A zero time value disables the deadline.
func (l *TCPListener) SetDeadline(t time.Time) error {
	if l == nil || l.fd == nil {
		return syscall.EINVAL
	}
	return setDeadline(l.fd, t)
}

// File returns a copy of the underlying os.File, set to blocking mode.
// It is the caller's responsibility to close f when finished.
// Closing c does not affect f, and closing f does not affect c.
func (l *TCPListener) File() (f *os.File, err error) { return l.fd.dup() }
net: panic if sockaddrToTCP returns nil incorrectly
Part of diagnosing the selfConnect bug
TBR=dsymonds
R=golang-dev
CC=golang-dev
http://codereview.appspot.com/5687057
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin freebsd linux netbsd openbsd windows
// TCP sockets
package net
import (
"fmt"
"io"
"os"
"syscall"
"time"
)
// BUG(rsc): On OpenBSD, listening on the "tcp" network does not listen for
// both IPv4 and IPv6 connections. This is due to the fact that IPv4 traffic
// will not be routed to an IPv6 socket - two separate sockets are required
// if both AFs are to be supported. See inet6(4) on OpenBSD for details.
// sockaddrToTCP converts a syscall-level socket address into a *TCPAddr.
// A non-nil sockaddr of an unexpected concrete type panics (diagnostic for
// the selfConnect bug); a nil sockaddr yields a nil Addr interface.
func sockaddrToTCP(sa syscall.Sockaddr) Addr {
	switch sa := sa.(type) {
	case *syscall.SockaddrInet4:
		return &TCPAddr{sa.Addr[0:], sa.Port}
	case *syscall.SockaddrInet6:
		return &TCPAddr{sa.Addr[0:], sa.Port}
	default:
		if sa != nil {
			// TODO(r): Diagnose when we will turn a non-nil sockaddr into a nil.
			// Part of diagnosing the selfConnect bug.
			panic(fmt.Sprintf("unexpected type in sockaddrToTCP: %T", sa))
		}
	}
	return nil
}
// family returns the address family for a: AF_INET for a nil receiver,
// a short IP, or any IPv4 (or IPv4-mapped) address; AF_INET6 otherwise.
func (a *TCPAddr) family() int {
	if a == nil || len(a.IP) <= IPv4len {
		return syscall.AF_INET
	}
	if a.IP.To4() != nil {
		return syscall.AF_INET
	}
	return syscall.AF_INET6
}

// sockaddr converts a into a syscall.Sockaddr for the given address family.
func (a *TCPAddr) sockaddr(family int) (syscall.Sockaddr, error) {
	return ipToSockaddr(family, a.IP, a.Port)
}

// toAddr converts a typed nil *TCPAddr into a nil sockaddr interface so
// callers can compare the result against nil directly.
func (a *TCPAddr) toAddr() sockaddr {
	if a == nil { // nil *TCPAddr
		return nil // nil interface
	}
	return a
}
// TCPConn is an implementation of the Conn interface
// for TCP network connections.
type TCPConn struct {
	fd *netFD
}

// newTCPConn wraps fd in a *TCPConn and disables Nagle's algorithm by default.
func newTCPConn(fd *netFD) *TCPConn {
	c := &TCPConn{fd}
	c.SetNoDelay(true)
	return c
}

// ok reports whether the connection is non-nil and still has an open fd.
func (c *TCPConn) ok() bool { return c != nil && c.fd != nil }
// Implementation of the Conn interface - see Conn for documentation.

// Read implements the Conn Read method.
func (c *TCPConn) Read(b []byte) (n int, err error) {
	if !c.ok() {
		return 0, syscall.EINVAL
	}
	return c.fd.Read(b)
}

// ReadFrom implements the io.ReaderFrom ReadFrom method.
// It tries the platform sendFile fast path first and falls back to a
// generic copy loop when sendFile does not handle the source.
func (c *TCPConn) ReadFrom(r io.Reader) (int64, error) {
	if n, err, handled := sendFile(c.fd, r); handled {
		return n, err
	}
	return genericReadFrom(c, r)
}

// Write implements the Conn Write method.
func (c *TCPConn) Write(b []byte) (n int, err error) {
	if !c.ok() {
		return 0, syscall.EINVAL
	}
	return c.fd.Write(b)
}

// Close closes the TCP connection.
func (c *TCPConn) Close() error {
	if !c.ok() {
		return syscall.EINVAL
	}
	err := c.fd.Close()
	c.fd = nil // mark the connection unusable; later calls fail c.ok()
	return err
}

// CloseRead shuts down the reading side of the TCP connection.
// Most callers should just use Close.
func (c *TCPConn) CloseRead() error {
	if !c.ok() {
		return syscall.EINVAL
	}
	return c.fd.CloseRead()
}

// CloseWrite shuts down the writing side of the TCP connection.
// Most callers should just use Close.
func (c *TCPConn) CloseWrite() error {
	if !c.ok() {
		return syscall.EINVAL
	}
	return c.fd.CloseWrite()
}

// LocalAddr returns the local network address, a *TCPAddr.
func (c *TCPConn) LocalAddr() Addr {
	if !c.ok() {
		return nil
	}
	return c.fd.laddr
}

// RemoteAddr returns the remote network address, a *TCPAddr.
func (c *TCPConn) RemoteAddr() Addr {
	if !c.ok() {
		return nil
	}
	return c.fd.raddr
}

// SetDeadline implements the Conn SetDeadline method.
func (c *TCPConn) SetDeadline(t time.Time) error {
	if !c.ok() {
		return syscall.EINVAL
	}
	return setDeadline(c.fd, t)
}

// SetReadDeadline implements the Conn SetReadDeadline method.
func (c *TCPConn) SetReadDeadline(t time.Time) error {
	if !c.ok() {
		return syscall.EINVAL
	}
	return setReadDeadline(c.fd, t)
}

// SetWriteDeadline implements the Conn SetWriteDeadline method.
func (c *TCPConn) SetWriteDeadline(t time.Time) error {
	if !c.ok() {
		return syscall.EINVAL
	}
	return setWriteDeadline(c.fd, t)
}

// SetReadBuffer sets the size of the operating system's
// receive buffer associated with the connection.
func (c *TCPConn) SetReadBuffer(bytes int) error {
	if !c.ok() {
		return syscall.EINVAL
	}
	return setReadBuffer(c.fd, bytes)
}

// SetWriteBuffer sets the size of the operating system's
// transmit buffer associated with the connection.
func (c *TCPConn) SetWriteBuffer(bytes int) error {
	if !c.ok() {
		return syscall.EINVAL
	}
	return setWriteBuffer(c.fd, bytes)
}

// SetLinger sets the behavior of Close() on a connection
// which still has data waiting to be sent or to be acknowledged.
//
// If sec < 0 (the default), Close returns immediately and
// the operating system finishes sending the data in the background.
//
// If sec == 0, Close returns immediately and the operating system
// discards any unsent or unacknowledged data.
//
// If sec > 0, Close blocks for at most sec seconds waiting for
// data to be sent and acknowledged.
func (c *TCPConn) SetLinger(sec int) error {
	if !c.ok() {
		return syscall.EINVAL
	}
	return setLinger(c.fd, sec)
}

// SetKeepAlive sets whether the operating system should send
// keepalive messages on the connection.
func (c *TCPConn) SetKeepAlive(keepalive bool) error {
	if !c.ok() {
		return syscall.EINVAL
	}
	return setKeepAlive(c.fd, keepalive)
}

// SetNoDelay controls whether the operating system should delay
// packet transmission in hopes of sending fewer packets
// (Nagle's algorithm). The default is true (no delay), meaning
// that data is sent as soon as possible after a Write.
func (c *TCPConn) SetNoDelay(noDelay bool) error {
	if !c.ok() {
		return syscall.EINVAL
	}
	return setNoDelay(c.fd, noDelay)
}

// File returns a copy of the underlying os.File, set to blocking mode.
// It is the caller's responsibility to close f when finished.
// Closing c does not affect f, and closing f does not affect c.
func (c *TCPConn) File() (f *os.File, err error) { return c.fd.dup() }
// DialTCP connects to the remote address raddr on the network net,
// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is used
// as the local address for the connection.
func DialTCP(net string, laddr, raddr *TCPAddr) (*TCPConn, error) {
	if raddr == nil {
		return nil, &OpError{"dial", net, nil, errMissingAddress}
	}
	fd, err := internetSocket(net, laddr.toAddr(), raddr.toAddr(), syscall.SOCK_STREAM, 0, "dial", sockaddrToTCP)
	// checkRaddr panics when a successful dial produced no remote address;
	// used to diagnose the self-connect kernel bug handled below.
	checkRaddr := func(s string) {
		if err == nil && fd.raddr == nil {
			panic("nil raddr in DialTCP: " + s)
		}
	}
	checkRaddr("early")
	// TCP has a rarely used mechanism called a 'simultaneous connection' in
	// which Dial("tcp", addr1, addr2) run on the machine at addr1 can
	// connect to a simultaneous Dial("tcp", addr2, addr1) run on the machine
	// at addr2, without either machine executing Listen. If laddr == nil,
	// it means we want the kernel to pick an appropriate originating local
	// address. Some Linux kernels cycle blindly through a fixed range of
	// local ports, regardless of destination port. If a kernel happens to
	// pick local port 50001 as the source for a Dial("tcp", "", "localhost:50001"),
	// then the Dial will succeed, having simultaneously connected to itself.
	// This can only happen when we are letting the kernel pick a port (laddr == nil)
	// and when there is no listener for the destination address.
	// It's hard to argue this is anything other than a kernel bug. If we
	// see this happen, rather than expose the buggy effect to users, we
	// close the fd and try again. If it happens twice more, we relent and
	// use the result. See also:
	// http://golang.org/issue/2690
	// http://stackoverflow.com/questions/4949858/
	for i := 0; i < 2 && err == nil && laddr == nil && selfConnect(fd); i++ {
		fd.Close()
		fd, err = internetSocket(net, laddr.toAddr(), raddr.toAddr(), syscall.SOCK_STREAM, 0, "dial", sockaddrToTCP)
		checkRaddr("after close")
	}
	if err != nil {
		return nil, err
	}
	return newTCPConn(fd), nil
}

// selfConnect reports whether the dialed connection ended up connected
// back to itself (identical local and remote address and port).
func selfConnect(fd *netFD) bool {
	l := fd.laddr.(*TCPAddr)
	r := fd.raddr.(*TCPAddr)
	return l.Port == r.Port && l.IP.Equal(r.IP)
}
// TCPListener is a TCP network listener.
// Clients should typically use variables of type Listener
// instead of assuming TCP.
type TCPListener struct {
	fd *netFD
}

// ListenTCP announces on the TCP address laddr and returns a TCP listener.
// Net must be "tcp", "tcp4", or "tcp6".
// If laddr has a port of 0, it means to listen on some available port.
// The caller can use l.Addr() to retrieve the chosen address.
func ListenTCP(net string, laddr *TCPAddr) (*TCPListener, error) {
	fd, err := internetSocket(net, laddr.toAddr(), nil, syscall.SOCK_STREAM, 0, "listen", sockaddrToTCP)
	if err != nil {
		return nil, err
	}
	err = syscall.Listen(fd.sysfd, listenerBacklog)
	if err != nil {
		// Listen failed; release the socket before reporting the error.
		closesocket(fd.sysfd)
		return nil, &OpError{"listen", net, laddr, err}
	}
	l := new(TCPListener)
	l.fd = fd
	return l, nil
}

// AcceptTCP accepts the next incoming call and returns the new connection
// and the remote address.
func (l *TCPListener) AcceptTCP() (c *TCPConn, err error) {
	if l == nil || l.fd == nil || l.fd.sysfd < 0 {
		return nil, syscall.EINVAL
	}
	fd, err := l.fd.accept(sockaddrToTCP)
	if err != nil {
		return nil, err
	}
	return newTCPConn(fd), nil
}

// Accept implements the Accept method in the Listener interface;
// it waits for the next call and returns a generic Conn.
func (l *TCPListener) Accept() (c Conn, err error) {
	c1, err := l.AcceptTCP()
	if err != nil {
		return nil, err
	}
	return c1, nil
}

// Close stops listening on the TCP address.
// Already Accepted connections are not closed.
func (l *TCPListener) Close() error {
	if l == nil || l.fd == nil {
		return syscall.EINVAL
	}
	return l.fd.Close()
}

// Addr returns the listener's network address, a *TCPAddr.
func (l *TCPListener) Addr() Addr { return l.fd.laddr }

// SetDeadline sets the deadline associated with the listener.
// A zero time value disables the deadline.
func (l *TCPListener) SetDeadline(t time.Time) error {
	if l == nil || l.fd == nil {
		return syscall.EINVAL
	}
	return setDeadline(l.fd, t)
}

// File returns a copy of the underlying os.File, set to blocking mode.
// It is the caller's responsibility to close f when finished.
// Closing c does not affect f, and closing f does not affect c.
func (l *TCPListener) File() (f *os.File, err error) { return l.fd.dup() }
|
Add statistics for search
|
package mediasort
import (
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/fatih/color"
mediasearch "github.com/jpillora/media-sort/search"
"github.com/jpillora/sizestr"
"gopkg.in/fsnotify.v1"
)
//Config is a sorter configuration
type Config struct {
	Targets  []string `opts:"mode=arg,min=1"`
	TVDir    string   `opts:"help=tv series base directory (defaults to current directory)"`
	MovieDir string   `opts:"help=movie base directory (defaults to current directory)"`
	PathConfig `mode:"embedded"`
	Extensions  string `opts:"help=types of files that should be sorted"`
	Concurrency int    `opts:"help=search concurrency [warning] setting this too high can cause rate-limiting errors"`
	FileLimit   int    `opts:"help=maximum number of files to search"`
	// Tag was previously split into a malformed `default:"is 95, ..."` key;
	// struct tags must be well-formed key:"value" pairs, so the default text
	// is folded back into the help string.
	AccuracyThreshold int           `opts:"help=filename match accuracy threshold (default is 95, perfect match is 100)"`
	MinFileSize       sizestr.Bytes `opts:"help=minimum file size"`
	Recursive         bool          `opts:"help=also search through subdirectories"`
	DryRun            bool          `opts:"help=perform sort but don't actually move any files"`
	SkipHidden        bool          `opts:"help=skip dot files"`
	Action            action        `opts:"help=how to treat the files (available <copy|link|move>)"`
	SymLink           bool          `opts:"help=use symlinks instead of hardlinks when linking the new files"`
	Overwrite         bool          `opts:"help=overwrites duplicates"`
	OverwriteIfLarger bool          `opts:"help=overwrites duplicates if the new file is larger"`
	Watch             bool          `opts:"help=watch the specified directories for changes and re-sort on change"`
	WatchDelay        time.Duration `opts:"help=delay before next sort after a change"`
	Verbose           bool          `opts:"help=verbose logs"`
}
//fsSort is a media sorter. It carries the user Config plus the mutable
//state of one scan/sort pass.
type fsSort struct {
	Config
	validExts map[string]bool      //normalized ".ext" -> allowed
	sorts     map[string]*fileSort //files selected for sorting, keyed by path
	dirs      map[string]bool      //directories seen (used by watch mode)
	stats     struct {
		found, matched, moved int //note: moved is never incremented in this view
	}
	linkType linkType //hardLink unless Config.SymLink is set
}

//fileSort is one file queued for sorting.
type fileSort struct {
	id     int         //1-based position, used in log prefixes
	path   string      //source path
	info   os.FileInfo //stat result captured at scan time
	result *Result     //NOTE(review): result/err appear unused in this view — confirm
	err    error
}

//action selects how sorted files are produced (see constants below).
type action string

const (
	moveAction action = "move"
	linkAction action = "link"
	copyAction action = "copy"
)

//linkType selects hard vs symbolic links for linkAction.
type linkType string

const (
	hardLink linkType = "hardLink"
	symLink  linkType = "symLink"
)

//FileSystemSort performs a media sort
//against the file system using the provided
//configuration
func FileSystemSort(c Config) error {
	//default both base directories to the current directory
	if c.MovieDir == "" {
		c.MovieDir = "."
	}
	if c.TVDir == "" {
		c.TVDir = "."
	}
	//validate mutually-dependent options up front
	if c.Watch && !c.Recursive {
		return errors.New("Recursive mode is required to watch directories")
	}
	if c.Overwrite && c.OverwriteIfLarger {
		return errors.New("Overwrite is already specified, overwrite-if-larger is redundant")
	}
	if c.Action == linkAction && c.Overwrite {
		return errors.New("Link is already specified, Overwrite won't do anything")
	}
	switch c.Action {
	case moveAction, linkAction, copyAction:
		break
	default:
		return errors.New("Provided action is not available")
	}
	//init fs sort
	fs := &fsSort{
		Config:    c,
		validExts: map[string]bool{},
		linkType:  hardLink,
	}
	if c.SymLink {
		fs.linkType = symLink
	}
	//build the extension whitelist ("mp4,avi" -> {".mp4",".avi"})
	for _, e := range strings.Split(c.Extensions, ",") {
		fs.validExts["."+e] = true
	}
	//sort loop: one pass per iteration; watch mode loops forever
	for {
		//reset state
		fs.sorts = map[string]*fileSort{}
		fs.dirs = map[string]bool{}
		//look for files
		if err := fs.scan(); err != nil {
			return err
		}
		//ensure we have dirs to watch
		if fs.Watch && len(fs.dirs) == 0 {
			return errors.New("No directories to watch")
		}
		if len(fs.sorts) > 0 {
			//moment of truth - sort all files!
			if err := fs.sortAllFiles(); err != nil {
				return err
			}
		}
		//watch directories (or exit after a single pass)
		if !c.Watch {
			break
		}
		//blocks until a filesystem change is detected
		if err := fs.watch(); err != nil {
			return err
		}
	}
	return nil
}
//scan stats each configured target and feeds it to add, which
//recursively collects sortable files into fs.sorts.
func (fs *fsSort) scan() error {
	fs.verbf("scanning targets...")
	//walk every configured target, collecting sortable media files
	for _, target := range fs.Targets {
		fs.verbf("scanning: %s", target)
		stat, err := os.Stat(target)
		if err != nil {
			return err
		}
		if err := fs.add(target, stat); err != nil {
			return err
		}
	}
	//fail when nothing was collected and watch mode cannot help either
	if len(fs.sorts) == 0 && (!fs.Watch || len(fs.dirs) == 0) {
		return fmt.Errorf("No sortable files found (%d files checked)", fs.stats.found)
	}
	fs.verbf("scanned targets. found #%d", fs.stats.found)
	return nil
}
//sortAllFiles sorts every collected file with bounded concurrency;
//individual failures are logged, not returned.
func (fs *fsSort) sortAllFiles() error {
	fs.verbf("sorting files...")
	//perform sort
	if fs.DryRun {
		log.Println(color.CyanString("[Dryrun]"))
	}
	//sort concurrency-many files at a time,
	//wait for all to complete and show errors
	queue := make(chan bool, fs.Concurrency) //semaphore: cap = max in-flight sorts
	wg := &sync.WaitGroup{}
	sortFile := func(file *fileSort) {
		if err := fs.sortFile(file); err != nil {
			log.Printf("[#%d/%d] %s\n └─> %s\n", file.id, len(fs.sorts), color.RedString(file.path), err)
		}
		<-queue //release semaphore slot
		wg.Done()
	}
	for _, file := range fs.sorts {
		wg.Add(1)
		queue <- true //acquire slot (blocks once Concurrency sorts are running)
		go sortFile(file)
	}
	wg.Wait()
	return nil
}

//watch blocks until any watched directory changes (or a watcher error
//occurs), then waits WatchDelay before returning so the caller re-sorts.
func (fs *fsSort) watch() error {
	if len(fs.dirs) == 0 {
		return errors.New("No directories to watch")
	}
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return fmt.Errorf("Failed to create file watcher: %s", err)
	}
	for dir := range fs.dirs {
		if err := watcher.Add(dir); err != nil {
			return fmt.Errorf("Failed to watch directory: %s", err)
		}
		log.Printf("Watching %s for changes...", color.CyanString(dir))
	}
	//wait for the first event or error; a fresh watcher is made per pass
	select {
	case <-watcher.Events:
	case err := <-watcher.Errors:
		fs.verbf("watch error detected: %s", err)
	}
	go watcher.Close() //close asynchronously; close errors are ignored (best-effort)
	log.Printf("Change detected, re-sorting in %s...", fs.WatchDelay)
	time.Sleep(fs.WatchDelay)
	return nil
}
//add inspects path and either records it as a sortable file, recurses
//into it (directories, when Recursive is set), or skips it.
func (fs *fsSort) add(path string, info os.FileInfo) error {
	//skip hidden files and directories
	if fs.SkipHidden && strings.HasPrefix(info.Name(), ".") {
		fs.verbf("skip hidden file: %s", path)
		return nil
	}
	//stop collecting once the file limit has been reached
	if len(fs.sorts) >= fs.FileLimit {
		fs.verbf("skip file: %s. surpassed file limit: %d", path, fs.FileLimit)
		return nil
	}
	//add regular files (non-symlinks)
	if info.Mode().IsRegular() {
		fs.stats.found++
		//skip unmatched file types
		if !fs.validExts[filepath.Ext(path)] {
			fs.verbf("skip unmatched file ext: %s", path)
			return nil
		}
		//skip small files
		if info.Size() < int64(fs.MinFileSize) {
			fs.verbf("skip small file: %s", path)
			return nil
		}
		fs.sorts[path] = &fileSort{id: len(fs.sorts) + 1, path: path, info: info}
		fs.stats.matched++
		return nil
	}
	//recurse into directories
	if info.IsDir() {
		if !fs.Recursive {
			return errors.New("Recursive mode (-r) is required to sort directories")
		}
		//note directory
		fs.dirs[path] = true
		//add all files in dir
		infos, err := ioutil.ReadDir(path)
		if err != nil {
			return err
		}
		for _, info := range infos {
			p := filepath.Join(path, info.Name())
			//recurse
			if err := fs.add(p, info); err != nil {
				return err
			}
		}
		//bug fix: return here so directories are not also logged as
		//"skip non-regular file" by the fall-through below
		return nil
	}
	//skip links,pipes,etc
	fs.verbf("skip non-regular file: %s", path)
	return nil
}
//sortFile resolves the destination path for one file and applies the
//configured action (move/copy/link), handling an adjacent .srt subtitle
//file and duplicate-destination rules along the way.
func (fs *fsSort) sortFile(file *fileSort) error {
	//query the media search for a match above the accuracy threshold
	result, err := SortThreshold(file.path, fs.AccuracyThreshold)
	if err != nil {
		return err
	}
	newPath, err := result.PrettyPath(fs.PathConfig)
	if err != nil {
		return err
	}
	//pick the base directory from the detected media type
	baseDir := ""
	switch mediasearch.MediaType(result.MType) {
	case mediasearch.Series:
		baseDir = fs.TVDir
	case mediasearch.Movie:
		baseDir = fs.MovieDir
	default:
		return fmt.Errorf("Invalid result type: %s", result.MType)
	}
	newPath = filepath.Join(baseDir, newPath)
	//check for subs.srt file
	pathSubs := strings.TrimSuffix(result.Path, filepath.Ext(result.Path)) + ".srt"
	_, err = os.Stat(pathSubs)
	hasSubs := err == nil
	subsExt := ""
	if hasSubs {
		subsExt = "," + color.GreenString("srt")
	}
	//found sort path
	log.Printf("[#%d/%d] %s\n └─> %s", file.id, len(fs.sorts), color.GreenString(result.Path)+subsExt, color.GreenString(newPath)+subsExt)
	if fs.DryRun {
		return nil //don't actually move
	}
	if result.Path == newPath {
		return nil //already sorted
	}
	//destination exists: decide whether overwriting is allowed
	if newInfo, err := os.Stat(newPath); err == nil {
		fileIsLarger := file.info.Size() > newInfo.Size()
		overwrite := fs.Overwrite || (fs.OverwriteIfLarger && fileIsLarger)
		//check if it the same file
		if !os.SameFile(file.info, newInfo) {
			if !overwrite {
				return fmt.Errorf("File already exists '%s' (try setting --overwrite)", newPath)
			}
		} else {
			return nil // File are the same
		}
	}
	// mkdir -p
	err = os.MkdirAll(filepath.Dir(newPath), 0755)
	if err != nil {
		return err //failed to mkdir
	}
	// treat the file
	err = fs.action(result.Path, newPath)
	if err != nil {
		return err //failed to move
	}
	//if .srt file exists for the file, treat it too
	if hasSubs {
		newPathSubs := strings.TrimSuffix(newPath, filepath.Ext(newPath)) + ".srt"
		fs.action(pathSubs, newPathSubs) //best-effort
	}
	return nil
}
//verbf logs the formatted message only when verbose mode is enabled.
func (fs *fsSort) verbf(format string, args ...interface{}) {
	if !fs.Verbose {
		return
	}
	log.Printf(format, args...)
}
//action applies the configured Action (move/copy/link) to src,
//producing dst, and returns the underlying operation's error.
func (fs *fsSort) action(src, dst string) error {
	switch fs.Action {
	case moveAction:
		return move(src, dst)
	case copyAction:
		return copy(src, dst)
	case linkAction:
		return link(src, dst, fs.linkType)
	}
	//bug fix: the "unknown action" error was previously returned
	//unconditionally, so every successful move/copy/link still
	//reported failure to the caller
	return errors.New("unknown action")
}
Expose Action and restore HardLink for backward compatibility
package mediasort
import (
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/fatih/color"
mediasearch "github.com/jpillora/media-sort/search"
"github.com/jpillora/sizestr"
"gopkg.in/fsnotify.v1"
)
//Config is a sorter configuration.
//Fields are exposed as CLI flags via their `opts` struct tags.
type Config struct {
	Targets  []string `opts:"mode=arg,min=1"`
	TVDir    string   `opts:"help=tv series base directory (defaults to current directory)"`
	MovieDir string   `opts:"help=movie base directory (defaults to current directory)"`
	PathConfig `mode:"embedded"`
	Extensions  string `opts:"help=types of files that should be sorted"`
	Concurrency int    `opts:"help=search concurrency [warning] setting this too high can cause rate-limiting errors"`
	FileLimit   int    `opts:"help=maximum number of files to search"`
	//NOTE(review): the separate `default` tag below reads like it was meant to
	//be part of the help text ("default is 95...") — confirm how the opts
	//library interprets it before changing.
	AccuracyThreshold int           `opts:"help=filename match accuracy threshold" default:"is 95, perfect match is 100"`
	MinFileSize       sizestr.Bytes `opts:"help=minimum file size"`
	Recursive         bool          `opts:"help=also search through subdirectories"`
	DryRun            bool          `opts:"help=perform sort but don't actually move any files"`
	SkipHidden        bool          `opts:"help=skip dot files"`
	//fixed user-facing help-text typo: "tread" -> "treat"
	Action            Action        `opts:"help=how to treat the files (available <copy|link|move>)"`
	SymLink           bool          `opts:"help=use symlinks instead of hardlinks when linking the new files"`
	HardLink          bool          `opts:"help=use links instead of copying when treating the new files (deprecated, used for compatibility)"`
	Overwrite         bool          `opts:"help=overwrites duplicates"`
	OverwriteIfLarger bool          `opts:"help=overwrites duplicates if the new file is larger"`
	Watch             bool          `opts:"help=watch the specified directories for changes and re-sort on change"`
	WatchDelay        time.Duration `opts:"help=delay before next sort after a change"`
	Verbose           bool          `opts:"help=verbose logs"`
}
//fsSort is a media sorter. It carries the user Config plus the mutable
//state of one scan/sort pass.
type fsSort struct {
	Config
	validExts map[string]bool      //normalized ".ext" -> allowed
	sorts     map[string]*fileSort //files selected for sorting, keyed by path
	dirs      map[string]bool      //directories seen (used by watch mode)
	stats     struct {
		found, matched, moved int //note: moved is never incremented in this view
	}
	linkType linkType //hardLink unless Config.SymLink is set
}

//fileSort is one file queued for sorting.
type fileSort struct {
	id     int         //1-based position, used in log prefixes
	path   string      //source path
	info   os.FileInfo //stat result captured at scan time
	result *Result     //NOTE(review): result/err appear unused in this view — confirm
	err    error
}

// Action represent the way to treat the created files
type Action string

const (
	// MoveAction the new files
	MoveAction Action = "move"
	// LinkAction the new files
	LinkAction Action = "link"
	// CopyAction the new files
	CopyAction Action = "copy"
)

//linkType selects hard vs symbolic links for LinkAction.
type linkType string

const (
	hardLink linkType = "hardLink"
	symLink  linkType = "symLink"
)

//FileSystemSort performs a media sort
//against the file system using the provided
//configuration
//
//NOTE(review): Config.HardLink is documented as a deprecated
//compatibility flag but is never read in this function or anywhere
//else in this view — confirm it is mapped onto Action/SymLink by the
//flag-parsing layer.
func FileSystemSort(c Config) error {
	//default both base directories to the current directory
	if c.MovieDir == "" {
		c.MovieDir = "."
	}
	if c.TVDir == "" {
		c.TVDir = "."
	}
	//validate mutually-dependent options up front
	if c.Watch && !c.Recursive {
		return errors.New("Recursive mode is required to watch directories")
	}
	if c.Overwrite && c.OverwriteIfLarger {
		return errors.New("Overwrite is already specified, overwrite-if-larger is redundant")
	}
	if c.Action == LinkAction && c.Overwrite {
		return errors.New("Link is already specified, Overwrite won't do anything")
	}
	switch c.Action {
	case MoveAction, LinkAction, CopyAction:
		break
	default:
		return errors.New("Provided action is not available")
	}
	//init fs sort
	fs := &fsSort{
		Config:    c,
		validExts: map[string]bool{},
		linkType:  hardLink,
	}
	if c.SymLink {
		fs.linkType = symLink
	}
	//build the extension whitelist ("mp4,avi" -> {".mp4",".avi"})
	for _, e := range strings.Split(c.Extensions, ",") {
		fs.validExts["."+e] = true
	}
	//sort loop: one pass per iteration; watch mode loops forever
	for {
		//reset state
		fs.sorts = map[string]*fileSort{}
		fs.dirs = map[string]bool{}
		//look for files
		if err := fs.scan(); err != nil {
			return err
		}
		//ensure we have dirs to watch
		if fs.Watch && len(fs.dirs) == 0 {
			return errors.New("No directories to watch")
		}
		if len(fs.sorts) > 0 {
			//moment of truth - sort all files!
			if err := fs.sortAllFiles(); err != nil {
				return err
			}
		}
		//watch directories (or exit after a single pass)
		if !c.Watch {
			break
		}
		//blocks until a filesystem change is detected
		if err := fs.watch(); err != nil {
			return err
		}
	}
	return nil
}
//scan stats each configured target and feeds it to add, which
//recursively collects sortable files into fs.sorts.
func (fs *fsSort) scan() error {
	fs.verbf("scanning targets...")
	//scan targets for media files
	for _, path := range fs.Targets {
		fs.verbf("scanning: %s", path)
		info, err := os.Stat(path)
		if err != nil {
			return err
		}
		if err = fs.add(path, info); err != nil {
			return err
		}
	}
	//ensure we found something (watch mode may proceed with only dirs)
	if len(fs.sorts) == 0 && (!fs.Watch || len(fs.dirs) == 0) {
		return fmt.Errorf("No sortable files found (%d files checked)", fs.stats.found)
	}
	fs.verbf("scanned targets. found #%d", fs.stats.found)
	return nil
}
//sortAllFiles sorts every collected file, running at most Concurrency
//sorts at once; individual failures are logged, not returned.
func (fs *fsSort) sortAllFiles() error {
	fs.verbf("sorting files...")
	if fs.DryRun {
		log.Println(color.CyanString("[Dryrun]"))
	}
	//bounded-concurrency fan-out: slots acts as a semaphore
	var wg sync.WaitGroup
	slots := make(chan bool, fs.Concurrency)
	for _, f := range fs.sorts {
		wg.Add(1)
		slots <- true //acquire a slot (blocks once full)
		go func(f *fileSort) {
			defer wg.Done()
			defer func() { <-slots }() //release the slot
			if err := fs.sortFile(f); err != nil {
				log.Printf("[#%d/%d] %s\n └─> %s\n", f.id, len(fs.sorts), color.RedString(f.path), err)
			}
		}(f) //pass f explicitly: avoid loop-variable capture
	}
	wg.Wait()
	return nil
}
//watch blocks until any watched directory changes (or a watcher error
//occurs), then waits WatchDelay before returning so the caller re-sorts.
func (fs *fsSort) watch() error {
	if len(fs.dirs) == 0 {
		return errors.New("No directories to watch")
	}
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return fmt.Errorf("Failed to create file watcher: %s", err)
	}
	for dir := range fs.dirs {
		if err := watcher.Add(dir); err != nil {
			return fmt.Errorf("Failed to watch directory: %s", err)
		}
		log.Printf("Watching %s for changes...", color.CyanString(dir))
	}
	//wait for the first event or error; a fresh watcher is made per pass
	select {
	case <-watcher.Events:
	case err := <-watcher.Errors:
		fs.verbf("watch error detected: %s", err)
	}
	go watcher.Close() //close asynchronously; close errors are ignored (best-effort)
	log.Printf("Change detected, re-sorting in %s...", fs.WatchDelay)
	time.Sleep(fs.WatchDelay)
	return nil
}
//add inspects path and either records it as a sortable file, recurses
//into it (directories, when Recursive is set), or skips it.
func (fs *fsSort) add(path string, info os.FileInfo) error {
	//skip hidden files and directories
	if fs.SkipHidden && strings.HasPrefix(info.Name(), ".") {
		fs.verbf("skip hidden file: %s", path)
		return nil
	}
	//stop collecting once the file limit has been reached
	if len(fs.sorts) >= fs.FileLimit {
		fs.verbf("skip file: %s. surpassed file limit: %d", path, fs.FileLimit)
		return nil
	}
	//add regular files (non-symlinks)
	if info.Mode().IsRegular() {
		fs.stats.found++
		//skip unmatched file types
		if !fs.validExts[filepath.Ext(path)] {
			fs.verbf("skip unmatched file ext: %s", path)
			return nil
		}
		//skip small files
		if info.Size() < int64(fs.MinFileSize) {
			fs.verbf("skip small file: %s", path)
			return nil
		}
		fs.sorts[path] = &fileSort{id: len(fs.sorts) + 1, path: path, info: info}
		fs.stats.matched++
		return nil
	}
	//recurse into directories
	if info.IsDir() {
		if !fs.Recursive {
			return errors.New("Recursive mode (-r) is required to sort directories")
		}
		//note directory
		fs.dirs[path] = true
		//add all files in dir
		infos, err := ioutil.ReadDir(path)
		if err != nil {
			return err
		}
		for _, info := range infos {
			p := filepath.Join(path, info.Name())
			//recurse
			if err := fs.add(p, info); err != nil {
				return err
			}
		}
		//bug fix: return here so directories are not also logged as
		//"skip non-regular file" by the fall-through below
		return nil
	}
	//skip links,pipes,etc
	fs.verbf("skip non-regular file: %s", path)
	return nil
}
//sortFile resolves the destination path for one file and applies the
//configured action (move/copy/link), handling an adjacent .srt subtitle
//file and duplicate-destination rules along the way.
func (fs *fsSort) sortFile(file *fileSort) error {
	//query the media search for a match above the accuracy threshold
	result, err := SortThreshold(file.path, fs.AccuracyThreshold)
	if err != nil {
		return err
	}
	newPath, err := result.PrettyPath(fs.PathConfig)
	if err != nil {
		return err
	}
	//pick the base directory from the detected media type
	baseDir := ""
	switch mediasearch.MediaType(result.MType) {
	case mediasearch.Series:
		baseDir = fs.TVDir
	case mediasearch.Movie:
		baseDir = fs.MovieDir
	default:
		return fmt.Errorf("Invalid result type: %s", result.MType)
	}
	newPath = filepath.Join(baseDir, newPath)
	//check for subs.srt file
	pathSubs := strings.TrimSuffix(result.Path, filepath.Ext(result.Path)) + ".srt"
	_, err = os.Stat(pathSubs)
	hasSubs := err == nil
	subsExt := ""
	if hasSubs {
		subsExt = "," + color.GreenString("srt")
	}
	//found sort path
	log.Printf("[#%d/%d] %s\n └─> %s", file.id, len(fs.sorts), color.GreenString(result.Path)+subsExt, color.GreenString(newPath)+subsExt)
	if fs.DryRun {
		return nil //don't actually move
	}
	if result.Path == newPath {
		return nil //already sorted
	}
	//destination exists: decide whether overwriting is allowed
	if newInfo, err := os.Stat(newPath); err == nil {
		fileIsLarger := file.info.Size() > newInfo.Size()
		overwrite := fs.Overwrite || (fs.OverwriteIfLarger && fileIsLarger)
		//check if it the same file
		if !os.SameFile(file.info, newInfo) {
			if !overwrite {
				return fmt.Errorf("File already exists '%s' (try setting --overwrite)", newPath)
			}
		} else {
			return nil // File are the same
		}
	}
	// mkdir -p
	err = os.MkdirAll(filepath.Dir(newPath), 0755)
	if err != nil {
		return err //failed to mkdir
	}
	// treat the file
	err = fs.action(result.Path, newPath)
	if err != nil {
		return err //failed to move
	}
	//if .srt file exists for the file, treat it too
	if hasSubs {
		newPathSubs := strings.TrimSuffix(newPath, filepath.Ext(newPath)) + ".srt"
		fs.action(pathSubs, newPathSubs) //best-effort
	}
	return nil
}

//verbf logs the formatted message only when verbose mode is enabled.
func (fs *fsSort) verbf(f string, args ...interface{}) {
	if fs.Verbose {
		log.Printf(f, args...)
	}
}
//action applies the configured Action (move/copy/link) to src,
//producing dst, and returns the underlying operation's error.
func (fs *fsSort) action(src, dst string) error {
	switch fs.Action {
	case MoveAction:
		return move(src, dst)
	case CopyAction:
		return copy(src, dst)
	case LinkAction:
		return link(src, dst, fs.linkType)
	}
	//bug fix: the "unknown action" error was previously returned
	//unconditionally, so every successful move/copy/link still
	//reported failure to the caller
	return errors.New("unknown action")
}
|
package zygo
// init stamps the build with its git tag and commit hash.
// GITLASTTAG/GITLASTCOMMIT are package-level vars declared elsewhere;
// this file is presumably regenerated by the release tooling — confirm.
func init() { GITLASTTAG = "v4.0"; GITLASTCOMMIT = "2924d0de39dcf13401e9048ae956f36236e81088" }
link to Go comment definition
package zygo
// init stamps the build with its git tag and commit hash.
// GITLASTTAG/GITLASTCOMMIT are package-level vars declared elsewhere;
// this file is presumably regenerated by the release tooling — confirm.
func init() { GITLASTTAG = "v4.0"; GITLASTCOMMIT = "01ca1478e707401070dedd5738ee450a3ac8ee7f" }
|
package zygo
// init stamps the build with its git tag and commit hash.
// GITLASTTAG/GITLASTCOMMIT are package-level vars declared elsewhere;
// this file is presumably regenerated by the release tooling — confirm.
func init() { GITLASTTAG = "v1.5.1"; GITLASTCOMMIT = "567aa375d1f0c4cbb0ec3994f6df4b93235a4879" }
v1.5.3 correct input continuation.
package zygo
// init stamps the build with its git tag and commit hash.
// GITLASTTAG/GITLASTCOMMIT are package-level vars declared elsewhere;
// this file is presumably regenerated by the release tooling — confirm.
func init() { GITLASTTAG = "v1.5.3"; GITLASTCOMMIT = "ec0e31304ec5c6b8e14d6a7cee286cf084aea194" }
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Data-driven templates for generating textual output such as
HTML.
Templates are executed by applying them to a data structure.
Annotations in the template refer to elements of the data
structure (typically a field of a struct or a key in a map)
to control execution and derive values to be displayed.
The template walks the structure as it executes and the
"cursor" @ represents the value at the current location
in the structure.
Data items may be values or pointers; the interface hides the
indirection.
In the following, 'field' is one of several things, according to the data.
- the name of a field of a struct (result = data.field)
- the value stored in a map under that key (result = data[field])
- the result of invoking a niladic single-valued method with that name
(result = data.field())
Major constructs ({} are metacharacters; [] marks optional elements):
{# comment }
A one-line comment.
{.section field} XXX [ {.or} YYY ] {.end}
Set @ to the value of the field. It may be an explicit @
to stay at the same point in the data. If the field is nil
or empty, execute YYY; otherwise execute XXX.
{.repeated section field} XXX [ {.alternates with} ZZZ ] [ {.or} YYY ] {.end}
Like .section, but field must be an array or slice. XXX
is executed for each element. If the array is nil or empty,
YYY is executed instead. If the {.alternates with} marker
is present, ZZZ is executed between iterations of XXX.
{field}
{field|formatter}
Insert the value of the field into the output. Field is
first looked for in the cursor, as in .section and .repeated.
If it is not found, the search continues in outer sections
until the top level is reached.
If a formatter is specified, it must be named in the formatter
map passed to the template set up routines or in the default
set ("html","str","") and is used to process the data for
output. The formatter function has signature
func(wr io.Writer, data interface{}, formatter string)
where wr is the destination for output, data is the field
value, and formatter is its name at the invocation site.
*/
package template
import (
"container/vector"
"fmt"
"io"
"io/ioutil"
"os"
"reflect"
"strings"
)
// Errors returned during parsing and execution. Users may extract the information and reformat
// if they desire.
type Error struct {
	Line int
	Msg  string
}

// String renders the error as "line N: message".
func (e *Error) String() string {
	return fmt.Sprintf("line %d: %s", e.Line, e.Msg)
}
// Most of the literals are aces.
var lbrace = []byte{'{'}
var rbrace = []byte{'}'}
var space = []byte{' '}
var tab = []byte{'\t'}

// The various types of "tokens", which are plain text or (usually) brace-delimited descriptors
const (
	tokAlternates = iota
	tokComment
	tokEnd
	tokLiteral
	tokOr
	tokRepeated
	tokSection
	tokText
	tokVariable
)

// FormatterMap is the type describing the mapping from formatter
// names to the functions that implement them.
type FormatterMap map[string]func(io.Writer, interface{}, string)

// Built-in formatters.
// HTMLFormatter and StringFormatter are defined elsewhere in this package.
var builtins = FormatterMap{
	"html": HTMLFormatter,
	"str":  StringFormatter,
	"":     StringFormatter,
}

// The parsed state of a template is a vector of xxxElement structs.
// Sections have line numbers so errors can be reported better during execution.

// Plain text.
type textElement struct {
	text []byte
}

// A literal such as .meta-left or .meta-right
type literalElement struct {
	text []byte
}

// A variable to be evaluated
type variableElement struct {
	linenum   int
	name      string
	formatter string // TODO(r): implement pipelines
}

// A .section block, possibly with a .or
type sectionElement struct {
	linenum int    // of .section itself
	field   string // cursor field for this block
	start   int    // first element
	or      int    // first element of .or block (-1 when absent)
	end     int    // one beyond last element
}

// A .repeated block, possibly with a .or and a .alternates
type repeatedElement struct {
	sectionElement     // It has the same structure...
	altstart       int // ... except for alternates (-1 when absent)
	altend         int
}

// Template is the type that represents a template definition.
// It is unchanged after parsing.
type Template struct {
	fmap FormatterMap // formatters for variables
	// Used during parsing:
	ldelim, rdelim []byte // delimiters; default {}
	buf            []byte // input text to process
	p              int    // position in buf
	linenum        int    // position in input
	// Parsed results:
	elems *vector.Vector
}
// Internal state for executing a Template. As we evaluate the struct,
// the data item descends into the fields associated with sections, etc.
// Parent is used to walk upwards to find variables higher in the tree.
type state struct {
	parent *state        // parent in hierarchy
	data   reflect.Value // the driver data for this section etc.
	wr     io.Writer     // where to send output
}

// clone returns a child state with new cursor data, inheriting the writer.
func (parent *state) clone(data reflect.Value) *state {
	return &state{parent, data, parent.wr}
}

// New creates a new template with the specified formatter map (which
// may be nil) to define auxiliary functions for formatting variables.
func New(fmap FormatterMap) *Template {
	t := new(Template)
	t.fmap = fmap
	t.ldelim = lbrace
	t.rdelim = rbrace
	t.elems = new(vector.Vector)
	return t
}
// Report error and stop executing. The line number must be provided explicitly.
func (t *Template) execError(st *state, line int, err string, args ...interface{}) {
	// bug fix: args must be spread with "..." — passing the slice as a
	// single operand formats it as one value instead of substituting
	// each argument into the format string.
	panic(&Error{line, fmt.Sprintf(err, args...)})
}

// Report error, panic to terminate parsing.
// The line number comes from the template state.
func (t *Template) parseError(err string, args ...interface{}) {
	panic(&Error{t.linenum, fmt.Sprintf(err, args...)})
}
// -- Lexical analysis

// white reports whether c is an ASCII white space character.
func white(c uint8) bool { return c == ' ' || c == '\t' || c == '\r' || c == '\n' }

// equal safely reports whether s[n:n+len(t)] == t.
func equal(s []byte, n int, t []byte) bool {
	rest := s[n:]
	if len(rest) < len(t) { // not enough space left for a match
		return false
	}
	for i := range t {
		if rest[i] != t[i] {
			return false
		}
	}
	return true
}
// nextItem returns the next item from the input buffer. If the returned
// item is empty, we are at EOF. The item will be either a
// delimited string or a non-empty string between delimited
// strings. Tokens stop at (but include, if plain text) a newline.
// Action tokens on a line by themselves drop the white space on
// either side, up to and including the newline.
func (t *Template) nextItem() []byte {
	special := false // is this a {.foo} directive, which means trim white space?
	// Delete surrounding white space if this {.foo} is the only thing on the line.
	trimSpace := t.p == 0 || t.buf[t.p-1] == '\n'
	start := t.p
	var i int
	// newline advances past a '\n' and keeps the line counter in sync.
	newline := func() {
		t.linenum++
		i++
	}
	// Leading white space up to but not including newline
	for i = start; i < len(t.buf); i++ {
		if t.buf[i] == '\n' || !white(t.buf[i]) {
			break
		}
	}
	if trimSpace {
		start = i
	} else if i > start {
		// white space is valid text
		t.p = i
		return t.buf[start:i]
	}
	// What's left is nothing, newline, delimited string, or plain text
Switch:
	switch {
	case i == len(t.buf):
		// EOF; nothing to do
	case t.buf[i] == '\n':
		newline()
	case equal(t.buf, i, t.ldelim):
		i += len(t.ldelim) // position after delimiter
		if i+1 < len(t.buf) && (t.buf[i] == '.' || t.buf[i] == '#') {
			special = true
		}
		// scan to the closing delimiter on this line
		for ; i < len(t.buf); i++ {
			if t.buf[i] == '\n' {
				break
			}
			if equal(t.buf, i, t.rdelim) {
				i += len(t.rdelim)
				break Switch
			}
		}
		// fell off the line/buffer without seeing the right delimiter
		t.parseError("unmatched opening delimiter")
		return nil
	default:
		// plain text: runs until newline or the next left delimiter
		for ; i < len(t.buf); i++ {
			if t.buf[i] == '\n' {
				newline()
				break
			}
			if equal(t.buf, i, t.ldelim) {
				break
			}
		}
	}
	item := t.buf[start:i]
	if special && trimSpace {
		// consume trailing white space
		for ; i < len(t.buf) && white(t.buf[i]); i++ {
			if t.buf[i] == '\n' {
				newline()
				break // stop before newline
			}
		}
	}
	t.p = i
	return item
}
// Turn a byte array into a white-space-split array of strings.
// White space is as defined by white(): space, tab, CR, NL.
func words(buf []byte) []string {
	s := make([]string, 0, 5)
	p := 0 // position in buf
	// one word per loop
	for p < len(buf) {
		// skip white space
		for p < len(buf) && white(buf[p]) {
			p++
		}
		// grab word
		start := p
		for p < len(buf) && !white(buf[p]) {
			p++
		}
		if start == p { // no text left
			break
		}
		// append grows the slice as needed, replacing the original's
		// manual capacity-doubling and element-copy bookkeeping.
		s = append(s, string(buf[start:p]))
	}
	return s
}
// Analyze an item and return its token type and, if it's an action item, an array of
// its constituent words.
func (t *Template) analyze(item []byte) (tok int, w []string) {
	// item is known to be non-empty
	if !equal(item, 0, t.ldelim) { // doesn't start with left delimiter
		tok = tokText
		return
	}
	if !equal(item, len(item)-len(t.rdelim), t.rdelim) { // doesn't end with right delimiter
		t.parseError("internal error: unmatched opening delimiter") // lexing should prevent this
		return
	}
	if len(item) <= len(t.ldelim)+len(t.rdelim) { // no contents
		t.parseError("empty directive")
		return
	}
	// Comment
	if item[len(t.ldelim)] == '#' {
		tok = tokComment
		return
	}
	// Split into words
	w = words(item[len(t.ldelim) : len(item)-len(t.rdelim)]) // drop final delimiter
	if len(w) == 0 {
		t.parseError("empty directive")
		return
	}
	// a single word with no leading '.' is a variable reference
	if len(w) == 1 && w[0][0] != '.' {
		tok = tokVariable
		return
	}
	switch w[0] {
	case ".meta-left", ".meta-right", ".space", ".tab":
		tok = tokLiteral
		return
	case ".or":
		tok = tokOr
		return
	case ".end":
		tok = tokEnd
		return
	case ".section":
		if len(w) != 2 {
			t.parseError("incorrect fields for .section: %s", item)
			return
		}
		tok = tokSection
		return
	case ".repeated":
		if len(w) != 3 || w[1] != "section" {
			t.parseError("incorrect fields for .repeated: %s", item)
			return
		}
		tok = tokRepeated
		return
	case ".alternates":
		if len(w) != 2 || w[1] != "with" {
			t.parseError("incorrect fields for .alternates: %s", item)
			return
		}
		tok = tokAlternates
		return
	}
	t.parseError("bad directive: %s", item)
	return
}

// -- Parsing

// Allocate a new variable-evaluation element.
// name_formatter is "name" or "name|formatter"; the formatter must exist
// in the user-supplied map or the builtin map, else it is a parse error.
func (t *Template) newVariable(name_formatter string) (v *variableElement) {
	name := name_formatter
	formatter := ""
	bar := strings.Index(name_formatter, "|")
	if bar >= 0 {
		name = name_formatter[0:bar]
		formatter = name_formatter[bar+1:]
	}
	// Probably ok, so let's build it.
	v = &variableElement{t.linenum, name, formatter}
	// We could remember the function address here and avoid the lookup later,
	// but it's more dynamic to let the user change the map contents underfoot.
	// We do require the name to be present, though.
	// Is it in user-supplied map?
	if t.fmap != nil {
		if _, ok := t.fmap[formatter]; ok {
			return
		}
	}
	// Is it in builtin map?
	if _, ok := builtins[formatter]; ok {
		return
	}
	t.parseError("unknown formatter: %s", formatter)
	return
}
// Grab the next item. If it's simple, just append it to the template.
// Otherwise return its details.
func (t *Template) parseSimple(item []byte) (done bool, tok int, w []string) {
	tok, w = t.analyze(item)
	done = true // assume for simplicity
	switch tok {
	case tokComment:
		return
	case tokText:
		t.elems.Push(&textElement{item})
		return
	case tokLiteral:
		switch w[0] {
		case ".meta-left":
			t.elems.Push(&literalElement{t.ldelim})
		case ".meta-right":
			t.elems.Push(&literalElement{t.rdelim})
		case ".space":
			t.elems.Push(&literalElement{space})
		case ".tab":
			t.elems.Push(&literalElement{tab})
		default:
			t.parseError("internal error: unknown literal: %s", w[0])
		}
		return
	case tokVariable:
		t.elems.Push(t.newVariable(w[0]))
		return
	}
	// structural tokens (.section/.repeated/.or/.end/.alternates) are
	// left for the caller to handle
	return false, tok, w
}

// parseRepeated and parseSection are mutually recursive

// parseRepeated parses the body of a {.repeated section field} up to its
// .end, recording element-index boundaries for the main, .or, and
// .alternates blocks. words is the analyzed directive ([".repeated",
// "section", field]).
func (t *Template) parseRepeated(words []string) *repeatedElement {
	r := new(repeatedElement)
	t.elems.Push(r)
	r.linenum = t.linenum
	r.field = words[2]
	// Scan section, collecting true and false (.or) blocks.
	r.start = t.elems.Len()
	r.or = -1
	r.altstart = -1
	r.altend = -1
Loop:
	for {
		item := t.nextItem()
		if len(item) == 0 {
			t.parseError("missing .end for .repeated section")
			break
		}
		done, tok, w := t.parseSimple(item)
		if done {
			continue
		}
		switch tok {
		case tokEnd:
			break Loop
		case tokOr:
			if r.or >= 0 {
				t.parseError("extra .or in .repeated section")
				break Loop
			}
			// .or also terminates any open .alternates block
			r.altend = t.elems.Len()
			r.or = t.elems.Len()
		case tokSection:
			t.parseSection(w)
		case tokRepeated:
			t.parseRepeated(w)
		case tokAlternates:
			if r.altstart >= 0 {
				t.parseError("extra .alternates in .repeated section")
				break Loop
			}
			if r.or >= 0 {
				t.parseError(".alternates inside .or block in .repeated section")
				break Loop
			}
			r.altstart = t.elems.Len()
		default:
			t.parseError("internal error: unknown repeated section item: %s", item)
			break Loop
		}
	}
	if r.altend < 0 {
		r.altend = t.elems.Len()
	}
	r.end = t.elems.Len()
	return r
}

// parseSection parses the body of a {.section field} up to its .end,
// recording element-index boundaries for the main and .or blocks.
// words is the analyzed directive ([".section", field]).
func (t *Template) parseSection(words []string) *sectionElement {
	s := new(sectionElement)
	t.elems.Push(s)
	s.linenum = t.linenum
	s.field = words[1]
	// Scan section, collecting true and false (.or) blocks.
	s.start = t.elems.Len()
	s.or = -1
Loop:
	for {
		item := t.nextItem()
		if len(item) == 0 {
			t.parseError("missing .end for .section")
			break
		}
		done, tok, w := t.parseSimple(item)
		if done {
			continue
		}
		switch tok {
		case tokEnd:
			break Loop
		case tokOr:
			if s.or >= 0 {
				t.parseError("extra .or in .section")
				break Loop
			}
			s.or = t.elems.Len()
		case tokSection:
			t.parseSection(w)
		case tokRepeated:
			t.parseRepeated(w)
		case tokAlternates:
			t.parseError(".alternates not in .repeated")
		default:
			t.parseError("internal error: unknown section item: %s", item)
		}
	}
	s.end = t.elems.Len()
	return s
}

// parse consumes the whole input buffer, appending parsed elements to
// t.elems; errors are reported via parseError (panic).
func (t *Template) parse() {
	for {
		item := t.nextItem()
		if len(item) == 0 {
			break
		}
		done, tok, w := t.parseSimple(item)
		if done {
			continue
		}
		switch tok {
		case tokOr, tokEnd, tokAlternates:
			t.parseError("unexpected %s", w[0])
		case tokSection:
			t.parseSection(w)
		case tokRepeated:
			t.parseRepeated(w)
		default:
			t.parseError("internal error: bad directive in parse: %s", item)
		}
	}
}
// -- Execution
// If the data for this template is a struct, find the named variable.
// Names of the form a.b.c are walked down the data tree.
// The special name "@" (the "cursor") denotes the current data.
// The value coming in (st.data) might need indirecting to reach
// a struct while the return value is not indirected - that is,
// it represents the actual named field.
func (st *state) findVar(s string) reflect.Value {
if s == "@" {
return st.data
}
data := st.data
for _, elem := range strings.Split(s, ".", 0) {
origData := data // for method lookup need value before indirection.
// Look up field; data must be a struct or map.
data = reflect.Indirect(data)
if data == nil {
return nil
}
if intf, ok := data.(*reflect.InterfaceValue); ok {
data = reflect.Indirect(intf.Elem())
}
switch typ := data.Type().(type) {
case *reflect.StructType:
if field, ok := typ.FieldByName(elem); ok {
data = data.(*reflect.StructValue).FieldByIndex(field.Index)
continue
}
case *reflect.MapType:
data = data.(*reflect.MapValue).Elem(reflect.NewValue(elem))
continue
}
// No luck with that name; is it a method?
if result, found := callMethod(origData, elem); found {
data = result
continue
}
return nil
}
return data
}
// See if name is a method of the value at some level of indirection.
// The return values are the result of the call (which may be nil if
// there's trouble) and whether a method of the right name exists with
// any signature.
func callMethod(data reflect.Value, name string) (result reflect.Value, found bool) {
	found = false
	// Method set depends on pointerness, and the value may be arbitrarily
	// indirect. Simplest approach is to walk down the pointer chain and
	// see if we can find the method at each step.
	// Most steps will see NumMethod() == 0.
	for {
		typ := data.Type()
		if nMethod := data.Type().NumMethod(); nMethod > 0 {
			for i := 0; i < nMethod; i++ {
				method := typ.Method(i)
				if method.Name == name {
					found = true // we found the name regardless
					// does receiver type match? (pointerness might be off)
					// In this reflect vintage Type.In(0) is the receiver.
					if typ == method.Type.In(0) {
						return call(data, method), found
					}
				}
			}
		}
		// Dereference one pointer level and retry; stop at a non-pointer.
		if nd, ok := data.(*reflect.PtrValue); ok {
			data = nd.Elem()
		} else {
			break
		}
	}
	return
}
// Invoke the method. If its signature is wrong, return nil.
// Only niladic, single-result methods are acceptable as template lookups.
func call(v reflect.Value, method reflect.Method) reflect.Value {
	funcType := method.Type
	// Method must take no arguments, meaning as a func it has one argument (the receiver)
	if funcType.NumIn() != 1 {
		return nil
	}
	// Method must return a single value.
	if funcType.NumOut() != 1 {
		return nil
	}
	// Result will be the zeroth element of the returned slice.
	return method.Func.Call([]reflect.Value{v})[0]
}
// Is there no data to look at?
// Used by .section/.repeated to decide between the main and .or blocks:
// nil, false, "", and zero-length arrays/slices count as empty;
// structs and maps are never empty. Unknown kinds default to empty.
func empty(v reflect.Value) bool {
	v = reflect.Indirect(v)
	if v == nil {
		return true
	}
	switch v := v.(type) {
	case *reflect.BoolValue:
		return v.Get() == false
	case *reflect.StringValue:
		return v.Get() == ""
	case *reflect.StructValue:
		return false
	case *reflect.MapValue:
		return false
	case *reflect.ArrayValue:
		return v.Len() == 0
	case *reflect.SliceValue:
		return v.Len() == 0
	}
	return true
}
// Look up a variable or method, up through the parent if necessary.
// Panics (via execError) when the name is not found at any level.
// NOTE(review): the error uses t.linenum, which is parse-time state,
// not the line of the failing directive.
func (t *Template) varValue(name string, st *state) reflect.Value {
	field := st.findVar(name)
	if field == nil {
		if st.parent == nil {
			t.execError(st, t.linenum, "name not found: %s", name)
		}
		// Not here; retry in the enclosing section's data.
		return t.varValue(name, st.parent)
	}
	// Unwrap a non-nil interface so callers see the concrete value.
	if iface, ok := field.(*reflect.InterfaceValue); ok && !iface.IsNil() {
		field = iface.Elem()
	}
	return field
}
// Evaluate a variable, looking up through the parent if necessary.
// If it has a formatter attached ({var|formatter}) run that too.
// The user-supplied formatter map takes precedence over the builtins,
// so callers can override "html", "str", and "".
func (t *Template) writeVariable(v *variableElement, st *state) {
	formatter := v.formatter
	val := t.varValue(v.name, st).Interface()
	// is it in user-supplied map?
	if t.fmap != nil {
		if fn, ok := t.fmap[formatter]; ok {
			fn(st.wr, val, formatter)
			return
		}
	}
	// is it in builtin map?
	if fn, ok := builtins[formatter]; ok {
		fn(st.wr, val, formatter)
		return
	}
	// Should be unreachable: newVariable validated the formatter at parse time.
	t.execError(st, v.linenum, "missing formatter %s for variable %s", formatter, v.name)
}
// Execute element i. Return next index to execute.
// Sections and repeated sections skip to their recorded end index so the
// flat element vector is traversed without re-entering nested bodies.
func (t *Template) executeElement(i int, st *state) int {
	switch elem := t.elems.At(i).(type) {
	case *textElement:
		st.wr.Write(elem.text)
		return i + 1
	case *literalElement:
		st.wr.Write(elem.text)
		return i + 1
	case *variableElement:
		t.writeVariable(elem, st)
		return i + 1
	case *sectionElement:
		t.executeSection(elem, st)
		return elem.end
	case *repeatedElement:
		t.executeRepeated(elem, st)
		return elem.end
	}
	e := t.elems.At(i)
	// Unknown element type: a parser invariant was violated.
	t.execError(st, 0, "internal error: bad directive in execute: %v %T\n", reflect.NewValue(e).Interface(), e)
	return 0
}
// Execute the template: run the elements in [start, end), letting each
// element report the index at which execution resumes.
func (t *Template) execute(start, end int, st *state) {
	i := start
	for i < end {
		i = t.executeElement(i, st)
	}
}
// Execute a .section
// Descends the cursor into the named field and runs either the main
// block or the .or block depending on whether the field is empty.
func (t *Template) executeSection(s *sectionElement, st *state) {
	// Find driver data for this section. It must be in the current struct.
	field := t.varValue(s.field, st)
	if field == nil {
		t.execError(st, s.linenum, ".section: cannot find field %s in %s", s.field, reflect.Indirect(st.data).Type())
	}
	// New state: the section's field becomes the cursor (@).
	st = st.clone(field)
	start, end := s.start, s.or
	if !empty(field) {
		// Execute the normal block.
		if end < 0 {
			end = s.end // no .or block; run through to .end
		}
	} else {
		// Execute the .or block. If it's missing, do nothing.
		start, end = s.or, s.end
		if start < 0 {
			return
		}
	}
	for i := start; i < end; {
		i = t.executeElement(i, st)
	}
}
// Return the result of calling the Iter method on v, or nil.
// Accepts only a niladic Iter returning a receivable channel; the
// channel lets arbitrary types act as .repeated section data sources.
func iter(v reflect.Value) *reflect.ChanValue {
	for j := 0; j < v.Type().NumMethod(); j++ {
		mth := v.Type().Method(j)
		fv := v.Method(j)
		ft := fv.Type().(*reflect.FuncType)
		// TODO(rsc): NumIn() should return 0 here, because ft is from a curried FuncValue.
		if mth.Name != "Iter" || ft.NumIn() != 1 || ft.NumOut() != 1 {
			continue
		}
		// The single result must be a channel we can receive from.
		ct, ok := ft.Out(0).(*reflect.ChanType)
		if !ok || ct.Dir()&reflect.RecvDir == 0 {
			continue
		}
		return fv.Call(nil)[0].(*reflect.ChanValue)
	}
	return nil
}
// Execute a .repeated section
// Runs the main block once per element of the field (array/slice, map,
// or Iter channel), inserting the .alternates block between iterations,
// and falls back to the .or block when the collection is empty.
func (t *Template) executeRepeated(r *repeatedElement, st *state) {
	// Find driver data for this section. It must be in the current struct.
	field := t.varValue(r.field, st)
	if field == nil {
		t.execError(st, r.linenum, ".repeated: cannot find field %s in %s", r.field, reflect.Indirect(st.data).Type())
	}
	// Main block ends at the first of: .or, .alternates, or .end.
	start, end := r.start, r.or
	if end < 0 {
		end = r.end
	}
	if r.altstart >= 0 {
		end = r.altstart
	}
	first := true
	// Code common to all the loops.
	loopBody := func(newst *state) {
		// .alternates between elements
		if !first && r.altstart >= 0 {
			for i := r.altstart; i < r.altend; {
				i = t.executeElement(i, newst)
			}
		}
		first = false
		for i := start; i < end; {
			i = t.executeElement(i, newst)
		}
	}
	// Three supported collection shapes, tried in order.
	if array, ok := field.(reflect.ArrayOrSliceValue); ok {
		for j := 0; j < array.Len(); j++ {
			loopBody(st.clone(array.Elem(j)))
		}
	} else if m, ok := field.(*reflect.MapValue); ok {
		for _, key := range m.Keys() {
			loopBody(st.clone(m.Elem(key)))
		}
	} else if ch := iter(field); ch != nil {
		for {
			e := ch.Recv()
			// Pre-Go 1 channel API: Closed() is checked after the receive.
			if ch.Closed() {
				break
			}
			loopBody(st.clone(e))
		}
	} else {
		t.execError(st, r.linenum, ".repeated: cannot repeat %s (type %s)",
			r.field, field.Type())
	}
	if first {
		// Empty. Execute the .or block, once. If it's missing, do nothing.
		start, end := r.or, r.end
		if start >= 0 {
			newst := st.clone(field)
			for i := start; i < end; {
				i = t.executeElement(i, newst)
			}
		}
		return
	}
}
// A valid delimiter must contain no white space and be non-empty.
func validDelim(d []byte) bool {
	if len(d) == 0 {
		return false
	}
	for i := 0; i < len(d); i++ {
		if white(d[i]) {
			return false
		}
	}
	return true
}
// checkError is a deferred function to turn a panic with type *Error into a plain error return.
// Other panics are unexpected and so are re-enabled.
// Parse/Execute defer this over their named err result so the panics
// raised by parseError/execError become ordinary returns.
func checkError(error *os.Error) {
	if v := recover(); v != nil {
		if e, ok := v.(*Error); ok {
			*error = e
		} else {
			// runtime errors should crash
			panic(v)
		}
	}
}
// -- Public interface
// Parse initializes a Template by parsing its definition. The string
// s contains the template text. If any errors occur, Parse returns
// the error.
// Re-parsing with a new string replaces any previous contents' buffer
// state; the template must have been created with New.
func (t *Template) Parse(s string) (err os.Error) {
	if t.elems == nil {
		return &Error{1, "template not allocated with New"}
	}
	if !validDelim(t.ldelim) || !validDelim(t.rdelim) {
		return &Error{1, fmt.Sprintf("bad delimiter strings %q %q", t.ldelim, t.rdelim)}
	}
	// Convert parseError panics into the err return value.
	defer checkError(&err)
	t.buf = []byte(s)
	t.p = 0
	t.linenum = 1
	t.parse()
	return nil
}
// Execute applies a parsed template to the specified data object,
// generating output to wr.
// Execution errors (missing names, bad sections) surface through the
// deferred checkError rather than as panics.
func (t *Template) Execute(data interface{}, wr io.Writer) (err os.Error) {
	// Extract the driver data.
	val := reflect.NewValue(data)
	defer checkError(&err)
	t.p = 0
	t.execute(0, t.elems.Len(), &state{nil, val, wr})
	return nil
}
// SetDelims sets the left and right delimiters for operations in the
// template. They are validated during parsing. They could be
// validated here but it's better to keep the routine simple. The
// delimiters are very rarely invalid and Parse has the necessary
// error-handling interface already.
// Call before Parse; defaults are "{" and "}".
func (t *Template) SetDelims(left, right string) {
	t.ldelim = []byte(left)
	t.rdelim = []byte(right)
}
// Parse creates a Template with default parameters (such as {} for
// metacharacters). The string s contains the template text while
// the formatter map fmap, which may be nil, defines auxiliary functions
// for formatting variables. The template is returned. If any errors
// occur, err will be non-nil.
func Parse(s string, fmap FormatterMap) (t *Template, err os.Error) {
	t = New(fmap)
	err = t.Parse(s)
	if err != nil {
		t = nil // never return a half-parsed template
	}
	return
}
// ParseFile is a wrapper function that creates a Template with default
// parameters (such as {} for metacharacters). The filename identifies
// a file containing the template text, while the formatter map fmap, which
// may be nil, defines auxiliary functions for formatting variables.
// The template is returned. If any errors occur, err will be non-nil.
func ParseFile(filename string, fmap FormatterMap) (t *Template, err os.Error) {
	b, err := ioutil.ReadFile(filename)
	if err != nil {
		return nil, err
	}
	return Parse(string(b), fmap)
}
// MustParse is like Parse but panics if the template cannot be parsed.
// Intended for statically-known templates, e.g. package initialization.
func MustParse(s string, fmap FormatterMap) *Template {
	t, err := Parse(s, fmap)
	if err != nil {
		panic("template.MustParse error: " + err.String())
	}
	return t
}
// MustParseFile is like ParseFile but panics if the file cannot be read
// or the template cannot be parsed.
func MustParseFile(filename string, fmap FormatterMap) *Template {
	b, err := ioutil.ReadFile(filename)
	if err != nil {
		panic("template.MustParseFile error: " + err.String())
	}
	return MustParse(string(b), fmap)
}
template: regularize the handling of interfaces, pointers, and
methods when looking up names.
Fixes issue 764.
R=rsc
CC=golang-dev
http://codereview.appspot.com/1170041
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Data-driven templates for generating textual output such as
HTML.
Templates are executed by applying them to a data structure.
Annotations in the template refer to elements of the data
structure (typically a field of a struct or a key in a map)
to control execution and derive values to be displayed.
The template walks the structure as it executes and the
"cursor" @ represents the value at the current location
in the structure.
Data items may be values or pointers; the interface hides the
indirection.
In the following, 'field' is one of several things, according to the data.
- the name of a field of a struct (result = data.field)
- the value stored in a map under that key (result = data[field])
- the result of invoking a niladic single-valued method with that name
(result = data.field())
Major constructs ({} are metacharacters; [] marks optional elements):
{# comment }
A one-line comment.
{.section field} XXX [ {.or} YYY ] {.end}
Set @ to the value of the field. It may be an explicit @
to stay at the same point in the data. If the field is nil
or empty, execute YYY; otherwise execute XXX.
{.repeated section field} XXX [ {.alternates with} ZZZ ] [ {.or} YYY ] {.end}
Like .section, but field must be an array or slice. XXX
is executed for each element. If the array is nil or empty,
YYY is executed instead. If the {.alternates with} marker
is present, ZZZ is executed between iterations of XXX.
{field}
{field|formatter}
Insert the value of the field into the output. Field is
first looked for in the cursor, as in .section and .repeated.
If it is not found, the search continues in outer sections
until the top level is reached.
If a formatter is specified, it must be named in the formatter
map passed to the template set up routines or in the default
set ("html","str","") and is used to process the data for
output. The formatter function has signature
	func(wr io.Writer, data interface{}, formatter string)
where wr is the destination for output, data is the field
value, and formatter is its name at the invocation site.
*/
package template
import (
"container/vector"
"fmt"
"io"
"io/ioutil"
"os"
"reflect"
"strings"
)
// Errors returned during parsing and execution. Users may extract the information and reformat
// if they desire.
type Error struct {
	Line int
	Msg string
}
// String renders the error as "line N: message".
func (e *Error) String() string { return fmt.Sprintf("line %d: %s", e.Line, e.Msg) }
// Most of the literals are aces.
var lbrace = []byte{'{'}
var rbrace = []byte{'}'}
var space = []byte{' '}
var tab = []byte{'\t'}
// The various types of "tokens", which are plain text or (usually) brace-delimited descriptors
const (
	tokAlternates = iota
	tokComment
	tokEnd
	tokLiteral
	tokOr
	tokRepeated
	tokSection
	tokText
	tokVariable
)
// FormatterMap is the type describing the mapping from formatter
// names to the functions that implement them.
type FormatterMap map[string]func(io.Writer, interface{}, string)
// Built-in formatters.
// The empty name is the default applied to a bare {field}.
var builtins = FormatterMap{
	"html": HTMLFormatter,
	"str":  StringFormatter,
	"":     StringFormatter,
}
// The parsed state of a template is a vector of xxxElement structs.
// Sections have line numbers so errors can be reported better during execution.
// Plain text.
type textElement struct {
	text []byte
}
// A literal such as .meta-left or .meta-right
type literalElement struct {
	text []byte
}
// A variable to be evaluated
type variableElement struct {
	linenum   int
	name      string
	formatter string // TODO(r): implement pipelines
}
// A .section block, possibly with a .or
// Indices refer to positions in the template's flat element vector;
// -1 marks an absent block.
type sectionElement struct {
	linenum int    // of .section itself
	field   string // cursor field for this block
	start   int    // first element
	or      int    // first element of .or block
	end     int    // one beyond last element
}
// A .repeated block, possibly with a .or and a .alternates
type repeatedElement struct {
	sectionElement     // It has the same structure...
	altstart       int // ... except for alternates
	altend         int
}
// Template is the type that represents a template definition.
// It is unchanged after parsing.
// NOTE(review): buf/p/linenum are parse-time scratch state; a Template
// is not safe for concurrent Parse calls.
type Template struct {
	fmap FormatterMap // formatters for variables
	// Used during parsing:
	ldelim, rdelim []byte // delimiters; default {}
	buf            []byte // input text to process
	p              int    // position in buf
	linenum        int    // position in input
	// Parsed results:
	elems *vector.Vector
}
// Internal state for executing a Template. As we evaluate the struct,
// the data item descends into the fields associated with sections, etc.
// Parent is used to walk upwards to find variables higher in the tree.
type state struct {
	parent *state        // parent in hierarchy
	data   reflect.Value // the driver data for this section etc.
	wr     io.Writer     // where to send output
}
// clone returns a child state with the given cursor data, sharing the
// parent's writer and linking back for upward name lookup.
func (parent *state) clone(data reflect.Value) *state {
	return &state{parent, data, parent.wr}
}
// New creates a new template with the specified formatter map (which
// may be nil) to define auxiliary functions for formatting variables.
// The delimiters default to "{" and "}".
func New(fmap FormatterMap) *Template {
	return &Template{
		fmap:   fmap,
		ldelim: lbrace,
		rdelim: rbrace,
		elems:  new(vector.Vector),
	}
}
// Report error and stop executing. The line number must be provided explicitly.
// The panic carries an *Error, which the deferred checkError in
// Execute converts into a plain error return.
func (t *Template) execError(st *state, line int, err string, args ...interface{}) {
	// Spread args with ... so Sprintf sees the individual operands.
	// Passing the slice as a single value rendered it as "[a b c]" and
	// left the format's verbs unmatched.
	panic(&Error{line, fmt.Sprintf(err, args...)})
}
// Report error, panic to terminate parsing.
// The line number comes from the template state.
func (t *Template) parseError(err string, args ...interface{}) {
	// Spread args with ... so Sprintf formats each operand individually
	// instead of printing the whole slice as one value.
	panic(&Error{t.linenum, fmt.Sprintf(err, args...)})
}
// -- Lexical analysis
// white reports whether c is one of the four ASCII white-space
// characters the lexer cares about: space, tab, CR, or LF.
func white(c uint8) bool {
	switch c {
	case ' ', '\t', '\r', '\n':
		return true
	}
	return false
}
// Safely, does s[n:n+len(t)] == t?
// Never panics: when fewer than len(t) bytes remain after position n,
// the answer is simply false.
func equal(s []byte, n int, t []byte) bool {
	rest := s[n:]
	if len(rest) < len(t) {
		// not enough space left for a match
		return false
	}
	for i := 0; i < len(t); i++ {
		if rest[i] != t[i] {
			return false
		}
	}
	return true
}
// nextItem returns the next item from the input buffer. If the returned
// item is empty, we are at EOF. The item will be either a
// delimited string or a non-empty string between delimited
// strings. Tokens stop at (but include, if plain text) a newline.
// Action tokens on a line by themselves drop the white space on
// either side, up to and including the newline.
// Advances t.p past the returned item and keeps t.linenum current.
func (t *Template) nextItem() []byte {
	special := false // is this a {.foo} directive, which means trim white space?
	// Delete surrounding white space if this {.foo} is the only thing on the line.
	trimSpace := t.p == 0 || t.buf[t.p-1] == '\n'
	start := t.p
	var i int
	newline := func() {
		t.linenum++
		i++ // consume the '\n' as part of the item
	}
	// Leading white space up to but not including newline
	for i = start; i < len(t.buf); i++ {
		if t.buf[i] == '\n' || !white(t.buf[i]) {
			break
		}
	}
	if trimSpace {
		start = i
	} else if i > start {
		// white space is valid text
		t.p = i
		return t.buf[start:i]
	}
	// What's left is nothing, newline, delimited string, or plain text
Switch:
	switch {
	case i == len(t.buf):
		// EOF; nothing to do
	case t.buf[i] == '\n':
		newline()
	case equal(t.buf, i, t.ldelim):
		// A delimited item; scan to the matching right delimiter.
		i += len(t.ldelim) // position after delimiter
		if i+1 < len(t.buf) && (t.buf[i] == '.' || t.buf[i] == '#') {
			special = true
		}
		for ; i < len(t.buf); i++ {
			if t.buf[i] == '\n' {
				break // delimited items may not span lines
			}
			if equal(t.buf, i, t.rdelim) {
				i += len(t.rdelim)
				break Switch
			}
		}
		t.parseError("unmatched opening delimiter")
		return nil
	default:
		// Plain text: up to newline or the next opening delimiter.
		for ; i < len(t.buf); i++ {
			if t.buf[i] == '\n' {
				newline()
				break
			}
			if equal(t.buf, i, t.ldelim) {
				break
			}
		}
	}
	item := t.buf[start:i]
	if special && trimSpace {
		// consume trailing white space
		for ; i < len(t.buf) && white(t.buf[i]); i++ {
			if t.buf[i] == '\n' {
				newline()
				break // stop before newline
			}
		}
	}
	t.p = i
	return item
}
// Turn a byte array into a white-space-split array of strings.
// Leading, trailing, and repeated white space produce no empty entries.
func words(buf []byte) []string {
	s := make([]string, 0, 5) // most directives have few words
	p := 0                    // position in buf
	// one word per loop
	for {
		// skip white space
		for ; p < len(buf) && white(buf[p]); p++ {
		}
		// grab word
		start := p
		for ; p < len(buf) && !white(buf[p]); p++ {
		}
		if start == p { // no text left
			break
		}
		// append grows the slice as needed; the original hand-rolled
		// capacity-doubling-and-copy predates the builtin.
		s = append(s, string(buf[start:p]))
	}
	return s
}
// Analyze an item and return its token type and, if it's an action item, an array of
// its constituent words.
// Validation failures report through parseError (panic); the returns
// after each call are unreachable but keep the flow explicit.
func (t *Template) analyze(item []byte) (tok int, w []string) {
	// item is known to be non-empty
	if !equal(item, 0, t.ldelim) { // doesn't start with left delimiter
		tok = tokText
		return
	}
	if !equal(item, len(item)-len(t.rdelim), t.rdelim) { // doesn't end with right delimiter
		t.parseError("internal error: unmatched opening delimiter") // lexing should prevent this
		return
	}
	if len(item) <= len(t.ldelim)+len(t.rdelim) { // no contents
		t.parseError("empty directive")
		return
	}
	// Comment
	if item[len(t.ldelim)] == '#' {
		tok = tokComment
		return
	}
	// Split into words
	w = words(item[len(t.ldelim) : len(item)-len(t.rdelim)]) // drop final delimiter
	if len(w) == 0 {
		t.parseError("empty directive")
		return
	}
	// A single word not starting with '.' is a variable reference.
	if len(w) == 1 && w[0][0] != '.' {
		tok = tokVariable
		return
	}
	switch w[0] {
	case ".meta-left", ".meta-right", ".space", ".tab":
		tok = tokLiteral
		return
	case ".or":
		tok = tokOr
		return
	case ".end":
		tok = tokEnd
		return
	case ".section":
		if len(w) != 2 {
			t.parseError("incorrect fields for .section: %s", item)
			return
		}
		tok = tokSection
		return
	case ".repeated":
		if len(w) != 3 || w[1] != "section" {
			t.parseError("incorrect fields for .repeated: %s", item)
			return
		}
		tok = tokRepeated
		return
	case ".alternates":
		if len(w) != 2 || w[1] != "with" {
			t.parseError("incorrect fields for .alternates: %s", item)
			return
		}
		tok = tokAlternates
		return
	}
	t.parseError("bad directive: %s", item)
	return
}
// -- Parsing
// Allocate a new variable-evaluation element.
// Splits "name|formatter" on the first '|' and validates that the
// formatter exists; a missing '|' leaves the default "" formatter.
func (t *Template) newVariable(name_formatter string) (v *variableElement) {
	name := name_formatter
	formatter := ""
	bar := strings.Index(name_formatter, "|")
	if bar >= 0 {
		name = name_formatter[0:bar]
		formatter = name_formatter[bar+1:]
	}
	// Probably ok, so let's build it.
	v = &variableElement{t.linenum, name, formatter}
	// We could remember the function address here and avoid the lookup later,
	// but it's more dynamic to let the user change the map contents underfoot.
	// We do require the name to be present, though.
	// Is it in user-supplied map?
	if t.fmap != nil {
		if _, ok := t.fmap[formatter]; ok {
			return
		}
	}
	// Is it in builtin map?
	if _, ok := builtins[formatter]; ok {
		return
	}
	t.parseError("unknown formatter: %s", formatter)
	return
}
// Grab the next item. If it's simple, just append it to the template.
// Otherwise return its details.
// "Simple" means text, literals, comments, and variables — anything
// that does not open or close a nested block.
func (t *Template) parseSimple(item []byte) (done bool, tok int, w []string) {
	tok, w = t.analyze(item)
	done = true // assume for simplicity
	switch tok {
	case tokComment:
		return // comments produce no element
	case tokText:
		t.elems.Push(&textElement{item})
		return
	case tokLiteral:
		switch w[0] {
		case ".meta-left":
			t.elems.Push(&literalElement{t.ldelim})
		case ".meta-right":
			t.elems.Push(&literalElement{t.rdelim})
		case ".space":
			t.elems.Push(&literalElement{space})
		case ".tab":
			t.elems.Push(&literalElement{tab})
		default:
			t.parseError("internal error: unknown literal: %s", w[0])
		}
		return
	case tokVariable:
		t.elems.Push(t.newVariable(w[0]))
		return
	}
	// Structural token: hand back to the caller for block handling.
	return false, tok, w
}
// parseRepeated and parseSection are mutually recursive
// parseRepeated consumes items until the matching .end, recording the
// indices of the main, .or, and .alternates blocks in the element vector.
func (t *Template) parseRepeated(words []string) *repeatedElement {
	r := new(repeatedElement)
	// Push the element before its children so indices stay in order.
	t.elems.Push(r)
	r.linenum = t.linenum
	r.field = words[2]
	// Scan section, collecting true and false (.or) blocks.
	r.start = t.elems.Len()
	r.or = -1
	r.altstart = -1
	r.altend = -1
Loop:
	for {
		item := t.nextItem()
		if len(item) == 0 {
			t.parseError("missing .end for .repeated section")
			break
		}
		done, tok, w := t.parseSimple(item)
		if done {
			continue
		}
		switch tok {
		case tokEnd:
			break Loop
		case tokOr:
			if r.or >= 0 {
				t.parseError("extra .or in .repeated section")
				break Loop
			}
			// .or terminates any .alternates block.
			r.altend = t.elems.Len()
			r.or = t.elems.Len()
		case tokSection:
			t.parseSection(w)
		case tokRepeated:
			t.parseRepeated(w)
		case tokAlternates:
			if r.altstart >= 0 {
				t.parseError("extra .alternates in .repeated section")
				break Loop
			}
			if r.or >= 0 {
				t.parseError(".alternates inside .or block in .repeated section")
				break Loop
			}
			r.altstart = t.elems.Len()
		default:
			t.parseError("internal error: unknown repeated section item: %s", item)
			break Loop
		}
	}
	if r.altend < 0 {
		r.altend = t.elems.Len()
	}
	r.end = t.elems.Len()
	return r
}
// parseSection consumes items until the matching .end, recording the
// indices of the main and optional .or blocks in the element vector.
func (t *Template) parseSection(words []string) *sectionElement {
	s := new(sectionElement)
	// Push the element before its children so indices stay in order.
	t.elems.Push(s)
	s.linenum = t.linenum
	s.field = words[1]
	// Scan section, collecting true and false (.or) blocks.
	s.start = t.elems.Len()
	s.or = -1
Loop:
	for {
		item := t.nextItem()
		if len(item) == 0 {
			t.parseError("missing .end for .section")
			break
		}
		done, tok, w := t.parseSimple(item)
		if done {
			continue
		}
		switch tok {
		case tokEnd:
			break Loop
		case tokOr:
			if s.or >= 0 {
				t.parseError("extra .or in .section")
				break Loop
			}
			s.or = t.elems.Len()
		case tokSection:
			t.parseSection(w)
		case tokRepeated:
			t.parseRepeated(w)
		case tokAlternates:
			t.parseError(".alternates not in .repeated")
		default:
			t.parseError("internal error: unknown section item: %s", item)
		}
	}
	s.end = t.elems.Len()
	return s
}
// parse is the top-level parsing loop: it consumes items until EOF,
// delegating block constructs to parseSection/parseRepeated and
// rejecting block-internal tokens that appear at the top level.
func (t *Template) parse() {
	for {
		item := t.nextItem()
		if len(item) == 0 {
			break // EOF
		}
		done, tok, w := t.parseSimple(item)
		if done {
			continue
		}
		switch tok {
		case tokOr, tokEnd, tokAlternates:
			// Only valid inside a .section/.repeated block.
			t.parseError("unexpected %s", w[0])
		case tokSection:
			t.parseSection(w)
		case tokRepeated:
			t.parseRepeated(w)
		default:
			t.parseError("internal error: bad directive in parse: %s", item)
		}
	}
}
// -- Execution
// Evaluate interfaces and pointers looking for a value that can look up the name, via a
// struct field, method, or map key, and return the result of the lookup.
// Methods are preferred at every indirection level; pointers and
// interfaces are peeled one layer per iteration until a struct or map
// (or nothing) is reached.
func lookup(v reflect.Value, name string) reflect.Value {
	for v != nil {
		typ := v.Type()
		if n := v.Type().NumMethod(); n > 0 {
			for i := 0; i < n; i++ {
				m := typ.Method(i)
				mtyp := m.Type
				// We must check receiver type because of a bug in the reflection type tables:
				// it should not be possible to find a method with the wrong receiver type but
				// this can happen due to value/pointer receiver mismatch.
				// Only niladic single-result methods qualify (In(0) is the receiver).
				if m.Name == name && mtyp.NumIn() == 1 && mtyp.NumOut() == 1 && mtyp.In(0) == typ {
					return v.Method(i).Call(nil)[0]
				}
			}
		}
		switch av := v.(type) {
		case *reflect.PtrValue:
			v = av.Elem()
		case *reflect.InterfaceValue:
			v = av.Elem()
		case *reflect.StructValue:
			return av.FieldByName(name)
		case *reflect.MapValue:
			return av.Elem(reflect.NewValue(name))
		default:
			return nil // scalar or other kind: no names to look up
		}
	}
	return v
}
// Walk v through pointers and interfaces, extracting the elements within.
// Stops at the first non-pointer, non-interface value (or nil).
func indirect(v reflect.Value) reflect.Value {
loop:
	for v != nil {
		switch av := v.(type) {
		case *reflect.PtrValue:
			v = av.Elem()
		case *reflect.InterfaceValue:
			v = av.Elem()
		default:
			break loop
		}
	}
	return v
}
// If the data for this template is a struct, find the named variable.
// Names of the form a.b.c are walked down the data tree.
// The special name "@" (the "cursor") denotes the current data.
// The value coming in (st.data) might need indirecting to reach
// a struct while the return value is not indirected - that is,
// it represents the actual named field.
// Returns nil when any path element fails to resolve.
func (st *state) findVar(s string) reflect.Value {
	if s == "@" {
		return st.data
	}
	data := st.data
	// Pre-Go 1 Split signature: the trailing 0 means "no limit".
	for _, elem := range strings.Split(s, ".", 0) {
		// Look up field; data must be a struct or map.
		data = lookup(data, elem)
		if data == nil {
			return nil
		}
	}
	return data
}
// Is there no data to look at?
// Used by .section/.repeated to choose between main and .or blocks:
// nil, false, "", and zero-length arrays/slices count as empty;
// structs and maps never do. Unknown kinds default to empty.
func empty(v reflect.Value) bool {
	v = indirect(v)
	if v == nil {
		return true
	}
	switch v := v.(type) {
	case *reflect.BoolValue:
		return v.Get() == false
	case *reflect.StringValue:
		return v.Get() == ""
	case *reflect.StructValue:
		return false
	case *reflect.MapValue:
		return false
	case *reflect.ArrayValue:
		return v.Len() == 0
	case *reflect.SliceValue:
		return v.Len() == 0
	}
	return true
}
// Look up a variable or method, up through the parent if necessary.
// Panics (via execError) when the name is not found at any level.
// NOTE(review): the error uses t.linenum, which is parse-time state,
// not the line of the failing directive.
func (t *Template) varValue(name string, st *state) reflect.Value {
	field := st.findVar(name)
	if field == nil {
		if st.parent == nil {
			t.execError(st, t.linenum, "name not found: %s in type %s", name, st.data.Type())
		}
		// Not here; retry in the enclosing section's data.
		return t.varValue(name, st.parent)
	}
	return field
}
// Evaluate a variable, looking up through the parent if necessary.
// If it has a formatter attached ({var|formatter}) run that too.
// User-supplied formatters shadow the builtins of the same name.
func (t *Template) writeVariable(v *variableElement, st *state) {
	formatter := v.formatter
	val := t.varValue(v.name, st).Interface()
	// is it in user-supplied map?
	if t.fmap != nil {
		if fn, ok := t.fmap[formatter]; ok {
			fn(st.wr, val, formatter)
			return
		}
	}
	// is it in builtin map?
	if fn, ok := builtins[formatter]; ok {
		fn(st.wr, val, formatter)
		return
	}
	// Should be unreachable: newVariable validated the formatter at parse time.
	t.execError(st, v.linenum, "missing formatter %s for variable %s", formatter, v.name)
}
// Execute element i. Return next index to execute.
// Sections and repeated sections skip to their recorded end index so the
// flat element vector is traversed without re-entering nested bodies.
func (t *Template) executeElement(i int, st *state) int {
	switch elem := t.elems.At(i).(type) {
	case *textElement:
		st.wr.Write(elem.text)
		return i + 1
	case *literalElement:
		st.wr.Write(elem.text)
		return i + 1
	case *variableElement:
		t.writeVariable(elem, st)
		return i + 1
	case *sectionElement:
		t.executeSection(elem, st)
		return elem.end
	case *repeatedElement:
		t.executeRepeated(elem, st)
		return elem.end
	}
	e := t.elems.At(i)
	// Unknown element type: a parser invariant was violated.
	t.execError(st, 0, "internal error: bad directive in execute: %v %T\n", reflect.NewValue(e).Interface(), e)
	return 0
}
// Execute the template: run the elements in [start, end), letting each
// element report the index at which execution resumes.
func (t *Template) execute(start, end int, st *state) {
	i := start
	for i < end {
		i = t.executeElement(i, st)
	}
}
// Execute a .section
// Descends the cursor into the named field and runs either the main
// block or the .or block depending on whether the field is empty.
func (t *Template) executeSection(s *sectionElement, st *state) {
	// Find driver data for this section. It must be in the current struct.
	field := t.varValue(s.field, st)
	if field == nil {
		t.execError(st, s.linenum, ".section: cannot find field %s in %s", s.field, st.data.Type())
	}
	// New state: the section's field becomes the cursor (@).
	st = st.clone(field)
	start, end := s.start, s.or
	if !empty(field) {
		// Execute the normal block.
		if end < 0 {
			end = s.end // no .or block; run through to .end
		}
	} else {
		// Execute the .or block. If it's missing, do nothing.
		start, end = s.or, s.end
		if start < 0 {
			return
		}
	}
	for i := start; i < end; {
		i = t.executeElement(i, st)
	}
}
// Return the result of calling the Iter method on v, or nil.
// Accepts only a niladic Iter returning a receivable channel; the
// channel lets arbitrary types act as .repeated section data sources.
func iter(v reflect.Value) *reflect.ChanValue {
	for j := 0; j < v.Type().NumMethod(); j++ {
		mth := v.Type().Method(j)
		fv := v.Method(j)
		ft := fv.Type().(*reflect.FuncType)
		// TODO(rsc): NumIn() should return 0 here, because ft is from a curried FuncValue.
		if mth.Name != "Iter" || ft.NumIn() != 1 || ft.NumOut() != 1 {
			continue
		}
		// The single result must be a channel we can receive from.
		ct, ok := ft.Out(0).(*reflect.ChanType)
		if !ok || ct.Dir()&reflect.RecvDir == 0 {
			continue
		}
		return fv.Call(nil)[0].(*reflect.ChanValue)
	}
	return nil
}
// Execute a .repeated section
// Runs the main block once per element of the field (array/slice, map,
// or Iter channel), inserting the .alternates block between iterations,
// and falls back to the .or block when the collection is empty.
func (t *Template) executeRepeated(r *repeatedElement, st *state) {
	// Find driver data for this section. It must be in the current struct.
	field := t.varValue(r.field, st)
	if field == nil {
		t.execError(st, r.linenum, ".repeated: cannot find field %s in %s", r.field, st.data.Type())
	}
	// Peel pointers/interfaces so the type switches below see the container.
	field = indirect(field)
	// Main block ends at the first of: .or, .alternates, or .end.
	start, end := r.start, r.or
	if end < 0 {
		end = r.end
	}
	if r.altstart >= 0 {
		end = r.altstart
	}
	first := true
	// Code common to all the loops.
	loopBody := func(newst *state) {
		// .alternates between elements
		if !first && r.altstart >= 0 {
			for i := r.altstart; i < r.altend; {
				i = t.executeElement(i, newst)
			}
		}
		first = false
		for i := start; i < end; {
			i = t.executeElement(i, newst)
		}
	}
	// Three supported collection shapes, tried in order.
	if array, ok := field.(reflect.ArrayOrSliceValue); ok {
		for j := 0; j < array.Len(); j++ {
			loopBody(st.clone(array.Elem(j)))
		}
	} else if m, ok := field.(*reflect.MapValue); ok {
		for _, key := range m.Keys() {
			loopBody(st.clone(m.Elem(key)))
		}
	} else if ch := iter(field); ch != nil {
		for {
			e := ch.Recv()
			// Pre-Go 1 channel API: Closed() is checked after the receive.
			if ch.Closed() {
				break
			}
			loopBody(st.clone(e))
		}
	} else {
		t.execError(st, r.linenum, ".repeated: cannot repeat %s (type %s)",
			r.field, field.Type())
	}
	if first {
		// Empty. Execute the .or block, once. If it's missing, do nothing.
		start, end := r.or, r.end
		if start >= 0 {
			newst := st.clone(field)
			for i := start; i < end; {
				i = t.executeElement(i, newst)
			}
		}
		return
	}
}
// A valid delimiter must contain no white space and be non-empty.
func validDelim(d []byte) bool {
	if len(d) == 0 {
		return false
	}
	for i := 0; i < len(d); i++ {
		if white(d[i]) {
			return false
		}
	}
	return true
}
// checkError is a deferred function to turn a panic with type *Error into a plain error return.
// Other panics are unexpected and so are re-enabled.
// Parse/Execute defer this over their named err result so the panics
// raised by parseError/execError become ordinary returns.
func checkError(error *os.Error) {
	if v := recover(); v != nil {
		if e, ok := v.(*Error); ok {
			*error = e
		} else {
			// runtime errors should crash
			panic(v)
		}
	}
}
// -- Public interface
// Parse initializes a Template by parsing its definition. The string
// s contains the template text. If any errors occur, Parse returns
// the error.
// The template must have been created with New.
func (t *Template) Parse(s string) (err os.Error) {
	if t.elems == nil {
		return &Error{1, "template not allocated with New"}
	}
	if !validDelim(t.ldelim) || !validDelim(t.rdelim) {
		return &Error{1, fmt.Sprintf("bad delimiter strings %q %q", t.ldelim, t.rdelim)}
	}
	// Convert parseError panics into the err return value.
	defer checkError(&err)
	t.buf = []byte(s)
	t.p = 0
	t.linenum = 1
	t.parse()
	return nil
}
// Execute applies a parsed template to the specified data object,
// generating output to wr.
// Execution errors surface through the deferred checkError rather
// than as panics.
func (t *Template) Execute(data interface{}, wr io.Writer) (err os.Error) {
	// Extract the driver data.
	val := reflect.NewValue(data)
	defer checkError(&err)
	t.p = 0
	t.execute(0, t.elems.Len(), &state{nil, val, wr})
	return nil
}
// SetDelims sets the left and right delimiters for operations in the
// template. They are validated during parsing. They could be
// validated here but it's better to keep the routine simple. The
// delimiters are very rarely invalid and Parse has the necessary
// error-handling interface already.
// Call before Parse; defaults are "{" and "}".
func (t *Template) SetDelims(left, right string) {
	t.ldelim = []byte(left)
	t.rdelim = []byte(right)
}
// Parse creates a Template with default parameters (such as {} for
// metacharacters). The string s contains the template text while
// the formatter map fmap, which may be nil, defines auxiliary functions
// for formatting variables. The template is returned. If any errors
// occur, err will be non-nil.
func Parse(s string, fmap FormatterMap) (t *Template, err os.Error) {
	t = New(fmap)
	err = t.Parse(s)
	if err != nil {
		t = nil // never return a half-parsed template
	}
	return
}
// ParseFile is a wrapper function that creates a Template with default
// parameters (such as {} for metacharacters). The filename identifies
// a file containing the template text, while the formatter map fmap, which
// may be nil, defines auxiliary functions for formatting variables.
// The template is returned. If any errors occur, err will be non-nil.
func ParseFile(filename string, fmap FormatterMap) (t *Template, err os.Error) {
	b, err := ioutil.ReadFile(filename)
	if err != nil {
		return nil, err
	}
	return Parse(string(b), fmap)
}
// MustParse is like Parse but panics if the template cannot be parsed.
// Intended for statically-known templates, e.g. package initialization.
func MustParse(s string, fmap FormatterMap) *Template {
	t, err := Parse(s, fmap)
	if err != nil {
		panic("template.MustParse error: " + err.String())
	}
	return t
}
// MustParseFile is like ParseFile but panics if the file cannot be read
// or the template cannot be parsed.
func MustParseFile(filename string, fmap FormatterMap) *Template {
	b, err := ioutil.ReadFile(filename)
	if err != nil {
		panic("template.MustParseFile error: " + err.String())
	}
	return MustParse(string(b), fmap)
}
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package integration
import (
"encoding/json"
"fmt"
"net/http"
"reflect"
"strings"
"sync"
"testing"
"time"
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apiextensions-apiserver/pkg/cmd/server/options"
serveroptions "k8s.io/apiextensions-apiserver/pkg/cmd/server/options"
apiextensionsfeatures "k8s.io/apiextensions-apiserver/pkg/features"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
etcd3watcher "k8s.io/apiserver/pkg/storage/etcd3"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/dynamic"
featuregatetesting "k8s.io/component-base/featuregate/testing"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apiextensions-apiserver/test/integration/convert"
"k8s.io/apiextensions-apiserver/test/integration/fixtures"
"k8s.io/apiextensions-apiserver/test/integration/storage"
)
// Checker validates one aspect of conversion-webhook behavior against the
// shared test context.
type Checker func(t *testing.T, ctc *conversionTestContext)

// checks collects its arguments into a slice, purely for readable test tables.
func checks(checkers ...Checker) []Checker {
	return checkers
}
// TestWebhookConverter runs end-to-end conversion-webhook tests: it starts an
// apiextensions server, installs the multi-version fixture CRD, points the
// CRD's conversion at a local webhook server per table entry, and runs each
// entry's checks in its own namespace.
func TestWebhookConverter(t *testing.T) {
	tests := []struct {
		group   string
		handler http.Handler
		checks  []Checker
	}{
		{
			group:   "noop-converter",
			handler: convert.NewObjectConverterWebhookHandler(t, noopConverter),
			checks:  checks(validateStorageVersion, validateServed, validateMixedStorageVersions),
		},
		{
			group:   "nontrivial-converter",
			handler: convert.NewObjectConverterWebhookHandler(t, nontrivialConverter),
			checks:  checks(validateStorageVersion, validateServed, validateMixedStorageVersions),
		},
		{
			group:   "empty-response",
			handler: convert.NewReviewWebhookHandler(t, emptyResponseConverter),
			checks:  checks(expectConversionFailureMessage("empty-response", "expected 1 converted objects")),
		},
		{
			group:   "failure-message",
			handler: convert.NewReviewWebhookHandler(t, failureResponseConverter("custom webhook conversion error")),
			checks:  checks(expectConversionFailureMessage("failure-message", "custom webhook conversion error")),
		},
	}
	// TODO: Added for integration testing of conversion webhooks, where decode errors due to conversion webhook failures need to be tested.
	// Maybe we should identify conversion webhook related errors in decoding to avoid triggering this? Or maybe having this special casing
	// of test cases in production code should be removed?
	etcd3watcher.TestOnlySetFatalOnDecodeError(false)
	defer etcd3watcher.TestOnlySetFatalOnDecodeError(true)
	// Conversion webhooks are behind a feature gate; enable it for the test's lifetime.
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, apiextensionsfeatures.CustomResourceWebhookConversion, true)()
	tearDown, config, options, err := fixtures.StartDefaultServer(t)
	if err != nil {
		t.Fatal(err)
	}
	apiExtensionsClient, err := clientset.NewForConfig(config)
	if err != nil {
		tearDown()
		t.Fatal(err)
	}
	dynamicClient, err := dynamic.NewForConfig(config)
	if err != nil {
		tearDown()
		t.Fatal(err)
	}
	defer tearDown()
	crd := multiVersionFixture.DeepCopy()
	// Build a direct etcd reader so checks can observe the raw stored version,
	// bypassing API-server conversion.
	RESTOptionsGetter := serveroptions.NewCRDRESTOptionsGetter(*options.RecommendedOptions.Etcd)
	restOptions, err := RESTOptionsGetter.GetRESTOptions(schema.GroupResource{Group: crd.Spec.Group, Resource: crd.Spec.Names.Plural})
	if err != nil {
		t.Fatal(err)
	}
	etcdClient, _, err := storage.GetEtcdClients(restOptions.StorageConfig.Transport)
	if err != nil {
		t.Fatal(err)
	}
	defer etcdClient.Close()
	etcdObjectReader := storage.NewEtcdObjectReader(etcdClient, &restOptions, crd)
	ctcTearDown, ctc := newConversionTestContext(t, apiExtensionsClient, dynamicClient, etcdObjectReader, crd)
	defer ctcTearDown()
	// read only object to read at a different version than stored when we need to force conversion
	marker, err := ctc.versionedClient("marker", "v1beta2").Create(newConversionMultiVersionFixture("marker", "marker", "v1beta2"), metav1.CreateOptions{})
	if err != nil {
		t.Fatal(err)
	}
	for _, test := range tests {
		t.Run(test.group, func(t *testing.T) {
			// upCh closes the first time the webhook handler is actually hit.
			upCh, handler := closeOnCall(test.handler)
			tearDown, webhookClientConfig, err := convert.StartConversionWebhookServer(handler)
			if err != nil {
				t.Fatal(err)
			}
			defer tearDown()
			ctc.setConversionWebhook(t, webhookClientConfig)
			defer ctc.removeConversionWebhook(t)
			// wait until new webhook is called the first time
			if err := wait.PollImmediate(time.Millisecond*100, wait.ForeverTestTimeout, func() (bool, error) {
				// Reading the marker at the non-storage version forces a conversion call.
				_, err := ctc.versionedClient(marker.GetNamespace(), "v1beta1").Get(marker.GetName(), metav1.GetOptions{})
				select {
				case <-upCh:
					return true, nil
				default:
					t.Logf("Waiting for webhook to become effective, getting marker object: %v", err)
					return false, nil
				}
			}); err != nil {
				t.Fatal(err)
			}
			for i, checkFn := range test.checks {
				name := fmt.Sprintf("check-%d", i)
				t.Run(name, func(t *testing.T) {
					// Restore the storage version no matter what the check changed.
					defer ctc.setAndWaitStorageVersion(t, "v1beta2")
					ctc.namespace = fmt.Sprintf("webhook-conversion-%s-%s", test.group, name)
					checkFn(t, ctc)
				})
			}
		})
	}
}
// validateStorageVersion creates an object via each served version, flips the
// CRD storage version to v1beta2 and back to v1beta1, and re-reads the object
// to ensure it remains accessible across storage-version changes.
func validateStorageVersion(t *testing.T, ctc *conversionTestContext) {
	ns := ctc.namespace
	for _, version := range []string{"v1beta1", "v1beta2"} {
		t.Run(version, func(t *testing.T) {
			name := "storageversion-" + version
			client := ctc.versionedClient(ns, version)
			obj, err := client.Create(newConversionMultiVersionFixture(ns, name, version), metav1.CreateOptions{})
			if err != nil {
				t.Fatal(err)
			}
			ctc.setAndWaitStorageVersion(t, "v1beta2")
			// Only the success of the Get matters here; the returned object
			// is not inspected further.
			obj, err = client.Get(obj.GetName(), metav1.GetOptions{})
			if err != nil {
				t.Fatal(err)
			}
			ctc.setAndWaitStorageVersion(t, "v1beta1")
		})
	}
}
// validateMixedStorageVersions ensures that identical custom resources written at different storage versions
// are readable and remain the same.
func validateMixedStorageVersions(t *testing.T, ctc *conversionTestContext) {
	ns := ctc.namespace
	v1client := ctc.versionedClient(ns, "v1beta1")
	v2client := ctc.versionedClient(ns, "v1beta2")
	clients := map[string]dynamic.ResourceInterface{"v1beta1": v1client, "v1beta2": v2client}
	versions := []string{"v1beta1", "v1beta2"}
	// Create CRs at all storage versions
	objNames := []string{}
	for _, version := range versions {
		ctc.setAndWaitStorageVersion(t, version)
		name := "stored-at-" + version
		obj, err := clients[version].Create(newConversionMultiVersionFixture(ns, name, version), metav1.CreateOptions{})
		if err != nil {
			t.Fatal(err)
		}
		objNames = append(objNames, obj.GetName())
	}
	// Ensure copies of an object have the same fields and values at each custom resource definition version regardless of storage version
	for clientVersion, client := range clients {
		t.Run(clientVersion, func(t *testing.T) {
			// Compare every other object against the first one, all read
			// through the same client version.
			o1, err := client.Get(objNames[0], metav1.GetOptions{})
			if err != nil {
				t.Fatal(err)
			}
			for _, objName := range objNames[1:] {
				o2, err := client.Get(objName, metav1.GetOptions{})
				if err != nil {
					t.Fatal(err)
				}
				// ignore metadata for comparison purposes
				delete(o1.Object, "metadata")
				delete(o2.Object, "metadata")
				if !reflect.DeepEqual(o1.Object, o2.Object) {
					t.Errorf("Expected custom resource to be same regardless of which storage version is used but got %+v != %+v", o1, o2)
				}
			}
		})
	}
}
// validateServed flips each version's served flag off and back on, waiting in
// between until the API server observably stops (NotFound) or resumes serving
// the created object at that version.
func validateServed(t *testing.T, ctc *conversionTestContext) {
	ns := ctc.namespace
	for _, v := range []string{"v1beta1", "v1beta2"} {
		version := v
		t.Run(version, func(t *testing.T) {
			client := ctc.versionedClient(ns, version)
			fixture := newConversionMultiVersionFixture(ns, "served-"+version, version)
			obj, err := client.Create(fixture, metav1.CreateOptions{})
			if err != nil {
				t.Fatal(err)
			}
			// Disable serving first, then re-enable, verifying each transition.
			for _, served := range []bool{false, true} {
				ctc.setServed(t, version, served)
				ctc.waitForServed(t, version, served, client, obj)
			}
		})
	}
}
// expectConversionFailureMessage returns a check that drives every verb (and
// the status/scale subresources) through a code path requiring webhook
// conversion, and asserts that each call fails with an error containing
// message.
func expectConversionFailureMessage(id, message string) func(t *testing.T, ctc *conversionTestContext) {
	return func(t *testing.T, ctc *conversionTestContext) {
		ns := ctc.namespace
		v1client := ctc.versionedClient(ns, "v1beta1")
		v2client := ctc.versionedClient(ns, "v1beta2")
		var err error
		// storage version is v1beta2, so this skips conversion
		obj, err := v2client.Create(newConversionMultiVersionFixture(ns, id, "v1beta2"), metav1.CreateOptions{})
		if err != nil {
			t.Fatal(err)
		}
		// BUG FIX: the original list spelled "udpate"; the typo matched no
		// switch case, so the update path never ran and its subtest merely
		// re-asserted the stale error left by the preceding verb.
		for _, verb := range []string{"get", "list", "create", "update", "patch", "delete", "deletecollection"} {
			t.Run(verb, func(t *testing.T) {
				switch verb {
				case "get":
					_, err = v1client.Get(obj.GetName(), metav1.GetOptions{})
				case "list":
					_, err = v1client.List(metav1.ListOptions{})
				case "create":
					_, err = v1client.Create(newConversionMultiVersionFixture(ns, id, "v1beta1"), metav1.CreateOptions{})
				case "update":
					_, err = v1client.Update(obj, metav1.UpdateOptions{})
				case "patch":
					_, err = v1client.Patch(obj.GetName(), types.MergePatchType, []byte(`{"metadata":{"annotations":{"patch":"true"}}}`), metav1.PatchOptions{})
				case "delete":
					err = v1client.Delete(obj.GetName(), &metav1.DeleteOptions{})
				case "deletecollection":
					err = v1client.DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{})
				}
				if err == nil {
					t.Errorf("expected error with message %s, but got no error", message)
				} else if !strings.Contains(err.Error(), message) {
					t.Errorf("expected error with message %s, but got %v", message, err)
				}
			})
		}
		for _, subresource := range []string{"status", "scale"} {
			// BUG FIX: the original verb list ("get", "udpate", "patch")
			// contained two entries with no matching switch case, producing
			// subtests that issued no request at all; use the verbs the
			// switch actually implements.
			for _, verb := range []string{"create", "update", "patch"} {
				t.Run(fmt.Sprintf("%s-%s", subresource, verb), func(t *testing.T) {
					switch verb {
					case "create":
						_, err = v1client.Create(newConversionMultiVersionFixture(ns, id, "v1beta1"), metav1.CreateOptions{}, subresource)
					case "update":
						_, err = v1client.Update(obj, metav1.UpdateOptions{}, subresource)
					case "patch":
						_, err = v1client.Patch(obj.GetName(), types.MergePatchType, []byte(`{"metadata":{"annotations":{"patch":"true"}}}`), metav1.PatchOptions{}, subresource)
					}
					if err == nil {
						t.Errorf("expected error with message %s, but got no error", message)
					} else if !strings.Contains(err.Error(), message) {
						t.Errorf("expected error with message %s, but got %v", message, err)
					}
				})
			}
		}
	}
}
// noopConverter rewrites only the apiVersion of the incoming object to the
// desired version, leaving every other field untouched.
func noopConverter(desiredAPIVersion string, obj runtime.RawExtension) (runtime.RawExtension, error) {
	u := &unstructured.Unstructured{Object: map[string]interface{}{}}
	if err := json.Unmarshal(obj.Raw, u); err != nil {
		// Error strings lowered per Go convention (were "Fail to ...").
		return runtime.RawExtension{}, fmt.Errorf("failed to deserialize object: %s with error: %v", string(obj.Raw), err)
	}
	u.Object["apiVersion"] = desiredAPIVersion
	raw, err := json.Marshal(u)
	if err != nil {
		return runtime.RawExtension{}, fmt.Errorf("failed to serialize object: %v with error: %v", u, err)
	}
	return runtime.RawExtension{Raw: raw}, nil
}
// emptyResponseConverter answers every ConversionReview with a successful
// response carrying zero converted objects — invalid from the API server's
// point of view, which expects one converted object per input.
func emptyResponseConverter(review apiextensionsv1beta1.ConversionReview) (apiextensionsv1beta1.ConversionReview, error) {
	resp := &apiextensionsv1beta1.ConversionResponse{}
	resp.UID = review.Request.UID
	resp.ConvertedObjects = []runtime.RawExtension{}
	resp.Result = metav1.Status{Status: "Success"}
	review.Response = resp
	return review, nil
}
// failureResponseConverter builds a converter that rejects every
// ConversionReview with a Failure status carrying the given message.
func failureResponseConverter(message string) func(review apiextensionsv1beta1.ConversionReview) (apiextensionsv1beta1.ConversionReview, error) {
	return func(review apiextensionsv1beta1.ConversionReview) (apiextensionsv1beta1.ConversionReview, error) {
		resp := &apiextensionsv1beta1.ConversionResponse{}
		resp.UID = review.Request.UID
		resp.ConvertedObjects = []runtime.RawExtension{}
		resp.Result = metav1.Status{Message: message, Status: "Failure"}
		review.Response = resp
		return review, nil
	}
}
// nontrivialConverter converts the multiversion fixture between v1beta1 and
// v1beta2 by renaming num/content to numv2/contentv2 (and back).
func nontrivialConverter(desiredAPIVersion string, obj runtime.RawExtension) (runtime.RawExtension, error) {
	u := &unstructured.Unstructured{Object: map[string]interface{}{}}
	if err := json.Unmarshal(obj.Raw, u); err != nil {
		return runtime.RawExtension{}, fmt.Errorf("failed to deserialize object: %s with error: %v", string(obj.Raw), err)
	}
	// BUG FIX: apiVersion values are fully-qualified "group/version" strings
	// ("stable.example.com/v1beta2", see newConversionMultiVersionFixture);
	// the original compared against bare "v1beta1"/"v1beta2", which never
	// matched, so the field renames below never ran and this converter
	// silently behaved like a noop.
	currentAPIVersion := u.GetAPIVersion()
	if currentAPIVersion == "stable.example.com/v1beta2" && desiredAPIVersion == "stable.example.com/v1beta1" {
		u.Object["num"] = u.Object["numv2"]
		u.Object["content"] = u.Object["contentv2"]
		delete(u.Object, "numv2")
		delete(u.Object, "contentv2")
	}
	if currentAPIVersion == "stable.example.com/v1beta1" && desiredAPIVersion == "stable.example.com/v1beta2" {
		u.Object["numv2"] = u.Object["num"]
		u.Object["contentv2"] = u.Object["content"]
		delete(u.Object, "num")
		delete(u.Object, "content")
	}
	u.Object["apiVersion"] = desiredAPIVersion
	raw, err := json.Marshal(u)
	if err != nil {
		return runtime.RawExtension{}, fmt.Errorf("failed to serialize object: %v with error: %v", u, err)
	}
	return runtime.RawExtension{Raw: raw}, nil
}
// newConversionTestContext registers the given CRD with the cluster and
// returns a teardown that deletes it again, plus a context bundling the
// clients and etcd reader the conversion checks need.
func newConversionTestContext(t *testing.T, apiExtensionsClient clientset.Interface, dynamicClient dynamic.Interface, etcdObjectReader *storage.EtcdObjectReader, crd *apiextensionsv1beta1.CustomResourceDefinition) (func(), *conversionTestContext) {
	crd, err := fixtures.CreateNewCustomResourceDefinition(crd, apiExtensionsClient, dynamicClient)
	if err != nil {
		t.Fatal(err)
	}
	tearDown := func() {
		if err := fixtures.DeleteCustomResourceDefinition(crd, apiExtensionsClient); err != nil {
			t.Fatal(err)
		}
	}
	return tearDown, &conversionTestContext{apiExtensionsClient: apiExtensionsClient, dynamicClient: dynamicClient, crd: crd, etcdObjectReader: etcdObjectReader}
}
// conversionTestContext carries the shared state used by every conversion
// Checker: clients, the current CRD object, and a raw etcd reader.
type conversionTestContext struct {
	namespace           string // set by the test driver before each check run
	apiExtensionsClient clientset.Interface
	dynamicClient       dynamic.Interface
	// NOTE(review): options is never referenced in this file's visible code,
	// and newConversionTestContext does not populate it — confirm it is needed.
	options          *options.CustomResourceDefinitionsServerOptions
	crd              *apiextensionsv1beta1.CustomResourceDefinition // refreshed after every CRD update
	etcdObjectReader *storage.EtcdObjectReader
}
// versionedClient returns a dynamic client for the CRD's resource at the
// given version, scoped to namespace ns.
func (c *conversionTestContext) versionedClient(ns string, version string) dynamic.ResourceInterface {
	return newNamespacedCustomResourceVersionedClient(ns, c.dynamicClient, c.crd, version)
}
// setConversionWebhook switches the CRD's conversion strategy to the given
// webhook and stores the updated CRD back into the context.
func (c *conversionTestContext) setConversionWebhook(t *testing.T, webhookClientConfig *apiextensionsv1beta1.WebhookClientConfig) {
	// Re-fetch first to avoid resourceVersion conflicts on Update.
	crd, err := c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(c.crd.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatal(err)
	}
	crd.Spec.Conversion = &apiextensionsv1beta1.CustomResourceConversion{
		Strategy:            apiextensionsv1beta1.WebhookConverter,
		WebhookClientConfig: webhookClientConfig,
	}
	crd, err = c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(crd)
	if err != nil {
		t.Fatal(err)
	}
	c.crd = crd
}
// removeConversionWebhook resets the CRD's conversion strategy back to the
// default NoneConverter and stores the updated CRD into the context.
func (c *conversionTestContext) removeConversionWebhook(t *testing.T) {
	crd, err := c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(c.crd.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatal(err)
	}
	crd.Spec.Conversion = &apiextensionsv1beta1.CustomResourceConversion{
		Strategy: apiextensionsv1beta1.NoneConverter,
	}
	crd, err = c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(crd)
	if err != nil {
		t.Fatal(err)
	}
	c.crd = crd
}
// setAndWaitStorageVersion marks the given version as the CRD's storage
// version and blocks until newly written objects are actually persisted at
// that version, using a throwaway probe object.
//
// BUG FIX: the original body hard-coded "v1beta2" everywhere and ignored the
// version parameter entirely, although callers (e.g. validateStorageVersion
// and the per-check cleanup) pass "v1beta1" and expect it to be honored.
func (c *conversionTestContext) setAndWaitStorageVersion(t *testing.T, version string) {
	c.setStorageVersion(t, version)
	// Create a probe object and wait until it is stored at the requested version.
	client := c.versionedClient("probe", version)
	name := fmt.Sprintf("probe-%v", uuid.NewUUID())
	storageProbe, err := client.Create(newConversionMultiVersionFixture("probe", name, version), metav1.CreateOptions{})
	if err != nil {
		t.Fatal(err)
	}
	c.waitForStorageVersion(t, version, c.versionedClient(storageProbe.GetNamespace(), version), storageProbe)
	err = client.Delete(name, &metav1.DeleteOptions{})
	if err != nil {
		t.Fatal(err)
	}
}
// setStorageVersion marks exactly the given version as the CRD's storage
// version (clearing the flag on every other version) and stores the updated
// CRD back into the context.
func (c *conversionTestContext) setStorageVersion(t *testing.T, version string) {
	crd, err := c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(c.crd.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatal(err)
	}
	for i, v := range crd.Spec.Versions {
		crd.Spec.Versions[i].Storage = (v.Name == version)
	}
	crd, err = c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(crd)
	if err != nil {
		t.Fatal(err)
	}
	c.crd = crd
}
// waitForStorageVersion blocks (up to 30s) until obj is persisted in etcd at
// the given version. The callback repeatedly rewrites the object through the
// API server so that it gets re-stored at the new storage version. Returns
// the object as last written.
func (c *conversionTestContext) waitForStorageVersion(t *testing.T, version string, versionedClient dynamic.ResourceInterface, obj *unstructured.Unstructured) *unstructured.Unstructured {
	c.etcdObjectReader.WaitForStorageVersion(version, obj.GetNamespace(), obj.GetName(), 30*time.Second, func() {
		var err error
		obj, err = versionedClient.Update(obj, metav1.UpdateOptions{})
		if err != nil {
			t.Fatalf("failed to update object: %v", err)
		}
	})
	return obj
}
// setServed sets the served flag of the given CRD version and stores the
// updated CRD back into the context. Other versions are left untouched.
func (c *conversionTestContext) setServed(t *testing.T, version string, served bool) {
	crd, err := c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(c.crd.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatal(err)
	}
	for i, v := range crd.Spec.Versions {
		if v.Name == version {
			crd.Spec.Versions[i].Served = served
		}
	}
	crd, err = c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(crd)
	if err != nil {
		t.Fatal(err)
	}
	c.crd = crd
}
// waitForServed polls until the served state becomes observable through the
// API: a successful Get when served, or a NotFound error once the version is
// no longer served. Fails the test after a 30s timeout.
func (c *conversionTestContext) waitForServed(t *testing.T, version string, served bool, versionedClient dynamic.ResourceInterface, obj *unstructured.Unstructured) {
	timeout := 30 * time.Second
	waitCh := time.After(timeout)
	for {
		// Shadows the parameter obj deliberately; the outer object is only
		// needed for its name.
		obj, err := versionedClient.Get(obj.GetName(), metav1.GetOptions{})
		if (err == nil && served) || (errors.IsNotFound(err) && served == false) {
			return
		}
		select {
		case <-waitCh:
			t.Fatalf("Timed out after %v waiting for CRD served=%t for version %s for %v. Last error: %v", timeout, served, version, obj, err)
		case <-time.After(10 * time.Millisecond):
		}
	}
}
// multiVersionFixture is a namespaced CRD with two versions — v1beta1
// (served) and v1beta2 (served, storage) — plus status and scale
// subresources, used as the target of all conversion tests in this file.
var multiVersionFixture = &apiextensionsv1beta1.CustomResourceDefinition{
	ObjectMeta: metav1.ObjectMeta{Name: "multiversion.stable.example.com"},
	Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{
		Group:   "stable.example.com",
		Version: "v1beta1",
		Names: apiextensionsv1beta1.CustomResourceDefinitionNames{
			Plural:     "multiversion",
			Singular:   "multiversion",
			Kind:       "MultiVersion",
			ShortNames: []string{"mv"},
			ListKind:   "MultiVersionList",
			Categories: []string{"all"},
		},
		Scope: apiextensionsv1beta1.NamespaceScoped,
		Versions: []apiextensionsv1beta1.CustomResourceDefinitionVersion{
			{
				Name:    "v1beta1",
				Served:  true,
				Storage: false,
			},
			{
				Name:    "v1beta2",
				Served:  true,
				Storage: true,
			},
		},
		Subresources: &apiextensionsv1beta1.CustomResourceSubresources{
			Status: &apiextensionsv1beta1.CustomResourceSubresourceStatus{},
			Scale: &apiextensionsv1beta1.CustomResourceSubresourceScale{
				// Paths into newConversionMultiVersionFixture's num map.
				SpecReplicasPath:   ".spec.num.num1",
				StatusReplicasPath: ".status.num.num2",
			},
		},
	},
}
// newConversionMultiVersionFixture builds an unstructured MultiVersion
// object at the given group version, with fixed content and num payloads.
func newConversionMultiVersionFixture(namespace, name, version string) *unstructured.Unstructured {
	payload := map[string]interface{}{
		"apiVersion": "stable.example.com/" + version,
		"kind":       "MultiVersion",
		"metadata": map[string]interface{}{
			"namespace": namespace,
			"name":      name,
		},
		"content": map[string]interface{}{"key": "value"},
		"num":     map[string]interface{}{"num1": 1, "num2": 1000000},
	}
	return &unstructured.Unstructured{Object: payload}
}
func closeOnCall(h http.Handler) (chan struct{}, http.Handler) {
ch := make(chan struct{})
once := sync.Once{}
return ch, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
once.Do(func() {
close(ch)
})
h.ServeHTTP(w, r)
})
}
apiextensions: fix non-trivial conversion integration test
This adds a third version v1alpha1 which has the same schema as v1beta1. Moreover, v1beta1 becomes the storage version. Hence, we can do noop webhook conversion from v1alpha1 to v1beta1 and back.
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package integration
import (
"encoding/json"
"fmt"
"net/http"
"reflect"
"strings"
"sync"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apiextensions-apiserver/pkg/cmd/server/options"
serveroptions "k8s.io/apiextensions-apiserver/pkg/cmd/server/options"
apiextensionsfeatures "k8s.io/apiextensions-apiserver/pkg/features"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
etcd3watcher "k8s.io/apiserver/pkg/storage/etcd3"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/dynamic"
featuregatetesting "k8s.io/component-base/featuregate/testing"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apiextensions-apiserver/test/integration/convert"
"k8s.io/apiextensions-apiserver/test/integration/fixtures"
"k8s.io/apiextensions-apiserver/test/integration/storage"
)
// Checker validates one aspect of conversion-webhook behavior against the
// shared test context.
type Checker func(t *testing.T, ctc *conversionTestContext)

// checks collects its arguments into a slice, purely for readable test tables.
func checks(checkers ...Checker) []Checker {
	return checkers
}
// TestWebhookConverter runs end-to-end conversion-webhook tests: it starts an
// apiextensions server, installs the multi-version fixture CRD (v1alpha1 and
// v1beta1 share a schema; v1beta1 is the storage version), points the CRD's
// conversion at a local webhook server per table entry, and runs each entry's
// checks in its own namespace.
func TestWebhookConverter(t *testing.T) {
	tests := []struct {
		group   string
		handler http.Handler
		checks  []Checker
	}{
		{
			group:   "noop-converter",
			handler: convert.NewObjectConverterWebhookHandler(t, noopConverter),
			checks:  checks(validateStorageVersion, validateServed, validateMixedStorageVersions("v1alpha1", "v1beta1")), // no v1beta2 as the schema differs
		},
		{
			group:   "nontrivial-converter",
			handler: convert.NewObjectConverterWebhookHandler(t, nontrivialConverter),
			checks:  checks(validateStorageVersion, validateServed, validateMixedStorageVersions("v1alpha1", "v1beta1", "v1beta2")),
		},
		{
			group:   "empty-response",
			handler: convert.NewReviewWebhookHandler(t, emptyResponseConverter),
			checks:  checks(expectConversionFailureMessage("empty-response", "expected 1 converted objects")),
		},
		{
			group:   "failure-message",
			handler: convert.NewReviewWebhookHandler(t, failureResponseConverter("custom webhook conversion error")),
			checks:  checks(expectConversionFailureMessage("failure-message", "custom webhook conversion error")),
		},
	}
	// TODO: Added for integration testing of conversion webhooks, where decode errors due to conversion webhook failures need to be tested.
	// Maybe we should identify conversion webhook related errors in decoding to avoid triggering this? Or maybe having this special casing
	// of test cases in production code should be removed?
	etcd3watcher.TestOnlySetFatalOnDecodeError(false)
	defer etcd3watcher.TestOnlySetFatalOnDecodeError(true)
	// Conversion webhooks are behind a feature gate; enable it for the test's lifetime.
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, apiextensionsfeatures.CustomResourceWebhookConversion, true)()
	tearDown, config, options, err := fixtures.StartDefaultServer(t)
	if err != nil {
		t.Fatal(err)
	}
	apiExtensionsClient, err := clientset.NewForConfig(config)
	if err != nil {
		tearDown()
		t.Fatal(err)
	}
	dynamicClient, err := dynamic.NewForConfig(config)
	if err != nil {
		tearDown()
		t.Fatal(err)
	}
	defer tearDown()
	crd := multiVersionFixture.DeepCopy()
	// Build a direct etcd reader so checks can observe the raw stored version,
	// bypassing API-server conversion.
	RESTOptionsGetter := serveroptions.NewCRDRESTOptionsGetter(*options.RecommendedOptions.Etcd)
	restOptions, err := RESTOptionsGetter.GetRESTOptions(schema.GroupResource{Group: crd.Spec.Group, Resource: crd.Spec.Names.Plural})
	if err != nil {
		t.Fatal(err)
	}
	etcdClient, _, err := storage.GetEtcdClients(restOptions.StorageConfig.Transport)
	if err != nil {
		t.Fatal(err)
	}
	defer etcdClient.Close()
	etcdObjectReader := storage.NewEtcdObjectReader(etcdClient, &restOptions, crd)
	ctcTearDown, ctc := newConversionTestContext(t, apiExtensionsClient, dynamicClient, etcdObjectReader, crd)
	defer ctcTearDown()
	// read only object to read at a different version than stored when we need to force conversion
	marker, err := ctc.versionedClient("marker", "v1beta1").Create(newConversionMultiVersionFixture("marker", "marker", "v1beta1"), metav1.CreateOptions{})
	if err != nil {
		t.Fatal(err)
	}
	for _, test := range tests {
		t.Run(test.group, func(t *testing.T) {
			// upCh closes the first time the webhook handler is actually hit.
			upCh, handler := closeOnCall(test.handler)
			tearDown, webhookClientConfig, err := convert.StartConversionWebhookServer(handler)
			if err != nil {
				t.Fatal(err)
			}
			defer tearDown()
			ctc.setConversionWebhook(t, webhookClientConfig)
			defer ctc.removeConversionWebhook(t)
			// wait until new webhook is called the first time
			if err := wait.PollImmediate(time.Millisecond*100, wait.ForeverTestTimeout, func() (bool, error) {
				// Reading the marker at a non-storage version forces a conversion call.
				_, err := ctc.versionedClient(marker.GetNamespace(), "v1alpha1").Get(marker.GetName(), metav1.GetOptions{})
				select {
				case <-upCh:
					return true, nil
				default:
					t.Logf("Waiting for webhook to become effective, getting marker object: %v", err)
					return false, nil
				}
			}); err != nil {
				t.Fatal(err)
			}
			for i, checkFn := range test.checks {
				name := fmt.Sprintf("check-%d", i)
				t.Run(name, func(t *testing.T) {
					// Restore the storage version no matter what the check changed.
					defer ctc.setAndWaitStorageVersion(t, "v1beta1")
					ctc.namespace = fmt.Sprintf("webhook-conversion-%s-%s", test.group, name)
					checkFn(t, ctc)
				})
			}
		})
	}
}
// validateStorageVersion creates an object via every CRD version, flips the
// storage version to v1beta2 and back to v1beta1, and re-reads the object to
// ensure it stays accessible across storage-version changes.
func validateStorageVersion(t *testing.T, ctc *conversionTestContext) {
	ns := ctc.namespace
	for _, version := range ctc.crd.Spec.Versions {
		t.Run(version.Name, func(t *testing.T) {
			name := "storageversion-" + version.Name
			client := ctc.versionedClient(ns, version.Name)
			obj, err := client.Create(newConversionMultiVersionFixture(ns, name, version.Name), metav1.CreateOptions{})
			if err != nil {
				t.Fatal(err)
			}
			ctc.setAndWaitStorageVersion(t, "v1beta2")
			// Only the success of the Get matters here; the returned object
			// is not inspected further.
			obj, err = client.Get(obj.GetName(), metav1.GetOptions{})
			if err != nil {
				t.Fatal(err)
			}
			ctc.setAndWaitStorageVersion(t, "v1beta1")
		})
	}
}
// validateMixedStorageVersions ensures that identical custom resources written at different storage versions
// are readable and remain the same.
func validateMixedStorageVersions(versions ...string) func(t *testing.T, ctc *conversionTestContext) {
	return func(t *testing.T, ctc *conversionTestContext) {
		ns := ctc.namespace
		clients := ctc.versionedClients(ns)
		// Create CRs at all storage versions
		objNames := []string{}
		for _, version := range versions {
			ctc.setAndWaitStorageVersion(t, version)
			name := "mixedstorage-stored-as-" + version
			obj, err := clients[version].Create(newConversionMultiVersionFixture(ns, name, version), metav1.CreateOptions{})
			if err != nil {
				t.Fatal(err)
			}
			objNames = append(objNames, obj.GetName())
		}
		// Ensure copies of an object have the same fields and values at each custom resource definition version regardless of storage version
		for clientVersion, client := range clients {
			t.Run(clientVersion, func(t *testing.T) {
				// Compare every other object against the first one, all read
				// through the same client version.
				o1, err := client.Get(objNames[0], metav1.GetOptions{})
				if err != nil {
					t.Fatal(err)
				}
				for _, objName := range objNames[1:] {
					o2, err := client.Get(objName, metav1.GetOptions{})
					if err != nil {
						t.Fatal(err)
					}
					// ignore metadata for comparison purposes
					delete(o1.Object, "metadata")
					delete(o2.Object, "metadata")
					if !reflect.DeepEqual(o1.Object, o2.Object) {
						t.Errorf("Expected custom resource to be same regardless of which storage version is used to create, but got: %s", cmp.Diff(o1, o2))
					}
				}
			})
		}
	}
}
// validateServed flips each version's served flag off and back on, waiting in
// between until the API server observably stops (NotFound) or resumes serving
// the created object at that version.
func validateServed(t *testing.T, ctc *conversionTestContext) {
	ns := ctc.namespace
	for _, v := range ctc.crd.Spec.Versions {
		version := v.Name
		t.Run(version, func(t *testing.T) {
			client := ctc.versionedClient(ns, version)
			fixture := newConversionMultiVersionFixture(ns, "served-"+version, version)
			obj, err := client.Create(fixture, metav1.CreateOptions{})
			if err != nil {
				t.Fatal(err)
			}
			// Disable serving first, then re-enable, verifying each transition.
			for _, served := range []bool{false, true} {
				ctc.setServed(t, version, served)
				ctc.waitForServed(t, version, served, client, obj)
			}
		})
	}
}
// expectConversionFailureMessage returns a check that drives every verb (and
// the status/scale subresources) through a code path requiring webhook
// conversion, and asserts that each call fails with an error containing
// message.
func expectConversionFailureMessage(id, message string) func(t *testing.T, ctc *conversionTestContext) {
	return func(t *testing.T, ctc *conversionTestContext) {
		ns := ctc.namespace
		clients := ctc.versionedClients(ns)
		var err error
		// storage version is v1beta1, so this skips conversion
		obj, err := clients["v1beta1"].Create(newConversionMultiVersionFixture(ns, id, "v1beta1"), metav1.CreateOptions{})
		if err != nil {
			t.Fatal(err)
		}
		// BUG FIX: the original list spelled "udpate"; the typo matched no
		// switch case, so the update path never ran and its subtest merely
		// re-asserted the stale error left by the preceding verb.
		for _, verb := range []string{"get", "list", "create", "update", "patch", "delete", "deletecollection"} {
			t.Run(verb, func(t *testing.T) {
				switch verb {
				case "get":
					_, err = clients["v1beta2"].Get(obj.GetName(), metav1.GetOptions{})
				case "list":
					_, err = clients["v1beta2"].List(metav1.ListOptions{})
				case "create":
					_, err = clients["v1beta2"].Create(newConversionMultiVersionFixture(ns, id, "v1beta2"), metav1.CreateOptions{})
				case "update":
					_, err = clients["v1beta2"].Update(obj, metav1.UpdateOptions{})
				case "patch":
					_, err = clients["v1beta2"].Patch(obj.GetName(), types.MergePatchType, []byte(`{"metadata":{"annotations":{"patch":"true"}}}`), metav1.PatchOptions{})
				case "delete":
					err = clients["v1beta2"].Delete(obj.GetName(), &metav1.DeleteOptions{})
				case "deletecollection":
					err = clients["v1beta2"].DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{})
				}
				if err == nil {
					t.Errorf("expected error with message %s, but got no error", message)
				} else if !strings.Contains(err.Error(), message) {
					t.Errorf("expected error with message %s, but got %v", message, err)
				}
			})
		}
		for _, subresource := range []string{"status", "scale"} {
			// BUG FIX: the original verb list ("get", "udpate", "patch")
			// contained two entries with no matching switch case, producing
			// subtests that issued no request at all; use the verbs the
			// switch actually implements.
			for _, verb := range []string{"create", "update", "patch"} {
				t.Run(fmt.Sprintf("%s-%s", subresource, verb), func(t *testing.T) {
					switch verb {
					case "create":
						_, err = clients["v1beta2"].Create(newConversionMultiVersionFixture(ns, id, "v1beta2"), metav1.CreateOptions{}, subresource)
					case "update":
						_, err = clients["v1beta2"].Update(obj, metav1.UpdateOptions{}, subresource)
					case "patch":
						_, err = clients["v1beta2"].Patch(obj.GetName(), types.MergePatchType, []byte(`{"metadata":{"annotations":{"patch":"true"}}}`), metav1.PatchOptions{}, subresource)
					}
					if err == nil {
						t.Errorf("expected error with message %s, but got no error", message)
					} else if !strings.Contains(err.Error(), message) {
						t.Errorf("expected error with message %s, but got %v", message, err)
					}
				})
			}
		}
	}
}
// noopConverter rewrites only the apiVersion of the incoming object to the
// desired version, leaving every other field untouched.
func noopConverter(desiredAPIVersion string, obj runtime.RawExtension) (runtime.RawExtension, error) {
	converted := &unstructured.Unstructured{Object: map[string]interface{}{}}
	if err := json.Unmarshal(obj.Raw, converted); err != nil {
		return runtime.RawExtension{}, fmt.Errorf("failed to deserialize object: %s with error: %v", string(obj.Raw), err)
	}
	converted.Object["apiVersion"] = desiredAPIVersion
	raw, err := json.Marshal(converted)
	if err != nil {
		return runtime.RawExtension{}, fmt.Errorf("failed to serialize object: %v with error: %v", converted, err)
	}
	return runtime.RawExtension{Raw: raw}, nil
}
// emptyResponseConverter answers every ConversionReview with a successful
// response carrying zero converted objects — invalid from the API server's
// point of view, which expects one converted object per input.
func emptyResponseConverter(review apiextensionsv1beta1.ConversionReview) (apiextensionsv1beta1.ConversionReview, error) {
	review.Response = &apiextensionsv1beta1.ConversionResponse{
		UID:              review.Request.UID,
		ConvertedObjects: []runtime.RawExtension{},
		Result:           metav1.Status{Status: "Success"},
	}
	return review, nil
}
// failureResponseConverter builds a converter that rejects every
// ConversionReview with a Failure status carrying the given message.
func failureResponseConverter(message string) func(review apiextensionsv1beta1.ConversionReview) (apiextensionsv1beta1.ConversionReview, error) {
	return func(review apiextensionsv1beta1.ConversionReview) (apiextensionsv1beta1.ConversionReview, error) {
		review.Response = &apiextensionsv1beta1.ConversionResponse{
			UID:              review.Request.UID,
			ConvertedObjects: []runtime.RawExtension{},
			Result:           metav1.Status{Message: message, Status: "Failure"},
		}
		return review, nil
	}
}
// nontrivialConverter converts between the three MultiVersion schemas:
// v1alpha1 and v1beta1 share one schema (num/content) while v1beta2 uses the
// renamed fields (numv2/contentv2). Unknown version pairs yield an error.
func nontrivialConverter(desiredAPIVersion string, obj runtime.RawExtension) (runtime.RawExtension, error) {
	converted := &unstructured.Unstructured{Object: map[string]interface{}{}}
	if err := json.Unmarshal(obj.Raw, converted); err != nil {
		return runtime.RawExtension{}, fmt.Errorf("failed to deserialize object: %s with error: %v", string(obj.Raw), err)
	}
	from := converted.GetAPIVersion()
	// v1alpha1 and v1beta1 use the same field names.
	v1Schema := func(v string) bool {
		return v == "stable.example.com/v1alpha1" || v == "stable.example.com/v1beta1"
	}
	switch {
	case from == "stable.example.com/v1beta2" && v1Schema(desiredAPIVersion):
		// Rename the v2 fields back to the v1 names.
		converted.Object["num"] = converted.Object["numv2"]
		converted.Object["content"] = converted.Object["contentv2"]
		delete(converted.Object, "numv2")
		delete(converted.Object, "contentv2")
	case v1Schema(from) && desiredAPIVersion == "stable.example.com/v1beta2":
		// Rename the v1 fields to their v2 names.
		converted.Object["numv2"] = converted.Object["num"]
		converted.Object["contentv2"] = converted.Object["content"]
		delete(converted.Object, "num")
		delete(converted.Object, "content")
	case v1Schema(from) && v1Schema(desiredAPIVersion):
		// same schema: nothing to rename
	case from != desiredAPIVersion:
		return runtime.RawExtension{}, fmt.Errorf("cannot convert from %s to %s", from, desiredAPIVersion)
	}
	converted.Object["apiVersion"] = desiredAPIVersion
	raw, err := json.Marshal(converted)
	if err != nil {
		return runtime.RawExtension{}, fmt.Errorf("failed to serialize object: %v with error: %v", converted, err)
	}
	return runtime.RawExtension{Raw: raw}, nil
}
// newConversionTestContext registers the given CRD and returns a teardown
// function (which deletes the CRD and fails the test on error) together with
// a conversionTestContext wired to the provided clients.
func newConversionTestContext(t *testing.T, apiExtensionsClient clientset.Interface, dynamicClient dynamic.Interface, etcdObjectReader *storage.EtcdObjectReader, crd *apiextensionsv1beta1.CustomResourceDefinition) (func(), *conversionTestContext) {
	created, err := fixtures.CreateNewCustomResourceDefinition(crd, apiExtensionsClient, dynamicClient)
	if err != nil {
		t.Fatal(err)
	}
	ctx := &conversionTestContext{
		apiExtensionsClient: apiExtensionsClient,
		dynamicClient:       dynamicClient,
		crd:                 created,
		etcdObjectReader:    etcdObjectReader,
	}
	tearDown := func() {
		if err := fixtures.DeleteCustomResourceDefinition(created, apiExtensionsClient); err != nil {
			t.Fatal(err)
		}
	}
	return tearDown, ctx
}
// conversionTestContext bundles the clients and CRD used by the conversion
// test helpers below so they can mutate and observe the same definition.
type conversionTestContext struct {
	namespace           string // NOTE(review): not set by newConversionTestContext — presumably filled in by callers; confirm
	apiExtensionsClient clientset.Interface
	dynamicClient       dynamic.Interface
	options             *options.CustomResourceDefinitionsServerOptions
	crd                 *apiextensionsv1beta1.CustomResourceDefinition // kept up to date by the set* helpers after each Update
	etcdObjectReader    *storage.EtcdObjectReader
}
// versionedClient returns a dynamic client for the CRD scoped to the given
// namespace and serving version.
func (c *conversionTestContext) versionedClient(ns string, version string) dynamic.ResourceInterface {
	return newNamespacedCustomResourceVersionedClient(ns, c.dynamicClient, c.crd, version)
}
// versionedClients returns one namespaced dynamic client per declared CRD
// version, keyed by version name.
func (c *conversionTestContext) versionedClients(ns string) map[string]dynamic.ResourceInterface {
	clients := make(map[string]dynamic.ResourceInterface, len(c.crd.Spec.Versions))
	for _, version := range c.crd.Spec.Versions {
		clients[version.Name] = newNamespacedCustomResourceVersionedClient(ns, c.dynamicClient, c.crd, version.Name)
	}
	return clients
}
// setConversionWebhook switches the CRD's conversion strategy to the webhook
// converter with the given client config and stores the updated CRD.
func (c *conversionTestContext) setConversionWebhook(t *testing.T, webhookClientConfig *apiextensionsv1beta1.WebhookClientConfig) {
	current, err := c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(c.crd.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatal(err)
	}
	current.Spec.Conversion = &apiextensionsv1beta1.CustomResourceConversion{
		Strategy:            apiextensionsv1beta1.WebhookConverter,
		WebhookClientConfig: webhookClientConfig,
	}
	updated, err := c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(current)
	if err != nil {
		t.Fatal(err)
	}
	c.crd = updated
}
// removeConversionWebhook resets the CRD's conversion strategy back to the
// None converter and stores the updated CRD.
func (c *conversionTestContext) removeConversionWebhook(t *testing.T) {
	current, err := c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(c.crd.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatal(err)
	}
	current.Spec.Conversion = &apiextensionsv1beta1.CustomResourceConversion{
		Strategy: apiextensionsv1beta1.NoneConverter,
	}
	updated, err := c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(current)
	if err != nil {
		t.Fatal(err)
	}
	c.crd = updated
}
// setAndWaitStorageVersion flips the CRD's storage version to the given
// version and blocks until etcd demonstrably stores objects in it, using a
// throwaway probe object in the "probe" namespace.
func (c *conversionTestContext) setAndWaitStorageVersion(t *testing.T, version string) {
	c.setStorageVersion(t, version)
	// create probe object. Version should be the default one to avoid webhook calls during test setup.
	client := c.versionedClient("probe", "v1beta1")
	name := fmt.Sprintf("probe-%v", uuid.NewUUID())
	storageProbe, err := client.Create(newConversionMultiVersionFixture("probe", name, "v1beta1"), metav1.CreateOptions{})
	if err != nil {
		t.Fatal(err)
	}
	// update object continuously and wait for etcd to have the target storage version.
	c.waitForStorageVersion(t, version, c.versionedClient(storageProbe.GetNamespace(), "v1beta1"), storageProbe)
	// clean up the probe so it does not leak into the actual test.
	err = client.Delete(name, &metav1.DeleteOptions{})
	if err != nil {
		t.Fatal(err)
	}
}
// setStorageVersion marks exactly the given version as the CRD's storage
// version (clearing the flag on every other version) and stores the result.
func (c *conversionTestContext) setStorageVersion(t *testing.T, version string) {
	current, err := c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(c.crd.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatal(err)
	}
	for idx := range current.Spec.Versions {
		current.Spec.Versions[idx].Storage = current.Spec.Versions[idx].Name == version
	}
	updated, err := c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(current)
	if err != nil {
		t.Fatal(err)
	}
	c.crd = updated
}
// waitForStorageVersion blocks (up to 30s) until etcd reports the object is
// stored in the given version, nudging the apiserver to rewrite the object
// by issuing empty merge patches between polls. Fails the test on timeout.
func (c *conversionTestContext) waitForStorageVersion(t *testing.T, version string, versionedClient dynamic.ResourceInterface, obj *unstructured.Unstructured) *unstructured.Unstructured {
	if err := c.etcdObjectReader.WaitForStorageVersion(version, obj.GetNamespace(), obj.GetName(), 30*time.Second, func() {
		// An empty patch still forces a write, which re-encodes the object
		// at the current storage version.
		if _, err := versionedClient.Patch(obj.GetName(), types.MergePatchType, []byte(`{}`), metav1.PatchOptions{}); err != nil {
			t.Fatalf("failed to update object: %v", err)
		}
	}); err != nil {
		t.Fatalf("failed waiting for storage version %s: %v", version, err)
	}
	t.Logf("Effective storage version: %s", version)
	return obj
}
// setServed toggles the Served flag of the named CRD version and stores the
// updated CRD. Other versions are left untouched.
func (c *conversionTestContext) setServed(t *testing.T, version string, served bool) {
	current, err := c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(c.crd.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatal(err)
	}
	for idx := range current.Spec.Versions {
		if current.Spec.Versions[idx].Name == version {
			current.Spec.Versions[idx].Served = served
		}
	}
	updated, err := c.apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Update(current)
	if err != nil {
		t.Fatal(err)
	}
	c.crd = updated
}
// waitForServed polls the versioned client until the served state becomes
// observable: a successful GET when served is true, or a NotFound error when
// served is false. Fails the test after a 30 second timeout.
func (c *conversionTestContext) waitForServed(t *testing.T, version string, served bool, versionedClient dynamic.ResourceInterface, obj *unstructured.Unstructured) {
	timeout := 30 * time.Second
	waitCh := time.After(timeout)
	for {
		// obj is intentionally shadowed: the latest GET result is included
		// in the timeout message below.
		obj, err := versionedClient.Get(obj.GetName(), metav1.GetOptions{})
		if (err == nil && served) || (errors.IsNotFound(err) && !served) {
			return
		}
		select {
		case <-waitCh:
			t.Fatalf("Timed out after %v waiting for CRD served=%t for version %s for %v. Last error: %v", timeout, served, version, obj, err)
		case <-time.After(10 * time.Millisecond):
			// brief pause before the next poll
		}
	}
}
// multiVersionFixture is a namespaced CRD with three versions: v1beta1
// (storage) and v1alpha1 share a schema, while v1beta2 differs, so real
// conversion is required between v1beta2 and the other two. Status and scale
// subresources are enabled.
var multiVersionFixture = &apiextensionsv1beta1.CustomResourceDefinition{
	ObjectMeta: metav1.ObjectMeta{Name: "multiversion.stable.example.com"},
	Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{
		Group:   "stable.example.com",
		Version: "v1beta1",
		Names: apiextensionsv1beta1.CustomResourceDefinitionNames{
			Plural:     "multiversion",
			Singular:   "multiversion",
			Kind:       "MultiVersion",
			ShortNames: []string{"mv"},
			ListKind:   "MultiVersionList",
			Categories: []string{"all"},
		},
		Scope: apiextensionsv1beta1.NamespaceScoped,
		Versions: []apiextensionsv1beta1.CustomResourceDefinitionVersion{
			{
				// storage version, same schema as v1alpha1
				Name:    "v1beta1",
				Served:  true,
				Storage: true,
			},
			{
				// same schema as v1beta1
				Name:    "v1alpha1",
				Served:  true,
				Storage: false,
			},
			{
				// different schema than v1beta1 and v1alpha1
				Name:    "v1beta2",
				Served:  true,
				Storage: false,
			},
		},
		Subresources: &apiextensionsv1beta1.CustomResourceSubresources{
			Status: &apiextensionsv1beta1.CustomResourceSubresourceStatus{},
			Scale: &apiextensionsv1beta1.CustomResourceSubresourceScale{
				// paths follow the v1beta1/v1alpha1 field names (num)
				SpecReplicasPath:   ".spec.num.num1",
				StatusReplicasPath: ".status.num.num2",
			},
		},
	},
}
// newConversionMultiVersionFixture returns an unstructured MultiVersion
// custom resource for the given namespace/name in the schema of the given
// version. v1alpha1 and v1beta1 share a schema (num/content); v1beta2 uses
// the renamed fields numv2/contentv2. Panics on an unknown version.
func newConversionMultiVersionFixture(namespace, name, version string) *unstructured.Unstructured {
	u := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"apiVersion": "stable.example.com/" + version,
			"kind":       "MultiVersion",
			"metadata": map[string]interface{}{
				"namespace": namespace,
				"name":      name,
			},
		},
	}
	switch version {
	// v1alpha1 and v1beta1 use identical payloads, so one case covers both
	// (the two original cases were byte-for-byte duplicates).
	case "v1alpha1", "v1beta1":
		u.Object["content"] = map[string]interface{}{
			"key": "value",
		}
		u.Object["num"] = map[string]interface{}{
			"num1": int64(1),
			"num2": int64(1000000),
		}
	case "v1beta2":
		u.Object["contentv2"] = map[string]interface{}{
			"key": "value",
		}
		u.Object["numv2"] = map[string]interface{}{
			"num1": int64(1),
			"num2": int64(1000000),
		}
	default:
		panic(fmt.Sprintf("unknown version %s", version))
	}
	return u
}
func closeOnCall(h http.Handler) (chan struct{}, http.Handler) {
ch := make(chan struct{})
once := sync.Once{}
return ch, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
once.Do(func() {
close(ch)
})
h.ServeHTTP(w, r)
})
}
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package syscall_test
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"os/signal"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
"testing"
"time"
"unsafe"
)
// chtmpdir changes the working directory to a new temporary directory and
// provides a cleanup function. Used when PWD is read-only.
func chtmpdir(t *testing.T) func() {
oldwd, err := os.Getwd()
if err != nil {
t.Fatalf("chtmpdir: %v", err)
}
d, err := ioutil.TempDir("", "test")
if err != nil {
t.Fatalf("chtmpdir: %v", err)
}
if err := os.Chdir(d); err != nil {
t.Fatalf("chtmpdir: %v", err)
}
return func() {
if err := os.Chdir(oldwd); err != nil {
t.Fatalf("chtmpdir: %v", err)
}
os.RemoveAll(d)
}
}
func touch(t *testing.T, name string) {
f, err := os.Create(name)
if err != nil {
t.Fatal(err)
}
if err := f.Close(); err != nil {
t.Fatal(err)
}
}
// Linux AT_* flag values for the *at syscall family, mirrored here because
// the syscall package does not export them.
const (
	_AT_SYMLINK_NOFOLLOW = 0x100 // do not follow symbolic links
	_AT_FDCWD            = -0x64 // resolve paths relative to the current working directory
	_AT_EACCESS          = 0x200 // check access using effective, not real, IDs
	_F_OK                = 0     // test for existence only
	_R_OK                = 4     // test for read permission
)
// TestFaccessat exercises syscall.Faccessat: valid flag combinations,
// rejection of invalid flags, and the interaction of _AT_SYMLINK_NOFOLLOW
// with file permissions.
func TestFaccessat(t *testing.T) {
	defer chtmpdir(t)()
	touch(t, "file1")
	// Plain readability check on a fresh file must succeed.
	err := syscall.Faccessat(_AT_FDCWD, "file1", _R_OK, 0)
	if err != nil {
		t.Errorf("Faccessat: unexpected error: %v", err)
	}
	// 2 is not a valid flag value; the kernel must reject it.
	err = syscall.Faccessat(_AT_FDCWD, "file1", _R_OK, 2)
	if err != syscall.EINVAL {
		t.Errorf("Faccessat: unexpected error: %v, want EINVAL", err)
	}
	err = syscall.Faccessat(_AT_FDCWD, "file1", _R_OK, _AT_EACCESS)
	if err != nil {
		t.Errorf("Faccessat: unexpected error: %v", err)
	}
	err = os.Symlink("file1", "symlink1")
	if err != nil {
		t.Fatal(err)
	}
	err = syscall.Faccessat(_AT_FDCWD, "symlink1", _R_OK, _AT_SYMLINK_NOFOLLOW)
	if err != nil {
		t.Errorf("Faccessat SYMLINK_NOFOLLOW: unexpected error %v", err)
	}
	// We can't really test _AT_SYMLINK_NOFOLLOW, because there
	// doesn't seem to be any way to change the mode of a symlink.
	// We don't test _AT_EACCESS because such tests are only
	// meaningful if run as root.
	// Strip all permissions from the file so the read check below fails.
	err = syscall.Fchmodat(_AT_FDCWD, "file1", 0, 0)
	if err != nil {
		t.Errorf("Fchmodat: unexpected error %v", err)
	}
	// Existence check still passes with mode 0.
	err = syscall.Faccessat(_AT_FDCWD, "file1", _F_OK, _AT_SYMLINK_NOFOLLOW)
	if err != nil {
		t.Errorf("Faccessat: unexpected error: %v", err)
	}
	err = syscall.Faccessat(_AT_FDCWD, "file1", _R_OK, _AT_SYMLINK_NOFOLLOW)
	if err != syscall.EACCES {
		// root can read the file regardless of its mode, so only
		// non-root runs expect EACCES here.
		if syscall.Getuid() != 0 {
			t.Errorf("Faccessat: unexpected error: %v, want EACCES", err)
		}
	}
}
// TestFchmodat verifies that Fchmodat on a symlink (flags=0) changes the
// target file's mode, and that _AT_SYMLINK_NOFOLLOW is rejected with
// EOPNOTSUPP on Linux.
func TestFchmodat(t *testing.T) {
	defer chtmpdir(t)()
	touch(t, "file1")
	os.Symlink("file1", "symlink1")
	// Without flags the symlink is followed, so file1's mode changes.
	err := syscall.Fchmodat(_AT_FDCWD, "symlink1", 0444, 0)
	if err != nil {
		t.Fatalf("Fchmodat: unexpected error: %v", err)
	}
	fi, err := os.Stat("file1")
	if err != nil {
		t.Fatal(err)
	}
	if fi.Mode() != 0444 {
		t.Errorf("Fchmodat: failed to change mode: expected %v, got %v", 0444, fi.Mode())
	}
	// Linux does not support changing the mode of a symlink itself.
	err = syscall.Fchmodat(_AT_FDCWD, "symlink1", 0444, _AT_SYMLINK_NOFOLLOW)
	if err != syscall.EOPNOTSUPP {
		t.Fatalf("Fchmodat: unexpected error: %v, expected EOPNOTSUPP", err)
	}
}
// TestMain dispatches helper-process modes: when one of the GO_* environment
// variables is set, the test binary acts as a child process for the death
// signal / no-error tests instead of running the test suite. Each helper
// calls os.Exit itself, so m.Run() is only reached in normal runs.
func TestMain(m *testing.M) {
	if os.Getenv("GO_DEATHSIG_PARENT") == "1" {
		deathSignalParent()
	} else if os.Getenv("GO_DEATHSIG_CHILD") == "1" {
		deathSignalChild()
	} else if os.Getenv("GO_SYSCALL_NOERROR") == "1" {
		syscallNoError()
	}
	os.Exit(m.Run())
}
// TestLinuxDeathSignal checks that Pdeathsig delivers a signal to a
// grandchild when its parent dies. The test (as root) re-executes itself as
// a parent helper which drops privileges and spawns a child; killing the
// parent must make the child print "ok" upon receiving SIGUSR1.
func TestLinuxDeathSignal(t *testing.T) {
	if os.Getuid() != 0 {
		t.Skip("skipping root only test")
	}
	// Copy the test binary to a location that a non-root user can read/execute
	// after we drop privileges
	tempDir, err := ioutil.TempDir("", "TestDeathSignal")
	if err != nil {
		t.Fatalf("cannot create temporary directory: %v", err)
	}
	defer os.RemoveAll(tempDir)
	os.Chmod(tempDir, 0755)
	tmpBinary := filepath.Join(tempDir, filepath.Base(os.Args[0]))
	src, err := os.Open(os.Args[0])
	if err != nil {
		t.Fatalf("cannot open binary %q, %v", os.Args[0], err)
	}
	defer src.Close()
	dst, err := os.OpenFile(tmpBinary, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755)
	if err != nil {
		t.Fatalf("cannot create temporary binary %q, %v", tmpBinary, err)
	}
	if _, err := io.Copy(dst, src); err != nil {
		t.Fatalf("failed to copy test binary to %q, %v", tmpBinary, err)
	}
	err = dst.Close()
	if err != nil {
		t.Fatalf("failed to close test binary %q, %v", tmpBinary, err)
	}
	// Re-exec the copied binary in parent-helper mode (see TestMain).
	cmd := exec.Command(tmpBinary)
	cmd.Env = []string{"GO_DEATHSIG_PARENT=1"}
	chldStdin, err := cmd.StdinPipe()
	if err != nil {
		t.Fatalf("failed to create new stdin pipe: %v", err)
	}
	chldStdout, err := cmd.StdoutPipe()
	if err != nil {
		t.Fatalf("failed to create new stdout pipe: %v", err)
	}
	cmd.Stderr = os.Stderr
	err = cmd.Start()
	defer cmd.Wait()
	if err != nil {
		t.Fatalf("failed to start first child process: %v", err)
	}
	chldPipe := bufio.NewReader(chldStdout)
	// The grandchild prints "start" once its signal handler is installed;
	// only then is it safe to kill the intermediate parent.
	if got, err := chldPipe.ReadString('\n'); got == "start\n" {
		syscall.Kill(cmd.Process.Pid, syscall.SIGTERM)
		// Safety valve: closing stdin unblocks the grandchild if the
		// death signal never arrives, so the test cannot hang forever.
		go func() {
			time.Sleep(5 * time.Second)
			chldStdin.Close()
		}()
		want := "ok\n"
		if got, err = chldPipe.ReadString('\n'); got != want {
			t.Fatalf("expected %q, received %q, %v", want, got, err)
		}
	} else {
		t.Fatalf("did not receive start from child, received %q, %v", got, err)
	}
}
// deathSignalParent is the intermediate helper process for
// TestLinuxDeathSignal: it spawns the grandchild with Pdeathsig=SIGUSR1 and
// dropped credentials, then waits until it is itself killed by the test.
func deathSignalParent() {
	cmd := exec.Command(os.Args[0])
	cmd.Env = []string{"GO_DEATHSIG_CHILD=1"}
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	attrs := syscall.SysProcAttr{
		// Deliver SIGUSR1 to the child when this process dies.
		Pdeathsig: syscall.SIGUSR1,
		// UID/GID 99 is the user/group "nobody" on RHEL/Fedora and is
		// unused on Ubuntu
		Credential: &syscall.Credential{Uid: 99, Gid: 99},
	}
	cmd.SysProcAttr = &attrs
	err := cmd.Start()
	if err != nil {
		fmt.Fprintf(os.Stderr, "death signal parent error: %v\n", err)
		os.Exit(1)
	}
	cmd.Wait()
	os.Exit(0)
}
// deathSignalChild is the grandchild helper for TestLinuxDeathSignal. It
// prints "start" once ready, then prints "ok" and exits 0 if SIGUSR1 (the
// death signal) arrives; if stdin closes first it prints "not ok" and exits 1.
func deathSignalChild() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGUSR1)
	go func() {
		<-c
		fmt.Println("ok")
		os.Exit(0)
	}()
	// Tell the test we are ready to receive the signal.
	fmt.Println("start")
	buf := make([]byte, 32)
	// Block on stdin; the test closes it as a timeout fallback.
	os.Stdin.Read(buf)
	// We expected to be signaled before stdin closed
	fmt.Println("not ok")
	os.Exit(1)
}
// TestParseNetlinkMessage feeds captured, malformed netlink byte sequences
// to ParseNetlinkMessage and requires EINVAL with a nil result for each.
// The fixture bytes must not be altered — they encode the exact corruption
// being regression-tested.
func TestParseNetlinkMessage(t *testing.T) {
	for i, b := range [][]byte{
		{103, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 2, 11, 0, 1, 0, 0, 0, 0, 5, 8, 0, 3,
			0, 8, 0, 6, 0, 0, 0, 0, 1, 63, 0, 10, 0, 69, 16, 0, 59, 39, 82, 64, 0, 64, 6, 21, 89, 127, 0, 0,
			1, 127, 0, 0, 1, 230, 228, 31, 144, 32, 186, 155, 211, 185, 151, 209, 179, 128, 24, 1, 86,
			53, 119, 0, 0, 1, 1, 8, 10, 0, 17, 234, 12, 0, 17, 189, 126, 107, 106, 108, 107, 106, 13, 10,
		},
		{106, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 2, 11, 0, 1, 0, 0, 0, 0, 3, 8, 0, 3,
			0, 8, 0, 6, 0, 0, 0, 0, 1, 66, 0, 10, 0, 69, 0, 0, 62, 230, 255, 64, 0, 64, 6, 85, 184, 127, 0, 0,
			1, 127, 0, 0, 1, 237, 206, 31, 144, 73, 197, 128, 65, 250, 60, 192, 97, 128, 24, 1, 86, 253, 21, 0,
			0, 1, 1, 8, 10, 0, 51, 106, 89, 0, 51, 102, 198, 108, 104, 106, 108, 107, 104, 108, 107, 104, 10,
		},
		{102, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 2, 11, 0, 1, 0, 0, 0, 0, 1, 8, 0, 3, 0,
			8, 0, 6, 0, 0, 0, 0, 1, 62, 0, 10, 0, 69, 0, 0, 58, 231, 2, 64, 0, 64, 6, 85, 185, 127, 0, 0, 1, 127,
			0, 0, 1, 237, 206, 31, 144, 73, 197, 128, 86, 250, 60, 192, 97, 128, 24, 1, 86, 104, 64, 0, 0, 1, 1, 8,
			10, 0, 52, 198, 200, 0, 51, 135, 232, 101, 115, 97, 103, 103, 10,
		},
	} {
		m, err := syscall.ParseNetlinkMessage(b)
		if err != syscall.EINVAL {
			t.Errorf("#%d: got %v; want EINVAL", i, err)
		}
		if m != nil {
			t.Errorf("#%d: got %v; want nil", i, m)
		}
	}
}
// TestSyscallNoError verifies that large non-error return values from
// RawSyscall are not misinterpreted as errors on 32-bit architectures. It
// re-executes a setuid copy of the test binary (see TestMain) as a user with
// UID 0xfffffffe and compares the printed geteuid results.
func TestSyscallNoError(t *testing.T) {
	// On Linux there are currently no syscalls which don't fail and return
	// a value larger than 0xfffffffffffff001 so we could test RawSyscall
	// vs. RawSyscallNoError on 64bit architectures.
	if unsafe.Sizeof(uintptr(0)) != 4 {
		t.Skip("skipping on non-32bit architecture")
	}
	// See https://golang.org/issue/35422
	// On MIPS, Linux returns whether the syscall had an error in a separate
	// register (R7), not using a negative return value as on other
	// architectures.
	if runtime.GOARCH == "mips" || runtime.GOARCH == "mipsle" {
		t.Skipf("skipping on %s", runtime.GOARCH)
	}
	if os.Getuid() != 0 {
		t.Skip("skipping root only test")
	}
	if runtime.GOOS == "android" {
		t.Skip("skipping on rooted android, see issue 27364")
	}
	// Copy the test binary to a location that a non-root user can read/execute
	// after we drop privileges
	tempDir, err := ioutil.TempDir("", "TestSyscallNoError")
	if err != nil {
		t.Fatalf("cannot create temporary directory: %v", err)
	}
	defer os.RemoveAll(tempDir)
	os.Chmod(tempDir, 0755)
	tmpBinary := filepath.Join(tempDir, filepath.Base(os.Args[0]))
	src, err := os.Open(os.Args[0])
	if err != nil {
		t.Fatalf("cannot open binary %q, %v", os.Args[0], err)
	}
	defer src.Close()
	dst, err := os.OpenFile(tmpBinary, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755)
	if err != nil {
		t.Fatalf("cannot create temporary binary %q, %v", tmpBinary, err)
	}
	if _, err := io.Copy(dst, src); err != nil {
		t.Fatalf("failed to copy test binary to %q, %v", tmpBinary, err)
	}
	err = dst.Close()
	if err != nil {
		t.Fatalf("failed to close test binary %q, %v", tmpBinary, err)
	}
	// 0xfffffffe would look like an error (-2) if truncated/misread.
	uid := uint32(0xfffffffe)
	err = os.Chown(tmpBinary, int(uid), -1)
	if err != nil {
		t.Fatalf("failed to chown test binary %q, %v", tmpBinary, err)
	}
	err = os.Chmod(tmpBinary, 0755|os.ModeSetuid)
	if err != nil {
		t.Fatalf("failed to set setuid bit on test binary %q, %v", tmpBinary, err)
	}
	cmd := exec.Command(tmpBinary)
	cmd.Env = []string{"GO_SYSCALL_NOERROR=1"}
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("failed to start first child process: %v", err)
	}
	got := strings.TrimSpace(string(out))
	want := strconv.FormatUint(uint64(uid)+1, 10) + " / " +
		strconv.FormatUint(uint64(-uid), 10) + " / " +
		strconv.FormatUint(uint64(uid), 10)
	if got != want {
		if filesystemIsNoSUID(tmpBinary) {
			t.Skip("skipping test when temp dir is mounted nosuid")
		}
		// formatted so the values are aligned for easier comparison
		t.Errorf("expected %s,\ngot %s", want, got)
	}
}
// filesystemIsNoSUID reports whether the filesystem for the given
// path is mounted nosuid. A failed statfs is treated as "not nosuid".
func filesystemIsNoSUID(path string) bool {
	var fs syscall.Statfs_t
	err := syscall.Statfs(path, &fs)
	if err != nil {
		return false
	}
	return fs.Flags&syscall.MS_NOSUID != 0
}
// syscallNoError is the helper-process body for TestSyscallNoError (see
// TestMain). It prints the geteuid results from RawSyscall and
// RawSyscallNoError plus the errno so the parent can compare them.
func syscallNoError() {
	// Test that the return value from SYS_GETEUID32 (which cannot fail)
	// doesn't get treated as an error (see https://golang.org/issue/22924)
	// NOTE(review): syscall.Sys_GETEUID is presumably an arch-specific alias
	// for the geteuid syscall number — confirm it is defined for all
	// supported architectures.
	euid1, _, e := syscall.RawSyscall(syscall.Sys_GETEUID, 0, 0, 0)
	euid2, _ := syscall.RawSyscallNoError(syscall.Sys_GETEUID, 0, 0, 0)
	fmt.Println(uintptr(euid1), "/", int(e), "/", uintptr(euid2))
	os.Exit(0)
}
syscall: skip TestSyscallNoError on mips{,le}
On MIPS, Linux returns whether the syscall had an error in a separate
register (R7), not using a negative return value as on other
architectures. Thus, skip TestSyscallNoError as there is no error case
for syscall.RawSyscall which it could test against.
Also reformat the error output so the expected and gotten values are
aligned so they're easier to compare.
Fixes #35422
Change-Id: Ibc88f7c5382bb7ee8faf15ad4589ca1f9f017a06
Reviewed-on: https://go-review.googlesource.com/c/go/+/205898
Run-TryBot: Tobias Klauser <0a68dd4915066ec5d3f81f75a828fee53dcc8822@gmail.com>
Reviewed-by: Ian Lance Taylor <87e9c6d529889242b7e184afb632328636553ab4@golang.org>
Reviewed-by: Cherry Zhang <d62e63aa42ce272d7b6a5055d97e942b33a34679@google.com>
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package syscall_test
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"os/signal"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
"testing"
"time"
"unsafe"
)
// chtmpdir changes the working directory to a new temporary directory and
// provides a cleanup function. Used when PWD is read-only.
func chtmpdir(t *testing.T) func() {
oldwd, err := os.Getwd()
if err != nil {
t.Fatalf("chtmpdir: %v", err)
}
d, err := ioutil.TempDir("", "test")
if err != nil {
t.Fatalf("chtmpdir: %v", err)
}
if err := os.Chdir(d); err != nil {
t.Fatalf("chtmpdir: %v", err)
}
return func() {
if err := os.Chdir(oldwd); err != nil {
t.Fatalf("chtmpdir: %v", err)
}
os.RemoveAll(d)
}
}
func touch(t *testing.T, name string) {
f, err := os.Create(name)
if err != nil {
t.Fatal(err)
}
if err := f.Close(); err != nil {
t.Fatal(err)
}
}
// Linux AT_* flag values for the *at syscall family, mirrored here because
// the syscall package does not export them.
const (
	_AT_SYMLINK_NOFOLLOW = 0x100 // do not follow symbolic links
	_AT_FDCWD            = -0x64 // resolve paths relative to the current working directory
	_AT_EACCESS          = 0x200 // check access using effective, not real, IDs
	_F_OK                = 0     // test for existence only
	_R_OK                = 4     // test for read permission
)
// TestFaccessat exercises syscall.Faccessat: valid flag combinations,
// rejection of invalid flags, and the interaction of _AT_SYMLINK_NOFOLLOW
// with file permissions.
func TestFaccessat(t *testing.T) {
	defer chtmpdir(t)()
	touch(t, "file1")
	// Plain readability check on a fresh file must succeed.
	err := syscall.Faccessat(_AT_FDCWD, "file1", _R_OK, 0)
	if err != nil {
		t.Errorf("Faccessat: unexpected error: %v", err)
	}
	// 2 is not a valid flag value; the kernel must reject it.
	err = syscall.Faccessat(_AT_FDCWD, "file1", _R_OK, 2)
	if err != syscall.EINVAL {
		t.Errorf("Faccessat: unexpected error: %v, want EINVAL", err)
	}
	err = syscall.Faccessat(_AT_FDCWD, "file1", _R_OK, _AT_EACCESS)
	if err != nil {
		t.Errorf("Faccessat: unexpected error: %v", err)
	}
	err = os.Symlink("file1", "symlink1")
	if err != nil {
		t.Fatal(err)
	}
	err = syscall.Faccessat(_AT_FDCWD, "symlink1", _R_OK, _AT_SYMLINK_NOFOLLOW)
	if err != nil {
		t.Errorf("Faccessat SYMLINK_NOFOLLOW: unexpected error %v", err)
	}
	// We can't really test _AT_SYMLINK_NOFOLLOW, because there
	// doesn't seem to be any way to change the mode of a symlink.
	// We don't test _AT_EACCESS because such tests are only
	// meaningful if run as root.
	// Strip all permissions from the file so the read check below fails.
	err = syscall.Fchmodat(_AT_FDCWD, "file1", 0, 0)
	if err != nil {
		t.Errorf("Fchmodat: unexpected error %v", err)
	}
	// Existence check still passes with mode 0.
	err = syscall.Faccessat(_AT_FDCWD, "file1", _F_OK, _AT_SYMLINK_NOFOLLOW)
	if err != nil {
		t.Errorf("Faccessat: unexpected error: %v", err)
	}
	err = syscall.Faccessat(_AT_FDCWD, "file1", _R_OK, _AT_SYMLINK_NOFOLLOW)
	if err != syscall.EACCES {
		// root can read the file regardless of its mode, so only
		// non-root runs expect EACCES here.
		if syscall.Getuid() != 0 {
			t.Errorf("Faccessat: unexpected error: %v, want EACCES", err)
		}
	}
}
// TestFchmodat verifies that Fchmodat on a symlink (flags=0) changes the
// target file's mode, and that _AT_SYMLINK_NOFOLLOW is rejected with
// EOPNOTSUPP on Linux.
func TestFchmodat(t *testing.T) {
	defer chtmpdir(t)()
	touch(t, "file1")
	os.Symlink("file1", "symlink1")
	// Without flags the symlink is followed, so file1's mode changes.
	err := syscall.Fchmodat(_AT_FDCWD, "symlink1", 0444, 0)
	if err != nil {
		t.Fatalf("Fchmodat: unexpected error: %v", err)
	}
	fi, err := os.Stat("file1")
	if err != nil {
		t.Fatal(err)
	}
	if fi.Mode() != 0444 {
		t.Errorf("Fchmodat: failed to change mode: expected %v, got %v", 0444, fi.Mode())
	}
	// Linux does not support changing the mode of a symlink itself.
	err = syscall.Fchmodat(_AT_FDCWD, "symlink1", 0444, _AT_SYMLINK_NOFOLLOW)
	if err != syscall.EOPNOTSUPP {
		t.Fatalf("Fchmodat: unexpected error: %v, expected EOPNOTSUPP", err)
	}
}
// TestMain dispatches helper-process modes: when one of the GO_* environment
// variables is set, the test binary acts as a child process for the death
// signal / no-error tests instead of running the test suite. Each helper
// calls os.Exit itself, so m.Run() is only reached in normal runs.
func TestMain(m *testing.M) {
	if os.Getenv("GO_DEATHSIG_PARENT") == "1" {
		deathSignalParent()
	} else if os.Getenv("GO_DEATHSIG_CHILD") == "1" {
		deathSignalChild()
	} else if os.Getenv("GO_SYSCALL_NOERROR") == "1" {
		syscallNoError()
	}
	os.Exit(m.Run())
}
// TestLinuxDeathSignal checks that Pdeathsig delivers a signal to a
// grandchild when its parent dies. The test (as root) re-executes itself as
// a parent helper which drops privileges and spawns a child; killing the
// parent must make the child print "ok" upon receiving SIGUSR1.
func TestLinuxDeathSignal(t *testing.T) {
	if os.Getuid() != 0 {
		t.Skip("skipping root only test")
	}
	// Copy the test binary to a location that a non-root user can read/execute
	// after we drop privileges
	tempDir, err := ioutil.TempDir("", "TestDeathSignal")
	if err != nil {
		t.Fatalf("cannot create temporary directory: %v", err)
	}
	defer os.RemoveAll(tempDir)
	os.Chmod(tempDir, 0755)
	tmpBinary := filepath.Join(tempDir, filepath.Base(os.Args[0]))
	src, err := os.Open(os.Args[0])
	if err != nil {
		t.Fatalf("cannot open binary %q, %v", os.Args[0], err)
	}
	defer src.Close()
	dst, err := os.OpenFile(tmpBinary, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755)
	if err != nil {
		t.Fatalf("cannot create temporary binary %q, %v", tmpBinary, err)
	}
	if _, err := io.Copy(dst, src); err != nil {
		t.Fatalf("failed to copy test binary to %q, %v", tmpBinary, err)
	}
	err = dst.Close()
	if err != nil {
		t.Fatalf("failed to close test binary %q, %v", tmpBinary, err)
	}
	// Re-exec the copied binary in parent-helper mode (see TestMain).
	cmd := exec.Command(tmpBinary)
	cmd.Env = []string{"GO_DEATHSIG_PARENT=1"}
	chldStdin, err := cmd.StdinPipe()
	if err != nil {
		t.Fatalf("failed to create new stdin pipe: %v", err)
	}
	chldStdout, err := cmd.StdoutPipe()
	if err != nil {
		t.Fatalf("failed to create new stdout pipe: %v", err)
	}
	cmd.Stderr = os.Stderr
	err = cmd.Start()
	defer cmd.Wait()
	if err != nil {
		t.Fatalf("failed to start first child process: %v", err)
	}
	chldPipe := bufio.NewReader(chldStdout)
	// The grandchild prints "start" once its signal handler is installed;
	// only then is it safe to kill the intermediate parent.
	if got, err := chldPipe.ReadString('\n'); got == "start\n" {
		syscall.Kill(cmd.Process.Pid, syscall.SIGTERM)
		// Safety valve: closing stdin unblocks the grandchild if the
		// death signal never arrives, so the test cannot hang forever.
		go func() {
			time.Sleep(5 * time.Second)
			chldStdin.Close()
		}()
		want := "ok\n"
		if got, err = chldPipe.ReadString('\n'); got != want {
			t.Fatalf("expected %q, received %q, %v", want, got, err)
		}
	} else {
		t.Fatalf("did not receive start from child, received %q, %v", got, err)
	}
}
// deathSignalParent is the intermediate helper process for
// TestLinuxDeathSignal: it spawns the grandchild with Pdeathsig=SIGUSR1 and
// dropped credentials, then waits until it is itself killed by the test.
func deathSignalParent() {
	cmd := exec.Command(os.Args[0])
	cmd.Env = []string{"GO_DEATHSIG_CHILD=1"}
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	attrs := syscall.SysProcAttr{
		// Deliver SIGUSR1 to the child when this process dies.
		Pdeathsig: syscall.SIGUSR1,
		// UID/GID 99 is the user/group "nobody" on RHEL/Fedora and is
		// unused on Ubuntu
		Credential: &syscall.Credential{Uid: 99, Gid: 99},
	}
	cmd.SysProcAttr = &attrs
	err := cmd.Start()
	if err != nil {
		fmt.Fprintf(os.Stderr, "death signal parent error: %v\n", err)
		os.Exit(1)
	}
	cmd.Wait()
	os.Exit(0)
}
// deathSignalChild is the grandchild helper for TestLinuxDeathSignal. It
// prints "start" once ready, then prints "ok" and exits 0 if SIGUSR1 (the
// death signal) arrives; if stdin closes first it prints "not ok" and exits 1.
func deathSignalChild() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGUSR1)
	go func() {
		<-c
		fmt.Println("ok")
		os.Exit(0)
	}()
	// Tell the test we are ready to receive the signal.
	fmt.Println("start")
	buf := make([]byte, 32)
	// Block on stdin; the test closes it as a timeout fallback.
	os.Stdin.Read(buf)
	// We expected to be signaled before stdin closed
	fmt.Println("not ok")
	os.Exit(1)
}
// TestParseNetlinkMessage feeds captured, malformed netlink byte sequences
// to ParseNetlinkMessage and requires EINVAL with a nil result for each.
// The fixture bytes must not be altered — they encode the exact corruption
// being regression-tested.
func TestParseNetlinkMessage(t *testing.T) {
	for i, b := range [][]byte{
		{103, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 2, 11, 0, 1, 0, 0, 0, 0, 5, 8, 0, 3,
			0, 8, 0, 6, 0, 0, 0, 0, 1, 63, 0, 10, 0, 69, 16, 0, 59, 39, 82, 64, 0, 64, 6, 21, 89, 127, 0, 0,
			1, 127, 0, 0, 1, 230, 228, 31, 144, 32, 186, 155, 211, 185, 151, 209, 179, 128, 24, 1, 86,
			53, 119, 0, 0, 1, 1, 8, 10, 0, 17, 234, 12, 0, 17, 189, 126, 107, 106, 108, 107, 106, 13, 10,
		},
		{106, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 2, 11, 0, 1, 0, 0, 0, 0, 3, 8, 0, 3,
			0, 8, 0, 6, 0, 0, 0, 0, 1, 66, 0, 10, 0, 69, 0, 0, 62, 230, 255, 64, 0, 64, 6, 85, 184, 127, 0, 0,
			1, 127, 0, 0, 1, 237, 206, 31, 144, 73, 197, 128, 65, 250, 60, 192, 97, 128, 24, 1, 86, 253, 21, 0,
			0, 1, 1, 8, 10, 0, 51, 106, 89, 0, 51, 102, 198, 108, 104, 106, 108, 107, 104, 108, 107, 104, 10,
		},
		{102, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 2, 11, 0, 1, 0, 0, 0, 0, 1, 8, 0, 3, 0,
			8, 0, 6, 0, 0, 0, 0, 1, 62, 0, 10, 0, 69, 0, 0, 58, 231, 2, 64, 0, 64, 6, 85, 185, 127, 0, 0, 1, 127,
			0, 0, 1, 237, 206, 31, 144, 73, 197, 128, 86, 250, 60, 192, 97, 128, 24, 1, 86, 104, 64, 0, 0, 1, 1, 8,
			10, 0, 52, 198, 200, 0, 51, 135, 232, 101, 115, 97, 103, 103, 10,
		},
	} {
		m, err := syscall.ParseNetlinkMessage(b)
		if err != syscall.EINVAL {
			t.Errorf("#%d: got %v; want EINVAL", i, err)
		}
		if m != nil {
			t.Errorf("#%d: got %v; want nil", i, m)
		}
	}
}
// TestSyscallNoError verifies that large non-error return values from
// RawSyscall are not misinterpreted as errors on 32-bit architectures. It
// re-executes a setuid copy of the test binary (see TestMain) as a user with
// UID 0xfffffffe and compares the printed geteuid results.
func TestSyscallNoError(t *testing.T) {
	// On Linux there are currently no syscalls which don't fail and return
	// a value larger than 0xfffffffffffff001 so we could test RawSyscall
	// vs. RawSyscallNoError on 64bit architectures.
	if unsafe.Sizeof(uintptr(0)) != 4 {
		t.Skip("skipping on non-32bit architecture")
	}
	// See https://golang.org/issue/35422
	// On MIPS, Linux returns whether the syscall had an error in a separate
	// register (R7), not using a negative return value as on other
	// architectures.
	if runtime.GOARCH == "mips" || runtime.GOARCH == "mipsle" {
		t.Skipf("skipping on %s", runtime.GOARCH)
	}
	if os.Getuid() != 0 {
		t.Skip("skipping root only test")
	}
	if runtime.GOOS == "android" {
		t.Skip("skipping on rooted android, see issue 27364")
	}
	// Copy the test binary to a location that a non-root user can read/execute
	// after we drop privileges
	tempDir, err := ioutil.TempDir("", "TestSyscallNoError")
	if err != nil {
		t.Fatalf("cannot create temporary directory: %v", err)
	}
	defer os.RemoveAll(tempDir)
	os.Chmod(tempDir, 0755)
	tmpBinary := filepath.Join(tempDir, filepath.Base(os.Args[0]))
	src, err := os.Open(os.Args[0])
	if err != nil {
		t.Fatalf("cannot open binary %q, %v", os.Args[0], err)
	}
	defer src.Close()
	dst, err := os.OpenFile(tmpBinary, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0755)
	if err != nil {
		t.Fatalf("cannot create temporary binary %q, %v", tmpBinary, err)
	}
	if _, err := io.Copy(dst, src); err != nil {
		t.Fatalf("failed to copy test binary to %q, %v", tmpBinary, err)
	}
	err = dst.Close()
	if err != nil {
		t.Fatalf("failed to close test binary %q, %v", tmpBinary, err)
	}
	// 0xfffffffe would look like an error (-2) if truncated/misread.
	uid := uint32(0xfffffffe)
	err = os.Chown(tmpBinary, int(uid), -1)
	if err != nil {
		t.Fatalf("failed to chown test binary %q, %v", tmpBinary, err)
	}
	err = os.Chmod(tmpBinary, 0755|os.ModeSetuid)
	if err != nil {
		t.Fatalf("failed to set setuid bit on test binary %q, %v", tmpBinary, err)
	}
	cmd := exec.Command(tmpBinary)
	cmd.Env = []string{"GO_SYSCALL_NOERROR=1"}
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("failed to start first child process: %v", err)
	}
	got := strings.TrimSpace(string(out))
	want := strconv.FormatUint(uint64(uid)+1, 10) + " / " +
		strconv.FormatUint(uint64(-uid), 10) + " / " +
		strconv.FormatUint(uint64(uid), 10)
	if got != want {
		if filesystemIsNoSUID(tmpBinary) {
			t.Skip("skipping test when temp dir is mounted nosuid")
		}
		// formatted so the values are aligned for easier comparison
		t.Errorf("expected %s,\ngot %s", want, got)
	}
}
// filesystemIsNoSUID reports whether the filesystem for the given
// path is mounted nosuid. When statfs fails the answer is false.
func filesystemIsNoSUID(path string) bool {
	var info syscall.Statfs_t
	if statErr := syscall.Statfs(path, &info); statErr != nil {
		return false
	}
	nosuid := info.Flags & syscall.MS_NOSUID
	return nosuid != 0
}
// syscallNoError is the helper-process body for TestSyscallNoError (see
// TestMain). It prints the geteuid results from RawSyscall and
// RawSyscallNoError plus the errno so the parent can compare them.
func syscallNoError() {
	// Test that the return value from SYS_GETEUID32 (which cannot fail)
	// doesn't get treated as an error (see https://golang.org/issue/22924)
	// NOTE(review): syscall.Sys_GETEUID is presumably an arch-specific alias
	// for the geteuid syscall number — confirm it is defined for all
	// supported architectures.
	euid1, _, e := syscall.RawSyscall(syscall.Sys_GETEUID, 0, 0, 0)
	euid2, _ := syscall.RawSyscallNoError(syscall.Sys_GETEUID, 0, 0, 0)
	fmt.Println(uintptr(euid1), "/", int(e), "/", uintptr(euid2))
	os.Exit(0)
}
|
package repo
import (
"github.com/OpenBazaar/openbazaar-go/ipfs"
"github.com/OpenBazaar/openbazaar-go/pb"
"gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer"
)
// Datastore is the aggregate interface for the node's local database,
// exposing one accessor per table/collection plus Close for shutdown.
type Datastore interface {
	Config() Config
	Followers() Followers
	Following() Following
	OfflineMessages() OfflineMessages
	Pointers() Pointers
	Settings() Settings
	Inventory() Inventory
	// Purchases returns the purchases table. (Was misspelled "Purchses";
	// renamed to the correct spelling — see the accompanying "Fix typo" change.)
	Purchases() Purchases
	Sales() Sales
	Close()
}
// Config stores the node's cryptographic identity material.
type Config interface {
	// Initialize the database with the node's mnemonic seed and
	// identity key. This will be called during repo init.
	Init(mnemonic string, identityKey []byte, password string) error
	// Return the mnemonic string.
	GetMnemonic() (string, error)
	// Return the identity key.
	GetIdentityKey() ([]byte, error)
	// Returns true if the db has failed to decrypt properly (e.g. wrong password).
	IsEncrypted() bool
}
// Followers stores the peers that follow this node.
type Followers interface {
	// Put a B58 encoded follower ID to the database.
	Put(follower string) error
	// Get followers from the database.
	// The offset and limit arguments can be used for lazy loading.
	Get(offsetId string, limit int) ([]string, error)
	// Delete a follower from the database.
	Delete(follower string) error
	// Return the number of followers in the database.
	Count() int
}
// Following stores the peers this node follows.
type Following interface {
	// Put a B58 encoded peer ID to the database.
	Put(peer string) error
	// Get a list of following peers from the database.
	// The offset and limit arguments can be used for lazy loading.
	Get(offsetId string, limit int) ([]string, error)
	// Delete a peer from the database.
	Delete(peer string) error
	// Return the number of peers in the database.
	Count() int
}
type OfflineMessages interface {
// Put a url from a retrieved message
Put(url string) error
// Does the given url exist in the db?
Has(url string) bool
}
type Pointers interface {
// Put a pointer to the database.
Put(p ipfs.Pointer) error
// Delete a pointer from the db.
Delete(id peer.ID) error
// Delete all pointers of a given purpose
DeleteAll(purpose ipfs.Purpose) error
// Fetch the entire list of pointers
GetAll() ([]ipfs.Pointer, error)
}
type Settings interface {
// Put settings to the database
// Override all fields
Put(settings SettingsData) error
// Update all non-nil fields
Update(settings SettingsData) error
// Return the settings object
Get() (SettingsData, error)
}
type Inventory interface {
// Put an inventory count for a listing
// Override the existing count if it exists
Put(slug string, count int) error
// Return the count for a specific listing including variants
GetSpecific(path string) (int, error)
// Get the count for all variants of a given listing
Get(slug string) (map[string]int, error)
// Fetch all inventory countes
GetAll() (map[string]int, error)
// Delete a listing and related count
Delete(path string) error
// Delete all variants of a given slug
DeleteAll(slug string) error
}
type Purchases interface {
// Save or update an order
Put(orderID string, contract pb.RicardianContract, state pb.OrderState, read bool) error
// Mark an order as read in the database
MarkAsRead(orderID string) error
// Delete an order
Delete(orderID string) error
// Return the Ids for all orders
GetAll() ([]string, error)
}
type Sales interface {
// Save or update a sale
Put(orderID string, contract pb.RicardianContract, state pb.OrderState, read bool) error
// Mark an order as read in the database
MarkAsRead(orderID string) error
// Delete a sale
Delete(orderID string) error
// Return the Ids for all sales
GetAll() ([]string, error)
}
Fix typo
package repo
import (
"github.com/OpenBazaar/openbazaar-go/ipfs"
"github.com/OpenBazaar/openbazaar-go/pb"
"gx/ipfs/QmRBqJF7hb8ZSpRcMwUt8hNhydWcxGEhtk81HKq6oUwKvs/go-libp2p-peer"
)
// Datastore aggregates access to all of the node's persistent tables.
type Datastore interface {
	Config() Config
	Followers() Followers
	Following() Following
	OfflineMessages() OfflineMessages
	Pointers() Pointers
	Settings() Settings
	Inventory() Inventory
	Purchases() Purchases
	Sales() Sales
	// Close releases the underlying database handle.
	Close()
}
// Config provides access to the node's core configuration values.
type Config interface {
	// Initialize the database with the node's mnemonic seed and
	// identity key. This will be called during repo init
	Init(mnemonic string, identityKey []byte, password string) error
	// Return the mnemonic string
	GetMnemonic() (string, error)
	// Return the identity key
	GetIdentityKey() ([]byte, error)
	// Returns true if the db has failed to decrypt properly ex) wrong pw
	IsEncrypted() bool
}

// Followers stores the peers that follow this node.
type Followers interface {
	// Put a B58 encoded follower ID to the database
	Put(follower string) error
	// Get followers from the database.
	// The offset and limit arguments can be used for lazy loading.
	Get(offsetId string, limit int) ([]string, error)
	// Delete a follower from the database.
	Delete(follower string) error
	// Return the number of followers in the database.
	Count() int
}

// Following stores the peers this node follows.
type Following interface {
	// Put a B58 encoded peer ID to the database
	Put(peer string) error
	// Get a list of following peers from the database.
	// The offset and limit arguments can be used for lazy loading.
	Get(offsetId string, limit int) ([]string, error)
	// Delete a peer from the database.
	Delete(peer string) error
	// Return the number of peers in the database.
	Count() int
}

// OfflineMessages records URLs of offline messages already retrieved,
// so they are not processed twice.
type OfflineMessages interface {
	// Put a url from a retrieved message
	Put(url string) error
	// Does the given url exist in the db?
	Has(url string) bool
}

// Pointers stores DHT pointers published by this node.
type Pointers interface {
	// Put a pointer to the database.
	Put(p ipfs.Pointer) error
	// Delete a pointer from the db.
	Delete(id peer.ID) error
	// Delete all pointers of a given purpose
	DeleteAll(purpose ipfs.Purpose) error
	// Fetch the entire list of pointers
	GetAll() ([]ipfs.Pointer, error)
}

// Settings persists the user-facing settings object.
type Settings interface {
	// Put settings to the database
	// Override all fields
	Put(settings SettingsData) error
	// Update all non-nil fields
	Update(settings SettingsData) error
	// Return the settings object
	Get() (SettingsData, error)
}

// Inventory tracks stock counts per listing slug and variant path.
type Inventory interface {
	// Put an inventory count for a listing
	// Override the existing count if it exists
	Put(slug string, count int) error
	// Return the count for a specific listing including variants
	GetSpecific(path string) (int, error)
	// Get the count for all variants of a given listing
	Get(slug string) (map[string]int, error)
	// Fetch all inventory counts
	GetAll() (map[string]int, error)
	// Delete a listing and related count
	Delete(path string) error
	// Delete all variants of a given slug
	DeleteAll(slug string) error
}

// Purchases stores orders placed by this node (buyer side).
type Purchases interface {
	// Save or update an order
	Put(orderID string, contract pb.RicardianContract, state pb.OrderState, read bool) error
	// Mark an order as read in the database
	MarkAsRead(orderID string) error
	// Delete an order
	Delete(orderID string) error
	// Return the Ids for all orders
	GetAll() ([]string, error)
}

// Sales stores orders received by this node (vendor side).
type Sales interface {
	// Save or update a sale
	Put(orderID string, contract pb.RicardianContract, state pb.OrderState, read bool) error
	// Mark an order as read in the database
	MarkAsRead(orderID string) error
	// Delete a sale
	Delete(orderID string) error
	// Return the Ids for all sales
	GetAll() ([]string, error)
}
|
package repo
import (
	"io/ioutil"
	"os"
	"path"
	"strconv"
	"strings"

	"github.com/OpenBazaar/openbazaar-go/repo/migrations"
)
// Migration is a single reversible repo migration step.
type Migration interface {
	// Up applies this migration to the repo at repoPath.
	Up(repoPath string, dbPassword string, testnet bool) error
	// Down reverts this migration.
	Down(repoPath string, dbPassword string, testnet bool) error
}
// Migrations lists every known migration in order; the slice index is the
// repo version that migration upgrades FROM.
var Migrations = []Migration{
	migrations.Migration000{},
	migrations.Migration001{},
	migrations.Migration002{},
	migrations.Migration003{},
	migrations.Migration004{},
	migrations.Migration005{},
	migrations.Migration006{},
	migrations.Migration007{},
	migrations.Migration008{},
	migrations.Migration009{},
	migrations.Migration010{},
}
// MigrateUp looks at the currently active migration version
// and will migrate all the way up (applying all up migrations).
// The current version is read from the "repover" file inside repoPath;
// a missing file means a brand new repo at version 0.
func MigrateUp(repoPath, dbPassword string, testnet bool) error {
	version, err := ioutil.ReadFile(path.Join(repoPath, "repover"))
	if err != nil && !os.IsNotExist(err) {
		return err
	} else if err != nil && os.IsNotExist(err) {
		version = []byte("0")
	}
	// BUG fix: a repover written with a trailing newline (or CRLF / stray
	// whitespace) made strconv.Atoi fail and aborted every migration run.
	v, err := strconv.Atoi(strings.TrimSpace(string(version)))
	if err != nil {
		return err
	}
	x := v
	for _, m := range Migrations[v:] {
		log.Noticef("Migrating repo to version %d\n", x+1)
		err := m.Up(repoPath, dbPassword, testnet)
		if err != nil {
			log.Error(err)
			return err
		}
		x++
	}
	return nil
}
Format repover string in migration
package repo
import (
"github.com/OpenBazaar/openbazaar-go/repo/migrations"
"io/ioutil"
"os"
"path"
"strconv"
"strings"
)
// Migration is a single reversible repo migration step.
type Migration interface {
	// Up applies this migration to the repo at repoPath.
	Up(repoPath string, dbPassword string, testnet bool) error
	// Down reverts this migration.
	Down(repoPath string, dbPassword string, testnet bool) error
}
// Migrations lists every known migration in order; the slice index is the
// repo version that migration upgrades FROM.
var Migrations = []Migration{
	migrations.Migration000{},
	migrations.Migration001{},
	migrations.Migration002{},
	migrations.Migration003{},
	migrations.Migration004{},
	migrations.Migration005{},
	migrations.Migration006{},
	migrations.Migration007{},
	migrations.Migration008{},
	migrations.Migration009{},
	migrations.Migration010{},
}
// MigrateUp looks at the currently active migration version
// and will migrate all the way up (applying all up migrations).
// The current version is read from the "repover" file inside repoPath;
// a missing file means a brand new repo at version 0.
func MigrateUp(repoPath, dbPassword string, testnet bool) error {
	version, err := ioutil.ReadFile(path.Join(repoPath, "repover"))
	if err != nil && !os.IsNotExist(err) {
		return err
	} else if err != nil && os.IsNotExist(err) {
		version = []byte("0")
	}
	// BUG fix: the previous code only stripped "\n", so a CRLF-written
	// repover (or one with stray spaces) still broke strconv.Atoi.
	// TrimSpace handles \r, \n, tabs and spaces in one pass.
	v, err := strconv.Atoi(strings.TrimSpace(string(version)))
	if err != nil {
		return err
	}
	x := v
	for _, m := range Migrations[v:] {
		log.Noticef("Migrating repo to version %d\n", x+1)
		err := m.Up(repoPath, dbPassword, testnet)
		if err != nil {
			log.Error(err)
			return err
		}
		x++
	}
	return nil
}
|
package eighttracks
import (
"fmt"
"net/url"
"github.com/zquestz/s/providers"
)
// init registers this provider with the global registry under "8tracks".
func init() {
	providers.AddProvider("8tracks", &Provider{})
}
// Provider merely implements the Provider interface.
type Provider struct{}

// BuildURI generates a search URL for 8tracks.
// BUG fix: the old "/explore/%s" form put the query in a path segment,
// where QueryEscape's "+"-for-space encoding is invalid and multi-word
// queries broke; "/search?q=" performs a real search and matches
// query-string escaping.
func (p *Provider) BuildURI(q string) string {
	return fmt.Sprintf("https://8tracks.com/search?q=%s", url.QueryEscape(q))
}

// Tags returns the tags relevant to this provider.
func (p *Provider) Tags() []string {
	return []string{"music"}
}
Fix 8tracks search to be more robust
package eighttracks
import (
"fmt"
"net/url"
"github.com/zquestz/s/providers"
)
// init registers this provider with the global registry under "8tracks".
func init() {
	providers.AddProvider("8tracks", &Provider{})
}
// Provider merely implements the Provider interface.
type Provider struct{}

// BuildURI generates a search URL for 8tracks, escaping the query for
// use in the URL's query string.
func (p *Provider) BuildURI(q string) string {
	query := url.Values{}
	query.Set("q", q)
	return fmt.Sprintf("https://8tracks.com/search?%s", query.Encode())
}

// Tags returns the tags relevant to this provider.
func (p *Provider) Tags() []string {
	tags := []string{"music"}
	return tags
}
|
// Copyright 2016 tsuru authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package docker
import (
"encoding/json"
stderror "errors"
"fmt"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/cezarsa/form"
"github.com/tsuru/docker-cluster/cluster"
"github.com/tsuru/monsterqueue"
"github.com/tsuru/tsuru/api"
"github.com/tsuru/tsuru/app"
"github.com/tsuru/tsuru/auth"
"github.com/tsuru/tsuru/errors"
"github.com/tsuru/tsuru/iaas"
_ "github.com/tsuru/tsuru/iaas/cloudstack"
_ "github.com/tsuru/tsuru/iaas/digitalocean"
_ "github.com/tsuru/tsuru/iaas/ec2"
tsuruIo "github.com/tsuru/tsuru/io"
"github.com/tsuru/tsuru/net"
"github.com/tsuru/tsuru/permission"
"github.com/tsuru/tsuru/provision/docker/container"
"github.com/tsuru/tsuru/provision/docker/healer"
"github.com/tsuru/tsuru/provision/docker/nodecontainer"
"github.com/tsuru/tsuru/queue"
"gopkg.in/mgo.v2"
)
// init wires every docker-provisioner HTTP route into the tsuru API
// router. All routes require an authenticated token; per-route
// permission checks happen inside each handler.
func init() {
	api.RegisterHandler("/docker/node", "GET", api.AuthorizationRequiredHandler(listNodesHandler))
	api.RegisterHandler("/docker/node/apps/{appname}/containers", "GET", api.AuthorizationRequiredHandler(listContainersHandler))
	api.RegisterHandler("/docker/node/{address:.*}/containers", "GET", api.AuthorizationRequiredHandler(listContainersHandler))
	api.RegisterHandler("/docker/node", "POST", api.AuthorizationRequiredHandler(addNodeHandler))
	api.RegisterHandler("/docker/node", "PUT", api.AuthorizationRequiredHandler(updateNodeHandler))
	api.RegisterHandler("/docker/node/{address:.*}", "DELETE", api.AuthorizationRequiredHandler(removeNodeHandler))
	api.RegisterHandler("/docker/container/{id}/move", "POST", api.AuthorizationRequiredHandler(moveContainerHandler))
	api.RegisterHandler("/docker/containers/move", "POST", api.AuthorizationRequiredHandler(moveContainersHandler))
	api.RegisterHandler("/docker/containers/rebalance", "POST", api.AuthorizationRequiredHandler(rebalanceContainersHandler))
	api.RegisterHandler("/docker/healing", "GET", api.AuthorizationRequiredHandler(healingHistoryHandler))
	api.RegisterHandler("/docker/healing/node", "GET", api.AuthorizationRequiredHandler(nodeHealingRead))
	api.RegisterHandler("/docker/healing/node", "POST", api.AuthorizationRequiredHandler(nodeHealingUpdate))
	api.RegisterHandler("/docker/healing/node", "DELETE", api.AuthorizationRequiredHandler(nodeHealingDelete))
	api.RegisterHandler("/docker/autoscale", "GET", api.AuthorizationRequiredHandler(autoScaleHistoryHandler))
	api.RegisterHandler("/docker/autoscale/config", "GET", api.AuthorizationRequiredHandler(autoScaleGetConfig))
	api.RegisterHandler("/docker/autoscale/run", "POST", api.AuthorizationRequiredHandler(autoScaleRunHandler))
	api.RegisterHandler("/docker/autoscale/rules", "GET", api.AuthorizationRequiredHandler(autoScaleListRules))
	api.RegisterHandler("/docker/autoscale/rules", "POST", api.AuthorizationRequiredHandler(autoScaleSetRule))
	api.RegisterHandler("/docker/autoscale/rules", "DELETE", api.AuthorizationRequiredHandler(autoScaleDeleteRule))
	api.RegisterHandler("/docker/autoscale/rules/{id}", "DELETE", api.AuthorizationRequiredHandler(autoScaleDeleteRule))
	// The /docker/bs routes are deprecated; their handlers only return an
	// error pointing at the /docker/nodecontainers replacements.
	api.RegisterHandler("/docker/bs/upgrade", "POST", api.AuthorizationRequiredHandler(bsUpgradeHandler))
	api.RegisterHandler("/docker/bs/env", "POST", api.AuthorizationRequiredHandler(bsEnvSetHandler))
	api.RegisterHandler("/docker/bs", "GET", api.AuthorizationRequiredHandler(bsConfigGetHandler))
	api.RegisterHandler("/docker/nodecontainers", "GET", api.AuthorizationRequiredHandler(nodeContainerList))
	api.RegisterHandler("/docker/nodecontainers", "POST", api.AuthorizationRequiredHandler(nodeContainerCreate))
	api.RegisterHandler("/docker/nodecontainers/{name}", "GET", api.AuthorizationRequiredHandler(nodeContainerInfo))
	api.RegisterHandler("/docker/nodecontainers/{name}", "DELETE", api.AuthorizationRequiredHandler(nodeContainerDelete))
	api.RegisterHandler("/docker/nodecontainers/{name}", "POST", api.AuthorizationRequiredHandler(nodeContainerUpdate))
	api.RegisterHandler("/docker/nodecontainers/{name}/upgrade", "POST", api.AuthorizationRequiredHandler(nodeContainerUpgrade))
	api.RegisterHandler("/docker/logs", "GET", api.AuthorizationRequiredHandler(logsConfigGetHandler))
	api.RegisterHandler("/docker/logs", "POST", api.AuthorizationRequiredHandler(logsConfigSetHandler))
}
// title: get autoscale config
// path: /docker/autoscale/config
// method: GET
// produce: application/json
// responses:
//   200: Ok
//   401: Unauthorized
func autoScaleGetConfig(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	if !permission.Check(t, permission.PermNodeAutoscale) {
		return permission.ErrUnauthorized
	}
	w.Header().Set("Content-Type", "application/json")
	return json.NewEncoder(w).Encode(mainDockerProvisioner.initAutoScaleConfig())
}
// title: autoscale rules list
// path: /docker/autoscale/rules
// method: GET
// produce: application/json
// responses:
//   200: Ok
//   401: Unauthorized
func autoScaleListRules(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	if !permission.Check(t, permission.PermNodeAutoscale) {
		return permission.ErrUnauthorized
	}
	rules, err := listAutoScaleRules()
	if err != nil {
		return err
	}
	return json.NewEncoder(w).Encode(&rules)
}
// title: autoscale set rule
// path: /docker/autoscale/rules
// method: POST
// consume: application/x-www-form-urlencoded
// responses:
//   200: Ok
//   400: Invalid data
//   401: Unauthorized
func autoScaleSetRule(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	if !permission.Check(t, permission.PermNodeAutoscale) {
		return permission.ErrUnauthorized
	}
	if err := r.ParseForm(); err != nil {
		return &errors.HTTP{Code: http.StatusBadRequest, Message: err.Error()}
	}
	var rule autoScaleRule
	decoder := form.NewDecoder(nil)
	decoder.IgnoreUnknownKeys(true)
	if err := decoder.DecodeValues(&rule, r.Form); err != nil {
		return &errors.HTTP{Code: http.StatusBadRequest, Message: err.Error()}
	}
	return rule.update()
}
// title: delete autoscale rule
// path: /docker/autoscale/rules/{id}
// method: DELETE
// responses:
//   200: Ok
//   401: Unauthorized
//   404: Not found
func autoScaleDeleteRule(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	allowedDeleteRule := permission.Check(t, permission.PermNodeAutoscale)
	if !allowedDeleteRule {
		return permission.ErrUnauthorized
	}
	ruleID := r.URL.Query().Get(":id")
	err := deleteAutoScaleRule(ruleID)
	if err == mgo.ErrNotFound {
		return &errors.HTTP{Code: http.StatusNotFound, Message: "rule not found"}
	}
	// BUG fix: errors other than ErrNotFound were previously discarded
	// (the handler unconditionally returned nil), reporting success on a
	// failed delete.
	return err
}
// validateNodeAddress checks that address is a well-formed http(s) URL
// with a non-empty host, returning a descriptive error otherwise.
func validateNodeAddress(address string) error {
	if address == "" {
		return fmt.Errorf("address=url parameter is required")
	}
	// Renamed from "url" to avoid shadowing the imported net/url package
	// (staticcheck flags this; the shadow also blocks later package use).
	nodeURL, err := url.ParseRequestURI(address)
	if err != nil {
		return fmt.Errorf("Invalid address url: %s", err.Error())
	}
	if nodeURL.Host == "" {
		return fmt.Errorf("Invalid address url: host cannot be empty")
	}
	if !strings.HasPrefix(nodeURL.Scheme, "http") {
		return fmt.Errorf("Invalid address url: scheme must be http[s]")
	}
	return nil
}
// addNodeForParams either registers an already-running node (isRegister)
// or creates a new machine through the configured IaaS and registers it.
// The returned map carries extra response metadata (e.g. the IaaS
// description) that is useful to callers even when an error occurs.
// The node is registered in "pending" state; a queued task finishes setup.
func (p *dockerProvisioner) addNodeForParams(params map[string]string, isRegister bool) (map[string]string, error) {
	response := make(map[string]string)
	var machineID string
	var address string
	if isRegister {
		// Idiom fix (staticcheck S1005): a plain map read needs no comma-ok.
		address = params["address"]
		delete(params, "address")
	} else {
		// Describe failures are non-fatal: the description is advisory.
		desc, _ := iaas.Describe(params["iaas"])
		response["description"] = desc
		m, err := iaas.CreateMachine(params)
		if err != nil {
			return response, err
		}
		address = m.FormatNodeAddress()
		machineID = m.Id
	}
	err := validateNodeAddress(address)
	if err != nil {
		return response, err
	}
	node := cluster.Node{Address: address, Metadata: params, CreationStatus: cluster.NodeCreationStatusPending}
	err = p.Cluster().Register(node)
	if err != nil {
		return response, err
	}
	q, err := queue.Queue()
	if err != nil {
		return response, err
	}
	jobParams := monsterqueue.JobParams{"endpoint": address, "machine": machineID, "metadata": params}
	_, err = q.Enqueue(nodecontainer.QueueTaskName, jobParams)
	return response, err
}
// title: add node
// path: /docker/node
// method: POST
// consume: application/x-www-form-urlencoded
// produce: application/x-json-stream
// responses:
//   201: Ok
//   401: Unauthorized
//   404: Not found
//
// addNodeHandler registers an existing node (register=true) or provisions
// a new machine via IaaS and adds it to the cluster. Progress and errors
// are reported inside the x-json-stream body; the HTTP status is 201
// either way (NOTE(review): errors still yield 201 — presumably
// intentional for the streaming protocol, confirm before changing).
func addNodeHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	err := r.ParseForm()
	if err != nil {
		return &errors.HTTP{Code: http.StatusBadRequest, Message: err.Error()}
	}
	params := map[string]string{}
	dec := form.NewDecoder(nil)
	dec.IgnoreUnknownKeys(true)
	err = dec.DecodeValues(&params, r.Form)
	if err != nil {
		return &errors.HTTP{Code: http.StatusBadRequest, Message: err.Error()}
	}
	// A template name expands into a complete parameter set, replacing the
	// submitted params entirely.
	if templateName, ok := params["template"]; ok {
		params, err = iaas.ExpandTemplate(templateName)
		if err != nil {
			return &errors.HTTP{Code: http.StatusBadRequest, Message: err.Error()}
		}
	}
	pool := params["pool"]
	if pool == "" {
		return &errors.HTTP{Code: http.StatusBadRequest, Message: "pool is required"}
	}
	if !permission.Check(t, permission.PermNodeCreate, permission.Context(permission.CtxPool, pool)) {
		return permission.ErrUnauthorized
	}
	isRegister, _ := strconv.ParseBool(params["register"])
	if !isRegister {
		// Creating a brand new machine additionally requires machine-create
		// permission on the target IaaS.
		canCreateMachine := permission.Check(t, permission.PermMachineCreate,
			permission.Context(permission.CtxIaaS, params["iaas"]))
		if !canCreateMachine {
			return permission.ErrUnauthorized
		}
	}
	delete(params, "register")
	w.Header().Set("Content-Type", "application/x-json-stream")
	// Keepalives every 15s prevent proxies from cutting the long stream.
	keepAliveWriter := tsuruIo.NewKeepAliveWriter(w, 15*time.Second, "")
	defer keepAliveWriter.Stop()
	writer := &tsuruIo.SimpleJsonMessageEncoderWriter{Encoder: json.NewEncoder(keepAliveWriter)}
	response, err := mainDockerProvisioner.addNodeForParams(params, isRegister)
	if err != nil {
		writer.Encode(tsuruIo.SimpleJsonMessage{
			Error: fmt.Sprintf("%s\n\n%s", err, response["description"]),
		})
	}
	w.WriteHeader(http.StatusCreated)
	return nil
}
// title: remove node
// path: /docker/node/{address}
// method: DELETE
// responses:
//   200: Ok
//   401: Unauthorized
//   404: Not found
//
// removeNodeHandler disables a node, optionally rebalances its containers
// to other nodes, unregisters it from the cluster and, when
// remove-iaas=true, destroys the backing IaaS machine.
func removeNodeHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	address := r.URL.Query().Get(":address")
	if address == "" {
		return fmt.Errorf("Node address is required.")
	}
	node, err := mainDockerProvisioner.Cluster().GetNode(address)
	if err != nil {
		return &errors.HTTP{
			Code:    http.StatusNotFound,
			Message: fmt.Sprintf("Node %s not found.", address),
		}
	}
	allowedNodeRemove := permission.Check(t, permission.PermNodeDelete,
		permission.Context(permission.CtxPool, node.Metadata["pool"]),
	)
	if !allowedNodeRemove {
		return permission.ErrUnauthorized
	}
	removeIaaS, _ := strconv.ParseBool(r.URL.Query().Get("remove-iaas"))
	if removeIaaS {
		allowedIaasRemove := permission.Check(t, permission.PermMachineDelete,
			permission.Context(permission.CtxIaaS, node.Metadata["iaas"]),
		)
		if !allowedIaasRemove {
			return permission.ErrUnauthorized
		}
	}
	// Disable the node first so the scheduler stops placing new containers
	// on it while we rebalance/unregister.
	node.CreationStatus = cluster.NodeCreationStatusDisabled
	_, err = mainDockerProvisioner.Cluster().UpdateNode(node)
	if err != nil {
		return err
	}
	// An absent/invalid no-rebalance parameter means rebalance by default.
	noRebalance, err := strconv.ParseBool(r.URL.Query().Get("no-rebalance"))
	if !noRebalance {
		err = mainDockerProvisioner.rebalanceContainersByHost(net.URLToHost(address), w)
		if err != nil {
			return err
		}
	}
	err = mainDockerProvisioner.Cluster().Unregister(address)
	if err != nil {
		return err
	}
	if removeIaaS {
		var m iaas.Machine
		m, err = iaas.FindMachineByIdOrAddress(node.Metadata["iaas-id"], net.URLToHost(address))
		if err != nil {
			if err == mgo.ErrNotFound {
				// No machine registered for this node; nothing to destroy.
				return nil
			}
			// BUG fix: real lookup errors were previously swallowed
			// (return nil), and on ErrNotFound Destroy() ran on a
			// zero-value Machine.
			return err
		}
		return m.Destroy()
	}
	return nil
}
// title: list nodes
// path: /docker/node
// method: GET
// produce: application/json
// responses:
//   200: Ok
//   204: No content
//
// listNodesHandler returns the cluster nodes and IaaS machines visible to
// the token, filtered by the pools / IaaSes the token has read access to.
// A nil context list means global access (no filtering).
func listNodesHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	pools, err := listContextValues(t, permission.PermNodeRead, false)
	if err != nil {
		return err
	}
	nodes, err := mainDockerProvisioner.Cluster().UnfilteredNodes()
	if err != nil {
		return err
	}
	// Keep only nodes whose pool is among the token's allowed pools.
	if pools != nil {
		filteredNodes := make([]cluster.Node, 0, len(nodes))
		for _, node := range nodes {
			for _, pool := range pools {
				if node.Metadata["pool"] == pool {
					filteredNodes = append(filteredNodes, node)
					break
				}
			}
		}
		nodes = filteredNodes
	}
	iaases, err := listContextValues(t, permission.PermMachineRead, false)
	if err != nil {
		return err
	}
	machines, err := iaas.ListMachines()
	if err != nil {
		return err
	}
	// Same filtering for machines, keyed by IaaS name.
	if iaases != nil {
		filteredMachines := make([]iaas.Machine, 0, len(machines))
		for _, machine := range machines {
			for _, iaas := range iaases {
				if machine.Iaas == iaas {
					filteredMachines = append(filteredMachines, machine)
					break
				}
			}
		}
		machines = filteredMachines
	}
	if len(nodes) == 0 && len(machines) == 0 {
		w.WriteHeader(http.StatusNoContent)
		return nil
	}
	result := map[string]interface{}{
		"nodes":    nodes,
		"machines": machines,
	}
	w.Header().Set("Content-Type", "application/json")
	return json.NewEncoder(w).Encode(result)
}
// title: update nodes
// path: /docker/node
// method: PUT
// consume: application/x-www-form-urlencoded
// responses:
// 200: Ok
// 400: Invalid data
// 401: Unauthorized
// 404: Not found
func updateNodeHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
err := r.ParseForm()
if err != nil {
return &errors.HTTP{Code: http.StatusBadRequest, Message: err.Error()}
}
params := map[string]string{}
dec := form.NewDecoder(nil)
dec.IgnoreUnknownKeys(true)
err = dec.DecodeValues(¶ms, r.Form)
if err != nil {
return &errors.HTTP{Code: http.StatusBadRequest, Message: err.Error()}
}
address := params["address"]
if params["address"] == "" {
return &errors.HTTP{Code: http.StatusBadRequest, Message: "address is required"}
}
oldNode, err := mainDockerProvisioner.Cluster().GetNode(address)
if err != nil {
return &errors.HTTP{
Code: http.StatusNotFound,
Message: err.Error(),
}
}
oldPool, _ := oldNode.Metadata["pool"]
allowedOldPool := permission.Check(t, permission.PermNodeUpdate,
permission.Context(permission.CtxPool, oldPool),
)
if !allowedOldPool {
return permission.ErrUnauthorized
}
newPool, ok := params["pool"]
if ok {
allowedNewPool := permission.Check(t, permission.PermNodeUpdate,
permission.Context(permission.CtxPool, newPool),
)
if !allowedNewPool {
return permission.ErrUnauthorized
}
}
delete(params, "address")
node := cluster.Node{Address: address, Metadata: params}
disable, _ := strconv.ParseBool(params["disable"])
enable, _ := strconv.ParseBool(params["enable"])
if disable && enable {
return &errors.HTTP{
Code: http.StatusBadRequest,
Message: "You can't make a node enable and disable at the same time.",
}
}
if disable {
node.CreationStatus = cluster.NodeCreationStatusDisabled
}
if enable {
node.CreationStatus = cluster.NodeCreationStatusCreated
}
_, err = mainDockerProvisioner.Cluster().UpdateNode(node)
return err
}
// title: move container
// path: /docker/container/{id}/move
// method: POST
// consume: application/x-www-form-urlencoded
// produce: application/x-json-stream
// responses:
//   200: Ok
//   400: Invalid data
//   401: Unauthorized
//   404: Not found
//
// moveContainerHandler relocates a single container to the host given in
// the "to" form field. Requires node permission on both the source and
// destination pools. Move errors are reported in-stream with HTTP 200.
func moveContainerHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	err := r.ParseForm()
	if err != nil {
		return &errors.HTTP{Code: http.StatusBadRequest, Message: err.Error()}
	}
	params := map[string]string{}
	dec := form.NewDecoder(nil)
	dec.IgnoreUnknownKeys(true)
	err = dec.DecodeValues(&params, r.Form)
	if err != nil {
		return &errors.HTTP{Code: http.StatusBadRequest, Message: err.Error()}
	}
	contId := r.URL.Query().Get(":id")
	to := params["to"]
	if to == "" {
		return fmt.Errorf("Invalid params: id: %s - to: %s", contId, to)
	}
	cont, err := mainDockerProvisioner.GetContainer(contId)
	if err != nil {
		return &errors.HTTP{Code: http.StatusNotFound, Message: err.Error()}
	}
	permContexts, err := moveContainersPermissionContexts(cont.HostAddr, to)
	if err != nil {
		return err
	}
	if !permission.Check(t, permission.PermNode, permContexts...) {
		return permission.ErrUnauthorized
	}
	w.Header().Set("Content-Type", "application/x-json-stream")
	keepAliveWriter := tsuruIo.NewKeepAliveWriter(w, 15*time.Second, "")
	defer keepAliveWriter.Stop()
	writer := &tsuruIo.SimpleJsonMessageEncoderWriter{Encoder: json.NewEncoder(keepAliveWriter)}
	_, err = mainDockerProvisioner.moveContainer(contId, to, writer)
	if err != nil {
		fmt.Fprintf(writer, "Error trying to move container: %s\n", err.Error())
	} else {
		fmt.Fprintf(writer, "Containers moved successfully!\n")
	}
	return nil
}
// title: move containers
// path: /docker/containers/move
// method: POST
// consume: application/x-www-form-urlencoded
// produce: application/x-json-stream
// responses:
//   200: Ok
//   400: Invalid data
//   401: Unauthorized
//   404: Not found
//
// moveContainersHandler relocates every container from host "from" to
// host "to". Same permission model and streaming behavior as
// moveContainerHandler.
func moveContainersHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	err := r.ParseForm()
	if err != nil {
		return &errors.HTTP{Code: http.StatusBadRequest, Message: err.Error()}
	}
	params := map[string]string{}
	dec := form.NewDecoder(nil)
	dec.IgnoreUnknownKeys(true)
	err = dec.DecodeValues(&params, r.Form)
	if err != nil {
		return &errors.HTTP{Code: http.StatusBadRequest, Message: err.Error()}
	}
	from := params["from"]
	to := params["to"]
	if from == "" || to == "" {
		return fmt.Errorf("Invalid params: from: %s - to: %s", from, to)
	}
	permContexts, err := moveContainersPermissionContexts(from, to)
	if err != nil {
		return err
	}
	if !permission.Check(t, permission.PermNode, permContexts...) {
		return permission.ErrUnauthorized
	}
	w.Header().Set("Content-Type", "application/x-json-stream")
	keepAliveWriter := tsuruIo.NewKeepAliveWriter(w, 15*time.Second, "")
	defer keepAliveWriter.Stop()
	writer := &tsuruIo.SimpleJsonMessageEncoderWriter{Encoder: json.NewEncoder(keepAliveWriter)}
	err = mainDockerProvisioner.MoveContainers(from, to, writer)
	if err != nil {
		fmt.Fprintf(writer, "Error trying to move containers: %s\n", err.Error())
	} else {
		fmt.Fprintf(writer, "Containers moved successfully!\n")
	}
	return nil
}
// moveContainersPermissionContexts resolves both hosts and returns the
// pool permission contexts a caller must satisfy to move containers from
// "from" to "to" (one context per distinct pool). A host that cannot be
// resolved yields a 404 HTTP error.
func moveContainersPermissionContexts(from, to string) ([]permission.PermissionContext, error) {
	originHost, err := mainDockerProvisioner.getNodeByHost(from)
	if err != nil {
		return nil, &errors.HTTP{Code: http.StatusNotFound, Message: err.Error()}
	}
	destinationHost, err := mainDockerProvisioner.getNodeByHost(to)
	if err != nil {
		return nil, &errors.HTTP{Code: http.StatusNotFound, Message: err.Error()}
	}
	var permContexts []permission.PermissionContext
	originPool, ok := originHost.Metadata["pool"]
	if ok {
		permContexts = append(permContexts, permission.Context(permission.CtxPool, originPool))
	}
	// Only add the destination pool when it differs, to avoid a duplicate
	// context for same-pool moves.
	if pool, ok := destinationHost.Metadata["pool"]; ok && pool != originPool {
		permContexts = append(permContexts, permission.Context(permission.CtxPool, pool))
	}
	return permContexts, nil
}
// rebalanceContainersHandler redistributes containers across nodes,
// optionally restricted by node metadata and app filters from the JSON
// body. Dry=true only simulates. Rebalance errors are reported in-stream.
func rebalanceContainersHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	var dry bool
	var params struct {
		Dry            string
		MetadataFilter map[string]string
		AppFilter      []string
	}
	// A malformed/empty body is tolerated: filters stay empty, dry=false.
	err := json.NewDecoder(r.Body).Decode(&params)
	if err == nil {
		dry, _ = strconv.ParseBool(params.Dry)
	}
	var permContexts []permission.PermissionContext
	if pool, ok := params.MetadataFilter["pool"]; ok {
		permContexts = append(permContexts, permission.Context(permission.CtxPool, pool))
	}
	if !permission.Check(t, permission.PermNode, permContexts...) {
		return permission.ErrUnauthorized
	}
	keepAliveWriter := tsuruIo.NewKeepAliveWriter(w, 15*time.Second, "")
	defer keepAliveWriter.Stop()
	writer := &tsuruIo.SimpleJsonMessageEncoderWriter{Encoder: json.NewEncoder(keepAliveWriter)}
	_, err = mainDockerProvisioner.rebalanceContainersByFilter(writer, params.AppFilter, params.MetadataFilter, dry)
	if err != nil {
		fmt.Fprintf(writer, "Error trying to rebalance containers: %s\n", err)
	} else {
		fmt.Fprintf(writer, "Containers successfully rebalanced!\n")
	}
	return nil
}
// listContainersHandler serves both container-listing routes: by node
// address (/docker/node/{address}/containers) when :address is set,
// otherwise by app name (/docker/node/apps/{appname}/containers).
func listContainersHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	address := r.URL.Query().Get(":address")
	if address != "" {
		node, err := mainDockerProvisioner.Cluster().GetNode(address)
		if err != nil {
			return err
		}
		// Node-scoped listing requires read access on the node's pool.
		hasAccess := permission.Check(t, permission.PermNodeRead,
			permission.Context(permission.CtxPool, node.Metadata["pool"]))
		if !hasAccess {
			return permission.ErrUnauthorized
		}
		containerList, err := mainDockerProvisioner.listContainersByHost(net.URLToHost(address))
		if err != nil {
			return err
		}
		return json.NewEncoder(w).Encode(containerList)
	}
	appName := r.URL.Query().Get(":appname")
	// Existence check only; the app value itself is not needed.
	_, err := app.GetByName(appName)
	if err != nil {
		return err
	}
	containerList, err := mainDockerProvisioner.listContainersByApp(appName)
	if err != nil {
		return err
	}
	return json.NewEncoder(w).Encode(containerList)
}
// title: list healing history
// path: /docker/healing
// method: GET
// produce: application/json
// responses:
//   200: Ok
//   204: No content
//   400: Invalid data
//   401: Unauthorized
//
// healingHistoryHandler returns past healing events, optionally filtered
// by kind ("node" or "container").
func healingHistoryHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	if !permission.Check(t, permission.PermHealingRead) {
		return permission.ErrUnauthorized
	}
	filter := r.URL.Query().Get("filter")
	if filter != "" && filter != "node" && filter != "container" {
		return &errors.HTTP{
			Code:    http.StatusBadRequest,
			Message: "invalid filter, possible values are 'node' or 'container'",
		}
	}
	history, err := healer.ListHealingHistory(filter)
	if err != nil {
		return err
	}
	if len(history) == 0 {
		w.WriteHeader(http.StatusNoContent)
		return nil
	}
	w.Header().Set("Content-Type", "application/json")
	return json.NewEncoder(w).Encode(history)
}
// title: list autoscale history
// path: /docker/autoscale
// method: GET
// produce: application/json
// responses:
//   200: Ok
//   204: No content
//   401: Unauthorized
//
// NOTE: the path above previously (and wrongly) read /docker/healing;
// this handler is registered at /docker/autoscale.
// autoScaleHistoryHandler returns paginated auto-scale events.
func autoScaleHistoryHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	if !permission.Check(t, permission.PermNodeAutoscale) {
		return permission.ErrUnauthorized
	}
	// Invalid/absent values parse to 0 (no skip / default limit).
	skip, _ := strconv.Atoi(r.URL.Query().Get("skip"))
	limit, _ := strconv.Atoi(r.URL.Query().Get("limit"))
	history, err := listAutoScaleEvents(skip, limit)
	if err != nil {
		return err
	}
	if len(history) == 0 {
		w.WriteHeader(http.StatusNoContent)
		return nil
	}
	w.Header().Set("Content-Type", "application/json")
	return json.NewEncoder(w).Encode(&history)
}
// autoScaleRunHandler triggers a single synchronous auto-scale pass,
// streaming progress to the client; run errors are reported in-stream
// after the 200 header has already been written.
func autoScaleRunHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	if !permission.Check(t, permission.PermNodeAutoscale) {
		return permission.ErrUnauthorized
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	keepAliveWriter := tsuruIo.NewKeepAliveWriter(w, 15*time.Second, "")
	defer keepAliveWriter.Stop()
	writer := &tsuruIo.SimpleJsonMessageEncoderWriter{
		Encoder: json.NewEncoder(keepAliveWriter),
	}
	autoScaleConfig := mainDockerProvisioner.initAutoScaleConfig()
	autoScaleConfig.writer = writer
	err := autoScaleConfig.runOnce()
	if err != nil {
		writer.Encoder.Encode(tsuruIo.SimpleJsonMessage{Error: err.Error()})
	}
	return nil
}
func bsEnvSetHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
return stderror.New("this route is deprecated, please use POST /docker/nodecontainer/{name} (node-container-update command)")
}
// bsConfigGetHandler is a stub for a removed route; it always fails with a
// pointer to the replacement node-container API.
func bsConfigGetHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	const msg = "this route is deprecated, please use GET /docker/nodecontainer/{name} (node-container-info command)"
	return stderror.New(msg)
}
// bsUpgradeHandler is a stub for a removed route; it always fails with a
// pointer to the replacement node-container API.
func bsUpgradeHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	const msg = "this route is deprecated, please use POST /docker/nodecontainer/{name}/upgrade (node-container-upgrade command)"
	return stderror.New(msg)
}
// listContextValues returns the context values the token holds for the given
// permission scheme. A nil slice with a nil error signals global access.
// With failIfEmpty, having no contexts at all yields ErrUnauthorized.
func listContextValues(t permission.Token, scheme *permission.PermissionScheme, failIfEmpty bool) ([]string, error) {
	contexts := permission.ContextsForPermission(t, scheme)
	if failIfEmpty && len(contexts) == 0 {
		return nil, permission.ErrUnauthorized
	}
	values := make([]string, 0, len(contexts))
	for _, c := range contexts {
		// A global context grants everything; signal it with a nil slice.
		if c.CtxType == permission.CtxGlobal {
			return nil, nil
		}
		values = append(values, c.Value)
	}
	return values, nil
}
// title: logs config
// path: /docker/logs
// method: GET
// produce: application/json
// responses:
//   200: Ok
//   401: Unauthorized
func logsConfigGetHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	pools, err := listContextValues(t, permission.PermPoolUpdateLogs, true)
	if err != nil {
		return err
	}
	configEntries, err := container.LogLoadAll()
	if err != nil {
		return err
	}
	w.Header().Set("Content-Type", "application/json")
	// Global access (empty pool list): return every entry unfiltered.
	if len(pools) == 0 {
		return json.NewEncoder(w).Encode(configEntries)
	}
	// Restricted access: expose only entries for pools the token can see.
	visible := map[string]container.DockerLogConfig{}
	for _, poolName := range pools {
		if entry, ok := configEntries[poolName]; ok {
			visible[poolName] = entry
		}
	}
	return json.NewEncoder(w).Encode(visible)
}
// logsConfigSetHandler updates the docker log configuration, either globally
// (no "pool" form value) or for a single pool, and optionally restarts the
// affected applications, streaming progress as JSON messages.
func logsConfigSetHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	err := r.ParseForm()
	if err != nil {
		return &errors.HTTP{
			Code:    http.StatusBadRequest,
			Message: fmt.Sprintf("unable to parse form values: %s", err),
		}
	}
	pool := r.Form.Get("pool")
	restart, _ := strconv.ParseBool(r.Form.Get("restart"))
	// Strip control fields so the decoder only sees log-config keys.
	delete(r.Form, "pool")
	delete(r.Form, "restart")
	var conf container.DockerLogConfig
	dec := form.NewDecoder(nil)
	dec.IgnoreUnknownKeys(true)
	err = dec.DecodeValues(&conf, r.Form)
	if err != nil {
		return &errors.HTTP{
			Code:    http.StatusBadRequest,
			Message: fmt.Sprintf("unable to parse fields in docker log config: %s", err),
		}
	}
	// A global update (empty pool) requires the unscoped permission first.
	if pool == "" && !permission.Check(t, permission.PermPoolUpdateLogs) {
		return permission.ErrUnauthorized
	}
	// NOTE(review): for an empty pool this re-checks with an empty pool
	// context — confirm that is equivalent to the global check above.
	hasPermission := permission.Check(t, permission.PermPoolUpdateLogs,
		permission.Context(permission.CtxPool, pool))
	if !hasPermission {
		return permission.ErrUnauthorized
	}
	err = conf.Save(pool)
	if err != nil {
		return err
	}
	keepAliveWriter := tsuruIo.NewKeepAliveWriter(w, 15*time.Second, "")
	defer keepAliveWriter.Stop()
	writer := &tsuruIo.SimpleJsonMessageEncoderWriter{Encoder: json.NewEncoder(keepAliveWriter)}
	fmt.Fprintln(writer, "Log config successfully updated.")
	if restart {
		// Restart either all apps (global change) or only the pool's apps.
		filter := &app.Filter{}
		if pool != "" {
			filter.Pools = []string{pool}
		}
		tryRestartAppsByFilter(filter, writer)
	}
	return nil
}
// tryRestartAppsByFilter restarts, in parallel, every application matched by
// filter, reporting per-app progress and errors through writer. It is
// best-effort: failures are streamed to the client, never returned.
func tryRestartAppsByFilter(filter *app.Filter, writer *tsuruIo.SimpleJsonMessageEncoderWriter) {
	matched, err := app.List(filter)
	if err != nil {
		writer.Encode(tsuruIo.SimpleJsonMessage{Error: err.Error()})
		return
	}
	if len(matched) == 0 {
		return
	}
	names := make([]string, len(matched))
	for idx, application := range matched {
		names[idx] = application.Name
	}
	sort.Strings(names)
	fmt.Fprintf(writer, "Restarting %d applications: [%s]\n", len(matched), strings.Join(names, ", "))
	var wg sync.WaitGroup
	wg.Add(len(matched))
	for idx := range matched {
		// Index passed as an argument so each goroutine gets its own app.
		go func(idx int) {
			defer wg.Done()
			application := matched[idx]
			if restartErr := application.Restart("", writer); restartErr != nil {
				fmt.Fprintf(writer, "Error: unable to restart %s: %s\n", application.Name, restartErr.Error())
			} else {
				fmt.Fprintf(writer, "App %s successfully restarted\n", application.Name)
			}
		}(idx)
	}
	wg.Wait()
}
// nodeHealingRead returns the node healer configuration, hiding pool entries
// the token has no healing read access to.
func nodeHealingRead(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	pools, err := listContextValues(t, permission.PermHealingRead, true)
	if err != nil {
		return err
	}
	configMap, err := healer.GetConfig()
	if err != nil {
		return err
	}
	// A non-empty pool list means restricted access: drop entries for pools
	// outside it. The "" key (presumably the defaults) is always kept.
	if len(pools) > 0 {
		allowed := make(map[string]struct{}, len(pools))
		for _, poolName := range pools {
			allowed[poolName] = struct{}{}
		}
		for poolName := range configMap {
			if poolName == "" {
				continue
			}
			if _, ok := allowed[poolName]; !ok {
				delete(configMap, poolName)
			}
		}
	}
	w.Header().Set("Content-Type", "application/json")
	return json.NewEncoder(w).Encode(configMap)
}
// nodeHealingUpdate updates node healer settings, globally or for the pool
// given in the "pool" form value.
func nodeHealingUpdate(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	if err := r.ParseForm(); err != nil {
		return err
	}
	poolName := r.FormValue("pool")
	// Empty pool means a global update checked without a pool context.
	var ctxs []permission.PermissionContext
	if poolName != "" {
		ctxs = append(ctxs, permission.Context(permission.CtxPool, poolName))
	}
	if !permission.Check(t, permission.PermHealingUpdate, ctxs...) {
		return permission.ErrUnauthorized
	}
	// Strip the control field so the decoder only sees healer settings.
	delete(r.Form, "pool")
	dec := form.NewDecoder(nil)
	dec.IgnoreUnknownKeys(true)
	var config healer.NodeHealerConfig
	if err := dec.DecodeValues(&config, r.Form); err != nil {
		return err
	}
	return healer.UpdateConfig(poolName, config)
}
// title: remove node healing
// path: /docker/healing/node
// method: DELETE
// produce: application/json
// responses:
//   200: Ok
//   401: Unauthorized
func nodeHealingDelete(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	poolName := r.URL.Query().Get("pool")
	// Empty pool means the global config, checked without a pool context.
	var ctxs []permission.PermissionContext
	if poolName != "" {
		ctxs = append(ctxs, permission.Context(permission.CtxPool, poolName))
	}
	if !permission.Check(t, permission.PermHealingUpdate, ctxs...) {
		return permission.ErrUnauthorized
	}
	names := r.URL.Query()["name"]
	// Without explicit names, the whole config for the pool is removed.
	if len(names) == 0 {
		return healer.RemoveConfig(poolName, "")
	}
	for _, name := range names {
		if err := healer.RemoveConfig(poolName, name); err != nil {
			return err
		}
	}
	return nil
}
// title: node container list
// path: /docker/nodecontainers
// method: GET
// produce: application/json
// responses:
//   200: Ok
//   401: Unauthorized
func nodeContainerList(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	pools, err := listContextValues(t, permission.PermNodecontainerRead, true)
	if err != nil {
		return err
	}
	all, err := nodecontainer.AllNodeContainers()
	if err != nil {
		return err
	}
	// nil pools means global access; otherwise strip pool-specific configs
	// the token cannot read. The "" key (defaults) is always kept.
	if pools != nil {
		allowed := make(map[string]struct{}, len(pools))
		for _, poolName := range pools {
			allowed[poolName] = struct{}{}
		}
		for i := range all {
			for poolName := range all[i].ConfigPools {
				if poolName == "" {
					continue
				}
				if _, ok := allowed[poolName]; !ok {
					delete(all[i].ConfigPools, poolName)
				}
			}
		}
	}
	w.Header().Set("Content-Type", "application/json")
	return json.NewEncoder(w).Encode(all)
}
// title: node container create
// path: /docker/nodecontainers
// method: POST
// consume: application/x-www-form-urlencoded
// responses:
//   200: Ok
//   400: Invalid data
//   401: Unauthorized
//
// nodeContainerCreate registers a new node container configuration, globally
// or scoped to the pool given in the "pool" form value.
func nodeContainerCreate(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	err := r.ParseForm()
	if err != nil {
		return err
	}
	poolName := r.FormValue("pool")
	// An empty pool means a global config and requires the unscoped
	// permission; otherwise the check is scoped to that pool.
	if poolName == "" {
		if !permission.Check(t, permission.PermNodecontainerCreate) {
			return permission.ErrUnauthorized
		}
	} else {
		if !permission.Check(t, permission.PermNodecontainerCreate,
			permission.Context(permission.CtxPool, poolName)) {
			return permission.ErrUnauthorized
		}
	}
	dec := form.NewDecoder(nil)
	dec.IgnoreUnknownKeys(true)
	dec.IgnoreCase(true)
	var config nodecontainer.NodeContainerConfig
	err = dec.DecodeValues(&config, r.Form)
	if err != nil {
		return err
	}
	err = nodecontainer.AddNewContainer(poolName, &config)
	if err != nil {
		// Validation problems map to 400; anything else bubbles up.
		if _, ok := err.(nodecontainer.ValidationErr); ok {
			return &errors.HTTP{
				Code:    http.StatusBadRequest,
				Message: err.Error(),
			}
		}
		return err
	}
	return nil
}
// title: node container info
// path: /docker/nodecontainers/{name}
// method: GET
// produce: application/json
// responses:
//   200: Ok
//   401: Unauthorized
//   404: Not found
func nodeContainerInfo(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	pools, err := listContextValues(t, permission.PermNodecontainerRead, true)
	if err != nil {
		return err
	}
	name := r.URL.Query().Get(":name")
	configMap, err := nodecontainer.LoadNodeContainersForPools(name)
	if err != nil {
		if err == nodecontainer.ErrNodeContainerNotFound {
			return &errors.HTTP{Code: http.StatusNotFound, Message: err.Error()}
		}
		return err
	}
	// nil pools means global access; otherwise drop entries for pools the
	// token cannot see. The "" key (defaults) is always kept.
	if pools != nil {
		allowed := make(map[string]struct{}, len(pools))
		for _, poolName := range pools {
			allowed[poolName] = struct{}{}
		}
		for poolName := range configMap {
			if poolName == "" {
				continue
			}
			if _, ok := allowed[poolName]; !ok {
				delete(configMap, poolName)
			}
		}
	}
	w.Header().Set("Content-Type", "application/json")
	return json.NewEncoder(w).Encode(configMap)
}
// title: node container update
// path: /docker/nodecontainers/{name}
// method: POST
// consume: application/x-www-form-urlencoded
// responses:
//   200: Ok
//   400: Invalid data
//   401: Unauthorized
//   404: Not found
func nodeContainerUpdate(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	if err := r.ParseForm(); err != nil {
		return err
	}
	poolName := r.FormValue("pool")
	// Empty pool means the global config, checked without a pool context.
	var ctxs []permission.PermissionContext
	if poolName != "" {
		ctxs = append(ctxs, permission.Context(permission.CtxPool, poolName))
	}
	if !permission.Check(t, permission.PermNodecontainerUpdate, ctxs...) {
		return permission.ErrUnauthorized
	}
	dec := form.NewDecoder(nil)
	dec.IgnoreUnknownKeys(true)
	dec.IgnoreCase(true)
	var config nodecontainer.NodeContainerConfig
	if err := dec.DecodeValues(&config, r.Form); err != nil {
		return err
	}
	// The container name comes from the URL, never from the form body.
	config.Name = r.URL.Query().Get(":name")
	err := nodecontainer.UpdateContainer(poolName, &config)
	if err == nil {
		return nil
	}
	if err == nodecontainer.ErrNodeContainerNotFound {
		return &errors.HTTP{
			Code:    http.StatusNotFound,
			Message: err.Error(),
		}
	}
	if _, ok := err.(nodecontainer.ValidationErr); ok {
		return &errors.HTTP{
			Code:    http.StatusBadRequest,
			Message: err.Error(),
		}
	}
	return err
}
// title: remove node container
// path: /docker/nodecontainers/{name}
// method: DELETE
// responses:
//   200: Ok
//   401: Unauthorized
//   404: Not found
func nodeContainerDelete(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	name := r.URL.Query().Get(":name")
	poolName := r.URL.Query().Get("pool")
	// Empty pool means the global config, checked without a pool context.
	var ctxs []permission.PermissionContext
	if poolName != "" {
		ctxs = append(ctxs, permission.Context(permission.CtxPool, poolName))
	}
	if !permission.Check(t, permission.PermNodecontainerDelete, ctxs...) {
		return permission.ErrUnauthorized
	}
	err := nodecontainer.RemoveContainer(poolName, name)
	if err == nodecontainer.ErrNodeContainerNotFound {
		return &errors.HTTP{
			Code:    http.StatusNotFound,
			Message: fmt.Sprintf("node container %q not found for pool %q", name, poolName),
		}
	}
	return err
}
// title: node container upgrade
// path: /docker/nodecontainers/{name}/upgrade
// method: POST
// consume: application/x-www-form-urlencoded
// produce: application/x-json-stream
// responses:
//   200: Ok
//   400: Invalid data
//   401: Unauthorized
//   404: Not found
func nodeContainerUpgrade(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	name := r.URL.Query().Get(":name")
	poolName := r.FormValue("pool")
	// Empty pool means the global config, checked without a pool context.
	var ctxs []permission.PermissionContext
	if poolName != "" {
		ctxs = append(ctxs, permission.Context(permission.CtxPool, poolName))
	}
	if !permission.Check(t, permission.PermNodecontainerUpdateUpgrade, ctxs...) {
		return permission.ErrUnauthorized
	}
	// ResetImage clears the stored image reference — presumably forcing a
	// fresh pull on recreation; confirm against the nodecontainer package.
	if err := nodecontainer.ResetImage(poolName, name); err != nil {
		if err == nodecontainer.ErrNodeContainerNotFound {
			return &errors.HTTP{Code: http.StatusNotFound, Message: err.Error()}
		}
		return err
	}
	keepAlive := tsuruIo.NewKeepAliveWriter(w, 15*time.Second, "")
	defer keepAlive.Stop()
	msgWriter := &tsuruIo.SimpleJsonMessageEncoderWriter{Encoder: json.NewEncoder(keepAlive)}
	// Recreation errors are streamed; the response status is committed.
	if err := nodecontainer.RecreateNamedContainers(mainDockerProvisioner, msgWriter, name); err != nil {
		msgWriter.Encode(tsuruIo.SimpleJsonMessage{Error: err.Error()})
	}
	return nil
}
docker/handlers: add comments to describe node container create
// Copyright 2016 tsuru authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package docker
import (
"encoding/json"
stderror "errors"
"fmt"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/cezarsa/form"
"github.com/tsuru/docker-cluster/cluster"
"github.com/tsuru/monsterqueue"
"github.com/tsuru/tsuru/api"
"github.com/tsuru/tsuru/app"
"github.com/tsuru/tsuru/auth"
"github.com/tsuru/tsuru/errors"
"github.com/tsuru/tsuru/iaas"
_ "github.com/tsuru/tsuru/iaas/cloudstack"
_ "github.com/tsuru/tsuru/iaas/digitalocean"
_ "github.com/tsuru/tsuru/iaas/ec2"
tsuruIo "github.com/tsuru/tsuru/io"
"github.com/tsuru/tsuru/net"
"github.com/tsuru/tsuru/permission"
"github.com/tsuru/tsuru/provision/docker/container"
"github.com/tsuru/tsuru/provision/docker/healer"
"github.com/tsuru/tsuru/provision/docker/nodecontainer"
"github.com/tsuru/tsuru/queue"
"gopkg.in/mgo.v2"
)
// init registers every docker-provisioner route on the tsuru API router.
// All routes require an authenticated token; fine-grained permission checks
// happen inside each handler.
func init() {
	// Node management and container listing per node / per app.
	api.RegisterHandler("/docker/node", "GET", api.AuthorizationRequiredHandler(listNodesHandler))
	api.RegisterHandler("/docker/node/apps/{appname}/containers", "GET", api.AuthorizationRequiredHandler(listContainersHandler))
	api.RegisterHandler("/docker/node/{address:.*}/containers", "GET", api.AuthorizationRequiredHandler(listContainersHandler))
	api.RegisterHandler("/docker/node", "POST", api.AuthorizationRequiredHandler(addNodeHandler))
	api.RegisterHandler("/docker/node", "PUT", api.AuthorizationRequiredHandler(updateNodeHandler))
	api.RegisterHandler("/docker/node/{address:.*}", "DELETE", api.AuthorizationRequiredHandler(removeNodeHandler))
	// Container movement and rebalancing.
	api.RegisterHandler("/docker/container/{id}/move", "POST", api.AuthorizationRequiredHandler(moveContainerHandler))
	api.RegisterHandler("/docker/containers/move", "POST", api.AuthorizationRequiredHandler(moveContainersHandler))
	api.RegisterHandler("/docker/containers/rebalance", "POST", api.AuthorizationRequiredHandler(rebalanceContainersHandler))
	// Healing history and node-healer configuration.
	api.RegisterHandler("/docker/healing", "GET", api.AuthorizationRequiredHandler(healingHistoryHandler))
	api.RegisterHandler("/docker/healing/node", "GET", api.AuthorizationRequiredHandler(nodeHealingRead))
	api.RegisterHandler("/docker/healing/node", "POST", api.AuthorizationRequiredHandler(nodeHealingUpdate))
	api.RegisterHandler("/docker/healing/node", "DELETE", api.AuthorizationRequiredHandler(nodeHealingDelete))
	// Autoscale history, config, manual runs, and rules.
	api.RegisterHandler("/docker/autoscale", "GET", api.AuthorizationRequiredHandler(autoScaleHistoryHandler))
	api.RegisterHandler("/docker/autoscale/config", "GET", api.AuthorizationRequiredHandler(autoScaleGetConfig))
	api.RegisterHandler("/docker/autoscale/run", "POST", api.AuthorizationRequiredHandler(autoScaleRunHandler))
	api.RegisterHandler("/docker/autoscale/rules", "GET", api.AuthorizationRequiredHandler(autoScaleListRules))
	api.RegisterHandler("/docker/autoscale/rules", "POST", api.AuthorizationRequiredHandler(autoScaleSetRule))
	api.RegisterHandler("/docker/autoscale/rules", "DELETE", api.AuthorizationRequiredHandler(autoScaleDeleteRule))
	api.RegisterHandler("/docker/autoscale/rules/{id}", "DELETE", api.AuthorizationRequiredHandler(autoScaleDeleteRule))
	// Deprecated bs routes, kept only to return a migration hint.
	api.RegisterHandler("/docker/bs/upgrade", "POST", api.AuthorizationRequiredHandler(bsUpgradeHandler))
	api.RegisterHandler("/docker/bs/env", "POST", api.AuthorizationRequiredHandler(bsEnvSetHandler))
	api.RegisterHandler("/docker/bs", "GET", api.AuthorizationRequiredHandler(bsConfigGetHandler))
	// Node containers CRUD and upgrade.
	api.RegisterHandler("/docker/nodecontainers", "GET", api.AuthorizationRequiredHandler(nodeContainerList))
	api.RegisterHandler("/docker/nodecontainers", "POST", api.AuthorizationRequiredHandler(nodeContainerCreate))
	api.RegisterHandler("/docker/nodecontainers/{name}", "GET", api.AuthorizationRequiredHandler(nodeContainerInfo))
	api.RegisterHandler("/docker/nodecontainers/{name}", "DELETE", api.AuthorizationRequiredHandler(nodeContainerDelete))
	api.RegisterHandler("/docker/nodecontainers/{name}", "POST", api.AuthorizationRequiredHandler(nodeContainerUpdate))
	api.RegisterHandler("/docker/nodecontainers/{name}/upgrade", "POST", api.AuthorizationRequiredHandler(nodeContainerUpgrade))
	// Docker log driver configuration.
	api.RegisterHandler("/docker/logs", "GET", api.AuthorizationRequiredHandler(logsConfigGetHandler))
	api.RegisterHandler("/docker/logs", "POST", api.AuthorizationRequiredHandler(logsConfigSetHandler))
}
// title: get autoscale config
// path: /docker/autoscale/config
// method: GET
// produce: application/json
// responses:
//   200: Ok
//   401: Unauthorized
func autoScaleGetConfig(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	if !permission.Check(t, permission.PermNodeAutoscale) {
		return permission.ErrUnauthorized
	}
	w.Header().Set("Content-Type", "application/json")
	return json.NewEncoder(w).Encode(mainDockerProvisioner.initAutoScaleConfig())
}
// title: autoscale rules list
// path: /docker/autoscale/rules
// method: GET
// produce: application/json
// responses:
//   200: Ok
//   401: Unauthorized
func autoScaleListRules(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	allowedListRule := permission.Check(t, permission.PermNodeAutoscale)
	if !allowedListRule {
		return permission.ErrUnauthorized
	}
	rules, err := listAutoScaleRules()
	if err != nil {
		return err
	}
	// Consistency fix: advertise the JSON payload like every other JSON
	// handler in this file (the route declares produce: application/json).
	w.Header().Set("Content-Type", "application/json")
	return json.NewEncoder(w).Encode(&rules)
}
// title: autoscale set rule
// path: /docker/autoscale/rules
// method: POST
// consume: application/x-www-form-urlencoded
// responses:
//   200: Ok
//   400: Invalid data
//   401: Unauthorized
func autoScaleSetRule(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	if !permission.Check(t, permission.PermNodeAutoscale) {
		return permission.ErrUnauthorized
	}
	if err := r.ParseForm(); err != nil {
		return &errors.HTTP{Code: http.StatusBadRequest, Message: err.Error()}
	}
	dec := form.NewDecoder(nil)
	dec.IgnoreUnknownKeys(true)
	var rule autoScaleRule
	if err := dec.DecodeValues(&rule, r.Form); err != nil {
		return &errors.HTTP{Code: http.StatusBadRequest, Message: err.Error()}
	}
	// rule.update persists the rule (insert or replace).
	return rule.update()
}
// title: delete autoscale rule
// path: /docker/autoscale/rules/{id}
// method: DELETE
// responses:
//   200: Ok
//   401: Unauthorized
//   404: Not found
func autoScaleDeleteRule(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	allowedDeleteRule := permission.Check(t, permission.PermNodeAutoscale)
	if !allowedDeleteRule {
		return permission.ErrUnauthorized
	}
	ruleID := r.URL.Query().Get(":id")
	err := deleteAutoScaleRule(ruleID)
	if err == mgo.ErrNotFound {
		return &errors.HTTP{Code: http.StatusNotFound, Message: "rule not found"}
	}
	// Bug fix: propagate unexpected deletion errors instead of silently
	// reporting success (this previously returned nil unconditionally).
	return err
}
// validateNodeAddress checks that address is a well-formed http(s) URL with
// a non-empty host, as required for docker node registration.
func validateNodeAddress(address string) error {
	if address == "" {
		return fmt.Errorf("address=url parameter is required")
	}
	parsed, err := url.ParseRequestURI(address)
	if err != nil {
		return fmt.Errorf("Invalid address url: %s", err.Error())
	}
	switch {
	case parsed.Host == "":
		return fmt.Errorf("Invalid address url: host cannot be empty")
	case !strings.HasPrefix(parsed.Scheme, "http"):
		// Accepts both "http" and "https".
		return fmt.Errorf("Invalid address url: scheme must be http[s]")
	}
	return nil
}
// addNodeForParams registers a docker node: either an already-running one
// (isRegister) or a machine freshly created through the configured IaaS.
// It returns extra response metadata alongside any error.
func (p *dockerProvisioner) addNodeForParams(params map[string]string, isRegister bool) (map[string]string, error) {
	response := make(map[string]string)
	var machineID string
	var address string
	if isRegister {
		// Caller supplied the node address directly.
		// (Idiom fix: plain map read instead of the two-value form with a
		// discarded ok — staticcheck S1005.)
		address = params["address"]
		delete(params, "address")
	} else {
		// Provision a new machine; its IaaS description is attached to the
		// response even on failure to aid diagnosis.
		desc, _ := iaas.Describe(params["iaas"])
		response["description"] = desc
		m, err := iaas.CreateMachine(params)
		if err != nil {
			return response, err
		}
		address = m.FormatNodeAddress()
		machineID = m.Id
	}
	if err := validateNodeAddress(address); err != nil {
		return response, err
	}
	// Register as pending; a queued task finishes the node setup.
	node := cluster.Node{Address: address, Metadata: params, CreationStatus: cluster.NodeCreationStatusPending}
	if err := p.Cluster().Register(node); err != nil {
		return response, err
	}
	q, err := queue.Queue()
	if err != nil {
		return response, err
	}
	jobParams := monsterqueue.JobParams{"endpoint": address, "machine": machineID, "metadata": params}
	_, err = q.Enqueue(nodecontainer.QueueTaskName, jobParams)
	return response, err
}
// title: add node
// path: /docker/node
// method: POST
// consume: application/x-www-form-urlencoded
// produce: application/x-json-stream
// responses:
// 201: Ok
// 401: Unauthorized
// 404: Not found
func addNodeHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
err := r.ParseForm()
if err != nil {
return &errors.HTTP{Code: http.StatusBadRequest, Message: err.Error()}
}
params := map[string]string{}
dec := form.NewDecoder(nil)
dec.IgnoreUnknownKeys(true)
err = dec.DecodeValues(¶ms, r.Form)
if err != nil {
return &errors.HTTP{Code: http.StatusBadRequest, Message: err.Error()}
}
if templateName, ok := params["template"]; ok {
params, err = iaas.ExpandTemplate(templateName)
if err != nil {
return &errors.HTTP{Code: http.StatusBadRequest, Message: err.Error()}
}
}
pool := params["pool"]
if pool == "" {
return &errors.HTTP{Code: http.StatusBadRequest, Message: "pool is required"}
}
if !permission.Check(t, permission.PermNodeCreate, permission.Context(permission.CtxPool, pool)) {
return permission.ErrUnauthorized
}
isRegister, _ := strconv.ParseBool(params["register"])
if !isRegister {
canCreateMachine := permission.Check(t, permission.PermMachineCreate,
permission.Context(permission.CtxIaaS, params["iaas"]))
if !canCreateMachine {
return permission.ErrUnauthorized
}
}
delete(params, "register")
w.Header().Set("Content-Type", "application/x-json-stream")
keepAliveWriter := tsuruIo.NewKeepAliveWriter(w, 15*time.Second, "")
defer keepAliveWriter.Stop()
writer := &tsuruIo.SimpleJsonMessageEncoderWriter{Encoder: json.NewEncoder(keepAliveWriter)}
response, err := mainDockerProvisioner.addNodeForParams(params, isRegister)
if err != nil {
writer.Encode(tsuruIo.SimpleJsonMessage{
Error: fmt.Sprintf("%s\n\n%s", err, response["description"]),
})
}
w.WriteHeader(http.StatusCreated)
return nil
}
// title: remove node
// path: /docker/node/{address}
// method: DELETE
// responses:
//   200: Ok
//   401: Unauthorized
//   404: Not found
func removeNodeHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	address := r.URL.Query().Get(":address")
	if address == "" {
		return fmt.Errorf("Node address is required.")
	}
	node, err := mainDockerProvisioner.Cluster().GetNode(address)
	if err != nil {
		return &errors.HTTP{
			Code:    http.StatusNotFound,
			Message: fmt.Sprintf("Node %s not found.", address),
		}
	}
	allowedNodeRemove := permission.Check(t, permission.PermNodeDelete,
		permission.Context(permission.CtxPool, node.Metadata["pool"]),
	)
	if !allowedNodeRemove {
		return permission.ErrUnauthorized
	}
	removeIaaS, _ := strconv.ParseBool(r.URL.Query().Get("remove-iaas"))
	if removeIaaS {
		allowedIaasRemove := permission.Check(t, permission.PermMachineDelete,
			permission.Context(permission.CtxIaaS, node.Metadata["iaas"]),
		)
		if !allowedIaasRemove {
			return permission.ErrUnauthorized
		}
	}
	// Disable the node first so nothing new is scheduled on it while its
	// containers are rebalanced away.
	node.CreationStatus = cluster.NodeCreationStatusDisabled
	_, err = mainDockerProvisioner.Cluster().UpdateNode(node)
	if err != nil {
		return err
	}
	// Fix: the parse error was previously assigned to err and never
	// checked; an unparsable value simply means "do rebalance".
	noRebalance, _ := strconv.ParseBool(r.URL.Query().Get("no-rebalance"))
	if !noRebalance {
		err = mainDockerProvisioner.rebalanceContainersByHost(net.URLToHost(address), w)
		if err != nil {
			return err
		}
	}
	err = mainDockerProvisioner.Cluster().Unregister(address)
	if err != nil {
		return err
	}
	if removeIaaS {
		var m iaas.Machine
		m, err = iaas.FindMachineByIdOrAddress(node.Metadata["iaas-id"], net.URLToHost(address))
		if err != nil && err != mgo.ErrNotFound {
			// Bug fix: this previously returned nil, silently dropping
			// unexpected machine-lookup errors.
			return err
		}
		// On ErrNotFound m is the zero Machine; Destroy is still attempted,
		// preserving the original best-effort behavior.
		return m.Destroy()
	}
	return nil
}
// title: list nodes
// path: /docker/node
// method: GET
// produce: application/json
// responses:
//   200: Ok
//   204: No content
func listNodesHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	pools, err := listContextValues(t, permission.PermNodeRead, false)
	if err != nil {
		return err
	}
	nodes, err := mainDockerProvisioner.Cluster().UnfilteredNodes()
	if err != nil {
		return err
	}
	// nil pools means global read access; otherwise keep only nodes whose
	// pool is among the token's contexts.
	if pools != nil {
		kept := make([]cluster.Node, 0, len(nodes))
		for _, n := range nodes {
			for _, poolName := range pools {
				if n.Metadata["pool"] == poolName {
					kept = append(kept, n)
					break
				}
			}
		}
		nodes = kept
	}
	iaases, err := listContextValues(t, permission.PermMachineRead, false)
	if err != nil {
		return err
	}
	machines, err := iaas.ListMachines()
	if err != nil {
		return err
	}
	// Same filtering logic for IaaS machines.
	if iaases != nil {
		kept := make([]iaas.Machine, 0, len(machines))
		for _, machine := range machines {
			for _, iaasName := range iaases {
				if machine.Iaas == iaasName {
					kept = append(kept, machine)
					break
				}
			}
		}
		machines = kept
	}
	if len(nodes) == 0 && len(machines) == 0 {
		w.WriteHeader(http.StatusNoContent)
		return nil
	}
	w.Header().Set("Content-Type", "application/json")
	return json.NewEncoder(w).Encode(map[string]interface{}{
		"nodes":    nodes,
		"machines": machines,
	})
}
// title: update nodes
// path: /docker/node
// method: PUT
// consume: application/x-www-form-urlencoded
// responses:
// 200: Ok
// 400: Invalid data
// 401: Unauthorized
// 404: Not found
func updateNodeHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
err := r.ParseForm()
if err != nil {
return &errors.HTTP{Code: http.StatusBadRequest, Message: err.Error()}
}
params := map[string]string{}
dec := form.NewDecoder(nil)
dec.IgnoreUnknownKeys(true)
err = dec.DecodeValues(¶ms, r.Form)
if err != nil {
return &errors.HTTP{Code: http.StatusBadRequest, Message: err.Error()}
}
address := params["address"]
if params["address"] == "" {
return &errors.HTTP{Code: http.StatusBadRequest, Message: "address is required"}
}
oldNode, err := mainDockerProvisioner.Cluster().GetNode(address)
if err != nil {
return &errors.HTTP{
Code: http.StatusNotFound,
Message: err.Error(),
}
}
oldPool, _ := oldNode.Metadata["pool"]
allowedOldPool := permission.Check(t, permission.PermNodeUpdate,
permission.Context(permission.CtxPool, oldPool),
)
if !allowedOldPool {
return permission.ErrUnauthorized
}
newPool, ok := params["pool"]
if ok {
allowedNewPool := permission.Check(t, permission.PermNodeUpdate,
permission.Context(permission.CtxPool, newPool),
)
if !allowedNewPool {
return permission.ErrUnauthorized
}
}
delete(params, "address")
node := cluster.Node{Address: address, Metadata: params}
disable, _ := strconv.ParseBool(params["disable"])
enable, _ := strconv.ParseBool(params["enable"])
if disable && enable {
return &errors.HTTP{
Code: http.StatusBadRequest,
Message: "You can't make a node enable and disable at the same time.",
}
}
if disable {
node.CreationStatus = cluster.NodeCreationStatusDisabled
}
if enable {
node.CreationStatus = cluster.NodeCreationStatusCreated
}
_, err = mainDockerProvisioner.Cluster().UpdateNode(node)
return err
}
// title: move container
// path: /docker/container/{id}/move
// method: POST
// consume: application/x-www-form-urlencoded
// produce: application/x-json-stream
// responses:
// 200: Ok
// 400: Invalid data
// 401: Unauthorized
// 404: Not found
func moveContainerHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
err := r.ParseForm()
if err != nil {
return &errors.HTTP{Code: http.StatusBadRequest, Message: err.Error()}
}
params := map[string]string{}
dec := form.NewDecoder(nil)
dec.IgnoreUnknownKeys(true)
err = dec.DecodeValues(¶ms, r.Form)
if err != nil {
return &errors.HTTP{Code: http.StatusBadRequest, Message: err.Error()}
}
contId := r.URL.Query().Get(":id")
to := params["to"]
if to == "" {
return fmt.Errorf("Invalid params: id: %s - to: %s", contId, to)
}
cont, err := mainDockerProvisioner.GetContainer(contId)
if err != nil {
return &errors.HTTP{Code: http.StatusNotFound, Message: err.Error()}
}
permContexts, err := moveContainersPermissionContexts(cont.HostAddr, to)
if err != nil {
return err
}
if !permission.Check(t, permission.PermNode, permContexts...) {
return permission.ErrUnauthorized
}
w.Header().Set("Content-Type", "application/x-json-stream")
keepAliveWriter := tsuruIo.NewKeepAliveWriter(w, 15*time.Second, "")
defer keepAliveWriter.Stop()
writer := &tsuruIo.SimpleJsonMessageEncoderWriter{Encoder: json.NewEncoder(keepAliveWriter)}
_, err = mainDockerProvisioner.moveContainer(contId, to, writer)
if err != nil {
fmt.Fprintf(writer, "Error trying to move container: %s\n", err.Error())
} else {
fmt.Fprintf(writer, "Containers moved successfully!\n")
}
return nil
}
// title: move containers
// path: /docker/containers/move
// method: POST
// consume: application/x-www-form-urlencoded
// produce: application/x-json-stream
// responses:
// 200: Ok
// 400: Invalid data
// 401: Unauthorized
// 404: Not found
func moveContainersHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
err := r.ParseForm()
if err != nil {
return &errors.HTTP{Code: http.StatusBadRequest, Message: err.Error()}
}
params := map[string]string{}
dec := form.NewDecoder(nil)
dec.IgnoreUnknownKeys(true)
err = dec.DecodeValues(¶ms, r.Form)
if err != nil {
return &errors.HTTP{Code: http.StatusBadRequest, Message: err.Error()}
}
from := params["from"]
to := params["to"]
if from == "" || to == "" {
return fmt.Errorf("Invalid params: from: %s - to: %s", from, to)
}
permContexts, err := moveContainersPermissionContexts(from, to)
if err != nil {
return err
}
if !permission.Check(t, permission.PermNode, permContexts...) {
return permission.ErrUnauthorized
}
w.Header().Set("Content-Type", "application/x-json-stream")
keepAliveWriter := tsuruIo.NewKeepAliveWriter(w, 15*time.Second, "")
defer keepAliveWriter.Stop()
writer := &tsuruIo.SimpleJsonMessageEncoderWriter{Encoder: json.NewEncoder(keepAliveWriter)}
err = mainDockerProvisioner.MoveContainers(from, to, writer)
if err != nil {
fmt.Fprintf(writer, "Error trying to move containers: %s\n", err.Error())
} else {
fmt.Fprintf(writer, "Containers moved successfully!\n")
}
return nil
}
// moveContainersPermissionContexts resolves both hosts to registered nodes
// (404 if either is unknown) and builds the pool permission contexts
// required to move containers between them. When both nodes share a pool,
// only one context is produced.
func moveContainersPermissionContexts(from, to string) ([]permission.PermissionContext, error) {
	origin, err := mainDockerProvisioner.getNodeByHost(from)
	if err != nil {
		return nil, &errors.HTTP{Code: http.StatusNotFound, Message: err.Error()}
	}
	destination, err := mainDockerProvisioner.getNodeByHost(to)
	if err != nil {
		return nil, &errors.HTTP{Code: http.StatusNotFound, Message: err.Error()}
	}
	var contexts []permission.PermissionContext
	originPool, hasOriginPool := origin.Metadata["pool"]
	if hasOriginPool {
		contexts = append(contexts, permission.Context(permission.CtxPool, originPool))
	}
	if destPool, ok := destination.Metadata["pool"]; ok && destPool != originPool {
		contexts = append(contexts, permission.Context(permission.CtxPool, destPool))
	}
	return contexts, nil
}
func rebalanceContainersHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
var dry bool
var params struct {
Dry string
MetadataFilter map[string]string
AppFilter []string
}
err := json.NewDecoder(r.Body).Decode(¶ms)
if err == nil {
dry, _ = strconv.ParseBool(params.Dry)
}
var permContexts []permission.PermissionContext
if pool, ok := params.MetadataFilter["pool"]; ok {
permContexts = append(permContexts, permission.Context(permission.CtxPool, pool))
}
if !permission.Check(t, permission.PermNode, permContexts...) {
return permission.ErrUnauthorized
}
keepAliveWriter := tsuruIo.NewKeepAliveWriter(w, 15*time.Second, "")
defer keepAliveWriter.Stop()
writer := &tsuruIo.SimpleJsonMessageEncoderWriter{Encoder: json.NewEncoder(keepAliveWriter)}
_, err = mainDockerProvisioner.rebalanceContainersByFilter(writer, params.AppFilter, params.MetadataFilter, dry)
if err != nil {
fmt.Fprintf(writer, "Error trying to rebalance containers: %s\n", err)
} else {
fmt.Fprintf(writer, "Containers successfully rebalanced!\n")
}
return nil
}
// listContainersHandler lists containers either for a node (":address"
// route param) or for an application (":appname"), depending on which
// registered route matched.
func listContainersHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	if address := r.URL.Query().Get(":address"); address != "" {
		node, err := mainDockerProvisioner.Cluster().GetNode(address)
		if err != nil {
			return err
		}
		allowed := permission.Check(t, permission.PermNodeRead,
			permission.Context(permission.CtxPool, node.Metadata["pool"]))
		if !allowed {
			return permission.ErrUnauthorized
		}
		containers, err := mainDockerProvisioner.listContainersByHost(net.URLToHost(address))
		if err != nil {
			return err
		}
		return json.NewEncoder(w).Encode(containers)
	}
	appName := r.URL.Query().Get(":appname")
	// Ensure the app exists before listing its containers.
	// NOTE(review): this branch performs no permission check beyond
	// authentication — confirm that is intended.
	if _, err := app.GetByName(appName); err != nil {
		return err
	}
	containers, err := mainDockerProvisioner.listContainersByApp(appName)
	if err != nil {
		return err
	}
	return json.NewEncoder(w).Encode(containers)
}
// title: list healing history
// path: /docker/healing
// method: GET
// produce: application/json
// responses:
// 200: Ok
// 204: No content
// 400: Invalid data
// 401: Unauthorized
// healingHistoryHandler writes the healing history as JSON, optionally
// filtered by event kind ("node" or "container"). An empty history yields
// 204 No Content.
func healingHistoryHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	if !permission.Check(t, permission.PermHealingRead) {
		return permission.ErrUnauthorized
	}
	filter := r.URL.Query().Get("filter")
	switch filter {
	case "", "node", "container":
		// Accepted values; "" means no filtering.
	default:
		return &errors.HTTP{
			Code:    http.StatusBadRequest,
			Message: "invalid filter, possible values are 'node' or 'container'",
		}
	}
	events, err := healer.ListHealingHistory(filter)
	if err != nil {
		return err
	}
	if len(events) == 0 {
		w.WriteHeader(http.StatusNoContent)
		return nil
	}
	w.Header().Set("Content-Type", "application/json")
	return json.NewEncoder(w).Encode(events)
}
// title: list autoscale history
// path: /docker/autoscale
// method: GET
// produce: application/json
// responses:
// 200: Ok
// 204: No content
// 401: Unauthorized
// autoScaleHistoryHandler writes autoscale events as JSON, paginated by the
// "skip" and "limit" query parameters. An empty history yields 204 No
// Content.
func autoScaleHistoryHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	if !permission.Check(t, permission.PermNodeAutoscale) {
		return permission.ErrUnauthorized
	}
	query := r.URL.Query()
	// Unparsable values silently fall back to 0.
	skip, _ := strconv.Atoi(query.Get("skip"))
	limit, _ := strconv.Atoi(query.Get("limit"))
	events, err := listAutoScaleEvents(skip, limit)
	if err != nil {
		return err
	}
	if len(events) == 0 {
		w.WriteHeader(http.StatusNoContent)
		return nil
	}
	w.Header().Set("Content-Type", "application/json")
	return json.NewEncoder(w).Encode(&events)
}
// autoScaleRunHandler triggers a single autoscale run and streams its output
// as JSON messages. Because the 200 status is written before the run starts,
// run errors are reported through the stream rather than as an HTTP error.
func autoScaleRunHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	if !permission.Check(t, permission.PermNodeAutoscale) {
		return permission.ErrUnauthorized
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	// Keep the connection alive while the (potentially long) run executes.
	keepAliveWriter := tsuruIo.NewKeepAliveWriter(w, 15*time.Second, "")
	defer keepAliveWriter.Stop()
	writer := &tsuruIo.SimpleJsonMessageEncoderWriter{
		Encoder: json.NewEncoder(keepAliveWriter),
	}
	autoScaleConfig := mainDockerProvisioner.initAutoScaleConfig()
	autoScaleConfig.writer = writer
	err := autoScaleConfig.runOnce()
	if err != nil {
		writer.Encoder.Encode(tsuruIo.SimpleJsonMessage{Error: err.Error()})
	}
	return nil
}
// bsEnvSetHandler always fails: the bs env route was replaced by the node
// container API.
func bsEnvSetHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	return stderror.New("this route is deprecated, please use POST /docker/nodecontainer/{name} (node-container-update command)")
}
// bsConfigGetHandler always fails: the bs config route was replaced by the
// node container API.
func bsConfigGetHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	return stderror.New("this route is deprecated, please use GET /docker/nodecontainer/{name} (node-container-info command)")
}
// bsUpgradeHandler always fails: the bs upgrade route was replaced by the
// node container API.
func bsUpgradeHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	return stderror.New("this route is deprecated, please use POST /docker/nodecontainer/{name}/upgrade (node-container-upgrade command)")
}
// listContextValues returns the context values (e.g. pool names) the token
// holds for the given permission scheme. A global context yields (nil, nil),
// meaning unrestricted access. With failIfEmpty set, holding no context at
// all is an authorization error.
func listContextValues(t permission.Token, scheme *permission.PermissionScheme, failIfEmpty bool) ([]string, error) {
	contexts := permission.ContextsForPermission(t, scheme)
	if failIfEmpty && len(contexts) == 0 {
		return nil, permission.ErrUnauthorized
	}
	values := make([]string, 0, len(contexts))
	for _, c := range contexts {
		if c.CtxType == permission.CtxGlobal {
			return nil, nil
		}
		values = append(values, c.Value)
	}
	return values, nil
}
// title: logs config
// path: /docker/logs
// method: GET
// produce: application/json
// responses:
// 200: Ok
// 401: Unauthorized
// logsConfigGetHandler writes the docker log configuration entries as JSON.
// Tokens without a global context only see the entries for pools they can
// update logs for.
func logsConfigGetHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	// nil pools means global access; empty would have been rejected above.
	pools, err := listContextValues(t, permission.PermPoolUpdateLogs, true)
	if err != nil {
		return err
	}
	configEntries, err := container.LogLoadAll()
	if err != nil {
		return err
	}
	w.Header().Set("Content-Type", "application/json")
	if len(pools) == 0 {
		return json.NewEncoder(w).Encode(configEntries)
	}
	// Restrict the output to the pools the token can see.
	newMap := map[string]container.DockerLogConfig{}
	for _, p := range pools {
		if entry, ok := configEntries[p]; ok {
			newMap[p] = entry
		}
	}
	return json.NewEncoder(w).Encode(newMap)
}
// logsConfigSetHandler updates the docker log configuration, optionally
// scoped to a single pool ("pool" form value), and optionally restarts the
// affected apps ("restart" form value).
func logsConfigSetHandler(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	err := r.ParseForm()
	if err != nil {
		return &errors.HTTP{
			Code:    http.StatusBadRequest,
			Message: fmt.Sprintf("unable to parse form values: %s", err),
		}
	}
	pool := r.Form.Get("pool")
	restart, _ := strconv.ParseBool(r.Form.Get("restart"))
	// "pool" and "restart" are control fields, not log config entries, so
	// they must not reach the form decoder.
	delete(r.Form, "pool")
	delete(r.Form, "restart")
	var conf container.DockerLogConfig
	dec := form.NewDecoder(nil)
	dec.IgnoreUnknownKeys(true)
	err = dec.DecodeValues(&conf, r.Form)
	if err != nil {
		return &errors.HTTP{
			Code:    http.StatusBadRequest,
			Message: fmt.Sprintf("unable to parse fields in docker log config: %s", err),
		}
	}
	if pool == "" && !permission.Check(t, permission.PermPoolUpdateLogs) {
		return permission.ErrUnauthorized
	}
	// NOTE(review): when pool == "" this re-checks the permission with an
	// empty pool context right after the global check above — confirm whether
	// both checks are intentional.
	hasPermission := permission.Check(t, permission.PermPoolUpdateLogs,
		permission.Context(permission.CtxPool, pool))
	if !hasPermission {
		return permission.ErrUnauthorized
	}
	err = conf.Save(pool)
	if err != nil {
		return err
	}
	// Stream the confirmation (and any restart output) with keep-alives.
	keepAliveWriter := tsuruIo.NewKeepAliveWriter(w, 15*time.Second, "")
	defer keepAliveWriter.Stop()
	writer := &tsuruIo.SimpleJsonMessageEncoderWriter{Encoder: json.NewEncoder(keepAliveWriter)}
	fmt.Fprintln(writer, "Log config successfully updated.")
	if restart {
		filter := &app.Filter{}
		if pool != "" {
			filter.Pools = []string{pool}
		}
		tryRestartAppsByFilter(filter, writer)
	}
	return nil
}
// tryRestartAppsByFilter concurrently restarts every app matched by filter,
// reporting progress and per-app failures through writer. It is best-effort:
// errors are written to the stream instead of being returned.
func tryRestartAppsByFilter(filter *app.Filter, writer *tsuruIo.SimpleJsonMessageEncoderWriter) {
	apps, err := app.List(filter)
	if err != nil {
		writer.Encode(tsuruIo.SimpleJsonMessage{Error: err.Error()})
		return
	}
	if len(apps) == 0 {
		return
	}
	// Announce the sorted list of apps before restarting.
	appNames := make([]string, len(apps))
	for i, a := range apps {
		appNames[i] = a.Name
	}
	sort.Strings(appNames)
	fmt.Fprintf(writer, "Restarting %d applications: [%s]\n", len(apps), strings.Join(appNames, ", "))
	wg := sync.WaitGroup{}
	for i := range apps {
		wg.Add(1)
		// The index is passed as an argument so each goroutine restarts its
		// own app (avoids loop-variable capture).
		go func(i int) {
			defer wg.Done()
			a := apps[i]
			err := a.Restart("", writer)
			if err != nil {
				fmt.Fprintf(writer, "Error: unable to restart %s: %s\n", a.Name, err.Error())
			} else {
				fmt.Fprintf(writer, "App %s successfully restarted\n", a.Name)
			}
		}(i)
	}
	// NOTE(review): writer is shared by all goroutines — this assumes it is
	// safe for concurrent writes; confirm.
	wg.Wait()
}
// nodeHealingRead writes the node auto-healing configuration as JSON. Tokens
// without a global context only see the entries for pools they may read,
// plus the default ("") entry which is always kept.
func nodeHealingRead(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	pools, err := listContextValues(t, permission.PermHealingRead, true)
	if err != nil {
		return err
	}
	configMap, err := healer.GetConfig()
	if err != nil {
		return err
	}
	// len(pools) == 0 means global access: no filtering.
	if len(pools) > 0 {
		allowedPoolSet := map[string]struct{}{}
		for _, p := range pools {
			allowedPoolSet[p] = struct{}{}
		}
		for k := range configMap {
			// "" is the default configuration and stays visible.
			if k == "" {
				continue
			}
			if _, ok := allowedPoolSet[k]; !ok {
				delete(configMap, k)
			}
		}
	}
	w.Header().Set("Content-Type", "application/json")
	return json.NewEncoder(w).Encode(configMap)
}
// nodeHealingUpdate updates the node healing configuration, globally or for
// the pool given by the "pool" form value.
func nodeHealingUpdate(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	if err := r.ParseForm(); err != nil {
		return err
	}
	poolName := r.FormValue("pool")
	var allowed bool
	if poolName == "" {
		allowed = permission.Check(t, permission.PermHealingUpdate)
	} else {
		allowed = permission.Check(t, permission.PermHealingUpdate,
			permission.Context(permission.CtxPool, poolName))
	}
	if !allowed {
		return permission.ErrUnauthorized
	}
	// "pool" is a control field, not part of the healer config.
	delete(r.Form, "pool")
	dec := form.NewDecoder(nil)
	dec.IgnoreUnknownKeys(true)
	var config healer.NodeHealerConfig
	if err := dec.DecodeValues(&config, r.Form); err != nil {
		return err
	}
	return healer.UpdateConfig(poolName, config)
}
// title: remove node healing
// path: /docker/healing/node
// method: DELETE
// produce: application/json
// responses:
// 200: Ok
// 401: Unauthorized
// nodeHealingDelete removes node healing configuration entries, globally or
// for the pool given by the "pool" query parameter. With no "name" values,
// the whole configuration for the pool is removed.
func nodeHealingDelete(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	query := r.URL.Query()
	poolName := query.Get("pool")
	var allowed bool
	if poolName == "" {
		allowed = permission.Check(t, permission.PermHealingUpdate)
	} else {
		allowed = permission.Check(t, permission.PermHealingUpdate,
			permission.Context(permission.CtxPool, poolName))
	}
	if !allowed {
		return permission.ErrUnauthorized
	}
	names := query["name"]
	if len(names) == 0 {
		return healer.RemoveConfig(poolName, "")
	}
	for _, name := range names {
		if err := healer.RemoveConfig(poolName, name); err != nil {
			return err
		}
	}
	return nil
}
// title: node container list
// path: /docker/nodecontainers
// method: GET
// produce: application/json
// responses:
// 200: Ok
// 401: Unauthorized
// nodeContainerList writes every node container as JSON. Tokens without a
// global context only see the config pools they may read; the default ("")
// pool entry is always kept.
func nodeContainerList(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	// nil pools means global access: no filtering below.
	pools, err := listContextValues(t, permission.PermNodecontainerRead, true)
	if err != nil {
		return err
	}
	lst, err := nodecontainer.AllNodeContainers()
	if err != nil {
		return err
	}
	if pools != nil {
		poolMap := map[string]struct{}{}
		for _, p := range pools {
			poolMap[p] = struct{}{}
		}
		for i, entry := range lst {
			for poolName := range entry.ConfigPools {
				if poolName == "" {
					continue
				}
				if _, ok := poolMap[poolName]; !ok {
					delete(entry.ConfigPools, poolName)
				}
			}
			lst[i] = entry
		}
	}
	w.Header().Set("Content-Type", "application/json")
	return json.NewEncoder(w).Encode(lst)
}
// title: node container create
// path: /docker/nodecontainers
// method: POST
// consume: application/x-www-form-urlencoded
// responses:
// 200: Ok
// 400: Invalid data
// 401: Unauthorized
// nodeContainerCreate creates a node container from the submitted form,
// optionally scoped to the pool given by the "pool" form value. Validation
// failures are reported as 400.
func nodeContainerCreate(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	if err := r.ParseForm(); err != nil {
		return err
	}
	poolName := r.FormValue("pool")
	var allowed bool
	if poolName == "" {
		allowed = permission.Check(t, permission.PermNodecontainerCreate)
	} else {
		allowed = permission.Check(t, permission.PermNodecontainerCreate,
			permission.Context(permission.CtxPool, poolName))
	}
	if !allowed {
		return permission.ErrUnauthorized
	}
	dec := form.NewDecoder(nil)
	dec.IgnoreUnknownKeys(true)
	dec.IgnoreCase(true)
	var config nodecontainer.NodeContainerConfig
	if err := dec.DecodeValues(&config, r.Form); err != nil {
		return err
	}
	if err := nodecontainer.AddNewContainer(poolName, &config); err != nil {
		if _, isValidation := err.(nodecontainer.ValidationErr); isValidation {
			return &errors.HTTP{
				Code:    http.StatusBadRequest,
				Message: err.Error(),
			}
		}
		return err
	}
	return nil
}
// title: node container info
// path: /docker/nodecontainers/{name}
// method: GET
// produce: application/json
// responses:
// 200: Ok
// 401: Unauthorized
// 404: Not found
// nodeContainerInfo writes the per-pool configuration of the node container
// named by the ":name" URL parameter. Tokens without a global context only
// see the pools they may read; the default ("") pool entry is always kept.
func nodeContainerInfo(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	// nil pools means global access: no filtering below.
	pools, err := listContextValues(t, permission.PermNodecontainerRead, true)
	if err != nil {
		return err
	}
	name := r.URL.Query().Get(":name")
	configMap, err := nodecontainer.LoadNodeContainersForPools(name)
	if err != nil {
		if err == nodecontainer.ErrNodeContainerNotFound {
			return &errors.HTTP{
				Code:    http.StatusNotFound,
				Message: err.Error(),
			}
		}
		return err
	}
	if pools != nil {
		poolMap := map[string]struct{}{}
		for _, p := range pools {
			poolMap[p] = struct{}{}
		}
		for poolName := range configMap {
			if poolName == "" {
				continue
			}
			if _, ok := poolMap[poolName]; !ok {
				delete(configMap, poolName)
			}
		}
	}
	w.Header().Set("Content-Type", "application/json")
	return json.NewEncoder(w).Encode(configMap)
}
// title: node container update
// path: /docker/nodecontainers/{name}
// method: POST
// consume: application/x-www-form-urlencoded
// responses:
// 200: Ok
// 400: Invalid data
// 401: Unauthorized
// 404: Not found
// nodeContainerUpdate updates the node container named by the ":name" URL
// parameter, optionally scoped to the pool given by the "pool" form value.
// Unknown containers yield 404 and validation failures yield 400.
func nodeContainerUpdate(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	if err := r.ParseForm(); err != nil {
		return err
	}
	poolName := r.FormValue("pool")
	var allowed bool
	if poolName == "" {
		allowed = permission.Check(t, permission.PermNodecontainerUpdate)
	} else {
		allowed = permission.Check(t, permission.PermNodecontainerUpdate,
			permission.Context(permission.CtxPool, poolName))
	}
	if !allowed {
		return permission.ErrUnauthorized
	}
	dec := form.NewDecoder(nil)
	dec.IgnoreUnknownKeys(true)
	dec.IgnoreCase(true)
	var config nodecontainer.NodeContainerConfig
	if err := dec.DecodeValues(&config, r.Form); err != nil {
		return err
	}
	config.Name = r.URL.Query().Get(":name")
	err := nodecontainer.UpdateContainer(poolName, &config)
	if err == nil {
		return nil
	}
	if err == nodecontainer.ErrNodeContainerNotFound {
		return &errors.HTTP{
			Code:    http.StatusNotFound,
			Message: err.Error(),
		}
	}
	if _, isValidation := err.(nodecontainer.ValidationErr); isValidation {
		return &errors.HTTP{
			Code:    http.StatusBadRequest,
			Message: err.Error(),
		}
	}
	return err
}
// title: remove node container
// path: /docker/nodecontainers/{name}
// method: DELETE
// responses:
// 200: Ok
// 401: Unauthorized
// 404: Not found
// nodeContainerDelete removes the node container named by the ":name" URL
// parameter, optionally scoped to the pool given by the "pool" query
// parameter. Unknown containers yield 404.
func nodeContainerDelete(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	query := r.URL.Query()
	name := query.Get(":name")
	poolName := query.Get("pool")
	var allowed bool
	if poolName == "" {
		allowed = permission.Check(t, permission.PermNodecontainerDelete)
	} else {
		allowed = permission.Check(t, permission.PermNodecontainerDelete,
			permission.Context(permission.CtxPool, poolName))
	}
	if !allowed {
		return permission.ErrUnauthorized
	}
	err := nodecontainer.RemoveContainer(poolName, name)
	if err == nodecontainer.ErrNodeContainerNotFound {
		return &errors.HTTP{
			Code:    http.StatusNotFound,
			Message: fmt.Sprintf("node container %q not found for pool %q", name, poolName),
		}
	}
	return err
}
// title: node container upgrade
// path: /docker/nodecontainers/{name}/upgrade
// method: POST
// consume: application/x-www-form-urlencoded
// produce: application/x-json-stream
// responses:
// 200: Ok
// 400: Invalid data
// 401: Unauthorized
// 404: Not found
// nodeContainerUpgrade resets the image of the named node container and
// recreates its containers, streaming progress as JSON messages.
func nodeContainerUpgrade(w http.ResponseWriter, r *http.Request, t auth.Token) error {
	name := r.URL.Query().Get(":name")
	poolName := r.FormValue("pool")
	if poolName == "" {
		if !permission.Check(t, permission.PermNodecontainerUpdateUpgrade) {
			return permission.ErrUnauthorized
		}
	} else {
		if !permission.Check(t, permission.PermNodecontainerUpdateUpgrade,
			permission.Context(permission.CtxPool, poolName)) {
			return permission.ErrUnauthorized
		}
	}
	err := nodecontainer.ResetImage(poolName, name)
	if err != nil {
		if err == nodecontainer.ErrNodeContainerNotFound {
			return &errors.HTTP{
				Code:    http.StatusNotFound,
				Message: err.Error(),
			}
		}
		return err
	}
	// From here on the response is a keep-alive stream: recreation errors are
	// reported through the writer and the handler still returns nil.
	keepAliveWriter := tsuruIo.NewKeepAliveWriter(w, 15*time.Second, "")
	defer keepAliveWriter.Stop()
	writer := &tsuruIo.SimpleJsonMessageEncoderWriter{Encoder: json.NewEncoder(keepAliveWriter)}
	err = nodecontainer.RecreateNamedContainers(mainDockerProvisioner, writer, name)
	if err != nil {
		writer.Encode(tsuruIo.SimpleJsonMessage{Error: err.Error()})
	}
	return nil
}
|
package setup
import (
"bytes"
"fmt"
"os"
"os/exec"
"strings"
)
// RestoreOptions defines the options used when restoring a dump file into a
// database.
type RestoreOptions struct {
	// CustomArgs are extra command-line arguments passed verbatim to
	// pg_restore.
	CustomArgs []string
}
// RestoreDumpFile calls 'pg_restore' to restore a dump file generated by
// pg_dump into the database described by connDetail.
//
// options.CustomArgs are passed through verbatim. When connDetail.Password
// is set, it is exported via PGPASSWORD for the child process. On failure,
// the returned error includes pg_restore's stderr and the argument list.
func RestoreDumpFile(connDetail ConnectionDetails, dumpFile string, options RestoreOptions) error {
	pgRestoreBin := "pg_restore"
	if pgsqlBinPATH != "" {
		pgRestoreBin = fmt.Sprintf("%s/pg_restore", pgsqlBinPATH)
	}
	// Build the argument vector directly instead of formatting a string and
	// re-splitting it on spaces: the old approach produced an empty ""
	// argument when CustomArgs was empty and broke any argument containing a
	// space.
	args := []string{"-U", connDetail.Username, "-d", connDetail.Database}
	args = append(args, options.CustomArgs...)
	args = append(args, dumpFile)
	if connDetail.Password != "" {
		if err := os.Setenv("PGPASSWORD", connDetail.Password); err != nil {
			return err
		}
	}
	cmd := exec.Command(pgRestoreBin, args...)
	var out bytes.Buffer
	var stderr bytes.Buffer
	cmd.Stdout = &out
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		// Constant format string keeps go vet's printf check happy.
		return fmt.Errorf("%v. %s(%s)", err, stderr.String(), strings.Join(args, " "))
	}
	return nil
}
Better formatting on restore errors.
package setup
import (
"bytes"
"fmt"
"os"
"os/exec"
"strings"
)
// RestoreOptions defines the options used when restoring a dump file into a
// database.
type RestoreOptions struct {
	// CustomArgs are extra command-line arguments passed verbatim to
	// pg_restore.
	CustomArgs []string
}
// RestoreDumpFile calls 'pg_restore' to restore a dump file generated by
// pg_dump into the database described by connDetail.
//
// options.CustomArgs are passed through verbatim. When connDetail.Password
// is set, it is exported via PGPASSWORD for the child process. On failure,
// the returned error includes pg_restore's stderr and the exact command.
func RestoreDumpFile(connDetail ConnectionDetails, dumpFile string, options RestoreOptions) error {
	pgRestoreBin := "pg_restore"
	if pgsqlBinPATH != "" {
		pgRestoreBin = fmt.Sprintf("%s/pg_restore", pgsqlBinPATH)
	}
	// Build the argument vector directly instead of formatting a string and
	// re-splitting it on spaces: the old approach produced an empty ""
	// argument when CustomArgs was empty and broke any argument containing a
	// space.
	args := []string{"-U", connDetail.Username, "-d", connDetail.Database}
	args = append(args, options.CustomArgs...)
	args = append(args, dumpFile)
	if connDetail.Password != "" {
		if err := os.Setenv("PGPASSWORD", connDetail.Password); err != nil {
			return err
		}
	}
	cmd := exec.Command(pgRestoreBin, args...)
	var out bytes.Buffer
	var stderr bytes.Buffer
	cmd.Stdout = &out
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		// Single fmt.Errorf with a constant format string replaces the old
		// fmt.Errorf(fmt.Sprintf(...)), which tripped go vet's printf check.
		return fmt.Errorf("%v. %s\nCMD: %s %s", err, stderr.String(), pgRestoreBin, strings.Join(args, " "))
	}
	return nil
}
|
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed under the Apache License, Version 2.0
// that can be found in the LICENSE file.
// Package pio is a peripherals I/O library. It contains host, devices, and
// test packages to emulate the hardware.
//
// pio acts as a registry of drivers.
//
// Every device driver should register itself in their package init() function
// by calling pio.Register().
//
// The user call pio.Init() on startup to initialize all the registered drivers
// in the correct order all at once.
//
// - cmd/ contains executables to communicate directly with the devices or the
// buses using raw protocols.
// - devices/ contains devices drivers that are connected to a bus (i.e I²C,
// SPI, GPIO) that can be controlled by the host, i.e. ssd1306 (display
// controller), bm280 (environmental sensor), etc. 'devices' contains the
// interfaces and subpackages contain contain concrete types.
// - experimental/ contains the drivers that are in the experimental area,
// not yet considered stable. See DESIGN.md for the process to move drivers
// out of this area.
// - host/ contains all the implementations relating to the host itself, the
// CPU and buses that are exposed by the host onto which devices can be
// connected, i.e. I²C, SPI, GPIO, etc. 'host' contains the interfaces
// and subpackages contain contain concrete types.
// - conn/ contains interfaces for all the supported protocols and
// connections (I²C, SPI, GPIO, etc).
// - tests/ contains smoke tests.
package pio
import (
"errors"
"fmt"
"sync"
)
// Type represents the category of a driver.
//
// Lower values are loaded earlier.
type Type int

const (
	// Processor is the first driver to be loaded.
	Processor Type = iota
	// Pins is basic pin functionality driver, additional to Processor.
	//
	// This includes all headers description.
	Pins
	// Functional is for functionality pin driver, additional to Pins.
	Functional
	// Bus is higher level protocol drivers.
	Bus
	// Device is drivers connecting to buses.
	Device
	nbPriorities
)

const typeName = "ProcessorPinsFunctionalBusDevicenbPriorities"

var typeIndex = [...]uint8{0, 9, 13, 23, 26, 32, 44}

// String returns the name of the type, or "Type(n)" for out-of-range values.
func (i Type) String() string {
	if i >= 0 && i < Type(len(typeIndex)-1) {
		return typeName[typeIndex[i]:typeIndex[i+1]]
	}
	return fmt.Sprintf("Type(%d)", i)
}
// Driver is an implementation for a protocol.
type Driver interface {
	// String returns the name of the driver, as to be presented to the user. It
	// should be unique.
	String() string
	// Type is the type of driver.
	//
	// This is used to load the drivers in order.
	//
	// If a driver implements multiple levels of functionality, it should return
	// the most important one, the one with the lowest value.
	Type() Type
	// Prerequisites returns a list of drivers that must be successfully loaded
	// first before attempting to load this driver.
	Prerequisites() []string
	// Init initializes the driver.
	//
	// A driver may enter one of the three following states: loaded successfully,
	// was skipped as irrelevant on this host, failed to load.
	//
	// On success, it must return true, nil.
	//
	// When irrelevant (skipped), it must return false, errors.New(<reason>).
	//
	// On failure, it must return true, errors.New(<reason>). The failure must
	// state why it failed, for example an expected OS provided driver couldn't
	// be opened, e.g. /dev/gpiomem on Raspbian.
	Init() (bool, error)
}
// DriverFailure associates a driver with the error that made it fail to load
// or be skipped.
type DriverFailure struct {
	D   Driver
	Err error
}

// String formats the failure as "<driver>: <error>".
func (d DriverFailure) String() string {
	return fmt.Sprintf("%s: %v", d.D, d.Err)
}
// State is the state of loaded device drivers.
type State struct {
	// Loaded holds the drivers that initialized successfully.
	Loaded []Driver
	// Skipped holds the drivers that are irrelevant on this host.
	Skipped []DriverFailure
	// Failed holds the drivers that failed to initialize.
	Failed []DriverFailure
}
// Init initializes all the relevant drivers.
//
// Drivers are started concurrently per Type.
//
// It returns the list of all drivers loaded and errors on the first call, if
// any. They are ordered by Type but unordered within each type.
//
// A second call is ignored and errors are discarded.
//
// Users will want to use host.Init(), which guarantees a baseline of included
// drivers.
func Init() (*State, error) {
	lockState.Lock()
	defer lockState.Unlock()
	if state != nil {
		// Already initialized: return the cached result.
		return state, nil
	}
	state = &State{}
	// One channel per outcome; three collector goroutines funnel results
	// into state so loadStage() can report concurrently.
	cD := make(chan Driver)
	cS := make(chan DriverFailure)
	cE := make(chan DriverFailure)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for d := range cD {
			state.Loaded = append(state.Loaded, d)
		}
	}()
	wg.Add(1)
	go func() {
		defer wg.Done()
		for d := range cS {
			state.Skipped = append(state.Skipped, d)
		}
	}()
	wg.Add(1)
	go func() {
		defer wg.Done()
		for f := range cE {
			state.Failed = append(state.Failed, f)
		}
	}()
	stages, err := getStages()
	if err != nil {
		return state, err
	}
	// loaded accumulates successfully initialized driver names across stages
	// so later stages can verify their prerequisites.
	loaded := map[string]struct{}{}
	for _, drivers := range stages {
		loadStage(drivers, loaded, cD, cS, cE)
	}
	close(cD)
	close(cS)
	close(cE)
	wg.Wait()
	return state, nil
}
// Register adds a driver to the set initialized automatically by Init().
//
// Driver names (d.String()) must be unique; registering after Init() has run
// is an error.
func Register(d Driver) error {
	lockState.Lock()
	alreadyInit := state != nil
	lockState.Unlock()
	if alreadyInit {
		return errors.New("drivers: can't call Register() after Init()")
	}
	lockDrivers.Lock()
	defer lockDrivers.Unlock()
	name := d.String()
	if _, dup := byName[name]; dup {
		return fmt.Errorf("drivers.Register(%q): driver with same name was already registered", d)
	}
	byName[name] = d
	typ := d.Type()
	allDrivers[typ] = append(allDrivers[typ], d)
	return nil
}
// MustRegister registers d and panics when registration fails.
func MustRegister(d Driver) {
	err := Register(d)
	if err != nil {
		panic(err)
	}
}
// Registry state shared by Register(), Init() and getStages().
var (
	lockDrivers sync.Mutex
	// allDrivers indexes registered drivers by their Type.
	allDrivers [nbPriorities][]Driver
	// byName maps driver name (String()) to driver.
	byName = map[string]Driver{}
	lockState sync.Mutex
	// state is non-nil once Init() has run.
	state *State
)
// getStages flattens the registered drivers into an ordered list of stages:
// stages follow driver Type order, with extra stages inserted when drivers of
// the same type depend on each other (a DAG reduced to a stage list).
//
// This cannot be done in Register() since the drivers are not registered in
// order.
func getStages() ([][]Driver, error) {
	lockDrivers.Lock()
	defer lockDrivers.Unlock()
	var stages [][]Driver
	for _, typed := range allDrivers {
		if len(typed) == 0 {
			// No driver registered for this type.
			continue
		}
		sub, err := explodeStages(typed)
		if err != nil {
			return nil, err
		}
		stages = append(stages, sub...)
	}
	return stages, nil
}
// explodeStages creates multiple intermediate stages if needed.
//
// It searches if there's any driver that has a dependency on another driver
// from this stage and creates intermediate stages if so.
func explodeStages(drivers []Driver) ([][]Driver, error) {
	// Driver name -> set of same-type prerequisites still unsatisfied.
	dependencies := map[string]map[string]struct{}{}
	for _, d := range drivers {
		dependencies[d.String()] = map[string]struct{}{}
	}
	for _, d := range drivers {
		name := d.String()
		t := d.Type()
		for _, depName := range d.Prerequisites() {
			dep, ok := byName[depName]
			if !ok {
				// Typo fix: error message previously read "unsatified".
				return nil, fmt.Errorf("drivers: unsatisfied dependency %q->%q; it is missing; skipping", name, depName)
			}
			dt := dep.Type()
			if dt > t {
				// A driver cannot depend on a driver of a later Type.
				return nil, fmt.Errorf("drivers: inversed dependency %q(%q)->%q(%q); skipping", name, t, depName, dt)
			}
			if dt < t {
				// Staging by Type already takes care of this.
				continue
			}
			// Dependency between two drivers of the same type. This can happen
			// when there's a processor class driver and a processor
			// specialization driver, e.g. allwinner->R8, allwinner->A64.
			dependencies[name][depName] = struct{}{}
		}
	}
	var stages [][]Driver
	for len(dependencies) != 0 {
		// Create a stage from every driver whose dependencies are all met.
		var stage []string
		var l []Driver
		for name, deps := range dependencies {
			if len(deps) == 0 {
				stage = append(stage, name)
				l = append(l, byName[name])
				delete(dependencies, name)
			}
		}
		if len(stage) == 0 {
			// Nothing could be scheduled: there is a dependency cycle.
			return nil, fmt.Errorf("drivers: found cycle(s) in drivers dependencies; %v", dependencies)
		}
		stages = append(stages, l)
		// Remove the just-staged drivers from the remaining dependency sets.
		for _, passed := range stage {
			for name := range dependencies {
				delete(dependencies[name], passed)
			}
		}
	}
	return stages, nil
}
// loadStage loads all the drivers in this stage concurrently.
//
// loaded is extended with the names of the drivers that initialized
// successfully; cD, cS and cE receive loaded, skipped and failed drivers
// respectively.
func loadStage(drivers []Driver, loaded map[string]struct{}, cD chan<- Driver, cS chan<- DriverFailure, cE chan<- DriverFailure) {
	var wg sync.WaitGroup
	// skip[i] != nil marks driver i as not loaded. Each goroutine writes only
	// its own slot, so concurrent access is safe.
	skip := make([]error, len(drivers))
	// Pre-skip drivers whose prerequisites were not loaded. Prerequisites are
	// guaranteed to be in a previous stage by getStages().
	for i, driver := range drivers {
		for _, dep := range driver.Prerequisites() {
			if _, ok := loaded[dep]; !ok {
				skip[i] = fmt.Errorf("dependency not loaded: %q", dep)
				break
			}
		}
	}
	for i, driver := range drivers {
		if err := skip[i]; err != nil {
			cS <- DriverFailure{driver, err}
			continue
		}
		wg.Add(1)
		go func(d Driver, j int) {
			defer wg.Done()
			if ok, err := d.Init(); ok {
				if err == nil {
					cD <- d
					return
				}
				cE <- DriverFailure{d, err}
				// NOTE(review): a failed driver is still recorded in loaded
				// below — confirm whether its dependents should really load.
			} else {
				// Do not assert that err != nil, as this is hard to test
				// thoroughly.
				cS <- DriverFailure{d, err}
				// BUG FIX: the condition was inverted (err != nil), which
				// clobbered a real skip reason with "no reason was given" and
				// left skip[j] nil when Init returned (false, nil), wrongly
				// marking a skipped driver as loaded. Only substitute a
				// reason when none was provided.
				if err == nil {
					err = errors.New("no reason was given")
				}
				skip[j] = err
			}
		}(driver, i)
	}
	wg.Wait()
	for i, driver := range drivers {
		if skip[i] != nil {
			continue
		}
		loaded[driver.String()] = struct{}{}
	}
}
First typo fix.
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed under the Apache License, Version 2.0
// that can be found in the LICENSE file.
// Package pio is a peripherals I/O library. It contains host, devices, and
// test packages to emulate the hardware.
//
// pio acts as a registry of drivers.
//
// Every device driver should register itself in their package init() function
// by calling pio.Register().
//
// The user call pio.Init() on startup to initialize all the registered drivers
// in the correct order all at once.
//
// - cmd/ contains executables to communicate directly with the devices or the
// buses using raw protocols.
// - devices/ contains devices drivers that are connected to a bus (i.e I²C,
// SPI, GPIO) that can be controlled by the host, i.e. ssd1306 (display
// controller), bm280 (environmental sensor), etc. 'devices' contains the
// interfaces and subpackages contain contain concrete types.
// - experimental/ contains the drivers that are in the experimental area,
// not yet considered stable. See DESIGN.md for the process to move drivers
// out of this area.
// - host/ contains all the implementations relating to the host itself, the
// CPU and buses that are exposed by the host onto which devices can be
// connected, i.e. I²C, SPI, GPIO, etc. 'host' contains the interfaces
// and subpackages contain contain concrete types.
// - conn/ contains interfaces for all the supported protocols and
// connections (I²C, SPI, GPIO, etc).
// - tests/ contains smoke tests.
package pio
import (
"errors"
"fmt"
"sync"
)
// Type represents the category of a driver.
//
// Lower values are loaded earlier.
type Type int

const (
	// Processor is the first driver to be loaded.
	Processor Type = iota
	// Pins is basic pin functionality driver, additional to Processor.
	//
	// This includes all headers description.
	Pins
	// Functional is for functionality pin driver, additional to Pins.
	Functional
	// Bus is higher level protocol drivers.
	Bus
	// Device is drivers connecting to buses.
	Device
	nbPriorities
)

const typeName = "ProcessorPinsFunctionalBusDevicenbPriorities"

var typeIndex = [...]uint8{0, 9, 13, 23, 26, 32, 44}

// String returns the name of the type, or "Type(n)" for out-of-range values.
func (i Type) String() string {
	if i >= 0 && i < Type(len(typeIndex)-1) {
		return typeName[typeIndex[i]:typeIndex[i+1]]
	}
	return fmt.Sprintf("Type(%d)", i)
}
// Driver is an implementation for a protocol.
type Driver interface {
	// String returns the name of the driver, as to be presented to the user. It
	// should be unique.
	String() string
	// Type is the type of driver.
	//
	// This is used to load the drivers in order.
	//
	// If a driver implements multiple levels of functionality, it should return
	// the most important one, the one with the lowest value.
	Type() Type
	// Prerequisites returns a list of drivers that must be successfully loaded
	// first before attempting to load this driver.
	Prerequisites() []string
	// Init initializes the driver.
	//
	// A driver may enter one of the three following states: loaded successfully,
	// was skipped as irrelevant on this host, failed to load.
	//
	// On success, it must return true, nil.
	//
	// When irrelevant (skipped), it must return false, errors.New(<reason>).
	//
	// On failure, it must return true, errors.New(<reason>). The failure must
	// state why it failed, for example an expected OS provided driver couldn't
	// be opened, e.g. /dev/gpiomem on Raspbian.
	Init() (bool, error)
}
// DriverFailure associates a driver with the error that made it fail to load
// or be skipped.
type DriverFailure struct {
	D   Driver
	Err error
}

// String formats the failure as "<driver>: <error>".
func (d DriverFailure) String() string {
	return fmt.Sprintf("%s: %v", d.D, d.Err)
}
// State is the state of loaded device drivers.
type State struct {
	// Loaded holds the drivers that initialized successfully.
	Loaded []Driver
	// Skipped holds the drivers that are irrelevant on this host.
	Skipped []DriverFailure
	// Failed holds the drivers that failed to initialize.
	Failed []DriverFailure
}
// Init initializes all the relevant drivers.
//
// Drivers are started concurrently per Type.
//
// It returns the list of all drivers loaded and errors on the first call, if
// any. They are ordered by Type but unordered within each type.
//
// A second call is ignored and errors are discarded.
//
// Users will want to use host.Init(), which guarantees a baseline of included
// drivers.
func Init() (*State, error) {
	lockState.Lock()
	defer lockState.Unlock()
	if state != nil {
		// Already initialized: return the cached result.
		return state, nil
	}
	state = &State{}
	// One channel per outcome; three collector goroutines funnel results
	// into state so loadStage() can report concurrently.
	cD := make(chan Driver)
	cS := make(chan DriverFailure)
	cE := make(chan DriverFailure)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for d := range cD {
			state.Loaded = append(state.Loaded, d)
		}
	}()
	wg.Add(1)
	go func() {
		defer wg.Done()
		for d := range cS {
			state.Skipped = append(state.Skipped, d)
		}
	}()
	wg.Add(1)
	go func() {
		defer wg.Done()
		for f := range cE {
			state.Failed = append(state.Failed, f)
		}
	}()
	stages, err := getStages()
	if err != nil {
		return state, err
	}
	// loaded accumulates successfully initialized driver names across stages
	// so later stages can verify their prerequisites.
	loaded := map[string]struct{}{}
	for _, drivers := range stages {
		loadStage(drivers, loaded, cD, cS, cE)
	}
	close(cD)
	close(cS)
	close(cE)
	wg.Wait()
	return state, nil
}
// Register adds a driver to the set initialized automatically by Init().
//
// Driver names (d.String()) must be unique; registering after Init() has run
// is an error.
func Register(d Driver) error {
	lockState.Lock()
	alreadyInit := state != nil
	lockState.Unlock()
	if alreadyInit {
		return errors.New("drivers: can't call Register() after Init()")
	}
	lockDrivers.Lock()
	defer lockDrivers.Unlock()
	name := d.String()
	if _, dup := byName[name]; dup {
		return fmt.Errorf("drivers.Register(%q): driver with same name was already registered", d)
	}
	byName[name] = d
	typ := d.Type()
	allDrivers[typ] = append(allDrivers[typ], d)
	return nil
}
// MustRegister registers d and panics when registration fails.
func MustRegister(d Driver) {
	err := Register(d)
	if err != nil {
		panic(err)
	}
}
// Registry state shared by Register(), Init() and getStages().
var (
	lockDrivers sync.Mutex
	// allDrivers indexes registered drivers by their Type.
	allDrivers [nbPriorities][]Driver
	// byName maps driver name (String()) to driver.
	byName = map[string]Driver{}
	lockState sync.Mutex
	// state is non-nil once Init() has run.
	state *State
)
// getStages flattens the registered drivers into an ordered list of stages:
// stages follow driver Type order, with extra stages inserted when drivers of
// the same type depend on each other (a DAG reduced to a stage list).
//
// This cannot be done in Register() since the drivers are not registered in
// order.
func getStages() ([][]Driver, error) {
	lockDrivers.Lock()
	defer lockDrivers.Unlock()
	var stages [][]Driver
	for _, typed := range allDrivers {
		if len(typed) == 0 {
			// No driver registered for this type.
			continue
		}
		sub, err := explodeStages(typed)
		if err != nil {
			return nil, err
		}
		stages = append(stages, sub...)
	}
	return stages, nil
}
// explodeStages creates multiple intermediate stages if needed.
//
// It searches if there's any driver that has a dependency on another driver
// from this stage and creates intermediate stages if so: a Kahn-style
// topological sort reduced to a list of stages, so a driver is always loaded
// after its same-type prerequisites.
func explodeStages(drivers []Driver) ([][]Driver, error) {
	// dependencies maps each driver name to the set of same-type driver names
	// it must wait for.
	dependencies := map[string]map[string]struct{}{}
	for _, d := range drivers {
		dependencies[d.String()] = map[string]struct{}{}
	}
	for _, d := range drivers {
		name := d.String()
		t := d.Type()
		for _, depName := range d.Prerequisites() {
			dep, ok := byName[depName]
			if !ok {
				return nil, fmt.Errorf("drivers: unsatisfied dependency %q->%q; it is missing; skipping", name, depName)
			}
			dt := dep.Type()
			if dt > t {
				// A driver cannot depend on a driver of a later (higher) type.
				return nil, fmt.Errorf("drivers: inversed dependency %q(%q)->%q(%q); skipping", name, t, depName, dt)
			}
			if dt < t {
				// Staging by type already takes care of this.
				continue
			}
			// Dependency between two drivers of the same type. This can happen
			// when there's a process class driver and a processor specialization
			// driver. As an example, allwinner->R8, allwinner->A64, etc.
			dependencies[name][depName] = struct{}{}
		}
	}
	var stages [][]Driver
	for len(dependencies) != 0 {
		// Create a stage with every driver whose same-type dependencies are
		// all resolved.
		var stage []string
		var l []Driver
		for name, deps := range dependencies {
			if len(deps) == 0 {
				stage = append(stage, name)
				l = append(l, byName[name])
				// Deleting the current key while ranging a map is safe in Go.
				delete(dependencies, name)
			}
		}
		if len(stage) == 0 {
			// No progress was made: the remaining drivers form a cycle.
			return nil, fmt.Errorf("drivers: found cycle(s) in drivers dependencies; %v", dependencies)
		}
		stages = append(stages, l)
		// Trim the names placed in this stage off the remaining dependency sets.
		for _, passed := range stage {
			for name := range dependencies {
				delete(dependencies[name], passed)
			}
		}
	}
	return stages, nil
}
// loadStage loads all the drivers in this stage concurrently.
//
// Drivers whose prerequisites did not load are reported on cS (skipped);
// drivers whose Init() succeeded are reported on cD (loaded); drivers whose
// Init() returned an error are reported on cE (failed). The names of
// successfully loaded drivers are added to loaded so later stages can check
// their prerequisites.
func loadStage(drivers []Driver, loaded map[string]struct{}, cD chan<- Driver, cS chan<- DriverFailure, cE chan<- DriverFailure) {
	var wg sync.WaitGroup
	// Each goroutine writes only to its own index, so no lock is needed.
	skip := make([]error, len(drivers))
	for i, driver := range drivers {
		// Load only the driver if prerequisites were loaded. They are
		// guaranteed to be in a previous stage by getStages().
		for _, dep := range driver.Prerequisites() {
			if _, ok := loaded[dep]; !ok {
				skip[i] = fmt.Errorf("dependency not loaded: %q", dep)
				break
			}
		}
	}
	for i, driver := range drivers {
		if err := skip[i]; err != nil {
			cS <- DriverFailure{driver, err}
			continue
		}
		wg.Add(1)
		go func(d Driver, j int) {
			defer wg.Done()
			if ok, err := d.Init(); ok {
				if err == nil {
					cD <- d
					return
				}
				cE <- DriverFailure{d, err}
				// A driver that failed Init() must not satisfy the
				// prerequisites of later stages.
				skip[j] = err
			} else {
				// Do not assert that err != nil, as this is hard to test
				// thoroughly. The original code had this check inverted
				// (err != nil), which clobbered real skip reasons with the
				// placeholder and, when err was nil, left skip[j] nil so the
				// driver was wrongly marked as loaded below. Normalize the
				// error first, then report and record it.
				if err == nil {
					err = errors.New("no reason was given")
				}
				cS <- DriverFailure{d, err}
				skip[j] = err
			}
		}(driver, i)
	}
	wg.Wait()
	// Mark as loaded only the drivers that were neither skipped nor failed.
	for i, driver := range drivers {
		if skip[i] != nil {
			continue
		}
		loaded[driver.String()] = struct{}{}
	}
}
|
package pip
import (
"encoding/csv"
"fmt"
rtreego "github.com/dhconnelly/rtreego"
lru "github.com/hashicorp/golang-lru"
geo "github.com/kellydunn/golang-geo"
geojson "github.com/whosonfirst/go-whosonfirst-geojson"
utils "github.com/whosonfirst/go-whosonfirst-utils"
metrics "github.com/rcrowley/go-metrics"
"io"
"os"
"path"
"time"
)
// WOFPointInPolygonTiming records how long (in seconds) one named step of a
// lookup took.
type WOFPointInPolygonTiming struct {
	Event    string
	Duration float64
}

// WOFPointInPolygonMetrics bundles go-metrics instruments for lookups.
type WOFPointInPolygonMetrics struct {
	Registry *metrics.Registry
	Lookups  *metrics.Counter
	Timer    *metrics.Gauge
}

// WOFPointInPolygon indexes WhosOnFirst GeoJSON features (stored under
// Source) in an R-tree and answers point-in-polygon queries, with an LRU
// cache of parsed polygons and a per-placetype tally.
type WOFPointInPolygon struct {
	Rtree      *rtreego.Rtree
	Cache      *lru.Cache
	Source     string
	Placetypes map[string]int
}
// PointInPolygon creates a WOFPointInPolygon whose GeoJSON files live under
// source, backed by a 2-D R-tree and a 256-entry LRU polygon cache.
func PointInPolygon(source string) (*WOFPointInPolygon, error) {
	cache, err := lru.New(256)
	if err != nil {
		return nil, err
	}
	return &WOFPointInPolygon{
		Rtree:      rtreego.NewTree(2, 25, 50),
		Cache:      cache,
		Source:     source,
		Placetypes: make(map[string]int),
	}, nil
}
func PointInPolygonMetrics () (*WOFPointInPolygonMetrics) {
registry := metrics.NewRegistry()
lookups := metrics.NewCounter()
timer := metrics.NewGauge()
m := WOFPointInPolygonMetrics{
Registry: ®istry,
Lookups: &lookups,
Timer: &timer,
}
return &m
}
// IndexGeoJSONFile parses the GeoJSON file at source and indexes its feature.
func (p WOFPointInPolygon) IndexGeoJSONFile(source string) error {
	feature, err := geojson.UnmarshalFile(source)
	if err != nil {
		return err
	}
	return p.IndexGeoJSONFeature(*feature)
}
// IndexGeoJSONFeature spatializes feature, tallies its placetype and inserts
// it into the R-tree.
func (p WOFPointInPolygon) IndexGeoJSONFeature(feature geojson.WOFFeature) error {
	spatial, err := feature.EnSpatialize()
	if err != nil {
		return err
	}
	// Incrementing a missing key yields 1, so no existence check is needed.
	p.Placetypes[spatial.Placetype]++
	p.Rtree.Insert(spatial)
	return nil
}
// IndexMetaFile indexes every GeoJSON file referenced by a WhosOnFirst meta
// CSV file. offset is the column holding the path relative to p.Source.
// Rows whose file does not exist (or is too short) are silently skipped.
func (p WOFPointInPolygon) IndexMetaFile(csv_file string, offset int) error {
	body, read_err := os.Open(csv_file)
	if read_err != nil {
		return read_err
	}
	// The handle was previously leaked on every exit path.
	defer body.Close()
	r := csv.NewReader(body)
	for {
		record, err := r.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		// sudo my kingdom for a DictReader in Go...
		// (20151013/thisisaaronland)
		if offset >= len(record) {
			// Short row: previously this panicked with index out of range.
			continue
		}
		rel_path := record[offset]
		abs_path := path.Join(p.Source, rel_path)
		if _, err = os.Stat(abs_path); os.IsNotExist(err) {
			continue
		}
		if index_err := p.IndexGeoJSONFile(abs_path); index_err != nil {
			return index_err
		}
	}
	return nil
}
// GetIntersectsByLatLon returns the R-tree entries whose bounding boxes
// intersect a tiny box anchored at (lon, lat).
func (p WOFPointInPolygon) GetIntersectsByLatLon(lat float64, lon float64) []rtreego.Spatial {
	// how small can I make this?
	bbox, _ := rtreego.NewRect(rtreego.Point{lon, lat}, []float64{0.0001, 0.0001})
	return p.Rtree.SearchIntersect(bbox)
}
// maybe just merge this above - still unsure (20151013/thisisaaronland)

// InflateSpatialResults downcasts generic R-tree results back to WOFSpatial.
func (p WOFPointInPolygon) InflateSpatialResults(results []rtreego.Spatial) []*geojson.WOFSpatial {
	inflated := make([]*geojson.WOFSpatial, 0, len(results))
	for _, r := range results {
		// https://golang.org/doc/effective_go.html#interface_conversions
		inflated = append(inflated, r.(*geojson.WOFSpatial))
	}
	return inflated
}
// GetByLatLon is GetByLatLonForPlacetype without a placetype filter.
func (p WOFPointInPolygon) GetByLatLon(lat float64, lon float64) ([]*geojson.WOFSpatial, []*WOFPointInPolygonTiming) {
	// An empty placetype means "do not filter"; see GetByLatLonForPlacetype.
	return p.GetByLatLonForPlacetype(lat, lon, "")
}
// GetByLatLonForPlacetype finds the WOF features whose polygons contain
// (lat, lon), optionally restricted to one placetype. It returns the
// containing features plus per-step timings (in seconds).
func (p WOFPointInPolygon) GetByLatLonForPlacetype(lat float64, lon float64, placetype string) ([]*geojson.WOFSpatial, []*WOFPointInPolygonTiming) {
	timings := make([]*WOFPointInPolygonTiming, 0)
	// Step 1: coarse R-tree bounding-box intersection.
	t1a := time.Now()
	intersects := p.GetIntersectsByLatLon(lat, lon)
	t1b := float64(time.Since(t1a)) / 1e9
	timings = append(timings, &WOFPointInPolygonTiming{"intersects", t1b})
	// Step 2: downcast the generic results to WOFSpatial.
	t2a := time.Now()
	inflated := p.InflateSpatialResults(intersects)
	t2b := float64(time.Since(t2a)) / 1e9
	timings = append(timings, &WOFPointInPolygonTiming{"inflate", t2b})
	// See what's going on here? We are filtering by placetype before
	// doing the final point-in-poly lookup so we don't try to load country
	// records while only searching for localities.
	filtered := make([]*geojson.WOFSpatial, 0)
	if placetype != "" {
		t3a := time.Now()
		filtered = p.FilterByPlacetype(inflated, placetype)
		t3b := float64(time.Since(t3a)) / 1e9
		timings = append(timings, &WOFPointInPolygonTiming{"placetype", t3b})
	} else {
		filtered = inflated
	}
	// Final step: exact point-in-polygon containment test.
	t4a := time.Now()
	contained := p.EnsureContained(lat, lon, filtered)
	t4b := float64(time.Since(t4a)) / 1e9
	timings = append(timings, &WOFPointInPolygonTiming{"contained", t4b})
	return contained, timings
}
// FilterByPlacetype keeps only the results whose Placetype equals placetype.
func (p WOFPointInPolygon) FilterByPlacetype(results []*geojson.WOFSpatial, placetype string) []*geojson.WOFSpatial {
	keep := make([]*geojson.WOFSpatial, 0)
	for _, candidate := range results {
		if candidate.Placetype != placetype {
			continue
		}
		keep = append(keep, candidate)
	}
	return keep
}
// EnsureContained runs the exact point-in-polygon test on each candidate and
// returns only the features whose geometry actually contains (lat, lon).
func (p WOFPointInPolygon) EnsureContained(lat float64, lon float64, results []*geojson.WOFSpatial) []*geojson.WOFSpatial {
	contained := make([]*geojson.WOFSpatial, 0)
	// NOTE(review): geo.NewPoint is called as (lat, lon) here while the
	// rtreego points above are (lon, lat) — confirm against golang-geo docs.
	pt := geo.NewPoint(lat, lon)
	for _, wof := range results {
		t1a := time.Now()
		polygons, err := p.LoadPolygons(wof)
		if err != nil {
			// please log me
			continue
		}
		t1b := float64(time.Since(t1a)) / 1e9
		fmt.Printf("[debug] time to load polygons is %f\n", t1b)
		is_contained := false
		count := len(polygons)
		iters := 0
		t3a := time.Now()
		// A feature may consist of several polygons; the first hit wins.
		for _, poly := range polygons {
			iters += 1
			if poly.Contains(pt) {
				is_contained = true
				break
			}
		}
		t3b := float64(time.Since(t3a)) / 1e9
		fmt.Printf("[debug] time to check containment (%t) after %d/%d possible iterations is %f\n", is_contained, iters, count, t3b)
		if is_contained {
			contained = append(contained, wof)
		}
	}
	count_in := len(results)
	count_out := len(contained)
	fmt.Printf("[debug] contained: %d/%d\n", count_out, count_in)
	return contained
}
// LoadPolygons returns the geo polygons for a WOF feature, reading (and, for
// large geometries, caching) its GeoJSON file under p.Source.
func (p WOFPointInPolygon) LoadPolygons(wof *geojson.WOFSpatial) ([]*geo.Polygon, error) {
	id := wof.Id
	if cached, ok := p.Cache.Get(id); ok {
		fmt.Printf("[debug] return polygons from cache for %d\n", id)
		return cached.([]*geo.Polygon), nil
	}
	t2a := time.Now()
	abs_path := utils.Id2AbsPath(p.Source, id)
	feature, err := geojson.UnmarshalFile(abs_path)
	t2b := float64(time.Since(t2a)) / 1e9
	fmt.Printf("[debug] time to marshal %s is %f\n", abs_path, t2b)
	if err != nil {
		return nil, err
	}
	t3a := time.Now()
	polygons := feature.GeomToPolygons()
	var points int
	// The loop variable previously shadowed the receiver p.
	for _, poly := range polygons {
		points += len(poly.Points())
	}
	t3b := float64(time.Since(t3a)) / 1e9
	fmt.Printf("[debug] time to convert geom to polygons (%d points) is %f\n", points, t3b)
	// Only cache big geometries; small ones are cheap to re-read.
	if points >= 100 {
		// lru.Cache.Add already evicts the oldest entry at capacity, so the
		// previous hard-coded "Len() == 256" manual eviction is unnecessary
		// (and was wrong for any other cache size).
		_ = p.Cache.Add(id, polygons)
		fmt.Printf("[cache] %d because so many points (%d)\n", id, points)
	}
	return polygons, nil
}
// IsKnownPlacetype reports whether at least one indexed feature had this
// placetype.
func (p WOFPointInPolygon) IsKnownPlacetype(pt string) bool {
	_, known := p.Placetypes[pt]
	return known
}
Make all the metrics code build and try to log the metrics... but where does the log output go?
package pip
import (
"encoding/csv"
"fmt"
rtreego "github.com/dhconnelly/rtreego"
lru "github.com/hashicorp/golang-lru"
geo "github.com/kellydunn/golang-geo"
metrics "github.com/rcrowley/go-metrics"
geojson "github.com/whosonfirst/go-whosonfirst-geojson"
utils "github.com/whosonfirst/go-whosonfirst-utils"
"io"
_ "log"
"os"
"path"
"time"
)
// WOFPointInPolygonTiming records how long (in seconds) one named step of a
// lookup took.
type WOFPointInPolygonTiming struct {
	Event    string
	Duration float64
}

// WOFPointInPolygonMetrics bundles go-metrics instruments for lookups.
type WOFPointInPolygonMetrics struct {
	Registry      *metrics.Registry
	Lookups       *metrics.Counter
	TimeToProcess *metrics.GaugeFloat64
}

// WOFPointInPolygon indexes WhosOnFirst GeoJSON features (stored under
// Source) in an R-tree and answers point-in-polygon queries, with an LRU
// cache of parsed polygons, a per-placetype tally and a metrics bundle.
type WOFPointInPolygon struct {
	Rtree      *rtreego.Rtree
	Cache      *lru.Cache
	Source     string
	Placetypes map[string]int
	Metrics    *WOFPointInPolygonMetrics
}
// PointInPolygon creates a WOFPointInPolygon rooted at source, with a 2-D
// R-tree index, a 256-entry LRU polygon cache and a metrics bundle.
func PointInPolygon(source string) (*WOFPointInPolygon, error) {
	cache, err := lru.New(256)
	if err != nil {
		return nil, err
	}
	return &WOFPointInPolygon{
		Rtree:      rtreego.NewTree(2, 25, 50),
		Cache:      cache,
		Source:     source,
		Placetypes: make(map[string]int),
		Metrics:    NewPointInPolygonMetrics(),
	}, nil
}
func NewPointInPolygonMetrics() *WOFPointInPolygonMetrics {
registry := metrics.NewRegistry()
lookups := metrics.NewCounter()
ttp := metrics.NewGaugeFloat64()
registry.Register("lookups", lookups)
registry.Register("time-to-process", ttp)
m := WOFPointInPolygonMetrics{
Registry: ®istry,
Lookups: &lookups,
TimeToProcess: &ttp,
}
return &m
}
// IndexGeoJSONFile parses the GeoJSON file at source and indexes its feature.
func (p WOFPointInPolygon) IndexGeoJSONFile(source string) error {
	feature, err := geojson.UnmarshalFile(source)
	if err != nil {
		return err
	}
	return p.IndexGeoJSONFeature(*feature)
}
// IndexGeoJSONFeature spatializes feature, tallies its placetype and inserts
// it into the R-tree.
func (p WOFPointInPolygon) IndexGeoJSONFeature(feature geojson.WOFFeature) error {
	spatial, err := feature.EnSpatialize()
	if err != nil {
		return err
	}
	// Incrementing a missing key yields 1, so no existence check is needed.
	p.Placetypes[spatial.Placetype]++
	p.Rtree.Insert(spatial)
	return nil
}
// IndexMetaFile indexes every GeoJSON file referenced by a WhosOnFirst meta
// CSV file. offset is the column holding the path relative to p.Source.
// Rows whose file does not exist (or is too short) are silently skipped.
func (p WOFPointInPolygon) IndexMetaFile(csv_file string, offset int) error {
	body, read_err := os.Open(csv_file)
	if read_err != nil {
		return read_err
	}
	// The handle was previously leaked on every exit path.
	defer body.Close()
	r := csv.NewReader(body)
	for {
		record, err := r.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		// sudo my kingdom for a DictReader in Go...
		// (20151013/thisisaaronland)
		if offset >= len(record) {
			// Short row: previously this panicked with index out of range.
			continue
		}
		rel_path := record[offset]
		abs_path := path.Join(p.Source, rel_path)
		if _, err = os.Stat(abs_path); os.IsNotExist(err) {
			continue
		}
		if index_err := p.IndexGeoJSONFile(abs_path); index_err != nil {
			return index_err
		}
	}
	return nil
}
// GetIntersectsByLatLon returns the R-tree entries whose bounding boxes
// intersect a tiny box anchored at (lon, lat).
func (p WOFPointInPolygon) GetIntersectsByLatLon(lat float64, lon float64) []rtreego.Spatial {
	// how small can I make this?
	bbox, _ := rtreego.NewRect(rtreego.Point{lon, lat}, []float64{0.0001, 0.0001})
	return p.Rtree.SearchIntersect(bbox)
}
// maybe just merge this above - still unsure (20151013/thisisaaronland)

// InflateSpatialResults downcasts generic R-tree results back to WOFSpatial.
func (p WOFPointInPolygon) InflateSpatialResults(results []rtreego.Spatial) []*geojson.WOFSpatial {
	inflated := make([]*geojson.WOFSpatial, 0, len(results))
	for _, r := range results {
		// https://golang.org/doc/effective_go.html#interface_conversions
		inflated = append(inflated, r.(*geojson.WOFSpatial))
	}
	return inflated
}
// GetByLatLon is GetByLatLonForPlacetype without a placetype filter.
func (p WOFPointInPolygon) GetByLatLon(lat float64, lon float64) ([]*geojson.WOFSpatial, []*WOFPointInPolygonTiming) {
	// An empty placetype means "do not filter"; see GetByLatLonForPlacetype.
	return p.GetByLatLonForPlacetype(lat, lon, "")
}
// GetByLatLonForPlacetype finds the WOF features whose polygons contain
// (lat, lon), optionally restricted to one placetype. It returns the
// containing features plus per-step timings (in seconds). The metrics hooks
// are currently disabled (commented out below).
func (p WOFPointInPolygon) GetByLatLonForPlacetype(lat float64, lon float64, placetype string) ([]*geojson.WOFSpatial, []*WOFPointInPolygonTiming) {
	// Really? Go, you SO weird...
	/*
		var c metrics.Counter
		c = *p.Metrics.Lookups
		c.Inc(1)
	*/
	timings := make([]*WOFPointInPolygonTiming, 0)
	// Step 1: coarse R-tree bounding-box intersection.
	t1a := time.Now()
	intersects := p.GetIntersectsByLatLon(lat, lon)
	t1b := float64(time.Since(t1a)) / 1e9
	timings = append(timings, &WOFPointInPolygonTiming{"intersects", t1b})
	// Step 2: downcast the generic results to WOFSpatial.
	t2a := time.Now()
	inflated := p.InflateSpatialResults(intersects)
	t2b := float64(time.Since(t2a)) / 1e9
	timings = append(timings, &WOFPointInPolygonTiming{"inflate", t2b})
	// See what's going on here? We are filtering by placetype before
	// doing the final point-in-poly lookup so we don't try to load country
	// records while only searching for localities.
	filtered := make([]*geojson.WOFSpatial, 0)
	if placetype != "" {
		t3a := time.Now()
		filtered = p.FilterByPlacetype(inflated, placetype)
		t3b := float64(time.Since(t3a)) / 1e9
		timings = append(timings, &WOFPointInPolygonTiming{"placetype", t3b})
	} else {
		filtered = inflated
	}
	// Final step: exact point-in-polygon containment test.
	t4a := time.Now()
	contained := p.EnsureContained(lat, lon, filtered)
	t4b := float64(time.Since(t4a)) / 1e9
	timings = append(timings, &WOFPointInPolygonTiming{"contained", t4b})
	/*
		ttp := float64(time.Since(t1a)) / 1e9
		var g metrics.GaugeFloat64
		g = *p.Metrics.TimeToProcess
		g.Update(ttp)
		var r metrics.Registry
		r = *p.Metrics.Registry
		go metrics.Log(r, 60e9, log.New(os.Stdout, "metrics: ", log.Lmicroseconds))
	*/
	return contained, timings
}
// FilterByPlacetype keeps only the results whose Placetype equals placetype.
func (p WOFPointInPolygon) FilterByPlacetype(results []*geojson.WOFSpatial, placetype string) []*geojson.WOFSpatial {
	keep := make([]*geojson.WOFSpatial, 0)
	for _, candidate := range results {
		if candidate.Placetype != placetype {
			continue
		}
		keep = append(keep, candidate)
	}
	return keep
}
// EnsureContained runs the exact point-in-polygon test on each candidate and
// returns only the features whose geometry actually contains (lat, lon).
func (p WOFPointInPolygon) EnsureContained(lat float64, lon float64, results []*geojson.WOFSpatial) []*geojson.WOFSpatial {
	contained := make([]*geojson.WOFSpatial, 0)
	// NOTE(review): geo.NewPoint is called as (lat, lon) here while the
	// rtreego points above are (lon, lat) — confirm against golang-geo docs.
	pt := geo.NewPoint(lat, lon)
	for _, wof := range results {
		t1a := time.Now()
		polygons, err := p.LoadPolygons(wof)
		if err != nil {
			// please log me
			continue
		}
		t1b := float64(time.Since(t1a)) / 1e9
		fmt.Printf("[debug] time to load polygons is %f\n", t1b)
		is_contained := false
		count := len(polygons)
		iters := 0
		t3a := time.Now()
		// A feature may consist of several polygons; the first hit wins.
		for _, poly := range polygons {
			iters += 1
			if poly.Contains(pt) {
				is_contained = true
				break
			}
		}
		t3b := float64(time.Since(t3a)) / 1e9
		fmt.Printf("[debug] time to check containment (%t) after %d/%d possible iterations is %f\n", is_contained, iters, count, t3b)
		if is_contained {
			contained = append(contained, wof)
		}
	}
	count_in := len(results)
	count_out := len(contained)
	fmt.Printf("[debug] contained: %d/%d\n", count_out, count_in)
	return contained
}
// LoadPolygons returns the geo polygons for a WOF feature, reading (and, for
// large geometries, caching) its GeoJSON file under p.Source.
func (p WOFPointInPolygon) LoadPolygons(wof *geojson.WOFSpatial) ([]*geo.Polygon, error) {
	id := wof.Id
	if cached, ok := p.Cache.Get(id); ok {
		fmt.Printf("[debug] return polygons from cache for %d\n", id)
		return cached.([]*geo.Polygon), nil
	}
	t2a := time.Now()
	abs_path := utils.Id2AbsPath(p.Source, id)
	feature, err := geojson.UnmarshalFile(abs_path)
	t2b := float64(time.Since(t2a)) / 1e9
	fmt.Printf("[debug] time to marshal %s is %f\n", abs_path, t2b)
	if err != nil {
		return nil, err
	}
	t3a := time.Now()
	polygons := feature.GeomToPolygons()
	var points int
	// The loop variable previously shadowed the receiver p.
	for _, poly := range polygons {
		points += len(poly.Points())
	}
	t3b := float64(time.Since(t3a)) / 1e9
	fmt.Printf("[debug] time to convert geom to polygons (%d points) is %f\n", points, t3b)
	// Only cache big geometries; small ones are cheap to re-read.
	if points >= 100 {
		// lru.Cache.Add already evicts the oldest entry at capacity, so the
		// previous hard-coded "Len() == 256" manual eviction is unnecessary
		// (and was wrong for any other cache size).
		_ = p.Cache.Add(id, polygons)
		fmt.Printf("[cache] %d because so many points (%d)\n", id, points)
	}
	return polygons, nil
}
// IsKnownPlacetype reports whether at least one indexed feature had this
// placetype.
func (p WOFPointInPolygon) IsKnownPlacetype(pt string) bool {
	_, known := p.Placetypes[pt]
	return known
}
|
package pip
import (
"github.com/dhconnelly/rtreego"
"github.com/whosonfirst/go-whosonfirst-geojson"
)
// WOFPointInPolygon wraps an R-tree index of WOF feature bounding boxes.
type WOFPointInPolygon struct {
	Rtree *rtreego.Rtree
}
// PointInPolygon creates an empty index backed by a 2-D R-tree.
func PointInPolygon() *WOFPointInPolygon {
	return &WOFPointInPolygon{
		Rtree: rtreego.NewTree(2, 25, 50),
	}
}
// IndexGeoJSONFile parses the GeoJSON file at source and indexes its feature.
func (p WOFPointInPolygon) IndexGeoJSONFile(source string) error {
	feature, err := geojson.UnmarshalFile(source)
	if err != nil {
		return err
	}
	return p.IndexGeoJSONFeature(*feature)
}
// IndexGeoJSONFeature inserts the feature's bounding box into the R-tree.
func (p WOFPointInPolygon) IndexGeoJSONFeature(feature geojson.WOFFeature) error {
	bounds, err := feature.Bounds()
	if err != nil {
		return err
	}
	p.Rtree.Insert(bounds)
	return nil
}
// GetByLatLon returns the R-tree entries whose bounding boxes intersect a
// tiny box anchored at (lon, lat).
func (p WOFPointInPolygon) GetByLatLon(lat float64, lon float64) []rtreego.Spatial {
	// how small can I make this?
	bbox, _ := rtreego.NewRect(rtreego.Point{lon, lat}, []float64{0.0001, 0.0001})
	return p.Rtree.SearchIntersect(bbox)
}
// InflateResults downcasts generic R-tree results back to WOFRTree entries.
func (p WOFPointInPolygon) InflateResults(results []rtreego.Spatial) []*geojson.WOFRTree {
	inflated := make([]*geojson.WOFRTree, 0, len(results))
	for _, r := range results {
		// https://golang.org/doc/effective_go.html#interface_conversions
		inflated = append(inflated, r.(*geojson.WOFRTree))
	}
	return inflated
}
Update to reflect the EnSpatialize changes in go-whosonfirst-geojson.
package pip
import (
"github.com/dhconnelly/rtreego"
"github.com/whosonfirst/go-whosonfirst-geojson"
)
// WOFPointInPolygon wraps an R-tree index of spatialized WOF features.
type WOFPointInPolygon struct {
	Rtree *rtreego.Rtree
}
// PointInPolygon creates an empty index backed by a 2-D R-tree.
func PointInPolygon() *WOFPointInPolygon {
	return &WOFPointInPolygon{
		Rtree: rtreego.NewTree(2, 25, 50),
	}
}
// IndexGeoJSONFile parses the GeoJSON file at source and indexes its feature.
func (p WOFPointInPolygon) IndexGeoJSONFile(source string) error {
	feature, err := geojson.UnmarshalFile(source)
	if err != nil {
		return err
	}
	return p.IndexGeoJSONFeature(*feature)
}
// IndexGeoJSONFeature spatializes the feature and inserts it into the R-tree.
func (p WOFPointInPolygon) IndexGeoJSONFeature(feature geojson.WOFFeature) error {
	spatial, err := feature.EnSpatialize()
	if err != nil {
		return err
	}
	p.Rtree.Insert(spatial)
	return nil
}
// GetByLatLon returns the R-tree entries whose bounding boxes intersect a
// tiny box anchored at (lon, lat).
func (p WOFPointInPolygon) GetByLatLon(lat float64, lon float64) []rtreego.Spatial {
	// how small can I make this?
	bbox, _ := rtreego.NewRect(rtreego.Point{lon, lat}, []float64{0.0001, 0.0001})
	return p.Rtree.SearchIntersect(bbox)
}
func (p WOFPointInPolygon) InflateResults(results []rtreego.Spatial) []*geojson.WOFSpatial {
inflated := make([]*geojson.WOFSpatial, 0)
for _, r := range results {
// https://golang.org/doc/effective_go.html#interface_conversions
wof := r.(*geojson.WOFSpatial)
inflated = append(inflated, wof)
}
return inflated
} |
package pmc
import (
"errors"
"fmt"
"math"
"github.com/dgryski/go-bits"
"github.com/dgryski/go-farm"
"github.com/dgryski/go-pcgr"
"github.com/willf/bitset"
)
var (
	// rnd is the package-wide PCG random source; the fixed seed makes runs
	// deterministic.
	rnd = pcgr.Rand{State: 42, Inc: 0xcafebabe}
	// Use const due to quick conversion against 0.78 (n = 1000000.0)
	// Actual implementation => n := -2 * sketch.m * math.Log(k) / (m * (1 - p))
	n = 10000000.0
)
// non-receiver methods

// georand draws a geometrically distributed column index in [0, w): the
// position of the leftmost 1-bit of a random 32-bit draw, clamped to w-1.
func georand(w uint) uint {
	v := rnd.Next()
	// Position of the leftmost 1-bit (leading zeros of the 32-bit value).
	pos := uint(bits.Clz(uint64(v))) - 32
	if pos >= w {
		pos = w - 1
	}
	return pos
}
// rand draws a uniform row index in [0, m).
func rand(m uint) uint {
	v := rnd.Next()
	return uint(v) % m
}
/*
qk is the probability that at least the first k bits in a sketch row are set
after n additions, adjusted for the background fill rate p — the modified
version of equation (4) from the PMC paper.
*/
func qk(k, n, p float64) float64 {
	prob := 1.0
	for i := 1.0; i <= k; i++ {
		layer := math.Pow(1.0-math.Pow(2, -i), n)
		prob *= 1.0 - layer*(1.0-p)
	}
	return prob
}
/*
Sketch is a Probabilistic Multiplicity Counting Sketch, a novel data structure
that is capable of accounting traffic per flow probabilistically, that can be
used as an alternative to Count-min sketch.
*/
type Sketch struct {
	// l is the total number of bits in the sketch.
	l float64
	// m is the number of virtual rows per flow.
	m float64
	// w is the number of virtual columns per flow.
	w float64
	bitmap *bitset.BitSet // FIXME: Get Rid of bitmap and use uint32 array
	// p caches the global fill rate; 0 means "stale, recompute".
	p float64
}
/*
New returns a PMC Sketch with the properties:
l = total number of bits for sketch
m = total number of rows for each flow
w = total number of columns for each flow
*/
func New(l uint, m uint, w uint) (*Sketch, error) {
	switch {
	case l == 0:
		return nil, errors.New("Expected l > 0, got 0")
	case m == 0:
		return nil, errors.New("Expected m > 0, got 0")
	case w == 0:
		return nil, errors.New("Expected w > 0, got 0")
	}
	s := &Sketch{l: float64(l), m: float64(m), w: float64(w), bitmap: bitset.New(l)}
	return s, nil
}
/*
NewForMaxFlows returns a PMC Sketch adapted to the size of the max number of
flows expected (32 bits per flow, 256 rows, 32 columns).
*/
func NewForMaxFlows(maxFlows uint) (*Sketch, error) {
	bits := maxFlows * 32
	return New(bits, 256, 32)
}
// printVirtualMatrix dumps the flow's virtual m x w bit matrix to stdout,
// one row per line.
func (sketch *Sketch) printVirtualMatrix(flow []byte) {
	for row := 0.0; row < sketch.m; row++ {
		for col := 0.0; col < sketch.w; col++ {
			bit := 0
			if sketch.bitmap.Test(sketch.getPos(flow, row, col)) {
				bit = 1
			}
			fmt.Print(bit)
		}
		fmt.Println("")
	}
}
/*
GetFillRate returns the percentage of bits set in the sketch.
*/
func (sketch *Sketch) GetFillRate() float64 {
	fill := sketch.getP()
	return fill * 100
}
/*
getPos hashes (f, i, j) to a bit position in the sketch. It is straightforward
to use any uniformly distributed hash function with sufficiently random output
in the role of H: the input parameters can simply be concatenated to a single
bit string.
*/
func (sketch *Sketch) getPos(f []byte, i, j float64) uint {
	h := farm.Hash64WithSeeds(f, uint64(i), uint64(j))
	return uint(h) % uint(sketch.l)
}
/*
Increment the count of the flow by 1.
*/
func (sketch *Sketch) Increment(flow []byte) {
	// Invalidate the cached fill rate; GetEstimate recomputes it lazily.
	sketch.p = 0
	row := rand(uint(sketch.m))
	col := georand(uint(sketch.w))
	sketch.bitmap.Set(sketch.getPos(flow, float64(row), float64(col)))
}
// getZSum sums, over all rows of the flow's virtual matrix, the index of the
// first unset bit in that row.
func (sketch *Sketch) getZSum(flow []byte) float64 {
	total := 0.0
	for row := 0.0; row < sketch.m; row++ {
		for col := 0.0; col < sketch.w; col++ {
			if !sketch.bitmap.Test(sketch.getPos(flow, row, col)) {
				total += col
				break
			}
		}
	}
	return total
}
// getEmptyRows counts the rows whose first bit is still unset for this flow.
func (sketch *Sketch) getEmptyRows(flow []byte) float64 {
	empty := 0.0
	for row := 0.0; row < sketch.m; row++ {
		if !sketch.bitmap.Test(sketch.getPos(flow, row, 0)) {
			empty++
		}
	}
	return empty
}
// getP returns the fraction of bits set in the whole bitmap.
func (sketch *Sketch) getP() float64 {
	set := 0.0
	for i := uint(0); i < uint(sketch.l); i++ {
		if sketch.bitmap.Test(i) {
			set++
		}
	}
	return set / sketch.l
}
// getE computes the expected index of the first empty column via the
// telescoping sum over qk.
func (sketch *Sketch) getE(n, p float64) float64 {
	sum := 0.0
	for k := 1.0; k <= sketch.w; k++ {
		sum += k * (qk(k, n, p) - qk(k+1, n, p))
	}
	return sum
}
// rho is the correction factor used by GetEstimate.
func (sketch *Sketch) rho(n, p float64) float64 {
	e := sketch.getE(n, p)
	return math.Pow(2, e) / n
}
/*
GetEstimate returns the estimated count of a given flow.
*/
func (sketch *Sketch) GetEstimate(flow []byte) float64 {
	// The cached fill rate is zeroed by Increment; recompute lazily.
	if sketch.p == 0 {
		sketch.p = sketch.getP()
	}
	k := sketch.getEmptyRows(flow)
	e := 0.0
	// Dealing with small multiplicities: when many rows are still empty,
	// a direct log-based estimator is used instead of the rho correction.
	if kp := k / (1 - sketch.p); kp > 0.3*sketch.m {
		e = -2 * sketch.m * math.Log(kp/sketch.m)
	} else {
		z := sketch.getZSum(flow)
		// n here is the package-level constant, not a per-sketch counter.
		e = sketch.m * math.Pow(2, z/sketch.m) / sketch.rho(n, sketch.p)
	}
	return math.Abs(e)
}
Fix rho calculation
package pmc
import (
"errors"
"fmt"
"math"
"github.com/dgryski/go-bits"
"github.com/dgryski/go-farm"
"github.com/dgryski/go-pcgr"
"github.com/willf/bitset"
)
var (
	// rnd is the package-wide PCG random source; the fixed seed makes runs
	// deterministic.
	rnd = pcgr.Rand{State: 42, Inc: 0xcafebabe}
	// Use const due to quick conversion against 0.78 (n = 1000000.0)
	// Actual implementation => n := -2 * sketch.m * math.Log(k) / (m * (1 - p))
	maxN = 10000000.0
)
// non-receiver methods

// georand draws a geometrically distributed column index in [0, w): the
// position of the leftmost 1-bit of a random 32-bit draw, clamped to w-1.
func georand(w uint) uint {
	v := rnd.Next()
	// Position of the leftmost 1-bit (leading zeros of the 32-bit value).
	pos := uint(bits.Clz(uint64(v))) - 32
	if pos >= w {
		pos = w - 1
	}
	return pos
}
// rand draws a uniform row index in [0, m).
func rand(m uint) uint {
	v := rnd.Next()
	return uint(v) % m
}
/*
qk is the probability that at least the first k bits in a sketch row are set
after n additions, adjusted for the background fill rate p — the modified
version of equation (4) from the PMC paper.
*/
func qk(k, n, p float64) float64 {
	prob := 1.0
	for i := 1.0; i <= k; i++ {
		layer := math.Pow(1.0-math.Pow(2, -i), n)
		prob *= 1.0 - layer*(1.0-p)
	}
	return prob
}
/*
Sketch is a Probabilistic Multiplicity Counting Sketch, a novel data structure
that is capable of accounting traffic per flow probabilistically, that can be
used as an alternative to Count-min sketch.
*/
type Sketch struct {
	// l is the total number of bits in the sketch.
	l float64
	// m is the number of virtual rows per flow.
	m float64
	// w is the number of virtual columns per flow.
	w float64
	bitmap *bitset.BitSet // FIXME: Get Rid of bitmap and use uint32 array
	// p caches the global fill rate; 0 means "stale, recompute".
	p float64
	// n counts total Increment calls across all flows.
	n uint
}
/*
New returns a PMC Sketch with the properties:
l = total number of bits for sketch
m = total number of rows for each flow
w = total number of columns for each flow
*/
func New(l uint, m uint, w uint) (*Sketch, error) {
	switch {
	case l == 0:
		return nil, errors.New("Expected l > 0, got 0")
	case m == 0:
		return nil, errors.New("Expected m > 0, got 0")
	case w == 0:
		return nil, errors.New("Expected w > 0, got 0")
	}
	s := &Sketch{
		l:      float64(l),
		m:      float64(m),
		w:      float64(w),
		bitmap: bitset.New(l),
		n:      0,
	}
	return s, nil
}
/*
NewForMaxFlows returns a PMC Sketch adapted to the size of the max number of
flows expected (32 bits per flow, 256 rows, 32 columns).
*/
func NewForMaxFlows(maxFlows uint) (*Sketch, error) {
	bits := maxFlows * 32
	return New(bits, 256, 32)
}
// printVirtualMatrix dumps the flow's virtual m x w bit matrix to stdout,
// one row per line.
func (sketch *Sketch) printVirtualMatrix(flow []byte) {
	for row := 0.0; row < sketch.m; row++ {
		for col := 0.0; col < sketch.w; col++ {
			bit := 0
			if sketch.bitmap.Test(sketch.getPos(flow, row, col)) {
				bit = 1
			}
			fmt.Print(bit)
		}
		fmt.Println("")
	}
}
/*
GetFillRate returns the percentage of bits set in the sketch.
*/
func (sketch *Sketch) GetFillRate() float64 {
	fill := sketch.getP()
	return fill * 100
}
/*
getPos hashes (f, i, j) to a bit position in the sketch. It is straightforward
to use any uniformly distributed hash function with sufficiently random output
in the role of H: the input parameters can simply be concatenated to a single
bit string.
*/
func (sketch *Sketch) getPos(f []byte, i, j float64) uint {
	h := farm.Hash64WithSeeds(f, uint64(i), uint64(j))
	return uint(h) % uint(sketch.l)
}
/*
Increment the count of the flow by 1.
*/
func (sketch *Sketch) Increment(flow []byte) {
	// Invalidate the cached fill rate; GetEstimate recomputes it lazily.
	sketch.p = 0
	row := rand(uint(sketch.m))
	col := georand(uint(sketch.w))
	sketch.bitmap.Set(sketch.getPos(flow, float64(row), float64(col)))
	// Track the total number of additions for the rho cutoff.
	sketch.n++
}
// getZSum sums, over all rows of the flow's virtual matrix, the index of the
// first unset bit in that row.
func (sketch *Sketch) getZSum(flow []byte) float64 {
	total := 0.0
	for row := 0.0; row < sketch.m; row++ {
		for col := 0.0; col < sketch.w; col++ {
			if !sketch.bitmap.Test(sketch.getPos(flow, row, col)) {
				total += col
				break
			}
		}
	}
	return total
}
// getEmptyRows counts the rows whose first bit is still unset for this flow.
func (sketch *Sketch) getEmptyRows(flow []byte) float64 {
	empty := 0.0
	for row := 0.0; row < sketch.m; row++ {
		if !sketch.bitmap.Test(sketch.getPos(flow, row, 0)) {
			empty++
		}
	}
	return empty
}
// getP returns the fraction of bits set in the whole bitmap.
func (sketch *Sketch) getP() float64 {
	set := 0.0
	for i := uint(0); i < uint(sketch.l); i++ {
		if sketch.bitmap.Test(i) {
			set++
		}
	}
	return set / sketch.l
}
// getE computes the expected index of the first empty column via the
// telescoping sum over qk.
func (sketch *Sketch) getE(n, p float64) float64 {
	sum := 0.0
	for k := 1.0; k <= sketch.w; k++ {
		sum += k * (qk(k, n, p) - qk(k+1, n, p))
	}
	return sum
}
// rho is the correction factor used by GetEstimate; past maxN additions it is
// pinned to the asymptotic constant 0.78.
func (sketch *Sketch) rho(n, p float64) float64 {
	if n >= maxN {
		return 0.78
	}
	e := sketch.getE(n, p)
	return math.Pow(2, e) / n
}
/*
GetEstimate returns the estimated count of a given flow.
*/
func (sketch *Sketch) GetEstimate(flow []byte) float64 {
	// The cached fill rate is zeroed by Increment; recompute lazily.
	if sketch.p == 0 {
		sketch.p = sketch.getP()
	}
	k := sketch.getEmptyRows(flow)
	// n is the per-sketch total addition count (see Increment).
	n := float64(sketch.n)
	m := sketch.m
	e := 0.0
	// Dealing with small multiplicities: when many rows are still empty,
	// a direct log-based estimator is used instead of the rho correction.
	if kp := k / (1 - sketch.p); kp > 0.3*sketch.m {
		e = -2 * sketch.m * math.Log(kp/sketch.m)
	} else {
		z := sketch.getZSum(flow)
		e = m * math.Pow(2, z/m) / sketch.rho(n, sketch.p)
	}
	return math.Abs(e)
}
|
package shared
import (
"fmt"
"os"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/ecs"
)
// FormatAwsError renders an AWS SDK error (including request failures) as a
// single human-readable string; non-AWS errors fall back to err.Error().
func FormatAwsError(err error) string {
	awsErr, ok := err.(awserr.Error)
	if !ok {
		return err.Error()
	}
	// Generic AWS Error with Code, Message, and original error (if any).
	result := fmt.Sprintf("%s %s %s", awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
	if reqErr, ok := err.(awserr.RequestFailure); ok {
		// A service error occurred. The original format string had four verbs
		// for three arguments (printing a "%!s(MISSING)" artifact) and used
		// %s for the integer status code; use %d and drop the extra verb.
		result = fmt.Sprintf("%s\n%d %s", result, reqErr.StatusCode(), reqErr.RequestID())
	}
	return result
}
// VerifyClusterExists verifies that the ECS cluster exists; it returns nil
// when exactly one matching cluster is found.
func VerifyClusterExists(ecs_obj *ecs.ECS, cluster string) error {
	params := &ecs.DescribeClustersInput{
		Clusters: []*string{
			aws.String(cluster),
		},
	}
	clusters, err := ecs_obj.DescribeClusters(params)
	if err != nil {
		return fmt.Errorf("Cannot verify if ECS cluster exists: %s", FormatAwsError(err))
	}
	if len(clusters.Clusters) == 0 {
		return fmt.Errorf("Error: ECS Cluster '%s' does not exist, cannot proceed.\n", cluster)
	}
	if len(clusters.Clusters) != 1 {
		return fmt.Errorf("Error: Unexpected number of ECS Clusters returned when searching for '%s'. Received: %+v\n", cluster, clusters.Clusters)
	}
	// The function previously fell off the end without returning, which does
	// not compile in Go; success is nil.
	return nil
}
// VerifyServiceExists verifies that the ECS service can be described in the
// given cluster; it returns nil on success.
func VerifyServiceExists(ecs_obj *ecs.ECS, cluster string, service string) error {
	params := &ecs.DescribeServicesInput{
		Cluster: &cluster,
		Services: []*string{ // Required
			aws.String(service), // Required
		},
	}
	_, err := ecs_obj.DescribeServices(params)
	if err != nil {
		return fmt.Errorf("Cannot verify if ECS service exists: %s", FormatAwsError(err))
	}
	// The function previously fell off the end without returning, which does
	// not compile in Go; success is nil.
	return nil
}
// GetContainerInstanceArnsForService returns the container instance ARNs that
// run RUNNING tasks of the given service in cluster, excluding
// local_container_instance_arn (this instance never returns itself).
func GetContainerInstanceArnsForService(ecs_obj *ecs.ECS, cluster string, service string, local_container_instance_arn string, debug bool) ([]string, error) {
	// Fetch a task list based on the service name.
	list_tasks_params := &ecs.ListTasksInput{
		Cluster:     &cluster,
		ServiceName: &service,
	}
	list_tasks_resp, list_tasks_err := ecs_obj.ListTasks(list_tasks_params)
	if list_tasks_err != nil {
		return []string{}, fmt.Errorf("Cannot retrieve ECS task list: %s", FormatAwsError(list_tasks_err))
	}
	if len(list_tasks_resp.TaskArns) <= 0 {
		// The previous fmt.Errorf calls in this function passed extra
		// arguments without format verbs, producing "%!(EXTRA ...)" noise.
		return []string{}, fmt.Errorf("No ECS tasks found with specified filter - cluster: %s, service: %s", cluster, service)
	}
	// Describe the tasks retrieved above.
	describe_tasks_params := &ecs.DescribeTasksInput{
		Cluster: &cluster,
		Tasks:   list_tasks_resp.TaskArns,
	}
	describe_tasks_resp, describe_tasks_err := ecs_obj.DescribeTasks(describe_tasks_params)
	if describe_tasks_err != nil {
		return []string{}, fmt.Errorf("Cannot retrieve ECS task details: %s", FormatAwsError(describe_tasks_err))
	}
	if len(describe_tasks_resp.Tasks) <= 0 {
		return []string{}, fmt.Errorf("No ECS task details found with specified filter - tasks: %s", strings.Join(aws.StringValueSlice(list_tasks_resp.TaskArns), ", "))
	}
	var result []string
	for _, value := range describe_tasks_resp.Tasks {
		if *value.LastStatus == "RUNNING" && *value.ContainerInstanceArn != local_container_instance_arn {
			result = append(result, *value.ContainerInstanceArn)
		} else if debug {
			fmt.Println(*value.ContainerInstanceArn, "is not in a RUNNING state, or is this instance (we dont return ourself). Excluded from results.")
		}
	}
	if len(result) == 0 {
		return []string{}, fmt.Errorf("No ECS task results found in RUNNING state, no ECS container instances to return.")
	}
	return result, nil
}
// GetEc2InstanceIdsFromContainerInstances resolves ECS container instance ARNs
// to their backing EC2 instance IDs, keeping only instances in ACTIVE state.
// When debug is true, excluded instances are logged to stdout.
func GetEc2InstanceIdsFromContainerInstances(ecs_obj *ecs.ECS, cluster string, container_instances []string, debug bool) ([]string, error) {
	params := &ecs.DescribeContainerInstancesInput{
		Cluster:            aws.String(cluster),
		ContainerInstances: aws.StringSlice(container_instances),
	}
	resp, err := ecs_obj.DescribeContainerInstances(params)
	if err != nil {
		return []string{}, fmt.Errorf("Cannot retrieve ECS container instance information: %s", FormatAwsError(err))
	}
	if len(resp.ContainerInstances) == 0 {
		// The original passed cluster/instances as extra args with no format
		// verbs, which rendered as %!(EXTRA ...) garbage.
		return []string{}, fmt.Errorf("No ECS container instances found with specified filter - cluster: %s - instances: %s", cluster, strings.Join(container_instances, ", "))
	}
	var result []string
	for _, value := range resp.ContainerInstances {
		if *value.Status == "ACTIVE" {
			result = append(result, *value.Ec2InstanceId)
		} else if debug {
			fmt.Println(*value.Ec2InstanceId, "is not in an ACTIVE state, excluded from results.")
		}
	}
	if len(result) == 0 {
		return []string{}, fmt.Errorf("No running ECS container instances found in result set, cannot proceed.")
	}
	return result, nil
}
// GetEc2PrivateIpsFromInstanceIds returns the private IPv4 addresses of the
// given EC2 instance IDs, keeping only instances in the "running" state.
// When debug is true, excluded instances are logged to stdout.
func GetEc2PrivateIpsFromInstanceIds(ec2_obj *ec2.EC2, instance_ids []string, debug bool) ([]string, error) {
	params := &ec2.DescribeInstancesInput{
		InstanceIds: aws.StringSlice(instance_ids),
	}
	resp, err := ec2_obj.DescribeInstances(params)
	if err != nil {
		return []string{}, fmt.Errorf("Cannot retrieve EC2 instance information: %s", FormatAwsError(err))
	}
	if len(resp.Reservations) == 0 {
		// The original passed the joined IDs as an extra arg with no format
		// verb, which rendered as %!(EXTRA ...) garbage.
		return []string{}, fmt.Errorf("No EC2 instances found (Reservations.*) with specified Instance IDs filter: %s", strings.Join(instance_ids, ", "))
	}
	// NOTE(review): only the first reservation is sanity-checked here, while
	// the loop below walks all reservations — confirm whether later empty
	// reservations should also be an error.
	if len(resp.Reservations[0].Instances) == 0 {
		return []string{}, fmt.Errorf("No EC2 instances found (Reservations[0].*) with specified Instance IDs filter: %s", strings.Join(instance_ids, ", "))
	}
	var result []string
	for _, reservation := range resp.Reservations {
		for _, value := range reservation.Instances {
			if *value.State.Name == "running" {
				result = append(result, *value.PrivateIpAddress)
			} else if debug {
				fmt.Println(*value.InstanceId, "is not in a running state, excluded from results.")
			}
		}
	}
	if len(result) == 0 {
		return []string{}, fmt.Errorf("No running EC2 instances found in result set, cannot proceed.")
	}
	return result, nil
}
Fix fmt.Errorf format-verb mismatches in error messages and add the missing return in VerifyServiceExists
package shared
import (
"fmt"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/ecs"
)
// FormatAwsError renders an error returned by the AWS SDK as a single string.
// For awserr.Error values it includes the code, message and original error;
// for request failures it appends the HTTP status code and request ID on a
// second line. Any other error is rendered via err.Error().
func FormatAwsError(err error) string {
	var result string
	if awsErr, ok := err.(awserr.Error); ok {
		// Generic AWS Error with Code, Message, and original error (if any)
		result = fmt.Sprintf("%s %s %s", awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
		if reqErr, ok := err.(awserr.RequestFailure); ok {
			// A service error occurred. The original format string had four
			// verbs for three arguments and used %s on the int StatusCode;
			// use %d and match verbs to arguments.
			result = fmt.Sprintf("%s\n%d %s", result, reqErr.StatusCode(), reqErr.RequestID())
		}
	} else {
		result = err.Error()
	}
	return result
}
// VerifyClusterExists checks that exactly one ECS cluster with the given name
// can be described, returning a descriptive error otherwise.
func VerifyClusterExists(ecs_obj *ecs.ECS, cluster string) error {
	params := &ecs.DescribeClustersInput{
		Clusters: []*string{
			aws.String(cluster),
		},
	}
	clusters, err := ecs_obj.DescribeClusters(params)
	if err != nil {
		return fmt.Errorf("Cannot verify if ECS cluster exists: %s", FormatAwsError(err))
	}
	// Error strings carry no trailing newline (Go convention); callers
	// decide the output layout.
	if len(clusters.Clusters) == 0 {
		return fmt.Errorf("Error: ECS Cluster '%s' does not exist, cannot proceed.", cluster)
	}
	if len(clusters.Clusters) != 1 {
		return fmt.Errorf("Error: Unexpected number of ECS Clusters returned when searching for '%s'. Received: %+v", cluster, clusters.Clusters)
	}
	return nil
}
// VerifyServiceExists checks that the named ECS service on the given cluster
// can be described, returning a descriptive error when the lookup fails.
func VerifyServiceExists(ecs_obj *ecs.ECS, cluster string, service string) error {
	input := &ecs.DescribeServicesInput{
		Cluster:  aws.String(cluster),
		Services: []*string{aws.String(service)},
	}
	if _, err := ecs_obj.DescribeServices(input); err != nil {
		return fmt.Errorf("Cannot verify if ECS service exists: %s", FormatAwsError(err))
	}
	return nil
}
// GetContainerInstanceArnsForService returns the container instance ARNs that
// currently host RUNNING tasks for the given service on the given cluster,
// excluding local_container_instance_arn (this instance never returns itself).
// When debug is true, excluded tasks are logged to stdout.
func GetContainerInstanceArnsForService(ecs_obj *ecs.ECS, cluster string, service string, local_container_instance_arn string, debug bool) ([]string, error) {
	// Fetch a task list based on the service name.
	list_tasks_params := &ecs.ListTasksInput{
		Cluster:     &cluster,
		ServiceName: &service,
	}
	list_tasks_resp, list_tasks_err := ecs_obj.ListTasks(list_tasks_params)
	if list_tasks_err != nil {
		return []string{}, fmt.Errorf("Cannot retrieve ECS task list: %s", FormatAwsError(list_tasks_err))
	}
	if len(list_tasks_resp.TaskArns) == 0 {
		// The original passed cluster/service as extra args with no format
		// verbs, which rendered as %!(EXTRA ...) garbage.
		return []string{}, fmt.Errorf("No ECS tasks found with specified filter - cluster: %s, service: %s", cluster, service)
	}
	// Describe the tasks retrieved above to learn their status and host.
	describe_tasks_params := &ecs.DescribeTasksInput{
		Cluster: &cluster,
		Tasks:   list_tasks_resp.TaskArns,
	}
	describe_tasks_resp, describe_tasks_err := ecs_obj.DescribeTasks(describe_tasks_params)
	if describe_tasks_err != nil {
		return []string{}, fmt.Errorf("Cannot retrieve ECS task details: %s", FormatAwsError(describe_tasks_err))
	}
	if len(describe_tasks_resp.Tasks) == 0 {
		return []string{}, fmt.Errorf("No ECS task details found with specified filter - tasks: %s", strings.Join(aws.StringValueSlice(list_tasks_resp.TaskArns), ", "))
	}
	var result []string
	for _, value := range describe_tasks_resp.Tasks {
		// Keep only instances hosting RUNNING tasks, and never ourself.
		if *value.LastStatus == "RUNNING" && *value.ContainerInstanceArn != local_container_instance_arn {
			result = append(result, *value.ContainerInstanceArn)
		} else if debug {
			fmt.Println(*value.ContainerInstanceArn, "is not in a RUNNING state, or is this instance (we dont return ourself). Excluded from results.")
		}
	}
	if len(result) == 0 {
		return []string{}, fmt.Errorf("No ECS task results found in RUNNING state, no ECS container instances to return.")
	}
	return result, nil
}
// GetEc2InstanceIdsFromContainerInstances resolves ECS container instance ARNs
// to their backing EC2 instance IDs, keeping only instances in ACTIVE state.
// When debug is true, excluded instances are logged to stdout.
func GetEc2InstanceIdsFromContainerInstances(ecs_obj *ecs.ECS, cluster string, container_instances []string, debug bool) ([]string, error) {
	params := &ecs.DescribeContainerInstancesInput{
		Cluster:            aws.String(cluster),
		ContainerInstances: aws.StringSlice(container_instances),
	}
	resp, err := ecs_obj.DescribeContainerInstances(params)
	if err != nil {
		return []string{}, fmt.Errorf("Cannot retrieve ECS container instance information: %s", FormatAwsError(err))
	}
	if len(resp.ContainerInstances) == 0 {
		// The original passed cluster/instances as extra args with no format
		// verbs, which rendered as %!(EXTRA ...) garbage.
		return []string{}, fmt.Errorf("No ECS container instances found with specified filter - cluster: %s - instances: %s", cluster, strings.Join(container_instances, ", "))
	}
	var result []string
	for _, value := range resp.ContainerInstances {
		if *value.Status == "ACTIVE" {
			result = append(result, *value.Ec2InstanceId)
		} else if debug {
			fmt.Println(*value.Ec2InstanceId, "is not in an ACTIVE state, excluded from results.")
		}
	}
	if len(result) == 0 {
		return []string{}, fmt.Errorf("No running ECS container instances found in result set, cannot proceed.")
	}
	return result, nil
}
// GetEc2PrivateIpsFromInstanceIds returns the private IPv4 addresses of the
// given EC2 instance IDs, keeping only instances in the "running" state.
// When debug is true, excluded instances are logged to stdout.
func GetEc2PrivateIpsFromInstanceIds(ec2_obj *ec2.EC2, instance_ids []string, debug bool) ([]string, error) {
	params := &ec2.DescribeInstancesInput{
		InstanceIds: aws.StringSlice(instance_ids),
	}
	resp, err := ec2_obj.DescribeInstances(params)
	if err != nil {
		return []string{}, fmt.Errorf("Cannot retrieve EC2 instance information: %s", FormatAwsError(err))
	}
	if len(resp.Reservations) == 0 {
		// The original passed the joined IDs as an extra arg with no format
		// verb, which rendered as %!(EXTRA ...) garbage.
		return []string{}, fmt.Errorf("No EC2 instances found (Reservations.*) with specified Instance IDs filter: %s", strings.Join(instance_ids, ", "))
	}
	// NOTE(review): only the first reservation is sanity-checked here, while
	// the loop below walks all reservations — confirm whether later empty
	// reservations should also be an error.
	if len(resp.Reservations[0].Instances) == 0 {
		return []string{}, fmt.Errorf("No EC2 instances found (Reservations[0].*) with specified Instance IDs filter: %s", strings.Join(instance_ids, ", "))
	}
	var result []string
	for _, reservation := range resp.Reservations {
		for _, value := range reservation.Instances {
			if *value.State.Name == "running" {
				result = append(result, *value.PrivateIpAddress)
			} else if debug {
				fmt.Println(*value.InstanceId, "is not in a running state, excluded from results.")
			}
		}
	}
	if len(result) == 0 {
		return []string{}, fmt.Errorf("No running EC2 instances found in result set, cannot proceed.")
	}
	return result, nil
}
|
// Copyright 2012 Joseph Hager. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package rog provides algorithms and data structures for creating roguelike games.
package main
import (
"github.com/ajhager/rog"
)
func main() {
rog.Open(48, 32, "rog")
for rog.IsOpen() {
rog.Set(20, 15, nil, nil, "Hello, 世界")
if rog.Key == "escape" {
rog.Close()
}
rog.Flush()
}
}
*/
package rog
import (
"fmt"
"image"
"image/draw"
"image/png"
"os"
"time"
"github.com/skelterjohn/go.wde"
_ "github.com/skelterjohn/go.wde/init"
)
var (
open = false
window wde.Window
console *Console
drawer func(draw.Image)
stats *timing
Mouse *mouse
Key string
)
// IsOpen returns whether the rog window is open or not.
func IsOpen() bool {
return open
}
// Open creates a window and a root console with size width by height cells.
func Open(width, height int, title string) (err error) {
window, err = wde.NewWindow(width*16, height*16)
if err != nil {
return
}
window.SetTitle(title)
window.Show()
console = NewConsole(width, height)
stats = new(timing)
Mouse = new(mouse)
go func() {
wde.Run()
}()
open = true
return
}
// Close shuts down the windowing system.
// No rog functions should be called after this.
func Close() {
open = false
window.Close()
wde.Stop()
}
// Screenshot will save the window buffer as an image to name.png.
func Screenshot(name string) (err error) {
file, err := os.Create(fmt.Sprintf("%v.%v", name, "png"))
if err != nil {
return
}
defer file.Close()
err = png.Encode(file, window.Screen())
return
}
// SetTitle changes the title of the window.
func SetTitle(title string) {
window.SetTitle(title)
}
// Flush renders the root console to the window.
func Flush() {
handleEvents()
if open {
console.Render(window.Screen())
if drawer != nil {
drawer(window.Screen())
}
window.FlushImage()
}
stats.Update(time.Now())
}
// SetDrawer registers a callback that runs after the console has been rendered, but before the buffer image is flushed to the window.
func SetDrawer(d func(draw.Image)) {
drawer = d
}
// Dt returns length of the last frame in seconds.
func Dt() float64 {
return stats.Dt
}
// Fps returns the number of rendered frames per second.
func Fps() int64 {
return stats.Fps
}
// Set draws on the root console.
func Set(x, y int, fg, bg interface{}, data string, rest ...interface{}) {
console.Set(x, y, fg, bg, data, rest...)
}
// Set draws on the root console with wrapping bounds of x, y, w, h.
func SetR(x, y, w, h int, fg, bg interface{}, data string, rest ...interface{}) {
console.SetR(x, y, w, h, fg, bg, data, rest...)
}
// Fill draws a rect on the root console.
func Fill(x, y, w, h int, fg, bg interface{}, ch rune) {
console.Fill(x, y, w, h, fg, bg, ch)
}
// Clear draws a rect over the entire root console.
func Clear(fg, bg interface{}, ch rune) {
console.Clear(fg, bg, ch)
}
// Width returns the width of the root console in cells.
func Width() int {
return console.Width()
}
// Height returns the height of the root console in cells.
func Height() int {
return console.Height()
}
// handleEvents drains at most one pending window event (non-blocking) and
// updates the global Mouse and Key state. Per-frame deltas and release flags
// are reset at the start of every call.
func handleEvents() {
	Mouse.DPos.X = 0
	Mouse.DPos.Y = 0
	Mouse.DCell.X = 0
	Mouse.DCell.Y = 0
	Mouse.Left.Released = false
	Mouse.Right.Released = false
	Mouse.Middle.Released = false
	Key = ""
	select {
	case ei := <-window.EventChan():
		switch e := ei.(type) {
		case wde.MouseMovedEvent:
			Mouse.Pos.X = e.Where.X
			Mouse.Pos.Y = e.Where.Y
			Mouse.DPos.X = e.From.X
			Mouse.DPos.Y = e.From.Y
			Mouse.Cell.X = e.Where.X / 16
			Mouse.Cell.Y = e.Where.Y / 16
			Mouse.DCell.X = e.From.X / 16
			Mouse.DCell.Y = e.From.Y / 16
		case wde.MouseDraggedEvent:
			// Dragging moves the cursor too; track it exactly like a move
			// (previously drags were silently ignored).
			Mouse.Pos.X = e.Where.X
			Mouse.Pos.Y = e.Where.Y
			Mouse.DPos.X = e.From.X
			Mouse.DPos.Y = e.From.Y
			Mouse.Cell.X = e.Where.X / 16
			Mouse.Cell.Y = e.Where.Y / 16
			Mouse.DCell.X = e.From.X / 16
			Mouse.DCell.Y = e.From.Y / 16
		case wde.MouseDownEvent:
			switch e.Which {
			case wde.LeftButton:
				Mouse.Left.Pressed = true
			case wde.RightButton:
				Mouse.Right.Pressed = true
			case wde.MiddleButton:
				// Was Mouse.Right.Pressed: middle clicks clobbered the
				// right-button state and Middle was never set.
				Mouse.Middle.Pressed = true
			}
		case wde.MouseUpEvent:
			switch e.Which {
			case wde.LeftButton:
				Mouse.Left.Pressed = false
				Mouse.Left.Released = true
			case wde.RightButton:
				Mouse.Right.Pressed = false
				Mouse.Right.Released = true
			case wde.MiddleButton:
				// Same fix as above for the release path.
				Mouse.Middle.Pressed = false
				Mouse.Middle.Released = true
			}
		case wde.KeyTypedEvent:
			Key = e.Key
		case wde.ResizeEvent:
		case wde.CloseEvent:
			Close()
		}
	default:
	}
}
type mouseButton struct {
Pressed, Released bool
}
type mouse struct {
Pos, DPos, Cell, DCell image.Point
Left, Right, Middle mouseButton
}
type timing struct {
Then, Now time.Time
Elapsed, Dt float64
Frames, Fps int64
}
func (t *timing) Update(now time.Time) {
t.Then = t.Now
t.Now = now
t.Dt = t.Now.Sub(t.Then).Seconds()
t.Elapsed += t.Dt
t.Frames += 1
if t.Elapsed >= 1 {
t.Fps = t.Frames
t.Frames = 0
t.Elapsed -= t.Elapsed
}
}
Handle mouse drag and middle mouse clicks
// Copyright 2012 Joseph Hager. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package rog provides algorithms and data structures for creating roguelike games.
package main
import (
"github.com/ajhager/rog"
)
func main() {
rog.Open(48, 32, "rog")
for rog.IsOpen() {
rog.Set(20, 15, nil, nil, "Hello, 世界")
if rog.Key == "escape" {
rog.Close()
}
rog.Flush()
}
}
*/
package rog
import (
"fmt"
"image"
"image/draw"
"image/png"
"os"
"time"
"github.com/skelterjohn/go.wde"
_ "github.com/skelterjohn/go.wde/init"
)
var (
open = false
window wde.Window
console *Console
drawer func(draw.Image)
stats *timing
Mouse *mouse
Key string
)
// IsOpen returns whether the rog window is open or not.
func IsOpen() bool {
return open
}
// Open creates a window and a root console with size width by height cells.
func Open(width, height int, title string) (err error) {
window, err = wde.NewWindow(width*16, height*16)
if err != nil {
return
}
window.SetTitle(title)
window.Show()
console = NewConsole(width, height)
stats = new(timing)
Mouse = new(mouse)
go func() {
wde.Run()
}()
open = true
return
}
// Close shuts down the windowing system.
// No rog functions should be called after this.
func Close() {
open = false
window.Close()
wde.Stop()
}
// Screenshot will save the window buffer as an image to name.png.
func Screenshot(name string) (err error) {
file, err := os.Create(fmt.Sprintf("%v.%v", name, "png"))
if err != nil {
return
}
defer file.Close()
err = png.Encode(file, window.Screen())
return
}
// SetTitle changes the title of the window.
func SetTitle(title string) {
window.SetTitle(title)
}
// Flush renders the root console to the window.
func Flush() {
handleEvents()
if open {
console.Render(window.Screen())
if drawer != nil {
drawer(window.Screen())
}
window.FlushImage()
}
stats.Update(time.Now())
}
// SetDrawer registers a callback that runs after the console has been rendered, but before the buffer image is flushed to the window.
func SetDrawer(d func(draw.Image)) {
drawer = d
}
// Dt returns length of the last frame in seconds.
func Dt() float64 {
return stats.Dt
}
// Fps returns the number of rendered frames per second.
func Fps() int64 {
return stats.Fps
}
// Set draws on the root console.
func Set(x, y int, fg, bg interface{}, data string, rest ...interface{}) {
console.Set(x, y, fg, bg, data, rest...)
}
// Set draws on the root console with wrapping bounds of x, y, w, h.
func SetR(x, y, w, h int, fg, bg interface{}, data string, rest ...interface{}) {
console.SetR(x, y, w, h, fg, bg, data, rest...)
}
// Fill draws a rect on the root console.
func Fill(x, y, w, h int, fg, bg interface{}, ch rune) {
console.Fill(x, y, w, h, fg, bg, ch)
}
// Clear draws a rect over the entire root console.
func Clear(fg, bg interface{}, ch rune) {
console.Clear(fg, bg, ch)
}
// Width returns the width of the root console in cells.
func Width() int {
return console.Width()
}
// Height returns the height of the root console in cells.
func Height() int {
return console.Height()
}
func handleEvents() {
Mouse.DPos.X = 0
Mouse.DPos.Y = 0
Mouse.DCell.X = 0
Mouse.DCell.Y = 0
Mouse.Left.Released = false
Mouse.Right.Released = false
Mouse.Middle.Released = false
Key = ""
select {
case ei := <-window.EventChan():
switch e := ei.(type) {
case wde.MouseMovedEvent:
Mouse.Pos.X = e.Where.X
Mouse.Pos.Y = e.Where.Y
Mouse.DPos.X = e.From.X
Mouse.DPos.Y = e.From.Y
Mouse.Cell.X = e.Where.X / 16
Mouse.Cell.Y = e.Where.Y / 16
Mouse.DCell.X = e.From.X / 16
Mouse.DCell.Y = e.From.Y / 16
case wde.MouseDraggedEvent:
Mouse.Pos.X = e.Where.X
Mouse.Pos.Y = e.Where.Y
Mouse.DPos.X = e.From.X
Mouse.DPos.Y = e.From.Y
Mouse.Cell.X = e.Where.X / 16
Mouse.Cell.Y = e.Where.Y / 16
Mouse.DCell.X = e.From.X / 16
Mouse.DCell.Y = e.From.Y / 16
case wde.MouseDownEvent:
switch e.Which {
case wde.LeftButton:
Mouse.Left.Pressed = true
case wde.RightButton:
Mouse.Right.Pressed = true
case wde.MiddleButton:
Mouse.Middle.Pressed = true
}
case wde.MouseUpEvent:
switch e.Which {
case wde.LeftButton:
Mouse.Left.Pressed = false
Mouse.Left.Released = true
case wde.RightButton:
Mouse.Right.Pressed = false
Mouse.Right.Released = true
case wde.MiddleButton:
Mouse.Middle.Pressed = false
Mouse.Middle.Released = true
}
case wde.KeyTypedEvent:
Key = e.Key
case wde.ResizeEvent:
case wde.CloseEvent:
Close()
}
default:
}
}
type mouseButton struct {
Pressed, Released bool
}
type mouse struct {
Pos, DPos, Cell, DCell image.Point
Left, Right, Middle mouseButton
}
type timing struct {
Then, Now time.Time
Elapsed, Dt float64
Frames, Fps int64
}
func (t *timing) Update(now time.Time) {
t.Then = t.Now
t.Now = now
t.Dt = t.Now.Sub(t.Then).Seconds()
t.Elapsed += t.Dt
t.Frames += 1
if t.Elapsed >= 1 {
t.Fps = t.Frames
t.Frames = 0
t.Elapsed -= t.Elapsed
}
}
|
/*
Go Language Raspberry Pi Interface
(c) Copyright David Thorpe 2016-2018
All Rights Reserved
Documentation http://djthorpe.github.io/gopi/
For Licensing and Usage information, please see LICENSE.md
*/
package gopi
import (
"context"
"net"
"regexp"
"time"
)
////////////////////////////////////////////////////////////////////////////////
// TYPES
// RPCServiceRecord defines a service which can be registered or discovered
// on the network
type RPCServiceRecord interface {
Name() string
Subtype() string
Service() string
Port() uint
Text() []string
Host() string
IP4() []net.IP
IP6() []net.IP
TTL() time.Duration
}
// RPCEventType is an enumeration of event types
type RPCEventType uint
// RPCFlag is a set of flags modifying behavior of client/service
type RPCFlag uint
// RPCNewClientFunc creates a new client with a network connection
// returns nil otherwise
type RPCNewClientFunc func(RPCClientConn) RPCClient
////////////////////////////////////////////////////////////////////////////////
// INTERFACES
// RPCServiceDiscovery is the driver for discovering services on the network using
// mDNS or another mechanism
type RPCServiceDiscovery interface {
Driver
Publisher
// Register a service record on the network, and cache it
Register(RPCServiceRecord) error
// Lookup service instances by name
Lookup(ctx context.Context, service string) ([]RPCServiceRecord, error)
// Return list of service names
EnumerateServices(ctx context.Context) ([]string, error)
// Return all cached service instances for a service name
ServiceInstances(service string) []RPCServiceRecord
}
// RPCService is a driver which implements all the necessary methods to
// handle remote calls
type RPCService interface {
Driver
// CancelRequests is called by the server to gracefully end any
// on-going streaming requests, but before the service is shutdown
CancelRequests() error
}
// RPCServer is the server which serves RPCModule methods to
// a remote RPCClient
type RPCServer interface {
Driver
Publisher
// Starts an RPC server in currently running thread.
// The method will not return until Stop is called
// which needs to be done in a different thread
Start() error
// Stop RPC server. If halt is true then it immediately
// ends the server without waiting for current requests to
// be served
Stop(halt bool) error
// Return address the server is bound to, or nil if
// the server is not running
Addr() net.Addr
// Return service record, or nil when the service record
// cannot be generated. The first version uses the current
// hostname as the name. You can also include text
// records.
Service(service string, text ...string) RPCServiceRecord
ServiceWithName(service, name string, text ...string) RPCServiceRecord
}
// RPCClientPool implements a pool of client connections for communicating
// with an RPC server and aides discovery new service records
type RPCClientPool interface {
Driver
Publisher
// Connect and disconnect
Connect(service RPCServiceRecord, flags RPCFlag) (RPCClientConn, error)
Disconnect(RPCClientConn) error
// Register clients and create new ones given a service name
RegisterClient(string, RPCNewClientFunc) error
NewClient(string, RPCClientConn) RPCClient
// Lookup service records by parameter - returns records
// which match either name or addr up to max number of records
// Can wait for new records and block until cancelled
Lookup(ctx context.Context, name, addr string, max int) ([]RPCServiceRecord, error)
}
// RPCClientConn implements a single client connection for
// communicating with an RPC server
type RPCClientConn interface {
Driver
// Mutex lock for the connection
Lock()
Unlock()
// Properties
Name() string
Addr() string
Connected() bool
Timeout() time.Duration
Services() ([]string, error)
}
// RPCClient contains the set of RPC methods. Currently
// anything can be an RPCClient
type RPCClient interface{}
////////////////////////////////////////////////////////////////////////////////
// CONSTANTS
const (
RPC_EVENT_NONE RPCEventType = iota
RPC_EVENT_SERVER_STARTED // RPC Server started
RPC_EVENT_SERVER_STOPPED // RPC Server stopped
RPC_EVENT_SERVICE_ADDED // Service instance lookup (new)
RPC_EVENT_SERVICE_UPDATED // Service instance lookup (updated)
RPC_EVENT_SERVICE_REMOVED // Service instance lookup (removed)
RPC_EVENT_SERVICE_EXPIRED // Service instance lookup (expired)
RPC_EVENT_SERVICE_NAME // Service name discovered
RPC_EVENT_CLIENT_CONNECTED
RPC_EVENT_CLIENT_DISCONNECTED
)
const (
RPC_FLAG_NONE RPCFlag = 0
RPC_FLAG_INET_UDP = (1 << iota) // Use UDP protocol (TCP assumed otherwise)
RPC_FLAG_INET_V4 = (1 << iota) // Use V4 addressing
RPC_FLAG_INET_V6 = (1 << iota) // Use V6 addressing
)
////////////////////////////////////////////////////////////////////////////////
// VARIABLES
var (
	// reService validates a service name: a letter followed by letters,
	// digits, or hyphens. The pattern is anchored so MatchString checks the
	// whole string rather than any substring, and the class is A-Za-z — the
	// original [A-za-z] accidentally spanned the punctuation between 'Z'
	// and 'a' in ASCII.
	reService = regexp.MustCompile(`^[A-Za-z][A-Za-z0-9\-]*$`)
)
func (t RPCEventType) String() string {
switch t {
case RPC_EVENT_NONE:
return "RPC_EVENT_NONE"
case RPC_EVENT_SERVER_STARTED:
return "RPC_EVENT_SERVER_STARTED"
case RPC_EVENT_SERVER_STOPPED:
return "RPC_EVENT_SERVER_STOPPED"
case RPC_EVENT_SERVICE_ADDED:
return "RPC_EVENT_SERVICE_ADDED"
case RPC_EVENT_SERVICE_UPDATED:
return "RPC_EVENT_SERVICE_UPDATED"
case RPC_EVENT_SERVICE_REMOVED:
return "RPC_EVENT_SERVICE_REMOVED"
case RPC_EVENT_SERVICE_EXPIRED:
return "RPC_EVENT_SERVICE_EXPIRED"
case RPC_EVENT_SERVICE_NAME:
return "RPC_EVENT_SERVICE_NAME"
case RPC_EVENT_CLIENT_CONNECTED:
return "RPC_EVENT_CLIENT_CONNECTED"
case RPC_EVENT_CLIENT_DISCONNECTED:
return "RPC_EVENT_CLIENT_DISCONNECTED"
default:
return "[?? Invalid RPCEventType value]"
}
}
////////////////////////////////////////////////////////////////////////////////
// RETURN DOMAIN FROM SERVICE TYPE
// RPCServiceType converts a bare service name into a DNS-SD style service
// type, e.g. "gopi" -> "_gopi._tcp" (or "._udp" when RPC_FLAG_INET_UDP is
// set). Returns ErrBadParameter when the name fails reService validation.
func RPCServiceType(service_type string, flags RPCFlag) (string, error) {
	if !reService.MatchString(service_type) {
		return "", ErrBadParameter
	}
	// TCP is the default transport unless the UDP flag is present.
	proto := "._tcp"
	if flags&RPC_FLAG_INET_UDP != 0 {
		proto = "._udp"
	}
	return "_" + service_type + proto, nil
}
Updated RPC code
/*
Go Language Raspberry Pi Interface
(c) Copyright David Thorpe 2016-2018
All Rights Reserved
Documentation http://djthorpe.github.io/gopi/
For Licensing and Usage information, please see LICENSE.md
*/
package gopi
import (
"context"
"net"
"time"
)
////////////////////////////////////////////////////////////////////////////////
// TYPES
// RPCServiceRecord defines a service which can be registered or discovered
// on the network
type RPCServiceRecord interface {
Name() string
Subtype() string
Service() string
Port() uint
Text() []string
Host() string
IP4() []net.IP
IP6() []net.IP
TTL() time.Duration
}
// RPCEventType is an enumeration of event types
type RPCEventType uint
// RPCFlag is a set of flags modifying behavior of client/service
type RPCFlag uint
// RPCNewClientFunc creates a new client with a network connection
// returns nil otherwise
type RPCNewClientFunc func(RPCClientConn) RPCClient
////////////////////////////////////////////////////////////////////////////////
// INTERFACES
// RPCServiceDiscovery is the driver for discovering services on the network using
// mDNS or another mechanism
type RPCServiceDiscovery interface {
Driver
Publisher
// Register a service record on the network, and cache it
Register(RPCServiceRecord) error
// Lookup service instances by name
Lookup(ctx context.Context, service string) ([]RPCServiceRecord, error)
// Return list of service names
EnumerateServices(ctx context.Context) ([]string, error)
// Return all cached service instances for a service name
ServiceInstances(service string) []RPCServiceRecord
}
// RPCService is a driver which implements all the necessary methods to
// handle remote calls
type RPCService interface {
Driver
// CancelRequests is called by the server to gracefully end any
// on-going streaming requests, but before the service is shutdown
CancelRequests() error
}
// RPCServer is the server which serves RPCModule methods to
// a remote RPCClient
type RPCServer interface {
Driver
Publisher
// Starts an RPC server in currently running thread.
// The method will not return until Stop is called
// which needs to be done in a different thread
Start() error
// Stop RPC server. If halt is true then it immediately
// ends the server without waiting for current requests to
// be served
Stop(halt bool) error
// Return address the server is bound to, or nil if
// the server is not running
Addr() net.Addr
// Return service record, or nil when the service record
// cannot be generated. The service should be of the format
// _<service>._tcp and the subtype can only be alphanumeric
Service(service, subtype, name string, text ...string) RPCServiceRecord
}
// RPCClientPool implements a pool of client connections for communicating
// with an RPC server and aides discovery new service records
type RPCClientPool interface {
Driver
Publisher
// Connect and disconnect
Connect(service RPCServiceRecord, flags RPCFlag) (RPCClientConn, error)
Disconnect(RPCClientConn) error
// Register clients and create new ones given a service name
RegisterClient(string, RPCNewClientFunc) error
NewClient(string, RPCClientConn) RPCClient
// Lookup service records by parameter - returns records
// which match either name or addr up to max number of records
// Can wait for new records and block until cancelled
Lookup(ctx context.Context, name, addr string, max int) ([]RPCServiceRecord, error)
}
// RPCClientConn implements a single client connection for
// communicating with an RPC server
type RPCClientConn interface {
Driver
// Mutex lock for the connection
Lock()
Unlock()
// Properties
Name() string
Addr() string
Connected() bool
Timeout() time.Duration
Services() ([]string, error)
}
// RPCClient contains the set of RPC methods. Currently
// anything can be an RPCClient
type RPCClient interface{}
////////////////////////////////////////////////////////////////////////////////
// CONSTANTS
const (
RPC_EVENT_NONE RPCEventType = iota
RPC_EVENT_SERVER_STARTED // RPC Server started
RPC_EVENT_SERVER_STOPPED // RPC Server stopped
RPC_EVENT_SERVICE_ADDED // Service instance lookup (new)
RPC_EVENT_SERVICE_UPDATED // Service instance lookup (updated)
RPC_EVENT_SERVICE_REMOVED // Service instance lookup (removed)
RPC_EVENT_SERVICE_EXPIRED // Service instance lookup (expired)
RPC_EVENT_SERVICE_NAME // Service name discovered
RPC_EVENT_CLIENT_CONNECTED
RPC_EVENT_CLIENT_DISCONNECTED
)
const (
RPC_FLAG_NONE RPCFlag = 0
RPC_FLAG_INET_UDP RPCFlag = (1 << iota) // Use UDP protocol (TCP assumed otherwise)
RPC_FLAG_INET_V4 RPCFlag = (1 << iota) // Use V4 addressing
RPC_FLAG_INET_V6 RPCFlag = (1 << iota) // Use V6 addressing
)
////////////////////////////////////////////////////////////////////////////////
// VARIABLES
// String returns the symbolic constant name for the event type, or a
// placeholder for values outside the known range.
func (t RPCEventType) String() string {
	names := [...]string{
		"RPC_EVENT_NONE",
		"RPC_EVENT_SERVER_STARTED",
		"RPC_EVENT_SERVER_STOPPED",
		"RPC_EVENT_SERVICE_ADDED",
		"RPC_EVENT_SERVICE_UPDATED",
		"RPC_EVENT_SERVICE_REMOVED",
		"RPC_EVENT_SERVICE_EXPIRED",
		"RPC_EVENT_SERVICE_NAME",
		"RPC_EVENT_CLIENT_CONNECTED",
		"RPC_EVENT_CLIENT_DISCONNECTED",
	}
	// RPCEventType is unsigned, so only the upper bound needs checking;
	// the table index order matches the iota declaration order.
	if int(t) < len(names) {
		return names[t]
	}
	return "[?? Invalid RPCEventType value]"
}
|
package pixel
import (
"github.com/faiface/mainthread"
"github.com/go-gl/glfw/v3.2/glfw"
)
// Run is essentialy the "main" function of Pixel. It exists mainly due to the technical
// limitations of OpenGL and operating systems. In short, all graphics and window manipulating
// calls must be done from the main thread. Run makes this possible.
//
// Call this function from the main function of your application. This is necessary, so that
// Run runs on the main thread.
//
// func run() {
// window := pixel.NewWindow(...)
// for {
// // your game's main loop
// }
// }
//
// func main() {
// pixel.Run(run)
// }
//
// You can spawn any number of goroutines from you run function and interact with Pixel
// concurrently. The only condition is that the Run function is be called from your main
// function.
// Run initializes GLFW, runs the given function on the main thread, and
// terminates GLFW when it returns.
func Run(run func()) {
	// GLFW must be initialized before any other GLFW call; without this,
	// deferring Terminate alone leaves the library unusable inside run.
	if err := glfw.Init(); err != nil {
		panic("pixel: failed to initialize GLFW: " + err.Error())
	}
	defer glfw.Terminate()
	mainthread.Run(run)
}
this should've been with prev commit
package pixel
import (
"github.com/faiface/mainthread"
"github.com/go-gl/glfw/v3.2/glfw"
"github.com/pkg/errors"
)
// Run is essentialy the "main" function of Pixel. It exists mainly due to the technical
// limitations of OpenGL and operating systems. In short, all graphics and window manipulating
// calls must be done from the main thread. Run makes this possible.
//
// Call this function from the main function of your application. This is necessary, so that
// Run runs on the main thread.
//
// func run() {
// window := pixel.NewWindow(...)
// for {
// // your game's main loop
// }
// }
//
// func main() {
// pixel.Run(run)
// }
//
// You can spawn any number of goroutines from you run function and interact with Pixel
// concurrently. The only condition is that the Run function is be called from your main
// function.
// Run initializes GLFW, executes run on the main thread, and tears GLFW
// down once run returns.
func Run(run func()) {
	// Scope err to the if-statement; any failure here is unrecoverable.
	if err := glfw.Init(); err != nil {
		panic(errors.Wrap(err, "failed to initialize GLFW"))
	}
	defer glfw.Terminate()
	mainthread.Run(run)
}
|
// Package blockfetcher aims to reduce boilerplate code that one needs to write
// over and over again when implementing a program that is processing blocks
// that are being fetched over steemd WebSocket RPC endpoint.
//
// All you need from now in is to import this package and implement BlockProcessor interface,
// then run the block fetcher with your custom BlockProcessor implementation:
//
// ctx, err := blockfetcher.Run(blockProcessor)
//
// You can wait for the fetcher to be done by calling
//
// err := ctx.Wait()
//
// In case you want to interrupt the process, just call
//
// ctx.Interrupt()
// err := ctx.Wait()
package blockfetcher
import (
"time"
"github.com/go-steem/rpc"
"github.com/pkg/errors"
"gopkg.in/tomb.v2"
)
// BlockProcessor is the interface that represents the block processing logic.
//
// When an error is returned from any of the following methods,
// the fetching process is interrupted and Finalize() is called.
type BlockProcessor interface {
// Initialise is called at the beginning to pass the RPC client to the block processor.
Initialise(client *rpc.Client) error
// BlockRange is called at the beginning to let the block fetching logic know
// what blocks to fetch and pass to ProcessBlock.
//
// In case blockRangeTo returned is 0, the fetcher will keep fetching new blocks
// forever as they arrive (until interrupted, of course).
BlockRange() (blockRangeFrom, blockRangeTo uint32)
// ProcessBlock is called when a new block is received.
ProcessBlock(block *rpc.Block) error
// Finalize is called when the whole block range is fetched or the process is interrupted.
// It is passed the first unprocessed block number, i.e. the block number that would be
// processed next had the fetching process continued.
//
// It can be remembered somehow and then returned from BlockRange() to just keep
// processing new blocks incrementally.
Finalize(nextBlockToProcess uint32) error
}
// Context represents a running block fetcher that can be interrupted.
type Context struct {
client *rpc.Client // RPC endpoint used to fetch blocks and chain state
processor BlockProcessor // user-supplied block processing logic
blockCh chan *rpc.Block // NOTE(review): allocated in Run but not read or written anywhere visible — confirm before removing
t tomb.Tomb // tracks the fetcher goroutine and carries the interrupt signal
}
// Run spawns a new block fetcher using the given BlockProcessor.
//
// The fetcher keeps fetching blocks until the whole block range specified is fetched
// or an error is encountered. It is not trying to be clever about closed connections and such.
//
// client.Close() is not called by this package, it has to be called manually.
func Run(client *rpc.Client, processor BlockProcessor) (*Context, error) {
	ctx := &Context{
		client:    client,
		processor: processor,
		blockCh:   make(chan *rpc.Block),
	}

	// Give the processor a chance to set itself up before any block arrives.
	if err := processor.Initialise(client); err != nil {
		return nil, errors.Wrap(err, "BlockProcessor.Initialise() failed")
	}

	// Run the fetching loop in its own goroutine, tracked by the tomb.
	ctx.t.Go(ctx.fetcher)
	return ctx, nil
}
// Interrupt asks the running fetcher to stop. Use Wait to block until it has.
func (ctx *Context) Interrupt() {
	ctx.t.Kill(nil)
}
// Wait blocks until the fetcher goroutine has finished and returns the first
// error it encountered, if any.
func (ctx *Context) Wait() error {
	return ctx.t.Wait()
}
// fetcher is the tomb-managed goroutine body: it runs either the closed-range
// loop or the watch loop, then always gives the processor a chance to finalize.
func (ctx *Context) fetcher() error {
	from, to := ctx.processor.BlockRange()

	var (
		next uint32
		err  error
	)
	// A zero upper bound means "follow new blocks forever".
	if to == 0 {
		next, err = ctx.blockWatcher(from)
	} else {
		next, err = ctx.blockFetcher(from, to)
	}

	// Finalize always runs; its error is reported only when fetching itself
	// succeeded, so the first failure wins.
	if fErr := ctx.processor.Finalize(next); fErr != nil && err == nil {
		err = errors.Wrap(fErr, "BlockProcessor.Finalize() failed")
	}
	return err
}
// blockWatcher follows the chain head indefinitely, starting at block `from`.
// It returns the next unprocessed block number together with the first error
// encountered; a nil error means Interrupt() was called.
func (ctx *Context) blockWatcher(from uint32) (uint32, error) {
next := from
// Get config once up front; the block interval is needed for the poll delay below.
config, err := ctx.client.GetConfig()
if err != nil {
return next, errors.Wrap(err, "failed to get steemd config")
}
// Keep polling the chain state and processing newly available blocks.
for {
// Get current properties.
props, err := ctx.client.GetDynamicGlobalProperties()
if err != nil {
return next, errors.Wrap(err, "failed to get steemd dynamic global properties")
}
// Process every block up to the last irreversible block number.
for ; props.LastIrreversibleBlockNum >= next; next++ {
if err := ctx.fetchAndProcess(next); err != nil {
return next, err
}
}
// Wait for STEEMIT_BLOCK_INTERVAL seconds before the next iteration.
// In case Interrupt() is called, we exit immediately.
select {
case <-time.After(time.Duration(config.SteemitBlockInterval) * time.Second):
case <-ctx.t.Dying():
return next, nil
}
}
}
// blockFetcher processes the closed range [from, to] and returns the next
// unprocessed block number; a nil error means completion or interruption.
func (ctx *Context) blockFetcher(from, to uint32) (uint32, error) {
	next := from
	// Reject an inverted range outright.
	if from > to {
		return next, errors.Errorf("invalid block range: [%v, %v]", from, to)
	}

	for next <= to {
		// Bail out promptly when Interrupt() has been called.
		select {
		case <-ctx.t.Dying():
			return next, nil
		default:
		}

		if err := ctx.fetchAndProcess(next); err != nil {
			return next, err
		}
		next++
	}

	// The whole range has been processed, we are done.
	return next, nil
}
// fetchAndProcess retrieves a single block and hands it to the processor.
// Panics raised along the way are converted into errors by handlePanic.
func (ctx *Context) fetchAndProcess(blockNum uint32) (err error) {
	defer handlePanic(&err)

	block, fetchErr := ctx.client.GetBlock(blockNum)
	if fetchErr != nil {
		return errors.Wrapf(fetchErr, "failed to fetch block %v", blockNum)
	}

	if procErr := ctx.processor.ProcessBlock(block); procErr != nil {
		return errors.Wrapf(procErr, "failed to process block %v", blockNum)
	}
	return nil
}
// handlePanic is meant to be deferred; it converts a recovered panic carrying
// an error or a string into *errPtr, and re-panics on anything else.
func handlePanic(errPtr *error) {
	r := recover()
	if r == nil {
		return
	}
	if err, ok := r.(error); ok {
		*errPtr = errors.Wrap(err, "panic recovered")
		return
	}
	if msg, ok := r.(string); ok {
		*errPtr = errors.New(msg)
		return
	}
	// Unknown payload: propagate the panic unchanged.
	panic(r)
}
Simplify the BlockProcessor interface
Get rid of the clever extras to make the interface as simple as possible. All the
data formerly passed around can in any case easily be stored in the BlockProcessor.
// Package blockfetcher aims to reduce boilerplate code that one needs to write
// over and over again when implementing a program that is processing blocks
// that are being fetched over steemd WebSocket RPC endpoint.
//
// All you need from now on is to import this package and implement BlockProcessor interface,
// then run the block fetcher with your custom BlockProcessor implementation:
//
// ctx, err := blockfetcher.Run(blockProcessor)
//
// You can wait for the fetcher to be done by calling
//
// err := ctx.Wait()
//
// In case you want to interrupt the process, just call
//
// ctx.Interrupt()
// err := ctx.Wait()
package blockfetcher
import (
"time"
"github.com/go-steem/rpc"
"github.com/pkg/errors"
"gopkg.in/tomb.v2"
)
// BlockProcessor is the interface that represents the block processing logic.
//
// When an error is returned from any of the following methods,
// the fetching process is interrupted and Finalize() is called.
type BlockProcessor interface {
// BlockRange is called at the beginning to let the block fetching logic know
// what blocks to fetch and pass to ProcessBlock.
//
// In case blockRangeTo returned is 0, the fetcher will keep fetching new blocks
// forever as they arrive (until interrupted, of course).
BlockRange() (blockRangeFrom, blockRangeTo uint32)
// ProcessBlock is called when a new block is received.
ProcessBlock(block *rpc.Block) error
// Finalize is called when the whole block range is fetched or the process is interrupted.
Finalize() error
}
// Context represents a running block fetcher that can be interrupted.
type Context struct {
client *rpc.Client // RPC endpoint used to fetch blocks and chain state
processor BlockProcessor // user-supplied block processing logic
blockCh chan *rpc.Block // NOTE(review): allocated in Run but not read or written anywhere visible — confirm before removing
t tomb.Tomb // tracks the fetcher goroutine and carries the interrupt signal
}
// Run spawns a new block fetcher using the given BlockProcessor.
//
// The fetcher keeps fetching blocks until the whole block range specified is fetched
// or an error is encountered. It is not trying to be clever about closed connections and such.
//
// client.Close() is not called by this package, it has to be called manually.
func Run(client *rpc.Client, processor BlockProcessor) (*Context, error) {
	ctx := &Context{
		client:    client,
		processor: processor,
		blockCh:   make(chan *rpc.Block),
	}
	// Run the fetching loop in its own goroutine, tracked by the tomb.
	ctx.t.Go(ctx.fetcher)
	return ctx, nil
}
// Interrupt signals the fetcher to stop; it does not wait for it to finish.
func (ctx *Context) Interrupt() {
	ctx.t.Kill(nil)
}
// Wait blocks until the fetcher has stopped, returning any error it hit.
func (ctx *Context) Wait() error {
	return ctx.t.Wait()
}
// fetcher is the tomb-managed goroutine body: it runs either the closed-range
// loop or the watch loop, then always gives the processor a chance to finalize.
func (ctx *Context) fetcher() error {
	from, to := ctx.processor.BlockRange()

	// A zero upper bound means "follow new blocks forever".
	var err error
	if to == 0 {
		err = ctx.blockWatcher(from)
	} else {
		err = ctx.blockFetcher(from, to)
	}

	// Finalize always runs; its error is reported only when fetching itself
	// succeeded, so the first failure wins.
	if fErr := ctx.processor.Finalize(); fErr != nil && err == nil {
		err = errors.Wrap(fErr, "BlockProcessor.Finalize() failed")
	}
	return err
}
// blockWatcher follows the chain head indefinitely, starting at block `from`.
// A nil return value means Interrupt() was called.
func (ctx *Context) blockWatcher(from uint32) error {
next := from
// Get config once up front; the block interval is needed for the poll delay below.
config, err := ctx.client.GetConfig()
if err != nil {
return errors.Wrap(err, "failed to get steemd config")
}
// Keep polling the chain state and processing newly available blocks.
for {
// Get current properties.
props, err := ctx.client.GetDynamicGlobalProperties()
if err != nil {
return errors.Wrap(err, "failed to get steemd dynamic global properties")
}
// Process every block up to the last irreversible block number.
for ; props.LastIrreversibleBlockNum >= next; next++ {
if err := ctx.fetchAndProcess(next); err != nil {
return err
}
}
// Wait for STEEMIT_BLOCK_INTERVAL seconds before the next iteration.
// In case Interrupt() is called, we exit immediately.
select {
case <-time.After(time.Duration(config.SteemitBlockInterval) * time.Second):
case <-ctx.t.Dying():
return nil
}
}
}
// blockFetcher processes the closed range [from, to]. A nil return value
// means the range completed or the fetcher was interrupted.
func (ctx *Context) blockFetcher(from, to uint32) error {
	// Reject an inverted range outright.
	if from > to {
		return errors.Errorf("invalid block range: [%v, %v]", from, to)
	}

	next := from
	for next <= to {
		// Bail out promptly when Interrupt() has been called.
		select {
		case <-ctx.t.Dying():
			return nil
		default:
		}

		if err := ctx.fetchAndProcess(next); err != nil {
			return err
		}
		next++
	}

	// The whole range has been processed, we are done.
	return nil
}
// fetchAndProcess retrieves a single block and hands it to the processor.
// Panics raised along the way are converted into errors by handlePanic.
func (ctx *Context) fetchAndProcess(blockNum uint32) (err error) {
	defer handlePanic(&err)

	block, fetchErr := ctx.client.GetBlock(blockNum)
	if fetchErr != nil {
		return errors.Wrapf(fetchErr, "failed to fetch block %v", blockNum)
	}

	if procErr := ctx.processor.ProcessBlock(block); procErr != nil {
		return errors.Wrapf(procErr, "failed to process block %v", blockNum)
	}
	return nil
}
// handlePanic is meant to be deferred; it converts a recovered panic carrying
// an error or a string into *errPtr, and re-panics on anything else.
func handlePanic(errPtr *error) {
	r := recover()
	if r == nil {
		return
	}
	switch payload := r.(type) {
	case error:
		*errPtr = errors.Wrap(payload, "panic recovered")
	case string:
		*errPtr = errors.New(payload)
	default:
		// Unknown payload: propagate the panic unchanged.
		panic(r)
	}
}
|
package buildah
import (
"encoding/json"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/containers/storage/pkg/ioutils"
digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh/terminal"
)
const (
// DefaultWorkingDir is used if none was specified.
DefaultWorkingDir = "/"
// DefaultRuntime is the default command to use to run the container.
DefaultRuntime = "runc"
)
// Terminal-mode values for RunOptions.Terminal.
const (
// DefaultTerminal indicates that this Run invocation should be
// connected to a pseudoterminal if we're connected to a terminal.
DefaultTerminal = iota
// WithoutTerminal indicates that this Run invocation should NOT be
// connected to a pseudoterminal.
WithoutTerminal
// WithTerminal indicates that this Run invocation should be connected
// to a pseudoterminal.
WithTerminal
)
// RunOptions can be used to alter how a command is run in the container.
type RunOptions struct {
// Hostname is the hostname we set for the running container.
Hostname string
// Runtime is the name of the command to run. It should accept the same arguments that runc does.
Runtime string
// Args adds global arguments for the runtime.
Args []string
// Mounts are additional mount points which we want to provide.
Mounts []specs.Mount
// Env is additional environment variables to set.
Env []string
// User is the user as whom to run the command.
User string
// WorkingDir is an override for the working directory.
WorkingDir string
// Cmd is an override for the configured default command.
Cmd []string
// Entrypoint is an override for the configured entry point.
Entrypoint []string
// NetworkDisabled puts the container into its own network namespace.
// When false, Run removes the network namespace from the generated
// spec, so the container shares the host's network.
NetworkDisabled bool
// Terminal provides a way to specify whether or not the command should
// be run with a pseudoterminal. By default (DefaultTerminal), a
// terminal is used if os.Stdout is connected to a terminal, but that
// decision can be overridden by specifying either WithTerminal or
// WithoutTerminal.
Terminal int
}
// setupMounts assembles the final mount list for the container spec:
// caller-supplied mounts take priority, then the spec's generated mounts,
// bind files, secrets mounts, and temporary volume copies are appended
// unless an earlier mount already claims the same destination.
func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, optionMounts []specs.Mount, bindFiles, volumes []string) error {
// The passed-in mounts matter the most to us.
mounts := make([]specs.Mount, len(optionMounts))
copy(mounts, optionMounts)
// haveMount reports whether an already-collected mount targets destination.
haveMount := func(destination string) bool {
for _, mount := range mounts {
if mount.Destination == destination {
// Already have something to mount there.
return true
}
}
return false
}
// Add mounts from the generated list, unless they conflict.
for _, specMount := range spec.Mounts {
if haveMount(specMount.Destination) {
// Already have something to mount there, so skip this one.
continue
}
mounts = append(mounts, specMount)
}
// Add bind mounts for important files, unless they conflict.
for _, boundFile := range bindFiles {
if haveMount(boundFile) {
// Already have something to mount there, so skip this one.
continue
}
mounts = append(mounts, specs.Mount{
Source: boundFile,
Destination: boundFile,
Type: "bind",
Options: []string{"rbind", "ro"},
})
}
cdir, err := b.store.ContainerDirectory(b.ContainerID)
if err != nil {
return errors.Wrapf(err, "error determining work directory for container %q", b.ContainerID)
}
// Add secrets mounts
mountsFiles := []string{OverrideMountsFile, b.DefaultMountsFilePath}
for _, file := range mountsFiles {
// NOTE(review): this variable shadows the secretMounts function from here on,
// and a failure is only logged (err is not returned) — confirm both are intended.
secretMounts, err := secretMounts(file, b.MountLabel, cdir)
if err != nil {
logrus.Warn("error mounting secrets, skipping...")
}
for _, mount := range secretMounts {
if haveMount(mount.Destination) {
continue
}
mounts = append(mounts, mount)
}
}
// Add temporary copies of the contents of volume locations at the
// volume locations, unless we already have something there.
for _, volume := range volumes {
if haveMount(volume) {
// Already mounting something there, no need to bother.
continue
}
// Hash the volume path so per-volume directories cannot collide.
subdir := digest.Canonical.FromString(volume).Hex()
volumePath := filepath.Join(cdir, "buildah-volumes", subdir)
// If we need to, initialize the volume path's initial contents.
if _, err = os.Stat(volumePath); os.IsNotExist(err) {
if err = os.MkdirAll(volumePath, 0755); err != nil {
return errors.Wrapf(err, "error creating directory %q for volume %q in container %q", volumePath, volume, b.ContainerID)
}
// Seed the volume with whatever the image already has at that path.
srcPath := filepath.Join(mountPoint, volume)
if err = copyFileWithTar(srcPath, volumePath); err != nil && !os.IsNotExist(err) {
return errors.Wrapf(err, "error populating directory %q for volume %q in container %q using contents of %q", volumePath, volume, b.ContainerID, srcPath)
}
}
// Add the bind mount.
mounts = append(mounts, specs.Mount{
Source: volumePath,
Destination: volume,
Type: "bind",
Options: []string{"bind"},
})
}
// Set the list in the spec.
spec.Mounts = mounts
return nil
}
// Run runs the specified command in the container's root filesystem.
// It builds an OCI runtime spec into a temporary bundle directory, mounts
// the container, and invokes the configured runtime (runc by default),
// wiring the runtime's stdio to this process's stdio.
func (b *Builder) Run(command []string, options RunOptions) error {
var user specs.User
// Bundle directory that will hold config.json; removed again on return.
path, err := ioutil.TempDir(os.TempDir(), Package)
if err != nil {
return err
}
logrus.Debugf("using %q to hold bundle data", path)
defer func() {
if err2 := os.RemoveAll(path); err2 != nil {
logrus.Errorf("error removing %q: %v", path, err2)
}
}()
g := generate.New()
// Builder env first, then option env, so options override on duplicates
// only to the extent the generator resolves repeated keys.
for _, envSpec := range append(b.Env(), options.Env...) {
env := strings.SplitN(envSpec, "=", 2)
if len(env) > 1 {
g.AddProcessEnv(env[0], env[1])
}
}
// Explicit command wins; otherwise fall back to (possibly overridden)
// entrypoint + cmd from the builder configuration.
if len(command) > 0 {
g.SetProcessArgs(command)
} else {
cmd := b.Cmd()
if len(options.Cmd) > 0 {
cmd = options.Cmd
}
ep := b.Entrypoint()
if len(options.Entrypoint) > 0 {
ep = options.Entrypoint
}
g.SetProcessArgs(append(ep, cmd...))
}
if options.WorkingDir != "" {
g.SetProcessCwd(options.WorkingDir)
} else if b.WorkDir() != "" {
g.SetProcessCwd(b.WorkDir())
}
if options.Hostname != "" {
g.SetHostname(options.Hostname)
} else if b.Hostname() != "" {
g.SetHostname(b.Hostname())
}
g.SetProcessSelinuxLabel(b.ProcessLabel)
g.SetLinuxMountLabel(b.MountLabel)
// Mount the container; unmounted again when Run returns.
mountPoint, err := b.Mount(b.MountLabel)
if err != nil {
return err
}
defer func() {
if err2 := b.Unmount(); err2 != nil {
logrus.Errorf("error unmounting container: %v", err2)
}
}()
g.SetRootPath(mountPoint)
switch options.Terminal {
case DefaultTerminal:
// Follow whatever os.Stdout is: terminal in, terminal out.
g.SetProcessTerminal(terminal.IsTerminal(int(os.Stdout.Fd())))
case WithTerminal:
g.SetProcessTerminal(true)
case WithoutTerminal:
g.SetProcessTerminal(false)
}
if !options.NetworkDisabled {
// Removing the namespace makes the container share the host network.
if err = g.RemoveLinuxNamespace("network"); err != nil {
return errors.Wrapf(err, "error removing network namespace for run")
}
}
// Resolve the user inside the container's filesystem.
if options.User != "" {
user, err = getUser(mountPoint, options.User)
} else {
user, err = getUser(mountPoint, b.User())
}
if err != nil {
return err
}
g.SetProcessUID(user.UID)
g.SetProcessGID(user.GID)
spec := g.Spec()
if spec.Process.Cwd == "" {
spec.Process.Cwd = DefaultWorkingDir
}
if err = os.MkdirAll(filepath.Join(mountPoint, spec.Process.Cwd), 0755); err != nil {
return errors.Wrapf(err, "error ensuring working directory %q exists", spec.Process.Cwd)
}
// Files the container needs for name resolution etc.
bindFiles := []string{"/etc/hosts", "/etc/resolv.conf"}
err = b.setupMounts(mountPoint, spec, options.Mounts, bindFiles, b.Volumes())
if err != nil {
return errors.Wrapf(err, "error resolving mountpoints for container")
}
// Write the finished spec into the bundle directory.
specbytes, err := json.Marshal(spec)
if err != nil {
return err
}
err = ioutils.AtomicWriteFile(filepath.Join(path, "config.json"), specbytes, 0600)
if err != nil {
return errors.Wrapf(err, "error storing runtime configuration")
}
logrus.Debugf("config = %v", string(specbytes))
runtime := options.Runtime
if runtime == "" {
runtime = DefaultRuntime
}
// Invoke e.g. `runc run -b <bundle> <container-name>` with inherited stdio.
args := append(options.Args, "run", "-b", path, Package+"-"+b.ContainerID)
cmd := exec.Command(runtime, args...)
cmd.Dir = mountPoint
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err = cmd.Run()
if err != nil {
logrus.Debugf("error running runc %v: %v", spec.Process.Args, err)
}
return err
}
Need to block access to kernel file systems in /proc and /sys
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
Closes: #333
Approved by: TomSweeneyRedHat
package buildah
import (
"encoding/json"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/containers/storage/pkg/ioutils"
digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh/terminal"
)
const (
// DefaultWorkingDir is used if none was specified.
DefaultWorkingDir = "/"
// DefaultRuntime is the default command to use to run the container.
DefaultRuntime = "runc"
)
// Terminal-mode values for RunOptions.Terminal.
const (
// DefaultTerminal indicates that this Run invocation should be
// connected to a pseudoterminal if we're connected to a terminal.
DefaultTerminal = iota
// WithoutTerminal indicates that this Run invocation should NOT be
// connected to a pseudoterminal.
WithoutTerminal
// WithTerminal indicates that this Run invocation should be connected
// to a pseudoterminal.
WithTerminal
)
// RunOptions can be used to alter how a command is run in the container.
type RunOptions struct {
// Hostname is the hostname we set for the running container.
Hostname string
// Runtime is the name of the command to run. It should accept the same arguments that runc does.
Runtime string
// Args adds global arguments for the runtime.
Args []string
// Mounts are additional mount points which we want to provide.
Mounts []specs.Mount
// Env is additional environment variables to set.
Env []string
// User is the user as whom to run the command.
User string
// WorkingDir is an override for the working directory.
WorkingDir string
// Cmd is an override for the configured default command.
Cmd []string
// Entrypoint is an override for the configured entry point.
Entrypoint []string
// NetworkDisabled puts the container into its own network namespace.
// When false, Run removes the network namespace from the generated
// spec, so the container shares the host's network.
NetworkDisabled bool
// Terminal provides a way to specify whether or not the command should
// be run with a pseudoterminal. By default (DefaultTerminal), a
// terminal is used if os.Stdout is connected to a terminal, but that
// decision can be overridden by specifying either WithTerminal or
// WithoutTerminal.
Terminal int
}
// setupMounts assembles the final mount list for the container spec:
// caller-supplied mounts take priority, then the spec's generated mounts,
// bind files, secrets mounts, and temporary volume copies are appended
// unless an earlier mount already claims the same destination.
func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, optionMounts []specs.Mount, bindFiles, volumes []string) error {
// The passed-in mounts matter the most to us.
mounts := make([]specs.Mount, len(optionMounts))
copy(mounts, optionMounts)
// haveMount reports whether an already-collected mount targets destination.
haveMount := func(destination string) bool {
for _, mount := range mounts {
if mount.Destination == destination {
// Already have something to mount there.
return true
}
}
return false
}
// Add mounts from the generated list, unless they conflict.
for _, specMount := range spec.Mounts {
if haveMount(specMount.Destination) {
// Already have something to mount there, so skip this one.
continue
}
mounts = append(mounts, specMount)
}
// Add bind mounts for important files, unless they conflict.
for _, boundFile := range bindFiles {
if haveMount(boundFile) {
// Already have something to mount there, so skip this one.
continue
}
mounts = append(mounts, specs.Mount{
Source: boundFile,
Destination: boundFile,
Type: "bind",
Options: []string{"rbind", "ro"},
})
}
cdir, err := b.store.ContainerDirectory(b.ContainerID)
if err != nil {
return errors.Wrapf(err, "error determining work directory for container %q", b.ContainerID)
}
// Add secrets mounts
mountsFiles := []string{OverrideMountsFile, b.DefaultMountsFilePath}
for _, file := range mountsFiles {
// NOTE(review): this variable shadows the secretMounts function from here on,
// and a failure is only logged (err is not returned) — confirm both are intended.
secretMounts, err := secretMounts(file, b.MountLabel, cdir)
if err != nil {
logrus.Warn("error mounting secrets, skipping...")
}
for _, mount := range secretMounts {
if haveMount(mount.Destination) {
continue
}
mounts = append(mounts, mount)
}
}
// Add temporary copies of the contents of volume locations at the
// volume locations, unless we already have something there.
for _, volume := range volumes {
if haveMount(volume) {
// Already mounting something there, no need to bother.
continue
}
// Hash the volume path so per-volume directories cannot collide.
subdir := digest.Canonical.FromString(volume).Hex()
volumePath := filepath.Join(cdir, "buildah-volumes", subdir)
// If we need to, initialize the volume path's initial contents.
if _, err = os.Stat(volumePath); os.IsNotExist(err) {
if err = os.MkdirAll(volumePath, 0755); err != nil {
return errors.Wrapf(err, "error creating directory %q for volume %q in container %q", volumePath, volume, b.ContainerID)
}
// Seed the volume with whatever the image already has at that path.
srcPath := filepath.Join(mountPoint, volume)
if err = copyFileWithTar(srcPath, volumePath); err != nil && !os.IsNotExist(err) {
return errors.Wrapf(err, "error populating directory %q for volume %q in container %q using contents of %q", volumePath, volume, b.ContainerID, srcPath)
}
}
// Add the bind mount.
mounts = append(mounts, specs.Mount{
Source: volumePath,
Destination: volume,
Type: "bind",
Options: []string{"bind"},
})
}
// Set the list in the spec.
spec.Mounts = mounts
return nil
}
// Run runs the specified command in the container's root filesystem.
// It builds an OCI runtime spec into a temporary bundle directory, masks
// sensitive kernel interfaces under /proc and /sys, mounts the container,
// and invokes the configured runtime (runc by default), wiring the
// runtime's stdio to this process's stdio.
func (b *Builder) Run(command []string, options RunOptions) error {
var user specs.User
// Bundle directory that will hold config.json; removed again on return.
path, err := ioutil.TempDir(os.TempDir(), Package)
if err != nil {
return err
}
logrus.Debugf("using %q to hold bundle data", path)
defer func() {
if err2 := os.RemoveAll(path); err2 != nil {
logrus.Errorf("error removing %q: %v", path, err2)
}
}()
g := generate.New()
// Builder env first, then option env, so options override on duplicates
// only to the extent the generator resolves repeated keys.
for _, envSpec := range append(b.Env(), options.Env...) {
env := strings.SplitN(envSpec, "=", 2)
if len(env) > 1 {
g.AddProcessEnv(env[0], env[1])
}
}
// Explicit command wins; otherwise fall back to (possibly overridden)
// entrypoint + cmd from the builder configuration.
if len(command) > 0 {
g.SetProcessArgs(command)
} else {
cmd := b.Cmd()
if len(options.Cmd) > 0 {
cmd = options.Cmd
}
ep := b.Entrypoint()
if len(options.Entrypoint) > 0 {
ep = options.Entrypoint
}
g.SetProcessArgs(append(ep, cmd...))
}
if options.WorkingDir != "" {
g.SetProcessCwd(options.WorkingDir)
} else if b.WorkDir() != "" {
g.SetProcessCwd(b.WorkDir())
}
if options.Hostname != "" {
g.SetHostname(options.Hostname)
} else if b.Hostname() != "" {
g.SetHostname(b.Hostname())
}
g.SetProcessSelinuxLabel(b.ProcessLabel)
g.SetLinuxMountLabel(b.MountLabel)
// Mount the container; unmounted again when Run returns.
mountPoint, err := b.Mount(b.MountLabel)
if err != nil {
return err
}
defer func() {
if err2 := b.Unmount(); err2 != nil {
logrus.Errorf("error unmounting container: %v", err2)
}
}()
// Hide these kernel interfaces from the container entirely.
for _, mp := range []string{
"/proc/kcore",
"/proc/latency_stats",
"/proc/timer_list",
"/proc/timer_stats",
"/proc/sched_debug",
"/proc/scsi",
"/sys/firmware",
} {
g.AddLinuxMaskedPaths(mp)
}
// Expose these kernel interfaces read-only.
for _, rp := range []string{
"/proc/asound",
"/proc/bus",
"/proc/fs",
"/proc/irq",
"/proc/sys",
"/proc/sysrq-trigger",
} {
g.AddLinuxReadonlyPaths(rp)
}
g.SetRootPath(mountPoint)
switch options.Terminal {
case DefaultTerminal:
// Follow whatever os.Stdout is: terminal in, terminal out.
g.SetProcessTerminal(terminal.IsTerminal(int(os.Stdout.Fd())))
case WithTerminal:
g.SetProcessTerminal(true)
case WithoutTerminal:
g.SetProcessTerminal(false)
}
if !options.NetworkDisabled {
// Removing the namespace makes the container share the host network.
if err = g.RemoveLinuxNamespace("network"); err != nil {
return errors.Wrapf(err, "error removing network namespace for run")
}
}
// Resolve the user inside the container's filesystem.
if options.User != "" {
user, err = getUser(mountPoint, options.User)
} else {
user, err = getUser(mountPoint, b.User())
}
if err != nil {
return err
}
g.SetProcessUID(user.UID)
g.SetProcessGID(user.GID)
spec := g.Spec()
if spec.Process.Cwd == "" {
spec.Process.Cwd = DefaultWorkingDir
}
if err = os.MkdirAll(filepath.Join(mountPoint, spec.Process.Cwd), 0755); err != nil {
return errors.Wrapf(err, "error ensuring working directory %q exists", spec.Process.Cwd)
}
// Files the container needs for name resolution etc.
bindFiles := []string{"/etc/hosts", "/etc/resolv.conf"}
err = b.setupMounts(mountPoint, spec, options.Mounts, bindFiles, b.Volumes())
if err != nil {
return errors.Wrapf(err, "error resolving mountpoints for container")
}
// Write the finished spec into the bundle directory.
specbytes, err := json.Marshal(spec)
if err != nil {
return err
}
err = ioutils.AtomicWriteFile(filepath.Join(path, "config.json"), specbytes, 0600)
if err != nil {
return errors.Wrapf(err, "error storing runtime configuration")
}
logrus.Debugf("config = %v", string(specbytes))
runtime := options.Runtime
if runtime == "" {
runtime = DefaultRuntime
}
// Invoke e.g. `runc run -b <bundle> <container-name>` with inherited stdio.
args := append(options.Args, "run", "-b", path, Package+"-"+b.ContainerID)
cmd := exec.Command(runtime, args...)
cmd.Dir = mountPoint
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err = cmd.Run()
if err != nil {
logrus.Debugf("error running runc %v: %v", spec.Process.Args, err)
}
return err
}
|
/*
Package test161 implements a library for testing OS/161 kernels. We use expect
to drive the sys161 system simulator and collect useful output using the stat
socket.
*/
package test161
import (
"bytes"
"errors"
"fmt"
"github.com/kr/pty"
"github.com/ops-class/test161/expect"
"github.com/termie/go-shutil"
"io"
"io/ioutil"
"os"
"os/exec"
"path"
"regexp"
"sync"
"time"
)
// KERNEL_PROMPT is the exact prompt string printed by the OS/161 kernel menu.
const KERNEL_PROMPT = `OS/161 kernel [? for menu]: `
// SHELL_PROMPT is the exact prompt string printed by the OS/161 user shell.
const SHELL_PROMPT = `OS/161$ `
// PROMPT_PATTERN matches either prompt (kernel menu or shell) at the end of output.
const PROMPT_PATTERN = `(OS/161 kernel \[\? for menu\]\:|OS/161\$)\s$`
// BOOT — presumably a sentinel command index marking boot output; not
// referenced in this chunk, confirm against callers.
const BOOT = -1
// Test describes a single test161 test: its metadata and configuration as
// parsed from the test file (Input), plus the state and results collected
// while Run drives the sys161 simulator (Output and private fields).
type Test struct {
// Input
// Metadata
Name string `yaml:"name" json:"name"`
Description string `yaml:"description" json:"description"`
Tags []string `yaml:"tags" json:"tags"`
Depends []string `yaml:"depends" json:"depends"`
// Configuration chunks
Sys161 Sys161Conf `yaml:"sys161" json:"sys161"`
Stat StatConf `yaml:"stat" json:"stat"`
Monitor MonitorConf `yaml:"monitor" json:"monitor"`
Misc MiscConf `yaml:"misc" json:"misc"`
// Actual test commands to run
Content string `fm:"content" yaml:"-" json:"-"`
// Big lock that protects most fields shared between Run and getStats
L *sync.Mutex `json:"-"`
// Output
ConfString string `json:"confstring"` // Only set once
WallTime TimeFixedPoint `json:"walltime"` // Protected by L
SimTime TimeFixedPoint `json:"simtime"` // Protected by L
Commands []Command `json:"commands"` // Protected by L
Status []Status `json:"status"` // Protected by L
// Unprotected private fields
tempDir string // Only set once
startTime int64 // Only set once
statStarted bool // Only changed once
sys161 *expect.Expect // Protected by L
progressTime float64 // Protected by L
currentCommand *Command // Protected by L
commandCounter uint // Protected by L
currentOutput OutputLine // Protected by L
// Fields used by getStats but shared with Run
statCond *sync.Cond // Used by the main loop to wait for stat reception
statRecord bool // Protected by statCond.L
statMonitor bool // Protected by statCond.L
// Output channels
statChan chan Stat // Nonblocking write
}
// Command records one driven command: what was typed, the output it
// produced, and the stats collected while it ran.
type Command struct {
Type string `json:"type"`
Input InputLine `json:"input"`
Output []OutputLine `json:"output"`
SummaryStats Stat `json:"summarystats"`
AllStats []Stat `json:"stats"`
Monitored bool `json:"monitored"`
}
// InputLine is a single line sent to the simulator, stamped with both
// wall-clock and simulated time.
type InputLine struct {
WallTime TimeFixedPoint `json:"walltime"`
SimTime TimeFixedPoint `json:"simtime"`
Line string `json:"line"`
}
// OutputLine is a single line of simulator output. Buffer accumulates the
// raw bytes during capture and is excluded from JSON; Line holds the
// finished string.
type OutputLine struct {
WallTime TimeFixedPoint `json:"walltime"`
SimTime TimeFixedPoint `json:"simtime"`
Buffer bytes.Buffer `json:"-"`
Line string `json:"line"`
}
// Status is a timestamped test lifecycle event (e.g. "started", "timeout",
// "crash") with an optional human-readable message.
type Status struct {
WallTime TimeFixedPoint `json:"walltime"`
SimTime TimeFixedPoint `json:"simtime"`
Status string `json:"status"`
Message string `json:"message"`
}
// TimeFixedPoint is a timestamp in seconds, serialized with fixed
// microsecond precision rather than Go's default float formatting.
type TimeFixedPoint float64

// MarshalJSON renders the value as an unquoted fixed-point decimal with six
// digits after the point, e.g. 1.500000.
func (t TimeFixedPoint) MarshalJSON() ([]byte, error) {
	formatted := fmt.Sprintf("%.6f", float64(t))
	return []byte(formatted), nil
}
// getWallTime returns the wall-clock time elapsed since t.startTime as a
// TimeFixedPoint in seconds.
func (t *Test) getWallTime() TimeFixedPoint {
	elapsedNanos := time.Now().UnixNano() - t.startTime
	return TimeFixedPoint(float64(elapsedNanos) / float64(1000*1000*1000))
}
// Run a test161 test.
//
// Run stages the sys161 root into a temp directory, writes an alternate
// configuration, creates disks if configured, boots the simulator under
// expect, and then drives each configured command in turn, collecting
// output and status events. Failures to drive the simulator (timeouts,
// crashes, expect errors) are recorded as Status entries and return nil;
// only setup failures return a non-nil error.
func (t *Test) Run(root string) (err error) {
// Merge in test161 defaults for any missing configuration values
err = t.MergeConf(CONF_DEFAULTS)
if err != nil {
t.addStatus("aborted", "")
return err
}
// Create temp directory.
tempRoot, err := ioutil.TempDir(t.Misc.TempDir, "test161")
if err != nil {
t.addStatus("aborted", "")
return err
}
defer os.RemoveAll(tempRoot)
t.tempDir = path.Join(tempRoot, "root")
// Copy root so the original tree is never modified.
err = shutil.CopyTree(root, t.tempDir, nil)
if err != nil {
t.addStatus("aborted", "")
return err
}
// Make sure we have a kernel.
kernelTarget := path.Join(t.tempDir, "kernel")
_, err = os.Stat(kernelTarget)
if err != nil {
t.addStatus("aborted", "")
return err
}
// Generate an alternate configuration to prevent collisions.
confTarget := path.Join(t.tempDir, "test161.conf")
t.ConfString, err = t.PrintConf()
if err != nil {
t.addStatus("aborted", "")
return err
}
err = ioutil.WriteFile(confTarget, []byte(t.ConfString), 0440)
if err != nil {
t.addStatus("aborted", "")
return err
}
if _, err := os.Stat(confTarget); os.IsNotExist(err) {
t.addStatus("aborted", "")
return err
}
// Create disks.
if t.Sys161.Disk1.Enabled == "true" {
create := exec.Command("disk161", "create", "LHD0.img", t.Sys161.Disk1.Bytes)
create.Dir = t.tempDir
err = create.Run()
if err != nil {
t.addStatus("aborted", "")
return err
}
}
if t.Sys161.Disk2.Enabled == "true" {
create := exec.Command("disk161", "create", "LHD1.img", t.Sys161.Disk2.Bytes)
create.Dir = t.tempDir
err = create.Run()
if err != nil {
t.addStatus("aborted", "")
return err
}
}
// Serialize the current command state.
t.L = &sync.Mutex{}
// Coordinated with the getStat goroutine. I don't think that a channel
// would work here.
t.statCond = &sync.Cond{L: &sync.Mutex{}}
// Initialize stat channel. Closed by getStats
t.statChan = make(chan Stat)
// Record stats during boot, but don't activate the monitor.
t.statRecord = true
t.statMonitor = false
// Wait for either kernel or user prompts.
prompts := regexp.MustCompile(PROMPT_PATTERN)
// Set up the current command to point at boot.
// NOTE(review): assumes t.Commands already has at least one entry for
// boot output — confirm where Commands is populated.
t.commandCounter = 0
t.currentCommand = &t.Commands[t.commandCounter]
// Start sys161 and defer close.
err = t.start161()
if err != nil {
t.addStatus("aborted", "")
return err
}
defer t.stop161()
t.addStatus("started", "")
// Drive each command: index 0 is boot (nothing to send), later indices
// are typed into the simulator.
for int(t.commandCounter) < len(t.Commands) {
if t.commandCounter != 0 {
err = t.sendCommand(t.currentCommand.Input.Line + "\n")
if err != nil {
t.addStatus("expect", "couldn't send a command")
return nil
}
t.enableStats()
}
// The last command is expected to shut the simulator down.
if int(t.commandCounter) == len(t.Commands)-1 {
t.sys161.ExpectEOF()
t.addStatus("shutdown", "")
break
}
_, err := t.sys161.ExpectRegexp(prompts)
t.disableStats()
// Handle timeouts, unexpected shutdowns, and other errors.
// These are recorded as status events, not returned as errors.
if err == expect.ErrTimeout {
t.addStatus("timeout", fmt.Sprintf("no prompt for %v s", t.Misc.PromptTimeout))
return nil
} else if err == io.EOF {
t.addStatus("crash", "")
return nil
} else if err != nil {
t.addStatus("expect", "")
return nil
}
// Rotate running command to the next command, saving any previous
// output as needed.
t.L.Lock()
if t.currentOutput.WallTime != 0.0 {
t.currentOutput.Line = t.currentOutput.Buffer.String()
t.currentCommand.Output = append(t.currentCommand.Output, t.currentOutput)
}
t.currentOutput = OutputLine{}
t.commandCounter++
t.currentCommand = &t.Commands[t.commandCounter]
t.L.Unlock()
}
return nil
}
// sendCommand transmits commandLine one character at a time, echo-checking
// each character against the simulator's output and retrying dropped
// characters up to t.Misc.CommandRetries times.
func (t *Test) sendCommand(commandLine string) error {
	// Use the (much shorter) per-character timeout while echo-checking;
	// restore the prompt timeout on the way out.
	t.sys161.SetTimeout(time.Duration(t.Misc.CharacterTimeout) * time.Millisecond)
	defer t.sys161.SetTimeout(time.Duration(t.Misc.PromptTimeout) * time.Second)
	for _, ch := range commandLine {
		chStr := string(ch)
		echo := regexp.MustCompile(regexp.QuoteMeta(chStr))
		sent := false
		for attempt := uint(0); attempt < t.Misc.CommandRetries; attempt++ {
			if err := t.sys161.Send(chStr); err != nil {
				return err
			}
			// Wait for the character to be echoed back; a timeout means the
			// character was dropped, so resend it.
			_, err := t.sys161.ExpectRegexp(echo)
			if err == nil {
				sent = true
				break
			}
			if err != expect.ErrTimeout {
				return err
			}
		}
		if !sent {
			return errors.New("test161: timeout sending command")
		}
	}
	return nil
}
// start161 launches sys161 on a pty inside the scratch directory and wraps
// it in an expect session, recording the wall-clock start time.
func (t *Test) start161() error {
	// -X disables debugger connections on panic; -c selects our generated
	// alternate configuration.
	cmd := exec.Command("sys161", "-X", "-c", "test161.conf", "kernel")
	cmd.Dir = t.tempDir
	ptyFile, err := pty.Start(cmd)
	if err != nil {
		return err
	}
	// On expect-level failure, kill the simulator outright.
	kill := func() {
		cmd.Process.Signal(os.Kill)
	}
	// Set the timeout at creation time so that early failures cannot hang.
	t.sys161 = expect.Create(ptyFile, kill, t, time.Duration(t.Misc.PromptTimeout)*time.Second)
	t.startTime = time.Now().UnixNano()
	return nil
}
// stop161 finalizes the test record and tears down the expect session:
// Commands is trimmed to the commands actually reached, and the final wall
// time is recorded.
func (t *Test) stop161() {
	// NOTE(review): this slice assumes commandCounter+1 <= len(t.Commands);
	// Run's loop appears to keep the counter in range — confirm for the
	// empty-Commands case.
	t.Commands = t.Commands[0 : t.commandCounter+1]
	t.WallTime = t.getWallTime()
	t.sys161.Close()
}
// addStatus appends a timestamped status entry (e.g. "started", "aborted",
// "shutdown") to the test's status log. It takes the big lock, so it is safe
// to call from Run and from the stats goroutine.
func (t *Test) addStatus(status string, message string) {
	t.L.Lock()
	// Deferred so the lock is released even if getWallTime or append panics.
	defer t.L.Unlock()
	t.Status = append(t.Status, Status{
		WallTime: t.getWallTime(),
		SimTime:  t.SimTime,
		Status:   status,
		Message:  message,
	})
}
Small fix: initialize the command-state mutex (t.L) at the top of Run, before any addStatus("aborted", ...) call can dereference it while still nil.
/*
Package test161 implements a library for testing OS/161 kernels. We use expect
to drive the sys161 system simulator and collect useful output using the stat
socket.
*/
package test161
import (
"bytes"
"errors"
"fmt"
"github.com/kr/pty"
"github.com/ops-class/test161/expect"
"github.com/termie/go-shutil"
"io"
"io/ioutil"
"os"
"os/exec"
"path"
"regexp"
"sync"
"time"
)
// KERNEL_PROMPT and SHELL_PROMPT are the literal prompt strings printed by
// the OS/161 kernel menu and the user shell, respectively.
const KERNEL_PROMPT = `OS/161 kernel [? for menu]: `
const SHELL_PROMPT = `OS/161$ `

// PROMPT_PATTERN matches either prompt at the end of simulator output.
const PROMPT_PATTERN = `(OS/161 kernel \[\? for menu\]\:|OS/161\$)\s$`

// BOOT marks the pseudo-command slot used for the kernel boot sequence.
const BOOT = -1
// Test describes a single test161 test: its metadata and configuration as
// parsed from YAML, plus the command, timing, and status output produced by
// Run. Fields marked "Protected by L" are shared between Run and the
// getStats goroutine.
type Test struct {
	// Input

	// Metadata
	Name        string   `yaml:"name" json:"name"`
	Description string   `yaml:"description" json:"description"`
	Tags        []string `yaml:"tags" json:"tags"`
	Depends     []string `yaml:"depends" json:"depends"`

	// Configuration chunks
	Sys161  Sys161Conf  `yaml:"sys161" json:"sys161"`
	Stat    StatConf    `yaml:"stat" json:"stat"`
	Monitor MonitorConf `yaml:"monitor" json:"monitor"`
	Misc    MiscConf    `yaml:"misc" json:"misc"`

	// Actual test commands to run
	Content string `fm:"content" yaml:"-" json:"-"`

	// Big lock that protects most fields shared between Run and getStats
	L *sync.Mutex `json:"-"`

	// Output
	ConfString string         `json:"confstring"` // Only set once
	WallTime   TimeFixedPoint `json:"walltime"`   // Protected by L
	SimTime    TimeFixedPoint `json:"simtime"`    // Protected by L
	Commands   []Command      `json:"commands"`   // Protected by L
	Status     []Status       `json:"status"`     // Protected by L

	// Unprotected private fields
	tempDir        string         // Only set once
	startTime      int64          // Only set once
	statStarted    bool           // Only changed once
	sys161         *expect.Expect // Protected by L
	progressTime   float64        // Protected by L
	currentCommand *Command       // Protected by L
	commandCounter uint           // Protected by L
	currentOutput  OutputLine     // Protected by L

	// Fields used by getStats but shared with Run
	statCond    *sync.Cond // Used by the main loop to wait for stat reception
	statRecord  bool       // Protected by statCond.L
	statMonitor bool       // Protected by statCond.L

	// Output channels
	statChan chan Stat // Nonblocking write
}
// Command records one test command: its type, the input line sent to the
// simulator, the output lines received back, and the stats collected while
// it ran.
type Command struct {
	Type         string       `json:"type"`
	Input        InputLine    `json:"input"`
	Output       []OutputLine `json:"output"`
	SummaryStats Stat         `json:"summarystats"`
	AllStats     []Stat       `json:"stats"`
	Monitored    bool         `json:"monitored"`
}

// InputLine is a single timestamped line sent to sys161.
type InputLine struct {
	WallTime TimeFixedPoint `json:"walltime"`
	SimTime  TimeFixedPoint `json:"simtime"`
	Line     string         `json:"line"`
}

// OutputLine is a single timestamped line of sys161 output. Buffer
// accumulates the raw bytes as they arrive; Line holds the finished string
// and is what gets serialized.
type OutputLine struct {
	WallTime TimeFixedPoint `json:"walltime"`
	SimTime  TimeFixedPoint `json:"simtime"`
	Buffer   bytes.Buffer   `json:"-"`
	Line     string         `json:"line"`
}

// Status is one timestamped entry in the test's status log (e.g. "started",
// "shutdown", "aborted", "timeout", "crash").
type Status struct {
	WallTime TimeFixedPoint `json:"walltime"`
	SimTime  TimeFixedPoint `json:"simtime"`
	Status   string         `json:"status"`
	Message  string         `json:"message"`
}
// TimeFixedPoint represents seconds as a float that serializes with a fixed
// six digits after the decimal point.
type TimeFixedPoint float64

// MarshalJSON renders the value as a fixed-point decimal for JSON output.
func (t TimeFixedPoint) MarshalJSON() ([]byte, error) {
	formatted := fmt.Sprintf("%.6f", float64(t))
	return []byte(formatted), nil
}
// getWallTime returns the wall-clock time elapsed since start161 ran, in
// seconds, as a TimeFixedPoint.
func (t *Test) getWallTime() TimeFixedPoint {
	elapsedNanos := time.Now().UnixNano() - t.startTime
	return TimeFixedPoint(float64(elapsedNanos) / float64(1000*1000*1000))
}
// Run executes a test161 test end to end: it merges in default
// configuration, stages a scratch copy of root in a temporary directory,
// creates any configured disks, boots sys161 under expect, and then drives
// each entry of t.Commands in turn, recording output and status transitions.
//
// Run returns a non-nil error only for setup ("aborted") failures. Runtime
// outcomes (timeout, crash, expect errors) are recorded via addStatus and
// Run returns nil for them.
func (t *Test) Run(root string) (err error) {
	// Serialize the current command state. Initialized first so that
	// addStatus can lock it even on the early failure paths below.
	t.L = &sync.Mutex{}
	// Merge in test161 defaults for any missing configuration values.
	err = t.MergeConf(CONF_DEFAULTS)
	if err != nil {
		t.addStatus("aborted", "")
		return err
	}
	// Create temp directory.
	tempRoot, err := ioutil.TempDir(t.Misc.TempDir, "test161")
	if err != nil {
		t.addStatus("aborted", "")
		return err
	}
	defer os.RemoveAll(tempRoot)
	t.tempDir = path.Join(tempRoot, "root")
	// Copy root (kernel and binaries) into the scratch directory.
	err = shutil.CopyTree(root, t.tempDir, nil)
	if err != nil {
		t.addStatus("aborted", "")
		return err
	}
	// Make sure we have a kernel.
	kernelTarget := path.Join(t.tempDir, "kernel")
	_, err = os.Stat(kernelTarget)
	if err != nil {
		t.addStatus("aborted", "")
		return err
	}
	// Generate an alternate configuration to prevent collisions.
	confTarget := path.Join(t.tempDir, "test161.conf")
	t.ConfString, err = t.PrintConf()
	if err != nil {
		t.addStatus("aborted", "")
		return err
	}
	err = ioutil.WriteFile(confTarget, []byte(t.ConfString), 0440)
	if err != nil {
		t.addStatus("aborted", "")
		return err
	}
	if _, err := os.Stat(confTarget); os.IsNotExist(err) {
		t.addStatus("aborted", "")
		return err
	}
	// Create disks.
	if t.Sys161.Disk1.Enabled == "true" {
		create := exec.Command("disk161", "create", "LHD0.img", t.Sys161.Disk1.Bytes)
		create.Dir = t.tempDir
		err = create.Run()
		if err != nil {
			t.addStatus("aborted", "")
			return err
		}
	}
	if t.Sys161.Disk2.Enabled == "true" {
		create := exec.Command("disk161", "create", "LHD1.img", t.Sys161.Disk2.Bytes)
		create.Dir = t.tempDir
		err = create.Run()
		if err != nil {
			t.addStatus("aborted", "")
			return err
		}
	}
	// Coordinated with the getStat goroutine. I don't think that a channel
	// would work here.
	t.statCond = &sync.Cond{L: &sync.Mutex{}}
	// Initialize stat channel. Closed by getStats.
	t.statChan = make(chan Stat)
	// Record stats during boot, but don't activate the monitor.
	t.statRecord = true
	t.statMonitor = false
	// Wait for either kernel or user prompts.
	prompts := regexp.MustCompile(PROMPT_PATTERN)
	// Set up the current command to point at boot.
	t.commandCounter = 0
	t.currentCommand = &t.Commands[t.commandCounter]
	// Start sys161 and defer close.
	err = t.start161()
	if err != nil {
		t.addStatus("aborted", "")
		return err
	}
	defer t.stop161()
	t.addStatus("started", "")
	for int(t.commandCounter) < len(t.Commands) {
		// Command 0 is the boot pseudo-command; there is nothing to send
		// for it.
		if t.commandCounter != 0 {
			err = t.sendCommand(t.currentCommand.Input.Line + "\n")
			if err != nil {
				t.addStatus("expect", "couldn't send a command")
				return nil
			}
			t.enableStats()
		}
		// The last command is expected to shut the system down, so wait for
		// EOF instead of another prompt.
		if int(t.commandCounter) == len(t.Commands)-1 {
			t.sys161.ExpectEOF()
			t.addStatus("shutdown", "")
			break
		}
		_, err := t.sys161.ExpectRegexp(prompts)
		t.disableStats()
		// Handle timeouts, unexpected shutdowns, and other errors. These are
		// test outcomes, not Run failures, hence the nil returns.
		if err == expect.ErrTimeout {
			t.addStatus("timeout", fmt.Sprintf("no prompt for %v s", t.Misc.PromptTimeout))
			return nil
		} else if err == io.EOF {
			t.addStatus("crash", "")
			return nil
		} else if err != nil {
			t.addStatus("expect", "")
			return nil
		}
		// Rotate running command to the next command, saving any previous
		// output as needed.
		t.L.Lock()
		if t.currentOutput.WallTime != 0.0 {
			t.currentOutput.Line = t.currentOutput.Buffer.String()
			t.currentCommand.Output = append(t.currentCommand.Output, t.currentOutput)
		}
		t.currentOutput = OutputLine{}
		t.commandCounter++
		t.currentCommand = &t.Commands[t.commandCounter]
		t.L.Unlock()
	}
	return nil
}
// sendCommand sends a command persistently. All the retry logic to deal with
// dropped characters is here: each character is sent and then expected back
// as an echo, retried up to t.Misc.CommandRetries times on timeout.
func (t *Test) sendCommand(commandLine string) error {
	// Temporarily lower the expect timeout to the per-character limit; the
	// deferred call restores the prompt timeout when done.
	t.sys161.SetTimeout(time.Duration(t.Misc.CharacterTimeout) * time.Millisecond)
	defer t.sys161.SetTimeout(time.Duration(t.Misc.PromptTimeout) * time.Second)
	for _, character := range commandLine {
		retryCount := uint(0)
		for ; retryCount < t.Misc.CommandRetries; retryCount++ {
			err := t.sys161.Send(string(character))
			if err != nil {
				return err
			}
			// Wait for the character to be echoed back; a timeout means it
			// was dropped, so resend.
			_, err = t.sys161.ExpectRegexp(regexp.MustCompile(regexp.QuoteMeta(string(character))))
			if err == nil {
				break
			} else if err == expect.ErrTimeout {
				continue
			} else {
				return err
			}
		}
		// Exhausted every retry without seeing the echo.
		if retryCount == t.Misc.CommandRetries {
			return errors.New("test161: timeout sending command")
		}
	}
	return nil
}
// start161 is a private helper function to start the sys161 expect process.
// It launches sys161 on a pty in the scratch directory, wraps it in an
// expect session, and records the wall-clock start time.
func (t *Test) start161() error {
	// -X disables debugger connections on panic; -c selects our alternate
	// configuration.
	run := exec.Command("sys161", "-X", "-c", "test161.conf", "kernel")
	run.Dir = t.tempDir
	// NOTE(review): the local pty shadows the pty package; harmless here
	// because the package is not referenced again in this function.
	pty, err := pty.Start(run)
	if err != nil {
		return err
	}
	// Get serious about killing things.
	killer := func() {
		run.Process.Signal(os.Kill)
	}
	// Set timeout at create to avoid hanging with early failures.
	t.sys161 = expect.Create(pty, killer, t, time.Duration(t.Misc.PromptTimeout)*time.Second)
	t.startTime = time.Now().UnixNano()
	return nil
}
// stop161 finalizes the test record and tears down the expect session:
// Commands is trimmed to the commands actually reached, and the final wall
// time is recorded.
func (t *Test) stop161() {
	// NOTE(review): this slice assumes commandCounter+1 <= len(t.Commands);
	// Run's loop appears to keep the counter in range — confirm for the
	// empty-Commands case.
	t.Commands = t.Commands[0 : t.commandCounter+1]
	t.WallTime = t.getWallTime()
	t.sys161.Close()
}
// addStatus appends a timestamped status entry (e.g. "started", "aborted",
// "shutdown") to the test's status log. It takes the big lock, so it is safe
// to call from Run and from the stats goroutine.
func (t *Test) addStatus(status string, message string) {
	t.L.Lock()
	// Deferred so the lock is released even if getWallTime or append panics.
	defer t.L.Unlock()
	t.Status = append(t.Status, Status{
		WallTime: t.getWallTime(),
		SimTime:  t.SimTime,
		Status:   status,
		Message:  message,
	})
}
|
// Copyright 2014 Hajime Hoshi
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ebiten
import (
"sync/atomic"
"github.com/hajimehoshi/ebiten/v2/internal/clock"
"github.com/hajimehoshi/ebiten/v2/internal/driver"
)
// Game defines necessary functions for a game.
type Game interface {
	// Update updates a game by one tick. The given argument represents a screen image.
	//
	// Update updates only the game logic and Draw draws the screen.
	//
	// In the first frame, it is ensured that Update is called at least once before Draw. You can use Update
	// to initialize the game state.
	//
	// After the first frame, Update might not be called or might be called once
	// or more for one frame. The frequency is determined by the current TPS (ticks-per-second).
	Update() error

	// Draw draws the game screen by one frame.
	//
	// The given argument represents a screen image. The updated content is adopted as the game screen.
	Draw(screen *Image)

	// Layout accepts a native outside size in device-independent pixels and returns the game's logical screen
	// size.
	//
	// On desktops, the outside is a window or a monitor (fullscreen mode). On browsers, the outside is a body
	// element. On mobiles, the outside is the view's size.
	//
	// Even though the outside size and the screen size differ, the rendering scale is automatically adjusted to
	// fit with the outside.
	//
	// Layout is called almost every frame.
	//
	// It is ensured that Layout is invoked before Update is called in the first frame.
	//
	// If Layout returns non-positive numbers, the caller can panic.
	//
	// You can return a fixed screen size if you don't care, or you can also return a calculated screen size
	// adjusted with the given outside size.
	Layout(outsideWidth, outsideHeight int) (screenWidth, screenHeight int)
}
// DefaultTPS represents a default ticks per second, that represents how many times game updating happens in a second.
const DefaultTPS = 60
// CurrentFPS returns the current number of FPS (frames per second), that represents
// how many buffer swaps happen per second.
//
// On some environments, CurrentFPS doesn't return a reliable value since vsync doesn't work well there.
// If you want to measure the application's speed, use CurrentTPS.
//
// CurrentFPS is concurrent-safe.
func CurrentFPS() float64 {
	return clock.CurrentFPS()
}
var (
	// isScreenClearedEveryFrame is 1 when the screen is cleared at the start
	// of each frame (the default) and 0 otherwise. Accessed atomically.
	isScreenClearedEveryFrame = int32(1)
	// currentMaxTPS holds the current maximum ticks per second. Accessed
	// atomically.
	currentMaxTPS = int32(DefaultTPS)
)
// SetScreenClearedEveryFrame enables or disables clearing of the screen at
// the beginning of each frame. The default value is true, i.e. the screen is
// cleared every frame.
//
// SetScreenClearedEveryFrame is concurrent-safe.
func SetScreenClearedEveryFrame(cleared bool) {
	var flag int32
	if cleared {
		flag = 1
	}
	atomic.StoreInt32(&isScreenClearedEveryFrame, flag)
	theUIContext.setScreenClearedEveryFrame(cleared)
}
// IsScreenClearedEveryFrame reports whether the screen is cleared at the
// beginning of each frame.
//
// IsScreenClearedEveryFrame is concurrent-safe.
func IsScreenClearedEveryFrame() bool {
	return atomic.LoadInt32(&isScreenClearedEveryFrame) != 0
}
// imageDumperGame wraps a Game and routes its updates through an imageDumper
// so the screen can be captured. The first error encountered is latched in
// err and short-circuits subsequent Update/Draw calls.
type imageDumperGame struct {
	game Game
	d    *imageDumper
	err  error
}
// Update runs the wrapped game's update logic via the imageDumper, lazily
// creating the dumper on first use and returning any previously latched
// error unchanged.
func (i *imageDumperGame) Update() error {
	if i.err != nil {
		return i.err
	}
	if i.d == nil {
		i.d = &imageDumper{g: i.game}
	}
	return i.d.update()
}
// Draw renders the wrapped game's frame, then gives the dumper a chance to
// capture the screen. A dump failure is latched and surfaced by the next
// Update call.
func (i *imageDumperGame) Draw(screen *Image) {
	if i.err != nil {
		return
	}
	i.game.Draw(screen)
	i.err = i.d.dump(screen)
}
// Layout forwards directly to the wrapped game's Layout.
func (i *imageDumperGame) Layout(outsideWidth, outsideHeight int) (screenWidth, screenHeight int) {
	return i.game.Layout(outsideWidth, outsideHeight)
}
// RunGame starts the main loop and runs the game.
// game's Update function is called every tick to update the game logic.
// game's Draw function is, if it exists, called every frame to draw the screen.
// game's Layout function is called when necessary, and you can specify the logical screen size by the function.
//
// game must implement Game interface.
// Game's Draw function is optional, but it is recommended to implement Draw to separate updating the logic and
// rendering.
//
// RunGame is a more flexible form of Run due to game's Layout function.
// You can make a resizable window if you use RunGame, while you cannot if you use Run.
// RunGame is a more sophisticated way than Run and hides the notion of 'scale'.
//
// While Run specifies the window size, RunGame does not.
// You need to call SetWindowSize before RunGame if you want.
// Otherwise, a default window size is adopted.
//
// Some functions (ScreenScale, SetScreenScale, SetScreenSize) are not available with RunGame.
//
// On browsers, it is strongly recommended to use iframe if you embed an Ebiten application in your website.
//
// RunGame must be called on the main thread.
// Note that Ebiten bounds the main goroutine to the main OS thread by runtime.LockOSThread.
//
// Ebiten tries to call game's Update function 60 times a second by default. In other words,
// TPS (ticks per second) is 60 by default.
// This is not related to framerate (display's refresh rate).
//
// RunGame returns error when 1) error happens in the underlying graphics driver, 2) audio error happens or
// 3) f returns error. In the case of 3), RunGame returns the same error.
//
// The size unit is device-independent pixel.
//
// Don't call RunGame twice or more in one process.
func RunGame(game Game) error {
	fixWindowPosition(WindowSize())
	theUIContext.set(&imageDumperGame{
		game: game,
	})
	if err := uiDriver().Run(theUIContext); err != nil {
		// RegularTermination signals a clean, user-requested shutdown.
		if err == driver.RegularTermination {
			return nil
		}
		return err
	}
	return nil
}
// RunGameWithoutMainLoop runs the game, but doesn't call the loop on the main (UI) thread.
// Different from Run, RunGameWithoutMainLoop returns immediately.
//
// Ebiten users should NOT call RunGameWithoutMainLoop.
// Instead, functions in github.com/hajimehoshi/ebiten/v2/mobile package call this.
//
// TODO: Remove this. In order to remove this, the uiContext should be in another package.
func RunGameWithoutMainLoop(game Game) {
	fixWindowPosition(WindowSize())
	theUIContext.set(&imageDumperGame{
		game: game,
	})
	uiDriver().RunWithoutMainLoop(theUIContext)
}
// ScreenSizeInFullscreen returns the size in device-independent pixels when the game is fullscreen.
// The adopted monitor is the 'current' monitor which the window belongs to.
// The returned value can be given to Run or SetSize function if the perfectly fit fullscreen is needed.
//
// On browsers, ScreenSizeInFullscreen returns the 'window' (global object) size, not 'screen' size since an Ebiten
// game should not know the outside of the window object. For more details, see SetFullscreen API comment.
//
// On mobiles, ScreenSizeInFullscreen returns (0, 0) so far.
//
// ScreenSizeInFullscreen's use cases are limited. If you are making a fullscreen application, you can use RunGame and
// the Game interface's Layout function instead. If you are making a not-fullscreen application but the application's
// behavior depends on the monitor size, ScreenSizeInFullscreen is useful.
//
// ScreenSizeInFullscreen must be called on the main thread before ebiten.Run, and is concurrent-safe after
// ebiten.Run.
func ScreenSizeInFullscreen() (int, int) {
return uiDriver().ScreenSizeInFullscreen()
}
// CursorMode returns the current cursor mode.
//
// On browsers, only CursorModeVisible and CursorModeHidden are supported.
//
// CursorMode returns CursorModeHidden on mobiles.
//
// CursorMode is concurrent-safe.
func CursorMode() CursorModeType {
return CursorModeType(uiDriver().CursorMode())
}
// SetCursorMode sets the render and capture mode of the mouse cursor.
// CursorModeVisible sets the cursor to always be visible.
// CursorModeHidden hides the system cursor when over the window.
// CursorModeCaptured hides the system cursor and locks it to the window.
//
// On browsers, only CursorModeVisible and CursorModeHidden are supported.
//
// SetCursorMode does nothing on mobiles.
//
// SetCursorMode is concurrent-safe.
func SetCursorMode(mode CursorModeType) {
uiDriver().SetCursorMode(driver.CursorMode(mode))
}
// IsFullscreen reports whether the current mode is fullscreen or not.
//
// IsFullscreen always returns false on browsers or mobiles.
//
// IsFullscreen is concurrent-safe.
func IsFullscreen() bool {
return uiDriver().IsFullscreen()
}
// SetFullscreen changes the current mode to fullscreen or not on desktops.
//
// On fullscreen mode, the game screen is automatically enlarged
// to fit with the monitor. The current scale value is ignored.
//
// On desktops, Ebiten uses 'windowed' fullscreen mode, which doesn't change
// your monitor's resolution.
//
// SetFullscreen does nothing on browsers or mobiles.
//
// SetFullscreen is concurrent-safe.
func SetFullscreen(fullscreen bool) {
uiDriver().SetFullscreen(fullscreen)
}
// IsFocused returns a boolean value indicating whether
// the game is in focus or in the foreground.
//
// IsFocused will only return true if IsRunnableOnUnfocused is false.
//
// IsFocused is concurrent-safe.
func IsFocused() bool {
return uiDriver().IsFocused()
}
// IsRunnableOnUnfocused returns a boolean value indicating whether
// the game runs even in background.
//
// IsRunnableOnUnfocused is concurrent-safe.
func IsRunnableOnUnfocused() bool {
return uiDriver().IsRunnableOnUnfocused()
}
// SetRunnableOnUnfocused sets the state if the game runs even in background.
//
// If the given value is true, the game runs even in background e.g. when losing focus.
// The initial state is true.
//
// Known issue: on browsers, even if the state is on, the game doesn't run in background tabs.
// This is because browsers throttle background tabs so that they update less often.
//
// SetRunnableOnUnfocused does nothing on mobiles so far.
//
// SetRunnableOnUnfocused is concurrent-safe.
func SetRunnableOnUnfocused(runnableOnUnfocused bool) {
	uiDriver().SetRunnableOnUnfocused(runnableOnUnfocused)
}
// DeviceScaleFactor returns a device scale factor value of the current monitor which the window belongs to.
//
// DeviceScaleFactor returns a meaningful value on high-DPI display environment,
// otherwise DeviceScaleFactor returns 1.
//
// DeviceScaleFactor might panic on init function on some devices like Android.
// Then, it is not recommended to call DeviceScaleFactor from init functions.
//
// DeviceScaleFactor must be called on the main thread before the main loop, and is concurrent-safe after the main
// loop.
func DeviceScaleFactor() float64 {
return uiDriver().DeviceScaleFactor()
}
// IsVsyncEnabled returns a boolean value indicating whether
// the game uses the display's vsync.
//
// IsVsyncEnabled is concurrent-safe.
func IsVsyncEnabled() bool {
return uiDriver().IsVsyncEnabled()
}
// SetVsyncEnabled sets a boolean value indicating whether
// the game uses the display's vsync.
//
// If the given value is true, the game tries to sync the display's refresh rate.
// If false, the game ignores the display's refresh rate.
// The initial value is true.
// By disabling vsync, the game works more efficiently but consumes more CPU.
//
// Note that the state doesn't affect TPS (ticks per second, i.e. how many the run function is
// updated per second).
//
// SetVsyncEnabled does nothing on mobiles so far.
//
// SetVsyncEnabled is concurrent-safe.
func SetVsyncEnabled(enabled bool) {
uiDriver().SetVsyncEnabled(enabled)
}
// MaxTPS returns the current maximum TPS.
//
// MaxTPS is concurrent-safe.
func MaxTPS() int {
return int(atomic.LoadInt32(¤tMaxTPS))
}
// CurrentTPS returns the current TPS (ticks per second),
// that represents how many update function is called in a second.
//
// CurrentTPS is concurrent-safe.
func CurrentTPS() float64 {
return clock.CurrentTPS()
}
// UncappedTPS is a special TPS value that means the game doesn't have limitation on TPS.
const UncappedTPS = clock.UncappedTPS
// SetMaxTPS sets the maximum TPS (ticks per second),
// that represents how many updating function is called per second.
// The initial value is 60.
//
// If tps is UncappedTPS, TPS is uncapped and the game is updated per frame.
// If tps is negative but not UncappedTPS, SetMaxTPS panics.
//
// SetMaxTPS is concurrent-safe.
func SetMaxTPS(tps int) {
if tps < 0 && tps != UncappedTPS {
panic("ebiten: tps must be >= 0 or UncappedTPS")
}
atomic.StoreInt32(¤tMaxTPS, int32(tps))
}
// IsScreenTransparent reports whether the window is transparent.
//
// IsScreenTransparent is concurrent-safe.
func IsScreenTransparent() bool {
return uiDriver().IsScreenTransparent()
}
// SetScreenTransparent sets the state if the window is transparent.
//
// SetScreenTransparent panics if SetScreenTransparent is called after the main loop.
//
// SetScreenTransparent does nothing on mobiles.
//
// SetScreenTransparent is concurrent-safe.
func SetScreenTransparent(transparent bool) {
uiDriver().SetScreenTransparent(transparent)
}
// SetInitFocused sets whether the application is focused on show.
// The default value is true, i.e., the application is focused.
// Note that the application does not proceed if this is not focused by default.
// This behavior can be changed by SetRunnableInBackground.
//
// SetInitFocused does nothing on mobile.
//
// SetInitFocused panics if this is called after the main loop.
//
// SetInitFocused is concurrent-safe.
func SetInitFocused(focused bool) {
	uiDriver().SetInitFocused(focused)
}
ebiten: Update comments
// Copyright 2014 Hajime Hoshi
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ebiten
import (
"sync/atomic"
"github.com/hajimehoshi/ebiten/v2/internal/clock"
"github.com/hajimehoshi/ebiten/v2/internal/driver"
)
// Game defines necessary functions for a game.
type Game interface {
// Update updates a game by one tick. The given argument represents a screen image.
//
// Update updates only the game logic and Draw draws the screen.
//
// In the first frame, it is ensured that Update is called at least once before Draw. You can use Update
// to initialize the game state.
//
// After the first frame, Update might not be called or might be called once
// or more for one frame. The frequency is determined by the current TPS (tick-per-second).
Update() error
// Draw draws the game screen by one frame.
//
// The give argument represents a screen image. The updated content is adopted as the game screen.
Draw(screen *Image)
// Layout accepts a native outside size in device-independent pixels and returns the game's logical screen
// size.
//
// On desktops, the outside is a window or a monitor (fullscreen mode). On browsers, the outside is a body
// element. On mobiles, the outside is the view's size.
//
// Even though the outside size and the screen size differ, the rendering scale is automatically adjusted to
// fit with the outside.
//
// Layout is called almost every frame.
//
// It is ensured that Layout is invoked before Update is called in the first frame.
//
// If Layout returns non-positive numbers, the caller can panic.
//
// You can return a fixed screen size if you don't care, or you can also return a calculated screen size
// adjusted with the given outside size.
Layout(outsideWidth, outsideHeight int) (screenWidth, screenHeight int)
}
// DefaultTPS represents a default ticks per second, that represents how many times game updating happens in a second.
const DefaultTPS = 60
// CurrentFPS returns the current number of FPS (frames per second), that represents
// how many swapping buffer happens per second.
//
// On some environments, CurrentFPS doesn't return a reliable value since vsync doesn't work well there.
// If you want to measure the application's speed, Use CurrentTPS.
//
// CurrentFPS is concurrent-safe.
func CurrentFPS() float64 {
return clock.CurrentFPS()
}
var (
isScreenClearedEveryFrame = int32(1)
currentMaxTPS = int32(DefaultTPS)
)
// SetScreenClearedEveryFrame enables or disables the clearing of the screen at the beginning of each frame.
// The default value is true and the screen is cleared each frame by default.
//
// SetScreenClearedEveryFrame is concurrent-safe.
func SetScreenClearedEveryFrame(cleared bool) {
v := int32(0)
if cleared {
v = 1
}
atomic.StoreInt32(&isScreenClearedEveryFrame, v)
theUIContext.setScreenClearedEveryFrame(cleared)
}
// IsScreenClearedEveryFrame reports whether the screen is cleared at the
// beginning of each frame.
//
// IsScreenClearedEveryFrame is concurrent-safe.
func IsScreenClearedEveryFrame() bool {
	return atomic.LoadInt32(&isScreenClearedEveryFrame) != 0
}
type imageDumperGame struct {
game Game
d *imageDumper
err error
}
func (i *imageDumperGame) Update() error {
if i.err != nil {
return i.err
}
if i.d == nil {
i.d = &imageDumper{g: i.game}
}
return i.d.update()
}
func (i *imageDumperGame) Draw(screen *Image) {
if i.err != nil {
return
}
i.game.Draw(screen)
i.err = i.d.dump(screen)
}
func (i *imageDumperGame) Layout(outsideWidth, outsideHeight int) (screenWidth, screenHeight int) {
return i.game.Layout(outsideWidth, outsideHeight)
}
// RunGame starts the main loop and runs the game.
// game's Update function is called every tick to update the game logic.
// game's Draw function is, if it exists, called every frame to draw the screen.
// game's Layout function is called when necessary, and you can specify the logical screen size by the function.
//
// game must implement Game interface.
// Game's Draw function is optional, but it is recommended to implement Draw to seperate updating the logic and
// rendering.
//
// RunGame is a more flexibile form of Run due to game's Layout function.
// You can make a resizable window if you use RunGame, while you cannot if you use Run.
// RunGame is more sophisticated way than Run and hides the notion of 'scale'.
//
// While Run specifies the window size, RunGame does not.
// You need to call SetWindowSize before RunGame if you want.
// Otherwise, a default window size is adopted.
//
// Some functions (ScreenScale, SetScreenScale, SetScreenSize) are not available with RunGame.
//
// On browsers, it is strongly recommended to use iframe if you embed an Ebiten application in your website.
//
// RunGame must be called on the main thread.
// Note that Ebiten bounds the main goroutine to the main OS thread by runtime.LockOSThread.
//
// Ebiten tries to call game's Update function 60 times a second by default. In other words,
// TPS (ticks per second) is 60 by default.
// This is not related to framerate (display's refresh rate).
//
// RunGame returns error when 1) error happens in the underlying graphics driver, 2) audio error happens or
// 3) f returns error. In the case of 3), RunGame returns the same error.
//
// The size unit is device-independent pixel.
//
// Don't call RunGame twice or more in one process.
func RunGame(game Game) error {
fixWindowPosition(WindowSize())
theUIContext.set(&imageDumperGame{
game: game,
})
if err := uiDriver().Run(theUIContext); err != nil {
if err == driver.RegularTermination {
return nil
}
return err
}
return nil
}
// RunGameWithoutMainLoop runs the game, but doesn't call the loop on the main (UI) thread.
// Different from Run, RunGameWithoutMainLoop returns immediately.
//
// Ebiten users should NOT call RunGameWithoutMainLoop.
// Instead, functions in github.com/hajimehoshi/ebiten/v2/mobile package calls this.
//
// TODO: Remove this. In order to remove this, the uiContext should be in another package.
func RunGameWithoutMainLoop(game Game) {
fixWindowPosition(WindowSize())
theUIContext.set(&imageDumperGame{
game: game,
})
uiDriver().RunWithoutMainLoop(theUIContext)
}
// ScreenSizeInFullscreen returns the size in device-independent pixels when the game is fullscreen.
// The adopted monitor is the 'current' monitor which the window belongs to.
// The returned value can be given to Run or SetSize function if the perfectly fit fullscreen is needed.
//
// On browsers, ScreenSizeInFullscreen returns the 'window' (global object) size, not 'screen' size since an Ebiten
// game should not know the outside of the window object. For more details, see SetFullscreen API comment.
//
// On mobiles, ScreenSizeInFullscreen returns (0, 0) so far.
//
// ScreenSizeInFullscreen's use cases are limited. If you are making a fullscreen application, you can use RunGame and
// the Game interface's Layout function instead. If you are making a not-fullscreen application but the application's
// behavior depends on the monitor size, ScreenSizeInFullscreen is useful.
//
// ScreenSizeInFullscreen must be called on the main thread before ebiten.Run, and is concurrent-safe after
// ebiten.Run.
func ScreenSizeInFullscreen() (int, int) {
	// Thin delegation: the active UI driver knows the current monitor.
	return uiDriver().ScreenSizeInFullscreen()
}
// CursorMode returns the current cursor mode.
//
// On browsers, only CursorModeVisible and CursorModeHidden are supported.
//
// CursorMode returns CursorModeHidden on mobiles.
//
// CursorMode is concurrent-safe.
func CursorMode() CursorModeType {
	// Convert the driver-level mode to the public CursorModeType.
	return CursorModeType(uiDriver().CursorMode())
}

// SetCursorMode sets the render and capture mode of the mouse cursor.
// CursorModeVisible sets the cursor to always be visible.
// CursorModeHidden hides the system cursor when over the window.
// CursorModeCaptured hides the system cursor and locks it to the window.
//
// On browsers, only CursorModeVisible and CursorModeHidden are supported.
//
// SetCursorMode does nothing on mobiles.
//
// SetCursorMode is concurrent-safe.
func SetCursorMode(mode CursorModeType) {
	// Convert the public mode to the driver-level representation.
	uiDriver().SetCursorMode(driver.CursorMode(mode))
}

// IsFullscreen reports whether the current mode is fullscreen or not.
//
// IsFullscreen always returns false on browsers or mobiles.
//
// IsFullscreen is concurrent-safe.
func IsFullscreen() bool {
	return uiDriver().IsFullscreen()
}
// SetFullscreen changes the current mode to fullscreen or not on desktops.
//
// On fullscreen mode, the game screen is automatically enlarged
// to fit with the monitor. The current scale value is ignored.
//
// On desktops, Ebiten uses 'windowed' fullscreen mode, which doesn't change
// your monitor's resolution.
//
// SetFullscreen does nothing on browsers or mobiles.
//
// SetFullscreen is concurrent-safe.
func SetFullscreen(fullscreen bool) {
	uiDriver().SetFullscreen(fullscreen)
}

// IsFocused returns a boolean value indicating whether
// the game is in focus or in the foreground.
//
// IsFocused will only return true if IsRunnableOnUnfocused is false.
//
// IsFocused is concurrent-safe.
func IsFocused() bool {
	// Delegates to the driver's notion of window focus.
	return uiDriver().IsFocused()
}

// IsRunnableOnUnfocused returns a boolean value indicating whether
// the game runs even in background.
//
// IsRunnableOnUnfocused is concurrent-safe.
func IsRunnableOnUnfocused() bool {
	return uiDriver().IsRunnableOnUnfocused()
}
// SetRunnableOnUnfocused sets the state if the game runs even in background.
//
// If the given value is true, the game runs even in background e.g. when losing focus.
// The initial state is true.
//
// Known issue: On browsers, even if the state is on, the game doesn't run in background tabs.
// This is because browsers throttle background tabs so that they don't update too often.
//
// SetRunnableOnUnfocused does nothing on mobiles so far.
//
// SetRunnableOnUnfocused is concurrent-safe.
func SetRunnableOnUnfocused(runnableOnUnfocused bool) {
	uiDriver().SetRunnableOnUnfocused(runnableOnUnfocused)
}
// DeviceScaleFactor returns a device scale factor value of the current monitor which the window belongs to.
//
// DeviceScaleFactor returns a meaningful value on high-DPI display environment,
// otherwise DeviceScaleFactor returns 1.
//
// DeviceScaleFactor might panic on init function on some devices like Android.
// Then, it is not recommended to call DeviceScaleFactor from init functions.
//
// DeviceScaleFactor must be called on the main thread before the main loop, and is concurrent-safe after the main
// loop.
func DeviceScaleFactor() float64 {
	return uiDriver().DeviceScaleFactor()
}

// IsVsyncEnabled returns a boolean value indicating whether
// the game uses the display's vsync.
//
// IsVsyncEnabled is concurrent-safe.
func IsVsyncEnabled() bool {
	return uiDriver().IsVsyncEnabled()
}

// SetVsyncEnabled sets a boolean value indicating whether
// the game uses the display's vsync.
//
// If the given value is true, the game tries to sync the display's refresh rate.
// If false, the game ignores the display's refresh rate.
// The initial value is true.
// By disabling vsync, the game works more efficiently but consumes more CPU.
//
// Note that the state doesn't affect TPS (ticks per second, i.e. how many the run function is
// updated per second).
//
// SetVsyncEnabled does nothing on mobiles so far.
//
// SetVsyncEnabled is concurrent-safe.
func SetVsyncEnabled(enabled bool) {
	uiDriver().SetVsyncEnabled(enabled)
}
// MaxTPS returns the current maximum TPS.
//
// MaxTPS is concurrent-safe.
func MaxTPS() int {
return int(atomic.LoadInt32(¤tMaxTPS))
}
// CurrentTPS returns the current TPS (ticks per second),
// that represents how many update function is called in a second.
//
// CurrentTPS is concurrent-safe.
func CurrentTPS() float64 {
	// The clock package owns the actual measurement.
	return clock.CurrentTPS()
}

// UncappedTPS is a special TPS value that means the game doesn't have limitation on TPS.
const UncappedTPS = clock.UncappedTPS
// SetMaxTPS sets the maximum TPS (ticks per second),
// that represents how many updating function is called per second.
// The initial value is 60.
//
// If tps is UncappedTPS, TPS is uncapped and the game is updated per frame.
// If tps is negative but not UncappedTPS, SetMaxTPS panics.
//
// SetMaxTPS is concurrent-safe.
func SetMaxTPS(tps int) {
if tps < 0 && tps != UncappedTPS {
panic("ebiten: tps must be >= 0 or UncappedTPS")
}
atomic.StoreInt32(¤tMaxTPS, int32(tps))
}
// IsScreenTransparent reports whether the window is transparent.
//
// IsScreenTransparent is concurrent-safe.
func IsScreenTransparent() bool {
	return uiDriver().IsScreenTransparent()
}

// SetScreenTransparent sets the state if the window is transparent.
//
// SetScreenTransparent panics if SetScreenTransparent is called after the main loop.
//
// SetScreenTransparent does nothing on mobiles.
//
// SetScreenTransparent is concurrent-safe.
func SetScreenTransparent(transparent bool) {
	uiDriver().SetScreenTransparent(transparent)
}
// SetInitFocused sets whether the application is focused on show.
// The default value is true, i.e., the application is focused.
// Note that the application does not proceed if this is not focused by default.
// This behavior can be changed by SetRunnableOnUnfocused.
//
// SetInitFocused does nothing on mobile.
//
// SetInitFocused panics if this is called after the main loop.
//
// SetInitFocused is concurrent-safe.
func SetInitFocused(focused bool) {
	uiDriver().SetInitFocused(focused)
}
|
package main
import (
"bufio"
"bytes"
"fmt"
"github.com/tilezen/tapalcatl"
"io"
"net"
"net/http"
"time"
)
// HttpRequestData captures the request attributes that get logged with
// request metrics.
type HttpRequestData struct {
	Path      string
	ApiKey    string
	UserAgent string
	Referrer  string
	Format    string
}

// ParseResult is the outcome of parsing an incoming tile request.
type ParseResult struct {
	Coord       tapalcatl.TileCoord
	Cond        Condition
	ContentType string
	HttpData    HttpRequestData
}

// Parser turns an HTTP request into a ParseResult.
type Parser interface {
	Parse(*http.Request) (*ParseResult, error)
}

// BufferManager hands out reusable byte buffers and takes them back when
// the caller is done with them.
type BufferManager interface {
	Get() *bytes.Buffer
	Put(*bytes.Buffer)
}
// the reqState structures and string generation serve to emit a single log entry line
// a log parser will pick this up and use it to persist metrics
// the string functions here are specific to the format used and should be updated with care

// ReqResponseState describes how the HTTP response concluded.
type ReqResponseState int32

const (
	// ResponseState_Nil means the handler never set a state; the handler
	// logs this as a code bug.
	ResponseState_Nil ReqResponseState = iota
	ResponseState_Success
	ResponseState_NotModified
	ResponseState_NotFound
	ResponseState_BadRequest
	ResponseState_Error
	// ResponseState_Count is a sentinel used only for range validation.
	ResponseState_Count
)
// String renders the response state as the short token used in log lines.
// These strings are matched by downstream log parsers; change with care.
func (rrs ReqResponseState) String() string {
	names := [...]string{
		ResponseState_Nil:         "nil",
		ResponseState_Success:     "ok",
		ResponseState_NotModified: "notmod",
		ResponseState_NotFound:    "notfound",
		ResponseState_BadRequest:  "badreq",
		ResponseState_Error:       "err",
	}
	if rrs >= 0 && int(rrs) < len(names) {
		return names[rrs]
	}
	// Covers ResponseState_Count and any out-of-range value.
	return "unknown"
}
// AsStatusCode maps a response state to the HTTP status code it represents.
// The unset (nil) state maps to 0; unrecognized values map to -1.
func (rrs ReqResponseState) AsStatusCode() int {
	codes := map[ReqResponseState]int{
		ResponseState_Nil:         0,
		ResponseState_Success:     200,
		ResponseState_NotModified: 304,
		ResponseState_NotFound:    404,
		ResponseState_BadRequest:  400,
		ResponseState_Error:       500,
	}
	if code, ok := codes[rrs]; ok {
		return code
	}
	return -1
}
// ReqFetchState describes how the storage fetch for a request concluded.
type ReqFetchState int32

const (
	// FetchState_Nil means no fetch was attempted (e.g. the request failed
	// before reaching storage).
	FetchState_Nil ReqFetchState = iota
	FetchState_Success
	FetchState_NotFound
	FetchState_FetchError
	FetchState_ReadError
	FetchState_ConfigError
	// FetchState_Count is a sentinel used only for range validation.
	FetchState_Count
)
// String renders the fetch state as the short token used in log lines.
// These strings are matched by downstream log parsers; change with care.
func (rfs ReqFetchState) String() string {
	names := [...]string{
		FetchState_Nil:         "nil",
		FetchState_Success:     "ok",
		FetchState_NotFound:    "notfound",
		FetchState_FetchError:  "fetcherr",
		FetchState_ReadError:   "readerr",
		FetchState_ConfigError: "configerr",
	}
	if rfs >= 0 && int(rfs) < len(names) {
		return names[rfs]
	}
	// Covers FetchState_Count and any out-of-range value.
	return "unknown"
}
// ReqFetchSize records how much data came back from storage.
type ReqFetchSize struct {
	BodySize    int64 // bytes copied out of the storage response body
	BytesLength int64 // length of the buffered bytes
	BytesCap    int64 // capacity of the buffer (shows buffer reuse headroom)
}

// ReqStorageMetadata records which conditional-request headers storage
// supplied for the response.
type ReqStorageMetadata struct {
	HasLastModified bool
	HasEtag         bool
}

// ReqDuration holds the per-phase timings of one request.
type ReqDuration struct {
	Parse, StorageFetch, StorageRead, MetatileFind, RespWrite, Total time.Duration
}

// durations will be logged in milliseconds
// NOTE(review): JsonReqDuration is not referenced anywhere in this file —
// confirm it is used elsewhere before relying on it.
type JsonReqDuration struct {
	Parse        int64
	StorageFetch int64
	StorageRead  int64
	MetatileFind int64
	RespWrite    int64
	Total        int64
}

// RequestState aggregates everything recorded about a single request; it is
// flattened into one JSON log line and one batch of statsd metrics when the
// request completes.
type RequestState struct {
	ResponseState        ReqResponseState
	FetchState           ReqFetchState
	FetchSize            ReqFetchSize
	StorageMetadata      ReqStorageMetadata
	IsZipError           bool
	IsResponseWriteError bool
	IsCondError          bool
	Duration             ReqDuration
	Coord                *tapalcatl.TileCoord
	HttpData             *HttpRequestData
	ResponseSize         int
}
// convertDurationToMillis reports x as a whole number of milliseconds,
// truncating toward zero (sub-millisecond remainders are dropped).
func convertDurationToMillis(x time.Duration) int64 {
	return int64(x) / int64(time.Millisecond)
}
// AsJsonMap flattens the request state into a map for JSON logging. Optional
// keys are only emitted when the corresponding data is present, keeping log
// lines small. Key names are part of the log format contract.
func (reqState *RequestState) AsJsonMap() map[string]interface{} {
	result := make(map[string]interface{})
	result["response_state"] = reqState.ResponseState.AsStatusCode()
	result["fetch_state"] = reqState.FetchState.String()
	// Collect error flags; the "error" key is omitted entirely when no
	// errors occurred.
	reqStateErrs := make(map[string]bool)
	if reqState.IsZipError {
		reqStateErrs["zip"] = true
	}
	if reqState.IsResponseWriteError {
		reqStateErrs["response_write"] = true
	}
	if reqState.IsCondError {
		reqStateErrs["cond"] = true
	}
	if len(reqStateErrs) > 0 {
		result["error"] = reqStateErrs
	}
	if reqState.FetchSize.BodySize > 0 {
		result["fetch_size"] = map[string]int64{
			"body_size":    reqState.FetchSize.BodySize,
			"bytes_length": reqState.FetchSize.BytesLength,
			"bytes_cap":    reqState.FetchSize.BytesCap,
		}
	}
	// NOTE(review): "storageMetadata" is camelCase while every other key
	// here is snake_case — confirm whether downstream log parsers depend
	// on this before normalizing.
	result["storageMetadata"] = map[string]bool{
		"has_last_modified": reqState.StorageMetadata.HasLastModified,
		"has_etag":          reqState.StorageMetadata.HasEtag,
	}
	// Durations are logged as integer milliseconds.
	result["duration"] = map[string]int64{
		"parse":         convertDurationToMillis(reqState.Duration.Parse),
		"storage_fetch": convertDurationToMillis(reqState.Duration.StorageFetch),
		"storage_read":  convertDurationToMillis(reqState.Duration.StorageRead),
		"metatile_find": convertDurationToMillis(reqState.Duration.MetatileFind),
		"resp_write":    convertDurationToMillis(reqState.Duration.RespWrite),
		"total":         convertDurationToMillis(reqState.Duration.Total),
	}
	if reqState.Coord != nil {
		result["coord"] = map[string]int{
			"x": reqState.Coord.X,
			"y": reqState.Coord.Y,
			"z": reqState.Coord.Z,
		}
	}
	if reqState.HttpData != nil {
		httpJsonData := make(map[string]interface{})
		httpJsonData["path"] = reqState.HttpData.Path
		if userAgent := reqState.HttpData.UserAgent; userAgent != "" {
			httpJsonData["user_agent"] = userAgent
		}
		// "referer" (single r) follows the conventional HTTP header spelling.
		if referrer := reqState.HttpData.Referrer; referrer != "" {
			httpJsonData["referer"] = referrer
		}
		if apiKey := reqState.HttpData.ApiKey; apiKey != "" {
			httpJsonData["api_key"] = apiKey
		}
		if format := reqState.HttpData.Format; format != "" {
			httpJsonData["format"] = format
		}
		if responseSize := reqState.ResponseSize; responseSize > 0 {
			httpJsonData["response_size"] = responseSize
		}
		result["http"] = httpJsonData
	}
	return result
}
// metricsWriter consumes the per-request state after a response completes.
type metricsWriter interface {
	Write(*RequestState)
}

// nilMetricsWriter discards all metrics; used when metrics are not configured.
type nilMetricsWriter struct{}

func (_ *nilMetricsWriter) Write(reqState *RequestState) {}
// statsdMetricsWriter sends per-request metrics to a statsd daemon over UDP.
// Writes are queued and drained by a background goroutine so the request
// path never blocks on metrics.
type statsdMetricsWriter struct {
	addr   *net.UDPAddr
	prefix string // prepended (dot-separated) to every metric name; may be empty
	logger JsonLogger
	queue  chan *RequestState
}
// makeMetricPrefix joins an optional prefix and a metric name with a dot.
// An empty prefix yields the bare metric name.
func makeMetricPrefix(prefix string, metric string) string {
	if prefix == "" {
		return metric
	}
	return prefix + "." + metric
}
// makeStatsdLineCount formats a statsd counter line: "<name>:<value>|c".
func makeStatsdLineCount(prefix string, metric string, value int) string {
	return fmt.Sprintf("%s:%d|c\n", makeMetricPrefix(prefix, metric), value)
}

// makeStatsdLineGauge formats a statsd gauge line: "<name>:<value>|g".
func makeStatsdLineGauge(prefix string, metric string, value int) string {
	return fmt.Sprintf("%s:%d|g\n", makeMetricPrefix(prefix, metric), value)
}

// makeStatsdLineTimer formats a statsd timer line in milliseconds:
// "<name>:<millis>|ms".
func makeStatsdLineTimer(prefix string, metric string, value time.Duration) string {
	millis := convertDurationToMillis(value)
	return fmt.Sprintf("%s:%d|ms\n", makeMetricPrefix(prefix, metric), millis)
}
// writeStatsdCount writes a counter line to w. Write errors are ignored:
// metrics are best-effort over UDP.
func writeStatsdCount(w io.Writer, prefix string, metric string, value int) {
	w.Write([]byte(makeStatsdLineCount(prefix, metric, value)))
}

// writeStatsdGauge writes a gauge line to w (best-effort, errors ignored).
func writeStatsdGauge(w io.Writer, prefix string, metric string, value int) {
	w.Write([]byte(makeStatsdLineGauge(prefix, metric, value)))
}

// writeStatsdTimer writes a timer line to w (best-effort, errors ignored).
func writeStatsdTimer(w io.Writer, prefix string, metric string, value time.Duration) {
	w.Write([]byte(makeStatsdLineTimer(prefix, metric, value)))
}
// prefixedStatsdWriter binds a metric prefix to an output writer so call
// sites only supply the metric name and value.
type prefixedStatsdWriter struct {
	prefix string
	w      io.Writer
}

func (psw *prefixedStatsdWriter) WriteCount(metric string, value int) {
	writeStatsdCount(psw.w, psw.prefix, metric, value)
}

func (psw *prefixedStatsdWriter) WriteGauge(metric string, value int) {
	writeStatsdGauge(psw.w, psw.prefix, metric, value)
}

// WriteBool emits a count of 1 only when value is true; false emits nothing.
func (psw *prefixedStatsdWriter) WriteBool(metric string, value bool) {
	if value {
		psw.WriteCount(metric, 1)
	}
}

func (psw *prefixedStatsdWriter) WriteTimer(metric string, value time.Duration) {
	writeStatsdTimer(psw.w, psw.prefix, metric, value)
}
// Process sends one request's metrics to statsd over a fresh UDP connection.
// Output is buffered and flushed once at the end so the request's metrics
// leave in as few datagrams as possible.
func (smw *statsdMetricsWriter) Process(reqState *RequestState) {
	conn, err := net.DialUDP("udp", nil, smw.addr)
	if err != nil {
		smw.logger.Error(LogCategory_Metrics, "Metrics Writer failed to connect to %s: %s\n", smw.addr, err)
		return
	}
	defer conn.Close()
	w := bufio.NewWriter(conn)
	defer w.Flush()
	psw := prefixedStatsdWriter{
		prefix: smw.prefix,
		w:      w,
	}
	// Overall request counter.
	psw.WriteCount("count", 1)
	// Response state must be within the valid (non-nil) range; anything
	// else means the handler forgot to set it.
	if reqState.ResponseState > ResponseState_Nil && reqState.ResponseState < ResponseState_Count {
		respStateName := reqState.ResponseState.String()
		respMetricName := fmt.Sprintf("responsestate.%s", respStateName)
		psw.WriteCount(respMetricName, 1)
	} else {
		smw.logger.Error(LogCategory_InvalidCodeState, "Invalid response state: %d", int32(reqState.ResponseState))
	}
	// Fetch state is optional: FetchState_Nil is legitimate when no fetch
	// happened, so only out-of-range non-nil values are logged as bugs.
	if reqState.FetchState > FetchState_Nil && reqState.FetchState < FetchState_Count {
		fetchStateName := reqState.FetchState.String()
		fetchMetricName := fmt.Sprintf("fetchstate.%s", fetchStateName)
		psw.WriteCount(fetchMetricName, 1)
	} else if reqState.FetchState != FetchState_Nil {
		smw.logger.Error(LogCategory_InvalidCodeState, "Invalid fetch state: %d", int32(reqState.FetchState))
	}
	if reqState.FetchSize.BodySize > 0 {
		psw.WriteGauge("fetchsize.body-size", int(reqState.FetchSize.BodySize))
		psw.WriteGauge("fetchsize.buffer-length", int(reqState.FetchSize.BytesLength))
		psw.WriteGauge("fetchsize.buffer-capacity", int(reqState.FetchSize.BytesCap))
	}
	psw.WriteBool("counts.lastmodified", reqState.StorageMetadata.HasLastModified)
	psw.WriteBool("counts.etag", reqState.StorageMetadata.HasEtag)
	// NOTE(review): IsZipError is logged in the JSON output but has no
	// statsd counterpart here — confirm whether that is intentional.
	psw.WriteBool("errors.response-write-error", reqState.IsResponseWriteError)
	psw.WriteBool("errors.condition-parse-error", reqState.IsCondError)
	psw.WriteTimer("timers.parse", reqState.Duration.Parse)
	psw.WriteTimer("timers.storage-fetch", reqState.Duration.StorageFetch)
	psw.WriteTimer("timers.storage-read", reqState.Duration.StorageRead)
	psw.WriteTimer("timers.metatile-find", reqState.Duration.MetatileFind)
	psw.WriteTimer("timers.response-write", reqState.Duration.RespWrite)
	psw.WriteTimer("timers.total", reqState.Duration.Total)
	if reqState.HttpData != nil {
		if format := reqState.HttpData.Format; format != "" {
			psw.WriteCount(fmt.Sprintf("formats.%s", format), 1)
		}
	}
	if responseSize := reqState.ResponseSize; responseSize > 0 {
		psw.WriteGauge("response-size", responseSize)
	}
}
// Write enqueues the request state for asynchronous processing. The send is
// non-blocking: if the queue is full the metrics for this request are
// dropped (with a warning) rather than stalling the request path.
func (smw *statsdMetricsWriter) Write(reqState *RequestState) {
	select {
	case smw.queue <- reqState:
	default:
		smw.logger.Warning(LogCategory_Metrics, "Metrics Writer queue full\n")
	}
}
// NewStatsdMetricsWriter creates a metricsWriter that forwards request
// metrics to the statsd daemon at addr, prefixing every metric name with
// metricsPrefix. A single background goroutine drains the queue, so Write
// never blocks the caller.
func NewStatsdMetricsWriter(addr *net.UDPAddr, metricsPrefix string, logger JsonLogger) metricsWriter {
	const maxQueueSize = 4096
	smw := &statsdMetricsWriter{
		addr:   addr,
		prefix: metricsPrefix,
		logger: logger,
		queue:  make(chan *RequestState, maxQueueSize),
	}
	// Drain the queue for the life of the process; the channel is never
	// closed, so this goroutine runs until shutdown.
	go func() {
		for reqState := range smw.queue {
			smw.Process(reqState)
		}
	}()
	return smw
}
// MetatileHandler serves individual tiles extracted from metatiles fetched
// from storage. Every request's progress is recorded in a RequestState,
// which is logged as JSON and handed to the metrics writer when the request
// completes (via the deferred block below).
func MetatileHandler(p Parser, metatileSize, tileSize int, mimeMap map[string]string, storage Storage, bufferManager BufferManager, mw metricsWriter, logger JsonLogger) http.Handler {
	return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		reqState := RequestState{}
		numRequests.Add(1)
		startTime := time.Now()
		// Finalize timings, log, and emit metrics after the response is
		// written, whichever exit path was taken.
		defer func() {
			totalDuration := time.Since(startTime)
			reqState.Duration.Total = totalDuration
			// update expvar state
			updateCounters(totalDuration)
			if reqState.ResponseState == ResponseState_Nil {
				logger.Error(LogCategory_InvalidCodeState, "handler did not set response state")
			}
			jsonReqData := reqState.AsJsonMap()
			logger.Metrics(jsonReqData)
			// write out metrics
			mw.Write(&reqState)
		}()
		parseStart := time.Now()
		parseResult, err := p.Parse(req)
		reqState.Duration.Parse = time.Since(parseStart)
		if err != nil {
			requestParseErrors.Add(1)
			var sc int
			var response string
			if pe, ok := err.(*ParseError); ok {
				logger.Warning(LogCategory_ParseError, err.Error())
				if pe.MimeError != nil {
					sc = http.StatusNotFound
					reqState.ResponseState = ResponseState_NotFound
					response = pe.MimeError.Error()
				} else if pe.CoordError != nil {
					sc = http.StatusBadRequest
					reqState.ResponseState = ResponseState_BadRequest
					response = pe.CoordError.Error()
				} else if pe.CondError != nil {
					// Condition parse errors are non-fatal: sc stays 0 and
					// the request proceeds as if unconditional.
					reqState.IsCondError = true
					logger.Warning(LogCategory_ConditionError, pe.CondError.Error())
				}
			} else {
				logger.Error(LogCategory_ParseError, "Unknown parse error: %#v\n", err)
				sc = http.StatusInternalServerError
				response = "Internal server error"
				reqState.ResponseState = ResponseState_Error
			}
			// only return an error response when not a condition parse error
			// NOTE: maybe it's better to not consider this an error, but
			// capture it in the parse result state and handle it that way?
			if sc > 0 {
				http.Error(rw, response, sc)
				return
			}
		}
		reqState.Coord = &parseResult.Coord
		reqState.HttpData = &parseResult.HttpData
		metaCoord, offset, err := parseResult.Coord.MetaAndOffset(metatileSize, tileSize)
		if err != nil {
			configErrors.Add(1)
			logger.Warning(LogCategory_ConfigError, "MetaAndOffset could not be calculated: %s", err.Error())
			http.Error(rw, err.Error(), http.StatusInternalServerError)
			reqState.ResponseState = ResponseState_Error
			// Note: FetchState is left as nil, since no fetch was performed
			return
		}
		storageFetchStart := time.Now()
		storageResult, err := storage.Fetch(metaCoord, parseResult.Cond)
		reqState.Duration.StorageFetch = time.Since(storageFetchStart)
		if err != nil || storageResult.NotFound {
			if err != nil {
				storageFetchErrors.Add(1)
				logger.Warning(LogCategory_StorageError, "Metatile storage fetch failure: %#v", err)
				http.Error(rw, err.Error(), http.StatusInternalServerError)
				reqState.FetchState = FetchState_FetchError
				reqState.ResponseState = ResponseState_Error
			} else {
				numStorageMisses.Add(1)
				http.NotFound(rw, req)
				reqState.FetchState = FetchState_NotFound
				reqState.ResponseState = ResponseState_NotFound
			}
			return
		}
		numStorageHits.Add(1)
		reqState.FetchState = FetchState_Success
		// Conditional request satisfied: no body to send.
		if storageResult.NotModified {
			numStorageNotModified.Add(1)
			rw.WriteHeader(http.StatusNotModified)
			reqState.ResponseState = ResponseState_NotModified
			return
		}
		numStorageReads.Add(1)
		// metatile reader needs to be able to seek in the buffer and know
		// its size. the easiest way to ensure that is to buffer the whole
		// thing into memory.
		storageResp := storageResult.Response
		buf := bufferManager.Get()
		defer bufferManager.Put(buf)
		storageReadStart := time.Now()
		bodySize, err := io.Copy(buf, storageResp.Body)
		reqState.Duration.StorageRead = time.Since(storageReadStart)
		if err != nil {
			storageReadErrors.Add(1)
			logger.Error(LogCategory_StorageError, "Failed to read storage body: %#v", err)
			http.Error(rw, err.Error(), http.StatusInternalServerError)
			reqState.FetchState = FetchState_ReadError
			reqState.ResponseState = ResponseState_Error
			return
		}
		reqState.FetchState = FetchState_Success
		storageBytes := buf.Bytes()
		reqState.FetchSize.BodySize = bodySize
		reqState.FetchSize.BytesLength = int64(len(storageBytes))
		reqState.FetchSize.BytesCap = int64(cap(storageBytes))
		headers := rw.Header()
		headers.Set("Content-Type", parseResult.ContentType)
		if lastMod := storageResp.LastModified; lastMod != nil {
			// important! we must format times in an HTTP-compliant way, which
			// apparently doesn't match any existing Go time format string, so the
			// recommended way is to switch to UTC and use the format string that
			// the net/http package exposes.
			lastModifiedFormatted := lastMod.UTC().Format(http.TimeFormat)
			headers.Set("Last-Modified", lastModifiedFormatted)
			reqState.StorageMetadata.HasLastModified = true
		}
		if etag := storageResp.ETag; etag != nil {
			headers.Set("ETag", *etag)
			reqState.StorageMetadata.HasEtag = true
		}
		metatileReaderFindStart := time.Now()
		reader, formatSize, err := tapalcatl.NewMetatileReader(offset, bytes.NewReader(storageBytes), bodySize)
		reqState.Duration.MetatileFind = time.Since(metatileReaderFindStart)
		if err != nil {
			metatileReadErrors.Add(1)
			logger.Error(LogCategory_MetatileError, "Failed to read metatile: %#v", err)
			http.Error(rw, err.Error(), http.StatusInternalServerError)
			reqState.IsZipError = true
			reqState.ResponseState = ResponseState_Error
			return
		}
		reqState.ResponseSize = int(formatSize)
		// Headers are final after WriteHeader; any error past this point
		// can only be recorded, not reported to the client.
		rw.WriteHeader(http.StatusOK)
		reqState.ResponseState = ResponseState_Success
		respWriteStart := time.Now()
		_, err = io.Copy(rw, reader)
		reqState.Duration.RespWrite = time.Since(respWriteStart)
		if err != nil {
			responseWriteErrors.Add(1)
			logger.Error(LogCategory_ResponseError, "Failed to write response body: %#v", err)
			reqState.IsResponseWriteError = true
		}
	})
}
Group the json logging a little bit more
I updated the names and structure of the JSON log messages for the
request metrics. The storage and HTTP fields are now grouped together
more coherently, and some keys were renamed for clarity.
package main
import (
"bufio"
"bytes"
"fmt"
"github.com/tilezen/tapalcatl"
"io"
"net"
"net/http"
"time"
)
// HttpRequestData captures the request attributes that get logged with
// request metrics.
type HttpRequestData struct {
	Path      string
	ApiKey    string
	UserAgent string
	Referrer  string
	Format    string
}

// ParseResult is the outcome of parsing an incoming tile request.
type ParseResult struct {
	Coord       tapalcatl.TileCoord
	Cond        Condition
	ContentType string
	HttpData    HttpRequestData
}

// Parser turns an HTTP request into a ParseResult.
type Parser interface {
	Parse(*http.Request) (*ParseResult, error)
}

// BufferManager hands out reusable byte buffers and takes them back when
// the caller is done with them.
type BufferManager interface {
	Get() *bytes.Buffer
	Put(*bytes.Buffer)
}
// the reqState structures and string generation serve to emit a single log entry line
// a log parser will pick this up and use it to persist metrics
// the string functions here are specific to the format used and should be updated with care

// ReqResponseState describes how the HTTP response concluded.
type ReqResponseState int32

const (
	// ResponseState_Nil means the handler never set a state; the handler
	// logs this as a code bug.
	ResponseState_Nil ReqResponseState = iota
	ResponseState_Success
	ResponseState_NotModified
	ResponseState_NotFound
	ResponseState_BadRequest
	ResponseState_Error
	// ResponseState_Count is a sentinel used only for range validation.
	ResponseState_Count
)
// String renders the response state as the short token used in log lines.
// These strings are matched by downstream log parsers; change with care.
func (rrs ReqResponseState) String() string {
	names := [...]string{
		ResponseState_Nil:         "nil",
		ResponseState_Success:     "ok",
		ResponseState_NotModified: "notmod",
		ResponseState_NotFound:    "notfound",
		ResponseState_BadRequest:  "badreq",
		ResponseState_Error:       "err",
	}
	if rrs >= 0 && int(rrs) < len(names) {
		return names[rrs]
	}
	// Covers ResponseState_Count and any out-of-range value.
	return "unknown"
}
// AsStatusCode maps a response state to the HTTP status code it represents.
// The unset (nil) state maps to 0; unrecognized values map to -1.
func (rrs ReqResponseState) AsStatusCode() int {
	codes := map[ReqResponseState]int{
		ResponseState_Nil:         0,
		ResponseState_Success:     200,
		ResponseState_NotModified: 304,
		ResponseState_NotFound:    404,
		ResponseState_BadRequest:  400,
		ResponseState_Error:       500,
	}
	if code, ok := codes[rrs]; ok {
		return code
	}
	return -1
}
// ReqFetchState describes how the storage fetch for a request concluded.
type ReqFetchState int32

const (
	// FetchState_Nil means no fetch was attempted (e.g. the request failed
	// before reaching storage).
	FetchState_Nil ReqFetchState = iota
	FetchState_Success
	FetchState_NotFound
	FetchState_FetchError
	FetchState_ReadError
	FetchState_ConfigError
	// FetchState_Count is a sentinel used only for range validation.
	FetchState_Count
)
// String renders the fetch state as the short token used in log lines.
// These strings are matched by downstream log parsers; change with care.
func (rfs ReqFetchState) String() string {
	names := [...]string{
		FetchState_Nil:         "nil",
		FetchState_Success:     "ok",
		FetchState_NotFound:    "notfound",
		FetchState_FetchError:  "fetcherr",
		FetchState_ReadError:   "readerr",
		FetchState_ConfigError: "configerr",
	}
	if rfs >= 0 && int(rfs) < len(names) {
		return names[rfs]
	}
	// Covers FetchState_Count and any out-of-range value.
	return "unknown"
}
// ReqFetchSize records how much data came back from storage.
type ReqFetchSize struct {
	BodySize    int64 // bytes copied out of the storage response body
	BytesLength int64 // length of the buffered bytes
	BytesCap    int64 // capacity of the buffer (shows buffer reuse headroom)
}

// ReqStorageMetadata records which conditional-request headers storage
// supplied for the response.
type ReqStorageMetadata struct {
	HasLastModified bool
	HasEtag         bool
}

// ReqDuration holds the per-phase timings of one request.
type ReqDuration struct {
	Parse, StorageFetch, StorageRead, MetatileFind, RespWrite, Total time.Duration
}

// durations will be logged in milliseconds
// NOTE(review): JsonReqDuration is not referenced anywhere in this file —
// confirm it is used elsewhere before relying on it.
type JsonReqDuration struct {
	Parse        int64
	StorageFetch int64
	StorageRead  int64
	MetatileFind int64
	RespWrite    int64
	Total        int64
}

// RequestState aggregates everything recorded about a single request; it is
// flattened into one JSON log line and one batch of statsd metrics when the
// request completes. HttpData is held by value here (unlike the pointer
// Coord), so it is always present in the JSON output.
type RequestState struct {
	ResponseState        ReqResponseState
	FetchState           ReqFetchState
	FetchSize            ReqFetchSize
	StorageMetadata      ReqStorageMetadata
	IsZipError           bool
	IsResponseWriteError bool
	IsCondError          bool
	Duration             ReqDuration
	Coord                *tapalcatl.TileCoord
	HttpData             HttpRequestData
	ResponseSize         int
}
// convertDurationToMillis reports x as a whole number of milliseconds,
// truncating toward zero (sub-millisecond remainders are dropped).
func convertDurationToMillis(x time.Duration) int64 {
	return int64(x) / int64(time.Millisecond)
}
// AsJsonMap flattens the request state into a map for JSON logging. Fetch
// details are grouped under "fetch", timings under "timing", and all
// request/response data under "http". Key names are part of the log format
// contract with downstream parsers.
func (reqState *RequestState) AsJsonMap() map[string]interface{} {
	result := make(map[string]interface{})
	// The "fetch" group is only emitted when a fetch was actually attempted.
	if reqState.FetchState > FetchState_Nil {
		fetchResult := make(map[string]interface{})
		fetchResult["state"] = reqState.FetchState.String()
		if reqState.FetchSize.BodySize > 0 {
			fetchResult["size"] = map[string]int64{
				"body":      reqState.FetchSize.BodySize,
				"bytes_len": reqState.FetchSize.BytesLength,
				"bytes_cap": reqState.FetchSize.BytesCap,
			}
		}
		fetchResult["metadata"] = map[string]bool{
			"has_last_modified": reqState.StorageMetadata.HasLastModified,
			"has_etag":          reqState.StorageMetadata.HasEtag,
		}
		result["fetch"] = fetchResult
	}
	// Collect error flags; the "error" key is omitted entirely when no
	// errors occurred.
	reqStateErrs := make(map[string]bool)
	if reqState.IsZipError {
		reqStateErrs["zip"] = true
	}
	if reqState.IsResponseWriteError {
		reqStateErrs["response_write"] = true
	}
	if reqState.IsCondError {
		reqStateErrs["cond"] = true
	}
	if len(reqStateErrs) > 0 {
		result["error"] = reqStateErrs
	}
	// Durations are logged as integer milliseconds.
	result["timing"] = map[string]int64{
		"parse":         convertDurationToMillis(reqState.Duration.Parse),
		"storage_fetch": convertDurationToMillis(reqState.Duration.StorageFetch),
		"storage_read":  convertDurationToMillis(reqState.Duration.StorageRead),
		"metatile_find": convertDurationToMillis(reqState.Duration.MetatileFind),
		"resp_write":    convertDurationToMillis(reqState.Duration.RespWrite),
		"total":         convertDurationToMillis(reqState.Duration.Total),
	}
	if reqState.Coord != nil {
		result["coord"] = map[string]int{
			"x": reqState.Coord.X,
			"y": reqState.Coord.Y,
			"z": reqState.Coord.Z,
		}
	}
	httpJsonData := make(map[string]interface{})
	httpJsonData["path"] = reqState.HttpData.Path
	if userAgent := reqState.HttpData.UserAgent; userAgent != "" {
		httpJsonData["user_agent"] = userAgent
	}
	// "referer" (single r) follows the conventional HTTP header spelling.
	if referrer := reqState.HttpData.Referrer; referrer != "" {
		httpJsonData["referer"] = referrer
	}
	if apiKey := reqState.HttpData.ApiKey; apiKey != "" {
		httpJsonData["api_key"] = apiKey
	}
	if format := reqState.HttpData.Format; format != "" {
		httpJsonData["format"] = format
	}
	if responseSize := reqState.ResponseSize; responseSize > 0 {
		httpJsonData["response_size"] = responseSize
	}
	httpJsonData["status"] = reqState.ResponseState.AsStatusCode()
	result["http"] = httpJsonData
	return result
}
// metricsWriter consumes the per-request state after a response completes.
type metricsWriter interface {
	Write(*RequestState)
}

// nilMetricsWriter discards all metrics; used when metrics are not configured.
type nilMetricsWriter struct{}

func (_ *nilMetricsWriter) Write(reqState *RequestState) {}
// statsdMetricsWriter sends per-request metrics to a statsd daemon over UDP.
// Writes are queued and drained by a background goroutine so the request
// path never blocks on metrics.
type statsdMetricsWriter struct {
	addr   *net.UDPAddr
	prefix string // prepended (dot-separated) to every metric name; may be empty
	logger JsonLogger
	queue  chan *RequestState
}
// makeMetricPrefix joins an optional prefix and a metric name with a dot.
// An empty prefix yields the bare metric name.
func makeMetricPrefix(prefix string, metric string) string {
	if prefix == "" {
		return metric
	}
	return prefix + "." + metric
}
// makeStatsdLineCount formats a statsd counter line: "<name>:<value>|c".
func makeStatsdLineCount(prefix string, metric string, value int) string {
	return fmt.Sprintf("%s:%d|c\n", makeMetricPrefix(prefix, metric), value)
}

// makeStatsdLineGauge formats a statsd gauge line: "<name>:<value>|g".
func makeStatsdLineGauge(prefix string, metric string, value int) string {
	return fmt.Sprintf("%s:%d|g\n", makeMetricPrefix(prefix, metric), value)
}

// makeStatsdLineTimer formats a statsd timer line in milliseconds:
// "<name>:<millis>|ms".
func makeStatsdLineTimer(prefix string, metric string, value time.Duration) string {
	millis := convertDurationToMillis(value)
	return fmt.Sprintf("%s:%d|ms\n", makeMetricPrefix(prefix, metric), millis)
}
// writeStatsdCount writes a counter line to w. Write errors are ignored:
// metrics are best-effort over UDP.
func writeStatsdCount(w io.Writer, prefix string, metric string, value int) {
	w.Write([]byte(makeStatsdLineCount(prefix, metric, value)))
}

// writeStatsdGauge writes a gauge line to w (best-effort, errors ignored).
func writeStatsdGauge(w io.Writer, prefix string, metric string, value int) {
	w.Write([]byte(makeStatsdLineGauge(prefix, metric, value)))
}

// writeStatsdTimer writes a timer line to w (best-effort, errors ignored).
func writeStatsdTimer(w io.Writer, prefix string, metric string, value time.Duration) {
	w.Write([]byte(makeStatsdLineTimer(prefix, metric, value)))
}
// prefixedStatsdWriter binds a metric prefix to an output writer so call
// sites only supply the metric name and value.
type prefixedStatsdWriter struct {
	prefix string
	w      io.Writer
}

func (psw *prefixedStatsdWriter) WriteCount(metric string, value int) {
	writeStatsdCount(psw.w, psw.prefix, metric, value)
}

func (psw *prefixedStatsdWriter) WriteGauge(metric string, value int) {
	writeStatsdGauge(psw.w, psw.prefix, metric, value)
}

// WriteBool emits a count of 1 only when value is true; false emits nothing.
func (psw *prefixedStatsdWriter) WriteBool(metric string, value bool) {
	if value {
		psw.WriteCount(metric, 1)
	}
}

func (psw *prefixedStatsdWriter) WriteTimer(metric string, value time.Duration) {
	writeStatsdTimer(psw.w, psw.prefix, metric, value)
}
// Process sends one request's metrics to statsd over a fresh UDP connection.
// Output is buffered and flushed once at the end so the request's metrics
// leave in as few datagrams as possible.
func (smw *statsdMetricsWriter) Process(reqState *RequestState) {
	conn, err := net.DialUDP("udp", nil, smw.addr)
	if err != nil {
		smw.logger.Error(LogCategory_Metrics, "Metrics Writer failed to connect to %s: %s\n", smw.addr, err)
		return
	}
	defer conn.Close()
	w := bufio.NewWriter(conn)
	defer w.Flush()
	psw := prefixedStatsdWriter{
		prefix: smw.prefix,
		w:      w,
	}
	// Overall request counter.
	psw.WriteCount("count", 1)
	// Response state must be within the valid (non-nil) range; anything
	// else means the handler forgot to set it.
	if reqState.ResponseState > ResponseState_Nil && reqState.ResponseState < ResponseState_Count {
		respStateName := reqState.ResponseState.String()
		respMetricName := fmt.Sprintf("responsestate.%s", respStateName)
		psw.WriteCount(respMetricName, 1)
	} else {
		smw.logger.Error(LogCategory_InvalidCodeState, "Invalid response state: %d", int32(reqState.ResponseState))
	}
	// Fetch state is optional: FetchState_Nil is legitimate when no fetch
	// happened, so only out-of-range non-nil values are logged as bugs.
	if reqState.FetchState > FetchState_Nil && reqState.FetchState < FetchState_Count {
		fetchStateName := reqState.FetchState.String()
		fetchMetricName := fmt.Sprintf("fetchstate.%s", fetchStateName)
		psw.WriteCount(fetchMetricName, 1)
	} else if reqState.FetchState != FetchState_Nil {
		smw.logger.Error(LogCategory_InvalidCodeState, "Invalid fetch state: %d", int32(reqState.FetchState))
	}
	if reqState.FetchSize.BodySize > 0 {
		psw.WriteGauge("fetchsize.body-size", int(reqState.FetchSize.BodySize))
		psw.WriteGauge("fetchsize.buffer-length", int(reqState.FetchSize.BytesLength))
		psw.WriteGauge("fetchsize.buffer-capacity", int(reqState.FetchSize.BytesCap))
	}
	psw.WriteBool("counts.lastmodified", reqState.StorageMetadata.HasLastModified)
	psw.WriteBool("counts.etag", reqState.StorageMetadata.HasEtag)
	// NOTE(review): IsZipError is logged in the JSON output but has no
	// statsd counterpart here — confirm whether that is intentional.
	psw.WriteBool("errors.response-write-error", reqState.IsResponseWriteError)
	psw.WriteBool("errors.condition-parse-error", reqState.IsCondError)
	psw.WriteTimer("timers.parse", reqState.Duration.Parse)
	psw.WriteTimer("timers.storage-fetch", reqState.Duration.StorageFetch)
	psw.WriteTimer("timers.storage-read", reqState.Duration.StorageRead)
	psw.WriteTimer("timers.metatile-find", reqState.Duration.MetatileFind)
	psw.WriteTimer("timers.response-write", reqState.Duration.RespWrite)
	psw.WriteTimer("timers.total", reqState.Duration.Total)
	if format := reqState.HttpData.Format; format != "" {
		psw.WriteCount(fmt.Sprintf("formats.%s", format), 1)
	}
	if responseSize := reqState.ResponseSize; responseSize > 0 {
		psw.WriteGauge("response-size", responseSize)
	}
}
// Write enqueues the request state for asynchronous processing. The send is
// non-blocking: if the queue is full the metrics for this request are
// dropped (with a warning) rather than stalling the request path.
func (smw *statsdMetricsWriter) Write(reqState *RequestState) {
	select {
	case smw.queue <- reqState:
	default:
		smw.logger.Warning(LogCategory_Metrics, "Metrics Writer queue full\n")
	}
}
// NewStatsdMetricsWriter creates a metricsWriter that forwards request
// metrics to the statsd daemon at addr, prefixing every metric name with
// metricsPrefix. A single background goroutine drains the queue, so Write
// never blocks the caller.
func NewStatsdMetricsWriter(addr *net.UDPAddr, metricsPrefix string, logger JsonLogger) metricsWriter {
	const maxQueueSize = 4096
	smw := &statsdMetricsWriter{
		addr:   addr,
		prefix: metricsPrefix,
		logger: logger,
		queue:  make(chan *RequestState, maxQueueSize),
	}
	// Drain the queue for the life of the process; the channel is never
	// closed, so this goroutine runs until shutdown.
	go func() {
		for reqState := range smw.queue {
			smw.Process(reqState)
		}
	}()
	return smw
}
// MetatileHandler returns an http.Handler that serves single tiles cut
// out of stored metatiles. Per request it parses the tile coordinate,
// translates it into a metatile coordinate plus intra-metatile offset,
// fetches the metatile from storage, extracts the requested tile and
// streams it back — recording timings, sizes and outcome states into a
// RequestState that is logged and handed to the metrics writer.
func MetatileHandler(p Parser, metatileSize, tileSize int, mimeMap map[string]string, storage Storage, bufferManager BufferManager, mw metricsWriter, logger JsonLogger) http.Handler {
	return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		reqState := RequestState{}
		numRequests.Add(1)
		startTime := time.Now()
		// Deferred bookkeeping runs on every exit path: stamp the total
		// duration, log the request state, and ship it to the metrics
		// writer.
		defer func() {
			totalDuration := time.Since(startTime)
			reqState.Duration.Total = totalDuration
			// update expvar state
			updateCounters(totalDuration)
			if reqState.ResponseState == ResponseState_Nil {
				logger.Error(LogCategory_InvalidCodeState, "handler did not set response state")
			}
			jsonReqData := reqState.AsJsonMap()
			logger.Metrics(jsonReqData)
			// write out metrics
			mw.Write(&reqState)
		}()
		parseStart := time.Now()
		parseResult, err := p.Parse(req)
		reqState.Duration.Parse = time.Since(parseStart)
		if err != nil {
			requestParseErrors.Add(1)
			var sc int
			var response string
			if pe, ok := err.(*ParseError); ok {
				logger.Warning(LogCategory_ParseError, err.Error())
				if pe.MimeError != nil {
					sc = http.StatusNotFound
					reqState.ResponseState = ResponseState_NotFound
					response = pe.MimeError.Error()
				} else if pe.CoordError != nil {
					sc = http.StatusBadRequest
					reqState.ResponseState = ResponseState_BadRequest
					response = pe.CoordError.Error()
				} else if pe.CondError != nil {
					// Condition parse errors are non-fatal: sc stays 0
					// and handling continues below without the
					// condition.
					reqState.IsCondError = true
					logger.Warning(LogCategory_ConditionError, pe.CondError.Error())
				}
			} else {
				logger.Error(LogCategory_ParseError, "Unknown parse error: %#v\n", err)
				sc = http.StatusInternalServerError
				response = "Internal server error"
				reqState.ResponseState = ResponseState_Error
			}
			// only return an error response when not a condition parse error
			// NOTE: maybe it's better to not consider this an error, but
			// capture it in the parse result state and handle it that way?
			if sc > 0 {
				http.Error(rw, response, sc)
				return
			}
		}
		reqState.Coord = &parseResult.Coord
		reqState.HttpData = parseResult.HttpData
		metaCoord, offset, err := parseResult.Coord.MetaAndOffset(metatileSize, tileSize)
		if err != nil {
			configErrors.Add(1)
			logger.Warning(LogCategory_ConfigError, "MetaAndOffset could not be calculated: %s", err.Error())
			http.Error(rw, err.Error(), http.StatusInternalServerError)
			reqState.ResponseState = ResponseState_Error
			// Note: FetchState is left as nil, since no fetch was performed
			return
		}
		storageFetchStart := time.Now()
		storageResult, err := storage.Fetch(metaCoord, parseResult.Cond)
		reqState.Duration.StorageFetch = time.Since(storageFetchStart)
		if err != nil || storageResult.NotFound {
			if err != nil {
				storageFetchErrors.Add(1)
				logger.Warning(LogCategory_StorageError, "Metatile storage fetch failure: %#v", err)
				http.Error(rw, err.Error(), http.StatusInternalServerError)
				reqState.FetchState = FetchState_FetchError
				reqState.ResponseState = ResponseState_Error
			} else {
				numStorageMisses.Add(1)
				http.NotFound(rw, req)
				reqState.FetchState = FetchState_NotFound
				reqState.ResponseState = ResponseState_NotFound
			}
			return
		}
		numStorageHits.Add(1)
		reqState.FetchState = FetchState_Success
		// Conditional request satisfied by storage: 304, no body.
		if storageResult.NotModified {
			numStorageNotModified.Add(1)
			rw.WriteHeader(http.StatusNotModified)
			reqState.ResponseState = ResponseState_NotModified
			return
		}
		numStorageReads.Add(1)
		// metatile reader needs to be able to seek in the buffer and know
		// its size. the easiest way to ensure that is to buffer the whole
		// thing into memory.
		storageResp := storageResult.Response
		buf := bufferManager.Get()
		defer bufferManager.Put(buf)
		storageReadStart := time.Now()
		bodySize, err := io.Copy(buf, storageResp.Body)
		reqState.Duration.StorageRead = time.Since(storageReadStart)
		if err != nil {
			storageReadErrors.Add(1)
			logger.Error(LogCategory_StorageError, "Failed to read storage body: %#v", err)
			http.Error(rw, err.Error(), http.StatusInternalServerError)
			reqState.FetchState = FetchState_ReadError
			reqState.ResponseState = ResponseState_Error
			return
		}
		reqState.FetchState = FetchState_Success
		storageBytes := buf.Bytes()
		reqState.FetchSize.BodySize = bodySize
		reqState.FetchSize.BytesLength = int64(len(storageBytes))
		reqState.FetchSize.BytesCap = int64(cap(storageBytes))
		headers := rw.Header()
		headers.Set("Content-Type", parseResult.ContentType)
		if lastMod := storageResp.LastModified; lastMod != nil {
			// important! we must format times in an HTTP-compliant way, which
			// apparently doesn't match any existing Go time format string, so the
			// recommended way is to switch to UTC and use the format string that
			// the net/http package exposes.
			lastModifiedFormatted := lastMod.UTC().Format(http.TimeFormat)
			headers.Set("Last-Modified", lastModifiedFormatted)
			reqState.StorageMetadata.HasLastModified = true
		}
		if etag := storageResp.ETag; etag != nil {
			headers.Set("ETag", *etag)
			reqState.StorageMetadata.HasEtag = true
		}
		metatileReaderFindStart := time.Now()
		reader, formatSize, err := tapalcatl.NewMetatileReader(offset, bytes.NewReader(storageBytes), bodySize)
		reqState.Duration.MetatileFind = time.Since(metatileReaderFindStart)
		if err != nil {
			metatileReadErrors.Add(1)
			logger.Error(LogCategory_MetatileError, "Failed to read metatile: %#v", err)
			http.Error(rw, err.Error(), http.StatusInternalServerError)
			reqState.IsZipError = true
			reqState.ResponseState = ResponseState_Error
			return
		}
		reqState.ResponseSize = int(formatSize)
		// Headers are committed here; any failure past this point can
		// only be recorded, not reported to the client.
		rw.WriteHeader(http.StatusOK)
		reqState.ResponseState = ResponseState_Success
		respWriteStart := time.Now()
		_, err = io.Copy(rw, reader)
		reqState.Duration.RespWrite = time.Since(respWriteStart)
		if err != nil {
			responseWriteErrors.Add(1)
			logger.Error(LogCategory_ResponseError, "Failed to write response body: %#v", err)
			reqState.IsResponseWriteError = true
		}
	})
}
|
package cwl
import (
"fmt"
"io/ioutil"
"sort"
"strings"
)
// Input represents "CommandInputParameter".
// @see http://www.commonwl.org/v1.0/CommandLineTool.html#CommandInputParameter
type Input struct {
	ID      string        `json:"id"`
	Label   string        `json:"label"`
	Doc     string        `json:"doc"`
	Format  string        `json:"format"`
	Binding *Binding      `json:"inputBinding"`
	Default *InputDefault `json:"default"`
	Types   []Type        `json:"type"`
	// NOTE(review): CWL spells this key "secondaryFiles" (and New reads
	// that spelling); the "secondary_files" tag looks inconsistent —
	// confirm intended.
	SecondaryFiles []SecondaryFile `json:"secondary_files"`
	// Input.Provided is what provided by parameters.(json|yaml)
	Provided interface{} `json:"-"`
	// RequiredType and Requirements carry schema information resolved
	// elsewhere (used by flattenWithRequiredType).
	RequiredType *Type
	Requirements Requirements
}
// New constructs an Input from a raw decoded value, which may be a map
// of parameter fields, a bare type-name string, or a list of types.
func (_ Input) New(i interface{}) Input {
	in := Input{}
	switch t := i.(type) {
	case map[string]interface{}:
		for field, val := range t {
			switch field {
			case "id":
				in.ID = val.(string)
			case "label":
				in.Label = val.(string)
			case "doc":
				in.Doc = val.(string)
			case "format":
				in.Format = val.(string)
			case "type":
				in.Types = Type{}.NewList(val)
			case "inputBinding":
				in.Binding = Binding{}.New(val)
			case "default":
				in.Default = InputDefault{}.New(val)
			case "secondaryFiles":
				in.SecondaryFiles = SecondaryFile{}.NewList(val)
			}
		}
	case string:
		in.Types = Type{}.NewList(t)
	case []interface{}:
		for _, entry := range t {
			in.Types = append(in.Types, Type{}.New(entry))
		}
	}
	return in
}
// flatten converts the provided value for an array-typed input into the
// command-line fragments it contributes, dispatching on the element
// type of the array. binding is the inputBinding attached to the
// array's items (may be nil).
func (input Input) flatten(typ Type, binding *Binding) []string {
	flattened := []string{}
	switch typ.Type {
	case "int": // Array of Int
		// Int elements are joined into one argument using the
		// input-level binding separator.
		// NOTE(review): reads input.Binding/input.Provided rather than
		// the binding parameter — panics if input.Binding is nil or
		// Provided is not []interface{}; confirm callers guarantee both.
		tobejoined := []string{}
		for _, e := range input.Provided.([]interface{}) {
			tobejoined = append(tobejoined, fmt.Sprintf("%v", e))
		}
		flattened = append(flattened, strings.Join(tobejoined, input.Binding.Separator))
	case "File": // Array of Files
		switch arr := input.Provided.(type) {
		case []string:
			// TODO:
		case []interface{}:
			separated := []string{}
			for _, e := range arr {
				switch v := e.(type) {
				case map[interface{}]interface{}:
					// Each file contributes its item binding prefix (if
					// any) followed by its "location" value.
					if binding != nil && binding.Prefix != "" {
						separated = append(separated, binding.Prefix)
					}
					separated = append(separated, fmt.Sprintf("%v", v["location"]))
				default:
					// TODO:
				}
			}
			// In case it's Array of Files, unlike array of int,
			// it's NOT gonna be joined with .Binding.Separator.
			flattened = append(flattened, separated...)
		}
	default:
		// Non-primitive element types fall back to schema-driven
		// flattening when a required type is attached.
		if input.RequiredType != nil {
			flattened = append(flattened, input.flattenWithRequiredType()...)
		} else {
			// TODO
		}
	}
	return flattened
}
// flattenWithRequiredType flattens the provided value using the schema
// recorded in RequiredType (from a SchemaDefRequirement), emitting
// fields in the schema's declared order with their per-field bindings.
func (input Input) flattenWithRequiredType() []string {
	flattened := []string{}
	// Only applies when the input's first type references a named
	// requirement and that name matches the attached schema.
	key, needed := input.Types[0].NeedRequirement()
	if !needed {
		return flattened
	}
	if input.RequiredType.Name != key {
		return flattened
	}
	switch provided := input.Provided.(type) {
	case []interface{}:
		for _, e := range provided {
			switch v := e.(type) {
			case map[interface{}]interface{}:
				// Walk schema fields in declared order, looking up the
				// provided value for each.
				for _, field := range input.RequiredType.Fields {
					if val, ok := v[field.Name]; ok {
						if field.Binding == nil {
							// Without thinking anything, just append it!!!
							flattened = append(flattened, fmt.Sprintf("%v", val))
						} else {
							if field.Binding.Prefix != "" {
								if field.Binding.Separate {
									flattened = append(flattened, field.Binding.Prefix, fmt.Sprintf("%v", val))
								} else {
									// TODO: Join if .Separator is given
									flattened = append(flattened, fmt.Sprintf("%s%v", field.Binding.Prefix, val))
								}
							} else {
								// No prefix: the value may itself be a
								// nested list of records typed by the
								// schema definitions.
								switch v2 := val.(type) {
								case []interface{}:
									for _, val2 := range v2 {
										switch v3 := val2.(type) {
										case []interface{}:
										case map[interface{}]interface{}:
											// NOTE(review): only Requirements[0] is
											// consulted — confirm a single
											// SchemaDefRequirement is guaranteed here.
											for _, types := range input.Requirements[0].SchemaDefRequirement.Types {
												val3array := []string{}
												var val3count int = 0
												// Emit record fields in sorted order.
												sort.Sort(types.Fields)
												for _, fields := range types.Fields {
													for key3, val3 := range v3 {
														if fields.Name == key3 {
															for _, val3type := range fields.Types {
																if val3type.Type == "" {
																} else {
																	switch val3type.Type {
																	case "enum":
																		// Accept only values listed in the enum's symbols.
																		for _, symbol := range val3type.Symbols {
																			if symbol == val3 {
																				val3array = append(val3array, fmt.Sprintf("%v", val3))
																				val3count = val3count + 1
																			}
																		}
																	case "int":
																		if fields.Binding.Prefix != "" {
																			val3array = append(val3array, fields.Binding.Prefix, fmt.Sprintf("%v", val3))
																			val3count = val3count + 1
																		} else {
																			val3array = append(val3array, fmt.Sprintf("%v", val3))
																			val3count = val3count + 1
																		}
																	}
																}
															}
														}
													}
												}
												// Emit only if every key of the record
												// matched this schema type.
												if len(v3) == val3count {
													flattened = append(flattened, val3array...)
												}
											}
										}
									}
								}
							}
						}
					}
				}
			}
		}
	}
	return flattened
}
// Flatten converts this input's provided value (or its default when
// nothing was provided) into the command-line arguments it contributes.
func (input Input) Flatten() []string {
	if input.Provided == nil {
		// In case "input.Default == nil" should be validated by usage layer.
		if input.Default != nil {
			return input.Default.Flatten(input.Binding)
		} else {
			return []string{}
		}
	}
	flattened := []string{}
	// Only single-type inputs are handled; multi-type (union) inputs
	// fall through and contribute nothing here.
	if repr := input.Types[0]; len(input.Types) == 1 {
		switch repr.Type {
		case "array":
			flattened = append(flattened, input.flatten(repr.Items[0], repr.Binding)...)
		case "int":
			flattened = append(flattened, fmt.Sprintf("%v", input.Provided.(int)))
		case "File":
			switch provided := input.Provided.(type) {
			case map[interface{}]interface{}:
				// TODO: more strict type casting
				if provided["location"] != nil {
					flattened = append(flattened, fmt.Sprintf("%v", provided["location"]))
				} else {
					// No location: materialize "contents" into a temp
					// file and pass its path instead.
					// NOTE(review): the TempFile error is discarded —
					// fp would be nil on failure; confirm acceptable.
					fp, _ := ioutil.TempFile("", "ioutil")
					fpath := fp.Name()
					flattened = append(flattened, fpath)
					fp.Write([]byte(fmt.Sprintf("%v", provided["contents"])))
					// TODO: remove after finish program
					defer fp.Close()
				}
			default:
			}
		default:
			flattened = append(flattened, fmt.Sprintf("%v", input.Provided))
		}
	}
	// A prefix on the input-level binding is prepended as its own
	// argument.
	if input.Binding != nil && input.Binding.Prefix != "" {
		flattened = append([]string{input.Binding.Prefix}, flattened...)
	}
	return flattened
}
// Inputs represents "inputs" field in CWL.
type Inputs []Input

// New constructs an Inputs list from a raw decoded value, which is
// either a sequence of inputs or a map keyed by input ID.
func (_ Inputs) New(i interface{}) Inputs {
	out := Inputs{}
	switch raw := i.(type) {
	case []interface{}:
		for _, entry := range raw {
			out = append(out, Input{}.New(entry))
		}
	case map[string]interface{}:
		for id, entry := range raw {
			in := Input{}.New(entry)
			in.ID = id
			out = append(out, in)
		}
	}
	return out
}
// Len reports the number of inputs (sort.Interface).
func (ins Inputs) Len() int {
	return len(ins)
}

// Less orders inputs by their binding positions (sort.Interface).
// Inputs without a binding are ordered by the sign of the other side's
// position.
func (ins Inputs) Less(i, j int) bool {
	a, b := ins[i].Binding, ins[j].Binding
	if a == nil && b == nil {
		return true
	}
	if b == nil {
		return a.Position < 0
	}
	if a == nil {
		return b.Position > 0
	}
	return a.Position <= b.Position
}

// Swap exchanges two inputs (sort.Interface).
func (ins Inputs) Swap(i, j int) {
	ins[i], ins[j] = ins[j], ins[i]
}
Support nested array of inputs
for conformance test 86
cwl/v1.0/v1.0/nested-array.cwl cwl/v1.0/v1.0/nested-array-job.yml
package cwl
import (
"fmt"
"io/ioutil"
"sort"
"strings"
)
// Input represents "CommandInputParameter".
// @see http://www.commonwl.org/v1.0/CommandLineTool.html#CommandInputParameter
type Input struct {
	ID      string        `json:"id"`
	Label   string        `json:"label"`
	Doc     string        `json:"doc"`
	Format  string        `json:"format"`
	Binding *Binding      `json:"inputBinding"`
	Default *InputDefault `json:"default"`
	Types   []Type        `json:"type"`
	// NOTE(review): CWL spells this key "secondaryFiles" (and New reads
	// that spelling); the "secondary_files" tag looks inconsistent —
	// confirm intended.
	SecondaryFiles []SecondaryFile `json:"secondary_files"`
	// Input.Provided is what provided by parameters.(json|yaml)
	Provided interface{} `json:"-"`
	// RequiredType and Requirements carry schema information resolved
	// elsewhere (used by flattenWithRequiredType).
	RequiredType *Type
	Requirements Requirements
}
// New constructs an Input from a raw decoded value, which may be a map
// of parameter fields, a bare type-name string, or a list of types.
func (_ Input) New(i interface{}) Input {
	in := Input{}
	switch t := i.(type) {
	case map[string]interface{}:
		for field, val := range t {
			switch field {
			case "id":
				in.ID = val.(string)
			case "label":
				in.Label = val.(string)
			case "doc":
				in.Doc = val.(string)
			case "format":
				in.Format = val.(string)
			case "type":
				in.Types = Type{}.NewList(val)
			case "inputBinding":
				in.Binding = Binding{}.New(val)
			case "default":
				in.Default = InputDefault{}.New(val)
			case "secondaryFiles":
				in.SecondaryFiles = SecondaryFile{}.NewList(val)
			}
		}
	case string:
		in.Types = Type{}.NewList(t)
	case []interface{}:
		for _, entry := range t {
			in.Types = append(in.Types, Type{}.New(entry))
		}
	}
	return in
}
// flatten converts prov — the (sub-)value provided for an array-typed
// input — into command-line fragments, dispatching on the array's
// element type. binding is the inputBinding of the array items (may be
// nil). Passing prov explicitly allows recursion for nested
// ("array of array") inputs.
func (input Input) flatten(typ Type, binding *Binding, prov interface{}) []string {
	flattened := []string{}
	switch typ.Type {
	case "int": // Array of Int
		// Int elements are joined into one argument using the
		// input-level binding separator.
		// NOTE(review): reads input.Binding/input.Provided rather than
		// the binding/prov parameters — panics if input.Binding is nil
		// or Provided is not []interface{}; confirm callers guarantee both.
		tobejoined := []string{}
		for _, e := range input.Provided.([]interface{}) {
			tobejoined = append(tobejoined, fmt.Sprintf("%v", e))
		}
		flattened = append(flattened, strings.Join(tobejoined, input.Binding.Separator))
	case "File": // Array of Files
		switch arr := input.Provided.(type) {
		case []string:
			// TODO:
		case []interface{}:
			separated := []string{}
			for _, e := range arr {
				switch v := e.(type) {
				case map[interface{}]interface{}:
					// Each file contributes its item binding prefix (if
					// any) followed by its "location" value.
					if binding != nil && binding.Prefix != "" {
						separated = append(separated, binding.Prefix)
					}
					separated = append(separated, fmt.Sprintf("%v", v["location"]))
				default:
					// TODO:
				}
			}
			// In case it's Array of Files, unlike array of int,
			// it's NOT gonna be joined with .Binding.Separator.
			flattened = append(flattened, separated...)
		}
	case "string": // Array of string
		switch arr := prov.(type) {
		case []interface{}:
			separated := []string{}
			for _, e := range arr {
				switch v := e.(type) {
				case string:
					// Each element contributes the item binding prefix
					// (if any) followed by the string itself.
					if binding != nil && binding.Prefix != "" {
						separated = append(separated, binding.Prefix)
					}
					separated = append(separated, fmt.Sprintf("%v", v))
				default:
					// TODO
				}
			}
			flattened = append(flattened, separated...)
		default:
			// TODO
		}
	case "array":
		// Nested array: recurse one level down.
		// NOTE(review): only arr[0] (the first sub-array) is flattened;
		// confirm whether each sub-array should contribute.
		switch arr := prov.(type) {
		case []interface{}:
			flattened = append(flattened, input.flatten(typ.Items[0], typ.Binding, arr[0])...)
		default:
			// TODO
		}
	default:
		// Non-primitive element types fall back to schema-driven
		// flattening when a required type is attached.
		if input.RequiredType != nil {
			flattened = append(flattened, input.flattenWithRequiredType()...)
		} else {
			// TODO
		}
	}
	return flattened
}
// flattenWithRequiredType flattens the provided value using the schema
// recorded in RequiredType (from a SchemaDefRequirement), emitting
// fields in the schema's declared order with their per-field bindings.
func (input Input) flattenWithRequiredType() []string {
	flattened := []string{}
	// Only applies when the input's first type references a named
	// requirement and that name matches the attached schema.
	key, needed := input.Types[0].NeedRequirement()
	if !needed {
		return flattened
	}
	if input.RequiredType.Name != key {
		return flattened
	}
	switch provided := input.Provided.(type) {
	case []interface{}:
		for _, e := range provided {
			switch v := e.(type) {
			case map[interface{}]interface{}:
				// Walk schema fields in declared order, looking up the
				// provided value for each.
				for _, field := range input.RequiredType.Fields {
					if val, ok := v[field.Name]; ok {
						if field.Binding == nil {
							// Without thinking anything, just append it!!!
							flattened = append(flattened, fmt.Sprintf("%v", val))
						} else {
							if field.Binding.Prefix != "" {
								if field.Binding.Separate {
									flattened = append(flattened, field.Binding.Prefix, fmt.Sprintf("%v", val))
								} else {
									// TODO: Join if .Separator is given
									flattened = append(flattened, fmt.Sprintf("%s%v", field.Binding.Prefix, val))
								}
							} else {
								// No prefix: the value may itself be a
								// nested list of records typed by the
								// schema definitions.
								switch v2 := val.(type) {
								case []interface{}:
									for _, val2 := range v2 {
										switch v3 := val2.(type) {
										case []interface{}:
										case map[interface{}]interface{}:
											// NOTE(review): only Requirements[0] is
											// consulted — confirm a single
											// SchemaDefRequirement is guaranteed here.
											for _, types := range input.Requirements[0].SchemaDefRequirement.Types {
												val3array := []string{}
												var val3count int = 0
												// Emit record fields in sorted order.
												sort.Sort(types.Fields)
												for _, fields := range types.Fields {
													for key3, val3 := range v3 {
														if fields.Name == key3 {
															for _, val3type := range fields.Types {
																if val3type.Type == "" {
																} else {
																	switch val3type.Type {
																	case "enum":
																		// Accept only values listed in the enum's symbols.
																		for _, symbol := range val3type.Symbols {
																			if symbol == val3 {
																				val3array = append(val3array, fmt.Sprintf("%v", val3))
																				val3count = val3count + 1
																			}
																		}
																	case "int":
																		if fields.Binding.Prefix != "" {
																			val3array = append(val3array, fields.Binding.Prefix, fmt.Sprintf("%v", val3))
																			val3count = val3count + 1
																		} else {
																			val3array = append(val3array, fmt.Sprintf("%v", val3))
																			val3count = val3count + 1
																		}
																	}
																}
															}
														}
													}
												}
												// Emit only if every key of the record
												// matched this schema type.
												if len(v3) == val3count {
													flattened = append(flattened, val3array...)
												}
											}
										}
									}
								}
							}
						}
					}
				}
			}
		}
	}
	return flattened
}
// Flatten converts this input's provided value (or its default when
// nothing was provided) into the command-line arguments it contributes.
func (input Input) Flatten() []string {
	if input.Provided == nil {
		// In case "input.Default == nil" should be validated by usage layer.
		if input.Default != nil {
			return input.Default.Flatten(input.Binding)
		} else {
			return []string{}
		}
	}
	flattened := []string{}
	// Only single-type inputs are handled; multi-type (union) inputs
	// fall through and contribute nothing here.
	if repr := input.Types[0]; len(input.Types) == 1 {
		switch repr.Type {
		case "array":
			flattened = append(flattened, input.flatten(repr.Items[0], repr.Binding, input.Provided)...)
		case "int":
			flattened = append(flattened, fmt.Sprintf("%v", input.Provided.(int)))
		case "File":
			switch provided := input.Provided.(type) {
			case map[interface{}]interface{}:
				// TODO: more strict type casting
				if provided["location"] != nil {
					flattened = append(flattened, fmt.Sprintf("%v", provided["location"]))
				} else {
					// No location: materialize "contents" into a temp
					// file and pass its path instead.
					// NOTE(review): the TempFile error is discarded —
					// fp would be nil on failure; confirm acceptable.
					fp, _ := ioutil.TempFile("", "ioutil")
					fpath := fp.Name()
					flattened = append(flattened, fpath)
					fp.Write([]byte(fmt.Sprintf("%v", provided["contents"])))
					// TODO: remove after finish program
					defer fp.Close()
				}
			default:
			}
		default:
			flattened = append(flattened, fmt.Sprintf("%v", input.Provided))
		}
	}
	// A prefix on the input-level binding is prepended as its own
	// argument.
	if input.Binding != nil && input.Binding.Prefix != "" {
		flattened = append([]string{input.Binding.Prefix}, flattened...)
	}
	return flattened
}
// Inputs represents "inputs" field in CWL.
type Inputs []Input

// New constructs an Inputs list from a raw decoded value, which is
// either a sequence of inputs or a map keyed by input ID.
func (_ Inputs) New(i interface{}) Inputs {
	out := Inputs{}
	switch raw := i.(type) {
	case []interface{}:
		for _, entry := range raw {
			out = append(out, Input{}.New(entry))
		}
	case map[string]interface{}:
		for id, entry := range raw {
			in := Input{}.New(entry)
			in.ID = id
			out = append(out, in)
		}
	}
	return out
}
// Len reports the number of inputs (sort.Interface).
func (ins Inputs) Len() int {
	return len(ins)
}

// Less orders inputs by their binding positions (sort.Interface).
// Inputs without a binding are ordered by the sign of the other side's
// position.
func (ins Inputs) Less(i, j int) bool {
	a, b := ins[i].Binding, ins[j].Binding
	if a == nil && b == nil {
		return true
	}
	if b == nil {
		return a.Position < 0
	}
	if a == nil {
		return b.Position > 0
	}
	return a.Position <= b.Position
}

// Swap exchanges two inputs (sort.Interface).
func (ins Inputs) Swap(i, j int) {
	ins[i], ins[j] = ins[j], ins[i]
}
|
/*
Open Source Initiative OSI - The MIT License (MIT):Licensing
The MIT License (MIT)
Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
// Package mapset implements a simple and generic set collection.
// Items stored within it are unordered and unique
// It supports typical set operations: membership testing, intersection, union, difference and symmetric difference
package mapset
import (
"fmt"
"strings"
)
// Set is the primary type that represents a set.
type Set map[interface{}]struct{}

// NewSet creates and returns a reference to an empty set.
func NewSet() Set {
	return make(Set)
}

// NewSetFromSlice creates and returns a reference to a set populated
// with the items of an existing slice.
func NewSetFromSlice(s []interface{}) Set {
	a := NewSet()
	for _, item := range s {
		a.Add(item)
	}
	return a
}

// Add adds an item to the current set if it doesn't already exist in
// the set. It reports whether the item was newly added (false if it
// existed already).
func (set Set) Add(i interface{}) bool {
	_, found := set[i]
	set[i] = struct{}{}
	return !found
}

// Contains determines if a given item is already in the set.
func (set Set) Contains(i interface{}) bool {
	_, found := set[i]
	return found
}

// ContainsAll determines if the given items are all in the set.
func (set Set) ContainsAll(i ...interface{}) bool {
	// Return the subset test directly instead of the former
	// if/return-true/return-false chain.
	return NewSetFromSlice(i).IsSubset(set)
}

// IsSubset determines if every item in the other set is in this set.
func (set Set) IsSubset(other Set) bool {
	for elem := range set {
		if !other.Contains(elem) {
			return false
		}
	}
	return true
}

// IsSuperset determines if every item of this set is in the other set.
func (set Set) IsSuperset(other Set) bool {
	return other.IsSubset(set)
}

// Union returns a new set with all items in both sets.
func (set Set) Union(other Set) Set {
	unionedSet := NewSet()
	for elem := range set {
		unionedSet.Add(elem)
	}
	for elem := range other {
		unionedSet.Add(elem)
	}
	return unionedSet
}

// Intersect returns a new set with items that exist only in both sets.
func (set Set) Intersect(other Set) Set {
	intersection := NewSet()
	// Loop over the smaller set for fewer lookups.
	if set.Size() < other.Size() {
		for elem := range set {
			if other.Contains(elem) {
				intersection.Add(elem)
			}
		}
	} else {
		for elem := range other {
			if set.Contains(elem) {
				intersection.Add(elem)
			}
		}
	}
	return intersection
}

// Difference returns a new set with items in the current set but not in
// the other set.
func (set Set) Difference(other Set) Set {
	differencedSet := NewSet()
	for elem := range set {
		if !other.Contains(elem) {
			differencedSet.Add(elem)
		}
	}
	return differencedSet
}

// SymmetricDifference returns a new set with items in the current set
// or the other set but not in both.
func (set Set) SymmetricDifference(other Set) Set {
	aDiff := set.Difference(other)
	bDiff := other.Difference(set)
	symDifferencedSet := aDiff.Union(bDiff)
	return symDifferencedSet
}

// Clear resets the set to the empty set.
func (set *Set) Clear() {
	*set = make(Set)
}

// Remove allows the removal of a single item in the set.
func (set Set) Remove(i interface{}) {
	delete(set, i)
}

// Size returns how many items are currently in the set.
func (set Set) Size() int {
	return len(set)
}

// Equal determines if two sets are equal to each other: both the same
// size and containing the same items. Order of items is not relevant.
func (set Set) Equal(other Set) bool {
	if set.Size() != other.Size() {
		return false
	}
	for elem := range set {
		if !other.Contains(elem) {
			return false
		}
	}
	return true
}

// String provides a convenient string representation of the current
// state of the set.
func (set Set) String() string {
	items := make([]string, 0, len(set))
	for key := range set {
		items = append(items, fmt.Sprintf("%v", key))
	}
	return fmt.Sprintf("Set{%s}", strings.Join(items, ", "))
}
Aesthetic improvement in SymmetricDifference
/*
Open Source Initiative OSI - The MIT License (MIT):Licensing
The MIT License (MIT)
Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com)
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
// Package mapset implements a simple and generic set collection.
// Items stored within it are unordered and unique
// It supports typical set operations: membership testing, intersection, union, difference and symmetric difference
package mapset
import (
"fmt"
"strings"
)
// Set is the primary type that represents a set.
type Set map[interface{}]struct{}

// NewSet creates and returns a reference to an empty set.
func NewSet() Set {
	return make(Set)
}

// NewSetFromSlice creates and returns a reference to a set populated
// with the items of an existing slice.
func NewSetFromSlice(s []interface{}) Set {
	a := NewSet()
	for _, item := range s {
		a.Add(item)
	}
	return a
}

// Add adds an item to the current set if it doesn't already exist in
// the set. It reports whether the item was newly added (false if it
// existed already).
func (set Set) Add(i interface{}) bool {
	_, found := set[i]
	set[i] = struct{}{}
	return !found
}

// Contains determines if a given item is already in the set.
func (set Set) Contains(i interface{}) bool {
	_, found := set[i]
	return found
}

// ContainsAll determines if the given items are all in the set.
func (set Set) ContainsAll(i ...interface{}) bool {
	// Return the subset test directly instead of the former
	// if/return-true/return-false chain.
	return NewSetFromSlice(i).IsSubset(set)
}

// IsSubset determines if every item in the other set is in this set.
func (set Set) IsSubset(other Set) bool {
	for elem := range set {
		if !other.Contains(elem) {
			return false
		}
	}
	return true
}

// IsSuperset determines if every item of this set is in the other set.
func (set Set) IsSuperset(other Set) bool {
	return other.IsSubset(set)
}

// Union returns a new set with all items in both sets.
func (set Set) Union(other Set) Set {
	unionedSet := NewSet()
	for elem := range set {
		unionedSet.Add(elem)
	}
	for elem := range other {
		unionedSet.Add(elem)
	}
	return unionedSet
}

// Intersect returns a new set with items that exist only in both sets.
func (set Set) Intersect(other Set) Set {
	intersection := NewSet()
	// Loop over the smaller set for fewer lookups.
	if set.Size() < other.Size() {
		for elem := range set {
			if other.Contains(elem) {
				intersection.Add(elem)
			}
		}
	} else {
		for elem := range other {
			if set.Contains(elem) {
				intersection.Add(elem)
			}
		}
	}
	return intersection
}

// Difference returns a new set with items in the current set but not in
// the other set.
func (set Set) Difference(other Set) Set {
	differencedSet := NewSet()
	for elem := range set {
		if !other.Contains(elem) {
			differencedSet.Add(elem)
		}
	}
	return differencedSet
}

// SymmetricDifference returns a new set with items in the current set
// or the other set but not in both.
func (set Set) SymmetricDifference(other Set) Set {
	aDiff := set.Difference(other)
	bDiff := other.Difference(set)
	return aDiff.Union(bDiff)
}

// Clear resets the set to the empty set.
func (set *Set) Clear() {
	*set = make(Set)
}

// Remove allows the removal of a single item in the set.
func (set Set) Remove(i interface{}) {
	delete(set, i)
}

// Size returns how many items are currently in the set.
func (set Set) Size() int {
	return len(set)
}

// Equal determines if two sets are equal to each other: both the same
// size and containing the same items. Order of items is not relevant.
func (set Set) Equal(other Set) bool {
	if set.Size() != other.Size() {
		return false
	}
	for elem := range set {
		if !other.Contains(elem) {
			return false
		}
	}
	return true
}

// String provides a convenient string representation of the current
// state of the set.
func (set Set) String() string {
	items := make([]string, 0, len(set))
	for key := range set {
		items = append(items, fmt.Sprintf("%v", key))
	}
	return fmt.Sprintf("Set{%s}", strings.Join(items, ", "))
}
|
package discrete
// On one hand, using an interface{} as a key works on some levels.
// On the other hand, from experience, I can say that working with interface{} is a pain
// so I don't like it in an API. An alternate idea is to make Set an interface with a method that allows you to GRAB a map[interface{}]struct{} from
// the implementation, but that adds a lot of calls and needless operations, making the library slower
//
// Another point, using an interface{} may be pointless because a map key MUST have == and != defined, limiting the possible keys anyway (for instance, if you had a set of [3]floats I don't think it will do a deep
// comparison, making it rather pointless). Also, keying with a float will mean it does a strict == with the floats, possibly causing bad behavior. It may be best to just make it a map[int]struct{}. Thoughts?
// Set is a set of interface{} keys. Each Set carries a unique id so the
// pseudo in-place operations below can recognize when an operand and
// the destination are the same set.
//
// Fix: this block did not compile — it ranged over and indexed the Set
// struct itself instead of its data map, compared a struct to nil,
// compared an id (uint) to a Set, and Clear was missing its return.
type Set struct {
	data map[interface{}]struct{}
	id   uint
}

// globalid is the source of Set ids.
// I highly doubt we have to worry about running out of IDs, but we
// could add a little reclaimID function if we're worried.
var globalid uint = 0

// flag is the zero-size value stored to mark membership. For cleanliness.
var flag struct{} = struct{}{}

// NewSet allocates an empty set with a fresh id.
func NewSet() Set {
	defer func() { globalid++ }()
	return Set{
		data: make(map[interface{}]struct{}),
		id:   globalid,
	}
}

// Clear reverts the set to the empty set without reallocating, and
// returns it.
func (s1 Set) Clear() Set {
	for el := range s1.data {
		delete(s1.data, el)
	}
	return s1
}

// CopyTo ensures a perfect copy from s1 to dest (meaning the sets will
// be equal).
func (s1 Set) CopyTo(dest Set) Set {
	if s1.id == dest.id {
		return dest
	}
	if len(dest.data) > 0 {
		for el := range dest.data {
			delete(dest.data, el)
		}
	}
	for el := range s1.data {
		dest.data[el] = flag
	}
	return dest
}

// Equal reports whether s1 and s2 hold exactly the same elements.
func (s1 Set) Equal(s2 Set) bool {
	if s1.id == s2.id {
		return true
	}
	if len(s1.data) != len(s2.data) {
		return false
	}
	for el := range s1.data {
		if _, ok := s2.data[el]; !ok {
			return false
		}
	}
	return true
}

// Union stores the union of s1 and s2 into dest and returns dest.
// dest may alias s1 or s2.
func (s1 Set) Union(dest, s2 Set) Set {
	if s1.id == s2.id {
		return s1.CopyTo(dest)
	}
	if s1.id != dest.id && s2.id != dest.id {
		dest.Clear()
	}
	if dest.id != s1.id {
		for el := range s1.data {
			dest.data[el] = flag
		}
	}
	if dest.id != s2.id {
		for el := range s2.data {
			dest.data[el] = flag
		}
	}
	return dest
}

// Intersection stores the intersection of s1 and s2 into dest and
// returns dest. dest may alias s1 or s2.
func (s1 Set) Intersection(dest, s2 Set) Set {
	var swap Set
	if s1.id == s2.id {
		return s1.CopyTo(dest)
	} else if s1.id == dest.id {
		swap = s2
	} else if s2.id == dest.id {
		swap = s1
	} else {
		dest.Clear()
		// Iterate the smaller operand for fewer lookups.
		if len(s1.data) > len(s2.data) {
			s1, s2 = s2, s1
		}
		for el := range s1.data {
			if _, ok := s2.data[el]; ok {
				dest.data[el] = flag
			}
		}
		return dest
	}
	// dest aliases one operand: drop everything not in the other one.
	for el := range dest.data {
		if _, ok := swap.data[el]; !ok {
			delete(dest.data, el)
		}
	}
	return dest
}

// Diff stores s1 minus s2 into dest and returns dest. dest may alias
// s1 or s2.
func (s1 Set) Diff(dest, s2 Set) Set {
	if s1.id == s2.id {
		return dest.Clear()
	} else if s2.id == dest.id {
		// dest aliases the subtrahend: compute into a temporary first.
		tmp := NewSet()
		return s1.Diff(tmp, s2).CopyTo(dest)
	} else if s1.id == dest.id {
		for el := range dest.data {
			if _, ok := s2.data[el]; ok {
				delete(dest.data, el)
			}
		}
	} else {
		dest.Clear()
		for el := range s1.data {
			if _, ok := s2.data[el]; !ok {
				dest.data[el] = flag
			}
		}
	}
	return dest
}

// Are Add/Remove necessary?

// Add inserts element into the set.
func (s1 Set) Add(element interface{}) {
	s1.data[element] = flag
}

// Remove deletes element from the set.
func (s1 Set) Remove(element interface{}) {
	delete(s1.data, element)
}
Made it build without error
package discrete
// On one hand, using an interface{} as a key works on some levels.
// On the other hand, from experience, I can say that working with interface{} is a pain
// so I don't like it in an API. An alternate idea is to make Set an interface with a method that allows you to GRAB a map[interface{}]struct{} from
// the implementation, but that adds a lot of calls and needless operations, making the library slower
//
// Another point, using an interface{} may be pointless because a map key MUST have == and != defined, limiting the possible keys anyway (for instance, if you had a set of [3]floats I don't think it will do a deep
// comparison, making it rather pointless). Also, keying with a float will mean it does a strict == with the floats, possibly causing bad behavior. It may be best to just make it a map[int]struct{}. Thoughts?
// Set is an unordered collection of unique interface{} elements. Each Set
// carries a unique id so that methods can detect aliasing between the
// receiver, argument, and destination sets.
type Set struct {
	data map[interface{}]struct{}
	id   uint
}

// globalid is the source of unique Set ids.
// I highly doubt we have to worry about running out of IDs, but we could add a little reclaimID function if we're worried
var globalid uint

// flag is the zero-width value stored for every set member.
var flag = struct{}{}

// NewSet returns an empty Set with a fresh unique id.
func NewSet() Set {
	// Bump the id counter after the return value has captured the current id.
	defer func() { globalid++ }()
	return Set{
		data: make(map[interface{}]struct{}),
		id:   globalid,
	}
}
// Clear reverts the set to the empty set without reallocating the map and
// returns the receiver.
// Fix: drop the redundant blank identifier in "for el, _ := range"
// (gofmt/idiom: "for el := range").
func (s1 Set) Clear() Set {
	for el := range s1.data {
		delete(s1.data, el)
	}
	return s1
}
// CopyTo makes dest an exact copy of s1 (afterwards the two sets are Equal)
// and returns dest. Copying a set onto itself is a no-op.
func (s1 Set) CopyTo(dest Set) Set {
	if s1.id == dest.id {
		return dest
	}
	// Empty dest, then fill it with s1's members. Ranging over an empty
	// map is a no-op, so no length guard is needed.
	for member := range dest.data {
		delete(dest.data, member)
	}
	for member := range s1.data {
		dest.data[member] = flag
	}
	return dest
}
// Equal reports whether s1 and s2 contain exactly the same elements.
// Two sets sharing an id are trivially equal.
func (s1 Set) Equal(s2 Set) bool {
	if s1.id == s2.id {
		return true
	}
	if len(s1.data) != len(s2.data) {
		return false
	}
	for member := range s1.data {
		if _, present := s2.data[member]; !present {
			return false
		}
	}
	return true
}
// Union stores the union of s1 and s2 in dest and returns dest.
// dest may alias either operand.
func (s1 Set) Union(dest, s2 Set) Set {
	if s1.id == s2.id {
		// s1 ∪ s1 == s1
		return s1.CopyTo(dest)
	}
	if dest.id != s1.id && dest.id != s2.id {
		// dest is independent of both operands; start from empty.
		dest.Clear()
	}
	if s1.id != dest.id {
		for member := range s1.data {
			dest.data[member] = flag
		}
	}
	if s2.id != dest.id {
		for member := range s2.data {
			dest.data[member] = flag
		}
	}
	return dest
}
// Intersection stores the intersection of s1 and s2 in dest and returns dest.
// dest may alias s1 or s2; an aliased destination is pruned in place.
func (s1 Set) Intersection(dest, s2 Set) Set {
	var swap Set
	if s1.id == s2.id {
		// s1 ∩ s1 == s1
		return s1.CopyTo(dest)
	} else if s1.id == dest.id {
		swap = s2
	} else if s2.id == dest.id {
		swap = s1
	} else {
		// dest is distinct from both operands: rebuild it from scratch,
		// iterating over the smaller operand for fewer map lookups.
		dest.Clear()
		if len(s1.data) > len(s2.data) {
			s1, s2 = s2, s1
		}
		for el := range s1.data {
			if _, ok := s2.data[el]; ok {
				dest.data[el] = flag
			}
		}
		return dest
	}
	// dest aliases one operand; drop every element missing from the other.
	for el := range dest.data {
		if _, ok := swap.data[el]; !ok {
			delete(dest.data, el)
		}
	}
	return dest
}
// Diff stores the set difference s1 \ s2 in dest and returns dest.
// dest may alias s1 or s2.
func (s1 Set) Diff(dest, s2 Set) Set {
	if s1.id == s2.id {
		// s1 \ s1 is the empty set.
		return dest.Clear()
	} else if s2.id == dest.id {
		// dest aliases the subtrahend: compute into a scratch set first,
		// then copy the result over dest.
		tmp := NewSet()
		return s1.Diff(tmp, s2).CopyTo(dest)
	} else if s1.id == dest.id {
		// dest aliases s1: prune shared elements in place.
		for el := range dest.data {
			if _, ok := s2.data[el]; ok {
				delete(dest.data, el)
			}
		}
	} else {
		dest.Clear()
		for el := range s1.data {
			if _, ok := s2.data[el]; !ok {
				dest.data[el] = flag
			}
		}
	}
	return dest
}
// Are Add/Remove necessary?

// Add inserts element into the set (no-op if already present).
func (s1 Set) Add(element interface{}) {
	s1.data[element] = flag
}

// Remove deletes element from the set (no-op if absent).
func (s1 Set) Remove(element interface{}) {
	delete(s1.data, element)
}
|
package cmds
import (
"bufio"
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"regexp"
"strconv"
"strings"
"time"
"github.com/pachyderm/pachyderm/src/client"
deployclient "github.com/pachyderm/pachyderm/src/client/deploy"
"github.com/pachyderm/pachyderm/src/client/version"
"github.com/pachyderm/pachyderm/src/server/pkg/cmdutil"
"github.com/pachyderm/pachyderm/src/server/pkg/deploy"
"github.com/pachyderm/pachyderm/src/server/pkg/deploy/assets"
"github.com/pachyderm/pachyderm/src/server/pkg/deploy/images"
_metrics "github.com/pachyderm/pachyderm/src/server/pkg/metrics"
"github.com/ghodss/yaml"
"github.com/spf13/cobra"
)
// defaultDashImage is the dash image deployed when no newer compatible image
// can be resolved (see getDefaultOrLatestDashImage).
var defaultDashImage = "pachyderm/dash:1.8-preview-7"

// Sanity-check patterns for AWS credentials/regions; a mismatch only triggers
// an interactive warning, it does not block deployment.
var awsAccessKeyIDRE = regexp.MustCompile("^[A-Z0-9]{20}$")
var awsSecretRE = regexp.MustCompile("^[A-Za-z0-9/+=]{40}$")
var awsRegionRE = regexp.MustCompile("^[a-z]{2}(?:-gov)?-[a-z]+-[0-9]$")
// BytesEncoder is an assets.Encoder that accumulates its encoded output in an
// in-memory buffer.
type BytesEncoder interface {
	assets.Encoder
	// Buffer returns the encoder's current output buffer.
	Buffer() *bytes.Buffer
}
// jsonEncoder is a BytesEncoder that emits tab-indented JSON documents, each
// followed by a blank line.
type jsonEncoder struct {
	encoder *json.Encoder
	buffer  *bytes.Buffer
}

// newJSONEncoder returns a jsonEncoder writing into a fresh buffer.
func newJSONEncoder() *jsonEncoder {
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)
	enc.SetIndent("", "\t")
	return &jsonEncoder{encoder: enc, buffer: &buf}
}

// Encode appends item as an indented JSON document followed by a blank line.
func (e *jsonEncoder) Encode(item interface{}) error {
	if err := e.encoder.Encode(item); err != nil {
		return err
	}
	_, err := e.buffer.WriteString("\n")
	return err
}

// Buffer returns the bytes encoded so far.
func (e *jsonEncoder) Buffer() *bytes.Buffer {
	return e.buffer
}
// yamlEncoder is a BytesEncoder that emits YAML documents, each terminated by
// the standard "---" document separator.
type yamlEncoder struct {
	buffer *bytes.Buffer
}

// newYAMLEncoder returns a yamlEncoder writing into a fresh buffer.
func newYAMLEncoder() *yamlEncoder {
	buffer := &bytes.Buffer{}
	return &yamlEncoder{buffer}
}

// Encode appends item to the buffer as a YAML document followed by "---".
func (e *yamlEncoder) Encode(item interface{}) error {
	bytes, err := yaml.Marshal(item)
	if err != nil {
		return err
	}
	_, err = e.buffer.Write(bytes)
	if err != nil {
		return err
	}
	_, err = fmt.Fprintf(e.buffer, "---\n")
	return err
}

// Buffer returns the bytes encoded so far.
func (e *yamlEncoder) Buffer() *bytes.Buffer {
	return e.buffer
}
// getEncoder returns the BytesEncoder matching outputFormat ("yaml" or
// "json"); any unrecognized format falls back to JSON.
func getEncoder(outputFormat string) BytesEncoder {
	if outputFormat == "yaml" {
		return newYAMLEncoder()
	}
	// "json" and every other value get the JSON encoder.
	return newJSONEncoder()
}
// kubectlCreate either writes the generated manifest to stdout (dryRun) or
// pipes it into "kubectl apply" to launch Pachyderm, then prints follow-up
// instructions.
// NOTE(review): the metrics parameter is accepted but unused in this body.
func kubectlCreate(dryRun bool, manifest BytesEncoder, opts *assets.AssetOpts, metrics bool) error {
	if dryRun {
		_, err := os.Stdout.Write(manifest.Buffer().Bytes())
		return err
	}
	io := cmdutil.IO{
		Stdin:  manifest.Buffer(),
		Stdout: os.Stdout,
		Stderr: os.Stderr,
	}
	// we set --validate=false due to https://github.com/kubernetes/kubernetes/issues/53309
	if err := cmdutil.RunIO(io, "kubectl", "apply", "-f", "-", "--validate=false"); err != nil {
		return err
	}
	fmt.Println("\nPachyderm is launching. Check its status with \"kubectl get all\"")
	if opts.DashOnly || !opts.NoDash {
		fmt.Println("Once launched, access the dashboard by running \"pachctl port-forward\"")
	}
	fmt.Println("")
	return nil
}
// containsEmpty reports whether any element of vals is the empty string. It
// is used when validating that credential arguments are non-empty.
func containsEmpty(vals []string) bool {
	for i := range vals {
		if len(vals[i]) == 0 {
			return true
		}
	}
	return false
}
// DeployCmd returns a cobra.Command to deploy pachyderm.
// The parent "deploy" command builds the shared *assets.AssetOpts from its
// persistent flags in PersistentPreRun; each backend-specific subcommand then
// writes a manifest and hands it to kubectlCreate.
func DeployCmd(noMetrics *bool, noPortForwarding *bool) *cobra.Command {
	var pachdShards int
	var hostPath string
	var dev bool
	var dryRun bool
	var outputFormat string
	var secure bool
	var isS3V2 bool
	var etcdNodes int
	var etcdVolume string
	var etcdStorageClassName string
	var pachdCPURequest string
	var pachdNonCacheMemRequest string
	var blockCacheSize string
	var etcdCPURequest string
	var etcdMemRequest string
	var logLevel string
	var persistentDiskBackend string
	var objectStoreBackend string
	var opts *assets.AssetOpts
	var dashOnly bool
	var noDash bool
	var dashImage string
	var registry string
	var imagePullSecret string
	var noGuaranteed bool
	var noRBAC bool
	var localRoles bool
	var namespace string
	var noExposeDockerSocket bool
	var exposeObjectAPI bool
	var tlsCertKey string
	// "pachctl deploy local": single-node cluster with host-path storage.
	deployLocal := &cobra.Command{
		Use:   "local",
		Short: "Deploy a single-node Pachyderm cluster with local metadata storage.",
		Long:  "Deploy a single-node Pachyderm cluster with local metadata storage.",
		Run: cmdutil.RunFixedArgs(0, func(args []string) (retErr error) {
			// Report deploy metrics unless disabled or deploying a dev cluster.
			metrics := !*noMetrics
			if metrics && !dev {
				start := time.Now()
				startMetricsWait := _metrics.StartReportAndFlushUserAction("Deploy", start)
				defer startMetricsWait()
				defer func() {
					finishMetricsWait := _metrics.FinishReportAndFlushUserAction("Deploy", retErr, start)
					finishMetricsWait()
				}()
			}
			manifest := getEncoder(outputFormat)
			if dev {
				// Use dev build instead of release build
				opts.Version = deploy.DevVersionTag
				// we turn metrics off this is a dev cluster. The default is set by
				// deploy.PersistentPreRun, below.
				opts.Metrics = false
				// Disable authentication, for tests
				opts.DisableAuthentication = true
				// Serve the Pachyderm object/block API locally, as this is needed by
				// our tests (and authentication is disabled anyway)
				opts.ExposeObjectAPI = true
			}
			if err := assets.WriteLocalAssets(manifest, opts, hostPath); err != nil {
				return err
			}
			return kubectlCreate(dryRun, manifest, opts, metrics)
		}),
	}
	deployLocal.Flags().StringVar(&hostPath, "host-path", "/var/pachyderm", "Location on the host machine where PFS metadata will be stored.")
	deployLocal.Flags().BoolVarP(&dev, "dev", "d", false, "Deploy pachd with local version tags, disable metrics, expose Pachyderm's object/block API, and use an insecure authentication mechanism (do not set on any cluster with sensitive data)")
	// "pachctl deploy google": GCP deployment backed by GCS.
	deployGoogle := &cobra.Command{
		Use:   "google <GCS bucket> <size of disk(s) (in GB)> [<service account creds file>]",
		Short: "Deploy a Pachyderm cluster running on GCP.",
		Long: "Deploy a Pachyderm cluster running on GCP.\n" +
			"Arguments are:\n" +
			" <GCS bucket>: A GCS bucket where Pachyderm will store PFS data.\n" +
			" <GCE persistent disks>: A comma-separated list of GCE persistent disks, one per etcd node (see --etcd-nodes).\n" +
			" <size of disks>: Size of GCE persistent disks in GB (assumed to all be the same).\n" +
			" <service account creds file>: a file contain a private key for a service account (downloaded from GCE).\n",
		Run: cmdutil.RunBoundedArgs(2, 3, func(args []string) (retErr error) {
			metrics := !*noMetrics
			if metrics && !dev {
				start := time.Now()
				startMetricsWait := _metrics.StartReportAndFlushUserAction("Deploy", start)
				defer startMetricsWait()
				defer func() {
					finishMetricsWait := _metrics.FinishReportAndFlushUserAction("Deploy", retErr, start)
					finishMetricsWait()
				}()
			}
			volumeSize, err := strconv.Atoi(args[1])
			if err != nil {
				return fmt.Errorf("volume size needs to be an integer; instead got %v", args[1])
			}
			manifest := getEncoder(outputFormat)
			opts.BlockCacheSize = "0G" // GCS is fast so we want to disable the block cache. See issue #1650
			var cred string
			if len(args) == 3 {
				credBytes, err := ioutil.ReadFile(args[2])
				if err != nil {
					return fmt.Errorf("error reading creds file %s: %v", args[2], err)
				}
				cred = string(credBytes)
			}
			bucket := strings.TrimPrefix(args[0], "gs://")
			if err = assets.WriteGoogleAssets(manifest, opts, bucket, cred, volumeSize); err != nil {
				return err
			}
			return kubectlCreate(dryRun, manifest, opts, metrics)
		}),
	}
	// "pachctl deploy custom": bring-your-own persistent disk + object store.
	deployCustom := &cobra.Command{
		Use:   "custom --persistent-disk <persistent disk backend> --object-store <object store backend> <persistent disk args> <object store args>",
		Short: "(in progress) Deploy a custom Pachyderm cluster configuration",
		Long: "(in progress) Deploy a custom Pachyderm cluster configuration.\n" +
			"If <object store backend> is \"s3\", then the arguments are:\n" +
			" <volumes> <size of volumes (in GB)> <bucket> <id> <secret> <endpoint>\n",
		Run: cmdutil.RunBoundedArgs(4, 7, func(args []string) (retErr error) {
			metrics := !*noMetrics
			if metrics && !dev {
				start := time.Now()
				startMetricsWait := _metrics.StartReportAndFlushUserAction("Deploy", start)
				defer startMetricsWait()
				defer func() {
					finishMetricsWait := _metrics.FinishReportAndFlushUserAction("Deploy", retErr, start)
					finishMetricsWait()
				}()
			}
			manifest := getEncoder(outputFormat)
			err := assets.WriteCustomAssets(manifest, opts, args, objectStoreBackend, persistentDiskBackend, secure, isS3V2)
			if err != nil {
				return err
			}
			return kubectlCreate(dryRun, manifest, opts, metrics)
		}),
	}
	deployCustom.Flags().BoolVarP(&secure, "secure", "s", false, "Enable secure access to a Minio server.")
	deployCustom.Flags().StringVar(&persistentDiskBackend, "persistent-disk", "aws",
		"(required) Backend providing persistent local volumes to stateful pods. "+
			"One of: aws, google, or azure.")
	deployCustom.Flags().StringVar(&objectStoreBackend, "object-store", "s3",
		"(required) Backend providing an object-storage API to pachyderm. One of: "+
			"s3, gcs, or azure-blob.")
	deployCustom.Flags().BoolVar(&isS3V2, "isS3V2", false, "Enable S3V2 client")
	var creds string
	var vault string
	var iamRole string
	var cloudfrontDistribution string
	// "pachctl deploy amazon": AWS deployment backed by S3 and EBS; exactly
	// one of --credentials, --vault, or --iam-role selects the auth scheme.
	deployAmazon := &cobra.Command{
		Use:   "amazon <S3 bucket> <region> <size of volumes (in GB)>",
		Short: "Deploy a Pachyderm cluster running on AWS.",
		Long: "Deploy a Pachyderm cluster running on AWS. Arguments are:\n" +
			" <S3 bucket>: An S3 bucket where Pachyderm will store PFS data.\n" +
			"\n" +
			" <region>: The aws region where pachyderm is being deployed (e.g. us-west-1)\n" +
			" <size of volumes>: Size of EBS volumes, in GB (assumed to all be the same).\n",
		Run: cmdutil.RunFixedArgs(3, func(args []string) (retErr error) {
			metrics := !*noMetrics
			if metrics && !dev {
				start := time.Now()
				startMetricsWait := _metrics.StartReportAndFlushUserAction("Deploy", start)
				defer startMetricsWait()
				defer func() {
					finishMetricsWait := _metrics.FinishReportAndFlushUserAction("Deploy", retErr, start)
					finishMetricsWait()
				}()
			}
			if creds == "" && vault == "" && iamRole == "" {
				return fmt.Errorf("One of --credentials, --vault, or --iam-role needs to be provided")
			}
			// populate 'amazonCreds' & validate
			var amazonCreds *assets.AmazonCreds
			s := bufio.NewScanner(os.Stdin)
			if creds != "" {
				parts := strings.Split(creds, ",")
				if len(parts) < 2 || len(parts) > 3 || containsEmpty(parts[:2]) {
					return fmt.Errorf("Incorrect format of --credentials")
				}
				amazonCreds = &assets.AmazonCreds{ID: parts[0], Secret: parts[1]}
				if len(parts) > 2 {
					amazonCreds.Token = parts[2]
				}
				// Credentials that don't match the expected shape only warn;
				// the user may confirm interactively and continue.
				if !awsAccessKeyIDRE.MatchString(amazonCreds.ID) {
					fmt.Printf("The AWS Access Key seems invalid (does not match %q). "+
						"Do you want to continue deploying? [yN]\n", awsAccessKeyIDRE)
					if s.Scan(); s.Text()[0] != 'y' && s.Text()[0] != 'Y' {
						os.Exit(1)
					}
				}
				if !awsSecretRE.MatchString(amazonCreds.Secret) {
					fmt.Printf("The AWS Secret seems invalid (does not match %q). "+
						"Do you want to continue deploying? [yN]\n", awsSecretRE)
					if s.Scan(); s.Text()[0] != 'y' && s.Text()[0] != 'Y' {
						os.Exit(1)
					}
				}
			}
			if vault != "" {
				if amazonCreds != nil {
					return fmt.Errorf("Only one of --credentials, --vault, or --iam-role needs to be provided")
				}
				parts := strings.Split(vault, ",")
				if len(parts) != 3 || containsEmpty(parts) {
					return fmt.Errorf("Incorrect format of --vault")
				}
				amazonCreds = &assets.AmazonCreds{VaultAddress: parts[0], VaultRole: parts[1], VaultToken: parts[2]}
			}
			if iamRole != "" {
				if amazonCreds != nil {
					return fmt.Errorf("Only one of --credentials, --vault, or --iam-role needs to be provided")
				}
				opts.IAMRole = iamRole
			}
			volumeSize, err := strconv.Atoi(args[2])
			if err != nil {
				return fmt.Errorf("volume size needs to be an integer; instead got %v", args[2])
			}
			if strings.TrimSpace(cloudfrontDistribution) != "" {
				fmt.Printf("WARNING: You specified a cloudfront distribution. Deploying on AWS with cloudfront is currently " +
					"an alpha feature. No security restrictions have been applied to cloudfront, making all data public (obscured but not secured)\n")
			}
			bucket, region := strings.TrimPrefix(args[0], "s3://"), args[1]
			if !awsRegionRE.MatchString(region) {
				fmt.Printf("The AWS region seems invalid (does not match %q). "+
					"Do you want to continue deploying? [yN]\n", awsRegionRE)
				if s.Scan(); s.Text()[0] != 'y' && s.Text()[0] != 'Y' {
					os.Exit(1)
				}
			}
			// generate manifest and write assets
			manifest := getEncoder(outputFormat)
			if err = assets.WriteAmazonAssets(manifest, opts, region, bucket, volumeSize, amazonCreds, cloudfrontDistribution); err != nil {
				return err
			}
			return kubectlCreate(dryRun, manifest, opts, metrics)
		}),
	}
	deployAmazon.Flags().StringVar(&cloudfrontDistribution, "cloudfront-distribution", "",
		"Deploying on AWS with cloudfront is currently "+
			"an alpha feature. No security restrictions have been"+
			"applied to cloudfront, making all data public (obscured but not secured)")
	deployAmazon.Flags().StringVar(&creds, "credentials", "", "Use the format \"<id>,<secret>[,<token>]\". You can get a token by running \"aws sts get-session-token\".")
	deployAmazon.Flags().StringVar(&vault, "vault", "", "Use the format \"<address/hostport>,<role>,<token>\".")
	deployAmazon.Flags().StringVar(&iamRole, "iam-role", "", fmt.Sprintf("Use the given IAM role for authorization, as opposed to using static credentials. The given role will be applied as the annotation %s, this used with a Kubernetes IAM role management system such as kube2iam allows you to give pachd credentials in a more secure way.", assets.IAMAnnotation))
	// "pachctl deploy microsoft": Azure deployment backed by blob storage.
	deployMicrosoft := &cobra.Command{
		Use:   "microsoft <container> <storage account name> <storage account key> <size of volumes (in GB)>",
		Short: "Deploy a Pachyderm cluster running on Microsoft Azure.",
		Long: "Deploy a Pachyderm cluster running on Microsoft Azure. Arguments are:\n" +
			" <container>: An Azure container where Pachyderm will store PFS data.\n" +
			" <size of volumes>: Size of persistent volumes, in GB (assumed to all be the same).\n",
		Run: cmdutil.RunFixedArgs(4, func(args []string) (retErr error) {
			metrics := !*noMetrics
			if metrics && !dev {
				start := time.Now()
				startMetricsWait := _metrics.StartReportAndFlushUserAction("Deploy", start)
				defer startMetricsWait()
				defer func() {
					finishMetricsWait := _metrics.FinishReportAndFlushUserAction("Deploy", retErr, start)
					finishMetricsWait()
				}()
			}
			if _, err := base64.StdEncoding.DecodeString(args[2]); err != nil {
				return fmt.Errorf("storage-account-key needs to be base64 encoded; instead got '%v'", args[2])
			}
			if opts.EtcdVolume != "" {
				tempURI, err := url.ParseRequestURI(opts.EtcdVolume)
				if err != nil {
					return fmt.Errorf("Volume URI needs to be a well-formed URI; instead got '%v'", opts.EtcdVolume)
				}
				opts.EtcdVolume = tempURI.String()
			}
			volumeSize, err := strconv.Atoi(args[3])
			if err != nil {
				return fmt.Errorf("volume size needs to be an integer; instead got %v", args[3])
			}
			manifest := getEncoder(outputFormat)
			container := strings.TrimPrefix(args[0], "wasb://")
			accountName, accountKey := args[1], args[2]
			if err = assets.WriteMicrosoftAssets(manifest, opts, container, accountName, accountKey, volumeSize); err != nil {
				return err
			}
			return kubectlCreate(dryRun, manifest, opts, metrics)
		}),
	}
	// "pachctl deploy storage": push storage credentials to a running cluster.
	deployStorage := &cobra.Command{
		Use:   "storage <backend> ...",
		Short: "Deploy credentials for a particular storage provider.",
		Long: `
Deploy credentials for a particular storage provider, so that Pachyderm can
ingress data from and egress data to it. Currently three backends are
supported: aws, google, and azure. To see the required arguments for a
particular backend, run "pachctl deploy storage <backend>"`,
		Run: cmdutil.RunBoundedArgs(1, 5, func(args []string) (retErr error) {
			var data map[string][]byte
			switch args[0] {
			case "amazon", "aws":
				// Need at least 4 arguments: backend, bucket, id, secret
				if len(args) < 4 {
					return fmt.Errorf("Usage: pachctl deploy storage amazon <region> <id> <secret> <token>\n\n<token> is optional")
				}
				var token string
				if len(args) == 5 {
					token = args[4]
				}
				data = assets.AmazonSecret(args[1], "", args[2], args[3], token, "")
			case "google":
				if len(args) < 2 {
					return fmt.Errorf("Usage: pachctl deploy storage google <service account creds file>")
				}
				credBytes, err := ioutil.ReadFile(args[1])
				if err != nil {
					// NOTE(review): the message formats args[2] but the file
					// actually read is args[1] — likely an index typo.
					return fmt.Errorf("error reading creds file %s: %v", args[2], err)
				}
				data = assets.GoogleSecret("", string(credBytes))
			case "azure":
				// Need 3 arguments: backend, account name, account key
				if len(args) != 3 {
					return fmt.Errorf("Usage: pachctl deploy storage azure <account name> <account key>")
				}
				data = assets.MicrosoftSecret("", args[1], args[2])
			}
			c, err := client.NewOnUserMachine(!*noMetrics, !*noPortForwarding, "user")
			if err != nil {
				return fmt.Errorf("error constructing pachyderm client: %v", err)
			}
			defer c.Close()
			_, err = c.DeployStorageSecret(context.Background(), &deployclient.DeployStorageSecretRequest{
				Secrets: data,
			})
			if err != nil {
				return fmt.Errorf("error deploying storage secret to pachd: %v", err)
			}
			return nil
		}),
	}
	// "pachctl deploy list-images": print every image the deployment would use.
	listImages := &cobra.Command{
		Use:   "list-images",
		Short: "Output the list of images in a deployment.",
		Long:  "Output the list of images in a deployment.",
		Run: cmdutil.RunFixedArgs(0, func(args []string) error {
			for _, image := range assets.Images(opts) {
				fmt.Println(image)
			}
			return nil
		}),
	}
	// "pachctl deploy export-images": write a tarball of deployment images.
	exportImages := &cobra.Command{
		Use:   "export-images output-file",
		Short: "Export a tarball (to stdout) containing all of the images in a deployment.",
		Long:  "Export a tarball (to stdout) containing all of the images in a deployment.",
		Run: cmdutil.RunFixedArgs(1, func(args []string) (retErr error) {
			file, err := os.Create(args[0])
			if err != nil {
				return err
			}
			defer func() {
				if err := file.Close(); err != nil && retErr == nil {
					retErr = err
				}
			}()
			return images.Export(opts, file)
		}),
	}
	// "pachctl deploy import-images": push a tarball of images to a registry.
	importImages := &cobra.Command{
		Use:   "import-images input-file",
		Short: "Import a tarball (from stdin) containing all of the images in a deployment and push them to a private registry.",
		Long:  "Import a tarball (from stdin) containing all of the images in a deployment and push them to a private registry.",
		Run: cmdutil.RunFixedArgs(1, func(args []string) (retErr error) {
			file, err := os.Open(args[0])
			if err != nil {
				return err
			}
			defer func() {
				if err := file.Close(); err != nil && retErr == nil {
					retErr = err
				}
			}()
			return images.Import(opts, file)
		}),
	}
	// Parent "deploy" command. PersistentPreRun builds the shared AssetOpts
	// from the persistent flags before any subcommand's Run executes.
	deploy := &cobra.Command{
		Use:   "deploy amazon|google|microsoft|local|custom|storage",
		Short: "Deploy a Pachyderm cluster.",
		Long:  "Deploy a Pachyderm cluster.",
		PersistentPreRun: cmdutil.Run(func([]string) error {
			dashImage = getDefaultOrLatestDashImage(dashImage, dryRun)
			opts = &assets.AssetOpts{
				PachdShards:             uint64(pachdShards),
				Version:                 version.PrettyPrintVersion(version.Version),
				LogLevel:                logLevel,
				Metrics:                 !*noMetrics,
				PachdCPURequest:         pachdCPURequest,
				PachdNonCacheMemRequest: pachdNonCacheMemRequest,
				BlockCacheSize:          blockCacheSize,
				EtcdCPURequest:          etcdCPURequest,
				EtcdMemRequest:          etcdMemRequest,
				EtcdNodes:               etcdNodes,
				EtcdVolume:              etcdVolume,
				EtcdStorageClassName:    etcdStorageClassName,
				DashOnly:                dashOnly,
				NoDash:                  noDash,
				DashImage:               dashImage,
				Registry:                registry,
				ImagePullSecret:         imagePullSecret,
				NoGuaranteed:            noGuaranteed,
				NoRBAC:                  noRBAC,
				LocalRoles:              localRoles,
				Namespace:               namespace,
				NoExposeDockerSocket:    noExposeDockerSocket,
				ExposeObjectAPI:         exposeObjectAPI,
			}
			if tlsCertKey != "" {
				// TODO(msteffen): If either the cert path or the key path contains a
				// comma, this doesn't work
				certKey := strings.Split(tlsCertKey, ",")
				if len(certKey) != 2 {
					return fmt.Errorf("could not split TLS certificate and key correctly; must have two parts but got: %#v", certKey)
				}
				opts.TLS = &assets.TLSOpts{
					ServerCert: certKey[0],
					ServerKey:  certKey[1],
				}
			}
			return nil
		}),
	}
	deploy.PersistentFlags().IntVar(&pachdShards, "shards", 16, "(rarely set) The maximum number of pachd nodes allowed in the cluster; increasing this number blindly can result in degraded performance.")
	deploy.PersistentFlags().IntVar(&etcdNodes, "dynamic-etcd-nodes", 0, "Deploy etcd as a StatefulSet with the given number of pods. The persistent volumes used by these pods are provisioned dynamically. Note that StatefulSet is currently a beta kubernetes feature, which might be unavailable in older versions of kubernetes.")
	deploy.PersistentFlags().StringVar(&etcdVolume, "static-etcd-volume", "", "Deploy etcd as a ReplicationController with one pod. The pod uses the given persistent volume.")
	deploy.PersistentFlags().StringVar(&etcdStorageClassName, "etcd-storage-class", "", "If set, the name of an existing StorageClass to use for etcd storage. Ignored if --static-etcd-volume is set.")
	deploy.PersistentFlags().BoolVar(&dryRun, "dry-run", false, "Don't actually deploy pachyderm to Kubernetes, instead just print the manifest.")
	// NOTE(review): "formmat" is a typo in user-visible help text ("format").
	deploy.PersistentFlags().StringVarP(&outputFormat, "output", "o", "json", "Output formmat. One of: json|yaml")
	deploy.PersistentFlags().StringVar(&logLevel, "log-level", "info", "The level of log messages to print options are, from least to most verbose: \"error\", \"info\", \"debug\".")
	deploy.PersistentFlags().BoolVar(&dashOnly, "dashboard-only", false, "Only deploy the Pachyderm UI (experimental), without the rest of pachyderm. This is for launching the UI adjacent to an existing Pachyderm cluster. After deployment, run \"pachctl port-forward\" to connect")
	deploy.PersistentFlags().BoolVar(&noDash, "no-dashboard", false, "Don't deploy the Pachyderm UI alongside Pachyderm (experimental).")
	deploy.PersistentFlags().StringVar(&registry, "registry", "", "The registry to pull images from.")
	deploy.PersistentFlags().StringVar(&imagePullSecret, "image-pull-secret", "", "A secret in Kubernetes that's needed to pull from your private registry.")
	deploy.PersistentFlags().StringVar(&dashImage, "dash-image", "", "Image URL for pachyderm dashboard")
	deploy.PersistentFlags().BoolVar(&noGuaranteed, "no-guaranteed", false, "Don't use guaranteed QoS for etcd and pachd deployments. Turning this on (turning guaranteed QoS off) can lead to more stable local clusters (such as a on Minikube), it should normally be used for production clusters.")
	deploy.PersistentFlags().BoolVar(&noRBAC, "no-rbac", false, "Don't deploy RBAC roles for Pachyderm. (for k8s versions prior to 1.8)")
	deploy.PersistentFlags().BoolVar(&localRoles, "local-roles", false, "Use namespace-local roles instead of cluster roles. Ignored if --no-rbac is set.")
	deploy.PersistentFlags().StringVar(&namespace, "namespace", "default", "Kubernetes namespace to deploy Pachyderm to.")
	deploy.PersistentFlags().BoolVar(&noExposeDockerSocket, "no-expose-docker-socket", false, "Don't expose the Docker socket to worker containers. This limits the privileges of workers which prevents them from automatically setting the container's working dir and user.")
	deploy.PersistentFlags().BoolVar(&exposeObjectAPI, "expose-object-api", false, "If set, instruct pachd to serve its object/block API on its public port (not safe with auth enabled, do not set in production).")
	deploy.PersistentFlags().StringVar(&tlsCertKey, "tls", "", "string of the form \"<cert path>,<key path>\" of the signed TLS certificate and private key that Pachd should use for TLS authentication (enables TLS-encrypted communication with Pachd)")
	deploy.AddCommand(
		deployLocal,
		deployAmazon,
		deployGoogle,
		deployMicrosoft,
		deployCustom,
		deployStorage,
		listImages,
		exportImages,
		importImages,
	)
	// Flags for setting pachd resource requests. These should rarely be set --
	// only if we get the defaults wrong, or users have an unusual access pattern
	//
	// All of these are empty by default, because the actual default values depend
	// on the backend to which we're deploying. The defaults are set in
	// s/s/pkg/deploy/assets/assets.go
	deploy.PersistentFlags().StringVar(&pachdCPURequest,
		"pachd-cpu-request", "", "(rarely set) The size of Pachd's CPU "+
			"request, which we give to Kubernetes. Size is in cores (with partial "+
			"cores allowed and encouraged).")
	deploy.PersistentFlags().StringVar(&blockCacheSize, "block-cache-size", "",
		"Size of pachd's in-memory cache for PFS files. Size is specified in "+
			"bytes, with allowed SI suffixes (M, K, G, Mi, Ki, Gi, etc).")
	deploy.PersistentFlags().StringVar(&pachdNonCacheMemRequest,
		"pachd-memory-request", "", "(rarely set) The size of PachD's memory "+
			"request in addition to its block cache (set via --block-cache-size). "+
			"Size is in bytes, with SI suffixes (M, K, G, Mi, Ki, Gi, etc).")
	deploy.PersistentFlags().StringVar(&etcdCPURequest,
		"etcd-cpu-request", "", "(rarely set) The size of etcd's CPU request, "+
			"which we give to Kubernetes. Size is in cores (with partial cores "+
			"allowed and encouraged).")
	deploy.PersistentFlags().StringVar(&etcdMemRequest,
		"etcd-memory-request", "", "(rarely set) The size of etcd's memory "+
			"request. Size is in bytes, with SI suffixes (M, K, G, Mi, Ki, Gi, "+
			"etc).")
	return deploy
}
// Cmds returns a list of cobra commands for deploying Pachyderm clusters.
func Cmds(noMetrics *bool, noPortForwarding *bool) []*cobra.Command {
	deploy := DeployCmd(noMetrics, noPortForwarding)
	var all bool
	var namespace string
	// "pachctl undeploy": delete the cluster's Kubernetes assets; with --all,
	// also delete persistent volumes (which may destroy metadata).
	undeploy := &cobra.Command{
		Use:   "undeploy",
		Short: "Tear down a deployed Pachyderm cluster.",
		Long:  "Tear down a deployed Pachyderm cluster.",
		Run: cmdutil.RunFixedArgs(0, func(args []string) error {
			if all {
				// Destructive path: require interactive confirmation first.
				fmt.Printf(`
By using the --all flag, you are going to delete everything, including the
persistent volumes where metadata is stored. If your persistent volumes
were dynamically provisioned (i.e. if you used the "--dynamic-etcd-nodes"
flag), the underlying volumes will be removed, making metadata such repos,
commits, pipelines, and jobs unrecoverable. If your persistent volume was
manually provisioned (i.e. if you used the "--static-etcd-volume" flag), the
underlying volume will not be removed.
Are you sure you want to proceed? yN
`)
				r := bufio.NewReader(os.Stdin)
				bytes, err := r.ReadBytes('\n')
				if err != nil {
					return err
				}
				if !(bytes[0] == 'y' || bytes[0] == 'Y') {
					return nil
				}
			}
			io := cmdutil.IO{
				Stdout: os.Stdout,
				Stderr: os.Stderr,
			}
			// NOTE(review): this local shadows the imported "assets" package
			// for the remainder of this closure.
			assets := []string{
				"service",
				"replicationcontroller",
				"deployment",
				"serviceaccount",
				"secret",
				"statefulset",
				"clusterrole",
				"clusterrolebinding",
			}
			if all {
				assets = append(assets, []string{
					"storageclass",
					"persistentvolumeclaim",
					"persistentvolume",
				}...)
			}
			for _, asset := range assets {
				if err := cmdutil.RunIO(io, "kubectl", "delete", asset, "-l", "suite=pachyderm", "--namespace", namespace); err != nil {
					return err
				}
			}
			return nil
		}),
	}
	undeploy.Flags().BoolVarP(&all, "all", "a", false, `
Delete everything, including the persistent volumes where metadata
is stored. If your persistent volumes were dynamically provisioned (i.e. if
you used the "--dynamic-etcd-nodes" flag), the underlying volumes will be
removed, making metadata such repos, commits, pipelines, and jobs
unrecoverable. If your persistent volume was manually provisioned (i.e. if
you used the "--static-etcd-volume" flag), the underlying volume will not be
removed.`)
	undeploy.Flags().StringVar(&namespace, "namespace", "default", "Kubernetes namespace to undeploy Pachyderm from.")
	var updateDashDryRun bool
	var updateDashOutputFormat string
	// "pachctl update-dash": tear down and redeploy only the dashboard at the
	// latest compatible version.
	updateDash := &cobra.Command{
		Use:   "update-dash",
		Short: "Update and redeploy the Pachyderm Dashboard at the latest compatible version.",
		Long:  "Update and redeploy the Pachyderm Dashboard at the latest compatible version.",
		Run: cmdutil.RunFixedArgs(0, func(args []string) error {
			// Undeploy the dash
			if !updateDashDryRun {
				io := cmdutil.IO{
					Stdout: os.Stdout,
					Stderr: os.Stderr,
				}
				if err := cmdutil.RunIO(io, "kubectl", "delete", "deploy", "-l", "suite=pachyderm,app=dash"); err != nil {
					return err
				}
				if err := cmdutil.RunIO(io, "kubectl", "delete", "svc", "-l", "suite=pachyderm,app=dash"); err != nil {
					return err
				}
			}
			// Redeploy the dash
			manifest := getEncoder(updateDashOutputFormat)
			dashImage := getDefaultOrLatestDashImage("", updateDashDryRun)
			opts := &assets.AssetOpts{
				DashOnly:  true,
				DashImage: dashImage,
			}
			// NOTE(review): WriteDashboardAssets's error return is ignored here.
			assets.WriteDashboardAssets(manifest, opts)
			return kubectlCreate(updateDashDryRun, manifest, opts, false)
		}),
	}
	updateDash.Flags().BoolVar(&updateDashDryRun, "dry-run", false, "Don't actually deploy Pachyderm Dash to Kubernetes, instead just print the manifest.")
	// NOTE(review): "formmat" is a typo in user-visible help text ("format").
	updateDash.Flags().StringVarP(&updateDashOutputFormat, "output", "o", "json", "Output formmat. One of: json|yaml")
	return []*cobra.Command{deploy, undeploy, updateDash}
}
// getDefaultOrLatestDashImage resolves the dash image to deploy. An
// explicitly supplied dashImage wins; otherwise the published compatibility
// list for this pachctl version is consulted and the newest compatible image
// is returned, falling back to defaultDashImage on any error. Unless dryRun
// is set, the deferred handler warns when the lookup fails.
func getDefaultOrLatestDashImage(dashImage string, dryRun bool) string {
	var err error
	version := version.PrettyPrintVersion(version.Version)
	defer func() {
		if err != nil && !dryRun {
			fmt.Printf("No updated dash image found for pachctl %v: %v Falling back to dash image %v\n", version, err, defaultDashImage)
		}
	}()
	if dashImage != "" {
		// It has been supplied explicitly by version on the command line
		return dashImage
	}
	dashImage = defaultDashImage
	compatibleDashVersionsURL := fmt.Sprintf("https://raw.githubusercontent.com/pachyderm/pachyderm/master/etc/compatibility/%v", version)
	resp, err := http.Get(compatibleDashVersionsURL)
	if err != nil {
		return dashImage
	}
	// Fix: the response body was previously never closed, leaking the
	// underlying connection on every call.
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return dashImage
	}
	if resp.StatusCode != 200 {
		err = errors.New(string(body))
		return dashImage
	}
	allVersions := strings.Split(strings.TrimSpace(string(body)), "\n")
	if len(allVersions) < 1 {
		return dashImage
	}
	latestVersion := strings.TrimSpace(allVersions[len(allVersions)-1])
	return fmt.Sprintf("pachyderm/dash:%v", latestVersion)
}
Always ask for confirmation when undeploying
package cmds
import (
"bufio"
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"regexp"
"strconv"
"strings"
"time"
"github.com/pachyderm/pachyderm/src/client"
deployclient "github.com/pachyderm/pachyderm/src/client/deploy"
"github.com/pachyderm/pachyderm/src/client/version"
"github.com/pachyderm/pachyderm/src/server/pkg/cmdutil"
"github.com/pachyderm/pachyderm/src/server/pkg/deploy"
"github.com/pachyderm/pachyderm/src/server/pkg/deploy/assets"
"github.com/pachyderm/pachyderm/src/server/pkg/deploy/images"
_metrics "github.com/pachyderm/pachyderm/src/server/pkg/metrics"
"github.com/ghodss/yaml"
"github.com/spf13/cobra"
)
// defaultDashImage is the dash image used when no newer compatible image can
// be resolved (see getDefaultOrLatestDashImage).
var defaultDashImage = "pachyderm/dash:1.8-preview-7"

// Sanity-check patterns for AWS credentials and regions. They are used to
// warn (with a confirmation prompt), not to hard-fail, when a supplied value
// looks malformed.
var awsAccessKeyIDRE = regexp.MustCompile("^[A-Z0-9]{20}$")
var awsSecretRE = regexp.MustCompile("^[A-Za-z0-9/+=]{40}$")
var awsRegionRE = regexp.MustCompile("^[a-z]{2}(?:-gov)?-[a-z]+-[0-9]$")
// BytesEncoder is an assets.Encoder that accumulates its encoded output in an
// in-memory buffer, so the manifest can be written to stdout or piped to
// kubectl after all assets have been encoded.
type BytesEncoder interface {
	assets.Encoder
	// Buffer returns the current buffer of the encoder.
	Buffer() *bytes.Buffer
}
// jsonEncoder is a BytesEncoder that renders each item as tab-indented JSON
// into an in-memory buffer, separating items with a blank line.
type jsonEncoder struct {
	encoder *json.Encoder
	buffer  *bytes.Buffer
}

// newJSONEncoder returns a jsonEncoder writing into a fresh buffer.
func newJSONEncoder() *jsonEncoder {
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)
	enc.SetIndent("", "\t")
	return &jsonEncoder{encoder: enc, buffer: &buf}
}

// Encode writes item as indented JSON followed by a blank separator line.
func (e *jsonEncoder) Encode(item interface{}) error {
	if err := e.encoder.Encode(item); err != nil {
		return err
	}
	_, err := fmt.Fprintf(e.buffer, "\n")
	return err
}

// Buffer returns the current buffer of the encoder.
func (e *jsonEncoder) Buffer() *bytes.Buffer {
	return e.buffer
}
// yamlEncoder is a BytesEncoder that renders each item as a YAML document
// into an in-memory buffer, terminating each document with a "---" separator.
type yamlEncoder struct {
	buffer *bytes.Buffer
}

// newYAMLEncoder returns a yamlEncoder writing into a fresh buffer.
func newYAMLEncoder() *yamlEncoder {
	buffer := &bytes.Buffer{}
	return &yamlEncoder{buffer}
}

// Encode marshals item to YAML, appends it to the buffer, and writes the
// "---" document separator.
func (e *yamlEncoder) Encode(item interface{}) error {
	// 'out' was previously named 'bytes', shadowing the imported bytes package.
	out, err := yaml.Marshal(item)
	if err != nil {
		return err
	}
	if _, err := e.buffer.Write(out); err != nil {
		return err
	}
	_, err = fmt.Fprintf(e.buffer, "---\n")
	return err
}

// Buffer returns the current buffer of the encoder.
func (e *yamlEncoder) Buffer() *bytes.Buffer {
	return e.buffer
}
// getEncoder returns the encoder matching the given output format.
// "yaml" selects YAML; "json" and any unrecognized format select JSON.
func getEncoder(outputFormat string) BytesEncoder {
	if outputFormat == "yaml" {
		return newYAMLEncoder()
	}
	// "json" and the default case are identical.
	return newJSONEncoder()
}
// kubectlCreate deploys the generated manifest by piping it through
// "kubectl apply". When dryRun is set, the manifest is printed to stdout
// instead of being applied.
func kubectlCreate(dryRun bool, manifest BytesEncoder, opts *assets.AssetOpts, metrics bool) error {
	if dryRun {
		_, err := os.Stdout.Write(manifest.Buffer().Bytes())
		return err
	}
	stdio := cmdutil.IO{
		Stdin:  manifest.Buffer(),
		Stdout: os.Stdout,
		Stderr: os.Stderr,
	}
	// we set --validate=false due to https://github.com/kubernetes/kubernetes/issues/53309
	err := cmdutil.RunIO(stdio, "kubectl", "apply", "-f", "-", "--validate=false")
	if err != nil {
		return err
	}
	fmt.Println("\nPachyderm is launching. Check its status with \"kubectl get all\"")
	// Mention the dashboard whenever it is part of this deployment.
	if opts.DashOnly || !opts.NoDash {
		fmt.Println("Once launched, access the dashboard by running \"pachctl port-forward\"")
	}
	fmt.Println("")
	return nil
}
// containsEmpty reports whether any element of vals is the empty string.
// It is used for validation, particularly for checking that credential
// arguments are non-empty.
func containsEmpty(vals []string) bool {
	for i := range vals {
		if len(vals[i]) == 0 {
			return true
		}
	}
	return false
}
// DeployCmd returns a cobra.Command to deploy pachyderm.
func DeployCmd(noMetrics *bool, noPortForwarding *bool) *cobra.Command {
var pachdShards int
var hostPath string
var dev bool
var dryRun bool
var outputFormat string
var secure bool
var isS3V2 bool
var etcdNodes int
var etcdVolume string
var etcdStorageClassName string
var pachdCPURequest string
var pachdNonCacheMemRequest string
var blockCacheSize string
var etcdCPURequest string
var etcdMemRequest string
var logLevel string
var persistentDiskBackend string
var objectStoreBackend string
var opts *assets.AssetOpts
var dashOnly bool
var noDash bool
var dashImage string
var registry string
var imagePullSecret string
var noGuaranteed bool
var noRBAC bool
var localRoles bool
var namespace string
var noExposeDockerSocket bool
var exposeObjectAPI bool
var tlsCertKey string
deployLocal := &cobra.Command{
Use: "local",
Short: "Deploy a single-node Pachyderm cluster with local metadata storage.",
Long: "Deploy a single-node Pachyderm cluster with local metadata storage.",
Run: cmdutil.RunFixedArgs(0, func(args []string) (retErr error) {
metrics := !*noMetrics
if metrics && !dev {
start := time.Now()
startMetricsWait := _metrics.StartReportAndFlushUserAction("Deploy", start)
defer startMetricsWait()
defer func() {
finishMetricsWait := _metrics.FinishReportAndFlushUserAction("Deploy", retErr, start)
finishMetricsWait()
}()
}
manifest := getEncoder(outputFormat)
if dev {
// Use dev build instead of release build
opts.Version = deploy.DevVersionTag
// we turn metrics off this is a dev cluster. The default is set by
// deploy.PersistentPreRun, below.
opts.Metrics = false
// Disable authentication, for tests
opts.DisableAuthentication = true
// Serve the Pachyderm object/block API locally, as this is needed by
// our tests (and authentication is disabled anyway)
opts.ExposeObjectAPI = true
}
if err := assets.WriteLocalAssets(manifest, opts, hostPath); err != nil {
return err
}
return kubectlCreate(dryRun, manifest, opts, metrics)
}),
}
deployLocal.Flags().StringVar(&hostPath, "host-path", "/var/pachyderm", "Location on the host machine where PFS metadata will be stored.")
deployLocal.Flags().BoolVarP(&dev, "dev", "d", false, "Deploy pachd with local version tags, disable metrics, expose Pachyderm's object/block API, and use an insecure authentication mechanism (do not set on any cluster with sensitive data)")
deployGoogle := &cobra.Command{
Use: "google <GCS bucket> <size of disk(s) (in GB)> [<service account creds file>]",
Short: "Deploy a Pachyderm cluster running on GCP.",
Long: "Deploy a Pachyderm cluster running on GCP.\n" +
"Arguments are:\n" +
" <GCS bucket>: A GCS bucket where Pachyderm will store PFS data.\n" +
" <GCE persistent disks>: A comma-separated list of GCE persistent disks, one per etcd node (see --etcd-nodes).\n" +
" <size of disks>: Size of GCE persistent disks in GB (assumed to all be the same).\n" +
" <service account creds file>: a file contain a private key for a service account (downloaded from GCE).\n",
Run: cmdutil.RunBoundedArgs(2, 3, func(args []string) (retErr error) {
metrics := !*noMetrics
if metrics && !dev {
start := time.Now()
startMetricsWait := _metrics.StartReportAndFlushUserAction("Deploy", start)
defer startMetricsWait()
defer func() {
finishMetricsWait := _metrics.FinishReportAndFlushUserAction("Deploy", retErr, start)
finishMetricsWait()
}()
}
volumeSize, err := strconv.Atoi(args[1])
if err != nil {
return fmt.Errorf("volume size needs to be an integer; instead got %v", args[1])
}
manifest := getEncoder(outputFormat)
opts.BlockCacheSize = "0G" // GCS is fast so we want to disable the block cache. See issue #1650
var cred string
if len(args) == 3 {
credBytes, err := ioutil.ReadFile(args[2])
if err != nil {
return fmt.Errorf("error reading creds file %s: %v", args[2], err)
}
cred = string(credBytes)
}
bucket := strings.TrimPrefix(args[0], "gs://")
if err = assets.WriteGoogleAssets(manifest, opts, bucket, cred, volumeSize); err != nil {
return err
}
return kubectlCreate(dryRun, manifest, opts, metrics)
}),
}
deployCustom := &cobra.Command{
Use: "custom --persistent-disk <persistent disk backend> --object-store <object store backend> <persistent disk args> <object store args>",
Short: "(in progress) Deploy a custom Pachyderm cluster configuration",
Long: "(in progress) Deploy a custom Pachyderm cluster configuration.\n" +
"If <object store backend> is \"s3\", then the arguments are:\n" +
" <volumes> <size of volumes (in GB)> <bucket> <id> <secret> <endpoint>\n",
Run: cmdutil.RunBoundedArgs(4, 7, func(args []string) (retErr error) {
metrics := !*noMetrics
if metrics && !dev {
start := time.Now()
startMetricsWait := _metrics.StartReportAndFlushUserAction("Deploy", start)
defer startMetricsWait()
defer func() {
finishMetricsWait := _metrics.FinishReportAndFlushUserAction("Deploy", retErr, start)
finishMetricsWait()
}()
}
manifest := getEncoder(outputFormat)
err := assets.WriteCustomAssets(manifest, opts, args, objectStoreBackend, persistentDiskBackend, secure, isS3V2)
if err != nil {
return err
}
return kubectlCreate(dryRun, manifest, opts, metrics)
}),
}
deployCustom.Flags().BoolVarP(&secure, "secure", "s", false, "Enable secure access to a Minio server.")
deployCustom.Flags().StringVar(&persistentDiskBackend, "persistent-disk", "aws",
"(required) Backend providing persistent local volumes to stateful pods. "+
"One of: aws, google, or azure.")
deployCustom.Flags().StringVar(&objectStoreBackend, "object-store", "s3",
"(required) Backend providing an object-storage API to pachyderm. One of: "+
"s3, gcs, or azure-blob.")
deployCustom.Flags().BoolVar(&isS3V2, "isS3V2", false, "Enable S3V2 client")
var creds string
var vault string
var iamRole string
var cloudfrontDistribution string
deployAmazon := &cobra.Command{
Use: "amazon <S3 bucket> <region> <size of volumes (in GB)>",
Short: "Deploy a Pachyderm cluster running on AWS.",
Long: "Deploy a Pachyderm cluster running on AWS. Arguments are:\n" +
" <S3 bucket>: An S3 bucket where Pachyderm will store PFS data.\n" +
"\n" +
" <region>: The aws region where pachyderm is being deployed (e.g. us-west-1)\n" +
" <size of volumes>: Size of EBS volumes, in GB (assumed to all be the same).\n",
Run: cmdutil.RunFixedArgs(3, func(args []string) (retErr error) {
metrics := !*noMetrics
if metrics && !dev {
start := time.Now()
startMetricsWait := _metrics.StartReportAndFlushUserAction("Deploy", start)
defer startMetricsWait()
defer func() {
finishMetricsWait := _metrics.FinishReportAndFlushUserAction("Deploy", retErr, start)
finishMetricsWait()
}()
}
if creds == "" && vault == "" && iamRole == "" {
return fmt.Errorf("One of --credentials, --vault, or --iam-role needs to be provided")
}
// populate 'amazonCreds' & validate
var amazonCreds *assets.AmazonCreds
s := bufio.NewScanner(os.Stdin)
if creds != "" {
parts := strings.Split(creds, ",")
if len(parts) < 2 || len(parts) > 3 || containsEmpty(parts[:2]) {
return fmt.Errorf("Incorrect format of --credentials")
}
amazonCreds = &assets.AmazonCreds{ID: parts[0], Secret: parts[1]}
if len(parts) > 2 {
amazonCreds.Token = parts[2]
}
if !awsAccessKeyIDRE.MatchString(amazonCreds.ID) {
fmt.Printf("The AWS Access Key seems invalid (does not match %q). "+
"Do you want to continue deploying? [yN]\n", awsAccessKeyIDRE)
if s.Scan(); s.Text()[0] != 'y' && s.Text()[0] != 'Y' {
os.Exit(1)
}
}
if !awsSecretRE.MatchString(amazonCreds.Secret) {
fmt.Printf("The AWS Secret seems invalid (does not match %q). "+
"Do you want to continue deploying? [yN]\n", awsSecretRE)
if s.Scan(); s.Text()[0] != 'y' && s.Text()[0] != 'Y' {
os.Exit(1)
}
}
}
if vault != "" {
if amazonCreds != nil {
return fmt.Errorf("Only one of --credentials, --vault, or --iam-role needs to be provided")
}
parts := strings.Split(vault, ",")
if len(parts) != 3 || containsEmpty(parts) {
return fmt.Errorf("Incorrect format of --vault")
}
amazonCreds = &assets.AmazonCreds{VaultAddress: parts[0], VaultRole: parts[1], VaultToken: parts[2]}
}
if iamRole != "" {
if amazonCreds != nil {
return fmt.Errorf("Only one of --credentials, --vault, or --iam-role needs to be provided")
}
opts.IAMRole = iamRole
}
volumeSize, err := strconv.Atoi(args[2])
if err != nil {
return fmt.Errorf("volume size needs to be an integer; instead got %v", args[2])
}
if strings.TrimSpace(cloudfrontDistribution) != "" {
fmt.Printf("WARNING: You specified a cloudfront distribution. Deploying on AWS with cloudfront is currently " +
"an alpha feature. No security restrictions have been applied to cloudfront, making all data public (obscured but not secured)\n")
}
bucket, region := strings.TrimPrefix(args[0], "s3://"), args[1]
if !awsRegionRE.MatchString(region) {
fmt.Printf("The AWS region seems invalid (does not match %q). "+
"Do you want to continue deploying? [yN]\n", awsRegionRE)
if s.Scan(); s.Text()[0] != 'y' && s.Text()[0] != 'Y' {
os.Exit(1)
}
}
// generate manifest and write assets
manifest := getEncoder(outputFormat)
if err = assets.WriteAmazonAssets(manifest, opts, region, bucket, volumeSize, amazonCreds, cloudfrontDistribution); err != nil {
return err
}
return kubectlCreate(dryRun, manifest, opts, metrics)
}),
}
deployAmazon.Flags().StringVar(&cloudfrontDistribution, "cloudfront-distribution", "",
"Deploying on AWS with cloudfront is currently "+
"an alpha feature. No security restrictions have been"+
"applied to cloudfront, making all data public (obscured but not secured)")
deployAmazon.Flags().StringVar(&creds, "credentials", "", "Use the format \"<id>,<secret>[,<token>]\". You can get a token by running \"aws sts get-session-token\".")
deployAmazon.Flags().StringVar(&vault, "vault", "", "Use the format \"<address/hostport>,<role>,<token>\".")
deployAmazon.Flags().StringVar(&iamRole, "iam-role", "", fmt.Sprintf("Use the given IAM role for authorization, as opposed to using static credentials. The given role will be applied as the annotation %s, this used with a Kubernetes IAM role management system such as kube2iam allows you to give pachd credentials in a more secure way.", assets.IAMAnnotation))
deployMicrosoft := &cobra.Command{
Use: "microsoft <container> <storage account name> <storage account key> <size of volumes (in GB)>",
Short: "Deploy a Pachyderm cluster running on Microsoft Azure.",
Long: "Deploy a Pachyderm cluster running on Microsoft Azure. Arguments are:\n" +
" <container>: An Azure container where Pachyderm will store PFS data.\n" +
" <size of volumes>: Size of persistent volumes, in GB (assumed to all be the same).\n",
Run: cmdutil.RunFixedArgs(4, func(args []string) (retErr error) {
metrics := !*noMetrics
if metrics && !dev {
start := time.Now()
startMetricsWait := _metrics.StartReportAndFlushUserAction("Deploy", start)
defer startMetricsWait()
defer func() {
finishMetricsWait := _metrics.FinishReportAndFlushUserAction("Deploy", retErr, start)
finishMetricsWait()
}()
}
if _, err := base64.StdEncoding.DecodeString(args[2]); err != nil {
return fmt.Errorf("storage-account-key needs to be base64 encoded; instead got '%v'", args[2])
}
if opts.EtcdVolume != "" {
tempURI, err := url.ParseRequestURI(opts.EtcdVolume)
if err != nil {
return fmt.Errorf("Volume URI needs to be a well-formed URI; instead got '%v'", opts.EtcdVolume)
}
opts.EtcdVolume = tempURI.String()
}
volumeSize, err := strconv.Atoi(args[3])
if err != nil {
return fmt.Errorf("volume size needs to be an integer; instead got %v", args[3])
}
manifest := getEncoder(outputFormat)
container := strings.TrimPrefix(args[0], "wasb://")
accountName, accountKey := args[1], args[2]
if err = assets.WriteMicrosoftAssets(manifest, opts, container, accountName, accountKey, volumeSize); err != nil {
return err
}
return kubectlCreate(dryRun, manifest, opts, metrics)
}),
}
deployStorage := &cobra.Command{
Use: "storage <backend> ...",
Short: "Deploy credentials for a particular storage provider.",
Long: `
Deploy credentials for a particular storage provider, so that Pachyderm can
ingress data from and egress data to it. Currently three backends are
supported: aws, google, and azure. To see the required arguments for a
particular backend, run "pachctl deploy storage <backend>"`,
Run: cmdutil.RunBoundedArgs(1, 5, func(args []string) (retErr error) {
var data map[string][]byte
switch args[0] {
case "amazon", "aws":
// Need at least 4 arguments: backend, bucket, id, secret
if len(args) < 4 {
return fmt.Errorf("Usage: pachctl deploy storage amazon <region> <id> <secret> <token>\n\n<token> is optional")
}
var token string
if len(args) == 5 {
token = args[4]
}
data = assets.AmazonSecret(args[1], "", args[2], args[3], token, "")
case "google":
if len(args) < 2 {
return fmt.Errorf("Usage: pachctl deploy storage google <service account creds file>")
}
credBytes, err := ioutil.ReadFile(args[1])
if err != nil {
return fmt.Errorf("error reading creds file %s: %v", args[2], err)
}
data = assets.GoogleSecret("", string(credBytes))
case "azure":
// Need 3 arguments: backend, account name, account key
if len(args) != 3 {
return fmt.Errorf("Usage: pachctl deploy storage azure <account name> <account key>")
}
data = assets.MicrosoftSecret("", args[1], args[2])
}
c, err := client.NewOnUserMachine(!*noMetrics, !*noPortForwarding, "user")
if err != nil {
return fmt.Errorf("error constructing pachyderm client: %v", err)
}
defer c.Close()
_, err = c.DeployStorageSecret(context.Background(), &deployclient.DeployStorageSecretRequest{
Secrets: data,
})
if err != nil {
return fmt.Errorf("error deploying storage secret to pachd: %v", err)
}
return nil
}),
}
listImages := &cobra.Command{
Use: "list-images",
Short: "Output the list of images in a deployment.",
Long: "Output the list of images in a deployment.",
Run: cmdutil.RunFixedArgs(0, func(args []string) error {
for _, image := range assets.Images(opts) {
fmt.Println(image)
}
return nil
}),
}
exportImages := &cobra.Command{
Use: "export-images output-file",
Short: "Export a tarball (to stdout) containing all of the images in a deployment.",
Long: "Export a tarball (to stdout) containing all of the images in a deployment.",
Run: cmdutil.RunFixedArgs(1, func(args []string) (retErr error) {
file, err := os.Create(args[0])
if err != nil {
return err
}
defer func() {
if err := file.Close(); err != nil && retErr == nil {
retErr = err
}
}()
return images.Export(opts, file)
}),
}
importImages := &cobra.Command{
Use: "import-images input-file",
Short: "Import a tarball (from stdin) containing all of the images in a deployment and push them to a private registry.",
Long: "Import a tarball (from stdin) containing all of the images in a deployment and push them to a private registry.",
Run: cmdutil.RunFixedArgs(1, func(args []string) (retErr error) {
file, err := os.Open(args[0])
if err != nil {
return err
}
defer func() {
if err := file.Close(); err != nil && retErr == nil {
retErr = err
}
}()
return images.Import(opts, file)
}),
}
deploy := &cobra.Command{
Use: "deploy amazon|google|microsoft|local|custom|storage",
Short: "Deploy a Pachyderm cluster.",
Long: "Deploy a Pachyderm cluster.",
PersistentPreRun: cmdutil.Run(func([]string) error {
dashImage = getDefaultOrLatestDashImage(dashImage, dryRun)
opts = &assets.AssetOpts{
PachdShards: uint64(pachdShards),
Version: version.PrettyPrintVersion(version.Version),
LogLevel: logLevel,
Metrics: !*noMetrics,
PachdCPURequest: pachdCPURequest,
PachdNonCacheMemRequest: pachdNonCacheMemRequest,
BlockCacheSize: blockCacheSize,
EtcdCPURequest: etcdCPURequest,
EtcdMemRequest: etcdMemRequest,
EtcdNodes: etcdNodes,
EtcdVolume: etcdVolume,
EtcdStorageClassName: etcdStorageClassName,
DashOnly: dashOnly,
NoDash: noDash,
DashImage: dashImage,
Registry: registry,
ImagePullSecret: imagePullSecret,
NoGuaranteed: noGuaranteed,
NoRBAC: noRBAC,
LocalRoles: localRoles,
Namespace: namespace,
NoExposeDockerSocket: noExposeDockerSocket,
ExposeObjectAPI: exposeObjectAPI,
}
if tlsCertKey != "" {
// TODO(msteffen): If either the cert path or the key path contains a
// comma, this doesn't work
certKey := strings.Split(tlsCertKey, ",")
if len(certKey) != 2 {
return fmt.Errorf("could not split TLS certificate and key correctly; must have two parts but got: %#v", certKey)
}
opts.TLS = &assets.TLSOpts{
ServerCert: certKey[0],
ServerKey: certKey[1],
}
}
return nil
}),
}
deploy.PersistentFlags().IntVar(&pachdShards, "shards", 16, "(rarely set) The maximum number of pachd nodes allowed in the cluster; increasing this number blindly can result in degraded performance.")
deploy.PersistentFlags().IntVar(&etcdNodes, "dynamic-etcd-nodes", 0, "Deploy etcd as a StatefulSet with the given number of pods. The persistent volumes used by these pods are provisioned dynamically. Note that StatefulSet is currently a beta kubernetes feature, which might be unavailable in older versions of kubernetes.")
deploy.PersistentFlags().StringVar(&etcdVolume, "static-etcd-volume", "", "Deploy etcd as a ReplicationController with one pod. The pod uses the given persistent volume.")
deploy.PersistentFlags().StringVar(&etcdStorageClassName, "etcd-storage-class", "", "If set, the name of an existing StorageClass to use for etcd storage. Ignored if --static-etcd-volume is set.")
deploy.PersistentFlags().BoolVar(&dryRun, "dry-run", false, "Don't actually deploy pachyderm to Kubernetes, instead just print the manifest.")
deploy.PersistentFlags().StringVarP(&outputFormat, "output", "o", "json", "Output formmat. One of: json|yaml")
deploy.PersistentFlags().StringVar(&logLevel, "log-level", "info", "The level of log messages to print options are, from least to most verbose: \"error\", \"info\", \"debug\".")
deploy.PersistentFlags().BoolVar(&dashOnly, "dashboard-only", false, "Only deploy the Pachyderm UI (experimental), without the rest of pachyderm. This is for launching the UI adjacent to an existing Pachyderm cluster. After deployment, run \"pachctl port-forward\" to connect")
deploy.PersistentFlags().BoolVar(&noDash, "no-dashboard", false, "Don't deploy the Pachyderm UI alongside Pachyderm (experimental).")
deploy.PersistentFlags().StringVar(®istry, "registry", "", "The registry to pull images from.")
deploy.PersistentFlags().StringVar(&imagePullSecret, "image-pull-secret", "", "A secret in Kubernetes that's needed to pull from your private registry.")
deploy.PersistentFlags().StringVar(&dashImage, "dash-image", "", "Image URL for pachyderm dashboard")
deploy.PersistentFlags().BoolVar(&noGuaranteed, "no-guaranteed", false, "Don't use guaranteed QoS for etcd and pachd deployments. Turning this on (turning guaranteed QoS off) can lead to more stable local clusters (such as a on Minikube), it should normally be used for production clusters.")
deploy.PersistentFlags().BoolVar(&noRBAC, "no-rbac", false, "Don't deploy RBAC roles for Pachyderm. (for k8s versions prior to 1.8)")
deploy.PersistentFlags().BoolVar(&localRoles, "local-roles", false, "Use namespace-local roles instead of cluster roles. Ignored if --no-rbac is set.")
deploy.PersistentFlags().StringVar(&namespace, "namespace", "default", "Kubernetes namespace to deploy Pachyderm to.")
deploy.PersistentFlags().BoolVar(&noExposeDockerSocket, "no-expose-docker-socket", false, "Don't expose the Docker socket to worker containers. This limits the privileges of workers which prevents them from automatically setting the container's working dir and user.")
deploy.PersistentFlags().BoolVar(&exposeObjectAPI, "expose-object-api", false, "If set, instruct pachd to serve its object/block API on its public port (not safe with auth enabled, do not set in production).")
deploy.PersistentFlags().StringVar(&tlsCertKey, "tls", "", "string of the form \"<cert path>,<key path>\" of the signed TLS certificate and private key that Pachd should use for TLS authentication (enables TLS-encrypted communication with Pachd)")
deploy.AddCommand(
deployLocal,
deployAmazon,
deployGoogle,
deployMicrosoft,
deployCustom,
deployStorage,
listImages,
exportImages,
importImages,
)
// Flags for setting pachd resource requests. These should rarely be set --
// only if we get the defaults wrong, or users have an unusual access pattern
//
// All of these are empty by default, because the actual default values depend
// on the backend to which we're. The defaults are set in
// s/s/pkg/deploy/assets/assets.go
deploy.PersistentFlags().StringVar(&pachdCPURequest,
"pachd-cpu-request", "", "(rarely set) The size of Pachd's CPU "+
"request, which we give to Kubernetes. Size is in cores (with partial "+
"cores allowed and encouraged).")
deploy.PersistentFlags().StringVar(&blockCacheSize, "block-cache-size", "",
"Size of pachd's in-memory cache for PFS files. Size is specified in "+
"bytes, with allowed SI suffixes (M, K, G, Mi, Ki, Gi, etc).")
deploy.PersistentFlags().StringVar(&pachdNonCacheMemRequest,
"pachd-memory-request", "", "(rarely set) The size of PachD's memory "+
"request in addition to its block cache (set via --block-cache-size). "+
"Size is in bytes, with SI suffixes (M, K, G, Mi, Ki, Gi, etc).")
deploy.PersistentFlags().StringVar(&etcdCPURequest,
"etcd-cpu-request", "", "(rarely set) The size of etcd's CPU request, "+
"which we give to Kubernetes. Size is in cores (with partial cores "+
"allowed and encouraged).")
deploy.PersistentFlags().StringVar(&etcdMemRequest,
"etcd-memory-request", "", "(rarely set) The size of etcd's memory "+
"request. Size is in bytes, with SI suffixes (M, K, G, Mi, Ki, Gi, "+
"etc).")
return deploy
}
// Cmds returns a list of cobra commands for deploying Pachyderm clusters:
// "deploy" (see DeployCmd), "undeploy", and "update-dash".
func Cmds(noMetrics *bool, noPortForwarding *bool) []*cobra.Command {
	deploy := DeployCmd(noMetrics, noPortForwarding)
	var all bool
	var namespace string
	undeploy := &cobra.Command{
		Use:   "undeploy",
		Short: "Tear down a deployed Pachyderm cluster.",
		Long:  "Tear down a deployed Pachyderm cluster.",
		Run: cmdutil.RunFixedArgs(0, func(args []string) error {
			// Always ask for confirmation; print an extra warning first when
			// --all would also delete the persistent volumes.
			if all {
				fmt.Printf(`
By using the --all flag, you are going to delete everything, including the
persistent volumes where metadata is stored. If your persistent volumes
were dynamically provisioned (i.e. if you used the "--dynamic-etcd-nodes"
flag), the underlying volumes will be removed, making metadata such repos,
commits, pipelines, and jobs unrecoverable. If your persistent volume was
manually provisioned (i.e. if you used the "--static-etcd-volume" flag), the
underlying volume will not be removed.
`)
			}
			fmt.Println("Are you sure you want to do this? (y/n):")
			r := bufio.NewReader(os.Stdin)
			// 'answer' was previously named 'bytes', shadowing the imported
			// bytes package. If err is nil, answer ends in '\n' and is non-empty.
			answer, err := r.ReadBytes('\n')
			if err != nil {
				return err
			}
			if answer[0] == 'y' || answer[0] == 'Y' {
				io := cmdutil.IO{
					Stdout: os.Stdout,
					Stderr: os.Stderr,
				}
				// 'resources' was previously named 'assets', shadowing the
				// imported assets package.
				resources := []string{
					"service",
					"replicationcontroller",
					"deployment",
					"serviceaccount",
					"secret",
					"statefulset",
					"clusterrole",
					"clusterrolebinding",
				}
				if all {
					resources = append(resources, []string{
						"storageclass",
						"persistentvolumeclaim",
						"persistentvolume",
					}...)
				}
				for _, asset := range resources {
					if err := cmdutil.RunIO(io, "kubectl", "delete", asset, "-l", "suite=pachyderm", "--namespace", namespace); err != nil {
						return err
					}
				}
			}
			return nil
		}),
	}
	undeploy.Flags().BoolVarP(&all, "all", "a", false, `
Delete everything, including the persistent volumes where metadata
is stored. If your persistent volumes were dynamically provisioned (i.e. if
you used the "--dynamic-etcd-nodes" flag), the underlying volumes will be
removed, making metadata such repos, commits, pipelines, and jobs
unrecoverable. If your persistent volume was manually provisioned (i.e. if
you used the "--static-etcd-volume" flag), the underlying volume will not be
removed.`)
	undeploy.Flags().StringVar(&namespace, "namespace", "default", "Kubernetes namespace to undeploy Pachyderm from.")
	var updateDashDryRun bool
	var updateDashOutputFormat string
	updateDash := &cobra.Command{
		Use:   "update-dash",
		Short: "Update and redeploy the Pachyderm Dashboard at the latest compatible version.",
		Long:  "Update and redeploy the Pachyderm Dashboard at the latest compatible version.",
		Run: cmdutil.RunFixedArgs(0, func(args []string) error {
			// Undeploy the dash
			if !updateDashDryRun {
				io := cmdutil.IO{
					Stdout: os.Stdout,
					Stderr: os.Stderr,
				}
				if err := cmdutil.RunIO(io, "kubectl", "delete", "deploy", "-l", "suite=pachyderm,app=dash"); err != nil {
					return err
				}
				if err := cmdutil.RunIO(io, "kubectl", "delete", "svc", "-l", "suite=pachyderm,app=dash"); err != nil {
					return err
				}
			}
			// Redeploy the dash
			manifest := getEncoder(updateDashOutputFormat)
			dashImage := getDefaultOrLatestDashImage("", updateDashDryRun)
			opts := &assets.AssetOpts{
				DashOnly:  true,
				DashImage: dashImage,
			}
			assets.WriteDashboardAssets(manifest, opts)
			return kubectlCreate(updateDashDryRun, manifest, opts, false)
		}),
	}
	updateDash.Flags().BoolVar(&updateDashDryRun, "dry-run", false, "Don't actually deploy Pachyderm Dash to Kubernetes, instead just print the manifest.")
	updateDash.Flags().StringVarP(&updateDashOutputFormat, "output", "o", "json", "Output format. One of: json|yaml")
	return []*cobra.Command{deploy, undeploy, updateDash}
}
// getDefaultOrLatestDashImage returns the dash image to deploy. If dashImage
// is non-empty it is returned unchanged (the user specified it explicitly).
// Otherwise the function looks up the latest dash image compatible with this
// pachctl version from the pachyderm compatibility list, falling back to
// defaultDashImage on any error (the fallback is reported unless dryRun).
func getDefaultOrLatestDashImage(dashImage string, dryRun bool) string {
	var err error
	// Previously named 'version', which shadowed the imported version package.
	clientVersion := version.PrettyPrintVersion(version.Version)
	defer func() {
		if err != nil && !dryRun {
			fmt.Printf("No updated dash image found for pachctl %v: %v Falling back to dash image %v\n", clientVersion, err, defaultDashImage)
		}
	}()
	if dashImage != "" {
		// It has been supplied explicitly by version on the command line
		return dashImage
	}
	dashImage = defaultDashImage
	compatibleDashVersionsURL := fmt.Sprintf("https://raw.githubusercontent.com/pachyderm/pachyderm/master/etc/compatibility/%v", clientVersion)
	resp, err := http.Get(compatibleDashVersionsURL)
	if err != nil {
		return dashImage
	}
	// Close the response body so the underlying connection can be reused
	// (previously leaked).
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return dashImage
	}
	if resp.StatusCode != 200 {
		err = errors.New(string(body))
		return dashImage
	}
	// The compatibility file lists versions oldest-to-newest, one per line;
	// take the last (newest) entry.
	allVersions := strings.Split(strings.TrimSpace(string(body)), "\n")
	if len(allVersions) < 1 {
		return dashImage
	}
	latestVersion := strings.TrimSpace(allVersions[len(allVersions)-1])
	return fmt.Sprintf("pachyderm/dash:%v", latestVersion)
}
|
package postgresql
import (
"database/sql"
"fmt"
"log"
"os"
"path"
"reflect"
"sync"
"testing"
"time"
"github.com/hashicorp/vault/logical"
logicaltest "github.com/hashicorp/vault/logical/testing"
"github.com/lib/pq"
"github.com/mitchellh/mapstructure"
"github.com/ory-am/dockertest"
)
var (
	// testImagePull ensures the "postgres" docker image is pulled at most once
	// across all tests in this package.
	testImagePull sync.Once
)
// prepareTestContainer starts a PostgreSQL docker container for acceptance
// tests, returning its container ID and a usable connection URL. If the
// PG_URL environment variable is set, that URL is returned instead and no
// container is started (cid is "" so the caller skips cleanup). The container
// is considered ready once the backend accepts a config/connection update
// using the container's URL.
func prepareTestContainer(t *testing.T, s logical.Storage, b logical.Backend) (cid dockertest.ContainerID, retURL string) {
	if os.Getenv("PG_URL") != "" {
		// Use an externally-provided database; empty cid tells callers there
		// is nothing to clean up.
		return "", os.Getenv("PG_URL")
	}
	// Without this the checks for whether the container has started seem to
	// never actually pass. There's really no reason to expose the test
	// containers, so don't.
	dockertest.BindDockerToLocalhost = "yep"
	// Pull the postgres image at most once per test run.
	testImagePull.Do(func() {
		dockertest.Pull("postgres")
	})
	// Retry the readiness check up to 60 times, every 500ms.
	cid, connErr := dockertest.ConnectToPostgreSQL(60, 500*time.Millisecond, func(connURL string) bool {
		// This will cause a validation to run
		resp, err := b.HandleRequest(&logical.Request{
			Storage:   s,
			Operation: logical.UpdateOperation,
			Path:      "config/connection",
			Data: map[string]interface{}{
				"connection_url": connURL,
			},
		})
		if err != nil || (resp != nil && resp.IsError()) {
			// It's likely not up and running yet, so return false and try again
			return false
		}
		if resp == nil {
			// A successful update is expected to return a (warning) response;
			// nil here indicates something unexpected went wrong.
			t.Fatal("expected warning")
		}
		// Capture the working URL in the named result for the caller.
		retURL = connURL
		return true
	})
	if connErr != nil {
		t.Fatalf("could not connect to database: %v", connErr)
	}
	// Named results cid and retURL were populated above.
	return
}
// cleanupTestContainer kills and removes the docker container with the given
// ID, failing the test if removal does not succeed.
func cleanupTestContainer(t *testing.T, cid dockertest.ContainerID) {
	if err := cid.KillRemove(); err != nil {
		t.Fatal(err)
	}
}
// TestBackend_config_connection verifies that config/connection accepts an
// update and that a subsequent read returns the stored settings (minus
// verify_connection, which is not returned on read).
func TestBackend_config_connection(t *testing.T) {
	var resp *logical.Response
	var err error
	config := logical.TestBackendConfig()
	config.StorageView = &logical.InmemStorage{}
	b, err := Factory(config)
	if err != nil {
		t.Fatal(err)
	}
	// verify_connection is false so no real database is needed for this test.
	configData := map[string]interface{}{
		"connection_url":       "sample_connection_url",
		"value":                "",
		"max_open_connections": 9,
		"max_idle_connections": 7,
		"verify_connection":    false,
	}
	configReq := &logical.Request{
		Operation: logical.UpdateOperation,
		Path:      "config/connection",
		Storage:   config.StorageView,
		Data:      configData,
	}
	// Write the configuration.
	resp, err = b.HandleRequest(configReq)
	if err != nil || (resp != nil && resp.IsError()) {
		t.Fatalf("err:%s resp:%#v\n", err, resp)
	}
	// Read it back.
	configReq.Operation = logical.ReadOperation
	resp, err = b.HandleRequest(configReq)
	if err != nil || (resp != nil && resp.IsError()) {
		t.Fatalf("err:%s resp:%#v\n", err, resp)
	}
	// verify_connection is write-only, so drop it before comparing what was
	// written against what was read.
	delete(configData, "verify_connection")
	if !reflect.DeepEqual(configData, resp.Data) {
		t.Fatalf("bad: expected:%#v\nactual:%#v\n", configData, resp.Data)
	}
}
// TestBackend_basic is an acceptance test: it starts (or reuses, via the
// PG_URL environment variable) a PostgreSQL container, configures the
// backend against it, creates a role, and reads credentials for that role.
func TestBackend_basic(t *testing.T) {
	config := logical.TestBackendConfig()
	config.StorageView = &logical.InmemStorage{}
	b, err := Factory(config)
	if err != nil {
		t.Fatal(err)
	}
	cid, connURL := prepareTestContainer(t, config.StorageView, b)
	// cid is empty when an external database was supplied via PG_URL; in
	// that case there is no container to clean up.
	if cid != "" {
		defer cleanupTestContainer(t, cid)
	}
	connData := map[string]interface{}{
		"connection_url": connURL,
	}
	logicaltest.Test(t, logicaltest.TestCase{
		Backend: b,
		Steps: []logicaltest.TestStep{
			testAccStepConfig(t, connData, false),
			testAccStepCreateRole(t, "web", testRole),
			testAccStepReadCreds(t, b, config.StorageView, "web", connURL),
		},
	})
}
// TestBackend_roleCrud exercises the full create/read/delete lifecycle of a
// role against a live PostgreSQL container (or the database named by PG_URL).
func TestBackend_roleCrud(t *testing.T) {
	config := logical.TestBackendConfig()
	config.StorageView = &logical.InmemStorage{}
	b, err := Factory(config)
	if err != nil {
		t.Fatal(err)
	}
	cid, connURL := prepareTestContainer(t, config.StorageView, b)
	// Empty cid means PG_URL was used and no container needs cleanup.
	if cid != "" {
		defer cleanupTestContainer(t, cid)
	}
	connData := map[string]interface{}{
		"connection_url": connURL,
	}
	logicaltest.Test(t, logicaltest.TestCase{
		Backend: b,
		Steps: []logicaltest.TestStep{
			testAccStepConfig(t, connData, false),
			testAccStepCreateRole(t, "web", testRole),
			testAccStepReadRole(t, "web", testRole),
			testAccStepDeleteRole(t, "web"),
			// Empty SQL asserts the role is gone after the delete.
			testAccStepReadRole(t, "web", ""),
		},
	})
}
// TestBackend_roleReadOnly verifies that a role created from the read-only
// SQL template can read (via creds) a table created by a full-privilege
// role, and that both roles can be deleted afterwards.
func TestBackend_roleReadOnly(t *testing.T) {
	config := logical.TestBackendConfig()
	config.StorageView = &logical.InmemStorage{}
	b, err := Factory(config)
	if err != nil {
		t.Fatal(err)
	}
	cid, connURL := prepareTestContainer(t, config.StorageView, b)
	// Empty cid means PG_URL was used and no container needs cleanup.
	if cid != "" {
		defer cleanupTestContainer(t, cid)
	}
	connData := map[string]interface{}{
		"connection_url": connURL,
	}
	logicaltest.Test(t, logicaltest.TestCase{
		Backend: b,
		Steps: []logicaltest.TestStep{
			testAccStepConfig(t, connData, false),
			testAccStepCreateRole(t, "web", testRole),
			testAccStepCreateRole(t, "web-readonly", testReadOnlyRole),
			testAccStepReadRole(t, "web-readonly", testReadOnlyRole),
			// The table is created by the full-privilege role and then
			// read with credentials from the read-only role.
			testAccStepCreateTable(t, b, config.StorageView, "web", connURL),
			testAccStepReadCreds(t, b, config.StorageView, "web-readonly", connURL),
			testAccStepDropTable(t, b, config.StorageView, "web", connURL),
			testAccStepDeleteRole(t, "web-readonly"),
			testAccStepDeleteRole(t, "web"),
			testAccStepReadRole(t, "web-readonly", ""),
		},
	})
}
// testAccStepConfig returns a test step that writes the connection
// configuration d to config/connection.
//
// When expectError is true the step asserts that the write produced an
// error response; otherwise any error response fails the step.
func testAccStepConfig(t *testing.T, d map[string]interface{}, expectError bool) logicaltest.TestStep {
	return logicaltest.TestStep{
		Operation: logical.UpdateOperation,
		Path:      "config/connection",
		Data:      d,
		// ErrorOk lets the framework hand error responses to Check instead
		// of failing outright, so the expectError branch can inspect them.
		ErrorOk: true,
		Check: func(resp *logical.Response) error {
			if expectError {
				// Guard resp itself before dereferencing; the original
				// read resp.Data unconditionally and would panic on a
				// nil response.
				if resp == nil || resp.Data == nil {
					return fmt.Errorf("data is nil")
				}
				var e struct {
					Error string `mapstructure:"error"`
				}
				if err := mapstructure.Decode(resp.Data, &e); err != nil {
					return err
				}
				if len(e.Error) == 0 {
					return fmt.Errorf("expected error, but write succeeded")
				}
				return nil
			}
			if resp != nil && resp.IsError() {
				return fmt.Errorf("got an error response: %v", resp.Error())
			}
			return nil
		},
	}
}
// testAccStepCreateRole returns a test step that writes the creation SQL
// for role name to roles/<name>.
func testAccStepCreateRole(t *testing.T, name string, sql string) logicaltest.TestStep {
	data := map[string]interface{}{"sql": sql}
	return logicaltest.TestStep{
		Operation: logical.UpdateOperation,
		Path:      path.Join("roles", name),
		Data:      data,
	}
}
// testAccStepDeleteRole returns a test step that deletes roles/<name>.
func testAccStepDeleteRole(t *testing.T, name string) logicaltest.TestStep {
	step := logicaltest.TestStep{
		Operation: logical.DeleteOperation,
		Path:      path.Join("roles", name),
	}
	return step
}
// testAccStepReadCreds returns a test step that reads dynamic credentials
// for role name, verifies the generated database user has at least the
// expected privileges, revokes the lease, and verifies the user is gone.
func testAccStepReadCreds(t *testing.T, b logical.Backend, s logical.Storage, name string, connURL string) logicaltest.TestStep {
	return logicaltest.TestStep{
		Operation: logical.ReadOperation,
		Path:      path.Join("creds", name),
		Check: func(resp *logical.Response) error {
			var d struct {
				Username string `mapstructure:"username"`
				Password string `mapstructure:"password"`
			}
			if err := mapstructure.Decode(resp.Data, &d); err != nil {
				return err
			}
			log.Printf("[TRACE] Generated credentials: %v", d)
			conn, err := pq.ParseURL(connURL)
			if err != nil {
				t.Fatal(err)
			}
			conn += " timezone=utc"
			db, err := sql.Open("postgres", conn)
			if err != nil {
				t.Fatal(err)
			}
			// Close the connection pool when the check finishes; the
			// original leaked it.
			defer db.Close()
			// returnedRows counts the distinct schemas visible to the
			// generated user, or returns -1 if the query cannot run
			// (e.g. because the user no longer exists).
			returnedRows := func() int {
				stmt, err := db.Prepare("SELECT DISTINCT schemaname FROM pg_tables WHERE has_table_privilege($1, 'information_schema.role_column_grants', 'select');")
				if err != nil {
					return -1
				}
				defer stmt.Close()
				rows, err := stmt.Query(d.Username)
				if err != nil {
					return -1
				}
				defer rows.Close()
				i := 0
				for rows.Next() {
					i++
				}
				return i
			}
			// minNumPermissions is the minimum number of permissions that will always be present.
			const minNumPermissions = 2
			userRows := returnedRows()
			if userRows < minNumPermissions {
				t.Fatalf("did not get expected number of rows, got %d", userRows)
			}
			// Revoke the lease so the backend drops the database user.
			resp, err = b.HandleRequest(&logical.Request{
				Operation: logical.RevokeOperation,
				Storage:   s,
				Secret: &logical.Secret{
					InternalData: map[string]interface{}{
						"secret_type": "creds",
						"username":    d.Username,
					},
				},
			})
			if err != nil {
				return err
			}
			if resp != nil && resp.IsError() {
				return fmt.Errorf("Error on resp: %#v", *resp)
			}
			userRows = returnedRows()
			// User shouldn't exist so returnedRows() should encounter an error and exit with -1
			if userRows != -1 {
				t.Fatalf("did not get expected number of rows, got %d", userRows)
			}
			return nil
		},
	}
}
// testAccStepCreateTable returns a test step that reads credentials for role
// name, uses them to create a "test" table in the public schema, and then
// revokes the lease.
func testAccStepCreateTable(t *testing.T, b logical.Backend, s logical.Storage, name string, connURL string) logicaltest.TestStep {
	return logicaltest.TestStep{
		Operation: logical.ReadOperation,
		Path:      path.Join("creds", name),
		Check: func(resp *logical.Response) error {
			var d struct {
				Username string `mapstructure:"username"`
				Password string `mapstructure:"password"`
			}
			if err := mapstructure.Decode(resp.Data, &d); err != nil {
				return err
			}
			log.Printf("[TRACE] Generated credentials: %v", d)
			conn, err := pq.ParseURL(connURL)
			if err != nil {
				t.Fatal(err)
			}
			conn += " timezone=utc"
			db, err := sql.Open("postgres", conn)
			if err != nil {
				t.Fatal(err)
			}
			// Close the connection pool when the check finishes; the
			// original leaked it.
			defer db.Close()
			if _, err := db.Exec("CREATE TABLE test (id SERIAL PRIMARY KEY);"); err != nil {
				t.Fatal(err)
			}
			// Revoke the lease so the backend drops the database user.
			resp, err = b.HandleRequest(&logical.Request{
				Operation: logical.RevokeOperation,
				Storage:   s,
				Secret: &logical.Secret{
					InternalData: map[string]interface{}{
						"secret_type": "creds",
						"username":    d.Username,
					},
				},
			})
			if err != nil {
				return err
			}
			if resp != nil && resp.IsError() {
				return fmt.Errorf("Error on resp: %#v", *resp)
			}
			return nil
		},
	}
}
// testAccStepDropTable returns a test step that reads credentials for role
// name, uses them to drop the "test" table created earlier, and then revokes
// the lease.
func testAccStepDropTable(t *testing.T, b logical.Backend, s logical.Storage, name string, connURL string) logicaltest.TestStep {
	return logicaltest.TestStep{
		Operation: logical.ReadOperation,
		Path:      path.Join("creds", name),
		Check: func(resp *logical.Response) error {
			var d struct {
				Username string `mapstructure:"username"`
				Password string `mapstructure:"password"`
			}
			if err := mapstructure.Decode(resp.Data, &d); err != nil {
				return err
			}
			log.Printf("[TRACE] Generated credentials: %v", d)
			conn, err := pq.ParseURL(connURL)
			if err != nil {
				t.Fatal(err)
			}
			conn += " timezone=utc"
			db, err := sql.Open("postgres", conn)
			if err != nil {
				t.Fatal(err)
			}
			// Close the connection pool when the check finishes; the
			// original leaked it.
			defer db.Close()
			if _, err := db.Exec("DROP TABLE test;"); err != nil {
				t.Fatal(err)
			}
			// Revoke the lease so the backend drops the database user.
			resp, err = b.HandleRequest(&logical.Request{
				Operation: logical.RevokeOperation,
				Storage:   s,
				Secret: &logical.Secret{
					InternalData: map[string]interface{}{
						"secret_type": "creds",
						"username":    d.Username,
					},
				},
			})
			if err != nil {
				return err
			}
			if resp != nil && resp.IsError() {
				return fmt.Errorf("Error on resp: %#v", *resp)
			}
			return nil
		},
	}
}
// testAccStepReadRole returns a test step that reads roles/<name> and checks
// that the stored creation SQL matches sql. An empty sql means the role is
// expected to be absent (nil response).
func testAccStepReadRole(t *testing.T, name string, sql string) logicaltest.TestStep {
	return logicaltest.TestStep{
		Operation: logical.ReadOperation,
		Path:      "roles/" + name,
		Check: func(resp *logical.Response) error {
			if resp == nil {
				if sql != "" {
					return fmt.Errorf("bad: %#v", resp)
				}
				return nil
			}
			var decoded struct {
				SQL string `mapstructure:"sql"`
			}
			if err := mapstructure.Decode(resp.Data, &decoded); err != nil {
				return err
			}
			if decoded.SQL != sql {
				return fmt.Errorf("bad: %#v", resp)
			}
			return nil
		},
	}
}
// testRole is the default role-creation SQL: a login role, valid until the
// lease expiration, with full privileges on every table in the public schema.
const testRole = `
CREATE ROLE "{{name}}" WITH
LOGIN
PASSWORD '{{password}}'
VALID UNTIL '{{expiration}}';
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}";
`
// testReadOnlyRole creates a login role that may only SELECT from tables and
// sequences in the public schema.
const testReadOnlyRole = `
CREATE ROLE "{{name}}" WITH
LOGIN
PASSWORD '{{password}}'
VALID UNTIL '{{expiration}}';
GRANT SELECT ON ALL TABLES IN SCHEMA public TO "{{name}}";
GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO "{{name}}";
`
Add postgres test for block statements
package postgresql
import (
"database/sql"
"encoding/json"
"fmt"
"log"
"os"
"path"
"reflect"
"sync"
"testing"
"time"
"github.com/hashicorp/vault/logical"
logicaltest "github.com/hashicorp/vault/logical/testing"
"github.com/lib/pq"
"github.com/mitchellh/mapstructure"
"github.com/ory-am/dockertest"
)
var (
	// testImagePull ensures the postgres Docker image is pulled at most once
	// per test binary run.
	testImagePull sync.Once
)
// prepareTestContainer returns a running PostgreSQL container ID and its
// connection URL. If the PG_URL environment variable is set, that URL is
// returned with an empty container ID and no container is started.
//
// The container is polled (up to 60 attempts, 500ms apart) by issuing a
// config/connection write through the backend, which doubles as connection
// validation. The successful URL is captured in the named return retURL.
func prepareTestContainer(t *testing.T, s logical.Storage, b logical.Backend) (cid dockertest.ContainerID, retURL string) {
	if os.Getenv("PG_URL") != "" {
		return "", os.Getenv("PG_URL")
	}
	// Without this the checks for whether the container has started seem to
	// never actually pass. There's really no reason to expose the test
	// containers, so don't.
	dockertest.BindDockerToLocalhost = "yep"
	testImagePull.Do(func() {
		dockertest.Pull("postgres")
	})
	cid, connErr := dockertest.ConnectToPostgreSQL(60, 500*time.Millisecond, func(connURL string) bool {
		// This will cause a validation to run
		resp, err := b.HandleRequest(&logical.Request{
			Storage:   s,
			Operation: logical.UpdateOperation,
			Path:      "config/connection",
			Data: map[string]interface{}{
				"connection_url": connURL,
			},
		})
		if err != nil || (resp != nil && resp.IsError()) {
			// It's likely not up and running yet, so return false and try again
			return false
		}
		if resp == nil {
			t.Fatal("expected warning")
		}
		// Success: record the working URL in the named return value.
		retURL = connURL
		return true
	})
	if connErr != nil {
		t.Fatalf("could not connect to database: %v", connErr)
	}
	return
}
// cleanupTestContainer tears down the Docker container identified by cid,
// failing the test if the kill/remove does not succeed.
func cleanupTestContainer(t *testing.T, cid dockertest.ContainerID) {
	if err := cid.KillRemove(); err != nil {
		t.Fatal(err)
	}
}
// TestBackend_config_connection writes the connection configuration with
// verify_connection disabled, reads it back, and checks that the stored
// values round-trip. verify_connection is write-only and is removed from the
// expected map before the comparison.
func TestBackend_config_connection(t *testing.T) {
	var resp *logical.Response
	var err error
	config := logical.TestBackendConfig()
	config.StorageView = &logical.InmemStorage{}
	b, err := Factory(config)
	if err != nil {
		t.Fatal(err)
	}
	configData := map[string]interface{}{
		"connection_url":       "sample_connection_url",
		"value":                "",
		"max_open_connections": 9,
		"max_idle_connections": 7,
		"verify_connection":    false,
	}
	configReq := &logical.Request{
		Operation: logical.UpdateOperation,
		Path:      "config/connection",
		Storage:   config.StorageView,
		Data:      configData,
	}
	// Write the configuration.
	resp, err = b.HandleRequest(configReq)
	if err != nil || (resp != nil && resp.IsError()) {
		t.Fatalf("err:%s resp:%#v\n", err, resp)
	}
	// Read it back via the same request, switching only the operation.
	configReq.Operation = logical.ReadOperation
	resp, err = b.HandleRequest(configReq)
	if err != nil || (resp != nil && resp.IsError()) {
		t.Fatalf("err:%s resp:%#v\n", err, resp)
	}
	// verify_connection is not echoed back on read.
	delete(configData, "verify_connection")
	if !reflect.DeepEqual(configData, resp.Data) {
		t.Fatalf("bad: expected:%#v\nactual:%#v\n", configData, resp.Data)
	}
}
// TestBackend_basic is an acceptance test: it starts (or reuses, via the
// PG_URL environment variable) a PostgreSQL container, configures the
// backend against it, creates a role, and reads credentials for that role.
func TestBackend_basic(t *testing.T) {
	config := logical.TestBackendConfig()
	config.StorageView = &logical.InmemStorage{}
	b, err := Factory(config)
	if err != nil {
		t.Fatal(err)
	}
	cid, connURL := prepareTestContainer(t, config.StorageView, b)
	// cid is empty when an external database was supplied via PG_URL; in
	// that case there is no container to clean up.
	if cid != "" {
		defer cleanupTestContainer(t, cid)
	}
	connData := map[string]interface{}{
		"connection_url": connURL,
	}
	logicaltest.Test(t, logicaltest.TestCase{
		Backend: b,
		Steps: []logicaltest.TestStep{
			testAccStepConfig(t, connData, false),
			testAccStepCreateRole(t, "web", testRole, false),
			testAccStepReadCreds(t, b, config.StorageView, "web", connURL),
		},
	})
}
// TestBackend_roleCrud exercises the full create/read/delete lifecycle of a
// role against a live PostgreSQL container (or the database named by PG_URL).
func TestBackend_roleCrud(t *testing.T) {
	config := logical.TestBackendConfig()
	config.StorageView = &logical.InmemStorage{}
	b, err := Factory(config)
	if err != nil {
		t.Fatal(err)
	}
	cid, connURL := prepareTestContainer(t, config.StorageView, b)
	// Empty cid means PG_URL was used and no container needs cleanup.
	if cid != "" {
		defer cleanupTestContainer(t, cid)
	}
	connData := map[string]interface{}{
		"connection_url": connURL,
	}
	logicaltest.Test(t, logicaltest.TestCase{
		Backend: b,
		Steps: []logicaltest.TestStep{
			testAccStepConfig(t, connData, false),
			testAccStepCreateRole(t, "web", testRole, false),
			testAccStepReadRole(t, "web", testRole),
			testAccStepDeleteRole(t, "web"),
			// Empty SQL asserts the role is gone after the delete.
			testAccStepReadRole(t, "web", ""),
		},
	})
}
// TestBackend_BlockStatements verifies handling of DO $$ ... $$ block
// statements in role SQL: the raw multi-statement string (which cannot be
// split on semicolons safely) is expected to be rejected, while the same
// statements supplied as a JSON array of individual statements are accepted.
func TestBackend_BlockStatements(t *testing.T) {
	config := logical.TestBackendConfig()
	config.StorageView = &logical.InmemStorage{}
	b, err := Factory(config)
	if err != nil {
		t.Fatal(err)
	}
	cid, connURL := prepareTestContainer(t, config.StorageView, b)
	// Empty cid means PG_URL was used and no container needs cleanup.
	if cid != "" {
		defer cleanupTestContainer(t, cid)
	}
	connData := map[string]interface{}{
		"connection_url": connURL,
	}
	// Encode the per-statement slice form as JSON for the role payload.
	jsonBlockStatement, err := json.Marshal(testBlockStatementRoleSlice)
	if err != nil {
		t.Fatal(err)
	}
	logicaltest.Test(t, logicaltest.TestCase{
		Backend: b,
		Steps: []logicaltest.TestStep{
			testAccStepConfig(t, connData, false),
			// This will also validate the query
			testAccStepCreateRole(t, "web-block", testBlockStatementRole, true),
			testAccStepCreateRole(t, "web-block", string(jsonBlockStatement), false),
		},
	})
}
// TestBackend_roleReadOnly verifies that a role created from the read-only
// SQL template can read (via creds) a table created by a full-privilege
// role, and that both roles can be deleted afterwards.
func TestBackend_roleReadOnly(t *testing.T) {
	config := logical.TestBackendConfig()
	config.StorageView = &logical.InmemStorage{}
	b, err := Factory(config)
	if err != nil {
		t.Fatal(err)
	}
	cid, connURL := prepareTestContainer(t, config.StorageView, b)
	// Empty cid means PG_URL was used and no container needs cleanup.
	if cid != "" {
		defer cleanupTestContainer(t, cid)
	}
	connData := map[string]interface{}{
		"connection_url": connURL,
	}
	logicaltest.Test(t, logicaltest.TestCase{
		Backend: b,
		Steps: []logicaltest.TestStep{
			testAccStepConfig(t, connData, false),
			testAccStepCreateRole(t, "web", testRole, false),
			testAccStepCreateRole(t, "web-readonly", testReadOnlyRole, false),
			testAccStepReadRole(t, "web-readonly", testReadOnlyRole),
			// The table is created by the full-privilege role and then
			// read with credentials from the read-only role.
			testAccStepCreateTable(t, b, config.StorageView, "web", connURL),
			testAccStepReadCreds(t, b, config.StorageView, "web-readonly", connURL),
			testAccStepDropTable(t, b, config.StorageView, "web", connURL),
			testAccStepDeleteRole(t, "web-readonly"),
			testAccStepDeleteRole(t, "web"),
			testAccStepReadRole(t, "web-readonly", ""),
		},
	})
}
// testAccStepConfig returns a test step that writes the connection
// configuration d to config/connection.
//
// When expectError is true the step asserts that the write produced an
// error response; otherwise any error response fails the step.
func testAccStepConfig(t *testing.T, d map[string]interface{}, expectError bool) logicaltest.TestStep {
	return logicaltest.TestStep{
		Operation: logical.UpdateOperation,
		Path:      "config/connection",
		Data:      d,
		// ErrorOk lets the framework hand error responses to Check instead
		// of failing outright, so the expectError branch can inspect them.
		ErrorOk: true,
		Check: func(resp *logical.Response) error {
			if expectError {
				// Guard resp itself before dereferencing; the original
				// read resp.Data unconditionally and would panic on a
				// nil response.
				if resp == nil || resp.Data == nil {
					return fmt.Errorf("data is nil")
				}
				var e struct {
					Error string `mapstructure:"error"`
				}
				if err := mapstructure.Decode(resp.Data, &e); err != nil {
					return err
				}
				if len(e.Error) == 0 {
					return fmt.Errorf("expected error, but write succeeded")
				}
				return nil
			}
			if resp != nil && resp.IsError() {
				return fmt.Errorf("got an error response: %v", resp.Error())
			}
			return nil
		},
	}
}
// testAccStepCreateRole returns a test step that writes the creation SQL
// for role name to roles/<name>. expectFail marks an error response as
// acceptable, which is used to assert that invalid SQL is rejected.
func testAccStepCreateRole(t *testing.T, name string, sql string, expectFail bool) logicaltest.TestStep {
	data := map[string]interface{}{"sql": sql}
	return logicaltest.TestStep{
		Operation: logical.UpdateOperation,
		Path:      path.Join("roles", name),
		Data:      data,
		ErrorOk:   expectFail,
	}
}
// testAccStepDeleteRole returns a test step that deletes roles/<name>.
func testAccStepDeleteRole(t *testing.T, name string) logicaltest.TestStep {
	step := logicaltest.TestStep{
		Operation: logical.DeleteOperation,
		Path:      path.Join("roles", name),
	}
	return step
}
// testAccStepReadCreds returns a test step that reads dynamic credentials
// for role name, verifies the generated database user has at least the
// expected privileges, revokes the lease, and verifies the user is gone.
func testAccStepReadCreds(t *testing.T, b logical.Backend, s logical.Storage, name string, connURL string) logicaltest.TestStep {
	return logicaltest.TestStep{
		Operation: logical.ReadOperation,
		Path:      path.Join("creds", name),
		Check: func(resp *logical.Response) error {
			var d struct {
				Username string `mapstructure:"username"`
				Password string `mapstructure:"password"`
			}
			if err := mapstructure.Decode(resp.Data, &d); err != nil {
				return err
			}
			log.Printf("[TRACE] Generated credentials: %v", d)
			conn, err := pq.ParseURL(connURL)
			if err != nil {
				t.Fatal(err)
			}
			conn += " timezone=utc"
			db, err := sql.Open("postgres", conn)
			if err != nil {
				t.Fatal(err)
			}
			// Close the connection pool when the check finishes; the
			// original leaked it.
			defer db.Close()
			// returnedRows counts the distinct schemas visible to the
			// generated user, or returns -1 if the query cannot run
			// (e.g. because the user no longer exists).
			returnedRows := func() int {
				stmt, err := db.Prepare("SELECT DISTINCT schemaname FROM pg_tables WHERE has_table_privilege($1, 'information_schema.role_column_grants', 'select');")
				if err != nil {
					return -1
				}
				defer stmt.Close()
				rows, err := stmt.Query(d.Username)
				if err != nil {
					return -1
				}
				defer rows.Close()
				i := 0
				for rows.Next() {
					i++
				}
				return i
			}
			// minNumPermissions is the minimum number of permissions that will always be present.
			const minNumPermissions = 2
			userRows := returnedRows()
			if userRows < minNumPermissions {
				t.Fatalf("did not get expected number of rows, got %d", userRows)
			}
			// Revoke the lease so the backend drops the database user.
			resp, err = b.HandleRequest(&logical.Request{
				Operation: logical.RevokeOperation,
				Storage:   s,
				Secret: &logical.Secret{
					InternalData: map[string]interface{}{
						"secret_type": "creds",
						"username":    d.Username,
					},
				},
			})
			if err != nil {
				return err
			}
			if resp != nil && resp.IsError() {
				return fmt.Errorf("Error on resp: %#v", *resp)
			}
			userRows = returnedRows()
			// User shouldn't exist so returnedRows() should encounter an error and exit with -1
			if userRows != -1 {
				t.Fatalf("did not get expected number of rows, got %d", userRows)
			}
			return nil
		},
	}
}
// testAccStepCreateTable returns a test step that reads credentials for role
// name, uses them to create a "test" table in the public schema, and then
// revokes the lease.
func testAccStepCreateTable(t *testing.T, b logical.Backend, s logical.Storage, name string, connURL string) logicaltest.TestStep {
	return logicaltest.TestStep{
		Operation: logical.ReadOperation,
		Path:      path.Join("creds", name),
		Check: func(resp *logical.Response) error {
			var d struct {
				Username string `mapstructure:"username"`
				Password string `mapstructure:"password"`
			}
			if err := mapstructure.Decode(resp.Data, &d); err != nil {
				return err
			}
			log.Printf("[TRACE] Generated credentials: %v", d)
			conn, err := pq.ParseURL(connURL)
			if err != nil {
				t.Fatal(err)
			}
			conn += " timezone=utc"
			db, err := sql.Open("postgres", conn)
			if err != nil {
				t.Fatal(err)
			}
			// Close the connection pool when the check finishes; the
			// original leaked it.
			defer db.Close()
			if _, err := db.Exec("CREATE TABLE test (id SERIAL PRIMARY KEY);"); err != nil {
				t.Fatal(err)
			}
			// Revoke the lease so the backend drops the database user.
			resp, err = b.HandleRequest(&logical.Request{
				Operation: logical.RevokeOperation,
				Storage:   s,
				Secret: &logical.Secret{
					InternalData: map[string]interface{}{
						"secret_type": "creds",
						"username":    d.Username,
					},
				},
			})
			if err != nil {
				return err
			}
			if resp != nil && resp.IsError() {
				return fmt.Errorf("Error on resp: %#v", *resp)
			}
			return nil
		},
	}
}
// testAccStepDropTable returns a test step that reads credentials for role
// name, uses them to drop the "test" table created earlier, and then revokes
// the lease.
func testAccStepDropTable(t *testing.T, b logical.Backend, s logical.Storage, name string, connURL string) logicaltest.TestStep {
	return logicaltest.TestStep{
		Operation: logical.ReadOperation,
		Path:      path.Join("creds", name),
		Check: func(resp *logical.Response) error {
			var d struct {
				Username string `mapstructure:"username"`
				Password string `mapstructure:"password"`
			}
			if err := mapstructure.Decode(resp.Data, &d); err != nil {
				return err
			}
			log.Printf("[TRACE] Generated credentials: %v", d)
			conn, err := pq.ParseURL(connURL)
			if err != nil {
				t.Fatal(err)
			}
			conn += " timezone=utc"
			db, err := sql.Open("postgres", conn)
			if err != nil {
				t.Fatal(err)
			}
			// Close the connection pool when the check finishes; the
			// original leaked it.
			defer db.Close()
			if _, err := db.Exec("DROP TABLE test;"); err != nil {
				t.Fatal(err)
			}
			// Revoke the lease so the backend drops the database user.
			resp, err = b.HandleRequest(&logical.Request{
				Operation: logical.RevokeOperation,
				Storage:   s,
				Secret: &logical.Secret{
					InternalData: map[string]interface{}{
						"secret_type": "creds",
						"username":    d.Username,
					},
				},
			})
			if err != nil {
				return err
			}
			if resp != nil && resp.IsError() {
				return fmt.Errorf("Error on resp: %#v", *resp)
			}
			return nil
		},
	}
}
// testAccStepReadRole returns a test step that reads roles/<name> and checks
// that the stored creation SQL matches sql. An empty sql means the role is
// expected to be absent (nil response).
func testAccStepReadRole(t *testing.T, name string, sql string) logicaltest.TestStep {
	return logicaltest.TestStep{
		Operation: logical.ReadOperation,
		Path:      "roles/" + name,
		Check: func(resp *logical.Response) error {
			if resp == nil {
				if sql != "" {
					return fmt.Errorf("bad: %#v", resp)
				}
				return nil
			}
			var decoded struct {
				SQL string `mapstructure:"sql"`
			}
			if err := mapstructure.Decode(resp.Data, &decoded); err != nil {
				return err
			}
			if decoded.SQL != sql {
				return fmt.Errorf("bad: %#v", resp)
			}
			return nil
		},
	}
}
// testRole is the default role-creation SQL: a login role, valid until the
// lease expiration, with full privileges on every table in the public schema.
const testRole = `
CREATE ROLE "{{name}}" WITH
LOGIN
PASSWORD '{{password}}'
VALID UNTIL '{{expiration}}';
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}";
`
// testReadOnlyRole creates a login role that may only SELECT from tables and
// sequences in the public schema.
const testReadOnlyRole = `
CREATE ROLE "{{name}}" WITH
LOGIN
PASSWORD '{{password}}'
VALID UNTIL '{{expiration}}';
GRANT SELECT ON ALL TABLES IN SCHEMA public TO "{{name}}";
GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO "{{name}}";
`
// testBlockStatementRole mixes a DO $$ ... $$ block with plain statements in
// a single string. TestBackend_BlockStatements expects the backend to REJECT
// this form (the block contains semicolons, so naive semicolon splitting
// would break it); the accepted equivalent is testBlockStatementRoleSlice.
const testBlockStatementRole = `
DO $$
BEGIN
IF NOT EXISTS (SELECT * FROM pg_catalog.pg_roles WHERE rolname='foo-role') THEN
CREATE ROLE "foo-role";
CREATE SCHEMA IF NOT EXISTS foo AUTHORIZATION "foo-role";
ALTER ROLE "foo-role" SET search_path = foo;
GRANT TEMPORARY ON DATABASE "postgres" TO "foo-role";
GRANT ALL PRIVILEGES ON SCHEMA foo TO "foo-role";
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA foo TO "foo-role";
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA foo TO "foo-role";
GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA foo TO "foo-role";
END IF;
END
$$
CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';
GRANT "foo-role" TO "{{name}}";
ALTER ROLE "{{name}}" SET search_path = foo;
GRANT CONNECT ON DATABASE "postgres" TO "{{name}}";
`
// testBlockStatementRoleSlice is the same SQL as testBlockStatementRole,
// pre-split into one statement per element so the DO $$ ... $$ block stays
// intact. It is marshaled to JSON and supplied as the role's "sql" payload;
// TestBackend_BlockStatements expects this form to be ACCEPTED.
var testBlockStatementRoleSlice = []string{
	`DO $$
BEGIN
IF NOT EXISTS (SELECT * FROM pg_catalog.pg_roles WHERE rolname='foo-role') THEN
CREATE ROLE "foo-role";
CREATE SCHEMA IF NOT EXISTS foo AUTHORIZATION "foo-role";
ALTER ROLE "foo-role" SET search_path = foo;
GRANT TEMPORARY ON DATABASE "postgres" TO "foo-role";
GRANT ALL PRIVILEGES ON SCHEMA foo TO "foo-role";
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA foo TO "foo-role";
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA foo TO "foo-role";
GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA foo TO "foo-role";
END IF;
END
$$`,
	`CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';`,
	`GRANT "foo-role" TO "{{name}}";`,
	`ALTER ROLE "{{name}}" SET search_path = foo;`,
	`GRANT CONNECT ON DATABASE "postgres" TO "{{name}}";`,
}
|
// Copyright 2019 NDP Systèmes. All Rights Reserved.
// See LICENSE file for full licensing details.
package generate
import "text/template"
// poolInterfacesTemplate renders the autogenerated pool interfaces file for a
// model: the {{.Name}}Set interface (getters/setters per field plus all
// declared methods), the {{.Name}}Data interface (per-field accessors over a
// FieldMap), and the {{.Name}}GroupAggregateRow interface for group-by query
// results. The template text below is Go source emitted by hexya-generate and
// must not be edited for style.
var poolInterfacesTemplate = template.Must(template.New("").Parse(`
// This file is autogenerated by hexya-generate
// DO NOT MODIFY THIS FILE - ANY CHANGES WILL BE OVERWRITTEN
package {{ .InterfacesPackageName }}
import (
"github.com/hexya-erp/pool/{{ .QueryPackageName }}"
{{ range .Deps }} "{{ . }}"
{{ end }}
)
// {{ .Name }}Set is an autogenerated type to handle {{ .Name }} objects.
type {{ .Name }}Set interface {
models.RecordSet
// {{ .Name }}SetHexyaFunc is a dummy function to uniquely match interfaces.
{{ .Name }}SetHexyaFunc()
{{- range .Fields }}
// {{ .Name }} is a getter for the value of the "{{ .Name }}" field of the first
// record in this RecordSet. It returns the Go zero value if the RecordSet is empty.
{{ .Name }}() {{ .IType }}
// Set{{ .Name }} is a setter for the value of the "{{ .Name }}" field of this
// RecordSet. All Records of this RecordSet will be updated. Each call to this
// method makes an update query in the database.
//
// Set{{ .Name }} panics if the RecordSet is empty.
Set{{ .Name }}(value {{ .IType }})
{{- end }}
{{- range .AllMethods }}
{{ .Doc }}
{{ .Name }}({{ .IParamsTypes }}) ({{ .IReturnString }})
{{- end }}
// Super returns a RecordSet with a modified callstack so that call to the current
// method will execute the next method layer.
//
// This method is meant to be used inside a method layer function to call its parent,
// such as:
//
// func (rs h.MyRecordSet) MyMethod() string {
// res := rs.Super().MyMethod()
// res += " ok!"
// return res
// }
//
// Calls to a different method than the current method will call its next layer only
// if the current method has been called from a layer of the other method. Otherwise,
// it will be the same as calling the other method directly.
Super() {{ .Name }}Set
// ModelData returns a new {{ .Name }}Data object populated with the values
// of the given FieldMap.
ModelData(fMap models.FieldMap) {{ .Name }}Data
// Records returns a slice with all the records of this RecordSet, as singleton RecordSets
Records() []{{ .Name }}Set
// First returns the values of the first Record of the RecordSet as a pointer to a {{ .Name }}Data.
//
// If this RecordSet is empty, it returns an empty {{ .Name }}Data.
First() {{ .Name }}Data
// All returns the values of all Records of the RecordCollection as a slice of {{ .Name }}Data pointers.
All() []{{ .Name }}Data
}
// {{ .Name }}Data is used to hold values of an {{ .Name }} object instance
// when creating or updating a {{ .Name }}Set.
type {{ .Name }}Data interface {
// Underlying returns the object converted to a FieldMap.
Underlying() models.FieldMap
// Get returns the value of the given field.
// The second returned value is true if the value exists.
//
// The field can be either its name or is JSON name.
Get(field string) (interface{}, bool)
// Set sets the given field with the given value.
// If the field already exists, then it is updated with value.
// Otherwise, a new entry is inserted.
//
// It returns the given {{ .Name }}Data so that calls can be chained
Set(field string, value interface{}) {{ .Name }}Data
// Unset removes the value of the given field if it exists.
//
// It returns the given ModelData so that calls can be chained
Unset(field string) {{ .Name }}Data
// Copy returns a copy of this {{ .Name }}Data
Copy() {{ .Name }}Data
// Keys returns the {{ .Name }}Data keys as a slice of strings
Keys() (res []string)
// OrderedKeys returns the keys of this {{ .Name }}Data ordered.
//
// This has the convenient side effect of having shorter paths come before longer paths,
// which is particularly useful when creating or updating related records.
OrderedKeys() []string
// FieldNames returns the {{ .Name }}Data keys as a slice of FieldNamer.
FieldNames() (res []models.FieldNamer)
// Values returns the {{ .Name }}Data values as a slice of interface{}
Values() (res []interface{})
{{- range .Fields }}
// {{ .Name }} returns the value of the {{ .Name }} field.
// If this {{ .Name }} is not set in this {{ $.Name }}Data, then
// the Go zero value for the type is returned.
{{ .Name }}() {{ .IType }}
// Has{{ .Name }} returns true if {{ .Name }} is set in this {{ $.Name }}Data
Has{{ .Name }}() bool
// Set{{ .Name }} sets the {{ .Name }} field with the given value.
// It returns this {{ $.Name }}Data so that calls can be chained.
Set{{ .Name }}(value {{ .IType }}) {{ $.Name }}Data
// Unset{{ .Name }} removes the value of the {{ .Name }} field if it exists.
// It returns this {{ $.Name }}Data so that calls can be chained.
Unset{{ .Name }}() {{ $.Name }}Data
{{- end }}
}
// A {{ .Name }}GroupAggregateRow holds a row of results of a query with a group by clause
type {{ .Name }}GroupAggregateRow interface {
// Values() returns the values of the actual query
Values() {{ .Name }}Data
// Count is the number of lines aggregated into this one
Count() int
// Condition can be used to query the aggregated rows separately if needed
Condition() {{ $.QueryPackageName }}.{{ .Name }}Condition
}
`))
Removed Values from generated model data instances
// Copyright 2019 NDP Systèmes. All Rights Reserved.
// See LICENSE file for full licensing details.
package generate
import "text/template"
// poolInterfacesTemplate renders the autogenerated pool interfaces file for a
// model: the {{.Name}}Set interface (getters/setters per field plus all
// declared methods), the {{.Name}}Data interface (per-field accessors over a
// FieldMap), and the {{.Name}}GroupAggregateRow interface for group-by query
// results. The template text below is Go source emitted by hexya-generate and
// must not be edited for style.
var poolInterfacesTemplate = template.Must(template.New("").Parse(`
// This file is autogenerated by hexya-generate
// DO NOT MODIFY THIS FILE - ANY CHANGES WILL BE OVERWRITTEN
package {{ .InterfacesPackageName }}
import (
"github.com/hexya-erp/pool/{{ .QueryPackageName }}"
{{ range .Deps }} "{{ . }}"
{{ end }}
)
// {{ .Name }}Set is an autogenerated type to handle {{ .Name }} objects.
type {{ .Name }}Set interface {
models.RecordSet
// {{ .Name }}SetHexyaFunc is a dummy function to uniquely match interfaces.
{{ .Name }}SetHexyaFunc()
{{- range .Fields }}
// {{ .Name }} is a getter for the value of the "{{ .Name }}" field of the first
// record in this RecordSet. It returns the Go zero value if the RecordSet is empty.
{{ .Name }}() {{ .IType }}
// Set{{ .Name }} is a setter for the value of the "{{ .Name }}" field of this
// RecordSet. All Records of this RecordSet will be updated. Each call to this
// method makes an update query in the database.
//
// Set{{ .Name }} panics if the RecordSet is empty.
Set{{ .Name }}(value {{ .IType }})
{{- end }}
{{- range .AllMethods }}
{{ .Doc }}
{{ .Name }}({{ .IParamsTypes }}) ({{ .IReturnString }})
{{- end }}
// Super returns a RecordSet with a modified callstack so that call to the current
// method will execute the next method layer.
//
// This method is meant to be used inside a method layer function to call its parent,
// such as:
//
// func (rs h.MyRecordSet) MyMethod() string {
// res := rs.Super().MyMethod()
// res += " ok!"
// return res
// }
//
// Calls to a different method than the current method will call its next layer only
// if the current method has been called from a layer of the other method. Otherwise,
// it will be the same as calling the other method directly.
Super() {{ .Name }}Set
// ModelData returns a new {{ .Name }}Data object populated with the values
// of the given FieldMap.
ModelData(fMap models.FieldMap) {{ .Name }}Data
// Records returns a slice with all the records of this RecordSet, as singleton RecordSets
Records() []{{ .Name }}Set
// First returns the values of the first Record of the RecordSet as a pointer to a {{ .Name }}Data.
//
// If this RecordSet is empty, it returns an empty {{ .Name }}Data.
First() {{ .Name }}Data
// All returns the values of all Records of the RecordCollection as a slice of {{ .Name }}Data pointers.
All() []{{ .Name }}Data
}
// {{ .Name }}Data is used to hold values of an {{ .Name }} object instance
// when creating or updating a {{ .Name }}Set.
type {{ .Name }}Data interface {
// Underlying returns the object converted to a FieldMap.
Underlying() models.FieldMap
// Get returns the value of the given field.
// The second returned value is true if the value exists.
//
// The field can be either its name or is JSON name.
Get(field string) (interface{}, bool)
// Set sets the given field with the given value.
// If the field already exists, then it is updated with value.
// Otherwise, a new entry is inserted.
//
// It returns the given {{ .Name }}Data so that calls can be chained
Set(field string, value interface{}) {{ .Name }}Data
// Unset removes the value of the given field if it exists.
//
// It returns the given ModelData so that calls can be chained
Unset(field string) {{ .Name }}Data
// Copy returns a copy of this {{ .Name }}Data
Copy() {{ .Name }}Data
// Keys returns the {{ .Name }}Data keys as a slice of strings
Keys() (res []string)
// OrderedKeys returns the keys of this {{ .Name }}Data ordered.
//
// This has the convenient side effect of having shorter paths come before longer paths,
// which is particularly useful when creating or updating related records.
OrderedKeys() []string
// FieldNames returns the {{ .Name }}Data keys as a slice of FieldNamer.
FieldNames() (res []models.FieldNamer)
{{- range .Fields }}
// {{ .Name }} returns the value of the {{ .Name }} field.
// If this {{ .Name }} is not set in this {{ $.Name }}Data, then
// the Go zero value for the type is returned.
{{ .Name }}() {{ .IType }}
// Has{{ .Name }} returns true if {{ .Name }} is set in this {{ $.Name }}Data
Has{{ .Name }}() bool
// Set{{ .Name }} sets the {{ .Name }} field with the given value.
// It returns this {{ $.Name }}Data so that calls can be chained.
Set{{ .Name }}(value {{ .IType }}) {{ $.Name }}Data
// Unset{{ .Name }} removes the value of the {{ .Name }} field if it exists.
// It returns this {{ $.Name }}Data so that calls can be chained.
Unset{{ .Name }}() {{ $.Name }}Data
{{- end }}
}
// A {{ .Name }}GroupAggregateRow holds a row of results of a query with a group by clause
type {{ .Name }}GroupAggregateRow interface {
// Values() returns the values of the actual query
Values() {{ .Name }}Data
// Count is the number of lines aggregated into this one
Count() int
// Condition can be used to query the aggregated rows separately if needed
Condition() {{ $.QueryPackageName }}.{{ .Name }}Condition
}
`))
|
package main
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import (
"crypto/sha512"
"database/sql"
"encoding/base64"
"fmt"
"github.com/apache/incubator-trafficcontrol/traffic_monitor_golang/common/log"
"github.com/apache/incubator-trafficcontrol/traffic_ops/tocookie"
"net/http"
"time"
)
// ServerName identifies this server in the X-Server-Name response header.
const ServerName = "traffic_ops_golang" + "/" + Version
// wrapHeaders decorates h so every response carries the standard CORS
// headers, the server identification header, and a Whole-Content-SHA512
// header holding the base64-encoded SHA-512 digest of the response body.
// The handler's output is captured in a BodyInterceptor so the digest can
// be set before any body bytes reach the client.
func wrapHeaders(h RegexHandlerFunc) RegexHandlerFunc {
	return func(w http.ResponseWriter, r *http.Request, p ParamMap) {
		for _, hdr := range [][2]string{
			{"Access-Control-Allow-Credentials", "true"},
			{"Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, Set-Cookie, Cookie"},
			{"Access-Control-Allow-Methods", "POST,GET,OPTIONS,PUT,DELETE"},
			{"Access-Control-Allow-Origin", "*"},
			{"X-Server-Name", ServerName},
		} {
			w.Header().Set(hdr[0], hdr[1])
		}
		capture := &BodyInterceptor{w: w}
		h(capture, r, p)
		digest := sha512.Sum512(capture.Body())
		w.Header().Set("Whole-Content-SHA512", base64.StdEncoding.EncodeToString(digest[:]))
		capture.RealWrite(capture.Body())
	}
}
// AuthRegexHandlerFunc is a RegexHandlerFunc that additionally receives the
// authenticated username and its privilege level.
type AuthRegexHandlerFunc func(w http.ResponseWriter, r *http.Request, params ParamMap, user string, privLevel int)

// handlerToAuthHandler adapts an unauthenticated handler to the authenticated
// signature, discarding the user and privilege-level arguments.
func handlerToAuthHandler(h RegexHandlerFunc) AuthRegexHandlerFunc {
	return func(w http.ResponseWriter, r *http.Request, p ParamMap, user string, privLevel int) { h(w, r, p) }
}
// wrapAuth adds cookie-based authentication to h; see wrapAuthWithData.
func wrapAuth(h RegexHandlerFunc, noAuth bool, secret string, privLevelStmt *sql.Stmt, privLevelRequired int) RegexHandlerFunc {
	return wrapAuthWithData(handlerToAuthHandler(h), noAuth, secret, privLevelStmt, privLevelRequired)
}
// wrapAuthWithData wraps h with cookie authentication and access logging.
// If noAuth is true, authentication is skipped and h is invoked with an empty
// username and PrivLevelInvalid. Otherwise the auth cookie is validated, the
// user's privilege level is checked against privLevelRequired, the cookie is
// refreshed, and h is invoked with the authenticated username and privilege
// level. Any failure writes a 401 and logs the reason.
func wrapAuthWithData(h AuthRegexHandlerFunc, noAuth bool, secret string, privLevelStmt *sql.Stmt, privLevelRequired int) RegexHandlerFunc {
	if noAuth {
		return func(w http.ResponseWriter, r *http.Request, p ParamMap) {
			h(w, r, p, "", PrivLevelInvalid)
		}
	}
	return func(w http.ResponseWriter, r *http.Request, p ParamMap) {
		// TODO remove, and make username available to wrapLogTime
		start := time.Now()
		iw := &Interceptor{w: w}
		w = iw
		username := "-"
		// Log the access line once the handler has finished, so the recorded
		// status code and byte count are final.
		defer func() {
			log.EventfRaw(`%s - %s [%s] "%v %v HTTP/1.1" %v %v %v "%v"`, r.RemoteAddr, username, time.Now().Format(AccessLogTimeFormat), r.Method, r.URL.Path, iw.code, iw.byteCount, int(time.Since(start)/time.Millisecond), r.UserAgent())
		}()
		handleUnauthorized := func(reason string) {
			status := http.StatusUnauthorized
			w.WriteHeader(status)
			// Fprint, not Fprintf: the status text is data, not a format
			// string (go vet flags the non-constant format string).
			fmt.Fprint(w, http.StatusText(status))
			log.Infof("%v %v %v %v returned unauthorized: %v\n", r.RemoteAddr, r.Method, r.URL.Path, username, reason)
		}
		cookie, err := r.Cookie(tocookie.Name)
		if err != nil {
			handleUnauthorized("error getting cookie: " + err.Error())
			return
		}
		if cookie == nil {
			handleUnauthorized("no auth cookie")
			return
		}
		oldCookie, err := tocookie.Parse(secret, cookie.Value)
		if err != nil {
			handleUnauthorized("cookie error: " + err.Error())
			return
		}
		username = oldCookie.AuthData
		privLevel := PrivLevel(privLevelStmt, username)
		if privLevel < privLevelRequired {
			handleUnauthorized("insufficient privileges")
			return
		}
		// Re-issue the cookie to slide its expiry forward.
		newCookieVal := tocookie.Refresh(oldCookie, secret)
		http.SetCookie(w, &http.Cookie{Name: tocookie.Name, Value: newCookieVal, Path: "/", HttpOnly: true})
		h(w, r, p, username, privLevel)
	}
}
// AccessLogTimeFormat is the Common Log Format timestamp layout.
const AccessLogTimeFormat = "02/Jan/2006:15:04:05 -0700"
// wrapAccessLog wraps h, emitting a Common-Log-Format-style access line for
// every request. The username is taken from the auth cookie when present and
// valid; otherwise "-" is logged. The secret is needed to validate the cookie.
func wrapAccessLog(secret string, h http.Handler) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		iw := &Interceptor{w: w}
		user := "-"
		// Best effort: a missing or invalid cookie just leaves user as "-".
		if cookie, err := r.Cookie(tocookie.Name); err == nil && cookie != nil {
			if parsed, err := tocookie.Parse(secret, cookie.Value); err == nil {
				user = parsed.AuthData
			}
		}
		start := time.Now()
		defer func() {
			log.EventfRaw(`%s - %s [%s] "%v %v HTTP/1.1" %v %v %v "%v"`, r.RemoteAddr, user, time.Now().Format(AccessLogTimeFormat), r.Method, r.URL.Path, iw.code, iw.byteCount, int(time.Since(start)/time.Millisecond), r.UserAgent())
		}()
		h.ServeHTTP(iw, r)
	}
}
type Interceptor struct {
w http.ResponseWriter
code int
byteCount int
}
func (i *Interceptor) WriteHeader(rc int) {
i.w.WriteHeader(rc)
i.code = rc
}
func (i *Interceptor) Write(b []byte) (int, error) {
wi, werr := i.w.Write(b)
i.byteCount += wi
if i.code == 0 {
i.code = 200
}
return wi, werr
}
func (i *Interceptor) Header() http.Header {
return i.w.Header()
}
// BodyInterceptor fulfills the Writer interface, but records the body and doesn't actually write. This allows performing operations on the entire body written by a handler, for example, compressing or hashing. To actually write, call `RealWrite()`. Note this means `len(b)` and `nil` are always returned by `Write()`, any real write errors will be returned by `RealWrite()`.
type BodyInterceptor struct {
w http.ResponseWriter
body []byte
}
func (i *BodyInterceptor) WriteHeader(rc int) {
i.w.WriteHeader(rc)
}
func (i *BodyInterceptor) Write(b []byte) (int, error) {
i.body = append(i.body, b...)
return len(b), nil
}
func (i *BodyInterceptor) Header() http.Header {
return i.w.Header()
}
func (i *BodyInterceptor) RealWrite(b []byte) (int, error) {
wi, werr := i.w.Write(i.body)
return wi, werr
}
func (i *BodyInterceptor) Body() []byte {
return i.body
}
renamed ParamMap to PathParams
package main
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import (
"crypto/sha512"
"database/sql"
"encoding/base64"
"fmt"
"net/http"
"time"
"github.com/apache/incubator-trafficcontrol/traffic_monitor_golang/common/log"
"github.com/apache/incubator-trafficcontrol/traffic_ops/tocookie"
)
// ServerName identifies this server in the X-Server-Name response header.
const ServerName = "traffic_ops_golang" + "/" + Version
// wrapHeaders decorates h so every response carries the standard CORS
// headers, the server identification header, and a Whole-Content-SHA512
// header holding the base64-encoded SHA-512 digest of the response body.
// The handler writes into a BodyInterceptor, so the digest header can be set
// before the captured body is finally flushed via RealWrite.
func wrapHeaders(h RegexHandlerFunc) RegexHandlerFunc {
	return func(w http.ResponseWriter, r *http.Request, p PathParams) {
		w.Header().Set("Access-Control-Allow-Credentials", "true")
		w.Header().Set("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, Set-Cookie, Cookie")
		w.Header().Set("Access-Control-Allow-Methods", "POST,GET,OPTIONS,PUT,DELETE")
		w.Header().Set("Access-Control-Allow-Origin", "*")
		w.Header().Set("X-Server-Name", ServerName)
		iw := &BodyInterceptor{w: w}
		h(iw, r, p)
		sha := sha512.Sum512(iw.Body())
		w.Header().Set("Whole-Content-SHA512", base64.StdEncoding.EncodeToString(sha[:]))
		iw.RealWrite(iw.Body())
	}
}
// AuthRegexHandlerFunc is a RegexHandlerFunc that additionally receives the
// authenticated username and its privilege level.
type AuthRegexHandlerFunc func(w http.ResponseWriter, r *http.Request, params PathParams, user string, privLevel int)

// handlerToAuthHandler adapts an unauthenticated handler to the authenticated
// signature, discarding the user and privilege-level arguments.
func handlerToAuthHandler(h RegexHandlerFunc) AuthRegexHandlerFunc {
	return func(w http.ResponseWriter, r *http.Request, p PathParams, user string, privLevel int) { h(w, r, p) }
}

// wrapAuth adds cookie-based authentication to h; see wrapAuthWithData.
func wrapAuth(h RegexHandlerFunc, noAuth bool, secret string, privLevelStmt *sql.Stmt, privLevelRequired int) RegexHandlerFunc {
	return wrapAuthWithData(handlerToAuthHandler(h), noAuth, secret, privLevelStmt, privLevelRequired)
}
// wrapAuthWithData wraps h with cookie authentication and access logging.
// If noAuth is true, authentication is skipped and h is invoked with an empty
// username and PrivLevelInvalid. Otherwise the auth cookie is validated, the
// user's privilege level is checked against privLevelRequired, the cookie is
// refreshed, and h is invoked with the authenticated username and privilege
// level. Any failure writes a 401 and logs the reason.
func wrapAuthWithData(h AuthRegexHandlerFunc, noAuth bool, secret string, privLevelStmt *sql.Stmt, privLevelRequired int) RegexHandlerFunc {
	if noAuth {
		return func(w http.ResponseWriter, r *http.Request, p PathParams) {
			h(w, r, p, "", PrivLevelInvalid)
		}
	}
	return func(w http.ResponseWriter, r *http.Request, p PathParams) {
		// TODO remove, and make username available to wrapLogTime
		start := time.Now()
		iw := &Interceptor{w: w}
		w = iw
		username := "-"
		// Log the access line once the handler has finished, so the recorded
		// status code and byte count are final.
		defer func() {
			log.EventfRaw(`%s - %s [%s] "%v %v HTTP/1.1" %v %v %v "%v"`, r.RemoteAddr, username, time.Now().Format(AccessLogTimeFormat), r.Method, r.URL.Path, iw.code, iw.byteCount, int(time.Since(start)/time.Millisecond), r.UserAgent())
		}()
		handleUnauthorized := func(reason string) {
			status := http.StatusUnauthorized
			w.WriteHeader(status)
			// Fprint, not Fprintf: the status text is data, not a format
			// string (go vet flags the non-constant format string).
			fmt.Fprint(w, http.StatusText(status))
			log.Infof("%v %v %v %v returned unauthorized: %v\n", r.RemoteAddr, r.Method, r.URL.Path, username, reason)
		}
		cookie, err := r.Cookie(tocookie.Name)
		if err != nil {
			handleUnauthorized("error getting cookie: " + err.Error())
			return
		}
		if cookie == nil {
			handleUnauthorized("no auth cookie")
			return
		}
		oldCookie, err := tocookie.Parse(secret, cookie.Value)
		if err != nil {
			handleUnauthorized("cookie error: " + err.Error())
			return
		}
		username = oldCookie.AuthData
		privLevel := PrivLevel(privLevelStmt, username)
		if privLevel < privLevelRequired {
			handleUnauthorized("insufficient privileges")
			return
		}
		// Re-issue the cookie to slide its expiry forward.
		newCookieVal := tocookie.Refresh(oldCookie, secret)
		http.SetCookie(w, &http.Cookie{Name: tocookie.Name, Value: newCookieVal, Path: "/", HttpOnly: true})
		h(w, r, p, username, privLevel)
	}
}
// AccessLogTimeFormat is the Common Log Format timestamp layout.
const AccessLogTimeFormat = "02/Jan/2006:15:04:05 -0700"
// wrapAccessLog wraps h, emitting a Common-Log-Format-style access line for
// every request. The username is taken from the auth cookie when present and
// valid; otherwise "-" is logged. The secret is needed to validate the cookie.
func wrapAccessLog(secret string, h http.Handler) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		iw := &Interceptor{w: w}
		user := "-"
		// Best effort: a missing or invalid cookie just leaves user as "-".
		if cookie, err := r.Cookie(tocookie.Name); err == nil && cookie != nil {
			if parsed, err := tocookie.Parse(secret, cookie.Value); err == nil {
				user = parsed.AuthData
			}
		}
		start := time.Now()
		defer func() {
			log.EventfRaw(`%s - %s [%s] "%v %v HTTP/1.1" %v %v %v "%v"`, r.RemoteAddr, user, time.Now().Format(AccessLogTimeFormat), r.Method, r.URL.Path, iw.code, iw.byteCount, int(time.Since(start)/time.Millisecond), r.UserAgent())
		}()
		h.ServeHTTP(iw, r)
	}
}
type Interceptor struct {
w http.ResponseWriter
code int
byteCount int
}
func (i *Interceptor) WriteHeader(rc int) {
i.w.WriteHeader(rc)
i.code = rc
}
func (i *Interceptor) Write(b []byte) (int, error) {
wi, werr := i.w.Write(b)
i.byteCount += wi
if i.code == 0 {
i.code = 200
}
return wi, werr
}
func (i *Interceptor) Header() http.Header {
return i.w.Header()
}
// BodyInterceptor fulfills the Writer interface, but records the body and doesn't actually write. This allows performing operations on the entire body written by a handler, for example, compressing or hashing. To actually write, call `RealWrite()`. Note this means `len(b)` and `nil` are always returned by `Write()`, any real write errors will be returned by `RealWrite()`.
type BodyInterceptor struct {
w http.ResponseWriter
body []byte
}
func (i *BodyInterceptor) WriteHeader(rc int) {
i.w.WriteHeader(rc)
}
func (i *BodyInterceptor) Write(b []byte) (int, error) {
i.body = append(i.body, b...)
return len(b), nil
}
func (i *BodyInterceptor) Header() http.Header {
return i.w.Header()
}
func (i *BodyInterceptor) RealWrite(b []byte) (int, error) {
wi, werr := i.w.Write(i.body)
return wi, werr
}
func (i *BodyInterceptor) Body() []byte {
return i.body
}
|
// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package state
import (
"fmt"
"math/rand"
"net"
"github.com/juju/errors"
jujutxn "github.com/juju/txn"
"github.com/juju/utils/set"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
"gopkg.in/mgo.v2/txn"
"github.com/juju/juju/network"
"strings"
)
// defaultSpaceName is the name of the default space to assign containers.
// Currently hard-coded to 'default'; we may consider making this a model
// config setting in the future.
const defaultSpaceName = "default"
// LinkLayerDevice returns the link-layer device matching the given name. An
// error satisfying errors.IsNotFound() is returned when no such device exists
// on the machine.
func (m *Machine) LinkLayerDevice(name string) (*LinkLayerDevice, error) {
	linkLayerDevices, closer := m.st.db().GetCollection(linkLayerDevicesC)
	defer closer()
	linkLayerDeviceDocID := m.linkLayerDeviceDocIDFromName(name)
	deviceAsString := m.deviceAsStringFromName(name)
	var doc linkLayerDeviceDoc
	err := linkLayerDevices.FindId(linkLayerDeviceDocID).One(&doc)
	if err == mgo.ErrNotFound {
		return nil, errors.NotFoundf("%s", deviceAsString)
	} else if err != nil {
		return nil, errors.Annotatef(err, "cannot get %s", deviceAsString)
	}
	return newLinkLayerDevice(m.st, doc), nil
}

// linkLayerDeviceDocIDFromName returns the model-qualified document ID for
// the named device on this machine.
func (m *Machine) linkLayerDeviceDocIDFromName(deviceName string) string {
	return m.st.docID(m.linkLayerDeviceGlobalKeyFromName(deviceName))
}

// linkLayerDeviceGlobalKeyFromName returns the global key identifying the
// named device on this machine.
func (m *Machine) linkLayerDeviceGlobalKeyFromName(deviceName string) string {
	return linkLayerDeviceGlobalKey(m.doc.Id, deviceName)
}

// deviceAsStringFromName returns a human-readable description of the named
// device, used in error messages.
func (m *Machine) deviceAsStringFromName(deviceName string) string {
	return fmt.Sprintf("device %q on machine %q", deviceName, m.doc.Id)
}
// AllLinkLayerDevices returns all existing link-layer devices of the machine.
func (m *Machine) AllLinkLayerDevices() ([]*LinkLayerDevice, error) {
	var allDevices []*LinkLayerDevice
	callbackFunc := func(resultDoc *linkLayerDeviceDoc) {
		allDevices = append(allDevices, newLinkLayerDevice(m.st, *resultDoc))
	}
	// nil field selection means each full document is fetched.
	if err := m.forEachLinkLayerDeviceDoc(nil, callbackFunc); err != nil {
		return nil, errors.Trace(err)
	}
	return allDevices, nil
}

// forEachLinkLayerDeviceDoc iterates over this machine's link-layer device
// documents, invoking callbackFunc for each one. When docFieldsToSelect is
// non-nil, only the selected fields are populated in the docs passed to the
// callback. Note the callback receives a pointer to a reused iteration
// variable, so it must copy the doc if it retains it.
func (m *Machine) forEachLinkLayerDeviceDoc(docFieldsToSelect bson.D, callbackFunc func(resultDoc *linkLayerDeviceDoc)) error {
	linkLayerDevices, closer := m.st.db().GetCollection(linkLayerDevicesC)
	defer closer()
	query := linkLayerDevices.Find(bson.D{{"machine-id", m.doc.Id}})
	if docFieldsToSelect != nil {
		query = query.Select(docFieldsToSelect)
	}
	iter := query.Iter()
	var resultDoc linkLayerDeviceDoc
	for iter.Next(&resultDoc) {
		callbackFunc(&resultDoc)
	}
	return errors.Trace(iter.Close())
}
// AllProviderInterfaceInfos returns the provider details for all of
// the link layer devices belonging to this machine. These can be used
// to identify the devices when interacting with the provider
// directly (for example, releasing container addresses).
func (m *Machine) AllProviderInterfaceInfos() ([]network.ProviderInterfaceInfo, error) {
	devices, err := m.AllLinkLayerDevices()
	if err != nil {
		return nil, errors.Trace(err)
	}
	infos := make([]network.ProviderInterfaceInfo, 0, len(devices))
	for _, dev := range devices {
		infos = append(infos, network.ProviderInterfaceInfo{
			InterfaceName: dev.Name(),
			MACAddress:    dev.MACAddress(),
			ProviderId:    dev.ProviderID(),
		})
	}
	return infos, nil
}
// RemoveAllLinkLayerDevices removes all existing link-layer devices of the
// machine in a single transaction. No error is returned when some or all of the
// devices were already removed.
func (m *Machine) RemoveAllLinkLayerDevices() error {
	ops, err := m.removeAllLinkLayerDevicesOps()
	if err != nil {
		return errors.Trace(err)
	}
	return m.st.runTransaction(ops)
}

// removeAllLinkLayerDevicesOps returns the transaction operations removing
// every link-layer device document of this machine, along with the
// provider-ID bookkeeping entry of each device that has one. Only the _id
// field is fetched per document, as that is all the removal ops need.
func (m *Machine) removeAllLinkLayerDevicesOps() ([]txn.Op, error) {
	var ops []txn.Op
	callbackFunc := func(resultDoc *linkLayerDeviceDoc) {
		removeOps := removeLinkLayerDeviceUnconditionallyOps(resultDoc.DocID)
		ops = append(ops, removeOps...)
		if resultDoc.ProviderID != "" {
			providerId := network.Id(resultDoc.ProviderID)
			op := m.st.networkEntityGlobalKeyRemoveOp("linklayerdevice", providerId)
			ops = append(ops, op)
		}
	}
	selectDocIDOnly := bson.D{{"_id", 1}}
	if err := m.forEachLinkLayerDeviceDoc(selectDocIDOnly, callbackFunc); err != nil {
		return nil, errors.Trace(err)
	}
	return ops, nil
}
// LinkLayerDeviceArgs contains the arguments accepted by Machine.SetLinkLayerDevices().
type LinkLayerDeviceArgs struct {
	// Name is the name of the device as it appears on the machine.
	// It must be non-empty; see Machine.validateSetLinkLayerDeviceArgs.
	Name string

	// MTU is the maximum transmission unit the device can handle.
	MTU uint

	// ProviderID is a provider-specific ID of the device. Empty when not
	// supported by the provider. Cannot be cleared once set.
	ProviderID network.Id

	// Type is the type of the underlying link-layer device. It must be one of
	// the values accepted by IsValidLinkLayerDeviceType.
	Type LinkLayerDeviceType

	// MACAddress is the media access control address for the device.
	// When non-empty it must parse with net.ParseMAC.
	MACAddress string

	// IsAutoStart is true if the device should be activated on boot.
	IsAutoStart bool

	// IsUp is true when the device is up (enabled).
	IsUp bool

	// ParentName is the name of the parent device, which may be empty. If set,
	// it needs to be an existing device on the same machine, unless the current
	// device is inside a container, in which case ParentName can be a global
	// key of a BridgeDevice on the host machine of the container. Traffic
	// originating from a device egresses from its parent device.
	ParentName string
}
// SetLinkLayerDevices sets link-layer devices on the machine, adding or
// updating existing devices as needed, in a single transaction. ProviderID
// field can be empty if not supported by the provider, but when set must be
// unique within the model, and cannot be unset once set. Errors are returned in
// the following cases:
// - Machine is no longer alive or is missing;
// - Model no longer alive;
// - errors.NotValidError, when any of the fields in args contain invalid values;
// - ErrProviderIDNotUnique, when one or more specified ProviderIDs are not unique;
// Setting new parent devices must be done in a separate call than setting their
// children on the same machine.
func (m *Machine) SetLinkLayerDevices(devicesArgs ...LinkLayerDeviceArgs) (err error) {
	defer errors.DeferredAnnotatef(&err, "cannot set link-layer devices to machine %q", m.doc.Id)
	if len(devicesArgs) == 0 {
		logger.Warningf("no device addresses to set")
		return nil
	}
	buildTxn := func(attempt int) ([]txn.Op, error) {
		// Validate args and build the pending docs on every attempt, as
		// the validation consults current state.
		newDocs, err := m.prepareToSetLinkLayerDevices(devicesArgs)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if m.doc.Life != Alive {
			return nil, errors.Errorf("machine %q not alive", m.doc.Id)
		}
		if attempt > 0 {
			// A previous attempt failed its asserts; re-check liveness and
			// ProviderID uniqueness to report a precise error.
			if err := m.isStillAlive(); err != nil {
				return nil, errors.Trace(err)
			}
			allIds, err := m.st.allProviderIDsForLinkLayerDevices()
			if err != nil {
				return nil, errors.Trace(err)
			}
			for _, args := range devicesArgs {
				if allIds.Contains(string(args.ProviderID)) {
					err := NewProviderIDNotUniqueError(args.ProviderID)
					return nil, errors.Annotatef(err, "invalid device %q", args.Name)
				}
			}
		}
		// We've checked the model is alive directly, and we assert the machine is alive, we don't need to also
		// assert the model is alive, because then the machine would be dying as well.
		ops := []txn.Op{
			m.assertAliveOp(),
		}
		setDevicesOps, err := m.setDevicesFromDocsOps(newDocs)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if len(setDevicesOps) == 0 {
			// No need to assert only that the machine is alive
			logger.Debugf("no changes to LinkLayerDevices for machine %q", m.Id())
			return nil, jujutxn.ErrNoOperations
		}
		return append(ops, setDevicesOps...), nil
	}
	if err := m.st.run(buildTxn); err != nil {
		return errors.Trace(err)
	}
	return nil
}
// allProviderIDsForLinkLayerDevices returns all provider IDs already used by
// link-layer devices in this model.
func (st *State) allProviderIDsForLinkLayerDevices() (set.Strings, error) {
	return st.allProviderIDsForEntity("linklayerdevice")
}

// allProviderIDsForAddresses returns all provider IDs already used by IP
// addresses in this model.
func (st *State) allProviderIDsForAddresses() (set.Strings, error) {
	return st.allProviderIDsForEntity("address")
}

// allProviderIDsForEntity collects the provider IDs recorded in providerIDsC
// for the given entity kind in this model. Document IDs have the form
// "<model-uuid>:<entityName>:<provider-id>"; the model UUID and entity
// prefixes are stripped before the ID is added to the result set.
func (st *State) allProviderIDsForEntity(entityName string) (set.Strings, error) {
	idCollection, closer := st.db().GetCollection(providerIDsC)
	defer closer()
	allProviderIDs := set.NewStrings()
	var doc struct {
		ID string `bson:"_id"`
	}
	pattern := fmt.Sprintf("^%s:%s:.+$", st.ModelUUID(), entityName)
	modelProviderIDs := bson.D{{"_id", bson.D{{"$regex", pattern}}}}
	iter := idCollection.Find(modelProviderIDs).Iter()
	for iter.Next(&doc) {
		// localID strips the model UUID; the slice strips "<entityName>:".
		localProviderID := st.localID(doc.ID)[len(entityName)+1:]
		allProviderIDs.Add(localProviderID)
	}
	if err := iter.Close(); err != nil {
		return nil, errors.Trace(err)
	}
	return allProviderIDs, nil
}
// prepareToSetLinkLayerDevices validates each of devicesArgs in order and
// converts them to pending linkLayerDeviceDocs, rejecting duplicate names
// within the same call.
func (m *Machine) prepareToSetLinkLayerDevices(devicesArgs []LinkLayerDeviceArgs) ([]linkLayerDeviceDoc, error) {
	var pendingDocs []linkLayerDeviceDoc
	pendingNames := set.NewStrings()
	for _, args := range devicesArgs {
		newDoc, err := m.prepareOneSetLinkLayerDeviceArgs(&args, pendingNames)
		if err != nil {
			return nil, errors.Trace(err)
		}
		pendingNames.Add(args.Name)
		pendingDocs = append(pendingDocs, *newDoc)
	}
	return pendingDocs, nil
}

// prepareOneSetLinkLayerDeviceArgs validates a single args value against both
// persistent state and the names already pending in this call, returning the
// doc to insert or update. Errors are annotated with the device name.
func (m *Machine) prepareOneSetLinkLayerDeviceArgs(args *LinkLayerDeviceArgs, pendingNames set.Strings) (_ *linkLayerDeviceDoc, err error) {
	defer errors.DeferredAnnotatef(&err, "invalid device %q", args.Name)
	if err := m.validateSetLinkLayerDeviceArgs(args); err != nil {
		return nil, errors.Trace(err)
	}
	if pendingNames.Contains(args.Name) {
		return nil, errors.NewNotValid(nil, "Name specified more than once")
	}
	return m.newLinkLayerDeviceDocFromArgs(args), nil
}
// validateSetLinkLayerDeviceArgs checks a single LinkLayerDeviceArgs for
// validity: a non-empty Name, a valid Type, a parseable MACAddress when set,
// and a valid ParentName when set. An invalid device name only logs a
// warning, for compatibility with devices that exist anyway.
func (m *Machine) validateSetLinkLayerDeviceArgs(args *LinkLayerDeviceArgs) error {
	if args.Name == "" {
		return errors.NotValidf("empty Name")
	}
	if !IsValidLinkLayerDeviceName(args.Name) {
		logger.Warningf(
			"link-layer device %q on machine %q has invalid name (using anyway)",
			args.Name, m.Id(),
		)
	}
	if args.ParentName != "" {
		if err := m.validateLinkLayerDeviceParent(args); err != nil {
			return errors.Trace(err)
		}
	}
	if !IsValidLinkLayerDeviceType(string(args.Type)) {
		return errors.NotValidf("Type %q", args.Type)
	}
	if args.MACAddress != "" {
		if _, err := net.ParseMAC(args.MACAddress); err != nil {
			return errors.NotValidf("MACAddress %q", args.MACAddress)
		}
	}
	return nil
}
// validateLinkLayerDeviceParent validates args.ParentName. A plain name must
// refer to an existing device on the same machine. A global-key form is only
// allowed when this machine is a container and the key's machine is the
// container's host; the referenced parent must then be an alive BridgeDevice.
func (m *Machine) validateLinkLayerDeviceParent(args *LinkLayerDeviceArgs) error {
	hostMachineID, parentDeviceName, err := parseLinkLayerDeviceParentNameAsGlobalKey(args.ParentName)
	if err != nil {
		return errors.Trace(err)
	} else if hostMachineID == "" {
		// Not a global key, so validate as usual.
		if err := m.validateParentDeviceNameWhenNotAGlobalKey(args); errors.IsNotFound(err) {
			return errors.NewNotValid(err, "ParentName not valid")
		} else if err != nil {
			return errors.Trace(err)
		}
		return nil
	}
	ourParentMachineID, hasParent := m.ParentId()
	if !hasParent {
		// Using global key for ParentName not allowed for non-container machine
		// devices.
		return errors.NotValidf("ParentName %q for non-container machine %q", args.ParentName, m.Id())
	}
	if hostMachineID != ourParentMachineID {
		// ParentName as global key only allowed when the key's machine ID is
		// the container's host machine.
		return errors.NotValidf("ParentName %q on non-host machine %q", args.ParentName, hostMachineID)
	}
	err = m.verifyHostMachineParentDeviceExistsAndIsABridgeDevice(hostMachineID, parentDeviceName)
	return errors.Trace(err)
}

// parseLinkLayerDeviceParentNameAsGlobalKey attempts to interpret parentName
// as a link-layer device global key. It returns empty strings with a nil
// error when parentName is not in global-key form at all, and an error when
// it resembles one but is malformed.
func parseLinkLayerDeviceParentNameAsGlobalKey(parentName string) (hostMachineID, parentDeviceName string, err error) {
	hostMachineID, parentDeviceName, canBeGlobalKey := parseLinkLayerDeviceGlobalKey(parentName)
	if !canBeGlobalKey {
		return "", "", nil
	} else if hostMachineID == "" {
		return "", "", errors.NotValidf("ParentName %q format", parentName)
	}
	return hostMachineID, parentDeviceName, nil
}
// verifyHostMachineParentDeviceExistsAndIsABridgeDevice checks that the named
// parent device exists on the given (alive) host machine and is of type
// BridgeDevice, which is required for container devices parented across
// machines.
func (m *Machine) verifyHostMachineParentDeviceExistsAndIsABridgeDevice(hostMachineID, parentDeviceName string) error {
	hostMachine, err := m.st.Machine(hostMachineID)
	if errors.IsNotFound(err) || err == nil && hostMachine.Life() != Alive {
		return errors.Errorf("host machine %q of parent device %q not found or not alive", hostMachineID, parentDeviceName)
	} else if err != nil {
		return errors.Trace(err)
	}
	parentDevice, err := hostMachine.LinkLayerDevice(parentDeviceName)
	if errors.IsNotFound(err) {
		return errors.NotFoundf("parent device %q on host machine %q", parentDeviceName, hostMachineID)
	} else if err != nil {
		return errors.Trace(err)
	}
	if parentDevice.Type() != BridgeDevice {
		errorMessage := fmt.Sprintf(
			"parent device %q on host machine %q must be of type %q, not type %q",
			parentDeviceName, hostMachineID, BridgeDevice, parentDevice.Type(),
		)
		return errors.NewNotValid(nil, errorMessage)
	}
	return nil
}
// validateParentDeviceNameWhenNotAGlobalKey validates a plain (non-global-key)
// ParentName: it must differ from the device's own Name and refer to a device
// already existing on this machine. An invalid parent name only logs a
// warning, matching the leniency for device names.
func (m *Machine) validateParentDeviceNameWhenNotAGlobalKey(args *LinkLayerDeviceArgs) error {
	if !IsValidLinkLayerDeviceName(args.ParentName) {
		logger.Warningf(
			"parent link-layer device %q on machine %q has invalid name (using anyway)",
			args.ParentName, m.Id(),
		)
	}
	if args.Name == args.ParentName {
		return errors.NewNotValid(nil, "Name and ParentName must be different")
	}
	if err := m.verifyParentDeviceExists(args.ParentName); err != nil {
		return errors.Trace(err)
	}
	return nil
}

// verifyParentDeviceExists returns an error (satisfying errors.IsNotFound
// when absent) unless the named device exists on this machine.
func (m *Machine) verifyParentDeviceExists(parentName string) error {
	if _, err := m.LinkLayerDevice(parentName); err != nil {
		return errors.Trace(err)
	}
	return nil
}
// newLinkLayerDeviceDocFromArgs builds the linkLayerDeviceDoc to persist for
// the given (already validated) args on this machine.
func (m *Machine) newLinkLayerDeviceDocFromArgs(args *LinkLayerDeviceArgs) *linkLayerDeviceDoc {
	linkLayerDeviceDocID := m.linkLayerDeviceDocIDFromName(args.Name)
	providerID := string(args.ProviderID)
	modelUUID := m.st.ModelUUID()
	return &linkLayerDeviceDoc{
		DocID:       linkLayerDeviceDocID,
		Name:        args.Name,
		ModelUUID:   modelUUID,
		MTU:         args.MTU,
		ProviderID:  providerID,
		MachineID:   m.doc.Id,
		Type:        args.Type,
		MACAddress:  args.MACAddress,
		IsAutoStart: args.IsAutoStart,
		IsUp:        args.IsUp,
		ParentName:  args.ParentName,
	}
}
// isStillAlive re-reads this machine's life from state and returns an error
// unless it is still Alive. Used between transaction attempts to give a
// precise failure reason.
func (m *Machine) isStillAlive() error {
	if machineAlive, err := isAlive(m.st, machinesC, m.doc.Id); err != nil {
		return errors.Trace(err)
	} else if !machineAlive {
		return errors.Errorf("machine not found or not alive")
	}
	return nil
}

// assertAliveOp returns an assert-only transaction op ensuring this machine
// is Alive when the transaction is applied.
func (m *Machine) assertAliveOp() txn.Op {
	return txn.Op{
		C:      machinesC,
		Id:     m.doc.Id,
		Assert: isAliveDoc,
	}
}
// setDevicesFromDocsOps returns the transaction operations applying newDocs:
// an insert for each device that does not exist yet, and update ops for each
// that does. The returned ops contain no assert-only entries, so an empty
// result means nothing would change.
func (m *Machine) setDevicesFromDocsOps(newDocs []linkLayerDeviceDoc) ([]txn.Op, error) {
	devices, closer := m.st.db().GetCollection(linkLayerDevicesC)
	defer closer()
	var ops []txn.Op
	for _, newDoc := range newDocs {
		var existingDoc linkLayerDeviceDoc
		if err := devices.FindId(newDoc.DocID).One(&existingDoc); err == mgo.ErrNotFound {
			// Device does not exist yet - insert it.
			insertOps, err := m.insertLinkLayerDeviceOps(&newDoc)
			if err != nil {
				return nil, errors.Trace(err)
			}
			ops = append(ops, insertOps...)
		} else if err == nil {
			// Device already exists - update what's possible.
			updateOps, err := m.updateLinkLayerDeviceOps(&existingDoc, &newDoc)
			if err != nil {
				return nil, errors.Trace(err)
			}
			ops = append(ops, updateOps...)
		} else {
			return nil, errors.Trace(err)
		}
	}
	return ops, nil
}
// insertLinkLayerDeviceOps returns the transaction operations inserting
// newDoc: the device document itself plus its refs document, and, when the
// device has a parent, ops asserting the parent exists and bumping its
// children count. A non-empty ProviderID also records the ID globally.
func (m *Machine) insertLinkLayerDeviceOps(newDoc *linkLayerDeviceDoc) ([]txn.Op, error) {
	modelUUID, linkLayerDeviceDocID := newDoc.ModelUUID, newDoc.DocID
	var ops []txn.Op
	if newDoc.ParentName != "" {
		newParentDocID, err := m.parentDocIDFromDeviceDoc(newDoc)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if newParentDocID != "" {
			ops = append(ops, assertLinkLayerDeviceExistsOp(newParentDocID))
			ops = append(ops, incrementDeviceNumChildrenOp(newParentDocID))
		}
	}
	if newDoc.ProviderID != "" {
		id := network.Id(newDoc.ProviderID)
		ops = append(ops, m.st.networkEntityGlobalKeyOp("linklayerdevice", id))
	}
	return append(ops,
		insertLinkLayerDeviceDocOp(newDoc),
		insertLinkLayerDevicesRefsOp(modelUUID, linkLayerDeviceDocID),
	), nil
}

// parentDocIDFromDeviceDoc resolves doc.ParentName to the parent device's
// document ID, handling both the plain-name form (same machine) and the
// global-key form (device on the container's host machine).
func (m *Machine) parentDocIDFromDeviceDoc(doc *linkLayerDeviceDoc) (string, error) {
	hostMachineID, parentName, err := parseLinkLayerDeviceParentNameAsGlobalKey(doc.ParentName)
	if err != nil {
		return "", errors.Trace(err)
	}
	if parentName == "" {
		// doc.ParentName is not a global key, but on the same machine.
		return m.linkLayerDeviceDocIDFromName(doc.ParentName), nil
	}
	// doc.ParentName is a global key, on a different host machine.
	return m.st.docID(linkLayerDeviceGlobalKey(hostMachineID, parentName)), nil
}
// updateLinkLayerDeviceOps returns the transaction operations updating an
// existing device to match newDoc, adjusting parent children counts when the
// parent changed and recording a newly set ProviderID. Changing an already
// set ProviderID is an error.
func (m *Machine) updateLinkLayerDeviceOps(existingDoc, newDoc *linkLayerDeviceDoc) (ops []txn.Op, err error) {
	// none of the ops in this function are assert-only, so callers can know if there are any changes by just checking len(ops)
	var newParentDocID string
	if newDoc.ParentName != "" {
		newParentDocID, err = m.parentDocIDFromDeviceDoc(newDoc)
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
	var existingParentDocID string
	if existingDoc.ParentName != "" {
		existingParentDocID, err = m.parentDocIDFromDeviceDoc(existingDoc)
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
	// Three parent-change cases: reparented, parent added, parent removed.
	if newParentDocID != "" && existingParentDocID != "" && newParentDocID != existingParentDocID {
		ops = append(ops,
			assertLinkLayerDeviceExistsOp(newParentDocID),
			incrementDeviceNumChildrenOp(newParentDocID),
			assertLinkLayerDeviceExistsOp(existingParentDocID),
			decrementDeviceNumChildrenOp(existingParentDocID),
		)
	} else if newParentDocID != "" && existingParentDocID == "" {
		ops = append(ops, assertLinkLayerDeviceExistsOp(newParentDocID))
		ops = append(ops, incrementDeviceNumChildrenOp(newParentDocID))
	} else if newParentDocID == "" && existingParentDocID != "" {
		ops = append(ops, assertLinkLayerDeviceExistsOp(existingParentDocID))
		ops = append(ops, decrementDeviceNumChildrenOp(existingParentDocID))
	}
	updateDeviceOp, deviceHasChanges := updateLinkLayerDeviceDocOp(existingDoc, newDoc)
	if deviceHasChanges {
		// we only include the op if it will actually change something
		ops = append(ops, updateDeviceOp)
	}
	if newDoc.ProviderID != "" {
		if existingDoc.ProviderID != "" && existingDoc.ProviderID != newDoc.ProviderID {
			return nil, errors.Errorf("cannot change ProviderID of link layer device %q", existingDoc.Name)
		}
		if existingDoc.ProviderID != newDoc.ProviderID {
			// Need to insert the new provider id in providerIDsC
			id := network.Id(newDoc.ProviderID)
			ops = append(ops, m.st.networkEntityGlobalKeyOp("linklayerdevice", id))
		}
	}
	return ops, nil
}
// LinkLayerDeviceAddress contains an IP address assigned to a link-layer
// device. It is the argument type of Machine.SetDevicesAddresses.
type LinkLayerDeviceAddress struct {
	// DeviceName is the name of the link-layer device that has this address.
	// It must refer to a device already set on the machine.
	DeviceName string

	// ConfigMethod is the method used to configure this address.
	ConfigMethod AddressConfigMethod

	// ProviderID is the provider-specific ID of the address. Empty when not
	// supported. Cannot be changed once set to non-empty.
	ProviderID network.Id

	// CIDRAddress is the IP address assigned to the device, in CIDR format
	// (e.g. 10.20.30.5/24 or fc00:1234::/64). It must parse with net.ParseCIDR.
	CIDRAddress string

	// DNSServers contains a list of DNS nameservers to use, which can be empty.
	DNSServers []string

	// DNSSearchDomains contains a list of DNS domain names to qualify
	// hostnames, and can be empty.
	DNSSearchDomains []string

	// GatewayAddress is the address of the gateway to use, which can be empty.
	GatewayAddress string
}
// SetDevicesAddresses sets the addresses of all devices in devicesAddresses,
// adding new or updating existing assignments as needed, in a single
// transaction. ProviderID field can be empty if not supported by the provider,
// but when set must be unique within the model. Errors are returned in the
// following cases:
// - Machine is no longer alive or is missing;
// - Subnet inferred from any CIDRAddress field in args is known but no longer
//   alive (no error reported if the CIDRAddress does not match a known subnet);
// - Model no longer alive;
// - errors.NotValidError, when any of the fields in args contain invalid values;
// - errors.NotFoundError, when any DeviceName in args refers to unknown device;
// - ErrProviderIDNotUnique, when one or more specified ProviderIDs are not unique.
func (m *Machine) SetDevicesAddresses(devicesAddresses ...LinkLayerDeviceAddress) (err error) {
	defer errors.DeferredAnnotatef(&err, "cannot set link-layer device addresses of machine %q", m.doc.Id)
	if len(devicesAddresses) == 0 {
		logger.Warningf("no device addresses to set")
		return nil
	}
	buildTxn := func(attempt int) ([]txn.Op, error) {
		// Validate args and build the pending docs on every attempt, as
		// the validation consults current state.
		newDocs, err := m.prepareToSetDevicesAddresses(devicesAddresses)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if err := m.isStillAlive(); err != nil {
			return nil, errors.Trace(err)
		}
		if attempt > 0 {
			// A previous attempt failed its asserts; re-check ProviderID
			// uniqueness to report a precise error.
			allIds, err := m.st.allProviderIDsForAddresses()
			if err != nil {
				return nil, errors.Trace(err)
			}
			for _, args := range devicesAddresses {
				if allIds.Contains(string(args.ProviderID)) {
					err := NewProviderIDNotUniqueError(args.ProviderID)
					return nil, errors.Annotatef(err, "invalid address %q", args.CIDRAddress)
				}
			}
		}
		// we checked the model is active, but we only assert the machine is alive, because it will be dying if
		// the model is dying.
		ops := []txn.Op{
			m.assertAliveOp(),
		}
		setAddressesOps, err := m.setDevicesAddressesFromDocsOps(newDocs)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if len(setAddressesOps) == 0 {
			// no actual address changes to be queued, so no need to create an op that just asserts
			// the machine is alive
			logger.Debugf("no changes to DevicesAddresses for machine %q", m.Id())
			return nil, jujutxn.ErrNoOperations
		}
		return append(ops, setAddressesOps...), nil
	}
	if err := m.st.run(buildTxn); err != nil {
		return errors.Trace(err)
	}
	return nil
}
// prepareToSetDevicesAddresses validates each entry of devicesAddresses and
// converts it into an ipAddressDoc, preserving the input order. The first
// invalid entry aborts the whole preparation.
func (m *Machine) prepareToSetDevicesAddresses(devicesAddresses []LinkLayerDeviceAddress) ([]ipAddressDoc, error) {
	var docs []ipAddressDoc
	for i := range devicesAddresses {
		args := devicesAddresses[i]
		doc, err := m.prepareOneSetDevicesAddresses(&args)
		if err != nil {
			return nil, errors.Trace(err)
		}
		docs = append(docs, *doc)
	}
	return docs, nil
}
// prepareOneSetDevicesAddresses validates args and, when valid, builds the
// corresponding ipAddressDoc. Any error is annotated with the offending
// CIDRAddress via the deferred annotation.
func (m *Machine) prepareOneSetDevicesAddresses(args *LinkLayerDeviceAddress) (_ *ipAddressDoc, err error) {
	defer errors.DeferredAnnotatef(&err, "invalid address %q", args.CIDRAddress)
	if validationErr := m.validateSetDevicesAddressesArgs(args); validationErr != nil {
		return nil, errors.Trace(validationErr)
	}
	return m.newIPAddressDocFromArgs(args)
}
// validateSetDevicesAddressesArgs checks args for SetDevicesAddresses:
// CIDRAddress must parse as a CIDR, DeviceName must be set and refer to an
// existing device on this machine, ConfigMethod must be known, and
// GatewayAddress (when given) must parse as an IP. Unusual device names are
// tolerated but logged.
func (m *Machine) validateSetDevicesAddressesArgs(args *LinkLayerDeviceAddress) error {
	if args.CIDRAddress == "" {
		return errors.NotValidf("empty CIDRAddress")
	}
	_, _, parseErr := net.ParseCIDR(args.CIDRAddress)
	if parseErr != nil {
		return errors.NewNotValid(parseErr, "CIDRAddress")
	}
	if args.DeviceName == "" {
		return errors.NotValidf("empty DeviceName")
	}
	// An odd device name is not fatal - just flag it in the logs.
	if !IsValidLinkLayerDeviceName(args.DeviceName) {
		logger.Warningf(
			"address %q on machine %q has invalid device name %q (using anyway)",
			args.CIDRAddress, m.Id(), args.DeviceName,
		)
	}
	if err := m.verifyDeviceAlreadyExists(args.DeviceName); err != nil {
		return errors.Trace(err)
	}
	if !IsValidAddressConfigMethod(string(args.ConfigMethod)) {
		return errors.NotValidf("ConfigMethod %q", args.ConfigMethod)
	}
	if args.GatewayAddress != "" && net.ParseIP(args.GatewayAddress) == nil {
		return errors.NotValidf("GatewayAddress %q", args.GatewayAddress)
	}
	return nil
}
// verifyDeviceAlreadyExists returns a NotFound error unless a link-layer
// device named deviceName exists on this machine.
func (m *Machine) verifyDeviceAlreadyExists(deviceName string) error {
	_, err := m.LinkLayerDevice(deviceName)
	switch {
	case errors.IsNotFound(err):
		return errors.NotFoundf("DeviceName %q on machine %q", deviceName, m.Id())
	case err != nil:
		return errors.Trace(err)
	}
	return nil
}
// newIPAddressDocFromArgs builds an ipAddressDoc for the already-validated
// args, normalizing the address value and subnet CIDR via net.ParseCIDR. An
// unknown subnet is tolerated (debug-logged only), but a known subnet must be
// alive.
func (m *Machine) newIPAddressDocFromArgs(args *LinkLayerDeviceAddress) (*ipAddressDoc, error) {
	ip, ipNet, err := net.ParseCIDR(args.CIDRAddress)
	if err != nil {
		// We already validated CIDRAddress earlier, so this cannot happen in
		// practice, but we handle it anyway.
		return nil, errors.Trace(err)
	}
	addressValue := ip.String()
	subnetCIDR := ipNet.String()
	subnet, err := m.st.Subnet(subnetCIDR)
	if errors.IsNotFound(err) {
		// Subnet not modeled - e.g. a machine-local one; proceed anyway.
		logger.Debugf(
			"address %q on machine %q uses unknown or machine-local subnet %q",
			addressValue, m.Id(), subnetCIDR,
		)
	} else if err != nil {
		return nil, errors.Trace(err)
	} else if err := m.verifySubnetAlive(subnet); err != nil {
		return nil, errors.Trace(err)
	}
	// The doc _id is the model-qualified global key of the address.
	globalKey := ipAddressGlobalKey(m.doc.Id, args.DeviceName, addressValue)
	ipAddressDocID := m.st.docID(globalKey)
	providerID := string(args.ProviderID)
	modelUUID := m.st.ModelUUID()
	newDoc := &ipAddressDoc{
		DocID:            ipAddressDocID,
		ModelUUID:        modelUUID,
		ProviderID:       providerID,
		DeviceName:       args.DeviceName,
		MachineID:        m.doc.Id,
		SubnetCIDR:       subnetCIDR,
		ConfigMethod:     args.ConfigMethod,
		Value:            addressValue,
		DNSServers:       args.DNSServers,
		DNSSearchDomains: args.DNSSearchDomains,
		GatewayAddress:   args.GatewayAddress,
	}
	return newDoc, nil
}
// verifySubnetAlive returns an error unless the given subnet is still Alive.
func (m *Machine) verifySubnetAlive(subnet *Subnet) error {
	if subnet.Life() == Alive {
		return nil
	}
	return errors.Errorf("subnet %q is not alive", subnet.CIDR())
}
// setDevicesAddressesFromDocsOps returns the transaction operations that
// insert new or update existing ipAddressDocs, asserting the owning device
// (and any known subnet) stays present. Docs producing no effective change
// contribute no ops at all. A ProviderID, once set, cannot be changed.
func (m *Machine) setDevicesAddressesFromDocsOps(newDocs []ipAddressDoc) ([]txn.Op, error) {
	addresses, closer := m.st.db().GetCollection(ipAddressesC)
	defer closer()
	var ops []txn.Op
	for _, newDoc := range newDocs {
		// Collect this doc's ops separately; they are only queued when
		// something actually changed (see hasChanges below).
		var thisDeviceOps []txn.Op
		hasChanges := false
		deviceDocID := m.linkLayerDeviceDocIDFromName(newDoc.DeviceName)
		thisDeviceOps = append(thisDeviceOps, assertLinkLayerDeviceExistsOp(deviceDocID))
		var existingDoc ipAddressDoc
		err := addresses.FindId(newDoc.DocID).One(&existingDoc)
		if err == mgo.ErrNotFound {
			// Address does not exist yet - insert it.
			hasChanges = true
			thisDeviceOps = append(thisDeviceOps, insertIPAddressDocOp(&newDoc))
			if newDoc.ProviderID != "" {
				id := network.Id(newDoc.ProviderID)
				thisDeviceOps = append(thisDeviceOps, m.st.networkEntityGlobalKeyOp("address", id))
			}
		} else if err == nil {
			// Address already exists - update what's possible.
			var ipOp txn.Op
			ipOp, hasChanges = updateIPAddressDocOp(&existingDoc, &newDoc)
			thisDeviceOps = append(thisDeviceOps, ipOp)
			if newDoc.ProviderID != "" {
				if existingDoc.ProviderID != "" && existingDoc.ProviderID != newDoc.ProviderID {
					return nil, errors.Errorf("cannot change ProviderID of link address %q", existingDoc.Value)
				}
				if existingDoc.ProviderID != newDoc.ProviderID {
					// Need to insert the new provider id in providerIDsC
					id := network.Id(newDoc.ProviderID)
					thisDeviceOps = append(thisDeviceOps, m.st.networkEntityGlobalKeyOp("address", id))
					hasChanges = true
				}
			}
		} else {
			return nil, errors.Trace(err)
		}
		thisDeviceOps, err = m.maybeAssertSubnetAliveOps(&newDoc, thisDeviceOps)
		if err != nil {
			return nil, errors.Trace(err)
		}
		// Skip assert-only batches so unchanged docs produce no ops.
		if hasChanges {
			ops = append(ops, thisDeviceOps...)
		}
	}
	return ops, nil
}
// maybeAssertSubnetAliveOps appends an assertion that newDoc's subnet is
// alive, when that subnet is known to the model. Unknown (machine-local)
// subnets need no assertion; a known but not-alive subnet is an error.
func (m *Machine) maybeAssertSubnetAliveOps(newDoc *ipAddressDoc, opsSoFar []txn.Op) ([]txn.Op, error) {
	subnet, err := m.st.Subnet(newDoc.SubnetCIDR)
	if errors.IsNotFound(err) {
		// Subnet is machine-local, no need to assert whether it's alive.
		return opsSoFar, nil
	} else if err != nil {
		return nil, errors.Trace(err)
	}
	if err := m.verifySubnetAlive(subnet); err != nil {
		return nil, errors.Trace(err)
	}
	// Subnet exists and is still alive, assert that it stays that way.
	return append(opsSoFar, txn.Op{
		C:      subnetsC,
		Id:     m.st.docID(newDoc.SubnetCIDR),
		Assert: isAliveDoc,
	}), nil
}
// RemoveAllAddresses removes all assigned addresses to all devices of the
// machine, in a single transaction. No error is returned when some or all of
// the addresses were already removed.
func (m *Machine) RemoveAllAddresses() error {
	removalOps, err := m.removeAllAddressesOps()
	if err != nil {
		return errors.Trace(err)
	}
	return m.st.runTransaction(removalOps)
}
// removeAllAddressesOps returns the operations removing every IP address
// document of this machine; the empty device name in the query is the
// match-any-device form used by AllAddresses as well.
func (m *Machine) removeAllAddressesOps() ([]txn.Op, error) {
	findQuery := findAddressesQuery(m.doc.Id, "")
	return m.st.removeMatchingIPAddressesDocOps(findQuery)
}
// AllAddresses returns all addresses assigned to all devices of the machine.
func (m *Machine) AllAddresses() ([]*Address, error) {
	var results []*Address
	collect := func(doc *ipAddressDoc) {
		results = append(results, newIPAddress(m.st, *doc))
	}
	if err := m.st.forEachIPAddressDoc(findAddressesQuery(m.doc.Id, ""), collect); err != nil {
		return nil, errors.Trace(err)
	}
	return results, nil
}
// AllSpaces returns the set of spaces that this machine is actively
// connected to.
func (m *Machine) AllSpaces() (set.Strings, error) {
	// TODO(jam): 2016-12-18 This should evolve to look at the
	// LinkLayerDevices directly, instead of using the Addresses the devices
	// are in to link back to spaces.
	addresses, err := m.AllAddresses()
	if err != nil {
		return nil, errors.Trace(err)
	}
	spaces := set.NewStrings()
	for _, address := range addresses {
		subnet, err := address.Subnet()
		switch {
		case errors.IsNotFound(err):
			// We don't know what this subnet is, so it can't be a space.
			// It might just be the loopback device.
			continue
		case err != nil:
			return nil, errors.Trace(err)
		}
		if name := subnet.SpaceName(); name != "" {
			spaces.Add(name)
		}
	}
	logger.Tracef("machine %q found AllSpaces() = %s",
		m.Id(), network.QuoteSpaceSet(spaces))
	return spaces, nil
}
// AllNetworkAddresses returns the result of AllAddresses(), but transformed to
// []network.Address.
func (m *Machine) AllNetworkAddresses() ([]network.Address, error) {
	stateAddresses, err := m.AllAddresses()
	if err != nil {
		return nil, errors.Trace(err)
	}
	// TODO(jam): 20161130 NetworkAddress object has a SpaceName attribute.
	// However, we are not filling in that information here.
	converted := make([]network.Address, len(stateAddresses))
	for i, addr := range stateAddresses {
		converted[i] = addr.NetworkAddress()
	}
	return converted, nil
}
// deviceMapToSortedList takes a map from device name to LinkLayerDevice
// object, and returns the list of LinkLayerDevice objects ordered by
// NaturallySortDeviceNames.
func deviceMapToSortedList(deviceMap map[string]*LinkLayerDevice) []*LinkLayerDevice {
	names := make([]string, 0, len(deviceMap))
	// Idiomatic single-variable range (was `for name, _ := range`, which is
	// the form flagged by staticcheck S1005).
	for name := range deviceMap {
		// name must == device.Name()
		names = append(names, name)
	}
	sortedNames := network.NaturallySortDeviceNames(names...)
	result := make([]*LinkLayerDevice, len(sortedNames))
	for i, name := range sortedNames {
		result[i] = deviceMap[name]
	}
	return result
}
// LinkLayerDevicesForSpaces takes a list of spaces, and returns the devices on
// this machine that are in that space that we feel would be useful for
// containers to know about. (eg, if there is a host device that has been
// bridged, we return the bridge, rather than the underlying device, but if we
// have only the host device, we return that.)
// Note that devices like 'lxdbr0' that are bridges that might not be
// externally accessible may be returned if "" is listed as one of the desired
// spaces.
func (m *Machine) LinkLayerDevicesForSpaces(spaces []string) (map[string][]*LinkLayerDevice, error) {
	addresses, err := m.AllAddresses()
	if err != nil {
		return nil, errors.Trace(err)
	}
	devices, err := m.AllLinkLayerDevices()
	if err != nil {
		return nil, errors.Trace(err)
	}
	deviceByName := make(map[string]*LinkLayerDevice, len(devices))
	for _, dev := range devices {
		deviceByName[dev.Name()] = dev
	}
	requestedSpaces := set.NewStrings(spaces...)
	spaceToDevices := make(map[string]map[string]*LinkLayerDevice, 0)
	processedDeviceNames := set.NewStrings()
	// includeDevice records device under spaceName; the inner map is keyed
	// by device name, which also deduplicates repeated devices.
	includeDevice := func(spaceName string, device *LinkLayerDevice) {
		spaceInfo, ok := spaceToDevices[spaceName]
		if !ok {
			spaceInfo = make(map[string]*LinkLayerDevice)
			spaceToDevices[spaceName] = spaceInfo
		}
		spaceInfo[device.Name()] = device
	}
	// First pass, iterate the addresses, lookup the associated spaces, and
	// gather the devices.
	for _, addr := range addresses {
		subnet, err := addr.Subnet()
		spaceName := ""
		if err != nil {
			if errors.IsNotFound(err) {
				// unknown subnets are considered part of the "unknown" space
				spaceName = ""
			} else {
				// We don't understand the error, so error out for now
				return nil, errors.Trace(err)
			}
		} else {
			spaceName = subnet.SpaceName()
		}
		device, ok := deviceByName[addr.DeviceName()]
		if !ok {
			return nil, errors.Errorf("address %v for machine %q refers to a missing device %q",
				addr, m.Id(), addr.DeviceName())
		}
		processedDeviceNames.Add(device.Name())
		if device.Type() == LoopbackDevice {
			// We skip loopback devices here
			continue
		}
		includeDevice(spaceName, device)
	}
	// Now grab any devices we may have missed. For now, any device without an
	// address must be in the "unknown" space.
	for devName, device := range deviceByName {
		if processedDeviceNames.Contains(devName) {
			continue
		}
		// Loopback devices aren't considered part of the empty space
		// Also, devices that are attached to another device also aren't
		// considered to be in the unknown space.
		if device.Type() == LoopbackDevice || device.ParentName() != "" {
			continue
		}
		includeDevice("", device)
	}
	// Filter down to the requested spaces, sorting each space's devices by
	// natural device-name order.
	result := make(map[string][]*LinkLayerDevice, len(spaceToDevices))
	for spaceName, deviceMap := range spaceToDevices {
		if !requestedSpaces.Contains(spaceName) {
			continue
		}
		result[spaceName] = deviceMapToSortedList(deviceMap)
	}
	return result, nil
}
// SetParentLinkLayerDevicesBeforeTheirChildren splits the given devicesArgs
// into multiple sets of args and calls SetLinkLayerDevices() for each set, such
// that child devices are set only after their parents.
//
// NOTE(review): args whose ParentName never appears among the given device
// names (and is not empty) are silently skipped - confirm callers rely on
// that.
func (m *Machine) SetParentLinkLayerDevicesBeforeTheirChildren(devicesArgs []LinkLayerDeviceArgs) error {
	seenNames := set.NewStrings("") // sentinel for empty ParentName.
	for {
		// Each round picks the devices whose parent has already been set
		// (or that have no parent).
		argsToSet := []LinkLayerDeviceArgs{}
		for _, args := range devicesArgs {
			if seenNames.Contains(args.Name) {
				// Already added earlier.
				continue
			}
			if seenNames.Contains(args.ParentName) {
				argsToSet = append(argsToSet, args)
			}
		}
		if len(argsToSet) == 0 {
			// We're done.
			break
		}
		logger.Debugf("setting link-layer devices %+v", argsToSet)
		if err := m.SetLinkLayerDevices(argsToSet...); IsProviderIDNotUniqueError(err) {
			// FIXME: Make updating devices with unchanged ProviderID idempotent.
			// FIXME: this obliterates the ProviderID of *all*
			// devices if any *one* of them is not unique.
			for i, args := range argsToSet {
				args.ProviderID = ""
				argsToSet[i] = args
			}
			if err := m.SetLinkLayerDevices(argsToSet...); err != nil {
				return errors.Trace(err)
			}
		} else if err != nil {
			return errors.Trace(err)
		}
		for _, args := range argsToSet {
			seenNames.Add(args.Name)
		}
	}
	return nil
}
// SetDevicesAddressesIdempotently calls SetDevicesAddresses() and if it fails
// with ErrProviderIDNotUnique, retries the call with all ProviderID fields in
// devicesAddresses set to empty.
func (m *Machine) SetDevicesAddressesIdempotently(devicesAddresses []LinkLayerDeviceAddress) error {
	err := m.SetDevicesAddresses(devicesAddresses...)
	if err == nil {
		return nil
	}
	if !IsProviderIDNotUniqueError(err) {
		return errors.Trace(err)
	}
	// FIXME: Make updating addresses with unchanged ProviderID idempotent.
	// FIXME: this obliterates the ProviderID of *all*
	// addresses if any *one* of them is not unique.
	for i := range devicesAddresses {
		devicesAddresses[i].ProviderID = ""
	}
	if retryErr := m.SetDevicesAddresses(devicesAddresses...); retryErr != nil {
		return errors.Trace(retryErr)
	}
	return nil
}
// DefineEthernetDeviceOnBridge returns the LinkLayerDeviceArgs for an Ethernet
// device with a freshly generated MAC address, parented on hostBridge, which
// must be a bridge device.
func DefineEthernetDeviceOnBridge(name string, hostBridge *LinkLayerDevice) (LinkLayerDeviceArgs, error) {
	if hostBridge.Type() != BridgeDevice {
		return LinkLayerDeviceArgs{}, errors.Errorf("hostBridge must be a Bridge Device not %q", hostBridge.Type())
	}
	args := LinkLayerDeviceArgs{
		Name:        name,
		Type:        EthernetDevice,
		MACAddress:  generateMACAddress(),
		MTU:         hostBridge.MTU(),
		IsUp:        true,
		IsAutoStart: true,
		ParentName:  hostBridge.globalKey(),
	}
	return args, nil
}
// macAddressTemplate is used to generate a MAC address for a container.
// Each '%02x' is replaced by a random two-digit hexadecimal value, while
// the rest is kept as-is.
const macAddressTemplate = "00:16:3e:%02x:%02x:%02x"

// generateMACAddress creates a random MAC address within the space defined by
// macAddressTemplate above.
//
// TODO(dimitern): We should make a best effort to ensure the MAC address we
// generate is unique at least within the current environment.
func generateMACAddress() string {
	var octets [3]interface{}
	for i := range octets {
		octets[i] = rand.Intn(256)
	}
	return fmt.Sprintf(macAddressTemplate, octets[:]...)
}
// MachineNetworkInfoResult contains an error or a list of NetworkInfo structures for a specific space.
type MachineNetworkInfoResult struct {
	// NetworkInfos lists the devices (and their addresses) found for the space.
	NetworkInfos []network.NetworkInfo
	// Error is nil on success; note it is a pointer to an error value, not
	// an error itself.
	Error *error
}
// addAddressToResult merges address into networkInfos: when an entry for the
// address's device already exists its address list is extended, otherwise a
// new NetworkInfo entry (with the device MAC, when the device is known) is
// appended.
func addAddressToResult(networkInfos []network.NetworkInfo, address *Address) ([]network.NetworkInfo, error) {
	ifaceAddress := network.InterfaceAddress{
		Address: address.Value(),
		CIDR:    address.SubnetCIDR(),
	}
	deviceName := address.DeviceName()
	for i := range networkInfos {
		if networkInfos[i].InterfaceName == deviceName {
			networkInfos[i].Addresses = append(networkInfos[i].Addresses, ifaceAddress)
			return networkInfos, nil
		}
	}
	// A missing device is tolerated - the MAC just stays empty.
	macAddress := ""
	if device, err := address.Device(); err == nil {
		macAddress = device.MACAddress()
	} else if !errors.IsNotFound(err) {
		return nil, err
	}
	return append(networkInfos, network.NetworkInfo{
		InterfaceName: deviceName,
		MACAddress:    macAddress,
		Addresses:     []network.InterfaceAddress{ifaceAddress},
	}), nil
}
// GetNetworkInfoForSpaces returns MachineNetworkInfoResult with a list of devices for each space in spaces
// TODO(wpk): 2017-05-04 This does not work for L2-only devices as it iterates over addresses, needs to be fixed.
// When changing the method we have to keep the ordering.
func (m *Machine) GetNetworkInfoForSpaces(spaces set.Strings) map[string](MachineNetworkInfoResult) {
	results := make(map[string](MachineNetworkInfoResult))
	// The empty space name stands for the machine's preferred private
	// address; resolve it up front so addresses can be matched below.
	var privateAddress network.Address
	if spaces.Contains("") {
		var err error
		privateAddress, err = m.PrivateAddress()
		if err != nil {
			// Renamed from `error` - don't shadow the builtin type.
			annotatedErr := errors.Annotatef(err, "getting machine %q preferred private address", m.MachineTag())
			results[""] = MachineNetworkInfoResult{Error: &annotatedErr}
			spaces.Remove("")
		}
	}
	addresses, err := m.AllAddresses()
	logger.Debugf("Looking for something from spaces %v in %v", spaces, addresses)
	if err != nil {
		// Report the failure for every requested space not already populated.
		newErr := errors.Annotate(err, "cannot get devices addresses")
		result := MachineNetworkInfoResult{Error: &newErr}
		for space := range spaces {
			if _, ok := results[space]; !ok {
				results[space] = result
			}
		}
		return results
	}
	actualSpaces := set.NewStrings()
	for _, addr := range addresses {
		subnet, err := addr.Subnet()
		switch {
		case errors.IsNotFound(err):
			logger.Debugf("skipping %s: not linked to a known subnet (%v)", addr, err)
		case err != nil:
			logger.Errorf("cannot get subnet for address %q - %q", addr, err)
		default:
			space := subnet.SpaceName()
			actualSpaces.Add(space)
			if spaces.Contains(space) {
				r := results[space]
				r.NetworkInfos, err = addAddressToResult(r.NetworkInfos, addr)
				if err != nil {
					r.Error = &err
				}
				// BUG FIX: store the updated result in the error case too;
				// previously the error was assigned to r but r was never
				// written back to the map, silently dropping it.
				results[space] = r
			}
			if spaces.Contains("") && privateAddress.Value == addr.Value() {
				r := results[""]
				r.NetworkInfos, err = addAddressToResult(r.NetworkInfos, addr)
				if err != nil {
					r.Error = &err
				}
				results[""] = r
			}
		}
	}
	// Every requested space with no matching device gets an explanatory
	// error. BUG FIX: the `return results` used to sit inside this loop, so
	// at most one missing space was recorded before returning.
	actualSpacesStr := strings.Join(actualSpaces.Values(), ",")
	for space := range spaces {
		if _, ok := results[space]; !ok {
			newErr := errors.Errorf("machine %q has no devices in space %q, only spaces %s", m.doc.Id, space, actualSpacesStr)
			results[space] = MachineNetworkInfoResult{
				Error: &newErr,
			}
		}
	}
	return results
}
Use QuoteSpaceSet
// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package state
import (
"fmt"
"math/rand"
"net"
"github.com/juju/errors"
jujutxn "github.com/juju/txn"
"github.com/juju/utils/set"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
"gopkg.in/mgo.v2/txn"
"github.com/juju/juju/network"
)
// defaultSpaceName is the name of the default space to assign containers.
// Currently hard-coded to 'default'; we may consider making this a model
// config option later.
const defaultSpaceName = "default"
// LinkLayerDevice returns the link-layer device matching the given name. An
// error satisfying errors.IsNotFound() is returned when no such device exists
// on the machine.
func (m *Machine) LinkLayerDevice(name string) (*LinkLayerDevice, error) {
	linkLayerDevices, closer := m.st.db().GetCollection(linkLayerDevicesC)
	defer closer()
	deviceAsString := m.deviceAsStringFromName(name)
	var doc linkLayerDeviceDoc
	err := linkLayerDevices.FindId(m.linkLayerDeviceDocIDFromName(name)).One(&doc)
	switch {
	case err == mgo.ErrNotFound:
		return nil, errors.NotFoundf("%s", deviceAsString)
	case err != nil:
		return nil, errors.Annotatef(err, "cannot get %s", deviceAsString)
	}
	return newLinkLayerDevice(m.st, doc), nil
}
// linkLayerDeviceDocIDFromName returns the model-qualified document _id of
// the named device on this machine.
func (m *Machine) linkLayerDeviceDocIDFromName(deviceName string) string {
	globalKey := m.linkLayerDeviceGlobalKeyFromName(deviceName)
	return m.st.docID(globalKey)
}
// linkLayerDeviceGlobalKeyFromName returns the global key of the named
// device on this machine.
func (m *Machine) linkLayerDeviceGlobalKeyFromName(deviceName string) string {
	return linkLayerDeviceGlobalKey(m.doc.Id, deviceName)
}
// deviceAsStringFromName returns a human-readable description of the named
// device on this machine, for use in error messages.
func (m *Machine) deviceAsStringFromName(deviceName string) string {
	return fmt.Sprintf("device %q on machine %q", deviceName, m.doc.Id)
}
// AllLinkLayerDevices returns all existing link-layer devices of the machine.
func (m *Machine) AllLinkLayerDevices() ([]*LinkLayerDevice, error) {
	var devices []*LinkLayerDevice
	collect := func(doc *linkLayerDeviceDoc) {
		devices = append(devices, newLinkLayerDevice(m.st, *doc))
	}
	if err := m.forEachLinkLayerDeviceDoc(nil, collect); err != nil {
		return nil, errors.Trace(err)
	}
	return devices, nil
}
// forEachLinkLayerDeviceDoc invokes callbackFunc for each link-layer device
// document of this machine. When docFieldsToSelect is non-nil it is used as a
// MongoDB projection, so only the selected fields are populated in resultDoc.
func (m *Machine) forEachLinkLayerDeviceDoc(docFieldsToSelect bson.D, callbackFunc func(resultDoc *linkLayerDeviceDoc)) error {
	linkLayerDevices, closer := m.st.db().GetCollection(linkLayerDevicesC)
	defer closer()
	query := linkLayerDevices.Find(bson.D{{"machine-id", m.doc.Id}})
	if docFieldsToSelect != nil {
		query = query.Select(docFieldsToSelect)
	}
	iter := query.Iter()
	var resultDoc linkLayerDeviceDoc
	for iter.Next(&resultDoc) {
		callbackFunc(&resultDoc)
	}
	// Close reports any error encountered during iteration.
	return errors.Trace(iter.Close())
}
// AllProviderInterfaceInfos returns the provider details for all of
// the link layer devices belonging to this machine. These can be used
// to identify the devices when interacting with the provider
// directly (for example, releasing container addresses).
func (m *Machine) AllProviderInterfaceInfos() ([]network.ProviderInterfaceInfo, error) {
	devices, err := m.AllLinkLayerDevices()
	if err != nil {
		return nil, errors.Trace(err)
	}
	infos := make([]network.ProviderInterfaceInfo, len(devices))
	for i, device := range devices {
		infos[i] = network.ProviderInterfaceInfo{
			InterfaceName: device.Name(),
			MACAddress:    device.MACAddress(),
			ProviderId:    device.ProviderID(),
		}
	}
	return infos, nil
}
// RemoveAllLinkLayerDevices removes all existing link-layer devices of the
// machine in a single transaction. No error is returned when some or all of the
// devices were already removed.
func (m *Machine) RemoveAllLinkLayerDevices() error {
	removalOps, err := m.removeAllLinkLayerDevicesOps()
	if err != nil {
		return errors.Trace(err)
	}
	return m.st.runTransaction(removalOps)
}
// removeAllLinkLayerDevicesOps returns the operations removing every
// link-layer device document of this machine (unconditionally), plus the
// providerIDsC cleanup op for each device that has a ProviderID.
func (m *Machine) removeAllLinkLayerDevicesOps() ([]txn.Op, error) {
	var ops []txn.Op
	callbackFunc := func(resultDoc *linkLayerDeviceDoc) {
		removeOps := removeLinkLayerDeviceUnconditionallyOps(resultDoc.DocID)
		ops = append(ops, removeOps...)
		if resultDoc.ProviderID != "" {
			providerId := network.Id(resultDoc.ProviderID)
			op := m.st.networkEntityGlobalKeyRemoveOp("linklayerdevice", providerId)
			ops = append(ops, op)
		}
	}
	// Fetch complete documents: the callback needs ProviderID in addition to
	// DocID. The previous _id-only projection (bson.D{{"_id", 1}}) left
	// resultDoc.ProviderID always empty, silently skipping the providerIDsC
	// cleanup above.
	if err := m.forEachLinkLayerDeviceDoc(nil, callbackFunc); err != nil {
		return nil, errors.Trace(err)
	}
	return ops, nil
}
// LinkLayerDeviceArgs contains the arguments accepted by Machine.SetLinkLayerDevices().
// See SetLinkLayerDevices for how the fields are validated.
type LinkLayerDeviceArgs struct {
	// Name is the name of the device as it appears on the machine.
	Name string
	// MTU is the maximum transmission unit the device can handle.
	MTU uint
	// ProviderID is a provider-specific ID of the device. Empty when not
	// supported by the provider. Cannot be cleared once set.
	ProviderID network.Id
	// Type is the type of the underlying link-layer device.
	Type LinkLayerDeviceType
	// MACAddress is the media access control address for the device.
	MACAddress string
	// IsAutoStart is true if the device should be activated on boot.
	IsAutoStart bool
	// IsUp is true when the device is up (enabled).
	IsUp bool
	// ParentName is the name of the parent device, which may be empty. If set,
	// it needs to be an existing device on the same machine, unless the current
	// device is inside a container, in which case ParentName can be a global
	// key of a BridgeDevice on the host machine of the container. Traffic
	// originating from a device egresses from its parent device.
	ParentName string
}
// SetLinkLayerDevices sets link-layer devices on the machine, adding or
// updating existing devices as needed, in a single transaction. ProviderID
// field can be empty if not supported by the provider, but when set must be
// unique within the model, and cannot be unset once set. Errors are returned in
// the following cases:
// - Machine is no longer alive or is missing;
// - Model no longer alive;
// - errors.NotValidError, when any of the fields in args contain invalid values;
// - ErrProviderIDNotUnique, when one or more specified ProviderIDs are not unique;
// Setting new parent devices must be done in a separate call than setting their
// children on the same machine.
func (m *Machine) SetLinkLayerDevices(devicesArgs ...LinkLayerDeviceArgs) (err error) {
	defer errors.DeferredAnnotatef(&err, "cannot set link-layer devices to machine %q", m.doc.Id)
	if len(devicesArgs) == 0 {
		// Fixed log message: previously said "no device addresses to set"
		// (copy-pasted from SetDevicesAddresses), but this sets devices.
		logger.Warningf("no devices to set")
		return nil
	}
	// buildTxn is retried by the transaction runner; attempt is 0 first time.
	buildTxn := func(attempt int) ([]txn.Op, error) {
		newDocs, err := m.prepareToSetLinkLayerDevices(devicesArgs)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if m.doc.Life != Alive {
			return nil, errors.Errorf("machine %q not alive", m.doc.Id)
		}
		if attempt > 0 {
			// A previous attempt aborted; re-check liveness and whether a
			// ProviderID collision caused the abort, to report it explicitly.
			if err := m.isStillAlive(); err != nil {
				return nil, errors.Trace(err)
			}
			allIds, err := m.st.allProviderIDsForLinkLayerDevices()
			if err != nil {
				return nil, errors.Trace(err)
			}
			for _, args := range devicesArgs {
				if allIds.Contains(string(args.ProviderID)) {
					err := NewProviderIDNotUniqueError(args.ProviderID)
					return nil, errors.Annotatef(err, "invalid device %q", args.Name)
				}
			}
		}
		// We've checked the model is alive directly, and we assert the machine is alive, we don't need to also
		// assert the model is alive, because then the machine would be dying as well.
		ops := []txn.Op{
			m.assertAliveOp(),
		}
		setDevicesOps, err := m.setDevicesFromDocsOps(newDocs)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if len(setDevicesOps) == 0 {
			// No need to assert only that the machine is alive
			logger.Debugf("no changes to LinkLayerDevices for machine %q", m.Id())
			return nil, jujutxn.ErrNoOperations
		}
		return append(ops, setDevicesOps...), nil
	}
	if err := m.st.run(buildTxn); err != nil {
		return errors.Trace(err)
	}
	return nil
}
// allProviderIDsForLinkLayerDevices returns every link-layer device
// ProviderID currently recorded in this model.
func (st *State) allProviderIDsForLinkLayerDevices() (set.Strings, error) {
	return st.allProviderIDsForEntity("linklayerdevice")
}
// allProviderIDsForAddresses returns every address ProviderID currently
// recorded in this model.
func (st *State) allProviderIDsForAddresses() (set.Strings, error) {
	return st.allProviderIDsForEntity("address")
}
// allProviderIDsForEntity collects the provider IDs of the given entity kind
// (e.g. "linklayerdevice" or "address") from providerIDsC for the current
// model. Stored _ids have the form "<model-uuid>:<entity>:<provider-id>"; the
// returned set contains just the trailing provider-id parts.
func (st *State) allProviderIDsForEntity(entityName string) (set.Strings, error) {
	idCollection, closer := st.db().GetCollection(providerIDsC)
	defer closer()
	allProviderIDs := set.NewStrings()
	var doc struct {
		ID string `bson:"_id"`
	}
	pattern := fmt.Sprintf("^%s:%s:.+$", st.ModelUUID(), entityName)
	modelProviderIDs := bson.D{{"_id", bson.D{{"$regex", pattern}}}}
	iter := idCollection.Find(modelProviderIDs).Iter()
	for iter.Next(&doc) {
		// localID strips the model UUID prefix; then drop "<entity>:".
		localProviderID := st.localID(doc.ID)[len(entityName)+1:]
		allProviderIDs.Add(localProviderID)
	}
	if err := iter.Close(); err != nil {
		return nil, errors.Trace(err)
	}
	return allProviderIDs, nil
}
// prepareToSetLinkLayerDevices validates each entry of devicesArgs and
// converts it into a linkLayerDeviceDoc, rejecting duplicate names within the
// same batch.
func (m *Machine) prepareToSetLinkLayerDevices(devicesArgs []LinkLayerDeviceArgs) ([]linkLayerDeviceDoc, error) {
	var docs []linkLayerDeviceDoc
	seen := set.NewStrings()
	for i := range devicesArgs {
		args := devicesArgs[i]
		doc, err := m.prepareOneSetLinkLayerDeviceArgs(&args, seen)
		if err != nil {
			return nil, errors.Trace(err)
		}
		seen.Add(args.Name)
		docs = append(docs, *doc)
	}
	return docs, nil
}
// prepareOneSetLinkLayerDeviceArgs validates args (annotating any error with
// the device name), rejects names already seen in this batch, and builds the
// corresponding linkLayerDeviceDoc.
func (m *Machine) prepareOneSetLinkLayerDeviceArgs(args *LinkLayerDeviceArgs, pendingNames set.Strings) (_ *linkLayerDeviceDoc, err error) {
	defer errors.DeferredAnnotatef(&err, "invalid device %q", args.Name)
	if validationErr := m.validateSetLinkLayerDeviceArgs(args); validationErr != nil {
		return nil, errors.Trace(validationErr)
	}
	if pendingNames.Contains(args.Name) {
		return nil, errors.NewNotValid(nil, "Name specified more than once")
	}
	return m.newLinkLayerDeviceDocFromArgs(args), nil
}
// validateSetLinkLayerDeviceArgs checks args for SetLinkLayerDevices: Name
// must be set, ParentName (when given) must validate, Type must be a known
// device type, and MACAddress (when given) must parse. Unusual names are
// tolerated but logged.
func (m *Machine) validateSetLinkLayerDeviceArgs(args *LinkLayerDeviceArgs) error {
	if args.Name == "" {
		return errors.NotValidf("empty Name")
	}
	// An odd device name is not fatal - just flag it in the logs.
	if !IsValidLinkLayerDeviceName(args.Name) {
		logger.Warningf(
			"link-layer device %q on machine %q has invalid name (using anyway)",
			args.Name, m.Id(),
		)
	}
	if args.ParentName != "" {
		if err := m.validateLinkLayerDeviceParent(args); err != nil {
			return errors.Trace(err)
		}
	}
	if !IsValidLinkLayerDeviceType(string(args.Type)) {
		return errors.NotValidf("Type %q", args.Type)
	}
	if args.MACAddress == "" {
		return nil
	}
	if _, err := net.ParseMAC(args.MACAddress); err != nil {
		return errors.NotValidf("MACAddress %q", args.MACAddress)
	}
	return nil
}
// validateLinkLayerDeviceParent validates args.ParentName, which may be
// either a plain device name on this machine, or a global key referring to a
// BridgeDevice on this container's host machine.
func (m *Machine) validateLinkLayerDeviceParent(args *LinkLayerDeviceArgs) error {
	hostMachineID, parentDeviceName, err := parseLinkLayerDeviceParentNameAsGlobalKey(args.ParentName)
	if err != nil {
		return errors.Trace(err)
	} else if hostMachineID == "" {
		// Not a global key, so validate as usual.
		if err := m.validateParentDeviceNameWhenNotAGlobalKey(args); errors.IsNotFound(err) {
			return errors.NewNotValid(err, "ParentName not valid")
		} else if err != nil {
			return errors.Trace(err)
		}
		return nil
	}
	ourParentMachineID, hasParent := m.ParentId()
	if !hasParent {
		// Using global key for ParentName not allowed for non-container machine
		// devices.
		return errors.NotValidf("ParentName %q for non-container machine %q", args.ParentName, m.Id())
	}
	if hostMachineID != ourParentMachineID {
		// ParentName as global key only allowed when the key's machine ID is
		// the container's host machine.
		return errors.NotValidf("ParentName %q on non-host machine %q", args.ParentName, hostMachineID)
	}
	err = m.verifyHostMachineParentDeviceExistsAndIsABridgeDevice(hostMachineID, parentDeviceName)
	return errors.Trace(err)
}
// parseLinkLayerDeviceParentNameAsGlobalKey attempts to interpret parentName
// as a link-layer device global key. It returns empty results and no error
// when parentName cannot be a global key, and an error when it looks like one
// but is malformed.
func parseLinkLayerDeviceParentNameAsGlobalKey(parentName string) (hostMachineID, parentDeviceName string, err error) {
	machineID, deviceName, canBeGlobalKey := parseLinkLayerDeviceGlobalKey(parentName)
	switch {
	case !canBeGlobalKey:
		return "", "", nil
	case machineID == "":
		return "", "", errors.NotValidf("ParentName %q format", parentName)
	}
	return machineID, deviceName, nil
}
// verifyHostMachineParentDeviceExistsAndIsABridgeDevice checks that
// hostMachineID refers to an alive machine owning a device named
// parentDeviceName whose type is BridgeDevice.
func (m *Machine) verifyHostMachineParentDeviceExistsAndIsABridgeDevice(hostMachineID, parentDeviceName string) error {
	hostMachine, err := m.st.Machine(hostMachineID)
	if errors.IsNotFound(err) || err == nil && hostMachine.Life() != Alive {
		return errors.Errorf("host machine %q of parent device %q not found or not alive", hostMachineID, parentDeviceName)
	} else if err != nil {
		return errors.Trace(err)
	}
	parentDevice, err := hostMachine.LinkLayerDevice(parentDeviceName)
	if errors.IsNotFound(err) {
		return errors.NotFoundf("parent device %q on host machine %q", parentDeviceName, hostMachineID)
	} else if err != nil {
		return errors.Trace(err)
	}
	if parentDevice.Type() != BridgeDevice {
		errorMessage := fmt.Sprintf(
			"parent device %q on host machine %q must be of type %q, not type %q",
			parentDeviceName, hostMachineID, BridgeDevice, parentDevice.Type(),
		)
		return errors.NewNotValid(nil, errorMessage)
	}
	return nil
}
// validateParentDeviceNameWhenNotAGlobalKey checks a plain (non-global-key)
// ParentName: it logs unusual names, rejects self-parenting, and verifies the
// parent device exists on this machine.
func (m *Machine) validateParentDeviceNameWhenNotAGlobalKey(args *LinkLayerDeviceArgs) error {
	if !IsValidLinkLayerDeviceName(args.ParentName) {
		logger.Warningf(
			"parent link-layer device %q on machine %q has invalid name (using anyway)",
			args.ParentName, m.Id(),
		)
	}
	if args.Name == args.ParentName {
		return errors.NewNotValid(nil, "Name and ParentName must be different")
	}
	return errors.Trace(m.verifyParentDeviceExists(args.ParentName))
}
// verifyParentDeviceExists returns an error unless a device named parentName
// exists on this machine.
func (m *Machine) verifyParentDeviceExists(parentName string) error {
	_, err := m.LinkLayerDevice(parentName)
	// errors.Trace(nil) is nil, so success passes straight through.
	return errors.Trace(err)
}
// newLinkLayerDeviceDocFromArgs builds a linkLayerDeviceDoc for the
// already-validated args, qualifying the doc _id with the model UUID.
func (m *Machine) newLinkLayerDeviceDocFromArgs(args *LinkLayerDeviceArgs) *linkLayerDeviceDoc {
	linkLayerDeviceDocID := m.linkLayerDeviceDocIDFromName(args.Name)
	providerID := string(args.ProviderID)
	modelUUID := m.st.ModelUUID()
	return &linkLayerDeviceDoc{
		DocID:       linkLayerDeviceDocID,
		Name:        args.Name,
		ModelUUID:   modelUUID,
		MTU:         args.MTU,
		ProviderID:  providerID,
		MachineID:   m.doc.Id,
		Type:        args.Type,
		MACAddress:  args.MACAddress,
		IsAutoStart: args.IsAutoStart,
		IsUp:        args.IsUp,
		ParentName:  args.ParentName,
	}
}
// isStillAlive returns an error unless this machine's document is present and
// its life is Alive.
func (m *Machine) isStillAlive() error {
	machineAlive, err := isAlive(m.st, machinesC, m.doc.Id)
	if err != nil {
		return errors.Trace(err)
	}
	if !machineAlive {
		return errors.Errorf("machine not found or not alive")
	}
	return nil
}
// assertAliveOp returns a txn.Op that asserts this machine is still alive.
func (m *Machine) assertAliveOp() txn.Op {
	op := txn.Op{
		C:      machinesC,
		Id:     m.doc.Id,
		Assert: isAliveDoc,
	}
	return op
}
// setDevicesFromDocsOps returns the txn ops needed to insert or update each
// of the given device docs, depending on whether it already exists.
func (m *Machine) setDevicesFromDocsOps(newDocs []linkLayerDeviceDoc) ([]txn.Op, error) {
	devices, closer := m.st.db().GetCollection(linkLayerDevicesC)
	defer closer()

	var ops []txn.Op
	for _, newDoc := range newDocs {
		var existingDoc linkLayerDeviceDoc
		switch err := devices.FindId(newDoc.DocID).One(&existingDoc); err {
		case mgo.ErrNotFound:
			// Device does not exist yet - insert it.
			insertOps, err := m.insertLinkLayerDeviceOps(&newDoc)
			if err != nil {
				return nil, errors.Trace(err)
			}
			ops = append(ops, insertOps...)
		case nil:
			// Device already exists - update what's possible.
			updateOps, err := m.updateLinkLayerDeviceOps(&existingDoc, &newDoc)
			if err != nil {
				return nil, errors.Trace(err)
			}
			ops = append(ops, updateOps...)
		default:
			return nil, errors.Trace(err)
		}
	}
	return ops, nil
}
// insertLinkLayerDeviceOps returns the txn ops needed to insert newDoc: the
// device doc itself plus its refs doc, preceded by ops that (when a parent
// is set and resolvable) assert the parent exists and bump its NumChildren,
// and (when a ProviderID is set) register that ID globally.
func (m *Machine) insertLinkLayerDeviceOps(newDoc *linkLayerDeviceDoc) ([]txn.Op, error) {
	modelUUID, linkLayerDeviceDocID := newDoc.ModelUUID, newDoc.DocID
	var ops []txn.Op
	if newDoc.ParentName != "" {
		// Resolve the parent (same machine or global key) to its DocID so we
		// can assert it exists and account for the new child.
		newParentDocID, err := m.parentDocIDFromDeviceDoc(newDoc)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if newParentDocID != "" {
			ops = append(ops, assertLinkLayerDeviceExistsOp(newParentDocID))
			ops = append(ops, incrementDeviceNumChildrenOp(newParentDocID))
		}
	}
	if newDoc.ProviderID != "" {
		// Claim the provider ID model-wide; fails the txn if not unique.
		id := network.Id(newDoc.ProviderID)
		ops = append(ops, m.st.networkEntityGlobalKeyOp("linklayerdevice", id))
	}
	return append(ops,
		insertLinkLayerDeviceDocOp(newDoc),
		insertLinkLayerDevicesRefsOp(modelUUID, linkLayerDeviceDocID),
	), nil
}
// parentDocIDFromDeviceDoc resolves doc.ParentName to the parent device's
// DocID, whether ParentName is a plain name (a device on this machine) or a
// global key (a device on another host machine).
func (m *Machine) parentDocIDFromDeviceDoc(doc *linkLayerDeviceDoc) (string, error) {
	hostMachineID, parentName, err := parseLinkLayerDeviceParentNameAsGlobalKey(doc.ParentName)
	switch {
	case err != nil:
		return "", errors.Trace(err)
	case parentName == "":
		// doc.ParentName is not a global key, but on the same machine.
		return m.linkLayerDeviceDocIDFromName(doc.ParentName), nil
	default:
		// doc.ParentName is a global key, on a different host machine.
		return m.st.docID(linkLayerDeviceGlobalKey(hostMachineID, parentName)), nil
	}
}
// updateLinkLayerDeviceOps returns the txn ops needed to bring existingDoc in
// line with newDoc, keeping both parents' NumChildren refcounts in sync when
// the parent was changed, set, or unset, and registering a newly set
// ProviderID. Changing an already-set non-empty ProviderID is an error.
func (m *Machine) updateLinkLayerDeviceOps(existingDoc, newDoc *linkLayerDeviceDoc) (ops []txn.Op, err error) {
	// none of the ops in this function are assert-only, so callers can know if there are any changes by just checking len(ops)
	var newParentDocID string
	if newDoc.ParentName != "" {
		newParentDocID, err = m.parentDocIDFromDeviceDoc(newDoc)
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
	var existingParentDocID string
	if existingDoc.ParentName != "" {
		existingParentDocID, err = m.parentDocIDFromDeviceDoc(existingDoc)
		if err != nil {
			return nil, errors.Trace(err)
		}
	}
	// Three parent transitions need refcount adjustments: changed (both set,
	// different), newly set, and unset.
	if newParentDocID != "" && existingParentDocID != "" && newParentDocID != existingParentDocID {
		ops = append(ops,
			assertLinkLayerDeviceExistsOp(newParentDocID),
			incrementDeviceNumChildrenOp(newParentDocID),
			assertLinkLayerDeviceExistsOp(existingParentDocID),
			decrementDeviceNumChildrenOp(existingParentDocID),
		)
	} else if newParentDocID != "" && existingParentDocID == "" {
		ops = append(ops, assertLinkLayerDeviceExistsOp(newParentDocID))
		ops = append(ops, incrementDeviceNumChildrenOp(newParentDocID))
	} else if newParentDocID == "" && existingParentDocID != "" {
		ops = append(ops, assertLinkLayerDeviceExistsOp(existingParentDocID))
		ops = append(ops, decrementDeviceNumChildrenOp(existingParentDocID))
	}
	updateDeviceOp, deviceHasChanges := updateLinkLayerDeviceDocOp(existingDoc, newDoc)
	if deviceHasChanges {
		// we only include the op if it will actually change something
		ops = append(ops, updateDeviceOp)
	}
	if newDoc.ProviderID != "" {
		if existingDoc.ProviderID != "" && existingDoc.ProviderID != newDoc.ProviderID {
			return nil, errors.Errorf("cannot change ProviderID of link layer device %q", existingDoc.Name)
		}
		if existingDoc.ProviderID != newDoc.ProviderID {
			// Need to insert the new provider id in providerIDsC
			id := network.Id(newDoc.ProviderID)
			ops = append(ops, m.st.networkEntityGlobalKeyOp("linklayerdevice", id))
		}
	}
	return ops, nil
}
// LinkLayerDeviceAddress contains an IP address assigned to a link-layer
// device. It is the per-address argument type of SetDevicesAddresses.
type LinkLayerDeviceAddress struct {
	// DeviceName is the name of the link-layer device that has this address.
	DeviceName string

	// ConfigMethod is the method used to configure this address.
	ConfigMethod AddressConfigMethod

	// ProviderID is the provider-specific ID of the address. Empty when not
	// supported. Cannot be changed once set to non-empty.
	ProviderID network.Id

	// CIDRAddress is the IP address assigned to the device, in CIDR format
	// (e.g. 10.20.30.5/24 or fc00:1234::/64).
	CIDRAddress string

	// DNSServers contains a list of DNS nameservers to use, which can be empty.
	DNSServers []string

	// DNSSearchDomains contains a list of DNS domain names to qualify
	// hostnames, and can be empty.
	DNSSearchDomains []string

	// GatewayAddress is the address of the gateway to use, which can be empty.
	GatewayAddress string
}
// SetDevicesAddresses sets the addresses of all devices in devicesAddresses,
// adding new or updating existing assignments as needed, in a single
// transaction. ProviderID field can be empty if not supported by the provider,
// but when set must be unique within the model. Errors are returned in the
// following cases:
// - Machine is no longer alive or is missing;
// - Subnet inferred from any CIDRAddress field in args is known but no longer
// alive (no error reported if the CIDRAddress does not match a known subnet);
// - Model no longer alive;
// - errors.NotValidError, when any of the fields in args contain invalid values;
// - errors.NotFoundError, when any DeviceName in args refers to unknown device;
// - ErrProviderIDNotUnique, when one or more specified ProviderIDs are not unique.
func (m *Machine) SetDevicesAddresses(devicesAddresses ...LinkLayerDeviceAddress) (err error) {
	defer errors.DeferredAnnotatef(&err, "cannot set link-layer device addresses of machine %q", m.doc.Id)
	if len(devicesAddresses) == 0 {
		logger.Warningf("no device addresses to set")
		return nil
	}
	buildTxn := func(attempt int) ([]txn.Op, error) {
		// Re-validate and rebuild the docs on every attempt, as state may
		// have changed since the previous (aborted) attempt.
		newDocs, err := m.prepareToSetDevicesAddresses(devicesAddresses)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if err := m.isStillAlive(); err != nil {
			return nil, errors.Trace(err)
		}
		if attempt > 0 {
			// A previous attempt aborted; check whether a duplicate
			// ProviderID was the cause before retrying blindly.
			allIds, err := m.st.allProviderIDsForAddresses()
			if err != nil {
				return nil, errors.Trace(err)
			}
			for _, args := range devicesAddresses {
				if allIds.Contains(string(args.ProviderID)) {
					err := NewProviderIDNotUniqueError(args.ProviderID)
					return nil, errors.Annotatef(err, "invalid address %q", args.CIDRAddress)
				}
			}
		}
		// we checked the model is active, but we only assert the machine is alive, because it will be dying if
		// the model is dying.
		ops := []txn.Op{
			m.assertAliveOp(),
		}
		setAddressesOps, err := m.setDevicesAddressesFromDocsOps(newDocs)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if len(setAddressesOps) == 0 {
			// no actual address changes to be queued, so no need to create an op that just asserts
			// the machine is alive
			logger.Debugf("no changes to DevicesAddresses for machine %q", m.Id())
			return nil, jujutxn.ErrNoOperations
		}
		return append(ops, setAddressesOps...), nil
	}
	if err := m.st.run(buildTxn); err != nil {
		return errors.Trace(err)
	}
	return nil
}
// prepareToSetDevicesAddresses validates each of the given addresses and
// returns the corresponding ipAddressDoc values to insert or update.
func (m *Machine) prepareToSetDevicesAddresses(devicesAddresses []LinkLayerDeviceAddress) ([]ipAddressDoc, error) {
	var pendingDocs []ipAddressDoc
	for i := range devicesAddresses {
		newDoc, err := m.prepareOneSetDevicesAddresses(&devicesAddresses[i])
		if err != nil {
			return nil, errors.Trace(err)
		}
		pendingDocs = append(pendingDocs, *newDoc)
	}
	return pendingDocs, nil
}
// prepareOneSetDevicesAddresses validates a single address and builds its
// ipAddressDoc, annotating any error with the offending CIDRAddress.
func (m *Machine) prepareOneSetDevicesAddresses(args *LinkLayerDeviceAddress) (_ *ipAddressDoc, err error) {
	defer errors.DeferredAnnotatef(&err, "invalid address %q", args.CIDRAddress)

	if err = m.validateSetDevicesAddressesArgs(args); err != nil {
		return nil, errors.Trace(err)
	}
	return m.newIPAddressDocFromArgs(args)
}
// validateSetDevicesAddressesArgs ensures args has a parseable CIDRAddress, a
// non-empty DeviceName referring to an existing device, a known ConfigMethod,
// and - when given - a parseable GatewayAddress. An invalid device name only
// logs a warning, to stay permissive with existing data.
func (m *Machine) validateSetDevicesAddressesArgs(args *LinkLayerDeviceAddress) error {
	if args.CIDRAddress == "" {
		return errors.NotValidf("empty CIDRAddress")
	}
	if _, _, err := net.ParseCIDR(args.CIDRAddress); err != nil {
		return errors.NewNotValid(err, "CIDRAddress")
	}

	if args.DeviceName == "" {
		return errors.NotValidf("empty DeviceName")
	}
	if !IsValidLinkLayerDeviceName(args.DeviceName) {
		logger.Warningf(
			"address %q on machine %q has invalid device name %q (using anyway)",
			args.CIDRAddress, m.Id(), args.DeviceName,
		)
	}
	if err := m.verifyDeviceAlreadyExists(args.DeviceName); err != nil {
		return errors.Trace(err)
	}

	if !IsValidAddressConfigMethod(string(args.ConfigMethod)) {
		return errors.NotValidf("ConfigMethod %q", args.ConfigMethod)
	}
	if args.GatewayAddress != "" && net.ParseIP(args.GatewayAddress) == nil {
		return errors.NotValidf("GatewayAddress %q", args.GatewayAddress)
	}
	return nil
}
// verifyDeviceAlreadyExists returns a NotFound error unless a device with the
// given name exists on this machine.
func (m *Machine) verifyDeviceAlreadyExists(deviceName string) error {
	_, err := m.LinkLayerDevice(deviceName)
	if err == nil {
		return nil
	}
	if errors.IsNotFound(err) {
		return errors.NotFoundf("DeviceName %q on machine %q", deviceName, m.Id())
	}
	return errors.Trace(err)
}
// newIPAddressDocFromArgs builds an ipAddressDoc from the already validated
// args, splitting CIDRAddress into the address value and its subnet CIDR. A
// subnet known to the model must still be alive; an unknown subnet is treated
// as machine-local and only logged at debug level.
func (m *Machine) newIPAddressDocFromArgs(args *LinkLayerDeviceAddress) (*ipAddressDoc, error) {
	ip, ipNet, err := net.ParseCIDR(args.CIDRAddress)
	if err != nil {
		// We already validated CIDRAddress earlier, so this cannot happen in
		// practice, but we handle it anyway.
		return nil, errors.Trace(err)
	}
	addressValue := ip.String()
	subnetCIDR := ipNet.String()
	subnet, err := m.st.Subnet(subnetCIDR)
	if errors.IsNotFound(err) {
		logger.Debugf(
			"address %q on machine %q uses unknown or machine-local subnet %q",
			addressValue, m.Id(), subnetCIDR,
		)
	} else if err != nil {
		return nil, errors.Trace(err)
	} else if err := m.verifySubnetAlive(subnet); err != nil {
		return nil, errors.Trace(err)
	}
	globalKey := ipAddressGlobalKey(m.doc.Id, args.DeviceName, addressValue)
	ipAddressDocID := m.st.docID(globalKey)
	providerID := string(args.ProviderID)
	modelUUID := m.st.ModelUUID()
	newDoc := &ipAddressDoc{
		DocID:            ipAddressDocID,
		ModelUUID:        modelUUID,
		ProviderID:       providerID,
		DeviceName:       args.DeviceName,
		MachineID:        m.doc.Id,
		SubnetCIDR:       subnetCIDR,
		ConfigMethod:     args.ConfigMethod,
		Value:            addressValue,
		DNSServers:       args.DNSServers,
		DNSSearchDomains: args.DNSSearchDomains,
		GatewayAddress:   args.GatewayAddress,
	}
	return newDoc, nil
}
// verifySubnetAlive returns an error unless the given subnet's life is Alive.
func (m *Machine) verifySubnetAlive(subnet *Subnet) error {
	if subnet.Life() == Alive {
		return nil
	}
	return errors.Errorf("subnet %q is not alive", subnet.CIDR())
}
// setDevicesAddressesFromDocsOps returns the txn ops needed to insert or
// update the given address docs. Each address's ops assert its device still
// exists and (for known subnets) that the subnet stays alive; op groups that
// would change nothing are dropped, so an empty result means no changes.
func (m *Machine) setDevicesAddressesFromDocsOps(newDocs []ipAddressDoc) ([]txn.Op, error) {
	addresses, closer := m.st.db().GetCollection(ipAddressesC)
	defer closer()
	var ops []txn.Op

	for _, newDoc := range newDocs {
		var thisDeviceOps []txn.Op
		hasChanges := false
		deviceDocID := m.linkLayerDeviceDocIDFromName(newDoc.DeviceName)
		thisDeviceOps = append(thisDeviceOps, assertLinkLayerDeviceExistsOp(deviceDocID))

		var existingDoc ipAddressDoc
		err := addresses.FindId(newDoc.DocID).One(&existingDoc)
		if err == mgo.ErrNotFound {
			// Address does not exist yet - insert it.
			hasChanges = true
			thisDeviceOps = append(thisDeviceOps, insertIPAddressDocOp(&newDoc))
			if newDoc.ProviderID != "" {
				id := network.Id(newDoc.ProviderID)
				thisDeviceOps = append(thisDeviceOps, m.st.networkEntityGlobalKeyOp("address", id))
			}
		} else if err == nil {
			// Address already exists - update what's possible.
			var ipOp txn.Op
			ipOp, hasChanges = updateIPAddressDocOp(&existingDoc, &newDoc)
			thisDeviceOps = append(thisDeviceOps, ipOp)
			if newDoc.ProviderID != "" {
				if existingDoc.ProviderID != "" && existingDoc.ProviderID != newDoc.ProviderID {
					return nil, errors.Errorf("cannot change ProviderID of link address %q", existingDoc.Value)
				}
				if existingDoc.ProviderID != newDoc.ProviderID {
					// Need to insert the new provider id in providerIDsC
					id := network.Id(newDoc.ProviderID)
					thisDeviceOps = append(thisDeviceOps, m.st.networkEntityGlobalKeyOp("address", id))
					hasChanges = true
				}
			}
		} else {
			return nil, errors.Trace(err)
		}

		thisDeviceOps, err = m.maybeAssertSubnetAliveOps(&newDoc, thisDeviceOps)
		if err != nil {
			return nil, errors.Trace(err)
		}
		if hasChanges {
			// Only queue this address's ops when something actually changes;
			// dropping assert-only groups keeps len(ops)==0 meaningful.
			ops = append(ops, thisDeviceOps...)
		}
	}
	return ops, nil
}
// maybeAssertSubnetAliveOps appends an op asserting newDoc's subnet stays
// alive, unless the subnet is unknown to the model (machine-local), in which
// case opsSoFar is returned unchanged.
func (m *Machine) maybeAssertSubnetAliveOps(newDoc *ipAddressDoc, opsSoFar []txn.Op) ([]txn.Op, error) {
	subnet, err := m.st.Subnet(newDoc.SubnetCIDR)
	if errors.IsNotFound(err) {
		// Subnet is machine-local, no need to assert whether it's alive.
		return opsSoFar, nil
	}
	if err != nil {
		return nil, errors.Trace(err)
	}
	if err := m.verifySubnetAlive(subnet); err != nil {
		return nil, errors.Trace(err)
	}

	// The subnet exists and is alive now; assert that it stays that way.
	aliveOp := txn.Op{
		C:      subnetsC,
		Id:     m.st.docID(newDoc.SubnetCIDR),
		Assert: isAliveDoc,
	}
	return append(opsSoFar, aliveOp), nil
}
// RemoveAllAddresses removes all assigned addresses to all devices of the
// machine, in a single transaction. No error is returned when some or all of
// the addresses were already removed.
func (m *Machine) RemoveAllAddresses() error {
	removalOps, err := m.removeAllAddressesOps()
	if err != nil {
		return errors.Trace(err)
	}
	return m.st.runTransaction(removalOps)
}
// removeAllAddressesOps returns the ops removing every IP address doc that
// belongs to this machine.
func (m *Machine) removeAllAddressesOps() ([]txn.Op, error) {
	return m.st.removeMatchingIPAddressesDocOps(findAddressesQuery(m.doc.Id, ""))
}
// AllAddresses returns all addresses assigned to all devices of the machine.
func (m *Machine) AllAddresses() ([]*Address, error) {
	var all []*Address
	collect := func(resultDoc *ipAddressDoc) {
		all = append(all, newIPAddress(m.st, *resultDoc))
	}

	query := findAddressesQuery(m.doc.Id, "")
	if err := m.st.forEachIPAddressDoc(query, collect); err != nil {
		return nil, errors.Trace(err)
	}
	return all, nil
}
// AllSpaces returns the set of spaces that this machine is actively
// connected to.
func (m *Machine) AllSpaces() (set.Strings, error) {
	// TODO(jam): 2016-12-18 This should evolve to look at the
	// LinkLayerDevices directly, instead of using the Addresses the devices
	// are in to link back to spaces.
	addresses, err := m.AllAddresses()
	if err != nil {
		return nil, errors.Trace(err)
	}

	spaces := set.NewStrings()
	for _, address := range addresses {
		subnet, err := address.Subnet()
		switch {
		case errors.IsNotFound(err):
			// We don't know what this subnet is, so it can't be a space. It
			// might just be the loopback device.
			continue
		case err != nil:
			return nil, errors.Trace(err)
		}
		if name := subnet.SpaceName(); name != "" {
			spaces.Add(name)
		}
	}
	logger.Tracef("machine %q found AllSpaces() = %s",
		m.Id(), network.QuoteSpaceSet(spaces))
	return spaces, nil
}
// AllNetworkAddresses returns the result of AllAddresses(), but transformed to
// []network.Address.
func (m *Machine) AllNetworkAddresses() ([]network.Address, error) {
	stateAddresses, err := m.AllAddresses()
	if err != nil {
		return nil, errors.Trace(err)
	}
	// TODO(jam): 20161130 NetworkAddress object has a SpaceName attribute.
	// However, we are not filling in that information here.
	networkAddresses := make([]network.Address, len(stateAddresses))
	for i, stateAddr := range stateAddresses {
		networkAddresses[i] = stateAddr.NetworkAddress()
	}
	return networkAddresses, nil
}
// deviceMapToSortedList takes a map from device name to LinkLayerDevice
// object, and returns the list of LinkLayerDevice object using
// NaturallySortDeviceNames applied to the map keys.
func deviceMapToSortedList(deviceMap map[string]*LinkLayerDevice) []*LinkLayerDevice {
	names := make([]string, 0, len(deviceMap))
	// Idiomatic key-only range (the redundant `, _` was dropped);
	// name must == device.Name().
	for name := range deviceMap {
		names = append(names, name)
	}
	sortedNames := network.NaturallySortDeviceNames(names...)
	result := make([]*LinkLayerDevice, len(sortedNames))
	for i, name := range sortedNames {
		result[i] = deviceMap[name]
	}
	return result
}
// LinkLayerDevicesForSpaces takes a list of spaces, and returns the devices on
// this machine that are in that space that we feel would be useful for
// containers to know about. (eg, if there is a host device that has been
// bridged, we return the bridge, rather than the underlying device, but if we
// have only the host device, we return that.)
// Note that devices like 'lxdbr0' that are bridges that might not be
// externally accessible may be returned if "" is listed as one of the desired
// spaces.
func (m *Machine) LinkLayerDevicesForSpaces(spaces []string) (map[string][]*LinkLayerDevice, error) {
	addresses, err := m.AllAddresses()
	if err != nil {
		return nil, errors.Trace(err)
	}
	devices, err := m.AllLinkLayerDevices()
	if err != nil {
		return nil, errors.Trace(err)
	}
	deviceByName := make(map[string]*LinkLayerDevice, len(devices))
	for _, dev := range devices {
		deviceByName[dev.Name()] = dev
	}
	requestedSpaces := set.NewStrings(spaces...)
	// Note: dropped the pointless 0 size hint from the original make call.
	spaceToDevices := make(map[string]map[string]*LinkLayerDevice)
	processedDeviceNames := set.NewStrings()
	// includeDevice records device under spaceName, creating the inner map on
	// first use.
	includeDevice := func(spaceName string, device *LinkLayerDevice) {
		spaceInfo, ok := spaceToDevices[spaceName]
		if !ok {
			spaceInfo = make(map[string]*LinkLayerDevice)
			spaceToDevices[spaceName] = spaceInfo
		}
		spaceInfo[device.Name()] = device
	}
	// First pass, iterate the addresses, lookup the associated spaces, and
	// gather the devices.
	for _, addr := range addresses {
		subnet, err := addr.Subnet()
		spaceName := ""
		if err != nil {
			if errors.IsNotFound(err) {
				// unknown subnets are considered part of the "unknown" space
				spaceName = ""
			} else {
				// We don't understand the error, so error out for now
				return nil, errors.Trace(err)
			}
		} else {
			spaceName = subnet.SpaceName()
		}
		device, ok := deviceByName[addr.DeviceName()]
		if !ok {
			return nil, errors.Errorf("address %v for machine %q refers to a missing device %q",
				addr, m.Id(), addr.DeviceName())
		}
		processedDeviceNames.Add(device.Name())
		if device.Type() == LoopbackDevice {
			// We skip loopback devices here
			continue
		}
		includeDevice(spaceName, device)
	}
	// Now grab any devices we may have missed. For now, any device without an
	// address must be in the "unknown" space.
	for devName, device := range deviceByName {
		if processedDeviceNames.Contains(devName) {
			continue
		}
		// Loopback devices aren't considered part of the empty space.
		// Also, devices that are attached to another device also aren't
		// considered to be in the unknown space.
		if device.Type() == LoopbackDevice || device.ParentName() != "" {
			continue
		}
		includeDevice("", device)
	}
	// Finally, filter down to only the requested spaces, sorting each
	// space's devices by natural device-name order.
	result := make(map[string][]*LinkLayerDevice, len(spaceToDevices))
	for spaceName, deviceMap := range spaceToDevices {
		if !requestedSpaces.Contains(spaceName) {
			continue
		}
		result[spaceName] = deviceMapToSortedList(deviceMap)
	}
	return result, nil
}
// SetParentLinkLayerDevicesBeforeTheirChildren splits the given devicesArgs
// into multiple sets of args and calls SetLinkLayerDevices() for each set, such
// that child devices are set only after their parents.
func (m *Machine) SetParentLinkLayerDevicesBeforeTheirChildren(devicesArgs []LinkLayerDeviceArgs) error {
	seenNames := set.NewStrings("") // sentinel for empty ParentName.
	for {
		// Each round picks out the not-yet-set devices whose parent has
		// already been set (or is empty), so parents always go first.
		argsToSet := []LinkLayerDeviceArgs{}
		for _, args := range devicesArgs {
			if seenNames.Contains(args.Name) {
				// Already added earlier.
				continue
			}
			if seenNames.Contains(args.ParentName) {
				argsToSet = append(argsToSet, args)
			}
		}
		if len(argsToSet) == 0 {
			// We're done.
			break
		}
		logger.Debugf("setting link-layer devices %+v", argsToSet)
		if err := m.SetLinkLayerDevices(argsToSet...); IsProviderIDNotUniqueError(err) {
			// FIXME: Make updating devices with unchanged ProviderID idempotent.
			// FIXME: this obliterates the ProviderID of *all*
			// devices if any *one* of them is not unique.
			for i, args := range argsToSet {
				args.ProviderID = ""
				argsToSet[i] = args
			}
			if err := m.SetLinkLayerDevices(argsToSet...); err != nil {
				return errors.Trace(err)
			}
		} else if err != nil {
			return errors.Trace(err)
		}
		// Mark this round's devices as set so their children qualify next
		// time around.
		for _, args := range argsToSet {
			seenNames.Add(args.Name)
		}
	}
	return nil
}
// SetDevicesAddressesIdempotently calls SetDevicesAddresses() and if it fails
// with ErrProviderIDNotUnique, retries the call with all ProviderID fields in
// devicesAddresses set to empty.
func (m *Machine) SetDevicesAddressesIdempotently(devicesAddresses []LinkLayerDeviceAddress) error {
	err := m.SetDevicesAddresses(devicesAddresses...)
	if err == nil {
		return nil
	}
	if !IsProviderIDNotUniqueError(err) {
		return errors.Trace(err)
	}
	// FIXME: Make updating addresses with unchanged ProviderID idempotent.
	// FIXME: this obliterates the ProviderID of *all*
	// addresses if any *one* of them is not unique.
	for i := range devicesAddresses {
		devicesAddresses[i].ProviderID = ""
	}
	return errors.Trace(m.SetDevicesAddresses(devicesAddresses...))
}
// DefineEthernetDeviceOnBridge returns LinkLayerDeviceArgs describing a new
// Ethernet device with the given name, parented to hostBridge (referenced by
// its global key). hostBridge must be of type BridgeDevice.
func DefineEthernetDeviceOnBridge(name string, hostBridge *LinkLayerDevice) (LinkLayerDeviceArgs, error) {
	if bridgeType := hostBridge.Type(); bridgeType != BridgeDevice {
		return LinkLayerDeviceArgs{}, errors.Errorf("hostBridge must be a Bridge Device not %q", bridgeType)
	}
	args := LinkLayerDeviceArgs{
		Name:        name,
		Type:        EthernetDevice,
		MACAddress:  generateMACAddress(),
		MTU:         hostBridge.MTU(),
		IsUp:        true,
		IsAutoStart: true,
		ParentName:  hostBridge.globalKey(),
	}
	return args, nil
}
// macAddressTemplate is used to generate a unique MAC address for a
// container. Every '%02x' is replaced by a random hexadecimal byte,
// while the rest is kept as-is.
const macAddressTemplate = "00:16:3e:%02x:%02x:%02x"

// generateMACAddress creates a random MAC address within the space defined by
// macAddressTemplate above.
//
// TODO(dimitern): We should make a best effort to ensure the MAC address we
// generate is unique at least within the current environment.
func generateMACAddress() string {
	octets := make([]interface{}, 3)
	for i := range octets {
		octets[i] = rand.Intn(256)
	}
	return fmt.Sprintf(macAddressTemplate, octets...)
}
// MachineNetworkInfoResult contains an error or a list of NetworkInfo
// structures for a specific space. Exactly one of the two fields is
// meaningful: Error is non-nil when the lookup for that space failed.
type MachineNetworkInfoResult struct {
	NetworkInfos []network.NetworkInfo
	Error        *error
}
// addAddressToResult folds address into networkInfos: it appends the address
// to the existing entry for its device, or creates a new entry (including the
// device's MAC address when the device can be looked up).
func addAddressToResult(networkInfos []network.NetworkInfo, address *Address) ([]network.NetworkInfo, error) {
	ifaceAddress := network.InterfaceAddress{
		Address: address.Value(),
		CIDR:    address.SubnetCIDR(),
	}
	// Existing entry for this device? Just append the address.
	for i := range networkInfos {
		if networkInfos[i].InterfaceName == address.DeviceName() {
			networkInfos[i].Addresses = append(networkInfos[i].Addresses, ifaceAddress)
			return networkInfos, nil
		}
	}

	// Otherwise start a new entry, resolving the MAC when possible. A
	// missing device is tolerated and leaves the MAC empty.
	var mac string
	device, err := address.Device()
	switch {
	case err == nil:
		mac = device.MACAddress()
	case !errors.IsNotFound(err):
		return nil, err
	}
	entry := network.NetworkInfo{
		InterfaceName: address.DeviceName(),
		MACAddress:    mac,
		Addresses:     []network.InterfaceAddress{ifaceAddress},
	}
	return append(networkInfos, entry), nil
}
// GetNetworkInfoForSpaces returns MachineNetworkInfoResult with a list of devices for each space in spaces
// TODO(wpk): 2017-05-04 This does not work for L2-only devices as it iterates over addresses, needs to be fixed.
// When changing the method we have to keep the ordering.
//
// Fixes over the previous version: the final "missing space" loop used to
// return after its first iteration, and errors from addAddressToResult were
// set on a local copy that was never stored back into the results map.
func (m *Machine) GetNetworkInfoForSpaces(spaces set.Strings) map[string](MachineNetworkInfoResult) {
	results := make(map[string](MachineNetworkInfoResult))

	// The "" (unknown) space additionally needs the machine's preferred
	// private address, so resolve it up front.
	var privateAddress network.Address
	if spaces.Contains("") {
		var err error
		privateAddress, err = m.PrivateAddress()
		if err != nil {
			error := errors.Annotatef(err, "getting machine %q preferred private address", m.MachineTag())
			results[""] = MachineNetworkInfoResult{Error: &error}
			spaces.Remove("")
		}
	}

	addresses, err := m.AllAddresses()
	logger.Debugf("Looking for something from spaces %v in %v", spaces, addresses)
	if err != nil {
		// Report the same failure for every requested space that has no
		// result yet.
		newErr := errors.Annotate(err, "cannot get devices addresses")
		result := MachineNetworkInfoResult{Error: &newErr}
		for space := range spaces {
			if _, ok := results[space]; !ok {
				results[space] = result
			}
		}
		return results
	}
	actualSpaces := set.NewStrings()
	for _, addr := range addresses {
		subnet, err := addr.Subnet()
		switch {
		case errors.IsNotFound(err):
			logger.Debugf("skipping %s: not linked to a known subnet (%v)", addr, err)
		case err != nil:
			logger.Errorf("cannot get subnet for address %q - %q", addr, err)
		default:
			space := subnet.SpaceName()
			actualSpaces.Add(space)
			if spaces.Contains(space) {
				results[space] = addAddressToSpaceResult(results[space], addr)
			}
			if spaces.Contains("") && privateAddress.Value == addr.Value() {
				results[""] = addAddressToSpaceResult(results[""], addr)
			}
		}
	}

	// Any requested space without a result by now has no devices on this
	// machine; record that as an error per space. (The early `return` that
	// used to sit inside this loop has been removed.)
	actualSpacesStr := network.QuoteSpaceSet(actualSpaces)
	for space := range spaces {
		if _, ok := results[space]; !ok {
			newErr := errors.Errorf("machine %q has no devices in space %q, only spaces %s", m.doc.Id, space, actualSpacesStr)
			results[space] = MachineNetworkInfoResult{
				Error: &newErr,
			}
		}
	}
	return results
}

// addAddressToSpaceResult folds addr into r via addAddressToResult, recording
// a failure on r.Error instead of silently dropping it.
func addAddressToSpaceResult(r MachineNetworkInfoResult, addr *Address) MachineNetworkInfoResult {
	infos, err := addAddressToResult(r.NetworkInfos, addr)
	if err != nil {
		r.Error = &err
	} else {
		r.NetworkInfos = infos
	}
	return r
}
|
package jira
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/url"
"reflect"
"strings"
"time"
"github.com/fatih/structs"
"github.com/google/go-querystring/query"
"github.com/trivago/tgo/tcontainer"
)
const (
	// AssigneeAutomatic is the sentinel assignee value representing
	// "Assignee: Automatic" in JIRA.
	AssigneeAutomatic = "-1"
)
// IssueService handles Issues for the JIRA instance / API.
//
// JIRA API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/issue
type IssueService struct {
	// client is the underlying JIRA client used to perform requests.
	client *Client
}

// Issue represents a JIRA issue. All fields are optional (tagged omitempty),
// so values absent from the API response remain at their zero value.
type Issue struct {
	Expand    string       `json:"expand,omitempty" structs:"expand,omitempty"`
	ID        string       `json:"id,omitempty" structs:"id,omitempty"`
	Self      string       `json:"self,omitempty" structs:"self,omitempty"`
	Key       string       `json:"key,omitempty" structs:"key,omitempty"`
	Fields    *IssueFields `json:"fields,omitempty" structs:"fields,omitempty"`
	Changelog *Changelog   `json:"changelog,omitempty" structs:"changelog,omitempty"`
}
// ChangelogItems reflects one single changelog item of a history item.
// From/To hold the raw identifiers while FromString/ToString hold their
// display representations.
type ChangelogItems struct {
	Field      string      `json:"field" structs:"field"`
	FieldType  string      `json:"fieldtype" structs:"fieldtype"`
	From       interface{} `json:"from" structs:"from"`
	FromString string      `json:"fromString" structs:"fromString"`
	To         interface{} `json:"to" structs:"to"`
	ToString   string      `json:"toString" structs:"toString"`
}

// ChangelogHistory reflects one single changelog history entry: who changed
// what, when, and the individual field changes (Items).
type ChangelogHistory struct {
	Id      string           `json:"id" structs:"id"`
	Author  User             `json:"author" structs:"author"`
	Created string           `json:"created" structs:"created"`
	Items   []ChangelogItems `json:"items" structs:"items"`
}

// Changelog reflects the change log of an issue.
type Changelog struct {
	Histories []ChangelogHistory `json:"histories,omitempty"`
}
// Attachment represents a JIRA attachment as returned by the REST API.
type Attachment struct {
	Self      string `json:"self,omitempty" structs:"self,omitempty"`
	ID        string `json:"id,omitempty" structs:"id,omitempty"`
	Filename  string `json:"filename,omitempty" structs:"filename,omitempty"`
	Author    *User  `json:"author,omitempty" structs:"author,omitempty"`
	Created   string `json:"created,omitempty" structs:"created,omitempty"`
	Size      int    `json:"size,omitempty" structs:"size,omitempty"`
	MimeType  string `json:"mimeType,omitempty" structs:"mimeType,omitempty"`
	Content   string `json:"content,omitempty" structs:"content,omitempty"`
	Thumbnail string `json:"thumbnail,omitempty" structs:"thumbnail,omitempty"`
}

// Epic represents the epic to which an issue is associated.
// Note that this struct does not process the returned "color" value.
type Epic struct {
	ID      int    `json:"id" structs:"id"`
	Key     string `json:"key" structs:"key"`
	Self    string `json:"self" structs:"self"`
	Name    string `json:"name" structs:"name"`
	Summary string `json:"summary" structs:"summary"`
	Done    bool   `json:"done" structs:"done"`
}
// IssueFields represents single fields of a JIRA issue.
// Every JIRA issue has several fields attached.
type IssueFields struct {
	// TODO Missing fields
	//      * "aggregatetimespent": null,
	//      * "workratio": -1,
	//      * "lastViewed": null,
	//      * "aggregatetimeoriginalestimate": null,
	//      * "aggregatetimeestimate": null,
	//      * "environment": null,
	Expand               string        `json:"expand,omitempty" structs:"expand,omitempty"`
	Type                 IssueType     `json:"issuetype" structs:"issuetype"`
	Project              Project       `json:"project,omitempty" structs:"project,omitempty"`
	Resolution           *Resolution   `json:"resolution,omitempty" structs:"resolution,omitempty"`
	Priority             *Priority     `json:"priority,omitempty" structs:"priority,omitempty"`
	Resolutiondate       string        `json:"resolutiondate,omitempty" structs:"resolutiondate,omitempty"`
	Created              string        `json:"created,omitempty" structs:"created,omitempty"`
	Duedate              string        `json:"duedate,omitempty" structs:"duedate,omitempty"`
	Watches              *Watches      `json:"watches,omitempty" structs:"watches,omitempty"`
	Assignee             *User         `json:"assignee,omitempty" structs:"assignee,omitempty"`
	Updated              string        `json:"updated,omitempty" structs:"updated,omitempty"`
	Description          string        `json:"description,omitempty" structs:"description,omitempty"`
	Summary              string        `json:"summary" structs:"summary"`
	Creator              *User         `json:"Creator,omitempty" structs:"Creator,omitempty"`
	Reporter             *User         `json:"reporter,omitempty" structs:"reporter,omitempty"`
	Components           []*Component  `json:"components,omitempty" structs:"components,omitempty"`
	Status               *Status       `json:"status,omitempty" structs:"status,omitempty"`
	Progress             *Progress     `json:"progress,omitempty" structs:"progress,omitempty"`
	AggregateProgress    *Progress     `json:"aggregateprogress,omitempty" structs:"aggregateprogress,omitempty"`
	TimeTracking         *TimeTracking `json:"timetracking,omitempty" structs:"timetracking,omitempty"`
	TimeSpent            int           `json:"timespent,omitempty" structs:"timespent,omitempty"`
	TimeEstimate         int           `json:"timeestimate,omitempty" structs:"timeestimate,omitempty"`
	TimeOriginalEstimate int           `json:"timeoriginalestimate,omitempty" structs:"timeoriginalestimate,omitempty"`
	Worklog              *Worklog      `json:"worklog,omitempty" structs:"worklog,omitempty"`
	IssueLinks           []*IssueLink  `json:"issuelinks,omitempty" structs:"issuelinks,omitempty"`
	Comments             *Comments     `json:"comment,omitempty" structs:"comment,omitempty"`
	FixVersions          []*FixVersion `json:"fixVersions,omitempty" structs:"fixVersions,omitempty"`
	Labels               []string      `json:"labels,omitempty" structs:"labels,omitempty"`
	Subtasks             []*Subtasks   `json:"subtasks,omitempty" structs:"subtasks,omitempty"`
	Attachments          []*Attachment `json:"attachment,omitempty" structs:"attachment,omitempty"`
	Epic                 *Epic         `json:"epic,omitempty" structs:"epic,omitempty"`
	Parent               *Parent       `json:"parent,omitempty" structs:"parent,omitempty"`
	// Unknowns collects all JSON keys not mapped to any field above (JIRA
	// custom fields); it is populated by UnmarshalJSON and flattened back to
	// top-level keys by MarshalJSON.
	Unknowns tcontainer.MarshalMap
}
// DeleteIssueOptions contains URL query options for deleting an issue.
type DeleteIssueOptions struct {
	// DeleteSubTasks is sent as the "deleteSubTasks" query parameter.
	DeleteSubTasks string `url:"deleteSubTasks,omitempty"`
}
// MarshalJSON is a custom JSON marshal function for the IssueFields structs.
// It handles JIRA custom fields and maps those from / to "Unknowns" key.
func (i *IssueFields) MarshalJSON() ([]byte, error) {
	fieldMap := structs.Map(i)
	if unknowns, present := fieldMap["Unknowns"]; present {
		// If unknowns are present, lift every key/value pair from Unknowns
		// up one level so custom fields appear as top-level JSON keys.
		for key, value := range unknowns.(tcontainer.MarshalMap) {
			fieldMap[key] = value
		}
		delete(fieldMap, "Unknowns")
	}
	return json.Marshal(fieldMap)
}
// UnmarshalJSON is a custom JSON unmarshal function for the IssueFields
// structs. It handles JIRA custom fields and maps those from / to the
// "Unknowns" key.
func (i *IssueFields) UnmarshalJSON(data []byte) error {
	// Do the normal unmarshalling first
	// Details for this way: http://choly.ca/post/go-json-marshalling/
	type Alias IssueFields
	aux := &struct {
		*Alias
	}{
		Alias: (*Alias)(i),
	}
	if err := json.Unmarshal(data, &aux); err != nil {
		return err
	}

	// Unmarshal a second time into a generic map so we can find the keys
	// that do not correspond to any struct field.
	totalMap := tcontainer.NewMarshalMap()
	if err := json.Unmarshal(data, &totalMap); err != nil {
		return err
	}

	// Remove every key claimed by a struct field's json tag. The loop
	// variable no longer shadows the receiver `i` (it used to).
	t := reflect.TypeOf(*i)
	for fieldIdx := 0; fieldIdx < t.NumField(); fieldIdx++ {
		field := t.Field(fieldIdx)
		tagDetail := field.Tag.Get("json")
		if tagDetail == "" {
			// ignore if there are no tags
			continue
		}
		options := strings.Split(tagDetail, ",")
		if len(options) == 0 {
			return fmt.Errorf("No tags options found for %s", field.Name)
		}
		// the first one is the json tag
		key := options[0]
		if _, okay := totalMap.Value(key); okay {
			delete(totalMap, key)
		}
	}

	// Note: the old no-op `i = (*IssueFields)(aux.Alias)` was removed —
	// aux.Alias already aliases the receiver, so it reassigned a local
	// pointer to itself.
	// All the tags found in the struct were removed; whatever is left are
	// fields unknown to the struct (JIRA custom fields).
	i.Unknowns = totalMap
	return nil
}
// IssueType represents a type of a JIRA issue.
// Typical types are "Request", "Bug", "Story", ...
type IssueType struct {
	Self        string `json:"self,omitempty" structs:"self,omitempty"`
	ID          string `json:"id,omitempty" structs:"id,omitempty"`
	Description string `json:"description,omitempty" structs:"description,omitempty"`
	IconURL     string `json:"iconUrl,omitempty" structs:"iconUrl,omitempty"`
	Name        string `json:"name,omitempty" structs:"name,omitempty"`
	Subtask     bool   `json:"subtask,omitempty" structs:"subtask,omitempty"`
	AvatarID    int    `json:"avatarId,omitempty" structs:"avatarId,omitempty"`
}

// Resolution represents a resolution of a JIRA issue.
// Typical types are "Fixed", "Suspended", "Won't Fix", ...
type Resolution struct {
	Self        string `json:"self" structs:"self"`
	ID          string `json:"id" structs:"id"`
	Description string `json:"description" structs:"description"`
	Name        string `json:"name" structs:"name"`
}

// Priority represents a priority of a JIRA issue.
// Typical types are "Normal", "Moderate", "Urgent", ...
type Priority struct {
	Self    string `json:"self,omitempty" structs:"self,omitempty"`
	IconURL string `json:"iconUrl,omitempty" structs:"iconUrl,omitempty"`
	Name    string `json:"name,omitempty" structs:"name,omitempty"`
	ID      string `json:"id,omitempty" structs:"id,omitempty"`
}

// Watches represents a type of how many user are "observing" a JIRA issue to track the status / updates.
type Watches struct {
	Self       string `json:"self,omitempty" structs:"self,omitempty"`
	WatchCount int    `json:"watchCount,omitempty" structs:"watchCount,omitempty"`
	IsWatching bool   `json:"isWatching,omitempty" structs:"isWatching,omitempty"`
}

// AvatarUrls represents different dimensions of avatars / images.
// Field names spell out the pixel dimensions (e.g. Four8X48 is the 48x48
// avatar) because Go identifiers cannot start with a digit.
type AvatarUrls struct {
	Four8X48  string `json:"48x48,omitempty" structs:"48x48,omitempty"`
	Two4X24   string `json:"24x24,omitempty" structs:"24x24,omitempty"`
	One6X16   string `json:"16x16,omitempty" structs:"16x16,omitempty"`
	Three2X32 string `json:"32x32,omitempty" structs:"32x32,omitempty"`
}

// Component represents a "component" of a JIRA issue.
// Components can be user defined in every JIRA instance.
type Component struct {
	Self string `json:"self,omitempty" structs:"self,omitempty"`
	ID   string `json:"id,omitempty" structs:"id,omitempty"`
	Name string `json:"name,omitempty" structs:"name,omitempty"`
}

// Status represents the current status of a JIRA issue.
// Typical status are "Open", "In Progress", "Closed", ...
// Status can be user defined in every JIRA instance.
type Status struct {
	Self           string         `json:"self" structs:"self"`
	Description    string         `json:"description" structs:"description"`
	IconURL        string         `json:"iconUrl" structs:"iconUrl"`
	Name           string         `json:"name" structs:"name"`
	ID             string         `json:"id" structs:"id"`
	StatusCategory StatusCategory `json:"statusCategory" structs:"statusCategory"`
}

// StatusCategory represents the category a status belongs to.
// Those categories can be user defined in every JIRA instance.
type StatusCategory struct {
	Self      string `json:"self" structs:"self"`
	ID        int    `json:"id" structs:"id"`
	Name      string `json:"name" structs:"name"`
	Key       string `json:"key" structs:"key"`
	ColorName string `json:"colorName" structs:"colorName"`
}

// Progress represents the progress of a JIRA issue.
type Progress struct {
	Progress int `json:"progress" structs:"progress"`
	Total    int `json:"total" structs:"total"`
}

// Parent represents the parent of a JIRA issue, to be used with subtask issue types.
type Parent struct {
	ID  string `json:"id,omitempty" structs:"id"`
	Key string `json:"key,omitempty" structs:"key"`
}

// Time represents the Time definition of JIRA as a time.Time of go.
// See Time.UnmarshalJSON for the JIRA-specific timestamp layout.
type Time time.Time

// transitionResult wraps the list of transitions returned by the issue
// transitions endpoint. (The original comment incorrectly described this as
// a search-result wrapper.)
type transitionResult struct {
	Transitions []Transition `json:"transitions" structs:"transitions"`
}

// Transition represents an issue transition in JIRA.
type Transition struct {
	ID     string                     `json:"id" structs:"id"`
	Name   string                     `json:"name" structs:"name"`
	Fields map[string]TransitionField `json:"fields" structs:"fields"`
}

// TransitionField represents the value of one Transition.
type TransitionField struct {
	Required bool `json:"required" structs:"required"`
}

// CreateTransitionPayload is used for creating new issue transitions.
type CreateTransitionPayload struct {
	Transition TransitionPayload `json:"transition" structs:"transition"`
}

// TransitionPayload represents the request payload of Transition calls like DoTransition.
type TransitionPayload struct {
	ID string `json:"id" structs:"id"`
}
// UnmarshalJSON will transform the JIRA time into a time.Time
// during the transformation of the JIRA JSON response.
// The layout includes the surrounding double quotes because the raw JSON
// token (a quoted string) is parsed directly.
func (t *Time) UnmarshalJSON(b []byte) error {
	const jiraLayout = `"2006-01-02T15:04:05.999-0700"`

	parsed, err := time.Parse(jiraLayout, string(b))
	if err != nil {
		return err
	}

	*t = Time(parsed)
	return nil
}
// Worklog represents the work log of a JIRA issue.
// One Worklog contains zero or n WorklogRecords
// JIRA Wiki: https://confluence.atlassian.com/jira/logging-work-on-an-issue-185729605.html
type Worklog struct {
	StartAt    int             `json:"startAt" structs:"startAt"`
	MaxResults int             `json:"maxResults" structs:"maxResults"`
	Total      int             `json:"total" structs:"total"`
	Worklogs   []WorklogRecord `json:"worklogs" structs:"worklogs"`
}

// WorklogRecord represents one entry of a Worklog.
type WorklogRecord struct {
	Self             string `json:"self" structs:"self"`
	Author           User   `json:"author" structs:"author"`
	UpdateAuthor     User   `json:"updateAuthor" structs:"updateAuthor"`
	Comment          string `json:"comment" structs:"comment"`
	Created          Time   `json:"created" structs:"created"`
	Updated          Time   `json:"updated" structs:"updated"`
	Started          Time   `json:"started" structs:"started"`
	TimeSpent        string `json:"timeSpent" structs:"timeSpent"`
	TimeSpentSeconds int    `json:"timeSpentSeconds" structs:"timeSpentSeconds"`
	ID               string `json:"id" structs:"id"`
	IssueID          string `json:"issueId" structs:"issueId"`
}

// TimeTracking represents the timetracking fields of a JIRA issue.
type TimeTracking struct {
	OriginalEstimate         string `json:"originalEstimate,omitempty" structs:"originalEstimate,omitempty"`
	RemainingEstimate        string `json:"remainingEstimate,omitempty" structs:"remainingEstimate,omitempty"`
	TimeSpent                string `json:"timeSpent,omitempty" structs:"timeSpent,omitempty"`
	OriginalEstimateSeconds  int    `json:"originalEstimateSeconds,omitempty" structs:"originalEstimateSeconds,omitempty"`
	RemainingEstimateSeconds int    `json:"remainingEstimateSeconds,omitempty" structs:"remainingEstimateSeconds,omitempty"`
	TimeSpentSeconds         int    `json:"timeSpentSeconds,omitempty" structs:"timeSpentSeconds,omitempty"`
}

// Subtasks represents all issues of a parent issue.
type Subtasks struct {
	ID     string      `json:"id" structs:"id"`
	Key    string      `json:"key" structs:"key"`
	Self   string      `json:"self" structs:"self"`
	Fields IssueFields `json:"fields" structs:"fields"`
}

// IssueLink represents a link between two issues in JIRA.
type IssueLink struct {
	ID           string        `json:"id,omitempty" structs:"id,omitempty"`
	Self         string        `json:"self,omitempty" structs:"self,omitempty"`
	Type         IssueLinkType `json:"type" structs:"type"`
	OutwardIssue *Issue        `json:"outwardIssue" structs:"outwardIssue"`
	InwardIssue  *Issue        `json:"inwardIssue" structs:"inwardIssue"`
	Comment      *Comment      `json:"comment,omitempty" structs:"comment,omitempty"`
}

// IssueLinkType represents a type of a link between two issues in JIRA.
// Typical issue link types are "Related to", "Duplicate", "Is blocked by", etc.
type IssueLinkType struct {
	ID      string `json:"id,omitempty" structs:"id,omitempty"`
	Self    string `json:"self,omitempty" structs:"self,omitempty"`
	Name    string `json:"name" structs:"name"`
	Inward  string `json:"inward" structs:"inward"`
	Outward string `json:"outward" structs:"outward"`
}

// Comments represents a list of Comment.
type Comments struct {
	Comments []*Comment `json:"comments,omitempty" structs:"comments,omitempty"`
}

// Comment represents a comment by a person to an issue in JIRA.
type Comment struct {
	ID         string            `json:"id,omitempty" structs:"id,omitempty"`
	Self       string            `json:"self,omitempty" structs:"self,omitempty"`
	Name       string            `json:"name,omitempty" structs:"name,omitempty"`
	Author     User              `json:"author,omitempty" structs:"author,omitempty"`
	Body       string            `json:"body,omitempty" structs:"body,omitempty"`
	UpdateAuthor User            `json:"updateAuthor,omitempty" structs:"updateAuthor,omitempty"`
	Updated    string            `json:"updated,omitempty" structs:"updated,omitempty"`
	Created    string            `json:"created,omitempty" structs:"created,omitempty"`
	Visibility CommentVisibility `json:"visibility,omitempty" structs:"visibility,omitempty"`
}

// FixVersion represents a software release in which an issue is fixed.
type FixVersion struct {
	Archived        *bool  `json:"archived,omitempty" structs:"archived,omitempty"`
	ID              string `json:"id,omitempty" structs:"id,omitempty"`
	Name            string `json:"name,omitempty" structs:"name,omitempty"`
	ProjectID       int    `json:"projectId,omitempty" structs:"projectId,omitempty"`
	ReleaseDate     string `json:"releaseDate,omitempty" structs:"releaseDate,omitempty"`
	Released        *bool  `json:"released,omitempty" structs:"released,omitempty"`
	Self            string `json:"self,omitempty" structs:"self,omitempty"`
	UserReleaseDate string `json:"userReleaseDate,omitempty" structs:"userReleaseDate,omitempty"`
}

// CommentVisibility represents the visibility of a comment.
// E.g. Type could be "role" and Value "Administrators"
type CommentVisibility struct {
	Type  string `json:"type,omitempty" structs:"type,omitempty"`
	Value string `json:"value,omitempty" structs:"value,omitempty"`
}

// SearchOptions specifies the optional parameters to various List methods that
// support pagination.
// Pagination is used for the JIRA REST APIs to conserve server resources and limit
// response size for resources that return potentially large collection of items.
// A request to a pages API will result in a values array wrapped in a JSON object with some paging metadata
// Default Pagination options
type SearchOptions struct {
	// StartAt: The starting index of the returned projects. Base index: 0.
	StartAt int `url:"startAt,omitempty"`
	// MaxResults: The maximum number of projects to return per page. Default: 50.
	MaxResults int `url:"maxResults,omitempty"`
	// Expand: Expand specific sections in the returned issues
	Expand string `url:"expand,omitempty"`
}

// searchResult is only a small wrapper around the Search (with JQL) method
// to be able to parse the results
type searchResult struct {
	Issues     []Issue `json:"issues" structs:"issues"`
	StartAt    int     `json:"startAt" structs:"startAt"`
	MaxResults int     `json:"maxResults" structs:"maxResults"`
	Total      int     `json:"total" structs:"total"`
}

// GetQueryOptions specifies the optional parameters for the Get Issue methods
type GetQueryOptions struct {
	// Fields is the list of fields to return for the issue. By default, all fields are returned.
	Fields string `url:"fields,omitempty"`
	Expand string `url:"expand,omitempty"`
	// Properties is the list of properties to return for the issue. By default no properties are returned.
	Properties string `url:"properties,omitempty"`
	// FieldsByKeys if true then fields in issues will be referenced by keys instead of ids
	FieldsByKeys  bool `url:"fieldsByKeys,omitempty"`
	UpdateHistory bool `url:"updateHistory,omitempty"`
}

// CustomFields represents custom fields of JIRA
// This can heavily differ between JIRA instances
type CustomFields map[string]string
// Get returns a full representation of the issue for the given issue key.
// JIRA will attempt to identify the issue by the issueIdOrKey path parameter.
// This can be an issue id, or an issue key.
// If the issue cannot be found via an exact match, JIRA will also look for the issue in a case-insensitive way, or by looking to see if the issue was moved.
//
// The given options will be appended to the query string
//
// JIRA API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/issue-getIssue
func (s *IssueService) Get(issueID string, options *GetQueryOptions) (*Issue, *Response, error) {
	endpoint := fmt.Sprintf("rest/api/2/issue/%s", issueID)

	req, err := s.client.NewRequest("GET", endpoint, nil)
	if err != nil {
		return nil, nil, err
	}

	// Encode the optional parameters (fields, expand, properties, ...) into
	// the request's query string.
	if options != nil {
		values, verr := query.Values(options)
		if verr != nil {
			return nil, nil, verr
		}
		req.URL.RawQuery = values.Encode()
	}

	result := new(Issue)
	resp, err := s.client.Do(req, result)
	if err != nil {
		return nil, resp, err
	}
	return result, resp, nil
}
// DownloadAttachment returns a Response of an attachment for a given attachmentID.
// The attachment is in the Response.Body of the response.
// This is an io.ReadCloser.
// The caller should close the resp.Body.
func (s *IssueService) DownloadAttachment(attachmentID string) (*Response, error) {
	endpoint := fmt.Sprintf("secure/attachment/%s/", attachmentID)

	req, err := s.client.NewRequest("GET", endpoint, nil)
	if err != nil {
		return nil, err
	}

	// A nil destination leaves the body untouched so the caller can stream it.
	return s.client.Do(req, nil)
}
// PostAttachment uploads r (io.Reader) as an attachment to a given attachmentID.
// attachmentName is the file name presented to JIRA for the uploaded content.
// It returns the JSON array of created attachments (multiple attachments can
// be posted in one request).
func (s *IssueService) PostAttachment(attachmentID string, r io.Reader, attachmentName string) (*[]Attachment, *Response, error) {
	apiEndpoint := fmt.Sprintf("rest/api/2/issue/%s/attachments", attachmentID)

	b := new(bytes.Buffer)
	writer := multipart.NewWriter(b)

	fw, err := writer.CreateFormFile("file", attachmentName)
	if err != nil {
		return nil, nil, err
	}

	if r != nil {
		// Copy the file content into the multipart form field.
		if _, err = io.Copy(fw, r); err != nil {
			return nil, nil, err
		}
	}

	// Close finalizes the multipart body by writing the trailing boundary.
	// The original code ignored this error, which could silently send a
	// truncated body.
	if err := writer.Close(); err != nil {
		return nil, nil, err
	}

	req, err := s.client.NewMultiPartRequest("POST", apiEndpoint, b)
	if err != nil {
		return nil, nil, err
	}
	req.Header.Set("Content-Type", writer.FormDataContentType())

	// PostAttachment response returns a JSON array (as multiple attachments can be posted)
	attachment := new([]Attachment)
	resp, err := s.client.Do(req, attachment)
	if err != nil {
		return nil, resp, err
	}
	return attachment, resp, nil
}
// Create creates an issue or a sub-task from a JSON representation.
// Creating a sub-task is similar to creating a regular issue, with two important differences:
// The issueType field must correspond to a sub-task issue type and you must provide a parent field in the issue create request containing the id or key of the parent issue.
//
// JIRA API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/issue-createIssues
func (s *IssueService) Create(issue *Issue) (*Issue, *Response, error) {
	apiEndpoint := "rest/api/2/issue/"
	req, err := s.client.NewRequest("POST", apiEndpoint, issue)
	if err != nil {
		return nil, nil, err
	}

	// Decode the body manually (instead of passing a destination to Do) so
	// the raw response stays available to the caller via resp.
	resp, err := s.client.Do(req, nil)
	if err != nil {
		// incase of error return the resp for further inspection
		return nil, resp, err
	}

	responseIssue := new(Issue)
	defer resp.Body.Close()
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		// Wrap the underlying error; the original discarded it.
		return nil, resp, fmt.Errorf("could not read the returned data: %s", err)
	}
	err = json.Unmarshal(data, responseIssue)
	if err != nil {
		// Original message also misspelled "unmarshal" and dropped the cause.
		return nil, resp, fmt.Errorf("could not unmarshal the data into struct: %s", err)
	}
	return responseIssue, resp, nil
}
// Delete an existing issue.
//
// deleteSubTasks controls whether the issue's sub-tasks are deleted as well;
// JIRA rejects the deletion of an issue that has sub-tasks unless this is set.
//
// JIRA API docs: https://docs.atlassian.com/jira/REST/cloud/#api/2/issue-deleteIssue
func (s *IssueService) Delete(issueID string, deleteSubTasks bool) (*Response, error) {
	// BUG FIX: the original used fmt.Sprint (not Sprintf), so "%s" was never
	// expanded and issueID was merely concatenated after the literal pattern.
	apiEndpoint := fmt.Sprintf("rest/api/2/issue/%s", issueID)

	if deleteSubTasks {
		opts := DeleteIssueOptions{DeleteSubTasks: "true"}
		var err error
		// BUG FIX: the original called addOptions("deleteSubtasks", &opts),
		// discarding the issue URL entirely; the options must be appended to
		// the issue endpoint itself.
		apiEndpoint, err = addOptions(apiEndpoint, &opts)
		if err != nil {
			return nil, err
		}
	}

	req, err := s.client.NewRequest("DELETE", apiEndpoint, nil)
	if err != nil {
		return nil, err
	}

	resp, err := s.client.Do(req, nil)
	if err != nil {
		// incase of error return the resp for further inspection
		return resp, err
	}
	return resp, nil
}
// AddComment adds a new comment to issueID.
//
// JIRA API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/issue-addComment
func (s *IssueService) AddComment(issueID string, comment *Comment) (*Comment, *Response, error) {
	endpoint := fmt.Sprintf("rest/api/2/issue/%s/comment", issueID)

	req, err := s.client.NewRequest("POST", endpoint, comment)
	if err != nil {
		return nil, nil, err
	}

	// JIRA echoes the created comment back in the response body.
	created := new(Comment)
	resp, err := s.client.Do(req, created)
	if err != nil {
		return nil, resp, err
	}
	return created, resp, nil
}
// AddLink adds a link between two issues.
//
// JIRA API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/issueLink
func (s *IssueService) AddLink(issueLink *IssueLink) (*Response, error) {
	// The endpoint is constant; the original wrapped it in fmt.Sprintf with
	// no formatting verbs (flagged by go vet / staticcheck S1039).
	apiEndpoint := "rest/api/2/issueLink"
	req, err := s.client.NewRequest("POST", apiEndpoint, issueLink)
	if err != nil {
		return nil, err
	}
	resp, err := s.client.Do(req, nil)
	return resp, err
}
// Search will search for tickets according to the jql
//
// When options is non-nil, StartAt, MaxResults and (if set) Expand are
// appended to the query string. The original implementation declared
// SearchOptions.Expand but silently ignored it.
//
// JIRA API docs: https://developer.atlassian.com/jiradev/jira-apis/jira-rest-apis/jira-rest-api-tutorials/jira-rest-api-example-query-issues
func (s *IssueService) Search(jql string, options *SearchOptions) ([]Issue, *Response, error) {
	u := fmt.Sprintf("rest/api/2/search?jql=%s", url.QueryEscape(jql))
	if options != nil {
		u += fmt.Sprintf("&startAt=%d&maxResults=%d", options.StartAt, options.MaxResults)
		// Only append expand when requested so the URL stays identical to the
		// previous behavior for callers that leave it empty.
		if options.Expand != "" {
			u += fmt.Sprintf("&expand=%s", url.QueryEscape(options.Expand))
		}
	}
	req, err := s.client.NewRequest("GET", u, nil)
	if err != nil {
		return []Issue{}, nil, err
	}

	v := new(searchResult)
	resp, err := s.client.Do(req, v)
	return v.Issues, resp, err
}
// GetCustomFields returns a map of customfield_* keys with string values.
func (s *IssueService) GetCustomFields(issueID string) (CustomFields, *Response, error) {
	endpoint := fmt.Sprintf("rest/api/2/issue/%s", issueID)
	req, err := s.client.NewRequest("GET", endpoint, nil)
	if err != nil {
		return nil, nil, err
	}

	// Decode into a generic map: custom fields are instance-specific, so no
	// fixed struct can describe them.
	raw := new(map[string]interface{})
	resp, err := s.client.Do(req, raw)
	if err != nil {
		return nil, resp, err
	}

	customFields := make(CustomFields)
	fieldsEntry := (*raw)["fields"]
	if fieldsEntry == nil {
		return customFields, resp, nil
	}

	fields, ok := fieldsEntry.(map[string]interface{})
	if !ok {
		return customFields, resp, nil
	}
	for name, val := range fields {
		if !strings.Contains(name, "customfield") {
			continue
		}
		// Object-valued custom fields expose their content under "value".
		if obj, isMap := val.(map[string]interface{}); isMap {
			if v, has := obj["value"]; has {
				val = v
			}
		}
		customFields[name] = fmt.Sprint(val)
	}
	return customFields, resp, nil
}
// GetTransitions gets a list of the transitions possible for this issue by the current user,
// along with fields that are required and their types.
//
// JIRA API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/issue-getTransitions
func (s *IssueService) GetTransitions(id string) ([]Transition, *Response, error) {
	// expand=transitions.fields asks JIRA to include the required-field
	// metadata for each transition.
	endpoint := fmt.Sprintf("rest/api/2/issue/%s/transitions?expand=transitions.fields", id)

	req, err := s.client.NewRequest("GET", endpoint, nil)
	if err != nil {
		return nil, nil, err
	}

	wrapper := new(transitionResult)
	resp, err := s.client.Do(req, wrapper)
	return wrapper.Transitions, resp, err
}
// DoTransition performs a transition on an issue.
// When performing the transition you can update or set other issue fields.
//
// JIRA API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/issue-doTransition
func (s *IssueService) DoTransition(ticketID, transitionID string) (*Response, error) {
	endpoint := fmt.Sprintf("rest/api/2/issue/%s/transitions", ticketID)

	// The API expects the transition id wrapped in a "transition" object.
	body := CreateTransitionPayload{Transition: TransitionPayload{ID: transitionID}}

	req, err := s.client.NewRequest("POST", endpoint, body)
	if err != nil {
		return nil, err
	}

	resp, err := s.client.Do(req, nil)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// InitIssueWithMetaAndFields returns Issue with values from fieldsConfig properly set.
// * metaProject should contain metaInformation about the project where the issue should be created.
// * metaIssuetype is the MetaInformation about the Issuetype that needs to be created.
// * fieldsConfig is a key->value pair where key represents the name of the field as seen in the UI
// And value is the string value for that particular key.
// Note: This method doesn't verify that the fieldsConfig is complete with mandatory fields. The fieldsConfig is
// supposed to be already verified with MetaIssueType.CheckCompleteAndAvailable. It will however return
// error if the key is not found.
// All values will be packed into Unknowns. This is much convenient. If the struct fields needs to be
// configured as well, marshalling and unmarshalling will set the proper fields.
func InitIssueWithMetaAndFields(metaProject *MetaProject, metaIssuetype *MetaIssueType, fieldsConfig map[string]string) (*Issue, error) {
	issue := new(Issue)
	issueFields := new(IssueFields)
	issueFields.Unknowns = tcontainer.NewMarshalMap()

	// map the field names the User presented to jira's internal key
	allFields, _ := metaIssuetype.GetAllFields()
	for key, value := range fieldsConfig 	{
		jiraKey, found := allFields[key]
		if !found {
			return nil, fmt.Errorf("Key %s is not found in the list of fields.", key)
		}

		// The schema type decides how the raw string value must be wrapped.
		valueType, err := metaIssuetype.Fields.String(jiraKey + "/schema/type")
		if err != nil {
			return nil, err
		}
		switch valueType {
		case "array":
			elemType, err := metaIssuetype.Fields.String(jiraKey + "/schema/items")
			if err != nil {
				return nil, err
			}
			switch elemType {
			case "component":
				// gofmt -s: the inner "Component" type name was redundant.
				issueFields.Unknowns[jiraKey] = []Component{{Name: value}}
			default:
				issueFields.Unknowns[jiraKey] = []string{value}
			}
		case "string", "date", "any":
			// "date" and "any" are passed through as plain strings, exactly
			// as the original's three separate identical cases did.
			issueFields.Unknowns[jiraKey] = value
		case "project":
			issueFields.Unknowns[jiraKey] = Project{
				Name: metaProject.Name,
				ID:   metaProject.Id,
			}
		case "priority":
			issueFields.Unknowns[jiraKey] = Priority{Name: value}
		case "user":
			issueFields.Unknowns[jiraKey] = User{
				Name: value,
			}
		case "issuetype":
			issueFields.Unknowns[jiraKey] = IssueType{
				Name: value,
			}
		default:
			return nil, fmt.Errorf("Unknown issue type encountered: %s for %s", valueType, key)
		}
	}

	issue.Fields = issueFields
	return issue, nil
}
// Revision boundary (artifact of concatenated file versions). Commit message: "Fix Issue Delete" — the text below is the post-fix revision of this file.
package jira
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/url"
"reflect"
"strings"
"time"
"github.com/fatih/structs"
"github.com/google/go-querystring/query"
"github.com/trivago/tgo/tcontainer"
)
const (
	// AssigneeAutomatic represents the value of the "Assignee: Automatic" of JIRA.
	// Sending "-1" as the assignee lets JIRA pick the default assignee.
	AssigneeAutomatic = "-1"
)
// IssueService handles Issues for the JIRA instance / API.
//
// JIRA API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/issue
type IssueService struct {
	// client performs the authenticated HTTP requests against the JIRA API.
	client *Client
}
// Issue represents a JIRA issue.
type Issue struct {
	Expand    string       `json:"expand,omitempty" structs:"expand,omitempty"`
	ID        string       `json:"id,omitempty" structs:"id,omitempty"`
	Self      string       `json:"self,omitempty" structs:"self,omitempty"`
	Key       string       `json:"key,omitempty" structs:"key,omitempty"`
	Fields    *IssueFields `json:"fields,omitempty" structs:"fields,omitempty"`
	Changelog *Changelog   `json:"changelog,omitempty" structs:"changelog,omitempty"`
}

// ChangelogItems reflects one single changelog item of a history item.
type ChangelogItems struct {
	Field      string      `json:"field" structs:"field"`
	FieldType  string      `json:"fieldtype" structs:"fieldtype"`
	From       interface{} `json:"from" structs:"from"`
	FromString string      `json:"fromString" structs:"fromString"`
	To         interface{} `json:"to" structs:"to"`
	ToString   string      `json:"toString" structs:"toString"`
}

// ChangelogHistory reflects one single changelog history entry.
type ChangelogHistory struct {
	Id      string           `json:"id" structs:"id"`
	Author  User             `json:"author" structs:"author"`
	Created string           `json:"created" structs:"created"`
	Items   []ChangelogItems `json:"items" structs:"items"`
}

// Changelog reflects the change log of an issue.
type Changelog struct {
	Histories []ChangelogHistory `json:"histories,omitempty"`
}

// Attachment represents a JIRA attachment.
type Attachment struct {
	Self      string `json:"self,omitempty" structs:"self,omitempty"`
	ID        string `json:"id,omitempty" structs:"id,omitempty"`
	Filename  string `json:"filename,omitempty" structs:"filename,omitempty"`
	Author    *User  `json:"author,omitempty" structs:"author,omitempty"`
	Created   string `json:"created,omitempty" structs:"created,omitempty"`
	Size      int    `json:"size,omitempty" structs:"size,omitempty"`
	MimeType  string `json:"mimeType,omitempty" structs:"mimeType,omitempty"`
	Content   string `json:"content,omitempty" structs:"content,omitempty"`
	Thumbnail string `json:"thumbnail,omitempty" structs:"thumbnail,omitempty"`
}

// Epic represents the epic to which an issue is associated.
// Note that this struct does not process the returned "color" value.
type Epic struct {
	ID      int    `json:"id" structs:"id"`
	Key     string `json:"key" structs:"key"`
	Self    string `json:"self" structs:"self"`
	Name    string `json:"name" structs:"name"`
	Summary string `json:"summary" structs:"summary"`
	Done    bool   `json:"done" structs:"done"`
}
// IssueFields represents single fields of a JIRA issue.
// Every JIRA issue has several fields attached.
type IssueFields struct {
	// TODO Missing fields
	//      * "aggregatetimespent": null,
	//      * "workratio": -1,
	//      * "lastViewed": null,
	//      * "aggregatetimeoriginalestimate": null,
	//      * "aggregatetimeestimate": null,
	//      * "environment": null,
	Expand               string        `json:"expand,omitempty" structs:"expand,omitempty"`
	Type                 IssueType     `json:"issuetype" structs:"issuetype"`
	Project              Project       `json:"project,omitempty" structs:"project,omitempty"`
	Resolution           *Resolution   `json:"resolution,omitempty" structs:"resolution,omitempty"`
	Priority             *Priority     `json:"priority,omitempty" structs:"priority,omitempty"`
	Resolutiondate       string        `json:"resolutiondate,omitempty" structs:"resolutiondate,omitempty"`
	Created              string        `json:"created,omitempty" structs:"created,omitempty"`
	Duedate              string        `json:"duedate,omitempty" structs:"duedate,omitempty"`
	Watches              *Watches      `json:"watches,omitempty" structs:"watches,omitempty"`
	Assignee             *User         `json:"assignee,omitempty" structs:"assignee,omitempty"`
	Updated              string        `json:"updated,omitempty" structs:"updated,omitempty"`
	Description          string        `json:"description,omitempty" structs:"description,omitempty"`
	Summary              string        `json:"summary" structs:"summary"`
	// NOTE(review): this json tag is capitalized ("Creator") unlike every
	// other tag in the struct — verify against an actual JIRA payload.
	Creator              *User         `json:"Creator,omitempty" structs:"Creator,omitempty"`
	Reporter             *User         `json:"reporter,omitempty" structs:"reporter,omitempty"`
	Components           []*Component  `json:"components,omitempty" structs:"components,omitempty"`
	Status               *Status       `json:"status,omitempty" structs:"status,omitempty"`
	Progress             *Progress     `json:"progress,omitempty" structs:"progress,omitempty"`
	AggregateProgress    *Progress     `json:"aggregateprogress,omitempty" structs:"aggregateprogress,omitempty"`
	TimeTracking         *TimeTracking `json:"timetracking,omitempty" structs:"timetracking,omitempty"`
	TimeSpent            int           `json:"timespent,omitempty" structs:"timespent,omitempty"`
	TimeEstimate         int           `json:"timeestimate,omitempty" structs:"timeestimate,omitempty"`
	TimeOriginalEstimate int           `json:"timeoriginalestimate,omitempty" structs:"timeoriginalestimate,omitempty"`
	Worklog              *Worklog      `json:"worklog,omitempty" structs:"worklog,omitempty"`
	IssueLinks           []*IssueLink  `json:"issuelinks,omitempty" structs:"issuelinks,omitempty"`
	Comments             *Comments     `json:"comment,omitempty" structs:"comment,omitempty"`
	FixVersions          []*FixVersion `json:"fixVersions,omitempty" structs:"fixVersions,omitempty"`
	Labels               []string      `json:"labels,omitempty" structs:"labels,omitempty"`
	Subtasks             []*Subtasks   `json:"subtasks,omitempty" structs:"subtasks,omitempty"`
	Attachments          []*Attachment `json:"attachment,omitempty" structs:"attachment,omitempty"`
	Epic                 *Epic         `json:"epic,omitempty" structs:"epic,omitempty"`
	Parent               *Parent       `json:"parent,omitempty" structs:"parent,omitempty"`
	// Unknowns collects JIRA custom fields (keys not matching any struct
	// tag); it is filled by UnmarshalJSON and flattened by MarshalJSON.
	Unknowns             tcontainer.MarshalMap
}
// DeleteIssueOptions contains the query parameters for Delete.
type DeleteIssueOptions struct {
	// DeleteSubtasks, when set to the string "true", asks JIRA to delete the
	// issue's sub-tasks together with the issue.
	DeleteSubtasks string `url:"deleteSubtasks"`
}
// MarshalJSON is a custom JSON marshal function for the IssueFields structs.
// It handles JIRA custom fields and maps those from / to "Unknowns" key.
func (i *IssueFields) MarshalJSON() ([]byte, error) {
	m := structs.Map(i)
	unknowns, okay := m["Unknowns"]
	if okay {
		// if unknowns present, shift all key value from unkown to a level up
		// so the serialized object matches what JIRA expects.
		for key, value := range unknowns.(tcontainer.MarshalMap) {
			m[key] = value
		}
		// Drop the container key itself; it is an internal detail.
		delete(m, "Unknowns")
	}
	return json.Marshal(m)
}
// UnmarshalJSON is a custom JSON unmarshal function for the IssueFields structs.
// It handles JIRA custom fields and maps those from / to "Unknowns" key.
func (i *IssueFields) UnmarshalJSON(data []byte) error {
	// Do the normal unmarshalling first
	// Details for this way: http://choly.ca/post/go-json-marshalling/
	type Alias IssueFields
	aux := &struct {
		*Alias
	}{
		Alias: (*Alias)(i),
	}
	if err := json.Unmarshal(data, &aux); err != nil {
		return err
	}

	// Unmarshal a second time into a generic map so keys that do not match
	// any struct field can be detected.
	totalMap := tcontainer.NewMarshalMap()
	if err := json.Unmarshal(data, &totalMap); err != nil {
		return err
	}

	t := reflect.TypeOf(*i)
	// Loop variable renamed from "i" to "idx": the original shadowed the
	// method receiver inside the loop body.
	for idx := 0; idx < t.NumField(); idx++ {
		field := t.Field(idx)
		tagDetail := field.Tag.Get("json")
		if tagDetail == "" {
			// ignore if there are no tags
			continue
		}
		options := strings.Split(tagDetail, ",")
		if len(options) == 0 {
			return fmt.Errorf("No tags options found for %s", field.Name)
		}
		// the first one is the json tag
		key := options[0]
		if _, okay := totalMap.Value(key); okay {
			delete(totalMap, key)
		}
	}
	// The original reassigned the local parameter here
	// (i = (*IssueFields)(aux.Alias)) — a no-op, removed.
	// All the tags found in the struct were removed above. Whatever is left
	// are unknowns to the struct.
	i.Unknowns = totalMap
	return nil
}
// IssueType represents a type of a JIRA issue.
// Typical types are "Request", "Bug", "Story", ...
type IssueType struct {
	Self        string `json:"self,omitempty" structs:"self,omitempty"`
	ID          string `json:"id,omitempty" structs:"id,omitempty"`
	Description string `json:"description,omitempty" structs:"description,omitempty"`
	IconURL     string `json:"iconUrl,omitempty" structs:"iconUrl,omitempty"`
	Name        string `json:"name,omitempty" structs:"name,omitempty"`
	Subtask     bool   `json:"subtask,omitempty" structs:"subtask,omitempty"`
	AvatarID    int    `json:"avatarId,omitempty" structs:"avatarId,omitempty"`
}

// Resolution represents a resolution of a JIRA issue.
// Typical types are "Fixed", "Suspended", "Won't Fix", ...
type Resolution struct {
	Self        string `json:"self" structs:"self"`
	ID          string `json:"id" structs:"id"`
	Description string `json:"description" structs:"description"`
	Name        string `json:"name" structs:"name"`
}

// Priority represents a priority of a JIRA issue.
// Typical types are "Normal", "Moderate", "Urgent", ...
type Priority struct {
	Self    string `json:"self,omitempty" structs:"self,omitempty"`
	IconURL string `json:"iconUrl,omitempty" structs:"iconUrl,omitempty"`
	Name    string `json:"name,omitempty" structs:"name,omitempty"`
	ID      string `json:"id,omitempty" structs:"id,omitempty"`
}

// Watches represents a type of how many user are "observing" a JIRA issue to track the status / updates.
type Watches struct {
	Self       string `json:"self,omitempty" structs:"self,omitempty"`
	WatchCount int    `json:"watchCount,omitempty" structs:"watchCount,omitempty"`
	IsWatching bool   `json:"isWatching,omitempty" structs:"isWatching,omitempty"`
}

// AvatarUrls represents different dimensions of avatars / images.
// Field names spell out the pixel dimensions (e.g. Four8X48 is the 48x48
// avatar) because Go identifiers cannot start with a digit.
type AvatarUrls struct {
	Four8X48  string `json:"48x48,omitempty" structs:"48x48,omitempty"`
	Two4X24   string `json:"24x24,omitempty" structs:"24x24,omitempty"`
	One6X16   string `json:"16x16,omitempty" structs:"16x16,omitempty"`
	Three2X32 string `json:"32x32,omitempty" structs:"32x32,omitempty"`
}

// Component represents a "component" of a JIRA issue.
// Components can be user defined in every JIRA instance.
type Component struct {
	Self string `json:"self,omitempty" structs:"self,omitempty"`
	ID   string `json:"id,omitempty" structs:"id,omitempty"`
	Name string `json:"name,omitempty" structs:"name,omitempty"`
}

// Status represents the current status of a JIRA issue.
// Typical status are "Open", "In Progress", "Closed", ...
// Status can be user defined in every JIRA instance.
type Status struct {
	Self           string         `json:"self" structs:"self"`
	Description    string         `json:"description" structs:"description"`
	IconURL        string         `json:"iconUrl" structs:"iconUrl"`
	Name           string         `json:"name" structs:"name"`
	ID             string         `json:"id" structs:"id"`
	StatusCategory StatusCategory `json:"statusCategory" structs:"statusCategory"`
}

// StatusCategory represents the category a status belongs to.
// Those categories can be user defined in every JIRA instance.
type StatusCategory struct {
	Self      string `json:"self" structs:"self"`
	ID        int    `json:"id" structs:"id"`
	Name      string `json:"name" structs:"name"`
	Key       string `json:"key" structs:"key"`
	ColorName string `json:"colorName" structs:"colorName"`
}

// Progress represents the progress of a JIRA issue.
type Progress struct {
	Progress int `json:"progress" structs:"progress"`
	Total    int `json:"total" structs:"total"`
}

// Parent represents the parent of a JIRA issue, to be used with subtask issue types.
type Parent struct {
	ID  string `json:"id,omitempty" structs:"id"`
	Key string `json:"key,omitempty" structs:"key"`
}

// Time represents the Time definition of JIRA as a time.Time of go.
// See Time.UnmarshalJSON for the JIRA-specific timestamp layout.
type Time time.Time

// transitionResult wraps the list of transitions returned by the issue
// transitions endpoint. (The original comment incorrectly described this as
// a search-result wrapper.)
type transitionResult struct {
	Transitions []Transition `json:"transitions" structs:"transitions"`
}

// Transition represents an issue transition in JIRA.
type Transition struct {
	ID     string                     `json:"id" structs:"id"`
	Name   string                     `json:"name" structs:"name"`
	Fields map[string]TransitionField `json:"fields" structs:"fields"`
}

// TransitionField represents the value of one Transition.
type TransitionField struct {
	Required bool `json:"required" structs:"required"`
}

// CreateTransitionPayload is used for creating new issue transitions.
type CreateTransitionPayload struct {
	Transition TransitionPayload `json:"transition" structs:"transition"`
}
// TransitionPayload represents the request payload of Transistion calls like DoTransition
type TransitionPayload struct {
ID string `json:"id" structs:"id"`
}
// UnmarshalJSON will transform the JIRA time into a time.Time
// during the transformation of the JIRA JSON response.
// The layout includes the surrounding double quotes, since the raw JSON
// token (a quoted string) is parsed directly.
func (t *Time) UnmarshalJSON(b []byte) error {
	parsed, err := time.Parse(`"2006-01-02T15:04:05.999-0700"`, string(b))
	if err != nil {
		return err
	}
	*t = Time(parsed)
	return nil
}
// Worklog represents the work log of a JIRA issue.
// One Worklog contains zero or n WorklogRecords
// JIRA Wiki: https://confluence.atlassian.com/jira/logging-work-on-an-issue-185729605.html
type Worklog struct {
	StartAt    int             `json:"startAt" structs:"startAt"`
	MaxResults int             `json:"maxResults" structs:"maxResults"`
	Total      int             `json:"total" structs:"total"`
	Worklogs   []WorklogRecord `json:"worklogs" structs:"worklogs"`
}

// WorklogRecord represents one entry of a Worklog
type WorklogRecord struct {
	Self             string `json:"self" structs:"self"`
	Author           User   `json:"author" structs:"author"`
	UpdateAuthor     User   `json:"updateAuthor" structs:"updateAuthor"`
	Comment          string `json:"comment" structs:"comment"`
	Created          Time   `json:"created" structs:"created"`
	Updated          Time   `json:"updated" structs:"updated"`
	Started          Time   `json:"started" structs:"started"`
	TimeSpent        string `json:"timeSpent" structs:"timeSpent"`
	TimeSpentSeconds int    `json:"timeSpentSeconds" structs:"timeSpentSeconds"`
	ID               string `json:"id" structs:"id"`
	IssueID          string `json:"issueId" structs:"issueId"`
}

// TimeTracking represents the timetracking fields of a JIRA issue.
// Estimates come in both human-readable (e.g. "3h 20m") and seconds form.
type TimeTracking struct {
	OriginalEstimate         string `json:"originalEstimate,omitempty" structs:"originalEstimate,omitempty"`
	RemainingEstimate        string `json:"remainingEstimate,omitempty" structs:"remainingEstimate,omitempty"`
	TimeSpent                string `json:"timeSpent,omitempty" structs:"timeSpent,omitempty"`
	OriginalEstimateSeconds  int    `json:"originalEstimateSeconds,omitempty" structs:"originalEstimateSeconds,omitempty"`
	RemainingEstimateSeconds int    `json:"remainingEstimateSeconds,omitempty" structs:"remainingEstimateSeconds,omitempty"`
	TimeSpentSeconds         int    `json:"timeSpentSeconds,omitempty" structs:"timeSpentSeconds,omitempty"`
}

// Subtasks represents all issues of a parent issue.
type Subtasks struct {
	ID     string      `json:"id" structs:"id"`
	Key    string      `json:"key" structs:"key"`
	Self   string      `json:"self" structs:"self"`
	Fields IssueFields `json:"fields" structs:"fields"`
}

// IssueLink represents a link between two issues in JIRA.
type IssueLink struct {
	ID           string        `json:"id,omitempty" structs:"id,omitempty"`
	Self         string        `json:"self,omitempty" structs:"self,omitempty"`
	Type         IssueLinkType `json:"type" structs:"type"`
	OutwardIssue *Issue        `json:"outwardIssue" structs:"outwardIssue"`
	InwardIssue  *Issue        `json:"inwardIssue" structs:"inwardIssue"`
	Comment      *Comment      `json:"comment,omitempty" structs:"comment,omitempty"`
}

// IssueLinkType represents a type of a link between to issues in JIRA.
// Typical issue link types are "Related to", "Duplicate", "Is blocked by", etc.
type IssueLinkType struct {
	ID      string `json:"id,omitempty" structs:"id,omitempty"`
	Self    string `json:"self,omitempty" structs:"self,omitempty"`
	Name    string `json:"name" structs:"name"`
	Inward  string `json:"inward" structs:"inward"`
	Outward string `json:"outward" structs:"outward"`
}

// Comments represents a list of Comment.
type Comments struct {
	Comments []*Comment `json:"comments,omitempty" structs:"comments,omitempty"`
}

// Comment represents a comment by a person to an issue in JIRA.
type Comment struct {
	ID         string            `json:"id,omitempty" structs:"id,omitempty"`
	Self       string            `json:"self,omitempty" structs:"self,omitempty"`
	Name       string            `json:"name,omitempty" structs:"name,omitempty"`
	Author     User              `json:"author,omitempty" structs:"author,omitempty"`
	Body       string            `json:"body,omitempty" structs:"body,omitempty"`
	UpdateAuthor User            `json:"updateAuthor,omitempty" structs:"updateAuthor,omitempty"`
	Updated    string            `json:"updated,omitempty" structs:"updated,omitempty"`
	Created    string            `json:"created,omitempty" structs:"created,omitempty"`
	Visibility CommentVisibility `json:"visibility,omitempty" structs:"visibility,omitempty"`
}

// FixVersion represents a software release in which an issue is fixed.
type FixVersion struct {
	Archived        *bool  `json:"archived,omitempty" structs:"archived,omitempty"`
	ID              string `json:"id,omitempty" structs:"id,omitempty"`
	Name            string `json:"name,omitempty" structs:"name,omitempty"`
	ProjectID       int    `json:"projectId,omitempty" structs:"projectId,omitempty"`
	ReleaseDate     string `json:"releaseDate,omitempty" structs:"releaseDate,omitempty"`
	Released        *bool  `json:"released,omitempty" structs:"released,omitempty"`
	Self            string `json:"self,omitempty" structs:"self,omitempty"`
	UserReleaseDate string `json:"userReleaseDate,omitempty" structs:"userReleaseDate,omitempty"`
}

// CommentVisibility represents he visibility of a comment.
// E.g. Type could be "role" and Value "Administrators"
type CommentVisibility struct {
	Type  string `json:"type,omitempty" structs:"type,omitempty"`
	Value string `json:"value,omitempty" structs:"value,omitempty"`
}

// SearchOptions specifies the optional parameters to various List methods that
// support pagination.
// Pagination is used for the JIRA REST APIs to conserve server resources and limit
// response size for resources that return potentially large collection of items.
// A request to a pages API will result in a values array wrapped in a JSON object with some paging metadata
// Default Pagination options
type SearchOptions struct {
	// StartAt: The starting index of the returned projects. Base index: 0.
	StartAt int `url:"startAt,omitempty"`
	// MaxResults: The maximum number of projects to return per page. Default: 50.
	MaxResults int `url:"maxResults,omitempty"`
	// Expand: Expand specific sections in the returned issues
	Expand string `url:"expand,omitempty"`
}

// searchResult is only a small wrapper around the Search (with JQL) method
// to be able to parse the results
type searchResult struct {
	Issues     []Issue `json:"issues" structs:"issues"`
	StartAt    int     `json:"startAt" structs:"startAt"`
	MaxResults int     `json:"maxResults" structs:"maxResults"`
	Total      int     `json:"total" structs:"total"`
}

// GetQueryOptions specifies the optional parameters for the Get Issue methods
type GetQueryOptions struct {
	// Fields is the list of fields to return for the issue. By default, all fields are returned.
	Fields string `url:"fields,omitempty"`
	Expand string `url:"expand,omitempty"`
	// Properties is the list of properties to return for the issue. By default no properties are returned.
	Properties string `url:"properties,omitempty"`
	// FieldsByKeys if true then fields in issues will be referenced by keys instead of ids
	FieldsByKeys  bool `url:"fieldsByKeys,omitempty"`
	UpdateHistory bool `url:"updateHistory,omitempty"`
}

// CustomFields represents custom fields of JIRA
// This can heavily differ between JIRA instances
type CustomFields map[string]string
// Get returns a full representation of the issue for the given issue key.
// JIRA will attempt to identify the issue by the issueIdOrKey path parameter.
// This can be an issue id, or an issue key.
// If the issue cannot be found via an exact match, JIRA will also look for the issue in a case-insensitive way, or by looking to see if the issue was moved.
//
// The given options will be appended to the query string
//
// JIRA API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/issue-getIssue
func (s *IssueService) Get(issueID string, options *GetQueryOptions) (*Issue, *Response, error) {
	req, err := s.client.NewRequest("GET", fmt.Sprintf("rest/api/2/issue/%s", issueID), nil)
	if err != nil {
		return nil, nil, err
	}

	// Encode the optional parameters into the request's query string.
	if options != nil {
		params, qerr := query.Values(options)
		if qerr != nil {
			return nil, nil, qerr
		}
		req.URL.RawQuery = params.Encode()
	}

	issue := new(Issue)
	resp, err := s.client.Do(req, issue)
	if err != nil {
		return nil, resp, err
	}
	return issue, resp, nil
}
// DownloadAttachment returns a Response of an attachment for a given attachmentID.
// The attachment is in the Response.Body of the response.
// This is an io.ReadCloser.
// The caller should close the resp.Body.
func (s *IssueService) DownloadAttachment(attachmentID string) (*Response, error) {
	endpoint := fmt.Sprintf("secure/attachment/%s/", attachmentID)

	req, err := s.client.NewRequest("GET", endpoint, nil)
	if err != nil {
		return nil, err
	}

	// The body is intentionally not decoded here: the caller consumes it as a stream.
	return s.client.Do(req, nil)
}
// PostAttachment uploads r (io.Reader) as an attachment to a given attachmentID
// (the issue id or key), under the name attachmentName.
// It returns the created attachments; JIRA answers with a JSON array because
// several attachments can be posted at once.
func (s *IssueService) PostAttachment(attachmentID string, r io.Reader, attachmentName string) (*[]Attachment, *Response, error) {
	apiEndpoint := fmt.Sprintf("rest/api/2/issue/%s/attachments", attachmentID)

	b := new(bytes.Buffer)
	writer := multipart.NewWriter(b)

	fw, err := writer.CreateFormFile("file", attachmentName)
	if err != nil {
		return nil, nil, err
	}

	if r != nil {
		// Copy the file content into the multipart form field.
		if _, err = io.Copy(fw, r); err != nil {
			return nil, nil, err
		}
	}

	// Close writes the trailing multipart boundary; ignoring its error (as the
	// previous code did) could silently send a truncated/invalid request body.
	if err := writer.Close(); err != nil {
		return nil, nil, err
	}

	req, err := s.client.NewMultiPartRequest("POST", apiEndpoint, b)
	if err != nil {
		return nil, nil, err
	}
	req.Header.Set("Content-Type", writer.FormDataContentType())

	// PostAttachment response returns a JSON array (as multiple attachments can be posted)
	attachment := new([]Attachment)
	resp, err := s.client.Do(req, attachment)
	if err != nil {
		return nil, resp, err
	}

	return attachment, resp, nil
}
// Create creates an issue or a sub-task from a JSON representation.
// Creating a sub-task is similar to creating a regular issue, with two important differences:
// The issueType field must correspond to a sub-task issue type and you must provide a parent field in the issue create request containing the id or key of the parent issue.
//
// JIRA API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/issue-createIssues
func (s *IssueService) Create(issue *Issue) (*Issue, *Response, error) {
	apiEndpoint := "rest/api/2/issue/"
	req, err := s.client.NewRequest("POST", apiEndpoint, issue)
	if err != nil {
		return nil, nil, err
	}

	resp, err := s.client.Do(req, nil)
	if err != nil {
		// incase of error return the resp for further inspection
		return nil, resp, err
	}

	// Decode the body manually (instead of via client.Do) so a decoding
	// failure can be reported together with the response.
	defer resp.Body.Close()
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		// Include the underlying error (previously dropped) so callers can diagnose the failure.
		return nil, resp, fmt.Errorf("could not read the returned data: %s", err)
	}
	responseIssue := new(Issue)
	if err := json.Unmarshal(data, responseIssue); err != nil {
		return nil, resp, fmt.Errorf("could not unmarshal the returned data: %s", err)
	}
	return responseIssue, resp, nil
}
// Delete an existing issue.
// When deleteSubTasks is true, the issue's sub-tasks are removed as well.
//
// JIRA API docs: https://docs.atlassian.com/jira/REST/cloud/#api/2/issue-deleteIssue
func (s *IssueService) Delete(issueID string, deleteSubTasks bool) (*Response, error) {
	// Note: "endpoint" avoids shadowing the imported net/url package.
	endpoint := fmt.Sprintf("rest/api/2/issue/%s", issueID)
	if deleteSubTasks {
		opts := DeleteIssueOptions{DeleteSubtasks: "true"}
		var err error
		if endpoint, err = addOptions(endpoint, &opts); err != nil {
			// incase of error return the resp for further inspection
			return nil, err
		}
	}

	req, err := s.client.NewRequest("DELETE", endpoint, nil)
	if err != nil {
		return nil, err
	}

	// On error, Do's response is still returned for further inspection.
	return s.client.Do(req, nil)
}
// AddComment adds a new comment to issueID.
// The created comment (as echoed back by JIRA) is returned.
//
// JIRA API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/issue-addComment
func (s *IssueService) AddComment(issueID string, comment *Comment) (*Comment, *Response, error) {
	endpoint := fmt.Sprintf("rest/api/2/issue/%s/comment", issueID)

	req, err := s.client.NewRequest("POST", endpoint, comment)
	if err != nil {
		return nil, nil, err
	}

	created := new(Comment)
	resp, err := s.client.Do(req, created)
	if err != nil {
		return nil, resp, err
	}
	return created, resp, nil
}
// AddLink adds a link between two issues.
// Both the response and the error from the underlying request are returned,
// so callers can inspect the response on failure.
//
// JIRA API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/issueLink
func (s *IssueService) AddLink(issueLink *IssueLink) (*Response, error) {
	// The endpoint is constant: fmt.Sprintf with no format verbs was
	// pointless (flagged by go vet / staticcheck S1039).
	apiEndpoint := "rest/api/2/issueLink"
	req, err := s.client.NewRequest("POST", apiEndpoint, issueLink)
	if err != nil {
		return nil, err
	}

	resp, err := s.client.Do(req, nil)
	return resp, err
}
// Search will search for tickets according to the jql
//
// JIRA API docs: https://developer.atlassian.com/jiradev/jira-apis/jira-rest-apis/jira-rest-api-tutorials/jira-rest-api-example-query-issues
func (s *IssueService) Search(jql string, options *SearchOptions) ([]Issue, *Response, error) {
	var u string
	if options == nil {
		u = fmt.Sprintf("rest/api/2/search?jql=%s", url.QueryEscape(jql))
	} else {
		// NOTE(review): only StartAt and MaxResults are sent; options.Expand
		// is silently ignored here — confirm whether it should be appended.
		// A nil-options call also omits startAt/maxResults entirely, letting
		// the server apply its defaults.
		u = fmt.Sprintf("rest/api/2/search?jql=%s&startAt=%d&maxResults=%d", url.QueryEscape(jql),
			options.StartAt, options.MaxResults)
	}
	req, err := s.client.NewRequest("GET", u, nil)
	if err != nil {
		return []Issue{}, nil, err
	}
	v := new(searchResult)
	resp, err := s.client.Do(req, v)
	return v.Issues, resp, err
}
// GetCustomFields returns a map of customfield_* keys with string values
// for the given issue. Non-custom fields are skipped; custom field values
// that are objects with a "value" key are flattened to that value, and
// everything is stringified with fmt.Sprint.
func (s *IssueService) GetCustomFields(issueID string) (CustomFields, *Response, error) {
	apiEndpoint := fmt.Sprintf("rest/api/2/issue/%s", issueID)
	req, err := s.client.NewRequest("GET", apiEndpoint, nil)
	if err != nil {
		return nil, nil, err
	}
	// Decode into a generic map since custom fields differ per JIRA instance.
	issue := new(map[string]interface{})
	resp, err := s.client.Do(req, issue)
	if err != nil {
		return nil, resp, err
	}
	m := *issue
	f := m["fields"]
	cf := make(CustomFields)
	if f == nil {
		// No fields present at all: return an empty (non-nil) map.
		return cf, resp, nil
	}
	if rec, ok := f.(map[string]interface{}); ok {
		for key, val := range rec {
			if strings.Contains(key, "customfield") {
				// Option-style custom fields arrive as {"value": ...}; unwrap them.
				if valMap, ok := val.(map[string]interface{}); ok {
					if v, ok := valMap["value"]; ok {
						val = v
					}
				}
				cf[key] = fmt.Sprint(val)
			}
		}
	}
	return cf, resp, nil
}
// GetTransitions gets a list of the transitions possible for this issue by the current user,
// along with fields that are required and their types.
//
// JIRA API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/issue-getTransitions
func (s *IssueService) GetTransitions(id string) ([]Transition, *Response, error) {
	endpoint := fmt.Sprintf("rest/api/2/issue/%s/transitions?expand=transitions.fields", id)

	req, err := s.client.NewRequest("GET", endpoint, nil)
	if err != nil {
		return nil, nil, err
	}

	// JIRA wraps the transitions in an envelope object; unwrap before returning.
	wrapped := new(transitionResult)
	resp, err := s.client.Do(req, wrapped)
	return wrapped.Transitions, resp, err
}
// DoTransition performs a transition on an issue.
// When performing the transition you can update or set other issue fields.
//
// JIRA API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/issue-doTransition
func (s *IssueService) DoTransition(ticketID, transitionID string) (*Response, error) {
	apiEndpoint := fmt.Sprintf("rest/api/2/issue/%s/transitions", ticketID)

	payload := CreateTransitionPayload{
		Transition: TransitionPayload{
			ID: transitionID,
		},
	}

	req, err := s.client.NewRequest("POST", apiEndpoint, payload)
	if err != nil {
		return nil, err
	}

	resp, err := s.client.Do(req, nil)
	if err != nil {
		// Return the response alongside the error for further inspection,
		// consistent with the other IssueService methods (it was previously dropped).
		return resp, err
	}
	return resp, nil
}
// InitIssueWithMetaAndFields returns Issue with with values from fieldsConfig properly set.
// * metaProject should contain metaInformation about the project where the issue should be created.
// * metaIssuetype is the MetaInformation about the Issuetype that needs to be created.
// * fieldsConfig is a key->value pair where key represents the name of the field as seen in the UI
// And value is the string value for that particular key.
// Note: This method doesn't verify that the fieldsConfig is complete with mandatory fields. The fieldsConfig is
// supposed to be already verified with MetaIssueType.CheckCompleteAndAvailable. It will however return
// error if the key is not found.
// All values will be packed into Unknowns. This is much convenient. If the struct fields needs to be
// configured as well, marshalling and unmarshalling will set the proper fields.
func InitIssueWithMetaAndFields(metaProject *MetaProject, metaIssuetype *MetaIssueType, fieldsConfig map[string]string) (*Issue, error) {
	issue := new(Issue)
	issueFields := new(IssueFields)
	issueFields.Unknowns = tcontainer.NewMarshalMap()

	// map the field names the User presented to jira's internal key.
	// The error was previously discarded silently; a failure here would make
	// every lookup below fail with a misleading "key not found" message.
	allFields, err := metaIssuetype.GetAllFields()
	if err != nil {
		return nil, err
	}

	for key, value := range fieldsConfig {
		jiraKey, found := allFields[key]
		if !found {
			return nil, fmt.Errorf("Key %s is not found in the list of fields.", key)
		}

		valueType, err := metaIssuetype.Fields.String(jiraKey + "/schema/type")
		if err != nil {
			return nil, err
		}
		switch valueType {
		case "array":
			// Array-valued field: the element type decides the wrapper.
			elemType, err := metaIssuetype.Fields.String(jiraKey + "/schema/items")
			if err != nil {
				return nil, err
			}
			switch elemType {
			case "component":
				issueFields.Unknowns[jiraKey] = []Component{{Name: value}}
			default:
				issueFields.Unknowns[jiraKey] = []string{value}
			}
		case "string", "date", "any":
			// "any" is treated as a plain string, as are dates (already formatted by the caller).
			issueFields.Unknowns[jiraKey] = value
		case "project":
			issueFields.Unknowns[jiraKey] = Project{
				Name: metaProject.Name,
				ID:   metaProject.Id,
			}
		case "priority":
			issueFields.Unknowns[jiraKey] = Priority{Name: value}
		case "user":
			issueFields.Unknowns[jiraKey] = User{
				Name: value,
			}
		case "issuetype":
			issueFields.Unknowns[jiraKey] = IssueType{
				Name: value,
			}
		default:
			return nil, fmt.Errorf("Unknown issue type encountered: %s for %s", valueType, key)
		}
	}

	issue.Fields = issueFields
	return issue, nil
}
|
package main
import (
"container/heap"
"errors"
"fmt"
)
// + consumables (potion-like or throwing dart, strategic + tactical)
// + equipables
// + recharging with depth (rod-like, strategic & a little tactical + mana)
// - digging, fog, slowing clouds or something, fear,
// fireball, lightning bolt, shatter, blink, teleport other

// consumable is the interface implemented by single-use items (potions and
// projectiles) that the player can pick up and use.
type consumable interface {
	Use(*game, event) error // consume the item; non-nil error means it was not used
	String() string         // singular display name, e.g. "potion of berserk"
	Plural() string         // plural display name for messages
	Desc() string           // full description shown to the player
	Letter() rune           // symbol used for this item category in the UI
	Int() int               // underlying enum value — presumably used as a stable id; TODO confirm
}
// UseConsumable records the use of one unit of c: it decrements the player's
// stock (removing the entry once exhausted), logs the use in the story, and
// counts the action for fairness bookkeeping.
func (g *game) UseConsumable(c consumable) {
	remaining := g.Player.Consumables[c] - 1
	g.Player.Consumables[c] = remaining
	g.StoryPrintf("You used %s.", Indefinite(c.String(), false))
	if remaining <= 0 {
		delete(g.Player.Consumables, c)
	}
	g.FairAction()
}
// potion enumerates the quaffable consumables of the game.
type potion int

const (
	HealWoundsPotion potion = iota
	TeleportationPotion
	BerserkPotion
	DescentPotion
	RunningPotion
	EvasionPotion
	LignificationPotion
	MagicMappingPotion
	MagicPotion
	WallPotion
	// below unimplemented
	ResistancePotion
)
// String returns the full display name of the potion, e.g. "potion of walls".
// An unknown value yields the bare "potion".
func (p potion) String() string {
	switch p {
	case HealWoundsPotion:
		return "potion of heal wounds"
	case TeleportationPotion:
		return "potion of teleportation"
	case DescentPotion:
		return "potion of descent"
	case EvasionPotion:
		return "potion of evasion"
	case MagicMappingPotion:
		return "potion of magic mapping"
	case MagicPotion:
		return "potion of refill magic"
	case BerserkPotion:
		return "potion of berserk"
	case RunningPotion:
		return "potion of running"
	case LignificationPotion:
		return "potion of lignification"
	case WallPotion:
		return "potion of walls"
	case ResistancePotion:
		return "potion of resistance"
	}
	return "potion"
}
// Plural returns the plural display name of the potion.
func (p potion) Plural() (text string) {
	// never used for potions
	return p.String()
}
// Desc returns a full player-facing sentence describing the potion's effect.
// Fixes in user-facing text: "go to deeper" -> "go deeper", "resistent" -> "resistant".
func (p potion) Desc() (text string) {
	switch p {
	case HealWoundsPotion:
		text = "heals you a good deal."
	case TeleportationPotion:
		text = "teleports you away after a short delay."
	case DescentPotion:
		text = "makes you go deeper in the Underground."
	case EvasionPotion:
		text = "makes you better at avoiding blows."
	case MagicMappingPotion:
		text = "shows you the map."
	case MagicPotion:
		text = "replenishes your magical reserves."
	case BerserkPotion:
		text = "makes you enter a crazy rage, temporarily making you faster, stronger and healthier. You cannot drink potions while berserk, and afterwards it leaves you slow and exhausted."
	case RunningPotion:
		text = "makes you move faster."
	case LignificationPotion:
		text = "makes you more resistant to physical blows, but you are attached to the ground while the effect lasts."
	case WallPotion:
		text = "replaces free cells around you with temporal walls."
	case ResistancePotion:
		text = "makes you resistant to the elements."
	}
	return fmt.Sprintf("The %s %s", p, text)
}
// Letter returns the UI symbol shared by all potions.
func (p potion) Letter() rune {
	return '!'
}

// Int returns the underlying enum value of the potion.
func (p potion) Int() int {
	return int(p)
}
// Use quaffs the potion: it validates that the player holds one and may drink,
// dispatches to the per-potion effect, and on success renews the event and
// consumes one unit. The order (checks, effect, Renew, UseConsumable) matters:
// the potion must not be consumed when the effect returns an error.
func (p potion) Use(g *game, ev event) error {
	quant, ok := g.Player.Consumables[p]
	if !ok || quant <= 0 {
		// should not happen
		return errors.New("no such consumable: " + p.String())
	}
	if g.Player.HasStatus(StatusNausea) {
		return errors.New("You cannot drink potions while sick.")
	}
	if g.Player.HasStatus(StatusBerserk) {
		return errors.New("You cannot drink potions while berserk.")
	}
	var err error
	switch p {
	case HealWoundsPotion:
		err = g.QuaffHealWounds(ev)
	case TeleportationPotion:
		err = g.QuaffTeleportation(ev)
	case BerserkPotion:
		err = g.QuaffBerserk(ev)
	case DescentPotion:
		err = g.QuaffDescent(ev)
	case RunningPotion:
		err = g.QuaffHaste(ev)
	case EvasionPotion:
		err = g.QuaffEvasion(ev)
	case LignificationPotion:
		err = g.QuaffLignification(ev)
	case MagicMappingPotion:
		err = g.QuaffMagicMapping(ev)
	case MagicPotion:
		err = g.QuaffMagic(ev)
	case WallPotion:
		err = g.QuaffWallPotion(ev)
	}
	if err != nil {
		return err
	}
	// Quaffing is fast: the next turn comes after only 5 time units.
	ev.Renew(g, 5)
	g.UseConsumable(p)
	return nil
}
// QuaffTeleportation schedules a delayed teleport (20–49 time units ahead)
// and marks the player with StatusTele. Fails while lignified or already
// waiting to teleport.
func (g *game) QuaffTeleportation(ev event) error {
	if g.Player.HasStatus(StatusLignification) {
		return errors.New("You cannot teleport while lignified.")
	}
	if g.Player.HasStatus(StatusTele) {
		return errors.New("You already quaffed a potion of teleportation.")
	}
	delay := 20 + RandInt(30)
	g.Player.Statuses[StatusTele]++
	heap.Push(g.Events, &simpleEvent{ERank: ev.Rank() + delay, EAction: Teleportation})
	g.Printf("You quaff a %s. You feel unstable.", TeleportationPotion)
	return nil
}

// QuaffBerserk starts a berserk rage lasting 65–84 time units and grants an
// immediate 10 HP boost. Fails while exhausted.
func (g *game) QuaffBerserk(ev event) error {
	if g.Player.HasStatus(StatusExhausted) {
		return errors.New("You are too exhausted to berserk.")
	}
	g.Player.Statuses[StatusBerserk]++
	heap.Push(g.Events, &simpleEvent{ERank: ev.Rank() + 65 + RandInt(20), EAction: BerserkEnd})
	g.Printf("You quaff a %s. You feel a sudden urge to kill things.", BerserkPotion)
	g.Player.HP += 10
	return nil
}
// QuaffHealWounds restores two thirds of the player's maximum HP, capped at
// the maximum, and reports the HP change.
func (g *game) QuaffHealWounds(ev event) error {
	oldHP := g.Player.HP
	healed := oldHP + 2*g.Player.HPMax()/3
	if maxHP := g.Player.HPMax(); healed > maxHP {
		healed = maxHP
	}
	g.Player.HP = healed
	g.Printf("You quaff a %s (%d -> %d).", HealWoundsPotion, oldHP, g.Player.HP)
	return nil
}
// QuaffMagic restores two thirds of the player's maximum MP, capped at the
// maximum, and reports the MP change.
func (g *game) QuaffMagic(ev event) error {
	mp := g.Player.MP
	g.Player.MP += 2 * g.Player.MPMax() / 3
	if g.Player.MP > g.Player.MPMax() {
		g.Player.MP = g.Player.MPMax()
	}
	g.Printf("You quaff the %s (%d -> %d).", MagicPotion, mp, g.Player.MP)
	return nil
}

// QuaffDescent sends the player one level deeper, generating the new level
// and saving the game. Fails while lignified or on the deepest level.
func (g *game) QuaffDescent(ev event) error {
	if g.Player.HasStatus(StatusLignification) {
		return errors.New("You cannot descend while lignified.")
	}
	if g.Depth >= g.MaxDepth() {
		return errors.New("You cannot descend more!")
	}
	g.Printf("You quaff the %s. You feel yourself falling through the ground.", DescentPotion)
	g.Depth++
	g.InitLevel()
	g.Save()
	return nil
}
// QuaffHaste grants the swiftness status for 80–99 time units.
func (g *game) QuaffHaste(ev event) error {
	g.Player.Statuses[StatusSwift]++
	heap.Push(g.Events, &simpleEvent{ERank: ev.Rank() + 80 + RandInt(20), EAction: HasteEnd})
	g.Printf("You quaff the %s. You feel speedy.", RunningPotion)
	return nil
}

// QuaffEvasion grants the agility status for 90–109 time units.
func (g *game) QuaffEvasion(ev event) error {
	g.Player.Statuses[StatusAgile]++
	heap.Push(g.Events, &simpleEvent{ERank: ev.Rank() + 90 + RandInt(20), EAction: EvasionEnd})
	g.Printf("You quaff the %s. You feel agile.", EvasionPotion)
	return nil
}

// QuaffLignification grants the lignification status for 150–249 time units.
func (g *game) QuaffLignification(ev event) error {
	g.Player.Statuses[StatusLignification]++
	heap.Push(g.Events, &simpleEvent{ERank: ev.Rank() + 150 + RandInt(100), EAction: LignificationEnd})
	g.Printf("You quaff the %s. You feel attuned with the ground.", LignificationPotion)
	return nil
}

// QuaffMagicMapping marks as explored every free cell and every wall cell
// with at least one non-wall neighbor (i.e. the walls a player could see).
func (g *game) QuaffMagicMapping(ev event) error {
	for i, c := range g.Dungeon.Cells {
		pos := g.Dungeon.CellPosition(i)
		if c.T == FreeCell || g.Dungeon.WallNeighborsCount(pos) < 8 {
			g.Dungeon.SetExplored(pos)
		}
	}
	g.Printf("You quaff the %s. You feel wiser.", MagicMappingPotion)
	return nil
}
// QuaffWallPotion turns every monster-free cell adjacent to the player into a
// temporal wall (lasting 200–249 time units), making noise next to each new
// wall, then recomputes the line of sight.
func (g *game) QuaffWallPotion(ev event) error {
	for _, pos := range g.Dungeon.FreeNeighbors(g.Player.Pos) {
		if mons, _ := g.MonsterAt(pos); mons.Exists() {
			continue
		}
		// Make noise on one free neighbor of the new wall (not the player's cell).
		for _, npos := range g.Dungeon.FreeNeighbors(pos) {
			if npos == g.Player.Pos {
				continue
			}
			g.MakeNoise(18, npos)
			break
		}
		g.Dungeon.SetCell(pos, WallCell)
		heap.Push(g.Events, &cloudEvent{ERank: ev.Rank() + 200 + RandInt(50), Pos: pos, EAction: ObstructionEnd})
	}
	g.Printf("You quaff the %s. You feel surrounded by temporal walls.", WallPotion)
	g.ComputeLOS()
	return nil
}
// projectile enumerates the throwable consumables of the game.
type projectile int

const (
	Javelin projectile = iota
	ConfusingDart
	// unimplemented
	Net
)
// String returns the singular display name of the projectile.
func (p projectile) String() string {
	switch p {
	case Javelin:
		return "javelin"
	case ConfusingDart:
		return "dart of confusion"
	case Net:
		return "throwing net"
	}
	return ""
}
// Plural returns the plural display name of the projectile.
func (p projectile) Plural() string {
	switch p {
	case Javelin:
		return "javelins"
	case ConfusingDart:
		return "darts of confusion"
	case Net:
		return "throwing nets"
	}
	return ""
}
// Desc returns a full player-facing sentence describing the projectile.
// Fixes user-facing typos: "ennemies" -> "enemies", "emprison" -> "imprison".
func (p projectile) Desc() (text string) {
	switch p {
	case Javelin:
		// XXX
		text = "can be thrown to enemies, dealing up to 11 damage."
	case ConfusingDart:
		text = "can be thrown to confuse foes. Confused monsters cannot move diagonally."
	case Net:
		text = "can be thrown to imprison your enemies."
	}
	return fmt.Sprintf("The %s %s", p, text)
}
// Letter returns the UI symbol shared by all projectiles.
func (p projectile) Letter() rune {
	return '('
}

// Int returns the underlying enum value of the projectile.
func (p projectile) Int() int {
	return int(p)
}
// Use throws the projectile at the player's current target monster, then
// consumes one unit. Unlike potions, the effect helpers renew the event
// themselves, so no Renew call appears here.
func (p projectile) Use(g *game, ev event) error {
	quant, ok := g.Player.Consumables[p]
	if !ok || quant <= 0 {
		// should not happen
		return errors.New("no such consumable: " + p.String())
	}
	mons, _ := g.MonsterAt(g.Player.Target)
	if mons == nil {
		// should not happen
		return errors.New("internal error: no monster")
	}
	switch p {
	case Javelin:
		g.ThrowJavelin(mons, ev)
	case ConfusingDart:
		g.ThrowConfusingDart(mons, ev)
	}
	g.UseConsumable(p)
	return nil
}
// ThrowJavelin resolves a javelin throw against mons: an accuracy roll versus
// an evasion roll, with base damage 11 plus berserk/strength bonuses on a hit.
// The throw costs 10 time units.
func (g *game) ThrowJavelin(mons *monster, ev event) {
	acc := RandInt(g.Player.Accuracy())
	evasion := RandInt(mons.Evasion)
	if mons.State == Resting {
		// NOTE(review): this parses as evasion /= (2 + 1), i.e. division by 3.
		// If the intent was evasion = evasion/2 + 1, parentheses are missing —
		// confirm before changing (same expression in ThrowConfusingDart).
		evasion /= 2 + 1
	}
	if acc > evasion {
		g.MakeNoise(12, mons.Pos)
		bonus := 0
		if g.Player.HasStatus(StatusBerserk) {
			bonus += RandInt(5)
		}
		if g.Player.Aptitudes[AptStrong] {
			bonus += 2
		}
		attack := g.HitDamage(11+bonus, mons.Armor)
		mons.HP -= attack
		if mons.HP > 0 {
			g.Printf("Your %s hits the %s (%d).", Javelin, mons.Kind, attack)
			mons.MakeHuntIfHurt(g)
		} else {
			g.Printf("Your %s kills the %s.", Javelin, mons.Kind)
			g.HandleKill(mons)
		}
	} else {
		g.Printf("Your %s missed the %s.", Javelin, mons.Kind)
		mons.MakeHuntIfHurt(g)
	}
	ev.Renew(g, 10)
}
// ThrowConfusingDart resolves a confusing-dart throw against mons: on a hit
// the monster is confused for 50–149 time units and its path is reset. Either
// way the monster starts hunting; the throw costs 10 time units.
func (g *game) ThrowConfusingDart(mons *monster, ev event) {
	acc := RandInt(g.Player.Accuracy())
	evasion := RandInt(mons.Evasion)
	if mons.State == Resting {
		// NOTE(review): parses as evasion /= (2 + 1) — division by 3; see the
		// same expression in ThrowJavelin.
		evasion /= 2 + 1
	}
	if acc > evasion {
		mons.Statuses[MonsConfused]++
		mons.Path = nil
		heap.Push(g.Events, &monsterEvent{
			ERank: ev.Rank() + 50 + RandInt(100), NMons: mons.Index(g), EAction: MonsConfusionEnd})
		g.Printf("Your %s hits the %s. The %s appears confused.", ConfusingDart, mons.Kind, mons.Kind)
	} else {
		g.Printf("Your %s missed the %s.", ConfusingDart, mons.Kind)
	}
	mons.MakeHuntIfHurt(g)
	ev.Renew(g, 10)
}
// collectable is a consumable together with a quantity, as found on the map.
type collectable struct {
	Consumable consumable
	Quantity   int
}

// collectData describes how a consumable is generated.
type collectData struct {
	rarity   int // relative rarity weight — higher appears to mean rarer; TODO confirm against generator
	quantity int // units given each time the item is generated
}

// ConsumablesCollectData maps every implemented consumable to its generation data.
var ConsumablesCollectData = map[consumable]collectData{
	HealWoundsPotion:    {rarity: 6, quantity: 1},
	TeleportationPotion: {rarity: 4, quantity: 1},
	BerserkPotion:       {rarity: 5, quantity: 1},
	RunningPotion:       {rarity: 10, quantity: 1},
	DescentPotion:       {rarity: 15, quantity: 1},
	EvasionPotion:       {rarity: 8, quantity: 1},
	LignificationPotion: {rarity: 8, quantity: 1},
	MagicMappingPotion:  {rarity: 15, quantity: 1},
	MagicPotion:         {rarity: 10, quantity: 1},
	WallPotion:          {rarity: 12, quantity: 1},
	Javelin:             {rarity: 3, quantity: 3},
	ConfusingDart:       {rarity: 5, quantity: 2},
}
// equipable is the interface implemented by wearable/wieldable items
// (armour, weapons and shields).
type equipable interface {
	Equip(g *game)  // put the item on, swapping with the currently equipped one
	String() string // display name
	Letter() rune   // UI symbol for this item category
	Desc() string   // description shown to the player
}

// armour enumerates the body armours, from weakest to strongest.
type armour int

const (
	Robe armour = iota
	LeatherArmour
	ChainMail
	PlateArmour
)
// Equip puts the armour on and drops the previously worn armour on the
// player's cell, recording first-time finds in the story log.
func (ar armour) Equip(g *game) {
	oar := g.Player.Armour
	g.Player.Armour = ar
	if !g.FoundEquipables[ar] {
		// Reading a nil map above is safe; it is lazily initialized before the first write.
		if g.FoundEquipables == nil {
			g.FoundEquipables = map[equipable]bool{}
		}
		g.StoryPrintf("You found and put on %s.", Indefinite(ar.String(), false))
		g.FoundEquipables[ar] = true
	}
	g.Printf("You put the %s on and leave your %s on the ground.", ar, oar)
	g.Equipables[g.Player.Pos] = oar
}
// String returns the display name of the armour.
func (ar armour) String() string {
	names := [...]string{
		Robe:          "robe",
		LeatherArmour: "leather armour",
		ChainMail:     "chain mail",
		PlateArmour:   "plate armour",
	}
	if int(ar) >= 0 && int(ar) < len(names) {
		return names[ar]
	}
	// should not happen
	return "some piece of armour"
}
// Desc returns the player-facing description of the armour.
func (ar armour) Desc() string {
	var text string
	switch ar {
	case Robe:
		text = "A robe provides no special protection, and will not help you much in your journey."
	case LeatherArmour:
		text = "A leather armour provides some protection against blows."
	case ChainMail:
		text = "A chain mail provides more protection than a leather armour, but the blows you receive are louder."
	case PlateArmour:
		text = "A plate armour provides great protection against blows, but blows you receive are quite noisy."
	}
	return text
}
// Letter returns the UI symbol shared by all armours.
func (ar armour) Letter() rune {
	return '['
}

// weapon enumerates the wieldable weapons.
type weapon int

const (
	Dagger weapon = iota
	Axe
	BattleAxe
	Spear
	Halberd
	Sword
	DoubleSword
)
// Equip takes the weapon and drops the previously wielded weapon on the
// player's cell, recording first-time finds in the story log.
func (wp weapon) Equip(g *game) {
	owp := g.Player.Weapon
	g.Player.Weapon = wp
	if !g.FoundEquipables[wp] {
		// Reading a nil map above is safe; it is lazily initialized before the first write.
		if g.FoundEquipables == nil {
			g.FoundEquipables = map[equipable]bool{}
		}
		g.StoryPrintf("You found and took %s.", Indefinite(wp.String(), false))
		g.FoundEquipables[wp] = true
	}
	g.Printf("You take the %s and leave your %s on the ground.", wp, owp)
	g.Equipables[g.Player.Pos] = owp
}
// String returns the display name of the weapon.
func (wp weapon) String() string {
	names := [...]string{
		Dagger:      "dagger",
		Axe:         "axe",
		BattleAxe:   "battle axe",
		Spear:       "spear",
		Halberd:     "halberd",
		Sword:       "sword",
		DoubleSword: "double sword",
	}
	if int(wp) >= 0 && int(wp) < len(names) {
		return names[wp]
	}
	// should not happen
	return "some weapon"
}
// Desc returns the player-facing description of the weapon, including its
// maximum damage. Fixes the user-facing grammar error "An halberd".
func (wp weapon) Desc() string {
	var text string
	switch wp {
	case Dagger:
		text = "A dagger is the most basic weapon. Great against sleeping monsters, but that's all."
	case Axe:
		text = "An axe is a one-handed weapon that can hit at once any foes adjacent to you."
	case BattleAxe:
		text = "A battle axe is a big two-handed weapon that can hit at once any foes adjacent to you."
	case Spear:
		text = "A spear is a one-handed weapon that can hit two opponents in a row at once. Useful in corridors."
	case Halberd:
		text = "A halberd is a big two-handed weapon that can hit two opponents in a row at once. Useful in corridors."
	case Sword:
		text = "A sword is a one-handed weapon that occasionally gets additional free hits."
	case DoubleSword:
		text = "A double sword is a big two-handed weapon that occasionally gets additional free hits."
	}
	return fmt.Sprintf("%s It can hit for up to %d damage.", text, wp.Attack())
}
func (wp weapon) Attack() int {
switch wp {
case Axe, Spear, Sword:
return 11
case BattleAxe, Halberd, DoubleSword:
return 15
case Dagger:
return 8
default:
return 0
}
}
func (wp weapon) TwoHanded() bool {
switch wp {
case BattleAxe, Halberd, DoubleSword:
return true
default:
return false
}
}
func (wp weapon) Letter() rune {
return ')'
}
func (wp weapon) Cleave() bool {
switch wp {
case Axe, BattleAxe:
return true
default:
return false
}
}
func (wp weapon) Pierce() bool {
switch wp {
case Spear, Halberd:
return true
default:
return false
}
}
// shield identifies the content of the player's shield slot; NoShield
// means the slot is empty.
type shield int

const (
	NoShield shield = iota
	Buckler
	Shield
)

// Equip puts shield sh on the player. Unlike armour and weapons there is
// an empty value: when the player had no shield before, nothing is
// dropped and any equipable lying at the player's position is removed.
func (sh shield) Equip(g *game) {
	osh := g.Player.Shield
	g.Player.Shield = sh
	if !g.FoundEquipables[sh] {
		// Lazily initialize the map; reading a nil map above is safe in Go.
		if g.FoundEquipables == nil {
			g.FoundEquipables = map[equipable]bool{}
		}
		g.StoryPrintf("You found and put on %s.", Indefinite(sh.String(), false))
		g.FoundEquipables[sh] = true
	}
	if osh != NoShield {
		g.Equipables[g.Player.Pos] = osh
		g.Printf("You put the %s on and leave your %s on the ground.", sh, osh)
	} else {
		delete(g.Equipables, g.Player.Pos)
		g.Printf("You put the %s on.", sh)
	}
}
// String returns the shield's display name (empty for NoShield).
func (sh shield) String() string {
	switch sh {
	case Buckler:
		return "buckler"
	case Shield:
		return "shield"
	default:
		return ""
	}
}

// Desc returns a descriptive sentence for the shield (empty for NoShield).
func (sh shield) Desc() string {
	descriptions := map[shield]string{
		Buckler: "A buckler is a small shield that can sometimes block attacks, including some magical attacks. You cannot use it if you are wielding a two-handed weapon.",
		Shield:  "A shield can block attacks, including some magical attacks. You cannot use it if you are wielding a two-handed weapon.",
	}
	return descriptions[sh]
}

// Letter returns the inventory symbol used for shields.
func (sh shield) Letter() rune {
	return ']'
}

// Block returns the shield's blocking power (0 for NoShield).
func (sh shield) Block() int {
	switch sh {
	case Buckler:
		return 6
	case Shield:
		return 9
	default:
		return 0
	}
}
// equipableData describes how often an equipable is generated (rarity:
// higher is rarer) and the minimum depth at which it may appear.
type equipableData struct {
	rarity int
	minDepth int
}

// FavorableRoll rolls for item generation; a result of 0 is a favorable
// roll. Larger lateness (deeper in the game relative to minDepth) lowers
// the ratio and so improves the odds.
func (data equipableData) FavorableRoll(lateness int) int {
	// NOTE(review): lateness == 0 would panic with a division by zero —
	// presumably callers guarantee lateness >= 1; confirm at call sites.
	ratio := data.rarity / (2 * lateness)
	if ratio < 2 {
		ratio = 2
	}
	r := RandInt(ratio)
	// At the minimum ratio late in the game, grant a second chance at 0.
	if r != 0 && ratio == 2 && lateness >= 3 {
		r = RandInt(ratio)
	}
	return r
}
// EquipablesRepartitionData maps each equipable to its generation data
// (rarity weight, minimum depth of appearance).
// Note: the redundant equipableData element types were removed from the
// composite literals (gofmt -s simplification).
var EquipablesRepartitionData = map[equipable]equipableData{
	Robe:          {5, 0},
	LeatherArmour: {5, 0},
	ChainMail:     {10, 3},
	PlateArmour:   {15, 6},
	Dagger:        {20, 0},
	Axe:           {25, 1},
	BattleAxe:     {30, 3},
	Spear:         {25, 1},
	Halberd:       {30, 3},
	Sword:         {25, 1},
	DoubleSword:   {30, 3},
	Buckler:       {10, 2},
	Shield:        {15, 5},
}
new potion: controlled blink potion
package main
import (
"container/heap"
"errors"
"fmt"
)
// + consumables (potion-like or throwing dart, strategic + tactical)
// + equipables
// + recharging with depth (rod-like, strategic & a little tactical + mana)
// - digging, fog, slowing clouds or something, fear,
// fireball, lightning bolt, shatter, blink, teleport other
// consumable is the interface implemented by one-use items: potions and
// thrown projectiles.
type consumable interface {
	Use(*game, event) error
	String() string
	Plural() string
	Desc() string
	Letter() rune
	Int() int
}

// UseConsumable decrements the player's stock of c, removing the map
// entry entirely when none remain, logs the use to the story log, and
// applies fair-action bookkeeping.
func (g *game) UseConsumable(c consumable) {
	g.Player.Consumables[c]--
	g.StoryPrintf("You used %s.", Indefinite(c.String(), false))
	if g.Player.Consumables[c] <= 0 {
		delete(g.Player.Consumables, c)
	}
	g.FairAction()
}
// potion enumerates the drinkable consumables.
type potion int

const (
	HealWoundsPotion potion = iota
	TeleportationPotion
	BerserkPotion
	DescentPotion
	RunningPotion
	EvasionPotion
	LignificationPotion
	MagicMappingPotion
	MagicPotion
	WallPotion
	CBlinkPotion
	// below unimplemented
	ResistancePotion
)
// String returns the potion's full display name, e.g. "potion of walls".
// An unknown potion value yields just "potion".
func (p potion) String() string {
	suffixes := map[potion]string{
		HealWoundsPotion:    " of heal wounds",
		TeleportationPotion: " of teleportation",
		DescentPotion:       " of descent",
		EvasionPotion:       " of evasion",
		MagicMappingPotion:  " of magic mapping",
		MagicPotion:         " of refill magic",
		BerserkPotion:       " of berserk",
		RunningPotion:       " of running",
		LignificationPotion: " of lignification",
		WallPotion:          " of walls",
		CBlinkPotion:        " of controlled blink",
		ResistancePotion:    " of resistance",
	}
	return "potion" + suffixes[p]
}

// Plural returns the potion's plural name.
func (p potion) Plural() string {
	// never used for potions
	return p.String()
}

// Desc returns a full sentence describing the potion's effect.
func (p potion) Desc() string {
	effects := map[potion]string{
		HealWoundsPotion:    "heals you a good deal.",
		TeleportationPotion: "teleports you away after a short delay.",
		DescentPotion:       "makes you go to deeper in the Underground.",
		EvasionPotion:       "makes you better at avoiding blows.",
		MagicMappingPotion:  "shows you the map.",
		MagicPotion:         "replenishes your magical reserves.",
		BerserkPotion:       "makes you enter a crazy rage, temporarily making you faster, stronger and healthier. You cannot drink potions while berserk, and afterwards it leaves you slow and exhausted.",
		RunningPotion:       "makes you move faster.",
		LignificationPotion: "makes you more resistant to physical blows, but you are attached to the ground while the effect lasts.",
		WallPotion:          "replaces free cells around you with temporal walls.",
		CBlinkPotion:        "makes you blink to a targetted cell in your line of sight.",
		ResistancePotion:    "makes you resistent to the elements.",
	}
	return fmt.Sprintf("The %s %s", p, effects[p])
}

// Letter returns the inventory symbol used for potions.
func (p potion) Letter() rune {
	return '!'
}

// Int returns the potion's numeric identifier.
func (p potion) Int() int {
	return int(p)
}
// Use drinks potion p: it validates that drinking is currently possible
// (stock available, not sick, not berserk), dispatches to the per-potion
// quaffing routine, and on success renews the event and consumes one
// potion from the inventory.
func (p potion) Use(g *game, ev event) error {
	quant, ok := g.Player.Consumables[p]
	if !ok || quant <= 0 {
		// should not happen
		return errors.New("no such consumable: " + p.String())
	}
	if g.Player.HasStatus(StatusNausea) {
		return errors.New("You cannot drink potions while sick.")
	}
	if g.Player.HasStatus(StatusBerserk) {
		return errors.New("You cannot drink potions while berserk.")
	}
	var err error
	switch p {
	case HealWoundsPotion:
		err = g.QuaffHealWounds(ev)
	case TeleportationPotion:
		err = g.QuaffTeleportation(ev)
	case BerserkPotion:
		err = g.QuaffBerserk(ev)
	case DescentPotion:
		err = g.QuaffDescent(ev)
	case RunningPotion:
		err = g.QuaffHaste(ev)
	case EvasionPotion:
		err = g.QuaffEvasion(ev)
	case LignificationPotion:
		err = g.QuaffLignification(ev)
	case MagicMappingPotion:
		err = g.QuaffMagicMapping(ev)
	case MagicPotion:
		err = g.QuaffMagic(ev)
	case WallPotion:
		err = g.QuaffWallPotion(ev)
	case CBlinkPotion:
		err = g.QuaffCBlinkPotion(ev)
	}
	// A failed quaff (e.g. cancelled targetting) costs neither time nor
	// a potion.
	if err != nil {
		return err
	}
	ev.Renew(g, 5)
	g.UseConsumable(p)
	return nil
}
// QuaffTeleportation schedules a delayed teleportation of the player.
// Fails while lignified or when a teleportation is already pending.
func (g *game) QuaffTeleportation(ev event) error {
	if g.Player.HasStatus(StatusLignification) {
		return errors.New("You cannot teleport while lignified.")
	}
	if g.Player.HasStatus(StatusTele) {
		return errors.New("You already quaffed a potion of teleportation.")
	}
	delay := 20 + RandInt(30)
	g.Player.Statuses[StatusTele]++
	heap.Push(g.Events, &simpleEvent{ERank: ev.Rank() + delay, EAction: Teleportation})
	g.Printf("You quaff a %s. You feel unstable.", TeleportationPotion)
	return nil
}

// QuaffBerserk enters the berserk state and schedules its end. Fails
// while exhausted (the post-berserk state).
func (g *game) QuaffBerserk(ev event) error {
	if g.Player.HasStatus(StatusExhausted) {
		return errors.New("You are too exhausted to berserk.")
	}
	g.Player.Statuses[StatusBerserk]++
	heap.Push(g.Events, &simpleEvent{ERank: ev.Rank() + 65 + RandInt(20), EAction: BerserkEnd})
	g.Printf("You quaff a %s. You feel a sudden urge to kill things.", BerserkPotion)
	// HP bonus is intentionally not clamped here — presumably berserk may
	// push HP above HPMax; confirm against BerserkEnd handling.
	g.Player.HP += 10
	return nil
}

// QuaffHealWounds restores two thirds of max HP, clamped at HPMax.
func (g *game) QuaffHealWounds(ev event) error {
	hp := g.Player.HP
	g.Player.HP += 2 * g.Player.HPMax() / 3
	if g.Player.HP > g.Player.HPMax() {
		g.Player.HP = g.Player.HPMax()
	}
	g.Printf("You quaff a %s (%d -> %d).", HealWoundsPotion, hp, g.Player.HP)
	return nil
}

// QuaffMagic restores two thirds of max MP, clamped at MPMax.
func (g *game) QuaffMagic(ev event) error {
	mp := g.Player.MP
	g.Player.MP += 2 * g.Player.MPMax() / 3
	if g.Player.MP > g.Player.MPMax() {
		g.Player.MP = g.Player.MPMax()
	}
	g.Printf("You quaff the %s (%d -> %d).", MagicPotion, mp, g.Player.MP)
	return nil
}

// QuaffDescent immediately sends the player one level deeper, generating
// the new level and saving. Fails while lignified or on the last depth.
func (g *game) QuaffDescent(ev event) error {
	if g.Player.HasStatus(StatusLignification) {
		return errors.New("You cannot descend while lignified.")
	}
	if g.Depth >= g.MaxDepth() {
		return errors.New("You cannot descend more!")
	}
	g.Printf("You quaff the %s. You feel yourself falling through the ground.", DescentPotion)
	g.Depth++
	g.InitLevel()
	g.Save()
	return nil
}

// QuaffHaste grants the swiftness status and schedules its end.
func (g *game) QuaffHaste(ev event) error {
	g.Player.Statuses[StatusSwift]++
	heap.Push(g.Events, &simpleEvent{ERank: ev.Rank() + 80 + RandInt(20), EAction: HasteEnd})
	g.Printf("You quaff the %s. You feel speedy.", RunningPotion)
	return nil
}

// QuaffEvasion grants the agility status and schedules its end.
func (g *game) QuaffEvasion(ev event) error {
	g.Player.Statuses[StatusAgile]++
	heap.Push(g.Events, &simpleEvent{ERank: ev.Rank() + 90 + RandInt(20), EAction: EvasionEnd})
	g.Printf("You quaff the %s. You feel agile.", EvasionPotion)
	return nil
}

// QuaffLignification grants the lignification status and schedules its end.
func (g *game) QuaffLignification(ev event) error {
	g.Player.Statuses[StatusLignification]++
	heap.Push(g.Events, &simpleEvent{ERank: ev.Rank() + 150 + RandInt(100), EAction: LignificationEnd})
	g.Printf("You quaff the %s. You feel attuned with the ground.", LignificationPotion)
	return nil
}

// QuaffMagicMapping marks as explored every free cell and every wall
// cell that is not fully surrounded by walls (i.e. walls a player could
// ever see).
func (g *game) QuaffMagicMapping(ev event) error {
	for i, c := range g.Dungeon.Cells {
		pos := g.Dungeon.CellPosition(i)
		if c.T == FreeCell || g.Dungeon.WallNeighborsCount(pos) < 8 {
			g.Dungeon.SetExplored(pos)
		}
	}
	g.Printf("You quaff the %s. You feel wiser.", MagicMappingPotion)
	return nil
}
// QuaffWallPotion turns every free, monster-less cell adjacent to the
// player into a temporary wall, scheduling an ObstructionEnd event for
// each. Each new wall emits noise from one of its free neighbours.
//
// Fix: the inner loop variable used to shadow the outer `pos`, which was
// confusing (go vet -shadow); it is now named npos.
func (g *game) QuaffWallPotion(ev event) error {
	neighbors := g.Dungeon.FreeNeighbors(g.Player.Pos)
	for _, pos := range neighbors {
		mons, _ := g.MonsterAt(pos)
		if mons.Exists() {
			continue
		}
		// Emit noise from the first free neighbour of the new wall that is
		// not the player's own cell.
		posNeighbors := g.Dungeon.FreeNeighbors(pos)
		for _, npos := range posNeighbors {
			if npos == g.Player.Pos {
				continue
			}
			g.MakeNoise(18, npos)
			break
		}
		g.Dungeon.SetCell(pos, WallCell)
		heap.Push(g.Events, &cloudEvent{ERank: ev.Rank() + 200 + RandInt(50), Pos: pos, EAction: ObstructionEnd})
	}
	g.Printf("You quaff the %s. You feel surrounded by temporal walls.", WallPotion)
	g.ComputeLOS()
	return nil
}
// QuaffCBlinkPotion teleports the player to a freely chosen cell in line
// of sight. Cancelling the targetting aborts with an error, so no potion
// is consumed (see potion.Use).
func (g *game) QuaffCBlinkPotion(ev event) error {
	if !g.ui.ChooseTarget(g, &chooser{free: true}) {
		return errors.New("Ok, then.")
	}
	g.Player.Pos = g.Player.Target
	g.Printf("You quaff the %s. You blink.", CBlinkPotion)
	g.ComputeLOS()
	g.MakeMonstersAware()
	return nil
}
// projectile enumerates the throwable consumables.
type projectile int

const (
	Javelin projectile = iota
	ConfusingDart
	// unimplemented
	Net
)
// String returns the projectile's singular display name.
func (p projectile) String() string {
	names := map[projectile]string{
		Javelin:       "javelin",
		ConfusingDart: "dart of confusion",
		Net:           "throwing net",
	}
	return names[p]
}

// Plural returns the projectile's plural display name.
func (p projectile) Plural() string {
	names := map[projectile]string{
		Javelin:       "javelins",
		ConfusingDart: "darts of confusion",
		Net:           "throwing nets",
	}
	return names[p]
}

// Desc returns a full sentence describing the projectile.
func (p projectile) Desc() string {
	// XXX
	effects := map[projectile]string{
		Javelin:       "can be thrown to ennemies, dealing up to 11 damage.",
		ConfusingDart: "can be thrown to confuse foes. Confused monsters cannot move diagonally.",
		Net:           "can be thrown to emprison your ennemies.",
	}
	return fmt.Sprintf("The %s %s", p, effects[p])
}

// Letter returns the inventory symbol used for projectiles.
func (p projectile) Letter() rune {
	return '('
}

// Int returns the projectile's numeric identifier.
func (p projectile) Int() int {
	return int(p)
}
// Use throws projectile p at the monster on the player's current target
// cell, then consumes one projectile. The target is assumed to have been
// selected beforehand.
func (p projectile) Use(g *game, ev event) error {
	quant, ok := g.Player.Consumables[p]
	if !ok || quant <= 0 {
		// should not happen
		return errors.New("no such consumable: " + p.String())
	}
	mons, _ := g.MonsterAt(g.Player.Target)
	if mons == nil {
		// should not happen
		return errors.New("internal error: no monster")
	}
	switch p {
	case Javelin:
		g.ThrowJavelin(mons, ev)
	case ConfusingDart:
		g.ThrowConfusingDart(mons, ev)
	}
	g.UseConsumable(p)
	return nil
}
// ThrowJavelin resolves a javelin throw against mons: an accuracy roll
// versus an evasion roll, with damage bonuses for berserk and strength.
// Whether it hits or misses, the monster is alerted; the throw takes 10
// time units.
func (g *game) ThrowJavelin(mons *monster, ev event) {
	acc := RandInt(g.Player.Accuracy())
	evasion := RandInt(mons.Evasion)
	if mons.State == Resting {
		// NOTE(review): due to precedence this is evasion /= 3, i.e.
		// resting monsters evade at a third; possibly evasion/2 + 1 was
		// intended — confirm.
		evasion /= 2 + 1
	}
	if acc > evasion {
		g.MakeNoise(12, mons.Pos)
		bonus := 0
		if g.Player.HasStatus(StatusBerserk) {
			bonus += RandInt(5)
		}
		if g.Player.Aptitudes[AptStrong] {
			bonus += 2
		}
		attack := g.HitDamage(11+bonus, mons.Armor)
		mons.HP -= attack
		if mons.HP > 0 {
			g.Printf("Your %s hits the %s (%d).", Javelin, mons.Kind, attack)
			mons.MakeHuntIfHurt(g)
		} else {
			g.Printf("Your %s kills the %s.", Javelin, mons.Kind)
			g.HandleKill(mons)
		}
	} else {
		g.Printf("Your %s missed the %s.", Javelin, mons.Kind)
		mons.MakeHuntIfHurt(g)
	}
	ev.Renew(g, 10)
}

// ThrowConfusingDart resolves a confusion-dart throw against mons: on a
// hit the monster gains the confused status (it then cannot move
// diagonally) and its current path is invalidated; a wake-up event ends
// the confusion later. The throw takes 10 time units.
func (g *game) ThrowConfusingDart(mons *monster, ev event) {
	acc := RandInt(g.Player.Accuracy())
	evasion := RandInt(mons.Evasion)
	if mons.State == Resting {
		// NOTE(review): evasion /= 3 — see ThrowJavelin.
		evasion /= 2 + 1
	}
	if acc > evasion {
		mons.Statuses[MonsConfused]++
		mons.Path = nil
		heap.Push(g.Events, &monsterEvent{
			ERank: ev.Rank() + 50 + RandInt(100), NMons: mons.Index(g), EAction: MonsConfusionEnd})
		g.Printf("Your %s hits the %s. The %s appears confused.", ConfusingDart, mons.Kind, mons.Kind)
	} else {
		g.Printf("Your %s missed the %s.", ConfusingDart, mons.Kind)
	}
	mons.MakeHuntIfHurt(g)
	ev.Renew(g, 10)
}
// collectable is a stack of identical consumables lying on the ground.
type collectable struct {
	Consumable consumable
	Quantity int
}

// collectData describes generation frequency (rarity: higher is rarer)
// and stack size for a consumable.
type collectData struct {
	rarity int
	quantity int
}

// ConsumablesCollectData maps each implemented consumable to its
// generation data.
var ConsumablesCollectData = map[consumable]collectData{
	HealWoundsPotion: {rarity: 6, quantity: 1},
	TeleportationPotion: {rarity: 4, quantity: 1},
	BerserkPotion: {rarity: 5, quantity: 1},
	RunningPotion: {rarity: 10, quantity: 1},
	DescentPotion: {rarity: 15, quantity: 1},
	EvasionPotion: {rarity: 10, quantity: 1},
	LignificationPotion: {rarity: 8, quantity: 1},
	MagicMappingPotion: {rarity: 15, quantity: 1},
	MagicPotion: {rarity: 10, quantity: 1},
	WallPotion: {rarity: 12, quantity: 1},
	CBlinkPotion: {rarity: 12, quantity: 1},
	Javelin: {rarity: 3, quantity: 3},
	ConfusingDart: {rarity: 5, quantity: 2},
}
// equipable is the interface implemented by wearable or wieldable items:
// armour, weapons and shields.
type equipable interface {
	Equip(g *game)
	String() string
	Letter() rune
	Desc() string
}

// armour identifies the player's body armour.
type armour int

const (
	Robe armour = iota
	LeatherArmour
	ChainMail
	PlateArmour
)
// Equip puts armour ar on the player, dropping the previously worn
// armour on the ground at the player's position. The first time a given
// armour is found it is recorded in g.FoundEquipables and logged to the
// story log.
func (ar armour) Equip(g *game) {
	oar := g.Player.Armour
	g.Player.Armour = ar
	if !g.FoundEquipables[ar] {
		// Lazily initialize the map; reading a nil map above is safe in Go.
		if g.FoundEquipables == nil {
			g.FoundEquipables = map[equipable]bool{}
		}
		g.StoryPrintf("You found and put on %s.", Indefinite(ar.String(), false))
		g.FoundEquipables[ar] = true
	}
	g.Printf("You put the %s on and leave your %s on the ground.", ar, oar)
	g.Equipables[g.Player.Pos] = oar
}
// String returns the armour's display name.
func (ar armour) String() string {
	names := map[armour]string{
		Robe:          "robe",
		LeatherArmour: "leather armour",
		ChainMail:     "chain mail",
		PlateArmour:   "plate armour",
	}
	if name, ok := names[ar]; ok {
		return name
	}
	// should not happen
	return "some piece of armour"
}

// Desc returns a short descriptive sentence for the armour.
func (ar armour) Desc() string {
	descriptions := map[armour]string{
		Robe:          "A robe provides no special protection, and will not help you much in your journey.",
		LeatherArmour: "A leather armour provides some protection against blows.",
		ChainMail:     "A chain mail provides more protection than a leather armour, but the blows you receive are louder.",
		PlateArmour:   "A plate armour provides great protection against blows, but blows you receive are quite noisy.",
	}
	return descriptions[ar]
}

// Letter returns the inventory symbol used for armour.
func (ar armour) Letter() rune {
	return '['
}
// weapon identifies one of the player's possible weapons.
type weapon int

const (
	Dagger weapon = iota
	Axe
	BattleAxe
	Spear
	Halberd
	Sword
	DoubleSword
)

// Equip makes wp the player's wielded weapon, dropping the previous
// weapon on the ground at the player's position. First-time finds are
// recorded in g.FoundEquipables and logged to the story log.
func (wp weapon) Equip(g *game) {
	owp := g.Player.Weapon
	g.Player.Weapon = wp
	if !g.FoundEquipables[wp] {
		// Lazily initialize the map; reading a nil map above is safe in Go.
		if g.FoundEquipables == nil {
			g.FoundEquipables = map[equipable]bool{}
		}
		g.StoryPrintf("You found and took %s.", Indefinite(wp.String(), false))
		g.FoundEquipables[wp] = true
	}
	g.Printf("You take the %s and leave your %s on the ground.", wp, owp)
	g.Equipables[g.Player.Pos] = owp
}
// String returns the weapon's display name.
func (wp weapon) String() string {
	names := map[weapon]string{
		Dagger:      "dagger",
		Axe:         "axe",
		BattleAxe:   "battle axe",
		Spear:       "spear",
		Halberd:     "halberd",
		Sword:       "sword",
		DoubleSword: "double sword",
	}
	if name, ok := names[wp]; ok {
		return name
	}
	// should not happen
	return "some weapon"
}

// Desc returns a descriptive sentence for the weapon, including its
// maximum damage.
func (wp weapon) Desc() string {
	descriptions := map[weapon]string{
		Dagger:      "A dagger is the most basic weapon. Great against sleeping monsters, but that's all.",
		Axe:         "An axe is a one-handed weapon that can hit at once any foes adjacent to you.",
		BattleAxe:   "A battle axe is a big two-handed weapon that can hit at once any foes adjacent to you.",
		Spear:       "A spear is a one-handed weapon that can hit two opponents in a row at once. Useful in corridors.",
		Halberd:     "An halberd is a big two-handed weapon that can hit two opponents in a row at once. Useful in corridors.",
		Sword:       "A sword is a one-handed weapon that occasionally gets additional free hits.",
		DoubleSword: "A double sword is a big two-handed weapon that occasionally gets additional free hits.",
	}
	return fmt.Sprintf("%s It can hit for up to %d damage.", descriptions[wp], wp.Attack())
}

// Attack returns the weapon's maximum damage (0 for unknown weapons).
func (wp weapon) Attack() int {
	damage := map[weapon]int{
		Dagger:      8,
		Axe:         11,
		Spear:       11,
		Sword:       11,
		BattleAxe:   15,
		Halberd:     15,
		DoubleSword: 15,
	}
	return damage[wp]
}

// TwoHanded reports whether the weapon needs both hands.
func (wp weapon) TwoHanded() bool {
	return wp == BattleAxe || wp == Halberd || wp == DoubleSword
}

// Letter returns the inventory symbol used for weapons.
func (wp weapon) Letter() rune {
	return ')'
}

// Cleave reports whether the weapon hits all adjacent foes at once.
func (wp weapon) Cleave() bool {
	return wp == Axe || wp == BattleAxe
}

// Pierce reports whether the weapon can hit two opponents in a row.
func (wp weapon) Pierce() bool {
	return wp == Spear || wp == Halberd
}
// shield identifies the content of the player's shield slot; NoShield
// means the slot is empty.
type shield int

const (
	NoShield shield = iota
	Buckler
	Shield
)

// Equip puts shield sh on the player. Unlike armour and weapons there is
// an empty value: when the player had no shield before, nothing is
// dropped and any equipable lying at the player's position is removed.
func (sh shield) Equip(g *game) {
	osh := g.Player.Shield
	g.Player.Shield = sh
	if !g.FoundEquipables[sh] {
		// Lazily initialize the map; reading a nil map above is safe in Go.
		if g.FoundEquipables == nil {
			g.FoundEquipables = map[equipable]bool{}
		}
		g.StoryPrintf("You found and put on %s.", Indefinite(sh.String(), false))
		g.FoundEquipables[sh] = true
	}
	if osh != NoShield {
		g.Equipables[g.Player.Pos] = osh
		g.Printf("You put the %s on and leave your %s on the ground.", sh, osh)
	} else {
		delete(g.Equipables, g.Player.Pos)
		g.Printf("You put the %s on.", sh)
	}
}
// String returns the shield's display name (empty for NoShield).
func (sh shield) String() string {
	switch sh {
	case Buckler:
		return "buckler"
	case Shield:
		return "shield"
	default:
		return ""
	}
}

// Desc returns a descriptive sentence for the shield (empty for NoShield).
func (sh shield) Desc() string {
	descriptions := map[shield]string{
		Buckler: "A buckler is a small shield that can sometimes block attacks, including some magical attacks. You cannot use it if you are wielding a two-handed weapon.",
		Shield:  "A shield can block attacks, including some magical attacks. You cannot use it if you are wielding a two-handed weapon.",
	}
	return descriptions[sh]
}

// Letter returns the inventory symbol used for shields.
func (sh shield) Letter() rune {
	return ']'
}

// Block returns the shield's blocking power (0 for NoShield).
func (sh shield) Block() int {
	switch sh {
	case Buckler:
		return 6
	case Shield:
		return 9
	default:
		return 0
	}
}
// equipableData describes how often an equipable is generated (rarity:
// higher is rarer) and the minimum depth at which it may appear.
type equipableData struct {
	rarity int
	minDepth int
}

// FavorableRoll rolls for item generation; a result of 0 is a favorable
// roll. Larger lateness (deeper in the game relative to minDepth) lowers
// the ratio and so improves the odds.
func (data equipableData) FavorableRoll(lateness int) int {
	// NOTE(review): lateness == 0 would panic with a division by zero —
	// presumably callers guarantee lateness >= 1; confirm at call sites.
	ratio := data.rarity / (2 * lateness)
	if ratio < 2 {
		ratio = 2
	}
	r := RandInt(ratio)
	// At the minimum ratio late in the game, grant a second chance at 0.
	if r != 0 && ratio == 2 && lateness >= 3 {
		r = RandInt(ratio)
	}
	return r
}
// EquipablesRepartitionData maps each equipable to its generation data
// (rarity weight, minimum depth of appearance).
// Note: the redundant equipableData element types were removed from the
// composite literals (gofmt -s simplification).
var EquipablesRepartitionData = map[equipable]equipableData{
	Robe:          {5, 0},
	LeatherArmour: {5, 0},
	ChainMail:     {10, 3},
	PlateArmour:   {15, 6},
	Dagger:        {20, 0},
	Axe:           {25, 1},
	BattleAxe:     {30, 3},
	Spear:         {25, 1},
	Halberd:       {30, 3},
	Sword:         {25, 1},
	DoubleSword:   {30, 3},
	Buckler:       {10, 2},
	Shield:        {15, 5},
}
|
// Copyright 2016 Tim O'Brien. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package jnigi
import (
"errors"
"fmt"
"runtime"
"unsafe"
"strings"
)
// copyToC controls whether slice contents are copied into C-allocated
// memory before being passed to JNI array functions (see toJavaArray).
// The zero value (false) replaces the redundant `bool = false` form.
var copyToC bool

// toBool converts a JNI jboolean (JNI_TRUE == 1) to a Go bool.
func toBool(b jboolean) bool {
	return b == 1
}

// fromBool converts a Go bool to a JNI jboolean (1 for true, 0 for false).
func fromBool(b bool) jboolean {
	if b {
		return 1
	}
	return 0
}
// ObjectRef is a reference to a Java object, together with its class
// name (JNI slash form, e.g. "java/lang/Object") and whether it is an
// array. A zero jobject represents Java null.
type ObjectRef struct {
	jobject   jobject
	className string
	isArray   bool
}

// WrapJObject wraps a raw jobject handle in an ObjectRef.
func WrapJObject(jobj uintptr, className string, isArray bool) *ObjectRef {
	return &ObjectRef{jobject(jobj), className, isArray}
}

// Cast returns a reference to the same Java object viewed as className.
// The receiver itself is returned when the class name already matches.
// (Rewritten without else-after-return, per Go style.)
func (o *ObjectRef) Cast(className string) *ObjectRef {
	if className == o.className {
		return o
	}
	return &ObjectRef{o.jobject, className, o.isArray}
}

// IsNil reports whether the reference is the Java null reference.
func (o *ObjectRef) IsNil() bool {
	return o.jobject == 0
}
// IsInstanceOf reports whether the referenced object is an instance of
// className, using the JNI IsInstanceOf check.
func (o *ObjectRef) IsInstanceOf(env *Env, className string) (bool, error) {
	class, err := env.callFindClass(className)
	if err != nil {
		return false, err
	}
	return toBool(isInstanceOf(env.jniEnv, o.jobject, class)), nil
}

// jobj returns the underlying JNI handle (implements the jobj interface).
func (o *ObjectRef) jobj() jobject {
	return o.jobject
}

// JObject returns the underlying JNI handle.
func (o *ObjectRef) JObject() jobject {
	return o.jobj()
}

// jobj is implemented by values that carry a raw JNI object handle.
type jobj interface {
	jobj() jobject
}

// ExceptionHandler is used to convert a thrown exception (java.lang.Throwable) to a Go error.
type ExceptionHandler interface {
	CatchException(env *Env, exception *ObjectRef) error
}

// ExceptionHandlerFunc is an adapter to allow use of ordinary functions as an
// ExceptionHandler. If f is a function with the appropriate signature, ExceptionHandlerFunc(f)
// is an ExceptionHandler object that calls f.
type ExceptionHandlerFunc func(env *Env, exception *ObjectRef) error

// CatchException calls f to implement ExceptionHandler.
func (f ExceptionHandlerFunc) CatchException(env *Env, exception *ObjectRef) error {
	return f(env, exception)
}
// Env wraps a JNI environment pointer (JNIEnv*). It also carries a
// one-shot precalculated method signature, a flag disabling return-value
// conversion, a per-env cache of global class references, and an
// optional exception handler.
type Env struct {
	jniEnv unsafe.Pointer
	preCalcSig string
	noReturnConvert bool
	classCache map[string]jclass
	ExceptionHandler ExceptionHandler
}

// WrapEnv wraps an existing JNIEnv pointer in an Env with an empty class
// cache.
func WrapEnv(envPtr unsafe.Pointer) *Env {
	return &Env{jniEnv: envPtr, classCache: make(map[string]jclass)}
}

// JVM wraps a JavaVM pointer.
type JVM struct {
	javaVM unsafe.Pointer
}

// JVMInitArgs wraps a JavaVMInitArgs pointer used to create a JVM.
type JVMInitArgs struct {
	javaVMInitArgs unsafe.Pointer
}
// CreateJVM creates a Java VM with the given init args and returns the
// VM plus the JNI environment of the current (locked) OS thread.
func CreateJVM(jvmInitArgs *JVMInitArgs) (*JVM, *Env, error) {
	// JNI environments are only valid on the thread they are attached to.
	runtime.LockOSThread()

	p := malloc(unsafe.Sizeof((unsafe.Pointer)(nil)))
	p2 := malloc(unsafe.Sizeof((unsafe.Pointer)(nil)))

	if jni_CreateJavaVM(p2, p, jvmInitArgs.javaVMInitArgs) < 0 {
		return nil, nil, errors.New("Couldn't instantiate JVM")
	}
	jvm := &JVM{*(*unsafe.Pointer)(p2)}
	env := &Env{jniEnv: *(*unsafe.Pointer)(p), classCache: make(map[string]jclass)}

	free(p)
	free(p2)
	return jvm, env, nil
}

// AttachCurrentThread attaches the calling OS thread to the JVM (locking
// the goroutine to it) and returns its JNI environment.
func (j *JVM) AttachCurrentThread() *Env {
	runtime.LockOSThread()

	p := malloc(unsafe.Sizeof((unsafe.Pointer)(nil)))

	// p := (**C.JNIEnv)(malloc(unsafe.Sizeof((*C.JNIEnv)(nil))))
	if attachCurrentThread(j.javaVM, p, nil) < 0 {
		panic("AttachCurrentThread failed")
	}

	return &Env{jniEnv: *(*unsafe.Pointer)(p), classCache: make(map[string]jclass)}
}

// DetachCurrentThread detaches the calling OS thread from the JVM.
func (j *JVM) DetachCurrentThread() error {
	if detachCurrentThread(j.javaVM) < 0 {
		return errors.New("JNIGI: detachCurrentThread error")
	}
	return nil
}

// Destroy unloads the JVM and reclaims its resources.
func (j *JVM) Destroy() error {
	if destroyJavaVM(j.javaVM) < 0 {
		return errors.New("JNIGI: destroyJavaVM error")
	}
	return nil
}

// GetJVM returns the JVM associated with this environment, locking the
// goroutine to its OS thread.
func (j *Env) GetJVM() (*JVM, error) {
	runtime.LockOSThread()

	p := malloc(unsafe.Sizeof((unsafe.Pointer)(nil)))

	if getJavaVM(j.jniEnv, p) < 0 {
		return nil, errors.New("Couldn't get JVM")
	}

	jvm := &JVM{*(*unsafe.Pointer)(p)}

	free(p)
	return jvm, nil
}
// exceptionCheck reports whether a Java exception is currently pending.
func (j *Env) exceptionCheck() bool {
	return toBool(exceptionCheck(j.jniEnv))
}

// describeException prints the pending exception to stderr (JNI
// ExceptionDescribe).
func (j *Env) describeException() {
	exceptionDescribe(j.jniEnv)
}

// handleException converts the pending Java exception into a Go error
// via the configured ExceptionHandler (or DefaultExceptionHandler).
func (j *Env) handleException() error {
	e := exceptionOccurred(j.jniEnv)
	if e == 0 {
		return errors.New("Java JNI function returned error but JNI indicates no current exception")
	}
	defer deleteLocalRef(j.jniEnv, jobject(e))

	ref := WrapJObject(uintptr(e), "java/lang/Throwable", false)
	if j.ExceptionHandler == nil {
		return DefaultExceptionHandler.CatchException(j, ref)
	}

	// Temporarily disable handler in the event exception rises during handling.
	// By setting it to the DescribeExceptionHandler, exceptions will get printed
	// and cleared.
	handler := j.ExceptionHandler
	j.ExceptionHandler = DescribeExceptionHandler
	defer func() {
		j.ExceptionHandler = handler
	}()

	return handler.CatchException(j, ref)
}
// NewObject constructs a new Java object of className, calling the
// constructor whose signature matches args. A signature set via
// PrecalculateSignature takes precedence (and is consumed); otherwise
// the signature is derived from the argument types.
func (j *Env) NewObject(className string, args ...interface{}) (*ObjectRef, error) {
	class, err := j.callFindClass(className)
	if err != nil {
		return nil, err
	}

	var methodSig string
	if j.preCalcSig != "" {
		// One-shot precalculated signature: use it and clear it.
		methodSig = j.preCalcSig
		j.preCalcSig = ""
	} else {
		calcSig, err := sigForMethod(Void, "", args)
		if err != nil {
			return nil, err
		}
		methodSig = calcSig
	}

	mid, err := j.callGetMethodID(false, class, "<init>", methodSig)
	if err != nil {
		return nil, err
	}

	// create args for jni call
	jniArgs, refs, err := j.createArgs(args)
	if err != nil {
		return nil, err
	}
	// Free the C argument storage and any temporary local references
	// created while converting args.
	defer func() {
		cleanUpArgs(jniArgs)
		for _, ref := range refs {
			deleteLocalRef(j.jniEnv, ref)
		}
	}()

	obj := newObjectA(j.jniEnv, class, mid, jniArgs)
	if obj == 0 {
		return nil, j.handleException()
	}

	return &ObjectRef{obj, className, false}, nil
}
// callFindClass resolves className via JNI FindClass, caching a global
// reference per class name so repeated lookups avoid JNI calls.
func (j *Env) callFindClass(className string) (jclass, error) {
	if v, ok := j.classCache[className]; ok {
		return v, nil
	}
	cnCstr := cString(className)
	defer free(cnCstr)
	class := findClass(j.jniEnv, cnCstr)
	if class == 0 {
		return 0, j.handleException()
	}
	// Promote to a global ref (valid across JNI calls), drop the local ref.
	ref := newGlobalRef(j.jniEnv, jobject(class))
	deleteLocalRef(j.jniEnv, jobject(class))
	j.classCache[className] = jclass(ref)

	return jclass(ref), nil
}

// callGetMethodID looks up the (static or instance) method id for
// name/sig on class.
func (j *Env) callGetMethodID(static bool, class jclass, name, sig string) (jmethodID, error) {
	mnCstr := cString(name)
	defer free(mnCstr)

	sigCstr := cString(sig)
	defer free(sigCstr)

	var mid jmethodID
	if static {
		mid = getStaticMethodID(j.jniEnv, class, mnCstr, sigCstr)
	} else {
		mid = getMethodID(j.jniEnv, class, mnCstr, sigCstr)
	}
	// fmt.Printf("sig = %s\n", sig)
	if mid == 0 {
		return 0, j.handleException()
	}
	return mid, nil
}

// PrecalculateSignature sets a method signature to be used (once) by the
// next call instead of deriving it from the arguments.
func (j *Env) PrecalculateSignature(sig string) {
	j.preCalcSig = sig
}

// NoReturnConvert disables automatic conversion of the next call's
// return value.
func (j *Env) NoReturnConvert() {
	j.noReturnConvert = true
}
// big is an upper bound used to reinterpret C array pointers as fixed
// size Go arrays before slicing.
const big = 1024 * 1024 * 100

// FromObjectArray converts a Java object array into a slice of
// *ObjectRef, one per element; each element inherits objRef's class
// name. Panics if fetching an element raises a Java exception.
//
// Fix: the local variable previously shadowed the builtin len; renamed
// to n.
func (j *Env) FromObjectArray(objRef *ObjectRef) []*ObjectRef {
	n := int(getArrayLength(j.jniEnv, jarray(objRef.jobject)))
	// exception check?
	v := make([]*ObjectRef, n)
	for i := 0; i < n; i++ {
		jobj := getObjectArrayElement(j.jniEnv, jobjectArray(objRef.jobject), jsize(i))
		if j.exceptionCheck() {
			panic(j.handleException())
		}
		v[i] = &ObjectRef{jobj, objRef.className, false}
	}
	return v
}
// toGoArray converts a Java primitive array into the corresponding Go
// slice ([]bool, []byte, []int16, []uint16, []int, []int64, []float32 or
// []float64, chosen by aType's base type). Each branch follows the same
// pattern: Get<Type>ArrayElements, copy into a fresh Go slice, then
// Release<Type>ArrayElements with jni_abort (the Java array is never
// modified).
//
// NOTE(review): the local `len` shadows the builtin, and `len >= 0` is
// always true — presumably `len > 0` was intended to skip the JNI
// element calls for empty arrays; confirm.
func (j *Env) toGoArray(array jobject, aType Type) (interface{}, error) {
	len := int(getArrayLength(j.jniEnv, jarray(array)))
	// exception check?
	switch aType.baseType() {
	case Boolean:
		v := make([]bool, len)
		if len >= 0 {
			ptr := getBooleanArrayElements(j.jniEnv, jbooleanArray(array), nil)
			if j.exceptionCheck() {
				return nil, j.handleException()
			}
			// Reinterpret the C pointer as a Go slice of len bytes.
			elems := (*(*[big]byte)(ptr))[0:len]
			for i := 0; i < len; i++ {
				v[i] = (elems[i] == 1)
			}
			releaseBooleanArrayElements(j.jniEnv, jbooleanArray(array), ptr, jint(jni_abort))
		}
		return v, nil
	case Byte:
		v := make([]byte, len)
		if len >= 0 {
			ptr := getByteArrayElements(j.jniEnv, jbyteArray(array), nil)
			if j.exceptionCheck() {
				return nil, j.handleException()
			}
			elems := (*(*[big]byte)(ptr))[0:len]
			copy(v, elems)
			releaseByteArrayElements(j.jniEnv, jbyteArray(array), ptr, jint(jni_abort))
		}
		return v, nil
	case Short:
		v := make([]int16, len)
		if len >= 0 {
			ptr := getShortArrayElements(j.jniEnv, jshortArray(array), nil)
			if j.exceptionCheck() {
				return nil, j.handleException()
			}
			elems := (*(*[big]int16)(ptr))[0:len]
			copy(v, elems)
			releaseShortArrayElements(j.jniEnv, jshortArray(array), ptr, jint(jni_abort))
		}
		return v, nil
	case Char:
		v := make([]uint16, len)
		if len >= 0 {
			ptr := getCharArrayElements(j.jniEnv, jcharArray(array), nil)
			if j.exceptionCheck() {
				return nil, j.handleException()
			}
			elems := (*(*[big]uint16)(ptr))[0:len]
			copy(v, elems)
			releaseCharArrayElements(j.jniEnv, jcharArray(array), ptr, jint(jni_abort))
		}
		return v, nil
	case Int:
		v := make([]int, len)
		if len >= 0 {
			ptr := getIntArrayElements(j.jniEnv, jintArray(array), nil)
			if j.exceptionCheck() {
				return nil, j.handleException()
			}
			elems := (*(*[big]int32)(ptr))[0:len]
			//copy(v, elems)
			// Element-wise loop because jint (int32) widens to Go int.
			for i := 0; i < len; i++ {
				v[i] = int(elems[i])
			}
			releaseIntArrayElements(j.jniEnv, jintArray(array), ptr, jint(jni_abort))
		}
		return v, nil
	case Long:
		v := make([]int64, len)
		if len >= 0 {
			ptr := getLongArrayElements(j.jniEnv, jlongArray(array), nil)
			if j.exceptionCheck() {
				return nil, j.handleException()
			}
			elems := (*(*[big]int64)(ptr))[0:len]
			copy(v, elems)
			releaseLongArrayElements(j.jniEnv, jlongArray(array), ptr, jint(jni_abort))
		}
		return v, nil
	case Float:
		v := make([]float32, len)
		if len >= 0 {
			ptr := getFloatArrayElements(j.jniEnv, jfloatArray(array), nil)
			if j.exceptionCheck() {
				return nil, j.handleException()
			}
			elems := (*(*[big]float32)(ptr))[0:len]
			copy(v, elems)
			releaseFloatArrayElements(j.jniEnv, jfloatArray(array), ptr, jint(jni_abort))
		}
		return v, nil
	case Double:
		v := make([]float64, len)
		if len >= 0 {
			ptr := getDoubleArrayElements(j.jniEnv, jdoubleArray(array), nil)
			if j.exceptionCheck() {
				return nil, j.handleException()
			}
			elems := (*(*[big]float64)(ptr))[0:len]
			copy(v, elems)
			releaseDoubleArrayElements(j.jniEnv, jdoubleArray(array), ptr, jint(jni_abort))
		}
		return v, nil
	default:
		return nil, errors.New("JNIGI unsupported array type")
	}
}
// ToObjectArray builds a Java array of className from objRefs. On class
// lookup failure the pending exception is printed and cleared, and the
// returned ref holds a nil jobject (i.e. arrayRef.IsNil()); per-element
// store failures are likewise printed and cleared. Panics only if array
// allocation itself fails.
func (j *Env) ToObjectArray(objRefs []*ObjectRef, className string) (arrayRef *ObjectRef) {
	arrayRef = &ObjectRef{className: className, isArray: true}
	class, err := j.callFindClass(className)
	if err != nil {
		j.describeException()
		exceptionClear(j.jniEnv)
		return
	}

	oa := newObjectArray(j.jniEnv, jsize(len(objRefs)), class, 0)
	if oa == 0 {
		panic(j.handleException())
	}
	arrayRef.jobject = jobject(oa)

	for i, obj := range objRefs {
		setObjectArrayElement(j.jniEnv, oa, jsize(i), obj.jobject)
		if j.exceptionCheck() {
			j.describeException()
			exceptionClear(j.jniEnv)
		}
	}
	return
}
// ByteArray wraps a Java byte[] together with its cached length.
type ByteArray struct {
	arr jbyteArray
	n int
}

// NewByteArray allocates a new Java byte[] of length n.
func (j *Env) NewByteArray(n int) *ByteArray {
	a := newByteArray(j.jniEnv, jsize(n))
	return &ByteArray{a, n}
}

// NewByteArrayFromSlice allocates a Java byte[] and copies src into it.
func (j *Env) NewByteArrayFromSlice(src []byte) *ByteArray {
	b := j.NewByteArray(len(src))
	if len(src) > 0 {
		bytes := b.GetCritical(j)
		copy(bytes, src)
		b.ReleaseCritical(j, bytes)
	}
	return b
}

// NewByteArrayFromObject wraps an existing byte[] object reference,
// querying its length via JNI.
func (j *Env) NewByteArrayFromObject(o *ObjectRef) *ByteArray {
	ba := &ByteArray{}
	ba.SetObject(o)
	ba.n = int(getArrayLength(j.jniEnv, jarray(ba.arr)))
	return ba
}

// jobj returns the underlying JNI handle (implements the jobj interface).
func (b *ByteArray) jobj() jobject {
	return jobject(b.arr)
}

// getType returns the jnigi type descriptor for a byte array.
func (b *ByteArray) getType() Type {
	return Byte | Array
}

// GetCritical pins the array and returns a view of its contents via
// GetPrimitiveArrayCritical; it must be paired with ReleaseCritical.
// Returns nil for an empty array.
func (b *ByteArray) GetCritical(env *Env) []byte {
	if b.n == 0 {
		return nil
	}
	ptr := getPrimitiveArrayCritical(env.jniEnv, jarray(b.arr), nil)
	return (*(*[big]byte)(ptr))[0:b.n]
}

// ReleaseCritical unpins a view previously obtained from GetCritical
// (no-op for an empty view).
func (b *ByteArray) ReleaseCritical(env *Env, bytes []byte) {
	if len(bytes) == 0 {
		return
	}
	ptr := unsafe.Pointer(&bytes[0])
	releasePrimitiveArrayCritical(env.jniEnv, jarray(b.arr), ptr, 0)
}

//returns jlo
// GetObject returns the array as a java/lang/Object reference.
func (b *ByteArray) GetObject() *ObjectRef {
	return &ObjectRef{jobject(b.arr), "java/lang/Object", false}
}

// SetObject points this ByteArray at o's underlying object (the cached
// length is not updated here; see NewByteArrayFromObject).
func (b *ByteArray) SetObject(o *ObjectRef) {
	b.arr = jbyteArray(o.jobject)
}

// CopyBytes returns a fresh Go copy of the array contents.
func (b *ByteArray) CopyBytes(env *Env) []byte {
	r := make([]byte, b.n)
	src := b.GetCritical(env)
	copy(r, src)
	b.ReleaseCritical(env, src)
	return r
}
// toJavaArray converts a Go slice of a supported primitive type into a
// newly allocated Java primitive array and returns it as a jobject (a
// local reference the caller must eventually delete).
// When the package-level copyToC flag is set, slice contents are first
// copied into C-allocated memory before the pointer is handed to the JNI
// Set<Type>ArrayRegion function; otherwise the Go backing array is passed
// directly.
func (j *Env) toJavaArray(src interface{}) (jobject, error) {
	switch v := src.(type) {
	case []bool:
		ba := newBooleanArray(j.jniEnv, jsize(len(v)))
		if ba == 0 {
			return 0, j.handleException()
		}
		if len(v) == 0 {
			return jobject(ba), nil
		}
		// jboolean is one byte; expand the Go bools to 0/1 bytes first.
		src := make([]byte, len(v))
		for i, vset := range v {
			if vset {
				src[i] = 1
			}
		}
		var ptr unsafe.Pointer
		if copyToC {
			ptr = malloc(uintptr(len(v)))
			defer free(ptr)
			data := (*(*[big]byte)(ptr))[:len(v)]
			copy(data, src)
		} else {
			ptr = unsafe.Pointer(&src[0])
		}
		setBooleanArrayRegion(j.jniEnv, ba, jsize(0), jsize(len(v)), ptr)
		if j.exceptionCheck() {
			return 0, j.handleException()
		}
		return jobject(ba), nil
	case []byte:
		ba := newByteArray(j.jniEnv, jsize(len(v)))
		if ba == 0 {
			return 0, j.handleException()
		}
		if len(v) == 0 {
			return jobject(ba), nil
		}
		var ptr unsafe.Pointer
		if copyToC {
			ptr = malloc(uintptr(len(v)))
			defer free(ptr)
			data := (*(*[big]byte)(ptr))[:len(v)]
			copy(data, v)
		} else {
			ptr = unsafe.Pointer(&v[0])
		}
		setByteArrayRegion(j.jniEnv, ba, jsize(0), jsize(len(v)), ptr)
		if j.exceptionCheck() {
			return 0, j.handleException()
		}
		return jobject(ba), nil
	case []int16:
		array := newShortArray(j.jniEnv, jsize(len(v)))
		if array == 0 {
			return 0, j.handleException()
		}
		if len(v) == 0 {
			return jobject(array), nil
		}
		var ptr unsafe.Pointer
		if copyToC {
			ptr = malloc(unsafe.Sizeof(int16(0)) * uintptr(len(v)))
			defer free(ptr)
			data := (*(*[big]int16)(ptr))[:len(v)]
			copy(data, v)
		} else {
			ptr = unsafe.Pointer(&v[0])
		}
		setShortArrayRegion(j.jniEnv, array, jsize(0), jsize(len(v)), ptr)
		if j.exceptionCheck() {
			return 0, j.handleException()
		}
		return jobject(array), nil
	case []uint16:
		// Go uint16 maps to Java char.
		array := newCharArray(j.jniEnv, jsize(len(v)))
		if array == 0 {
			return 0, j.handleException()
		}
		if len(v) == 0 {
			return jobject(array), nil
		}
		var ptr unsafe.Pointer
		if copyToC {
			ptr = malloc(unsafe.Sizeof(uint16(0)) * uintptr(len(v)))
			defer free(ptr)
			data := (*(*[big]uint16)(ptr))[:len(v)]
			copy(data, v)
		} else {
			ptr = unsafe.Pointer(&v[0])
		}
		setCharArrayRegion(j.jniEnv, array, jsize(0), jsize(len(v)), ptr)
		if j.exceptionCheck() {
			return 0, j.handleException()
		}
		return jobject(array), nil
	case []int32:
		array := newIntArray(j.jniEnv, jsize(len(v)))
		if array == 0 {
			return 0, j.handleException()
		}
		if len(v) == 0 {
			return jobject(array), nil
		}
		var ptr unsafe.Pointer
		if copyToC {
			ptr = malloc(unsafe.Sizeof(int32(0)) * uintptr(len(v)))
			defer free(ptr)
			data := (*(*[big]int32)(ptr))[:len(v)]
			copy(data, v)
		} else {
			ptr = unsafe.Pointer(&v[0])
		}
		setIntArrayRegion(j.jniEnv, array, jsize(0), jsize(len(v)), ptr)
		if j.exceptionCheck() {
			return 0, j.handleException()
		}
		return jobject(array), nil
	case []int:
		// Go int may be 64-bit; Java int is always 32-bit, so elements
		// are converted one by one rather than block-copied.
		array := newIntArray(j.jniEnv, jsize(len(v)))
		if array == 0 {
			return 0, j.handleException()
		}
		if len(v) == 0 {
			return jobject(array), nil
		}
		var ptr unsafe.Pointer
		if copyToC {
			ptr = malloc(unsafe.Sizeof(int32(0)) * uintptr(len(v)))
			defer free(ptr)
			data := (*(*[big]int32)(ptr))[:len(v)]
			//copy(data, v)
			for i := 0; i < len(data); i++ {
				data[i] = int32(v[i])
			}
		} else {
			data := make([]int32, len(v))
			for i := 0; i < len(v); i++ {
				data[i] = int32(v[i])
			}
			ptr = unsafe.Pointer(&data[0])
		}
		setIntArrayRegion(j.jniEnv, array, jsize(0), jsize(len(v)), ptr)
		if j.exceptionCheck() {
			return 0, j.handleException()
		}
		return jobject(array), nil
	case []int64:
		array := newLongArray(j.jniEnv, jsize(len(v)))
		if array == 0 {
			return 0, j.handleException()
		}
		if len(v) == 0 {
			return jobject(array), nil
		}
		var ptr unsafe.Pointer
		if copyToC {
			ptr = malloc(unsafe.Sizeof(int64(0)) * uintptr(len(v)))
			defer free(ptr)
			data := (*(*[big]int64)(ptr))[:len(v)]
			copy(data, v)
		} else {
			ptr = unsafe.Pointer(&v[0])
		}
		setLongArrayRegion(j.jniEnv, array, jsize(0), jsize(len(v)), ptr)
		if j.exceptionCheck() {
			return 0, j.handleException()
		}
		return jobject(array), nil
	case []float32:
		array := newFloatArray(j.jniEnv, jsize(len(v)))
		if array == 0 {
			return 0, j.handleException()
		}
		if len(v) == 0 {
			return jobject(array), nil
		}
		var ptr unsafe.Pointer
		if copyToC {
			ptr = malloc(unsafe.Sizeof(float32(0)) * uintptr(len(v)))
			defer free(ptr)
			data := (*(*[big]float32)(ptr))[:len(v)]
			copy(data, v)
		} else {
			ptr = unsafe.Pointer(&v[0])
		}
		setFloatArrayRegion(j.jniEnv, array, jsize(0), jsize(len(v)), ptr)
		if j.exceptionCheck() {
			return 0, j.handleException()
		}
		return jobject(array), nil
	case []float64:
		array := newDoubleArray(j.jniEnv, jsize(len(v)))
		if array == 0 {
			return 0, j.handleException()
		}
		if len(v) == 0 {
			return jobject(array), nil
		}
		var ptr unsafe.Pointer
		if copyToC {
			ptr = malloc(unsafe.Sizeof(float64(0)) * uintptr(len(v)))
			defer free(ptr)
			data := (*(*[big]float64)(ptr))[:len(v)]
			copy(data, v)
		} else {
			ptr = unsafe.Pointer(&v[0])
		}
		setDoubleArrayRegion(j.jniEnv, array, jsize(0), jsize(len(v)), ptr)
		if j.exceptionCheck() {
			return 0, j.handleException()
		}
		return jobject(array), nil
	default:
		return 0, errors.New("JNIGI unsupported array type")
	}
}
// createArgs marshals Go argument values into an array of 64-bit jvalue
// slots suitable for the JNI Call*MethodA family.
// The returned ptr must be released with cleanUpArgs, and every jobject
// in refs must be deleted with deleteLocalRef after the call completes.
// On error all already-created array refs are deleted before returning.
func (j *Env) createArgs(args []interface{}) (ptr unsafe.Pointer, refs []jobject, err error) {
	if len(args) == 0 {
		return nil, nil, nil
	}
	// jvalue is a 64 bit union: one uint64 slot per argument.
	argList := make([]uint64, len(args))
	refs = make([]jobject, 0)
	for i, arg := range args {
		switch v := arg.(type) {
		case jobj:
			argList[i] = uint64(v.jobj())
		case bool:
			if v {
				argList[i] = uint64(jboolean(1))
			} else {
				argList[i] = uint64(jboolean(0))
			}
		case byte:
			argList[i] = uint64(jbyte(v))
		case uint16:
			argList[i] = uint64(jchar(v))
		case int16:
			argList[i] = uint64(jshort(v))
		case int32:
			argList[i] = uint64(jint(v))
		case int:
			argList[i] = uint64(jint(int32(v)))
		case int64:
			argList[i] = uint64(jlong(v))
		case float32:
			argList[i] = uint64(jfloat(v))
		case float64:
			argList[i] = uint64(jdouble(v))
		case []bool, []byte, []int16, []uint16, []int32, []int, []int64, []float32, []float64:
			// Primitive slices become Java arrays; track the local ref
			// so the caller can delete it after the method call.
			if array, arrayErr := j.toJavaArray(v); arrayErr == nil {
				argList[i] = uint64(array)
				refs = append(refs, array)
			} else {
				err = arrayErr
			}
		default:
			// Fix: %T (Go type of the value) instead of %t, which is the
			// boolean verb and produced a garbled "%!t(...)" message.
			err = fmt.Errorf("JNIGI: argument not a valid value %T (%v)", args[i], args[i])
		}
		if err != nil {
			break
		}
	}
	if err != nil {
		// Roll back: free any array refs created before the failure.
		for _, ref := range refs {
			deleteLocalRef(j.jniEnv, ref)
		}
		refs = nil
		return
	}
	if copyToC {
		ptr = malloc(unsafe.Sizeof(uint64(0)) * uintptr(len(args)))
		data := (*(*[big]uint64)(ptr))[:len(args)]
		copy(data, argList)
	} else {
		ptr = unsafe.Pointer(&argList[0])
	}
	return
}
// Type is a bit set describing the kind of a Java value. The Array flag
// may be OR'd onto any base type to denote an array of that type.
type Type uint32

const (
	Void = Type(1 << iota)
	Boolean
	Byte
	Char
	Short
	Int
	Long
	Float
	Double
	Object
	Array
)

// baseType strips the Array flag, leaving the element type.
func (t Type) baseType() Type {
	if t.isArray() {
		return t ^ Array
	}
	return t
}

// isArray reports whether the Array flag is set.
func (t Type) isArray() bool {
	return t&Array != 0
}
// ObjectType names a Java class and is used as a type descriptor for
// object-valued returns and fields.
type ObjectType string

// ObjectArrayType names the element class of a Java object array.
type ObjectArrayType string

// convertedArray is implemented by wrapper types (e.g. *ByteArray) that
// know their own JNIGI Type.
type convertedArray interface {
	getType() Type
}
// typeOfValue maps a Go value — or a type descriptor such as Type, a
// class-name string, ObjectType, ObjectArrayType or *ObjectRef — to its
// JNIGI Type plus, for object kinds, the Java class name.
func typeOfValue(value interface{}) (t Type, className string, err error) {
	switch v := value.(type) {
	case Type:
		t = v
		if t.baseType() == Object {
			className = "java/lang/Object"
		}
	case string:
		// A bare string is shorthand for an object of that class.
		t = Object
		className = v
	case ObjectType:
		t = Object
		className = string(v)
	case ObjectArrayType:
		t = Object | Array
		className = string(v)
	case *ObjectRef:
		t = Object
		if v.isArray {
			t = t | Array
		}
		className = v.className
	case bool:
		t = Boolean
	case byte:
		t = Byte
	case int16:
		t = Short
	case uint16:
		t = Char
	case int32:
		t = Int
	case int:
		t = Int
	case int64:
		t = Long
	case float32:
		t = Float
	case float64:
		t = Double
	case []bool:
		t = Boolean | Array
		className = "java/lang/Object"
	case []byte:
		t = Byte | Array
		className = "java/lang/Object"
	case []uint16:
		t = Char | Array
		className = "java/lang/Object"
	case []int16:
		t = Short | Array
		className = "java/lang/Object"
	case []int32:
		t = Int | Array
		className = "java/lang/Object"
	case []int:
		t = Int | Array
		className = "java/lang/Object"
	case []int64:
		t = Long | Array
		className = "java/lang/Object"
	case []float32:
		t = Float | Array
		className = "java/lang/Object"
	case []float64:
		t = Double | Array
		className = "java/lang/Object"
	case convertedArray:
		t = v.getType()
		className = "java/lang/Object"
	default:
		// Fix: %T prints the Go type of the unsupported value; the
		// previous %t is the boolean verb and rendered as "%!t(...)".
		err = fmt.Errorf("JNIGI: unknown type %T (%v)", v, v)
	}
	return
}
// typeSignature renders a Type (plus class name for object kinds) as a
// JVM type descriptor, e.g. "I", "[B" or "Ljava/lang/String;".
func typeSignature(t Type, className string) string {
	var sig string
	if t.isArray() {
		sig = "["
	}
	switch t.baseType() {
	case Object:
		sig += "L" + className + ";"
	case Void:
		sig += "V"
	case Boolean:
		sig += "Z"
	case Byte:
		sig += "B"
	case Char:
		sig += "C"
	case Short:
		sig += "S"
	case Int:
		sig += "I"
	case Long:
		sig += "J"
	case Float:
		sig += "F"
	case Double:
		sig += "D"
	}
	return sig
}
// sigForMethod builds a full JNI method descriptor such as
// "(ILjava/lang/String;)V" from the return type/class and the concrete
// argument values.
func sigForMethod(returnType Type, returnClass string, args []interface{}) (string, error) {
	var paramStr string
	for i := range args {
		t, c, err := typeOfValue(args[i])
		if err != nil {
			return "", err
		}
		paramStr += typeSignature(t, c)
	}
	return fmt.Sprintf("(%s)%s", paramStr, typeSignature(returnType, returnClass)), nil
}

// cleanUpArgs releases the jvalue buffer produced by createArgs when it
// was allocated in C memory (copyToC mode); otherwise the buffer is Go
// memory and is reclaimed by the garbage collector.
func cleanUpArgs(ptr unsafe.Pointer) {
	if copyToC {
		free(ptr)
	}
}
// getClass resolves the jclass for this reference. For references typed
// as plain java/lang/Object it additionally asks the JVM for the
// object's actual runtime class (via getClass().getName()); if that is
// more specific, o.className is upgraded and that class is returned.
func (o *ObjectRef) getClass(env *Env) (class jclass, err error) {
	class, err = env.callFindClass(o.className)
	if err != nil {
		return 0, err
	}
	// if object is java/lang/Object try to up class it
	// there is an odd way to get the class name see: http://stackoverflow.com/questions/12719766/can-i-know-the-name-of-the-class-that-calls-a-jni-c-method
	if o.className == "java/lang/Object" {
		mid, err := env.callGetMethodID(false, class, "getClass", "()Ljava/lang/Class;")
		if err != nil {
			return 0, err
		}
		obj := callObjectMethodA(env.jniEnv, o.jobject, mid, nil)
		if env.exceptionCheck() {
			return 0, env.handleException()
		}
		defer deleteLocalRef(env.jniEnv, obj)
		objClass := getObjectClass(env.jniEnv, obj)
		if objClass == 0 {
			return 0, env.handleException()
		}
		defer deleteLocalRef(env.jniEnv, jobject(objClass))
		mid, err = env.callGetMethodID(false, objClass, "getName", "()Ljava/lang/String;")
		if err != nil {
			return 0, err
		}
		obj2 := callObjectMethodA(env.jniEnv, obj, mid, nil)
		if env.exceptionCheck() {
			return 0, env.handleException()
		}
		strObj := WrapJObject(uintptr(obj2), "java/lang/String", false)
		if strObj.IsNil() {
			return 0, errors.New("unexpected error getting object class name")
		}
		defer env.DeleteLocalRef(strObj)
		// Decode the class name string as UTF-8 bytes.
		b, err := strObj.CallMethod(env, "getBytes", Byte|Array, env.GetUTF8String())
		if err != nil {
			return 0, err
		}
		gotClass := string(b.([]byte))
		// note uses . for class name separator
		if gotClass != "java.lang.Object" {
			gotClass = strings.Replace(gotClass, ".", "/", -1)
			class, err = env.callFindClass(gotClass)
			if err != nil {
				return 0, err
			}
			o.className = gotClass
			return class, err
		}
	}
	return
}
// CallMethod invokes the named instance method on o. returnType is any
// descriptor accepted by typeOfValue (Type, class-name string,
// ObjectType/ObjectArrayType or *ObjectRef); args become the method's
// parameters. Primitive results come back as the matching Go type,
// objects as *ObjectRef, and primitive arrays are converted to Go
// slices unless Env.noReturnConvert is set.
func (o *ObjectRef) CallMethod(env *Env, methodName string, returnType interface{}, args ...interface{}) (interface{}, error) {
	class, err := o.getClass(env)
	if err != nil {
		return nil, err
	}
	rType, rClassName, err := typeOfValue(returnType)
	if err != nil {
		return nil, err
	}
	// A signature set via PrecalculateSignature takes precedence over
	// one derived from the argument values, and is consumed here.
	var methodSig string
	if env.preCalcSig != "" {
		methodSig = env.preCalcSig
		env.preCalcSig = ""
	} else {
		calcSig, err := sigForMethod(rType, rClassName, args)
		if err != nil {
			return nil, err
		}
		methodSig = calcSig
	}
	mid, err := env.callGetMethodID(false, class, methodName, methodSig)
	if err != nil {
		return nil, err
	}
	// create args for jni call
	jniArgs, refs, err := env.createArgs(args)
	if err != nil {
		return nil, err
	}
	// Always release the marshalled argument buffer and any temporary
	// array refs created for this call.
	defer func() {
		cleanUpArgs(jniArgs)
		for _, ref := range refs {
			deleteLocalRef(env.jniEnv, ref)
		}
	}()
	var arrayToConvert jobject
	var retVal interface{}
	switch {
	case rType == Void:
		callVoidMethodA(env.jniEnv, o.jobject, mid, jniArgs)
	case rType == Boolean:
		retVal = toBool(callBooleanMethodA(env.jniEnv, o.jobject, mid, jniArgs))
	case rType == Byte:
		retVal = byte(callByteMethodA(env.jniEnv, o.jobject, mid, jniArgs))
	case rType == Char:
		retVal = uint16(callCharMethodA(env.jniEnv, o.jobject, mid, jniArgs))
	case rType == Short:
		retVal = int16(callShortMethodA(env.jniEnv, o.jobject, mid, jniArgs))
	case rType == Int:
		retVal = int(callIntMethodA(env.jniEnv, o.jobject, mid, jniArgs))
	case rType == Long:
		retVal = int64(callLongMethodA(env.jniEnv, o.jobject, mid, jniArgs))
	case rType == Float:
		retVal = float32(callFloatMethodA(env.jniEnv, o.jobject, mid, jniArgs))
	case rType == Double:
		retVal = float64(callDoubleMethodA(env.jniEnv, o.jobject, mid, jniArgs))
	case rType == Object || rType.isArray():
		obj := callObjectMethodA(env.jniEnv, o.jobject, mid, jniArgs)
		if rType == Object || rType == Object|Array || env.noReturnConvert {
			retVal = &ObjectRef{obj, rClassName, rType.isArray()}
		} else {
			// Primitive array: converted to a Go slice below, after
			// the exception check.
			arrayToConvert = obj
		}
	default:
		return nil, errors.New("JNIGI unknown return type")
	}
	// noReturnConvert is one-shot, like preCalcSig.
	env.noReturnConvert = false
	if env.exceptionCheck() {
		return nil, env.handleException()
	}
	if arrayToConvert != 0 {
		retVal, err = env.toGoArray(arrayToConvert, rType)
		if err != nil {
			return nil, err
		}
	}
	return retVal, nil
}
// CallStaticMethod invokes a static method on className. Semantics match
// ObjectRef.CallMethod: returnType is any descriptor accepted by
// typeOfValue; primitive results come back as Go values, objects as
// *ObjectRef, and primitive arrays are converted to Go slices unless
// Env.noReturnConvert is set.
func (j *Env) CallStaticMethod(className string, methodName string, returnType interface{}, args ...interface{}) (interface{}, error) {
	class, err := j.callFindClass(className)
	if err != nil {
		return nil, err
	}
	rType, rClassName, err := typeOfValue(returnType)
	if err != nil {
		return nil, err
	}
	// A precalculated signature wins over the derived one (one-shot).
	var methodSig string
	if j.preCalcSig != "" {
		methodSig = j.preCalcSig
		j.preCalcSig = ""
	} else {
		calcSig, err := sigForMethod(rType, rClassName, args)
		if err != nil {
			return nil, err
		}
		methodSig = calcSig
	}
	mid, err := j.callGetMethodID(true, class, methodName, methodSig)
	if err != nil {
		return nil, err
	}
	// create args for jni call
	jniArgs, refs, err := j.createArgs(args)
	if err != nil {
		return nil, err
	}
	defer func() {
		cleanUpArgs(jniArgs)
		for _, ref := range refs {
			deleteLocalRef(j.jniEnv, ref)
		}
	}()
	var arrayToConvert jobject
	var retVal interface{}
	switch {
	case rType == Void:
		callStaticVoidMethodA(j.jniEnv, class, mid, jniArgs)
	case rType == Boolean:
		retVal = toBool(callStaticBooleanMethodA(j.jniEnv, class, mid, jniArgs))
	case rType == Byte:
		retVal = byte(callStaticByteMethodA(j.jniEnv, class, mid, jniArgs))
	case rType == Char:
		retVal = uint16(callStaticCharMethodA(j.jniEnv, class, mid, jniArgs))
	case rType == Short:
		retVal = int16(callStaticShortMethodA(j.jniEnv, class, mid, jniArgs))
	case rType == Int:
		retVal = int(callStaticIntMethodA(j.jniEnv, class, mid, jniArgs))
	case rType == Long:
		retVal = int64(callStaticLongMethodA(j.jniEnv, class, mid, jniArgs))
	case rType == Float:
		retVal = float32(callStaticFloatMethodA(j.jniEnv, class, mid, jniArgs))
	case rType == Double:
		retVal = float64(callStaticDoubleMethodA(j.jniEnv, class, mid, jniArgs))
	case rType == Object || rType.isArray():
		obj := callStaticObjectMethodA(j.jniEnv, class, mid, jniArgs)
		if rType == Object || rType == Object|Array || j.noReturnConvert {
			retVal = &ObjectRef{obj, rClassName, rType.isArray()}
		} else {
			// Primitive array: converted after the exception check.
			arrayToConvert = obj
		}
	default:
		return nil, errors.New("JNIGI unknown return type")
	}
	j.noReturnConvert = false
	if j.exceptionCheck() {
		return nil, j.handleException()
	}
	if arrayToConvert != 0 {
		retVal, err = j.toGoArray(arrayToConvert, rType)
		if err != nil {
			return nil, err
		}
	}
	return retVal, nil
}
// callGetFieldID resolves a field ID on class, static or instance. A
// zero ID means the JNI lookup threw; that is converted to a Go error.
func (j *Env) callGetFieldID(static bool, class jclass, name, sig string) (jfieldID, error) {
	nameC := cString(name)
	sigC := cString(sig)
	defer func() {
		free(sigC)
		free(nameC)
	}()

	var fid jfieldID
	if static {
		fid = getStaticFieldID(j.jniEnv, class, nameC, sigC)
	} else {
		fid = getFieldID(j.jniEnv, class, nameC, sigC)
	}
	if fid == 0 {
		return 0, j.handleException()
	}
	return fid, nil
}
// GetField reads the named instance field from o. fieldType is any
// descriptor accepted by typeOfValue; primitives come back as Go
// values, objects as *ObjectRef, and primitive arrays are converted to
// Go slices unless Env.noReturnConvert is set.
func (o *ObjectRef) GetField(env *Env, fieldName string, fieldType interface{}) (interface{}, error) {
	class, err := o.getClass(env)
	if err != nil {
		return nil, err
	}
	fType, fClassName, err := typeOfValue(fieldType)
	if err != nil {
		return nil, err
	}
	// A precalculated signature takes precedence (one-shot).
	var fieldSig string
	if env.preCalcSig != "" {
		fieldSig = env.preCalcSig
		env.preCalcSig = ""
	} else {
		fieldSig = typeSignature(fType, fClassName)
	}
	fid, err := env.callGetFieldID(false, class, fieldName, fieldSig)
	if err != nil {
		return nil, err
	}
	var arrayToConvert jobject
	var retVal interface{}
	switch {
	case fType == Boolean:
		retVal = toBool(getBooleanField(env.jniEnv, o.jobject, fid))
	case fType == Byte:
		retVal = byte(getByteField(env.jniEnv, o.jobject, fid))
	case fType == Char:
		retVal = uint16(getCharField(env.jniEnv, o.jobject, fid))
	case fType == Short:
		retVal = int16(getShortField(env.jniEnv, o.jobject, fid))
	case fType == Int:
		retVal = int(getIntField(env.jniEnv, o.jobject, fid))
	case fType == Long:
		retVal = int64(getLongField(env.jniEnv, o.jobject, fid))
	case fType == Float:
		retVal = float32(getFloatField(env.jniEnv, o.jobject, fid))
	case fType == Double:
		retVal = float64(getDoubleField(env.jniEnv, o.jobject, fid))
	case fType == Object || fType.isArray():
		obj := getObjectField(env.jniEnv, o.jobject, fid)
		if fType == Object || fType == Object|Array || env.noReturnConvert {
			retVal = &ObjectRef{obj, fClassName, fType.isArray()}
		} else {
			// Primitive array: converted after the exception check.
			arrayToConvert = obj
		}
	default:
		return nil, errors.New("JNIGI unknown field type")
	}
	env.noReturnConvert = false
	if env.exceptionCheck() {
		return nil, env.handleException()
	}
	if arrayToConvert != 0 {
		retVal, err = env.toGoArray(arrayToConvert, fType)
		if err != nil {
			return nil, err
		}
	}
	return retVal, nil
}
// SetField writes value into the named instance field of o. The field's
// JNI signature is derived from the Go type of value (or taken from a
// precalculated signature, one-shot). Slice values are converted to new
// Java arrays whose local refs are deleted after the write.
func (o *ObjectRef) SetField(env *Env, fieldName string, value interface{}) error {
	class, err := o.getClass(env)
	if err != nil {
		return err
	}
	vType, vClassName, err := typeOfValue(value)
	if err != nil {
		return err
	}
	var fieldSig string
	if env.preCalcSig != "" {
		fieldSig = env.preCalcSig
		env.preCalcSig = ""
	} else {
		fieldSig = typeSignature(vType, vClassName)
	}
	fid, err := env.callGetFieldID(false, class, fieldName, fieldSig)
	if err != nil {
		return err
	}
	switch v := value.(type) {
	case bool:
		setBooleanField(env.jniEnv, o.jobject, fid, fromBool(v))
	case byte:
		setByteField(env.jniEnv, o.jobject, fid, jbyte(v))
	case uint16:
		setCharField(env.jniEnv, o.jobject, fid, jchar(v))
	case int16:
		setShortField(env.jniEnv, o.jobject, fid, jshort(v))
	case int32:
		setIntField(env.jniEnv, o.jobject, fid, jint(v))
	case int:
		setIntField(env.jniEnv, o.jobject, fid, jint(int32(v)))
	case int64:
		setLongField(env.jniEnv, o.jobject, fid, jlong(v))
	case float32:
		setFloatField(env.jniEnv, o.jobject, fid, jfloat(v))
	case float64:
		setDoubleField(env.jniEnv, o.jobject, fid, jdouble(v))
	case jobj:
		setObjectField(env.jniEnv, o.jobject, fid, v.jobj())
	case []bool, []byte, []int16, []uint16, []int32, []int, []int64, []float32, []float64:
		array, err := env.toJavaArray(v)
		if err != nil {
			return err
		}
		defer deleteLocalRef(env.jniEnv, array)
		setObjectField(env.jniEnv, o.jobject, fid, jobject(array))
	default:
		return errors.New("JNIGI unknown field value")
	}
	if env.exceptionCheck() {
		return env.handleException()
	}
	return nil
}
// GetStaticField reads a static field from className. Semantics match
// ObjectRef.GetField: primitives come back as Go values, objects as
// *ObjectRef, primitive arrays as Go slices unless noReturnConvert is
// set.
func (j *Env) GetStaticField(className string, fieldName string, fieldType interface{}) (interface{}, error) {
	class, err := j.callFindClass(className)
	if err != nil {
		return nil, err
	}
	fType, fClassName, err := typeOfValue(fieldType)
	if err != nil {
		return nil, err
	}
	// A precalculated signature takes precedence (one-shot).
	var fieldSig string
	if j.preCalcSig != "" {
		fieldSig = j.preCalcSig
		j.preCalcSig = ""
	} else {
		fieldSig = typeSignature(fType, fClassName)
	}
	fid, err := j.callGetFieldID(true, class, fieldName, fieldSig)
	if err != nil {
		return nil, err
	}
	var arrayToConvert jobject
	var retVal interface{}
	switch {
	case fType == Boolean:
		retVal = toBool(getStaticBooleanField(j.jniEnv, class, fid))
	case fType == Byte:
		retVal = byte(getStaticByteField(j.jniEnv, class, fid))
	case fType == Char:
		retVal = uint16(getStaticCharField(j.jniEnv, class, fid))
	case fType == Short:
		retVal = int16(getStaticShortField(j.jniEnv, class, fid))
	case fType == Int:
		retVal = int(getStaticIntField(j.jniEnv, class, fid))
	case fType == Long:
		retVal = int64(getStaticLongField(j.jniEnv, class, fid))
	case fType == Float:
		retVal = float32(getStaticFloatField(j.jniEnv, class, fid))
	case fType == Double:
		retVal = float64(getStaticDoubleField(j.jniEnv, class, fid))
	case fType == Object || fType.isArray():
		obj := getStaticObjectField(j.jniEnv, class, fid)
		if fType == Object || fType == Object|Array || j.noReturnConvert {
			retVal = &ObjectRef{obj, fClassName, fType.isArray()}
		} else {
			// Primitive array: converted after the exception check.
			arrayToConvert = obj
		}
	default:
		return nil, errors.New("JNIGI unknown field type")
	}
	j.noReturnConvert = false
	if j.exceptionCheck() {
		return nil, j.handleException()
	}
	if arrayToConvert != 0 {
		retVal, err = j.toGoArray(arrayToConvert, fType)
		if err != nil {
			return nil, err
		}
	}
	return retVal, nil
}
// SetStaticField writes value into a static field of className. The
// field's JNI signature is derived from the Go type of value (or taken
// from a precalculated signature, one-shot). Slice values become new
// Java arrays whose local refs are deleted after the write.
func (j *Env) SetStaticField(className string, fieldName string, value interface{}) error {
	class, err := j.callFindClass(className)
	if err != nil {
		return err
	}
	vType, vClassName, err := typeOfValue(value)
	if err != nil {
		return err
	}
	var fieldSig string
	if j.preCalcSig != "" {
		fieldSig = j.preCalcSig
		j.preCalcSig = ""
	} else {
		fieldSig = typeSignature(vType, vClassName)
	}
	fid, err := j.callGetFieldID(true, class, fieldName, fieldSig)
	if err != nil {
		return err
	}
	switch v := value.(type) {
	case bool:
		setStaticBooleanField(j.jniEnv, class, fid, fromBool(v))
	case byte:
		setStaticByteField(j.jniEnv, class, fid, jbyte(v))
	case uint16:
		setStaticCharField(j.jniEnv, class, fid, jchar(v))
	case int16:
		setStaticShortField(j.jniEnv, class, fid, jshort(v))
	case int32:
		setStaticIntField(j.jniEnv, class, fid, jint(v))
	case int:
		setStaticIntField(j.jniEnv, class, fid, jint(int32(v)))
	case int64:
		setStaticLongField(j.jniEnv, class, fid, jlong(v))
	case float32:
		setStaticFloatField(j.jniEnv, class, fid, jfloat(v))
	case float64:
		setStaticDoubleField(j.jniEnv, class, fid, jdouble(v))
	case jobj:
		setStaticObjectField(j.jniEnv, class, fid, v.jobj())
	case []bool, []byte, []int16, []uint16, []int32, []int, []int64, []float32, []float64:
		array, err := j.toJavaArray(v)
		if err != nil {
			return err
		}
		defer deleteLocalRef(j.jniEnv, array)
		setStaticObjectField(j.jniEnv, class, fid, jobject(array))
	default:
		return errors.New("JNIGI unknown field value")
	}
	if j.exceptionCheck() {
		return j.handleException()
	}
	return nil
}
// RegisterNative binds a native function pointer to a Java method named
// methodName declared on className, deriving the JNI signature from
// returnType and params.
// NOTE(review): fptr.(unsafe.Pointer) panics if the caller passes
// anything other than an unsafe.Pointer — confirm all call sites.
func (j *Env) RegisterNative(className, methodName string, returnType interface{}, params []interface{}, fptr interface{}) error {
	class, err := j.callFindClass(className)
	if err != nil {
		return err
	}
	mnCstr := cString(methodName)
	defer free(mnCstr)
	rType, rClassName, err := typeOfValue(returnType)
	if err != nil {
		return err
	}
	sig, err := sigForMethod(rType, rClassName, params)
	if err != nil {
		return err
	}
	sigCstr := cString(sig)
	defer free(sigCstr)
	if registerNative(j.jniEnv, class, mnCstr, sigCstr, fptr.(unsafe.Pointer)) < 0 {
		return j.handleException()
	}
	return nil
}
// NewGlobalRef promotes o to a JVM global reference that survives local
// reference frames and thread detachment. The original ref is untouched.
func (j *Env) NewGlobalRef(o *ObjectRef) *ObjectRef {
	g := newGlobalRef(j.jniEnv, o.jobject)
	return &ObjectRef{g, o.className, o.isArray}
}

// DeleteGlobalRef releases a global reference and nils o so accidental
// reuse is detectable via IsNil.
func (j *Env) DeleteGlobalRef(o *ObjectRef) {
	deleteGlobalRef(j.jniEnv, o.jobject)
	o.jobject = 0
}

// DeleteLocalRef releases a local reference and nils o so accidental
// reuse is detectable via IsNil.
func (j *Env) DeleteLocalRef(o *ObjectRef) {
	deleteLocalRef(j.jniEnv, o.jobject)
	o.jobject = 0
}
// EnsureLocalCapacity asks the JVM to guarantee room for at least
// capacity local references on the current thread. A pending Java
// exception is converted to a Go error first.
func (j *Env) EnsureLocalCapacity(capacity int32) error {
	ok := ensureLocalCapacity(j.jniEnv, jint(capacity)) == 0
	if j.exceptionCheck() {
		return j.handleException()
	}
	if ok {
		return nil
	}
	return errors.New("JNIGI: ensureLocalCapacity error")
}

// PushLocalFrame opens a new local reference frame with room for at
// least capacity references. A pending Java exception is converted to a
// Go error first.
func (j *Env) PushLocalFrame(capacity int32) error {
	ok := pushLocalFrame(j.jniEnv, jint(capacity)) == 0
	if j.exceptionCheck() {
		return j.handleException()
	}
	if ok {
		return nil
	}
	return errors.New("JNIGI: pushLocalFrame error")
}
// PopLocalFrame closes the current local reference frame. If result is
// non-nil its object is carried into the outer frame; the returned ref
// wraps that surviving object. result itself is nilled after the pop.
func (j *Env) PopLocalFrame(result *ObjectRef) *ObjectRef {
	if result == nil {
		result = &ObjectRef{}
	}
	o := popLocalFrame(j.jniEnv, result.jobject)
	result.jobject = 0
	return &ObjectRef{o, result.className, result.isArray}
}
// utf8 caches a global reference to the java/lang/String "UTF-8",
// created lazily by GetUTF8String.
var utf8 *ObjectRef

// return global reference to java/lang/String containing "UTF-8"
// NOTE(review): the lazy initialization of the package-level utf8 var is
// unsynchronized; concurrent first calls from multiple attached threads
// could race — confirm whether first use is guaranteed single-threaded.
func (j *Env) GetUTF8String() *ObjectRef {
	if utf8 == nil {
		cStr := cString("UTF-8")
		local := newStringUTF(j.jniEnv, cStr)
		if local == 0 {
			panic(j.handleException())
		}
		// Promote to a global ref so the cached string outlives any
		// local reference frame and remains valid on other threads.
		global := jstring(newGlobalRef(j.jniEnv, jobject(local)))
		deleteLocalRef(j.jniEnv, jobject(local))
		free(cStr)
		utf8 = &ObjectRef{jobject: jobject(global), isArray: false, className: "java/lang/String"}
	}
	return utf8
}
// StackTraceElement is a struct holding the contents of java.lang.StackTraceElement
// for use in a ThrowableError.
type StackTraceElement struct {
	ClassName      string
	FileName       string
	LineNumber     int
	MethodName     string
	IsNativeMethod bool
	AsString       string // the element's toString() output
}

// String returns the cached toString() form of the element.
func (el StackTraceElement) String() string {
	return el.AsString
}

// ThrowableError is an error struct that holds the relevant contents of a
// java.lang.Throwable. This is the returned error from ThrowableErrorExceptionHandler.
type ThrowableError struct {
	ClassName        string
	LocalizedMessage string
	Message          string
	StackTrace       []StackTraceElement
	AsString         string
	Cause            *ThrowableError // nested cause chain, nil when absent
}

// String returns the throwable's cached toString() form.
func (e ThrowableError) String() string {
	return e.AsString
}

// Error implements the error interface using the toString() form.
func (e ThrowableError) Error() string {
	return e.AsString
}
// stringFromJavaLangString decodes a java/lang/String reference to a Go
// string via getBytes("UTF-8"). Best-effort: a nil ref or a failed call
// yields "" rather than an error (used while building error values).
func stringFromJavaLangString(env *Env, ref *ObjectRef) string {
	if ref.IsNil() {
		return ""
	}
	env.PrecalculateSignature("(Ljava/lang/String;)[B")
	ret, err := ref.CallMethod(env, "getBytes", Byte|Array, env.GetUTF8String())
	if err != nil {
		return ""
	}
	return string(ret.([]byte))
}

// callStringMethodAndAssign invokes a no-arg, String-returning method on
// obj and passes the decoded Go string to assign. The temporary string
// ref is released before returning.
func callStringMethodAndAssign(env *Env, obj *ObjectRef, method string, assign func(s string)) error {
	env.PrecalculateSignature("()Ljava/lang/String;")
	ret, err := obj.CallMethod(env, method, "java/lang/String")
	if err != nil {
		return err
	}
	strref := ret.(*ObjectRef)
	defer env.DeleteLocalRef(strref)
	assign(stringFromJavaLangString(env, strref))
	return nil
}
// NewStackTraceElementFromObject creates a new StackTraceElement with its contents
// set from the values provided in stackTraceElement's methods.
// A nil reference yields (nil, nil).
func NewStackTraceElementFromObject(env *Env, stackTraceElement *ObjectRef) (*StackTraceElement, error) {
	if stackTraceElement.IsNil() {
		return nil, nil
	}
	getStringAndAssign := func(method string, assign func(s string)) error {
		return callStringMethodAndAssign(env, stackTraceElement, method, assign)
	}
	out := StackTraceElement{}
	// ClassName
	if err := getStringAndAssign("getClassName", func(s string) {
		out.ClassName = s
	}); err != nil {
		return nil, err
	}
	// FileName
	if err := getStringAndAssign("getFileName", func(s string) {
		out.FileName = s
	}); err != nil {
		return nil, err
	}
	// MethodName
	if err := getStringAndAssign("getMethodName", func(s string) {
		out.MethodName = s
	}); err != nil {
		return nil, err
	}
	// ToString
	if err := getStringAndAssign("toString", func(s string) {
		out.AsString = s
	}); err != nil {
		return nil, err
	}
	// LineNumber
	{
		env.PrecalculateSignature("()I")
		ret, err := stackTraceElement.CallMethod(env, "getLineNumber", Int)
		if err != nil {
			return nil, err
		}
		out.LineNumber = ret.(int)
	}
	// IsNativeMethod
	{
		env.PrecalculateSignature("()Z")
		ret, err := stackTraceElement.CallMethod(env, "isNativeMethod", Boolean)
		if err != nil {
			return nil, err
		}
		out.IsNativeMethod = ret.(bool)
	}
	return &out, nil
}
// NewThrowableErrorFromObject creates a new ThrowableError with its contents
// set from the values provided in throwable's methods.
// A nil reference yields (nil, nil). Once ClassName/AsString have been
// captured, later failures return the partially filled value alongside
// the error.
func NewThrowableErrorFromObject(env *Env, throwable *ObjectRef) (*ThrowableError, error) {
	if throwable.IsNil() {
		return nil, nil
	}
	getStringAndAssign := func(obj *ObjectRef, method string, assign func(s string)) error {
		return callStringMethodAndAssign(env, obj, method, assign)
	}
	out := &ThrowableError{}
	// ClassName
	{
		objClass := getObjectClass(env.jniEnv, throwable.jobject)
		if objClass == 0 {
			return nil, fmt.Errorf("unable to get throwable class")
		}
		clsref := WrapJObject(uintptr(objClass), "java/lang/Class", false)
		defer env.DeleteLocalRef(clsref)
		if err := getStringAndAssign(clsref, "getName", func(s string) {
			out.ClassName = s
		}); err != nil {
			return nil, err
		}
	}
	// AsString
	if err := getStringAndAssign(throwable, "toString", func(s string) {
		out.AsString = s
	}); err != nil {
		return nil, err
	}
	// From this point on, return throwableError if a call fails, since we have some basic information.
	// LocalizedMessage
	if err := getStringAndAssign(throwable, "getLocalizedMessage", func(s string) {
		out.LocalizedMessage = s
	}); err != nil {
		return out, err
	}
	// Message
	if err := getStringAndAssign(throwable, "getMessage", func(s string) {
		out.Message = s
	}); err != nil {
		return out, err
	}
	// StackTrace
	{
		env.PrecalculateSignature("()[Ljava/lang/StackTraceElement;")
		ret, err := throwable.CallMethod(env, "getStackTrace", ObjectArrayType("java/lang/StackTraceElement"))
		if err != nil {
			return out, err
		}
		stkTrcArr := ret.(*ObjectRef)
		defer env.DeleteLocalRef(stkTrcArr)
		if !stkTrcArr.IsNil() {
			stkTrcSlc := env.FromObjectArray(stkTrcArr)
			stackTrace := make([]StackTraceElement, 0, len(stkTrcSlc))
			for _, stkTrc := range stkTrcSlc {
				if stkTrc.IsNil() {
					continue
				}
				// NOTE(review): defer inside a loop — all element refs
				// stay live until the function returns. Bounded by the
				// stack depth, but confirm this is acceptable.
				defer env.DeleteLocalRef(stkTrc)
				stackTraceElement, err := NewStackTraceElementFromObject(env, stkTrc)
				if err != nil || stackTraceElement == nil {
					continue
				}
				stackTrace = append(stackTrace, *stackTraceElement)
			}
			out.StackTrace = stackTrace
		}
	}
	// Cause
	{
		env.PrecalculateSignature("()Ljava/lang/Throwable;")
		ret, err := throwable.CallMethod(env, "getCause", "java/lang/Throwable")
		if err != nil {
			return out, err
		}
		obj := ret.(*ObjectRef)
		defer env.DeleteLocalRef(obj)
		// Recurse; a failed conversion simply leaves Cause nil.
		out.Cause, _ = NewThrowableErrorFromObject(env, obj)
	}
	return out, nil
}
var (
	// errThrowableConvertFail is the generic fallback when a thrown
	// exception cannot be converted to a richer error value.
	errThrowableConvertFail = fmt.Errorf("Java exception occured")

	// DefaultExceptionHandler is an alias for DescribeExceptionHandler, which is the default.
	DefaultExceptionHandler = DescribeExceptionHandler

	// DescribeExceptionHandler calls the JNI exceptionDescribe function.
	DescribeExceptionHandler ExceptionHandler = ExceptionHandlerFunc(func(env *Env, exception *ObjectRef) error {
		// Prints the exception and backtrace to stderr, then clears it.
		exceptionDescribe(env.jniEnv)
		exceptionClear(env.jniEnv)
		return errors.New("Java exception occured. check stderr")
	})

	// ThrowableToStringExceptionHandler calls ToString on the exception and returns an error
	// with the returned value as its Error message.
	// If exception is nil or the toString() call fails, a generic default error is returned.
	ThrowableToStringExceptionHandler ExceptionHandler = ExceptionHandlerFunc(func(env *Env, exception *ObjectRef) error {
		// Clear first so the toString call below can run JNI methods.
		exceptionClear(env.jniEnv)
		if exception.IsNil() {
			return errThrowableConvertFail
		}
		msg := "Java exception occured"
		callStringMethodAndAssign(env, exception, "toString", func(s string) {
			if s == "" {
				return
			}
			msg = s
		})
		return errors.New(msg)
	})

	// ThrowableErrorExceptionHandler populates a new ThrowableError with the values of exception.
	// If exception is nil, the getClass().getName(), or the toString call fails, a generic default
	// error is returned.
	ThrowableErrorExceptionHandler ExceptionHandler = ExceptionHandlerFunc(func(env *Env, exception *ObjectRef) error {
		exceptionClear(env.jniEnv)
		if exception.IsNil() {
			return errThrowableConvertFail
		}
		throwableError, _ := NewThrowableErrorFromObject(env, exception)
		if throwableError == nil {
			return errThrowableConvertFail
		}
		return *throwableError
	})
)
Fix unknown type errors not being found in supplied arguments. (#44)
// Copyright 2016 Tim O'Brien. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package jnigi
import (
"errors"
"fmt"
"runtime"
"unsafe"
"strings"
)
// copy arguments in to C memory before passing to jni functions
var copyToC bool = false

// toBool converts a JNI jboolean (0 or 1) to a Go bool.
func toBool(b jboolean) bool {
	return b == 1
}

// fromBool converts a Go bool to a JNI jboolean (0 or 1).
func fromBool(b bool) jboolean {
	if b {
		return 1
	} else {
		return 0
	}
}
// ObjectRef pairs a JNI object reference with the Java class name it is
// currently typed as, and whether it denotes an array.
type ObjectRef struct {
	jobject   jobject
	className string // slash-separated, e.g. "java/lang/String"
	isArray   bool
}

// WrapJObject wraps a raw jobject pointer value in an ObjectRef.
func WrapJObject(jobj uintptr, className string, isArray bool) *ObjectRef {
	return &ObjectRef{jobject(jobj), className, isArray}
}

// Cast returns a view of the same object typed as className. The same
// ref is returned unchanged when the class name already matches.
func (o *ObjectRef) Cast(className string) *ObjectRef {
	if className == o.className {
		return o
	} else {
		return &ObjectRef{o.jobject, className, o.isArray}
	}
}

// IsNil reports whether the underlying jobject is the null reference.
func (o *ObjectRef) IsNil() bool {
	return o.jobject == 0
}

// IsInstanceOf reports whether the referenced object is an instance of
// className, per the JVM's IsInstanceOf check.
func (o *ObjectRef) IsInstanceOf(env *Env, className string) (bool, error) {
	class, err := env.callFindClass(className)
	if err != nil {
		return false, err
	}
	return toBool(isInstanceOf(env.jniEnv, o.jobject, class)), nil
}

func (o *ObjectRef) jobj() jobject {
	return o.jobject
}

// JObject exposes the raw jobject for interop with other JNI code.
func (o *ObjectRef) JObject() jobject {
	return o.jobj()
}

// jobj is satisfied by any value carrying a raw jobject (ObjectRef,
// ByteArray, ...), letting them be passed uniformly as call arguments.
type jobj interface {
	jobj() jobject
}
// ExceptionHandler is used to convert a thrown exception (java.lang.Throwable) to a Go error.
type ExceptionHandler interface {
	// CatchException is called with the pending Throwable; it must clear the
	// JNI exception state and return an error describing it.
	CatchException(env *Env, exception *ObjectRef) error
}

// ExceptionHandlerFunc is an adapter to allow use of ordinary functions as an
// ExceptionHandler. If f is a function with the appropriate signature, ExceptionHandlerFunc(f)
// is an ExceptionHandler object that calls f.
type ExceptionHandlerFunc func(env *Env, exception *ObjectRef) error

// CatchException calls f to implement ExceptionHandler.
func (f ExceptionHandlerFunc) CatchException(env *Env, exception *ObjectRef) error {
	return f(env, exception)
}
// Env wraps a per-thread JNIEnv pointer, through which all JNI calls are
// made, plus per-Env state used by the call helpers.
type Env struct {
	jniEnv           unsafe.Pointer // underlying *JNIEnv for this thread
	preCalcSig       string // one-shot signature override set by PrecalculateSignature
	noReturnConvert  bool // one-shot flag set by NoReturnConvert: return raw ObjectRef instead of a Go slice
	classCache       map[string]jclass // class name -> global class reference
	ExceptionHandler ExceptionHandler // converts Throwables to Go errors; nil means use DefaultExceptionHandler
}

// WrapEnv wraps an existing JNIEnv pointer (e.g. one received in a native
// callback) in an Env with a fresh class cache.
func WrapEnv(envPtr unsafe.Pointer) *Env {
	return &Env{jniEnv: envPtr, classCache: make(map[string]jclass)}
}
// JVM wraps a JavaVM pointer, the process-wide handle to the Java VM.
type JVM struct {
	javaVM unsafe.Pointer
}

// JVMInitArgs wraps a JavaVMInitArgs pointer used when creating a JVM.
type JVMInitArgs struct {
	javaVMInitArgs unsafe.Pointer
}
// CreateJVM creates a Java VM in this process with the given init args and
// returns the VM plus the Env attached to the calling OS thread. The
// goroutine is locked to its OS thread, as JNI requires.
func CreateJVM(jvmInitArgs *JVMInitArgs) (*JVM, *Env, error) {
	runtime.LockOSThread()

	// Out-parameters for JNI_CreateJavaVM: p receives the JNIEnv*, p2 the JavaVM*.
	p := malloc(unsafe.Sizeof((unsafe.Pointer)(nil)))
	p2 := malloc(unsafe.Sizeof((unsafe.Pointer)(nil)))

	if jni_CreateJavaVM(p2, p, jvmInitArgs.javaVMInitArgs) < 0 {
		return nil, nil, errors.New("Couldn't instantiate JVM")
	}
	jvm := &JVM{*(*unsafe.Pointer)(p2)}
	env := &Env{jniEnv: *(*unsafe.Pointer)(p), classCache: make(map[string]jclass)}

	free(p)
	free(p2)
	return jvm, env, nil
}
// AttachCurrentThread attaches the calling OS thread to the JVM and returns
// an Env for it. The goroutine is locked to its OS thread; panics if the
// JNI attach call fails.
func (j *JVM) AttachCurrentThread() *Env {
	runtime.LockOSThread()

	p := malloc(unsafe.Sizeof((unsafe.Pointer)(nil)))

	//	p := (**C.JNIEnv)(malloc(unsafe.Sizeof((*C.JNIEnv)(nil))))
	if attachCurrentThread(j.javaVM, p, nil) < 0 {
		panic("AttachCurrentThread failed")
	}

	return &Env{jniEnv: *(*unsafe.Pointer)(p), classCache: make(map[string]jclass)}
}
// DetachCurrentThread detaches the calling OS thread from the JVM.
// Any Env previously obtained for this thread becomes invalid.
func (j *JVM) DetachCurrentThread() error {
	if detachCurrentThread(j.javaVM) < 0 {
		return errors.New("JNIGI: detachCurrentThread error")
	}
	return nil
}

// Destroy unloads the JVM, waiting for all non-daemon Java threads to exit.
func (j *JVM) Destroy() error {
	if destroyJavaVM(j.javaVM) < 0 {
		return errors.New("JNIGI: destroyJavaVM error")
	}
	return nil
}
// GetJVM returns the JVM this Env belongs to (JNI GetJavaVM). The goroutine
// is locked to its OS thread for the duration of the call.
func (j *Env) GetJVM() (*JVM, error) {
	runtime.LockOSThread()

	// Out-parameter receiving the JavaVM*.
	p := malloc(unsafe.Sizeof((unsafe.Pointer)(nil)))

	if getJavaVM(j.jniEnv, p) < 0 {
		return nil, errors.New("Couldn't get JVM")
	}

	jvm := &JVM{*(*unsafe.Pointer)(p)}

	free(p)
	return jvm, nil
}
// exceptionCheck reports whether a Java exception is pending on this thread.
func (j *Env) exceptionCheck() bool {
	return toBool(exceptionCheck(j.jniEnv))
}

// describeException prints the pending exception and stack trace to stderr
// (JNI ExceptionDescribe) without clearing it.
func (j *Env) describeException() {
	exceptionDescribe(j.jniEnv)
}
// handleException converts the pending Java exception into a Go error via
// the Env's ExceptionHandler (or DefaultExceptionHandler when unset).
// It is called after a JNI function signals failure.
func (j *Env) handleException() error {
	e := exceptionOccurred(j.jniEnv)
	if e == 0 {
		return errors.New("Java JNI function returned error but JNI indicates no current exception")
	}
	// The local ref to the Throwable is released once the handler is done.
	defer deleteLocalRef(j.jniEnv, jobject(e))

	ref := WrapJObject(uintptr(e), "java/lang/Throwable", false)

	if j.ExceptionHandler == nil {
		return DefaultExceptionHandler.CatchException(j, ref)
	}

	// Temporarily disable handler in the event exception rises during handling.
	// By setting it to the DescribeExceptionHandler, exceptions will get printed
	// and cleared.
	handler := j.ExceptionHandler
	j.ExceptionHandler = DescribeExceptionHandler
	defer func() {
		j.ExceptionHandler = handler
	}()

	return handler.CatchException(j, ref)
}
// NewObject constructs a new Java object of className, invoking the
// constructor whose signature matches args (or the signature set via
// PrecalculateSignature). Returns a local reference to the new object.
func (j *Env) NewObject(className string, args ...interface{}) (*ObjectRef, error) {
	class, err := j.callFindClass(className)
	if err != nil {
		return nil, err
	}

	// Use the one-shot precalculated signature if present, otherwise derive
	// the constructor signature from the Go argument types.
	var methodSig string
	if j.preCalcSig != "" {
		methodSig = j.preCalcSig
		j.preCalcSig = ""
	} else {
		calcSig, err := sigForMethod(Void, "", args)
		if err != nil {
			return nil, err
		}
		methodSig = calcSig
	}

	// Constructors are looked up under the special JNI name "<init>".
	mid, err := j.callGetMethodID(false, class, "<init>", methodSig)
	if err != nil {
		return nil, err
	}

	// create args for jni call
	jniArgs, refs, err := j.createArgs(args)
	if err != nil {
		return nil, err
	}
	defer func() {
		cleanUpArgs(jniArgs)
		for _, ref := range refs {
			deleteLocalRef(j.jniEnv, ref)
		}
	}()

	obj := newObjectA(j.jniEnv, class, mid, jniArgs)
	if obj == 0 {
		return nil, j.handleException()
	}

	return &ObjectRef{obj, className, false}, nil
}
// callFindClass resolves className to a jclass, caching the result in the
// Env. Cached entries are global references so they survive local frames.
func (j *Env) callFindClass(className string) (jclass, error) {
	if v, ok := j.classCache[className]; ok {
		return v, nil
	}
	cnCstr := cString(className)
	defer free(cnCstr)
	class := findClass(j.jniEnv, cnCstr)
	if class == 0 {
		return 0, j.handleException()
	}
	// Promote to a global ref before caching; drop the local ref.
	ref := newGlobalRef(j.jniEnv, jobject(class))
	deleteLocalRef(j.jniEnv, jobject(class))

	j.classCache[className] = jclass(ref)

	return jclass(ref), nil
}
// callGetMethodID looks up the (static or instance) method ID for
// name/sig on class, converting any failure into a Go error.
func (j *Env) callGetMethodID(static bool, class jclass, name, sig string) (jmethodID, error) {
	mnCstr := cString(name)
	defer free(mnCstr)

	sigCstr := cString(sig)
	defer free(sigCstr)

	var mid jmethodID
	if static {
		mid = getStaticMethodID(j.jniEnv, class, mnCstr, sigCstr)
	} else {
		mid = getMethodID(j.jniEnv, class, mnCstr, sigCstr)
	}
	//	fmt.Printf("sig = %s\n", sig)
	if mid == 0 {
		return 0, j.handleException()
	}

	return mid, nil
}
// PrecalculateSignature sets an explicit JNI method/field signature to be
// used by the next single call, bypassing automatic signature derivation.
func (j *Env) PrecalculateSignature(sig string) {
	j.preCalcSig = sig
}

// NoReturnConvert makes the next single call return the raw ObjectRef for
// array results instead of converting them to a Go slice.
func (j *Env) NoReturnConvert() {
	j.noReturnConvert = true
}
// big is the nominal length used when viewing C memory as a Go array
// ((*[big]T)(ptr)); only the first n elements are ever sliced.
const big = 1024 * 1024 * 100
// FromObjectArray converts a Java object array into a slice of ObjectRefs,
// one local reference per element, each tagged with the array's class name.
// Panics if fetching an element raises a Java exception.
func (j *Env) FromObjectArray(objRef *ObjectRef) []*ObjectRef {
	len := int(getArrayLength(j.jniEnv, jarray(objRef.jobject)))
	// exception check?

	v := make([]*ObjectRef, len)
	for i := 0; i < len; i++ {
		jobj := getObjectArrayElement(j.jniEnv, jobjectArray(objRef.jobject), jsize(i))
		if j.exceptionCheck() {
			panic(j.handleException())
		}
		v[i] = &ObjectRef{jobj, objRef.className, false}
	}

	return v
}
// toGoArray copies a Java primitive array into a freshly allocated Go slice
// whose element type matches aType's base type ([]bool, []byte, []int16,
// []uint16, []int, []int64, []float32 or []float64). Returns an error for
// unsupported element types or when a JNI call raises an exception.
//
// Fix: the previous guard `if len >= 0` was always true (JNI array lengths
// are non-negative), so zero-length arrays still went through a pointless
// Get*/Release* round trip and dereferenced whatever pointer JNI returned.
// The guard is now `n > 0`, and the builtin-shadowing local `len` is renamed.
func (j *Env) toGoArray(array jobject, aType Type) (interface{}, error) {
	n := int(getArrayLength(j.jniEnv, jarray(array)))
	switch aType.baseType() {
	case Boolean:
		v := make([]bool, n)
		if n > 0 {
			ptr := getBooleanArrayElements(j.jniEnv, jbooleanArray(array), nil)
			if j.exceptionCheck() {
				return nil, j.handleException()
			}
			elems := (*(*[big]byte)(ptr))[0:n]
			for i := 0; i < n; i++ {
				v[i] = (elems[i] == 1)
			}
			// jni_abort: elements were only read, nothing to copy back.
			releaseBooleanArrayElements(j.jniEnv, jbooleanArray(array), ptr, jint(jni_abort))
		}
		return v, nil
	case Byte:
		v := make([]byte, n)
		if n > 0 {
			ptr := getByteArrayElements(j.jniEnv, jbyteArray(array), nil)
			if j.exceptionCheck() {
				return nil, j.handleException()
			}
			elems := (*(*[big]byte)(ptr))[0:n]
			copy(v, elems)
			releaseByteArrayElements(j.jniEnv, jbyteArray(array), ptr, jint(jni_abort))
		}
		return v, nil
	case Short:
		v := make([]int16, n)
		if n > 0 {
			ptr := getShortArrayElements(j.jniEnv, jshortArray(array), nil)
			if j.exceptionCheck() {
				return nil, j.handleException()
			}
			elems := (*(*[big]int16)(ptr))[0:n]
			copy(v, elems)
			releaseShortArrayElements(j.jniEnv, jshortArray(array), ptr, jint(jni_abort))
		}
		return v, nil
	case Char:
		v := make([]uint16, n)
		if n > 0 {
			ptr := getCharArrayElements(j.jniEnv, jcharArray(array), nil)
			if j.exceptionCheck() {
				return nil, j.handleException()
			}
			elems := (*(*[big]uint16)(ptr))[0:n]
			copy(v, elems)
			releaseCharArrayElements(j.jniEnv, jcharArray(array), ptr, jint(jni_abort))
		}
		return v, nil
	case Int:
		v := make([]int, n)
		if n > 0 {
			ptr := getIntArrayElements(j.jniEnv, jintArray(array), nil)
			if j.exceptionCheck() {
				return nil, j.handleException()
			}
			// Java int is 32-bit; widen element-wise to Go int.
			elems := (*(*[big]int32)(ptr))[0:n]
			for i := 0; i < n; i++ {
				v[i] = int(elems[i])
			}
			releaseIntArrayElements(j.jniEnv, jintArray(array), ptr, jint(jni_abort))
		}
		return v, nil
	case Long:
		v := make([]int64, n)
		if n > 0 {
			ptr := getLongArrayElements(j.jniEnv, jlongArray(array), nil)
			if j.exceptionCheck() {
				return nil, j.handleException()
			}
			elems := (*(*[big]int64)(ptr))[0:n]
			copy(v, elems)
			releaseLongArrayElements(j.jniEnv, jlongArray(array), ptr, jint(jni_abort))
		}
		return v, nil
	case Float:
		v := make([]float32, n)
		if n > 0 {
			ptr := getFloatArrayElements(j.jniEnv, jfloatArray(array), nil)
			if j.exceptionCheck() {
				return nil, j.handleException()
			}
			elems := (*(*[big]float32)(ptr))[0:n]
			copy(v, elems)
			releaseFloatArrayElements(j.jniEnv, jfloatArray(array), ptr, jint(jni_abort))
		}
		return v, nil
	case Double:
		v := make([]float64, n)
		if n > 0 {
			ptr := getDoubleArrayElements(j.jniEnv, jdoubleArray(array), nil)
			if j.exceptionCheck() {
				return nil, j.handleException()
			}
			elems := (*(*[big]float64)(ptr))[0:n]
			copy(v, elems)
			releaseDoubleArrayElements(j.jniEnv, jdoubleArray(array), ptr, jint(jni_abort))
		}
		return v, nil
	default:
		return nil, errors.New("JNIGI unsupported array type")
	}
}
// ToObjectArray builds a new Java object array of className from the given
// ObjectRefs. Lookup/assignment exceptions are printed and cleared (the
// call is best-effort); allocation failure of the array itself panics.
func (j *Env) ToObjectArray(objRefs []*ObjectRef, className string) (arrayRef *ObjectRef) {
	arrayRef = &ObjectRef{className: className, isArray: true}
	class, err := j.callFindClass(className)
	if err != nil {
		// Class not found: return a nil-valued array ref after reporting.
		j.describeException()
		exceptionClear(j.jniEnv)
		return
	}

	oa := newObjectArray(j.jniEnv, jsize(len(objRefs)), class, 0)
	if oa == 0 {
		panic(j.handleException())
	}
	arrayRef.jobject = jobject(oa)

	for i, obj := range objRefs {
		setObjectArrayElement(j.jniEnv, oa, jsize(i), obj.jobject)
		if j.exceptionCheck() {
			// Element assignment failed: report and continue with the rest.
			j.describeException()
			exceptionClear(j.jniEnv)
		}
	}
	return
}
// ByteArray wraps a Java byte[] handle together with its cached length.
type ByteArray struct {
	arr jbyteArray // underlying Java array reference
	n   int        // number of elements
}

// NewByteArray allocates a new Java byte[] of length n.
func (j *Env) NewByteArray(n int) *ByteArray {
	a := newByteArray(j.jniEnv, jsize(n))
	return &ByteArray{a, n}
}
// NewByteArrayFromSlice allocates a Java byte[] and copies src into it
// using a critical section for the copy.
func (j *Env) NewByteArrayFromSlice(src []byte) *ByteArray {
	b := j.NewByteArray(len(src))
	if len(src) > 0 {
		bytes := b.GetCritical(j)
		copy(bytes, src)
		b.ReleaseCritical(j, bytes)
	}
	return b
}
// NewByteArrayFromObject adopts an existing ObjectRef (assumed to reference
// a Java byte[]) as a ByteArray, querying its length via JNI.
func (j *Env) NewByteArrayFromObject(o *ObjectRef) *ByteArray {
	ba := &ByteArray{}
	ba.SetObject(o)
	ba.n = int(getArrayLength(j.jniEnv, jarray(ba.arr)))
	return ba
}
// jobj returns the raw JNI handle, letting ByteArray satisfy the jobj
// interface so it can be passed directly as a call argument.
func (b *ByteArray) jobj() jobject {
	return jobject(b.arr)
}

// getType identifies the value as a byte array for signature derivation.
func (b *ByteArray) getType() Type {
	return Byte | Array
}
// GetCritical pins the Java array and returns its elements as a Go slice
// backed by the pinned memory (JNI GetPrimitiveArrayCritical). The caller
// must call ReleaseCritical promptly and make no other JNI calls in between.
func (b *ByteArray) GetCritical(env *Env) []byte {
	if b.n == 0 {
		return nil
	}
	ptr := getPrimitiveArrayCritical(env.jniEnv, jarray(b.arr), nil)
	return (*(*[big]byte)(ptr))[0:b.n]
}

// ReleaseCritical unpins memory previously returned by GetCritical.
// A nil/empty slice (zero-length array) is a no-op.
func (b *ByteArray) ReleaseCritical(env *Env, bytes []byte) {
	if len(bytes) == 0 {
		return
	}
	ptr := unsafe.Pointer(&bytes[0])
	releasePrimitiveArrayCritical(env.jniEnv, jarray(b.arr), ptr, 0)
}
// GetObject returns the array as a generic ObjectRef typed java/lang/Object.
func (b *ByteArray) GetObject() *ObjectRef {
	return &ObjectRef{jobject(b.arr), "java/lang/Object", false}
}

// SetObject points this ByteArray at o's underlying Java array handle
// (the length field is not updated; see NewByteArrayFromObject).
func (b *ByteArray) SetObject(o *ObjectRef) {
	b.arr = jbyteArray(o.jobject)
}
// CopyBytes returns a fresh Go slice containing a copy of the Java array's
// contents (pin, copy, unpin).
func (b *ByteArray) CopyBytes(env *Env) []byte {
	r := make([]byte, b.n)
	src := b.GetCritical(env)
	copy(r, src)
	b.ReleaseCritical(env, src)
	return r
}
// toJavaArray allocates a new Java primitive array and fills it from the
// given Go slice. When the package-level copyToC flag is set, slice contents
// are first copied into C-allocated memory before being handed to the JNI
// Set*ArrayRegion call; otherwise a pointer into Go memory is passed
// directly. Returns the new array as a jobject (a local reference the
// caller must eventually delete) or an error if allocation/fill fails.
func (j *Env) toJavaArray(src interface{}) (jobject, error) {
	switch v := src.(type) {
	case []bool:
		ba := newBooleanArray(j.jniEnv, jsize(len(v)))
		if ba == 0 {
			return 0, j.handleException()
		}
		if len(v) == 0 {
			return jobject(ba), nil
		}
		// jboolean is one byte: re-encode the Go bools as 0/1 bytes first.
		src := make([]byte, len(v))
		for i, vset := range v {
			if vset {
				src[i] = 1
			}
		}
		var ptr unsafe.Pointer
		if copyToC {
			ptr = malloc(uintptr(len(v)))
			defer free(ptr)
			data := (*(*[big]byte)(ptr))[:len(v)]
			copy(data, src)
		} else {
			ptr = unsafe.Pointer(&src[0])
		}
		setBooleanArrayRegion(j.jniEnv, ba, jsize(0), jsize(len(v)), ptr)
		if j.exceptionCheck() {
			return 0, j.handleException()
		}
		return jobject(ba), nil
	case []byte:
		ba := newByteArray(j.jniEnv, jsize(len(v)))
		if ba == 0 {
			return 0, j.handleException()
		}
		if len(v) == 0 {
			return jobject(ba), nil
		}
		var ptr unsafe.Pointer
		if copyToC {
			ptr = malloc(uintptr(len(v)))
			defer free(ptr)
			data := (*(*[big]byte)(ptr))[:len(v)]
			copy(data, v)
		} else {
			ptr = unsafe.Pointer(&v[0])
		}
		setByteArrayRegion(j.jniEnv, ba, jsize(0), jsize(len(v)), ptr)
		if j.exceptionCheck() {
			return 0, j.handleException()
		}
		return jobject(ba), nil
	case []int16:
		array := newShortArray(j.jniEnv, jsize(len(v)))
		if array == 0 {
			return 0, j.handleException()
		}
		if len(v) == 0 {
			return jobject(array), nil
		}
		var ptr unsafe.Pointer
		if copyToC {
			ptr = malloc(unsafe.Sizeof(int16(0)) * uintptr(len(v)))
			defer free(ptr)
			data := (*(*[big]int16)(ptr))[:len(v)]
			copy(data, v)
		} else {
			ptr = unsafe.Pointer(&v[0])
		}
		setShortArrayRegion(j.jniEnv, array, jsize(0), jsize(len(v)), ptr)
		if j.exceptionCheck() {
			return 0, j.handleException()
		}
		return jobject(array), nil
	case []uint16:
		array := newCharArray(j.jniEnv, jsize(len(v)))
		if array == 0 {
			return 0, j.handleException()
		}
		if len(v) == 0 {
			return jobject(array), nil
		}
		var ptr unsafe.Pointer
		if copyToC {
			ptr = malloc(unsafe.Sizeof(uint16(0)) * uintptr(len(v)))
			defer free(ptr)
			data := (*(*[big]uint16)(ptr))[:len(v)]
			copy(data, v)
		} else {
			ptr = unsafe.Pointer(&v[0])
		}
		setCharArrayRegion(j.jniEnv, array, jsize(0), jsize(len(v)), ptr)
		if j.exceptionCheck() {
			return 0, j.handleException()
		}
		return jobject(array), nil
	case []int32:
		array := newIntArray(j.jniEnv, jsize(len(v)))
		if array == 0 {
			return 0, j.handleException()
		}
		if len(v) == 0 {
			return jobject(array), nil
		}
		var ptr unsafe.Pointer
		if copyToC {
			ptr = malloc(unsafe.Sizeof(int32(0)) * uintptr(len(v)))
			defer free(ptr)
			data := (*(*[big]int32)(ptr))[:len(v)]
			copy(data, v)
		} else {
			ptr = unsafe.Pointer(&v[0])
		}
		setIntArrayRegion(j.jniEnv, array, jsize(0), jsize(len(v)), ptr)
		if j.exceptionCheck() {
			return 0, j.handleException()
		}
		return jobject(array), nil
	case []int:
		// Go int maps to Java int (32-bit): always narrow element-wise.
		array := newIntArray(j.jniEnv, jsize(len(v)))
		if array == 0 {
			return 0, j.handleException()
		}
		if len(v) == 0 {
			return jobject(array), nil
		}
		var ptr unsafe.Pointer
		if copyToC {
			ptr = malloc(unsafe.Sizeof(int32(0)) * uintptr(len(v)))
			defer free(ptr)
			data := (*(*[big]int32)(ptr))[:len(v)]
			//copy(data, v)
			for i := 0; i < len(data); i++ {
				data[i] = int32(v[i])
			}
		} else {
			data := make([]int32, len(v))
			for i := 0; i < len(v); i++ {
				data[i] = int32(v[i])
			}
			ptr = unsafe.Pointer(&data[0])
		}
		setIntArrayRegion(j.jniEnv, array, jsize(0), jsize(len(v)), ptr)
		if j.exceptionCheck() {
			return 0, j.handleException()
		}
		return jobject(array), nil
	case []int64:
		array := newLongArray(j.jniEnv, jsize(len(v)))
		if array == 0 {
			return 0, j.handleException()
		}
		if len(v) == 0 {
			return jobject(array), nil
		}
		var ptr unsafe.Pointer
		if copyToC {
			ptr = malloc(unsafe.Sizeof(int64(0)) * uintptr(len(v)))
			defer free(ptr)
			data := (*(*[big]int64)(ptr))[:len(v)]
			copy(data, v)
		} else {
			ptr = unsafe.Pointer(&v[0])
		}
		setLongArrayRegion(j.jniEnv, array, jsize(0), jsize(len(v)), ptr)
		if j.exceptionCheck() {
			return 0, j.handleException()
		}
		return jobject(array), nil
	case []float32:
		array := newFloatArray(j.jniEnv, jsize(len(v)))
		if array == 0 {
			return 0, j.handleException()
		}
		if len(v) == 0 {
			return jobject(array), nil
		}
		var ptr unsafe.Pointer
		if copyToC {
			ptr = malloc(unsafe.Sizeof(float32(0)) * uintptr(len(v)))
			defer free(ptr)
			data := (*(*[big]float32)(ptr))[:len(v)]
			copy(data, v)
		} else {
			ptr = unsafe.Pointer(&v[0])
		}
		setFloatArrayRegion(j.jniEnv, array, jsize(0), jsize(len(v)), ptr)
		if j.exceptionCheck() {
			return 0, j.handleException()
		}
		return jobject(array), nil
	case []float64:
		array := newDoubleArray(j.jniEnv, jsize(len(v)))
		if array == 0 {
			return 0, j.handleException()
		}
		if len(v) == 0 {
			return jobject(array), nil
		}
		var ptr unsafe.Pointer
		if copyToC {
			ptr = malloc(unsafe.Sizeof(float64(0)) * uintptr(len(v)))
			defer free(ptr)
			data := (*(*[big]float64)(ptr))[:len(v)]
			copy(data, v)
		} else {
			ptr = unsafe.Pointer(&v[0])
		}
		setDoubleArrayRegion(j.jniEnv, array, jsize(0), jsize(len(v)), ptr)
		if j.exceptionCheck() {
			return 0, j.handleException()
		}
		return jobject(array), nil
	default:
		return 0, errors.New("JNIGI unsupported array type")
	}
}
// createArgs marshals Go argument values into a C-compatible array of
// 64-bit jvalues for the JNI Call*MethodA family. ptr must be released via
// cleanUpArgs, and every jobject in refs (local refs created when Go slices
// are converted to Java arrays) must be deleted by the caller after the
// JNI call completes.
//
// Fix: the "not a valid value" error used the %t (boolean) verb on the
// argument, producing "%!t(...)" noise; it now uses %T so the message names
// the unsupported Go type.
func (j *Env) createArgs(args []interface{}) (ptr unsafe.Pointer, refs []jobject, err error) {
	if len(args) == 0 {
		return nil, nil, nil
	}

	argList := make([]uint64, len(args))
	refs = make([]jobject, 0)

	for i, arg := range args {
		switch v := arg.(type) {
		case jobj:
			argList[i] = uint64(v.jobj())
		case bool:
			if v {
				argList[i] = uint64(jboolean(1))
			} else {
				argList[i] = uint64(jboolean(0))
			}
		case byte:
			argList[i] = uint64(jbyte(v))
		case uint16:
			argList[i] = uint64(jchar(v))
		case int16:
			argList[i] = uint64(jshort(v))
		case int32:
			argList[i] = uint64(jint(v))
		case int:
			argList[i] = uint64(jint(int32(v)))
		case int64:
			argList[i] = uint64(jlong(v))
		case float32:
			argList[i] = uint64(jfloat(v))
		case float64:
			argList[i] = uint64(jdouble(v))
		case []bool, []byte, []int16, []uint16, []int32, []int, []int64, []float32, []float64:
			// Slices become new Java arrays; record the local ref for cleanup.
			if array, arrayErr := j.toJavaArray(v); arrayErr == nil {
				argList[i] = uint64(array)
				refs = append(refs, array)
			} else {
				err = arrayErr
			}
		default:
			err = fmt.Errorf("JNIGI: argument not a valid value %T (%v)", args[i], args[i])
		}
		if err != nil {
			break
		}
	}
	if err != nil {
		// Conversion failed part-way: release any array refs already created.
		for _, ref := range refs {
			deleteLocalRef(j.jniEnv, ref)
		}
		refs = nil
		return
	}

	if copyToC {
		ptr = malloc(unsafe.Sizeof(uint64(0)) * uintptr(len(args)))
		data := (*(*[big]uint64)(ptr))[:len(args)]
		copy(data, argList)
	} else {
		ptr = unsafe.Pointer(&argList[0])
	}
	return
}
// Type is a bit set describing a JNI value type; primitive/object kinds are
// mutually exclusive flags and Array may be OR'd onto any of them.
type Type uint32

const (
	Void = Type(1 << iota)
	Boolean
	Byte
	Char
	Short
	Int
	Long
	Float
	Double
	Object
	// Array is combined with a base type, e.g. Byte | Array for byte[].
	Array
)
// baseType strips the Array flag, leaving the element/base type.
func (t Type) baseType() Type {
	return t &^ Array
}

// isArray reports whether the Array flag is set.
func (t Type) isArray() bool {
	return t&Array > 0
}
// ObjectType names a Java class ("java/lang/String") used as a value type.
type ObjectType string

// ObjectArrayType names the element class of a Java object array.
type ObjectArrayType string

// convertedArray is implemented by wrappers (e.g. ByteArray) that know their
// own JNI array type.
type convertedArray interface {
	getType() Type
}
// typeOfReturnValue resolves a return-type specifier like typeOfValue does,
// additionally accepting a plain string as shorthand for an ObjectType.
// This keeps compatibility with callers that pass class names directly.
func typeOfReturnValue(value interface{}) (t Type, className string, err error) {
	if s, ok := value.(string); ok {
		value = ObjectType(s)
	}
	return typeOfValue(value)
}
// typeOfValue maps a Go value (or type specifier such as Type, ObjectType,
// ObjectArrayType, *ObjectRef) to its JNI Type bits plus, for object types,
// the slash-separated class name. Unrecognized values yield an error.
func typeOfValue(value interface{}) (t Type, className string, err error) {
	switch v := value.(type) {
	case Type:
		t = v
		if t.baseType() == Object {
			// Bare Object with no class info defaults to java/lang/Object.
			className = "java/lang/Object"
		}
	case ObjectType:
		t = Object
		className = string(v)
	case ObjectArrayType:
		t = Object | Array
		className = string(v)
	case *ObjectRef:
		t = Object
		if v.isArray {
			t = t | Array
		}
		className = v.className
	case bool:
		t = Boolean
	case byte:
		t = Byte
	case int16:
		t = Short
	case uint16:
		t = Char
	case int32:
		t = Int
	case int:
		t = Int
	case int64:
		t = Long
	case float32:
		t = Float
	case float64:
		t = Double
	case []bool:
		t = Boolean | Array
		className = "java/lang/Object"
	case []byte:
		t = Byte | Array
		className = "java/lang/Object"
	case []uint16:
		t = Char | Array
		className = "java/lang/Object"
	case []int16:
		t = Short | Array
		className = "java/lang/Object"
	case []int32:
		t = Int | Array
		className = "java/lang/Object"
	case []int:
		t = Int | Array
		className = "java/lang/Object"
	case []int64:
		t = Long | Array
		className = "java/lang/Object"
	case []float32:
		t = Float | Array
		className = "java/lang/Object"
	case []float64:
		t = Double | Array
		className = "java/lang/Object"
	case convertedArray:
		t = v.getType()
		className = "java/lang/Object"
	default:
		err = fmt.Errorf("JNIGI: unknown type %T (value = %v)", v, v)
	}
	return
}
// typeSignature renders the JNI type descriptor for t, using className for
// object types: e.g. (Int) -> "I", (Object, "java/lang/String") ->
// "Ljava/lang/String;", and a leading "[" when the Array flag is set.
func typeSignature(t Type, className string) (sig string) {
	if t.isArray() {
		sig = "["
	}
	switch t.baseType() {
	case Object:
		sig += "L" + className + ";"
	case Void:
		sig += "V"
	case Boolean:
		sig += "Z"
	case Byte:
		sig += "B"
	case Char:
		sig += "C"
	case Short:
		sig += "S"
	case Int:
		sig += "I"
	case Long:
		sig += "J"
	case Float:
		sig += "F"
	case Double:
		sig += "D"
	}
	return
}
// sigForMethod derives a full JNI method signature "(params)return" from the
// Go argument values and the declared return type/class.
func sigForMethod(returnType Type, returnClass string, args []interface{}) (string, error) {
	var sb strings.Builder
	sb.WriteByte('(')
	for _, arg := range args {
		argType, argClass, err := typeOfValue(arg)
		if err != nil {
			return "", err
		}
		sb.WriteString(typeSignature(argType, argClass))
	}
	sb.WriteByte(')')
	sb.WriteString(typeSignature(returnType, returnClass))
	return sb.String(), nil
}
// cleanUpArgs releases the jvalue array produced by createArgs. Only a
// C-allocated copy needs freeing; in the direct-pointer mode the memory is
// owned by the Go GC.
func cleanUpArgs(ptr unsafe.Pointer) {
	if copyToC {
		free(ptr)
	}
}
// getClass resolves the jclass for this reference. When the ref is typed as
// plain java/lang/Object it additionally asks the JVM for the object's real
// runtime class (via getClass().getName()) and, if more specific, upgrades
// both the returned class and o.className to it.
func (o *ObjectRef) getClass(env *Env) (class jclass, err error) {
	class, err = env.callFindClass(o.className)
	if err != nil {
		return 0, err
	}

	// if object is java/lang/Object try to up class it
	// there is an odd way to get the class name see: http://stackoverflow.com/questions/12719766/can-i-know-the-name-of-the-class-that-calls-a-jni-c-method
	if o.className == "java/lang/Object" {
		mid, err := env.callGetMethodID(false, class, "getClass", "()Ljava/lang/Class;")
		if err != nil {
			return 0, err
		}
		// obj is the java.lang.Class instance of the receiver.
		obj := callObjectMethodA(env.jniEnv, o.jobject, mid, nil)
		if env.exceptionCheck() {
			return 0, env.handleException()
		}
		defer deleteLocalRef(env.jniEnv, obj)
		objClass := getObjectClass(env.jniEnv, obj)
		if objClass == 0 {
			return 0, env.handleException()
		}
		defer deleteLocalRef(env.jniEnv, jobject(objClass))
		mid, err = env.callGetMethodID(false, objClass, "getName", "()Ljava/lang/String;")
		if err != nil {
			return 0, err
		}
		obj2 := callObjectMethodA(env.jniEnv, obj, mid, nil)
		if env.exceptionCheck() {
			return 0, env.handleException()
		}
		strObj := WrapJObject(uintptr(obj2), "java/lang/String", false)
		if strObj.IsNil() {
			return 0, errors.New("unexpected error getting object class name")
		}
		defer env.DeleteLocalRef(strObj)
		// Extract the class name as UTF-8 bytes.
		b , err := strObj.CallMethod(env, "getBytes", Byte | Array, env.GetUTF8String())
		if err != nil {
			return 0, err
		}
		gotClass := string(b.([]byte))
		// note uses . for class name separator
		if gotClass != "java.lang.Object" {
			gotClass = strings.Replace(gotClass, ".", "/", -1)
			class, err = env.callFindClass(gotClass)
			if err != nil {
				return 0, err
			}
			// Remember the more specific class for subsequent calls.
			o.className = gotClass
			return class, err
		}
	}

	return
}
// CallMethod invokes the named instance method on the referenced object.
// returnType is a Type, ObjectType/class-name string, or example value; args
// determine the parameter part of the signature (unless one was set via
// PrecalculateSignature). Primitive returns come back as Go values, object
// returns as *ObjectRef, and primitive arrays as Go slices (unless
// NoReturnConvert was set).
func (o *ObjectRef) CallMethod(env *Env, methodName string, returnType interface{}, args ...interface{}) (interface{}, error) {
	class, err := o.getClass(env)
	if err != nil {
		return nil, err
	}

	rType, rClassName, err := typeOfReturnValue(returnType)
	if err != nil {
		return nil, err
	}

	// One-shot precalculated signature takes precedence over derivation.
	var methodSig string
	if env.preCalcSig != "" {
		methodSig = env.preCalcSig
		env.preCalcSig = ""
	} else {
		calcSig, err := sigForMethod(rType, rClassName, args)
		if err != nil {
			return nil, err
		}
		methodSig = calcSig
	}

	mid, err := env.callGetMethodID(false, class, methodName, methodSig)
	if err != nil {
		return nil, err
	}

	// create args for jni call
	jniArgs, refs, err := env.createArgs(args)
	if err != nil {
		return nil, err
	}
	defer func() {
		cleanUpArgs(jniArgs)
		for _, ref := range refs {
			deleteLocalRef(env.jniEnv, ref)
		}
	}()

	// Dispatch to the JNI call variant matching the return type.
	var arrayToConvert jobject
	var retVal interface{}

	switch {
	case rType == Void:
		callVoidMethodA(env.jniEnv, o.jobject, mid, jniArgs)
	case rType == Boolean:
		retVal = toBool(callBooleanMethodA(env.jniEnv, o.jobject, mid, jniArgs))
	case rType == Byte:
		retVal = byte(callByteMethodA(env.jniEnv, o.jobject, mid, jniArgs))
	case rType == Char:
		retVal = uint16(callCharMethodA(env.jniEnv, o.jobject, mid, jniArgs))
	case rType == Short:
		retVal = int16(callShortMethodA(env.jniEnv, o.jobject, mid, jniArgs))
	case rType == Int:
		retVal = int(callIntMethodA(env.jniEnv, o.jobject, mid, jniArgs))
	case rType == Long:
		retVal = int64(callLongMethodA(env.jniEnv, o.jobject, mid, jniArgs))
	case rType == Float:
		retVal = float32(callFloatMethodA(env.jniEnv, o.jobject, mid, jniArgs))
	case rType == Double:
		retVal = float64(callDoubleMethodA(env.jniEnv, o.jobject, mid, jniArgs))
	case rType == Object || rType.isArray():
		obj := callObjectMethodA(env.jniEnv, o.jobject, mid, jniArgs)
		if rType == Object || rType == Object|Array || env.noReturnConvert {
			retVal = &ObjectRef{obj, rClassName, rType.isArray()}
		} else {
			// Primitive array: converted to a Go slice below.
			arrayToConvert = obj
		}
	default:
		return nil, errors.New("JNIGI unknown return type")
	}
	// noReturnConvert is one-shot; reset regardless of outcome.
	env.noReturnConvert = false

	if env.exceptionCheck() {
		return nil, env.handleException()
	}

	if arrayToConvert != 0 {
		retVal, err = env.toGoArray(arrayToConvert, rType)
		if err != nil {
			return nil, err
		}
	}

	return retVal, nil
}
// CallStaticMethod invokes the named static method on className. Return and
// argument handling mirrors ObjectRef.CallMethod: primitive returns become
// Go values, object returns *ObjectRef, primitive arrays Go slices (unless
// NoReturnConvert was set).
func (j *Env) CallStaticMethod(className string, methodName string, returnType interface{}, args ...interface{}) (interface{}, error) {
	class, err := j.callFindClass(className)
	if err != nil {
		return nil, err
	}

	rType, rClassName, err := typeOfReturnValue(returnType)
	if err != nil {
		return nil, err
	}

	// One-shot precalculated signature takes precedence over derivation.
	var methodSig string
	if j.preCalcSig != "" {
		methodSig = j.preCalcSig
		j.preCalcSig = ""
	} else {
		calcSig, err := sigForMethod(rType, rClassName, args)
		if err != nil {
			return nil, err
		}
		methodSig = calcSig
	}

	mid, err := j.callGetMethodID(true, class, methodName, methodSig)
	if err != nil {
		return nil, err
	}

	// create args for jni call
	jniArgs, refs, err := j.createArgs(args)
	if err != nil {
		return nil, err
	}
	defer func() {
		cleanUpArgs(jniArgs)
		for _, ref := range refs {
			deleteLocalRef(j.jniEnv, ref)
		}
	}()

	// Dispatch to the JNI static-call variant matching the return type.
	var arrayToConvert jobject
	var retVal interface{}

	switch {
	case rType == Void:
		callStaticVoidMethodA(j.jniEnv, class, mid, jniArgs)
	case rType == Boolean:
		retVal = toBool(callStaticBooleanMethodA(j.jniEnv, class, mid, jniArgs))
	case rType == Byte:
		retVal = byte(callStaticByteMethodA(j.jniEnv, class, mid, jniArgs))
	case rType == Char:
		retVal = uint16(callStaticCharMethodA(j.jniEnv, class, mid, jniArgs))
	case rType == Short:
		retVal = int16(callStaticShortMethodA(j.jniEnv, class, mid, jniArgs))
	case rType == Int:
		retVal = int(callStaticIntMethodA(j.jniEnv, class, mid, jniArgs))
	case rType == Long:
		retVal = int64(callStaticLongMethodA(j.jniEnv, class, mid, jniArgs))
	case rType == Float:
		retVal = float32(callStaticFloatMethodA(j.jniEnv, class, mid, jniArgs))
	case rType == Double:
		retVal = float64(callStaticDoubleMethodA(j.jniEnv, class, mid, jniArgs))
	case rType == Object || rType.isArray():
		obj := callStaticObjectMethodA(j.jniEnv, class, mid, jniArgs)
		if rType == Object || rType == Object|Array || j.noReturnConvert {
			retVal = &ObjectRef{obj, rClassName, rType.isArray()}
		} else {
			// Primitive array: converted to a Go slice below.
			arrayToConvert = obj
		}
	default:
		return nil, errors.New("JNIGI unknown return type")
	}
	// noReturnConvert is one-shot; reset regardless of outcome.
	j.noReturnConvert = false

	if j.exceptionCheck() {
		return nil, j.handleException()
	}

	if arrayToConvert != 0 {
		retVal, err = j.toGoArray(arrayToConvert, rType)
		if err != nil {
			return nil, err
		}
	}

	return retVal, nil
}
// callGetFieldID looks up the (static or instance) field ID for name/sig on
// class, converting any failure into a Go error.
func (j *Env) callGetFieldID(static bool, class jclass, name, sig string) (jfieldID, error) {
	fnCstr := cString(name)
	defer free(fnCstr)

	sigCstr := cString(sig)
	defer free(sigCstr)

	var fid jfieldID
	if static {
		fid = getStaticFieldID(j.jniEnv, class, fnCstr, sigCstr)
	} else {
		fid = getFieldID(j.jniEnv, class, fnCstr, sigCstr)
	}
	if fid == 0 {
		return 0, j.handleException()
	}

	return fid, nil
}
// GetField reads an instance field of the referenced object. fieldType is
// specified like a CallMethod return type; primitives come back as Go
// values, objects as *ObjectRef, primitive arrays as Go slices (unless
// NoReturnConvert was set).
func (o *ObjectRef) GetField(env *Env, fieldName string, fieldType interface{}) (interface{}, error) {
	class, err := o.getClass(env)
	if err != nil {
		return nil, err
	}

	fType, fClassName, err := typeOfReturnValue(fieldType)
	if err != nil {
		return nil, err
	}

	// One-shot precalculated signature takes precedence over derivation.
	var fieldSig string
	if env.preCalcSig != "" {
		fieldSig = env.preCalcSig
		env.preCalcSig = ""
	} else {
		fieldSig = typeSignature(fType, fClassName)
	}

	fid, err := env.callGetFieldID(false, class, fieldName, fieldSig)
	if err != nil {
		return nil, err
	}

	// Dispatch to the JNI field getter matching the field type.
	var arrayToConvert jobject
	var retVal interface{}

	switch {
	case fType == Boolean:
		retVal = toBool(getBooleanField(env.jniEnv, o.jobject, fid))
	case fType == Byte:
		retVal = byte(getByteField(env.jniEnv, o.jobject, fid))
	case fType == Char:
		retVal = uint16(getCharField(env.jniEnv, o.jobject, fid))
	case fType == Short:
		retVal = int16(getShortField(env.jniEnv, o.jobject, fid))
	case fType == Int:
		retVal = int(getIntField(env.jniEnv, o.jobject, fid))
	case fType == Long:
		retVal = int64(getLongField(env.jniEnv, o.jobject, fid))
	case fType == Float:
		retVal = float32(getFloatField(env.jniEnv, o.jobject, fid))
	case fType == Double:
		retVal = float64(getDoubleField(env.jniEnv, o.jobject, fid))
	case fType == Object || fType.isArray():
		obj := getObjectField(env.jniEnv, o.jobject, fid)
		if fType == Object || fType == Object|Array || env.noReturnConvert {
			retVal = &ObjectRef{obj, fClassName, fType.isArray()}
		} else {
			// Primitive array: converted to a Go slice below.
			arrayToConvert = obj
		}
	default:
		return nil, errors.New("JNIGI unknown field type")
	}
	// noReturnConvert is one-shot; reset regardless of outcome.
	env.noReturnConvert = false

	if env.exceptionCheck() {
		return nil, env.handleException()
	}

	if arrayToConvert != 0 {
		retVal, err = env.toGoArray(arrayToConvert, fType)
		if err != nil {
			return nil, err
		}
	}

	return retVal, nil
}
// SetField writes an instance field of the referenced object. The field's
// JNI signature is derived from the Go type of value (or taken from
// PrecalculateSignature); Go slices are converted to new Java arrays.
func (o *ObjectRef) SetField(env *Env, fieldName string, value interface{}) error {
	class, err := o.getClass(env)
	if err != nil {
		return err
	}

	vType, vClassName, err := typeOfValue(value)
	if err != nil {
		return err
	}

	// One-shot precalculated signature takes precedence over derivation.
	var fieldSig string
	if env.preCalcSig != "" {
		fieldSig = env.preCalcSig
		env.preCalcSig = ""
	} else {
		fieldSig = typeSignature(vType, vClassName)
	}

	fid, err := env.callGetFieldID(false, class, fieldName, fieldSig)
	if err != nil {
		return err
	}

	// Dispatch to the JNI field setter matching the value's Go type.
	switch v := value.(type) {
	case bool:
		setBooleanField(env.jniEnv, o.jobject, fid, fromBool(v))
	case byte:
		setByteField(env.jniEnv, o.jobject, fid, jbyte(v))
	case uint16:
		setCharField(env.jniEnv, o.jobject, fid, jchar(v))
	case int16:
		setShortField(env.jniEnv, o.jobject, fid, jshort(v))
	case int32:
		setIntField(env.jniEnv, o.jobject, fid, jint(v))
	case int:
		setIntField(env.jniEnv, o.jobject, fid, jint(int32(v)))
	case int64:
		setLongField(env.jniEnv, o.jobject, fid, jlong(v))
	case float32:
		setFloatField(env.jniEnv, o.jobject, fid, jfloat(v))
	case float64:
		setDoubleField(env.jniEnv, o.jobject, fid, jdouble(v))
	case jobj:
		setObjectField(env.jniEnv, o.jobject, fid, v.jobj())
	case []bool, []byte, []int16, []uint16, []int32, []int, []int64, []float32, []float64:
		// Slice becomes a new Java array; the local ref is dropped after the set.
		array, err := env.toJavaArray(v)
		if err != nil {
			return err
		}
		defer deleteLocalRef(env.jniEnv, array)
		setObjectField(env.jniEnv, o.jobject, fid, jobject(array))
	default:
		return errors.New("JNIGI unknown field value")
	}
	if env.exceptionCheck() {
		return env.handleException()
	}

	return nil
}
// GetStaticField reads a static field of className. fieldType is specified
// like a CallMethod return type; primitives come back as Go values, objects
// as *ObjectRef, primitive arrays as Go slices (unless NoReturnConvert was
// set).
func (j *Env) GetStaticField(className string, fieldName string, fieldType interface{}) (interface{}, error) {
	class, err := j.callFindClass(className)
	if err != nil {
		return nil, err
	}

	fType, fClassName, err := typeOfReturnValue(fieldType)
	if err != nil {
		return nil, err
	}

	// One-shot precalculated signature takes precedence over derivation.
	var fieldSig string
	if j.preCalcSig != "" {
		fieldSig = j.preCalcSig
		j.preCalcSig = ""
	} else {
		fieldSig = typeSignature(fType, fClassName)
	}

	fid, err := j.callGetFieldID(true, class, fieldName, fieldSig)
	if err != nil {
		return nil, err
	}

	// Dispatch to the JNI static field getter matching the field type.
	var arrayToConvert jobject
	var retVal interface{}

	switch {
	case fType == Boolean:
		retVal = toBool(getStaticBooleanField(j.jniEnv, class, fid))
	case fType == Byte:
		retVal = byte(getStaticByteField(j.jniEnv, class, fid))
	case fType == Char:
		retVal = uint16(getStaticCharField(j.jniEnv, class, fid))
	case fType == Short:
		retVal = int16(getStaticShortField(j.jniEnv, class, fid))
	case fType == Int:
		retVal = int(getStaticIntField(j.jniEnv, class, fid))
	case fType == Long:
		retVal = int64(getStaticLongField(j.jniEnv, class, fid))
	case fType == Float:
		retVal = float32(getStaticFloatField(j.jniEnv, class, fid))
	case fType == Double:
		retVal = float64(getStaticDoubleField(j.jniEnv, class, fid))
	case fType == Object || fType.isArray():
		obj := getStaticObjectField(j.jniEnv, class, fid)
		if fType == Object || fType == Object|Array || j.noReturnConvert {
			retVal = &ObjectRef{obj, fClassName, fType.isArray()}
		} else {
			// Primitive array: converted to a Go slice below.
			arrayToConvert = obj
		}
	default:
		return nil, errors.New("JNIGI unknown field type")
	}
	// noReturnConvert is one-shot; reset regardless of outcome.
	j.noReturnConvert = false

	if j.exceptionCheck() {
		return nil, j.handleException()
	}

	if arrayToConvert != 0 {
		retVal, err = j.toGoArray(arrayToConvert, fType)
		if err != nil {
			return nil, err
		}
	}

	return retVal, nil
}
// SetStaticField writes a static field of className. The field's JNI
// signature is derived from the Go type of value (or taken from
// PrecalculateSignature); Go slices are converted to new Java arrays.
func (j *Env) SetStaticField(className string, fieldName string, value interface{}) error {
	class, err := j.callFindClass(className)
	if err != nil {
		return err
	}

	vType, vClassName, err := typeOfValue(value)
	if err != nil {
		return err
	}

	// One-shot precalculated signature takes precedence over derivation.
	var fieldSig string
	if j.preCalcSig != "" {
		fieldSig = j.preCalcSig
		j.preCalcSig = ""
	} else {
		fieldSig = typeSignature(vType, vClassName)
	}

	fid, err := j.callGetFieldID(true, class, fieldName, fieldSig)
	if err != nil {
		return err
	}

	// Dispatch to the JNI static field setter matching the value's Go type.
	switch v := value.(type) {
	case bool:
		setStaticBooleanField(j.jniEnv, class, fid, fromBool(v))
	case byte:
		setStaticByteField(j.jniEnv, class, fid, jbyte(v))
	case uint16:
		setStaticCharField(j.jniEnv, class, fid, jchar(v))
	case int16:
		setStaticShortField(j.jniEnv, class, fid, jshort(v))
	case int32:
		setStaticIntField(j.jniEnv, class, fid, jint(v))
	case int:
		setStaticIntField(j.jniEnv, class, fid, jint(int32(v)))
	case int64:
		setStaticLongField(j.jniEnv, class, fid, jlong(v))
	case float32:
		setStaticFloatField(j.jniEnv, class, fid, jfloat(v))
	case float64:
		setStaticDoubleField(j.jniEnv, class, fid, jdouble(v))
	case jobj:
		setStaticObjectField(j.jniEnv, class, fid, v.jobj())
	case []bool, []byte, []int16, []uint16, []int32, []int, []int64, []float32, []float64:
		// Slice becomes a new Java array; the local ref is dropped after the set.
		array, err := j.toJavaArray(v)
		if err != nil {
			return err
		}
		defer deleteLocalRef(j.jniEnv, array)
		setStaticObjectField(j.jniEnv, class, fid, jobject(array))
	default:
		return errors.New("JNIGI unknown field value")
	}
	if j.exceptionCheck() {
		return j.handleException()
	}

	return nil
}
// RegisterNative registers fptr as the implementation of the Java native
// method className.methodName with the given return type and parameter
// types. fptr must hold an unsafe.Pointer to a JNI-compatible function —
// NOTE(review): the type assertion fptr.(unsafe.Pointer) panics otherwise;
// confirm all callers pass unsafe.Pointer.
func (j *Env) RegisterNative(className, methodName string, returnType interface{}, params []interface{}, fptr interface{}) error {
	class, err := j.callFindClass(className)
	if err != nil {
		return err
	}
	mnCstr := cString(methodName)
	defer free(mnCstr)
	rType, rClassName, err := typeOfReturnValue(returnType)
	if err != nil {
		return err
	}
	// Build the JNI method signature, e.g. "(ILjava/lang/String;)V".
	sig, err := sigForMethod(rType, rClassName, params)
	if err != nil {
		return err
	}
	sigCstr := cString(sig)
	defer free(sigCstr)
	// A negative return from RegisterNatives indicates failure; report the
	// pending Java exception.
	if registerNative(j.jniEnv, class, mnCstr, sigCstr, fptr.(unsafe.Pointer)) < 0 {
		return j.handleException()
	}
	return nil
}
// NewGlobalRef promotes o to a JNI global reference, returning a new
// ObjectRef (same class name and array flag) that remains valid across
// threads and JNI frames until released with DeleteGlobalRef. o itself is
// left untouched.
func (j *Env) NewGlobalRef(o *ObjectRef) *ObjectRef {
	g := newGlobalRef(j.jniEnv, o.jobject)
	return &ObjectRef{g, o.className, o.isArray}
}
// DeleteGlobalRef releases the global reference held by o and zeroes its
// handle so accidental reuse fails fast.
func (j *Env) DeleteGlobalRef(o *ObjectRef) {
	deleteGlobalRef(j.jniEnv, o.jobject)
	o.jobject = 0
}
// DeleteLocalRef releases the local reference held by o and zeroes its
// handle so accidental reuse fails fast.
func (j *Env) DeleteLocalRef(o *ObjectRef) {
	deleteLocalRef(j.jniEnv, o.jobject)
	o.jobject = 0
}
// EnsureLocalCapacity asks the JVM to guarantee room for at least capacity
// local references in the current frame. A pending Java exception takes
// precedence over the generic failure error.
func (j *Env) EnsureLocalCapacity(capacity int32) error {
	ok := ensureLocalCapacity(j.jniEnv, jint(capacity)) == 0
	// Check for a raised exception first, mirroring the JNI contract.
	if j.exceptionCheck() {
		return j.handleException()
	}
	if ok {
		return nil
	}
	return errors.New("JNIGI: ensureLocalCapacity error")
}
// PushLocalFrame pushes a new local reference frame holding at least
// capacity references. A pending Java exception takes precedence over the
// generic failure error. Pair with PopLocalFrame.
func (j *Env) PushLocalFrame(capacity int32) error {
	ok := pushLocalFrame(j.jniEnv, jint(capacity)) == 0
	// Check for a raised exception first, mirroring the JNI contract.
	if j.exceptionCheck() {
		return j.handleException()
	}
	if ok {
		return nil
	}
	return errors.New("JNIGI: pushLocalFrame error")
}
// PopLocalFrame pops the current local reference frame, freeing all of its
// local references. If result is non-nil, its object survives into the
// previous frame and the returned ObjectRef wraps the surviving handle;
// result's own handle is zeroed because it belonged to the popped frame.
// Passing nil pops the frame without keeping any object.
func (j *Env) PopLocalFrame(result *ObjectRef) *ObjectRef {
	if result == nil {
		result = &ObjectRef{}
	}
	o := popLocalFrame(j.jniEnv, result.jobject)
	result.jobject = 0
	return &ObjectRef{o, result.className, result.isArray}
}
// utf8 caches a global reference to the java/lang/String "UTF-8",
// shared across all Env values once initialized.
var utf8 *ObjectRef
// GetUTF8String returns a global reference to a java/lang/String containing
// "UTF-8", creating and caching it on first use.
// NOTE(review): the lazy init is not synchronized — concurrent first calls
// could race on utf8; confirm callers serialize JNI access.
func (j *Env) GetUTF8String() *ObjectRef {
	if utf8 == nil {
		cStr := cString("UTF-8")
		local := newStringUTF(j.jniEnv, cStr)
		if local == 0 {
			// String creation failed; propagate the pending Java exception.
			// (cStr is not freed on this path.)
			panic(j.handleException())
		}
		// Promote to a global ref so the cached value outlives this frame,
		// then drop the now-redundant local ref.
		global := jstring(newGlobalRef(j.jniEnv, jobject(local)))
		deleteLocalRef(j.jniEnv, jobject(local))
		free(cStr)
		utf8 = &ObjectRef{jobject: jobject(global), isArray: false, className: "java/lang/String"}
	}
	return utf8
}
// StackTraceElement is a struct holding the contents of java.lang.StackTraceElement
// for use in a ThrowableError.
type StackTraceElement struct {
	ClassName      string
	FileName       string
	LineNumber     int
	MethodName     string
	IsNativeMethod bool
	AsString       string // result of the element's toString()
}
// String returns the Java toString() representation of the element.
func (el StackTraceElement) String() string {
	return el.AsString
}
// ThrowableError is an error struct that holds the relevant contents of a
// java.lang.Throwable. This is the returned error from ThrowableErrorExceptionHandler.
type ThrowableError struct {
	ClassName        string
	LocalizedMessage string
	Message          string
	StackTrace       []StackTraceElement
	AsString         string          // result of the throwable's toString()
	Cause            *ThrowableError // chained cause, nil when absent
}
// String returns the Java toString() representation of the throwable.
func (e ThrowableError) String() string {
	return e.AsString
}
// Error implements the error interface using the toString() representation.
func (e ThrowableError) Error() string {
	return e.AsString
}
// stringFromJavaLangString converts a java/lang/String reference to a Go
// string by calling getBytes("UTF-8") on it. A nil reference or any call
// failure yields "" — errors are deliberately swallowed because this runs
// on error-reporting paths.
func stringFromJavaLangString(env *Env, ref *ObjectRef) string {
	if ref.IsNil() {
		return ""
	}
	env.PrecalculateSignature("(Ljava/lang/String;)[B")
	ret, err := ref.CallMethod(env, "getBytes", Byte|Array, env.GetUTF8String())
	if err != nil {
		return ""
	}
	return string(ret.([]byte))
}
// callStringMethodAndAssign invokes the no-argument, String-returning Java
// method named method on obj, converts the result to a Go string, and hands
// it to assign. The temporary string reference is released before returning.
func callStringMethodAndAssign(env *Env, obj *ObjectRef, method string, assign func(s string)) error {
	env.PrecalculateSignature("()Ljava/lang/String;")
	ret, err := obj.CallMethod(env, method, "java/lang/String")
	if err != nil {
		return err
	}
	strref := ret.(*ObjectRef)
	defer env.DeleteLocalRef(strref)
	assign(stringFromJavaLangString(env, strref))
	return nil
}
// NewStackTraceElementFromObject creates a new StackTraceElement with its contents
// set from the values provided in stackTraceElement's methods.
// A nil reference yields (nil, nil); any failing method call aborts the
// conversion with the error.
func NewStackTraceElementFromObject(env *Env, stackTraceElement *ObjectRef) (*StackTraceElement, error) {
	if stackTraceElement.IsNil() {
		return nil, nil
	}
	// Helper: call a String-returning method on the element and store the result.
	getStringAndAssign := func(method string, assign func(s string)) error {
		return callStringMethodAndAssign(env, stackTraceElement, method, assign)
	}
	out := StackTraceElement{}
	// ClassName
	if err := getStringAndAssign("getClassName", func(s string) {
		out.ClassName = s
	}); err != nil {
		return nil, err
	}
	// FileName
	if err := getStringAndAssign("getFileName", func(s string) {
		out.FileName = s
	}); err != nil {
		return nil, err
	}
	// MethodName
	if err := getStringAndAssign("getMethodName", func(s string) {
		out.MethodName = s
	}); err != nil {
		return nil, err
	}
	// ToString
	if err := getStringAndAssign("toString", func(s string) {
		out.AsString = s
	}); err != nil {
		return nil, err
	}
	// LineNumber (int-returning, so the string helper does not apply)
	{
		env.PrecalculateSignature("()I")
		ret, err := stackTraceElement.CallMethod(env, "getLineNumber", Int)
		if err != nil {
			return nil, err
		}
		out.LineNumber = ret.(int)
	}
	// IsNativeMethod (boolean-returning)
	{
		env.PrecalculateSignature("()Z")
		ret, err := stackTraceElement.CallMethod(env, "isNativeMethod", Boolean)
		if err != nil {
			return nil, err
		}
		out.IsNativeMethod = ret.(bool)
	}
	return &out, nil
}
// NewThrowableErrorFromObject creates a new ThrowableError with its contents
// set from the values provided in throwable's methods. A nil throwable
// yields (nil, nil). Once the class name and toString value have been
// captured, later failures return the partially populated error together
// with the failure so callers still get basic information.
func NewThrowableErrorFromObject(env *Env, throwable *ObjectRef) (*ThrowableError, error) {
	if throwable.IsNil() {
		return nil, nil
	}
	// Helper: call a String-returning method on obj and store the result.
	getStringAndAssign := func(obj *ObjectRef, method string, assign func(s string)) error {
		return callStringMethodAndAssign(env, obj, method, assign)
	}
	out := &ThrowableError{}
	// ClassName, via getClass().getName().
	{
		objClass := getObjectClass(env.jniEnv, throwable.jobject)
		if objClass == 0 {
			return nil, fmt.Errorf("unable to get throwable class")
		}
		clsref := WrapJObject(uintptr(objClass), "java/lang/Class", false)
		defer env.DeleteLocalRef(clsref)
		if err := getStringAndAssign(clsref, "getName", func(s string) {
			out.ClassName = s
		}); err != nil {
			return nil, err
		}
	}
	// AsString
	if err := getStringAndAssign(throwable, "toString", func(s string) {
		out.AsString = s
	}); err != nil {
		return nil, err
	}
	// From this point on, return throwableError if a call fails, since we have some basic information.
	// LocalizedMessage
	if err := getStringAndAssign(throwable, "getLocalizedMessage", func(s string) {
		out.LocalizedMessage = s
	}); err != nil {
		return out, err
	}
	// Message
	if err := getStringAndAssign(throwable, "getMessage", func(s string) {
		out.Message = s
	}); err != nil {
		return out, err
	}
	// StackTrace
	{
		env.PrecalculateSignature("()[Ljava/lang/StackTraceElement;")
		ret, err := throwable.CallMethod(env, "getStackTrace", ObjectArrayType("java/lang/StackTraceElement"))
		if err != nil {
			return out, err
		}
		stkTrcArr := ret.(*ObjectRef)
		defer env.DeleteLocalRef(stkTrcArr)
		if !stkTrcArr.IsNil() {
			stkTrcSlc := env.FromObjectArray(stkTrcArr)
			stackTrace := make([]StackTraceElement, 0, len(stkTrcSlc))
			for _, stkTrc := range stkTrcSlc {
				if stkTrc.IsNil() {
					continue
				}
				stackTraceElement, err := NewStackTraceElementFromObject(env, stkTrc)
				// Release each element's local ref as soon as it has been
				// converted; the previous defer-in-loop kept every ref
				// alive until the whole function returned.
				env.DeleteLocalRef(stkTrc)
				if err != nil || stackTraceElement == nil {
					continue
				}
				stackTrace = append(stackTrace, *stackTraceElement)
			}
			out.StackTrace = stackTrace
		}
	}
	// Cause: recurses; a failed conversion leaves Cause nil.
	{
		env.PrecalculateSignature("()Ljava/lang/Throwable;")
		ret, err := throwable.CallMethod(env, "getCause", "java/lang/Throwable")
		if err != nil {
			return out, err
		}
		obj := ret.(*ObjectRef)
		defer env.DeleteLocalRef(obj)
		out.Cause, _ = NewThrowableErrorFromObject(env, obj)
	}
	return out, nil
}
var (
	// errThrowableConvertFail is the fallback error used when an exception
	// object cannot be converted into a richer error value.
	errThrowableConvertFail = fmt.Errorf("Java exception occured")
	// DefaultExceptionHandler is an alias for DescribeExceptionHandler, which is the default.
	DefaultExceptionHandler = DescribeExceptionHandler
	// DescribeExceptionHandler calls the JNI exceptionDescribe function.
	DescribeExceptionHandler ExceptionHandler = ExceptionHandlerFunc(func(env *Env, exception *ObjectRef) error {
		// Print the exception and stack trace to stderr, then clear it so
		// further JNI calls are permitted.
		exceptionDescribe(env.jniEnv)
		exceptionClear(env.jniEnv)
		return errors.New("Java exception occured. check stderr")
	})
	// ThrowableToStringExceptionHandler calls ToString on the exception and returns an error
	// with the returned value as its Error message.
	// If exception is nil or the toString() call fails, a generic default error is returned.
	ThrowableToStringExceptionHandler ExceptionHandler = ExceptionHandlerFunc(func(env *Env, exception *ObjectRef) error {
		exceptionClear(env.jniEnv)
		if exception.IsNil() {
			return errThrowableConvertFail
		}
		// Best effort: keep the generic message if toString fails or is empty.
		msg := "Java exception occured"
		callStringMethodAndAssign(env, exception, "toString", func(s string) {
			if s == "" {
				return
			}
			msg = s
		})
		return errors.New(msg)
	})
	// ThrowableErrorExceptionHandler populates a new ThrowableError with the values of exception.
	// If exception is nil, the getClass().getName(), or the toString call fails, a generic default
	// error is returned.
	ThrowableErrorExceptionHandler ExceptionHandler = ExceptionHandlerFunc(func(env *Env, exception *ObjectRef) error {
		exceptionClear(env.jniEnv)
		if exception.IsNil() {
			return errThrowableConvertFail
		}
		throwableError, _ := NewThrowableErrorFromObject(env, exception)
		if throwableError == nil {
			return errThrowableConvertFail
		}
		// Returned by value: ThrowableError implements error.
		return *throwableError
	})
)
|
// Package junos provides automation for Junos (Juniper Networks) devices, as
// well as interaction with Junos Space.
package junos
import (
"encoding/xml"
"errors"
"fmt"
"github.com/scottdware/go-netconf/netconf"
"io/ioutil"
"log"
"regexp"
"strings"
)
// All of our RPC calls we use. Each entry is a NETCONF RPC payload template;
// %s / %d placeholders are filled in with fmt.Sprintf before the RPC is sent.
var (
	rpcCommand = "<command format=\"text\">%s</command>"
	rpcCommandXML = "<command format=\"xml\">%s</command>"
	rpcCommit = "<commit-configuration/>"
	rpcCommitAt = "<commit-configuration><at-time>%s</at-time></commit-configuration>"
	rpcCommitCheck = "<commit-configuration><check/></commit-configuration>"
	rpcCommitConfirm = "<commit-configuration><confirmed/><confirm-timeout>%d</confirm-timeout></commit-configuration>"
	rpcFactsRE = "<get-route-engine-information/>"
	rpcFactsChassis = "<get-chassis-inventory/>"
	// Configuration loading: from a file's contents, from a URL, or from a
	// literal string, in each of the "set", "text" and "xml" formats.
	rpcConfigFileSet = "<load-configuration action=\"set\" format=\"text\"><configuration-set>%s</configuration-set></load-configuration>"
	rpcConfigFileText = "<load-configuration format=\"text\"><configuration-text>%s</configuration-text></load-configuration>"
	rpcConfigFileXML = "<load-configuration format=\"xml\"><configuration>%s</configuration></load-configuration>"
	rpcConfigURLSet = "<load-configuration action=\"set\" format=\"text\" url=\"%s\"/>"
	rpcConfigURLText = "<load-configuration format=\"text\" url=\"%s\"/>"
	rpcConfigURLXML = "<load-configuration format=\"xml\" url=\"%s\"/>"
	rpcConfigStringSet = "<load-configuration action=\"set\" format=\"text\"><configuration-set>%s</configuration-set></load-configuration>"
	rpcConfigStringText = "<load-configuration format=\"text\"><configuration-text>%s</configuration-text></load-configuration>"
	rpcConfigStringXML = "<load-configuration format=\"xml\"><configuration>%s</configuration></load-configuration>"
	// Rescue and rollback configuration management.
	rpcGetRescue = "<get-rescue-information><format>text</format></get-rescue-information>"
	rpcGetRollback = "<get-rollback-information><rollback>%d</rollback><format>text</format></get-rollback-information>"
	rpcGetRollbackCompare = "<get-rollback-information><rollback>0</rollback><compare>%d</compare><format>text</format></get-rollback-information>"
	rpcHardware = "<get-chassis-inventory/>"
	rpcLock = "<lock><target><candidate/></target></lock>"
	rpcRescueConfig = "<load-configuration rescue=\"rescue\"/>"
	rpcRescueDelete = "<request-delete-rescue-configuration/>"
	rpcRescueSave = "<request-save-rescue-configuration/>"
	rpcRollbackConfig = "<load-configuration rollback=\"%d\"/>"
	rpcRoute = "<get-route-engine-information/>"
	rpcSoftware = "<get-software-information/>"
	rpcUnlock = "<unlock><target><candidate/></target></unlock>"
	rpcVersion = "<get-software-information/>"
	rpcReboot = "<request-reboot/>"
	rpcCommitHistory = "<get-commit-information/>"
)
// Junos contains our session state.
type Junos struct {
	Session *netconf.Session // active NETCONF session to the device
	Hostname string
	RoutingEngines int // number of routing engines / FPCs / cluster nodes
	Platform []RoutingEngine // model and version per routing engine
}
// CommitHistory holds all of the commit entries.
type CommitHistory struct {
	Entries []CommitEntry `xml:"commit-history"`
}
// CommitEntry holds information about each previous commit.
type CommitEntry struct {
	Sequence int `xml:"sequence-number"`
	User string `xml:"user"`
	Method string `xml:"client"`
	Timestamp string `xml:"date-time"`
}
// RoutingEngine contains the hardware and software information for each route engine.
type RoutingEngine struct {
	Model string
	Version string
}
// commandXML captures the raw inner XML of a <command> reply.
type commandXML struct {
	Config string `xml:",innerxml"`
}
// commitError describes a single <rpc-error> raised by a commit operation.
type commitError struct {
	Path string `xml:"error-path"`
	Element string `xml:"error-info>bad-element"`
	Message string `xml:"error-message"`
}
// commitResults is the parsed <commit-results> reply, including any errors.
type commitResults struct {
	XMLName xml.Name `xml:"commit-results"`
	Errors []commitError `xml:"rpc-error"`
}
// diffXML is the parsed rollback comparison (configuration diff) reply.
type diffXML struct {
	XMLName xml.Name `xml:"rollback-information"`
	Config string `xml:"configuration-information>configuration-output"`
}
// hardwareRouteEngines is the chassis inventory on multi-routing-engine platforms.
type hardwareRouteEngines struct {
	XMLName xml.Name `xml:"multi-routing-engine-results"`
	RE []hardwareRouteEngine `xml:"multi-routing-engine-item>chassis-inventory"`
}
// hardwareRouteEngine is the chassis inventory of a single routing engine.
type hardwareRouteEngine struct {
	XMLName xml.Name `xml:"chassis-inventory"`
	Serial string `xml:"chassis>serial-number"`
	Description string `xml:"chassis>description"`
}
// versionRouteEngines is the software information on multi-routing-engine platforms.
type versionRouteEngines struct {
	XMLName xml.Name `xml:"multi-routing-engine-results"`
	RE []versionRouteEngine `xml:"multi-routing-engine-item>software-information"`
}
// versionRouteEngine is the software information of a single routing engine.
type versionRouteEngine struct {
	XMLName xml.Name `xml:"software-information"`
	Hostname string `xml:"host-name"`
	Platform string `xml:"product-model"`
	PackageInfo []versionPackageInfo `xml:"package-information"`
}
// versionPackageInfo is one <package-information> entry (package name plus
// its version comment).
type versionPackageInfo struct {
	XMLName xml.Name `xml:"package-information"`
	PackageName []string `xml:"name"`
	SoftwareVersion []string `xml:"comment"`
}
// Close disconnects our session to the device.
// Any error from closing the underlying transport is discarded.
func (j *Junos) Close() {
	j.Session.Transport.Close()
}
// RunCommand executes any operational mode command, such as "show" or "request."
// Format can be one of "text" or "xml."
//
// On RPC or device errors the returned string is a generic "no output" hint
// and the error describes the failure; an empty reply yields the hint with a
// nil error.
func (j *Junos) RunCommand(cmd, format string) (string, error) {
	const errMessage = "No output available. Please check the syntax of your command."
	// Default to text output unless XML was explicitly requested.
	command := fmt.Sprintf(rpcCommand, cmd)
	if format == "xml" {
		command = fmt.Sprintf(rpcCommandXML, cmd)
	}
	reply, err := j.Session.Exec(netconf.RawRPC(command))
	if err != nil {
		return errMessage, err
	}
	// Surface the first RPC error reported by the device.
	if len(reply.Errors) > 0 {
		return errMessage, errors.New(reply.Errors[0].Message)
	}
	if reply.Data == "" {
		return errMessage, nil
	}
	if format == "text" {
		// Text output arrives wrapped in XML; unwrap the inner payload.
		var output commandXML
		if err := xml.Unmarshal([]byte(reply.Data), &output); err != nil {
			return "", err
		}
		return output.Config, nil
	}
	return reply.Data, nil
}
// CommitHistory gathers all the information about previous commits.
func (j *Junos) CommitHistory() (*CommitHistory, error) {
	resp, err := j.Session.Exec(netconf.RawRPC(rpcCommitHistory))
	if err != nil {
		return nil, err
	}
	if len(resp.Errors) > 0 {
		return nil, errors.New(resp.Errors[0].Message)
	}
	if resp.Data == "" {
		return nil, errors.New("could not load commit history")
	}
	history := &CommitHistory{}
	if err := xml.Unmarshal([]byte(resp.Data), history); err != nil {
		return nil, err
	}
	return history, nil
}
// Commit commits the configuration.
func (j *Junos) Commit() error {
	resp, err := j.Session.Exec(netconf.RawRPC(rpcCommit))
	if err != nil {
		return err
	}
	if len(resp.Errors) > 0 {
		return errors.New(resp.Errors[0].Message)
	}
	var results commitResults
	if err := xml.Unmarshal([]byte(resp.Data), &results); err != nil {
		return err
	}
	if len(results.Errors) > 0 {
		// Report the first commit error with its path and offending element.
		e := results.Errors[0]
		return fmt.Errorf("[%s]\n %s\nError: %s", strings.Trim(e.Path, "[\r\n]"), strings.Trim(e.Element, "[\r\n]"), strings.Trim(e.Message, "[\r\n]"))
	}
	return nil
}
// CommitAt commits the configuration at the specified <time>.
func (j *Junos) CommitAt(time string) error {
	resp, err := j.Session.Exec(netconf.RawRPC(fmt.Sprintf(rpcCommitAt, time)))
	if err != nil {
		return err
	}
	if len(resp.Errors) > 0 {
		return errors.New(resp.Errors[0].Message)
	}
	var results commitResults
	if err := xml.Unmarshal([]byte(resp.Data), &results); err != nil {
		return err
	}
	if len(results.Errors) > 0 {
		// Report the first commit error with its path and offending element.
		e := results.Errors[0]
		return fmt.Errorf("[%s]\n %s\nError: %s", strings.Trim(e.Path, "[\r\n]"), strings.Trim(e.Element, "[\r\n]"), strings.Trim(e.Message, "[\r\n]"))
	}
	return nil
}
// CommitCheck checks the configuration for syntax errors.
func (j *Junos) CommitCheck() error {
	resp, err := j.Session.Exec(netconf.RawRPC(rpcCommitCheck))
	if err != nil {
		return err
	}
	if len(resp.Errors) > 0 {
		return errors.New(resp.Errors[0].Message)
	}
	var results commitResults
	if err := xml.Unmarshal([]byte(resp.Data), &results); err != nil {
		return err
	}
	if len(results.Errors) > 0 {
		// Report the first commit error with its path and offending element.
		e := results.Errors[0]
		return fmt.Errorf("[%s]\n %s\nError: %s", strings.Trim(e.Path, "[\r\n]"), strings.Trim(e.Element, "[\r\n]"), strings.Trim(e.Message, "[\r\n]"))
	}
	return nil
}
// CommitConfirm rolls back the configuration after <delay> minutes.
func (j *Junos) CommitConfirm(delay int) error {
	resp, err := j.Session.Exec(netconf.RawRPC(fmt.Sprintf(rpcCommitConfirm, delay)))
	if err != nil {
		return err
	}
	if len(resp.Errors) > 0 {
		return errors.New(resp.Errors[0].Message)
	}
	var results commitResults
	if err := xml.Unmarshal([]byte(resp.Data), &results); err != nil {
		return err
	}
	if len(results.Errors) > 0 {
		// Report the first commit error with its path and offending element.
		e := results.Errors[0]
		return fmt.Errorf("[%s]\n %s\nError: %s", strings.Trim(e.Path, "[\r\n]"), strings.Trim(e.Element, "[\r\n]"), strings.Trim(e.Message, "[\r\n]"))
	}
	return nil
}
// ConfigDiff compares the current active configuration to a given rollback configuration.
func (j *Junos) ConfigDiff(compare int) (string, error) {
	resp, err := j.Session.Exec(netconf.RawRPC(fmt.Sprintf(rpcGetRollbackCompare, compare)))
	if err != nil {
		return "", err
	}
	if len(resp.Errors) > 0 {
		return "", errors.New(resp.Errors[0].Message)
	}
	var diff diffXML
	if err := xml.Unmarshal([]byte(resp.Data), &diff); err != nil {
		return "", err
	}
	return diff.Config, nil
}
// PrintFacts prints information about the device, such as model and software,
// for each routing engine / FPC / cluster node.
func (j *Junos) PrintFacts() {
	fpcRegex := regexp.MustCompile(`^(EX).*`)
	srxRegex := regexp.MustCompile(`^(SRX).*`)
	mRegex := regexp.MustCompile(`^(M[X]?).*`)
	const sep = "--------------------------------------------------------------------------"
	var b strings.Builder
	fmt.Fprintf(&b, "Routing Engines/FPC's: %d\n\n", j.RoutingEngines)
	for i, p := range j.Platform {
		// Header prefix depends on the platform family: EX switches use
		// "fpc", SRX clusters use "node", M/MX routers use "re".
		// Using MatchString here fixes the previous `switch model {case
		// re.FindString(model):}` construct, which matched an empty model
		// against the first branch.
		var prefix string
		switch {
		case fpcRegex.MatchString(p.Model):
			prefix = "fpc"
		case srxRegex.MatchString(p.Model):
			prefix = "node"
		case mRegex.MatchString(p.Model):
			prefix = "re"
		default:
			// Unknown platform families are skipped, as before.
			continue
		}
		fmt.Fprintf(&b, "%s%d\n%s\n", prefix, i, sep)
		fmt.Fprintf(&b, "Hostname: %s\nModel: %s\nVersion: %s\n\n", j.Hostname, p.Model, p.Version)
	}
	fmt.Println(b.String())
}
// GetConfig returns the full configuration, or configuration starting at <section>.
// Format can be one of "text" or "xml." You can do sub-sections by separating the
// <section> path with a ">" symbol, i.e. "system>login". Passing "full" returns
// the entire configuration.
func (j *Junos) GetConfig(section, format string) (string, error) {
	command := fmt.Sprintf("<get-configuration format=\"%s\"><configuration>", format)
	if section == "full" {
		// Whole configuration: no section filter elements. Previously the
		// section tags were appended even in this case (after the RPC had
		// already been closed), producing a malformed request.
		command += "</configuration></get-configuration>"
	} else {
		// Build nested opening tags for each path element, a self-closing
		// tag for the leaf, then close the tags in reverse order.
		// (Loop variables renamed from `j`, which shadowed the receiver.)
		secs := strings.Split(section, ">")
		last := len(secs) - 1
		for i := 0; i < last; i++ {
			command += fmt.Sprintf("<%s>", secs[i])
		}
		command += fmt.Sprintf("<%s/>", secs[last])
		for i := last - 1; i >= 0; i-- {
			command += fmt.Sprintf("</%s>", secs[i])
		}
		command += "</configuration></get-configuration>"
	}
	reply, err := j.Session.Exec(netconf.RawRPC(command))
	if err != nil {
		return "", err
	}
	if len(reply.Errors) > 0 {
		return "", errors.New(reply.Errors[0].Message)
	}
	if format == "text" {
		// Text output arrives wrapped in XML; unwrap the inner payload.
		var output commandXML
		if err := xml.Unmarshal([]byte(reply.Data), &output); err != nil {
			return "", err
		}
		return output.Config, nil
	}
	return reply.Data, nil
}
// Config loads a given configuration file from your local machine,
// a remote (FTP or HTTP server) location, or via configuration statements
// from variables (type string or []string) within your script. Format can be one of
// "set" "text" or "xml." When commit is true, the loaded configuration is
// committed afterwards.
func (j *Junos) Config(path interface{}, format string, commit bool) error {
	command := buildConfigCommand(path, format)
	reply, err := j.Session.Exec(netconf.RawRPC(command))
	if err != nil {
		return err
	}
	if commit {
		if err := j.Commit(); err != nil {
			return err
		}
	}
	if len(reply.Errors) > 0 {
		return errors.New(reply.Errors[0].Message)
	}
	return nil
}

// buildConfigCommand renders the <load-configuration> RPC for the given
// source (URL, file on disk, literal string, or []string of statements) in
// the requested format ("set", "text" or "xml"). An unknown format or source
// type yields "".
func buildConfigCommand(path interface{}, format string) string {
	// Select the URL / literal-string / file templates for the format.
	var urlTpl, strTpl, fileTpl string
	switch format {
	case "set":
		urlTpl, strTpl, fileTpl = rpcConfigURLSet, rpcConfigStringSet, rpcConfigFileSet
	case "text":
		urlTpl, strTpl, fileTpl = rpcConfigURLText, rpcConfigStringText, rpcConfigFileText
	case "xml":
		urlTpl, strTpl, fileTpl = rpcConfigURLXML, rpcConfigStringXML, rpcConfigFileXML
	}
	switch p := path.(type) {
	case string:
		// Remote locations (ftp://, http://) load by URL. Previously the URL
		// command was built and then immediately overwritten by the
		// string/file logic below, so URLs never actually worked.
		if strings.Contains(p, "tp://") {
			return fmt.Sprintf(urlTpl, p)
		}
		// A readable file loads its contents; anything else is treated as
		// literal configuration text. (The file is now read only once.)
		data, err := ioutil.ReadFile(p)
		if err != nil {
			return fmt.Sprintf(strTpl, p)
		}
		return fmt.Sprintf(fileTpl, string(data))
	case []string:
		return fmt.Sprintf(strTpl, strings.Join(p, "\n"))
	}
	return ""
}
// Lock locks the candidate configuration.
func (j *Junos) Lock() error {
	resp, err := j.Session.Exec(netconf.RawRPC(rpcLock))
	if err != nil {
		return err
	}
	if len(resp.Errors) > 0 {
		return errors.New(resp.Errors[0].Message)
	}
	return nil
}
// NewSession establishes a new connection to a Junos device that we will use
// to run our commands against. NewSession also gathers software information
// about the device.
func NewSession(host, user, password string) (*Junos, error) {
	rex := regexp.MustCompile(`^.*\[(.*)\]`)
	s, err := netconf.DialSSH(host, netconf.SSHConfigPassword(user, password))
	if err != nil {
		// Log for visibility but return the error instead of log.Fatal,
		// which terminated the caller's whole process on a failed dial.
		log.Printf("junos: dial %s: %v", host, err)
		return nil, err
	}
	reply, err := s.Exec(netconf.RawRPC(rpcVersion))
	if err != nil {
		return nil, err
	}
	if len(reply.Errors) > 0 {
		return nil, errors.New(reply.Errors[0].Message)
	}
	// version extracts "x" from a package comment such as "JUNOS ... [x]".
	// Guarding the submatch avoids the previous index-out-of-range panic on
	// unexpected comment formats.
	version := func(comment string) string {
		if m := rex.FindStringSubmatch(comment); m != nil {
			return m[1]
		}
		return ""
	}
	if strings.Contains(reply.Data, "multi-routing-engine-results") {
		// Multiple routing engines (or chassis cluster nodes).
		var facts versionRouteEngines
		if err := xml.Unmarshal([]byte(reply.Data), &facts); err != nil {
			return nil, err
		}
		// NOTE(review): assumes at least one RE entry with PackageInfo —
		// confirm against device replies; an empty reply would panic.
		numRE := len(facts.RE)
		res := make([]RoutingEngine, 0, numRE)
		for i := 0; i < numRE; i++ {
			res = append(res, RoutingEngine{
				Model:   strings.ToUpper(facts.RE[i].Platform),
				Version: version(facts.RE[i].PackageInfo[0].SoftwareVersion[0]),
			})
		}
		return &Junos{
			Session:        s,
			Hostname:       facts.RE[0].Hostname,
			RoutingEngines: numRE,
			Platform:       res,
		}, nil
	}
	// Single routing engine.
	var facts versionRouteEngine
	if err := xml.Unmarshal([]byte(reply.Data), &facts); err != nil {
		return nil, err
	}
	res := []RoutingEngine{{
		Model:   strings.ToUpper(facts.Platform),
		Version: version(facts.PackageInfo[0].SoftwareVersion[0]),
	}}
	return &Junos{
		Session:        s,
		Hostname:       facts.Hostname,
		RoutingEngines: 1,
		Platform:       res,
	}, nil
}
// Rescue will create or delete the rescue configuration given "save" or "delete."
// Any action other than "delete" saves the rescue configuration.
func (j *Junos) Rescue(action string) error {
	// The RPCs are fixed strings; the previous fmt.Sprintf calls had no
	// verbs to fill (flagged by go vet) and were unnecessary.
	command := rpcRescueSave
	if action == "delete" {
		command = rpcRescueDelete
	}
	reply, err := j.Session.Exec(netconf.RawRPC(command))
	if err != nil {
		return err
	}
	if len(reply.Errors) > 0 {
		return errors.New(reply.Errors[0].Message)
	}
	return nil
}
// RollbackConfig loads and commits the configuration of a given rollback
// number, or the rescue configuration when option is the string "rescue".
func (j *Junos) RollbackConfig(option interface{}) error {
	command := fmt.Sprintf(rpcRollbackConfig, option)
	if option == "rescue" {
		// Fixed string; no Sprintf verbs needed (flagged by go vet before).
		command = rpcRescueConfig
	}
	reply, err := j.Session.Exec(netconf.RawRPC(command))
	if err != nil {
		return err
	}
	// Check for RPC errors before committing; previously a commit was
	// issued even when the rollback load itself had failed.
	if len(reply.Errors) > 0 {
		return errors.New(reply.Errors[0].Message)
	}
	return j.Commit()
}
// Unlock unlocks the candidate configuration.
func (j *Junos) Unlock() error {
	resp, err := j.Session.Exec(netconf.RawRPC(rpcUnlock))
	if err != nil {
		return err
	}
	if len(resp.Errors) > 0 {
		return errors.New(resp.Errors[0].Message)
	}
	return nil
}
// Reboot will reboot the device.
func (j *Junos) Reboot() error {
	resp, err := j.Session.Exec(netconf.RawRPC(rpcReboot))
	if err != nil {
		return err
	}
	if len(resp.Errors) > 0 {
		return errors.New(resp.Errors[0].Message)
	}
	return nil
}
Updated function documentation.
// Package junos provides automation for Junos (Juniper Networks) devices, as
// well as interaction with Junos Space.
package junos
import (
"encoding/xml"
"errors"
"fmt"
"github.com/scottdware/go-netconf/netconf"
"io/ioutil"
"log"
"regexp"
"strings"
)
// All of our RPC calls we use. Each entry is a NETCONF RPC payload template;
// %s / %d placeholders are filled in with fmt.Sprintf before the RPC is sent.
var (
	rpcCommand = "<command format=\"text\">%s</command>"
	rpcCommandXML = "<command format=\"xml\">%s</command>"
	rpcCommit = "<commit-configuration/>"
	rpcCommitAt = "<commit-configuration><at-time>%s</at-time></commit-configuration>"
	rpcCommitCheck = "<commit-configuration><check/></commit-configuration>"
	rpcCommitConfirm = "<commit-configuration><confirmed/><confirm-timeout>%d</confirm-timeout></commit-configuration>"
	rpcFactsRE = "<get-route-engine-information/>"
	rpcFactsChassis = "<get-chassis-inventory/>"
	// Configuration loading: from a file's contents, from a URL, or from a
	// literal string, in each of the "set", "text" and "xml" formats.
	rpcConfigFileSet = "<load-configuration action=\"set\" format=\"text\"><configuration-set>%s</configuration-set></load-configuration>"
	rpcConfigFileText = "<load-configuration format=\"text\"><configuration-text>%s</configuration-text></load-configuration>"
	rpcConfigFileXML = "<load-configuration format=\"xml\"><configuration>%s</configuration></load-configuration>"
	rpcConfigURLSet = "<load-configuration action=\"set\" format=\"text\" url=\"%s\"/>"
	rpcConfigURLText = "<load-configuration format=\"text\" url=\"%s\"/>"
	rpcConfigURLXML = "<load-configuration format=\"xml\" url=\"%s\"/>"
	rpcConfigStringSet = "<load-configuration action=\"set\" format=\"text\"><configuration-set>%s</configuration-set></load-configuration>"
	rpcConfigStringText = "<load-configuration format=\"text\"><configuration-text>%s</configuration-text></load-configuration>"
	rpcConfigStringXML = "<load-configuration format=\"xml\"><configuration>%s</configuration></load-configuration>"
	// Rescue and rollback configuration management.
	rpcGetRescue = "<get-rescue-information><format>text</format></get-rescue-information>"
	rpcGetRollback = "<get-rollback-information><rollback>%d</rollback><format>text</format></get-rollback-information>"
	rpcGetRollbackCompare = "<get-rollback-information><rollback>0</rollback><compare>%d</compare><format>text</format></get-rollback-information>"
	rpcHardware = "<get-chassis-inventory/>"
	rpcLock = "<lock><target><candidate/></target></lock>"
	rpcRescueConfig = "<load-configuration rescue=\"rescue\"/>"
	rpcRescueDelete = "<request-delete-rescue-configuration/>"
	rpcRescueSave = "<request-save-rescue-configuration/>"
	rpcRollbackConfig = "<load-configuration rollback=\"%d\"/>"
	rpcRoute = "<get-route-engine-information/>"
	rpcSoftware = "<get-software-information/>"
	rpcUnlock = "<unlock><target><candidate/></target></unlock>"
	rpcVersion = "<get-software-information/>"
	rpcReboot = "<request-reboot/>"
	rpcCommitHistory = "<get-commit-information/>"
)
// Junos contains our session state.
type Junos struct {
	Session *netconf.Session // active NETCONF session to the device
	Hostname string
	RoutingEngines int // number of routing engines / FPCs / cluster nodes
	Platform []RoutingEngine // model and version per routing engine
}
// CommitHistory holds all of the commit entries.
type CommitHistory struct {
	Entries []CommitEntry `xml:"commit-history"`
}
// CommitEntry holds information about each previous commit.
type CommitEntry struct {
	Sequence int `xml:"sequence-number"`
	User string `xml:"user"`
	Method string `xml:"client"`
	Timestamp string `xml:"date-time"`
}
// RoutingEngine contains the hardware and software information for each route engine.
type RoutingEngine struct {
	Model string
	Version string
}
// commandXML captures the raw inner XML of a <command> reply.
type commandXML struct {
	Config string `xml:",innerxml"`
}
// commitError describes a single <rpc-error> raised by a commit operation.
type commitError struct {
	Path string `xml:"error-path"`
	Element string `xml:"error-info>bad-element"`
	Message string `xml:"error-message"`
}
// commitResults is the parsed <commit-results> reply, including any errors.
type commitResults struct {
	XMLName xml.Name `xml:"commit-results"`
	Errors []commitError `xml:"rpc-error"`
}
// diffXML is the parsed rollback comparison (configuration diff) reply.
type diffXML struct {
	XMLName xml.Name `xml:"rollback-information"`
	Config string `xml:"configuration-information>configuration-output"`
}
// hardwareRouteEngines is the chassis inventory on multi-routing-engine platforms.
type hardwareRouteEngines struct {
	XMLName xml.Name `xml:"multi-routing-engine-results"`
	RE []hardwareRouteEngine `xml:"multi-routing-engine-item>chassis-inventory"`
}
// hardwareRouteEngine is the chassis inventory of a single routing engine.
type hardwareRouteEngine struct {
	XMLName xml.Name `xml:"chassis-inventory"`
	Serial string `xml:"chassis>serial-number"`
	Description string `xml:"chassis>description"`
}
// versionRouteEngines is the software information on multi-routing-engine platforms.
type versionRouteEngines struct {
	XMLName xml.Name `xml:"multi-routing-engine-results"`
	RE []versionRouteEngine `xml:"multi-routing-engine-item>software-information"`
}
// versionRouteEngine is the software information of a single routing engine.
type versionRouteEngine struct {
	XMLName xml.Name `xml:"software-information"`
	Hostname string `xml:"host-name"`
	Platform string `xml:"product-model"`
	PackageInfo []versionPackageInfo `xml:"package-information"`
}
// versionPackageInfo is one <package-information> entry (package name plus
// its version comment).
type versionPackageInfo struct {
	XMLName xml.Name `xml:"package-information"`
	PackageName []string `xml:"name"`
	SoftwareVersion []string `xml:"comment"`
}
// Close disconnects our session to the device.
// Any error from closing the underlying transport is discarded.
func (j *Junos) Close() {
	j.Session.Transport.Close()
}
// RunCommand executes any operational mode command, such as "show" or "request."
// <format> can be one of "text" or "xml."
//
// On RPC or device errors the returned string is a generic "no output" hint
// and the error describes the failure; an empty reply yields the hint with a
// nil error.
func (j *Junos) RunCommand(cmd, format string) (string, error) {
	const errMessage = "No output available. Please check the syntax of your command."
	// Default to text output unless XML was explicitly requested.
	command := fmt.Sprintf(rpcCommand, cmd)
	if format == "xml" {
		command = fmt.Sprintf(rpcCommandXML, cmd)
	}
	reply, err := j.Session.Exec(netconf.RawRPC(command))
	if err != nil {
		return errMessage, err
	}
	// Surface the first RPC error reported by the device.
	if len(reply.Errors) > 0 {
		return errMessage, errors.New(reply.Errors[0].Message)
	}
	if reply.Data == "" {
		return errMessage, nil
	}
	if format == "text" {
		// Text output arrives wrapped in XML; unwrap the inner payload.
		var output commandXML
		if err := xml.Unmarshal([]byte(reply.Data), &output); err != nil {
			return "", err
		}
		return output.Config, nil
	}
	return reply.Data, nil
}
// CommitHistory gathers all the information about previous commits.
func (j *Junos) CommitHistory() (*CommitHistory, error) {
	resp, err := j.Session.Exec(netconf.RawRPC(rpcCommitHistory))
	if err != nil {
		return nil, err
	}
	if len(resp.Errors) > 0 {
		return nil, errors.New(resp.Errors[0].Message)
	}
	if resp.Data == "" {
		return nil, errors.New("could not load commit history")
	}
	history := &CommitHistory{}
	if err := xml.Unmarshal([]byte(resp.Data), history); err != nil {
		return nil, err
	}
	return history, nil
}
// Commit commits the configuration.
func (j *Junos) Commit() error {
	resp, err := j.Session.Exec(netconf.RawRPC(rpcCommit))
	if err != nil {
		return err
	}
	if len(resp.Errors) > 0 {
		return errors.New(resp.Errors[0].Message)
	}
	var results commitResults
	if err := xml.Unmarshal([]byte(resp.Data), &results); err != nil {
		return err
	}
	if len(results.Errors) > 0 {
		// Report the first commit error with its path and offending element.
		e := results.Errors[0]
		return fmt.Errorf("[%s]\n %s\nError: %s", strings.Trim(e.Path, "[\r\n]"), strings.Trim(e.Element, "[\r\n]"), strings.Trim(e.Message, "[\r\n]"))
	}
	return nil
}
// CommitAt commits the configuration at the specified <time>.
// Note: the parameter intentionally keeps its historical name even though it
// shadows the time package (which this function does not use).
func (j *Junos) CommitAt(time string) error {
	command := fmt.Sprintf(rpcCommitAt, time)
	reply, err := j.Session.Exec(netconf.RawRPC(command))
	if err != nil {
		return err
	}
	// First RPC-level error wins (original loop returned on iteration one).
	if len(reply.Errors) > 0 {
		return errors.New(reply.Errors[0].Message)
	}

	// The reply body may itself carry commit errors; surface the first.
	var errs commitResults
	if err := xml.Unmarshal([]byte(reply.Data), &errs); err != nil {
		return err
	}
	if len(errs.Errors) > 0 {
		m := errs.Errors[0]
		return fmt.Errorf("[%s]\n %s\nError: %s",
			strings.Trim(m.Path, "[\r\n]"),
			strings.Trim(m.Element, "[\r\n]"),
			strings.Trim(m.Message, "[\r\n]"))
	}
	return nil
}
// CommitCheck checks the candidate configuration for syntax errors without
// committing it.
func (j *Junos) CommitCheck() error {
	reply, err := j.Session.Exec(netconf.RawRPC(rpcCommitCheck))
	if err != nil {
		return err
	}
	// Report the first RPC-level error, if any.
	if len(reply.Errors) > 0 {
		return errors.New(reply.Errors[0].Message)
	}

	// Parse the reply body for check failures and surface the first one.
	var errs commitResults
	if err := xml.Unmarshal([]byte(reply.Data), &errs); err != nil {
		return err
	}
	if len(errs.Errors) > 0 {
		m := errs.Errors[0]
		return fmt.Errorf("[%s]\n %s\nError: %s",
			strings.Trim(m.Path, "[\r\n]"),
			strings.Trim(m.Element, "[\r\n]"),
			strings.Trim(m.Message, "[\r\n]"))
	}
	return nil
}
// CommitConfirm commits the configuration, automatically rolling it back
// after <delay> minutes unless confirmed by a subsequent commit.
func (j *Junos) CommitConfirm(delay int) error {
	command := fmt.Sprintf(rpcCommitConfirm, delay)
	reply, err := j.Session.Exec(netconf.RawRPC(command))
	if err != nil {
		return err
	}
	// First RPC-level error wins (original loop returned on iteration one).
	if len(reply.Errors) > 0 {
		return errors.New(reply.Errors[0].Message)
	}

	// Surface the first commit error embedded in the reply body, if any.
	var errs commitResults
	if err := xml.Unmarshal([]byte(reply.Data), &errs); err != nil {
		return err
	}
	if len(errs.Errors) > 0 {
		m := errs.Errors[0]
		return fmt.Errorf("[%s]\n %s\nError: %s",
			strings.Trim(m.Path, "[\r\n]"),
			strings.Trim(m.Element, "[\r\n]"),
			strings.Trim(m.Message, "[\r\n]"))
	}
	return nil
}
// ConfigDiff compares the current active configuration to a given rollback
// configuration and returns the textual diff.
func (j *Junos) ConfigDiff(compare int) (string, error) {
	reply, err := j.Session.Exec(netconf.RawRPC(fmt.Sprintf(rpcGetRollbackCompare, compare)))
	if err != nil {
		return "", err
	}
	for _, e := range reply.Errors {
		return "", errors.New(e.Message)
	}
	diff := diffXML{}
	if err := xml.Unmarshal([]byte(reply.Data), &diff); err != nil {
		return "", err
	}
	return diff.Config, nil
}
// PrintFacts prints information about the device, such as model and software.
//
// The original used `switch model { case re.FindString(model): }`, which
// accidentally matched the first case when model was the empty string (an
// unmatched FindString returns ""). MatchString in a tagless switch expresses
// the intent directly; for the anchored `^(...).*` patterns used here the two
// are otherwise equivalent. Output is accumulated in a strings.Builder
// instead of repeated string concatenation.
func (j *Junos) PrintFacts() {
	fpcRegex := regexp.MustCompile(`^(EX).*`)
	srxRegex := regexp.MustCompile(`^(SRX).*`)
	mRegex := regexp.MustCompile(`^(M[X]?).*`)

	var b strings.Builder
	fmt.Fprintf(&b, "Routing Engines/FPC's: %d\n\n", j.RoutingEngines)
	for i, p := range j.Platform {
		// Pick the section label by platform family; unknown (or empty)
		// models get no section, as intended.
		var label string
		switch {
		case fpcRegex.MatchString(p.Model):
			label = fmt.Sprintf("fpc%d", i)
		case srxRegex.MatchString(p.Model):
			label = fmt.Sprintf("node%d", i)
		case mRegex.MatchString(p.Model):
			label = fmt.Sprintf("re%d", i)
		default:
			continue
		}
		fmt.Fprintf(&b, "%s\n--------------------------------------------------------------------------\n", label)
		fmt.Fprintf(&b, "Hostname: %s\nModel: %s\nVersion: %s\n\n", j.Hostname, p.Model, p.Version)
	}
	fmt.Println(b.String())
}
// GetConfig returns the full configuration, or configuration starting at <section>.
// <format> can be one of "text" or "xml." You can do sub-sections by separating the
// <section> path with a ">" symbol, i.e. "system>login" or "protocols>ospf>area".
//
// Bug fixed: the original appended the closing tags for section == "full" and
// then ALSO ran the section-building branch (nSecs is always >= 0 because
// strings.Split never returns an empty slice), producing malformed XML like
// `...</get-configuration><full/></configuration></get-configuration>`.
// The section logic is now an else branch. The inner loop variable no longer
// shadows the receiver j either.
func (j *Junos) GetConfig(section, format string) (string, error) {
	command := fmt.Sprintf("<get-configuration format=\"%s\"><configuration>", format)
	if section == "full" {
		// Full configuration: no section filter inside <configuration>.
		command += "</configuration></get-configuration>"
	} else {
		// Build a nested element path, e.g. "system>login" ->
		// <system><login/></system>.
		secs := strings.Split(section, ">")
		nSecs := len(secs) - 1
		for i := 0; i < nSecs; i++ {
			command += fmt.Sprintf("<%s>", secs[i])
		}
		command += fmt.Sprintf("<%s/>", secs[nSecs])
		for i := nSecs - 1; i >= 0; i-- {
			command += fmt.Sprintf("</%s>", secs[i])
		}
		command += "</configuration></get-configuration>"
	}

	reply, err := j.Session.Exec(netconf.RawRPC(command))
	if err != nil {
		return "", err
	}
	if len(reply.Errors) > 0 {
		return "", errors.New(reply.Errors[0].Message)
	}

	// "text" output arrives wrapped in XML; unwrap it.
	if format == "text" {
		var output commandXML
		if err := xml.Unmarshal([]byte(reply.Data), &output); err != nil {
			return "", err
		}
		return output.Config, nil
	}
	return reply.Data, nil
}
// Config loads a given configuration file from your local machine,
// a remote (FTP or HTTP server) location, or via configuration statements
// from variables (type string or []string) within your script. Format can be one of
// "set" "text" or "xml."
//
// Bugs fixed relative to the original:
//   - a URL path ("...tp://...") had its command immediately overwritten by
//     the following ReadFile branch (ReadFile on a URL fails, so the URL was
//     resent as literal configuration text);
//   - the file was read twice (once to probe, once for the data);
//   - the commit ran before the load reply was checked for errors, so a
//     failed load could still be committed;
//   - the near-identical set/text/xml arms are folded into one helper.
func (j *Junos) Config(path interface{}, format string, commit bool) error {
	// Select the RPC templates for the requested format.
	var urlTpl, stringTpl, fileTpl string
	switch format {
	case "set":
		urlTpl, stringTpl, fileTpl = rpcConfigURLSet, rpcConfigStringSet, rpcConfigFileSet
	case "text":
		urlTpl, stringTpl, fileTpl = rpcConfigURLText, rpcConfigStringText, rpcConfigFileText
	case "xml":
		urlTpl, stringTpl, fileTpl = rpcConfigURLXML, rpcConfigStringXML, rpcConfigFileXML
	}

	command := buildConfigCommand(path, urlTpl, stringTpl, fileTpl)
	reply, err := j.Session.Exec(netconf.RawRPC(command))
	if err != nil {
		return err
	}
	// Check the load result before committing anything.
	if len(reply.Errors) > 0 {
		return errors.New(reply.Errors[0].Message)
	}
	if commit {
		return j.Commit()
	}
	return nil
}

// buildConfigCommand renders the load-configuration RPC for path, which may
// be a remote URL, a local file path, raw configuration text, or a []string
// of configuration statements.
func buildConfigCommand(path interface{}, urlTpl, stringTpl, fileTpl string) string {
	switch p := path.(type) {
	case string:
		// A URL ("ftp://", "http://") is handed to the device to fetch.
		if strings.Contains(p, "tp://") {
			return fmt.Sprintf(urlTpl, p)
		}
		// Read the file once; if it cannot be read, treat p as literal
		// configuration statements (preserving the original fallback).
		if data, err := ioutil.ReadFile(p); err == nil {
			return fmt.Sprintf(fileTpl, string(data))
		}
		return fmt.Sprintf(stringTpl, p)
	case []string:
		return fmt.Sprintf(stringTpl, strings.Join(p, "\n"))
	}
	return ""
}
// Lock locks the candidate configuration so no other session can modify it.
func (j *Junos) Lock() error {
	reply, err := j.Session.Exec(netconf.RawRPC(rpcLock))
	if err != nil {
		return err
	}
	// Ranging a nil slice is a no-op; returns the first error, if any.
	for _, e := range reply.Errors {
		return errors.New(e.Message)
	}
	return nil
}
// NewSession establishes a new connection to a Junos device that we will use
// to run our commands against. NewSession also gathers software information
// about the device.
//
// Fixes relative to the original: a dial failure is returned as an error
// instead of calling log.Fatal (which kills the embedding process), and the
// unguarded PackageInfo[0]/SoftwareVersion[0]/version[1] indexing — which
// panicked on unexpected replies — now yields descriptive errors.
func NewSession(host, user, password string) (*Junos, error) {
	// Extracts the version string inside "[...]" from a package comment.
	rex := regexp.MustCompile(`^.*\[(.*)\]`)

	s, err := netconf.DialSSH(host, netconf.SSHConfigPassword(user, password))
	if err != nil {
		return nil, err
	}
	reply, err := s.Exec(netconf.RawRPC(rpcVersion))
	if err != nil {
		return nil, err
	}
	if len(reply.Errors) > 0 {
		return nil, errors.New(reply.Errors[0].Message)
	}

	// Multi-RE platforms wrap per-engine software-information blocks.
	if strings.Contains(reply.Data, "multi-routing-engine-results") {
		var facts versionRouteEngines
		if err := xml.Unmarshal([]byte(reply.Data), &facts); err != nil {
			return nil, err
		}
		numRE := len(facts.RE)
		if numRE == 0 {
			return nil, errors.New("no routing engines found in version reply")
		}
		res := make([]RoutingEngine, 0, numRE)
		for _, re := range facts.RE {
			version, err := parseSoftwareVersion(rex, re.PackageInfo)
			if err != nil {
				return nil, err
			}
			res = append(res, RoutingEngine{Model: strings.ToUpper(re.Platform), Version: version})
		}
		return &Junos{
			Session:        s,
			Hostname:       facts.RE[0].Hostname,
			RoutingEngines: numRE,
			Platform:       res,
		}, nil
	}

	// Single routing engine.
	var facts versionRouteEngine
	if err := xml.Unmarshal([]byte(reply.Data), &facts); err != nil {
		return nil, err
	}
	version, err := parseSoftwareVersion(rex, facts.PackageInfo)
	if err != nil {
		return nil, err
	}
	return &Junos{
		Session:        s,
		Hostname:       facts.Hostname,
		RoutingEngines: 1,
		Platform:       []RoutingEngine{{Model: strings.ToUpper(facts.Platform), Version: version}},
	}, nil
}

// parseSoftwareVersion pulls the Junos version (the text inside "[...]") out
// of the first package-information comment, returning an error instead of
// panicking when the reply is missing or malformed.
func parseSoftwareVersion(rex *regexp.Regexp, pkgs []versionPackageInfo) (string, error) {
	if len(pkgs) == 0 || len(pkgs[0].SoftwareVersion) == 0 {
		return "", errors.New("version information missing from device reply")
	}
	m := rex.FindStringSubmatch(pkgs[0].SoftwareVersion[0])
	if len(m) < 2 {
		return "", errors.New("could not parse software version from: " + pkgs[0].SoftwareVersion[0])
	}
	return m[1], nil
}
// Rescue will create or delete the rescue configuration given "save" or "delete."
// Any action other than "delete" performs a save.
func (j *Junos) Rescue(action string) error {
	// The rescue RPCs take no arguments, so no Sprintf formatting is needed
	// (fmt.Sprintf with no formatting verbs is flagged by go vet).
	command := rpcRescueSave
	if action == "delete" {
		command = rpcRescueDelete
	}
	reply, err := j.Session.Exec(netconf.RawRPC(command))
	if err != nil {
		return err
	}
	if len(reply.Errors) > 0 {
		return errors.New(reply.Errors[0].Message)
	}
	return nil
}
// RollbackConfig loads and commits the configuration of a given rollback or
// rescue state. <option> is either a rollback number or the string "rescue".
//
// Bug fixed: the original committed the candidate BEFORE inspecting
// reply.Errors, so a failed rollback load could still be committed. The error
// check now runs first and the commit result is returned directly.
func (j *Junos) RollbackConfig(option interface{}) error {
	command := fmt.Sprintf(rpcRollbackConfig, option)
	if option == "rescue" {
		// The rescue RPC is a fixed string; no formatting required.
		command = rpcRescueConfig
	}
	reply, err := j.Session.Exec(netconf.RawRPC(command))
	if err != nil {
		return err
	}
	if len(reply.Errors) > 0 {
		return errors.New(reply.Errors[0].Message)
	}
	return j.Commit()
}
// Unlock releases the lock on the candidate configuration.
func (j *Junos) Unlock() error {
	reply, err := j.Session.Exec(netconf.RawRPC(rpcUnlock))
	if err != nil {
		return err
	}
	// Returns the first RPC error, if any (nil slice ranges zero times).
	for _, e := range reply.Errors {
		return errors.New(e.Message)
	}
	return nil
}
// Reboot issues a system reboot request to the device.
func (j *Junos) Reboot() error {
	reply, err := j.Session.Exec(netconf.RawRPC(rpcReboot))
	if err != nil {
		return err
	}
	if es := reply.Errors; len(es) != 0 {
		return errors.New(es[0].Message)
	}
	return nil
}
|
package junos
import (
"fmt"
"github.com/Juniper/go-netconf/netconf"
"log"
)
// Session wraps an established NETCONF session to a Junos device.
type Session struct {
	// Conn is the underlying NETCONF connection all RPCs run over.
	Conn *netconf.Session
}
// NewSession dials a NETCONF-over-SSH connection to host and returns a
// Session wrapping it. The process exits (log.Fatal) on a dial failure.
//
// Bug fixed: the original had `defer s.Close()`, which closed the connection
// as soon as this function returned — the caller received a dead session.
// The caller now owns the connection and must close it when finished.
func NewSession(host, user, password string) *Session {
	s, err := netconf.DialSSH(host, netconf.SSHConfigPassword(user, password))
	if err != nil {
		log.Fatal(err)
	}
	return &Session{
		Conn: s,
	}
}
// Lock locks the candidate configuration, printing any RPC error to stdout.
//
// Bug fixed: the original assigned the reply to a variable `resp` that was
// never used — a Go compile error ("declared and not used"). The reply is
// now discarded explicitly.
func (s *Session) Lock() {
	if _, err := s.Conn.Exec("<rpc><lock-configuration/></rpc>"); err != nil {
		fmt.Printf("Error: %+v\n", err)
	}
}
// Unlock releases the candidate-configuration lock, printing any RPC error
// to stdout.
//
// Bug fixed: the original's unused `resp` variable did not compile; the
// reply is now discarded explicitly.
func (s *Session) Unlock() {
	if _, err := s.Conn.Exec("<rpc><unlock-configuration/></rpc>"); err != nil {
		fmt.Printf("Error: %+v\n", err)
	}
}
Updated functions (the reply is now captured and printed):
package junos
import (
"fmt"
"github.com/Juniper/go-netconf/netconf"
"log"
)
// Session wraps an established NETCONF session to a Junos device.
type Session struct {
	// Conn is the underlying NETCONF connection all RPCs run over.
	Conn *netconf.Session
}
// NewSession dials a NETCONF-over-SSH connection to host and returns a
// Session wrapping it. The process exits (log.Fatal) on a dial failure.
//
// Bug fixed: the original's `defer s.Close()` closed the connection before
// the caller could use it. The caller now owns the connection and must close
// it when finished.
func NewSession(host, user, password string) *Session {
	s, err := netconf.DialSSH(host, netconf.SSHConfigPassword(user, password))
	if err != nil {
		log.Fatal(err)
	}
	return &Session{
		Conn: s,
	}
}
// Lock locks the candidate configuration and prints the RPC reply.
//
// Fix: on an Exec error the original fell through and printed the (nil)
// reply as well; we now return after printing the error.
func (s *Session) Lock() {
	resp, err := s.Conn.Exec("<rpc><lock-configuration/></rpc>")
	if err != nil {
		fmt.Printf("Error: %+v\n", err)
		return
	}
	fmt.Printf("%+v\n", resp)
}
func (s *Session) Unlock() {
resp, err := s.Conn.Exec("<rpc><unlock-configuration/></rpc>")
if err != nil {
fmt.Printf("Error: %+v\n", err)
}
fmt.Printf("%+v\n", resp)
} |
package karts
import (
"fmt"
"log"
"net/http"
"github.com/julienschmidt/httprouter"
"github.com/firedrake969/karts/staticfiles"
"github.com/firedrake969/karts/views"
)
// RunKarts wires up the HTTP router: it registers every discovered static
// file handler plus GET and POST handlers for each supplied view, then
// serves on port 3000, blocking until the server exits.
func RunKarts(routes map[string]views.View) {
	fmt.Println("Starting...")
	router := httprouter.New()

	// Idiom: range over the values directly instead of ranging keys/indices
	// and re-indexing the collection.
	for _, sf := range staticfiles.GetStaticfiles() {
		router.GET(sf.Servedpath, sf.Serve)
	}

	// Each route serves both GET and POST through its view.
	for pattern, view := range routes {
		router.GET(pattern, view.HandleGet)
		router.POST(pattern, view.HandlePost)
	}

	log.Fatal(http.ListenAndServe(":3000", router))
}
With a godoc comment added:
package karts
import (
"fmt"
"log"
"net/http"
"github.com/julienschmidt/httprouter"
"github.com/firedrake969/karts/staticfiles"
"github.com/firedrake969/karts/views"
)
// This runs everything. Pass it a mapping of strings (url routes)
// to views.View structs and it will serve both your views and
// staticfiles.
func RunKarts(routes map[string]views.View) {
fmt.Println("Starting...")
router := httprouter.New()
staticlist := staticfiles.GetStaticfiles()
for staticfile := range staticlist {
router.GET(staticlist[staticfile].Servedpath, staticlist[staticfile].Serve)
}
for k := range routes {
route := routes[k]
router.GET(k, route.HandleGet)
router.POST(k, route.HandlePost)
}
log.Fatal(http.ListenAndServe(":3000", router))
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.